aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDan Willemsen <dwillemsen@google.com>2023-03-15 13:19:36 -0400
committerDan Willemsen <dwillemsen@google.com>2023-03-15 14:18:08 -0400
commit09c5a32afc5b66f28f166a68afe1fc71afbf9b73 (patch)
tree194d7b0e539d014393564a256bec571e18d6533a
parentf10932f763d058b0dcb3acfb795c869996fef47b (diff)
parent031fc75960d487b0b15db12fb328676236a3a39c (diff)
downloadgolang-x-tools-master.tar.gz
Upgrade golang-x-tools to v0.7.0HEADmastermain
Not using external_updater this time to switch to the new upstream tags. Test: treehugger Change-Id: I31488b4958a366ed7f183bb387d3e1446acc13ae
-rw-r--r--AUTHORS3
-rw-r--r--CONTRIBUTORS3
-rw-r--r--METADATA6
-rw-r--r--README.md96
-rw-r--r--cmd/auth/cookieauth/cookieauth.go4
-rw-r--r--cmd/auth/gitauth/gitauth.go3
-rw-r--r--cmd/auth/netrcauth/netrcauth.go3
-rw-r--r--cmd/benchcmp/compare.go4
-rw-r--r--cmd/benchcmp/doc.go2
-rw-r--r--cmd/bundle/main.go39
-rw-r--r--cmd/callgraph/main.go12
-rw-r--r--cmd/callgraph/main_test.go21
-rw-r--r--cmd/compilebench/main.go172
-rw-r--r--cmd/cover/README.md3
-rw-r--r--cmd/cover/cover.go722
-rw-r--r--cmd/cover/cover_test.go108
-rw-r--r--cmd/cover/doc.go24
-rw-r--r--cmd/cover/func.go166
-rw-r--r--cmd/cover/html.go284
-rw-r--r--cmd/cover/testdata/main.go112
-rw-r--r--cmd/cover/testdata/test.go218
-rw-r--r--cmd/digraph/digraph.go49
-rw-r--r--cmd/digraph/digraph_test.go4
-rw-r--r--cmd/file2fuzz/main.go1
-rw-r--r--cmd/fiximports/main.go56
-rw-r--r--cmd/fiximports/main_test.go3
-rw-r--r--cmd/godex/doc.go1
-rw-r--r--cmd/godoc/doc.go1
-rw-r--r--cmd/godoc/godoc_test.go189
-rw-r--r--cmd/godoc/main.go14
-rw-r--r--cmd/goimports/doc.go25
-rw-r--r--cmd/gorename/main.go1
-rw-r--r--cmd/gotype/gotype.go4
-rw-r--r--cmd/goyacc/doc.go2
-rw-r--r--cmd/goyacc/yacc.go78
-rw-r--r--cmd/guru/TODO11
-rw-r--r--cmd/guru/callers.go1
-rw-r--r--cmd/guru/callstack.go1
-rw-r--r--cmd/guru/describe.go15
-rw-r--r--cmd/guru/freevars.go1
-rw-r--r--cmd/guru/guru.go20
-rw-r--r--cmd/guru/implements.go1
-rw-r--r--cmd/guru/main.go3
-rw-r--r--cmd/guru/pointsto.go3
-rw-r--r--cmd/guru/pos.go3
-rw-r--r--cmd/guru/referrers.go3
-rw-r--r--cmd/guru/serial/serial.go45
-rw-r--r--cmd/guru/what.go2
-rw-r--r--cmd/present/dir.go11
-rw-r--r--cmd/present/doc.go35
-rw-r--r--cmd/present/main.go28
-rw-r--r--cmd/present/play.go25
-rw-r--r--cmd/present/static/article.css16
-rw-r--r--cmd/present/static/styles.css19
-rw-r--r--cmd/present2md/main.go1
-rw-r--r--cmd/signature-fuzzer/fuzz-runner/runner.go2
-rw-r--r--cmd/signature-fuzzer/internal/fuzz-generator/generator.go8
-rw-r--r--cmd/splitdwarf/splitdwarf.go6
-rw-r--r--cmd/ssadump/main.go25
-rw-r--r--cmd/stress/stress.go4
-rw-r--r--cmd/stringer/endtoend_test.go59
-rw-r--r--cmd/stringer/golden_test.go10
-rw-r--r--cmd/stringer/stringer.go8
-rwxr-xr-xcmd/toolstash/buildall4
-rw-r--r--cmd/toolstash/main.go15
-rw-r--r--container/intsets/sparse.go23
-rw-r--r--copyright/copyright.go3
-rw-r--r--go.mod15
-rw-r--r--go.sum43
-rw-r--r--go/analysis/analysis.go15
-rw-r--r--go/analysis/analysistest/analysistest.go96
-rw-r--r--go/analysis/diagnostic.go2
-rw-r--r--go/analysis/doc.go44
-rw-r--r--go/analysis/internal/analysisflags/flags.go75
-rw-r--r--go/analysis/internal/analysisflags/flags_test.go7
-rw-r--r--go/analysis/internal/checker/checker.go296
-rw-r--r--go/analysis/internal/checker/checker_test.go90
-rw-r--r--go/analysis/internal/checker/fix_test.go309
-rw-r--r--go/analysis/internal/checker/start_test.go85
-rw-r--r--go/analysis/internal/facts/facts.go323
-rw-r--r--go/analysis/internal/facts/facts_test.go384
-rw-r--r--go/analysis/internal/facts/imports.go119
-rw-r--r--go/analysis/passes/asmdecl/arches_go118.go12
-rw-r--r--go/analysis/passes/asmdecl/arches_go119.go14
-rw-r--r--go/analysis/passes/asmdecl/asmdecl.go5
-rw-r--r--go/analysis/passes/asmdecl/asmdecl_test.go19
-rw-r--r--go/analysis/passes/asmdecl/testdata/src/a/asm10.s192
-rw-r--r--go/analysis/passes/asmdecl/testdata/src/a/asm11.s13
-rw-r--r--go/analysis/passes/assign/assign.go15
-rw-r--r--go/analysis/passes/assign/testdata/src/a/a.go28
-rw-r--r--go/analysis/passes/assign/testdata/src/a/a.go.golden28
-rw-r--r--go/analysis/passes/bools/bools.go12
-rw-r--r--go/analysis/passes/buildssa/buildssa_test.go37
-rw-r--r--go/analysis/passes/buildssa/testdata/src/b/b.go20
-rw-r--r--go/analysis/passes/buildssa/testdata/src/c/c.go24
-rw-r--r--go/analysis/passes/buildtag/buildtag.go2
-rw-r--r--go/analysis/passes/buildtag/buildtag_old.go2
-rw-r--r--go/analysis/passes/cgocall/cgocall.go16
-rw-r--r--go/analysis/passes/composite/composite.go41
-rw-r--r--go/analysis/passes/composite/composite_test.go2
-rw-r--r--go/analysis/passes/composite/testdata/src/a/a.go17
-rw-r--r--go/analysis/passes/composite/testdata/src/a/a.go.golden144
-rw-r--r--go/analysis/passes/composite/testdata/src/a/a_fuzz_test.go.golden16
-rw-r--r--go/analysis/passes/composite/testdata/src/typeparams/typeparams.go10
-rw-r--r--go/analysis/passes/composite/testdata/src/typeparams/typeparams.go.golden27
-rw-r--r--go/analysis/passes/copylock/copylock.go2
-rw-r--r--go/analysis/passes/copylock/testdata/src/a/copylock.go42
-rw-r--r--go/analysis/passes/copylock/testdata/src/a/copylock_func.go2
-rw-r--r--go/analysis/passes/directive/directive.go216
-rw-r--r--go/analysis/passes/directive/directive_test.go39
-rw-r--r--go/analysis/passes/directive/testdata/src/a/badspace.go11
-rw-r--r--go/analysis/passes/directive/testdata/src/a/misplaced.go10
-rw-r--r--go/analysis/passes/directive/testdata/src/a/misplaced.s19
-rw-r--r--go/analysis/passes/directive/testdata/src/a/misplaced_test.go10
-rw-r--r--go/analysis/passes/directive/testdata/src/a/p.go11
-rw-r--r--go/analysis/passes/errorsas/errorsas.go28
-rw-r--r--go/analysis/passes/errorsas/testdata/src/a/a.go4
-rw-r--r--go/analysis/passes/errorsas/testdata/src/typeparams/typeparams.go2
-rw-r--r--go/analysis/passes/fieldalignment/fieldalignment.go7
-rw-r--r--go/analysis/passes/httpresponse/httpresponse.go27
-rw-r--r--go/analysis/passes/httpresponse/httpresponse_test.go3
-rw-r--r--go/analysis/passes/httpresponse/testdata/src/a/a.go27
-rw-r--r--go/analysis/passes/ifaceassert/parameterized.go1
-rw-r--r--go/analysis/passes/inspect/inspect.go15
-rw-r--r--go/analysis/passes/loopclosure/loopclosure.go408
-rw-r--r--go/analysis/passes/loopclosure/loopclosure_test.go4
-rw-r--r--go/analysis/passes/loopclosure/testdata/src/a/a.go131
-rw-r--r--go/analysis/passes/loopclosure/testdata/src/a/b.go9
-rw-r--r--go/analysis/passes/loopclosure/testdata/src/subtests/subtest.go202
-rw-r--r--go/analysis/passes/nilness/nilness.go18
-rw-r--r--go/analysis/passes/nilness/nilness_test.go17
-rw-r--r--go/analysis/passes/nilness/testdata/src/a/a.go41
-rw-r--r--go/analysis/passes/nilness/testdata/src/c/c.go14
-rw-r--r--go/analysis/passes/nilness/testdata/src/d/d.go55
-rw-r--r--go/analysis/passes/pkgfact/pkgfact.go8
-rw-r--r--go/analysis/passes/printf/printf.go27
-rw-r--r--go/analysis/passes/printf/testdata/src/a/a.go3
-rw-r--r--go/analysis/passes/printf/testdata/src/typeparams/diagnostics.go22
-rw-r--r--go/analysis/passes/printf/types.go10
-rw-r--r--go/analysis/passes/shadow/shadow.go1
-rw-r--r--go/analysis/passes/sigchanyzer/sigchanyzer.go2
-rw-r--r--go/analysis/passes/sortslice/analyzer.go9
-rw-r--r--go/analysis/passes/sortslice/testdata/src/a/a.go24
-rw-r--r--go/analysis/passes/stdmethods/stdmethods.go13
-rw-r--r--go/analysis/passes/stdmethods/testdata/src/a/a.go14
-rw-r--r--go/analysis/passes/stdmethods/testdata/src/typeparams/typeparams.go4
-rw-r--r--go/analysis/passes/tests/testdata/src/a/go118_test.go5
-rw-r--r--go/analysis/passes/tests/tests.go30
-rw-r--r--go/analysis/passes/timeformat/testdata/src/a/a.go50
-rw-r--r--go/analysis/passes/timeformat/testdata/src/a/a.go.golden50
-rw-r--r--go/analysis/passes/timeformat/testdata/src/b/b.go11
-rw-r--r--go/analysis/passes/timeformat/timeformat.go129
-rw-r--r--go/analysis/passes/timeformat/timeformat_test.go17
-rw-r--r--go/analysis/passes/unusedwrite/unusedwrite.go53
-rw-r--r--go/analysis/singlechecker/singlechecker.go15
-rw-r--r--go/analysis/unitchecker/main.go4
-rw-r--r--go/analysis/unitchecker/unitchecker.go30
-rw-r--r--go/analysis/unitchecker/unitchecker_test.go81
-rw-r--r--go/analysis/validate.go5
-rw-r--r--go/analysis/validate_test.go34
-rw-r--r--go/ast/astutil/enclosing.go29
-rw-r--r--go/ast/astutil/enclosing_test.go1
-rw-r--r--go/ast/astutil/imports.go7
-rw-r--r--go/ast/astutil/imports_test.go28
-rw-r--r--go/ast/astutil/rewrite.go11
-rw-r--r--go/ast/astutil/rewrite_test.go18
-rw-r--r--go/ast/inspector/inspector.go68
-rw-r--r--go/ast/inspector/inspector_test.go44
-rw-r--r--go/ast/inspector/typeof.go18
-rw-r--r--go/buildutil/allpackages.go11
-rw-r--r--go/buildutil/fakecontext.go1
-rw-r--r--go/buildutil/overlay.go3
-rw-r--r--go/buildutil/tags.go3
-rw-r--r--go/buildutil/util.go5
-rw-r--r--go/callgraph/callgraph.go5
-rw-r--r--go/callgraph/callgraph_test.go253
-rw-r--r--go/callgraph/cha/cha.go104
-rw-r--r--go/callgraph/cha/cha_test.go89
-rw-r--r--go/callgraph/cha/testdata/generics.go49
-rw-r--r--go/callgraph/rta/rta.go8
-rw-r--r--go/callgraph/rta/rta_test.go92
-rw-r--r--go/callgraph/rta/testdata/generics.go79
-rw-r--r--go/callgraph/static/static.go3
-rw-r--r--go/callgraph/static/static_test.go116
-rw-r--r--go/callgraph/util.go4
-rw-r--r--go/callgraph/vta/graph.go126
-rw-r--r--go/callgraph/vta/graph_test.go7
-rw-r--r--go/callgraph/vta/helpers_test.go8
-rw-r--r--go/callgraph/vta/internal/trie/bits.go25
-rw-r--r--go/callgraph/vta/internal/trie/builder.go24
-rw-r--r--go/callgraph/vta/internal/trie/trie.go12
-rw-r--r--go/callgraph/vta/propagation.go57
-rw-r--r--go/callgraph/vta/propagation_test.go61
-rw-r--r--go/callgraph/vta/testdata/src/callgraph_generics.go71
-rw-r--r--go/callgraph/vta/testdata/src/callgraph_issue_57756.go67
-rw-r--r--go/callgraph/vta/testdata/src/callgraph_recursive_types.go56
-rw-r--r--go/callgraph/vta/testdata/src/function_alias.go44
-rw-r--r--go/callgraph/vta/testdata/src/panic.go3
-rw-r--r--go/callgraph/vta/utils.go110
-rw-r--r--go/callgraph/vta/vta.go33
-rw-r--r--go/callgraph/vta/vta_go117_test.go3
-rw-r--r--go/callgraph/vta/vta_test.go28
-rw-r--r--go/cfg/builder.go3
-rw-r--r--go/cfg/cfg.go17
-rw-r--r--go/expect/expect.go13
-rw-r--r--go/expect/expect_test.go2
-rw-r--r--go/expect/testdata/go.fake.mod9
-rw-r--r--go/expect/testdata/go.mod5
-rw-r--r--go/gccgoexportdata/gccgoexportdata_test.go12
-rw-r--r--go/gcexportdata/example_test.go36
-rw-r--r--go/gcexportdata/gcexportdata.go81
-rw-r--r--go/gcexportdata/gcexportdata_test.go45
-rw-r--r--go/gcexportdata/importer.go2
-rw-r--r--go/gcexportdata/testdata/errors-ae16.abin5494 -> 0 bytes
-rw-r--r--go/internal/cgo/cgo.go1
-rw-r--r--go/internal/gccgoimporter/parser.go145
-rw-r--r--go/internal/gccgoimporter/testenv_test.go4
-rw-r--r--go/internal/gcimporter/bexport.go851
-rw-r--r--go/internal/gcimporter/bexport_test.go551
-rw-r--r--go/internal/gcimporter/gcimporter.go1084
-rw-r--r--go/internal/gcimporter/gcimporter_test.go611
-rw-r--r--go/internal/gcimporter/iexport.go1010
-rw-r--r--go/internal/gcimporter/iexport_go118_test.go254
-rw-r--r--go/internal/gcimporter/iexport_test.go405
-rw-r--r--go/internal/gcimporter/iimport.go898
-rw-r--r--go/internal/gcimporter/support_go118.go23
-rw-r--r--go/loader/doc.go42
-rw-r--r--go/loader/loader.go21
-rw-r--r--go/loader/stdlib_test.go2
-rw-r--r--go/loader/util.go1
-rw-r--r--go/packages/doc.go1
-rw-r--r--go/packages/golist.go108
-rw-r--r--go/packages/overlay_test.go7
-rw-r--r--go/packages/packages.go103
-rw-r--r--go/packages/packages_test.go75
-rw-r--r--go/packages/packagestest/expect.go140
-rw-r--r--go/packages/packagestest/expect_test.go3
-rw-r--r--go/packages/packagestest/export.go35
-rw-r--r--go/packages/packagestest/gopath.go41
-rw-r--r--go/packages/packagestest/modules.go29
-rw-r--r--go/packages/packagestest/modules_test.go2
-rw-r--r--go/pointer/analysis.go30
-rw-r--r--go/pointer/api.go16
-rw-r--r--go/pointer/callgraph.go1
-rw-r--r--go/pointer/doc.go637
-rw-r--r--go/pointer/example_test.go3
-rw-r--r--go/pointer/gen.go57
-rw-r--r--go/pointer/hvn.go30
-rw-r--r--go/pointer/intrinsics.go2
-rw-r--r--go/pointer/labels.go48
-rw-r--r--go/pointer/opt.go1
-rw-r--r--go/pointer/pointer_race_test.go12
-rw-r--r--go/pointer/pointer_test.go293
-rw-r--r--go/pointer/reflect.go12
-rw-r--r--go/pointer/solve.go4
-rw-r--r--go/pointer/stdlib_test.go2
-rw-r--r--go/pointer/testdata/typeparams.go68
-rw-r--r--go/pointer/util.go28
-rw-r--r--go/ssa/TODO16
-rw-r--r--go/ssa/block.go5
-rw-r--r--go/ssa/blockopt.go4
-rw-r--r--go/ssa/builder.go616
-rw-r--r--go/ssa/builder_generic_test.go679
-rw-r--r--go/ssa/builder_go117_test.go1
-rw-r--r--go/ssa/builder_go120_test.go102
-rw-r--r--go/ssa/builder_test.go517
-rw-r--r--go/ssa/const.go154
-rw-r--r--go/ssa/const_test.go104
-rw-r--r--go/ssa/coretype.go159
-rw-r--r--go/ssa/coretype_test.go105
-rw-r--r--go/ssa/create.go68
-rw-r--r--go/ssa/doc.go100
-rw-r--r--go/ssa/dom.go8
-rw-r--r--go/ssa/emit.go211
-rw-r--r--go/ssa/example_test.go6
-rw-r--r--go/ssa/func.go169
-rw-r--r--go/ssa/instantiate.go177
-rw-r--r--go/ssa/instantiate_test.go361
-rw-r--r--go/ssa/interp/interp.go65
-rw-r--r--go/ssa/interp/interp_go120_test.go12
-rw-r--r--go/ssa/interp/interp_test.go97
-rw-r--r--go/ssa/interp/map.go2
-rw-r--r--go/ssa/interp/ops.go87
-rw-r--r--go/ssa/interp/reflect.go6
-rw-r--r--go/ssa/interp/testdata/boundmeth.go3
-rw-r--r--go/ssa/interp/testdata/convert.go9
-rw-r--r--go/ssa/interp/testdata/deepequal.go93
-rw-r--r--go/ssa/interp/testdata/fixedbugs/issue52342.go17
-rw-r--r--go/ssa/interp/testdata/fixedbugs/issue52835.go27
-rw-r--r--go/ssa/interp/testdata/fixedbugs/issue55086.go132
-rw-r--r--go/ssa/interp/testdata/slice2array.go92
-rw-r--r--go/ssa/interp/testdata/slice2arrayptr.go2
-rw-r--r--go/ssa/interp/testdata/src/encoding/encoding.go15
-rw-r--r--go/ssa/interp/testdata/src/log/log.go8
-rw-r--r--go/ssa/interp/testdata/src/reflect/deepequal.go109
-rw-r--r--go/ssa/interp/testdata/src/reflect/reflect.go13
-rw-r--r--go/ssa/interp/testdata/typeassert.go32
-rw-r--r--go/ssa/interp/testdata/width32.go42
-rw-r--r--go/ssa/interp/testdata/zeros.go45
-rw-r--r--go/ssa/lift.go13
-rw-r--r--go/ssa/lvalue.go49
-rw-r--r--go/ssa/methods.go111
-rw-r--r--go/ssa/methods_test.go96
-rw-r--r--go/ssa/mode.go12
-rw-r--r--go/ssa/parameterized.go12
-rw-r--r--go/ssa/print.go32
-rw-r--r--go/ssa/sanity.go31
-rw-r--r--go/ssa/source.go50
-rw-r--r--go/ssa/source_test.go7
-rw-r--r--go/ssa/ssa.go397
-rw-r--r--go/ssa/ssautil/load.go8
-rw-r--r--go/ssa/ssautil/load_test.go120
-rw-r--r--go/ssa/ssautil/switch.go4
-rw-r--r--go/ssa/ssautil/switch_test.go2
-rw-r--r--go/ssa/ssautil/visit.go1
-rw-r--r--go/ssa/stdlib_test.go19
-rw-r--r--go/ssa/subst.go113
-rw-r--r--go/ssa/subst_test.go6
-rw-r--r--go/ssa/testdata/src/README.txt5
-rw-r--r--go/ssa/testdata/src/bytes/bytes.go3
-rw-r--r--go/ssa/testdata/src/context/context.go7
-rw-r--r--go/ssa/testdata/src/encoding/encoding.go9
-rw-r--r--go/ssa/testdata/src/encoding/json/json.go4
-rw-r--r--go/ssa/testdata/src/encoding/xml/xml.go4
-rw-r--r--go/ssa/testdata/src/errors/errors.go3
-rw-r--r--go/ssa/testdata/src/fmt/fmt.go11
-rw-r--r--go/ssa/testdata/src/io/io.go5
-rw-r--r--go/ssa/testdata/src/log/log.go5
-rw-r--r--go/ssa/testdata/src/math/math.go15
-rw-r--r--go/ssa/testdata/src/os/os.go5
-rw-r--r--go/ssa/testdata/src/reflect/reflect.go40
-rw-r--r--go/ssa/testdata/src/runtime/runtime.go5
-rw-r--r--go/ssa/testdata/src/sort/sort.go13
-rw-r--r--go/ssa/testdata/src/strconv/strconv.go6
-rw-r--r--go/ssa/testdata/src/strings/strings.go13
-rw-r--r--go/ssa/testdata/src/sync/atomic/atomic.go5
-rw-r--r--go/ssa/testdata/src/sync/sync.go12
-rw-r--r--go/ssa/testdata/src/time/time.go24
-rw-r--r--go/ssa/testdata/src/unsafe/unsafe.go4
-rw-r--r--go/ssa/testdata/valueforexpr.go1
-rw-r--r--go/ssa/util.go258
-rw-r--r--go/ssa/wrappers.go196
-rw-r--r--go/types/objectpath/objectpath.go261
-rw-r--r--go/types/objectpath/objectpath_test.go23
-rw-r--r--go/types/typeutil/imports.go1
-rw-r--r--go/types/typeutil/map.go84
-rw-r--r--go/types/typeutil/map_test.go25
-rw-r--r--go/types/typeutil/methodsetcache.go1
-rw-r--r--go/types/typeutil/ui.go1
-rw-r--r--go/vcs/vcs.go1
-rw-r--r--godoc/analysis/analysis.go3
-rw-r--r--godoc/dirtrees.go4
-rw-r--r--godoc/format.go23
-rw-r--r--godoc/godoc.go21
-rw-r--r--godoc/index.go8
-rw-r--r--godoc/linkify.go3
-rw-r--r--godoc/meta.go7
-rw-r--r--godoc/redirect/hash.go138
-rw-r--r--godoc/redirect/redirect.go272
-rw-r--r--godoc/redirect/redirect_test.go51
-rw-r--r--godoc/redirect/rietveld.go1093
-rw-r--r--godoc/server.go4
-rw-r--r--godoc/spot.go5
-rw-r--r--godoc/static/package.html22
-rw-r--r--godoc/static/searchdoc.html2
-rw-r--r--godoc/static/static.go4
-rw-r--r--godoc/tohtml_go119.go17
-rw-r--r--godoc/tohtml_other.go17
-rw-r--r--godoc/util/throttle.go3
-rw-r--r--godoc/vfs/namespace.go2
-rw-r--r--godoc/vfs/zipfs/zipfs.go16
-rw-r--r--gopls/README.md80
-rw-r--r--gopls/api-diff/api_diff.go263
-rw-r--r--gopls/doc/advanced.md38
-rw-r--r--gopls/doc/analyzers.md125
-rw-r--r--gopls/doc/commands.md110
-rw-r--r--gopls/doc/contributing.md4
-rw-r--r--gopls/doc/design/implementation.md2
-rw-r--r--gopls/doc/design/integrating.md8
-rw-r--r--gopls/doc/generate.go94
-rw-r--r--gopls/doc/generate_test.go8
-rw-r--r--gopls/doc/inlayHints.md80
-rw-r--r--gopls/doc/releases.md25
-rw-r--r--gopls/doc/semantictokens.md2
-rw-r--r--gopls/doc/settings.md141
-rw-r--r--gopls/doc/vim.md51
-rw-r--r--gopls/doc/workspace.md16
-rw-r--r--gopls/go.mod26
-rw-r--r--gopls/go.sum79
-rw-r--r--gopls/internal/coverage/coverage.go21
-rw-r--r--gopls/internal/govulncheck/semver/semver.go51
-rw-r--r--gopls/internal/govulncheck/semver/semver_test.go28
-rw-r--r--gopls/internal/govulncheck/types.go37
-rw-r--r--gopls/internal/govulncheck/types_118.go43
-rw-r--r--gopls/internal/govulncheck/types_not118.go126
-rw-r--r--gopls/internal/govulncheck/util.go36
-rw-r--r--gopls/internal/govulncheck/vulncache.go105
-rw-r--r--gopls/internal/hooks/analysis.go60
-rw-r--r--gopls/internal/hooks/analysis_115.go12
-rw-r--r--gopls/internal/hooks/analysis_116.go14
-rw-r--r--gopls/internal/hooks/analysis_119.go62
-rw-r--r--gopls/internal/hooks/diff.go148
-rw-r--r--gopls/internal/hooks/diff_test.go25
-rwxr-xr-xgopls/internal/hooks/gen-licenses.sh2
-rw-r--r--gopls/internal/hooks/gofumpt_117.go13
-rw-r--r--gopls/internal/hooks/gofumpt_118.go24
-rw-r--r--gopls/internal/hooks/hooks.go25
-rw-r--r--gopls/internal/hooks/licenses_test.go7
-rw-r--r--gopls/internal/lsp/README.md (renamed from internal/lsp/README.md)0
-rw-r--r--gopls/internal/lsp/analysis/embeddirective/embeddirective.go58
-rw-r--r--gopls/internal/lsp/analysis/embeddirective/embeddirective_test.go22
-rw-r--r--gopls/internal/lsp/analysis/embeddirective/testdata/src/a/a.go13
-rw-r--r--gopls/internal/lsp/analysis/embeddirective/testdata/src/a/b.go14
-rw-r--r--gopls/internal/lsp/analysis/embeddirective/testdata/src/a/embedText1
-rw-r--r--gopls/internal/lsp/analysis/fillreturns/fillreturns.go279
-rw-r--r--gopls/internal/lsp/analysis/fillreturns/fillreturns_test.go22
-rw-r--r--gopls/internal/lsp/analysis/fillreturns/testdata/src/a/a.go (renamed from internal/lsp/analysis/fillreturns/testdata/src/a/a.go)0
-rw-r--r--gopls/internal/lsp/analysis/fillreturns/testdata/src/a/a.go.golden (renamed from internal/lsp/analysis/fillreturns/testdata/src/a/a.go.golden)0
-rw-r--r--gopls/internal/lsp/analysis/fillreturns/testdata/src/a/typeparams/a.go (renamed from internal/lsp/analysis/fillreturns/testdata/src/a/typeparams/a.go)0
-rw-r--r--gopls/internal/lsp/analysis/fillreturns/testdata/src/a/typeparams/a.go.golden (renamed from internal/lsp/analysis/fillreturns/testdata/src/a/typeparams/a.go.golden)0
-rw-r--r--gopls/internal/lsp/analysis/fillstruct/fillstruct.go506
-rw-r--r--gopls/internal/lsp/analysis/fillstruct/fillstruct_test.go22
-rw-r--r--gopls/internal/lsp/analysis/fillstruct/testdata/src/a/a.go113
-rw-r--r--gopls/internal/lsp/analysis/fillstruct/testdata/src/b/b.go (renamed from internal/lsp/analysis/fillstruct/testdata/src/b/b.go)0
-rw-r--r--gopls/internal/lsp/analysis/fillstruct/testdata/src/typeparams/typeparams.go50
-rw-r--r--gopls/internal/lsp/analysis/infertypeargs/infertypeargs.go (renamed from internal/lsp/analysis/infertypeargs/infertypeargs.go)0
-rw-r--r--gopls/internal/lsp/analysis/infertypeargs/infertypeargs_test.go21
-rw-r--r--gopls/internal/lsp/analysis/infertypeargs/run_go117.go (renamed from internal/lsp/analysis/infertypeargs/run_go117.go)0
-rw-r--r--gopls/internal/lsp/analysis/infertypeargs/run_go118.go (renamed from internal/lsp/analysis/infertypeargs/run_go118.go)0
-rw-r--r--gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/basic.go (renamed from internal/lsp/analysis/infertypeargs/testdata/src/a/basic.go)0
-rw-r--r--gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/basic.go.golden (renamed from internal/lsp/analysis/infertypeargs/testdata/src/a/basic.go.golden)0
-rw-r--r--gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/imported.go (renamed from internal/lsp/analysis/infertypeargs/testdata/src/a/imported.go)0
-rw-r--r--gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/imported.go.golden (renamed from internal/lsp/analysis/infertypeargs/testdata/src/a/imported.go.golden)0
-rw-r--r--gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/imported/imported.go (renamed from internal/lsp/analysis/infertypeargs/testdata/src/a/imported/imported.go)0
-rw-r--r--gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/notypechange.go (renamed from internal/lsp/analysis/infertypeargs/testdata/src/a/notypechange.go)0
-rw-r--r--gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/notypechange.go.golden (renamed from internal/lsp/analysis/infertypeargs/testdata/src/a/notypechange.go.golden)0
-rw-r--r--gopls/internal/lsp/analysis/nonewvars/nonewvars.go95
-rw-r--r--gopls/internal/lsp/analysis/nonewvars/nonewvars_test.go22
-rw-r--r--gopls/internal/lsp/analysis/nonewvars/testdata/src/a/a.go (renamed from internal/lsp/analysis/nonewvars/testdata/src/a/a.go)0
-rw-r--r--gopls/internal/lsp/analysis/nonewvars/testdata/src/a/a.go.golden (renamed from internal/lsp/analysis/nonewvars/testdata/src/a/a.go.golden)0
-rw-r--r--gopls/internal/lsp/analysis/nonewvars/testdata/src/typeparams/a.go (renamed from internal/lsp/analysis/nonewvars/testdata/src/typeparams/a.go)0
-rw-r--r--gopls/internal/lsp/analysis/nonewvars/testdata/src/typeparams/a.go.golden (renamed from internal/lsp/analysis/nonewvars/testdata/src/typeparams/a.go.golden)0
-rw-r--r--gopls/internal/lsp/analysis/noresultvalues/noresultvalues.go92
-rw-r--r--gopls/internal/lsp/analysis/noresultvalues/noresultvalues_test.go22
-rw-r--r--gopls/internal/lsp/analysis/noresultvalues/testdata/src/a/a.go (renamed from internal/lsp/analysis/noresultvalues/testdata/src/a/a.go)0
-rw-r--r--gopls/internal/lsp/analysis/noresultvalues/testdata/src/a/a.go.golden (renamed from internal/lsp/analysis/noresultvalues/testdata/src/a/a.go.golden)0
-rw-r--r--gopls/internal/lsp/analysis/noresultvalues/testdata/src/typeparams/a.go (renamed from internal/lsp/analysis/noresultvalues/testdata/src/typeparams/a.go)0
-rw-r--r--gopls/internal/lsp/analysis/noresultvalues/testdata/src/typeparams/a.go.golden (renamed from internal/lsp/analysis/noresultvalues/testdata/src/typeparams/a.go.golden)0
-rw-r--r--gopls/internal/lsp/analysis/simplifycompositelit/simplifycompositelit.go (renamed from internal/lsp/analysis/simplifycompositelit/simplifycompositelit.go)0
-rw-r--r--gopls/internal/lsp/analysis/simplifycompositelit/simplifycompositelit_test.go17
-rw-r--r--gopls/internal/lsp/analysis/simplifycompositelit/testdata/src/a/a.go (renamed from internal/lsp/analysis/simplifycompositelit/testdata/src/a/a.go)0
-rw-r--r--gopls/internal/lsp/analysis/simplifycompositelit/testdata/src/a/a.go.golden (renamed from internal/lsp/analysis/simplifycompositelit/testdata/src/a/a.go.golden)0
-rw-r--r--gopls/internal/lsp/analysis/simplifyrange/simplifyrange.go (renamed from internal/lsp/analysis/simplifyrange/simplifyrange.go)0
-rw-r--r--gopls/internal/lsp/analysis/simplifyrange/simplifyrange_test.go17
-rw-r--r--gopls/internal/lsp/analysis/simplifyrange/testdata/src/a/a.go (renamed from internal/lsp/analysis/simplifyrange/testdata/src/a/a.go)0
-rw-r--r--gopls/internal/lsp/analysis/simplifyrange/testdata/src/a/a.go.golden (renamed from internal/lsp/analysis/simplifyrange/testdata/src/a/a.go.golden)0
-rw-r--r--gopls/internal/lsp/analysis/simplifyslice/simplifyslice.go (renamed from internal/lsp/analysis/simplifyslice/simplifyslice.go)0
-rw-r--r--gopls/internal/lsp/analysis/simplifyslice/simplifyslice_test.go22
-rw-r--r--gopls/internal/lsp/analysis/simplifyslice/testdata/src/a/a.go (renamed from internal/lsp/analysis/simplifyslice/testdata/src/a/a.go)0
-rw-r--r--gopls/internal/lsp/analysis/simplifyslice/testdata/src/a/a.go.golden (renamed from internal/lsp/analysis/simplifyslice/testdata/src/a/a.go.golden)0
-rw-r--r--gopls/internal/lsp/analysis/simplifyslice/testdata/src/typeparams/typeparams.go (renamed from internal/lsp/analysis/simplifyslice/testdata/src/typeparams/typeparams.go)0
-rw-r--r--gopls/internal/lsp/analysis/simplifyslice/testdata/src/typeparams/typeparams.go.golden (renamed from internal/lsp/analysis/simplifyslice/testdata/src/typeparams/typeparams.go.golden)0
-rw-r--r--gopls/internal/lsp/analysis/stubmethods/stubmethods.go418
-rw-r--r--gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/a.go28
-rw-r--r--gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/channels.go13
-rw-r--r--gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/consecutive_params.go10
-rw-r--r--gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/error_param.go10
-rw-r--r--gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/literals.go11
-rw-r--r--gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/operation.go11
-rw-r--r--gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/selector.go10
-rw-r--r--gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/slice.go9
-rw-r--r--gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/tuple.go13
-rw-r--r--gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/unique_params.go11
-rw-r--r--gopls/internal/lsp/analysis/undeclaredname/undeclared.go347
-rw-r--r--gopls/internal/lsp/analysis/undeclaredname/undeclared_test.go17
-rw-r--r--gopls/internal/lsp/analysis/unusedparams/testdata/src/a/a.go (renamed from internal/lsp/analysis/unusedparams/testdata/src/a/a.go)0
-rw-r--r--gopls/internal/lsp/analysis/unusedparams/testdata/src/a/a.go.golden (renamed from internal/lsp/analysis/unusedparams/testdata/src/a/a.go.golden)0
-rw-r--r--gopls/internal/lsp/analysis/unusedparams/testdata/src/typeparams/typeparams.go (renamed from internal/lsp/analysis/unusedparams/testdata/src/typeparams/typeparams.go)0
-rw-r--r--gopls/internal/lsp/analysis/unusedparams/testdata/src/typeparams/typeparams.go.golden (renamed from internal/lsp/analysis/unusedparams/testdata/src/typeparams/typeparams.go.golden)0
-rw-r--r--gopls/internal/lsp/analysis/unusedparams/unusedparams.go (renamed from internal/lsp/analysis/unusedparams/unusedparams.go)0
-rw-r--r--gopls/internal/lsp/analysis/unusedparams/unusedparams_test.go22
-rw-r--r--gopls/internal/lsp/analysis/unusedvariable/testdata/src/assign/a.go74
-rw-r--r--gopls/internal/lsp/analysis/unusedvariable/testdata/src/assign/a.go.golden59
-rw-r--r--gopls/internal/lsp/analysis/unusedvariable/testdata/src/decl/a.go30
-rw-r--r--gopls/internal/lsp/analysis/unusedvariable/testdata/src/decl/a.go.golden24
-rw-r--r--gopls/internal/lsp/analysis/unusedvariable/unusedvariable.go300
-rw-r--r--gopls/internal/lsp/analysis/unusedvariable/unusedvariable_test.go24
-rw-r--r--gopls/internal/lsp/analysis/useany/testdata/src/a/a.go (renamed from internal/lsp/analysis/useany/testdata/src/a/a.go)0
-rw-r--r--gopls/internal/lsp/analysis/useany/testdata/src/a/a.go.golden (renamed from internal/lsp/analysis/useany/testdata/src/a/a.go.golden)0
-rw-r--r--gopls/internal/lsp/analysis/useany/useany.go (renamed from internal/lsp/analysis/useany/useany.go)0
-rw-r--r--gopls/internal/lsp/analysis/useany/useany_test.go21
-rw-r--r--gopls/internal/lsp/browser/README.md (renamed from internal/lsp/browser/README.md)0
-rw-r--r--gopls/internal/lsp/browser/browser.go (renamed from internal/lsp/browser/browser.go)0
-rw-r--r--gopls/internal/lsp/cache/analysis.go1247
-rw-r--r--gopls/internal/lsp/cache/cache.go78
-rw-r--r--gopls/internal/lsp/cache/check.go1227
-rw-r--r--gopls/internal/lsp/cache/debug.go55
-rw-r--r--gopls/internal/lsp/cache/errors.go528
-rw-r--r--gopls/internal/lsp/cache/errors_test.go (renamed from internal/lsp/cache/error_test.go)0
-rw-r--r--gopls/internal/lsp/cache/fs_memoized.go149
-rw-r--r--gopls/internal/lsp/cache/fs_overlay.go78
-rw-r--r--gopls/internal/lsp/cache/graph.go131
-rw-r--r--gopls/internal/lsp/cache/imports.go188
-rw-r--r--gopls/internal/lsp/cache/keys.go (renamed from internal/lsp/cache/keys.go)0
-rw-r--r--gopls/internal/lsp/cache/load.go782
-rw-r--r--gopls/internal/lsp/cache/maps.go121
-rw-r--r--gopls/internal/lsp/cache/mod.go522
-rw-r--r--gopls/internal/lsp/cache/mod_tidy.go469
-rw-r--r--gopls/internal/lsp/cache/mod_vuln.go75
-rw-r--r--gopls/internal/lsp/cache/os_darwin.go (renamed from internal/lsp/cache/os_darwin.go)0
-rw-r--r--gopls/internal/lsp/cache/os_windows.go56
-rw-r--r--gopls/internal/lsp/cache/parse.go900
-rw-r--r--gopls/internal/lsp/cache/parse_cache.go298
-rw-r--r--gopls/internal/lsp/cache/parse_cache_test.go142
-rw-r--r--gopls/internal/lsp/cache/parsemode_go116.go11
-rw-r--r--gopls/internal/lsp/cache/parsemode_go117.go12
-rw-r--r--gopls/internal/lsp/cache/pkg.go165
-rw-r--r--gopls/internal/lsp/cache/session.go730
-rw-r--r--gopls/internal/lsp/cache/snapshot.go2214
-rw-r--r--gopls/internal/lsp/cache/standalone_go115.go14
-rw-r--r--gopls/internal/lsp/cache/standalone_go116.go50
-rw-r--r--gopls/internal/lsp/cache/standalone_go116_test.go96
-rw-r--r--gopls/internal/lsp/cache/symbols.go213
-rw-r--r--gopls/internal/lsp/cache/view.go1142
-rw-r--r--gopls/internal/lsp/cache/view_test.go278
-rw-r--r--gopls/internal/lsp/cache/workspace.go177
-rw-r--r--gopls/internal/lsp/call_hierarchy.go42
-rw-r--r--gopls/internal/lsp/cmd/call_hierarchy.go142
-rw-r--r--gopls/internal/lsp/cmd/capabilities_test.go166
-rw-r--r--gopls/internal/lsp/cmd/check.go73
-rw-r--r--gopls/internal/lsp/cmd/cmd.go640
-rw-r--r--gopls/internal/lsp/cmd/definition.go132
-rw-r--r--gopls/internal/lsp/cmd/folding_range.go73
-rw-r--r--gopls/internal/lsp/cmd/format.go110
-rw-r--r--gopls/internal/lsp/cmd/help_test.go58
-rw-r--r--gopls/internal/lsp/cmd/highlight.go82
-rw-r--r--gopls/internal/lsp/cmd/implementation.go84
-rw-r--r--gopls/internal/lsp/cmd/imports.go105
-rw-r--r--gopls/internal/lsp/cmd/info.go246
-rw-r--r--gopls/internal/lsp/cmd/links.go77
-rw-r--r--gopls/internal/lsp/cmd/prepare_rename.go80
-rw-r--r--gopls/internal/lsp/cmd/references.go89
-rw-r--r--gopls/internal/lsp/cmd/remote.go164
-rw-r--r--gopls/internal/lsp/cmd/rename.go130
-rw-r--r--gopls/internal/lsp/cmd/semantictokens.go225
-rw-r--r--gopls/internal/lsp/cmd/serve.go130
-rw-r--r--gopls/internal/lsp/cmd/signature.go88
-rw-r--r--gopls/internal/lsp/cmd/subcommands.go59
-rw-r--r--gopls/internal/lsp/cmd/suggested_fix.go167
-rw-r--r--gopls/internal/lsp/cmd/symbols.go116
-rw-r--r--gopls/internal/lsp/cmd/test/cmdtest.go6
-rw-r--r--gopls/internal/lsp/cmd/test/integration_test.go898
-rw-r--r--gopls/internal/lsp/cmd/usage/api-json.hlp (renamed from internal/lsp/cmd/usage/api-json.hlp)0
-rw-r--r--gopls/internal/lsp/cmd/usage/bug.hlp (renamed from internal/lsp/cmd/usage/bug.hlp)0
-rw-r--r--gopls/internal/lsp/cmd/usage/call_hierarchy.hlp (renamed from internal/lsp/cmd/usage/call_hierarchy.hlp)0
-rw-r--r--gopls/internal/lsp/cmd/usage/check.hlp (renamed from internal/lsp/cmd/usage/check.hlp)0
-rw-r--r--gopls/internal/lsp/cmd/usage/definition.hlp (renamed from internal/lsp/cmd/usage/definition.hlp)0
-rw-r--r--gopls/internal/lsp/cmd/usage/fix.hlp (renamed from internal/lsp/cmd/usage/fix.hlp)0
-rw-r--r--gopls/internal/lsp/cmd/usage/folding_ranges.hlp (renamed from internal/lsp/cmd/usage/folding_ranges.hlp)0
-rw-r--r--gopls/internal/lsp/cmd/usage/format.hlp (renamed from internal/lsp/cmd/usage/format.hlp)0
-rw-r--r--gopls/internal/lsp/cmd/usage/help.hlp10
-rw-r--r--gopls/internal/lsp/cmd/usage/highlight.hlp (renamed from internal/lsp/cmd/usage/highlight.hlp)0
-rw-r--r--gopls/internal/lsp/cmd/usage/implementation.hlp (renamed from internal/lsp/cmd/usage/implementation.hlp)0
-rw-r--r--gopls/internal/lsp/cmd/usage/imports.hlp (renamed from internal/lsp/cmd/usage/imports.hlp)0
-rw-r--r--gopls/internal/lsp/cmd/usage/inspect.hlp (renamed from internal/lsp/cmd/usage/inspect.hlp)0
-rw-r--r--gopls/internal/lsp/cmd/usage/licenses.hlp (renamed from internal/lsp/cmd/usage/licenses.hlp)0
-rw-r--r--gopls/internal/lsp/cmd/usage/links.hlp (renamed from internal/lsp/cmd/usage/links.hlp)0
-rw-r--r--gopls/internal/lsp/cmd/usage/prepare_rename.hlp (renamed from internal/lsp/cmd/usage/prepare_rename.hlp)0
-rw-r--r--gopls/internal/lsp/cmd/usage/references.hlp (renamed from internal/lsp/cmd/usage/references.hlp)0
-rw-r--r--gopls/internal/lsp/cmd/usage/remote.hlp (renamed from internal/lsp/cmd/usage/remote.hlp)0
-rw-r--r--gopls/internal/lsp/cmd/usage/rename.hlp (renamed from internal/lsp/cmd/usage/rename.hlp)0
-rw-r--r--gopls/internal/lsp/cmd/usage/semtok.hlp (renamed from internal/lsp/cmd/usage/semtok.hlp)0
-rw-r--r--gopls/internal/lsp/cmd/usage/serve.hlp (renamed from internal/lsp/cmd/usage/serve.hlp)0
-rw-r--r--gopls/internal/lsp/cmd/usage/signature.hlp (renamed from internal/lsp/cmd/usage/signature.hlp)0
-rw-r--r--gopls/internal/lsp/cmd/usage/symbols.hlp (renamed from internal/lsp/cmd/usage/symbols.hlp)0
-rw-r--r--gopls/internal/lsp/cmd/usage/usage.hlp77
-rw-r--r--gopls/internal/lsp/cmd/usage/version.hlp (renamed from internal/lsp/cmd/usage/version.hlp)0
-rw-r--r--gopls/internal/lsp/cmd/usage/vulncheck.hlp17
-rw-r--r--gopls/internal/lsp/cmd/usage/workspace_symbol.hlp (renamed from internal/lsp/cmd/usage/workspace_symbol.hlp)0
-rw-r--r--gopls/internal/lsp/cmd/vulncheck.go84
-rw-r--r--gopls/internal/lsp/cmd/workspace_symbol.go85
-rw-r--r--gopls/internal/lsp/code_action.go481
-rw-r--r--gopls/internal/lsp/code_lens.go57
-rw-r--r--gopls/internal/lsp/command.go964
-rw-r--r--gopls/internal/lsp/command/command_gen.go509
-rw-r--r--gopls/internal/lsp/command/commandmeta/meta.go259
-rw-r--r--gopls/internal/lsp/command/gen/gen.go155
-rw-r--r--gopls/internal/lsp/command/generate.go25
-rw-r--r--gopls/internal/lsp/command/interface.go410
-rw-r--r--gopls/internal/lsp/command/interface_test.go31
-rw-r--r--gopls/internal/lsp/command/util.go63
-rw-r--r--gopls/internal/lsp/completion.go140
-rw-r--r--gopls/internal/lsp/completion_test.go176
-rw-r--r--gopls/internal/lsp/debounce.go (renamed from internal/lsp/debounce.go)0
-rw-r--r--gopls/internal/lsp/debounce_test.go (renamed from internal/lsp/debounce_test.go)0
-rw-r--r--gopls/internal/lsp/debug/buildinfo_go1.12.go (renamed from internal/lsp/debug/buildinfo_go1.12.go)0
-rw-r--r--gopls/internal/lsp/debug/buildinfo_go1.18.go (renamed from internal/lsp/debug/buildinfo_go1.18.go)0
-rw-r--r--gopls/internal/lsp/debug/info.go254
-rw-r--r--gopls/internal/lsp/debug/info_test.go (renamed from internal/lsp/debug/info_test.go)0
-rw-r--r--gopls/internal/lsp/debug/log/log.go43
-rw-r--r--gopls/internal/lsp/debug/metrics.go58
-rw-r--r--gopls/internal/lsp/debug/rpc.go239
-rw-r--r--gopls/internal/lsp/debug/serve.go909
-rw-r--r--gopls/internal/lsp/debug/trace.go233
-rw-r--r--gopls/internal/lsp/definition.go52
-rw-r--r--gopls/internal/lsp/diagnostics.go764
-rw-r--r--gopls/internal/lsp/fake/client.go187
-rw-r--r--gopls/internal/lsp/fake/doc.go (renamed from internal/lsp/fake/doc.go)0
-rw-r--r--gopls/internal/lsp/fake/edit.go51
-rw-r--r--gopls/internal/lsp/fake/edit_test.go96
-rw-r--r--gopls/internal/lsp/fake/editor.go1464
-rw-r--r--gopls/internal/lsp/fake/editor_test.go61
-rw-r--r--gopls/internal/lsp/fake/proxy.go (renamed from internal/lsp/fake/proxy.go)0
-rw-r--r--gopls/internal/lsp/fake/sandbox.go299
-rw-r--r--gopls/internal/lsp/fake/workdir.go438
-rw-r--r--gopls/internal/lsp/fake/workdir_test.go220
-rw-r--r--gopls/internal/lsp/fake/workdir_windows.go21
-rw-r--r--gopls/internal/lsp/filecache/filecache.go369
-rw-r--r--gopls/internal/lsp/filecache/filecache_test.go215
-rw-r--r--gopls/internal/lsp/folding_range.go41
-rw-r--r--gopls/internal/lsp/format.go31
-rw-r--r--gopls/internal/lsp/general.go619
-rw-r--r--gopls/internal/lsp/general_test.go44
-rw-r--r--gopls/internal/lsp/glob/glob.go349
-rw-r--r--gopls/internal/lsp/glob/glob_test.go118
-rw-r--r--gopls/internal/lsp/helper/README.md35
-rw-r--r--gopls/internal/lsp/helper/helper.go264
-rw-r--r--gopls/internal/lsp/highlight.go45
-rw-r--r--gopls/internal/lsp/hover.go34
-rw-r--r--gopls/internal/lsp/implementation.go21
-rw-r--r--gopls/internal/lsp/inlay_hint.go21
-rw-r--r--gopls/internal/lsp/link.go278
-rw-r--r--gopls/internal/lsp/lsp_test.go1360
-rw-r--r--gopls/internal/lsp/lsprpc/autostart_default.go39
-rw-r--r--gopls/internal/lsp/lsprpc/autostart_posix.go97
-rw-r--r--gopls/internal/lsp/lsprpc/binder.go148
-rw-r--r--gopls/internal/lsp/lsprpc/binder_test.go147
-rw-r--r--gopls/internal/lsp/lsprpc/commandinterceptor.go44
-rw-r--r--gopls/internal/lsp/lsprpc/commandinterceptor_test.go42
-rw-r--r--gopls/internal/lsp/lsprpc/dialer.go114
-rw-r--r--gopls/internal/lsp/lsprpc/goenv.go96
-rw-r--r--gopls/internal/lsp/lsprpc/goenv_test.go65
-rw-r--r--gopls/internal/lsp/lsprpc/lsprpc.go543
-rw-r--r--gopls/internal/lsp/lsprpc/lsprpc_test.go345
-rw-r--r--gopls/internal/lsp/lsprpc/middleware.go142
-rw-r--r--gopls/internal/lsp/lsprpc/middleware_test.go93
-rw-r--r--gopls/internal/lsp/mod/code_lens.go191
-rw-r--r--gopls/internal/lsp/mod/diagnostics.go561
-rw-r--r--gopls/internal/lsp/mod/format.go30
-rw-r--r--gopls/internal/lsp/mod/hover.go358
-rw-r--r--gopls/internal/lsp/mod/mod_test.go57
-rw-r--r--gopls/internal/lsp/mod/testdata/unchanged/go.mod (renamed from internal/lsp/mod/testdata/unchanged/go.mod)0
-rw-r--r--gopls/internal/lsp/mod/testdata/unchanged/main.go (renamed from internal/lsp/mod/testdata/unchanged/main.go)0
-rw-r--r--gopls/internal/lsp/progress/progress.go271
-rw-r--r--gopls/internal/lsp/progress/progress_test.go161
-rw-r--r--gopls/internal/lsp/protocol/codeactionkind.go (renamed from internal/lsp/protocol/codeactionkind.go)0
-rw-r--r--gopls/internal/lsp/protocol/context.go (renamed from internal/lsp/protocol/context.go)0
-rw-r--r--gopls/internal/lsp/protocol/doc.go18
-rw-r--r--gopls/internal/lsp/protocol/enums.go231
-rw-r--r--gopls/internal/lsp/protocol/generate/README.md136
-rw-r--r--gopls/internal/lsp/protocol/generate/generate.go121
-rw-r--r--gopls/internal/lsp/protocol/generate/main.go387
-rw-r--r--gopls/internal/lsp/protocol/generate/main_test.go118
-rw-r--r--gopls/internal/lsp/protocol/generate/output.go420
-rw-r--r--gopls/internal/lsp/protocol/generate/tables.go327
-rw-r--r--gopls/internal/lsp/protocol/generate/typenames.go184
-rw-r--r--gopls/internal/lsp/protocol/generate/types.go170
-rw-r--r--gopls/internal/lsp/protocol/log.go (renamed from internal/lsp/protocol/log.go)0
-rw-r--r--gopls/internal/lsp/protocol/mapper.go529
-rw-r--r--gopls/internal/lsp/protocol/mapper_test.go441
-rw-r--r--gopls/internal/lsp/protocol/protocol.go284
-rw-r--r--gopls/internal/lsp/protocol/span.go118
-rw-r--r--gopls/internal/lsp/protocol/tsclient.go249
-rw-r--r--gopls/internal/lsp/protocol/tsdocument_changes.go42
-rw-r--r--gopls/internal/lsp/protocol/tsjson.go1997
-rw-r--r--gopls/internal/lsp/protocol/tsprotocol.go5450
-rw-r--r--gopls/internal/lsp/protocol/tsserver.go1160
-rw-r--r--gopls/internal/lsp/references.go25
-rw-r--r--gopls/internal/lsp/regtest/doc.go157
-rw-r--r--gopls/internal/lsp/regtest/env.go391
-rw-r--r--gopls/internal/lsp/regtest/env_test.go66
-rw-r--r--gopls/internal/lsp/regtest/expectation.go769
-rw-r--r--gopls/internal/lsp/regtest/marker.go1273
-rw-r--r--gopls/internal/lsp/regtest/options.go105
-rw-r--r--gopls/internal/lsp/regtest/regtest.go153
-rw-r--r--gopls/internal/lsp/regtest/runner.go437
-rw-r--r--gopls/internal/lsp/regtest/wrappers.go489
-rw-r--r--gopls/internal/lsp/rename.go78
-rwxr-xr-xgopls/internal/lsp/reset_golden.sh30
-rw-r--r--gopls/internal/lsp/safetoken/safetoken.go122
-rw-r--r--gopls/internal/lsp/safetoken/safetoken_test.go121
-rw-r--r--gopls/internal/lsp/selection_range.go69
-rw-r--r--gopls/internal/lsp/semantic.go1003
-rw-r--r--gopls/internal/lsp/server.go158
-rw-r--r--gopls/internal/lsp/server_gen.go301
-rw-r--r--gopls/internal/lsp/signature_help.go31
-rw-r--r--gopls/internal/lsp/snippet/snippet_builder.go111
-rw-r--r--gopls/internal/lsp/snippet/snippet_builder_test.go (renamed from internal/lsp/snippet/snippet_builder_test.go)0
-rw-r--r--gopls/internal/lsp/source/add_import.go26
-rwxr-xr-xgopls/internal/lsp/source/api_json.go1118
-rw-r--r--gopls/internal/lsp/source/call_hierarchy.go311
-rw-r--r--gopls/internal/lsp/source/code_lens.go248
-rw-r--r--gopls/internal/lsp/source/comment.go384
-rw-r--r--gopls/internal/lsp/source/comment_go118_test.go371
-rw-r--r--gopls/internal/lsp/source/comment_go119.go56
-rw-r--r--gopls/internal/lsp/source/completion/builtin.go (renamed from internal/lsp/source/completion/builtin.go)0
-rw-r--r--gopls/internal/lsp/source/completion/completion.go3252
-rw-r--r--gopls/internal/lsp/source/completion/deep_completion.go362
-rw-r--r--gopls/internal/lsp/source/completion/deep_completion_test.go (renamed from internal/lsp/source/completion/deep_completion_test.go)0
-rw-r--r--gopls/internal/lsp/source/completion/definition.go160
-rw-r--r--gopls/internal/lsp/source/completion/format.go338
-rw-r--r--gopls/internal/lsp/source/completion/fuzz.go142
-rw-r--r--gopls/internal/lsp/source/completion/keywords.go154
-rw-r--r--gopls/internal/lsp/source/completion/labels.go (renamed from internal/lsp/source/completion/labels.go)0
-rw-r--r--gopls/internal/lsp/source/completion/literal.go592
-rw-r--r--gopls/internal/lsp/source/completion/package.go351
-rw-r--r--gopls/internal/lsp/source/completion/package_test.go81
-rw-r--r--gopls/internal/lsp/source/completion/postfix_snippets.go471
-rw-r--r--gopls/internal/lsp/source/completion/printf.go172
-rw-r--r--gopls/internal/lsp/source/completion/printf_test.go (renamed from internal/lsp/source/completion/printf_test.go)0
-rw-r--r--gopls/internal/lsp/source/completion/snippet.go116
-rw-r--r--gopls/internal/lsp/source/completion/statements.go361
-rw-r--r--gopls/internal/lsp/source/completion/util.go344
-rw-r--r--gopls/internal/lsp/source/completion/util_test.go (renamed from internal/lsp/source/completion/util_test.go)0
-rw-r--r--gopls/internal/lsp/source/definition.go229
-rw-r--r--gopls/internal/lsp/source/diagnostics.go138
-rw-r--r--gopls/internal/lsp/source/extract.go1331
-rw-r--r--gopls/internal/lsp/source/fix.go138
-rw-r--r--gopls/internal/lsp/source/folding_range.go193
-rw-r--r--gopls/internal/lsp/source/format.go391
-rw-r--r--gopls/internal/lsp/source/format_test.go75
-rw-r--r--gopls/internal/lsp/source/gc_annotations.go221
-rw-r--r--gopls/internal/lsp/source/highlight.go484
-rw-r--r--gopls/internal/lsp/source/hover.go951
-rw-r--r--gopls/internal/lsp/source/identifier.go174
-rw-r--r--gopls/internal/lsp/source/identifier_test.go103
-rw-r--r--gopls/internal/lsp/source/implementation.go482
-rw-r--r--gopls/internal/lsp/source/inlay_hint.go394
-rw-r--r--gopls/internal/lsp/source/known_packages.go140
-rw-r--r--gopls/internal/lsp/source/linkname.go136
-rw-r--r--gopls/internal/lsp/source/methodsets/methodsets.go508
-rw-r--r--gopls/internal/lsp/source/options.go1631
-rw-r--r--gopls/internal/lsp/source/options_test.go206
-rw-r--r--gopls/internal/lsp/source/references.go582
-rw-r--r--gopls/internal/lsp/source/rename.go1244
-rw-r--r--gopls/internal/lsp/source/rename_check.go921
-rw-r--r--gopls/internal/lsp/source/signature_help.go185
-rw-r--r--gopls/internal/lsp/source/stub.go238
-rw-r--r--gopls/internal/lsp/source/symbols.go227
-rw-r--r--gopls/internal/lsp/source/type_definition.go55
-rw-r--r--gopls/internal/lsp/source/types_format.go517
-rw-r--r--gopls/internal/lsp/source/util.go555
-rw-r--r--gopls/internal/lsp/source/view.go857
-rw-r--r--gopls/internal/lsp/source/workspace_symbol.go632
-rw-r--r--gopls/internal/lsp/source/workspace_symbol_test.go136
-rw-r--r--gopls/internal/lsp/source/xrefs/xrefs.go216
-rw-r--r--gopls/internal/lsp/symbols.go60
-rw-r--r--gopls/internal/lsp/template/completion.go287
-rw-r--r--gopls/internal/lsp/template/completion_test.go102
-rw-r--r--gopls/internal/lsp/template/highlight.go96
-rw-r--r--gopls/internal/lsp/template/implementations.go189
-rw-r--r--gopls/internal/lsp/template/parse.go508
-rw-r--r--gopls/internal/lsp/template/parse_test.go (renamed from internal/lsp/template/parse_test.go)0
-rw-r--r--gopls/internal/lsp/template/symbols.go230
-rw-r--r--gopls/internal/lsp/testdata/%percent/perc%ent.go (renamed from internal/lsp/testdata/%percent/perc%ent.go)0
-rw-r--r--gopls/internal/lsp/testdata/addimport/addimport.go.golden (renamed from internal/lsp/testdata/addimport/addimport.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/addimport/addimport.go.in (renamed from internal/lsp/testdata/addimport/addimport.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/address/address.go (renamed from internal/lsp/testdata/address/address.go)0
-rw-r--r--gopls/internal/lsp/testdata/analyzer/bad_test.go24
-rw-r--r--gopls/internal/lsp/testdata/anon/anon.go.in (renamed from internal/lsp/testdata/anon/anon.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/append/append.go (renamed from internal/lsp/testdata/append/append.go)0
-rw-r--r--gopls/internal/lsp/testdata/append/append2.go.in (renamed from internal/lsp/testdata/append/append2.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/arraytype/array_type.go.in50
-rw-r--r--gopls/internal/lsp/testdata/assign/assign.go.in26
-rw-r--r--gopls/internal/lsp/testdata/assign/internal/secret/secret.go (renamed from internal/lsp/testdata/assign/internal/secret/secret.go)0
-rw-r--r--gopls/internal/lsp/testdata/bad/bad0.go24
-rw-r--r--gopls/internal/lsp/testdata/bad/bad1.go34
-rw-r--r--gopls/internal/lsp/testdata/badstmt/badstmt.go.in29
-rw-r--r--gopls/internal/lsp/testdata/badstmt/badstmt_2.go.in9
-rw-r--r--gopls/internal/lsp/testdata/badstmt/badstmt_3.go.in9
-rw-r--r--gopls/internal/lsp/testdata/badstmt/badstmt_4.go.in11
-rw-r--r--gopls/internal/lsp/testdata/bar/bar.go.in47
-rw-r--r--gopls/internal/lsp/testdata/basiclit/basiclit.go13
-rw-r--r--gopls/internal/lsp/testdata/baz/baz.go.in33
-rw-r--r--gopls/internal/lsp/testdata/builtins/builtin_args.go (renamed from internal/lsp/testdata/builtins/builtin_args.go)0
-rw-r--r--gopls/internal/lsp/testdata/builtins/builtin_go117.go8
-rw-r--r--gopls/internal/lsp/testdata/builtins/builtin_go118.go8
-rw-r--r--gopls/internal/lsp/testdata/builtins/builtin_go121.go8
-rw-r--r--gopls/internal/lsp/testdata/builtins/builtin_types.go (renamed from internal/lsp/testdata/builtins/builtin_types.go)0
-rw-r--r--gopls/internal/lsp/testdata/builtins/builtins.go47
-rw-r--r--gopls/internal/lsp/testdata/builtins/constants.go (renamed from internal/lsp/testdata/builtins/constants.go)0
-rw-r--r--gopls/internal/lsp/testdata/callhierarchy/callhierarchy.go70
-rw-r--r--gopls/internal/lsp/testdata/callhierarchy/incoming/incoming.go12
-rw-r--r--gopls/internal/lsp/testdata/callhierarchy/outgoing/outgoing.go (renamed from internal/lsp/testdata/callhierarchy/outgoing/outgoing.go)0
-rw-r--r--gopls/internal/lsp/testdata/casesensitive/casesensitive.go (renamed from internal/lsp/testdata/casesensitive/casesensitive.go)0
-rw-r--r--gopls/internal/lsp/testdata/cast/cast.go.in (renamed from internal/lsp/testdata/cast/cast.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/cgo/declarecgo.go (renamed from internal/lsp/testdata/cgo/declarecgo.go)0
-rw-r--r--gopls/internal/lsp/testdata/cgo/declarecgo.go.golden30
-rw-r--r--gopls/internal/lsp/testdata/cgo/declarecgo_nocgo.go (renamed from internal/lsp/testdata/cgo/declarecgo_nocgo.go)0
-rw-r--r--gopls/internal/lsp/testdata/cgoimport/usecgo.go.golden30
-rw-r--r--gopls/internal/lsp/testdata/cgoimport/usecgo.go.in9
-rw-r--r--gopls/internal/lsp/testdata/channel/channel.go (renamed from internal/lsp/testdata/channel/channel.go)0
-rw-r--r--gopls/internal/lsp/testdata/codelens/codelens_test.go (renamed from internal/lsp/testdata/codelens/codelens_test.go)0
-rw-r--r--gopls/internal/lsp/testdata/comment_completion/comment_completion.go.in (renamed from internal/lsp/testdata/comment_completion/comment_completion.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/complit/complit.go.in (renamed from internal/lsp/testdata/complit/complit.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/constant/constant.go (renamed from internal/lsp/testdata/constant/constant.go)0
-rw-r--r--gopls/internal/lsp/testdata/danglingstmt/dangling_for.go (renamed from internal/lsp/testdata/danglingstmt/dangling_for.go)0
-rw-r--r--gopls/internal/lsp/testdata/danglingstmt/dangling_for_init.go (renamed from internal/lsp/testdata/danglingstmt/dangling_for_init.go)0
-rw-r--r--gopls/internal/lsp/testdata/danglingstmt/dangling_for_init_cond.go (renamed from internal/lsp/testdata/danglingstmt/dangling_for_init_cond.go)0
-rw-r--r--gopls/internal/lsp/testdata/danglingstmt/dangling_for_init_cond_post.go (renamed from internal/lsp/testdata/danglingstmt/dangling_for_init_cond_post.go)0
-rw-r--r--gopls/internal/lsp/testdata/danglingstmt/dangling_if.go (renamed from internal/lsp/testdata/danglingstmt/dangling_if.go)0
-rw-r--r--gopls/internal/lsp/testdata/danglingstmt/dangling_if_eof.go (renamed from internal/lsp/testdata/danglingstmt/dangling_if_eof.go)0
-rw-r--r--gopls/internal/lsp/testdata/danglingstmt/dangling_if_init.go (renamed from internal/lsp/testdata/danglingstmt/dangling_if_init.go)0
-rw-r--r--gopls/internal/lsp/testdata/danglingstmt/dangling_if_init_cond.go (renamed from internal/lsp/testdata/danglingstmt/dangling_if_init_cond.go)0
-rw-r--r--gopls/internal/lsp/testdata/danglingstmt/dangling_multiline_if.go (renamed from internal/lsp/testdata/danglingstmt/dangling_multiline_if.go)0
-rw-r--r--gopls/internal/lsp/testdata/danglingstmt/dangling_selector_1.go (renamed from internal/lsp/testdata/danglingstmt/dangling_selector_1.go)0
-rw-r--r--gopls/internal/lsp/testdata/danglingstmt/dangling_selector_2.go8
-rw-r--r--gopls/internal/lsp/testdata/danglingstmt/dangling_switch_init.go (renamed from internal/lsp/testdata/danglingstmt/dangling_switch_init.go)0
-rw-r--r--gopls/internal/lsp/testdata/danglingstmt/dangling_switch_init_tag.go (renamed from internal/lsp/testdata/danglingstmt/dangling_switch_init_tag.go)0
-rw-r--r--gopls/internal/lsp/testdata/deep/deep.go142
-rw-r--r--gopls/internal/lsp/testdata/errors/errors.go10
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_function/extract_args_returns.go (renamed from internal/lsp/testdata/extract/extract_function/extract_args_returns.go)0
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_function/extract_args_returns.go.golden (renamed from internal/lsp/testdata/extract/extract_function/extract_args_returns.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_function/extract_basic.go (renamed from internal/lsp/testdata/extract/extract_function/extract_basic.go)0
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_function/extract_basic.go.golden (renamed from internal/lsp/testdata/extract/extract_function/extract_basic.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_function/extract_basic_comment.go12
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_function/extract_basic_comment.go.golden57
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_function/extract_issue_44813.go (renamed from internal/lsp/testdata/extract/extract_function/extract_issue_44813.go)0
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_function/extract_issue_44813.go.golden (renamed from internal/lsp/testdata/extract/extract_function/extract_issue_44813.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_function/extract_redefine.go (renamed from internal/lsp/testdata/extract/extract_function/extract_redefine.go)0
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_function/extract_redefine.go.golden (renamed from internal/lsp/testdata/extract/extract_function/extract_redefine.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_function/extract_return_basic.go (renamed from internal/lsp/testdata/extract/extract_function/extract_return_basic.go)0
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_function/extract_return_basic.go.golden (renamed from internal/lsp/testdata/extract/extract_function/extract_return_basic.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_function/extract_return_basic_nonnested.go (renamed from internal/lsp/testdata/extract/extract_function/extract_return_basic_nonnested.go)0
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_function/extract_return_basic_nonnested.go.golden (renamed from internal/lsp/testdata/extract/extract_function/extract_return_basic_nonnested.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_function/extract_return_complex.go (renamed from internal/lsp/testdata/extract/extract_function/extract_return_complex.go)0
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_function/extract_return_complex.go.golden (renamed from internal/lsp/testdata/extract/extract_function/extract_return_complex.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_function/extract_return_complex_nonnested.go (renamed from internal/lsp/testdata/extract/extract_function/extract_return_complex_nonnested.go)0
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_function/extract_return_complex_nonnested.go.golden (renamed from internal/lsp/testdata/extract/extract_function/extract_return_complex_nonnested.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_function/extract_return_func_lit.go (renamed from internal/lsp/testdata/extract/extract_function/extract_return_func_lit.go)0
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_function/extract_return_func_lit.go.golden (renamed from internal/lsp/testdata/extract/extract_function/extract_return_func_lit.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_function/extract_return_func_lit_nonnested.go (renamed from internal/lsp/testdata/extract/extract_function/extract_return_func_lit_nonnested.go)0
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_function/extract_return_func_lit_nonnested.go.golden (renamed from internal/lsp/testdata/extract/extract_function/extract_return_func_lit_nonnested.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_function/extract_return_init.go (renamed from internal/lsp/testdata/extract/extract_function/extract_return_init.go)0
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_function/extract_return_init.go.golden (renamed from internal/lsp/testdata/extract/extract_function/extract_return_init.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_function/extract_return_init_nonnested.go (renamed from internal/lsp/testdata/extract/extract_function/extract_return_init_nonnested.go)0
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_function/extract_return_init_nonnested.go.golden (renamed from internal/lsp/testdata/extract/extract_function/extract_return_init_nonnested.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_function/extract_scope.go (renamed from internal/lsp/testdata/extract/extract_function/extract_scope.go)0
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_function/extract_scope.go.golden (renamed from internal/lsp/testdata/extract/extract_function/extract_scope.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_function/extract_smart_initialization.go (renamed from internal/lsp/testdata/extract/extract_function/extract_smart_initialization.go)0
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_function/extract_smart_initialization.go.golden (renamed from internal/lsp/testdata/extract/extract_function/extract_smart_initialization.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_function/extract_smart_return.go (renamed from internal/lsp/testdata/extract/extract_function/extract_smart_return.go)0
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_function/extract_smart_return.go.golden (renamed from internal/lsp/testdata/extract/extract_function/extract_smart_return.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_function/extract_unnecessary_param.go (renamed from internal/lsp/testdata/extract/extract_function/extract_unnecessary_param.go)0
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_function/extract_unnecessary_param.go.golden (renamed from internal/lsp/testdata/extract/extract_function/extract_unnecessary_param.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_method/extract_basic.go (renamed from internal/lsp/testdata/extract/extract_method/extract_basic.go)0
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_method/extract_basic.go.golden364
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go6
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go.golden18
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_variable/extract_func_call.go9
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_variable/extract_func_call.go.golden24
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_variable/extract_scope.go13
-rw-r--r--gopls/internal/lsp/testdata/extract/extract_variable/extract_scope.go.golden32
-rw-r--r--gopls/internal/lsp/testdata/fieldlist/field_list.go (renamed from internal/lsp/testdata/fieldlist/field_list.go)0
-rw-r--r--gopls/internal/lsp/testdata/fillstruct/a.go27
-rw-r--r--gopls/internal/lsp/testdata/fillstruct/a.go.golden126
-rw-r--r--gopls/internal/lsp/testdata/fillstruct/a2.go29
-rw-r--r--gopls/internal/lsp/testdata/fillstruct/a2.go.golden139
-rw-r--r--gopls/internal/lsp/testdata/fillstruct/a3.go42
-rw-r--r--gopls/internal/lsp/testdata/fillstruct/a3.go.golden243
-rw-r--r--gopls/internal/lsp/testdata/fillstruct/a4.go39
-rw-r--r--gopls/internal/lsp/testdata/fillstruct/a4.go.golden174
-rw-r--r--gopls/internal/lsp/testdata/fillstruct/data/a.go (renamed from internal/lsp/testdata/fillstruct/data/a.go)0
-rw-r--r--gopls/internal/lsp/testdata/fillstruct/fill_struct.go26
-rw-r--r--gopls/internal/lsp/testdata/fillstruct/fill_struct.go.golden124
-rw-r--r--gopls/internal/lsp/testdata/fillstruct/fill_struct_anon.go14
-rw-r--r--gopls/internal/lsp/testdata/fillstruct/fill_struct_anon.go.golden20
-rw-r--r--gopls/internal/lsp/testdata/fillstruct/fill_struct_nested.go15
-rw-r--r--gopls/internal/lsp/testdata/fillstruct/fill_struct_nested.go.golden19
-rw-r--r--gopls/internal/lsp/testdata/fillstruct/fill_struct_package.go12
-rw-r--r--gopls/internal/lsp/testdata/fillstruct/fill_struct_package.go.golden36
-rw-r--r--gopls/internal/lsp/testdata/fillstruct/fill_struct_partial.go24
-rw-r--r--gopls/internal/lsp/testdata/fillstruct/fill_struct_partial.go.golden52
-rw-r--r--gopls/internal/lsp/testdata/fillstruct/fill_struct_spaces.go9
-rw-r--r--gopls/internal/lsp/testdata/fillstruct/fill_struct_spaces.go.golden13
-rw-r--r--gopls/internal/lsp/testdata/fillstruct/fill_struct_unsafe.go12
-rw-r--r--gopls/internal/lsp/testdata/fillstruct/fill_struct_unsafe.go.golden17
-rw-r--r--gopls/internal/lsp/testdata/fillstruct/typeparams.go37
-rw-r--r--gopls/internal/lsp/testdata/fillstruct/typeparams.go.golden206
-rw-r--r--gopls/internal/lsp/testdata/folding/a.go (renamed from internal/lsp/testdata/folding/a.go)0
-rw-r--r--gopls/internal/lsp/testdata/folding/a.go.golden722
-rw-r--r--gopls/internal/lsp/testdata/folding/bad.go.golden81
-rw-r--r--gopls/internal/lsp/testdata/folding/bad.go.in (renamed from internal/lsp/testdata/folding/bad.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/foo/foo.go30
-rw-r--r--gopls/internal/lsp/testdata/format/bad_format.go.golden21
-rw-r--r--gopls/internal/lsp/testdata/format/bad_format.go.in22
-rw-r--r--gopls/internal/lsp/testdata/format/good_format.go (renamed from internal/lsp/testdata/format/good_format.go)0
-rw-r--r--gopls/internal/lsp/testdata/format/good_format.go.golden (renamed from internal/lsp/testdata/format/good_format.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/format/newline_format.go.golden (renamed from internal/lsp/testdata/format/newline_format.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/format/newline_format.go.in (renamed from internal/lsp/testdata/format/newline_format.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/format/one_line.go.golden (renamed from internal/lsp/testdata/format/one_line.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/format/one_line.go.in (renamed from internal/lsp/testdata/format/one_line.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/func_rank/func_rank.go.in (renamed from internal/lsp/testdata/func_rank/func_rank.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/funcsig/func_sig.go (renamed from internal/lsp/testdata/funcsig/func_sig.go)0
-rw-r--r--gopls/internal/lsp/testdata/funcvalue/func_value.go (renamed from internal/lsp/testdata/funcvalue/func_value.go)0
-rw-r--r--gopls/internal/lsp/testdata/fuzzymatch/fuzzymatch.go (renamed from internal/lsp/testdata/fuzzymatch/fuzzymatch.go)0
-rw-r--r--gopls/internal/lsp/testdata/generate/generate.go (renamed from internal/lsp/testdata/generate/generate.go)0
-rw-r--r--gopls/internal/lsp/testdata/generated/generated.go7
-rw-r--r--gopls/internal/lsp/testdata/generated/generator.go5
-rw-r--r--gopls/internal/lsp/testdata/godef/a/a_x_test.go9
-rw-r--r--gopls/internal/lsp/testdata/godef/a/a_x_test.go.golden (renamed from internal/lsp/testdata/godef/a/a_x_test.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/godef/a/d.go69
-rw-r--r--gopls/internal/lsp/testdata/godef/a/d.go.golden191
-rw-r--r--gopls/internal/lsp/testdata/godef/a/f.go16
-rw-r--r--gopls/internal/lsp/testdata/godef/a/f.go.golden (renamed from internal/lsp/testdata/godef/a/f.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/godef/a/g.go (renamed from internal/lsp/testdata/godef/a/g.go)0
-rw-r--r--gopls/internal/lsp/testdata/godef/a/g.go.golden7
-rw-r--r--gopls/internal/lsp/testdata/godef/a/h.go (renamed from internal/lsp/testdata/godef/a/h.go)0
-rw-r--r--gopls/internal/lsp/testdata/godef/a/h.go.golden161
-rw-r--r--gopls/internal/lsp/testdata/godef/b/e.go31
-rw-r--r--gopls/internal/lsp/testdata/godef/b/e.go.golden156
-rw-r--r--gopls/internal/lsp/testdata/godef/broken/unclosedIf.go.golden31
-rw-r--r--gopls/internal/lsp/testdata/godef/broken/unclosedIf.go.in (renamed from internal/lsp/testdata/godef/broken/unclosedIf.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/good/good0.go (renamed from internal/lsp/testdata/good/good0.go)0
-rw-r--r--gopls/internal/lsp/testdata/good/good1.go21
-rw-r--r--gopls/internal/lsp/testdata/highlights/highlights.go (renamed from internal/lsp/testdata/highlights/highlights.go)0
-rw-r--r--gopls/internal/lsp/testdata/implementation/implementation.go37
-rw-r--r--gopls/internal/lsp/testdata/implementation/implementation_generics.go16
-rw-r--r--gopls/internal/lsp/testdata/implementation/other/other.go (renamed from internal/lsp/testdata/implementation/other/other.go)0
-rw-r--r--gopls/internal/lsp/testdata/implementation/other/other_generics.go16
-rw-r--r--gopls/internal/lsp/testdata/implementation/other/other_test.go (renamed from internal/lsp/testdata/implementation/other/other_test.go)0
-rw-r--r--gopls/internal/lsp/testdata/importedcomplit/imported_complit.go.in42
-rw-r--r--gopls/internal/lsp/testdata/imports/add_import.go.golden (renamed from internal/lsp/testdata/imports/add_import.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/imports/add_import.go.in (renamed from internal/lsp/testdata/imports/add_import.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/imports/good_imports.go.golden (renamed from internal/lsp/testdata/imports/good_imports.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/imports/good_imports.go.in (renamed from internal/lsp/testdata/imports/good_imports.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/imports/issue35458.go.golden (renamed from internal/lsp/testdata/imports/issue35458.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/imports/issue35458.go.in (renamed from internal/lsp/testdata/imports/issue35458.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/imports/multiple_blocks.go.golden (renamed from internal/lsp/testdata/imports/multiple_blocks.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/imports/multiple_blocks.go.in (renamed from internal/lsp/testdata/imports/multiple_blocks.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/imports/needs_imports.go.golden (renamed from internal/lsp/testdata/imports/needs_imports.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/imports/needs_imports.go.in (renamed from internal/lsp/testdata/imports/needs_imports.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/imports/remove_import.go.golden (renamed from internal/lsp/testdata/imports/remove_import.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/imports/remove_import.go.in (renamed from internal/lsp/testdata/imports/remove_import.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/imports/remove_imports.go.golden (renamed from internal/lsp/testdata/imports/remove_imports.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/imports/remove_imports.go.in (renamed from internal/lsp/testdata/imports/remove_imports.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/imports/two_lines.go.golden (renamed from internal/lsp/testdata/imports/two_lines.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/imports/two_lines.go.in (renamed from internal/lsp/testdata/imports/two_lines.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/index/index.go (renamed from internal/lsp/testdata/index/index.go)0
-rw-r--r--gopls/internal/lsp/testdata/inlay_hint/composite_literals.go27
-rw-r--r--gopls/internal/lsp/testdata/inlay_hint/composite_literals.go.golden29
-rw-r--r--gopls/internal/lsp/testdata/inlay_hint/constant_values.go45
-rw-r--r--gopls/internal/lsp/testdata/inlay_hint/constant_values.go.golden47
-rw-r--r--gopls/internal/lsp/testdata/inlay_hint/parameter_names.go50
-rw-r--r--gopls/internal/lsp/testdata/inlay_hint/parameter_names.go.golden52
-rw-r--r--gopls/internal/lsp/testdata/inlay_hint/type_params.go45
-rw-r--r--gopls/internal/lsp/testdata/inlay_hint/type_params.go.golden47
-rw-r--r--gopls/internal/lsp/testdata/inlay_hint/variable_types.go20
-rw-r--r--gopls/internal/lsp/testdata/inlay_hint/variable_types.go.golden22
-rw-r--r--gopls/internal/lsp/testdata/interfacerank/interface_rank.go (renamed from internal/lsp/testdata/interfacerank/interface_rank.go)0
-rw-r--r--gopls/internal/lsp/testdata/issues/issue56505.go8
-rw-r--r--gopls/internal/lsp/testdata/keywords/accidental_keywords.go.in (renamed from internal/lsp/testdata/keywords/accidental_keywords.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/keywords/empty_select.go (renamed from internal/lsp/testdata/keywords/empty_select.go)0
-rw-r--r--gopls/internal/lsp/testdata/keywords/empty_switch.go (renamed from internal/lsp/testdata/keywords/empty_switch.go)0
-rw-r--r--gopls/internal/lsp/testdata/keywords/keywords.go100
-rw-r--r--gopls/internal/lsp/testdata/labels/labels.go (renamed from internal/lsp/testdata/labels/labels.go)0
-rw-r--r--gopls/internal/lsp/testdata/links/links.go26
-rw-r--r--gopls/internal/lsp/testdata/maps/maps.go.in (renamed from internal/lsp/testdata/maps/maps.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/missingfunction/channels.go9
-rw-r--r--gopls/internal/lsp/testdata/missingfunction/channels.go.golden15
-rw-r--r--gopls/internal/lsp/testdata/missingfunction/consecutive_params.go6
-rw-r--r--gopls/internal/lsp/testdata/missingfunction/consecutive_params.go.golden12
-rw-r--r--gopls/internal/lsp/testdata/missingfunction/error_param.go6
-rw-r--r--gopls/internal/lsp/testdata/missingfunction/error_param.go.golden12
-rw-r--r--gopls/internal/lsp/testdata/missingfunction/literals.go7
-rw-r--r--gopls/internal/lsp/testdata/missingfunction/literals.go.golden13
-rw-r--r--gopls/internal/lsp/testdata/missingfunction/operation.go7
-rw-r--r--gopls/internal/lsp/testdata/missingfunction/operation.go.golden13
-rw-r--r--gopls/internal/lsp/testdata/missingfunction/selector.go6
-rw-r--r--gopls/internal/lsp/testdata/missingfunction/selector.go.golden12
-rw-r--r--gopls/internal/lsp/testdata/missingfunction/slice.go5
-rw-r--r--gopls/internal/lsp/testdata/missingfunction/slice.go.golden11
-rw-r--r--gopls/internal/lsp/testdata/missingfunction/tuple.go9
-rw-r--r--gopls/internal/lsp/testdata/missingfunction/tuple.go.golden15
-rw-r--r--gopls/internal/lsp/testdata/missingfunction/unique_params.go7
-rw-r--r--gopls/internal/lsp/testdata/missingfunction/unique_params.go.golden13
-rw-r--r--gopls/internal/lsp/testdata/multireturn/multi_return.go.in (renamed from internal/lsp/testdata/multireturn/multi_return.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/nested_complit/nested_complit.go.in15
-rw-r--r--gopls/internal/lsp/testdata/nodisk/empty (renamed from internal/lsp/testdata/nodisk/empty)0
-rw-r--r--gopls/internal/lsp/testdata/nodisk/nodisk.overlay.go9
-rw-r--r--gopls/internal/lsp/testdata/noparse/noparse.go.in24
-rw-r--r--gopls/internal/lsp/testdata/noparse_format/noparse_format.go.golden (renamed from internal/lsp/testdata/noparse_format/noparse_format.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/noparse_format/noparse_format.go.in14
-rw-r--r--gopls/internal/lsp/testdata/noparse_format/parse_format.go.golden (renamed from internal/lsp/testdata/noparse_format/parse_format.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/noparse_format/parse_format.go.in (renamed from internal/lsp/testdata/noparse_format/parse_format.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/printf/printf.go (renamed from internal/lsp/testdata/printf/printf.go)0
-rw-r--r--gopls/internal/lsp/testdata/rank/assign_rank.go.in (renamed from internal/lsp/testdata/rank/assign_rank.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/rank/binexpr_rank.go.in (renamed from internal/lsp/testdata/rank/binexpr_rank.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/rank/boolexpr_rank.go (renamed from internal/lsp/testdata/rank/boolexpr_rank.go)0
-rw-r--r--gopls/internal/lsp/testdata/rank/convert_rank.go.in (renamed from internal/lsp/testdata/rank/convert_rank.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/rank/struct/struct_rank.go (renamed from internal/lsp/testdata/rank/struct/struct_rank.go)0
-rw-r--r--gopls/internal/lsp/testdata/rank/switch_rank.go.in (renamed from internal/lsp/testdata/rank/switch_rank.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/rank/type_assert_rank.go.in (renamed from internal/lsp/testdata/rank/type_assert_rank.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/rank/type_switch_rank.go.in (renamed from internal/lsp/testdata/rank/type_switch_rank.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/references/another/another.go13
-rw-r--r--gopls/internal/lsp/testdata/references/interfaces/interfaces.go (renamed from internal/lsp/testdata/references/interfaces/interfaces.go)0
-rw-r--r--gopls/internal/lsp/testdata/references/other/other.go19
-rw-r--r--gopls/internal/lsp/testdata/references/refs.go53
-rw-r--r--gopls/internal/lsp/testdata/references/refs_test.go (renamed from internal/lsp/testdata/references/refs_test.go)0
-rw-r--r--gopls/internal/lsp/testdata/rename/a/random.go.golden (renamed from internal/lsp/testdata/rename/a/random.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/rename/a/random.go.in (renamed from internal/lsp/testdata/rename/a/random.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/rename/b/b.go (renamed from internal/lsp/testdata/rename/b/b.go)0
-rw-r--r--gopls/internal/lsp/testdata/rename/b/b.go.golden78
-rw-r--r--gopls/internal/lsp/testdata/rename/bad/bad.go.golden2
-rw-r--r--gopls/internal/lsp/testdata/rename/bad/bad.go.in (renamed from internal/lsp/testdata/rename/bad/bad.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/rename/bad/bad_test.go.in (renamed from internal/lsp/testdata/rename/bad/bad_test.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/rename/c/c.go7
-rw-r--r--gopls/internal/lsp/testdata/rename/c/c.go.golden32
-rw-r--r--gopls/internal/lsp/testdata/rename/c/c2.go (renamed from internal/lsp/testdata/rename/c/c2.go)0
-rw-r--r--gopls/internal/lsp/testdata/rename/c/c2.go.golden (renamed from internal/lsp/testdata/rename/c/c2.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/rename/crosspkg/another/another.go (renamed from internal/lsp/testdata/rename/crosspkg/another/another.go)0
-rw-r--r--gopls/internal/lsp/testdata/rename/crosspkg/another/another.go.golden (renamed from internal/lsp/testdata/rename/crosspkg/another/another.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/rename/crosspkg/crosspkg.go (renamed from internal/lsp/testdata/rename/crosspkg/crosspkg.go)0
-rw-r--r--gopls/internal/lsp/testdata/rename/crosspkg/crosspkg.go.golden40
-rw-r--r--gopls/internal/lsp/testdata/rename/crosspkg/other/other.go8
-rw-r--r--gopls/internal/lsp/testdata/rename/crosspkg/other/other.go.golden20
-rw-r--r--gopls/internal/lsp/testdata/rename/generics/embedded.go (renamed from internal/lsp/testdata/rename/generics/embedded.go)0
-rw-r--r--gopls/internal/lsp/testdata/rename/generics/embedded.go.golden (renamed from internal/lsp/testdata/rename/generics/embedded.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/rename/generics/generics.go (renamed from internal/lsp/testdata/rename/generics/generics.go)0
-rw-r--r--gopls/internal/lsp/testdata/rename/generics/generics.go.golden (renamed from internal/lsp/testdata/rename/generics/generics.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/rename/generics/unions.go (renamed from internal/lsp/testdata/rename/generics/unions.go)0
-rw-r--r--gopls/internal/lsp/testdata/rename/generics/unions.go.golden (renamed from internal/lsp/testdata/rename/generics/unions.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/rename/issue39614/issue39614.go.golden (renamed from internal/lsp/testdata/rename/issue39614/issue39614.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/rename/issue39614/issue39614.go.in (renamed from internal/lsp/testdata/rename/issue39614/issue39614.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/rename/issue42134/1.go (renamed from internal/lsp/testdata/rename/issue42134/1.go)0
-rw-r--r--gopls/internal/lsp/testdata/rename/issue42134/1.go.golden (renamed from internal/lsp/testdata/rename/issue42134/1.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/rename/issue42134/2.go (renamed from internal/lsp/testdata/rename/issue42134/2.go)0
-rw-r--r--gopls/internal/lsp/testdata/rename/issue42134/2.go.golden (renamed from internal/lsp/testdata/rename/issue42134/2.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/rename/issue42134/3.go (renamed from internal/lsp/testdata/rename/issue42134/3.go)0
-rw-r--r--gopls/internal/lsp/testdata/rename/issue42134/3.go.golden (renamed from internal/lsp/testdata/rename/issue42134/3.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/rename/issue42134/4.go (renamed from internal/lsp/testdata/rename/issue42134/4.go)0
-rw-r--r--gopls/internal/lsp/testdata/rename/issue42134/4.go.golden (renamed from internal/lsp/testdata/rename/issue42134/4.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/rename/issue43616/issue43616.go.golden (renamed from internal/lsp/testdata/rename/issue43616/issue43616.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/rename/issue43616/issue43616.go.in (renamed from internal/lsp/testdata/rename/issue43616/issue43616.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/rename/shadow/shadow.go (renamed from internal/lsp/testdata/rename/shadow/shadow.go)0
-rw-r--r--gopls/internal/lsp/testdata/rename/shadow/shadow.go.golden51
-rw-r--r--gopls/internal/lsp/testdata/rename/testy/testy.go (renamed from internal/lsp/testdata/rename/testy/testy.go)0
-rw-r--r--gopls/internal/lsp/testdata/rename/testy/testy.go.golden (renamed from internal/lsp/testdata/rename/testy/testy.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/rename/testy/testy_test.go (renamed from internal/lsp/testdata/rename/testy/testy_test.go)0
-rw-r--r--gopls/internal/lsp/testdata/rename/testy/testy_test.go.golden (renamed from internal/lsp/testdata/rename/testy/testy_test.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/rundespiteerrors/rundespiteerrors.go14
-rw-r--r--gopls/internal/lsp/testdata/selectionrange/foo.go13
-rw-r--r--gopls/internal/lsp/testdata/selectionrange/foo.go.golden29
-rw-r--r--gopls/internal/lsp/testdata/selector/selector.go.in66
-rw-r--r--gopls/internal/lsp/testdata/semantic/README.md (renamed from internal/lsp/testdata/semantic/README.md)0
-rw-r--r--gopls/internal/lsp/testdata/semantic/a.go (renamed from internal/lsp/testdata/semantic/a.go)0
-rw-r--r--gopls/internal/lsp/testdata/semantic/a.go.golden83
-rw-r--r--gopls/internal/lsp/testdata/semantic/b.go38
-rw-r--r--gopls/internal/lsp/testdata/semantic/b.go.golden40
-rw-r--r--gopls/internal/lsp/testdata/semantic/semantic_test.go (renamed from internal/lsp/testdata/semantic/semantic_test.go)0
-rw-r--r--gopls/internal/lsp/testdata/signature/signature.go (renamed from internal/lsp/testdata/signature/signature.go)0
-rw-r--r--gopls/internal/lsp/testdata/signature/signature.go.golden53
-rw-r--r--gopls/internal/lsp/testdata/signature/signature2.go.golden (renamed from internal/lsp/testdata/signature/signature2.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/signature/signature2.go.in (renamed from internal/lsp/testdata/signature/signature2.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/signature/signature3.go.golden (renamed from internal/lsp/testdata/signature/signature3.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/signature/signature3.go.in (renamed from internal/lsp/testdata/signature/signature3.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/signature/signature_test.go13
-rw-r--r--gopls/internal/lsp/testdata/signature/signature_test.go.golden9
-rw-r--r--gopls/internal/lsp/testdata/snippets/func_snippets118.go.in (renamed from internal/lsp/testdata/snippets/func_snippets118.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/snippets/literal.go22
-rw-r--r--gopls/internal/lsp/testdata/snippets/literal.go.golden3
-rw-r--r--gopls/internal/lsp/testdata/snippets/literal_snippets.go.in233
-rw-r--r--gopls/internal/lsp/testdata/snippets/literal_snippets118.go.in (renamed from internal/lsp/testdata/snippets/literal_snippets118.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/snippets/postfix.go (renamed from internal/lsp/testdata/snippets/postfix.go)0
-rw-r--r--gopls/internal/lsp/testdata/snippets/snippets.go.golden (renamed from internal/lsp/testdata/snippets/snippets.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/snippets/snippets.go.in (renamed from internal/lsp/testdata/snippets/snippets.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/statements/append.go (renamed from internal/lsp/testdata/statements/append.go)0
-rw-r--r--gopls/internal/lsp/testdata/statements/if_err_check_return.go (renamed from internal/lsp/testdata/statements/if_err_check_return.go)0
-rw-r--r--gopls/internal/lsp/testdata/statements/if_err_check_return_2.go (renamed from internal/lsp/testdata/statements/if_err_check_return_2.go)0
-rw-r--r--gopls/internal/lsp/testdata/statements/if_err_check_test.go (renamed from internal/lsp/testdata/statements/if_err_check_test.go)0
-rw-r--r--gopls/internal/lsp/testdata/stub/other/other.go (renamed from internal/lsp/testdata/stub/other/other.go)0
-rw-r--r--gopls/internal/lsp/testdata/stub/stub_add_selector.go12
-rw-r--r--gopls/internal/lsp/testdata/stub/stub_add_selector.go.golden19
-rw-r--r--gopls/internal/lsp/testdata/stub/stub_assign.go10
-rw-r--r--gopls/internal/lsp/testdata/stub/stub_assign.go.golden17
-rw-r--r--gopls/internal/lsp/testdata/stub/stub_assign_multivars.go11
-rw-r--r--gopls/internal/lsp/testdata/stub/stub_assign_multivars.go.golden18
-rw-r--r--gopls/internal/lsp/testdata/stub/stub_call_expr.go13
-rw-r--r--gopls/internal/lsp/testdata/stub/stub_call_expr.go.golden20
-rw-r--r--gopls/internal/lsp/testdata/stub/stub_embedded.go15
-rw-r--r--gopls/internal/lsp/testdata/stub/stub_embedded.go.golden37
-rw-r--r--gopls/internal/lsp/testdata/stub/stub_err.go7
-rw-r--r--gopls/internal/lsp/testdata/stub/stub_err.go.golden14
-rw-r--r--gopls/internal/lsp/testdata/stub/stub_function_return.go11
-rw-r--r--gopls/internal/lsp/testdata/stub/stub_function_return.go.golden18
-rw-r--r--gopls/internal/lsp/testdata/stub/stub_generic_receiver.go15
-rw-r--r--gopls/internal/lsp/testdata/stub/stub_generic_receiver.go.golden22
-rw-r--r--gopls/internal/lsp/testdata/stub/stub_ignored_imports.go18
-rw-r--r--gopls/internal/lsp/testdata/stub/stub_ignored_imports.go.golden25
-rw-r--r--gopls/internal/lsp/testdata/stub/stub_issue2606.go7
-rw-r--r--gopls/internal/lsp/testdata/stub/stub_issue2606.go.golden14
-rw-r--r--gopls/internal/lsp/testdata/stub/stub_multi_var.go11
-rw-r--r--gopls/internal/lsp/testdata/stub/stub_multi_var.go.golden18
-rw-r--r--gopls/internal/lsp/testdata/stub/stub_pointer.go9
-rw-r--r--gopls/internal/lsp/testdata/stub/stub_pointer.go.golden16
-rw-r--r--gopls/internal/lsp/testdata/stub/stub_renamed_import.go11
-rw-r--r--gopls/internal/lsp/testdata/stub/stub_renamed_import.go.golden18
-rw-r--r--gopls/internal/lsp/testdata/stub/stub_renamed_import_iface.go13
-rw-r--r--gopls/internal/lsp/testdata/stub/stub_renamed_import_iface.go.golden22
-rw-r--r--gopls/internal/lsp/testdata/stub/stub_stdlib.go9
-rw-r--r--gopls/internal/lsp/testdata/stub/stub_stdlib.go.golden16
-rw-r--r--gopls/internal/lsp/testdata/stub/stub_typedecl_group.go27
-rw-r--r--gopls/internal/lsp/testdata/stub/stub_typedecl_group.go.golden39
-rw-r--r--gopls/internal/lsp/testdata/suggestedfix/has_suggested_fix.go11
-rw-r--r--gopls/internal/lsp/testdata/suggestedfix/has_suggested_fix.go.golden13
-rw-r--r--gopls/internal/lsp/testdata/summary.txt.golden32
-rw-r--r--gopls/internal/lsp/testdata/summary_go1.18.txt.golden32
-rw-r--r--gopls/internal/lsp/testdata/symbols/go1.18.go16
-rw-r--r--gopls/internal/lsp/testdata/symbols/go1.18.go.golden7
-rw-r--r--gopls/internal/lsp/testdata/symbols/main.go91
-rw-r--r--gopls/internal/lsp/testdata/symbols/main.go.golden36
-rw-r--r--gopls/internal/lsp/testdata/testy/testy.go (renamed from internal/lsp/testdata/testy/testy.go)0
-rw-r--r--gopls/internal/lsp/testdata/testy/testy_test.go18
-rw-r--r--gopls/internal/lsp/testdata/testy/testy_test.go.golden (renamed from internal/lsp/testdata/testy/testy_test.go.golden)0
-rw-r--r--gopls/internal/lsp/testdata/typdef/typdef.go (renamed from internal/lsp/testdata/typdef/typdef.go)0
-rw-r--r--gopls/internal/lsp/testdata/typeassert/type_assert.go (renamed from internal/lsp/testdata/typeassert/type_assert.go)0
-rw-r--r--gopls/internal/lsp/testdata/typeerrors/noresultvalues.go5
-rw-r--r--gopls/internal/lsp/testdata/typeerrors/noresultvalues.go.golden14
-rw-r--r--gopls/internal/lsp/testdata/typemods/type_mods.go (renamed from internal/lsp/testdata/typemods/type_mods.go)0
-rw-r--r--gopls/internal/lsp/testdata/typeparams/type_params.go61
-rw-r--r--gopls/internal/lsp/testdata/types/types.go (renamed from internal/lsp/testdata/types/types.go)0
-rw-r--r--gopls/internal/lsp/testdata/undeclared/var.go14
-rw-r--r--gopls/internal/lsp/testdata/undeclared/var.go.golden51
-rw-r--r--gopls/internal/lsp/testdata/unimported/export_test.go3
-rw-r--r--gopls/internal/lsp/testdata/unimported/unimported.go.in23
-rw-r--r--gopls/internal/lsp/testdata/unimported/unimported_cand_type.go16
-rw-r--r--gopls/internal/lsp/testdata/unimported/x_test.go (renamed from internal/lsp/testdata/unimported/x_test.go)0
-rw-r--r--gopls/internal/lsp/testdata/unresolved/unresolved.go.in (renamed from internal/lsp/testdata/unresolved/unresolved.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/unsafe/unsafe.go (renamed from internal/lsp/testdata/unsafe/unsafe.go)0
-rw-r--r--gopls/internal/lsp/testdata/variadic/variadic.go.in (renamed from internal/lsp/testdata/variadic/variadic.go.in)0
-rw-r--r--gopls/internal/lsp/testdata/variadic/variadic_intf.go (renamed from internal/lsp/testdata/variadic/variadic_intf.go)0
-rw-r--r--gopls/internal/lsp/testdata/workspacesymbol/a/a.go9
-rw-r--r--gopls/internal/lsp/testdata/workspacesymbol/a/a_test.go3
-rw-r--r--gopls/internal/lsp/testdata/workspacesymbol/a/a_x_test.go3
-rw-r--r--gopls/internal/lsp/testdata/workspacesymbol/b/b.go7
-rw-r--r--gopls/internal/lsp/testdata/workspacesymbol/issue44806.go (renamed from internal/lsp/testdata/workspacesymbol/issue44806.go)0
-rw-r--r--gopls/internal/lsp/testdata/workspacesymbol/main.go (renamed from internal/lsp/testdata/workspacesymbol/main.go)0
-rw-r--r--gopls/internal/lsp/testdata/workspacesymbol/p/p.go (renamed from internal/lsp/testdata/workspacesymbol/p/p.go)0
-rw-r--r--gopls/internal/lsp/testdata/workspacesymbol/query.go (renamed from internal/lsp/testdata/workspacesymbol/query.go)0
-rw-r--r--gopls/internal/lsp/testdata/workspacesymbol/query.go.golden (renamed from internal/lsp/testdata/workspacesymbol/query.go.golden)0
-rw-r--r--gopls/internal/lsp/tests/README.md66
-rw-r--r--gopls/internal/lsp/tests/compare/text.go49
-rw-r--r--gopls/internal/lsp/tests/compare/text_test.go28
-rw-r--r--gopls/internal/lsp/tests/markdown_go118.go69
-rw-r--r--gopls/internal/lsp/tests/markdown_go119.go22
-rw-r--r--gopls/internal/lsp/tests/normalizer.go113
-rw-r--r--gopls/internal/lsp/tests/tests.go1446
-rw-r--r--gopls/internal/lsp/tests/util.go547
-rw-r--r--gopls/internal/lsp/tests/util_go118.go13
-rw-r--r--gopls/internal/lsp/tests/util_go121.go12
-rw-r--r--gopls/internal/lsp/text_synchronization.go349
-rw-r--r--gopls/internal/lsp/work/completion.go154
-rw-r--r--gopls/internal/lsp/work/diagnostics.go92
-rw-r--r--gopls/internal/lsp/work/format.go28
-rw-r--r--gopls/internal/lsp/work/hover.go89
-rw-r--r--gopls/internal/lsp/workspace.go95
-rw-r--r--gopls/internal/lsp/workspace_symbol.go32
-rw-r--r--gopls/internal/regtest/bench/bench_test.go349
-rw-r--r--gopls/internal/regtest/bench/completion_bench_test.go186
-rw-r--r--gopls/internal/regtest/bench/completion_test.go173
-rw-r--r--gopls/internal/regtest/bench/definition_test.go39
-rw-r--r--gopls/internal/regtest/bench/didchange_test.go99
-rw-r--r--gopls/internal/regtest/bench/doc.go33
-rw-r--r--gopls/internal/regtest/bench/hover_test.go39
-rw-r--r--gopls/internal/regtest/bench/implementations_test.go37
-rw-r--r--gopls/internal/regtest/bench/iwl_test.go77
-rw-r--r--gopls/internal/regtest/bench/references_test.go37
-rw-r--r--gopls/internal/regtest/bench/rename_test.go44
-rw-r--r--gopls/internal/regtest/bench/repo_test.go231
-rw-r--r--gopls/internal/regtest/bench/stress_test.go102
-rw-r--r--gopls/internal/regtest/bench/workspace_symbols_test.go37
-rw-r--r--gopls/internal/regtest/codelens/codelens_test.go213
-rw-r--r--gopls/internal/regtest/codelens/gcdetails_test.go127
-rw-r--r--gopls/internal/regtest/completion/completion18_test.go21
-rw-r--r--gopls/internal/regtest/completion/completion_test.go320
-rw-r--r--gopls/internal/regtest/completion/postfix_snippet_test.go50
-rw-r--r--gopls/internal/regtest/debug/debug_test.go30
-rw-r--r--gopls/internal/regtest/diagnostics/analysis_test.go49
-rw-r--r--gopls/internal/regtest/diagnostics/builtin_test.go13
-rw-r--r--gopls/internal/regtest/diagnostics/diagnostics_test.go1041
-rw-r--r--gopls/internal/regtest/diagnostics/golist_test.go71
-rw-r--r--gopls/internal/regtest/diagnostics/invalidation_test.go111
-rw-r--r--gopls/internal/regtest/diagnostics/undeclared_test.go26
-rw-r--r--gopls/internal/regtest/inlayhints/inlayhints_test.go69
-rw-r--r--gopls/internal/regtest/marker/marker_test.go21
-rw-r--r--gopls/internal/regtest/marker/testdata/definition/embed.txt254
-rw-r--r--gopls/internal/regtest/marker/testdata/definition/import.txt52
-rw-r--r--gopls/internal/regtest/marker/testdata/definition/misc.txt230
-rw-r--r--gopls/internal/regtest/marker/testdata/hover/basiclit.txt60
-rw-r--r--gopls/internal/regtest/marker/testdata/hover/const.txt18
-rw-r--r--gopls/internal/regtest/marker/testdata/hover/generics.txt77
-rw-r--r--gopls/internal/regtest/marker/testdata/hover/goprivate.txt27
-rw-r--r--gopls/internal/regtest/marker/testdata/hover/hover.txt29
-rw-r--r--gopls/internal/regtest/marker/testdata/hover/linkable.txt120
-rw-r--r--gopls/internal/regtest/marker/testdata/hover/linkable_generics.txt145
-rw-r--r--gopls/internal/regtest/marker/testdata/hover/std.txt80
-rw-r--r--gopls/internal/regtest/marker/testdata/rename/basic.txt22
-rw-r--r--gopls/internal/regtest/marker/testdata/rename/conflict.txt59
-rw-r--r--gopls/internal/regtest/marker/testdata/rename/embed.txt36
-rw-r--r--gopls/internal/regtest/marker/testdata/rename/methods.txt67
-rw-r--r--gopls/internal/regtest/marker/testdata/rename/typeswitch.txt26
-rw-r--r--gopls/internal/regtest/marker/testdata/stubmethods/basic.txt24
-rw-r--r--gopls/internal/regtest/misc/call_hierarchy_test.go10
-rw-r--r--gopls/internal/regtest/misc/configuration_test.go132
-rw-r--r--gopls/internal/regtest/misc/debugserver_test.go6
-rw-r--r--gopls/internal/regtest/misc/definition_test.go301
-rw-r--r--gopls/internal/regtest/misc/embed_test.go13
-rw-r--r--gopls/internal/regtest/misc/extract_test.go65
-rw-r--r--gopls/internal/regtest/misc/failures_test.go46
-rw-r--r--gopls/internal/regtest/misc/fix_test.go24
-rw-r--r--gopls/internal/regtest/misc/formatting_test.go45
-rw-r--r--gopls/internal/regtest/misc/generate_test.go19
-rw-r--r--gopls/internal/regtest/misc/highlight_test.go36
-rw-r--r--gopls/internal/regtest/misc/hover_test.go272
-rw-r--r--gopls/internal/regtest/misc/import_test.go12
-rw-r--r--gopls/internal/regtest/misc/imports_test.go78
-rw-r--r--gopls/internal/regtest/misc/leak_test.go89
-rw-r--r--gopls/internal/regtest/misc/link_test.go27
-rw-r--r--gopls/internal/regtest/misc/misc_test.go4
-rw-r--r--gopls/internal/regtest/misc/multiple_adhoc_test.go8
-rw-r--r--gopls/internal/regtest/misc/references_test.go340
-rw-r--r--gopls/internal/regtest/misc/rename_test.go885
-rw-r--r--gopls/internal/regtest/misc/semantictokens_test.go172
-rw-r--r--gopls/internal/regtest/misc/settings_test.go8
-rw-r--r--gopls/internal/regtest/misc/shared_test.go58
-rw-r--r--gopls/internal/regtest/misc/signature_help_test.go69
-rw-r--r--gopls/internal/regtest/misc/staticcheck_test.go110
-rw-r--r--gopls/internal/regtest/misc/vendor_test.go26
-rw-r--r--gopls/internal/regtest/misc/vuln_test.go948
-rw-r--r--gopls/internal/regtest/misc/workspace_symbol_test.go49
-rw-r--r--gopls/internal/regtest/modfile/modfile_test.go349
-rw-r--r--gopls/internal/regtest/template/template_test.go83
-rw-r--r--gopls/internal/regtest/watch/watch_test.go292
-rw-r--r--gopls/internal/regtest/workspace/broken_test.go264
-rw-r--r--gopls/internal/regtest/workspace/directoryfilters_test.go259
-rw-r--r--gopls/internal/regtest/workspace/fromenv_test.go68
-rw-r--r--gopls/internal/regtest/workspace/metadata_test.go181
-rw-r--r--gopls/internal/regtest/workspace/misspelling_test.go80
-rw-r--r--gopls/internal/regtest/workspace/standalone_test.go206
-rw-r--r--gopls/internal/regtest/workspace/workspace_test.go842
-rw-r--r--gopls/internal/span/parse.go114
-rw-r--r--gopls/internal/span/span.go253
-rw-r--r--gopls/internal/span/span_test.go57
-rw-r--r--gopls/internal/span/uri.go185
-rw-r--r--gopls/internal/span/uri_test.go117
-rw-r--r--gopls/internal/span/uri_windows_test.go112
-rw-r--r--gopls/internal/vulncheck/cache.go124
-rw-r--r--gopls/internal/vulncheck/command.go414
-rw-r--r--gopls/internal/vulncheck/command_test.go378
-rw-r--r--gopls/internal/vulncheck/util.go130
-rw-r--r--gopls/internal/vulncheck/vulncheck.go14
-rw-r--r--gopls/internal/vulncheck/vulntest/db.go303
-rw-r--r--gopls/internal/vulncheck/vulntest/db_test.go61
-rw-r--r--gopls/internal/vulncheck/vulntest/report.go176
-rw-r--r--gopls/internal/vulncheck/vulntest/report_test.go52
-rw-r--r--gopls/internal/vulncheck/vulntest/stdlib.go26
-rw-r--r--gopls/internal/vulncheck/vulntest/stdlib_test.go27
-rw-r--r--gopls/internal/vulncheck/vulntest/testdata/report.yaml15
-rw-r--r--gopls/main.go4
-rw-r--r--gopls/release/release.go85
-rw-r--r--gopls/test/debug/debug_test.go60
-rw-r--r--gopls/test/gopls_test.go32
-rw-r--r--gopls/test/json_test.go22
-rw-r--r--imports/forward.go2
-rw-r--r--internal/analysisinternal/analysis.go126
-rw-r--r--internal/apidiff/compatibility.go14
-rw-r--r--internal/bug/bug.go132
-rw-r--r--internal/bug/bug_test.go65
-rw-r--r--internal/diff/diff.go169
-rw-r--r--internal/diff/diff_test.go199
-rw-r--r--internal/diff/difftest/difftest.go289
-rw-r--r--internal/diff/difftest/difftest_test.go83
-rw-r--r--internal/diff/export_test.go9
-rw-r--r--internal/diff/lcs/common.go179
-rw-r--r--internal/diff/lcs/common_test.go140
-rw-r--r--internal/diff/lcs/doc.go156
-rw-r--r--internal/diff/lcs/git.sh33
-rw-r--r--internal/diff/lcs/labels.go55
-rw-r--r--internal/diff/lcs/old.go480
-rw-r--r--internal/diff/lcs/old_test.go251
-rw-r--r--internal/diff/lcs/sequence.go113
-rw-r--r--internal/diff/myers/diff.go215
-rw-r--r--internal/diff/myers/diff_test.go16
-rw-r--r--internal/diff/ndiff.go109
-rw-r--r--internal/diff/unified.go248
-rw-r--r--internal/event/export/ocagent/wire/metrics.go14
-rw-r--r--internal/event/export/trace.go2
-rw-r--r--internal/event/tag/tag.go59
-rw-r--r--internal/facts/facts.go335
-rw-r--r--internal/facts/facts_test.go564
-rw-r--r--internal/facts/imports.go130
-rw-r--r--internal/fastwalk/fastwalk.go6
-rw-r--r--internal/fastwalk/fastwalk_darwin.go119
-rw-r--r--internal/fastwalk/fastwalk_dirent_ino.go6
-rw-r--r--internal/fastwalk/fastwalk_dirent_namlen_bsd.go4
-rw-r--r--internal/fastwalk/fastwalk_unix.go4
-rw-r--r--internal/fuzzy/input.go (renamed from internal/lsp/fuzzy/input.go)0
-rw-r--r--internal/fuzzy/input_test.go141
-rw-r--r--internal/fuzzy/matcher.go434
-rw-r--r--internal/fuzzy/matcher_test.go294
-rw-r--r--internal/fuzzy/symbol.go237
-rw-r--r--internal/fuzzy/symbol_test.go79
-rw-r--r--internal/gcimporter/bexport.go852
-rw-r--r--internal/gcimporter/bexport_test.go551
-rw-r--r--internal/gcimporter/bimport.go (renamed from go/internal/gcimporter/bimport.go)0
-rw-r--r--internal/gcimporter/exportdata.go (renamed from go/internal/gcimporter/exportdata.go)0
-rw-r--r--internal/gcimporter/gcimporter.go277
-rw-r--r--internal/gcimporter/gcimporter_test.go935
-rw-r--r--internal/gcimporter/iexport.go1180
-rw-r--r--internal/gcimporter/iexport_common_test.go (renamed from go/internal/gcimporter/iexport_common_test.go)0
-rw-r--r--internal/gcimporter/iexport_go118_test.go257
-rw-r--r--internal/gcimporter/iexport_test.go454
-rw-r--r--internal/gcimporter/iimport.go976
-rw-r--r--internal/gcimporter/israce_test.go (renamed from go/internal/gcimporter/israce_test.go)0
-rw-r--r--internal/gcimporter/newInterface10.go (renamed from go/internal/gcimporter/newInterface10.go)0
-rw-r--r--internal/gcimporter/newInterface11.go (renamed from go/internal/gcimporter/newInterface11.go)0
-rw-r--r--internal/gcimporter/shallow_test.go226
-rw-r--r--internal/gcimporter/stdlib_test.go94
-rw-r--r--internal/gcimporter/support_go117.go (renamed from go/internal/gcimporter/support_go117.go)0
-rw-r--r--internal/gcimporter/support_go118.go37
-rw-r--r--internal/gcimporter/testdata/a.go (renamed from go/internal/gcimporter/testdata/a.go)0
-rw-r--r--internal/gcimporter/testdata/a/a.go14
-rw-r--r--internal/gcimporter/testdata/b.go (renamed from go/internal/gcimporter/testdata/b.go)0
-rw-r--r--internal/gcimporter/testdata/exports.go (renamed from go/internal/gcimporter/testdata/exports.go)0
-rw-r--r--internal/gcimporter/testdata/issue15920.go (renamed from go/internal/gcimporter/testdata/issue15920.go)0
-rw-r--r--internal/gcimporter/testdata/issue20046.go (renamed from go/internal/gcimporter/testdata/issue20046.go)0
-rw-r--r--internal/gcimporter/testdata/issue25301.go (renamed from go/internal/gcimporter/testdata/issue25301.go)0
-rw-r--r--internal/gcimporter/testdata/issue51836/a.go8
-rw-r--r--internal/gcimporter/testdata/issue51836/a/a.go8
-rw-r--r--internal/gcimporter/testdata/issue51836/aa.go13
-rw-r--r--internal/gcimporter/testdata/issue57015.go16
-rw-r--r--internal/gcimporter/testdata/issue58296/a/a.go9
-rw-r--r--internal/gcimporter/testdata/issue58296/b/b.go11
-rw-r--r--internal/gcimporter/testdata/issue58296/c/c.go11
-rw-r--r--internal/gcimporter/testdata/p.go (renamed from go/internal/gcimporter/testdata/p.go)0
-rw-r--r--internal/gcimporter/testdata/versions/test.go (renamed from go/internal/gcimporter/testdata/versions/test.go)0
-rw-r--r--internal/gcimporter/testdata/versions/test_go1.11_0i.a (renamed from go/internal/gcimporter/testdata/versions/test_go1.11_0i.a)bin2420 -> 2420 bytes
-rw-r--r--internal/gcimporter/testdata/versions/test_go1.11_6b.a (renamed from go/internal/gcimporter/testdata/versions/test_go1.11_6b.a)bin2426 -> 2426 bytes
-rw-r--r--internal/gcimporter/testdata/versions/test_go1.11_999b.a (renamed from go/internal/gcimporter/testdata/versions/test_go1.11_999b.a)bin2600 -> 2600 bytes
-rw-r--r--internal/gcimporter/testdata/versions/test_go1.11_999i.a (renamed from go/internal/gcimporter/testdata/versions/test_go1.11_999i.a)bin2420 -> 2420 bytes
-rw-r--r--internal/gcimporter/testdata/versions/test_go1.7_0.a (renamed from go/internal/gcimporter/testdata/versions/test_go1.7_0.a)bin1862 -> 1862 bytes
-rw-r--r--internal/gcimporter/testdata/versions/test_go1.7_1.a (renamed from go/internal/gcimporter/testdata/versions/test_go1.7_1.a)bin2316 -> 2316 bytes
-rw-r--r--internal/gcimporter/testdata/versions/test_go1.8_4.a (renamed from go/internal/gcimporter/testdata/versions/test_go1.8_4.a)bin1658 -> 1658 bytes
-rw-r--r--internal/gcimporter/testdata/versions/test_go1.8_5.a (renamed from go/internal/gcimporter/testdata/versions/test_go1.8_5.a)bin1658 -> 1658 bytes
-rw-r--r--internal/gcimporter/unified_no.go10
-rw-r--r--internal/gcimporter/unified_yes.go10
-rw-r--r--internal/gcimporter/ureader_no.go19
-rw-r--r--internal/gcimporter/ureader_yes.go719
-rw-r--r--internal/gocommand/invoke.go89
-rw-r--r--internal/gocommand/version.go36
-rw-r--r--internal/gocommand/version_test.go31
-rw-r--r--internal/gopathwalk/walk.go20
-rw-r--r--internal/gopathwalk/walk_test.go2
-rw-r--r--internal/goroot/importcfg.go71
-rw-r--r--internal/imports/fix.go20
-rw-r--r--internal/imports/fix_test.go94
-rw-r--r--internal/imports/imports.go27
-rw-r--r--internal/imports/mkstdlib.go86
-rw-r--r--internal/imports/mod.go36
-rw-r--r--internal/imports/mod_test.go65
-rw-r--r--internal/imports/sortimports.go40
-rw-r--r--internal/imports/zstdlib.go606
-rw-r--r--internal/jsonrpc2/conn.go2
-rw-r--r--internal/jsonrpc2/messages.go3
-rw-r--r--internal/jsonrpc2/serve.go5
-rw-r--r--internal/jsonrpc2/servertest/servertest.go4
-rw-r--r--internal/jsonrpc2/servertest/servertest_test.go2
-rw-r--r--internal/jsonrpc2/wire.go2
-rw-r--r--internal/jsonrpc2_v2/conn.go954
-rw-r--r--internal/jsonrpc2_v2/frame.go22
-rw-r--r--internal/jsonrpc2_v2/jsonrpc2.go12
-rw-r--r--internal/jsonrpc2_v2/jsonrpc2_test.go55
-rw-r--r--internal/jsonrpc2_v2/messages.go12
-rw-r--r--internal/jsonrpc2_v2/net.go35
-rw-r--r--internal/jsonrpc2_v2/serve.go367
-rw-r--r--internal/jsonrpc2_v2/serve_go116.go19
-rw-r--r--internal/jsonrpc2_v2/serve_pre116.go30
-rw-r--r--internal/jsonrpc2_v2/serve_test.go284
-rw-r--r--internal/jsonrpc2_v2/wire.go12
-rw-r--r--internal/lockedfile/internal/filelock/filelock.go99
-rw-r--r--internal/lockedfile/internal/filelock/filelock_fcntl.go215
-rw-r--r--internal/lockedfile/internal/filelock/filelock_other.go37
-rw-r--r--internal/lockedfile/internal/filelock/filelock_plan9.go37
-rw-r--r--internal/lockedfile/internal/filelock/filelock_test.go209
-rw-r--r--internal/lockedfile/internal/filelock/filelock_unix.go45
-rw-r--r--internal/lockedfile/internal/filelock/filelock_windows.go67
-rw-r--r--internal/lockedfile/lockedfile.go187
-rw-r--r--internal/lockedfile/lockedfile_filelock.go66
-rw-r--r--internal/lockedfile/lockedfile_plan9.go95
-rw-r--r--internal/lockedfile/lockedfile_test.go270
-rw-r--r--internal/lockedfile/mutex.go67
-rw-r--r--internal/lockedfile/transform_test.go106
-rw-r--r--internal/lsp/analysis/fillreturns/fillreturns.go276
-rw-r--r--internal/lsp/analysis/fillreturns/fillreturns_test.go22
-rw-r--r--internal/lsp/analysis/fillstruct/fillstruct.go495
-rw-r--r--internal/lsp/analysis/fillstruct/fillstruct_test.go22
-rw-r--r--internal/lsp/analysis/fillstruct/testdata/src/a/a.go106
-rw-r--r--internal/lsp/analysis/fillstruct/testdata/src/typeparams/typeparams.go41
-rw-r--r--internal/lsp/analysis/infertypeargs/infertypeargs_test.go23
-rw-r--r--internal/lsp/analysis/nonewvars/nonewvars.go93
-rw-r--r--internal/lsp/analysis/nonewvars/nonewvars_test.go22
-rw-r--r--internal/lsp/analysis/noresultvalues/noresultvalues.go90
-rw-r--r--internal/lsp/analysis/noresultvalues/noresultvalues_test.go22
-rw-r--r--internal/lsp/analysis/simplifycompositelit/simplifycompositelit_test.go17
-rw-r--r--internal/lsp/analysis/simplifyrange/simplifyrange_test.go17
-rw-r--r--internal/lsp/analysis/simplifyslice/simplifyslice_test.go22
-rw-r--r--internal/lsp/analysis/stubmethods/stubmethods.go351
-rw-r--r--internal/lsp/analysis/undeclaredname/testdata/src/a/a.go28
-rw-r--r--internal/lsp/analysis/undeclaredname/testdata/src/a/channels.go13
-rw-r--r--internal/lsp/analysis/undeclaredname/testdata/src/a/consecutive_params.go10
-rw-r--r--internal/lsp/analysis/undeclaredname/testdata/src/a/error_param.go10
-rw-r--r--internal/lsp/analysis/undeclaredname/testdata/src/a/literals.go11
-rw-r--r--internal/lsp/analysis/undeclaredname/testdata/src/a/operation.go11
-rw-r--r--internal/lsp/analysis/undeclaredname/testdata/src/a/selector.go10
-rw-r--r--internal/lsp/analysis/undeclaredname/testdata/src/a/slice.go9
-rw-r--r--internal/lsp/analysis/undeclaredname/testdata/src/a/tuple.go13
-rw-r--r--internal/lsp/analysis/undeclaredname/testdata/src/a/unique_params.go11
-rw-r--r--internal/lsp/analysis/undeclaredname/undeclared.go340
-rw-r--r--internal/lsp/analysis/undeclaredname/undeclared_test.go17
-rw-r--r--internal/lsp/analysis/unusedparams/unusedparams_test.go22
-rw-r--r--internal/lsp/analysis/useany/useany_test.go21
-rw-r--r--internal/lsp/cache/analysis.go433
-rw-r--r--internal/lsp/cache/cache.go293
-rw-r--r--internal/lsp/cache/check.go863
-rw-r--r--internal/lsp/cache/errors.go411
-rw-r--r--internal/lsp/cache/imports.go201
-rw-r--r--internal/lsp/cache/load.go507
-rw-r--r--internal/lsp/cache/metadata.go74
-rw-r--r--internal/lsp/cache/mod.go516
-rw-r--r--internal/lsp/cache/mod_tidy.go500
-rw-r--r--internal/lsp/cache/os_windows.go55
-rw-r--r--internal/lsp/cache/parse.go1467
-rw-r--r--internal/lsp/cache/parse_test.go217
-rw-r--r--internal/lsp/cache/pkg.go149
-rw-r--r--internal/lsp/cache/session.go741
-rw-r--r--internal/lsp/cache/snapshot.go2479
-rw-r--r--internal/lsp/cache/symbols.go210
-rw-r--r--internal/lsp/cache/view.go1076
-rw-r--r--internal/lsp/cache/view_test.go218
-rw-r--r--internal/lsp/cache/workspace.go599
-rw-r--r--internal/lsp/cache/workspace_test.go425
-rw-r--r--internal/lsp/call_hierarchy.go42
-rw-r--r--internal/lsp/cmd/call_hierarchy.go146
-rw-r--r--internal/lsp/cmd/capabilities_test.go166
-rw-r--r--internal/lsp/cmd/check.go74
-rw-r--r--internal/lsp/cmd/cmd.go630
-rw-r--r--internal/lsp/cmd/cmd_test.go23
-rw-r--r--internal/lsp/cmd/definition.go137
-rw-r--r--internal/lsp/cmd/export_test.go11
-rw-r--r--internal/lsp/cmd/folding_range.go73
-rw-r--r--internal/lsp/cmd/format.go108
-rw-r--r--internal/lsp/cmd/help_test.go57
-rw-r--r--internal/lsp/cmd/highlight.go89
-rw-r--r--internal/lsp/cmd/implementation.go88
-rw-r--r--internal/lsp/cmd/imports.go102
-rw-r--r--internal/lsp/cmd/info.go197
-rw-r--r--internal/lsp/cmd/links.go78
-rw-r--r--internal/lsp/cmd/prepare_rename.go84
-rw-r--r--internal/lsp/cmd/references.go92
-rw-r--r--internal/lsp/cmd/remote.go164
-rw-r--r--internal/lsp/cmd/rename.go128
-rw-r--r--internal/lsp/cmd/semantictokens.go230
-rw-r--r--internal/lsp/cmd/serve.go130
-rw-r--r--internal/lsp/cmd/signature.go87
-rw-r--r--internal/lsp/cmd/subcommands.go44
-rw-r--r--internal/lsp/cmd/suggested_fix.go159
-rw-r--r--internal/lsp/cmd/symbols.go116
-rw-r--r--internal/lsp/cmd/test/call_hierarchy.go85
-rw-r--r--internal/lsp/cmd/test/check.go63
-rw-r--r--internal/lsp/cmd/test/cmdtest.go169
-rw-r--r--internal/lsp/cmd/test/definition.go61
-rw-r--r--internal/lsp/cmd/test/folding_range.go25
-rw-r--r--internal/lsp/cmd/test/format.go86
-rw-r--r--internal/lsp/cmd/test/highlight.go29
-rw-r--r--internal/lsp/cmd/test/implementation.go37
-rw-r--r--internal/lsp/cmd/test/imports.go29
-rw-r--r--internal/lsp/cmd/test/links.go30
-rw-r--r--internal/lsp/cmd/test/prepare_rename.go46
-rw-r--r--internal/lsp/cmd/test/references.go49
-rw-r--r--internal/lsp/cmd/test/rename.go29
-rw-r--r--internal/lsp/cmd/test/semanticdriver.go34
-rw-r--r--internal/lsp/cmd/test/signature.go34
-rw-r--r--internal/lsp/cmd/test/suggested_fix.go35
-rw-r--r--internal/lsp/cmd/test/symbols.go23
-rw-r--r--internal/lsp/cmd/test/workspace_symbol.go53
-rw-r--r--internal/lsp/cmd/usage/usage.hlp77
-rw-r--r--internal/lsp/cmd/usage/vulncheck.hlp9
-rw-r--r--internal/lsp/cmd/usage/workspace.hlp7
-rw-r--r--internal/lsp/cmd/vulncheck.go79
-rw-r--r--internal/lsp/cmd/workspace.go77
-rw-r--r--internal/lsp/cmd/workspace_symbol.go85
-rw-r--r--internal/lsp/code_action.go455
-rw-r--r--internal/lsp/code_lens.go57
-rw-r--r--internal/lsp/command.go819
-rw-r--r--internal/lsp/command/command_gen.go473
-rw-r--r--internal/lsp/command/commandmeta/meta.go258
-rw-r--r--internal/lsp/command/gen/gen.go155
-rw-r--r--internal/lsp/command/generate.go25
-rw-r--r--internal/lsp/command/interface.go384
-rw-r--r--internal/lsp/command/interface_test.go31
-rw-r--r--internal/lsp/command/util.go65
-rw-r--r--internal/lsp/completion.go178
-rw-r--r--internal/lsp/completion_test.go154
-rw-r--r--internal/lsp/debug/info.go265
-rw-r--r--internal/lsp/debug/log/log.go43
-rw-r--r--internal/lsp/debug/metrics.go58
-rw-r--r--internal/lsp/debug/rpc.go239
-rw-r--r--internal/lsp/debug/serve.go954
-rw-r--r--internal/lsp/debug/tag/tag.go63
-rw-r--r--internal/lsp/debug/trace.go226
-rw-r--r--internal/lsp/definition.go67
-rw-r--r--internal/lsp/diagnostics.go649
-rw-r--r--internal/lsp/diff/diff.go159
-rw-r--r--internal/lsp/diff/diff_test.go80
-rw-r--r--internal/lsp/diff/difftest/difftest.go243
-rw-r--r--internal/lsp/diff/difftest/difftest_test.go84
-rw-r--r--internal/lsp/diff/myers/diff.go205
-rw-r--r--internal/lsp/diff/myers/diff_test.go16
-rw-r--r--internal/lsp/diff/unified.go210
-rw-r--r--internal/lsp/fake/client.go128
-rw-r--r--internal/lsp/fake/edit.go157
-rw-r--r--internal/lsp/fake/edit_test.go97
-rw-r--r--internal/lsp/fake/editor.go1258
-rw-r--r--internal/lsp/fake/editor_test.go82
-rw-r--r--internal/lsp/fake/sandbox.go273
-rw-r--r--internal/lsp/fake/workdir.go365
-rw-r--r--internal/lsp/fake/workdir_test.go192
-rw-r--r--internal/lsp/fake/workdir_windows.go20
-rw-r--r--internal/lsp/folding_range.go44
-rw-r--r--internal/lsp/format.go31
-rw-r--r--internal/lsp/fuzzy/input_test.go141
-rw-r--r--internal/lsp/fuzzy/matcher.go407
-rw-r--r--internal/lsp/fuzzy/matcher_test.go295
-rw-r--r--internal/lsp/fuzzy/symbol.go236
-rw-r--r--internal/lsp/fuzzy/symbol_test.go79
-rw-r--r--internal/lsp/general.go510
-rw-r--r--internal/lsp/helper/README.md33
-rw-r--r--internal/lsp/helper/helper.go258
-rw-r--r--internal/lsp/highlight.go45
-rw-r--r--internal/lsp/hover.go34
-rw-r--r--internal/lsp/implementation.go21
-rw-r--r--internal/lsp/link.go280
-rw-r--r--internal/lsp/lsp_test.go1319
-rw-r--r--internal/lsp/lsppos/lsppos.go89
-rw-r--r--internal/lsp/lsprpc/autostart_default.go39
-rw-r--r--internal/lsp/lsprpc/autostart_posix.go99
-rw-r--r--internal/lsp/lsprpc/binder.go143
-rw-r--r--internal/lsp/lsprpc/binder_test.go154
-rw-r--r--internal/lsp/lsprpc/commandinterceptor.go47
-rw-r--r--internal/lsp/lsprpc/commandinterceptor_test.go42
-rw-r--r--internal/lsp/lsprpc/dialer.go115
-rw-r--r--internal/lsp/lsprpc/goenv.go89
-rw-r--r--internal/lsp/lsprpc/goenv_test.go68
-rw-r--r--internal/lsp/lsprpc/lsprpc.go530
-rw-r--r--internal/lsp/lsprpc/lsprpc_test.go349
-rw-r--r--internal/lsp/lsprpc/middleware.go145
-rw-r--r--internal/lsp/lsprpc/middleware_test.go93
-rw-r--r--internal/lsp/mod/code_lens.go153
-rw-r--r--internal/lsp/mod/diagnostics.go116
-rw-r--r--internal/lsp/mod/format.go33
-rw-r--r--internal/lsp/mod/hover.go163
-rw-r--r--internal/lsp/mod/mod_test.go60
-rw-r--r--internal/lsp/progress/progress.go269
-rw-r--r--internal/lsp/progress/progress_test.go161
-rw-r--r--internal/lsp/protocol/doc.go16
-rw-r--r--internal/lsp/protocol/enums.go246
-rw-r--r--internal/lsp/protocol/protocol.go277
-rw-r--r--internal/lsp/protocol/span.go151
-rw-r--r--internal/lsp/protocol/tsclient.go205
-rw-r--r--internal/lsp/protocol/tsprotocol.go6750
-rw-r--r--internal/lsp/protocol/tsserver.go1143
-rw-r--r--internal/lsp/protocol/typescript/README.md55
-rw-r--r--internal/lsp/protocol/typescript/code.ts1448
-rw-r--r--internal/lsp/protocol/typescript/tsconfig.json29
-rw-r--r--internal/lsp/protocol/typescript/util.ts254
-rw-r--r--internal/lsp/references.go40
-rw-r--r--internal/lsp/regtest/doc.go36
-rw-r--r--internal/lsp/regtest/env.go318
-rw-r--r--internal/lsp/regtest/env_test.go68
-rw-r--r--internal/lsp/regtest/expectation.go668
-rw-r--r--internal/lsp/regtest/regtest.go148
-rw-r--r--internal/lsp/regtest/runner.go533
-rw-r--r--internal/lsp/regtest/wrappers.go446
-rw-r--r--internal/lsp/rename.go56
-rwxr-xr-xinternal/lsp/reset_golden.sh6
-rw-r--r--internal/lsp/semantic.go958
-rw-r--r--internal/lsp/server.go168
-rw-r--r--internal/lsp/server_gen.go321
-rw-r--r--internal/lsp/signature_help.go31
-rw-r--r--internal/lsp/snippet/snippet_builder.go104
-rw-r--r--internal/lsp/source/add_import.go26
-rwxr-xr-xinternal/lsp/source/api_json.go972
-rw-r--r--internal/lsp/source/call_hierarchy.go310
-rw-r--r--internal/lsp/source/code_lens.go244
-rw-r--r--internal/lsp/source/comment.go381
-rw-r--r--internal/lsp/source/comment_test.go368
-rw-r--r--internal/lsp/source/completion/completion.go2967
-rw-r--r--internal/lsp/source/completion/deep_completion.go362
-rw-r--r--internal/lsp/source/completion/definition.go127
-rw-r--r--internal/lsp/source/completion/format.go340
-rw-r--r--internal/lsp/source/completion/fuzz.go142
-rw-r--r--internal/lsp/source/completion/keywords.go154
-rw-r--r--internal/lsp/source/completion/literal.go440
-rw-r--r--internal/lsp/source/completion/package.go364
-rw-r--r--internal/lsp/source/completion/package_test.go77
-rw-r--r--internal/lsp/source/completion/postfix_snippets.go461
-rw-r--r--internal/lsp/source/completion/printf.go172
-rw-r--r--internal/lsp/source/completion/snippet.go115
-rw-r--r--internal/lsp/source/completion/statements.go360
-rw-r--r--internal/lsp/source/completion/util.go326
-rw-r--r--internal/lsp/source/diagnostics.go84
-rw-r--r--internal/lsp/source/extract.go1307
-rw-r--r--internal/lsp/source/fix.go140
-rw-r--r--internal/lsp/source/folding_range.go185
-rw-r--r--internal/lsp/source/format.go387
-rw-r--r--internal/lsp/source/format_test.go91
-rw-r--r--internal/lsp/source/gc_annotations.go214
-rw-r--r--internal/lsp/source/highlight.go509
-rw-r--r--internal/lsp/source/hover.go870
-rw-r--r--internal/lsp/source/identifier.go576
-rw-r--r--internal/lsp/source/identifier_test.go128
-rw-r--r--internal/lsp/source/implementation.go446
-rw-r--r--internal/lsp/source/known_packages.go118
-rw-r--r--internal/lsp/source/offset_test.go71
-rw-r--r--internal/lsp/source/options.go1449
-rw-r--r--internal/lsp/source/options_test.go183
-rw-r--r--internal/lsp/source/references.go200
-rw-r--r--internal/lsp/source/rename.go371
-rw-r--r--internal/lsp/source/rename_check.go936
-rw-r--r--internal/lsp/source/signature_help.go181
-rw-r--r--internal/lsp/source/source_test.go984
-rw-r--r--internal/lsp/source/stub.go330
-rw-r--r--internal/lsp/source/symbols.go266
-rw-r--r--internal/lsp/source/types_format.go459
-rw-r--r--internal/lsp/source/util.go586
-rw-r--r--internal/lsp/source/view.go696
-rw-r--r--internal/lsp/source/workspace_symbol.go593
-rw-r--r--internal/lsp/source/workspace_symbol_test.go46
-rw-r--r--internal/lsp/symbols.go57
-rw-r--r--internal/lsp/template/completion.go301
-rw-r--r--internal/lsp/template/completion_test.go102
-rw-r--r--internal/lsp/template/highlight.go96
-rw-r--r--internal/lsp/template/implementations.go189
-rw-r--r--internal/lsp/template/parse.go520
-rw-r--r--internal/lsp/template/symbols.go230
-rw-r--r--internal/lsp/testdata/analyzer/bad_test.go18
-rw-r--r--internal/lsp/testdata/arraytype/array_type.go.in48
-rw-r--r--internal/lsp/testdata/assign/assign.go.in26
-rw-r--r--internal/lsp/testdata/bad/bad0.go23
-rw-r--r--internal/lsp/testdata/bad/bad1.go33
-rw-r--r--internal/lsp/testdata/badstmt/badstmt.go.in26
-rw-r--r--internal/lsp/testdata/badstmt/badstmt_2.go.in9
-rw-r--r--internal/lsp/testdata/badstmt/badstmt_3.go.in9
-rw-r--r--internal/lsp/testdata/badstmt/badstmt_4.go.in11
-rw-r--r--internal/lsp/testdata/bar/bar.go.in47
-rw-r--r--internal/lsp/testdata/basiclit/basiclit.go56
-rw-r--r--internal/lsp/testdata/baz/baz.go.in33
-rw-r--r--internal/lsp/testdata/builtins/builtins.go46
-rw-r--r--internal/lsp/testdata/callhierarchy/callhierarchy.go70
-rw-r--r--internal/lsp/testdata/callhierarchy/incoming/incoming.go12
-rw-r--r--internal/lsp/testdata/cgo/declarecgo.go.golden30
-rw-r--r--internal/lsp/testdata/cgoimport/usecgo.go.golden30
-rw-r--r--internal/lsp/testdata/cgoimport/usecgo.go.in9
-rw-r--r--internal/lsp/testdata/danglingstmt/dangling_selector_2.go8
-rw-r--r--internal/lsp/testdata/deep/deep.go135
-rw-r--r--internal/lsp/testdata/errors/errors.go10
-rw-r--r--internal/lsp/testdata/extract/extract_function/extract_basic_comment.go8
-rw-r--r--internal/lsp/testdata/extract/extract_function/extract_basic_comment.go.golden17
-rw-r--r--internal/lsp/testdata/extract/extract_method/extract_basic.go.golden728
-rw-r--r--internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go6
-rw-r--r--internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go.golden18
-rw-r--r--internal/lsp/testdata/extract/extract_variable/extract_func_call.go9
-rw-r--r--internal/lsp/testdata/extract/extract_variable/extract_func_call.go.golden36
-rw-r--r--internal/lsp/testdata/extract/extract_variable/extract_scope.go13
-rw-r--r--internal/lsp/testdata/extract/extract_variable/extract_scope.go.golden32
-rw-r--r--internal/lsp/testdata/fillstruct/a.go27
-rw-r--r--internal/lsp/testdata/fillstruct/a.go.golden126
-rw-r--r--internal/lsp/testdata/fillstruct/a2.go29
-rw-r--r--internal/lsp/testdata/fillstruct/a2.go.golden139
-rw-r--r--internal/lsp/testdata/fillstruct/a3.go42
-rw-r--r--internal/lsp/testdata/fillstruct/a3.go.golden243
-rw-r--r--internal/lsp/testdata/fillstruct/a4.go39
-rw-r--r--internal/lsp/testdata/fillstruct/a4.go.golden174
-rw-r--r--internal/lsp/testdata/fillstruct/fill_struct.go26
-rw-r--r--internal/lsp/testdata/fillstruct/fill_struct.go.golden124
-rw-r--r--internal/lsp/testdata/fillstruct/fill_struct_anon.go14
-rw-r--r--internal/lsp/testdata/fillstruct/fill_struct_anon.go.golden20
-rw-r--r--internal/lsp/testdata/fillstruct/fill_struct_nested.go15
-rw-r--r--internal/lsp/testdata/fillstruct/fill_struct_nested.go.golden19
-rw-r--r--internal/lsp/testdata/fillstruct/fill_struct_package.go12
-rw-r--r--internal/lsp/testdata/fillstruct/fill_struct_package.go.golden36
-rw-r--r--internal/lsp/testdata/fillstruct/fill_struct_partial.go24
-rw-r--r--internal/lsp/testdata/fillstruct/fill_struct_partial.go.golden52
-rw-r--r--internal/lsp/testdata/fillstruct/fill_struct_spaces.go9
-rw-r--r--internal/lsp/testdata/fillstruct/fill_struct_spaces.go.golden13
-rw-r--r--internal/lsp/testdata/folding/a.go.golden759
-rw-r--r--internal/lsp/testdata/folding/bad.go.golden91
-rw-r--r--internal/lsp/testdata/foo/foo.go30
-rw-r--r--internal/lsp/testdata/format/bad_format.go.golden21
-rw-r--r--internal/lsp/testdata/format/bad_format.go.in22
-rw-r--r--internal/lsp/testdata/generated/generated.go7
-rw-r--r--internal/lsp/testdata/generated/generator.go5
-rw-r--r--internal/lsp/testdata/godef/a/a.go105
-rw-r--r--internal/lsp/testdata/godef/a/a.go.golden190
-rw-r--r--internal/lsp/testdata/godef/a/a_test.go8
-rw-r--r--internal/lsp/testdata/godef/a/a_test.go.golden26
-rw-r--r--internal/lsp/testdata/godef/a/a_x_test.go9
-rw-r--r--internal/lsp/testdata/godef/a/d.go43
-rw-r--r--internal/lsp/testdata/godef/a/d.go.golden164
-rw-r--r--internal/lsp/testdata/godef/a/f.go15
-rw-r--r--internal/lsp/testdata/godef/a/g.go.golden6
-rw-r--r--internal/lsp/testdata/godef/a/h.go.golden136
-rw-r--r--internal/lsp/testdata/godef/a/random.go31
-rw-r--r--internal/lsp/testdata/godef/a/random.go.golden112
-rw-r--r--internal/lsp/testdata/godef/b/b.go57
-rw-r--r--internal/lsp/testdata/godef/b/b.go.golden454
-rw-r--r--internal/lsp/testdata/godef/b/c.go8
-rw-r--r--internal/lsp/testdata/godef/b/c.go.golden74
-rw-r--r--internal/lsp/testdata/godef/b/c.go.saved7
-rw-r--r--internal/lsp/testdata/godef/b/e.go31
-rw-r--r--internal/lsp/testdata/godef/b/e.go.golden144
-rw-r--r--internal/lsp/testdata/godef/b/h.go10
-rw-r--r--internal/lsp/testdata/godef/b/h.go.golden12
-rw-r--r--internal/lsp/testdata/godef/broken/unclosedIf.go.golden30
-rw-r--r--internal/lsp/testdata/godef/hover_generics/hover.go15
-rw-r--r--internal/lsp/testdata/godef/hover_generics/hover.go.golden45
-rw-r--r--internal/lsp/testdata/godef/infer_generics/inferred.go12
-rw-r--r--internal/lsp/testdata/godef/infer_generics/inferred.go.golden20
-rw-r--r--internal/lsp/testdata/good/good1.go20
-rw-r--r--internal/lsp/testdata/implementation/implementation.go31
-rw-r--r--internal/lsp/testdata/importedcomplit/imported_complit.go.in42
-rw-r--r--internal/lsp/testdata/keywords/keywords.go100
-rw-r--r--internal/lsp/testdata/links/links.go26
-rw-r--r--internal/lsp/testdata/missingfunction/channels.go9
-rw-r--r--internal/lsp/testdata/missingfunction/channels.go.golden15
-rw-r--r--internal/lsp/testdata/missingfunction/consecutive_params.go6
-rw-r--r--internal/lsp/testdata/missingfunction/consecutive_params.go.golden12
-rw-r--r--internal/lsp/testdata/missingfunction/error_param.go6
-rw-r--r--internal/lsp/testdata/missingfunction/error_param.go.golden12
-rw-r--r--internal/lsp/testdata/missingfunction/literals.go7
-rw-r--r--internal/lsp/testdata/missingfunction/literals.go.golden29
-rw-r--r--internal/lsp/testdata/missingfunction/operation.go7
-rw-r--r--internal/lsp/testdata/missingfunction/operation.go.golden29
-rw-r--r--internal/lsp/testdata/missingfunction/selector.go6
-rw-r--r--internal/lsp/testdata/missingfunction/selector.go.golden12
-rw-r--r--internal/lsp/testdata/missingfunction/slice.go5
-rw-r--r--internal/lsp/testdata/missingfunction/slice.go.golden11
-rw-r--r--internal/lsp/testdata/missingfunction/tuple.go9
-rw-r--r--internal/lsp/testdata/missingfunction/tuple.go.golden15
-rw-r--r--internal/lsp/testdata/missingfunction/unique_params.go7
-rw-r--r--internal/lsp/testdata/missingfunction/unique_params.go.golden30
-rw-r--r--internal/lsp/testdata/nested_complit/nested_complit.go.in14
-rw-r--r--internal/lsp/testdata/nodisk/nodisk.overlay.go9
-rw-r--r--internal/lsp/testdata/noparse/noparse.go.in11
-rw-r--r--internal/lsp/testdata/noparse_format/noparse_format.go.in9
-rw-r--r--internal/lsp/testdata/references/another/another.go13
-rw-r--r--internal/lsp/testdata/references/other/other.go19
-rw-r--r--internal/lsp/testdata/references/refs.go38
-rw-r--r--internal/lsp/testdata/rename/b/b.go.golden78
-rw-r--r--internal/lsp/testdata/rename/bad/bad.go.golden2
-rw-r--r--internal/lsp/testdata/rename/c/c.go7
-rw-r--r--internal/lsp/testdata/rename/c/c.go.golden32
-rw-r--r--internal/lsp/testdata/rename/crosspkg/crosspkg.go.golden40
-rw-r--r--internal/lsp/testdata/rename/crosspkg/other/other.go8
-rw-r--r--internal/lsp/testdata/rename/crosspkg/other/other.go.golden20
-rw-r--r--internal/lsp/testdata/rename/shadow/shadow.go.golden48
-rw-r--r--internal/lsp/testdata/selector/selector.go.in66
-rw-r--r--internal/lsp/testdata/semantic/a.go.golden83
-rw-r--r--internal/lsp/testdata/semantic/b.go34
-rw-r--r--internal/lsp/testdata/semantic/b.go.golden36
-rw-r--r--internal/lsp/testdata/signature/signature.go.golden65
-rw-r--r--internal/lsp/testdata/signature/signature_test.go13
-rw-r--r--internal/lsp/testdata/signature/signature_test.go.golden30
-rw-r--r--internal/lsp/testdata/snippets/literal.go22
-rw-r--r--internal/lsp/testdata/snippets/literal.go.golden6
-rw-r--r--internal/lsp/testdata/snippets/literal_snippets.go.in233
-rw-r--r--internal/lsp/testdata/stub/stub_add_selector.go12
-rw-r--r--internal/lsp/testdata/stub/stub_add_selector.go.golden19
-rw-r--r--internal/lsp/testdata/stub/stub_assign.go10
-rw-r--r--internal/lsp/testdata/stub/stub_assign.go.golden17
-rw-r--r--internal/lsp/testdata/stub/stub_assign_multivars.go11
-rw-r--r--internal/lsp/testdata/stub/stub_assign_multivars.go.golden18
-rw-r--r--internal/lsp/testdata/stub/stub_embedded.go15
-rw-r--r--internal/lsp/testdata/stub/stub_embedded.go.golden37
-rw-r--r--internal/lsp/testdata/stub/stub_err.go7
-rw-r--r--internal/lsp/testdata/stub/stub_err.go.golden14
-rw-r--r--internal/lsp/testdata/stub/stub_function_return.go11
-rw-r--r--internal/lsp/testdata/stub/stub_function_return.go.golden18
-rw-r--r--internal/lsp/testdata/stub/stub_generic_receiver.go15
-rw-r--r--internal/lsp/testdata/stub/stub_generic_receiver.go.golden22
-rw-r--r--internal/lsp/testdata/stub/stub_ignored_imports.go18
-rw-r--r--internal/lsp/testdata/stub/stub_ignored_imports.go.golden26
-rw-r--r--internal/lsp/testdata/stub/stub_multi_var.go11
-rw-r--r--internal/lsp/testdata/stub/stub_multi_var.go.golden18
-rw-r--r--internal/lsp/testdata/stub/stub_pointer.go9
-rw-r--r--internal/lsp/testdata/stub/stub_pointer.go.golden16
-rw-r--r--internal/lsp/testdata/stub/stub_renamed_import.go11
-rw-r--r--internal/lsp/testdata/stub/stub_renamed_import.go.golden18
-rw-r--r--internal/lsp/testdata/stub/stub_renamed_import_iface.go13
-rw-r--r--internal/lsp/testdata/stub/stub_renamed_import_iface.go.golden22
-rw-r--r--internal/lsp/testdata/stub/stub_stdlib.go9
-rw-r--r--internal/lsp/testdata/stub/stub_stdlib.go.golden16
-rw-r--r--internal/lsp/testdata/suggestedfix/has_suggested_fix.go11
-rw-r--r--internal/lsp/testdata/suggestedfix/has_suggested_fix.go.golden13
-rw-r--r--internal/lsp/testdata/summary.txt.golden30
-rw-r--r--internal/lsp/testdata/summary_go1.18.txt.golden30
-rw-r--r--internal/lsp/testdata/symbols/main.go64
-rw-r--r--internal/lsp/testdata/symbols/main.go.golden31
-rw-r--r--internal/lsp/testdata/testy/testy_test.go18
-rw-r--r--internal/lsp/testdata/typeerrors/noresultvalues.go5
-rw-r--r--internal/lsp/testdata/typeerrors/noresultvalues.go.golden14
-rw-r--r--internal/lsp/testdata/typeparams/type_params.go33
-rw-r--r--internal/lsp/testdata/undeclared/var.go14
-rw-r--r--internal/lsp/testdata/undeclared/var.go.golden51
-rw-r--r--internal/lsp/testdata/unimported/export_test.go3
-rw-r--r--internal/lsp/testdata/unimported/unimported.go.in20
-rw-r--r--internal/lsp/testdata/unimported/unimported_cand_type.go16
-rw-r--r--internal/lsp/testdata/workspacesymbol/a/a.go9
-rw-r--r--internal/lsp/testdata/workspacesymbol/a/a.go.golden5
-rw-r--r--internal/lsp/testdata/workspacesymbol/a/a_test.go3
-rw-r--r--internal/lsp/testdata/workspacesymbol/a/a_test.go.golden3
-rw-r--r--internal/lsp/testdata/workspacesymbol/a/a_x_test.go3
-rw-r--r--internal/lsp/testdata/workspacesymbol/a/a_x_test.go.golden3
-rw-r--r--internal/lsp/testdata/workspacesymbol/b/b.go7
-rw-r--r--internal/lsp/testdata/workspacesymbol/b/b.go.golden5
-rw-r--r--internal/lsp/tests/README.md66
-rw-r--r--internal/lsp/tests/normalizer.go129
-rw-r--r--internal/lsp/tests/tests.go1458
-rw-r--r--internal/lsp/tests/util.go580
-rw-r--r--internal/lsp/text_synchronization.go382
-rw-r--r--internal/lsp/work/completion.go159
-rw-r--r--internal/lsp/work/diagnostics.go93
-rw-r--r--internal/lsp/work/format.go31
-rw-r--r--internal/lsp/work/hover.go94
-rw-r--r--internal/lsp/workspace.go108
-rw-r--r--internal/lsp/workspace_symbol.go23
-rw-r--r--internal/memoize/memoize.go552
-rw-r--r--internal/memoize/memoize_test.go187
-rw-r--r--internal/packagesinternal/packages.go2
-rw-r--r--internal/persistent/map.go311
-rw-r--r--internal/persistent/map_test.go355
-rw-r--r--internal/pkgbits/codes.go77
-rw-r--r--internal/pkgbits/decoder.go517
-rw-r--r--internal/pkgbits/doc.go32
-rw-r--r--internal/pkgbits/encoder.go383
-rw-r--r--internal/pkgbits/flags.go9
-rw-r--r--internal/pkgbits/frames_go1.go21
-rw-r--r--internal/pkgbits/frames_go17.go28
-rw-r--r--internal/pkgbits/reloc.go42
-rw-r--r--internal/pkgbits/support.go17
-rw-r--r--internal/pkgbits/sync.go113
-rw-r--r--internal/pkgbits/syncmarker_string.go89
-rw-r--r--internal/robustio/copyfiles.go117
-rw-r--r--internal/robustio/gopls_windows.go16
-rw-r--r--internal/robustio/robustio.go69
-rw-r--r--internal/robustio/robustio_darwin.go21
-rw-r--r--internal/robustio/robustio_flaky.go93
-rw-r--r--internal/robustio/robustio_other.go29
-rw-r--r--internal/robustio/robustio_plan9.go26
-rw-r--r--internal/robustio/robustio_posix.go28
-rw-r--r--internal/robustio/robustio_test.go88
-rw-r--r--internal/robustio/robustio_windows.go51
-rw-r--r--internal/span/parse.go112
-rw-r--r--internal/span/span.go285
-rw-r--r--internal/span/span_test.go70
-rw-r--r--internal/span/token.go194
-rw-r--r--internal/span/token111.go40
-rw-r--r--internal/span/token112.go17
-rw-r--r--internal/span/token_test.go81
-rw-r--r--internal/span/uri.go169
-rw-r--r--internal/span/uri_test.go117
-rw-r--r--internal/span/uri_windows_test.go112
-rw-r--r--internal/span/utf16.go95
-rw-r--r--internal/span/utf16_test.go322
-rw-r--r--internal/stack/process.go2
-rw-r--r--internal/stack/stacktest/stacktest.go2
-rw-r--r--internal/testenv/exec.go149
-rw-r--r--internal/testenv/testenv.go160
-rw-r--r--internal/testenv/testenv_112.go28
-rw-r--r--internal/testenv/testenv_notunix.go14
-rw-r--r--internal/testenv/testenv_unix.go14
-rw-r--r--internal/tokeninternal/tokeninternal.go59
-rw-r--r--internal/tool/tool.go4
-rw-r--r--internal/typeparams/common.go22
-rw-r--r--internal/typeparams/copytermlist.go6
-rw-r--r--internal/typeparams/coretype.go122
-rw-r--r--internal/typeparams/coretype_test.go105
-rw-r--r--internal/typeparams/example/README.md328
-rw-r--r--internal/typeparams/example/findtypeparams/main.go155
-rw-r--r--internal/typeparams/example/generic-go-types.md206
-rw-r--r--internal/typeparams/normalize.go12
-rw-r--r--internal/typeparams/normalize_test.go8
-rw-r--r--internal/typeparams/termlist.go9
-rw-r--r--internal/typesinternal/errorcode.go38
-rw-r--r--internal/typesinternal/errorcode_string.go26
-rw-r--r--internal/typesinternal/errorcode_test.go105
-rw-r--r--internal/typesinternal/types.go9
-rw-r--r--playground/socket/socket_test.go6
-rw-r--r--present/args.go2
-rw-r--r--present/code.go4
-rw-r--r--present/doc.go32
-rw-r--r--refactor/eg/eg.go1
-rw-r--r--refactor/eg/match.go1
-rw-r--r--refactor/eg/rewrite.go1
-rw-r--r--refactor/rename/check.go18
-rw-r--r--refactor/rename/spec.go3
-rw-r--r--refactor/rename/util.go1
-rw-r--r--refactor/satisfy/find.go103
-rw-r--r--refactor/satisfy/find_test.go238
-rw-r--r--txtar/archive.go8
1920 files changed, 133732 insertions, 102609 deletions
diff --git a/AUTHORS b/AUTHORS
deleted file mode 100644
index 15167cd74..000000000
--- a/AUTHORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code refers to The Go Authors for copyright purposes.
-# The master list of authors is in the main Go distribution,
-# visible at http://tip.golang.org/AUTHORS.
diff --git a/CONTRIBUTORS b/CONTRIBUTORS
deleted file mode 100644
index 1c4577e96..000000000
--- a/CONTRIBUTORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code was written by the Go contributors.
-# The master list of contributors is in the main Go distribution,
-# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/METADATA b/METADATA
index caf2398fe..d83a2be2a 100644
--- a/METADATA
+++ b/METADATA
@@ -9,11 +9,11 @@ third_party {
type: GIT
value: "https://go.googlesource.com/tools/"
}
- version: "e693fb417253d14786976bd29a456961aa8b6343"
+ version: "v0.7.0"
license_type: NOTICE
last_upgrade_date {
- year: 2022
+ year: 2023
month: 3
- day: 29
+ day: 15
}
}
diff --git a/README.md b/README.md
index 71a945aaf..d9d7edd73 100644
--- a/README.md
+++ b/README.md
@@ -2,36 +2,96 @@
[![PkgGoDev](https://pkg.go.dev/badge/golang.org/x/tools)](https://pkg.go.dev/golang.org/x/tools)
-This subrepository holds the source for various packages and tools that support
-the Go programming language.
+This repository provides the `golang.org/x/tools` module, comprising
+various tools and packages mostly for static analysis of Go programs,
+some of which are listed below.
+Use the "Go reference" link above for more information about any package.
-Some of the tools, `godoc` and `vet` for example, are included in binary Go
-distributions.
+It also contains the
+[`golang.org/x/tools/gopls`](https://pkg.go.dev/golang.org/x/tools/gopls)
+module, whose root package is a language-server protocol (LSP) server for Go.
+An LSP server analyses the source code of a project and
+responds to requests from a wide range of editors such as VSCode and
+Vim, allowing them to support IDE-like functionality.
-Others, including the Go `guru` and the test coverage tool, can be fetched with
-`go install`.
+<!-- List only packages of general interest below. -->
-Packages include a type-checker for Go and an implementation of the
-Static Single Assignment form (SSA) representation for Go programs.
+Selected commands:
-## Download/Install
+- `cmd/goimports` formats a Go program like `go fmt` and additionally
+ inserts import statements for any packages required by the file
+ after it is edited.
+- `cmd/callgraph` prints the call graph of a Go program.
+- `cmd/digraph` is a utility for manipulating directed graphs in textual notation.
+- `cmd/stringer` generates declarations (including a `String` method) for "enum" types.
+- `cmd/toolstash` is a utility to simplify working with multiple versions of the Go toolchain.
-The easiest way to install is to run `go install golang.org/x/tools/...@latest`.
+These commands may be fetched with a command such as
+```
+go install golang.org/x/tools/cmd/goimports@latest
+```
-## JS/CSS Formatting
+Selected packages:
-This repository uses [prettier](https://prettier.io/) to format JS and CSS files.
+- `go/ssa` provides a static single-assignment form (SSA) intermediate
+ representation (IR) for Go programs, similar to a typical compiler,
+ for use by analysis tools.
-The version of `prettier` used is 1.18.2.
+- `go/packages` provides a simple interface for loading, parsing, and
+ type checking a complete Go program from source code.
-It is encouraged that all JS and CSS code be run through this before submitting
-a change. However, it is not a strict requirement enforced by CI.
+- `go/analysis` provides a framework for modular static analysis of Go
+ programs.
+
+- `go/callgraph` provides call graphs of Go programs using a variety
+ of algorithms with different trade-offs.
+
+- `go/ast/inspector` provides an optimized means of traversing a Go
+ parse tree for use in analysis tools.
+
+- `go/cfg` provides a simple control-flow graph (CFG) for a Go function.
+
+- `go/expect` reads Go source files used as test inputs and interprets
+ special comments within them as queries or assertions for testing.
+
+- `go/gcexportdata` and `go/gccgoexportdata` read and write the binary
+ files containing type information used by the standard and `gccgo` compilers.
+
+- `go/types/objectpath` provides a stable naming scheme for named
+ entities ("objects") in the `go/types` API.
+
+Numerous other packages provide more esoteric functionality.
-## Report Issues / Send Patches
+<!-- Some that didn't make the cut:
-This repository uses Gerrit for code changes. To learn how to submit changes to
-this repository, see https://golang.org/doc/contribute.html.
+golang.org/x/tools/benchmark/parse
+golang.org/x/tools/go/ast/astutil
+golang.org/x/tools/go/types/typeutil
+golang.org/x/tools/go/vcs
+golang.org/x/tools/godoc
+golang.org/x/tools/playground
+golang.org/x/tools/present
+golang.org/x/tools/refactor/importgraph
+golang.org/x/tools/refactor/rename
+golang.org/x/tools/refactor/satisfy
+golang.org/x/tools/txtar
+
+-->
+
+## Contributing
+
+This repository uses Gerrit for code changes.
+To learn how to submit changes, see https://golang.org/doc/contribute.html.
The main issue tracker for the tools repository is located at
https://github.com/golang/go/issues. Prefix your issue with "x/tools/(your
subdir):" in the subject line, so it is easy to find.
+
+### JavaScript and CSS Formatting
+
+This repository uses [prettier](https://prettier.io/) to format JS and CSS files.
+
+The version of `prettier` used is 1.18.2.
+
+It is encouraged that all JS and CSS code be run through this before submitting
+a change. However, it is not a strict requirement enforced by CI.
diff --git a/cmd/auth/cookieauth/cookieauth.go b/cmd/auth/cookieauth/cookieauth.go
index 37e8d6e18..8b0ff1766 100644
--- a/cmd/auth/cookieauth/cookieauth.go
+++ b/cmd/auth/cookieauth/cookieauth.go
@@ -7,7 +7,8 @@
// It expects the location of the file as the first command-line argument.
//
// Example GOAUTH usage:
-// export GOAUTH="cookieauth $(git config --get http.cookieFile)"
+//
+// export GOAUTH="cookieauth $(git config --get http.cookieFile)"
//
// See http://www.cookiecentral.com/faq/#3.5 for a description of the Netscape
// cookie file format.
@@ -39,7 +40,6 @@ func main() {
f, err := os.Open(os.Args[1])
if err != nil {
log.Fatalf("failed to read cookie file: %v\n", os.Args[1])
- os.Exit(1)
}
defer f.Close()
diff --git a/cmd/auth/gitauth/gitauth.go b/cmd/auth/gitauth/gitauth.go
index 7bfca6efb..6128889f0 100644
--- a/cmd/auth/gitauth/gitauth.go
+++ b/cmd/auth/gitauth/gitauth.go
@@ -7,7 +7,8 @@
// directory for the 'git' command as the first command-line argument.
//
// Example GOAUTH usage:
-// export GOAUTH="gitauth $HOME"
+//
+// export GOAUTH="gitauth $HOME"
//
// See https://git-scm.com/docs/gitcredentials or run 'man gitcredentials' for
// information on how to configure 'git credential'.
diff --git a/cmd/auth/netrcauth/netrcauth.go b/cmd/auth/netrcauth/netrcauth.go
index 1855cfa24..7d29c9603 100644
--- a/cmd/auth/netrcauth/netrcauth.go
+++ b/cmd/auth/netrcauth/netrcauth.go
@@ -7,7 +7,8 @@
// It expects the location of the file as the first command-line argument.
//
// Example GOAUTH usage:
-// export GOAUTH="netrcauth $HOME/.netrc"
+//
+// export GOAUTH="netrcauth $HOME/.netrc"
//
// See https://www.gnu.org/software/inetutils/manual/html_node/The-_002enetrc-file.html
// or run 'man 5 netrc' for a description of the .netrc file format.
diff --git a/cmd/benchcmp/compare.go b/cmd/benchcmp/compare.go
index c3f5e89c7..083aa4ddb 100644
--- a/cmd/benchcmp/compare.go
+++ b/cmd/benchcmp/compare.go
@@ -109,8 +109,8 @@ func (x ByParseOrder) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x ByParseOrder) Less(i, j int) bool { return x[i].Before.Ord < x[j].Before.Ord }
// lessByDelta provides lexicographic ordering:
-// * largest delta by magnitude
-// * alphabetic by name
+// - largest delta by magnitude
+// - alphabetic by name
func lessByDelta(i, j BenchCmp, calcDelta func(BenchCmp) Delta) bool {
iDelta, jDelta := calcDelta(i).mag(), calcDelta(j).mag()
if iDelta != jDelta {
diff --git a/cmd/benchcmp/doc.go b/cmd/benchcmp/doc.go
index cfe9801d8..97e8d8ace 100644
--- a/cmd/benchcmp/doc.go
+++ b/cmd/benchcmp/doc.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
/*
-
Deprecated: benchcmp is deprecated in favor of benchstat: golang.org/x/perf/cmd/benchstat
The benchcmp command displays performance changes between benchmarks.
@@ -34,6 +33,5 @@ in a format like this:
benchmark old bytes new bytes delta
BenchmarkConcat 80 48 -40.00%
-
*/
package main // import "golang.org/x/tools/cmd/benchcmp"
diff --git a/cmd/bundle/main.go b/cmd/bundle/main.go
index fd8b0e5a9..194797bd8 100644
--- a/cmd/bundle/main.go
+++ b/cmd/bundle/main.go
@@ -21,8 +21,8 @@
//
// By default, bundle writes the bundled code to standard output.
// If the -o argument is given, bundle writes to the named file
-// and also includes a ``//go:generate'' comment giving the exact
-// command line used, for regenerating the file with ``go generate.''
+// and also includes a “//go:generate” comment giving the exact
+// command line used, for regenerating the file with “go generate.”
//
// Bundle customizes its output for inclusion in a particular package, the destination package.
// By default bundle assumes the destination is the package in the current directory,
@@ -47,7 +47,7 @@
// process. The -import option, which may be repeated, specifies that
// an import of "old" should be rewritten to import "new" instead.
//
-// Example
+// # Example
//
// Bundle archive/zip for inclusion in cmd/dist:
//
@@ -68,7 +68,6 @@
// Update all bundles in the standard library:
//
// go generate -run bundle std
-//
package main
import (
@@ -85,6 +84,7 @@ import (
"os"
"strconv"
"strings"
+ "unicode"
"golang.org/x/tools/go/packages"
)
@@ -234,7 +234,7 @@ func bundle(src, dst, dstpkg, prefix, buildTags string) ([]byte, error) {
fmt.Fprintf(&out, "// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT.\n")
if *outputFile != "" && buildTags == "" {
- fmt.Fprintf(&out, "//go:generate bundle %s\n", strings.Join(os.Args[1:], " "))
+ fmt.Fprintf(&out, "//go:generate bundle %s\n", strings.Join(quoteArgs(os.Args[1:]), " "))
} else {
fmt.Fprintf(&out, "// $ bundle %s\n", strings.Join(os.Args[1:], " "))
}
@@ -448,6 +448,35 @@ func printSameLineComment(out *bytes.Buffer, comments []*ast.CommentGroup, fset
return pos
}
+func quoteArgs(ss []string) []string {
+ // From go help generate:
+ //
+ // > The arguments to the directive are space-separated tokens or
+ // > double-quoted strings passed to the generator as individual
+ // > arguments when it is run.
+ //
+ // > Quoted strings use Go syntax and are evaluated before execution; a
+ // > quoted string appears as a single argument to the generator.
+ //
+ var qs []string
+ for _, s := range ss {
+ if s == "" || containsSpace(s) {
+ s = strconv.Quote(s)
+ }
+ qs = append(qs, s)
+ }
+ return qs
+}
+
+func containsSpace(s string) bool {
+ for _, r := range s {
+ if unicode.IsSpace(r) {
+ return true
+ }
+ }
+ return false
+}
+
type flagFunc func(string)
func (f flagFunc) Set(s string) error {
diff --git a/cmd/callgraph/main.go b/cmd/callgraph/main.go
index f83be0ea5..eb8c0d116 100644
--- a/cmd/callgraph/main.go
+++ b/cmd/callgraph/main.go
@@ -37,6 +37,7 @@ import (
"golang.org/x/tools/go/callgraph/cha"
"golang.org/x/tools/go/callgraph/rta"
"golang.org/x/tools/go/callgraph/static"
+ "golang.org/x/tools/go/callgraph/vta"
"golang.org/x/tools/go/packages"
"golang.org/x/tools/go/pointer"
"golang.org/x/tools/go/ssa"
@@ -46,7 +47,7 @@ import (
// flags
var (
algoFlag = flag.String("algo", "rta",
- `Call graph construction algorithm (static, cha, rta, pta)`)
+ `Call graph construction algorithm (static, cha, rta, vta, pta)`)
testFlag = flag.Bool("test", false,
"Loads test code (*_test.go) for imported packages")
@@ -67,7 +68,7 @@ const Usage = `callgraph: display the call graph of a Go program.
Usage:
- callgraph [-algo=static|cha|rta|pta] [-test] [-format=...] package...
+ callgraph [-algo=static|cha|rta|vta|pta] [-test] [-format=...] package...
Flags:
@@ -76,6 +77,7 @@ Flags:
static static calls only (unsound)
cha Class Hierarchy Analysis
rta Rapid Type Analysis
+ vta Variable Type Analysis
pta inclusion-based Points-To Analysis
The algorithms are ordered by increasing precision in their
@@ -187,7 +189,8 @@ func doCallgraph(dir, gopath, algo, format string, tests bool, args []string) er
}
// Create and build SSA-form program representation.
- prog, pkgs := ssautil.AllPackages(initial, 0)
+ mode := ssa.InstantiateGenerics // instantiate generics by default for soundness
+ prog, pkgs := ssautil.AllPackages(initial, mode)
prog.Build()
// -- call graph construction ------------------------------------------
@@ -250,6 +253,9 @@ func doCallgraph(dir, gopath, algo, format string, tests bool, args []string) er
// NB: RTA gives us Reachable and RuntimeTypes too.
+ case "vta":
+ cg = vta.CallGraph(ssautil.AllFunctions(prog), cha.CallGraph(prog))
+
default:
return fmt.Errorf("unknown algorithm: %s", algo)
}
diff --git a/cmd/callgraph/main_test.go b/cmd/callgraph/main_test.go
index 7e838f774..c8bee87e2 100644
--- a/cmd/callgraph/main_test.go
+++ b/cmd/callgraph/main_test.go
@@ -15,6 +15,7 @@ import (
"log"
"os"
"path/filepath"
+ "runtime"
"strings"
"testing"
@@ -34,8 +35,8 @@ func init() {
}
func TestCallgraph(t *testing.T) {
- if b := os.Getenv("GO_BUILDER_NAME"); b == "windows-arm64-10" {
- t.Skipf("skipping due to suspected file corruption bug on %s builder (https://go.dev/issue/50706)", b)
+ if runtime.GOOS == "windows" && runtime.GOARCH == "arm64" {
+ t.Skipf("skipping due to suspected file corruption bug on windows/arm64 (https://go.dev/issue/50706)")
}
testenv.NeedsTool(t, "go")
@@ -58,6 +59,12 @@ func TestCallgraph(t *testing.T) {
`pkg.main2 --> (pkg.C).f`,
`pkg.main2 --> (pkg.D).f`,
}},
+ {"vta", false, []string{
+ // vta distinguishes main->C, main2->D.
+ "pkg.main --> (pkg.C).f",
+ "pkg.main --> pkg.main2",
+ "pkg.main2 --> (pkg.D).f",
+ }},
{"pta", false, []string{
// pta distinguishes main->C, main2->D. Also has a root node.
`<root> --> pkg.init`,
@@ -74,6 +81,12 @@ func TestCallgraph(t *testing.T) {
`pkg.Example --> (pkg.C).f`,
`pkg.main --> (pkg.C).f`,
}},
+ {"vta", true, []string{
+ `pkg.test.main --> testing.MainStart`,
+ `testing.runExample --> pkg.Example`,
+ `pkg.Example --> (pkg.C).f`,
+ `pkg.main --> (pkg.C).f`,
+ }},
{"pta", true, []string{
`<root> --> pkg.test.main`,
`<root> --> pkg.main`,
@@ -94,13 +107,15 @@ func TestCallgraph(t *testing.T) {
for _, line := range strings.Split(fmt.Sprint(stdout), "\n") {
edges[line] = true
}
+ ok := true
for _, edge := range test.want {
if !edges[edge] {
+ ok = false
t.Errorf("callgraph(%q, %t): missing edge: %s",
test.algo, test.tests, edge)
}
}
- if t.Failed() {
+ if !ok {
t.Log("got:\n", stdout)
}
}
diff --git a/cmd/compilebench/main.go b/cmd/compilebench/main.go
index d7da6d51b..754acdca0 100644
--- a/cmd/compilebench/main.go
+++ b/cmd/compilebench/main.go
@@ -60,21 +60,20 @@
// today they write only the profile for the last benchmark executed.
//
// The default memory profiling rate is one profile sample per 512 kB
-// allocated (see ``go doc runtime.MemProfileRate'').
+// allocated (see “go doc runtime.MemProfileRate”).
// Lowering the rate (for example, -memprofilerate 64000) produces
// a more fine-grained and therefore accurate profile, but it also incurs
// execution cost. For benchmark comparisons, never use timings
// obtained with a low -memprofilerate option.
//
-// Example
+// # Example
//
// Assuming the base version of the compiler has been saved with
-// ``toolstash save,'' this sequence compares the old and new compiler:
+// “toolstash save,” this sequence compares the old and new compiler:
//
// compilebench -count 10 -compile $(toolstash -n compile) >old.txt
// compilebench -count 10 >new.txt
// benchstat old.txt new.txt
-//
package main
import (
@@ -82,23 +81,26 @@ import (
"encoding/json"
"flag"
"fmt"
- exec "golang.org/x/sys/execabs"
"io/ioutil"
"log"
"os"
"path/filepath"
"regexp"
+ "runtime"
"strconv"
"strings"
"time"
+
+ exec "golang.org/x/sys/execabs"
)
var (
- goroot string
- compiler string
- linker string
- runRE *regexp.Regexp
- is6g bool
+ goroot string
+ compiler string
+ assembler string
+ linker string
+ runRE *regexp.Regexp
+ is6g bool
)
var (
@@ -106,6 +108,7 @@ var (
flagAlloc = flag.Bool("alloc", false, "report allocations")
flagObj = flag.Bool("obj", false, "report object file stats")
flagCompiler = flag.String("compile", "", "use `exe` as the cmd/compile binary")
+ flagAssembler = flag.String("asm", "", "use `exe` as the cmd/asm binary")
flagCompilerFlags = flag.String("compileflags", "", "additional `flags` to pass to compile")
flagLinker = flag.String("link", "", "use `exe` as the cmd/link binary")
flagLinkerFlags = flag.String("linkflags", "", "additional `flags` to pass to link")
@@ -116,6 +119,7 @@ var (
flagMemprofilerate = flag.Int64("memprofilerate", -1, "set memory profile `rate`")
flagPackage = flag.String("pkg", "", "if set, benchmark the package at path `pkg`")
flagShort = flag.Bool("short", false, "skip long-running benchmarks")
+ flagTrace = flag.Bool("trace", false, "debug tracing of builds")
)
type test struct {
@@ -178,6 +182,10 @@ func main() {
is6g = true
}
}
+ assembler = *flagAssembler
+ if assembler == "" {
+ _, assembler = toolPath("asm")
+ }
linker = *flagLinker
if linker == "" && !is6g { // TODO: Support 6l
@@ -238,8 +246,10 @@ func toolPath(names ...string) (found, path string) {
}
type Pkg struct {
- Dir string
- GoFiles []string
+ ImportPath string
+ Dir string
+ GoFiles []string
+ SFiles []string
}
func goList(dir string) (*Pkg, error) {
@@ -325,10 +335,10 @@ type compile struct{ dir string }
func (compile) long() bool { return false }
func (c compile) run(name string, count int) error {
- // Make sure dependencies needed by go tool compile are installed to GOROOT/pkg.
- out, err := exec.Command(*flagGoCmd, "build", "-i", c.dir).CombinedOutput()
+ // Make sure dependencies needed by go tool compile are built.
+ out, err := exec.Command(*flagGoCmd, "build", c.dir).CombinedOutput()
if err != nil {
- return fmt.Errorf("go build -i %s: %v\n%s", c.dir, err, out)
+ return fmt.Errorf("go build %s: %v\n%s", c.dir, err, out)
}
// Find dir and source file list.
@@ -337,8 +347,39 @@ func (c compile) run(name string, count int) error {
return err
}
- args := []string{"-o", "_compilebench_.o"}
+ importcfg, err := genImportcfgFile(c.dir, false)
+ if err != nil {
+ return err
+ }
+
+ // If this package has assembly files, we'll need to pass a symabis
+ // file to the compiler; call a helper to invoke the assembler
+ // to do that.
+ var symAbisFile string
+ var asmIncFile string
+ if len(pkg.SFiles) != 0 {
+ symAbisFile = filepath.Join(pkg.Dir, "symabis")
+ asmIncFile = filepath.Join(pkg.Dir, "go_asm.h")
+ content := "\n"
+ if err := os.WriteFile(asmIncFile, []byte(content), 0666); err != nil {
+ return fmt.Errorf("os.WriteFile(%s) failed: %v", asmIncFile, err)
+ }
+ defer os.Remove(symAbisFile)
+ defer os.Remove(asmIncFile)
+ if err := genSymAbisFile(pkg, symAbisFile, pkg.Dir); err != nil {
+ return err
+ }
+ }
+
+ args := []string{"-o", "_compilebench_.o", "-p", pkg.ImportPath}
args = append(args, strings.Fields(*flagCompilerFlags)...)
+ if symAbisFile != "" {
+ args = append(args, "-symabis", symAbisFile)
+ }
+ if importcfg != "" {
+ args = append(args, "-importcfg", importcfg)
+ defer os.Remove(importcfg)
+ }
args = append(args, pkg.GoFiles...)
if err := runBuildCmd(name, count, pkg.Dir, compiler, args); err != nil {
return err
@@ -374,18 +415,28 @@ func (r link) run(name string, count int) error {
}
// Build dependencies.
- out, err := exec.Command(*flagGoCmd, "build", "-i", "-o", "/dev/null", r.dir).CombinedOutput()
+ out, err := exec.Command(*flagGoCmd, "build", "-o", "/dev/null", r.dir).CombinedOutput()
+ if err != nil {
+ return fmt.Errorf("go build -a %s: %v\n%s", r.dir, err, out)
+ }
+
+ importcfg, err := genImportcfgFile(r.dir, true)
if err != nil {
- return fmt.Errorf("go build -i %s: %v\n%s", r.dir, err, out)
+ return err
}
+ defer os.Remove(importcfg)
// Build the main package.
pkg, err := goList(r.dir)
if err != nil {
return err
}
- args := []string{"-o", "_compilebench_.o"}
+ args := []string{"-o", "_compilebench_.o", "-importcfg", importcfg}
args = append(args, pkg.GoFiles...)
+ if *flagTrace {
+ fmt.Fprintf(os.Stderr, "running: %s %+v\n",
+ compiler, args)
+ }
cmd := exec.Command(compiler, args...)
cmd.Dir = pkg.Dir
cmd.Stdout = os.Stderr
@@ -397,7 +448,7 @@ func (r link) run(name string, count int) error {
defer os.Remove(pkg.Dir + "/_compilebench_.o")
// Link the main package.
- args = []string{"-o", "_compilebench_.exe"}
+ args = []string{"-o", "_compilebench_.exe", "-importcfg", importcfg}
args = append(args, strings.Fields(*flagLinkerFlags)...)
args = append(args, strings.Fields(r.flags)...)
args = append(args, "_compilebench_.o")
@@ -429,6 +480,10 @@ func runBuildCmd(name string, count int, dir, tool string, args []string) error
preArgs = append(preArgs, "-cpuprofile", "_compilebench_.cpuprof")
}
}
+ if *flagTrace {
+ fmt.Fprintf(os.Stderr, "running: %s %+v\n",
+ tool, append(preArgs, args...))
+ }
cmd := exec.Command(tool, append(preArgs, args...)...)
cmd.Dir = dir
cmd.Stdout = os.Stderr
@@ -511,3 +566,80 @@ func runBuildCmd(name string, count int, dir, tool string, args []string) error
return nil
}
+
+// genSymAbisFile runs the assembler on the target packge asm files
+// with "-gensymabis" to produce a symabis file that will feed into
+// the Go source compilation. This is fairly hacky in that if the
+// asm invocation convenion changes it will need to be updated
+// (hopefully that will not be needed too frequently).
+func genSymAbisFile(pkg *Pkg, symAbisFile, incdir string) error {
+ args := []string{"-gensymabis", "-o", symAbisFile,
+ "-p", pkg.ImportPath,
+ "-I", filepath.Join(goroot, "pkg", "include"),
+ "-I", incdir,
+ "-D", "GOOS_" + runtime.GOOS,
+ "-D", "GOARCH_" + runtime.GOARCH}
+ if pkg.ImportPath == "reflect" {
+ args = append(args, "-compiling-runtime")
+ }
+ args = append(args, pkg.SFiles...)
+ if *flagTrace {
+ fmt.Fprintf(os.Stderr, "running: %s %+v\n",
+ assembler, args)
+ }
+ cmd := exec.Command(assembler, args...)
+ cmd.Dir = pkg.Dir
+ cmd.Stdout = os.Stderr
+ cmd.Stderr = os.Stderr
+ err := cmd.Run()
+ if err != nil {
+ return fmt.Errorf("assembling to produce symabis file: %v", err)
+ }
+ return nil
+}
+
+// genImportcfgFile generates an importcfg file for building package
+// dir. Returns the generated importcfg file path (or empty string
+// if the package has no dependency).
+func genImportcfgFile(dir string, full bool) (string, error) {
+ need := "{{.Imports}}"
+ if full {
+ // for linking, we need transitive dependencies
+ need = "{{.Deps}}"
+ }
+
+ // find imported/dependent packages
+ cmd := exec.Command(*flagGoCmd, "list", "-f", need, dir)
+ cmd.Stderr = os.Stderr
+ out, err := cmd.Output()
+ if err != nil {
+ return "", fmt.Errorf("go list -f %s %s: %v", need, dir, err)
+ }
+ // trim [ ]\n
+ if len(out) < 3 || out[0] != '[' || out[len(out)-2] != ']' || out[len(out)-1] != '\n' {
+ return "", fmt.Errorf("unexpected output from go list -f %s %s: %s", need, dir, out)
+ }
+ out = out[1 : len(out)-2]
+ if len(out) == 0 {
+ return "", nil
+ }
+
+ // build importcfg for imported packages
+ cmd = exec.Command(*flagGoCmd, "list", "-export", "-f", "{{if .Export}}packagefile {{.ImportPath}}={{.Export}}{{end}}")
+ cmd.Args = append(cmd.Args, strings.Fields(string(out))...)
+ cmd.Stderr = os.Stderr
+ out, err = cmd.Output()
+ if err != nil {
+ return "", fmt.Errorf("generating importcfg for %s: %s: %v", dir, cmd, err)
+ }
+
+ f, err := os.CreateTemp("", "importcfg")
+ if err != nil {
+ return "", fmt.Errorf("creating tmp importcfg file failed: %v", err)
+ }
+ defer f.Close()
+ if _, err := f.Write(out); err != nil {
+ return "", fmt.Errorf("writing importcfg file %s failed: %v", f.Name(), err)
+ }
+ return f.Name(), nil
+}
diff --git a/cmd/cover/README.md b/cmd/cover/README.md
deleted file mode 100644
index 62e60279a..000000000
--- a/cmd/cover/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Deprecated
-
-NOTE: For Go releases 1.5 and later, this tool lives in the standard repository. The code here is not maintained.
diff --git a/cmd/cover/cover.go b/cmd/cover/cover.go
deleted file mode 100644
index 42a7e37aa..000000000
--- a/cmd/cover/cover.go
+++ /dev/null
@@ -1,722 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-import (
- "bytes"
- "flag"
- "fmt"
- "go/ast"
- "go/parser"
- "go/printer"
- "go/token"
- "io"
- "io/ioutil"
- "log"
- "os"
- "path/filepath"
- "sort"
- "strconv"
- "strings"
-)
-
-const usageMessage = "" +
- `Usage of 'go tool cover':
-Given a coverage profile produced by 'go test':
- go test -coverprofile=c.out
-
-Open a web browser displaying annotated source code:
- go tool cover -html=c.out
-
-Write out an HTML file instead of launching a web browser:
- go tool cover -html=c.out -o coverage.html
-
-Display coverage percentages to stdout for each function:
- go tool cover -func=c.out
-
-Finally, to generate modified source code with coverage annotations
-(what go test -cover does):
- go tool cover -mode=set -var=CoverageVariableName program.go
-`
-
-func usage() {
- fmt.Fprint(os.Stderr, usageMessage)
- fmt.Fprintln(os.Stderr, "\nFlags:")
- flag.PrintDefaults()
- fmt.Fprintln(os.Stderr, "\n Only one of -html, -func, or -mode may be set.")
- os.Exit(2)
-}
-
-var (
- mode = flag.String("mode", "", "coverage mode: set, count, atomic")
- varVar = flag.String("var", "GoCover", "name of coverage variable to generate")
- output = flag.String("o", "", "file for output; default: stdout")
- htmlOut = flag.String("html", "", "generate HTML representation of coverage profile")
- funcOut = flag.String("func", "", "output coverage profile information for each function")
-)
-
-var profile string // The profile to read; the value of -html or -func
-
-var counterStmt func(*File, ast.Expr) ast.Stmt
-
-const (
- atomicPackagePath = "sync/atomic"
- atomicPackageName = "_cover_atomic_"
-)
-
-func main() {
- flag.Usage = usage
- flag.Parse()
-
- // Usage information when no arguments.
- if flag.NFlag() == 0 && flag.NArg() == 0 {
- flag.Usage()
- }
-
- err := parseFlags()
- if err != nil {
- fmt.Fprintln(os.Stderr, err)
- fmt.Fprintln(os.Stderr, `For usage information, run "go tool cover -help"`)
- os.Exit(2)
- }
-
- // Generate coverage-annotated source.
- if *mode != "" {
- annotate(flag.Arg(0))
- return
- }
-
- // Output HTML or function coverage information.
- if *htmlOut != "" {
- err = htmlOutput(profile, *output)
- } else {
- err = funcOutput(profile, *output)
- }
-
- if err != nil {
- fmt.Fprintf(os.Stderr, "cover: %v\n", err)
- os.Exit(2)
- }
-}
-
-// parseFlags sets the profile and counterStmt globals and performs validations.
-func parseFlags() error {
- profile = *htmlOut
- if *funcOut != "" {
- if profile != "" {
- return fmt.Errorf("too many options")
- }
- profile = *funcOut
- }
-
- // Must either display a profile or rewrite Go source.
- if (profile == "") == (*mode == "") {
- return fmt.Errorf("too many options")
- }
-
- if *mode != "" {
- switch *mode {
- case "set":
- counterStmt = setCounterStmt
- case "count":
- counterStmt = incCounterStmt
- case "atomic":
- counterStmt = atomicCounterStmt
- default:
- return fmt.Errorf("unknown -mode %v", *mode)
- }
-
- if flag.NArg() == 0 {
- return fmt.Errorf("missing source file")
- } else if flag.NArg() == 1 {
- return nil
- }
- } else if flag.NArg() == 0 {
- return nil
- }
- return fmt.Errorf("too many arguments")
-}
-
-// Block represents the information about a basic block to be recorded in the analysis.
-// Note: Our definition of basic block is based on control structures; we don't break
-// apart && and ||. We could but it doesn't seem important enough to bother.
-type Block struct {
- startByte token.Pos
- endByte token.Pos
- numStmt int
-}
-
-// File is a wrapper for the state of a file used in the parser.
-// The basic parse tree walker is a method of this type.
-type File struct {
- fset *token.FileSet
- name string // Name of file.
- astFile *ast.File
- blocks []Block
- atomicPkg string // Package name for "sync/atomic" in this file.
-}
-
-// Visit implements the ast.Visitor interface.
-func (f *File) Visit(node ast.Node) ast.Visitor {
- switch n := node.(type) {
- case *ast.BlockStmt:
- // If it's a switch or select, the body is a list of case clauses; don't tag the block itself.
- if len(n.List) > 0 {
- switch n.List[0].(type) {
- case *ast.CaseClause: // switch
- for _, n := range n.List {
- clause := n.(*ast.CaseClause)
- clause.Body = f.addCounters(clause.Pos(), clause.End(), clause.Body, false)
- }
- return f
- case *ast.CommClause: // select
- for _, n := range n.List {
- clause := n.(*ast.CommClause)
- clause.Body = f.addCounters(clause.Pos(), clause.End(), clause.Body, false)
- }
- return f
- }
- }
- n.List = f.addCounters(n.Lbrace, n.Rbrace+1, n.List, true) // +1 to step past closing brace.
- case *ast.IfStmt:
- ast.Walk(f, n.Body)
- if n.Else == nil {
- return nil
- }
- // The elses are special, because if we have
- // if x {
- // } else if y {
- // }
- // we want to cover the "if y". To do this, we need a place to drop the counter,
- // so we add a hidden block:
- // if x {
- // } else {
- // if y {
- // }
- // }
- switch stmt := n.Else.(type) {
- case *ast.IfStmt:
- block := &ast.BlockStmt{
- Lbrace: n.Body.End(), // Start at end of the "if" block so the covered part looks like it starts at the "else".
- List: []ast.Stmt{stmt},
- Rbrace: stmt.End(),
- }
- n.Else = block
- case *ast.BlockStmt:
- stmt.Lbrace = n.Body.End() // Start at end of the "if" block so the covered part looks like it starts at the "else".
- default:
- panic("unexpected node type in if")
- }
- ast.Walk(f, n.Else)
- return nil
- case *ast.SelectStmt:
- // Don't annotate an empty select - creates a syntax error.
- if n.Body == nil || len(n.Body.List) == 0 {
- return nil
- }
- case *ast.SwitchStmt:
- // Don't annotate an empty switch - creates a syntax error.
- if n.Body == nil || len(n.Body.List) == 0 {
- return nil
- }
- case *ast.TypeSwitchStmt:
- // Don't annotate an empty type switch - creates a syntax error.
- if n.Body == nil || len(n.Body.List) == 0 {
- return nil
- }
- }
- return f
-}
-
-// unquote returns the unquoted string.
-func unquote(s string) string {
- t, err := strconv.Unquote(s)
- if err != nil {
- log.Fatalf("cover: improperly quoted string %q\n", s)
- }
- return t
-}
-
-// addImport adds an import for the specified path, if one does not already exist, and returns
-// the local package name.
-func (f *File) addImport(path string) string {
- // Does the package already import it?
- for _, s := range f.astFile.Imports {
- if unquote(s.Path.Value) == path {
- if s.Name != nil {
- return s.Name.Name
- }
- return filepath.Base(path)
- }
- }
- newImport := &ast.ImportSpec{
- Name: ast.NewIdent(atomicPackageName),
- Path: &ast.BasicLit{
- Kind: token.STRING,
- Value: fmt.Sprintf("%q", path),
- },
- }
- impDecl := &ast.GenDecl{
- Tok: token.IMPORT,
- Specs: []ast.Spec{
- newImport,
- },
- }
- // Make the new import the first Decl in the file.
- astFile := f.astFile
- astFile.Decls = append(astFile.Decls, nil)
- copy(astFile.Decls[1:], astFile.Decls[0:])
- astFile.Decls[0] = impDecl
- astFile.Imports = append(astFile.Imports, newImport)
-
- // Now refer to the package, just in case it ends up unused.
- // That is, append to the end of the file the declaration
- // var _ = _cover_atomic_.AddUint32
- reference := &ast.GenDecl{
- Tok: token.VAR,
- Specs: []ast.Spec{
- &ast.ValueSpec{
- Names: []*ast.Ident{
- ast.NewIdent("_"),
- },
- Values: []ast.Expr{
- &ast.SelectorExpr{
- X: ast.NewIdent(atomicPackageName),
- Sel: ast.NewIdent("AddUint32"),
- },
- },
- },
- },
- }
- astFile.Decls = append(astFile.Decls, reference)
- return atomicPackageName
-}
-
-var slashslash = []byte("//")
-
-// initialComments returns the prefix of content containing only
-// whitespace and line comments. Any +build directives must appear
-// within this region. This approach is more reliable than using
-// go/printer to print a modified AST containing comments.
-//
-func initialComments(content []byte) []byte {
- // Derived from go/build.Context.shouldBuild.
- end := 0
- p := content
- for len(p) > 0 {
- line := p
- if i := bytes.IndexByte(line, '\n'); i >= 0 {
- line, p = line[:i], p[i+1:]
- } else {
- p = p[len(p):]
- }
- line = bytes.TrimSpace(line)
- if len(line) == 0 { // Blank line.
- end = len(content) - len(p)
- continue
- }
- if !bytes.HasPrefix(line, slashslash) { // Not comment line.
- break
- }
- }
- return content[:end]
-}
-
-func annotate(name string) {
- fset := token.NewFileSet()
- content, err := ioutil.ReadFile(name)
- if err != nil {
- log.Fatalf("cover: %s: %s", name, err)
- }
- parsedFile, err := parser.ParseFile(fset, name, content, parser.ParseComments)
- if err != nil {
- log.Fatalf("cover: %s: %s", name, err)
- }
- parsedFile.Comments = trimComments(parsedFile, fset)
-
- file := &File{
- fset: fset,
- name: name,
- astFile: parsedFile,
- }
- if *mode == "atomic" {
- file.atomicPkg = file.addImport(atomicPackagePath)
- }
- ast.Walk(file, file.astFile)
- fd := os.Stdout
- if *output != "" {
- var err error
- fd, err = os.Create(*output)
- if err != nil {
- log.Fatalf("cover: %s", err)
- }
- }
- fd.Write(initialComments(content)) // Retain '// +build' directives.
- file.print(fd)
- // After printing the source tree, add some declarations for the counters etc.
- // We could do this by adding to the tree, but it's easier just to print the text.
- file.addVariables(fd)
-}
-
-// trimComments drops all but the //go: comments, some of which are semantically important.
-// We drop all others because they can appear in places that cause our counters
-// to appear in syntactically incorrect places. //go: appears at the beginning of
-// the line and is syntactically safe.
-func trimComments(file *ast.File, fset *token.FileSet) []*ast.CommentGroup {
- var comments []*ast.CommentGroup
- for _, group := range file.Comments {
- var list []*ast.Comment
- for _, comment := range group.List {
- if strings.HasPrefix(comment.Text, "//go:") && fset.Position(comment.Slash).Column == 1 {
- list = append(list, comment)
- }
- }
- if list != nil {
- comments = append(comments, &ast.CommentGroup{List: list})
- }
- }
- return comments
-}
-
-func (f *File) print(w io.Writer) {
- printer.Fprint(w, f.fset, f.astFile)
-}
-
-// intLiteral returns an ast.BasicLit representing the integer value.
-func (f *File) intLiteral(i int) *ast.BasicLit {
- node := &ast.BasicLit{
- Kind: token.INT,
- Value: fmt.Sprint(i),
- }
- return node
-}
-
-// index returns an ast.BasicLit representing the number of counters present.
-func (f *File) index() *ast.BasicLit {
- return f.intLiteral(len(f.blocks))
-}
-
-// setCounterStmt returns the expression: __count[23] = 1.
-func setCounterStmt(f *File, counter ast.Expr) ast.Stmt {
- return &ast.AssignStmt{
- Lhs: []ast.Expr{counter},
- Tok: token.ASSIGN,
- Rhs: []ast.Expr{f.intLiteral(1)},
- }
-}
-
-// incCounterStmt returns the expression: __count[23]++.
-func incCounterStmt(f *File, counter ast.Expr) ast.Stmt {
- return &ast.IncDecStmt{
- X: counter,
- Tok: token.INC,
- }
-}
-
-// atomicCounterStmt returns the expression: atomic.AddUint32(&__count[23], 1)
-func atomicCounterStmt(f *File, counter ast.Expr) ast.Stmt {
- return &ast.ExprStmt{
- X: &ast.CallExpr{
- Fun: &ast.SelectorExpr{
- X: ast.NewIdent(f.atomicPkg),
- Sel: ast.NewIdent("AddUint32"),
- },
- Args: []ast.Expr{&ast.UnaryExpr{
- Op: token.AND,
- X: counter,
- },
- f.intLiteral(1),
- },
- },
- }
-}
-
-// newCounter creates a new counter expression of the appropriate form.
-func (f *File) newCounter(start, end token.Pos, numStmt int) ast.Stmt {
- counter := &ast.IndexExpr{
- X: &ast.SelectorExpr{
- X: ast.NewIdent(*varVar),
- Sel: ast.NewIdent("Count"),
- },
- Index: f.index(),
- }
- stmt := counterStmt(f, counter)
- f.blocks = append(f.blocks, Block{start, end, numStmt})
- return stmt
-}
-
-// addCounters takes a list of statements and adds counters to the beginning of
-// each basic block at the top level of that list. For instance, given
-//
-// S1
-// if cond {
-// S2
-// }
-// S3
-//
-// counters will be added before S1 and before S3. The block containing S2
-// will be visited in a separate call.
-// TODO: Nested simple blocks get unnecessary (but correct) counters
-func (f *File) addCounters(pos, blockEnd token.Pos, list []ast.Stmt, extendToClosingBrace bool) []ast.Stmt {
- // Special case: make sure we add a counter to an empty block. Can't do this below
- // or we will add a counter to an empty statement list after, say, a return statement.
- if len(list) == 0 {
- return []ast.Stmt{f.newCounter(pos, blockEnd, 0)}
- }
- // We have a block (statement list), but it may have several basic blocks due to the
- // appearance of statements that affect the flow of control.
- var newList []ast.Stmt
- for {
- // Find first statement that affects flow of control (break, continue, if, etc.).
- // It will be the last statement of this basic block.
- var last int
- end := blockEnd
- for last = 0; last < len(list); last++ {
- end = f.statementBoundary(list[last])
- if f.endsBasicSourceBlock(list[last]) {
- extendToClosingBrace = false // Block is broken up now.
- last++
- break
- }
- }
- if extendToClosingBrace {
- end = blockEnd
- }
- if pos != end { // Can have no source to cover if e.g. blocks abut.
- newList = append(newList, f.newCounter(pos, end, last))
- }
- newList = append(newList, list[0:last]...)
- list = list[last:]
- if len(list) == 0 {
- break
- }
- pos = list[0].Pos()
- }
- return newList
-}
-
-// hasFuncLiteral reports the existence and position of the first func literal
-// in the node, if any. If a func literal appears, it usually marks the termination
-// of a basic block because the function body is itself a block.
-// Therefore we draw a line at the start of the body of the first function literal we find.
-// TODO: what if there's more than one? Probably doesn't matter much.
-func hasFuncLiteral(n ast.Node) (bool, token.Pos) {
- if n == nil {
- return false, 0
- }
- var literal funcLitFinder
- ast.Walk(&literal, n)
- return literal.found(), token.Pos(literal)
-}
-
-// statementBoundary finds the location in s that terminates the current basic
-// block in the source.
-func (f *File) statementBoundary(s ast.Stmt) token.Pos {
- // Control flow statements are easy.
- switch s := s.(type) {
- case *ast.BlockStmt:
- // Treat blocks like basic blocks to avoid overlapping counters.
- return s.Lbrace
- case *ast.IfStmt:
- found, pos := hasFuncLiteral(s.Init)
- if found {
- return pos
- }
- found, pos = hasFuncLiteral(s.Cond)
- if found {
- return pos
- }
- return s.Body.Lbrace
- case *ast.ForStmt:
- found, pos := hasFuncLiteral(s.Init)
- if found {
- return pos
- }
- found, pos = hasFuncLiteral(s.Cond)
- if found {
- return pos
- }
- found, pos = hasFuncLiteral(s.Post)
- if found {
- return pos
- }
- return s.Body.Lbrace
- case *ast.LabeledStmt:
- return f.statementBoundary(s.Stmt)
- case *ast.RangeStmt:
- found, pos := hasFuncLiteral(s.X)
- if found {
- return pos
- }
- return s.Body.Lbrace
- case *ast.SwitchStmt:
- found, pos := hasFuncLiteral(s.Init)
- if found {
- return pos
- }
- found, pos = hasFuncLiteral(s.Tag)
- if found {
- return pos
- }
- return s.Body.Lbrace
- case *ast.SelectStmt:
- return s.Body.Lbrace
- case *ast.TypeSwitchStmt:
- found, pos := hasFuncLiteral(s.Init)
- if found {
- return pos
- }
- return s.Body.Lbrace
- }
- // If not a control flow statement, it is a declaration, expression, call, etc. and it may have a function literal.
- // If it does, that's tricky because we want to exclude the body of the function from this block.
- // Draw a line at the start of the body of the first function literal we find.
- // TODO: what if there's more than one? Probably doesn't matter much.
- found, pos := hasFuncLiteral(s)
- if found {
- return pos
- }
- return s.End()
-}
-
-// endsBasicSourceBlock reports whether s changes the flow of control: break, if, etc.,
-// or if it's just problematic, for instance contains a function literal, which will complicate
-// accounting due to the block-within-an expression.
-func (f *File) endsBasicSourceBlock(s ast.Stmt) bool {
- switch s := s.(type) {
- case *ast.BlockStmt:
- // Treat blocks like basic blocks to avoid overlapping counters.
- return true
- case *ast.BranchStmt:
- return true
- case *ast.ForStmt:
- return true
- case *ast.IfStmt:
- return true
- case *ast.LabeledStmt:
- return f.endsBasicSourceBlock(s.Stmt)
- case *ast.RangeStmt:
- return true
- case *ast.SwitchStmt:
- return true
- case *ast.SelectStmt:
- return true
- case *ast.TypeSwitchStmt:
- return true
- case *ast.ExprStmt:
- // Calls to panic change the flow.
- // We really should verify that "panic" is the predefined function,
- // but without type checking we can't and the likelihood of it being
- // an actual problem is vanishingly small.
- if call, ok := s.X.(*ast.CallExpr); ok {
- if ident, ok := call.Fun.(*ast.Ident); ok && ident.Name == "panic" && len(call.Args) == 1 {
- return true
- }
- }
- }
- found, _ := hasFuncLiteral(s)
- return found
-}
-
-// funcLitFinder implements the ast.Visitor pattern to find the location of any
-// function literal in a subtree.
-type funcLitFinder token.Pos
-
-func (f *funcLitFinder) Visit(node ast.Node) (w ast.Visitor) {
- if f.found() {
- return nil // Prune search.
- }
- switch n := node.(type) {
- case *ast.FuncLit:
- *f = funcLitFinder(n.Body.Lbrace)
- return nil // Prune search.
- }
- return f
-}
-
-func (f *funcLitFinder) found() bool {
- return token.Pos(*f) != token.NoPos
-}
-
-// Sort interface for []block1; used for self-check in addVariables.
-
-type block1 struct {
- Block
- index int
-}
-
-type blockSlice []block1
-
-func (b blockSlice) Len() int { return len(b) }
-func (b blockSlice) Less(i, j int) bool { return b[i].startByte < b[j].startByte }
-func (b blockSlice) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
-
-// offset translates a token position into a 0-indexed byte offset.
-func (f *File) offset(pos token.Pos) int {
- return f.fset.Position(pos).Offset
-}
-
-// addVariables adds to the end of the file the declarations to set up the counter and position variables.
-func (f *File) addVariables(w io.Writer) {
- // Self-check: Verify that the instrumented basic blocks are disjoint.
- t := make([]block1, len(f.blocks))
- for i := range f.blocks {
- t[i].Block = f.blocks[i]
- t[i].index = i
- }
- sort.Sort(blockSlice(t))
- for i := 1; i < len(t); i++ {
- if t[i-1].endByte > t[i].startByte {
- fmt.Fprintf(os.Stderr, "cover: internal error: block %d overlaps block %d\n", t[i-1].index, t[i].index)
- // Note: error message is in byte positions, not token positions.
- fmt.Fprintf(os.Stderr, "\t%s:#%d,#%d %s:#%d,#%d\n",
- f.name, f.offset(t[i-1].startByte), f.offset(t[i-1].endByte),
- f.name, f.offset(t[i].startByte), f.offset(t[i].endByte))
- }
- }
-
- // Declare the coverage struct as a package-level variable.
- fmt.Fprintf(w, "\nvar %s = struct {\n", *varVar)
- fmt.Fprintf(w, "\tCount [%d]uint32\n", len(f.blocks))
- fmt.Fprintf(w, "\tPos [3 * %d]uint32\n", len(f.blocks))
- fmt.Fprintf(w, "\tNumStmt [%d]uint16\n", len(f.blocks))
- fmt.Fprintf(w, "} {\n")
-
- // Initialize the position array field.
- fmt.Fprintf(w, "\tPos: [3 * %d]uint32{\n", len(f.blocks))
-
- // A nice long list of positions. Each position is encoded as follows to reduce size:
- // - 32-bit starting line number
- // - 32-bit ending line number
- // - (16 bit ending column number << 16) | (16-bit starting column number).
- for i, block := range f.blocks {
- start := f.fset.Position(block.startByte)
- end := f.fset.Position(block.endByte)
- fmt.Fprintf(w, "\t\t%d, %d, %#x, // [%d]\n", start.Line, end.Line, (end.Column&0xFFFF)<<16|(start.Column&0xFFFF), i)
- }
-
- // Close the position array.
- fmt.Fprintf(w, "\t},\n")
-
- // Initialize the position array field.
- fmt.Fprintf(w, "\tNumStmt: [%d]uint16{\n", len(f.blocks))
-
- // A nice long list of statements-per-block, so we can give a conventional
- // valuation of "percent covered". To save space, it's a 16-bit number, so we
- // clamp it if it overflows - won't matter in practice.
- for i, block := range f.blocks {
- n := block.numStmt
- if n > 1<<16-1 {
- n = 1<<16 - 1
- }
- fmt.Fprintf(w, "\t\t%d, // %d\n", n, i)
- }
-
- // Close the statements-per-block array.
- fmt.Fprintf(w, "\t},\n")
-
- // Close the struct initialization.
- fmt.Fprintf(w, "}\n")
-}
diff --git a/cmd/cover/cover_test.go b/cmd/cover/cover_test.go
deleted file mode 100644
index 228c91144..000000000
--- a/cmd/cover/cover_test.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// No testdata on Android.
-
-//go:build !android
-// +build !android
-
-package main_test
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "os"
- "os/exec"
- "path/filepath"
- "testing"
-
- "golang.org/x/tools/internal/testenv"
-)
-
-const (
- // Data directory, also the package directory for the test.
- testdata = "testdata"
-)
-
-var debug = false // Keeps the rewritten files around if set.
-
-// Run this shell script, but do it in Go so it can be run by "go test".
-//
-// replace the word LINE with the line number < testdata/test.go > testdata/test_line.go
-// go build -o ./testcover
-// ./testcover -mode=count -var=CoverTest -o ./testdata/test_cover.go testdata/test_line.go
-// go run ./testdata/main.go ./testdata/test.go
-//
-func TestCover(t *testing.T) {
- testenv.NeedsTool(t, "go")
-
- tmpdir, err := ioutil.TempDir("", "TestCover")
- if err != nil {
- t.Fatal(err)
- }
- defer func() {
- if debug {
- fmt.Printf("test files left in %s\n", tmpdir)
- } else {
- os.RemoveAll(tmpdir)
- }
- }()
-
- testcover := filepath.Join(tmpdir, "testcover.exe")
- testMain := filepath.Join(tmpdir, "main.go")
- testTest := filepath.Join(tmpdir, "test.go")
- coverInput := filepath.Join(tmpdir, "test_line.go")
- coverOutput := filepath.Join(tmpdir, "test_cover.go")
-
- for _, f := range []string{testMain, testTest} {
- data, err := ioutil.ReadFile(filepath.Join(testdata, filepath.Base(f)))
- if err != nil {
- t.Fatal(err)
- }
- if err := ioutil.WriteFile(f, data, 0644); err != nil {
- t.Fatal(err)
- }
- }
-
- // Read in the test file (testTest) and write it, with LINEs specified, to coverInput.
- file, err := ioutil.ReadFile(testTest)
- if err != nil {
- t.Fatal(err)
- }
- lines := bytes.Split(file, []byte("\n"))
- for i, line := range lines {
- lines[i] = bytes.Replace(line, []byte("LINE"), []byte(fmt.Sprint(i+1)), -1)
- }
- err = ioutil.WriteFile(coverInput, bytes.Join(lines, []byte("\n")), 0666)
- if err != nil {
- t.Fatal(err)
- }
-
- // go build -o testcover
- cmd := exec.Command("go", "build", "-o", testcover)
- run(cmd, t)
-
- // ./testcover -mode=count -var=coverTest -o ./testdata/test_cover.go testdata/test_line.go
- cmd = exec.Command(testcover, "-mode=count", "-var=coverTest", "-o", coverOutput, coverInput)
- run(cmd, t)
-
- // defer removal of ./testdata/test_cover.go
- if !debug {
- defer os.Remove(coverOutput)
- }
-
- // go run ./testdata/main.go ./testdata/test.go
- cmd = exec.Command("go", "run", testMain, coverOutput)
- run(cmd, t)
-}
-
-func run(c *exec.Cmd, t *testing.T) {
- c.Stdout = os.Stdout
- c.Stderr = os.Stderr
- err := c.Run()
- if err != nil {
- t.Fatal(err)
- }
-}
diff --git a/cmd/cover/doc.go b/cmd/cover/doc.go
deleted file mode 100644
index f903d8508..000000000
--- a/cmd/cover/doc.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Cover is a program for analyzing the coverage profiles generated by
-'go test -coverprofile=cover.out'.
-
-Deprecated: For Go releases 1.5 and later, this tool lives in the
-standard repository. The code here is not maintained.
-
-Cover is also used by 'go test -cover' to rewrite the source code with
-annotations to track which parts of each function are executed.
-It operates on one Go source file at a time, computing approximate
-basic block information by studying the source. It is thus more portable
-than binary-rewriting coverage tools, but also a little less capable.
-For instance, it does not probe inside && and || expressions, and can
-be mildly confused by single statements with multiple function literals.
-
-For usage information, please see:
- go help testflag
- go tool cover -help
-*/
-package main // import "golang.org/x/tools/cmd/cover"
diff --git a/cmd/cover/func.go b/cmd/cover/func.go
deleted file mode 100644
index 41d9fceca..000000000
--- a/cmd/cover/func.go
+++ /dev/null
@@ -1,166 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements the visitor that computes the (line, column)-(line-column) range for each function.
-
-package main
-
-import (
- "bufio"
- "fmt"
- "go/ast"
- "go/build"
- "go/parser"
- "go/token"
- "os"
- "path/filepath"
- "text/tabwriter"
-
- "golang.org/x/tools/cover"
-)
-
-// funcOutput takes two file names as arguments, a coverage profile to read as input and an output
-// file to write ("" means to write to standard output). The function reads the profile and produces
-// as output the coverage data broken down by function, like this:
-//
-// fmt/format.go:30: init 100.0%
-// fmt/format.go:57: clearflags 100.0%
-// ...
-// fmt/scan.go:1046: doScan 100.0%
-// fmt/scan.go:1075: advance 96.2%
-// fmt/scan.go:1119: doScanf 96.8%
-// total: (statements) 91.9%
-
-func funcOutput(profile, outputFile string) error {
- profiles, err := cover.ParseProfiles(profile)
- if err != nil {
- return err
- }
-
- var out *bufio.Writer
- if outputFile == "" {
- out = bufio.NewWriter(os.Stdout)
- } else {
- fd, err := os.Create(outputFile)
- if err != nil {
- return err
- }
- defer fd.Close()
- out = bufio.NewWriter(fd)
- }
- defer out.Flush()
-
- tabber := tabwriter.NewWriter(out, 1, 8, 1, '\t', 0)
- defer tabber.Flush()
-
- var total, covered int64
- for _, profile := range profiles {
- fn := profile.FileName
- file, err := findFile(fn)
- if err != nil {
- return err
- }
- funcs, err := findFuncs(file)
- if err != nil {
- return err
- }
- // Now match up functions and profile blocks.
- for _, f := range funcs {
- c, t := f.coverage(profile)
- fmt.Fprintf(tabber, "%s:%d:\t%s\t%.1f%%\n", fn, f.startLine, f.name, 100.0*float64(c)/float64(t))
- total += t
- covered += c
- }
- }
- fmt.Fprintf(tabber, "total:\t(statements)\t%.1f%%\n", 100.0*float64(covered)/float64(total))
-
- return nil
-}
-
-// findFuncs parses the file and returns a slice of FuncExtent descriptors.
-func findFuncs(name string) ([]*FuncExtent, error) {
- fset := token.NewFileSet()
- parsedFile, err := parser.ParseFile(fset, name, nil, 0)
- if err != nil {
- return nil, err
- }
- visitor := &FuncVisitor{
- fset: fset,
- name: name,
- astFile: parsedFile,
- }
- ast.Walk(visitor, visitor.astFile)
- return visitor.funcs, nil
-}
-
-// FuncExtent describes a function's extent in the source by file and position.
-type FuncExtent struct {
- name string
- startLine int
- startCol int
- endLine int
- endCol int
-}
-
-// FuncVisitor implements the visitor that builds the function position list for a file.
-type FuncVisitor struct {
- fset *token.FileSet
- name string // Name of file.
- astFile *ast.File
- funcs []*FuncExtent
-}
-
-// Visit implements the ast.Visitor interface.
-func (v *FuncVisitor) Visit(node ast.Node) ast.Visitor {
- switch n := node.(type) {
- case *ast.FuncDecl:
- start := v.fset.Position(n.Pos())
- end := v.fset.Position(n.End())
- fe := &FuncExtent{
- name: n.Name.Name,
- startLine: start.Line,
- startCol: start.Column,
- endLine: end.Line,
- endCol: end.Column,
- }
- v.funcs = append(v.funcs, fe)
- }
- return v
-}
-
-// coverage returns the fraction of the statements in the function that were covered, as a numerator and denominator.
-func (f *FuncExtent) coverage(profile *cover.Profile) (num, den int64) {
- // We could avoid making this n^2 overall by doing a single scan and annotating the functions,
- // but the sizes of the data structures is never very large and the scan is almost instantaneous.
- var covered, total int64
- // The blocks are sorted, so we can stop counting as soon as we reach the end of the relevant block.
- for _, b := range profile.Blocks {
- if b.StartLine > f.endLine || (b.StartLine == f.endLine && b.StartCol >= f.endCol) {
- // Past the end of the function.
- break
- }
- if b.EndLine < f.startLine || (b.EndLine == f.startLine && b.EndCol <= f.startCol) {
- // Before the beginning of the function
- continue
- }
- total += int64(b.NumStmt)
- if b.Count > 0 {
- covered += int64(b.NumStmt)
- }
- }
- if total == 0 {
- total = 1 // Avoid zero denominator.
- }
- return covered, total
-}
-
-// findFile finds the location of the named file in GOROOT, GOPATH etc.
-func findFile(file string) (string, error) {
- dir, file := filepath.Split(file)
- pkg, err := build.Import(dir, ".", build.FindOnly)
- if err != nil {
- return "", fmt.Errorf("can't find %q: %v", file, err)
- }
- return filepath.Join(pkg.Dir, file), nil
-}
diff --git a/cmd/cover/html.go b/cmd/cover/html.go
deleted file mode 100644
index 0f8c72542..000000000
--- a/cmd/cover/html.go
+++ /dev/null
@@ -1,284 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-import (
- "bufio"
- "bytes"
- "fmt"
- exec "golang.org/x/sys/execabs"
- "html/template"
- "io"
- "io/ioutil"
- "math"
- "os"
- "path/filepath"
- "runtime"
-
- "golang.org/x/tools/cover"
-)
-
-// htmlOutput reads the profile data from profile and generates an HTML
-// coverage report, writing it to outfile. If outfile is empty,
-// it writes the report to a temporary file and opens it in a web browser.
-func htmlOutput(profile, outfile string) error {
- profiles, err := cover.ParseProfiles(profile)
- if err != nil {
- return err
- }
-
- var d templateData
-
- for _, profile := range profiles {
- fn := profile.FileName
- if profile.Mode == "set" {
- d.Set = true
- }
- file, err := findFile(fn)
- if err != nil {
- return err
- }
- src, err := ioutil.ReadFile(file)
- if err != nil {
- return fmt.Errorf("can't read %q: %v", fn, err)
- }
- var buf bytes.Buffer
- err = htmlGen(&buf, src, profile.Boundaries(src))
- if err != nil {
- return err
- }
- d.Files = append(d.Files, &templateFile{
- Name: fn,
- Body: template.HTML(buf.String()),
- Coverage: percentCovered(profile),
- })
- }
-
- var out *os.File
- if outfile == "" {
- var dir string
- dir, err = ioutil.TempDir("", "cover")
- if err != nil {
- return err
- }
- out, err = os.Create(filepath.Join(dir, "coverage.html"))
- } else {
- out, err = os.Create(outfile)
- }
- if err != nil {
- return err
- }
- err = htmlTemplate.Execute(out, d)
- if err == nil {
- err = out.Close()
- }
- if err != nil {
- return err
- }
-
- if outfile == "" {
- if !startBrowser("file://" + out.Name()) {
- fmt.Fprintf(os.Stderr, "HTML output written to %s\n", out.Name())
- }
- }
-
- return nil
-}
-
-// percentCovered returns, as a percentage, the fraction of the statements in
-// the profile covered by the test run.
-// In effect, it reports the coverage of a given source file.
-func percentCovered(p *cover.Profile) float64 {
- var total, covered int64
- for _, b := range p.Blocks {
- total += int64(b.NumStmt)
- if b.Count > 0 {
- covered += int64(b.NumStmt)
- }
- }
- if total == 0 {
- return 0
- }
- return float64(covered) / float64(total) * 100
-}
-
-// htmlGen generates an HTML coverage report with the provided filename,
-// source code, and tokens, and writes it to the given Writer.
-func htmlGen(w io.Writer, src []byte, boundaries []cover.Boundary) error {
- dst := bufio.NewWriter(w)
- for i := range src {
- for len(boundaries) > 0 && boundaries[0].Offset == i {
- b := boundaries[0]
- if b.Start {
- n := 0
- if b.Count > 0 {
- n = int(math.Floor(b.Norm*9)) + 1
- }
- fmt.Fprintf(dst, `<span class="cov%v" title="%v">`, n, b.Count)
- } else {
- dst.WriteString("</span>")
- }
- boundaries = boundaries[1:]
- }
- switch b := src[i]; b {
- case '>':
- dst.WriteString("&gt;")
- case '<':
- dst.WriteString("&lt;")
- case '&':
- dst.WriteString("&amp;")
- case '\t':
- dst.WriteString(" ")
- default:
- dst.WriteByte(b)
- }
- }
- return dst.Flush()
-}
-
-// startBrowser tries to open the URL in a browser
-// and reports whether it succeeds.
-func startBrowser(url string) bool {
- // try to start the browser
- var args []string
- switch runtime.GOOS {
- case "darwin":
- args = []string{"open"}
- case "windows":
- args = []string{"cmd", "/c", "start"}
- default:
- args = []string{"xdg-open"}
- }
- cmd := exec.Command(args[0], append(args[1:], url)...)
- return cmd.Start() == nil
-}
-
-// rgb returns an rgb value for the specified coverage value
-// between 0 (no coverage) and 10 (max coverage).
-func rgb(n int) string {
- if n == 0 {
- return "rgb(192, 0, 0)" // Red
- }
- // Gradient from gray to green.
- r := 128 - 12*(n-1)
- g := 128 + 12*(n-1)
- b := 128 + 3*(n-1)
- return fmt.Sprintf("rgb(%v, %v, %v)", r, g, b)
-}
-
-// colors generates the CSS rules for coverage colors.
-func colors() template.CSS {
- var buf bytes.Buffer
- for i := 0; i < 11; i++ {
- fmt.Fprintf(&buf, ".cov%v { color: %v }\n", i, rgb(i))
- }
- return template.CSS(buf.String())
-}
-
-var htmlTemplate = template.Must(template.New("html").Funcs(template.FuncMap{
- "colors": colors,
-}).Parse(tmplHTML))
-
-type templateData struct {
- Files []*templateFile
- Set bool
-}
-
-type templateFile struct {
- Name string
- Body template.HTML
- Coverage float64
-}
-
-const tmplHTML = `
-<!DOCTYPE html>
-<html>
- <head>
- <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
- <style>
- body {
- background: black;
- color: rgb(80, 80, 80);
- }
- body, pre, #legend span {
- font-family: Menlo, monospace;
- font-weight: bold;
- }
- #topbar {
- background: black;
- position: fixed;
- top: 0; left: 0; right: 0;
- height: 42px;
- border-bottom: 1px solid rgb(80, 80, 80);
- }
- #content {
- margin-top: 50px;
- }
- #nav, #legend {
- float: left;
- margin-left: 10px;
- }
- #legend {
- margin-top: 12px;
- }
- #nav {
- margin-top: 10px;
- }
- #legend span {
- margin: 0 5px;
- }
- {{colors}}
- </style>
- </head>
- <body>
- <div id="topbar">
- <div id="nav">
- <select id="files">
- {{range $i, $f := .Files}}
- <option value="file{{$i}}">{{$f.Name}} ({{printf "%.1f" $f.Coverage}}%)</option>
- {{end}}
- </select>
- </div>
- <div id="legend">
- <span>not tracked</span>
- {{if .Set}}
- <span class="cov0">not covered</span>
- <span class="cov8">covered</span>
- {{else}}
- <span class="cov0">no coverage</span>
- <span class="cov1">low coverage</span>
- <span class="cov2">*</span>
- <span class="cov3">*</span>
- <span class="cov4">*</span>
- <span class="cov5">*</span>
- <span class="cov6">*</span>
- <span class="cov7">*</span>
- <span class="cov8">*</span>
- <span class="cov9">*</span>
- <span class="cov10">high coverage</span>
- {{end}}
- </div>
- </div>
- <div id="content">
- {{range $i, $f := .Files}}
- <pre class="file" id="file{{$i}}" {{if $i}}style="display: none"{{end}}>{{$f.Body}}</pre>
- {{end}}
- </div>
- </body>
- <script>
- (function() {
- var files = document.getElementById('files');
- var visible = document.getElementById('file0');
- files.addEventListener('change', onChange, false);
- function onChange() {
- visible.style.display = 'none';
- visible = document.getElementById(files.value);
- visible.style.display = 'block';
- window.scrollTo(0, 0);
- }
- })();
- </script>
-</html>
-`
diff --git a/cmd/cover/testdata/main.go b/cmd/cover/testdata/main.go
deleted file mode 100644
index 6ed39c4f2..000000000
--- a/cmd/cover/testdata/main.go
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Test runner for coverage test. This file is not coverage-annotated; test.go is.
-// It knows the coverage counter is called "coverTest".
-
-package main
-
-import (
- "fmt"
- "os"
-)
-
-func main() {
- testAll()
- verify()
-}
-
-type block struct {
- count uint32
- line uint32
-}
-
-var counters = make(map[block]bool)
-
-// check records the location and expected value for a counter.
-func check(line, count uint32) {
- b := block{
- count,
- line,
- }
- counters[b] = true
-}
-
-// checkVal is a version of check that returns its extra argument,
-// so it can be used in conditionals.
-func checkVal(line, count uint32, val int) int {
- b := block{
- count,
- line,
- }
- counters[b] = true
- return val
-}
-
-var PASS = true
-
-// verify checks the expected counts against the actual. It runs after the test has completed.
-func verify() {
- for b := range counters {
- got, index := count(b.line)
- if b.count == anything && got != 0 {
- got = anything
- }
- if got != b.count {
- fmt.Fprintf(os.Stderr, "test_go:%d expected count %d got %d [counter %d]\n", b.line, b.count, got, index)
- PASS = false
- }
- }
- verifyPanic()
- if !PASS {
- fmt.Fprintf(os.Stderr, "FAIL\n")
- os.Exit(2)
- }
-}
-
-// verifyPanic is a special check for the known counter that should be
-// after the panic call in testPanic.
-func verifyPanic() {
- if coverTest.Count[panicIndex-1] != 1 {
- // Sanity check for test before panic.
- fmt.Fprintf(os.Stderr, "bad before panic")
- PASS = false
- }
- if coverTest.Count[panicIndex] != 0 {
- fmt.Fprintf(os.Stderr, "bad at panic: %d should be 0\n", coverTest.Count[panicIndex])
- PASS = false
- }
- if coverTest.Count[panicIndex+1] != 1 {
- fmt.Fprintf(os.Stderr, "bad after panic")
- PASS = false
- }
-}
-
-// count returns the count and index for the counter at the specified line.
-func count(line uint32) (uint32, int) {
- // Linear search is fine. Choose perfect fit over approximate.
- // We can have a closing brace for a range on the same line as a condition for an "else if"
- // and we don't want that brace to steal the count for the condition on the "if".
- // Therefore we test for a perfect (lo==line && hi==line) match, but if we can't
- // find that we take the first imperfect match.
- index := -1
- indexLo := uint32(1e9)
- for i := range coverTest.Count {
- lo, hi := coverTest.Pos[3*i], coverTest.Pos[3*i+1]
- if lo == line && line == hi {
- return coverTest.Count[i], i
- }
- // Choose the earliest match (the counters are in unpredictable order).
- if lo <= line && line <= hi && indexLo > lo {
- index = i
- indexLo = lo
- }
- }
- if index == -1 {
- fmt.Fprintln(os.Stderr, "cover_test: no counter for line", line)
- PASS = false
- return 0, 0
- }
- return coverTest.Count[index], index
-}
diff --git a/cmd/cover/testdata/test.go b/cmd/cover/testdata/test.go
deleted file mode 100644
index 9013950a2..000000000
--- a/cmd/cover/testdata/test.go
+++ /dev/null
@@ -1,218 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This program is processed by the cover command, and then testAll is called.
-// The test driver in main.go can then compare the coverage statistics with expectation.
-
-// The word LINE is replaced by the line number in this file. When the file is executed,
-// the coverage processing has changed the line numbers, so we can't use runtime.Caller.
-
-package main
-
-const anything = 1e9 // Just some unlikely value that means "we got here, don't care how often"
-
-func testAll() {
- testSimple()
- testBlockRun()
- testIf()
- testFor()
- testRange()
- testSwitch()
- testTypeSwitch()
- testSelect1()
- testSelect2()
- testPanic()
- testEmptySwitches()
-}
-
-// The indexes of the counters in testPanic are known to main.go
-const panicIndex = 3
-
-// This test appears first because the index of its counters is known to main.go
-func testPanic() {
- defer func() {
- recover()
- }()
- check(LINE, 1)
- panic("should not get next line")
- check(LINE, 0) // this is GoCover.Count[panicIndex]
- // The next counter is in testSimple and it will be non-zero.
- // If the panic above does not trigger a counter, the test will fail
- // because GoCover.Count[panicIndex] will be the one in testSimple.
-}
-
-func testSimple() {
- check(LINE, 1)
-}
-
-func testIf() {
- if true {
- check(LINE, 1)
- } else {
- check(LINE, 0)
- }
- if false {
- check(LINE, 0)
- } else {
- check(LINE, 1)
- }
- for i := 0; i < 3; i++ {
- if checkVal(LINE, 3, i) <= 2 {
- check(LINE, 3)
- }
- if checkVal(LINE, 3, i) <= 1 {
- check(LINE, 2)
- }
- if checkVal(LINE, 3, i) <= 0 {
- check(LINE, 1)
- }
- }
- for i := 0; i < 3; i++ {
- if checkVal(LINE, 3, i) <= 1 {
- check(LINE, 2)
- } else {
- check(LINE, 1)
- }
- }
- for i := 0; i < 3; i++ {
- if checkVal(LINE, 3, i) <= 0 {
- check(LINE, 1)
- } else if checkVal(LINE, 2, i) <= 1 {
- check(LINE, 1)
- } else if checkVal(LINE, 1, i) <= 2 {
- check(LINE, 1)
- } else if checkVal(LINE, 0, i) <= 3 {
- check(LINE, 0)
- }
- }
- if func(a, b int) bool { return a < b }(3, 4) {
- check(LINE, 1)
- }
-}
-
-func testFor() {
- for i := 0; i < 10; func() { i++; check(LINE, 10) }() {
- check(LINE, 10)
- }
-}
-
-func testRange() {
- for _, f := range []func(){
- func() { check(LINE, 1) },
- } {
- f()
- check(LINE, 1)
- }
-}
-
-func testBlockRun() {
- check(LINE, 1)
- {
- check(LINE, 1)
- }
- {
- check(LINE, 1)
- }
- check(LINE, 1)
- {
- check(LINE, 1)
- }
- {
- check(LINE, 1)
- }
- check(LINE, 1)
-}
-
-func testSwitch() {
- for i := 0; i < 5; func() { i++; check(LINE, 5) }() {
- switch i {
- case 0:
- check(LINE, 1)
- case 1:
- check(LINE, 1)
- case 2:
- check(LINE, 1)
- default:
- check(LINE, 2)
- }
- }
-}
-
-func testTypeSwitch() {
- var x = []interface{}{1, 2.0, "hi"}
- for _, v := range x {
- switch func() { check(LINE, 3) }(); v.(type) {
- case int:
- check(LINE, 1)
- case float64:
- check(LINE, 1)
- case string:
- check(LINE, 1)
- case complex128:
- check(LINE, 0)
- default:
- check(LINE, 0)
- }
- }
-}
-
-func testSelect1() {
- c := make(chan int)
- go func() {
- for i := 0; i < 1000; i++ {
- c <- i
- }
- }()
- for {
- select {
- case <-c:
- check(LINE, anything)
- case <-c:
- check(LINE, anything)
- default:
- check(LINE, 1)
- return
- }
- }
-}
-
-func testSelect2() {
- c1 := make(chan int, 1000)
- c2 := make(chan int, 1000)
- for i := 0; i < 1000; i++ {
- c1 <- i
- c2 <- i
- }
- for {
- select {
- case <-c1:
- check(LINE, 1000)
- case <-c2:
- check(LINE, 1000)
- default:
- check(LINE, 1)
- return
- }
- }
-}
-
-// Empty control statements created syntax errors. This function
-// is here just to be sure that those are handled correctly now.
-func testEmptySwitches() {
- check(LINE, 1)
- switch 3 {
- }
- check(LINE, 1)
- switch i := (interface{})(3).(int); i {
- }
- check(LINE, 1)
- c := make(chan int)
- go func() {
- check(LINE, 1)
- c <- 1
- select {}
- }()
- <-c
- check(LINE, 1)
-}
diff --git a/cmd/digraph/digraph.go b/cmd/digraph/digraph.go
index 88eb05bf1..0e50ad18d 100644
--- a/cmd/digraph/digraph.go
+++ b/cmd/digraph/digraph.go
@@ -34,7 +34,7 @@ The support commands are:
sccs
all strongly connected components (one per line)
scc <node>
- the set of nodes nodes strongly connected to the specified one
+ the set of nodes strongly connected to the specified one
focus <node>
the subgraph containing all directed paths that pass through the specified node
@@ -69,11 +69,12 @@ Using digraph with existing Go tools:
$ go list -m all | digraph nodes # Operate on the Go package graph.
Show the transitive closure of imports of the digraph tool itself:
+
$ go list -f '{{.ImportPath}} {{join .Imports " "}}' ... | digraph forward golang.org/x/tools/cmd/digraph
Show which clothes (see above) must be donned before a jacket:
- $ digraph reverse jacket
+ $ digraph reverse jacket
*/
package main // import "golang.org/x/tools/cmd/digraph"
@@ -121,7 +122,8 @@ The support commands are:
allpaths <node> <node>
the set of nodes on all paths from the first node to the second
sccs
- all strongly connected components (one per line)
+ all non-trivial strongly connected components, one per line
+ (single-node components are only printed for nodes with self-loops)
scc <node>
the set of nodes nodes strongly connected to the specified one
focus <node>
@@ -157,7 +159,7 @@ func (l nodelist) println(sep string) {
fmt.Fprintln(stdout)
}
-type nodeset map[string]bool // TODO(deklerk): change bool to struct to reduce memory footprint
+type nodeset map[string]bool
func (s nodeset) sort() nodelist {
nodes := make(nodelist, len(s))
@@ -265,6 +267,9 @@ func (g graph) sccs() []nodeset {
if !seen[top] {
scc = make(nodeset)
rvisit(top)
+ if len(scc) == 1 && !g[top][top] {
+ continue
+ }
sccs = append(sccs, scc)
}
}
@@ -346,25 +351,34 @@ func parse(rd io.Reader) (graph, error) {
g := make(graph)
var linenum int
- in := bufio.NewScanner(rd)
- for in.Scan() {
+ // We avoid bufio.Scanner as it imposes a (configurable) limit
+ // on line length, whereas Reader.ReadString does not.
+ in := bufio.NewReader(rd)
+ for {
linenum++
+ line, err := in.ReadString('\n')
+ eof := false
+ if err == io.EOF {
+ eof = true
+ } else if err != nil {
+ return nil, err
+ }
// Split into words, honoring double-quotes per Go spec.
- words, err := split(in.Text())
+ words, err := split(line)
if err != nil {
return nil, fmt.Errorf("at line %d: %v", linenum, err)
}
if len(words) > 0 {
g.addEdges(words[0], words[1:]...)
}
- }
- if err := in.Err(); err != nil {
- return nil, err
+ if eof {
+ break
+ }
}
return g, nil
}
-// Overridable for testing purposes.
+// Overridable for redirection.
var stdin io.Reader = os.Stdin
var stdout io.Writer = os.Stdout
@@ -484,9 +498,16 @@ func digraph(cmd string, args []string) error {
if len(args) != 0 {
return fmt.Errorf("usage: digraph sccs")
}
+ buf := new(bytes.Buffer)
+ oldStdout := stdout
+ stdout = buf
for _, scc := range g.sccs() {
scc.sort().println(" ")
}
+ lines := strings.SplitAfter(buf.String(), "\n")
+ sort.Strings(lines)
+ stdout = oldStdout
+ io.WriteString(stdout, strings.Join(lines, ""))
case "scc":
if len(args) != 1 {
@@ -546,9 +567,8 @@ func digraph(cmd string, args []string) error {
// spaces, but Go-style double-quoted string literals are also supported.
// (This approximates the behaviour of the Bourne shell.)
//
-// `one "two three"` -> ["one" "two three"]
-// `a"\n"b` -> ["a\nb"]
-//
+// `one "two three"` -> ["one" "two three"]
+// `a"\n"b` -> ["a\nb"]
func split(line string) ([]string, error) {
var (
words []string
@@ -605,7 +625,6 @@ func split(line string) ([]string, error) {
// its length is returned.
//
// TODO(adonovan): move this into a strconv-like utility package.
-//
func quotedLength(input string) (n int, ok bool) {
var offset int
diff --git a/cmd/digraph/digraph_test.go b/cmd/digraph/digraph_test.go
index 1746fcaa6..60b8e75eb 100644
--- a/cmd/digraph/digraph_test.go
+++ b/cmd/digraph/digraph_test.go
@@ -27,6 +27,7 @@ a b c
b d
c d
d c
+e e
`
for _, test := range []struct {
@@ -41,9 +42,10 @@ d c
{"transpose", g1, "transpose", nil, "belt pants\njacket sweater\npants shorts\nshoes pants\nshoes socks\nsweater shirt\ntie shirt\n"},
{"forward", g1, "forward", []string{"socks"}, "shoes\nsocks\n"},
{"forward multiple args", g1, "forward", []string{"socks", "sweater"}, "jacket\nshoes\nsocks\nsweater\n"},
- {"scss", g2, "sccs", nil, "a\nb\nc d\n"},
+ {"scss", g2, "sccs", nil, "c d\ne\n"},
{"scc", g2, "scc", []string{"d"}, "c\nd\n"},
{"succs", g2, "succs", []string{"a"}, "b\nc\n"},
+ {"succs-long-token", g2 + "x " + strings.Repeat("x", 96*1024), "succs", []string{"x"}, strings.Repeat("x", 96*1024) + "\n"},
{"preds", g2, "preds", []string{"c"}, "a\nd\n"},
{"preds multiple args", g2, "preds", []string{"c", "d"}, "a\nb\nc\nd\n"},
} {
diff --git a/cmd/file2fuzz/main.go b/cmd/file2fuzz/main.go
index 350ed0af6..ed212cb9d 100644
--- a/cmd/file2fuzz/main.go
+++ b/cmd/file2fuzz/main.go
@@ -17,7 +17,6 @@
// argument is specified it may be a file path or an existing directory, if there are
// multiple inputs specified it must be a directory. If a directory is provided
// the name of the file will be the SHA-256 hash of its contents.
-//
package main
import (
diff --git a/cmd/fiximports/main.go b/cmd/fiximports/main.go
index f572a15c5..8eeacd1ed 100644
--- a/cmd/fiximports/main.go
+++ b/cmd/fiximports/main.go
@@ -6,8 +6,7 @@
// import path for packages that have an "import comment" as defined by
// https://golang.org/s/go14customimport.
//
-//
-// Background
+// # Background
//
// The Go 1 custom import path mechanism lets the maintainer of a
// package give it a stable name by which clients may import and "go
@@ -28,15 +27,14 @@
// does not match the path of the enclosing package relative to
// GOPATH/src:
//
-// $ grep ^package $GOPATH/src/github.com/bob/vanity/foo/foo.go
-// package foo // import "vanity.com/foo"
+// $ grep ^package $GOPATH/src/github.com/bob/vanity/foo/foo.go
+// package foo // import "vanity.com/foo"
//
// The error from "go build" indicates that the package canonically
// known as "vanity.com/foo" is locally installed under the
// non-canonical name "github.com/bob/vanity/foo".
//
-//
-// Usage
+// # Usage
//
// When a package that you depend on introduces a custom import comment,
// and your workspace imports it by the non-canonical name, your build
@@ -66,7 +64,6 @@
//
// To see the changes fiximports would make without applying them, use
// the -n flag.
-//
package main
import (
@@ -75,11 +72,9 @@ import (
"flag"
"fmt"
"go/ast"
- "go/build"
"go/format"
"go/parser"
"go/token"
- exec "golang.org/x/sys/execabs"
"io"
"io/ioutil"
"log"
@@ -89,6 +84,8 @@ import (
"sort"
"strconv"
"strings"
+
+ exec "golang.org/x/sys/execabs"
)
// flags
@@ -140,16 +137,16 @@ type canonicalName struct{ path, name string }
// Invariant: a false result implies an error was already printed.
func fiximports(packages ...string) bool {
// importedBy is the transpose of the package import graph.
- importedBy := make(map[string]map[*build.Package]bool)
+ importedBy := make(map[string]map[*listPackage]bool)
// addEdge adds an edge to the import graph.
- addEdge := func(from *build.Package, to string) {
+ addEdge := func(from *listPackage, to string) {
if to == "C" || to == "unsafe" {
return // fake
}
pkgs := importedBy[to]
if pkgs == nil {
- pkgs = make(map[*build.Package]bool)
+ pkgs = make(map[*listPackage]bool)
importedBy[to] = pkgs
}
pkgs[from] = true
@@ -165,7 +162,7 @@ func fiximports(packages ...string) bool {
// packageName maps each package's path to its name.
packageName := make(map[string]string)
for _, p := range pkgs {
- packageName[p.ImportPath] = p.Package.Name
+ packageName[p.ImportPath] = p.Name
}
// canonical maps each non-canonical package path to
@@ -210,21 +207,21 @@ func fiximports(packages ...string) bool {
}
for _, imp := range p.Imports {
- addEdge(&p.Package, imp)
+ addEdge(p, imp)
}
for _, imp := range p.TestImports {
- addEdge(&p.Package, imp)
+ addEdge(p, imp)
}
for _, imp := range p.XTestImports {
- addEdge(&p.Package, imp)
+ addEdge(p, imp)
}
// Does package have an explicit import comment?
if p.ImportComment != "" {
if p.ImportComment != p.ImportPath {
canonical[p.ImportPath] = canonicalName{
- path: p.Package.ImportComment,
- name: p.Package.Name,
+ path: p.ImportComment,
+ name: p.Name,
}
}
} else {
@@ -276,7 +273,7 @@ func fiximports(packages ...string) bool {
// Find all clients (direct importers) of canonical packages.
// These are the packages that need fixing up.
- clients := make(map[*build.Package]bool)
+ clients := make(map[*listPackage]bool)
for path := range canonical {
for client := range importedBy[path] {
clients[client] = true
@@ -353,7 +350,7 @@ func fiximports(packages ...string) bool {
}
// Invariant: false result => error already printed.
-func rewritePackage(client *build.Package, canonical map[string]canonicalName) bool {
+func rewritePackage(client *listPackage, canonical map[string]canonicalName) bool {
ok := true
used := make(map[string]bool)
@@ -392,7 +389,7 @@ func rewritePackage(client *build.Package, canonical map[string]canonicalName) b
return ok
}
-// rewrite reads, modifies, and writes filename, replacing all imports
+// rewriteFile reads, modifies, and writes filename, replacing all imports
// of packages P in canonical by canonical[P].
// It records in used which canonical packages were imported.
// used[P]=="" indicates that P was imported but its canonical path is unknown.
@@ -453,11 +450,20 @@ func rewriteFile(filename string, canonical map[string]canonicalName, used map[s
return nil
}
-// listPackage is a copy of cmd/go/list.Package.
-// It has more fields than build.Package and we need some of them.
+// listPackage corresponds to the output of go list -json,
+// but only the fields we need.
type listPackage struct {
- build.Package
- Error *packageError // error loading package
+ Name string
+ Dir string
+ ImportPath string
+ GoFiles []string
+ TestGoFiles []string
+ XTestGoFiles []string
+ Imports []string
+ TestImports []string
+ XTestImports []string
+ ImportComment string
+ Error *packageError // error loading package
}
// A packageError describes an error loading information about a package.
diff --git a/cmd/fiximports/main_test.go b/cmd/fiximports/main_test.go
index bbc4a2eb2..ebbd7520d 100644
--- a/cmd/fiximports/main_test.go
+++ b/cmd/fiximports/main_test.go
@@ -55,6 +55,9 @@ func init() {
}
func TestFixImports(t *testing.T) {
+ if os.Getenv("GO_BUILDER_NAME") == "plan9-arm" {
+ t.Skipf("skipping test that times out on plan9-arm; see https://go.dev/issue/50775")
+ }
testenv.NeedsTool(t, "go")
defer func() {
diff --git a/cmd/godex/doc.go b/cmd/godex/doc.go
index ceb7c2fe1..3c2112ebf 100644
--- a/cmd/godex/doc.go
+++ b/cmd/godex/doc.go
@@ -62,7 +62,6 @@
// (uncompiled) source code (not yet implemented)
//
// If no -s argument is provided, godex will try to find a matching source.
-//
package main // import "golang.org/x/tools/cmd/godex"
// BUG(gri): support for -s=source is not yet implemented
diff --git a/cmd/godoc/doc.go b/cmd/godoc/doc.go
index 279b2b1bb..91d015046 100644
--- a/cmd/godoc/doc.go
+++ b/cmd/godoc/doc.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
/*
-
Godoc extracts and generates documentation for Go programs.
It runs as a web server and presents the documentation as a
diff --git a/cmd/godoc/godoc_test.go b/cmd/godoc/godoc_test.go
index 76568c31d..3e91ac6f9 100644
--- a/cmd/godoc/godoc_test.go
+++ b/cmd/godoc/godoc_test.go
@@ -2,10 +2,11 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package main_test
+package main
import (
"bytes"
+ "context"
"fmt"
"go/build"
"io/ioutil"
@@ -13,10 +14,10 @@ import (
"net/http"
"os"
"os/exec"
- "path/filepath"
"regexp"
"runtime"
"strings"
+ "sync"
"testing"
"time"
@@ -24,42 +25,39 @@ import (
"golang.org/x/tools/internal/testenv"
)
-// buildGodoc builds the godoc executable.
-// It returns its path, and a cleanup function.
-//
-// TODO(adonovan): opt: do this at most once, and do the cleanup
-// exactly once. How though? There's no atexit.
-func buildGodoc(t *testing.T) (bin string, cleanup func()) {
- t.Helper()
-
- if runtime.GOARCH == "arm" {
- t.Skip("skipping test on arm platforms; too slow")
- }
- if runtime.GOOS == "android" {
- t.Skipf("the dependencies are not available on android")
+func TestMain(m *testing.M) {
+ if os.Getenv("GODOC_TEST_IS_GODOC") != "" {
+ main()
+ os.Exit(0)
}
- testenv.NeedsTool(t, "go")
- tmp, err := ioutil.TempDir("", "godoc-regtest-")
- if err != nil {
- t.Fatal(err)
- }
- defer func() {
- if cleanup == nil { // probably, go build failed.
- os.RemoveAll(tmp)
- }
- }()
+ // Inform subprocesses that they should run the cmd/godoc main instead of
+ // running tests. It's a close approximation to building and running the real
+ // command, and much less complicated and expensive to build and clean up.
+ os.Setenv("GODOC_TEST_IS_GODOC", "1")
- bin = filepath.Join(tmp, "godoc")
- if runtime.GOOS == "windows" {
- bin += ".exe"
- }
- cmd := exec.Command("go", "build", "-o", bin)
- if err := cmd.Run(); err != nil {
- t.Fatalf("Building godoc: %v", err)
+ os.Exit(m.Run())
+}
+
+var exe struct {
+ path string
+ err error
+ once sync.Once
+}
+
+func godocPath(t *testing.T) string {
+ switch runtime.GOOS {
+ case "js", "ios":
+ t.Skipf("skipping test that requires exec")
}
- return bin, func() { os.RemoveAll(tmp) }
+ exe.once.Do(func() {
+ exe.path, exe.err = os.Executable()
+ })
+ if exe.err != nil {
+ t.Fatal(exe.err)
+ }
+ return exe.path
}
func serverAddress(t *testing.T) string {
@@ -74,60 +72,42 @@ func serverAddress(t *testing.T) string {
return ln.Addr().String()
}
-func waitForServerReady(t *testing.T, cmd *exec.Cmd, addr string) {
- ch := make(chan error, 1)
- go func() { ch <- fmt.Errorf("server exited early: %v", cmd.Wait()) }()
- go waitForServer(t, ch,
+func waitForServerReady(t *testing.T, ctx context.Context, cmd *exec.Cmd, addr string) {
+ waitForServer(t, ctx,
fmt.Sprintf("http://%v/", addr),
"Go Documentation Server",
- 15*time.Second,
false)
- if err := <-ch; err != nil {
- t.Fatal(err)
- }
}
-func waitForSearchReady(t *testing.T, cmd *exec.Cmd, addr string) {
- ch := make(chan error, 1)
- go func() { ch <- fmt.Errorf("server exited early: %v", cmd.Wait()) }()
- go waitForServer(t, ch,
+func waitForSearchReady(t *testing.T, ctx context.Context, cmd *exec.Cmd, addr string) {
+ waitForServer(t, ctx,
fmt.Sprintf("http://%v/search?q=FALLTHROUGH", addr),
"The list of tokens.",
- 2*time.Minute,
false)
- if err := <-ch; err != nil {
- t.Fatal(err)
- }
}
-func waitUntilScanComplete(t *testing.T, addr string) {
- ch := make(chan error)
- go waitForServer(t, ch,
+func waitUntilScanComplete(t *testing.T, ctx context.Context, addr string) {
+ waitForServer(t, ctx,
fmt.Sprintf("http://%v/pkg", addr),
"Scan is not yet complete",
- 2*time.Minute,
// setting reverse as true, which means this waits
// until the string is not returned in the response anymore
- true,
- )
- if err := <-ch; err != nil {
- t.Fatal(err)
- }
+ true)
}
-const pollInterval = 200 * time.Millisecond
+const pollInterval = 50 * time.Millisecond
-// waitForServer waits for server to meet the required condition.
-// It sends a single error value to ch, unless the test has failed.
-// The error value is nil if the required condition was met within
-// timeout, or non-nil otherwise.
-func waitForServer(t *testing.T, ch chan<- error, url, match string, timeout time.Duration, reverse bool) {
- deadline := time.Now().Add(timeout)
- for time.Now().Before(deadline) {
- time.Sleep(pollInterval)
- if t.Failed() {
- return
+// waitForServer waits for server to meet the required condition,
+// failing the test if ctx is canceled before that occurs.
+func waitForServer(t *testing.T, ctx context.Context, url, match string, reverse bool) {
+ start := time.Now()
+ for {
+ if ctx.Err() != nil {
+ t.Helper()
+ t.Fatalf("server failed to respond in %v", time.Since(start))
}
+
+ time.Sleep(pollInterval)
res, err := http.Get(url)
if err != nil {
continue
@@ -140,11 +120,9 @@ func waitForServer(t *testing.T, ch chan<- error, url, match string, timeout tim
switch {
case !reverse && bytes.Contains(body, []byte(match)),
reverse && !bytes.Contains(body, []byte(match)):
- ch <- nil
return
}
}
- ch <- fmt.Errorf("server failed to respond in %v", timeout)
}
// hasTag checks whether a given release tag is contained in the current version
@@ -158,24 +136,18 @@ func hasTag(t string) bool {
return false
}
-func killAndWait(cmd *exec.Cmd) {
- cmd.Process.Kill()
- cmd.Process.Wait()
-}
-
func TestURL(t *testing.T) {
if runtime.GOOS == "plan9" {
t.Skip("skipping on plan9; fails to start up quickly enough")
}
- bin, cleanup := buildGodoc(t)
- defer cleanup()
+ bin := godocPath(t)
testcase := func(url string, contents string) func(t *testing.T) {
return func(t *testing.T) {
stdout, stderr := new(bytes.Buffer), new(bytes.Buffer)
args := []string{fmt.Sprintf("-url=%s", url)}
- cmd := exec.Command(bin, args...)
+ cmd := testenv.Command(t, bin, args...)
cmd.Stdout = stdout
cmd.Stderr = stderr
cmd.Args[0] = "godoc"
@@ -205,8 +177,8 @@ func TestURL(t *testing.T) {
// Basic integration test for godoc HTTP interface.
func TestWeb(t *testing.T) {
- bin, cleanup := buildGodoc(t)
- defer cleanup()
+ bin := godocPath(t)
+
for _, x := range packagestest.All {
t.Run(x.Name(), func(t *testing.T) {
testWeb(t, x, bin, false)
@@ -217,17 +189,19 @@ func TestWeb(t *testing.T) {
// Basic integration test for godoc HTTP interface.
func TestWebIndex(t *testing.T) {
if testing.Short() {
- t.Skip("skipping test in -short mode")
+ t.Skip("skipping slow test in -short mode")
}
- bin, cleanup := buildGodoc(t)
- defer cleanup()
+ bin := godocPath(t)
testWeb(t, packagestest.GOPATH, bin, true)
}
// Basic integration test for godoc HTTP interface.
func testWeb(t *testing.T, x packagestest.Exporter, bin string, withIndex bool) {
- if runtime.GOOS == "plan9" {
- t.Skip("skipping on plan9; fails to start up quickly enough")
+ switch runtime.GOOS {
+ case "plan9":
+ t.Skip("skipping on plan9: fails to start up quickly enough")
+ case "android", "ios":
+ t.Skip("skipping on mobile: lacks GOROOT/api in test environment")
}
// Write a fake GOROOT/GOPATH with some third party packages.
@@ -256,23 +230,39 @@ package a; import _ "godoc.test/repo2/a"; const Name = "repo1a"`,
if withIndex {
args = append(args, "-index", "-index_interval=-1s")
}
- cmd := exec.Command(bin, args...)
+ cmd := testenv.Command(t, bin, args...)
cmd.Dir = e.Config.Dir
cmd.Env = e.Config.Env
- cmd.Stdout = os.Stderr
- cmd.Stderr = os.Stderr
+ cmdOut := new(strings.Builder)
+ cmd.Stdout = cmdOut
+ cmd.Stderr = cmdOut
cmd.Args[0] = "godoc"
if err := cmd.Start(); err != nil {
t.Fatalf("failed to start godoc: %s", err)
}
- defer killAndWait(cmd)
+ ctx, cancel := context.WithCancel(context.Background())
+ go func() {
+ err := cmd.Wait()
+ t.Logf("%v: %v", cmd, err)
+ cancel()
+ }()
+ defer func() {
+ // Shut down the server cleanly if possible.
+ if runtime.GOOS == "windows" {
+ cmd.Process.Kill() // Windows doesn't support os.Interrupt.
+ } else {
+ cmd.Process.Signal(os.Interrupt)
+ }
+ <-ctx.Done()
+ t.Logf("server output:\n%s", cmdOut)
+ }()
if withIndex {
- waitForSearchReady(t, cmd, addr)
+ waitForSearchReady(t, ctx, cmd, addr)
} else {
- waitForServerReady(t, cmd, addr)
- waitUntilScanComplete(t, addr)
+ waitForServerReady(t, ctx, cmd, addr)
+ waitUntilScanComplete(t, ctx, addr)
}
tests := []struct {
@@ -454,22 +444,17 @@ func TestNoMainModule(t *testing.T) {
if runtime.GOOS == "plan9" {
t.Skip("skipping on plan9; for consistency with other tests that build godoc binary")
}
- bin, cleanup := buildGodoc(t)
- defer cleanup()
- tempDir, err := ioutil.TempDir("", "godoc-test-")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(tempDir)
+ bin := godocPath(t)
+ tempDir := t.TempDir()
// Run godoc in an empty directory with module mode explicitly on,
// so that 'go env GOMOD' reports os.DevNull.
- cmd := exec.Command(bin, "-url=/")
+ cmd := testenv.Command(t, bin, "-url=/")
cmd.Dir = tempDir
cmd.Env = append(os.Environ(), "GO111MODULE=on")
var stderr bytes.Buffer
cmd.Stderr = &stderr
- err = cmd.Run()
+ err := cmd.Run()
if err != nil {
t.Fatalf("godoc command failed: %v\nstderr=%q", err, stderr.String())
}
diff --git a/cmd/godoc/main.go b/cmd/godoc/main.go
index 352bb4b7a..79dcf3821 100644
--- a/cmd/godoc/main.go
+++ b/cmd/godoc/main.go
@@ -21,6 +21,7 @@ import (
"bytes"
"context"
"encoding/json"
+ "errors"
_ "expvar" // to serve /debug/vars
"flag"
"fmt"
@@ -46,7 +47,6 @@ import (
"golang.org/x/tools/godoc/vfs/mapfs"
"golang.org/x/tools/godoc/vfs/zipfs"
"golang.org/x/tools/internal/gocommand"
- "golang.org/x/xerrors"
)
const defaultAddr = "localhost:6060" // default webserver address
@@ -368,12 +368,11 @@ func main() {
//
// GOMOD is documented at https://golang.org/cmd/go/#hdr-Environment_variables:
//
-// The absolute path to the go.mod of the main module,
-// or the empty string if not using modules.
-//
+// The absolute path to the go.mod of the main module,
+// or the empty string if not using modules.
func goMod() (string, error) {
out, err := exec.Command("go", "env", "-json", "GOMOD").Output()
- if ee := (*exec.ExitError)(nil); xerrors.As(err, &ee) {
+ if ee := (*exec.ExitError)(nil); errors.As(err, &ee) {
return "", fmt.Errorf("go command exited unsuccessfully: %v\n%s", ee.ProcessState.String(), ee.Stderr)
} else if err != nil {
return "", err
@@ -406,7 +405,7 @@ func fillModuleCache(w io.Writer, goMod string) {
cmd.Stdout = &out
cmd.Stderr = w
err := cmd.Run()
- if ee := (*exec.ExitError)(nil); xerrors.As(err, &ee) && ee.ExitCode() == 1 {
+ if ee := (*exec.ExitError)(nil); errors.As(err, &ee) && ee.ExitCode() == 1 {
// Exit code 1 from this command means there were some
// non-empty Error values in the output. Print them to w.
fmt.Fprintf(w, "documentation for some packages is not shown:\n")
@@ -450,7 +449,7 @@ func buildList(goMod string) ([]mod, error) {
}
out, err := exec.Command("go", "list", "-m", "-json", "all").Output()
- if ee := (*exec.ExitError)(nil); xerrors.As(err, &ee) {
+ if ee := (*exec.ExitError)(nil); errors.As(err, &ee) {
return nil, fmt.Errorf("go command exited unsuccessfully: %v\n%s", ee.ProcessState.String(), ee.Stderr)
} else if err != nil {
return nil, err
@@ -483,7 +482,6 @@ func buildList(goMod string) ([]mod, error) {
// workspaces are bound at their roots, but scales poorly in the
// general case. It should be replaced by a more direct solution
// for determining whether a package is third party or not.
-//
type moduleFS struct{ vfs.FileSystem }
func (moduleFS) RootType(path string) vfs.RootType {
diff --git a/cmd/goimports/doc.go b/cmd/goimports/doc.go
index 5a5b9005f..18a3ad448 100644
--- a/cmd/goimports/doc.go
+++ b/cmd/goimports/doc.go
@@ -3,29 +3,33 @@
// license that can be found in the LICENSE file.
/*
-
Command goimports updates your Go import lines,
adding missing ones and removing unreferenced ones.
- $ go install golang.org/x/tools/cmd/goimports@latest
+ $ go install golang.org/x/tools/cmd/goimports@latest
In addition to fixing imports, goimports also formats
your code in the same style as gofmt so it can be used
as a replacement for your editor's gofmt-on-save hook.
For emacs, make sure you have the latest go-mode.el:
- https://github.com/dominikh/go-mode.el
+
+ https://github.com/dominikh/go-mode.el
+
Then in your .emacs file:
- (setq gofmt-command "goimports")
- (add-hook 'before-save-hook 'gofmt-before-save)
+
+ (setq gofmt-command "goimports")
+ (add-hook 'before-save-hook 'gofmt-before-save)
For vim, set "gofmt_command" to "goimports":
- https://golang.org/change/39c724dd7f252
- https://golang.org/wiki/IDEsAndTextEditorPlugins
- etc
+
+ https://golang.org/change/39c724dd7f252
+ https://golang.org/wiki/IDEsAndTextEditorPlugins
+ etc
For GoSublime, follow the steps described here:
- http://michaelwhatcott.com/gosublime-goimports/
+
+ http://michaelwhatcott.com/gosublime-goimports/
For other editors, you probably know what to do.
@@ -39,9 +43,8 @@ working and see what goimports is doing.
File bugs or feature requests at:
- https://golang.org/issues/new?title=x/tools/cmd/goimports:+
+ https://golang.org/issues/new?title=x/tools/cmd/goimports:+
Happy hacking!
-
*/
package main // import "golang.org/x/tools/cmd/goimports"
diff --git a/cmd/gorename/main.go b/cmd/gorename/main.go
index e59abd758..98625fff6 100644
--- a/cmd/gorename/main.go
+++ b/cmd/gorename/main.go
@@ -8,7 +8,6 @@
// Run with -help for usage information, or view the Usage constant in
// package golang.org/x/tools/refactor/rename, which contains most of
// the implementation.
-//
package main // import "golang.org/x/tools/cmd/gorename"
import (
diff --git a/cmd/gotype/gotype.go b/cmd/gotype/gotype.go
index 22fe4aa9d..08b52057f 100644
--- a/cmd/gotype/gotype.go
+++ b/cmd/gotype/gotype.go
@@ -41,9 +41,11 @@ checking packages containing imports with relative import paths
files to include for such packages.
Usage:
+
gotype [flags] [path...]
The flags are:
+
-t
include local test files in a directory (ignored if -x is provided)
-x
@@ -56,6 +58,7 @@ The flags are:
compiler used for installed packages (gc, gccgo, or source); default: source
Flags controlling additional output:
+
-ast
print AST (forces -seq)
-trace
@@ -81,7 +84,6 @@ cmd/compile:
To verify the output of a pipe:
echo "package foo" | gotype
-
*/
package main
diff --git a/cmd/goyacc/doc.go b/cmd/goyacc/doc.go
index 03ffee7b6..5eb27f16a 100644
--- a/cmd/goyacc/doc.go
+++ b/cmd/goyacc/doc.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
/*
-
Goyacc is a version of yacc for Go.
It is written in Go and generates parsers written in Go.
@@ -65,6 +64,5 @@ goyacc sets the prefix, by default yy, that begins the names of
symbols, including types, the parser, and the lexer, generated and
referenced by yacc's generated code. Setting it to distinct values
allows multiple grammars to be placed in a single package.
-
*/
package main
diff --git a/cmd/goyacc/yacc.go b/cmd/goyacc/yacc.go
index 70d01f0d5..948e50104 100644
--- a/cmd/goyacc/yacc.go
+++ b/cmd/goyacc/yacc.go
@@ -742,9 +742,7 @@ outer:
}
}
-//
// allocate enough room to hold another production
-//
func moreprod() {
n := len(prdptr)
if nprod >= n {
@@ -763,10 +761,8 @@ func moreprod() {
}
}
-//
// define s to be a terminal if nt==0
// or a nonterminal if nt==1
-//
func defin(nt int, s string) int {
val := 0
if nt != 0 {
@@ -1007,9 +1003,7 @@ func getword(c rune) {
ungetrune(finput, c)
}
-//
// determine the type of a symbol
-//
func fdtype(t int) int {
var v int
var s string
@@ -1049,9 +1043,7 @@ func chfind(t int, s string) int {
return defin(t, s)
}
-//
// copy the union declaration to the output, and the define file if present
-//
func cpyunion() {
if !lflag {
@@ -1086,10 +1078,8 @@ out:
fmt.Fprintf(ftable, "\n\n")
}
-//
// saves code between %{ and %}
// adds an import for __fmt__ the first time
-//
func cpycode() {
lno := lineno
@@ -1122,11 +1112,9 @@ func cpycode() {
errorf("eof before %%}")
}
-//
// emits code saved up from between %{ and %}
// called by cpycode
// adds an import for __yyfmt__ after the package clause
-//
func emitcode(code []rune, lineno int) {
for i, line := range lines(code) {
writecode(line)
@@ -1140,9 +1128,7 @@ func emitcode(code []rune, lineno int) {
}
}
-//
// does this line look like a package clause? not perfect: might be confused by early comments.
-//
func isPackageClause(line []rune) bool {
line = skipspace(line)
@@ -1184,9 +1170,7 @@ func isPackageClause(line []rune) bool {
return false
}
-//
// skip initial spaces
-//
func skipspace(line []rune) []rune {
for len(line) > 0 {
if line[0] != ' ' && line[0] != '\t' {
@@ -1197,9 +1181,7 @@ func skipspace(line []rune) []rune {
return line
}
-//
// break code into lines
-//
func lines(code []rune) [][]rune {
l := make([][]rune, 0, 100)
for len(code) > 0 {
@@ -1216,19 +1198,15 @@ func lines(code []rune) [][]rune {
return l
}
-//
// writes code to ftable
-//
func writecode(code []rune) {
for _, r := range code {
ftable.WriteRune(r)
}
}
-//
// skip over comments
// skipcom is called after reading a '/'
-//
func skipcom() int {
c := getrune(finput)
if c == '/' {
@@ -1268,9 +1246,7 @@ l1:
return nl
}
-//
// copy action to the next ; or closing }
-//
func cpyact(curprod []int, max int) {
if !lflag {
@@ -1488,9 +1464,7 @@ func openup() {
}
-//
// return a pointer to the name of symbol i
-//
func symnam(i int) string {
var s string
@@ -1502,20 +1476,16 @@ func symnam(i int) string {
return s
}
-//
// set elements 0 through n-1 to c
-//
func aryfil(v []int, n, c int) {
for i := 0; i < n; i++ {
v[i] = c
}
}
-//
// compute an array with the beginnings of productions yielding given nonterminals
// The array pres points to these lists
// the array pyield has the lists: the total size is only NPROD+1
-//
func cpres() {
pres = make([][][]int, nnonter+1)
curres := make([][]int, nprod)
@@ -1553,10 +1523,8 @@ func cpres() {
}
}
-//
// mark nonterminals which derive the empty string
// also, look for nonterminals which don't derive any token strings
-//
func cempty() {
var i, p, np int
var prd []int
@@ -1639,9 +1607,7 @@ again:
}
}
-//
// compute an array with the first of nonterminals
-//
func cpfir() {
var s, n, p, np, ch, i int
var curres [][]int
@@ -1707,9 +1673,7 @@ func cpfir() {
}
}
-//
// generate the states
-//
func stagen() {
// initialize
nstate = 0
@@ -1799,9 +1763,7 @@ func stagen() {
}
}
-//
// generate the closure of state i
-//
func closure(i int) {
zzclose++
@@ -1931,9 +1893,7 @@ func closure(i int) {
}
}
-//
// sorts last state,and sees if it equals earlier ones. returns state number
-//
func state(c int) int {
zzstate++
p1 := pstate[nstate]
@@ -2046,9 +2006,7 @@ func putitem(p Pitem, set Lkset) {
pstate[nstate+1] = j
}
-//
// creates output string for item pointed to by pp
-//
func writem(pp Pitem) string {
var i int
@@ -2082,9 +2040,7 @@ func writem(pp Pitem) string {
return q
}
-//
// pack state i from temp1 into amem
-//
func apack(p []int, n int) int {
//
// we don't need to worry about checking because
@@ -2149,9 +2105,7 @@ nextk:
return 0
}
-//
// print the output for the states
-//
func output() {
var c, u, v int
@@ -2240,12 +2194,10 @@ func output() {
fmt.Fprintf(ftable, "const %sPrivate = %v\n", prefix, PRIVATE)
}
-//
// decide a shift/reduce conflict by precedence.
// r is a rule number, t a token number
// the conflict is in state s
// temp1[t] is changed to reflect the action
-//
func precftn(r, t, s int) {
action := NOASC
@@ -2276,10 +2228,8 @@ func precftn(r, t, s int) {
}
}
-//
// output state i
// temp1 has the actions, lastred the default
-//
func addActions(act []int, i int) []int {
var p, p1 int
@@ -2368,9 +2318,7 @@ func addActions(act []int, i int) []int {
return act
}
-//
// writes state i
-//
func wrstate(i int) {
var j0, j1, u int
var pp, qq int
@@ -2440,9 +2388,7 @@ func wrstate(i int) {
}
}
-//
// output the gotos for the nontermninals
-//
func go2out() {
for i := 1; i <= nnonter; i++ {
go2gen(i)
@@ -2505,9 +2451,7 @@ func go2out() {
}
}
-//
// output the gotos for nonterminal c
-//
func go2gen(c int) {
var i, cc, p, q int
@@ -2559,12 +2503,10 @@ func go2gen(c int) {
}
}
-//
// in order to free up the mem and amem arrays for the optimizer,
// and still be able to output yyr1, etc., after the sizes of
// the action array is known, we hide the nonterminals
// derived by productions in levprd.
-//
func hideprod() {
nred := 0
levprd[0] = 0
@@ -2678,9 +2620,7 @@ func callopt() {
osummary()
}
-//
// finds the next i
-//
func nxti() int {
max := 0
maxi := 0
@@ -2817,10 +2757,8 @@ nextn:
errorf("Error; failure to place state %v", i)
}
-//
// this version is for limbo
// write out the optimized parser
-//
func aoutput() {
ftable.WriteRune('\n')
fmt.Fprintf(ftable, "const %sLast = %v\n", prefix, maxa+1)
@@ -2829,9 +2767,7 @@ func aoutput() {
arout("Pgo", pgo, nnonter+1)
}
-//
// put out other arrays, copy the parsers
-//
func others() {
var i, j int
@@ -3066,9 +3002,7 @@ func arout(s string, v []int, n int) {
arrayOutColumns(s, v[:n], 10, true)
}
-//
// output the summary on y.output
-//
func summary() {
if foutput != nil {
fmt.Fprintf(foutput, "\n%v terminals, %v nonterminals\n", ntokens, nnonter+1)
@@ -3096,9 +3030,7 @@ func summary() {
}
}
-//
// write optimizer summary
-//
func osummary() {
if foutput == nil {
return
@@ -3115,9 +3047,7 @@ func osummary() {
fmt.Fprintf(foutput, "maximum spread: %v, maximum offset: %v\n", maxspr, maxoff)
}
-//
// copies and protects "'s in q
-//
func chcopy(q string) string {
s := ""
i := 0
@@ -3142,10 +3072,8 @@ func setbit(set Lkset, bit int) { set[bit>>5] |= (1 << uint(bit&31)) }
func mkset() Lkset { return make([]int, tbitset) }
-//
// set a to the union of a and b
// return 1 if b is not a subset of a, 0 otherwise
-//
func setunion(a, b []int) int {
sub := 0
for i := 0; i < tbitset; i++ {
@@ -3173,9 +3101,7 @@ func prlook(p Lkset) {
fmt.Fprintf(foutput, "}")
}
-//
// utility routines
-//
var peekrune rune
func isdigit(c rune) bool { return c >= '0' && c <= '9' }
@@ -3184,10 +3110,8 @@ func isword(c rune) bool {
return c >= 0xa0 || c == '_' || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
}
-//
// return 1 if 2 arrays are equal
// return 0 if not equal
-//
func aryeq(a []int, b []int) int {
n := len(a)
if len(b) != n {
@@ -3252,9 +3176,7 @@ func create(s string) *bufio.Writer {
return bufio.NewWriter(fo)
}
-//
// write out error comment
-//
func lerrorf(lineno int, s string, v ...interface{}) {
nerrors++
fmt.Fprintf(stderr, s, v...)
diff --git a/cmd/guru/TODO b/cmd/guru/TODO
new file mode 100644
index 000000000..61bf1519e
--- /dev/null
+++ b/cmd/guru/TODO
@@ -0,0 +1,11 @@
+-*- text -*-
+
+Guru to-do list
+===========================
+
+Generics:
+- decide on whether to support generics in guru
+- decide on whether to instantiate generics in ssa (go.dev/issue/52503)
+
+MISC:
+- test support for *ssa.SliceToArrayPointer instructions (go.dev/issue/47326) \ No newline at end of file
diff --git a/cmd/guru/callers.go b/cmd/guru/callers.go
index b39b07869..8afefba33 100644
--- a/cmd/guru/callers.go
+++ b/cmd/guru/callers.go
@@ -18,7 +18,6 @@ import (
// The callers function reports the possible callers of the function
// immediately enclosing the specified source location.
-//
func callers(q *Query) error {
lconf := loader.Config{Build: q.Build}
diff --git a/cmd/guru/callstack.go b/cmd/guru/callstack.go
index 10939ddfb..c3d6d6ee7 100644
--- a/cmd/guru/callstack.go
+++ b/cmd/guru/callstack.go
@@ -25,7 +25,6 @@ import (
//
// TODO(adonovan): permit user to specify a starting point other than
// the analysis root.
-//
func callstack(q *Query) error {
fset := token.NewFileSet()
lconf := loader.Config{Fset: fset, Build: q.Build}
diff --git a/cmd/guru/describe.go b/cmd/guru/describe.go
index 41189f662..0e4964428 100644
--- a/cmd/guru/describe.go
+++ b/cmd/guru/describe.go
@@ -26,7 +26,6 @@ import (
// - its syntactic category
// - the definition of its referent (for identifiers) [now redundant]
// - its type, fields, and methods (for an expression or type expression)
-//
func describe(q *Query) error {
lconf := loader.Config{Build: q.Build}
allowErrors(&lconf)
@@ -106,15 +105,15 @@ const (
)
// findInterestingNode classifies the syntax node denoted by path as one of:
-// - an expression, part of an expression or a reference to a constant
-// or variable;
-// - a type, part of a type, or a reference to a named type;
-// - a statement, part of a statement, or a label referring to a statement;
-// - part of a package declaration or import spec.
-// - none of the above.
+// - an expression, part of an expression or a reference to a constant
+// or variable;
+// - a type, part of a type, or a reference to a named type;
+// - a statement, part of a statement, or a label referring to a statement;
+// - part of a package declaration or import spec.
+// - none of the above.
+//
// and returns the most "interesting" associated node, which may be
// the same node, an ancestor or a descendent.
-//
func findInterestingNode(pkginfo *loader.PackageInfo, path []ast.Node) ([]ast.Node, action) {
// TODO(adonovan): integrate with go/types/stdlib_test.go and
// apply this to every AST node we can find to make sure it
diff --git a/cmd/guru/freevars.go b/cmd/guru/freevars.go
index a36d1f80b..b079a3ef4 100644
--- a/cmd/guru/freevars.go
+++ b/cmd/guru/freevars.go
@@ -28,7 +28,6 @@ import (
// Depending on where the resulting function abstraction will go,
// these might be interesting. Perhaps group the results into three
// bands.
-//
func freevars(q *Query) error {
lconf := loader.Config{Build: q.Build}
allowErrors(&lconf)
diff --git a/cmd/guru/guru.go b/cmd/guru/guru.go
index 2eafca654..7a42aaa3a 100644
--- a/cmd/guru/guru.go
+++ b/cmd/guru/guru.go
@@ -55,12 +55,12 @@ type queryPos struct {
info *loader.PackageInfo // type info for the queried package (nil for fastQueryPos)
}
-// TypeString prints type T relative to the query position.
+// typeString prints type T relative to the query position.
func (qpos *queryPos) typeString(T types.Type) string {
return types.TypeString(T, types.RelativeTo(qpos.info.Pkg))
}
-// ObjectString prints object obj relative to the query position.
+// objectString prints object obj relative to the query position.
func (qpos *queryPos) objectString(obj types.Object) string {
return types.ObjectString(obj, types.RelativeTo(qpos.info.Pkg))
}
@@ -207,12 +207,11 @@ func pkgContainsFile(bp *build.Package, filename string) byte {
return 0 // not found
}
-// ParseQueryPos parses the source query position pos and returns the
+// parseQueryPos parses the source query position pos and returns the
// AST node of the loaded program lprog that it identifies.
// If needExact, it must identify a single AST subtree;
// this is appropriate for queries that allow fairly arbitrary syntax,
// e.g. "describe".
-//
func parseQueryPos(lprog *loader.Program, pos string, needExact bool) (*queryPos, error) {
filename, startOffset, endOffset, err := parsePos(pos)
if err != nil {
@@ -331,16 +330,15 @@ func deref(typ types.Type) types.Type {
// where location is derived from pos.
//
// pos must be one of:
-// - a token.Pos, denoting a position
-// - an ast.Node, denoting an interval
-// - anything with a Pos() method:
-// ssa.Member, ssa.Value, ssa.Instruction, types.Object, pointer.Label, etc.
-// - a QueryPos, denoting the extent of the user's query.
-// - nil, meaning no position at all.
+// - a token.Pos, denoting a position
+// - an ast.Node, denoting an interval
+// - anything with a Pos() method:
+// ssa.Member, ssa.Value, ssa.Instruction, types.Object, pointer.Label, etc.
+// - a QueryPos, denoting the extent of the user's query.
+// - nil, meaning no position at all.
//
// The output format is is compatible with the 'gnu'
// compilation-error-regexp in Emacs' compilation mode.
-//
func fprintf(w io.Writer, fset *token.FileSet, pos interface{}, format string, args ...interface{}) {
var start, end token.Pos
switch pos := pos.(type) {
diff --git a/cmd/guru/implements.go b/cmd/guru/implements.go
index dbdba0412..527e88bd7 100644
--- a/cmd/guru/implements.go
+++ b/cmd/guru/implements.go
@@ -24,7 +24,6 @@ import (
// If the selection is a method, 'implements' displays
// the corresponding methods of the types that would have been reported
// by an implements query on the receiver type.
-//
func implements(q *Query) error {
lconf := loader.Config{Build: q.Build}
allowErrors(&lconf)
diff --git a/cmd/guru/main.go b/cmd/guru/main.go
index 4fde4d2d2..7ad083e45 100644
--- a/cmd/guru/main.go
+++ b/cmd/guru/main.go
@@ -4,10 +4,9 @@
// guru: a tool for answering questions about Go source code.
//
-// http://golang.org/s/using-guru
+// http://golang.org/s/using-guru
//
// Run with -help flag or help subcommand for usage information.
-//
package main // import "golang.org/x/tools/cmd/guru"
import (
diff --git a/cmd/guru/pointsto.go b/cmd/guru/pointsto.go
index 782277f37..e7608442c 100644
--- a/cmd/guru/pointsto.go
+++ b/cmd/guru/pointsto.go
@@ -25,7 +25,6 @@ import (
// reflect.Type expression) and their points-to sets.
//
// All printed sets are sorted to ensure determinism.
-//
func pointsto(q *Query) error {
lconf := loader.Config{Build: q.Build}
@@ -113,7 +112,6 @@ func pointsto(q *Query) error {
// ssaValueForIdent returns the ssa.Value for the ast.Ident whose path
// to the root of the AST is path. isAddr reports whether the
// ssa.Value is the address denoted by the ast.Ident, not its value.
-//
func ssaValueForIdent(prog *ssa.Program, qinfo *loader.PackageInfo, obj types.Object, path []ast.Node) (value ssa.Value, isAddr bool, err error) {
switch obj := obj.(type) {
case *types.Var:
@@ -138,7 +136,6 @@ func ssaValueForIdent(prog *ssa.Program, qinfo *loader.PackageInfo, obj types.Ob
// ssaValueForExpr returns the ssa.Value of the non-ast.Ident
// expression whose path to the root of the AST is path.
-//
func ssaValueForExpr(prog *ssa.Program, qinfo *loader.PackageInfo, path []ast.Node) (value ssa.Value, isAddr bool, err error) {
pkg := prog.Package(qinfo.Pkg)
pkg.SetDebugMode(true)
diff --git a/cmd/guru/pos.go b/cmd/guru/pos.go
index 2e659fe42..9ae4d16b6 100644
--- a/cmd/guru/pos.go
+++ b/cmd/guru/pos.go
@@ -37,7 +37,6 @@ func parseOctothorpDecimal(s string) int {
//
// (Numbers without a '#' prefix are reserved for future use,
// e.g. to indicate line/column positions.)
-//
func parsePos(pos string) (filename string, startOffset, endOffset int, err error) {
if pos == "" {
err = fmt.Errorf("no source position specified")
@@ -71,7 +70,6 @@ func parsePos(pos string) (filename string, startOffset, endOffset int, err erro
// fileOffsetToPos translates the specified file-relative byte offsets
// into token.Pos form. It returns an error if the file was not found
// or the offsets were out of bounds.
-//
func fileOffsetToPos(file *token.File, startOffset, endOffset int) (start, end token.Pos, err error) {
// Range check [start..end], inclusive of both end-points.
@@ -94,7 +92,6 @@ func fileOffsetToPos(file *token.File, startOffset, endOffset int) (start, end t
// sameFile returns true if x and y have the same basename and denote
// the same file.
-//
func sameFile(x, y string) bool {
if filepath.Base(x) == filepath.Base(y) { // (optimisation)
if xi, err := os.Stat(x); err == nil {
diff --git a/cmd/guru/referrers.go b/cmd/guru/referrers.go
index 9d1507157..d75196bf9 100644
--- a/cmd/guru/referrers.go
+++ b/cmd/guru/referrers.go
@@ -617,7 +617,6 @@ func findObject(fset *token.FileSet, info *types.Info, objposn token.Position) t
// same reports whether x and y are identical, or both are PkgNames
// that import the same Package.
-//
func sameObj(x, y types.Object) bool {
if x == y {
return true
@@ -704,7 +703,7 @@ type referrersPackageResult struct {
refs []*ast.Ident // set of all other references to it
}
-// forEachRef calls f(id, text) for id in r.refs, in order.
+// foreachRef calls f(id, text) for id in r.refs, in order.
// Text is the text of the line on which id appears.
func (r *referrersPackageResult) foreachRef(f func(id *ast.Ident, text string)) {
// Show referring lines, like grep.
diff --git a/cmd/guru/serial/serial.go b/cmd/guru/serial/serial.go
index 5f097c51a..082e6cf0d 100644
--- a/cmd/guru/serial/serial.go
+++ b/cmd/guru/serial/serial.go
@@ -8,20 +8,20 @@
// This table shows the types of objects in the result stream for each
// query type.
//
-// Query Result stream
-// ----- -------------
-// callees Callees
-// callers Caller ...
-// callstack CallStack
-// definition Definition
-// describe Describe
-// freevars FreeVar ...
-// implements Implements
-// peers Peers
-// pointsto PointsTo ...
-// referrers ReferrersInitial ReferrersPackage ...
-// what What
-// whicherrs WhichErrs
+// Query Result stream
+// ----- -------------
+// callees Callees
+// callers Caller ...
+// callstack CallStack
+// definition Definition
+// describe Describe
+// freevars FreeVar ...
+// implements Implements
+// peers Peers
+// pointsto PointsTo ...
+// referrers ReferrersInitial ReferrersPackage ...
+// what What
+// whicherrs WhichErrs
//
// All 'pos' strings in the output are of the form "file:line:col",
// where line is the 1-based line number and col is the 1-based byte index.
@@ -113,7 +113,6 @@ type FreeVar struct {
// It describes the queried type, the set of named non-empty interface
// types to which it is assignable, and the set of named/*named types
// (concrete or non-empty interface) which may be assigned to it.
-//
type Implements struct {
T ImplementsType `json:"type,omitempty"` // the queried type
AssignableTo []ImplementsType `json:"to,omitempty"` // types assignable to T
@@ -161,14 +160,13 @@ type What struct {
//
// A "label" is an object that may be pointed to by a pointer, map,
// channel, 'func', slice or interface. Labels include:
-// - functions
-// - globals
-// - arrays created by literals (e.g. []byte("foo")) and conversions ([]byte(s))
-// - stack- and heap-allocated variables (including composite literals)
-// - arrays allocated by append()
-// - channels, maps and arrays created by make()
-// - and their subelements, e.g. "alloc.y[*].z"
-//
+// - functions
+// - globals
+// - arrays created by literals (e.g. []byte("foo")) and conversions ([]byte(s))
+// - stack- and heap-allocated variables (including composite literals)
+// - arrays allocated by append()
+// - channels, maps and arrays created by make()
+// - and their subelements, e.g. "alloc.y[*].z"
type PointsToLabel struct {
Pos string `json:"pos"` // location of syntax that allocated the object
Desc string `json:"desc"` // description of the label
@@ -183,7 +181,6 @@ type PointsToLabel struct {
// concrete type that is a pointer, the PTS entry describes the labels
// it may point to. The same is true for reflect.Values, except the
// dynamic types needn't be concrete.
-//
type PointsTo struct {
Type string `json:"type"` // (concrete) type of the pointer
NamePos string `json:"namepos,omitempty"` // location of type defn, if Named
diff --git a/cmd/guru/what.go b/cmd/guru/what.go
index 82495b4f8..7ebabbd82 100644
--- a/cmd/guru/what.go
+++ b/cmd/guru/what.go
@@ -24,7 +24,6 @@ import (
// It is intended to be a very low-latency query callable from GUI
// tools, e.g. to populate a menu of options of slower queries about
// the selected location.
-//
func what(q *Query) error {
qpos, err := fastQueryPos(q.Build, q.Pos)
if err != nil {
@@ -170,7 +169,6 @@ func what(q *Query) error {
//
// TODO(adonovan): what about _test.go files that are not part of the
// package?
-//
func guessImportPath(filename string, buildContext *build.Context) (srcdir, importPath string, err error) {
absFile, err := filepath.Abs(filename)
if err != nil {
diff --git a/cmd/present/dir.go b/cmd/present/dir.go
index 17736ec14..93db12bf4 100644
--- a/cmd/present/dir.go
+++ b/cmd/present/dir.go
@@ -7,6 +7,7 @@ package main
import (
"html/template"
"io"
+ "io/fs"
"log"
"net"
"net/http"
@@ -65,9 +66,9 @@ var (
contentTemplate map[string]*template.Template
)
-func initTemplates(base string) error {
+func initTemplates(fsys fs.FS) error {
// Locate the template file.
- actionTmpl := filepath.Join(base, "templates/action.tmpl")
+ actionTmpl := "templates/action.tmpl"
contentTemplate = make(map[string]*template.Template)
@@ -75,19 +76,19 @@ func initTemplates(base string) error {
".slide": "slides.tmpl",
".article": "article.tmpl",
} {
- contentTmpl = filepath.Join(base, "templates", contentTmpl)
+ contentTmpl = "templates/" + contentTmpl
// Read and parse the input.
tmpl := present.Template()
tmpl = tmpl.Funcs(template.FuncMap{"playable": playable})
- if _, err := tmpl.ParseFiles(actionTmpl, contentTmpl); err != nil {
+ if _, err := tmpl.ParseFS(fsys, actionTmpl, contentTmpl); err != nil {
return err
}
contentTemplate[ext] = tmpl
}
var err error
- dirListTemplate, err = template.ParseFiles(filepath.Join(base, "templates/dir.tmpl"))
+ dirListTemplate, err = template.ParseFS(fsys, "templates/dir.tmpl")
return err
}
diff --git a/cmd/present/doc.go b/cmd/present/doc.go
index e66984edb..654553507 100644
--- a/cmd/present/doc.go
+++ b/cmd/present/doc.go
@@ -14,35 +14,36 @@ https://golang.org/wiki/NativeClient
To use with App Engine, copy the files in the tools/cmd/present directory to the
root of your application and create an app.yaml file similar to this:
- runtime: go111
-
- handlers:
- - url: /favicon.ico
- static_files: static/favicon.ico
- upload: static/favicon.ico
- - url: /static
- static_dir: static
- - url: /.*
- script: auto
-
- # nobuild_files is a regexp that identifies which files to not build. It
- # is useful for embedding static assets like code snippets and preventing
- # them from producing build errors for your project.
- nobuild_files: [path regexp for talk materials]
+ runtime: go111
+
+ handlers:
+ - url: /favicon.ico
+ static_files: static/favicon.ico
+ upload: static/favicon.ico
+ - url: /static
+ static_dir: static
+ - url: /.*
+ script: auto
+
+ # nobuild_files is a regexp that identifies which files to not build. It
+ # is useful for embedding static assets like code snippets and preventing
+ # them from producing build errors for your project.
+ nobuild_files: [path regexp for talk materials]
When running on App Engine, content will be served from the ./content/
subdirectory.
Present then can be tested in a local App Engine environment with
- GAE_ENV=standard go run .
+ GAE_ENV=standard go run .
And deployed using
- gcloud app deploy
+ gcloud app deploy
Input files are named foo.extension, where "extension" defines the format of
the generated output. The supported formats are:
+
.slide // HTML5 slide presentation
.article // article format, such as a blog post
diff --git a/cmd/present/main.go b/cmd/present/main.go
index b89e11fe5..340025276 100644
--- a/cmd/present/main.go
+++ b/cmd/present/main.go
@@ -5,9 +5,10 @@
package main
import (
+ "embed"
"flag"
"fmt"
- "go/build"
+ "io/fs"
"log"
"net"
"net/http"
@@ -18,17 +19,17 @@ import (
"golang.org/x/tools/present"
)
-const basePkg = "golang.org/x/tools/cmd/present"
-
var (
httpAddr = flag.String("http", "127.0.0.1:3999", "HTTP service address (e.g., '127.0.0.1:3999')")
originHost = flag.String("orighost", "", "host component of web origin URL (e.g., 'localhost')")
basePath = flag.String("base", "", "base path for slide template and static resources")
contentPath = flag.String("content", ".", "base path for presentation content")
usePlayground = flag.Bool("use_playground", false, "run code snippets using play.golang.org; if false, run them locally and deliver results by WebSocket transport")
- nativeClient = flag.Bool("nacl", false, "use Native Client environment playground (prevents non-Go code execution) when using local WebSocket transport")
)
+//go:embed static templates
+var embedFS embed.FS
+
func main() {
flag.BoolVar(&present.PlayEnabled, "play", true, "enable playground (permit execution of arbitrary user code)")
flag.BoolVar(&present.NotesEnabled, "notes", false, "enable presenter notes (press 'N' from the browser to display them)")
@@ -50,16 +51,11 @@ func main() {
*contentPath = "./content/"
}
- if *basePath == "" {
- p, err := build.Default.Import(basePkg, "", build.FindOnly)
- if err != nil {
- fmt.Fprintf(os.Stderr, "Couldn't find gopresent files: %v\n", err)
- fmt.Fprintf(os.Stderr, basePathMessage, basePkg)
- os.Exit(1)
- }
- *basePath = p.Dir
+ var fsys fs.FS = embedFS
+ if *basePath != "" {
+ fsys = os.DirFS(*basePath)
}
- err := initTemplates(*basePath)
+ err := initTemplates(fsys)
if err != nil {
log.Fatalf("Failed to parse templates: %v", err)
}
@@ -98,11 +94,11 @@ func main() {
}
}
- initPlayground(*basePath, origin)
- http.Handle("/static/", http.FileServer(http.Dir(*basePath)))
+ initPlayground(fsys, origin)
+ http.Handle("/static/", http.FileServer(http.FS(fsys)))
if !ln.Addr().(*net.TCPAddr).IP.IsLoopback() &&
- present.PlayEnabled && !*nativeClient && !*usePlayground {
+ present.PlayEnabled && !*usePlayground {
log.Print(localhostWarning)
}
diff --git a/cmd/present/play.go b/cmd/present/play.go
index 2e53f1474..fb24fabfc 100644
--- a/cmd/present/play.go
+++ b/cmd/present/play.go
@@ -7,11 +7,9 @@ package main
import (
"bytes"
"fmt"
- "io/ioutil"
+ "io/fs"
"net/http"
"net/url"
- "path/filepath"
- "runtime"
"time"
"golang.org/x/tools/godoc/static"
@@ -31,7 +29,7 @@ var scripts = []string{"jquery.js", "jquery-ui.js", "playground.js", "play.js"}
// playScript registers an HTTP handler at /play.js that serves all the
// scripts specified by the variable above, and appends a line that
// initializes the playground with the specified transport.
-func playScript(root, transport string) {
+func playScript(fsys fs.FS, transport string) {
modTime := time.Now()
var buf bytes.Buffer
for _, p := range scripts {
@@ -39,7 +37,7 @@ func playScript(root, transport string) {
buf.WriteString(s)
continue
}
- b, err := ioutil.ReadFile(filepath.Join(root, "static", p))
+ b, err := fs.ReadFile(fsys, "static/"+p)
if err != nil {
panic(err)
}
@@ -53,27 +51,16 @@ func playScript(root, transport string) {
})
}
-func initPlayground(basepath string, origin *url.URL) {
+func initPlayground(fsys fs.FS, origin *url.URL) {
if !present.PlayEnabled {
return
}
if *usePlayground {
- playScript(basepath, "HTTPTransport")
+ playScript(fsys, "HTTPTransport")
return
}
- if *nativeClient {
- // When specifying nativeClient, non-Go code cannot be executed
- // because the NaCl setup doesn't support doing so.
- socket.RunScripts = false
- socket.Environ = func() []string {
- if runtime.GOARCH == "amd64" {
- return environ("GOOS=nacl", "GOARCH=amd64p32")
- }
- return environ("GOOS=nacl")
- }
- }
- playScript(basepath, "SocketTransport")
+ playScript(fsys, "SocketTransport")
http.Handle("/socket", socket.NewHandler(origin))
}
diff --git a/cmd/present/static/article.css b/cmd/present/static/article.css
index 52fd73737..b577aaf2e 100644
--- a/cmd/present/static/article.css
+++ b/cmd/present/static/article.css
@@ -102,29 +102,33 @@ div#footer {
div.code,
div.output {
+ margin: 0;
+}
+
+pre {
margin: 20px 20px 20px 40px;
-webkit-border-radius: 5px;
-moz-border-radius: 5px;
border-radius: 5px;
}
-div.output {
+div.output pre {
padding: 10px;
}
-div.code {
+pre {
background: white;
}
-div.output {
+div.output pre {
background: black;
}
-div.output .stdout {
+div.output .stdout pre {
color: #e6e6e6;
}
-div.output .stderr {
+div.output .stderr pre {
color: rgb(244, 74, 63);
}
-div.output .system {
+div.output .system pre {
color: rgb(255, 209, 77);
}
diff --git a/cmd/present/static/styles.css b/cmd/present/static/styles.css
index 5edfde934..47c9f196d 100644
--- a/cmd/present/static/styles.css
+++ b/cmd/present/static/styles.css
@@ -242,7 +242,7 @@
margin-bottom: 100px !important;
}
- div.code {
+ pre {
background: rgb(240, 240, 240);
}
@@ -359,7 +359,12 @@ li {
margin: 0 0 0.5em 0;
}
-div.code {
+div.code, div.output {
+ margin: 0;
+ padding: 0;
+}
+
+pre {
padding: 5px 10px;
margin-top: 20px;
margin-bottom: 20px;
@@ -367,10 +372,6 @@ div.code {
background: rgb(240, 240, 240);
border: 1px solid rgb(224, 224, 224);
-}
-pre {
- margin: 0;
- padding: 0;
font-family: 'Droid Sans Mono', 'Courier New', monospace;
font-size: 18px;
@@ -393,6 +394,10 @@ code {
color: black;
}
+pre code {
+ font-size: 100%;
+}
+
article > .image,
article > .video {
text-align: center;
@@ -433,7 +438,7 @@ p.link {
}
/* Code */
-div.code {
+pre {
outline: 0px solid transparent;
}
div.playground {
diff --git a/cmd/present2md/main.go b/cmd/present2md/main.go
index 64be64b97..748b041e4 100644
--- a/cmd/present2md/main.go
+++ b/cmd/present2md/main.go
@@ -18,7 +18,6 @@
//
// present2md your.article
// present2md -w *.article
-//
package main
import (
diff --git a/cmd/signature-fuzzer/fuzz-runner/runner.go b/cmd/signature-fuzzer/fuzz-runner/runner.go
index 4e5b413f3..b77b218f5 100644
--- a/cmd/signature-fuzzer/fuzz-runner/runner.go
+++ b/cmd/signature-fuzzer/fuzz-runner/runner.go
@@ -107,7 +107,7 @@ func docmd(cmd []string, dir string) int {
return st
}
-// docodmout forks and execs command 'cmd' in dir 'dir', redirecting
+// docmdout forks and execs command 'cmd' in dir 'dir', redirecting
// stderr and stdout from the execution to file 'outfile'.
func docmdout(cmd []string, dir string, outfile string) int {
of, err := os.OpenFile(outfile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
diff --git a/cmd/signature-fuzzer/internal/fuzz-generator/generator.go b/cmd/signature-fuzzer/internal/fuzz-generator/generator.go
index bbe53fb10..ba5f05525 100644
--- a/cmd/signature-fuzzer/internal/fuzz-generator/generator.go
+++ b/cmd/signature-fuzzer/internal/fuzz-generator/generator.go
@@ -1564,10 +1564,10 @@ func (s *genstate) emitParamChecks(f *funcdef, b *bytes.Buffer, pidx int, value
// emitDeferChecks creates code like
//
-// defer func(...args...) {
-// check arg
-// check param
-// }(...)
+// defer func(...args...) {
+// check arg
+// check param
+// }(...)
//
// where we randomly choose to either pass a param through to the
// function literal, or have the param captured by the closure, then
diff --git a/cmd/splitdwarf/splitdwarf.go b/cmd/splitdwarf/splitdwarf.go
index a13b9f316..9729b0b7a 100644
--- a/cmd/splitdwarf/splitdwarf.go
+++ b/cmd/splitdwarf/splitdwarf.go
@@ -6,7 +6,6 @@
// +build !js,!nacl,!plan9,!solaris,!windows
/*
-
Splitdwarf uncompresses and copies the DWARF segment of a Mach-O
executable into the "dSYM" file expected by lldb and ports of gdb
on OSX.
@@ -17,7 +16,6 @@ Unless a dSYM file name is provided on the command line,
splitdwarf will place it where the OSX tools expect it, in
"<osxMachoFile>.dSYM/Contents/Resources/DWARF/<osxMachoFile>",
creating directories as necessary.
-
*/
package main // import "golang.org/x/tools/cmd/splitdwarf"
@@ -94,7 +92,7 @@ for input_exe need to allow writing.
// IndSym Offset = file offset (within link edit section) of 4-byte indices within symtab.
//
// Section __TEXT.__symbol_stub1.
- // Offset and size (Reserved2) locate and describe a table for thios section.
+ // Offset and size (Reserved2) locate and describe a table for this section.
// Symbols beginning at IndirectSymIndex (Reserved1) (see LC_DYSYMTAB.IndSymOffset) refer to this table.
// (These table entries are apparently PLTs [Procedure Linkage Table/Trampoline])
//
@@ -184,7 +182,7 @@ for input_exe need to allow writing.
oldsym := symtab.Syms[ii]
newsymtab.Syms = append(newsymtab.Syms, oldsym)
- linkeditsyms = append(linkeditsyms, macho.Nlist64{Name: uint32(linkeditstringcur),
+ linkeditsyms = append(linkeditsyms, macho.Nlist64{Name: linkeditstringcur,
Type: oldsym.Type, Sect: oldsym.Sect, Desc: oldsym.Desc, Value: oldsym.Value})
linkeditstringcur += uint32(len(oldsym.Name)) + 1
linkeditstrings = append(linkeditstrings, oldsym.Name)
diff --git a/cmd/ssadump/main.go b/cmd/ssadump/main.go
index fee931b1c..cfb9122b2 100644
--- a/cmd/ssadump/main.go
+++ b/cmd/ssadump/main.go
@@ -47,7 +47,7 @@ func init() {
}
const usage = `SSA builder and interpreter.
-Usage: ssadump [-build=[DBCSNFL]] [-test] [-run] [-interp=[TR]] [-arg=...] package...
+Usage: ssadump [-build=[DBCSNFLG]] [-test] [-run] [-interp=[TR]] [-arg=...] package...
Use -help flag to display options.
Examples:
@@ -55,7 +55,8 @@ Examples:
% ssadump -build=F -test fmt # dump SSA form of a package and its tests
% ssadump -run -interp=T hello.go # interpret a program, with tracing
-The -run flag causes ssadump to run the first package named main.
+The -run flag causes ssadump to build the code in a runnable form and run the first
+package named main.
Interpretation of the standard "testing" package is no longer supported.
`
@@ -130,6 +131,11 @@ func doMain() error {
return fmt.Errorf("packages contain errors")
}
+ // Turn on instantiating generics during build if the program will be run.
+ if *runFlag {
+ mode |= ssa.InstantiateGenerics
+ }
+
// Create SSA-form program representation.
prog, pkgs := ssautil.AllPackages(initial, mode)
@@ -151,12 +157,15 @@ func doMain() error {
// Build SSA for all packages.
prog.Build()
- // The interpreter needs the runtime package.
- // It is a limitation of go/packages that
- // we cannot add "runtime" to its initial set,
- // we can only check that it is present.
- if prog.ImportedPackage("runtime") == nil {
- return fmt.Errorf("-run: program does not depend on runtime")
+ // Earlier versions of the interpreter needed the runtime
+ // package; however, interp cannot handle unsafe constructs
+ // used during runtime's package initialization at the moment.
+ // The key construct blocking support is:
+ // *((*T)(unsafe.Pointer(p)))
+ // Unfortunately, this means only trivial programs can be
+ // interpreted by ssadump.
+ if prog.ImportedPackage("runtime") != nil {
+ return fmt.Errorf("-run: program depends on runtime package (interpreter can run only trivial programs)")
}
if runtime.GOARCH != build.Default.GOARCH {
diff --git a/cmd/stress/stress.go b/cmd/stress/stress.go
index 9ba6ef35f..c4a187212 100644
--- a/cmd/stress/stress.go
+++ b/cmd/stress/stress.go
@@ -8,7 +8,9 @@
// The stress utility is intended for catching sporadic failures.
// It runs a given process in parallel in a loop and collects any failures.
// Usage:
-// $ stress ./fmt.test -test.run=TestSometing -test.cpu=10
+//
+// $ stress ./fmt.test -test.run=TestSometing -test.cpu=10
+//
// You can also specify a number of parallel processes with -p flag;
// instruct the utility to not kill hanged processes for gdb attach;
// or specify the failure output you are looking for (if you want to
diff --git a/cmd/stringer/endtoend_test.go b/cmd/stringer/endtoend_test.go
index 5b969a52e..29eb91860 100644
--- a/cmd/stringer/endtoend_test.go
+++ b/cmd/stringer/endtoend_test.go
@@ -14,15 +14,14 @@ import (
"fmt"
"go/build"
"io"
- "io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
+ "sync"
"testing"
- "golang.org/x/tools/internal/testenv"
"golang.org/x/tools/internal/typeparams"
)
@@ -31,9 +30,22 @@ import (
// we run stringer -type X and then compile and run the program. The resulting
// binary panics if the String method for X is not correct, including for error cases.
+func TestMain(m *testing.M) {
+ if os.Getenv("STRINGER_TEST_IS_STRINGER") != "" {
+ main()
+ os.Exit(0)
+ }
+
+ // Inform subprocesses that they should run the cmd/stringer main instead of
+ // running tests. It's a close approximation to building and running the real
+ // command, and much less complicated and expensive to build and clean up.
+ os.Setenv("STRINGER_TEST_IS_STRINGER", "1")
+
+ os.Exit(m.Run())
+}
+
func TestEndToEnd(t *testing.T) {
- dir, stringer := buildStringer(t)
- defer os.RemoveAll(dir)
+ stringer := stringerPath(t)
// Read the testdata directory.
fd, err := os.Open("testdata")
if err != nil {
@@ -65,7 +77,7 @@ func TestEndToEnd(t *testing.T) {
t.Logf("cgo is not enabled for %s", name)
continue
}
- stringerCompileAndRun(t, dir, stringer, typeName(name), name)
+ stringerCompileAndRun(t, t.TempDir(), stringer, typeName(name), name)
}
}
@@ -92,8 +104,8 @@ func moreTests(t *testing.T, dirname, prefix string) []string {
// TestTags verifies that the -tags flag works as advertised.
func TestTags(t *testing.T) {
- dir, stringer := buildStringer(t)
- defer os.RemoveAll(dir)
+ stringer := stringerPath(t)
+ dir := t.TempDir()
var (
protectedConst = []byte("TagProtected")
output = filepath.Join(dir, "const_string.go")
@@ -113,7 +125,7 @@ func TestTags(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- result, err := ioutil.ReadFile(output)
+ result, err := os.ReadFile(output)
if err != nil {
t.Fatal(err)
}
@@ -128,7 +140,7 @@ func TestTags(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- result, err = ioutil.ReadFile(output)
+ result, err = os.ReadFile(output)
if err != nil {
t.Fatal(err)
}
@@ -140,8 +152,8 @@ func TestTags(t *testing.T) {
// TestConstValueChange verifies that if a constant value changes and
// the stringer code is not regenerated, we'll get a compiler error.
func TestConstValueChange(t *testing.T) {
- dir, stringer := buildStringer(t)
- defer os.RemoveAll(dir)
+ stringer := stringerPath(t)
+ dir := t.TempDir()
source := filepath.Join(dir, "day.go")
err := copy(source, filepath.Join("testdata", "day.go"))
if err != nil {
@@ -179,21 +191,20 @@ func TestConstValueChange(t *testing.T) {
}
}
-// buildStringer creates a temporary directory and installs stringer there.
-func buildStringer(t *testing.T) (dir string, stringer string) {
- t.Helper()
- testenv.NeedsTool(t, "go")
+var exe struct {
+ path string
+ err error
+ once sync.Once
+}
- dir, err := ioutil.TempDir("", "stringer")
- if err != nil {
- t.Fatal(err)
- }
- stringer = filepath.Join(dir, "stringer.exe")
- err = run("go", "build", "-o", stringer)
- if err != nil {
- t.Fatalf("building stringer: %s", err)
+func stringerPath(t *testing.T) string {
+ exe.once.Do(func() {
+ exe.path, exe.err = os.Executable()
+ })
+ if exe.err != nil {
+ t.Fatal(exe.err)
}
- return dir, stringer
+ return exe.path
}
// stringerCompileAndRun runs stringer for the named file and compiles and
diff --git a/cmd/stringer/golden_test.go b/cmd/stringer/golden_test.go
index b29763174..250af05f9 100644
--- a/cmd/stringer/golden_test.go
+++ b/cmd/stringer/golden_test.go
@@ -10,7 +10,6 @@
package main
import (
- "io/ioutil"
"os"
"path/filepath"
"strings"
@@ -452,12 +451,7 @@ func (i Token) String() string {
func TestGolden(t *testing.T) {
testenv.NeedsTool(t, "go")
- dir, err := ioutil.TempDir("", "stringer")
- if err != nil {
- t.Error(err)
- }
- defer os.RemoveAll(dir)
-
+ dir := t.TempDir()
for _, test := range golden {
g := Generator{
trimPrefix: test.trimPrefix,
@@ -466,7 +460,7 @@ func TestGolden(t *testing.T) {
input := "package test\n" + test.input
file := test.name + ".go"
absFile := filepath.Join(dir, file)
- err := ioutil.WriteFile(absFile, []byte(input), 0644)
+ err := os.WriteFile(absFile, []byte(input), 0644)
if err != nil {
t.Error(err)
}
diff --git a/cmd/stringer/stringer.go b/cmd/stringer/stringer.go
index 558a234d6..998d1a51b 100644
--- a/cmd/stringer/stringer.go
+++ b/cmd/stringer/stringer.go
@@ -5,7 +5,9 @@
// Stringer is a tool to automate the creation of methods that satisfy the fmt.Stringer
// interface. Given the name of a (signed or unsigned) integer type T that has constants
// defined, stringer will create a new self-contained Go source file implementing
+//
// func (t T) String() string
+//
// The file is created in the same package and directory as the package that defines T.
// It has helpful defaults designed for use with go generate.
//
@@ -74,7 +76,6 @@ import (
"go/format"
"go/token"
"go/types"
- "io/ioutil"
"log"
"os"
"path/filepath"
@@ -164,7 +165,7 @@ func main() {
baseName := fmt.Sprintf("%s_string.go", types[0])
outputName = filepath.Join(dir, strings.ToLower(baseName))
}
- err := ioutil.WriteFile(outputName, src, 0644)
+ err := os.WriteFile(outputName, src, 0644)
if err != nil {
log.Fatalf("writing output: %s", err)
}
@@ -215,7 +216,7 @@ type Package struct {
// parsePackage exits if there is an error.
func (g *Generator) parsePackage(patterns []string, tags []string) {
cfg := &packages.Config{
- Mode: packages.LoadSyntax,
+ Mode: packages.NeedName | packages.NeedTypes | packages.NeedTypesInfo | packages.NeedSyntax,
// TODO: Need to think about constants in test files. Maybe write type_string_test.go
// in a separate pass? For later.
Tests: false,
@@ -570,6 +571,7 @@ func (g *Generator) buildOneRun(runs [][]Value, typeName string) {
}
// Arguments to format are:
+//
// [1]: type name
// [2]: size of index element (8 for uint8 etc.)
// [3]: less than zero check (for signed types)
diff --git a/cmd/toolstash/buildall b/cmd/toolstash/buildall
index 0c6492c9e..4fc22f7f8 100755
--- a/cmd/toolstash/buildall
+++ b/cmd/toolstash/buildall
@@ -38,10 +38,10 @@ if [ "$pattern" = "" ]; then
fi
targets="$(go tool dist list; echo linux/386/softfloat)"
-targets="$(echo "$targets" | tr '/' '-' | sort | egrep "$pattern" | egrep -v 'android-arm|darwin-arm')"
+targets="$(echo "$targets" | tr '/' '-' | sort | grep -E "$pattern" | grep -E -v 'android-arm|darwin-arm')"
# put linux first in the target list to get all the architectures up front.
-targets="$(echo "$targets" | egrep 'linux') $(echo "$targets" | egrep -v 'linux')"
+targets="$(echo "$targets" | grep -E 'linux') $(echo "$targets" | grep -E -v 'linux')"
if [ "$sete" = true ]; then
set -e
diff --git a/cmd/toolstash/main.go b/cmd/toolstash/main.go
index 4c3494201..ddb1905ae 100644
--- a/cmd/toolstash/main.go
+++ b/cmd/toolstash/main.go
@@ -12,14 +12,14 @@
// toolstash [-n] [-v] [-t] go run x.go
// toolstash [-n] [-v] [-t] [-cmp] compile x.go
//
-// The toolstash command manages a ``stashed'' copy of the Go toolchain
+// The toolstash command manages a “stashed” copy of the Go toolchain
// kept in $GOROOT/pkg/toolstash. In this case, the toolchain means the
// tools available with the 'go tool' command as well as the go, godoc, and gofmt
// binaries.
//
-// The command ``toolstash save'', typically run when the toolchain is known to be working,
+// The command “toolstash save”, typically run when the toolchain is known to be working,
// copies the toolchain from its installed location to the toolstash directory.
-// Its inverse, ``toolchain restore'', typically run when the toolchain is known to be broken,
+// Its inverse, “toolchain restore”, typically run when the toolchain is known to be broken,
// copies the toolchain from the toolstash directory back to the installed locations.
// If additional arguments are given, the save or restore applies only to the named tools.
// Otherwise, it applies to all tools.
@@ -39,7 +39,7 @@
// The -t flag causes toolstash to print the time elapsed during while the
// command ran.
//
-// Comparing
+// # Comparing
//
// The -cmp flag causes toolstash to run both the installed and the stashed
// copy of an assembler or compiler and check that they produce identical
@@ -65,7 +65,7 @@
// go tool dist install cmd/compile # install compiler only
// toolstash -cmp compile x.go
//
-// Go Command Integration
+// # Go Command Integration
//
// The go command accepts a -toolexec flag that specifies a program
// to use to run the build tools.
@@ -97,7 +97,7 @@
// # If not, restore, in order to keep working on Go code.
// toolstash restore
//
-// Version Skew
+// # Version Skew
//
// The Go tools write the current Go version to object files, and (outside
// release branches) that version includes the hash and time stamp
@@ -118,9 +118,8 @@
// echo devel >$GOROOT/VERSION
//
// The version can be arbitrary text, but to pass all.bash's API check, it must
-// contain the substring ``devel''. The VERSION file must be created before
+// contain the substring “devel”. The VERSION file must be created before
// building either version of the toolchain.
-//
package main // import "golang.org/x/tools/cmd/toolstash"
import (
diff --git a/container/intsets/sparse.go b/container/intsets/sparse.go
index 2f1a0eaf3..d5fe156ed 100644
--- a/container/intsets/sparse.go
+++ b/container/intsets/sparse.go
@@ -10,7 +10,6 @@
// space-efficient than equivalent operations on sets based on the Go
// map type. The IsEmpty, Min, Max, Clear and TakeMin operations
// require constant time.
-//
package intsets // import "golang.org/x/tools/container/intsets"
// TODO(adonovan):
@@ -37,7 +36,6 @@ import (
//
// Sparse sets must be copied using the Copy method, not by assigning
// a Sparse value.
-//
type Sparse struct {
// An uninitialized Sparse represents an empty set.
// An empty set may also be represented by
@@ -105,7 +103,6 @@ func ntz(x word) int {
// is the Euclidean remainder.
//
// A block may only be empty transiently.
-//
type block struct {
offset int // offset mod bitsPerBlock == 0
bits [wordsPerBlock]word // contains at least one set bit
@@ -122,7 +119,6 @@ func wordMask(i uint) (w uint, mask word) {
// insert sets the block b's ith bit and
// returns true if it was not already set.
-//
func (b *block) insert(i uint) bool {
w, mask := wordMask(i)
if b.bits[w]&mask == 0 {
@@ -135,7 +131,6 @@ func (b *block) insert(i uint) bool {
// remove clears the block's ith bit and
// returns true if the bit was previously set.
// NB: may leave the block empty.
-//
func (b *block) remove(i uint) bool {
w, mask := wordMask(i)
if b.bits[w]&mask != 0 {
@@ -195,7 +190,7 @@ func (b *block) min(take bool) int {
if take {
b.bits[i] = w &^ (1 << uint(tz))
}
- return b.offset + int(i*bitsPerWord) + tz
+ return b.offset + i*bitsPerWord + tz
}
}
panic("BUG: empty block")
@@ -238,7 +233,6 @@ func (b *block) forEach(f func(int)) {
// offsetAndBitIndex returns the offset of the block that would
// contain x and the bit index of x within that block.
-//
func offsetAndBitIndex(x int) (int, uint) {
mod := x % bitsPerBlock
if mod < 0 {
@@ -438,9 +432,8 @@ func (s *Sparse) Clear() {
//
// This method may be used for iteration over a worklist like so:
//
-// var x int
-// for worklist.TakeMin(&x) { use(x) }
-//
+// var x int
+// for worklist.TakeMin(&x) { use(x) }
func (s *Sparse) TakeMin(p *int) bool {
if s.IsEmpty() {
return false
@@ -466,7 +459,6 @@ func (s *Sparse) Has(x int) bool {
// f must not mutate s. Consequently, forEach is not safe to expose
// to clients. In any case, using "range s.AppendTo()" allows more
// natural control flow with continue/break/return.
-//
func (s *Sparse) forEach(f func(int)) {
for b := s.first(); b != &none; b = s.next(b) {
b.forEach(f)
@@ -1021,11 +1013,11 @@ func (s *Sparse) String() string {
// preceded by a digit, appears if the sum is non-integral.
//
// Examples:
-// {}.BitString() = "0"
-// {4,5}.BitString() = "110000"
-// {-3}.BitString() = "0.001"
-// {-3,0,4,5}.BitString() = "110001.001"
//
+// {}.BitString() = "0"
+// {4,5}.BitString() = "110000"
+// {-3}.BitString() = "0.001"
+// {-3,0,4,5}.BitString() = "110001.001"
func (s *Sparse) BitString() string {
if s.IsEmpty() {
return "0"
@@ -1060,7 +1052,6 @@ func (s *Sparse) BitString() string {
// GoString returns a string showing the internal representation of
// the set s.
-//
func (s *Sparse) GoString() string {
var buf bytes.Buffer
for b := s.first(); b != &none; b = s.next(b) {
diff --git a/copyright/copyright.go b/copyright/copyright.go
index 4a04d132a..db63c5992 100644
--- a/copyright/copyright.go
+++ b/copyright/copyright.go
@@ -94,8 +94,9 @@ func checkFile(toolsDir, filename string) (bool, error) {
return shouldAddCopyright, nil
}
-// Copied from golang.org/x/tools/internal/lsp/source/util.go.
+// Copied from golang.org/x/tools/gopls/internal/lsp/source/util.go.
// Matches cgo generated comment as well as the proposed standard:
+//
// https://golang.org/s/generatedcode
var generatedRx = regexp.MustCompile(`// .*DO NOT EDIT\.?`)
diff --git a/go.mod b/go.mod
index 34bc8ab49..29b36c6ac 100644
--- a/go.mod
+++ b/go.mod
@@ -1,13 +1,12 @@
module golang.org/x/tools
-go 1.17
+go 1.18 // tagx:compat 1.16
require (
- github.com/yuin/goldmark v1.4.1
- golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3
- golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f
- golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
- golang.org/x/sys v0.0.0-20211019181941-9d821ace8654
- golang.org/x/text v0.3.7
- golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1
+ github.com/yuin/goldmark v1.4.13
+ golang.org/x/mod v0.9.0
+ golang.org/x/net v0.8.0
+ golang.org/x/sys v0.6.0
)
+
+require golang.org/x/sync v0.1.0
diff --git a/go.sum b/go.sum
index c34a2aed4..c9fae489e 100644
--- a/go.sum
+++ b/go.sum
@@ -1,33 +1,40 @@
-github.com/yuin/goldmark v1.4.1 h1:/vn0k+RBvwlxEmP5E7SZMqNxPhfMVFEJiykr15/0XKM=
-github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o=
-golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs=
+golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f h1:OfiFi4JbukWwe3lzw+xunroH1mnC1e2Gy5cxNJApiSY=
-golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
+golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211019181941-9d821ace8654 h1:id054HUawV2/6IGm2IV8KZQjqtwAOo2CYlOToYqa0d0=
-golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/go/analysis/analysis.go b/go/analysis/analysis.go
index d11505a16..44ada22a0 100644
--- a/go/analysis/analysis.go
+++ b/go/analysis/analysis.go
@@ -11,8 +11,6 @@ import (
"go/token"
"go/types"
"reflect"
-
- "golang.org/x/tools/internal/analysisinternal"
)
// An Analyzer describes an analysis function and its options.
@@ -48,6 +46,7 @@ type Analyzer struct {
// RunDespiteErrors allows the driver to invoke
// the Run method of this analyzer even on a
// package that contains parse or type errors.
+ // The Pass.TypeErrors field may consequently be non-empty.
RunDespiteErrors bool
// Requires is a set of analyzers that must run successfully
@@ -75,17 +74,6 @@ type Analyzer struct {
func (a *Analyzer) String() string { return a.Name }
-func init() {
- // Set the analysisinternal functions to be able to pass type errors
- // to the Pass type without modifying the go/analysis API.
- analysisinternal.SetTypeErrors = func(p interface{}, errors []types.Error) {
- p.(*Pass).typeErrors = errors
- }
- analysisinternal.GetTypeErrors = func(p interface{}) []types.Error {
- return p.(*Pass).typeErrors
- }
-}
-
// A Pass provides information to the Run function that
// applies a specific analyzer to a single Go package.
//
@@ -106,6 +94,7 @@ type Pass struct {
Pkg *types.Package // type information about the package
TypesInfo *types.Info // type information about the syntax trees
TypesSizes types.Sizes // function for computing sizes of types
+ TypeErrors []types.Error // type errors (only if Analyzer.RunDespiteErrors)
// Report reports a Diagnostic, a finding about a specific location
// in the analyzed source code such as a potential mistake.
diff --git a/go/analysis/analysistest/analysistest.go b/go/analysis/analysistest/analysistest.go
index df79a4419..be016e7e9 100644
--- a/go/analysis/analysistest/analysistest.go
+++ b/go/analysis/analysistest/analysistest.go
@@ -19,14 +19,13 @@ import (
"sort"
"strconv"
"strings"
+ "testing"
"text/scanner"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/internal/checker"
"golang.org/x/tools/go/packages"
- "golang.org/x/tools/internal/lsp/diff"
- "golang.org/x/tools/internal/lsp/diff/myers"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/internal/diff"
"golang.org/x/tools/internal/testenv"
"golang.org/x/tools/txtar"
)
@@ -81,23 +80,24 @@ type Testing interface {
// Each section in the archive corresponds to a single message.
//
// A golden file using txtar may look like this:
-// -- turn into single negation --
-// package pkg
//
-// func fn(b1, b2 bool) {
-// if !b1 { // want `negating a boolean twice`
-// println()
-// }
-// }
+// -- turn into single negation --
+// package pkg
//
-// -- remove double negation --
-// package pkg
+// func fn(b1, b2 bool) {
+// if !b1 { // want `negating a boolean twice`
+// println()
+// }
+// }
//
-// func fn(b1, b2 bool) {
-// if b1 { // want `negating a boolean twice`
-// println()
-// }
-// }
+// -- remove double negation --
+// package pkg
+//
+// func fn(b1, b2 bool) {
+// if b1 { // want `negating a boolean twice`
+// println()
+// }
+// }
func RunWithSuggestedFixes(t Testing, dir string, a *analysis.Analyzer, patterns ...string) []*Result {
r := Run(t, dir, a, patterns...)
@@ -113,7 +113,7 @@ func RunWithSuggestedFixes(t Testing, dir string, a *analysis.Analyzer, patterns
// should match up.
for _, act := range r {
// file -> message -> edits
- fileEdits := make(map[*token.File]map[string][]diff.TextEdit)
+ fileEdits := make(map[*token.File]map[string][]diff.Edit)
fileContents := make(map[*token.File][]byte)
// Validate edits, prepare the fileEdits map and read the file contents.
@@ -141,17 +141,13 @@ func RunWithSuggestedFixes(t Testing, dir string, a *analysis.Analyzer, patterns
}
fileContents[file] = contents
}
- spn, err := span.NewRange(act.Pass.Fset, edit.Pos, edit.End).Span()
- if err != nil {
- t.Errorf("error converting edit to span %s: %v", file.Name(), err)
- }
-
if _, ok := fileEdits[file]; !ok {
- fileEdits[file] = make(map[string][]diff.TextEdit)
+ fileEdits[file] = make(map[string][]diff.Edit)
}
- fileEdits[file][sf.Message] = append(fileEdits[file][sf.Message], diff.TextEdit{
- Span: spn,
- NewText: string(edit.NewText),
+ fileEdits[file][sf.Message] = append(fileEdits[file][sf.Message], diff.Edit{
+ Start: file.Offset(edit.Pos),
+ End: file.Offset(edit.End),
+ New: string(edit.NewText),
})
}
}
@@ -188,23 +184,24 @@ func RunWithSuggestedFixes(t Testing, dir string, a *analysis.Analyzer, patterns
for _, vf := range ar.Files {
if vf.Name == sf {
found = true
- out := diff.ApplyEdits(string(orig), edits)
+ out, err := diff.ApplyBytes(orig, edits)
+ if err != nil {
+ t.Errorf("%s: error applying fixes: %v", file.Name(), err)
+ continue
+ }
// the file may contain multiple trailing
// newlines if the user places empty lines
// between files in the archive. normalize
// this to a single newline.
want := string(bytes.TrimRight(vf.Data, "\n")) + "\n"
- formatted, err := format.Source([]byte(out))
+ formatted, err := format.Source(out)
if err != nil {
t.Errorf("%s: error formatting edited source: %v\n%s", file.Name(), err, out)
continue
}
- if want != string(formatted) {
- d, err := myers.ComputeEdits("", want, string(formatted))
- if err != nil {
- t.Errorf("failed to compute suggested fix diff: %v", err)
- }
- t.Errorf("suggested fixes failed for %s:\n%s", file.Name(), diff.ToUnified(fmt.Sprintf("%s.golden [%s]", file.Name(), sf), "actual", want, d))
+ if got := string(formatted); got != want {
+ unified := diff.Unified(fmt.Sprintf("%s.golden [%s]", file.Name(), sf), "actual", want, got)
+ t.Errorf("suggested fixes failed for %s:\n%s", file.Name(), unified)
}
break
}
@@ -216,25 +213,26 @@ func RunWithSuggestedFixes(t Testing, dir string, a *analysis.Analyzer, patterns
} else {
// all suggested fixes are represented by a single file
- var catchallEdits []diff.TextEdit
+ var catchallEdits []diff.Edit
for _, edits := range fixes {
catchallEdits = append(catchallEdits, edits...)
}
- out := diff.ApplyEdits(string(orig), catchallEdits)
+ out, err := diff.ApplyBytes(orig, catchallEdits)
+ if err != nil {
+ t.Errorf("%s: error applying fixes: %v", file.Name(), err)
+ continue
+ }
want := string(ar.Comment)
- formatted, err := format.Source([]byte(out))
+ formatted, err := format.Source(out)
if err != nil {
t.Errorf("%s: error formatting resulting source: %v\n%s", file.Name(), err, out)
continue
}
- if want != string(formatted) {
- d, err := myers.ComputeEdits("", want, string(formatted))
- if err != nil {
- t.Errorf("%s: failed to compute suggested fix diff: %s", file.Name(), err)
- }
- t.Errorf("suggested fixes failed for %s:\n%s", file.Name(), diff.ToUnified(file.Name()+".golden", "actual", want, d))
+ if got := string(formatted); got != want {
+ unified := diff.Unified(file.Name()+".golden", "actual", want, got)
+ t.Errorf("suggested fixes failed for %s:\n%s", file.Name(), unified)
}
}
}
@@ -248,7 +246,8 @@ func RunWithSuggestedFixes(t Testing, dir string, a *analysis.Analyzer, patterns
// directory using golang.org/x/tools/go/packages, runs the analysis on
// them, and checks that each analysis emits the expected diagnostics
// and facts specified by the contents of '// want ...' comments in the
-// package's source files.
+// package's source files. It treats a comment of the form
+// "//...// want..." or "/*...// want... */" as if it starts at 'want'
//
// An expectation of a Diagnostic is specified by a string literal
// containing a regular expression that must match the diagnostic
@@ -280,7 +279,7 @@ func RunWithSuggestedFixes(t Testing, dir string, a *analysis.Analyzer, patterns
// attempted, even if unsuccessful. It is safe for a test to ignore all
// the results, but a test may use it to perform additional checks.
func Run(t Testing, dir string, a *analysis.Analyzer, patterns ...string) []*Result {
- if t, ok := t.(testenv.Testing); ok {
+ if t, ok := t.(testing.TB); ok {
testenv.NeedsGoPackages(t)
}
@@ -316,8 +315,11 @@ func loadPackages(a *analysis.Analyzer, dir string, patterns ...string) ([]*pack
// a list of packages we generate and then do the parsing and
// typechecking, though this feature seems to be a recurring need.
+ mode := packages.NeedName | packages.NeedFiles | packages.NeedCompiledGoFiles | packages.NeedImports |
+ packages.NeedTypes | packages.NeedTypesSizes | packages.NeedSyntax | packages.NeedTypesInfo |
+ packages.NeedDeps
cfg := &packages.Config{
- Mode: packages.LoadAllSyntax,
+ Mode: mode,
Dir: dir,
Tests: true,
Env: append(os.Environ(), "GOPATH="+dir, "GO111MODULE=off", "GOPROXY=off"),
diff --git a/go/analysis/diagnostic.go b/go/analysis/diagnostic.go
index cd462a0cb..5cdcf46d2 100644
--- a/go/analysis/diagnostic.go
+++ b/go/analysis/diagnostic.go
@@ -37,7 +37,7 @@ type Diagnostic struct {
// declaration.
type RelatedInformation struct {
Pos token.Pos
- End token.Pos
+ End token.Pos // optional
Message string
}
diff --git a/go/analysis/doc.go b/go/analysis/doc.go
index 94a3bd5d0..c5429c9e2 100644
--- a/go/analysis/doc.go
+++ b/go/analysis/doc.go
@@ -3,12 +3,10 @@
// license that can be found in the LICENSE file.
/*
-
Package analysis defines the interface between a modular static
analysis and an analysis driver program.
-
-Background
+# Background
A static analysis is a function that inspects a package of Go code and
reports a set of diagnostics (typically mistakes in the code), and
@@ -32,8 +30,7 @@ frameworks, code review tools, code-base indexers (such as SourceGraph),
documentation viewers (such as godoc), batch pipelines for large code
bases, and so on.
-
-Analyzer
+# Analyzer
The primary type in the API is Analyzer. An Analyzer statically
describes an analysis function: its name, documentation, flags,
@@ -115,8 +112,7 @@ Finally, the Run field contains a function to be called by the driver to
execute the analysis on a single package. The driver passes it an
instance of the Pass type.
-
-Pass
+# Pass
A Pass describes a single unit of work: the application of a particular
Analyzer to a particular package of Go code.
@@ -181,14 +177,14 @@ Diagnostic is defined as:
The optional Category field is a short identifier that classifies the
kind of message when an analysis produces several kinds of diagnostic.
-Many analyses want to associate diagnostics with a severity level.
-Because Diagnostic does not have a severity level field, an Analyzer's
-diagnostics effectively all have the same severity level. To separate which
-diagnostics are high severity and which are low severity, expose multiple
-Analyzers instead. Analyzers should also be separated when their
-diagnostics belong in different groups, or could be tagged differently
-before being shown to the end user. Analyzers should document their severity
-level to help downstream tools surface diagnostics properly.
+The Diagnostic struct does not have a field to indicate its severity
+because opinions about the relative importance of Analyzers and their
+diagnostics vary widely among users. The design of this framework does
+not hold each Analyzer responsible for identifying the severity of its
+diagnostics. Instead, we expect that drivers will allow the user to
+customize the filtering and prioritization of diagnostics based on the
+producing Analyzer and optional Category, according to the user's
+preferences.
Most Analyzers inspect typed Go syntax trees, but a few, such as asmdecl
and buildtag, inspect the raw text of Go source files or even non-Go
@@ -202,8 +198,7 @@ raw text file, use the following sequence:
...
pass.Reportf(tf.LineStart(line), "oops")
-
-Modular analysis with Facts
+# Modular analysis with Facts
To improve efficiency and scalability, large programs are routinely
built using separate compilation: units of the program are compiled
@@ -246,6 +241,12 @@ Consequently, Facts must be serializable. The API requires that drivers
use the gob encoding, an efficient, robust, self-describing binary
protocol. A fact type may implement the GobEncoder/GobDecoder interfaces
if the default encoding is unsuitable. Facts should be stateless.
+Because serialized facts may appear within build outputs, the gob encoding
+of a fact must be deterministic, to avoid spurious cache misses in
+build systems that use content-addressable caches.
+The driver makes a single call to the gob encoder for all facts
+exported by a given analysis pass, so that the topology of
+shared data structures referenced by multiple facts is preserved.
The Pass type has functions to import and export facts,
associated either with an object or with a package:
@@ -280,8 +281,7 @@ this fact is built in to the analyzer so that it correctly checks
calls to log.Printf even when run in a driver that does not apply
it to standard packages. We would like to remove this limitation in future.
-
-Testing an Analyzer
+# Testing an Analyzer
The analysistest subpackage provides utilities for testing an Analyzer.
In a few lines of code, it is possible to run an analyzer on a package
@@ -289,8 +289,7 @@ of testdata files and check that it reported all the expected
diagnostics and facts (and no more). Expectations are expressed using
"// want ..." comments in the input code.
-
-Standalone commands
+# Standalone commands
Analyzers are provided in the form of packages that a driver program is
expected to import. The vet command imports a set of several analyzers,
@@ -301,7 +300,7 @@ singlechecker and multichecker subpackages.
The singlechecker package provides the main function for a command that
runs one analyzer. By convention, each analyzer such as
-go/passes/findcall should be accompanied by a singlechecker-based
+go/analysis/passes/findcall should be accompanied by a singlechecker-based
command such as go/analysis/passes/findcall/cmd/findcall, defined in its
entirety as:
@@ -316,6 +315,5 @@ entirety as:
A tool that provides multiple analyzers can use multichecker in a
similar way, giving it the list of Analyzers.
-
*/
package analysis
diff --git a/go/analysis/internal/analysisflags/flags.go b/go/analysis/internal/analysisflags/flags.go
index 4b7be2d1f..e127a42b9 100644
--- a/go/analysis/internal/analysisflags/flags.go
+++ b/go/analysis/internal/analysisflags/flags.go
@@ -206,7 +206,7 @@ func (versionFlag) Get() interface{} { return nil }
func (versionFlag) String() string { return "" }
func (versionFlag) Set(s string) error {
if s != "full" {
- log.Fatalf("unsupported flag value: -V=%s", s)
+ log.Fatalf("unsupported flag value: -V=%s (use -V=full)", s)
}
// This replicates the minimal subset of
@@ -218,7 +218,10 @@ func (versionFlag) Set(s string) error {
// Formats:
// $progname version devel ... buildID=...
// $progname version go1.9.1
- progname := os.Args[0]
+ progname, err := os.Executable()
+ if err != nil {
+ return err
+ }
f, err := os.Open(progname)
if err != nil {
log.Fatal(err)
@@ -339,9 +342,38 @@ func PrintPlain(fset *token.FileSet, diag analysis.Diagnostic) {
}
// A JSONTree is a mapping from package ID to analysis name to result.
-// Each result is either a jsonError or a list of jsonDiagnostic.
+// Each result is either a jsonError or a list of JSONDiagnostic.
type JSONTree map[string]map[string]interface{}
+// A TextEdit describes the replacement of a portion of a file.
+// Start and End are zero-based half-open indices into the original byte
+// sequence of the file, and New is the new text.
+type JSONTextEdit struct {
+ Filename string `json:"filename"`
+ Start int `json:"start"`
+ End int `json:"end"`
+ New string `json:"new"`
+}
+
+// A JSONSuggestedFix describes an edit that should be applied as a whole or not
+// at all. It might contain multiple TextEdits/text_edits if the SuggestedFix
+// consists of multiple non-contiguous edits.
+type JSONSuggestedFix struct {
+ Message string `json:"message"`
+ Edits []JSONTextEdit `json:"edits"`
+}
+
+// A JSONDiagnostic can be used to encode and decode analysis.Diagnostics to and
+// from JSON.
+// TODO(matloob): Should the JSON diagnostics contain ranges?
+// If so, how should they be formatted?
+type JSONDiagnostic struct {
+ Category string `json:"category,omitempty"`
+ Posn string `json:"posn"`
+ Message string `json:"message"`
+ SuggestedFixes []JSONSuggestedFix `json:"suggested_fixes,omitempty"`
+}
+
// Add adds the result of analysis 'name' on package 'id'.
// The result is either a list of diagnostics or an error.
func (tree JSONTree) Add(fset *token.FileSet, id, name string, diags []analysis.Diagnostic, err error) {
@@ -352,20 +384,31 @@ func (tree JSONTree) Add(fset *token.FileSet, id, name string, diags []analysis.
}
v = jsonError{err.Error()}
} else if len(diags) > 0 {
- type jsonDiagnostic struct {
- Category string `json:"category,omitempty"`
- Posn string `json:"posn"`
- Message string `json:"message"`
- }
- var diagnostics []jsonDiagnostic
- // TODO(matloob): Should the JSON diagnostics contain ranges?
- // If so, how should they be formatted?
+ diagnostics := make([]JSONDiagnostic, 0, len(diags))
for _, f := range diags {
- diagnostics = append(diagnostics, jsonDiagnostic{
- Category: f.Category,
- Posn: fset.Position(f.Pos).String(),
- Message: f.Message,
- })
+ var fixes []JSONSuggestedFix
+ for _, fix := range f.SuggestedFixes {
+ var edits []JSONTextEdit
+ for _, edit := range fix.TextEdits {
+ edits = append(edits, JSONTextEdit{
+ Filename: fset.Position(edit.Pos).Filename,
+ Start: fset.Position(edit.Pos).Offset,
+ End: fset.Position(edit.End).Offset,
+ New: string(edit.NewText),
+ })
+ }
+ fixes = append(fixes, JSONSuggestedFix{
+ Message: fix.Message,
+ Edits: edits,
+ })
+ }
+ jdiag := JSONDiagnostic{
+ Category: f.Category,
+ Posn: fset.Position(f.Pos).String(),
+ Message: f.Message,
+ SuggestedFixes: fixes,
+ }
+ diagnostics = append(diagnostics, jdiag)
}
v = diagnostics
}
diff --git a/go/analysis/internal/analysisflags/flags_test.go b/go/analysis/internal/analysisflags/flags_test.go
index 1f055dde7..b5cfb3d44 100644
--- a/go/analysis/internal/analysisflags/flags_test.go
+++ b/go/analysis/internal/analysisflags/flags_test.go
@@ -42,7 +42,7 @@ func TestExec(t *testing.T) {
for _, test := range []struct {
flags string
- want string
+ want string // output should contain want
}{
{"", "[a1 a2 a3]"},
{"-a1=0", "[a2 a3]"},
@@ -50,6 +50,7 @@ func TestExec(t *testing.T) {
{"-a1", "[a1]"},
{"-a1=1 -a3=1", "[a1 a3]"},
{"-a1=1 -a3=0", "[a1]"},
+ {"-V=full", "analysisflags.test version devel"},
} {
cmd := exec.Command(progname, "-test.run=TestExec")
cmd.Env = append(os.Environ(), "ANALYSISFLAGS_CHILD=1", "FLAGS="+test.flags)
@@ -60,8 +61,8 @@ func TestExec(t *testing.T) {
}
got := strings.TrimSpace(string(output))
- if got != test.want {
- t.Errorf("got %s, want %s", got, test.want)
+ if !strings.Contains(got, test.want) {
+ t.Errorf("got %q, does not contain %q", got, test.want)
}
}
}
diff --git a/go/analysis/internal/checker/checker.go b/go/analysis/internal/checker/checker.go
index e405a2ae1..5346acd76 100644
--- a/go/analysis/internal/checker/checker.go
+++ b/go/analysis/internal/checker/checker.go
@@ -15,7 +15,6 @@ import (
"flag"
"fmt"
"go/format"
- "go/parser"
"go/token"
"go/types"
"io/ioutil"
@@ -33,8 +32,8 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/internal/analysisflags"
"golang.org/x/tools/go/packages"
- "golang.org/x/tools/internal/analysisinternal"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/internal/diff"
+ "golang.org/x/tools/internal/robustio"
)
var (
@@ -51,6 +50,9 @@ var (
// Log files for optional performance tracing.
CPUProfile, MemProfile, Trace string
+ // IncludeTests indicates whether test files should be analyzed too.
+ IncludeTests = true
+
// Fix determines whether to apply all suggested fixes.
Fix bool
)
@@ -65,6 +67,7 @@ func RegisterFlags() {
flag.StringVar(&CPUProfile, "cpuprofile", "", "write CPU profile to this file")
flag.StringVar(&MemProfile, "memprofile", "", "write memory profile to this file")
flag.StringVar(&Trace, "trace", "", "write trace log to this file")
+ flag.BoolVar(&IncludeTests, "test", IncludeTests, "indicates whether test files should be analyzed, too")
flag.BoolVar(&Fix, "fix", false, "apply all suggested fixes")
}
@@ -143,7 +146,11 @@ func Run(args []string, analyzers []*analysis.Analyzer) (exitcode int) {
roots := analyze(initial, analyzers)
if Fix {
- applyFixes(roots)
+ if err := applyFixes(roots); err != nil {
+ // Fail when applying fixes failed.
+ log.Print(err)
+ return 1
+ }
}
return printDiagnostics(roots)
}
@@ -163,7 +170,7 @@ func load(patterns []string, allSyntax bool) ([]*packages.Package, error) {
}
conf := packages.Config{
Mode: mode,
- Tests: true,
+ Tests: IncludeTests,
}
initial, err := packages.Load(&conf, patterns...)
if err == nil {
@@ -301,7 +308,10 @@ func analyze(pkgs []*packages.Package, analyzers []*analysis.Analyzer) []*action
return roots
}
-func applyFixes(roots []*action) {
+func applyFixes(roots []*action) error {
+ // visit all of the actions and accumulate the suggested edits.
+ paths := make(map[robustio.FileID]string)
+ editsByAction := make(map[robustio.FileID]map[*action][]diff.Edit)
visited := make(map[*action]bool)
var apply func(*action) error
var visitAll func(actions []*action) error
@@ -309,7 +319,9 @@ func applyFixes(roots []*action) {
for _, act := range actions {
if !visited[act] {
visited[act] = true
- visitAll(act.deps)
+ if err := visitAll(act.deps); err != nil {
+ return err
+ }
if err := apply(act); err != nil {
return err
}
@@ -318,116 +330,167 @@ func applyFixes(roots []*action) {
return nil
}
- // TODO(matloob): Is this tree business too complicated? (After all this is Go!)
- // Just create a set (map) of edits, sort by pos and call it a day?
- type offsetedit struct {
- start, end int
- newText []byte
- } // TextEdit using byteOffsets instead of pos
- type node struct {
- edit offsetedit
- left, right *node
- }
-
- var insert func(tree **node, edit offsetedit) error
- insert = func(treeptr **node, edit offsetedit) error {
- if *treeptr == nil {
- *treeptr = &node{edit, nil, nil}
- return nil
- }
- tree := *treeptr
- if edit.end <= tree.edit.start {
- return insert(&tree.left, edit)
- } else if edit.start >= tree.edit.end {
- return insert(&tree.right, edit)
- }
-
- // Overlapping text edit.
- return fmt.Errorf("analyses applying overlapping text edits affecting pos range (%v, %v) and (%v, %v)",
- edit.start, edit.end, tree.edit.start, tree.edit.end)
-
- }
-
- editsForFile := make(map[*token.File]*node)
-
apply = func(act *action) error {
+ editsForTokenFile := make(map[*token.File][]diff.Edit)
for _, diag := range act.diagnostics {
for _, sf := range diag.SuggestedFixes {
for _, edit := range sf.TextEdits {
// Validate the edit.
+ // Any error here indicates a bug in the analyzer.
+ file := act.pkg.Fset.File(edit.Pos)
+ if file == nil {
+ return fmt.Errorf("analysis %q suggests invalid fix: missing file info for pos (%v)",
+ act.a.Name, edit.Pos)
+ }
if edit.Pos > edit.End {
- return fmt.Errorf(
- "diagnostic for analysis %v contains Suggested Fix with malformed edit: pos (%v) > end (%v)",
+ return fmt.Errorf("analysis %q suggests invalid fix: pos (%v) > end (%v)",
act.a.Name, edit.Pos, edit.End)
}
- file, endfile := act.pkg.Fset.File(edit.Pos), act.pkg.Fset.File(edit.End)
- if file == nil || endfile == nil || file != endfile {
- return (fmt.Errorf(
- "diagnostic for analysis %v contains Suggested Fix with malformed spanning files %v and %v",
- act.a.Name, file.Name(), endfile.Name()))
+ if eof := token.Pos(file.Base() + file.Size()); edit.End > eof {
+ return fmt.Errorf("analysis %q suggests invalid fix: end (%v) past end of file (%v)",
+ act.a.Name, edit.End, eof)
}
- start, end := file.Offset(edit.Pos), file.Offset(edit.End)
-
- // TODO(matloob): Validate that edits do not affect other packages.
- root := editsForFile[file]
- if err := insert(&root, offsetedit{start, end, edit.NewText}); err != nil {
- return err
- }
- editsForFile[file] = root // In case the root changed
+ edit := diff.Edit{Start: file.Offset(edit.Pos), End: file.Offset(edit.End), New: string(edit.NewText)}
+ editsForTokenFile[file] = append(editsForTokenFile[file], edit)
}
}
}
+
+ for f, edits := range editsForTokenFile {
+ id, _, err := robustio.GetFileID(f.Name())
+ if err != nil {
+ return err
+ }
+ if _, hasId := paths[id]; !hasId {
+ paths[id] = f.Name()
+ editsByAction[id] = make(map[*action][]diff.Edit)
+ }
+ editsByAction[id][act] = edits
+ }
return nil
}
- visitAll(roots)
+ if err := visitAll(roots); err != nil {
+ return err
+ }
- fset := token.NewFileSet() // Shared by parse calls below
- // Now we've got a set of valid edits for each file. Get the new file contents.
- for f, tree := range editsForFile {
- contents, err := ioutil.ReadFile(f.Name())
- if err != nil {
- log.Fatal(err)
+ // Validate and group the edits to each actual file.
+ editsByPath := make(map[string][]diff.Edit)
+ for id, actToEdits := range editsByAction {
+ path := paths[id]
+ actions := make([]*action, 0, len(actToEdits))
+ for act := range actToEdits {
+ actions = append(actions, act)
}
- cur := 0 // current position in the file
-
- var out bytes.Buffer
-
- var recurse func(*node)
- recurse = func(node *node) {
- if node.left != nil {
- recurse(node.left)
+ // Does any action create conflicting edits?
+ for _, act := range actions {
+ edits := actToEdits[act]
+ if _, invalid := validateEdits(edits); invalid > 0 {
+ name, x, y := act.a.Name, edits[invalid-1], edits[invalid]
+ return diff3Conflict(path, name, name, []diff.Edit{x}, []diff.Edit{y})
}
+ }
- edit := node.edit
- if edit.start > cur {
- out.Write(contents[cur:edit.start])
- out.Write(edit.newText)
+ // Does any pair of different actions create edits that conflict?
+ for j := range actions {
+ for k := range actions[:j] {
+ x, y := actions[j], actions[k]
+ if x.a.Name > y.a.Name {
+ x, y = y, x
+ }
+ xedits, yedits := actToEdits[x], actToEdits[y]
+ combined := append(xedits, yedits...)
+ if _, invalid := validateEdits(combined); invalid > 0 {
+ // TODO: consider applying each action's consistent list of edits entirely,
+ // and then using a three-way merge (such as GNU diff3) on the resulting
+ // files to report more precisely the parts that actually conflict.
+ return diff3Conflict(path, x.a.Name, y.a.Name, xedits, yedits)
+ }
}
- cur = edit.end
+ }
- if node.right != nil {
- recurse(node.right)
- }
+ var edits []diff.Edit
+ for act := range actToEdits {
+ edits = append(edits, actToEdits[act]...)
}
- recurse(tree)
- // Write out the rest of the file.
- if cur < len(contents) {
- out.Write(contents[cur:])
+ editsByPath[path], _ = validateEdits(edits) // remove duplicates. already validated.
+ }
+
+ // Now we've got a set of valid edits for each file. Apply them.
+ for path, edits := range editsByPath {
+ contents, err := ioutil.ReadFile(path)
+ if err != nil {
+ return err
+ }
+
+ out, err := diff.ApplyBytes(contents, edits)
+ if err != nil {
+ return err
}
// Try to format the file.
- ff, err := parser.ParseFile(fset, f.Name(), out.Bytes(), parser.ParseComments)
- if err == nil {
- var buf bytes.Buffer
- if err = format.Node(&buf, fset, ff); err == nil {
- out = buf
+ if formatted, err := format.Source(out); err == nil {
+ out = formatted
+ }
+
+ if err := ioutil.WriteFile(path, out, 0644); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// validateEdits returns a list of edits that is sorted and
+// contains no duplicate edits. Returns the index of some
+// overlapping adjacent edits if there is one and <0 if the
+// edits are valid.
+func validateEdits(edits []diff.Edit) ([]diff.Edit, int) {
+ if len(edits) == 0 {
+ return nil, -1
+ }
+ equivalent := func(x, y diff.Edit) bool {
+ return x.Start == y.Start && x.End == y.End && x.New == y.New
+ }
+ diff.SortEdits(edits)
+ unique := []diff.Edit{edits[0]}
+ invalid := -1
+ for i := 1; i < len(edits); i++ {
+ prev, cur := edits[i-1], edits[i]
+ // We skip over equivalent edits without considering them
+ // an error. This handles identical edits coming from the
+ // multiple ways of loading a package into a
+ // *go/packages.Packages for testing, e.g. packages "p" and "p [p.test]".
+ if !equivalent(prev, cur) {
+ unique = append(unique, cur)
+ if prev.End > cur.Start {
+ invalid = i
}
}
+ }
+ return unique, invalid
+}
- ioutil.WriteFile(f.Name(), out.Bytes(), 0644)
+// diff3Conflict returns an error describing two conflicting sets of
+// edits on a file at path.
+func diff3Conflict(path string, xlabel, ylabel string, xedits, yedits []diff.Edit) error {
+ contents, err := ioutil.ReadFile(path)
+ if err != nil {
+ return err
}
+ oldlabel, old := "base", string(contents)
+
+ xdiff, err := diff.ToUnified(oldlabel, xlabel, old, xedits)
+ if err != nil {
+ return err
+ }
+ ydiff, err := diff.ToUnified(oldlabel, ylabel, old, yedits)
+ if err != nil {
+ return err
+ }
+
+ return fmt.Errorf("conflicting edits from %s and %s on %s\nfirst edits:\n%s\nsecond edits:\n%s",
+ xlabel, ylabel, path, xdiff, ydiff)
}
// printDiagnostics prints the diagnostics for the root packages in either
@@ -574,7 +637,6 @@ type action struct {
deps []*action
objectFacts map[objectFactKey]analysis.Fact
packageFacts map[packageFactKey]analysis.Fact
- inputs map[*analysis.Analyzer]interface{}
result interface{}
diagnostics []analysis.Diagnostic
err error
@@ -672,14 +734,16 @@ func (act *action) execOnce() {
// Run the analysis.
pass := &analysis.Pass{
- Analyzer: act.a,
- Fset: act.pkg.Fset,
- Files: act.pkg.Syntax,
- OtherFiles: act.pkg.OtherFiles,
- IgnoredFiles: act.pkg.IgnoredFiles,
- Pkg: act.pkg.Types,
- TypesInfo: act.pkg.TypesInfo,
- TypesSizes: act.pkg.TypesSizes,
+ Analyzer: act.a,
+ Fset: act.pkg.Fset,
+ Files: act.pkg.Syntax,
+ OtherFiles: act.pkg.OtherFiles,
+ IgnoredFiles: act.pkg.IgnoredFiles,
+ Pkg: act.pkg.Types,
+ TypesInfo: act.pkg.TypesInfo,
+ TypesSizes: act.pkg.TypesSizes,
+ TypeErrors: act.pkg.TypeErrors,
+
ResultOf: inputs,
Report: func(d analysis.Diagnostic) { act.diagnostics = append(act.diagnostics, d) },
ImportObjectFact: act.importObjectFact,
@@ -691,36 +755,6 @@ func (act *action) execOnce() {
}
act.pass = pass
- var errors []types.Error
- // Get any type errors that are attributed to the pkg.
- // This is necessary to test analyzers that provide
- // suggested fixes for compiler/type errors.
- for _, err := range act.pkg.Errors {
- if err.Kind != packages.TypeError {
- continue
- }
- // err.Pos is a string of form: "file:line:col" or "file:line" or "" or "-"
- spn := span.Parse(err.Pos)
- // Extract the token positions from the error string.
- line, col, offset := spn.Start().Line(), spn.Start().Column(), -1
- act.pkg.Fset.Iterate(func(f *token.File) bool {
- if f.Name() != spn.URI().Filename() {
- return true
- }
- offset = int(f.LineStart(line)) + col - 1
- return false
- })
- if offset == -1 {
- continue
- }
- errors = append(errors, types.Error{
- Fset: act.pkg.Fset,
- Msg: err.Msg,
- Pos: token.Pos(offset),
- })
- }
- analysisinternal.SetTypeErrors(pass, errors)
-
var err error
if act.pkg.IllTyped && !pass.Analyzer.RunDespiteErrors {
err = fmt.Errorf("analysis skipped due to errors in package")
@@ -762,7 +796,7 @@ func inheritFacts(act, dep *action) {
if serialize {
encodedFact, err := codeFact(fact)
if err != nil {
- log.Panicf("internal error: encoding of %T fact failed in %v", fact, act)
+ log.Panicf("internal error: encoding of %T fact failed in %v: %v", fact, act, err)
}
fact = encodedFact
}
@@ -826,7 +860,7 @@ func codeFact(fact analysis.Fact) (analysis.Fact, error) {
// exportedFrom reports whether obj may be visible to a package that imports pkg.
// This includes not just the exported members of pkg, but also unexported
-// constants, types, fields, and methods, perhaps belonging to oether packages,
+// constants, types, fields, and methods, perhaps belonging to other packages,
// that find there way into the API.
// This is an overapproximation of the more accurate approach used by
// gc export data, which walks the type graph, but it's much simpler.
@@ -890,7 +924,7 @@ func (act *action) exportObjectFact(obj types.Object, fact analysis.Fact) {
func (act *action) allObjectFacts() []analysis.ObjectFact {
facts := make([]analysis.ObjectFact, 0, len(act.objectFacts))
for k := range act.objectFacts {
- facts = append(facts, analysis.ObjectFact{k.obj, act.objectFacts[k]})
+ facts = append(facts, analysis.ObjectFact{Object: k.obj, Fact: act.objectFacts[k]})
}
return facts
}
@@ -932,11 +966,11 @@ func factType(fact analysis.Fact) reflect.Type {
return t
}
-// allObjectFacts implements Pass.AllObjectFacts.
+// allPackageFacts implements Pass.AllPackageFacts.
func (act *action) allPackageFacts() []analysis.PackageFact {
facts := make([]analysis.PackageFact, 0, len(act.packageFacts))
for k := range act.packageFacts {
- facts = append(facts, analysis.PackageFact{k.pkg, act.packageFacts[k]})
+ facts = append(facts, analysis.PackageFact{Package: k.pkg, Fact: act.packageFacts[k]})
}
return facts
}
diff --git a/go/analysis/internal/checker/checker_test.go b/go/analysis/internal/checker/checker_test.go
index eee211c21..34acae81e 100644
--- a/go/analysis/internal/checker/checker_test.go
+++ b/go/analysis/internal/checker/checker_test.go
@@ -19,14 +19,9 @@ import (
"golang.org/x/tools/internal/testenv"
)
-var from, to string
-
func TestApplyFixes(t *testing.T) {
testenv.NeedsGoPackages(t)
- from = "bar"
- to = "baz"
-
files := map[string]string{
"rename/test.go": `package rename
@@ -74,26 +69,55 @@ var analyzer = &analysis.Analyzer{
Run: run,
}
+var other = &analysis.Analyzer{ // like analyzer but with a different Name.
+ Name: "other",
+ Requires: []*analysis.Analyzer{inspect.Analyzer},
+ Run: run,
+}
+
func run(pass *analysis.Pass) (interface{}, error) {
+ const (
+ from = "bar"
+ to = "baz"
+ conflict = "conflict" // add conflicting edits to package conflict.
+ duplicate = "duplicate" // add duplicate edits to package conflict.
+ other = "other" // add conflicting edits to package other from different analyzers.
+ )
+
+ if pass.Analyzer.Name == other {
+ if pass.Pkg.Name() != other {
+ return nil, nil // only apply Analyzer other to packages named other
+ }
+ }
+
inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
nodeFilter := []ast.Node{(*ast.Ident)(nil)}
inspect.Preorder(nodeFilter, func(n ast.Node) {
ident := n.(*ast.Ident)
if ident.Name == from {
msg := fmt.Sprintf("renaming %q to %q", from, to)
+ edits := []analysis.TextEdit{
+ {Pos: ident.Pos(), End: ident.End(), NewText: []byte(to)},
+ }
+ switch pass.Pkg.Name() {
+ case conflict:
+ edits = append(edits, []analysis.TextEdit{
+ {Pos: ident.Pos() - 1, End: ident.End(), NewText: []byte(to)},
+ {Pos: ident.Pos(), End: ident.End() - 1, NewText: []byte(to)},
+ {Pos: ident.Pos(), End: ident.End(), NewText: []byte("lorem ipsum")},
+ }...)
+ case duplicate:
+ edits = append(edits, edits...)
+ case other:
+ if pass.Analyzer.Name == other {
+ edits[0].Pos = edits[0].Pos + 1 // shift by one to mismatch analyzer and other
+ }
+ }
pass.Report(analysis.Diagnostic{
- Pos: ident.Pos(),
- End: ident.End(),
- Message: msg,
- SuggestedFixes: []analysis.SuggestedFix{{
- Message: msg,
- TextEdits: []analysis.TextEdit{{
- Pos: ident.Pos(),
- End: ident.End(),
- NewText: []byte(to),
- }},
- }},
- })
+ Pos: ident.Pos(),
+ End: ident.End(),
+ Message: msg,
+ SuggestedFixes: []analysis.SuggestedFix{{Message: msg, TextEdits: edits}}})
}
})
@@ -129,6 +153,18 @@ func Foo(s string) int {
RunDespiteErrors: true,
}
+ // A no-op analyzer that should finish regardless of
+ // parse or type errors in the code.
+ noopWithFact := &analysis.Analyzer{
+ Name: "noopfact",
+ Requires: []*analysis.Analyzer{inspect.Analyzer},
+ Run: func(pass *analysis.Pass) (interface{}, error) {
+ return nil, nil
+ },
+ RunDespiteErrors: true,
+ FactTypes: []analysis.Fact{&EmptyFact{}},
+ }
+
for _, test := range []struct {
name string
pattern []string
@@ -137,7 +173,17 @@ func Foo(s string) int {
}{
// parse/type errors
{name: "skip-error", pattern: []string{"file=" + path}, analyzers: []*analysis.Analyzer{analyzer}, code: 1},
- {name: "despite-error", pattern: []string{"file=" + path}, analyzers: []*analysis.Analyzer{noop}, code: 0},
+ // RunDespiteErrors allows a driver to run an Analyzer even after parse/type errors.
+ //
+ // The noop analyzer doesn't use facts, so the driver loads only the root
+ // package from source. For the rest, it asks 'go list' for export data,
+ // which fails because the compiler encounters the type error. Since the
+ // errors come from 'go list', the driver doesn't run the analyzer.
+ {name: "despite-error", pattern: []string{"file=" + path}, analyzers: []*analysis.Analyzer{noop}, code: 1},
+ // The noopfact analyzer does use facts, so the driver loads source for
+ // all dependencies, does type checking itself, recognizes the error as a
+ // type error, and runs the analyzer.
+ {name: "despite-error-fact", pattern: []string{"file=" + path}, analyzers: []*analysis.Analyzer{noopWithFact}, code: 0},
// combination of parse/type errors and no errors
{name: "despite-error-and-no-error", pattern: []string{"file=" + path, "sort"}, analyzers: []*analysis.Analyzer{analyzer, noop}, code: 1},
// non-existing package error
@@ -151,6 +197,10 @@ func Foo(s string) int {
// no errors
{name: "no-errors", pattern: []string{"sort"}, analyzers: []*analysis.Analyzer{analyzer, noop}, code: 0},
} {
+ if test.name == "despite-error" && testenv.Go1Point() < 20 {
+ // The behavior in the comment on the despite-error test only occurs for Go 1.20+.
+ continue
+ }
if got := checker.Run(test.pattern, test.analyzers); got != test.code {
t.Errorf("got incorrect exit code %d for test %s; want %d", got, test.name, test.code)
}
@@ -158,3 +208,7 @@ func Foo(s string) int {
defer cleanup()
}
+
+type EmptyFact struct{}
+
+func (f *EmptyFact) AFact() {}
diff --git a/go/analysis/internal/checker/fix_test.go b/go/analysis/internal/checker/fix_test.go
new file mode 100644
index 000000000..3ea92b38c
--- /dev/null
+++ b/go/analysis/internal/checker/fix_test.go
@@ -0,0 +1,309 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package checker_test
+
+import (
+ "flag"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path"
+ "regexp"
+ "runtime"
+ "testing"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/analysistest"
+ "golang.org/x/tools/go/analysis/internal/checker"
+ "golang.org/x/tools/internal/testenv"
+)
+
+func main() {
+ checker.Fix = true
+ patterns := flag.Args()
+
+ code := checker.Run(patterns, []*analysis.Analyzer{analyzer, other})
+ os.Exit(code)
+}
+
+// TestFixes ensures that checker.Run applies fixes correctly.
+// This test fork/execs the main function above.
+func TestFixes(t *testing.T) {
+ oses := map[string]bool{"darwin": true, "linux": true}
+ if !oses[runtime.GOOS] {
+ t.Skipf("skipping fork/exec test on this platform")
+ }
+
+ if os.Getenv("TESTFIXES_CHILD") == "1" {
+ // child process
+
+ // replace [progname -test.run=TestFixes -- ...]
+ // by [progname ...]
+ os.Args = os.Args[2:]
+ os.Args[0] = "vet"
+ main()
+ panic("unreachable")
+ }
+
+ testenv.NeedsTool(t, "go")
+
+ files := map[string]string{
+ "rename/foo.go": `package rename
+
+func Foo() {
+ bar := 12
+ _ = bar
+}
+
+// the end
+`,
+ "rename/intestfile_test.go": `package rename
+
+func InTestFile() {
+ bar := 13
+ _ = bar
+}
+
+// the end
+`,
+ "rename/foo_test.go": `package rename_test
+
+func Foo() {
+ bar := 14
+ _ = bar
+}
+
+// the end
+`,
+ "duplicate/dup.go": `package duplicate
+
+func Foo() {
+ bar := 14
+ _ = bar
+}
+
+// the end
+`,
+ }
+ fixed := map[string]string{
+ "rename/foo.go": `package rename
+
+func Foo() {
+ baz := 12
+ _ = baz
+}
+
+// the end
+`,
+ "rename/intestfile_test.go": `package rename
+
+func InTestFile() {
+ baz := 13
+ _ = baz
+}
+
+// the end
+`,
+ "rename/foo_test.go": `package rename_test
+
+func Foo() {
+ baz := 14
+ _ = baz
+}
+
+// the end
+`,
+ "duplicate/dup.go": `package duplicate
+
+func Foo() {
+ baz := 14
+ _ = baz
+}
+
+// the end
+`,
+ }
+ dir, cleanup, err := analysistest.WriteFiles(files)
+ if err != nil {
+ t.Fatalf("Creating test files failed with %s", err)
+ }
+ defer cleanup()
+
+ args := []string{"-test.run=TestFixes", "--", "rename", "duplicate"}
+ cmd := exec.Command(os.Args[0], args...)
+ cmd.Env = append(os.Environ(), "TESTFIXES_CHILD=1", "GOPATH="+dir, "GO111MODULE=off", "GOPROXY=off")
+
+ out, err := cmd.CombinedOutput()
+ if len(out) > 0 {
+ t.Logf("%s: out=<<%s>>", args, out)
+ }
+ var exitcode int
+ if err, ok := err.(*exec.ExitError); ok {
+ exitcode = err.ExitCode() // requires go1.12
+ }
+
+ const diagnosticsExitCode = 3
+ if exitcode != diagnosticsExitCode {
+ t.Errorf("%s: exited %d, want %d", args, exitcode, diagnosticsExitCode)
+ }
+
+ for name, want := range fixed {
+ path := path.Join(dir, "src", name)
+ contents, err := ioutil.ReadFile(path)
+ if err != nil {
+ t.Errorf("error reading %s: %v", path, err)
+ }
+ if got := string(contents); got != want {
+ t.Errorf("contents of %s file did not match expectations. got=%s, want=%s", path, got, want)
+ }
+ }
+}
+
+// TestConflict ensures that checker.Run detects conflicts correctly.
+// This test fork/execs the main function above.
+func TestConflict(t *testing.T) {
+ oses := map[string]bool{"darwin": true, "linux": true}
+ if !oses[runtime.GOOS] {
+ t.Skipf("skipping fork/exec test on this platform")
+ }
+
+ if os.Getenv("TESTCONFLICT_CHILD") == "1" {
+ // child process
+
+ // replace [progname -test.run=TestConflict -- ...]
+ // by [progname ...]
+ os.Args = os.Args[2:]
+ os.Args[0] = "vet"
+ main()
+ panic("unreachable")
+ }
+
+ testenv.NeedsTool(t, "go")
+
+ files := map[string]string{
+ "conflict/foo.go": `package conflict
+
+func Foo() {
+ bar := 12
+ _ = bar
+}
+
+// the end
+`,
+ }
+ dir, cleanup, err := analysistest.WriteFiles(files)
+ if err != nil {
+ t.Fatalf("Creating test files failed with %s", err)
+ }
+ defer cleanup()
+
+ args := []string{"-test.run=TestConflict", "--", "conflict"}
+ cmd := exec.Command(os.Args[0], args...)
+ cmd.Env = append(os.Environ(), "TESTCONFLICT_CHILD=1", "GOPATH="+dir, "GO111MODULE=off", "GOPROXY=off")
+
+ out, err := cmd.CombinedOutput()
+ var exitcode int
+ if err, ok := err.(*exec.ExitError); ok {
+ exitcode = err.ExitCode() // requires go1.12
+ }
+ const errExitCode = 1
+ if exitcode != errExitCode {
+ t.Errorf("%s: exited %d, want %d", args, exitcode, errExitCode)
+ }
+
+ pattern := `conflicting edits from rename and rename on /.*/conflict/foo.go`
+ matched, err := regexp.Match(pattern, out)
+ if err != nil {
+ t.Errorf("error matching pattern %s: %v", pattern, err)
+ } else if !matched {
+ t.Errorf("%s: output was=<<%s>>. Expected it to match <<%s>>", args, out, pattern)
+ }
+
+ // No files updated
+ for name, want := range files {
+ path := path.Join(dir, "src", name)
+ contents, err := ioutil.ReadFile(path)
+ if err != nil {
+ t.Errorf("error reading %s: %v", path, err)
+ }
+ if got := string(contents); got != want {
+ t.Errorf("contents of %s file updated. got=%s, want=%s", path, got, want)
+ }
+ }
+}
+
+// TestOther ensures that checker.Run reports conflicts from
+// distinct actions correctly.
+// This test fork/execs the main function above.
+func TestOther(t *testing.T) {
+ oses := map[string]bool{"darwin": true, "linux": true}
+ if !oses[runtime.GOOS] {
+ t.Skipf("skipping fork/exec test on this platform")
+ }
+
+ if os.Getenv("TESTOTHER_CHILD") == "1" {
+ // child process
+
+ // replace [progname -test.run=TestOther -- ...]
+ // by [progname ...]
+ os.Args = os.Args[2:]
+ os.Args[0] = "vet"
+ main()
+ panic("unreachable")
+ }
+
+ testenv.NeedsTool(t, "go")
+
+ files := map[string]string{
+ "other/foo.go": `package other
+
+func Foo() {
+ bar := 12
+ _ = bar
+}
+
+// the end
+`,
+ }
+ dir, cleanup, err := analysistest.WriteFiles(files)
+ if err != nil {
+ t.Fatalf("Creating test files failed with %s", err)
+ }
+ defer cleanup()
+
+ args := []string{"-test.run=TestOther", "--", "other"}
+ cmd := exec.Command(os.Args[0], args...)
+ cmd.Env = append(os.Environ(), "TESTOTHER_CHILD=1", "GOPATH="+dir, "GO111MODULE=off", "GOPROXY=off")
+
+ out, err := cmd.CombinedOutput()
+ var exitcode int
+ if err, ok := err.(*exec.ExitError); ok {
+ exitcode = err.ExitCode() // requires go1.12
+ }
+ const errExitCode = 1
+ if exitcode != errExitCode {
+ t.Errorf("%s: exited %d, want %d", args, exitcode, errExitCode)
+ }
+
+ pattern := `conflicting edits from other and rename on /.*/other/foo.go`
+ matched, err := regexp.Match(pattern, out)
+ if err != nil {
+ t.Errorf("error matching pattern %s: %v", pattern, err)
+ } else if !matched {
+ t.Errorf("%s: output was=<<%s>>. Expected it to match <<%s>>", args, out, pattern)
+ }
+
+ // No files updated
+ for name, want := range files {
+ path := path.Join(dir, "src", name)
+ contents, err := ioutil.ReadFile(path)
+ if err != nil {
+ t.Errorf("error reading %s: %v", path, err)
+ }
+ if got := string(contents); got != want {
+ t.Errorf("contents of %s file updated. got=%s, want=%s", path, got, want)
+ }
+ }
+}
diff --git a/go/analysis/internal/checker/start_test.go b/go/analysis/internal/checker/start_test.go
new file mode 100644
index 000000000..ede21159b
--- /dev/null
+++ b/go/analysis/internal/checker/start_test.go
@@ -0,0 +1,85 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package checker_test
+
+import (
+ "go/ast"
+ "io/ioutil"
+ "path/filepath"
+ "testing"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/analysistest"
+ "golang.org/x/tools/go/analysis/internal/checker"
+ "golang.org/x/tools/go/analysis/passes/inspect"
+ "golang.org/x/tools/go/ast/inspector"
+ "golang.org/x/tools/internal/testenv"
+)
+
+// TestStartFixes make sure modifying the first character
+// of the file takes effect.
+func TestStartFixes(t *testing.T) {
+ testenv.NeedsGoPackages(t)
+
+ files := map[string]string{
+ "comment/doc.go": `/* Package comment */
+package comment
+`}
+
+ want := `// Package comment
+package comment
+`
+
+ testdata, cleanup, err := analysistest.WriteFiles(files)
+ if err != nil {
+ t.Fatal(err)
+ }
+ path := filepath.Join(testdata, "src/comment/doc.go")
+ checker.Fix = true
+ checker.Run([]string{"file=" + path}, []*analysis.Analyzer{commentAnalyzer})
+
+ contents, err := ioutil.ReadFile(path)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ got := string(contents)
+ if got != want {
+ t.Errorf("contents of rewritten file\ngot: %s\nwant: %s", got, want)
+ }
+
+ defer cleanup()
+}
+
+var commentAnalyzer = &analysis.Analyzer{
+ Name: "comment",
+ Requires: []*analysis.Analyzer{inspect.Analyzer},
+ Run: commentRun,
+}
+
+func commentRun(pass *analysis.Pass) (interface{}, error) {
+ const (
+ from = "/* Package comment */"
+ to = "// Package comment"
+ )
+ inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+ inspect.Preorder(nil, func(n ast.Node) {
+ if n, ok := n.(*ast.Comment); ok && n.Text == from {
+ pass.Report(analysis.Diagnostic{
+ Pos: n.Pos(),
+ End: n.End(),
+ SuggestedFixes: []analysis.SuggestedFix{{
+ TextEdits: []analysis.TextEdit{{
+ Pos: n.Pos(),
+ End: n.End(),
+ NewText: []byte(to),
+ }},
+ }},
+ })
+ }
+ })
+
+ return nil, nil
+}
diff --git a/go/analysis/internal/facts/facts.go b/go/analysis/internal/facts/facts.go
deleted file mode 100644
index 1fb69c615..000000000
--- a/go/analysis/internal/facts/facts.go
+++ /dev/null
@@ -1,323 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package facts defines a serializable set of analysis.Fact.
-//
-// It provides a partial implementation of the Fact-related parts of the
-// analysis.Pass interface for use in analysis drivers such as "go vet"
-// and other build systems.
-//
-// The serial format is unspecified and may change, so the same version
-// of this package must be used for reading and writing serialized facts.
-//
-// The handling of facts in the analysis system parallels the handling
-// of type information in the compiler: during compilation of package P,
-// the compiler emits an export data file that describes the type of
-// every object (named thing) defined in package P, plus every object
-// indirectly reachable from one of those objects. Thus the downstream
-// compiler of package Q need only load one export data file per direct
-// import of Q, and it will learn everything about the API of package P
-// and everything it needs to know about the API of P's dependencies.
-//
-// Similarly, analysis of package P emits a fact set containing facts
-// about all objects exported from P, plus additional facts about only
-// those objects of P's dependencies that are reachable from the API of
-// package P; the downstream analysis of Q need only load one fact set
-// per direct import of Q.
-//
-// The notion of "exportedness" that matters here is that of the
-// compiler. According to the language spec, a method pkg.T.f is
-// unexported simply because its name starts with lowercase. But the
-// compiler must nonetheless export f so that downstream compilations can
-// accurately ascertain whether pkg.T implements an interface pkg.I
-// defined as interface{f()}. Exported thus means "described in export
-// data".
-//
-package facts
-
-import (
- "bytes"
- "encoding/gob"
- "fmt"
- "go/types"
- "io/ioutil"
- "log"
- "reflect"
- "sort"
- "sync"
-
- "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/types/objectpath"
-)
-
-const debug = false
-
-// A Set is a set of analysis.Facts.
-//
-// Decode creates a Set of facts by reading from the imports of a given
-// package, and Encode writes out the set. Between these operation,
-// the Import and Export methods will query and update the set.
-//
-// All of Set's methods except String are safe to call concurrently.
-type Set struct {
- pkg *types.Package
- mu sync.Mutex
- m map[key]analysis.Fact
-}
-
-type key struct {
- pkg *types.Package
- obj types.Object // (object facts only)
- t reflect.Type
-}
-
-// ImportObjectFact implements analysis.Pass.ImportObjectFact.
-func (s *Set) ImportObjectFact(obj types.Object, ptr analysis.Fact) bool {
- if obj == nil {
- panic("nil object")
- }
- key := key{pkg: obj.Pkg(), obj: obj, t: reflect.TypeOf(ptr)}
- s.mu.Lock()
- defer s.mu.Unlock()
- if v, ok := s.m[key]; ok {
- reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
- return true
- }
- return false
-}
-
-// ExportObjectFact implements analysis.Pass.ExportObjectFact.
-func (s *Set) ExportObjectFact(obj types.Object, fact analysis.Fact) {
- if obj.Pkg() != s.pkg {
- log.Panicf("in package %s: ExportObjectFact(%s, %T): can't set fact on object belonging another package",
- s.pkg, obj, fact)
- }
- key := key{pkg: obj.Pkg(), obj: obj, t: reflect.TypeOf(fact)}
- s.mu.Lock()
- s.m[key] = fact // clobber any existing entry
- s.mu.Unlock()
-}
-
-func (s *Set) AllObjectFacts(filter map[reflect.Type]bool) []analysis.ObjectFact {
- var facts []analysis.ObjectFact
- s.mu.Lock()
- for k, v := range s.m {
- if k.obj != nil && filter[k.t] {
- facts = append(facts, analysis.ObjectFact{Object: k.obj, Fact: v})
- }
- }
- s.mu.Unlock()
- return facts
-}
-
-// ImportPackageFact implements analysis.Pass.ImportPackageFact.
-func (s *Set) ImportPackageFact(pkg *types.Package, ptr analysis.Fact) bool {
- if pkg == nil {
- panic("nil package")
- }
- key := key{pkg: pkg, t: reflect.TypeOf(ptr)}
- s.mu.Lock()
- defer s.mu.Unlock()
- if v, ok := s.m[key]; ok {
- reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
- return true
- }
- return false
-}
-
-// ExportPackageFact implements analysis.Pass.ExportPackageFact.
-func (s *Set) ExportPackageFact(fact analysis.Fact) {
- key := key{pkg: s.pkg, t: reflect.TypeOf(fact)}
- s.mu.Lock()
- s.m[key] = fact // clobber any existing entry
- s.mu.Unlock()
-}
-
-func (s *Set) AllPackageFacts(filter map[reflect.Type]bool) []analysis.PackageFact {
- var facts []analysis.PackageFact
- s.mu.Lock()
- for k, v := range s.m {
- if k.obj == nil && filter[k.t] {
- facts = append(facts, analysis.PackageFact{Package: k.pkg, Fact: v})
- }
- }
- s.mu.Unlock()
- return facts
-}
-
-// gobFact is the Gob declaration of a serialized fact.
-type gobFact struct {
- PkgPath string // path of package
- Object objectpath.Path // optional path of object relative to package itself
- Fact analysis.Fact // type and value of user-defined Fact
-}
-
-// Decode decodes all the facts relevant to the analysis of package pkg.
-// The read function reads serialized fact data from an external source
-// for one of of pkg's direct imports. The empty file is a valid
-// encoding of an empty fact set.
-//
-// It is the caller's responsibility to call gob.Register on all
-// necessary fact types.
-func Decode(pkg *types.Package, read func(packagePath string) ([]byte, error)) (*Set, error) {
- // Compute the import map for this package.
- // See the package doc comment.
- packages := importMap(pkg.Imports())
-
- // Read facts from imported packages.
- // Facts may describe indirectly imported packages, or their objects.
- m := make(map[key]analysis.Fact) // one big bucket
- for _, imp := range pkg.Imports() {
- logf := func(format string, args ...interface{}) {
- if debug {
- prefix := fmt.Sprintf("in %s, importing %s: ",
- pkg.Path(), imp.Path())
- log.Print(prefix, fmt.Sprintf(format, args...))
- }
- }
-
- // Read the gob-encoded facts.
- data, err := read(imp.Path())
- if err != nil {
- return nil, fmt.Errorf("in %s, can't import facts for package %q: %v",
- pkg.Path(), imp.Path(), err)
- }
- if len(data) == 0 {
- continue // no facts
- }
- var gobFacts []gobFact
- if err := gob.NewDecoder(bytes.NewReader(data)).Decode(&gobFacts); err != nil {
- return nil, fmt.Errorf("decoding facts for %q: %v", imp.Path(), err)
- }
- if debug {
- logf("decoded %d facts: %v", len(gobFacts), gobFacts)
- }
-
- // Parse each one into a key and a Fact.
- for _, f := range gobFacts {
- factPkg := packages[f.PkgPath]
- if factPkg == nil {
- // Fact relates to a dependency that was
- // unused in this translation unit. Skip.
- logf("no package %q; discarding %v", f.PkgPath, f.Fact)
- continue
- }
- key := key{pkg: factPkg, t: reflect.TypeOf(f.Fact)}
- if f.Object != "" {
- // object fact
- obj, err := objectpath.Object(factPkg, f.Object)
- if err != nil {
- // (most likely due to unexported object)
- // TODO(adonovan): audit for other possibilities.
- logf("no object for path: %v; discarding %s", err, f.Fact)
- continue
- }
- key.obj = obj
- logf("read %T fact %s for %v", f.Fact, f.Fact, key.obj)
- } else {
- // package fact
- logf("read %T fact %s for %v", f.Fact, f.Fact, factPkg)
- }
- m[key] = f.Fact
- }
- }
-
- return &Set{pkg: pkg, m: m}, nil
-}
-
-// Encode encodes a set of facts to a memory buffer.
-//
-// It may fail if one of the Facts could not be gob-encoded, but this is
-// a sign of a bug in an Analyzer.
-func (s *Set) Encode() []byte {
-
- // TODO(adonovan): opt: use a more efficient encoding
- // that avoids repeating PkgPath for each fact.
-
- // Gather all facts, including those from imported packages.
- var gobFacts []gobFact
-
- s.mu.Lock()
- for k, fact := range s.m {
- if debug {
- log.Printf("%v => %s\n", k, fact)
- }
- var object objectpath.Path
- if k.obj != nil {
- path, err := objectpath.For(k.obj)
- if err != nil {
- if debug {
- log.Printf("discarding fact %s about %s\n", fact, k.obj)
- }
- continue // object not accessible from package API; discard fact
- }
- object = path
- }
- gobFacts = append(gobFacts, gobFact{
- PkgPath: k.pkg.Path(),
- Object: object,
- Fact: fact,
- })
- }
- s.mu.Unlock()
-
- // Sort facts by (package, object, type) for determinism.
- sort.Slice(gobFacts, func(i, j int) bool {
- x, y := gobFacts[i], gobFacts[j]
- if x.PkgPath != y.PkgPath {
- return x.PkgPath < y.PkgPath
- }
- if x.Object != y.Object {
- return x.Object < y.Object
- }
- tx := reflect.TypeOf(x.Fact)
- ty := reflect.TypeOf(y.Fact)
- if tx != ty {
- return tx.String() < ty.String()
- }
- return false // equal
- })
-
- var buf bytes.Buffer
- if len(gobFacts) > 0 {
- if err := gob.NewEncoder(&buf).Encode(gobFacts); err != nil {
- // Fact encoding should never fail. Identify the culprit.
- for _, gf := range gobFacts {
- if err := gob.NewEncoder(ioutil.Discard).Encode(gf); err != nil {
- fact := gf.Fact
- pkgpath := reflect.TypeOf(fact).Elem().PkgPath()
- log.Panicf("internal error: gob encoding of analysis fact %s failed: %v; please report a bug against fact %T in package %q",
- fact, err, fact, pkgpath)
- }
- }
- }
- }
-
- if debug {
- log.Printf("package %q: encode %d facts, %d bytes\n",
- s.pkg.Path(), len(gobFacts), buf.Len())
- }
-
- return buf.Bytes()
-}
-
-// String is provided only for debugging, and must not be called
-// concurrent with any Import/Export method.
-func (s *Set) String() string {
- var buf bytes.Buffer
- buf.WriteString("{")
- for k, f := range s.m {
- if buf.Len() > 1 {
- buf.WriteString(", ")
- }
- if k.obj != nil {
- buf.WriteString(k.obj.String())
- } else {
- buf.WriteString(k.pkg.Path())
- }
- fmt.Fprintf(&buf, ": %v", f)
- }
- buf.WriteString("}")
- return buf.String()
-}
diff --git a/go/analysis/internal/facts/facts_test.go b/go/analysis/internal/facts/facts_test.go
deleted file mode 100644
index 13c358230..000000000
--- a/go/analysis/internal/facts/facts_test.go
+++ /dev/null
@@ -1,384 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package facts_test
-
-import (
- "encoding/gob"
- "fmt"
- "go/token"
- "go/types"
- "os"
- "reflect"
- "testing"
-
- "golang.org/x/tools/go/analysis/analysistest"
- "golang.org/x/tools/go/analysis/internal/facts"
- "golang.org/x/tools/go/packages"
- "golang.org/x/tools/internal/testenv"
- "golang.org/x/tools/internal/typeparams"
-)
-
-type myFact struct {
- S string
-}
-
-func (f *myFact) String() string { return fmt.Sprintf("myFact(%s)", f.S) }
-func (f *myFact) AFact() {}
-
-func init() {
- gob.Register(new(myFact))
-}
-
-func TestEncodeDecode(t *testing.T) {
- tests := []struct {
- name string
- typeparams bool // requires typeparams to be enabled
- files map[string]string
- plookups []pkgLookups // see testEncodeDecode for details
- }{
- {
- name: "loading-order",
- // c -> b -> a, a2
- // c does not directly depend on a, but it indirectly uses a.T.
- //
- // Package a2 is never loaded directly so it is incomplete.
- //
- // We use only types in this example because we rely on
- // types.Eval to resolve the lookup expressions, and it only
- // works for types. This is a definite gap in the typechecker API.
- files: map[string]string{
- "a/a.go": `package a; type A int; type T int`,
- "a2/a.go": `package a2; type A2 int; type Unneeded int`,
- "b/b.go": `package b; import ("a"; "a2"); type B chan a2.A2; type F func() a.T`,
- "c/c.go": `package c; import "b"; type C []b.B`,
- },
- // In the following table, we analyze packages (a, b, c) in order,
- // look up various objects accessible within each package,
- // and see if they have a fact. The "analysis" exports a fact
- // for every object at package level.
- //
- // Note: Loop iterations are not independent test cases;
- // order matters, as we populate factmap.
- plookups: []pkgLookups{
- {"a", []lookup{
- {"A", "myFact(a.A)"},
- }},
- {"b", []lookup{
- {"a.A", "myFact(a.A)"},
- {"a.T", "myFact(a.T)"},
- {"B", "myFact(b.B)"},
- {"F", "myFact(b.F)"},
- {"F(nil)()", "myFact(a.T)"}, // (result type of b.F)
- }},
- {"c", []lookup{
- {"b.B", "myFact(b.B)"},
- {"b.F", "myFact(b.F)"},
- //{"b.F(nil)()", "myFact(a.T)"}, // no fact; TODO(adonovan): investigate
- {"C", "myFact(c.C)"},
- {"C{}[0]", "myFact(b.B)"},
- {"<-(C{}[0])", "no fact"}, // object but no fact (we never "analyze" a2)
- }},
- },
- },
- {
- name: "globals",
- files: map[string]string{
- "a/a.go": `package a;
- type T1 int
- type T2 int
- type T3 int
- type T4 int
- type T5 int
- type K int; type V string
- `,
- "b/b.go": `package b
- import "a"
- var (
- G1 []a.T1
- G2 [7]a.T2
- G3 chan a.T3
- G4 *a.T4
- G5 struct{ F a.T5 }
- G6 map[a.K]a.V
- )
- `,
- "c/c.go": `package c; import "b";
- var (
- v1 = b.G1
- v2 = b.G2
- v3 = b.G3
- v4 = b.G4
- v5 = b.G5
- v6 = b.G6
- )
- `,
- },
- plookups: []pkgLookups{
- {"a", []lookup{}},
- {"b", []lookup{}},
- {"c", []lookup{
- {"v1[0]", "myFact(a.T1)"},
- {"v2[0]", "myFact(a.T2)"},
- {"<-v3", "myFact(a.T3)"},
- {"*v4", "myFact(a.T4)"},
- {"v5.F", "myFact(a.T5)"},
- {"v6[0]", "myFact(a.V)"},
- }},
- },
- },
- {
- name: "typeparams",
- typeparams: true,
- files: map[string]string{
- "a/a.go": `package a
- type T1 int
- type T2 int
- type T3 interface{Foo()}
- type T4 int
- type T5 int
- type T6 interface{Foo()}
- `,
- "b/b.go": `package b
- import "a"
- type N1[T a.T1|int8] func() T
- type N2[T any] struct{ F T }
- type N3[T a.T3] func() T
- type N4[T a.T4|int8] func() T
- type N5[T interface{Bar() a.T5} ] func() T
-
- type t5 struct{}; func (t5) Bar() a.T5
-
- var G1 N1[a.T1]
- var G2 func() N2[a.T2]
- var G3 N3[a.T3]
- var G4 N4[a.T4]
- var G5 N5[t5]
-
- func F6[T a.T6]() T { var x T; return x }
- `,
- "c/c.go": `package c; import "b";
- var (
- v1 = b.G1
- v2 = b.G2
- v3 = b.G3
- v4 = b.G4
- v5 = b.G5
- v6 = b.F6[t6]
- )
-
- type t6 struct{}; func (t6) Foo() {}
- `,
- },
- plookups: []pkgLookups{
- {"a", []lookup{}},
- {"b", []lookup{}},
- {"c", []lookup{
- {"v1", "myFact(b.N1)"},
- {"v1()", "myFact(a.T1)"},
- {"v2()", "myFact(b.N2)"},
- {"v2().F", "myFact(a.T2)"},
- {"v3", "myFact(b.N3)"},
- {"v4", "myFact(b.N4)"},
- {"v4()", "myFact(a.T4)"},
- {"v5", "myFact(b.N5)"},
- {"v5()", "myFact(b.t5)"},
- {"v6()", "myFact(c.t6)"},
- }},
- },
- },
- }
-
- for i := range tests {
- test := tests[i]
- t.Run(test.name, func(t *testing.T) {
- t.Parallel()
- if test.typeparams && !typeparams.Enabled {
- t.Skip("type parameters are not enabled")
- }
- testEncodeDecode(t, test.files, test.plookups)
- })
- }
-}
-
-type lookup struct {
- objexpr string
- want string
-}
-
-type pkgLookups struct {
- path string
- lookups []lookup
-}
-
-// testEncodeDecode tests fact encoding and decoding and simulates how package facts
-// are passed during analysis. It operates on a group of Go file contents. Then
-// for each <package, []lookup> in tests it does the following:
-// 1) loads and type checks the package,
-// 2) calls facts.Decode to loads the facts exported by its imports,
-// 3) exports a myFact Fact for all of package level objects,
-// 4) For each lookup for the current package:
-// 4.a) lookup the types.Object for an Go source expression in the curent package
-// (or confirms one is not expected want=="no object"),
-// 4.b) finds a Fact for the object (or confirms one is not expected want=="no fact"),
-// 4.c) compares the content of the Fact to want.
-// 5) encodes the Facts of the package.
-//
-// Note: tests are not independent test cases; order matters (as does a package being
-// skipped). It changes what Facts can be imported.
-//
-// Failures are reported on t.
-func testEncodeDecode(t *testing.T, files map[string]string, tests []pkgLookups) {
- dir, cleanup, err := analysistest.WriteFiles(files)
- if err != nil {
- t.Fatal(err)
- }
- defer cleanup()
-
- // factmap represents the passing of encoded facts from one
- // package to another. In practice one would use the file system.
- factmap := make(map[string][]byte)
- read := func(path string) ([]byte, error) { return factmap[path], nil }
-
- // Analyze packages in order, look up various objects accessible within
- // each package, and see if they have a fact. The "analysis" exports a
- // fact for every object at package level.
- //
- // Note: Loop iterations are not independent test cases;
- // order matters, as we populate factmap.
- for _, test := range tests {
- // load package
- pkg, err := load(t, dir, test.path)
- if err != nil {
- t.Fatal(err)
- }
-
- // decode
- facts, err := facts.Decode(pkg, read)
- if err != nil {
- t.Fatalf("Decode failed: %v", err)
- }
- t.Logf("decode %s facts = %v", pkg.Path(), facts) // show all facts
-
- // export
- // (one fact for each package-level object)
- for _, name := range pkg.Scope().Names() {
- obj := pkg.Scope().Lookup(name)
- fact := &myFact{obj.Pkg().Name() + "." + obj.Name()}
- facts.ExportObjectFact(obj, fact)
- }
- t.Logf("exported %s facts = %v", pkg.Path(), facts) // show all facts
-
- // import
- // (after export, because an analyzer may import its own facts)
- for _, lookup := range test.lookups {
- fact := new(myFact)
- var got string
- if obj := find(pkg, lookup.objexpr); obj == nil {
- got = "no object"
- } else if facts.ImportObjectFact(obj, fact) {
- got = fact.String()
- } else {
- got = "no fact"
- }
- if got != lookup.want {
- t.Errorf("in %s, ImportObjectFact(%s, %T) = %s, want %s",
- pkg.Path(), lookup.objexpr, fact, got, lookup.want)
- }
- }
-
- // encode
- factmap[pkg.Path()] = facts.Encode()
- }
-}
-
-func find(p *types.Package, expr string) types.Object {
- // types.Eval only allows us to compute a TypeName object for an expression.
- // TODO(adonovan): support other expressions that denote an object:
- // - an identifier (or qualified ident) for a func, const, or var
- // - new(T).f for a field or method
- // I've added CheckExpr in https://go-review.googlesource.com/c/go/+/144677.
- // If that becomes available, use it.
-
- // Choose an arbitrary position within the (single-file) package
- // so that we are within the scope of its import declarations.
- somepos := p.Scope().Lookup(p.Scope().Names()[0]).Pos()
- tv, err := types.Eval(token.NewFileSet(), p, somepos, expr)
- if err != nil {
- return nil
- }
- if n, ok := tv.Type.(*types.Named); ok {
- return n.Obj()
- }
- return nil
-}
-
-func load(t *testing.T, dir string, path string) (*types.Package, error) {
- cfg := &packages.Config{
- Mode: packages.LoadSyntax,
- Dir: dir,
- Env: append(os.Environ(), "GOPATH="+dir, "GO111MODULE=off", "GOPROXY=off"),
- }
- testenv.NeedsGoPackagesEnv(t, cfg.Env)
- pkgs, err := packages.Load(cfg, path)
- if err != nil {
- return nil, err
- }
- if packages.PrintErrors(pkgs) > 0 {
- return nil, fmt.Errorf("packages had errors")
- }
- if len(pkgs) == 0 {
- return nil, fmt.Errorf("no package matched %s", path)
- }
- return pkgs[0].Types, nil
-}
-
-type otherFact struct {
- S string
-}
-
-func (f *otherFact) String() string { return fmt.Sprintf("otherFact(%s)", f.S) }
-func (f *otherFact) AFact() {}
-
-func TestFactFilter(t *testing.T) {
- files := map[string]string{
- "a/a.go": `package a; type A int`,
- }
- dir, cleanup, err := analysistest.WriteFiles(files)
- if err != nil {
- t.Fatal(err)
- }
- defer cleanup()
-
- pkg, err := load(t, dir, "a")
- if err != nil {
- t.Fatal(err)
- }
-
- obj := pkg.Scope().Lookup("A")
- s, err := facts.Decode(pkg, func(string) ([]byte, error) { return nil, nil })
- if err != nil {
- t.Fatal(err)
- }
- s.ExportObjectFact(obj, &myFact{"good object fact"})
- s.ExportPackageFact(&myFact{"good package fact"})
- s.ExportObjectFact(obj, &otherFact{"bad object fact"})
- s.ExportPackageFact(&otherFact{"bad package fact"})
-
- filter := map[reflect.Type]bool{
- reflect.TypeOf(&myFact{}): true,
- }
-
- pkgFacts := s.AllPackageFacts(filter)
- wantPkgFacts := `[{package a ("a") myFact(good package fact)}]`
- if got := fmt.Sprintf("%v", pkgFacts); got != wantPkgFacts {
- t.Errorf("AllPackageFacts: got %v, want %v", got, wantPkgFacts)
- }
-
- objFacts := s.AllObjectFacts(filter)
- wantObjFacts := "[{type a.A int myFact(good object fact)}]"
- if got := fmt.Sprintf("%v", objFacts); got != wantObjFacts {
- t.Errorf("AllObjectFacts: got %v, want %v", got, wantObjFacts)
- }
-}
diff --git a/go/analysis/internal/facts/imports.go b/go/analysis/internal/facts/imports.go
deleted file mode 100644
index ade0cc6fa..000000000
--- a/go/analysis/internal/facts/imports.go
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package facts
-
-import (
- "go/types"
-
- "golang.org/x/tools/internal/typeparams"
-)
-
-// importMap computes the import map for a package by traversing the
-// entire exported API each of its imports.
-//
-// This is a workaround for the fact that we cannot access the map used
-// internally by the types.Importer returned by go/importer. The entries
-// in this map are the packages and objects that may be relevant to the
-// current analysis unit.
-//
-// Packages in the map that are only indirectly imported may be
-// incomplete (!pkg.Complete()).
-//
-func importMap(imports []*types.Package) map[string]*types.Package {
- objects := make(map[types.Object]bool)
- packages := make(map[string]*types.Package)
-
- var addObj func(obj types.Object) bool
- var addType func(T types.Type)
-
- addObj = func(obj types.Object) bool {
- if !objects[obj] {
- objects[obj] = true
- addType(obj.Type())
- if pkg := obj.Pkg(); pkg != nil {
- packages[pkg.Path()] = pkg
- }
- return true
- }
- return false
- }
-
- addType = func(T types.Type) {
- switch T := T.(type) {
- case *types.Basic:
- // nop
- case *types.Named:
- if addObj(T.Obj()) {
- // TODO(taking): Investigate why the Underlying type is not added here.
- for i := 0; i < T.NumMethods(); i++ {
- addObj(T.Method(i))
- }
- if tparams := typeparams.ForNamed(T); tparams != nil {
- for i := 0; i < tparams.Len(); i++ {
- addType(tparams.At(i))
- }
- }
- if targs := typeparams.NamedTypeArgs(T); targs != nil {
- for i := 0; i < targs.Len(); i++ {
- addType(targs.At(i))
- }
- }
- }
- case *types.Pointer:
- addType(T.Elem())
- case *types.Slice:
- addType(T.Elem())
- case *types.Array:
- addType(T.Elem())
- case *types.Chan:
- addType(T.Elem())
- case *types.Map:
- addType(T.Key())
- addType(T.Elem())
- case *types.Signature:
- addType(T.Params())
- addType(T.Results())
- if tparams := typeparams.ForSignature(T); tparams != nil {
- for i := 0; i < tparams.Len(); i++ {
- addType(tparams.At(i))
- }
- }
- case *types.Struct:
- for i := 0; i < T.NumFields(); i++ {
- addObj(T.Field(i))
- }
- case *types.Tuple:
- for i := 0; i < T.Len(); i++ {
- addObj(T.At(i))
- }
- case *types.Interface:
- for i := 0; i < T.NumMethods(); i++ {
- addObj(T.Method(i))
- }
- for i := 0; i < T.NumEmbeddeds(); i++ {
- addType(T.EmbeddedType(i)) // walk Embedded for implicits
- }
- case *typeparams.Union:
- for i := 0; i < T.Len(); i++ {
- addType(T.Term(i).Type())
- }
- case *typeparams.TypeParam:
- if addObj(T.Obj()) {
- addType(T.Constraint())
- }
- }
- }
-
- for _, imp := range imports {
- packages[imp.Path()] = imp
-
- scope := imp.Scope()
- for _, name := range scope.Names() {
- addObj(scope.Lookup(name))
- }
- }
-
- return packages
-}
diff --git a/go/analysis/passes/asmdecl/arches_go118.go b/go/analysis/passes/asmdecl/arches_go118.go
new file mode 100644
index 000000000..d8211afdc
--- /dev/null
+++ b/go/analysis/passes/asmdecl/arches_go118.go
@@ -0,0 +1,12 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.19
+// +build !go1.19
+
+package asmdecl
+
+func additionalArches() []*asmArch {
+ return nil
+}
diff --git a/go/analysis/passes/asmdecl/arches_go119.go b/go/analysis/passes/asmdecl/arches_go119.go
new file mode 100644
index 000000000..3018383e7
--- /dev/null
+++ b/go/analysis/passes/asmdecl/arches_go119.go
@@ -0,0 +1,14 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.19
+// +build go1.19
+
+package asmdecl
+
+var asmArchLoong64 = asmArch{name: "loong64", bigEndian: false, stack: "R3", lr: true}
+
+func additionalArches() []*asmArch {
+ return []*asmArch{&asmArchLoong64}
+}
diff --git a/go/analysis/passes/asmdecl/asmdecl.go b/go/analysis/passes/asmdecl/asmdecl.go
index b05ed5c15..7288559fc 100644
--- a/go/analysis/passes/asmdecl/asmdecl.go
+++ b/go/analysis/passes/asmdecl/asmdecl.go
@@ -92,7 +92,7 @@ var (
asmArchMips64LE = asmArch{name: "mips64le", bigEndian: false, stack: "R29", lr: true}
asmArchPpc64 = asmArch{name: "ppc64", bigEndian: true, stack: "R1", lr: true, retRegs: []string{"R3", "F1"}}
asmArchPpc64LE = asmArch{name: "ppc64le", bigEndian: false, stack: "R1", lr: true, retRegs: []string{"R3", "F1"}}
- asmArchRISCV64 = asmArch{name: "riscv64", bigEndian: false, stack: "SP", lr: true}
+ asmArchRISCV64 = asmArch{name: "riscv64", bigEndian: false, stack: "SP", lr: true, retRegs: []string{"X10", "F10"}}
asmArchS390X = asmArch{name: "s390x", bigEndian: true, stack: "R15", lr: true}
asmArchWasm = asmArch{name: "wasm", bigEndian: false, stack: "SP", lr: false}
@@ -114,6 +114,7 @@ var (
)
func init() {
+ arches = append(arches, additionalArches()...)
for _, arch := range arches {
arch.sizes = types.SizesFor("gc", arch.name)
if arch.sizes == nil {
@@ -731,7 +732,7 @@ func asmCheckVar(badf func(string, ...interface{}), fn *asmFunc, line, expr stri
src = 8
}
}
- case "mips", "mipsle", "mips64", "mips64le":
+ case "loong64", "mips", "mipsle", "mips64", "mips64le":
switch op {
case "MOVB", "MOVBU":
src = 1
diff --git a/go/analysis/passes/asmdecl/asmdecl_test.go b/go/analysis/passes/asmdecl/asmdecl_test.go
index f88c188b2..50938a075 100644
--- a/go/analysis/passes/asmdecl/asmdecl_test.go
+++ b/go/analysis/passes/asmdecl/asmdecl_test.go
@@ -14,14 +14,17 @@ import (
)
var goosarches = []string{
- "linux/amd64", // asm1.s, asm4.s
- "linux/386", // asm2.s
- "linux/arm", // asm3.s
- "linux/mips64", // asm5.s
- "linux/s390x", // asm6.s
- "linux/ppc64", // asm7.s
- "linux/mips", // asm8.s,
- "js/wasm", // asm9.s
+ "linux/amd64", // asm1.s, asm4.s
+ "linux/386", // asm2.s
+ "linux/arm", // asm3.s
+ // TODO: skip test on loong64 until go toolchain supported loong64.
+ // "linux/loong64", // asm10.s
+ "linux/mips64", // asm5.s
+ "linux/s390x", // asm6.s
+ "linux/ppc64", // asm7.s
+ "linux/mips", // asm8.s,
+ "js/wasm", // asm9.s
+ "linux/riscv64", // asm11.s
}
func Test(t *testing.T) {
diff --git a/go/analysis/passes/asmdecl/testdata/src/a/asm10.s b/go/analysis/passes/asmdecl/testdata/src/a/asm10.s
new file mode 100644
index 000000000..f0045882a
--- /dev/null
+++ b/go/analysis/passes/asmdecl/testdata/src/a/asm10.s
@@ -0,0 +1,192 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build loong64
+
+TEXT ·arg1(SB),0,$0-2
+ MOVB x+0(FP), R19
+ MOVBU y+1(FP), R18
+ MOVH x+0(FP), R19 // want `\[loong64\] arg1: invalid MOVH of x\+0\(FP\); int8 is 1-byte value`
+ MOVHU y+1(FP), R19 // want `invalid MOVHU of y\+1\(FP\); uint8 is 1-byte value`
+ MOVW x+0(FP), R19 // want `invalid MOVW of x\+0\(FP\); int8 is 1-byte value`
+ MOVWU y+1(FP), R19 // want `invalid MOVWU of y\+1\(FP\); uint8 is 1-byte value`
+ MOVV x+0(FP), R19 // want `invalid MOVV of x\+0\(FP\); int8 is 1-byte value`
+ MOVV y+1(FP), R19 // want `invalid MOVV of y\+1\(FP\); uint8 is 1-byte value`
+ MOVB x+1(FP), R19 // want `invalid offset x\+1\(FP\); expected x\+0\(FP\)`
+ MOVBU y+2(FP), R19 // want `invalid offset y\+2\(FP\); expected y\+1\(FP\)`
+ MOVB 16(R3), R19 // want `16\(R3\) should be x\+0\(FP\)`
+ MOVB 17(R3), R19 // want `17\(R3\) should be y\+1\(FP\)`
+ MOVB 18(R3), R19 // want `use of 18\(R3\) points beyond argument frame`
+ RET
+
+TEXT ·arg2(SB),0,$0-4
+ MOVBU x+0(FP), R19 // want `arg2: invalid MOVBU of x\+0\(FP\); int16 is 2-byte value`
+ MOVB y+2(FP), R19 // want `invalid MOVB of y\+2\(FP\); uint16 is 2-byte value`
+ MOVHU x+0(FP), R19
+ MOVH y+2(FP), R18
+ MOVWU x+0(FP), R19 // want `invalid MOVWU of x\+0\(FP\); int16 is 2-byte value`
+ MOVW y+2(FP), R19 // want `invalid MOVW of y\+2\(FP\); uint16 is 2-byte value`
+ MOVV x+0(FP), R19 // want `invalid MOVV of x\+0\(FP\); int16 is 2-byte value`
+ MOVV y+2(FP), R19 // want `invalid MOVV of y\+2\(FP\); uint16 is 2-byte value`
+ MOVHU x+2(FP), R19 // want `invalid offset x\+2\(FP\); expected x\+0\(FP\)`
+ MOVH y+0(FP), R19 // want `invalid offset y\+0\(FP\); expected y\+2\(FP\)`
+ RET
+
+TEXT ·arg4(SB),0,$0-2 // want `arg4: wrong argument size 2; expected \$\.\.\.-8`
+ MOVB x+0(FP), R19 // want `invalid MOVB of x\+0\(FP\); int32 is 4-byte value`
+ MOVB y+4(FP), R18 // want `invalid MOVB of y\+4\(FP\); uint32 is 4-byte value`
+ MOVH x+0(FP), R19 // want `invalid MOVH of x\+0\(FP\); int32 is 4-byte value`
+ MOVH y+4(FP), R19 // want `invalid MOVH of y\+4\(FP\); uint32 is 4-byte value`
+ MOVW x+0(FP), R19
+ MOVW y+4(FP), R19
+ MOVV x+0(FP), R19 // want `invalid MOVV of x\+0\(FP\); int32 is 4-byte value`
+ MOVV y+4(FP), R19 // want `invalid MOVV of y\+4\(FP\); uint32 is 4-byte value`
+ MOVW x+4(FP), R19 // want `invalid offset x\+4\(FP\); expected x\+0\(FP\)`
+ MOVW y+2(FP), R19 // want `invalid offset y\+2\(FP\); expected y\+4\(FP\)`
+ RET
+
+TEXT ·arg8(SB),7,$0-2 // want `wrong argument size 2; expected \$\.\.\.-16`
+ MOVB x+0(FP), R19 // want `invalid MOVB of x\+0\(FP\); int64 is 8-byte value`
+ MOVB y+8(FP), R18 // want `invalid MOVB of y\+8\(FP\); uint64 is 8-byte value`
+ MOVH x+0(FP), R19 // want `invalid MOVH of x\+0\(FP\); int64 is 8-byte value`
+ MOVH y+8(FP), R19 // want `invalid MOVH of y\+8\(FP\); uint64 is 8-byte value`
+ MOVW x+0(FP), R19 // want `invalid MOVW of x\+0\(FP\); int64 is 8-byte value`
+ MOVW y+8(FP), R19 // want `invalid MOVW of y\+8\(FP\); uint64 is 8-byte value`
+ MOVV x+0(FP), R19
+ MOVV y+8(FP), R19
+ MOVV x+8(FP), R19 // want `invalid offset x\+8\(FP\); expected x\+0\(FP\)`
+ MOVV y+2(FP), R19 // want `invalid offset y\+2\(FP\); expected y\+8\(FP\)`
+ RET
+
+TEXT ·argint(SB),0,$0-2 // want `wrong argument size 2; expected \$\.\.\.-16`
+ MOVB x+0(FP), R19 // want `invalid MOVB of x\+0\(FP\); int is 8-byte value`
+ MOVB y+8(FP), R18 // want `invalid MOVB of y\+8\(FP\); uint is 8-byte value`
+ MOVH x+0(FP), R19 // want `invalid MOVH of x\+0\(FP\); int is 8-byte value`
+ MOVH y+8(FP), R19 // want `invalid MOVH of y\+8\(FP\); uint is 8-byte value`
+ MOVW x+0(FP), R19 // want `invalid MOVW of x\+0\(FP\); int is 8-byte value`
+ MOVW y+8(FP), R19 // want `invalid MOVW of y\+8\(FP\); uint is 8-byte value`
+ MOVV x+0(FP), R19
+ MOVV y+8(FP), R19
+ MOVV x+8(FP), R19 // want `invalid offset x\+8\(FP\); expected x\+0\(FP\)`
+ MOVV y+2(FP), R19 // want `invalid offset y\+2\(FP\); expected y\+8\(FP\)`
+ RET
+
+TEXT ·argptr(SB),7,$0-2 // want `wrong argument size 2; expected \$\.\.\.-40`
+ MOVB x+0(FP), R19 // want `invalid MOVB of x\+0\(FP\); \*byte is 8-byte value`
+ MOVB y+8(FP), R18 // want `invalid MOVB of y\+8\(FP\); \*byte is 8-byte value`
+ MOVH x+0(FP), R19 // want `invalid MOVH of x\+0\(FP\); \*byte is 8-byte value`
+ MOVH y+8(FP), R19 // want `invalid MOVH of y\+8\(FP\); \*byte is 8-byte value`
+ MOVW x+0(FP), R19 // want `invalid MOVW of x\+0\(FP\); \*byte is 8-byte value`
+ MOVW y+8(FP), R19 // want `invalid MOVW of y\+8\(FP\); \*byte is 8-byte value`
+ MOVV x+0(FP), R19
+ MOVV y+8(FP), R19
+ MOVV x+8(FP), R19 // want `invalid offset x\+8\(FP\); expected x\+0\(FP\)`
+ MOVV y+2(FP), R19 // want `invalid offset y\+2\(FP\); expected y\+8\(FP\)`
+ MOVW c+16(FP), R19 // want `invalid MOVW of c\+16\(FP\); chan int is 8-byte value`
+ MOVW m+24(FP), R19 // want `invalid MOVW of m\+24\(FP\); map\[int\]int is 8-byte value`
+ MOVW f+32(FP), R19 // want `invalid MOVW of f\+32\(FP\); func\(\) is 8-byte value`
+ RET
+
+TEXT ·argstring(SB),0,$32 // want `wrong argument size 0; expected \$\.\.\.-32`
+ MOVH x+0(FP), R19 // want `invalid MOVH of x\+0\(FP\); string base is 8-byte value`
+ MOVW x+0(FP), R19 // want `invalid MOVW of x\+0\(FP\); string base is 8-byte value`
+ MOVV x+0(FP), R19
+ MOVH x_base+0(FP), R19 // want `invalid MOVH of x_base\+0\(FP\); string base is 8-byte value`
+ MOVW x_base+0(FP), R19 // want `invalid MOVW of x_base\+0\(FP\); string base is 8-byte value`
+ MOVV x_base+0(FP), R19
+ MOVH x_len+0(FP), R19 // want `invalid offset x_len\+0\(FP\); expected x_len\+8\(FP\)`
+ MOVW x_len+0(FP), R19 // want `invalid offset x_len\+0\(FP\); expected x_len\+8\(FP\)`
+ MOVV x_len+0(FP), R19 // want `invalid offset x_len\+0\(FP\); expected x_len\+8\(FP\)`
+ MOVH x_len+8(FP), R19 // want `invalid MOVH of x_len\+8\(FP\); string len is 8-byte value`
+ MOVW x_len+8(FP), R19 // want `invalid MOVW of x_len\+8\(FP\); string len is 8-byte value`
+ MOVV x_len+8(FP), R19
+ MOVV y+0(FP), R19 // want `invalid offset y\+0\(FP\); expected y\+16\(FP\)`
+ MOVV y_len+8(FP), R19 // want `invalid offset y_len\+8\(FP\); expected y_len\+24\(FP\)`
+ RET
+
+TEXT ·argslice(SB),0,$48 // want `wrong argument size 0; expected \$\.\.\.-48`
+ MOVH x+0(FP), R19 // want `invalid MOVH of x\+0\(FP\); slice base is 8-byte value`
+ MOVW x+0(FP), R19 // want `invalid MOVW of x\+0\(FP\); slice base is 8-byte value`
+ MOVV x+0(FP), R19
+ MOVH x_base+0(FP), R19 // want `invalid MOVH of x_base\+0\(FP\); slice base is 8-byte value`
+ MOVW x_base+0(FP), R19 // want `invalid MOVW of x_base\+0\(FP\); slice base is 8-byte value`
+ MOVV x_base+0(FP), R19
+ MOVH x_len+0(FP), R19 // want `invalid offset x_len\+0\(FP\); expected x_len\+8\(FP\)`
+ MOVW x_len+0(FP), R19 // want `invalid offset x_len\+0\(FP\); expected x_len\+8\(FP\)`
+ MOVV x_len+0(FP), R19 // want `invalid offset x_len\+0\(FP\); expected x_len\+8\(FP\)`
+ MOVH x_len+8(FP), R19 // want `invalid MOVH of x_len\+8\(FP\); slice len is 8-byte value`
+ MOVW x_len+8(FP), R19 // want `invalid MOVW of x_len\+8\(FP\); slice len is 8-byte value`
+ MOVV x_len+8(FP), R19
+ MOVH x_cap+0(FP), R19 // want `invalid offset x_cap\+0\(FP\); expected x_cap\+16\(FP\)`
+ MOVW x_cap+0(FP), R19 // want `invalid offset x_cap\+0\(FP\); expected x_cap\+16\(FP\)`
+ MOVV x_cap+0(FP), R19 // want `invalid offset x_cap\+0\(FP\); expected x_cap\+16\(FP\)`
+ MOVH x_cap+16(FP), R19 // want `invalid MOVH of x_cap\+16\(FP\); slice cap is 8-byte value`
+ MOVW x_cap+16(FP), R19 // want `invalid MOVW of x_cap\+16\(FP\); slice cap is 8-byte value`
+ MOVV x_cap+16(FP), R19
+ MOVV y+0(FP), R19 // want `invalid offset y\+0\(FP\); expected y\+24\(FP\)`
+ MOVV y_len+8(FP), R19 // want `invalid offset y_len\+8\(FP\); expected y_len\+32\(FP\)`
+ MOVV y_cap+16(FP), R19 // want `invalid offset y_cap\+16\(FP\); expected y_cap\+40\(FP\)`
+ RET
+
+TEXT ·argiface(SB),0,$0-32
+ MOVH x+0(FP), R19 // want `invalid MOVH of x\+0\(FP\); interface type is 8-byte value`
+ MOVW x+0(FP), R19 // want `invalid MOVW of x\+0\(FP\); interface type is 8-byte value`
+ MOVV x+0(FP), R19
+ MOVH x_type+0(FP), R19 // want `invalid MOVH of x_type\+0\(FP\); interface type is 8-byte value`
+ MOVW x_type+0(FP), R19 // want `invalid MOVW of x_type\+0\(FP\); interface type is 8-byte value`
+ MOVV x_type+0(FP), R19
+ MOVV x_itable+0(FP), R19 // want `unknown variable x_itable; offset 0 is x_type\+0\(FP\)`
+ MOVV x_itable+1(FP), R19 // want `unknown variable x_itable; offset 1 is x_type\+0\(FP\)`
+ MOVH x_data+0(FP), R19 // want `invalid offset x_data\+0\(FP\); expected x_data\+8\(FP\)`
+ MOVW x_data+0(FP), R19 // want `invalid offset x_data\+0\(FP\); expected x_data\+8\(FP\)`
+ MOVV x_data+0(FP), R19 // want `invalid offset x_data\+0\(FP\); expected x_data\+8\(FP\)`
+ MOVH x_data+8(FP), R19 // want `invalid MOVH of x_data\+8\(FP\); interface data is 8-byte value`
+ MOVW x_data+8(FP), R19 // want `invalid MOVW of x_data\+8\(FP\); interface data is 8-byte value`
+ MOVV x_data+8(FP), R19
+ MOVH y+16(FP), R19 // want `invalid MOVH of y\+16\(FP\); interface itable is 8-byte value`
+ MOVW y+16(FP), R19 // want `invalid MOVW of y\+16\(FP\); interface itable is 8-byte value`
+ MOVV y+16(FP), R19
+ MOVH y_itable+16(FP), R19 // want `invalid MOVH of y_itable\+16\(FP\); interface itable is 8-byte value`
+ MOVW y_itable+16(FP), R19 // want `invalid MOVW of y_itable\+16\(FP\); interface itable is 8-byte value`
+ MOVV y_itable+16(FP), R19
+ MOVV y_type+16(FP), R19 // want `unknown variable y_type; offset 16 is y_itable\+16\(FP\)`
+ MOVH y_data+16(FP), R19 // want `invalid offset y_data\+16\(FP\); expected y_data\+24\(FP\)`
+ MOVW y_data+16(FP), R19 // want `invalid offset y_data\+16\(FP\); expected y_data\+24\(FP\)`
+ MOVV y_data+16(FP), R19 // want `invalid offset y_data\+16\(FP\); expected y_data\+24\(FP\)`
+ MOVH y_data+24(FP), R19 // want `invalid MOVH of y_data\+24\(FP\); interface data is 8-byte value`
+ MOVW y_data+24(FP), R19 // want `invalid MOVW of y_data\+24\(FP\); interface data is 8-byte value`
+ MOVV y_data+24(FP), R19
+ RET
+
+TEXT ·returnint(SB),0,$0-8
+ MOVB R19, ret+0(FP) // want `invalid MOVB of ret\+0\(FP\); int is 8-byte value`
+ MOVH R19, ret+0(FP) // want `invalid MOVH of ret\+0\(FP\); int is 8-byte value`
+ MOVW R19, ret+0(FP) // want `invalid MOVW of ret\+0\(FP\); int is 8-byte value`
+ MOVV R19, ret+0(FP)
+ MOVV R19, ret+1(FP) // want `invalid offset ret\+1\(FP\); expected ret\+0\(FP\)`
+ MOVV R19, r+0(FP) // want `unknown variable r; offset 0 is ret\+0\(FP\)`
+ RET
+
+TEXT ·returnbyte(SB),0,$0-9
+ MOVV x+0(FP), R19
+ MOVB R19, ret+8(FP)
+ MOVH R19, ret+8(FP) // want `invalid MOVH of ret\+8\(FP\); byte is 1-byte value`
+ MOVW R19, ret+8(FP) // want `invalid MOVW of ret\+8\(FP\); byte is 1-byte value`
+ MOVV R19, ret+8(FP) // want `invalid MOVV of ret\+8\(FP\); byte is 1-byte value`
+ MOVB R19, ret+7(FP) // want `invalid offset ret\+7\(FP\); expected ret\+8\(FP\)`
+ RET
+
+TEXT ·returnnamed(SB),0,$0-41
+ MOVB x+0(FP), R19
+ MOVV R19, r1+8(FP)
+ MOVH R19, r2+16(FP)
+ MOVV R19, r3+24(FP)
+ MOVV R19, r3_base+24(FP)
+ MOVV R19, r3_len+32(FP)
+ MOVB R19, r4+40(FP)
+ MOVW R19, r1+8(FP) // want `invalid MOVW of r1\+8\(FP\); int is 8-byte value`
+ RET
+
+TEXT ·returnintmissing(SB),0,$0-8
+ RET // want `RET without writing to 8-byte ret\+0\(FP\)`
diff --git a/go/analysis/passes/asmdecl/testdata/src/a/asm11.s b/go/analysis/passes/asmdecl/testdata/src/a/asm11.s
new file mode 100644
index 000000000..e81e8ee17
--- /dev/null
+++ b/go/analysis/passes/asmdecl/testdata/src/a/asm11.s
@@ -0,0 +1,13 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build riscv64
+
+// writing to result in ABIInternal function
+TEXT ·returnABIInternal<ABIInternal>(SB), NOSPLIT, $8
+ MOV $123, X10
+ RET
+TEXT ·returnmissingABIInternal<ABIInternal>(SB), NOSPLIT, $8
+ MOV $123, X20
+ RET // want `RET without writing to result register`
diff --git a/go/analysis/passes/assign/assign.go b/go/analysis/passes/assign/assign.go
index 3586638ef..89146b733 100644
--- a/go/analysis/passes/assign/assign.go
+++ b/go/analysis/passes/assign/assign.go
@@ -12,6 +12,7 @@ import (
"fmt"
"go/ast"
"go/token"
+ "go/types"
"reflect"
"golang.org/x/tools/go/analysis"
@@ -51,7 +52,8 @@ func run(pass *analysis.Pass) (interface{}, error) {
for i, lhs := range stmt.Lhs {
rhs := stmt.Rhs[i]
if analysisutil.HasSideEffects(pass.TypesInfo, lhs) ||
- analysisutil.HasSideEffects(pass.TypesInfo, rhs) {
+ analysisutil.HasSideEffects(pass.TypesInfo, rhs) ||
+ isMapIndex(pass.TypesInfo, lhs) {
continue // expressions may not be equal
}
if reflect.TypeOf(lhs) != reflect.TypeOf(rhs) {
@@ -74,3 +76,14 @@ func run(pass *analysis.Pass) (interface{}, error) {
return nil, nil
}
+
+// isMapIndex returns true if e is a map index expression.
+func isMapIndex(info *types.Info, e ast.Expr) bool {
+ if idx, ok := analysisutil.Unparen(e).(*ast.IndexExpr); ok {
+ if typ := info.Types[idx.X].Type; typ != nil {
+ _, ok := typ.Underlying().(*types.Map)
+ return ok
+ }
+ }
+ return false
+}
diff --git a/go/analysis/passes/assign/testdata/src/a/a.go b/go/analysis/passes/assign/testdata/src/a/a.go
index eaec634d1..f9663120b 100644
--- a/go/analysis/passes/assign/testdata/src/a/a.go
+++ b/go/analysis/passes/assign/testdata/src/a/a.go
@@ -29,3 +29,31 @@ func (s *ST) SetX(x int, ch chan int) {
}
func num() int { return 2 }
+
+func Index() {
+ s := []int{1}
+ s[0] = s[0] // want "self-assignment"
+
+ var a [5]int
+ a[0] = a[0] // want "self-assignment"
+
+ pa := &[2]int{1, 2}
+ pa[1] = pa[1] // want "self-assignment"
+
+ var pss *struct { // report self assignment despite nil dereference
+ s []int
+ }
+ pss.s[0] = pss.s[0] // want "self-assignment"
+
+ m := map[int]string{1: "a"}
+ m[0] = m[0] // bail on map self-assignments due to side effects
+ m[1] = m[1] // not modeling what elements must be in the map
+ (m[2]) = (m[2]) // even with parens
+ type Map map[string]bool
+ named := make(Map)
+ named["s"] = named["s"] // even on named maps.
+ var psm *struct {
+ m map[string]int
+ }
+ psm.m["key"] = psm.m["key"] // handles dereferences
+}
diff --git a/go/analysis/passes/assign/testdata/src/a/a.go.golden b/go/analysis/passes/assign/testdata/src/a/a.go.golden
index 6c91d3666..f45b7f208 100644
--- a/go/analysis/passes/assign/testdata/src/a/a.go.golden
+++ b/go/analysis/passes/assign/testdata/src/a/a.go.golden
@@ -29,3 +29,31 @@ func (s *ST) SetX(x int, ch chan int) {
}
func num() int { return 2 }
+
+func Index() {
+ s := []int{1}
+ // want "self-assignment"
+
+ var a [5]int
+ // want "self-assignment"
+
+ pa := &[2]int{1, 2}
+ // want "self-assignment"
+
+ var pss *struct { // report self assignment despite nil dereference
+ s []int
+ }
+ // want "self-assignment"
+
+ m := map[int]string{1: "a"}
+ m[0] = m[0] // bail on map self-assignments due to side effects
+ m[1] = m[1] // not modeling what elements must be in the map
+ (m[2]) = (m[2]) // even with parens
+ type Map map[string]bool
+ named := make(Map)
+ named["s"] = named["s"] // even on named maps.
+ var psm *struct {
+ m map[string]int
+ }
+ psm.m["key"] = psm.m["key"] // handles dereferences
+}
diff --git a/go/analysis/passes/bools/bools.go b/go/analysis/passes/bools/bools.go
index 5ae47d894..0d8b0bf4f 100644
--- a/go/analysis/passes/bools/bools.go
+++ b/go/analysis/passes/bools/bools.go
@@ -94,8 +94,10 @@ func (op boolOp) commutativeSets(info *types.Info, e *ast.BinaryExpr, seen map[*
}
// checkRedundant checks for expressions of the form
-// e && e
-// e || e
+//
+// e && e
+// e || e
+//
// Exprs must contain only side effect free expressions.
func (op boolOp) checkRedundant(pass *analysis.Pass, exprs []ast.Expr) {
seen := make(map[string]bool)
@@ -110,8 +112,10 @@ func (op boolOp) checkRedundant(pass *analysis.Pass, exprs []ast.Expr) {
}
// checkSuspect checks for expressions of the form
-// x != c1 || x != c2
-// x == c1 && x == c2
+//
+// x != c1 || x != c2
+// x == c1 && x == c2
+//
// where c1 and c2 are constant expressions.
// If c1 and c2 are the same then it's redundant;
// if c1 and c2 are different then it's always true or always false.
diff --git a/go/analysis/passes/buildssa/buildssa_test.go b/go/analysis/passes/buildssa/buildssa_test.go
index 0b381500b..52f7e7aa6 100644
--- a/go/analysis/passes/buildssa/buildssa_test.go
+++ b/go/analysis/passes/buildssa/buildssa_test.go
@@ -11,6 +11,7 @@ import (
"golang.org/x/tools/go/analysis/analysistest"
"golang.org/x/tools/go/analysis/passes/buildssa"
+ "golang.org/x/tools/internal/typeparams"
)
func Test(t *testing.T) {
@@ -27,3 +28,39 @@ func Test(t *testing.T) {
}
}
}
+
+func TestGenericDecls(t *testing.T) {
+ if !typeparams.Enabled {
+ t.Skip("TestGenericDecls requires type parameters.")
+ }
+ testdata := analysistest.TestData()
+ result := analysistest.Run(t, testdata, buildssa.Analyzer, "b")[0].Result
+
+ ssainfo := result.(*buildssa.SSA)
+ got := fmt.Sprint(ssainfo.SrcFuncs)
+ want := `[(*b.Pointer[T]).Load b.Load b.LoadPointer]`
+ if got != want {
+ t.Errorf("SSA.SrcFuncs = %s, want %s", got, want)
+ for _, f := range ssainfo.SrcFuncs {
+ f.WriteTo(os.Stderr)
+ }
+ }
+}
+
+func TestImporting(t *testing.T) {
+ if !typeparams.Enabled {
+ t.Skip("TestImporting depends on testdata/b/b/go which uses type parameters.")
+ }
+ testdata := analysistest.TestData()
+ result := analysistest.Run(t, testdata, buildssa.Analyzer, "c")[0].Result
+
+ ssainfo := result.(*buildssa.SSA)
+ got := fmt.Sprint(ssainfo.SrcFuncs)
+ want := `[c.A c.B]`
+ if got != want {
+ t.Errorf("SSA.SrcFuncs = %s, want %s", got, want)
+ for _, f := range ssainfo.SrcFuncs {
+ f.WriteTo(os.Stderr)
+ }
+ }
+}
diff --git a/go/analysis/passes/buildssa/testdata/src/b/b.go b/go/analysis/passes/buildssa/testdata/src/b/b.go
new file mode 100644
index 000000000..dd029cf60
--- /dev/null
+++ b/go/analysis/passes/buildssa/testdata/src/b/b.go
@@ -0,0 +1,20 @@
+// Package b contains declarations of generic functions.
+package b
+
+import "unsafe"
+
+type Pointer[T any] struct {
+ v unsafe.Pointer
+}
+
+func (x *Pointer[T]) Load() *T {
+ return (*T)(LoadPointer(&x.v))
+}
+
+func Load[T any](x *Pointer[T]) *T {
+ return x.Load()
+}
+
+func LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer)
+
+var G Pointer[int]
diff --git a/go/analysis/passes/buildssa/testdata/src/c/c.go b/go/analysis/passes/buildssa/testdata/src/c/c.go
new file mode 100644
index 000000000..387a3b0ed
--- /dev/null
+++ b/go/analysis/passes/buildssa/testdata/src/c/c.go
@@ -0,0 +1,24 @@
+// Package c is to test buildssa importing packages.
+package c
+
+import (
+ "a"
+ "b"
+ "unsafe"
+)
+
+func A() {
+ _ = a.Fib(10)
+}
+
+func B() {
+ var x int
+ ptr := unsafe.Pointer(&x)
+ _ = b.LoadPointer(&ptr)
+
+ m := b.G.Load()
+ f := b.Load(&b.G)
+ if f != m {
+ panic("loads of b.G are expected to be indentical")
+ }
+}
diff --git a/go/analysis/passes/buildtag/buildtag.go b/go/analysis/passes/buildtag/buildtag.go
index c4407ad91..775e507a3 100644
--- a/go/analysis/passes/buildtag/buildtag.go
+++ b/go/analysis/passes/buildtag/buildtag.go
@@ -20,7 +20,7 @@ import (
"golang.org/x/tools/go/analysis/passes/internal/analysisutil"
)
-const Doc = "check that +build tags are well-formed and correctly located"
+const Doc = "check //go:build and // +build directives"
var Analyzer = &analysis.Analyzer{
Name: "buildtag",
diff --git a/go/analysis/passes/buildtag/buildtag_old.go b/go/analysis/passes/buildtag/buildtag_old.go
index e9234925f..0001ba536 100644
--- a/go/analysis/passes/buildtag/buildtag_old.go
+++ b/go/analysis/passes/buildtag/buildtag_old.go
@@ -22,7 +22,7 @@ import (
"golang.org/x/tools/go/analysis/passes/internal/analysisutil"
)
-const Doc = "check that +build tags are well-formed and correctly located"
+const Doc = "check // +build directives"
var Analyzer = &analysis.Analyzer{
Name: "buildtag",
diff --git a/go/analysis/passes/cgocall/cgocall.go b/go/analysis/passes/cgocall/cgocall.go
index 5768d0b9b..b61ee5c3d 100644
--- a/go/analysis/passes/cgocall/cgocall.go
+++ b/go/analysis/passes/cgocall/cgocall.go
@@ -122,8 +122,8 @@ func checkCgo(fset *token.FileSet, f *ast.File, info *types.Info, reportf func(t
// For example, for each raw cgo source file in the original package,
// such as this one:
//
-// package p
-// import "C"
+// package p
+// import "C"
// import "fmt"
// type T int
// const k = 3
@@ -147,9 +147,9 @@ func checkCgo(fset *token.FileSet, f *ast.File, info *types.Info, reportf func(t
// the receiver into the first parameter;
// and all functions are renamed to "_".
//
-// package p
-// import . "·this·" // declares T, k, x, y, f, g, T.f
-// import "C"
+// package p
+// import . "·this·" // declares T, k, x, y, f, g, T.f
+// import "C"
// import "fmt"
// const _ = 3
// var _, _ = fmt.Println()
@@ -169,7 +169,6 @@ func checkCgo(fset *token.FileSet, f *ast.File, info *types.Info, reportf func(t
// C.f would resolve to "·this·"._C_func_f, for example. But we have
// limited ourselves here to preserving function bodies and initializer
// expressions since that is all that the cgocall analyzer needs.
-//
func typeCheckCgoSourceFiles(fset *token.FileSet, pkg *types.Package, files []*ast.File, info *types.Info, sizes types.Sizes) ([]*ast.File, *types.Info, error) {
const thispkg = "·this·"
@@ -284,8 +283,9 @@ func typeCheckCgoSourceFiles(fset *token.FileSet, pkg *types.Package, files []*a
// cgoBaseType tries to look through type conversions involving
// unsafe.Pointer to find the real type. It converts:
-// unsafe.Pointer(x) => x
-// *(*unsafe.Pointer)(unsafe.Pointer(&x)) => x
+//
+// unsafe.Pointer(x) => x
+// *(*unsafe.Pointer)(unsafe.Pointer(&x)) => x
func cgoBaseType(info *types.Info, arg ast.Expr) types.Type {
switch arg := arg.(type) {
case *ast.CallExpr:
diff --git a/go/analysis/passes/composite/composite.go b/go/analysis/passes/composite/composite.go
index d3670aca9..64e184d34 100644
--- a/go/analysis/passes/composite/composite.go
+++ b/go/analysis/passes/composite/composite.go
@@ -7,6 +7,7 @@
package composite
import (
+ "fmt"
"go/ast"
"go/types"
"strings"
@@ -83,7 +84,8 @@ func run(pass *analysis.Pass) (interface{}, error) {
}
for _, typ := range structuralTypes {
under := deref(typ.Underlying())
- if _, ok := under.(*types.Struct); !ok {
+ strct, ok := under.(*types.Struct)
+ if !ok {
// skip non-struct composite literals
continue
}
@@ -92,20 +94,47 @@ func run(pass *analysis.Pass) (interface{}, error) {
continue
}
- // check if the CompositeLit contains an unkeyed field
+ // check if the struct contains an unkeyed field
allKeyValue := true
- for _, e := range cl.Elts {
+ var suggestedFixAvailable = len(cl.Elts) == strct.NumFields()
+ var missingKeys []analysis.TextEdit
+ for i, e := range cl.Elts {
if _, ok := e.(*ast.KeyValueExpr); !ok {
allKeyValue = false
- break
+ if i >= strct.NumFields() {
+ break
+ }
+ field := strct.Field(i)
+ if !field.Exported() {
+ // Adding unexported field names for structs not defined
+ // locally will not work.
+ suggestedFixAvailable = false
+ break
+ }
+ missingKeys = append(missingKeys, analysis.TextEdit{
+ Pos: e.Pos(),
+ End: e.Pos(),
+ NewText: []byte(fmt.Sprintf("%s: ", field.Name())),
+ })
}
}
if allKeyValue {
- // all the composite literal fields are keyed
+ // all the struct fields are keyed
continue
}
- pass.ReportRangef(cl, "%s composite literal uses unkeyed fields", typeName)
+ diag := analysis.Diagnostic{
+ Pos: cl.Pos(),
+ End: cl.End(),
+ Message: fmt.Sprintf("%s struct literal uses unkeyed fields", typeName),
+ }
+ if suggestedFixAvailable {
+ diag.SuggestedFixes = []analysis.SuggestedFix{{
+ Message: "Add field names to struct literal",
+ TextEdits: missingKeys,
+ }}
+ }
+ pass.Report(diag)
return
}
})
diff --git a/go/analysis/passes/composite/composite_test.go b/go/analysis/passes/composite/composite_test.go
index 952de8bfd..7afaaa7ff 100644
--- a/go/analysis/passes/composite/composite_test.go
+++ b/go/analysis/passes/composite/composite_test.go
@@ -18,5 +18,5 @@ func Test(t *testing.T) {
if typeparams.Enabled {
pkgs = append(pkgs, "typeparams")
}
- analysistest.Run(t, testdata, composite.Analyzer, pkgs...)
+ analysistest.RunWithSuggestedFixes(t, testdata, composite.Analyzer, pkgs...)
}
diff --git a/go/analysis/passes/composite/testdata/src/a/a.go b/go/analysis/passes/composite/testdata/src/a/a.go
index 3a5bc203b..cd69d3951 100644
--- a/go/analysis/passes/composite/testdata/src/a/a.go
+++ b/go/analysis/passes/composite/testdata/src/a/a.go
@@ -11,6 +11,7 @@ import (
"go/scanner"
"go/token"
"image"
+ "sync"
"unicode"
)
@@ -79,6 +80,18 @@ var badStructLiteral = flag.Flag{ // want "unkeyed fields"
nil, // Value
"DefValue",
}
+var tooManyFieldsStructLiteral = flag.Flag{ // want "unkeyed fields"
+ "Name",
+ "Usage",
+ nil, // Value
+ "DefValue",
+ "Extra Field",
+}
+var tooFewFieldsStructLiteral = flag.Flag{ // want "unkeyed fields"
+ "Name",
+ "Usage",
+ nil, // Value
+}
var delta [3]rune
@@ -100,6 +113,10 @@ var badScannerErrorList = scanner.ErrorList{
&scanner.Error{token.Position{}, "foobar"}, // want "unkeyed fields"
}
+// sync.Mutex has unexported fields. We expect a diagnostic but no
+// suggested fix.
+var mu = sync.Mutex{0, 0} // want "unkeyed fields"
+
// Check whitelisted structs: if vet is run with --compositewhitelist=false,
// this line triggers an error.
var whitelistedPoint = image.Point{1, 2}
diff --git a/go/analysis/passes/composite/testdata/src/a/a.go.golden b/go/analysis/passes/composite/testdata/src/a/a.go.golden
new file mode 100644
index 000000000..fe73a2e0a
--- /dev/null
+++ b/go/analysis/passes/composite/testdata/src/a/a.go.golden
@@ -0,0 +1,144 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains the test for untagged struct literals.
+
+package a
+
+import (
+ "flag"
+ "go/scanner"
+ "go/token"
+ "image"
+ "sync"
+ "unicode"
+)
+
+var Okay1 = []string{
+ "Name",
+ "Usage",
+ "DefValue",
+}
+
+var Okay2 = map[string]bool{
+ "Name": true,
+ "Usage": true,
+ "DefValue": true,
+}
+
+var Okay3 = struct {
+ X string
+ Y string
+ Z string
+}{
+ "Name",
+ "Usage",
+ "DefValue",
+}
+
+var Okay4 = []struct {
+ A int
+ B int
+}{
+ {1, 2},
+ {3, 4},
+}
+
+type MyStruct struct {
+ X string
+ Y string
+ Z string
+}
+
+var Okay5 = &MyStruct{
+ "Name",
+ "Usage",
+ "DefValue",
+}
+
+var Okay6 = []MyStruct{
+ {"foo", "bar", "baz"},
+ {"aa", "bb", "cc"},
+}
+
+var Okay7 = []*MyStruct{
+ {"foo", "bar", "baz"},
+ {"aa", "bb", "cc"},
+}
+
+// Testing is awkward because we need to reference things from a separate package
+// to trigger the warnings.
+
+var goodStructLiteral = flag.Flag{
+ Name: "Name",
+ Usage: "Usage",
+}
+var badStructLiteral = flag.Flag{ // want "unkeyed fields"
+ Name: "Name",
+ Usage: "Usage",
+ Value: nil, // Value
+ DefValue: "DefValue",
+}
+var tooManyFieldsStructLiteral = flag.Flag{ // want "unkeyed fields"
+ "Name",
+ "Usage",
+ nil, // Value
+ "DefValue",
+ "Extra Field",
+}
+var tooFewFieldsStructLiteral = flag.Flag{ // want "unkeyed fields"
+ "Name",
+ "Usage",
+ nil, // Value
+}
+
+var delta [3]rune
+
+// SpecialCase is a named slice of CaseRange to test issue 9171.
+var goodNamedSliceLiteral = unicode.SpecialCase{
+ {Lo: 1, Hi: 2, Delta: delta},
+ unicode.CaseRange{Lo: 1, Hi: 2, Delta: delta},
+}
+var badNamedSliceLiteral = unicode.SpecialCase{
+ {Lo: 1, Hi: 2, Delta: delta}, // want "unkeyed fields"
+ unicode.CaseRange{Lo: 1, Hi: 2, Delta: delta}, // want "unkeyed fields"
+}
+
+// ErrorList is a named slice, so no warnings should be emitted.
+var goodScannerErrorList = scanner.ErrorList{
+ &scanner.Error{Msg: "foobar"},
+}
+var badScannerErrorList = scanner.ErrorList{
+ &scanner.Error{Pos: token.Position{}, Msg: "foobar"}, // want "unkeyed fields"
+}
+
+// sync.Mutex has unexported fields. We expect a diagnostic but no
+// suggested fix.
+var mu = sync.Mutex{0, 0} // want "unkeyed fields"
+
+// Check whitelisted structs: if vet is run with --compositewhitelist=false,
+// this line triggers an error.
+var whitelistedPoint = image.Point{1, 2}
+
+// Do not check type from unknown package.
+// See issue 15408.
+var unknownPkgVar = unicode.NoSuchType{"foo", "bar"}
+
+// A named pointer slice of CaseRange to test issue 23539. In
+// particular, we're interested in how some slice elements omit their
+// type.
+var goodNamedPointerSliceLiteral = []*unicode.CaseRange{
+ {Lo: 1, Hi: 2},
+ &unicode.CaseRange{Lo: 1, Hi: 2},
+}
+var badNamedPointerSliceLiteral = []*unicode.CaseRange{
+ {Lo: 1, Hi: 2, Delta: delta}, // want "unkeyed fields"
+ &unicode.CaseRange{Lo: 1, Hi: 2, Delta: delta}, // want "unkeyed fields"
+}
+
+// unicode.Range16 is whitelisted, so there'll be no vet error
+var range16 = unicode.Range16{0xfdd0, 0xfdef, 1}
+
+// unicode.Range32 is whitelisted, so there'll be no vet error
+var range32 = unicode.Range32{0x1fffe, 0x1ffff, 1}
diff --git a/go/analysis/passes/composite/testdata/src/a/a_fuzz_test.go.golden b/go/analysis/passes/composite/testdata/src/a/a_fuzz_test.go.golden
new file mode 100644
index 000000000..20b652e88
--- /dev/null
+++ b/go/analysis/passes/composite/testdata/src/a/a_fuzz_test.go.golden
@@ -0,0 +1,16 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18
+// +build go1.18
+
+package a
+
+import "testing"
+
+var fuzzTargets = []testing.InternalFuzzTarget{
+ {"Fuzz", Fuzz},
+}
+
+func Fuzz(f *testing.F) {}
diff --git a/go/analysis/passes/composite/testdata/src/typeparams/typeparams.go b/go/analysis/passes/composite/testdata/src/typeparams/typeparams.go
index dd5d57efe..f9a5e1fb1 100644
--- a/go/analysis/passes/composite/testdata/src/typeparams/typeparams.go
+++ b/go/analysis/passes/composite/testdata/src/typeparams/typeparams.go
@@ -6,7 +6,7 @@ package typeparams
import "typeparams/lib"
-type localStruct struct { F int }
+type localStruct struct{ F int }
func F[
T1 ~struct{ f int },
@@ -20,8 +20,8 @@ func F[
_ = T1{2}
_ = T2a{2}
_ = T2b{2} // want "unkeyed fields"
- _ = T3{1,2}
- _ = T4{1,2}
- _ = T5{1:2}
- _ = T6{1:2}
+ _ = T3{1, 2}
+ _ = T4{1, 2}
+ _ = T5{1: 2}
+ _ = T6{1: 2}
}
diff --git a/go/analysis/passes/composite/testdata/src/typeparams/typeparams.go.golden b/go/analysis/passes/composite/testdata/src/typeparams/typeparams.go.golden
new file mode 100644
index 000000000..66cd9158c
--- /dev/null
+++ b/go/analysis/passes/composite/testdata/src/typeparams/typeparams.go.golden
@@ -0,0 +1,27 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeparams
+
+import "typeparams/lib"
+
+type localStruct struct{ F int }
+
+func F[
+ T1 ~struct{ f int },
+ T2a localStruct,
+ T2b lib.Struct,
+ T3 ~[]int,
+ T4 lib.Slice,
+ T5 ~map[int]int,
+ T6 lib.Map,
+]() {
+ _ = T1{2}
+ _ = T2a{2}
+ _ = T2b{F: 2} // want "unkeyed fields"
+ _ = T3{1, 2}
+ _ = T4{1, 2}
+ _ = T5{1: 2}
+ _ = T6{1: 2}
+}
diff --git a/go/analysis/passes/copylock/copylock.go b/go/analysis/passes/copylock/copylock.go
index 350dc4e0f..8cc93e94d 100644
--- a/go/analysis/passes/copylock/copylock.go
+++ b/go/analysis/passes/copylock/copylock.go
@@ -128,7 +128,7 @@ func checkCopyLocksCallExpr(pass *analysis.Pass, ce *ast.CallExpr) {
}
if fun, ok := pass.TypesInfo.Uses[id].(*types.Builtin); ok {
switch fun.Name() {
- case "new", "len", "cap", "Sizeof":
+ case "new", "len", "cap", "Sizeof", "Offsetof", "Alignof":
return
}
}
diff --git a/go/analysis/passes/copylock/testdata/src/a/copylock.go b/go/analysis/passes/copylock/testdata/src/a/copylock.go
index e528280ab..4ab66dca1 100644
--- a/go/analysis/passes/copylock/testdata/src/a/copylock.go
+++ b/go/analysis/passes/copylock/testdata/src/a/copylock.go
@@ -50,27 +50,27 @@ func BadFunc() {
var t Tlock
var tp *Tlock
tp = &t
- *tp = t // want `assignment copies lock value to \*tp: a.Tlock contains sync.Once contains sync.Mutex`
- t = *tp // want "assignment copies lock value to t: a.Tlock contains sync.Once contains sync.Mutex"
+ *tp = t // want `assignment copies lock value to \*tp: a.Tlock contains sync.Once contains sync\b.*`
+ t = *tp // want `assignment copies lock value to t: a.Tlock contains sync.Once contains sync\b.*`
y := *x // want "assignment copies lock value to y: sync.Mutex"
- var z = t // want "variable declaration copies lock value to z: a.Tlock contains sync.Once contains sync.Mutex"
+ var z = t // want `variable declaration copies lock value to z: a.Tlock contains sync.Once contains sync\b.*`
w := struct{ L sync.Mutex }{
L: *x, // want `literal copies lock value from \*x: sync.Mutex`
}
var q = map[int]Tlock{
- 1: t, // want "literal copies lock value from t: a.Tlock contains sync.Once contains sync.Mutex"
- 2: *tp, // want `literal copies lock value from \*tp: a.Tlock contains sync.Once contains sync.Mutex`
+ 1: t, // want `literal copies lock value from t: a.Tlock contains sync.Once contains sync\b.*`
+ 2: *tp, // want `literal copies lock value from \*tp: a.Tlock contains sync.Once contains sync\b.*`
}
yy := []Tlock{
- t, // want "literal copies lock value from t: a.Tlock contains sync.Once contains sync.Mutex"
- *tp, // want `literal copies lock value from \*tp: a.Tlock contains sync.Once contains sync.Mutex`
+ t, // want `literal copies lock value from t: a.Tlock contains sync.Once contains sync\b.*`
+ *tp, // want `literal copies lock value from \*tp: a.Tlock contains sync.Once contains sync\b.*`
}
// override 'new' keyword
new := func(interface{}) {}
- new(t) // want "call of new copies lock value: a.Tlock contains sync.Once contains sync.Mutex"
+ new(t) // want `call of new copies lock value: a.Tlock contains sync.Once contains sync\b.*`
// copy of array of locks
var muA [5]sync.Mutex
@@ -124,6 +124,26 @@ func SizeofMutex() {
Sizeof(mu) // want "call of Sizeof copies lock value: sync.Mutex"
}
+func OffsetofMutex() {
+ type T struct {
+ f int
+ mu sync.Mutex
+ }
+ unsafe.Offsetof(T{}.mu) // OK
+ unsafe := struct{ Offsetof func(interface{}) }{}
+ unsafe.Offsetof(T{}.mu) // want "call of unsafe.Offsetof copies lock value: sync.Mutex"
+}
+
+func AlignofMutex() {
+ type T struct {
+ f int
+ mu sync.Mutex
+ }
+ unsafe.Alignof(T{}.mu) // OK
+ unsafe := struct{ Alignof func(interface{}) }{}
+ unsafe.Alignof(T{}.mu) // want "call of unsafe.Alignof copies lock value: sync.Mutex"
+}
+
// SyncTypesCheck checks copying of sync.* types except sync.Mutex
func SyncTypesCheck() {
// sync.RWMutex copying
@@ -173,9 +193,9 @@ func SyncTypesCheck() {
var onceX sync.Once
var onceXX = sync.Once{}
onceX1 := new(sync.Once)
- onceY := onceX // want "assignment copies lock value to onceY: sync.Once contains sync.Mutex"
- onceY = onceX // want "assignment copies lock value to onceY: sync.Once contains sync.Mutex"
- var onceYY = onceX // want "variable declaration copies lock value to onceYY: sync.Once contains sync.Mutex"
+ onceY := onceX // want `assignment copies lock value to onceY: sync.Once contains sync\b.*`
+ onceY = onceX // want `assignment copies lock value to onceY: sync.Once contains sync\b.*`
+ var onceYY = onceX // want `variable declaration copies lock value to onceYY: sync.Once contains sync\b.*`
onceP := &onceX
onceZ := &sync.Once{}
}
diff --git a/go/analysis/passes/copylock/testdata/src/a/copylock_func.go b/go/analysis/passes/copylock/testdata/src/a/copylock_func.go
index 801bc6f24..0d3168f1e 100644
--- a/go/analysis/passes/copylock/testdata/src/a/copylock_func.go
+++ b/go/analysis/passes/copylock/testdata/src/a/copylock_func.go
@@ -126,7 +126,7 @@ func AcceptedCases() {
// sync.Mutex gets called out, but without any reference to the sync.Once.
type LocalOnce sync.Once
-func (LocalOnce) Bad() {} // want "Bad passes lock by value: a.LocalOnce contains sync.Mutex"
+func (LocalOnce) Bad() {} // want `Bad passes lock by value: a.LocalOnce contains sync.\b.*`
// False negative:
// LocalMutex doesn't have a Lock method.
diff --git a/go/analysis/passes/directive/directive.go b/go/analysis/passes/directive/directive.go
new file mode 100644
index 000000000..76d852cd0
--- /dev/null
+++ b/go/analysis/passes/directive/directive.go
@@ -0,0 +1,216 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package directive defines an Analyzer that checks known Go toolchain directives.
+package directive
+
+import (
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/internal/analysisutil"
+)
+
+const Doc = `check Go toolchain directives such as //go:debug
+
+This analyzer checks for problems with known Go toolchain directives
+in all Go source files in a package directory, even those excluded by
+//go:build constraints, and all non-Go source files too.
+
+For //go:debug (see https://go.dev/doc/godebug), the analyzer checks
+that the directives are placed only in Go source files, only above the
+package comment, and only in package main or *_test.go files.
+
+Support for other known directives may be added in the future.
+
+This analyzer does not check //go:build, which is handled by the
+buildtag analyzer.
+`
+
+var Analyzer = &analysis.Analyzer{
+ Name: "directive",
+ Doc: Doc,
+ Run: runDirective,
+}
+
+func runDirective(pass *analysis.Pass) (interface{}, error) {
+ for _, f := range pass.Files {
+ checkGoFile(pass, f)
+ }
+ for _, name := range pass.OtherFiles {
+ if err := checkOtherFile(pass, name); err != nil {
+ return nil, err
+ }
+ }
+ for _, name := range pass.IgnoredFiles {
+ if strings.HasSuffix(name, ".go") {
+ f, err := parser.ParseFile(pass.Fset, name, nil, parser.ParseComments)
+ if err != nil {
+ // Not valid Go source code - not our job to diagnose, so ignore.
+ continue
+ }
+ checkGoFile(pass, f)
+ } else {
+ if err := checkOtherFile(pass, name); err != nil {
+ return nil, err
+ }
+ }
+ }
+ return nil, nil
+}
+
+func checkGoFile(pass *analysis.Pass, f *ast.File) {
+ check := newChecker(pass, pass.Fset.File(f.Package).Name(), f)
+
+ for _, group := range f.Comments {
+ // A +build comment is ignored after or adjoining the package declaration.
+ if group.End()+1 >= f.Package {
+ check.inHeader = false
+ }
+ // A //go:build comment is ignored after the package declaration
+ // (but adjoining it is OK, in contrast to +build comments).
+ if group.Pos() >= f.Package {
+ check.inHeader = false
+ }
+
+ // Check each line of a //-comment.
+ for _, c := range group.List {
+ check.comment(c.Slash, c.Text)
+ }
+ }
+}
+
+func checkOtherFile(pass *analysis.Pass, filename string) error {
+ // We cannot use the Go parser, since is not a Go source file.
+ // Read the raw bytes instead.
+ content, tf, err := analysisutil.ReadFile(pass.Fset, filename)
+ if err != nil {
+ return err
+ }
+
+ check := newChecker(pass, filename, nil)
+ check.nonGoFile(token.Pos(tf.Base()), string(content))
+ return nil
+}
+
+type checker struct {
+ pass *analysis.Pass
+ filename string
+ file *ast.File // nil for non-Go file
+ inHeader bool // in file header (before package declaration)
+ inStar bool // currently in a /* */ comment
+}
+
+func newChecker(pass *analysis.Pass, filename string, file *ast.File) *checker {
+ return &checker{
+ pass: pass,
+ filename: filename,
+ file: file,
+ inHeader: true,
+ }
+}
+
+func (check *checker) nonGoFile(pos token.Pos, fullText string) {
+ // Process each line.
+ text := fullText
+ inStar := false
+ for text != "" {
+ offset := len(fullText) - len(text)
+ var line string
+ line, text, _ = stringsCut(text, "\n")
+
+ if !inStar && strings.HasPrefix(line, "//") {
+ check.comment(pos+token.Pos(offset), line)
+ continue
+ }
+
+ // Skip over, cut out any /* */ comments,
+ // to avoid being confused by a commented-out // comment.
+ for {
+ line = strings.TrimSpace(line)
+ if inStar {
+ var ok bool
+ _, line, ok = stringsCut(line, "*/")
+ if !ok {
+ break
+ }
+ inStar = false
+ continue
+ }
+ line, inStar = stringsCutPrefix(line, "/*")
+ if !inStar {
+ break
+ }
+ }
+ if line != "" {
+ // Found non-comment non-blank line.
+ // Ends space for valid //go:build comments,
+ // but also ends the fraction of the file we can
+ // reliably parse. From this point on we might
+ // incorrectly flag "comments" inside multiline
+ // string constants or anything else (this might
+ // not even be a Go program). So stop.
+ break
+ }
+ }
+}
+
+func (check *checker) comment(pos token.Pos, line string) {
+ if !strings.HasPrefix(line, "//go:") {
+ return
+ }
+ // testing hack: stop at // ERROR
+ if i := strings.Index(line, " // ERROR "); i >= 0 {
+ line = line[:i]
+ }
+
+ verb := line
+ if i := strings.IndexFunc(verb, unicode.IsSpace); i >= 0 {
+ verb = verb[:i]
+ if line[i] != ' ' && line[i] != '\t' && line[i] != '\n' {
+ r, _ := utf8.DecodeRuneInString(line[i:])
+ check.pass.Reportf(pos, "invalid space %#q in %s directive", r, verb)
+ }
+ }
+
+ switch verb {
+ default:
+ // TODO: Use the go language version for the file.
+ // If that version is not newer than us, then we can
+ // report unknown directives.
+
+ case "//go:build":
+ // Ignore. The buildtag analyzer reports misplaced comments.
+
+ case "//go:debug":
+ if check.file == nil {
+ check.pass.Reportf(pos, "//go:debug directive only valid in Go source files")
+ } else if check.file.Name.Name != "main" && !strings.HasSuffix(check.filename, "_test.go") {
+ check.pass.Reportf(pos, "//go:debug directive only valid in package main or test")
+ } else if !check.inHeader {
+ check.pass.Reportf(pos, "//go:debug directive only valid before package declaration")
+ }
+ }
+}
+
+// Go 1.18 strings.Cut.
+func stringsCut(s, sep string) (before, after string, found bool) {
+ if i := strings.Index(s, sep); i >= 0 {
+ return s[:i], s[i+len(sep):], true
+ }
+ return s, "", false
+}
+
+// Go 1.20 strings.CutPrefix.
+func stringsCutPrefix(s, prefix string) (after string, found bool) {
+ if !strings.HasPrefix(s, prefix) {
+ return s, false
+ }
+ return s[len(prefix):], true
+}
diff --git a/go/analysis/passes/directive/directive_test.go b/go/analysis/passes/directive/directive_test.go
new file mode 100644
index 000000000..a526c0d74
--- /dev/null
+++ b/go/analysis/passes/directive/directive_test.go
@@ -0,0 +1,39 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package directive_test
+
+import (
+ "runtime"
+ "strings"
+ "testing"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/analysistest"
+ "golang.org/x/tools/go/analysis/passes/directive"
+)
+
+func Test(t *testing.T) {
+ if strings.HasPrefix(runtime.Version(), "go1.") && runtime.Version() < "go1.16" {
+ t.Skipf("skipping on %v", runtime.Version())
+ }
+ analyzer := *directive.Analyzer
+ analyzer.Run = func(pass *analysis.Pass) (interface{}, error) {
+ defer func() {
+ // The directive pass is unusual in that it checks the IgnoredFiles.
+ // After analysis, add IgnoredFiles to OtherFiles so that
+ // the test harness checks for expected diagnostics in those.
+ // (The test harness shouldn't do this by default because most
+ // passes can't do anything with the IgnoredFiles without type
+ // information, which is unavailable because they are ignored.)
+ var files []string
+ files = append(files, pass.OtherFiles...)
+ files = append(files, pass.IgnoredFiles...)
+ pass.OtherFiles = files
+ }()
+
+ return directive.Analyzer.Run(pass)
+ }
+ analysistest.Run(t, analysistest.TestData(), &analyzer, "a")
+}
diff --git a/go/analysis/passes/directive/testdata/src/a/badspace.go b/go/analysis/passes/directive/testdata/src/a/badspace.go
new file mode 100644
index 000000000..113139960
--- /dev/null
+++ b/go/analysis/passes/directive/testdata/src/a/badspace.go
@@ -0,0 +1,11 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+
+// want +1 `invalid space '\\u00a0' in //go:debug directive`
+//go:debug 00a0
+
+package main
+
diff --git a/go/analysis/passes/directive/testdata/src/a/misplaced.go b/go/analysis/passes/directive/testdata/src/a/misplaced.go
new file mode 100644
index 000000000..db30ceb47
--- /dev/null
+++ b/go/analysis/passes/directive/testdata/src/a/misplaced.go
@@ -0,0 +1,10 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+
+package main
+
+// want +1 `//go:debug directive only valid before package declaration`
+//go:debug panicnil=1
diff --git a/go/analysis/passes/directive/testdata/src/a/misplaced.s b/go/analysis/passes/directive/testdata/src/a/misplaced.s
new file mode 100644
index 000000000..9e26dbc52
--- /dev/null
+++ b/go/analysis/passes/directive/testdata/src/a/misplaced.s
@@ -0,0 +1,19 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// want +1 `//go:debug directive only valid in Go source files`
+//go:debug panicnil=1
+
+/*
+can skip over comments
+//go:debug doesn't matter here
+*/
+
+// want +1 `//go:debug directive only valid in Go source files`
+//go:debug panicnil=1
+
+package a
+
+// no error here because we can't parse this far
+//go:debug panicnil=1
diff --git a/go/analysis/passes/directive/testdata/src/a/misplaced_test.go b/go/analysis/passes/directive/testdata/src/a/misplaced_test.go
new file mode 100644
index 000000000..6b4527a35
--- /dev/null
+++ b/go/analysis/passes/directive/testdata/src/a/misplaced_test.go
@@ -0,0 +1,10 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:debug panicnil=1
+
+package p_test
+
+// want +1 `//go:debug directive only valid before package declaration`
+//go:debug panicnil=1
diff --git a/go/analysis/passes/directive/testdata/src/a/p.go b/go/analysis/passes/directive/testdata/src/a/p.go
new file mode 100644
index 000000000..e1e3e6552
--- /dev/null
+++ b/go/analysis/passes/directive/testdata/src/a/p.go
@@ -0,0 +1,11 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// want +1 `//go:debug directive only valid in package main or test`
+//go:debug panicnil=1
+
+package p
+
+// want +1 `//go:debug directive only valid in package main or test`
+//go:debug panicnil=1
diff --git a/go/analysis/passes/errorsas/errorsas.go b/go/analysis/passes/errorsas/errorsas.go
index 384f02557..96adad3ee 100644
--- a/go/analysis/passes/errorsas/errorsas.go
+++ b/go/analysis/passes/errorsas/errorsas.go
@@ -7,6 +7,7 @@
package errorsas
import (
+ "errors"
"go/ast"
"go/types"
@@ -50,26 +51,39 @@ func run(pass *analysis.Pass) (interface{}, error) {
if len(call.Args) < 2 {
return // not enough arguments, e.g. called with return values of another function
}
- if fn.FullName() == "errors.As" && !pointerToInterfaceOrError(pass, call.Args[1]) {
- pass.ReportRangef(call, "second argument to errors.As must be a non-nil pointer to either a type that implements error, or to any interface type")
+ if fn.FullName() != "errors.As" {
+ return
+ }
+ if err := checkAsTarget(pass, call.Args[1]); err != nil {
+ pass.ReportRangef(call, "%v", err)
}
})
return nil, nil
}
-var errorType = types.Universe.Lookup("error").Type().Underlying().(*types.Interface)
+var errorType = types.Universe.Lookup("error").Type()
// pointerToInterfaceOrError reports whether the type of e is a pointer to an interface or a type implementing error,
// or is the empty interface.
-func pointerToInterfaceOrError(pass *analysis.Pass, e ast.Expr) bool {
+
+// checkAsTarget reports an error if the second argument to errors.As is invalid.
+func checkAsTarget(pass *analysis.Pass, e ast.Expr) error {
t := pass.TypesInfo.Types[e].Type
if it, ok := t.Underlying().(*types.Interface); ok && it.NumMethods() == 0 {
- return true
+ // A target of interface{} is always allowed, since it often indicates
+ // a value forwarded from another source.
+ return nil
}
pt, ok := t.Underlying().(*types.Pointer)
if !ok {
- return false
+ return errors.New("second argument to errors.As must be a non-nil pointer to either a type that implements error, or to any interface type")
+ }
+ if pt.Elem() == errorType {
+ return errors.New("second argument to errors.As should not be *error")
}
_, ok = pt.Elem().Underlying().(*types.Interface)
- return ok || types.Implements(pt.Elem(), errorType)
+ if ok || types.Implements(pt.Elem(), errorType.Underlying().(*types.Interface)) {
+ return nil
+ }
+ return errors.New("second argument to errors.As must be a non-nil pointer to either a type that implements error, or to any interface type")
}
diff --git a/go/analysis/passes/errorsas/testdata/src/a/a.go b/go/analysis/passes/errorsas/testdata/src/a/a.go
index c987a8a65..7a9ae8976 100644
--- a/go/analysis/passes/errorsas/testdata/src/a/a.go
+++ b/go/analysis/passes/errorsas/testdata/src/a/a.go
@@ -28,10 +28,10 @@ func _() {
f iface
ei interface{}
)
- errors.As(nil, &e) // *error
+ errors.As(nil, &e) // want `second argument to errors.As should not be \*error`
errors.As(nil, &m) // *T where T implemements error
errors.As(nil, &f) // *interface
- errors.As(nil, perr()) // *error, via a call
+ errors.As(nil, perr()) // want `second argument to errors.As should not be \*error`
errors.As(nil, ei) // empty interface
errors.As(nil, nil) // want `second argument to errors.As must be a non-nil pointer to either a type that implements error, or to any interface type`
diff --git a/go/analysis/passes/errorsas/testdata/src/typeparams/typeparams.go b/go/analysis/passes/errorsas/testdata/src/typeparams/typeparams.go
index 5b9ec457c..4f7ae8491 100644
--- a/go/analysis/passes/errorsas/testdata/src/typeparams/typeparams.go
+++ b/go/analysis/passes/errorsas/testdata/src/typeparams/typeparams.go
@@ -28,7 +28,7 @@ func _[E error](e E) {
errors.As(nil, &e)
errors.As(nil, &m) // *T where T implemements error
errors.As(nil, &tw.t) // *T where T implements error
- errors.As(nil, perr[error]()) // *error, via a call
+ errors.As(nil, perr[error]()) // want `second argument to errors.As should not be \*error`
errors.As(nil, e) // want `second argument to errors.As must be a non-nil pointer to either a type that implements error, or to any interface type`
errors.As(nil, m) // want `second argument to errors.As must be a non-nil pointer to either a type that implements error, or to any interface type`
diff --git a/go/analysis/passes/fieldalignment/fieldalignment.go b/go/analysis/passes/fieldalignment/fieldalignment.go
index 78afe94ab..aff663046 100644
--- a/go/analysis/passes/fieldalignment/fieldalignment.go
+++ b/go/analysis/passes/fieldalignment/fieldalignment.go
@@ -23,7 +23,7 @@ import (
const Doc = `find structs that would use less memory if their fields were sorted
This analyzer find structs that can be rearranged to use less memory, and provides
-a suggested edit with the optimal order.
+a suggested edit with the most compact order.
Note that there are two different diagnostics reported. One checks struct size,
and the other reports "pointer bytes" used. Pointer bytes is how many bytes of the
@@ -41,6 +41,11 @@ has 24 pointer bytes because it has to scan further through the *uint32.
struct { string; uint32 }
has 8 because it can stop immediately after the string pointer.
+
+Be aware that the most compact order is not always the most efficient.
+In rare cases it may cause two variables each updated by its own goroutine
+to occupy the same CPU cache line, inducing a form of memory contention
+known as "false sharing" that slows down both goroutines.
`
var Analyzer = &analysis.Analyzer{
diff --git a/go/analysis/passes/httpresponse/httpresponse.go b/go/analysis/passes/httpresponse/httpresponse.go
index fd9e2af2b..3b9168c6c 100644
--- a/go/analysis/passes/httpresponse/httpresponse.go
+++ b/go/analysis/passes/httpresponse/httpresponse.go
@@ -62,15 +62,23 @@ func run(pass *analysis.Pass) (interface{}, error) {
// Find the innermost containing block, and get the list
// of statements starting with the one containing call.
- stmts := restOfBlock(stack)
+ stmts, ncalls := restOfBlock(stack)
if len(stmts) < 2 {
- return true // the call to the http function is the last statement of the block.
+ // The call to the http function is the last statement of the block.
+ return true
+ }
+
+ // Skip cases in which the call is wrapped by another (#52661).
+ // Example: resp, err := checkError(http.Get(url))
+ if ncalls > 1 {
+ return true
}
asg, ok := stmts[0].(*ast.AssignStmt)
if !ok {
return true // the first statement is not assignment.
}
+
resp := rootIdent(asg.Lhs[0])
if resp == nil {
return true // could not find the http.Response in the assignment.
@@ -130,20 +138,25 @@ func isHTTPFuncOrMethodOnClient(info *types.Info, expr *ast.CallExpr) bool {
}
// restOfBlock, given a traversal stack, finds the innermost containing
-// block and returns the suffix of its statements starting with the
-// current node (the last element of stack).
-func restOfBlock(stack []ast.Node) []ast.Stmt {
+// block and returns the suffix of its statements starting with the current
+// node, along with the number of call expressions encountered.
+func restOfBlock(stack []ast.Node) ([]ast.Stmt, int) {
+ var ncalls int
for i := len(stack) - 1; i >= 0; i-- {
if b, ok := stack[i].(*ast.BlockStmt); ok {
for j, v := range b.List {
if v == stack[i+1] {
- return b.List[j:]
+ return b.List[j:], ncalls
}
}
break
}
+
+ if _, ok := stack[i].(*ast.CallExpr); ok {
+ ncalls++
+ }
}
- return nil
+ return nil, 0
}
// rootIdent finds the root identifier x in a chain of selections x.y.z, or nil if not found.
diff --git a/go/analysis/passes/httpresponse/httpresponse_test.go b/go/analysis/passes/httpresponse/httpresponse_test.go
index 14e166789..34dc78ce2 100644
--- a/go/analysis/passes/httpresponse/httpresponse_test.go
+++ b/go/analysis/passes/httpresponse/httpresponse_test.go
@@ -5,10 +5,11 @@
package httpresponse_test
import (
+ "testing"
+
"golang.org/x/tools/go/analysis/analysistest"
"golang.org/x/tools/go/analysis/passes/httpresponse"
"golang.org/x/tools/internal/typeparams"
- "testing"
)
func Test(t *testing.T) {
diff --git a/go/analysis/passes/httpresponse/testdata/src/a/a.go b/go/analysis/passes/httpresponse/testdata/src/a/a.go
index df7703f41..de4121270 100644
--- a/go/analysis/passes/httpresponse/testdata/src/a/a.go
+++ b/go/analysis/passes/httpresponse/testdata/src/a/a.go
@@ -83,3 +83,30 @@ func badClientDo() {
log.Fatal(err)
}
}
+
+func goodUnwrapResp() {
+ unwrapResp := func(resp *http.Response, err error) *http.Response {
+ if err != nil {
+ panic(err)
+ }
+ return resp
+ }
+ resp := unwrapResp(http.Get("https://golang.org"))
+ // It is ok to call defer here immediately as err has
+ // been checked in unwrapResp (see #52661).
+ defer resp.Body.Close()
+}
+
+func badUnwrapResp() {
+ unwrapResp := func(resp *http.Response, err error) string {
+ if err != nil {
+ panic(err)
+ }
+ return "https://golang.org/" + resp.Status
+ }
+ resp, err := http.Get(unwrapResp(http.Get("https://golang.org")))
+ defer resp.Body.Close() // want "using resp before checking for errors"
+ if err != nil {
+ log.Fatal(err)
+ }
+}
diff --git a/go/analysis/passes/ifaceassert/parameterized.go b/go/analysis/passes/ifaceassert/parameterized.go
index 1285ecf13..b35f62dc7 100644
--- a/go/analysis/passes/ifaceassert/parameterized.go
+++ b/go/analysis/passes/ifaceassert/parameterized.go
@@ -1,6 +1,7 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+
package ifaceassert
import (
diff --git a/go/analysis/passes/inspect/inspect.go b/go/analysis/passes/inspect/inspect.go
index 4bb652a72..165c70cbd 100644
--- a/go/analysis/passes/inspect/inspect.go
+++ b/go/analysis/passes/inspect/inspect.go
@@ -19,14 +19,13 @@
// Requires: []*analysis.Analyzer{inspect.Analyzer},
// }
//
-// func run(pass *analysis.Pass) (interface{}, error) {
-// inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
-// inspect.Preorder(nil, func(n ast.Node) {
-// ...
-// })
-// return nil
-// }
-//
+// func run(pass *analysis.Pass) (interface{}, error) {
+// inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+// inspect.Preorder(nil, func(n ast.Node) {
+// ...
+// })
+// return nil, nil
+// }
package inspect
import (
diff --git a/go/analysis/passes/loopclosure/loopclosure.go b/go/analysis/passes/loopclosure/loopclosure.go
index 3ea91574d..ae5b4151d 100644
--- a/go/analysis/passes/loopclosure/loopclosure.go
+++ b/go/analysis/passes/loopclosure/loopclosure.go
@@ -18,19 +18,60 @@ import (
const Doc = `check references to loop variables from within nested functions
-This analyzer checks for references to loop variables from within a
-function literal inside the loop body. It checks only instances where
-the function literal is called in a defer or go statement that is the
-last statement in the loop body, as otherwise we would need whole
-program analysis.
+This analyzer reports places where a function literal references the
+iteration variable of an enclosing loop, and the loop calls the function
+in such a way (e.g. with go or defer) that it may outlive the loop
+iteration and possibly observe the wrong value of the variable.
-For example:
+In this example, all the deferred functions run after the loop has
+completed, so all observe the final value of v.
- for i, v := range s {
- go func() {
- println(i, v) // not what you might expect
- }()
- }
+ for _, v := range list {
+ defer func() {
+ use(v) // incorrect
+ }()
+ }
+
+One fix is to create a new variable for each iteration of the loop:
+
+ for _, v := range list {
+ v := v // new var per iteration
+ defer func() {
+ use(v) // ok
+ }()
+ }
+
+The next example uses a go statement and has a similar problem.
+In addition, it has a data race because the loop updates v
+concurrent with the goroutines accessing it.
+
+ for _, v := range elem {
+ go func() {
+ use(v) // incorrect, and a data race
+ }()
+ }
+
+A fix is the same as before. The checker also reports problems
+in goroutines started by golang.org/x/sync/errgroup.Group.
+A hard-to-spot variant of this form is common in parallel tests:
+
+ func Test(t *testing.T) {
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ t.Parallel()
+ use(test) // incorrect, and a data race
+ })
+ }
+ }
+
+The t.Parallel() call causes the rest of the function to execute
+concurrent with the loop.
+
+The analyzer reports references only in the last statement,
+as it is not deep enough to understand the effects of subsequent
+statements that might render the reference benign.
+("Last statement" is defined recursively in compound
+statements such as if, switch, and select.)
See: https://golang.org/doc/go_faq.html#closures_and_goroutines`
@@ -50,10 +91,12 @@ func run(pass *analysis.Pass) (interface{}, error) {
}
inspect.Preorder(nodeFilter, func(n ast.Node) {
// Find the variables updated by the loop statement.
- var vars []*ast.Ident
+ var vars []types.Object
addVar := func(expr ast.Expr) {
- if id, ok := expr.(*ast.Ident); ok {
- vars = append(vars, id)
+ if id, _ := expr.(*ast.Ident); id != nil {
+ if obj := pass.TypesInfo.ObjectOf(id); obj != nil {
+ vars = append(vars, obj)
+ }
}
}
var body *ast.BlockStmt
@@ -79,87 +122,312 @@ func run(pass *analysis.Pass) (interface{}, error) {
return
}
- // Inspect a go or defer statement
- // if it's the last one in the loop body.
- // (We give up if there are following statements,
- // because it's hard to prove go isn't followed by wait,
- // or defer by return.)
- if len(body.List) == 0 {
- return
- }
- // The function invoked in the last return statement.
- var fun ast.Expr
- switch s := body.List[len(body.List)-1].(type) {
- case *ast.GoStmt:
- fun = s.Call.Fun
- case *ast.DeferStmt:
- fun = s.Call.Fun
- case *ast.ExprStmt: // check for errgroup.Group.Go()
- if call, ok := s.X.(*ast.CallExpr); ok {
- fun = goInvokes(pass.TypesInfo, call)
- }
- }
- lit, ok := fun.(*ast.FuncLit)
- if !ok {
- return
- }
- ast.Inspect(lit.Body, func(n ast.Node) bool {
- id, ok := n.(*ast.Ident)
- if !ok || id.Obj == nil {
- return true
+ // Inspect statements to find function literals that may be run outside of
+ // the current loop iteration.
+ //
+ // For go, defer, and errgroup.Group.Go, we ignore all but the last
+ // statement, because it's hard to prove go isn't followed by wait, or
+ // defer by return. "Last" is defined recursively.
+ //
+ // TODO: consider allowing the "last" go/defer/Go statement to be followed by
+ // N "trivial" statements, possibly under a recursive definition of "trivial"
+ // so that that checker could, for example, conclude that a go statement is
+ // followed by an if statement made of only trivial statements and trivial expressions,
+ // and hence the go statement could still be checked.
+ forEachLastStmt(body.List, func(last ast.Stmt) {
+ var stmts []ast.Stmt
+ switch s := last.(type) {
+ case *ast.GoStmt:
+ stmts = litStmts(s.Call.Fun)
+ case *ast.DeferStmt:
+ stmts = litStmts(s.Call.Fun)
+ case *ast.ExprStmt: // check for errgroup.Group.Go
+ if call, ok := s.X.(*ast.CallExpr); ok {
+ stmts = litStmts(goInvoke(pass.TypesInfo, call))
+ }
}
- if pass.TypesInfo.Types[id].Type == nil {
- // Not referring to a variable (e.g. struct field name)
- return true
+ for _, stmt := range stmts {
+ reportCaptured(pass, vars, stmt)
}
- for _, v := range vars {
- if v.Obj == id.Obj {
- pass.ReportRangef(id, "loop variable %s captured by func literal",
- id.Name)
+ })
+
+ // Also check for testing.T.Run (with T.Parallel).
+ // We consider every t.Run statement in the loop body, because there is
+ // no commonly used mechanism for synchronizing parallel subtests.
+ // It is of course theoretically possible to synchronize parallel subtests,
+ // though such a pattern is likely to be exceedingly rare as it would be
+ // fighting against the test runner.
+ for _, s := range body.List {
+ switch s := s.(type) {
+ case *ast.ExprStmt:
+ if call, ok := s.X.(*ast.CallExpr); ok {
+ for _, stmt := range parallelSubtest(pass.TypesInfo, call) {
+ reportCaptured(pass, vars, stmt)
+ }
+
}
}
- return true
- })
+ }
})
return nil, nil
}
-// goInvokes returns a function expression that would be called asynchronously
+// reportCaptured reports a diagnostic stating a loop variable
+// has been captured by a func literal if checkStmt has escaping
+// references to vars. vars is expected to be variables updated by a loop statement,
+// and checkStmt is expected to be a statements from the body of a func literal in the loop.
+func reportCaptured(pass *analysis.Pass, vars []types.Object, checkStmt ast.Stmt) {
+ ast.Inspect(checkStmt, func(n ast.Node) bool {
+ id, ok := n.(*ast.Ident)
+ if !ok {
+ return true
+ }
+ obj := pass.TypesInfo.Uses[id]
+ if obj == nil {
+ return true
+ }
+ for _, v := range vars {
+ if v == obj {
+ pass.ReportRangef(id, "loop variable %s captured by func literal", id.Name)
+ }
+ }
+ return true
+ })
+}
+
+// forEachLastStmt calls onLast on each "last" statement in a list of statements.
+// "Last" is defined recursively so, for example, if the last statement is
+// a switch statement, then each switch case is also visited to examine
+// its last statements.
+func forEachLastStmt(stmts []ast.Stmt, onLast func(last ast.Stmt)) {
+ if len(stmts) == 0 {
+ return
+ }
+
+ s := stmts[len(stmts)-1]
+ switch s := s.(type) {
+ case *ast.IfStmt:
+ loop:
+ for {
+ forEachLastStmt(s.Body.List, onLast)
+ switch e := s.Else.(type) {
+ case *ast.BlockStmt:
+ forEachLastStmt(e.List, onLast)
+ break loop
+ case *ast.IfStmt:
+ s = e
+ case nil:
+ break loop
+ }
+ }
+ case *ast.ForStmt:
+ forEachLastStmt(s.Body.List, onLast)
+ case *ast.RangeStmt:
+ forEachLastStmt(s.Body.List, onLast)
+ case *ast.SwitchStmt:
+ for _, c := range s.Body.List {
+ cc := c.(*ast.CaseClause)
+ forEachLastStmt(cc.Body, onLast)
+ }
+ case *ast.TypeSwitchStmt:
+ for _, c := range s.Body.List {
+ cc := c.(*ast.CaseClause)
+ forEachLastStmt(cc.Body, onLast)
+ }
+ case *ast.SelectStmt:
+ for _, c := range s.Body.List {
+ cc := c.(*ast.CommClause)
+ forEachLastStmt(cc.Body, onLast)
+ }
+ default:
+ onLast(s)
+ }
+}
+
+// litStmts returns all statements from the function body of a function
+// literal.
+//
+// If fun is not a function literal, it returns nil.
+func litStmts(fun ast.Expr) []ast.Stmt {
+ lit, _ := fun.(*ast.FuncLit)
+ if lit == nil {
+ return nil
+ }
+ return lit.Body.List
+}
+
+// goInvoke returns a function expression that would be called asynchronously
// (but not awaited) in another goroutine as a consequence of the call.
// For example, given the g.Go call below, it returns the function literal expression.
//
-// import "sync/errgroup"
-// var g errgroup.Group
-// g.Go(func() error { ... })
+// import "sync/errgroup"
+// var g errgroup.Group
+// g.Go(func() error { ... })
//
// Currently only "golang.org/x/sync/errgroup.Group()" is considered.
-func goInvokes(info *types.Info, call *ast.CallExpr) ast.Expr {
- f := typeutil.StaticCallee(info, call)
- // Note: Currently only supports: golang.org/x/sync/errgroup.Go.
- if f == nil || f.Name() != "Go" {
+func goInvoke(info *types.Info, call *ast.CallExpr) ast.Expr {
+ if !isMethodCall(info, call, "golang.org/x/sync/errgroup", "Group", "Go") {
return nil
}
- recv := f.Type().(*types.Signature).Recv()
- if recv == nil {
+ return call.Args[0]
+}
+
+// parallelSubtest returns statements that can be easily proven to execute
+// concurrently via the go test runner, as t.Run has been invoked with a
+// function literal that calls t.Parallel.
+//
+// In practice, users rely on the fact that statements before the call to
+// t.Parallel are synchronous. For example by declaring test := test inside the
+// function literal, but before the call to t.Parallel.
+//
+// Therefore, we only flag references in statements that are obviously
+// dominated by a call to t.Parallel. As a simple heuristic, we only consider
+// statements following the final labeled statement in the function body, to
+// avoid scenarios where a jump would cause either the call to t.Parallel or
+// the problematic reference to be skipped.
+//
+// import "testing"
+//
+// func TestFoo(t *testing.T) {
+// tests := []int{0, 1, 2}
+// for i, test := range tests {
+// t.Run("subtest", func(t *testing.T) {
+// println(i, test) // OK
+// t.Parallel()
+// println(i, test) // Not OK
+// })
+// }
+// }
+func parallelSubtest(info *types.Info, call *ast.CallExpr) []ast.Stmt {
+ if !isMethodCall(info, call, "testing", "T", "Run") {
return nil
}
- rtype, ok := recv.Type().(*types.Pointer)
- if !ok {
+
+ if len(call.Args) != 2 {
+ // Ignore calls such as t.Run(fn()).
return nil
}
- named, ok := rtype.Elem().(*types.Named)
- if !ok {
+
+ lit, _ := call.Args[1].(*ast.FuncLit)
+ if lit == nil {
return nil
}
- if named.Obj().Name() != "Group" {
+
+ // Capture the *testing.T object for the first argument to the function
+ // literal.
+ if len(lit.Type.Params.List[0].Names) == 0 {
+ return nil
+ }
+
+ tObj := info.Defs[lit.Type.Params.List[0].Names[0]]
+ if tObj == nil {
return nil
}
+
+ // Match statements that occur after a call to t.Parallel following the final
+ // labeled statement in the function body.
+ //
+ // We iterate over lit.Body.List to have a simple, fast and "frequent enough"
+ // dominance relationship for t.Parallel(): lit.Body.List[i] dominates
+ // lit.Body.List[j] for i < j unless there is a jump.
+ var stmts []ast.Stmt
+ afterParallel := false
+ for _, stmt := range lit.Body.List {
+ stmt, labeled := unlabel(stmt)
+ if labeled {
+ // Reset: naively we don't know if a jump could have caused the
+ // previously considered statements to be skipped.
+ stmts = nil
+ afterParallel = false
+ }
+
+ if afterParallel {
+ stmts = append(stmts, stmt)
+ continue
+ }
+
+ // Check if stmt is a call to t.Parallel(), for the correct t.
+ exprStmt, ok := stmt.(*ast.ExprStmt)
+ if !ok {
+ continue
+ }
+ expr := exprStmt.X
+ if isMethodCall(info, expr, "testing", "T", "Parallel") {
+ call, _ := expr.(*ast.CallExpr)
+ if call == nil {
+ continue
+ }
+ x, _ := call.Fun.(*ast.SelectorExpr)
+ if x == nil {
+ continue
+ }
+ id, _ := x.X.(*ast.Ident)
+ if id == nil {
+ continue
+ }
+ if info.Uses[id] == tObj {
+ afterParallel = true
+ }
+ }
+ }
+
+ return stmts
+}
+
+// unlabel returns the inner statement for the possibly labeled statement stmt,
+// stripping any (possibly nested) *ast.LabeledStmt wrapper.
+//
+// The second result reports whether stmt was an *ast.LabeledStmt.
+func unlabel(stmt ast.Stmt) (ast.Stmt, bool) {
+ labeled := false
+ for {
+ labelStmt, ok := stmt.(*ast.LabeledStmt)
+ if !ok {
+ return stmt, labeled
+ }
+ labeled = true
+ stmt = labelStmt.Stmt
+ }
+}
+
+// isMethodCall reports whether expr is a method call of
+// <pkgPath>.<typeName>.<method>.
+func isMethodCall(info *types.Info, expr ast.Expr, pkgPath, typeName, method string) bool {
+ call, ok := expr.(*ast.CallExpr)
+ if !ok {
+ return false
+ }
+
+ // Check that we are calling a method <method>
+ f := typeutil.StaticCallee(info, call)
+ if f == nil || f.Name() != method {
+ return false
+ }
+ recv := f.Type().(*types.Signature).Recv()
+ if recv == nil {
+ return false
+ }
+
+ // Check that the receiver is a <pkgPath>.<typeName> or
+ // *<pkgPath>.<typeName>.
+ rtype := recv.Type()
+ if ptr, ok := recv.Type().(*types.Pointer); ok {
+ rtype = ptr.Elem()
+ }
+ named, ok := rtype.(*types.Named)
+ if !ok {
+ return false
+ }
+ if named.Obj().Name() != typeName {
+ return false
+ }
pkg := f.Pkg()
if pkg == nil {
- return nil
+ return false
}
- if pkg.Path() != "golang.org/x/sync/errgroup" {
- return nil
+ if pkg.Path() != pkgPath {
+ return false
}
- return call.Args[0]
+
+ return true
}
diff --git a/go/analysis/passes/loopclosure/loopclosure_test.go b/go/analysis/passes/loopclosure/loopclosure_test.go
index 1498838d7..55fb2a4a3 100644
--- a/go/analysis/passes/loopclosure/loopclosure_test.go
+++ b/go/analysis/passes/loopclosure/loopclosure_test.go
@@ -5,16 +5,16 @@
package loopclosure_test
import (
- "golang.org/x/tools/internal/typeparams"
"testing"
"golang.org/x/tools/go/analysis/analysistest"
"golang.org/x/tools/go/analysis/passes/loopclosure"
+ "golang.org/x/tools/internal/typeparams"
)
func Test(t *testing.T) {
testdata := analysistest.TestData()
- tests := []string{"a", "golang.org/..."}
+ tests := []string{"a", "golang.org/...", "subtests"}
if typeparams.Enabled {
tests = append(tests, "typeparams")
}
diff --git a/go/analysis/passes/loopclosure/testdata/src/a/a.go b/go/analysis/passes/loopclosure/testdata/src/a/a.go
index 2c8e2e6c4..7a7f05f66 100644
--- a/go/analysis/passes/loopclosure/testdata/src/a/a.go
+++ b/go/analysis/passes/loopclosure/testdata/src/a/a.go
@@ -6,7 +6,13 @@
package testdata
-import "golang.org/x/sync/errgroup"
+import (
+ "sync"
+
+ "golang.org/x/sync/errgroup"
+)
+
+var A int
func _() {
var s []int
@@ -49,6 +55,19 @@ func _() {
println(i, v)
}()
}
+
+ // iteration variable declared outside the loop
+ for A = range s {
+ go func() {
+ println(A) // want "loop variable A captured by func literal"
+ }()
+ }
+ // iteration variable declared in a different file
+ for B = range s {
+ go func() {
+ println(B) // want "loop variable B captured by func literal"
+ }()
+ }
// If the key of the range statement is not an identifier
// the code should not panic (it used to).
var x [2]int
@@ -91,9 +110,73 @@ func _() {
}
}
-// Group is used to test that loopclosure does not match on any type named "Group".
-// The checker only matches on methods "(*...errgroup.Group).Go".
-type Group struct{};
+// Cases that rely on recursively checking for last statements.
+func _() {
+
+ for i := range "outer" {
+ for j := range "inner" {
+ if j < 1 {
+ defer func() {
+ print(i) // want "loop variable i captured by func literal"
+ }()
+ } else if j < 2 {
+ go func() {
+ print(i) // want "loop variable i captured by func literal"
+ }()
+ } else {
+ go func() {
+ print(i)
+ }()
+ println("we don't catch the error above because of this statement")
+ }
+ }
+ }
+
+ for i := 0; i < 10; i++ {
+ for j := 0; j < 10; j++ {
+ if j < 1 {
+ switch j {
+ case 0:
+ defer func() {
+ print(i) // want "loop variable i captured by func literal"
+ }()
+ default:
+ go func() {
+ print(i) // want "loop variable i captured by func literal"
+ }()
+ }
+ } else if j < 2 {
+ var a interface{} = j
+ switch a.(type) {
+ case int:
+ defer func() {
+ print(i) // want "loop variable i captured by func literal"
+ }()
+ default:
+ go func() {
+ print(i) // want "loop variable i captured by func literal"
+ }()
+ }
+ } else {
+ ch := make(chan string)
+ select {
+ case <-ch:
+ defer func() {
+ print(i) // want "loop variable i captured by func literal"
+ }()
+ default:
+ go func() {
+ print(i) // want "loop variable i captured by func literal"
+ }()
+ }
+ }
+ }
+ }
+}
+
+// Group is used to test that loopclosure only matches Group.Go when Group is
+// from the golang.org/x/sync/errgroup package.
+type Group struct{}
func (g *Group) Go(func() error) {}
@@ -108,6 +191,21 @@ func _() {
return nil
})
}
+
+ for i, v := range s {
+ if i > 0 {
+ g.Go(func() error {
+ print(i) // want "loop variable i captured by func literal"
+ return nil
+ })
+ } else {
+ g.Go(func() error {
+ print(v) // want "loop variable v captured by func literal"
+ return nil
+ })
+ }
+ }
+
// Do not match other Group.Go cases
g1 := new(Group)
for i, v := range s {
@@ -118,3 +216,28 @@ func _() {
})
}
}
+
+// Real-world example from #16520, slightly simplified
+func _() {
+ var nodes []interface{}
+
+ critical := new(errgroup.Group)
+ others := sync.WaitGroup{}
+
+ isCritical := func(node interface{}) bool { return false }
+ run := func(node interface{}) error { return nil }
+
+ for _, node := range nodes {
+ if isCritical(node) {
+ critical.Go(func() error {
+ return run(node) // want "loop variable node captured by func literal"
+ })
+ } else {
+ others.Add(1)
+ go func() {
+ _ = run(node) // want "loop variable node captured by func literal"
+ others.Done()
+ }()
+ }
+ }
+}
diff --git a/go/analysis/passes/loopclosure/testdata/src/a/b.go b/go/analysis/passes/loopclosure/testdata/src/a/b.go
new file mode 100644
index 000000000..d4e5da418
--- /dev/null
+++ b/go/analysis/passes/loopclosure/testdata/src/a/b.go
@@ -0,0 +1,9 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testdata
+
+// B is declared in a separate file to test that object resolution spans the
+// entire package.
+var B int
diff --git a/go/analysis/passes/loopclosure/testdata/src/subtests/subtest.go b/go/analysis/passes/loopclosure/testdata/src/subtests/subtest.go
new file mode 100644
index 000000000..50283ec61
--- /dev/null
+++ b/go/analysis/passes/loopclosure/testdata/src/subtests/subtest.go
@@ -0,0 +1,202 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains tests that the loopclosure analyzer detects leaked
+// references via parallel subtests.
+
+package subtests
+
+import (
+ "testing"
+)
+
+// T is used to test that loopclosure only matches T.Run when T is from the
+// testing package.
+type T struct{}
+
+// Run should not match testing.T.Run. Note that the second argument is
+// intentionally a *testing.T, not a *T, so that we can check both
+// testing.T.Parallel inside a T.Run, and a T.Parallel inside a testing.T.Run.
+func (t *T) Run(string, func(*testing.T)) {
+}
+
+func (t *T) Parallel() {}
+
+func _(t *testing.T) {
+ for i, test := range []int{1, 2, 3} {
+ // Check that parallel subtests are identified.
+ t.Run("", func(t *testing.T) {
+ t.Parallel()
+ println(i) // want "loop variable i captured by func literal"
+ println(test) // want "loop variable test captured by func literal"
+ })
+
+ // Check that serial tests are OK.
+ t.Run("", func(t *testing.T) {
+ println(i)
+ println(test)
+ })
+
+ // Check that the location of t.Parallel matters.
+ t.Run("", func(t *testing.T) {
+ println(i)
+ println(test)
+ t.Parallel()
+ println(i) // want "loop variable i captured by func literal"
+ println(test) // want "loop variable test captured by func literal"
+ })
+
+ // Check that *testing.T value matters.
+ t.Run("", func(t *testing.T) {
+ var x testing.T
+ x.Parallel()
+ println(i)
+ println(test)
+ })
+
+ // Check that shadowing the loop variables within the test literal is OK if
+ // it occurs before t.Parallel().
+ t.Run("", func(t *testing.T) {
+ i := i
+ test := test
+ t.Parallel()
+ println(i)
+ println(test)
+ })
+
+ // Check that shadowing the loop variables within the test literal is Not
+ // OK if it occurs after t.Parallel().
+ t.Run("", func(t *testing.T) {
+ t.Parallel()
+ i := i // want "loop variable i captured by func literal"
+ test := test // want "loop variable test captured by func literal"
+ println(i) // OK
+ println(test) // OK
+ })
+
+ // Check uses in nested blocks.
+ t.Run("", func(t *testing.T) {
+ t.Parallel()
+ {
+ println(i) // want "loop variable i captured by func literal"
+ println(test) // want "loop variable test captured by func literal"
+ }
+ })
+
+ // Check that we catch uses in nested subtests.
+ t.Run("", func(t *testing.T) {
+ t.Parallel()
+ t.Run("", func(t *testing.T) {
+ println(i) // want "loop variable i captured by func literal"
+ println(test) // want "loop variable test captured by func literal"
+ })
+ })
+
+ // Check that there is no diagnostic if t is not a *testing.T.
+ t.Run("", func(_ *testing.T) {
+ t := &T{}
+ t.Parallel()
+ println(i)
+ println(test)
+ })
+
+ // Check that there is no diagnostic when a jump to a label may have caused
+ // the call to t.Parallel to have been skipped.
+ t.Run("", func(t *testing.T) {
+ if true {
+ goto Test
+ }
+ t.Parallel()
+ Test:
+ println(i)
+ println(test)
+ })
+
+ // Check that there is no diagnostic when a jump to a label may have caused
+ // the loop variable reference to be skipped, but there is a diagnostic
+ // when both the call to t.Parallel and the loop variable reference occur
+ // after the final label in the block.
+ t.Run("", func(t *testing.T) {
+ if true {
+ goto Test
+ }
+ t.Parallel()
+ println(i) // maybe OK
+ Test:
+ t.Parallel()
+ println(test) // want "loop variable test captured by func literal"
+ })
+
+ // Check that multiple labels are handled.
+ t.Run("", func(t *testing.T) {
+ if true {
+ goto Test1
+ } else {
+ goto Test2
+ }
+ Test1:
+ Test2:
+ t.Parallel()
+ println(test) // want "loop variable test captured by func literal"
+ })
+
+ // Check that we do not have problems when t.Run has a single argument.
+ fn := func() (string, func(t *testing.T)) { return "", nil }
+ t.Run(fn())
+ }
+}
+
+// Check that there is no diagnostic when loop variables are shadowed within
+// the loop body.
+func _(t *testing.T) {
+ for i, test := range []int{1, 2, 3} {
+ i := i
+ test := test
+ t.Run("", func(t *testing.T) {
+ t.Parallel()
+ println(i)
+ println(test)
+ })
+ }
+}
+
+// Check that t.Run must be *testing.T.Run.
+func _(t *T) {
+ for i, test := range []int{1, 2, 3} {
+ t.Run("", func(t *testing.T) {
+ t.Parallel()
+ println(i)
+ println(test)
+ })
+ }
+}
+
+// Check that the top-level must be parallel in order to cause a diagnostic.
+//
+// From https://pkg.go.dev/testing:
+//
+// "Run does not return until parallel subtests have completed, providing a
+// way to clean up after a group of parallel tests"
+func _(t *testing.T) {
+ for _, test := range []int{1, 2, 3} {
+ // In this subtest, a/b must complete before the synchronous subtest "a"
+ // completes, so the reference to test does not escape the current loop
+ // iteration.
+ t.Run("a", func(s *testing.T) {
+ s.Run("b", func(u *testing.T) {
+ u.Parallel()
+ println(test)
+ })
+ })
+
+ // In this subtest, c executes concurrently, so the reference to test may
+ // escape the current loop iteration.
+ t.Run("c", func(s *testing.T) {
+ s.Parallel()
+ s.Run("d", func(u *testing.T) {
+ println(test) // want "loop variable test captured by func literal"
+ })
+ })
+ }
+}
diff --git a/go/analysis/passes/nilness/nilness.go b/go/analysis/passes/nilness/nilness.go
index 8fd8cd000..6849c33cc 100644
--- a/go/analysis/passes/nilness/nilness.go
+++ b/go/analysis/passes/nilness/nilness.go
@@ -15,6 +15,7 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/buildssa"
"golang.org/x/tools/go/ssa"
+ "golang.org/x/tools/internal/typeparams"
)
const Doc = `check for redundant or impossible nil comparisons
@@ -102,8 +103,11 @@ func runFunc(pass *analysis.Pass, fn *ssa.Function) {
for _, instr := range b.Instrs {
switch instr := instr.(type) {
case ssa.CallInstruction:
- notNil(stack, instr, instr.Common().Value,
- instr.Common().Description())
+ // A nil receiver may be okay for type params.
+ cc := instr.Common()
+ if !(cc.IsInvoke() && typeparams.IsTypeParam(cc.Value.Type())) {
+ notNil(stack, instr, cc.Value, cc.Description())
+ }
case *ssa.FieldAddr:
notNil(stack, instr, instr.X, "field selection")
case *ssa.IndexAddr:
@@ -250,7 +254,7 @@ func (n nilness) String() string { return nilnessStrings[n+1] }
// or unknown given the dominating stack of facts.
func nilnessOf(stack []fact, v ssa.Value) nilness {
switch v := v.(type) {
- // unwrap ChangeInterface values recursively, to detect if underlying
+ // unwrap ChangeInterface and Slice values recursively, to detect if underlying
// values have any facts recorded or are otherwise known with regard to nilness.
//
// This work must be in addition to expanding facts about
@@ -264,6 +268,10 @@ func nilnessOf(stack []fact, v ssa.Value) nilness {
if underlying := nilnessOf(stack, v.X); underlying != unknown {
return underlying
}
+ case *ssa.Slice:
+ if underlying := nilnessOf(stack, v.X); underlying != unknown {
+ return underlying
+ }
case *ssa.SliceToArrayPointer:
nn := nilnessOf(stack, v.X)
if slice2ArrayPtrLen(v) > 0 {
@@ -302,9 +310,9 @@ func nilnessOf(stack []fact, v ssa.Value) nilness {
return isnonnil
case *ssa.Const:
if v.IsNil() {
- return isnil
+ return isnil // nil or zero value of a pointer-like type
} else {
- return isnonnil
+ return unknown // non-pointer
}
}
diff --git a/go/analysis/passes/nilness/nilness_test.go b/go/analysis/passes/nilness/nilness_test.go
index b258c1efb..99c4dfbac 100644
--- a/go/analysis/passes/nilness/nilness_test.go
+++ b/go/analysis/passes/nilness/nilness_test.go
@@ -9,9 +9,26 @@ import (
"golang.org/x/tools/go/analysis/analysistest"
"golang.org/x/tools/go/analysis/passes/nilness"
+ "golang.org/x/tools/internal/typeparams"
)
func Test(t *testing.T) {
testdata := analysistest.TestData()
analysistest.Run(t, testdata, nilness.Analyzer, "a")
}
+
+func TestInstantiated(t *testing.T) {
+ if !typeparams.Enabled {
+ t.Skip("TestInstantiated requires type parameters")
+ }
+ testdata := analysistest.TestData()
+ analysistest.Run(t, testdata, nilness.Analyzer, "c")
+}
+
+func TestTypeSet(t *testing.T) {
+ if !typeparams.Enabled {
+ t.Skip("TestTypeSet requires type parameters")
+ }
+ testdata := analysistest.TestData()
+ analysistest.Run(t, testdata, nilness.Analyzer, "d")
+}
diff --git a/go/analysis/passes/nilness/testdata/src/a/a.go b/go/analysis/passes/nilness/testdata/src/a/a.go
index f4d8f455e..0629e08d8 100644
--- a/go/analysis/passes/nilness/testdata/src/a/a.go
+++ b/go/analysis/passes/nilness/testdata/src/a/a.go
@@ -130,7 +130,6 @@ func f9(x interface {
b()
c()
}) {
-
x.b() // we don't catch this panic because we don't have any facts yet
xx := interface {
a()
@@ -155,11 +154,27 @@ func f9(x interface {
}
}
+func f10() {
+ s0 := make([]string, 0)
+ if s0 == nil { // want "impossible condition: non-nil == nil"
+ print(0)
+ }
+
+ var s1 []string
+ if s1 == nil { // want "tautological condition: nil == nil"
+ print(0)
+ }
+ s2 := s1[:][:]
+ if s2 == nil { // want "tautological condition: nil == nil"
+ print(0)
+ }
+}
+
func unknown() bool {
return false
}
-func f10(a interface{}) {
+func f11(a interface{}) {
switch a.(type) {
case nil:
return
@@ -170,7 +185,7 @@ func f10(a interface{}) {
}
}
-func f11(a interface{}) {
+func f12(a interface{}) {
switch a {
case nil:
return
@@ -181,3 +196,23 @@ func f11(a interface{}) {
return
}
}
+
+type Y struct {
+ innerY
+}
+
+type innerY struct {
+ value int
+}
+
+func f13() {
+ var d *Y
+ print(d.value) // want "nil dereference in field selection"
+}
+
+func f14() {
+ var x struct{ f string }
+ if x == struct{ f string }{} { // we don't catch this tautology as we restrict to reference types
+ print(x)
+ }
+}
diff --git a/go/analysis/passes/nilness/testdata/src/c/c.go b/go/analysis/passes/nilness/testdata/src/c/c.go
new file mode 100644
index 000000000..c9a05a714
--- /dev/null
+++ b/go/analysis/passes/nilness/testdata/src/c/c.go
@@ -0,0 +1,14 @@
+package c
+
+func instantiated[X any](x *X) int {
+ if x == nil {
+ print(*x) // want "nil dereference in load"
+ }
+ return 1
+}
+
+var g int
+
+func init() {
+ g = instantiated[int](&g)
+}
diff --git a/go/analysis/passes/nilness/testdata/src/d/d.go b/go/analysis/passes/nilness/testdata/src/d/d.go
new file mode 100644
index 000000000..72bd1c872
--- /dev/null
+++ b/go/analysis/passes/nilness/testdata/src/d/d.go
@@ -0,0 +1,55 @@
+package d
+
+type message interface{ PR() }
+
+func noparam() {
+ var messageT message
+ messageT.PR() // want "nil dereference in dynamic method call"
+}
+
+func paramNonnil[T message]() {
+ var messageT T
+ messageT.PR() // cannot conclude messageT is nil.
+}
+
+func instance() {
+ // buildssa.BuilderMode does not include InstantiateGenerics.
+ paramNonnil[message]() // no warning is expected as param[message] id not built.
+}
+
+func param[T interface {
+ message
+ ~*int | ~chan int
+}]() {
+ var messageT T // messageT is nil.
+ messageT.PR() // nil receiver may be okay. See param[nilMsg].
+}
+
+type nilMsg chan int
+
+func (m nilMsg) PR() {
+ if m == nil {
+ print("not an error")
+ }
+}
+
+var G func() = param[nilMsg] // no warning
+
+func allNillable[T ~*int | ~chan int]() {
+ var x, y T // both are nillable and are nil.
+ if x != y { // want "impossible condition: nil != nil"
+ print("unreachable")
+ }
+}
+
+func notAll[T ~*int | ~chan int | ~int]() {
+ var x, y T // neither are nillable due to ~int
+ if x != y { // no warning
+ print("unreachable")
+ }
+}
+
+func noninvoke[T ~func()]() {
+ var x T
+ x() // want "nil dereference in dynamic function call"
+}
diff --git a/go/analysis/passes/pkgfact/pkgfact.go b/go/analysis/passes/pkgfact/pkgfact.go
index 2262fc4f1..f4f5616e5 100644
--- a/go/analysis/passes/pkgfact/pkgfact.go
+++ b/go/analysis/passes/pkgfact/pkgfact.go
@@ -10,14 +10,14 @@
// Each key/value pair comes from a top-level constant declaration
// whose name starts and ends with "_". For example:
//
-// package p
+// package p
//
-// const _greeting_ = "hello"
-// const _audience_ = "world"
+// const _greeting_ = "hello"
+// const _audience_ = "world"
//
// the pkgfact analysis output for package p would be:
//
-// {"greeting": "hello", "audience": "world"}.
+// {"greeting": "hello", "audience": "world"}.
//
// In addition, the analysis reports a diagnostic at each import
// showing which key/value pairs it contributes.
diff --git a/go/analysis/passes/printf/printf.go b/go/analysis/passes/printf/printf.go
index dee37d78a..daaf709a4 100644
--- a/go/analysis/passes/printf/printf.go
+++ b/go/analysis/passes/printf/printf.go
@@ -342,7 +342,6 @@ func checkPrintfFwd(pass *analysis.Pass, w *printfWrapper, call *ast.CallExpr, k
// not do so with gccgo, and nor do some other build systems.
// TODO(adonovan): eliminate the redundant facts once this restriction
// is lifted.
-//
var isPrint = stringSet{
"fmt.Errorf": true,
"fmt.Fprint": true,
@@ -584,7 +583,6 @@ func checkPrintf(pass *analysis.Pass, kind Kind, call *ast.CallExpr, fn *types.F
argNum := firstArg
maxArgNum := firstArg
anyIndex := false
- anyW := false
for i, w := 0, 0; i < len(format); i += w {
w = 1
if format[i] != '%' {
@@ -607,11 +605,6 @@ func checkPrintf(pass *analysis.Pass, kind Kind, call *ast.CallExpr, fn *types.F
pass.Reportf(call.Pos(), "%s does not support error-wrapping directive %%w", state.name)
return
}
- if anyW {
- pass.Reportf(call.Pos(), "%s call has more than one error-wrapping directive %%w", state.name)
- return
- }
- anyW = true
}
if len(state.argNums) > 0 {
// Continue with the next sequential argument.
@@ -673,12 +666,13 @@ func (s *formatState) parseIndex() bool {
s.scanNum()
ok := true
if s.nbytes == len(s.format) || s.nbytes == start || s.format[s.nbytes] != ']' {
- ok = false
- s.nbytes = strings.Index(s.format, "]")
+ ok = false // syntax error is either missing "]" or invalid index.
+ s.nbytes = strings.Index(s.format[start:], "]")
if s.nbytes < 0 {
s.pass.ReportRangef(s.call, "%s format %s is missing closing ]", s.name, s.format)
return false
}
+ s.nbytes = s.nbytes + start
}
arg32, err := strconv.ParseInt(s.format[start:s.nbytes], 10, 32)
if err != nil || !ok || arg32 <= 0 || arg32 > int64(len(s.call.Args)-s.firstArg) {
@@ -931,9 +925,9 @@ func okPrintfArg(pass *analysis.Pass, call *ast.CallExpr, state *formatState) (o
// recursiveStringer reports whether the argument e is a potential
// recursive call to stringer or is an error, such as t and &t in these examples:
//
-// func (t *T) String() string { printf("%s", t) }
-// func (t T) Error() string { printf("%s", t) }
-// func (t T) String() string { printf("%s", &t) }
+// func (t *T) String() string { printf("%s", t) }
+// func (t T) Error() string { printf("%s", t) }
+// func (t T) String() string { printf("%s", &t) }
func recursiveStringer(pass *analysis.Pass, e ast.Expr) (string, bool) {
typ := pass.TypesInfo.Types[e].Type
@@ -951,11 +945,16 @@ func recursiveStringer(pass *analysis.Pass, e ast.Expr) (string, bool) {
return "", false
}
+ // inScope returns true if e is in the scope of f.
+ inScope := func(e ast.Expr, f *types.Func) bool {
+ return f.Scope() != nil && f.Scope().Contains(e.Pos())
+ }
+
// Is the expression e within the body of that String or Error method?
var method *types.Func
- if strOk && strMethod.Pkg() == pass.Pkg && strMethod.Scope().Contains(e.Pos()) {
+ if strOk && strMethod.Pkg() == pass.Pkg && inScope(e, strMethod) {
method = strMethod
- } else if errOk && errMethod.Pkg() == pass.Pkg && errMethod.Scope().Contains(e.Pos()) {
+ } else if errOk && errMethod.Pkg() == pass.Pkg && inScope(e, errMethod) {
method = errMethod
} else {
return "", false
diff --git a/go/analysis/passes/printf/testdata/src/a/a.go b/go/analysis/passes/printf/testdata/src/a/a.go
index 5eca3172d..0c4d11bf0 100644
--- a/go/analysis/passes/printf/testdata/src/a/a.go
+++ b/go/analysis/passes/printf/testdata/src/a/a.go
@@ -217,6 +217,7 @@ func PrintfTests() {
Printf("%[2]*.[1]*[3]d x", 2, "hi", 4) // want `a.Printf format %\[2]\*\.\[1\]\*\[3\]d uses non-int \x22hi\x22 as argument of \*`
Printf("%[0]s x", "arg1") // want `a.Printf format has invalid argument index \[0\]`
Printf("%[0]d x", 1) // want `a.Printf format has invalid argument index \[0\]`
+ Printf("%[3]*.[2*[1]f", 1, 2, 3) // want `a.Printf format has invalid argument index \[2\*\[1\]`
// Something that satisfies the error interface.
var e error
fmt.Println(e.Error()) // ok
@@ -341,7 +342,7 @@ func PrintfTests() {
_ = fmt.Errorf("%[2]w %[1]s", "x", err) // OK
_ = fmt.Errorf("%[2]w %[1]s", e, "x") // want `fmt.Errorf format %\[2\]w has arg "x" of wrong type string`
_ = fmt.Errorf("%w", "x") // want `fmt.Errorf format %w has arg "x" of wrong type string`
- _ = fmt.Errorf("%w %w", err, err) // want `fmt.Errorf call has more than one error-wrapping directive %w`
+ _ = fmt.Errorf("%w %w", err, err) // OK
_ = fmt.Errorf("%w", interface{}(nil)) // want `fmt.Errorf format %w has arg interface{}\(nil\) of wrong type interface{}`
_ = fmt.Errorf("%w", errorTestOK(0)) // concrete value implements error
_ = fmt.Errorf("%w", errSubset) // interface value implements error
diff --git a/go/analysis/passes/printf/testdata/src/typeparams/diagnostics.go b/go/analysis/passes/printf/testdata/src/typeparams/diagnostics.go
index 76a9a205a..c4d7e530d 100644
--- a/go/analysis/passes/printf/testdata/src/typeparams/diagnostics.go
+++ b/go/analysis/passes/printf/testdata/src/typeparams/diagnostics.go
@@ -121,3 +121,25 @@ func TestTermReduction[T1 interface{ ~int | string }, T2 interface {
fmt.Printf("%d", t2)
fmt.Printf("%s", t2) // want "wrong type.*contains typeparams.myInt"
}
+
+type U[T any] struct{}
+
+func (u U[T]) String() string {
+ fmt.Println(u) // want `fmt.Println arg u causes recursive call to \(typeparams.U\[T\]\).String method`
+ return ""
+}
+
+type S[T comparable] struct {
+ t T
+}
+
+func (s S[T]) String() T {
+ fmt.Println(s) // Not flagged. We currently do not consider String() T to implement fmt.Stringer (see #55928).
+ return s.t
+}
+
+func TestInstanceStringer() {
+ // Tests String method with nil Scope (#55350)
+ fmt.Println(&S[string]{})
+ fmt.Println(&U[string]{})
+}
diff --git a/go/analysis/passes/printf/types.go b/go/analysis/passes/printf/types.go
index 270e917c8..7cbb0bdbf 100644
--- a/go/analysis/passes/printf/types.go
+++ b/go/analysis/passes/printf/types.go
@@ -299,13 +299,3 @@ func isConvertibleToString(typ types.Type) bool {
return false
}
-
-// hasBasicType reports whether x's type is a types.Basic with the given kind.
-func hasBasicType(pass *analysis.Pass, x ast.Expr, kind types.BasicKind) bool {
- t := pass.TypesInfo.Types[x].Type
- if t != nil {
- t = t.Underlying()
- }
- b, ok := t.(*types.Basic)
- return ok && b.Kind() == kind
-}
diff --git a/go/analysis/passes/shadow/shadow.go b/go/analysis/passes/shadow/shadow.go
index b160dcf5b..a19cecd14 100644
--- a/go/analysis/passes/shadow/shadow.go
+++ b/go/analysis/passes/shadow/shadow.go
@@ -120,7 +120,6 @@ func run(pass *analysis.Pass) (interface{}, error) {
// the block, we should complain about it but don't.
// - A variable declared inside a function literal can falsely be identified
// as shadowing a variable in the outer function.
-//
type span struct {
min token.Pos
max token.Pos
diff --git a/go/analysis/passes/sigchanyzer/sigchanyzer.go b/go/analysis/passes/sigchanyzer/sigchanyzer.go
index 0d6c8ebf1..c490a84ea 100644
--- a/go/analysis/passes/sigchanyzer/sigchanyzer.go
+++ b/go/analysis/passes/sigchanyzer/sigchanyzer.go
@@ -50,7 +50,7 @@ func run(pass *analysis.Pass) (interface{}, error) {
}
case *ast.CallExpr:
// Only signal.Notify(make(chan os.Signal), os.Interrupt) is safe,
- // conservatively treate others as not safe, see golang/go#45043
+ // conservatively treat others as not safe, see golang/go#45043
if isBuiltinMake(pass.TypesInfo, arg) {
return
}
diff --git a/go/analysis/passes/sortslice/analyzer.go b/go/analysis/passes/sortslice/analyzer.go
index 5eb957a18..f85837d66 100644
--- a/go/analysis/passes/sortslice/analyzer.go
+++ b/go/analysis/passes/sortslice/analyzer.go
@@ -52,11 +52,20 @@ func run(pass *analysis.Pass) (interface{}, error) {
arg := call.Args[0]
typ := pass.TypesInfo.Types[arg].Type
+
+ if tuple, ok := typ.(*types.Tuple); ok {
+ typ = tuple.At(0).Type() // special case for Slice(f(...))
+ }
+
switch typ.Underlying().(type) {
case *types.Slice, *types.Interface:
return
}
+ // Restore typ to the original type, we may unwrap the tuple above,
+ // typ might not be the type of arg.
+ typ = pass.TypesInfo.Types[arg].Type
+
var fixes []analysis.SuggestedFix
switch v := typ.Underlying().(type) {
case *types.Array:
diff --git a/go/analysis/passes/sortslice/testdata/src/a/a.go b/go/analysis/passes/sortslice/testdata/src/a/a.go
index bc6cc16e9..c6aca8df1 100644
--- a/go/analysis/passes/sortslice/testdata/src/a/a.go
+++ b/go/analysis/passes/sortslice/testdata/src/a/a.go
@@ -6,8 +6,8 @@ import "sort"
func IncorrectSort() {
i := 5
sortFn := func(i, j int) bool { return false }
- sort.Slice(i, sortFn) // want "sort.Slice's argument must be a slice; is called with int"
- sort.SliceStable(i, sortFn) // want "sort.SliceStable's argument must be a slice; is called with int"
+ sort.Slice(i, sortFn) // want "sort.Slice's argument must be a slice; is called with int"
+ sort.SliceStable(i, sortFn) // want "sort.SliceStable's argument must be a slice; is called with int"
sort.SliceIsSorted(i, sortFn) // want "sort.SliceIsSorted's argument must be a slice; is called with int"
}
@@ -62,3 +62,23 @@ func UnderlyingSlice() {
sort.SliceStable(s, sortFn)
sort.SliceIsSorted(s, sortFn)
}
+
+// FunctionResultsAsArguments passes a function which returns two values
+// that satisfy sort.Slice signature. It should not produce a diagnostic.
+func FunctionResultsAsArguments() {
+ s := []string{"a", "z", "ooo"}
+ sort.Slice(less(s))
+ sort.Slice(lessPtr(s)) // want `sort.Slice's argument must be a slice; is called with \(\*\[\]string,.*`
+}
+
+func less(s []string) ([]string, func(i, j int) bool) {
+ return s, func(i, j int) bool {
+ return s[i] < s[j]
+ }
+}
+
+func lessPtr(s []string) (*[]string, func(i, j int) bool) {
+ return &s, func(i, j int) bool {
+ return s[i] < s[j]
+ }
+}
diff --git a/go/analysis/passes/stdmethods/stdmethods.go b/go/analysis/passes/stdmethods/stdmethods.go
index cc9497179..41f455d10 100644
--- a/go/analysis/passes/stdmethods/stdmethods.go
+++ b/go/analysis/passes/stdmethods/stdmethods.go
@@ -134,6 +134,19 @@ func canonicalMethod(pass *analysis.Pass, id *ast.Ident) {
}
}
+ // Special case: Unwrap has two possible signatures.
+ // Check for Unwrap() []error here.
+ if id.Name == "Unwrap" {
+ if args.Len() == 0 && results.Len() == 1 {
+ t := typeString(results.At(0).Type())
+ if t == "error" || t == "[]error" {
+ return
+ }
+ }
+ pass.ReportRangef(id, "method Unwrap() should have signature Unwrap() error or Unwrap() []error")
+ return
+ }
+
// Do the =s (if any) all match?
if !matchParams(pass, expect.args, args, "=") || !matchParams(pass, expect.results, results, "=") {
return
diff --git a/go/analysis/passes/stdmethods/testdata/src/a/a.go b/go/analysis/passes/stdmethods/testdata/src/a/a.go
index c95cf5d2b..2b01f4693 100644
--- a/go/analysis/passes/stdmethods/testdata/src/a/a.go
+++ b/go/analysis/passes/stdmethods/testdata/src/a/a.go
@@ -49,7 +49,7 @@ func (E) Error() string { return "" } // E implements error.
func (E) As() {} // want `method As\(\) should have signature As\((any|interface\{\})\) bool`
func (E) Is() {} // want `method Is\(\) should have signature Is\(error\) bool`
-func (E) Unwrap() {} // want `method Unwrap\(\) should have signature Unwrap\(\) error`
+func (E) Unwrap() {} // want `method Unwrap\(\) should have signature Unwrap\(\) error or Unwrap\(\) \[\]error`
type F int
@@ -57,8 +57,18 @@ func (F) Error() string { return "" } // Both F and *F implement error.
func (*F) As() {} // want `method As\(\) should have signature As\((any|interface\{\})\) bool`
func (*F) Is() {} // want `method Is\(\) should have signature Is\(error\) bool`
-func (*F) Unwrap() {} // want `method Unwrap\(\) should have signature Unwrap\(\) error`
+func (*F) Unwrap() {} // want `method Unwrap\(\) should have signature Unwrap\(\) error or Unwrap\(\) \[\]error`
type G int
func (G) As(interface{}) bool // ok
+
+type W int
+
+func (W) Error() string { return "" }
+func (W) Unwrap() error { return nil } // ok
+
+type M int
+
+func (M) Error() string { return "" }
+func (M) Unwrap() []error { return nil } // ok
diff --git a/go/analysis/passes/stdmethods/testdata/src/typeparams/typeparams.go b/go/analysis/passes/stdmethods/testdata/src/typeparams/typeparams.go
index 72df30d49..3d4146e9b 100644
--- a/go/analysis/passes/stdmethods/testdata/src/typeparams/typeparams.go
+++ b/go/analysis/passes/stdmethods/testdata/src/typeparams/typeparams.go
@@ -30,7 +30,7 @@ func (E[_]) Error() string { return "" } // E implements error.
func (E[P]) As() {} // want `method As\(\) should have signature As\((any|interface\{\})\) bool`
func (E[_]) Is() {} // want `method Is\(\) should have signature Is\(error\) bool`
-func (E[_]) Unwrap() {} // want `method Unwrap\(\) should have signature Unwrap\(\) error`
+func (E[_]) Unwrap() {} // want `method Unwrap\(\) should have signature Unwrap\(\) error or Unwrap\(\) \[\]error`
type F[P any] int
@@ -38,4 +38,4 @@ func (F[_]) Error() string { return "" } // Both F and *F implement error.
func (*F[_]) As() {} // want `method As\(\) should have signature As\((any|interface\{\})\) bool`
func (*F[_]) Is() {} // want `method Is\(\) should have signature Is\(error\) bool`
-func (*F[_]) Unwrap() {} // want `method Unwrap\(\) should have signature Unwrap\(\) error`
+func (*F[_]) Unwrap() {} // want `method Unwrap\(\) should have signature Unwrap\(\) error or Unwrap\(\) \[\]error`
diff --git a/go/analysis/passes/tests/testdata/src/a/go118_test.go b/go/analysis/passes/tests/testdata/src/a/go118_test.go
index dc898daca..e2bc3f3a0 100644
--- a/go/analysis/passes/tests/testdata/src/a/go118_test.go
+++ b/go/analysis/passes/tests/testdata/src/a/go118_test.go
@@ -94,3 +94,8 @@ func FuzzObjectMethod(f *testing.F) {
}
f.Fuzz(obj.myVar) // ok
}
+
+// Test for golang/go#56505: checking fuzz arguments should not panic on *error.
+func FuzzIssue56505(f *testing.F) {
+ f.Fuzz(func(e *error) {}) // want "the first parameter of a fuzz target must be \\*testing.T"
+}
diff --git a/go/analysis/passes/tests/tests.go b/go/analysis/passes/tests/tests.go
index ffa5205dd..935aad00c 100644
--- a/go/analysis/passes/tests/tests.go
+++ b/go/analysis/passes/tests/tests.go
@@ -84,7 +84,7 @@ func run(pass *analysis.Pass) (interface{}, error) {
return nil, nil
}
-// Checks the contents of a fuzz function.
+// checkFuzz checks the contents of a fuzz function.
func checkFuzz(pass *analysis.Pass, fn *ast.FuncDecl) {
params := checkFuzzCall(pass, fn)
if params != nil {
@@ -92,15 +92,17 @@ func checkFuzz(pass *analysis.Pass, fn *ast.FuncDecl) {
}
}
-// Check the arguments of f.Fuzz() calls :
-// 1. f.Fuzz() should call a function and it should be of type (*testing.F).Fuzz().
-// 2. The called function in f.Fuzz(func(){}) should not return result.
-// 3. First argument of func() should be of type *testing.T
-// 4. Second argument onwards should be of type []byte, string, bool, byte,
-// rune, float32, float64, int, int8, int16, int32, int64, uint, uint8, uint16,
-// uint32, uint64
-// 5. func() must not call any *F methods, e.g. (*F).Log, (*F).Error, (*F).Skip
-// The only *F methods that are allowed in the (*F).Fuzz function are (*F).Failed and (*F).Name.
+// checkFuzzCall checks the arguments of f.Fuzz() calls:
+//
+// 1. f.Fuzz() should call a function and it should be of type (*testing.F).Fuzz().
+// 2. The called function in f.Fuzz(func(){}) should not return result.
+// 3. First argument of func() should be of type *testing.T
+// 4. Second argument onwards should be of type []byte, string, bool, byte,
+// rune, float32, float64, int, int8, int16, int32, int64, uint, uint8, uint16,
+// uint32, uint64
+// 5. func() must not call any *F methods, e.g. (*F).Log, (*F).Error, (*F).Skip
+// The only *F methods that are allowed in the (*F).Fuzz function are (*F).Failed and (*F).Name.
+//
// Returns the list of parameters to the fuzz function, if they are valid fuzz parameters.
func checkFuzzCall(pass *analysis.Pass, fn *ast.FuncDecl) (params *types.Tuple) {
ast.Inspect(fn, func(n ast.Node) bool {
@@ -160,7 +162,7 @@ func checkFuzzCall(pass *analysis.Pass, fn *ast.FuncDecl) (params *types.Tuple)
return params
}
-// Check that the arguments of f.Add() calls have the same number and type of arguments as
+// checkAddCalls checks that the arguments of f.Add calls have the same number and type of arguments as
// the signature of the function passed to (*testing.F).Fuzz
func checkAddCalls(pass *analysis.Pass, fn *ast.FuncDecl, params *types.Tuple) {
ast.Inspect(fn, func(n ast.Node) bool {
@@ -267,7 +269,9 @@ func isTestingType(typ types.Type, testingType string) bool {
if !ok {
return false
}
- return named.Obj().Pkg().Path() == "testing" && named.Obj().Name() == testingType
+ obj := named.Obj()
+ // obj.Pkg is nil for the error type.
+ return obj != nil && obj.Pkg() != nil && obj.Pkg().Path() == "testing" && obj.Name() == testingType
}
// Validate that fuzz target function's arguments are of accepted types.
@@ -473,10 +477,12 @@ func checkTest(pass *analysis.Pass, fn *ast.FuncDecl, prefix string) {
if tparams := typeparams.ForFuncType(fn.Type); tparams != nil && len(tparams.List) > 0 {
// Note: cmd/go/internal/load also errors about TestXXX and BenchmarkXXX functions with type parameters.
// We have currently decided to also warn before compilation/package loading. This can help users in IDEs.
+ // TODO(adonovan): use ReportRangef(tparams).
pass.Reportf(fn.Pos(), "%s has type parameters: it will not be run by go test as a %sXXX function", fn.Name.Name, prefix)
}
if !isTestSuffix(fn.Name.Name[len(prefix):]) {
+ // TODO(adonovan): use ReportRangef(fn.Name).
pass.Reportf(fn.Pos(), "%s has malformed name: first letter after '%s' must not be lowercase", fn.Name.Name, prefix)
}
}
diff --git a/go/analysis/passes/timeformat/testdata/src/a/a.go b/go/analysis/passes/timeformat/testdata/src/a/a.go
new file mode 100644
index 000000000..98481446e
--- /dev/null
+++ b/go/analysis/passes/timeformat/testdata/src/a/a.go
@@ -0,0 +1,50 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains tests for the timeformat checker.
+
+package a
+
+import (
+ "time"
+
+ "b"
+)
+
+func hasError() {
+ a, _ := time.Parse("2006-02-01 15:04:05", "2021-01-01 00:00:00") // want `2006-02-01 should be 2006-01-02`
+ a.Format(`2006-02-01`) // want `2006-02-01 should be 2006-01-02`
+ a.Format("2006-02-01 15:04:05") // want `2006-02-01 should be 2006-01-02`
+
+ const c = "2006-02-01"
+ a.Format(c) // want `2006-02-01 should be 2006-01-02`
+}
+
+func notHasError() {
+ a, _ := time.Parse("2006-01-02 15:04:05", "2021-01-01 00:00:00")
+ a.Format("2006-01-02")
+
+ const c = "2006-01-02"
+ a.Format(c)
+
+ v := "2006-02-01"
+ a.Format(v) // Allowed though variables.
+
+ m := map[string]string{
+ "y": "2006-02-01",
+ }
+ a.Format(m["y"])
+
+ s := []string{"2006-02-01"}
+ a.Format(s[0])
+
+ a.Format(badFormat())
+
+ o := b.Parse("2006-02-01 15:04:05", "2021-01-01 00:00:00")
+ o.Format("2006-02-01")
+}
+
+func badFormat() string {
+ return "2006-02-01"
+}
diff --git a/go/analysis/passes/timeformat/testdata/src/a/a.go.golden b/go/analysis/passes/timeformat/testdata/src/a/a.go.golden
new file mode 100644
index 000000000..9eccded63
--- /dev/null
+++ b/go/analysis/passes/timeformat/testdata/src/a/a.go.golden
@@ -0,0 +1,50 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains tests for the timeformat checker.
+
+package a
+
+import (
+ "time"
+
+ "b"
+)
+
+func hasError() {
+ a, _ := time.Parse("2006-01-02 15:04:05", "2021-01-01 00:00:00") // want `2006-02-01 should be 2006-01-02`
+ a.Format(`2006-01-02`) // want `2006-02-01 should be 2006-01-02`
+ a.Format("2006-01-02 15:04:05") // want `2006-02-01 should be 2006-01-02`
+
+ const c = "2006-02-01"
+ a.Format(c) // want `2006-02-01 should be 2006-01-02`
+}
+
+func notHasError() {
+ a, _ := time.Parse("2006-01-02 15:04:05", "2021-01-01 00:00:00")
+ a.Format("2006-01-02")
+
+ const c = "2006-01-02"
+ a.Format(c)
+
+ v := "2006-02-01"
+ a.Format(v) // Allowed though variables.
+
+ m := map[string]string{
+ "y": "2006-02-01",
+ }
+ a.Format(m["y"])
+
+ s := []string{"2006-02-01"}
+ a.Format(s[0])
+
+ a.Format(badFormat())
+
+ o := b.Parse("2006-02-01 15:04:05", "2021-01-01 00:00:00")
+ o.Format("2006-02-01")
+}
+
+func badFormat() string {
+ return "2006-02-01"
+}
diff --git a/go/analysis/passes/timeformat/testdata/src/b/b.go b/go/analysis/passes/timeformat/testdata/src/b/b.go
new file mode 100644
index 000000000..de5690863
--- /dev/null
+++ b/go/analysis/passes/timeformat/testdata/src/b/b.go
@@ -0,0 +1,11 @@
+package b
+
+type B struct {
+}
+
+func Parse(string, string) B {
+ return B{}
+}
+
+func (b B) Format(string) {
+}
diff --git a/go/analysis/passes/timeformat/timeformat.go b/go/analysis/passes/timeformat/timeformat.go
new file mode 100644
index 000000000..acb198f95
--- /dev/null
+++ b/go/analysis/passes/timeformat/timeformat.go
@@ -0,0 +1,129 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package timeformat defines an Analyzer that checks for the use
+// of time.Format or time.Parse calls with a bad format.
+package timeformat
+
+import (
+ "go/ast"
+ "go/constant"
+ "go/token"
+ "go/types"
+ "strings"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/inspect"
+ "golang.org/x/tools/go/ast/inspector"
+ "golang.org/x/tools/go/types/typeutil"
+)
+
+const badFormat = "2006-02-01"
+const goodFormat = "2006-01-02"
+
+const Doc = `check for calls of (time.Time).Format or time.Parse with 2006-02-01
+
+The timeformat checker looks for time formats with the 2006-02-01 (yyyy-dd-mm)
+format. Internationally, "yyyy-dd-mm" does not occur in common calendar date
+standards, and so it is more likely that 2006-01-02 (yyyy-mm-dd) was intended.
+`
+
+var Analyzer = &analysis.Analyzer{
+ Name: "timeformat",
+ Doc: Doc,
+ Requires: []*analysis.Analyzer{inspect.Analyzer},
+ Run: run,
+}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+ inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+
+ nodeFilter := []ast.Node{
+ (*ast.CallExpr)(nil),
+ }
+ inspect.Preorder(nodeFilter, func(n ast.Node) {
+ call := n.(*ast.CallExpr)
+ fn, ok := typeutil.Callee(pass.TypesInfo, call).(*types.Func)
+ if !ok {
+ return
+ }
+ if !isTimeDotFormat(fn) && !isTimeDotParse(fn) {
+ return
+ }
+ if len(call.Args) > 0 {
+ arg := call.Args[0]
+ badAt := badFormatAt(pass.TypesInfo, arg)
+
+ if badAt > -1 {
+ // Check if it's a literal string, otherwise we can't suggest a fix.
+ if _, ok := arg.(*ast.BasicLit); ok {
+ pos := int(arg.Pos()) + badAt + 1 // +1 to skip the " or `
+ end := pos + len(badFormat)
+
+ pass.Report(analysis.Diagnostic{
+ Pos: token.Pos(pos),
+ End: token.Pos(end),
+ Message: badFormat + " should be " + goodFormat,
+ SuggestedFixes: []analysis.SuggestedFix{{
+ Message: "Replace " + badFormat + " with " + goodFormat,
+ TextEdits: []analysis.TextEdit{{
+ Pos: token.Pos(pos),
+ End: token.Pos(end),
+ NewText: []byte(goodFormat),
+ }},
+ }},
+ })
+ } else {
+ pass.Reportf(arg.Pos(), badFormat+" should be "+goodFormat)
+ }
+ }
+ }
+ })
+ return nil, nil
+}
+
+func isTimeDotFormat(f *types.Func) bool {
+ if f.Name() != "Format" || f.Pkg().Path() != "time" {
+ return false
+ }
+ sig, ok := f.Type().(*types.Signature)
+ if !ok {
+ return false
+ }
+ // Verify that the receiver is time.Time.
+ recv := sig.Recv()
+ if recv == nil {
+ return false
+ }
+ named, ok := recv.Type().(*types.Named)
+ return ok && named.Obj().Name() == "Time"
+}
+
+func isTimeDotParse(f *types.Func) bool {
+ if f.Name() != "Parse" || f.Pkg().Path() != "time" {
+ return false
+ }
+ // Verify that there is no receiver.
+ sig, ok := f.Type().(*types.Signature)
+ return ok && sig.Recv() == nil
+}
+
+// badFormatAt return the start of a bad format in e or -1 if no bad format is found.
+func badFormatAt(info *types.Info, e ast.Expr) int {
+ tv, ok := info.Types[e]
+ if !ok { // no type info, assume good
+ return -1
+ }
+
+ t, ok := tv.Type.(*types.Basic)
+ if !ok || t.Info()&types.IsString == 0 {
+ return -1
+ }
+
+ if tv.Value == nil {
+ return -1
+ }
+
+ return strings.Index(constant.StringVal(tv.Value), badFormat)
+}
diff --git a/go/analysis/passes/timeformat/timeformat_test.go b/go/analysis/passes/timeformat/timeformat_test.go
new file mode 100644
index 000000000..86bbe1bb3
--- /dev/null
+++ b/go/analysis/passes/timeformat/timeformat_test.go
@@ -0,0 +1,17 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package timeformat_test
+
+import (
+ "testing"
+
+ "golang.org/x/tools/go/analysis/analysistest"
+ "golang.org/x/tools/go/analysis/passes/timeformat"
+)
+
+func Test(t *testing.T) {
+ testdata := analysistest.TestData()
+ analysistest.RunWithSuggestedFixes(t, testdata, timeformat.Analyzer, "a")
+}
diff --git a/go/analysis/passes/unusedwrite/unusedwrite.go b/go/analysis/passes/unusedwrite/unusedwrite.go
index 37a0e784b..9cc45e0a3 100644
--- a/go/analysis/passes/unusedwrite/unusedwrite.go
+++ b/go/analysis/passes/unusedwrite/unusedwrite.go
@@ -41,7 +41,7 @@ Another example is about non-pointer receiver:
`
// Analyzer reports instances of writes to struct fields and arrays
-//that are never read.
+// that are never read.
var Analyzer = &analysis.Analyzer{
Name: "unusedwrite",
Doc: Doc,
@@ -50,40 +50,49 @@ var Analyzer = &analysis.Analyzer{
}
func run(pass *analysis.Pass) (interface{}, error) {
- // Check the writes to struct and array objects.
- checkStore := func(store *ssa.Store) {
- // Consider field/index writes to an object whose elements are copied and not shared.
- // MapUpdate is excluded since only the reference of the map is copied.
- switch addr := store.Addr.(type) {
- case *ssa.FieldAddr:
- if isDeadStore(store, addr.X, addr) {
- // Report the bug.
+ ssainput := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA)
+ for _, fn := range ssainput.SrcFuncs {
+ // TODO(taking): Iterate over fn._Instantiations() once exported. If so, have 1 report per Pos().
+ reports := checkStores(fn)
+ for _, store := range reports {
+ switch addr := store.Addr.(type) {
+ case *ssa.FieldAddr:
pass.Reportf(store.Pos(),
"unused write to field %s",
getFieldName(addr.X.Type(), addr.Field))
- }
- case *ssa.IndexAddr:
- if isDeadStore(store, addr.X, addr) {
- // Report the bug.
+ case *ssa.IndexAddr:
pass.Reportf(store.Pos(),
"unused write to array index %s", addr.Index)
}
}
}
+ return nil, nil
+}
- ssainput := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA)
- for _, fn := range ssainput.SrcFuncs {
- // Visit each block. No need to visit fn.Recover.
- for _, blk := range fn.Blocks {
- for _, instr := range blk.Instrs {
- // Identify writes.
- if store, ok := instr.(*ssa.Store); ok {
- checkStore(store)
+// checkStores returns *Stores in fn whose address is written to but never used.
+func checkStores(fn *ssa.Function) []*ssa.Store {
+ var reports []*ssa.Store
+ // Visit each block. No need to visit fn.Recover.
+ for _, blk := range fn.Blocks {
+ for _, instr := range blk.Instrs {
+ // Identify writes.
+ if store, ok := instr.(*ssa.Store); ok {
+ // Consider field/index writes to an object whose elements are copied and not shared.
+ // MapUpdate is excluded since only the reference of the map is copied.
+ switch addr := store.Addr.(type) {
+ case *ssa.FieldAddr:
+ if isDeadStore(store, addr.X, addr) {
+ reports = append(reports, store)
+ }
+ case *ssa.IndexAddr:
+ if isDeadStore(store, addr.X, addr) {
+ reports = append(reports, store)
+ }
}
}
}
}
- return nil, nil
+ return reports
}
// isDeadStore determines whether a field/index write to an object is dead.
diff --git a/go/analysis/singlechecker/singlechecker.go b/go/analysis/singlechecker/singlechecker.go
index 28530777b..91044ca08 100644
--- a/go/analysis/singlechecker/singlechecker.go
+++ b/go/analysis/singlechecker/singlechecker.go
@@ -11,16 +11,15 @@
// all that is needed to define a standalone tool is a file,
// example.org/findbadness/cmd/findbadness/main.go, containing:
//
-// // The findbadness command runs an analysis.
-// package main
+// // The findbadness command runs an analysis.
+// package main
//
-// import (
-// "example.org/findbadness"
-// "golang.org/x/tools/go/analysis/singlechecker"
-// )
-//
-// func main() { singlechecker.Main(findbadness.Analyzer) }
+// import (
+// "example.org/findbadness"
+// "golang.org/x/tools/go/analysis/singlechecker"
+// )
//
+// func main() { singlechecker.Main(findbadness.Analyzer) }
package singlechecker
import (
diff --git a/go/analysis/unitchecker/main.go b/go/analysis/unitchecker/main.go
index 23acb7ed0..a054a2dce 100644
--- a/go/analysis/unitchecker/main.go
+++ b/go/analysis/unitchecker/main.go
@@ -10,8 +10,8 @@
// It serves as a model for the behavior of the cmd/vet tool in $GOROOT.
// Being based on the unitchecker driver, it must be run by go vet:
//
-// $ go build -o unitchecker main.go
-// $ go vet -vettool=unitchecker my/project/...
+// $ go build -o unitchecker main.go
+// $ go vet -vettool=unitchecker my/project/...
//
// For a checker also capable of running standalone, use multichecker.
package main
diff --git a/go/analysis/unitchecker/unitchecker.go b/go/analysis/unitchecker/unitchecker.go
index b539866dd..37693564e 100644
--- a/go/analysis/unitchecker/unitchecker.go
+++ b/go/analysis/unitchecker/unitchecker.go
@@ -6,13 +6,13 @@
// driver that analyzes a single compilation unit during a build.
// It is invoked by a build system such as "go vet":
//
-// $ go vet -vettool=$(which vet)
+// $ go vet -vettool=$(which vet)
//
// It supports the following command-line protocol:
//
-// -V=full describe executable (to the build tool)
-// -flags describe flags (to the build tool)
-// foo.cfg description of compilation unit (from the build tool)
+// -V=full describe executable (to the build tool)
+// -flags describe flags (to the build tool)
+// foo.cfg description of compilation unit (from the build tool)
//
// This package does not depend on go/packages.
// If you need a standalone tool, use multichecker,
@@ -50,7 +50,7 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/internal/analysisflags"
- "golang.org/x/tools/go/analysis/internal/facts"
+ "golang.org/x/tools/internal/facts"
"golang.org/x/tools/internal/typeparams"
)
@@ -79,11 +79,10 @@ type Config struct {
//
// The protocol required by 'go vet -vettool=...' is that the tool must support:
//
-// -flags describe flags in JSON
-// -V=full describe executable for build caching
-// foo.cfg perform separate modular analyze on the single
-// unit described by a JSON config file foo.cfg.
-//
+// -flags describe flags in JSON
+// -V=full describe executable for build caching
+// foo.cfg perform separate modular analyze on the single
+// unit described by a JSON config file foo.cfg.
func Main(analyzers ...*analysis.Analyzer) {
progname := filepath.Base(os.Args[0])
log.SetFlags(0)
@@ -250,6 +249,10 @@ func run(fset *token.FileSet, cfg *Config, analyzers []*analysis.Analyzer) ([]re
// In VetxOnly mode, analyzers are only for their facts,
// so we can skip any analysis that neither produces facts
// nor depends on any analysis that produces facts.
+ //
+ // TODO(adonovan): fix: the command (and logic!) here are backwards.
+ // It should say "...nor is required by any...". (Issue 443099)
+ //
// Also build a map to hold working state and result.
type action struct {
once sync.Once
@@ -288,13 +291,13 @@ func run(fset *token.FileSet, cfg *Config, analyzers []*analysis.Analyzer) ([]re
analyzers = filtered
// Read facts from imported packages.
- read := func(path string) ([]byte, error) {
- if vetx, ok := cfg.PackageVetx[path]; ok {
+ read := func(imp *types.Package) ([]byte, error) {
+ if vetx, ok := cfg.PackageVetx[imp.Path()]; ok {
return ioutil.ReadFile(vetx)
}
return nil, nil // no .vetx file, no facts
}
- facts, err := facts.Decode(pkg, read)
+ facts, err := facts.NewDecoder(pkg).Decode(read)
if err != nil {
return nil, err
}
@@ -341,6 +344,7 @@ func run(fset *token.FileSet, cfg *Config, analyzers []*analysis.Analyzer) ([]re
Pkg: pkg,
TypesInfo: info,
TypesSizes: tc.Sizes,
+ TypeErrors: nil, // unitchecker doesn't RunDespiteErrors
ResultOf: inputs,
Report: func(d analysis.Diagnostic) { act.diagnostics = append(act.diagnostics, d) },
ImportObjectFact: facts.ImportObjectFact,
diff --git a/go/analysis/unitchecker/unitchecker_test.go b/go/analysis/unitchecker/unitchecker_test.go
index 7e5b848de..197abd9a1 100644
--- a/go/analysis/unitchecker/unitchecker_test.go
+++ b/go/analysis/unitchecker/unitchecker_test.go
@@ -20,6 +20,7 @@ import (
"strings"
"testing"
+ "golang.org/x/tools/go/analysis/passes/assign"
"golang.org/x/tools/go/analysis/passes/findcall"
"golang.org/x/tools/go/analysis/passes/printf"
"golang.org/x/tools/go/analysis/unitchecker"
@@ -41,6 +42,7 @@ func main() {
unitchecker.Main(
findcall.Analyzer,
printf.Analyzer,
+ assign.Analyzer,
)
}
@@ -75,6 +77,13 @@ func _() {
func MyFunc123() {}
`,
+ "c/c.go": `package c
+
+func _() {
+ i := 5
+ i = i
+}
+`,
}}})
defer exported.Cleanup()
@@ -85,29 +94,71 @@ func MyFunc123() {}
([/._\-a-zA-Z0-9]+[\\/]fake[\\/])?b/b.go:6:13: call of MyFunc123\(...\)
([/._\-a-zA-Z0-9]+[\\/]fake[\\/])?b/b.go:7:11: call of MyFunc123\(...\)
`
+ const wantC = `# golang.org/fake/c
+([/._\-a-zA-Z0-9]+[\\/]fake[\\/])?c/c.go:5:5: self-assignment of i to i
+`
const wantAJSON = `# golang.org/fake/a
\{
"golang.org/fake/a": \{
"findcall": \[
\{
"posn": "([/._\-a-zA-Z0-9]+[\\/]fake[\\/])?a/a.go:4:11",
- "message": "call of MyFunc123\(...\)"
+ "message": "call of MyFunc123\(...\)",
+ "suggested_fixes": \[
+ \{
+ "message": "Add '_TEST_'",
+ "edits": \[
+ \{
+ "filename": "([/._\-a-zA-Z0-9]+[\\/]fake[\\/])?a/a.go",
+ "start": 32,
+ "end": 32,
+ "new": "_TEST_"
+ \}
+ \]
+ \}
+ \]
+ \}
+ \]
+ \}
+\}
+`
+ const wantCJSON = `# golang.org/fake/c
+\{
+ "golang.org/fake/c": \{
+ "assign": \[
+ \{
+ "posn": "([/._\-a-zA-Z0-9]+[\\/]fake[\\/])?c/c.go:5:5",
+ "message": "self-assignment of i to i",
+ "suggested_fixes": \[
+ \{
+ "message": "Remove",
+ "edits": \[
+ \{
+ "filename": "([/._\-a-zA-Z0-9]+[\\/]fake[\\/])?c/c.go",
+ "start": 37,
+ "end": 42,
+ "new": ""
+ \}
+ \]
+ \}
+ \]
\}
\]
\}
\}
`
-
for _, test := range []struct {
- args string
- wantOut string
- wantExit int
+ args string
+ wantOut string
+ wantExitError bool
}{
- {args: "golang.org/fake/a", wantOut: wantA, wantExit: 2},
- {args: "golang.org/fake/b", wantOut: wantB, wantExit: 2},
- {args: "golang.org/fake/a golang.org/fake/b", wantOut: wantA + wantB, wantExit: 2},
- {args: "-json golang.org/fake/a", wantOut: wantAJSON, wantExit: 0},
- {args: "-c=0 golang.org/fake/a", wantOut: wantA + "4 MyFunc123\\(\\)\n", wantExit: 2},
+ {args: "golang.org/fake/a", wantOut: wantA, wantExitError: true},
+ {args: "golang.org/fake/b", wantOut: wantB, wantExitError: true},
+ {args: "golang.org/fake/c", wantOut: wantC, wantExitError: true},
+ {args: "golang.org/fake/a golang.org/fake/b", wantOut: wantA + wantB, wantExitError: true},
+ {args: "-json golang.org/fake/a", wantOut: wantAJSON, wantExitError: false},
+ {args: "-json golang.org/fake/c", wantOut: wantCJSON, wantExitError: false},
+ {args: "-c=0 golang.org/fake/a", wantOut: wantA + "4 MyFunc123\\(\\)\n", wantExitError: true},
} {
cmd := exec.Command("go", "vet", "-vettool="+os.Args[0], "-findcall.name=MyFunc123")
cmd.Args = append(cmd.Args, strings.Fields(test.args)...)
@@ -119,13 +170,17 @@ func MyFunc123() {}
if exitErr, ok := err.(*exec.ExitError); ok {
exitcode = exitErr.ExitCode()
}
- if exitcode != test.wantExit {
- t.Errorf("%s: got exit code %d, want %d", test.args, exitcode, test.wantExit)
+ if (exitcode != 0) != test.wantExitError {
+ want := "zero"
+ if test.wantExitError {
+ want = "nonzero"
+ }
+ t.Errorf("%s: got exit code %d, want %s", test.args, exitcode, want)
}
matched, err := regexp.Match(test.wantOut, out)
if err != nil {
- t.Fatal(err)
+ t.Fatalf("regexp.Match(<<%s>>): %v", test.wantOut, err)
}
if !matched {
t.Errorf("%s: got <<%s>>, want match of regexp <<%s>>", test.args, out, test.wantOut)
diff --git a/go/analysis/validate.go b/go/analysis/validate.go
index 23e57bf02..9da5692af 100644
--- a/go/analysis/validate.go
+++ b/go/analysis/validate.go
@@ -14,6 +14,8 @@ import (
// Validate reports an error if any of the analyzers are misconfigured.
// Checks include:
// that the name is a valid identifier;
+// that the Doc is not empty;
+// that the Run is non-nil;
// that the Requires graph is acyclic;
// that analyzer fact types are unique;
// that each fact type is a pointer.
@@ -46,6 +48,9 @@ func Validate(analyzers []*Analyzer) error {
return fmt.Errorf("analyzer %q is undocumented", a)
}
+ if a.Run == nil {
+ return fmt.Errorf("analyzer %q has nil Run", a)
+ }
// fact types
for _, f := range a.FactTypes {
if f == nil {
diff --git a/go/analysis/validate_test.go b/go/analysis/validate_test.go
index 1116034f7..7f4ee2c05 100644
--- a/go/analysis/validate_test.go
+++ b/go/analysis/validate_test.go
@@ -11,33 +11,43 @@ import (
func TestValidate(t *testing.T) {
var (
+ run = func(p *Pass) (interface{}, error) {
+ return nil, nil
+ }
dependsOnSelf = &Analyzer{
Name: "dependsOnSelf",
Doc: "this analyzer depends on itself",
+ Run: run,
}
inCycleA = &Analyzer{
Name: "inCycleA",
Doc: "this analyzer depends on inCycleB",
+ Run: run,
}
inCycleB = &Analyzer{
Name: "inCycleB",
Doc: "this analyzer depends on inCycleA and notInCycleA",
+ Run: run,
}
pointsToCycle = &Analyzer{
Name: "pointsToCycle",
Doc: "this analyzer depends on inCycleA",
+ Run: run,
}
notInCycleA = &Analyzer{
Name: "notInCycleA",
Doc: "this analyzer depends on notInCycleB and notInCycleC",
+ Run: run,
}
notInCycleB = &Analyzer{
Name: "notInCycleB",
Doc: "this analyzer depends on notInCycleC",
+ Run: run,
}
notInCycleC = &Analyzer{
Name: "notInCycleC",
Doc: "this analyzer has no dependencies",
+ Run: run,
}
)
@@ -116,3 +126,27 @@ func TestCycleInRequiresGraphErrorMessage(t *testing.T) {
t.Errorf("error string %s does not contain expected substring %q", errMsg, wantSubstring)
}
}
+
+func TestValidateEmptyDoc(t *testing.T) {
+ withoutDoc := &Analyzer{
+ Name: "withoutDoc",
+ Run: func(p *Pass) (interface{}, error) {
+ return nil, nil
+ },
+ }
+ err := Validate([]*Analyzer{withoutDoc})
+ if err == nil || !strings.Contains(err.Error(), "is undocumented") {
+ t.Errorf("got unexpected error while validating analyzers withoutDoc: %v", err)
+ }
+}
+
+func TestValidateNoRun(t *testing.T) {
+ withoutRun := &Analyzer{
+ Name: "withoutRun",
+ Doc: "this analyzer has no Run",
+ }
+ err := Validate([]*Analyzer{withoutRun})
+ if err == nil || !strings.Contains(err.Error(), "has nil Run") {
+ t.Errorf("got unexpected error while validating analyzers withoutRun: %v", err)
+ }
+}
diff --git a/go/ast/astutil/enclosing.go b/go/ast/astutil/enclosing.go
index a5c6d6d4f..9fa5aa192 100644
--- a/go/ast/astutil/enclosing.go
+++ b/go/ast/astutil/enclosing.go
@@ -22,9 +22,9 @@ import (
// additional whitespace abutting a node to be enclosed by it.
// In this example:
//
-// z := x + y // add them
-// <-A->
-// <----B----->
+// z := x + y // add them
+// <-A->
+// <----B----->
//
// the ast.BinaryExpr(+) node is considered to enclose interval B
// even though its [Pos()..End()) is actually only interval A.
@@ -43,10 +43,10 @@ import (
// interior whitespace of path[0].
// In this example:
//
-// z := x + y // add them
-// <--C--> <---E-->
-// ^
-// D
+// z := x + y // add them
+// <--C--> <---E-->
+// ^
+// D
//
// intervals C, D and E are inexact. C is contained by the
// z-assignment statement, because it spans three of its children (:=,
@@ -54,12 +54,11 @@ import (
// interior whitespace of the assignment. E is considered interior
// whitespace of the BlockStmt containing the assignment.
//
-// Precondition: [start, end) both lie within the same file as root.
-// TODO(adonovan): return (nil, false) in this case and remove precond.
-// Requires FileSet; see loader.tokenFileContainsPos.
-//
-// Postcondition: path is never nil; it always contains at least 'root'.
-//
+// The resulting path is never empty; it always contains at least the
+// 'root' *ast.File. Ideally PathEnclosingInterval would reject
+// intervals that lie wholly or partially outside the range of the
+// file, but unfortunately ast.File records only the token.Pos of
+// the 'package' keyword, but not of the start of the file itself.
func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Node, exact bool) {
// fmt.Printf("EnclosingInterval %d %d\n", start, end) // debugging
@@ -135,6 +134,7 @@ func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Nod
return false // inexact: overlaps multiple children
}
+ // Ensure [start,end) is nondecreasing.
if start > end {
start, end = end, start
}
@@ -162,7 +162,6 @@ func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Nod
// tokenNode is a dummy implementation of ast.Node for a single token.
// They are used transiently by PathEnclosingInterval but never escape
// this package.
-//
type tokenNode struct {
pos token.Pos
end token.Pos
@@ -183,7 +182,6 @@ func tok(pos token.Pos, len int) ast.Node {
// childrenOf returns the direct non-nil children of ast.Node n.
// It may include fake ast.Node implementations for bare tokens.
// it is not safe to call (e.g.) ast.Walk on such nodes.
-//
func childrenOf(n ast.Node) []ast.Node {
var children []ast.Node
@@ -488,7 +486,6 @@ func (sl byPos) Swap(i, j int) {
// TODO(adonovan): in some cases (e.g. Field, FieldList, Ident,
// StarExpr) we could be much more specific given the path to the AST
// root. Perhaps we should do that.
-//
func NodeDescription(n ast.Node) string {
switch n := n.(type) {
case *ast.ArrayType:
diff --git a/go/ast/astutil/enclosing_test.go b/go/ast/astutil/enclosing_test.go
index 5e86ff93c..de96d4496 100644
--- a/go/ast/astutil/enclosing_test.go
+++ b/go/ast/astutil/enclosing_test.go
@@ -40,7 +40,6 @@ func pathToString(path []ast.Node) string {
// findInterval parses input and returns the [start, end) positions of
// the first occurrence of substr in input. f==nil indicates failure;
// an error has already been reported in that case.
-//
func findInterval(t *testing.T, fset *token.FileSet, input, substr string) (f *ast.File, start, end token.Pos) {
f, err := parser.ParseFile(fset, "<input>", input, 0)
if err != nil {
diff --git a/go/ast/astutil/imports.go b/go/ast/astutil/imports.go
index 2087ceec9..18d1adb05 100644
--- a/go/ast/astutil/imports.go
+++ b/go/ast/astutil/imports.go
@@ -22,8 +22,11 @@ func AddImport(fset *token.FileSet, f *ast.File, path string) (added bool) {
// If name is not empty, it is used to rename the import.
//
// For example, calling
+//
// AddNamedImport(fset, f, "pathpkg", "path")
+//
// adds
+//
// import pathpkg "path"
func AddNamedImport(fset *token.FileSet, f *ast.File, name, path string) (added bool) {
if imports(f, name, path) {
@@ -270,8 +273,8 @@ func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (del
}
if j > 0 {
lastImpspec := gen.Specs[j-1].(*ast.ImportSpec)
- lastLine := fset.Position(lastImpspec.Path.ValuePos).Line
- line := fset.Position(impspec.Path.ValuePos).Line
+ lastLine := fset.PositionFor(lastImpspec.Path.ValuePos, false).Line
+ line := fset.PositionFor(impspec.Path.ValuePos, false).Line
// We deleted an entry but now there may be
// a blank line-sized hole where the import was.
diff --git a/go/ast/astutil/imports_test.go b/go/ast/astutil/imports_test.go
index 68f05ab6d..2a383e467 100644
--- a/go/ast/astutil/imports_test.go
+++ b/go/ast/astutil/imports_test.go
@@ -1654,6 +1654,34 @@ import f "fmt"
`,
unchanged: true,
},
+ // this test panics without PositionFor in DeleteNamedImport
+ {
+ name: "import.44",
+ pkg: "foo.com/other/v3",
+ renamedPkg: "",
+ in: `package main
+//line mah.go:600
+
+import (
+"foo.com/a.thing"
+"foo.com/surprise"
+"foo.com/v1"
+"foo.com/other/v2"
+"foo.com/other/v3"
+)
+`,
+ out: `package main
+
+//line mah.go:600
+
+import (
+ "foo.com/a.thing"
+ "foo.com/other/v2"
+ "foo.com/surprise"
+ "foo.com/v1"
+)
+`,
+ },
}
func TestDeleteImport(t *testing.T) {
diff --git a/go/ast/astutil/rewrite.go b/go/ast/astutil/rewrite.go
index 6d9ca23e2..f430b21b9 100644
--- a/go/ast/astutil/rewrite.go
+++ b/go/ast/astutil/rewrite.go
@@ -41,7 +41,6 @@ type ApplyFunc func(*Cursor) bool
// Children are traversed in the order in which they appear in the
// respective node's struct definition. A package's files are
// traversed in the filenames' alphabetical order.
-//
func Apply(root ast.Node, pre, post ApplyFunc) (result ast.Node) {
parent := &struct{ ast.Node }{root}
defer func() {
@@ -65,8 +64,8 @@ var abort = new(int) // singleton, to signal termination of Apply
// c.Parent(), and f is the field identifier with name c.Name(),
// the following invariants hold:
//
-// p.f == c.Node() if c.Index() < 0
-// p.f[c.Index()] == c.Node() if c.Index() >= 0
+// p.f == c.Node() if c.Index() < 0
+// p.f[c.Index()] == c.Node() if c.Index() >= 0
//
// The methods Replace, Delete, InsertBefore, and InsertAfter
// can be used to change the AST without disrupting Apply.
@@ -294,6 +293,9 @@ func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.
a.apply(n, "Fields", nil, n.Fields)
case *ast.FuncType:
+ if tparams := typeparams.ForFuncType(n); tparams != nil {
+ a.apply(n, "TypeParams", nil, tparams)
+ }
a.apply(n, "Params", nil, n.Params)
a.apply(n, "Results", nil, n.Results)
@@ -406,6 +408,9 @@ func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.
case *ast.TypeSpec:
a.apply(n, "Doc", nil, n.Doc)
a.apply(n, "Name", nil, n.Name)
+ if tparams := typeparams.ForTypeSpec(n); tparams != nil {
+ a.apply(n, "TypeParams", nil, tparams)
+ }
a.apply(n, "Type", nil, n.Type)
a.apply(n, "Comment", nil, n.Comment)
diff --git a/go/ast/astutil/rewrite_test.go b/go/ast/astutil/rewrite_test.go
index 9d23170a5..4ef6fe99d 100644
--- a/go/ast/astutil/rewrite_test.go
+++ b/go/ast/astutil/rewrite_test.go
@@ -202,20 +202,30 @@ func init() {
type T[P1, P2 any] int
type R T[int, string]
+
+func F[Q1 any](q Q1) {}
`,
+ // TODO: note how the rewrite adds a trailing comma in "func F".
+ // Is that a bug in the test, or in astutil.Apply?
want: `package p
-type S[P1, P2 any] int32
+type S[R1, P2 any] int32
type R S[int32, string]
+
+func F[X1 any](q X1,) {}
`,
post: func(c *astutil.Cursor) bool {
if ident, ok := c.Node().(*ast.Ident); ok {
- if ident.Name == "int" {
+ switch ident.Name {
+ case "int":
c.Replace(ast.NewIdent("int32"))
- }
- if ident.Name == "T" {
+ case "T":
c.Replace(ast.NewIdent("S"))
+ case "P1":
+ c.Replace(ast.NewIdent("R1"))
+ case "Q1":
+ c.Replace(ast.NewIdent("X1"))
}
}
return true
diff --git a/go/ast/inspector/inspector.go b/go/ast/inspector/inspector.go
index af5e17fee..3fbfebf36 100644
--- a/go/ast/inspector/inspector.go
+++ b/go/ast/inspector/inspector.go
@@ -53,10 +53,13 @@ func New(files []*ast.File) *Inspector {
// of an ast.Node during a traversal.
type event struct {
node ast.Node
- typ uint64 // typeOf(node)
- index int // 1 + index of corresponding pop event, or 0 if this is a pop
+ typ uint64 // typeOf(node) on push event, or union of typ strictly between push and pop events on pop events
+ index int // index of corresponding push or pop event
}
+// TODO: Experiment with storing only the second word of event.node (unsafe.Pointer).
+// Type can be recovered from the sole bit in typ.
+
// Preorder visits all the nodes of the files supplied to New in
// depth-first order. It calls f(n) for each node n before it visits
// n's children.
@@ -72,10 +75,17 @@ func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) {
mask := maskOf(types)
for i := 0; i < len(in.events); {
ev := in.events[i]
- if ev.typ&mask != 0 {
- if ev.index > 0 {
+ if ev.index > i {
+ // push
+ if ev.typ&mask != 0 {
f(ev.node)
}
+ pop := ev.index
+ if in.events[pop].typ&mask == 0 {
+ // Subtrees do not contain types: skip them and pop.
+ i = pop + 1
+ continue
+ }
}
i++
}
@@ -94,15 +104,24 @@ func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proc
mask := maskOf(types)
for i := 0; i < len(in.events); {
ev := in.events[i]
- if ev.typ&mask != 0 {
- if ev.index > 0 {
- // push
+ if ev.index > i {
+ // push
+ pop := ev.index
+ if ev.typ&mask != 0 {
if !f(ev.node, true) {
- i = ev.index // jump to corresponding pop + 1
+ i = pop + 1 // jump to corresponding pop + 1
continue
}
- } else {
- // pop
+ }
+ if in.events[pop].typ&mask == 0 {
+ // Subtrees do not contain types: skip them.
+ i = pop
+ continue
+ }
+ } else {
+ // pop
+ push := ev.index
+ if in.events[push].typ&mask != 0 {
f(ev.node, false)
}
}
@@ -119,19 +138,26 @@ func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, s
var stack []ast.Node
for i := 0; i < len(in.events); {
ev := in.events[i]
- if ev.index > 0 {
+ if ev.index > i {
// push
+ pop := ev.index
stack = append(stack, ev.node)
if ev.typ&mask != 0 {
if !f(ev.node, true, stack) {
- i = ev.index
+ i = pop + 1
stack = stack[:len(stack)-1]
continue
}
}
+ if in.events[pop].typ&mask == 0 {
+ // Subtrees does not contain types: skip them.
+ i = pop
+ continue
+ }
} else {
// pop
- if ev.typ&mask != 0 {
+ push := ev.index
+ if in.events[push].typ&mask != 0 {
f(ev.node, false, stack)
}
stack = stack[:len(stack)-1]
@@ -157,25 +183,31 @@ func traverse(files []*ast.File) []event {
events := make([]event, 0, capacity)
var stack []event
+ stack = append(stack, event{}) // include an extra event so file nodes have a parent
for _, f := range files {
ast.Inspect(f, func(n ast.Node) bool {
if n != nil {
// push
ev := event{
node: n,
- typ: typeOf(n),
+ typ: 0, // temporarily used to accumulate type bits of subtree
index: len(events), // push event temporarily holds own index
}
stack = append(stack, ev)
events = append(events, ev)
} else {
// pop
- ev := stack[len(stack)-1]
- stack = stack[:len(stack)-1]
+ top := len(stack) - 1
+ ev := stack[top]
+ typ := typeOf(ev.node)
+ push := ev.index
+ parent := top - 1
- events[ev.index].index = len(events) + 1 // make push refer to pop
+ events[push].typ = typ // set type of push
+ stack[parent].typ |= typ | ev.typ // parent's typ contains push and pop's typs.
+ events[push].index = len(events) // make push refer to pop
- ev.index = 0 // turn ev into a pop event
+ stack = stack[:top]
events = append(events, ev)
}
return true
diff --git a/go/ast/inspector/inspector_test.go b/go/ast/inspector/inspector_test.go
index 9e5391896..e88d584b5 100644
--- a/go/ast/inspector/inspector_test.go
+++ b/go/ast/inspector/inspector_test.go
@@ -244,9 +244,11 @@ func typeOf(n ast.Node) string {
// but a break-even point (NewInspector/(ASTInspect-Inspect)) of about 5
// traversals.
//
-// BenchmarkNewInspector 4.5 ms
-// BenchmarkNewInspect 0.33ms
-// BenchmarkASTInspect 1.2 ms
+// BenchmarkASTInspect 1.0 ms
+// BenchmarkNewInspector 2.2 ms
+// BenchmarkInspect 0.39ms
+// BenchmarkInspectFilter 0.01ms
+// BenchmarkInspectCalls 0.14ms
func BenchmarkNewInspector(b *testing.B) {
// Measure one-time construction overhead.
@@ -274,6 +276,42 @@ func BenchmarkInspect(b *testing.B) {
}
}
+func BenchmarkInspectFilter(b *testing.B) {
+ b.StopTimer()
+ inspect := inspector.New(netFiles)
+ b.StartTimer()
+
+ // Measure marginal cost of traversal.
+ nodeFilter := []ast.Node{(*ast.FuncDecl)(nil), (*ast.FuncLit)(nil)}
+ var ndecls, nlits int
+ for i := 0; i < b.N; i++ {
+ inspect.Preorder(nodeFilter, func(n ast.Node) {
+ switch n.(type) {
+ case *ast.FuncDecl:
+ ndecls++
+ case *ast.FuncLit:
+ nlits++
+ }
+ })
+ }
+}
+
+func BenchmarkInspectCalls(b *testing.B) {
+ b.StopTimer()
+ inspect := inspector.New(netFiles)
+ b.StartTimer()
+
+ // Measure marginal cost of traversal.
+ nodeFilter := []ast.Node{(*ast.CallExpr)(nil)}
+ var ncalls int
+ for i := 0; i < b.N; i++ {
+ inspect.Preorder(nodeFilter, func(n ast.Node) {
+ _ = n.(*ast.CallExpr)
+ ncalls++
+ })
+ }
+}
+
func BenchmarkASTInspect(b *testing.B) {
var ndecls, nlits int
for i := 0; i < b.N; i++ {
diff --git a/go/ast/inspector/typeof.go b/go/ast/inspector/typeof.go
index 11f4fc369..703c81395 100644
--- a/go/ast/inspector/typeof.go
+++ b/go/ast/inspector/typeof.go
@@ -11,6 +11,7 @@ package inspector
import (
"go/ast"
+ "math"
"golang.org/x/tools/internal/typeparams"
)
@@ -77,12 +78,14 @@ const (
// typeOf returns a distinct single-bit value that represents the type of n.
//
// Various implementations were benchmarked with BenchmarkNewInspector:
-// GOGC=off
-// - type switch 4.9-5.5ms 2.1ms
-// - binary search over a sorted list of types 5.5-5.9ms 2.5ms
-// - linear scan, frequency-ordered list 5.9-6.1ms 2.7ms
-// - linear scan, unordered list 6.4ms 2.7ms
-// - hash table 6.5ms 3.1ms
+//
+// GOGC=off
+// - type switch 4.9-5.5ms 2.1ms
+// - binary search over a sorted list of types 5.5-5.9ms 2.5ms
+// - linear scan, frequency-ordered list 5.9-6.1ms 2.7ms
+// - linear scan, unordered list 6.4ms 2.7ms
+// - hash table 6.5ms 3.1ms
+//
// A perfect hash seemed like overkill.
//
// The compiler's switch statement is the clear winner
@@ -90,7 +93,6 @@ const (
// with constant conditions and good branch prediction.
// (Sadly it is the most verbose in source code.)
// Binary search suffered from poor branch prediction.
-//
func typeOf(n ast.Node) uint64 {
// Fast path: nearly half of all nodes are identifiers.
if _, ok := n.(*ast.Ident); ok {
@@ -217,7 +219,7 @@ func typeOf(n ast.Node) uint64 {
func maskOf(nodes []ast.Node) uint64 {
if nodes == nil {
- return 1<<64 - 1 // match all node types
+ return math.MaxUint64 // match all node types
}
var mask uint64
for _, n := range nodes {
diff --git a/go/buildutil/allpackages.go b/go/buildutil/allpackages.go
index c0cb03e7b..dfb8cd6c7 100644
--- a/go/buildutil/allpackages.go
+++ b/go/buildutil/allpackages.go
@@ -28,7 +28,6 @@ import (
//
// All I/O is done via the build.Context file system interface,
// which must be concurrency-safe.
-//
func AllPackages(ctxt *build.Context) []string {
var list []string
ForEachPackage(ctxt, func(pkg string, _ error) {
@@ -48,7 +47,6 @@ func AllPackages(ctxt *build.Context) []string {
//
// All I/O is done via the build.Context file system interface,
// which must be concurrency-safe.
-//
func ForEachPackage(ctxt *build.Context, found func(importPath string, err error)) {
ch := make(chan item)
@@ -127,19 +125,18 @@ func allPackages(ctxt *build.Context, root string, ch chan<- item) {
// ExpandPatterns returns the set of packages matched by patterns,
// which may have the following forms:
//
-// golang.org/x/tools/cmd/guru # a single package
-// golang.org/x/tools/... # all packages beneath dir
-// ... # the entire workspace.
+// golang.org/x/tools/cmd/guru # a single package
+// golang.org/x/tools/... # all packages beneath dir
+// ... # the entire workspace.
//
// Order is significant: a pattern preceded by '-' removes matching
// packages from the set. For example, these patterns match all encoding
// packages except encoding/xml:
//
-// encoding/... -encoding/xml
+// encoding/... -encoding/xml
//
// A trailing slash in a pattern is ignored. (Path components of Go
// package names are separated by slash, not the platform's path separator.)
-//
func ExpandPatterns(ctxt *build.Context, patterns []string) map[string]bool {
// TODO(adonovan): support other features of 'go list':
// - "std"/"cmd"/"all" meta-packages
diff --git a/go/buildutil/fakecontext.go b/go/buildutil/fakecontext.go
index 5fc672fd5..15025f645 100644
--- a/go/buildutil/fakecontext.go
+++ b/go/buildutil/fakecontext.go
@@ -30,7 +30,6 @@ import (
// /go/src/ including, for instance, "math" and "math/big".
// ReadDir("/go/src/math/big") would return all the files in the
// "math/big" package.
-//
func FakeContext(pkgs map[string]map[string]string) *build.Context {
clean := func(filename string) string {
f := path.Clean(filepath.ToSlash(filename))
diff --git a/go/buildutil/overlay.go b/go/buildutil/overlay.go
index 8e239086b..bdbfd9314 100644
--- a/go/buildutil/overlay.go
+++ b/go/buildutil/overlay.go
@@ -60,8 +60,7 @@ func OverlayContext(orig *build.Context, overlay map[string][]byte) *build.Conte
// ParseOverlayArchive parses an archive containing Go files and their
// contents. The result is intended to be used with OverlayContext.
//
-//
-// Archive format
+// # Archive format
//
// The archive consists of a series of files. Each file consists of a
// name, a decimal file size and the file contents, separated by
diff --git a/go/buildutil/tags.go b/go/buildutil/tags.go
index 6da0ce484..7cf523bca 100644
--- a/go/buildutil/tags.go
+++ b/go/buildutil/tags.go
@@ -20,7 +20,8 @@ const TagsFlagDoc = "a list of `build tags` to consider satisfied during the bui
// See $GOROOT/src/cmd/go/doc.go for description of 'go build -tags' flag.
//
// Example:
-// flag.Var((*buildutil.TagsFlag)(&build.Default.BuildTags), "tags", buildutil.TagsFlagDoc)
+//
+// flag.Var((*buildutil.TagsFlag)(&build.Default.BuildTags), "tags", buildutil.TagsFlagDoc)
type TagsFlag []string
func (v *TagsFlag) Set(s string) error {
diff --git a/go/buildutil/util.go b/go/buildutil/util.go
index fc923d7a7..bee6390de 100644
--- a/go/buildutil/util.go
+++ b/go/buildutil/util.go
@@ -28,7 +28,6 @@ import (
// filename that will be attached to the ASTs.
//
// TODO(adonovan): call this from go/loader.parseFiles when the tree thaws.
-//
func ParseFile(fset *token.FileSet, ctxt *build.Context, displayPath func(string) string, dir string, file string, mode parser.Mode) (*ast.File, error) {
if !IsAbsPath(ctxt, file) {
file = JoinPath(ctxt, dir, file)
@@ -51,7 +50,6 @@ func ParseFile(fset *token.FileSet, ctxt *build.Context, displayPath func(string
//
// The '...Files []string' fields of the resulting build.Package are not
// populated (build.FindOnly mode).
-//
func ContainingPackage(ctxt *build.Context, dir, filename string) (*build.Package, error) {
if !IsAbsPath(ctxt, filename) {
filename = JoinPath(ctxt, dir, filename)
@@ -82,7 +80,7 @@ func ContainingPackage(ctxt *build.Context, dir, filename string) (*build.Packag
// (go/build.Context defines these as methods, but does not export them.)
-// hasSubdir calls ctxt.HasSubdir (if not nil) or else uses
+// HasSubdir calls ctxt.HasSubdir (if not nil) or else uses
// the local file system to answer the question.
func HasSubdir(ctxt *build.Context, root, dir string) (rel string, ok bool) {
if f := ctxt.HasSubdir; f != nil {
@@ -196,7 +194,6 @@ func SplitPathList(ctxt *build.Context, s string) []string {
// sameFile returns true if x and y have the same basename and denote
// the same file.
-//
func sameFile(x, y string) bool {
if path.Clean(x) == path.Clean(y) {
return true
diff --git a/go/callgraph/callgraph.go b/go/callgraph/callgraph.go
index 2bcc3dcc8..905623753 100644
--- a/go/callgraph/callgraph.go
+++ b/go/callgraph/callgraph.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
/*
-
Package callgraph defines the call graph and various algorithms
and utilities to operate on it.
@@ -30,7 +29,6 @@ calling main() and init().
Calls to built-in functions (e.g. panic, println) are not represented
in the call graph; they are treated like built-in operators of the
language.
-
*/
package callgraph // import "golang.org/x/tools/go/callgraph"
@@ -39,6 +37,8 @@ package callgraph // import "golang.org/x/tools/go/callgraph"
// More generally, we could eliminate "uninteresting" nodes such as
// nodes from packages we don't care about.
+// TODO(zpavlinovic): decide how callgraphs handle calls to and from generic function bodies.
+
import (
"fmt"
"go/token"
@@ -51,7 +51,6 @@ import (
// A graph may contain nodes that are not reachable from the root.
// If the call graph is sound, such nodes indicate unreachable
// functions.
-//
type Graph struct {
Root *Node // the distinguished root node
Nodes map[*ssa.Function]*Node // all nodes by function
diff --git a/go/callgraph/callgraph_test.go b/go/callgraph/callgraph_test.go
new file mode 100644
index 000000000..dd6baafa5
--- /dev/null
+++ b/go/callgraph/callgraph_test.go
@@ -0,0 +1,253 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package callgraph_test
+
+import (
+ "log"
+ "sync"
+ "testing"
+
+ "golang.org/x/tools/go/callgraph"
+ "golang.org/x/tools/go/callgraph/cha"
+ "golang.org/x/tools/go/callgraph/rta"
+ "golang.org/x/tools/go/callgraph/static"
+ "golang.org/x/tools/go/callgraph/vta"
+ "golang.org/x/tools/go/loader"
+ "golang.org/x/tools/go/pointer"
+ "golang.org/x/tools/go/ssa"
+ "golang.org/x/tools/go/ssa/ssautil"
+)
+
+// Benchmarks comparing different callgraph algorithms implemented in
+// x/tools/go/callgraph. Comparison is on both speed, memory and precision.
+// Fewer edges and fewer reachable nodes implies a more precise result.
+// Comparison is done on a hello world http server using net/http.
+//
+// Current results were on an i7 macbook on go version devel go1.20-2730.
+// Number of nodes, edges, and reachable function are expected to vary between
+// go versions. Timing results are expected to vary between machines.
+// BenchmarkStatic-12 53 ms/op 6 MB/op 12113 nodes 37355 edges 1522 reachable
+// BenchmarkCHA-12 86 ms/op 16 MB/op 12113 nodes 131717 edges 7640 reachable
+// BenchmarkRTA-12 110 ms/op 12 MB/op 6566 nodes 42291 edges 5099 reachable
+// BenchmarkPTA-12 1427 ms/op 600 MB/op 8714 nodes 28244 edges 4184 reachable
+// BenchmarkVTA-12 600 ms/op 78 MB/op 12114 nodes 44861 edges 4919 reachable
+// BenchmarkVTA2-12 793 ms/op 104 MB/op 5450 nodes 22208 edges 4042 reachable
+// BenchmarkVTA3-12 977 ms/op 124 MB/op 4621 nodes 19331 edges 3700 reachable
+// BenchmarkVTAAlt-12 372 ms/op 57 MB/op 7763 nodes 29912 edges 4258 reachable
+// BenchmarkVTAAlt2-12 570 ms/op 78 MB/op 4838 nodes 20169 edges 3737 reachable
+//
+// Note:
+// * Static is unsound and may miss real edges.
+// * RTA starts from a main function and only includes reachable functions.
+// * CHA starts from all functions.
+// * VTA, VTA2, and VTA3 are starting from all functions and the CHA callgraph.
+// VTA2 and VTA3 are the result of re-applying VTA to the functions reachable
+// from main() via the callgraph of the previous stage.
+// * VTAAlt, and VTAAlt2 start from the functions reachable from main via the
+// CHA callgraph.
+// * All algorithms are unsound w.r.t. reflection.
+
+const httpEx = `package main
+
+import (
+ "fmt"
+ "net/http"
+)
+
+func hello(w http.ResponseWriter, req *http.Request) {
+ fmt.Fprintf(w, "hello world\n")
+}
+
+func main() {
+ http.HandleFunc("/hello", hello)
+ http.ListenAndServe(":8090", nil)
+}
+`
+
+var (
+ once sync.Once
+ prog *ssa.Program
+ main *ssa.Function
+)
+
+func example() (*ssa.Program, *ssa.Function) {
+ once.Do(func() {
+ var conf loader.Config
+ f, err := conf.ParseFile("<input>", httpEx)
+ if err != nil {
+ log.Fatal(err)
+ }
+ conf.CreateFromFiles(f.Name.Name, f)
+
+ lprog, err := conf.Load()
+ if err != nil {
+ log.Fatalf("test 'package %s': Load: %s", f.Name.Name, err)
+ }
+ prog = ssautil.CreateProgram(lprog, ssa.InstantiateGenerics)
+ prog.Build()
+
+ main = prog.Package(lprog.Created[0].Pkg).Members["main"].(*ssa.Function)
+ })
+ return prog, main
+}
+
+var stats bool = false // print stats?
+
+func logStats(b *testing.B, cnd bool, name string, cg *callgraph.Graph, main *ssa.Function) {
+ if cnd && stats {
+ e := 0
+ for _, n := range cg.Nodes {
+ e += len(n.Out)
+ }
+ r := len(reaches(main, cg, false))
+ b.Logf("%s:\t%d nodes\t%d edges\t%d reachable", name, len(cg.Nodes), e, r)
+ }
+}
+
+func BenchmarkStatic(b *testing.B) {
+ b.StopTimer()
+ prog, main := example()
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ cg := static.CallGraph(prog)
+ logStats(b, i == 0, "static", cg, main)
+ }
+}
+
+func BenchmarkCHA(b *testing.B) {
+ b.StopTimer()
+ prog, main := example()
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ cg := cha.CallGraph(prog)
+ logStats(b, i == 0, "cha", cg, main)
+ }
+}
+
+func BenchmarkRTA(b *testing.B) {
+ b.StopTimer()
+ _, main := example()
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ res := rta.Analyze([]*ssa.Function{main}, true)
+ cg := res.CallGraph
+ logStats(b, i == 0, "rta", cg, main)
+ }
+}
+
+func BenchmarkPTA(b *testing.B) {
+ b.StopTimer()
+ _, main := example()
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ config := &pointer.Config{Mains: []*ssa.Package{main.Pkg}, BuildCallGraph: true}
+ res, err := pointer.Analyze(config)
+ if err != nil {
+ b.Fatal(err)
+ }
+ logStats(b, i == 0, "pta", res.CallGraph, main)
+ }
+}
+
+func BenchmarkVTA(b *testing.B) {
+ b.StopTimer()
+ prog, main := example()
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ cg := vta.CallGraph(ssautil.AllFunctions(prog), cha.CallGraph(prog))
+ logStats(b, i == 0, "vta", cg, main)
+ }
+}
+
+func BenchmarkVTA2(b *testing.B) {
+ b.StopTimer()
+ prog, main := example()
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ vta1 := vta.CallGraph(ssautil.AllFunctions(prog), cha.CallGraph(prog))
+ cg := vta.CallGraph(reaches(main, vta1, true), vta1)
+ logStats(b, i == 0, "vta2", cg, main)
+ }
+}
+
+func BenchmarkVTA3(b *testing.B) {
+ b.StopTimer()
+ prog, main := example()
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ vta1 := vta.CallGraph(ssautil.AllFunctions(prog), cha.CallGraph(prog))
+ vta2 := vta.CallGraph(reaches(main, vta1, true), vta1)
+ cg := vta.CallGraph(reaches(main, vta2, true), vta2)
+ logStats(b, i == 0, "vta3", cg, main)
+ }
+}
+
+func BenchmarkVTAAlt(b *testing.B) {
+ b.StopTimer()
+ prog, main := example()
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ cha := cha.CallGraph(prog)
+ cg := vta.CallGraph(reaches(main, cha, true), cha) // start from only functions reachable by CHA.
+ logStats(b, i == 0, "vta-alt", cg, main)
+ }
+}
+
+func BenchmarkVTAAlt2(b *testing.B) {
+ b.StopTimer()
+ prog, main := example()
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ cha := cha.CallGraph(prog)
+ vta1 := vta.CallGraph(reaches(main, cha, true), cha)
+ cg := vta.CallGraph(reaches(main, vta1, true), vta1)
+ logStats(b, i == 0, "vta-alt2", cg, main)
+ }
+}
+
+// reaches computes the transitive closure of functions forward reachable
+// via calls in cg starting from `sources`. If refs is true, include
+// functions referred to in an instruction.
+func reaches(source *ssa.Function, cg *callgraph.Graph, refs bool) map[*ssa.Function]bool {
+ seen := make(map[*ssa.Function]bool)
+ var visit func(f *ssa.Function)
+ visit = func(f *ssa.Function) {
+ if seen[f] {
+ return
+ }
+ seen[f] = true
+
+ if n := cg.Nodes[f]; n != nil {
+ for _, e := range n.Out {
+ if e.Site != nil {
+ visit(e.Callee.Func)
+ }
+ }
+ }
+
+ if refs {
+ var buf [10]*ssa.Value // avoid alloc in common case
+ for _, b := range f.Blocks {
+ for _, instr := range b.Instrs {
+ for _, op := range instr.Operands(buf[:0]) {
+ if fn, ok := (*op).(*ssa.Function); ok {
+ visit(fn)
+ }
+ }
+ }
+ }
+ }
+ }
+ visit(source)
+ return seen
+}
diff --git a/go/callgraph/cha/cha.go b/go/callgraph/cha/cha.go
index 215ff173d..6296d48d9 100644
--- a/go/callgraph/cha/cha.go
+++ b/go/callgraph/cha/cha.go
@@ -20,9 +20,10 @@
// Since CHA conservatively assumes that all functions are address-taken
// and all concrete types are put into interfaces, it is sound to run on
// partial programs, such as libraries without a main or test function.
-//
package cha // import "golang.org/x/tools/go/callgraph/cha"
+// TODO(zpavlinovic): update CHA for how it handles generic function bodies.
+
import (
"go/types"
@@ -34,12 +35,59 @@ import (
// CallGraph computes the call graph of the specified program using the
// Class Hierarchy Analysis algorithm.
-//
func CallGraph(prog *ssa.Program) *callgraph.Graph {
cg := callgraph.New(nil) // TODO(adonovan) eliminate concept of rooted callgraph
allFuncs := ssautil.AllFunctions(prog)
+ calleesOf := lazyCallees(allFuncs)
+
+ addEdge := func(fnode *callgraph.Node, site ssa.CallInstruction, g *ssa.Function) {
+ gnode := cg.CreateNode(g)
+ callgraph.AddEdge(fnode, site, gnode)
+ }
+
+ addEdges := func(fnode *callgraph.Node, site ssa.CallInstruction, callees []*ssa.Function) {
+ // Because every call to a highly polymorphic and
+ // frequently used abstract method such as
+ // (io.Writer).Write is assumed to call every concrete
+ // Write method in the program, the call graph can
+ // contain a lot of duplication.
+ //
+ // TODO(taking): opt: consider making lazyCallees public.
+ // Using the same benchmarks as callgraph_test.go, removing just
+ // the explicit callgraph.Graph construction is 4x less memory
+ // and is 37% faster.
+ // CHA 86 ms/op 16 MB/op
+ // lazyCallees 63 ms/op 4 MB/op
+ for _, g := range callees {
+ addEdge(fnode, site, g)
+ }
+ }
+
+ for f := range allFuncs {
+ fnode := cg.CreateNode(f)
+ for _, b := range f.Blocks {
+ for _, instr := range b.Instrs {
+ if site, ok := instr.(ssa.CallInstruction); ok {
+ if g := site.Common().StaticCallee(); g != nil {
+ addEdge(fnode, site, g)
+ } else {
+ addEdges(fnode, site, calleesOf(site))
+ }
+ }
+ }
+ }
+ }
+
+ return cg
+}
+
+// lazyCallees returns a function that maps a call site (in a function in fns)
+// to its callees within fns.
+//
+// The resulting function is not concurrency safe.
+func lazyCallees(fns map[*ssa.Function]bool) func(site ssa.CallInstruction) []*ssa.Function {
// funcsBySig contains all functions, keyed by signature. It is
// the effective set of address-taken functions used to resolve
// a dynamic call of a particular signature.
@@ -81,7 +129,7 @@ func CallGraph(prog *ssa.Program) *callgraph.Graph {
return methods
}
- for f := range allFuncs {
+ for f := range fns {
if f.Signature.Recv() == nil {
// Package initializers can never be address-taken.
if f.Name() == "init" && f.Synthetic == "package initializer" {
@@ -95,45 +143,17 @@ func CallGraph(prog *ssa.Program) *callgraph.Graph {
}
}
- addEdge := func(fnode *callgraph.Node, site ssa.CallInstruction, g *ssa.Function) {
- gnode := cg.CreateNode(g)
- callgraph.AddEdge(fnode, site, gnode)
- }
-
- addEdges := func(fnode *callgraph.Node, site ssa.CallInstruction, callees []*ssa.Function) {
- // Because every call to a highly polymorphic and
- // frequently used abstract method such as
- // (io.Writer).Write is assumed to call every concrete
- // Write method in the program, the call graph can
- // contain a lot of duplication.
- //
- // TODO(adonovan): opt: consider factoring the callgraph
- // API so that the Callers component of each edge is a
- // slice of nodes, not a singleton.
- for _, g := range callees {
- addEdge(fnode, site, g)
- }
- }
-
- for f := range allFuncs {
- fnode := cg.CreateNode(f)
- for _, b := range f.Blocks {
- for _, instr := range b.Instrs {
- if site, ok := instr.(ssa.CallInstruction); ok {
- call := site.Common()
- if call.IsInvoke() {
- tiface := call.Value.Type().Underlying().(*types.Interface)
- addEdges(fnode, site, lookupMethods(tiface, call.Method))
- } else if g := call.StaticCallee(); g != nil {
- addEdge(fnode, site, g)
- } else if _, ok := call.Value.(*ssa.Builtin); !ok {
- callees, _ := funcsBySig.At(call.Signature()).([]*ssa.Function)
- addEdges(fnode, site, callees)
- }
- }
- }
+ return func(site ssa.CallInstruction) []*ssa.Function {
+ call := site.Common()
+ if call.IsInvoke() {
+ tiface := call.Value.Type().Underlying().(*types.Interface)
+ return lookupMethods(tiface, call.Method)
+ } else if g := call.StaticCallee(); g != nil {
+ return []*ssa.Function{g}
+ } else if _, ok := call.Value.(*ssa.Builtin); !ok {
+ fns, _ := funcsBySig.At(call.Signature()).([]*ssa.Function)
+ return fns
}
+ return nil
}
-
- return cg
}
diff --git a/go/callgraph/cha/cha_test.go b/go/callgraph/cha/cha_test.go
index 3dc03143b..a12b3d0a3 100644
--- a/go/callgraph/cha/cha_test.go
+++ b/go/callgraph/cha/cha_test.go
@@ -24,7 +24,9 @@ import (
"golang.org/x/tools/go/callgraph"
"golang.org/x/tools/go/callgraph/cha"
"golang.org/x/tools/go/loader"
+ "golang.org/x/tools/go/ssa"
"golang.org/x/tools/go/ssa/ssautil"
+ "golang.org/x/tools/internal/typeparams"
)
var inputs = []string{
@@ -47,19 +49,9 @@ func expectation(f *ast.File) (string, token.Pos) {
// TestCHA runs CHA on each file in inputs, prints the dynamic edges of
// the call graph, and compares it with the golden results embedded in
// the WANT comment at the end of the file.
-//
func TestCHA(t *testing.T) {
for _, filename := range inputs {
- content, err := ioutil.ReadFile(filename)
- if err != nil {
- t.Errorf("couldn't read file '%s': %s", filename, err)
- continue
- }
-
- conf := loader.Config{
- ParserMode: parser.ParseComments,
- }
- f, err := conf.ParseFile(filename, content)
+ prog, f, mainPkg, err := loadProgInfo(filename, ssa.InstantiateGenerics)
if err != nil {
t.Error(err)
continue
@@ -67,34 +59,77 @@ func TestCHA(t *testing.T) {
want, pos := expectation(f)
if pos == token.NoPos {
- t.Errorf("No WANT: comment in %s", filename)
- continue
- }
-
- conf.CreateFromFiles("main", f)
- iprog, err := conf.Load()
- if err != nil {
- t.Error(err)
+ t.Error(fmt.Errorf("No WANT: comment in %s", filename))
continue
}
- prog := ssautil.CreateProgram(iprog, 0)
- mainPkg := prog.Package(iprog.Created[0].Pkg)
- prog.Build()
-
cg := cha.CallGraph(prog)
- if got := printGraph(cg, mainPkg.Pkg); got != want {
+ if got := printGraph(cg, mainPkg.Pkg, "dynamic", "Dynamic calls"); got != want {
t.Errorf("%s: got:\n%s\nwant:\n%s",
prog.Fset.Position(pos), got, want)
}
}
}
-func printGraph(cg *callgraph.Graph, from *types.Package) string {
+// TestCHAGenerics is TestCHA tailored for testing generics,
+func TestCHAGenerics(t *testing.T) {
+ if !typeparams.Enabled {
+ t.Skip("TestCHAGenerics requires type parameters")
+ }
+
+ filename := "testdata/generics.go"
+ prog, f, mainPkg, err := loadProgInfo(filename, ssa.InstantiateGenerics)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ want, pos := expectation(f)
+ if pos == token.NoPos {
+ t.Fatal(fmt.Errorf("No WANT: comment in %s", filename))
+ }
+
+ cg := cha.CallGraph(prog)
+
+ if got := printGraph(cg, mainPkg.Pkg, "", "All calls"); got != want {
+ t.Errorf("%s: got:\n%s\nwant:\n%s",
+ prog.Fset.Position(pos), got, want)
+ }
+}
+
+func loadProgInfo(filename string, mode ssa.BuilderMode) (*ssa.Program, *ast.File, *ssa.Package, error) {
+ content, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, nil, nil, fmt.Errorf("couldn't read file '%s': %s", filename, err)
+ }
+
+ conf := loader.Config{
+ ParserMode: parser.ParseComments,
+ }
+ f, err := conf.ParseFile(filename, content)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ conf.CreateFromFiles("main", f)
+ iprog, err := conf.Load()
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ prog := ssautil.CreateProgram(iprog, mode)
+ prog.Build()
+
+ return prog, f, prog.Package(iprog.Created[0].Pkg), nil
+}
+
+// printGraph returns a string representation of cg involving only edges
+// whose description contains edgeMatch. The string representation is
+// prefixed with a desc line.
+func printGraph(cg *callgraph.Graph, from *types.Package, edgeMatch string, desc string) string {
var edges []string
callgraph.GraphVisitEdges(cg, func(e *callgraph.Edge) error {
- if strings.Contains(e.Description(), "dynamic") {
+ if strings.Contains(e.Description(), edgeMatch) {
edges = append(edges, fmt.Sprintf("%s --> %s",
e.Caller.Func.RelString(from),
e.Callee.Func.RelString(from)))
@@ -104,7 +139,7 @@ func printGraph(cg *callgraph.Graph, from *types.Package) string {
sort.Strings(edges)
var buf bytes.Buffer
- buf.WriteString("Dynamic calls\n")
+ buf.WriteString(desc + "\n")
for _, edge := range edges {
fmt.Fprintf(&buf, " %s\n", edge)
}
diff --git a/go/callgraph/cha/testdata/generics.go b/go/callgraph/cha/testdata/generics.go
new file mode 100644
index 000000000..0323c7582
--- /dev/null
+++ b/go/callgraph/cha/testdata/generics.go
@@ -0,0 +1,49 @@
+//go:build ignore
+// +build ignore
+
+package main
+
+// Test of generic function calls.
+
+type I interface {
+ Foo()
+}
+
+type A struct{}
+
+func (a A) Foo() {}
+
+type B struct{}
+
+func (b B) Foo() {}
+
+func instantiated[X I](x X) {
+ x.Foo()
+}
+
+func Bar() {}
+
+func f(h func(), g func(I), k func(A), a A, b B) {
+ h()
+
+ k(a)
+ g(b) // g:func(I) is not matched by instantiated[B]:func(B)
+
+ instantiated[A](a) // static call
+ instantiated[B](b) // static call
+}
+
+// WANT:
+// All calls
+// (*A).Foo --> (A).Foo
+// (*B).Foo --> (B).Foo
+// f --> Bar
+// f --> instantiated[main.A]
+// f --> instantiated[main.A]
+// f --> instantiated[main.B]
+// instantiated --> (*A).Foo
+// instantiated --> (*B).Foo
+// instantiated --> (A).Foo
+// instantiated --> (B).Foo
+// instantiated[main.A] --> (A).Foo
+// instantiated[main.B] --> (B).Foo
diff --git a/go/callgraph/rta/rta.go b/go/callgraph/rta/rta.go
index e6b44606a..2e80415ff 100644
--- a/go/callgraph/rta/rta.go
+++ b/go/callgraph/rta/rta.go
@@ -39,13 +39,16 @@
// analysis, but the algorithm is much faster. For example, running the
// cmd/callgraph tool on its own source takes ~2.1s for RTA and ~5.4s
// for points-to analysis.
-//
package rta // import "golang.org/x/tools/go/callgraph/rta"
// TODO(adonovan): test it by connecting it to the interpreter and
// replacing all "unreachable" functions by a special intrinsic, and
// ensure that that intrinsic is never called.
+// TODO(zpavlinovic): decide if the clients must use ssa.InstantiateGenerics
+// mode when building programs with generics. It might be possible to
+// extend rta to accurately support generics with just ssa.BuilderMode(0).
+
import (
"fmt"
"go/types"
@@ -57,7 +60,6 @@ import (
// A Result holds the results of Rapid Type Analysis, which includes the
// set of reachable functions/methods, runtime types, and the call graph.
-//
type Result struct {
// CallGraph is the discovered callgraph.
// It does not include edges for calls made via reflection.
@@ -262,7 +264,6 @@ func (r *rta) visitFunc(f *ssa.Function) {
// If buildCallGraph is true, Result.CallGraph will contain a call
// graph; otherwise, only the other fields (reachable functions) are
// populated.
-//
func Analyze(roots []*ssa.Function, buildCallGraph bool) *Result {
if len(roots) == 0 {
return nil
@@ -341,7 +342,6 @@ func (r *rta) implementations(I *types.Interface) []types.Type {
// addRuntimeType is called for each concrete type that can be the
// dynamic type of some interface or reflect.Value.
// Adapted from needMethods in go/ssa/builder.go
-//
func (r *rta) addRuntimeType(T types.Type, skip bool) {
if prev, ok := r.result.RuntimeTypes.At(T).(bool); ok {
if skip && !prev {
diff --git a/go/callgraph/rta/rta_test.go b/go/callgraph/rta/rta_test.go
index 9ae1bdf99..67d05d612 100644
--- a/go/callgraph/rta/rta_test.go
+++ b/go/callgraph/rta/rta_test.go
@@ -16,7 +16,7 @@ import (
"go/parser"
"go/token"
"go/types"
- "io/ioutil"
+ "os"
"sort"
"strings"
"testing"
@@ -26,6 +26,7 @@ import (
"golang.org/x/tools/go/loader"
"golang.org/x/tools/go/ssa"
"golang.org/x/tools/go/ssa/ssautil"
+ "golang.org/x/tools/internal/typeparams"
)
var inputs = []string{
@@ -51,19 +52,9 @@ func expectation(f *ast.File) (string, token.Pos) {
// The results string consists of two parts: the set of dynamic call
// edges, "f --> g", one per line, and the set of reachable functions,
// one per line. Each set is sorted.
-//
func TestRTA(t *testing.T) {
for _, filename := range inputs {
- content, err := ioutil.ReadFile(filename)
- if err != nil {
- t.Errorf("couldn't read file '%s': %s", filename, err)
- continue
- }
-
- conf := loader.Config{
- ParserMode: parser.ParseComments,
- }
- f, err := conf.ParseFile(filename, content)
+ prog, f, mainPkg, err := loadProgInfo(filename, ssa.BuilderMode(0))
if err != nil {
t.Error(err)
continue
@@ -75,30 +66,77 @@ func TestRTA(t *testing.T) {
continue
}
- conf.CreateFromFiles("main", f)
- iprog, err := conf.Load()
- if err != nil {
- t.Error(err)
- continue
- }
-
- prog := ssautil.CreateProgram(iprog, 0)
- mainPkg := prog.Package(iprog.Created[0].Pkg)
- prog.Build()
-
res := rta.Analyze([]*ssa.Function{
mainPkg.Func("main"),
mainPkg.Func("init"),
}, true)
- if got := printResult(res, mainPkg.Pkg); got != want {
+ if got := printResult(res, mainPkg.Pkg, "dynamic", "Dynamic calls"); got != want {
t.Errorf("%s: got:\n%s\nwant:\n%s",
prog.Fset.Position(pos), got, want)
}
}
}
-func printResult(res *rta.Result, from *types.Package) string {
+// TestRTAGenerics is TestRTA specialized for testing generics.
+func TestRTAGenerics(t *testing.T) {
+ if !typeparams.Enabled {
+ t.Skip("TestRTAGenerics requires type parameters")
+ }
+
+ filename := "testdata/generics.go"
+ prog, f, mainPkg, err := loadProgInfo(filename, ssa.InstantiateGenerics)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ want, pos := expectation(f)
+ if pos == token.NoPos {
+ t.Fatalf("No WANT: comment in %s", filename)
+ }
+
+ res := rta.Analyze([]*ssa.Function{
+ mainPkg.Func("main"),
+ mainPkg.Func("init"),
+ }, true)
+
+ if got := printResult(res, mainPkg.Pkg, "", "All calls"); got != want {
+ t.Errorf("%s: got:\n%s\nwant:\n%s",
+ prog.Fset.Position(pos), got, want)
+ }
+}
+
+func loadProgInfo(filename string, mode ssa.BuilderMode) (*ssa.Program, *ast.File, *ssa.Package, error) {
+ content, err := os.ReadFile(filename)
+ if err != nil {
+ return nil, nil, nil, fmt.Errorf("couldn't read file '%s': %s", filename, err)
+ }
+
+ conf := loader.Config{
+ ParserMode: parser.ParseComments,
+ }
+ f, err := conf.ParseFile(filename, content)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ conf.CreateFromFiles("main", f)
+ iprog, err := conf.Load()
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ prog := ssautil.CreateProgram(iprog, mode)
+ prog.Build()
+
+ return prog, f, prog.Package(iprog.Created[0].Pkg), nil
+}
+
+// printResult returns a string representation of res, i.e., call graph,
+// reachable functions, and reflect types. For call graph, only edges
+// whose description contains edgeMatch are returned and their string
+// representation is prefixed with a desc line.
+func printResult(res *rta.Result, from *types.Package, edgeMatch, desc string) string {
var buf bytes.Buffer
writeSorted := func(ss []string) {
@@ -108,10 +146,10 @@ func printResult(res *rta.Result, from *types.Package) string {
}
}
- buf.WriteString("Dynamic calls\n")
+ buf.WriteString(desc + "\n")
var edges []string
callgraph.GraphVisitEdges(res.CallGraph, func(e *callgraph.Edge) error {
- if strings.Contains(e.Description(), "dynamic") {
+ if strings.Contains(e.Description(), edgeMatch) {
edges = append(edges, fmt.Sprintf("%s --> %s",
e.Caller.Func.RelString(from),
e.Callee.Func.RelString(from)))
diff --git a/go/callgraph/rta/testdata/generics.go b/go/callgraph/rta/testdata/generics.go
new file mode 100644
index 000000000..d962fa43f
--- /dev/null
+++ b/go/callgraph/rta/testdata/generics.go
@@ -0,0 +1,79 @@
+//go:build ignore
+// +build ignore
+
+package main
+
+// Test of generic function calls.
+
+type I interface {
+ Foo()
+}
+
+type A struct{}
+
+func (a A) Foo() {}
+
+type B struct{}
+
+func (b B) Foo() {}
+
+func instantiated[X I](x X) {
+ x.Foo()
+}
+
+var a A
+var b B
+
+func main() {
+ instantiated[A](a) // static call
+ instantiated[B](b) // static call
+
+ local[C]().Foo()
+
+ lambda[A]()()()
+}
+
+func local[X I]() I {
+ var x X
+ return x
+}
+
+type C struct{}
+
+func (c C) Foo() {}
+
+func lambda[X I]() func() func() {
+ return func() func() {
+ var x X
+ return x.Foo
+ }
+}
+
+// WANT:
+// All calls
+// (*C).Foo --> (C).Foo
+// (A).Foo$bound --> (A).Foo
+// instantiated[main.A] --> (A).Foo
+// instantiated[main.B] --> (B).Foo
+// main --> (*C).Foo
+// main --> (A).Foo$bound
+// main --> (C).Foo
+// main --> instantiated[main.A]
+// main --> instantiated[main.B]
+// main --> lambda[main.A]
+// main --> lambda[main.A]$1
+// main --> local[main.C]
+// Reachable functions
+// (*C).Foo
+// (A).Foo
+// (A).Foo$bound
+// (B).Foo
+// (C).Foo
+// instantiated[main.A]
+// instantiated[main.B]
+// lambda[main.A]
+// lambda[main.A]$1
+// local[main.C]
+// Reflect types
+// *C
+// C
diff --git a/go/callgraph/static/static.go b/go/callgraph/static/static.go
index 7c41c1283..62d2364bf 100644
--- a/go/callgraph/static/static.go
+++ b/go/callgraph/static/static.go
@@ -6,6 +6,8 @@
// only static call edges.
package static // import "golang.org/x/tools/go/callgraph/static"
+// TODO(zpavlinovic): update static for how it handles generic function bodies.
+
import (
"golang.org/x/tools/go/callgraph"
"golang.org/x/tools/go/ssa"
@@ -14,7 +16,6 @@ import (
// CallGraph computes the call graph of the specified program
// considering only static calls.
-//
func CallGraph(prog *ssa.Program) *callgraph.Graph {
cg := callgraph.New(nil) // TODO(adonovan) eliminate concept of rooted callgraph
diff --git a/go/callgraph/static/static_test.go b/go/callgraph/static/static_test.go
index e1bfcd707..0a108d3d2 100644
--- a/go/callgraph/static/static_test.go
+++ b/go/callgraph/static/static_test.go
@@ -14,7 +14,9 @@ import (
"golang.org/x/tools/go/callgraph"
"golang.org/x/tools/go/callgraph/static"
"golang.org/x/tools/go/loader"
+ "golang.org/x/tools/go/ssa"
"golang.org/x/tools/go/ssa/ssautil"
+ "golang.org/x/tools/internal/typeparams"
)
const input = `package P
@@ -47,42 +49,94 @@ func h()
var unknown bool
`
-func TestStatic(t *testing.T) {
- conf := loader.Config{ParserMode: parser.ParseComments}
- f, err := conf.ParseFile("P.go", input)
- if err != nil {
- t.Fatal(err)
- }
+const genericsInput = `package P
- conf.CreateFromFiles("P", f)
- iprog, err := conf.Load()
- if err != nil {
- t.Fatal(err)
- }
+type I interface {
+ F()
+}
- P := iprog.Created[0].Pkg
+type A struct{}
- prog := ssautil.CreateProgram(iprog, 0)
- prog.Build()
+func (a A) F() {}
- cg := static.CallGraph(prog)
+type B struct{}
- var edges []string
- callgraph.GraphVisitEdges(cg, func(e *callgraph.Edge) error {
- edges = append(edges, fmt.Sprintf("%s -> %s",
- e.Caller.Func.RelString(P),
- e.Callee.Func.RelString(P)))
- return nil
- })
- sort.Strings(edges)
+func (b B) F() {}
- want := []string{
- "(*C).f -> (C).f",
- "f -> (C).f",
- "f -> f$1",
- "f -> g",
- }
- if !reflect.DeepEqual(edges, want) {
- t.Errorf("Got edges %v, want %v", edges, want)
+func instantiated[X I](x X) {
+ x.F()
+}
+
+func Bar() {}
+
+func f(h func(), a A, b B) {
+ h()
+
+ instantiated[A](a)
+ instantiated[B](b)
+}
+`
+
+func TestStatic(t *testing.T) {
+ for _, e := range []struct {
+ input string
+ want []string
+ // typeparams must be true if input uses type parameters
+ typeparams bool
+ }{
+ {input, []string{
+ "(*C).f -> (C).f",
+ "f -> (C).f",
+ "f -> f$1",
+ "f -> g",
+ }, false},
+ {genericsInput, []string{
+ "(*A).F -> (A).F",
+ "(*B).F -> (B).F",
+ "f -> instantiated[P.A]",
+ "f -> instantiated[P.B]",
+ "instantiated[P.A] -> (A).F",
+ "instantiated[P.B] -> (B).F",
+ }, true},
+ } {
+ if e.typeparams && !typeparams.Enabled {
+ // Skip tests with type parameters when the build
+ // environment is not supporting any.
+ continue
+ }
+
+ conf := loader.Config{ParserMode: parser.ParseComments}
+ f, err := conf.ParseFile("P.go", e.input)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+
+ conf.CreateFromFiles("P", f)
+ iprog, err := conf.Load()
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+
+ P := iprog.Created[0].Pkg
+
+ prog := ssautil.CreateProgram(iprog, ssa.InstantiateGenerics)
+ prog.Build()
+
+ cg := static.CallGraph(prog)
+
+ var edges []string
+ callgraph.GraphVisitEdges(cg, func(e *callgraph.Edge) error {
+ edges = append(edges, fmt.Sprintf("%s -> %s",
+ e.Caller.Func.RelString(P),
+ e.Callee.Func.RelString(P)))
+ return nil
+ })
+ sort.Strings(edges)
+
+ if !reflect.DeepEqual(edges, e.want) {
+ t.Errorf("Got edges %v, want %v", edges, e.want)
+ }
}
}
diff --git a/go/callgraph/util.go b/go/callgraph/util.go
index a8f89031c..1ab039029 100644
--- a/go/callgraph/util.go
+++ b/go/callgraph/util.go
@@ -11,7 +11,6 @@ import "golang.org/x/tools/go/ssa"
// CalleesOf returns a new set containing all direct callees of the
// caller node.
-//
func CalleesOf(caller *Node) map[*Node]bool {
callees := make(map[*Node]bool)
for _, e := range caller.Out {
@@ -24,7 +23,6 @@ func CalleesOf(caller *Node) map[*Node]bool {
// The edge function is called for each edge in postorder. If it
// returns non-nil, visitation stops and GraphVisitEdges returns that
// value.
-//
func GraphVisitEdges(g *Graph, edge func(*Edge) error) error {
seen := make(map[*Node]bool)
var visit func(n *Node) error
@@ -54,7 +52,6 @@ func GraphVisitEdges(g *Graph, edge func(*Edge) error) error {
// ending at some node for which isEnd() returns true. On success,
// PathSearch returns the path as an ordered list of edges; on
// failure, it returns nil.
-//
func PathSearch(start *Node, isEnd func(*Node) bool) []*Edge {
stack := make([]*Edge, 0, 32)
seen := make(map[*Node]bool)
@@ -82,7 +79,6 @@ func PathSearch(start *Node, isEnd func(*Node) bool) []*Edge {
// synthetic functions (except g.Root and package initializers),
// preserving the topology. In effect, calls to synthetic wrappers
// are "inlined".
-//
func (g *Graph) DeleteSyntheticNodes() {
// Measurements on the standard library and go.tools show that
// resulting graph has ~15% fewer nodes and 4-8% fewer edges
diff --git a/go/callgraph/vta/graph.go b/go/callgraph/vta/graph.go
index ad7ef0e88..2537123f4 100644
--- a/go/callgraph/vta/graph.go
+++ b/go/callgraph/vta/graph.go
@@ -12,6 +12,7 @@ import (
"golang.org/x/tools/go/callgraph"
"golang.org/x/tools/go/ssa"
"golang.org/x/tools/go/types/typeutil"
+ "golang.org/x/tools/internal/typeparams"
)
// node interface for VTA nodes.
@@ -175,9 +176,10 @@ func (f function) String() string {
// We merge such constructs into a single node for simplicity and without
// much precision sacrifice as such variables are rare in practice. Both
// a and b would be represented as the same PtrInterface(I) node in:
-// type I interface
-// var a ***I
-// var b **I
+//
+// type I interface
+// var a ***I
+// var b **I
type nestedPtrInterface struct {
typ types.Type
}
@@ -195,8 +197,9 @@ func (l nestedPtrInterface) String() string {
// constructs into a single node for simplicity and without much precision
// sacrifice as such variables are rare in practice. Both a and b would be
// represented as the same PtrFunction(func()) node in:
-// var a *func()
-// var b **func()
+//
+// var a *func()
+// var b **func()
type nestedPtrFunction struct {
typ types.Type
}
@@ -325,14 +328,16 @@ func (b *builder) instr(instr ssa.Instruction) {
// change type command a := A(b) results in a and b being the
// same value. For concrete type A, there is no interesting flow.
//
- // Note: When A is an interface, most interface casts are handled
+ // When A is an interface, most interface casts are handled
// by the ChangeInterface instruction. The relevant case here is
// when converting a pointer to an interface type. This can happen
// when the underlying interfaces have the same method set.
- // type I interface{ foo() }
- // type J interface{ foo() }
- // var b *I
- // a := (*J)(b)
+ //
+ // type I interface{ foo() }
+ // type J interface{ foo() }
+ // var b *I
+ // a := (*J)(b)
+ //
// When this happens we add flows between a <--> b.
b.addInFlowAliasEdges(b.nodeFromVal(i), b.nodeFromVal(i.X))
case *ssa.TypeAssert:
@@ -371,6 +376,8 @@ func (b *builder) instr(instr ssa.Instruction) {
// SliceToArrayPointer: t1 = slice to array pointer *[4]T <- []T (t0)
// No interesting flow as sliceArrayElem(t1) == sliceArrayElem(t0).
return
+ case *ssa.MultiConvert:
+ b.multiconvert(i)
default:
panic(fmt.Sprintf("unsupported instruction %v\n", instr))
}
@@ -441,7 +448,9 @@ func (b *builder) send(s *ssa.Send) {
}
// selekt generates flows for select statement
-// a = select blocking/nonblocking [c_1 <- t_1, c_2 <- t_2, ..., <- o_1, <- o_2, ...]
+//
+// a = select blocking/nonblocking [c_1 <- t_1, c_2 <- t_2, ..., <- o_1, <- o_2, ...]
+//
// between receiving channel registers c_i and corresponding input register t_i. Further,
// flows are generated between o_i and a[2 + i]. Note that a is a tuple register of type
// <int, bool, r_1, r_2, ...> where the type of r_i is the element type of channel o_i.
@@ -544,8 +553,9 @@ func (b *builder) closure(c *ssa.MakeClosure) {
// panic creates a flow from arguments to panic instructions to return
// registers of all recover statements in the program. Introduces a
// global panic node Panic and
-// 1) for every panic statement p: add p -> Panic
-// 2) for every recover statement r: add Panic -> r (handled in call)
+// 1. for every panic statement p: add p -> Panic
+// 2. for every recover statement r: add Panic -> r (handled in call)
+//
// TODO(zpavlinovic): improve precision by explicitly modeling how panic
// values flow from callees to callers and into deferred recover instructions.
func (b *builder) panic(p *ssa.Panic) {
@@ -563,7 +573,9 @@ func (b *builder) panic(p *ssa.Panic) {
func (b *builder) call(c ssa.CallInstruction) {
// When c is r := recover() call register instruction, we add Recover -> r.
if bf, ok := c.Common().Value.(*ssa.Builtin); ok && bf.Name() == "recover" {
- b.addInFlowEdge(recoverReturn{}, b.nodeFromVal(c.(*ssa.Call)))
+ if v, ok := c.(ssa.Value); ok {
+ b.addInFlowEdge(recoverReturn{}, b.nodeFromVal(v))
+ }
return
}
@@ -581,10 +593,18 @@ func addArgumentFlows(b *builder, c ssa.CallInstruction, f *ssa.Function) {
return
}
cc := c.Common()
- // When c is an unresolved method call (cc.Method != nil), cc.Value contains
- // the receiver object rather than cc.Args[0].
if cc.Method != nil {
- b.addInFlowAliasEdges(b.nodeFromVal(f.Params[0]), b.nodeFromVal(cc.Value))
+ // In principle we don't add interprocedural flows for receiver
+ // objects. At a call site, the receiver object is interface
+ // while the callee object is concrete. The flow from interface
+ // to concrete type in general does not make sense. The exception
+ // is when the concrete type is a named function type (see #57756).
+ //
+ // The flow other way around would bake in information from the
+ // initial call graph.
+ if isFunction(f.Params[0].Type()) {
+ b.addInFlowEdge(b.nodeFromVal(cc.Value), b.nodeFromVal(f.Params[0]))
+ }
}
offset := 0
@@ -638,6 +658,71 @@ func addReturnFlows(b *builder, r *ssa.Return, site ssa.Value) {
}
}
+func (b *builder) multiconvert(c *ssa.MultiConvert) {
+ // TODO(zpavlinovic): decide what to do on MultiConvert long term.
+ // TODO(zpavlinovic): add unit tests.
+ typeSetOf := func(typ types.Type) []*typeparams.Term {
+ // This is a adaptation of x/exp/typeparams.NormalTerms which x/tools cannot depend on.
+ var terms []*typeparams.Term
+ var err error
+ switch typ := typ.(type) {
+ case *typeparams.TypeParam:
+ terms, err = typeparams.StructuralTerms(typ)
+ case *typeparams.Union:
+ terms, err = typeparams.UnionTermSet(typ)
+ case *types.Interface:
+ terms, err = typeparams.InterfaceTermSet(typ)
+ default:
+ // Common case.
+ // Specializing the len=1 case to avoid a slice
+ // had no measurable space/time benefit.
+ terms = []*typeparams.Term{typeparams.NewTerm(false, typ)}
+ }
+
+ if err != nil {
+ return nil
+ }
+ return terms
+ }
+ // isValuePreserving returns true if a conversion from ut_src to
+ // ut_dst is value-preserving, i.e. just a change of type.
+ // Precondition: neither argument is a named type.
+ isValuePreserving := func(ut_src, ut_dst types.Type) bool {
+ // Identical underlying types?
+ if types.IdenticalIgnoreTags(ut_dst, ut_src) {
+ return true
+ }
+
+ switch ut_dst.(type) {
+ case *types.Chan:
+ // Conversion between channel types?
+ _, ok := ut_src.(*types.Chan)
+ return ok
+
+ case *types.Pointer:
+ // Conversion between pointers with identical base types?
+ _, ok := ut_src.(*types.Pointer)
+ return ok
+ }
+ return false
+ }
+ dst_terms := typeSetOf(c.Type())
+ src_terms := typeSetOf(c.X.Type())
+ for _, s := range src_terms {
+ us := s.Type().Underlying()
+ for _, d := range dst_terms {
+ ud := d.Type().Underlying()
+ if isValuePreserving(us, ud) {
+ // This is equivalent to a ChangeType.
+ b.addInFlowAliasEdges(b.nodeFromVal(c), b.nodeFromVal(c.X))
+ return
+ }
+ // This is equivalent to either: SliceToArrayPointer,,
+ // SliceToArrayPointer+Deref, Size 0 Array constant, or a Convert.
+ }
+ }
+}
+
// addInFlowEdge adds s -> d to g if d is node that can have an inflow, i.e., a node
// that represents an interface or an unresolved function value. Otherwise, there
// is no interesting type flow so the edge is omitted.
@@ -649,7 +734,7 @@ func (b *builder) addInFlowEdge(s, d node) {
// Creates const, pointer, global, func, and local nodes based on register instructions.
func (b *builder) nodeFromVal(val ssa.Value) node {
- if p, ok := val.Type().(*types.Pointer); ok && !isInterface(p.Elem()) && !isFunction(p.Elem()) {
+ if p, ok := val.Type().(*types.Pointer); ok && !types.IsInterface(p.Elem()) && !isFunction(p.Elem()) {
// Nested pointer to interfaces are modeled as a special
// nestedPtrInterface node.
if i := interfaceUnderPtr(p.Elem()); i != nil {
@@ -676,14 +761,15 @@ func (b *builder) nodeFromVal(val ssa.Value) node {
default:
panic(fmt.Errorf("unsupported value %v in node creation", val))
}
- return nil
}
// representative returns a unique representative for node `n`. Since
// semantically equivalent types can have different implementations,
// this method guarantees the same implementation is always used.
func (b *builder) representative(n node) node {
- if !hasInitialTypes(n) {
+ if n.Type() == nil {
+ // panicArg and recoverReturn do not have
+ // types and are unique by definition.
return n
}
t := canonicalize(n.Type(), &b.canon)
diff --git a/go/callgraph/vta/graph_test.go b/go/callgraph/vta/graph_test.go
index 8608844dd..8b8c6976f 100644
--- a/go/callgraph/vta/graph_test.go
+++ b/go/callgraph/vta/graph_test.go
@@ -13,6 +13,7 @@ import (
"testing"
"golang.org/x/tools/go/callgraph/cha"
+ "golang.org/x/tools/go/ssa"
"golang.org/x/tools/go/ssa/ssautil"
)
@@ -24,7 +25,7 @@ func TestNodeInterface(t *testing.T) {
// - global variable "gl"
// - "main" function and its
// - first register instruction t0 := *gl
- prog, _, err := testProg("testdata/src/simple.go")
+ prog, _, err := testProg("testdata/src/simple.go", ssa.BuilderMode(0))
if err != nil {
t.Fatalf("couldn't load testdata/src/simple.go program: %v", err)
}
@@ -78,7 +79,7 @@ func TestNodeInterface(t *testing.T) {
func TestVtaGraph(t *testing.T) {
// Get the basic type int from a real program.
- prog, _, err := testProg("testdata/src/simple.go")
+ prog, _, err := testProg("testdata/src/simple.go", ssa.BuilderMode(0))
if err != nil {
t.Fatalf("couldn't load testdata/src/simple.go program: %v", err)
}
@@ -191,7 +192,7 @@ func TestVTAGraphConstruction(t *testing.T) {
"testdata/src/panic.go",
} {
t.Run(file, func(t *testing.T) {
- prog, want, err := testProg(file)
+ prog, want, err := testProg(file, ssa.BuilderMode(0))
if err != nil {
t.Fatalf("couldn't load test file '%s': %s", file, err)
}
diff --git a/go/callgraph/vta/helpers_test.go b/go/callgraph/vta/helpers_test.go
index 0e00aeb28..768365f5b 100644
--- a/go/callgraph/vta/helpers_test.go
+++ b/go/callgraph/vta/helpers_test.go
@@ -35,7 +35,7 @@ func want(f *ast.File) []string {
// testProg returns an ssa representation of a program at
// `path`, assumed to define package "testdata," and the
// test want result as list of strings.
-func testProg(path string) (*ssa.Program, []string, error) {
+func testProg(path string, mode ssa.BuilderMode) (*ssa.Program, []string, error) {
content, err := ioutil.ReadFile(path)
if err != nil {
return nil, nil, err
@@ -56,7 +56,7 @@ func testProg(path string) (*ssa.Program, []string, error) {
return nil, nil, err
}
- prog := ssautil.CreateProgram(iprog, 0)
+ prog := ssautil.CreateProgram(iprog, mode)
// Set debug mode to exercise DebugRef instructions.
prog.Package(iprog.Created[0].Pkg).SetDebugMode(true)
prog.Build()
@@ -87,7 +87,9 @@ func funcName(f *ssa.Function) string {
// callGraphStr stringifes `g` into a list of strings where
// each entry is of the form
-// f: cs1 -> f1, f2, ...; ...; csw -> fx, fy, ...
+//
+// f: cs1 -> f1, f2, ...; ...; csw -> fx, fy, ...
+//
// f is a function, cs1, ..., csw are call sites in f, and
// f1, f2, ..., fx, fy, ... are the resolved callees.
func callGraphStr(g *callgraph.Graph) []string {
diff --git a/go/callgraph/vta/internal/trie/bits.go b/go/callgraph/vta/internal/trie/bits.go
index f2fd0ba83..c3aa15985 100644
--- a/go/callgraph/vta/internal/trie/bits.go
+++ b/go/callgraph/vta/internal/trie/bits.go
@@ -19,11 +19,11 @@ type key uint64
// bitpos is the position of a bit. A position is represented by having a 1
// bit in that position.
// Examples:
-// * 0b0010 is the position of the `1` bit in 2.
-// It is the 3rd most specific bit position in big endian encoding
-// (0b0 and 0b1 are more specific).
-// * 0b0100 is the position of the bit that 1 and 5 disagree on.
-// * 0b0 is a special value indicating that all bit agree.
+// - 0b0010 is the position of the `1` bit in 2.
+// It is the 3rd most specific bit position in big endian encoding
+// (0b0 and 0b1 are more specific).
+// - 0b0100 is the position of the bit that 1 and 5 disagree on.
+// - 0b0 is a special value indicating that all bit agree.
type bitpos uint64
// prefixes represent a set of keys that all agree with the
@@ -35,7 +35,8 @@ type bitpos uint64
// A prefix always mask(p, m) == p.
//
// A key is its own prefix for the bit position 64,
-// e.g. seeing a `prefix(key)` is not a problem.
+// e.g. seeing a `prefix(key)` is not a problem.
+//
// Prefixes should never be turned into keys.
type prefix uint64
@@ -64,8 +65,9 @@ func matchPrefix(k prefix, p prefix, b bitpos) bool {
// In big endian encoding, this value is the [64-(m-1)] most significant bits of k
// followed by a `0` bit at bitpos m, followed m-1 `1` bits.
// Examples:
-// prefix(0b1011) for a bitpos 0b0100 represents the keys:
-// 0b1000, 0b1001, 0b1010, 0b1011, 0b1100, 0b1101, 0b1110, 0b1111
+//
+// prefix(0b1011) for a bitpos 0b0100 represents the keys:
+// 0b1000, 0b1001, 0b1010, 0b1011, 0b1100, 0b1101, 0b1110, 0b1111
//
// This mask function has the property that if matchPrefix(k, p, b), then
// k <= p if and only if zeroBit(k, m). This induces binary search tree tries.
@@ -85,9 +87,10 @@ func ord(m, n bitpos) bool {
// can hold that can also be held by a prefix `q` for some bitpos `n`.
//
// This is equivalent to:
-// m ==n && p == q,
-// higher(m, n) && matchPrefix(q, p, m), or
-// higher(n, m) && matchPrefix(p, q, n)
+//
+// m ==n && p == q,
+// higher(m, n) && matchPrefix(q, p, m), or
+// higher(n, m) && matchPrefix(p, q, n)
func prefixesOverlap(p prefix, m bitpos, q prefix, n bitpos) bool {
fbb := n
if ord(m, n) {
diff --git a/go/callgraph/vta/internal/trie/builder.go b/go/callgraph/vta/internal/trie/builder.go
index 25d3805bc..11ff59b1b 100644
--- a/go/callgraph/vta/internal/trie/builder.go
+++ b/go/callgraph/vta/internal/trie/builder.go
@@ -9,7 +9,9 @@ package trie
// will be stored for the key.
//
// Collision functions must be idempotent:
-// collision(x, x) == x for all x.
+//
+// collision(x, x) == x for all x.
+//
// Collisions functions may be applied whenever a value is inserted
// or two maps are merged, or intersected.
type Collision func(lhs interface{}, rhs interface{}) interface{}
@@ -72,7 +74,8 @@ func (b *Builder) Empty() Map { return Map{b.Scope(), b.empty} }
// in the current scope and handle collisions using the collision function c.
//
// This is roughly corresponds to updating a map[uint64]interface{} by:
-// if _, ok := m[k]; ok { m[k] = c(m[k], v} else { m[k] = v}
+//
+// if _, ok := m[k]; ok { m[k] = c(m[k], v} else { m[k] = v}
//
// An insertion or update happened whenever Insert(m, ...) != m .
func (b *Builder) InsertWith(c Collision, m Map, k uint64, v interface{}) Map {
@@ -85,7 +88,8 @@ func (b *Builder) InsertWith(c Collision, m Map, k uint64, v interface{}) Map {
//
// If there was a previous value mapped by key, keep the previously mapped value.
// This is roughly corresponds to updating a map[uint64]interface{} by:
-// if _, ok := m[k]; ok { m[k] = val }
+//
+// if _, ok := m[k]; ok { m[k] = val }
//
// This is equivalent to b.Merge(m, b.Create({k: v})).
func (b *Builder) Insert(m Map, k uint64, v interface{}) Map {
@@ -94,7 +98,8 @@ func (b *Builder) Insert(m Map, k uint64, v interface{}) Map {
// Updates a (key, value) in the map. This is roughly corresponds to
// updating a map[uint64]interface{} by:
-// m[key] = val
+//
+// m[key] = val
func (b *Builder) Update(m Map, key uint64, val interface{}) Map {
return b.InsertWith(TakeRhs, m, key, val)
}
@@ -148,14 +153,17 @@ func (b *Builder) Remove(m Map, k uint64) Map {
// Intersect Maps lhs and rhs and returns a map with all of the keys in
// both lhs and rhs and the value comes from lhs, i.e.
-// {(k, lhs[k]) | k in lhs, k in rhs}.
+//
+// {(k, lhs[k]) | k in lhs, k in rhs}.
func (b *Builder) Intersect(lhs, rhs Map) Map {
return b.IntersectWith(TakeLhs, lhs, rhs)
}
// IntersectWith take lhs and rhs and returns the intersection
// with the value coming from the collision function, i.e.
-// {(k, c(lhs[k], rhs[k]) ) | k in lhs, k in rhs}.
+//
+// {(k, c(lhs[k], rhs[k]) ) | k in lhs, k in rhs}.
+//
// The elements of the resulting map are always { <k, c(lhs[k], rhs[k]) > }
// for each key k that a key in both lhs and rhs.
func (b *Builder) IntersectWith(c Collision, lhs, rhs Map) Map {
@@ -261,7 +269,9 @@ func (b *Builder) mkLeaf(k key, v interface{}) *leaf {
}
// mkBranch returns the hash-consed representative of the tuple
-// (prefix, branch, left, right)
+//
+// (prefix, branch, left, right)
+//
// in the current scope.
func (b *Builder) mkBranch(p prefix, bp bitpos, left node, right node) *branch {
br := &branch{
diff --git a/go/callgraph/vta/internal/trie/trie.go b/go/callgraph/vta/internal/trie/trie.go
index 160eb21be..511fde515 100644
--- a/go/callgraph/vta/internal/trie/trie.go
+++ b/go/callgraph/vta/internal/trie/trie.go
@@ -10,8 +10,10 @@
// environment abstract domains in program analysis).
//
// This implementation closely follows the paper:
-// C. Okasaki and A. Gill, “Fast mergeable integer maps,” in ACM SIGPLAN
-// Workshop on ML, September 1998, pp. 77–86.
+//
+// C. Okasaki and A. Gill, “Fast mergeable integer maps,” in ACM SIGPLAN
+// Workshop on ML, September 1998, pp. 77–86.
+//
// Each Map is immutable and can be read from concurrently. The map does not
// guarantee that the value pointed to by the interface{} value is not updated
// concurrently.
@@ -36,9 +38,9 @@ import (
// Maps are immutable and can be read from concurrently.
//
// Notes on concurrency:
-// - A Map value itself is an interface and assignments to a Map value can race.
-// - Map does not guarantee that the value pointed to by the interface{} value
-// is not updated concurrently.
+// - A Map value itself is an interface and assignments to a Map value can race.
+// - Map does not guarantee that the value pointed to by the interface{} value
+// is not updated concurrently.
type Map struct {
s Scope
n node
diff --git a/go/callgraph/vta/propagation.go b/go/callgraph/vta/propagation.go
index 5934ebc21..5817e8938 100644
--- a/go/callgraph/vta/propagation.go
+++ b/go/callgraph/vta/propagation.go
@@ -20,53 +20,52 @@ import (
// with ids X and Y s.t. X < Y, Y comes before X in the topological order.
func scc(g vtaGraph) (map[node]int, int) {
// standard data structures used by Tarjan's algorithm.
- var index uint64
+ type state struct {
+ index int
+ lowLink int
+ onStack bool
+ }
+ states := make(map[node]*state, len(g))
var stack []node
- indexMap := make(map[node]uint64)
- lowLink := make(map[node]uint64)
- onStack := make(map[node]bool)
- nodeToSccID := make(map[node]int)
+ nodeToSccID := make(map[node]int, len(g))
sccID := 0
var doSCC func(node)
doSCC = func(n node) {
- indexMap[n] = index
- lowLink[n] = index
- index = index + 1
- onStack[n] = true
+ index := len(states)
+ ns := &state{index: index, lowLink: index, onStack: true}
+ states[n] = ns
stack = append(stack, n)
for s := range g[n] {
- if _, ok := indexMap[s]; !ok {
+ if ss, visited := states[s]; !visited {
// Analyze successor s that has not been visited yet.
doSCC(s)
- lowLink[n] = min(lowLink[n], lowLink[s])
- } else if onStack[s] {
+ ss = states[s]
+ ns.lowLink = min(ns.lowLink, ss.lowLink)
+ } else if ss.onStack {
// The successor is on the stack, meaning it has to be
// in the current SCC.
- lowLink[n] = min(lowLink[n], indexMap[s])
+ ns.lowLink = min(ns.lowLink, ss.index)
}
}
// if n is a root node, pop the stack and generate a new SCC.
- if lowLink[n] == indexMap[n] {
- for {
- w := stack[len(stack)-1]
+ if ns.lowLink == index {
+ var w node
+ for w != n {
+ w = stack[len(stack)-1]
stack = stack[:len(stack)-1]
- onStack[w] = false
+ states[w].onStack = false
nodeToSccID[w] = sccID
- if w == n {
- break
- }
}
sccID++
}
}
- index = 0
for n := range g {
- if _, ok := indexMap[n]; !ok {
+ if _, visited := states[n]; !visited {
doSCC(n)
}
}
@@ -74,7 +73,7 @@ func scc(g vtaGraph) (map[node]int, int) {
return nodeToSccID, sccID
}
-func min(x, y uint64) uint64 {
+func min(x, y int) int {
if x < y {
return x
}
@@ -175,6 +174,18 @@ func nodeTypes(nodes []node, builder *trie.Builder, propTypeId func(p propType)
return &typeSet
}
+// hasInitialTypes check if a node can have initial types.
+// Returns true iff `n` is not a panic, recover, nestedPtr*
+// node, nor a node whose type is an interface.
+func hasInitialTypes(n node) bool {
+ switch n.(type) {
+ case panicArg, recoverReturn, nestedPtrFunction, nestedPtrInterface:
+ return false
+ default:
+ return !types.IsInterface(n.Type())
+ }
+}
+
// getPropType creates a propType for `node` based on its type.
// propType.typ is always node.Type(). If node is function, then
// propType.val is the underlying function; nil otherwise.
diff --git a/go/callgraph/vta/propagation_test.go b/go/callgraph/vta/propagation_test.go
index 96707417f..f4a754f96 100644
--- a/go/callgraph/vta/propagation_test.go
+++ b/go/callgraph/vta/propagation_test.go
@@ -58,7 +58,7 @@ func newLocal(name string, t types.Type) local {
// newNamedType creates a bogus type named `name`.
func newNamedType(name string) *types.Named {
- return types.NewNamed(types.NewTypeName(token.NoPos, nil, name, nil), nil, nil)
+ return types.NewNamed(types.NewTypeName(token.NoPos, nil, name, nil), types.Universe.Lookup("int").Type(), nil)
}
// sccString is a utility for stringifying `nodeToScc`. Every
@@ -123,7 +123,8 @@ func sccEqual(sccs1 []string, sccs2 []string) bool {
// isRevTopSorted checks if sccs of `g` are sorted in reverse
// topological order:
-// for every edge x -> y in g, nodeToScc[x] > nodeToScc[y]
+//
+// for every edge x -> y in g, nodeToScc[x] > nodeToScc[y]
func isRevTopSorted(g vtaGraph, nodeToScc map[node]int) bool {
for n, succs := range g {
for s := range succs {
@@ -148,39 +149,39 @@ func setName(f *ssa.Function, name string) {
// parentheses contain node types and F nodes stand for function
// nodes whose content is function named F:
//
-// no-cycles:
-// t0 (A) -> t1 (B) -> t2 (C)
+// no-cycles:
+// t0 (A) -> t1 (B) -> t2 (C)
//
-// trivial-cycle:
-// <-------- <--------
-// | | | |
-// t0 (A) -> t1 (B) ->
+// trivial-cycle:
+// <-------- <--------
+// | | | |
+// t0 (A) -> t1 (B) ->
//
-// circle-cycle:
-// t0 (A) -> t1 (A) -> t2 (B)
-// | |
-// <--------------------
+// circle-cycle:
+// t0 (A) -> t1 (A) -> t2 (B)
+// | |
+// <--------------------
//
-// fully-connected:
-// t0 (A) <-> t1 (B)
-// \ /
-// t2(C)
+// fully-connected:
+// t0 (A) <-> t1 (B)
+// \ /
+// t2(C)
//
-// subsumed-scc:
-// t0 (A) -> t1 (B) -> t2(B) -> t3 (A)
-// | | | |
-// | <--------- |
-// <-----------------------------
+// subsumed-scc:
+// t0 (A) -> t1 (B) -> t2(B) -> t3 (A)
+// | | | |
+// | <--------- |
+// <-----------------------------
//
-// more-realistic:
-// <--------
-// | |
-// t0 (A) -->
-// ---------->
-// | |
-// t1 (A) -> t2 (B) -> F1 -> F2 -> F3 -> F4
-// | | | |
-// <------- <------------
+// more-realistic:
+// <--------
+// | |
+// t0 (A) -->
+// ---------->
+// | |
+// t1 (A) -> t2 (B) -> F1 -> F2 -> F3 -> F4
+// | | | |
+// <------- <------------
func testSuite() map[string]vtaGraph {
a := newNamedType("A")
b := newNamedType("B")
diff --git a/go/callgraph/vta/testdata/src/callgraph_generics.go b/go/callgraph/vta/testdata/src/callgraph_generics.go
new file mode 100644
index 000000000..da3dca52a
--- /dev/null
+++ b/go/callgraph/vta/testdata/src/callgraph_generics.go
@@ -0,0 +1,71 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// go:build ignore
+
+package testdata
+
+func instantiated[X any](x *X) int {
+ print(x)
+ return 0
+}
+
+type I interface {
+ Bar()
+}
+
+func interfaceInstantiated[X I](x X) {
+ x.Bar()
+}
+
+type A struct{}
+
+func (a A) Bar() {}
+
+type B struct{}
+
+func (b B) Bar() {}
+
+func Foo(a A, b B) {
+ x := true
+ instantiated[bool](&x)
+ y := 1
+ instantiated[int](&y)
+
+ interfaceInstantiated[A](a)
+ interfaceInstantiated[B](b)
+}
+
+// Relevant SSA:
+//func Foo(a A, b B):
+// t0 = local A (a)
+// *t0 = a
+// t1 = local B (b)
+// *t1 = b
+// t2 = new bool (x)
+// *t2 = true:bool
+// t3 = instantiated[bool](t2)
+// t4 = new int (y)
+// *t4 = 1:int
+// t5 = instantiated[int](t4)
+// t6 = *t0
+// t7 = interfaceInstantiated[testdata.A](t6)
+// t8 = *t1
+// t9 = interfaceInstantiated[testdata.B](t8)
+// return
+//
+//func interfaceInstantiated[testdata.B](x B):
+// t0 = local B (x)
+// *t0 = x
+// t1 = *t0
+// t2 = (B).Bar(t1)
+// return
+//
+//func interfaceInstantiated[X I](x X):
+// (external)
+
+// WANT:
+// Foo: instantiated[bool](t2) -> instantiated[bool]; instantiated[int](t4) -> instantiated[int]; interfaceInstantiated[testdata.A](t6) -> interfaceInstantiated[testdata.A]; interfaceInstantiated[testdata.B](t8) -> interfaceInstantiated[testdata.B]
+// interfaceInstantiated[testdata.B]: (B).Bar(t1) -> B.Bar
+// interfaceInstantiated[testdata.A]: (A).Bar(t1) -> A.Bar
diff --git a/go/callgraph/vta/testdata/src/callgraph_issue_57756.go b/go/callgraph/vta/testdata/src/callgraph_issue_57756.go
new file mode 100644
index 000000000..e18f16eba
--- /dev/null
+++ b/go/callgraph/vta/testdata/src/callgraph_issue_57756.go
@@ -0,0 +1,67 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// go:build ignore
+
+package testdata
+
+// Test that the values of a named function type are correctly
+// flowing from interface objects i in i.Foo() to the receiver
+// parameters of callees of i.Foo().
+
+type H func()
+
+func (h H) Do() {
+ h()
+}
+
+type I interface {
+ Do()
+}
+
+func Bar() I {
+ return H(func() {})
+}
+
+func For(g G) {
+ b := Bar()
+ b.Do()
+
+ g[0] = b
+ g.Goo()
+}
+
+type G []I
+
+func (g G) Goo() {
+ g[0].Do()
+}
+
+// Relevant SSA:
+// func Bar$1():
+// return
+//
+// func Bar() I:
+// t0 = changetype H <- func() (Bar$1)
+// t1 = make I <- H (t0)
+//
+// func For():
+// t0 = Bar()
+// t1 = invoke t0.Do()
+// t2 = &g[0:int]
+// *t2 = t0
+// t3 = (G).Goo(g)
+//
+// func (h H) Do():
+// t0 = h()
+//
+// func (g G) Goo():
+// t0 = &g[0:int]
+// t1 = *t0
+// t2 = invoke t1.Do()
+
+// WANT:
+// For: (G).Goo(g) -> G.Goo; Bar() -> Bar; invoke t0.Do() -> H.Do
+// H.Do: h() -> Bar$1
+// G.Goo: invoke t1.Do() -> H.Do
diff --git a/go/callgraph/vta/testdata/src/callgraph_recursive_types.go b/go/callgraph/vta/testdata/src/callgraph_recursive_types.go
new file mode 100644
index 000000000..6c3fef6f7
--- /dev/null
+++ b/go/callgraph/vta/testdata/src/callgraph_recursive_types.go
@@ -0,0 +1,56 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// go:build ignore
+
+package testdata
+
+type I interface {
+ Foo() I
+}
+
+type A struct {
+ i int
+ a *A
+}
+
+func (a *A) Foo() I {
+ return a
+}
+
+type B **B
+
+type C *D
+type D *C
+
+func Bar(a *A, b *B, c *C, d *D) {
+ Baz(a)
+ Baz(a.a)
+
+ sink(*b)
+ sink(*c)
+ sink(*d)
+}
+
+func Baz(i I) {
+ i.Foo()
+}
+
+func sink(i interface{}) {
+ print(i)
+}
+
+// Relevant SSA:
+// func Baz(i I):
+// t0 = invoke i.Foo()
+// return
+//
+// func Bar(a *A, b *B):
+// t0 = make I <- *A (a)
+// t1 = Baz(t0)
+// ...
+
+// WANT:
+// Bar: Baz(t0) -> Baz; Baz(t4) -> Baz; sink(t10) -> sink; sink(t13) -> sink; sink(t7) -> sink
+// Baz: invoke i.Foo() -> A.Foo
diff --git a/go/callgraph/vta/testdata/src/function_alias.go b/go/callgraph/vta/testdata/src/function_alias.go
index b38e0e00d..0a8dffe79 100644
--- a/go/callgraph/vta/testdata/src/function_alias.go
+++ b/go/callgraph/vta/testdata/src/function_alias.go
@@ -33,42 +33,42 @@ func Baz(f func()) {
// t2 = *t1
// *t2 = Baz$1
// t3 = local A (a)
-// t4 = &t3.foo [#0]
-// t5 = *t1
-// t6 = *t5
-// *t4 = t6
+// t4 = *t1
+// t5 = *t4
+// t6 = &t3.foo [#0]
+// *t6 = t5
// t7 = &t3.foo [#0]
// t8 = *t7
// t9 = t8()
-// t10 = &t3.do [#1] *Doer
-// t11 = &t3.foo [#0] *func()
-// t12 = *t11 func()
-// t13 = changetype Doer <- func() (t12) Doer
-// *t10 = t13
+// t10 = &t3.foo [#0] *func()
+// t11 = *t10 func()
+// t12 = &t3.do [#1] *Doer
+// t13 = changetype Doer <- func() (t11) Doer
+// *t12 = t13
// t14 = &t3.do [#1] *Doer
// t15 = *t14 Doer
// t16 = t15() ()
// Flow chain showing that Baz$1 reaches t8():
-// Baz$1 -> t2 <-> PtrFunction(func()) <-> t5 -> t6 -> t4 <-> Field(testdata.A:foo) <-> t7 -> t8
+// Baz$1 -> t2 <-> PtrFunction(func()) <-> t4 -> t5 -> t6 <-> Field(testdata.A:foo) <-> t7 -> t8
// Flow chain showing that Baz$1 reaches t15():
-// Field(testdata.A:foo) <-> t11 -> t12 -> t13 -> t10 <-> Field(testdata.A:do) <-> t14 -> t15
+// Field(testdata.A:foo) <-> t10 -> t11 -> t13 -> t12 <-> Field(testdata.A:do) <-> t14 -> t15
// WANT:
// Local(f) -> Local(t0)
// Local(t0) -> PtrFunction(func())
// Function(Baz$1) -> Local(t2)
-// PtrFunction(func()) -> Local(t0), Local(t2), Local(t5)
+// PtrFunction(func()) -> Local(t0), Local(t2), Local(t4)
// Local(t2) -> PtrFunction(func())
-// Local(t4) -> Field(testdata.A:foo)
-// Local(t5) -> Local(t6), PtrFunction(func())
-// Local(t6) -> Local(t4)
+// Local(t6) -> Field(testdata.A:foo)
+// Local(t4) -> Local(t5), PtrFunction(func())
+// Local(t5) -> Local(t6)
// Local(t7) -> Field(testdata.A:foo), Local(t8)
-// Field(testdata.A:foo) -> Local(t11), Local(t4), Local(t7)
-// Local(t4) -> Field(testdata.A:foo)
-// Field(testdata.A:do) -> Local(t10), Local(t14)
-// Local(t10) -> Field(testdata.A:do)
-// Local(t11) -> Field(testdata.A:foo), Local(t12)
-// Local(t12) -> Local(t13)
-// Local(t13) -> Local(t10)
+// Field(testdata.A:foo) -> Local(t10), Local(t6), Local(t7)
+// Local(t6) -> Field(testdata.A:foo)
+// Field(testdata.A:do) -> Local(t12), Local(t14)
+// Local(t12) -> Field(testdata.A:do)
+// Local(t10) -> Field(testdata.A:foo), Local(t11)
+// Local(t11) -> Local(t13)
+// Local(t13) -> Local(t12)
// Local(t14) -> Field(testdata.A:do), Local(t15)
diff --git a/go/callgraph/vta/testdata/src/panic.go b/go/callgraph/vta/testdata/src/panic.go
index 2d39c70ea..5ef354857 100644
--- a/go/callgraph/vta/testdata/src/panic.go
+++ b/go/callgraph/vta/testdata/src/panic.go
@@ -27,12 +27,12 @@ func recover2() {
func Baz(a A) {
defer recover1()
+ defer recover()
panic(a)
}
// Relevant SSA:
// func recover1():
-// 0:
// t0 = print("only this recover...":string)
// t1 = recover()
// t2 = typeassert,ok t1.(I)
@@ -53,6 +53,7 @@ func Baz(a A) {
// t0 = local A (a)
// *t0 = a
// defer recover1()
+// defer recover()
// t1 = *t0
// t2 = make interface{} <- A (t1)
// panic t2
diff --git a/go/callgraph/vta/utils.go b/go/callgraph/vta/utils.go
index e7a97e2d8..d1831983a 100644
--- a/go/callgraph/vta/utils.go
+++ b/go/callgraph/vta/utils.go
@@ -9,6 +9,7 @@ import (
"golang.org/x/tools/go/callgraph"
"golang.org/x/tools/go/ssa"
+ "golang.org/x/tools/internal/typeparams"
)
func canAlias(n1, n2 node) bool {
@@ -32,13 +33,13 @@ func isReferenceNode(n node) bool {
// hasInFlow checks if a concrete type can flow to node `n`.
// Returns yes iff the type of `n` satisfies one the following:
-// 1) is an interface
-// 2) is a (nested) pointer to interface (needed for, say,
+// 1. is an interface
+// 2. is a (nested) pointer to interface (needed for, say,
// slice elements of nested pointers to interface type)
-// 3) is a function type (needed for higher-order type flow)
-// 4) is a (nested) pointer to function (needed for, say,
+// 3. is a function type (needed for higher-order type flow)
+// 4. is a (nested) pointer to function (needed for, say,
// slice elements of nested pointers to function type)
-// 5) is a global Recover or Panic node
+// 5. is a global Recover or Panic node
func hasInFlow(n node) bool {
if _, ok := n.(panicArg); ok {
return true
@@ -56,24 +57,7 @@ func hasInFlow(n node) bool {
return true
}
- return isInterface(t) || isFunction(t)
-}
-
-// hasInitialTypes check if a node can have initial types.
-// Returns true iff `n` is not a panic or recover node as
-// those are artificial.
-func hasInitialTypes(n node) bool {
- switch n.(type) {
- case panicArg, recoverReturn:
- return false
- default:
- return true
- }
-}
-
-func isInterface(t types.Type) bool {
- _, ok := t.Underlying().(*types.Interface)
- return ok
+ return types.IsInterface(t) || isFunction(t)
}
func isFunction(t types.Type) bool {
@@ -85,48 +69,76 @@ func isFunction(t types.Type) bool {
// pointer to interface and if yes, returns the interface type.
// Otherwise, returns nil.
func interfaceUnderPtr(t types.Type) types.Type {
- p, ok := t.Underlying().(*types.Pointer)
- if !ok {
- return nil
- }
+ seen := make(map[types.Type]bool)
+ var visit func(types.Type) types.Type
+ visit = func(t types.Type) types.Type {
+ if seen[t] {
+ return nil
+ }
+ seen[t] = true
- if isInterface(p.Elem()) {
- return p.Elem()
- }
+ p, ok := t.Underlying().(*types.Pointer)
+ if !ok {
+ return nil
+ }
+
+ if types.IsInterface(p.Elem()) {
+ return p.Elem()
+ }
- return interfaceUnderPtr(p.Elem())
+ return visit(p.Elem())
+ }
+ return visit(t)
}
// functionUnderPtr checks if type `t` is a potentially nested
// pointer to function type and if yes, returns the function type.
// Otherwise, returns nil.
func functionUnderPtr(t types.Type) types.Type {
- p, ok := t.Underlying().(*types.Pointer)
- if !ok {
- return nil
- }
+ seen := make(map[types.Type]bool)
+ var visit func(types.Type) types.Type
+ visit = func(t types.Type) types.Type {
+ if seen[t] {
+ return nil
+ }
+ seen[t] = true
- if isFunction(p.Elem()) {
- return p.Elem()
- }
+ p, ok := t.Underlying().(*types.Pointer)
+ if !ok {
+ return nil
+ }
+
+ if isFunction(p.Elem()) {
+ return p.Elem()
+ }
- return functionUnderPtr(p.Elem())
+ return visit(p.Elem())
+ }
+ return visit(t)
}
// sliceArrayElem returns the element type of type `t` that is
-// expected to be a (pointer to) array or slice, consistent with
+// expected to be a (pointer to) array, slice or string, consistent with
// the ssa.Index and ssa.IndexAddr instructions. Panics otherwise.
func sliceArrayElem(t types.Type) types.Type {
- u := t.Underlying()
-
- if p, ok := u.(*types.Pointer); ok {
- u = p.Elem().Underlying()
- }
-
- if a, ok := u.(*types.Array); ok {
- return a.Elem()
+ switch u := t.Underlying().(type) {
+ case *types.Pointer:
+ return u.Elem().Underlying().(*types.Array).Elem()
+ case *types.Array:
+ return u.Elem()
+ case *types.Slice:
+ return u.Elem()
+ case *types.Basic:
+ return types.Typ[types.Byte]
+ case *types.Interface: // type param.
+ terms, err := typeparams.InterfaceTermSet(u)
+ if err != nil || len(terms) == 0 {
+ panic(t)
+ }
+ return sliceArrayElem(terms[0].Type()) // Element types must match.
+ default:
+ panic(t)
}
- return u.(*types.Slice).Elem()
}
// siteCallees computes a set of callees for call site `c` given program `callgraph`.
diff --git a/go/callgraph/vta/vta.go b/go/callgraph/vta/vta.go
index 98fabe58c..583936003 100644
--- a/go/callgraph/vta/vta.go
+++ b/go/callgraph/vta/vta.go
@@ -3,7 +3,7 @@
// license that can be found in the LICENSE file.
// Package vta computes the call graph of a Go program using the Variable
-// Type Analysis (VTA) algorithm originally described in ``Practical Virtual
+// Type Analysis (VTA) algorithm originally described in “Practical Virtual
// Method Call Resolution for Java," Vijay Sundaresan, Laurie Hendren,
// Chrislain Razafimahefa, Raja Vallée-Rai, Patrick Lam, Etienne Gagnon, and
// Charles Godin.
@@ -18,22 +18,23 @@
//
// A type propagation is a directed, labeled graph. A node can represent
// one of the following:
-// - A field of a struct type.
-// - A local (SSA) variable of a method/function.
-// - All pointers to a non-interface type.
-// - The return value of a method.
-// - All elements in an array.
-// - All elements in a slice.
-// - All elements in a map.
-// - All elements in a channel.
-// - A global variable.
+// - A field of a struct type.
+// - A local (SSA) variable of a method/function.
+// - All pointers to a non-interface type.
+// - The return value of a method.
+// - All elements in an array.
+// - All elements in a slice.
+// - All elements in a map.
+// - All elements in a channel.
+// - A global variable.
+//
// In addition, the implementation used in this package introduces
// a few Go specific kinds of nodes:
-// - (De)references of nested pointers to interfaces are modeled
-// as a unique nestedPtrInterface node in the type propagation graph.
-// - Each function literal is represented as a function node whose
-// internal value is the (SSA) representation of the function. This
-// is done to precisely infer flow of higher-order functions.
+// - (De)references of nested pointers to interfaces are modeled
+// as a unique nestedPtrInterface node in the type propagation graph.
+// - Each function literal is represented as a function node whose
+// internal value is the (SSA) representation of the function. This
+// is done to precisely infer flow of higher-order functions.
//
// Edges in the graph represent flow of types (and function literals) through
// the program. That is, the model 1) typing constraints that are induced by
@@ -53,6 +54,8 @@
// reaching the node representing the call site to create a set of callees.
package vta
+// TODO(zpavlinovic): update VTA for how it handles generic function bodies and instantiation wrappers.
+
import (
"go/types"
diff --git a/go/callgraph/vta/vta_go117_test.go b/go/callgraph/vta/vta_go117_test.go
index 9ce6a8864..04f6980e5 100644
--- a/go/callgraph/vta/vta_go117_test.go
+++ b/go/callgraph/vta/vta_go117_test.go
@@ -11,12 +11,13 @@ import (
"testing"
"golang.org/x/tools/go/callgraph/cha"
+ "golang.org/x/tools/go/ssa"
"golang.org/x/tools/go/ssa/ssautil"
)
func TestVTACallGraphGo117(t *testing.T) {
file := "testdata/src/go117.go"
- prog, want, err := testProg(file)
+ prog, want, err := testProg(file, ssa.BuilderMode(0))
if err != nil {
t.Fatalf("couldn't load test file '%s': %s", file, err)
}
diff --git a/go/callgraph/vta/vta_test.go b/go/callgraph/vta/vta_test.go
index 33ceaf909..549c4af45 100644
--- a/go/callgraph/vta/vta_test.go
+++ b/go/callgraph/vta/vta_test.go
@@ -13,6 +13,7 @@ import (
"golang.org/x/tools/go/callgraph/cha"
"golang.org/x/tools/go/ssa"
"golang.org/x/tools/go/ssa/ssautil"
+ "golang.org/x/tools/internal/typeparams"
)
func TestVTACallGraph(t *testing.T) {
@@ -24,9 +25,11 @@ func TestVTACallGraph(t *testing.T) {
"testdata/src/callgraph_collections.go",
"testdata/src/callgraph_fields.go",
"testdata/src/callgraph_field_funcs.go",
+ "testdata/src/callgraph_recursive_types.go",
+ "testdata/src/callgraph_issue_57756.go",
} {
t.Run(file, func(t *testing.T) {
- prog, want, err := testProg(file)
+ prog, want, err := testProg(file, ssa.BuilderMode(0))
if err != nil {
t.Fatalf("couldn't load test file '%s': %s", file, err)
}
@@ -46,7 +49,7 @@ func TestVTACallGraph(t *testing.T) {
// enabled by having an arbitrary function set as input to CallGraph
// instead of the whole program (i.e., ssautil.AllFunctions(prog)).
func TestVTAProgVsFuncSet(t *testing.T) {
- prog, want, err := testProg("testdata/src/callgraph_nested_ptr.go")
+ prog, want, err := testProg("testdata/src/callgraph_nested_ptr.go", ssa.BuilderMode(0))
if err != nil {
t.Fatalf("couldn't load test `testdata/src/callgraph_nested_ptr.go`: %s", err)
}
@@ -111,3 +114,24 @@ func TestVTAPanicMissingDefinitions(t *testing.T) {
}
}
}
+
+func TestVTACallGraphGenerics(t *testing.T) {
+ if !typeparams.Enabled {
+ t.Skip("TestVTACallGraphGenerics requires type parameters")
+ }
+
+ // TODO(zpavlinovic): add more tests
+ file := "testdata/src/callgraph_generics.go"
+ prog, want, err := testProg(file, ssa.InstantiateGenerics)
+ if err != nil {
+ t.Fatalf("couldn't load test file '%s': %s", file, err)
+ }
+ if len(want) == 0 {
+ t.Fatalf("couldn't find want in `%s`", file)
+ }
+
+ g := CallGraph(ssautil.AllFunctions(prog), cha.CallGraph(prog))
+ if got := callGraphStr(g); !subGraph(want, got) {
+ t.Errorf("computed callgraph %v should contain %v", got, want)
+ }
+}
diff --git a/go/cfg/builder.go b/go/cfg/builder.go
index 7f95a2961..dad6a444d 100644
--- a/go/cfg/builder.go
+++ b/go/cfg/builder.go
@@ -443,7 +443,6 @@ func (b *builder) rangeStmt(s *ast.RangeStmt, label *lblock) {
// Destinations associated with unlabeled for/switch/select stmts.
// We push/pop one of these as we enter/leave each construct and for
// each BranchStmt we scan for the innermost target of the right type.
-//
type targets struct {
tail *targets // rest of stack
_break *Block
@@ -454,7 +453,6 @@ type targets struct {
// Destinations associated with a labeled block.
// We populate these as labels are encountered in forward gotos or
// labeled statements.
-//
type lblock struct {
_goto *Block
_break *Block
@@ -463,7 +461,6 @@ type lblock struct {
// labeledBlock returns the branch target associated with the
// specified label, creating it if needed.
-//
func (b *builder) labeledBlock(label *ast.Ident) *lblock {
lb := b.lblocks[label.Obj]
if lb == nil {
diff --git a/go/cfg/cfg.go b/go/cfg/cfg.go
index 3ebc65f60..37d799f4b 100644
--- a/go/cfg/cfg.go
+++ b/go/cfg/cfg.go
@@ -20,14 +20,14 @@
//
// produces this CFG:
//
-// 1: x := f()
-// x != nil
-// succs: 2, 3
-// 2: T()
-// succs: 4
-// 3: F()
-// succs: 4
-// 4:
+// 1: x := f()
+// x != nil
+// succs: 2, 3
+// 2: T()
+// succs: 4
+// 3: F()
+// succs: 4
+// 4:
//
// The CFG does contain Return statements; even implicit returns are
// materialized (at the position of the function's closing brace).
@@ -36,7 +36,6 @@
// edges, nor the short-circuit semantics of the && and || operators,
// nor abnormal control flow caused by panic. If you need this
// information, use golang.org/x/tools/go/ssa instead.
-//
package cfg
import (
diff --git a/go/expect/expect.go b/go/expect/expect.go
index bb203f58c..f5172ceab 100644
--- a/go/expect/expect.go
+++ b/go/expect/expect.go
@@ -16,20 +16,19 @@ The interpretation of the notes depends on the application.
For example, the test suite for a static checking tool might
use a @diag note to indicate an expected diagnostic:
- fmt.Printf("%s", 1) //@ diag("%s wants a string, got int")
+ fmt.Printf("%s", 1) //@ diag("%s wants a string, got int")
By contrast, the test suite for a source code navigation tool
might use notes to indicate the positions of features of
interest, the actions to be performed by the test,
and their expected outcomes:
- var x = 1 //@ x_decl
- ...
- print(x) //@ definition("x", x_decl)
- print(x) //@ typeof("x", "int")
+ var x = 1 //@ x_decl
+ ...
+ print(x) //@ definition("x", x_decl)
+ print(x) //@ typeof("x", "int")
-
-Note comment syntax
+# Note comment syntax
Note comments always start with the special marker @, which must be the
very first character after the comment opening pair, so //@ or /*@ with no
diff --git a/go/expect/expect_test.go b/go/expect/expect_test.go
index bd25ef831..e9ae40f7e 100644
--- a/go/expect/expect_test.go
+++ b/go/expect/expect_test.go
@@ -43,7 +43,7 @@ func TestMarker(t *testing.T) {
},
},
{
- filename: "testdata/go.mod",
+ filename: "testdata/go.fake.mod",
expectNotes: 2,
expectMarkers: map[string]string{
"αMarker": "αfake1α",
diff --git a/go/expect/testdata/go.fake.mod b/go/expect/testdata/go.fake.mod
new file mode 100644
index 000000000..ca84fcee9
--- /dev/null
+++ b/go/expect/testdata/go.fake.mod
@@ -0,0 +1,9 @@
+// This file is named go.fake.mod so it does not define a real module, which
+// would make the contents of this directory unavailable to the test when run
+// from outside the repository.
+
+module αfake1α //@mark(αMarker, "αfake1α")
+
+go 1.14
+
+require golang.org/modfile v0.0.0 //@mark(βMarker, "require golang.org/modfile v0.0.0")
diff --git a/go/expect/testdata/go.mod b/go/expect/testdata/go.mod
deleted file mode 100644
index d0323eae6..000000000
--- a/go/expect/testdata/go.mod
+++ /dev/null
@@ -1,5 +0,0 @@
-module αfake1α //@mark(αMarker, "αfake1α")
-
-go 1.14
-
-require golang.org/modfile v0.0.0 //@mark(βMarker, "require golang.org/modfile v0.0.0")
diff --git a/go/gccgoexportdata/gccgoexportdata_test.go b/go/gccgoexportdata/gccgoexportdata_test.go
index 0d0410249..39f0981c4 100644
--- a/go/gccgoexportdata/gccgoexportdata_test.go
+++ b/go/gccgoexportdata/gccgoexportdata_test.go
@@ -18,12 +18,12 @@ import (
//
// The testdata/{short,long}.a ELF archive files were produced by:
//
-// $ echo 'package foo; func F()' > foo.go
-// $ gccgo -c -fgo-pkgpath blah foo.go
-// $ objcopy -j .go_export foo.o foo.gox
-// $ ar q short.a foo.gox
-// $ objcopy -j .go_export foo.o name-longer-than-16-bytes.gox
-// $ ar q long.a name-longer-than-16-bytes.gox
+// $ echo 'package foo; func F()' > foo.go
+// $ gccgo -c -fgo-pkgpath blah foo.go
+// $ objcopy -j .go_export foo.o foo.gox
+// $ ar q short.a foo.gox
+// $ objcopy -j .go_export foo.o name-longer-than-16-bytes.gox
+// $ ar q long.a name-longer-than-16-bytes.gox
//
// The file long.a contains an archive string table.
//
diff --git a/go/gcexportdata/example_test.go b/go/gcexportdata/example_test.go
index 7df05abae..7371d31d4 100644
--- a/go/gcexportdata/example_test.go
+++ b/go/gcexportdata/example_test.go
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build go1.7 && gc
-// +build go1.7,gc
+//go:build go1.7 && gc && !android && !ios && !js
+// +build go1.7,gc,!android,!ios,!js
package gcexportdata_test
@@ -30,7 +30,6 @@ func ExampleRead() {
log.Fatalf("can't find export data for fmt")
}
fmt.Printf("Package path: %s\n", path)
- fmt.Printf("Export data: %s\n", filepath.Base(filename))
// Open and read the file.
f, err := os.Open(filename)
@@ -51,25 +50,36 @@ func ExampleRead() {
log.Fatal(err)
}
- // Print package information.
+ // We can see all the names in Names.
members := pkg.Scope().Names()
- if members[0] == ".inittask" {
- // An improvement to init handling in 1.13 added ".inittask". Remove so go >= 1.13 and go < 1.13 both pass.
- members = members[1:]
+ foundPrintln := false
+ for _, member := range members {
+ if member == "Println" {
+ foundPrintln = true
+ break
+ }
}
- fmt.Printf("Package members: %s...\n", members[:5])
+ fmt.Print("Package members: ")
+ if foundPrintln {
+ fmt.Println("Println found")
+ } else {
+ fmt.Println("Println not found")
+ }
+
+ // We can also look up a name directly using Lookup.
println := pkg.Scope().Lookup("Println")
- posn := fset.Position(println.Pos())
- posn.Line = 123 // make example deterministic
- typ := strings.ReplaceAll(println.Type().String(), "interface{}", "any") // go 1.18+ uses the 'any' alias
+ // go 1.18+ uses the 'any' alias
+ typ := strings.ReplaceAll(println.Type().String(), "interface{}", "any")
fmt.Printf("Println type: %s\n", typ)
+ posn := fset.Position(println.Pos())
+ // make example deterministic
+ posn.Line = 123
fmt.Printf("Println location: %s\n", slashify(posn))
// Output:
//
// Package path: fmt
- // Export data: fmt.a
- // Package members: [Errorf Formatter Fprint Fprintf Fprintln]...
+ // Package members: Println found
// Println type: func(a ...any) (n int, err error)
// Println location: $GOROOT/src/fmt/print.go:123:1
}
diff --git a/go/gcexportdata/gcexportdata.go b/go/gcexportdata/gcexportdata.go
index cec819d64..165ede0f8 100644
--- a/go/gcexportdata/gcexportdata.go
+++ b/go/gcexportdata/gcexportdata.go
@@ -17,32 +17,46 @@
// developer tools, which will then be able to consume both Go 1.7 and
// Go 1.8 export data files, so they will work before and after the
// Go update. (See discussion at https://golang.org/issue/15651.)
-//
package gcexportdata // import "golang.org/x/tools/go/gcexportdata"
import (
"bufio"
"bytes"
+ "encoding/json"
"fmt"
"go/token"
"go/types"
"io"
- "io/ioutil"
+ "os/exec"
- "golang.org/x/tools/go/internal/gcimporter"
+ "golang.org/x/tools/internal/gcimporter"
)
// Find returns the name of an object (.o) or archive (.a) file
// containing type information for the specified import path,
-// using the workspace layout conventions of go/build.
+// using the go command.
// If no file was found, an empty filename is returned.
//
// A relative srcDir is interpreted relative to the current working directory.
//
// Find also returns the package's resolved (canonical) import path,
// reflecting the effects of srcDir and vendoring on importPath.
+//
+// Deprecated: Use the higher-level API in golang.org/x/tools/go/packages,
+// which is more efficient.
func Find(importPath, srcDir string) (filename, path string) {
- return gcimporter.FindPkg(importPath, srcDir)
+ cmd := exec.Command("go", "list", "-json", "-export", "--", importPath)
+ cmd.Dir = srcDir
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ return "", ""
+ }
+ var data struct {
+ ImportPath string
+ Export string
+ }
+ json.Unmarshal(out, &data)
+ return data.Export, data.ImportPath
}
// NewReader returns a reader for the export data section of an object
@@ -70,9 +84,26 @@ func NewReader(r io.Reader) (io.Reader, error) {
}
}
+// readAll works the same way as io.ReadAll, but avoids allocations and copies
+// by preallocating a byte slice of the necessary size if the size is known up
+// front. This is always possible when the input is an archive. In that case,
+// NewReader will return the known size using an io.LimitedReader.
+func readAll(r io.Reader) ([]byte, error) {
+ if lr, ok := r.(*io.LimitedReader); ok {
+ data := make([]byte, lr.N)
+ _, err := io.ReadFull(lr, data)
+ return data, err
+ }
+ return io.ReadAll(r)
+}
+
// Read reads export data from in, decodes it, and returns type
// information for the package.
-// The package name is specified by path.
+//
+// The package path (effectively its linker symbol prefix) is
+// specified by path, since unlike the package name, this information
+// may not be recorded in the export data.
+//
// File position information is added to fset.
//
// Read may inspect and add to the imports map to ensure that references
@@ -83,7 +114,7 @@ func NewReader(r io.Reader) (io.Reader, error) {
//
// On return, the state of the reader is undefined.
func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) {
- data, err := ioutil.ReadAll(in)
+ data, err := readAll(in)
if err != nil {
return nil, fmt.Errorf("reading export data for %q: %v", path, err)
}
@@ -92,22 +123,32 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package,
return nil, fmt.Errorf("can't read export data for %q directly from an archive file (call gcexportdata.NewReader first to extract export data)", path)
}
- // The App Engine Go runtime v1.6 uses the old export data format.
- // TODO(adonovan): delete once v1.7 has been around for a while.
- if bytes.HasPrefix(data, []byte("package ")) {
- return gcimporter.ImportData(imports, path, path, bytes.NewReader(data))
- }
-
// The indexed export format starts with an 'i'; the older
// binary export format starts with a 'c', 'd', or 'v'
// (from "version"). Select appropriate importer.
- if len(data) > 0 && data[0] == 'i' {
- _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path)
- return pkg, err
- }
+ if len(data) > 0 {
+ switch data[0] {
+ case 'i':
+ _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path)
+ return pkg, err
+
+ case 'v', 'c', 'd':
+ _, pkg, err := gcimporter.BImportData(fset, imports, data, path)
+ return pkg, err
- _, pkg, err := gcimporter.BImportData(fset, imports, data, path)
- return pkg, err
+ case 'u':
+ _, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path)
+ return pkg, err
+
+ default:
+ l := len(data)
+ if l > 10 {
+ l = 10
+ }
+ return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), path)
+ }
+ }
+ return nil, fmt.Errorf("empty export data for %s", path)
}
// Write writes encoded type information for the specified package to out.
@@ -130,7 +171,7 @@ func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error {
//
// Experimental: This API is experimental and may change in the future.
func ReadBundle(in io.Reader, fset *token.FileSet, imports map[string]*types.Package) ([]*types.Package, error) {
- data, err := ioutil.ReadAll(in)
+ data, err := readAll(in)
if err != nil {
return nil, fmt.Errorf("reading export bundle: %v", err)
}
diff --git a/go/gcexportdata/gcexportdata_test.go b/go/gcexportdata/gcexportdata_test.go
deleted file mode 100644
index a0006c02d..000000000
--- a/go/gcexportdata/gcexportdata_test.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gcexportdata_test
-
-import (
- "go/token"
- "go/types"
- "log"
- "os"
- "testing"
-
- "golang.org/x/tools/go/gcexportdata"
-)
-
-// Test to ensure that gcexportdata can read files produced by App
-// Engine Go runtime v1.6.
-func TestAppEngine16(t *testing.T) {
- // Open and read the file.
- f, err := os.Open("testdata/errors-ae16.a")
- if err != nil {
- t.Fatal(err)
- }
- defer f.Close()
- r, err := gcexportdata.NewReader(f)
- if err != nil {
- log.Fatalf("reading export data: %v", err)
- }
-
- // Decode the export data.
- fset := token.NewFileSet()
- imports := make(map[string]*types.Package)
- pkg, err := gcexportdata.Read(r, fset, imports, "errors")
- if err != nil {
- log.Fatal(err)
- }
-
- // Print package information.
- got := pkg.Scope().Lookup("New").Type().String()
- want := "func(text string) error"
- if got != want {
- t.Errorf("New.Type = %s, want %s", got, want)
- }
-}
diff --git a/go/gcexportdata/importer.go b/go/gcexportdata/importer.go
index efe221e7e..37a7247e2 100644
--- a/go/gcexportdata/importer.go
+++ b/go/gcexportdata/importer.go
@@ -23,6 +23,8 @@ import (
// or to control the FileSet or access the imports map populated during
// package loading.
//
+// Deprecated: Use the higher-level API in golang.org/x/tools/go/packages,
+// which is more efficient.
func NewImporter(fset *token.FileSet, imports map[string]*types.Package) types.ImporterFrom {
return importer{fset, imports}
}
diff --git a/go/gcexportdata/testdata/errors-ae16.a b/go/gcexportdata/testdata/errors-ae16.a
deleted file mode 100644
index 3f1dad54f..000000000
--- a/go/gcexportdata/testdata/errors-ae16.a
+++ /dev/null
Binary files differ
diff --git a/go/internal/cgo/cgo.go b/go/internal/cgo/cgo.go
index d01fb04a6..3fce48003 100644
--- a/go/internal/cgo/cgo.go
+++ b/go/internal/cgo/cgo.go
@@ -69,7 +69,6 @@ import (
// ProcessFiles invokes the cgo preprocessor on bp.CgoFiles, parses
// the output and returns the resulting ASTs.
-//
func ProcessFiles(bp *build.Package, fset *token.FileSet, DisplayPath func(path string) string, mode parser.Mode) ([]*ast.File, error) {
tmpdir, err := ioutil.TempDir("", strings.Replace(bp.ImportPath, "/", "_", -1)+"_C")
if err != nil {
diff --git a/go/internal/gccgoimporter/parser.go b/go/internal/gccgoimporter/parser.go
index 7f07553e8..9fdb6f8b0 100644
--- a/go/internal/gccgoimporter/parser.go
+++ b/go/internal/gccgoimporter/parser.go
@@ -127,8 +127,10 @@ func (p *parser) parseString() string {
return str
}
-// unquotedString = { unquotedStringChar } .
-// unquotedStringChar = <neither a whitespace nor a ';' char> .
+// parseUnquotedString parses an UnquotedString:
+//
+// unquotedString = { unquotedStringChar } .
+// unquotedStringChar = <neither a whitespace nor a ';' char> .
func (p *parser) parseUnquotedString() string {
if p.tok == scanner.EOF {
p.error("unexpected EOF")
@@ -163,7 +165,10 @@ func (p *parser) parseUnquotedQualifiedName() (path, name string) {
return p.parseQualifiedNameStr(p.parseUnquotedString())
}
-// qualifiedName = [ ["."] unquotedString "." ] unquotedString .
+// parseQualifiedNameStr is given the leading name (unquoted by the caller if necessary)
+// and then parses the remainder of a qualified name:
+//
+// qualifiedName = [ ["."] unquotedString "." ] unquotedString .
//
// The above production uses greedy matching.
func (p *parser) parseQualifiedNameStr(unquotedName string) (pkgpath, name string) {
@@ -191,7 +196,6 @@ func (p *parser) parseQualifiedNameStr(unquotedName string) (pkgpath, name strin
// getPkg returns the package for a given path. If the package is
// not found but we have a package name, create the package and
// add it to the p.imports map.
-//
func (p *parser) getPkg(pkgpath, name string) *types.Package {
// package unsafe is not in the imports map - handle explicitly
if pkgpath == "unsafe" {
@@ -208,7 +212,7 @@ func (p *parser) getPkg(pkgpath, name string) *types.Package {
// parseExportedName is like parseQualifiedName, but
// the package path is resolved to an imported *types.Package.
//
-// ExportedName = string [string] .
+// ExportedName = string [string] .
func (p *parser) parseExportedName() (pkg *types.Package, name string) {
path, name := p.parseQualifiedName()
var pkgname string
@@ -222,7 +226,9 @@ func (p *parser) parseExportedName() (pkg *types.Package, name string) {
return
}
-// Name = QualifiedName | "?" .
+// parseName parses a Name:
+//
+// Name = QualifiedName | "?" .
func (p *parser) parseName() string {
if p.tok == '?' {
// Anonymous.
@@ -241,7 +247,9 @@ func deref(typ types.Type) types.Type {
return typ
}
-// Field = Name Type [string] .
+// parseField parses a Field:
+//
+// Field = Name Type [string] .
func (p *parser) parseField(pkg *types.Package) (field *types.Var, tag string) {
name := p.parseName()
typ, n := p.parseTypeExtended(pkg)
@@ -269,7 +277,9 @@ func (p *parser) parseField(pkg *types.Package) (field *types.Var, tag string) {
return
}
-// Param = Name ["..."] Type .
+// parseParam parses a Param:
+//
+// Param = Name ["..."] Type .
func (p *parser) parseParam(pkg *types.Package) (param *types.Var, isVariadic bool) {
name := p.parseName()
// Ignore names invented for inlinable functions.
@@ -298,7 +308,9 @@ func (p *parser) parseParam(pkg *types.Package) (param *types.Var, isVariadic bo
return
}
-// Var = Name Type .
+// parseVar parses a Var:
+//
+// Var = Name Type .
func (p *parser) parseVar(pkg *types.Package) *types.Var {
name := p.parseName()
v := types.NewVar(token.NoPos, pkg, name, p.parseType(pkg))
@@ -311,7 +323,9 @@ func (p *parser) parseVar(pkg *types.Package) *types.Var {
return v
}
-// Conversion = "convert" "(" Type "," ConstValue ")" .
+// parseConversion parses a Conversion:
+//
+// Conversion = "convert" "(" Type "," ConstValue ")" .
func (p *parser) parseConversion(pkg *types.Package) (val constant.Value, typ types.Type) {
p.expectKeyword("convert")
p.expect('(')
@@ -322,8 +336,10 @@ func (p *parser) parseConversion(pkg *types.Package) (val constant.Value, typ ty
return
}
-// ConstValue = string | "false" | "true" | ["-"] (int ["'"] | FloatOrComplex) | Conversion .
-// FloatOrComplex = float ["i" | ("+"|"-") float "i"] .
+// parseConstValue parses a ConstValue:
+//
+// ConstValue = string | "false" | "true" | ["-"] (int ["'"] | FloatOrComplex) | Conversion .
+// FloatOrComplex = float ["i" | ("+"|"-") float "i"] .
func (p *parser) parseConstValue(pkg *types.Package) (val constant.Value, typ types.Type) {
// v3 changed to $false, $true, $convert, to avoid confusion
// with variable names in inline function bodies.
@@ -429,7 +445,9 @@ func (p *parser) parseConstValue(pkg *types.Package) (val constant.Value, typ ty
return
}
-// Const = Name [Type] "=" ConstValue .
+// parseConst parses a Const:
+//
+// Const = Name [Type] "=" ConstValue .
func (p *parser) parseConst(pkg *types.Package) *types.Const {
name := p.parseName()
var typ types.Type
@@ -510,9 +528,11 @@ func (p *parser) update(t types.Type, nlist []interface{}) {
}
}
-// NamedType = TypeName [ "=" ] Type { Method } .
-// TypeName = ExportedName .
-// Method = "func" "(" Param ")" Name ParamList ResultList [InlineBody] ";" .
+// parseNamedType parses a NamedType:
+//
+// NamedType = TypeName [ "=" ] Type { Method } .
+// TypeName = ExportedName .
+// Method = "func" "(" Param ")" Name ParamList ResultList [InlineBody] ";" .
func (p *parser) parseNamedType(nlist []interface{}) types.Type {
pkg, name := p.parseExportedName()
scope := pkg.Scope()
@@ -629,7 +649,9 @@ func (p *parser) parseInt() int {
return int(n)
}
-// ArrayOrSliceType = "[" [ int ] "]" Type .
+// parseArrayOrSliceType parses an ArrayOrSliceType:
+//
+// ArrayOrSliceType = "[" [ int ] "]" Type .
func (p *parser) parseArrayOrSliceType(pkg *types.Package, nlist []interface{}) types.Type {
p.expect('[')
if p.tok == ']' {
@@ -652,7 +674,9 @@ func (p *parser) parseArrayOrSliceType(pkg *types.Package, nlist []interface{})
return t
}
-// MapType = "map" "[" Type "]" Type .
+// parseMapType parses a MapType:
+//
+// MapType = "map" "[" Type "]" Type .
func (p *parser) parseMapType(pkg *types.Package, nlist []interface{}) types.Type {
p.expectKeyword("map")
@@ -668,7 +692,9 @@ func (p *parser) parseMapType(pkg *types.Package, nlist []interface{}) types.Typ
return t
}
-// ChanType = "chan" ["<-" | "-<"] Type .
+// parseChanType parses a ChanType:
+//
+// ChanType = "chan" ["<-" | "-<"] Type .
func (p *parser) parseChanType(pkg *types.Package, nlist []interface{}) types.Type {
p.expectKeyword("chan")
@@ -695,7 +721,9 @@ func (p *parser) parseChanType(pkg *types.Package, nlist []interface{}) types.Ty
return t
}
-// StructType = "struct" "{" { Field } "}" .
+// parseStructType parses a StructType:
+//
+// StructType = "struct" "{" { Field } "}" .
func (p *parser) parseStructType(pkg *types.Package, nlist []interface{}) types.Type {
p.expectKeyword("struct")
@@ -718,7 +746,9 @@ func (p *parser) parseStructType(pkg *types.Package, nlist []interface{}) types.
return t
}
-// ParamList = "(" [ { Parameter "," } Parameter ] ")" .
+// parseParamList parses a ParamList:
+//
+// ParamList = "(" [ { Parameter "," } Parameter ] ")" .
func (p *parser) parseParamList(pkg *types.Package) (*types.Tuple, bool) {
var list []*types.Var
isVariadic := false
@@ -742,7 +772,9 @@ func (p *parser) parseParamList(pkg *types.Package) (*types.Tuple, bool) {
return types.NewTuple(list...), isVariadic
}
-// ResultList = Type | ParamList .
+// parseResultList parses a ResultList:
+//
+// ResultList = Type | ParamList .
func (p *parser) parseResultList(pkg *types.Package) *types.Tuple {
switch p.tok {
case '<':
@@ -762,7 +794,9 @@ func (p *parser) parseResultList(pkg *types.Package) *types.Tuple {
}
}
-// FunctionType = ParamList ResultList .
+// parseFunctionType parses a FunctionType:
+//
+// FunctionType = ParamList ResultList .
func (p *parser) parseFunctionType(pkg *types.Package, nlist []interface{}) *types.Signature {
t := new(types.Signature)
p.update(t, nlist)
@@ -774,7 +808,9 @@ func (p *parser) parseFunctionType(pkg *types.Package, nlist []interface{}) *typ
return t
}
-// Func = Name FunctionType [InlineBody] .
+// parseFunc parses a Func:
+//
+// Func = Name FunctionType [InlineBody] .
func (p *parser) parseFunc(pkg *types.Package) *types.Func {
if p.tok == '/' {
// Skip an /*asm ID */ comment.
@@ -802,7 +838,9 @@ func (p *parser) parseFunc(pkg *types.Package) *types.Func {
return f
}
-// InterfaceType = "interface" "{" { ("?" Type | Func) ";" } "}" .
+// parseInterfaceType parses an InterfaceType:
+//
+// InterfaceType = "interface" "{" { ("?" Type | Func) ";" } "}" .
func (p *parser) parseInterfaceType(pkg *types.Package, nlist []interface{}) types.Type {
p.expectKeyword("interface")
@@ -831,7 +869,9 @@ func (p *parser) parseInterfaceType(pkg *types.Package, nlist []interface{}) typ
return t
}
-// PointerType = "*" ("any" | Type) .
+// parsePointerType parses a PointerType:
+//
+// PointerType = "*" ("any" | Type) .
func (p *parser) parsePointerType(pkg *types.Package, nlist []interface{}) types.Type {
p.expect('*')
if p.tok == scanner.Ident {
@@ -849,7 +889,9 @@ func (p *parser) parsePointerType(pkg *types.Package, nlist []interface{}) types
return t
}
-// TypeSpec = NamedType | MapType | ChanType | StructType | InterfaceType | PointerType | ArrayOrSliceType | FunctionType .
+// parseTypeSpec parses a TypeSpec:
+//
+// TypeSpec = NamedType | MapType | ChanType | StructType | InterfaceType | PointerType | ArrayOrSliceType | FunctionType .
func (p *parser) parseTypeSpec(pkg *types.Package, nlist []interface{}) types.Type {
switch p.tok {
case scanner.String:
@@ -935,10 +977,11 @@ func lookupBuiltinType(typ int) types.Type {
}[typ]
}
-// Type = "<" "type" ( "-" int | int [ TypeSpec ] ) ">" .
+// parseType parses a Type:
//
-// parseType updates the type map to t for all type numbers n.
+// Type = "<" "type" ( "-" int | int [ TypeSpec ] ) ">" .
//
+// parseType updates the type map to t for all type numbers n.
func (p *parser) parseType(pkg *types.Package, n ...interface{}) types.Type {
p.expect('<')
t, _ := p.parseTypeAfterAngle(pkg, n...)
@@ -1028,7 +1071,9 @@ func (p *parser) skipInlineBody() {
}
}
-// Types = "types" maxp1 exportedp1 (offset length)* .
+// parseTypes parses a Types:
+//
+// Types = "types" maxp1 exportedp1 (offset length)* .
func (p *parser) parseTypes(pkg *types.Package) {
maxp1 := p.parseInt()
exportedp1 := p.parseInt()
@@ -1102,7 +1147,9 @@ func (p *parser) parseSavedType(pkg *types.Package, i int, nlist []interface{})
}
}
-// PackageInit = unquotedString unquotedString int .
+// parsePackageInit parses a PackageInit:
+//
+// PackageInit = unquotedString unquotedString int .
func (p *parser) parsePackageInit() PackageInit {
name := p.parseUnquotedString()
initfunc := p.parseUnquotedString()
@@ -1120,10 +1167,12 @@ func (p *parser) maybeCreatePackage() {
}
}
-// InitDataDirective = ( "v1" | "v2" | "v3" ) ";" |
-// "priority" int ";" |
-// "init" { PackageInit } ";" |
-// "checksum" unquotedString ";" .
+// parseInitDataDirective parses an InitDataDirective:
+//
+// InitDataDirective = ( "v1" | "v2" | "v3" ) ";" |
+// "priority" int ";" |
+// "init" { PackageInit } ";" |
+// "checksum" unquotedString ";" .
func (p *parser) parseInitDataDirective() {
if p.tok != scanner.Ident {
// unexpected token kind; panic
@@ -1173,16 +1222,18 @@ func (p *parser) parseInitDataDirective() {
}
}
-// Directive = InitDataDirective |
-// "package" unquotedString [ unquotedString ] [ unquotedString ] ";" |
-// "pkgpath" unquotedString ";" |
-// "prefix" unquotedString ";" |
-// "import" unquotedString unquotedString string ";" |
-// "indirectimport" unquotedString unquotedstring ";" |
-// "func" Func ";" |
-// "type" Type ";" |
-// "var" Var ";" |
-// "const" Const ";" .
+// parseDirective parses a Directive:
+//
+// Directive = InitDataDirective |
+// "package" unquotedString [ unquotedString ] [ unquotedString ] ";" |
+// "pkgpath" unquotedString ";" |
+// "prefix" unquotedString ";" |
+// "import" unquotedString unquotedString string ";" |
+// "indirectimport" unquotedString unquotedstring ";" |
+// "func" Func ";" |
+// "type" Type ";" |
+// "var" Var ";" |
+// "const" Const ";" .
func (p *parser) parseDirective() {
if p.tok != scanner.Ident {
// unexpected token kind; panic
@@ -1266,7 +1317,9 @@ func (p *parser) parseDirective() {
}
}
-// Package = { Directive } .
+// parsePackage parses a Package:
+//
+// Package = { Directive } .
func (p *parser) parsePackage() *types.Package {
for p.tok != scanner.EOF {
p.parseDirective()
diff --git a/go/internal/gccgoimporter/testenv_test.go b/go/internal/gccgoimporter/testenv_test.go
index 7afa464d9..9be8dcb32 100644
--- a/go/internal/gccgoimporter/testenv_test.go
+++ b/go/internal/gccgoimporter/testenv_test.go
@@ -12,7 +12,7 @@ import (
"testing"
)
-// HasGoBuild reports whether the current system can build programs with ``go build''
+// HasGoBuild reports whether the current system can build programs with “go build”
// and then run them with os.StartProcess or exec.Command.
func HasGoBuild() bool {
switch runtime.GOOS {
@@ -40,7 +40,7 @@ func HasExec() bool {
return true
}
-// MustHaveGoBuild checks that the current system can build programs with ``go build''
+// MustHaveGoBuild checks that the current system can build programs with “go build”
// and then run them with os.StartProcess or exec.Command.
// If not, MustHaveGoBuild calls t.Skip with an explanation.
func MustHaveGoBuild(t *testing.T) {
diff --git a/go/internal/gcimporter/bexport.go b/go/internal/gcimporter/bexport.go
deleted file mode 100644
index 0a3cdb9a3..000000000
--- a/go/internal/gcimporter/bexport.go
+++ /dev/null
@@ -1,851 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Binary package export.
-// This file was derived from $GOROOT/src/cmd/compile/internal/gc/bexport.go;
-// see that file for specification of the format.
-
-package gcimporter
-
-import (
- "bytes"
- "encoding/binary"
- "fmt"
- "go/ast"
- "go/constant"
- "go/token"
- "go/types"
- "math"
- "math/big"
- "sort"
- "strings"
-)
-
-// If debugFormat is set, each integer and string value is preceded by a marker
-// and position information in the encoding. This mechanism permits an importer
-// to recognize immediately when it is out of sync. The importer recognizes this
-// mode automatically (i.e., it can import export data produced with debugging
-// support even if debugFormat is not set at the time of import). This mode will
-// lead to massively larger export data (by a factor of 2 to 3) and should only
-// be enabled during development and debugging.
-//
-// NOTE: This flag is the first flag to enable if importing dies because of
-// (suspected) format errors, and whenever a change is made to the format.
-const debugFormat = false // default: false
-
-// Current export format version. Increase with each format change.
-// Note: The latest binary (non-indexed) export format is at version 6.
-// This exporter is still at level 4, but it doesn't matter since
-// the binary importer can handle older versions just fine.
-// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE
-// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMEMTED HERE
-// 4: type name objects support type aliases, uses aliasTag
-// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used)
-// 2: removed unused bool in ODCL export (compiler only)
-// 1: header format change (more regular), export package for _ struct fields
-// 0: Go1.7 encoding
-const exportVersion = 4
-
-// trackAllTypes enables cycle tracking for all types, not just named
-// types. The existing compiler invariants assume that unnamed types
-// that are not completely set up are not used, or else there are spurious
-// errors.
-// If disabled, only named types are tracked, possibly leading to slightly
-// less efficient encoding in rare cases. It also prevents the export of
-// some corner-case type declarations (but those are not handled correctly
-// with with the textual export format either).
-// TODO(gri) enable and remove once issues caused by it are fixed
-const trackAllTypes = false
-
-type exporter struct {
- fset *token.FileSet
- out bytes.Buffer
-
- // object -> index maps, indexed in order of serialization
- strIndex map[string]int
- pkgIndex map[*types.Package]int
- typIndex map[types.Type]int
-
- // position encoding
- posInfoFormat bool
- prevFile string
- prevLine int
-
- // debugging support
- written int // bytes written
- indent int // for trace
-}
-
-// internalError represents an error generated inside this package.
-type internalError string
-
-func (e internalError) Error() string { return "gcimporter: " + string(e) }
-
-func internalErrorf(format string, args ...interface{}) error {
- return internalError(fmt.Sprintf(format, args...))
-}
-
-// BExportData returns binary export data for pkg.
-// If no file set is provided, position info will be missing.
-func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) {
- if !debug {
- defer func() {
- if e := recover(); e != nil {
- if ierr, ok := e.(internalError); ok {
- err = ierr
- return
- }
- // Not an internal error; panic again.
- panic(e)
- }
- }()
- }
-
- p := exporter{
- fset: fset,
- strIndex: map[string]int{"": 0}, // empty string is mapped to 0
- pkgIndex: make(map[*types.Package]int),
- typIndex: make(map[types.Type]int),
- posInfoFormat: true, // TODO(gri) might become a flag, eventually
- }
-
- // write version info
- // The version string must start with "version %d" where %d is the version
- // number. Additional debugging information may follow after a blank; that
- // text is ignored by the importer.
- p.rawStringln(fmt.Sprintf("version %d", exportVersion))
- var debug string
- if debugFormat {
- debug = "debug"
- }
- p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly
- p.bool(trackAllTypes)
- p.bool(p.posInfoFormat)
-
- // --- generic export data ---
-
- // populate type map with predeclared "known" types
- for index, typ := range predeclared() {
- p.typIndex[typ] = index
- }
- if len(p.typIndex) != len(predeclared()) {
- return nil, internalError("duplicate entries in type map?")
- }
-
- // write package data
- p.pkg(pkg, true)
- if trace {
- p.tracef("\n")
- }
-
- // write objects
- objcount := 0
- scope := pkg.Scope()
- for _, name := range scope.Names() {
- if !ast.IsExported(name) {
- continue
- }
- if trace {
- p.tracef("\n")
- }
- p.obj(scope.Lookup(name))
- objcount++
- }
-
- // indicate end of list
- if trace {
- p.tracef("\n")
- }
- p.tag(endTag)
-
- // for self-verification only (redundant)
- p.int(objcount)
-
- if trace {
- p.tracef("\n")
- }
-
- // --- end of export data ---
-
- return p.out.Bytes(), nil
-}
-
-func (p *exporter) pkg(pkg *types.Package, emptypath bool) {
- if pkg == nil {
- panic(internalError("unexpected nil pkg"))
- }
-
- // if we saw the package before, write its index (>= 0)
- if i, ok := p.pkgIndex[pkg]; ok {
- p.index('P', i)
- return
- }
-
- // otherwise, remember the package, write the package tag (< 0) and package data
- if trace {
- p.tracef("P%d = { ", len(p.pkgIndex))
- defer p.tracef("} ")
- }
- p.pkgIndex[pkg] = len(p.pkgIndex)
-
- p.tag(packageTag)
- p.string(pkg.Name())
- if emptypath {
- p.string("")
- } else {
- p.string(pkg.Path())
- }
-}
-
-func (p *exporter) obj(obj types.Object) {
- switch obj := obj.(type) {
- case *types.Const:
- p.tag(constTag)
- p.pos(obj)
- p.qualifiedName(obj)
- p.typ(obj.Type())
- p.value(obj.Val())
-
- case *types.TypeName:
- if obj.IsAlias() {
- p.tag(aliasTag)
- p.pos(obj)
- p.qualifiedName(obj)
- } else {
- p.tag(typeTag)
- }
- p.typ(obj.Type())
-
- case *types.Var:
- p.tag(varTag)
- p.pos(obj)
- p.qualifiedName(obj)
- p.typ(obj.Type())
-
- case *types.Func:
- p.tag(funcTag)
- p.pos(obj)
- p.qualifiedName(obj)
- sig := obj.Type().(*types.Signature)
- p.paramList(sig.Params(), sig.Variadic())
- p.paramList(sig.Results(), false)
-
- default:
- panic(internalErrorf("unexpected object %v (%T)", obj, obj))
- }
-}
-
-func (p *exporter) pos(obj types.Object) {
- if !p.posInfoFormat {
- return
- }
-
- file, line := p.fileLine(obj)
- if file == p.prevFile {
- // common case: write line delta
- // delta == 0 means different file or no line change
- delta := line - p.prevLine
- p.int(delta)
- if delta == 0 {
- p.int(-1) // -1 means no file change
- }
- } else {
- // different file
- p.int(0)
- // Encode filename as length of common prefix with previous
- // filename, followed by (possibly empty) suffix. Filenames
- // frequently share path prefixes, so this can save a lot
- // of space and make export data size less dependent on file
- // path length. The suffix is unlikely to be empty because
- // file names tend to end in ".go".
- n := commonPrefixLen(p.prevFile, file)
- p.int(n) // n >= 0
- p.string(file[n:]) // write suffix only
- p.prevFile = file
- p.int(line)
- }
- p.prevLine = line
-}
-
-func (p *exporter) fileLine(obj types.Object) (file string, line int) {
- if p.fset != nil {
- pos := p.fset.Position(obj.Pos())
- file = pos.Filename
- line = pos.Line
- }
- return
-}
-
-func commonPrefixLen(a, b string) int {
- if len(a) > len(b) {
- a, b = b, a
- }
- // len(a) <= len(b)
- i := 0
- for i < len(a) && a[i] == b[i] {
- i++
- }
- return i
-}
-
-func (p *exporter) qualifiedName(obj types.Object) {
- p.string(obj.Name())
- p.pkg(obj.Pkg(), false)
-}
-
-func (p *exporter) typ(t types.Type) {
- if t == nil {
- panic(internalError("nil type"))
- }
-
- // Possible optimization: Anonymous pointer types *T where
- // T is a named type are common. We could canonicalize all
- // such types *T to a single type PT = *T. This would lead
- // to at most one *T entry in typIndex, and all future *T's
- // would be encoded as the respective index directly. Would
- // save 1 byte (pointerTag) per *T and reduce the typIndex
- // size (at the cost of a canonicalization map). We can do
- // this later, without encoding format change.
-
- // if we saw the type before, write its index (>= 0)
- if i, ok := p.typIndex[t]; ok {
- p.index('T', i)
- return
- }
-
- // otherwise, remember the type, write the type tag (< 0) and type data
- if trackAllTypes {
- if trace {
- p.tracef("T%d = {>\n", len(p.typIndex))
- defer p.tracef("<\n} ")
- }
- p.typIndex[t] = len(p.typIndex)
- }
-
- switch t := t.(type) {
- case *types.Named:
- if !trackAllTypes {
- // if we don't track all types, track named types now
- p.typIndex[t] = len(p.typIndex)
- }
-
- p.tag(namedTag)
- p.pos(t.Obj())
- p.qualifiedName(t.Obj())
- p.typ(t.Underlying())
- if !types.IsInterface(t) {
- p.assocMethods(t)
- }
-
- case *types.Array:
- p.tag(arrayTag)
- p.int64(t.Len())
- p.typ(t.Elem())
-
- case *types.Slice:
- p.tag(sliceTag)
- p.typ(t.Elem())
-
- case *dddSlice:
- p.tag(dddTag)
- p.typ(t.elem)
-
- case *types.Struct:
- p.tag(structTag)
- p.fieldList(t)
-
- case *types.Pointer:
- p.tag(pointerTag)
- p.typ(t.Elem())
-
- case *types.Signature:
- p.tag(signatureTag)
- p.paramList(t.Params(), t.Variadic())
- p.paramList(t.Results(), false)
-
- case *types.Interface:
- p.tag(interfaceTag)
- p.iface(t)
-
- case *types.Map:
- p.tag(mapTag)
- p.typ(t.Key())
- p.typ(t.Elem())
-
- case *types.Chan:
- p.tag(chanTag)
- p.int(int(3 - t.Dir())) // hack
- p.typ(t.Elem())
-
- default:
- panic(internalErrorf("unexpected type %T: %s", t, t))
- }
-}
-
-func (p *exporter) assocMethods(named *types.Named) {
- // Sort methods (for determinism).
- var methods []*types.Func
- for i := 0; i < named.NumMethods(); i++ {
- methods = append(methods, named.Method(i))
- }
- sort.Sort(methodsByName(methods))
-
- p.int(len(methods))
-
- if trace && methods != nil {
- p.tracef("associated methods {>\n")
- }
-
- for i, m := range methods {
- if trace && i > 0 {
- p.tracef("\n")
- }
-
- p.pos(m)
- name := m.Name()
- p.string(name)
- if !exported(name) {
- p.pkg(m.Pkg(), false)
- }
-
- sig := m.Type().(*types.Signature)
- p.paramList(types.NewTuple(sig.Recv()), false)
- p.paramList(sig.Params(), sig.Variadic())
- p.paramList(sig.Results(), false)
- p.int(0) // dummy value for go:nointerface pragma - ignored by importer
- }
-
- if trace && methods != nil {
- p.tracef("<\n} ")
- }
-}
-
-type methodsByName []*types.Func
-
-func (x methodsByName) Len() int { return len(x) }
-func (x methodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-func (x methodsByName) Less(i, j int) bool { return x[i].Name() < x[j].Name() }
-
-func (p *exporter) fieldList(t *types.Struct) {
- if trace && t.NumFields() > 0 {
- p.tracef("fields {>\n")
- defer p.tracef("<\n} ")
- }
-
- p.int(t.NumFields())
- for i := 0; i < t.NumFields(); i++ {
- if trace && i > 0 {
- p.tracef("\n")
- }
- p.field(t.Field(i))
- p.string(t.Tag(i))
- }
-}
-
-func (p *exporter) field(f *types.Var) {
- if !f.IsField() {
- panic(internalError("field expected"))
- }
-
- p.pos(f)
- p.fieldName(f)
- p.typ(f.Type())
-}
-
-func (p *exporter) iface(t *types.Interface) {
- // TODO(gri): enable importer to load embedded interfaces,
- // then emit Embeddeds and ExplicitMethods separately here.
- p.int(0)
-
- n := t.NumMethods()
- if trace && n > 0 {
- p.tracef("methods {>\n")
- defer p.tracef("<\n} ")
- }
- p.int(n)
- for i := 0; i < n; i++ {
- if trace && i > 0 {
- p.tracef("\n")
- }
- p.method(t.Method(i))
- }
-}
-
-func (p *exporter) method(m *types.Func) {
- sig := m.Type().(*types.Signature)
- if sig.Recv() == nil {
- panic(internalError("method expected"))
- }
-
- p.pos(m)
- p.string(m.Name())
- if m.Name() != "_" && !ast.IsExported(m.Name()) {
- p.pkg(m.Pkg(), false)
- }
-
- // interface method; no need to encode receiver.
- p.paramList(sig.Params(), sig.Variadic())
- p.paramList(sig.Results(), false)
-}
-
-func (p *exporter) fieldName(f *types.Var) {
- name := f.Name()
-
- if f.Anonymous() {
- // anonymous field - we distinguish between 3 cases:
- // 1) field name matches base type name and is exported
- // 2) field name matches base type name and is not exported
- // 3) field name doesn't match base type name (alias name)
- bname := basetypeName(f.Type())
- if name == bname {
- if ast.IsExported(name) {
- name = "" // 1) we don't need to know the field name or package
- } else {
- name = "?" // 2) use unexported name "?" to force package export
- }
- } else {
- // 3) indicate alias and export name as is
- // (this requires an extra "@" but this is a rare case)
- p.string("@")
- }
- }
-
- p.string(name)
- if name != "" && !ast.IsExported(name) {
- p.pkg(f.Pkg(), false)
- }
-}
-
-func basetypeName(typ types.Type) string {
- switch typ := deref(typ).(type) {
- case *types.Basic:
- return typ.Name()
- case *types.Named:
- return typ.Obj().Name()
- default:
- return "" // unnamed type
- }
-}
-
-func (p *exporter) paramList(params *types.Tuple, variadic bool) {
- // use negative length to indicate unnamed parameters
- // (look at the first parameter only since either all
- // names are present or all are absent)
- n := params.Len()
- if n > 0 && params.At(0).Name() == "" {
- n = -n
- }
- p.int(n)
- for i := 0; i < params.Len(); i++ {
- q := params.At(i)
- t := q.Type()
- if variadic && i == params.Len()-1 {
- t = &dddSlice{t.(*types.Slice).Elem()}
- }
- p.typ(t)
- if n > 0 {
- name := q.Name()
- p.string(name)
- if name != "_" {
- p.pkg(q.Pkg(), false)
- }
- }
- p.string("") // no compiler-specific info
- }
-}
-
-func (p *exporter) value(x constant.Value) {
- if trace {
- p.tracef("= ")
- }
-
- switch x.Kind() {
- case constant.Bool:
- tag := falseTag
- if constant.BoolVal(x) {
- tag = trueTag
- }
- p.tag(tag)
-
- case constant.Int:
- if v, exact := constant.Int64Val(x); exact {
- // common case: x fits into an int64 - use compact encoding
- p.tag(int64Tag)
- p.int64(v)
- return
- }
- // uncommon case: large x - use float encoding
- // (powers of 2 will be encoded efficiently with exponent)
- p.tag(floatTag)
- p.float(constant.ToFloat(x))
-
- case constant.Float:
- p.tag(floatTag)
- p.float(x)
-
- case constant.Complex:
- p.tag(complexTag)
- p.float(constant.Real(x))
- p.float(constant.Imag(x))
-
- case constant.String:
- p.tag(stringTag)
- p.string(constant.StringVal(x))
-
- case constant.Unknown:
- // package contains type errors
- p.tag(unknownTag)
-
- default:
- panic(internalErrorf("unexpected value %v (%T)", x, x))
- }
-}
-
-func (p *exporter) float(x constant.Value) {
- if x.Kind() != constant.Float {
- panic(internalErrorf("unexpected constant %v, want float", x))
- }
- // extract sign (there is no -0)
- sign := constant.Sign(x)
- if sign == 0 {
- // x == 0
- p.int(0)
- return
- }
- // x != 0
-
- var f big.Float
- if v, exact := constant.Float64Val(x); exact {
- // float64
- f.SetFloat64(v)
- } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int {
- // TODO(gri): add big.Rat accessor to constant.Value.
- r := valueToRat(num)
- f.SetRat(r.Quo(r, valueToRat(denom)))
- } else {
- // Value too large to represent as a fraction => inaccessible.
- // TODO(gri): add big.Float accessor to constant.Value.
- f.SetFloat64(math.MaxFloat64) // FIXME
- }
-
- // extract exponent such that 0.5 <= m < 1.0
- var m big.Float
- exp := f.MantExp(&m)
-
- // extract mantissa as *big.Int
- // - set exponent large enough so mant satisfies mant.IsInt()
- // - get *big.Int from mant
- m.SetMantExp(&m, int(m.MinPrec()))
- mant, acc := m.Int(nil)
- if acc != big.Exact {
- panic(internalError("internal error"))
- }
-
- p.int(sign)
- p.int(exp)
- p.string(string(mant.Bytes()))
-}
-
-func valueToRat(x constant.Value) *big.Rat {
- // Convert little-endian to big-endian.
- // I can't believe this is necessary.
- bytes := constant.Bytes(x)
- for i := 0; i < len(bytes)/2; i++ {
- bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i]
- }
- return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes))
-}
-
-func (p *exporter) bool(b bool) bool {
- if trace {
- p.tracef("[")
- defer p.tracef("= %v] ", b)
- }
-
- x := 0
- if b {
- x = 1
- }
- p.int(x)
- return b
-}
-
-// ----------------------------------------------------------------------------
-// Low-level encoders
-
-func (p *exporter) index(marker byte, index int) {
- if index < 0 {
- panic(internalError("invalid index < 0"))
- }
- if debugFormat {
- p.marker('t')
- }
- if trace {
- p.tracef("%c%d ", marker, index)
- }
- p.rawInt64(int64(index))
-}
-
-func (p *exporter) tag(tag int) {
- if tag >= 0 {
- panic(internalError("invalid tag >= 0"))
- }
- if debugFormat {
- p.marker('t')
- }
- if trace {
- p.tracef("%s ", tagString[-tag])
- }
- p.rawInt64(int64(tag))
-}
-
-func (p *exporter) int(x int) {
- p.int64(int64(x))
-}
-
-func (p *exporter) int64(x int64) {
- if debugFormat {
- p.marker('i')
- }
- if trace {
- p.tracef("%d ", x)
- }
- p.rawInt64(x)
-}
-
-func (p *exporter) string(s string) {
- if debugFormat {
- p.marker('s')
- }
- if trace {
- p.tracef("%q ", s)
- }
- // if we saw the string before, write its index (>= 0)
- // (the empty string is mapped to 0)
- if i, ok := p.strIndex[s]; ok {
- p.rawInt64(int64(i))
- return
- }
- // otherwise, remember string and write its negative length and bytes
- p.strIndex[s] = len(p.strIndex)
- p.rawInt64(-int64(len(s)))
- for i := 0; i < len(s); i++ {
- p.rawByte(s[i])
- }
-}
-
-// marker emits a marker byte and position information which makes
-// it easy for a reader to detect if it is "out of sync". Used for
-// debugFormat format only.
-func (p *exporter) marker(m byte) {
- p.rawByte(m)
- // Enable this for help tracking down the location
- // of an incorrect marker when running in debugFormat.
- if false && trace {
- p.tracef("#%d ", p.written)
- }
- p.rawInt64(int64(p.written))
-}
-
-// rawInt64 should only be used by low-level encoders.
-func (p *exporter) rawInt64(x int64) {
- var tmp [binary.MaxVarintLen64]byte
- n := binary.PutVarint(tmp[:], x)
- for i := 0; i < n; i++ {
- p.rawByte(tmp[i])
- }
-}
-
-// rawStringln should only be used to emit the initial version string.
-func (p *exporter) rawStringln(s string) {
- for i := 0; i < len(s); i++ {
- p.rawByte(s[i])
- }
- p.rawByte('\n')
-}
-
-// rawByte is the bottleneck interface to write to p.out.
-// rawByte escapes b as follows (any encoding does that
-// hides '$'):
-//
-// '$' => '|' 'S'
-// '|' => '|' '|'
-//
-// Necessary so other tools can find the end of the
-// export data by searching for "$$".
-// rawByte should only be used by low-level encoders.
-func (p *exporter) rawByte(b byte) {
- switch b {
- case '$':
- // write '$' as '|' 'S'
- b = 'S'
- fallthrough
- case '|':
- // write '|' as '|' '|'
- p.out.WriteByte('|')
- p.written++
- }
- p.out.WriteByte(b)
- p.written++
-}
-
-// tracef is like fmt.Printf but it rewrites the format string
-// to take care of indentation.
-func (p *exporter) tracef(format string, args ...interface{}) {
- if strings.ContainsAny(format, "<>\n") {
- var buf bytes.Buffer
- for i := 0; i < len(format); i++ {
- // no need to deal with runes
- ch := format[i]
- switch ch {
- case '>':
- p.indent++
- continue
- case '<':
- p.indent--
- continue
- }
- buf.WriteByte(ch)
- if ch == '\n' {
- for j := p.indent; j > 0; j-- {
- buf.WriteString(". ")
- }
- }
- }
- format = buf.String()
- }
- fmt.Printf(format, args...)
-}
-
-// Debugging support.
-// (tagString is only used when tracing is enabled)
-var tagString = [...]string{
- // Packages
- -packageTag: "package",
-
- // Types
- -namedTag: "named type",
- -arrayTag: "array",
- -sliceTag: "slice",
- -dddTag: "ddd",
- -structTag: "struct",
- -pointerTag: "pointer",
- -signatureTag: "signature",
- -interfaceTag: "interface",
- -mapTag: "map",
- -chanTag: "chan",
-
- // Values
- -falseTag: "false",
- -trueTag: "true",
- -int64Tag: "int64",
- -floatTag: "float",
- -fractionTag: "fraction",
- -complexTag: "complex",
- -stringTag: "string",
- -unknownTag: "unknown",
-
- // Type aliases
- -aliasTag: "alias",
-}
diff --git a/go/internal/gcimporter/bexport_test.go b/go/internal/gcimporter/bexport_test.go
deleted file mode 100644
index 3da5397eb..000000000
--- a/go/internal/gcimporter/bexport_test.go
+++ /dev/null
@@ -1,551 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gcimporter_test
-
-import (
- "fmt"
- "go/ast"
- "go/build"
- "go/constant"
- "go/parser"
- "go/token"
- "go/types"
- "path/filepath"
- "reflect"
- "runtime"
- "sort"
- "strings"
- "testing"
-
- "golang.org/x/tools/go/ast/inspector"
- "golang.org/x/tools/go/buildutil"
- "golang.org/x/tools/go/internal/gcimporter"
- "golang.org/x/tools/go/loader"
- "golang.org/x/tools/internal/typeparams"
- "golang.org/x/tools/internal/typeparams/genericfeatures"
-)
-
-var isRace = false
-
-func TestBExportData_stdlib(t *testing.T) {
- if runtime.Compiler == "gccgo" {
- t.Skip("gccgo standard library is inaccessible")
- }
- if runtime.GOOS == "android" {
- t.Skipf("incomplete std lib on %s", runtime.GOOS)
- }
- if isRace {
- t.Skipf("stdlib tests take too long in race mode and flake on builders")
- }
- if testing.Short() {
- t.Skip("skipping RAM hungry test in -short mode")
- }
-
- // Load, parse and type-check the program.
- ctxt := build.Default // copy
- ctxt.GOPATH = "" // disable GOPATH
- conf := loader.Config{
- Build: &ctxt,
- AllowErrors: true,
- TypeChecker: types.Config{
- Error: func(err error) { t.Log(err) },
- },
- }
- for _, path := range buildutil.AllPackages(conf.Build) {
- conf.Import(path)
- }
-
- // Create a package containing type and value errors to ensure
- // they are properly encoded/decoded.
- f, err := conf.ParseFile("haserrors/haserrors.go", `package haserrors
-const UnknownValue = "" + 0
-type UnknownType undefined
-`)
- if err != nil {
- t.Fatal(err)
- }
- conf.CreateFromFiles("haserrors", f)
-
- prog, err := conf.Load()
- if err != nil {
- t.Fatalf("Load failed: %v", err)
- }
-
- numPkgs := len(prog.AllPackages)
- if want := minStdlibPackages; numPkgs < want {
- t.Errorf("Loaded only %d packages, want at least %d", numPkgs, want)
- }
-
- checked := 0
- for pkg, info := range prog.AllPackages {
- if info.Files == nil {
- continue // empty directory
- }
- // Binary export does not support generic code.
- inspect := inspector.New(info.Files)
- if genericfeatures.ForPackage(inspect, &info.Info) != 0 {
- t.Logf("skipping package %q which uses generics", pkg.Path())
- continue
- }
- checked++
- exportdata, err := gcimporter.BExportData(conf.Fset, pkg)
- if err != nil {
- t.Fatal(err)
- }
-
- imports := make(map[string]*types.Package)
- fset2 := token.NewFileSet()
- n, pkg2, err := gcimporter.BImportData(fset2, imports, exportdata, pkg.Path())
- if err != nil {
- t.Errorf("BImportData(%s): %v", pkg.Path(), err)
- continue
- }
- if n != len(exportdata) {
- t.Errorf("BImportData(%s) decoded %d bytes, want %d",
- pkg.Path(), n, len(exportdata))
- }
-
- // Compare the packages' corresponding members.
- for _, name := range pkg.Scope().Names() {
- if !ast.IsExported(name) {
- continue
- }
- obj1 := pkg.Scope().Lookup(name)
- obj2 := pkg2.Scope().Lookup(name)
- if obj2 == nil {
- t.Errorf("%s.%s not found, want %s", pkg.Path(), name, obj1)
- continue
- }
-
- fl1 := fileLine(conf.Fset, obj1)
- fl2 := fileLine(fset2, obj2)
- if fl1 != fl2 {
- t.Errorf("%s.%s: got posn %s, want %s",
- pkg.Path(), name, fl2, fl1)
- }
-
- if err := equalObj(obj1, obj2); err != nil {
- t.Errorf("%s.%s: %s\ngot: %s\nwant: %s",
- pkg.Path(), name, err, obj2, obj1)
- }
- }
- }
- if want := minStdlibPackages; checked < want {
- t.Errorf("Checked only %d packages, want at least %d", checked, want)
- }
-}
-
-func fileLine(fset *token.FileSet, obj types.Object) string {
- posn := fset.Position(obj.Pos())
- filename := filepath.Clean(strings.ReplaceAll(posn.Filename, "$GOROOT", runtime.GOROOT()))
- return fmt.Sprintf("%s:%d", filename, posn.Line)
-}
-
-// equalObj reports how x and y differ. They are assumed to belong to
-// different universes so cannot be compared directly.
-func equalObj(x, y types.Object) error {
- if reflect.TypeOf(x) != reflect.TypeOf(y) {
- return fmt.Errorf("%T vs %T", x, y)
- }
- xt := x.Type()
- yt := y.Type()
- switch x.(type) {
- case *types.Var, *types.Func:
- // ok
- case *types.Const:
- xval := x.(*types.Const).Val()
- yval := y.(*types.Const).Val()
- // Use string comparison for floating-point values since rounding is permitted.
- if constant.Compare(xval, token.NEQ, yval) &&
- !(xval.Kind() == constant.Float && xval.String() == yval.String()) {
- return fmt.Errorf("unequal constants %s vs %s", xval, yval)
- }
- case *types.TypeName:
- xt = xt.Underlying()
- yt = yt.Underlying()
- default:
- return fmt.Errorf("unexpected %T", x)
- }
- return equalType(xt, yt)
-}
-
-func equalType(x, y types.Type) error {
- if reflect.TypeOf(x) != reflect.TypeOf(y) {
- return fmt.Errorf("unequal kinds: %T vs %T", x, y)
- }
- switch x := x.(type) {
- case *types.Interface:
- y := y.(*types.Interface)
- // TODO(gri): enable separate emission of Embedded interfaces
- // and ExplicitMethods then use this logic.
- // if x.NumEmbeddeds() != y.NumEmbeddeds() {
- // return fmt.Errorf("unequal number of embedded interfaces: %d vs %d",
- // x.NumEmbeddeds(), y.NumEmbeddeds())
- // }
- // for i := 0; i < x.NumEmbeddeds(); i++ {
- // xi := x.Embedded(i)
- // yi := y.Embedded(i)
- // if xi.String() != yi.String() {
- // return fmt.Errorf("mismatched %th embedded interface: %s vs %s",
- // i, xi, yi)
- // }
- // }
- // if x.NumExplicitMethods() != y.NumExplicitMethods() {
- // return fmt.Errorf("unequal methods: %d vs %d",
- // x.NumExplicitMethods(), y.NumExplicitMethods())
- // }
- // for i := 0; i < x.NumExplicitMethods(); i++ {
- // xm := x.ExplicitMethod(i)
- // ym := y.ExplicitMethod(i)
- // if xm.Name() != ym.Name() {
- // return fmt.Errorf("mismatched %th method: %s vs %s", i, xm, ym)
- // }
- // if err := equalType(xm.Type(), ym.Type()); err != nil {
- // return fmt.Errorf("mismatched %s method: %s", xm.Name(), err)
- // }
- // }
- if x.NumMethods() != y.NumMethods() {
- return fmt.Errorf("unequal methods: %d vs %d",
- x.NumMethods(), y.NumMethods())
- }
- for i := 0; i < x.NumMethods(); i++ {
- xm := x.Method(i)
- ym := y.Method(i)
- if xm.Name() != ym.Name() {
- return fmt.Errorf("mismatched %dth method: %s vs %s", i, xm, ym)
- }
- if err := equalType(xm.Type(), ym.Type()); err != nil {
- return fmt.Errorf("mismatched %s method: %s", xm.Name(), err)
- }
- }
- // Constraints are handled explicitly in the *TypeParam case below, so we
- // don't yet need to consider embeddeds here.
- // TODO(rfindley): consider the type set here.
- case *types.Array:
- y := y.(*types.Array)
- if x.Len() != y.Len() {
- return fmt.Errorf("unequal array lengths: %d vs %d", x.Len(), y.Len())
- }
- if err := equalType(x.Elem(), y.Elem()); err != nil {
- return fmt.Errorf("array elements: %s", err)
- }
- case *types.Basic:
- y := y.(*types.Basic)
- if x.Kind() != y.Kind() {
- return fmt.Errorf("unequal basic types: %s vs %s", x, y)
- }
- case *types.Chan:
- y := y.(*types.Chan)
- if x.Dir() != y.Dir() {
- return fmt.Errorf("unequal channel directions: %d vs %d", x.Dir(), y.Dir())
- }
- if err := equalType(x.Elem(), y.Elem()); err != nil {
- return fmt.Errorf("channel elements: %s", err)
- }
- case *types.Map:
- y := y.(*types.Map)
- if err := equalType(x.Key(), y.Key()); err != nil {
- return fmt.Errorf("map keys: %s", err)
- }
- if err := equalType(x.Elem(), y.Elem()); err != nil {
- return fmt.Errorf("map values: %s", err)
- }
- case *types.Named:
- y := y.(*types.Named)
- return cmpNamed(x, y)
- case *types.Pointer:
- y := y.(*types.Pointer)
- if err := equalType(x.Elem(), y.Elem()); err != nil {
- return fmt.Errorf("pointer elements: %s", err)
- }
- case *types.Signature:
- y := y.(*types.Signature)
- if err := equalType(x.Params(), y.Params()); err != nil {
- return fmt.Errorf("parameters: %s", err)
- }
- if err := equalType(x.Results(), y.Results()); err != nil {
- return fmt.Errorf("results: %s", err)
- }
- if x.Variadic() != y.Variadic() {
- return fmt.Errorf("unequal variadicity: %t vs %t",
- x.Variadic(), y.Variadic())
- }
- if (x.Recv() != nil) != (y.Recv() != nil) {
- return fmt.Errorf("unequal receivers: %s vs %s", x.Recv(), y.Recv())
- }
- if x.Recv() != nil {
- // TODO(adonovan): fix: this assertion fires for interface methods.
- // The type of the receiver of an interface method is a named type
- // if the Package was loaded from export data, or an unnamed (interface)
- // type if the Package was produced by type-checking ASTs.
- // if err := equalType(x.Recv().Type(), y.Recv().Type()); err != nil {
- // return fmt.Errorf("receiver: %s", err)
- // }
- }
- if err := equalTypeParams(typeparams.ForSignature(x), typeparams.ForSignature(y)); err != nil {
- return fmt.Errorf("type params: %s", err)
- }
- if err := equalTypeParams(typeparams.RecvTypeParams(x), typeparams.RecvTypeParams(y)); err != nil {
- return fmt.Errorf("recv type params: %s", err)
- }
- case *types.Slice:
- y := y.(*types.Slice)
- if err := equalType(x.Elem(), y.Elem()); err != nil {
- return fmt.Errorf("slice elements: %s", err)
- }
- case *types.Struct:
- y := y.(*types.Struct)
- if x.NumFields() != y.NumFields() {
- return fmt.Errorf("unequal struct fields: %d vs %d",
- x.NumFields(), y.NumFields())
- }
- for i := 0; i < x.NumFields(); i++ {
- xf := x.Field(i)
- yf := y.Field(i)
- if xf.Name() != yf.Name() {
- return fmt.Errorf("mismatched fields: %s vs %s", xf, yf)
- }
- if err := equalType(xf.Type(), yf.Type()); err != nil {
- return fmt.Errorf("struct field %s: %s", xf.Name(), err)
- }
- if x.Tag(i) != y.Tag(i) {
- return fmt.Errorf("struct field %s has unequal tags: %q vs %q",
- xf.Name(), x.Tag(i), y.Tag(i))
- }
- }
- case *types.Tuple:
- y := y.(*types.Tuple)
- if x.Len() != y.Len() {
- return fmt.Errorf("unequal tuple lengths: %d vs %d", x.Len(), y.Len())
- }
- for i := 0; i < x.Len(); i++ {
- if err := equalType(x.At(i).Type(), y.At(i).Type()); err != nil {
- return fmt.Errorf("tuple element %d: %s", i, err)
- }
- }
- case *typeparams.TypeParam:
- y := y.(*typeparams.TypeParam)
- if x.String() != y.String() {
- return fmt.Errorf("unequal named types: %s vs %s", x, y)
- }
- // For now, just compare constraints by type string to short-circuit
- // cycles. We have to make interfaces explicit as export data currently
- // doesn't support marking interfaces as implicit.
- // TODO(rfindley): remove makeExplicit once export data contains an
- // implicit bit.
- xc := makeExplicit(x.Constraint()).String()
- yc := makeExplicit(y.Constraint()).String()
- if xc != yc {
- return fmt.Errorf("unequal constraints: %s vs %s", xc, yc)
- }
-
- default:
- panic(fmt.Sprintf("unexpected %T type", x))
- }
- return nil
-}
-
-// cmpNamed compares two named types x and y, returning an error for any
-// discrepancies. It does not compare their underlying types.
-func cmpNamed(x, y *types.Named) error {
- xOrig := typeparams.NamedTypeOrigin(x)
- yOrig := typeparams.NamedTypeOrigin(y)
- if xOrig.String() != yOrig.String() {
- return fmt.Errorf("unequal named types: %s vs %s", x, y)
- }
- if err := equalTypeParams(typeparams.ForNamed(x), typeparams.ForNamed(y)); err != nil {
- return fmt.Errorf("type parameters: %s", err)
- }
- if err := equalTypeArgs(typeparams.NamedTypeArgs(x), typeparams.NamedTypeArgs(y)); err != nil {
- return fmt.Errorf("type arguments: %s", err)
- }
- if x.NumMethods() != y.NumMethods() {
- return fmt.Errorf("unequal methods: %d vs %d",
- x.NumMethods(), y.NumMethods())
- }
- // Unfortunately method sorting is not canonical, so sort before comparing.
- var xms, yms []*types.Func
- for i := 0; i < x.NumMethods(); i++ {
- xms = append(xms, x.Method(i))
- yms = append(yms, y.Method(i))
- }
- for _, ms := range [][]*types.Func{xms, yms} {
- sort.Slice(ms, func(i, j int) bool {
- return ms[i].Name() < ms[j].Name()
- })
- }
- for i, xm := range xms {
- ym := yms[i]
- if xm.Name() != ym.Name() {
- return fmt.Errorf("mismatched %dth method: %s vs %s", i, xm, ym)
- }
- // Calling equalType here leads to infinite recursion, so just compare
- // strings.
- if xm.String() != ym.String() {
- return fmt.Errorf("unequal methods: %s vs %s", x, y)
- }
- }
- return nil
-}
-
-// makeExplicit returns an explicit version of typ, if typ is an implicit
-// interface. Otherwise it returns typ unmodified.
-func makeExplicit(typ types.Type) types.Type {
- if iface, _ := typ.(*types.Interface); iface != nil && typeparams.IsImplicit(iface) {
- var methods []*types.Func
- for i := 0; i < iface.NumExplicitMethods(); i++ {
- methods = append(methods, iface.Method(i))
- }
- var embeddeds []types.Type
- for i := 0; i < iface.NumEmbeddeds(); i++ {
- embeddeds = append(embeddeds, iface.EmbeddedType(i))
- }
- return types.NewInterfaceType(methods, embeddeds)
- }
- return typ
-}
-
-func equalTypeArgs(x, y *typeparams.TypeList) error {
- if x.Len() != y.Len() {
- return fmt.Errorf("unequal lengths: %d vs %d", x.Len(), y.Len())
- }
- for i := 0; i < x.Len(); i++ {
- if err := equalType(x.At(i), y.At(i)); err != nil {
- return fmt.Errorf("type %d: %s", i, err)
- }
- }
- return nil
-}
-
-func equalTypeParams(x, y *typeparams.TypeParamList) error {
- if x.Len() != y.Len() {
- return fmt.Errorf("unequal lengths: %d vs %d", x.Len(), y.Len())
- }
- for i := 0; i < x.Len(); i++ {
- if err := equalType(x.At(i), y.At(i)); err != nil {
- return fmt.Errorf("type parameter %d: %s", i, err)
- }
- }
- return nil
-}
-
-// TestVeryLongFile tests the position of an import object declared in
-// a very long input file. Line numbers greater than maxlines are
-// reported as line 1, not garbage or token.NoPos.
-func TestVeryLongFile(t *testing.T) {
- // parse and typecheck
- longFile := "package foo" + strings.Repeat("\n", 123456) + "var X int"
- fset1 := token.NewFileSet()
- f, err := parser.ParseFile(fset1, "foo.go", longFile, 0)
- if err != nil {
- t.Fatal(err)
- }
- var conf types.Config
- pkg, err := conf.Check("foo", fset1, []*ast.File{f}, nil)
- if err != nil {
- t.Fatal(err)
- }
-
- // export
- exportdata, err := gcimporter.BExportData(fset1, pkg)
- if err != nil {
- t.Fatal(err)
- }
-
- // import
- imports := make(map[string]*types.Package)
- fset2 := token.NewFileSet()
- _, pkg2, err := gcimporter.BImportData(fset2, imports, exportdata, pkg.Path())
- if err != nil {
- t.Fatalf("BImportData(%s): %v", pkg.Path(), err)
- }
-
- // compare
- posn1 := fset1.Position(pkg.Scope().Lookup("X").Pos())
- posn2 := fset2.Position(pkg2.Scope().Lookup("X").Pos())
- if want := "foo.go:1:1"; posn2.String() != want {
- t.Errorf("X position = %s, want %s (orig was %s)",
- posn2, want, posn1)
- }
-}
-
-const src = `
-package p
-
-type (
- T0 = int32
- T1 = struct{}
- T2 = struct{ T1 }
- Invalid = foo // foo is undeclared
-)
-`
-
-func checkPkg(t *testing.T, pkg *types.Package, label string) {
- T1 := types.NewStruct(nil, nil)
- T2 := types.NewStruct([]*types.Var{types.NewField(0, pkg, "T1", T1, true)}, nil)
-
- for _, test := range []struct {
- name string
- typ types.Type
- }{
- {"T0", types.Typ[types.Int32]},
- {"T1", T1},
- {"T2", T2},
- {"Invalid", types.Typ[types.Invalid]},
- } {
- obj := pkg.Scope().Lookup(test.name)
- if obj == nil {
- t.Errorf("%s: %s not found", label, test.name)
- continue
- }
- tname, _ := obj.(*types.TypeName)
- if tname == nil {
- t.Errorf("%s: %v not a type name", label, obj)
- continue
- }
- if !tname.IsAlias() {
- t.Errorf("%s: %v: not marked as alias", label, tname)
- continue
- }
- if got := tname.Type(); !types.Identical(got, test.typ) {
- t.Errorf("%s: %v: got %v; want %v", label, tname, got, test.typ)
- }
- }
-}
-
-func TestTypeAliases(t *testing.T) {
- // parse and typecheck
- fset1 := token.NewFileSet()
- f, err := parser.ParseFile(fset1, "p.go", src, 0)
- if err != nil {
- t.Fatal(err)
- }
- var conf types.Config
- pkg1, err := conf.Check("p", fset1, []*ast.File{f}, nil)
- if err == nil {
- // foo in undeclared in src; we should see an error
- t.Fatal("invalid source type-checked without error")
- }
- if pkg1 == nil {
- // despite incorrect src we should see a (partially) type-checked package
- t.Fatal("nil package returned")
- }
- checkPkg(t, pkg1, "export")
-
- // export
- exportdata, err := gcimporter.BExportData(fset1, pkg1)
- if err != nil {
- t.Fatal(err)
- }
-
- // import
- imports := make(map[string]*types.Package)
- fset2 := token.NewFileSet()
- _, pkg2, err := gcimporter.BImportData(fset2, imports, exportdata, pkg1.Path())
- if err != nil {
- t.Fatalf("BImportData(%s): %v", pkg1.Path(), err)
- }
- checkPkg(t, pkg2, "import")
-}
diff --git a/go/internal/gcimporter/gcimporter.go b/go/internal/gcimporter/gcimporter.go
deleted file mode 100644
index 3ab66830d..000000000
--- a/go/internal/gcimporter/gcimporter.go
+++ /dev/null
@@ -1,1084 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file is a modified copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go,
-// but it also contains the original source-based importer code for Go1.6.
-// Once we stop supporting 1.6, we can remove that code.
-
-// Package gcimporter provides various functions for reading
-// gc-generated object files that can be used to implement the
-// Importer interface defined by the Go 1.5 standard library package.
-package gcimporter // import "golang.org/x/tools/go/internal/gcimporter"
-
-import (
- "bufio"
- "errors"
- "fmt"
- "go/build"
- "go/constant"
- "go/token"
- "go/types"
- "io"
- "io/ioutil"
- "os"
- "path/filepath"
- "sort"
- "strconv"
- "strings"
- "text/scanner"
-)
-
-const (
- // Enable debug during development: it adds some additional checks, and
- // prevents errors from being recovered.
- debug = false
-
- // If trace is set, debugging output is printed to std out.
- trace = false
-)
-
-var pkgExts = [...]string{".a", ".o"}
-
-// FindPkg returns the filename and unique package id for an import
-// path based on package information provided by build.Import (using
-// the build.Default build.Context). A relative srcDir is interpreted
-// relative to the current working directory.
-// If no file was found, an empty filename is returned.
-//
-func FindPkg(path, srcDir string) (filename, id string) {
- if path == "" {
- return
- }
-
- var noext string
- switch {
- default:
- // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x"
- // Don't require the source files to be present.
- if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282
- srcDir = abs
- }
- bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
- if bp.PkgObj == "" {
- id = path // make sure we have an id to print in error message
- return
- }
- noext = strings.TrimSuffix(bp.PkgObj, ".a")
- id = bp.ImportPath
-
- case build.IsLocalImport(path):
- // "./x" -> "/this/directory/x.ext", "/this/directory/x"
- noext = filepath.Join(srcDir, path)
- id = noext
-
- case filepath.IsAbs(path):
- // for completeness only - go/build.Import
- // does not support absolute imports
- // "/x" -> "/x.ext", "/x"
- noext = path
- id = path
- }
-
- if false { // for debugging
- if path != id {
- fmt.Printf("%s -> %s\n", path, id)
- }
- }
-
- // try extensions
- for _, ext := range pkgExts {
- filename = noext + ext
- if f, err := os.Stat(filename); err == nil && !f.IsDir() {
- return
- }
- }
-
- filename = "" // not found
- return
-}
-
-// ImportData imports a package by reading the gc-generated export data,
-// adds the corresponding package object to the packages map indexed by id,
-// and returns the object.
-//
-// The packages map must contains all packages already imported. The data
-// reader position must be the beginning of the export data section. The
-// filename is only used in error messages.
-//
-// If packages[id] contains the completely imported package, that package
-// can be used directly, and there is no need to call this function (but
-// there is also no harm but for extra time used).
-//
-func ImportData(packages map[string]*types.Package, filename, id string, data io.Reader) (pkg *types.Package, err error) {
- // support for parser error handling
- defer func() {
- switch r := recover().(type) {
- case nil:
- // nothing to do
- case importError:
- err = r
- default:
- panic(r) // internal error
- }
- }()
-
- var p parser
- p.init(filename, id, data, packages)
- pkg = p.parseExport()
-
- return
-}
-
-// Import imports a gc-generated package given its import path and srcDir, adds
-// the corresponding package object to the packages map, and returns the object.
-// The packages map must contain all packages already imported.
-//
-func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) {
- var rc io.ReadCloser
- var filename, id string
- if lookup != nil {
- // With custom lookup specified, assume that caller has
- // converted path to a canonical import path for use in the map.
- if path == "unsafe" {
- return types.Unsafe, nil
- }
- id = path
-
- // No need to re-import if the package was imported completely before.
- if pkg = packages[id]; pkg != nil && pkg.Complete() {
- return
- }
- f, err := lookup(path)
- if err != nil {
- return nil, err
- }
- rc = f
- } else {
- filename, id = FindPkg(path, srcDir)
- if filename == "" {
- if path == "unsafe" {
- return types.Unsafe, nil
- }
- return nil, fmt.Errorf("can't find import: %q", id)
- }
-
- // no need to re-import if the package was imported completely before
- if pkg = packages[id]; pkg != nil && pkg.Complete() {
- return
- }
-
- // open file
- f, err := os.Open(filename)
- if err != nil {
- return nil, err
- }
- defer func() {
- if err != nil {
- // add file name to error
- err = fmt.Errorf("%s: %v", filename, err)
- }
- }()
- rc = f
- }
- defer rc.Close()
-
- var hdr string
- buf := bufio.NewReader(rc)
- if hdr, _, err = FindExportData(buf); err != nil {
- return
- }
-
- switch hdr {
- case "$$\n":
- // Work-around if we don't have a filename; happens only if lookup != nil.
- // Either way, the filename is only needed for importer error messages, so
- // this is fine.
- if filename == "" {
- filename = path
- }
- return ImportData(packages, filename, id, buf)
-
- case "$$B\n":
- var data []byte
- data, err = ioutil.ReadAll(buf)
- if err != nil {
- break
- }
-
- // TODO(gri): allow clients of go/importer to provide a FileSet.
- // Or, define a new standard go/types/gcexportdata package.
- fset := token.NewFileSet()
-
- // The indexed export format starts with an 'i'; the older
- // binary export format starts with a 'c', 'd', or 'v'
- // (from "version"). Select appropriate importer.
- if len(data) > 0 && data[0] == 'i' {
- _, pkg, err = IImportData(fset, packages, data[1:], id)
- } else {
- _, pkg, err = BImportData(fset, packages, data, id)
- }
-
- default:
- err = fmt.Errorf("unknown export data header: %q", hdr)
- }
-
- return
-}
-
-// ----------------------------------------------------------------------------
-// Parser
-
-// TODO(gri) Imported objects don't have position information.
-// Ideally use the debug table line info; alternatively
-// create some fake position (or the position of the
-// import). That way error messages referring to imported
-// objects can print meaningful information.
-
-// parser parses the exports inside a gc compiler-produced
-// object/archive file and populates its scope with the results.
-type parser struct {
- scanner scanner.Scanner
- tok rune // current token
- lit string // literal string; only valid for Ident, Int, String tokens
- id string // package id of imported package
- sharedPkgs map[string]*types.Package // package id -> package object (across importer)
- localPkgs map[string]*types.Package // package id -> package object (just this package)
-}
-
-func (p *parser) init(filename, id string, src io.Reader, packages map[string]*types.Package) {
- p.scanner.Init(src)
- p.scanner.Error = func(_ *scanner.Scanner, msg string) { p.error(msg) }
- p.scanner.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanChars | scanner.ScanStrings | scanner.ScanComments | scanner.SkipComments
- p.scanner.Whitespace = 1<<'\t' | 1<<' '
- p.scanner.Filename = filename // for good error messages
- p.next()
- p.id = id
- p.sharedPkgs = packages
- if debug {
- // check consistency of packages map
- for _, pkg := range packages {
- if pkg.Name() == "" {
- fmt.Printf("no package name for %s\n", pkg.Path())
- }
- }
- }
-}
-
-func (p *parser) next() {
- p.tok = p.scanner.Scan()
- switch p.tok {
- case scanner.Ident, scanner.Int, scanner.Char, scanner.String, '·':
- p.lit = p.scanner.TokenText()
- default:
- p.lit = ""
- }
- if debug {
- fmt.Printf("%s: %q -> %q\n", scanner.TokenString(p.tok), p.scanner.TokenText(), p.lit)
- }
-}
-
-func declTypeName(pkg *types.Package, name string) *types.TypeName {
- scope := pkg.Scope()
- if obj := scope.Lookup(name); obj != nil {
- return obj.(*types.TypeName)
- }
- obj := types.NewTypeName(token.NoPos, pkg, name, nil)
- // a named type may be referred to before the underlying type
- // is known - set it up
- types.NewNamed(obj, nil, nil)
- scope.Insert(obj)
- return obj
-}
-
-// ----------------------------------------------------------------------------
-// Error handling
-
-// Internal errors are boxed as importErrors.
-type importError struct {
- pos scanner.Position
- err error
-}
-
-func (e importError) Error() string {
- return fmt.Sprintf("import error %s (byte offset = %d): %s", e.pos, e.pos.Offset, e.err)
-}
-
-func (p *parser) error(err interface{}) {
- if s, ok := err.(string); ok {
- err = errors.New(s)
- }
- // panic with a runtime.Error if err is not an error
- panic(importError{p.scanner.Pos(), err.(error)})
-}
-
-func (p *parser) errorf(format string, args ...interface{}) {
- p.error(fmt.Sprintf(format, args...))
-}
-
-func (p *parser) expect(tok rune) string {
- lit := p.lit
- if p.tok != tok {
- p.errorf("expected %s, got %s (%s)", scanner.TokenString(tok), scanner.TokenString(p.tok), lit)
- }
- p.next()
- return lit
-}
-
-func (p *parser) expectSpecial(tok string) {
- sep := 'x' // not white space
- i := 0
- for i < len(tok) && p.tok == rune(tok[i]) && sep > ' ' {
- sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token
- p.next()
- i++
- }
- if i < len(tok) {
- p.errorf("expected %q, got %q", tok, tok[0:i])
- }
-}
-
-func (p *parser) expectKeyword(keyword string) {
- lit := p.expect(scanner.Ident)
- if lit != keyword {
- p.errorf("expected keyword %s, got %q", keyword, lit)
- }
-}
-
-// ----------------------------------------------------------------------------
-// Qualified and unqualified names
-
-// PackageId = string_lit .
-//
-func (p *parser) parsePackageID() string {
- id, err := strconv.Unquote(p.expect(scanner.String))
- if err != nil {
- p.error(err)
- }
- // id == "" stands for the imported package id
- // (only known at time of package installation)
- if id == "" {
- id = p.id
- }
- return id
-}
-
-// PackageName = ident .
-//
-func (p *parser) parsePackageName() string {
- return p.expect(scanner.Ident)
-}
-
-// dotIdentifier = ( ident | '·' ) { ident | int | '·' } .
-func (p *parser) parseDotIdent() string {
- ident := ""
- if p.tok != scanner.Int {
- sep := 'x' // not white space
- for (p.tok == scanner.Ident || p.tok == scanner.Int || p.tok == '·') && sep > ' ' {
- ident += p.lit
- sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token
- p.next()
- }
- }
- if ident == "" {
- p.expect(scanner.Ident) // use expect() for error handling
- }
- return ident
-}
-
-// QualifiedName = "@" PackageId "." ( "?" | dotIdentifier ) .
-//
-func (p *parser) parseQualifiedName() (id, name string) {
- p.expect('@')
- id = p.parsePackageID()
- p.expect('.')
- // Per rev f280b8a485fd (10/2/2013), qualified names may be used for anonymous fields.
- if p.tok == '?' {
- p.next()
- } else {
- name = p.parseDotIdent()
- }
- return
-}
-
-// getPkg returns the package for a given id. If the package is
-// not found, create the package and add it to the p.localPkgs
-// and p.sharedPkgs maps. name is the (expected) name of the
-// package. If name == "", the package name is expected to be
-// set later via an import clause in the export data.
-//
-// id identifies a package, usually by a canonical package path like
-// "encoding/json" but possibly by a non-canonical import path like
-// "./json".
-//
-func (p *parser) getPkg(id, name string) *types.Package {
- // package unsafe is not in the packages maps - handle explicitly
- if id == "unsafe" {
- return types.Unsafe
- }
-
- pkg := p.localPkgs[id]
- if pkg == nil {
- // first import of id from this package
- pkg = p.sharedPkgs[id]
- if pkg == nil {
- // first import of id by this importer;
- // add (possibly unnamed) pkg to shared packages
- pkg = types.NewPackage(id, name)
- p.sharedPkgs[id] = pkg
- }
- // add (possibly unnamed) pkg to local packages
- if p.localPkgs == nil {
- p.localPkgs = make(map[string]*types.Package)
- }
- p.localPkgs[id] = pkg
- } else if name != "" {
- // package exists already and we have an expected package name;
- // make sure names match or set package name if necessary
- if pname := pkg.Name(); pname == "" {
- pkg.SetName(name)
- } else if pname != name {
- p.errorf("%s package name mismatch: %s (given) vs %s (expected)", id, pname, name)
- }
- }
- return pkg
-}
-
-// parseExportedName is like parseQualifiedName, but
-// the package id is resolved to an imported *types.Package.
-//
-func (p *parser) parseExportedName() (pkg *types.Package, name string) {
- id, name := p.parseQualifiedName()
- pkg = p.getPkg(id, "")
- return
-}
-
-// ----------------------------------------------------------------------------
-// Types
-
-// BasicType = identifier .
-//
-func (p *parser) parseBasicType() types.Type {
- id := p.expect(scanner.Ident)
- obj := types.Universe.Lookup(id)
- if obj, ok := obj.(*types.TypeName); ok {
- return obj.Type()
- }
- p.errorf("not a basic type: %s", id)
- return nil
-}
-
-// ArrayType = "[" int_lit "]" Type .
-//
-func (p *parser) parseArrayType(parent *types.Package) types.Type {
- // "[" already consumed and lookahead known not to be "]"
- lit := p.expect(scanner.Int)
- p.expect(']')
- elem := p.parseType(parent)
- n, err := strconv.ParseInt(lit, 10, 64)
- if err != nil {
- p.error(err)
- }
- return types.NewArray(elem, n)
-}
-
-// MapType = "map" "[" Type "]" Type .
-//
-func (p *parser) parseMapType(parent *types.Package) types.Type {
- p.expectKeyword("map")
- p.expect('[')
- key := p.parseType(parent)
- p.expect(']')
- elem := p.parseType(parent)
- return types.NewMap(key, elem)
-}
-
-// Name = identifier | "?" | QualifiedName .
-//
-// For unqualified and anonymous names, the returned package is the parent
-// package unless parent == nil, in which case the returned package is the
-// package being imported. (The parent package is not nil if the name
-// is an unqualified struct field or interface method name belonging to a
-// type declared in another package.)
-//
-// For qualified names, the returned package is nil (and not created if
-// it doesn't exist yet) unless materializePkg is set (which creates an
-// unnamed package with valid package path). In the latter case, a
-// subsequent import clause is expected to provide a name for the package.
-//
-func (p *parser) parseName(parent *types.Package, materializePkg bool) (pkg *types.Package, name string) {
- pkg = parent
- if pkg == nil {
- pkg = p.sharedPkgs[p.id]
- }
- switch p.tok {
- case scanner.Ident:
- name = p.lit
- p.next()
- case '?':
- // anonymous
- p.next()
- case '@':
- // exported name prefixed with package path
- pkg = nil
- var id string
- id, name = p.parseQualifiedName()
- if materializePkg {
- pkg = p.getPkg(id, "")
- }
- default:
- p.error("name expected")
- }
- return
-}
-
-func deref(typ types.Type) types.Type {
- if p, _ := typ.(*types.Pointer); p != nil {
- return p.Elem()
- }
- return typ
-}
-
-// Field = Name Type [ string_lit ] .
-//
-func (p *parser) parseField(parent *types.Package) (*types.Var, string) {
- pkg, name := p.parseName(parent, true)
-
- if name == "_" {
- // Blank fields should be package-qualified because they
- // are unexported identifiers, but gc does not qualify them.
- // Assuming that the ident belongs to the current package
- // causes types to change during re-exporting, leading
- // to spurious "can't assign A to B" errors from go/types.
- // As a workaround, pretend all blank fields belong
- // to the same unique dummy package.
- const blankpkg = "<_>"
- pkg = p.getPkg(blankpkg, blankpkg)
- }
-
- typ := p.parseType(parent)
- anonymous := false
- if name == "" {
- // anonymous field - typ must be T or *T and T must be a type name
- switch typ := deref(typ).(type) {
- case *types.Basic: // basic types are named types
- pkg = nil // objects defined in Universe scope have no package
- name = typ.Name()
- case *types.Named:
- name = typ.Obj().Name()
- default:
- p.errorf("anonymous field expected")
- }
- anonymous = true
- }
- tag := ""
- if p.tok == scanner.String {
- s := p.expect(scanner.String)
- var err error
- tag, err = strconv.Unquote(s)
- if err != nil {
- p.errorf("invalid struct tag %s: %s", s, err)
- }
- }
- return types.NewField(token.NoPos, pkg, name, typ, anonymous), tag
-}
-
-// StructType = "struct" "{" [ FieldList ] "}" .
-// FieldList = Field { ";" Field } .
-//
-func (p *parser) parseStructType(parent *types.Package) types.Type {
- var fields []*types.Var
- var tags []string
-
- p.expectKeyword("struct")
- p.expect('{')
- for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ {
- if i > 0 {
- p.expect(';')
- }
- fld, tag := p.parseField(parent)
- if tag != "" && tags == nil {
- tags = make([]string, i)
- }
- if tags != nil {
- tags = append(tags, tag)
- }
- fields = append(fields, fld)
- }
- p.expect('}')
-
- return types.NewStruct(fields, tags)
-}
-
-// Parameter = ( identifier | "?" ) [ "..." ] Type [ string_lit ] .
-//
-func (p *parser) parseParameter() (par *types.Var, isVariadic bool) {
- _, name := p.parseName(nil, false)
- // remove gc-specific parameter numbering
- if i := strings.Index(name, "·"); i >= 0 {
- name = name[:i]
- }
- if p.tok == '.' {
- p.expectSpecial("...")
- isVariadic = true
- }
- typ := p.parseType(nil)
- if isVariadic {
- typ = types.NewSlice(typ)
- }
- // ignore argument tag (e.g. "noescape")
- if p.tok == scanner.String {
- p.next()
- }
- // TODO(gri) should we provide a package?
- par = types.NewVar(token.NoPos, nil, name, typ)
- return
-}
-
-// Parameters = "(" [ ParameterList ] ")" .
-// ParameterList = { Parameter "," } Parameter .
-//
-func (p *parser) parseParameters() (list []*types.Var, isVariadic bool) {
- p.expect('(')
- for p.tok != ')' && p.tok != scanner.EOF {
- if len(list) > 0 {
- p.expect(',')
- }
- par, variadic := p.parseParameter()
- list = append(list, par)
- if variadic {
- if isVariadic {
- p.error("... not on final argument")
- }
- isVariadic = true
- }
- }
- p.expect(')')
-
- return
-}
-
-// Signature = Parameters [ Result ] .
-// Result = Type | Parameters .
-//
-func (p *parser) parseSignature(recv *types.Var) *types.Signature {
- params, isVariadic := p.parseParameters()
-
- // optional result type
- var results []*types.Var
- if p.tok == '(' {
- var variadic bool
- results, variadic = p.parseParameters()
- if variadic {
- p.error("... not permitted on result type")
- }
- }
-
- return types.NewSignature(recv, types.NewTuple(params...), types.NewTuple(results...), isVariadic)
-}
-
-// InterfaceType = "interface" "{" [ MethodList ] "}" .
-// MethodList = Method { ";" Method } .
-// Method = Name Signature .
-//
-// The methods of embedded interfaces are always "inlined"
-// by the compiler and thus embedded interfaces are never
-// visible in the export data.
-//
-func (p *parser) parseInterfaceType(parent *types.Package) types.Type {
- var methods []*types.Func
-
- p.expectKeyword("interface")
- p.expect('{')
- for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ {
- if i > 0 {
- p.expect(';')
- }
- pkg, name := p.parseName(parent, true)
- sig := p.parseSignature(nil)
- methods = append(methods, types.NewFunc(token.NoPos, pkg, name, sig))
- }
- p.expect('}')
-
- // Complete requires the type's embedded interfaces to be fully defined,
- // but we do not define any
- return newInterface(methods, nil).Complete()
-}
-
-// ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type .
-//
-func (p *parser) parseChanType(parent *types.Package) types.Type {
- dir := types.SendRecv
- if p.tok == scanner.Ident {
- p.expectKeyword("chan")
- if p.tok == '<' {
- p.expectSpecial("<-")
- dir = types.SendOnly
- }
- } else {
- p.expectSpecial("<-")
- p.expectKeyword("chan")
- dir = types.RecvOnly
- }
- elem := p.parseType(parent)
- return types.NewChan(dir, elem)
-}
-
-// Type =
-// BasicType | TypeName | ArrayType | SliceType | StructType |
-// PointerType | FuncType | InterfaceType | MapType | ChanType |
-// "(" Type ")" .
-//
-// BasicType = ident .
-// TypeName = ExportedName .
-// SliceType = "[" "]" Type .
-// PointerType = "*" Type .
-// FuncType = "func" Signature .
-//
-func (p *parser) parseType(parent *types.Package) types.Type {
- switch p.tok {
- case scanner.Ident:
- switch p.lit {
- default:
- return p.parseBasicType()
- case "struct":
- return p.parseStructType(parent)
- case "func":
- // FuncType
- p.next()
- return p.parseSignature(nil)
- case "interface":
- return p.parseInterfaceType(parent)
- case "map":
- return p.parseMapType(parent)
- case "chan":
- return p.parseChanType(parent)
- }
- case '@':
- // TypeName
- pkg, name := p.parseExportedName()
- return declTypeName(pkg, name).Type()
- case '[':
- p.next() // look ahead
- if p.tok == ']' {
- // SliceType
- p.next()
- return types.NewSlice(p.parseType(parent))
- }
- return p.parseArrayType(parent)
- case '*':
- // PointerType
- p.next()
- return types.NewPointer(p.parseType(parent))
- case '<':
- return p.parseChanType(parent)
- case '(':
- // "(" Type ")"
- p.next()
- typ := p.parseType(parent)
- p.expect(')')
- return typ
- }
- p.errorf("expected type, got %s (%q)", scanner.TokenString(p.tok), p.lit)
- return nil
-}
-
-// ----------------------------------------------------------------------------
-// Declarations
-
-// ImportDecl = "import" PackageName PackageId .
-//
-func (p *parser) parseImportDecl() {
- p.expectKeyword("import")
- name := p.parsePackageName()
- p.getPkg(p.parsePackageID(), name)
-}
-
-// int_lit = [ "+" | "-" ] { "0" ... "9" } .
-//
-func (p *parser) parseInt() string {
- s := ""
- switch p.tok {
- case '-':
- s = "-"
- p.next()
- case '+':
- p.next()
- }
- return s + p.expect(scanner.Int)
-}
-
-// number = int_lit [ "p" int_lit ] .
-//
-func (p *parser) parseNumber() (typ *types.Basic, val constant.Value) {
- // mantissa
- mant := constant.MakeFromLiteral(p.parseInt(), token.INT, 0)
- if mant == nil {
- panic("invalid mantissa")
- }
-
- if p.lit == "p" {
- // exponent (base 2)
- p.next()
- exp, err := strconv.ParseInt(p.parseInt(), 10, 0)
- if err != nil {
- p.error(err)
- }
- if exp < 0 {
- denom := constant.MakeInt64(1)
- denom = constant.Shift(denom, token.SHL, uint(-exp))
- typ = types.Typ[types.UntypedFloat]
- val = constant.BinaryOp(mant, token.QUO, denom)
- return
- }
- if exp > 0 {
- mant = constant.Shift(mant, token.SHL, uint(exp))
- }
- typ = types.Typ[types.UntypedFloat]
- val = mant
- return
- }
-
- typ = types.Typ[types.UntypedInt]
- val = mant
- return
-}
-
-// ConstDecl = "const" ExportedName [ Type ] "=" Literal .
-// Literal = bool_lit | int_lit | float_lit | complex_lit | rune_lit | string_lit .
-// bool_lit = "true" | "false" .
-// complex_lit = "(" float_lit "+" float_lit "i" ")" .
-// rune_lit = "(" int_lit "+" int_lit ")" .
-// string_lit = `"` { unicode_char } `"` .
-//
-func (p *parser) parseConstDecl() {
- p.expectKeyword("const")
- pkg, name := p.parseExportedName()
-
- var typ0 types.Type
- if p.tok != '=' {
- // constant types are never structured - no need for parent type
- typ0 = p.parseType(nil)
- }
-
- p.expect('=')
- var typ types.Type
- var val constant.Value
- switch p.tok {
- case scanner.Ident:
- // bool_lit
- if p.lit != "true" && p.lit != "false" {
- p.error("expected true or false")
- }
- typ = types.Typ[types.UntypedBool]
- val = constant.MakeBool(p.lit == "true")
- p.next()
-
- case '-', scanner.Int:
- // int_lit
- typ, val = p.parseNumber()
-
- case '(':
- // complex_lit or rune_lit
- p.next()
- if p.tok == scanner.Char {
- p.next()
- p.expect('+')
- typ = types.Typ[types.UntypedRune]
- _, val = p.parseNumber()
- p.expect(')')
- break
- }
- _, re := p.parseNumber()
- p.expect('+')
- _, im := p.parseNumber()
- p.expectKeyword("i")
- p.expect(')')
- typ = types.Typ[types.UntypedComplex]
- val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
-
- case scanner.Char:
- // rune_lit
- typ = types.Typ[types.UntypedRune]
- val = constant.MakeFromLiteral(p.lit, token.CHAR, 0)
- p.next()
-
- case scanner.String:
- // string_lit
- typ = types.Typ[types.UntypedString]
- val = constant.MakeFromLiteral(p.lit, token.STRING, 0)
- p.next()
-
- default:
- p.errorf("expected literal got %s", scanner.TokenString(p.tok))
- }
-
- if typ0 == nil {
- typ0 = typ
- }
-
- pkg.Scope().Insert(types.NewConst(token.NoPos, pkg, name, typ0, val))
-}
-
-// TypeDecl = "type" ExportedName Type .
-//
-func (p *parser) parseTypeDecl() {
- p.expectKeyword("type")
- pkg, name := p.parseExportedName()
- obj := declTypeName(pkg, name)
-
- // The type object may have been imported before and thus already
- // have a type associated with it. We still need to parse the type
- // structure, but throw it away if the object already has a type.
- // This ensures that all imports refer to the same type object for
- // a given type declaration.
- typ := p.parseType(pkg)
-
- if name := obj.Type().(*types.Named); name.Underlying() == nil {
- name.SetUnderlying(typ)
- }
-}
-
-// VarDecl = "var" ExportedName Type .
-//
-func (p *parser) parseVarDecl() {
- p.expectKeyword("var")
- pkg, name := p.parseExportedName()
- typ := p.parseType(pkg)
- pkg.Scope().Insert(types.NewVar(token.NoPos, pkg, name, typ))
-}
-
-// Func = Signature [ Body ] .
-// Body = "{" ... "}" .
-//
-func (p *parser) parseFunc(recv *types.Var) *types.Signature {
- sig := p.parseSignature(recv)
- if p.tok == '{' {
- p.next()
- for i := 1; i > 0; p.next() {
- switch p.tok {
- case '{':
- i++
- case '}':
- i--
- }
- }
- }
- return sig
-}
-
-// MethodDecl = "func" Receiver Name Func .
-// Receiver = "(" ( identifier | "?" ) [ "*" ] ExportedName ")" .
-//
-func (p *parser) parseMethodDecl() {
- // "func" already consumed
- p.expect('(')
- recv, _ := p.parseParameter() // receiver
- p.expect(')')
-
- // determine receiver base type object
- base := deref(recv.Type()).(*types.Named)
-
- // parse method name, signature, and possibly inlined body
- _, name := p.parseName(nil, false)
- sig := p.parseFunc(recv)
-
- // methods always belong to the same package as the base type object
- pkg := base.Obj().Pkg()
-
- // add method to type unless type was imported before
- // and method exists already
- // TODO(gri) This leads to a quadratic algorithm - ok for now because method counts are small.
- base.AddMethod(types.NewFunc(token.NoPos, pkg, name, sig))
-}
-
-// FuncDecl = "func" ExportedName Func .
-//
-func (p *parser) parseFuncDecl() {
- // "func" already consumed
- pkg, name := p.parseExportedName()
- typ := p.parseFunc(nil)
- pkg.Scope().Insert(types.NewFunc(token.NoPos, pkg, name, typ))
-}
-
-// Decl = [ ImportDecl | ConstDecl | TypeDecl | VarDecl | FuncDecl | MethodDecl ] "\n" .
-//
-func (p *parser) parseDecl() {
- if p.tok == scanner.Ident {
- switch p.lit {
- case "import":
- p.parseImportDecl()
- case "const":
- p.parseConstDecl()
- case "type":
- p.parseTypeDecl()
- case "var":
- p.parseVarDecl()
- case "func":
- p.next() // look ahead
- if p.tok == '(' {
- p.parseMethodDecl()
- } else {
- p.parseFuncDecl()
- }
- }
- }
- p.expect('\n')
-}
-
-// ----------------------------------------------------------------------------
-// Export
-
-// Export = "PackageClause { Decl } "$$" .
-// PackageClause = "package" PackageName [ "safe" ] "\n" .
-//
-func (p *parser) parseExport() *types.Package {
- p.expectKeyword("package")
- name := p.parsePackageName()
- if p.tok == scanner.Ident && p.lit == "safe" {
- // package was compiled with -u option - ignore
- p.next()
- }
- p.expect('\n')
-
- pkg := p.getPkg(p.id, name)
-
- for p.tok != '$' && p.tok != scanner.EOF {
- p.parseDecl()
- }
-
- if ch := p.scanner.Peek(); p.tok != '$' || ch != '$' {
- // don't call next()/expect() since reading past the
- // export data may cause scanner errors (e.g. NUL chars)
- p.errorf("expected '$$', got %s %c", scanner.TokenString(p.tok), ch)
- }
-
- if n := p.scanner.ErrorCount; n != 0 {
- p.errorf("expected no scanner errors, got %d", n)
- }
-
- // Record all locally referenced packages as imports.
- var imports []*types.Package
- for id, pkg2 := range p.localPkgs {
- if pkg2.Name() == "" {
- p.errorf("%s package has no name", id)
- }
- if id == p.id {
- continue // avoid self-edge
- }
- imports = append(imports, pkg2)
- }
- sort.Sort(byPath(imports))
- pkg.SetImports(imports)
-
- // package was imported completely and without errors
- pkg.MarkComplete()
-
- return pkg
-}
-
-type byPath []*types.Package
-
-func (a byPath) Len() int { return len(a) }
-func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() }
diff --git a/go/internal/gcimporter/gcimporter_test.go b/go/internal/gcimporter/gcimporter_test.go
deleted file mode 100644
index 6baab0128..000000000
--- a/go/internal/gcimporter/gcimporter_test.go
+++ /dev/null
@@ -1,611 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file is a copy of $GOROOT/src/go/internal/gcimporter/gcimporter_test.go,
-// adjusted to make it build with code from (std lib) internal/testenv copied.
-
-package gcimporter
-
-import (
- "bytes"
- "fmt"
- "go/build"
- "go/constant"
- "go/types"
- "io/ioutil"
- "os"
- "os/exec"
- "path/filepath"
- "runtime"
- "strings"
- "testing"
- "time"
-
- "golang.org/x/tools/internal/testenv"
-)
-
-func TestMain(m *testing.M) {
- testenv.ExitIfSmallMachine()
- os.Exit(m.Run())
-}
-
-// ----------------------------------------------------------------------------
-
-func needsCompiler(t *testing.T, compiler string) {
- if runtime.Compiler == compiler {
- return
- }
- switch compiler {
- case "gc":
- t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
- }
-}
-
-// compile runs the compiler on filename, with dirname as the working directory,
-// and writes the output file to outdirname.
-func compile(t *testing.T, dirname, filename, outdirname string) string {
- testenv.NeedsGoBuild(t)
-
- // filename must end with ".go"
- if !strings.HasSuffix(filename, ".go") {
- t.Fatalf("filename doesn't end in .go: %s", filename)
- }
- basename := filepath.Base(filename)
- outname := filepath.Join(outdirname, basename[:len(basename)-2]+"o")
- cmd := exec.Command("go", "tool", "compile", "-p=p", "-o", outname, filename)
- cmd.Dir = dirname
- out, err := cmd.CombinedOutput()
- if err != nil {
- t.Logf("%s", out)
- t.Fatalf("go tool compile %s failed: %s", filename, err)
- }
- return outname
-}
-
-func testPath(t *testing.T, path, srcDir string) *types.Package {
- t0 := time.Now()
- pkg, err := Import(make(map[string]*types.Package), path, srcDir, nil)
- if err != nil {
- t.Errorf("testPath(%s): %s", path, err)
- return nil
- }
- t.Logf("testPath(%s): %v", path, time.Since(t0))
- return pkg
-}
-
-const maxTime = 30 * time.Second
-
-func testDir(t *testing.T, dir string, endTime time.Time) (nimports int) {
- dirname := filepath.Join(runtime.GOROOT(), "pkg", runtime.GOOS+"_"+runtime.GOARCH, dir)
- list, err := ioutil.ReadDir(dirname)
- if err != nil {
- t.Fatalf("testDir(%s): %s", dirname, err)
- }
- for _, f := range list {
- if time.Now().After(endTime) {
- t.Log("testing time used up")
- return
- }
- switch {
- case !f.IsDir():
- // try extensions
- for _, ext := range pkgExts {
- if strings.HasSuffix(f.Name(), ext) {
- name := f.Name()[0 : len(f.Name())-len(ext)] // remove extension
- if testPath(t, filepath.Join(dir, name), dir) != nil {
- nimports++
- }
- }
- }
- case f.IsDir():
- nimports += testDir(t, filepath.Join(dir, f.Name()), endTime)
- }
- }
- return
-}
-
-func mktmpdir(t *testing.T) string {
- tmpdir, err := ioutil.TempDir("", "gcimporter_test")
- if err != nil {
- t.Fatal("mktmpdir:", err)
- }
- if err := os.Mkdir(filepath.Join(tmpdir, "testdata"), 0700); err != nil {
- os.RemoveAll(tmpdir)
- t.Fatal("mktmpdir:", err)
- }
- return tmpdir
-}
-
-const testfile = "exports.go"
-
-func TestImportTestdata(t *testing.T) {
- needsCompiler(t, "gc")
-
- tmpdir := mktmpdir(t)
- defer os.RemoveAll(tmpdir)
-
- compile(t, "testdata", testfile, filepath.Join(tmpdir, "testdata"))
-
- // filename should end with ".go"
- filename := testfile[:len(testfile)-3]
- if pkg := testPath(t, "./testdata/"+filename, tmpdir); pkg != nil {
- // The package's Imports list must include all packages
- // explicitly imported by testfile, plus all packages
- // referenced indirectly via exported objects in testfile.
- // With the textual export format (when run against Go1.6),
- // the list may also include additional packages that are
- // not strictly required for import processing alone (they
- // are exported to err "on the safe side").
- // For now, we just test the presence of a few packages
- // that we know are there for sure.
- got := fmt.Sprint(pkg.Imports())
- for _, want := range []string{"go/ast", "go/token"} {
- if !strings.Contains(got, want) {
- t.Errorf(`Package("exports").Imports() = %s, does not contain %s`, got, want)
- }
- }
- }
-}
-
-func TestVersionHandling(t *testing.T) {
- if debug {
- t.Skip("TestVersionHandling panics in debug mode")
- }
-
- // This package only handles gc export data.
- needsCompiler(t, "gc")
-
- const dir = "./testdata/versions"
- list, err := ioutil.ReadDir(dir)
- if err != nil {
- t.Fatal(err)
- }
-
- tmpdir := mktmpdir(t)
- defer os.RemoveAll(tmpdir)
- corruptdir := filepath.Join(tmpdir, "testdata", "versions")
- if err := os.Mkdir(corruptdir, 0700); err != nil {
- t.Fatal(err)
- }
-
- for _, f := range list {
- name := f.Name()
- if !strings.HasSuffix(name, ".a") {
- continue // not a package file
- }
- if strings.Contains(name, "corrupted") {
- continue // don't process a leftover corrupted file
- }
- pkgpath := "./" + name[:len(name)-2]
-
- if testing.Verbose() {
- t.Logf("importing %s", name)
- }
-
- // test that export data can be imported
- _, err := Import(make(map[string]*types.Package), pkgpath, dir, nil)
- if err != nil {
- // ok to fail if it fails with a newer version error for select files
- if strings.Contains(err.Error(), "newer version") {
- switch name {
- case "test_go1.11_999b.a", "test_go1.11_999i.a":
- continue
- }
- // fall through
- }
- t.Errorf("import %q failed: %v", pkgpath, err)
- continue
- }
-
- // create file with corrupted export data
- // 1) read file
- data, err := ioutil.ReadFile(filepath.Join(dir, name))
- if err != nil {
- t.Fatal(err)
- }
- // 2) find export data
- i := bytes.Index(data, []byte("\n$$B\n")) + 5
- j := bytes.Index(data[i:], []byte("\n$$\n")) + i
- if i < 0 || j < 0 || i > j {
- t.Fatalf("export data section not found (i = %d, j = %d)", i, j)
- }
- // 3) corrupt the data (increment every 7th byte)
- for k := j - 13; k >= i; k -= 7 {
- data[k]++
- }
- // 4) write the file
- pkgpath += "_corrupted"
- filename := filepath.Join(corruptdir, pkgpath) + ".a"
- ioutil.WriteFile(filename, data, 0666)
-
- // test that importing the corrupted file results in an error
- _, err = Import(make(map[string]*types.Package), pkgpath, corruptdir, nil)
- if err == nil {
- t.Errorf("import corrupted %q succeeded", pkgpath)
- } else if msg := err.Error(); !strings.Contains(msg, "version skew") {
- t.Errorf("import %q error incorrect (%s)", pkgpath, msg)
- }
- }
-}
-
-func TestImportStdLib(t *testing.T) {
- // This package only handles gc export data.
- needsCompiler(t, "gc")
-
- dt := maxTime
- if testing.Short() && os.Getenv("GO_BUILDER_NAME") == "" {
- dt = 10 * time.Millisecond
- }
- nimports := testDir(t, "", time.Now().Add(dt)) // installed packages
- t.Logf("tested %d imports", nimports)
-}
-
-var importedObjectTests = []struct {
- name string
- want string
-}{
- // non-interfaces
- {"crypto.Hash", "type Hash uint"},
- {"go/ast.ObjKind", "type ObjKind int"},
- {"go/types.Qualifier", "type Qualifier func(*Package) string"},
- {"go/types.Comparable", "func Comparable(T Type) bool"},
- {"math.Pi", "const Pi untyped float"},
- {"math.Sin", "func Sin(x float64) float64"},
- {"go/ast.NotNilFilter", "func NotNilFilter(_ string, v reflect.Value) bool"},
- {"go/internal/gcimporter.FindPkg", "func FindPkg(path string, srcDir string) (filename string, id string)"},
-
- // interfaces
- {"context.Context", "type Context interface{Deadline() (deadline time.Time, ok bool); Done() <-chan struct{}; Err() error; Value(key any) any}"},
- {"crypto.Decrypter", "type Decrypter interface{Decrypt(rand io.Reader, msg []byte, opts DecrypterOpts) (plaintext []byte, err error); Public() PublicKey}"},
- {"encoding.BinaryMarshaler", "type BinaryMarshaler interface{MarshalBinary() (data []byte, err error)}"},
- {"io.Reader", "type Reader interface{Read(p []byte) (n int, err error)}"},
- {"io.ReadWriter", "type ReadWriter interface{Reader; Writer}"},
- {"go/ast.Node", "type Node interface{End() go/token.Pos; Pos() go/token.Pos}"},
- {"go/types.Type", "type Type interface{String() string; Underlying() Type}"},
-}
-
-// TODO(rsc): Delete this init func after x/tools no longer needs to test successfully with Go 1.17.
-func init() {
- if build.Default.ReleaseTags[len(build.Default.ReleaseTags)-1] <= "go1.17" {
- for i := range importedObjectTests {
- if importedObjectTests[i].name == "context.Context" {
- // Expand any to interface{}.
- importedObjectTests[i].want = "type Context interface{Deadline() (deadline time.Time, ok bool); Done() <-chan struct{}; Err() error; Value(key interface{}) interface{}}"
- }
- }
- }
-}
-
-func TestImportedTypes(t *testing.T) {
- testenv.NeedsGo1Point(t, 11)
- // This package only handles gc export data.
- needsCompiler(t, "gc")
-
- for _, test := range importedObjectTests {
- obj := importObject(t, test.name)
- if obj == nil {
- continue // error reported elsewhere
- }
- got := types.ObjectString(obj, types.RelativeTo(obj.Pkg()))
-
- // TODO(rsc): Delete this block once go.dev/cl/368254 lands.
- if got != test.want && test.want == strings.ReplaceAll(got, "interface{}", "any") {
- got = test.want
- }
-
- if got != test.want {
- t.Errorf("%s: got %q; want %q", test.name, got, test.want)
- }
-
- if named, _ := obj.Type().(*types.Named); named != nil {
- verifyInterfaceMethodRecvs(t, named, 0)
- }
- }
-}
-
-func TestImportedConsts(t *testing.T) {
- testenv.NeedsGo1Point(t, 11)
- tests := []struct {
- name string
- want constant.Kind
- }{
- {"math.Pi", constant.Float},
- {"math.MaxFloat64", constant.Float},
- {"math.MaxInt64", constant.Int},
- }
-
- for _, test := range tests {
- obj := importObject(t, test.name)
- if got := obj.(*types.Const).Val().Kind(); got != test.want {
- t.Errorf("%s: imported as constant.Kind(%v), want constant.Kind(%v)", test.name, got, test.want)
- }
- }
-}
-
-// importObject imports the object specified by a name of the form
-// <import path>.<object name>, e.g. go/types.Type.
-//
-// If any errors occur they are reported via t and the resulting object will
-// be nil.
-func importObject(t *testing.T, name string) types.Object {
- s := strings.Split(name, ".")
- if len(s) != 2 {
- t.Fatal("inconsistent test data")
- }
- importPath := s[0]
- objName := s[1]
-
- pkg, err := Import(make(map[string]*types.Package), importPath, ".", nil)
- if err != nil {
- t.Error(err)
- return nil
- }
-
- obj := pkg.Scope().Lookup(objName)
- if obj == nil {
- t.Errorf("%s: object not found", name)
- return nil
- }
- return obj
-}
-
-// verifyInterfaceMethodRecvs verifies that method receiver types
-// are named if the methods belong to a named interface type.
-func verifyInterfaceMethodRecvs(t *testing.T, named *types.Named, level int) {
- // avoid endless recursion in case of an embedding bug that lead to a cycle
- if level > 10 {
- t.Errorf("%s: embeds itself", named)
- return
- }
-
- iface, _ := named.Underlying().(*types.Interface)
- if iface == nil {
- return // not an interface
- }
-
- // check explicitly declared methods
- for i := 0; i < iface.NumExplicitMethods(); i++ {
- m := iface.ExplicitMethod(i)
- recv := m.Type().(*types.Signature).Recv()
- if recv == nil {
- t.Errorf("%s: missing receiver type", m)
- continue
- }
- if recv.Type() != named {
- t.Errorf("%s: got recv type %s; want %s", m, recv.Type(), named)
- }
- }
-
- // check embedded interfaces (if they are named, too)
- for i := 0; i < iface.NumEmbeddeds(); i++ {
- // embedding of interfaces cannot have cycles; recursion will terminate
- if etype, _ := iface.EmbeddedType(i).(*types.Named); etype != nil {
- verifyInterfaceMethodRecvs(t, etype, level+1)
- }
- }
-}
-
-func TestIssue5815(t *testing.T) {
- // This package only handles gc export data.
- needsCompiler(t, "gc")
-
- pkg := importPkg(t, "strings", ".")
-
- scope := pkg.Scope()
- for _, name := range scope.Names() {
- obj := scope.Lookup(name)
- if obj.Pkg() == nil {
- t.Errorf("no pkg for %s", obj)
- }
- if tname, _ := obj.(*types.TypeName); tname != nil {
- named := tname.Type().(*types.Named)
- for i := 0; i < named.NumMethods(); i++ {
- m := named.Method(i)
- if m.Pkg() == nil {
- t.Errorf("no pkg for %s", m)
- }
- }
- }
- }
-}
-
-// Smoke test to ensure that imported methods get the correct package.
-func TestCorrectMethodPackage(t *testing.T) {
- // This package only handles gc export data.
- needsCompiler(t, "gc")
-
- imports := make(map[string]*types.Package)
- _, err := Import(imports, "net/http", ".", nil)
- if err != nil {
- t.Fatal(err)
- }
-
- mutex := imports["sync"].Scope().Lookup("Mutex").(*types.TypeName).Type()
- mset := types.NewMethodSet(types.NewPointer(mutex)) // methods of *sync.Mutex
- sel := mset.Lookup(nil, "Lock")
- lock := sel.Obj().(*types.Func)
- if got, want := lock.Pkg().Path(), "sync"; got != want {
- t.Errorf("got package path %q; want %q", got, want)
- }
-}
-
-func TestIssue13566(t *testing.T) {
- // This package only handles gc export data.
- needsCompiler(t, "gc")
-
- // On windows, we have to set the -D option for the compiler to avoid having a drive
- // letter and an illegal ':' in the import path - just skip it (see also issue #3483).
- if runtime.GOOS == "windows" {
- t.Skip("avoid dealing with relative paths/drive letters on windows")
- }
-
- tmpdir := mktmpdir(t)
- defer os.RemoveAll(tmpdir)
- testoutdir := filepath.Join(tmpdir, "testdata")
-
- // b.go needs to be compiled from the output directory so that the compiler can
- // find the compiled package a. We pass the full path to compile() so that we
- // don't have to copy the file to that directory.
- bpath, err := filepath.Abs(filepath.Join("testdata", "b.go"))
- if err != nil {
- t.Fatal(err)
- }
- compile(t, "testdata", "a.go", testoutdir)
- compile(t, testoutdir, bpath, testoutdir)
-
- // import must succeed (test for issue at hand)
- pkg := importPkg(t, "./testdata/b", tmpdir)
-
- // make sure all indirectly imported packages have names
- for _, imp := range pkg.Imports() {
- if imp.Name() == "" {
- t.Errorf("no name for %s package", imp.Path())
- }
- }
-}
-
-func TestIssue13898(t *testing.T) {
- // This package only handles gc export data.
- needsCompiler(t, "gc")
-
- // import go/internal/gcimporter which imports go/types partially
- imports := make(map[string]*types.Package)
- _, err := Import(imports, "go/internal/gcimporter", ".", nil)
- if err != nil {
- t.Fatal(err)
- }
-
- // look for go/types package
- var goTypesPkg *types.Package
- for path, pkg := range imports {
- if path == "go/types" {
- goTypesPkg = pkg
- break
- }
- }
- if goTypesPkg == nil {
- t.Fatal("go/types not found")
- }
-
- // look for go/types.Object type
- obj := lookupObj(t, goTypesPkg.Scope(), "Object")
- typ, ok := obj.Type().(*types.Named)
- if !ok {
- t.Fatalf("go/types.Object type is %v; wanted named type", typ)
- }
-
- // lookup go/types.Object.Pkg method
- m, index, indirect := types.LookupFieldOrMethod(typ, false, nil, "Pkg")
- if m == nil {
- t.Fatalf("go/types.Object.Pkg not found (index = %v, indirect = %v)", index, indirect)
- }
-
- // the method must belong to go/types
- if m.Pkg().Path() != "go/types" {
- t.Fatalf("found %v; want go/types", m.Pkg())
- }
-}
-
-func TestIssue15517(t *testing.T) {
- // This package only handles gc export data.
- needsCompiler(t, "gc")
-
- // On windows, we have to set the -D option for the compiler to avoid having a drive
- // letter and an illegal ':' in the import path - just skip it (see also issue #3483).
- if runtime.GOOS == "windows" {
- t.Skip("avoid dealing with relative paths/drive letters on windows")
- }
-
- tmpdir := mktmpdir(t)
- defer os.RemoveAll(tmpdir)
-
- compile(t, "testdata", "p.go", filepath.Join(tmpdir, "testdata"))
-
- // Multiple imports of p must succeed without redeclaration errors.
- // We use an import path that's not cleaned up so that the eventual
- // file path for the package is different from the package path; this
- // will expose the error if it is present.
- //
- // (Issue: Both the textual and the binary importer used the file path
- // of the package to be imported as key into the shared packages map.
- // However, the binary importer then used the package path to identify
- // the imported package to mark it as complete; effectively marking the
- // wrong package as complete. By using an "unclean" package path, the
- // file and package path are different, exposing the problem if present.
- // The same issue occurs with vendoring.)
- imports := make(map[string]*types.Package)
- for i := 0; i < 3; i++ {
- if _, err := Import(imports, "./././testdata/p", tmpdir, nil); err != nil {
- t.Fatal(err)
- }
- }
-}
-
-func TestIssue15920(t *testing.T) {
- // This package only handles gc export data.
- needsCompiler(t, "gc")
-
- // On windows, we have to set the -D option for the compiler to avoid having a drive
- // letter and an illegal ':' in the import path - just skip it (see also issue #3483).
- if runtime.GOOS == "windows" {
- t.Skip("avoid dealing with relative paths/drive letters on windows")
- }
-
- compileAndImportPkg(t, "issue15920")
-}
-
-func TestIssue20046(t *testing.T) {
- // This package only handles gc export data.
- needsCompiler(t, "gc")
-
- // On windows, we have to set the -D option for the compiler to avoid having a drive
- // letter and an illegal ':' in the import path - just skip it (see also issue #3483).
- if runtime.GOOS == "windows" {
- t.Skip("avoid dealing with relative paths/drive letters on windows")
- }
-
- // "./issue20046".V.M must exist
- pkg := compileAndImportPkg(t, "issue20046")
- obj := lookupObj(t, pkg.Scope(), "V")
- if m, index, indirect := types.LookupFieldOrMethod(obj.Type(), false, nil, "M"); m == nil {
- t.Fatalf("V.M not found (index = %v, indirect = %v)", index, indirect)
- }
-}
-
-func TestIssue25301(t *testing.T) {
- testenv.NeedsGo1Point(t, 11)
- // This package only handles gc export data.
- needsCompiler(t, "gc")
-
- // On windows, we have to set the -D option for the compiler to avoid having a drive
- // letter and an illegal ':' in the import path - just skip it (see also issue #3483).
- if runtime.GOOS == "windows" {
- t.Skip("avoid dealing with relative paths/drive letters on windows")
- }
-
- compileAndImportPkg(t, "issue25301")
-}
-
-func importPkg(t *testing.T, path, srcDir string) *types.Package {
- pkg, err := Import(make(map[string]*types.Package), path, srcDir, nil)
- if err != nil {
- t.Fatal(err)
- }
- return pkg
-}
-
-func compileAndImportPkg(t *testing.T, name string) *types.Package {
- tmpdir := mktmpdir(t)
- defer os.RemoveAll(tmpdir)
- compile(t, "testdata", name+".go", filepath.Join(tmpdir, "testdata"))
- return importPkg(t, "./testdata/"+name, tmpdir)
-}
-
-func lookupObj(t *testing.T, scope *types.Scope, name string) types.Object {
- if obj := scope.Lookup(name); obj != nil {
- return obj
- }
- t.Fatalf("%s not found", name)
- return nil
-}
diff --git a/go/internal/gcimporter/iexport.go b/go/internal/gcimporter/iexport.go
deleted file mode 100644
index 9a4ff329e..000000000
--- a/go/internal/gcimporter/iexport.go
+++ /dev/null
@@ -1,1010 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Indexed binary package export.
-// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go;
-// see that file for specification of the format.
-
-package gcimporter
-
-import (
- "bytes"
- "encoding/binary"
- "fmt"
- "go/ast"
- "go/constant"
- "go/token"
- "go/types"
- "io"
- "math/big"
- "reflect"
- "sort"
- "strconv"
- "strings"
-
- "golang.org/x/tools/internal/typeparams"
-)
-
-// Current bundled export format version. Increase with each format change.
-// 0: initial implementation
-const bundleVersion = 0
-
-// IExportData writes indexed export data for pkg to out.
-//
-// If no file set is provided, position info will be missing.
-// The package path of the top-level package will not be recorded,
-// so that calls to IImportData can override with a provided package path.
-func IExportData(out io.Writer, fset *token.FileSet, pkg *types.Package) error {
- return iexportCommon(out, fset, false, iexportVersion, []*types.Package{pkg})
-}
-
-// IExportBundle writes an indexed export bundle for pkgs to out.
-func IExportBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error {
- return iexportCommon(out, fset, true, iexportVersion, pkgs)
-}
-
-func iexportCommon(out io.Writer, fset *token.FileSet, bundle bool, version int, pkgs []*types.Package) (err error) {
- if !debug {
- defer func() {
- if e := recover(); e != nil {
- if ierr, ok := e.(internalError); ok {
- err = ierr
- return
- }
- // Not an internal error; panic again.
- panic(e)
- }
- }()
- }
-
- p := iexporter{
- fset: fset,
- version: version,
- allPkgs: map[*types.Package]bool{},
- stringIndex: map[string]uint64{},
- declIndex: map[types.Object]uint64{},
- tparamNames: map[types.Object]string{},
- typIndex: map[types.Type]uint64{},
- }
- if !bundle {
- p.localpkg = pkgs[0]
- }
-
- for i, pt := range predeclared() {
- p.typIndex[pt] = uint64(i)
- }
- if len(p.typIndex) > predeclReserved {
- panic(internalErrorf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved))
- }
-
- // Initialize work queue with exported declarations.
- for _, pkg := range pkgs {
- scope := pkg.Scope()
- for _, name := range scope.Names() {
- if ast.IsExported(name) {
- p.pushDecl(scope.Lookup(name))
- }
- }
-
- if bundle {
- // Ensure pkg and its imports are included in the index.
- p.allPkgs[pkg] = true
- for _, imp := range pkg.Imports() {
- p.allPkgs[imp] = true
- }
- }
- }
-
- // Loop until no more work.
- for !p.declTodo.empty() {
- p.doDecl(p.declTodo.popHead())
- }
-
- // Append indices to data0 section.
- dataLen := uint64(p.data0.Len())
- w := p.newWriter()
- w.writeIndex(p.declIndex)
-
- if bundle {
- w.uint64(uint64(len(pkgs)))
- for _, pkg := range pkgs {
- w.pkg(pkg)
- imps := pkg.Imports()
- w.uint64(uint64(len(imps)))
- for _, imp := range imps {
- w.pkg(imp)
- }
- }
- }
- w.flush()
-
- // Assemble header.
- var hdr intWriter
- if bundle {
- hdr.uint64(bundleVersion)
- }
- hdr.uint64(uint64(p.version))
- hdr.uint64(uint64(p.strings.Len()))
- hdr.uint64(dataLen)
-
- // Flush output.
- io.Copy(out, &hdr)
- io.Copy(out, &p.strings)
- io.Copy(out, &p.data0)
-
- return nil
-}
-
-// writeIndex writes out an object index. mainIndex indicates whether
-// we're writing out the main index, which is also read by
-// non-compiler tools and includes a complete package description
-// (i.e., name and height).
-func (w *exportWriter) writeIndex(index map[types.Object]uint64) {
- type pkgObj struct {
- obj types.Object
- name string // qualified name; differs from obj.Name for type params
- }
- // Build a map from packages to objects from that package.
- pkgObjs := map[*types.Package][]pkgObj{}
-
- // For the main index, make sure to include every package that
- // we reference, even if we're not exporting (or reexporting)
- // any symbols from it.
- if w.p.localpkg != nil {
- pkgObjs[w.p.localpkg] = nil
- }
- for pkg := range w.p.allPkgs {
- pkgObjs[pkg] = nil
- }
-
- for obj := range index {
- name := w.p.exportName(obj)
- pkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], pkgObj{obj, name})
- }
-
- var pkgs []*types.Package
- for pkg, objs := range pkgObjs {
- pkgs = append(pkgs, pkg)
-
- sort.Slice(objs, func(i, j int) bool {
- return objs[i].name < objs[j].name
- })
- }
-
- sort.Slice(pkgs, func(i, j int) bool {
- return w.exportPath(pkgs[i]) < w.exportPath(pkgs[j])
- })
-
- w.uint64(uint64(len(pkgs)))
- for _, pkg := range pkgs {
- w.string(w.exportPath(pkg))
- w.string(pkg.Name())
- w.uint64(uint64(0)) // package height is not needed for go/types
-
- objs := pkgObjs[pkg]
- w.uint64(uint64(len(objs)))
- for _, obj := range objs {
- w.string(obj.name)
- w.uint64(index[obj.obj])
- }
- }
-}
-
-// exportName returns the 'exported' name of an object. It differs from
-// obj.Name() only for type parameters (see tparamExportName for details).
-func (p *iexporter) exportName(obj types.Object) (res string) {
- if name := p.tparamNames[obj]; name != "" {
- return name
- }
- return obj.Name()
-}
-
-type iexporter struct {
- fset *token.FileSet
- out *bytes.Buffer
- version int
-
- localpkg *types.Package
-
- // allPkgs tracks all packages that have been referenced by
- // the export data, so we can ensure to include them in the
- // main index.
- allPkgs map[*types.Package]bool
-
- declTodo objQueue
-
- strings intWriter
- stringIndex map[string]uint64
-
- data0 intWriter
- declIndex map[types.Object]uint64
- tparamNames map[types.Object]string // typeparam->exported name
- typIndex map[types.Type]uint64
-
- indent int // for tracing support
-}
-
-func (p *iexporter) trace(format string, args ...interface{}) {
- if !trace {
- // Call sites should also be guarded, but having this check here allows
- // easily enabling/disabling debug trace statements.
- return
- }
- fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...)
-}
-
-// stringOff returns the offset of s within the string section.
-// If not already present, it's added to the end.
-func (p *iexporter) stringOff(s string) uint64 {
- off, ok := p.stringIndex[s]
- if !ok {
- off = uint64(p.strings.Len())
- p.stringIndex[s] = off
-
- p.strings.uint64(uint64(len(s)))
- p.strings.WriteString(s)
- }
- return off
-}
-
-// pushDecl adds n to the declaration work queue, if not already present.
-func (p *iexporter) pushDecl(obj types.Object) {
- // Package unsafe is known to the compiler and predeclared.
- // Caller should not ask us to do export it.
- if obj.Pkg() == types.Unsafe {
- panic("cannot export package unsafe")
- }
-
- if _, ok := p.declIndex[obj]; ok {
- return
- }
-
- p.declIndex[obj] = ^uint64(0) // mark obj present in work queue
- p.declTodo.pushTail(obj)
-}
-
-// exportWriter handles writing out individual data section chunks.
-type exportWriter struct {
- p *iexporter
-
- data intWriter
- currPkg *types.Package
- prevFile string
- prevLine int64
- prevColumn int64
-}
-
-func (w *exportWriter) exportPath(pkg *types.Package) string {
- if pkg == w.p.localpkg {
- return ""
- }
- return pkg.Path()
-}
-
-func (p *iexporter) doDecl(obj types.Object) {
- if trace {
- p.trace("exporting decl %v (%T)", obj, obj)
- p.indent++
- defer func() {
- p.indent--
- p.trace("=> %s", obj)
- }()
- }
- w := p.newWriter()
- w.setPkg(obj.Pkg(), false)
-
- switch obj := obj.(type) {
- case *types.Var:
- w.tag('V')
- w.pos(obj.Pos())
- w.typ(obj.Type(), obj.Pkg())
-
- case *types.Func:
- sig, _ := obj.Type().(*types.Signature)
- if sig.Recv() != nil {
- panic(internalErrorf("unexpected method: %v", sig))
- }
-
- // Function.
- if typeparams.ForSignature(sig).Len() == 0 {
- w.tag('F')
- } else {
- w.tag('G')
- }
- w.pos(obj.Pos())
- // The tparam list of the function type is the declaration of the type
- // params. So, write out the type params right now. Then those type params
- // will be referenced via their type offset (via typOff) in all other
- // places in the signature and function where they are used.
- //
- // While importing the type parameters, tparamList computes and records
- // their export name, so that it can be later used when writing the index.
- if tparams := typeparams.ForSignature(sig); tparams.Len() > 0 {
- w.tparamList(obj.Name(), tparams, obj.Pkg())
- }
- w.signature(sig)
-
- case *types.Const:
- w.tag('C')
- w.pos(obj.Pos())
- w.value(obj.Type(), obj.Val())
-
- case *types.TypeName:
- t := obj.Type()
-
- if tparam, ok := t.(*typeparams.TypeParam); ok {
- w.tag('P')
- w.pos(obj.Pos())
- constraint := tparam.Constraint()
- if p.version >= iexportVersionGo1_18 {
- implicit := false
- if iface, _ := constraint.(*types.Interface); iface != nil {
- implicit = typeparams.IsImplicit(iface)
- }
- w.bool(implicit)
- }
- w.typ(constraint, obj.Pkg())
- break
- }
-
- if obj.IsAlias() {
- w.tag('A')
- w.pos(obj.Pos())
- w.typ(t, obj.Pkg())
- break
- }
-
- // Defined type.
- named, ok := t.(*types.Named)
- if !ok {
- panic(internalErrorf("%s is not a defined type", t))
- }
-
- if typeparams.ForNamed(named).Len() == 0 {
- w.tag('T')
- } else {
- w.tag('U')
- }
- w.pos(obj.Pos())
-
- if typeparams.ForNamed(named).Len() > 0 {
- // While importing the type parameters, tparamList computes and records
- // their export name, so that it can be later used when writing the index.
- w.tparamList(obj.Name(), typeparams.ForNamed(named), obj.Pkg())
- }
-
- underlying := obj.Type().Underlying()
- w.typ(underlying, obj.Pkg())
-
- if types.IsInterface(t) {
- break
- }
-
- n := named.NumMethods()
- w.uint64(uint64(n))
- for i := 0; i < n; i++ {
- m := named.Method(i)
- w.pos(m.Pos())
- w.string(m.Name())
- sig, _ := m.Type().(*types.Signature)
-
- // Receiver type parameters are type arguments of the receiver type, so
- // their name must be qualified before exporting recv.
- if rparams := typeparams.RecvTypeParams(sig); rparams.Len() > 0 {
- prefix := obj.Name() + "." + m.Name()
- for i := 0; i < rparams.Len(); i++ {
- rparam := rparams.At(i)
- name := tparamExportName(prefix, rparam)
- w.p.tparamNames[rparam.Obj()] = name
- }
- }
- w.param(sig.Recv())
- w.signature(sig)
- }
-
- default:
- panic(internalErrorf("unexpected object: %v", obj))
- }
-
- p.declIndex[obj] = w.flush()
-}
-
-func (w *exportWriter) tag(tag byte) {
- w.data.WriteByte(tag)
-}
-
-func (w *exportWriter) pos(pos token.Pos) {
- if w.p.version >= iexportVersionPosCol {
- w.posV1(pos)
- } else {
- w.posV0(pos)
- }
-}
-
-func (w *exportWriter) posV1(pos token.Pos) {
- if w.p.fset == nil {
- w.int64(0)
- return
- }
-
- p := w.p.fset.Position(pos)
- file := p.Filename
- line := int64(p.Line)
- column := int64(p.Column)
-
- deltaColumn := (column - w.prevColumn) << 1
- deltaLine := (line - w.prevLine) << 1
-
- if file != w.prevFile {
- deltaLine |= 1
- }
- if deltaLine != 0 {
- deltaColumn |= 1
- }
-
- w.int64(deltaColumn)
- if deltaColumn&1 != 0 {
- w.int64(deltaLine)
- if deltaLine&1 != 0 {
- w.string(file)
- }
- }
-
- w.prevFile = file
- w.prevLine = line
- w.prevColumn = column
-}
-
-func (w *exportWriter) posV0(pos token.Pos) {
- if w.p.fset == nil {
- w.int64(0)
- return
- }
-
- p := w.p.fset.Position(pos)
- file := p.Filename
- line := int64(p.Line)
-
- // When file is the same as the last position (common case),
- // we can save a few bytes by delta encoding just the line
- // number.
- //
- // Note: Because data objects may be read out of order (or not
- // at all), we can only apply delta encoding within a single
- // object. This is handled implicitly by tracking prevFile and
- // prevLine as fields of exportWriter.
-
- if file == w.prevFile {
- delta := line - w.prevLine
- w.int64(delta)
- if delta == deltaNewFile {
- w.int64(-1)
- }
- } else {
- w.int64(deltaNewFile)
- w.int64(line) // line >= 0
- w.string(file)
- w.prevFile = file
- }
- w.prevLine = line
-}
-
-func (w *exportWriter) pkg(pkg *types.Package) {
- // Ensure any referenced packages are declared in the main index.
- w.p.allPkgs[pkg] = true
-
- w.string(w.exportPath(pkg))
-}
-
-func (w *exportWriter) qualifiedIdent(obj types.Object) {
- name := w.p.exportName(obj)
-
- // Ensure any referenced declarations are written out too.
- w.p.pushDecl(obj)
- w.string(name)
- w.pkg(obj.Pkg())
-}
-
-func (w *exportWriter) typ(t types.Type, pkg *types.Package) {
- w.data.uint64(w.p.typOff(t, pkg))
-}
-
-func (p *iexporter) newWriter() *exportWriter {
- return &exportWriter{p: p}
-}
-
-func (w *exportWriter) flush() uint64 {
- off := uint64(w.p.data0.Len())
- io.Copy(&w.p.data0, &w.data)
- return off
-}
-
-func (p *iexporter) typOff(t types.Type, pkg *types.Package) uint64 {
- off, ok := p.typIndex[t]
- if !ok {
- w := p.newWriter()
- w.doTyp(t, pkg)
- off = predeclReserved + w.flush()
- p.typIndex[t] = off
- }
- return off
-}
-
-func (w *exportWriter) startType(k itag) {
- w.data.uint64(uint64(k))
-}
-
-func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {
- if trace {
- w.p.trace("exporting type %s (%T)", t, t)
- w.p.indent++
- defer func() {
- w.p.indent--
- w.p.trace("=> %s", t)
- }()
- }
- switch t := t.(type) {
- case *types.Named:
- if targs := typeparams.NamedTypeArgs(t); targs.Len() > 0 {
- w.startType(instanceType)
- // TODO(rfindley): investigate if this position is correct, and if it
- // matters.
- w.pos(t.Obj().Pos())
- w.typeList(targs, pkg)
- w.typ(typeparams.NamedTypeOrigin(t), pkg)
- return
- }
- w.startType(definedType)
- w.qualifiedIdent(t.Obj())
-
- case *typeparams.TypeParam:
- w.startType(typeParamType)
- w.qualifiedIdent(t.Obj())
-
- case *types.Pointer:
- w.startType(pointerType)
- w.typ(t.Elem(), pkg)
-
- case *types.Slice:
- w.startType(sliceType)
- w.typ(t.Elem(), pkg)
-
- case *types.Array:
- w.startType(arrayType)
- w.uint64(uint64(t.Len()))
- w.typ(t.Elem(), pkg)
-
- case *types.Chan:
- w.startType(chanType)
- // 1 RecvOnly; 2 SendOnly; 3 SendRecv
- var dir uint64
- switch t.Dir() {
- case types.RecvOnly:
- dir = 1
- case types.SendOnly:
- dir = 2
- case types.SendRecv:
- dir = 3
- }
- w.uint64(dir)
- w.typ(t.Elem(), pkg)
-
- case *types.Map:
- w.startType(mapType)
- w.typ(t.Key(), pkg)
- w.typ(t.Elem(), pkg)
-
- case *types.Signature:
- w.startType(signatureType)
- w.setPkg(pkg, true)
- w.signature(t)
-
- case *types.Struct:
- w.startType(structType)
- w.setPkg(pkg, true)
-
- n := t.NumFields()
- w.uint64(uint64(n))
- for i := 0; i < n; i++ {
- f := t.Field(i)
- w.pos(f.Pos())
- w.string(f.Name())
- w.typ(f.Type(), pkg)
- w.bool(f.Anonymous())
- w.string(t.Tag(i)) // note (or tag)
- }
-
- case *types.Interface:
- w.startType(interfaceType)
- w.setPkg(pkg, true)
-
- n := t.NumEmbeddeds()
- w.uint64(uint64(n))
- for i := 0; i < n; i++ {
- ft := t.EmbeddedType(i)
- tPkg := pkg
- if named, _ := ft.(*types.Named); named != nil {
- w.pos(named.Obj().Pos())
- } else {
- w.pos(token.NoPos)
- }
- w.typ(ft, tPkg)
- }
-
- n = t.NumExplicitMethods()
- w.uint64(uint64(n))
- for i := 0; i < n; i++ {
- m := t.ExplicitMethod(i)
- w.pos(m.Pos())
- w.string(m.Name())
- sig, _ := m.Type().(*types.Signature)
- w.signature(sig)
- }
-
- case *typeparams.Union:
- w.startType(unionType)
- nt := t.Len()
- w.uint64(uint64(nt))
- for i := 0; i < nt; i++ {
- term := t.Term(i)
- w.bool(term.Tilde())
- w.typ(term.Type(), pkg)
- }
-
- default:
- panic(internalErrorf("unexpected type: %v, %v", t, reflect.TypeOf(t)))
- }
-}
-
-func (w *exportWriter) setPkg(pkg *types.Package, write bool) {
- if write {
- w.pkg(pkg)
- }
-
- w.currPkg = pkg
-}
-
-func (w *exportWriter) signature(sig *types.Signature) {
- w.paramList(sig.Params())
- w.paramList(sig.Results())
- if sig.Params().Len() > 0 {
- w.bool(sig.Variadic())
- }
-}
-
-func (w *exportWriter) typeList(ts *typeparams.TypeList, pkg *types.Package) {
- w.uint64(uint64(ts.Len()))
- for i := 0; i < ts.Len(); i++ {
- w.typ(ts.At(i), pkg)
- }
-}
-
-func (w *exportWriter) tparamList(prefix string, list *typeparams.TypeParamList, pkg *types.Package) {
- ll := uint64(list.Len())
- w.uint64(ll)
- for i := 0; i < list.Len(); i++ {
- tparam := list.At(i)
- // Set the type parameter exportName before exporting its type.
- exportName := tparamExportName(prefix, tparam)
- w.p.tparamNames[tparam.Obj()] = exportName
- w.typ(list.At(i), pkg)
- }
-}
-
-const blankMarker = "$"
-
-// tparamExportName returns the 'exported' name of a type parameter, which
-// differs from its actual object name: it is prefixed with a qualifier, and
-// blank type parameter names are disambiguated by their index in the type
-// parameter list.
-func tparamExportName(prefix string, tparam *typeparams.TypeParam) string {
- assert(prefix != "")
- name := tparam.Obj().Name()
- if name == "_" {
- name = blankMarker + strconv.Itoa(tparam.Index())
- }
- return prefix + "." + name
-}
-
-// tparamName returns the real name of a type parameter, after stripping its
-// qualifying prefix and reverting blank-name encoding. See tparamExportName
-// for details.
-func tparamName(exportName string) string {
- // Remove the "path" from the type param name that makes it unique.
- ix := strings.LastIndex(exportName, ".")
- if ix < 0 {
- errorf("malformed type parameter export name %s: missing prefix", exportName)
- }
- name := exportName[ix+1:]
- if strings.HasPrefix(name, blankMarker) {
- return "_"
- }
- return name
-}
-
-func (w *exportWriter) paramList(tup *types.Tuple) {
- n := tup.Len()
- w.uint64(uint64(n))
- for i := 0; i < n; i++ {
- w.param(tup.At(i))
- }
-}
-
-func (w *exportWriter) param(obj types.Object) {
- w.pos(obj.Pos())
- w.localIdent(obj)
- w.typ(obj.Type(), obj.Pkg())
-}
-
-func (w *exportWriter) value(typ types.Type, v constant.Value) {
- w.typ(typ, nil)
- if w.p.version >= iexportVersionGo1_18 {
- w.int64(int64(v.Kind()))
- }
-
- switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType {
- case types.IsBoolean:
- w.bool(constant.BoolVal(v))
- case types.IsInteger:
- var i big.Int
- if i64, exact := constant.Int64Val(v); exact {
- i.SetInt64(i64)
- } else if ui64, exact := constant.Uint64Val(v); exact {
- i.SetUint64(ui64)
- } else {
- i.SetString(v.ExactString(), 10)
- }
- w.mpint(&i, typ)
- case types.IsFloat:
- f := constantToFloat(v)
- w.mpfloat(f, typ)
- case types.IsComplex:
- w.mpfloat(constantToFloat(constant.Real(v)), typ)
- w.mpfloat(constantToFloat(constant.Imag(v)), typ)
- case types.IsString:
- w.string(constant.StringVal(v))
- default:
- if b.Kind() == types.Invalid {
- // package contains type errors
- break
- }
- panic(internalErrorf("unexpected type %v (%v)", typ, typ.Underlying()))
- }
-}
-
-// constantToFloat converts a constant.Value with kind constant.Float to a
-// big.Float.
-func constantToFloat(x constant.Value) *big.Float {
- x = constant.ToFloat(x)
- // Use the same floating-point precision (512) as cmd/compile
- // (see Mpprec in cmd/compile/internal/gc/mpfloat.go).
- const mpprec = 512
- var f big.Float
- f.SetPrec(mpprec)
- if v, exact := constant.Float64Val(x); exact {
- // float64
- f.SetFloat64(v)
- } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int {
- // TODO(gri): add big.Rat accessor to constant.Value.
- n := valueToRat(num)
- d := valueToRat(denom)
- f.SetRat(n.Quo(n, d))
- } else {
- // Value too large to represent as a fraction => inaccessible.
- // TODO(gri): add big.Float accessor to constant.Value.
- _, ok := f.SetString(x.ExactString())
- assert(ok)
- }
- return &f
-}
-
-// mpint exports a multi-precision integer.
-//
-// For unsigned types, small values are written out as a single
-// byte. Larger values are written out as a length-prefixed big-endian
-// byte string, where the length prefix is encoded as its complement.
-// For example, bytes 0, 1, and 2 directly represent the integer
-// values 0, 1, and 2; while bytes 255, 254, and 253 indicate a 1-,
-// 2-, and 3-byte big-endian string follow.
-//
-// Encoding for signed types use the same general approach as for
-// unsigned types, except small values use zig-zag encoding and the
-// bottom bit of length prefix byte for large values is reserved as a
-// sign bit.
-//
-// The exact boundary between small and large encodings varies
-// according to the maximum number of bytes needed to encode a value
-// of type typ. As a special case, 8-bit types are always encoded as a
-// single byte.
-//
-// TODO(mdempsky): Is this level of complexity really worthwhile?
-func (w *exportWriter) mpint(x *big.Int, typ types.Type) {
- basic, ok := typ.Underlying().(*types.Basic)
- if !ok {
- panic(internalErrorf("unexpected type %v (%T)", typ.Underlying(), typ.Underlying()))
- }
-
- signed, maxBytes := intSize(basic)
-
- negative := x.Sign() < 0
- if !signed && negative {
- panic(internalErrorf("negative unsigned integer; type %v, value %v", typ, x))
- }
-
- b := x.Bytes()
- if len(b) > 0 && b[0] == 0 {
- panic(internalErrorf("leading zeros"))
- }
- if uint(len(b)) > maxBytes {
- panic(internalErrorf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x))
- }
-
- maxSmall := 256 - maxBytes
- if signed {
- maxSmall = 256 - 2*maxBytes
- }
- if maxBytes == 1 {
- maxSmall = 256
- }
-
- // Check if x can use small value encoding.
- if len(b) <= 1 {
- var ux uint
- if len(b) == 1 {
- ux = uint(b[0])
- }
- if signed {
- ux <<= 1
- if negative {
- ux--
- }
- }
- if ux < maxSmall {
- w.data.WriteByte(byte(ux))
- return
- }
- }
-
- n := 256 - uint(len(b))
- if signed {
- n = 256 - 2*uint(len(b))
- if negative {
- n |= 1
- }
- }
- if n < maxSmall || n >= 256 {
- panic(internalErrorf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n))
- }
-
- w.data.WriteByte(byte(n))
- w.data.Write(b)
-}
-
-// mpfloat exports a multi-precision floating point number.
-//
-// The number's value is decomposed into mantissa × 2**exponent, where
-// mantissa is an integer. The value is written out as mantissa (as a
-// multi-precision integer) and then the exponent, except exponent is
-// omitted if mantissa is zero.
-func (w *exportWriter) mpfloat(f *big.Float, typ types.Type) {
- if f.IsInf() {
- panic("infinite constant")
- }
-
- // Break into f = mant × 2**exp, with 0.5 <= mant < 1.
- var mant big.Float
- exp := int64(f.MantExp(&mant))
-
- // Scale so that mant is an integer.
- prec := mant.MinPrec()
- mant.SetMantExp(&mant, int(prec))
- exp -= int64(prec)
-
- manti, acc := mant.Int(nil)
- if acc != big.Exact {
- panic(internalErrorf("mantissa scaling failed for %f (%s)", f, acc))
- }
- w.mpint(manti, typ)
- if manti.Sign() != 0 {
- w.int64(exp)
- }
-}
-
-func (w *exportWriter) bool(b bool) bool {
- var x uint64
- if b {
- x = 1
- }
- w.uint64(x)
- return b
-}
-
-func (w *exportWriter) int64(x int64) { w.data.int64(x) }
-func (w *exportWriter) uint64(x uint64) { w.data.uint64(x) }
-func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) }
-
-func (w *exportWriter) localIdent(obj types.Object) {
- // Anonymous parameters.
- if obj == nil {
- w.string("")
- return
- }
-
- name := obj.Name()
- if name == "_" {
- w.string("_")
- return
- }
-
- w.string(name)
-}
-
-type intWriter struct {
- bytes.Buffer
-}
-
-func (w *intWriter) int64(x int64) {
- var buf [binary.MaxVarintLen64]byte
- n := binary.PutVarint(buf[:], x)
- w.Write(buf[:n])
-}
-
-func (w *intWriter) uint64(x uint64) {
- var buf [binary.MaxVarintLen64]byte
- n := binary.PutUvarint(buf[:], x)
- w.Write(buf[:n])
-}
-
-func assert(cond bool) {
- if !cond {
- panic("internal error: assertion failed")
- }
-}
-
-// The below is copied from go/src/cmd/compile/internal/gc/syntax.go.
-
-// objQueue is a FIFO queue of types.Object. The zero value of objQueue is
-// a ready-to-use empty queue.
-type objQueue struct {
- ring []types.Object
- head, tail int
-}
-
-// empty returns true if q contains no Nodes.
-func (q *objQueue) empty() bool {
- return q.head == q.tail
-}
-
-// pushTail appends n to the tail of the queue.
-func (q *objQueue) pushTail(obj types.Object) {
- if len(q.ring) == 0 {
- q.ring = make([]types.Object, 16)
- } else if q.head+len(q.ring) == q.tail {
- // Grow the ring.
- nring := make([]types.Object, len(q.ring)*2)
- // Copy the old elements.
- part := q.ring[q.head%len(q.ring):]
- if q.tail-q.head <= len(part) {
- part = part[:q.tail-q.head]
- copy(nring, part)
- } else {
- pos := copy(nring, part)
- copy(nring[pos:], q.ring[:q.tail%len(q.ring)])
- }
- q.ring, q.head, q.tail = nring, 0, q.tail-q.head
- }
-
- q.ring[q.tail%len(q.ring)] = obj
- q.tail++
-}
-
-// popHead pops a node from the head of the queue. It panics if q is empty.
-func (q *objQueue) popHead() types.Object {
- if q.empty() {
- panic("dequeue empty")
- }
- obj := q.ring[q.head%len(q.ring)]
- q.head++
- return obj
-}
diff --git a/go/internal/gcimporter/iexport_go118_test.go b/go/internal/gcimporter/iexport_go118_test.go
deleted file mode 100644
index 5dfa2580f..000000000
--- a/go/internal/gcimporter/iexport_go118_test.go
+++ /dev/null
@@ -1,254 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.18
-// +build go1.18
-
-package gcimporter_test
-
-import (
- "bytes"
- "fmt"
- "go/ast"
- "go/importer"
- "go/parser"
- "go/token"
- "go/types"
- "os"
- "path/filepath"
- "runtime"
- "strings"
- "testing"
-
- "golang.org/x/tools/go/internal/gcimporter"
-)
-
-// TODO(rfindley): migrate this to testdata, as has been done in the standard library.
-func TestGenericExport(t *testing.T) {
- const src = `
-package generic
-
-type Any any
-
-type T[A, B any] struct { Left A; Right B }
-
-func (T[P, Q]) m() {}
-
-var X T[int, string] = T[int, string]{1, "hi"}
-
-func ToInt[P interface{ ~int }](p P) int { return int(p) }
-
-var IntID = ToInt[int]
-
-type G[C comparable] int
-
-func ImplicitFunc[T ~int]() {}
-
-type ImplicitType[T ~int] int
-
-// Exercise constant import/export
-const C1 = 42
-const C2 int = 42
-const C3 float64 = 42
-
-type Constraint[T any] interface {
- m(T)
-}
-
-// TODO(rfindley): revert to multiple blanks once the restriction on multiple
-// blanks is removed from the type checker.
-// type Blanks[_ any, _ Constraint[int]] int
-// func (Blanks[_, _]) m() {}
-type Blanks[_ any] int
-func (Blanks[_]) m() {}
-`
- testExportSrc(t, []byte(src))
-}
-
-func testExportSrc(t *testing.T, src []byte) {
- // This package only handles gc export data.
- if runtime.Compiler != "gc" {
- t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
- }
-
- fset := token.NewFileSet()
- f, err := parser.ParseFile(fset, "g.go", src, 0)
- if err != nil {
- t.Fatal(err)
- }
- conf := types.Config{
- Importer: importer.Default(),
- }
- pkg, err := conf.Check("", fset, []*ast.File{f}, nil)
- if err != nil {
- t.Fatal(err)
- }
-
- // export
- version := gcimporter.IExportVersion
- data, err := iexport(fset, version, pkg)
- if err != nil {
- t.Fatal(err)
- }
-
- testPkgData(t, fset, version, pkg, data)
-}
-
-func TestImportTypeparamTests(t *testing.T) {
- // Check go files in test/typeparam.
- rootDir := filepath.Join(runtime.GOROOT(), "test", "typeparam")
- list, err := os.ReadDir(rootDir)
- if err != nil {
- t.Fatal(err)
- }
-
- if isUnifiedBuilder() {
- t.Skip("unified export data format is currently unsupported")
- }
-
- for _, entry := range list {
- if entry.IsDir() || !strings.HasSuffix(entry.Name(), ".go") {
- // For now, only consider standalone go files.
- continue
- }
-
- t.Run(entry.Name(), func(t *testing.T) {
- filename := filepath.Join(rootDir, entry.Name())
- src, err := os.ReadFile(filename)
- if err != nil {
- t.Fatal(err)
- }
-
- if !bytes.HasPrefix(src, []byte("// run")) && !bytes.HasPrefix(src, []byte("// compile")) {
- // We're bypassing the logic of run.go here, so be conservative about
- // the files we consider in an attempt to make this test more robust to
- // changes in test/typeparams.
- t.Skipf("not detected as a run test")
- }
-
- testExportSrc(t, src)
- })
- }
-}
-
-func TestRecursiveExport_Issue51219(t *testing.T) {
- const srca = `
-package a
-
-type Interaction[DataT InteractionDataConstraint] struct {
-}
-
-type InteractionDataConstraint interface {
- []byte |
- UserCommandInteractionData
-}
-
-type UserCommandInteractionData struct {
- resolvedInteractionWithOptions
-}
-
-type resolvedInteractionWithOptions struct {
- Resolved Resolved
-}
-
-type Resolved struct {
- Users ResolvedData[User]
-}
-
-type ResolvedData[T ResolvedDataConstraint] map[uint64]T
-
-type ResolvedDataConstraint interface {
- User | Message
-}
-
-type User struct{}
-
-type Message struct {
- Interaction *Interaction[[]byte]
-}
-`
-
- const srcb = `
-package b
-
-import (
- "a"
-)
-
-// InteractionRequest is an incoming request Interaction
-type InteractionRequest[T a.InteractionDataConstraint] struct {
- a.Interaction[T]
-}
-`
-
- const srcp = `
-package p
-
-import (
- "b"
-)
-
-// ResponseWriterMock mocks corde's ResponseWriter interface
-type ResponseWriterMock struct {
- x b.InteractionRequest[[]byte]
-}
-`
-
- importer := &testImporter{
- src: map[string][]byte{
- "a": []byte(srca),
- "b": []byte(srcb),
- "p": []byte(srcp),
- },
- pkgs: make(map[string]*types.Package),
- }
- _, err := importer.Import("p")
- if err != nil {
- t.Fatal(err)
- }
-}
-
-// testImporter is a helper to test chains of imports using export data.
-type testImporter struct {
- src map[string][]byte // original source
- pkgs map[string]*types.Package // memoized imported packages
-}
-
-func (t *testImporter) Import(path string) (*types.Package, error) {
- if pkg, ok := t.pkgs[path]; ok {
- return pkg, nil
- }
- src, ok := t.src[path]
- if !ok {
- return nil, fmt.Errorf("unknown path %v", path)
- }
-
- // Type-check, but don't return this package directly.
- fset := token.NewFileSet()
- f, err := parser.ParseFile(fset, path+".go", src, 0)
- if err != nil {
- return nil, err
- }
- conf := types.Config{
- Importer: t,
- }
- pkg, err := conf.Check(path, fset, []*ast.File{f}, nil)
- if err != nil {
- return nil, err
- }
-
- // Export and import to get the package imported from export data.
- exportdata, err := iexport(fset, gcimporter.IExportVersion, pkg)
- if err != nil {
- return nil, err
- }
- imports := make(map[string]*types.Package)
- fset2 := token.NewFileSet()
- _, pkg2, err := gcimporter.IImportData(fset2, imports, exportdata, pkg.Path())
- if err != nil {
- return nil, err
- }
- t.pkgs[path] = pkg2
- return pkg2, nil
-}
diff --git a/go/internal/gcimporter/iexport_test.go b/go/internal/gcimporter/iexport_test.go
deleted file mode 100644
index f0e83e519..000000000
--- a/go/internal/gcimporter/iexport_test.go
+++ /dev/null
@@ -1,405 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This is a copy of bexport_test.go for iexport.go.
-
-//go:build go1.11
-// +build go1.11
-
-package gcimporter_test
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "go/ast"
- "go/build"
- "go/constant"
- "go/parser"
- "go/token"
- "go/types"
- "io/ioutil"
- "math/big"
- "os"
- "reflect"
- "runtime"
- "sort"
- "strings"
- "testing"
-
- "golang.org/x/tools/go/ast/inspector"
- "golang.org/x/tools/go/buildutil"
- "golang.org/x/tools/go/internal/gcimporter"
- "golang.org/x/tools/go/loader"
- "golang.org/x/tools/internal/typeparams/genericfeatures"
-)
-
-func readExportFile(filename string) ([]byte, error) {
- f, err := os.Open(filename)
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- buf := bufio.NewReader(f)
- if _, _, err := gcimporter.FindExportData(buf); err != nil {
- return nil, err
- }
-
- if ch, err := buf.ReadByte(); err != nil {
- return nil, err
- } else if ch != 'i' {
- return nil, fmt.Errorf("unexpected byte: %v", ch)
- }
-
- return ioutil.ReadAll(buf)
-}
-
-func iexport(fset *token.FileSet, version int, pkg *types.Package) ([]byte, error) {
- var buf bytes.Buffer
- if err := gcimporter.IExportCommon(&buf, fset, false, version, []*types.Package{pkg}); err != nil {
- return nil, err
- }
- return buf.Bytes(), nil
-}
-
-// isUnifiedBuilder reports whether we are executing on a go builder that uses
-// unified export data.
-func isUnifiedBuilder() bool {
- return os.Getenv("GO_BUILDER_NAME") == "linux-amd64-unified"
-}
-
-const minStdlibPackages = 248
-
-func TestIExportData_stdlib(t *testing.T) {
- if runtime.Compiler == "gccgo" {
- t.Skip("gccgo standard library is inaccessible")
- }
- if runtime.GOOS == "android" {
- t.Skipf("incomplete std lib on %s", runtime.GOOS)
- }
- if isRace {
- t.Skipf("stdlib tests take too long in race mode and flake on builders")
- }
- if testing.Short() {
- t.Skip("skipping RAM hungry test in -short mode")
- }
-
- // Load, parse and type-check the program.
- ctxt := build.Default // copy
- ctxt.GOPATH = "" // disable GOPATH
- conf := loader.Config{
- Build: &ctxt,
- AllowErrors: true,
- TypeChecker: types.Config{
- Sizes: types.SizesFor(ctxt.Compiler, ctxt.GOARCH),
- Error: func(err error) { t.Log(err) },
- },
- }
- for _, path := range buildutil.AllPackages(conf.Build) {
- conf.Import(path)
- }
-
- // Create a package containing type and value errors to ensure
- // they are properly encoded/decoded.
- f, err := conf.ParseFile("haserrors/haserrors.go", `package haserrors
-const UnknownValue = "" + 0
-type UnknownType undefined
-`)
- if err != nil {
- t.Fatal(err)
- }
- conf.CreateFromFiles("haserrors", f)
-
- prog, err := conf.Load()
- if err != nil {
- t.Fatalf("Load failed: %v", err)
- }
-
- var sorted []*types.Package
- isUnified := isUnifiedBuilder()
- for pkg, info := range prog.AllPackages {
- // Temporarily skip packages that use generics on the unified builder, to
- // fix TryBots.
- //
- // TODO(#48595): fix this test with GOEXPERIMENT=unified.
- inspect := inspector.New(info.Files)
- features := genericfeatures.ForPackage(inspect, &info.Info)
- if isUnified && features != 0 {
- t.Logf("skipping package %q which uses generics", pkg.Path())
- continue
- }
- if info.Files != nil { // non-empty directory
- sorted = append(sorted, pkg)
- }
- }
- sort.Slice(sorted, func(i, j int) bool {
- return sorted[i].Path() < sorted[j].Path()
- })
-
- version := gcimporter.IExportVersion
- numPkgs := len(sorted)
- if want := minStdlibPackages; numPkgs < want {
- t.Errorf("Loaded only %d packages, want at least %d", numPkgs, want)
- }
-
- for _, pkg := range sorted {
- if exportdata, err := iexport(conf.Fset, version, pkg); err != nil {
- t.Error(err)
- } else {
- testPkgData(t, conf.Fset, version, pkg, exportdata)
- }
-
- if pkg.Name() == "main" || pkg.Name() == "haserrors" {
- // skip; no export data
- } else if bp, err := ctxt.Import(pkg.Path(), "", build.FindOnly); err != nil {
- t.Log("warning:", err)
- } else if exportdata, err := readExportFile(bp.PkgObj); err != nil {
- t.Log("warning:", err)
- } else {
- testPkgData(t, conf.Fset, version, pkg, exportdata)
- }
- }
-
- var bundle bytes.Buffer
- if err := gcimporter.IExportBundle(&bundle, conf.Fset, sorted); err != nil {
- t.Fatal(err)
- }
- fset2 := token.NewFileSet()
- imports := make(map[string]*types.Package)
- pkgs2, err := gcimporter.IImportBundle(fset2, imports, bundle.Bytes())
- if err != nil {
- t.Fatal(err)
- }
-
- for i, pkg := range sorted {
- testPkg(t, conf.Fset, version, pkg, fset2, pkgs2[i])
- }
-}
-
-func testPkgData(t *testing.T, fset *token.FileSet, version int, pkg *types.Package, exportdata []byte) {
- imports := make(map[string]*types.Package)
- fset2 := token.NewFileSet()
- _, pkg2, err := gcimporter.IImportData(fset2, imports, exportdata, pkg.Path())
- if err != nil {
- t.Errorf("IImportData(%s): %v", pkg.Path(), err)
- }
-
- testPkg(t, fset, version, pkg, fset2, pkg2)
-}
-
-func testPkg(t *testing.T, fset *token.FileSet, version int, pkg *types.Package, fset2 *token.FileSet, pkg2 *types.Package) {
- if _, err := iexport(fset2, version, pkg2); err != nil {
- t.Errorf("reexport %q: %v", pkg.Path(), err)
- }
-
- // Compare the packages' corresponding members.
- for _, name := range pkg.Scope().Names() {
- if !ast.IsExported(name) {
- continue
- }
- obj1 := pkg.Scope().Lookup(name)
- obj2 := pkg2.Scope().Lookup(name)
- if obj2 == nil {
- t.Errorf("%s.%s not found, want %s", pkg.Path(), name, obj1)
- continue
- }
-
- fl1 := fileLine(fset, obj1)
- fl2 := fileLine(fset2, obj2)
- if fl1 != fl2 {
- t.Errorf("%s.%s: got posn %s, want %s",
- pkg.Path(), name, fl2, fl1)
- }
-
- if err := cmpObj(obj1, obj2); err != nil {
- t.Errorf("%s.%s: %s\ngot: %s\nwant: %s",
- pkg.Path(), name, err, obj2, obj1)
- }
- }
-}
-
-// TestVeryLongFile tests the position of an import object declared in
-// a very long input file. Line numbers greater than maxlines are
-// reported as line 1, not garbage or token.NoPos.
-func TestIExportData_long(t *testing.T) {
- // parse and typecheck
- longFile := "package foo" + strings.Repeat("\n", 123456) + "var X int"
- fset1 := token.NewFileSet()
- f, err := parser.ParseFile(fset1, "foo.go", longFile, 0)
- if err != nil {
- t.Fatal(err)
- }
- var conf types.Config
- pkg, err := conf.Check("foo", fset1, []*ast.File{f}, nil)
- if err != nil {
- t.Fatal(err)
- }
-
- // export
- exportdata, err := iexport(fset1, gcimporter.IExportVersion, pkg)
- if err != nil {
- t.Fatal(err)
- }
-
- // import
- imports := make(map[string]*types.Package)
- fset2 := token.NewFileSet()
- _, pkg2, err := gcimporter.IImportData(fset2, imports, exportdata, pkg.Path())
- if err != nil {
- t.Fatalf("IImportData(%s): %v", pkg.Path(), err)
- }
-
- // compare
- posn1 := fset1.Position(pkg.Scope().Lookup("X").Pos())
- posn2 := fset2.Position(pkg2.Scope().Lookup("X").Pos())
- if want := "foo.go:1:1"; posn2.String() != want {
- t.Errorf("X position = %s, want %s (orig was %s)",
- posn2, want, posn1)
- }
-}
-
-func TestIExportData_typealiases(t *testing.T) {
- // parse and typecheck
- fset1 := token.NewFileSet()
- f, err := parser.ParseFile(fset1, "p.go", src, 0)
- if err != nil {
- t.Fatal(err)
- }
- var conf types.Config
- pkg1, err := conf.Check("p", fset1, []*ast.File{f}, nil)
- if err == nil {
- // foo in undeclared in src; we should see an error
- t.Fatal("invalid source type-checked without error")
- }
- if pkg1 == nil {
- // despite incorrect src we should see a (partially) type-checked package
- t.Fatal("nil package returned")
- }
- checkPkg(t, pkg1, "export")
-
- // export
- // use a nil fileset here to confirm that it doesn't panic
- exportdata, err := iexport(nil, gcimporter.IExportVersion, pkg1)
- if err != nil {
- t.Fatal(err)
- }
-
- // import
- imports := make(map[string]*types.Package)
- fset2 := token.NewFileSet()
- _, pkg2, err := gcimporter.IImportData(fset2, imports, exportdata, pkg1.Path())
- if err != nil {
- t.Fatalf("IImportData(%s): %v", pkg1.Path(), err)
- }
- checkPkg(t, pkg2, "import")
-}
-
-// cmpObj reports how x and y differ. They are assumed to belong to different
-// universes so cannot be compared directly. It is an adapted version of
-// equalObj in bexport_test.go.
-func cmpObj(x, y types.Object) error {
- if reflect.TypeOf(x) != reflect.TypeOf(y) {
- return fmt.Errorf("%T vs %T", x, y)
- }
- xt := x.Type()
- yt := y.Type()
- switch x := x.(type) {
- case *types.Var, *types.Func:
- // ok
- case *types.Const:
- xval := x.Val()
- yval := y.(*types.Const).Val()
- equal := constant.Compare(xval, token.EQL, yval)
- if !equal {
- // try approx. comparison
- xkind := xval.Kind()
- ykind := yval.Kind()
- if xkind == constant.Complex || ykind == constant.Complex {
- equal = same(constant.Real(xval), constant.Real(yval)) &&
- same(constant.Imag(xval), constant.Imag(yval))
- } else if xkind == constant.Float || ykind == constant.Float {
- equal = same(xval, yval)
- } else if xkind == constant.Unknown && ykind == constant.Unknown {
- equal = true
- }
- }
- if !equal {
- return fmt.Errorf("unequal constants %s vs %s", xval, yval)
- }
- case *types.TypeName:
- if xalias, yalias := x.IsAlias(), y.(*types.TypeName).IsAlias(); xalias != yalias {
- return fmt.Errorf("mismatching IsAlias(): %s vs %s", x, y)
- }
- // equalType does not recurse into the underlying types of named types, so
- // we must pass the underlying type explicitly here. However, in doing this
- // we may skip checking the features of the named types themselves, in
- // situations where the type name is not referenced by the underlying or
- // any other top-level declarations. Therefore, we must explicitly compare
- // named types here, before passing their underlying types into equalType.
- xn, _ := xt.(*types.Named)
- yn, _ := yt.(*types.Named)
- if (xn == nil) != (yn == nil) {
- return fmt.Errorf("mismatching types: %T vs %T", xt, yt)
- }
- if xn != nil {
- if err := cmpNamed(xn, yn); err != nil {
- return err
- }
- }
- xt = xt.Underlying()
- yt = yt.Underlying()
- default:
- return fmt.Errorf("unexpected %T", x)
- }
- return equalType(xt, yt)
-}
-
-// Use the same floating-point precision (512) as cmd/compile
-// (see Mpprec in cmd/compile/internal/gc/mpfloat.go).
-const mpprec = 512
-
-// same compares non-complex numeric values and reports if they are approximately equal.
-func same(x, y constant.Value) bool {
- xf := constantToFloat(x)
- yf := constantToFloat(y)
- d := new(big.Float).Sub(xf, yf)
- d.Abs(d)
- eps := big.NewFloat(1.0 / (1 << (mpprec - 1))) // allow for 1 bit of error
- return d.Cmp(eps) < 0
-}
-
-// copy of the function with the same name in iexport.go.
-func constantToFloat(x constant.Value) *big.Float {
- var f big.Float
- f.SetPrec(mpprec)
- if v, exact := constant.Float64Val(x); exact {
- // float64
- f.SetFloat64(v)
- } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int {
- // TODO(gri): add big.Rat accessor to constant.Value.
- n := valueToRat(num)
- d := valueToRat(denom)
- f.SetRat(n.Quo(n, d))
- } else {
- // Value too large to represent as a fraction => inaccessible.
- // TODO(gri): add big.Float accessor to constant.Value.
- _, ok := f.SetString(x.ExactString())
- if !ok {
- panic("should not reach here")
- }
- }
- return &f
-}
-
-// copy of the function with the same name in iexport.go.
-func valueToRat(x constant.Value) *big.Rat {
- // Convert little-endian to big-endian.
- // I can't believe this is necessary.
- bytes := constant.Bytes(x)
- for i := 0; i < len(bytes)/2; i++ {
- bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i]
- }
- return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes))
-}
diff --git a/go/internal/gcimporter/iimport.go b/go/internal/gcimporter/iimport.go
deleted file mode 100644
index 1d5650ae4..000000000
--- a/go/internal/gcimporter/iimport.go
+++ /dev/null
@@ -1,898 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Indexed package import.
-// See cmd/compile/internal/gc/iexport.go for the export data format.
-
-// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go.
-
-package gcimporter
-
-import (
- "bytes"
- "encoding/binary"
- "fmt"
- "go/constant"
- "go/token"
- "go/types"
- "io"
- "sort"
- "strings"
-
- "golang.org/x/tools/internal/typeparams"
-)
-
-type intReader struct {
- *bytes.Reader
- path string
-}
-
-func (r *intReader) int64() int64 {
- i, err := binary.ReadVarint(r.Reader)
- if err != nil {
- errorf("import %q: read varint error: %v", r.path, err)
- }
- return i
-}
-
-func (r *intReader) uint64() uint64 {
- i, err := binary.ReadUvarint(r.Reader)
- if err != nil {
- errorf("import %q: read varint error: %v", r.path, err)
- }
- return i
-}
-
-// Keep this in sync with constants in iexport.go.
-const (
- iexportVersionGo1_11 = 0
- iexportVersionPosCol = 1
- iexportVersionGo1_18 = 2
- iexportVersionGenerics = 2
-)
-
-type ident struct {
- pkg string
- name string
-}
-
-const predeclReserved = 32
-
-type itag uint64
-
-const (
- // Types
- definedType itag = iota
- pointerType
- sliceType
- arrayType
- chanType
- mapType
- signatureType
- structType
- interfaceType
- typeParamType
- instanceType
- unionType
-)
-
-// IImportData imports a package from the serialized package data
-// and returns 0 and a reference to the package.
-// If the export data version is not recognized or the format is otherwise
-// compromised, an error is returned.
-func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (int, *types.Package, error) {
- pkgs, err := iimportCommon(fset, imports, data, false, path)
- if err != nil {
- return 0, nil, err
- }
- return 0, pkgs[0], nil
-}
-
-// IImportBundle imports a set of packages from the serialized package bundle.
-func IImportBundle(fset *token.FileSet, imports map[string]*types.Package, data []byte) ([]*types.Package, error) {
- return iimportCommon(fset, imports, data, true, "")
-}
-
-func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data []byte, bundle bool, path string) (pkgs []*types.Package, err error) {
- const currentVersion = 1
- version := int64(-1)
- if !debug {
- defer func() {
- if e := recover(); e != nil {
- if bundle {
- err = fmt.Errorf("%v", e)
- } else if version > currentVersion {
- err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e)
- } else {
- err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e)
- }
- }
- }()
- }
-
- r := &intReader{bytes.NewReader(data), path}
-
- if bundle {
- bundleVersion := r.uint64()
- switch bundleVersion {
- case bundleVersion:
- default:
- errorf("unknown bundle format version %d", bundleVersion)
- }
- }
-
- version = int64(r.uint64())
- switch version {
- case iexportVersionGo1_18, iexportVersionPosCol, iexportVersionGo1_11:
- default:
- if version > iexportVersionGo1_18 {
- errorf("unstable iexport format version %d, just rebuild compiler and std library", version)
- } else {
- errorf("unknown iexport format version %d", version)
- }
- }
-
- sLen := int64(r.uint64())
- dLen := int64(r.uint64())
-
- whence, _ := r.Seek(0, io.SeekCurrent)
- stringData := data[whence : whence+sLen]
- declData := data[whence+sLen : whence+sLen+dLen]
- r.Seek(sLen+dLen, io.SeekCurrent)
-
- p := iimporter{
- version: int(version),
- ipath: path,
-
- stringData: stringData,
- stringCache: make(map[uint64]string),
- pkgCache: make(map[uint64]*types.Package),
-
- declData: declData,
- pkgIndex: make(map[*types.Package]map[string]uint64),
- typCache: make(map[uint64]types.Type),
- // Separate map for typeparams, keyed by their package and unique
- // name.
- tparamIndex: make(map[ident]types.Type),
-
- fake: fakeFileSet{
- fset: fset,
- files: make(map[string]*fileInfo),
- },
- }
- defer p.fake.setLines() // set lines for files in fset
-
- for i, pt := range predeclared() {
- p.typCache[uint64(i)] = pt
- }
-
- pkgList := make([]*types.Package, r.uint64())
- for i := range pkgList {
- pkgPathOff := r.uint64()
- pkgPath := p.stringAt(pkgPathOff)
- pkgName := p.stringAt(r.uint64())
- _ = r.uint64() // package height; unused by go/types
-
- if pkgPath == "" {
- pkgPath = path
- }
- pkg := imports[pkgPath]
- if pkg == nil {
- pkg = types.NewPackage(pkgPath, pkgName)
- imports[pkgPath] = pkg
- } else if pkg.Name() != pkgName {
- errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path)
- }
-
- p.pkgCache[pkgPathOff] = pkg
-
- nameIndex := make(map[string]uint64)
- for nSyms := r.uint64(); nSyms > 0; nSyms-- {
- name := p.stringAt(r.uint64())
- nameIndex[name] = r.uint64()
- }
-
- p.pkgIndex[pkg] = nameIndex
- pkgList[i] = pkg
- }
-
- if bundle {
- pkgs = make([]*types.Package, r.uint64())
- for i := range pkgs {
- pkg := p.pkgAt(r.uint64())
- imps := make([]*types.Package, r.uint64())
- for j := range imps {
- imps[j] = p.pkgAt(r.uint64())
- }
- pkg.SetImports(imps)
- pkgs[i] = pkg
- }
- } else {
- if len(pkgList) == 0 {
- errorf("no packages found for %s", path)
- panic("unreachable")
- }
- pkgs = pkgList[:1]
-
- // record all referenced packages as imports
- list := append(([]*types.Package)(nil), pkgList[1:]...)
- sort.Sort(byPath(list))
- pkgs[0].SetImports(list)
- }
-
- for _, pkg := range pkgs {
- if pkg.Complete() {
- continue
- }
-
- names := make([]string, 0, len(p.pkgIndex[pkg]))
- for name := range p.pkgIndex[pkg] {
- names = append(names, name)
- }
- sort.Strings(names)
- for _, name := range names {
- p.doDecl(pkg, name)
- }
-
- // package was imported completely and without errors
- pkg.MarkComplete()
- }
-
- // SetConstraint can't be called if the constraint type is not yet complete.
- // When type params are created in the 'P' case of (*importReader).obj(),
- // the associated constraint type may not be complete due to recursion.
- // Therefore, we defer calling SetConstraint there, and call it here instead
- // after all types are complete.
- for _, d := range p.later {
- typeparams.SetTypeParamConstraint(d.t, d.constraint)
- }
-
- for _, typ := range p.interfaceList {
- typ.Complete()
- }
-
- return pkgs, nil
-}
-
-type setConstraintArgs struct {
- t *typeparams.TypeParam
- constraint types.Type
-}
-
-type iimporter struct {
- version int
- ipath string
-
- stringData []byte
- stringCache map[uint64]string
- pkgCache map[uint64]*types.Package
-
- declData []byte
- pkgIndex map[*types.Package]map[string]uint64
- typCache map[uint64]types.Type
- tparamIndex map[ident]types.Type
-
- fake fakeFileSet
- interfaceList []*types.Interface
-
- // Arguments for calls to SetConstraint that are deferred due to recursive types
- later []setConstraintArgs
-
- indent int // for tracing support
-}
-
-func (p *iimporter) trace(format string, args ...interface{}) {
- if !trace {
- // Call sites should also be guarded, but having this check here allows
- // easily enabling/disabling debug trace statements.
- return
- }
- fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...)
-}
-
-func (p *iimporter) doDecl(pkg *types.Package, name string) {
- if debug {
- p.trace("import decl %s", name)
- p.indent++
- defer func() {
- p.indent--
- p.trace("=> %s", name)
- }()
- }
- // See if we've already imported this declaration.
- if obj := pkg.Scope().Lookup(name); obj != nil {
- return
- }
-
- off, ok := p.pkgIndex[pkg][name]
- if !ok {
- errorf("%v.%v not in index", pkg, name)
- }
-
- r := &importReader{p: p, currPkg: pkg}
- r.declReader.Reset(p.declData[off:])
-
- r.obj(name)
-}
-
-func (p *iimporter) stringAt(off uint64) string {
- if s, ok := p.stringCache[off]; ok {
- return s
- }
-
- slen, n := binary.Uvarint(p.stringData[off:])
- if n <= 0 {
- errorf("varint failed")
- }
- spos := off + uint64(n)
- s := string(p.stringData[spos : spos+slen])
- p.stringCache[off] = s
- return s
-}
-
-func (p *iimporter) pkgAt(off uint64) *types.Package {
- if pkg, ok := p.pkgCache[off]; ok {
- return pkg
- }
- path := p.stringAt(off)
- errorf("missing package %q in %q", path, p.ipath)
- return nil
-}
-
-func (p *iimporter) typAt(off uint64, base *types.Named) types.Type {
- if t, ok := p.typCache[off]; ok && canReuse(base, t) {
- return t
- }
-
- if off < predeclReserved {
- errorf("predeclared type missing from cache: %v", off)
- }
-
- r := &importReader{p: p}
- r.declReader.Reset(p.declData[off-predeclReserved:])
- t := r.doType(base)
-
- if canReuse(base, t) {
- p.typCache[off] = t
- }
- return t
-}
-
-// canReuse reports whether the type rhs on the RHS of the declaration for def
-// may be re-used.
-//
-// Specifically, if def is non-nil and rhs is an interface type with methods, it
-// may not be re-used because we have a convention of setting the receiver type
-// for interface methods to def.
-func canReuse(def *types.Named, rhs types.Type) bool {
- if def == nil {
- return true
- }
- iface, _ := rhs.(*types.Interface)
- if iface == nil {
- return true
- }
- // Don't use iface.Empty() here as iface may not be complete.
- return iface.NumEmbeddeds() == 0 && iface.NumExplicitMethods() == 0
-}
-
-type importReader struct {
- p *iimporter
- declReader bytes.Reader
- currPkg *types.Package
- prevFile string
- prevLine int64
- prevColumn int64
-}
-
-func (r *importReader) obj(name string) {
- tag := r.byte()
- pos := r.pos()
-
- switch tag {
- case 'A':
- typ := r.typ()
-
- r.declare(types.NewTypeName(pos, r.currPkg, name, typ))
-
- case 'C':
- typ, val := r.value()
-
- r.declare(types.NewConst(pos, r.currPkg, name, typ, val))
-
- case 'F', 'G':
- var tparams []*typeparams.TypeParam
- if tag == 'G' {
- tparams = r.tparamList()
- }
- sig := r.signature(nil, nil, tparams)
- r.declare(types.NewFunc(pos, r.currPkg, name, sig))
-
- case 'T', 'U':
- // Types can be recursive. We need to setup a stub
- // declaration before recursing.
- obj := types.NewTypeName(pos, r.currPkg, name, nil)
- named := types.NewNamed(obj, nil, nil)
- // Declare obj before calling r.tparamList, so the new type name is recognized
- // if used in the constraint of one of its own typeparams (see #48280).
- r.declare(obj)
- if tag == 'U' {
- tparams := r.tparamList()
- typeparams.SetForNamed(named, tparams)
- }
-
- underlying := r.p.typAt(r.uint64(), named).Underlying()
- named.SetUnderlying(underlying)
-
- if !isInterface(underlying) {
- for n := r.uint64(); n > 0; n-- {
- mpos := r.pos()
- mname := r.ident()
- recv := r.param()
-
- // If the receiver has any targs, set those as the
- // rparams of the method (since those are the
- // typeparams being used in the method sig/body).
- base := baseType(recv.Type())
- assert(base != nil)
- targs := typeparams.NamedTypeArgs(base)
- var rparams []*typeparams.TypeParam
- if targs.Len() > 0 {
- rparams = make([]*typeparams.TypeParam, targs.Len())
- for i := range rparams {
- rparams[i] = targs.At(i).(*typeparams.TypeParam)
- }
- }
- msig := r.signature(recv, rparams, nil)
-
- named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig))
- }
- }
-
- case 'P':
- // We need to "declare" a typeparam in order to have a name that
- // can be referenced recursively (if needed) in the type param's
- // bound.
- if r.p.version < iexportVersionGenerics {
- errorf("unexpected type param type")
- }
- name0 := tparamName(name)
- tn := types.NewTypeName(pos, r.currPkg, name0, nil)
- t := typeparams.NewTypeParam(tn, nil)
-
- // To handle recursive references to the typeparam within its
- // bound, save the partial type in tparamIndex before reading the bounds.
- id := ident{r.currPkg.Name(), name}
- r.p.tparamIndex[id] = t
- var implicit bool
- if r.p.version >= iexportVersionGo1_18 {
- implicit = r.bool()
- }
- constraint := r.typ()
- if implicit {
- iface, _ := constraint.(*types.Interface)
- if iface == nil {
- errorf("non-interface constraint marked implicit")
- }
- typeparams.MarkImplicit(iface)
- }
- // The constraint type may not be complete, if we
- // are in the middle of a type recursion involving type
- // constraints. So, we defer SetConstraint until we have
- // completely set up all types in ImportData.
- r.p.later = append(r.p.later, setConstraintArgs{t: t, constraint: constraint})
-
- case 'V':
- typ := r.typ()
-
- r.declare(types.NewVar(pos, r.currPkg, name, typ))
-
- default:
- errorf("unexpected tag: %v", tag)
- }
-}
-
-func (r *importReader) declare(obj types.Object) {
- obj.Pkg().Scope().Insert(obj)
-}
-
-func (r *importReader) value() (typ types.Type, val constant.Value) {
- typ = r.typ()
- if r.p.version >= iexportVersionGo1_18 {
- // TODO: add support for using the kind.
- _ = constant.Kind(r.int64())
- }
-
- switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType {
- case types.IsBoolean:
- val = constant.MakeBool(r.bool())
-
- case types.IsString:
- val = constant.MakeString(r.string())
-
- case types.IsInteger:
- val = r.mpint(b)
-
- case types.IsFloat:
- val = r.mpfloat(b)
-
- case types.IsComplex:
- re := r.mpfloat(b)
- im := r.mpfloat(b)
- val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
-
- default:
- if b.Kind() == types.Invalid {
- val = constant.MakeUnknown()
- return
- }
- errorf("unexpected type %v", typ) // panics
- panic("unreachable")
- }
-
- return
-}
-
-func intSize(b *types.Basic) (signed bool, maxBytes uint) {
- if (b.Info() & types.IsUntyped) != 0 {
- return true, 64
- }
-
- switch b.Kind() {
- case types.Float32, types.Complex64:
- return true, 3
- case types.Float64, types.Complex128:
- return true, 7
- }
-
- signed = (b.Info() & types.IsUnsigned) == 0
- switch b.Kind() {
- case types.Int8, types.Uint8:
- maxBytes = 1
- case types.Int16, types.Uint16:
- maxBytes = 2
- case types.Int32, types.Uint32:
- maxBytes = 4
- default:
- maxBytes = 8
- }
-
- return
-}
-
-func (r *importReader) mpint(b *types.Basic) constant.Value {
- signed, maxBytes := intSize(b)
-
- maxSmall := 256 - maxBytes
- if signed {
- maxSmall = 256 - 2*maxBytes
- }
- if maxBytes == 1 {
- maxSmall = 256
- }
-
- n, _ := r.declReader.ReadByte()
- if uint(n) < maxSmall {
- v := int64(n)
- if signed {
- v >>= 1
- if n&1 != 0 {
- v = ^v
- }
- }
- return constant.MakeInt64(v)
- }
-
- v := -n
- if signed {
- v = -(n &^ 1) >> 1
- }
- if v < 1 || uint(v) > maxBytes {
- errorf("weird decoding: %v, %v => %v", n, signed, v)
- }
-
- buf := make([]byte, v)
- io.ReadFull(&r.declReader, buf)
-
- // convert to little endian
- // TODO(gri) go/constant should have a more direct conversion function
- // (e.g., once it supports a big.Float based implementation)
- for i, j := 0, len(buf)-1; i < j; i, j = i+1, j-1 {
- buf[i], buf[j] = buf[j], buf[i]
- }
-
- x := constant.MakeFromBytes(buf)
- if signed && n&1 != 0 {
- x = constant.UnaryOp(token.SUB, x, 0)
- }
- return x
-}
-
-func (r *importReader) mpfloat(b *types.Basic) constant.Value {
- x := r.mpint(b)
- if constant.Sign(x) == 0 {
- return x
- }
-
- exp := r.int64()
- switch {
- case exp > 0:
- x = constant.Shift(x, token.SHL, uint(exp))
- // Ensure that the imported Kind is Float, else this constant may run into
- // bitsize limits on overlarge integers. Eventually we can instead adopt
- // the approach of CL 288632, but that CL relies on go/constant APIs that
- // were introduced in go1.13.
- //
- // TODO(rFindley): sync the logic here with tip Go once we no longer
- // support go1.12.
- x = constant.ToFloat(x)
- case exp < 0:
- d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp))
- x = constant.BinaryOp(x, token.QUO, d)
- }
- return x
-}
-
-func (r *importReader) ident() string {
- return r.string()
-}
-
-func (r *importReader) qualifiedIdent() (*types.Package, string) {
- name := r.string()
- pkg := r.pkg()
- return pkg, name
-}
-
-func (r *importReader) pos() token.Pos {
- if r.p.version >= iexportVersionPosCol {
- r.posv1()
- } else {
- r.posv0()
- }
-
- if r.prevFile == "" && r.prevLine == 0 && r.prevColumn == 0 {
- return token.NoPos
- }
- return r.p.fake.pos(r.prevFile, int(r.prevLine), int(r.prevColumn))
-}
-
-func (r *importReader) posv0() {
- delta := r.int64()
- if delta != deltaNewFile {
- r.prevLine += delta
- } else if l := r.int64(); l == -1 {
- r.prevLine += deltaNewFile
- } else {
- r.prevFile = r.string()
- r.prevLine = l
- }
-}
-
-func (r *importReader) posv1() {
- delta := r.int64()
- r.prevColumn += delta >> 1
- if delta&1 != 0 {
- delta = r.int64()
- r.prevLine += delta >> 1
- if delta&1 != 0 {
- r.prevFile = r.string()
- }
- }
-}
-
-func (r *importReader) typ() types.Type {
- return r.p.typAt(r.uint64(), nil)
-}
-
-func isInterface(t types.Type) bool {
- _, ok := t.(*types.Interface)
- return ok
-}
-
-func (r *importReader) pkg() *types.Package { return r.p.pkgAt(r.uint64()) }
-func (r *importReader) string() string { return r.p.stringAt(r.uint64()) }
-
-func (r *importReader) doType(base *types.Named) (res types.Type) {
- k := r.kind()
- if debug {
- r.p.trace("importing type %d (base: %s)", k, base)
- r.p.indent++
- defer func() {
- r.p.indent--
- r.p.trace("=> %s", res)
- }()
- }
- switch k {
- default:
- errorf("unexpected kind tag in %q: %v", r.p.ipath, k)
- return nil
-
- case definedType:
- pkg, name := r.qualifiedIdent()
- r.p.doDecl(pkg, name)
- return pkg.Scope().Lookup(name).(*types.TypeName).Type()
- case pointerType:
- return types.NewPointer(r.typ())
- case sliceType:
- return types.NewSlice(r.typ())
- case arrayType:
- n := r.uint64()
- return types.NewArray(r.typ(), int64(n))
- case chanType:
- dir := chanDir(int(r.uint64()))
- return types.NewChan(dir, r.typ())
- case mapType:
- return types.NewMap(r.typ(), r.typ())
- case signatureType:
- r.currPkg = r.pkg()
- return r.signature(nil, nil, nil)
-
- case structType:
- r.currPkg = r.pkg()
-
- fields := make([]*types.Var, r.uint64())
- tags := make([]string, len(fields))
- for i := range fields {
- fpos := r.pos()
- fname := r.ident()
- ftyp := r.typ()
- emb := r.bool()
- tag := r.string()
-
- fields[i] = types.NewField(fpos, r.currPkg, fname, ftyp, emb)
- tags[i] = tag
- }
- return types.NewStruct(fields, tags)
-
- case interfaceType:
- r.currPkg = r.pkg()
-
- embeddeds := make([]types.Type, r.uint64())
- for i := range embeddeds {
- _ = r.pos()
- embeddeds[i] = r.typ()
- }
-
- methods := make([]*types.Func, r.uint64())
- for i := range methods {
- mpos := r.pos()
- mname := r.ident()
-
- // TODO(mdempsky): Matches bimport.go, but I
- // don't agree with this.
- var recv *types.Var
- if base != nil {
- recv = types.NewVar(token.NoPos, r.currPkg, "", base)
- }
-
- msig := r.signature(recv, nil, nil)
- methods[i] = types.NewFunc(mpos, r.currPkg, mname, msig)
- }
-
- typ := newInterface(methods, embeddeds)
- r.p.interfaceList = append(r.p.interfaceList, typ)
- return typ
-
- case typeParamType:
- if r.p.version < iexportVersionGenerics {
- errorf("unexpected type param type")
- }
- pkg, name := r.qualifiedIdent()
- id := ident{pkg.Name(), name}
- if t, ok := r.p.tparamIndex[id]; ok {
- // We're already in the process of importing this typeparam.
- return t
- }
- // Otherwise, import the definition of the typeparam now.
- r.p.doDecl(pkg, name)
- return r.p.tparamIndex[id]
-
- case instanceType:
- if r.p.version < iexportVersionGenerics {
- errorf("unexpected instantiation type")
- }
- // pos does not matter for instances: they are positioned on the original
- // type.
- _ = r.pos()
- len := r.uint64()
- targs := make([]types.Type, len)
- for i := range targs {
- targs[i] = r.typ()
- }
- baseType := r.typ()
- // The imported instantiated type doesn't include any methods, so
- // we must always use the methods of the base (orig) type.
- // TODO provide a non-nil *Environment
- t, _ := typeparams.Instantiate(nil, baseType, targs, false)
- return t
-
- case unionType:
- if r.p.version < iexportVersionGenerics {
- errorf("unexpected instantiation type")
- }
- terms := make([]*typeparams.Term, r.uint64())
- for i := range terms {
- terms[i] = typeparams.NewTerm(r.bool(), r.typ())
- }
- return typeparams.NewUnion(terms)
- }
-}
-
-func (r *importReader) kind() itag {
- return itag(r.uint64())
-}
-
-func (r *importReader) signature(recv *types.Var, rparams []*typeparams.TypeParam, tparams []*typeparams.TypeParam) *types.Signature {
- params := r.paramList()
- results := r.paramList()
- variadic := params.Len() > 0 && r.bool()
- return typeparams.NewSignatureType(recv, rparams, tparams, params, results, variadic)
-}
-
-func (r *importReader) tparamList() []*typeparams.TypeParam {
- n := r.uint64()
- if n == 0 {
- return nil
- }
- xs := make([]*typeparams.TypeParam, n)
- for i := range xs {
- // Note: the standard library importer is tolerant of nil types here,
- // though would panic in SetTypeParams.
- xs[i] = r.typ().(*typeparams.TypeParam)
- }
- return xs
-}
-
-func (r *importReader) paramList() *types.Tuple {
- xs := make([]*types.Var, r.uint64())
- for i := range xs {
- xs[i] = r.param()
- }
- return types.NewTuple(xs...)
-}
-
-func (r *importReader) param() *types.Var {
- pos := r.pos()
- name := r.ident()
- typ := r.typ()
- return types.NewParam(pos, r.currPkg, name, typ)
-}
-
-func (r *importReader) bool() bool {
- return r.uint64() != 0
-}
-
-func (r *importReader) int64() int64 {
- n, err := binary.ReadVarint(&r.declReader)
- if err != nil {
- errorf("readVarint: %v", err)
- }
- return n
-}
-
-func (r *importReader) uint64() uint64 {
- n, err := binary.ReadUvarint(&r.declReader)
- if err != nil {
- errorf("readUvarint: %v", err)
- }
- return n
-}
-
-func (r *importReader) byte() byte {
- x, err := r.declReader.ReadByte()
- if err != nil {
- errorf("declReader.ReadByte: %v", err)
- }
- return x
-}
-
-func baseType(typ types.Type) *types.Named {
- // pointer receivers are never types.Named types
- if p, _ := typ.(*types.Pointer); p != nil {
- typ = p.Elem()
- }
- // receiver base types are always (possibly generic) types.Named types
- n, _ := typ.(*types.Named)
- return n
-}
diff --git a/go/internal/gcimporter/support_go118.go b/go/internal/gcimporter/support_go118.go
deleted file mode 100644
index a99384323..000000000
--- a/go/internal/gcimporter/support_go118.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.18
-// +build go1.18
-
-package gcimporter
-
-import "go/types"
-
-const iexportVersion = iexportVersionGenerics
-
-// additionalPredeclared returns additional predeclared types in go.1.18.
-func additionalPredeclared() []types.Type {
- return []types.Type{
- // comparable
- types.Universe.Lookup("comparable").Type(),
-
- // any
- types.Universe.Lookup("any").Type(),
- }
-}
diff --git a/go/loader/doc.go b/go/loader/doc.go
index c5aa31c1a..e35b1fd7d 100644
--- a/go/loader/doc.go
+++ b/go/loader/doc.go
@@ -20,36 +20,35 @@
// be called any number of times. Finally, these are followed by a
// call to Load() to actually load and type-check the program.
//
-// var conf loader.Config
+// var conf loader.Config
//
-// // Use the command-line arguments to specify
-// // a set of initial packages to load from source.
-// // See FromArgsUsage for help.
-// rest, err := conf.FromArgs(os.Args[1:], wantTests)
+// // Use the command-line arguments to specify
+// // a set of initial packages to load from source.
+// // See FromArgsUsage for help.
+// rest, err := conf.FromArgs(os.Args[1:], wantTests)
//
-// // Parse the specified files and create an ad hoc package with path "foo".
-// // All files must have the same 'package' declaration.
-// conf.CreateFromFilenames("foo", "foo.go", "bar.go")
+// // Parse the specified files and create an ad hoc package with path "foo".
+// // All files must have the same 'package' declaration.
+// conf.CreateFromFilenames("foo", "foo.go", "bar.go")
//
-// // Create an ad hoc package with path "foo" from
-// // the specified already-parsed files.
-// // All ASTs must have the same 'package' declaration.
-// conf.CreateFromFiles("foo", parsedFiles)
+// // Create an ad hoc package with path "foo" from
+// // the specified already-parsed files.
+// // All ASTs must have the same 'package' declaration.
+// conf.CreateFromFiles("foo", parsedFiles)
//
-// // Add "runtime" to the set of packages to be loaded.
-// conf.Import("runtime")
+// // Add "runtime" to the set of packages to be loaded.
+// conf.Import("runtime")
//
-// // Adds "fmt" and "fmt_test" to the set of packages
-// // to be loaded. "fmt" will include *_test.go files.
-// conf.ImportWithTests("fmt")
+// // Adds "fmt" and "fmt_test" to the set of packages
+// // to be loaded. "fmt" will include *_test.go files.
+// conf.ImportWithTests("fmt")
//
-// // Finally, load all the packages specified by the configuration.
-// prog, err := conf.Load()
+// // Finally, load all the packages specified by the configuration.
+// prog, err := conf.Load()
//
// See examples_test.go for examples of API usage.
//
-//
-// CONCEPTS AND TERMINOLOGY
+// # CONCEPTS AND TERMINOLOGY
//
// The WORKSPACE is the set of packages accessible to the loader. The
// workspace is defined by Config.Build, a *build.Context. The
@@ -92,7 +91,6 @@
// The INITIAL packages are those specified in the configuration. A
// DEPENDENCY is a package loaded to satisfy an import in an initial
// package or another dependency.
-//
package loader
// IMPLEMENTATION NOTES
diff --git a/go/loader/loader.go b/go/loader/loader.go
index 3ba91f7c5..edf62c2cc 100644
--- a/go/loader/loader.go
+++ b/go/loader/loader.go
@@ -179,7 +179,6 @@ type Program struct {
// for a single package.
//
// Not mutated once exposed via the API.
-//
type PackageInfo struct {
Pkg *types.Package
Importable bool // true if 'import "Pkg.Path()"' would resolve to this
@@ -217,7 +216,6 @@ func (conf *Config) fset() *token.FileSet {
// src specifies the parser input as a string, []byte, or io.Reader, and
// filename is its apparent name. If src is nil, the contents of
// filename are read from the file system.
-//
func (conf *Config) ParseFile(filename string, src interface{}) (*ast.File, error) {
// TODO(adonovan): use conf.build() etc like parseFiles does.
return parser.ParseFile(conf.fset(), filename, src, conf.ParserMode)
@@ -262,7 +260,6 @@ A '--' argument terminates the list of packages.
//
// Only superficial errors are reported at this stage; errors dependent
// on I/O are detected during Load.
-//
func (conf *Config) FromArgs(args []string, xtest bool) ([]string, error) {
var rest []string
for i, arg := range args {
@@ -300,14 +297,12 @@ func (conf *Config) FromArgs(args []string, xtest bool) ([]string, error) {
// CreateFromFilenames is a convenience function that adds
// a conf.CreatePkgs entry to create a package of the specified *.go
// files.
-//
func (conf *Config) CreateFromFilenames(path string, filenames ...string) {
conf.CreatePkgs = append(conf.CreatePkgs, PkgSpec{Path: path, Filenames: filenames})
}
// CreateFromFiles is a convenience function that adds a conf.CreatePkgs
// entry to create package of the specified path and parsed files.
-//
func (conf *Config) CreateFromFiles(path string, files ...*ast.File) {
conf.CreatePkgs = append(conf.CreatePkgs, PkgSpec{Path: path, Files: files})
}
@@ -321,12 +316,10 @@ func (conf *Config) CreateFromFiles(path string, files ...*ast.File) {
// In addition, if any *_test.go files contain a "package x_test"
// declaration, an additional package comprising just those files will
// be added to CreatePkgs.
-//
func (conf *Config) ImportWithTests(path string) { conf.addImport(path, true) }
// Import is a convenience function that adds path to ImportPkgs, the
// set of initial packages that will be imported from source.
-//
func (conf *Config) Import(path string) { conf.addImport(path, false) }
func (conf *Config) addImport(path string, tests bool) {
@@ -345,7 +338,6 @@ func (conf *Config) addImport(path string, tests bool) {
// exact is defined as for astutil.PathEnclosingInterval.
//
// The zero value is returned if not found.
-//
func (prog *Program) PathEnclosingInterval(start, end token.Pos) (pkg *PackageInfo, path []ast.Node, exact bool) {
for _, info := range prog.AllPackages {
for _, f := range info.Files {
@@ -368,7 +360,6 @@ func (prog *Program) PathEnclosingInterval(start, end token.Pos) (pkg *PackageIn
// InitialPackages returns a new slice containing the set of initial
// packages (Created + Imported) in unspecified order.
-//
func (prog *Program) InitialPackages() []*PackageInfo {
infos := make([]*PackageInfo, 0, len(prog.Created)+len(prog.Imported))
infos = append(infos, prog.Created...)
@@ -435,7 +426,6 @@ type findpkgValue struct {
// Upon completion, exactly one of info and err is non-nil:
// info on successful creation of a package, err otherwise.
// A successful package may still contain type errors.
-//
type importInfo struct {
path string // import path
info *PackageInfo // results of typechecking (including errors)
@@ -475,7 +465,6 @@ type importError struct {
// false, Load will fail if any package had an error.
//
// It is an error if no packages were loaded.
-//
func (conf *Config) Load() (*Program, error) {
// Create a simple default error handler for parse/type errors.
if conf.TypeChecker.Error == nil {
@@ -732,10 +721,10 @@ func (conf *Config) build() *build.Context {
// errors that were encountered.
//
// 'which' indicates which files to include:
-// 'g': include non-test *.go source files (GoFiles + processed CgoFiles)
-// 't': include in-package *_test.go source files (TestGoFiles)
-// 'x': include external *_test.go source files. (XTestGoFiles)
//
+// 'g': include non-test *.go source files (GoFiles + processed CgoFiles)
+// 't': include in-package *_test.go source files (TestGoFiles)
+// 'x': include external *_test.go source files. (XTestGoFiles)
func (conf *Config) parsePackageFiles(bp *build.Package, which rune) ([]*ast.File, []error) {
if bp.ImportPath == "unsafe" {
return nil, nil
@@ -776,7 +765,6 @@ func (conf *Config) parsePackageFiles(bp *build.Package, which rune) ([]*ast.Fil
// in the package's PackageInfo).
//
// Idempotent.
-//
func (imp *importer) doImport(from *PackageInfo, to string) (*types.Package, error) {
if to == "C" {
// This should be unreachable, but ad hoc packages are
@@ -868,7 +856,6 @@ func (imp *importer) findPackage(importPath, fromDir string, mode build.ImportMo
//
// fromDir is the directory containing the import declaration that
// caused these imports.
-//
func (imp *importer) importAll(fromPath, fromDir string, imports map[string]bool, mode build.ImportMode) (infos []*PackageInfo, errors []importError) {
if fromPath != "" {
// We're loading a set of imports.
@@ -951,7 +938,6 @@ func (imp *importer) findPath(from, to string) []string {
// caller must call awaitCompletion() before accessing its info field.
//
// startLoad is concurrency-safe and idempotent.
-//
func (imp *importer) startLoad(bp *build.Package) *importInfo {
path := bp.ImportPath
imp.importedMu.Lock()
@@ -995,7 +981,6 @@ func (imp *importer) load(bp *build.Package) *PackageInfo {
//
// cycleCheck determines whether the imports within files create
// dependency edges that should be checked for potential cycles.
-//
func (imp *importer) addFiles(info *PackageInfo, files []*ast.File, cycleCheck bool) {
// Ensure the dependencies are loaded, in parallel.
var fromPath string
diff --git a/go/loader/stdlib_test.go b/go/loader/stdlib_test.go
index b55aa8ffa..f3f3e39bf 100644
--- a/go/loader/stdlib_test.go
+++ b/go/loader/stdlib_test.go
@@ -127,6 +127,8 @@ func TestCgoOption(t *testing.T) {
// or the std library is incomplete (Android).
case "android", "plan9", "solaris", "windows":
t.Skipf("no cgo or incomplete std lib on %s", runtime.GOOS)
+ case "darwin":
+ t.Skipf("golang/go#58493: file locations in this test are stale on darwin")
}
// In nocgo builds (e.g. linux-amd64-nocgo),
// there is no "runtime/cgo" package,
diff --git a/go/loader/util.go b/go/loader/util.go
index 7f38dd740..3a80acae6 100644
--- a/go/loader/util.go
+++ b/go/loader/util.go
@@ -27,7 +27,6 @@ var ioLimit = make(chan bool, 10)
//
// I/O is done via ctxt, which may specify a virtual file system.
// displayPath is used to transform the filenames attached to the ASTs.
-//
func parseFiles(fset *token.FileSet, ctxt *build.Context, displayPath func(string) string, dir string, files []string, mode parser.Mode) ([]*ast.File, []error) {
if displayPath == nil {
displayPath = func(path string) string { return path }
diff --git a/go/packages/doc.go b/go/packages/doc.go
index 4bfe28a51..da4ab89fe 100644
--- a/go/packages/doc.go
+++ b/go/packages/doc.go
@@ -67,7 +67,6 @@ Most tools should pass their command-line arguments (after any flags)
uninterpreted to the loader, so that the loader can interpret them
according to the conventions of the underlying build system.
See the Example function for typical usage.
-
*/
package packages // import "golang.org/x/tools/go/packages"
diff --git a/go/packages/golist.go b/go/packages/golist.go
index 7aa97f7be..6bb7168d2 100644
--- a/go/packages/golist.go
+++ b/go/packages/golist.go
@@ -26,7 +26,6 @@ import (
"golang.org/x/tools/go/internal/packagesdriver"
"golang.org/x/tools/internal/gocommand"
"golang.org/x/tools/internal/packagesinternal"
- "golang.org/x/xerrors"
)
// debug controls verbose logging.
@@ -61,6 +60,7 @@ func (r *responseDeduper) addAll(dr *driverResponse) {
for _, root := range dr.Roots {
r.addRoot(root)
}
+ r.dr.GoVersion = dr.GoVersion
}
func (r *responseDeduper) addPackage(p *Package) {
@@ -303,11 +303,12 @@ func (state *golistState) runContainsQueries(response *responseDeduper, queries
}
dirResponse, err := state.createDriverResponse(pattern)
- // If there was an error loading the package, or the package is returned
- // with errors, try to load the file as an ad-hoc package.
+ // If there was an error loading the package, or no packages are returned,
+ // or the package is returned with errors, try to load the file as an
+ // ad-hoc package.
// Usually the error will appear in a returned package, but may not if we're
// in module mode and the ad-hoc is located outside a module.
- if err != nil || len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].GoFiles) == 0 &&
+ if err != nil || len(dirResponse.Packages) == 0 || len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].GoFiles) == 0 &&
len(dirResponse.Packages[0].Errors) == 1 {
var queryErr error
if dirResponse, queryErr = state.adhocPackage(pattern, query); queryErr != nil {
@@ -446,15 +447,22 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse
// Run "go list" for complete
// information on the specified packages.
- buf, err := state.invokeGo("list", golistargs(state.cfg, words)...)
+ goVersion, err := state.getGoVersion()
if err != nil {
return nil, err
}
+ buf, err := state.invokeGo("list", golistargs(state.cfg, words, goVersion)...)
+ if err != nil {
+ return nil, err
+ }
+
seen := make(map[string]*jsonPackage)
pkgs := make(map[string]*Package)
additionalErrors := make(map[string][]Error)
// Decode the JSON and convert it to Package form.
- var response driverResponse
+ response := &driverResponse{
+ GoVersion: goVersion,
+ }
for dec := json.NewDecoder(buf); dec.More(); {
p := new(jsonPackage)
if err := dec.Decode(p); err != nil {
@@ -596,17 +604,12 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse
// Work around https://golang.org/issue/28749:
// cmd/go puts assembly, C, and C++ files in CompiledGoFiles.
- // Filter out any elements of CompiledGoFiles that are also in OtherFiles.
- // We have to keep this workaround in place until go1.12 is a distant memory.
- if len(pkg.OtherFiles) > 0 {
- other := make(map[string]bool, len(pkg.OtherFiles))
- for _, f := range pkg.OtherFiles {
- other[f] = true
- }
-
+ // Remove files from CompiledGoFiles that are non-go files
+ // (or are not files that look like they are from the cache).
+ if len(pkg.CompiledGoFiles) > 0 {
out := pkg.CompiledGoFiles[:0]
for _, f := range pkg.CompiledGoFiles {
- if other[f] {
+ if ext := filepath.Ext(f); ext != ".go" && ext != "" { // ext == "" means the file is from the cache, so probably cgo-processed file
continue
}
out = append(out, f)
@@ -726,7 +729,7 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse
}
sort.Slice(response.Packages, func(i, j int) bool { return response.Packages[i].ID < response.Packages[j].ID })
- return &response, nil
+ return response, nil
}
func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool {
@@ -752,6 +755,7 @@ func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool {
return len(p.Error.ImportStack) == 0 || p.Error.ImportStack[len(p.Error.ImportStack)-1] == p.ImportPath
}
+// getGoVersion returns the effective minor version of the go command.
func (state *golistState) getGoVersion() (int, error) {
state.goVersionOnce.Do(func() {
state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.cfg.gocmdRunner)
@@ -809,10 +813,76 @@ func absJoin(dir string, fileses ...[]string) (res []string) {
return res
}
-func golistargs(cfg *Config, words []string) []string {
+func jsonFlag(cfg *Config, goVersion int) string {
+ if goVersion < 19 {
+ return "-json"
+ }
+ var fields []string
+ added := make(map[string]bool)
+ addFields := func(fs ...string) {
+ for _, f := range fs {
+ if !added[f] {
+ added[f] = true
+ fields = append(fields, f)
+ }
+ }
+ }
+ addFields("Name", "ImportPath", "Error") // These fields are always needed
+ if cfg.Mode&NeedFiles != 0 || cfg.Mode&NeedTypes != 0 {
+ addFields("Dir", "GoFiles", "IgnoredGoFiles", "IgnoredOtherFiles", "CFiles",
+ "CgoFiles", "CXXFiles", "MFiles", "HFiles", "FFiles", "SFiles",
+ "SwigFiles", "SwigCXXFiles", "SysoFiles")
+ if cfg.Tests {
+ addFields("TestGoFiles", "XTestGoFiles")
+ }
+ }
+ if cfg.Mode&NeedTypes != 0 {
+ // CompiledGoFiles seems to be required for the test case TestCgoNoSyntax,
+ // even when -compiled isn't passed in.
+ // TODO(#52435): Should we make the test ask for -compiled, or automatically
+ // request CompiledGoFiles in certain circumstances?
+ addFields("Dir", "CompiledGoFiles")
+ }
+ if cfg.Mode&NeedCompiledGoFiles != 0 {
+ addFields("Dir", "CompiledGoFiles", "Export")
+ }
+ if cfg.Mode&NeedImports != 0 {
+ // When imports are requested, DepOnly is used to distinguish between packages
+ // explicitly requested and transitive imports of those packages.
+ addFields("DepOnly", "Imports", "ImportMap")
+ if cfg.Tests {
+ addFields("TestImports", "XTestImports")
+ }
+ }
+ if cfg.Mode&NeedDeps != 0 {
+ addFields("DepOnly")
+ }
+ if usesExportData(cfg) {
+ // Request Dir in the unlikely case Export is not absolute.
+ addFields("Dir", "Export")
+ }
+ if cfg.Mode&needInternalForTest != 0 {
+ addFields("ForTest")
+ }
+ if cfg.Mode&needInternalDepsErrors != 0 {
+ addFields("DepsErrors")
+ }
+ if cfg.Mode&NeedModule != 0 {
+ addFields("Module")
+ }
+ if cfg.Mode&NeedEmbedFiles != 0 {
+ addFields("EmbedFiles")
+ }
+ if cfg.Mode&NeedEmbedPatterns != 0 {
+ addFields("EmbedPatterns")
+ }
+ return "-json=" + strings.Join(fields, ",")
+}
+
+func golistargs(cfg *Config, words []string, goVersion int) []string {
const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo
fullargs := []string{
- "-e", "-json",
+ "-e", jsonFlag(cfg, goVersion),
fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypes|NeedTypesInfo|NeedTypesSizes) != 0),
fmt.Sprintf("-test=%t", cfg.Tests),
fmt.Sprintf("-export=%t", usesExportData(cfg)),
@@ -883,7 +953,7 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer,
if !ok {
// Catastrophic error:
// - context cancellation
- return nil, xerrors.Errorf("couldn't run 'go': %w", err)
+ return nil, fmt.Errorf("couldn't run 'go': %w", err)
}
// Old go version?
diff --git a/go/packages/overlay_test.go b/go/packages/overlay_test.go
index f2164c274..4318739eb 100644
--- a/go/packages/overlay_test.go
+++ b/go/packages/overlay_test.go
@@ -109,8 +109,6 @@ func TestOverlayChangesTestPackageName(t *testing.T) {
testAllOrModulesParallel(t, testOverlayChangesTestPackageName)
}
func testOverlayChangesTestPackageName(t *testing.T, exporter packagestest.Exporter) {
- testenv.NeedsGo1Point(t, 16)
-
exported := packagestest.Export(t, exporter, []packagestest.Module{{
Name: "fake",
Files: map[string]interface{}{
@@ -717,8 +715,6 @@ func TestInvalidFilesBeforeOverlay(t *testing.T) {
}
func testInvalidFilesBeforeOverlay(t *testing.T, exporter packagestest.Exporter) {
- testenv.NeedsGo1Point(t, 15)
-
exported := packagestest.Export(t, exporter, []packagestest.Module{
{
Name: "golang.org/fake",
@@ -756,8 +752,6 @@ func TestInvalidFilesBeforeOverlayContains(t *testing.T) {
testAllOrModulesParallel(t, testInvalidFilesBeforeOverlayContains)
}
func testInvalidFilesBeforeOverlayContains(t *testing.T, exporter packagestest.Exporter) {
- testenv.NeedsGo1Point(t, 15)
-
exported := packagestest.Export(t, exporter, []packagestest.Module{
{
Name: "golang.org/fake",
@@ -1046,6 +1040,7 @@ func Hi() {
// This does not use go/packagestest because it needs to write a replace
// directive with an absolute path in one of the module's go.mod files.
func TestOverlaysInReplace(t *testing.T) {
+ testenv.NeedsGoPackages(t)
t.Parallel()
// Create module b.com in a temporary directory. Do not add any Go files
diff --git a/go/packages/packages.go b/go/packages/packages.go
index 2442845fe..0f1505b80 100644
--- a/go/packages/packages.go
+++ b/go/packages/packages.go
@@ -15,10 +15,12 @@ import (
"go/scanner"
"go/token"
"go/types"
+ "io"
"io/ioutil"
"log"
"os"
"path/filepath"
+ "runtime"
"strings"
"sync"
"time"
@@ -71,6 +73,13 @@ const (
// NeedTypesSizes adds TypesSizes.
NeedTypesSizes
+ // needInternalDepsErrors adds the internal deps errors field for use by gopls.
+ needInternalDepsErrors
+
+ // needInternalForTest adds the internal forTest field.
+ // Tests must also be set on the context for this field to be populated.
+ needInternalForTest
+
// typecheckCgo enables full support for type checking cgo. Requires Go 1.15+.
// Modifies CompiledGoFiles and Types, and has no effect on its own.
typecheckCgo
@@ -226,6 +235,11 @@ type driverResponse struct {
// Imports will be connected and then type and syntax information added in a
// later pass (see refine).
Packages []*Package
+
+ // GoVersion is the minor version number used by the driver
+ // (e.g. the go command on the PATH) when selecting .go files.
+ // Zero means unknown.
+ GoVersion int
}
// Load loads and returns the Go packages named by the given patterns.
@@ -249,7 +263,7 @@ func Load(cfg *Config, patterns ...string) ([]*Package, error) {
return nil, err
}
l.sizes = response.Sizes
- return l.refine(response.Roots, response.Packages...)
+ return l.refine(response)
}
// defaultDriver is a driver that implements go/packages' fallback behavior.
@@ -290,6 +304,9 @@ type Package struct {
// of the package, or while parsing or type-checking its files.
Errors []Error
+ // TypeErrors contains the subset of errors produced during type checking.
+ TypeErrors []types.Error
+
// GoFiles lists the absolute file paths of the package's Go source files.
GoFiles []string
@@ -403,6 +420,8 @@ func init() {
config.(*Config).modFlag = value
}
packagesinternal.TypecheckCgo = int(typecheckCgo)
+ packagesinternal.DepsErrors = int(needInternalDepsErrors)
+ packagesinternal.ForTest = int(needInternalForTest)
}
// An Error describes a problem with a package's metadata, syntax, or types.
@@ -523,6 +542,7 @@ type loaderPackage struct {
needsrc bool // load from source (Mode >= LoadTypes)
needtypes bool // type information is either requested or depended on
initial bool // package was matched by a pattern
+ goVersion int // minor version number of go command on PATH
}
// loader holds the working state of a single call to load.
@@ -609,7 +629,8 @@ func newLoader(cfg *Config) *loader {
// refine connects the supplied packages into a graph and then adds type and
// and syntax information as requested by the LoadMode.
-func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
+func (ld *loader) refine(response *driverResponse) ([]*Package, error) {
+ roots := response.Roots
rootMap := make(map[string]int, len(roots))
for i, root := range roots {
rootMap[root] = i
@@ -617,7 +638,7 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
ld.pkgs = make(map[string]*loaderPackage)
// first pass, fixup and build the map and roots
var initial = make([]*loaderPackage, len(roots))
- for _, pkg := range list {
+ for _, pkg := range response.Packages {
rootIndex := -1
if i, found := rootMap[pkg.ID]; found {
rootIndex = i
@@ -639,6 +660,7 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
Package: pkg,
needtypes: needtypes,
needsrc: needsrc,
+ goVersion: response.GoVersion,
}
ld.pkgs[lpkg.ID] = lpkg
if rootIndex >= 0 {
@@ -856,12 +878,19 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
// never has to create a types.Package for an indirect dependency,
// which would then require that such created packages be explicitly
// inserted back into the Import graph as a final step after export data loading.
+ // (Hence this return is after the Types assignment.)
// The Diamond test exercises this case.
if !lpkg.needtypes && !lpkg.needsrc {
return
}
if !lpkg.needsrc {
- ld.loadFromExportData(lpkg)
+ if err := ld.loadFromExportData(lpkg); err != nil {
+ lpkg.Errors = append(lpkg.Errors, Error{
+ Pos: "-",
+ Msg: err.Error(),
+ Kind: UnknownError, // e.g. can't find/open/parse export data
+ })
+ }
return // not a source package, don't get syntax trees
}
@@ -893,6 +922,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
case types.Error:
// from type checker
+ lpkg.TypeErrors = append(lpkg.TypeErrors, err)
errs = append(errs, Error{
Pos: err.Fset.Position(err.Pos).String(),
Msg: err.Msg,
@@ -914,11 +944,41 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
lpkg.Errors = append(lpkg.Errors, errs...)
}
+ // If the go command on the PATH is newer than the runtime,
+ // then the go/{scanner,ast,parser,types} packages from the
+ // standard library may be unable to process the files
+ // selected by go list.
+ //
+ // There is currently no way to downgrade the effective
+ // version of the go command (see issue 52078), so we proceed
+ // with the newer go command but, in case of parse or type
+ // errors, we emit an additional diagnostic.
+ //
+ // See:
+ // - golang.org/issue/52078 (flag to set release tags)
+ // - golang.org/issue/50825 (gopls legacy version support)
+ // - golang.org/issue/55883 (go/packages confusing error)
+ //
+ // Should we assert a hard minimum of (currently) go1.16 here?
+ var runtimeVersion int
+ if _, err := fmt.Sscanf(runtime.Version(), "go1.%d", &runtimeVersion); err == nil && runtimeVersion < lpkg.goVersion {
+ defer func() {
+ if len(lpkg.Errors) > 0 {
+ appendError(Error{
+ Pos: "-",
+ Msg: fmt.Sprintf("This application uses version go1.%d of the source-processing packages but runs version go1.%d of 'go list'. It may fail to process source files that rely on newer language features. If so, rebuild the application using a newer version of Go.", runtimeVersion, lpkg.goVersion),
+ Kind: UnknownError,
+ })
+ }
+ }()
+ }
+
if ld.Config.Mode&NeedTypes != 0 && len(lpkg.CompiledGoFiles) == 0 && lpkg.ExportFile != "" {
// The config requested loading sources and types, but sources are missing.
// Add an error to the package and fall back to loading from export data.
appendError(Error{"-", fmt.Sprintf("sources missing for package %s", lpkg.ID), ParseError})
- ld.loadFromExportData(lpkg)
+ _ = ld.loadFromExportData(lpkg) // ignore any secondary errors
+
return // can't get syntax trees for this package
}
@@ -972,7 +1032,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
tc := &types.Config{
Importer: importer,
- // Type-check bodies of functions only in non-initial packages.
+ // Type-check bodies of functions only in initial packages.
// Example: for import graph A->B->C and initial packages {A,C},
// we can ignore function bodies in B.
IgnoreFuncBodies: ld.Mode&NeedDeps == 0 && !lpkg.initial,
@@ -1079,7 +1139,6 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) {
//
// Because files are scanned in parallel, the token.Pos
// positions of the resulting ast.Files are not ordered.
-//
func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) {
var wg sync.WaitGroup
n := len(filenames)
@@ -1123,7 +1182,6 @@ func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) {
// sameFile returns true if x and y have the same basename and denote
// the same file.
-//
func sameFile(x, y string) bool {
if x == y {
// It could be the case that y doesn't exist.
@@ -1144,9 +1202,10 @@ func sameFile(x, y string) bool {
return false
}
-// loadFromExportData returns type information for the specified
+// loadFromExportData ensures that type information is present for the specified
// package, loading it from an export data file on the first request.
-func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error) {
+// On success it sets lpkg.Types to a new Package.
+func (ld *loader) loadFromExportData(lpkg *loaderPackage) error {
if lpkg.PkgPath == "" {
log.Fatalf("internal error: Package %s has no PkgPath", lpkg)
}
@@ -1157,8 +1216,8 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error
// must be sequential. (Finer-grained locking would require
// changes to the gcexportdata API.)
//
- // The exportMu lock guards the Package.Pkg field and the
- // types.Package it points to, for each Package in the graph.
+ // The exportMu lock guards the lpkg.Types field and the
+ // types.Package it points to, for each loaderPackage in the graph.
//
// Not all accesses to Package.Pkg need to be protected by exportMu:
// graph ordering ensures that direct dependencies of source
@@ -1167,18 +1226,18 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error
defer ld.exportMu.Unlock()
if tpkg := lpkg.Types; tpkg != nil && tpkg.Complete() {
- return tpkg, nil // cache hit
+ return nil // cache hit
}
lpkg.IllTyped = true // fail safe
if lpkg.ExportFile == "" {
// Errors while building export data will have been printed to stderr.
- return nil, fmt.Errorf("no export data file")
+ return fmt.Errorf("no export data file")
}
f, err := os.Open(lpkg.ExportFile)
if err != nil {
- return nil, err
+ return err
}
defer f.Close()
@@ -1190,7 +1249,7 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error
// queries.)
r, err := gcexportdata.NewReader(f)
if err != nil {
- return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err)
+ return fmt.Errorf("reading %s: %v", lpkg.ExportFile, err)
}
// Build the view.
@@ -1234,7 +1293,12 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error
// (May modify incomplete packages in view but not create new ones.)
tpkg, err := gcexportdata.Read(r, ld.Fset, view, lpkg.PkgPath)
if err != nil {
- return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err)
+ return fmt.Errorf("reading %s: %v", lpkg.ExportFile, err)
+ }
+ if _, ok := view["go.shape"]; ok {
+ // Account for the pseudopackage "go.shape" that gets
+ // created by generic code.
+ viewLen++
}
if viewLen != len(view) {
log.Panicf("golang.org/x/tools/go/packages: unexpected new packages during load of %s", lpkg.PkgPath)
@@ -1242,8 +1306,7 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error
lpkg.Types = tpkg
lpkg.IllTyped = false
-
- return tpkg, nil
+ return nil
}
// impliedLoadMode returns loadMode with its dependencies.
@@ -1259,3 +1322,5 @@ func impliedLoadMode(loadMode LoadMode) LoadMode {
func usesExportData(cfg *Config) bool {
return cfg.Mode&NeedExportFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0
}
+
+var _ interface{} = io.Discard // assert build toolchain is go1.16 or later
diff --git a/go/packages/packages_test.go b/go/packages/packages_test.go
index 796edb6b7..0da72851c 100644
--- a/go/packages/packages_test.go
+++ b/go/packages/packages_test.go
@@ -2471,10 +2471,55 @@ func testIssue37098(t *testing.T, exporter packagestest.Exporter) {
}
}
+// TestIssue56632 checks that CompiledGoFiles does not contain non-go files regardless of
+// whether the NeedFiles mode bit is set.
+func TestIssue56632(t *testing.T) {
+ t.Parallel()
+ testenv.NeedsGoBuild(t)
+ testenv.NeedsTool(t, "cgo")
+
+ exported := packagestest.Export(t, packagestest.GOPATH, []packagestest.Module{{
+ Name: "golang.org/issue56632",
+ Files: map[string]interface{}{
+ "a/a.go": `package a`,
+ "a/a_cgo.go": `package a
+
+import "C"`,
+ "a/a.s": ``,
+ "a/a.c": ``,
+ }}})
+ defer exported.Cleanup()
+
+ modes := []packages.LoadMode{packages.NeedCompiledGoFiles, packages.NeedCompiledGoFiles | packages.NeedFiles, packages.NeedImports | packages.NeedCompiledGoFiles, packages.NeedImports | packages.NeedFiles | packages.NeedCompiledGoFiles}
+ for _, mode := range modes {
+ exported.Config.Mode = mode
+
+ initial, err := packages.Load(exported.Config, "golang.org/issue56632/a")
+ if err != nil {
+ t.Fatalf("failed to load package: %v", err)
+ }
+
+ if len(initial) != 1 {
+ t.Errorf("expected 3 packages, got %d", len(initial))
+ }
+
+ p := initial[0]
+
+ if len(p.Errors) != 0 {
+ t.Errorf("expected no errors, got %v", p.Errors)
+ }
+
+ for _, f := range p.CompiledGoFiles {
+ if strings.HasSuffix(f, ".s") || strings.HasSuffix(f, ".c") {
+ t.Errorf("expected no non-Go CompiledGoFiles, got file %q in CompiledGoFiles", f)
+ }
+ }
+ }
+}
+
// TestInvalidFilesInXTest checks the fix for golang/go#37971 in Go 1.15.
func TestInvalidFilesInXTest(t *testing.T) { testAllOrModulesParallel(t, testInvalidFilesInXTest) }
func testInvalidFilesInXTest(t *testing.T, exporter packagestest.Exporter) {
- testenv.NeedsGo1Point(t, 15)
exported := packagestest.Export(t, exporter, []packagestest.Module{
{
Name: "golang.org/fake",
@@ -2501,7 +2546,6 @@ func testInvalidFilesInXTest(t *testing.T, exporter packagestest.Exporter) {
func TestTypecheckCgo(t *testing.T) { testAllOrModulesParallel(t, testTypecheckCgo) }
func testTypecheckCgo(t *testing.T, exporter packagestest.Exporter) {
- testenv.NeedsGo1Point(t, 15)
testenv.NeedsTool(t, "cgo")
const cgo = `package cgo
@@ -2673,8 +2717,6 @@ func TestInvalidPackageName(t *testing.T) {
}
func testInvalidPackageName(t *testing.T, exporter packagestest.Exporter) {
- testenv.NeedsGo1Point(t, 15)
-
exported := packagestest.Export(t, exporter, []packagestest.Module{{
Name: "golang.org/fake",
Files: map[string]interface{}{
@@ -2709,6 +2751,31 @@ func TestEmptyEnvironment(t *testing.T) {
}
}
+func TestPackageLoadSingleFile(t *testing.T) {
+ tmp, err := ioutil.TempDir("", "a")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmp)
+
+ filename := filepath.Join(tmp, "a.go")
+
+ if err := ioutil.WriteFile(filename, []byte(`package main; func main() { println("hello world") }`), 0775); err != nil {
+ t.Fatal(err)
+ }
+
+ pkgs, err := packages.Load(&packages.Config{Mode: packages.LoadSyntax, Dir: tmp}, "file="+filename)
+ if err != nil {
+ t.Fatalf("could not load package: %v", err)
+ }
+ if len(pkgs) != 1 {
+ t.Fatalf("expected one package to be loaded, got %d", len(pkgs))
+ }
+ if len(pkgs[0].CompiledGoFiles) != 1 || pkgs[0].CompiledGoFiles[0] != filename {
+ t.Fatalf("expected one compiled go file (%q), got %v", filename, pkgs[0].CompiledGoFiles)
+ }
+}
+
func errorMessages(errors []packages.Error) []string {
var msgs []string
for _, err := range errors {
diff --git a/go/packages/packagestest/expect.go b/go/packages/packagestest/expect.go
index c1781e7b9..92c20a64a 100644
--- a/go/packages/packagestest/expect.go
+++ b/go/packages/packagestest/expect.go
@@ -16,7 +16,6 @@ import (
"golang.org/x/tools/go/expect"
"golang.org/x/tools/go/packages"
- "golang.org/x/tools/internal/span"
)
const (
@@ -41,24 +40,27 @@ const (
// call the Mark method to add the marker to the global set.
// You can register the "mark" method to override these in your own call to
// Expect. The bound Mark function is usable directly in your method map, so
-// exported.Expect(map[string]interface{}{"mark": exported.Mark})
+//
+// exported.Expect(map[string]interface{}{"mark": exported.Mark})
+//
// replicates the built in behavior.
//
-// Method invocation
+// # Method invocation
//
// When invoking a method the expressions in the parameter list need to be
// converted to values to be passed to the method.
// There are a very limited set of types the arguments are allowed to be.
-// expect.Note : passed the Note instance being evaluated.
-// string : can be supplied either a string literal or an identifier.
-// int : can only be supplied an integer literal.
-// *regexp.Regexp : can only be supplied a regular expression literal
-// token.Pos : has a file position calculated as described below.
-// token.Position : has a file position calculated as described below.
-// expect.Range: has a start and end position as described below.
-// interface{} : will be passed any value
//
-// Position calculation
+// expect.Note : passed the Note instance being evaluated.
+// string : can be supplied either a string literal or an identifier.
+// int : can only be supplied an integer literal.
+// *regexp.Regexp : can only be supplied a regular expression literal
+// token.Pos : has a file position calculated as described below.
+// token.Position : has a file position calculated as described below.
+// expect.Range: has a start and end position as described below.
+// interface{} : will be passed any value
+//
+// # Position calculation
//
// There is some extra handling when a parameter is being coerced into a
// token.Pos, token.Position or Range type argument.
@@ -121,14 +123,31 @@ func (e *Exported) Expect(methods map[string]interface{}) error {
return nil
}
-// Range is a type alias for span.Range for backwards compatibility, prefer
-// using span.Range directly.
-type Range = span.Range
+// A Range represents an interval within a source file in go/token notation.
+type Range struct {
+ TokFile *token.File // non-nil
+ Start, End token.Pos // both valid and within range of TokFile
+}
+
+// A rangeSetter abstracts a variable that can be set from a Range value.
+//
+// The parameter conversion machinery will automatically construct a
+// variable of type T and call the SetRange method on its address if
+// *T implements rangeSetter. This allows alternative notations of
+// source ranges to interoperate transparently with this package.
+//
+// This type intentionally does not mention Range itself, to avoid a
+// dependency from the application's range type upon this package.
+//
+// Currently this is a secret back door for use only by gopls.
+type rangeSetter interface {
+ SetRange(file *token.File, start, end token.Pos)
+}
// Mark adds a new marker to the known set.
func (e *Exported) Mark(name string, r Range) {
if e.markers == nil {
- e.markers = make(map[string]span.Range)
+ e.markers = make(map[string]Range)
}
e.markers[name] = r
}
@@ -218,22 +237,22 @@ func (e *Exported) getMarkers() error {
return nil
}
// set markers early so that we don't call getMarkers again from Expect
- e.markers = make(map[string]span.Range)
+ e.markers = make(map[string]Range)
return e.Expect(map[string]interface{}{
markMethod: e.Mark,
})
}
var (
- noteType = reflect.TypeOf((*expect.Note)(nil))
- identifierType = reflect.TypeOf(expect.Identifier(""))
- posType = reflect.TypeOf(token.Pos(0))
- positionType = reflect.TypeOf(token.Position{})
- rangeType = reflect.TypeOf(span.Range{})
- spanType = reflect.TypeOf(span.Span{})
- fsetType = reflect.TypeOf((*token.FileSet)(nil))
- regexType = reflect.TypeOf((*regexp.Regexp)(nil))
- exportedType = reflect.TypeOf((*Exported)(nil))
+ noteType = reflect.TypeOf((*expect.Note)(nil))
+ identifierType = reflect.TypeOf(expect.Identifier(""))
+ posType = reflect.TypeOf(token.Pos(0))
+ positionType = reflect.TypeOf(token.Position{})
+ rangeType = reflect.TypeOf(Range{})
+ rangeSetterType = reflect.TypeOf((*rangeSetter)(nil)).Elem()
+ fsetType = reflect.TypeOf((*token.FileSet)(nil))
+ regexType = reflect.TypeOf((*regexp.Regexp)(nil))
+ exportedType = reflect.TypeOf((*Exported)(nil))
)
// converter converts from a marker's argument parsed from the comment to
@@ -292,17 +311,16 @@ func (e *Exported) buildConverter(pt reflect.Type) (converter, error) {
}
return reflect.ValueOf(r), remains, nil
}, nil
- case pt == spanType:
+ case reflect.PtrTo(pt).AssignableTo(rangeSetterType):
+ // (*pt).SetRange method exists: call it.
return func(n *expect.Note, args []interface{}) (reflect.Value, []interface{}, error) {
r, remains, err := e.rangeConverter(n, args)
if err != nil {
return reflect.Value{}, nil, err
}
- spn, err := r.Span()
- if err != nil {
- return reflect.Value{}, nil, err
- }
- return reflect.ValueOf(spn), remains, nil
+ v := reflect.New(pt)
+ v.Interface().(rangeSetter).SetRange(r.TokFile, r.Start, r.End)
+ return v.Elem(), remains, nil
}, nil
case pt == identifierType:
return func(n *expect.Note, args []interface{}) (reflect.Value, []interface{}, error) {
@@ -405,9 +423,10 @@ func (e *Exported) buildConverter(pt reflect.Type) (converter, error) {
}
}
-func (e *Exported) rangeConverter(n *expect.Note, args []interface{}) (span.Range, []interface{}, error) {
+func (e *Exported) rangeConverter(n *expect.Note, args []interface{}) (Range, []interface{}, error) {
+ tokFile := e.ExpectFileSet.File(n.Pos)
if len(args) < 1 {
- return span.Range{}, nil, fmt.Errorf("missing argument")
+ return Range{}, nil, fmt.Errorf("missing argument")
}
arg := args[0]
args = args[1:]
@@ -416,37 +435,62 @@ func (e *Exported) rangeConverter(n *expect.Note, args []interface{}) (span.Rang
// handle the special identifiers
switch arg {
case eofIdentifier:
- // end of file identifier, look up the current file
- f := e.ExpectFileSet.File(n.Pos)
- eof := f.Pos(f.Size())
- return span.Range{FileSet: e.ExpectFileSet, Start: eof, End: token.NoPos}, args, nil
+ // end of file identifier
+ eof := tokFile.Pos(tokFile.Size())
+ return newRange(tokFile, eof, eof), args, nil
default:
// look up an marker by name
mark, ok := e.markers[string(arg)]
if !ok {
- return span.Range{}, nil, fmt.Errorf("cannot find marker %v", arg)
+ return Range{}, nil, fmt.Errorf("cannot find marker %v", arg)
}
return mark, args, nil
}
case string:
start, end, err := expect.MatchBefore(e.ExpectFileSet, e.FileContents, n.Pos, arg)
if err != nil {
- return span.Range{}, nil, err
+ return Range{}, nil, err
}
- if start == token.NoPos {
- return span.Range{}, nil, fmt.Errorf("%v: pattern %s did not match", e.ExpectFileSet.Position(n.Pos), arg)
+ if !start.IsValid() {
+ return Range{}, nil, fmt.Errorf("%v: pattern %s did not match", e.ExpectFileSet.Position(n.Pos), arg)
}
- return span.Range{FileSet: e.ExpectFileSet, Start: start, End: end}, args, nil
+ return newRange(tokFile, start, end), args, nil
case *regexp.Regexp:
start, end, err := expect.MatchBefore(e.ExpectFileSet, e.FileContents, n.Pos, arg)
if err != nil {
- return span.Range{}, nil, err
+ return Range{}, nil, err
}
- if start == token.NoPos {
- return span.Range{}, nil, fmt.Errorf("%v: pattern %s did not match", e.ExpectFileSet.Position(n.Pos), arg)
+ if !start.IsValid() {
+ return Range{}, nil, fmt.Errorf("%v: pattern %s did not match", e.ExpectFileSet.Position(n.Pos), arg)
}
- return span.Range{FileSet: e.ExpectFileSet, Start: start, End: end}, args, nil
+ return newRange(tokFile, start, end), args, nil
default:
- return span.Range{}, nil, fmt.Errorf("cannot convert %v to pos", arg)
+ return Range{}, nil, fmt.Errorf("cannot convert %v to pos", arg)
+ }
+}
+
+// newRange creates a new Range from a token.File and two valid positions within it.
+func newRange(file *token.File, start, end token.Pos) Range {
+ fileBase := file.Base()
+ fileEnd := fileBase + file.Size()
+ if !start.IsValid() {
+ panic("invalid start token.Pos")
+ }
+ if !end.IsValid() {
+ panic("invalid end token.Pos")
+ }
+ if int(start) < fileBase || int(start) > fileEnd {
+ panic(fmt.Sprintf("invalid start: %d not in [%d, %d]", start, fileBase, fileEnd))
+ }
+ if int(end) < fileBase || int(end) > fileEnd {
+ panic(fmt.Sprintf("invalid end: %d not in [%d, %d]", end, fileBase, fileEnd))
+ }
+ if start > end {
+ panic("invalid start: greater than end")
+ }
+ return Range{
+ TokFile: file,
+ Start: start,
+ End: end,
}
}
diff --git a/go/packages/packagestest/expect_test.go b/go/packages/packagestest/expect_test.go
index 2587f580b..46d96d61f 100644
--- a/go/packages/packagestest/expect_test.go
+++ b/go/packages/packagestest/expect_test.go
@@ -10,7 +10,6 @@ import (
"golang.org/x/tools/go/expect"
"golang.org/x/tools/go/packages/packagestest"
- "golang.org/x/tools/internal/span"
)
func TestExpect(t *testing.T) {
@@ -43,7 +42,7 @@ func TestExpect(t *testing.T) {
}
},
"directNote": func(n *expect.Note) {},
- "range": func(r span.Range) {
+ "range": func(r packagestest.Range) {
if r.Start == token.NoPos || r.Start == 0 {
t.Errorf("Range had no valid starting position")
}
diff --git a/go/packages/packagestest/export.go b/go/packages/packagestest/export.go
index d792c3c3d..b687a44fb 100644
--- a/go/packages/packagestest/export.go
+++ b/go/packages/packagestest/export.go
@@ -9,7 +9,7 @@ By changing the exporter used, you can create projects for multiple build
systems from the same description, and run the same tests on them in many
cases.
-Example
+# Example
As an example of packagestest use, consider the following test that runs
the 'go list' command on the specified modules:
@@ -60,7 +60,6 @@ Running the test with verbose output will print:
main_test.go:36: 'go list gopher.example/...' with Modules mode layout:
gopher.example/repoa/a
gopher.example/repob/b
-
*/
package packagestest
@@ -80,9 +79,7 @@ import (
"golang.org/x/tools/go/expect"
"golang.org/x/tools/go/packages"
- "golang.org/x/tools/internal/span"
"golang.org/x/tools/internal/testenv"
- "golang.org/x/xerrors"
)
var (
@@ -131,7 +128,7 @@ type Exported struct {
primary string // the first non GOROOT module that was exported
written map[string]map[string]string // the full set of exported files
notes []*expect.Note // The list of expectations extracted from go source files
- markers map[string]span.Range // The set of markers extracted from go source files
+ markers map[string]Range // The set of markers extracted from go source files
}
// Exporter implementations are responsible for converting from the generic description of some
@@ -248,7 +245,7 @@ func Export(t testing.TB, exporter Exporter, modules []Module) *Exported {
switch value := value.(type) {
case Writer:
if err := value(fullpath); err != nil {
- if xerrors.Is(err, ErrUnsupported) {
+ if errors.Is(err, ErrUnsupported) {
t.Skip(err)
}
t.Fatal(err)
@@ -340,7 +337,7 @@ func Symlink(source string) Writer {
mode := os.ModePerm
if err == nil {
mode = stat.Mode()
- } else if !xerrors.Is(err, os.ErrNotExist) {
+ } else if !errors.Is(err, os.ErrNotExist) {
// We couldn't open the source, but it might exist. We don't expect to be
// able to portably create a symlink to a file we can't see.
return symlinkErr
@@ -452,17 +449,19 @@ func copyFile(dest, source string, perm os.FileMode) error {
// GroupFilesByModules attempts to map directories to the modules within each directory.
// This function assumes that the folder is structured in the following way:
-// - dir
-// - primarymod
-// - .go files
-// - packages
-// - go.mod (optional)
-// - modules
-// - repoa
-// - mod1
-// - .go files
-// - packages
-// - go.mod (optional)
+//
+// dir/
+// primarymod/
+// *.go files
+// packages
+// go.mod (optional)
+// modules/
+// repoa/
+// mod1/
+// *.go files
+// packages
+// go.mod (optional)
+//
// It scans the directory tree anchored at root and adds a Copy writer to the
// map for every file found.
// This is to enable the common case in tests where you have a full copy of the
diff --git a/go/packages/packagestest/gopath.go b/go/packages/packagestest/gopath.go
index 54016859b..d56f523ed 100644
--- a/go/packages/packagestest/gopath.go
+++ b/go/packages/packagestest/gopath.go
@@ -12,26 +12,33 @@ import (
// GOPATH is the exporter that produces GOPATH layouts.
// Each "module" is put in it's own GOPATH entry to help test complex cases.
// Given the two files
-// golang.org/repoa#a/a.go
-// golang.org/repob#b/b.go
+//
+// golang.org/repoa#a/a.go
+// golang.org/repob#b/b.go
+//
// You would get the directory layout
-// /sometemporarydirectory
-// ├── repoa
-// │ └── src
-// │ └── golang.org
-// │ └── repoa
-// │ └── a
-// │ └── a.go
-// └── repob
-// └── src
-// └── golang.org
-// └── repob
-// └── b
-// └── b.go
+//
+// /sometemporarydirectory
+// ├── repoa
+// │ └── src
+// │ └── golang.org
+// │ └── repoa
+// │ └── a
+// │ └── a.go
+// └── repob
+// └── src
+// └── golang.org
+// └── repob
+// └── b
+// └── b.go
+//
// GOPATH would be set to
-// /sometemporarydirectory/repoa;/sometemporarydirectory/repob
+//
+// /sometemporarydirectory/repoa;/sometemporarydirectory/repob
+//
// and the working directory would be
-// /sometemporarydirectory/repoa/src
+//
+// /sometemporarydirectory/repoa/src
var GOPATH = gopath{}
func init() {
diff --git a/go/packages/packagestest/modules.go b/go/packages/packagestest/modules.go
index 2c4356747..69a6c935d 100644
--- a/go/packages/packagestest/modules.go
+++ b/go/packages/packagestest/modules.go
@@ -23,20 +23,25 @@ import (
// Each "repository" is put in it's own module, and the module file generated
// will have replace directives for all other modules.
// Given the two files
-// golang.org/repoa#a/a.go
-// golang.org/repob#b/b.go
+//
+// golang.org/repoa#a/a.go
+// golang.org/repob#b/b.go
+//
// You would get the directory layout
-// /sometemporarydirectory
-// ├── repoa
-// │ ├── a
-// │ │ └── a.go
-// │ └── go.mod
-// └── repob
-// ├── b
-// │ └── b.go
-// └── go.mod
+//
+// /sometemporarydirectory
+// ├── repoa
+// │ ├── a
+// │ │ └── a.go
+// │ └── go.mod
+// └── repob
+// ├── b
+// │ └── b.go
+// └── go.mod
+//
// and the working directory would be
-// /sometemporarydirectory/repoa
+//
+// /sometemporarydirectory/repoa
var Modules = modules{}
type modules struct{}
diff --git a/go/packages/packagestest/modules_test.go b/go/packages/packagestest/modules_test.go
index 6f627b1e5..de290ead9 100644
--- a/go/packages/packagestest/modules_test.go
+++ b/go/packages/packagestest/modules_test.go
@@ -9,11 +9,9 @@ import (
"testing"
"golang.org/x/tools/go/packages/packagestest"
- "golang.org/x/tools/internal/testenv"
)
func TestModulesExport(t *testing.T) {
- testenv.NeedsGo1Point(t, 11)
exported := packagestest.Export(t, packagestest.Modules, testdata)
defer exported.Cleanup()
// Check that the cfg contains all the right bits
diff --git a/go/pointer/analysis.go b/go/pointer/analysis.go
index 0abb04dd8..e3c85ede4 100644
--- a/go/pointer/analysis.go
+++ b/go/pointer/analysis.go
@@ -16,6 +16,7 @@ import (
"runtime"
"runtime/debug"
"sort"
+ "strings"
"golang.org/x/tools/go/callgraph"
"golang.org/x/tools/go/ssa"
@@ -46,7 +47,6 @@ const (
//
// (Note: most variables called 'obj' are not *objects but nodeids
// such that a.nodes[obj].obj != nil.)
-//
type object struct {
// flags is a bitset of the node type (ot*) flags defined above.
flags uint32
@@ -59,8 +59,8 @@ type object struct {
//
// ssa.Value for an object allocated by an SSA operation.
// types.Type for an rtype instance object or *rtype-tagged object.
- // string for an instrinsic object, e.g. the array behind os.Args.
- // nil for an object allocated by an instrinsic.
+ // string for an intrinsic object, e.g. the array behind os.Args.
+ // nil for an object allocated by an intrinsic.
// (cgn provides the identity of the intrinsic.)
data interface{}
@@ -82,7 +82,6 @@ type nodeid uint32
//
// Nodes that are pointed-to locations ("labels") have an enclosing
// object (see analysis.enclosingObject).
-//
type node struct {
// If non-nil, this node is the start of an object
// (addressable memory location).
@@ -215,7 +214,6 @@ func (a *analysis) computeTrackBits() {
//
// Pointer analysis of a transitively closed well-typed program should
// always succeed. An error can occur only due to an internal bug.
-//
func Analyze(config *Config) (result *Result, err error) {
if config.Mains == nil {
return nil, fmt.Errorf("no main/test packages to analyze (check $GOROOT/$GOPATH)")
@@ -361,7 +359,6 @@ func Analyze(config *Config) (result *Result, err error) {
// callEdge is called for each edge in the callgraph.
// calleeid is the callee's object node (has otFunction flag).
-//
func (a *analysis) callEdge(caller *cgnode, site *callsite, calleeid nodeid) {
obj := a.nodes[calleeid].obj
if obj.flags&otFunction == 0 {
@@ -381,12 +378,27 @@ func (a *analysis) callEdge(caller *cgnode, site *callsite, calleeid nodeid) {
fmt.Fprintf(a.log, "\tcall edge %s -> %s\n", site, callee)
}
- // Warn about calls to non-intrinsic external functions.
+ // Warn about calls to functions that are handled unsoundly.
// TODO(adonovan): de-dup these messages.
- if fn := callee.fn; fn.Blocks == nil && a.findIntrinsic(fn) == nil {
+ fn := callee.fn
+
+ // Warn about calls to non-intrinsic external functions.
+ if fn.Blocks == nil && a.findIntrinsic(fn) == nil {
a.warnf(site.pos(), "unsound call to unknown intrinsic: %s", fn)
a.warnf(fn.Pos(), " (declared here)")
}
+
+ // Warn about calls to generic function bodies.
+ if fn.TypeParams().Len() > 0 && len(fn.TypeArgs()) == 0 {
+ a.warnf(site.pos(), "unsound call to generic function body: %s (build with ssa.InstantiateGenerics)", fn)
+ a.warnf(fn.Pos(), " (declared here)")
+ }
+
+ // Warn about calls to instantiation wrappers of generics functions.
+ if fn.Origin() != nil && strings.HasPrefix(fn.Synthetic, "instantiation wrapper ") {
+ a.warnf(site.pos(), "unsound call to instantiation wrapper of generic: %s (build with ssa.InstantiateGenerics)", fn)
+ a.warnf(fn.Pos(), " (declared here)")
+ }
}
// dumpSolution writes the PTS solution to the specified file.
@@ -394,7 +406,6 @@ func (a *analysis) callEdge(caller *cgnode, site *callsite, calleeid nodeid) {
// It only dumps the nodes that existed before solving. The order in
// which solver-created nodes are created depends on pre-solver
// optimization, so we can't include them in the cross-check.
-//
func (a *analysis) dumpSolution(filename string, N int) {
f, err := os.Create(filename)
if err != nil {
@@ -422,7 +433,6 @@ func (a *analysis) dumpSolution(filename string, N int) {
// showCounts logs the size of the constraint system. A typical
// optimized distribution is 65% copy, 13% load, 11% addr, 5%
// offsetAddr, 4% store, 2% others.
-//
func (a *analysis) showCounts() {
if a.log != nil {
counts := make(map[reflect.Type]int)
diff --git a/go/pointer/api.go b/go/pointer/api.go
index 2a13a6781..8c9a8c775 100644
--- a/go/pointer/api.go
+++ b/go/pointer/api.go
@@ -28,7 +28,11 @@ type Config struct {
// dependencies of any main package may still affect the
// analysis result, because they contribute runtime types and
// thus methods.
+ //
// TODO(adonovan): investigate whether this is desirable.
+ //
+ // Calls to generic functions will be unsound unless packages
+ // are built using the ssa.InstantiateGenerics builder mode.
Mains []*ssa.Package
// Reflection determines whether to handle reflection
@@ -93,7 +97,7 @@ func (c *Config) AddQuery(v ssa.Value) {
c.Queries[v] = struct{}{}
}
-// AddQuery adds v to Config.IndirectQueries.
+// AddIndirectQuery adds v to Config.IndirectQueries.
// Precondition: CanPoint(v.Type().Underlying().(*types.Pointer).Elem()).
func (c *Config) AddIndirectQuery(v ssa.Value) {
if c.IndirectQueries == nil {
@@ -128,9 +132,10 @@ func (c *Config) AddIndirectQuery(v ssa.Value) {
// before analysis has finished has undefined behavior.
//
// Example:
-// // given v, which represents a function call to 'fn() (int, []*T)', and
-// // 'type T struct { F *int }', the following query will access the field F.
-// c.AddExtendedQuery(v, "x[1][0].F")
+//
+// // given v, which represents a function call to 'fn() (int, []*T)', and
+// // 'type T struct { F *int }', the following query will access the field F.
+// c.AddExtendedQuery(v, "x[1][0].F")
func (c *Config) AddExtendedQuery(v ssa.Value, query string) (*Pointer, error) {
ops, _, err := parseExtendedQuery(v.Type(), query)
if err != nil {
@@ -160,7 +165,6 @@ type Warning struct {
// A Result contains the results of a pointer analysis.
//
// See Config for how to request the various Result components.
-//
type Result struct {
CallGraph *callgraph.Graph // discovered call graph
Queries map[ssa.Value]Pointer // pts(v) for each v in Config.Queries.
@@ -172,7 +176,6 @@ type Result struct {
//
// A Pointer doesn't have a unique type because pointers of distinct
// types may alias the same object.
-//
type Pointer struct {
a *analysis
n nodeid
@@ -223,7 +226,6 @@ func (s PointsToSet) Labels() []*Label {
// map value is the PointsToSet for pointers of that type.
//
// The result is empty unless CanHaveDynamicTypes(T).
-//
func (s PointsToSet) DynamicTypes() *typeutil.Map {
var tmap typeutil.Map
tmap.SetHasher(s.a.hasher)
diff --git a/go/pointer/callgraph.go b/go/pointer/callgraph.go
index 48e152e4a..0b7aba52a 100644
--- a/go/pointer/callgraph.go
+++ b/go/pointer/callgraph.go
@@ -39,7 +39,6 @@ func (n *cgnode) String() string {
// it is implicitly context-sensitive.
// callsites never represent calls to built-ins;
// they are handled as intrinsics.
-//
type callsite struct {
targets nodeid // pts(·) contains objects for dynamically called functions
instr ssa.CallInstruction // the call instruction; nil for synthetic/intrinsic
diff --git a/go/pointer/doc.go b/go/pointer/doc.go
index e317cf5c3..aca343b88 100644
--- a/go/pointer/doc.go
+++ b/go/pointer/doc.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
/*
-
Package pointer implements Andersen's analysis, an inclusion-based
pointer analysis algorithm first described in (Andersen, 1994).
@@ -22,8 +21,7 @@ demonstrates both of these features. Clients should not request more
information than they need since it may increase the cost of the
analysis significantly.
-
-CLASSIFICATION
+# CLASSIFICATION
Our algorithm is INCLUSION-BASED: the points-to sets for x and y will
be related by pts(y) ⊇ pts(x) if the program contains the statement
@@ -44,7 +42,9 @@ of their calling context.
It has a CONTEXT-SENSITIVE HEAP: objects are named by both allocation
site and context, so the objects returned by two distinct calls to f:
- func f() *T { return new(T) }
+
+ func f() *T { return new(T) }
+
are distinguished up to the limits of the calling context.
It is a WHOLE PROGRAM analysis: it requires SSA-form IR for the
@@ -52,16 +52,14 @@ complete Go program and summaries for native code.
See the (Hind, PASTE'01) survey paper for an explanation of these terms.
-
-SOUNDNESS
+# SOUNDNESS
The analysis is fully sound when invoked on pure Go programs that do not
use reflection or unsafe.Pointer conversions. In other words, if there
is any possible execution of the program in which pointer P may point to
object O, the analysis will report that fact.
-
-REFLECTION
+# REFLECTION
By default, the "reflect" library is ignored by the analysis, as if all
its functions were no-ops, but if the client enables the Reflection flag,
@@ -77,17 +75,18 @@ Most but not all reflection operations are supported.
In particular, addressable reflect.Values are not yet implemented, so
operations such as (reflect.Value).Set have no analytic effect.
-
-UNSAFE POINTER CONVERSIONS
+# UNSAFE POINTER CONVERSIONS
The pointer analysis makes no attempt to understand aliasing between the
operand x and result y of an unsafe.Pointer conversion:
- y = (*T)(unsafe.Pointer(x))
+
+ y = (*T)(unsafe.Pointer(x))
+
It is as if the conversion allocated an entirely new object:
- y = new(T)
+ y = new(T)
-NATIVE CODE
+# NATIVE CODE
The analysis cannot model the aliasing effects of functions written in
languages other than Go, such as runtime intrinsics in C or assembly, or
@@ -100,7 +99,7 @@ effects of native code.
------------------------------------------------------------------------
-IMPLEMENTATION
+# IMPLEMENTATION
The remaining documentation is intended for package maintainers and
pointer analysis specialists. Maintainers should have a solid
@@ -118,8 +117,7 @@ operations.) This improves the traction of presolver optimisations,
but imposes certain restrictions, e.g. potential context sensitivity
is limited since all variants must be created a priori.
-
-TERMINOLOGY
+# TERMINOLOGY
A type is said to be "pointer-like" if it is a reference to an object.
Pointer-like types include pointers and also interfaces, maps, channels,
@@ -134,8 +132,7 @@ It means: for each node index p in pts(src), the node index p+offset is
in pts(dst). Similarly *dst+offset=src is used for store constraints
and dst=src+offset for offset-address constraints.
-
-NODES
+# NODES
Nodes are the key datastructure of the analysis, and have a dual role:
they represent both constraint variables (equivalence classes of
@@ -166,8 +163,7 @@ simple edges (or copy constraints) represent value flow. Complex
edges (load, store, etc) trigger the creation of new simple edges
during the solving phase.
-
-OBJECTS
+# OBJECTS
Conceptually, an "object" is a contiguous sequence of nodes denoting
an addressable location: something that a pointer can point to. The
@@ -175,12 +171,12 @@ first node of an object has a non-nil obj field containing information
about the allocation: its size, context, and ssa.Value.
Objects include:
- - functions and globals;
- - variable allocations in the stack frame or heap;
- - maps, channels and slices created by calls to make();
- - allocations to construct an interface;
- - allocations caused by conversions, e.g. []byte(str).
- - arrays allocated by calls to append();
+ - functions and globals;
+ - variable allocations in the stack frame or heap;
+ - maps, channels and slices created by calls to make();
+ - allocations to construct an interface;
+ - allocations caused by conversions, e.g. []byte(str).
+ - arrays allocated by calls to append();
Many objects have no Go types. For example, the func, map and chan type
kinds in Go are all varieties of pointers, but their respective objects
@@ -198,14 +194,13 @@ of the empty type struct{}. (All arrays are treated as if of length 1,
so there are no empty arrays. The empty tuple is never address-taken,
so is never an object.)
-
-TAGGED OBJECTS
+# TAGGED OBJECTS
An tagged object has the following layout:
- T -- obj.flags ⊇ {otTagged}
- v
- ...
+ T -- obj.flags ⊇ {otTagged}
+ v
+ ...
The T node's typ field is the dynamic type of the "payload": the value
v which follows, flattened out. The T node's obj has the otTagged
@@ -219,331 +214,353 @@ Tagged objects may be indirect (obj.flags ⊇ {otIndirect}) meaning that
the value v is not of type T but *T; this is used only for
reflect.Values that represent lvalues. (These are not implemented yet.)
-
-ANALYSIS ABSTRACTION OF EACH TYPE
+# ANALYSIS ABSTRACTION OF EACH TYPE
Variables of the following "scalar" types may be represented by a
single node: basic types, pointers, channels, maps, slices, 'func'
pointers, interfaces.
-Pointers
- Nothing to say here, oddly.
-
-Basic types (bool, string, numbers, unsafe.Pointer)
- Currently all fields in the flattening of a type, including
- non-pointer basic types such as int, are represented in objects and
- values. Though non-pointer nodes within values are uninteresting,
- non-pointer nodes in objects may be useful (if address-taken)
- because they permit the analysis to deduce, in this example,
-
- var s struct{ ...; x int; ... }
- p := &s.x
-
- that p points to s.x. If we ignored such object fields, we could only
- say that p points somewhere within s.
-
- All other basic types are ignored. Expressions of these types have
- zero nodeid, and fields of these types within aggregate other types
- are omitted.
-
- unsafe.Pointers are not modelled as pointers, so a conversion of an
- unsafe.Pointer to *T is (unsoundly) treated equivalent to new(T).
-
-Channels
- An expression of type 'chan T' is a kind of pointer that points
- exclusively to channel objects, i.e. objects created by MakeChan (or
- reflection).
-
- 'chan T' is treated like *T.
- *ssa.MakeChan is treated as equivalent to new(T).
- *ssa.Send and receive (*ssa.UnOp(ARROW)) and are equivalent to store
- and load.
-
-Maps
- An expression of type 'map[K]V' is a kind of pointer that points
- exclusively to map objects, i.e. objects created by MakeMap (or
- reflection).
-
- map K[V] is treated like *M where M = struct{k K; v V}.
- *ssa.MakeMap is equivalent to new(M).
- *ssa.MapUpdate is equivalent to *y=x where *y and x have type M.
- *ssa.Lookup is equivalent to y=x.v where x has type *M.
-
-Slices
- A slice []T, which dynamically resembles a struct{array *T, len, cap int},
- is treated as if it were just a *T pointer; the len and cap fields are
- ignored.
-
- *ssa.MakeSlice is treated like new([1]T): an allocation of a
- singleton array.
- *ssa.Index on a slice is equivalent to a load.
- *ssa.IndexAddr on a slice returns the address of the sole element of the
- slice, i.e. the same address.
- *ssa.Slice is treated as a simple copy.
-
-Functions
- An expression of type 'func...' is a kind of pointer that points
- exclusively to function objects.
-
- A function object has the following layout:
-
- identity -- typ:*types.Signature; obj.flags ⊇ {otFunction}
- params_0 -- (the receiver, if a method)
- ...
- params_n-1
- results_0
- ...
- results_m-1
-
- There may be multiple function objects for the same *ssa.Function
- due to context-sensitive treatment of some functions.
-
- The first node is the function's identity node.
- Associated with every callsite is a special "targets" variable,
- whose pts() contains the identity node of each function to which
- the call may dispatch. Identity words are not otherwise used during
- the analysis, but we construct the call graph from the pts()
- solution for such nodes.
-
- The following block of contiguous nodes represents the flattened-out
- types of the parameters ("P-block") and results ("R-block") of the
- function object.
-
- The treatment of free variables of closures (*ssa.FreeVar) is like
- that of global variables; it is not context-sensitive.
- *ssa.MakeClosure instructions create copy edges to Captures.
-
- A Go value of type 'func' (i.e. a pointer to one or more functions)
- is a pointer whose pts() contains function objects. The valueNode()
- for an *ssa.Function returns a singleton for that function.
-
-Interfaces
- An expression of type 'interface{...}' is a kind of pointer that
- points exclusively to tagged objects. All tagged objects pointed to
- by an interface are direct (the otIndirect flag is clear) and
- concrete (the tag type T is not itself an interface type). The
- associated ssa.Value for an interface's tagged objects may be an
- *ssa.MakeInterface instruction, or nil if the tagged object was
- created by an instrinsic (e.g. reflection).
-
- Constructing an interface value causes generation of constraints for
- all of the concrete type's methods; we can't tell a priori which
- ones may be called.
-
- TypeAssert y = x.(T) is implemented by a dynamic constraint
- triggered by each tagged object O added to pts(x): a typeFilter
- constraint if T is an interface type, or an untag constraint if T is
- a concrete type. A typeFilter tests whether O.typ implements T; if
- so, O is added to pts(y). An untagFilter tests whether O.typ is
- assignable to T,and if so, a copy edge O.v -> y is added.
-
- ChangeInterface is a simple copy because the representation of
- tagged objects is independent of the interface type (in contrast
- to the "method tables" approach used by the gc runtime).
-
- y := Invoke x.m(...) is implemented by allocating contiguous P/R
- blocks for the callsite and adding a dynamic rule triggered by each
- tagged object added to pts(x). The rule adds param/results copy
- edges to/from each discovered concrete method.
-
- (Q. Why do we model an interface as a pointer to a pair of type and
- value, rather than as a pair of a pointer to type and a pointer to
- value?
- A. Control-flow joins would merge interfaces ({T1}, {V1}) and ({T2},
- {V2}) to make ({T1,T2}, {V1,V2}), leading to the infeasible and
- type-unsafe combination (T1,V2). Treating the value and its concrete
- type as inseparable makes the analysis type-safe.)
-
-reflect.Value
- A reflect.Value is modelled very similar to an interface{}, i.e. as
- a pointer exclusively to tagged objects, but with two generalizations.
-
- 1) a reflect.Value that represents an lvalue points to an indirect
- (obj.flags ⊇ {otIndirect}) tagged object, which has a similar
- layout to an tagged object except that the value is a pointer to
- the dynamic type. Indirect tagged objects preserve the correct
- aliasing so that mutations made by (reflect.Value).Set can be
- observed.
-
- Indirect objects only arise when an lvalue is derived from an
- rvalue by indirection, e.g. the following code:
-
- type S struct { X T }
- var s S
- var i interface{} = &s // i points to a *S-tagged object (from MakeInterface)
- v1 := reflect.ValueOf(i) // v1 points to same *S-tagged object as i
- v2 := v1.Elem() // v2 points to an indirect S-tagged object, pointing to s
- v3 := v2.FieldByName("X") // v3 points to an indirect int-tagged object, pointing to s.X
- v3.Set(y) // pts(s.X) ⊇ pts(y)
-
- Whether indirect or not, the concrete type of the tagged object
- corresponds to the user-visible dynamic type, and the existence
- of a pointer is an implementation detail.
-
- (NB: indirect tagged objects are not yet implemented)
-
- 2) The dynamic type tag of a tagged object pointed to by a
- reflect.Value may be an interface type; it need not be concrete.
-
- This arises in code such as this:
- tEface := reflect.TypeOf(new(interface{}).Elem() // interface{}
- eface := reflect.Zero(tEface)
- pts(eface) is a singleton containing an interface{}-tagged
- object. That tagged object's payload is an interface{} value,
- i.e. the pts of the payload contains only concrete-tagged
- objects, although in this example it's the zero interface{} value,
- so its pts is empty.
-
-reflect.Type
- Just as in the real "reflect" library, we represent a reflect.Type
- as an interface whose sole implementation is the concrete type,
- *reflect.rtype. (This choice is forced on us by go/types: clients
- cannot fabricate types with arbitrary method sets.)
-
- rtype instances are canonical: there is at most one per dynamic
- type. (rtypes are in fact large structs but since identity is all
- that matters, we represent them by a single node.)
-
- The payload of each *rtype-tagged object is an *rtype pointer that
- points to exactly one such canonical rtype object. We exploit this
- by setting the node.typ of the payload to the dynamic type, not
- '*rtype'. This saves us an indirection in each resolution rule. As
- an optimisation, *rtype-tagged objects are canonicalized too.
+Pointers:
+
+Nothing to say here, oddly.
+
+Basic types (bool, string, numbers, unsafe.Pointer):
+
+Currently all fields in the flattening of a type, including
+non-pointer basic types such as int, are represented in objects and
+values. Though non-pointer nodes within values are uninteresting,
+non-pointer nodes in objects may be useful (if address-taken)
+because they permit the analysis to deduce, in this example,
+
+ var s struct{ ...; x int; ... }
+ p := &s.x
+
+that p points to s.x. If we ignored such object fields, we could only
+say that p points somewhere within s.
+
+All other basic types are ignored. Expressions of these types have
+zero nodeid, and fields of these types within aggregate other types
+are omitted.
+
+unsafe.Pointers are not modelled as pointers, so a conversion of an
+unsafe.Pointer to *T is (unsoundly) treated equivalent to new(T).
+
+Channels:
+
+An expression of type 'chan T' is a kind of pointer that points
+exclusively to channel objects, i.e. objects created by MakeChan (or
+reflection).
+
+'chan T' is treated like *T.
+*ssa.MakeChan is treated as equivalent to new(T).
+*ssa.Send and receive (*ssa.UnOp(ARROW)) and are equivalent to store
+
+ and load.
+
+Maps:
+
+An expression of type 'map[K]V' is a kind of pointer that points
+exclusively to map objects, i.e. objects created by MakeMap (or
+reflection).
+
+map K[V] is treated like *M where M = struct{k K; v V}.
+*ssa.MakeMap is equivalent to new(M).
+*ssa.MapUpdate is equivalent to *y=x where *y and x have type M.
+*ssa.Lookup is equivalent to y=x.v where x has type *M.
+Slices:
+
+A slice []T, which dynamically resembles a struct{array *T, len, cap int},
+is treated as if it were just a *T pointer; the len and cap fields are
+ignored.
+
+*ssa.MakeSlice is treated like new([1]T): an allocation of a
+
+ singleton array.
+
+*ssa.Index on a slice is equivalent to a load.
+*ssa.IndexAddr on a slice returns the address of the sole element of the
+slice, i.e. the same address.
+*ssa.Slice is treated as a simple copy.
+
+Functions:
+
+An expression of type 'func...' is a kind of pointer that points
+exclusively to function objects.
+
+A function object has the following layout:
+
+ identity -- typ:*types.Signature; obj.flags ⊇ {otFunction}
+ params_0 -- (the receiver, if a method)
+ ...
+ params_n-1
+ results_0
+ ...
+ results_m-1
+
+There may be multiple function objects for the same *ssa.Function
+due to context-sensitive treatment of some functions.
+
+The first node is the function's identity node.
+Associated with every callsite is a special "targets" variable,
+whose pts() contains the identity node of each function to which
+the call may dispatch. Identity words are not otherwise used during
+the analysis, but we construct the call graph from the pts()
+solution for such nodes.
+
+The following block of contiguous nodes represents the flattened-out
+types of the parameters ("P-block") and results ("R-block") of the
+function object.
+
+The treatment of free variables of closures (*ssa.FreeVar) is like
+that of global variables; it is not context-sensitive.
+*ssa.MakeClosure instructions create copy edges to Captures.
+
+A Go value of type 'func' (i.e. a pointer to one or more functions)
+is a pointer whose pts() contains function objects. The valueNode()
+for an *ssa.Function returns a singleton for that function.
+
+Interfaces:
+
+An expression of type 'interface{...}' is a kind of pointer that
+points exclusively to tagged objects. All tagged objects pointed to
+by an interface are direct (the otIndirect flag is clear) and
+concrete (the tag type T is not itself an interface type). The
+associated ssa.Value for an interface's tagged objects may be an
+*ssa.MakeInterface instruction, or nil if the tagged object was
+created by an instrinsic (e.g. reflection).
+
+Constructing an interface value causes generation of constraints for
+all of the concrete type's methods; we can't tell a priori which
+ones may be called.
+
+TypeAssert y = x.(T) is implemented by a dynamic constraint
+triggered by each tagged object O added to pts(x): a typeFilter
+constraint if T is an interface type, or an untag constraint if T is
+a concrete type. A typeFilter tests whether O.typ implements T; if
+so, O is added to pts(y). An untagFilter tests whether O.typ is
+assignable to T,and if so, a copy edge O.v -> y is added.
+
+ChangeInterface is a simple copy because the representation of
+tagged objects is independent of the interface type (in contrast
+to the "method tables" approach used by the gc runtime).
+
+y := Invoke x.m(...) is implemented by allocating contiguous P/R
+blocks for the callsite and adding a dynamic rule triggered by each
+tagged object added to pts(x). The rule adds param/results copy
+edges to/from each discovered concrete method.
+
+(Q. Why do we model an interface as a pointer to a pair of type and
+value, rather than as a pair of a pointer to type and a pointer to
+value?
+A. Control-flow joins would merge interfaces ({T1}, {V1}) and ({T2},
+{V2}) to make ({T1,T2}, {V1,V2}), leading to the infeasible and
+type-unsafe combination (T1,V2). Treating the value and its concrete
+type as inseparable makes the analysis type-safe.)
+
+Type parameters:
+
+Type parameters are not directly supported by the analysis.
+Calls to generic functions will be left as if they had empty bodies.
+Users of the package are expected to use the ssa.InstantiateGenerics
+builder mode when building code that uses or depends on code
+containing generics.
+
+reflect.Value:
+
+A reflect.Value is modelled very similar to an interface{}, i.e. as
+a pointer exclusively to tagged objects, but with two generalizations.
+
+1. a reflect.Value that represents an lvalue points to an indirect
+(obj.flags ⊇ {otIndirect}) tagged object, which has a similar
+layout to an tagged object except that the value is a pointer to
+the dynamic type. Indirect tagged objects preserve the correct
+aliasing so that mutations made by (reflect.Value).Set can be
+observed.
+
+Indirect objects only arise when an lvalue is derived from an
+rvalue by indirection, e.g. the following code:
+
+ type S struct { X T }
+ var s S
+ var i interface{} = &s // i points to a *S-tagged object (from MakeInterface)
+ v1 := reflect.ValueOf(i) // v1 points to same *S-tagged object as i
+ v2 := v1.Elem() // v2 points to an indirect S-tagged object, pointing to s
+ v3 := v2.FieldByName("X") // v3 points to an indirect int-tagged object, pointing to s.X
+ v3.Set(y) // pts(s.X) ⊇ pts(y)
+
+Whether indirect or not, the concrete type of the tagged object
+corresponds to the user-visible dynamic type, and the existence
+of a pointer is an implementation detail.
+
+(NB: indirect tagged objects are not yet implemented)
+
+2. The dynamic type tag of a tagged object pointed to by a
+reflect.Value may be an interface type; it need not be concrete.
+
+This arises in code such as this:
+
+ tEface := reflect.TypeOf(new(interface{}).Elem() // interface{}
+ eface := reflect.Zero(tEface)
+
+pts(eface) is a singleton containing an interface{}-tagged
+object. That tagged object's payload is an interface{} value,
+i.e. the pts of the payload contains only concrete-tagged
+objects, although in this example it's the zero interface{} value,
+so its pts is empty.
+
+reflect.Type:
+
+Just as in the real "reflect" library, we represent a reflect.Type
+as an interface whose sole implementation is the concrete type,
+*reflect.rtype. (This choice is forced on us by go/types: clients
+cannot fabricate types with arbitrary method sets.)
+
+rtype instances are canonical: there is at most one per dynamic
+type. (rtypes are in fact large structs but since identity is all
+that matters, we represent them by a single node.)
+
+The payload of each *rtype-tagged object is an *rtype pointer that
+points to exactly one such canonical rtype object. We exploit this
+by setting the node.typ of the payload to the dynamic type, not
+'*rtype'. This saves us an indirection in each resolution rule. As
+an optimisation, *rtype-tagged objects are canonicalized too.
Aggregate types:
Aggregate types are treated as if all directly contained
aggregates are recursively flattened out.
-Structs
- *ssa.Field y = x.f creates a simple edge to y from x's node at f's offset.
+Structs:
- *ssa.FieldAddr y = &x->f requires a dynamic closure rule to create
- simple edges for each struct discovered in pts(x).
+*ssa.Field y = x.f creates a simple edge to y from x's node at f's offset.
- The nodes of a struct consist of a special 'identity' node (whose
- type is that of the struct itself), followed by the nodes for all
- the struct's fields, recursively flattened out. A pointer to the
- struct is a pointer to its identity node. That node allows us to
- distinguish a pointer to a struct from a pointer to its first field.
+*ssa.FieldAddr y = &x->f requires a dynamic closure rule to create
- Field offsets are logical field offsets (plus one for the identity
- node), so the sizes of the fields can be ignored by the analysis.
+ simple edges for each struct discovered in pts(x).
- (The identity node is non-traditional but enables the distinction
- described above, which is valuable for code comprehension tools.
- Typical pointer analyses for C, whose purpose is compiler
- optimization, must soundly model unsafe.Pointer (void*) conversions,
- and this requires fidelity to the actual memory layout using physical
- field offsets.)
+The nodes of a struct consist of a special 'identity' node (whose
+type is that of the struct itself), followed by the nodes for all
+the struct's fields, recursively flattened out. A pointer to the
+struct is a pointer to its identity node. That node allows us to
+distinguish a pointer to a struct from a pointer to its first field.
- *ssa.Field y = x.f creates a simple edge to y from x's node at f's offset.
+Field offsets are logical field offsets (plus one for the identity
+node), so the sizes of the fields can be ignored by the analysis.
- *ssa.FieldAddr y = &x->f requires a dynamic closure rule to create
- simple edges for each struct discovered in pts(x).
+(The identity node is non-traditional but enables the distinction
+described above, which is valuable for code comprehension tools.
+Typical pointer analyses for C, whose purpose is compiler
+optimization, must soundly model unsafe.Pointer (void*) conversions,
+and this requires fidelity to the actual memory layout using physical
+field offsets.)
-Arrays
- We model an array by an identity node (whose type is that of the
- array itself) followed by a node representing all the elements of
- the array; the analysis does not distinguish elements with different
- indices. Effectively, an array is treated like struct{elem T}, a
- load y=x[i] like y=x.elem, and a store x[i]=y like x.elem=y; the
- index i is ignored.
+*ssa.Field y = x.f creates a simple edge to y from x's node at f's offset.
- A pointer to an array is pointer to its identity node. (A slice is
- also a pointer to an array's identity node.) The identity node
- allows us to distinguish a pointer to an array from a pointer to one
- of its elements, but it is rather costly because it introduces more
- offset constraints into the system. Furthermore, sound treatment of
- unsafe.Pointer would require us to dispense with this node.
+*ssa.FieldAddr y = &x->f requires a dynamic closure rule to create
- Arrays may be allocated by Alloc, by make([]T), by calls to append,
- and via reflection.
+ simple edges for each struct discovered in pts(x).
-Tuples (T, ...)
- Tuples are treated like structs with naturally numbered fields.
- *ssa.Extract is analogous to *ssa.Field.
+Arrays:
- However, tuples have no identity field since by construction, they
- cannot be address-taken.
+We model an array by an identity node (whose type is that of the
+array itself) followed by a node representing all the elements of
+the array; the analysis does not distinguish elements with different
+indices. Effectively, an array is treated like struct{elem T}, a
+load y=x[i] like y=x.elem, and a store x[i]=y like x.elem=y; the
+index i is ignored.
+A pointer to an array is pointer to its identity node. (A slice is
+also a pointer to an array's identity node.) The identity node
+allows us to distinguish a pointer to an array from a pointer to one
+of its elements, but it is rather costly because it introduces more
+offset constraints into the system. Furthermore, sound treatment of
+unsafe.Pointer would require us to dispense with this node.
-FUNCTION CALLS
+Arrays may be allocated by Alloc, by make([]T), by calls to append,
+and via reflection.
- There are three kinds of function call:
- (1) static "call"-mode calls of functions.
- (2) dynamic "call"-mode calls of functions.
- (3) dynamic "invoke"-mode calls of interface methods.
- Cases 1 and 2 apply equally to methods and standalone functions.
+Tuples (T, ...):
- Static calls.
- A static call consists three steps:
- - finding the function object of the callee;
- - creating copy edges from the actual parameter value nodes to the
- P-block in the function object (this includes the receiver if
- the callee is a method);
- - creating copy edges from the R-block in the function object to
- the value nodes for the result of the call.
+Tuples are treated like structs with naturally numbered fields.
+*ssa.Extract is analogous to *ssa.Field.
- A static function call is little more than two struct value copies
- between the P/R blocks of caller and callee:
+However, tuples have no identity field since by construction, they
+cannot be address-taken.
- callee.P = caller.P
- caller.R = callee.R
+# FUNCTION CALLS
- Context sensitivity
+There are three kinds of function call:
+ 1. static "call"-mode calls of functions.
+ 2. dynamic "call"-mode calls of functions.
+ 3. dynamic "invoke"-mode calls of interface methods.
- Static calls (alone) may be treated context sensitively,
- i.e. each callsite may cause a distinct re-analysis of the
- callee, improving precision. Our current context-sensitivity
- policy treats all intrinsics and getter/setter methods in this
- manner since such functions are small and seem like an obvious
- source of spurious confluences, though this has not yet been
- evaluated.
+Cases 1 and 2 apply equally to methods and standalone functions.
- Dynamic function calls
+Static calls:
- Dynamic calls work in a similar manner except that the creation of
- copy edges occurs dynamically, in a similar fashion to a pair of
- struct copies in which the callee is indirect:
+A static call consists three steps:
+ - finding the function object of the callee;
+ - creating copy edges from the actual parameter value nodes to the
+ P-block in the function object (this includes the receiver if
+ the callee is a method);
+ - creating copy edges from the R-block in the function object to
+ the value nodes for the result of the call.
- callee->P = caller.P
- caller.R = callee->R
+A static function call is little more than two struct value copies
+between the P/R blocks of caller and callee:
- (Recall that the function object's P- and R-blocks are contiguous.)
+ callee.P = caller.P
+ caller.R = callee.R
- Interface method invocation
+Context sensitivity: Static calls (alone) may be treated context sensitively,
+i.e. each callsite may cause a distinct re-analysis of the
+callee, improving precision. Our current context-sensitivity
+policy treats all intrinsics and getter/setter methods in this
+manner since such functions are small and seem like an obvious
+source of spurious confluences, though this has not yet been
+evaluated.
- For invoke-mode calls, we create a params/results block for the
- callsite and attach a dynamic closure rule to the interface. For
- each new tagged object that flows to the interface, we look up
- the concrete method, find its function object, and connect its P/R
- blocks to the callsite's P/R blocks, adding copy edges to the graph
- during solving.
+Dynamic function calls:
- Recording call targets
+Dynamic calls work in a similar manner except that the creation of
+copy edges occurs dynamically, in a similar fashion to a pair of
+struct copies in which the callee is indirect:
- The analysis notifies its clients of each callsite it encounters,
- passing a CallSite interface. Among other things, the CallSite
- contains a synthetic constraint variable ("targets") whose
- points-to solution includes the set of all function objects to
- which the call may dispatch.
+ callee->P = caller.P
+ caller.R = callee->R
- It is via this mechanism that the callgraph is made available.
- Clients may also elect to be notified of callgraph edges directly;
- internally this just iterates all "targets" variables' pts(·)s.
+(Recall that the function object's P- and R-blocks are contiguous.)
+Interface method invocation:
-PRESOLVER
+For invoke-mode calls, we create a params/results block for the
+callsite and attach a dynamic closure rule to the interface. For
+each new tagged object that flows to the interface, we look up
+the concrete method, find its function object, and connect its P/R
+blocks to the callsite's P/R blocks, adding copy edges to the graph
+during solving.
+
+Recording call targets:
+
+The analysis notifies its clients of each callsite it encounters,
+passing a CallSite interface. Among other things, the CallSite
+contains a synthetic constraint variable ("targets") whose
+points-to solution includes the set of all function objects to
+which the call may dispatch.
+
+It is via this mechanism that the callgraph is made available.
+Clients may also elect to be notified of callgraph edges directly;
+internally this just iterates all "targets" variables' pts(·)s.
+
+# PRESOLVER
We implement Hash-Value Numbering (HVN), a pre-solver constraint
optimization described in Hardekopf & Lin, SAS'07. This is documented
in more detail in hvn.go. We intend to add its cousins HR and HU in
future.
-
-SOLVER
+# SOLVER
The solver is currently a naive Andersen-style implementation; it does
not perform online cycle detection, though we plan to add solver
@@ -565,8 +582,7 @@ range, and thus the efficiency of the representation.
Partly thanks to avoiding map iteration, the execution of the solver is
100% deterministic, a great help during debugging.
-
-FURTHER READING
+# FURTHER READING
Andersen, L. O. 1994. Program analysis and specialization for the C
programming language. Ph.D. dissertation. DIKU, University of
@@ -605,6 +621,5 @@ for scaling points-to analysis. In Proceedings of the ACM SIGPLAN 2000
conference on Programming language design and implementation (PLDI '00).
ACM, New York, NY, USA, 47-56. DOI=10.1145/349299.349310
http://doi.acm.org/10.1145/349299.349310
-
*/
package pointer // import "golang.org/x/tools/go/pointer"
diff --git a/go/pointer/example_test.go b/go/pointer/example_test.go
index 673de7a49..00017df6e 100644
--- a/go/pointer/example_test.go
+++ b/go/pointer/example_test.go
@@ -19,7 +19,6 @@ import (
// obtain a conservative call-graph of a Go program.
// It also shows how to compute the points-to set of a variable,
// in this case, (C).f's ch parameter.
-//
func Example() {
const myprog = `
package main
@@ -62,7 +61,7 @@ func main() {
}
// Create SSA-form program representation.
- prog := ssautil.CreateProgram(iprog, 0)
+ prog := ssautil.CreateProgram(iprog, ssa.InstantiateGenerics)
mainPkg := prog.Package(iprog.Created[0].Pkg)
// Build SSA code for bodies of all functions in the whole program.
diff --git a/go/pointer/gen.go b/go/pointer/gen.go
index ef5108a5b..5e527f21a 100644
--- a/go/pointer/gen.go
+++ b/go/pointer/gen.go
@@ -14,9 +14,11 @@ import (
"fmt"
"go/token"
"go/types"
+ "strings"
"golang.org/x/tools/go/callgraph"
"golang.org/x/tools/go/ssa"
+ "golang.org/x/tools/internal/typeparams"
)
var (
@@ -37,7 +39,6 @@ func (a *analysis) nextNode() nodeid {
// analytically uninteresting.
//
// comment explains the origin of the nodes, as a debugging aid.
-//
func (a *analysis) addNodes(typ types.Type, comment string) nodeid {
id := a.nextNode()
for _, fi := range a.flatten(typ) {
@@ -56,7 +57,6 @@ func (a *analysis) addNodes(typ types.Type, comment string) nodeid {
//
// comment explains the origin of the nodes, as a debugging aid.
// subelement indicates the subelement, e.g. ".a.b[*].c".
-//
func (a *analysis) addOneNode(typ types.Type, comment string, subelement *fieldInfo) nodeid {
id := a.nextNode()
a.nodes = append(a.nodes, &node{typ: typ, subelement: subelement, solve: new(solverState)})
@@ -69,7 +69,6 @@ func (a *analysis) addOneNode(typ types.Type, comment string, subelement *fieldI
// setValueNode associates node id with the value v.
// cgn identifies the context iff v is a local variable.
-//
func (a *analysis) setValueNode(v ssa.Value, id nodeid, cgn *cgnode) {
if cgn != nil {
a.localval[v] = id
@@ -125,7 +124,6 @@ func (a *analysis) setValueNode(v ssa.Value, id nodeid, cgn *cgnode) {
//
// obj is the start node of the object, from a prior call to nextNode.
// Its size, flags and optional data will be updated.
-//
func (a *analysis) endObject(obj nodeid, cgn *cgnode, data interface{}) *object {
// Ensure object is non-empty by padding;
// the pad will be the object node.
@@ -150,7 +148,6 @@ func (a *analysis) endObject(obj nodeid, cgn *cgnode, data interface{}) *object
//
// For a context-sensitive contour, callersite identifies the sole
// callsite; for shared contours, caller is nil.
-//
func (a *analysis) makeFunctionObject(fn *ssa.Function, callersite *callsite) nodeid {
if a.log != nil {
fmt.Fprintf(a.log, "\t---- makeFunctionObject %s\n", fn)
@@ -190,7 +187,6 @@ func (a *analysis) makeTagged(typ types.Type, cgn *cgnode, data interface{}) nod
// payload points to the sole rtype object for T.
//
// TODO(adonovan): move to reflect.go; it's part of the solver really.
-//
func (a *analysis) makeRtype(T types.Type) nodeid {
if v := a.rtypes.At(T); v != nil {
return v.(nodeid)
@@ -210,7 +206,7 @@ func (a *analysis) makeRtype(T types.Type) nodeid {
return id
}
-// rtypeValue returns the type of the *reflect.rtype-tagged object obj.
+// rtypeTaggedValue returns the type of the *reflect.rtype-tagged object obj.
func (a *analysis) rtypeTaggedValue(obj nodeid) types.Type {
tDyn, t, _ := a.taggedValue(obj)
if tDyn != a.reflectRtypePtr {
@@ -222,7 +218,6 @@ func (a *analysis) rtypeTaggedValue(obj nodeid) types.Type {
// valueNode returns the id of the value node for v, creating it (and
// the association) as needed. It may return zero for uninteresting
// values containing no pointers.
-//
func (a *analysis) valueNode(v ssa.Value) nodeid {
// Value nodes for locals are created en masse by genFunc.
if id, ok := a.localval[v]; ok {
@@ -247,7 +242,6 @@ func (a *analysis) valueNode(v ssa.Value) nodeid {
// valueOffsetNode ascertains the node for tuple/struct value v,
// then returns the node for its subfield #index.
-//
func (a *analysis) valueOffsetNode(v ssa.Value, index int) nodeid {
id := a.valueNode(v)
if id == 0 {
@@ -264,7 +258,6 @@ func (a *analysis) isTaggedObject(obj nodeid) bool {
// taggedValue returns the dynamic type tag, the (first node of the)
// payload, and the indirect flag of the tagged object starting at id.
// Panic ensues if !isTaggedObject(id).
-//
func (a *analysis) taggedValue(obj nodeid) (tDyn types.Type, v nodeid, indirect bool) {
n := a.nodes[obj]
flags := n.obj.flags
@@ -276,7 +269,6 @@ func (a *analysis) taggedValue(obj nodeid) (tDyn types.Type, v nodeid, indirect
// funcParams returns the first node of the params (P) block of the
// function whose object node (obj.flags&otFunction) is id.
-//
func (a *analysis) funcParams(id nodeid) nodeid {
n := a.nodes[id]
if n.obj == nil || n.obj.flags&otFunction == 0 {
@@ -287,7 +279,6 @@ func (a *analysis) funcParams(id nodeid) nodeid {
// funcResults returns the first node of the results (R) block of the
// function whose object node (obj.flags&otFunction) is id.
-//
func (a *analysis) funcResults(id nodeid) nodeid {
n := a.nodes[id]
if n.obj == nil || n.obj.flags&otFunction == 0 {
@@ -305,7 +296,6 @@ func (a *analysis) funcResults(id nodeid) nodeid {
// copy creates a constraint of the form dst = src.
// sizeof is the width (in logical fields) of the copied type.
-//
func (a *analysis) copy(dst, src nodeid, sizeof uint32) {
if src == dst || sizeof == 0 {
return // trivial
@@ -337,7 +327,6 @@ func (a *analysis) addressOf(T types.Type, id, obj nodeid) {
// load creates a load constraint of the form dst = src[offset].
// offset is the pointer offset in logical fields.
// sizeof is the width (in logical fields) of the loaded type.
-//
func (a *analysis) load(dst, src nodeid, offset, sizeof uint32) {
if dst == 0 {
return // load of non-pointerlike value
@@ -358,7 +347,6 @@ func (a *analysis) load(dst, src nodeid, offset, sizeof uint32) {
// store creates a store constraint of the form dst[offset] = src.
// offset is the pointer offset in logical fields.
// sizeof is the width (in logical fields) of the stored type.
-//
func (a *analysis) store(dst, src nodeid, offset uint32, sizeof uint32) {
if src == 0 {
return // store of non-pointerlike value
@@ -379,7 +367,6 @@ func (a *analysis) store(dst, src nodeid, offset uint32, sizeof uint32) {
// offsetAddr creates an offsetAddr constraint of the form dst = &src.#offset.
// offset is the field offset in logical fields.
// T is the type of the address.
-//
func (a *analysis) offsetAddr(T types.Type, dst, src nodeid, offset uint32) {
if !a.shouldTrack(T) {
return
@@ -398,7 +385,6 @@ func (a *analysis) offsetAddr(T types.Type, dst, src nodeid, offset uint32) {
// typeAssert creates a typeFilter or untag constraint of the form dst = src.(T):
// typeFilter for an interface, untag for a concrete type.
// The exact flag is specified as for untagConstraint.
-//
func (a *analysis) typeAssert(T types.Type, dst, src nodeid, exact bool) {
if isInterface(T) {
a.addConstraint(&typeFilterConstraint{T, dst, src})
@@ -417,7 +403,6 @@ func (a *analysis) addConstraint(c constraint) {
// copyElems generates load/store constraints for *dst = *src,
// where src and dst are slices or *arrays.
-//
func (a *analysis) copyElems(cgn *cgnode, typ types.Type, dst, src ssa.Value) {
tmp := a.addNodes(typ, "copy")
sz := a.sizeof(typ)
@@ -553,7 +538,6 @@ func (a *analysis) genBuiltinCall(instr ssa.CallInstruction, cgn *cgnode) {
// choose a policy. The current policy, rather arbitrarily, is true
// for intrinsics and accessor methods (actually: short, single-block,
// call-free functions). This is just a starting point.
-//
func (a *analysis) shouldUseContext(fn *ssa.Function) bool {
if a.findIntrinsic(fn) != nil {
return true // treat intrinsics context-sensitively
@@ -705,11 +689,13 @@ func (a *analysis) genInvoke(caller *cgnode, site *callsite, call *ssa.CallCommo
// practice it occurs rarely, so we special case for reflect.Type.)
//
// In effect we treat this:
-// var rt reflect.Type = ...
-// rt.F()
+//
+// var rt reflect.Type = ...
+// rt.F()
+//
// as this:
-// rt.(*reflect.rtype).F()
//
+// rt.(*reflect.rtype).F()
func (a *analysis) genInvokeReflectType(caller *cgnode, site *callsite, call *ssa.CallCommon, result nodeid) {
// Unpack receiver into rtype
rtype := a.addOneNode(a.reflectRtypePtr, "rtype.recv", nil)
@@ -789,13 +775,15 @@ func (a *analysis) genCall(caller *cgnode, instr ssa.CallInstruction) {
// a simple copy constraint when the sole destination is known a priori.
//
// Some SSA instructions always have singletons points-to sets:
-// Alloc, Function, Global, MakeChan, MakeClosure, MakeInterface, MakeMap, MakeSlice.
+//
+// Alloc, Function, Global, MakeChan, MakeClosure, MakeInterface, MakeMap, MakeSlice.
+//
// Others may be singletons depending on their operands:
-// FreeVar, Const, Convert, FieldAddr, IndexAddr, Slice, SliceToArrayPointer.
+//
+// FreeVar, Const, Convert, FieldAddr, IndexAddr, Slice, SliceToArrayPointer.
//
// Idempotent. Objects are created as needed, possibly via recursion
// down the SSA value graph, e.g IndexAddr(FieldAddr(Alloc))).
-//
func (a *analysis) objectNode(cgn *cgnode, v ssa.Value) nodeid {
switch v.(type) {
case *ssa.Global, *ssa.Function, *ssa.Const, *ssa.FreeVar:
@@ -992,7 +980,10 @@ func (a *analysis) genInstr(cgn *cgnode, instr ssa.Instruction) {
a.sizeof(instr.Type()))
case *ssa.Index:
- a.copy(a.valueNode(instr), 1+a.valueNode(instr.X), a.sizeof(instr.Type()))
+ _, isstring := typeparams.CoreType(instr.X.Type()).(*types.Basic)
+ if !isstring {
+ a.copy(a.valueNode(instr), 1+a.valueNode(instr.X), a.sizeof(instr.Type()))
+ }
case *ssa.Select:
recv := a.valueOffsetNode(instr, 2) // instr : (index, recvOk, recv0, ... recv_n-1)
@@ -1156,7 +1147,6 @@ func (a *analysis) makeCGNode(fn *ssa.Function, obj nodeid, callersite *callsite
// genRootCalls generates the synthetic root of the callgraph and the
// initial calls from it to the analysis scope, such as main, a test
// or a library.
-//
func (a *analysis) genRootCalls() *cgnode {
r := a.prog.NewFunction("<root>", new(types.Signature), "root of callgraph")
root := a.makeCGNode(r, 0, nil)
@@ -1217,6 +1207,19 @@ func (a *analysis) genFunc(cgn *cgnode) {
return
}
+ if fn.TypeParams().Len() > 0 && len(fn.TypeArgs()) == 0 {
+ // Body of generic function.
+ // We'll warn about calls to such functions at the end.
+ return
+ }
+
+ if strings.HasPrefix(fn.Synthetic, "instantiation wrapper ") {
+ // instantiation wrapper of a generic function.
+ // These may contain type coercions which are not currently supported.
+ // We'll warn about calls to such functions at the end.
+ return
+ }
+
if a.log != nil {
fmt.Fprintln(a.log, "; Creating nodes for local values")
}
diff --git a/go/pointer/hvn.go b/go/pointer/hvn.go
index 52fd479fa..ad25cdfa4 100644
--- a/go/pointer/hvn.go
+++ b/go/pointer/hvn.go
@@ -174,14 +174,14 @@ import (
// peLabel have identical points-to solutions.
//
// The numbers are allocated consecutively like so:
-// 0 not a pointer
+//
+// 0 not a pointer
// 1..N-1 addrConstraints (equals the constraint's .src field, hence sparse)
// ... offsetAddr constraints
// ... SCCs (with indirect nodes or multiple inputs)
//
// Each PE label denotes a set of pointers containing a single addr, a
// single offsetAddr, or some set of other PE labels.
-//
type peLabel int
type hvn struct {
@@ -212,7 +212,6 @@ type onodeid uint32
// the source, i.e. against the flow of values: they are dependencies.
// Implicit edges are used for SCC computation, but not for gathering
// incoming labels.
-//
type onode struct {
rep onodeid // index of representative of SCC in offline constraint graph
@@ -244,7 +243,6 @@ func (h *hvn) ref(id onodeid) onodeid {
// hvn computes pointer-equivalence labels (peLabels) using the Hash-based
// Value Numbering (HVN) algorithm described in Hardekopf & Lin, SAS'07.
-//
func (a *analysis) hvn() {
start("HVN")
@@ -455,28 +453,27 @@ func (c *invokeConstraint) presolve(h *hvn) {
// markIndirectNodes marks as indirect nodes whose points-to relations
// are not entirely captured by the offline graph, including:
//
-// (a) All address-taken nodes (including the following nodes within
-// the same object). This is described in the paper.
+// (a) All address-taken nodes (including the following nodes within
+// the same object). This is described in the paper.
//
// The most subtle cause of indirect nodes is the generation of
// store-with-offset constraints since the offline graph doesn't
// represent them. A global audit of constraint generation reveals the
// following uses of store-with-offset:
//
-// (b) genDynamicCall, for P-blocks of dynamically called functions,
-// to which dynamic copy edges will be added to them during
-// solving: from storeConstraint for standalone functions,
-// and from invokeConstraint for methods.
-// All such P-blocks must be marked indirect.
-// (c) MakeUpdate, to update the value part of a map object.
-// All MakeMap objects's value parts must be marked indirect.
-// (d) copyElems, to update the destination array.
-// All array elements must be marked indirect.
+// (b) genDynamicCall, for P-blocks of dynamically called functions,
+// to which dynamic copy edges will be added to them during
+// solving: from storeConstraint for standalone functions,
+// and from invokeConstraint for methods.
+// All such P-blocks must be marked indirect.
+// (c) MakeUpdate, to update the value part of a map object.
+// All MakeMap objects's value parts must be marked indirect.
+// (d) copyElems, to update the destination array.
+// All array elements must be marked indirect.
//
// Not all indirect marking happens here. ref() nodes are marked
// indirect at construction, and each constraint's presolve() method may
// mark additional nodes.
-//
func (h *hvn) markIndirectNodes() {
// (a) all address-taken nodes, plus all nodes following them
// within the same object, since these may be indirectly
@@ -761,7 +758,6 @@ func (h *hvn) coalesce(x, y onodeid) {
// labels assigned by the hvn, and uses it to simplify the main
// constraint graph, eliminating non-pointer nodes and duplicate
// constraints.
-//
func (h *hvn) simplify() {
// canon maps each peLabel to its canonical main node.
canon := make([]nodeid, h.label)
diff --git a/go/pointer/intrinsics.go b/go/pointer/intrinsics.go
index b7e2b1403..43bb8e8fc 100644
--- a/go/pointer/intrinsics.go
+++ b/go/pointer/intrinsics.go
@@ -159,7 +159,6 @@ func init() {
// findIntrinsic returns the constraint generation function for an
// intrinsic function fn, or nil if the function should be handled normally.
-//
func (a *analysis) findIntrinsic(fn *ssa.Function) intrinsic {
// Consult the *Function-keyed cache.
// A cached nil indicates a normal non-intrinsic function.
@@ -220,7 +219,6 @@ func (a *analysis) isReflect(fn *ssa.Function) bool {
//
// We sometimes violate condition #3 if the function creates only
// non-function labels, as the control-flow graph is still sound.
-//
func ext۰NoEffect(a *analysis, cgn *cgnode) {}
func ext۰NotYetImplemented(a *analysis, cgn *cgnode) {
diff --git a/go/pointer/labels.go b/go/pointer/labels.go
index 7d64ef6a4..5a1e1999c 100644
--- a/go/pointer/labels.go
+++ b/go/pointer/labels.go
@@ -17,15 +17,15 @@ import (
// channel, 'func', slice or interface.
//
// Labels include:
-// - functions
-// - globals
-// - tagged objects, representing interfaces and reflect.Values
-// - arrays created by conversions (e.g. []byte("foo"), []byte(s))
-// - stack- and heap-allocated variables (including composite literals)
-// - channels, maps and arrays created by make()
-// - intrinsic or reflective operations that allocate (e.g. append, reflect.New)
-// - intrinsic objects, e.g. the initial array behind os.Args.
-// - and their subelements, e.g. "alloc.y[*].z"
+// - functions
+// - globals
+// - tagged objects, representing interfaces and reflect.Values
+// - arrays created by conversions (e.g. []byte("foo"), []byte(s))
+// - stack- and heap-allocated variables (including composite literals)
+// - channels, maps and arrays created by make()
+// - intrinsic or reflective operations that allocate (e.g. append, reflect.New)
+// - intrinsic objects, e.g. the initial array behind os.Args.
+// - and their subelements, e.g. "alloc.y[*].z"
//
// Labels are so varied that they defy good generalizations;
// some have no value, no callgraph node, or no position.
@@ -33,7 +33,6 @@ import (
// maps, channels, functions, tagged objects.
//
// At most one of Value() or ReflectType() may return non-nil.
-//
type Label struct {
obj *object // the addressable memory location containing this label
subelement *fieldInfo // subelement path within obj, e.g. ".a.b[*].c"
@@ -47,7 +46,6 @@ func (l Label) Value() ssa.Value {
// ReflectType returns the type represented by this label if it is an
// reflect.rtype instance object or *reflect.rtype-tagged object.
-//
func (l Label) ReflectType() types.Type {
rtype, _ := l.obj.data.(types.Type)
return rtype
@@ -55,7 +53,6 @@ func (l Label) ReflectType() types.Type {
// Path returns the path to the subelement of the object containing
// this label. For example, ".x[*].y".
-//
func (l Label) Path() string {
return l.subelement.path()
}
@@ -79,23 +76,24 @@ func (l Label) Pos() token.Pos {
// String returns the printed form of this label.
//
// Examples: Object type:
-// x (a variable)
-// (sync.Mutex).Lock (a function)
-// convert (array created by conversion)
-// makemap (map allocated via make)
-// makechan (channel allocated via make)
-// makeinterface (tagged object allocated by makeinterface)
-// <alloc in reflect.Zero> (allocation in instrinsic)
-// sync.Mutex (a reflect.rtype instance)
-// <command-line arguments> (an intrinsic object)
+//
+// x (a variable)
+// (sync.Mutex).Lock (a function)
+// convert (array created by conversion)
+// makemap (map allocated via make)
+// makechan (channel allocated via make)
+// makeinterface (tagged object allocated by makeinterface)
+// <alloc in reflect.Zero> (allocation in instrinsic)
+// sync.Mutex (a reflect.rtype instance)
+// <command-line arguments> (an intrinsic object)
//
// Labels within compound objects have subelement paths:
-// x.y[*].z (a struct variable, x)
-// append.y[*].z (array allocated by append)
-// makeslice.y[*].z (array allocated via make)
//
-// TODO(adonovan): expose func LabelString(*types.Package, Label).
+// x.y[*].z (a struct variable, x)
+// append.y[*].z (array allocated by append)
+// makeslice.y[*].z (array allocated via make)
//
+// TODO(adonovan): expose func LabelString(*types.Package, Label).
func (l Label) String() string {
var s string
switch v := l.obj.data.(type) {
diff --git a/go/pointer/opt.go b/go/pointer/opt.go
index 6defea11f..bbd411c2e 100644
--- a/go/pointer/opt.go
+++ b/go/pointer/opt.go
@@ -27,7 +27,6 @@ import "fmt"
//
// Renumbering makes the PTA log inscrutable. To aid debugging, later
// phases (e.g. HVN) must not rely on it having occurred.
-//
func (a *analysis) renumber() {
if a.log != nil {
fmt.Fprintf(a.log, "\n\n==== Renumbering\n\n")
diff --git a/go/pointer/pointer_race_test.go b/go/pointer/pointer_race_test.go
new file mode 100644
index 000000000..d3c9b475e
--- /dev/null
+++ b/go/pointer/pointer_race_test.go
@@ -0,0 +1,12 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build race
+// +build race
+
+package pointer_test
+
+func init() {
+ raceEnabled = true
+}
diff --git a/go/pointer/pointer_test.go b/go/pointer/pointer_test.go
index 1ac5b6c9f..1fa54f6e8 100644
--- a/go/pointer/pointer_test.go
+++ b/go/pointer/pointer_test.go
@@ -34,6 +34,7 @@ import (
"golang.org/x/tools/go/ssa"
"golang.org/x/tools/go/ssa/ssautil"
"golang.org/x/tools/go/types/typeutil"
+ "golang.org/x/tools/internal/typeparams"
)
var inputs = []string{
@@ -65,72 +66,79 @@ var inputs = []string{
// "testdata/timer.go", // TODO(adonovan): fix broken assumptions about runtime timers
}
+var raceEnabled = false
+
// Expectation grammar:
//
// @calls f -> g
//
-// A 'calls' expectation asserts that edge (f, g) appears in the
-// callgraph. f and g are notated as per Function.String(), which
-// may contain spaces (e.g. promoted method in anon struct).
+// A 'calls' expectation asserts that edge (f, g) appears in the
+// callgraph. f and g are notated as per Function.String(), which
+// may contain spaces (e.g. promoted method in anon struct).
//
// @pointsto a | b | c
//
-// A 'pointsto' expectation asserts that the points-to set of its
-// operand contains exactly the set of labels {a,b,c} notated as per
-// labelString.
+// A 'pointsto' expectation asserts that the points-to set of its
+// operand contains exactly the set of labels {a,b,c} notated as per
+// labelString.
+//
+// A 'pointsto' expectation must appear on the same line as a
+// print(x) statement; the expectation's operand is x.
//
-// A 'pointsto' expectation must appear on the same line as a
-// print(x) statement; the expectation's operand is x.
+// If one of the strings is "...", the expectation asserts that the
+// points-to set at least the other labels.
//
-// If one of the strings is "...", the expectation asserts that the
-// points-to set at least the other labels.
+// We use '|' because label names may contain spaces, e.g. methods
+// of anonymous structs.
//
-// We use '|' because label names may contain spaces, e.g. methods
-// of anonymous structs.
+// Assertions within generic functions are treated as a union of all
+// of the instantiations.
//
-// From a theoretical perspective, concrete types in interfaces are
-// labels too, but they are represented differently and so have a
-// different expectation, @types, below.
+// From a theoretical perspective, concrete types in interfaces are
+// labels too, but they are represented differently and so have a
+// different expectation, @types, below.
//
// @types t | u | v
//
-// A 'types' expectation asserts that the set of possible dynamic
-// types of its interface operand is exactly {t,u,v}, notated per
-// go/types.Type.String(). In other words, it asserts that the type
-// component of the interface may point to that set of concrete type
-// literals. It also works for reflect.Value, though the types
-// needn't be concrete in that case.
+// A 'types' expectation asserts that the set of possible dynamic
+// types of its interface operand is exactly {t,u,v}, notated per
+// go/types.Type.String(). In other words, it asserts that the type
+// component of the interface may point to that set of concrete type
+// literals. It also works for reflect.Value, though the types
+// needn't be concrete in that case.
//
-// A 'types' expectation must appear on the same line as a
-// print(x) statement; the expectation's operand is x.
+// A 'types' expectation must appear on the same line as a
+// print(x) statement; the expectation's operand is x.
//
-// If one of the strings is "...", the expectation asserts that the
-// interface's type may point to at least the other types.
+// If one of the strings is "...", the expectation asserts that the
+// interface's type may point to at least the other types.
//
-// We use '|' because type names may contain spaces.
+// We use '|' because type names may contain spaces.
+//
+// Assertions within generic functions are treated as a union of all
+// of the instantiations.
//
// @warning "regexp"
//
-// A 'warning' expectation asserts that the analysis issues a
-// warning that matches the regular expression within the string
-// literal.
+// A 'warning' expectation asserts that the analysis issues a
+// warning that matches the regular expression within the string
+// literal.
//
// @line id
//
-// A line directive associates the name "id" with the current
-// file:line. The string form of labels will use this id instead of
-// a file:line, making @pointsto expectations more robust against
-// perturbations in the source file.
-// (NB, anon functions still include line numbers.)
-//
+// A line directive associates the name "id" with the current
+// file:line. The string form of labels will use this id instead of
+// a file:line, making @pointsto expectations more robust against
+// perturbations in the source file.
+// (NB, anon functions still include line numbers.)
type expectation struct {
kind string // "pointsto" | "pointstoquery" | "types" | "calls" | "warning"
filepath string
linenum int // source line number, 1-based
args []string
- query string // extended query
- extended *pointer.Pointer // extended query pointer
- types []types.Type // for types
+ query string // extended query
+ extended []*pointer.Pointer // extended query pointer [per instantiation]
+ types []types.Type // for types
}
func (e *expectation) String() string {
@@ -147,18 +155,43 @@ func (e *expectation) needsProbe() bool {
return e.kind == "pointsto" || e.kind == "pointstoquery" || e.kind == "types"
}
-// Find probe (call to print(x)) of same source file/line as expectation.
-func findProbe(prog *ssa.Program, probes map[*ssa.CallCommon]bool, queries map[ssa.Value]pointer.Pointer, e *expectation) (site *ssa.CallCommon, pts pointer.PointsToSet) {
+// Find probes (call to print(x)) of same source file/line as expectation.
+//
+// May match multiple calls for different instantiations.
+func findProbes(prog *ssa.Program, probes map[*ssa.CallCommon]bool, e *expectation) []*ssa.CallCommon {
+ var calls []*ssa.CallCommon
for call := range probes {
pos := prog.Fset.Position(call.Pos())
if pos.Line == e.linenum && pos.Filename == e.filepath {
// TODO(adonovan): send this to test log (display only on failure).
// fmt.Printf("%s:%d: info: found probe for %s: %s\n",
// e.filepath, e.linenum, e, p.arg0) // debugging
- return call, queries[call.Args[0]].PointsTo()
+ calls = append(calls, call)
}
}
- return // e.g. analysis didn't reach this call
+ return calls
+}
+
+// Find points to sets of probes (call to print(x)).
+func probesPointTo(calls []*ssa.CallCommon, queries map[ssa.Value]pointer.Pointer) []pointer.PointsToSet {
+ ptss := make([]pointer.PointsToSet, len(calls))
+ for i, call := range calls {
+ ptss[i] = queries[call.Args[0]].PointsTo()
+ }
+ return ptss
+}
+
+// Find the types of the probes (call to print(x)).
+// Returns an error if type of the probe cannot point.
+func probesPointToTypes(calls []*ssa.CallCommon) ([]types.Type, error) {
+ tProbes := make([]types.Type, len(calls))
+ for i, call := range calls {
+ tProbes[i] = call.Args[0].Type()
+ if !pointer.CanPoint(tProbes[i]) {
+ return nil, fmt.Errorf("expectation on non-pointerlike operand: %s", tProbes[i])
+ }
+ }
+ return tProbes, nil
}
func doOneInput(t *testing.T, input, fpath string) bool {
@@ -177,7 +210,8 @@ func doOneInput(t *testing.T, input, fpath string) bool {
}
// SSA creation + building.
- prog, ssaPkgs := ssautil.AllPackages(pkgs, ssa.SanityCheckFunctions)
+ mode := ssa.SanityCheckFunctions | ssa.InstantiateGenerics
+ prog, ssaPkgs := ssautil.AllPackages(pkgs, mode)
prog.Build()
// main underlying packages.Package.
@@ -197,12 +231,24 @@ func doOneInput(t *testing.T, input, fpath string) bool {
}
}
+ // files in mainPpkg.
+ mainFiles := make(map[*token.File]bool)
+ for _, syn := range mainPpkg.Syntax {
+ mainFiles[prog.Fset.File(syn.Pos())] = true
+ }
+
// Find all calls to the built-in print(x). Analytically,
// print is a no-op, but it's a convenient hook for testing
// the PTS of an expression, so our tests use it.
+ // Exclude generic bodies as these should be dead code for pointer.
+ // Instance of generics are included.
probes := make(map[*ssa.CallCommon]bool)
for fn := range ssautil.AllFunctions(prog) {
- if fn.Pkg == mainpkg {
+ if isGenericBody(fn) {
+ continue // skip generic bodies
+ }
+ // TODO(taking): Switch to a more principled check like fn.declaredPackage() == mainPkg if Origin is exported.
+ if fn.Pkg == mainpkg || (fn.Pkg == nil && mainFiles[prog.Fset.File(fn.Pos())]) {
for _, b := range fn.Blocks {
for _, instr := range b.Instrs {
if instr, ok := instr.(ssa.CallInstruction); ok {
@@ -311,18 +357,16 @@ func doOneInput(t *testing.T, input, fpath string) bool {
Mains: []*ssa.Package{ptrmain},
Log: &log,
}
-probeLoop:
for probe := range probes {
v := probe.Args[0]
pos := prog.Fset.Position(probe.Pos())
for _, e := range exps {
if e.linenum == pos.Line && e.filepath == pos.Filename && e.kind == "pointstoquery" {
- var err error
- e.extended, err = config.AddExtendedQuery(v, e.query)
+ extended, err := config.AddExtendedQuery(v, e.query)
if err != nil {
panic(err)
}
- continue probeLoop
+ e.extended = append(e.extended, extended)
}
}
if pointer.CanPoint(v.Type()) {
@@ -345,34 +389,42 @@ probeLoop:
// Check the expectations.
for _, e := range exps {
- var call *ssa.CallCommon
- var pts pointer.PointsToSet
- var tProbe types.Type
+ var tProbes []types.Type
+ var calls []*ssa.CallCommon
+ var ptss []pointer.PointsToSet
if e.needsProbe() {
- if call, pts = findProbe(prog, probes, result.Queries, e); call == nil {
+ calls = findProbes(prog, probes, e)
+ if len(calls) == 0 {
ok = false
e.errorf("unreachable print() statement has expectation %s", e)
continue
}
- if e.extended != nil {
- pts = e.extended.PointsTo()
+ if e.extended == nil {
+ ptss = probesPointTo(calls, result.Queries)
+ } else {
+ ptss = make([]pointer.PointsToSet, len(e.extended))
+ for i, p := range e.extended {
+ ptss[i] = p.PointsTo()
+ }
}
- tProbe = call.Args[0].Type()
- if !pointer.CanPoint(tProbe) {
+
+ var err error
+ tProbes, err = probesPointToTypes(calls)
+ if err != nil {
ok = false
- e.errorf("expectation on non-pointerlike operand: %s", tProbe)
+ e.errorf(err.Error())
continue
}
}
switch e.kind {
case "pointsto", "pointstoquery":
- if !checkPointsToExpectation(e, pts, lineMapping, prog) {
+ if !checkPointsToExpectation(e, ptss, lineMapping, prog) {
ok = false
}
case "types":
- if !checkTypesExpectation(e, pts, tProbe) {
+ if !checkTypesExpectation(e, ptss, tProbes) {
ok = false
}
@@ -417,7 +469,7 @@ func labelString(l *pointer.Label, lineMapping map[string]string, prog *ssa.Prog
return str
}
-func checkPointsToExpectation(e *expectation, pts pointer.PointsToSet, lineMapping map[string]string, prog *ssa.Program) bool {
+func checkPointsToExpectation(e *expectation, ptss []pointer.PointsToSet, lineMapping map[string]string, prog *ssa.Program) bool {
expected := make(map[string]int)
surplus := make(map[string]int)
exact := true
@@ -430,12 +482,14 @@ func checkPointsToExpectation(e *expectation, pts pointer.PointsToSet, lineMappi
}
// Find the set of labels that the probe's
// argument (x in print(x)) may point to.
- for _, label := range pts.Labels() {
- name := labelString(label, lineMapping, prog)
- if expected[name] > 0 {
- expected[name]--
- } else if exact {
- surplus[name]++
+ for _, pts := range ptss { // treat ptss as union of points-to sets.
+ for _, label := range pts.Labels() {
+ name := labelString(label, lineMapping, prog)
+ if expected[name] > 0 {
+ expected[name]--
+ } else if exact {
+ surplus[name]++
+ }
}
}
// Report multiset difference:
@@ -457,7 +511,7 @@ func checkPointsToExpectation(e *expectation, pts pointer.PointsToSet, lineMappi
return ok
}
-func checkTypesExpectation(e *expectation, pts pointer.PointsToSet, typ types.Type) bool {
+func checkTypesExpectation(e *expectation, ptss []pointer.PointsToSet, typs []types.Type) bool {
var expected typeutil.Map
var surplus typeutil.Map
exact := true
@@ -469,18 +523,26 @@ func checkTypesExpectation(e *expectation, pts pointer.PointsToSet, typ types.Ty
expected.Set(g, struct{}{})
}
- if !pointer.CanHaveDynamicTypes(typ) {
- e.errorf("@types expectation requires an interface- or reflect.Value-typed operand, got %s", typ)
+ if len(typs) != len(ptss) {
+ e.errorf("@types expectation internal error differing number of types(%d) and points to sets (%d)", len(typs), len(ptss))
return false
}
// Find the set of types that the probe's
// argument (x in print(x)) may contain.
- for _, T := range pts.DynamicTypes().Keys() {
- if expected.At(T) != nil {
- expected.Delete(T)
- } else if exact {
- surplus.Set(T, struct{}{})
+ for i := range ptss {
+ var Ts []types.Type
+ if pointer.CanHaveDynamicTypes(typs[i]) {
+ Ts = ptss[i].DynamicTypes().Keys()
+ } else {
+ Ts = append(Ts, typs[i]) // static type
+ }
+ for _, T := range Ts {
+ if expected.At(T) != nil {
+ expected.Delete(T)
+ } else if exact {
+ surplus.Set(T, struct{}{})
+ }
}
}
// Report set difference:
@@ -554,10 +616,6 @@ func TestInput(t *testing.T) {
if testing.Short() {
t.Skip("skipping in short mode; this test requires tons of memory; https://golang.org/issue/14113")
}
- if unsafe.Sizeof(unsafe.Pointer(nil)) <= 4 {
- t.Skip("skipping memory-intensive test on platform with small address space; https://golang.org/issue/14113")
- }
- ok := true
wd, err := os.Getwd()
if err != nil {
@@ -572,24 +630,44 @@ func TestInput(t *testing.T) {
fmt.Fprintf(os.Stderr, "Entering directory `%s'\n", wd)
for _, filename := range inputs {
- content, err := ioutil.ReadFile(filename)
- if err != nil {
- t.Errorf("couldn't read file '%s': %s", filename, err)
- continue
- }
+ filename := filename
+ t.Run(filename, func(t *testing.T) {
+ if filename == "testdata/a_test.go" {
+ // For some reason this particular file is way more expensive than the others.
+ if unsafe.Sizeof(unsafe.Pointer(nil)) <= 4 {
+ t.Skip("skipping memory-intensive test on platform with small address space; https://golang.org/issue/14113")
+ }
+ if raceEnabled {
+ t.Skip("skipping memory-intensive test under race detector; https://golang.org/issue/14113")
+ }
+ } else {
+ t.Parallel()
+ }
- fpath, err := filepath.Abs(filename)
- if err != nil {
- t.Errorf("couldn't get absolute path for '%s': %s", filename, err)
- }
+ content, err := ioutil.ReadFile(filename)
+ if err != nil {
+ t.Fatalf("couldn't read file '%s': %s", filename, err)
+ }
- if !doOneInput(t, string(content), fpath) {
- ok = false
- }
+ fpath, err := filepath.Abs(filename)
+ if err != nil {
+ t.Fatalf("couldn't get absolute path for '%s': %s", filename, err)
+ }
+
+ if !doOneInput(t, string(content), fpath) {
+ t.Fail()
+ }
+ })
}
- if !ok {
- t.Fail()
+}
+
+// isGenericBody returns true if fn is the body of a generic function.
+func isGenericBody(fn *ssa.Function) bool {
+ sig := fn.Signature
+ if typeparams.ForSignature(sig).Len() > 0 || typeparams.RecvTypeParams(sig).Len() > 0 {
+ return fn.Synthetic == ""
}
+ return false
}
// join joins the elements of multiset with " | "s.
@@ -616,3 +694,34 @@ func split(s, sep string) (r []string) {
}
return
}
+
+func TestTypeParam(t *testing.T) {
+ if !typeparams.Enabled {
+ t.Skip("TestTypeParamInput requires type parameters")
+ }
+ // Based on TestInput. Keep this up to date with that.
+ filename := "testdata/typeparams.go"
+
+ if testing.Short() {
+ t.Skip("skipping in short mode; this test requires tons of memory; https://golang.org/issue/14113")
+ }
+
+ wd, err := os.Getwd()
+ if err != nil {
+ t.Fatalf("os.Getwd: %s", err)
+ }
+ fmt.Fprintf(os.Stderr, "Entering directory `%s'\n", wd)
+
+ content, err := ioutil.ReadFile(filename)
+ if err != nil {
+ t.Fatalf("couldn't read file '%s': %s", filename, err)
+ }
+ fpath, err := filepath.Abs(filename)
+ if err != nil {
+ t.Errorf("couldn't get absolute path for '%s': %s", filename, err)
+ }
+
+ if !doOneInput(t, string(content), fpath) {
+ t.Fail()
+ }
+}
diff --git a/go/pointer/reflect.go b/go/pointer/reflect.go
index 7aa1a9cb8..3762dd8d4 100644
--- a/go/pointer/reflect.go
+++ b/go/pointer/reflect.go
@@ -1024,7 +1024,7 @@ func ext۰reflect۰ChanOf(a *analysis, cgn *cgnode) {
var dir reflect.ChanDir // unknown
if site := cgn.callersite; site != nil {
if c, ok := site.instr.Common().Args[0].(*ssa.Const); ok {
- v, _ := constant.Int64Val(c.Value)
+ v := c.Int64()
if 0 <= v && v <= int64(reflect.BothDir) {
dir = reflect.ChanDir(v)
}
@@ -1751,8 +1751,7 @@ func ext۰reflect۰rtype۰InOut(a *analysis, cgn *cgnode, out bool) {
index := -1
if site := cgn.callersite; site != nil {
if c, ok := site.instr.Common().Args[0].(*ssa.Const); ok {
- v, _ := constant.Int64Val(c.Value)
- index = int(v)
+ index = int(c.Int64())
}
}
a.addConstraint(&rtypeInOutConstraint{
@@ -1943,14 +1942,13 @@ func ext۰reflect۰rtype۰Method(a *analysis, cgn *cgnode) {
// types they create to ensure termination of the algorithm in cases
// where the output of a type constructor flows to its input, e.g.
//
-// func f(t reflect.Type) {
-// f(reflect.PtrTo(t))
-// }
+// func f(t reflect.Type) {
+// f(reflect.PtrTo(t))
+// }
//
// It does this by limiting the type height to k, but this still leaves
// a potentially exponential (4^k) number of of types that may be
// enumerated in pathological cases.
-//
func typeHeight(T types.Type) int {
switch T := T.(type) {
case *types.Chan:
diff --git a/go/pointer/solve.go b/go/pointer/solve.go
index 0fdd098b0..7a41b78a8 100644
--- a/go/pointer/solve.go
+++ b/go/pointer/solve.go
@@ -91,7 +91,6 @@ func (a *analysis) solve() {
// and adds them to the graph, ensuring
// that new constraints are applied to pre-existing labels and
// that pre-existing constraints are applied to new labels.
-//
func (a *analysis) processNewConstraints() {
// Take the slice of new constraints.
// (May grow during call to solveConstraints.)
@@ -151,7 +150,6 @@ func (a *analysis) processNewConstraints() {
// solveConstraints applies each resolution rule attached to node n to
// the set of labels delta. It may generate new constraints in
// a.constraints.
-//
func (a *analysis) solveConstraints(n *node, delta *nodeset) {
if delta.IsEmpty() {
return
@@ -199,7 +197,6 @@ func (a *analysis) addWork(id nodeid) {
//
// The size of the copy is implicitly 1.
// It returns true if pts(dst) changed.
-//
func (a *analysis) onlineCopy(dst, src nodeid) bool {
if dst != src {
if nsrc := a.nodes[src]; nsrc.solve.copyTo.add(dst) {
@@ -221,7 +218,6 @@ func (a *analysis) onlineCopy(dst, src nodeid) bool {
//
// TODO(adonovan): now that we support a.copy() during solving, we
// could eliminate onlineCopyN, but it's much slower. Investigate.
-//
func (a *analysis) onlineCopyN(dst, src nodeid, sizeof uint32) uint32 {
for i := uint32(0); i < sizeof; i++ {
if a.onlineCopy(dst, src) {
diff --git a/go/pointer/stdlib_test.go b/go/pointer/stdlib_test.go
index 3ba42a171..978cfb8fe 100644
--- a/go/pointer/stdlib_test.go
+++ b/go/pointer/stdlib_test.go
@@ -46,7 +46,7 @@ func TestStdlib(t *testing.T) {
}
// Create SSA packages.
- prog, _ := ssautil.AllPackages(pkgs, 0)
+ prog, _ := ssautil.AllPackages(pkgs, ssa.InstantiateGenerics)
prog.Build()
numPkgs := len(prog.AllPackages())
diff --git a/go/pointer/testdata/typeparams.go b/go/pointer/testdata/typeparams.go
new file mode 100644
index 000000000..461ba4437
--- /dev/null
+++ b/go/pointer/testdata/typeparams.go
@@ -0,0 +1,68 @@
+//go:build ignore
+// +build ignore
+
+package main
+
+import (
+ "fmt"
+ "os"
+)
+
+type S[T any] struct{ t T }
+
+var theSint S[int]
+var theSbool S[bool]
+
+func (s *S[T]) String() string {
+ print(s) // @pointsto command-line-arguments.theSbool | command-line-arguments.theSint
+ return ""
+}
+
+func Type[T any]() {
+ var x *T
+ print(x) // @types *int | *bool
+}
+
+func Caller[T any]() {
+ var s *S[T]
+ _ = s.String()
+}
+
+var a int
+var b bool
+
+type t[T any] struct {
+ a *map[string]chan *T
+}
+
+func fn[T any](a *T) {
+ m := make(map[string]chan *T)
+ m[""] = make(chan *T, 1)
+ m[""] <- a
+ x := []t[T]{t[T]{a: &m}}
+ print(x) // @pointstoquery <-(*x[i].a)[key] command-line-arguments.a | command-line-arguments.b
+}
+
+func main() {
+ // os.Args is considered intrinsically allocated,
+ // but may also be set explicitly (e.g. on Windows), hence '...'.
+ print(os.Args) // @pointsto <command-line args> | ...
+ fmt.Println("Hello!", &theSint)
+ fmt.Println("World!", &theSbool)
+
+ Type[int]() // call
+ f := Type[bool] // call through a variable
+ _ = Type[string] // not called so will not appear in Type's print.
+ f()
+
+ Caller[int]()
+ Caller[bool]()
+
+ fn(&a)
+ fn(&b)
+}
+
+// @calls (*fmt.pp).handleMethods -> (*command-line-arguments.S[int]).String[int]
+// @calls (*fmt.pp).handleMethods -> (*command-line-arguments.S[bool]).String[bool]
+// @calls command-line-arguments.Caller[int] -> (*command-line-arguments.S[int]).String[int]
+// @calls command-line-arguments.Caller[bool] -> (*command-line-arguments.S[bool]).String[bool]
diff --git a/go/pointer/util.go b/go/pointer/util.go
index 5bdd623c0..17728aa06 100644
--- a/go/pointer/util.go
+++ b/go/pointer/util.go
@@ -8,12 +8,13 @@ import (
"bytes"
"fmt"
"go/types"
- exec "golang.org/x/sys/execabs"
"log"
"os"
"runtime"
"time"
+ exec "golang.org/x/sys/execabs"
+
"golang.org/x/tools/container/intsets"
)
@@ -35,7 +36,6 @@ func CanPoint(T types.Type) bool {
// CanHaveDynamicTypes reports whether the type T can "hold" dynamic types,
// i.e. is an interface (incl. reflect.Type) or a reflect.Value.
-//
func CanHaveDynamicTypes(T types.Type) bool {
switch T := T.(type) {
case *types.Named:
@@ -69,17 +69,21 @@ func deref(typ types.Type) types.Type {
// of a type T: the subelement's type and its path from the root of T.
//
// For example, for this type:
-// type line struct{ points []struct{x, y int} }
+//
+// type line struct{ points []struct{x, y int} }
+//
// flatten() of the inner struct yields the following []fieldInfo:
-// struct{ x, y int } ""
-// int ".x"
-// int ".y"
+//
+// struct{ x, y int } ""
+// int ".x"
+// int ".y"
+//
// and flatten(line) yields:
-// struct{ points []struct{x, y int} } ""
-// struct{ x, y int } ".points[*]"
-// int ".points[*].x
-// int ".points[*].y"
//
+// struct{ points []struct{x, y int} } ""
+// struct{ x, y int } ".points[*]"
+// int ".points[*].x
+// int ".points[*].y"
type fieldInfo struct {
typ types.Type
@@ -89,7 +93,6 @@ type fieldInfo struct {
}
// path returns a user-friendly string describing the subelement path.
-//
func (fi *fieldInfo) path() string {
var buf bytes.Buffer
for p := fi; p != nil; p = p.tail {
@@ -113,7 +116,6 @@ func (fi *fieldInfo) path() string {
// reflect.Value is considered pointerlike, similar to interface{}.
//
// Callers must not mutate the result.
-//
func (a *analysis) flatten(t types.Type) []*fieldInfo {
fl, ok := a.flattenMemo[t]
if !ok {
@@ -124,7 +126,7 @@ func (a *analysis) flatten(t types.Type) []*fieldInfo {
// Debuggability hack: don't remove
// the named type from interfaces as
// they're very verbose.
- fl = append(fl, &fieldInfo{typ: t})
+ fl = append(fl, &fieldInfo{typ: t}) // t may be a type param
} else {
fl = a.flatten(u)
}
diff --git a/go/ssa/TODO b/go/ssa/TODO
new file mode 100644
index 000000000..6c35253c7
--- /dev/null
+++ b/go/ssa/TODO
@@ -0,0 +1,16 @@
+-*- text -*-
+
+SSA Generics to-do list
+===========================
+
+DOCUMENTATION:
+- Read me for internals
+
+TYPE PARAMETERIZED GENERIC FUNCTIONS:
+- sanity.go updates.
+- Check source functions going to generics.
+- Tests, tests, tests...
+
+USAGE:
+- Back fill users for handling ssa.InstantiateGenerics being off.
+
diff --git a/go/ssa/block.go b/go/ssa/block.go
index 35f317332..28170c787 100644
--- a/go/ssa/block.go
+++ b/go/ssa/block.go
@@ -19,14 +19,12 @@ func (b *BasicBlock) Parent() *Function { return b.parent }
// String returns a human-readable label of this block.
// It is not guaranteed unique within the function.
-//
func (b *BasicBlock) String() string {
return fmt.Sprintf("%d", b.Index)
}
// emit appends an instruction to the current basic block.
// If the instruction defines a Value, it is returned.
-//
func (b *BasicBlock) emit(i Instruction) Value {
i.setBlock(b)
b.Instrs = append(b.Instrs, i)
@@ -63,7 +61,6 @@ func (b *BasicBlock) phis() []Instruction {
// replacePred replaces all occurrences of p in b's predecessor list with q.
// Ordinarily there should be at most one.
-//
func (b *BasicBlock) replacePred(p, q *BasicBlock) {
for i, pred := range b.Preds {
if pred == p {
@@ -74,7 +71,6 @@ func (b *BasicBlock) replacePred(p, q *BasicBlock) {
// replaceSucc replaces all occurrences of p in b's successor list with q.
// Ordinarily there should be at most one.
-//
func (b *BasicBlock) replaceSucc(p, q *BasicBlock) {
for i, succ := range b.Succs {
if succ == p {
@@ -86,7 +82,6 @@ func (b *BasicBlock) replaceSucc(p, q *BasicBlock) {
// removePred removes all occurrences of p in b's
// predecessor list and φ-nodes.
// Ordinarily there should be at most one.
-//
func (b *BasicBlock) removePred(p *BasicBlock) {
phis := b.phis()
diff --git a/go/ssa/blockopt.go b/go/ssa/blockopt.go
index e79260a21..7dabce8ca 100644
--- a/go/ssa/blockopt.go
+++ b/go/ssa/blockopt.go
@@ -31,7 +31,6 @@ func markReachable(b *BasicBlock) {
// deleteUnreachableBlocks marks all reachable blocks of f and
// eliminates (nils) all others, including possibly cyclic subgraphs.
-//
func deleteUnreachableBlocks(f *Function) {
const white, black = 0, -1
// We borrow b.Index temporarily as the mark bit.
@@ -61,7 +60,6 @@ func deleteUnreachableBlocks(f *Function) {
// jumpThreading attempts to apply simple jump-threading to block b,
// in which a->b->c become a->c if b is just a Jump.
// The result is true if the optimization was applied.
-//
func jumpThreading(f *Function, b *BasicBlock) bool {
if b.Index == 0 {
return false // don't apply to entry block
@@ -108,7 +106,6 @@ func jumpThreading(f *Function, b *BasicBlock) bool {
// fuseBlocks attempts to apply the block fusion optimization to block
// a, in which a->b becomes ab if len(a.Succs)==len(b.Preds)==1.
// The result is true if the optimization was applied.
-//
func fuseBlocks(f *Function, a *BasicBlock) bool {
if len(a.Succs) != 1 {
return false
@@ -150,7 +147,6 @@ func fuseBlocks(f *Function, a *BasicBlock) bool {
// optimizeBlocks() performs some simple block optimizations on a
// completed function: dead block elimination, block fusion, jump
// threading.
-//
func optimizeBlocks(f *Function) {
deleteUnreachableBlocks(f)
diff --git a/go/ssa/builder.go b/go/ssa/builder.go
index ac85541c9..be8d36a6e 100644
--- a/go/ssa/builder.go
+++ b/go/ssa/builder.go
@@ -24,10 +24,86 @@ package ssa
// TODO(adonovan): indeed, building functions is now embarrassingly parallel.
// Audit for concurrency then benchmark using more goroutines.
//
-// The builder's and Program's indices (maps) are populated and
+// State:
+//
+// The Package's and Program's indices (maps) are populated and
// mutated during the CREATE phase, but during the BUILD phase they
// remain constant. The sole exception is Prog.methodSets and its
// related maps, which are protected by a dedicated mutex.
+//
+// Generic functions declared in a package P can be instantiated from functions
+// outside of P. This happens independently of the CREATE and BUILD phase of P.
+//
+// Locks:
+//
+// Mutexes are currently acquired according to the following order:
+// Prog.methodsMu ⊃ canonizer.mu ⊃ printMu
+// where x ⊃ y denotes that y can be acquired while x is held
+// and x cannot be acquired while y is held.
+//
+// Synthetics:
+//
+// During the BUILD phase new functions can be created and built. These include:
+// - wrappers (wrappers, bounds, thunks)
+// - generic function instantiations
+// These functions do not belong to a specific Pkg (Pkg==nil). Instead the
+// Package that led to them being CREATED is obligated to ensure these
+// are BUILT during the BUILD phase of the Package.
+//
+// Runtime types:
+//
+// A concrete type is a type that is fully monomorphized with concrete types,
+// i.e. it cannot reach a TypeParam type.
+// Some concrete types require full runtime type information. Cases
+// include checking whether a type implements an interface or
+// interpretation by the reflect package. All such types that may require
+// this information will have all of their method sets built and will be added to Prog.methodSets.
+// A type T is considered to require runtime type information if it is
+// a runtime type and has a non-empty method set and either:
+// - T flows into a MakeInterface instructions,
+// - T appears in a concrete exported member, or
+// - T is a type reachable from a type S that has non-empty method set.
+// For any such type T, method sets must be created before the BUILD
+// phase of the package is done.
+//
+// Function literals:
+//
+// The BUILD phase of a function literal (anonymous function) is tied to the
+// BUILD phase of the enclosing parent function. The FreeVars of an anonymous
+// function are discovered by building the anonymous function. This in turn
+// changes which variables must be bound in a MakeClosure instruction in the
+// parent. Anonymous functions also track where they are referred to in their
+// parent function.
+//
+// Happens-before:
+//
+// The above discussion leads to the following happens-before relation for
+// the BUILD and CREATE phases.
+// The happens-before relation (with X<Y denoting X happens-before Y) are:
+// - CREATE fn < fn.startBody() < fn.finishBody() < fn.built
+// for any function fn.
+// - anon.parent.startBody() < CREATE anon, and
+// anon.finishBody() < anon.parent().finishBody() < anon.built < fn.built
+// for an anonymous function anon (i.e. anon.parent() != nil).
+// - CREATE fn.Pkg < CREATE fn
+// for a declared function fn (i.e. fn.Pkg != nil)
+// - fn.built < BUILD pkg done
+// for any function fn created during the CREATE or BUILD phase of a package
+// pkg. This includes declared and synthetic functions.
+//
+// Program.MethodValue:
+//
+// Program.MethodValue may trigger new wrapper and instantiation functions to
+// be created. It has the same obligation to BUILD created functions as a
+// Package.
+//
+// Program.NewFunction:
+//
+// This is a low level operation for creating functions that do not exist in
+// the source. Use with caution.
+//
+// TODO(taking): Use consistent terminology for "concrete".
+// TODO(taking): Use consistent terminology for "monomorphization"/"instantiate"/"expand".
import (
"fmt"
@@ -37,6 +113,8 @@ import (
"go/types"
"os"
"sync"
+
+ "golang.org/x/tools/internal/typeparams"
)
type opaqueType struct {
@@ -58,7 +136,7 @@ var (
tString = types.Typ[types.String]
tUntypedNil = types.Typ[types.UntypedNil]
tRangeIter = &opaqueType{nil, "iter"} // the type of all "range" iterators
- tEface = types.NewInterface(nil, nil).Complete()
+ tEface = types.NewInterfaceType(nil, nil).Complete()
// SSA Value constants.
vZero = intConst(0)
@@ -68,13 +146,17 @@ var (
// builder holds state associated with the package currently being built.
// Its methods contain all the logic for AST-to-SSA conversion.
-type builder struct{}
+type builder struct {
+ // Invariant: 0 <= rtypes <= finished <= created.Len()
+ created *creator // functions created during building
+ finished int // Invariant: create[i].built holds for i in [0,finished)
+ rtypes int // Invariant: all of the runtime types for create[i] have been added for i in [0,rtypes)
+}
// cond emits to fn code to evaluate boolean condition e and jump
// to t or f depending on its value, performing various simplifications.
//
// Postcondition: fn.currentBlock is nil.
-//
func (b *builder) cond(fn *Function, e ast.Expr, t, f *BasicBlock) {
switch e := e.(type) {
case *ast.ParenExpr:
@@ -117,7 +199,6 @@ func (b *builder) cond(fn *Function, e ast.Expr, t, f *BasicBlock) {
// logicalBinop emits code to fn to evaluate e, a &&- or
// ||-expression whose reified boolean value is wanted.
// The value is returned.
-//
func (b *builder) logicalBinop(fn *Function, e *ast.BinaryExpr) Value {
rhs := fn.newBasicBlock("binop.rhs")
done := fn.newBasicBlock("binop.done")
@@ -178,7 +259,6 @@ func (b *builder) logicalBinop(fn *Function, e *ast.BinaryExpr) Value {
// assignment or return statement, and "value,ok" uses of
// TypeAssertExpr, IndexExpr (when X is a map), and UnaryExpr (when Op
// is token.ARROW).
-//
func (b *builder) exprN(fn *Function, e ast.Expr) Value {
typ := fn.typeOf(e).(*types.Tuple)
switch e := e.(type) {
@@ -195,7 +275,7 @@ func (b *builder) exprN(fn *Function, e ast.Expr) Value {
return fn.emit(&c)
case *ast.IndexExpr:
- mapt := fn.typeOf(e.X).Underlying().(*types.Map)
+ mapt := typeparams.CoreType(fn.typeOf(e.X)).(*types.Map) // ,ok must be a map.
lookup := &Lookup{
X: b.expr(fn, e.X),
Index: emitConv(fn, b.expr(fn, e.Index), mapt.Key()),
@@ -228,11 +308,11 @@ func (b *builder) exprN(fn *Function, e ast.Expr) Value {
// The result is nil if no special handling was required; in this case
// the caller should treat this like an ordinary library function
// call.
-//
func (b *builder) builtin(fn *Function, obj *types.Builtin, args []ast.Expr, typ types.Type, pos token.Pos) Value {
+ typ = fn.typ(typ)
switch obj.Name() {
case "make":
- switch typ.Underlying().(type) {
+ switch ct := typeparams.CoreType(typ).(type) {
case *types.Slice:
n := b.expr(fn, args[1])
m := n
@@ -242,7 +322,7 @@ func (b *builder) builtin(fn *Function, obj *types.Builtin, args []ast.Expr, typ
if m, ok := m.(*Const); ok {
// treat make([]T, n, m) as new([m]T)[:n]
cap := m.Int64()
- at := types.NewArray(typ.Underlying().(*types.Slice).Elem(), cap)
+ at := types.NewArray(ct.Elem(), cap)
alloc := emitNew(fn, at, pos)
alloc.Comment = "makeslice"
v := &Slice{
@@ -293,6 +373,8 @@ func (b *builder) builtin(fn *Function, obj *types.Builtin, args []ast.Expr, typ
// We must still evaluate the value, though. (If it
// was side-effect free, the whole call would have
// been constant-folded.)
+ //
+ // Type parameters are always non-constant so use Underlying.
t := deref(fn.typeOf(args[0])).Underlying()
if at, ok := t.(*types.Array); ok {
b.expr(fn, args[0]) // for effects only
@@ -319,10 +401,10 @@ func (b *builder) builtin(fn *Function, obj *types.Builtin, args []ast.Expr, typ
// addressable expression e as being a potentially escaping pointer
// value. For example, in this code:
//
-// a := A{
-// b: [1]B{B{c: 1}}
-// }
-// return &a.b[0].c
+// a := A{
+// b: [1]B{B{c: 1}}
+// }
+// return &a.b[0].c
//
// the application of & causes a.b[0].c to have its address taken,
// which means that ultimately the local variable a must be
@@ -333,7 +415,6 @@ func (b *builder) builtin(fn *Function, obj *types.Builtin, args []ast.Expr, typ
// - &x, including when implicit in method call or composite literals.
// - a[:] iff a is an array (not *array)
// - references to variables in lexically enclosing functions.
-//
func (b *builder) addr(fn *Function, e ast.Expr, escaping bool) lvalue {
switch e := e.(type) {
case *ast.Ident:
@@ -367,53 +448,67 @@ func (b *builder) addr(fn *Function, e ast.Expr, escaping bool) lvalue {
return b.addr(fn, e.X, escaping)
case *ast.SelectorExpr:
- sel, ok := fn.info.Selections[e]
- if !ok {
+ sel := fn.selection(e)
+ if sel == nil {
// qualified identifier
return b.addr(fn, e.Sel, escaping)
}
- if sel.Kind() != types.FieldVal {
+ if sel.kind != types.FieldVal {
panic(sel)
}
wantAddr := true
v := b.receiver(fn, e.X, wantAddr, escaping, sel)
- last := len(sel.Index()) - 1
- return &address{
- addr: emitFieldSelection(fn, v, sel.Index()[last], true, e.Sel),
- pos: e.Sel.Pos(),
- expr: e.Sel,
+ index := sel.index[len(sel.index)-1]
+ fld := typeparams.CoreType(deref(v.Type())).(*types.Struct).Field(index)
+
+ // Due to the two phases of resolving AssignStmt, a panic from x.f = p()
+ // when x is nil is required to come after the side-effects of
+ // evaluating x and p().
+ emit := func(fn *Function) Value {
+ return emitFieldSelection(fn, v, index, true, e.Sel)
}
+ return &lazyAddress{addr: emit, t: fld.Type(), pos: e.Sel.Pos(), expr: e.Sel}
case *ast.IndexExpr:
+ xt := fn.typeOf(e.X)
+ elem, mode := indexType(xt)
var x Value
var et types.Type
- switch t := fn.typeOf(e.X).Underlying().(type) {
- case *types.Array:
+ switch mode {
+ case ixArrVar: // array, array|slice, array|*array, or array|*array|slice.
x = b.addr(fn, e.X, escaping).address(fn)
- et = types.NewPointer(t.Elem())
- case *types.Pointer: // *array
- x = b.expr(fn, e.X)
- et = types.NewPointer(t.Elem().Underlying().(*types.Array).Elem())
- case *types.Slice:
+ et = types.NewPointer(elem)
+ case ixVar: // *array, slice, *array|slice
x = b.expr(fn, e.X)
- et = types.NewPointer(t.Elem())
- case *types.Map:
+ et = types.NewPointer(elem)
+ case ixMap:
+ mt := typeparams.CoreType(xt).(*types.Map)
return &element{
m: b.expr(fn, e.X),
- k: emitConv(fn, b.expr(fn, e.Index), t.Key()),
- t: t.Elem(),
+ k: emitConv(fn, b.expr(fn, e.Index), mt.Key()),
+ t: mt.Elem(),
pos: e.Lbrack,
}
default:
- panic("unexpected container type in IndexExpr: " + t.String())
+ panic("unexpected container type in IndexExpr: " + xt.String())
}
- v := &IndexAddr{
- X: x,
- Index: emitConv(fn, b.expr(fn, e.Index), tInt),
+ index := b.expr(fn, e.Index)
+ if isUntyped(index.Type()) {
+ index = emitConv(fn, index, tInt)
}
- v.setPos(e.Lbrack)
- v.setType(et)
- return &address{addr: fn.emit(v), pos: e.Lbrack, expr: e}
+ // Due to the two phases of resolving AssignStmt, a panic from x[i] = p()
+ // when x is nil or i is out-of-bounds is required to come after the
+ // side-effects of evaluating x, i and p().
+ emit := func(fn *Function) Value {
+ v := &IndexAddr{
+ X: x,
+ Index: index,
+ }
+ v.setPos(e.Lbrack)
+ v.setType(et)
+ return fn.emit(v)
+ }
+ return &lazyAddress{addr: emit, t: deref(et), pos: e.Lbrack, expr: e}
case *ast.StarExpr:
return &address{addr: b.expr(fn, e.X), pos: e.Star, expr: e}
@@ -452,7 +547,6 @@ func (sb *storebuf) emit(fn *Function) {
// storebuf sb so that they can be executed later. This allows correct
// in-place update of existing variables when the RHS is a composite
// literal that may reference parts of the LHS.
-//
func (b *builder) assign(fn *Function, loc lvalue, e ast.Expr, isZero bool, sb *storebuf) {
// Can we initialize it in place?
if e, ok := unparen(e).(*ast.CompositeLit); ok {
@@ -473,7 +567,7 @@ func (b *builder) assign(fn *Function, loc lvalue, e ast.Expr, isZero bool, sb *
}
if _, ok := loc.(*address); ok {
- if isInterface(loc.typ()) {
+ if isNonTypeParamInterface(loc.typ()) {
// e.g. var x interface{} = T{...}
// Can't in-place initialize an interface value.
// Fall back to copying.
@@ -511,7 +605,6 @@ func (b *builder) assign(fn *Function, loc lvalue, e ast.Expr, isZero bool, sb *
// expr lowers a single-result expression e to SSA form, emitting code
// to fn and returning the Value defined by the expression.
-//
func (b *builder) expr(fn *Function, e ast.Expr) Value {
e = unparen(e)
@@ -519,7 +612,7 @@ func (b *builder) expr(fn *Function, e ast.Expr) Value {
// Is expression a constant?
if tv.Value != nil {
- return NewConst(tv.Value, tv.Type)
+ return NewConst(tv.Value, fn.typ(tv.Type))
}
var v Value
@@ -544,22 +637,30 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value {
case *ast.FuncLit:
fn2 := &Function{
- name: fmt.Sprintf("%s$%d", fn.Name(), 1+len(fn.AnonFuncs)),
- Signature: fn.typeOf(e.Type).Underlying().(*types.Signature),
- pos: e.Type.Func,
- parent: fn,
- Pkg: fn.Pkg,
- Prog: fn.Prog,
- syntax: e,
- info: fn.info,
+ name: fmt.Sprintf("%s$%d", fn.Name(), 1+len(fn.AnonFuncs)),
+ Signature: fn.typeOf(e.Type).(*types.Signature),
+ pos: e.Type.Func,
+ parent: fn,
+ anonIdx: int32(len(fn.AnonFuncs)),
+ Pkg: fn.Pkg,
+ Prog: fn.Prog,
+ syntax: e,
+ topLevelOrigin: nil, // use anonIdx to lookup an anon instance's origin.
+ typeparams: fn.typeparams, // share the parent's type parameters.
+ typeargs: fn.typeargs, // share the parent's type arguments.
+ info: fn.info,
+ subst: fn.subst, // share the parent's type substitutions.
}
fn.AnonFuncs = append(fn.AnonFuncs, fn2)
- b.buildFunction(fn2)
+ b.created.Add(fn2)
+ b.buildFunctionBody(fn2)
+ // fn2 is not done BUILDing. fn2.referrers can still be updated.
+ // fn2 is done BUILDing after fn.finishBody().
if fn2.FreeVars == nil {
return fn2
}
v := &MakeClosure{Fn: fn2}
- v.setType(tv.Type)
+ v.setType(fn.typ(tv.Type))
for _, fv := range fn2.FreeVars {
v.Bindings = append(v.Bindings, fv.outer)
fv.outer = nil
@@ -567,13 +668,13 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value {
return fn.emit(v)
case *ast.TypeAssertExpr: // single-result form only
- return emitTypeAssert(fn, b.expr(fn, e.X), tv.Type, e.Lparen)
+ return emitTypeAssert(fn, b.expr(fn, e.X), fn.typ(tv.Type), e.Lparen)
case *ast.CallExpr:
if fn.info.Types[e.Fun].IsType() {
// Explicit type conversion, e.g. string(x) or big.Int(x)
x := b.expr(fn, e.Args[0])
- y := emitConv(fn, x, tv.Type)
+ y := emitConv(fn, x, fn.typ(tv.Type))
if y != x {
switch y := y.(type) {
case *Convert:
@@ -584,6 +685,8 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value {
y.pos = e.Lparen
case *SliceToArrayPointer:
y.pos = e.Lparen
+ case *UnOp: // conversion from slice to array.
+ y.pos = e.Lparen
}
}
return y
@@ -591,7 +694,7 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value {
// Call to "intrinsic" built-ins, e.g. new, make, panic.
if id, ok := unparen(e.Fun).(*ast.Ident); ok {
if obj, ok := fn.info.Uses[id].(*types.Builtin); ok {
- if v := b.builtin(fn, obj, e.Args, tv.Type, e.Lparen); v != nil {
+ if v := b.builtin(fn, obj, e.Args, fn.typ(tv.Type), e.Lparen); v != nil {
return v
}
}
@@ -599,7 +702,7 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value {
// Regular function call.
var v Call
b.setCall(fn, e, &v.Call)
- v.setType(tv.Type)
+ v.setType(fn.typ(tv.Type))
return fn.emit(&v)
case *ast.UnaryExpr:
@@ -622,7 +725,7 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value {
X: b.expr(fn, e.X),
}
v.setPos(e.OpPos)
- v.setType(tv.Type)
+ v.setType(fn.typ(tv.Type))
return fn.emit(v)
default:
panic(e.Op)
@@ -635,12 +738,12 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value {
case token.SHL, token.SHR:
fallthrough
case token.ADD, token.SUB, token.MUL, token.QUO, token.REM, token.AND, token.OR, token.XOR, token.AND_NOT:
- return emitArith(fn, e.Op, b.expr(fn, e.X), b.expr(fn, e.Y), tv.Type, e.OpPos)
+ return emitArith(fn, e.Op, b.expr(fn, e.X), b.expr(fn, e.Y), fn.typ(tv.Type), e.OpPos)
case token.EQL, token.NEQ, token.GTR, token.LSS, token.LEQ, token.GEQ:
cmp := emitCompare(fn, e.Op, b.expr(fn, e.X), b.expr(fn, e.Y), e.OpPos)
// The type of x==y may be UntypedBool.
- return emitConv(fn, cmp, types.Default(tv.Type))
+ return emitConv(fn, cmp, types.Default(fn.typ(tv.Type)))
default:
panic("illegal op in BinaryExpr: " + e.Op.String())
}
@@ -648,21 +751,27 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value {
case *ast.SliceExpr:
var low, high, max Value
var x Value
- switch fn.typeOf(e.X).Underlying().(type) {
+ xtyp := fn.typeOf(e.X)
+ switch typeparams.CoreType(xtyp).(type) {
case *types.Array:
// Potentially escaping.
x = b.addr(fn, e.X, true).address(fn)
case *types.Basic, *types.Slice, *types.Pointer: // *array
x = b.expr(fn, e.X)
default:
- panic("unreachable")
- }
- if e.High != nil {
- high = b.expr(fn, e.High)
+ // core type exception?
+ if isBytestring(xtyp) {
+ x = b.expr(fn, e.X) // bytestring is handled as string and []byte.
+ } else {
+ panic("unexpected sequence type in SliceExpr")
+ }
}
if e.Low != nil {
low = b.expr(fn, e.Low)
}
+ if e.High != nil {
+ high = b.expr(fn, e.High)
+ }
if e.Slice3 {
max = b.expr(fn, e.Max)
}
@@ -673,7 +782,7 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value {
Max: max,
}
v.setPos(e.Lbrack)
- v.setType(tv.Type)
+ v.setType(fn.typ(tv.Type))
return fn.emit(v)
case *ast.Ident:
@@ -681,108 +790,146 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value {
// Universal built-in or nil?
switch obj := obj.(type) {
case *types.Builtin:
- return &Builtin{name: obj.Name(), sig: tv.Type.(*types.Signature)}
+ return &Builtin{name: obj.Name(), sig: fn.instanceType(e).(*types.Signature)}
case *types.Nil:
- return nilConst(tv.Type)
+ return zeroConst(fn.instanceType(e))
}
// Package-level func or var?
if v := fn.Prog.packageLevelMember(obj); v != nil {
if g, ok := v.(*Global); ok {
return emitLoad(fn, g) // var (address)
}
- return v.(*Function) // (func)
+ callee := v.(*Function) // (func)
+ if callee.typeparams.Len() > 0 {
+ targs := fn.subst.types(instanceArgs(fn.info, e))
+ callee = fn.Prog.needsInstance(callee, targs, b.created)
+ }
+ return callee
}
// Local var.
return emitLoad(fn, fn.lookup(obj, false)) // var (address)
case *ast.SelectorExpr:
- sel, ok := fn.info.Selections[e]
- if !ok {
+ sel := fn.selection(e)
+ if sel == nil {
// builtin unsafe.{Add,Slice}
if obj, ok := fn.info.Uses[e.Sel].(*types.Builtin); ok {
- return &Builtin{name: obj.Name(), sig: tv.Type.(*types.Signature)}
+ return &Builtin{name: obj.Name(), sig: fn.typ(tv.Type).(*types.Signature)}
}
// qualified identifier
return b.expr(fn, e.Sel)
}
- switch sel.Kind() {
+ switch sel.kind {
case types.MethodExpr:
// (*T).f or T.f, the method f from the method-set of type T.
// The result is a "thunk".
- return emitConv(fn, makeThunk(fn.Prog, sel), tv.Type)
+ thunk := makeThunk(fn.Prog, sel, b.created)
+ return emitConv(fn, thunk, fn.typ(tv.Type))
case types.MethodVal:
// e.f where e is an expression and f is a method.
// The result is a "bound".
- obj := sel.Obj().(*types.Func)
- rt := recvType(obj)
+ obj := sel.obj.(*types.Func)
+ rt := fn.typ(recvType(obj))
wantAddr := isPointer(rt)
escaping := true
v := b.receiver(fn, e.X, wantAddr, escaping, sel)
- if isInterface(rt) {
- // If v has interface type I,
+
+ if types.IsInterface(rt) {
+ // If v may be an interface type I (after instantiating),
// we must emit a check that v is non-nil.
- // We use: typeassert v.(I).
- emitTypeAssert(fn, v, rt, token.NoPos)
+ if recv, ok := sel.recv.(*typeparams.TypeParam); ok {
+ // Emit a nil check if any possible instantiation of the
+ // type parameter is an interface type.
+ if typeSetOf(recv).Len() > 0 {
+ // recv has a concrete term its typeset.
+ // So it cannot be instantiated as an interface.
+ //
+ // Example:
+ // func _[T interface{~int; Foo()}] () {
+ // var v T
+ // _ = v.Foo // <-- MethodVal
+ // }
+ } else {
+ // rt may be instantiated as an interface.
+ // Emit nil check: typeassert (any(v)).(any).
+ emitTypeAssert(fn, emitConv(fn, v, tEface), tEface, token.NoPos)
+ }
+ } else {
+ // non-type param interface
+ // Emit nil check: typeassert v.(I).
+ emitTypeAssert(fn, v, rt, token.NoPos)
+ }
+ }
+ if targs := receiverTypeArgs(obj); len(targs) > 0 {
+ // obj is generic.
+ obj = fn.Prog.canon.instantiateMethod(obj, fn.subst.types(targs), fn.Prog.ctxt)
}
c := &MakeClosure{
- Fn: makeBound(fn.Prog, obj),
+ Fn: makeBound(fn.Prog, obj, b.created),
Bindings: []Value{v},
}
c.setPos(e.Sel.Pos())
- c.setType(tv.Type)
+ c.setType(fn.typ(tv.Type))
return fn.emit(c)
case types.FieldVal:
- indices := sel.Index()
+ indices := sel.index
last := len(indices) - 1
v := b.expr(fn, e.X)
- v = emitImplicitSelections(fn, v, indices[:last])
+ v = emitImplicitSelections(fn, v, indices[:last], e.Pos())
v = emitFieldSelection(fn, v, indices[last], false, e.Sel)
return v
}
panic("unexpected expression-relative selector")
+ case *typeparams.IndexListExpr:
+ // f[X, Y] must be a generic function
+ if !instance(fn.info, e.X) {
+ panic("unexpected expression-could not match index list to instantiation")
+ }
+ return b.expr(fn, e.X) // Handle instantiation within the *Ident or *SelectorExpr cases.
+
case *ast.IndexExpr:
- switch t := fn.typeOf(e.X).Underlying().(type) {
- case *types.Array:
- // Non-addressable array (in a register).
- v := &Index{
- X: b.expr(fn, e.X),
- Index: emitConv(fn, b.expr(fn, e.Index), tInt),
- }
- v.setPos(e.Lbrack)
- v.setType(t.Elem())
- return fn.emit(v)
+ if instance(fn.info, e.X) {
+ return b.expr(fn, e.X) // Handle instantiation within the *Ident or *SelectorExpr cases.
+ }
+ // not a generic instantiation.
+ xt := fn.typeOf(e.X)
+ switch et, mode := indexType(xt); mode {
+ case ixVar:
+ // Addressable slice/array; use IndexAddr and Load.
+ return b.addr(fn, e, false).load(fn)
- case *types.Map:
- // Maps are not addressable.
- mapt := fn.typeOf(e.X).Underlying().(*types.Map)
- v := &Lookup{
+ case ixArrVar, ixValue:
+ // An array in a register, a string or a combined type that contains
+ // either an [_]array (ixArrVar) or string (ixValue).
+
+ // Note: for ixArrVar and CoreType(xt)==nil can be IndexAddr and Load.
+ index := b.expr(fn, e.Index)
+ if isUntyped(index.Type()) {
+ index = emitConv(fn, index, tInt)
+ }
+ v := &Index{
X: b.expr(fn, e.X),
- Index: emitConv(fn, b.expr(fn, e.Index), mapt.Key()),
+ Index: index,
}
v.setPos(e.Lbrack)
- v.setType(mapt.Elem())
+ v.setType(et)
return fn.emit(v)
- case *types.Basic: // => string
- // Strings are not addressable.
+ case ixMap:
+ ct := typeparams.CoreType(xt).(*types.Map)
v := &Lookup{
X: b.expr(fn, e.X),
- Index: b.expr(fn, e.Index),
+ Index: emitConv(fn, b.expr(fn, e.Index), ct.Key()),
}
v.setPos(e.Lbrack)
- v.setType(tByte)
+ v.setType(ct.Elem())
return fn.emit(v)
-
- case *types.Slice, *types.Pointer: // *array
- // Addressable slice/array; use IndexAddr and Load.
- return b.addr(fn, e, false).load(fn)
-
default:
- panic("unexpected container type in IndexExpr: " + t.String())
+ panic("unexpected container type in IndexExpr: " + xt.String())
}
case *ast.CompositeLit, *ast.StarExpr:
@@ -806,21 +953,21 @@ func (b *builder) stmtList(fn *Function, list []ast.Stmt) {
// selections of sel.
//
// wantAddr requests that the result is an an address. If
-// !sel.Indirect(), this may require that e be built in addr() mode; it
+// !sel.indirect, this may require that e be built in addr() mode; it
// must thus be addressable.
//
// escaping is defined as per builder.addr().
-//
-func (b *builder) receiver(fn *Function, e ast.Expr, wantAddr, escaping bool, sel *types.Selection) Value {
+func (b *builder) receiver(fn *Function, e ast.Expr, wantAddr, escaping bool, sel *selection) Value {
var v Value
- if wantAddr && !sel.Indirect() && !isPointer(fn.typeOf(e)) {
+ if wantAddr && !sel.indirect && !isPointer(fn.typeOf(e)) {
v = b.addr(fn, e, escaping).address(fn)
} else {
v = b.expr(fn, e)
}
- last := len(sel.Index()) - 1
- v = emitImplicitSelections(fn, v, sel.Index()[:last])
+ last := len(sel.index) - 1
+ // The position of implicit selection is the position of the inducing receiver expression.
+ v = emitImplicitSelections(fn, v, sel.index[:last], e.Pos())
if !wantAddr && isPointer(v.Type()) {
v = emitLoad(fn, v)
}
@@ -830,32 +977,36 @@ func (b *builder) receiver(fn *Function, e ast.Expr, wantAddr, escaping bool, se
// setCallFunc populates the function parts of a CallCommon structure
// (Func, Method, Recv, Args[0]) based on the kind of invocation
// occurring in e.
-//
func (b *builder) setCallFunc(fn *Function, e *ast.CallExpr, c *CallCommon) {
c.pos = e.Lparen
// Is this a method call?
if selector, ok := unparen(e.Fun).(*ast.SelectorExpr); ok {
- sel, ok := fn.info.Selections[selector]
- if ok && sel.Kind() == types.MethodVal {
- obj := sel.Obj().(*types.Func)
+ sel := fn.selection(selector)
+ if sel != nil && sel.kind == types.MethodVal {
+ obj := sel.obj.(*types.Func)
recv := recvType(obj)
+
wantAddr := isPointer(recv)
escaping := true
v := b.receiver(fn, selector.X, wantAddr, escaping, sel)
- if isInterface(recv) {
+ if types.IsInterface(recv) {
// Invoke-mode call.
- c.Value = v
+ c.Value = v // possibly type param
c.Method = obj
} else {
// "Call"-mode call.
- c.Value = fn.Prog.declaredFunc(obj)
+ callee := fn.Prog.originFunc(obj)
+ if callee.typeparams.Len() > 0 {
+ callee = fn.Prog.needsInstance(callee, receiverTypeArgs(obj), b.created)
+ }
+ c.Value = callee
c.Args = append(c.Args, v)
}
return
}
- // sel.Kind()==MethodExpr indicates T.f() or (*T).f():
+ // sel.kind==MethodExpr indicates T.f() or (*T).f():
// a statically dispatched call to the method f in the
// method-set of T or *T. T may be an interface.
//
@@ -893,7 +1044,6 @@ func (b *builder) setCallFunc(fn *Function, e *ast.CallExpr, c *CallCommon) {
// emitCallArgs emits to f code for the actual parameters of call e to
// a (possibly built-in) function of effective type sig.
// The argument values are appended to args, which is then returned.
-//
func (b *builder) emitCallArgs(fn *Function, sig *types.Signature, e *ast.CallExpr, args []Value) []Value {
// f(x, y, z...): pass slice z straight through.
if e.Ellipsis != 0 {
@@ -938,7 +1088,7 @@ func (b *builder) emitCallArgs(fn *Function, sig *types.Signature, e *ast.CallEx
st := sig.Params().At(np).Type().(*types.Slice)
vt := st.Elem()
if len(varargs) == 0 {
- args = append(args, nilConst(st))
+ args = append(args, zeroConst(st))
} else {
// Replace a suffix of args with a slice containing it.
at := types.NewArray(vt, int64(len(varargs)))
@@ -965,13 +1115,12 @@ func (b *builder) emitCallArgs(fn *Function, sig *types.Signature, e *ast.CallEx
// setCall emits to fn code to evaluate all the parameters of a function
// call e, and populates *c with those values.
-//
func (b *builder) setCall(fn *Function, e *ast.CallExpr, c *CallCommon) {
// First deal with the f(...) part and optional receiver.
b.setCallFunc(fn, e, c)
// Then append the other actual parameters.
- sig, _ := fn.typeOf(e.Fun).Underlying().(*types.Signature)
+ sig, _ := typeparams.CoreType(fn.typeOf(e.Fun)).(*types.Signature)
if sig == nil {
panic(fmt.Sprintf("no signature for call of %s", e.Fun))
}
@@ -980,13 +1129,11 @@ func (b *builder) setCall(fn *Function, e *ast.CallExpr, c *CallCommon) {
// assignOp emits to fn code to perform loc <op>= val.
func (b *builder) assignOp(fn *Function, loc lvalue, val Value, op token.Token, pos token.Pos) {
- oldv := loc.load(fn)
- loc.store(fn, emitArith(fn, op, oldv, emitConv(fn, val, oldv.Type()), loc.typ(), pos))
+ loc.store(fn, emitArith(fn, op, loc.load(fn), val, loc.typ(), pos))
}
// localValueSpec emits to fn code to define all of the vars in the
// function-local ValueSpec, spec.
-//
func (b *builder) localValueSpec(fn *Function, spec *ast.ValueSpec) {
switch {
case len(spec.Values) == len(spec.Names):
@@ -1029,7 +1176,6 @@ func (b *builder) localValueSpec(fn *Function, spec *ast.ValueSpec) {
// isDef is true if this is a short variable declaration (:=).
//
// Note the similarity with localValueSpec.
-//
func (b *builder) assignStmt(fn *Function, lhss, rhss []ast.Expr, isDef bool) {
// Side effects of all LHSs and RHSs must occur in left-to-right order.
lvals := make([]lvalue, len(lhss))
@@ -1095,8 +1241,10 @@ func (b *builder) arrayLen(fn *Function, elts []ast.Expr) int64 {
//
// Because the elements of a composite literal may refer to the
// variables being updated, as in the second line below,
+//
// x := T{a: 1}
// x = T{a: x.a}
+//
// all the reads must occur before all the writes. Thus all stores to
// loc are emitted to the storebuf sb for later execution.
//
@@ -1104,10 +1252,33 @@ func (b *builder) arrayLen(fn *Function, elts []ast.Expr) int64 {
// case when the type name is implicit. e.g. in []*T{{}}, the inner
// literal has type *T behaves like &T{}.
// In that case, addr must hold a T, not a *T.
-//
func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero bool, sb *storebuf) {
- typ := deref(fn.typeOf(e))
- switch t := typ.Underlying().(type) {
+ typ := deref(fn.typeOf(e)) // type with name [may be type param]
+ t := deref(typeparams.CoreType(typ)).Underlying() // core type for comp lit case
+ // Computing typ and t is subtle as these handle pointer types.
+ // For example, &T{...} is valid even for maps and slices.
+ // Also typ should refer to T (not *T) while t should be the core type of T.
+ //
+ // To show the ordering to take into account, consider the composite literal
+ // expressions `&T{f: 1}` and `{f: 1}` within the expression `[]S{{f: 1}}` here:
+ // type N struct{f int}
+ // func _[T N, S *N]() {
+ // _ = &T{f: 1}
+ // _ = []S{{f: 1}}
+ // }
+ // For `&T{f: 1}`, we compute `typ` and `t` as:
+ // typeOf(&T{f: 1}) == *T
+ // deref(*T) == T (typ)
+ // CoreType(T) == N
+ // deref(N) == N
+ // N.Underlying() == struct{f int} (t)
+ // For `{f: 1}` in `[]S{{f: 1}}`, we compute `typ` and `t` as:
+ // typeOf({f: 1}) == S
+ // deref(S) == S (typ)
+ // CoreType(S) == *N
+ // deref(*N) == N
+ // N.Underlying() == struct{f int} (t)
+ switch t := t.(type) {
case *types.Struct:
if !isZero && len(e.Elts) != t.NumFields() {
// memclear
@@ -1135,6 +1306,7 @@ func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero
X: addr,
Field: fieldIndex,
}
+ faddr.setPos(pos)
faddr.setType(types.NewPointer(sf.Type()))
fn.emit(faddr)
b.assign(fn, &address{addr: faddr, pos: pos, expr: e}, e, isZero, sb)
@@ -1243,7 +1415,6 @@ func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero
// switchStmt emits to fn code for the switch statement s, optionally
// labelled by label.
-//
func (b *builder) switchStmt(fn *Function, s *ast.SwitchStmt, label *lblock) {
// We treat SwitchStmt like a sequential if-else chain.
// Multiway dispatch can be recovered later by ssautil.Switches()
@@ -1329,7 +1500,6 @@ func (b *builder) switchStmt(fn *Function, s *ast.SwitchStmt, label *lblock) {
// typeSwitchStmt emits to fn code for the type switch statement s, optionally
// labelled by label.
-//
func (b *builder) typeSwitchStmt(fn *Function, s *ast.TypeSwitchStmt, label *lblock) {
// We treat TypeSwitchStmt like a sequential if-else chain.
// Multiway dispatch can be recovered later by ssautil.Switches().
@@ -1407,7 +1577,7 @@ func (b *builder) typeSwitchStmt(fn *Function, s *ast.TypeSwitchStmt, label *lbl
casetype = fn.typeOf(cond)
var condv Value
if casetype == tUntypedNil {
- condv = emitCompare(fn, token.EQL, x, nilConst(x.Type()), cond.Pos())
+ condv = emitCompare(fn, token.EQL, x, zeroConst(x.Type()), cond.Pos())
ti = x
} else {
yok := emitTypeTest(fn, x, casetype, cc.Case)
@@ -1452,7 +1622,6 @@ func (b *builder) typeCaseBody(fn *Function, cc *ast.CaseClause, x Value, done *
// selectStmt emits to fn code for the select statement s, optionally
// labelled by label.
-//
func (b *builder) selectStmt(fn *Function, s *ast.SelectStmt, label *lblock) {
// A blocking select of a single case degenerates to a
// simple send or receive.
@@ -1491,12 +1660,12 @@ func (b *builder) selectStmt(fn *Function, s *ast.SelectStmt, label *lblock) {
case *ast.SendStmt: // ch<- i
ch := b.expr(fn, comm.Chan)
+ chtyp := typeparams.CoreType(fn.typ(ch.Type())).(*types.Chan)
st = &SelectState{
Dir: types.SendOnly,
Chan: ch,
- Send: emitConv(fn, b.expr(fn, comm.Value),
- ch.Type().Underlying().(*types.Chan).Elem()),
- Pos: comm.Arrow,
+ Send: emitConv(fn, b.expr(fn, comm.Value), chtyp.Elem()),
+ Pos: comm.Arrow,
}
if debugInfo {
st.DebugNode = comm
@@ -1548,8 +1717,8 @@ func (b *builder) selectStmt(fn *Function, s *ast.SelectStmt, label *lblock) {
vars = append(vars, varIndex, varOk)
for _, st := range states {
if st.Dir == types.RecvOnly {
- tElem := st.Chan.Type().Underlying().(*types.Chan).Elem()
- vars = append(vars, anonVar(tElem))
+ chtyp := typeparams.CoreType(fn.typ(st.Chan.Type())).(*types.Chan)
+ vars = append(vars, anonVar(chtyp.Elem()))
}
}
sel.setType(types.NewTuple(vars...))
@@ -1634,7 +1803,6 @@ func (b *builder) selectStmt(fn *Function, s *ast.SelectStmt, label *lblock) {
// forStmt emits to fn code for the for statement s, optionally
// labelled by label.
-//
func (b *builder) forStmt(fn *Function, s *ast.ForStmt, label *lblock) {
// ...init...
// jump loop
@@ -1691,7 +1859,6 @@ func (b *builder) forStmt(fn *Function, s *ast.ForStmt, label *lblock) {
// over array, *array or slice value x.
// The v result is defined only if tv is non-nil.
// forPos is the position of the "for" token.
-//
func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type, pos token.Pos) (k, v Value, loop, done *BasicBlock) {
//
// length = len(x)
@@ -1715,6 +1882,8 @@ func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type, pos token.P
// elimination if x is pure, static unrolling, etc.
// Ranging over a nil *array may have >0 iterations.
// We still generate code for x, in case it has effects.
+ //
+ // TypeParams do not have constant length. Use underlying instead of core type.
length = intConst(arr.Len())
} else {
// length = len(x).
@@ -1747,7 +1916,7 @@ func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type, pos token.P
k = emitLoad(fn, index)
if tv != nil {
- switch t := x.Type().Underlying().(type) {
+ switch t := typeparams.CoreType(x.Type()).(type) {
case *types.Array:
instr := &Index{
X: x,
@@ -1786,7 +1955,6 @@ func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type, pos token.P
// Range/Next/Extract to iterate over map or string value x.
// tk and tv are the types of the key/value results k and v, or nil
// if the respective component is not wanted.
-//
func (b *builder) rangeIter(fn *Function, x Value, tk, tv types.Type, pos token.Pos) (k, v Value, loop, done *BasicBlock) {
//
// it = range x
@@ -1818,11 +1986,9 @@ func (b *builder) rangeIter(fn *Function, x Value, tk, tv types.Type, pos token.
emitJump(fn, loop)
fn.currentBlock = loop
- _, isString := x.Type().Underlying().(*types.Basic)
-
okv := &Next{
Iter: it,
- IsString: isString,
+ IsString: isBasic(typeparams.CoreType(x.Type())),
}
okv.setType(types.NewTuple(
varOk,
@@ -1850,7 +2016,6 @@ func (b *builder) rangeIter(fn *Function, x Value, tk, tv types.Type, pos token.
// tk is the channel's element type, or nil if the k result is
// not wanted
// pos is the position of the '=' or ':=' token.
-//
func (b *builder) rangeChan(fn *Function, x Value, tk types.Type, pos token.Pos) (k Value, loop, done *BasicBlock) {
//
// loop: (target of continue)
@@ -1873,7 +2038,7 @@ func (b *builder) rangeChan(fn *Function, x Value, tk types.Type, pos token.Pos)
}
recv.setPos(pos)
recv.setType(types.NewTuple(
- newVar("k", x.Type().Underlying().(*types.Chan).Elem()),
+ newVar("k", typeparams.CoreType(x.Type()).(*types.Chan).Elem()),
varOk,
))
ko := fn.emit(recv)
@@ -1889,7 +2054,6 @@ func (b *builder) rangeChan(fn *Function, x Value, tk types.Type, pos token.Pos)
// rangeStmt emits to fn code for the range statement s, optionally
// labelled by label.
-//
func (b *builder) rangeStmt(fn *Function, s *ast.RangeStmt, label *lblock) {
var tk, tv types.Type
if s.Key != nil && !isBlankIdent(s.Key) {
@@ -1918,7 +2082,7 @@ func (b *builder) rangeStmt(fn *Function, s *ast.RangeStmt, label *lblock) {
var k, v Value
var loop, done *BasicBlock
- switch rt := x.Type().Underlying().(type) {
+ switch rt := typeparams.CoreType(x.Type()).(type) {
case *types.Slice, *types.Array, *types.Pointer: // *array
k, v, loop, done = b.rangeIndexed(fn, x, tv, s.For)
@@ -1996,11 +2160,11 @@ start:
b.expr(fn, s.X)
case *ast.SendStmt:
+ chtyp := typeparams.CoreType(fn.typeOf(s.Chan)).(*types.Chan)
fn.emit(&Send{
Chan: b.expr(fn, s.Chan),
- X: emitConv(fn, b.expr(fn, s.Value),
- fn.typeOf(s.Chan).Underlying().(*types.Chan).Elem()),
- pos: s.Arrow,
+ X: emitConv(fn, b.expr(fn, s.Value), chtyp.Elem()),
+ pos: s.Arrow,
})
case *ast.IncDecStmt:
@@ -2157,6 +2321,18 @@ start:
// buildFunction builds SSA code for the body of function fn. Idempotent.
func (b *builder) buildFunction(fn *Function) {
+ if !fn.built {
+ assert(fn.parent == nil, "anonymous functions should not be built by buildFunction()")
+ b.buildFunctionBody(fn)
+ fn.done()
+ }
+}
+
+// buildFunctionBody builds SSA code for the body of function fn.
+//
+// fn is not done building until fn.done() is called.
+func (b *builder) buildFunctionBody(fn *Function) {
+ // TODO(taking): see if this check is reachable.
if fn.Blocks != nil {
return // building already started
}
@@ -2166,7 +2342,9 @@ func (b *builder) buildFunction(fn *Function) {
var functype *ast.FuncType
switch n := fn.syntax.(type) {
case nil:
- return // not a Go source function. (Synthetic, or from object file.)
+ if fn.Params != nil {
+ return // not a Go source function. (Synthetic, or from object file.)
+ }
case *ast.FuncDecl:
functype = n.Type
recvField = n.Recv
@@ -2198,6 +2376,13 @@ func (b *builder) buildFunction(fn *Function) {
}
return
}
+
+ // Build instantiation wrapper around generic body?
+ if fn.topLevelOrigin != nil && fn.subst == nil {
+ buildInstantiationWrapper(fn)
+ return
+ }
+
if fn.Prog.mode&LogSource != 0 {
defer logStack("build function %s @ %s", fn, fn.Prog.Fset.Position(fn.pos))()
}
@@ -2218,22 +2403,45 @@ func (b *builder) buildFunction(fn *Function) {
fn.finishBody()
}
-// buildFuncDecl builds SSA code for the function or method declared
-// by decl in package pkg.
+// buildCreated does the BUILD phase for each function created by builder that is not yet BUILT.
+// Functions are built using buildFunction.
//
-func (b *builder) buildFuncDecl(pkg *Package, decl *ast.FuncDecl) {
- id := decl.Name
- if isBlankIdent(id) {
- return // discard
+// May add types that require runtime type information to builder.
+func (b *builder) buildCreated() {
+ for ; b.finished < b.created.Len(); b.finished++ {
+ fn := b.created.At(b.finished)
+ b.buildFunction(fn)
}
- fn := pkg.objects[pkg.info.Defs[id]].(*Function)
- if decl.Recv == nil && id.Name == "init" {
- var v Call
- v.Call.Value = fn
- v.setType(types.NewTuple())
- pkg.init.emit(&v)
+}
+
+// Adds any needed runtime type information for the created functions.
+//
+// May add newly CREATEd functions that may need to be built or runtime type information.
+//
+// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu)
+func (b *builder) needsRuntimeTypes() {
+ if b.created.Len() == 0 {
+ return
+ }
+ prog := b.created.At(0).Prog
+
+ var rtypes []types.Type
+ for ; b.rtypes < b.finished; b.rtypes++ {
+ fn := b.created.At(b.rtypes)
+ rtypes = append(rtypes, mayNeedRuntimeTypes(fn)...)
+ }
+
+ // Calling prog.needMethodsOf(T) on a basic type T is a no-op.
+ // Filter out the basic types to reduce acquiring prog.methodsMu.
+ rtypes = nonbasicTypes(rtypes)
+
+ for _, T := range rtypes {
+ prog.needMethodsOf(T, b.created)
}
- b.buildFunction(fn)
+}
+
+func (b *builder) done() bool {
+ return b.rtypes >= b.created.Len()
}
// Build calls Package.Build for each package in prog.
@@ -2243,7 +2451,6 @@ func (b *builder) buildFuncDecl(pkg *Package, decl *ast.FuncDecl) {
// need only build a single package.
//
// Build is idempotent and thread-safe.
-//
func (prog *Program) Build() {
var wg sync.WaitGroup
for _, p := range prog.packages {
@@ -2267,7 +2474,6 @@ func (prog *Program) Build() {
// error-free).
//
// Build is idempotent and thread-safe.
-//
func (p *Package) Build() { p.buildOnce.Do(p.build) }
func (p *Package) build() {
@@ -2276,16 +2482,30 @@ func (p *Package) build() {
}
// Ensure we have runtime type info for all exported members.
+ // Additionally filter for just concrete types that can be runtime types.
+ //
// TODO(adonovan): ideally belongs in memberFromObject, but
// that would require package creation in topological order.
for name, mem := range p.Members {
- if ast.IsExported(name) {
- p.Prog.needMethodsOf(mem.Type())
+ isGround := func(m Member) bool {
+ switch m := m.(type) {
+ case *Type:
+ named, _ := m.Type().(*types.Named)
+ return named == nil || typeparams.ForNamed(named) == nil
+ case *Function:
+ return m.typeparams.Len() == 0
+ }
+ return true // *NamedConst, *Global
+ }
+ if ast.IsExported(name) && isGround(mem) {
+ p.Prog.needMethodsOf(mem.Type(), &p.created)
}
}
if p.Prog.mode&LogSource != 0 {
defer logStack("build %s", p)()
}
+
+ b := builder{created: &p.created}
init := p.init
init.startBody()
@@ -2314,9 +2534,10 @@ func (p *Package) build() {
}
}
- var b builder
-
// Initialize package-level vars in correct order.
+ if len(p.info.InitOrder) > 0 && len(p.files) == 0 {
+ panic("no source files provided for package. cannot initialize globals")
+ }
for _, varinit := range p.info.InitOrder {
if init.Prog.mode&LogSource != 0 {
fmt.Fprintf(os.Stderr, "build global initializer %v @ %s\n",
@@ -2343,13 +2564,18 @@ func (p *Package) build() {
}
}
- // Build all package-level functions, init functions
- // and methods, including unreachable/blank ones.
- // We build them in source order, but it's not significant.
+ // Call all of the declared init() functions in source order.
for _, file := range p.files {
for _, decl := range file.Decls {
if decl, ok := decl.(*ast.FuncDecl); ok {
- b.buildFuncDecl(p, decl)
+ id := decl.Name
+ if !isBlankIdent(id) && id.Name == "init" && decl.Recv == nil {
+ fn := p.objects[p.info.Defs[id]].(*Function)
+ var v Call
+ v.Call.Value = fn
+ v.setType(types.NewTuple())
+ p.init.emit(&v)
+ }
}
}
}
@@ -2361,8 +2587,28 @@ func (p *Package) build() {
}
init.emit(new(Return))
init.finishBody()
+ init.done()
+
+ // Build all CREATEd functions and add runtime types.
+ // These Functions include package-level functions, init functions, methods, and synthetic (including unreachable/blank ones).
+ // Builds any functions CREATEd while building this package.
+ //
+ // Initially the created functions for the package are:
+ // [init, decl0, ... , declN]
+ // Where decl0, ..., declN are declared functions in source order, but it's not significant.
+ //
+ // As these are built, more functions (function literals, wrappers, etc.) can be CREATEd.
+ // Iterate until we reach a fixed point.
+ //
+ // Wait for init() to be BUILT as that cannot be built by buildFunction().
+ //
+ for !b.done() {
+ b.buildCreated() // build any CREATEd and not BUILT function. May add runtime types.
+ b.needsRuntimeTypes() // Add all of the runtime type information. May CREATE Functions.
+ }
- p.info = nil // We no longer need ASTs or go/types deductions.
+ p.info = nil // We no longer need ASTs or go/types deductions.
+ p.created = nil // We no longer need created functions.
if p.Prog.mode&SanityCheckFunctions != 0 {
sanityCheckPackage(p)
diff --git a/go/ssa/builder_generic_test.go b/go/ssa/builder_generic_test.go
new file mode 100644
index 000000000..2588f74c5
--- /dev/null
+++ b/go/ssa/builder_generic_test.go
@@ -0,0 +1,679 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa_test
+
+import (
+ "fmt"
+ "go/parser"
+ "go/token"
+ "reflect"
+ "sort"
+ "testing"
+
+ "golang.org/x/tools/go/expect"
+ "golang.org/x/tools/go/loader"
+ "golang.org/x/tools/go/ssa"
+ "golang.org/x/tools/internal/typeparams"
+)
+
+// TestGenericBodies tests that bodies of generic functions and methods containing
+// different constructs can be built in BuilderMode(0).
+//
+// Each test specifies the contents of package containing a single go file.
+// Each call print(arg0, arg1, ...) to the builtin print function
+// in ssa is correlated a comment at the end of the line of the form:
+//
+// //@ types(a, b, c)
+//
+// where a, b and c are the types of the arguments to the print call
+// serialized using go/types.Type.String().
+// See x/tools/go/expect for details on the syntax.
+func TestGenericBodies(t *testing.T) {
+ if !typeparams.Enabled {
+ t.Skip("TestGenericBodies requires type parameters")
+ }
+ for _, test := range []struct {
+ pkg string // name of the package.
+ contents string // contents of the Go package.
+ }{
+ {
+ pkg: "p",
+ contents: `
+ package p
+
+ func f(x int) {
+ var i interface{}
+ print(i, 0) //@ types("interface{}", int)
+ print() //@ types()
+ print(x) //@ types(int)
+ }
+ `,
+ },
+ {
+ pkg: "q",
+ contents: `
+ package q
+
+ func f[T any](x T) {
+ print(x) //@ types(T)
+ }
+ `,
+ },
+ {
+ pkg: "r",
+ contents: `
+ package r
+
+ func f[T ~int]() {
+ var x T
+ print(x) //@ types(T)
+ }
+ `,
+ },
+ {
+ pkg: "s",
+ contents: `
+ package s
+
+ func a[T ~[4]byte](x T) {
+ for k, v := range x {
+ print(x, k, v) //@ types(T, int, byte)
+ }
+ }
+ func b[T ~*[4]byte](x T) {
+ for k, v := range x {
+ print(x, k, v) //@ types(T, int, byte)
+ }
+ }
+ func c[T ~[]byte](x T) {
+ for k, v := range x {
+ print(x, k, v) //@ types(T, int, byte)
+ }
+ }
+ func d[T ~string](x T) {
+ for k, v := range x {
+ print(x, k, v) //@ types(T, int, rune)
+ }
+ }
+ func e[T ~map[int]string](x T) {
+ for k, v := range x {
+ print(x, k, v) //@ types(T, int, string)
+ }
+ }
+ func f[T ~chan string](x T) {
+ for v := range x {
+ print(x, v) //@ types(T, string)
+ }
+ }
+
+ func From() {
+ type A [4]byte
+ print(a[A]) //@ types("func(x s.A)")
+
+ type B *[4]byte
+ print(b[B]) //@ types("func(x s.B)")
+
+ type C []byte
+ print(c[C]) //@ types("func(x s.C)")
+
+ type D string
+ print(d[D]) //@ types("func(x s.D)")
+
+ type E map[int]string
+ print(e[E]) //@ types("func(x s.E)")
+
+ type F chan string
+ print(f[F]) //@ types("func(x s.F)")
+ }
+ `,
+ },
+ {
+ pkg: "t",
+ contents: `
+ package t
+
+ func f[S any, T ~chan S](x T) {
+ for v := range x {
+ print(x, v) //@ types(T, S)
+ }
+ }
+
+ func From() {
+ type F chan string
+ print(f[string, F]) //@ types("func(x t.F)")
+ }
+ `,
+ },
+ {
+ pkg: "u",
+ contents: `
+ package u
+
+ func fibonacci[T ~chan int](c, quit T) {
+ x, y := 0, 1
+ for {
+ select {
+ case c <- x:
+ x, y = y, x+y
+ case <-quit:
+ print(c, quit, x, y) //@ types(T, T, int, int)
+ return
+ }
+ }
+ }
+ func start[T ~chan int](c, quit T) {
+ go func() {
+ for i := 0; i < 10; i++ {
+ print(<-c) //@ types(int)
+ }
+ quit <- 0
+ }()
+ }
+ func From() {
+ type F chan int
+ c := make(F)
+ quit := make(F)
+ print(start[F], c, quit) //@ types("func(c u.F, quit u.F)", "u.F", "u.F")
+ print(fibonacci[F], c, quit) //@ types("func(c u.F, quit u.F)", "u.F", "u.F")
+ }
+ `,
+ },
+ {
+ pkg: "v",
+ contents: `
+ package v
+
+ func f[T ~struct{ x int; y string }](i int) T {
+ u := []T{ T{0, "lorem"}, T{1, "ipsum"}}
+ return u[i]
+ }
+ func From() {
+ type S struct{ x int; y string }
+ print(f[S]) //@ types("func(i int) v.S")
+ }
+ `,
+ },
+ {
+ pkg: "w",
+ contents: `
+ package w
+
+ func f[T ~[4]int8](x T, l, h int) []int8 {
+ return x[l:h]
+ }
+ func g[T ~*[4]int16](x T, l, h int) []int16 {
+ return x[l:h]
+ }
+ func h[T ~[]int32](x T, l, h int) T {
+ return x[l:h]
+ }
+ func From() {
+ type F [4]int8
+ type G *[4]int16
+ type H []int32
+ print(f[F](F{}, 0, 0)) //@ types("[]int8")
+ print(g[G](nil, 0, 0)) //@ types("[]int16")
+ print(h[H](nil, 0, 0)) //@ types("w.H")
+ }
+ `,
+ },
+ {
+ pkg: "x",
+ contents: `
+ package x
+
+ func h[E any, T ~[]E](x T, l, h int) []E {
+ s := x[l:h]
+ print(s) //@ types("T")
+ return s
+ }
+ func From() {
+ type H []int32
+ print(h[int32, H](nil, 0, 0)) //@ types("[]int32")
+ }
+ `,
+ },
+ {
+ pkg: "y",
+ contents: `
+ package y
+
+ // Test "make" builtin with different forms on core types and
+ // when capacities are constants or variable.
+ func h[E any, T ~[]E](m, n int) {
+ print(make(T, 3)) //@ types(T)
+ print(make(T, 3, 5)) //@ types(T)
+ print(make(T, m)) //@ types(T)
+ print(make(T, m, n)) //@ types(T)
+ }
+ func i[K comparable, E any, T ~map[K]E](m int) {
+ print(make(T)) //@ types(T)
+ print(make(T, 5)) //@ types(T)
+ print(make(T, m)) //@ types(T)
+ }
+ func j[E any, T ~chan E](m int) {
+ print(make(T)) //@ types(T)
+ print(make(T, 6)) //@ types(T)
+ print(make(T, m)) //@ types(T)
+ }
+ func From() {
+ type H []int32
+ h[int32, H](3, 4)
+ type I map[int8]H
+ i[int8, H, I](5)
+ type J chan I
+ j[I, J](6)
+ }
+ `,
+ },
+ {
+ pkg: "z",
+ contents: `
+ package z
+
+ func h[T ~[4]int](x T) {
+ print(len(x), cap(x)) //@ types(int, int)
+ }
+ func i[T ~[4]byte | []int | ~chan uint8](x T) {
+ print(len(x), cap(x)) //@ types(int, int)
+ }
+ func j[T ~[4]int | any | map[string]int]() {
+ print(new(T)) //@ types("*T")
+ }
+ func k[T ~[4]int | any | map[string]int](x T) {
+ print(x) //@ types(T)
+ panic(x)
+ }
+ `,
+ },
+ {
+ pkg: "a",
+ contents: `
+ package a
+
+ func f[E any, F ~func() E](x F) {
+ print(x, x()) //@ types(F, E)
+ }
+ func From() {
+ type T func() int
+ f[int, T](func() int { return 0 })
+ f[int, func() int](func() int { return 1 })
+ }
+ `,
+ },
+ {
+ pkg: "b",
+ contents: `
+ package b
+
+ func f[E any, M ~map[string]E](m M) {
+ y, ok := m["lorem"]
+ print(m, y, ok) //@ types(M, E, bool)
+ }
+ func From() {
+ type O map[string][]int
+ f(O{"lorem": []int{0, 1, 2, 3}})
+ }
+ `,
+ },
+ {
+ pkg: "c",
+ contents: `
+ package c
+
+ func a[T interface{ []int64 | [5]int64 }](x T) int64 {
+ print(x, x[2], x[3]) //@ types(T, int64, int64)
+ x[2] = 5
+ return x[3]
+ }
+ func b[T interface{ []byte | string }](x T) byte {
+ print(x, x[3]) //@ types(T, byte)
+ return x[3]
+ }
+ func c[T interface{ []byte }](x T) byte {
+ print(x, x[2], x[3]) //@ types(T, byte, byte)
+ x[2] = 'b'
+ return x[3]
+ }
+ func d[T interface{ map[int]int64 }](x T) int64 {
+ print(x, x[2], x[3]) //@ types(T, int64, int64)
+ x[2] = 43
+ return x[3]
+ }
+ func e[T ~string](t T) {
+ print(t, t[0]) //@ types(T, uint8)
+ }
+ func f[T ~string|[]byte](t T) {
+ print(t, t[0]) //@ types(T, uint8)
+ }
+ func g[T []byte](t T) {
+ print(t, t[0]) //@ types(T, byte)
+ }
+ func h[T ~[4]int|[]int](t T) {
+ print(t, t[0]) //@ types(T, int)
+ }
+ func i[T ~[4]int|*[4]int|[]int](t T) {
+ print(t, t[0]) //@ types(T, int)
+ }
+ func j[T ~[4]int|*[4]int|[]int](t T) {
+ print(t, &t[0]) //@ types(T, "*int")
+ }
+ `,
+ },
+ {
+ pkg: "d",
+ contents: `
+ package d
+
+ type MyInt int
+ type Other int
+ type MyInterface interface{ foo() }
+
+ // ChangeType tests
+ func ct0(x int) { v := MyInt(x); print(x, v) /*@ types(int, "d.MyInt")*/ }
+ func ct1[T MyInt | Other, S int ](x S) { v := T(x); print(x, v) /*@ types(S, T)*/ }
+ func ct2[T int, S MyInt | int ](x S) { v := T(x); print(x, v) /*@ types(S, T)*/ }
+ func ct3[T MyInt | Other, S MyInt | int ](x S) { v := T(x) ; print(x, v) /*@ types(S, T)*/ }
+
+ // Convert tests
+ func co0[T int | int8](x MyInt) { v := T(x); print(x, v) /*@ types("d.MyInt", T)*/}
+ func co1[T int | int8](x T) { v := MyInt(x); print(x, v) /*@ types(T, "d.MyInt")*/ }
+ func co2[S, T int | int8](x T) { v := S(x); print(x, v) /*@ types(T, S)*/ }
+
+ // MakeInterface tests
+ func mi0[T MyInterface](x T) { v := MyInterface(x); print(x, v) /*@ types(T, "d.MyInterface")*/ }
+
+ // NewConst tests
+ func nc0[T any]() { v := (*T)(nil); print(v) /*@ types("*T")*/}
+
+ // SliceToArrayPointer
+ func sl0[T *[4]int | *[2]int](x []int) { v := T(x); print(x, v) /*@ types("[]int", T)*/ }
+ func sl1[T *[4]int | *[2]int, S []int](x S) { v := T(x); print(x, v) /*@ types(S, T)*/ }
+ `,
+ },
+ {
+ pkg: "e",
+ contents: `
+ package e
+
+ func c[T interface{ foo() string }](x T) {
+ print(x, x.foo, x.foo()) /*@ types(T, "func() string", string)*/
+ }
+ `,
+ },
+ {
+ pkg: "f",
+ contents: `package f
+
+ func eq[T comparable](t T, i interface{}) bool {
+ return t == i
+ }
+ `,
+ },
+ {
+ pkg: "g",
+ contents: `package g
+ type S struct{ f int }
+ func c[P *S]() []P { return []P{{f: 1}} }
+ `,
+ },
+ {
+ pkg: "h",
+ contents: `package h
+ func sign[bytes []byte | string](s bytes) (bool, bool) {
+ neg := false
+ if len(s) > 0 && (s[0] == '-' || s[0] == '+') {
+ neg = s[0] == '-'
+ s = s[1:]
+ }
+ return !neg, len(s) > 0
+ }`,
+ },
+ {
+ pkg: "i",
+ contents: `package i
+ func digits[bytes []byte | string](s bytes) bool {
+ for _, c := range []byte(s) {
+ if c < '0' || '9' < c {
+ return false
+ }
+ }
+ return true
+ }`,
+ },
+ {
+ pkg: "j",
+ contents: `
+ package j
+
+ type E interface{}
+
+ func Foo[T E, PT interface{ *T }]() T {
+ pt := PT(new(T))
+ x := *pt
+ print(x) /*@ types(T)*/
+ return x
+ }
+ `,
+ },
+ } {
+ test := test
+ t.Run(test.pkg, func(t *testing.T) {
+ // Parse
+ conf := loader.Config{ParserMode: parser.ParseComments}
+ fname := test.pkg + ".go"
+ f, err := conf.ParseFile(fname, test.contents)
+ if err != nil {
+ t.Fatalf("parse: %v", err)
+ }
+ conf.CreateFromFiles(test.pkg, f)
+
+ // Load
+ lprog, err := conf.Load()
+ if err != nil {
+ t.Fatalf("Load: %v", err)
+ }
+
+ // Create and build SSA
+ prog := ssa.NewProgram(lprog.Fset, ssa.SanityCheckFunctions)
+ for _, info := range lprog.AllPackages {
+ if info.TransitivelyErrorFree {
+ prog.CreatePackage(info.Pkg, info.Files, &info.Info, info.Importable)
+ }
+ }
+ p := prog.Package(lprog.Package(test.pkg).Pkg)
+ p.Build()
+
+ // Collect calls to the builtin print function.
+ probes := make(map[*ssa.CallCommon]bool)
+ for _, mem := range p.Members {
+ if fn, ok := mem.(*ssa.Function); ok {
+ for _, bb := range fn.Blocks {
+ for _, i := range bb.Instrs {
+ if i, ok := i.(ssa.CallInstruction); ok {
+ call := i.Common()
+ if b, ok := call.Value.(*ssa.Builtin); ok && b.Name() == "print" {
+ probes[i.Common()] = true
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Collect all notes in f, i.e. comments starting with "//@ types".
+ notes, err := expect.ExtractGo(prog.Fset, f)
+ if err != nil {
+ t.Errorf("expect.ExtractGo: %v", err)
+ }
+
+ // Matches each probe with a note that has the same line.
+ sameLine := func(x, y token.Pos) bool {
+ xp := prog.Fset.Position(x)
+ yp := prog.Fset.Position(y)
+ return xp.Filename == yp.Filename && xp.Line == yp.Line
+ }
+ expectations := make(map[*ssa.CallCommon]*expect.Note)
+ for call := range probes {
+ var match *expect.Note
+ for _, note := range notes {
+ if note.Name == "types" && sameLine(call.Pos(), note.Pos) {
+ match = note // first match is good enough.
+ break
+ }
+ }
+ if match != nil {
+ expectations[call] = match
+ } else {
+ t.Errorf("Unmatched probe: %v", call)
+ }
+ }
+
+ // Check each expectation.
+ for call, note := range expectations {
+ var args []string
+ for _, a := range call.Args {
+ args = append(args, a.Type().String())
+ }
+ if got, want := fmt.Sprint(args), fmt.Sprint(note.Args); got != want {
+ t.Errorf("Arguments to print() were expected to be %q. got %q", want, got)
+ }
+ }
+ })
+ }
+}
+
+// TestInstructionString tests serializing instructions via Instruction.String().
+func TestInstructionString(t *testing.T) {
+ if !typeparams.Enabled {
+ t.Skip("TestInstructionString requires type parameters")
+ }
+ // Tests (ssa.Instruction).String(). Instructions are from a single go file.
+ // The Instructions tested are those that match a comment of the form:
+ //
+ // //@ instrs(f, kind, strs...)
+ //
+ // where f is the name of the function, kind is the type of the instructions matched
+ // within the function, and tests that the String() value for all of the instructions
+ // matched of String() is strs (in some order).
+ // See x/tools/go/expect for details on the syntax.
+
+ const contents = `
+ package p
+
+ //@ instrs("f", "*ssa.TypeAssert")
+ //@ instrs("f", "*ssa.Call", "print(nil:interface{}, 0:int)")
+ func f(x int) { // non-generic smoke test.
+ var i interface{}
+ print(i, 0)
+ }
+
+ //@ instrs("h", "*ssa.Alloc", "local T (u)")
+ //@ instrs("h", "*ssa.FieldAddr", "&t0.x [#0]")
+ func h[T ~struct{ x string }]() T {
+ u := T{"lorem"}
+ return u
+ }
+
+ //@ instrs("c", "*ssa.TypeAssert", "typeassert t0.(interface{})")
+ //@ instrs("c", "*ssa.Call", "invoke x.foo()")
+ func c[T interface{ foo() string }](x T) {
+ _ = x.foo
+ _ = x.foo()
+ }
+
+ //@ instrs("d", "*ssa.TypeAssert", "typeassert t0.(interface{})")
+ //@ instrs("d", "*ssa.Call", "invoke x.foo()")
+ func d[T interface{ foo() string; comparable }](x T) {
+ _ = x.foo
+ _ = x.foo()
+ }
+ `
+
+ // Parse
+ conf := loader.Config{ParserMode: parser.ParseComments}
+ const fname = "p.go"
+ f, err := conf.ParseFile(fname, contents)
+ if err != nil {
+ t.Fatalf("parse: %v", err)
+ }
+ conf.CreateFromFiles("p", f)
+
+ // Load
+ lprog, err := conf.Load()
+ if err != nil {
+ t.Fatalf("Load: %v", err)
+ }
+
+ // Create and build SSA
+ prog := ssa.NewProgram(lprog.Fset, ssa.SanityCheckFunctions)
+ for _, info := range lprog.AllPackages {
+ if info.TransitivelyErrorFree {
+ prog.CreatePackage(info.Pkg, info.Files, &info.Info, info.Importable)
+ }
+ }
+ p := prog.Package(lprog.Package("p").Pkg)
+ p.Build()
+
+ // Collect all notes in f, i.e. comments starting with "//@ instr".
+ notes, err := expect.ExtractGo(prog.Fset, f)
+ if err != nil {
+ t.Errorf("expect.ExtractGo: %v", err)
+ }
+
+ // Expectation is a {function, type string} -> {want, matches}
+ // where matches is all Instructions.String() that match the key.
+ // Each expecation is that some permutation of matches is wants.
+ type expKey struct {
+ function string
+ kind string
+ }
+ type expValue struct {
+ wants []string
+ matches []string
+ }
+ expectations := make(map[expKey]*expValue)
+ for _, note := range notes {
+ if note.Name == "instrs" {
+ if len(note.Args) < 2 {
+ t.Error("Had @instrs annotation without at least 2 arguments")
+ continue
+ }
+ fn, kind := fmt.Sprint(note.Args[0]), fmt.Sprint(note.Args[1])
+ var wants []string
+ for _, arg := range note.Args[2:] {
+ wants = append(wants, fmt.Sprint(arg))
+ }
+ expectations[expKey{fn, kind}] = &expValue{wants, nil}
+ }
+ }
+
+ // Collect all Instructions that match the expectations.
+ for _, mem := range p.Members {
+ if fn, ok := mem.(*ssa.Function); ok {
+ for _, bb := range fn.Blocks {
+ for _, i := range bb.Instrs {
+ kind := fmt.Sprintf("%T", i)
+ if e := expectations[expKey{fn.Name(), kind}]; e != nil {
+ e.matches = append(e.matches, i.String())
+ }
+ }
+ }
+ }
+ }
+
+ // Check each expectation.
+ for key, value := range expectations {
+ if _, ok := p.Members[key.function]; !ok {
+ t.Errorf("Expectation on %s does not match a member in %s", key.function, p.Pkg.Name())
+ }
+ got, want := value.matches, value.wants
+ sort.Strings(got)
+ sort.Strings(want)
+ if !reflect.DeepEqual(want, got) {
+ t.Errorf("Within %s wanted instructions of kind %s: %q. got %q", key.function, key.kind, want, got)
+ }
+ }
+}
diff --git a/go/ssa/builder_go117_test.go b/go/ssa/builder_go117_test.go
index f6545e5e2..699859705 100644
--- a/go/ssa/builder_go117_test.go
+++ b/go/ssa/builder_go117_test.go
@@ -57,7 +57,6 @@ func TestBuildPackageFailuresGo117(t *testing.T) {
importer types.Importer
}{
{"slice to array pointer - source is not a slice", "package p; var s [4]byte; var _ = (*[4]byte)(s)", nil},
- {"slice to array pointer - dest is not a pointer", "package p; var s []byte; var _ = ([4]byte)(s)", nil},
{"slice to array pointer - dest pointer elem is not an array", "package p; var s []byte; var _ = (*byte)(s)", nil},
}
diff --git a/go/ssa/builder_go120_test.go b/go/ssa/builder_go120_test.go
new file mode 100644
index 000000000..acdd182c5
--- /dev/null
+++ b/go/ssa/builder_go120_test.go
@@ -0,0 +1,102 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.20
+// +build go1.20
+
+package ssa_test
+
+import (
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "testing"
+
+ "golang.org/x/tools/go/ssa"
+ "golang.org/x/tools/go/ssa/ssautil"
+)
+
+func TestBuildPackageGo120(t *testing.T) {
+ tests := []struct {
+ name string
+ src string
+ importer types.Importer
+ }{
+ {"slice to array", "package p; var s []byte; var _ = ([4]byte)(s)", nil},
+ {"slice to zero length array", "package p; var s []byte; var _ = ([0]byte)(s)", nil},
+ {"slice to zero length array type parameter", "package p; var s []byte; func f[T ~[0]byte]() { tmp := (T)(s); var z T; _ = tmp == z}", nil},
+ {"slice to non-zero length array type parameter", "package p; var s []byte; func h[T ~[1]byte | [4]byte]() { tmp := T(s); var z T; _ = tmp == z}", nil},
+ {"slice to maybe-zero length array type parameter", "package p; var s []byte; func g[T ~[0]byte | [4]byte]() { tmp := T(s); var z T; _ = tmp == z}", nil},
+ {
+ "rune sequence to sequence cast patterns", `
+ package p
+ // Each of fXX functions describes a 1.20 legal cast between sequences of runes
+ // as []rune, pointers to rune arrays, rune arrays, or strings.
+ //
+ // Comments listed given the current emitted instructions [approximately].
+ // If multiple conversions are needed, these are seperated by |.
+ // rune was selected as it leads to string casts (byte is similar).
+ // The length 2 is not significant.
+ // Multiple array lengths may occur in a cast in practice (including 0).
+ func f00[S string, D string](s S) { _ = D(s) } // ChangeType
+ func f01[S string, D []rune](s S) { _ = D(s) } // Convert
+ func f02[S string, D []rune | string](s S) { _ = D(s) } // ChangeType | Convert
+ func f03[S [2]rune, D [2]rune](s S) { _ = D(s) } // ChangeType
+ func f04[S *[2]rune, D *[2]rune](s S) { _ = D(s) } // ChangeType
+ func f05[S []rune, D string](s S) { _ = D(s) } // Convert
+ func f06[S []rune, D [2]rune](s S) { _ = D(s) } // SliceToArrayPointer; Deref
+ func f07[S []rune, D [2]rune | string](s S) { _ = D(s) } // SliceToArrayPointer; Deref | Convert
+ func f08[S []rune, D *[2]rune](s S) { _ = D(s) } // SliceToArrayPointer
+ func f09[S []rune, D *[2]rune | string](s S) { _ = D(s) } // SliceToArrayPointer; Deref | Convert
+ func f10[S []rune, D *[2]rune | [2]rune](s S) { _ = D(s) } // SliceToArrayPointer | SliceToArrayPointer; Deref
+ func f11[S []rune, D *[2]rune | [2]rune | string](s S) { _ = D(s) } // SliceToArrayPointer | SliceToArrayPointer; Deref | Convert
+ func f12[S []rune, D []rune](s S) { _ = D(s) } // ChangeType
+ func f13[S []rune, D []rune | string](s S) { _ = D(s) } // Convert | ChangeType
+ func f14[S []rune, D []rune | [2]rune](s S) { _ = D(s) } // ChangeType | SliceToArrayPointer; Deref
+ func f15[S []rune, D []rune | [2]rune | string](s S) { _ = D(s) } // ChangeType | SliceToArrayPointer; Deref | Convert
+ func f16[S []rune, D []rune | *[2]rune](s S) { _ = D(s) } // ChangeType | SliceToArrayPointer
+ func f17[S []rune, D []rune | *[2]rune | string](s S) { _ = D(s) } // ChangeType | SliceToArrayPointer | Convert
+ func f18[S []rune, D []rune | *[2]rune | [2]rune](s S) { _ = D(s) } // ChangeType | SliceToArrayPointer | SliceToArrayPointer; Deref
+ func f19[S []rune, D []rune | *[2]rune | [2]rune | string](s S) { _ = D(s) } // ChangeType | SliceToArrayPointer | SliceToArrayPointer; Deref | Convert
+ func f20[S []rune | string, D string](s S) { _ = D(s) } // Convert | ChangeType
+ func f21[S []rune | string, D []rune](s S) { _ = D(s) } // Convert | ChangeType
+ func f22[S []rune | string, D []rune | string](s S) { _ = D(s) } // ChangeType | Convert | Convert | ChangeType
+ func f23[S []rune | [2]rune, D [2]rune](s S) { _ = D(s) } // SliceToArrayPointer; Deref | ChangeType
+ func f24[S []rune | *[2]rune, D *[2]rune](s S) { _ = D(s) } // SliceToArrayPointer | ChangeType
+ `, nil,
+ },
+ {
+ "matching named and underlying types", `
+ package p
+ type a string
+ type b string
+ func g0[S []rune | a | b, D []rune | a | b](s S) { _ = D(s) }
+ func g1[S []rune | ~string, D []rune | a | b](s S) { _ = D(s) }
+ func g2[S []rune | a | b, D []rune | ~string](s S) { _ = D(s) }
+ func g3[S []rune | ~string, D []rune |~string](s S) { _ = D(s) }
+ `, nil,
+ },
+ }
+
+ for _, tc := range tests {
+ tc := tc
+ t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, "p.go", tc.src, 0)
+ if err != nil {
+ t.Error(err)
+ }
+ files := []*ast.File{f}
+
+ pkg := types.NewPackage("p", "")
+ conf := &types.Config{Importer: tc.importer}
+ _, _, err = ssautil.BuildPackage(conf, fset, pkg, files, ssa.SanityCheckFunctions)
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ })
+ }
+}
diff --git a/go/ssa/builder_test.go b/go/ssa/builder_test.go
index c45f930b3..b3bb09c5e 100644
--- a/go/ssa/builder_test.go
+++ b/go/ssa/builder_test.go
@@ -6,20 +6,26 @@ package ssa_test
import (
"bytes"
+ "fmt"
"go/ast"
+ "go/build"
"go/importer"
"go/parser"
"go/token"
"go/types"
"os"
+ "path/filepath"
"reflect"
"sort"
"strings"
"testing"
+ "golang.org/x/tools/go/buildutil"
"golang.org/x/tools/go/loader"
"golang.org/x/tools/go/ssa"
"golang.org/x/tools/go/ssa/ssautil"
+ "golang.org/x/tools/internal/testenv"
+ "golang.org/x/tools/internal/typeparams"
)
func isEmpty(f *ssa.Function) bool { return f.Blocks == nil }
@@ -27,6 +33,8 @@ func isEmpty(f *ssa.Function) bool { return f.Blocks == nil }
// Tests that programs partially loaded from gc object files contain
// functions with no code for the external portions, but are otherwise ok.
func TestBuildPackage(t *testing.T) {
+ testenv.NeedsGoBuild(t) // for importer.Default()
+
input := `
package main
@@ -38,7 +46,7 @@ import (
func main() {
var t testing.T
- t.Parallel() // static call to external declared method
+ t.Parallel() // static call to external declared method
t.Fail() // static call to promoted external declared method
testing.Short() // static call to external package-level function
@@ -57,8 +65,9 @@ func main() {
// Build an SSA program from the parsed file.
// Load its dependencies from gc binary export data.
+ mode := ssa.SanityCheckFunctions
mainPkg, _, err := ssautil.BuildPackage(&types.Config{Importer: importer.Default()}, fset,
- types.NewPackage("main", ""), []*ast.File{f}, ssa.SanityCheckFunctions)
+ types.NewPackage("main", ""), []*ast.File{f}, mode)
if err != nil {
t.Error(err)
return
@@ -158,6 +167,8 @@ func main() {
// TestRuntimeTypes tests that (*Program).RuntimeTypes() includes all necessary types.
func TestRuntimeTypes(t *testing.T) {
+ testenv.NeedsGoBuild(t) // for importer.Default()
+
tests := []struct {
input string
want []string
@@ -215,6 +226,18 @@ func TestRuntimeTypes(t *testing.T) {
nil,
},
}
+
+ if typeparams.Enabled {
+ tests = append(tests, []struct {
+ input string
+ want []string
+ }{
+ // MakeInterface does not create runtime type for parameterized types.
+ {`package N; var g interface{}; func f[S any]() { var v []S; g = v }; `,
+ nil,
+ },
+ }...)
+ }
for _, test := range tests {
// Parse the file.
fset := token.NewFileSet()
@@ -226,8 +249,9 @@ func TestRuntimeTypes(t *testing.T) {
// Create a single-file main package.
// Load dependencies from gc binary export data.
+ mode := ssa.SanityCheckFunctions
ssapkg, _, err := ssautil.BuildPackage(&types.Config{Importer: importer.Default()}, fset,
- types.NewPackage("p", ""), []*ast.File{f}, ssa.SanityCheckFunctions)
+ types.NewPackage("p", ""), []*ast.File{f}, mode)
if err != nil {
t.Errorf("test %q: %s", test.input[:15], err)
continue
@@ -374,7 +398,7 @@ var (
}
// Create and build SSA
- prog := ssautil.CreateProgram(lprog, 0)
+ prog := ssautil.CreateProgram(lprog, ssa.BuilderMode(0))
prog.Build()
// Enumerate reachable synthetic functions
@@ -480,7 +504,7 @@ func h(error)
}
// Create and build SSA
- prog := ssautil.CreateProgram(lprog, 0)
+ prog := ssautil.CreateProgram(lprog, ssa.BuilderMode(0))
p := prog.Package(lprog.Package("p").Pkg)
p.Build()
g := p.Func("g")
@@ -498,3 +522,486 @@ func h(error)
t.Errorf("expected a single Phi (for the range index), got %d", phis)
}
}
+
+// TestGenericDecls ensures that *unused* generic types, methods and functions
+// signatures can be built.
+//
+// TODO(taking): Add calls from non-generic functions to instantiations of generic functions.
+// TODO(taking): Add globals with types that are instantiations of generic functions.
+func TestGenericDecls(t *testing.T) {
+ if !typeparams.Enabled {
+ t.Skip("TestGenericDecls only works with type parameters enabled.")
+ }
+ const input = `
+package p
+
+import "unsafe"
+
+type Pointer[T any] struct {
+ v unsafe.Pointer
+}
+
+func (x *Pointer[T]) Load() *T {
+ return (*T)(LoadPointer(&x.v))
+}
+
+func Load[T any](x *Pointer[T]) *T {
+ return x.Load()
+}
+
+func LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer)
+`
+ // The SSA members for this package should look something like this:
+ // func LoadPointer func(addr *unsafe.Pointer) (val unsafe.Pointer)
+ // type Pointer struct{v unsafe.Pointer}
+ // method (*Pointer[T any]) Load() *T
+ // func init func()
+ // var init$guard bool
+
+ // Parse
+ var conf loader.Config
+ f, err := conf.ParseFile("<input>", input)
+ if err != nil {
+ t.Fatalf("parse: %v", err)
+ }
+ conf.CreateFromFiles("p", f)
+
+ // Load
+ lprog, err := conf.Load()
+ if err != nil {
+ t.Fatalf("Load: %v", err)
+ }
+
+ // Create and build SSA
+ prog := ssautil.CreateProgram(lprog, ssa.BuilderMode(0))
+ p := prog.Package(lprog.Package("p").Pkg)
+ p.Build()
+
+ if load := p.Func("Load"); typeparams.ForSignature(load.Signature).Len() != 1 {
+ t.Errorf("expected a single type param T for Load got %q", load.Signature)
+ }
+ if ptr := p.Type("Pointer"); typeparams.ForNamed(ptr.Type().(*types.Named)).Len() != 1 {
+ t.Errorf("expected a single type param T for Pointer got %q", ptr.Type())
+ }
+}
+
+func TestGenericWrappers(t *testing.T) {
+ if !typeparams.Enabled {
+ t.Skip("TestGenericWrappers only works with type parameters enabled.")
+ }
+ const input = `
+package p
+
+type S[T any] struct {
+ t *T
+}
+
+func (x S[T]) M() T {
+ return *(x.t)
+}
+
+var thunk = S[int].M
+
+var g S[int]
+var bound = g.M
+
+type R[T any] struct{ S[T] }
+
+var indirect = R[int].M
+`
+ // The relevant SSA members for this package should look something like this:
+ // var bound func() int
+ // var thunk func(S[int]) int
+ // var wrapper func(R[int]) int
+
+ // Parse
+ var conf loader.Config
+ f, err := conf.ParseFile("<input>", input)
+ if err != nil {
+ t.Fatalf("parse: %v", err)
+ }
+ conf.CreateFromFiles("p", f)
+
+ // Load
+ lprog, err := conf.Load()
+ if err != nil {
+ t.Fatalf("Load: %v", err)
+ }
+
+ for _, mode := range []ssa.BuilderMode{ssa.BuilderMode(0), ssa.InstantiateGenerics} {
+ // Create and build SSA
+ prog := ssautil.CreateProgram(lprog, mode)
+ p := prog.Package(lprog.Package("p").Pkg)
+ p.Build()
+
+ for _, entry := range []struct {
+ name string // name of the package variable
+ typ string // type of the package variable
+ wrapper string // wrapper function to which the package variable is set
+ callee string // callee within the wrapper function
+ }{
+ {
+ "bound",
+ "*func() int",
+ "(p.S[int]).M$bound",
+ "(p.S[int]).M[int]",
+ },
+ {
+ "thunk",
+ "*func(p.S[int]) int",
+ "(p.S[int]).M$thunk",
+ "(p.S[int]).M[int]",
+ },
+ {
+ "indirect",
+ "*func(p.R[int]) int",
+ "(p.R[int]).M$thunk",
+ "(p.S[int]).M[int]",
+ },
+ } {
+ entry := entry
+ t.Run(entry.name, func(t *testing.T) {
+ v := p.Var(entry.name)
+ if v == nil {
+ t.Fatalf("Did not find variable for %q in %s", entry.name, p.String())
+ }
+ if v.Type().String() != entry.typ {
+ t.Errorf("Expected type for variable %s: %q. got %q", v, entry.typ, v.Type())
+ }
+
+ // Find the wrapper for v. This is stored exactly once in init.
+ var wrapper *ssa.Function
+ for _, bb := range p.Func("init").Blocks {
+ for _, i := range bb.Instrs {
+ if store, ok := i.(*ssa.Store); ok && v == store.Addr {
+ switch val := store.Val.(type) {
+ case *ssa.Function:
+ wrapper = val
+ case *ssa.MakeClosure:
+ wrapper = val.Fn.(*ssa.Function)
+ }
+ }
+ }
+ }
+ if wrapper == nil {
+ t.Fatalf("failed to find wrapper function for %s", entry.name)
+ }
+ if wrapper.String() != entry.wrapper {
+ t.Errorf("Expected wrapper function %q. got %q", wrapper, entry.wrapper)
+ }
+
+ // Find the callee within the wrapper. There should be exactly one call.
+ var callee *ssa.Function
+ for _, bb := range wrapper.Blocks {
+ for _, i := range bb.Instrs {
+ if call, ok := i.(*ssa.Call); ok {
+ callee = call.Call.StaticCallee()
+ }
+ }
+ }
+ if callee == nil {
+ t.Fatalf("failed to find callee within wrapper %s", wrapper)
+ }
+ if callee.String() != entry.callee {
+ t.Errorf("Expected callee in wrapper %q is %q. got %q", v, entry.callee, callee)
+ }
+ })
+ }
+ }
+}
+
+// TestTypeparamTest builds SSA over compilable examples in $GOROOT/test/typeparam/*.go.
+
+func TestTypeparamTest(t *testing.T) {
+ if !typeparams.Enabled {
+ return
+ }
+
+ // Tests use a fake goroot to stub out standard libraries with delcarations in
+ // testdata/src. Decreases runtime from ~80s to ~1s.
+
+ dir := filepath.Join(build.Default.GOROOT, "test", "typeparam")
+
+ // Collect all of the .go files in
+ list, err := os.ReadDir(dir)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for _, entry := range list {
+ if entry.Name() == "issue58513.go" {
+ continue // uses runtime.Caller; unimplemented by go/ssa/interp
+ }
+ if entry.IsDir() || !strings.HasSuffix(entry.Name(), ".go") {
+ continue // Consider standalone go files.
+ }
+ input := filepath.Join(dir, entry.Name())
+ t.Run(entry.Name(), func(t *testing.T) {
+ src, err := os.ReadFile(input)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Only build test files that can be compiled, or compiled and run.
+ if !bytes.HasPrefix(src, []byte("// run")) && !bytes.HasPrefix(src, []byte("// compile")) {
+ t.Skipf("not detected as a run test")
+ }
+
+ t.Logf("Input: %s\n", input)
+
+ ctx := build.Default // copy
+ ctx.GOROOT = "testdata" // fake goroot. Makes tests ~1s. tests take ~80s.
+
+ reportErr := func(err error) {
+ t.Error(err)
+ }
+ conf := loader.Config{Build: &ctx, TypeChecker: types.Config{Error: reportErr}}
+ if _, err := conf.FromArgs([]string{input}, true); err != nil {
+ t.Fatalf("FromArgs(%s) failed: %s", input, err)
+ }
+
+ iprog, err := conf.Load()
+ if iprog != nil {
+ for _, pkg := range iprog.Created {
+ for i, e := range pkg.Errors {
+ t.Errorf("Loading pkg %s error[%d]=%s", pkg, i, e)
+ }
+ }
+ }
+ if err != nil {
+ t.Fatalf("conf.Load(%s) failed: %s", input, err)
+ }
+
+ mode := ssa.SanityCheckFunctions | ssa.InstantiateGenerics
+ prog := ssautil.CreateProgram(iprog, mode)
+ prog.Build()
+ })
+ }
+}
+
+// TestOrderOfOperations ensures order of operations are as intended.
+func TestOrderOfOperations(t *testing.T) {
+ // Testing for the order of operations within an expression is done
+ // by collecting the sequence of direct function calls within a *Function.
+ // Callees are all external functions so they cannot be safely re-ordered by ssa.
+ const input = `
+package p
+
+func a() int
+func b() int
+func c() int
+
+func slice(s []int) []int { return s[a():b()] }
+func sliceMax(s []int) []int { return s[a():b():c()] }
+
+`
+
+ // Parse
+ var conf loader.Config
+ f, err := conf.ParseFile("<input>", input)
+ if err != nil {
+ t.Fatalf("parse: %v", err)
+ }
+ conf.CreateFromFiles("p", f)
+
+ // Load
+ lprog, err := conf.Load()
+ if err != nil {
+ t.Fatalf("Load: %v", err)
+ }
+
+ // Create and build SSA
+ prog := ssautil.CreateProgram(lprog, ssa.BuilderMode(0))
+ p := prog.Package(lprog.Package("p").Pkg)
+ p.Build()
+
+ for _, item := range []struct {
+ fn string
+ want string // sequence of calls within the function.
+ }{
+ {"sliceMax", "[a() b() c()]"},
+ {"slice", "[a() b()]"},
+ } {
+ fn := p.Func(item.fn)
+ want := item.want
+ t.Run(item.fn, func(t *testing.T) {
+ t.Parallel()
+
+ var calls []string
+ for _, b := range fn.Blocks {
+ for _, instr := range b.Instrs {
+ if call, ok := instr.(ssa.CallInstruction); ok {
+ calls = append(calls, call.String())
+ }
+ }
+ }
+ if got := fmt.Sprint(calls); got != want {
+ fn.WriteTo(os.Stderr)
+ t.Errorf("Expected sequence of function calls in %s was %s. got %s", fn, want, got)
+ }
+ })
+ }
+}
+
+// TestGenericFunctionSelector ensures generic functions from other packages can be selected.
+func TestGenericFunctionSelector(t *testing.T) {
+ if !typeparams.Enabled {
+ t.Skip("TestGenericFunctionSelector uses type parameters.")
+ }
+
+ pkgs := map[string]map[string]string{
+ "main": {"m.go": `package main; import "a"; func main() { a.F[int](); a.G[int,string](); a.H(0) }`},
+ "a": {"a.go": `package a; func F[T any](){}; func G[S, T any](){}; func H[T any](a T){} `},
+ }
+
+ for _, mode := range []ssa.BuilderMode{
+ ssa.SanityCheckFunctions,
+ ssa.SanityCheckFunctions | ssa.InstantiateGenerics,
+ } {
+ conf := loader.Config{
+ Build: buildutil.FakeContext(pkgs),
+ }
+ conf.Import("main")
+
+ lprog, err := conf.Load()
+ if err != nil {
+ t.Errorf("Load failed: %s", err)
+ }
+ if lprog == nil {
+ t.Fatalf("Load returned nil *Program")
+ }
+ // Create and build SSA
+ prog := ssautil.CreateProgram(lprog, mode)
+ p := prog.Package(lprog.Package("main").Pkg)
+ p.Build()
+
+ var callees []string // callees of the CallInstruction.String() in main().
+ for _, b := range p.Func("main").Blocks {
+ for _, i := range b.Instrs {
+ if call, ok := i.(ssa.CallInstruction); ok {
+ if callee := call.Common().StaticCallee(); call != nil {
+ callees = append(callees, callee.String())
+ } else {
+ t.Errorf("CallInstruction without StaticCallee() %q", call)
+ }
+ }
+ }
+ }
+ sort.Strings(callees) // ignore the order in the code.
+
+ want := "[a.F[int] a.G[int string] a.H[int]]"
+ if got := fmt.Sprint(callees); got != want {
+ t.Errorf("Expected main() to contain calls %v. got %v", want, got)
+ }
+ }
+}
+
+func TestIssue58491(t *testing.T) {
+ // Test that a local type reaches type param in instantiation.
+ testenv.NeedsGo1Point(t, 18)
+ src := `
+ package p
+
+ func foo[T any](blocking func() (T, error)) error {
+ type result struct {
+ res T
+ error // ensure the method set of result is non-empty
+ }
+
+ res := make(chan result, 1)
+ go func() {
+ var r result
+ r.res, r.error = blocking()
+ res <- r
+ }()
+ r := <-res
+ err := r // require the rtype for result when instantiated
+ return err
+ }
+ var Inst = foo[int]
+ `
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, "p.go", src, 0)
+ if err != nil {
+ t.Error(err)
+ }
+ files := []*ast.File{f}
+
+ pkg := types.NewPackage("p", "")
+ conf := &types.Config{}
+ p, _, err := ssautil.BuildPackage(conf, fset, pkg, files, ssa.SanityCheckFunctions|ssa.InstantiateGenerics)
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+
+ // Find the local type result instantiated with int.
+ var found bool
+ for _, rt := range p.Prog.RuntimeTypes() {
+ if n, ok := rt.(*types.Named); ok {
+ if u, ok := n.Underlying().(*types.Struct); ok {
+ found = true
+ if got, want := n.String(), "p.result"; got != want {
+ t.Errorf("Expected the name %s got: %s", want, got)
+ }
+ if got, want := u.String(), "struct{res int; error}"; got != want {
+ t.Errorf("Expected the underlying type of %s to be %s. got %s", n, want, got)
+ }
+ }
+ }
+ }
+ if !found {
+ t.Error("Failed to find any Named to struct types")
+ }
+}
+
+func TestIssue58491Rec(t *testing.T) {
+ // Roughly the same as TestIssue58491 but with a recursive type.
+ testenv.NeedsGo1Point(t, 18)
+ src := `
+ package p
+
+ func foo[T any]() error {
+ type result struct {
+ res T
+ next *result
+ error // ensure the method set of result is non-empty
+ }
+
+ r := &result{}
+ err := r // require the rtype for result when instantiated
+ return err
+ }
+ var Inst = foo[int]
+ `
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, "p.go", src, 0)
+ if err != nil {
+ t.Error(err)
+ }
+ files := []*ast.File{f}
+
+ pkg := types.NewPackage("p", "")
+ conf := &types.Config{}
+ p, _, err := ssautil.BuildPackage(conf, fset, pkg, files, ssa.SanityCheckFunctions|ssa.InstantiateGenerics)
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+
+ // Find the local type result instantiated with int.
+ var found bool
+ for _, rt := range p.Prog.RuntimeTypes() {
+ if n, ok := rt.(*types.Named); ok {
+ if u, ok := n.Underlying().(*types.Struct); ok {
+ found = true
+ if got, want := n.String(), "p.result"; got != want {
+ t.Errorf("Expected the name %s got: %s", want, got)
+ }
+ if got, want := u.String(), "struct{res int; next *p.result; error}"; got != want {
+ t.Errorf("Expected the underlying type of %s to be %s. got %s", n, want, got)
+ }
+ }
+ }
+ }
+ if !found {
+ t.Error("Failed to find any Named to struct types")
+ }
+}
diff --git a/go/ssa/const.go b/go/ssa/const.go
index f43792e7f..4a51a2cb4 100644
--- a/go/ssa/const.go
+++ b/go/ssa/const.go
@@ -12,68 +12,73 @@ import (
"go/token"
"go/types"
"strconv"
+ "strings"
+
+ "golang.org/x/tools/internal/typeparams"
)
// NewConst returns a new constant of the specified value and type.
// val must be valid according to the specification of Const.Value.
-//
func NewConst(val constant.Value, typ types.Type) *Const {
+ if val == nil {
+ switch soleTypeKind(typ) {
+ case types.IsBoolean:
+ val = constant.MakeBool(false)
+ case types.IsInteger:
+ val = constant.MakeInt64(0)
+ case types.IsString:
+ val = constant.MakeString("")
+ }
+ }
return &Const{typ, val}
}
+// soleTypeKind returns a BasicInfo for which constant.Value can
+// represent all zero values for the types in the type set.
+//
+// types.IsBoolean for false is a representative.
+// types.IsInteger for 0
+// types.IsString for ""
+// 0 otherwise.
+func soleTypeKind(typ types.Type) types.BasicInfo {
+ // State records the set of possible zero values (false, 0, "").
+ // Candidates (perhaps all) are eliminated during the type-set
+ // iteration, which executes at least once.
+ state := types.IsBoolean | types.IsInteger | types.IsString
+ underIs(typeSetOf(typ), func(t types.Type) bool {
+ var c types.BasicInfo
+ if t, ok := t.(*types.Basic); ok {
+ c = t.Info()
+ }
+ if c&types.IsNumeric != 0 { // int/float/complex
+ c = types.IsInteger
+ }
+ state = state & c
+ return state != 0
+ })
+ return state
+}
+
// intConst returns an 'int' constant that evaluates to i.
// (i is an int64 in case the host is narrower than the target.)
func intConst(i int64) *Const {
return NewConst(constant.MakeInt64(i), tInt)
}
-// nilConst returns a nil constant of the specified type, which may
-// be any reference type, including interfaces.
-//
-func nilConst(typ types.Type) *Const {
- return NewConst(nil, typ)
-}
-
// stringConst returns a 'string' constant that evaluates to s.
func stringConst(s string) *Const {
return NewConst(constant.MakeString(s), tString)
}
-// zeroConst returns a new "zero" constant of the specified type,
-// which must not be an array or struct type: the zero values of
-// aggregates are well-defined but cannot be represented by Const.
-//
+// zeroConst returns a new "zero" constant of the specified type.
func zeroConst(t types.Type) *Const {
- switch t := t.(type) {
- case *types.Basic:
- switch {
- case t.Info()&types.IsBoolean != 0:
- return NewConst(constant.MakeBool(false), t)
- case t.Info()&types.IsNumeric != 0:
- return NewConst(constant.MakeInt64(0), t)
- case t.Info()&types.IsString != 0:
- return NewConst(constant.MakeString(""), t)
- case t.Kind() == types.UnsafePointer:
- fallthrough
- case t.Kind() == types.UntypedNil:
- return nilConst(t)
- default:
- panic(fmt.Sprint("zeroConst for unexpected type:", t))
- }
- case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature:
- return nilConst(t)
- case *types.Named:
- return NewConst(zeroConst(t.Underlying()).Value, t)
- case *types.Array, *types.Struct, *types.Tuple:
- panic(fmt.Sprint("zeroConst applied to aggregate:", t))
- }
- panic(fmt.Sprint("zeroConst: unexpected ", t))
+ return NewConst(nil, t)
}
func (c *Const) RelString(from *types.Package) string {
var s string
if c.Value == nil {
- s = "nil"
+ s = zeroString(c.typ, from)
} else if c.Value.Kind() == constant.String {
s = constant.StringVal(c.Value)
const max = 20
@@ -88,6 +93,44 @@ func (c *Const) RelString(from *types.Package) string {
return s + ":" + relType(c.Type(), from)
}
+// zeroString returns the string representation of the "zero" value of the type t.
+func zeroString(t types.Type, from *types.Package) string {
+ switch t := t.(type) {
+ case *types.Basic:
+ switch {
+ case t.Info()&types.IsBoolean != 0:
+ return "false"
+ case t.Info()&types.IsNumeric != 0:
+ return "0"
+ case t.Info()&types.IsString != 0:
+ return `""`
+ case t.Kind() == types.UnsafePointer:
+ fallthrough
+ case t.Kind() == types.UntypedNil:
+ return "nil"
+ default:
+ panic(fmt.Sprint("zeroString for unexpected type:", t))
+ }
+ case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature:
+ return "nil"
+ case *types.Named:
+ return zeroString(t.Underlying(), from)
+ case *types.Array, *types.Struct:
+ return relType(t, from) + "{}"
+ case *types.Tuple:
+ // Tuples are not normal values.
+ // We are currently format as "(t[0], ..., t[n])". Could be something else.
+ components := make([]string, t.Len())
+ for i := 0; i < t.Len(); i++ {
+ components[i] = zeroString(t.At(i).Type(), from)
+ }
+ return "(" + strings.Join(components, ", ") + ")"
+ case *typeparams.TypeParam:
+ return "*new(" + relType(t, from) + ")"
+ }
+ panic(fmt.Sprint("zeroString: unexpected ", t))
+}
+
func (c *Const) Name() string {
return c.RelString(nil)
}
@@ -110,16 +153,36 @@ func (c *Const) Pos() token.Pos {
return token.NoPos
}
-// IsNil returns true if this constant represents a typed or untyped nil value.
+// IsNil returns true if this constant is a nil value of
+// a nillable reference type (pointer, slice, channel, map, or function),
+// a basic interface type, or
+// a type parameter all of whose possible instantiations are themselves nillable.
func (c *Const) IsNil() bool {
- return c.Value == nil
+ return c.Value == nil && nillable(c.typ)
+}
+
+// nillable reports whether *new(T) == nil is legal for type T.
+func nillable(t types.Type) bool {
+ if typeparams.IsTypeParam(t) {
+ return underIs(typeSetOf(t), func(u types.Type) bool {
+ // empty type set (u==nil) => any underlying types => not nillable
+ return u != nil && nillable(u)
+ })
+ }
+ switch t.Underlying().(type) {
+ case *types.Pointer, *types.Slice, *types.Chan, *types.Map, *types.Signature:
+ return true
+ case *types.Interface:
+ return true // basic interface.
+ default:
+ return false
+ }
}
// TODO(adonovan): move everything below into golang.org/x/tools/go/ssa/interp.
// Int64 returns the numeric value of this constant truncated to fit
// a signed 64-bit integer.
-//
func (c *Const) Int64() int64 {
switch x := constant.ToInt(c.Value); x.Kind() {
case constant.Int:
@@ -136,7 +199,6 @@ func (c *Const) Int64() int64 {
// Uint64 returns the numeric value of this constant truncated to fit
// an unsigned 64-bit integer.
-//
func (c *Const) Uint64() uint64 {
switch x := constant.ToInt(c.Value); x.Kind() {
case constant.Int:
@@ -153,17 +215,17 @@ func (c *Const) Uint64() uint64 {
// Float64 returns the numeric value of this constant truncated to fit
// a float64.
-//
func (c *Const) Float64() float64 {
- f, _ := constant.Float64Val(c.Value)
+ x := constant.ToFloat(c.Value) // (c.Value == nil) => x.Kind() == Unknown
+ f, _ := constant.Float64Val(x)
return f
}
// Complex128 returns the complex value of this constant truncated to
// fit a complex128.
-//
func (c *Const) Complex128() complex128 {
- re, _ := constant.Float64Val(constant.Real(c.Value))
- im, _ := constant.Float64Val(constant.Imag(c.Value))
+ x := constant.ToComplex(c.Value) // (c.Value == nil) => x.Kind() == Unknown
+ re, _ := constant.Float64Val(constant.Real(x))
+ im, _ := constant.Float64Val(constant.Imag(x))
return complex(re, im)
}
diff --git a/go/ssa/const_test.go b/go/ssa/const_test.go
new file mode 100644
index 000000000..131fe1ace
--- /dev/null
+++ b/go/ssa/const_test.go
@@ -0,0 +1,104 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa_test
+
+import (
+ "go/ast"
+ "go/constant"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "math/big"
+ "strings"
+ "testing"
+
+ "golang.org/x/tools/go/ssa"
+ "golang.org/x/tools/internal/typeparams"
+)
+
+func TestConstString(t *testing.T) {
+ if !typeparams.Enabled {
+ t.Skip("TestConstString requires type parameters.")
+ }
+
+ const source = `
+ package P
+
+ type Named string
+
+ func fn() (int, bool, string)
+ func gen[T int]() {}
+ `
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, "p.go", source, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var conf types.Config
+ pkg, err := conf.Check("P", fset, []*ast.File{f}, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for _, test := range []struct {
+ expr string // type expression
+ constant interface{} // constant value
+ want string // expected String() value
+ }{
+ {"int", int64(0), "0:int"},
+ {"int64", int64(0), "0:int64"},
+ {"float32", int64(0), "0:float32"},
+ {"float32", big.NewFloat(1.5), "1.5:float32"},
+ {"bool", false, "false:bool"},
+ {"string", "", `"":string`},
+ {"Named", "", `"":P.Named`},
+ {"struct{x string}", nil, "struct{x string}{}:struct{x string}"},
+ {"[]int", nil, "nil:[]int"},
+ {"[3]int", nil, "[3]int{}:[3]int"},
+ {"*int", nil, "nil:*int"},
+ {"interface{}", nil, "nil:interface{}"},
+ {"interface{string}", nil, `"":interface{string}`},
+ {"interface{int|int64}", nil, "0:interface{int|int64}"},
+ {"interface{bool}", nil, "false:interface{bool}"},
+ {"interface{bool|int}", nil, "nil:interface{bool|int}"},
+ {"interface{int|string}", nil, "nil:interface{int|string}"},
+ {"interface{bool|string}", nil, "nil:interface{bool|string}"},
+ {"interface{struct{x string}}", nil, "nil:interface{struct{x string}}"},
+ {"interface{int|int64}", int64(1), "1:interface{int|int64}"},
+ {"interface{~bool}", true, "true:interface{~bool}"},
+ {"interface{Named}", "lorem ipsum", `"lorem ipsum":interface{P.Named}`},
+ {"func() (int, bool, string)", nil, "nil:func() (int, bool, string)"},
+ } {
+ // Eval() expr for its type.
+ tv, err := types.Eval(fset, pkg, 0, test.expr)
+ if err != nil {
+ t.Fatalf("Eval(%s) failed: %v", test.expr, err)
+ }
+ var val constant.Value
+ if test.constant != nil {
+ val = constant.Make(test.constant)
+ }
+ c := ssa.NewConst(val, tv.Type)
+ got := strings.ReplaceAll(c.String(), " | ", "|") // Accept both interface{a | b} and interface{a|b}.
+ if got != test.want {
+ t.Errorf("ssa.NewConst(%v, %s).String() = %v, want %v", val, tv.Type, got, test.want)
+ }
+ }
+
+ // Test tuples
+ fn := pkg.Scope().Lookup("fn")
+ tup := fn.Type().(*types.Signature).Results()
+ if got, want := ssa.NewConst(nil, tup).String(), `(0, false, ""):(int, bool, string)`; got != want {
+ t.Errorf("ssa.NewConst(%v, %s).String() = %v, want %v", nil, tup, got, want)
+ }
+
+ // Test type-param
+ gen := pkg.Scope().Lookup("gen")
+ tp := typeparams.ForSignature(gen.Type().(*types.Signature)).At(0)
+ if got, want := ssa.NewConst(nil, tp).String(), "0:T"; got != want {
+ t.Errorf("ssa.NewConst(%v, %s).String() = %v, want %v", nil, tup, got, want)
+ }
+}
diff --git a/go/ssa/coretype.go b/go/ssa/coretype.go
new file mode 100644
index 000000000..128d61e42
--- /dev/null
+++ b/go/ssa/coretype.go
@@ -0,0 +1,159 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "go/types"
+
+ "golang.org/x/tools/internal/typeparams"
+)
+
+// Utilities for dealing with core types.
+
+// isBytestring returns true if T has the same terms as interface{[]byte | string}.
+// These act like a core type for some operations: slice expressions, append and copy.
+//
+// See https://go.dev/ref/spec#Core_types for the details on bytestring.
+func isBytestring(T types.Type) bool {
+ U := T.Underlying()
+ if _, ok := U.(*types.Interface); !ok {
+ return false
+ }
+
+ tset := typeSetOf(U)
+ if tset.Len() != 2 {
+ return false
+ }
+ hasBytes, hasString := false, false
+ underIs(tset, func(t types.Type) bool {
+ switch {
+ case isString(t):
+ hasString = true
+ case isByteSlice(t):
+ hasBytes = true
+ }
+ return hasBytes || hasString
+ })
+ return hasBytes && hasString
+}
+
+// termList is a list of types.
+type termList []*typeparams.Term // type terms of the type set
+func (s termList) Len() int { return len(s) }
+func (s termList) At(i int) types.Type { return s[i].Type() }
+
+// typeSetOf returns the type set of typ. Returns an empty typeset on an error.
+func typeSetOf(typ types.Type) termList {
+ // This is a adaptation of x/exp/typeparams.NormalTerms which x/tools cannot depend on.
+ var terms []*typeparams.Term
+ var err error
+ switch typ := typ.(type) {
+ case *typeparams.TypeParam:
+ terms, err = typeparams.StructuralTerms(typ)
+ case *typeparams.Union:
+ terms, err = typeparams.UnionTermSet(typ)
+ case *types.Interface:
+ terms, err = typeparams.InterfaceTermSet(typ)
+ default:
+ // Common case.
+ // Specializing the len=1 case to avoid a slice
+ // had no measurable space/time benefit.
+ terms = []*typeparams.Term{typeparams.NewTerm(false, typ)}
+ }
+
+ if err != nil {
+ return termList(nil)
+ }
+ return termList(terms)
+}
+
+// underIs calls f with the underlying types of the specific type terms
+// of s and reports whether all calls to f returned true. If there are
+// no specific terms, underIs returns the result of f(nil).
+func underIs(s termList, f func(types.Type) bool) bool {
+ if s.Len() == 0 {
+ return f(nil)
+ }
+ for i := 0; i < s.Len(); i++ {
+ u := s.At(i).Underlying()
+ if !f(u) {
+ return false
+ }
+ }
+ return true
+}
+
+// indexType returns the element type and index mode of a IndexExpr over a type.
+// It returns (nil, invalid) if the type is not indexable; this should never occur in a well-typed program.
+func indexType(typ types.Type) (types.Type, indexMode) {
+ switch U := typ.Underlying().(type) {
+ case *types.Array:
+ return U.Elem(), ixArrVar
+ case *types.Pointer:
+ if arr, ok := U.Elem().Underlying().(*types.Array); ok {
+ return arr.Elem(), ixVar
+ }
+ case *types.Slice:
+ return U.Elem(), ixVar
+ case *types.Map:
+ return U.Elem(), ixMap
+ case *types.Basic:
+ return tByte, ixValue // must be a string
+ case *types.Interface:
+ tset := typeSetOf(U)
+ if tset.Len() == 0 {
+ return nil, ixInvalid // no underlying terms or error is empty.
+ }
+
+ elem, mode := indexType(tset.At(0))
+ for i := 1; i < tset.Len() && mode != ixInvalid; i++ {
+ e, m := indexType(tset.At(i))
+ if !types.Identical(elem, e) { // if type checked, just a sanity check
+ return nil, ixInvalid
+ }
+ // Update the mode to the most constrained address type.
+ mode = mode.meet(m)
+ }
+ if mode != ixInvalid {
+ return elem, mode
+ }
+ }
+ return nil, ixInvalid
+}
+
+// An indexMode specifies the (addressing) mode of an index operand.
+//
+// Addressing mode of an index operation is based on the set of
+// underlying types.
+// Hasse diagram of the indexMode meet semi-lattice:
+//
+// ixVar ixMap
+// | |
+// ixArrVar |
+// | |
+// ixValue |
+// \ /
+// ixInvalid
+type indexMode byte
+
+const (
+ ixInvalid indexMode = iota // index is invalid
+ ixValue // index is a computed value (not addressable)
+ ixArrVar // like ixVar, but index operand contains an array
+ ixVar // index is an addressable variable
+ ixMap // index is a map index expression (acts like a variable on lhs, commaok on rhs of an assignment)
+)
+
+// meet is the address type that is constrained by both x and y.
+func (x indexMode) meet(y indexMode) indexMode {
+ if (x == ixMap || y == ixMap) && x != y {
+ return ixInvalid
+ }
+ // Use int representation and return min.
+ if x < y {
+ return y
+ }
+ return x
+}
diff --git a/go/ssa/coretype_test.go b/go/ssa/coretype_test.go
new file mode 100644
index 000000000..74fe4db16
--- /dev/null
+++ b/go/ssa/coretype_test.go
@@ -0,0 +1,105 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa_test
+
+import (
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "testing"
+
+ "golang.org/x/tools/internal/typeparams"
+)
+
+func TestCoreType(t *testing.T) {
+ if !typeparams.Enabled {
+ t.Skip("TestCoreType requires type parameters.")
+ }
+
+ const source = `
+ package P
+
+ type Named int
+
+ type A any
+ type B interface{~int}
+ type C interface{int}
+ type D interface{Named}
+ type E interface{~int|interface{Named}}
+ type F interface{~int|~float32}
+ type G interface{chan int|interface{chan int}}
+ type H interface{chan int|chan float32}
+ type I interface{chan<- int|chan int}
+ type J interface{chan int|chan<- int}
+ type K interface{<-chan int|chan int}
+ type L interface{chan int|<-chan int}
+ type M interface{chan int|chan Named}
+ type N interface{<-chan int|chan<- int}
+ type O interface{chan int|bool}
+ type P struct{ Named }
+ type Q interface{ Foo() }
+ type R interface{ Foo() ; Named }
+ type S interface{ Foo() ; ~int }
+
+ type T interface{chan int|interface{chan int}|<-chan int}
+`
+
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, "hello.go", source, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var conf types.Config
+ pkg, err := conf.Check("P", fset, []*ast.File{f}, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for _, test := range []struct {
+ expr string // type expression of Named type
+ want string // expected core type (or "<nil>" if none)
+ }{
+ {"Named", "int"}, // Underlying type is not interface.
+ {"A", "<nil>"}, // Interface has no terms.
+ {"B", "int"}, // Tilde term.
+ {"C", "int"}, // Non-tilde term.
+ {"D", "int"}, // Named term.
+ {"E", "int"}, // Identical underlying types.
+ {"F", "<nil>"}, // Differing underlying types.
+ {"G", "chan int"}, // Identical Element types.
+ {"H", "<nil>"}, // Element type int has differing underlying type to float32.
+ {"I", "chan<- int"}, // SendRecv followed by SendOnly
+ {"J", "chan<- int"}, // SendOnly followed by SendRecv
+ {"K", "<-chan int"}, // RecvOnly followed by SendRecv
+ {"L", "<-chan int"}, // SendRecv followed by RecvOnly
+ {"M", "<nil>"}, // Element type int is not *identical* to Named.
+ {"N", "<nil>"}, // Differing channel directions
+ {"O", "<nil>"}, // A channel followed by a non-channel.
+ {"P", "struct{P.Named}"}, // Embedded type.
+ {"Q", "<nil>"}, // interface type with no terms and functions
+ {"R", "int"}, // interface type with both terms and functions.
+ {"S", "int"}, // interface type with a tilde term
+ {"T", "<-chan int"}, // Prefix of 2 terms that are identical before switching to channel.
+ } {
+ // Eval() expr for its type.
+ tv, err := types.Eval(fset, pkg, 0, test.expr)
+ if err != nil {
+ t.Fatalf("Eval(%s) failed: %v", test.expr, err)
+ }
+
+ ct := typeparams.CoreType(tv.Type)
+ var got string
+ if ct == nil {
+ got = "<nil>"
+ } else {
+ got = ct.String()
+ }
+ if got != test.want {
+ t.Errorf("CoreType(%s) = %v, want %v", test.expr, got, test.want)
+ }
+ }
+}
diff --git a/go/ssa/create.go b/go/ssa/create.go
index 69cd93713..ccb20e796 100644
--- a/go/ssa/create.go
+++ b/go/ssa/create.go
@@ -16,24 +16,29 @@ import (
"sync"
"golang.org/x/tools/go/types/typeutil"
+ "golang.org/x/tools/internal/typeparams"
)
// NewProgram returns a new SSA Program.
//
// mode controls diagnostics and checking during SSA construction.
-//
func NewProgram(fset *token.FileSet, mode BuilderMode) *Program {
prog := &Program{
- Fset: fset,
- imported: make(map[string]*Package),
- packages: make(map[*types.Package]*Package),
- thunks: make(map[selectionKey]*Function),
- bounds: make(map[*types.Func]*Function),
- mode: mode,
+ Fset: fset,
+ imported: make(map[string]*Package),
+ packages: make(map[*types.Package]*Package),
+ thunks: make(map[selectionKey]*Function),
+ bounds: make(map[boundsKey]*Function),
+ mode: mode,
+ canon: newCanonizer(),
+ ctxt: typeparams.NewContext(),
+ instances: make(map[*Function]*instanceSet),
+ parameterized: tpWalker{seen: make(map[types.Type]bool)},
}
h := typeutil.MakeHasher() // protected by methodsMu, in effect
prog.methodSets.SetHasher(h)
+ prog.runtimeTypes.SetHasher(h)
return prog
}
@@ -44,7 +49,6 @@ func NewProgram(fset *token.FileSet, mode BuilderMode) *Program {
// For objects from Go source code, syntax is the associated syntax
// tree (for funcs and vars only); it will be used during the build
// phase.
-//
func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node) {
name := obj.Name()
switch obj := obj.(type) {
@@ -85,19 +89,33 @@ func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node) {
pkg.ninit++
name = fmt.Sprintf("init#%d", pkg.ninit)
}
+
+ // Collect type parameters if this is a generic function/method.
+ var tparams *typeparams.TypeParamList
+ if rtparams := typeparams.RecvTypeParams(sig); rtparams.Len() > 0 {
+ tparams = rtparams
+ } else if sigparams := typeparams.ForSignature(sig); sigparams.Len() > 0 {
+ tparams = sigparams
+ }
+
fn := &Function{
- name: name,
- object: obj,
- Signature: sig,
- syntax: syntax,
- pos: obj.Pos(),
- Pkg: pkg,
- Prog: pkg.Prog,
- info: pkg.info,
+ name: name,
+ object: obj,
+ Signature: sig,
+ syntax: syntax,
+ pos: obj.Pos(),
+ Pkg: pkg,
+ Prog: pkg.Prog,
+ typeparams: tparams,
+ info: pkg.info,
}
+ pkg.created.Add(fn)
if syntax == nil {
fn.Synthetic = "loaded from gc object file"
}
+ if tparams.Len() > 0 {
+ fn.Prog.createInstanceSet(fn)
+ }
pkg.objects[obj] = fn
if sig.Recv() == nil {
@@ -112,7 +130,6 @@ func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node) {
// membersFromDecl populates package pkg with members for each
// typechecker object (var, func, const or type) associated with the
// specified decl.
-//
func membersFromDecl(pkg *Package, decl ast.Decl) {
switch decl := decl.(type) {
case *ast.GenDecl: // import, const, type or var
@@ -152,6 +169,19 @@ func membersFromDecl(pkg *Package, decl ast.Decl) {
}
}
+// creator tracks functions that have finished their CREATE phases.
+//
+// All Functions belong to the same Program. May have differing packages.
+//
+// creators are not thread-safe.
+type creator []*Function
+
+func (c *creator) Add(fn *Function) {
+ *c = append(*c, fn)
+}
+func (c *creator) At(i int) *Function { return (*c)[i] }
+func (c *creator) Len() int { return len(*c) }
+
// CreatePackage constructs and returns an SSA Package from the
// specified type-checked, error-free file ASTs, and populates its
// Members mapping.
@@ -161,7 +191,6 @@ func membersFromDecl(pkg *Package, decl ast.Decl) {
//
// The real work of building SSA form for each function is not done
// until a subsequent call to Package.Build().
-//
func (prog *Program) CreatePackage(pkg *types.Package, files []*ast.File, info *types.Info, importable bool) *Package {
p := &Package{
Prog: prog,
@@ -182,6 +211,7 @@ func (prog *Program) CreatePackage(pkg *types.Package, files []*ast.File, info *
info: p.info,
}
p.Members[p.init.name] = p.init
+ p.created.Add(p.init)
// CREATE phase.
// Allocate all package members: vars, funcs, consts and types.
@@ -243,7 +273,6 @@ var printMu sync.Mutex
// AllPackages returns a new slice containing all packages in the
// program prog in unspecified order.
-//
func (prog *Program) AllPackages() []*Package {
pkgs := make([]*Package, 0, len(prog.packages))
for _, pkg := range prog.packages {
@@ -265,7 +294,6 @@ func (prog *Program) AllPackages() []*Package {
// false---yet this function remains very convenient.
// Clients should use (*Program).Package instead where possible.
// SSA doesn't really need a string-keyed map of packages.
-//
func (prog *Program) ImportedPackage(path string) *Package {
return prog.imported[path]
}
diff --git a/go/ssa/doc.go b/go/ssa/doc.go
index 6885bedb3..afda476b3 100644
--- a/go/ssa/doc.go
+++ b/go/ssa/doc.go
@@ -41,60 +41,61 @@
//
// The primary interfaces of this package are:
//
-// - Member: a named member of a Go package.
-// - Value: an expression that yields a value.
-// - Instruction: a statement that consumes values and performs computation.
-// - Node: a Value or Instruction (emphasizing its membership in the SSA value graph)
+// - Member: a named member of a Go package.
+// - Value: an expression that yields a value.
+// - Instruction: a statement that consumes values and performs computation.
+// - Node: a Value or Instruction (emphasizing its membership in the SSA value graph)
//
// A computation that yields a result implements both the Value and
// Instruction interfaces. The following table shows for each
// concrete type which of these interfaces it implements.
//
-// Value? Instruction? Member?
-// *Alloc ✔ ✔
-// *BinOp ✔ ✔
-// *Builtin ✔
-// *Call ✔ ✔
-// *ChangeInterface ✔ ✔
-// *ChangeType ✔ ✔
-// *Const ✔
-// *Convert ✔ ✔
-// *DebugRef ✔
-// *Defer ✔
-// *Extract ✔ ✔
-// *Field ✔ ✔
-// *FieldAddr ✔ ✔
-// *FreeVar ✔
-// *Function ✔ ✔ (func)
-// *Global ✔ ✔ (var)
-// *Go ✔
-// *If ✔
-// *Index ✔ ✔
-// *IndexAddr ✔ ✔
-// *Jump ✔
-// *Lookup ✔ ✔
-// *MakeChan ✔ ✔
-// *MakeClosure ✔ ✔
-// *MakeInterface ✔ ✔
-// *MakeMap ✔ ✔
-// *MakeSlice ✔ ✔
-// *MapUpdate ✔
-// *NamedConst ✔ (const)
-// *Next ✔ ✔
-// *Panic ✔
-// *Parameter ✔
-// *Phi ✔ ✔
-// *Range ✔ ✔
-// *Return ✔
-// *RunDefers ✔
-// *Select ✔ ✔
-// *Send ✔
-// *Slice ✔ ✔
-// *SliceToArrayPointer ✔ ✔
-// *Store ✔
-// *Type ✔ (type)
-// *TypeAssert ✔ ✔
-// *UnOp ✔ ✔
+// Value? Instruction? Member?
+// *Alloc ✔ ✔
+// *BinOp ✔ ✔
+// *Builtin ✔
+// *Call ✔ ✔
+// *ChangeInterface ✔ ✔
+// *ChangeType ✔ ✔
+// *Const ✔
+// *Convert ✔ ✔
+// *DebugRef ✔
+// *Defer ✔
+// *Extract ✔ ✔
+// *Field ✔ ✔
+// *FieldAddr ✔ ✔
+// *FreeVar ✔
+// *Function ✔ ✔ (func)
+// *GenericConvert ✔ ✔
+// *Global ✔ ✔ (var)
+// *Go ✔
+// *If ✔
+// *Index ✔ ✔
+// *IndexAddr ✔ ✔
+// *Jump ✔
+// *Lookup ✔ ✔
+// *MakeChan ✔ ✔
+// *MakeClosure ✔ ✔
+// *MakeInterface ✔ ✔
+// *MakeMap ✔ ✔
+// *MakeSlice ✔ ✔
+// *MapUpdate ✔
+// *NamedConst ✔ (const)
+// *Next ✔ ✔
+// *Panic ✔
+// *Parameter ✔
+// *Phi ✔ ✔
+// *Range ✔ ✔
+// *Return ✔
+// *RunDefers ✔
+// *Select ✔ ✔
+// *Send ✔
+// *Slice ✔ ✔
+// *SliceToArrayPointer ✔ ✔
+// *Store ✔
+// *Type ✔ (type)
+// *TypeAssert ✔ ✔
+// *UnOp ✔ ✔
//
// Other key types in this package include: Program, Package, Function
// and BasicBlock.
@@ -122,5 +123,4 @@
// of trying to determine corresponding elements across the four
// domains of source locations, ast.Nodes, types.Objects,
// ssa.Values/Instructions.
-//
package ssa // import "golang.org/x/tools/go/ssa"
diff --git a/go/ssa/dom.go b/go/ssa/dom.go
index 822fe9772..66a2f5e6e 100644
--- a/go/ssa/dom.go
+++ b/go/ssa/dom.go
@@ -29,12 +29,10 @@ import (
// its parent in the dominator tree, if any.
// Neither the entry node (b.Index==0) nor recover node
// (b==b.Parent().Recover()) have a parent.
-//
func (b *BasicBlock) Idom() *BasicBlock { return b.dom.idom }
// Dominees returns the list of blocks that b immediately dominates:
// its children in the dominator tree.
-//
func (b *BasicBlock) Dominees() []*BasicBlock { return b.dom.children }
// Dominates reports whether b dominates c.
@@ -50,7 +48,6 @@ func (a byDomPreorder) Less(i, j int) bool { return a[i].dom.pre < a[j].dom.pre
// DomPreorder returns a new slice containing the blocks of f in
// dominator tree preorder.
-//
func (f *Function) DomPreorder() []*BasicBlock {
n := len(f.Blocks)
order := make(byDomPreorder, n)
@@ -110,7 +107,6 @@ func (lt *ltState) link(v, w *BasicBlock) {
// buildDomTree computes the dominator tree of f using the LT algorithm.
// Precondition: all blocks are reachable (e.g. optimizeBlocks has been run).
-//
func buildDomTree(f *Function) {
// The step numbers refer to the original LT paper; the
// reordering is due to Georgiadis.
@@ -210,7 +206,6 @@ func buildDomTree(f *Function) {
// numberDomTree sets the pre- and post-order numbers of a depth-first
// traversal of the dominator tree rooted at v. These are used to
// answer dominance queries in constant time.
-//
func numberDomTree(v *BasicBlock, pre, post int32) (int32, int32) {
v.dom.pre = pre
pre++
@@ -228,7 +223,6 @@ func numberDomTree(v *BasicBlock, pre, post int32) (int32, int32) {
// computed by the LT algorithm by comparing against the dominance
// relation computed by a naive Kildall-style forward dataflow
// analysis (Algorithm 10.16 from the "Dragon" book).
-//
func sanityCheckDomTree(f *Function) {
n := len(f.Blocks)
@@ -309,7 +303,7 @@ func sanityCheckDomTree(f *Function) {
// Printing functions ----------------------------------------
-// printDomTree prints the dominator tree as text, using indentation.
+// printDomTreeText prints the dominator tree as text, using indentation.
func printDomTreeText(buf *bytes.Buffer, v *BasicBlock, indent int) {
fmt.Fprintf(buf, "%*s%s\n", 4*indent, "", v)
for _, child := range v.dom.children {
diff --git a/go/ssa/emit.go b/go/ssa/emit.go
index 576e0245a..1731c7975 100644
--- a/go/ssa/emit.go
+++ b/go/ssa/emit.go
@@ -11,11 +11,12 @@ import (
"go/ast"
"go/token"
"go/types"
+
+ "golang.org/x/tools/internal/typeparams"
)
// emitNew emits to f a new (heap Alloc) instruction allocating an
// object of type typ. pos is the optional source location.
-//
func emitNew(f *Function, typ types.Type, pos token.Pos) *Alloc {
v := &Alloc{Heap: true}
v.setType(types.NewPointer(typ))
@@ -26,17 +27,15 @@ func emitNew(f *Function, typ types.Type, pos token.Pos) *Alloc {
// emitLoad emits to f an instruction to load the address addr into a
// new temporary, and returns the value so defined.
-//
func emitLoad(f *Function, addr Value) *UnOp {
v := &UnOp{Op: token.MUL, X: addr}
- v.setType(deref(addr.Type()))
+ v.setType(deref(typeparams.CoreType(addr.Type())))
f.emit(v)
return v
}
// emitDebugRef emits to f a DebugRef pseudo-instruction associating
// expression e with value v.
-//
func emitDebugRef(f *Function, e ast.Expr, v Value, isAddr bool) {
if !f.debugInfo() {
return // debugging not enabled
@@ -68,7 +67,6 @@ func emitDebugRef(f *Function, e ast.Expr, v Value, isAddr bool) {
// where op is an eager shift, logical or arithmetic operation.
// (Use emitCompare() for comparisons and Builder.logicalBinop() for
// non-eager operations.)
-//
func emitArith(f *Function, op token.Token, x, y Value, t types.Type, pos token.Pos) Value {
switch op {
case token.SHL, token.SHR:
@@ -78,7 +76,7 @@ func emitArith(f *Function, op token.Token, x, y Value, t types.Type, pos token.
// There is a runtime panic if y is signed and <0. Instead of inserting a check for y<0
// and converting to an unsigned value (like the compiler) leave y as is.
- if b, ok := y.Type().Underlying().(*types.Basic); ok && b.Info()&types.IsUntyped != 0 {
+ if isUntyped(y.Type().Underlying()) {
// Untyped conversion:
// Spec https://go.dev/ref/spec#Operators:
// The right operand in a shift expression must have integer type or be an untyped constant
@@ -106,7 +104,6 @@ func emitArith(f *Function, op token.Token, x, y Value, t types.Type, pos token.
// emitCompare emits to f code compute the boolean result of
// comparison comparison 'x op y'.
-//
func emitCompare(f *Function, op token.Token, x, y Value, pos token.Pos) Value {
xt := x.Type().Underlying()
yt := y.Type().Underlying()
@@ -126,9 +123,9 @@ func emitCompare(f *Function, op token.Token, x, y Value, pos token.Pos) Value {
if types.Identical(xt, yt) {
// no conversion necessary
- } else if _, ok := xt.(*types.Interface); ok {
+ } else if isNonTypeParamInterface(x.Type()) {
y = emitConv(f, y, x.Type())
- } else if _, ok := yt.(*types.Interface); ok {
+ } else if isNonTypeParamInterface(y.Type()) {
x = emitConv(f, x, y.Type())
} else if _, ok := x.(*Const); ok {
x = emitConv(f, x, y.Type())
@@ -151,7 +148,6 @@ func emitCompare(f *Function, op token.Token, x, y Value, pos token.Pos) Value {
// isValuePreserving returns true if a conversion from ut_src to
// ut_dst is value-preserving, i.e. just a change of type.
// Precondition: neither argument is a named type.
-//
func isValuePreserving(ut_src, ut_dst types.Type) bool {
// Identical underlying types?
if structTypesIdentical(ut_dst, ut_src) {
@@ -176,7 +172,6 @@ func isValuePreserving(ut_src, ut_dst types.Type) bool {
// and returns the converted value. Implicit conversions are required
// by language assignability rules in assignments, parameter passing,
// etc.
-//
func emitConv(f *Function, val Value, typ types.Type) Value {
t_src := val.Type()
@@ -184,21 +179,20 @@ func emitConv(f *Function, val Value, typ types.Type) Value {
if types.Identical(t_src, typ) {
return val
}
-
ut_dst := typ.Underlying()
ut_src := t_src.Underlying()
- // Just a change of type, but not value or representation?
- if isValuePreserving(ut_src, ut_dst) {
- c := &ChangeType{X: val}
- c.setType(typ)
- return f.emit(c)
- }
-
// Conversion to, or construction of a value of, an interface type?
- if _, ok := ut_dst.(*types.Interface); ok {
+ if isNonTypeParamInterface(typ) {
+ // Interface name change?
+ if isValuePreserving(ut_src, ut_dst) {
+ c := &ChangeType{X: val}
+ c.setType(typ)
+ return f.emit(c)
+ }
+
// Assignment from one interface type to another?
- if _, ok := ut_src.(*types.Interface); ok {
+ if isNonTypeParamInterface(t_src) {
c := &ChangeInterface{X: val}
c.setType(typ)
return f.emit(c)
@@ -206,7 +200,7 @@ func emitConv(f *Function, val Value, typ types.Type) Value {
// Untyped nil constant? Return interface-typed nil constant.
if ut_src == tUntypedNil {
- return nilConst(typ)
+ return zeroConst(typ)
}
// Convert (non-nil) "untyped" literals to their default type.
@@ -214,15 +208,88 @@ func emitConv(f *Function, val Value, typ types.Type) Value {
val = emitConv(f, val, types.Default(ut_src))
}
- f.Pkg.Prog.needMethodsOf(val.Type())
mi := &MakeInterface{X: val}
mi.setType(typ)
return f.emit(mi)
}
+ // In the common case, the typesets of src and dst are singletons
+ // and we emit an appropriate conversion. But if either contains
+ // a type parameter, the conversion may represent a cross product,
+ // in which case which we emit a MultiConvert.
+ dst_terms := typeSetOf(ut_dst)
+ src_terms := typeSetOf(ut_src)
+
+ // conversionCase describes an instruction pattern that maybe emitted to
+ // model d <- s for d in dst_terms and s in src_terms.
+ // Multiple conversions can match the same pattern.
+ type conversionCase uint8
+ const (
+ changeType conversionCase = 1 << iota
+ sliceToArray
+ sliceToArrayPtr
+ sliceTo0Array
+ sliceTo0ArrayPtr
+ convert
+ )
+ classify := func(s, d types.Type) conversionCase {
+ // Just a change of type, but not value or representation?
+ if isValuePreserving(s, d) {
+ return changeType
+ }
+
+ // Conversion from slice to array or slice to array pointer?
+ if slice, ok := s.(*types.Slice); ok {
+ var arr *types.Array
+ var ptr bool
+ // Conversion from slice to array pointer?
+ switch d := d.(type) {
+ case *types.Array:
+ arr = d
+ case *types.Pointer:
+ arr, _ = d.Elem().Underlying().(*types.Array)
+ ptr = true
+ }
+ if arr != nil && types.Identical(slice.Elem(), arr.Elem()) {
+ if arr.Len() == 0 {
+ if ptr {
+ return sliceTo0ArrayPtr
+ } else {
+ return sliceTo0Array
+ }
+ }
+ if ptr {
+ return sliceToArrayPtr
+ } else {
+ return sliceToArray
+ }
+ }
+ }
+
+ // The only remaining case in well-typed code is a representation-
+ // changing conversion of basic types (possibly with []byte/[]rune).
+ if !isBasic(s) && !isBasic(d) {
+ panic(fmt.Sprintf("in %s: cannot convert term %s (%s [within %s]) to type %s [within %s]", f, val, val.Type(), s, typ, d))
+ }
+ return convert
+ }
+
+ var classifications conversionCase
+ for _, s := range src_terms {
+ us := s.Type().Underlying()
+ for _, d := range dst_terms {
+ ud := d.Type().Underlying()
+ classifications |= classify(us, ud)
+ }
+ }
+ if classifications == 0 {
+ panic(fmt.Sprintf("in %s: cannot convert %s (%s) to %s", f, val, val.Type(), typ))
+ }
+
// Conversion of a compile-time constant value?
if c, ok := val.(*Const); ok {
- if _, ok := ut_dst.(*types.Basic); ok || c.IsNil() {
+ // Conversion to a basic type?
+ if isBasic(ut_dst) {
// Conversion of a compile-time constant to
// another constant type results in a new
// constant of the destination type and
@@ -230,38 +297,80 @@ func emitConv(f *Function, val Value, typ types.Type) Value {
// We don't truncate the value yet.
return NewConst(c.Value, typ)
}
+ // Can we always convert from zero value without panicking?
+ const mayPanic = sliceToArray | sliceToArrayPtr
+ if c.Value == nil && classifications&mayPanic == 0 {
+ return NewConst(nil, typ)
+ }
// We're converting from constant to non-constant type,
// e.g. string -> []byte/[]rune.
}
- // Conversion from slice to array pointer?
- if slice, ok := ut_src.(*types.Slice); ok {
- if ptr, ok := ut_dst.(*types.Pointer); ok {
- if arr, ok := ptr.Elem().Underlying().(*types.Array); ok && types.Identical(slice.Elem(), arr.Elem()) {
- c := &SliceToArrayPointer{X: val}
- c.setType(ut_dst)
- return f.emit(c)
- }
- }
- }
- // A representation-changing conversion?
- // At least one of {ut_src,ut_dst} must be *Basic.
- // (The other may be []byte or []rune.)
- _, ok1 := ut_src.(*types.Basic)
- _, ok2 := ut_dst.(*types.Basic)
- if ok1 || ok2 {
+ switch classifications {
+ case changeType: // representation-preserving change
+ c := &ChangeType{X: val}
+ c.setType(typ)
+ return f.emit(c)
+
+ case sliceToArrayPtr, sliceTo0ArrayPtr: // slice to array pointer
+ c := &SliceToArrayPointer{X: val}
+ c.setType(typ)
+ return f.emit(c)
+
+ case sliceToArray: // slice to arrays (not zero-length)
+ ptype := types.NewPointer(typ)
+ p := &SliceToArrayPointer{X: val}
+ p.setType(ptype)
+ x := f.emit(p)
+ unOp := &UnOp{Op: token.MUL, X: x}
+ unOp.setType(typ)
+ return f.emit(unOp)
+
+ case sliceTo0Array: // slice to zero-length arrays (constant)
+ return zeroConst(typ)
+
+ case convert: // representation-changing conversion
c := &Convert{X: val}
c.setType(typ)
return f.emit(c)
+
+ default: // multiple conversion
+ c := &MultiConvert{X: val, from: src_terms, to: dst_terms}
+ c.setType(typ)
+ return f.emit(c)
}
+}
- panic(fmt.Sprintf("in %s: cannot convert %s (%s) to %s", f, val, val.Type(), typ))
+// emitTypeCoercion emits to f code to coerce the type of a
+// Value v to exactly type typ, and returns the coerced value.
+//
+// Requires that coercing v.Typ() to typ is a value preserving change.
+//
+// Currently used only when v.Type() is a type instance of typ or vice versa.
+// A type v is a type instance of a type t if there exists a
+// type parameter substitution σ s.t. σ(v) == t. Example:
+//
+// σ(func(T) T) == func(int) int for σ == [T ↦ int]
+//
+// This happens in instantiation wrappers for conversion
+// from an instantiation to a parameterized type (and vice versa)
+// with σ substituting f.typeparams by f.typeargs.
+func emitTypeCoercion(f *Function, v Value, typ types.Type) Value {
+ if types.Identical(v.Type(), typ) {
+ return v // no coercion needed
+ }
+ // TODO(taking): for instances should we record which side is the instance?
+ c := &ChangeType{
+ X: v,
+ }
+ c.setType(typ)
+ f.emit(c)
+ return c
}
// emitStore emits to f an instruction to store value val at location
// addr, applying implicit conversions as required by assignability rules.
-//
func emitStore(f *Function, addr, val Value, pos token.Pos) *Store {
s := &Store{
Addr: addr,
@@ -274,7 +383,6 @@ func emitStore(f *Function, addr, val Value, pos token.Pos) *Store {
// emitJump emits to f a jump to target, and updates the control-flow graph.
// Postcondition: f.currentBlock is nil.
-//
func emitJump(f *Function, target *BasicBlock) {
b := f.currentBlock
b.emit(new(Jump))
@@ -285,7 +393,6 @@ func emitJump(f *Function, target *BasicBlock) {
// emitIf emits to f a conditional jump to tblock or fblock based on
// cond, and updates the control-flow graph.
// Postcondition: f.currentBlock is nil.
-//
func emitIf(f *Function, cond Value, tblock, fblock *BasicBlock) {
b := f.currentBlock
b.emit(&If{Cond: cond})
@@ -296,7 +403,6 @@ func emitIf(f *Function, cond Value, tblock, fblock *BasicBlock) {
// emitExtract emits to f an instruction to extract the index'th
// component of tuple. It returns the extracted value.
-//
func emitExtract(f *Function, tuple Value, index int) Value {
e := &Extract{Tuple: tuple, Index: index}
e.setType(tuple.Type().(*types.Tuple).At(index).Type())
@@ -305,7 +411,6 @@ func emitExtract(f *Function, tuple Value, index int) Value {
// emitTypeAssert emits to f a type assertion value := x.(t) and
// returns the value. x.Type() must be an interface.
-//
func emitTypeAssert(f *Function, x Value, t types.Type, pos token.Pos) Value {
a := &TypeAssert{X: x, AssertedType: t}
a.setPos(pos)
@@ -315,7 +420,6 @@ func emitTypeAssert(f *Function, x Value, t types.Type, pos token.Pos) Value {
// emitTypeTest emits to f a type test value,ok := x.(t) and returns
// a (value, ok) tuple. x.Type() must be an interface.
-//
func emitTypeTest(f *Function, x Value, t types.Type, pos token.Pos) Value {
a := &TypeAssert{
X: x,
@@ -335,7 +439,6 @@ func emitTypeTest(f *Function, x Value, t types.Type, pos token.Pos) Value {
// Intended for wrapper methods.
// Precondition: f does/will not use deferred procedure calls.
// Postcondition: f.currentBlock is nil.
-//
func emitTailCall(f *Function, call *Call) {
tresults := f.Signature.Results()
nr := tresults.Len()
@@ -372,16 +475,16 @@ func emitTailCall(f *Function, call *Call) {
// If v is the address of a struct, the result will be the address of
// a field; if it is the value of a struct, the result will be the
// value of a field.
-//
-func emitImplicitSelections(f *Function, v Value, indices []int) Value {
+func emitImplicitSelections(f *Function, v Value, indices []int, pos token.Pos) Value {
for _, index := range indices {
- fld := deref(v.Type()).Underlying().(*types.Struct).Field(index)
+ fld := typeparams.CoreType(deref(v.Type())).(*types.Struct).Field(index)
if isPointer(v.Type()) {
instr := &FieldAddr{
X: v,
Field: index,
}
+ instr.setPos(pos)
instr.setType(types.NewPointer(fld.Type()))
v = f.emit(instr)
// Load the field's value iff indirectly embedded.
@@ -393,6 +496,7 @@ func emitImplicitSelections(f *Function, v Value, indices []int) Value {
X: v,
Field: index,
}
+ instr.setPos(pos)
instr.setType(fld.Type())
v = f.emit(instr)
}
@@ -406,9 +510,8 @@ func emitImplicitSelections(f *Function, v Value, indices []int) Value {
// will be the field's address; otherwise the result will be the
// field's value.
// Ident id is used for position and debug info.
-//
func emitFieldSelection(f *Function, v Value, index int, wantAddr bool, id *ast.Ident) Value {
- fld := deref(v.Type()).Underlying().(*types.Struct).Field(index)
+ fld := typeparams.CoreType(deref(v.Type())).(*types.Struct).Field(index)
if isPointer(v.Type()) {
instr := &FieldAddr{
X: v,
@@ -436,7 +539,6 @@ func emitFieldSelection(f *Function, v Value, index int, wantAddr bool, id *ast.
// zeroValue emits to f code to produce a zero value of type t,
// and returns it.
-//
func zeroValue(f *Function, t types.Type) Value {
switch t.Underlying().(type) {
case *types.Struct, *types.Array:
@@ -454,7 +556,6 @@ func zeroValue(f *Function, t types.Type) Value {
// type.
//
// Idempotent.
-//
func createRecoverBlock(f *Function) {
if f.Recover != nil {
return // already created
diff --git a/go/ssa/example_test.go b/go/ssa/example_test.go
index 2ab9e9926..9a5fd4369 100644
--- a/go/ssa/example_test.go
+++ b/go/ssa/example_test.go
@@ -2,6 +2,9 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build !android && !ios && !js
+// +build !android,!ios,!js
+
package ssa_test
import (
@@ -48,7 +51,6 @@ func main() {
// Build and run the ssadump.go program if you want a standalone tool
// with similar functionality. It is located at
// golang.org/x/tools/cmd/ssadump.
-//
func Example_buildPackage() {
// Replace interface{} with any for this test.
ssa.SetNormalizeAnyForTesting(true)
@@ -159,7 +161,7 @@ func Example_loadWholeProgram() {
}
// Create SSA packages for well-typed packages and their dependencies.
- prog, pkgs := ssautil.AllPackages(initial, ssa.PrintPackages)
+ prog, pkgs := ssautil.AllPackages(initial, ssa.PrintPackages|ssa.InstantiateGenerics)
_ = pkgs
// Build SSA code for the whole program.
diff --git a/go/ssa/func.go b/go/ssa/func.go
index 8fc089e5d..57f5f718f 100644
--- a/go/ssa/func.go
+++ b/go/ssa/func.go
@@ -15,6 +15,8 @@ import (
"io"
"os"
"strings"
+
+ "golang.org/x/tools/internal/typeparams"
)
// Like ObjectOf, but panics instead of returning nil.
@@ -31,15 +33,62 @@ func (f *Function) objectOf(id *ast.Ident) types.Object {
// Only valid during f's create and build phases.
func (f *Function) typeOf(e ast.Expr) types.Type {
if T := f.info.TypeOf(e); T != nil {
- return T
+ return f.typ(T)
}
panic(fmt.Sprintf("no type for %T @ %s", e, f.Prog.Fset.Position(e.Pos())))
}
+// typ is the locally instantiated type of T. T==typ(T) if f is not an instantiation.
+func (f *Function) typ(T types.Type) types.Type {
+ return f.subst.typ(T)
+}
+
+// If id is an Instance, returns info.Instances[id].Type.
+// Otherwise returns f.typeOf(id).
+func (f *Function) instanceType(id *ast.Ident) types.Type {
+ if t, ok := typeparams.GetInstances(f.info)[id]; ok {
+ return t.Type
+ }
+ return f.typeOf(id)
+}
+
+// selection returns a *selection corresponding to f.info.Selections[selector]
+// with potential updates for type substitution.
+func (f *Function) selection(selector *ast.SelectorExpr) *selection {
+ sel := f.info.Selections[selector]
+ if sel == nil {
+ return nil
+ }
+
+ switch sel.Kind() {
+ case types.MethodExpr, types.MethodVal:
+ if recv := f.typ(sel.Recv()); recv != sel.Recv() {
+ // recv changed during type substitution.
+ pkg := f.declaredPackage().Pkg
+ obj, index, indirect := types.LookupFieldOrMethod(recv, true, pkg, sel.Obj().Name())
+
+ // sig replaces sel.Type(). See (types.Selection).Typ() for details.
+ sig := obj.Type().(*types.Signature)
+ sig = changeRecv(sig, newVar(sig.Recv().Name(), recv))
+ if sel.Kind() == types.MethodExpr {
+ sig = recvAsFirstArg(sig)
+ }
+ return &selection{
+ kind: sel.Kind(),
+ recv: recv,
+ typ: sig,
+ obj: obj,
+ index: index,
+ indirect: indirect,
+ }
+ }
+ }
+ return toSelection(sel)
+}
+
// Destinations associated with unlabelled for/switch/select stmts.
// We push/pop one of these as we enter/leave each construct and for
// each BranchStmt we scan for the innermost target of the right type.
-//
type targets struct {
tail *targets // rest of stack
_break *BasicBlock
@@ -50,7 +99,6 @@ type targets struct {
// Destinations associated with a labelled block.
// We populate these as labels are encountered in forward gotos or
// labelled statements.
-//
type lblock struct {
_goto *BasicBlock
_break *BasicBlock
@@ -59,22 +107,21 @@ type lblock struct {
// labelledBlock returns the branch target associated with the
// specified label, creating it if needed.
-//
func (f *Function) labelledBlock(label *ast.Ident) *lblock {
- lb := f.lblocks[label.Obj]
+ obj := f.objectOf(label)
+ lb := f.lblocks[obj]
if lb == nil {
lb = &lblock{_goto: f.newBasicBlock(label.Name)}
if f.lblocks == nil {
- f.lblocks = make(map[*ast.Object]*lblock)
+ f.lblocks = make(map[types.Object]*lblock)
}
- f.lblocks[label.Obj] = lb
+ f.lblocks[obj] = lb
}
return lb
}
// addParam adds a (non-escaping) parameter to f.Params of the
// specified name, type and source position.
-//
func (f *Function) addParam(name string, typ types.Type, pos token.Pos) *Parameter {
v := &Parameter{
name: name,
@@ -91,7 +138,7 @@ func (f *Function) addParamObj(obj types.Object) *Parameter {
if name == "" {
name = fmt.Sprintf("arg%d", len(f.Params))
}
- param := f.addParam(name, obj.Type(), obj.Pos())
+ param := f.addParam(name, f.typ(obj.Type()), obj.Pos())
param.object = obj
return param
}
@@ -99,11 +146,10 @@ func (f *Function) addParamObj(obj types.Object) *Parameter {
// addSpilledParam declares a parameter that is pre-spilled to the
// stack; the function body will load/store the spilled location.
// Subsequent lifting will eliminate spills where possible.
-//
func (f *Function) addSpilledParam(obj types.Object) {
param := f.addParamObj(obj)
spill := &Alloc{Comment: obj.Name()}
- spill.setType(types.NewPointer(obj.Type()))
+ spill.setType(types.NewPointer(param.Type()))
spill.setPos(obj.Pos())
f.objects[obj] = spill
f.Locals = append(f.Locals, spill)
@@ -113,7 +159,6 @@ func (f *Function) addSpilledParam(obj types.Object) {
// startBody initializes the function prior to generating SSA code for its body.
// Precondition: f.Type() already set.
-//
func (f *Function) startBody() {
f.currentBlock = f.newBasicBlock("entry")
f.objects = make(map[types.Object]Value) // needed for some synthetics, e.g. init
@@ -127,7 +172,6 @@ func (f *Function) startBody() {
// f.startBody() was called. f.info != nil.
// Postcondition:
// len(f.Params) == len(f.Signature.Params) + (f.Signature.Recv() ? 1 : 0)
-//
func (f *Function) createSyntacticParams(recv *ast.FieldList, functype *ast.FuncType) {
// Receiver (at most one inner iteration).
if recv != nil {
@@ -174,7 +218,6 @@ type setNumable interface {
// numberRegisters assigns numbers to all SSA registers
// (value-defining Instructions) in f, to aid debugging.
// (Non-Instruction Values are named at construction.)
-//
func numberRegisters(f *Function) {
v := 0
for _, b := range f.Blocks {
@@ -207,7 +250,39 @@ func buildReferrers(f *Function) {
}
}
-// finishBody() finalizes the function after SSA code generation of its body.
+// mayNeedRuntimeTypes returns all of the types in the body of fn that might need runtime types.
+//
+// EXCLUSIVE_LOCKS_ACQUIRED(meth.Prog.methodsMu)
+func mayNeedRuntimeTypes(fn *Function) []types.Type {
+ // Collect all types that may need rtypes, i.e. those that flow into an interface.
+ var ts []types.Type
+ for _, bb := range fn.Blocks {
+ for _, instr := range bb.Instrs {
+ if mi, ok := instr.(*MakeInterface); ok {
+ ts = append(ts, mi.X.Type())
+ }
+ }
+ }
+
+ // Types that contain a parameterized type are considered to not be runtime types.
+ if fn.typeparams.Len() == 0 {
+ return ts // No potentially parameterized types.
+ }
+ // Filter parameterized types, in place.
+ fn.Prog.methodsMu.Lock()
+ defer fn.Prog.methodsMu.Unlock()
+ filtered := ts[:0]
+ for _, t := range ts {
+ if !fn.Prog.parameterized.isParameterized(t) {
+ filtered = append(filtered, t)
+ }
+ }
+ return filtered
+}
+
+// finishBody() finalizes the contents of the function after SSA code generation of its body.
+//
+// The function is not done being built until done() is called.
func (f *Function) finishBody() {
f.objects = nil
f.currentBlock = nil
@@ -248,23 +323,38 @@ func (f *Function) finishBody() {
// clear remaining stateful variables
f.namedResults = nil // (used by lifting)
f.info = nil
+ f.subst = nil
- numberRegisters(f)
+ numberRegisters(f) // uses f.namedRegisters
+}
- if f.Prog.mode&PrintFunctions != 0 {
- printMu.Lock()
- f.WriteTo(os.Stdout)
- printMu.Unlock()
- }
+// After this, function is done with BUILD phase.
+func (f *Function) done() {
+ assert(f.parent == nil, "done called on an anonymous function")
+
+ var visit func(*Function)
+ visit = func(f *Function) {
+ for _, anon := range f.AnonFuncs {
+ visit(anon) // anon is done building before f.
+ }
+
+ f.built = true // function is done with BUILD phase
+
+ if f.Prog.mode&PrintFunctions != 0 {
+ printMu.Lock()
+ f.WriteTo(os.Stdout)
+ printMu.Unlock()
+ }
- if f.Prog.mode&SanityCheckFunctions != 0 {
- mustSanityCheck(f, nil)
+ if f.Prog.mode&SanityCheckFunctions != 0 {
+ mustSanityCheck(f, nil)
+ }
}
+ visit(f)
}
// removeNilBlocks eliminates nils from f.Blocks and updates each
// BasicBlock.Index. Use this after any pass that may delete blocks.
-//
func (f *Function) removeNilBlocks() {
j := 0
for _, b := range f.Blocks {
@@ -285,7 +375,6 @@ func (f *Function) removeNilBlocks() {
// functions will include full debug info. This greatly increases the
// size of the instruction stream, and causes Functions to depend upon
// the ASTs, potentially keeping them live in memory for longer.
-//
func (pkg *Package) SetDebugMode(debug bool) {
// TODO(adonovan): do we want ast.File granularity?
pkg.debug = debug
@@ -299,7 +388,6 @@ func (f *Function) debugInfo() bool {
// addNamedLocal creates a local variable, adds it to function f and
// returns it. Its name and type are taken from obj. Subsequent
// calls to f.lookup(obj) will return the same local.
-//
func (f *Function) addNamedLocal(obj types.Object) *Alloc {
l := f.addLocal(obj.Type(), obj.Pos())
l.Comment = obj.Name()
@@ -313,8 +401,8 @@ func (f *Function) addLocalForIdent(id *ast.Ident) *Alloc {
// addLocal creates an anonymous local variable of type typ, adds it
// to function f and returns it. pos is the optional source location.
-//
func (f *Function) addLocal(typ types.Type, pos token.Pos) *Alloc {
+ typ = f.typ(typ)
v := &Alloc{}
v.setType(types.NewPointer(typ))
v.setPos(pos)
@@ -327,7 +415,6 @@ func (f *Function) addLocal(typ types.Type, pos token.Pos) *Alloc {
// that is local to function f or one of its enclosing functions.
// If escaping, the reference comes from a potentially escaping pointer
// expression and the referent must be heap-allocated.
-//
func (f *Function) lookup(obj types.Object, escaping bool) Value {
if v, ok := f.objects[obj]; ok {
if alloc, ok := v.(*Alloc); ok && escaping {
@@ -365,13 +452,14 @@ func (f *Function) emit(instr Instruction) Value {
// The specific formatting rules are not guaranteed and may change.
//
// Examples:
-// "math.IsNaN" // a package-level function
-// "(*bytes.Buffer).Bytes" // a declared method or a wrapper
-// "(*bytes.Buffer).Bytes$thunk" // thunk (func wrapping method; receiver is param 0)
-// "(*bytes.Buffer).Bytes$bound" // bound (func wrapping method; receiver supplied by closure)
-// "main.main$1" // an anonymous function in main
-// "main.init#1" // a declared init function
-// "main.init" // the synthesized package initializer
+//
+// "math.IsNaN" // a package-level function
+// "(*bytes.Buffer).Bytes" // a declared method or a wrapper
+// "(*bytes.Buffer).Bytes$thunk" // thunk (func wrapping method; receiver is param 0)
+// "(*bytes.Buffer).Bytes$bound" // bound (func wrapping method; receiver supplied by closure)
+// "main.main$1" // an anonymous function in main
+// "main.init#1" // a declared init function
+// "main.init" // the synthesized package initializer
//
// When these functions are referred to from within the same package
// (i.e. from == f.Pkg.Object), they are rendered without the package path.
@@ -381,7 +469,6 @@ func (f *Function) emit(instr Instruction) Value {
// (But two methods may have the same name "(T).f" if one is a synthetic
// wrapper promoting a non-exported method "f" from another package; in
// that case, the strings are equal but the identifiers "f" are distinct.)
-//
func (f *Function) RelString(from *types.Package) string {
// Anonymous?
if f.parent != nil {
@@ -404,7 +491,7 @@ func (f *Function) RelString(from *types.Package) string {
// Thunk?
if f.method != nil {
- return f.relMethod(from, f.method.Recv())
+ return f.relMethod(from, f.method.recv)
}
// Bound?
@@ -448,9 +535,8 @@ func (fn *Function) declaredPackage() *Package {
switch {
case fn.Pkg != nil:
return fn.Pkg // non-generic function
- // generics:
- // case fn.Origin != nil:
- // return fn.Origin.pkg // instance of a named generic function
+ case fn.topLevelOrigin != nil:
+ return fn.topLevelOrigin.Pkg // instance of a named generic function
case fn.parent != nil:
return fn.parent.declaredPackage() // instance of an anonymous [generic] function
default:
@@ -572,7 +658,6 @@ func WriteFunction(buf *bytes.Buffer, f *Function) {
// newBasicBlock adds to f a new basic block and returns it. It does
// not automatically become the current block for subsequent calls to emit.
// comment is an optional string for more readable debugging output.
-//
func (f *Function) newBasicBlock(comment string) *BasicBlock {
b := &BasicBlock{
Index: len(f.Blocks),
@@ -598,7 +683,6 @@ func (f *Function) newBasicBlock(comment string) *BasicBlock {
// "reflect" package, etc.
//
// TODO(adonovan): think harder about the API here.
-//
func (prog *Program) NewFunction(name string, sig *types.Signature, provenance string) *Function {
return &Function{Prog: prog, name: name, Signature: sig, Synthetic: provenance}
}
@@ -616,5 +700,4 @@ func (n extentNode) End() token.Pos { return n[1] }
// the result is the *ast.FuncDecl or *ast.FuncLit that declared the
// function. Otherwise, it is an opaque Node providing only position
// information; this avoids pinning the AST in memory.
-//
func (f *Function) Syntax() ast.Node { return f.syntax }
diff --git a/go/ssa/instantiate.go b/go/ssa/instantiate.go
new file mode 100644
index 000000000..38249dea2
--- /dev/null
+++ b/go/ssa/instantiate.go
@@ -0,0 +1,177 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "fmt"
+ "go/ast"
+ "go/types"
+
+ "golang.org/x/tools/internal/typeparams"
+)
+
+// _Instances returns all of the instances generated by runtime types for this function in an unspecified order.
+//
+// Thread-safe.
+//
+// This is an experimental interface! It may change without warning.
+func (prog *Program) _Instances(fn *Function) []*Function {
+ if fn.typeparams.Len() == 0 || len(fn.typeargs) > 0 {
+ return nil
+ }
+
+ prog.methodsMu.Lock()
+ defer prog.methodsMu.Unlock()
+ return prog.instances[fn].list()
+}
+
+// A set of instantiations of a generic function fn.
+type instanceSet struct {
+ fn *Function // fn.typeparams.Len() > 0 and len(fn.typeargs) == 0.
+ instances map[*typeList]*Function // canonical type arguments to an instance.
+ syntax *ast.FuncDecl // fn.syntax copy for instantiating after fn is done. nil on synthetic packages.
+ info *types.Info // fn.pkg.info copy for building after fn is done.. nil on synthetic packages.
+
+ // TODO(taking): Consider ways to allow for clearing syntax and info when done building.
+ // May require a public API change as MethodValue can request these be built after prog.Build() is done.
+}
+
+func (insts *instanceSet) list() []*Function {
+ if insts == nil {
+ return nil
+ }
+
+ fns := make([]*Function, 0, len(insts.instances))
+ for _, fn := range insts.instances {
+ fns = append(fns, fn)
+ }
+ return fns
+}
+
+// createInstanceSet adds a new instanceSet for a generic function fn if one does not exist.
+//
+// Precondition: fn is a package level declaration (function or method).
+//
+// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodMu)
+func (prog *Program) createInstanceSet(fn *Function) {
+ assert(fn.typeparams.Len() > 0 && len(fn.typeargs) == 0, "Can only create instance sets for generic functions")
+
+ prog.methodsMu.Lock()
+ defer prog.methodsMu.Unlock()
+
+ syntax, _ := fn.syntax.(*ast.FuncDecl)
+ assert((syntax == nil) == (fn.syntax == nil), "fn.syntax is either nil or a *ast.FuncDecl")
+
+ if _, ok := prog.instances[fn]; !ok {
+ prog.instances[fn] = &instanceSet{
+ fn: fn,
+ syntax: syntax,
+ info: fn.info,
+ }
+ }
+}
+
+// needsInstance returns a Function that is the instantiation of fn with the type arguments targs.
+//
+// Any CREATEd instance is added to cr.
+//
+// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodMu)
+func (prog *Program) needsInstance(fn *Function, targs []types.Type, cr *creator) *Function {
+ prog.methodsMu.Lock()
+ defer prog.methodsMu.Unlock()
+
+ return prog.lookupOrCreateInstance(fn, targs, cr)
+}
+
+// lookupOrCreateInstance returns a Function that is the instantiation of fn with the type arguments targs.
+//
+// Any CREATEd instance is added to cr.
+//
+// EXCLUSIVE_LOCKS_REQUIRED(prog.methodMu)
+func (prog *Program) lookupOrCreateInstance(fn *Function, targs []types.Type, cr *creator) *Function {
+ return prog.instances[fn].lookupOrCreate(targs, &prog.parameterized, cr)
+}
+
+// lookupOrCreate returns the instantiation of insts.fn using targs.
+// If the instantiation is created, this is added to cr.
+func (insts *instanceSet) lookupOrCreate(targs []types.Type, parameterized *tpWalker, cr *creator) *Function {
+ if insts.instances == nil {
+ insts.instances = make(map[*typeList]*Function)
+ }
+
+ fn := insts.fn
+ prog := fn.Prog
+
+ // canonicalize on a tuple of targs. Sig is not unique.
+ //
+ // func A[T any]() {
+ // var x T
+ // fmt.Println("%T", x)
+ // }
+ key := prog.canon.List(targs)
+ if inst, ok := insts.instances[key]; ok {
+ return inst
+ }
+
+ // CREATE instance/instantiation wrapper
+ var syntax ast.Node
+ if insts.syntax != nil {
+ syntax = insts.syntax
+ }
+
+ var sig *types.Signature
+ var obj *types.Func
+ if recv := fn.Signature.Recv(); recv != nil {
+ // method
+ m := fn.object.(*types.Func)
+ obj = prog.canon.instantiateMethod(m, targs, prog.ctxt)
+ sig = obj.Type().(*types.Signature)
+ } else {
+ instSig, err := typeparams.Instantiate(prog.ctxt, fn.Signature, targs, false)
+ if err != nil {
+ panic(err)
+ }
+ instance, ok := instSig.(*types.Signature)
+ if !ok {
+ panic("Instantiate of a Signature returned a non-signature")
+ }
+ obj = fn.object.(*types.Func) // instantiation does not exist yet
+ sig = prog.canon.Type(instance).(*types.Signature)
+ }
+
+ var synthetic string
+ var subst *subster
+
+ concrete := !parameterized.anyParameterized(targs)
+
+ if prog.mode&InstantiateGenerics != 0 && concrete {
+ synthetic = fmt.Sprintf("instance of %s", fn.Name())
+ scope := typeparams.OriginMethod(obj).Scope()
+ subst = makeSubster(prog.ctxt, scope, fn.typeparams, targs, false)
+ } else {
+ synthetic = fmt.Sprintf("instantiation wrapper of %s", fn.Name())
+ }
+
+ name := fmt.Sprintf("%s%s", fn.Name(), targs) // may not be unique
+ instance := &Function{
+ name: name,
+ object: obj,
+ Signature: sig,
+ Synthetic: synthetic,
+ syntax: syntax,
+ topLevelOrigin: fn,
+ pos: obj.Pos(),
+ Pkg: nil,
+ Prog: fn.Prog,
+ typeparams: fn.typeparams, // share with origin
+ typeargs: targs,
+ info: insts.info, // on synthetic packages info is nil.
+ subst: subst,
+ }
+
+ cr.Add(instance)
+ insts.instances[key] = instance
+ return instance
+}
diff --git a/go/ssa/instantiate_test.go b/go/ssa/instantiate_test.go
new file mode 100644
index 000000000..cd33e7e65
--- /dev/null
+++ b/go/ssa/instantiate_test.go
@@ -0,0 +1,361 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// Note: Tests use unexported method _Instances.
+
+import (
+ "bytes"
+ "fmt"
+ "go/types"
+ "reflect"
+ "sort"
+ "strings"
+ "testing"
+
+ "golang.org/x/tools/go/loader"
+ "golang.org/x/tools/internal/typeparams"
+)
+
+// loadProgram creates loader.Program out of p.
+func loadProgram(p string) (*loader.Program, error) {
+ // Parse
+ var conf loader.Config
+ f, err := conf.ParseFile("<input>", p)
+ if err != nil {
+ return nil, fmt.Errorf("parse: %v", err)
+ }
+ conf.CreateFromFiles("p", f)
+
+ // Load
+ lprog, err := conf.Load()
+ if err != nil {
+ return nil, fmt.Errorf("Load: %v", err)
+ }
+ return lprog, nil
+}
+
+// buildPackage builds and returns ssa representation of package pkg of lprog.
+func buildPackage(lprog *loader.Program, pkg string, mode BuilderMode) *Package {
+ prog := NewProgram(lprog.Fset, mode)
+
+ for _, info := range lprog.AllPackages {
+ prog.CreatePackage(info.Pkg, info.Files, &info.Info, info.Importable)
+ }
+
+ p := prog.Package(lprog.Package(pkg).Pkg)
+ p.Build()
+ return p
+}
+
+// TestNeedsInstance ensures that new method instances can be created via needsInstance,
+// that TypeArgs are as expected, and can be accessed via _Instances.
+func TestNeedsInstance(t *testing.T) {
+ if !typeparams.Enabled {
+ return
+ }
+ const input = `
+package p
+
+import "unsafe"
+
+type Pointer[T any] struct {
+ v unsafe.Pointer
+}
+
+func (x *Pointer[T]) Load() *T {
+ return (*T)(LoadPointer(&x.v))
+}
+
+func LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer)
+`
+ // The SSA members for this package should look something like this:
+ // func LoadPointer func(addr *unsafe.Pointer) (val unsafe.Pointer)
+ // type Pointer struct{v unsafe.Pointer}
+ // method (*Pointer[T any]) Load() *T
+ // func init func()
+ // var init$guard bool
+
+ lprog, err := loadProgram(input)
+ if err != err {
+ t.Fatal(err)
+ }
+
+ for _, mode := range []BuilderMode{BuilderMode(0), InstantiateGenerics} {
+ // Create and build SSA
+ p := buildPackage(lprog, "p", mode)
+ prog := p.Prog
+
+ ptr := p.Type("Pointer").Type().(*types.Named)
+ if ptr.NumMethods() != 1 {
+ t.Fatalf("Expected Pointer to have 1 method. got %d", ptr.NumMethods())
+ }
+
+ obj := ptr.Method(0)
+ if obj.Name() != "Load" {
+ t.Errorf("Expected Pointer to have method named 'Load'. got %q", obj.Name())
+ }
+
+ meth := prog.FuncValue(obj)
+
+ var cr creator
+ intSliceTyp := types.NewSlice(types.Typ[types.Int])
+ instance := prog.needsInstance(meth, []types.Type{intSliceTyp}, &cr)
+ if len(cr) != 1 {
+ t.Errorf("Expected first instance to create a function. got %d created functions", len(cr))
+ }
+ if instance.Origin() != meth {
+ t.Errorf("Expected Origin of %s to be %s. got %s", instance, meth, instance.Origin())
+ }
+ if len(instance.TypeArgs()) != 1 || !types.Identical(instance.TypeArgs()[0], intSliceTyp) {
+ t.Errorf("Expected TypeArgs of %s to be %v. got %v", instance, []types.Type{intSliceTyp}, instance.typeargs)
+ }
+ instances := prog._Instances(meth)
+ if want := []*Function{instance}; !reflect.DeepEqual(instances, want) {
+ t.Errorf("Expected instances of %s to be %v. got %v", meth, want, instances)
+ }
+
+ // A second request with an identical type returns the same Function.
+ second := prog.needsInstance(meth, []types.Type{types.NewSlice(types.Typ[types.Int])}, &cr)
+ if second != instance || len(cr) != 1 {
+ t.Error("Expected second identical instantiation to not create a function")
+ }
+
+ // Add a second instance.
+ inst2 := prog.needsInstance(meth, []types.Type{types.NewSlice(types.Typ[types.Uint])}, &cr)
+ instances = prog._Instances(meth)
+
+ // Note: instance.Name() < inst2.Name()
+ sort.Slice(instances, func(i, j int) bool {
+ return instances[i].Name() < instances[j].Name()
+ })
+ if want := []*Function{instance, inst2}; !reflect.DeepEqual(instances, want) {
+ t.Errorf("Expected instances of %s to be %v. got %v", meth, want, instances)
+ }
+
+ // build and sanity check manually created instance.
+ var b builder
+ b.buildFunction(instance)
+ var buf bytes.Buffer
+ if !sanityCheck(instance, &buf) {
+ t.Errorf("sanityCheck of %s failed with: %s", instance, buf.String())
+ }
+ }
+}
+
+// TestCallsToInstances checks that calles of calls to generic functions,
+// without monomorphization, are wrappers around the origin generic function.
+func TestCallsToInstances(t *testing.T) {
+ if !typeparams.Enabled {
+ return
+ }
+ const input = `
+package p
+
+type I interface {
+ Foo()
+}
+
+type A int
+func (a A) Foo() {}
+
+type J[T any] interface{ Bar() T }
+type K[T any] struct{ J[T] }
+
+func Id[T any] (t T) T {
+ return t
+}
+
+func Lambda[T I]() func() func(T) {
+ return func() func(T) {
+ return T.Foo
+ }
+}
+
+func NoOp[T any]() {}
+
+func Bar[T interface { Foo(); ~int | ~string }, U any] (t T, u U) {
+ Id[U](u)
+ Id[T](t)
+}
+
+func Make[T any]() interface{} {
+ NoOp[K[T]]()
+ return nil
+}
+
+func entry(i int, a A) int {
+ Lambda[A]()()(a)
+
+ x := Make[int]()
+ if j, ok := x.(interface{ Bar() int }); ok {
+ print(j)
+ }
+
+ Bar[A, int](a, i)
+
+ return Id[int](i)
+}
+`
+ lprog, err := loadProgram(input)
+ if err != err {
+ t.Fatal(err)
+ }
+
+ p := buildPackage(lprog, "p", SanityCheckFunctions)
+ prog := p.Prog
+
+ for _, ti := range []struct {
+ orig string
+ instance string
+ tparams string
+ targs string
+ chTypeInstrs int // number of ChangeType instructions in f's body
+ }{
+ {"Id", "Id[int]", "[T]", "[int]", 2},
+ {"Lambda", "Lambda[p.A]", "[T]", "[p.A]", 1},
+ {"Make", "Make[int]", "[T]", "[int]", 0},
+ {"NoOp", "NoOp[p.K[T]]", "[T]", "[p.K[T]]", 0},
+ } {
+ test := ti
+ t.Run(test.instance, func(t *testing.T) {
+ f := p.Members[test.orig].(*Function)
+ if f == nil {
+ t.Fatalf("origin function not found")
+ }
+
+ i := instanceOf(f, test.instance, prog)
+ if i == nil {
+ t.Fatalf("instance not found")
+ }
+
+ // for logging on failures
+ var body strings.Builder
+ i.WriteTo(&body)
+ t.Log(body.String())
+
+ if len(i.Blocks) != 1 {
+ t.Fatalf("body has more than 1 block")
+ }
+
+ if instrs := changeTypeInstrs(i.Blocks[0]); instrs != test.chTypeInstrs {
+ t.Errorf("want %v instructions; got %v", test.chTypeInstrs, instrs)
+ }
+
+ if test.tparams != tparams(i) {
+ t.Errorf("want %v type params; got %v", test.tparams, tparams(i))
+ }
+
+ if test.targs != targs(i) {
+ t.Errorf("want %v type arguments; got %v", test.targs, targs(i))
+ }
+ })
+ }
+}
+
+func instanceOf(f *Function, name string, prog *Program) *Function {
+ for _, i := range prog._Instances(f) {
+ if i.Name() == name {
+ return i
+ }
+ }
+ return nil
+}
+
+func tparams(f *Function) string {
+ tplist := f.TypeParams()
+ var tps []string
+ for i := 0; i < tplist.Len(); i++ {
+ tps = append(tps, tplist.At(i).String())
+ }
+ return fmt.Sprint(tps)
+}
+
+func targs(f *Function) string {
+ var tas []string
+ for _, ta := range f.TypeArgs() {
+ tas = append(tas, ta.String())
+ }
+ return fmt.Sprint(tas)
+}
+
+func changeTypeInstrs(b *BasicBlock) int {
+ cnt := 0
+ for _, i := range b.Instrs {
+ if _, ok := i.(*ChangeType); ok {
+ cnt++
+ }
+ }
+ return cnt
+}
+
+func TestInstanceUniqueness(t *testing.T) {
+ if !typeparams.Enabled {
+ return
+ }
+ const input = `
+package p
+
+func H[T any](t T) {
+ print(t)
+}
+
+func F[T any](t T) {
+ H[T](t)
+ H[T](t)
+ H[T](t)
+}
+
+func G[T any](t T) {
+ H[T](t)
+ H[T](t)
+}
+
+func Foo[T any, S any](t T, s S) {
+ Foo[S, T](s, t)
+ Foo[T, S](t, s)
+}
+`
+ lprog, err := loadProgram(input)
+ if err != err {
+ t.Fatal(err)
+ }
+
+ p := buildPackage(lprog, "p", SanityCheckFunctions)
+ prog := p.Prog
+
+ for _, test := range []struct {
+ orig string
+ instances string
+ }{
+ {"H", "[p.H[T] p.H[T]]"},
+ {"Foo", "[p.Foo[S T] p.Foo[T S]]"},
+ } {
+ t.Run(test.orig, func(t *testing.T) {
+ f := p.Members[test.orig].(*Function)
+ if f == nil {
+ t.Fatalf("origin function not found")
+ }
+
+ instances := prog._Instances(f)
+ sort.Slice(instances, func(i, j int) bool { return instances[i].Name() < instances[j].Name() })
+
+ if got := fmt.Sprintf("%v", instances); !reflect.DeepEqual(got, test.instances) {
+ t.Errorf("got %v instances, want %v", got, test.instances)
+ }
+ })
+ }
+}
+
+// instancesStr returns a sorted slice of string
+// representation of instances.
+func instancesStr(instances []*Function) []string {
+ var is []string
+ for _, i := range instances {
+ is = append(is, fmt.Sprintf("%v", i))
+ }
+ sort.Strings(is)
+ return is
+}
diff --git a/go/ssa/interp/interp.go b/go/ssa/interp/interp.go
index bf7862289..58cac4642 100644
--- a/go/ssa/interp/interp.go
+++ b/go/ssa/interp/interp.go
@@ -76,16 +76,16 @@ type methodSet map[string]*ssa.Function
// State shared between all interpreted goroutines.
type interpreter struct {
- osArgs []value // the value of os.Args
- prog *ssa.Program // the SSA program
- globals map[ssa.Value]*value // addresses of global variables (immutable)
- mode Mode // interpreter options
- reflectPackage *ssa.Package // the fake reflect package
- errorMethods methodSet // the method set of reflect.error, which implements the error interface.
- rtypeMethods methodSet // the method set of rtype, which implements the reflect.Type interface.
- runtimeErrorString types.Type // the runtime.errorString type
- sizes types.Sizes // the effective type-sizing function
- goroutines int32 // atomically updated
+ osArgs []value // the value of os.Args
+ prog *ssa.Program // the SSA program
+ globals map[*ssa.Global]*value // addresses of global variables (immutable)
+ mode Mode // interpreter options
+ reflectPackage *ssa.Package // the fake reflect package
+ errorMethods methodSet // the method set of reflect.error, which implements the error interface.
+ rtypeMethods methodSet // the method set of rtype, which implements the reflect.Type interface.
+ runtimeErrorString types.Type // the runtime.errorString type
+ sizes types.Sizes // the effective type-sizing function
+ goroutines int32 // atomically updated
}
type deferred struct {
@@ -131,7 +131,6 @@ func (fr *frame) get(key ssa.Value) value {
// runDefer runs a deferred call d.
// It always returns normally, but may set or clear fr.panic.
-//
func (fr *frame) runDefer(d *deferred) {
if fr.i.mode&EnableTracing != 0 {
fmt.Fprintf(os.Stderr, "%s: invoking deferred function call\n",
@@ -160,7 +159,6 @@ func (fr *frame) runDefer(d *deferred) {
//
// If there was no initial state of panic, or it was recovered from,
// runDefers returns normally.
-//
func (fr *frame) runDefers() {
for d := fr.defers; d != nil; d = d.tail {
fr.runDefer(d)
@@ -279,7 +277,7 @@ func visitInstr(fr *frame, instr ssa.Instruction) continuation {
}()
case *ssa.MakeChan:
- fr.env[instr] = make(chan value, asInt(fr.get(instr.Size)))
+ fr.env[instr] = make(chan value, asInt64(fr.get(instr.Size)))
case *ssa.Alloc:
var addr *value
@@ -294,17 +292,20 @@ func visitInstr(fr *frame, instr ssa.Instruction) continuation {
*addr = zero(deref(instr.Type()))
case *ssa.MakeSlice:
- slice := make([]value, asInt(fr.get(instr.Cap)))
+ slice := make([]value, asInt64(fr.get(instr.Cap)))
tElt := instr.Type().Underlying().(*types.Slice).Elem()
for i := range slice {
slice[i] = zero(tElt)
}
- fr.env[instr] = slice[:asInt(fr.get(instr.Len))]
+ fr.env[instr] = slice[:asInt64(fr.get(instr.Len))]
case *ssa.MakeMap:
- reserve := 0
+ var reserve int64
if instr.Reserve != nil {
- reserve = asInt(fr.get(instr.Reserve))
+ reserve = asInt64(fr.get(instr.Reserve))
+ }
+ if !fitsInt(reserve, fr.i.sizes) {
+ panic(fmt.Sprintf("ssa.MakeMap.Reserve value %d does not fit in int", reserve))
}
fr.env[instr] = makeMap(instr.Type().Underlying().(*types.Map).Key(), reserve)
@@ -325,15 +326,25 @@ func visitInstr(fr *frame, instr ssa.Instruction) continuation {
idx := fr.get(instr.Index)
switch x := x.(type) {
case []value:
- fr.env[instr] = &x[asInt(idx)]
+ fr.env[instr] = &x[asInt64(idx)]
case *value: // *array
- fr.env[instr] = &(*x).(array)[asInt(idx)]
+ fr.env[instr] = &(*x).(array)[asInt64(idx)]
default:
panic(fmt.Sprintf("unexpected x type in IndexAddr: %T", x))
}
case *ssa.Index:
- fr.env[instr] = fr.get(instr.X).(array)[asInt(fr.get(instr.Index))]
+ x := fr.get(instr.X)
+ idx := fr.get(instr.Index)
+
+ switch x := x.(type) {
+ case array:
+ fr.env[instr] = x[asInt64(idx)]
+ case string:
+ fr.env[instr] = x[asInt64(idx)]
+ default:
+ panic(fmt.Sprintf("unexpected x type in Index: %T", x))
+ }
case *ssa.Lookup:
fr.env[instr] = lookup(instr, fr.get(instr.X), fr.get(instr.Index))
@@ -426,7 +437,6 @@ func visitInstr(fr *frame, instr ssa.Instruction) continuation {
// prepareCall determines the function value and argument values for a
// function call in a Call, Go or Defer instruction, performing
// interface method lookup if needed.
-//
func prepareCall(fr *frame, call *ssa.CallCommon) (fn value, args []value) {
v := fr.get(call.Value)
if call.Method == nil {
@@ -455,7 +465,6 @@ func prepareCall(fr *frame, call *ssa.CallCommon) (fn value, args []value) {
// call interprets a call to a function (function, builtin or closure)
// fn with arguments args, returning its result.
// callpos is the position of the callsite.
-//
func call(i *interpreter, caller *frame, callpos token.Pos, fn value, args []value) value {
switch fn := fn.(type) {
case *ssa.Function:
@@ -481,7 +490,6 @@ func loc(fset *token.FileSet, pos token.Pos) string {
// callSSA interprets a call to function fn with arguments args,
// and lexical environment env, returning its result.
// callpos is the position of the callsite.
-//
func callSSA(i *interpreter, caller *frame, callpos token.Pos, fn *ssa.Function, args []value, env []value) value {
if i.mode&EnableTracing != 0 {
fset := fn.Prog.Fset
@@ -510,6 +518,12 @@ func callSSA(i *interpreter, caller *frame, callpos token.Pos, fn *ssa.Function,
panic("no code for function: " + name)
}
}
+
+ // generic function body?
+ if fn.TypeParams().Len() > 0 && len(fn.TypeArgs()) == 0 {
+ panic("interp requires ssa.BuilderMode to include InstantiateGenerics to execute generics")
+ }
+
fr.env = make(map[ssa.Value]value)
fr.block = fn.Blocks[0]
fr.locals = make([]value, len(fn.Locals))
@@ -548,7 +562,6 @@ func callSSA(i *interpreter, caller *frame, callpos token.Pos, fn *ssa.Function,
// After a recovered panic in a function with NRPs, fr.result is
// undefined and fr.block contains the block at which to resume
// control.
-//
func runFrame(fr *frame) {
defer func() {
if fr.block == nil {
@@ -641,10 +654,12 @@ func setGlobal(i *interpreter, pkg *ssa.Package, name string, v value) {
//
// The SSA program must include the "runtime" package.
//
+// Type parameterized functions must have been built with
+// InstantiateGenerics in the ssa.BuilderMode to be interpreted.
func Interpret(mainpkg *ssa.Package, mode Mode, sizes types.Sizes, filename string, args []string) (exitCode int) {
i := &interpreter{
prog: mainpkg.Prog,
- globals: make(map[ssa.Value]*value),
+ globals: make(map[*ssa.Global]*value),
mode: mode,
sizes: sizes,
goroutines: 1,
diff --git a/go/ssa/interp/interp_go120_test.go b/go/ssa/interp/interp_go120_test.go
new file mode 100644
index 000000000..d8eb2c213
--- /dev/null
+++ b/go/ssa/interp/interp_go120_test.go
@@ -0,0 +1,12 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.20
+// +build go1.20
+
+package interp_test
+
+func init() {
+ testdataTests = append(testdataTests, "slice2array.go")
+}
diff --git a/go/ssa/interp/interp_test.go b/go/ssa/interp/interp_test.go
index 1b43742c8..70ddceec7 100644
--- a/go/ssa/interp/interp_test.go
+++ b/go/ssa/interp/interp_test.go
@@ -31,6 +31,7 @@ import (
"golang.org/x/tools/go/ssa"
"golang.org/x/tools/go/ssa/interp"
"golang.org/x/tools/go/ssa/ssautil"
+ "golang.org/x/tools/internal/typeparams"
)
// Each line contains a space-separated list of $GOROOT/test/
@@ -111,6 +112,7 @@ var testdataTests = []string{
"complit.go",
"convert.go",
"coverage.go",
+ "deepequal.go",
"defer.go",
"fieldprom.go",
"ifaceconv.go",
@@ -122,6 +124,24 @@ var testdataTests = []string{
"recover.go",
"reflect.go",
"static.go",
+ "width32.go",
+
+ "fixedbugs/issue52342.go",
+}
+
+func init() {
+ if typeparams.Enabled {
+ testdataTests = append(testdataTests, "fixedbugs/issue52835.go")
+ testdataTests = append(testdataTests, "fixedbugs/issue55086.go")
+ testdataTests = append(testdataTests, "typeassert.go")
+ testdataTests = append(testdataTests, "zeros.go")
+ }
+}
+
+// Specific GOARCH to use for a test case in go.tools/go/ssa/interp/testdata/.
+// Defaults to amd64 otherwise.
+var testdataArchs = map[string]string{
+ "width32.go": "386",
}
func run(t *testing.T, input string) bool {
@@ -139,6 +159,9 @@ func run(t *testing.T, input string) bool {
ctx.GOROOT = "testdata" // fake goroot
ctx.GOOS = "linux"
ctx.GOARCH = "amd64"
+ if arch, ok := testdataArchs[filepath.Base(input)]; ok {
+ ctx.GOARCH = arch
+ }
conf := loader.Config{Build: &ctx}
if _, err := conf.FromArgs([]string{input}, true); err != nil {
@@ -169,7 +192,9 @@ func run(t *testing.T, input string) bool {
return false
}
- prog := ssautil.CreateProgram(iprog, ssa.SanityCheckFunctions)
+ bmode := ssa.InstantiateGenerics | ssa.SanityCheckFunctions
+ // bmode |= ssa.PrintFunctions // enable for debugging
+ prog := ssautil.CreateProgram(iprog, bmode)
prog.Build()
mainPkg := prog.Package(iprog.Created[0].Pkg)
@@ -179,8 +204,12 @@ func run(t *testing.T, input string) bool {
interp.CapturedOutput = new(bytes.Buffer)
+ sizes := types.SizesFor("gc", ctx.GOARCH)
hint = fmt.Sprintf("To trace execution, run:\n%% go build golang.org/x/tools/cmd/ssadump && ./ssadump -build=C -test -run --interp=T %s\n", input)
- exitCode := interp.Interpret(mainPkg, 0, &types.StdSizes{WordSize: 8, MaxAlign: 8}, input, []string{})
+ var imode interp.Mode // default mode
+ // imode |= interp.DisableRecover // enable for debugging
+ // imode |= interp.EnableTracing // enable for debugging
+ exitCode := interp.Interpret(mainPkg, imode, sizes, input, []string{})
if exitCode != 0 {
t.Fatalf("interpreting %s: exit code was %d", input, exitCode)
}
@@ -213,7 +242,6 @@ func TestTestdataFiles(t *testing.T) {
if err != nil {
log.Fatal(err)
}
-
var failures []string
for _, input := range testdataTests {
if !run(t, filepath.Join(cwd, "testdata", input)) {
@@ -234,3 +262,66 @@ func TestGorootTest(t *testing.T) {
}
printFailures(failures)
}
+
+// TestTypeparamTest runs the interpreter on runnable examples
+// in $GOROOT/test/typeparam/*.go.
+
+func TestTypeparamTest(t *testing.T) {
+ if !typeparams.Enabled {
+ return
+ }
+
+ // Skip known failures for the given reason.
+ // TODO(taking): Address these.
+ skip := map[string]string{
+ "chans.go": "interp tests do not support runtime.SetFinalizer",
+ "issue23536.go": "unknown reason",
+ "issue376214.go": "unknown issue with variadic cast on bytes",
+ "issue48042.go": "interp tests do not handle reflect.Value.SetInt",
+ "issue47716.go": "interp tests do not handle unsafe.Sizeof",
+ "issue50419.go": "interp tests do not handle dispatch to String() correctly",
+ "issue51733.go": "interp does not handle unsafe casts",
+ "ordered.go": "math.NaN() comparisons not being handled correctly",
+ "orderedmap.go": "interp tests do not support runtime.SetFinalizer",
+ "stringer.go": "unknown reason",
+ "issue48317.go": "interp tests do not support encoding/json",
+ "issue48318.go": "interp tests do not support encoding/json",
+ "issue58513.go": "interp tests do not support runtime.Caller",
+ }
+ // Collect all of the .go files in dir that are runnable.
+ dir := filepath.Join(build.Default.GOROOT, "test", "typeparam")
+ list, err := os.ReadDir(dir)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var inputs []string
+ for _, entry := range list {
+ if entry.IsDir() || !strings.HasSuffix(entry.Name(), ".go") {
+ continue // Consider standalone go files.
+ }
+ if reason := skip[entry.Name()]; reason != "" {
+ t.Logf("skipping %q due to %s.", entry.Name(), reason)
+ continue
+ }
+ input := filepath.Join(dir, entry.Name())
+ src, err := os.ReadFile(input)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Only build test files that can be compiled, or compiled and run.
+ if bytes.HasPrefix(src, []byte("// run")) && !bytes.HasPrefix(src, []byte("// rundir")) {
+ inputs = append(inputs, input)
+ } else {
+ t.Logf("Not a `// run` file: %s", entry.Name())
+ }
+ }
+
+ var failures []string
+ for _, input := range inputs {
+ t.Log("running", input)
+ if !run(t, input) {
+ failures = append(failures, input)
+ }
+ }
+ printFailures(failures)
+}
diff --git a/go/ssa/interp/map.go b/go/ssa/interp/map.go
index 92ccf9034..f5d5f230b 100644
--- a/go/ssa/interp/map.go
+++ b/go/ssa/interp/map.go
@@ -38,7 +38,7 @@ type hashmap struct {
// makeMap returns an empty initialized map of key type kt,
// preallocating space for reserve elements.
-func makeMap(kt types.Type, reserve int) value {
+func makeMap(kt types.Type, reserve int64) value {
if usesBuiltinMap(kt) {
return make(map[value]value, reserve)
}
diff --git a/go/ssa/interp/ops.go b/go/ssa/interp/ops.go
index 3bc6a4e32..39830bc8f 100644
--- a/go/ssa/interp/ops.go
+++ b/go/ssa/interp/ops.go
@@ -34,9 +34,10 @@ type exitPanic int
// constValue returns the value of the constant with the
// dynamic type tag appropriate for c.Type().
func constValue(c *ssa.Const) value {
- if c.IsNil() {
- return zero(c.Type()) // typed nil
+ if c.Value == nil {
+ return zero(c.Type()) // typed zero
}
+ // c is not a type parameter so it's underlying type is basic.
if t, ok := c.Type().Underlying().(*types.Basic); ok {
// TODO(adonovan): eliminate untyped constants from SSA form.
@@ -87,34 +88,46 @@ func constValue(c *ssa.Const) value {
panic(fmt.Sprintf("constValue: %s", c))
}
-// asInt converts x, which must be an integer, to an int suitable for
-// use as a slice or array index or operand to make().
-func asInt(x value) int {
+// fitsInt returns true if x fits in type int according to sizes.
+func fitsInt(x int64, sizes types.Sizes) bool {
+ intSize := sizes.Sizeof(types.Typ[types.Int])
+ if intSize < sizes.Sizeof(types.Typ[types.Int64]) {
+ maxInt := int64(1)<<(intSize-1) - 1
+ minInt := -int64(1) << (intSize - 1)
+ return minInt <= x && x <= maxInt
+ }
+ return true
+}
+
+// asInt64 converts x, which must be an integer, to an int64.
+//
+// Callers that need a value directly usable as an int should combine this with fitsInt().
+func asInt64(x value) int64 {
switch x := x.(type) {
case int:
- return x
+ return int64(x)
case int8:
- return int(x)
+ return int64(x)
case int16:
- return int(x)
+ return int64(x)
case int32:
- return int(x)
+ return int64(x)
case int64:
- return int(x)
+ return x
case uint:
- return int(x)
+ return int64(x)
case uint8:
- return int(x)
+ return int64(x)
case uint16:
- return int(x)
+ return int64(x)
case uint32:
- return int(x)
+ return int64(x)
case uint64:
- return int(x)
+ return int64(x)
case uintptr:
- return int(x)
+ return int64(x)
}
- panic(fmt.Sprintf("cannot convert %T to int", x))
+ panic(fmt.Sprintf("cannot convert %T to int64", x))
}
// asUint64 converts x, which must be an unsigned integer, to a uint64
@@ -268,19 +281,19 @@ func slice(x, lo, hi, max value) value {
Cap = cap(a)
}
- l := 0
+ l := int64(0)
if lo != nil {
- l = asInt(lo)
+ l = asInt64(lo)
}
- h := Len
+ h := int64(Len)
if hi != nil {
- h = asInt(hi)
+ h = asInt64(hi)
}
- m := Cap
+ m := int64(Cap)
if max != nil {
- m = asInt(max)
+ m = asInt64(max)
}
switch x := x.(type) {
@@ -295,7 +308,7 @@ func slice(x, lo, hi, max value) value {
panic(fmt.Sprintf("slice: unexpected X type: %T", x))
}
-// lookup returns x[idx] where x is a map or string.
+// lookup returns x[idx] where x is a map.
func lookup(instr *ssa.Lookup, x, idx value) value {
switch x := x.(type) { // map or string
case map[value]value, *hashmap:
@@ -315,8 +328,6 @@ func lookup(instr *ssa.Lookup, x, idx value) value {
v = tuple{v, ok}
}
return v
- case string:
- return x[asInt(idx)]
}
panic(fmt.Sprintf("unexpected x type in Lookup: %T", x))
}
@@ -324,7 +335,6 @@ func lookup(instr *ssa.Lookup, x, idx value) value {
// binop implements all arithmetic and logical binary operators for
// numeric datatypes and strings. Both operands must have identical
// dynamic type.
-//
func binop(op token.Token, t types.Type, x, y value) value {
switch op {
case token.ADD:
@@ -798,7 +808,6 @@ func binop(op token.Token, t types.Type, x, y value) value {
// appropriate for type t.
// If t is a reference type, at most one of x or y may be a nil value
// of that type.
-//
func eqnil(t types.Type, x, y value) bool {
switch t.Underlying().(type) {
case *types.Map, *types.Signature, *types.Slice:
@@ -907,7 +916,6 @@ func unop(instr *ssa.UnOp, x value) value {
// typeAssert checks whether dynamic type of itf is instr.AssertedType.
// It returns the extracted value on success, and panics on failure,
// unless instr.CommaOk, in which case it always returns a "value,ok" tuple.
-//
func typeAssert(i *interpreter, instr *ssa.TypeAssert, itf iface) value {
var v value
err := ""
@@ -924,6 +932,8 @@ func typeAssert(i *interpreter, instr *ssa.TypeAssert, itf iface) value {
} else {
err = fmt.Sprintf("interface conversion: interface is %s, not %s", itf.t, instr.AssertedType)
}
+ // Note: if instr.Underlying==true ever becomes reachable from interp check that
+ // types.Identical(itf.t.Underlying(), instr.AssertedType)
if err != "" {
if !instr.CommaOk {
@@ -944,7 +954,6 @@ func typeAssert(i *interpreter, instr *ssa.TypeAssert, itf iface) value {
// failure if "BUG" appears in the combined stdout/stderr output, even
// if it exits zero. This is a global variable shared by all
// interpreters in the same process.)
-//
var CapturedOutput *bytes.Buffer
var capturedOutputMu sync.Mutex
@@ -1117,10 +1126,11 @@ func rangeIter(x value, t types.Type) iter {
// widen widens a basic typed value x to the widest type of its
// category, one of:
-// bool, int64, uint64, float64, complex128, string.
+//
+// bool, int64, uint64, float64, complex128, string.
+//
// This is inefficient but reduces the size of the cross-product of
// cases we have to consider.
-//
func widen(x value) value {
switch y := x.(type) {
case bool, int64, uint64, float64, complex128, string, unsafe.Pointer:
@@ -1154,7 +1164,6 @@ func widen(x value) value {
// conv converts the value x of type t_src to type t_dst and returns
// the result.
// Possible cases are described with the ssa.Convert operator.
-//
func conv(t_dst, t_src types.Type, x value) value {
ut_src := t_src.Underlying()
ut_dst := t_dst.Underlying()
@@ -1388,18 +1397,15 @@ func conv(t_dst, t_src types.Type, x value) value {
// sliceToArrayPointer converts the value x of type slice to type t_dst
// a pointer to array and returns the result.
func sliceToArrayPointer(t_dst, t_src types.Type, x value) value {
- utSrc := t_src.Underlying()
- utDst := t_dst.Underlying()
-
- if _, ok := utSrc.(*types.Slice); ok {
- if utSrc, ok := utDst.(*types.Pointer); ok {
- if arr, ok := utSrc.Elem().(*types.Array); ok {
+ if _, ok := t_src.Underlying().(*types.Slice); ok {
+ if ptr, ok := t_dst.Underlying().(*types.Pointer); ok {
+ if arr, ok := ptr.Elem().Underlying().(*types.Array); ok {
x := x.([]value)
if arr.Len() > int64(len(x)) {
panic("array length is greater than slice length")
}
if x == nil {
- return zero(utSrc)
+ return zero(t_dst)
}
v := value(array(x[:arr.Len()]))
return &v
@@ -1413,7 +1419,6 @@ func sliceToArrayPointer(t_dst, t_src types.Type, x value) value {
// checkInterface checks that the method set of x implements the
// interface itype.
// On success it returns "", on failure, an error message.
-//
func checkInterface(i *interpreter, itype *types.Interface, x iface) string {
if meth, _ := types.MissingMethod(x.t, itype, true); meth != nil {
return fmt.Sprintf("interface conversion: %v is not %v: missing method %s",
diff --git a/go/ssa/interp/reflect.go b/go/ssa/interp/reflect.go
index 0a4465b0b..9f2f9e1e4 100644
--- a/go/ssa/interp/reflect.go
+++ b/go/ssa/interp/reflect.go
@@ -407,7 +407,11 @@ func ext۰reflect۰Value۰Elem(fr *frame, args []value) value {
case iface:
return makeReflectValue(x.t, x.v)
case *value:
- return makeReflectValue(rV2T(args[0]).t.Underlying().(*types.Pointer).Elem(), *x)
+ var v value
+ if x != nil {
+ v = *x
+ }
+ return makeReflectValue(rV2T(args[0]).t.Underlying().(*types.Pointer).Elem(), v)
default:
panic(fmt.Sprintf("reflect.(Value).Elem(%T)", x))
}
diff --git a/go/ssa/interp/testdata/boundmeth.go b/go/ssa/interp/testdata/boundmeth.go
index 69937f9d3..47b940685 100644
--- a/go/ssa/interp/testdata/boundmeth.go
+++ b/go/ssa/interp/testdata/boundmeth.go
@@ -123,7 +123,8 @@ func nilInterfaceMethodValue() {
r := fmt.Sprint(recover())
// runtime panic string varies across toolchains
if r != "interface conversion: interface is nil, not error" &&
- r != "runtime error: invalid memory address or nil pointer dereference" {
+ r != "runtime error: invalid memory address or nil pointer dereference" &&
+ r != "method value: interface is nil" {
panic("want runtime panic from nil interface method value, got " + r)
}
}()
diff --git a/go/ssa/interp/testdata/convert.go b/go/ssa/interp/testdata/convert.go
index 0dcf13bdd..76310405f 100644
--- a/go/ssa/interp/testdata/convert.go
+++ b/go/ssa/interp/testdata/convert.go
@@ -22,6 +22,15 @@ func main() {
},
"runtime error: negative shift amount",
)
+ wantPanic(
+ func() {
+ const maxInt32 = 1<<31 - 1
+ var idx int64 = maxInt32*2 + 8
+ x := make([]int, 16)
+ _ = x[idx]
+ },
+ "runtime error: runtime error: index out of range [4294967302] with length 16",
+ )
}
func wantPanic(fn func(), s string) {
diff --git a/go/ssa/interp/testdata/deepequal.go b/go/ssa/interp/testdata/deepequal.go
new file mode 100644
index 000000000..4fad2d657
--- /dev/null
+++ b/go/ssa/interp/testdata/deepequal.go
@@ -0,0 +1,93 @@
+// This interpreter test is designed to test the test copy of DeepEqual.
+//
+// Validate this file with 'go run' after editing.
+
+package main
+
+import "reflect"
+
+func assert(cond bool) {
+ if !cond {
+ panic("failed")
+ }
+}
+
+type X int
+type Y struct {
+ y *Y
+ z [3]int
+}
+
+var (
+ a = []int{0, 1, 2, 3}
+ b = []X{0, 1, 2, 3}
+ c = map[int]string{0: "zero", 1: "one"}
+ d = map[X]string{0: "zero", 1: "one"}
+ e = &Y{}
+ f = (*Y)(nil)
+ g = &Y{y: e}
+ h *Y
+)
+
+func init() {
+ h = &Y{} // h->h
+ h.y = h
+}
+
+func main() {
+ assert(reflect.DeepEqual(nil, nil))
+ assert(reflect.DeepEqual((*int)(nil), (*int)(nil)))
+ assert(!reflect.DeepEqual(nil, (*int)(nil)))
+
+ assert(reflect.DeepEqual(0, 0))
+ assert(!reflect.DeepEqual(0, int64(0)))
+
+ assert(!reflect.DeepEqual("", 0))
+
+ assert(reflect.DeepEqual(a, []int{0, 1, 2, 3}))
+ assert(!reflect.DeepEqual(a, []int{0, 1, 2}))
+ assert(!reflect.DeepEqual(a, []int{0, 1, 0, 3}))
+
+ assert(reflect.DeepEqual(b, []X{0, 1, 2, 3}))
+ assert(!reflect.DeepEqual(b, []X{0, 1, 0, 3}))
+
+ assert(reflect.DeepEqual(c, map[int]string{0: "zero", 1: "one"}))
+ assert(!reflect.DeepEqual(c, map[int]string{0: "zero", 1: "one", 2: "two"}))
+ assert(!reflect.DeepEqual(c, map[int]string{1: "one", 2: "two"}))
+ assert(!reflect.DeepEqual(c, map[int]string{1: "one"}))
+
+ assert(reflect.DeepEqual(d, map[X]string{0: "zero", 1: "one"}))
+ assert(!reflect.DeepEqual(d, map[int]string{0: "zero", 1: "one"}))
+
+ assert(reflect.DeepEqual(e, &Y{}))
+ assert(reflect.DeepEqual(e, &Y{z: [3]int{0, 0, 0}}))
+ assert(!reflect.DeepEqual(e, &Y{z: [3]int{0, 1, 0}}))
+
+ assert(reflect.DeepEqual(f, (*Y)(nil)))
+ assert(!reflect.DeepEqual(f, nil))
+
+ // eq_h -> eq_h. Pointer structure and elements are equal so DeepEqual.
+ eq_h := &Y{}
+ eq_h.y = eq_h
+ assert(reflect.DeepEqual(h, eq_h))
+
+ // deepeq_h->h->h. Pointed to elem of (deepeq_h, h) are (h,h). (h,h) are deep equal so h and deepeq_h are DeepEqual.
+ deepeq_h := &Y{}
+ deepeq_h.y = h
+ assert(reflect.DeepEqual(h, deepeq_h))
+
+ distinct := []interface{}{a, b, c, d, e, f, g, h}
+ for x := range distinct {
+ for y := range distinct {
+ assert((x == y) == reflect.DeepEqual(distinct[x], distinct[y]))
+ }
+ }
+
+ // anonymous struct types.
+ assert(reflect.DeepEqual(struct{}{}, struct{}{}))
+ assert(reflect.DeepEqual(struct{ x int }{1}, struct{ x int }{1}))
+ assert(!reflect.DeepEqual(struct{ x int }{}, struct{ x int }{5}))
+ assert(!reflect.DeepEqual(struct{ x, y int }{0, 1}, struct{ x int }{0}))
+ assert(reflect.DeepEqual(struct{ x, y int }{2, 3}, struct{ x, y int }{2, 3}))
+ assert(!reflect.DeepEqual(struct{ x, y int }{4, 5}, struct{ x, y int }{4, 6}))
+}
diff --git a/go/ssa/interp/testdata/fixedbugs/issue52342.go b/go/ssa/interp/testdata/fixedbugs/issue52342.go
new file mode 100644
index 000000000..2e1cc63cf
--- /dev/null
+++ b/go/ssa/interp/testdata/fixedbugs/issue52342.go
@@ -0,0 +1,17 @@
+package main
+
+func main() {
+ var d byte
+
+ d = 1
+ d <<= 256
+ if d != 0 {
+ panic(d)
+ }
+
+ d = 1
+ d >>= 256
+ if d != 0 {
+ panic(d)
+ }
+}
diff --git a/go/ssa/interp/testdata/fixedbugs/issue52835.go b/go/ssa/interp/testdata/fixedbugs/issue52835.go
new file mode 100644
index 000000000..f1d99abb7
--- /dev/null
+++ b/go/ssa/interp/testdata/fixedbugs/issue52835.go
@@ -0,0 +1,27 @@
+package main
+
+var called bool
+
+type I interface {
+ Foo()
+}
+
+type A struct{}
+
+func (a A) Foo() {
+ called = true
+}
+
+func lambda[X I]() func() func() {
+ return func() func() {
+ var x X
+ return x.Foo
+ }
+}
+
+func main() {
+ lambda[A]()()()
+ if !called {
+ panic(called)
+ }
+}
diff --git a/go/ssa/interp/testdata/fixedbugs/issue55086.go b/go/ssa/interp/testdata/fixedbugs/issue55086.go
new file mode 100644
index 000000000..84c81e91a
--- /dev/null
+++ b/go/ssa/interp/testdata/fixedbugs/issue55086.go
@@ -0,0 +1,132 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func a() (r string) {
+ s := "initial"
+ var p *struct{ i int }
+ defer func() {
+ recover()
+ r = s
+ }()
+
+ s, p.i = "set", 2 // s must be set before p.i panics
+ return "unreachable"
+}
+
+func b() (r string) {
+ s := "initial"
+ fn := func() []int { panic("") }
+ defer func() {
+ recover()
+ r = s
+ }()
+
+ s, fn()[0] = "set", 2 // fn() panics before any assignment occurs
+ return "unreachable"
+}
+
+func c() (r string) {
+ s := "initial"
+ var p map[int]int
+ defer func() {
+ recover()
+ r = s
+ }()
+
+ s, p[0] = "set", 2 //s must be set before p[0] index panics"
+ return "unreachable"
+}
+
+func d() (r string) {
+ s := "initial"
+ var p map[int]int
+ defer func() {
+ recover()
+ r = s
+ }()
+ fn := func() int { panic("") }
+
+ s, p[0] = "set", fn() // fn() panics before s is set
+ return "unreachable"
+}
+
+func e() (r string) {
+ s := "initial"
+ p := map[int]int{}
+ defer func() {
+ recover()
+ r = s
+ }()
+ fn := func() int { panic("") }
+
+ s, p[fn()] = "set", 0 // fn() panics before any assignment occurs
+ return "unreachable"
+}
+
+func f() (r string) {
+ s := "initial"
+ p := []int{}
+ defer func() {
+ recover()
+ r = s
+ }()
+
+ s, p[1] = "set", 0 // p[1] panics after s is set
+ return "unreachable"
+}
+
+func g() (r string) {
+ s := "initial"
+ p := map[any]any{}
+ defer func() {
+ recover()
+ r = s
+ }()
+ var i any = func() {}
+ s, p[i] = "set", 0 // p[i] panics after s is set
+ return "unreachable"
+}
+
+func h() (r string) {
+ fail := false
+ defer func() {
+ recover()
+ if fail {
+ r = "fail"
+ } else {
+ r = "success"
+ }
+ }()
+
+ type T struct{ f int }
+ var p *struct{ *T }
+
+ // The implicit "p.T" operand should be evaluated in phase 1 (and panic),
+ // before the "fail = true" assignment in phase 2.
+ fail, p.f = true, 0
+ return "unreachable"
+}
+
+func main() {
+ for _, test := range []struct {
+ fn func() string
+ want string
+ desc string
+ }{
+ {a, "set", "s must be set before p.i panics"},
+ {b, "initial", "p() panics before s is set"},
+ {c, "set", "s must be set before p[0] index panics"},
+ {d, "initial", "fn() panics before s is set"},
+ {e, "initial", "fn() panics before s is set"},
+ {f, "set", "p[1] panics after s is set"},
+ {g, "set", "p[i] panics after s is set"},
+ {h, "success", "p.T panics before fail is set"},
+ } {
+ if test.fn() != test.want {
+ panic(test.desc)
+ }
+ }
+}
diff --git a/go/ssa/interp/testdata/slice2array.go b/go/ssa/interp/testdata/slice2array.go
new file mode 100644
index 000000000..84e6b7330
--- /dev/null
+++ b/go/ssa/interp/testdata/slice2array.go
@@ -0,0 +1,92 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test for slice to array conversion introduced in go1.20
+// See: https://tip.golang.org/ref/spec#Conversions_from_slice_to_array_pointer
+
+package main
+
+func main() {
+ s := make([]byte, 3, 4)
+ s[0], s[1], s[2] = 2, 3, 5
+ a := ([2]byte)(s)
+ s[0] = 7
+
+ if a != [2]byte{2, 3} {
+ panic("converted from non-nil slice to array")
+ }
+
+ {
+ var s []int
+ a := ([0]int)(s)
+ if a != [0]int{} {
+ panic("zero len array is not equal")
+ }
+ }
+
+ if emptyToEmptyDoesNotPanic() {
+ panic("no panic expected from emptyToEmptyDoesNotPanic()")
+ }
+ if !threeToFourDoesPanic() {
+ panic("panic expected from threeToFourDoesPanic()")
+ }
+
+ if !fourPanicsWhileOneDoesNot[[4]int]() {
+ panic("panic expected from fourPanicsWhileOneDoesNot[[4]int]()")
+ }
+ if fourPanicsWhileOneDoesNot[[1]int]() {
+ panic("no panic expected from fourPanicsWhileOneDoesNot[[1]int]()")
+ }
+
+ if !fourPanicsWhileZeroDoesNot[[4]int]() {
+ panic("panic expected from fourPanicsWhileZeroDoesNot[[4]int]()")
+ }
+ if fourPanicsWhileZeroDoesNot[[0]int]() {
+ panic("no panic expected from fourPanicsWhileZeroDoesNot[[0]int]()")
+ }
+}
+
+func emptyToEmptyDoesNotPanic() (raised bool) {
+ defer func() {
+ if e := recover(); e != nil {
+ raised = true
+ }
+ }()
+ var s []int
+ _ = ([0]int)(s)
+ return false
+}
+
+func threeToFourDoesPanic() (raised bool) {
+ defer func() {
+ if e := recover(); e != nil {
+ raised = true
+ }
+ }()
+ s := make([]int, 3, 5)
+ _ = ([4]int)(s)
+ return false
+}
+
+func fourPanicsWhileOneDoesNot[T [1]int | [4]int]() (raised bool) {
+ defer func() {
+ if e := recover(); e != nil {
+ raised = true
+ }
+ }()
+ s := make([]int, 3, 5)
+ _ = T(s)
+ return false
+}
+
+func fourPanicsWhileZeroDoesNot[T [0]int | [4]int]() (raised bool) {
+ defer func() {
+ if e := recover(); e != nil {
+ raised = true
+ }
+ }()
+ var s []int
+ _ = T(s)
+ return false
+}
diff --git a/go/ssa/interp/testdata/slice2arrayptr.go b/go/ssa/interp/testdata/slice2arrayptr.go
index ff2d9b55c..d9d8804d3 100644
--- a/go/ssa/interp/testdata/slice2arrayptr.go
+++ b/go/ssa/interp/testdata/slice2arrayptr.go
@@ -32,6 +32,8 @@ func main() {
},
"runtime error: array length is greater than slice length",
)
+
+ f()
}
type arr [2]int
diff --git a/go/ssa/interp/testdata/src/encoding/encoding.go b/go/ssa/interp/testdata/src/encoding/encoding.go
new file mode 100644
index 000000000..73e9de494
--- /dev/null
+++ b/go/ssa/interp/testdata/src/encoding/encoding.go
@@ -0,0 +1,15 @@
+package encoding
+
+type BinaryMarshaler interface {
+ MarshalBinary() (data []byte, err error)
+}
+type BinaryUnmarshaler interface {
+ UnmarshalBinary(data []byte) error
+}
+
+type TextMarshaler interface {
+ MarshalText() (text []byte, err error)
+}
+type TextUnmarshaler interface {
+ UnmarshalText(text []byte) error
+}
diff --git a/go/ssa/interp/testdata/src/log/log.go b/go/ssa/interp/testdata/src/log/log.go
index 8897c1d21..9a57e8c1c 100644
--- a/go/ssa/interp/testdata/src/log/log.go
+++ b/go/ssa/interp/testdata/src/log/log.go
@@ -8,8 +8,16 @@ import (
func Println(v ...interface{}) {
fmt.Println(v...)
}
+func Printf(format string, v ...interface{}) {
+ fmt.Printf(format, v...)
+}
func Fatalln(v ...interface{}) {
Println(v...)
os.Exit(1)
}
+
+func Fatalf(format string, v ...interface{}) {
+ Printf(format, v...)
+ os.Exit(1)
+}
diff --git a/go/ssa/interp/testdata/src/reflect/deepequal.go b/go/ssa/interp/testdata/src/reflect/deepequal.go
new file mode 100644
index 000000000..a48e4dafa
--- /dev/null
+++ b/go/ssa/interp/testdata/src/reflect/deepequal.go
@@ -0,0 +1,109 @@
+package reflect
+
+// Not an actual implementation of DeepEqual. This is a model that supports
+// the bare minimum needed to get through testing interp.
+//
+// Does not handle cycles.
+//
+// Note: unclear if reflect.go can support this.
+func DeepEqual(x, y interface{}) bool {
+ if x == nil || y == nil {
+ return x == y
+ }
+ v1 := ValueOf(x)
+ v2 := ValueOf(y)
+
+ return deepValueEqual(v1, v2, make(map[visit]bool))
+}
+
+// Key for the visitedMap in deepValueEqual.
+type visit struct {
+ a1, a2 uintptr
+ typ Type
+}
+
+func deepValueEqual(v1, v2 Value, visited map[visit]bool) bool {
+ if !v1.IsValid() || !v2.IsValid() {
+ return v1.IsValid() == v2.IsValid()
+ }
+ if v1.Type() != v2.Type() {
+ return false
+ }
+
+ // Short circuit on reference types that can lead to cycles in comparison.
+ switch v1.Kind() {
+ case Pointer, Map, Slice, Interface:
+ k := visit{v1.Pointer(), v2.Pointer(), v1.Type()} // Not safe for moving GC.
+ if visited[k] {
+ // The comparison algorithm assumes that all checks in progress are true when it reencounters them.
+ return true
+ }
+ visited[k] = true
+ }
+
+ switch v1.Kind() {
+ case Array:
+ for i := 0; i < v1.Len(); i++ {
+ if !deepValueEqual(v1.Index(i), v2.Index(i), visited) {
+ return false
+ }
+ }
+ return true
+ case Slice:
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ if v1.Pointer() == v2.Pointer() {
+ return true
+ }
+ for i := 0; i < v1.Len(); i++ {
+ if !deepValueEqual(v1.Index(i), v2.Index(i), visited) {
+ return false
+ }
+ }
+ return true
+ case Interface:
+ if v1.IsNil() || v2.IsNil() {
+ return v1.IsNil() == v2.IsNil()
+ }
+ return deepValueEqual(v1.Elem(), v2.Elem(), visited)
+ case Ptr:
+ if v1.Pointer() == v2.Pointer() {
+ return true
+ }
+ return deepValueEqual(v1.Elem(), v2.Elem(), visited)
+ case Struct:
+ for i, n := 0, v1.NumField(); i < n; i++ {
+ if !deepValueEqual(v1.Field(i), v2.Field(i), visited) {
+ return false
+ }
+ }
+ return true
+ case Map:
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ if v1.Pointer() == v2.Pointer() {
+ return true
+ }
+ for _, k := range v1.MapKeys() {
+ val1 := v1.MapIndex(k)
+ val2 := v2.MapIndex(k)
+ if !val1.IsValid() || !val2.IsValid() || !deepValueEqual(val1, val2, visited) {
+ return false
+ }
+ }
+ return true
+ case Func:
+ return v1.IsNil() && v2.IsNil()
+ default:
+ // Normal equality suffices
+ return v1.Interface() == v2.Interface() // try interface comparison as a fallback.
+ }
+}
diff --git a/go/ssa/interp/testdata/src/reflect/reflect.go b/go/ssa/interp/testdata/src/reflect/reflect.go
index 8a23d272f..207e7dcfd 100644
--- a/go/ssa/interp/testdata/src/reflect/reflect.go
+++ b/go/ssa/interp/testdata/src/reflect/reflect.go
@@ -11,9 +11,20 @@ type Value struct {
func (Value) String() string
-func (Value) Elem() string
+func (Value) Elem() Value
func (Value) Kind() Kind
func (Value) Int() int64
+func (Value) IsValid() bool
+func (Value) IsNil() bool
+func (Value) Len() int
+func (Value) Pointer() uintptr
+func (Value) Index(i int) Value
+func (Value) Type() Type
+func (Value) Field(int) Value
+func (Value) MapIndex(Value) Value
+func (Value) MapKeys() []Value
+func (Value) NumField() int
+func (Value) Interface() interface{}
func SliceOf(Type) Type
diff --git a/go/ssa/interp/testdata/typeassert.go b/go/ssa/interp/testdata/typeassert.go
new file mode 100644
index 000000000..792a7558f
--- /dev/null
+++ b/go/ssa/interp/testdata/typeassert.go
@@ -0,0 +1,32 @@
+// Tests of type asserts.
+// Requires type parameters.
+package typeassert
+
+type fooer interface{ foo() string }
+
+type X int
+
+func (_ X) foo() string { return "x" }
+
+func f[T fooer](x T) func() string {
+ return x.foo
+}
+
+func main() {
+ if f[X](0)() != "x" {
+ panic("f[X]() != 'x'")
+ }
+
+ p := false
+ func() {
+ defer func() {
+ if recover() != nil {
+ p = true
+ }
+ }()
+ f[fooer](nil) // panics on x.foo when T is an interface and nil.
+ }()
+ if !p {
+ panic("f[fooer] did not panic")
+ }
+}
diff --git a/go/ssa/interp/testdata/width32.go b/go/ssa/interp/testdata/width32.go
new file mode 100644
index 000000000..a032ba44c
--- /dev/null
+++ b/go/ssa/interp/testdata/width32.go
@@ -0,0 +1,42 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test interpretation on 32 bit widths.
+
+package main
+
+func main() {
+ mapSize()
+}
+
+func mapSize() {
+ // Tests for the size argument of make on a map type.
+ const tooBigFor32 = 1<<33 - 1
+ wantPanic(
+ func() {
+ _ = make(map[int]int, int64(tooBigFor32))
+ },
+ "runtime error: ssa.MakeMap.Reserve value 8589934591 does not fit in int",
+ )
+
+ // TODO: Enable the following if sizeof(int) can be different for host and target.
+ // _ = make(map[int]int, tooBigFor32)
+ //
+ // Second arg to make in `make(map[int]int, tooBigFor32)` is an untyped int and
+ // is converted into an int explicitly in ssa.
+ // This has a different value on 32 and 64 bit systems.
+}
+
+func wantPanic(fn func(), s string) {
+ defer func() {
+ err := recover()
+ if err == nil {
+ panic("expected panic")
+ }
+ if got := err.(error).Error(); got != s {
+ panic("expected panic " + s + " got " + got)
+ }
+ }()
+ fn()
+}
diff --git a/go/ssa/interp/testdata/zeros.go b/go/ssa/interp/testdata/zeros.go
new file mode 100644
index 000000000..509c78a36
--- /dev/null
+++ b/go/ssa/interp/testdata/zeros.go
@@ -0,0 +1,45 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test interpretation on zero values with type params.
+package zeros
+
+func assert(cond bool, msg string) {
+ if !cond {
+ panic(msg)
+ }
+}
+
+func tp0[T int | string | float64]() T { return T(0) }
+
+func tpFalse[T ~bool]() T { return T(false) }
+
+func tpEmptyString[T string | []byte]() T { return T("") }
+
+func tpNil[T *int | []byte]() T { return T(nil) }
+
+func main() {
+ // zero values
+ var zi int
+ var zf float64
+ var zs string
+
+ assert(zi == int(0), "zero value of int is int(0)")
+ assert(zf == float64(0), "zero value of float64 is float64(0)")
+ assert(zs != string(0), "zero value of string is not string(0)")
+
+ assert(zi == tp0[int](), "zero value of int is int(0)")
+ assert(zf == tp0[float64](), "zero value of float64 is float64(0)")
+ assert(zs != tp0[string](), "zero value of string is not string(0)")
+
+ assert(zf == -0.0, "constant -0.0 is converted to 0.0")
+
+ assert(!tpFalse[bool](), "zero value of bool is false")
+
+ assert(tpEmptyString[string]() == zs, `zero value of string is string("")`)
+ assert(len(tpEmptyString[[]byte]()) == 0, `[]byte("") is empty`)
+
+ assert(tpNil[*int]() == nil, "nil is nil")
+ assert(tpNil[[]byte]() == nil, "nil is nil")
+}
diff --git a/go/ssa/lift.go b/go/ssa/lift.go
index 048e9b032..945536bbb 100644
--- a/go/ssa/lift.go
+++ b/go/ssa/lift.go
@@ -44,6 +44,8 @@ import (
"go/types"
"math/big"
"os"
+
+ "golang.org/x/tools/internal/typeparams"
)
// If true, show diagnostic information at each step of lifting.
@@ -61,7 +63,6 @@ const debugLifting = false
//
// domFrontier's methods mutate the slice's elements but not its
// length, so their receivers needn't be pointers.
-//
type domFrontier [][]*BasicBlock
func (df domFrontier) add(u, v *BasicBlock) {
@@ -127,7 +128,6 @@ func removeInstr(refs []Instruction, instr Instruction) []Instruction {
// - fn has no dead blocks (blockopt has run).
// - Def/use info (Operands and Referrers) is up-to-date.
// - The dominator tree is up-to-date.
-//
func lift(fn *Function) {
// TODO(adonovan): opt: lots of little optimizations may be
// worthwhile here, especially if they cause us to avoid
@@ -382,12 +382,10 @@ type newPhiMap map[*BasicBlock][]newPhi
// and returns true.
//
// fresh is a source of fresh ids for phi nodes.
-//
func liftAlloc(df domFrontier, alloc *Alloc, newPhis newPhiMap, fresh *int) bool {
- // Don't lift aggregates into registers, because we don't have
- // a way to express their zero-constants.
+ // TODO(taking): zero constants of aggregated types can now be lifted.
switch deref(alloc.Type()).Underlying().(type) {
- case *types.Array, *types.Struct:
+ case *types.Array, *types.Struct, *typeparams.TypeParam:
return false
}
@@ -491,7 +489,6 @@ func liftAlloc(df domFrontier, alloc *Alloc, newPhis newPhiMap, fresh *int) bool
// replaceAll replaces all intraprocedural uses of x with y,
// updating x.Referrers and y.Referrers.
// Precondition: x.Referrers() != nil, i.e. x must be local to some function.
-//
func replaceAll(x, y Value) {
var rands []*Value
pxrefs := x.Referrers()
@@ -514,7 +511,6 @@ func replaceAll(x, y Value) {
// renamed returns the value to which alloc is being renamed,
// constructing it lazily if it's the implicit zero initialization.
-//
func renamed(renaming []Value, alloc *Alloc) Value {
v := renaming[alloc.index]
if v == nil {
@@ -533,7 +529,6 @@ func renamed(renaming []Value, alloc *Alloc) Value {
// renaming is a map from *Alloc (keyed by index number) to its
// dominating stored value; newPhis[x] is the set of new φ-nodes to be
// prepended to block x.
-//
func rename(u *BasicBlock, renaming []Value, newPhis newPhiMap) {
// Each φ-node becomes the new name for its associated Alloc.
for _, np := range newPhis[u] {
diff --git a/go/ssa/lvalue.go b/go/ssa/lvalue.go
index 4d85be3ec..51122b8e8 100644
--- a/go/ssa/lvalue.go
+++ b/go/ssa/lvalue.go
@@ -16,7 +16,6 @@ import (
// An lvalue represents an assignable location that may appear on the
// left-hand side of an assignment. This is a generalization of a
// pointer to permit updates to elements of maps.
-//
type lvalue interface {
store(fn *Function, v Value) // stores v into the location
load(fn *Function) Value // loads the contents of the location
@@ -57,13 +56,12 @@ func (a *address) typ() types.Type {
}
// An element is an lvalue represented by m[k], the location of an
-// element of a map or string. These locations are not addressable
+// element of a map. These locations are not addressable
// since pointers cannot be formed from them, but they do support
-// load(), and in the case of maps, store().
-//
+// load() and store().
type element struct {
- m, k Value // map or string
- t types.Type // map element type or string byte type
+ m, k Value // map
+ t types.Type // map element type
pos token.Pos // source position of colon ({k:v}) or lbrack (m[k]=v)
}
@@ -88,16 +86,51 @@ func (e *element) store(fn *Function, v Value) {
}
func (e *element) address(fn *Function) Value {
- panic("map/string elements are not addressable")
+ panic("map elements are not addressable")
}
func (e *element) typ() types.Type {
return e.t
}
+// A lazyAddress is an lvalue whose address is the result of an instruction.
+// These work like an *address except a new address.address() Value
+// is created on each load, store and address call.
+// A lazyAddress can be used to control when a side effect (nil pointer
+// dereference, index out of bounds) of using a location happens.
+type lazyAddress struct {
+ addr func(fn *Function) Value // emit to fn the computation of the address
+ t types.Type // type of the location
+ pos token.Pos // source position
+ expr ast.Expr // source syntax of the value (not address) [debug mode]
+}
+
+func (l *lazyAddress) load(fn *Function) Value {
+ load := emitLoad(fn, l.addr(fn))
+ load.pos = l.pos
+ return load
+}
+
+func (l *lazyAddress) store(fn *Function, v Value) {
+ store := emitStore(fn, l.addr(fn), v, l.pos)
+ if l.expr != nil {
+ // store.Val is v, converted for assignability.
+ emitDebugRef(fn, l.expr, store.Val, false)
+ }
+}
+
+func (l *lazyAddress) address(fn *Function) Value {
+ addr := l.addr(fn)
+ if l.expr != nil {
+ emitDebugRef(fn, l.expr, addr, true)
+ }
+ return addr
+}
+
+func (l *lazyAddress) typ() types.Type { return l.t }
+
// A blank is a dummy variable whose name is "_".
// It is not reified: loads are illegal and stores are ignored.
-//
type blank struct{}
func (bl blank) load(fn *Function) Value {
diff --git a/go/ssa/methods.go b/go/ssa/methods.go
index 22e1f3f0a..4185618cd 100644
--- a/go/ssa/methods.go
+++ b/go/ssa/methods.go
@@ -9,40 +9,55 @@ package ssa
import (
"fmt"
"go/types"
+
+ "golang.org/x/tools/internal/typeparams"
)
// MethodValue returns the Function implementing method sel, building
// wrapper methods on demand. It returns nil if sel denotes an
-// abstract (interface) method.
+// abstract (interface or parameterized) method.
//
// Precondition: sel.Kind() == MethodVal.
//
// Thread-safe.
//
// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu)
-//
func (prog *Program) MethodValue(sel *types.Selection) *Function {
if sel.Kind() != types.MethodVal {
panic(fmt.Sprintf("MethodValue(%s) kind != MethodVal", sel))
}
T := sel.Recv()
- if isInterface(T) {
- return nil // abstract method
+ if types.IsInterface(T) {
+ return nil // abstract method (interface, possibly type param)
}
if prog.mode&LogSource != 0 {
defer logStack("MethodValue %s %v", T, sel)()
}
+ var m *Function
+ b := builder{created: &creator{}}
+
prog.methodsMu.Lock()
- defer prog.methodsMu.Unlock()
+ // Checks whether a type param is reachable from T.
+ // This is an expensive check. May need to be optimized later.
+ if !prog.parameterized.isParameterized(T) {
+ m = prog.addMethod(prog.createMethodSet(T), sel, b.created)
+ }
+ prog.methodsMu.Unlock()
- return prog.addMethod(prog.createMethodSet(T), sel)
+ if m == nil {
+ return nil // abstract method (generic)
+ }
+ for !b.done() {
+ b.buildCreated()
+ b.needsRuntimeTypes()
+ }
+ return m
}
// LookupMethod returns the implementation of the method of type T
// identified by (pkg, name). It returns nil if the method exists but
// is abstract, and panics if T has no such method.
-//
func (prog *Program) LookupMethod(T types.Type, pkg *types.Package, name string) *Function {
sel := prog.MethodSets.MethodSet(T).Lookup(pkg, name)
if sel == nil {
@@ -51,15 +66,20 @@ func (prog *Program) LookupMethod(T types.Type, pkg *types.Package, name string)
return prog.MethodValue(sel)
}
-// methodSet contains the (concrete) methods of a non-interface type.
+// methodSet contains the (concrete) methods of a concrete type (non-interface, non-parameterized).
type methodSet struct {
mapping map[string]*Function // populated lazily
complete bool // mapping contains all methods
}
-// Precondition: !isInterface(T).
+// Precondition: T is a concrete type, e.g. !isInterface(T) and not parameterized.
// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu)
func (prog *Program) createMethodSet(T types.Type) *methodSet {
+ if prog.mode&SanityCheckFunctions != 0 {
+ if types.IsInterface(T) || prog.parameterized.isParameterized(T) {
+ panic("type is interface or parameterized")
+ }
+ }
mset, ok := prog.methodSets.At(T).(*methodSet)
if !ok {
mset = &methodSet{mapping: make(map[string]*Function)}
@@ -68,22 +88,29 @@ func (prog *Program) createMethodSet(T types.Type) *methodSet {
return mset
}
+// Adds any created functions to cr.
+// Precondition: T is a concrete type, e.g. !isInterface(T) and not parameterized.
// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu)
-func (prog *Program) addMethod(mset *methodSet, sel *types.Selection) *Function {
+func (prog *Program) addMethod(mset *methodSet, sel *types.Selection, cr *creator) *Function {
if sel.Kind() == types.MethodExpr {
panic(sel)
}
id := sel.Obj().Id()
fn := mset.mapping[id]
if fn == nil {
- obj := sel.Obj().(*types.Func)
+ sel := toSelection(sel)
+ obj := sel.obj.(*types.Func)
- needsPromotion := len(sel.Index()) > 1
- needsIndirection := !isPointer(recvType(obj)) && isPointer(sel.Recv())
+ needsPromotion := len(sel.index) > 1
+ needsIndirection := !isPointer(recvType(obj)) && isPointer(sel.recv)
if needsPromotion || needsIndirection {
- fn = makeWrapper(prog, sel)
+ fn = makeWrapper(prog, sel, cr)
} else {
- fn = prog.declaredFunc(obj)
+ fn = prog.originFunc(obj)
+ if fn.typeparams.Len() > 0 { // instantiate
+ targs := receiverTypeArgs(obj)
+ fn = prog.lookupOrCreateInstance(fn, targs, cr)
+ }
}
if fn.Signature.Recv() == nil {
panic(fn) // missing receiver
@@ -100,7 +127,6 @@ func (prog *Program) addMethod(mset *methodSet, sel *types.Selection) *Function
// Thread-safe.
//
// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu)
-//
func (prog *Program) RuntimeTypes() []types.Type {
prog.methodsMu.Lock()
defer prog.methodsMu.Unlock()
@@ -116,7 +142,6 @@ func (prog *Program) RuntimeTypes() []types.Type {
// declaredFunc returns the concrete function/method denoted by obj.
// Panic ensues if there is none.
-//
func (prog *Program) declaredFunc(obj *types.Func) *Function {
if v := prog.packageLevelMember(obj); v != nil {
return v.(*Function)
@@ -132,26 +157,28 @@ func (prog *Program) declaredFunc(obj *types.Func) *Function {
// operand of some MakeInterface instruction, and for the type of
// every exported package member.
//
+// Adds any created functions to cr.
+//
// Precondition: T is not a method signature (*Signature with Recv()!=nil).
+// Precondition: T is not parameterized.
//
-// Thread-safe. (Called via emitConv from multiple builder goroutines.)
+// Thread-safe. (Called via Package.build from multiple builder goroutines.)
//
// TODO(adonovan): make this faster. It accounts for 20% of SSA build time.
//
// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu)
-//
-func (prog *Program) needMethodsOf(T types.Type) {
+func (prog *Program) needMethodsOf(T types.Type, cr *creator) {
prog.methodsMu.Lock()
- prog.needMethods(T, false)
+ prog.needMethods(T, false, cr)
prog.methodsMu.Unlock()
}
// Precondition: T is not a method signature (*Signature with Recv()!=nil).
+// Precondition: T is not parameterized.
// Recursive case: skip => don't create methods for T.
//
// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu)
-//
-func (prog *Program) needMethods(T types.Type, skip bool) {
+func (prog *Program) needMethods(T types.Type, skip bool, cr *creator) {
// Each package maintains its own set of types it has visited.
if prevSkip, ok := prog.runtimeTypes.At(T).(bool); ok {
// needMethods(T) was previously called
@@ -163,14 +190,14 @@ func (prog *Program) needMethods(T types.Type, skip bool) {
tmset := prog.MethodSets.MethodSet(T)
- if !skip && !isInterface(T) && tmset.Len() > 0 {
+ if !skip && !types.IsInterface(T) && tmset.Len() > 0 {
// Create methods of T.
mset := prog.createMethodSet(T)
if !mset.complete {
mset.complete = true
n := tmset.Len()
for i := 0; i < n; i++ {
- prog.addMethod(mset, tmset.At(i))
+ prog.addMethod(mset, tmset.At(i), cr)
}
}
}
@@ -178,8 +205,8 @@ func (prog *Program) needMethods(T types.Type, skip bool) {
// Recursion over signatures of each method.
for i := 0; i < tmset.Len(); i++ {
sig := tmset.At(i).Type().(*types.Signature)
- prog.needMethods(sig.Params(), false)
- prog.needMethods(sig.Results(), false)
+ prog.needMethods(sig.Params(), false, cr)
+ prog.needMethods(sig.Results(), false, cr)
}
switch t := T.(type) {
@@ -190,49 +217,55 @@ func (prog *Program) needMethods(T types.Type, skip bool) {
// nop---handled by recursion over method set.
case *types.Pointer:
- prog.needMethods(t.Elem(), false)
+ prog.needMethods(t.Elem(), false, cr)
case *types.Slice:
- prog.needMethods(t.Elem(), false)
+ prog.needMethods(t.Elem(), false, cr)
case *types.Chan:
- prog.needMethods(t.Elem(), false)
+ prog.needMethods(t.Elem(), false, cr)
case *types.Map:
- prog.needMethods(t.Key(), false)
- prog.needMethods(t.Elem(), false)
+ prog.needMethods(t.Key(), false, cr)
+ prog.needMethods(t.Elem(), false, cr)
case *types.Signature:
if t.Recv() != nil {
panic(fmt.Sprintf("Signature %s has Recv %s", t, t.Recv()))
}
- prog.needMethods(t.Params(), false)
- prog.needMethods(t.Results(), false)
+ prog.needMethods(t.Params(), false, cr)
+ prog.needMethods(t.Results(), false, cr)
case *types.Named:
// A pointer-to-named type can be derived from a named
// type via reflection. It may have methods too.
- prog.needMethods(types.NewPointer(T), false)
+ prog.needMethods(types.NewPointer(T), false, cr)
// Consider 'type T struct{S}' where S has methods.
// Reflection provides no way to get from T to struct{S},
// only to S, so the method set of struct{S} is unwanted,
// so set 'skip' flag during recursion.
- prog.needMethods(t.Underlying(), true)
+ prog.needMethods(t.Underlying(), true, cr)
case *types.Array:
- prog.needMethods(t.Elem(), false)
+ prog.needMethods(t.Elem(), false, cr)
case *types.Struct:
for i, n := 0, t.NumFields(); i < n; i++ {
- prog.needMethods(t.Field(i).Type(), false)
+ prog.needMethods(t.Field(i).Type(), false, cr)
}
case *types.Tuple:
for i, n := 0, t.Len(); i < n; i++ {
- prog.needMethods(t.At(i).Type(), false)
+ prog.needMethods(t.At(i).Type(), false, cr)
}
+ case *typeparams.TypeParam:
+ panic(T) // type parameters are always abstract.
+
+ case *typeparams.Union:
+ // nop
+
default:
panic(T)
}
diff --git a/go/ssa/methods_test.go b/go/ssa/methods_test.go
new file mode 100644
index 000000000..8391cf6d7
--- /dev/null
+++ b/go/ssa/methods_test.go
@@ -0,0 +1,96 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa_test
+
+import (
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "testing"
+
+ "golang.org/x/tools/go/ssa"
+ "golang.org/x/tools/go/ssa/ssautil"
+ "golang.org/x/tools/internal/typeparams"
+)
+
+// Tests that MethodValue returns the expected method.
+func TestMethodValue(t *testing.T) {
+ if !typeparams.Enabled {
+ t.Skip("TestMethodValue requires type parameters")
+ }
+ input := `
+package p
+
+type I interface{ M() }
+
+type S int
+func (S) M() {}
+type R[T any] struct{ S }
+
+var i I
+var s S
+var r R[string]
+
+func selections[T any]() {
+ _ = i.M
+ _ = s.M
+ _ = r.M
+
+ var v R[T]
+ _ = v.M
+}
+`
+
+ // Parse the file.
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, "input.go", input, 0)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ // Build an SSA program from the parsed file.
+ p, info, err := ssautil.BuildPackage(&types.Config{}, fset,
+ types.NewPackage("p", ""), []*ast.File{f}, ssa.SanityCheckFunctions)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ // Collect all of the *types.Selection in the function "selections".
+ var selections []*types.Selection
+ for _, decl := range f.Decls {
+ if fn, ok := decl.(*ast.FuncDecl); ok && fn.Name.Name == "selections" {
+ for _, stmt := range fn.Body.List {
+ if assign, ok := stmt.(*ast.AssignStmt); ok {
+ sel := assign.Rhs[0].(*ast.SelectorExpr)
+ selections = append(selections, info.Selections[sel])
+ }
+ }
+ }
+ }
+
+ wants := map[string]string{
+ "method (p.S) M()": "(p.S).M",
+ "method (p.R[string]) M()": "(p.R[string]).M",
+ "method (p.I) M()": "nil", // interface
+ "method (p.R[T]) M()": "nil", // parameterized
+ }
+ if len(wants) != len(selections) {
+ t.Fatalf("Wanted %d selections. got %d", len(wants), len(selections))
+ }
+ for _, selection := range selections {
+ var got string
+ if m := p.Prog.MethodValue(selection); m != nil {
+ got = m.String()
+ } else {
+ got = "nil"
+ }
+ if want := wants[selection.String()]; want != got {
+ t.Errorf("p.Prog.MethodValue(%s) expected %q. got %q", selection, want, got)
+ }
+ }
+}
diff --git a/go/ssa/mode.go b/go/ssa/mode.go
index 298f24b91..8381639a5 100644
--- a/go/ssa/mode.go
+++ b/go/ssa/mode.go
@@ -15,9 +15,8 @@ import (
//
// *BuilderMode satisfies the flag.Value interface. Example:
//
-// var mode = ssa.BuilderMode(0)
-// func init() { flag.Var(&mode, "build", ssa.BuilderModeDoc) }
-//
+// var mode = ssa.BuilderMode(0)
+// func init() { flag.Var(&mode, "build", ssa.BuilderModeDoc) }
type BuilderMode uint
const (
@@ -29,6 +28,7 @@ const (
BuildSerially // Build packages serially, not in parallel.
GlobalDebug // Enable debug info for all packages
BareInits // Build init functions without guards or calls to dependent inits
+ InstantiateGenerics // Instantiate generics functions (monomorphize) while building
)
const BuilderModeDoc = `Options controlling the SSA builder.
@@ -41,6 +41,7 @@ S log [S]ource locations as SSA builder progresses.
L build distinct packages seria[L]ly instead of in parallel.
N build [N]aive SSA form: don't replace local loads/stores with registers.
I build bare [I]nit functions: no init guards or calls to dependent inits.
+G instantiate [G]eneric function bodies via monomorphization
`
func (m BuilderMode) String() string {
@@ -69,6 +70,9 @@ func (m BuilderMode) String() string {
if m&BareInits != 0 {
buf.WriteByte('I')
}
+ if m&InstantiateGenerics != 0 {
+ buf.WriteByte('G')
+ }
return buf.String()
}
@@ -93,6 +97,8 @@ func (m *BuilderMode) Set(s string) error {
mode |= BuildSerially
case 'I':
mode |= BareInits
+ case 'G':
+ mode |= InstantiateGenerics
default:
return fmt.Errorf("unknown BuilderMode option: %q", c)
}
diff --git a/go/ssa/parameterized.go b/go/ssa/parameterized.go
index 956718cd7..3fc4348fc 100644
--- a/go/ssa/parameterized.go
+++ b/go/ssa/parameterized.go
@@ -17,7 +17,7 @@ type tpWalker struct {
seen map[types.Type]bool
}
-// isParameterized returns true when typ contains any type parameters.
+// isParameterized returns true when typ reaches any type parameter.
func (w *tpWalker) isParameterized(typ types.Type) (res bool) {
// NOTE: Adapted from go/types/infer.go. Try to keep in sync.
@@ -101,6 +101,7 @@ func (w *tpWalker) isParameterized(typ types.Type) (res bool) {
return true
}
}
+ return w.isParameterized(t.Underlying()) // recurse for types local to parameterized functions
case *typeparams.TypeParam:
return true
@@ -111,3 +112,12 @@ func (w *tpWalker) isParameterized(typ types.Type) (res bool) {
return false
}
+
+func (w *tpWalker) anyParameterized(ts []types.Type) bool {
+ for _, t := range ts {
+ if w.isParameterized(t) {
+ return true
+ }
+ }
+ return false
+}
diff --git a/go/ssa/print.go b/go/ssa/print.go
index d0f3bbf7e..8b783196e 100644
--- a/go/ssa/print.go
+++ b/go/ssa/print.go
@@ -17,6 +17,7 @@ import (
"strings"
"golang.org/x/tools/go/types/typeutil"
+ "golang.org/x/tools/internal/typeparams"
)
// relName returns the name of v relative to i.
@@ -24,7 +25,6 @@ import (
// Functions (including methods) and Globals use RelString and
// all types are displayed with relType, so that only cross-package
// references are package-qualified.
-//
func relName(v Value, i Instruction) string {
var from *types.Package
if i != nil {
@@ -51,6 +51,14 @@ func relType(t types.Type, from *types.Package) string {
return s
}
+func relTerm(term *typeparams.Term, from *types.Package) string {
+ s := relType(term.Type(), from)
+ if term.Tilde() {
+ return "~" + s
+ }
+ return s
+}
+
func relString(m Member, from *types.Package) string {
// NB: not all globals have an Object (e.g. init$guard),
// so use Package().Object not Object.Package().
@@ -174,6 +182,24 @@ func (v *ChangeInterface) String() string { return printConv("change interfa
func (v *SliceToArrayPointer) String() string { return printConv("slice to array pointer", v, v.X) }
func (v *MakeInterface) String() string { return printConv("make", v, v.X) }
+func (v *MultiConvert) String() string {
+ from := v.Parent().relPkg()
+
+ var b strings.Builder
+ b.WriteString(printConv("multiconvert", v, v.X))
+ b.WriteString(" [")
+ for i, s := range v.from {
+ for j, d := range v.to {
+ if i != 0 || j != 0 {
+ b.WriteString(" | ")
+ }
+ fmt.Fprintf(&b, "%s <- %s", relTerm(d, from), relTerm(s, from))
+ }
+ }
+ b.WriteString("]")
+ return b.String()
+}
+
func (v *MakeClosure) String() string {
var b bytes.Buffer
fmt.Fprintf(&b, "make closure %s", relName(v.Fn, v))
@@ -233,7 +259,7 @@ func (v *MakeChan) String() string {
}
func (v *FieldAddr) String() string {
- st := deref(v.X.Type()).Underlying().(*types.Struct)
+ st := typeparams.CoreType(deref(v.X.Type())).(*types.Struct)
// Be robust against a bad index.
name := "?"
if 0 <= v.Field && v.Field < st.NumFields() {
@@ -243,7 +269,7 @@ func (v *FieldAddr) String() string {
}
func (v *Field) String() string {
- st := v.X.Type().Underlying().(*types.Struct)
+ st := typeparams.CoreType(v.X.Type()).(*types.Struct)
// Be robust against a bad index.
name := "?"
if 0 <= v.Field && v.Field < st.NumFields() {
diff --git a/go/ssa/sanity.go b/go/ssa/sanity.go
index 6e65d760d..88ad374de 100644
--- a/go/ssa/sanity.go
+++ b/go/ssa/sanity.go
@@ -30,7 +30,6 @@ type sanity struct {
//
// Sanity-checking is intended to facilitate the debugging of code
// transformation passes.
-//
func sanityCheck(fn *Function, reporter io.Writer) bool {
if reporter == nil {
reporter = os.Stderr
@@ -40,7 +39,6 @@ func sanityCheck(fn *Function, reporter io.Writer) bool {
// mustSanityCheck is like sanityCheck but panics instead of returning
// a negative result.
-//
func mustSanityCheck(fn *Function, reporter io.Writer) {
if !sanityCheck(fn, reporter) {
fn.WriteTo(os.Stderr)
@@ -110,6 +108,9 @@ func (s *sanity) checkInstr(idx int, instr Instruction) {
for i, e := range instr.Edges {
if e == nil {
s.errorf("phi node '%s' has no value for edge #%d from %s", instr.Comment, i, s.block.Preds[i])
+ } else if !types.Identical(instr.typ, e.Type()) {
+ s.errorf("phi node '%s' has a different type (%s) for edge #%d from %s (%s)",
+ instr.Comment, instr.Type(), i, s.block.Preds[i], e.Type())
}
}
}
@@ -134,12 +135,12 @@ func (s *sanity) checkInstr(idx int, instr Instruction) {
case *ChangeType:
case *SliceToArrayPointer:
case *Convert:
- if _, ok := instr.X.Type().Underlying().(*types.Basic); !ok {
- if _, ok := instr.Type().Underlying().(*types.Basic); !ok {
- s.errorf("convert %s -> %s: at least one type must be basic", instr.X.Type(), instr.Type())
+ if from := instr.X.Type(); !isBasicConvTypes(typeSetOf(from)) {
+ if to := instr.Type(); !isBasicConvTypes(typeSetOf(to)) {
+ s.errorf("convert %s -> %s: at least one type must be basic (or all basic, []byte, or []rune)", from, to)
}
}
-
+ case *MultiConvert:
case *Defer:
case *Extract:
case *Field:
@@ -404,6 +405,8 @@ func (s *sanity) checkFunction(fn *Function) bool {
// - check params match signature
// - check transient fields are nil
// - warn if any fn.Locals do not appear among block instructions.
+
+ // TODO(taking): Sanity check origin, typeparams, and typeargs.
s.fn = fn
if fn.Prog == nil {
s.errorf("nil Prog")
@@ -419,14 +422,23 @@ func (s *sanity) checkFunction(fn *Function) bool {
if strings.HasPrefix(fn.Synthetic, "wrapper ") ||
strings.HasPrefix(fn.Synthetic, "bound ") ||
strings.HasPrefix(fn.Synthetic, "thunk ") ||
- strings.HasSuffix(fn.name, "Error") {
+ strings.HasSuffix(fn.name, "Error") ||
+ strings.HasPrefix(fn.Synthetic, "instance ") ||
+ strings.HasPrefix(fn.Synthetic, "instantiation ") ||
+ (fn.parent != nil && len(fn.typeargs) > 0) /* anon fun in instance */ {
// ok
} else {
s.errorf("nil Pkg")
}
}
if src, syn := fn.Synthetic == "", fn.Syntax() != nil; src != syn {
- s.errorf("got fromSource=%t, hasSyntax=%t; want same values", src, syn)
+ if len(fn.typeargs) > 0 && fn.Prog.mode&InstantiateGenerics != 0 {
+ // ok (instantiation with InstantiateGenerics on)
+ } else if fn.topLevelOrigin != nil && len(fn.typeargs) > 0 {
+ // ok (we always have the syntax set for instantiation)
+ } else {
+ s.errorf("got fromSource=%t, hasSyntax=%t; want same values", src, syn)
+ }
}
for i, l := range fn.Locals {
if l.Parent() != fn {
@@ -488,6 +500,9 @@ func (s *sanity) checkFunction(fn *Function) bool {
if anon.Parent() != fn {
s.errorf("AnonFuncs[%d]=%s but %s.Parent()=%s", i, anon, anon, anon.Parent())
}
+ if i != int(anon.anonIdx) {
+ s.errorf("AnonFuncs[%d]=%s but %s.anonIdx=%d", i, anon, anon, anon.anonIdx)
+ }
}
s.fn = nil
return !s.insane
diff --git a/go/ssa/source.go b/go/ssa/source.go
index 7e2a369dd..b9a08363e 100644
--- a/go/ssa/source.go
+++ b/go/ssa/source.go
@@ -14,6 +14,8 @@ import (
"go/ast"
"go/token"
"go/types"
+
+ "golang.org/x/tools/internal/typeparams"
)
// EnclosingFunction returns the function that contains the syntax
@@ -23,11 +25,10 @@ import (
// enclosed by the package's init() function.
//
// Returns nil if not found; reasons might include:
-// - the node is not enclosed by any function.
-// - the node is within an anonymous function (FuncLit) and
-// its SSA function has not been created yet
-// (pkg.Build() has not yet been called).
-//
+// - the node is not enclosed by any function.
+// - the node is within an anonymous function (FuncLit) and
+// its SSA function has not been created yet
+// (pkg.Build() has not yet been called).
func EnclosingFunction(pkg *Package, path []ast.Node) *Function {
// Start with package-level function...
fn := findEnclosingPackageLevelFunction(pkg, path)
@@ -65,14 +66,12 @@ outer:
// depend on whether SSA code for pkg has been built, so it can be
// used to quickly reject check inputs that will cause
// EnclosingFunction to fail, prior to SSA building.
-//
func HasEnclosingFunction(pkg *Package, path []ast.Node) bool {
return findEnclosingPackageLevelFunction(pkg, path) != nil
}
// findEnclosingPackageLevelFunction returns the Function
// corresponding to the package-level function enclosing path.
-//
func findEnclosingPackageLevelFunction(pkg *Package, path []ast.Node) *Function {
if n := len(path); n >= 2 { // [... {Gen,Func}Decl File]
switch decl := path[n-2].(type) {
@@ -107,7 +106,6 @@ func findEnclosingPackageLevelFunction(pkg *Package, path []ast.Node) *Function
// findNamedFunc returns the named function whose FuncDecl.Ident is at
// position pos.
-//
func findNamedFunc(pkg *Package, pos token.Pos) *Function {
// Look at all package members and method sets of named types.
// Not very efficient.
@@ -135,13 +133,13 @@ func findNamedFunc(pkg *Package, pos token.Pos) *Function {
// expression e.
//
// It returns nil if no value was found, e.g.
-// - the expression is not lexically contained within f;
-// - f was not built with debug information; or
-// - e is a constant expression. (For efficiency, no debug
-// information is stored for constants. Use
-// go/types.Info.Types[e].Value instead.)
-// - e is a reference to nil or a built-in function.
-// - the value was optimised away.
+// - the expression is not lexically contained within f;
+// - f was not built with debug information; or
+// - e is a constant expression. (For efficiency, no debug
+// information is stored for constants. Use
+// go/types.Info.Types[e].Value instead.)
+// - e is a reference to nil or a built-in function.
+// - the value was optimised away.
//
// If e is an addressable expression used in an lvalue context,
// value is the address denoted by e, and isAddr is true.
@@ -153,7 +151,6 @@ func findNamedFunc(pkg *Package, pos token.Pos) *Function {
// astutil.PathEnclosingInterval to locate the ast.Node, then
// EnclosingFunction to locate the Function, then ValueForExpr to find
// the ssa.Value.)
-//
func (f *Function) ValueForExpr(e ast.Expr) (value Value, isAddr bool) {
if f.debugInfo() { // (opt)
e = unparen(e)
@@ -175,7 +172,6 @@ func (f *Function) ValueForExpr(e ast.Expr) (value Value, isAddr bool) {
// Package returns the SSA Package corresponding to the specified
// type-checker package object.
// It returns nil if no such SSA package has been created.
-//
func (prog *Program) Package(obj *types.Package) *Package {
return prog.packages[obj]
}
@@ -184,7 +180,6 @@ func (prog *Program) Package(obj *types.Package) *Package {
// the specified named object, which may be a package-level const
// (*NamedConst), var (*Global) or func (*Function) of some package in
// prog. It returns nil if the object is not found.
-//
func (prog *Program) packageLevelMember(obj types.Object) Member {
if pkg, ok := prog.packages[obj.Pkg()]; ok {
return pkg.objects[obj]
@@ -192,12 +187,17 @@ func (prog *Program) packageLevelMember(obj types.Object) Member {
return nil
}
+// originFunc returns the package-level generic function that is the
+// origin of obj. If returns nil if the generic function is not found.
+func (prog *Program) originFunc(obj *types.Func) *Function {
+ return prog.declaredFunc(typeparams.OriginMethod(obj))
+}
+
// FuncValue returns the concrete Function denoted by the source-level
// named function obj, or nil if obj denotes an interface method.
//
// TODO(adonovan): check the invariant that obj.Type() matches the
// result's Signature, both in the params/results and in the receiver.
-//
func (prog *Program) FuncValue(obj *types.Func) *Function {
fn, _ := prog.packageLevelMember(obj).(*Function)
return fn
@@ -205,7 +205,6 @@ func (prog *Program) FuncValue(obj *types.Func) *Function {
// ConstValue returns the SSA Value denoted by the source-level named
// constant obj.
-//
func (prog *Program) ConstValue(obj *types.Const) *Const {
// TODO(adonovan): opt: share (don't reallocate)
// Consts for const objects and constant ast.Exprs.
@@ -237,8 +236,9 @@ func (prog *Program) ConstValue(obj *types.Const) *Const {
// If the identifier is a field selector and its base expression is
// non-addressable, then VarValue returns the value of that field.
// For example:
-// func f() struct {x int}
-// f().x // VarValue(x) returns a *Field instruction of type int
+//
+// func f() struct {x int}
+// f().x // VarValue(x) returns a *Field instruction of type int
//
// All other identifiers denote addressable locations (variables).
// For them, VarValue may return either the variable's address or its
@@ -247,14 +247,14 @@ func (prog *Program) ConstValue(obj *types.Const) *Const {
//
// If !isAddr, the returned value is the one associated with the
// specific identifier. For example,
-// var x int // VarValue(x) returns Const 0 here
-// x = 1 // VarValue(x) returns Const 1 here
+//
+// var x int // VarValue(x) returns Const 0 here
+// x = 1 // VarValue(x) returns Const 1 here
//
// It is not specified whether the value or the address is returned in
// any particular case, as it may depend upon optimizations performed
// during SSA code generation, such as registerization, constant
// folding, avoidance of materialization of subexpressions, etc.
-//
func (prog *Program) VarValue(obj *types.Var, pkg *Package, ref []ast.Node) (value Value, isAddr bool) {
// All references to a var are local to some function, possibly init.
fn := EnclosingFunction(pkg, ref)
diff --git a/go/ssa/source_test.go b/go/ssa/source_test.go
index 24cf57ef0..eb266edd1 100644
--- a/go/ssa/source_test.go
+++ b/go/ssa/source_test.go
@@ -89,7 +89,7 @@ func TestObjValueLookup(t *testing.T) {
return
}
- prog := ssautil.CreateProgram(iprog, 0 /*|ssa.PrintFunctions*/)
+ prog := ssautil.CreateProgram(iprog, ssa.BuilderMode(0) /*|ssa.PrintFunctions*/)
mainInfo := iprog.Created[0]
mainPkg := prog.Package(mainInfo.Pkg)
mainPkg.SetDebugMode(true)
@@ -247,7 +247,7 @@ func testValueForExpr(t *testing.T, testfile string) {
mainInfo := iprog.Created[0]
- prog := ssautil.CreateProgram(iprog, 0)
+ prog := ssautil.CreateProgram(iprog, ssa.BuilderMode(0))
mainPkg := prog.Package(mainInfo.Pkg)
mainPkg.SetDebugMode(true)
mainPkg.Build()
@@ -325,7 +325,6 @@ func testValueForExpr(t *testing.T, testfile string) {
// findInterval parses input and returns the [start, end) positions of
// the first occurrence of substr in input. f==nil indicates failure;
// an error has already been reported in that case.
-//
func findInterval(t *testing.T, fset *token.FileSet, input, substr string) (f *ast.File, start, end token.Pos) {
f, err := parser.ParseFile(fset, "<input>", input, 0)
if err != nil {
@@ -404,7 +403,7 @@ func TestEnclosingFunction(t *testing.T) {
t.Error(err)
continue
}
- prog := ssautil.CreateProgram(iprog, 0)
+ prog := ssautil.CreateProgram(iprog, ssa.BuilderMode(0))
pkg := prog.Package(iprog.Created[0].Pkg)
pkg.Build()
diff --git a/go/ssa/ssa.go b/go/ssa/ssa.go
index ea5b68e26..c3471c156 100644
--- a/go/ssa/ssa.go
+++ b/go/ssa/ssa.go
@@ -16,6 +16,7 @@ import (
"sync"
"golang.org/x/tools/go/types/typeutil"
+ "golang.org/x/tools/internal/typeparams"
)
// A Program is a partial or complete Go program converted to SSA form.
@@ -26,13 +27,16 @@ type Program struct {
mode BuilderMode // set of mode bits for SSA construction
MethodSets typeutil.MethodSetCache // cache of type-checker's method-sets
- canon canonizer // type canonicalization map
+ canon *canonizer // type canonicalization map
+ ctxt *typeparams.Context // cache for type checking instantiations
- methodsMu sync.Mutex // guards the following maps:
- methodSets typeutil.Map // maps type to its concrete methodSet
- runtimeTypes typeutil.Map // types for which rtypes are needed
- bounds map[*types.Func]*Function // bounds for curried x.Method closures
- thunks map[selectionKey]*Function // thunks for T.Method expressions
+ methodsMu sync.Mutex // guards the following maps:
+ methodSets typeutil.Map // maps type to its concrete methodSet
+ runtimeTypes typeutil.Map // types for which rtypes are needed
+ bounds map[boundsKey]*Function // bounds for curried x.Method closures
+ thunks map[selectionKey]*Function // thunks for T.Method expressions
+ instances map[*Function]*instanceSet // instances of generic functions
+ parameterized tpWalker // determines whether a type reaches a type parameter.
}
// A Package is a single analyzed Go package containing Members for
@@ -43,7 +47,6 @@ type Program struct {
// Members also contains entries for "init" (the synthetic package
// initializer) and "init#%d", the nth declared init function,
// and unspecified other things too.
-//
type Package struct {
Prog *Program // the owning program
Pkg *types.Package // the corresponding go/types.Package
@@ -58,12 +61,12 @@ type Package struct {
ninit int32 // number of init functions
info *types.Info // package type information
files []*ast.File // package ASTs
+ created creator // members created as a result of building this package (includes declared functions, wrappers)
}
// A Member is a member of a Go package, implemented by *NamedConst,
// *Global, *Function, or *Type; they are created by package-level
// const, var, func and type declarations respectively.
-//
type Member interface {
Name() string // declared name of the package member
String() string // package-qualified name of the package member
@@ -89,7 +92,6 @@ type Type struct {
//
// NB: a NamedConst is not a Value; it contains a constant Value, which
// it augments with the name and position of its 'const' declaration.
-//
type NamedConst struct {
object *types.Const
Value *Const
@@ -165,7 +167,6 @@ type Value interface {
// An Instruction that defines a value (e.g. BinOp) also implements
// the Value interface; an Instruction that only has an effect (e.g. Store)
// does not.
-//
type Instruction interface {
// String returns the disassembled form of this value.
//
@@ -242,7 +243,6 @@ type Instruction interface {
// Node is provided to simplify SSA graph algorithms. Clients should
// use the more specific and informative Value or Instruction
// interfaces where appropriate.
-//
type Node interface {
// Common methods:
String() string
@@ -294,10 +294,19 @@ type Node interface {
//
// Type() returns the function's Signature.
//
+// A generic function is a function or method that has uninstantiated type
+// parameters (TypeParams() != nil). Consider a hypothetical generic
+// method, (*Map[K,V]).Get. It may be instantiated with all ground
+// (non-parameterized) types as (*Map[string,int]).Get or with
+// parameterized types as (*Map[string,U]).Get, where U is a type parameter.
+// In both instantiations, Origin() refers to the instantiated generic
+// method, (*Map[K,V]).Get, TypeParams() refers to the parameters [K,V] of
+// the generic method. TypeArgs() refers to [string,U] or [string,int],
+// respectively, and is nil in the generic method.
type Function struct {
name string
- object types.Object // a declared *types.Func or one of its wrappers
- method *types.Selection // info about provenance of synthetic methods
+ object types.Object // a declared *types.Func or one of its wrappers
+ method *selection // info about provenance of synthetic methods; thunk => non-nil
Signature *types.Signature
pos token.Pos
@@ -313,15 +322,22 @@ type Function struct {
Recover *BasicBlock // optional; control transfers here after recovered panic
AnonFuncs []*Function // anonymous functions directly beneath this one
referrers []Instruction // referring instructions (iff Parent() != nil)
+ built bool // function has completed both CREATE and BUILD phase.
+ anonIdx int32 // position of a nested function in parent's AnonFuncs. fn.Parent()!=nil => fn.Parent().AnonFunc[fn.anonIdx] == fn.
+
+ typeparams *typeparams.TypeParamList // type parameters of this function. typeparams.Len() > 0 => generic or instance of generic function
+ typeargs []types.Type // type arguments that instantiated typeparams. len(typeargs) > 0 => instance of generic function
+ topLevelOrigin *Function // the origin function if this is an instance of a source function. nil if Parent()!=nil.
// The following fields are set transiently during building,
// then cleared.
- currentBlock *BasicBlock // where to emit code
- objects map[types.Object]Value // addresses of local variables
- namedResults []*Alloc // tuple of named results
- targets *targets // linked stack of branch targets
- lblocks map[*ast.Object]*lblock // labelled blocks
- info *types.Info // *types.Info to build from. nil for wrappers.
+ currentBlock *BasicBlock // where to emit code
+ objects map[types.Object]Value // addresses of local variables
+ namedResults []*Alloc // tuple of named results
+ targets *targets // linked stack of branch targets
+ lblocks map[types.Object]*lblock // labelled blocks
+ info *types.Info // *types.Info to build from. nil for wrappers.
+ subst *subster // non-nil => expand generic body using this type substitution of ground types
}
// BasicBlock represents an SSA basic block.
@@ -343,7 +359,6 @@ type Function struct {
//
// The order of Preds and Succs is significant (to Phi and If
// instructions, respectively).
-//
type BasicBlock struct {
Index int // index of this block within Parent().Blocks
Comment string // optional label; no semantic significance
@@ -373,7 +388,6 @@ type BasicBlock struct {
//
// Pos() returns the position of the value that was captured, which
// belongs to an enclosing function.
-//
type FreeVar struct {
name string
typ types.Type
@@ -386,7 +400,6 @@ type FreeVar struct {
}
// A Parameter represents an input parameter of a function.
-//
type Parameter struct {
name string
object types.Object // a *types.Var; nil for non-source locals
@@ -396,26 +409,28 @@ type Parameter struct {
referrers []Instruction
}
-// A Const represents the value of a constant expression.
-//
-// The underlying type of a constant may be any boolean, numeric, or
-// string type. In addition, a Const may represent the nil value of
-// any reference type---interface, map, channel, pointer, slice, or
-// function---but not "untyped nil".
+// A Const represents a value known at build time.
//
-// All source-level constant expressions are represented by a Const
-// of the same type and value.
+// Consts include true constants of boolean, numeric, and string types, as
+// defined by the Go spec; these are represented by a non-nil Value field.
//
-// Value holds the value of the constant, independent of its Type(),
-// using go/constant representation, or nil for a typed nil value.
+// Consts also include the "zero" value of any type, of which the nil values
+// of various pointer-like types are a special case; these are represented
+// by a nil Value field.
//
// Pos() returns token.NoPos.
//
-// Example printed form:
-// 42:int
-// "hello":untyped string
-// 3+4i:MyComplex
-//
+// Example printed forms:
+//
+// 42:int
+// "hello":untyped string
+// 3+4i:MyComplex
+// nil:*int
+// nil:[]string
+// [3]int{}:[3]int
+// struct{x string}{}:struct{x string}
+// 0:interface{int|int64}
+// nil:interface{bool|int} // no go/constant representation
type Const struct {
typ types.Type
Value constant.Value
@@ -426,7 +441,6 @@ type Const struct {
//
// Pos() returns the position of the ast.ValueSpec.Names[*]
// identifier.
-//
type Global struct {
name string
object types.Object // a *types.Var; may be nil for synthetics e.g. init$guard
@@ -445,16 +459,15 @@ type Global struct {
// Go spec (excluding "make" and "new") or one of these ssa-defined
// intrinsics:
//
-// // wrapnilchk returns ptr if non-nil, panics otherwise.
-// // (For use in indirection wrappers.)
-// func ssa:wrapnilchk(ptr *T, recvType, methodName string) *T
+// // wrapnilchk returns ptr if non-nil, panics otherwise.
+// // (For use in indirection wrappers.)
+// func ssa:wrapnilchk(ptr *T, recvType, methodName string) *T
//
// Object() returns a *types.Builtin for built-ins defined by the spec,
// nil for others.
//
// Type() returns a *types.Signature representing the effective
// signature of the built-in for this call.
-//
type Builtin struct {
name string
sig *types.Signature
@@ -489,9 +502,9 @@ type Builtin struct {
// allocates a varargs slice.
//
// Example printed form:
-// t0 = local int
-// t1 = new int
//
+// t0 = local int
+// t1 = new int
type Alloc struct {
register
Comment string
@@ -509,8 +522,8 @@ type Alloc struct {
// during SSA renaming.
//
// Example printed form:
-// t2 = phi [0: t0, 1: t1]
//
+// t2 = phi [0: t0, 1: t1]
type Phi struct {
register
Comment string // a hint as to its purpose
@@ -528,10 +541,10 @@ type Phi struct {
// Pos() returns the ast.CallExpr.Lparen, if explicit in the source.
//
// Example printed form:
-// t2 = println(t0, t1)
-// t4 = t3()
-// t7 = invoke t5.Println(...t6)
//
+// t2 = println(t0, t1)
+// t4 = t3()
+// t7 = invoke t5.Println(...t6)
type Call struct {
register
Call CallCommon
@@ -542,8 +555,8 @@ type Call struct {
// Pos() returns the ast.BinaryExpr.OpPos, if explicit in the source.
//
// Example printed form:
-// t1 = t0 + 1:int
//
+// t1 = t0 + 1:int
type BinOp struct {
register
// One of:
@@ -573,9 +586,9 @@ type BinOp struct {
// specified.
//
// Example printed form:
-// t0 = *x
-// t2 = <-t1,ok
//
+// t0 = *x
+// t2 = <-t1,ok
type UnOp struct {
register
Op token.Token // One of: NOT SUB ARROW MUL XOR ! - <- * ^
@@ -587,20 +600,28 @@ type UnOp struct {
// change to Type().
//
// Type changes are permitted:
-// - between a named type and its underlying type.
-// - between two named types of the same underlying type.
-// - between (possibly named) pointers to identical base types.
-// - from a bidirectional channel to a read- or write-channel,
-// optionally adding/removing a name.
+// - between a named type and its underlying type.
+// - between two named types of the same underlying type.
+// - between (possibly named) pointers to identical base types.
+// - from a bidirectional channel to a read- or write-channel,
+// optionally adding/removing a name.
+// - between a type (t) and an instance of the type (tσ), i.e.
+// Type() == σ(X.Type()) (or X.Type()== σ(Type())) where
+// σ is the type substitution of Parent().TypeParams by
+// Parent().TypeArgs.
//
// This operation cannot fail dynamically.
//
+// Type changes may to be to or from a type parameter (or both). All
+// types in the type set of X.Type() have a value-preserving type
+// change to all types in the type set of Type().
+//
// Pos() returns the ast.CallExpr.Lparen, if the instruction arose
// from an explicit conversion in the source.
//
// Example printed form:
-// t1 = changetype *int <- IntPtr (t0)
//
+// t1 = changetype *int <- IntPtr (t0)
type ChangeType struct {
register
X Value
@@ -611,14 +632,19 @@ type ChangeType struct {
//
// A conversion may change the value and representation of its operand.
// Conversions are permitted:
-// - between real numeric types.
-// - between complex numeric types.
-// - between string and []byte or []rune.
-// - between pointers and unsafe.Pointer.
-// - between unsafe.Pointer and uintptr.
-// - from (Unicode) integer to (UTF-8) string.
+// - between real numeric types.
+// - between complex numeric types.
+// - between string and []byte or []rune.
+// - between pointers and unsafe.Pointer.
+// - between unsafe.Pointer and uintptr.
+// - from (Unicode) integer to (UTF-8) string.
+//
// A conversion may imply a type name change also.
//
+// Conversions may to be to or from a type parameter. All types in
+// the type set of X.Type() can be converted to all types in the type
+// set of Type().
+//
// This operation cannot fail dynamically.
//
// Conversions of untyped string/number/bool constants to a specific
@@ -628,13 +654,37 @@ type ChangeType struct {
// from an explicit conversion in the source.
//
// Example printed form:
-// t1 = convert []byte <- string (t0)
//
+// t1 = convert []byte <- string (t0)
type Convert struct {
register
X Value
}
+// The MultiConvert instruction yields the conversion of value X to type
+// Type(). Either X.Type() or Type() must be a type parameter. Each
+// type in the type set of X.Type() can be converted to each type in the
+// type set of Type().
+//
+// See the documentation for Convert, ChangeType, and SliceToArrayPointer
+// for the conversions that are permitted. Additionally conversions of
+// slices to arrays are permitted.
+//
+// This operation can fail dynamically (see SliceToArrayPointer).
+//
+// Pos() returns the ast.CallExpr.Lparen, if the instruction arose
+// from an explicit conversion in the source.
+//
+// Example printed form:
+//
+// t1 = multiconvert D <- S (t0) [*[2]rune <- []rune | string <- []rune]
+type MultiConvert struct {
+ register
+ X Value
+ from []*typeparams.Term
+ to []*typeparams.Term
+}
+
// ChangeInterface constructs a value of one interface type from a
// value of another interface type known to be assignable to it.
// This operation cannot fail.
@@ -645,8 +695,8 @@ type Convert struct {
// otherwise.
//
// Example printed form:
-// t1 = change interface interface{} <- I (t0)
//
+// t1 = change interface interface{} <- I (t0)
type ChangeInterface struct {
register
X Value
@@ -658,9 +708,17 @@ type ChangeInterface struct {
// Pos() returns the ast.CallExpr.Lparen, if the instruction arose
// from an explicit conversion in the source.
//
+// Conversion may to be to or from a type parameter. All types in
+// the type set of X.Type() must be a slice types that can be converted to
+// all types in the type set of Type() which must all be pointer to array
+// types.
+//
+// This operation can fail dynamically if the length of the slice is less
+// than the length of the array.
+//
// Example printed form:
-// t1 = slice to array pointer *[4]byte <- []byte (t0)
//
+// t1 = slice to array pointer *[4]byte <- []byte (t0)
type SliceToArrayPointer struct {
register
X Value
@@ -673,15 +731,16 @@ type SliceToArrayPointer struct {
// of X, and Program.MethodValue(m) to find the implementation of a method.
//
// To construct the zero value of an interface type T, use:
-// NewConst(constant.MakeNil(), T, pos)
+//
+// NewConst(constant.MakeNil(), T, pos)
//
// Pos() returns the ast.CallExpr.Lparen, if the instruction arose
// from an explicit conversion in the source.
//
// Example printed form:
-// t1 = make interface{} <- int (42:int)
-// t2 = make Stringer <- t0
//
+// t1 = make interface{} <- int (42:int)
+// t2 = make Stringer <- t0
type MakeInterface struct {
register
X Value
@@ -696,9 +755,9 @@ type MakeInterface struct {
// closure or the ast.SelectorExpr.Sel for a bound method closure.
//
// Example printed form:
-// t0 = make closure anon@1.2 [x y z]
-// t1 = make closure bound$(main.I).add [i]
//
+// t0 = make closure anon@1.2 [x y z]
+// t1 = make closure bound$(main.I).add [i]
type MakeClosure struct {
register
Fn Value // always a *Function
@@ -714,9 +773,9 @@ type MakeClosure struct {
// the ast.CompositeLit.Lbrack if created by a literal.
//
// Example printed form:
-// t1 = make map[string]int t0
-// t1 = make StringIntMap t0
//
+// t1 = make map[string]int t0
+// t1 = make StringIntMap t0
type MakeMap struct {
register
Reserve Value // initial space reservation; nil => default
@@ -731,9 +790,9 @@ type MakeMap struct {
// created it.
//
// Example printed form:
-// t0 = make chan int 0
-// t0 = make IntChan 0
//
+// t0 = make chan int 0
+// t0 = make IntChan 0
type MakeChan struct {
register
Size Value // int; size of buffer; zero => synchronous.
@@ -753,9 +812,9 @@ type MakeChan struct {
// created it.
//
// Example printed form:
-// t1 = make []string 1:int t0
-// t1 = make StringSlice 1:int t0
//
+// t1 = make []string 1:int t0
+// t1 = make StringSlice 1:int t0
type MakeSlice struct {
register
Len Value
@@ -776,8 +835,8 @@ type MakeSlice struct {
// NoPos if not explicit in the source (e.g. a variadic argument slice).
//
// Example printed form:
-// t1 = slice t0[1:]
//
+// t1 = slice t0[1:]
type Slice struct {
register
X Value // slice, string, or *array
@@ -795,15 +854,18 @@ type Slice struct {
// Type() returns a (possibly named) *types.Pointer.
//
// Pos() returns the position of the ast.SelectorExpr.Sel for the
-// field, if explicit in the source.
+// field, if explicit in the source. For implicit selections, returns
+// the position of the inducing explicit selection. If produced for a
+// struct literal S{f: e}, it returns the position of the colon; for
+// S{e} it returns the start of expression e.
//
// Example printed form:
-// t1 = &t0.name [#1]
//
+// t1 = &t0.name [#1]
type FieldAddr struct {
register
X Value // *struct
- Field int // field is X.Type().Underlying().(*types.Pointer).Elem().Underlying().(*types.Struct).Field(Field)
+ Field int // field is typeparams.CoreType(X.Type().Underlying().(*types.Pointer).Elem()).(*types.Struct).Field(Field)
}
// The Field instruction yields the Field of struct X.
@@ -813,22 +875,23 @@ type FieldAddr struct {
// package-local identifiers and permit compact representations.
//
// Pos() returns the position of the ast.SelectorExpr.Sel for the
-// field, if explicit in the source.
-//
+// field, if explicit in the source. For implicit selections, returns
+// the position of the inducing explicit selection.
+
// Example printed form:
-// t1 = t0.name [#1]
//
+// t1 = t0.name [#1]
type Field struct {
register
X Value // struct
- Field int // index into X.Type().(*types.Struct).Fields
+ Field int // index into typeparams.CoreType(X.Type()).(*types.Struct).Fields
}
// The IndexAddr instruction yields the address of the element at
// index Index of collection X. Index is an integer expression.
//
-// The elements of maps and strings are not addressable; use Lookup or
-// MapUpdate instead.
+// The elements of maps and strings are not addressable; use Lookup (map),
+// Index (string), or MapUpdate instead.
//
// Dynamically, this instruction panics if X evaluates to a nil *array
// pointer.
@@ -839,31 +902,32 @@ type Field struct {
// explicit in the source.
//
// Example printed form:
-// t2 = &t0[t1]
//
+// t2 = &t0[t1]
type IndexAddr struct {
register
- X Value // slice or *array,
+ X Value // *array, slice or type parameter with types array, *array, or slice.
Index Value // numeric index
}
-// The Index instruction yields element Index of array X.
+// The Index instruction yields element Index of collection X, an array,
+// string or type parameter containing an array, a string, a pointer to an,
+// array or a slice.
//
// Pos() returns the ast.IndexExpr.Lbrack for the index operation, if
// explicit in the source.
//
// Example printed form:
-// t2 = t0[t1]
//
+// t2 = t0[t1]
type Index struct {
register
- X Value // array
+ X Value // array, string or type parameter with types array, *array, slice, or string.
Index Value // integer index
}
-// The Lookup instruction yields element Index of collection X, a map
-// or string. Index is an integer expression if X is a string or the
-// appropriate key type if X is a map.
+// The Lookup instruction yields element Index of collection map X.
+// Index is the appropriate key type.
//
// If CommaOk, the result is a 2-tuple of the value above and a
// boolean indicating the result of a map membership test for the key.
@@ -872,19 +936,18 @@ type Index struct {
// Pos() returns the ast.IndexExpr.Lbrack, if explicit in the source.
//
// Example printed form:
-// t2 = t0[t1]
-// t5 = t3[t4],ok
//
+// t2 = t0[t1]
+// t5 = t3[t4],ok
type Lookup struct {
register
- X Value // string or map
- Index Value // numeric or key-typed index
+ X Value // map
+ Index Value // key-typed index
CommaOk bool // return a value,ok pair
}
// SelectState is a helper for Select.
// It represents one goal state and its corresponding communication.
-//
type SelectState struct {
Dir types.ChanDir // direction of case (SendOnly or RecvOnly)
Chan Value // channel to use (for send or receive)
@@ -899,7 +962,9 @@ type SelectState struct {
// Let n be the number of States for which Dir==RECV and T_i (0<=i<n)
// be the element type of each such state's Chan.
// Select returns an n+2-tuple
-// (index int, recvOk bool, r_0 T_0, ... r_n-1 T_n-1)
+//
+// (index int, recvOk bool, r_0 T_0, ... r_n-1 T_n-1)
+//
// The tuple's components, described below, must be accessed via the
// Extract instruction.
//
@@ -925,9 +990,9 @@ type SelectState struct {
// Pos() returns the ast.SelectStmt.Select.
//
// Example printed form:
-// t3 = select nonblocking [<-t0, t1<-t2]
-// t4 = select blocking []
//
+// t3 = select nonblocking [<-t0, t1<-t2]
+// t4 = select blocking []
type Select struct {
register
States []*SelectState
@@ -944,8 +1009,8 @@ type Select struct {
// Pos() returns the ast.RangeStmt.For.
//
// Example printed form:
-// t0 = range "hello":string
//
+// t0 = range "hello":string
type Range struct {
register
X Value // string or map
@@ -967,8 +1032,8 @@ type Range struct {
// The types of k and/or v may be types.Invalid.
//
// Example printed form:
-// t1 = next t0
//
+// t1 = next t0
type Next struct {
register
Iter Value
@@ -986,6 +1051,9 @@ type Next struct {
// is AssertedType's zero value. The components of the pair must be
// accessed using the Extract instruction.
//
+// If Underlying: tests whether interface value X has the underlying
+// type AssertedType.
+//
// If AssertedType is a concrete type, TypeAssert checks whether the
// dynamic type in interface X is equal to it, and if so, the result
// of the conversion is a copy of the value in the interface.
@@ -1007,9 +1075,9 @@ type Next struct {
// type-switch statement.
//
// Example printed form:
-// t1 = typeassert t0.(int)
-// t3 = typeassert,ok t2.(T)
//
+// t1 = typeassert t0.(int)
+// t3 = typeassert,ok t2.(T)
type TypeAssert struct {
register
X Value
@@ -1024,8 +1092,8 @@ type TypeAssert struct {
// IndexExpr(Map).
//
// Example printed form:
-// t1 = extract t0 #1
//
+// t1 = extract t0 #1
type Extract struct {
register
Tuple Value
@@ -1042,8 +1110,8 @@ type Extract struct {
// Pos() returns NoPos.
//
// Example printed form:
-// jump done
//
+// jump done
type Jump struct {
anInstruction
}
@@ -1058,8 +1126,8 @@ type Jump struct {
// Pos() returns NoPos.
//
// Example printed form:
-// if t0 goto done else body
//
+// if t0 goto done else body
type If struct {
anInstruction
Cond Value
@@ -1084,9 +1152,9 @@ type If struct {
// Pos() returns the ast.ReturnStmt.Return, if explicit in the source.
//
// Example printed form:
-// return
-// return nil:I, 2:int
//
+// return
+// return nil:I, 2:int
type Return struct {
anInstruction
Results []Value
@@ -1103,8 +1171,8 @@ type Return struct {
// Pos() returns NoPos.
//
// Example printed form:
-// rundefers
//
+// rundefers
type RunDefers struct {
anInstruction
}
@@ -1121,8 +1189,8 @@ type RunDefers struct {
// in the source.
//
// Example printed form:
-// panic t0
//
+// panic t0
type Panic struct {
anInstruction
X Value // an interface{}
@@ -1137,10 +1205,10 @@ type Panic struct {
// Pos() returns the ast.GoStmt.Go.
//
// Example printed form:
-// go println(t0, t1)
-// go t3()
-// go invoke t5.Println(...t6)
//
+// go println(t0, t1)
+// go t3()
+// go invoke t5.Println(...t6)
type Go struct {
anInstruction
Call CallCommon
@@ -1155,10 +1223,10 @@ type Go struct {
// Pos() returns the ast.DeferStmt.Defer.
//
// Example printed form:
-// defer println(t0, t1)
-// defer t3()
-// defer invoke t5.Println(...t6)
//
+// defer println(t0, t1)
+// defer t3()
+// defer invoke t5.Println(...t6)
type Defer struct {
anInstruction
Call CallCommon
@@ -1170,8 +1238,8 @@ type Defer struct {
// Pos() returns the ast.SendStmt.Arrow, if explicit in the source.
//
// Example printed form:
-// send t0 <- t1
//
+// send t0 <- t1
type Send struct {
anInstruction
Chan, X Value
@@ -1187,8 +1255,8 @@ type Send struct {
// implementation choices, the details are not specified.
//
// Example printed form:
-// *x = y
//
+// *x = y
type Store struct {
anInstruction
Addr Value
@@ -1203,8 +1271,8 @@ type Store struct {
// if explicit in the source.
//
// Example printed form:
-// t0[t1] = t2
//
+// t0[t1] = t2
type MapUpdate struct {
anInstruction
Map Value
@@ -1242,11 +1310,12 @@ type MapUpdate struct {
// ordinary SSA renaming machinery.)
//
// Example printed form:
-// ; *ast.CallExpr @ 102:9 is t5
-// ; var x float64 @ 109:72 is x
-// ; address of *ast.CompositeLit @ 216:10 is t0
//
+// ; *ast.CallExpr @ 102:9 is t5
+// ; var x float64 @ 109:72 is x
+// ; address of *ast.CompositeLit @ 216:10 is t0
type DebugRef struct {
+ // TODO(generics): Reconsider what DebugRefs are for generics.
anInstruction
Expr ast.Expr // the referring expression (never *ast.ParenExpr)
object types.Object // the identity of the source var/func
@@ -1268,7 +1337,6 @@ type DebugRef struct {
// from it) is unique within a function. As always in this API,
// semantics are determined only by identity; names exist only to
// facilitate debugging.
-//
type register struct {
anInstruction
num int // "name" of virtual register, e.g. "t0". Not guaranteed unique.
@@ -1295,15 +1363,17 @@ type anInstruction struct {
// 'func'.
//
// Value may be one of:
-// (a) a *Function, indicating a statically dispatched call
-// to a package-level function, an anonymous function, or
-// a method of a named type.
-// (b) a *MakeClosure, indicating an immediately applied
-// function literal with free variables.
-// (c) a *Builtin, indicating a statically dispatched call
-// to a built-in function.
-// (d) any other value, indicating a dynamically dispatched
-// function call.
+//
+// (a) a *Function, indicating a statically dispatched call
+// to a package-level function, an anonymous function, or
+// a method of a named type.
+// (b) a *MakeClosure, indicating an immediately applied
+// function literal with free variables.
+// (c) a *Builtin, indicating a statically dispatched call
+// to a built-in function.
+// (d) any other value, indicating a dynamically dispatched
+// function call.
+//
// StaticCallee returns the identity of the callee in cases
// (a) and (b), nil otherwise.
//
@@ -1311,29 +1381,31 @@ type anInstruction struct {
// Args[0] contains the receiver parameter.
//
// Example printed form:
-// t2 = println(t0, t1)
-// go t3()
+//
+// t2 = println(t0, t1)
+// go t3()
// defer t5(...t6)
//
// 2. "invoke" mode: when Method is non-nil (IsInvoke), a CallCommon
// represents a dynamically dispatched call to an interface method.
// In this mode, Value is the interface value and Method is the
-// interface's abstract method. Note: an abstract method may be
-// shared by multiple interfaces due to embedding; Value.Type()
-// provides the specific interface used for this call.
+// interface's abstract method. The interface value may be a type
+// parameter. Note: an abstract method may be shared by multiple
+// interfaces due to embedding; Value.Type() provides the specific
+// interface used for this call.
//
// Value is implicitly supplied to the concrete method implementation
// as the receiver parameter; in other words, Args[0] holds not the
// receiver but the first true argument.
//
// Example printed form:
-// t1 = invoke t0.String()
-// go invoke t3.Run(t2)
-// defer invoke t4.Handle(...t5)
+//
+// t1 = invoke t0.String()
+// go invoke t3.Run(t2)
+// defer invoke t4.Handle(...t5)
//
// For all calls to variadic functions (Signature().Variadic()),
// the last element of Args is a slice.
-//
type CallCommon struct {
Value Value // receiver (invoke mode) or func value (call mode)
Method *types.Func // abstract method (invoke mode)
@@ -1355,12 +1427,11 @@ func (c *CallCommon) Pos() token.Pos { return c.pos }
//
// In either "call" or "invoke" mode, if the callee is a method, its
// receiver is represented by sig.Recv, not sig.Params().At(0).
-//
func (c *CallCommon) Signature() *types.Signature {
if c.Method != nil {
return c.Method.Type().(*types.Signature)
}
- return c.Value.Type().Underlying().(*types.Signature)
+ return typeparams.CoreType(c.Value.Type()).(*types.Signature)
}
// StaticCallee returns the callee if this is a trivially static
@@ -1398,7 +1469,6 @@ func (c *CallCommon) Description() string {
// The CallInstruction interface, implemented by *Go, *Defer and *Call,
// exposes the common parts of function-calling instructions,
// yet provides a way back to the Value defined by *Call alone.
-//
type CallInstruction interface {
Instruction
Common() *CallCommon // returns the common parts of the call
@@ -1452,6 +1522,29 @@ func (v *Function) Referrers() *[]Instruction {
return nil
}
+// TypeParams are the function's type parameters if generic or the
+// type parameters that were instantiated if fn is an instantiation.
+//
+// TODO(taking): declare result type as *types.TypeParamList
+// after we drop support for go1.17.
+func (fn *Function) TypeParams() *typeparams.TypeParamList {
+ return fn.typeparams
+}
+
+// TypeArgs are the types that TypeParams() were instantiated by to create fn
+// from fn.Origin().
+func (fn *Function) TypeArgs() []types.Type { return fn.typeargs }
+
+// Origin is the function fn is an instantiation of. Returns nil if fn is not
+// an instantiation.
+func (fn *Function) Origin() *Function {
+ if fn.parent != nil && len(fn.typeargs) > 0 {
+ // Nested functions are BUILT at a different time than there instances.
+ return fn.parent.Origin().AnonFuncs[fn.anonIdx]
+ }
+ return fn.topLevelOrigin
+}
+
func (v *Parameter) Type() types.Type { return v.typ }
func (v *Parameter) Name() string { return v.name }
func (v *Parameter) Object() types.Object { return v.object }
@@ -1498,7 +1591,6 @@ func (d *DebugRef) Object() types.Object { return d.object }
// Func returns the package-level function of the specified name,
// or nil if not found.
-//
func (p *Package) Func(name string) (f *Function) {
f, _ = p.Members[name].(*Function)
return
@@ -1506,7 +1598,6 @@ func (p *Package) Func(name string) (f *Function) {
// Var returns the package-level variable of the specified name,
// or nil if not found.
-//
func (p *Package) Var(name string) (g *Global) {
g, _ = p.Members[name].(*Global)
return
@@ -1514,7 +1605,6 @@ func (p *Package) Var(name string) (g *Global) {
// Const returns the package-level constant of the specified name,
// or nil if not found.
-//
func (p *Package) Const(name string) (c *NamedConst) {
c, _ = p.Members[name].(*NamedConst)
return
@@ -1522,7 +1612,6 @@ func (p *Package) Const(name string) (c *NamedConst) {
// Type returns the package-level type of the specified name,
// or nil if not found.
-//
func (p *Package) Type(name string) (t *Type) {
t, _ = p.Members[name].(*Type)
return
@@ -1583,6 +1672,10 @@ func (v *Convert) Operands(rands []*Value) []*Value {
return append(rands, &v.X)
}
+func (v *MultiConvert) Operands(rands []*Value) []*Value {
+ return append(rands, &v.X)
+}
+
func (v *SliceToArrayPointer) Operands(rands []*Value) []*Value {
return append(rands, &v.X)
}
diff --git a/go/ssa/ssautil/load.go b/go/ssa/ssautil/load.go
index 88d7c8f49..96d69a20a 100644
--- a/go/ssa/ssautil/load.go
+++ b/go/ssa/ssautil/load.go
@@ -34,7 +34,6 @@ import (
// packages with well-typed syntax trees.
//
// The mode parameter controls diagnostics and checking during SSA construction.
-//
func Packages(initial []*packages.Package, mode ssa.BuilderMode) (*ssa.Program, []*ssa.Package) {
return doPackages(initial, mode, false)
}
@@ -56,7 +55,6 @@ func Packages(initial []*packages.Package, mode ssa.BuilderMode) (*ssa.Program,
// well-typed syntax trees.
//
// The mode parameter controls diagnostics and checking during SSA construction.
-//
func AllPackages(initial []*packages.Package, mode ssa.BuilderMode) (*ssa.Program, []*ssa.Package) {
return doPackages(initial, mode, true)
}
@@ -79,10 +77,12 @@ func doPackages(initial []*packages.Package, mode ssa.BuilderMode, deps bool) (*
packages.Visit(initial, nil, func(p *packages.Package) {
if p.Types != nil && !p.IllTyped {
var files []*ast.File
+ var info *types.Info
if deps || isInitial[p] {
files = p.Syntax
+ info = p.TypesInfo
}
- ssamap[p] = prog.CreatePackage(p.Types, files, p.TypesInfo, true)
+ ssamap[p] = prog.CreatePackage(p.Types, files, info, true)
}
})
@@ -104,7 +104,6 @@ func doPackages(initial []*packages.Package, mode ssa.BuilderMode, deps bool) (*
//
// Deprecated: Use golang.org/x/tools/go/packages and the Packages
// function instead; see ssa.Example_loadPackages.
-//
func CreateProgram(lprog *loader.Program, mode ssa.BuilderMode) *ssa.Program {
prog := ssa.NewProgram(lprog.Fset, mode)
@@ -131,7 +130,6 @@ func CreateProgram(lprog *loader.Program, mode ssa.BuilderMode) *ssa.Program {
// The operation fails if there were any type-checking or import errors.
//
// See ../example_test.go for an example.
-//
func BuildPackage(tc *types.Config, fset *token.FileSet, pkg *types.Package, files []*ast.File, mode ssa.BuilderMode) (*ssa.Package, *types.Info, error) {
if fset == nil {
panic("no token.FileSet")
diff --git a/go/ssa/ssautil/load_test.go b/go/ssa/ssautil/load_test.go
index 55684e0a6..efa2ba40a 100644
--- a/go/ssa/ssautil/load_test.go
+++ b/go/ssa/ssautil/load_test.go
@@ -12,10 +12,13 @@ import (
"go/token"
"go/types"
"os"
+ "path"
"strings"
"testing"
"golang.org/x/tools/go/packages"
+ "golang.org/x/tools/go/packages/packagestest"
+ "golang.org/x/tools/go/ssa"
"golang.org/x/tools/go/ssa/ssautil"
"golang.org/x/tools/internal/testenv"
)
@@ -30,6 +33,8 @@ func main() {
`
func TestBuildPackage(t *testing.T) {
+ testenv.NeedsGoBuild(t) // for importer.Default()
+
// There is a more substantial test of BuildPackage and the
// SSA program it builds in ../ssa/builder_test.go.
@@ -39,17 +44,23 @@ func TestBuildPackage(t *testing.T) {
t.Fatal(err)
}
- pkg := types.NewPackage("hello", "")
- ssapkg, _, err := ssautil.BuildPackage(&types.Config{Importer: importer.Default()}, fset, pkg, []*ast.File{f}, 0)
- if err != nil {
- t.Fatal(err)
- }
- if pkg.Name() != "main" {
- t.Errorf("pkg.Name() = %s, want main", pkg.Name())
- }
- if ssapkg.Func("main") == nil {
- ssapkg.WriteTo(os.Stderr)
- t.Errorf("ssapkg has no main function")
+ for _, mode := range []ssa.BuilderMode{
+ ssa.SanityCheckFunctions,
+ ssa.InstantiateGenerics | ssa.SanityCheckFunctions,
+ } {
+ pkg := types.NewPackage("hello", "")
+ ssapkg, _, err := ssautil.BuildPackage(&types.Config{Importer: importer.Default()}, fset, pkg, []*ast.File{f}, mode)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if pkg.Name() != "main" {
+ t.Errorf("pkg.Name() = %s, want main", pkg.Name())
+ }
+ if ssapkg.Func("main") == nil {
+ ssapkg.WriteTo(os.Stderr)
+ t.Errorf("ssapkg has no main function")
+ }
+
}
}
@@ -65,19 +76,23 @@ func TestPackages(t *testing.T) {
t.Fatal("there were errors")
}
- prog, pkgs := ssautil.Packages(initial, 0)
- bytesNewBuffer := pkgs[0].Func("NewBuffer")
- bytesNewBuffer.Pkg.Build()
+ for _, mode := range []ssa.BuilderMode{
+ ssa.SanityCheckFunctions,
+ ssa.SanityCheckFunctions | ssa.InstantiateGenerics,
+ } {
+ prog, pkgs := ssautil.Packages(initial, mode)
+ bytesNewBuffer := pkgs[0].Func("NewBuffer")
+ bytesNewBuffer.Pkg.Build()
- // We'll dump the SSA of bytes.NewBuffer because it is small and stable.
- out := new(bytes.Buffer)
- bytesNewBuffer.WriteTo(out)
+ // We'll dump the SSA of bytes.NewBuffer because it is small and stable.
+ out := new(bytes.Buffer)
+ bytesNewBuffer.WriteTo(out)
- // For determinism, sanitize the location.
- location := prog.Fset.Position(bytesNewBuffer.Pos()).String()
- got := strings.Replace(out.String(), location, "$GOROOT/src/bytes/buffer.go:1", -1)
+ // For determinism, sanitize the location.
+ location := prog.Fset.Position(bytesNewBuffer.Pos()).String()
+ got := strings.Replace(out.String(), location, "$GOROOT/src/bytes/buffer.go:1", -1)
- want := `
+ want := `
# Name: bytes.NewBuffer
# Package: bytes
# Location: $GOROOT/src/bytes/buffer.go:1
@@ -89,8 +104,9 @@ func NewBuffer(buf []byte) *Buffer:
return t0
`[1:]
- if got != want {
- t.Errorf("bytes.NewBuffer SSA = <<%s>>, want <<%s>>", got, want)
+ if got != want {
+ t.Errorf("bytes.NewBuffer SSA = <<%s>>, want <<%s>>", got, want)
+ }
}
}
@@ -102,7 +118,7 @@ func TestBuildPackage_MissingImport(t *testing.T) {
}
pkg := types.NewPackage("bad", "")
- ssapkg, _, err := ssautil.BuildPackage(new(types.Config), fset, pkg, []*ast.File{f}, 0)
+ ssapkg, _, err := ssautil.BuildPackage(new(types.Config), fset, pkg, []*ast.File{f}, ssa.BuilderMode(0))
if err == nil || ssapkg != nil {
t.Fatal("BuildPackage succeeded unexpectedly")
}
@@ -120,6 +136,60 @@ func TestIssue28106(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- prog, _ := ssautil.Packages(pkgs, 0)
+ prog, _ := ssautil.Packages(pkgs, ssa.BuilderMode(0))
prog.Build() // no crash
}
+
+func TestIssue53604(t *testing.T) {
+ // Tests that variable initializers are not added to init() when syntax
+ // is not present but types.Info is available.
+ //
+ // Packages x, y, z are loaded with mode `packages.LoadSyntax`.
+ // Package x imports y, and y imports z.
+ // Packages are built using ssautil.Packages() with x and z as roots.
+ // This setup creates y using CreatePackage(pkg, files, info, ...)
+ // where len(files) == 0 but info != nil.
+ //
+ // Tests that globals from y are not initialized.
+ e := packagestest.Export(t, packagestest.Modules, []packagestest.Module{
+ {
+ Name: "golang.org/fake",
+ Files: map[string]interface{}{
+ "x/x.go": `package x; import "golang.org/fake/y"; var V = y.F()`,
+ "y/y.go": `package y; import "golang.org/fake/z"; var F = func () *int { return &z.Z } `,
+ "z/z.go": `package z; var Z int`,
+ },
+ },
+ })
+ defer e.Cleanup()
+
+ // Load x and z as entry packages using packages.LoadSyntax
+ e.Config.Mode = packages.LoadSyntax
+ pkgs, err := packages.Load(e.Config, path.Join(e.Temp(), "fake/x"), path.Join(e.Temp(), "fake/z"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ for _, p := range pkgs {
+ if len(p.Errors) > 0 {
+ t.Fatalf("%v", p.Errors)
+ }
+ }
+
+ prog, _ := ssautil.Packages(pkgs, ssa.BuilderMode(0))
+ prog.Build()
+
+ // y does not initialize F.
+ y := prog.ImportedPackage("golang.org/fake/y")
+ if y == nil {
+ t.Fatal("Failed to load intermediate package y")
+ }
+ yinit := y.Members["init"].(*ssa.Function)
+ for _, bb := range yinit.Blocks {
+ for _, i := range bb.Instrs {
+ if store, ok := i.(*ssa.Store); ok && store.Addr == y.Var("F") {
+ t.Errorf("y.init() stores to F %v", store)
+ }
+ }
+ }
+
+}
diff --git a/go/ssa/ssautil/switch.go b/go/ssa/ssautil/switch.go
index db03bf555..dd4b04e76 100644
--- a/go/ssa/ssautil/switch.go
+++ b/go/ssa/ssautil/switch.go
@@ -55,7 +55,6 @@ type TypeCase struct {
// A type switch may contain duplicate types, or types assignable
// to an interface type also in the list.
// TODO(adonovan): eliminate such duplicates.
-//
type Switch struct {
Start *ssa.BasicBlock // block containing start of if/else chain
X ssa.Value // the switch operand
@@ -103,7 +102,6 @@ func (sw *Switch) String() string {
// Switches may even be inferred from if/else- or goto-based control flow.
// (In general, the control flow constructs of the source program
// cannot be faithfully reproduced from the SSA representation.)
-//
func Switches(fn *ssa.Function) []Switch {
// Traverse the CFG in dominance order, so we don't
// enter an if/else-chain in the middle.
@@ -197,7 +195,6 @@ func typeSwitch(sw *Switch, y ssa.Value, T types.Type, seen map[*ssa.BasicBlock]
// isComparisonBlock returns the operands (v, k) if a block ends with
// a comparison v==k, where k is a compile-time constant.
-//
func isComparisonBlock(b *ssa.BasicBlock) (v ssa.Value, k *ssa.Const) {
if n := len(b.Instrs); n >= 2 {
if i, ok := b.Instrs[n-1].(*ssa.If); ok {
@@ -216,7 +213,6 @@ func isComparisonBlock(b *ssa.BasicBlock) (v ssa.Value, k *ssa.Const) {
// isTypeAssertBlock returns the operands (y, x, T) if a block ends with
// a type assertion "if y, ok := x.(T); ok {".
-//
func isTypeAssertBlock(b *ssa.BasicBlock) (y, x ssa.Value, T types.Type) {
if n := len(b.Instrs); n >= 4 {
if i, ok := b.Instrs[n-1].(*ssa.If); ok {
diff --git a/go/ssa/ssautil/switch_test.go b/go/ssa/ssautil/switch_test.go
index bad8bdd6a..6db410524 100644
--- a/go/ssa/ssautil/switch_test.go
+++ b/go/ssa/ssautil/switch_test.go
@@ -34,7 +34,7 @@ func TestSwitches(t *testing.T) {
return
}
- prog := ssautil.CreateProgram(iprog, 0)
+ prog := ssautil.CreateProgram(iprog, ssa.BuilderMode(0))
mainPkg := prog.Package(iprog.Created[0].Pkg)
mainPkg.Build()
diff --git a/go/ssa/ssautil/visit.go b/go/ssa/ssautil/visit.go
index 3424e8a30..5f27050b0 100644
--- a/go/ssa/ssautil/visit.go
+++ b/go/ssa/ssautil/visit.go
@@ -18,7 +18,6 @@ import "golang.org/x/tools/go/ssa"
// synthetic wrappers.
//
// Precondition: all packages are built.
-//
func AllFunctions(prog *ssa.Program) map[*ssa.Function]bool {
visit := visitor{
prog: prog,
diff --git a/go/ssa/stdlib_test.go b/go/ssa/stdlib_test.go
index aaa158076..8b9f4238d 100644
--- a/go/ssa/stdlib_test.go
+++ b/go/ssa/stdlib_test.go
@@ -21,12 +21,10 @@ import (
"testing"
"time"
- "golang.org/x/tools/go/ast/inspector"
"golang.org/x/tools/go/packages"
"golang.org/x/tools/go/ssa"
"golang.org/x/tools/go/ssa/ssautil"
"golang.org/x/tools/internal/testenv"
- "golang.org/x/tools/internal/typeparams/genericfeatures"
)
func bytesAllocated() uint64 {
@@ -51,22 +49,6 @@ func TestStdlib(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- var nonGeneric int
- for i := 0; i < len(pkgs); i++ {
- pkg := pkgs[i]
- inspect := inspector.New(pkg.Syntax)
- features := genericfeatures.ForPackage(inspect, pkg.TypesInfo)
- // Skip standard library packages that use generics. This won't be
- // sufficient if any standard library packages start _importing_ packages
- // that use generics.
- if features != 0 {
- t.Logf("skipping package %q which uses generics", pkg.PkgPath)
- continue
- }
- pkgs[nonGeneric] = pkg
- nonGeneric++
- }
- pkgs = pkgs[:nonGeneric]
t1 := time.Now()
alloc1 := bytesAllocated()
@@ -76,6 +58,7 @@ func TestStdlib(t *testing.T) {
// Comment out these lines during benchmarking. Approx SSA build costs are noted.
mode |= ssa.SanityCheckFunctions // + 2% space, + 4% time
mode |= ssa.GlobalDebug // +30% space, +18% time
+ mode |= ssa.InstantiateGenerics // + 0% space, + 2% time (unlikely to reproduce outside of stdlib)
prog, _ := ssautil.Packages(pkgs, mode)
t2 := time.Now()
diff --git a/go/ssa/subst.go b/go/ssa/subst.go
index 0e9263fd2..7efab3578 100644
--- a/go/ssa/subst.go
+++ b/go/ssa/subst.go
@@ -1,10 +1,10 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+
package ssa
import (
- "fmt"
"go/types"
"golang.org/x/tools/internal/typeparams"
@@ -20,37 +20,40 @@ import (
type subster struct {
replacements map[*typeparams.TypeParam]types.Type // values should contain no type params
cache map[types.Type]types.Type // cache of subst results
- ctxt *typeparams.Context
- debug bool // perform extra debugging checks
+ ctxt *typeparams.Context // cache for instantiation
+ scope *types.Scope // *types.Named declared within this scope can be substituted (optional)
+ debug bool // perform extra debugging checks
// TODO(taking): consider adding Pos
+ // TODO(zpavlinovic): replacements can contain type params
+ // when generating instances inside of a generic function body.
}
// Returns a subster that replaces tparams[i] with targs[i]. Uses ctxt as a cache.
// targs should not contain any types in tparams.
-func makeSubster(ctxt *typeparams.Context, tparams []*typeparams.TypeParam, targs []types.Type, debug bool) *subster {
- assert(len(tparams) == len(targs), "makeSubster argument count must match")
+// scope is the (optional) lexical block of the generic function for which we are substituting.
+func makeSubster(ctxt *typeparams.Context, scope *types.Scope, tparams *typeparams.TypeParamList, targs []types.Type, debug bool) *subster {
+ assert(tparams.Len() == len(targs), "makeSubster argument count must match")
subst := &subster{
- replacements: make(map[*typeparams.TypeParam]types.Type, len(tparams)),
+ replacements: make(map[*typeparams.TypeParam]types.Type, tparams.Len()),
cache: make(map[types.Type]types.Type),
ctxt: ctxt,
+ scope: scope,
debug: debug,
}
- for i, tpar := range tparams {
- subst.replacements[tpar] = targs[i]
+ for i := 0; i < tparams.Len(); i++ {
+ subst.replacements[tparams.At(i)] = targs[i]
}
if subst.debug {
- if err := subst.wellFormed(); err != nil {
- panic(err)
- }
+ subst.wellFormed()
}
return subst
}
-// wellFormed returns an error if subst was not properly initialized.
-func (subst *subster) wellFormed() error {
- if subst == nil || len(subst.replacements) == 0 {
- return nil
+// wellFormed asserts that subst was properly initialized.
+func (subst *subster) wellFormed() {
+ if subst == nil {
+ return
}
// Check that all of the type params do not appear in the arguments.
s := make(map[types.Type]bool, len(subst.replacements))
@@ -59,10 +62,9 @@ func (subst *subster) wellFormed() error {
}
for _, r := range subst.replacements {
if reaches(r, s) {
- return fmt.Errorf("\n‰r %s s %v replacements %v\n", r, s, subst.replacements)
+ panic(subst)
}
}
- return nil
}
// typ returns the type of t with the type parameter tparams[i] substituted
@@ -143,6 +145,15 @@ func (subst *subster) typ(t types.Type) (res types.Type) {
}
}
+// types returns the result of {subst.typ(ts[i])}.
+func (subst *subster) types(ts []types.Type) []types.Type {
+ res := make([]types.Type, len(ts))
+ for i := range ts {
+ res[i] = subst.typ(ts[i])
+ }
+ return res
+}
+
func (subst *subster) tuple(t *types.Tuple) *types.Tuple {
if t != nil {
if vars := subst.varlist(t); vars != nil {
@@ -294,37 +305,64 @@ func (subst *subster) interface_(iface *types.Interface) *types.Interface {
}
func (subst *subster) named(t *types.Named) types.Type {
- // A name type may be:
- // (1) ordinary (no type parameters, no type arguments),
- // (2) generic (type parameters but no type arguments), or
- // (3) instantiated (type parameters and type arguments).
+ // A named type may be:
+ // (1) ordinary named type (non-local scope, no type parameters, no type arguments),
+ // (2) locally scoped type,
+ // (3) generic (type parameters but no type arguments), or
+ // (4) instantiated (type parameters and type arguments).
tparams := typeparams.ForNamed(t)
if tparams.Len() == 0 {
- // case (1) ordinary
+ if subst.scope != nil && !subst.scope.Contains(t.Obj().Pos()) {
+ // Outside the current function scope?
+ return t // case (1) ordinary
+ }
- // Note: If Go allows for local type declarations in generic
- // functions we may need to descend into underlying as well.
- return t
+ // case (2) locally scoped type.
+ // Create a new named type to represent this instantiation.
+ // We assume that local types of distinct instantiations of a
+ // generic function are distinct, even if they don't refer to
+ // type parameters, but the spec is unclear; see golang/go#58573.
+ //
+ // Subtle: We short circuit substitution and use a newly created type in
+ // subst, i.e. cache[t]=n, to pre-emptively replace t with n in recursive
+ // types during traversal. This both breaks infinite cycles and allows for
+ // constructing types with the replacement applied in subst.typ(under).
+ //
+ // Example:
+ // func foo[T any]() {
+ // type linkedlist struct {
+ // next *linkedlist
+ // val T
+ // }
+ // }
+ //
+ // When the field `next *linkedlist` is visited during subst.typ(under),
+ // we want the substituted type for the field `next` to be `*n`.
+ n := types.NewNamed(t.Obj(), nil, nil)
+ subst.cache[t] = n
+ subst.cache[n] = n
+ n.SetUnderlying(subst.typ(t.Underlying()))
+ return n
}
targs := typeparams.NamedTypeArgs(t)
// insts are arguments to instantiate using.
insts := make([]types.Type, tparams.Len())
- // case (2) generic ==> targs.Len() == 0
+ // case (3) generic ==> targs.Len() == 0
// Instantiating a generic with no type arguments should be unreachable.
// Please report a bug if you encounter this.
assert(targs.Len() != 0, "substition into a generic Named type is currently unsupported")
- // case (3) instantiated.
+ // case (4) instantiated.
// Substitute into the type arguments and instantiate the replacements/
// Example:
// type N[A any] func() A
// func Foo[T](g N[T]) {}
// To instantiate Foo[string], one goes through {T->string}. To get the type of g
- // one subsitutes T with string in {N with TypeArgs == {T} and TypeParams == {A} }
- // to get {N with TypeArgs == {string} and TypeParams == {A} }.
- assert(targs.Len() == tparams.Len(), "TypeArgs().Len() must match TypeParams().Len() if present")
+ // one subsitutes T with string in {N with typeargs == {T} and typeparams == {A} }
+ // to get {N with TypeArgs == {string} and typeparams == {A} }.
+ assert(targs.Len() == tparams.Len(), "typeargs.Len() must match typeparams.Len() if present")
for i, n := 0, targs.Len(); i < n; i++ {
inst := subst.typ(targs.At(i)) // TODO(generic): Check with rfindley for mutual recursion
insts[i] = inst
@@ -360,25 +398,32 @@ func (subst *subster) signature(t *types.Signature) types.Type {
params := subst.tuple(t.Params())
results := subst.tuple(t.Results())
if recv != t.Recv() || params != t.Params() || results != t.Results() {
- return types.NewSignature(recv, params, results, t.Variadic())
+ return typeparams.NewSignatureType(recv, nil, nil, params, results, t.Variadic())
}
return t
}
// reaches returns true if a type t reaches any type t' s.t. c[t'] == true.
-// Updates c to cache results.
+// It updates c to cache results.
+//
+// reaches is currently only part of the wellFormed debug logic, and
+// in practice c is initially only type parameters. It is not currently
+// relied on in production.
func reaches(t types.Type, c map[types.Type]bool) (res bool) {
if c, ok := c[t]; ok {
return c
}
- c[t] = false // prevent cycles
+
+ // c is populated with temporary false entries as types are visited.
+ // This avoids repeat visits and break cycles.
+ c[t] = false
defer func() {
c[t] = res
}()
switch t := t.(type) {
case *typeparams.TypeParam, *types.Basic:
- // no-op => c == false
+ return false
case *types.Array:
return reaches(t.Elem(), c)
case *types.Slice:
diff --git a/go/ssa/subst_test.go b/go/ssa/subst_test.go
index fe84adcc3..14cda54e6 100644
--- a/go/ssa/subst_test.go
+++ b/go/ssa/subst_test.go
@@ -99,12 +99,8 @@ var _ L[int] = Fn0[L[int]](nil)
}
T := tv.Type.(*types.Named)
- var tparams []*typeparams.TypeParam
- for i, l := 0, typeparams.ForNamed(T); i < l.Len(); i++ {
- tparams = append(tparams, l.At(i))
- }
- subst := makeSubster(typeparams.NewContext(), tparams, targs, true)
+ subst := makeSubster(typeparams.NewContext(), nil, typeparams.ForNamed(T), targs, true)
sub := subst.typ(T.Underlying())
if got := sub.String(); got != test.want {
t.Errorf("subst{%v->%v}.typ(%s) = %v, want %v", test.expr, test.args, T.Underlying(), got, test.want)
diff --git a/go/ssa/testdata/src/README.txt b/go/ssa/testdata/src/README.txt
new file mode 100644
index 000000000..ee5909318
--- /dev/null
+++ b/go/ssa/testdata/src/README.txt
@@ -0,0 +1,5 @@
+These files are present to test building ssa on go files that use signatures from standard library packages.
+
+Only the exported members used by the tests are needed.
+
+Providing these decreases testing time ~10x (90s -> 8s) compared to building the standard library packages form source during tests. \ No newline at end of file
diff --git a/go/ssa/testdata/src/bytes/bytes.go b/go/ssa/testdata/src/bytes/bytes.go
new file mode 100644
index 000000000..deb7fdd7d
--- /dev/null
+++ b/go/ssa/testdata/src/bytes/bytes.go
@@ -0,0 +1,3 @@
+package bytes
+
+func Compare(a, b []byte) int
diff --git a/go/ssa/testdata/src/context/context.go b/go/ssa/testdata/src/context/context.go
new file mode 100644
index 000000000..d4f6c256c
--- /dev/null
+++ b/go/ssa/testdata/src/context/context.go
@@ -0,0 +1,7 @@
+package context
+
+type Context interface {
+ Done() <-chan struct{}
+}
+
+func Background() Context
diff --git a/go/ssa/testdata/src/encoding/encoding.go b/go/ssa/testdata/src/encoding/encoding.go
new file mode 100644
index 000000000..3fa2ba36c
--- /dev/null
+++ b/go/ssa/testdata/src/encoding/encoding.go
@@ -0,0 +1,9 @@
+package encoding
+
+type BinaryMarshaler interface {
+ MarshalBinary() (data []byte, err error)
+}
+
+type BinaryUnmarshaler interface {
+ UnmarshalBinary(data []byte) error
+}
diff --git a/go/ssa/testdata/src/encoding/json/json.go b/go/ssa/testdata/src/encoding/json/json.go
new file mode 100644
index 000000000..2080fc8cb
--- /dev/null
+++ b/go/ssa/testdata/src/encoding/json/json.go
@@ -0,0 +1,4 @@
+package json
+
+func Marshal(v any) ([]byte, error)
+func Unmarshal(data []byte, v any) error
diff --git a/go/ssa/testdata/src/encoding/xml/xml.go b/go/ssa/testdata/src/encoding/xml/xml.go
new file mode 100644
index 000000000..b226144b6
--- /dev/null
+++ b/go/ssa/testdata/src/encoding/xml/xml.go
@@ -0,0 +1,4 @@
+package xml
+
+func Marshal(v any) ([]byte, error)
+func Unmarshal(data []byte, v any) error
diff --git a/go/ssa/testdata/src/errors/errors.go b/go/ssa/testdata/src/errors/errors.go
new file mode 100644
index 000000000..5b292709f
--- /dev/null
+++ b/go/ssa/testdata/src/errors/errors.go
@@ -0,0 +1,3 @@
+package errors
+
+func New(text string) error
diff --git a/go/ssa/testdata/src/fmt/fmt.go b/go/ssa/testdata/src/fmt/fmt.go
new file mode 100644
index 000000000..cacfeef20
--- /dev/null
+++ b/go/ssa/testdata/src/fmt/fmt.go
@@ -0,0 +1,11 @@
+package fmt
+
+func Sprint(args ...interface{}) string
+func Sprintln(args ...interface{}) string
+func Sprintf(format string, args ...interface{}) string
+
+func Print(args ...interface{}) (int, error)
+func Println(args ...interface{})
+func Printf(format string, args ...interface{}) (int, error)
+
+func Errorf(format string, args ...interface{}) error
diff --git a/go/ssa/testdata/src/io/io.go b/go/ssa/testdata/src/io/io.go
new file mode 100644
index 000000000..8cde43061
--- /dev/null
+++ b/go/ssa/testdata/src/io/io.go
@@ -0,0 +1,5 @@
+package io
+
+import "errors"
+
+var EOF = errors.New("EOF")
diff --git a/go/ssa/testdata/src/log/log.go b/go/ssa/testdata/src/log/log.go
new file mode 100644
index 000000000..4ff0d8ea9
--- /dev/null
+++ b/go/ssa/testdata/src/log/log.go
@@ -0,0 +1,5 @@
+package log
+
+func Println(v ...interface{})
+func Fatalln(v ...interface{})
+func Fatalf(format string, v ...any)
diff --git a/go/ssa/testdata/src/math/math.go b/go/ssa/testdata/src/math/math.go
new file mode 100644
index 000000000..9768a56ef
--- /dev/null
+++ b/go/ssa/testdata/src/math/math.go
@@ -0,0 +1,15 @@
+package math
+
+func NaN() float64
+
+func Inf(int) float64
+
+func IsNaN(float64) bool
+
+func Float64bits(float64) uint64
+
+func Signbit(x float64) bool
+
+func Sqrt(x float64) float64
+
+func Sin(x float64) float64
diff --git a/go/ssa/testdata/src/os/os.go b/go/ssa/testdata/src/os/os.go
new file mode 100644
index 000000000..555ef5491
--- /dev/null
+++ b/go/ssa/testdata/src/os/os.go
@@ -0,0 +1,5 @@
+package os
+
+func Getenv(string) string
+
+func Exit(int)
diff --git a/go/ssa/testdata/src/reflect/reflect.go b/go/ssa/testdata/src/reflect/reflect.go
new file mode 100644
index 000000000..f5d7ba2a0
--- /dev/null
+++ b/go/ssa/testdata/src/reflect/reflect.go
@@ -0,0 +1,40 @@
+package reflect
+
+type Type interface {
+ Elem() Type
+ Kind() Kind
+ String() string
+}
+
+type Value struct{}
+
+func (Value) String() string
+func (Value) Elem() Value
+func (Value) Field(int) Value
+func (Value) Index(i int) Value
+func (Value) Int() int64
+func (Value) Interface() interface{}
+func (Value) IsNil() bool
+func (Value) IsValid() bool
+func (Value) Kind() Kind
+func (Value) Len() int
+func (Value) MapIndex(Value) Value
+func (Value) MapKeys() []Value
+func (Value) NumField() int
+func (Value) Pointer() uintptr
+func (Value) SetInt(int64)
+func (Value) Type() Type
+
+func SliceOf(Type) Type
+func TypeOf(interface{}) Type
+func ValueOf(interface{}) Value
+
+type Kind uint
+
+const (
+ Invalid Kind = iota
+ Int
+ Pointer
+)
+
+func DeepEqual(x, y interface{}) bool
diff --git a/go/ssa/testdata/src/runtime/runtime.go b/go/ssa/testdata/src/runtime/runtime.go
new file mode 100644
index 000000000..9feed5c99
--- /dev/null
+++ b/go/ssa/testdata/src/runtime/runtime.go
@@ -0,0 +1,5 @@
+package runtime
+
+func GC()
+
+func SetFinalizer(obj, finalizer any)
diff --git a/go/ssa/testdata/src/sort/sort.go b/go/ssa/testdata/src/sort/sort.go
new file mode 100644
index 000000000..d0b0e9942
--- /dev/null
+++ b/go/ssa/testdata/src/sort/sort.go
@@ -0,0 +1,13 @@
+package sort
+
+func Strings(x []string)
+func Ints(x []int)
+func Float64s(x []float64)
+
+func Sort(data Interface)
+
+type Interface interface {
+ Len() int
+ Less(i, j int) bool
+ Swap(i, j int)
+}
diff --git a/go/ssa/testdata/src/strconv/strconv.go b/go/ssa/testdata/src/strconv/strconv.go
new file mode 100644
index 000000000..3f6f8772b
--- /dev/null
+++ b/go/ssa/testdata/src/strconv/strconv.go
@@ -0,0 +1,6 @@
+package strconv
+
+func Itoa(i int) string
+func Atoi(s string) (int, error)
+
+func FormatFloat(float64, byte, int, int) string
diff --git a/go/ssa/testdata/src/strings/strings.go b/go/ssa/testdata/src/strings/strings.go
new file mode 100644
index 000000000..11695a43c
--- /dev/null
+++ b/go/ssa/testdata/src/strings/strings.go
@@ -0,0 +1,13 @@
+package strings
+
+func Replace(s, old, new string, n int) string
+func Index(haystack, needle string) int
+func Contains(haystack, needle string) bool
+func HasPrefix(s, prefix string) bool
+func EqualFold(s, t string) bool
+func ToLower(s string) string
+
+type Builder struct{}
+
+func (b *Builder) WriteString(s string) (int, error)
+func (b *Builder) String() string
diff --git a/go/ssa/testdata/src/sync/atomic/atomic.go b/go/ssa/testdata/src/sync/atomic/atomic.go
new file mode 100644
index 000000000..6080435b2
--- /dev/null
+++ b/go/ssa/testdata/src/sync/atomic/atomic.go
@@ -0,0 +1,5 @@
+package atomic
+
+import "unsafe"
+
+func LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer)
diff --git a/go/ssa/testdata/src/sync/sync.go b/go/ssa/testdata/src/sync/sync.go
new file mode 100644
index 000000000..8e6ff6893
--- /dev/null
+++ b/go/ssa/testdata/src/sync/sync.go
@@ -0,0 +1,12 @@
+package sync
+
+type Mutex struct{}
+
+func (m *Mutex) Lock()
+func (m *Mutex) Unlock()
+
+type WaitGroup struct{}
+
+func (wg *WaitGroup) Add(delta int)
+func (wg *WaitGroup) Done()
+func (wg *WaitGroup) Wait()
diff --git a/go/ssa/testdata/src/time/time.go b/go/ssa/testdata/src/time/time.go
new file mode 100644
index 000000000..d8d577d61
--- /dev/null
+++ b/go/ssa/testdata/src/time/time.go
@@ -0,0 +1,24 @@
+package time
+
+type Duration int64
+
+func Sleep(Duration)
+
+func NewTimer(d Duration) *Timer
+
+type Timer struct {
+ C <-chan Time
+}
+
+func (t *Timer) Stop() bool
+
+type Time struct{}
+
+func After(d Duration) <-chan Time
+
+const (
+ Nanosecond Duration = iota // Specific values do not matter here.
+ Second
+ Minute
+ Hour
+)
diff --git a/go/ssa/testdata/src/unsafe/unsafe.go b/go/ssa/testdata/src/unsafe/unsafe.go
new file mode 100644
index 000000000..5fd90b6f0
--- /dev/null
+++ b/go/ssa/testdata/src/unsafe/unsafe.go
@@ -0,0 +1,4 @@
+package unsafe
+
+// Empty unsafe package helps other packages load.
+// TODO(taking): determine why.
diff --git a/go/ssa/testdata/valueforexpr.go b/go/ssa/testdata/valueforexpr.go
index da76f13a3..243ec614f 100644
--- a/go/ssa/testdata/valueforexpr.go
+++ b/go/ssa/testdata/valueforexpr.go
@@ -1,3 +1,4 @@
+//go:build ignore
// +build ignore
package main
diff --git a/go/ssa/util.go b/go/ssa/util.go
index 010219364..db53aebee 100644
--- a/go/ssa/util.go
+++ b/go/ssa/util.go
@@ -17,6 +17,7 @@ import (
"golang.org/x/tools/go/ast/astutil"
"golang.org/x/tools/go/types/typeutil"
+ "golang.org/x/tools/internal/typeparams"
)
//// Sanity checking utilities
@@ -35,7 +36,6 @@ func unparen(e ast.Expr) ast.Expr { return astutil.Unparen(e) }
// isBlankIdent returns true iff e is an Ident with name "_".
// They have no associated types.Object, and thus no type.
-//
func isBlankIdent(e ast.Expr) bool {
id, ok := e.(*ast.Ident)
return ok && id.Name == "_"
@@ -49,7 +49,56 @@ func isPointer(typ types.Type) bool {
return ok
}
-func isInterface(T types.Type) bool { return types.IsInterface(T) }
+// isNonTypeParamInterface reports whether t is an interface type but not a type parameter.
+func isNonTypeParamInterface(t types.Type) bool {
+ return !typeparams.IsTypeParam(t) && types.IsInterface(t)
+}
+
+// isBasic reports whether t is a basic type.
+func isBasic(t types.Type) bool {
+ _, ok := t.(*types.Basic)
+ return ok
+}
+
+// isString reports whether t is exactly a string type.
+func isString(t types.Type) bool {
+ return isBasic(t) && t.(*types.Basic).Info()&types.IsString != 0
+}
+
+// isByteSlice reports whether t is of the form []~bytes.
+func isByteSlice(t types.Type) bool {
+ if b, ok := t.(*types.Slice); ok {
+ e, _ := b.Elem().Underlying().(*types.Basic)
+ return e != nil && e.Kind() == types.Byte
+ }
+ return false
+}
+
+// isRuneSlice reports whether t is of the form []~runes.
+func isRuneSlice(t types.Type) bool {
+ if b, ok := t.(*types.Slice); ok {
+ e, _ := b.Elem().Underlying().(*types.Basic)
+ return e != nil && e.Kind() == types.Rune
+ }
+ return false
+}
+
+// isBasicConvTypes returns true when a type set can be
+// one side of a Convert operation. This is when:
+// - All are basic, []byte, or []rune.
+// - At least 1 is basic.
+// - At most 1 is []byte or []rune.
+func isBasicConvTypes(tset termList) bool {
+ basics := 0
+ all := underIs(tset, func(t types.Type) bool {
+ if isBasic(t) {
+ basics++
+ return true
+ }
+ return isByteSlice(t) || isRuneSlice(t)
+ })
+ return all && basics >= 1 && tset.Len()-basics <= 1
+}
// deref returns a pointer's element type; otherwise it returns typ.
func deref(typ types.Type) types.Type {
@@ -64,11 +113,16 @@ func recvType(obj *types.Func) types.Type {
return obj.Type().(*types.Signature).Recv().Type()
}
+// isUntyped returns true for types that are untyped.
+func isUntyped(typ types.Type) bool {
+ b, ok := typ.(*types.Basic)
+ return ok && b.Info()&types.IsUntyped != 0
+}
+
// logStack prints the formatted "start" message to stderr and
// returns a closure that prints the corresponding "end" message.
// Call using 'defer logStack(...)()' to show builder stack on panic.
// Don't forget trailing parens!
-//
func logStack(format string, args ...interface{}) func() {
msg := fmt.Sprintf(format, args...)
io.WriteString(os.Stderr, msg)
@@ -100,25 +154,118 @@ func makeLen(T types.Type) *Builtin {
}
}
+// nonbasicTypes returns a list containing all of the types T in ts that are non-basic.
+func nonbasicTypes(ts []types.Type) []types.Type {
+ if len(ts) == 0 {
+ return nil
+ }
+ added := make(map[types.Type]bool) // additionally filter duplicates
+ var filtered []types.Type
+ for _, T := range ts {
+ if !isBasic(T) {
+ if !added[T] {
+ added[T] = true
+ filtered = append(filtered, T)
+ }
+ }
+ }
+ return filtered
+}
+
+// receiverTypeArgs returns the type arguments to a function's reciever.
+// Returns an empty list if obj does not have a reciever or its reciever does not have type arguments.
+func receiverTypeArgs(obj *types.Func) []types.Type {
+ rtype := recvType(obj)
+ if rtype == nil {
+ return nil
+ }
+ if isPointer(rtype) {
+ rtype = rtype.(*types.Pointer).Elem()
+ }
+ named, ok := rtype.(*types.Named)
+ if !ok {
+ return nil
+ }
+ ts := typeparams.NamedTypeArgs(named)
+ if ts.Len() == 0 {
+ return nil
+ }
+ targs := make([]types.Type, ts.Len())
+ for i := 0; i < ts.Len(); i++ {
+ targs[i] = ts.At(i)
+ }
+ return targs
+}
+
+// recvAsFirstArg takes a method signature and returns a function
+// signature with receiver as the first parameter.
+func recvAsFirstArg(sig *types.Signature) *types.Signature {
+ params := make([]*types.Var, 0, 1+sig.Params().Len())
+ params = append(params, sig.Recv())
+ for i := 0; i < sig.Params().Len(); i++ {
+ params = append(params, sig.Params().At(i))
+ }
+ return typeparams.NewSignatureType(nil, nil, nil, types.NewTuple(params...), sig.Results(), sig.Variadic())
+}
+
+// instance returns whether an expression is a simple or qualified identifier
+// that is a generic instantiation.
+func instance(info *types.Info, expr ast.Expr) bool {
+ // Compare the logic here against go/types.instantiatedIdent,
+ // which also handles *IndexExpr and *IndexListExpr.
+ var id *ast.Ident
+ switch x := expr.(type) {
+ case *ast.Ident:
+ id = x
+ case *ast.SelectorExpr:
+ id = x.Sel
+ default:
+ return false
+ }
+ _, ok := typeparams.GetInstances(info)[id]
+ return ok
+}
+
+// instanceArgs returns the Instance[id].TypeArgs as a slice.
+func instanceArgs(info *types.Info, id *ast.Ident) []types.Type {
+ targList := typeparams.GetInstances(info)[id].TypeArgs
+ if targList == nil {
+ return nil
+ }
+
+ targs := make([]types.Type, targList.Len())
+ for i, n := 0, targList.Len(); i < n; i++ {
+ targs[i] = targList.At(i)
+ }
+ return targs
+}
+
// Mapping of a type T to a canonical instance C s.t. types.Indentical(T, C).
// Thread-safe.
type canonizer struct {
mu sync.Mutex
- canon typeutil.Map // map from type to a canonical instance
+ types typeutil.Map // map from type to a canonical instance
+ lists typeListMap // map from a list of types to a canonical instance
+}
+
+func newCanonizer() *canonizer {
+ c := &canonizer{}
+ h := typeutil.MakeHasher()
+ c.types.SetHasher(h)
+ c.lists.hasher = h
+ return c
}
-// Tuple returns a canonical representative of a Tuple of types.
-// Representative of the empty Tuple is nil.
-func (c *canonizer) Tuple(ts []types.Type) *types.Tuple {
+// List returns a canonical representative of a list of types.
+// Representative of the empty list is nil.
+func (c *canonizer) List(ts []types.Type) *typeList {
if len(ts) == 0 {
return nil
}
- vars := make([]*types.Var, len(ts))
- for i, t := range ts {
- vars[i] = anonVar(t)
- }
- tuple := types.NewTuple(vars...)
- return c.Type(tuple).(*types.Tuple)
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ return c.lists.rep(ts)
}
// Type returns a canonical representative of type T.
@@ -126,9 +273,90 @@ func (c *canonizer) Type(T types.Type) types.Type {
c.mu.Lock()
defer c.mu.Unlock()
- if r := c.canon.At(T); r != nil {
+ if r := c.types.At(T); r != nil {
return r.(types.Type)
}
- c.canon.Set(T, T)
+ c.types.Set(T, T)
return T
}
+
+// A type for representating an canonized list of types.
+type typeList []types.Type
+
+func (l *typeList) identical(ts []types.Type) bool {
+ if l == nil {
+ return len(ts) == 0
+ }
+ n := len(*l)
+ if len(ts) != n {
+ return false
+ }
+ for i, left := range *l {
+ right := ts[i]
+ if !types.Identical(left, right) {
+ return false
+ }
+ }
+ return true
+}
+
+type typeListMap struct {
+ hasher typeutil.Hasher
+ buckets map[uint32][]*typeList
+}
+
+// rep returns a canonical representative of a slice of types.
+func (m *typeListMap) rep(ts []types.Type) *typeList {
+ if m == nil || len(ts) == 0 {
+ return nil
+ }
+
+ if m.buckets == nil {
+ m.buckets = make(map[uint32][]*typeList)
+ }
+
+ h := m.hash(ts)
+ bucket := m.buckets[h]
+ for _, l := range bucket {
+ if l.identical(ts) {
+ return l
+ }
+ }
+
+ // not present. create a representative.
+ cp := make(typeList, len(ts))
+ copy(cp, ts)
+ rep := &cp
+
+ m.buckets[h] = append(bucket, rep)
+ return rep
+}
+
+func (m *typeListMap) hash(ts []types.Type) uint32 {
+ if m == nil {
+ return 0
+ }
+ // Some smallish prime far away from typeutil.Hash.
+ n := len(ts)
+ h := uint32(13619) + 2*uint32(n)
+ for i := 0; i < n; i++ {
+ h += 3 * m.hasher.Hash(ts[i])
+ }
+ return h
+}
+
+// instantiateMethod instantiates m with targs and returns a canonical representative for this method.
+func (canon *canonizer) instantiateMethod(m *types.Func, targs []types.Type, ctxt *typeparams.Context) *types.Func {
+ recv := recvType(m)
+ if p, ok := recv.(*types.Pointer); ok {
+ recv = p.Elem()
+ }
+ named := recv.(*types.Named)
+ inst, err := typeparams.Instantiate(ctxt, typeparams.NamedTypeOrigin(named), targs, false)
+ if err != nil {
+ panic(err)
+ }
+ rep := canon.Type(inst)
+ obj, _, _ := types.LookupFieldOrMethod(rep, true, m.Pkg(), m.Name())
+ return obj.(*types.Func)
+}
diff --git a/go/ssa/wrappers.go b/go/ssa/wrappers.go
index 90ddc9df7..228daf615 100644
--- a/go/ssa/wrappers.go
+++ b/go/ssa/wrappers.go
@@ -22,6 +22,7 @@ package ssa
import (
"fmt"
+ "go/token"
"go/types"
)
@@ -41,16 +42,15 @@ import (
// - the result may be a thunk or a wrapper.
//
// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu)
-//
-func makeWrapper(prog *Program, sel *types.Selection) *Function {
- obj := sel.Obj().(*types.Func) // the declared function
- sig := sel.Type().(*types.Signature) // type of this wrapper
+func makeWrapper(prog *Program, sel *selection, cr *creator) *Function {
+ obj := sel.obj.(*types.Func) // the declared function
+ sig := sel.typ.(*types.Signature) // type of this wrapper
var recv *types.Var // wrapper's receiver or thunk's params[0]
name := obj.Name()
var description string
var start int // first regular param
- if sel.Kind() == types.MethodExpr {
+ if sel.kind == types.MethodExpr {
name += "$thunk"
description = "thunk"
recv = sig.Params().At(0)
@@ -60,7 +60,7 @@ func makeWrapper(prog *Program, sel *types.Selection) *Function {
recv = sig.Recv()
}
- description = fmt.Sprintf("%s for %s", description, sel.Obj())
+ description = fmt.Sprintf("%s for %s", description, sel.obj)
if prog.mode&LogSource != 0 {
defer logStack("make %s to (%s)", description, recv.Type())()
}
@@ -74,14 +74,15 @@ func makeWrapper(prog *Program, sel *types.Selection) *Function {
pos: obj.Pos(),
info: nil, // info is not set on wrappers.
}
+ cr.Add(fn)
fn.startBody()
fn.addSpilledParam(recv)
createParams(fn, start)
- indices := sel.Index()
+ indices := sel.index
var v Value = fn.Locals[0] // spilled receiver
- if isPointer(sel.Recv()) {
+ if isPointer(sel.recv) {
v = emitLoad(fn, v)
// For simple indirection wrappers, perform an informative nil-check:
@@ -91,13 +92,13 @@ func makeWrapper(prog *Program, sel *types.Selection) *Function {
c.Call.Value = &Builtin{
name: "ssa:wrapnilchk",
sig: types.NewSignature(nil,
- types.NewTuple(anonVar(sel.Recv()), anonVar(tString), anonVar(tString)),
- types.NewTuple(anonVar(sel.Recv())), false),
+ types.NewTuple(anonVar(sel.recv), anonVar(tString), anonVar(tString)),
+ types.NewTuple(anonVar(sel.recv)), false),
}
c.Call.Args = []Value{
v,
- stringConst(deref(sel.Recv()).String()),
- stringConst(sel.Obj().Name()),
+ stringConst(deref(sel.recv).String()),
+ stringConst(sel.obj.Name()),
}
c.setType(v.Type())
v = fn.emit(&c)
@@ -112,35 +113,39 @@ func makeWrapper(prog *Program, sel *types.Selection) *Function {
// Load) in preference to value extraction (Field possibly
// preceded by Load).
- v = emitImplicitSelections(fn, v, indices[:len(indices)-1])
+ v = emitImplicitSelections(fn, v, indices[:len(indices)-1], token.NoPos)
// Invariant: v is a pointer, either
// value of implicit *C field, or
// address of implicit C field.
var c Call
- if r := recvType(obj); !isInterface(r) { // concrete method
+ if r := recvType(obj); !types.IsInterface(r) { // concrete method
if !isPointer(r) {
v = emitLoad(fn, v)
}
- c.Call.Value = prog.declaredFunc(obj)
+ callee := prog.originFunc(obj)
+ if callee.typeparams.Len() > 0 {
+ callee = prog.lookupOrCreateInstance(callee, receiverTypeArgs(obj), cr)
+ }
+ c.Call.Value = callee
c.Call.Args = append(c.Call.Args, v)
} else {
c.Call.Method = obj
- c.Call.Value = emitLoad(fn, v)
+ c.Call.Value = emitLoad(fn, v) // interface (possibly a typeparam)
}
for _, arg := range fn.Params[1:] {
c.Call.Args = append(c.Call.Args, arg)
}
emitTailCall(fn, &c)
fn.finishBody()
+ fn.done()
return fn
}
// createParams creates parameters for wrapper method fn based on its
// Signature.Params, which do not include the receiver.
// start is the index of the first regular parameter to use.
-//
func createParams(fn *Function, start int) {
tparams := fn.Signature.Params()
for i, n := start, tparams.Len(); i < n; i++ {
@@ -159,26 +164,28 @@ func createParams(fn *Function, start int) {
// Use MakeClosure with such a wrapper to construct a bound method
// closure. e.g.:
//
-// type T int or: type T interface { meth() }
-// func (t T) meth()
-// var t T
-// f := t.meth
-// f() // calls t.meth()
+// type T int or: type T interface { meth() }
+// func (t T) meth()
+// var t T
+// f := t.meth
+// f() // calls t.meth()
//
// f is a closure of a synthetic wrapper defined as if by:
//
-// f := func() { return t.meth() }
+// f := func() { return t.meth() }
//
// Unlike makeWrapper, makeBound need perform no indirection or field
// selections because that can be done before the closure is
// constructed.
//
// EXCLUSIVE_LOCKS_ACQUIRED(meth.Prog.methodsMu)
-//
-func makeBound(prog *Program, obj *types.Func) *Function {
+func makeBound(prog *Program, obj *types.Func, cr *creator) *Function {
+ targs := receiverTypeArgs(obj)
+ key := boundsKey{obj, prog.canon.List(targs)}
+
prog.methodsMu.Lock()
defer prog.methodsMu.Unlock()
- fn, ok := prog.bounds[obj]
+ fn, ok := prog.bounds[key]
if !ok {
description := fmt.Sprintf("bound method wrapper for %s", obj)
if prog.mode&LogSource != 0 {
@@ -193,6 +200,7 @@ func makeBound(prog *Program, obj *types.Func) *Function {
pos: obj.Pos(),
info: nil, // info is not set on wrappers.
}
+ cr.Add(fn)
fv := &FreeVar{name: "recv", typ: recvType(obj), parent: fn}
fn.FreeVars = []*FreeVar{fv}
@@ -200,20 +208,25 @@ func makeBound(prog *Program, obj *types.Func) *Function {
createParams(fn, 0)
var c Call
- if !isInterface(recvType(obj)) { // concrete
- c.Call.Value = prog.declaredFunc(obj)
+ if !types.IsInterface(recvType(obj)) { // concrete
+ callee := prog.originFunc(obj)
+ if callee.typeparams.Len() > 0 {
+ callee = prog.lookupOrCreateInstance(callee, targs, cr)
+ }
+ c.Call.Value = callee
c.Call.Args = []Value{fv}
} else {
- c.Call.Value = fv
c.Call.Method = obj
+ c.Call.Value = fv // interface (possibly a typeparam)
}
for _, arg := range fn.Params {
c.Call.Args = append(c.Call.Args, arg)
}
emitTailCall(fn, &c)
fn.finishBody()
+ fn.done()
- prog.bounds[obj] = fn
+ prog.bounds[key] = fn
}
return fn
}
@@ -221,41 +234,40 @@ func makeBound(prog *Program, obj *types.Func) *Function {
// -- thunks -----------------------------------------------------------
// makeThunk returns a thunk, a synthetic function that delegates to a
-// concrete or interface method denoted by sel.Obj(). The resulting
+// concrete or interface method denoted by sel.obj. The resulting
// function has no receiver, but has an additional (first) regular
// parameter.
//
-// Precondition: sel.Kind() == types.MethodExpr.
+// Precondition: sel.kind == types.MethodExpr.
//
-// type T int or: type T interface { meth() }
-// func (t T) meth()
-// f := T.meth
-// var t T
-// f(t) // calls t.meth()
+// type T int or: type T interface { meth() }
+// func (t T) meth()
+// f := T.meth
+// var t T
+// f(t) // calls t.meth()
//
// f is a synthetic wrapper defined as if by:
//
-// f := func(t T) { return t.meth() }
+// f := func(t T) { return t.meth() }
//
// TODO(adonovan): opt: currently the stub is created even when used
// directly in a function call: C.f(i, 0). This is less efficient
// than inlining the stub.
//
// EXCLUSIVE_LOCKS_ACQUIRED(meth.Prog.methodsMu)
-//
-func makeThunk(prog *Program, sel *types.Selection) *Function {
- if sel.Kind() != types.MethodExpr {
+func makeThunk(prog *Program, sel *selection, cr *creator) *Function {
+ if sel.kind != types.MethodExpr {
panic(sel)
}
- // Canonicalize sel.Recv() to avoid constructing duplicate thunks.
- canonRecv := prog.canon.Type(sel.Recv())
+ // Canonicalize sel.recv to avoid constructing duplicate thunks.
+ canonRecv := prog.canon.Type(sel.recv)
key := selectionKey{
- kind: sel.Kind(),
+ kind: sel.kind,
recv: canonRecv,
- obj: sel.Obj(),
- index: fmt.Sprint(sel.Index()),
- indirect: sel.Indirect(),
+ obj: sel.obj,
+ index: fmt.Sprint(sel.index),
+ indirect: sel.indirect,
}
prog.methodsMu.Lock()
@@ -263,7 +275,7 @@ func makeThunk(prog *Program, sel *types.Selection) *Function {
fn, ok := prog.thunks[key]
if !ok {
- fn = makeWrapper(prog, sel)
+ fn = makeWrapper(prog, sel, cr)
if fn.Signature.Recv() != nil {
panic(fn) // unexpected receiver
}
@@ -284,3 +296,91 @@ type selectionKey struct {
index string
indirect bool
}
+
+// boundsKey is a unique for the object and a type instantiation.
+type boundsKey struct {
+ obj types.Object // t.meth
+ inst *typeList // canonical type instantiation list.
+}
+
+// A local version of *types.Selection.
+// Needed for some additional control, such as creating a MethodExpr for an instantiation.
+type selection struct {
+ kind types.SelectionKind
+ recv types.Type
+ typ types.Type
+ obj types.Object
+ index []int
+ indirect bool
+}
+
+func toSelection(sel *types.Selection) *selection {
+ return &selection{
+ kind: sel.Kind(),
+ recv: sel.Recv(),
+ typ: sel.Type(),
+ obj: sel.Obj(),
+ index: sel.Index(),
+ indirect: sel.Indirect(),
+ }
+}
+
+// -- instantiations --------------------------------------------------
+
+// buildInstantiationWrapper creates a body for an instantiation
+// wrapper fn. The body calls the original generic function,
+// bracketed by ChangeType conversions on its arguments and results.
+func buildInstantiationWrapper(fn *Function) {
+ orig := fn.topLevelOrigin
+ sig := fn.Signature
+
+ fn.startBody()
+ if sig.Recv() != nil {
+ fn.addParamObj(sig.Recv())
+ }
+ createParams(fn, 0)
+
+ // Create body. Add a call to origin generic function
+ // and make type changes between argument and parameters,
+ // as well as return values.
+ var c Call
+ c.Call.Value = orig
+ if res := orig.Signature.Results(); res.Len() == 1 {
+ c.typ = res.At(0).Type()
+ } else {
+ c.typ = res
+ }
+
+ // parameter of instance becomes an argument to the call
+ // to the original generic function.
+ argOffset := 0
+ for i, arg := range fn.Params {
+ var typ types.Type
+ if i == 0 && sig.Recv() != nil {
+ typ = orig.Signature.Recv().Type()
+ argOffset = 1
+ } else {
+ typ = orig.Signature.Params().At(i - argOffset).Type()
+ }
+ c.Call.Args = append(c.Call.Args, emitTypeCoercion(fn, arg, typ))
+ }
+
+ results := fn.emit(&c)
+ var ret Return
+ switch res := sig.Results(); res.Len() {
+ case 0:
+ // no results, do nothing.
+ case 1:
+ ret.Results = []Value{emitTypeCoercion(fn, results, res.At(0).Type())}
+ default:
+ for i := 0; i < sig.Results().Len(); i++ {
+ v := emitExtract(fn, results, i)
+ ret.Results = append(ret.Results, emitTypeCoercion(fn, v, res.At(i).Type()))
+ }
+ }
+
+ fn.emit(&ret)
+ fn.currentBlock = nil
+
+ fn.finishBody()
+}
diff --git a/go/types/objectpath/objectpath.go b/go/types/objectpath/objectpath.go
index 557202b4d..be8f5a867 100644
--- a/go/types/objectpath/objectpath.go
+++ b/go/types/objectpath/objectpath.go
@@ -14,8 +14,10 @@
// distinct but logically equivalent.
//
// A single object may have multiple paths. In this example,
-// type A struct{ X int }
-// type B A
+//
+// type A struct{ X int }
+// type B A
+//
// the field X has two paths due to its membership of both A and B.
// The For(obj) function always returns one of these paths, arbitrarily
// but consistently.
@@ -29,6 +31,8 @@ import (
"strings"
"golang.org/x/tools/internal/typeparams"
+
+ _ "unsafe" // for go:linkname
)
// A Path is an opaque name that identifies a types.Object
@@ -45,30 +49,30 @@ type Path string
// The sequences represent a path through the package/object/type graph.
// We classify these operators by their type:
//
-// PO package->object Package.Scope.Lookup
-// OT object->type Object.Type
-// TT type->type Type.{Elem,Key,Params,Results,Underlying} [EKPRU]
-// TO type->object Type.{At,Field,Method,Obj} [AFMO]
+// PO package->object Package.Scope.Lookup
+// OT object->type Object.Type
+// TT type->type Type.{Elem,Key,Params,Results,Underlying} [EKPRU]
+// TO type->object Type.{At,Field,Method,Obj} [AFMO]
//
// All valid paths start with a package and end at an object
// and thus may be defined by the regular language:
//
-// objectpath = PO (OT TT* TO)*
+// objectpath = PO (OT TT* TO)*
//
// The concrete encoding follows directly:
-// - The only PO operator is Package.Scope.Lookup, which requires an identifier.
-// - The only OT operator is Object.Type,
-// which we encode as '.' because dot cannot appear in an identifier.
-// - The TT operators are encoded as [EKPRUTC];
-// one of these (TypeParam) requires an integer operand,
-// which is encoded as a string of decimal digits.
-// - The TO operators are encoded as [AFMO];
-// three of these (At,Field,Method) require an integer operand,
-// which is encoded as a string of decimal digits.
-// These indices are stable across different representations
-// of the same package, even source and export data.
-// The indices used are implementation specific and may not correspond to
-// the argument to the go/types function.
+// - The only PO operator is Package.Scope.Lookup, which requires an identifier.
+// - The only OT operator is Object.Type,
+// which we encode as '.' because dot cannot appear in an identifier.
+// - The TT operators are encoded as [EKPRUTC];
+// one of these (TypeParam) requires an integer operand,
+// which is encoded as a string of decimal digits.
+// - The TO operators are encoded as [AFMO];
+// three of these (At,Field,Method) require an integer operand,
+// which is encoded as a string of decimal digits.
+// These indices are stable across different representations
+// of the same package, even source and export data.
+// The indices used are implementation specific and may not correspond to
+// the argument to the go/types function.
//
// In the example below,
//
@@ -81,15 +85,14 @@ type Path string
// field X has the path "T.UM0.RA1.F0",
// representing the following sequence of operations:
//
-// p.Lookup("T") T
-// .Type().Underlying().Method(0). f
-// .Type().Results().At(1) b
-// .Type().Field(0) X
+// p.Lookup("T") T
+// .Type().Underlying().Method(0). f
+// .Type().Results().At(1) b
+// .Type().Field(0) X
//
// The encoding is not maximally compact---every R or P is
// followed by an A, for example---but this simplifies the
// encoder and decoder.
-//
const (
// object->type operators
opType = '.' // .Type() (Object)
@@ -110,7 +113,7 @@ const (
opObj = 'O' // .Obj() (Named, TypeParam)
)
-// The For function returns the path to an object relative to its package,
+// For returns the path to an object relative to its package,
// or an error if the object is not accessible from the package's Scope.
//
// The For function guarantees to return a path only for the following objects:
@@ -136,13 +139,30 @@ const (
//
// For(X) would return a path that denotes the following sequence of operations:
//
-// p.Scope().Lookup("T") (TypeName T)
-// .Type().Underlying().Method(0). (method Func f)
-// .Type().Results().At(1) (field Var b)
-// .Type().Field(0) (field Var X)
+// p.Scope().Lookup("T") (TypeName T)
+// .Type().Underlying().Method(0). (method Func f)
+// .Type().Results().At(1) (field Var b)
+// .Type().Field(0) (field Var X)
//
// where p is the package (*types.Package) to which X belongs.
func For(obj types.Object) (Path, error) {
+ return newEncoderFor()(obj)
+}
+
+// An encoder amortizes the cost of encoding the paths of multiple objects.
+// Nonexported pending approval of proposal 58668.
+type encoder struct {
+ scopeNamesMemo map[*types.Scope][]string // memoization of Scope.Names()
+ namedMethodsMemo map[*types.Named][]*types.Func // memoization of namedMethods()
+}
+
+// Exposed to gopls via golang.org/x/tools/internal/typesinternal
+// pending approval of proposal 58668.
+//
+//go:linkname newEncoderFor
+func newEncoderFor() func(types.Object) (Path, error) { return new(encoder).For }
+
+func (enc *encoder) For(obj types.Object) (Path, error) {
pkg := obj.Pkg()
// This table lists the cases of interest.
@@ -223,10 +243,11 @@ func For(obj types.Object) (Path, error) {
if recv := obj.Type().(*types.Signature).Recv(); recv == nil {
return "", fmt.Errorf("func is not a method: %v", obj)
}
- // TODO(adonovan): opt: if the method is concrete,
- // do a specialized version of the rest of this function so
- // that it's O(1) not O(|scope|). Basically 'find' is needed
- // only for struct fields and interface methods.
+
+ if path, ok := enc.concreteMethod(obj); ok {
+ // Fast path for concrete methods that avoids looping over scope.
+ return path, nil
+ }
default:
panic(obj)
@@ -239,7 +260,7 @@ func For(obj types.Object) (Path, error) {
// the best paths because non-types may
// refer to types, but not the reverse.
empty := make([]byte, 0, 48) // initial space
- names := scope.Names()
+ names := enc.scopeNames(scope)
for _, name := range names {
o := scope.Lookup(name)
tname, ok := o.(*types.TypeName)
@@ -292,9 +313,7 @@ func For(obj types.Object) (Path, error) {
// Note that method index here is always with respect
// to canonical ordering of methods, regardless of how
// they appear in the underlying type.
- canonical := canonicalize(T)
- for i := 0; i < len(canonical); i++ {
- m := canonical[i]
+ for i, m := range enc.namedMethods(T) {
path2 := appendOpArg(path, opMethod, i)
if m == obj {
return Path(path2), nil // found declared method
@@ -315,6 +334,96 @@ func appendOpArg(path []byte, op byte, arg int) []byte {
return path
}
+// concreteMethod returns the path for meth, which must have a non-nil receiver.
+// The second return value indicates success and may be false if the method is
+// an interface method or if it is an instantiated method.
+//
+// This function is just an optimization that avoids the general scope walking
+// approach. You are expected to fall back to the general approach if this
+// function fails.
+func (enc *encoder) concreteMethod(meth *types.Func) (Path, bool) {
+ // Concrete methods can only be declared on package-scoped named types. For
+ // that reason we can skip the expensive walk over the package scope: the
+ // path will always be package -> named type -> method. We can trivially get
+ // the type name from the receiver, and only have to look over the type's
+ // methods to find the method index.
+ //
+ // Methods on generic types require special consideration, however. Consider
+ // the following package:
+ //
+ // L1: type S[T any] struct{}
+ // L2: func (recv S[A]) Foo() { recv.Bar() }
+ // L3: func (recv S[B]) Bar() { }
+ // L4: type Alias = S[int]
+ // L5: func _[T any]() { var s S[int]; s.Foo() }
+ //
+ // The receivers of methods on generic types are instantiations. L2 and L3
+ // instantiate S with the type-parameters A and B, which are scoped to the
+ // respective methods. L4 and L5 each instantiate S with int. Each of these
+ // instantiations has its own method set, full of methods (and thus objects)
+ // with receivers whose types are the respective instantiations. In other
+ // words, we have
+ //
+ // S[A].Foo, S[A].Bar
+ // S[B].Foo, S[B].Bar
+ // S[int].Foo, S[int].Bar
+ //
+ // We may thus be trying to produce object paths for any of these objects.
+ //
+ // S[A].Foo and S[B].Bar are the origin methods, and their paths are S.Foo
+ // and S.Bar, which are the paths that this function naturally produces.
+ //
+ // S[A].Bar, S[B].Foo, and both methods on S[int] are instantiations that
+ // don't correspond to the origin methods. For S[int], this is significant.
+ // The most precise object path for S[int].Foo, for example, is Alias.Foo,
+ // not S.Foo. Our function, however, would produce S.Foo, which would
+ // resolve to a different object.
+ //
+ // For S[A].Bar and S[B].Foo it could be argued that S.Bar and S.Foo are
+ // still the correct paths, since only the origin methods have meaningful
+ // paths. But this is likely only true for trivial cases and has edge cases.
+ // Since this function is only an optimization, we err on the side of giving
+ // up, deferring to the slower but definitely correct algorithm. Most users
+ // of objectpath will only be giving us origin methods, anyway, as referring
+ // to instantiated methods is usually not useful.
+
+ if typeparams.OriginMethod(meth) != meth {
+ return "", false
+ }
+
+ recvT := meth.Type().(*types.Signature).Recv().Type()
+ if ptr, ok := recvT.(*types.Pointer); ok {
+ recvT = ptr.Elem()
+ }
+
+ named, ok := recvT.(*types.Named)
+ if !ok {
+ return "", false
+ }
+
+ if types.IsInterface(named) {
+ // Named interfaces don't have to be package-scoped
+ //
+ // TODO(dominikh): opt: if scope.Lookup(name) == named, then we can apply this optimization to interface
+ // methods, too, I think.
+ return "", false
+ }
+
+ // Preallocate space for the name, opType, opMethod, and some digits.
+ name := named.Obj().Name()
+ path := make([]byte, 0, len(name)+8)
+ path = append(path, name...)
+ path = append(path, opType)
+ for i, m := range enc.namedMethods(named) {
+ if m == meth {
+ path = appendOpArg(path, opMethod, i)
+ return Path(path), true
+ }
+ }
+
+ panic(fmt.Sprintf("couldn't find method %s on type %s", meth, named))
+}
+
// find finds obj within type T, returning the path to it, or nil if not found.
//
// The seen map is used to short circuit cycles through type parameters. If
@@ -570,15 +679,23 @@ func Object(pkg *types.Package, p Path) (types.Object, error) {
t = nil
case opMethod:
- hasMethods, ok := t.(hasMethods) // Interface or Named
- if !ok {
+ switch t := t.(type) {
+ case *types.Interface:
+ if index >= t.NumMethods() {
+ return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods())
+ }
+ obj = t.Method(index) // Id-ordered
+
+ case *types.Named:
+ methods := namedMethods(t) // (unmemoized)
+ if index >= len(methods) {
+ return nil, fmt.Errorf("method index %d out of range [0-%d)", index, len(methods))
+ }
+ obj = methods[index] // Id-ordered
+
+ default:
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want interface or named)", code, t, t)
}
- canonical := canonicalize(hasMethods)
- if n := len(canonical); index >= n {
- return nil, fmt.Errorf("method index %d out of range [0-%d)", index, n)
- }
- obj = canonical[index]
t = nil
case opObj:
@@ -601,27 +718,45 @@ func Object(pkg *types.Package, p Path) (types.Object, error) {
return obj, nil // success
}
-// hasMethods is an abstraction of *types.{Interface,Named}. This is pulled up
-// because it is used by methodOrdering, which is in turn used by both encoding
-// and decoding.
-type hasMethods interface {
- Method(int) *types.Func
- NumMethods() int
+// namedMethods returns the methods of a Named type in ascending Id order.
+func namedMethods(named *types.Named) []*types.Func {
+ methods := make([]*types.Func, named.NumMethods())
+ for i := range methods {
+ methods[i] = named.Method(i)
+ }
+ sort.Slice(methods, func(i, j int) bool {
+ return methods[i].Id() < methods[j].Id()
+ })
+ return methods
}
-// canonicalize returns a canonical order for the methods in a hasMethod.
-func canonicalize(hm hasMethods) []*types.Func {
- count := hm.NumMethods()
- if count <= 0 {
- return nil
+// scopeNames is a memoization of scope.Names. Callers must not modify the result.
+func (enc *encoder) scopeNames(scope *types.Scope) []string {
+ m := enc.scopeNamesMemo
+ if m == nil {
+ m = make(map[*types.Scope][]string)
+ enc.scopeNamesMemo = m
}
- canon := make([]*types.Func, count)
- for i := 0; i < count; i++ {
- canon[i] = hm.Method(i)
+ names, ok := m[scope]
+ if !ok {
+ names = scope.Names() // allocates and sorts
+ m[scope] = names
}
- less := func(i, j int) bool {
- return canon[i].Id() < canon[j].Id()
+ return names
+}
+
+// namedMethods is a memoization of the namedMethods function. Callers must not modify the result.
+func (enc *encoder) namedMethods(named *types.Named) []*types.Func {
+ m := enc.namedMethodsMemo
+ if m == nil {
+ m = make(map[*types.Named][]*types.Func)
+ enc.namedMethodsMemo = m
}
- sort.Slice(canon, less)
- return canon
+ methods, ok := m[named]
+ if !ok {
+ methods = namedMethods(named) // allocates and sorts
+ m[named] = methods
+ }
+ return methods
+
}
diff --git a/go/types/objectpath/objectpath_test.go b/go/types/objectpath/objectpath_test.go
index 39e7b1bcd..adfad2cd2 100644
--- a/go/types/objectpath/objectpath_test.go
+++ b/go/types/objectpath/objectpath_test.go
@@ -182,7 +182,7 @@ func testPath(prog *loader.Program, test pathTest) error {
return fmt.Errorf("Object(%s, %q) returned error %q, want %q", pkg.Path(), test.path, err, test.wantErr)
}
if test.wantErr != "" {
- if got := stripSubscripts(err.Error()); got != test.wantErr {
+ if got := err.Error(); got != test.wantErr {
return fmt.Errorf("Object(%s, %q) error was %q, want %q",
pkg.Path(), test.path, got, test.wantErr)
}
@@ -190,7 +190,7 @@ func testPath(prog *loader.Program, test pathTest) error {
}
// Inv: err == nil
- if objString := stripSubscripts(obj.String()); objString != test.wantobj {
+ if objString := obj.String(); objString != test.wantobj {
return fmt.Errorf("Object(%s, %q) = %s, want %s", pkg.Path(), test.path, objString, test.wantobj)
}
if obj.Pkg() != pkg {
@@ -215,25 +215,6 @@ func testPath(prog *loader.Program, test pathTest) error {
return nil
}
-// stripSubscripts removes type parameter id subscripts.
-//
-// TODO(rfindley): remove this function once subscripts are removed from the
-// type parameter type string.
-func stripSubscripts(s string) string {
- var runes []rune
- for _, r := range s {
- // For debugging/uniqueness purposes, TypeString on a type parameter adds a
- // subscript corresponding to the type parameter's unique id. This is going
- // to be removed, but in the meantime we skip the subscript runes to get a
- // deterministic output.
- if '₀' <= r && r < '₀'+10 {
- continue // trim type parameter subscripts
- }
- runes = append(runes, r)
- }
- return string(runes)
-}
-
// TestSourceAndExportData uses objectpath to compute a correspondence
// of objects between two versions of the same package, one loaded from
// source, the other from export data.
diff --git a/go/types/typeutil/imports.go b/go/types/typeutil/imports.go
index 9c441dba9..b81ce0c33 100644
--- a/go/types/typeutil/imports.go
+++ b/go/types/typeutil/imports.go
@@ -12,7 +12,6 @@ import "go/types"
// package Q, Q appears earlier than P in the result.
// The algorithm follows import statements in the order they
// appear in the source code, so the result is a total order.
-//
func Dependencies(pkgs ...*types.Package) []*types.Package {
var result []*types.Package
seen := make(map[*types.Package]bool)
diff --git a/go/types/typeutil/map.go b/go/types/typeutil/map.go
index c9f8f25a0..7bd2fdb38 100644
--- a/go/types/typeutil/map.go
+++ b/go/types/typeutil/map.go
@@ -24,7 +24,6 @@ import (
// Just as with map[K]V, a nil *Map is a valid empty map.
//
// Not thread-safe.
-//
type Map struct {
hasher Hasher // shared by many Maps
table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused
@@ -57,14 +56,12 @@ type entry struct {
//
// If SetHasher is not called, the Map will create a private hasher at
// the first call to Insert.
-//
func (m *Map) SetHasher(hasher Hasher) {
m.hasher = hasher
}
// Delete removes the entry with the given key, if any.
// It returns true if the entry was found.
-//
func (m *Map) Delete(key types.Type) bool {
if m != nil && m.table != nil {
hash := m.hasher.Hash(key)
@@ -84,7 +81,6 @@ func (m *Map) Delete(key types.Type) bool {
// At returns the map entry for the given key.
// The result is nil if the entry is not present.
-//
func (m *Map) At(key types.Type) interface{} {
if m != nil && m.table != nil {
for _, e := range m.table[m.hasher.Hash(key)] {
@@ -145,7 +141,6 @@ func (m *Map) Len() int {
// f will not be invoked for it, but if f inserts a map entry that
// Iterate has not yet reached, whether or not f will be invoked for
// it is unspecified.
-//
func (m *Map) Iterate(f func(key types.Type, value interface{})) {
if m != nil {
for _, bucket := range m.table {
@@ -190,14 +185,12 @@ func (m *Map) toString(values bool) string {
// String returns a string representation of the map's entries.
// Values are printed using fmt.Sprintf("%v", v).
// Order is unspecified.
-//
func (m *Map) String() string {
return m.toString(true)
}
// KeysString returns a string representation of the map's key set.
// Order is unspecified.
-//
func (m *Map) KeysString() string {
return m.toString(false)
}
@@ -339,7 +332,9 @@ func (h Hasher) hashFor(t types.Type) uint32 {
// Method order is not significant.
// Ignore m.Pkg().
m := t.Method(i)
- hash += 3*hashString(m.Name()) + 5*h.Hash(m.Type())
+ // Use shallow hash on method signature to
+ // avoid anonymous interface cycles.
+ hash += 3*hashString(m.Name()) + 5*h.shallowHash(m.Type())
}
// Hash type restrictions.
@@ -441,3 +436,76 @@ func (h Hasher) hashPtr(ptr interface{}) uint32 {
h.ptrMap[ptr] = hash
return hash
}
+
+// shallowHash computes a hash of t without looking at any of its
+// element Types, to avoid potential anonymous cycles in the types of
+// interface methods.
+//
+// When an unnamed non-empty interface type appears anywhere among the
+// arguments or results of an interface method, there is a potential
+// for endless recursion. Consider:
+//
+// type X interface { m() []*interface { X } }
+//
+// The problem is that the Methods of the interface in m's result type
+// include m itself; there is no mention of the named type X that
+// might help us break the cycle.
+// (See comment in go/types.identical, case *Interface, for more.)
+func (h Hasher) shallowHash(t types.Type) uint32 {
+ // t is the type of an interface method (Signature),
+ // its params or results (Tuples), or their immediate
+ // elements (mostly Slice, Pointer, Basic, Named),
+ // so there's no need to optimize anything else.
+ switch t := t.(type) {
+ case *types.Signature:
+ var hash uint32 = 604171
+ if t.Variadic() {
+ hash *= 971767
+ }
+ // The Signature/Tuple recursion is always finite
+ // and invariably shallow.
+ return hash + 1062599*h.shallowHash(t.Params()) + 1282529*h.shallowHash(t.Results())
+
+ case *types.Tuple:
+ n := t.Len()
+ hash := 9137 + 2*uint32(n)
+ for i := 0; i < n; i++ {
+ hash += 53471161 * h.shallowHash(t.At(i).Type())
+ }
+ return hash
+
+ case *types.Basic:
+ return 45212177 * uint32(t.Kind())
+
+ case *types.Array:
+ return 1524181 + 2*uint32(t.Len())
+
+ case *types.Slice:
+ return 2690201
+
+ case *types.Struct:
+ return 3326489
+
+ case *types.Pointer:
+ return 4393139
+
+ case *typeparams.Union:
+ return 562448657
+
+ case *types.Interface:
+ return 2124679 // no recursion here
+
+ case *types.Map:
+ return 9109
+
+ case *types.Chan:
+ return 9127
+
+ case *types.Named:
+ return h.hashPtr(t.Obj())
+
+ case *typeparams.TypeParam:
+ return h.hashPtr(t.Obj())
+ }
+ panic(fmt.Sprintf("shallowHash: %T: %v", t, t))
+}
diff --git a/go/types/typeutil/map_test.go b/go/types/typeutil/map_test.go
index 8cd643e5b..ee73ff9cf 100644
--- a/go/types/typeutil/map_test.go
+++ b/go/types/typeutil/map_test.go
@@ -244,6 +244,14 @@ func Bar[P Constraint[P]]() {}
func Baz[Q any]() {} // The underlying type of Constraint[P] is any.
// But Quux is not.
func Quux[Q interface{ quux() }]() {}
+
+
+type Issue56048_I interface{ m() interface { Issue56048_I } }
+var Issue56048 = Issue56048_I.m
+
+type Issue56048_Ib interface{ m() chan []*interface { Issue56048_Ib } }
+var Issue56048b = Issue56048_Ib.m
+
`
fset := token.NewFileSet()
@@ -296,12 +304,14 @@ func Quux[Q interface{ quux() }]() {}
ME1Type = scope.Lookup("ME1Type").Type()
ME2 = scope.Lookup("ME2").Type()
- Constraint = scope.Lookup("Constraint").Type()
- Foo = scope.Lookup("Foo").Type()
- Fn = scope.Lookup("Fn").Type()
- Bar = scope.Lookup("Foo").Type()
- Baz = scope.Lookup("Foo").Type()
- Quux = scope.Lookup("Quux").Type()
+ Constraint = scope.Lookup("Constraint").Type()
+ Foo = scope.Lookup("Foo").Type()
+ Fn = scope.Lookup("Fn").Type()
+ Bar = scope.Lookup("Foo").Type()
+ Baz = scope.Lookup("Foo").Type()
+ Quux = scope.Lookup("Quux").Type()
+ Issue56048 = scope.Lookup("Issue56048").Type()
+ Issue56048b = scope.Lookup("Issue56048b").Type()
)
tmap := new(typeutil.Map)
@@ -371,6 +381,9 @@ func Quux[Q interface{ quux() }]() {}
{Bar, "Bar", false},
{Baz, "Baz", false},
{Quux, "Quux", true},
+
+ {Issue56048, "Issue56048", true}, // (not actually about generics)
+ {Issue56048b, "Issue56048b", true}, // (not actually about generics)
}
for _, step := range steps {
diff --git a/go/types/typeutil/methodsetcache.go b/go/types/typeutil/methodsetcache.go
index 32084610f..a5d931083 100644
--- a/go/types/typeutil/methodsetcache.go
+++ b/go/types/typeutil/methodsetcache.go
@@ -25,7 +25,6 @@ type MethodSetCache struct {
// If cache is nil, this function is equivalent to types.NewMethodSet(T).
// Utility functions can thus expose an optional *MethodSetCache
// parameter to clients that care about performance.
-//
func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet {
if cache == nil {
return types.NewMethodSet(T)
diff --git a/go/types/typeutil/ui.go b/go/types/typeutil/ui.go
index 9849c24ce..fa55b0a1e 100644
--- a/go/types/typeutil/ui.go
+++ b/go/types/typeutil/ui.go
@@ -22,7 +22,6 @@ import "go/types"
// this function is intended only for user interfaces.
//
// The order of the result is as for types.MethodSet(T).
-//
func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection {
isPointerToConcrete := func(T types.Type) bool {
ptr, ok := T.(*types.Pointer)
diff --git a/go/vcs/vcs.go b/go/vcs/vcs.go
index f2aac1c0d..54d850535 100644
--- a/go/vcs/vcs.go
+++ b/go/vcs/vcs.go
@@ -11,7 +11,6 @@
// for developers who want to write tools with similar semantics.
// It needs to be manually kept in sync with upstream when changes are
// made to cmd/go/internal/get; see https://golang.org/issue/11490.
-//
package vcs // import "golang.org/x/tools/go/vcs"
import (
diff --git a/godoc/analysis/analysis.go b/godoc/analysis/analysis.go
index de8e470b1..54611e87d 100644
--- a/godoc/analysis/analysis.go
+++ b/godoc/analysis/analysis.go
@@ -39,7 +39,6 @@
// ERRORS: for each locus of a frontend (scanner/parser/type) error, the
// location is highlighted in red and hover text provides the compiler
// error message.
-//
package analysis // import "golang.org/x/tools/godoc/analysis"
import (
@@ -160,7 +159,6 @@ func (res *Result) Status() string {
// HTML link markup for the specified godoc file URL. Thread-safe.
// Callers must not mutate the elements.
// It returns "zero" if no data is available.
-//
func (res *Result) FileInfo(url string) (fi FileInfo) {
return res.fileInfo(url).get()
}
@@ -185,7 +183,6 @@ func (res *Result) pkgInfo(importPath string) *pkgInfo {
// type info for the specified package. Thread-safe.
// Callers must not mutate its fields.
// PackageInfo returns "zero" if no data is available.
-//
func (res *Result) PackageInfo(importPath string) PackageInfo {
return res.pkgInfo(importPath).get()
}
diff --git a/godoc/dirtrees.go b/godoc/dirtrees.go
index 82c9a0619..f6a5ba0fe 100644
--- a/godoc/dirtrees.go
+++ b/godoc/dirtrees.go
@@ -22,7 +22,6 @@ import (
// Conventional name for directories containing test data.
// Excluded from directory trees.
-//
const testdataDirName = "testdata"
type Directory struct {
@@ -217,7 +216,6 @@ func (b *treeBuilder) newDirTree(fset *token.FileSet, path, name string, depth i
// provided for maxDepth, nodes at larger depths are pruned as well; they
// are assumed to contain package files even if their contents are not known
// (i.e., in this case the tree may contain directories w/o any package files).
-//
func (c *Corpus) newDirectory(root string, maxDepth int) *Directory {
// The root could be a symbolic link so use Stat not Lstat.
d, err := c.fs.Stat(root)
@@ -300,7 +298,6 @@ func (dir *Directory) lookup(path string) *Directory {
// DirEntry describes a directory entry. The Depth and Height values
// are useful for presenting an entry in an indented fashion.
-//
type DirEntry struct {
Depth int // >= 0
Height int // = DirList.MaxHeight - Depth, > 0
@@ -331,7 +328,6 @@ func hasThirdParty(list []DirEntry) bool {
// If skipRoot is set, the root directory itself is excluded from the list.
// If filter is set, only the directory entries whose paths match the filter
// are included.
-//
func (dir *Directory) listing(skipRoot bool, filter func(string) bool) *DirList {
if dir == nil {
return nil
diff --git a/godoc/format.go b/godoc/format.go
index 3e8c8673a..eaac8bf27 100644
--- a/godoc/format.go
+++ b/godoc/format.go
@@ -25,7 +25,6 @@ import (
// A Segment describes a text segment [start, end).
// The zero value of a Segment is a ready-to-use empty segment.
-//
type Segment struct {
start, end int
}
@@ -36,12 +35,10 @@ func (seg *Segment) isEmpty() bool { return seg.start >= seg.end }
// Repeated calls to a selection return consecutive, non-overlapping,
// non-empty segments, followed by an infinite sequence of empty
// segments. The first empty segment marks the end of the selection.
-//
type Selection func() Segment
// A LinkWriter writes some start or end "tag" to w for the text offset offs.
// It is called by FormatSelections at the start or end of each link segment.
-//
type LinkWriter func(w io.Writer, offs int, start bool)
// A SegmentWriter formats a text according to selections and writes it to w.
@@ -49,7 +46,6 @@ type LinkWriter func(w io.Writer, offs int, start bool)
// to FormatSelections overlap with the text segment: If the n'th bit is set
// in selections, the n'th selection provided to FormatSelections is overlapping
// with the text.
-//
type SegmentWriter func(w io.Writer, text []byte, selections int)
// FormatSelections takes a text and writes it to w using link and segment
@@ -58,7 +54,6 @@ type SegmentWriter func(w io.Writer, text []byte, selections int)
// consecutive segments of text overlapped by the same selections as specified
// by selections. The link writer lw may be nil, in which case the links
// Selection is ignored.
-//
func FormatSelections(w io.Writer, text []byte, lw LinkWriter, links Selection, sw SegmentWriter, selections ...Selection) {
// If we have a link writer, make the links
// selection the last entry in selections
@@ -144,7 +139,6 @@ func FormatSelections(w io.Writer, text []byte, lw LinkWriter, links Selection,
// A merger merges a slice of Selections and produces a sequence of
// consecutive segment change events through repeated next() calls.
-//
type merger struct {
selections []Selection
segments []Segment // segments[i] is the next segment of selections[i]
@@ -169,7 +163,6 @@ func newMerger(selections []Selection) *merger {
// to which the segment belongs, offs is the segment start or end offset
// as determined by the start value. If there are no more segment changes,
// next returns an index value < 0.
-//
func (m *merger) next() (index, offs int, start bool) {
// find the next smallest offset where a segment starts or ends
offs = infinity
@@ -233,7 +226,6 @@ func lineSelection(text []byte) Selection {
// tokenSelection returns, as a selection, the sequence of
// consecutive occurrences of token sel in the Go src text.
-//
func tokenSelection(src []byte, sel token.Token) Selection {
var s scanner.Scanner
fset := token.NewFileSet()
@@ -257,7 +249,6 @@ func tokenSelection(src []byte, sel token.Token) Selection {
// makeSelection is a helper function to make a Selection from a slice of pairs.
// Pairs describing empty segments are ignored.
-//
func makeSelection(matches [][]int) Selection {
i := 0
return func() Segment {
@@ -306,7 +297,6 @@ func RangeSelection(str string) Selection {
// bit 0: comments
// bit 1: highlights
// bit 2: selections
-//
var startTags = [][]byte{
/* 000 */ []byte(``),
/* 001 */ []byte(`<span class="comment">`),
@@ -336,16 +326,15 @@ func selectionTag(w io.Writer, text []byte, selections int) {
// Consecutive text segments are wrapped in HTML spans (with tags as
// defined by startTags and endTag) as follows:
//
-// - if line >= 0, line number (ln) spans are inserted before each line,
-// starting with the value of line
-// - if the text is Go source, comments get the "comment" span class
-// - each occurrence of the regular expression pattern gets the "highlight"
-// span class
-// - text segments covered by selection get the "selection" span class
+// - if line >= 0, line number (ln) spans are inserted before each line,
+// starting with the value of line
+// - if the text is Go source, comments get the "comment" span class
+// - each occurrence of the regular expression pattern gets the "highlight"
+// span class
+// - text segments covered by selection get the "selection" span class
//
// Comments, highlights, and selections may overlap arbitrarily; the respective
// HTML span classes are specified in the startTags variable.
-//
func FormatText(w io.Writer, text []byte, line int, goSource bool, pattern string, selection Selection) {
var comments, highlights Selection
if goSource {
diff --git a/godoc/godoc.go b/godoc/godoc.go
index a88aa1260..dfac2111a 100644
--- a/godoc/godoc.go
+++ b/godoc/godoc.go
@@ -41,8 +41,8 @@ const builtinPkgPath = "builtin"
// FuncMap defines template functions used in godoc templates.
//
// Convention: template function names ending in "_html" or "_url" produce
-// HTML- or URL-escaped strings; all other function results may
-// require explicit escaping in the template.
+// HTML- or URL-escaped strings; all other function results may
+// require explicit escaping in the template.
func (p *Presentation) FuncMap() template.FuncMap {
p.initFuncMapOnce.Do(p.initFuncMap)
return p.funcMap
@@ -345,11 +345,16 @@ func isDigit(ch rune) bool {
return '0' <= ch && ch <= '9' || ch >= utf8.RuneSelf && unicode.IsDigit(ch)
}
-func comment_htmlFunc(comment string) string {
+func comment_htmlFunc(info *PageInfo, comment string) string {
var buf bytes.Buffer
// TODO(gri) Provide list of words (e.g. function parameters)
// to be emphasized by ToHTML.
- doc.ToHTML(&buf, comment, nil) // does html-escaping
+
+ // godocToHTML is:
+ // - buf.Write(info.PDoc.HTML(comment)) on go1.19
+ // - go/doc.ToHTML(&buf, comment, nil) on other versions
+ godocToHTML(&buf, info.PDoc, comment)
+
return buf.String()
}
@@ -448,7 +453,7 @@ func srcToPkgLinkFunc(relpath string) string {
return fmt.Sprintf(`<a href="/%s">%s</a>`, relpath, relpath[len("pkg/"):])
}
-// srcBreadcrumbFun converts each segment of relpath to a HTML <a>.
+// srcBreadcrumbFunc converts each segment of relpath to a HTML <a>.
// Each segment links to its corresponding src directories.
func srcBreadcrumbFunc(relpath string) string {
segments := strings.Split(relpath, "/")
@@ -658,7 +663,7 @@ func (p *Presentation) example_suffixFunc(name string) string {
return suffix
}
-// implements_html returns the "> Implements" toggle for a package-level named type.
+// implements_htmlFunc returns the "> Implements" toggle for a package-level named type.
// Its contents are populated from JSON data by client-side JS at load time.
func (p *Presentation) implements_htmlFunc(info *PageInfo, typeName string) string {
if p.ImplementsHTML == nil {
@@ -676,7 +681,7 @@ func (p *Presentation) implements_htmlFunc(info *PageInfo, typeName string) stri
return buf.String()
}
-// methodset_html returns the "> Method set" toggle for a package-level named type.
+// methodset_htmlFunc returns the "> Method set" toggle for a package-level named type.
// Its contents are populated from JSON data by client-side JS at load time.
func (p *Presentation) methodset_htmlFunc(info *PageInfo, typeName string) string {
if p.MethodSetHTML == nil {
@@ -694,7 +699,7 @@ func (p *Presentation) methodset_htmlFunc(info *PageInfo, typeName string) strin
return buf.String()
}
-// callgraph_html returns the "> Call graph" toggle for a package-level func.
+// callgraph_htmlFunc returns the "> Call graph" toggle for a package-level func.
// Its contents are populated from JSON data by client-side JS at load time.
func (p *Presentation) callgraph_htmlFunc(info *PageInfo, recv, name string) string {
if p.CallGraphHTML == nil {
diff --git a/godoc/index.go b/godoc/index.go
index f6de201fd..4471f5916 100644
--- a/godoc/index.go
+++ b/godoc/index.go
@@ -50,6 +50,7 @@ import (
"index/suffixarray"
"io"
"log"
+ "math"
"os"
pathpkg "path"
"path/filepath"
@@ -161,7 +162,7 @@ func newKindRun(h RunList) interface{} {
// bit is always the same for all infos in one
// list we can simply compare the entire info.
k := 0
- prev := SpotInfo(1<<32 - 1) // an unlikely value
+ prev := SpotInfo(math.MaxUint32) // an unlikely value
for _, x := range run {
if x != prev {
run[k] = x
@@ -1359,7 +1360,6 @@ type FileLines struct {
// LookupRegexp returns the number of matches and the matches where a regular
// expression r is found in the full text index. At most n matches are
// returned (thus found <= n).
-//
func (x *Index) LookupRegexp(r *regexp.Regexp, n int) (found int, result []FileLines) {
if x.suffixes == nil || n <= 0 {
return
@@ -1422,7 +1422,7 @@ func (x *Index) LookupRegexp(r *regexp.Regexp, n int) (found int, result []FileL
return
}
-// InvalidateIndex should be called whenever any of the file systems
+// invalidateIndex should be called whenever any of the file systems
// under godoc's observation change so that the indexer is kicked on.
func (c *Corpus) invalidateIndex() {
c.fsModified.Set(nil)
@@ -1431,7 +1431,6 @@ func (c *Corpus) invalidateIndex() {
// feedDirnames feeds the directory names of all directories
// under the file system given by root to channel c.
-//
func (c *Corpus) feedDirnames(ch chan<- string) {
if dir, _ := c.fsTree.Get(); dir != nil {
for d := range dir.(*Directory).iter(false) {
@@ -1442,7 +1441,6 @@ func (c *Corpus) feedDirnames(ch chan<- string) {
// fsDirnames() returns a channel sending all directory names
// of all the file systems under godoc's observation.
-//
func (c *Corpus) fsDirnames() <-chan string {
ch := make(chan string, 256) // buffered for fewer context switches
go func() {
diff --git a/godoc/linkify.go b/godoc/linkify.go
index 4a9c50604..cf266d01f 100644
--- a/godoc/linkify.go
+++ b/godoc/linkify.go
@@ -26,7 +26,6 @@ import (
// not being declared), are wrapped with HTML links pointing
// to the respective declaration, if possible. Comments are
// formatted the same way as with FormatText.
-//
func LinkifyText(w io.Writer, text []byte, n ast.Node) {
links := linksFor(n)
@@ -75,7 +74,6 @@ func LinkifyText(w io.Writer, text []byte, n ast.Node) {
// A link describes the (HTML) link information for an identifier.
// The zero value of a link represents "no link".
-//
type link struct {
path, name string // package path, identifier name
isVal bool // identifier is defined in a const or var declaration
@@ -83,7 +81,6 @@ type link struct {
// linksFor returns the list of links for the identifiers used
// by node in the same order as they appear in the source.
-//
func linksFor(node ast.Node) (links []link) {
// linkMap tracks link information for each ast.Ident node. Entries may
// be created out of source order (for example, when we visit a parent
diff --git a/godoc/meta.go b/godoc/meta.go
index 8d3b82534..76a27508b 100644
--- a/godoc/meta.go
+++ b/godoc/meta.go
@@ -43,7 +43,6 @@ func (m *Metadata) FilePath() string { return m.filePath }
// extractMetadata extracts the Metadata from a byte slice.
// It returns the Metadata value and the remaining data.
// If no metadata is present the original byte slice is returned.
-//
func extractMetadata(b []byte) (meta Metadata, tail []byte, err error) {
tail = b
if !bytes.HasPrefix(b, jsonStart) {
@@ -61,7 +60,7 @@ func extractMetadata(b []byte) (meta Metadata, tail []byte, err error) {
return
}
-// UpdateMetadata scans $GOROOT/doc for HTML and Markdown files, reads their metadata,
+// updateMetadata scans $GOROOT/doc for HTML and Markdown files, reads their metadata,
// and updates the DocMetadata map.
func (c *Corpus) updateMetadata() {
metadata := make(map[string]*Metadata)
@@ -121,7 +120,6 @@ func (c *Corpus) updateMetadata() {
// MetadataFor returns the *Metadata for a given relative path or nil if none
// exists.
-//
func (c *Corpus) MetadataFor(relpath string) *Metadata {
if m, _ := c.docMetadata.Get(); m != nil {
meta := m.(map[string]*Metadata)
@@ -142,7 +140,6 @@ func (c *Corpus) MetadataFor(relpath string) *Metadata {
// refreshMetadata sends a signal to update DocMetadata. If a refresh is in
// progress the metadata will be refreshed again afterward.
-//
func (c *Corpus) refreshMetadata() {
select {
case c.refreshMetadataSignal <- true:
@@ -150,7 +147,7 @@ func (c *Corpus) refreshMetadata() {
}
}
-// RefreshMetadataLoop runs forever, updating DocMetadata when the underlying
+// refreshMetadataLoop runs forever, updating DocMetadata when the underlying
// file system changes. It should be launched in a goroutine.
func (c *Corpus) refreshMetadataLoop() {
for {
diff --git a/godoc/redirect/hash.go b/godoc/redirect/hash.go
deleted file mode 100644
index d5a1e3eb6..000000000
--- a/godoc/redirect/hash.go
+++ /dev/null
@@ -1,138 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file provides a compact encoding of
-// a map of Mercurial hashes to Git hashes.
-
-package redirect
-
-import (
- "encoding/binary"
- "fmt"
- "io"
- "os"
- "sort"
- "strconv"
- "strings"
-)
-
-// hashMap is a map of Mercurial hashes to Git hashes.
-type hashMap struct {
- file *os.File
- entries int
-}
-
-// newHashMap takes a file handle that contains a map of Mercurial to Git
-// hashes. The file should be a sequence of pairs of little-endian encoded
-// uint32s, representing a hgHash and a gitHash respectively.
-// The sequence must be sorted by hgHash.
-// The file must remain open for as long as the returned hashMap is used.
-func newHashMap(f *os.File) (*hashMap, error) {
- fi, err := f.Stat()
- if err != nil {
- return nil, err
- }
- return &hashMap{file: f, entries: int(fi.Size() / 8)}, nil
-}
-
-// Lookup finds an hgHash in the map that matches the given prefix, and returns
-// its corresponding gitHash. The prefix must be at least 8 characters long.
-func (m *hashMap) Lookup(s string) gitHash {
- if m == nil {
- return 0
- }
- hg, err := hgHashFromString(s)
- if err != nil {
- return 0
- }
- var git gitHash
- b := make([]byte, 8)
- sort.Search(m.entries, func(i int) bool {
- n, err := m.file.ReadAt(b, int64(i*8))
- if err != nil {
- panic(err)
- }
- if n != 8 {
- panic(io.ErrUnexpectedEOF)
- }
- v := hgHash(binary.LittleEndian.Uint32(b[:4]))
- if v == hg {
- git = gitHash(binary.LittleEndian.Uint32(b[4:]))
- }
- return v >= hg
- })
- return git
-}
-
-// hgHash represents the lower (leftmost) 32 bits of a Mercurial hash.
-type hgHash uint32
-
-func (h hgHash) String() string {
- return intToHash(int64(h))
-}
-
-func hgHashFromString(s string) (hgHash, error) {
- if len(s) < 8 {
- return 0, fmt.Errorf("string too small: len(s) = %d", len(s))
- }
- hash := s[:8]
- i, err := strconv.ParseInt(hash, 16, 64)
- if err != nil {
- return 0, err
- }
- return hgHash(i), nil
-}
-
-// gitHash represents the leftmost 28 bits of a Git hash in its upper 28 bits,
-// and it encodes hash's repository in the lower 4 bits.
-type gitHash uint32
-
-func (h gitHash) Hash() string {
- return intToHash(int64(h))[:7]
-}
-
-func (h gitHash) Repo() string {
- return repo(h & 0xF).String()
-}
-
-func intToHash(i int64) string {
- s := strconv.FormatInt(i, 16)
- if len(s) < 8 {
- s = strings.Repeat("0", 8-len(s)) + s
- }
- return s
-}
-
-// repo represents a Go Git repository.
-type repo byte
-
-const (
- repoGo repo = iota
- repoBlog
- repoCrypto
- repoExp
- repoImage
- repoMobile
- repoNet
- repoSys
- repoTalks
- repoText
- repoTools
-)
-
-func (r repo) String() string {
- return map[repo]string{
- repoGo: "go",
- repoBlog: "blog",
- repoCrypto: "crypto",
- repoExp: "exp",
- repoImage: "image",
- repoMobile: "mobile",
- repoNet: "net",
- repoSys: "sys",
- repoTalks: "talks",
- repoText: "text",
- repoTools: "tools",
- }[r]
-}
diff --git a/godoc/redirect/redirect.go b/godoc/redirect/redirect.go
index 57d779ccb..d0145ee18 100644
--- a/godoc/redirect/redirect.go
+++ b/godoc/redirect/redirect.go
@@ -3,147 +3,22 @@
// license that can be found in the LICENSE file.
// Package redirect provides hooks to register HTTP handlers that redirect old
-// godoc paths to their new equivalents and assist in accessing the issue
-// tracker, wiki, code review system, etc.
+// godoc paths to their new equivalents.
package redirect // import "golang.org/x/tools/godoc/redirect"
import (
- "context"
- "fmt"
- "html/template"
"net/http"
- "os"
"regexp"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "golang.org/x/net/context/ctxhttp"
)
-// Register registers HTTP handlers that redirect old godoc paths to their new
-// equivalents and assist in accessing the issue tracker, wiki, code review
-// system, etc. If mux is nil it uses http.DefaultServeMux.
+// Register registers HTTP handlers that redirect old godoc paths to their new equivalents.
+// If mux is nil it uses http.DefaultServeMux.
func Register(mux *http.ServeMux) {
if mux == nil {
mux = http.DefaultServeMux
}
- handlePathRedirects(mux, pkgRedirects, "/pkg/")
- handlePathRedirects(mux, cmdRedirects, "/cmd/")
- for prefix, redirect := range prefixHelpers {
- p := "/" + prefix + "/"
- mux.Handle(p, PrefixHandler(p, redirect))
- }
- for path, redirect := range redirects {
- mux.Handle(path, Handler(redirect))
- }
// NB: /src/pkg (sans trailing slash) is the index of packages.
mux.HandleFunc("/src/pkg/", srcPkgHandler)
- mux.HandleFunc("/cl/", clHandler)
- mux.HandleFunc("/change/", changeHandler)
- mux.HandleFunc("/design/", designHandler)
-}
-
-func handlePathRedirects(mux *http.ServeMux, redirects map[string]string, prefix string) {
- for source, target := range redirects {
- h := Handler(prefix + target + "/")
- p := prefix + source
- mux.Handle(p, h)
- mux.Handle(p+"/", h)
- }
-}
-
-// Packages that were renamed between r60 and go1.
-var pkgRedirects = map[string]string{
- "asn1": "encoding/asn1",
- "big": "math/big",
- "cmath": "math/cmplx",
- "csv": "encoding/csv",
- "exec": "os/exec",
- "exp/template/html": "html/template",
- "gob": "encoding/gob",
- "http": "net/http",
- "http/cgi": "net/http/cgi",
- "http/fcgi": "net/http/fcgi",
- "http/httptest": "net/http/httptest",
- "http/pprof": "net/http/pprof",
- "json": "encoding/json",
- "mail": "net/mail",
- "rand": "math/rand",
- "rpc": "net/rpc",
- "rpc/jsonrpc": "net/rpc/jsonrpc",
- "scanner": "text/scanner",
- "smtp": "net/smtp",
- "tabwriter": "text/tabwriter",
- "template": "text/template",
- "template/parse": "text/template/parse",
- "url": "net/url",
- "utf16": "unicode/utf16",
- "utf8": "unicode/utf8",
- "xml": "encoding/xml",
-}
-
-// Commands that were renamed between r60 and go1.
-var cmdRedirects = map[string]string{
- "gofix": "fix",
- "goinstall": "go",
- "gopack": "pack",
- "gotest": "go",
- "govet": "vet",
- "goyacc": "yacc",
-}
-
-var redirects = map[string]string{
- "/blog": "/blog/",
- "/build": "http://build.golang.org",
- "/change": "https://go.googlesource.com/go",
- "/cl": "https://go-review.googlesource.com",
- "/cmd/godoc/": "https://pkg.go.dev/golang.org/x/tools/cmd/godoc",
- "/issue": "https://github.com/golang/go/issues",
- "/issue/new": "https://github.com/golang/go/issues/new",
- "/issues": "https://github.com/golang/go/issues",
- "/issues/new": "https://github.com/golang/go/issues/new",
- "/play": "http://play.golang.org",
- "/design": "https://go.googlesource.com/proposal/+/master/design",
-
- // In Go 1.2 the references page is part of /doc/.
- "/ref": "/doc/#references",
- // This next rule clobbers /ref/spec and /ref/mem.
- // TODO(adg): figure out what to do here, if anything.
- // "/ref/": "/doc/#references",
-
- // Be nice to people who are looking in the wrong place.
- "/doc/mem": "/ref/mem",
- "/doc/spec": "/ref/spec",
-
- "/talks": "http://talks.golang.org",
- "/tour": "http://tour.golang.org",
- "/wiki": "https://github.com/golang/go/wiki",
-
- "/doc/articles/c_go_cgo.html": "/blog/c-go-cgo",
- "/doc/articles/concurrency_patterns.html": "/blog/go-concurrency-patterns-timing-out-and",
- "/doc/articles/defer_panic_recover.html": "/blog/defer-panic-and-recover",
- "/doc/articles/error_handling.html": "/blog/error-handling-and-go",
- "/doc/articles/gobs_of_data.html": "/blog/gobs-of-data",
- "/doc/articles/godoc_documenting_go_code.html": "/blog/godoc-documenting-go-code",
- "/doc/articles/gos_declaration_syntax.html": "/blog/gos-declaration-syntax",
- "/doc/articles/image_draw.html": "/blog/go-imagedraw-package",
- "/doc/articles/image_package.html": "/blog/go-image-package",
- "/doc/articles/json_and_go.html": "/blog/json-and-go",
- "/doc/articles/json_rpc_tale_of_interfaces.html": "/blog/json-rpc-tale-of-interfaces",
- "/doc/articles/laws_of_reflection.html": "/blog/laws-of-reflection",
- "/doc/articles/slices_usage_and_internals.html": "/blog/go-slices-usage-and-internals",
- "/doc/go_for_cpp_programmers.html": "/wiki/GoForCPPProgrammers",
- "/doc/go_tutorial.html": "http://tour.golang.org/",
-}
-
-var prefixHelpers = map[string]string{
- "issue": "https://github.com/golang/go/issues/",
- "issues": "https://github.com/golang/go/issues/",
- "play": "http://play.golang.org/",
- "talks": "http://talks.golang.org/",
- "wiki": "https://github.com/golang/go/wiki/",
}
func Handler(target string) http.Handler {
@@ -181,144 +56,3 @@ func srcPkgHandler(w http.ResponseWriter, r *http.Request) {
r.URL.Path = "/src/" + r.URL.Path[len("/src/pkg/"):]
http.Redirect(w, r, r.URL.String(), http.StatusMovedPermanently)
}
-
-func clHandler(w http.ResponseWriter, r *http.Request) {
- const prefix = "/cl/"
- if p := r.URL.Path; p == prefix {
- // redirect /prefix/ to /prefix
- http.Redirect(w, r, p[:len(p)-1], http.StatusFound)
- return
- }
- id := r.URL.Path[len(prefix):]
- // support /cl/152700045/, which is used in commit 0edafefc36.
- id = strings.TrimSuffix(id, "/")
- if !validID.MatchString(id) {
- http.Error(w, "Not found", http.StatusNotFound)
- return
- }
- target := ""
-
- if n, err := strconv.Atoi(id); err == nil && isRietveldCL(n) {
- // Issue 28836: if this Rietveld CL happens to
- // also be a Gerrit CL, render a disambiguation HTML
- // page with two links instead. We need to make a
- // Gerrit API call to figure that out, but we cache
- // known Gerrit CLs so it's done at most once per CL.
- if ok, err := isGerritCL(r.Context(), n); err == nil && ok {
- w.Header().Set("Content-Type", "text/html; charset=utf-8")
- clDisambiguationHTML.Execute(w, n)
- return
- }
-
- target = "https://codereview.appspot.com/" + id
- } else {
- target = "https://go-review.googlesource.com/" + id
- }
- http.Redirect(w, r, target, http.StatusFound)
-}
-
-var clDisambiguationHTML = template.Must(template.New("").Parse(`<!DOCTYPE html>
-<html lang="en">
- <head>
- <title>Go CL {{.}} Disambiguation</title>
- <meta name="viewport" content="width=device-width">
- </head>
- <body>
- CL number {{.}} exists in both Gerrit (the current code review system)
- and Rietveld (the previous code review system). Please make a choice:
-
- <ul>
- <li><a href="https://go-review.googlesource.com/{{.}}">Gerrit CL {{.}}</a></li>
- <li><a href="https://codereview.appspot.com/{{.}}">Rietveld CL {{.}}</a></li>
- </ul>
- </body>
-</html>`))
-
-// isGerritCL reports whether a Gerrit CL with the specified numeric change ID (e.g., "4247")
-// is known to exist by querying the Gerrit API at https://go-review.googlesource.com.
-// isGerritCL uses gerritCLCache as a cache of Gerrit CL IDs that exist.
-func isGerritCL(ctx context.Context, id int) (bool, error) {
- // Check cache first.
- gerritCLCache.Lock()
- ok := gerritCLCache.exist[id]
- gerritCLCache.Unlock()
- if ok {
- return true, nil
- }
-
- // Query the Gerrit API Get Change endpoint, as documented at
- // https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#get-change.
- ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
- defer cancel()
- resp, err := ctxhttp.Get(ctx, nil, fmt.Sprintf("https://go-review.googlesource.com/changes/%d", id))
- if err != nil {
- return false, err
- }
- resp.Body.Close()
- switch resp.StatusCode {
- case http.StatusOK:
- // A Gerrit CL with this ID exists. Add it to cache.
- gerritCLCache.Lock()
- gerritCLCache.exist[id] = true
- gerritCLCache.Unlock()
- return true, nil
- case http.StatusNotFound:
- // A Gerrit CL with this ID doesn't exist. It may get created in the future.
- return false, nil
- default:
- return false, fmt.Errorf("unexpected status code: %v", resp.Status)
- }
-}
-
-var gerritCLCache = struct {
- sync.Mutex
- exist map[int]bool // exist is a set of Gerrit CL IDs that are known to exist.
-}{exist: make(map[int]bool)}
-
-var changeMap *hashMap
-
-// LoadChangeMap loads the specified map of Mercurial to Git revisions,
-// which is used by the /change/ handler to intelligently map old hg
-// revisions to their new git equivalents.
-// It should be called before calling Register.
-// The file should remain open as long as the process is running.
-// See the implementation of this package for details.
-func LoadChangeMap(filename string) error {
- f, err := os.Open(filename)
- if err != nil {
- return err
- }
- m, err := newHashMap(f)
- if err != nil {
- return err
- }
- changeMap = m
- return nil
-}
-
-func changeHandler(w http.ResponseWriter, r *http.Request) {
- const prefix = "/change/"
- if p := r.URL.Path; p == prefix {
- // redirect /prefix/ to /prefix
- http.Redirect(w, r, p[:len(p)-1], http.StatusFound)
- return
- }
- hash := r.URL.Path[len(prefix):]
- target := "https://go.googlesource.com/go/+/" + hash
- if git := changeMap.Lookup(hash); git > 0 {
- target = fmt.Sprintf("https://go.googlesource.com/%v/+/%v", git.Repo(), git.Hash())
- }
- http.Redirect(w, r, target, http.StatusFound)
-}
-
-func designHandler(w http.ResponseWriter, r *http.Request) {
- const prefix = "/design/"
- if p := r.URL.Path; p == prefix {
- // redirect /prefix/ to /prefix
- http.Redirect(w, r, p[:len(p)-1], http.StatusFound)
- return
- }
- name := r.URL.Path[len(prefix):]
- target := "https://go.googlesource.com/proposal/+/master/design/" + name + ".md"
- http.Redirect(w, r, target, http.StatusFound)
-}
diff --git a/godoc/redirect/redirect_test.go b/godoc/redirect/redirect_test.go
index 1de3c6ca7..59677c435 100644
--- a/godoc/redirect/redirect_test.go
+++ b/godoc/redirect/redirect_test.go
@@ -21,56 +21,7 @@ func errorResult(status int) redirectResult {
func TestRedirects(t *testing.T) {
var tests = map[string]redirectResult{
- "/build": {301, "http://build.golang.org"},
- "/ref": {301, "/doc/#references"},
- "/doc/mem": {301, "/ref/mem"},
- "/doc/spec": {301, "/ref/spec"},
- "/tour": {301, "http://tour.golang.org"},
- "/foo": errorResult(404),
-
- "/pkg/asn1": {301, "/pkg/encoding/asn1/"},
- "/pkg/template/parse": {301, "/pkg/text/template/parse/"},
-
- "/src/pkg/foo": {301, "/src/foo"},
-
- "/cmd/gofix": {301, "/cmd/fix/"},
-
- // git commits (/change)
- // TODO: mercurial tags and LoadChangeMap.
- "/change": {301, "https://go.googlesource.com/go"},
- "/change/a": {302, "https://go.googlesource.com/go/+/a"},
-
- "/issue": {301, "https://github.com/golang/go/issues"},
- "/issue?": {301, "https://github.com/golang/go/issues"},
- "/issue/1": {302, "https://github.com/golang/go/issues/1"},
- "/issue/new": {301, "https://github.com/golang/go/issues/new"},
- "/issue/new?a=b&c=d%20&e=f": {301, "https://github.com/golang/go/issues/new?a=b&c=d%20&e=f"},
- "/issues": {301, "https://github.com/golang/go/issues"},
- "/issues/1": {302, "https://github.com/golang/go/issues/1"},
- "/issues/new": {301, "https://github.com/golang/go/issues/new"},
- "/issues/1/2/3": errorResult(404),
-
- "/wiki/foo": {302, "https://github.com/golang/go/wiki/foo"},
- "/wiki/foo/": {302, "https://github.com/golang/go/wiki/foo/"},
-
- "/design": {301, "https://go.googlesource.com/proposal/+/master/design"},
- "/design/": {302, "/design"},
- "/design/123-foo": {302, "https://go.googlesource.com/proposal/+/master/design/123-foo.md"},
- "/design/text/123-foo": {302, "https://go.googlesource.com/proposal/+/master/design/text/123-foo.md"},
-
- "/cl/1": {302, "https://go-review.googlesource.com/1"},
- "/cl/1/": {302, "https://go-review.googlesource.com/1"},
- "/cl/267120043": {302, "https://codereview.appspot.com/267120043"},
- "/cl/267120043/": {302, "https://codereview.appspot.com/267120043"},
-
- // Verify that we're using the Rietveld CL table:
- "/cl/152046": {302, "https://codereview.appspot.com/152046"},
- "/cl/152047": {302, "https://go-review.googlesource.com/152047"},
- "/cl/152048": {302, "https://codereview.appspot.com/152048"},
-
- // And verify we're using the "bigEnoughAssumeRietveld" value:
- "/cl/299999": {302, "https://go-review.googlesource.com/299999"},
- "/cl/300000": {302, "https://codereview.appspot.com/300000"},
+ "/foo": errorResult(404),
}
mux := http.NewServeMux()
diff --git a/godoc/redirect/rietveld.go b/godoc/redirect/rietveld.go
deleted file mode 100644
index 81b1094db..000000000
--- a/godoc/redirect/rietveld.go
+++ /dev/null
@@ -1,1093 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package redirect
-
-// bigEnoughAssumeRietveld is the value where CLs equal or great are
-// assumed to be on Rietveld. By including this threshold we shrink
-// the size of the table below. When Go amasses 150,000 more CLs, we'll
-// need to bump this number and regenerate the list below.
-const bigEnoughAssumeRietveld = 300000
-
-// isRietveldCL reports whether cl was a Rietveld CL number.
-func isRietveldCL(cl int) bool {
- return cl >= bigEnoughAssumeRietveld || lowRietveldCL[cl]
-}
-
-// lowRietveldCLs are the old CL numbers assigned by Rietveld code
-// review system as used by Go prior to Gerrit which are less than
-// bigEnoughAssumeRietveld.
-//
-// This list of numbers is registered with the /cl/NNNN redirect
-// handler to disambiguate which code review system a particular
-// number corresponds to. In some rare cases there may be duplicates,
-// in which case we might render an HTML choice for the user.
-//
-// To re-generate this list, run:
-//
-// $ cd $GOROOT
-// $ git log 7d7c6a9..94151eb | grep "^ https://golang.org/cl/" | perl -ne 's,^\s+https://golang.org/cl/(\d+).*$,$1,; chomp; print "$_: true,\n" if $_ < 300000' | sort -n | uniq
-//
-// Note that we ignore the x/* repos because we didn't start using
-// "subrepos" until the Rietveld CLs numbers were already 4,000,000+,
-// well above bigEnoughAssumeRietveld.
-var lowRietveldCL = map[int]bool{
- 152046: true,
- 152048: true,
- 152049: true,
- 152050: true,
- 152051: true,
- 152052: true,
- 152055: true,
- 152056: true,
- 152057: true,
- 152072: true,
- 152073: true,
- 152075: true,
- 152076: true,
- 152077: true,
- 152078: true,
- 152079: true,
- 152080: true,
- 152082: true,
- 152084: true,
- 152085: true,
- 152086: true,
- 152088: true,
- 152089: true,
- 152091: true,
- 152098: true,
- 152101: true,
- 152102: true,
- 152105: true,
- 152106: true,
- 152107: true,
- 152108: true,
- 152109: true,
- 152110: true,
- 152114: true,
- 152117: true,
- 152118: true,
- 152120: true,
- 152123: true,
- 152124: true,
- 152128: true,
- 152130: true,
- 152131: true,
- 152138: true,
- 152141: true,
- 152142: true,
- 153048: true,
- 153049: true,
- 153050: true,
- 153051: true,
- 153055: true,
- 153056: true,
- 153057: true,
- 154043: true,
- 154044: true,
- 154045: true,
- 154049: true,
- 154055: true,
- 154057: true,
- 154058: true,
- 154059: true,
- 154061: true,
- 154064: true,
- 154065: true,
- 154067: true,
- 154068: true,
- 154069: true,
- 154071: true,
- 154072: true,
- 154073: true,
- 154076: true,
- 154079: true,
- 154096: true,
- 154097: true,
- 154099: true,
- 154100: true,
- 154101: true,
- 154102: true,
- 154108: true,
- 154118: true,
- 154121: true,
- 154122: true,
- 154123: true,
- 154125: true,
- 154126: true,
- 154128: true,
- 154136: true,
- 154138: true,
- 154139: true,
- 154140: true,
- 154141: true,
- 154142: true,
- 154143: true,
- 154144: true,
- 154145: true,
- 154146: true,
- 154152: true,
- 154153: true,
- 154156: true,
- 154159: true,
- 154161: true,
- 154166: true,
- 154167: true,
- 154169: true,
- 154171: true,
- 154172: true,
- 154173: true,
- 154174: true,
- 154175: true,
- 154176: true,
- 154177: true,
- 154178: true,
- 154179: true,
- 154180: true,
- 155041: true,
- 155042: true,
- 155045: true,
- 155047: true,
- 155048: true,
- 155049: true,
- 155050: true,
- 155054: true,
- 155055: true,
- 155056: true,
- 155057: true,
- 155058: true,
- 155059: true,
- 155061: true,
- 155062: true,
- 155063: true,
- 155065: true,
- 155067: true,
- 155069: true,
- 155072: true,
- 155074: true,
- 155075: true,
- 155077: true,
- 155078: true,
- 155079: true,
- 156041: true,
- 156044: true,
- 156045: true,
- 156046: true,
- 156047: true,
- 156051: true,
- 156052: true,
- 156054: true,
- 156055: true,
- 156056: true,
- 156058: true,
- 156059: true,
- 156060: true,
- 156061: true,
- 156062: true,
- 156063: true,
- 156066: true,
- 156067: true,
- 156070: true,
- 156071: true,
- 156073: true,
- 156075: true,
- 156077: true,
- 156079: true,
- 156080: true,
- 156081: true,
- 156083: true,
- 156084: true,
- 156085: true,
- 156086: true,
- 156089: true,
- 156091: true,
- 156092: true,
- 156093: true,
- 156094: true,
- 156097: true,
- 156099: true,
- 156100: true,
- 156102: true,
- 156103: true,
- 156104: true,
- 156106: true,
- 156107: true,
- 156108: true,
- 156109: true,
- 156110: true,
- 156113: true,
- 156115: true,
- 156116: true,
- 157041: true,
- 157042: true,
- 157043: true,
- 157044: true,
- 157046: true,
- 157053: true,
- 157055: true,
- 157056: true,
- 157058: true,
- 157060: true,
- 157061: true,
- 157062: true,
- 157065: true,
- 157066: true,
- 157067: true,
- 157068: true,
- 157069: true,
- 157071: true,
- 157072: true,
- 157073: true,
- 157074: true,
- 157075: true,
- 157076: true,
- 157077: true,
- 157082: true,
- 157084: true,
- 157085: true,
- 157087: true,
- 157088: true,
- 157091: true,
- 157095: true,
- 157096: true,
- 157099: true,
- 157100: true,
- 157101: true,
- 157102: true,
- 157103: true,
- 157104: true,
- 157106: true,
- 157110: true,
- 157111: true,
- 157112: true,
- 157114: true,
- 157116: true,
- 157119: true,
- 157140: true,
- 157142: true,
- 157143: true,
- 157144: true,
- 157146: true,
- 157147: true,
- 157149: true,
- 157151: true,
- 157152: true,
- 157153: true,
- 157154: true,
- 157156: true,
- 157157: true,
- 157158: true,
- 157159: true,
- 157160: true,
- 157162: true,
- 157166: true,
- 157167: true,
- 157168: true,
- 157170: true,
- 158041: true,
- 159044: true,
- 159049: true,
- 159050: true,
- 159051: true,
- 160043: true,
- 160044: true,
- 160045: true,
- 160046: true,
- 160047: true,
- 160054: true,
- 160056: true,
- 160057: true,
- 160059: true,
- 160060: true,
- 160061: true,
- 160064: true,
- 160065: true,
- 160069: true,
- 160070: true,
- 161049: true,
- 161050: true,
- 161056: true,
- 161058: true,
- 161060: true,
- 161061: true,
- 161069: true,
- 161070: true,
- 161073: true,
- 161075: true,
- 162041: true,
- 162044: true,
- 162046: true,
- 162053: true,
- 162054: true,
- 162055: true,
- 162056: true,
- 162057: true,
- 162058: true,
- 162059: true,
- 162061: true,
- 162062: true,
- 163042: true,
- 163044: true,
- 163049: true,
- 163050: true,
- 163051: true,
- 163052: true,
- 163053: true,
- 163055: true,
- 163058: true,
- 163061: true,
- 163062: true,
- 163064: true,
- 163067: true,
- 163068: true,
- 163069: true,
- 163070: true,
- 163071: true,
- 163072: true,
- 163082: true,
- 163083: true,
- 163085: true,
- 163088: true,
- 163091: true,
- 163092: true,
- 163097: true,
- 163098: true,
- 164043: true,
- 164047: true,
- 164049: true,
- 164052: true,
- 164053: true,
- 164056: true,
- 164059: true,
- 164060: true,
- 164062: true,
- 164068: true,
- 164069: true,
- 164071: true,
- 164073: true,
- 164074: true,
- 164075: true,
- 164078: true,
- 164079: true,
- 164081: true,
- 164082: true,
- 164083: true,
- 164085: true,
- 164086: true,
- 164088: true,
- 164090: true,
- 164091: true,
- 164092: true,
- 164093: true,
- 164094: true,
- 164095: true,
- 165042: true,
- 165044: true,
- 165045: true,
- 165048: true,
- 165049: true,
- 165050: true,
- 165051: true,
- 165055: true,
- 165057: true,
- 165058: true,
- 165059: true,
- 165061: true,
- 165062: true,
- 165063: true,
- 165064: true,
- 165065: true,
- 165068: true,
- 165070: true,
- 165076: true,
- 165078: true,
- 165080: true,
- 165083: true,
- 165086: true,
- 165097: true,
- 165100: true,
- 165101: true,
- 166041: true,
- 166043: true,
- 166044: true,
- 166047: true,
- 166049: true,
- 166052: true,
- 166053: true,
- 166055: true,
- 166058: true,
- 166059: true,
- 166060: true,
- 166064: true,
- 166066: true,
- 166067: true,
- 166068: true,
- 166070: true,
- 166071: true,
- 166072: true,
- 166073: true,
- 166074: true,
- 166076: true,
- 166077: true,
- 166078: true,
- 166080: true,
- 167043: true,
- 167044: true,
- 167047: true,
- 167050: true,
- 167055: true,
- 167057: true,
- 167058: true,
- 168041: true,
- 168045: true,
- 170042: true,
- 170043: true,
- 170044: true,
- 170046: true,
- 170047: true,
- 170048: true,
- 170049: true,
- 171044: true,
- 171046: true,
- 171047: true,
- 171048: true,
- 171051: true,
- 172041: true,
- 172042: true,
- 172043: true,
- 172045: true,
- 172049: true,
- 173041: true,
- 173044: true,
- 173045: true,
- 174042: true,
- 174047: true,
- 174048: true,
- 174050: true,
- 174051: true,
- 174052: true,
- 174053: true,
- 174063: true,
- 174064: true,
- 174072: true,
- 174076: true,
- 174077: true,
- 174078: true,
- 174082: true,
- 174083: true,
- 174087: true,
- 175045: true,
- 175046: true,
- 175047: true,
- 175048: true,
- 176056: true,
- 176057: true,
- 176058: true,
- 176061: true,
- 176062: true,
- 176063: true,
- 176064: true,
- 176066: true,
- 176067: true,
- 176070: true,
- 176071: true,
- 176076: true,
- 178043: true,
- 178044: true,
- 178046: true,
- 178048: true,
- 179047: true,
- 179055: true,
- 179061: true,
- 179062: true,
- 179063: true,
- 179067: true,
- 179069: true,
- 179070: true,
- 179072: true,
- 179079: true,
- 179088: true,
- 179095: true,
- 179096: true,
- 179097: true,
- 179099: true,
- 179105: true,
- 179106: true,
- 179108: true,
- 179118: true,
- 179120: true,
- 179125: true,
- 179126: true,
- 179128: true,
- 179129: true,
- 179130: true,
- 180044: true,
- 180045: true,
- 180046: true,
- 180047: true,
- 180048: true,
- 180049: true,
- 180050: true,
- 180052: true,
- 180053: true,
- 180054: true,
- 180055: true,
- 180056: true,
- 180057: true,
- 180059: true,
- 180061: true,
- 180064: true,
- 180065: true,
- 180068: true,
- 180069: true,
- 180070: true,
- 180074: true,
- 180075: true,
- 180081: true,
- 180082: true,
- 180085: true,
- 180092: true,
- 180099: true,
- 180105: true,
- 180108: true,
- 180112: true,
- 180118: true,
- 181041: true,
- 181043: true,
- 181044: true,
- 181045: true,
- 181049: true,
- 181050: true,
- 181055: true,
- 181057: true,
- 181058: true,
- 181059: true,
- 181063: true,
- 181071: true,
- 181073: true,
- 181075: true,
- 181077: true,
- 181080: true,
- 181083: true,
- 181084: true,
- 181085: true,
- 181086: true,
- 181087: true,
- 181089: true,
- 181097: true,
- 181099: true,
- 181102: true,
- 181111: true,
- 181130: true,
- 181135: true,
- 181137: true,
- 181138: true,
- 181139: true,
- 181151: true,
- 181152: true,
- 181153: true,
- 181155: true,
- 181156: true,
- 181157: true,
- 181158: true,
- 181160: true,
- 181161: true,
- 181163: true,
- 181164: true,
- 181171: true,
- 181179: true,
- 181183: true,
- 181184: true,
- 181186: true,
- 182041: true,
- 182043: true,
- 182044: true,
- 183042: true,
- 183043: true,
- 183044: true,
- 183047: true,
- 183049: true,
- 183065: true,
- 183066: true,
- 183073: true,
- 183074: true,
- 183075: true,
- 183083: true,
- 183084: true,
- 183087: true,
- 183088: true,
- 183090: true,
- 183095: true,
- 183104: true,
- 183107: true,
- 183109: true,
- 183111: true,
- 183112: true,
- 183113: true,
- 183116: true,
- 183123: true,
- 183124: true,
- 183125: true,
- 183126: true,
- 183132: true,
- 183133: true,
- 183135: true,
- 183136: true,
- 183137: true,
- 183138: true,
- 183139: true,
- 183140: true,
- 183141: true,
- 183142: true,
- 183153: true,
- 183155: true,
- 183156: true,
- 183157: true,
- 183160: true,
- 184043: true,
- 184055: true,
- 184058: true,
- 184059: true,
- 184068: true,
- 184069: true,
- 184079: true,
- 184080: true,
- 184081: true,
- 185043: true,
- 185045: true,
- 186042: true,
- 186043: true,
- 186073: true,
- 186076: true,
- 186077: true,
- 186078: true,
- 186079: true,
- 186081: true,
- 186095: true,
- 186108: true,
- 186113: true,
- 186115: true,
- 186116: true,
- 186118: true,
- 186119: true,
- 186132: true,
- 186137: true,
- 186138: true,
- 186139: true,
- 186143: true,
- 186144: true,
- 186145: true,
- 186146: true,
- 186147: true,
- 186148: true,
- 186159: true,
- 186160: true,
- 186161: true,
- 186165: true,
- 186169: true,
- 186173: true,
- 186180: true,
- 186210: true,
- 186211: true,
- 186212: true,
- 186213: true,
- 186214: true,
- 186215: true,
- 186216: true,
- 186228: true,
- 186229: true,
- 186230: true,
- 186232: true,
- 186234: true,
- 186255: true,
- 186263: true,
- 186276: true,
- 186279: true,
- 186282: true,
- 186283: true,
- 188043: true,
- 189042: true,
- 189057: true,
- 189059: true,
- 189062: true,
- 189078: true,
- 189080: true,
- 189083: true,
- 189088: true,
- 189093: true,
- 189095: true,
- 189096: true,
- 189098: true,
- 189100: true,
- 190041: true,
- 190042: true,
- 190043: true,
- 190044: true,
- 190059: true,
- 190062: true,
- 190068: true,
- 190074: true,
- 190076: true,
- 190077: true,
- 190079: true,
- 190085: true,
- 190088: true,
- 190103: true,
- 190104: true,
- 193055: true,
- 193066: true,
- 193067: true,
- 193070: true,
- 193075: true,
- 193079: true,
- 193080: true,
- 193081: true,
- 193091: true,
- 193092: true,
- 193095: true,
- 193101: true,
- 193104: true,
- 194043: true,
- 194045: true,
- 194046: true,
- 194050: true,
- 194051: true,
- 194052: true,
- 194053: true,
- 194064: true,
- 194066: true,
- 194069: true,
- 194071: true,
- 194072: true,
- 194073: true,
- 194074: true,
- 194076: true,
- 194077: true,
- 194078: true,
- 194082: true,
- 194084: true,
- 194085: true,
- 194090: true,
- 194091: true,
- 194092: true,
- 194094: true,
- 194097: true,
- 194098: true,
- 194099: true,
- 194100: true,
- 194114: true,
- 194116: true,
- 194118: true,
- 194119: true,
- 194120: true,
- 194121: true,
- 194122: true,
- 194126: true,
- 194129: true,
- 194131: true,
- 194132: true,
- 194133: true,
- 194134: true,
- 194146: true,
- 194151: true,
- 194156: true,
- 194157: true,
- 194159: true,
- 194161: true,
- 194165: true,
- 195041: true,
- 195044: true,
- 195050: true,
- 195051: true,
- 195052: true,
- 195068: true,
- 195075: true,
- 195076: true,
- 195079: true,
- 195080: true,
- 195081: true,
- 196042: true,
- 196044: true,
- 196050: true,
- 196051: true,
- 196055: true,
- 196056: true,
- 196061: true,
- 196063: true,
- 196065: true,
- 196070: true,
- 196071: true,
- 196075: true,
- 196077: true,
- 196079: true,
- 196087: true,
- 196088: true,
- 196090: true,
- 196091: true,
- 197041: true,
- 197042: true,
- 197043: true,
- 197044: true,
- 198044: true,
- 198045: true,
- 198046: true,
- 198048: true,
- 198049: true,
- 198050: true,
- 198053: true,
- 198057: true,
- 198058: true,
- 198066: true,
- 198071: true,
- 198074: true,
- 198081: true,
- 198084: true,
- 198085: true,
- 198102: true,
- 199042: true,
- 199044: true,
- 199045: true,
- 199046: true,
- 199047: true,
- 199052: true,
- 199054: true,
- 199057: true,
- 199066: true,
- 199070: true,
- 199082: true,
- 199091: true,
- 199094: true,
- 199096: true,
- 201041: true,
- 201042: true,
- 201043: true,
- 201047: true,
- 201048: true,
- 201049: true,
- 201058: true,
- 201061: true,
- 201064: true,
- 201065: true,
- 201068: true,
- 202042: true,
- 202043: true,
- 202044: true,
- 202051: true,
- 202054: true,
- 202055: true,
- 203043: true,
- 203050: true,
- 203051: true,
- 203053: true,
- 203060: true,
- 203062: true,
- 204042: true,
- 204044: true,
- 204048: true,
- 204052: true,
- 204053: true,
- 204061: true,
- 204062: true,
- 204064: true,
- 204065: true,
- 204067: true,
- 204068: true,
- 204069: true,
- 205042: true,
- 205044: true,
- 206043: true,
- 206044: true,
- 206047: true,
- 206050: true,
- 206051: true,
- 206052: true,
- 206053: true,
- 206054: true,
- 206055: true,
- 206058: true,
- 206059: true,
- 206060: true,
- 206067: true,
- 206069: true,
- 206077: true,
- 206078: true,
- 206079: true,
- 206084: true,
- 206089: true,
- 206101: true,
- 206107: true,
- 206109: true,
- 207043: true,
- 207044: true,
- 207049: true,
- 207050: true,
- 207051: true,
- 207052: true,
- 207053: true,
- 207054: true,
- 207055: true,
- 207061: true,
- 207062: true,
- 207069: true,
- 207071: true,
- 207085: true,
- 207086: true,
- 207087: true,
- 207088: true,
- 207095: true,
- 207096: true,
- 207102: true,
- 207103: true,
- 207106: true,
- 207108: true,
- 207110: true,
- 207111: true,
- 207112: true,
- 209041: true,
- 209042: true,
- 209043: true,
- 209044: true,
- 210042: true,
- 210043: true,
- 210044: true,
- 210047: true,
- 211041: true,
- 212041: true,
- 212045: true,
- 212046: true,
- 212047: true,
- 213041: true,
- 213042: true,
- 214042: true,
- 214046: true,
- 214049: true,
- 214050: true,
- 215042: true,
- 215048: true,
- 215050: true,
- 216043: true,
- 216046: true,
- 216047: true,
- 216052: true,
- 216053: true,
- 216054: true,
- 216059: true,
- 216068: true,
- 217041: true,
- 217044: true,
- 217047: true,
- 217048: true,
- 217049: true,
- 217056: true,
- 217058: true,
- 217059: true,
- 217060: true,
- 217061: true,
- 217064: true,
- 217066: true,
- 217069: true,
- 217071: true,
- 217085: true,
- 217086: true,
- 217088: true,
- 217093: true,
- 217094: true,
- 217108: true,
- 217109: true,
- 217111: true,
- 217115: true,
- 217116: true,
- 218042: true,
- 218044: true,
- 218046: true,
- 218050: true,
- 218060: true,
- 218061: true,
- 218063: true,
- 218064: true,
- 218065: true,
- 218070: true,
- 218071: true,
- 218072: true,
- 218074: true,
- 218076: true,
- 222041: true,
- 223041: true,
- 223043: true,
- 223044: true,
- 223050: true,
- 223052: true,
- 223054: true,
- 223058: true,
- 223059: true,
- 223061: true,
- 223068: true,
- 223069: true,
- 223070: true,
- 223071: true,
- 223073: true,
- 223075: true,
- 223076: true,
- 223083: true,
- 223087: true,
- 223094: true,
- 223096: true,
- 223101: true,
- 223106: true,
- 223108: true,
- 224041: true,
- 224042: true,
- 224043: true,
- 224045: true,
- 224051: true,
- 224053: true,
- 224057: true,
- 224060: true,
- 224061: true,
- 224062: true,
- 224063: true,
- 224068: true,
- 224069: true,
- 224081: true,
- 224084: true,
- 224087: true,
- 224090: true,
- 224096: true,
- 224105: true,
- 225042: true,
- 227041: true,
- 229045: true,
- 229046: true,
- 229048: true,
- 229049: true,
- 229050: true,
- 231042: true,
- 236041: true,
- 237041: true,
- 238041: true,
- 238042: true,
- 240041: true,
- 240042: true,
- 240043: true,
- 241041: true,
- 243041: true,
- 244041: true,
- 245041: true,
- 247041: true,
- 250041: true,
- 252041: true,
- 253041: true,
- 253045: true,
- 254043: true,
- 255042: true,
- 255043: true,
- 257041: true,
- 257042: true,
- 258041: true,
- 261041: true,
- 264041: true,
- 294042: true,
- 296042: true,
-}
diff --git a/godoc/server.go b/godoc/server.go
index 9c5d556b8..57576e102 100644
--- a/godoc/server.go
+++ b/godoc/server.go
@@ -55,7 +55,6 @@ func (s *handlerServer) registerWithMux(mux *http.ServeMux) {
// directory, PageInfo.PAst and PageInfo.PDoc are nil. If there are no sub-
// directories, PageInfo.Dirs is nil. If an error occurred, PageInfo.Err is
// set to the respective error but the error is not logged.
-//
func (h *handlerServer) GetPageInfo(abspath, relpath string, mode PageInfoMode, goos, goarch string) *PageInfo {
info := &PageInfo{Dirname: abspath, Mode: mode}
@@ -411,7 +410,6 @@ func (p *Presentation) GetPageInfoMode(r *http.Request) PageInfoMode {
// (as is the convention for packages). This is sufficient
// to resolve package identifiers without doing an actual
// import. It never returns an error.
-//
func poorMansImporter(imports map[string]*ast.Object, path string) (*ast.Object, error) {
pkg := imports[path]
if pkg == nil {
@@ -498,7 +496,6 @@ func addNames(names map[string]bool, decl ast.Decl) {
// which correctly updates each package file's comment list.
// (The ast.PackageExports signature is frozen, hence the local
// implementation).
-//
func packageExports(fset *token.FileSet, pkg *ast.Package) {
for _, src := range pkg.Files {
cmap := ast.NewCommentMap(fset, src, src.Comments)
@@ -621,7 +618,6 @@ func (p *Presentation) serveTextFile(w http.ResponseWriter, r *http.Request, abs
// formatGoSource HTML-escapes Go source text and writes it to w,
// decorating it with the specified analysis links.
-//
func formatGoSource(buf *bytes.Buffer, text []byte, links []analysis.Link, pattern string, selection Selection) {
// Emit to a temp buffer so that we can add line anchors at the end.
saved, buf := buf, new(bytes.Buffer)
diff --git a/godoc/spot.go b/godoc/spot.go
index 95ffa4b8c..4720e5b1f 100644
--- a/godoc/spot.go
+++ b/godoc/spot.go
@@ -13,9 +13,8 @@ package godoc
//
// The following encoding is used:
//
-// bits 32 4 1 0
-// value [lori|kind|isIndex]
-//
+// bits 32 4 1 0
+// value [lori|kind|isIndex]
type SpotInfo uint32
// SpotKind describes whether an identifier is declared (and what kind of
diff --git a/godoc/static/package.html b/godoc/static/package.html
index 86445df4c..a04b08b63 100644
--- a/godoc/static/package.html
+++ b/godoc/static/package.html
@@ -17,7 +17,7 @@
{{if $.IsMain}}
{{/* command documentation */}}
- {{comment_html .Doc}}
+ {{comment_html $ .Doc}}
{{else}}
{{/* package documentation */}}
<div id="short-nav">
@@ -42,7 +42,7 @@
</div>
<div class="expanded">
<h2 class="toggleButton" title="Click to hide Overview section">Overview ▾</h2>
- {{comment_html .Doc}}
+ {{comment_html $ .Doc}}
{{example_html $ ""}}
</div>
</div>
@@ -154,14 +154,14 @@
{{with .Consts}}
<h2 id="pkg-constants">Constants</h2>
{{range .}}
- {{comment_html .Doc}}
+ {{comment_html $ .Doc}}
<pre>{{node_html $ .Decl true}}</pre>
{{end}}
{{end}}
{{with .Vars}}
<h2 id="pkg-variables">Variables</h2>
{{range .}}
- {{comment_html .Doc}}
+ {{comment_html $ .Doc}}
<pre>{{node_html $ .Decl true}}</pre>
{{end}}
{{end}}
@@ -174,7 +174,7 @@
{{if $since}}<span title="Added in Go {{$since}}">{{$since}}</span>{{end}}
</h2>
<pre>{{node_html $ .Decl true}}</pre>
- {{comment_html .Doc}}
+ {{comment_html $ .Doc}}
{{example_html $ .Name}}
{{callgraph_html $ "" .Name}}
@@ -187,16 +187,16 @@
{{$since := since "type" "" .Name $.PDoc.ImportPath}}
{{if $since}}<span title="Added in Go {{$since}}">{{$since}}</span>{{end}}
</h2>
- {{comment_html .Doc}}
+ {{comment_html $ .Doc}}
<pre>{{node_html $ .Decl true}}</pre>
{{range .Consts}}
- {{comment_html .Doc}}
+ {{comment_html $ .Doc}}
<pre>{{node_html $ .Decl true}}</pre>
{{end}}
{{range .Vars}}
- {{comment_html .Doc}}
+ {{comment_html $ .Doc}}
<pre>{{node_html $ .Decl true}}</pre>
{{end}}
@@ -212,7 +212,7 @@
{{if $since}}<span title="Added in Go {{$since}}">{{$since}}</span>{{end}}
</h3>
<pre>{{node_html $ .Decl true}}</pre>
- {{comment_html .Doc}}
+ {{comment_html $ .Doc}}
{{example_html $ .Name}}
{{callgraph_html $ "" .Name}}
{{end}}
@@ -225,7 +225,7 @@
{{if $since}}<span title="Added in Go {{$since}}">{{$since}}</span>{{end}}
</h3>
<pre>{{node_html $ .Decl true}}</pre>
- {{comment_html .Doc}}
+ {{comment_html $ .Doc}}
{{$name := printf "%s_%s" $tname .Name}}
{{example_html $ $name}}
{{callgraph_html $ .Recv .Name}}
@@ -238,7 +238,7 @@
<h2 id="pkg-note-{{$marker}}">{{noteTitle $marker | html}}s</h2>
<ul style="list-style: none; padding: 0;">
{{range .}}
- <li><a href="{{posLink_url $ .}}" style="float: left;">&#x261e;</a> {{comment_html .Body}}</li>
+ <li><a href="{{posLink_url $ .}}" style="float: left;">&#x261e;</a> {{comment_html $ .Body}}</li>
{{end}}
</ul>
{{end}}
diff --git a/godoc/static/searchdoc.html b/godoc/static/searchdoc.html
index 679c02cf3..84dcb3452 100644
--- a/godoc/static/searchdoc.html
+++ b/godoc/static/searchdoc.html
@@ -15,7 +15,7 @@
<a href="/{{$pkg_html}}">{{html .Package}}</a>.<a href="{{$doc_html}}">{{.Name}}</a>
{{end}}
{{if .Doc}}
- <p>{{comment_html .Doc}}</p>
+ <p>{{comment_html $ .Doc}}</p>
{{else}}
<p><em>No documentation available</em></p>
{{end}}
diff --git a/godoc/static/static.go b/godoc/static/static.go
index ada60fab6..d6e5f2d2e 100644
--- a/godoc/static/static.go
+++ b/godoc/static/static.go
@@ -83,7 +83,7 @@ var Files = map[string]string{
"methodset.html": "<div\x20class=\"toggle\"\x20style=\"display:\x20none\">\x0a\x09<div\x20class=\"collapsed\">\x0a\x09\x09<p\x20class=\"exampleHeading\x20toggleButton\">\xe2\x96\xb9\x20<span\x20class=\"text\">Method\x20set</span></p>\x0a\x09</div>\x0a\x09<div\x20class=\"expanded\">\x0a\x09\x09<p\x20class=\"exampleHeading\x20toggleButton\">\xe2\x96\xbe\x20<span\x20class=\"text\">Method\x20set</span></p>\x0a\x09\x09<div\x20style=\"margin-left:\x201in\"\x20id='methodset-{{.Index}}'>...</div>\x0a\x09</div>\x0a</div>\x0a",
- "package.html": "<!--\x0a\x09Copyright\x202009\x20The\x20Go\x20Authors.\x20All\x20rights\x20reserved.\x0a\x09Use\x20of\x20this\x20source\x20code\x20is\x20governed\x20by\x20a\x20BSD-style\x0a\x09license\x20that\x20can\x20be\x20found\x20in\x20the\x20LICENSE\x20file.\x0a-->\x0a<!--\x0a\x09Note:\x20Static\x20(i.e.,\x20not\x20template-generated)\x20href\x20and\x20id\x0a\x09attributes\x20start\x20with\x20\"pkg-\"\x20to\x20make\x20it\x20impossible\x20for\x0a\x09them\x20to\x20conflict\x20with\x20generated\x20attributes\x20(some\x20of\x20which\x0a\x09correspond\x20to\x20Go\x20identifiers).\x0a-->\x0a{{with\x20.PDoc}}\x0a\x09<script>\x0a\x09document.ANALYSIS_DATA\x20=\x20{{$.AnalysisData}};\x0a\x09document.CALLGRAPH\x20=\x20{{$.CallGraph}};\x0a\x09</script>\x0a\x0a\x09{{if\x20$.IsMain}}\x0a\x09\x09{{/*\x20command\x20documentation\x20*/}}\x0a\x09\x09{{comment_html\x20.Doc}}\x0a\x09{{else}}\x0a\x09\x09{{/*\x20package\x20documentation\x20*/}}\x0a\x09\x09<div\x20id=\"short-nav\">\x0a\x09\x09\x09<dl>\x0a\x09\x09\x09<dd><code>import\x20\"{{html\x20.ImportPath}}\"</code></dd>\x0a\x09\x09\x09</dl>\x0a\x09\x09\x09<dl>\x0a\x09\x09\x09<dd><a\x20href=\"#pkg-overview\"\x20class=\"overviewLink\">Overview</a></dd>\x0a\x09\x09\x09<dd><a\x20href=\"#pkg-index\"\x20class=\"indexLink\">Index</a></dd>\x0a\x09\x09\x09{{if\x20$.Examples}}\x0a\x09\x09\x09\x09<dd><a\x20href=\"#pkg-examples\"\x20class=\"examplesLink\">Examples</a></dd>\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09{{if\x20$.Dirs}}\x0a\x09\x09\x09\x09<dd><a\x20href=\"#pkg-subdirectories\">Subdirectories</a></dd>\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09</dl>\x0a\x09\x09</div>\x0a\x09\x09<!--\x20The\x20package's\x20Name\x20is\x20printed\x20as\x20title\x20by\x20the\x20top-level\x20template\x20-->\x0a\x09\x09<div\x20id=\"pkg-overview\"\x20class=\"toggleVisible\">\x0a\x09\x09\x09<div\x20class=\"collapsed\">\x0a\x09\x09\x09\x09<h2\x20class=\"toggleButton\"\x20title=\"Click\x20to\x20show\x20Overview\x20section\">Overview\x20\xe2\x96\xb9</h2>\x0a\x09\x09\x09</div>\x0a\x09\x09\x09<div\x20class=\"expanded\">\x0a\x09\x09\x09\x09<h2\x20class=\"toggleButton\"\x20title=\"Click\x20to\x20hide\x20Overview\x20section\">Overview\x20\xe2\x96\xbe</h2>\x0a\x09\x09\x09\x09{{comment_html\x20.Doc}}\x0a\x09\x09\x09\x09{{example_html\x20$\x20\"\"}}\x0a\x09\x09\x09</div>\x0a\x09\x09</div>\x0a\x0a\x09\x09<div\x20id=\"pkg-index\"\x20class=\"toggleVisible\">\x0a\x09\x09<div\x20class=\"collapsed\">\x0a\x09\x09\x09<h2\x20class=\"toggleButton\"\x20title=\"Click\x20to\x20show\x20Index\x20section\">Index\x20\xe2\x96\xb9</h2>\x0a\x09\x09</div>\x0a\x09\x09<div\x20class=\"expanded\">\x0a\x09\x09\x09<h2\x20class=\"toggleButton\"\x20title=\"Click\x20to\x20hide\x20Index\x20section\">Index\x20\xe2\x96\xbe</h2>\x0a\x0a\x09\x09<!--\x20Table\x20of\x20contents\x20for\x20API;\x20must\x20be\x20named\x20manual-nav\x20to\x20turn\x20off\x20auto\x20nav.\x20-->\x0a\x09\x09\x09<div\x20id=\"manual-nav\">\x0a\x09\x09\x09<dl>\x0a\x09\x09\x09{{if\x20.Consts}}\x0a\x09\x09\x09\x09<dd><a\x20href=\"#pkg-constants\">Constants</a></dd>\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09{{if\x20.Vars}}\x0a\x09\x09\x09\x09<dd><a\x20href=\"#pkg-variables\">Variables</a></dd>\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09{{range\x20.Funcs}}\x0a\x09\x09\x09\x09{{$name_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09\x09<dd><a\x20href=\"#{{$name_html}}\">{{node_html\x20$\x20.Decl\x20false\x20|\x20sanitize}}</a></dd>\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09{{range\x20.Types}}\x0a\x09\x09\x09\x09{{$tname_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09\x09<dd><a\x20href=\"#{{$tname_html}}\">type\x20{{$tname_html}}</a></dd>\x0a\x09\x09\x09\x09{{range\x20.Funcs}}\x0a\x09\x09\x09\x09\x09{{$name_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09\x09\x09<dd>&nbsp;\x20&nbsp;\x20<a\x20href=\"#{{$name_html}}\">{{node_html\x20$\x20.Decl\x20false\x20|\x20sanitize}}</a></dd>\x0a\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09{{range\x20.Methods}}\x0a\x09\x09\x09\x09\x09{{$name_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09\x09\x09<dd>&nbsp;\x20&nbsp;\x20<a\x20href=\"#{{$tname_html}}.{{$name_html}}\">{{node_html\x20$\x20.Decl\x20false\x20|\x20sanitize}}</a></dd>\x0a\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09{{if\x20$.Notes}}\x0a\x09\x09\x09\x09{{range\x20$marker,\x20$item\x20:=\x20$.Notes}}\x0a\x09\x09\x09\x09<dd><a\x20href=\"#pkg-note-{{$marker}}\">{{noteTitle\x20$marker\x20|\x20html}}s</a></dd>\x0a\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09</dl>\x0a\x09\x09\x09</div><!--\x20#manual-nav\x20-->\x0a\x0a\x09\x09{{if\x20$.Examples}}\x0a\x09\x09<div\x20id=\"pkg-examples\">\x0a\x09\x09\x09<h3>Examples</h3>\x0a\x09\x09\x09<div\x20class=\"js-expandAll\x20expandAll\x20collapsed\">(Expand\x20All)</div>\x0a\x09\x09\x09<dl>\x0a\x09\x09\x09{{range\x20$.Examples}}\x0a\x09\x09\x09<dd><a\x20class=\"exampleLink\"\x20href=\"#example_{{.Name}}\">{{example_name\x20.Name}}</a></dd>\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09</dl>\x0a\x09\x09</div>\x0a\x09\x09{{end}}\x0a\x0a\x09\x09{{with\x20.Filenames}}\x0a\x09\x09\x09<h3>Package\x20files</h3>\x0a\x09\x09\x09<p>\x0a\x09\x09\x09<span\x20style=\"font-size:90%\">\x0a\x09\x09\x09{{range\x20.}}\x0a\x09\x09\x09\x09<a\x20href=\"{{.|srcLink|html}}\">{{.|filename|html}}</a>\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09</span>\x0a\x09\x09\x09</p>\x0a\x09\x09{{end}}\x0a\x09\x09</div><!--\x20.expanded\x20-->\x0a\x09\x09</div><!--\x20#pkg-index\x20-->\x0a\x0a\x09\x09{{if\x20ne\x20$.CallGraph\x20\"null\"}}\x0a\x09\x09<div\x20id=\"pkg-callgraph\"\x20class=\"toggle\"\x20style=\"display:\x20none\">\x0a\x09\x09<div\x20class=\"collapsed\">\x0a\x09\x09\x09<h2\x20class=\"toggleButton\"\x20title=\"Click\x20to\x20show\x20Internal\x20Call\x20Graph\x20section\">Internal\x20call\x20graph\x20\xe2\x96\xb9</h2>\x0a\x09\x09</div>\x20<!--\x20.expanded\x20-->\x0a\x09\x09<div\x20class=\"expanded\">\x0a\x09\x09\x09<h2\x20class=\"toggleButton\"\x20title=\"Click\x20to\x20hide\x20Internal\x20Call\x20Graph\x20section\">Internal\x20call\x20graph\x20\xe2\x96\xbe</h2>\x0a\x09\x09\x09<p>\x0a\x09\x09\x09\x20\x20In\x20the\x20call\x20graph\x20viewer\x20below,\x20each\x20node\x0a\x09\x09\x09\x20\x20is\x20a\x20function\x20belonging\x20to\x20this\x20package\x0a\x09\x09\x09\x20\x20and\x20its\x20children\x20are\x20the\x20functions\x20it\x0a\x09\x09\x09\x20\x20calls&mdash;perhaps\x20dynamically.\x0a\x09\x09\x09</p>\x0a\x09\x09\x09<p>\x0a\x09\x09\x09\x20\x20The\x20root\x20nodes\x20are\x20the\x20entry\x20points\x20of\x20the\x0a\x09\x09\x09\x20\x20package:\x20functions\x20that\x20may\x20be\x20called\x20from\x0a\x09\x09\x09\x20\x20outside\x20the\x20package.\x0a\x09\x09\x09\x20\x20There\x20may\x20be\x20non-exported\x20or\x20anonymous\x0a\x09\x09\x09\x20\x20functions\x20among\x20them\x20if\x20they\x20are\x20called\x0a\x09\x09\x09\x20\x20dynamically\x20from\x20another\x20package.\x0a\x09\x09\x09</p>\x0a\x09\x09\x09<p>\x0a\x09\x09\x09\x20\x20Click\x20a\x20node\x20to\x20visit\x20that\x20function's\x20source\x20code.\x0a\x09\x09\x09\x20\x20From\x20there\x20you\x20can\x20visit\x20its\x20callers\x20by\x0a\x09\x09\x09\x20\x20clicking\x20its\x20declaring\x20<code>func</code>\x0a\x09\x09\x09\x20\x20token.\x0a\x09\x09\x09</p>\x0a\x09\x09\x09<p>\x0a\x09\x09\x09\x20\x20Functions\x20may\x20be\x20omitted\x20if\x20they\x20were\x0a\x09\x09\x09\x20\x20determined\x20to\x20be\x20unreachable\x20in\x20the\x0a\x09\x09\x09\x20\x20particular\x20programs\x20or\x20tests\x20that\x20were\x0a\x09\x09\x09\x20\x20analyzed.\x0a\x09\x09\x09</p>\x0a\x09\x09\x09<!--\x20Zero\x20means\x20show\x20all\x20package\x20entry\x20points.\x20-->\x0a\x09\x09\x09<ul\x20style=\"margin-left:\x200.5in\"\x20id=\"callgraph-0\"\x20class=\"treeview\"></ul>\x0a\x09\x09</div>\x0a\x09\x09</div>\x20<!--\x20#pkg-callgraph\x20-->\x0a\x09\x09{{end}}\x0a\x0a\x09\x09{{with\x20.Consts}}\x0a\x09\x09\x09<h2\x20id=\"pkg-constants\">Constants</h2>\x0a\x09\x09\x09{{range\x20.}}\x0a\x09\x09\x09\x09{{comment_html\x20.Doc}}\x0a\x09\x09\x09\x09<pre>{{node_html\x20$\x20.Decl\x20true}}</pre>\x0a\x09\x09\x09{{end}}\x0a\x09\x09{{end}}\x0a\x09\x09{{with\x20.Vars}}\x0a\x09\x09\x09<h2\x20id=\"pkg-variables\">Variables</h2>\x0a\x09\x09\x09{{range\x20.}}\x0a\x09\x09\x09\x09{{comment_html\x20.Doc}}\x0a\x09\x09\x09\x09<pre>{{node_html\x20$\x20.Decl\x20true}}</pre>\x0a\x09\x09\x09{{end}}\x0a\x09\x09{{end}}\x0a\x09\x09{{range\x20.Funcs}}\x0a\x09\x09\x09{{/*\x20Name\x20is\x20a\x20string\x20-\x20no\x20need\x20for\x20FSet\x20*/}}\x0a\x09\x09\x09{{$name_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09<h2\x20id=\"{{$name_html}}\">func\x20<a\x20href=\"{{posLink_url\x20$\x20.Decl}}\">{{$name_html}}</a>\x0a\x09\x09\x09\x09<a\x20class=\"permalink\"\x20href=\"#{{$name_html}}\">&#xb6;</a>\x0a\x09\x09\x09\x09{{$since\x20:=\x20since\x20\"func\"\x20\"\"\x20.Name\x20$.PDoc.ImportPath}}\x0a\x09\x09\x09\x09{{if\x20$since}}<span\x20title=\"Added\x20in\x20Go\x20{{$since}}\">{{$since}}</span>{{end}}\x0a\x09\x09\x09</h2>\x0a\x09\x09\x09<pre>{{node_html\x20$\x20.Decl\x20true}}</pre>\x0a\x09\x09\x09{{comment_html\x20.Doc}}\x0a\x09\x09\x09{{example_html\x20$\x20.Name}}\x0a\x09\x09\x09{{callgraph_html\x20$\x20\"\"\x20.Name}}\x0a\x0a\x09\x09{{end}}\x0a\x09\x09{{range\x20.Types}}\x0a\x09\x09\x09{{$tname\x20:=\x20.Name}}\x0a\x09\x09\x09{{$tname_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09<h2\x20id=\"{{$tname_html}}\">type\x20<a\x20href=\"{{posLink_url\x20$\x20.Decl}}\">{{$tname_html}}</a>\x0a\x09\x09\x09\x09<a\x20class=\"permalink\"\x20href=\"#{{$tname_html}}\">&#xb6;</a>\x0a\x09\x09\x09\x09{{$since\x20:=\x20since\x20\"type\"\x20\"\"\x20.Name\x20$.PDoc.ImportPath}}\x0a\x09\x09\x09\x09{{if\x20$since}}<span\x20title=\"Added\x20in\x20Go\x20{{$since}}\">{{$since}}</span>{{end}}\x0a\x09\x09\x09</h2>\x0a\x09\x09\x09{{comment_html\x20.Doc}}\x0a\x09\x09\x09<pre>{{node_html\x20$\x20.Decl\x20true}}</pre>\x0a\x0a\x09\x09\x09{{range\x20.Consts}}\x0a\x09\x09\x09\x09{{comment_html\x20.Doc}}\x0a\x09\x09\x09\x09<pre>{{node_html\x20$\x20.Decl\x20true}}</pre>\x0a\x09\x09\x09{{end}}\x0a\x0a\x09\x09\x09{{range\x20.Vars}}\x0a\x09\x09\x09\x09{{comment_html\x20.Doc}}\x0a\x09\x09\x09\x09<pre>{{node_html\x20$\x20.Decl\x20true}}</pre>\x0a\x09\x09\x09{{end}}\x0a\x0a\x09\x09\x09{{example_html\x20$\x20$tname}}\x0a\x09\x09\x09{{implements_html\x20$\x20$tname}}\x0a\x09\x09\x09{{methodset_html\x20$\x20$tname}}\x0a\x0a\x09\x09\x09{{range\x20.Funcs}}\x0a\x09\x09\x09\x09{{$name_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09\x09<h3\x20id=\"{{$name_html}}\">func\x20<a\x20href=\"{{posLink_url\x20$\x20.Decl}}\">{{$name_html}}</a>\x0a\x09\x09\x09\x09\x09<a\x20class=\"permalink\"\x20href=\"#{{$name_html}}\">&#xb6;</a>\x0a\x09\x09\x09\x09\x09{{$since\x20:=\x20since\x20\"func\"\x20\"\"\x20.Name\x20$.PDoc.ImportPath}}\x0a\x09\x09\x09\x09\x09{{if\x20$since}}<span\x20title=\"Added\x20in\x20Go\x20{{$since}}\">{{$since}}</span>{{end}}\x0a\x09\x09\x09\x09</h3>\x0a\x09\x09\x09\x09<pre>{{node_html\x20$\x20.Decl\x20true}}</pre>\x0a\x09\x09\x09\x09{{comment_html\x20.Doc}}\x0a\x09\x09\x09\x09{{example_html\x20$\x20.Name}}\x0a\x09\x09\x09\x09{{callgraph_html\x20$\x20\"\"\x20.Name}}\x0a\x09\x09\x09{{end}}\x0a\x0a\x09\x09\x09{{range\x20.Methods}}\x0a\x09\x09\x09\x09{{$name_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09\x09<h3\x20id=\"{{$tname_html}}.{{$name_html}}\">func\x20({{html\x20.Recv}})\x20<a\x20href=\"{{posLink_url\x20$\x20.Decl}}\">{{$name_html}}</a>\x0a\x09\x09\x09\x09\x09<a\x20class=\"permalink\"\x20href=\"#{{$tname_html}}.{{$name_html}}\">&#xb6;</a>\x0a\x09\x09\x09\x09\x09{{$since\x20:=\x20since\x20\"method\"\x20.Recv\x20.Name\x20$.PDoc.ImportPath}}\x0a\x09\x09\x09\x09\x09{{if\x20$since}}<span\x20title=\"Added\x20in\x20Go\x20{{$since}}\">{{$since}}</span>{{end}}\x0a\x09\x09\x09\x09</h3>\x0a\x09\x09\x09\x09<pre>{{node_html\x20$\x20.Decl\x20true}}</pre>\x0a\x09\x09\x09\x09{{comment_html\x20.Doc}}\x0a\x09\x09\x09\x09{{$name\x20:=\x20printf\x20\"%s_%s\"\x20$tname\x20.Name}}\x0a\x09\x09\x09\x09{{example_html\x20$\x20$name}}\x0a\x09\x09\x09\x09{{callgraph_html\x20$\x20.Recv\x20.Name}}\x0a\x09\x09\x09{{end}}\x0a\x09\x09{{end}}\x0a\x09{{end}}\x0a\x0a\x09{{with\x20$.Notes}}\x0a\x09\x09{{range\x20$marker,\x20$content\x20:=\x20.}}\x0a\x09\x09\x09<h2\x20id=\"pkg-note-{{$marker}}\">{{noteTitle\x20$marker\x20|\x20html}}s</h2>\x0a\x09\x09\x09<ul\x20style=\"list-style:\x20none;\x20padding:\x200;\">\x0a\x09\x09\x09{{range\x20.}}\x0a\x09\x09\x09<li><a\x20href=\"{{posLink_url\x20$\x20.}}\"\x20style=\"float:\x20left;\">&#x261e;</a>\x20{{comment_html\x20.Body}}</li>\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09</ul>\x0a\x09\x09{{end}}\x0a\x09{{end}}\x0a{{end}}\x0a\x0a{{with\x20.PAst}}\x0a\x09{{range\x20$filename,\x20$ast\x20:=\x20.}}\x0a\x09\x09<a\x20href=\"{{$filename|srcLink|html}}\">{{$filename|filename|html}}</a>:<pre>{{node_html\x20$\x20$ast\x20false}}</pre>\x0a\x09{{end}}\x0a{{end}}\x0a\x0a{{with\x20.Dirs}}\x0a\x09{{/*\x20DirList\x20entries\x20are\x20numbers\x20and\x20strings\x20-\x20no\x20need\x20for\x20FSet\x20*/}}\x0a\x09{{if\x20$.PDoc}}\x0a\x09\x09<h2\x20id=\"pkg-subdirectories\">Subdirectories</h2>\x0a\x09{{end}}\x0a\x09<div\x20class=\"pkg-dir\">\x0a\x09\x09<table>\x0a\x09\x09\x09<tr>\x0a\x09\x09\x09\x09<th\x20class=\"pkg-name\">Name</th>\x0a\x09\x09\x09\x09<th\x20class=\"pkg-synopsis\">Synopsis</th>\x0a\x09\x09\x09</tr>\x0a\x0a\x09\x09\x09{{if\x20not\x20(or\x20(eq\x20$.Dirname\x20\"/src/cmd\")\x20$.DirFlat)}}\x0a\x09\x09\x09<tr>\x0a\x09\x09\x09\x09<td\x20colspan=\"2\"><a\x20href=\"..\">..</a></td>\x0a\x09\x09\x09</tr>\x0a\x09\x09\x09{{end}}\x0a\x0a\x09\x09\x09{{range\x20.List}}\x0a\x09\x09\x09\x09<tr>\x0a\x09\x09\x09\x09{{if\x20$.DirFlat}}\x0a\x09\x09\x09\x09\x09{{if\x20.HasPkg}}\x0a\x09\x09\x09\x09\x09\x09<td\x20class=\"pkg-name\">\x0a\x09\x09\x09\x09\x09\x09\x09<a\x20href=\"{{html\x20.Path}}/{{modeQueryString\x20$.Mode\x20|\x20html}}\">{{html\x20.Path}}</a>\x0a\x09\x09\x09\x09\x09\x09</td>\x0a\x09\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09{{else}}\x0a\x09\x09\x09\x09\x09<td\x20class=\"pkg-name\"\x20style=\"padding-left:\x20{{multiply\x20.Depth\x2020}}px;\">\x0a\x09\x09\x09\x09\x09\x09<a\x20href=\"{{html\x20.Path}}/{{modeQueryString\x20$.Mode\x20|\x20html}}\">{{html\x20.Name}}</a>\x0a\x09\x09\x09\x09\x09</td>\x0a\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09\x09<td\x20class=\"pkg-synopsis\">\x0a\x09\x09\x09\x09\x09\x09{{html\x20.Synopsis}}\x0a\x09\x09\x09\x09\x09</td>\x0a\x09\x09\x09\x09</tr>\x0a\x09\x09\x09{{end}}\x0a\x09\x09</table>\x0a\x09</div>\x0a{{end}}\x0a",
+ "package.html": "<!--\x0a\x09Copyright\x202009\x20The\x20Go\x20Authors.\x20All\x20rights\x20reserved.\x0a\x09Use\x20of\x20this\x20source\x20code\x20is\x20governed\x20by\x20a\x20BSD-style\x0a\x09license\x20that\x20can\x20be\x20found\x20in\x20the\x20LICENSE\x20file.\x0a-->\x0a<!--\x0a\x09Note:\x20Static\x20(i.e.,\x20not\x20template-generated)\x20href\x20and\x20id\x0a\x09attributes\x20start\x20with\x20\"pkg-\"\x20to\x20make\x20it\x20impossible\x20for\x0a\x09them\x20to\x20conflict\x20with\x20generated\x20attributes\x20(some\x20of\x20which\x0a\x09correspond\x20to\x20Go\x20identifiers).\x0a-->\x0a{{with\x20.PDoc}}\x0a\x09<script>\x0a\x09document.ANALYSIS_DATA\x20=\x20{{$.AnalysisData}};\x0a\x09document.CALLGRAPH\x20=\x20{{$.CallGraph}};\x0a\x09</script>\x0a\x0a\x09{{if\x20$.IsMain}}\x0a\x09\x09{{/*\x20command\x20documentation\x20*/}}\x0a\x09\x09{{comment_html\x20$\x20.Doc}}\x0a\x09{{else}}\x0a\x09\x09{{/*\x20package\x20documentation\x20*/}}\x0a\x09\x09<div\x20id=\"short-nav\">\x0a\x09\x09\x09<dl>\x0a\x09\x09\x09<dd><code>import\x20\"{{html\x20.ImportPath}}\"</code></dd>\x0a\x09\x09\x09</dl>\x0a\x09\x09\x09<dl>\x0a\x09\x09\x09<dd><a\x20href=\"#pkg-overview\"\x20class=\"overviewLink\">Overview</a></dd>\x0a\x09\x09\x09<dd><a\x20href=\"#pkg-index\"\x20class=\"indexLink\">Index</a></dd>\x0a\x09\x09\x09{{if\x20$.Examples}}\x0a\x09\x09\x09\x09<dd><a\x20href=\"#pkg-examples\"\x20class=\"examplesLink\">Examples</a></dd>\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09{{if\x20$.Dirs}}\x0a\x09\x09\x09\x09<dd><a\x20href=\"#pkg-subdirectories\">Subdirectories</a></dd>\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09</dl>\x0a\x09\x09</div>\x0a\x09\x09<!--\x20The\x20package's\x20Name\x20is\x20printed\x20as\x20title\x20by\x20the\x20top-level\x20template\x20-->\x0a\x09\x09<div\x20id=\"pkg-overview\"\x20class=\"toggleVisible\">\x0a\x09\x09\x09<div\x20class=\"collapsed\">\x0a\x09\x09\x09\x09<h2\x20class=\"toggleButton\"\x20title=\"Click\x20to\x20show\x20Overview\x20section\">Overview\x20\xe2\x96\xb9</h2>\x0a\x09\x09\x09</div>\x0a\x09\x09\x09<div\x20class=\"expanded\">\x0a\x09\x09\x09\x09<h2\x20class=\"toggleButton\"\x20title=\"Click\x20to\x20hide\x20Overview\x20section\">Overview\x20\xe2\x96\xbe</h2>\x0a\x09\x09\x09\x09{{comment_html\x20$\x20.Doc}}\x0a\x09\x09\x09\x09{{example_html\x20$\x20\"\"}}\x0a\x09\x09\x09</div>\x0a\x09\x09</div>\x0a\x0a\x09\x09<div\x20id=\"pkg-index\"\x20class=\"toggleVisible\">\x0a\x09\x09<div\x20class=\"collapsed\">\x0a\x09\x09\x09<h2\x20class=\"toggleButton\"\x20title=\"Click\x20to\x20show\x20Index\x20section\">Index\x20\xe2\x96\xb9</h2>\x0a\x09\x09</div>\x0a\x09\x09<div\x20class=\"expanded\">\x0a\x09\x09\x09<h2\x20class=\"toggleButton\"\x20title=\"Click\x20to\x20hide\x20Index\x20section\">Index\x20\xe2\x96\xbe</h2>\x0a\x0a\x09\x09<!--\x20Table\x20of\x20contents\x20for\x20API;\x20must\x20be\x20named\x20manual-nav\x20to\x20turn\x20off\x20auto\x20nav.\x20-->\x0a\x09\x09\x09<div\x20id=\"manual-nav\">\x0a\x09\x09\x09<dl>\x0a\x09\x09\x09{{if\x20.Consts}}\x0a\x09\x09\x09\x09<dd><a\x20href=\"#pkg-constants\">Constants</a></dd>\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09{{if\x20.Vars}}\x0a\x09\x09\x09\x09<dd><a\x20href=\"#pkg-variables\">Variables</a></dd>\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09{{range\x20.Funcs}}\x0a\x09\x09\x09\x09{{$name_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09\x09<dd><a\x20href=\"#{{$name_html}}\">{{node_html\x20$\x20.Decl\x20false\x20|\x20sanitize}}</a></dd>\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09{{range\x20.Types}}\x0a\x09\x09\x09\x09{{$tname_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09\x09<dd><a\x20href=\"#{{$tname_html}}\">type\x20{{$tname_html}}</a></dd>\x0a\x09\x09\x09\x09{{range\x20.Funcs}}\x0a\x09\x09\x09\x09\x09{{$name_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09\x09\x09<dd>&nbsp;\x20&nbsp;\x20<a\x20href=\"#{{$name_html}}\">{{node_html\x20$\x20.Decl\x20false\x20|\x20sanitize}}</a></dd>\x0a\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09{{range\x20.Methods}}\x0a\x09\x09\x09\x09\x09{{$name_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09\x09\x09<dd>&nbsp;\x20&nbsp;\x20<a\x20href=\"#{{$tname_html}}.{{$name_html}}\">{{node_html\x20$\x20.Decl\x20false\x20|\x20sanitize}}</a></dd>\x0a\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09{{if\x20$.Notes}}\x0a\x09\x09\x09\x09{{range\x20$marker,\x20$item\x20:=\x20$.Notes}}\x0a\x09\x09\x09\x09<dd><a\x20href=\"#pkg-note-{{$marker}}\">{{noteTitle\x20$marker\x20|\x20html}}s</a></dd>\x0a\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09</dl>\x0a\x09\x09\x09</div><!--\x20#manual-nav\x20-->\x0a\x0a\x09\x09{{if\x20$.Examples}}\x0a\x09\x09<div\x20id=\"pkg-examples\">\x0a\x09\x09\x09<h3>Examples</h3>\x0a\x09\x09\x09<div\x20class=\"js-expandAll\x20expandAll\x20collapsed\">(Expand\x20All)</div>\x0a\x09\x09\x09<dl>\x0a\x09\x09\x09{{range\x20$.Examples}}\x0a\x09\x09\x09<dd><a\x20class=\"exampleLink\"\x20href=\"#example_{{.Name}}\">{{example_name\x20.Name}}</a></dd>\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09</dl>\x0a\x09\x09</div>\x0a\x09\x09{{end}}\x0a\x0a\x09\x09{{with\x20.Filenames}}\x0a\x09\x09\x09<h3>Package\x20files</h3>\x0a\x09\x09\x09<p>\x0a\x09\x09\x09<span\x20style=\"font-size:90%\">\x0a\x09\x09\x09{{range\x20.}}\x0a\x09\x09\x09\x09<a\x20href=\"{{.|srcLink|html}}\">{{.|filename|html}}</a>\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09</span>\x0a\x09\x09\x09</p>\x0a\x09\x09{{end}}\x0a\x09\x09</div><!--\x20.expanded\x20-->\x0a\x09\x09</div><!--\x20#pkg-index\x20-->\x0a\x0a\x09\x09{{if\x20ne\x20$.CallGraph\x20\"null\"}}\x0a\x09\x09<div\x20id=\"pkg-callgraph\"\x20class=\"toggle\"\x20style=\"display:\x20none\">\x0a\x09\x09<div\x20class=\"collapsed\">\x0a\x09\x09\x09<h2\x20class=\"toggleButton\"\x20title=\"Click\x20to\x20show\x20Internal\x20Call\x20Graph\x20section\">Internal\x20call\x20graph\x20\xe2\x96\xb9</h2>\x0a\x09\x09</div>\x20<!--\x20.expanded\x20-->\x0a\x09\x09<div\x20class=\"expanded\">\x0a\x09\x09\x09<h2\x20class=\"toggleButton\"\x20title=\"Click\x20to\x20hide\x20Internal\x20Call\x20Graph\x20section\">Internal\x20call\x20graph\x20\xe2\x96\xbe</h2>\x0a\x09\x09\x09<p>\x0a\x09\x09\x09\x20\x20In\x20the\x20call\x20graph\x20viewer\x20below,\x20each\x20node\x0a\x09\x09\x09\x20\x20is\x20a\x20function\x20belonging\x20to\x20this\x20package\x0a\x09\x09\x09\x20\x20and\x20its\x20children\x20are\x20the\x20functions\x20it\x0a\x09\x09\x09\x20\x20calls&mdash;perhaps\x20dynamically.\x0a\x09\x09\x09</p>\x0a\x09\x09\x09<p>\x0a\x09\x09\x09\x20\x20The\x20root\x20nodes\x20are\x20the\x20entry\x20points\x20of\x20the\x0a\x09\x09\x09\x20\x20package:\x20functions\x20that\x20may\x20be\x20called\x20from\x0a\x09\x09\x09\x20\x20outside\x20the\x20package.\x0a\x09\x09\x09\x20\x20There\x20may\x20be\x20non-exported\x20or\x20anonymous\x0a\x09\x09\x09\x20\x20functions\x20among\x20them\x20if\x20they\x20are\x20called\x0a\x09\x09\x09\x20\x20dynamically\x20from\x20another\x20package.\x0a\x09\x09\x09</p>\x0a\x09\x09\x09<p>\x0a\x09\x09\x09\x20\x20Click\x20a\x20node\x20to\x20visit\x20that\x20function's\x20source\x20code.\x0a\x09\x09\x09\x20\x20From\x20there\x20you\x20can\x20visit\x20its\x20callers\x20by\x0a\x09\x09\x09\x20\x20clicking\x20its\x20declaring\x20<code>func</code>\x0a\x09\x09\x09\x20\x20token.\x0a\x09\x09\x09</p>\x0a\x09\x09\x09<p>\x0a\x09\x09\x09\x20\x20Functions\x20may\x20be\x20omitted\x20if\x20they\x20were\x0a\x09\x09\x09\x20\x20determined\x20to\x20be\x20unreachable\x20in\x20the\x0a\x09\x09\x09\x20\x20particular\x20programs\x20or\x20tests\x20that\x20were\x0a\x09\x09\x09\x20\x20analyzed.\x0a\x09\x09\x09</p>\x0a\x09\x09\x09<!--\x20Zero\x20means\x20show\x20all\x20package\x20entry\x20points.\x20-->\x0a\x09\x09\x09<ul\x20style=\"margin-left:\x200.5in\"\x20id=\"callgraph-0\"\x20class=\"treeview\"></ul>\x0a\x09\x09</div>\x0a\x09\x09</div>\x20<!--\x20#pkg-callgraph\x20-->\x0a\x09\x09{{end}}\x0a\x0a\x09\x09{{with\x20.Consts}}\x0a\x09\x09\x09<h2\x20id=\"pkg-constants\">Constants</h2>\x0a\x09\x09\x09{{range\x20.}}\x0a\x09\x09\x09\x09{{comment_html\x20$\x20.Doc}}\x0a\x09\x09\x09\x09<pre>{{node_html\x20$\x20.Decl\x20true}}</pre>\x0a\x09\x09\x09{{end}}\x0a\x09\x09{{end}}\x0a\x09\x09{{with\x20.Vars}}\x0a\x09\x09\x09<h2\x20id=\"pkg-variables\">Variables</h2>\x0a\x09\x09\x09{{range\x20.}}\x0a\x09\x09\x09\x09{{comment_html\x20$\x20.Doc}}\x0a\x09\x09\x09\x09<pre>{{node_html\x20$\x20.Decl\x20true}}</pre>\x0a\x09\x09\x09{{end}}\x0a\x09\x09{{end}}\x0a\x09\x09{{range\x20.Funcs}}\x0a\x09\x09\x09{{/*\x20Name\x20is\x20a\x20string\x20-\x20no\x20need\x20for\x20FSet\x20*/}}\x0a\x09\x09\x09{{$name_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09<h2\x20id=\"{{$name_html}}\">func\x20<a\x20href=\"{{posLink_url\x20$\x20.Decl}}\">{{$name_html}}</a>\x0a\x09\x09\x09\x09<a\x20class=\"permalink\"\x20href=\"#{{$name_html}}\">&#xb6;</a>\x0a\x09\x09\x09\x09{{$since\x20:=\x20since\x20\"func\"\x20\"\"\x20.Name\x20$.PDoc.ImportPath}}\x0a\x09\x09\x09\x09{{if\x20$since}}<span\x20title=\"Added\x20in\x20Go\x20{{$since}}\">{{$since}}</span>{{end}}\x0a\x09\x09\x09</h2>\x0a\x09\x09\x09<pre>{{node_html\x20$\x20.Decl\x20true}}</pre>\x0a\x09\x09\x09{{comment_html\x20$\x20.Doc}}\x0a\x09\x09\x09{{example_html\x20$\x20.Name}}\x0a\x09\x09\x09{{callgraph_html\x20$\x20\"\"\x20.Name}}\x0a\x0a\x09\x09{{end}}\x0a\x09\x09{{range\x20.Types}}\x0a\x09\x09\x09{{$tname\x20:=\x20.Name}}\x0a\x09\x09\x09{{$tname_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09<h2\x20id=\"{{$tname_html}}\">type\x20<a\x20href=\"{{posLink_url\x20$\x20.Decl}}\">{{$tname_html}}</a>\x0a\x09\x09\x09\x09<a\x20class=\"permalink\"\x20href=\"#{{$tname_html}}\">&#xb6;</a>\x0a\x09\x09\x09\x09{{$since\x20:=\x20since\x20\"type\"\x20\"\"\x20.Name\x20$.PDoc.ImportPath}}\x0a\x09\x09\x09\x09{{if\x20$since}}<span\x20title=\"Added\x20in\x20Go\x20{{$since}}\">{{$since}}</span>{{end}}\x0a\x09\x09\x09</h2>\x0a\x09\x09\x09{{comment_html\x20$\x20.Doc}}\x0a\x09\x09\x09<pre>{{node_html\x20$\x20.Decl\x20true}}</pre>\x0a\x0a\x09\x09\x09{{range\x20.Consts}}\x0a\x09\x09\x09\x09{{comment_html\x20$\x20.Doc}}\x0a\x09\x09\x09\x09<pre>{{node_html\x20$\x20.Decl\x20true}}</pre>\x0a\x09\x09\x09{{end}}\x0a\x0a\x09\x09\x09{{range\x20.Vars}}\x0a\x09\x09\x09\x09{{comment_html\x20$\x20.Doc}}\x0a\x09\x09\x09\x09<pre>{{node_html\x20$\x20.Decl\x20true}}</pre>\x0a\x09\x09\x09{{end}}\x0a\x0a\x09\x09\x09{{example_html\x20$\x20$tname}}\x0a\x09\x09\x09{{implements_html\x20$\x20$tname}}\x0a\x09\x09\x09{{methodset_html\x20$\x20$tname}}\x0a\x0a\x09\x09\x09{{range\x20.Funcs}}\x0a\x09\x09\x09\x09{{$name_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09\x09<h3\x20id=\"{{$name_html}}\">func\x20<a\x20href=\"{{posLink_url\x20$\x20.Decl}}\">{{$name_html}}</a>\x0a\x09\x09\x09\x09\x09<a\x20class=\"permalink\"\x20href=\"#{{$name_html}}\">&#xb6;</a>\x0a\x09\x09\x09\x09\x09{{$since\x20:=\x20since\x20\"func\"\x20\"\"\x20.Name\x20$.PDoc.ImportPath}}\x0a\x09\x09\x09\x09\x09{{if\x20$since}}<span\x20title=\"Added\x20in\x20Go\x20{{$since}}\">{{$since}}</span>{{end}}\x0a\x09\x09\x09\x09</h3>\x0a\x09\x09\x09\x09<pre>{{node_html\x20$\x20.Decl\x20true}}</pre>\x0a\x09\x09\x09\x09{{comment_html\x20$\x20.Doc}}\x0a\x09\x09\x09\x09{{example_html\x20$\x20.Name}}\x0a\x09\x09\x09\x09{{callgraph_html\x20$\x20\"\"\x20.Name}}\x0a\x09\x09\x09{{end}}\x0a\x0a\x09\x09\x09{{range\x20.Methods}}\x0a\x09\x09\x09\x09{{$name_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09\x09<h3\x20id=\"{{$tname_html}}.{{$name_html}}\">func\x20({{html\x20.Recv}})\x20<a\x20href=\"{{posLink_url\x20$\x20.Decl}}\">{{$name_html}}</a>\x0a\x09\x09\x09\x09\x09<a\x20class=\"permalink\"\x20href=\"#{{$tname_html}}.{{$name_html}}\">&#xb6;</a>\x0a\x09\x09\x09\x09\x09{{$since\x20:=\x20since\x20\"method\"\x20.Recv\x20.Name\x20$.PDoc.ImportPath}}\x0a\x09\x09\x09\x09\x09{{if\x20$since}}<span\x20title=\"Added\x20in\x20Go\x20{{$since}}\">{{$since}}</span>{{end}}\x0a\x09\x09\x09\x09</h3>\x0a\x09\x09\x09\x09<pre>{{node_html\x20$\x20.Decl\x20true}}</pre>\x0a\x09\x09\x09\x09{{comment_html\x20$\x20.Doc}}\x0a\x09\x09\x09\x09{{$name\x20:=\x20printf\x20\"%s_%s\"\x20$tname\x20.Name}}\x0a\x09\x09\x09\x09{{example_html\x20$\x20$name}}\x0a\x09\x09\x09\x09{{callgraph_html\x20$\x20.Recv\x20.Name}}\x0a\x09\x09\x09{{end}}\x0a\x09\x09{{end}}\x0a\x09{{end}}\x0a\x0a\x09{{with\x20$.Notes}}\x0a\x09\x09{{range\x20$marker,\x20$content\x20:=\x20.}}\x0a\x09\x09\x09<h2\x20id=\"pkg-note-{{$marker}}\">{{noteTitle\x20$marker\x20|\x20html}}s</h2>\x0a\x09\x09\x09<ul\x20style=\"list-style:\x20none;\x20padding:\x200;\">\x0a\x09\x09\x09{{range\x20.}}\x0a\x09\x09\x09<li><a\x20href=\"{{posLink_url\x20$\x20.}}\"\x20style=\"float:\x20left;\">&#x261e;</a>\x20{{comment_html\x20$\x20.Body}}</li>\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09</ul>\x0a\x09\x09{{end}}\x0a\x09{{end}}\x0a{{end}}\x0a\x0a{{with\x20.PAst}}\x0a\x09{{range\x20$filename,\x20$ast\x20:=\x20.}}\x0a\x09\x09<a\x20href=\"{{$filename|srcLink|html}}\">{{$filename|filename|html}}</a>:<pre>{{node_html\x20$\x20$ast\x20false}}</pre>\x0a\x09{{end}}\x0a{{end}}\x0a\x0a{{with\x20.Dirs}}\x0a\x09{{/*\x20DirList\x20entries\x20are\x20numbers\x20and\x20strings\x20-\x20no\x20need\x20for\x20FSet\x20*/}}\x0a\x09{{if\x20$.PDoc}}\x0a\x09\x09<h2\x20id=\"pkg-subdirectories\">Subdirectories</h2>\x0a\x09{{end}}\x0a\x09<div\x20class=\"pkg-dir\">\x0a\x09\x09<table>\x0a\x09\x09\x09<tr>\x0a\x09\x09\x09\x09<th\x20class=\"pkg-name\">Name</th>\x0a\x09\x09\x09\x09<th\x20class=\"pkg-synopsis\">Synopsis</th>\x0a\x09\x09\x09</tr>\x0a\x0a\x09\x09\x09{{if\x20not\x20(or\x20(eq\x20$.Dirname\x20\"/src/cmd\")\x20$.DirFlat)}}\x0a\x09\x09\x09<tr>\x0a\x09\x09\x09\x09<td\x20colspan=\"2\"><a\x20href=\"..\">..</a></td>\x0a\x09\x09\x09</tr>\x0a\x09\x09\x09{{end}}\x0a\x0a\x09\x09\x09{{range\x20.List}}\x0a\x09\x09\x09\x09<tr>\x0a\x09\x09\x09\x09{{if\x20$.DirFlat}}\x0a\x09\x09\x09\x09\x09{{if\x20.HasPkg}}\x0a\x09\x09\x09\x09\x09\x09<td\x20class=\"pkg-name\">\x0a\x09\x09\x09\x09\x09\x09\x09<a\x20href=\"{{html\x20.Path}}/{{modeQueryString\x20$.Mode\x20|\x20html}}\">{{html\x20.Path}}</a>\x0a\x09\x09\x09\x09\x09\x09</td>\x0a\x09\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09{{else}}\x0a\x09\x09\x09\x09\x09<td\x20class=\"pkg-name\"\x20style=\"padding-left:\x20{{multiply\x20.Depth\x2020}}px;\">\x0a\x09\x09\x09\x09\x09\x09<a\x20href=\"{{html\x20.Path}}/{{modeQueryString\x20$.Mode\x20|\x20html}}\">{{html\x20.Name}}</a>\x0a\x09\x09\x09\x09\x09</td>\x0a\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09\x09<td\x20class=\"pkg-synopsis\">\x0a\x09\x09\x09\x09\x09\x09{{html\x20.Synopsis}}\x0a\x09\x09\x09\x09\x09</td>\x0a\x09\x09\x09\x09</tr>\x0a\x09\x09\x09{{end}}\x0a\x09\x09</table>\x0a\x09</div>\x0a{{end}}\x0a",
"packageroot.html": "<!--\x0a\x09Copyright\x202018\x20The\x20Go\x20Authors.\x20All\x20rights\x20reserved.\x0a\x09Use\x20of\x20this\x20source\x20code\x20is\x20governed\x20by\x20a\x20BSD-style\x0a\x09license\x20that\x20can\x20be\x20found\x20in\x20the\x20LICENSE\x20file.\x0a-->\x0a<!--\x0a\x09Note:\x20Static\x20(i.e.,\x20not\x20template-generated)\x20href\x20and\x20id\x0a\x09attributes\x20start\x20with\x20\"pkg-\"\x20to\x20make\x20it\x20impossible\x20for\x0a\x09them\x20to\x20conflict\x20with\x20generated\x20attributes\x20(some\x20of\x20which\x0a\x09correspond\x20to\x20Go\x20identifiers).\x0a-->\x0a{{with\x20.PAst}}\x0a\x09{{range\x20$filename,\x20$ast\x20:=\x20.}}\x0a\x09\x09<a\x20href=\"{{$filename|srcLink|html}}\">{{$filename|filename|html}}</a>:<pre>{{node_html\x20$\x20$ast\x20false}}</pre>\x0a\x09{{end}}\x0a{{end}}\x0a\x0a{{with\x20.Dirs}}\x0a\x09{{/*\x20DirList\x20entries\x20are\x20numbers\x20and\x20strings\x20-\x20no\x20need\x20for\x20FSet\x20*/}}\x0a\x09{{if\x20$.PDoc}}\x0a\x09\x09<h2\x20id=\"pkg-subdirectories\">Subdirectories</h2>\x0a\x09{{end}}\x0a\x09\x09<div\x20id=\"manual-nav\">\x0a\x09\x09\x09<img\x20alt=\"\"\x20class=\"gopher\"\x20src=\"/lib/godoc/gopher/pkg.png\"/>\x0a\x09\x09\x09<dl>\x0a\x09\x09\x09\x09<dt><a\x20href=\"#stdlib\">Standard\x20library</a></dt>\x0a\x09\x09\x09\x09{{if\x20hasThirdParty\x20.List\x20}}\x0a\x09\x09\x09\x09\x09<dt><a\x20href=\"#thirdparty\">Third\x20party</a></dt>\x0a\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09<dt><a\x20href=\"#other\">Other\x20packages</a></dt>\x0a\x09\x09\x09\x09<dd><a\x20href=\"#subrepo\">Sub-repositories</a></dd>\x0a\x09\x09\x09\x09<dd><a\x20href=\"#community\">Community</a></dd>\x0a\x09\x09\x09</dl>\x0a\x09\x09</div>\x0a\x0a\x09\x09<div\x20id=\"stdlib\"\x20class=\"toggleVisible\">\x0a\x09\x09\x09<div\x20class=\"collapsed\">\x0a\x09\x09\x09\x09<h2\x20class=\"toggleButton\"\x20title=\"Click\x20to\x20show\x20Standard\x20library\x20section\">Standard\x20library\x20\xe2\x96\xb9</h2>\x0a\x09\x09\x09</div>\x0a\x09\x09\x09<div\x20class=\"expanded\">\x0a\x09\x09\x09\x09<h2\x20class=\"toggleButton\"\x20title=\"Click\x20to\x20hide\x20Standard\x20library\x20section\">Standard\x20library\x20\xe2\x96\xbe</h2>\x0a\x09\x09\x09\x09<div\x20class=\"pkg-dir\">\x0a\x09\x09\x09\x09\x09<table>\x0a\x09\x09\x09\x09\x09\x09<tr>\x0a\x09\x09\x09\x09\x09\x09\x09<th\x20class=\"pkg-name\">Name</th>\x0a\x09\x09\x09\x09\x09\x09\x09<th\x20class=\"pkg-synopsis\">Synopsis</th>\x0a\x09\x09\x09\x09\x09\x09</tr>\x0a\x0a\x09\x09\x09\x09\x09\x09{{range\x20.List}}\x0a\x09\x09\x09\x09\x09\x09\x09<tr>\x0a\x09\x09\x09\x09\x09\x09\x09{{if\x20eq\x20.RootType\x20\"GOROOT\"}}\x0a\x09\x09\x09\x09\x09\x09\x09{{if\x20$.DirFlat}}\x0a\x09\x09\x09\x09\x09\x09\x09\x09{{if\x20.HasPkg}}\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09<td\x20class=\"pkg-name\">\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09<a\x20href=\"{{html\x20.Path}}/{{modeQueryString\x20$.Mode\x20|\x20html}}\">{{html\x20.Path}}</a>\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09</td>\x0a\x09\x09\x09\x09\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09\x09\x09\x09{{else}}\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09<td\x20class=\"pkg-name\"\x20style=\"padding-left:\x20{{multiply\x20.Depth\x2020}}px;\">\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09<a\x20href=\"{{html\x20.Path}}/{{modeQueryString\x20$.Mode\x20|\x20html}}\">{{html\x20.Name}}</a>\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09</td>\x0a\x09\x09\x09\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09\x09\x09\x09\x09<td\x20class=\"pkg-synopsis\">\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09{{html\x20.Synopsis}}\x0a\x09\x09\x09\x09\x09\x09\x09\x09</td>\x0a\x09\x09\x09\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09\x09\x09\x09</tr>\x0a\x09\x09\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09\x09</table>\x0a\x09\x09\x09\x09</div>\x20<!--\x20.pkg-dir\x20-->\x0a\x09\x09\x09</div>\x20<!--\x20.expanded\x20-->\x0a\x09\x09</div>\x20<!--\x20#stdlib\x20.toggleVisible\x20-->\x0a\x0a\x09{{if\x20hasThirdParty\x20.List\x20}}\x0a\x09\x09<div\x20id=\"thirdparty\"\x20class=\"toggleVisible\">\x0a\x09\x09\x09<div\x20class=\"collapsed\">\x0a\x09\x09\x09\x09<h2\x20class=\"toggleButton\"\x20title=\"Click\x20to\x20show\x20Third\x20party\x20section\">Third\x20party\x20\xe2\x96\xb9</h2>\x0a\x09\x09\x09</div>\x0a\x09\x09\x09<div\x20class=\"expanded\">\x0a\x09\x09\x09\x09<h2\x20class=\"toggleButton\"\x20title=\"Click\x20to\x20hide\x20Third\x20party\x20section\">Third\x20party\x20\xe2\x96\xbe</h2>\x0a\x09\x09\x09\x09<div\x20class=\"pkg-dir\">\x0a\x09\x09\x09\x09\x09<table>\x0a\x09\x09\x09\x09\x09\x09<tr>\x0a\x09\x09\x09\x09\x09\x09\x09<th\x20class=\"pkg-name\">Name</th>\x0a\x09\x09\x09\x09\x09\x09\x09<th\x20class=\"pkg-synopsis\">Synopsis</th>\x0a\x09\x09\x09\x09\x09\x09</tr>\x0a\x0a\x09\x09\x09\x09\x09\x09{{range\x20.List}}\x0a\x09\x09\x09\x09\x09\x09\x09<tr>\x0a\x09\x09\x09\x09\x09\x09\x09\x09{{if\x20eq\x20.RootType\x20\"GOPATH\"}}\x0a\x09\x09\x09\x09\x09\x09\x09\x09{{if\x20$.DirFlat}}\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09{{if\x20.HasPkg}}\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09<td\x20class=\"pkg-name\">\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09<a\x20href=\"{{html\x20.Path}}/{{modeQueryString\x20$.Mode\x20|\x20html}}\">{{html\x20.Path}}</a>\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09</td>\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09\x09\x09\x09\x09{{else}}\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09<td\x20class=\"pkg-name\"\x20style=\"padding-left:\x20{{multiply\x20.Depth\x2020}}px;\">\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09<a\x20href=\"{{html\x20.Path}}/{{modeQueryString\x20$.Mode\x20|\x20html}}\">{{html\x20.Name}}</a>\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09</td>\x0a\x09\x09\x09\x09\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09<td\x20class=\"pkg-synopsis\">\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09{{html\x20.Synopsis}}\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09</td>\x0a\x09\x09\x09\x09\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09\x09\x09\x09</tr>\x0a\x09\x09\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09\x09</table>\x0a\x09\x09\x09\x09</div>\x20<!--\x20.pkg-dir\x20-->\x0a\x09\x09\x09</div>\x20<!--\x20.expanded\x20-->\x0a\x09\x09</div>\x20<!--\x20#stdlib\x20.toggleVisible\x20-->\x0a\x09{{end}}\x0a\x0a\x09<h2\x20id=\"other\">Other\x20packages</h2>\x0a\x09<h3\x20id=\"subrepo\">Sub-repositories</h3>\x0a\x09<p>\x0a\x09These\x20packages\x20are\x20part\x20of\x20the\x20Go\x20Project\x20but\x20outside\x20the\x20main\x20Go\x20tree.\x0a\x09They\x20are\x20developed\x20under\x20looser\x20<a\x20href=\"https://golang.org/doc/go1compat\">compatibility\x20requirements</a>\x20than\x20the\x20Go\x20core.\x0a\x09Install\x20them\x20with\x20\"<a\x20href=\"/cmd/go/#hdr-Download_and_install_packages_and_dependencies\">go\x20get</a>\".\x0a\x09</p>\x0a\x09<ul>\x0a\x09\x09<li><a\x20href=\"//pkg.go.dev/golang.org/x/benchmarks\">benchmarks</a>\x20\xe2\x80\x94\x20benchmarks\x20to\x20measure\x20Go\x20as\x20it\x20is\x20developed.</li>\x0a\x09\x09<li><a\x20href=\"//pkg.go.dev/golang.org/x/blog\">blog</a>\x20\xe2\x80\x94\x20<a\x20href=\"//blog.golang.org\">blog.golang.org</a>'s\x20implementation.</li>\x0a\x09\x09<li><a\x20href=\"//pkg.go.dev/golang.org/x/build\">build</a>\x20\xe2\x80\x94\x20<a\x20href=\"//build.golang.org\">build.golang.org</a>'s\x20implementation.</li>\x0a\x09\x09<li><a\x20href=\"//pkg.go.dev/golang.org/x/crypto\">crypto</a>\x20\xe2\x80\x94\x20additional\x20cryptography\x20packages.</li>\x0a\x09\x09<li><a\x20href=\"//pkg.go.dev/golang.org/x/debug\">debug</a>\x20\xe2\x80\x94\x20an\x20experimental\x20debugger\x20for\x20Go.</li>\x0a\x09\x09<li><a\x20href=\"//pkg.go.dev/golang.org/x/image\">image</a>\x20\xe2\x80\x94\x20additional\x20imaging\x20packages.</li>\x0a\x09\x09<li><a\x20href=\"//pkg.go.dev/golang.org/x/mobile\">mobile</a>\x20\xe2\x80\x94\x20experimental\x20support\x20for\x20Go\x20on\x20mobile\x20platforms.</li>\x0a\x09\x09<li><a\x20href=\"//pkg.go.dev/golang.org/x/net\">net</a>\x20\xe2\x80\x94\x20additional\x20networking\x20packages.</li>\x0a\x09\x09<li><a\x20href=\"//pkg.go.dev/golang.org/x/perf\">perf</a>\x20\xe2\x80\x94\x20packages\x20and\x20tools\x20for\x20performance\x20measurement,\x20storage,\x20and\x20analysis.</li>\x0a\x09\x09<li><a\x20href=\"//pkg.go.dev/golang.org/x/pkgsite\">pkgsite</a>\x20\xe2\x80\x94\x20home\x20of\x20the\x20pkg.go.dev\x20website.</li>\x0a\x09\x09<li><a\x20href=\"//pkg.go.dev/golang.org/x/review\">review</a>\x20\xe2\x80\x94\x20a\x20tool\x20for\x20working\x20with\x20Gerrit\x20code\x20reviews.</li>\x0a\x09\x09<li><a\x20href=\"//pkg.go.dev/golang.org/x/sync\">sync</a>\x20\xe2\x80\x94\x20additional\x20concurrency\x20primitives.</li>\x0a\x09\x09<li><a\x20href=\"//pkg.go.dev/golang.org/x/sys\">sys</a>\x20\xe2\x80\x94\x20packages\x20for\x20making\x20system\x20calls.</li>\x0a\x09\x09<li><a\x20href=\"//pkg.go.dev/golang.org/x/text\">text</a>\x20\xe2\x80\x94\x20packages\x20for\x20working\x20with\x20text.</li>\x0a\x09\x09<li><a\x20href=\"//pkg.go.dev/golang.org/x/time\">time</a>\x20\xe2\x80\x94\x20additional\x20time\x20packages.</li>\x0a\x09\x09<li><a\x20href=\"//pkg.go.dev/golang.org/x/tools\">tools</a>\x20\xe2\x80\x94\x20godoc,\x20goimports,\x20gorename,\x20and\x20other\x20tools.</li>\x0a\x09\x09<li><a\x20href=\"//pkg.go.dev/golang.org/x/tour\">tour</a>\x20\xe2\x80\x94\x20<a\x20href=\"//tour.golang.org\">tour.golang.org</a>'s\x20implementation.</li>\x0a\x09\x09<li><a\x20href=\"//pkg.go.dev/golang.org/x/exp\">exp</a>\x20\xe2\x80\x94\x20experimental\x20and\x20deprecated\x20packages\x20(handle\x20with\x20care;\x20may\x20change\x20without\x20warning).</li>\x0a\x09</ul>\x0a\x0a\x09<h3\x20id=\"community\">Community</h3>\x0a\x09<p>\x0a\x09These\x20services\x20can\x20help\x20you\x20find\x20Open\x20Source\x20packages\x20provided\x20by\x20the\x20community.\x0a\x09</p>\x0a\x09<ul>\x0a\x09\x09<li><a\x20href=\"//pkg.go.dev\">Pkg.go.dev</a>\x20-\x20the\x20Go\x20package\x20discovery\x20site.</li>\x0a\x09\x09<li><a\x20href=\"/wiki/Projects\">Projects\x20at\x20the\x20Go\x20Wiki</a>\x20-\x20a\x20curated\x20list\x20of\x20Go\x20projects.</li>\x0a\x09</ul>\x0a{{end}}\x0a",
@@ -95,7 +95,7 @@ var Files = map[string]string{
"searchcode.html": "<!--\x0a\x09Copyright\x202009\x20The\x20Go\x20Authors.\x20All\x20rights\x20reserved.\x0a\x09Use\x20of\x20this\x20source\x20code\x20is\x20governed\x20by\x20a\x20BSD-style\x0a\x09license\x20that\x20can\x20be\x20found\x20in\x20the\x20LICENSE\x20file.\x0a-->\x0a{{$query_url\x20:=\x20urlquery\x20.Query}}\x0a{{if\x20not\x20.Idents}}\x0a\x09{{with\x20.Pak}}\x0a\x09\x09<h2\x20id=\"Packages\">Package\x20{{html\x20$.Query}}</h2>\x0a\x09\x09<p>\x0a\x09\x09<table\x20class=\"layout\">\x0a\x09\x09{{range\x20.}}\x0a\x09\x09\x09{{$pkg_html\x20:=\x20pkgLink\x20.Pak.Path\x20|\x20html}}\x0a\x09\x09\x09<tr><td><a\x20href=\"/{{$pkg_html}}\">{{$pkg_html}}</a></td></tr>\x0a\x09\x09{{end}}\x0a\x09\x09</table>\x0a\x09\x09</p>\x0a\x09{{end}}\x0a{{end}}\x0a{{with\x20.Hit}}\x0a\x09{{with\x20.Decls}}\x0a\x09\x09<h2\x20id=\"Global\">Package-level\x20declarations</h2>\x0a\x09\x09{{range\x20.}}\x0a\x09\x09\x09{{$pkg_html\x20:=\x20pkgLink\x20.Pak.Path\x20|\x20html}}\x0a\x09\x09\x09<h3\x20id=\"Global_{{$pkg_html}}\">package\x20<a\x20href=\"/{{$pkg_html}}\">{{html\x20.Pak.Name}}</a></h3>\x0a\x09\x09\x09{{range\x20.Files}}\x0a\x09\x09\x09\x09{{$file\x20:=\x20.File.Path}}\x0a\x09\x09\x09\x09{{range\x20.Groups}}\x0a\x09\x09\x09\x09\x09{{range\x20.}}\x0a\x09\x09\x09\x09\x09\x09{{$line\x20:=\x20infoLine\x20.}}\x0a\x09\x09\x09\x09\x09\x09<a\x20href=\"{{queryLink\x20$file\x20$query_url\x20$line\x20|\x20html}}\">{{$file}}:{{$line}}</a>\x0a\x09\x09\x09\x09\x09\x09{{infoSnippet_html\x20.}}\x0a\x09\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09{{end}}\x0a\x09\x09{{end}}\x0a\x09{{end}}\x0a\x09{{with\x20.Others}}\x0a\x09\x09<h2\x20id=\"Local\">Local\x20declarations\x20and\x20uses</h2>\x0a\x09\x09{{range\x20.}}\x0a\x09\x09\x09{{$pkg_html\x20:=\x20pkgLink\x20.Pak.Path\x20|\x20html}}\x0a\x09\x09\x09<h3\x20id=\"Local_{{$pkg_html}}\">package\x20<a\x20href=\"/{{$pkg_html}}\">{{html\x20.Pak.Name}}</a></h3>\x0a\x09\x09\x09{{range\x20.Files}}\x0a\x09\x09\x09\x09{{$file\x20:=\x20.File.Path}}\x0a\x09\x09\x09\x09<a\x20href=\"{{queryLink\x20$file\x20$query_url\x200\x20|\x20html}}\">{{$file}}</a>\x0a\x09\x09\x09\x09<table\x20class=\"layout\">\x0a\x09\x09\x09\x09{{range\x20.Groups}}\x0a\x09\x09\x09\x09\x09<tr>\x0a\x09\x09\x09\x09\x09<td\x20width=\"25\"></td>\x0a\x09\x09\x09\x09\x09<th\x20align=\"left\"\x20valign=\"top\">{{index\x20.\x200\x20|\x20infoKind_html}}</th>\x0a\x09\x09\x09\x09\x09<td\x20align=\"left\"\x20width=\"4\"></td>\x0a\x09\x09\x09\x09\x09<td>\x0a\x09\x09\x09\x09\x09{{range\x20.}}\x0a\x09\x09\x09\x09\x09\x09{{$line\x20:=\x20infoLine\x20.}}\x0a\x09\x09\x09\x09\x09\x09<a\x20href=\"{{queryLink\x20$file\x20$query_url\x20$line\x20|\x20html}}\">{{$line}}</a>\x0a\x09\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09\x09</td>\x0a\x09\x09\x09\x09\x09</tr>\x0a\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09</table>\x0a\x09\x09\x09{{end}}\x0a\x09\x09{{end}}\x0a\x09{{end}}\x0a{{end}}\x0a",
- "searchdoc.html": "<!--\x0a\x09Copyright\x202009\x20The\x20Go\x20Authors.\x20All\x20rights\x20reserved.\x0a\x09Use\x20of\x20this\x20source\x20code\x20is\x20governed\x20by\x20a\x20BSD-style\x0a\x09license\x20that\x20can\x20be\x20found\x20in\x20the\x20LICENSE\x20file.\x0a-->\x0a{{range\x20$key,\x20$val\x20:=\x20.Idents}}\x0a\x09{{if\x20$val}}\x0a\x09\x09<h2\x20id=\"{{$key.Name}}\">{{$key.Name}}</h2>\x0a\x09\x09{{range\x20$val}}\x0a\x09\x09\x09{{$pkg_html\x20:=\x20pkgLink\x20.Path\x20|\x20html}}\x0a\x09\x09\x09{{if\x20eq\x20\"Packages\"\x20$key.Name}}\x0a\x09\x09\x09\x09<a\x20href=\"/{{$pkg_html}}\">{{html\x20.Path}}</a>\x0a\x09\x09\x09{{else}}\x0a\x09\x09\x09\x09{{$doc_html\x20:=\x20docLink\x20.Path\x20.Name|\x20html}}\x0a\x09\x09\x09\x09<a\x20href=\"/{{$pkg_html}}\">{{html\x20.Package}}</a>.<a\x20href=\"{{$doc_html}}\">{{.Name}}</a>\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09{{if\x20.Doc}}\x0a\x09\x09\x09\x09<p>{{comment_html\x20.Doc}}</p>\x0a\x09\x09\x09{{else}}\x0a\x09\x09\x09\x09<p><em>No\x20documentation\x20available</em></p>\x0a\x09\x09\x09{{end}}\x0a\x09\x09{{end}}\x0a\x09{{end}}\x0a{{end}}\x0a",
+ "searchdoc.html": "<!--\x0a\x09Copyright\x202009\x20The\x20Go\x20Authors.\x20All\x20rights\x20reserved.\x0a\x09Use\x20of\x20this\x20source\x20code\x20is\x20governed\x20by\x20a\x20BSD-style\x0a\x09license\x20that\x20can\x20be\x20found\x20in\x20the\x20LICENSE\x20file.\x0a-->\x0a{{range\x20$key,\x20$val\x20:=\x20.Idents}}\x0a\x09{{if\x20$val}}\x0a\x09\x09<h2\x20id=\"{{$key.Name}}\">{{$key.Name}}</h2>\x0a\x09\x09{{range\x20$val}}\x0a\x09\x09\x09{{$pkg_html\x20:=\x20pkgLink\x20.Path\x20|\x20html}}\x0a\x09\x09\x09{{if\x20eq\x20\"Packages\"\x20$key.Name}}\x0a\x09\x09\x09\x09<a\x20href=\"/{{$pkg_html}}\">{{html\x20.Path}}</a>\x0a\x09\x09\x09{{else}}\x0a\x09\x09\x09\x09{{$doc_html\x20:=\x20docLink\x20.Path\x20.Name|\x20html}}\x0a\x09\x09\x09\x09<a\x20href=\"/{{$pkg_html}}\">{{html\x20.Package}}</a>.<a\x20href=\"{{$doc_html}}\">{{.Name}}</a>\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09{{if\x20.Doc}}\x0a\x09\x09\x09\x09<p>{{comment_html\x20$\x20.Doc}}</p>\x0a\x09\x09\x09{{else}}\x0a\x09\x09\x09\x09<p><em>No\x20documentation\x20available</em></p>\x0a\x09\x09\x09{{end}}\x0a\x09\x09{{end}}\x0a\x09{{end}}\x0a{{end}}\x0a",
"searchtxt.html": "<!--\x0a\x09Copyright\x202009\x20The\x20Go\x20Authors.\x20All\x20rights\x20reserved.\x0a\x09Use\x20of\x20this\x20source\x20code\x20is\x20governed\x20by\x20a\x20BSD-style\x0a\x09license\x20that\x20can\x20be\x20found\x20in\x20the\x20LICENSE\x20file.\x0a-->\x0a{{$query_url\x20:=\x20urlquery\x20.Query}}\x0a{{with\x20.Textual}}\x0a\x09{{if\x20$.Complete}}\x0a\x09\x09<h2\x20id=\"Textual\">{{html\x20$.Found}}\x20textual\x20occurrences</h2>\x0a\x09{{else}}\x0a\x09\x09<h2\x20id=\"Textual\">More\x20than\x20{{html\x20$.Found}}\x20textual\x20occurrences</h2>\x0a\x09\x09<p>\x0a\x09\x09<span\x20class=\"alert\"\x20style=\"font-size:120%\">Not\x20all\x20files\x20or\x20lines\x20containing\x20\"{{html\x20$.Query}}\"\x20are\x20shown.</span>\x0a\x09\x09</p>\x0a\x09{{end}}\x0a\x09<p>\x0a\x09<table\x20class=\"layout\">\x0a\x09{{range\x20.}}\x0a\x09\x09{{$file\x20:=\x20.Filename}}\x0a\x09\x09<tr>\x0a\x09\x09<td\x20align=\"left\"\x20valign=\"top\">\x0a\x09\x09<a\x20href=\"{{queryLink\x20$file\x20$query_url\x200}}\">{{$file}}</a>:\x0a\x09\x09</td>\x0a\x09\x09<td\x20align=\"left\"\x20width=\"4\"></td>\x0a\x09\x09<th\x20align=\"left\"\x20valign=\"top\">{{len\x20.Lines}}</th>\x0a\x09\x09<td\x20align=\"left\"\x20width=\"4\"></td>\x0a\x09\x09<td\x20align=\"left\">\x0a\x09\x09{{range\x20.Lines}}\x0a\x09\x09\x09<a\x20href=\"{{queryLink\x20$file\x20$query_url\x20.}}\">{{html\x20.}}</a>\x0a\x09\x09{{end}}\x0a\x09\x09{{if\x20not\x20$.Complete}}\x0a\x09\x09\x09...\x0a\x09\x09{{end}}\x0a\x09\x09</td>\x0a\x09\x09</tr>\x0a\x09{{end}}\x0a\x09{{if\x20not\x20$.Complete}}\x0a\x09\x09<tr><td\x20align=\"left\">...</td></tr>\x0a\x09{{end}}\x0a\x09</table>\x0a\x09</p>\x0a{{end}}\x0a",
diff --git a/godoc/tohtml_go119.go b/godoc/tohtml_go119.go
new file mode 100644
index 000000000..6dbf7212b
--- /dev/null
+++ b/godoc/tohtml_go119.go
@@ -0,0 +1,17 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.19
+// +build go1.19
+
+package godoc
+
+import (
+ "bytes"
+ "go/doc"
+)
+
+func godocToHTML(buf *bytes.Buffer, pkg *doc.Package, comment string) {
+ buf.Write(pkg.HTML(comment))
+}
diff --git a/godoc/tohtml_other.go b/godoc/tohtml_other.go
new file mode 100644
index 000000000..a1dcf2e19
--- /dev/null
+++ b/godoc/tohtml_other.go
@@ -0,0 +1,17 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.19
+// +build !go1.19
+
+package godoc
+
+import (
+ "bytes"
+ "go/doc"
+)
+
+func godocToHTML(buf *bytes.Buffer, pkg *doc.Package, comment string) {
+ doc.ToHTML(buf, comment, nil)
+}
diff --git a/godoc/util/throttle.go b/godoc/util/throttle.go
index 53d9ba621..7852a3284 100644
--- a/godoc/util/throttle.go
+++ b/godoc/util/throttle.go
@@ -8,7 +8,6 @@ import "time"
// A Throttle permits throttling of a goroutine by
// calling the Throttle method repeatedly.
-//
type Throttle struct {
f float64 // f = (1-r)/r for 0 < r < 1
dt time.Duration // minimum run time slice; >= 0
@@ -27,7 +26,6 @@ type Throttle struct {
// approx. 60% of the time, and sleeps approx. 40% of the time.
// Values of r < 0 or r > 1 are clamped down to values between 0 and 1.
// Values of dt < 0 are set to 0.
-//
func NewThrottle(r float64, dt time.Duration) *Throttle {
var f float64
switch {
@@ -49,7 +47,6 @@ func NewThrottle(r float64, dt time.Duration) *Throttle {
// accumulated run (tr) and sleep times (ts) approximates the value 1/(1-r)
// where r is the throttle value. Throttle returns immediately (w/o sleeping)
// if less than tm ns have passed since the last call to Throttle.
-//
func (p *Throttle) Throttle() {
if p.f < 0 {
select {} // always sleep
diff --git a/godoc/vfs/namespace.go b/godoc/vfs/namespace.go
index 32c82599d..23dd97943 100644
--- a/godoc/vfs/namespace.go
+++ b/godoc/vfs/namespace.go
@@ -97,7 +97,6 @@ const debugNS = false
// mount table entries always have old == "/src/pkg"). The 'old' field is
// useful to callers, because they receive just a []mountedFS and not any
// other indication of which mount point was found.
-//
type NameSpace map[string][]mountedFS
// A mountedFS handles requests for path by replacing
@@ -294,7 +293,6 @@ var startTime = time.Now()
// to find that subdirectory, because we've mounted d:\Work1 and d:\Work2
// there. So if we don't see "src" in the directory listing for c:\Go, we add an
// entry for it before returning.
-//
func (ns NameSpace) ReadDir(path string) ([]os.FileInfo, error) {
path = ns.clean(path)
diff --git a/godoc/vfs/zipfs/zipfs.go b/godoc/vfs/zipfs/zipfs.go
index a82febec9..14c9820a1 100644
--- a/godoc/vfs/zipfs/zipfs.go
+++ b/godoc/vfs/zipfs/zipfs.go
@@ -7,14 +7,14 @@
//
// Assumptions:
//
-// - The file paths stored in the zip file must use a slash ('/') as path
-// separator; and they must be relative (i.e., they must not start with
-// a '/' - this is usually the case if the file was created w/o special
-// options).
-// - The zip file system treats the file paths found in the zip internally
-// like absolute paths w/o a leading '/'; i.e., the paths are considered
-// relative to the root of the file system.
-// - All path arguments to file system methods must be absolute paths.
+// - The file paths stored in the zip file must use a slash ('/') as path
+// separator; and they must be relative (i.e., they must not start with
+// a '/' - this is usually the case if the file was created w/o special
+// options).
+// - The zip file system treats the file paths found in the zip internally
+// like absolute paths w/o a leading '/'; i.e., the paths are considered
+// relative to the root of the file system.
+// - All path arguments to file system methods must be absolute paths.
package zipfs // import "golang.org/x/tools/godoc/vfs/zipfs"
import (
diff --git a/gopls/README.md b/gopls/README.md
index 9afc2e48c..56d15921a 100644
--- a/gopls/README.md
+++ b/gopls/README.md
@@ -5,56 +5,57 @@
`gopls` (pronounced "Go please") is the official Go [language server] developed
by the Go team. It provides IDE features to any [LSP]-compatible editor.
-<!--TODO(rstambler): Add gifs here.-->
+<!--TODO(rfindley): Add gifs here.-->
You should not need to interact with `gopls` directly--it will be automatically
integrated into your editor. The specific features and settings vary slightly
-by editor, so we recommend that you proceed to the [documentation for your
-editor](#editors) below.
+by editor, so we recommend that you proceed to the
+[documentation for your editor](#editors) below.
## Editors
To get started with `gopls`, install an LSP plugin in your editor of choice.
-* [VSCode](https://github.com/golang/vscode-go/blob/master/README.md)
+* [VS Code](https://github.com/golang/vscode-go/blob/master/README.md)
* [Vim / Neovim](doc/vim.md)
* [Emacs](doc/emacs.md)
* [Atom](https://github.com/MordFustang21/ide-gopls)
* [Sublime Text](doc/subl.md)
* [Acme](https://github.com/fhs/acme-lsp)
+* [Lapce](https://github.com/lapce-community/lapce-go)
-If you use `gopls` with an editor that is not on this list, please let us know
-by [filing an issue](#new-issue) or [modifying this documentation](doc/contributing.md).
+If you use `gopls` with an editor that is not on this list, please send us a CL
+[updating this documentation](doc/contributing.md).
## Installation
For the most part, you should not need to install or update `gopls`. Your
editor should handle that step for you.
-If you do want to get the latest stable version of `gopls`, change to any
-directory that is both outside of your `GOPATH` and outside of a module (a temp
-directory is fine), and run:
+If you do want to get the latest stable version of `gopls`, run the following
+command:
```sh
go install golang.org/x/tools/gopls@latest
```
-Learn more in the [advanced installation
-instructions](doc/advanced.md#installing-unreleased-versions).
+Learn more in the
+[advanced installation instructions](doc/advanced.md#installing-unreleased-versions).
+
+Learn more about gopls releases in the [release policy](doc/releases.md).
## Setting up your workspace
-`gopls` supports both Go module and GOPATH modes, but if you are working with
-multiple modules or uncommon project layouts, you will need to specifically
-configure your workspace. See the [Workspace document](doc/workspace.md) for
-information on supported workspace layouts.
+`gopls` supports both Go module, multi-module and GOPATH modes. See the
+[workspace documentation](doc/workspace.md) for information on supported
+workspace layouts.
## Configuration
You can configure `gopls` to change your editor experience or view additional
debugging information. Configuration options will be made available by your
editor, so see your [editor's instructions](#editors) for specific details. A
-full list of `gopls` settings can be found in the [Settings documentation](doc/settings.md).
+full list of `gopls` settings can be found in the [settings documentation](doc/settings.md).
### Environment variables
@@ -62,27 +63,36 @@ full list of `gopls` settings can be found in the [Settings documentation](doc/s
variables you configure. Some editors, such as VS Code, allow users to
selectively override the values of some environment variables.
-## Troubleshooting
+## Support Policy
-If you are having issues with `gopls`, please follow the steps described in the
-[troubleshooting guide](doc/troubleshooting.md).
+Gopls is maintained by engineers on the
+[Go tools team](https://github.com/orgs/golang/teams/tools-team/members),
+who actively monitor the
+[Go](https://github.com/golang/go/issues?q=is%3Aissue+is%3Aopen+label%3Agopls)
+and
+[VS Code Go](https://github.com/golang/vscode-go/issues) issue trackers.
-## Supported Go versions and build systems
+### Supported Go versions
`gopls` follows the
[Go Release Policy](https://golang.org/doc/devel/release.html#policy),
meaning that it officially supports the last 2 major Go releases. Per
-[issue #39146](golang.org/issues/39146), we attempt to maintain best-effort
+[issue #39146](https://go.dev/issues/39146), we attempt to maintain best-effort
support for the last 4 major Go releases, but this support extends only to not
breaking the build and avoiding easily fixable regressions.
-The following table shows the final gopls version that supports being built at
-a given Go Version. Any more recent Go versions missing from this table can
-still be built with the latest version of gopls.
+In the context of this discussion, gopls "supports" a Go version if it supports
+being built with that Go version as well as integrating with the `go` command
+of that Go version.
-| Go Version | Final gopls Version With Support |
-| ----------- | -------------------------------- |
+The following table shows the final gopls version that supports a given Go
+version. Go releases more recent than any in the table can be used with any
+version of gopls.
+
+| Go Version | Final gopls version with support (without warnings) |
+| ----------- | --------------------------------------------------- |
| Go 1.12 | [gopls@v0.7.5](https://github.com/golang/tools/releases/tag/gopls%2Fv0.7.5) |
+| Go 1.15 | [gopls@v0.9.5](https://github.com/golang/tools/releases/tag/gopls%2Fv0.9.5) |
Our extended support is enforced via [continuous integration with older Go
versions](doc/contributing.md#ci). This legacy Go CI may not block releases:
@@ -90,13 +100,22 @@ test failures may be skipped rather than fixed. Furthermore, if a regression in
an older Go version causes irreconcilable CI failures, we may drop support for
that Go version in CI if it is 3 or 4 Go versions old.
-`gopls` currently only supports the `go` command, so if you are using a
-different build system, `gopls` will not work well. Bazel is not officially
-supported, but Bazel support is in development (see
-[bazelbuild/rules_go#512](https://github.com/bazelbuild/rules_go/issues/512)).
+### Supported build systems
+
+`gopls` currently only supports the `go` command, so if you are using
+a different build system, `gopls` will not work well. Bazel is not officially
+supported, but may be made to work with an appropriately configured
+`go/packages` driver. See
+[bazelbuild/rules_go#512](https://github.com/bazelbuild/rules_go/issues/512)
+for more information.
You can follow [these instructions](https://github.com/bazelbuild/rules_go/wiki/Editor-setup)
to configure your `gopls` to work with Bazel.
+### Troubleshooting
+
+If you are having issues with `gopls`, please follow the steps described in the
+[troubleshooting guide](doc/troubleshooting.md).
+
## Additional information
* [Features](doc/features.md)
@@ -110,4 +129,3 @@ to configure your `gopls` to work with Bazel.
[language server]: https://langserver.org
[LSP]: https://microsoft.github.io/language-server-protocol/
-[Gophers Slack]: https://gophers.slack.com/
diff --git a/gopls/api-diff/api_diff.go b/gopls/api-diff/api_diff.go
index 167bdbd1b..8bb54186b 100644
--- a/gopls/api-diff/api_diff.go
+++ b/gopls/api-diff/api_diff.go
@@ -13,262 +13,77 @@ import (
"encoding/json"
"flag"
"fmt"
- "io"
- "io/ioutil"
"log"
"os"
"os/exec"
- "path/filepath"
- "strings"
- "golang.org/x/tools/internal/gocommand"
- difflib "golang.org/x/tools/internal/lsp/diff"
- "golang.org/x/tools/internal/lsp/diff/myers"
- "golang.org/x/tools/internal/lsp/source"
+ "github.com/google/go-cmp/cmp"
+ "golang.org/x/tools/gopls/internal/lsp/source"
)
-var (
- previousVersionFlag = flag.String("prev", "", "version to compare against")
- versionFlag = flag.String("version", "", "version being tagged, or current version if omitted")
-)
+const usage = `api-diff <previous version> [<current version>]
+
+Compare the API of two gopls versions. If the second argument is provided, it
+will be used as the new version to compare against. Otherwise, compare against
+the current API.
+`
func main() {
flag.Parse()
- apiDiff, err := diffAPI(*versionFlag, *previousVersionFlag)
+ if flag.NArg() < 1 || flag.NArg() > 2 {
+ fmt.Fprint(os.Stderr, usage)
+ os.Exit(2)
+ }
+
+ oldVer := flag.Arg(0)
+ newVer := ""
+ if flag.NArg() == 2 {
+ newVer = flag.Arg(1)
+ }
+
+ apiDiff, err := diffAPI(oldVer, newVer)
if err != nil {
log.Fatal(err)
}
- fmt.Printf(`
-%s
-`, apiDiff)
-}
-
-type JSON interface {
- String() string
- Write(io.Writer)
+ fmt.Println("\n" + apiDiff)
}
-func diffAPI(version, prev string) (string, error) {
+func diffAPI(oldVer, newVer string) (string, error) {
ctx := context.Background()
- previousApi, err := loadAPI(ctx, prev)
+ previousAPI, err := loadAPI(ctx, oldVer)
if err != nil {
- return "", fmt.Errorf("load previous API: %v", err)
+ return "", fmt.Errorf("loading %s: %v", oldVer, err)
}
- var currentApi *source.APIJSON
- if version == "" {
- currentApi = source.GeneratedAPIJSON
+ var currentAPI *source.APIJSON
+ if newVer == "" {
+ currentAPI = source.GeneratedAPIJSON
} else {
var err error
- currentApi, err = loadAPI(ctx, version)
+ currentAPI, err = loadAPI(ctx, newVer)
if err != nil {
- return "", fmt.Errorf("load current API: %v", err)
- }
- }
-
- b := &strings.Builder{}
- if err := diff(b, previousApi.Commands, currentApi.Commands, "command", func(c *source.CommandJSON) string {
- return c.Command
- }, diffCommands); err != nil {
- return "", fmt.Errorf("diff commands: %v", err)
- }
- if diff(b, previousApi.Analyzers, currentApi.Analyzers, "analyzer", func(a *source.AnalyzerJSON) string {
- return a.Name
- }, diffAnalyzers); err != nil {
- return "", fmt.Errorf("diff analyzers: %v", err)
- }
- if err := diff(b, previousApi.Lenses, currentApi.Lenses, "code lens", func(l *source.LensJSON) string {
- return l.Lens
- }, diffLenses); err != nil {
- return "", fmt.Errorf("diff lenses: %v", err)
- }
- for key, prev := range previousApi.Options {
- current, ok := currentApi.Options[key]
- if !ok {
- panic(fmt.Sprintf("unexpected option key: %s", key))
- }
- if err := diff(b, prev, current, "option", func(o *source.OptionJSON) string {
- return o.Name
- }, diffOptions); err != nil {
- return "", fmt.Errorf("diff options (%s): %v", key, err)
+ return "", fmt.Errorf("loading %s: %v", newVer, err)
}
}
- return b.String(), nil
+ return cmp.Diff(previousAPI, currentAPI), nil
}
-func diff[T JSON](b *strings.Builder, previous, new []T, kind string, uniqueKey func(T) string, diffFunc func(*strings.Builder, T, T)) error {
- prevJSON := collect(previous, uniqueKey)
- newJSON := collect(new, uniqueKey)
- for k := range newJSON {
- delete(prevJSON, k)
- }
- for _, deleted := range prevJSON {
- b.WriteString(fmt.Sprintf("%s %s was deleted.\n", kind, deleted))
- }
- for _, prev := range previous {
- delete(newJSON, uniqueKey(prev))
- }
- if len(newJSON) > 0 {
- b.WriteString("The following commands were added:\n")
- for _, n := range newJSON {
- n.Write(b)
- b.WriteByte('\n')
- }
- }
- previousMap := collect(previous, uniqueKey)
- for _, current := range new {
- prev, ok := previousMap[uniqueKey(current)]
- if !ok {
- continue
- }
- c, p := bytes.NewBuffer(nil), bytes.NewBuffer(nil)
- prev.Write(p)
- current.Write(c)
- if diff, err := diffStr(p.String(), c.String()); err == nil && diff != "" {
- diffFunc(b, prev, current)
- b.WriteString("\n--\n")
- }
- }
- return nil
-}
-
-func collect[T JSON](args []T, uniqueKey func(T) string) map[string]T {
- m := map[string]T{}
- for _, arg := range args {
- m[uniqueKey(arg)] = arg
- }
- return m
-}
-
-var goCmdRunner = gocommand.Runner{}
-
func loadAPI(ctx context.Context, version string) (*source.APIJSON, error) {
- tmpGopath, err := ioutil.TempDir("", "gopath*")
- if err != nil {
- return nil, fmt.Errorf("temp dir: %v", err)
- }
- defer os.RemoveAll(tmpGopath)
+ ver := fmt.Sprintf("golang.org/x/tools/gopls@%s", version)
+ cmd := exec.Command("go", "run", ver, "api-json")
- exampleDir := fmt.Sprintf("%s/src/example.com", tmpGopath)
- if err := os.MkdirAll(exampleDir, 0776); err != nil {
- return nil, fmt.Errorf("mkdir: %v", err)
- }
+ stdout := &bytes.Buffer{}
+ stderr := &bytes.Buffer{}
+ cmd.Stdout = stdout
+ cmd.Stderr = stderr
- if stdout, err := goCmdRunner.Run(ctx, gocommand.Invocation{
- Verb: "mod",
- Args: []string{"init", "example.com"},
- WorkingDir: exampleDir,
- Env: append(os.Environ(), fmt.Sprintf("GOPATH=%s", tmpGopath)),
- }); err != nil {
- return nil, fmt.Errorf("go mod init failed: %v (stdout: %v)", err, stdout)
- }
- if stdout, err := goCmdRunner.Run(ctx, gocommand.Invocation{
- Verb: "install",
- Args: []string{fmt.Sprintf("golang.org/x/tools/gopls@%s", version)},
- WorkingDir: exampleDir,
- Env: append(os.Environ(), fmt.Sprintf("GOPATH=%s", tmpGopath)),
- }); err != nil {
- return nil, fmt.Errorf("go install failed: %v (stdout: %v)", err, stdout.String())
- }
- cmd := exec.Cmd{
- Path: filepath.Join(tmpGopath, "bin", "gopls"),
- Args: []string{"gopls", "api-json"},
- Dir: tmpGopath,
- }
- out, err := cmd.Output()
- if err != nil {
- return nil, fmt.Errorf("output: %v", err)
+ if err := cmd.Run(); err != nil {
+ return nil, fmt.Errorf("go run failed: %v; stderr:\n%s", err, stderr)
}
apiJson := &source.APIJSON{}
- if err := json.Unmarshal(out, apiJson); err != nil {
+ if err := json.Unmarshal(stdout.Bytes(), apiJson); err != nil {
return nil, fmt.Errorf("unmarshal: %v", err)
}
return apiJson, nil
}
-
-func diffCommands(b *strings.Builder, prev, current *source.CommandJSON) {
- if prev.Title != current.Title {
- b.WriteString(fmt.Sprintf("Title changed from %q to %q\n", prev.Title, current.Title))
- }
- if prev.Doc != current.Doc {
- b.WriteString(fmt.Sprintf("Documentation changed from %q to %q\n", prev.Doc, current.Doc))
- }
- if prev.ArgDoc != current.ArgDoc {
- b.WriteString("Arguments changed from " + formatBlock(prev.ArgDoc) + " to " + formatBlock(current.ArgDoc))
- }
- if prev.ResultDoc != current.ResultDoc {
- b.WriteString("Results changed from " + formatBlock(prev.ResultDoc) + " to " + formatBlock(current.ResultDoc))
- }
-}
-
-func diffAnalyzers(b *strings.Builder, previous, current *source.AnalyzerJSON) {
- b.WriteString(fmt.Sprintf("Changes to analyzer %s:\n\n", current.Name))
- if previous.Doc != current.Doc {
- b.WriteString(fmt.Sprintf("Documentation changed from %q to %q\n", previous.Doc, current.Doc))
- }
- if previous.Default != current.Default {
- b.WriteString(fmt.Sprintf("Default changed from %v to %v\n", previous.Default, current.Default))
- }
-}
-
-func diffLenses(b *strings.Builder, previous, current *source.LensJSON) {
- b.WriteString(fmt.Sprintf("Changes to code lens %s:\n\n", current.Title))
- if previous.Title != current.Title {
- b.WriteString(fmt.Sprintf("Title changed from %q to %q\n", previous.Title, current.Title))
- }
- if previous.Doc != current.Doc {
- b.WriteString(fmt.Sprintf("Documentation changed from %q to %q\n", previous.Doc, current.Doc))
- }
-}
-
-func diffOptions(b *strings.Builder, previous, current *source.OptionJSON) {
- b.WriteString(fmt.Sprintf("Changes to option %s:\n\n", current.Name))
- if previous.Doc != current.Doc {
- diff, err := diffStr(previous.Doc, current.Doc)
- if err != nil {
- panic(err)
- }
- b.WriteString(fmt.Sprintf("Documentation changed:\n%s\n", diff))
- }
- if previous.Default != current.Default {
- b.WriteString(fmt.Sprintf("Default changed from %q to %q\n", previous.Default, current.Default))
- }
- if previous.Hierarchy != current.Hierarchy {
- b.WriteString(fmt.Sprintf("Categorization changed from %q to %q\n", previous.Hierarchy, current.Hierarchy))
- }
- if previous.Status != current.Status {
- b.WriteString(fmt.Sprintf("Status changed from %q to %q\n", previous.Status, current.Status))
- }
- if previous.Type != current.Type {
- b.WriteString(fmt.Sprintf("Type changed from %q to %q\n", previous.Type, current.Type))
- }
- // TODO(rstambler): Handle possibility of same number but different keys/values.
- if len(previous.EnumKeys.Keys) != len(current.EnumKeys.Keys) {
- b.WriteString(fmt.Sprintf("Enum keys changed from\n%s\n to \n%s\n", previous.EnumKeys, current.EnumKeys))
- }
- if len(previous.EnumValues) != len(current.EnumValues) {
- b.WriteString(fmt.Sprintf("Enum values changed from\n%s\n to \n%s\n", previous.EnumValues, current.EnumValues))
- }
-}
-
-func formatBlock(str string) string {
- if str == "" {
- return `""`
- }
- return "\n```\n" + str + "\n```\n"
-}
-
-func diffStr(before, after string) (string, error) {
- // Add newlines to avoid newline messages in diff.
- if before == after {
- return "", nil
- }
- before += "\n"
- after += "\n"
- d, err := myers.ComputeEdits("", before, after)
- if err != nil {
- return "", err
- }
- return fmt.Sprintf("%q", difflib.ToUnified("previous", "current", before, d)), err
-}
diff --git a/gopls/doc/advanced.md b/gopls/doc/advanced.md
index c4e9eabef..9f9267c1c 100644
--- a/gopls/doc/advanced.md
+++ b/gopls/doc/advanced.md
@@ -44,42 +44,26 @@ editor.
## Working with generic code
-Gopls has beta support for editing generic Go code, as defined by the type
-parameters proposal ([golang/go#43651](https://golang.org/issues/43651)) and
-type set addendum ([golang/go#45346](https://golang.org/issues/45346)).
-
-To enable this support, you need to **build gopls with a version of Go that
-supports generics**. The easiest way to do this is by installing the Go 1.18 Beta
-as described at
-[Tutorial: Getting started with generics#prerequisites](https://go.dev/doc/tutorial/generics),
-and then using this Go version to build gopls:
+Gopls has support for editing generic Go code. To enable this support, you need
+to **install gopls using Go 1.18 or later**. The easiest way to do this is by
+[installing Go 1.18+](https://go.dev/dl) and then using this Go version to
+install gopls:
```
-$ go1.18beta2 install golang.org/x/tools/gopls@latest
+$ go install golang.org/x/tools/gopls@latest
```
-When using the Go 1.18, it is strongly recommended that you install the latest
-version of `gopls`, or the latest **unstable** version as
-[described above](#installing-unreleased-versions).
-
-You also need to make `gopls` select the beta version of `go` (in `<GOROOT>/go/bin`
-where GOROOT is the location reported by `go1.18beta2 env GOROOT`) by adding
-it to your `PATH` or by configuring your editor.
-
-The `gopls` built with these instructions understands generic code. To actually
-run the generic code you develop, you must also use the beta version of the Go
-compiler. For example:
+It is strongly recommended that you install the latest version of `gopls`, or
+the latest **unstable** version as [described above](#installing-unreleased-versions).
+We're still working on improving our generics support.
-```
-$ go1.18beta2 run .
-```
+The `gopls` built with these instructions understands generic code. See the
+[generics tutorial](https://go.dev/doc/tutorial/generics) for more information
+on how to use generics in Go!
### Known issues
* [`staticcheck`](https://github.com/golang/tools/blob/master/gopls/doc/settings.md#staticcheck-bool)
on generic code is not supported yet.
-please follow the [v0.8.0](https://github.com/golang/go/milestone/244) milestone
-to see the list of go1.18-related known issues and our progress.
-
[Go project]: https://go.googlesource.com/go
diff --git a/gopls/doc/analyzers.md b/gopls/doc/analyzers.md
index 07f846db8..a1134bee3 100644
--- a/gopls/doc/analyzers.md
+++ b/gopls/doc/analyzers.md
@@ -48,7 +48,7 @@ check for common mistakes involving boolean operators
## **buildtag**
-check that +build tags are well-formed and correctly located
+check //go:build and // +build directives
**Enabled by default.**
@@ -108,6 +108,35 @@ errors is discouraged.
**Enabled by default.**
+## **directive**
+
+check Go toolchain directives such as //go:debug
+
+This analyzer checks for problems with known Go toolchain directives
+in all Go source files in a package directory, even those excluded by
+//go:build constraints, and all non-Go source files too.
+
+For //go:debug (see https://go.dev/doc/godebug), the analyzer checks
+that the directives are placed only in Go source files, only above the
+package comment, and only in package main or *_test.go files.
+
+Support for other known directives may be added in the future.
+
+This analyzer does not check //go:build, which is handled by the
+buildtag analyzer.
+
+
+**Enabled by default.**
+
+## **embed**
+
+check for //go:embed directive import
+
+This analyzer checks that the embed package is imported when source code contains //go:embed comment directives.
+The embed package must be imported for //go:embed directives to function.import _ "embed".
+
+**Enabled by default.**
+
## **errorsas**
report passing non-pointer or non-error values to errors.As
@@ -122,7 +151,7 @@ of the second argument is not a pointer to a type implementing error.
find structs that would use less memory if their fields were sorted
This analyzer find structs that can be rearranged to use less memory, and provides
-a suggested edit with the optimal order.
+a suggested edit with the most compact order.
Note that there are two different diagnostics reported. One checks struct size,
and the other reports "pointer bytes" used. Pointer bytes is how many bytes of the
@@ -141,6 +170,11 @@ has 24 pointer bytes because it has to scan further through the *uint32.
has 8 because it can stop immediately after the string pointer.
+Be aware that the most compact order is not always the most efficient.
+In rare cases it may cause two variables each updated by its own goroutine
+to occupy the same CPU cache line, inducing a form of memory contention
+known as "false sharing" that slows down both goroutines.
+
**Disabled by default. Enable it by setting `"analyses": {"fieldalignment": true}`.**
@@ -204,19 +238,60 @@ inferred from function arguments, or from other type arguments:
check references to loop variables from within nested functions
-This analyzer checks for references to loop variables from within a
-function literal inside the loop body. It checks only instances where
-the function literal is called in a defer or go statement that is the
-last statement in the loop body, as otherwise we would need whole
-program analysis.
-
-For example:
-
- for i, v := range s {
- go func() {
- println(i, v) // not what you might expect
- }()
- }
+This analyzer reports places where a function literal references the
+iteration variable of an enclosing loop, and the loop calls the function
+in such a way (e.g. with go or defer) that it may outlive the loop
+iteration and possibly observe the wrong value of the variable.
+
+In this example, all the deferred functions run after the loop has
+completed, so all observe the final value of v.
+
+ for _, v := range list {
+ defer func() {
+ use(v) // incorrect
+ }()
+ }
+
+One fix is to create a new variable for each iteration of the loop:
+
+ for _, v := range list {
+ v := v // new var per iteration
+ defer func() {
+ use(v) // ok
+ }()
+ }
+
+The next example uses a go statement and has a similar problem.
+In addition, it has a data race because the loop updates v
+concurrent with the goroutines accessing it.
+
+ for _, v := range elem {
+ go func() {
+ use(v) // incorrect, and a data race
+ }()
+ }
+
+A fix is the same as before. The checker also reports problems
+in goroutines started by golang.org/x/sync/errgroup.Group.
+A hard-to-spot variant of this form is common in parallel tests:
+
+ func Test(t *testing.T) {
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ t.Parallel()
+ use(test) // incorrect, and a data race
+ })
+ }
+ }
+
+The t.Parallel() call causes the rest of the function to execute
+concurrent with the loop.
+
+The analyzer reports references only in the last statement,
+as it is not deep enough to understand the effects of subsequent
+statements that might render the reference benign.
+("Last statement" is defined recursively in compound
+statements such as if, switch, and select.)
See: https://golang.org/doc/go_faq.html#closures_and_goroutines
@@ -483,6 +558,17 @@ for the conventions that are enforced for Tests, Benchmarks, and Examples.
**Enabled by default.**
+## **timeformat**
+
+check for calls of (time.Time).Format or time.Parse with 2006-02-01
+
+The timeformat checker looks for time formats with the 2006-02-01 (yyyy-dd-mm)
+format. Internationally, "yyyy-dd-mm" does not occur in common calendar date
+standards, and so it is more likely that 2006-01-02 (yyyy-mm-dd) was intended.
+
+
+**Enabled by default.**
+
## **unmarshal**
report passing non-pointer or non-interface values to unmarshal
@@ -643,6 +729,15 @@ func <>(inferred parameters) {
**Enabled by default.**
+## **unusedvariable**
+
+check for unused variables
+
+The unusedvariable analyzer suggests fixes for unused variables errors.
+
+
+**Disabled by default. Enable it by setting `"analyses": {"unusedvariable": true}`.**
+
## **fillstruct**
note incomplete struct initializations
diff --git a/gopls/doc/commands.md b/gopls/doc/commands.md
index 65fa5c5ff..e7b6afb1d 100644
--- a/gopls/doc/commands.md
+++ b/gopls/doc/commands.md
@@ -100,6 +100,26 @@ Args:
}
```
+### **Get known vulncheck result**
+Identifier: `gopls.fetch_vulncheck_result`
+
+Fetch the result of latest vulnerability check (`govulncheck`).
+
+Args:
+
+```
+{
+ // The file URI.
+ "URI": string,
+}
+```
+
+Result:
+
+```
+map[golang.org/x/tools/gopls/internal/lsp/protocol.DocumentURI]*golang.org/x/tools/gopls/internal/govulncheck.Result
+```
+
### **Toggle gc_details**
Identifier: `gopls.gc_details`
@@ -127,20 +147,6 @@ Args:
}
```
-### **Generate gopls.mod**
-Identifier: `gopls.generate_gopls_mod`
-
-(Re)generate the gopls.mod file for a workspace.
-
-Args:
-
-```
-{
- // The file URI.
- "URI": string,
-}
-```
-
### **go get a package**
Identifier: `gopls.go_get_package`
@@ -216,6 +222,23 @@ Result:
}
```
+### **fetch memory statistics**
+Identifier: `gopls.mem_stats`
+
+Call runtime.GC multiple times and return memory statistics as reported by
+runtime.MemStats.
+
+This command is used for benchmarking, and may change in the future.
+
+Result:
+
+```
+{
+ "HeapAlloc": uint64,
+ "HeapInUse": uint64,
+}
+```
+
### **Regenerate cgo**
Identifier: `gopls.regenerate_cgo`
@@ -247,26 +270,26 @@ Args:
}
```
-### **Run test(s)**
-Identifier: `gopls.run_tests`
+### **Reset go.mod diagnostics**
+Identifier: `gopls.reset_go_mod_diagnostics`
-Runs `go test` for a specific set of test or benchmark functions.
+Reset diagnostics in the go.mod file of a module.
Args:
```
{
- // The test file containing the tests to run.
- "URI": string,
- // Specific test names to run, e.g. TestFoo.
- "Tests": []string,
- // Specific benchmarks to run, e.g. BenchmarkFoo.
- "Benchmarks": []string,
+ "URIArg": {
+ "URI": string,
+ },
+ // Optional: source of the diagnostics to reset.
+ // If not set, all resettable go.mod diagnostics will be cleared.
+ "DiagnosticSource": string,
}
```
-### **Run vulncheck (experimental)**
-Identifier: `gopls.run_vulncheck_exp`
+### **Run govulncheck.**
+Identifier: `gopls.run_govulncheck`
Run vulnerability check (`govulncheck`).
@@ -274,8 +297,8 @@ Args:
```
{
- // Dir is the directory from which vulncheck will run from.
- "Dir": string,
+ // Any document in the directory from which govulncheck will run.
+ "URI": string,
// Package pattern. E.g. "", ".", "./...".
"Pattern": string,
}
@@ -285,18 +308,27 @@ Result:
```
{
- "Vuln": []{
- "ID": string,
- "Details": string,
- "Aliases": []string,
- "Symbol": string,
- "PkgPath": string,
- "ModPath": string,
- "URL": string,
- "CurrentVersion": string,
- "FixedVersion": string,
- "CallStacks": [][]golang.org/x/tools/internal/lsp/command.StackEntry,
- },
+ // Token holds the progress token for LSP workDone reporting of the vulncheck
+ // invocation.
+ "Token": interface{},
+}
+```
+
+### **Run test(s)**
+Identifier: `gopls.run_tests`
+
+Runs `go test` for a specific set of test or benchmark functions.
+
+Args:
+
+```
+{
+ // The test file containing the tests to run.
+ "URI": string,
+ // Specific test names to run, e.g. TestFoo.
+ "Tests": []string,
+ // Specific benchmarks to run, e.g. BenchmarkFoo.
+ "Benchmarks": []string,
}
```
diff --git a/gopls/doc/contributing.md b/gopls/doc/contributing.md
index 99e452922..367280f53 100644
--- a/gopls/doc/contributing.md
+++ b/gopls/doc/contributing.md
@@ -18,8 +18,8 @@ claiming it.
## Getting started
-Most of the `gopls` logic is actually in the `golang.org/x/tools/internal/lsp`
-directory, so you are most likely to develop in the golang.org/x/tools module.
+Most of the `gopls` logic is in the `golang.org/x/tools/gopls/internal/lsp`
+directory.
## Build
diff --git a/gopls/doc/design/implementation.md b/gopls/doc/design/implementation.md
index a8f7f0b0e..859ec1c12 100644
--- a/gopls/doc/design/implementation.md
+++ b/gopls/doc/design/implementation.md
@@ -29,7 +29,7 @@ Package | Description
[internal/lsp/cache] | the cache layer
[internal/lsp/cmd] | the gopls command line layer
[internal/lsp/debug] | features to aid in debugging gopls
-[internal/lsp/protocol] | the lsp protocol layer and wire format
+[internal/lsp/protocol] | the types of LSP request and response messages
[internal/lsp/source] | the core feature implementations
[internal/span] | a package for dealing with source file locations
[internal/memoize] | a function invocation cache used to reduce the work done
diff --git a/gopls/doc/design/integrating.md b/gopls/doc/design/integrating.md
index 845f9eb00..ba2cc07aa 100644
--- a/gopls/doc/design/integrating.md
+++ b/gopls/doc/design/integrating.md
@@ -20,7 +20,7 @@ Many LSP requests pass position or range information. This is described in the [
This means that integrators will need to calculate UTF-16 based column offsets.
-[`golang.org/x/tools/internal/span`] has the code to do this in go.
+[`golang.org/x/tools/gopls/internal/span`] has the code to do this in go.
[#31080] tracks making `span` and other useful packages non-internal.
## Edits
@@ -61,9 +61,9 @@ For instance, files that are needed to do correct type checking are modified by
Monitoring files inside gopls directly has a lot of awkward problems, but the [LSP specification] has methods that allow gopls to request that the client notify it of file system changes, specifically [`workspace/didChangeWatchedFiles`].
This is currently being added to gopls by a community member, and tracked in [#31553]
-[InitializeResult]: https://pkg.go.dev/golang.org/x/tools/internal/lsp/protocol#InitializeResult
-[ServerCapabilities]: https://pkg.go.dev/golang.org/x/tools/internal/lsp/protocol#ServerCapabilities
-[`golang.org/x/tools/internal/span`]: https://pkg.go.dev/golang.org/x/tools/internal/span#NewPoint
+[InitializeResult]: https://pkg.go.dev/golang.org/x/tools/gopls/internal/lsp/protocol#InitializeResult
+[ServerCapabilities]: https://pkg.go.dev/golang.org/x/tools/gopls/internal/lsp/protocol#ServerCapabilities
+[`golang.org/x/tools/gopls/internal/span`]: https://pkg.go.dev/golang.org/x/tools/internal/span#NewPoint
[LSP specification]: https://microsoft.github.io/language-server-protocol/specifications/specification-3-14/
[lsp-response]: https://github.com/Microsoft/language-server-protocol/blob/gh-pages/_specifications/specification-3-14.md#response-message
diff --git a/gopls/doc/generate.go b/gopls/doc/generate.go
index e63653de6..d674bfce4 100644
--- a/gopls/doc/generate.go
+++ b/gopls/doc/generate.go
@@ -20,6 +20,7 @@ import (
"io"
"io/ioutil"
"os"
+ "os/exec"
"path/filepath"
"reflect"
"regexp"
@@ -32,47 +33,71 @@ import (
"github.com/jba/printsrc"
"golang.org/x/tools/go/ast/astutil"
"golang.org/x/tools/go/packages"
- "golang.org/x/tools/internal/lsp/command"
- "golang.org/x/tools/internal/lsp/command/commandmeta"
- "golang.org/x/tools/internal/lsp/mod"
- "golang.org/x/tools/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/lsp/command"
+ "golang.org/x/tools/gopls/internal/lsp/command/commandmeta"
+ "golang.org/x/tools/gopls/internal/lsp/mod"
+ "golang.org/x/tools/gopls/internal/lsp/safetoken"
+ "golang.org/x/tools/gopls/internal/lsp/source"
)
func main() {
- if _, err := doMain("..", true); err != nil {
+ if _, err := doMain(true); err != nil {
fmt.Fprintf(os.Stderr, "Generation failed: %v\n", err)
os.Exit(1)
}
}
-func doMain(baseDir string, write bool) (bool, error) {
+func doMain(write bool) (bool, error) {
api, err := loadAPI()
if err != nil {
return false, err
}
- if ok, err := rewriteFile(filepath.Join(baseDir, "internal/lsp/source/api_json.go"), api, write, rewriteAPI); !ok || err != nil {
+ sourceDir, err := pkgDir("golang.org/x/tools/gopls/internal/lsp/source")
+ if err != nil {
+ return false, err
+ }
+
+ if ok, err := rewriteFile(filepath.Join(sourceDir, "api_json.go"), api, write, rewriteAPI); !ok || err != nil {
+ return ok, err
+ }
+
+ goplsDir, err := pkgDir("golang.org/x/tools/gopls")
+ if err != nil {
+ return false, err
+ }
+
+ if ok, err := rewriteFile(filepath.Join(goplsDir, "doc", "settings.md"), api, write, rewriteSettings); !ok || err != nil {
return ok, err
}
- if ok, err := rewriteFile(filepath.Join(baseDir, "gopls/doc/settings.md"), api, write, rewriteSettings); !ok || err != nil {
+ if ok, err := rewriteFile(filepath.Join(goplsDir, "doc", "commands.md"), api, write, rewriteCommands); !ok || err != nil {
return ok, err
}
- if ok, err := rewriteFile(filepath.Join(baseDir, "gopls/doc/commands.md"), api, write, rewriteCommands); !ok || err != nil {
+ if ok, err := rewriteFile(filepath.Join(goplsDir, "doc", "analyzers.md"), api, write, rewriteAnalyzers); !ok || err != nil {
return ok, err
}
- if ok, err := rewriteFile(filepath.Join(baseDir, "gopls/doc/analyzers.md"), api, write, rewriteAnalyzers); !ok || err != nil {
+ if ok, err := rewriteFile(filepath.Join(goplsDir, "doc", "inlayHints.md"), api, write, rewriteInlayHints); !ok || err != nil {
return ok, err
}
return true, nil
}
+// pkgDir returns the directory corresponding to the import path pkgPath.
+func pkgDir(pkgPath string) (string, error) {
+ out, err := exec.Command("go", "list", "-f", "{{.Dir}}", pkgPath).Output()
+ if err != nil {
+ return "", err
+ }
+ return strings.TrimSpace(string(out)), nil
+}
+
func loadAPI() (*source.APIJSON, error) {
pkgs, err := packages.Load(
&packages.Config{
Mode: packages.NeedTypes | packages.NeedTypesInfo | packages.NeedSyntax | packages.NeedDeps,
},
- "golang.org/x/tools/internal/lsp/source",
+ "golang.org/x/tools/gopls/internal/lsp/source",
)
if err != nil {
return nil, err
@@ -102,6 +127,7 @@ func loadAPI() (*source.APIJSON, error) {
} {
api.Analyzers = append(api.Analyzers, loadAnalyzers(m)...)
}
+ api.Hints = loadHints(source.AllInlayHints)
for _, category := range []reflect.Value{
reflect.ValueOf(defaults.UserOptions),
} {
@@ -146,6 +172,14 @@ func loadAPI() (*source.APIJSON, error) {
Default: def,
})
}
+ case "hints":
+ for _, a := range api.Hints {
+ opt.EnumKeys.Keys = append(opt.EnumKeys.Keys, source.EnumKey{
+ Name: fmt.Sprintf("%q", a.Name),
+ Doc: a.Doc,
+ Default: strconv.FormatBool(a.Default),
+ })
+ }
}
}
}
@@ -488,6 +522,23 @@ func loadAnalyzers(m map[string]*source.Analyzer) []*source.AnalyzerJSON {
return json
}
+func loadHints(m map[string]*source.Hint) []*source.HintJSON {
+ var sorted []string
+ for _, h := range m {
+ sorted = append(sorted, h.Name)
+ }
+ sort.Strings(sorted)
+ var json []*source.HintJSON
+ for _, name := range sorted {
+ h := m[name]
+ json = append(json, &source.HintJSON{
+ Name: h.Name,
+ Doc: h.Doc,
+ })
+ }
+ return json
+}
+
func lowerFirst(x string) string {
if x == "" {
return x
@@ -505,7 +556,7 @@ func upperFirst(x string) string {
func fileForPos(pkg *packages.Package, pos token.Pos) (*ast.File, error) {
fset := pkg.Fset
for _, f := range pkg.Syntax {
- if fset.Position(f.Pos()).Filename == fset.Position(pos).Filename {
+ if safetoken.StartPosition(fset, f.Pos()).Filename == safetoken.StartPosition(fset, pos).Filename {
return f, nil
}
}
@@ -537,7 +588,7 @@ func rewriteFile(file string, api *source.APIJSON, write bool, rewrite func([]by
func rewriteAPI(_ []byte, api *source.APIJSON) ([]byte, error) {
var buf bytes.Buffer
fmt.Fprintf(&buf, "// Code generated by \"golang.org/x/tools/gopls/doc/generate\"; DO NOT EDIT.\n\npackage source\n\nvar GeneratedAPIJSON = ")
- if err := printsrc.NewPrinter("golang.org/x/tools/internal/lsp/source").Fprint(&buf, api); err != nil {
+ if err := printsrc.NewPrinter("golang.org/x/tools/gopls/internal/lsp/source").Fprint(&buf, api); err != nil {
return nil, err
}
return format.Source(buf.Bytes())
@@ -571,7 +622,7 @@ func rewriteSettings(doc []byte, api *source.APIJSON) ([]byte, error) {
writeTitle(section, h.final, level)
for _, opt := range h.options {
header := strMultiply("#", level+1)
- section.Write([]byte(fmt.Sprintf("%s ", header)))
+ fmt.Fprintf(section, "%s ", header)
opt.Write(section)
}
}
@@ -699,6 +750,21 @@ func rewriteAnalyzers(doc []byte, api *source.APIJSON) ([]byte, error) {
return replaceSection(doc, "Analyzers", section.Bytes())
}
+func rewriteInlayHints(doc []byte, api *source.APIJSON) ([]byte, error) {
+ section := bytes.NewBuffer(nil)
+ for _, hint := range api.Hints {
+ fmt.Fprintf(section, "## **%v**\n\n", hint.Name)
+ fmt.Fprintf(section, "%s\n\n", hint.Doc)
+ switch hint.Default {
+ case true:
+ fmt.Fprintf(section, "**Enabled by default.**\n\n")
+ case false:
+ fmt.Fprintf(section, "**Disabled by default. Enable it by setting `\"hints\": {\"%s\": true}`.**\n\n", hint.Name)
+ }
+ }
+ return replaceSection(doc, "Hints", section.Bytes())
+}
+
func replaceSection(doc []byte, sectionName string, replacement []byte) ([]byte, error) {
re := regexp.MustCompile(fmt.Sprintf(`(?s)<!-- BEGIN %v.* -->\n(.*?)<!-- END %v.* -->`, sectionName, sectionName))
idx := re.FindSubmatchIndex(doc)
diff --git a/gopls/doc/generate_test.go b/gopls/doc/generate_test.go
index 137a646cd..5dc97d2dd 100644
--- a/gopls/doc/generate_test.go
+++ b/gopls/doc/generate_test.go
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build go1.16
-// +build go1.16
-
package main
import (
@@ -14,9 +11,12 @@ import (
)
func TestGenerated(t *testing.T) {
+ // This test fails on 1.18 Kokoro for unknown reasons; in any case, it
+ // suffices to run this test on any builder.
+ testenv.NeedsGo1Point(t, 19)
testenv.NeedsGoBuild(t) // This is a lie. We actually need the source code.
- ok, err := doMain("../..", false)
+ ok, err := doMain(false)
if err != nil {
t.Fatal(err)
}
diff --git a/gopls/doc/inlayHints.md b/gopls/doc/inlayHints.md
new file mode 100644
index 000000000..2ae9a2828
--- /dev/null
+++ b/gopls/doc/inlayHints.md
@@ -0,0 +1,80 @@
+# Hints
+
+This document describes the inlay hints that `gopls` uses inside the editor.
+
+<!-- BEGIN Hints: DO NOT MANUALLY EDIT THIS SECTION -->
+## **assignVariableTypes**
+
+Enable/disable inlay hints for variable types in assign statements:
+```go
+ i/* int*/, j/* int*/ := 0, len(r)-1
+```
+
+**Disabled by default. Enable it by setting `"hints": {"assignVariableTypes": true}`.**
+
+## **compositeLiteralFields**
+
+Enable/disable inlay hints for composite literal field names:
+```go
+ {/*in: */"Hello, world", /*want: */"dlrow ,olleH"}
+```
+
+**Disabled by default. Enable it by setting `"hints": {"compositeLiteralFields": true}`.**
+
+## **compositeLiteralTypes**
+
+Enable/disable inlay hints for composite literal types:
+```go
+ for _, c := range []struct {
+ in, want string
+ }{
+ /*struct{ in string; want string }*/{"Hello, world", "dlrow ,olleH"},
+ }
+```
+
+**Disabled by default. Enable it by setting `"hints": {"compositeLiteralTypes": true}`.**
+
+## **constantValues**
+
+Enable/disable inlay hints for constant values:
+```go
+ const (
+ KindNone Kind = iota/* = 0*/
+ KindPrint/* = 1*/
+ KindPrintf/* = 2*/
+ KindErrorf/* = 3*/
+ )
+```
+
+**Disabled by default. Enable it by setting `"hints": {"constantValues": true}`.**
+
+## **functionTypeParameters**
+
+Enable/disable inlay hints for implicit type parameters on generic functions:
+```go
+ myFoo/*[int, string]*/(1, "hello")
+```
+
+**Disabled by default. Enable it by setting `"hints": {"functionTypeParameters": true}`.**
+
+## **parameterNames**
+
+Enable/disable inlay hints for parameter names:
+```go
+ parseInt(/* str: */ "123", /* radix: */ 8)
+```
+
+**Disabled by default. Enable it by setting `"hints": {"parameterNames": true}`.**
+
+## **rangeVariableTypes**
+
+Enable/disable inlay hints for variable types in range statements:
+```go
+ for k/* int*/, v/* string*/ := range []string{} {
+ fmt.Println(k, v)
+ }
+```
+
+**Disabled by default. Enable it by setting `"hints": {"rangeVariableTypes": true}`.**
+
+<!-- END Hints: DO NOT MANUALLY EDIT THIS SECTION -->
diff --git a/gopls/doc/releases.md b/gopls/doc/releases.md
new file mode 100644
index 000000000..befb92c39
--- /dev/null
+++ b/gopls/doc/releases.md
@@ -0,0 +1,25 @@
+# Gopls release policy
+
+Gopls releases follow [semver](http://semver.org), with major changes and new
+features introduced only in new minor versions (i.e. versions of the form
+`v*.N.0` for some N). Subsequent patch releases contain only cherry-picked
+fixes or superficial updates.
+
+In order to align with the
+[Go release timeline](https://github.com/golang/go/wiki/Go-Release-Cycle#timeline),
+we aim to release a new minor version of Gopls approximately every three
+months, with patch releases approximately every month, according to the
+following table:
+
+| Month | Version(s) |
+| ---- | ------- |
+| Jan | `v*.<N+0>.0` |
+| Jan-Mar | `v*.<N+0>.*` |
+| Apr | `v*.<N+1>.0` |
+| Apr-Jun | `v*.<N+1>.*` |
+| Jul | `v*.<N+2>.0` |
+| Jul-Sep | `v*.<N+2>.*` |
+| Oct | `v*.<N+3>.0` |
+| Oct-Dec | `v*.<N+3>.*` |
+
+For more background on this policy, see https://go.dev/issue/55267.
diff --git a/gopls/doc/semantictokens.md b/gopls/doc/semantictokens.md
index c9124b796..a1e140d29 100644
--- a/gopls/doc/semantictokens.md
+++ b/gopls/doc/semantictokens.md
@@ -57,7 +57,7 @@ different runes by their Unicode language assignment, or some other Unicode prop
being [confusable](http://www.unicode.org/Public/security/10.0.0/confusables.txt).
Gopls does not come close to either of these principles. Semantic tokens are returned for
-identifiers, keywords, operators, comments, and literals. (Sematic tokens do not
+identifiers, keywords, operators, comments, and literals. (Semantic tokens do not
cover the file. They are not returned for
white space or punctuation, and there is no semantic token for labels.)
The following describes more precisely what gopls
diff --git a/gopls/doc/settings.md b/gopls/doc/settings.md
index 092a3c7cf..52a753910 100644
--- a/gopls/doc/settings.md
+++ b/gopls/doc/settings.md
@@ -1,6 +1,6 @@
# Settings
-<!--TODO: Generate this file from the documentation in golang/org/x/tools/internal/lsp/source/options.go.-->
+<!--TODO: Generate this file from the documentation in golang.org/x/tools/gopls/internal/lsp/source/options.go.-->
This document describes the global settings for `gopls` inside the editor.
The settings block will be called `"gopls"` and contains a collection of
@@ -35,6 +35,7 @@ still be able to independently override specific experimental features.
* [Completion](#completion)
* [Diagnostic](#diagnostic)
* [Documentation](#documentation)
+ * [Inlayhint](#inlayhint)
* [Navigation](#navigation)
### Build
@@ -62,15 +63,19 @@ relative to the workspace folder. They are evaluated in order, and
the last filter that applies to a path controls whether it is included.
The path prefix can be empty, so an initial `-` excludes everything.
+DirectoryFilters also supports the `**` operator to match 0 or more directories.
+
Examples:
-Exclude node_modules: `-node_modules`
+Exclude node_modules at current depth: `-node_modules`
+
+Exclude node_modules at any depth: `-**/node_modules`
Include only project_a: `-` (exclude everything), `+project_a`
Include only project_a, but not node_modules inside it: `-`, `+project_a`, `-project_a/node_modules`
-Default: `["-node_modules"]`.
+Default: `["-**/node_modules"]`.
#### **templateExtensions** *[]string*
@@ -111,29 +116,6 @@ a go.mod file, narrowing the scope to that directory if it exists.
Default: `true`.
-#### **experimentalWorkspaceModule** *bool*
-
-**This setting is experimental and may be deleted.**
-
-experimentalWorkspaceModule opts a user into the experimental support
-for multi-module workspaces.
-
-Default: `false`.
-
-#### **experimentalPackageCacheKey** *bool*
-
-**This setting is experimental and may be deleted.**
-
-experimentalPackageCacheKey controls whether to use a coarser cache key
-for package type information to increase cache hits. This setting removes
-the user's environment, build flags, and working directory from the cache
-key, which should be a safe change as all relevant inputs into the type
-checking pass are already hashed into the key. This is temporarily guarded
-by an experiment because caching behavior is subtle and difficult to
-comprehensively test.
-
-Default: `true`.
-
#### **allowModfileModifications** *bool*
**This setting is experimental and may be deleted.**
@@ -153,16 +135,28 @@ be removed.
Default: `false`.
-#### **experimentalUseInvalidMetadata** *bool*
+#### **standaloneTags** *[]string*
-**This setting is experimental and may be deleted.**
+standaloneTags specifies a set of build constraints that identify
+individual Go source files that make up the entire main package of an
+executable.
-experimentalUseInvalidMetadata enables gopls to fall back on outdated
-package metadata to provide editor features if the go command fails to
-load packages for some reason (like an invalid go.mod file). This will
-eventually be the default behavior, and this setting will be removed.
+A common example of standalone main files is the convention of using the
+directive `//go:build ignore` to denote files that are not intended to be
+included in any package, for example because they are invoked directly by
+the developer using `go run`.
-Default: `false`.
+Gopls considers a file to be a standalone main file if and only if it has
+package name "main" and has a build directive of the exact form
+"//go:build tag" or "// +build tag", where tag is among the list of tags
+configured by this setting. Notably, if the build constraint is more
+complicated than a simple tag (such as the composite constraint
+`//go:build tag && go1.18`), the file is not considered to be a standalone
+main file.
+
+This setting is only supported when gopls is built with Go 1.16 or later.
+
+Default: `["ignore"]`.
### Formatting
@@ -214,6 +208,22 @@ semantic tokens to the client.
Default: `false`.
+#### **noSemanticString** *bool*
+
+**This setting is experimental and may be deleted.**
+
+noSemanticString turns off the sending of the semantic token 'string'
+
+Default: `false`.
+
+#### **noSemanticNumber** *bool*
+
+**This setting is experimental and may be deleted.**
+
+noSemanticNumber turns off the sending of the semantic token 'number'
+
+Default: `false`.
+
#### Completion
##### **usePlaceholders** *bool*
@@ -265,8 +275,8 @@ Default: `true`.
analyses specify analyses that the user would like to enable or disable.
A map of the names of analysis passes that should be enabled/disabled.
-A full list of analyzers that gopls uses can be found
-[here](https://github.com/golang/tools/blob/master/gopls/doc/analyzers.md).
+A full list of analyzers that gopls uses can be found in
+[analyzers.md](https://github.com/golang/tools/blob/master/gopls/doc/analyzers.md).
Example Usage:
@@ -286,6 +296,8 @@ Default: `{}`.
**This setting is experimental and may be deleted.**
staticcheck enables additional analyses from staticcheck.io.
+These analyses are documented on
+[Staticcheck's website](https://staticcheck.io/docs/checks/).
Default: `false`.
@@ -305,6 +317,20 @@ Can contain any of:
Default: `{"bounds":true,"escape":true,"inline":true,"nil":true}`.
+##### **vulncheck** *enum*
+
+**This setting is experimental and may be deleted.**
+
+vulncheck enables vulnerability scanning.
+
+Must be one of:
+
+* `"Imports"`: In Imports mode, `gopls` will report vulnerabilities that affect packages
+directly and indirectly used by the analyzed main module.
+* `"Off"`: Disable vulnerability analysis.
+
+Default: `"Off"`.
+
##### **diagnosticsDelay** *time.Duration*
**This is an advanced setting and should not be configured by most `gopls` users.**
@@ -318,20 +344,6 @@ This option must be set to a valid duration string, for example `"250ms"`.
Default: `"250ms"`.
-##### **experimentalWatchedFileDelay** *time.Duration*
-
-**This setting is experimental and may be deleted.**
-
-experimentalWatchedFileDelay controls the amount of time that gopls waits
-for additional workspace/didChangeWatchedFiles notifications to arrive,
-before processing all such notifications in a single batch. This is
-intended for use by LSP clients that don't support their own batching of
-file system notifications.
-
-This option must be set to a valid duration string, for example `"100ms"`.
-
-Default: `"0s"`.
-
#### Documentation
##### **hoverKind** *enum*
@@ -362,6 +374,9 @@ It might be one of:
If company chooses to use its own `godoc.org`, its address can be used as well.
+Modules matching the GOPRIVATE environment variable will not have
+documentation links in hover.
+
Default: `"pkg.go.dev"`.
##### **linksInHover** *bool*
@@ -370,6 +385,18 @@ linksInHover toggles the presence of links to documentation in hover.
Default: `true`.
+#### Inlayhint
+
+##### **hints** *map[string]bool*
+
+**This setting is experimental and may be deleted.**
+
+hints specify inlay hints that users want to see. A full list of hints
+that gopls uses can be found in
+[inlayHints.md](https://github.com/golang/tools/blob/master/gopls/doc/inlayHints.md).
+
+Default: `{}`.
+
#### Navigation
##### **importShortcut** *enum*
@@ -439,6 +466,17 @@ Default: `false`.
<!-- END User: DO NOT MANUALLY EDIT THIS SECTION -->
+#### **newDiff** *string*
+
+newDiff enables the new diff implementation. If this is "both", for now both
+diffs will be run and statistics will be generated in a file in $TMPDIR. This
+is a risky setting; help in trying it is appreciated. If it is "old" the old
+implementation is used, and if it is "new", just the new implementation is
+used. This setting will eventually be deleted, once gopls has fully migrated to
+the new diff algorithm.
+
+Default: 'both'.
+
## Code Lenses
These are the code lenses that `gopls` currently supports. They can be enabled
@@ -461,6 +499,11 @@ Runs `go generate` for a given directory.
Identifier: `regenerate_cgo`
Regenerates cgo definitions.
+### **Run govulncheck.**
+
+Identifier: `run_govulncheck`
+
+Run vulnerability check (`govulncheck`).
### **Run test(s) (legacy)**
Identifier: `test`
diff --git a/gopls/doc/vim.md b/gopls/doc/vim.md
index 887a246ed..1fa44bfa7 100644
--- a/gopls/doc/vim.md
+++ b/gopls/doc/vim.md
@@ -168,41 +168,32 @@ EOF
### <a href="#neovim-imports" id="neovim-imports">Imports</a>
-To get your imports ordered on save, like `goimports` does, you can define
-a helper function in Lua:
-
-```vim
-lua <<EOF
- -- …
-
- function OrgImports(wait_ms)
- local params = vim.lsp.util.make_range_params()
- params.context = {only = {"source.organizeImports"}}
- local result = vim.lsp.buf_request_sync(0, "textDocument/codeAction", params, wait_ms)
- for _, res in pairs(result or {}) do
- for _, r in pairs(res.result or {}) do
- if r.edit then
- vim.lsp.util.apply_workspace_edit(r.edit)
- else
- vim.lsp.buf.execute_command(r.command)
- end
- end
- end
+Use the following configuration to have your imports organized on save using
+the logic of `goimports`. Note: this requires Neovim v0.7.0 or later.
+
+```lua
+vim.api.nvim_create_autocmd('BufWritePre', {
+ pattern = '*.go',
+ callback = function()
+ vim.lsp.buf.code_action({ context = { only = { 'source.organizeImports' } }, apply = true })
end
-EOF
-
-autocmd BufWritePre *.go lua OrgImports(1000)
+})
```
-(Taken from the [discussion][nvim-lspconfig-imports] on Neovim issue tracker.)
-
### <a href="#neovim-omnifunc" id="neovim-omnifunc">Omnifunc</a>
-To make your <kbd>Ctrl</kbd>+<kbd>x</kbd>,<kbd>Ctrl</kbd>+<kbd>o</kbd> work, add
-this to your `init.vim`:
-
-```vim
-autocmd FileType go setlocal omnifunc=v:lua.vim.lsp.omnifunc
+In Neovim v0.8.1 and later if you don't set the option `omnifunc`, it will auto
+set to `v:lua.vim.lsp.omnifunc`. If you are using an earlier version, you can
+configure it manually:
+
+```lua
+local on_attach = function(client, bufnr)
+ -- Enable completion triggered by <c-x><c-o>
+ vim.api.nvim_buf_set_option(bufnr, 'omnifunc', 'v:lua.vim.lsp.omnifunc')
+end
+require('lspconfig').gopls.setup({
+ on_attach = on_attach
+})
```
### <a href="#neovim-links" id="neovim-links">Additional Links</a>
diff --git a/gopls/doc/workspace.md b/gopls/doc/workspace.md
index 610afbe61..4ff9994f9 100644
--- a/gopls/doc/workspace.md
+++ b/gopls/doc/workspace.md
@@ -34,20 +34,26 @@ your workspace root to the directory containing the `go.work` file.
For example, suppose this repo is checked out into the `$WORK/tools` directory.
We can work on both `golang.org/x/tools` and `golang.org/x/tools/gopls`
-simultaneously by creating a `go.work` file:
+simultaneously by creating a `go.work` file using `go work init`, followed by
+`go work use MODULE_DIRECTORIES...` to add directories containing `go.mod` files to the
+workspace:
-```
+```sh
cd $WORK
go work init
-go work use tools tools/gopls
+go work use ./tools/ ./tools/gopls/
```
...followed by opening the `$WORK` directory in our editor.
-#### Experimental workspace module (Go 1.17 and earlier)
+#### DEPRECATED: Experimental workspace module (Go 1.17 and earlier)
+
+**This feature is deprecated and will be removed in future versions of gopls.
+Please see [issue #52897](https://go.dev/issue/52897) for additional
+information.**
With earlier versions of Go, `gopls` can simulate multi-module workspaces by
-creating a synthetic module requiring the the modules in the workspace root.
+creating a synthetic module requiring the modules in the workspace root.
See [the design document](https://github.com/golang/proposal/blob/master/design/37720-gopls-workspaces.md)
for more information.
diff --git a/gopls/go.mod b/gopls/go.mod
index ed04fd0b7..57b445f5c 100644
--- a/gopls/go.mod
+++ b/gopls/go.mod
@@ -3,25 +3,27 @@ module golang.org/x/tools/gopls
go 1.18
require (
- github.com/google/go-cmp v0.5.7
+ github.com/google/go-cmp v0.5.9
github.com/jba/printsrc v0.2.2
github.com/jba/templatecheck v0.6.0
github.com/sergi/go-diff v1.1.0
- golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3
- golang.org/x/sys v0.0.0-20220209214540-3681064d5158
- golang.org/x/tools v0.1.9
- golang.org/x/vuln v0.0.0-20220324005316-18fd808f5c7f
- honnef.co/go/tools v0.2.2
- mvdan.cc/gofumpt v0.3.0
+ golang.org/x/mod v0.9.0
+ golang.org/x/sync v0.1.0
+ golang.org/x/sys v0.6.0
+ golang.org/x/text v0.8.0
+ golang.org/x/tools v0.6.0
+ golang.org/x/vuln v0.0.0-20230110180137-6ad3e3d07815
+ gopkg.in/yaml.v3 v3.0.1
+ honnef.co/go/tools v0.4.2
+ mvdan.cc/gofumpt v0.4.0
mvdan.cc/xurls/v2 v2.4.0
)
require (
- github.com/BurntSushi/toml v1.0.0 // indirect
- github.com/google/safehtml v0.0.2 // indirect
- golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
- golang.org/x/text v0.3.7 // indirect
- golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
+ github.com/BurntSushi/toml v1.2.1 // indirect
+ github.com/google/safehtml v0.1.0 // indirect
+ golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect
+ golang.org/x/exp/typeparams v0.0.0-20221212164502-fae10dda9338 // indirect
)
replace golang.org/x/tools => ../
diff --git a/gopls/go.sum b/gopls/go.sum
index 759d7eff6..9c7a0afb9 100644
--- a/gopls/go.sum
+++ b/gopls/go.sum
@@ -1,17 +1,24 @@
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU=
-github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
+github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/frankban/quicktest v1.14.2 h1:SPb1KFFmM+ybpEjPUhCCkZOM5xlovT5UbrMvWnXyBns=
-github.com/frankban/quicktest v1.14.2/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps=
-github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
+github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
+github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps=
+github.com/google/go-cmdtest v0.4.1-0.20220921163831-55ab3332a786/go.mod h1:apVn/GCasLZUVpAJ6oWAuyP7Ne7CEsQbTnc0plM3m+o=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
-github.com/google/safehtml v0.0.2 h1:ZOt2VXg4x24bW0m2jtzAOkhoXV0iM8vNKc0paByCZqM=
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/safehtml v0.0.2/go.mod h1:L4KWwDsUJdECRAEpZoBn3O64bQaywRscowZjJAzjHnU=
+github.com/google/safehtml v0.1.0 h1:EwLKo8qawTKfsi0orxcQAZzu07cICaBeFMegAU9eaT8=
+github.com/google/safehtml v0.1.0/go.mod h1:L4KWwDsUJdECRAEpZoBn3O64bQaywRscowZjJAzjHnU=
github.com/jba/printsrc v0.2.2 h1:9OHK51UT+/iMAEBlQIIXW04qvKyF3/vvLuwW/hL8tDU=
github.com/jba/printsrc v0.2.2/go.mod h1:1xULjw59sL0dPdWpDoVU06TIEO/Wnfv6AHRpiElTwYM=
github.com/jba/templatecheck v0.6.0 h1:SwM8C4hlK/YNLsdcXStfnHWE2HKkuTVwy5FKQHt5ro8=
@@ -27,56 +34,68 @@ github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsK
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
-github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg=
github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
+github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
-golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o=
+golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA=
+golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA=
+golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
+golang.org/x/exp/typeparams v0.0.0-20221212164502-fae10dda9338 h1:2O2DON6y3XMJiQRAS1UWU+54aec2uopH3x7MAiqGW6Y=
+golang.org/x/exp/typeparams v0.0.0-20221212164502-fae10dda9338/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs=
+golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
+golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220209214540-3681064d5158 h1:rm+CHSpPEEW2IsXUib1ThaHIjuBVZjxNgSKmBLFfD4c=
-golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211213223007-03aa0b5f6827/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
-golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/vuln v0.0.0-20220324005316-18fd808f5c7f h1:9dMzk88fnONra7zrEalqkRMGa9jMGf9B5mdzhYVyI28=
-golang.org/x/vuln v0.0.0-20220324005316-18fd808f5c7f/go.mod h1:RMxFJYUtgT86cNTSzXJAe51WiT0Vg5LCGePfAGufJCc=
+golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
+golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/vuln v0.0.0-20230110180137-6ad3e3d07815 h1:A9kONVi4+AnuOr1dopsibH6hLi1Huy54cbeJxnq4vmU=
+golang.org/x/vuln v0.0.0-20230110180137-6ad3e3d07815/go.mod h1:XJiVExZgoZfrrxoTeVsFYrSSk1snhfpOEC95JL+A4T0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-honnef.co/go/tools v0.2.2 h1:MNh1AVMyVX23VUHE2O27jm6lNj3vjO5DexS4A1xvnzk=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.2.2/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY=
-mvdan.cc/gofumpt v0.3.0 h1:kTojdZo9AcEYbQYhGuLf/zszYthRdhDNDUi2JKTxas4=
-mvdan.cc/gofumpt v0.3.0/go.mod h1:0+VyGZWleeIj5oostkOex+nDBA0eyavuDnDusAJ8ylo=
+honnef.co/go/tools v0.4.2 h1:6qXr+R5w+ktL5UkwEbPp+fEvfyoMPche6GkOpGHZcLc=
+honnef.co/go/tools v0.4.2/go.mod h1:36ZgoUOrqOk1GxwHhyryEkq8FQWkUO2xGuSMhUCcdvA=
+mvdan.cc/gofumpt v0.4.0 h1:JVf4NN1mIpHogBj7ABpgOyZc65/UUOkKQFkoURsz4MM=
+mvdan.cc/gofumpt v0.4.0/go.mod h1:PljLOHDeZqgS8opHRKLzp2It2VBuSdteAgqUfzMTxlQ=
mvdan.cc/unparam v0.0.0-20211214103731-d0ef000c54e5 h1:Jh3LAeMt1eGpxomyu3jVkmVZWW2MxZ1qIIV2TZ/nRio=
+mvdan.cc/unparam v0.0.0-20211214103731-d0ef000c54e5/go.mod h1:b8RRCBm0eeiWR8cfN88xeq2G5SG3VKGO+5UPWi5FSOY=
mvdan.cc/xurls/v2 v2.4.0 h1:tzxjVAj+wSBmDcF6zBB7/myTy3gX9xvi8Tyr28AuQgc=
mvdan.cc/xurls/v2 v2.4.0/go.mod h1:+GEjq9uNjqs8LQfM9nVnM8rff0OQ5Iash5rzX+N1CSg=
diff --git a/gopls/internal/coverage/coverage.go b/gopls/internal/coverage/coverage.go
index 7bb3640bd..9a7d21994 100644
--- a/gopls/internal/coverage/coverage.go
+++ b/gopls/internal/coverage/coverage.go
@@ -12,9 +12,13 @@
// -o controls where the coverage file is written, defaulting to /tmp/cover.out
// -i coverage-file will generate the report from an existing coverage file
// -v controls verbosity (0: only report coverage, 1: report as each directory is finished,
-// 2: report on each test, 3: more details, 4: too much)
+//
+// 2: report on each test, 3: more details, 4: too much)
+//
// -t tests only tests packages in the given comma-separated list of directories in gopls.
-// The names should start with ., as in ./internal/regtest/bench
+//
+// The names should start with ., as in ./internal/regtest/bench
+//
// -run tests. If set, -run tests is passed on to the go test command.
//
// Despite gopls' use of goroutines, the counts are almost deterministic.
@@ -60,7 +64,7 @@ func main() {
tests = realTestName(tests)
// report coverage for packages under internal/lsp
- parg := "golang.org/x/tools/internal/lsp/..."
+ parg := "golang.org/x/tools/gopls/internal/lsp/..."
accum := []string{}
seen := make(map[string]bool)
@@ -184,7 +188,12 @@ func maybePrint(m result) {
if *verbose > 3 {
fmt.Printf("%s %s %q %.3f\n", m.Action, m.Test, m.Output, m.Elapsed)
}
+ case "pause", "cont":
+ if *verbose > 2 {
+ fmt.Printf("%s %s %.3f\n", m.Action, m.Test, m.Elapsed)
+ }
default:
+ fmt.Printf("%#v\n", m)
log.Fatalf("unknown action %s\n", m.Action)
}
}
@@ -224,7 +233,7 @@ func checkCwd() {
if err != nil {
log.Fatal(err)
}
- // we expect to be a the root of golang.org/x/tools
+ // we expect to be at the root of golang.org/x/tools
cmd := exec.Command("go", "list", "-m", "-f", "{{.Dir}}", "golang.org/x/tools")
buf, err := cmd.Output()
buf = bytes.Trim(buf, "\n \t") // remove \n at end
@@ -239,10 +248,6 @@ func checkCwd() {
if err != nil {
log.Fatalf("expected a gopls directory, %v", err)
}
- _, err = os.Stat("internal/lsp")
- if err != nil {
- log.Fatalf("expected to see internal/lsp, %v", err)
- }
}
func listDirs(dir string) []string {
diff --git a/gopls/internal/govulncheck/semver/semver.go b/gopls/internal/govulncheck/semver/semver.go
new file mode 100644
index 000000000..4ab298d13
--- /dev/null
+++ b/gopls/internal/govulncheck/semver/semver.go
@@ -0,0 +1,51 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18
+// +build go1.18
+
+// Package semver provides shared utilities for manipulating
+// Go semantic versions.
+package semver
+
+import (
+ "regexp"
+ "strings"
+)
+
+// addSemverPrefix adds a 'v' prefix to s if it isn't already prefixed
+// with 'v' or 'go'. This allows us to easily test go-style SEMVER
+// strings against normal SEMVER strings.
+func addSemverPrefix(s string) string {
+ if !strings.HasPrefix(s, "v") && !strings.HasPrefix(s, "go") {
+ return "v" + s
+ }
+ return s
+}
+
+// removeSemverPrefix removes the 'v' or 'go' prefixes from go-style
+// SEMVER strings, for usage in the public vulnerability format.
+func removeSemverPrefix(s string) string {
+ s = strings.TrimPrefix(s, "v")
+ s = strings.TrimPrefix(s, "go")
+ return s
+}
+
+// CanonicalizeSemverPrefix turns a SEMVER string into the canonical
+// representation using the 'v' prefix, as used by the OSV format.
+// Input may be a bare SEMVER ("1.2.3"), Go prefixed SEMVER ("go1.2.3"),
+// or already canonical SEMVER ("v1.2.3").
+func CanonicalizeSemverPrefix(s string) string {
+ return addSemverPrefix(removeSemverPrefix(s))
+}
+
+var (
+ // Regexp for matching go tags. The groups are:
+ // 1 the major.minor version
+ // 2 the patch version, or empty if none
+ // 3 the entire prerelease, if present
+ // 4 the prerelease type ("beta" or "rc")
+ // 5 the prerelease number
+ tagRegexp = regexp.MustCompile(`^go(\d+\.\d+)(\.\d+|)((beta|rc|-pre)(\d+))?$`)
+)
diff --git a/gopls/internal/govulncheck/semver/semver_test.go b/gopls/internal/govulncheck/semver/semver_test.go
new file mode 100644
index 000000000..6daead685
--- /dev/null
+++ b/gopls/internal/govulncheck/semver/semver_test.go
@@ -0,0 +1,28 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18
+// +build go1.18
+
+package semver
+
+import (
+ "testing"
+)
+
+func TestCanonicalize(t *testing.T) {
+ for _, test := range []struct {
+ v string
+ want string
+ }{
+ {"v1.2.3", "v1.2.3"},
+ {"1.2.3", "v1.2.3"},
+ {"go1.2.3", "v1.2.3"},
+ } {
+ got := CanonicalizeSemverPrefix(test.v)
+ if got != test.want {
+ t.Errorf("want %s; got %s", test.want, got)
+ }
+ }
+}
diff --git a/gopls/internal/govulncheck/types.go b/gopls/internal/govulncheck/types.go
new file mode 100644
index 000000000..2881cf4bc
--- /dev/null
+++ b/gopls/internal/govulncheck/types.go
@@ -0,0 +1,37 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package govulncheck
+
+import "time"
+
+// Result is the result of vulnerability scanning.
+type Result struct {
+ // Vulns contains all vulnerabilities that are called or imported by
+ // the analyzed module.
+ Vulns []*Vuln `json:",omitempty"`
+
+ // Mode contains the source of the vulnerability info.
+ // Clients of the gopls.fetch_vulncheck_result command may need
+ // to interpret the vulnerabilities differently based on the
+ // analysis mode. For example, Vuln without callstack traces
+ // indicate a vulnerability that is not used if the result was
+ // from 'govulncheck' analysis mode. On the other hand, Vuln
+ // without callstack traces just implies the package with the
+ // vulnerability is known to the workspace and we do not know
+ // whether the vulnerable symbols are actually used or not.
+ Mode AnalysisMode `json:",omitempty"`
+
+ // AsOf describes when this Result was computed using govulncheck.
+ // It is valid only with the govulncheck analysis mode.
+ AsOf time.Time `json:",omitempty"`
+}
+
+type AnalysisMode string
+
+const (
+ ModeInvalid AnalysisMode = "" // zero value
+ ModeGovulncheck AnalysisMode = "govulncheck"
+ ModeImports AnalysisMode = "imports"
+)
diff --git a/gopls/internal/govulncheck/types_118.go b/gopls/internal/govulncheck/types_118.go
new file mode 100644
index 000000000..7b354d622
--- /dev/null
+++ b/gopls/internal/govulncheck/types_118.go
@@ -0,0 +1,43 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18
+// +build go1.18
+
+// Package govulncheck provides an experimental govulncheck API.
+package govulncheck
+
+import (
+ "golang.org/x/vuln/exp/govulncheck"
+)
+
+var (
+ // Source reports vulnerabilities that affect the analyzed packages.
+ Source = govulncheck.Source
+
+ // DefaultCache constructs cache for a vulnerability database client.
+ DefaultCache = govulncheck.DefaultCache
+)
+
+type (
+ // Config is the configuration for Main.
+ Config = govulncheck.Config
+
+ // Vuln represents a single OSV entry.
+ Vuln = govulncheck.Vuln
+
+ // Module represents a specific vulnerability relevant to a
+ // single module or package.
+ Module = govulncheck.Module
+
+ // Package is a Go package with known vulnerable symbols.
+ Package = govulncheck.Package
+
+ // CallStacks contains a representative call stack for each
+ // vulnerable symbol that is called.
+ CallStack = govulncheck.CallStack
+
+ // StackFrame represents a call stack entry.
+ StackFrame = govulncheck.StackFrame
+)
diff --git a/gopls/internal/govulncheck/types_not118.go b/gopls/internal/govulncheck/types_not118.go
new file mode 100644
index 000000000..faf5a7055
--- /dev/null
+++ b/gopls/internal/govulncheck/types_not118.go
@@ -0,0 +1,126 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.18
+// +build !go1.18
+
+package govulncheck
+
+import (
+ "go/token"
+
+ "golang.org/x/vuln/osv"
+)
+
+// Vuln represents a single OSV entry.
+type Vuln struct {
+ // OSV contains all data from the OSV entry for this vulnerability.
+ OSV *osv.Entry
+
+ // Modules contains all of the modules in the OSV entry where a
+ // vulnerable package is imported by the target source code or binary.
+ //
+ // For example, a module M with two packages M/p1 and M/p2, where only p1
+ // is vulnerable, will appear in this list if and only if p1 is imported by
+ // the target source code or binary.
+ Modules []*Module
+}
+
+func (v *Vuln) IsCalled() bool {
+ return false
+}
+
+// Module represents a specific vulnerability relevant to a single module.
+type Module struct {
+ // Path is the module path of the module containing the vulnerability.
+ //
+ // Importable packages in the standard library will have the path "stdlib".
+ Path string
+
+ // FoundVersion is the module version where the vulnerability was found.
+ FoundVersion string
+
+ // FixedVersion is the module version where the vulnerability was
+ // fixed. If there are multiple fixed versions in the OSV report, this will
+ // be the latest fixed version.
+ //
+ // This is empty if a fix is not available.
+ FixedVersion string
+
+ // Packages contains all the vulnerable packages in OSV entry that are
+ // imported by the target source code or binary.
+ //
+ // For example, given a module M with two packages M/p1 and M/p2, where
+ // both p1 and p2 are vulnerable, p1 and p2 will each only appear in this
+ // list they are individually imported by the target source code or binary.
+ Packages []*Package
+}
+
+// Package is a Go package with known vulnerable symbols.
+type Package struct {
+ // Path is the import path of the package containing the vulnerability.
+ Path string
+
+ // CallStacks contains a representative call stack for each
+ // vulnerable symbol that is called.
+ //
+ // For vulnerabilities found from binary analysis, only CallStack.Symbol
+ // will be provided.
+ //
+ // For non-affecting vulnerabilities reported from the source mode
+ // analysis, this will be empty.
+ CallStacks []CallStack
+}
+
+// CallStacks contains a representative call stack for a vulnerable
+// symbol.
+type CallStack struct {
+ // Symbol is the name of the detected vulnerable function
+ // or method.
+ //
+ // This follows the naming convention in the OSV report.
+ Symbol string
+
+ // Summary is a one-line description of the callstack, used by the
+ // default govulncheck mode.
+ //
+ // Example: module3.main calls github.com/shiyanhui/dht.DHT.Run
+ Summary string
+
+ // Frames contains an entry for each stack in the call stack.
+ //
+ // Frames are sorted starting from the entry point to the
+ // imported vulnerable symbol. The last frame in Frames should match
+ // Symbol.
+ Frames []*StackFrame
+}
+
+// StackFrame represents a call stack entry.
+type StackFrame struct {
+ // PackagePath is the import path.
+ PkgPath string
+
+ // FuncName is the function name.
+ FuncName string
+
+ // RecvType is the fully qualified receiver type,
+ // if the called symbol is a method.
+ //
+ // The client can create the final symbol name by
+ // prepending RecvType to FuncName.
+ RecvType string
+
+ // Position describes an arbitrary source position
+ // including the file, line, and column location.
+ // A Position is valid if the line number is > 0.
+ Position token.Position
+}
+
+func (sf *StackFrame) Name() string {
+ return ""
+}
+
+func (sf *StackFrame) Pos() string {
+ return ""
+}
diff --git a/gopls/internal/govulncheck/util.go b/gopls/internal/govulncheck/util.go
new file mode 100644
index 000000000..544fba2a5
--- /dev/null
+++ b/gopls/internal/govulncheck/util.go
@@ -0,0 +1,36 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18
+// +build go1.18
+
+package govulncheck
+
+import (
+ "golang.org/x/mod/semver"
+ isem "golang.org/x/tools/gopls/internal/govulncheck/semver"
+ "golang.org/x/vuln/osv"
+)
+
+// LatestFixed returns the latest fixed version in the list of affected ranges,
+// or the empty string if there are no fixed versions.
+func LatestFixed(modulePath string, as []osv.Affected) string {
+ v := ""
+ for _, a := range as {
+ if a.Package.Name != modulePath {
+ continue
+ }
+ for _, r := range a.Ranges {
+ if r.Type == osv.TypeSemver {
+ for _, e := range r.Events {
+ if e.Fixed != "" && (v == "" ||
+ semver.Compare(isem.CanonicalizeSemverPrefix(e.Fixed), isem.CanonicalizeSemverPrefix(v)) > 0) {
+ v = e.Fixed
+ }
+ }
+ }
+ }
+ }
+ return v
+}
diff --git a/gopls/internal/govulncheck/vulncache.go b/gopls/internal/govulncheck/vulncache.go
new file mode 100644
index 000000000..a259f0273
--- /dev/null
+++ b/gopls/internal/govulncheck/vulncache.go
@@ -0,0 +1,105 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18
+// +build go1.18
+
+package govulncheck
+
+import (
+ "sync"
+ "time"
+
+ vulnc "golang.org/x/vuln/client"
+ "golang.org/x/vuln/osv"
+)
+
+// inMemoryCache is an implementation of the [client.Cache] interface
+// that "decorates" another instance of that interface to provide
+// an additional layer of (memory-based) caching.
+type inMemoryCache struct {
+ mu sync.Mutex
+ underlying vulnc.Cache
+ db map[string]*db
+}
+
+var _ vulnc.Cache = &inMemoryCache{}
+
+type db struct {
+ retrieved time.Time
+ index vulnc.DBIndex
+ entry map[string][]*osv.Entry
+}
+
+// NewInMemoryCache returns a new memory-based cache that decorates
+// the provided cache (file-based, perhaps).
+func NewInMemoryCache(underlying vulnc.Cache) *inMemoryCache {
+ return &inMemoryCache{
+ underlying: underlying,
+ db: make(map[string]*db),
+ }
+}
+
+func (c *inMemoryCache) lookupDBLocked(dbName string) *db {
+ cached := c.db[dbName]
+ if cached == nil {
+ cached = &db{entry: make(map[string][]*osv.Entry)}
+ c.db[dbName] = cached
+ }
+ return cached
+}
+
+// ReadIndex returns the index for dbName from the cache, or returns zero values
+// if it is not present.
+func (c *inMemoryCache) ReadIndex(dbName string) (vulnc.DBIndex, time.Time, error) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ cached := c.lookupDBLocked(dbName)
+
+ if cached.retrieved.IsZero() {
+ // First time ReadIndex is called.
+ index, retrieved, err := c.underlying.ReadIndex(dbName)
+ if err != nil {
+ return index, retrieved, err
+ }
+ cached.index, cached.retrieved = index, retrieved
+ }
+ return cached.index, cached.retrieved, nil
+}
+
+// WriteIndex puts the index and retrieved time into the cache.
+func (c *inMemoryCache) WriteIndex(dbName string, index vulnc.DBIndex, retrieved time.Time) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ cached := c.lookupDBLocked(dbName)
+ cached.index, cached.retrieved = index, retrieved
+ // TODO(hyangah): shouldn't we invalidate all cached entries?
+ return c.underlying.WriteIndex(dbName, index, retrieved)
+}
+
+// ReadEntries returns the vulndb entries for path from the cache.
+func (c *inMemoryCache) ReadEntries(dbName, path string) ([]*osv.Entry, error) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ cached := c.lookupDBLocked(dbName)
+ entries, ok := cached.entry[path]
+ if !ok {
+ // cache miss
+ entries, err := c.underlying.ReadEntries(dbName, path)
+ if err != nil {
+ return entries, err
+ }
+ cached.entry[path] = entries
+ }
+ return entries, nil
+}
+
+// WriteEntries puts the entries for path into the cache.
+func (c *inMemoryCache) WriteEntries(dbName, path string, entries []*osv.Entry) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ cached := c.lookupDBLocked(dbName)
+ cached.entry[path] = entries
+ return c.underlying.WriteEntries(dbName, path, entries)
+}
diff --git a/gopls/internal/hooks/analysis.go b/gopls/internal/hooks/analysis.go
deleted file mode 100644
index f1d166b09..000000000
--- a/gopls/internal/hooks/analysis.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.15
-// +build go1.15
-
-package hooks
-
-import (
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "honnef.co/go/tools/analysis/lint"
- "honnef.co/go/tools/quickfix"
- "honnef.co/go/tools/simple"
- "honnef.co/go/tools/staticcheck"
- "honnef.co/go/tools/stylecheck"
-)
-
-func updateAnalyzers(options *source.Options) {
- mapSeverity := func(severity lint.Severity) protocol.DiagnosticSeverity {
- switch severity {
- case lint.SeverityError:
- return protocol.SeverityError
- case lint.SeverityDeprecated:
- // TODO(dh): in LSP, deprecated is a tag, not a severity.
- // We'll want to support this once we enable SA5011.
- return protocol.SeverityWarning
- case lint.SeverityWarning:
- return protocol.SeverityWarning
- case lint.SeverityInfo:
- return protocol.SeverityInformation
- case lint.SeverityHint:
- return protocol.SeverityHint
- default:
- return protocol.SeverityWarning
- }
- }
- add := func(analyzers []*lint.Analyzer, skip map[string]struct{}) {
- for _, a := range analyzers {
- if _, ok := skip[a.Analyzer.Name]; ok {
- continue
- }
-
- enabled := !a.Doc.NonDefault
- options.AddStaticcheckAnalyzer(a.Analyzer, enabled, mapSeverity(a.Doc.Severity))
- }
- }
-
- add(simple.Analyzers, nil)
- add(staticcheck.Analyzers, map[string]struct{}{
- // This check conflicts with the vet printf check (golang/go#34494).
- "SA5009": {},
- // This check relies on facts from dependencies, which
- // we don't currently compute.
- "SA5011": {},
- })
- add(stylecheck.Analyzers, nil)
- add(quickfix.Analyzers, nil)
-}
diff --git a/gopls/internal/hooks/analysis_115.go b/gopls/internal/hooks/analysis_115.go
deleted file mode 100644
index 187e52218..000000000
--- a/gopls/internal/hooks/analysis_115.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.15
-// +build !go1.15
-
-package hooks
-
-import "golang.org/x/tools/internal/lsp/source"
-
-func updateAnalyzers(_ *source.Options) {}
diff --git a/gopls/internal/hooks/analysis_116.go b/gopls/internal/hooks/analysis_116.go
new file mode 100644
index 000000000..de58632ba
--- /dev/null
+++ b/gopls/internal/hooks/analysis_116.go
@@ -0,0 +1,14 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.19
+// +build !go1.19
+
+package hooks
+
+import "golang.org/x/tools/gopls/internal/lsp/source"
+
+func updateAnalyzers(options *source.Options) {
+ options.StaticcheckSupported = false
+}
diff --git a/gopls/internal/hooks/analysis_119.go b/gopls/internal/hooks/analysis_119.go
new file mode 100644
index 000000000..1f81d7be6
--- /dev/null
+++ b/gopls/internal/hooks/analysis_119.go
@@ -0,0 +1,62 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.19
+// +build go1.19
+
+package hooks
+
+import (
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "honnef.co/go/tools/analysis/lint"
+ "honnef.co/go/tools/quickfix"
+ "honnef.co/go/tools/simple"
+ "honnef.co/go/tools/staticcheck"
+ "honnef.co/go/tools/stylecheck"
+)
+
+func updateAnalyzers(options *source.Options) {
+ options.StaticcheckSupported = true
+
+ mapSeverity := func(severity lint.Severity) protocol.DiagnosticSeverity {
+ switch severity {
+ case lint.SeverityError:
+ return protocol.SeverityError
+ case lint.SeverityDeprecated:
+ // TODO(dh): in LSP, deprecated is a tag, not a severity.
+ // We'll want to support this once we enable SA5011.
+ return protocol.SeverityWarning
+ case lint.SeverityWarning:
+ return protocol.SeverityWarning
+ case lint.SeverityInfo:
+ return protocol.SeverityInformation
+ case lint.SeverityHint:
+ return protocol.SeverityHint
+ default:
+ return protocol.SeverityWarning
+ }
+ }
+ add := func(analyzers []*lint.Analyzer, skip map[string]struct{}) {
+ for _, a := range analyzers {
+ if _, ok := skip[a.Analyzer.Name]; ok {
+ continue
+ }
+
+ enabled := !a.Doc.NonDefault
+ options.AddStaticcheckAnalyzer(a.Analyzer, enabled, mapSeverity(a.Doc.Severity))
+ }
+ }
+
+ add(simple.Analyzers, nil)
+ add(staticcheck.Analyzers, map[string]struct{}{
+ // This check conflicts with the vet printf check (golang/go#34494).
+ "SA5009": {},
+ // This check relies on facts from dependencies, which
+ // we don't currently compute.
+ "SA5011": {},
+ })
+ add(stylecheck.Analyzers, nil)
+ add(quickfix.Analyzers, nil)
+}
diff --git a/gopls/internal/hooks/diff.go b/gopls/internal/hooks/diff.go
index a307ba77f..f7fec5a7b 100644
--- a/gopls/internal/hooks/diff.go
+++ b/gopls/internal/hooks/diff.go
@@ -5,37 +5,165 @@
package hooks
import (
+ "encoding/json"
"fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "path/filepath"
+ "runtime"
+ "sync"
+ "time"
"github.com/sergi/go-diff/diffmatchpatch"
- "golang.org/x/tools/internal/lsp/diff"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/internal/bug"
+ "golang.org/x/tools/internal/diff"
)
-func ComputeEdits(uri span.URI, before, after string) (edits []diff.TextEdit, err error) {
+// structure for saving information about diffs
+// while the new code is being rolled out
+type diffstat struct {
+ Before, After int
+ Oldedits, Newedits int
+ Oldtime, Newtime time.Duration
+ Stack string
+ Msg string `json:",omitempty"` // for errors
+ Ignored int `json:",omitempty"` // numbr of skipped records with 0 edits
+}
+
+var (
+ ignoredMu sync.Mutex
+ ignored int // counter of diff requests on equal strings
+
+ diffStatsOnce sync.Once
+ diffStats *os.File // never closed
+)
+
+// save writes a JSON record of statistics about diff requests to a temporary file.
+func (s *diffstat) save() {
+ diffStatsOnce.Do(func() {
+ f, err := ioutil.TempFile("", "gopls-diff-stats-*")
+ if err != nil {
+ log.Printf("can't create diff stats temp file: %v", err) // e.g. disk full
+ return
+ }
+ diffStats = f
+ })
+ if diffStats == nil {
+ return
+ }
+
+ // diff is frequently called with equal strings,
+ // so we count repeated instances but only print every 15th.
+ ignoredMu.Lock()
+ if s.Oldedits == 0 && s.Newedits == 0 {
+ ignored++
+ if ignored < 15 {
+ ignoredMu.Unlock()
+ return
+ }
+ }
+ s.Ignored = ignored
+ ignored = 0
+ ignoredMu.Unlock()
+
+ // Record the name of the file in which diff was called.
+ // There aren't many calls, so only the base name is needed.
+ if _, file, line, ok := runtime.Caller(2); ok {
+ s.Stack = fmt.Sprintf("%s:%d", filepath.Base(file), line)
+ }
+ x, err := json.Marshal(s)
+ if err != nil {
+ log.Fatalf("internal error marshalling JSON: %v", err)
+ }
+ fmt.Fprintf(diffStats, "%s\n", x)
+}
+
+// disaster is called when the diff algorithm panics or produces a
+// diff that cannot be applied. It saves the broken input in a
+// new temporary file and logs the file name, which is returned.
+func disaster(before, after string) string {
+ // We use the pid to salt the name, not os.TempFile,
+ // so that each process creates at most one file.
+ // One is sufficient for a bug report.
+ filename := fmt.Sprintf("%s/gopls-diff-bug-%x", os.TempDir(), os.Getpid())
+
+ // We use NUL as a separator: it should never appear in Go source.
+ data := before + "\x00" + after
+
+ if err := ioutil.WriteFile(filename, []byte(data), 0600); err != nil {
+ log.Printf("failed to write diff bug report: %v", err)
+ return ""
+ }
+
+ bug.Reportf("Bug detected in diff algorithm! Please send file %s to the maintainers of gopls if you are comfortable sharing its contents.", filename)
+
+ return filename
+}
+
+// BothDiffs edits calls both the new and old diffs, checks that the new diffs
+// change before into after, and attempts to preserve some statistics.
+func BothDiffs(before, after string) (edits []diff.Edit) {
+ // The new diff code contains a lot of internal checks that panic when they
+ // fail. This code catches the panics, or other failures, tries to save
+ // the failing example (and it would ask the user to send it back to us, and
+ // changes options.newDiff to 'old', if only we could figure out how.)
+ stat := diffstat{Before: len(before), After: len(after)}
+ now := time.Now()
+ oldedits := ComputeEdits(before, after)
+ stat.Oldedits = len(oldedits)
+ stat.Oldtime = time.Since(now)
+ defer func() {
+ if r := recover(); r != nil {
+ disaster(before, after)
+ edits = oldedits
+ }
+ }()
+ now = time.Now()
+ newedits := diff.Strings(before, after)
+ stat.Newedits = len(newedits)
+ stat.Newtime = time.Now().Sub(now)
+ got, err := diff.Apply(before, newedits)
+ if err != nil || got != after {
+ stat.Msg += "FAIL"
+ disaster(before, after)
+ stat.save()
+ return oldedits
+ }
+ stat.save()
+ return newedits
+}
+
+// ComputeEdits computes a diff using the github.com/sergi/go-diff implementation.
+func ComputeEdits(before, after string) (edits []diff.Edit) {
// The go-diff library has an unresolved panic (see golang/go#278774).
// TODO(rstambler): Remove the recover once the issue has been fixed
// upstream.
defer func() {
if r := recover(); r != nil {
- edits = nil
- err = fmt.Errorf("unable to compute edits for %s: %s", uri.Filename(), r)
+ bug.Reportf("unable to compute edits: %s", r)
+ // Report one big edit for the whole file.
+ edits = []diff.Edit{{
+ Start: 0,
+ End: len(before),
+ New: after,
+ }}
}
}()
diffs := diffmatchpatch.New().DiffMain(before, after, true)
- edits = make([]diff.TextEdit, 0, len(diffs))
+ edits = make([]diff.Edit, 0, len(diffs))
offset := 0
for _, d := range diffs {
- start := span.NewPoint(0, 0, offset)
+ start := offset
switch d.Type {
case diffmatchpatch.DiffDelete:
offset += len(d.Text)
- edits = append(edits, diff.TextEdit{Span: span.New(uri, start, span.NewPoint(0, 0, offset))})
+ edits = append(edits, diff.Edit{Start: start, End: offset})
case diffmatchpatch.DiffEqual:
offset += len(d.Text)
case diffmatchpatch.DiffInsert:
- edits = append(edits, diff.TextEdit{Span: span.New(uri, start, span.Point{}), NewText: d.Text})
+ edits = append(edits, diff.Edit{Start: start, End: start, New: d.Text})
}
}
- return edits, nil
+ return edits
}
diff --git a/gopls/internal/hooks/diff_test.go b/gopls/internal/hooks/diff_test.go
index d979be78d..a46bf3b2d 100644
--- a/gopls/internal/hooks/diff_test.go
+++ b/gopls/internal/hooks/diff_test.go
@@ -2,15 +2,32 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package hooks_test
+package hooks
import (
+ "io/ioutil"
+ "os"
"testing"
- "golang.org/x/tools/gopls/internal/hooks"
- "golang.org/x/tools/internal/lsp/diff/difftest"
+ "golang.org/x/tools/internal/diff/difftest"
)
func TestDiff(t *testing.T) {
- difftest.DiffTest(t, hooks.ComputeEdits)
+ difftest.DiffTest(t, ComputeEdits)
+}
+
+func TestDisaster(t *testing.T) {
+ a := "This is a string,(\u0995) just for basic\nfunctionality"
+ b := "This is another string, (\u0996) to see if disaster will store stuff correctly"
+ fname := disaster(a, b)
+ buf, err := ioutil.ReadFile(fname)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if string(buf) != a+"\x00"+b {
+ t.Error("failed to record original strings")
+ }
+ if err := os.Remove(fname); err != nil {
+ t.Error(err)
+ }
}
diff --git a/gopls/internal/hooks/gen-licenses.sh b/gopls/internal/hooks/gen-licenses.sh
index 7d6bab79f..c35c91260 100755
--- a/gopls/internal/hooks/gen-licenses.sh
+++ b/gopls/internal/hooks/gen-licenses.sh
@@ -27,7 +27,7 @@ mods=$(go list -deps -f '{{with .Module}}{{.Path}}{{end}}' golang.org/x/tools/go
for mod in $mods; do
# Find the license file, either LICENSE or COPYING, and add it to the result.
dir=$(go list -m -f {{.Dir}} $mod)
- license=$(ls -1 $dir | egrep -i '^(LICENSE|COPYING)$')
+ license=$(ls -1 $dir | grep -E -i '^(LICENSE|COPYING)$')
echo "-- $mod $license --" >> $tempfile
echo >> $tempfile
sed 's/^-- / &/' $dir/$license >> $tempfile
diff --git a/gopls/internal/hooks/gofumpt_117.go b/gopls/internal/hooks/gofumpt_117.go
new file mode 100644
index 000000000..718863577
--- /dev/null
+++ b/gopls/internal/hooks/gofumpt_117.go
@@ -0,0 +1,13 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.18
+// +build !go1.18
+
+package hooks
+
+import "golang.org/x/tools/gopls/internal/lsp/source"
+
+func updateGofumpt(options *source.Options) {
+}
diff --git a/gopls/internal/hooks/gofumpt_118.go b/gopls/internal/hooks/gofumpt_118.go
new file mode 100644
index 000000000..4eb523261
--- /dev/null
+++ b/gopls/internal/hooks/gofumpt_118.go
@@ -0,0 +1,24 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18
+// +build go1.18
+
+package hooks
+
+import (
+ "context"
+
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "mvdan.cc/gofumpt/format"
+)
+
+func updateGofumpt(options *source.Options) {
+ options.GofumptFormat = func(ctx context.Context, langVersion, modulePath string, src []byte) ([]byte, error) {
+ return format.Source(src, format.Options{
+ LangVersion: langVersion,
+ ModulePath: modulePath,
+ })
+ }
+}
diff --git a/gopls/internal/hooks/hooks.go b/gopls/internal/hooks/hooks.go
index 023aefeab..5624a5eb3 100644
--- a/gopls/internal/hooks/hooks.go
+++ b/gopls/internal/hooks/hooks.go
@@ -8,27 +8,24 @@
package hooks // import "golang.org/x/tools/gopls/internal/hooks"
import (
- "context"
-
- "golang.org/x/tools/gopls/internal/vulncheck"
- "golang.org/x/tools/internal/lsp/source"
- "mvdan.cc/gofumpt/format"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/internal/diff"
"mvdan.cc/xurls/v2"
)
func Options(options *source.Options) {
options.LicensesText = licensesText
if options.GoDiff {
- options.ComputeEdits = ComputeEdits
+ switch options.NewDiff {
+ case "old":
+ options.ComputeEdits = ComputeEdits
+ case "new":
+ options.ComputeEdits = diff.Strings
+ default:
+ options.ComputeEdits = BothDiffs
+ }
}
options.URLRegexp = xurls.Relaxed()
- options.GofumptFormat = func(ctx context.Context, langVersion, modulePath string, src []byte) ([]byte, error) {
- return format.Source(src, format.Options{
- LangVersion: langVersion,
- ModulePath: modulePath,
- })
- }
updateAnalyzers(options)
-
- options.Govulncheck = vulncheck.Govulncheck
+ updateGofumpt(options)
}
diff --git a/gopls/internal/hooks/licenses_test.go b/gopls/internal/hooks/licenses_test.go
index bed229535..a7853cd5f 100644
--- a/gopls/internal/hooks/licenses_test.go
+++ b/gopls/internal/hooks/licenses_test.go
@@ -15,9 +15,10 @@ import (
)
func TestLicenses(t *testing.T) {
- // License text differs for older Go versions because staticcheck isn't
- // supported for those versions.
- testenv.NeedsGo1Point(t, 15)
+ // License text differs for older Go versions because staticcheck or gofumpt
+ // isn't supported for those versions, and this fails for unknown, unrelated
+ // reasons on Kokoro legacy CI.
+ testenv.NeedsGo1Point(t, 19)
if runtime.GOOS != "linux" && runtime.GOOS != "darwin" {
t.Skip("generating licenses only works on Unixes")
diff --git a/internal/lsp/README.md b/gopls/internal/lsp/README.md
index 34a142cbb..34a142cbb 100644
--- a/internal/lsp/README.md
+++ b/gopls/internal/lsp/README.md
diff --git a/gopls/internal/lsp/analysis/embeddirective/embeddirective.go b/gopls/internal/lsp/analysis/embeddirective/embeddirective.go
new file mode 100644
index 000000000..1b504f7cb
--- /dev/null
+++ b/gopls/internal/lsp/analysis/embeddirective/embeddirective.go
@@ -0,0 +1,58 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package embeddirective defines an Analyzer that validates import for //go:embed directive.
+package embeddirective
+
+import (
+ "go/ast"
+ "strings"
+
+ "golang.org/x/tools/go/analysis"
+)
+
+const Doc = `check for //go:embed directive import
+
+This analyzer checks that the embed package is imported when source code contains //go:embed comment directives.
+The embed package must be imported for //go:embed directives to function.import _ "embed".`
+
+var Analyzer = &analysis.Analyzer{
+ Name: "embed",
+ Doc: Doc,
+ Requires: []*analysis.Analyzer{},
+ Run: run,
+ RunDespiteErrors: true,
+}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+ for _, f := range pass.Files {
+ com := hasEmbedDirectiveComment(f)
+ if com != nil {
+ assertEmbedImport(pass, com, f)
+ }
+ }
+ return nil, nil
+}
+
+// Check if the comment contains //go:embed directive.
+func hasEmbedDirectiveComment(f *ast.File) *ast.Comment {
+ for _, cg := range f.Comments {
+ for _, c := range cg.List {
+ if strings.HasPrefix(c.Text, "//go:embed ") {
+ return c
+ }
+ }
+ }
+ return nil
+}
+
+// Verifies that "embed" import exists for //go:embed directive.
+func assertEmbedImport(pass *analysis.Pass, com *ast.Comment, f *ast.File) {
+ for _, imp := range f.Imports {
+ if "\"embed\"" == imp.Path.Value {
+ return
+ }
+ }
+ pass.Report(analysis.Diagnostic{Pos: com.Pos(), End: com.Pos() + 10, Message: "The \"embed\" package must be imported when using go:embed directives."})
+}
diff --git a/gopls/internal/lsp/analysis/embeddirective/embeddirective_test.go b/gopls/internal/lsp/analysis/embeddirective/embeddirective_test.go
new file mode 100644
index 000000000..1165c0bf6
--- /dev/null
+++ b/gopls/internal/lsp/analysis/embeddirective/embeddirective_test.go
@@ -0,0 +1,22 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package embeddirective
+
+import (
+ "testing"
+
+ "golang.org/x/tools/go/analysis/analysistest"
+ "golang.org/x/tools/internal/typeparams"
+)
+
+func Test(t *testing.T) {
+ testdata := analysistest.TestData()
+ tests := []string{"a"}
+ if typeparams.Enabled {
+ tests = append(tests)
+ }
+
+ analysistest.RunWithSuggestedFixes(t, testdata, Analyzer, tests...)
+}
diff --git a/gopls/internal/lsp/analysis/embeddirective/testdata/src/a/a.go b/gopls/internal/lsp/analysis/embeddirective/testdata/src/a/a.go
new file mode 100644
index 000000000..4203f6ce2
--- /dev/null
+++ b/gopls/internal/lsp/analysis/embeddirective/testdata/src/a/a.go
@@ -0,0 +1,13 @@
+package a
+
+import (
+ "fmt"
+)
+
+//go:embed embedText // want "The \"embed\" package must be imported when using go:embed directives"
+var s string
+
+// This is main function
+func main() {
+ fmt.Println(s)
+}
diff --git a/gopls/internal/lsp/analysis/embeddirective/testdata/src/a/b.go b/gopls/internal/lsp/analysis/embeddirective/testdata/src/a/b.go
new file mode 100644
index 000000000..c8c701e66
--- /dev/null
+++ b/gopls/internal/lsp/analysis/embeddirective/testdata/src/a/b.go
@@ -0,0 +1,14 @@
+package a
+
+import (
+ _ "embed"
+ "fmt"
+)
+
+//go:embed embedText // ok
+var s string
+
+// This is main function
+func main() {
+ fmt.Println(s)
+}
diff --git a/gopls/internal/lsp/analysis/embeddirective/testdata/src/a/embedText b/gopls/internal/lsp/analysis/embeddirective/testdata/src/a/embedText
new file mode 100644
index 000000000..5e1c309da
--- /dev/null
+++ b/gopls/internal/lsp/analysis/embeddirective/testdata/src/a/embedText
@@ -0,0 +1 @@
+Hello World \ No newline at end of file
diff --git a/gopls/internal/lsp/analysis/fillreturns/fillreturns.go b/gopls/internal/lsp/analysis/fillreturns/fillreturns.go
new file mode 100644
index 000000000..c8146df2d
--- /dev/null
+++ b/gopls/internal/lsp/analysis/fillreturns/fillreturns.go
@@ -0,0 +1,279 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package fillreturns defines an Analyzer that will attempt to
+// automatically fill in a return statement that has missing
+// values with zero value elements.
+package fillreturns
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/format"
+ "go/types"
+ "regexp"
+ "strings"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/ast/astutil"
+ "golang.org/x/tools/internal/analysisinternal"
+ "golang.org/x/tools/internal/fuzzy"
+ "golang.org/x/tools/internal/typeparams"
+)
+
+const Doc = `suggest fixes for errors due to an incorrect number of return values
+
+This checker provides suggested fixes for type errors of the
+type "wrong number of return values (want %d, got %d)". For example:
+ func m() (int, string, *bool, error) {
+ return
+ }
+will turn into
+ func m() (int, string, *bool, error) {
+ return 0, "", nil, nil
+ }
+
+This functionality is similar to https://github.com/sqs/goreturns.
+`
+
+var Analyzer = &analysis.Analyzer{
+ Name: "fillreturns",
+ Doc: Doc,
+ Requires: []*analysis.Analyzer{},
+ Run: run,
+ RunDespiteErrors: true,
+}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+ info := pass.TypesInfo
+ if info == nil {
+ return nil, fmt.Errorf("nil TypeInfo")
+ }
+
+outer:
+ for _, typeErr := range pass.TypeErrors {
+ // Filter out the errors that are not relevant to this analyzer.
+ if !FixesError(typeErr) {
+ continue
+ }
+ var file *ast.File
+ for _, f := range pass.Files {
+ if f.Pos() <= typeErr.Pos && typeErr.Pos <= f.End() {
+ file = f
+ break
+ }
+ }
+ if file == nil {
+ continue
+ }
+
+ // Get the end position of the error.
+ // (This heuristic assumes that the buffer is formatted,
+ // at least up to the end position of the error.)
+ var buf bytes.Buffer
+ if err := format.Node(&buf, pass.Fset, file); err != nil {
+ continue
+ }
+ typeErrEndPos := analysisinternal.TypeErrorEndPos(pass.Fset, buf.Bytes(), typeErr.Pos)
+
+ // TODO(rfindley): much of the error handling code below returns, when it
+ // should probably continue.
+
+ // Get the path for the relevant range.
+ path, _ := astutil.PathEnclosingInterval(file, typeErr.Pos, typeErrEndPos)
+ if len(path) == 0 {
+ return nil, nil
+ }
+
+ // Find the enclosing return statement.
+ var ret *ast.ReturnStmt
+ var retIdx int
+ for i, n := range path {
+ if r, ok := n.(*ast.ReturnStmt); ok {
+ ret = r
+ retIdx = i
+ break
+ }
+ }
+ if ret == nil {
+ return nil, nil
+ }
+
+ // Get the function type that encloses the ReturnStmt.
+ var enclosingFunc *ast.FuncType
+ for _, n := range path[retIdx+1:] {
+ switch node := n.(type) {
+ case *ast.FuncLit:
+ enclosingFunc = node.Type
+ case *ast.FuncDecl:
+ enclosingFunc = node.Type
+ }
+ if enclosingFunc != nil {
+ break
+ }
+ }
+ if enclosingFunc == nil || enclosingFunc.Results == nil {
+ continue
+ }
+
+ // Skip any generic enclosing functions, since type parameters don't
+ // have 0 values.
+ // TODO(rfindley): We should be able to handle this if the return
+ // values are all concrete types.
+ if tparams := typeparams.ForFuncType(enclosingFunc); tparams != nil && tparams.NumFields() > 0 {
+ return nil, nil
+ }
+
+ // Find the function declaration that encloses the ReturnStmt.
+ var outer *ast.FuncDecl
+ for _, p := range path {
+ if p, ok := p.(*ast.FuncDecl); ok {
+ outer = p
+ break
+ }
+ }
+ if outer == nil {
+ return nil, nil
+ }
+
+ // Skip any return statements that contain function calls with multiple
+ // return values.
+ for _, expr := range ret.Results {
+ e, ok := expr.(*ast.CallExpr)
+ if !ok {
+ continue
+ }
+ if tup, ok := info.TypeOf(e).(*types.Tuple); ok && tup.Len() > 1 {
+ continue outer
+ }
+ }
+
+ // Duplicate the return values to track which values have been matched.
+ remaining := make([]ast.Expr, len(ret.Results))
+ copy(remaining, ret.Results)
+
+ fixed := make([]ast.Expr, len(enclosingFunc.Results.List))
+
+ // For each value in the return function declaration, find the leftmost element
+ // in the return statement that has the desired type. If no such element exists,
+ // fill in the missing value with the appropriate "zero" value.
+ // Beware that type information may be incomplete.
+ var retTyps []types.Type
+ for _, ret := range enclosingFunc.Results.List {
+ retTyp := info.TypeOf(ret.Type)
+ if retTyp == nil {
+ return nil, nil
+ }
+ retTyps = append(retTyps, retTyp)
+ }
+ matches := analysisinternal.MatchingIdents(retTyps, file, ret.Pos(), info, pass.Pkg)
+ for i, retTyp := range retTyps {
+ var match ast.Expr
+ var idx int
+ for j, val := range remaining {
+ if t := info.TypeOf(val); t == nil || !matchingTypes(t, retTyp) {
+ continue
+ }
+ if !analysisinternal.IsZeroValue(val) {
+ match, idx = val, j
+ break
+ }
+ // If the current match is a "zero" value, we keep searching in
+ // case we find a non-"zero" value match. If we do not find a
+ // non-"zero" value, we will use the "zero" value.
+ match, idx = val, j
+ }
+
+ if match != nil {
+ fixed[i] = match
+ remaining = append(remaining[:idx], remaining[idx+1:]...)
+ } else {
+ names, ok := matches[retTyp]
+ if !ok {
+ return nil, fmt.Errorf("invalid return type: %v", retTyp)
+ }
+ // Find the identifier most similar to the return type.
+ // If no identifier matches the pattern, generate a zero value.
+ if best := fuzzy.BestMatch(retTyp.String(), names); best != "" {
+ fixed[i] = ast.NewIdent(best)
+ } else if zero := analysisinternal.ZeroValue(file, pass.Pkg, retTyp); zero != nil {
+ fixed[i] = zero
+ } else {
+ return nil, nil
+ }
+ }
+ }
+
+ // Remove any non-matching "zero values" from the leftover values.
+ var nonZeroRemaining []ast.Expr
+ for _, expr := range remaining {
+ if !analysisinternal.IsZeroValue(expr) {
+ nonZeroRemaining = append(nonZeroRemaining, expr)
+ }
+ }
+ // Append leftover return values to end of new return statement.
+ fixed = append(fixed, nonZeroRemaining...)
+
+ newRet := &ast.ReturnStmt{
+ Return: ret.Pos(),
+ Results: fixed,
+ }
+
+ // Convert the new return statement AST to text.
+ var newBuf bytes.Buffer
+ if err := format.Node(&newBuf, pass.Fset, newRet); err != nil {
+ return nil, err
+ }
+
+ pass.Report(analysis.Diagnostic{
+ Pos: typeErr.Pos,
+ End: typeErrEndPos,
+ Message: typeErr.Msg,
+ SuggestedFixes: []analysis.SuggestedFix{{
+ Message: "Fill in return values",
+ TextEdits: []analysis.TextEdit{{
+ Pos: ret.Pos(),
+ End: ret.End(),
+ NewText: newBuf.Bytes(),
+ }},
+ }},
+ })
+ }
+ return nil, nil
+}
+
+func matchingTypes(want, got types.Type) bool {
+ if want == got || types.Identical(want, got) {
+ return true
+ }
+ // Code segment to help check for untyped equality from (golang/go#32146).
+ if rhs, ok := want.(*types.Basic); ok && rhs.Info()&types.IsUntyped > 0 {
+ if lhs, ok := got.Underlying().(*types.Basic); ok {
+ return rhs.Info()&types.IsConstType == lhs.Info()&types.IsConstType
+ }
+ }
+ return types.AssignableTo(want, got) || types.ConvertibleTo(want, got)
+}
+
+// Error messages have changed across Go versions. These regexps capture recent
+// incarnations.
+//
+// TODO(rfindley): once error codes are exported and exposed via go/packages,
+// use error codes rather than string matching here.
+var wrongReturnNumRegexes = []*regexp.Regexp{
+ regexp.MustCompile(`wrong number of return values \(want (\d+), got (\d+)\)`),
+ regexp.MustCompile(`too many return values`),
+ regexp.MustCompile(`not enough return values`),
+}
+
+func FixesError(err types.Error) bool {
+ msg := strings.TrimSpace(err.Msg)
+ for _, rx := range wrongReturnNumRegexes {
+ if rx.MatchString(msg) {
+ return true
+ }
+ }
+ return false
+}
diff --git a/gopls/internal/lsp/analysis/fillreturns/fillreturns_test.go b/gopls/internal/lsp/analysis/fillreturns/fillreturns_test.go
new file mode 100644
index 000000000..1f7627551
--- /dev/null
+++ b/gopls/internal/lsp/analysis/fillreturns/fillreturns_test.go
@@ -0,0 +1,22 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fillreturns_test
+
+import (
+ "testing"
+
+ "golang.org/x/tools/go/analysis/analysistest"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/fillreturns"
+ "golang.org/x/tools/internal/typeparams"
+)
+
+func Test(t *testing.T) {
+ testdata := analysistest.TestData()
+ tests := []string{"a"}
+ if typeparams.Enabled {
+ tests = append(tests, "typeparams")
+ }
+ analysistest.RunWithSuggestedFixes(t, testdata, fillreturns.Analyzer, tests...)
+}
diff --git a/internal/lsp/analysis/fillreturns/testdata/src/a/a.go b/gopls/internal/lsp/analysis/fillreturns/testdata/src/a/a.go
index 7ab0ff167..7ab0ff167 100644
--- a/internal/lsp/analysis/fillreturns/testdata/src/a/a.go
+++ b/gopls/internal/lsp/analysis/fillreturns/testdata/src/a/a.go
diff --git a/internal/lsp/analysis/fillreturns/testdata/src/a/a.go.golden b/gopls/internal/lsp/analysis/fillreturns/testdata/src/a/a.go.golden
index f007a5f37..f007a5f37 100644
--- a/internal/lsp/analysis/fillreturns/testdata/src/a/a.go.golden
+++ b/gopls/internal/lsp/analysis/fillreturns/testdata/src/a/a.go.golden
diff --git a/internal/lsp/analysis/fillreturns/testdata/src/a/typeparams/a.go b/gopls/internal/lsp/analysis/fillreturns/testdata/src/a/typeparams/a.go
index 8454bd2ce..8454bd2ce 100644
--- a/internal/lsp/analysis/fillreturns/testdata/src/a/typeparams/a.go
+++ b/gopls/internal/lsp/analysis/fillreturns/testdata/src/a/typeparams/a.go
diff --git a/internal/lsp/analysis/fillreturns/testdata/src/a/typeparams/a.go.golden b/gopls/internal/lsp/analysis/fillreturns/testdata/src/a/typeparams/a.go.golden
index 8454bd2ce..8454bd2ce 100644
--- a/internal/lsp/analysis/fillreturns/testdata/src/a/typeparams/a.go.golden
+++ b/gopls/internal/lsp/analysis/fillreturns/testdata/src/a/typeparams/a.go.golden
diff --git a/gopls/internal/lsp/analysis/fillstruct/fillstruct.go b/gopls/internal/lsp/analysis/fillstruct/fillstruct.go
new file mode 100644
index 000000000..af29a3632
--- /dev/null
+++ b/gopls/internal/lsp/analysis/fillstruct/fillstruct.go
@@ -0,0 +1,506 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package fillstruct defines an Analyzer that automatically
+// fills in a struct declaration with zero value elements for each field.
+//
+// The analyzer's diagnostic is merely a prompt.
+// The actual fix is created by a separate direct call from gopls to
+// the SuggestedFixes function.
+// Tests of Analyzer.Run can be found in ./testdata/src.
+// Tests of the SuggestedFixes logic live in ../../testdata/fillstruct.
+package fillstruct
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/format"
+ "go/token"
+ "go/types"
+ "strings"
+ "unicode"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/inspect"
+ "golang.org/x/tools/go/ast/astutil"
+ "golang.org/x/tools/go/ast/inspector"
+ "golang.org/x/tools/gopls/internal/lsp/safetoken"
+ "golang.org/x/tools/internal/analysisinternal"
+ "golang.org/x/tools/internal/fuzzy"
+ "golang.org/x/tools/internal/typeparams"
+)
+
+const Doc = `note incomplete struct initializations
+
+This analyzer provides diagnostics for any struct literals that do not have
+any fields initialized. Because the suggested fix for this analysis is
+expensive to compute, callers should compute it separately, using the
+SuggestedFix function below.
+`
+
+var Analyzer = &analysis.Analyzer{
+ Name: "fillstruct",
+ Doc: Doc,
+ Requires: []*analysis.Analyzer{inspect.Analyzer},
+ Run: run,
+ RunDespiteErrors: true,
+}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+ inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+ nodeFilter := []ast.Node{(*ast.CompositeLit)(nil)}
+ inspect.Preorder(nodeFilter, func(n ast.Node) {
+ expr := n.(*ast.CompositeLit)
+
+ // Find enclosing file.
+ // TODO(adonovan): use inspect.WithStack?
+ var file *ast.File
+ for _, f := range pass.Files {
+ if f.Pos() <= expr.Pos() && expr.Pos() <= f.End() {
+ file = f
+ break
+ }
+ }
+ if file == nil {
+ return
+ }
+
+ typ := pass.TypesInfo.TypeOf(expr)
+ if typ == nil {
+ return
+ }
+
+ // Find reference to the type declaration of the struct being initialized.
+ typ = deref(typ)
+ tStruct, ok := typ.Underlying().(*types.Struct)
+ if !ok {
+ return
+ }
+ // Inv: typ is the possibly-named struct type.
+
+ fieldCount := tStruct.NumFields()
+
+ // Skip any struct that is already populated or that has no fields.
+ if fieldCount == 0 || fieldCount == len(expr.Elts) {
+ return
+ }
+
+ // Are any fields in need of filling?
+ var fillableFields []string
+ for i := 0; i < fieldCount; i++ {
+ field := tStruct.Field(i)
+ // Ignore fields that are not accessible in the current package.
+ if field.Pkg() != nil && field.Pkg() != pass.Pkg && !field.Exported() {
+ continue
+ }
+ fillableFields = append(fillableFields, fmt.Sprintf("%s: %s", field.Name(), field.Type().String()))
+ }
+ if len(fillableFields) == 0 {
+ return
+ }
+
+ // Derive a name for the struct type.
+ var name string
+ if typ != tStruct {
+ // named struct type (e.g. pkg.S[T])
+ name = types.TypeString(typ, types.RelativeTo(pass.Pkg))
+ } else {
+ // anonymous struct type
+ totalFields := len(fillableFields)
+ const maxLen = 20
+ // Find the index to cut off printing of fields.
+ var i, fieldLen int
+ for i = range fillableFields {
+ if fieldLen > maxLen {
+ break
+ }
+ fieldLen += len(fillableFields[i])
+ }
+ fillableFields = fillableFields[:i]
+ if i < totalFields {
+ fillableFields = append(fillableFields, "...")
+ }
+ name = fmt.Sprintf("anonymous struct { %s }", strings.Join(fillableFields, ", "))
+ }
+ pass.Report(analysis.Diagnostic{
+ Message: fmt.Sprintf("Fill %s", name),
+ Pos: expr.Pos(),
+ End: expr.End(),
+ })
+ })
+ return nil, nil
+}
+
+// SuggestedFix computes the suggested fix for the kinds of
+// diagnostics produced by the Analyzer above.
+func SuggestedFix(fset *token.FileSet, start, end token.Pos, content []byte, file *ast.File, pkg *types.Package, info *types.Info) (*analysis.SuggestedFix, error) {
+ if info == nil {
+ return nil, fmt.Errorf("nil types.Info")
+ }
+
+ pos := start // don't use the end
+
+ // TODO(rstambler): Using ast.Inspect would probably be more efficient than
+ // calling PathEnclosingInterval. Switch this approach.
+ path, _ := astutil.PathEnclosingInterval(file, pos, pos)
+ if len(path) == 0 {
+ return nil, fmt.Errorf("no enclosing ast.Node")
+ }
+ var expr *ast.CompositeLit
+ for _, n := range path {
+ if node, ok := n.(*ast.CompositeLit); ok {
+ expr = node
+ break
+ }
+ }
+
+ typ := info.TypeOf(expr)
+ if typ == nil {
+ return nil, fmt.Errorf("no composite literal")
+ }
+
+ // Find reference to the type declaration of the struct being initialized.
+ typ = deref(typ)
+ tStruct, ok := typ.Underlying().(*types.Struct)
+ if !ok {
+ return nil, fmt.Errorf("%s is not a (pointer to) struct type",
+ types.TypeString(typ, types.RelativeTo(pkg)))
+ }
+ // Inv: typ is the the possibly-named struct type.
+
+ fieldCount := tStruct.NumFields()
+
+ // Check which types have already been filled in. (we only want to fill in
+ // the unfilled types, or else we'll blat user-supplied details)
+ prefilledFields := map[string]ast.Expr{}
+ for _, e := range expr.Elts {
+ if kv, ok := e.(*ast.KeyValueExpr); ok {
+ if key, ok := kv.Key.(*ast.Ident); ok {
+ prefilledFields[key.Name] = kv.Value
+ }
+ }
+ }
+
+ // Use a new fileset to build up a token.File for the new composite
+ // literal. We need one line for foo{, one line for }, and one line for
+ // each field we're going to set. format.Node only cares about line
+ // numbers, so we don't need to set columns, and each line can be
+ // 1 byte long.
+ // TODO(adonovan): why is this necessary? The position information
+ // is going to be wrong for the existing trees in prefilledFields.
+ // Can't the formatter just do its best with an empty fileset?
+ fakeFset := token.NewFileSet()
+ tok := fakeFset.AddFile("", -1, fieldCount+2)
+
+ line := 2 // account for 1-based lines and the left brace
+ var fieldTyps []types.Type
+ for i := 0; i < fieldCount; i++ {
+ field := tStruct.Field(i)
+ // Ignore fields that are not accessible in the current package.
+ if field.Pkg() != nil && field.Pkg() != pkg && !field.Exported() {
+ fieldTyps = append(fieldTyps, nil)
+ continue
+ }
+ fieldTyps = append(fieldTyps, field.Type())
+ }
+ matches := analysisinternal.MatchingIdents(fieldTyps, file, start, info, pkg)
+ var elts []ast.Expr
+ for i, fieldTyp := range fieldTyps {
+ if fieldTyp == nil {
+ continue // TODO(adonovan): is this reachable?
+ }
+ fieldName := tStruct.Field(i).Name()
+
+ tok.AddLine(line - 1) // add 1 byte per line
+ if line > tok.LineCount() {
+ panic(fmt.Sprintf("invalid line number %v (of %v) for fillstruct", line, tok.LineCount()))
+ }
+ pos := tok.LineStart(line)
+
+ kv := &ast.KeyValueExpr{
+ Key: &ast.Ident{
+ NamePos: pos,
+ Name: fieldName,
+ },
+ Colon: pos,
+ }
+ if expr, ok := prefilledFields[fieldName]; ok {
+ kv.Value = expr
+ } else {
+ names, ok := matches[fieldTyp]
+ if !ok {
+ return nil, fmt.Errorf("invalid struct field type: %v", fieldTyp)
+ }
+
+ // Find the name most similar to the field name.
+ // If no name matches the pattern, generate a zero value.
+ // NOTE: We currently match on the name of the field key rather than the field type.
+ if best := fuzzy.BestMatch(fieldName, names); best != "" {
+ kv.Value = ast.NewIdent(best)
+ } else if v := populateValue(file, pkg, fieldTyp); v != nil {
+ kv.Value = v
+ } else {
+ return nil, nil
+ }
+ }
+ elts = append(elts, kv)
+ line++
+ }
+
+ // If all of the struct's fields are unexported, we have nothing to do.
+ if len(elts) == 0 {
+ return nil, fmt.Errorf("no elements to fill")
+ }
+
+ // Add the final line for the right brace. Offset is the number of
+ // bytes already added plus 1.
+ tok.AddLine(len(elts) + 1)
+ line = len(elts) + 2
+ if line > tok.LineCount() {
+ panic(fmt.Sprintf("invalid line number %v (of %v) for fillstruct", line, tok.LineCount()))
+ }
+
+ cl := &ast.CompositeLit{
+ Type: expr.Type,
+ Lbrace: tok.LineStart(1),
+ Elts: elts,
+ Rbrace: tok.LineStart(line),
+ }
+
+ // Find the line on which the composite literal is declared.
+ split := bytes.Split(content, []byte("\n"))
+ lineNumber := safetoken.StartPosition(fset, expr.Lbrace).Line
+ firstLine := split[lineNumber-1] // lines are 1-indexed
+
+ // Trim the whitespace from the left of the line, and use the index
+ // to get the amount of whitespace on the left.
+ trimmed := bytes.TrimLeftFunc(firstLine, unicode.IsSpace)
+ index := bytes.Index(firstLine, trimmed)
+ whitespace := firstLine[:index]
+
+ // First pass through the formatter: turn the expr into a string.
+ var formatBuf bytes.Buffer
+ if err := format.Node(&formatBuf, fakeFset, cl); err != nil {
+ return nil, fmt.Errorf("failed to run first format on:\n%s\ngot err: %v", cl.Type, err)
+ }
+ sug := indent(formatBuf.Bytes(), whitespace)
+
+ if len(prefilledFields) > 0 {
+ // Attempt a second pass through the formatter to line up columns.
+ sourced, err := format.Source(sug)
+ if err == nil {
+ sug = indent(sourced, whitespace)
+ }
+ }
+
+ return &analysis.SuggestedFix{
+ TextEdits: []analysis.TextEdit{
+ {
+ Pos: expr.Pos(),
+ End: expr.End(),
+ NewText: sug,
+ },
+ },
+ }, nil
+}
+
+// indent works line by line through str, indenting (prefixing) each line with
+// ind.
+func indent(str, ind []byte) []byte {
+ split := bytes.Split(str, []byte("\n"))
+ newText := bytes.NewBuffer(nil)
+ for i, s := range split {
+ if len(s) == 0 {
+ continue
+ }
+ // Don't add the extra indentation to the first line.
+ if i != 0 {
+ newText.Write(ind)
+ }
+ newText.Write(s)
+ if i < len(split)-1 {
+ newText.WriteByte('\n')
+ }
+ }
+ return newText.Bytes()
+}
+
+// populateValue constructs an expression to fill the value of a struct field.
+//
+// When the type of a struct field is a basic literal or interface, we return
+// default values. For other types, such as maps, slices, and channels, we create
+// empty expressions such as []T{} or make(chan T) rather than using default values.
+//
+// The reasoning here is that users will call fillstruct with the intention of
+// initializing the struct, in which case setting these fields to nil has no effect.
+func populateValue(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
+ switch u := typ.Underlying().(type) {
+ case *types.Basic:
+ switch {
+ case u.Info()&types.IsNumeric != 0:
+ return &ast.BasicLit{Kind: token.INT, Value: "0"}
+ case u.Info()&types.IsBoolean != 0:
+ return &ast.Ident{Name: "false"}
+ case u.Info()&types.IsString != 0:
+ return &ast.BasicLit{Kind: token.STRING, Value: `""`}
+ case u.Kind() == types.UnsafePointer:
+ return ast.NewIdent("nil")
+ default:
+ panic("unknown basic type")
+ }
+
+ case *types.Map:
+ k := analysisinternal.TypeExpr(f, pkg, u.Key())
+ v := analysisinternal.TypeExpr(f, pkg, u.Elem())
+ if k == nil || v == nil {
+ return nil
+ }
+ return &ast.CompositeLit{
+ Type: &ast.MapType{
+ Key: k,
+ Value: v,
+ },
+ }
+ case *types.Slice:
+ s := analysisinternal.TypeExpr(f, pkg, u.Elem())
+ if s == nil {
+ return nil
+ }
+ return &ast.CompositeLit{
+ Type: &ast.ArrayType{
+ Elt: s,
+ },
+ }
+
+ case *types.Array:
+ a := analysisinternal.TypeExpr(f, pkg, u.Elem())
+ if a == nil {
+ return nil
+ }
+ return &ast.CompositeLit{
+ Type: &ast.ArrayType{
+ Elt: a,
+ Len: &ast.BasicLit{
+ Kind: token.INT, Value: fmt.Sprintf("%v", u.Len()),
+ },
+ },
+ }
+
+ case *types.Chan:
+ v := analysisinternal.TypeExpr(f, pkg, u.Elem())
+ if v == nil {
+ return nil
+ }
+ dir := ast.ChanDir(u.Dir())
+ if u.Dir() == types.SendRecv {
+ dir = ast.SEND | ast.RECV
+ }
+ return &ast.CallExpr{
+ Fun: ast.NewIdent("make"),
+ Args: []ast.Expr{
+ &ast.ChanType{
+ Dir: dir,
+ Value: v,
+ },
+ },
+ }
+
+ case *types.Struct:
+ s := analysisinternal.TypeExpr(f, pkg, typ)
+ if s == nil {
+ return nil
+ }
+ return &ast.CompositeLit{
+ Type: s,
+ }
+
+ case *types.Signature:
+ var params []*ast.Field
+ for i := 0; i < u.Params().Len(); i++ {
+ p := analysisinternal.TypeExpr(f, pkg, u.Params().At(i).Type())
+ if p == nil {
+ return nil
+ }
+ params = append(params, &ast.Field{
+ Type: p,
+ Names: []*ast.Ident{
+ {
+ Name: u.Params().At(i).Name(),
+ },
+ },
+ })
+ }
+ var returns []*ast.Field
+ for i := 0; i < u.Results().Len(); i++ {
+ r := analysisinternal.TypeExpr(f, pkg, u.Results().At(i).Type())
+ if r == nil {
+ return nil
+ }
+ returns = append(returns, &ast.Field{
+ Type: r,
+ })
+ }
+ return &ast.FuncLit{
+ Type: &ast.FuncType{
+ Params: &ast.FieldList{
+ List: params,
+ },
+ Results: &ast.FieldList{
+ List: returns,
+ },
+ },
+ Body: &ast.BlockStmt{},
+ }
+
+ case *types.Pointer:
+ switch u.Elem().(type) {
+ case *types.Basic:
+ return &ast.CallExpr{
+ Fun: &ast.Ident{
+ Name: "new",
+ },
+ Args: []ast.Expr{
+ &ast.Ident{
+ Name: u.Elem().String(),
+ },
+ },
+ }
+ default:
+ return &ast.UnaryExpr{
+ Op: token.AND,
+ X: populateValue(f, pkg, u.Elem()),
+ }
+ }
+
+ case *types.Interface:
+ if param, ok := typ.(*typeparams.TypeParam); ok {
+ // *new(T) is the zero value of a type parameter T.
+ // TODO(adonovan): one could give a more specific zero
+ // value if the type has a core type that is, say,
+ // always a number or a pointer. See go/ssa for details.
+ return &ast.StarExpr{
+ X: &ast.CallExpr{
+ Fun: ast.NewIdent("new"),
+ Args: []ast.Expr{
+ ast.NewIdent(param.Obj().Name()),
+ },
+ },
+ }
+ }
+
+ return ast.NewIdent("nil")
+ }
+ return nil
+}
+
+func deref(t types.Type) types.Type {
+ for {
+ ptr, ok := t.Underlying().(*types.Pointer)
+ if !ok {
+ return t
+ }
+ t = ptr.Elem()
+ }
+}
diff --git a/gopls/internal/lsp/analysis/fillstruct/fillstruct_test.go b/gopls/internal/lsp/analysis/fillstruct/fillstruct_test.go
new file mode 100644
index 000000000..66642b7ab
--- /dev/null
+++ b/gopls/internal/lsp/analysis/fillstruct/fillstruct_test.go
@@ -0,0 +1,22 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fillstruct_test
+
+import (
+ "testing"
+
+ "golang.org/x/tools/go/analysis/analysistest"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/fillstruct"
+ "golang.org/x/tools/internal/typeparams"
+)
+
+func Test(t *testing.T) {
+ testdata := analysistest.TestData()
+ tests := []string{"a"}
+ if typeparams.Enabled {
+ tests = append(tests, "typeparams")
+ }
+ analysistest.Run(t, testdata, fillstruct.Analyzer, tests...)
+}
diff --git a/gopls/internal/lsp/analysis/fillstruct/testdata/src/a/a.go b/gopls/internal/lsp/analysis/fillstruct/testdata/src/a/a.go
new file mode 100644
index 000000000..9ee3860fc
--- /dev/null
+++ b/gopls/internal/lsp/analysis/fillstruct/testdata/src/a/a.go
@@ -0,0 +1,113 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fillstruct
+
+import (
+ data "b"
+ "go/ast"
+ "go/token"
+ "unsafe"
+)
+
+type emptyStruct struct{}
+
+var _ = emptyStruct{}
+
+type basicStruct struct {
+ foo int
+}
+
+var _ = basicStruct{} // want `Fill basicStruct`
+
+type twoArgStruct struct {
+ foo int
+ bar string
+}
+
+var _ = twoArgStruct{} // want `Fill twoArgStruct`
+
+var _ = twoArgStruct{ // want `Fill twoArgStruct`
+ bar: "bar",
+}
+
+type nestedStruct struct {
+ bar string
+ basic basicStruct
+}
+
+var _ = nestedStruct{} // want `Fill nestedStruct`
+
+var _ = data.B{} // want `Fill b.B`
+
+type typedStruct struct {
+ m map[string]int
+ s []int
+ c chan int
+ c1 <-chan int
+ a [2]string
+}
+
+var _ = typedStruct{} // want `Fill typedStruct`
+
+type funStruct struct {
+ fn func(i int) int
+}
+
+var _ = funStruct{} // want `Fill funStruct`
+
+type funStructComplex struct {
+ fn func(i int, s string) (string, int)
+}
+
+var _ = funStructComplex{} // want `Fill funStructComplex`
+
+type funStructEmpty struct {
+ fn func()
+}
+
+var _ = funStructEmpty{} // want `Fill funStructEmpty`
+
+type Foo struct {
+ A int
+}
+
+type Bar struct {
+ X *Foo
+ Y *Foo
+}
+
+var _ = Bar{} // want `Fill Bar`
+
+type importedStruct struct {
+ m map[*ast.CompositeLit]ast.Field
+ s []ast.BadExpr
+ a [3]token.Token
+ c chan ast.EmptyStmt
+ fn func(ast_decl ast.DeclStmt) ast.Ellipsis
+ st ast.CompositeLit
+}
+
+var _ = importedStruct{} // want `Fill importedStruct`
+
+type pointerBuiltinStruct struct {
+ b *bool
+ s *string
+ i *int
+}
+
+var _ = pointerBuiltinStruct{} // want `Fill pointerBuiltinStruct`
+
+var _ = []ast.BasicLit{
+ {}, // want `Fill go/ast.BasicLit`
+}
+
+var _ = []ast.BasicLit{{}, // want "go/ast.BasicLit"
+}
+
+type unsafeStruct struct {
+ foo unsafe.Pointer
+}
+
+var _ = unsafeStruct{} // want `Fill unsafeStruct`
diff --git a/internal/lsp/analysis/fillstruct/testdata/src/b/b.go b/gopls/internal/lsp/analysis/fillstruct/testdata/src/b/b.go
index a4b394605..a4b394605 100644
--- a/internal/lsp/analysis/fillstruct/testdata/src/b/b.go
+++ b/gopls/internal/lsp/analysis/fillstruct/testdata/src/b/b.go
diff --git a/gopls/internal/lsp/analysis/fillstruct/testdata/src/typeparams/typeparams.go b/gopls/internal/lsp/analysis/fillstruct/testdata/src/typeparams/typeparams.go
new file mode 100644
index 000000000..46bb8ae40
--- /dev/null
+++ b/gopls/internal/lsp/analysis/fillstruct/testdata/src/typeparams/typeparams.go
@@ -0,0 +1,50 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fillstruct
+
+type emptyStruct[A any] struct{}
+
+var _ = emptyStruct[int]{}
+
+type basicStruct[T any] struct {
+ foo T
+}
+
+var _ = basicStruct[int]{} // want `Fill basicStruct\[int\]`
+
+type twoArgStruct[F, B any] struct {
+ foo F
+ bar B
+}
+
+var _ = twoArgStruct[string, int]{} // want `Fill twoArgStruct\[string, int\]`
+
+var _ = twoArgStruct[int, string]{ // want `Fill twoArgStruct\[int, string\]`
+ bar: "bar",
+}
+
+type nestedStruct struct {
+ bar string
+ basic basicStruct[int]
+}
+
+var _ = nestedStruct{} // want "Fill nestedStruct"
+
+func _[T any]() {
+ type S struct{ t T }
+ x := S{} // want "Fill S"
+ _ = x
+}
+
+func Test() {
+ var tests = []struct {
+ a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p string
+ }{
+ {}, // want "Fill anonymous struct { a: string, b: string, c: string, ... }"
+ }
+ for _, test := range tests {
+ _ = test
+ }
+}
diff --git a/internal/lsp/analysis/infertypeargs/infertypeargs.go b/gopls/internal/lsp/analysis/infertypeargs/infertypeargs.go
index 119de50ce..119de50ce 100644
--- a/internal/lsp/analysis/infertypeargs/infertypeargs.go
+++ b/gopls/internal/lsp/analysis/infertypeargs/infertypeargs.go
diff --git a/gopls/internal/lsp/analysis/infertypeargs/infertypeargs_test.go b/gopls/internal/lsp/analysis/infertypeargs/infertypeargs_test.go
new file mode 100644
index 000000000..70855e1ab
--- /dev/null
+++ b/gopls/internal/lsp/analysis/infertypeargs/infertypeargs_test.go
@@ -0,0 +1,21 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package infertypeargs_test
+
+import (
+ "testing"
+
+ "golang.org/x/tools/go/analysis/analysistest"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/infertypeargs"
+ "golang.org/x/tools/internal/typeparams"
+)
+
+func Test(t *testing.T) {
+ if !typeparams.Enabled {
+ t.Skip("type params are not enabled")
+ }
+ testdata := analysistest.TestData()
+ analysistest.RunWithSuggestedFixes(t, testdata, infertypeargs.Analyzer, "a")
+}
diff --git a/internal/lsp/analysis/infertypeargs/run_go117.go b/gopls/internal/lsp/analysis/infertypeargs/run_go117.go
index bc5c29b51..bc5c29b51 100644
--- a/internal/lsp/analysis/infertypeargs/run_go117.go
+++ b/gopls/internal/lsp/analysis/infertypeargs/run_go117.go
diff --git a/internal/lsp/analysis/infertypeargs/run_go118.go b/gopls/internal/lsp/analysis/infertypeargs/run_go118.go
index 66457429a..66457429a 100644
--- a/internal/lsp/analysis/infertypeargs/run_go118.go
+++ b/gopls/internal/lsp/analysis/infertypeargs/run_go118.go
diff --git a/internal/lsp/analysis/infertypeargs/testdata/src/a/basic.go b/gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/basic.go
index 1c3d88ba1..1c3d88ba1 100644
--- a/internal/lsp/analysis/infertypeargs/testdata/src/a/basic.go
+++ b/gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/basic.go
diff --git a/internal/lsp/analysis/infertypeargs/testdata/src/a/basic.go.golden b/gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/basic.go.golden
index 72348ff77..72348ff77 100644
--- a/internal/lsp/analysis/infertypeargs/testdata/src/a/basic.go.golden
+++ b/gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/basic.go.golden
diff --git a/internal/lsp/analysis/infertypeargs/testdata/src/a/imported.go b/gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/imported.go
index fc1f763df..fc1f763df 100644
--- a/internal/lsp/analysis/infertypeargs/testdata/src/a/imported.go
+++ b/gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/imported.go
diff --git a/internal/lsp/analysis/infertypeargs/testdata/src/a/imported.go.golden b/gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/imported.go.golden
index 6099545bb..6099545bb 100644
--- a/internal/lsp/analysis/infertypeargs/testdata/src/a/imported.go.golden
+++ b/gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/imported.go.golden
diff --git a/internal/lsp/analysis/infertypeargs/testdata/src/a/imported/imported.go b/gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/imported/imported.go
index f0610a8b4..f0610a8b4 100644
--- a/internal/lsp/analysis/infertypeargs/testdata/src/a/imported/imported.go
+++ b/gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/imported/imported.go
diff --git a/internal/lsp/analysis/infertypeargs/testdata/src/a/notypechange.go b/gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/notypechange.go
index c304f1d0d..c304f1d0d 100644
--- a/internal/lsp/analysis/infertypeargs/testdata/src/a/notypechange.go
+++ b/gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/notypechange.go
diff --git a/internal/lsp/analysis/infertypeargs/testdata/src/a/notypechange.go.golden b/gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/notypechange.go.golden
index 93c6f707c..93c6f707c 100644
--- a/internal/lsp/analysis/infertypeargs/testdata/src/a/notypechange.go.golden
+++ b/gopls/internal/lsp/analysis/infertypeargs/testdata/src/a/notypechange.go.golden
diff --git a/gopls/internal/lsp/analysis/nonewvars/nonewvars.go b/gopls/internal/lsp/analysis/nonewvars/nonewvars.go
new file mode 100644
index 000000000..6937b36d1
--- /dev/null
+++ b/gopls/internal/lsp/analysis/nonewvars/nonewvars.go
@@ -0,0 +1,95 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package nonewvars defines an Analyzer that applies suggested fixes
+// to errors of the type "no new variables on left side of :=".
+package nonewvars
+
+import (
+ "bytes"
+ "go/ast"
+ "go/format"
+ "go/token"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/inspect"
+ "golang.org/x/tools/go/ast/inspector"
+ "golang.org/x/tools/internal/analysisinternal"
+)
+
+const Doc = `suggested fixes for "no new vars on left side of :="
+
+This checker provides suggested fixes for type errors of the
+type "no new vars on left side of :=". For example:
+ z := 1
+ z := 2
+will turn into
+ z := 1
+ z = 2
+`
+
+var Analyzer = &analysis.Analyzer{
+ Name: "nonewvars",
+ Doc: Doc,
+ Requires: []*analysis.Analyzer{inspect.Analyzer},
+ Run: run,
+ RunDespiteErrors: true,
+}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+ inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+ if len(pass.TypeErrors) == 0 {
+ return nil, nil
+ }
+
+ nodeFilter := []ast.Node{(*ast.AssignStmt)(nil)}
+ inspect.Preorder(nodeFilter, func(n ast.Node) {
+ assignStmt, _ := n.(*ast.AssignStmt)
+ // We only care about ":=".
+ if assignStmt.Tok != token.DEFINE {
+ return
+ }
+
+ var file *ast.File
+ for _, f := range pass.Files {
+ if f.Pos() <= assignStmt.Pos() && assignStmt.Pos() < f.End() {
+ file = f
+ break
+ }
+ }
+ if file == nil {
+ return
+ }
+
+ for _, err := range pass.TypeErrors {
+ if !FixesError(err.Msg) {
+ continue
+ }
+ if assignStmt.Pos() > err.Pos || err.Pos >= assignStmt.End() {
+ continue
+ }
+ var buf bytes.Buffer
+ if err := format.Node(&buf, pass.Fset, file); err != nil {
+ continue
+ }
+ pass.Report(analysis.Diagnostic{
+ Pos: err.Pos,
+ End: analysisinternal.TypeErrorEndPos(pass.Fset, buf.Bytes(), err.Pos),
+ Message: err.Msg,
+ SuggestedFixes: []analysis.SuggestedFix{{
+ Message: "Change ':=' to '='",
+ TextEdits: []analysis.TextEdit{{
+ Pos: err.Pos,
+ End: err.Pos + 1,
+ }},
+ }},
+ })
+ }
+ })
+ return nil, nil
+}
+
+func FixesError(msg string) bool {
+ return msg == "no new variables on left side of :="
+}
diff --git a/gopls/internal/lsp/analysis/nonewvars/nonewvars_test.go b/gopls/internal/lsp/analysis/nonewvars/nonewvars_test.go
new file mode 100644
index 000000000..8f6f0a51f
--- /dev/null
+++ b/gopls/internal/lsp/analysis/nonewvars/nonewvars_test.go
@@ -0,0 +1,22 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package nonewvars_test
+
+import (
+ "testing"
+
+ "golang.org/x/tools/go/analysis/analysistest"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/nonewvars"
+ "golang.org/x/tools/internal/typeparams"
+)
+
+func Test(t *testing.T) {
+ testdata := analysistest.TestData()
+ tests := []string{"a"}
+ if typeparams.Enabled {
+ tests = append(tests, "typeparams")
+ }
+ analysistest.RunWithSuggestedFixes(t, testdata, nonewvars.Analyzer, tests...)
+}
diff --git a/internal/lsp/analysis/nonewvars/testdata/src/a/a.go b/gopls/internal/lsp/analysis/nonewvars/testdata/src/a/a.go
index 97d8fcde1..97d8fcde1 100644
--- a/internal/lsp/analysis/nonewvars/testdata/src/a/a.go
+++ b/gopls/internal/lsp/analysis/nonewvars/testdata/src/a/a.go
diff --git a/internal/lsp/analysis/nonewvars/testdata/src/a/a.go.golden b/gopls/internal/lsp/analysis/nonewvars/testdata/src/a/a.go.golden
index 17197e564..17197e564 100644
--- a/internal/lsp/analysis/nonewvars/testdata/src/a/a.go.golden
+++ b/gopls/internal/lsp/analysis/nonewvars/testdata/src/a/a.go.golden
diff --git a/internal/lsp/analysis/nonewvars/testdata/src/typeparams/a.go b/gopls/internal/lsp/analysis/nonewvars/testdata/src/typeparams/a.go
index b381c9c09..b381c9c09 100644
--- a/internal/lsp/analysis/nonewvars/testdata/src/typeparams/a.go
+++ b/gopls/internal/lsp/analysis/nonewvars/testdata/src/typeparams/a.go
diff --git a/internal/lsp/analysis/nonewvars/testdata/src/typeparams/a.go.golden b/gopls/internal/lsp/analysis/nonewvars/testdata/src/typeparams/a.go.golden
index 3a5117301..3a5117301 100644
--- a/internal/lsp/analysis/nonewvars/testdata/src/typeparams/a.go.golden
+++ b/gopls/internal/lsp/analysis/nonewvars/testdata/src/typeparams/a.go.golden
diff --git a/gopls/internal/lsp/analysis/noresultvalues/noresultvalues.go b/gopls/internal/lsp/analysis/noresultvalues/noresultvalues.go
new file mode 100644
index 000000000..41952a547
--- /dev/null
+++ b/gopls/internal/lsp/analysis/noresultvalues/noresultvalues.go
@@ -0,0 +1,92 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package noresultvalues defines an Analyzer that applies suggested fixes
+// to errors of the type "no result values expected".
+package noresultvalues
+
+import (
+ "bytes"
+ "go/ast"
+ "go/format"
+ "strings"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/inspect"
+ "golang.org/x/tools/go/ast/inspector"
+ "golang.org/x/tools/internal/analysisinternal"
+)
+
+const Doc = `suggested fixes for unexpected return values
+
+This checker provides suggested fixes for type errors of the
+type "no result values expected" or "too many return values".
+For example:
+ func z() { return nil }
+will turn into
+ func z() { return }
+`
+
+var Analyzer = &analysis.Analyzer{
+ Name: "noresultvalues",
+ Doc: Doc,
+ Requires: []*analysis.Analyzer{inspect.Analyzer},
+ Run: run,
+ RunDespiteErrors: true,
+}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+ inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+ if len(pass.TypeErrors) == 0 {
+ return nil, nil
+ }
+
+ nodeFilter := []ast.Node{(*ast.ReturnStmt)(nil)}
+ inspect.Preorder(nodeFilter, func(n ast.Node) {
+ retStmt, _ := n.(*ast.ReturnStmt)
+
+ var file *ast.File
+ for _, f := range pass.Files {
+ if f.Pos() <= retStmt.Pos() && retStmt.Pos() < f.End() {
+ file = f
+ break
+ }
+ }
+ if file == nil {
+ return
+ }
+
+ for _, err := range pass.TypeErrors {
+ if !FixesError(err.Msg) {
+ continue
+ }
+ if retStmt.Pos() >= err.Pos || err.Pos >= retStmt.End() {
+ continue
+ }
+ var buf bytes.Buffer
+ if err := format.Node(&buf, pass.Fset, file); err != nil {
+ continue
+ }
+ pass.Report(analysis.Diagnostic{
+ Pos: err.Pos,
+ End: analysisinternal.TypeErrorEndPos(pass.Fset, buf.Bytes(), err.Pos),
+ Message: err.Msg,
+ SuggestedFixes: []analysis.SuggestedFix{{
+ Message: "Delete return values",
+ TextEdits: []analysis.TextEdit{{
+ Pos: retStmt.Pos(),
+ End: retStmt.End(),
+ NewText: []byte("return"),
+ }},
+ }},
+ })
+ }
+ })
+ return nil, nil
+}
+
+func FixesError(msg string) bool {
+ return msg == "no result values expected" ||
+ strings.HasPrefix(msg, "too many return values") && strings.Contains(msg, "want ()")
+}
diff --git a/gopls/internal/lsp/analysis/noresultvalues/noresultvalues_test.go b/gopls/internal/lsp/analysis/noresultvalues/noresultvalues_test.go
new file mode 100644
index 000000000..24ce39207
--- /dev/null
+++ b/gopls/internal/lsp/analysis/noresultvalues/noresultvalues_test.go
@@ -0,0 +1,22 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noresultvalues_test
+
+import (
+ "testing"
+
+ "golang.org/x/tools/go/analysis/analysistest"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/noresultvalues"
+ "golang.org/x/tools/internal/typeparams"
+)
+
+func Test(t *testing.T) {
+ testdata := analysistest.TestData()
+ tests := []string{"a"}
+ if typeparams.Enabled {
+ tests = append(tests, "typeparams")
+ }
+ analysistest.RunWithSuggestedFixes(t, testdata, noresultvalues.Analyzer, tests...)
+}
diff --git a/internal/lsp/analysis/noresultvalues/testdata/src/a/a.go b/gopls/internal/lsp/analysis/noresultvalues/testdata/src/a/a.go
index 3daa7f7c7..3daa7f7c7 100644
--- a/internal/lsp/analysis/noresultvalues/testdata/src/a/a.go
+++ b/gopls/internal/lsp/analysis/noresultvalues/testdata/src/a/a.go
diff --git a/internal/lsp/analysis/noresultvalues/testdata/src/a/a.go.golden b/gopls/internal/lsp/analysis/noresultvalues/testdata/src/a/a.go.golden
index 5e93aa413..5e93aa413 100644
--- a/internal/lsp/analysis/noresultvalues/testdata/src/a/a.go.golden
+++ b/gopls/internal/lsp/analysis/noresultvalues/testdata/src/a/a.go.golden
diff --git a/internal/lsp/analysis/noresultvalues/testdata/src/typeparams/a.go b/gopls/internal/lsp/analysis/noresultvalues/testdata/src/typeparams/a.go
index f8aa43665..f8aa43665 100644
--- a/internal/lsp/analysis/noresultvalues/testdata/src/typeparams/a.go
+++ b/gopls/internal/lsp/analysis/noresultvalues/testdata/src/typeparams/a.go
diff --git a/internal/lsp/analysis/noresultvalues/testdata/src/typeparams/a.go.golden b/gopls/internal/lsp/analysis/noresultvalues/testdata/src/typeparams/a.go.golden
index 963e3f4e1..963e3f4e1 100644
--- a/internal/lsp/analysis/noresultvalues/testdata/src/typeparams/a.go.golden
+++ b/gopls/internal/lsp/analysis/noresultvalues/testdata/src/typeparams/a.go.golden
diff --git a/internal/lsp/analysis/simplifycompositelit/simplifycompositelit.go b/gopls/internal/lsp/analysis/simplifycompositelit/simplifycompositelit.go
index c91fc7577..c91fc7577 100644
--- a/internal/lsp/analysis/simplifycompositelit/simplifycompositelit.go
+++ b/gopls/internal/lsp/analysis/simplifycompositelit/simplifycompositelit.go
diff --git a/gopls/internal/lsp/analysis/simplifycompositelit/simplifycompositelit_test.go b/gopls/internal/lsp/analysis/simplifycompositelit/simplifycompositelit_test.go
new file mode 100644
index 000000000..b0365a6b3
--- /dev/null
+++ b/gopls/internal/lsp/analysis/simplifycompositelit/simplifycompositelit_test.go
@@ -0,0 +1,17 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package simplifycompositelit_test
+
+import (
+ "testing"
+
+ "golang.org/x/tools/go/analysis/analysistest"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/simplifycompositelit"
+)
+
+func Test(t *testing.T) {
+ testdata := analysistest.TestData()
+ analysistest.RunWithSuggestedFixes(t, testdata, simplifycompositelit.Analyzer, "a")
+}
diff --git a/internal/lsp/analysis/simplifycompositelit/testdata/src/a/a.go b/gopls/internal/lsp/analysis/simplifycompositelit/testdata/src/a/a.go
index 14e0fa3ae..14e0fa3ae 100644
--- a/internal/lsp/analysis/simplifycompositelit/testdata/src/a/a.go
+++ b/gopls/internal/lsp/analysis/simplifycompositelit/testdata/src/a/a.go
diff --git a/internal/lsp/analysis/simplifycompositelit/testdata/src/a/a.go.golden b/gopls/internal/lsp/analysis/simplifycompositelit/testdata/src/a/a.go.golden
index 6bfed45a5..6bfed45a5 100644
--- a/internal/lsp/analysis/simplifycompositelit/testdata/src/a/a.go.golden
+++ b/gopls/internal/lsp/analysis/simplifycompositelit/testdata/src/a/a.go.golden
diff --git a/internal/lsp/analysis/simplifyrange/simplifyrange.go b/gopls/internal/lsp/analysis/simplifyrange/simplifyrange.go
index c9cb38798..c9cb38798 100644
--- a/internal/lsp/analysis/simplifyrange/simplifyrange.go
+++ b/gopls/internal/lsp/analysis/simplifyrange/simplifyrange.go
diff --git a/gopls/internal/lsp/analysis/simplifyrange/simplifyrange_test.go b/gopls/internal/lsp/analysis/simplifyrange/simplifyrange_test.go
new file mode 100644
index 000000000..fbd57ec2d
--- /dev/null
+++ b/gopls/internal/lsp/analysis/simplifyrange/simplifyrange_test.go
@@ -0,0 +1,17 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package simplifyrange_test
+
+import (
+ "testing"
+
+ "golang.org/x/tools/go/analysis/analysistest"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/simplifyrange"
+)
+
+func Test(t *testing.T) {
+ testdata := analysistest.TestData()
+ analysistest.RunWithSuggestedFixes(t, testdata, simplifyrange.Analyzer, "a")
+}
diff --git a/internal/lsp/analysis/simplifyrange/testdata/src/a/a.go b/gopls/internal/lsp/analysis/simplifyrange/testdata/src/a/a.go
index 49face1e9..49face1e9 100644
--- a/internal/lsp/analysis/simplifyrange/testdata/src/a/a.go
+++ b/gopls/internal/lsp/analysis/simplifyrange/testdata/src/a/a.go
diff --git a/internal/lsp/analysis/simplifyrange/testdata/src/a/a.go.golden b/gopls/internal/lsp/analysis/simplifyrange/testdata/src/a/a.go.golden
index ec8490ab3..ec8490ab3 100644
--- a/internal/lsp/analysis/simplifyrange/testdata/src/a/a.go.golden
+++ b/gopls/internal/lsp/analysis/simplifyrange/testdata/src/a/a.go.golden
diff --git a/internal/lsp/analysis/simplifyslice/simplifyslice.go b/gopls/internal/lsp/analysis/simplifyslice/simplifyslice.go
index da1728e6f..da1728e6f 100644
--- a/internal/lsp/analysis/simplifyslice/simplifyslice.go
+++ b/gopls/internal/lsp/analysis/simplifyslice/simplifyslice.go
diff --git a/gopls/internal/lsp/analysis/simplifyslice/simplifyslice_test.go b/gopls/internal/lsp/analysis/simplifyslice/simplifyslice_test.go
new file mode 100644
index 000000000..41914ba31
--- /dev/null
+++ b/gopls/internal/lsp/analysis/simplifyslice/simplifyslice_test.go
@@ -0,0 +1,22 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package simplifyslice_test
+
+import (
+ "testing"
+
+ "golang.org/x/tools/go/analysis/analysistest"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/simplifyslice"
+ "golang.org/x/tools/internal/typeparams"
+)
+
+func Test(t *testing.T) {
+ testdata := analysistest.TestData()
+ tests := []string{"a"}
+ if typeparams.Enabled {
+ tests = append(tests, "typeparams")
+ }
+ analysistest.RunWithSuggestedFixes(t, testdata, simplifyslice.Analyzer, tests...)
+}
diff --git a/internal/lsp/analysis/simplifyslice/testdata/src/a/a.go b/gopls/internal/lsp/analysis/simplifyslice/testdata/src/a/a.go
index 20792105d..20792105d 100644
--- a/internal/lsp/analysis/simplifyslice/testdata/src/a/a.go
+++ b/gopls/internal/lsp/analysis/simplifyslice/testdata/src/a/a.go
diff --git a/internal/lsp/analysis/simplifyslice/testdata/src/a/a.go.golden b/gopls/internal/lsp/analysis/simplifyslice/testdata/src/a/a.go.golden
index 45c791421..45c791421 100644
--- a/internal/lsp/analysis/simplifyslice/testdata/src/a/a.go.golden
+++ b/gopls/internal/lsp/analysis/simplifyslice/testdata/src/a/a.go.golden
diff --git a/internal/lsp/analysis/simplifyslice/testdata/src/typeparams/typeparams.go b/gopls/internal/lsp/analysis/simplifyslice/testdata/src/typeparams/typeparams.go
index 69db3100a..69db3100a 100644
--- a/internal/lsp/analysis/simplifyslice/testdata/src/typeparams/typeparams.go
+++ b/gopls/internal/lsp/analysis/simplifyslice/testdata/src/typeparams/typeparams.go
diff --git a/internal/lsp/analysis/simplifyslice/testdata/src/typeparams/typeparams.go.golden b/gopls/internal/lsp/analysis/simplifyslice/testdata/src/typeparams/typeparams.go.golden
index 99ca9e447..99ca9e447 100644
--- a/internal/lsp/analysis/simplifyslice/testdata/src/typeparams/typeparams.go.golden
+++ b/gopls/internal/lsp/analysis/simplifyslice/testdata/src/typeparams/typeparams.go.golden
diff --git a/gopls/internal/lsp/analysis/stubmethods/stubmethods.go b/gopls/internal/lsp/analysis/stubmethods/stubmethods.go
new file mode 100644
index 000000000..e0d2c692c
--- /dev/null
+++ b/gopls/internal/lsp/analysis/stubmethods/stubmethods.go
@@ -0,0 +1,418 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package stubmethods
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/format"
+ "go/token"
+ "go/types"
+ "strconv"
+ "strings"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/inspect"
+ "golang.org/x/tools/go/ast/astutil"
+ "golang.org/x/tools/internal/analysisinternal"
+ "golang.org/x/tools/internal/typesinternal"
+)
+
+const Doc = `stub methods analyzer
+
+This analyzer generates method stubs for concrete types
+in order to implement a target interface`
+
+var Analyzer = &analysis.Analyzer{
+ Name: "stubmethods",
+ Doc: Doc,
+ Requires: []*analysis.Analyzer{inspect.Analyzer},
+ Run: run,
+ RunDespiteErrors: true,
+}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+ for _, err := range pass.TypeErrors {
+ ifaceErr := strings.Contains(err.Msg, "missing method") || strings.HasPrefix(err.Msg, "cannot convert")
+ if !ifaceErr {
+ continue
+ }
+ var file *ast.File
+ for _, f := range pass.Files {
+ if f.Pos() <= err.Pos && err.Pos < f.End() {
+ file = f
+ break
+ }
+ }
+ if file == nil {
+ continue
+ }
+ // Get the end position of the error.
+ _, _, endPos, ok := typesinternal.ReadGo116ErrorData(err)
+ if !ok {
+ var buf bytes.Buffer
+ if err := format.Node(&buf, pass.Fset, file); err != nil {
+ continue
+ }
+ endPos = analysisinternal.TypeErrorEndPos(pass.Fset, buf.Bytes(), err.Pos)
+ }
+ path, _ := astutil.PathEnclosingInterval(file, err.Pos, endPos)
+ si := GetStubInfo(pass.Fset, pass.TypesInfo, path, err.Pos)
+ if si == nil {
+ continue
+ }
+ qf := RelativeToFiles(si.Concrete.Obj().Pkg(), file, nil, nil)
+ pass.Report(analysis.Diagnostic{
+ Pos: err.Pos,
+ End: endPos,
+ Message: fmt.Sprintf("Implement %s", types.TypeString(si.Interface.Type(), qf)),
+ })
+ }
+ return nil, nil
+}
+
+// StubInfo represents a concrete type
+// that wants to stub out an interface type
+type StubInfo struct {
+ // Interface is the interface that the client wants to implement.
+ // When the interface is defined, the underlying object will be a TypeName.
+ // Note that we keep track of types.Object instead of types.Type in order
+ // to keep a reference to the declaring object's package and the ast file
+ // in the case where the concrete type file requires a new import that happens to be renamed
+ // in the interface file.
+ // TODO(marwan-at-work): implement interface literals.
+ Fset *token.FileSet // the FileSet used to type-check the types below
+ Interface *types.TypeName
+ Concrete *types.Named
+ Pointer bool
+}
+
+// GetStubInfo determines whether the "missing method error"
+// can be used to deduced what the concrete and interface types are.
+//
+// TODO(adonovan): this function (and its following 5 helpers) tries
+// to deduce a pair of (concrete, interface) types that are related by
+// an assignment, either explictly or through a return statement or
+// function call. This is essentially what the refactor/satisfy does,
+// more generally. Refactor to share logic, after auditing 'satisfy'
+// for safety on ill-typed code.
+func GetStubInfo(fset *token.FileSet, ti *types.Info, path []ast.Node, pos token.Pos) *StubInfo {
+ for _, n := range path {
+ switch n := n.(type) {
+ case *ast.ValueSpec:
+ return fromValueSpec(fset, ti, n, pos)
+ case *ast.ReturnStmt:
+ // An error here may not indicate a real error the user should know about, but it may.
+ // Therefore, it would be best to log it out for debugging/reporting purposes instead of ignoring
+ // it. However, event.Log takes a context which is not passed via the analysis package.
+ // TODO(marwan-at-work): properly log this error.
+ si, _ := fromReturnStmt(fset, ti, pos, path, n)
+ return si
+ case *ast.AssignStmt:
+ return fromAssignStmt(fset, ti, n, pos)
+ case *ast.CallExpr:
+ // Note that some call expressions don't carry the interface type
+ // because they don't point to a function or method declaration elsewhere.
+ // For eaxmple, "var Interface = (*Concrete)(nil)". In that case, continue
+ // this loop to encounter other possibilities such as *ast.ValueSpec or others.
+ si := fromCallExpr(fset, ti, pos, n)
+ if si != nil {
+ return si
+ }
+ }
+ }
+ return nil
+}
+
+// fromCallExpr tries to find an *ast.CallExpr's function declaration and
+// analyzes a function call's signature against the passed in parameter to deduce
+// the concrete and interface types.
+func fromCallExpr(fset *token.FileSet, ti *types.Info, pos token.Pos, ce *ast.CallExpr) *StubInfo {
+ paramIdx := -1
+ for i, p := range ce.Args {
+ if pos >= p.Pos() && pos <= p.End() {
+ paramIdx = i
+ break
+ }
+ }
+ if paramIdx == -1 {
+ return nil
+ }
+ p := ce.Args[paramIdx]
+ concObj, pointer := concreteType(p, ti)
+ if concObj == nil || concObj.Obj().Pkg() == nil {
+ return nil
+ }
+ tv, ok := ti.Types[ce.Fun]
+ if !ok {
+ return nil
+ }
+ sig, ok := tv.Type.(*types.Signature)
+ if !ok {
+ return nil
+ }
+ sigVar := sig.Params().At(paramIdx)
+ iface := ifaceObjFromType(sigVar.Type())
+ if iface == nil {
+ return nil
+ }
+ return &StubInfo{
+ Fset: fset,
+ Concrete: concObj,
+ Pointer: pointer,
+ Interface: iface,
+ }
+}
+
+// fromReturnStmt analyzes a "return" statement to extract
+// a concrete type that is trying to be returned as an interface type.
+//
+// For example, func() io.Writer { return myType{} }
+// would return StubInfo with the interface being io.Writer and the concrete type being myType{}.
+func fromReturnStmt(fset *token.FileSet, ti *types.Info, pos token.Pos, path []ast.Node, rs *ast.ReturnStmt) (*StubInfo, error) {
+ returnIdx := -1
+ for i, r := range rs.Results {
+ if pos >= r.Pos() && pos <= r.End() {
+ returnIdx = i
+ }
+ }
+ if returnIdx == -1 {
+ return nil, fmt.Errorf("pos %d not within return statement bounds: [%d-%d]", pos, rs.Pos(), rs.End())
+ }
+ concObj, pointer := concreteType(rs.Results[returnIdx], ti)
+ if concObj == nil || concObj.Obj().Pkg() == nil {
+ return nil, nil
+ }
+ ef := enclosingFunction(path, ti)
+ if ef == nil {
+ return nil, fmt.Errorf("could not find the enclosing function of the return statement")
+ }
+ iface := ifaceType(ef.Results.List[returnIdx].Type, ti)
+ if iface == nil {
+ return nil, nil
+ }
+ return &StubInfo{
+ Fset: fset,
+ Concrete: concObj,
+ Pointer: pointer,
+ Interface: iface,
+ }, nil
+}
+
+// fromValueSpec returns *StubInfo from a variable declaration such as
+// var x io.Writer = &T{}
+func fromValueSpec(fset *token.FileSet, ti *types.Info, vs *ast.ValueSpec, pos token.Pos) *StubInfo {
+ var idx int
+ for i, vs := range vs.Values {
+ if pos >= vs.Pos() && pos <= vs.End() {
+ idx = i
+ break
+ }
+ }
+
+ valueNode := vs.Values[idx]
+ ifaceNode := vs.Type
+ callExp, ok := valueNode.(*ast.CallExpr)
+ // if the ValueSpec is `var _ = myInterface(...)`
+ // as opposed to `var _ myInterface = ...`
+ if ifaceNode == nil && ok && len(callExp.Args) == 1 {
+ ifaceNode = callExp.Fun
+ valueNode = callExp.Args[0]
+ }
+ concObj, pointer := concreteType(valueNode, ti)
+ if concObj == nil || concObj.Obj().Pkg() == nil {
+ return nil
+ }
+ ifaceObj := ifaceType(ifaceNode, ti)
+ if ifaceObj == nil {
+ return nil
+ }
+ return &StubInfo{
+ Fset: fset,
+ Concrete: concObj,
+ Interface: ifaceObj,
+ Pointer: pointer,
+ }
+}
+
+// fromAssignStmt returns *StubInfo from a variable re-assignment such as
+// var x io.Writer
+// x = &T{}
+func fromAssignStmt(fset *token.FileSet, ti *types.Info, as *ast.AssignStmt, pos token.Pos) *StubInfo {
+ idx := -1
+ var lhs, rhs ast.Expr
+ // Given a re-assignment interface conversion error,
+ // the compiler error shows up on the right hand side of the expression.
+ // For example, x = &T{} where x is io.Writer highlights the error
+ // under "&T{}" and not "x".
+ for i, hs := range as.Rhs {
+ if pos >= hs.Pos() && pos <= hs.End() {
+ idx = i
+ break
+ }
+ }
+ if idx == -1 {
+ return nil
+ }
+ // Technically, this should never happen as
+ // we would get a "cannot assign N values to M variables"
+ // before we get an interface conversion error. Nonetheless,
+ // guard against out of range index errors.
+ if idx >= len(as.Lhs) {
+ return nil
+ }
+ lhs, rhs = as.Lhs[idx], as.Rhs[idx]
+ ifaceObj := ifaceType(lhs, ti)
+ if ifaceObj == nil {
+ return nil
+ }
+ concType, pointer := concreteType(rhs, ti)
+ if concType == nil || concType.Obj().Pkg() == nil {
+ return nil
+ }
+ return &StubInfo{
+ Fset: fset,
+ Concrete: concType,
+ Interface: ifaceObj,
+ Pointer: pointer,
+ }
+}
+
+// RelativeToFiles returns a types.Qualifier that formats package
+// names according to the import environments of the files that define
+// the concrete type and the interface type. (Only the imports of the
+// latter file are provided.)
+//
+// This is similar to types.RelativeTo except if a file imports the package with a different name,
+// then it will use it. And if the file does import the package but it is ignored,
+// then it will return the original name. It also prefers package names in importEnv in case
+// an import is missing from concFile but is present among importEnv.
+//
+// Additionally, if missingImport is not nil, the function will be called whenever the concFile
+// is presented with a package that is not imported. This is useful so that as types.TypeString is
+// formatting a function signature, it is identifying packages that will need to be imported when
+// stubbing an interface.
+//
+// TODO(rfindley): investigate if this can be merged with source.Qualifier.
+func RelativeToFiles(concPkg *types.Package, concFile *ast.File, ifaceImports []*ast.ImportSpec, missingImport func(name, path string)) types.Qualifier {
+ return func(other *types.Package) string {
+ if other == concPkg {
+ return ""
+ }
+
+ // Check if the concrete file already has the given import,
+ // if so return the default package name or the renamed import statement.
+ for _, imp := range concFile.Imports {
+ impPath, _ := strconv.Unquote(imp.Path.Value)
+ isIgnored := imp.Name != nil && (imp.Name.Name == "." || imp.Name.Name == "_")
+ // TODO(adonovan): this comparison disregards a vendor prefix in 'other'.
+ if impPath == other.Path() && !isIgnored {
+ importName := other.Name()
+ if imp.Name != nil {
+ importName = imp.Name.Name
+ }
+ return importName
+ }
+ }
+
+ // If the concrete file does not have the import, check if the package
+ // is renamed in the interface file and prefer that.
+ var importName string
+ for _, imp := range ifaceImports {
+ impPath, _ := strconv.Unquote(imp.Path.Value)
+ isIgnored := imp.Name != nil && (imp.Name.Name == "." || imp.Name.Name == "_")
+ // TODO(adonovan): this comparison disregards a vendor prefix in 'other'.
+ if impPath == other.Path() && !isIgnored {
+ if imp.Name != nil && imp.Name.Name != concPkg.Name() {
+ importName = imp.Name.Name
+ }
+ break
+ }
+ }
+
+ if missingImport != nil {
+ missingImport(importName, other.Path())
+ }
+
+ // Up until this point, importName must stay empty when calling missingImport,
+ // otherwise we'd end up with `import time "time"` which doesn't look idiomatic.
+ if importName == "" {
+ importName = other.Name()
+ }
+ return importName
+ }
+}
+
+// ifaceType will try to extract the types.Object that defines
+// the interface given the ast.Expr where the "missing method"
+// or "conversion" errors happen.
+func ifaceType(n ast.Expr, ti *types.Info) *types.TypeName {
+ tv, ok := ti.Types[n]
+ if !ok {
+ return nil
+ }
+ return ifaceObjFromType(tv.Type)
+}
+
+func ifaceObjFromType(t types.Type) *types.TypeName {
+ named, ok := t.(*types.Named)
+ if !ok {
+ return nil
+ }
+ _, ok = named.Underlying().(*types.Interface)
+ if !ok {
+ return nil
+ }
+ // Interfaces defined in the "builtin" package return nil a Pkg().
+ // But they are still real interfaces that we need to make a special case for.
+ // Therefore, protect gopls from panicking if a new interface type was added in the future.
+ if named.Obj().Pkg() == nil && named.Obj().Name() != "error" {
+ return nil
+ }
+ return named.Obj()
+}
+
+// concreteType tries to extract the *types.Named that defines
+// the concrete type given the ast.Expr where the "missing method"
+// or "conversion" errors happened. If the concrete type is something
+// that cannot have methods defined on it (such as basic types), this
+// method will return a nil *types.Named. The second return parameter
+// is a boolean that indicates whether the concreteType was defined as a
+// pointer or value.
+func concreteType(n ast.Expr, ti *types.Info) (*types.Named, bool) {
+ tv, ok := ti.Types[n]
+ if !ok {
+ return nil, false
+ }
+ typ := tv.Type
+ ptr, isPtr := typ.(*types.Pointer)
+ if isPtr {
+ typ = ptr.Elem()
+ }
+ named, ok := typ.(*types.Named)
+ if !ok {
+ return nil, false
+ }
+ return named, isPtr
+}
+
+// enclosingFunction returns the signature and type of the function
+// enclosing the given position.
+func enclosingFunction(path []ast.Node, info *types.Info) *ast.FuncType {
+ for _, node := range path {
+ switch t := node.(type) {
+ case *ast.FuncDecl:
+ if _, ok := info.Defs[t.Name]; ok {
+ return t.Type
+ }
+ case *ast.FuncLit:
+ if _, ok := info.Types[t]; ok {
+ return t.Type
+ }
+ }
+ }
+ return nil
+}
diff --git a/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/a.go b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/a.go
new file mode 100644
index 000000000..c5d8a2d78
--- /dev/null
+++ b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/a.go
@@ -0,0 +1,28 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package undeclared
+
+func x() int {
+ var z int
+ z = y // want "(undeclared name|undefined): y"
+
+ if z == m { // want "(undeclared name|undefined): m"
+ z = 1
+ }
+
+ if z == 1 {
+ z = 1
+ } else if z == n+1 { // want "(undeclared name|undefined): n"
+ z = 1
+ }
+
+ switch z {
+ case 10:
+ z = 1
+ case a: // want "(undeclared name|undefined): a"
+ z = 1
+ }
+ return z
+}
diff --git a/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/channels.go b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/channels.go
new file mode 100644
index 000000000..76c7ba685
--- /dev/null
+++ b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/channels.go
@@ -0,0 +1,13 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package undeclared
+
+func channels(s string) {
+ undefinedChannels(c()) // want "(undeclared name|undefined): undefinedChannels"
+}
+
+func c() (<-chan string, chan string) {
+ return make(<-chan string), make(chan string)
+}
diff --git a/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/consecutive_params.go b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/consecutive_params.go
new file mode 100644
index 000000000..73beace10
--- /dev/null
+++ b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/consecutive_params.go
@@ -0,0 +1,10 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package undeclared
+
+func consecutiveParams() {
+ var s string
+ undefinedConsecutiveParams(s, s) // want "(undeclared name|undefined): undefinedConsecutiveParams"
+}
diff --git a/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/error_param.go b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/error_param.go
new file mode 100644
index 000000000..5de925411
--- /dev/null
+++ b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/error_param.go
@@ -0,0 +1,10 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package undeclared
+
+func errorParam() {
+ var err error
+ undefinedErrorParam(err) // want "(undeclared name|undefined): undefinedErrorParam"
+}
diff --git a/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/literals.go b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/literals.go
new file mode 100644
index 000000000..c62174ec9
--- /dev/null
+++ b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/literals.go
@@ -0,0 +1,11 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package undeclared
+
+type T struct{}
+
+func literals() {
+ undefinedLiterals("hey compiler", T{}, &T{}) // want "(undeclared name|undefined): undefinedLiterals"
+}
diff --git a/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/operation.go b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/operation.go
new file mode 100644
index 000000000..9396da4bd
--- /dev/null
+++ b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/operation.go
@@ -0,0 +1,11 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package undeclared
+
+import "time"
+
+func operation() {
+ undefinedOperation(10 * time.Second) // want "(undeclared name|undefined): undefinedOperation"
+}
diff --git a/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/selector.go b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/selector.go
new file mode 100644
index 000000000..a4ed290d4
--- /dev/null
+++ b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/selector.go
@@ -0,0 +1,10 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package undeclared
+
+func selector() {
+ m := map[int]bool{}
+ undefinedSelector(m[1]) // want "(undeclared name|undefined): undefinedSelector"
+}
diff --git a/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/slice.go b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/slice.go
new file mode 100644
index 000000000..5cde299ad
--- /dev/null
+++ b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/slice.go
@@ -0,0 +1,9 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package undeclared
+
+func slice() {
+ undefinedSlice([]int{1, 2}) // want "(undeclared name|undefined): undefinedSlice"
+}
diff --git a/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/tuple.go b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/tuple.go
new file mode 100644
index 000000000..9e91c59c2
--- /dev/null
+++ b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/tuple.go
@@ -0,0 +1,13 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package undeclared
+
+func tuple() {
+ undefinedTuple(b()) // want "(undeclared name|undefined): undefinedTuple"
+}
+
+func b() (string, error) {
+ return "", nil
+}
diff --git a/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/unique_params.go b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/unique_params.go
new file mode 100644
index 000000000..5b4241425
--- /dev/null
+++ b/gopls/internal/lsp/analysis/undeclaredname/testdata/src/a/unique_params.go
@@ -0,0 +1,11 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package undeclared
+
+func uniqueArguments() {
+ var s string
+ var i int
+ undefinedUniqueArguments(s, i, s) // want "(undeclared name|undefined): undefinedUniqueArguments"
+}
diff --git a/gopls/internal/lsp/analysis/undeclaredname/undeclared.go b/gopls/internal/lsp/analysis/undeclaredname/undeclared.go
new file mode 100644
index 000000000..043979408
--- /dev/null
+++ b/gopls/internal/lsp/analysis/undeclaredname/undeclared.go
@@ -0,0 +1,347 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package undeclaredname defines an Analyzer that applies suggested fixes
+// to errors of the type "undeclared name: %s".
+package undeclaredname
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/format"
+ "go/token"
+ "go/types"
+ "strings"
+ "unicode"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/ast/astutil"
+ "golang.org/x/tools/gopls/internal/lsp/safetoken"
+ "golang.org/x/tools/internal/analysisinternal"
+)
+
+const Doc = `suggested fixes for "undeclared name: <>"
+
+This checker provides suggested fixes for type errors of the
+type "undeclared name: <>". It will either insert a new statement,
+such as:
+
+"<> := "
+
+or a new function declaration, such as:
+
+func <>(inferred parameters) {
+ panic("implement me!")
+}
+`
+
+var Analyzer = &analysis.Analyzer{
+ Name: "undeclaredname",
+ Doc: Doc,
+ Requires: []*analysis.Analyzer{},
+ Run: run,
+ RunDespiteErrors: true,
+}
+
+// The prefix for this error message changed in Go 1.20.
+var undeclaredNamePrefixes = []string{"undeclared name: ", "undefined: "}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+ for _, err := range pass.TypeErrors {
+ runForError(pass, err)
+ }
+ return nil, nil
+}
+
+func runForError(pass *analysis.Pass, err types.Error) {
+ var name string
+ for _, prefix := range undeclaredNamePrefixes {
+ if !strings.HasPrefix(err.Msg, prefix) {
+ continue
+ }
+ name = strings.TrimPrefix(err.Msg, prefix)
+ }
+ if name == "" {
+ return
+ }
+ var file *ast.File
+ for _, f := range pass.Files {
+ if f.Pos() <= err.Pos && err.Pos < f.End() {
+ file = f
+ break
+ }
+ }
+ if file == nil {
+ return
+ }
+
+ // Get the path for the relevant range.
+ path, _ := astutil.PathEnclosingInterval(file, err.Pos, err.Pos)
+ if len(path) < 2 {
+ return
+ }
+ ident, ok := path[0].(*ast.Ident)
+ if !ok || ident.Name != name {
+ return
+ }
+
+ // Undeclared quick fixes only work in function bodies.
+ inFunc := false
+ for i := range path {
+ if _, inFunc = path[i].(*ast.FuncDecl); inFunc {
+ if i == 0 {
+ return
+ }
+ if _, isBody := path[i-1].(*ast.BlockStmt); !isBody {
+ return
+ }
+ break
+ }
+ }
+ if !inFunc {
+ return
+ }
+ // Skip selector expressions because it might be too complex
+ // to try and provide a suggested fix for fields and methods.
+ if _, ok := path[1].(*ast.SelectorExpr); ok {
+ return
+ }
+ tok := pass.Fset.File(file.Pos())
+ if tok == nil {
+ return
+ }
+ offset := safetoken.StartPosition(pass.Fset, err.Pos).Offset
+ end := tok.Pos(offset + len(name)) // TODO(adonovan): dubious! err.Pos + len(name)??
+ pass.Report(analysis.Diagnostic{
+ Pos: err.Pos,
+ End: end,
+ Message: err.Msg,
+ })
+}
+
+func SuggestedFix(fset *token.FileSet, start, end token.Pos, content []byte, file *ast.File, pkg *types.Package, info *types.Info) (*analysis.SuggestedFix, error) {
+ pos := start // don't use the end
+ path, _ := astutil.PathEnclosingInterval(file, pos, pos)
+ if len(path) < 2 {
+ return nil, fmt.Errorf("no expression found")
+ }
+ ident, ok := path[0].(*ast.Ident)
+ if !ok {
+ return nil, fmt.Errorf("no identifier found")
+ }
+
+ // Check for a possible call expression, in which case we should add a
+ // new function declaration.
+ if len(path) > 1 {
+ if _, ok := path[1].(*ast.CallExpr); ok {
+ return newFunctionDeclaration(path, file, pkg, info, fset)
+ }
+ }
+
+ // Get the place to insert the new statement.
+ insertBeforeStmt := analysisinternal.StmtToInsertVarBefore(path)
+ if insertBeforeStmt == nil {
+ return nil, fmt.Errorf("could not locate insertion point")
+ }
+
+ insertBefore := safetoken.StartPosition(fset, insertBeforeStmt.Pos()).Offset
+
+ // Get the indent to add on the line after the new statement.
+ // Since this will have a parse error, we can not use format.Source().
+ contentBeforeStmt, indent := content[:insertBefore], "\n"
+ if nl := bytes.LastIndex(contentBeforeStmt, []byte("\n")); nl != -1 {
+ indent = string(contentBeforeStmt[nl:])
+ }
+
+ // Create the new local variable statement.
+ newStmt := fmt.Sprintf("%s := %s", ident.Name, indent)
+ return &analysis.SuggestedFix{
+ Message: fmt.Sprintf("Create variable \"%s\"", ident.Name),
+ TextEdits: []analysis.TextEdit{{
+ Pos: insertBeforeStmt.Pos(),
+ End: insertBeforeStmt.Pos(),
+ NewText: []byte(newStmt),
+ }},
+ }, nil
+}
+
+func newFunctionDeclaration(path []ast.Node, file *ast.File, pkg *types.Package, info *types.Info, fset *token.FileSet) (*analysis.SuggestedFix, error) {
+ if len(path) < 3 {
+ return nil, fmt.Errorf("unexpected set of enclosing nodes: %v", path)
+ }
+ ident, ok := path[0].(*ast.Ident)
+ if !ok {
+ return nil, fmt.Errorf("no name for function declaration %v (%T)", path[0], path[0])
+ }
+ call, ok := path[1].(*ast.CallExpr)
+ if !ok {
+ return nil, fmt.Errorf("no call expression found %v (%T)", path[1], path[1])
+ }
+
+ // Find the enclosing function, so that we can add the new declaration
+ // below.
+ var enclosing *ast.FuncDecl
+ for _, n := range path {
+ if n, ok := n.(*ast.FuncDecl); ok {
+ enclosing = n
+ break
+ }
+ }
+ // TODO(rstambler): Support the situation when there is no enclosing
+ // function.
+ if enclosing == nil {
+ return nil, fmt.Errorf("no enclosing function found: %v", path)
+ }
+
+ pos := enclosing.End()
+
+ var paramNames []string
+ var paramTypes []types.Type
+ // keep track of all param names to later ensure uniqueness
+ nameCounts := map[string]int{}
+ for _, arg := range call.Args {
+ typ := info.TypeOf(arg)
+ if typ == nil {
+ return nil, fmt.Errorf("unable to determine type for %s", arg)
+ }
+
+ switch t := typ.(type) {
+ // this is the case where another function call returning multiple
+ // results is used as an argument
+ case *types.Tuple:
+ n := t.Len()
+ for i := 0; i < n; i++ {
+ name := typeToArgName(t.At(i).Type())
+ nameCounts[name]++
+
+ paramNames = append(paramNames, name)
+ paramTypes = append(paramTypes, types.Default(t.At(i).Type()))
+ }
+
+ default:
+ // does the argument have a name we can reuse?
+ // only happens in case of a *ast.Ident
+ var name string
+ if ident, ok := arg.(*ast.Ident); ok {
+ name = ident.Name
+ }
+
+ if name == "" {
+ name = typeToArgName(typ)
+ }
+
+ nameCounts[name]++
+
+ paramNames = append(paramNames, name)
+ paramTypes = append(paramTypes, types.Default(typ))
+ }
+ }
+
+ for n, c := range nameCounts {
+ // Any names we saw more than once will need a unique suffix added
+ // on. Reset the count to 1 to act as the suffix for the first
+ // occurrence of that name.
+ if c >= 2 {
+ nameCounts[n] = 1
+ } else {
+ delete(nameCounts, n)
+ }
+ }
+
+ params := &ast.FieldList{}
+
+ for i, name := range paramNames {
+ if suffix, repeats := nameCounts[name]; repeats {
+ nameCounts[name]++
+ name = fmt.Sprintf("%s%d", name, suffix)
+ }
+
+ // only worth checking after previous param in the list
+ if i > 0 {
+ // if type of parameter at hand is the same as the previous one,
+ // add it to the previous param list of identifiers so to have:
+ // (s1, s2 string)
+ // and not
+ // (s1 string, s2 string)
+ if paramTypes[i] == paramTypes[i-1] {
+ params.List[len(params.List)-1].Names = append(params.List[len(params.List)-1].Names, ast.NewIdent(name))
+ continue
+ }
+ }
+
+ params.List = append(params.List, &ast.Field{
+ Names: []*ast.Ident{
+ ast.NewIdent(name),
+ },
+ Type: analysisinternal.TypeExpr(file, pkg, paramTypes[i]),
+ })
+ }
+
+ decl := &ast.FuncDecl{
+ Name: ast.NewIdent(ident.Name),
+ Type: &ast.FuncType{
+ Params: params,
+ // TODO(rstambler): Also handle result parameters here.
+ },
+ Body: &ast.BlockStmt{
+ List: []ast.Stmt{
+ &ast.ExprStmt{
+ X: &ast.CallExpr{
+ Fun: ast.NewIdent("panic"),
+ Args: []ast.Expr{
+ &ast.BasicLit{
+ Value: `"unimplemented"`,
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+
+ b := bytes.NewBufferString("\n\n")
+ if err := format.Node(b, fset, decl); err != nil {
+ return nil, err
+ }
+ return &analysis.SuggestedFix{
+ Message: fmt.Sprintf("Create function \"%s\"", ident.Name),
+ TextEdits: []analysis.TextEdit{{
+ Pos: pos,
+ End: pos,
+ NewText: b.Bytes(),
+ }},
+ }, nil
+}
+func typeToArgName(ty types.Type) string {
+ s := types.Default(ty).String()
+
+ switch t := ty.(type) {
+ case *types.Basic:
+ // use first letter in type name for basic types
+ return s[0:1]
+ case *types.Slice:
+ // use element type to decide var name for slices
+ return typeToArgName(t.Elem())
+ case *types.Array:
+ // use element type to decide var name for arrays
+ return typeToArgName(t.Elem())
+ case *types.Chan:
+ return "ch"
+ }
+
+ s = strings.TrimFunc(s, func(r rune) bool {
+ return !unicode.IsLetter(r)
+ })
+
+ if s == "error" {
+ return "err"
+ }
+
+ // remove package (if present)
+ // and make first letter lowercase
+ a := []rune(s[strings.LastIndexByte(s, '.')+1:])
+ a[0] = unicode.ToLower(a[0])
+ return string(a)
+}
diff --git a/gopls/internal/lsp/analysis/undeclaredname/undeclared_test.go b/gopls/internal/lsp/analysis/undeclaredname/undeclared_test.go
new file mode 100644
index 000000000..306c3f039
--- /dev/null
+++ b/gopls/internal/lsp/analysis/undeclaredname/undeclared_test.go
@@ -0,0 +1,17 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package undeclaredname_test
+
+import (
+ "testing"
+
+ "golang.org/x/tools/go/analysis/analysistest"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/undeclaredname"
+)
+
+func Test(t *testing.T) {
+ testdata := analysistest.TestData()
+ analysistest.Run(t, testdata, undeclaredname.Analyzer, "a")
+}
diff --git a/internal/lsp/analysis/unusedparams/testdata/src/a/a.go b/gopls/internal/lsp/analysis/unusedparams/testdata/src/a/a.go
index 23e4122c4..23e4122c4 100644
--- a/internal/lsp/analysis/unusedparams/testdata/src/a/a.go
+++ b/gopls/internal/lsp/analysis/unusedparams/testdata/src/a/a.go
diff --git a/internal/lsp/analysis/unusedparams/testdata/src/a/a.go.golden b/gopls/internal/lsp/analysis/unusedparams/testdata/src/a/a.go.golden
index e28a6bdea..e28a6bdea 100644
--- a/internal/lsp/analysis/unusedparams/testdata/src/a/a.go.golden
+++ b/gopls/internal/lsp/analysis/unusedparams/testdata/src/a/a.go.golden
diff --git a/internal/lsp/analysis/unusedparams/testdata/src/typeparams/typeparams.go b/gopls/internal/lsp/analysis/unusedparams/testdata/src/typeparams/typeparams.go
index 93af2681b..93af2681b 100644
--- a/internal/lsp/analysis/unusedparams/testdata/src/typeparams/typeparams.go
+++ b/gopls/internal/lsp/analysis/unusedparams/testdata/src/typeparams/typeparams.go
diff --git a/internal/lsp/analysis/unusedparams/testdata/src/typeparams/typeparams.go.golden b/gopls/internal/lsp/analysis/unusedparams/testdata/src/typeparams/typeparams.go.golden
index c86bf289a..c86bf289a 100644
--- a/internal/lsp/analysis/unusedparams/testdata/src/typeparams/typeparams.go.golden
+++ b/gopls/internal/lsp/analysis/unusedparams/testdata/src/typeparams/typeparams.go.golden
diff --git a/internal/lsp/analysis/unusedparams/unusedparams.go b/gopls/internal/lsp/analysis/unusedparams/unusedparams.go
index 4c933c8fb..4c933c8fb 100644
--- a/internal/lsp/analysis/unusedparams/unusedparams.go
+++ b/gopls/internal/lsp/analysis/unusedparams/unusedparams.go
diff --git a/gopls/internal/lsp/analysis/unusedparams/unusedparams_test.go b/gopls/internal/lsp/analysis/unusedparams/unusedparams_test.go
new file mode 100644
index 000000000..fdd43b821
--- /dev/null
+++ b/gopls/internal/lsp/analysis/unusedparams/unusedparams_test.go
@@ -0,0 +1,22 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unusedparams_test
+
+import (
+ "testing"
+
+ "golang.org/x/tools/go/analysis/analysistest"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/unusedparams"
+ "golang.org/x/tools/internal/typeparams"
+)
+
+func Test(t *testing.T) {
+ testdata := analysistest.TestData()
+ tests := []string{"a"}
+ if typeparams.Enabled {
+ tests = append(tests, "typeparams")
+ }
+ analysistest.RunWithSuggestedFixes(t, testdata, unusedparams.Analyzer, tests...)
+}
diff --git a/gopls/internal/lsp/analysis/unusedvariable/testdata/src/assign/a.go b/gopls/internal/lsp/analysis/unusedvariable/testdata/src/assign/a.go
new file mode 100644
index 000000000..aa9f46e5b
--- /dev/null
+++ b/gopls/internal/lsp/analysis/unusedvariable/testdata/src/assign/a.go
@@ -0,0 +1,74 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+import (
+ "fmt"
+ "os"
+)
+
+type A struct {
+ b int
+}
+
+func singleAssignment() {
+ v := "s" // want `v declared (and|but) not used`
+
+ s := []int{ // want `s declared (and|but) not used`
+ 1,
+ 2,
+ }
+
+ a := func(s string) bool { // want `a declared (and|but) not used`
+ return false
+ }
+
+ if 1 == 1 {
+ s := "v" // want `s declared (and|but) not used`
+ }
+
+ panic("I should survive")
+}
+
+func noOtherStmtsInBlock() {
+ v := "s" // want `v declared (and|but) not used`
+}
+
+func partOfMultiAssignment() {
+ f, err := os.Open("file") // want `f declared (and|but) not used`
+ panic(err)
+}
+
+func sideEffects(cBool chan bool, cInt chan int) {
+ b := <-c // want `b declared (and|but) not used`
+ s := fmt.Sprint("") // want `s declared (and|but) not used`
+ a := A{ // want `a declared (and|but) not used`
+ b: func() int {
+ return 1
+ }(),
+ }
+ c := A{<-cInt} // want `c declared (and|but) not used`
+ d := fInt() + <-cInt // want `d declared (and|but) not used`
+ e := fBool() && <-cBool // want `e declared (and|but) not used`
+ f := map[int]int{ // want `f declared (and|but) not used`
+ fInt(): <-cInt,
+ }
+ g := []int{<-cInt} // want `g declared (and|but) not used`
+ h := func(s string) {} // want `h declared (and|but) not used`
+ i := func(s string) {}() // want `i declared (and|but) not used`
+}
+
+func commentAbove() {
+ // v is a variable
+ v := "s" // want `v declared (and|but) not used`
+}
+
+func fBool() bool {
+ return true
+}
+
+func fInt() int {
+ return 1
+}
diff --git a/gopls/internal/lsp/analysis/unusedvariable/testdata/src/assign/a.go.golden b/gopls/internal/lsp/analysis/unusedvariable/testdata/src/assign/a.go.golden
new file mode 100644
index 000000000..18173ce0b
--- /dev/null
+++ b/gopls/internal/lsp/analysis/unusedvariable/testdata/src/assign/a.go.golden
@@ -0,0 +1,59 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+import (
+ "fmt"
+ "os"
+)
+
+type A struct {
+ b int
+}
+
+func singleAssignment() {
+ if 1 == 1 {
+ }
+
+ panic("I should survive")
+}
+
+func noOtherStmtsInBlock() {
+}
+
+func partOfMultiAssignment() {
+ _, err := os.Open("file") // want `f declared (and|but) not used`
+ panic(err)
+}
+
+func sideEffects(cBool chan bool, cInt chan int) {
+ <-c // want `b declared (and|but) not used`
+ fmt.Sprint("") // want `s declared (and|but) not used`
+ A{ // want `a declared (and|but) not used`
+ b: func() int {
+ return 1
+ }(),
+ }
+ A{<-cInt} // want `c declared (and|but) not used`
+ fInt() + <-cInt // want `d declared (and|but) not used`
+ fBool() && <-cBool // want `e declared (and|but) not used`
+ map[int]int{ // want `f declared (and|but) not used`
+ fInt(): <-cInt,
+ }
+ []int{<-cInt} // want `g declared (and|but) not used`
+ func(s string) {}() // want `i declared (and|but) not used`
+}
+
+func commentAbove() {
+ // v is a variable
+}
+
+func fBool() bool {
+ return true
+}
+
+func fInt() int {
+ return 1
+}
diff --git a/gopls/internal/lsp/analysis/unusedvariable/testdata/src/decl/a.go b/gopls/internal/lsp/analysis/unusedvariable/testdata/src/decl/a.go
new file mode 100644
index 000000000..8e843024a
--- /dev/null
+++ b/gopls/internal/lsp/analysis/unusedvariable/testdata/src/decl/a.go
@@ -0,0 +1,30 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package decl
+
+func a() {
+ var b, c bool // want `b declared (and|but) not used`
+ panic(c)
+
+ if 1 == 1 {
+ var s string // want `s declared (and|but) not used`
+ }
+}
+
+func b() {
+ // b is a variable
+ var b bool // want `b declared (and|but) not used`
+}
+
+func c() {
+ var (
+ d string
+
+ // some comment for c
+ c bool // want `c declared (and|but) not used`
+ )
+
+ panic(d)
+}
diff --git a/gopls/internal/lsp/analysis/unusedvariable/testdata/src/decl/a.go.golden b/gopls/internal/lsp/analysis/unusedvariable/testdata/src/decl/a.go.golden
new file mode 100644
index 000000000..6ed97332e
--- /dev/null
+++ b/gopls/internal/lsp/analysis/unusedvariable/testdata/src/decl/a.go.golden
@@ -0,0 +1,24 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package decl
+
+func a() {
+ var c bool // want `b declared (and|but) not used`
+ panic(c)
+
+ if 1 == 1 {
+ }
+}
+
+func b() {
+ // b is a variable
+}
+
+func c() {
+ var (
+ d string
+ )
+ panic(d)
+}
diff --git a/gopls/internal/lsp/analysis/unusedvariable/unusedvariable.go b/gopls/internal/lsp/analysis/unusedvariable/unusedvariable.go
new file mode 100644
index 000000000..904016be7
--- /dev/null
+++ b/gopls/internal/lsp/analysis/unusedvariable/unusedvariable.go
@@ -0,0 +1,300 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package unusedvariable defines an analyzer that checks for unused variables.
+package unusedvariable
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/format"
+ "go/token"
+ "go/types"
+ "strings"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/ast/astutil"
+)
+
+const Doc = `check for unused variables
+
+The unusedvariable analyzer suggests fixes for unused variables errors.
+`
+
+var Analyzer = &analysis.Analyzer{
+ Name: "unusedvariable",
+ Doc: Doc,
+ Requires: []*analysis.Analyzer{},
+ Run: run,
+ RunDespiteErrors: true, // an unusedvariable diagnostic is a compile error
+}
+
+// The suffix for this error message changed in Go 1.20.
+var unusedVariableSuffixes = []string{" declared and not used", " declared but not used"}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+ for _, typeErr := range pass.TypeErrors {
+ for _, suffix := range unusedVariableSuffixes {
+ if strings.HasSuffix(typeErr.Msg, suffix) {
+ varName := strings.TrimSuffix(typeErr.Msg, suffix)
+ err := runForError(pass, typeErr, varName)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+ }
+
+ return nil, nil
+}
+
+func runForError(pass *analysis.Pass, err types.Error, name string) error {
+ var file *ast.File
+ for _, f := range pass.Files {
+ if f.Pos() <= err.Pos && err.Pos < f.End() {
+ file = f
+ break
+ }
+ }
+ if file == nil {
+ return nil
+ }
+
+ path, _ := astutil.PathEnclosingInterval(file, err.Pos, err.Pos)
+ if len(path) < 2 {
+ return nil
+ }
+
+ ident, ok := path[0].(*ast.Ident)
+ if !ok || ident.Name != name {
+ return nil
+ }
+
+ diag := analysis.Diagnostic{
+ Pos: ident.Pos(),
+ End: ident.End(),
+ Message: err.Msg,
+ }
+
+ for i := range path {
+ switch stmt := path[i].(type) {
+ case *ast.ValueSpec:
+ // Find GenDecl to which offending ValueSpec belongs.
+ if decl, ok := path[i+1].(*ast.GenDecl); ok {
+ fixes := removeVariableFromSpec(pass, path, stmt, decl, ident)
+ // fixes may be nil
+ if len(fixes) > 0 {
+ diag.SuggestedFixes = fixes
+ pass.Report(diag)
+ }
+ }
+
+ case *ast.AssignStmt:
+ if stmt.Tok != token.DEFINE {
+ continue
+ }
+
+ containsIdent := false
+ for _, expr := range stmt.Lhs {
+ if expr == ident {
+ containsIdent = true
+ }
+ }
+ if !containsIdent {
+ continue
+ }
+
+ fixes := removeVariableFromAssignment(pass, path, stmt, ident)
+ // fixes may be nil
+ if len(fixes) > 0 {
+ diag.SuggestedFixes = fixes
+ pass.Report(diag)
+ }
+ }
+ }
+
+ return nil
+}
+
+func removeVariableFromSpec(pass *analysis.Pass, path []ast.Node, stmt *ast.ValueSpec, decl *ast.GenDecl, ident *ast.Ident) []analysis.SuggestedFix {
+ newDecl := new(ast.GenDecl)
+ *newDecl = *decl
+ newDecl.Specs = nil
+
+ for _, spec := range decl.Specs {
+ if spec != stmt {
+ newDecl.Specs = append(newDecl.Specs, spec)
+ continue
+ }
+
+ newSpec := new(ast.ValueSpec)
+ *newSpec = *stmt
+ newSpec.Names = nil
+
+ for _, n := range stmt.Names {
+ if n != ident {
+ newSpec.Names = append(newSpec.Names, n)
+ }
+ }
+
+ if len(newSpec.Names) > 0 {
+ newDecl.Specs = append(newDecl.Specs, newSpec)
+ }
+ }
+
+ // decl.End() does not include any comments, so if a comment is present we
+ // need to account for it when we delete the statement
+ end := decl.End()
+ if stmt.Comment != nil && stmt.Comment.End() > end {
+ end = stmt.Comment.End()
+ }
+
+ // There are no other specs left in the declaration, the whole statement can
+ // be deleted
+ if len(newDecl.Specs) == 0 {
+ // Find parent DeclStmt and delete it
+ for _, node := range path {
+ if declStmt, ok := node.(*ast.DeclStmt); ok {
+ return []analysis.SuggestedFix{
+ {
+ Message: suggestedFixMessage(ident.Name),
+ TextEdits: deleteStmtFromBlock(path, declStmt),
+ },
+ }
+ }
+ }
+ }
+
+ var b bytes.Buffer
+ if err := format.Node(&b, pass.Fset, newDecl); err != nil {
+ return nil
+ }
+
+ return []analysis.SuggestedFix{
+ {
+ Message: suggestedFixMessage(ident.Name),
+ TextEdits: []analysis.TextEdit{
+ {
+ Pos: decl.Pos(),
+ // Avoid adding a new empty line
+ End: end + 1,
+ NewText: b.Bytes(),
+ },
+ },
+ },
+ }
+}
+
+func removeVariableFromAssignment(pass *analysis.Pass, path []ast.Node, stmt *ast.AssignStmt, ident *ast.Ident) []analysis.SuggestedFix {
+ // The only variable in the assignment is unused
+ if len(stmt.Lhs) == 1 {
+ // If LHS has only one expression to be valid it has to have 1 expression
+ // on RHS
+ //
+ // RHS may have side effects, preserve RHS
+ if exprMayHaveSideEffects(stmt.Rhs[0]) {
+ // Delete until RHS
+ return []analysis.SuggestedFix{
+ {
+ Message: suggestedFixMessage(ident.Name),
+ TextEdits: []analysis.TextEdit{
+ {
+ Pos: ident.Pos(),
+ End: stmt.Rhs[0].Pos(),
+ },
+ },
+ },
+ }
+ }
+
+ // RHS does not have any side effects, delete the whole statement
+ return []analysis.SuggestedFix{
+ {
+ Message: suggestedFixMessage(ident.Name),
+ TextEdits: deleteStmtFromBlock(path, stmt),
+ },
+ }
+ }
+
+ // Otherwise replace ident with `_`
+ return []analysis.SuggestedFix{
+ {
+ Message: suggestedFixMessage(ident.Name),
+ TextEdits: []analysis.TextEdit{
+ {
+ Pos: ident.Pos(),
+ End: ident.End(),
+ NewText: []byte("_"),
+ },
+ },
+ },
+ }
+}
+
+func suggestedFixMessage(name string) string {
+ return fmt.Sprintf("Remove variable %s", name)
+}
+
+func deleteStmtFromBlock(path []ast.Node, stmt ast.Stmt) []analysis.TextEdit {
+ // Find innermost enclosing BlockStmt.
+ var block *ast.BlockStmt
+ for i := range path {
+ if blockStmt, ok := path[i].(*ast.BlockStmt); ok {
+ block = blockStmt
+ break
+ }
+ }
+
+ nodeIndex := -1
+ for i, blockStmt := range block.List {
+ if blockStmt == stmt {
+ nodeIndex = i
+ break
+ }
+ }
+
+ // The statement we need to delete was not found in BlockStmt
+ if nodeIndex == -1 {
+ return nil
+ }
+
+ // Delete until the end of the block unless there is another statement after
+ // the one we are trying to delete
+ end := block.Rbrace
+ if nodeIndex < len(block.List)-1 {
+ end = block.List[nodeIndex+1].Pos()
+ }
+
+ return []analysis.TextEdit{
+ {
+ Pos: stmt.Pos(),
+ End: end,
+ },
+ }
+}
+
+// exprMayHaveSideEffects reports whether the expression may have side effects
+// (because it contains a function call or channel receive). We disregard
+// runtime panics as well written programs should not encounter them.
+func exprMayHaveSideEffects(expr ast.Expr) bool {
+ var mayHaveSideEffects bool
+ ast.Inspect(expr, func(n ast.Node) bool {
+ switch n := n.(type) {
+ case *ast.CallExpr: // possible function call
+ mayHaveSideEffects = true
+ return false
+ case *ast.UnaryExpr:
+ if n.Op == token.ARROW { // channel receive
+ mayHaveSideEffects = true
+ return false
+ }
+ case *ast.FuncLit:
+ return false // evaluating what's inside a FuncLit has no effect
+ }
+ return true
+ })
+
+ return mayHaveSideEffects
+}
diff --git a/gopls/internal/lsp/analysis/unusedvariable/unusedvariable_test.go b/gopls/internal/lsp/analysis/unusedvariable/unusedvariable_test.go
new file mode 100644
index 000000000..08223155f
--- /dev/null
+++ b/gopls/internal/lsp/analysis/unusedvariable/unusedvariable_test.go
@@ -0,0 +1,24 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unusedvariable_test
+
+import (
+ "testing"
+
+ "golang.org/x/tools/go/analysis/analysistest"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/unusedvariable"
+)
+
+func Test(t *testing.T) {
+ testdata := analysistest.TestData()
+
+ t.Run("decl", func(t *testing.T) {
+ analysistest.RunWithSuggestedFixes(t, testdata, unusedvariable.Analyzer, "decl")
+ })
+
+ t.Run("assign", func(t *testing.T) {
+ analysistest.RunWithSuggestedFixes(t, testdata, unusedvariable.Analyzer, "assign")
+ })
+}
diff --git a/internal/lsp/analysis/useany/testdata/src/a/a.go b/gopls/internal/lsp/analysis/useany/testdata/src/a/a.go
index 22d693150..22d693150 100644
--- a/internal/lsp/analysis/useany/testdata/src/a/a.go
+++ b/gopls/internal/lsp/analysis/useany/testdata/src/a/a.go
diff --git a/internal/lsp/analysis/useany/testdata/src/a/a.go.golden b/gopls/internal/lsp/analysis/useany/testdata/src/a/a.go.golden
index efd8fd640..efd8fd640 100644
--- a/internal/lsp/analysis/useany/testdata/src/a/a.go.golden
+++ b/gopls/internal/lsp/analysis/useany/testdata/src/a/a.go.golden
diff --git a/internal/lsp/analysis/useany/useany.go b/gopls/internal/lsp/analysis/useany/useany.go
index 73e2f7633..73e2f7633 100644
--- a/internal/lsp/analysis/useany/useany.go
+++ b/gopls/internal/lsp/analysis/useany/useany.go
diff --git a/gopls/internal/lsp/analysis/useany/useany_test.go b/gopls/internal/lsp/analysis/useany/useany_test.go
new file mode 100644
index 000000000..083c3d54f
--- /dev/null
+++ b/gopls/internal/lsp/analysis/useany/useany_test.go
@@ -0,0 +1,21 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package useany_test
+
+import (
+ "testing"
+
+ "golang.org/x/tools/go/analysis/analysistest"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/useany"
+ "golang.org/x/tools/internal/typeparams"
+)
+
+func Test(t *testing.T) {
+ if !typeparams.Enabled {
+ t.Skip("type params are not enabled")
+ }
+ testdata := analysistest.TestData()
+ analysistest.RunWithSuggestedFixes(t, testdata, useany.Analyzer, "a")
+}
diff --git a/internal/lsp/browser/README.md b/gopls/internal/lsp/browser/README.md
index e5f04df4d..e5f04df4d 100644
--- a/internal/lsp/browser/README.md
+++ b/gopls/internal/lsp/browser/README.md
diff --git a/internal/lsp/browser/browser.go b/gopls/internal/lsp/browser/browser.go
index 0ac4f20f0..0ac4f20f0 100644
--- a/internal/lsp/browser/browser.go
+++ b/gopls/internal/lsp/browser/browser.go
diff --git a/gopls/internal/lsp/cache/analysis.go b/gopls/internal/lsp/cache/analysis.go
new file mode 100644
index 000000000..eac23cd26
--- /dev/null
+++ b/gopls/internal/lsp/cache/analysis.go
@@ -0,0 +1,1247 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cache
+
+// This file defines gopls' driver for modular static analysis (go/analysis).
+
+import (
+ "bytes"
+ "context"
+ "crypto/sha256"
+ "encoding/gob"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "go/ast"
+ "go/token"
+ "go/types"
+ "log"
+ "reflect"
+ "runtime/debug"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/sync/errgroup"
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/gopls/internal/lsp/filecache"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/internal/bug"
+ "golang.org/x/tools/internal/facts"
+ "golang.org/x/tools/internal/gcimporter"
+ "golang.org/x/tools/internal/memoize"
+ "golang.org/x/tools/internal/typeparams"
+ "golang.org/x/tools/internal/typesinternal"
+)
+
+/*
+
+ DESIGN
+
+ An analysis request is for a set of analyzers and an individual
+ package ID, notated (a*, p). The result is the set of diagnostics
+ for that package. It could easily be generalized to a set of
+ packages, (a*, p*), and perhaps should be, to improve performance
+ versus calling it in a loop.
+
+ The snapshot holds a cache (persistent.Map) of entries keyed by
+ (a*, p) pairs ("analysisKey") that have been requested so far. Some
+ of these entries may be invalidated during snapshot cloning after a
+ modification event. The cache maps each (a*, p) to a promise of
+ the analysis result or "analysisSummary". The summary contains the
+ results of analysis (e.g. diagnostics) as well as the intermediate
+ results required by the recursion, such as serialized types and
+ facts.
+
+ The promise represents the result of a call to analyzeImpl, which
+ type-checks a package and then applies a graph of analyzers to it
+ in parallel postorder. (These graph edges are "horizontal": within
+ the same package.) First, analyzeImpl reads the source files of
+ package p, and obtains (recursively) the results of the "vertical"
+ dependencies (i.e. analyzers applied to the packages imported by
+ p). Only the subset of analyzers that use facts need be executed
+ recursively, but even if this subset is empty, the step is still
+ necessary because it provides type information. It is possible that
+ a package may need to be type-checked and analyzed twice, for
+ different subsets of analyzers, but the overlap is typically
+ insignificant.
+
+ With the file contents and the results of vertical dependencies,
+ analyzeImpl is then in a position to produce a key representing the
+ unit of work (parsing, type-checking, and analysis) that it has to
+ do. The key is a cryptographic hash of the "recipe" for this step,
+ including the Metadata, the file contents, the set of analyzers,
+ and the type and fact information from the vertical dependencies.
+
+ The key is sought in a machine-global persistent file-system based
+ cache. If this gopls process, or another gopls process on the same
+ machine, has already performed this analysis step, analyzeImpl will
+ make a cache hit and load the serialized summary of the results. If
+ not, it will have to proceed to type-checking and analysis, and
+ write a new cache entry. The entry contains serialized types
+ (export data) and analysis facts.
+
+ For types, we use "shallow" export data. Historically, the Go
+ compiler always produced a summary of the types for a given package
+ that included types from other packages that it indirectly
+ referenced: "deep" export data. This had the advantage that the
+ compiler (and analogous tools such as gopls) need only load one
+ file per direct import. However, it meant that the files tended to
+ get larger based on the level of the package in the import
+ graph. For example, higher-level packages in the kubernetes module
+ have over 1MB of "deep" export data, even when they have almost no
+ content of their own, merely because they mention a major type that
+ references many others. In pathological cases the export data was
+ 300x larger than the source for a package due to this quadratic
+ growth.
+
+ "Shallow" export data means that the serialized types describe only
+ a single package. If those types mention types from other packages,
+ the type checker may need to request additional packages beyond
+ just the direct imports. This means type information for the entire
+ transitive closure of imports may need to be available just in
+ case. After a cache hit or a cache miss, the summary is
+ postprocessed so that it contains the union of export data payloads
+ of all its direct dependencies.
+
+ For correct dependency analysis, the digest used as a cache key
+ must reflect the "deep" export data, so it is derived recursively
+ from the transitive closure. As an optimization, we needn't include
+ every package of the transitive closure in the deep hash, only the
+ packages that were actually requested by the type checker. This
+ allows changes to a package that have no effect on its export data
+ to be "pruned". The direct consumer will need to be re-executed,
+ but if its export data is unchanged as a result, then indirect
+ consumers may not need to be re-executed. This allows, for example,
+ one to insert a print statement in a function and not "rebuild" the
+ whole application (though export data does record line numbers of
+ types which may be perturbed by otherwise insignificant changes.)
+
+ The summary must record whether a package is transitively
+ error-free (whether it would compile) because many analyzers are
+ not safe to run on packages with inconsistent types.
+
+ For fact encoding, we use the same fact set as the unitchecker
+ (vet) to record and serialize analysis facts. The fact
+ serialization mechanism is analogous to "deep" export data.
+
+*/
+
+// TODO(adonovan):
+// - Profile + optimize:
+// - on a cold run, mostly type checking + export data, unsurprisingly.
+// - on a hot-disk run, mostly type checking the IWL.
+// Would be nice to have a benchmark that separates this out.
+// - measure and record in the code the typical operation times
+// and file sizes (export data + facts = cache entries).
+// - Do "port the old logic" tasks (see TODO in actuallyAnalyze).
+// - Add a (white-box) test of pruning when a change doesn't affect export data.
+// - Optimise pruning based on subset of packages mentioned in exportdata.
+// - Better logging so that it is possible to deduce why an analyzer
+// is not being run--often due to very indirect failures.
+// Even if the ultimate consumer decides to ignore errors,
+// tests and other situations want to be assured of freedom from
+// errors, not just missing results. This should be recorded.
+// - Check that the event trace is intelligible.
+// - Split this into a subpackage, gopls/internal/lsp/cache/driver,
+// consisting of this file and three helpers from errors.go.
+// The (*snapshot).Analyze method would stay behind and make calls
+// to the driver package.
+// Steps:
+// - define a narrow driver.Snapshot interface with only these methods:
+// Metadata(PackageID) source.Metadata
+// GetFile(Context, URI) (source.FileHandle, error)
+// View() *View // for Options
+// - define a State type that encapsulates the persistent map
+// (with its own mutex), and has methods:
+// New() *State
+// Clone(invalidate map[PackageID]bool) *State
+// Destroy()
+// - share cache.{goVersionRx,parseGoImpl}
+
+var born = time.Now()
+
+// Analyze applies a set of analyzers to the package denoted by id,
+// and returns their diagnostics for that package.
+//
+// The analyzers list must be duplicate free; order does not matter.
+//
+// Precondition: all analyzers within the process have distinct names.
+// (The names are relied on by the serialization logic.)
+func (s *snapshot) Analyze(ctx context.Context, id PackageID, analyzers []*source.Analyzer) ([]*source.Diagnostic, error) {
+ if false { // debugging
+ log.Println("Analyze@", time.Since(born)) // called after the 7s IWL in k8s
+ }
+
+ // Filter and sort enabled root analyzers.
+ // A disabled analyzer may still be run if required by another.
+ toSrc := make(map[*analysis.Analyzer]*source.Analyzer)
+ var enabled []*analysis.Analyzer
+ for _, a := range analyzers {
+ if a.IsEnabled(s.view.Options()) {
+ toSrc[a.Analyzer] = a
+ enabled = append(enabled, a.Analyzer)
+ }
+ }
+ sort.Slice(enabled, func(i, j int) bool {
+ return enabled[i].Name < enabled[j].Name
+ })
+
+ // Register fact types of required analyzers.
+ for _, a := range requiredAnalyzers(enabled) {
+ for _, f := range a.FactTypes {
+ gob.Register(f)
+ }
+ }
+
+ if false { // debugging
+ // TODO(adonovan): use proper tracing.
+ t0 := time.Now()
+ defer func() {
+ log.Printf("%v for analyze(%s, %s)", time.Since(t0), id, enabled)
+ }()
+ }
+
+ // Run the analysis.
+ res, err := s.analyze(ctx, id, enabled)
+ if err != nil {
+ return nil, err
+ }
+
+ // Report diagnostics only from enabled actions that succeeded.
+ // Errors from creating or analyzing packages are ignored.
+ // Diagnostics are reported in the order of the analyzers argument.
+ //
+ // TODO(adonovan): ignoring action errors gives the caller no way
+ // to distinguish "there are no problems in this code" from
+ // "the code (or analyzers!) are so broken that we couldn't even
+ // begin the analysis you asked for".
+ // Even if current callers choose to discard the
+ // results, we should propagate the per-action errors.
+ var results []*source.Diagnostic
+ for _, a := range enabled {
+ summary := res.Actions[a.Name]
+ if summary.Err != "" {
+ continue // action failed
+ }
+ for _, gobDiag := range summary.Diagnostics {
+ results = append(results, toSourceDiagnostic(toSrc[a], &gobDiag))
+ }
+ }
+ return results, nil
+}
+
+// analysisKey is the type of keys in the snapshot.analyses map.
+type analysisKey struct {
+ analyzerNames string
+ pkgid PackageID
+}
+
+func (key analysisKey) String() string {
+ return fmt.Sprintf("%s@%s", key.analyzerNames, key.pkgid)
+}
+
+// analyzeSummary is a gob-serializable summary of successfully
+// applying a list of analyzers to a package.
+type analyzeSummary struct {
+ PkgPath PackagePath // types.Package.Path() (needed to decode export data)
+ Export []byte
+ DeepExportHash source.Hash // hash of reflexive transitive closure of export data
+ Compiles bool // transitively free of list/parse/type errors
+ Actions actionsMap // map from analyzer name to analysis results (*actionSummary)
+
+ // Not serialized: populated after the summary is computed or deserialized.
+ allExport map[PackagePath][]byte // transitive export data
+}
+
+// actionsMap defines a stable Gob encoding for a map.
+// TODO(adonovan): generalize and move to a library when we can use generics.
+type actionsMap map[string]*actionSummary
+
+var _ gob.GobEncoder = (actionsMap)(nil)
+var _ gob.GobDecoder = (*actionsMap)(nil)
+
+type actionsMapEntry struct {
+ K string
+ V *actionSummary
+}
+
+func (m actionsMap) GobEncode() ([]byte, error) {
+ entries := make([]actionsMapEntry, 0, len(m))
+ for k, v := range m {
+ entries = append(entries, actionsMapEntry{k, v})
+ }
+ sort.Slice(entries, func(i, j int) bool {
+ return entries[i].K < entries[j].K
+ })
+ var buf bytes.Buffer
+ err := gob.NewEncoder(&buf).Encode(entries)
+ return buf.Bytes(), err
+}
+
+func (m *actionsMap) GobDecode(data []byte) error {
+ var entries []actionsMapEntry
+ if err := gob.NewDecoder(bytes.NewReader(data)).Decode(&entries); err != nil {
+ return err
+ }
+ *m = make(actionsMap, len(entries))
+ for _, e := range entries {
+ (*m)[e.K] = e.V
+ }
+ return nil
+}
+
+// actionSummary is a gob-serializable summary of one possibly failed analysis action.
+// If Err is non-empty, the other fields are undefined.
+type actionSummary struct {
+ Facts []byte // the encoded facts.Set
+ FactsHash source.Hash // hash(Facts)
+ Diagnostics []gobDiagnostic
+ Err string // "" => success
+}
+
+// analyze is a memoization of analyzeImpl.
+func (s *snapshot) analyze(ctx context.Context, id PackageID, analyzers []*analysis.Analyzer) (*analyzeSummary, error) {
+ // Use the sorted list of names of analyzers in the key.
+ //
+ // TODO(adonovan): opt: account for analysis results at a
+ // finer grain to avoid duplicate work when a
+ // a proper subset of analyzers is requested?
+ // In particular, TypeErrorAnalyzers don't use facts
+ // but need to request vdeps just for type information.
+ names := make([]string, 0, len(analyzers))
+ for _, a := range analyzers {
+ names = append(names, a.Name)
+ }
+ // This key describes the result of applying a list of analyzers to a package.
+ key := analysisKey{strings.Join(names, ","), id}
+
+ // An analysisPromise represents the result of loading, parsing,
+ // type-checking and analyzing a single package.
+ type analysisPromise struct {
+ promise *memoize.Promise // [analyzeImplResult]
+ }
+
+ type analyzeImplResult struct {
+ summary *analyzeSummary
+ err error
+ }
+
+ // Access the map once, briefly, and atomically.
+ s.mu.Lock()
+ entry, hit := s.analyses.Get(key)
+ if !hit {
+ entry = analysisPromise{
+ promise: memoize.NewPromise("analysis", func(ctx context.Context, arg interface{}) interface{} {
+ summary, err := analyzeImpl(ctx, arg.(*snapshot), analyzers, id)
+ return analyzeImplResult{summary, err}
+ }),
+ }
+ s.analyses.Set(key, entry, nil) // nothing needs releasing
+ }
+ s.mu.Unlock()
+
+ // Await result.
+ ap := entry.(analysisPromise)
+ v, err := s.awaitPromise(ctx, ap.promise)
+ if err != nil {
+ return nil, err // e.g. cancelled
+ }
+ res := v.(analyzeImplResult)
+ return res.summary, res.err
+}
+
+// analyzeImpl applies a list of analyzers (plus any others
+// transitively required by them) to a package. It succeeds as long
+// as it could produce a types.Package, even if there were direct or
+// indirect list/parse/type errors, and even if all the analysis
+// actions failed. It usually fails only if the package was unknown,
+// a file was missing, or the operation was cancelled.
+//
+// Postcondition: analyzeImpl must not continue to use the snapshot
+// (in background goroutines) after it has returned; see memoize.RefCounted.
+func analyzeImpl(ctx context.Context, snapshot *snapshot, analyzers []*analysis.Analyzer, id PackageID) (*analyzeSummary, error) {
+ m := snapshot.Metadata(id)
+ if m == nil {
+ return nil, fmt.Errorf("no metadata for %s", id)
+ }
+
+ // Recursively analyze each "vertical" dependency
+ // for its types.Package and (perhaps) analysis.Facts.
+ // If any of them fails to produce a package, we cannot continue.
+ // We request only the analyzers that produce facts.
+ //
+ // Also, load the contents of each "compiled" Go file through
+ // the snapshot's cache.
+ //
+ // Both loops occur in parallel, and parallel with each other.
+ vdeps := make(map[PackageID]*analyzeSummary)
+ compiledGoFiles := make([]source.FileHandle, len(m.CompiledGoFiles))
+ {
+ var group errgroup.Group
+
+ // Analyze vertical dependencies.
+ // We request only the required analyzers that use facts.
+ var useFacts []*analysis.Analyzer
+ for _, a := range requiredAnalyzers(analyzers) {
+ if len(a.FactTypes) > 0 {
+ useFacts = append(useFacts, a)
+ }
+ }
+ var vdepsMu sync.Mutex
+ for _, id := range m.DepsByPkgPath {
+ id := id
+ group.Go(func() error {
+ res, err := snapshot.analyze(ctx, id, useFacts)
+ if err != nil {
+ return err // cancelled, or failed to produce a package
+ }
+
+ vdepsMu.Lock()
+ vdeps[id] = res
+ vdepsMu.Unlock()
+ return nil
+ })
+ }
+
+ // Read file contents.
+ // (In practice these will be cache hits
+ // on reads done by the initial workspace load
+ // or after a change modification event.)
+ for i, uri := range m.CompiledGoFiles {
+ i, uri := i, uri
+ group.Go(func() error {
+ fh, err := snapshot.GetFile(ctx, uri) // ~25us
+ compiledGoFiles[i] = fh
+ return err // e.g. cancelled
+ })
+ }
+
+ if err := group.Wait(); err != nil {
+ return nil, err
+ }
+ }
+
+ // Inv: analyze() of all vdeps succeeded (though some actions may have failed).
+
+ // We no longer depend on the snapshot.
+ snapshot = nil
+
+ // At this point we have the action results (serialized
+ // packages and facts) of our immediate dependencies,
+ // and the metadata and content of this package.
+ //
+ // We now compute a hash for all our inputs, and consult a
+ // global cache of promised results. If nothing material
+ // has changed, we'll make a hit in the shared cache.
+ //
+ // The hash of our inputs is based on the serialized export
+ // data and facts so that immaterial changes can be pruned
+ // without decoding.
+ key := analysisCacheKey(analyzers, m, compiledGoFiles, vdeps)
+
+ // Access the cache.
+ var summary *analyzeSummary
+ const cacheKind = "analysis"
+ if data, err := filecache.Get(cacheKind, key); err == nil {
+ // cache hit
+ mustDecode(data, &summary)
+
+ } else if err != filecache.ErrNotFound {
+ return nil, bug.Errorf("internal error reading shared cache: %v", err)
+
+ } else {
+ // Cache miss: do the work.
+ var err error
+ summary, err = actuallyAnalyze(ctx, analyzers, m, vdeps, compiledGoFiles)
+ if err != nil {
+ return nil, err
+ }
+ data := mustEncode(summary)
+ if false {
+ log.Printf("Set key=%d value=%d id=%s\n", len(key), len(data), id)
+ }
+ if err := filecache.Set(cacheKind, key, data); err != nil {
+ return nil, fmt.Errorf("internal error updating shared cache: %v", err)
+ }
+ }
+
+ // Hit or miss, we need to merge the export data from
+ // dependencies so that it includes all the types
+ // that might be summoned by the type checker.
+ //
+ // TODO(adonovan): opt: reduce this set by recording
+ // which packages were actually summoned by insert().
+ // (Just makes map smaller; probably marginal?)
+ allExport := make(map[PackagePath][]byte)
+ for _, vdep := range vdeps {
+ for k, v := range vdep.allExport {
+ allExport[k] = v
+ }
+ }
+ allExport[m.PkgPath] = summary.Export
+ summary.allExport = allExport
+
+ return summary, nil
+}
+
+// analysisCacheKey returns a cache key that is a cryptographic digest
+// of the all the values that might affect type checking and analysis:
+// the analyzer names, package metadata, names and contents of
+// compiled Go files, and vdeps information (export data and facts).
+//
+// TODO(adonovan): safety: define our own flavor of Metadata
+// containing just the fields we need, and using it in the subsequent
+// logic, to keep us honest about hashing all parts that matter?
+func analysisCacheKey(analyzers []*analysis.Analyzer, m *source.Metadata, compiledGoFiles []source.FileHandle, vdeps map[PackageID]*analyzeSummary) [sha256.Size]byte {
+ hasher := sha256.New()
+
+ // In principle, a key must be the hash of an
+ // unambiguous encoding of all the relevant data.
+ // If it's ambiguous, we risk collisions.
+
+ // analyzers
+ fmt.Fprintf(hasher, "analyzers: %d\n", len(analyzers))
+ for _, a := range analyzers {
+ fmt.Fprintln(hasher, a.Name)
+ }
+
+ // package metadata
+ fmt.Fprintf(hasher, "package: %s %s %s\n", m.ID, m.Name, m.PkgPath)
+ // We can ignore m.DepsBy{Pkg,Import}Path: although the logic
+ // uses those fields, we account for them by hashing vdeps.
+
+ // type sizes
+ // This assertion is safe, but if a black-box implementation
+ // is ever needed, record Sizeof(*int) and Alignof(int64).
+ sz := m.TypesSizes.(*types.StdSizes)
+ fmt.Fprintf(hasher, "sizes: %d %d\n", sz.WordSize, sz.MaxAlign)
+
+ // metadata errors: used for 'compiles' field
+ fmt.Fprintf(hasher, "errors: %d", len(m.Errors))
+
+ // module Go version
+ if m.Module != nil && m.Module.GoVersion != "" {
+ fmt.Fprintf(hasher, "go %s\n", m.Module.GoVersion)
+ }
+
+ // file names and contents
+ fmt.Fprintf(hasher, "files: %d\n", len(compiledGoFiles))
+ for _, fh := range compiledGoFiles {
+ fmt.Fprintln(hasher, fh.FileIdentity())
+ }
+
+ // vdeps, in PackageID order
+ depIDs := make([]string, 0, len(vdeps))
+ for depID := range vdeps {
+ depIDs = append(depIDs, string(depID))
+ }
+ sort.Strings(depIDs)
+ for _, depID := range depIDs {
+ vdep := vdeps[PackageID(depID)]
+ fmt.Fprintf(hasher, "dep: %s\n", vdep.PkgPath)
+ fmt.Fprintf(hasher, "export: %s\n", vdep.DeepExportHash)
+
+ // action results: errors and facts
+ names := make([]string, 0, len(vdep.Actions))
+ for name := range vdep.Actions {
+ names = append(names, name)
+ }
+ sort.Strings(names)
+ for _, name := range names {
+ summary := vdep.Actions[name]
+ fmt.Fprintf(hasher, "action %s\n", name)
+ if summary.Err != "" {
+ fmt.Fprintf(hasher, "error %s\n", summary.Err)
+ } else {
+ fmt.Fprintf(hasher, "facts %s\n", summary.FactsHash)
+ // We can safely omit summary.diagnostics
+ // from the key since they have no downstream effect.
+ }
+ }
+ }
+
+ var hash [sha256.Size]byte
+ hasher.Sum(hash[:0])
+ return hash
+}
+
+// actuallyAnalyze implements the cache-miss case.
+// This function does not access the snapshot.
+func actuallyAnalyze(ctx context.Context, analyzers []*analysis.Analyzer, m *source.Metadata, vdeps map[PackageID]*analyzeSummary, compiledGoFiles []source.FileHandle) (*analyzeSummary, error) {
+
+ // Create a local FileSet for processing this package only.
+ fset := token.NewFileSet()
+
+ // Parse only the "compiled" Go files.
+ // Do the computation in parallel.
+ parsed := make([]*source.ParsedGoFile, len(compiledGoFiles))
+ {
+ var group errgroup.Group
+ for i, fh := range compiledGoFiles {
+ i, fh := i, fh
+ group.Go(func() error {
+ // Call parseGoImpl directly, not the caching wrapper,
+ // as cached ASTs require the global FileSet.
+ pgf, err := parseGoImpl(ctx, fset, fh, source.ParseFull)
+ parsed[i] = pgf
+ return err
+ })
+ }
+ if err := group.Wait(); err != nil {
+ return nil, err // cancelled, or catastrophic error (e.g. missing file)
+ }
+ }
+
+ // Type-check the package.
+ pkg := typeCheckForAnalysis(fset, parsed, m, vdeps)
+
+ // Build a map of PkgPath to *Package for all packages mentioned
+ // in exportdata for use by facts.
+ pkg.factsDecoder = facts.NewDecoder(pkg.types)
+
+ // Poll cancellation state.
+ if err := ctx.Err(); err != nil {
+ return nil, err
+ }
+
+ // TODO(adonovan): port the old logic to:
+ // - gather go/packages diagnostics from m.Errors? (port goPackagesErrorDiagnostics)
+ // - record unparseable file URIs so we can suppress type errors for these files.
+ // - gather diagnostics from expandErrors + typeErrorDiagnostics + depsErrors.
+
+ // -- analysis --
+
+ // Build action graph for this package.
+ // Each graph node (action) is one unit of analysis.
+ actions := make(map[*analysis.Analyzer]*action)
+ var mkAction func(a *analysis.Analyzer) *action
+ mkAction = func(a *analysis.Analyzer) *action {
+ act, ok := actions[a]
+ if !ok {
+ var hdeps []*action
+ for _, req := range a.Requires {
+ hdeps = append(hdeps, mkAction(req))
+ }
+ act = &action{a: a, pkg: pkg, vdeps: vdeps, hdeps: hdeps}
+ actions[a] = act
+ }
+ return act
+ }
+
+ // Build actions for initial package.
+ var roots []*action
+ for _, a := range analyzers {
+ roots = append(roots, mkAction(a))
+ }
+
+ // Execute the graph in parallel.
+ execActions(roots)
+
+ // Don't return (or cache) the result in case of cancellation.
+ if err := ctx.Err(); err != nil {
+ return nil, err // cancelled
+ }
+
+ // Return summaries only for the requested actions.
+ summaries := make(map[string]*actionSummary)
+ for _, act := range roots {
+ summaries[act.a.Name] = act.summary
+ }
+
+ return &analyzeSummary{
+ PkgPath: PackagePath(pkg.types.Path()),
+ Export: pkg.export,
+ DeepExportHash: pkg.deepExportHash,
+ Compiles: pkg.compiles,
+ Actions: summaries,
+ }, nil
+}
+
+func typeCheckForAnalysis(fset *token.FileSet, parsed []*source.ParsedGoFile, m *source.Metadata, vdeps map[PackageID]*analyzeSummary) *analysisPackage {
+ if false { // debugging
+ log.Println("typeCheckForAnalysis", m.PkgPath)
+ }
+
+ pkg := &analysisPackage{
+ m: m,
+ fset: fset,
+ parsed: parsed,
+ files: make([]*ast.File, len(parsed)),
+ compiles: len(m.Errors) == 0, // false => list error
+ types: types.NewPackage(string(m.PkgPath), string(m.Name)),
+ typesInfo: &types.Info{
+ Types: make(map[ast.Expr]types.TypeAndValue),
+ Defs: make(map[*ast.Ident]types.Object),
+ Uses: make(map[*ast.Ident]types.Object),
+ Implicits: make(map[ast.Node]types.Object),
+ Selections: make(map[*ast.SelectorExpr]*types.Selection),
+ Scopes: make(map[ast.Node]*types.Scope),
+ },
+ typesSizes: m.TypesSizes,
+ }
+ typeparams.InitInstanceInfo(pkg.typesInfo)
+
+ for i, p := range parsed {
+ pkg.files[i] = p.File
+ if p.ParseErr != nil {
+ pkg.compiles = false // parse error
+ }
+ }
+
+ // Unsafe is special.
+ if m.PkgPath == "unsafe" {
+ pkg.types = types.Unsafe
+ return pkg
+ }
+
+ // Compute the union of transitive export data.
+ // (The actual values are shared, and not serialized.)
+ allExport := make(map[PackagePath][]byte)
+ for _, vdep := range vdeps {
+ for k, v := range vdep.allExport {
+ allExport[k] = v
+ }
+
+ if !vdep.Compiles {
+ pkg.compiles = false // transitive error
+ }
+ }
+
+ // exportHasher computes a hash of the names and export data of
+ // each package that was actually loaded during type checking.
+ //
+ // Because we use shallow export data, the hash for dependency
+ // analysis must incorporate indirect dependencies. As an
+ // optimization, we include only those that were actually
+ // used, which may be a small subset of those available.
+ //
+ // TODO(adonovan): opt: even better would be to implement a
+ // traversal over the package API like facts.NewDecoder does
+ // and only mention that set of packages in the hash.
+ // Perhaps there's a way to do that more efficiently.
+ //
+ // TODO(adonovan): opt: record the shallow hash alongside the
+ // shallow export data in the allExport map to avoid repeatedly
+ // hashing the export data.
+ //
+ // The writes to hasher below assume that type checking imports
+ // packages in a deterministic order.
+ exportHasher := sha256.New()
+ hashExport := func(pkgPath PackagePath, export []byte) {
+ fmt.Fprintf(exportHasher, "%s %d ", pkgPath, len(export))
+ exportHasher.Write(export)
+ }
+
+ // importer state
+ var (
+ insert func(p *types.Package, name string)
+ importMap = make(map[string]*types.Package) // keys are PackagePaths
+ )
+ loadFromExportData := func(pkgPath PackagePath) (*types.Package, error) {
+ export, ok := allExport[pkgPath]
+ if !ok {
+ return nil, bug.Errorf("missing export data for %q", pkgPath)
+ }
+ hashExport(pkgPath, export)
+ imported, err := gcimporter.IImportShallow(fset, importMap, export, string(pkgPath), insert)
+ if err != nil {
+ return nil, bug.Errorf("invalid export data for %q: %v", pkgPath, err)
+ }
+ return imported, nil
+ }
+ insert = func(p *types.Package, name string) {
+ imported, err := loadFromExportData(PackagePath(p.Path()))
+ if err != nil {
+ log.Fatalf("internal error: %v", err)
+ }
+ if imported != p {
+ log.Fatalf("internal error: inconsistent packages")
+ }
+ }
+
+ cfg := &types.Config{
+ Sizes: m.TypesSizes,
+ Error: func(e error) {
+ pkg.compiles = false // type error
+ pkg.typeErrors = append(pkg.typeErrors, e.(types.Error))
+ },
+ Importer: importerFunc(func(importPath string) (*types.Package, error) {
+ if importPath == "unsafe" {
+ return types.Unsafe, nil // unsafe has no export data
+ }
+
+ // Beware that returning an error from this function
+ // will cause the type checker to synthesize a fake
+ // package whose Path is importPath, potentially
+ // losing a vendor/ prefix. If type-checking errors
+ // are swallowed, these packages may be confusing.
+
+ id, ok := m.DepsByImpPath[ImportPath(importPath)]
+ if !ok {
+ // The import syntax is inconsistent with the metadata.
+ // This could be because the import declaration was
+ // incomplete and the metadata only includes complete
+ // imports; or because the metadata ignores import
+ // edges that would lead to cycles in the graph.
+ return nil, fmt.Errorf("missing metadata for import of %q", importPath)
+ }
+
+ depResult, ok := vdeps[id] // id may be ""
+ if !ok {
+ // Analogous to (*snapshot).missingPkgError
+ // in the logic for regular type-checking,
+ // but without a snapshot we can't provide
+ // such detail, and anyway most analysis
+ // failures aren't surfaced in the UI.
+ return nil, fmt.Errorf("no required module provides package %q (id=%q)", importPath, id)
+ }
+
+ // (Duplicates logic from check.go.)
+ if !source.IsValidImport(m.PkgPath, depResult.PkgPath) {
+ return nil, fmt.Errorf("invalid use of internal package %s", importPath)
+ }
+
+ return loadFromExportData(depResult.PkgPath)
+ }),
+ }
+
+ // Set Go dialect.
+ if m.Module != nil && m.Module.GoVersion != "" {
+ goVersion := "go" + m.Module.GoVersion
+ // types.NewChecker panics if GoVersion is invalid.
+ // An unparsable mod file should probably stop us
+ // before we get here, but double check just in case.
+ if goVersionRx.MatchString(goVersion) {
+ typesinternal.SetGoVersion(cfg, goVersion)
+ }
+ }
+
+ // We want to type check cgo code if go/types supports it.
+ // We passed typecheckCgo to go/packages when we Loaded.
+ // TODO(adonovan): do we actually need this??
+ typesinternal.SetUsesCgo(cfg)
+
+ check := types.NewChecker(cfg, fset, pkg.types, pkg.typesInfo)
+
+ // Type checking errors are handled via the config, so ignore them here.
+ _ = check.Files(pkg.files)
+
+ // debugging (type errors are quite normal)
+ if false {
+ if pkg.typeErrors != nil {
+ log.Printf("package %s has type errors: %v", pkg.types.Path(), pkg.typeErrors)
+ }
+ }
+
+ // Emit the export data and compute the deep hash.
+ export, err := gcimporter.IExportShallow(pkg.fset, pkg.types)
+ if err != nil {
+ // TODO(adonovan): in light of exporter bugs such as #57729,
+ // consider using bug.Report here and retrying the IExportShallow
+ // call here using an empty types.Package.
+ log.Fatalf("internal error writing shallow export data: %v", err)
+ }
+ pkg.export = export
+ hashExport(m.PkgPath, export)
+ exportHasher.Sum(pkg.deepExportHash[:0])
+
+ return pkg
+}
+
+// analysisPackage contains information about a package, including
+// syntax trees, used transiently during its type-checking and analysis.
+type analysisPackage struct {
+ m *source.Metadata
+ fset *token.FileSet // local to this package
+ parsed []*source.ParsedGoFile
+ files []*ast.File // same as parsed[i].File
+ types *types.Package
+ compiles bool // package is transitively free of list/parse/type errors
+ factsDecoder *facts.Decoder
+ export []byte // encoding of types.Package
+ deepExportHash source.Hash // reflexive transitive hash of export data
+ typesInfo *types.Info
+ typeErrors []types.Error
+ typesSizes types.Sizes
+}
+
+// An action represents one unit of analysis work: the application of
+// one analysis to one package. Actions form a DAG, both within a
+// package (as different analyzers are applied, either in sequence or
+// parallel), and across packages (as dependencies are analyzed).
+type action struct {
+ once sync.Once
+ a *analysis.Analyzer
+ pkg *analysisPackage
+ hdeps []*action // horizontal dependencies
+ vdeps map[PackageID]*analyzeSummary // vertical dependencies
+
+ // results of action.exec():
+ result interface{} // result of Run function, of type a.ResultType
+ summary *actionSummary
+ err error
+}
+
+func (act *action) String() string {
+ return fmt.Sprintf("%s@%s", act.a.Name, act.pkg.m.ID)
+}
+
+// execActions executes a set of action graph nodes in parallel.
+func execActions(actions []*action) {
+ var wg sync.WaitGroup
+ for _, act := range actions {
+ act := act
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ act.once.Do(func() {
+ execActions(act.hdeps) // analyze "horizontal" dependencies
+ act.result, act.summary, act.err = act.exec()
+ if act.err != nil {
+ act.summary = &actionSummary{Err: act.err.Error()}
+ // TODO(adonovan): suppress logging. But
+ // shouldn't the root error's causal chain
+ // include this information?
+ if false { // debugging
+ log.Printf("act.exec(%v) failed: %v", act, act.err)
+ }
+ }
+ })
+ }()
+ }
+ wg.Wait()
+}
+
+// exec defines the execution of a single action.
+// It returns the (ephemeral) result of the analyzer's Run function,
+// along with its (serializable) facts and diagnostics.
+// Or it returns an error if the analyzer did not run to
+// completion and deliver a valid result.
+func (act *action) exec() (interface{}, *actionSummary, error) {
+ analyzer := act.a
+ pkg := act.pkg
+
+ hasFacts := len(analyzer.FactTypes) > 0
+
+ // Report an error if any action dependency (vertical or horizontal) failed.
+ // To avoid long error messages describing chains of failure,
+ // we return the dependencies' error' unadorned.
+ if hasFacts {
+ // TODO(adonovan): use deterministic order.
+ for _, res := range act.vdeps {
+ if vdep := res.Actions[analyzer.Name]; vdep.Err != "" {
+ return nil, nil, errors.New(vdep.Err)
+ }
+ }
+ }
+ for _, dep := range act.hdeps {
+ if dep.err != nil {
+ return nil, nil, dep.err
+ }
+ }
+ // Inv: all action dependencies succeeded.
+
+ // Were there list/parse/type errors that might prevent analysis?
+ if !pkg.compiles && !analyzer.RunDespiteErrors {
+ return nil, nil, fmt.Errorf("skipping analysis %q because package %q does not compile", analyzer.Name, pkg.m.ID)
+ }
+ // Inv: package is well-formed enough to proceed with analysis.
+
+ if false { // debugging
+ log.Println("action.exec", act)
+ }
+
+ // Gather analysis Result values from horizontal dependencies.
+ var inputs = make(map[*analysis.Analyzer]interface{})
+ for _, dep := range act.hdeps {
+ inputs[dep.a] = dep.result
+ }
+
+ // TODO(adonovan): opt: facts.Set works but it may be more
+ // efficient to fork and tailor it to our precise needs.
+ //
+ // We've already sharded the fact encoding by action
+ // so that it can be done in parallel (hoisting the
+ // ImportMap call so that we build the map once per package).
+ // We could eliminate locking.
+ // We could also dovetail more closely with the export data
+ // decoder to obtain a more compact representation of
+ // packages and objects (e.g. its internal IDs, instead
+ // of PkgPaths and objectpaths.)
+
+ // Read and decode analysis facts for each imported package.
+ factset, err := pkg.factsDecoder.Decode(func(imp *types.Package) ([]byte, error) {
+ if !hasFacts {
+ return nil, nil // analyzer doesn't use facts, so no vdeps
+ }
+
+ // Package.Imports() may contain a fake "C" package. Ignore it.
+ if imp.Path() == "C" {
+ return nil, nil
+ }
+
+ id, ok := pkg.m.DepsByPkgPath[PackagePath(imp.Path())]
+ if !ok {
+ // This may mean imp was synthesized by the type
+ // checker because it failed to import it for any reason
+ // (e.g. bug processing export data; metadata ignoring
+ // a cycle-forming import).
+ // In that case, the fake package's imp.Path
+ // is set to the failed importPath (and thus
+ // it may lack a "vendor/" prefix).
+ //
+ // For now, silently ignore it on the assumption
+ // that the error is already reported elsewhere.
+ // return nil, fmt.Errorf("missing metadata")
+ return nil, nil
+ }
+
+ vdep, ok := act.vdeps[id]
+ if !ok {
+ return nil, bug.Errorf("internal error in %s: missing vdep for id=%s", pkg.types.Path(), id)
+ }
+ return vdep.Actions[analyzer.Name].Facts, nil
+ })
+ if err != nil {
+ return nil, nil, fmt.Errorf("internal error decoding analysis facts: %w", err)
+ }
+
+ // TODO(adonovan): make Export*Fact panic rather than discarding
+ // undeclared fact types, so that we discover bugs in analyzers.
+ factFilter := make(map[reflect.Type]bool)
+ for _, f := range analyzer.FactTypes {
+ factFilter[reflect.TypeOf(f)] = true
+ }
+
+ // posToLocation converts from token.Pos to protocol form.
+ // TODO(adonovan): improve error messages.
+ posToLocation := func(start, end token.Pos) (protocol.Location, error) {
+ tokFile := pkg.fset.File(start)
+ for _, p := range pkg.parsed {
+ if p.Tok == tokFile {
+ if end == token.NoPos {
+ end = start
+ }
+ return p.PosLocation(start, end)
+ }
+ }
+ return protocol.Location{},
+ bug.Errorf("internal error: token.Pos not within package")
+ }
+
+ // Now run the (pkg, analyzer) action.
+ var diagnostics []gobDiagnostic
+ pass := &analysis.Pass{
+ Analyzer: analyzer,
+ Fset: pkg.fset,
+ Files: pkg.files,
+ Pkg: pkg.types,
+ TypesInfo: pkg.typesInfo,
+ TypesSizes: pkg.typesSizes,
+ TypeErrors: pkg.typeErrors,
+ ResultOf: inputs,
+ Report: func(d analysis.Diagnostic) {
+ // Prefix the diagnostic category with the analyzer's name.
+ if d.Category == "" {
+ d.Category = analyzer.Name
+ } else {
+ d.Category = analyzer.Name + "." + d.Category
+ }
+
+ diagnostic, err := toGobDiagnostic(posToLocation, d)
+ if err != nil {
+ bug.Reportf("internal error converting diagnostic from analyzer %q: %v", analyzer.Name, err)
+ return
+ }
+ diagnostics = append(diagnostics, diagnostic)
+ },
+ ImportObjectFact: factset.ImportObjectFact,
+ ExportObjectFact: factset.ExportObjectFact,
+ ImportPackageFact: factset.ImportPackageFact,
+ ExportPackageFact: factset.ExportPackageFact,
+ AllObjectFacts: func() []analysis.ObjectFact { return factset.AllObjectFacts(factFilter) },
+ AllPackageFacts: func() []analysis.PackageFact { return factset.AllPackageFacts(factFilter) },
+ }
+
+ // Recover from panics (only) within the analyzer logic.
+ // (Use an anonymous function to limit the recover scope.)
+ var result interface{}
+ func() {
+ defer func() {
+ if r := recover(); r != nil {
+ // An Analyzer panicked, likely due to a bug.
+ //
+ // In general we want to discover and fix such panics quickly,
+ // so we don't suppress them, but some bugs in third-party
+ // analyzers cannot be quickly fixed, so we use an allowlist
+ // to suppress panics.
+ const strict = true
+ if strict && bug.PanicOnBugs &&
+ analyzer.Name != "buildir" { // see https://github.com/dominikh/go-tools/issues/1343
+ // Uncomment this when debugging suspected failures
+ // in the driver, not the analyzer.
+ if false {
+ debug.SetTraceback("all") // show all goroutines
+ }
+ panic(r)
+ } else {
+ // In production, suppress the panic and press on.
+ err = fmt.Errorf("analysis %s for package %s panicked: %v", analyzer.Name, pass.Pkg.Path(), r)
+ }
+ }
+ }()
+ result, err = pass.Analyzer.Run(pass)
+ }()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if got, want := reflect.TypeOf(result), pass.Analyzer.ResultType; got != want {
+ return nil, nil, bug.Errorf(
+ "internal error: on package %s, analyzer %s returned a result of type %v, but declared ResultType %v",
+ pass.Pkg.Path(), pass.Analyzer, got, want)
+ }
+
+ // Disallow Export*Fact calls after Run.
+ // (A panic means the Analyzer is abusing concurrency.)
+ pass.ExportObjectFact = func(obj types.Object, fact analysis.Fact) {
+ panic(fmt.Sprintf("%v: Pass.ExportObjectFact(%s, %T) called after Run", act, obj, fact))
+ }
+ pass.ExportPackageFact = func(fact analysis.Fact) {
+ panic(fmt.Sprintf("%v: Pass.ExportPackageFact(%T) called after Run", act, fact))
+ }
+
+ factsdata := factset.Encode()
+ return result, &actionSummary{
+ Diagnostics: diagnostics,
+ Facts: factsdata,
+ FactsHash: source.HashOf(factsdata),
+ }, nil
+}
+
+// requiredAnalyzers returns the transitive closure of required analyzers in preorder.
+func requiredAnalyzers(analyzers []*analysis.Analyzer) []*analysis.Analyzer {
+ var result []*analysis.Analyzer
+ seen := make(map[*analysis.Analyzer]bool)
+ var visitAll func([]*analysis.Analyzer)
+ visitAll = func(analyzers []*analysis.Analyzer) {
+ for _, a := range analyzers {
+ if !seen[a] {
+ seen[a] = true
+ result = append(result, a)
+ visitAll(a.Requires)
+ }
+ }
+ }
+ visitAll(analyzers)
+ return result
+}
+
+func mustEncode(x interface{}) []byte {
+ var buf bytes.Buffer
+ if err := gob.NewEncoder(&buf).Encode(x); err != nil {
+ log.Fatalf("internal error encoding %T: %v", x, err)
+ }
+ return buf.Bytes()
+}
+
+func mustDecode(data []byte, ptr interface{}) {
+ if err := gob.NewDecoder(bytes.NewReader(data)).Decode(ptr); err != nil {
+ log.Fatalf("internal error decoding %T: %v", ptr, err)
+ }
+}
+
+// -- data types for serialization of analysis.Diagnostic and source.Diagnostic --
+
+type gobDiagnostic struct {
+ Location protocol.Location
+ Severity protocol.DiagnosticSeverity
+ Code string
+ CodeHref string
+ Source string
+ Message string
+ SuggestedFixes []gobSuggestedFix
+ Related []gobRelatedInformation
+ Tags []protocol.DiagnosticTag
+}
+
+type gobRelatedInformation struct {
+ Location protocol.Location
+ Message string
+}
+
+type gobSuggestedFix struct {
+ Message string
+ TextEdits []gobTextEdit
+ Command *gobCommand
+ ActionKind protocol.CodeActionKind
+}
+
+type gobCommand struct {
+ Title string
+ Command string
+ Arguments []json.RawMessage
+}
+
+type gobTextEdit struct {
+ Location protocol.Location
+ NewText []byte
+}
+
+// toGobDiagnostic converts an analysis.Diagnosic to a serializable gobDiagnostic,
+// which requires expanding token.Pos positions into protocol.Location form.
+func toGobDiagnostic(posToLocation func(start, end token.Pos) (protocol.Location, error), diag analysis.Diagnostic) (gobDiagnostic, error) {
+ var fixes []gobSuggestedFix
+ for _, fix := range diag.SuggestedFixes {
+ var gobEdits []gobTextEdit
+ for _, textEdit := range fix.TextEdits {
+ loc, err := posToLocation(textEdit.Pos, textEdit.End)
+ if err != nil {
+ return gobDiagnostic{}, fmt.Errorf("in SuggestedFixes: %w", err)
+ }
+ gobEdits = append(gobEdits, gobTextEdit{
+ Location: loc,
+ NewText: textEdit.NewText,
+ })
+ }
+ fixes = append(fixes, gobSuggestedFix{
+ Message: fix.Message,
+ TextEdits: gobEdits,
+ })
+ }
+
+ var related []gobRelatedInformation
+ for _, r := range diag.Related {
+ loc, err := posToLocation(r.Pos, r.End)
+ if err != nil {
+ return gobDiagnostic{}, fmt.Errorf("in Related: %w", err)
+ }
+ related = append(related, gobRelatedInformation{
+ Location: loc,
+ Message: r.Message,
+ })
+ }
+
+ loc, err := posToLocation(diag.Pos, diag.End)
+ if err != nil {
+ return gobDiagnostic{}, err
+ }
+
+ return gobDiagnostic{
+ Location: loc,
+ // Severity for analysis diagnostics is dynamic, based on user
+ // configuration per analyzer.
+ // Code and CodeHref are unset for Analysis diagnostics,
+ // TODO(rfindley): set Code fields if/when golang/go#57906 is accepted.
+ Source: diag.Category,
+ Message: diag.Message,
+ SuggestedFixes: fixes,
+ Related: related,
+ // Analysis diagnostics do not contain tags.
+ }, nil
+}
diff --git a/gopls/internal/lsp/cache/cache.go b/gopls/internal/lsp/cache/cache.go
new file mode 100644
index 000000000..24bd84288
--- /dev/null
+++ b/gopls/internal/lsp/cache/cache.go
@@ -0,0 +1,78 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cache
+
+import (
+ "context"
+ "reflect"
+ "strconv"
+ "sync/atomic"
+
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/gocommand"
+ "golang.org/x/tools/internal/memoize"
+ "golang.org/x/tools/internal/robustio"
+)
+
+// New Creates a new cache for gopls operation results, using the given file
+// set, shared store, and session options.
+//
+// Both the fset and store may be nil, but if store is non-nil so must be fset
+// (and they must always be used together), otherwise it may be possible to get
+// cached data referencing token.Pos values not mapped by the FileSet.
+func New(store *memoize.Store) *Cache {
+ index := atomic.AddInt64(&cacheIndex, 1)
+
+ if store == nil {
+ store = &memoize.Store{}
+ }
+
+ c := &Cache{
+ id: strconv.FormatInt(index, 10),
+ store: store,
+ memoizedFS: &memoizedFS{filesByID: map[robustio.FileID][]*DiskFile{}},
+ }
+ return c
+}
+
+// A Cache holds caching stores that are bundled together for consistency.
+//
+// TODO(rfindley): once fset and store need not be bundled together, the Cache
+// type can be eliminated.
+type Cache struct {
+ id string
+
+ store *memoize.Store
+
+ *memoizedFS // implements source.FileSource
+}
+
+// NewSession creates a new gopls session with the given cache and options overrides.
+//
+// The provided optionsOverrides may be nil.
+//
+// TODO(rfindley): move this to session.go.
+func NewSession(ctx context.Context, c *Cache, optionsOverrides func(*source.Options)) *Session {
+ index := atomic.AddInt64(&sessionIndex, 1)
+ options := source.DefaultOptions().Clone()
+ if optionsOverrides != nil {
+ optionsOverrides(options)
+ }
+ s := &Session{
+ id: strconv.FormatInt(index, 10),
+ cache: c,
+ gocmdRunner: &gocommand.Runner{},
+ options: options,
+ overlayFS: newOverlayFS(c),
+ }
+ event.Log(ctx, "New session", KeyCreateSession.Of(s))
+ return s
+}
+
+var cacheIndex, sessionIndex, viewIndex int64
+
+func (c *Cache) ID() string { return c.id }
+func (c *Cache) MemStats() map[reflect.Type]int { return c.store.Stats() }
diff --git a/gopls/internal/lsp/cache/check.go b/gopls/internal/lsp/cache/check.go
new file mode 100644
index 000000000..451604f74
--- /dev/null
+++ b/gopls/internal/lsp/cache/check.go
@@ -0,0 +1,1227 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cache
+
+import (
+ "context"
+ "crypto/sha256"
+ "fmt"
+ "go/ast"
+ "go/token"
+ "go/types"
+ "log"
+ "regexp"
+ "sort"
+ "strings"
+ "sync"
+
+ "golang.org/x/mod/module"
+ "golang.org/x/sync/errgroup"
+ "golang.org/x/tools/go/ast/astutil"
+ "golang.org/x/tools/gopls/internal/lsp/filecache"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/lsp/source/methodsets"
+ "golang.org/x/tools/gopls/internal/lsp/source/xrefs"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/bug"
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/event/tag"
+ "golang.org/x/tools/internal/gcimporter"
+ "golang.org/x/tools/internal/memoize"
+ "golang.org/x/tools/internal/packagesinternal"
+ "golang.org/x/tools/internal/typeparams"
+ "golang.org/x/tools/internal/typesinternal"
+)
+
+// A typeCheckBatch holds data for a logical type-checking operation, which may
+// type-check many unrelated packages.
+//
+// It shares state such as parsed files and imports, to optimize type-checking
+// for packages with overlapping dependency graphs.
+type typeCheckBatch struct {
+ meta *metadataGraph
+
+ parsedFiles map[span.URI]*source.ParsedGoFile // parsed files necessary for type-checking
+ fset *token.FileSet // FileSet describing all parsed files
+
+ // Promises holds promises to either read export data for the package, or
+ // parse and type-check its syntax.
+ //
+ // The return value of these promises is not used: after promises are
+ // awaited, they must write an entry into the imports map.
+ promises map[PackageID]*memoize.Promise
+
+ mu sync.Mutex
+ needFiles map[span.URI]source.FileHandle // de-duplicated file handles required for type-checking
+ imports map[PackageID]pkgOrErr // types.Packages to use for importing
+ exportData map[PackageID][]byte
+ packages map[PackageID]*Package
+}
+
+type pkgOrErr struct {
+ pkg *types.Package
+ err error
+}
+
+// TypeCheck type-checks the specified packages.
+//
+// The resulting packages slice always contains len(ids) entries, though some
+// of them may be nil if (and only if) the resulting error is non-nil.
+//
+// An error is returned if any of the requested packages fail to type-check.
+// This is different from having type-checking errors: a failure to type-check
+// indicates context cancellation or otherwise significant failure to perform
+// the type-checking operation.
+func (s *snapshot) TypeCheck(ctx context.Context, ids ...PackageID) ([]source.Package, error) {
+ // Check for existing active packages.
+ //
+ // Since gopls can't depend on package identity, any instance of the
+ // requested package must be ok to return.
+ //
+ // This is an optimization to avoid redundant type-checking: following
+ // changes to an open package many LSP clients send several successive
+ // requests for package information for the modified package (semantic
+ // tokens, code lens, inlay hints, etc.)
+ pkgs := make([]source.Package, len(ids))
+ needSyntax := make(map[PackageID]bool)
+ for i, id := range ids {
+ if pkg := s.getActivePackage(id); pkg != nil {
+ pkgs[i] = pkg
+ } else {
+ needSyntax[id] = true
+ }
+ }
+
+ if len(needSyntax) == 0 {
+ return pkgs, nil
+ }
+
+ // Build up shared state for efficient type-checking.
+ b := &typeCheckBatch{
+ parsedFiles: make(map[span.URI]*source.ParsedGoFile),
+ // fset is built during the parsing pass.
+ needFiles: make(map[span.URI]source.FileHandle),
+
+ promises: make(map[PackageID]*memoize.Promise),
+ imports: make(map[PackageID]pkgOrErr),
+ exportData: make(map[PackageID][]byte),
+ packages: make(map[PackageID]*Package),
+ }
+
+ // Capture metadata once to ensure a consistent view.
+ s.mu.Lock()
+ b.meta = s.meta
+ s.mu.Unlock()
+
+ // -- Step 1: assemble the promises graph --
+
+ var (
+ needExportData = make(map[PackageID]packageHandleKey)
+ packageHandles = make(map[PackageID]*packageHandle)
+ )
+
+ // collectPromises collects promises to load packages from export data or
+ // type-check.
+ var collectPromises func(PackageID) error
+ collectPromises = func(id PackageID) error {
+ if _, ok := b.promises[id]; ok {
+ return nil
+ }
+ b.promises[id] = nil // break cycles
+
+ m := b.meta.metadata[id]
+ if m == nil {
+ return bug.Errorf("missing metadata for %v", id)
+ }
+ for _, id := range m.DepsByPkgPath {
+ if err := collectPromises(id); err != nil {
+ return err
+ }
+ }
+
+ // Note that we can't reuse active packages here, as they will have the
+ // wrong FileSet. Any active packages that exist as dependencies of other
+ // packages will need to be loaded from export data.
+ ph, err := s.buildPackageHandle(ctx, id)
+ if err != nil {
+ return err
+ }
+ packageHandles[id] = ph
+
+ if needSyntax[id] {
+ // We will need to parse and type-check this package.
+ //
+ // We may also need to parse and type-check if export data is missing,
+ // but that is handled after fetching export data below.
+ b.addNeededFiles(ph)
+ } else if id != "unsafe" { // we can't load export data for unsafe
+ needExportData[id] = ph.key
+ }
+
+ debugName := fmt.Sprintf("check(%s)", id)
+ b.promises[id] = memoize.NewPromise(debugName, func(ctx context.Context, _ interface{}) interface{} {
+ var res pkgOrErr
+ if err := b.awaitPredecessors(ctx, ph.m); err != nil {
+ res.err = err
+ } else {
+ b.mu.Lock()
+ data, ok := b.exportData[id]
+ b.mu.Unlock()
+
+ if ok {
+ // We need export data, and have it.
+ res.pkg, res.err = b.importPackage(ctx, m, data)
+ } else if !needSyntax[id] {
+ // We need only a types.Package, but don't have export data.
+ // Type-check as fast as possible (skipping function bodies).
+ res.pkg, res.err = b.checkPackageForImport(ctx, ph)
+ } else {
+ // We need a syntax package.
+ var pkg *Package
+ pkg, res.err = b.checkPackage(ctx, ph)
+ if res.err == nil {
+ res.pkg = pkg.pkg.types
+ b.mu.Lock()
+ b.packages[id] = pkg
+ b.mu.Unlock()
+ }
+ }
+ }
+
+ b.mu.Lock()
+ b.imports[m.ID] = res
+ b.mu.Unlock()
+ return nil
+ })
+ return nil
+ }
+ for id := range needSyntax {
+ collectPromises(id)
+ }
+
+ // -- Step 2: collect export data --
+ //
+ // This must be done before parsing in order to determine which files must be
+ // parsed.
+ {
+ var g errgroup.Group
+ for id, key := range needExportData {
+ id := id
+ key := key
+ g.Go(func() error {
+ data, err := filecache.Get(exportDataKind, key)
+ if err != nil {
+ if err == filecache.ErrNotFound {
+ ph := packageHandles[id]
+ b.addNeededFiles(ph) // we will need to parse and type check
+ return nil // ok: we will type check later
+ }
+ return err
+ }
+ b.mu.Lock()
+ b.exportData[id] = data
+ b.mu.Unlock()
+ return nil
+ })
+ }
+ if err := g.Wait(); err != nil {
+ return pkgs, err
+ }
+ }
+
+ // -- Step 3: parse files required for type checking. --
+ //
+ // Parse all necessary files in parallel. Unfortunately we can't start
+ // parsing each package's file as soon as we discover that it is a syntax
+ // package, because the parseCache cannot add files to an existing FileSet.
+ {
+ var fhs []source.FileHandle
+ for _, fh := range b.needFiles {
+ fhs = append(fhs, fh)
+ }
+ pgfs, fset, err := s.parseCache.parseFiles(ctx, source.ParseFull, fhs...)
+ if err != nil {
+ return pkgs, err
+ }
+ for _, pgf := range pgfs {
+ b.parsedFiles[pgf.URI] = pgf
+ }
+ b.fset = fset
+ }
+
+ // -- Step 4: await type-checking. --
+ //
+ // Start a single goroutine for each promise.
+ {
+ var g errgroup.Group
+ // TODO(rfindley): find a good way to limit concurrency of type-checking,
+ // which is CPU bound at this point.
+ //
+ // (calling g.SetLimit here is mostly ineffective, as promises are
+ // recursively concurrent.)
+ for _, promise := range b.promises {
+ promise := promise
+ g.Go(func() error {
+ _, err := promise.Get(ctx, nil)
+ return err
+ })
+ }
+ if err := g.Wait(); err != nil {
+ return pkgs, err
+ }
+ }
+
+ // Fill in the gaps of the results slice.
+ var firstErr error
+ for i, id := range ids {
+ if pkgs[i] != nil {
+ continue
+ }
+ if err := b.imports[id].err; err != nil {
+ if firstErr == nil {
+ firstErr = err
+ }
+ continue
+ }
+ pkg := b.packages[id]
+ if pkg == nil {
+ panic("nil package")
+ }
+ if alt := s.memoizeActivePackage(id, pkg); alt != nil && alt != pkg {
+ // pkg is an open package, but we've lost a race and an existing package
+ // has already been memoized.
+ pkg = alt
+ }
+ pkgs[i] = pkg
+ }
+
+ return pkgs, firstErr
+}
+
+// addNeededFiles records the files necessary for type-checking ph, for later
+// parsing.
+func (b *typeCheckBatch) addNeededFiles(ph *packageHandle) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+
+ // Technically for export-only packages we only need compiledGoFiles, but
+ // these slices are usually redundant.
+ for _, fh := range ph.inputs.goFiles {
+ b.needFiles[fh.URI()] = fh
+ }
+ for _, fh := range ph.inputs.compiledGoFiles {
+ b.needFiles[fh.URI()] = fh
+ }
+}
+
+// importPackage loads the given package from its export data in p.exportData
+// (which must already be populated).
+func (b *typeCheckBatch) importPackage(ctx context.Context, m *source.Metadata, data []byte) (*types.Package, error) {
+ impMap, errMap := b.importMap(m.ID)
+ // Any failure to populate an import will cause confusing errors from
+ // IImportShallow below.
+ for path, err := range errMap {
+ return nil, fmt.Errorf("error importing %q for %q: %v", path, m.ID, err)
+ }
+
+ // TODO(rfindley): collect "deep" hashes here using the provided
+ // callback, for precise pruning.
+ imported, err := gcimporter.IImportShallow(b.fset, impMap, data, string(m.PkgPath), func(*types.Package, string) {})
+ if err != nil {
+ return nil, bug.Errorf("invalid export data for %q: %v", m.ID, err)
+ }
+ return imported, nil
+}
+
+// checkPackageForImport type checks, but skips function bodies and does not
+// record syntax information.
+func (b *typeCheckBatch) checkPackageForImport(ctx context.Context, ph *packageHandle) (*types.Package, error) {
+ if ph.m.ID == "unsafe" {
+ return types.Unsafe, nil
+ }
+ impMap, errMap := b.importMap(ph.inputs.id)
+ onError := func(e error) {
+ // Ignore errors for exporting.
+ }
+ cfg := b.typesConfig(ph.inputs, onError, impMap, errMap)
+ var files []*ast.File
+ for _, fh := range ph.inputs.compiledGoFiles {
+ pgf := b.parsedFiles[fh.URI()]
+ if pgf == nil {
+ return nil, fmt.Errorf("compiled go file %q failed to parse", fh.URI().Filename())
+ }
+ files = append(files, pgf.File)
+ }
+ cfg.IgnoreFuncBodies = true
+ pkg := types.NewPackage(string(ph.inputs.pkgPath), string(ph.inputs.name))
+ check := types.NewChecker(cfg, b.fset, pkg, nil)
+
+ _ = check.Files(files) // ignore errors
+
+ // If the context was cancelled, we may have returned a ton of transient
+ // errors to the type checker. Swallow them.
+ if ctx.Err() != nil {
+ return nil, ctx.Err()
+ }
+
+ // Asynchronously record export data.
+ go func() {
+ exportData, err := gcimporter.IExportShallow(b.fset, pkg)
+ if err != nil {
+ bug.Reportf("exporting package %v: %v", ph.m.ID, err)
+ return
+ }
+ if err := filecache.Set(exportDataKind, ph.key, exportData); err != nil {
+ event.Error(ctx, fmt.Sprintf("storing export data for %s", ph.m.ID), err)
+ }
+ }()
+ return pkg, nil
+}
+
+// checkPackage "fully type checks" to produce a syntax package.
+func (b *typeCheckBatch) checkPackage(ctx context.Context, ph *packageHandle) (*Package, error) {
+ // TODO(rfindley): refactor to inline typeCheckImpl here. There is no need
+ // for so many layers to build up the package
+ // (checkPackage->typeCheckImpl->doTypeCheck).
+ pkg, err := typeCheckImpl(ctx, b, ph.inputs)
+
+ if err == nil {
+ // Write package data to disk asynchronously.
+ go func() {
+ toCache := map[string][]byte{
+ xrefsKind: pkg.xrefs,
+ methodSetsKind: pkg.methodsets.Encode(),
+ diagnosticsKind: encodeDiagnostics(pkg.diagnostics),
+ }
+
+ if ph.m.ID != "unsafe" { // unsafe cannot be exported
+ exportData, err := gcimporter.IExportShallow(pkg.fset, pkg.types)
+ if err != nil {
+ bug.Reportf("exporting package %v: %v", ph.m.ID, err)
+ } else {
+ toCache[exportDataKind] = exportData
+ }
+ }
+
+ for kind, data := range toCache {
+ if err := filecache.Set(kind, ph.key, data); err != nil {
+ event.Error(ctx, fmt.Sprintf("storing %s data for %s", kind, ph.m.ID), err)
+ }
+ }
+ }()
+ }
+
+ return &Package{ph.m, pkg}, err
+}
+
+// awaitPredecessors awaits all promises for m.DepsByPkgPath, returning an
+// error if awaiting failed due to context cancellation or if there was an
+// unrecoverable error loading export data.
+func (b *typeCheckBatch) awaitPredecessors(ctx context.Context, m *source.Metadata) error {
+ for _, depID := range m.DepsByPkgPath {
+ depID := depID
+ if p, ok := b.promises[depID]; ok {
+ if _, err := p.Get(ctx, nil); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// importMap returns an import map for the given package ID, populated with
+// type-checked packages for its dependencies. It is intended for compatibility
+// with gcimporter.IImportShallow, so the first result uses the map signature
+// of that API, where keys are package path strings.
+//
+// importMap must only be used once all promises for dependencies of id have
+// been awaited.
+//
+// For any missing packages, importMap returns an entry in the resulting errMap
+// reporting the error for that package.
+//
+// Invariant: for all recursive dependencies, either impMap[path] or
+// errMap[path] is set.
+func (b *typeCheckBatch) importMap(id PackageID) (impMap map[string]*types.Package, errMap map[PackagePath]error) {
+ impMap = make(map[string]*types.Package)
+ outerID := id
+ var populateDepsOf func(m *source.Metadata)
+ populateDepsOf = func(parent *source.Metadata) {
+ for _, id := range parent.DepsByPkgPath {
+ m := b.meta.metadata[id]
+ if _, ok := impMap[string(m.PkgPath)]; ok {
+ continue
+ }
+ if _, ok := errMap[m.PkgPath]; ok {
+ continue
+ }
+ b.mu.Lock()
+ result, ok := b.imports[m.ID]
+ b.mu.Unlock()
+ if !ok {
+ panic(fmt.Sprintf("import map for %q missing package data for %q", outerID, m.ID))
+ }
+ // We may fail to produce a package due to e.g. context cancellation
+ // (handled elsewhere), or some catastrophic failure such as a package with
+ // no files.
+ switch {
+ case result.err != nil:
+ if errMap == nil {
+ errMap = make(map[PackagePath]error)
+ }
+ errMap[m.PkgPath] = result.err
+ case result.pkg != nil:
+ impMap[string(m.PkgPath)] = result.pkg
+ default:
+ panic("invalid import for " + id)
+ }
+ populateDepsOf(m)
+ }
+ }
+ m := b.meta.metadata[id]
+ populateDepsOf(m)
+ return impMap, errMap
+}
+
+// packageData holds binary data (e.g. types, xrefs) extracted from a syntax
+// package.
+type packageData struct {
+ m *source.Metadata
+ data []byte
+}
+
+// getPackageData gets package data (e.g. types, xrefs) for the requested ids,
+// either loading from the file-based cache or type-checking and extracting
+// data using the provided get function.
+func (s *snapshot) getPackageData(ctx context.Context, kind string, ids []PackageID, get func(*syntaxPackage) []byte) ([]*packageData, error) {
+ var needIDs []PackageID
+ keys := make([]packageHandleKey, len(ids))
+ pkgData := make([]*packageData, len(ids))
+ var firstErr error
+ // Compute package keys and query file cache.
+ for i, id := range ids {
+ ph, err := s.buildPackageHandle(ctx, id)
+ if err != nil {
+ if firstErr == nil {
+ firstErr = err
+ }
+ if ctx.Err() != nil {
+ return pkgData, firstErr
+ }
+ continue
+ }
+ keys[i] = ph.key
+ data, err := filecache.Get(kind, ph.key)
+ switch err {
+ case nil:
+ pkgData[i] = &packageData{m: ph.m, data: data}
+ case filecache.ErrNotFound:
+ needIDs = append(needIDs, id)
+ default:
+ if firstErr == nil {
+ firstErr = err
+ }
+ if ctx.Err() != nil {
+ return pkgData, firstErr
+ }
+ }
+ }
+
+ // Type-check the packages for which we got file-cache misses.
+ pkgs, err := s.TypeCheck(ctx, needIDs...)
+ if err != nil {
+ return nil, err
+ }
+
+ pkgMap := make(map[PackageID]source.Package)
+ for i, id := range needIDs {
+ pkgMap[id] = pkgs[i]
+ }
+
+ // Fill in the gaps using data derived from type checking.
+ for i, id := range ids {
+ if pkgData[i] != nil {
+ continue
+ }
+ result := pkgMap[id]
+ if result == nil {
+ panic(fmt.Sprintf("missing type-check result for %s", id))
+ }
+ data := get(result.(*Package).pkg)
+ pkgData[i] = &packageData{m: result.Metadata(), data: data}
+ }
+
+ return pkgData, firstErr
+}
+
+type packageHandleKey source.Hash
+
+// A packageHandle holds package information, some of which may not be fully
+// evaluated.
+//
+// The only methods on packageHandle that are safe to call before calling await
+// are Metadata and await itself.
+type packageHandle struct {
+ m *source.Metadata
+
+ inputs typeCheckInputs
+
+ // key is the hashed key for the package.
+ //
+ // It includes the all bits of the transitive closure of
+ // dependencies's sources. This is more than type checking
+ // really depends on: export data of direct deps should be
+ // enough. (The key for analysis actions could similarly
+ // hash only Facts of direct dependencies.)
+ key packageHandleKey
+
+ // Note: as an optimization, we could join in-flight type-checking by
+ // recording a transient ref-counted promise here.
+ // (This was done previously, but proved to be a premature optimization).
+}
+
+// buildPackageHandle returns a handle for the future results of
+// type-checking the package identified by id in the given mode.
+// It assumes that the given ID already has metadata available, so it does not
+// attempt to reload missing or invalid metadata. The caller must reload
+// metadata if needed.
+func (s *snapshot) buildPackageHandle(ctx context.Context, id PackageID) (*packageHandle, error) {
+ s.mu.Lock()
+ entry, hit := s.packages.Get(id)
+ m := s.meta.metadata[id]
+ s.mu.Unlock()
+
+ if m == nil {
+ return nil, fmt.Errorf("no metadata for %s", id)
+ }
+
+ if hit {
+ return entry.(*packageHandle), nil
+ }
+
+ inputs, err := s.typeCheckInputs(ctx, m)
+ if err != nil {
+ return nil, err
+ }
+ // All the file reading has now been done.
+ // Create a handle for the result of type checking.
+ phKey := computePackageKey(s, inputs)
+ ph := &packageHandle{
+ m: m,
+ inputs: inputs,
+ key: phKey,
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ // Check that the metadata has not changed
+ // (which should invalidate this handle).
+ //
+ // (In future, handles should form a graph with edges from a
+ // packageHandle to the handles for parsing its files and the
+ // handles for type-checking its immediate deps, at which
+ // point there will be no need to even access s.meta.)
+ if s.meta.metadata[ph.m.ID] != ph.m {
+ // TODO(rfindley): this should be bug.Errorf.
+ return nil, fmt.Errorf("stale metadata for %s", ph.m.ID)
+ }
+
+ // Check cache again in case another goroutine got there first.
+ if prev, ok := s.packages.Get(id); ok {
+ prevPH := prev.(*packageHandle)
+ if prevPH.m != ph.m {
+ return nil, bug.Errorf("existing package handle does not match for %s", ph.m.ID)
+ }
+ return prevPH, nil
+ }
+
+ s.packages.Set(id, ph, nil)
+ return ph, nil
+}
+
+// typeCheckInputs contains the inputs of a call to typeCheckImpl, which
+// type-checks a package.
+//
+// Part of the purpose of this type is to keep type checking in-sync with the
+// package handle key, by explicitly identifying the inputs to type checking.
+type typeCheckInputs struct {
+ id PackageID
+
+ // Used for type checking:
+ pkgPath PackagePath
+ name PackageName
+ goFiles, compiledGoFiles []source.FileHandle
+ sizes types.Sizes
+ deps map[PackageID]*packageHandle
+ depsByImpPath map[ImportPath]PackageID
+ goVersion string // packages.Module.GoVersion, e.g. "1.18"
+
+ // Used for type check diagnostics:
+ relatedInformation bool
+ linkTarget string
+ moduleMode bool
+}
+
+func (s *snapshot) typeCheckInputs(ctx context.Context, m *source.Metadata) (typeCheckInputs, error) {
+ deps := make(map[PackageID]*packageHandle)
+ for _, depID := range m.DepsByPkgPath {
+ depHandle, err := s.buildPackageHandle(ctx, depID)
+ if err != nil {
+ // If err is non-nil, we either have an invalid dependency, or a
+ // catastrophic failure to read a file (context cancellation or
+ // permission issues).
+ //
+ // We don't want one bad dependency to prevent us from type-checking the
+ // package -- we should instead get an import error. So we only abort
+ // this operation if the context is cancelled.
+ //
+ // We could do a better job of handling permission errors on files, but
+ // this is rare, and it is reasonable to treat the same an invalid
+ // dependency.
+ event.Error(ctx, fmt.Sprintf("%s: no dep handle for %s", m.ID, depID), err, source.SnapshotLabels(s)...)
+ if ctx.Err() != nil {
+ return typeCheckInputs{}, ctx.Err() // cancelled
+ }
+ continue
+ }
+ deps[depID] = depHandle
+ }
+
+ // Read both lists of files of this package, in parallel.
+ //
+ // goFiles aren't presented to the type checker--nor
+ // are they included in the key, unsoundly--but their
+ // syntax trees are available from (*pkg).File(URI).
+ // TODO(adonovan): consider parsing them on demand?
+ // The need should be rare.
+ goFiles, compiledGoFiles, err := readGoFiles(ctx, s, m)
+ if err != nil {
+ return typeCheckInputs{}, err
+ }
+
+ goVersion := ""
+ if m.Module != nil && m.Module.GoVersion != "" {
+ goVersion = m.Module.GoVersion
+ }
+
+ return typeCheckInputs{
+ id: m.ID,
+ pkgPath: m.PkgPath,
+ name: m.Name,
+ goFiles: goFiles,
+ compiledGoFiles: compiledGoFiles,
+ sizes: m.TypesSizes,
+ deps: deps,
+ depsByImpPath: m.DepsByImpPath,
+ goVersion: goVersion,
+
+ relatedInformation: s.view.Options().RelatedInformationSupported,
+ linkTarget: s.view.Options().LinkTarget,
+ moduleMode: s.moduleMode(),
+ }, nil
+}
+
+// readGoFiles reads the content of Metadata.GoFiles and
+// Metadata.CompiledGoFiles, in parallel.
+func readGoFiles(ctx context.Context, s *snapshot, m *source.Metadata) (goFiles, compiledGoFiles []source.FileHandle, err error) {
+ var group errgroup.Group
+ getFileHandles := func(files []span.URI) []source.FileHandle {
+ fhs := make([]source.FileHandle, len(files))
+ for i, uri := range files {
+ i, uri := i, uri
+ group.Go(func() (err error) {
+ fhs[i], err = s.GetFile(ctx, uri) // ~25us
+ return
+ })
+ }
+ return fhs
+ }
+ return getFileHandles(m.GoFiles),
+ getFileHandles(m.CompiledGoFiles),
+ group.Wait()
+}
+
+// computePackageKey returns a key representing the act of type checking
+// a package named id containing the specified files, metadata, and
+// combined dependency hash.
+func computePackageKey(s *snapshot, inputs typeCheckInputs) packageHandleKey {
+ hasher := sha256.New()
+
+ // In principle, a key must be the hash of an
+ // unambiguous encoding of all the relevant data.
+ // If it's ambiguous, we risk collisions.
+
+ // package identifiers
+ fmt.Fprintf(hasher, "package: %s %s %s\n", inputs.id, inputs.name, inputs.pkgPath)
+
+ // module Go version
+ fmt.Fprintf(hasher, "go %s\n", inputs.goVersion)
+
+ // import map
+ importPaths := make([]string, 0, len(inputs.depsByImpPath))
+ for impPath := range inputs.depsByImpPath {
+ importPaths = append(importPaths, string(impPath))
+ }
+ sort.Strings(importPaths)
+ for _, impPath := range importPaths {
+ fmt.Fprintf(hasher, "import %s %s", impPath, string(inputs.depsByImpPath[ImportPath(impPath)]))
+ }
+
+ // deps, in PackageID order
+ depIDs := make([]string, 0, len(inputs.deps))
+ for depID := range inputs.deps {
+ depIDs = append(depIDs, string(depID))
+ }
+ sort.Strings(depIDs)
+ for _, depID := range depIDs {
+ dep := inputs.deps[PackageID(depID)]
+ fmt.Fprintf(hasher, "dep: %s key:%s\n", dep.m.PkgPath, dep.key)
+ }
+
+ // file names and contents
+ fmt.Fprintf(hasher, "compiledGoFiles: %d\n", len(inputs.compiledGoFiles))
+ for _, fh := range inputs.compiledGoFiles {
+ fmt.Fprintln(hasher, fh.FileIdentity())
+ }
+ fmt.Fprintf(hasher, "goFiles: %d\n", len(inputs.goFiles))
+ for _, fh := range inputs.goFiles {
+ fmt.Fprintln(hasher, fh.FileIdentity())
+ }
+
+ // types sizes
+ sz := inputs.sizes.(*types.StdSizes)
+ fmt.Fprintf(hasher, "sizes: %d %d\n", sz.WordSize, sz.MaxAlign)
+
+ fmt.Fprintf(hasher, "relatedInformation: %t\n", inputs.relatedInformation)
+ fmt.Fprintf(hasher, "linkTarget: %s\n", inputs.linkTarget)
+ fmt.Fprintf(hasher, "moduleMode: %t\n", inputs.moduleMode)
+
+ var hash [sha256.Size]byte
+ hasher.Sum(hash[:0])
+ return packageHandleKey(hash)
+}
+
+// typeCheckImpl type checks the parsed source files in compiledGoFiles.
+// (The resulting pkg also holds the parsed but not type-checked goFiles.)
+// deps holds the future results of type-checking the direct dependencies.
+func typeCheckImpl(ctx context.Context, b *typeCheckBatch, inputs typeCheckInputs) (*syntaxPackage, error) {
+ ctx, done := event.Start(ctx, "cache.typeCheck", tag.Package.Of(string(inputs.id)))
+ defer done()
+
+ pkg, err := doTypeCheck(ctx, b, inputs)
+ if err != nil {
+ return nil, err
+ }
+ pkg.methodsets = methodsets.NewIndex(pkg.fset, pkg.types)
+ pkg.xrefs = xrefs.Index(pkg.compiledGoFiles, pkg.types, pkg.typesInfo)
+
+ // Our heuristic for whether to show type checking errors is:
+ // + If any file was 'fixed', don't show type checking errors as we
+ // can't guarantee that they reference accurate locations in the source.
+ // + If there is a parse error _in the current file_, suppress type
+ // errors in that file.
+ // + Otherwise, show type errors even in the presence of parse errors in
+ // other package files. go/types attempts to suppress follow-on errors
+ // due to bad syntax, so on balance type checking errors still provide
+ // a decent signal/noise ratio as long as the file in question parses.
+
+ // Track URIs with parse errors so that we can suppress type errors for these
+ // files.
+ unparseable := map[span.URI]bool{}
+ for _, e := range pkg.parseErrors {
+ diags, err := parseErrorDiagnostics(pkg, e)
+ if err != nil {
+ event.Error(ctx, "unable to compute positions for parse errors", err, tag.Package.Of(string(inputs.id)))
+ continue
+ }
+ for _, diag := range diags {
+ unparseable[diag.URI] = true
+ pkg.diagnostics = append(pkg.diagnostics, diag)
+ }
+ }
+
+ if pkg.hasFixedFiles {
+ return pkg, nil
+ }
+
+ unexpanded := pkg.typeErrors
+ pkg.typeErrors = nil
+ for _, e := range expandErrors(unexpanded, inputs.relatedInformation) {
+ diags, err := typeErrorDiagnostics(inputs.moduleMode, inputs.linkTarget, pkg, e)
+ if err != nil {
+ event.Error(ctx, "unable to compute positions for type errors", err, tag.Package.Of(string(inputs.id)))
+ continue
+ }
+ pkg.typeErrors = append(pkg.typeErrors, e.primary)
+ for _, diag := range diags {
+ // If the file didn't parse cleanly, it is highly likely that type
+ // checking errors will be confusing or redundant. But otherwise, type
+ // checking usually provides a good enough signal to include.
+ if !unparseable[diag.URI] {
+ pkg.diagnostics = append(pkg.diagnostics, diag)
+ }
+ }
+ }
+
+ return pkg, nil
+}
+
+var goVersionRx = regexp.MustCompile(`^go([1-9][0-9]*)\.(0|[1-9][0-9]*)$`)
+
+func doTypeCheck(ctx context.Context, b *typeCheckBatch, inputs typeCheckInputs) (*syntaxPackage, error) {
+ impMap, errMap := b.importMap(inputs.id)
+ pkg := &syntaxPackage{
+ id: inputs.id,
+ fset: b.fset, // must match parse call below
+ types: types.NewPackage(string(inputs.pkgPath), string(inputs.name)),
+ typesInfo: &types.Info{
+ Types: make(map[ast.Expr]types.TypeAndValue),
+ Defs: make(map[*ast.Ident]types.Object),
+ Uses: make(map[*ast.Ident]types.Object),
+ Implicits: make(map[ast.Node]types.Object),
+ Selections: make(map[*ast.SelectorExpr]*types.Selection),
+ Scopes: make(map[ast.Node]*types.Scope),
+ },
+ importMap: impMap,
+ }
+ typeparams.InitInstanceInfo(pkg.typesInfo)
+
+ // Collect parsed files from the type check pass, capturing parse errors from
+ // compiled files.
+ for _, fh := range inputs.goFiles {
+ pgf := b.parsedFiles[fh.URI()]
+ if pgf == nil {
+ // If go/packages told us that a file is in a package, it should be
+ // parseable (after all, it was parsed by go list).
+ return nil, bug.Errorf("go file %q failed to parse", fh.URI().Filename())
+ }
+ pkg.goFiles = append(pkg.goFiles, pgf)
+ }
+ for _, fh := range inputs.compiledGoFiles {
+ pgf := b.parsedFiles[fh.URI()]
+ if pgf == nil {
+ return nil, fmt.Errorf("compiled go file %q failed to parse", fh.URI().Filename())
+ }
+ if pgf.ParseErr != nil {
+ pkg.parseErrors = append(pkg.parseErrors, pgf.ParseErr)
+ }
+ pkg.compiledGoFiles = append(pkg.compiledGoFiles, pgf)
+ }
+
+ // Use the default type information for the unsafe package.
+ if inputs.pkgPath == "unsafe" {
+ // Don't type check Unsafe: it's unnecessary, and doing so exposes a data
+ // race to Unsafe.completed.
+ pkg.types = types.Unsafe
+ return pkg, nil
+ }
+
+ if len(pkg.compiledGoFiles) == 0 {
+ // No files most likely means go/packages failed.
+ //
+ // TODO(rfindley): in the past, we would capture go list errors in this
+ // case, to present go list errors to the user. However we had no tests for
+ // this behavior. It is unclear if anything better can be done here.
+ return nil, fmt.Errorf("no parsed files for package %s", inputs.pkgPath)
+ }
+
+ onError := func(e error) {
+ pkg.typeErrors = append(pkg.typeErrors, e.(types.Error))
+ }
+ cfg := b.typesConfig(inputs, onError, impMap, errMap)
+
+ check := types.NewChecker(cfg, pkg.fset, pkg.types, pkg.typesInfo)
+
+ var files []*ast.File
+ for _, cgf := range pkg.compiledGoFiles {
+ files = append(files, cgf.File)
+ }
+
+ // Type checking errors are handled via the config, so ignore them here.
+ _ = check.Files(files) // 50us-15ms, depending on size of package
+
+ // If the context was cancelled, we may have returned a ton of transient
+ // errors to the type checker. Swallow them.
+ if ctx.Err() != nil {
+ return nil, ctx.Err()
+ }
+ return pkg, nil
+}
+
+func (b *typeCheckBatch) typesConfig(inputs typeCheckInputs, onError func(e error), impMap map[string]*types.Package, errMap map[PackagePath]error) *types.Config {
+ cfg := &types.Config{
+ Sizes: inputs.sizes,
+ Error: onError,
+ Importer: importerFunc(func(path string) (*types.Package, error) {
+ // While all of the import errors could be reported
+ // based on the metadata before we start type checking,
+ // reporting them via types.Importer places the errors
+ // at the correct source location.
+ id, ok := inputs.depsByImpPath[ImportPath(path)]
+ if !ok {
+ // If the import declaration is broken,
+ // go list may fail to report metadata about it.
+ // See TestFixImportDecl for an example.
+ return nil, fmt.Errorf("missing metadata for import of %q", path)
+ }
+ depPH := inputs.deps[id]
+ if depPH == nil {
+ // e.g. missing metadata for dependencies in buildPackageHandle
+ return nil, missingPkgError(path, inputs.moduleMode)
+ }
+ if !source.IsValidImport(inputs.pkgPath, depPH.m.PkgPath) {
+ return nil, fmt.Errorf("invalid use of internal package %q", path)
+ }
+ pkg, ok := impMap[string(depPH.m.PkgPath)]
+ if !ok {
+ err := errMap[depPH.m.PkgPath]
+ if err == nil {
+ log.Fatalf("neither pkg nor error is set")
+ }
+ return nil, err
+ }
+ return pkg, nil
+ }),
+ }
+
+ if inputs.goVersion != "" {
+ goVersion := "go" + inputs.goVersion
+ // types.NewChecker panics if GoVersion is invalid. An unparsable mod
+ // file should probably stop us before we get here, but double check
+ // just in case.
+ if goVersionRx.MatchString(goVersion) {
+ typesinternal.SetGoVersion(cfg, goVersion)
+ }
+ }
+
+ // We want to type check cgo code if go/types supports it.
+ // We passed typecheckCgo to go/packages when we Loaded.
+ typesinternal.SetUsesCgo(cfg)
+ return cfg
+}
+
+// depsErrors creates diagnostics for each metadata error (e.g. import cycle).
+// These may be attached to import declarations in the transitive source files
+// of pkg, or to 'requires' declarations in the package's go.mod file.
+//
+// TODO(rfindley): move this to load.go
+func depsErrors(ctx context.Context, m *source.Metadata, meta *metadataGraph, fs source.FileSource, workspacePackages map[PackageID]PackagePath) ([]*source.Diagnostic, error) {
+ // Select packages that can't be found, and were imported in non-workspace packages.
+ // Workspace packages already show their own errors.
+ var relevantErrors []*packagesinternal.PackageError
+ for _, depsError := range m.DepsErrors {
+ // Up to Go 1.15, the missing package was included in the stack, which
+ // was presumably a bug. We want the next one up.
+ directImporterIdx := len(depsError.ImportStack) - 1
+ if directImporterIdx < 0 {
+ continue
+ }
+
+ directImporter := depsError.ImportStack[directImporterIdx]
+ if _, ok := workspacePackages[PackageID(directImporter)]; ok {
+ continue
+ }
+ relevantErrors = append(relevantErrors, depsError)
+ }
+
+ // Don't build the import index for nothing.
+ if len(relevantErrors) == 0 {
+ return nil, nil
+ }
+
+ // Subsequent checks require Go files.
+ if len(m.CompiledGoFiles) == 0 {
+ return nil, nil
+ }
+
+ // Build an index of all imports in the package.
+ type fileImport struct {
+ cgf *source.ParsedGoFile
+ imp *ast.ImportSpec
+ }
+ allImports := map[string][]fileImport{}
+ for _, uri := range m.CompiledGoFiles {
+ pgf, err := parseGoURI(ctx, fs, uri, source.ParseHeader)
+ if err != nil {
+ return nil, err
+ }
+ fset := source.FileSetFor(pgf.Tok)
+ // TODO(adonovan): modify Imports() to accept a single token.File (cgf.Tok).
+ for _, group := range astutil.Imports(fset, pgf.File) {
+ for _, imp := range group {
+ if imp.Path == nil {
+ continue
+ }
+ path := strings.Trim(imp.Path.Value, `"`)
+ allImports[path] = append(allImports[path], fileImport{pgf, imp})
+ }
+ }
+ }
+
+ // Apply a diagnostic to any import involved in the error, stopping once
+ // we reach the workspace.
+ var errors []*source.Diagnostic
+ for _, depErr := range relevantErrors {
+ for i := len(depErr.ImportStack) - 1; i >= 0; i-- {
+ item := depErr.ImportStack[i]
+ if _, ok := workspacePackages[PackageID(item)]; ok {
+ break
+ }
+
+ for _, imp := range allImports[item] {
+ rng, err := imp.cgf.NodeRange(imp.imp)
+ if err != nil {
+ return nil, err
+ }
+ fixes, err := goGetQuickFixes(m.Module != nil, imp.cgf.URI, item)
+ if err != nil {
+ return nil, err
+ }
+ errors = append(errors, &source.Diagnostic{
+ URI: imp.cgf.URI,
+ Range: rng,
+ Severity: protocol.SeverityError,
+ Source: source.TypeError,
+ Message: fmt.Sprintf("error while importing %v: %v", item, depErr.Err),
+ SuggestedFixes: fixes,
+ })
+ }
+ }
+ }
+
+ modFile, err := nearestModFile(ctx, m.CompiledGoFiles[0], fs)
+ if err != nil {
+ return nil, err
+ }
+ pm, err := parseModURI(ctx, fs, modFile)
+ if err != nil {
+ return nil, err
+ }
+
+ // Add a diagnostic to the module that contained the lowest-level import of
+ // the missing package.
+ for _, depErr := range relevantErrors {
+ for i := len(depErr.ImportStack) - 1; i >= 0; i-- {
+ item := depErr.ImportStack[i]
+ m := meta.metadata[PackageID(item)]
+ if m == nil || m.Module == nil {
+ continue
+ }
+ modVer := module.Version{Path: m.Module.Path, Version: m.Module.Version}
+ reference := findModuleReference(pm.File, modVer)
+ if reference == nil {
+ continue
+ }
+ rng, err := pm.Mapper.OffsetRange(reference.Start.Byte, reference.End.Byte)
+ if err != nil {
+ return nil, err
+ }
+ fixes, err := goGetQuickFixes(true, pm.URI, item)
+ if err != nil {
+ return nil, err
+ }
+ errors = append(errors, &source.Diagnostic{
+ URI: pm.URI,
+ Range: rng,
+ Severity: protocol.SeverityError,
+ Source: source.TypeError,
+ Message: fmt.Sprintf("error while importing %v: %v", item, depErr.Err),
+ SuggestedFixes: fixes,
+ })
+ break
+ }
+ }
+ return errors, nil
+}
+
+// missingPkgError returns an error message for a missing package that varies
+// based on the user's workspace mode.
+func missingPkgError(pkgPath string, moduleMode bool) error {
+ // TODO(rfindley): improve this error. Previous versions of this error had
+ // access to the full snapshot, and could provide more information (such as
+ // the initialization error).
+ if moduleMode {
+ // Previously, we would present the initialization error here.
+ return fmt.Errorf("no required module provides package %q", pkgPath)
+ } else {
+ // Previously, we would list the directories in GOROOT and GOPATH here.
+ return fmt.Errorf("cannot find package %q in GOROOT or GOPATH", pkgPath)
+ }
+}
+
+type extendedError struct {
+ primary types.Error
+ secondaries []types.Error
+}
+
+func (e extendedError) Error() string {
+ return e.primary.Error()
+}
+
+// expandErrors duplicates "secondary" errors by mapping them to their main
+// error. Some errors returned by the type checker are followed by secondary
+// errors which give more information about the error. These are errors in
+// their own right, and they are marked by starting with \t. For instance, when
+// there is a multiply-defined function, the secondary error points back to the
+// definition first noticed.
+//
+// This function associates the secondary error with its primary error, which can
+// then be used as RelatedInformation when the error becomes a diagnostic.
+//
+// If supportsRelatedInformation is false, the secondary is instead embedded as
+// additional context in the primary error.
+func expandErrors(errs []types.Error, supportsRelatedInformation bool) []extendedError {
+ var result []extendedError
+ for i := 0; i < len(errs); {
+ original := extendedError{
+ primary: errs[i],
+ }
+ for i++; i < len(errs); i++ {
+ spl := errs[i]
+ if len(spl.Msg) == 0 || spl.Msg[0] != '\t' {
+ break
+ }
+ spl.Msg = spl.Msg[1:]
+ original.secondaries = append(original.secondaries, spl)
+ }
+
+ // Clone the error to all its related locations -- VS Code, at least,
+ // doesn't do it for us.
+ result = append(result, original)
+ for i, mainSecondary := range original.secondaries {
+ // Create the new primary error, with a tweaked message, in the
+ // secondary's location. We need to start from the secondary to
+ // capture its unexported location fields.
+ relocatedSecondary := mainSecondary
+ if supportsRelatedInformation {
+ relocatedSecondary.Msg = fmt.Sprintf("%v (see details)", original.primary.Msg)
+ } else {
+ relocatedSecondary.Msg = fmt.Sprintf("%v (this error: %v)", original.primary.Msg, mainSecondary.Msg)
+ }
+ relocatedSecondary.Soft = original.primary.Soft
+
+ // Copy over the secondary errors, noting the location of the
+ // current error we're cloning.
+ clonedError := extendedError{primary: relocatedSecondary, secondaries: []types.Error{original.primary}}
+ for j, secondary := range original.secondaries {
+ if i == j {
+ secondary.Msg += " (this error)"
+ }
+ clonedError.secondaries = append(clonedError.secondaries, secondary)
+ }
+ result = append(result, clonedError)
+ }
+
+ }
+ return result
+}
+
+// An importFunc is an implementation of the single-method
+// types.Importer interface based on a function value.
+type importerFunc func(path string) (*types.Package, error)
+
+func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) }
diff --git a/gopls/internal/lsp/cache/debug.go b/gopls/internal/lsp/cache/debug.go
new file mode 100644
index 000000000..9d9de6322
--- /dev/null
+++ b/gopls/internal/lsp/cache/debug.go
@@ -0,0 +1,55 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cache
+
+import (
+ "fmt"
+ "os"
+ "sort"
+)
+
+// This file contains helpers that can be used to instrument code while
+// debugging.
+
+// debugEnabled toggles the helpers below.
+const debugEnabled = false
+
+// If debugEnabled is true, debugf formats its arguments and prints to stderr.
+// If debugEnabled is false, it is a no-op.
+func debugf(format string, args ...interface{}) {
+ if !debugEnabled {
+ return
+ }
+ if false {
+ _ = fmt.Sprintf(format, args...) // encourage vet to validate format strings
+ }
+ fmt.Fprintf(os.Stderr, ">>> "+format+"\n", args...)
+}
+
+// If debugEnabled is true, dumpWorkspace prints a summary of workspace
+// packages to stderr. If debugEnabled is false, it is a no-op.
+//
+// TODO(rfindley): this has served its purpose. Delete.
+func (s *snapshot) dumpWorkspace(context string) {
+ if !debugEnabled {
+ return
+ }
+
+ debugf("workspace (after %s):", context)
+ var ids []PackageID
+ for id := range s.workspacePackages {
+ ids = append(ids, id)
+ }
+
+ sort.Slice(ids, func(i, j int) bool {
+ return ids[i] < ids[j]
+ })
+
+ for _, id := range ids {
+ pkgPath := s.workspacePackages[id]
+ _, ok := s.meta.metadata[id]
+ debugf(" %s:%s (metadata: %t)", id, pkgPath, ok)
+ }
+}
diff --git a/gopls/internal/lsp/cache/errors.go b/gopls/internal/lsp/cache/errors.go
new file mode 100644
index 000000000..07783f4b3
--- /dev/null
+++ b/gopls/internal/lsp/cache/errors.go
@@ -0,0 +1,528 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cache
+
+// This file defines routines to convert diagnostics from go list, go
+// get, go/packages, parsing, type checking, and analysis into
+// source.Diagnostic form, and suggesting quick fixes.
+
+import (
+ "context"
+ "fmt"
+ "go/scanner"
+ "go/token"
+ "go/types"
+ "log"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "golang.org/x/tools/go/packages"
+ "golang.org/x/tools/gopls/internal/lsp/command"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/safetoken"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/analysisinternal"
+ "golang.org/x/tools/internal/bug"
+ "golang.org/x/tools/internal/typesinternal"
+)
+
+// goPackagesErrorDiagnostics translates the given go/packages Error into a
+// diagnostic, using the provided metadata and filesource.
+//
+// The slice of diagnostics may be empty.
+func goPackagesErrorDiagnostics(ctx context.Context, e packages.Error, m *source.Metadata, fs source.FileSource) ([]*source.Diagnostic, error) {
+ if diag, err := parseGoListImportCycleError(ctx, e, m, fs); err != nil {
+ return nil, err
+ } else if diag != nil {
+ return []*source.Diagnostic{diag}, nil
+ }
+
+ var spn span.Span
+ if e.Pos == "" {
+ spn = parseGoListError(e.Msg, m.LoadDir)
+ // We may not have been able to parse a valid span. Apply the errors to all files.
+ if _, err := spanToRange(ctx, fs, spn); err != nil {
+ var diags []*source.Diagnostic
+ for _, uri := range m.CompiledGoFiles {
+ diags = append(diags, &source.Diagnostic{
+ URI: uri,
+ Severity: protocol.SeverityError,
+ Source: source.ListError,
+ Message: e.Msg,
+ })
+ }
+ return diags, nil
+ }
+ } else {
+ spn = span.ParseInDir(e.Pos, m.LoadDir)
+ }
+
+ // TODO(rfindley): in some cases the go command outputs invalid spans, for
+ // example (from TestGoListErrors):
+ //
+ // package a
+ // import
+ //
+ // In this case, the go command will complain about a.go:2:8, which is after
+ // the trailing newline but still considered to be on the second line, most
+ // likely because *token.File lacks information about newline termination.
+ //
+ // We could do better here by handling that case.
+ rng, err := spanToRange(ctx, fs, spn)
+ if err != nil {
+ return nil, err
+ }
+ return []*source.Diagnostic{{
+ URI: spn.URI(),
+ Range: rng,
+ Severity: protocol.SeverityError,
+ Source: source.ListError,
+ Message: e.Msg,
+ }}, nil
+}
+
+func parseErrorDiagnostics(pkg *syntaxPackage, errList scanner.ErrorList) ([]*source.Diagnostic, error) {
+ // The first parser error is likely the root cause of the problem.
+ if errList.Len() <= 0 {
+ return nil, fmt.Errorf("no errors in %v", errList)
+ }
+ e := errList[0]
+ pgf, err := pkg.File(span.URIFromPath(e.Pos.Filename))
+ if err != nil {
+ return nil, err
+ }
+ rng, err := pgf.Mapper.OffsetRange(e.Pos.Offset, e.Pos.Offset)
+ if err != nil {
+ return nil, err
+ }
+ return []*source.Diagnostic{{
+ URI: pgf.URI,
+ Range: rng,
+ Severity: protocol.SeverityError,
+ Source: source.ParseError,
+ Message: e.Msg,
+ }}, nil
+}
+
+var importErrorRe = regexp.MustCompile(`could not import ([^\s]+)`)
+var unsupportedFeatureRe = regexp.MustCompile(`.*require.* go(\d+\.\d+) or later`)
+
+func typeErrorDiagnostics(moduleMode bool, linkTarget string, pkg *syntaxPackage, e extendedError) ([]*source.Diagnostic, error) {
+ code, loc, err := typeErrorData(pkg, e.primary)
+ if err != nil {
+ return nil, err
+ }
+ diag := &source.Diagnostic{
+ URI: loc.URI.SpanURI(),
+ Range: loc.Range,
+ Severity: protocol.SeverityError,
+ Source: source.TypeError,
+ Message: e.primary.Msg,
+ }
+ if code != 0 {
+ diag.Code = code.String()
+ diag.CodeHref = typesCodeHref(linkTarget, code)
+ }
+ switch code {
+ case typesinternal.UnusedVar, typesinternal.UnusedImport:
+ diag.Tags = append(diag.Tags, protocol.Unnecessary)
+ }
+
+ for _, secondary := range e.secondaries {
+ _, secondaryLoc, err := typeErrorData(pkg, secondary)
+ if err != nil {
+ return nil, err
+ }
+ diag.Related = append(diag.Related, protocol.DiagnosticRelatedInformation{
+ Location: secondaryLoc,
+ Message: secondary.Msg,
+ })
+ }
+
+ if match := importErrorRe.FindStringSubmatch(e.primary.Msg); match != nil {
+ diag.SuggestedFixes, err = goGetQuickFixes(moduleMode, loc.URI.SpanURI(), match[1])
+ if err != nil {
+ return nil, err
+ }
+ }
+ if match := unsupportedFeatureRe.FindStringSubmatch(e.primary.Msg); match != nil {
+ diag.SuggestedFixes, err = editGoDirectiveQuickFix(moduleMode, loc.URI.SpanURI(), match[1])
+ if err != nil {
+ return nil, err
+ }
+ }
+ return []*source.Diagnostic{diag}, nil
+}
+
+func goGetQuickFixes(moduleMode bool, uri span.URI, pkg string) ([]source.SuggestedFix, error) {
+ // Go get only supports module mode for now.
+ if !moduleMode {
+ return nil, nil
+ }
+ title := fmt.Sprintf("go get package %v", pkg)
+ cmd, err := command.NewGoGetPackageCommand(title, command.GoGetPackageArgs{
+ URI: protocol.URIFromSpanURI(uri),
+ AddRequire: true,
+ Pkg: pkg,
+ })
+ if err != nil {
+ return nil, err
+ }
+ return []source.SuggestedFix{source.SuggestedFixFromCommand(cmd, protocol.QuickFix)}, nil
+}
+
+func editGoDirectiveQuickFix(moduleMode bool, uri span.URI, version string) ([]source.SuggestedFix, error) {
+ // Go mod edit only supports module mode.
+ if !moduleMode {
+ return nil, nil
+ }
+ title := fmt.Sprintf("go mod edit -go=%s", version)
+ cmd, err := command.NewEditGoDirectiveCommand(title, command.EditGoDirectiveArgs{
+ URI: protocol.URIFromSpanURI(uri),
+ Version: version,
+ })
+ if err != nil {
+ return nil, err
+ }
+ return []source.SuggestedFix{source.SuggestedFixFromCommand(cmd, protocol.QuickFix)}, nil
+}
+
+// encodeDiagnostics gob-encodes the given diagnostics.
+func encodeDiagnostics(srcDiags []*source.Diagnostic) []byte {
+ var gobDiags []gobDiagnostic
+ for _, srcDiag := range srcDiags {
+ var gobFixes []gobSuggestedFix
+ for _, srcFix := range srcDiag.SuggestedFixes {
+ gobFix := gobSuggestedFix{
+ Message: srcFix.Title,
+ ActionKind: srcFix.ActionKind,
+ }
+ for uri, srcEdits := range srcFix.Edits {
+ for _, srcEdit := range srcEdits {
+ gobFix.TextEdits = append(gobFix.TextEdits, gobTextEdit{
+ Location: protocol.Location{
+ URI: protocol.URIFromSpanURI(uri),
+ Range: srcEdit.Range,
+ },
+ NewText: []byte(srcEdit.NewText),
+ })
+ }
+ }
+ if srcCmd := srcFix.Command; srcCmd != nil {
+ gobFix.Command = &gobCommand{
+ Title: srcCmd.Title,
+ Command: srcCmd.Command,
+ Arguments: srcCmd.Arguments,
+ }
+ }
+ gobFixes = append(gobFixes, gobFix)
+ }
+ var gobRelated []gobRelatedInformation
+ for _, srcRel := range srcDiag.Related {
+ gobRel := gobRelatedInformation(srcRel)
+ gobRelated = append(gobRelated, gobRel)
+ }
+ gobDiag := gobDiagnostic{
+ Location: protocol.Location{
+ URI: protocol.URIFromSpanURI(srcDiag.URI),
+ Range: srcDiag.Range,
+ },
+ Severity: srcDiag.Severity,
+ Code: srcDiag.Code,
+ CodeHref: srcDiag.CodeHref,
+ Source: string(srcDiag.Source),
+ Message: srcDiag.Message,
+ SuggestedFixes: gobFixes,
+ Related: gobRelated,
+ Tags: srcDiag.Tags,
+ }
+ gobDiags = append(gobDiags, gobDiag)
+ }
+ return mustEncode(gobDiags)
+}
+
+// decodeDiagnostics decodes the given gob-encoded diagnostics.
+func decodeDiagnostics(data []byte) []*source.Diagnostic {
+ var gobDiags []gobDiagnostic
+ mustDecode(data, &gobDiags)
+ var srcDiags []*source.Diagnostic
+ for _, gobDiag := range gobDiags {
+ var srcFixes []source.SuggestedFix
+ for _, gobFix := range gobDiag.SuggestedFixes {
+ srcFix := source.SuggestedFix{
+ Title: gobFix.Message,
+ ActionKind: gobFix.ActionKind,
+ }
+ for _, gobEdit := range gobFix.TextEdits {
+ if srcFix.Edits == nil {
+ srcFix.Edits = make(map[span.URI][]protocol.TextEdit)
+ }
+ srcEdit := protocol.TextEdit{
+ Range: gobEdit.Location.Range,
+ NewText: string(gobEdit.NewText),
+ }
+ uri := gobEdit.Location.URI.SpanURI()
+ srcFix.Edits[uri] = append(srcFix.Edits[uri], srcEdit)
+ }
+ if gobCmd := gobFix.Command; gobCmd != nil {
+ gobFix.Command = &gobCommand{
+ Title: gobCmd.Title,
+ Command: gobCmd.Command,
+ Arguments: gobCmd.Arguments,
+ }
+ }
+ srcFixes = append(srcFixes, srcFix)
+ }
+ var srcRelated []protocol.DiagnosticRelatedInformation
+ for _, gobRel := range gobDiag.Related {
+ srcRel := protocol.DiagnosticRelatedInformation(gobRel)
+ srcRelated = append(srcRelated, srcRel)
+ }
+ srcDiag := &source.Diagnostic{
+ URI: gobDiag.Location.URI.SpanURI(),
+ Range: gobDiag.Location.Range,
+ Severity: gobDiag.Severity,
+ Source: source.AnalyzerErrorKind(gobDiag.Source),
+ Message: gobDiag.Message,
+ Tags: gobDiag.Tags,
+ Related: srcRelated,
+ SuggestedFixes: srcFixes,
+ }
+ srcDiags = append(srcDiags, srcDiag)
+ }
+ return srcDiags
+}
+
+// toSourceDiagnostic converts a gobDiagnostic to "source" form.
+func toSourceDiagnostic(srcAnalyzer *source.Analyzer, gobDiag *gobDiagnostic) *source.Diagnostic {
+ var related []protocol.DiagnosticRelatedInformation
+ for _, gobRelated := range gobDiag.Related {
+ related = append(related, protocol.DiagnosticRelatedInformation(gobRelated))
+ }
+
+ kinds := srcAnalyzer.ActionKind
+ if len(srcAnalyzer.ActionKind) == 0 {
+ kinds = append(kinds, protocol.QuickFix)
+ }
+ fixes := suggestedAnalysisFixes(gobDiag, kinds)
+ if srcAnalyzer.Fix != "" {
+ cmd, err := command.NewApplyFixCommand(gobDiag.Message, command.ApplyFixArgs{
+ URI: gobDiag.Location.URI,
+ Range: gobDiag.Location.Range,
+ Fix: srcAnalyzer.Fix,
+ })
+ if err != nil {
+ // JSON marshalling of these argument values cannot fail.
+ log.Fatalf("internal error in NewApplyFixCommand: %v", err)
+ }
+ for _, kind := range kinds {
+ fixes = append(fixes, source.SuggestedFixFromCommand(cmd, kind))
+ }
+ }
+
+ severity := srcAnalyzer.Severity
+ if severity == 0 {
+ severity = protocol.SeverityWarning
+ }
+
+ diag := &source.Diagnostic{
+ URI: gobDiag.Location.URI.SpanURI(),
+ Range: gobDiag.Location.Range,
+ Severity: severity,
+ Source: source.AnalyzerErrorKind(gobDiag.Source),
+ Message: gobDiag.Message,
+ Related: related,
+ SuggestedFixes: fixes,
+ }
+ // If the fixes only delete code, assume that the diagnostic is reporting dead code.
+ if onlyDeletions(fixes) {
+ diag.Tags = []protocol.DiagnosticTag{protocol.Unnecessary}
+ }
+ return diag
+}
+
+// onlyDeletions returns true if all of the suggested fixes are deletions.
+func onlyDeletions(fixes []source.SuggestedFix) bool {
+ for _, fix := range fixes {
+ if fix.Command != nil {
+ return false
+ }
+ for _, edits := range fix.Edits {
+ for _, edit := range edits {
+ if edit.NewText != "" {
+ return false
+ }
+ if protocol.ComparePosition(edit.Range.Start, edit.Range.End) == 0 {
+ return false
+ }
+ }
+ }
+ }
+ return len(fixes) > 0
+}
+
+func typesCodeHref(linkTarget string, code typesinternal.ErrorCode) string {
+ return source.BuildLink(linkTarget, "golang.org/x/tools/internal/typesinternal", code.String())
+}
+
+func suggestedAnalysisFixes(diag *gobDiagnostic, kinds []protocol.CodeActionKind) []source.SuggestedFix {
+ var fixes []source.SuggestedFix
+ for _, fix := range diag.SuggestedFixes {
+ edits := make(map[span.URI][]protocol.TextEdit)
+ for _, e := range fix.TextEdits {
+ uri := span.URI(e.Location.URI)
+ edits[uri] = append(edits[uri], protocol.TextEdit{
+ Range: e.Location.Range,
+ NewText: string(e.NewText),
+ })
+ }
+ for _, kind := range kinds {
+ fixes = append(fixes, source.SuggestedFix{
+ Title: fix.Message,
+ Edits: edits,
+ ActionKind: kind,
+ })
+ }
+
+ }
+ return fixes
+}
+
+func typeErrorData(pkg *syntaxPackage, terr types.Error) (typesinternal.ErrorCode, protocol.Location, error) {
+ ecode, start, end, ok := typesinternal.ReadGo116ErrorData(terr)
+ if !ok {
+ start, end = terr.Pos, terr.Pos
+ ecode = 0
+ }
+ // go/types may return invalid positions in some cases, such as
+ // in errors on tokens missing from the syntax tree.
+ if !start.IsValid() {
+ return 0, protocol.Location{}, fmt.Errorf("type error (%q, code %d, go116=%t) without position", terr.Msg, ecode, ok)
+ }
+ // go/types errors retain their FileSet.
+ // Sanity-check that we're using the right one.
+ fset := pkg.fset
+ if fset != terr.Fset {
+ return 0, protocol.Location{}, bug.Errorf("wrong FileSet for type error")
+ }
+ posn := safetoken.StartPosition(fset, start)
+ if !posn.IsValid() {
+ return 0, protocol.Location{}, fmt.Errorf("position %d of type error %q (code %q) not found in FileSet", start, start, terr)
+ }
+ pgf, err := pkg.File(span.URIFromPath(posn.Filename))
+ if err != nil {
+ return 0, protocol.Location{}, err
+ }
+ if !end.IsValid() || end == start {
+ end = analysisinternal.TypeErrorEndPos(fset, pgf.Src, start)
+ }
+ loc, err := pgf.Mapper.PosLocation(pgf.Tok, start, end)
+ return ecode, loc, err
+}
+
+// spanToRange converts a span.Span to a protocol.Range, by mapping content
+// contained in the provided FileSource.
+func spanToRange(ctx context.Context, fs source.FileSource, spn span.Span) (protocol.Range, error) {
+ uri := spn.URI()
+ fh, err := fs.GetFile(ctx, uri)
+ if err != nil {
+ return protocol.Range{}, err
+ }
+ content, err := fh.Read()
+ if err != nil {
+ return protocol.Range{}, err
+ }
+ mapper := protocol.NewMapper(uri, content)
+ return mapper.SpanRange(spn)
+}
+
+// parseGoListError attempts to parse a standard `go list` error message
+// by stripping off the trailing error message.
+//
+// It works only on errors whose message is prefixed by colon,
+// followed by a space (": "). For example:
+//
+// attributes.go:13:1: expected 'package', found 'type'
+func parseGoListError(input, wd string) span.Span {
+ input = strings.TrimSpace(input)
+ msgIndex := strings.Index(input, ": ")
+ if msgIndex < 0 {
+ return span.Parse(input)
+ }
+ return span.ParseInDir(input[:msgIndex], wd)
+}
+
+// parseGoListImportCycleError attempts to parse the given go/packages error as
+// an import cycle, returning a diagnostic if successful.
+//
+// If the error is not detected as an import cycle error, it returns nil, nil.
+func parseGoListImportCycleError(ctx context.Context, e packages.Error, m *source.Metadata, fs source.FileSource) (*source.Diagnostic, error) {
+ re := regexp.MustCompile(`(.*): import stack: \[(.+)\]`)
+ matches := re.FindStringSubmatch(strings.TrimSpace(e.Msg))
+ if len(matches) < 3 {
+ return nil, nil
+ }
+ msg := matches[1]
+ importList := strings.Split(matches[2], " ")
+ // Since the error is relative to the current package. The import that is causing
+ // the import cycle error is the second one in the list.
+ if len(importList) < 2 {
+ return nil, nil
+ }
+ // Imports have quotation marks around them.
+ circImp := strconv.Quote(importList[1])
+ for _, uri := range m.CompiledGoFiles {
+ pgf, err := parseGoURI(ctx, fs, uri, source.ParseHeader)
+ if err != nil {
+ return nil, err
+ }
+ // Search file imports for the import that is causing the import cycle.
+ for _, imp := range pgf.File.Imports {
+ if imp.Path.Value == circImp {
+ rng, err := pgf.NodeMappedRange(imp)
+ if err != nil {
+ return nil, nil
+ }
+
+ return &source.Diagnostic{
+ URI: pgf.URI,
+ Range: rng.Range(),
+ Severity: protocol.SeverityError,
+ Source: source.ListError,
+ Message: msg,
+ }, nil
+ }
+ }
+ }
+ return nil, nil
+}
+
+// parseGoURI is a helper to parse the Go file at the given URI from the file
+// source fs. The resulting syntax and token.File belong to an ephemeral,
+// encapsulated FileSet, so this file stands only on its own: it's not suitable
+// to use in a list of file of a package, for example.
+//
+// It returns an error if the file could not be read.
+func parseGoURI(ctx context.Context, fs source.FileSource, uri span.URI, mode source.ParseMode) (*source.ParsedGoFile, error) {
+ fh, err := fs.GetFile(ctx, uri)
+ if err != nil {
+ return nil, err
+ }
+ return parseGoImpl(ctx, token.NewFileSet(), fh, source.ParseHeader)
+}
+
+// parseModURI is a helper to parse the Mod file at the given URI from the file
+// source fs.
+//
+// It returns an error if the file could not be read.
+func parseModURI(ctx context.Context, fs source.FileSource, uri span.URI) (*source.ParsedModule, error) {
+ fh, err := fs.GetFile(ctx, uri)
+ if err != nil {
+ return nil, err
+ }
+ return parseModImpl(ctx, fh)
+}
diff --git a/internal/lsp/cache/error_test.go b/gopls/internal/lsp/cache/errors_test.go
index 43cc03f78..43cc03f78 100644
--- a/internal/lsp/cache/error_test.go
+++ b/gopls/internal/lsp/cache/errors_test.go
diff --git a/gopls/internal/lsp/cache/fs_memoized.go b/gopls/internal/lsp/cache/fs_memoized.go
new file mode 100644
index 000000000..9acd87276
--- /dev/null
+++ b/gopls/internal/lsp/cache/fs_memoized.go
@@ -0,0 +1,149 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cache
+
+import (
+ "context"
+ "os"
+ "sync"
+ "time"
+
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/event/tag"
+ "golang.org/x/tools/internal/robustio"
+)
+
+// A memoizedFS is a file source that memoizes reads, to reduce IO.
+type memoizedFS struct {
+ mu sync.Mutex
+
+ // filesByID maps existing file inodes to the result of a read.
+ // (The read may have failed, e.g. due to EACCES or a delete between stat+read.)
+ // Each slice is a non-empty list of aliases: different URIs.
+ filesByID map[robustio.FileID][]*DiskFile
+}
+
+func newMemoizedFS() *memoizedFS {
+ return &memoizedFS{filesByID: make(map[robustio.FileID][]*DiskFile)}
+}
+
+// A DiskFile is a file on the filesystem, or a failure to read one.
+// It implements the source.FileHandle interface.
+type DiskFile struct {
+ uri span.URI
+ modTime time.Time
+ content []byte
+ hash source.Hash
+ err error
+}
+
+func (h *DiskFile) URI() span.URI { return h.uri }
+
+func (h *DiskFile) FileIdentity() source.FileIdentity {
+ return source.FileIdentity{
+ URI: h.uri,
+ Hash: h.hash,
+ }
+}
+
+func (h *DiskFile) Saved() bool { return true }
+func (h *DiskFile) Version() int32 { return 0 }
+func (h *DiskFile) Read() ([]byte, error) { return h.content, h.err }
+
+// GetFile stats and (maybe) reads the file, updates the cache, and returns it.
+func (fs *memoizedFS) GetFile(ctx context.Context, uri span.URI) (source.FileHandle, error) {
+ id, mtime, err := robustio.GetFileID(uri.Filename())
+ if err != nil {
+ // file does not exist
+ return &DiskFile{
+ err: err,
+ uri: uri,
+ }, nil
+ }
+
+ // We check if the file has changed by comparing modification times. Notably,
+ // this is an imperfect heuristic as various systems have low resolution
+ // mtimes (as much as 1s on WSL or s390x builders), so we only cache
+ // filehandles if mtime is old enough to be reliable, meaning that we don't
+ // expect a subsequent write to have the same mtime.
+ //
+ // The coarsest mtime precision we've seen in practice is 1s, so consider
+ // mtime to be unreliable if it is less than 2s old. Capture this before
+ // doing anything else.
+ recentlyModified := time.Since(mtime) < 2*time.Second
+
+ fs.mu.Lock()
+ fhs, ok := fs.filesByID[id]
+ if ok && fhs[0].modTime.Equal(mtime) {
+ var fh *DiskFile
+ // We have already seen this file and it has not changed.
+ for _, h := range fhs {
+ if h.uri == uri {
+ fh = h
+ break
+ }
+ }
+ // No file handle for this exact URI. Create an alias, but share content.
+ if fh == nil {
+ newFH := *fhs[0]
+ newFH.uri = uri
+ fh = &newFH
+ fhs = append(fhs, fh)
+ fs.filesByID[id] = fhs
+ }
+ fs.mu.Unlock()
+ return fh, nil
+ }
+ fs.mu.Unlock()
+
+ // Unknown file, or file has changed. Read (or re-read) it.
+ fh, err := readFile(ctx, uri, mtime) // ~25us
+ if err != nil {
+ return nil, err // e.g. cancelled (not: read failed)
+ }
+
+ fs.mu.Lock()
+ if !recentlyModified {
+ fs.filesByID[id] = []*DiskFile{fh}
+ } else {
+ delete(fs.filesByID, id)
+ }
+ fs.mu.Unlock()
+ return fh, nil
+}
+
+// ioLimit limits the number of parallel file reads per process.
+var ioLimit = make(chan struct{}, 128)
+
+func readFile(ctx context.Context, uri span.URI, mtime time.Time) (*DiskFile, error) {
+ select {
+ case ioLimit <- struct{}{}:
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ }
+ defer func() { <-ioLimit }()
+
+ ctx, done := event.Start(ctx, "cache.readFile", tag.File.Of(uri.Filename()))
+ _ = ctx
+ defer done()
+
+ // It is possible that a race causes us to read a file with different file
+ // ID, or whose mtime differs from the given mtime. However, in these cases
+ // we expect the client to notify of a subsequent file change, and the file
+ // content should be eventually consistent.
+ content, err := os.ReadFile(uri.Filename()) // ~20us
+ if err != nil {
+ content = nil // just in case
+ }
+ return &DiskFile{
+ modTime: mtime,
+ uri: uri,
+ content: content,
+ hash: source.HashOf(content),
+ err: err,
+ }, nil
+}
diff --git a/gopls/internal/lsp/cache/fs_overlay.go b/gopls/internal/lsp/cache/fs_overlay.go
new file mode 100644
index 000000000..36a7194ce
--- /dev/null
+++ b/gopls/internal/lsp/cache/fs_overlay.go
@@ -0,0 +1,78 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cache
+
+import (
+ "context"
+ "sync"
+
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+)
+
+// An overlayFS is a source.FileSource that keeps track of overlays on top of a
+// delegate FileSource.
+type overlayFS struct {
+ delegate source.FileSource
+
+ mu sync.Mutex
+ overlays map[span.URI]*Overlay
+}
+
+func newOverlayFS(delegate source.FileSource) *overlayFS {
+ return &overlayFS{
+ delegate: delegate,
+ overlays: make(map[span.URI]*Overlay),
+ }
+}
+
+// Overlays returns a new unordered array of overlays.
+func (fs *overlayFS) Overlays() []*Overlay {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ overlays := make([]*Overlay, 0, len(fs.overlays))
+ for _, overlay := range fs.overlays {
+ overlays = append(overlays, overlay)
+ }
+ return overlays
+}
+
+func (fs *overlayFS) GetFile(ctx context.Context, uri span.URI) (source.FileHandle, error) {
+ fs.mu.Lock()
+ overlay, ok := fs.overlays[uri]
+ fs.mu.Unlock()
+ if ok {
+ return overlay, nil
+ }
+ return fs.delegate.GetFile(ctx, uri)
+}
+
+// An Overlay is a file open in the editor. It may have unsaved edits.
+// It implements the source.FileHandle interface.
+type Overlay struct {
+ uri span.URI
+ content []byte
+ hash source.Hash
+ version int32
+ kind source.FileKind
+
+ // saved is true if a file matches the state on disk,
+ // and therefore does not need to be part of the overlay sent to go/packages.
+ saved bool
+}
+
+func (o *Overlay) URI() span.URI { return o.uri }
+
+func (o *Overlay) FileIdentity() source.FileIdentity {
+ return source.FileIdentity{
+ URI: o.uri,
+ Hash: o.hash,
+ }
+}
+
+func (o *Overlay) Read() ([]byte, error) { return o.content, nil }
+func (o *Overlay) Version() int32 { return o.version }
+func (o *Overlay) Saved() bool { return o.saved }
+func (o *Overlay) Kind() source.FileKind { return o.kind }
diff --git a/gopls/internal/lsp/cache/graph.go b/gopls/internal/lsp/cache/graph.go
new file mode 100644
index 000000000..f304112fb
--- /dev/null
+++ b/gopls/internal/lsp/cache/graph.go
@@ -0,0 +1,131 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cache
+
+import (
+ "sort"
+
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+)
+
+// A metadataGraph is an immutable and transitively closed import
+// graph of Go packages, as obtained from go/packages.
+type metadataGraph struct {
+ // metadata maps package IDs to their associated metadata.
+ metadata map[PackageID]*source.Metadata
+
+ // importedBy maps package IDs to the list of packages that import them.
+ importedBy map[PackageID][]PackageID
+
+ // ids maps file URIs to package IDs, sorted by (!valid, cli, packageID).
+ // A single file may belong to multiple packages due to tests packages.
+ ids map[span.URI][]PackageID
+}
+
+// Metadata implements the source.MetadataSource interface.
+func (g *metadataGraph) Metadata(id PackageID) *source.Metadata {
+ return g.metadata[id]
+}
+
+// Clone creates a new metadataGraph, applying the given updates to the
+// receiver.
+func (g *metadataGraph) Clone(updates map[PackageID]*source.Metadata) *metadataGraph {
+ if len(updates) == 0 {
+ // Optimization: since the graph is immutable, we can return the receiver.
+ return g
+ }
+ result := &metadataGraph{metadata: make(map[PackageID]*source.Metadata, len(g.metadata))}
+ // Copy metadata.
+ for id, m := range g.metadata {
+ result.metadata[id] = m
+ }
+ for id, m := range updates {
+ if m == nil {
+ delete(result.metadata, id)
+ } else {
+ result.metadata[id] = m
+ }
+ }
+ result.build()
+ return result
+}
+
+// build constructs g.importedBy and g.uris from g.metadata.
+//
+// TODO(rfindley): we should enforce that the graph is acyclic here.
+func (g *metadataGraph) build() {
+ // Build the import graph.
+ g.importedBy = make(map[PackageID][]PackageID)
+ for id, m := range g.metadata {
+ for _, depID := range m.DepsByPkgPath {
+ g.importedBy[depID] = append(g.importedBy[depID], id)
+ }
+ }
+
+ // Collect file associations.
+ g.ids = make(map[span.URI][]PackageID)
+ for id, m := range g.metadata {
+ uris := map[span.URI]struct{}{}
+ for _, uri := range m.CompiledGoFiles {
+ uris[uri] = struct{}{}
+ }
+ for _, uri := range m.GoFiles {
+ uris[uri] = struct{}{}
+ }
+ for uri := range uris {
+ g.ids[uri] = append(g.ids[uri], id)
+ }
+ }
+
+ // Sort and filter file associations.
+ for uri, ids := range g.ids {
+ sort.Slice(ids, func(i, j int) bool {
+ cli := source.IsCommandLineArguments(ids[i])
+ clj := source.IsCommandLineArguments(ids[j])
+ if cli != clj {
+ return clj
+ }
+
+ // 2. packages appear in name order.
+ return ids[i] < ids[j]
+ })
+
+ // Choose the best IDs for each URI, according to the following rules:
+ // - If there are any valid real packages, choose them.
+ // - Else, choose the first valid command-line-argument package, if it exists.
+ //
+ // TODO(rfindley): it might be better to track all IDs here, and exclude
+ // them later when type checking, but this is the existing behavior.
+ for i, id := range ids {
+ // If we've seen *anything* prior to command-line arguments package, take
+ // it. Note that ids[0] may itself be command-line-arguments.
+ if i > 0 && source.IsCommandLineArguments(id) {
+ g.ids[uri] = ids[:i]
+ break
+ }
+ }
+ }
+}
+
+// reverseReflexiveTransitiveClosure returns a new mapping containing the
+// metadata for the specified packages along with any package that
+// transitively imports one of them, keyed by ID, including all the initial packages.
+func (g *metadataGraph) reverseReflexiveTransitiveClosure(ids ...PackageID) map[PackageID]*source.Metadata {
+ seen := make(map[PackageID]*source.Metadata)
+ var visitAll func([]PackageID)
+ visitAll = func(ids []PackageID) {
+ for _, id := range ids {
+ if seen[id] == nil {
+ if m := g.metadata[id]; m != nil {
+ seen[id] = m
+ visitAll(g.importedBy[id])
+ }
+ }
+ }
+ }
+ visitAll(ids)
+ return seen
+}
diff --git a/gopls/internal/lsp/cache/imports.go b/gopls/internal/lsp/cache/imports.go
new file mode 100644
index 000000000..46b8d151f
--- /dev/null
+++ b/gopls/internal/lsp/cache/imports.go
@@ -0,0 +1,188 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cache
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/event/keys"
+ "golang.org/x/tools/internal/gocommand"
+ "golang.org/x/tools/internal/imports"
+)
+
+type importsState struct {
+ ctx context.Context
+
+ mu sync.Mutex
+ processEnv *imports.ProcessEnv
+ cacheRefreshDuration time.Duration
+ cacheRefreshTimer *time.Timer
+ cachedModFileHash source.Hash
+ cachedBuildFlags []string
+ cachedDirectoryFilters []string
+
+ // runOnce records whether runProcessEnvFunc has been called at least once.
+ // This is necessary to avoid resetting state before the process env is
+ // populated.
+ //
+ // TODO(rfindley): this shouldn't be necessary.
+ runOnce bool
+}
+
+func (s *importsState) runProcessEnvFunc(ctx context.Context, snapshot *snapshot, fn func(*imports.Options) error) error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ // Find the hash of active mod files, if any. Using the unsaved content
+ // is slightly wasteful, since we'll drop caches a little too often, but
+ // the mod file shouldn't be changing while people are autocompleting.
+ //
+ // TODO(rfindley): consider instead hashing on-disk modfiles here.
+ var modFileHash source.Hash
+ for m := range snapshot.workspaceModFiles {
+ fh, err := snapshot.GetFile(ctx, m)
+ if err != nil {
+ return err
+ }
+ modFileHash.XORWith(fh.FileIdentity().Hash)
+ }
+
+ // view.goEnv is immutable -- changes make a new view. Options can change.
+ // We can't compare build flags directly because we may add -modfile.
+ snapshot.view.optionsMu.Lock()
+ localPrefix := snapshot.view.options.Local
+ currentBuildFlags := snapshot.view.options.BuildFlags
+ currentDirectoryFilters := snapshot.view.options.DirectoryFilters
+ changed := !reflect.DeepEqual(currentBuildFlags, s.cachedBuildFlags) ||
+ snapshot.view.options.VerboseOutput != (s.processEnv.Logf != nil) ||
+ modFileHash != s.cachedModFileHash ||
+ !reflect.DeepEqual(snapshot.view.options.DirectoryFilters, s.cachedDirectoryFilters)
+ snapshot.view.optionsMu.Unlock()
+
+ // If anything relevant to imports has changed, clear caches and
+ // update the processEnv. Clearing caches blocks on any background
+ // scans.
+ if changed {
+ // As a special case, skip cleanup the first time -- we haven't fully
+ // initialized the environment yet and calling GetResolver will do
+ // unnecessary work and potentially mess up the go.mod file.
+ if s.runOnce {
+ if resolver, err := s.processEnv.GetResolver(); err == nil {
+ if modResolver, ok := resolver.(*imports.ModuleResolver); ok {
+ modResolver.ClearForNewMod()
+ }
+ }
+ }
+
+ s.cachedModFileHash = modFileHash
+ s.cachedBuildFlags = currentBuildFlags
+ s.cachedDirectoryFilters = currentDirectoryFilters
+ if err := s.populateProcessEnv(ctx, snapshot); err != nil {
+ return err
+ }
+ s.runOnce = true
+ }
+
+ // Run the user function.
+ opts := &imports.Options{
+ // Defaults.
+ AllErrors: true,
+ Comments: true,
+ Fragment: true,
+ FormatOnly: false,
+ TabIndent: true,
+ TabWidth: 8,
+ Env: s.processEnv,
+ LocalPrefix: localPrefix,
+ }
+
+ if err := fn(opts); err != nil {
+ return err
+ }
+
+ if s.cacheRefreshTimer == nil {
+ // Don't refresh more than twice per minute.
+ delay := 30 * time.Second
+ // Don't spend more than a couple percent of the time refreshing.
+ if adaptive := 50 * s.cacheRefreshDuration; adaptive > delay {
+ delay = adaptive
+ }
+ s.cacheRefreshTimer = time.AfterFunc(delay, s.refreshProcessEnv)
+ }
+
+ return nil
+}
+
+// populateProcessEnv sets the dynamically configurable fields for the view's
+// process environment. Assumes that the caller is holding the s.view.importsMu.
+func (s *importsState) populateProcessEnv(ctx context.Context, snapshot *snapshot) error {
+ pe := s.processEnv
+
+ if snapshot.view.Options().VerboseOutput {
+ pe.Logf = func(format string, args ...interface{}) {
+ event.Log(ctx, fmt.Sprintf(format, args...))
+ }
+ } else {
+ pe.Logf = nil
+ }
+
+ // Extract invocation details from the snapshot to use with goimports.
+ //
+ // TODO(rfindley): refactor to extract the necessary invocation logic into
+ // separate functions. Using goCommandInvocation is unnecessarily indirect,
+ // and has led to memory leaks in the past, when the snapshot was
+ // unintentionally held past its lifetime.
+ _, inv, cleanupInvocation, err := snapshot.goCommandInvocation(ctx, source.LoadWorkspace, &gocommand.Invocation{
+ WorkingDir: snapshot.view.workingDir().Filename(),
+ })
+ if err != nil {
+ return err
+ }
+
+ pe.BuildFlags = inv.BuildFlags
+ pe.ModFlag = "readonly" // processEnv operations should not mutate the modfile
+ pe.Env = map[string]string{}
+ for _, kv := range inv.Env {
+ split := strings.SplitN(kv, "=", 2)
+ if len(split) != 2 {
+ continue
+ }
+ pe.Env[split[0]] = split[1]
+ }
+ // We don't actually use the invocation, so clean it up now.
+ cleanupInvocation()
+ // TODO(rfindley): should this simply be inv.WorkingDir?
+ pe.WorkingDir = snapshot.view.workingDir().Filename()
+ return nil
+}
+
+func (s *importsState) refreshProcessEnv() {
+ start := time.Now()
+
+ s.mu.Lock()
+ env := s.processEnv
+ if resolver, err := s.processEnv.GetResolver(); err == nil {
+ resolver.ClearForNewScan()
+ }
+ s.mu.Unlock()
+
+ event.Log(s.ctx, "background imports cache refresh starting")
+ if err := imports.PrimeCache(context.Background(), env); err == nil {
+ event.Log(s.ctx, fmt.Sprintf("background refresh finished after %v", time.Since(start)))
+ } else {
+ event.Log(s.ctx, fmt.Sprintf("background refresh finished after %v", time.Since(start)), keys.Err.Of(err))
+ }
+ s.mu.Lock()
+ s.cacheRefreshDuration = time.Since(start)
+ s.cacheRefreshTimer = nil
+ s.mu.Unlock()
+}
diff --git a/internal/lsp/cache/keys.go b/gopls/internal/lsp/cache/keys.go
index 449daba3a..449daba3a 100644
--- a/internal/lsp/cache/keys.go
+++ b/gopls/internal/lsp/cache/keys.go
diff --git a/gopls/internal/lsp/cache/load.go b/gopls/internal/lsp/cache/load.go
new file mode 100644
index 000000000..c4bdf1cd1
--- /dev/null
+++ b/gopls/internal/lsp/cache/load.go
@@ -0,0 +1,782 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cache
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "path/filepath"
+ "sort"
+ "strings"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/tools/go/packages"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/bug"
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/event/tag"
+ "golang.org/x/tools/internal/gocommand"
+ "golang.org/x/tools/internal/packagesinternal"
+)
+
+var loadID uint64 // atomic identifier for loads
+
+// errNoPackages indicates that a load query matched no packages.
+var errNoPackages = errors.New("no packages returned")
+
+// load calls packages.Load for the given scopes, updating package metadata,
+// import graph, and mapped files with the result.
+//
+// The resulting error may wrap the moduleErrorMap error type, representing
+// errors associated with specific modules.
+func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...loadScope) (err error) {
+ id := atomic.AddUint64(&loadID, 1)
+ eventName := fmt.Sprintf("go/packages.Load #%d", id) // unique name for logging
+
+ var query []string
+ var containsDir bool // for logging
+
+ // Keep track of module query -> module path so that we can later correlate query
+ // errors with errors.
+ moduleQueries := make(map[string]string)
+ for _, scope := range scopes {
+ switch scope := scope.(type) {
+ case packageLoadScope:
+ // The only time we pass package paths is when we're doing a
+ // partial workspace load. In those cases, the paths came back from
+ // go list and should already be GOPATH-vendorized when appropriate.
+ query = append(query, string(scope))
+
+ case fileLoadScope:
+ uri := span.URI(scope)
+ fh := s.FindFile(uri)
+ if fh == nil || s.View().FileKind(fh) != source.Go {
+ // Don't try to load a file that doesn't exist, or isn't a go file.
+ continue
+ }
+ contents, err := fh.Read()
+ if err != nil {
+ continue
+ }
+ if isStandaloneFile(contents, s.view.Options().StandaloneTags) {
+ query = append(query, uri.Filename())
+ } else {
+ query = append(query, fmt.Sprintf("file=%s", uri.Filename()))
+ }
+
+ case moduleLoadScope:
+ switch scope {
+ case "std", "cmd":
+ query = append(query, string(scope))
+ default:
+ modQuery := fmt.Sprintf("%s/...", scope)
+ query = append(query, modQuery)
+ moduleQueries[modQuery] = string(scope)
+ }
+
+ case viewLoadScope:
+ // If we are outside of GOPATH, a module, or some other known
+ // build system, don't load subdirectories.
+ if !s.ValidBuildConfiguration() {
+ query = append(query, "./")
+ } else {
+ query = append(query, "./...")
+ }
+
+ default:
+ panic(fmt.Sprintf("unknown scope type %T", scope))
+ }
+ switch scope.(type) {
+ case viewLoadScope, moduleLoadScope:
+ containsDir = true
+ }
+ }
+ if len(query) == 0 {
+ return nil
+ }
+ sort.Strings(query) // for determinism
+
+ ctx, done := event.Start(ctx, "cache.view.load", tag.Query.Of(query))
+ defer done()
+
+ flags := source.LoadWorkspace
+ if allowNetwork {
+ flags |= source.AllowNetwork
+ }
+ _, inv, cleanup, err := s.goCommandInvocation(ctx, flags, &gocommand.Invocation{
+ WorkingDir: s.view.workingDir().Filename(),
+ })
+ if err != nil {
+ return err
+ }
+
+ // Set a last resort deadline on packages.Load since it calls the go
+ // command, which may hang indefinitely if it has a bug. golang/go#42132
+ // and golang/go#42255 have more context.
+ ctx, cancel := context.WithTimeout(ctx, 10*time.Minute)
+ defer cancel()
+
+ cfg := s.config(ctx, inv)
+ pkgs, err := packages.Load(cfg, query...)
+ cleanup()
+
+ // If the context was canceled, return early. Otherwise, we might be
+ // type-checking an incomplete result. Check the context directly,
+ // because go/packages adds extra information to the error.
+ if ctx.Err() != nil {
+ return ctx.Err()
+ }
+
+ // This log message is sought for by TestReloadOnlyOnce.
+ labels := append(source.SnapshotLabels(s), tag.Query.Of(query), tag.PackageCount.Of(len(pkgs)))
+ if err != nil {
+ event.Error(ctx, eventName, err, labels...)
+ } else {
+ event.Log(ctx, eventName, labels...)
+ }
+
+ if len(pkgs) == 0 {
+ if err == nil {
+ err = errNoPackages
+ }
+ return fmt.Errorf("packages.Load error: %w", err)
+ }
+
+ moduleErrs := make(map[string][]packages.Error) // module path -> errors
+ filterFunc := s.view.filterFunc()
+ newMetadata := make(map[PackageID]*source.Metadata)
+ for _, pkg := range pkgs {
+ // The Go command returns synthetic list results for module queries that
+ // encountered module errors.
+ //
+ // For example, given a module path a.mod, we'll query for "a.mod/..." and
+ // the go command will return a package named "a.mod/..." holding this
+ // error. Save it for later interpretation.
+ //
+ // See golang/go#50862 for more details.
+ if mod := moduleQueries[pkg.PkgPath]; mod != "" { // a synthetic result for the unloadable module
+ if len(pkg.Errors) > 0 {
+ moduleErrs[mod] = pkg.Errors
+ }
+ continue
+ }
+
+ if !containsDir || s.view.Options().VerboseOutput {
+ event.Log(ctx, eventName, append(
+ source.SnapshotLabels(s),
+ tag.Package.Of(pkg.ID),
+ tag.Files.Of(pkg.CompiledGoFiles))...)
+ }
+
+ // Ignore packages with no sources, since we will never be able to
+ // correctly invalidate that metadata.
+ if len(pkg.GoFiles) == 0 && len(pkg.CompiledGoFiles) == 0 {
+ continue
+ }
+ // Special case for the builtin package, as it has no dependencies.
+ if pkg.PkgPath == "builtin" {
+ if len(pkg.GoFiles) != 1 {
+ return fmt.Errorf("only expected 1 file for builtin, got %v", len(pkg.GoFiles))
+ }
+ s.setBuiltin(pkg.GoFiles[0])
+ continue
+ }
+ // Skip test main packages.
+ if isTestMain(pkg, s.view.gocache) {
+ continue
+ }
+ // Skip filtered packages. They may be added anyway if they're
+ // dependencies of non-filtered packages.
+ //
+ // TODO(rfindley): why exclude metadata arbitrarily here? It should be safe
+ // to capture all metadata.
+ // TODO(rfindley): what about compiled go files?
+ if allFilesExcluded(pkg.GoFiles, filterFunc) {
+ continue
+ }
+ if err := buildMetadata(ctx, pkg, cfg, query, newMetadata, nil); err != nil {
+ return err
+ }
+ }
+
+ s.mu.Lock()
+
+ // Compute the minimal metadata updates (for Clone)
+ // required to preserve this invariant:
+ // for all id, s.packages.Get(id).m == s.meta.metadata[id].
+ updates := make(map[PackageID]*source.Metadata)
+ for _, m := range newMetadata {
+ if existing := s.meta.metadata[m.ID]; existing == nil {
+ updates[m.ID] = m
+ delete(s.shouldLoad, m.ID)
+ }
+ }
+ // Assert the invariant.
+ s.packages.Range(func(k, v interface{}) {
+ id, ph := k.(PackageID), v.(*packageHandle)
+ if s.meta.metadata[id] != ph.m {
+ // TODO(adonovan): upgrade to unconditional panic after Jan 2023.
+ bug.Reportf("inconsistent metadata")
+ }
+ })
+
+ event.Log(ctx, fmt.Sprintf("%s: updating metadata for %d packages", eventName, len(updates)))
+
+ // Before mutating the snapshot, ensure that we compute load diagnostics
+ // successfully. This could fail if the context is cancelled, and we don't
+ // want to leave the snapshot metadata in a partial state.
+ meta := s.meta.Clone(updates)
+ workspacePackages := computeWorkspacePackagesLocked(s, meta)
+ for _, update := range updates {
+ if err := computeLoadDiagnostics(ctx, update, meta, lockedSnapshot{s}, workspacePackages); err != nil {
+ return err
+ }
+ }
+ s.meta = meta
+ s.workspacePackages = workspacePackages
+ s.resetActivePackagesLocked()
+
+ s.dumpWorkspace("load")
+ s.mu.Unlock()
+
+ // Recompute the workspace package handle for any packages we invalidated.
+ //
+ // This is (putatively) an optimization since handle construction prefetches
+ // the content of all Go source files.
+ //
+ // However, one necessary side effect of this operation is that we are
+ // guaranteed to visit all package files during load. This is required for
+ // e.g. determining the set of directories to watch.
+ //
+ // TODO(rfindley, golang/go#57558): determine the set of directories based on
+ // loaded packages, and skip this precomputation.
+ for _, m := range updates {
+ s.buildPackageHandle(ctx, m.ID) // ignore error
+ }
+
+ if len(moduleErrs) > 0 {
+ return &moduleErrorMap{moduleErrs}
+ }
+
+ return nil
+}
+
+type moduleErrorMap struct {
+ errs map[string][]packages.Error // module path -> errors
+}
+
+func (m *moduleErrorMap) Error() string {
+ var paths []string // sort for stability
+ for path, errs := range m.errs {
+ if len(errs) > 0 { // should always be true, but be cautious
+ paths = append(paths, path)
+ }
+ }
+ sort.Strings(paths)
+
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "%d modules have errors:\n", len(paths))
+ for _, path := range paths {
+ fmt.Fprintf(&buf, "\t%s:%s\n", path, m.errs[path][0].Msg)
+ }
+
+ return buf.String()
+}
+
+// workspaceLayoutError returns an error describing a misconfiguration of the
+// workspace, along with related diagnostic.
+//
+// The unusual argument ordering of results is intentional: if the resulting
+// error is nil, so must be the resulting diagnostics.
+//
+// If ctx is cancelled, it may return ctx.Err(), nil.
+//
+// TODO(rfindley): separate workspace diagnostics from critical workspace
+// errors.
+func (s *snapshot) workspaceLayoutError(ctx context.Context) (error, []*source.Diagnostic) {
+ // TODO(rfindley): both of the checks below should be delegated to the workspace.
+
+ if s.view.effectiveGO111MODULE() == off {
+ return nil, nil
+ }
+
+ // If the user is using a go.work file, we assume that they know what they
+ // are doing.
+ //
+ // TODO(golang/go#53880): improve orphaned file diagnostics when using go.work.
+ if s.view.gowork != "" {
+ return nil, nil
+ }
+
+ // Apply diagnostics about the workspace configuration to relevant open
+ // files.
+ openFiles := s.openFiles()
+
+ // If the snapshot does not have a valid build configuration, it may be
+ // that the user has opened a directory that contains multiple modules.
+ // Check for that an warn about it.
+ if !s.ValidBuildConfiguration() {
+ var msg string
+ if s.view.goversion >= 18 {
+ msg = `gopls was not able to find modules in your workspace.
+When outside of GOPATH, gopls needs to know which modules you are working on.
+You can fix this by opening your workspace to a folder inside a Go module, or
+by using a go.work file to specify multiple modules.
+See the documentation for more information on setting up your workspace:
+https://github.com/golang/tools/blob/master/gopls/doc/workspace.md.`
+ } else {
+ msg = `gopls requires a module at the root of your workspace.
+You can work with multiple modules by upgrading to Go 1.18 or later, and using
+go workspaces (go.work files).
+See the documentation for more information on setting up your workspace:
+https://github.com/golang/tools/blob/master/gopls/doc/workspace.md.`
+ }
+ return fmt.Errorf(msg), s.applyCriticalErrorToFiles(ctx, msg, openFiles)
+ }
+
+ // If the user has one active go.mod file, they may still be editing files
+ // in nested modules. Check the module of each open file and add warnings
+ // that the nested module must be opened as a workspace folder.
+ if len(s.workspaceModFiles) == 1 {
+ // Get the active root go.mod file to compare against.
+ var rootMod string
+ for uri := range s.workspaceModFiles {
+ rootMod = uri.Filename()
+ }
+ rootDir := filepath.Dir(rootMod)
+ nestedModules := make(map[string][]source.FileHandle)
+ for _, fh := range openFiles {
+ mod, err := findRootPattern(ctx, filepath.Dir(fh.URI().Filename()), "go.mod", s)
+ if err != nil {
+ if ctx.Err() != nil {
+ return ctx.Err(), nil
+ }
+ continue
+ }
+ if mod == "" {
+ continue
+ }
+ if mod != rootMod && source.InDir(rootDir, mod) {
+ modDir := filepath.Dir(mod)
+ nestedModules[modDir] = append(nestedModules[modDir], fh)
+ }
+ }
+ var multiModuleMsg string
+ if s.view.goversion >= 18 {
+ multiModuleMsg = `To work on multiple modules at once, please use a go.work file.
+See https://github.com/golang/tools/blob/master/gopls/doc/workspace.md for more information on using workspaces.`
+ } else {
+ multiModuleMsg = `To work on multiple modules at once, please upgrade to Go 1.18 and use a go.work file.
+See https://github.com/golang/tools/blob/master/gopls/doc/workspace.md for more information on using workspaces.`
+ }
+ // Add a diagnostic to each file in a nested module to mark it as
+ // "orphaned". Don't show a general diagnostic in the progress bar,
+ // because the user may still want to edit a file in a nested module.
+ var srcDiags []*source.Diagnostic
+ for modDir, uris := range nestedModules {
+ msg := fmt.Sprintf("This file is in %s, which is a nested module in the %s module.\n%s", modDir, rootMod, multiModuleMsg)
+ srcDiags = append(srcDiags, s.applyCriticalErrorToFiles(ctx, msg, uris)...)
+ }
+ if len(srcDiags) != 0 {
+ return fmt.Errorf("You have opened a nested module.\n%s", multiModuleMsg), srcDiags
+ }
+ }
+ return nil, nil
+}
+
+func (s *snapshot) applyCriticalErrorToFiles(ctx context.Context, msg string, files []source.FileHandle) []*source.Diagnostic {
+ var srcDiags []*source.Diagnostic
+ for _, fh := range files {
+ // Place the diagnostics on the package or module declarations.
+ var rng protocol.Range
+ switch s.view.FileKind(fh) {
+ case source.Go:
+ if pgf, err := s.ParseGo(ctx, fh, source.ParseHeader); err == nil {
+ // Check that we have a valid `package foo` range to use for positioning the error.
+ if pgf.File.Package.IsValid() && pgf.File.Name != nil && pgf.File.Name.End().IsValid() {
+ rng, _ = pgf.PosRange(pgf.File.Package, pgf.File.Name.End())
+ }
+ }
+ case source.Mod:
+ if pmf, err := s.ParseMod(ctx, fh); err == nil {
+ if mod := pmf.File.Module; mod != nil && mod.Syntax != nil {
+ rng, _ = pmf.Mapper.OffsetRange(mod.Syntax.Start.Byte, mod.Syntax.End.Byte)
+ }
+ }
+ }
+ srcDiags = append(srcDiags, &source.Diagnostic{
+ URI: fh.URI(),
+ Range: rng,
+ Severity: protocol.SeverityError,
+ Source: source.ListError,
+ Message: msg,
+ })
+ }
+ return srcDiags
+}
+
+// buildMetadata populates the updates map with metadata updates to
+// apply, based on the given pkg. It recurs through pkg.Imports to ensure that
+// metadata exists for all dependencies.
+func buildMetadata(ctx context.Context, pkg *packages.Package, cfg *packages.Config, query []string, updates map[PackageID]*source.Metadata, path []PackageID) error {
+ // Allow for multiple ad-hoc packages in the workspace (see #47584).
+ pkgPath := PackagePath(pkg.PkgPath)
+ id := PackageID(pkg.ID)
+ if source.IsCommandLineArguments(id) {
+ suffix := ":" + strings.Join(query, ",")
+ id = PackageID(pkg.ID + suffix)
+ pkgPath = PackagePath(pkg.PkgPath + suffix)
+ }
+
+ if _, ok := updates[id]; ok {
+ // If we've already seen this dependency, there may be an import cycle, or
+ // we may have reached the same package transitively via distinct paths.
+ // Check the path to confirm.
+
+ // TODO(rfindley): this doesn't look sufficient. Any single piece of new
+ // metadata could theoretically introduce import cycles in the metadata
+ // graph. What's the point of this limited check here (and is it even
+ // possible to get an import cycle in data from go/packages)? Consider
+ // simply returning, so that this function need not return an error.
+ //
+ // We should consider doing a more complete guard against import cycles
+ // elsewhere.
+ for _, prev := range path {
+ if prev == id {
+ return fmt.Errorf("import cycle detected: %q", id)
+ }
+ }
+ return nil
+ }
+
+ // Recreate the metadata rather than reusing it to avoid locking.
+ m := &source.Metadata{
+ ID: id,
+ PkgPath: pkgPath,
+ Name: PackageName(pkg.Name),
+ ForTest: PackagePath(packagesinternal.GetForTest(pkg)),
+ TypesSizes: pkg.TypesSizes,
+ LoadDir: cfg.Dir,
+ Module: pkg.Module,
+ Errors: pkg.Errors,
+ DepsErrors: packagesinternal.GetDepsErrors(pkg),
+ }
+
+ updates[id] = m
+
+ for _, filename := range pkg.CompiledGoFiles {
+ uri := span.URIFromPath(filename)
+ m.CompiledGoFiles = append(m.CompiledGoFiles, uri)
+ }
+ for _, filename := range pkg.GoFiles {
+ uri := span.URIFromPath(filename)
+ m.GoFiles = append(m.GoFiles, uri)
+ }
+
+ depsByImpPath := make(map[ImportPath]PackageID)
+ depsByPkgPath := make(map[PackagePath]PackageID)
+ for importPath, imported := range pkg.Imports {
+ importPath := ImportPath(importPath)
+
+ // It is not an invariant that importPath == imported.PkgPath.
+ // For example, package "net" imports "golang.org/x/net/dns/dnsmessage"
+ // which refers to the package whose ID and PkgPath are both
+ // "vendor/golang.org/x/net/dns/dnsmessage". Notice the ImportMap,
+ // which maps ImportPaths to PackagePaths:
+ //
+ // $ go list -json net vendor/golang.org/x/net/dns/dnsmessage
+ // {
+ // "ImportPath": "net",
+ // "Name": "net",
+ // "Imports": [
+ // "C",
+ // "vendor/golang.org/x/net/dns/dnsmessage",
+ // "vendor/golang.org/x/net/route",
+ // ...
+ // ],
+ // "ImportMap": {
+ // "golang.org/x/net/dns/dnsmessage": "vendor/golang.org/x/net/dns/dnsmessage",
+ // "golang.org/x/net/route": "vendor/golang.org/x/net/route"
+ // },
+ // ...
+ // }
+ // {
+ // "ImportPath": "vendor/golang.org/x/net/dns/dnsmessage",
+ // "Name": "dnsmessage",
+ // ...
+ // }
+ //
+ // (Beware that, for historical reasons, go list uses
+ // the JSON field "ImportPath" for the package's
+ // path--effectively the linker symbol prefix.)
+ //
+ // The example above is slightly special to go list
+ // because it's in the std module. Otherwise,
+ // vendored modules are simply modules whose directory
+ // is vendor/ instead of GOMODCACHE, and the
+ // import path equals the package path.
+ //
+ // But in GOPATH (non-module) mode, it's possible for
+ // package vendoring to cause a non-identity ImportMap,
+ // as in this example:
+ //
+ // $ cd $HOME/src
+ // $ find . -type f
+ // ./b/b.go
+ // ./vendor/example.com/a/a.go
+ // $ cat ./b/b.go
+ // package b
+ // import _ "example.com/a"
+ // $ cat ./vendor/example.com/a/a.go
+ // package a
+ // $ GOPATH=$HOME GO111MODULE=off go list -json ./b | grep -A2 ImportMap
+ // "ImportMap": {
+ // "example.com/a": "vendor/example.com/a"
+ // },
+
+ // Don't remember any imports with significant errors.
+ //
+ // The len=0 condition is a heuristic check for imports of
+ // non-existent packages (for which go/packages will create
+ // an edge to a synthesized node). The heuristic is unsound
+ // because some valid packages have zero files, for example,
+ // a directory containing only the file p_test.go defines an
+ // empty package p.
+ // TODO(adonovan): clarify this. Perhaps go/packages should
+ // report which nodes were synthesized.
+ if importPath != "unsafe" && len(imported.CompiledGoFiles) == 0 {
+ depsByImpPath[importPath] = "" // missing
+ continue
+ }
+
+ depsByImpPath[importPath] = PackageID(imported.ID)
+ depsByPkgPath[PackagePath(imported.PkgPath)] = PackageID(imported.ID)
+ if err := buildMetadata(ctx, imported, cfg, query, updates, append(path, id)); err != nil {
+ event.Error(ctx, "error in dependency", err)
+ }
+ }
+ m.DepsByImpPath = depsByImpPath
+ m.DepsByPkgPath = depsByPkgPath
+
+ // m.Diagnostics is set later in the loading pass, using
+ // computeLoadDiagnostics.
+
+ return nil
+}
+
+// computeLoadDiagnostics computes and sets m.Diagnostics for the given metadata m.
+//
+// It should only be called during metadata construction in snapshot.load.
+func computeLoadDiagnostics(ctx context.Context, m *source.Metadata, meta *metadataGraph, fs source.FileSource, workspacePackages map[PackageID]PackagePath) error {
+ for _, packagesErr := range m.Errors {
+ // Filter out parse errors from go list. We'll get them when we
+ // actually parse, and buggy overlay support may generate spurious
+ // errors. (See TestNewModule_Issue38207.)
+ if strings.Contains(packagesErr.Msg, "expected '") {
+ continue
+ }
+ pkgDiags, err := goPackagesErrorDiagnostics(ctx, packagesErr, m, fs)
+ if err != nil {
+ // There are certain cases where the go command returns invalid
+ // positions, so we cannot panic or even bug.Reportf here.
+ event.Error(ctx, "unable to compute positions for list errors", err, tag.Package.Of(string(m.ID)))
+ continue
+ }
+ m.Diagnostics = append(m.Diagnostics, pkgDiags...)
+ }
+
+ // TODO(rfindley): this is buggy: an insignificant change to a modfile
+ // (or an unsaved modfile) could affect the position of deps errors,
+ // without invalidating the package.
+ depsDiags, err := depsErrors(ctx, m, meta, fs, workspacePackages)
+ if err != nil {
+ if ctx.Err() == nil {
+ // TODO(rfindley): consider making this a bug.Reportf. depsErrors should
+ // not normally fail.
+ event.Error(ctx, "unable to compute deps errors", err, tag.Package.Of(string(m.ID)))
+ }
+ return nil
+ }
+ m.Diagnostics = append(m.Diagnostics, depsDiags...)
+ return nil
+}
+
+// containsPackageLocked reports whether p is a workspace package for the
+// snapshot s.
+//
+// s.mu must be held while calling this function.
+func containsPackageLocked(s *snapshot, m *source.Metadata) bool {
+ // In legacy workspace mode, or if a package does not have an associated
+ // module, a package is considered inside the workspace if any of its files
+ // are under the workspace root (and not excluded).
+ //
+ // Otherwise if the package has a module it must be an active module (as
+ // defined by the module root or go.work file) and at least one file must not
+ // be filtered out by directoryFilters.
+ //
+ // TODO(rfindley): revisit this function. We should not need to predicate on
+ // gowork != "". It should suffice to consider workspace mod files (also, we
+ // will hopefully eliminate the concept of a workspace package soon).
+ if m.Module != nil && s.view.gowork != "" {
+ modURI := span.URIFromPath(m.Module.GoMod)
+ _, ok := s.workspaceModFiles[modURI]
+ if !ok {
+ return false
+ }
+
+ uris := map[span.URI]struct{}{}
+ for _, uri := range m.CompiledGoFiles {
+ uris[uri] = struct{}{}
+ }
+ for _, uri := range m.GoFiles {
+ uris[uri] = struct{}{}
+ }
+
+ filterFunc := s.view.filterFunc()
+ for uri := range uris {
+ // Don't use view.contains here. go.work files may include modules
+ // outside of the workspace folder.
+ if !strings.Contains(string(uri), "/vendor/") && !filterFunc(uri) {
+ return true
+ }
+ }
+ return false
+ }
+
+ return containsFileInWorkspaceLocked(s, m)
+}
+
+// containsOpenFileLocked reports whether any file referenced by m is open in
+// the snapshot s.
+//
+// s.mu must be held while calling this function.
+func containsOpenFileLocked(s *snapshot, m *source.Metadata) bool {
+ uris := map[span.URI]struct{}{}
+ for _, uri := range m.CompiledGoFiles {
+ uris[uri] = struct{}{}
+ }
+ for _, uri := range m.GoFiles {
+ uris[uri] = struct{}{}
+ }
+
+ for uri := range uris {
+ if s.isOpenLocked(uri) {
+ return true
+ }
+ }
+ return false
+}
+
+// containsFileInWorkspaceLocked reports whether m contains any file inside the
+// workspace of the snapshot s.
+//
+// s.mu must be held while calling this function.
+func containsFileInWorkspaceLocked(s *snapshot, m *source.Metadata) bool {
+ uris := map[span.URI]struct{}{}
+ for _, uri := range m.CompiledGoFiles {
+ uris[uri] = struct{}{}
+ }
+ for _, uri := range m.GoFiles {
+ uris[uri] = struct{}{}
+ }
+
+ for uri := range uris {
+ // In order for a package to be considered for the workspace, at least one
+ // file must be contained in the workspace and not vendored.
+
+ // The package's files are in this view. It may be a workspace package.
+ // Vendored packages are not likely to be interesting to the user.
+ if !strings.Contains(string(uri), "/vendor/") && s.view.contains(uri) {
+ return true
+ }
+ }
+ return false
+}
+
+// computeWorkspacePackagesLocked computes workspace packages in the snapshot s
+// for the given metadata graph.
+//
+// s.mu must be held while calling this function.
+func computeWorkspacePackagesLocked(s *snapshot, meta *metadataGraph) map[PackageID]PackagePath {
+ workspacePackages := make(map[PackageID]PackagePath)
+ for _, m := range meta.metadata {
+ if !containsPackageLocked(s, m) {
+ continue
+ }
+
+ if source.IsCommandLineArguments(m.ID) {
+ // If all the files contained in m have a real package, we don't need to
+ // keep m as a workspace package.
+ if allFilesHaveRealPackages(meta, m) {
+ continue
+ }
+
+ // We only care about command-line-arguments packages if they are still
+ // open.
+ if !containsOpenFileLocked(s, m) {
+ continue
+ }
+ }
+
+ switch {
+ case m.ForTest == "":
+ // A normal package.
+ workspacePackages[m.ID] = m.PkgPath
+ case m.ForTest == m.PkgPath, m.ForTest+"_test" == m.PkgPath:
+ // The test variant of some workspace package or its x_test.
+ // To load it, we need to load the non-test variant with -test.
+ //
+ // Notably, this excludes intermediate test variants from workspace
+ // packages.
+ workspacePackages[m.ID] = m.ForTest
+ }
+ }
+ return workspacePackages
+}
+
+// allFilesHaveRealPackages reports whether all files referenced by m are
+// contained in a "real" package (not command-line-arguments).
+//
+// If m is valid but all "real" packages containing any file are invalid, this
+// function returns false.
+//
+// If m is not a command-line-arguments package, this is trivially true.
+func allFilesHaveRealPackages(g *metadataGraph, m *source.Metadata) bool {
+ n := len(m.CompiledGoFiles)
+checkURIs:
+ for _, uri := range append(m.CompiledGoFiles[0:n:n], m.GoFiles...) {
+ for _, id := range g.ids[uri] {
+ if !source.IsCommandLineArguments(id) {
+ continue checkURIs
+ }
+ }
+ return false
+ }
+ return true
+}
+
+func isTestMain(pkg *packages.Package, gocache string) bool {
+ // Test mains must have an import path that ends with ".test".
+ if !strings.HasSuffix(pkg.PkgPath, ".test") {
+ return false
+ }
+ // Test main packages are always named "main".
+ if pkg.Name != "main" {
+ return false
+ }
+ // Test mains always have exactly one GoFile that is in the build cache.
+ if len(pkg.GoFiles) > 1 {
+ return false
+ }
+ if !source.InDir(gocache, pkg.GoFiles[0]) {
+ return false
+ }
+ return true
+}
diff --git a/gopls/internal/lsp/cache/maps.go b/gopls/internal/lsp/cache/maps.go
new file mode 100644
index 000000000..0ad4ac90f
--- /dev/null
+++ b/gopls/internal/lsp/cache/maps.go
@@ -0,0 +1,121 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cache
+
+import (
+ "strings"
+
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/persistent"
+)
+
+// TODO(euroelessar): Use generics once support for go1.17 is dropped.
+
+type filesMap struct {
+ impl *persistent.Map
+}
+
+// uriLessInterface is the < relation for "any" values containing span.URIs.
+func uriLessInterface(a, b interface{}) bool {
+ return a.(span.URI) < b.(span.URI)
+}
+
+func newFilesMap() filesMap {
+ return filesMap{
+ impl: persistent.NewMap(uriLessInterface),
+ }
+}
+
+func (m filesMap) Clone() filesMap {
+ return filesMap{
+ impl: m.impl.Clone(),
+ }
+}
+
+func (m filesMap) Destroy() {
+ m.impl.Destroy()
+}
+
+func (m filesMap) Get(key span.URI) (source.FileHandle, bool) {
+ value, ok := m.impl.Get(key)
+ if !ok {
+ return nil, false
+ }
+ return value.(source.FileHandle), true
+}
+
+func (m filesMap) Range(do func(key span.URI, value source.FileHandle)) {
+ m.impl.Range(func(key, value interface{}) {
+ do(key.(span.URI), value.(source.FileHandle))
+ })
+}
+
+func (m filesMap) Set(key span.URI, value source.FileHandle) {
+ m.impl.Set(key, value, nil)
+}
+
+func (m filesMap) Delete(key span.URI) {
+ m.impl.Delete(key)
+}
+
+func packageIDLessInterface(x, y interface{}) bool {
+ return x.(PackageID) < y.(PackageID)
+}
+
+type knownDirsSet struct {
+ impl *persistent.Map
+}
+
+func newKnownDirsSet() knownDirsSet {
+ return knownDirsSet{
+ impl: persistent.NewMap(func(a, b interface{}) bool {
+ return a.(span.URI) < b.(span.URI)
+ }),
+ }
+}
+
+func (s knownDirsSet) Clone() knownDirsSet {
+ return knownDirsSet{
+ impl: s.impl.Clone(),
+ }
+}
+
+func (s knownDirsSet) Destroy() {
+ s.impl.Destroy()
+}
+
+func (s knownDirsSet) Contains(key span.URI) bool {
+ _, ok := s.impl.Get(key)
+ return ok
+}
+
+func (s knownDirsSet) Range(do func(key span.URI)) {
+ s.impl.Range(func(key, value interface{}) {
+ do(key.(span.URI))
+ })
+}
+
+func (s knownDirsSet) SetAll(other knownDirsSet) {
+ s.impl.SetAll(other.impl)
+}
+
+func (s knownDirsSet) Insert(key span.URI) {
+ s.impl.Set(key, nil, nil)
+}
+
+func (s knownDirsSet) Remove(key span.URI) {
+ s.impl.Delete(key)
+}
+
+// analysisKeyLessInterface is the less-than relation for analysisKey
+// values wrapped in an interface.
+func analysisKeyLessInterface(a, b interface{}) bool {
+ x, y := a.(analysisKey), b.(analysisKey)
+ if cmp := strings.Compare(x.analyzerNames, y.analyzerNames); cmp != 0 {
+ return cmp < 0
+ }
+ return x.pkgid < y.pkgid
+}
diff --git a/gopls/internal/lsp/cache/mod.go b/gopls/internal/lsp/cache/mod.go
new file mode 100644
index 000000000..4a3d8db1b
--- /dev/null
+++ b/gopls/internal/lsp/cache/mod.go
@@ -0,0 +1,522 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cache
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "path/filepath"
+ "regexp"
+ "strings"
+
+ "golang.org/x/mod/modfile"
+ "golang.org/x/mod/module"
+ "golang.org/x/tools/gopls/internal/lsp/command"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/event/tag"
+ "golang.org/x/tools/internal/gocommand"
+ "golang.org/x/tools/internal/memoize"
+)
+
+// ParseMod parses a go.mod file, using a cache. It may return partial results and an error.
+func (s *snapshot) ParseMod(ctx context.Context, fh source.FileHandle) (*source.ParsedModule, error) {
+ uri := fh.URI()
+
+ s.mu.Lock()
+ entry, hit := s.parseModHandles.Get(uri)
+ s.mu.Unlock()
+
+ type parseModKey source.FileIdentity
+ type parseModResult struct {
+ parsed *source.ParsedModule
+ err error
+ }
+
+ // cache miss?
+ if !hit {
+ promise, release := s.store.Promise(parseModKey(fh.FileIdentity()), func(ctx context.Context, _ interface{}) interface{} {
+ parsed, err := parseModImpl(ctx, fh)
+ return parseModResult{parsed, err}
+ })
+
+ entry = promise
+ s.mu.Lock()
+ s.parseModHandles.Set(uri, entry, func(_, _ interface{}) { release() })
+ s.mu.Unlock()
+ }
+
+ // Await result.
+ v, err := s.awaitPromise(ctx, entry.(*memoize.Promise))
+ if err != nil {
+ return nil, err
+ }
+ res := v.(parseModResult)
+ return res.parsed, res.err
+}
+
+// parseModImpl parses the go.mod file whose name and contents are in fh.
+// It may return partial results and an error.
+func parseModImpl(ctx context.Context, fh source.FileHandle) (*source.ParsedModule, error) {
+ _, done := event.Start(ctx, "cache.ParseMod", tag.URI.Of(fh.URI()))
+ defer done()
+
+ contents, err := fh.Read()
+ if err != nil {
+ return nil, err
+ }
+ m := protocol.NewMapper(fh.URI(), contents)
+ file, parseErr := modfile.Parse(fh.URI().Filename(), contents, nil)
+ // Attempt to convert the error to a standardized parse error.
+ var parseErrors []*source.Diagnostic
+ if parseErr != nil {
+ mfErrList, ok := parseErr.(modfile.ErrorList)
+ if !ok {
+ return nil, fmt.Errorf("unexpected parse error type %v", parseErr)
+ }
+ for _, mfErr := range mfErrList {
+ rng, err := m.OffsetRange(mfErr.Pos.Byte, mfErr.Pos.Byte)
+ if err != nil {
+ return nil, err
+ }
+ parseErrors = append(parseErrors, &source.Diagnostic{
+ URI: fh.URI(),
+ Range: rng,
+ Severity: protocol.SeverityError,
+ Source: source.ParseError,
+ Message: mfErr.Err.Error(),
+ })
+ }
+ }
+ return &source.ParsedModule{
+ URI: fh.URI(),
+ Mapper: m,
+ File: file,
+ ParseErrors: parseErrors,
+ }, parseErr
+}
+
+// ParseWork parses a go.work file, using a cache. It may return partial results and an error.
+// TODO(adonovan): move to new work.go file.
+func (s *snapshot) ParseWork(ctx context.Context, fh source.FileHandle) (*source.ParsedWorkFile, error) {
+ uri := fh.URI()
+
+ s.mu.Lock()
+ entry, hit := s.parseWorkHandles.Get(uri)
+ s.mu.Unlock()
+
+ type parseWorkKey source.FileIdentity
+ type parseWorkResult struct {
+ parsed *source.ParsedWorkFile
+ err error
+ }
+
+ // cache miss?
+ if !hit {
+ handle, release := s.store.Promise(parseWorkKey(fh.FileIdentity()), func(ctx context.Context, _ interface{}) interface{} {
+ parsed, err := parseWorkImpl(ctx, fh)
+ return parseWorkResult{parsed, err}
+ })
+
+ entry = handle
+ s.mu.Lock()
+ s.parseWorkHandles.Set(uri, entry, func(_, _ interface{}) { release() })
+ s.mu.Unlock()
+ }
+
+ // Await result.
+ v, err := s.awaitPromise(ctx, entry.(*memoize.Promise))
+ if err != nil {
+ return nil, err
+ }
+ res := v.(parseWorkResult)
+ return res.parsed, res.err
+}
+
+// parseWorkImpl parses a go.work file. It may return partial results and an error.
+func parseWorkImpl(ctx context.Context, fh source.FileHandle) (*source.ParsedWorkFile, error) {
+ _, done := event.Start(ctx, "cache.ParseWork", tag.URI.Of(fh.URI()))
+ defer done()
+
+ contents, err := fh.Read()
+ if err != nil {
+ return nil, err
+ }
+ m := protocol.NewMapper(fh.URI(), contents)
+ file, parseErr := modfile.ParseWork(fh.URI().Filename(), contents, nil)
+ // Attempt to convert the error to a standardized parse error.
+ var parseErrors []*source.Diagnostic
+ if parseErr != nil {
+ mfErrList, ok := parseErr.(modfile.ErrorList)
+ if !ok {
+ return nil, fmt.Errorf("unexpected parse error type %v", parseErr)
+ }
+ for _, mfErr := range mfErrList {
+ rng, err := m.OffsetRange(mfErr.Pos.Byte, mfErr.Pos.Byte)
+ if err != nil {
+ return nil, err
+ }
+ parseErrors = append(parseErrors, &source.Diagnostic{
+ URI: fh.URI(),
+ Range: rng,
+ Severity: protocol.SeverityError,
+ Source: source.ParseError,
+ Message: mfErr.Err.Error(),
+ })
+ }
+ }
+ return &source.ParsedWorkFile{
+ URI: fh.URI(),
+ Mapper: m,
+ File: file,
+ ParseErrors: parseErrors,
+ }, parseErr
+}
+
+// goSum reads the go.sum file for the go.mod file at modURI, if it exists. If
+// it doesn't exist, it returns nil.
+func (s *snapshot) goSum(ctx context.Context, modURI span.URI) []byte {
+ // Get the go.sum file, either from the snapshot or directly from the
+ // cache. Avoid (*snapshot).GetFile here, as we don't want to add
+ // nonexistent file handles to the snapshot if the file does not exist.
+ //
+ // TODO(rfindley): but that's not right. Changes to sum files should
+ // invalidate content, even if it's nonexistent content.
+ sumURI := span.URIFromPath(sumFilename(modURI))
+ var sumFH source.FileHandle = s.FindFile(sumURI)
+ if sumFH == nil {
+ var err error
+ sumFH, err = s.view.fs.GetFile(ctx, sumURI)
+ if err != nil {
+ return nil
+ }
+ }
+ content, err := sumFH.Read()
+ if err != nil {
+ return nil
+ }
+ return content
+}
+
+func sumFilename(modURI span.URI) string {
+ return strings.TrimSuffix(modURI.Filename(), ".mod") + ".sum"
+}
+
+// ModWhy returns the "go mod why" result for each module named in a
+// require statement in the go.mod file.
+// TODO(adonovan): move to new mod_why.go file.
+func (s *snapshot) ModWhy(ctx context.Context, fh source.FileHandle) (map[string]string, error) {
+ uri := fh.URI()
+
+ if s.View().FileKind(fh) != source.Mod {
+ return nil, fmt.Errorf("%s is not a go.mod file", uri)
+ }
+
+ s.mu.Lock()
+ entry, hit := s.modWhyHandles.Get(uri)
+ s.mu.Unlock()
+
+ type modWhyResult struct {
+ why map[string]string
+ err error
+ }
+
+ // cache miss?
+ if !hit {
+ handle := memoize.NewPromise("modWhy", func(ctx context.Context, arg interface{}) interface{} {
+ why, err := modWhyImpl(ctx, arg.(*snapshot), fh)
+ return modWhyResult{why, err}
+ })
+
+ entry = handle
+ s.mu.Lock()
+ s.modWhyHandles.Set(uri, entry, nil)
+ s.mu.Unlock()
+ }
+
+ // Await result.
+ v, err := s.awaitPromise(ctx, entry.(*memoize.Promise))
+ if err != nil {
+ return nil, err
+ }
+ res := v.(modWhyResult)
+ return res.why, res.err
+}
+
+// modWhyImpl returns the result of "go mod why -m" on the specified go.mod file.
+func modWhyImpl(ctx context.Context, snapshot *snapshot, fh source.FileHandle) (map[string]string, error) {
+ ctx, done := event.Start(ctx, "cache.ModWhy", tag.URI.Of(fh.URI()))
+ defer done()
+
+ pm, err := snapshot.ParseMod(ctx, fh)
+ if err != nil {
+ return nil, err
+ }
+ // No requires to explain.
+ if len(pm.File.Require) == 0 {
+ return nil, nil // empty result
+ }
+ // Run `go mod why` on all the dependencies.
+ inv := &gocommand.Invocation{
+ Verb: "mod",
+ Args: []string{"why", "-m"},
+ WorkingDir: filepath.Dir(fh.URI().Filename()),
+ }
+ for _, req := range pm.File.Require {
+ inv.Args = append(inv.Args, req.Mod.Path)
+ }
+ stdout, err := snapshot.RunGoCommandDirect(ctx, source.Normal, inv)
+ if err != nil {
+ return nil, err
+ }
+ whyList := strings.Split(stdout.String(), "\n\n")
+ if len(whyList) != len(pm.File.Require) {
+ return nil, fmt.Errorf("mismatched number of results: got %v, want %v", len(whyList), len(pm.File.Require))
+ }
+ why := make(map[string]string, len(pm.File.Require))
+ for i, req := range pm.File.Require {
+ why[req.Mod.Path] = whyList[i]
+ }
+ return why, nil
+}
+
+// extractGoCommandErrors tries to parse errors that come from the go command
+// and shape them into go.mod diagnostics.
+// TODO: rename this to 'load errors'
+func (s *snapshot) extractGoCommandErrors(ctx context.Context, goCmdError error) []*source.Diagnostic {
+ if goCmdError == nil {
+ return nil
+ }
+
+ type locatedErr struct {
+ spn span.Span
+ msg string
+ }
+ diagLocations := map[*source.ParsedModule]locatedErr{}
+ backupDiagLocations := map[*source.ParsedModule]locatedErr{}
+
+ // If moduleErrs is non-nil, go command errors are scoped to specific
+ // modules.
+ var moduleErrs *moduleErrorMap
+ _ = errors.As(goCmdError, &moduleErrs)
+
+ // Match the error against all the mod files in the workspace.
+ for _, uri := range s.ModFiles() {
+ fh, err := s.GetFile(ctx, uri)
+ if err != nil {
+ event.Error(ctx, "getting modfile for Go command error", err)
+ continue
+ }
+ pm, err := s.ParseMod(ctx, fh)
+ if err != nil {
+ // Parsing errors are reported elsewhere
+ return nil
+ }
+ var msgs []string // error messages to consider
+ if moduleErrs != nil {
+ if pm.File.Module != nil {
+ for _, mes := range moduleErrs.errs[pm.File.Module.Mod.Path] {
+ msgs = append(msgs, mes.Error())
+ }
+ }
+ } else {
+ msgs = append(msgs, goCmdError.Error())
+ }
+ for _, msg := range msgs {
+ if strings.Contains(goCmdError.Error(), "errors parsing go.mod") {
+ // The go command emits parse errors for completely invalid go.mod files.
+ // Those are reported by our own diagnostics and can be ignored here.
+ // As of writing, we are not aware of any other errors that include
+ // file/position information, so don't even try to find it.
+ continue
+ }
+ spn, found, err := s.matchErrorToModule(ctx, pm, msg)
+ if err != nil {
+ event.Error(ctx, "matching error to module", err)
+ continue
+ }
+ le := locatedErr{
+ spn: spn,
+ msg: msg,
+ }
+ if found {
+ diagLocations[pm] = le
+ } else {
+ backupDiagLocations[pm] = le
+ }
+ }
+ }
+
+ // If we didn't find any good matches, assign diagnostics to all go.mod files.
+ if len(diagLocations) == 0 {
+ diagLocations = backupDiagLocations
+ }
+
+ var srcErrs []*source.Diagnostic
+ for pm, le := range diagLocations {
+ diag, err := s.goCommandDiagnostic(pm, le.spn, le.msg)
+ if err != nil {
+ event.Error(ctx, "building go command diagnostic", err)
+ continue
+ }
+ srcErrs = append(srcErrs, diag)
+ }
+ return srcErrs
+}
+
+var moduleVersionInErrorRe = regexp.MustCompile(`[:\s]([+-._~0-9A-Za-z]+)@([+-._~0-9A-Za-z]+)[:\s]`)
+
+// matchErrorToModule matches a go command error message to a go.mod file.
+// Some examples:
+//
+// example.com@v1.2.2: reading example.com/@v/v1.2.2.mod: no such file or directory
+// go: github.com/cockroachdb/apd/v2@v2.0.72: reading github.com/cockroachdb/apd/go.mod at revision v2.0.72: unknown revision v2.0.72
+// go: example.com@v1.2.3 requires\n\trandom.org@v1.2.3: parsing go.mod:\n\tmodule declares its path as: bob.org\n\tbut was required as: random.org
+//
+// It returns the location of a reference to the one of the modules and true
+// if one exists. If none is found it returns a fallback location and false.
+func (s *snapshot) matchErrorToModule(ctx context.Context, pm *source.ParsedModule, goCmdError string) (span.Span, bool, error) {
+ var reference *modfile.Line
+ matches := moduleVersionInErrorRe.FindAllStringSubmatch(goCmdError, -1)
+
+ for i := len(matches) - 1; i >= 0; i-- {
+ ver := module.Version{Path: matches[i][1], Version: matches[i][2]}
+ if err := module.Check(ver.Path, ver.Version); err != nil {
+ continue
+ }
+ reference = findModuleReference(pm.File, ver)
+ if reference != nil {
+ break
+ }
+ }
+
+ if reference == nil {
+ // No match for the module path was found in the go.mod file.
+ // Show the error on the module declaration, if one exists, or
+ // just the first line of the file.
+ if pm.File.Module == nil {
+ return span.New(pm.URI, span.NewPoint(1, 1, 0), span.Point{}), false, nil
+ }
+ syntax := pm.File.Module.Syntax
+ spn, err := pm.Mapper.OffsetSpan(syntax.Start.Byte, syntax.End.Byte)
+ return spn, false, err
+ }
+
+ spn, err := pm.Mapper.OffsetSpan(reference.Start.Byte, reference.End.Byte)
+ return spn, true, err
+}
+
+// goCommandDiagnostic creates a diagnostic for a given go command error.
+func (s *snapshot) goCommandDiagnostic(pm *source.ParsedModule, spn span.Span, goCmdError string) (*source.Diagnostic, error) {
+ rng, err := pm.Mapper.SpanRange(spn)
+ if err != nil {
+ return nil, err
+ }
+
+ matches := moduleVersionInErrorRe.FindAllStringSubmatch(goCmdError, -1)
+ var innermost *module.Version
+ for i := len(matches) - 1; i >= 0; i-- {
+ ver := module.Version{Path: matches[i][1], Version: matches[i][2]}
+ if err := module.Check(ver.Path, ver.Version); err != nil {
+ continue
+ }
+ innermost = &ver
+ break
+ }
+
+ switch {
+ case strings.Contains(goCmdError, "inconsistent vendoring"):
+ cmd, err := command.NewVendorCommand("Run go mod vendor", command.URIArg{URI: protocol.URIFromSpanURI(pm.URI)})
+ if err != nil {
+ return nil, err
+ }
+ return &source.Diagnostic{
+ URI: pm.URI,
+ Range: rng,
+ Severity: protocol.SeverityError,
+ Source: source.ListError,
+ Message: `Inconsistent vendoring detected. Please re-run "go mod vendor".
+See https://github.com/golang/go/issues/39164 for more detail on this issue.`,
+ SuggestedFixes: []source.SuggestedFix{source.SuggestedFixFromCommand(cmd, protocol.QuickFix)},
+ }, nil
+
+ case strings.Contains(goCmdError, "updates to go.sum needed"), strings.Contains(goCmdError, "missing go.sum entry"):
+ var args []protocol.DocumentURI
+ for _, uri := range s.ModFiles() {
+ args = append(args, protocol.URIFromSpanURI(uri))
+ }
+ tidyCmd, err := command.NewTidyCommand("Run go mod tidy", command.URIArgs{URIs: args})
+ if err != nil {
+ return nil, err
+ }
+ updateCmd, err := command.NewUpdateGoSumCommand("Update go.sum", command.URIArgs{URIs: args})
+ if err != nil {
+ return nil, err
+ }
+ msg := "go.sum is out of sync with go.mod. Please update it by applying the quick fix."
+ if innermost != nil {
+ msg = fmt.Sprintf("go.sum is out of sync with go.mod: entry for %v is missing. Please updating it by applying the quick fix.", innermost)
+ }
+ return &source.Diagnostic{
+ URI: pm.URI,
+ Range: rng,
+ Severity: protocol.SeverityError,
+ Source: source.ListError,
+ Message: msg,
+ SuggestedFixes: []source.SuggestedFix{
+ source.SuggestedFixFromCommand(tidyCmd, protocol.QuickFix),
+ source.SuggestedFixFromCommand(updateCmd, protocol.QuickFix),
+ },
+ }, nil
+ case strings.Contains(goCmdError, "disabled by GOPROXY=off") && innermost != nil:
+ title := fmt.Sprintf("Download %v@%v", innermost.Path, innermost.Version)
+ cmd, err := command.NewAddDependencyCommand(title, command.DependencyArgs{
+ URI: protocol.URIFromSpanURI(pm.URI),
+ AddRequire: false,
+ GoCmdArgs: []string{fmt.Sprintf("%v@%v", innermost.Path, innermost.Version)},
+ })
+ if err != nil {
+ return nil, err
+ }
+ return &source.Diagnostic{
+ URI: pm.URI,
+ Range: rng,
+ Severity: protocol.SeverityError,
+ Message: fmt.Sprintf("%v@%v has not been downloaded", innermost.Path, innermost.Version),
+ Source: source.ListError,
+ SuggestedFixes: []source.SuggestedFix{source.SuggestedFixFromCommand(cmd, protocol.QuickFix)},
+ }, nil
+ default:
+ return &source.Diagnostic{
+ URI: pm.URI,
+ Range: rng,
+ Severity: protocol.SeverityError,
+ Source: source.ListError,
+ Message: goCmdError,
+ }, nil
+ }
+}
+
+func findModuleReference(mf *modfile.File, ver module.Version) *modfile.Line {
+ for _, req := range mf.Require {
+ if req.Mod == ver {
+ return req.Syntax
+ }
+ }
+ for _, ex := range mf.Exclude {
+ if ex.Mod == ver {
+ return ex.Syntax
+ }
+ }
+ for _, rep := range mf.Replace {
+ if rep.New == ver || rep.Old == ver {
+ return rep.Syntax
+ }
+ }
+ return nil
+}
diff --git a/gopls/internal/lsp/cache/mod_tidy.go b/gopls/internal/lsp/cache/mod_tidy.go
new file mode 100644
index 000000000..0572e9d5c
--- /dev/null
+++ b/gopls/internal/lsp/cache/mod_tidy.go
@@ -0,0 +1,469 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cache
+
+import (
+ "context"
+ "fmt"
+ "go/ast"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "golang.org/x/mod/modfile"
+ "golang.org/x/tools/gopls/internal/lsp/command"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/event/tag"
+ "golang.org/x/tools/internal/gocommand"
+ "golang.org/x/tools/internal/memoize"
+)
+
+// ModTidy returns the go.mod file that would be obtained by running
+// "go mod tidy". Concurrent requests are combined into a single command.
+func (s *snapshot) ModTidy(ctx context.Context, pm *source.ParsedModule) (*source.TidiedModule, error) {
+ uri := pm.URI
+ if pm.File == nil {
+ return nil, fmt.Errorf("cannot tidy unparseable go.mod file: %v", uri)
+ }
+
+ s.mu.Lock()
+ entry, hit := s.modTidyHandles.Get(uri)
+ s.mu.Unlock()
+
+ type modTidyResult struct {
+ tidied *source.TidiedModule
+ err error
+ }
+
+ // Cache miss?
+ if !hit {
+ // If the file handle is an overlay, it may not be written to disk.
+ // The go.mod file has to be on disk for `go mod tidy` to work.
+ // TODO(rfindley): is this still true with Go 1.16 overlay support?
+ fh, err := s.GetFile(ctx, pm.URI)
+ if err != nil {
+ return nil, err
+ }
+ if _, ok := fh.(*Overlay); ok {
+ if info, _ := os.Stat(uri.Filename()); info == nil {
+ return nil, source.ErrNoModOnDisk
+ }
+ }
+
+ if criticalErr := s.GetCriticalError(ctx); criticalErr != nil {
+ return &source.TidiedModule{
+ Diagnostics: criticalErr.Diagnostics,
+ }, nil
+ }
+ if ctx.Err() != nil { // must check ctx after GetCriticalError
+ return nil, ctx.Err()
+ }
+
+ if err := s.awaitLoaded(ctx); err != nil {
+ return nil, err
+ }
+
+ handle := memoize.NewPromise("modTidy", func(ctx context.Context, arg interface{}) interface{} {
+ tidied, err := modTidyImpl(ctx, arg.(*snapshot), uri.Filename(), pm)
+ return modTidyResult{tidied, err}
+ })
+
+ entry = handle
+ s.mu.Lock()
+ s.modTidyHandles.Set(uri, entry, nil)
+ s.mu.Unlock()
+ }
+
+ // Await result.
+ v, err := s.awaitPromise(ctx, entry.(*memoize.Promise))
+ if err != nil {
+ return nil, err
+ }
+ res := v.(modTidyResult)
+ return res.tidied, res.err
+}
+
+// modTidyImpl runs "go mod tidy" on a go.mod file.
+func modTidyImpl(ctx context.Context, snapshot *snapshot, filename string, pm *source.ParsedModule) (*source.TidiedModule, error) {
+ ctx, done := event.Start(ctx, "cache.ModTidy", tag.URI.Of(filename))
+ defer done()
+
+ inv := &gocommand.Invocation{
+ Verb: "mod",
+ Args: []string{"tidy"},
+ WorkingDir: filepath.Dir(filename),
+ }
+ // TODO(adonovan): ensure that unsaved overlays are passed through to 'go'.
+ tmpURI, inv, cleanup, err := snapshot.goCommandInvocation(ctx, source.WriteTemporaryModFile, inv)
+ if err != nil {
+ return nil, err
+ }
+ // Keep the temporary go.mod file around long enough to parse it.
+ defer cleanup()
+
+ if _, err := snapshot.view.gocmdRunner.Run(ctx, *inv); err != nil {
+ return nil, err
+ }
+
+ // Go directly to disk to get the temporary mod file,
+ // since it is always on disk.
+ tempContents, err := ioutil.ReadFile(tmpURI.Filename())
+ if err != nil {
+ return nil, err
+ }
+ ideal, err := modfile.Parse(tmpURI.Filename(), tempContents, nil)
+ if err != nil {
+ // We do not need to worry about the temporary file's parse errors
+ // since it has been "tidied".
+ return nil, err
+ }
+
+ // Compare the original and tidied go.mod files to compute errors and
+ // suggested fixes.
+ diagnostics, err := modTidyDiagnostics(ctx, snapshot, pm, ideal)
+ if err != nil {
+ return nil, err
+ }
+
+ return &source.TidiedModule{
+ Diagnostics: diagnostics,
+ TidiedContent: tempContents,
+ }, nil
+}
+
+// modTidyDiagnostics computes the differences between the original and tidied
+// go.mod files to produce diagnostic and suggested fixes. Some diagnostics
+// may appear on the Go files that import packages from missing modules.
+func modTidyDiagnostics(ctx context.Context, snapshot *snapshot, pm *source.ParsedModule, ideal *modfile.File) (diagnostics []*source.Diagnostic, err error) {
+ // First, determine which modules are unused and which are missing from the
+ // original go.mod file.
+ var (
+ unused = make(map[string]*modfile.Require, len(pm.File.Require))
+ missing = make(map[string]*modfile.Require, len(ideal.Require))
+ wrongDirectness = make(map[string]*modfile.Require, len(pm.File.Require))
+ )
+ for _, req := range pm.File.Require {
+ unused[req.Mod.Path] = req
+ }
+ for _, req := range ideal.Require {
+ origReq := unused[req.Mod.Path]
+ if origReq == nil {
+ missing[req.Mod.Path] = req
+ continue
+ } else if origReq.Indirect != req.Indirect {
+ wrongDirectness[req.Mod.Path] = origReq
+ }
+ delete(unused, req.Mod.Path)
+ }
+ for _, req := range wrongDirectness {
+ // Handle dependencies that are incorrectly labeled indirect and
+ // vice versa.
+ srcDiag, err := directnessDiagnostic(pm.Mapper, req, snapshot.View().Options().ComputeEdits)
+ if err != nil {
+ // We're probably in a bad state if we can't compute a
+ // directnessDiagnostic, but try to keep going so as to not suppress
+ // other, valid diagnostics.
+ event.Error(ctx, "computing directness diagnostic", err)
+ continue
+ }
+ diagnostics = append(diagnostics, srcDiag)
+ }
+ // Next, compute any diagnostics for modules that are missing from the
+ // go.mod file. The fixes will be for the go.mod file, but the
+ // diagnostics should also appear in both the go.mod file and the import
+ // statements in the Go files in which the dependencies are used.
+ missingModuleFixes := map[*modfile.Require][]source.SuggestedFix{}
+ for _, req := range missing {
+ srcDiag, err := missingModuleDiagnostic(pm, req)
+ if err != nil {
+ return nil, err
+ }
+ missingModuleFixes[req] = srcDiag.SuggestedFixes
+ diagnostics = append(diagnostics, srcDiag)
+ }
+ // Add diagnostics for missing modules anywhere they are imported in the
+ // workspace.
+ // TODO(adonovan): opt: opportunities for parallelism abound.
+ for _, m := range snapshot.workspaceMetadata() {
+ // Read both lists of files of this package, in parallel.
+ goFiles, compiledGoFiles, err := readGoFiles(ctx, snapshot, m)
+ if err != nil {
+ return nil, err
+ }
+
+ missingImports := map[string]*modfile.Require{}
+
+ // If -mod=readonly is not set we may have successfully imported
+ // packages from missing modules. Otherwise they'll be in
+ // MissingDependencies. Combine both.
+ imps, err := parseImports(ctx, snapshot, goFiles)
+ if err != nil {
+ return nil, err
+ }
+ for imp := range imps {
+ if req, ok := missing[imp]; ok {
+ missingImports[imp] = req
+ break
+ }
+ // If the import is a package of the dependency, then add the
+ // package to the map, this will eliminate the need to do this
+ // prefix package search on each import for each file.
+ // Example:
+ //
+ // import (
+ // "golang.org/x/tools/go/expect"
+ // "golang.org/x/tools/go/packages"
+ // )
+ // They both are related to the same module: "golang.org/x/tools".
+ var match string
+ for _, req := range ideal.Require {
+ if strings.HasPrefix(imp, req.Mod.Path) && len(req.Mod.Path) > len(match) {
+ match = req.Mod.Path
+ }
+ }
+ if req, ok := missing[match]; ok {
+ missingImports[imp] = req
+ }
+ }
+ // None of this package's imports are from missing modules.
+ if len(missingImports) == 0 {
+ continue
+ }
+ for _, goFile := range compiledGoFiles {
+ pgf, err := snapshot.ParseGo(ctx, goFile, source.ParseHeader)
+ if err != nil {
+ continue
+ }
+ file, m := pgf.File, pgf.Mapper
+ if file == nil || m == nil {
+ continue
+ }
+ imports := make(map[string]*ast.ImportSpec)
+ for _, imp := range file.Imports {
+ if imp.Path == nil {
+ continue
+ }
+ if target, err := strconv.Unquote(imp.Path.Value); err == nil {
+ imports[target] = imp
+ }
+ }
+ if len(imports) == 0 {
+ continue
+ }
+ for importPath, req := range missingImports {
+ imp, ok := imports[importPath]
+ if !ok {
+ continue
+ }
+ fixes, ok := missingModuleFixes[req]
+ if !ok {
+ return nil, fmt.Errorf("no missing module fix for %q (%q)", importPath, req.Mod.Path)
+ }
+ srcErr, err := missingModuleForImport(pgf, imp, req, fixes)
+ if err != nil {
+ return nil, err
+ }
+ diagnostics = append(diagnostics, srcErr)
+ }
+ }
+ }
+ // Finally, add errors for any unused dependencies.
+ onlyDiagnostic := len(diagnostics) == 0 && len(unused) == 1
+ for _, req := range unused {
+ srcErr, err := unusedDiagnostic(pm.Mapper, req, onlyDiagnostic)
+ if err != nil {
+ return nil, err
+ }
+ diagnostics = append(diagnostics, srcErr)
+ }
+ return diagnostics, nil
+}
+
+// unusedDiagnostic returns a source.Diagnostic for an unused require.
+func unusedDiagnostic(m *protocol.Mapper, req *modfile.Require, onlyDiagnostic bool) (*source.Diagnostic, error) {
+ rng, err := m.OffsetRange(req.Syntax.Start.Byte, req.Syntax.End.Byte)
+ if err != nil {
+ return nil, err
+ }
+ title := fmt.Sprintf("Remove dependency: %s", req.Mod.Path)
+ cmd, err := command.NewRemoveDependencyCommand(title, command.RemoveDependencyArgs{
+ URI: protocol.URIFromSpanURI(m.URI),
+ OnlyDiagnostic: onlyDiagnostic,
+ ModulePath: req.Mod.Path,
+ })
+ if err != nil {
+ return nil, err
+ }
+ return &source.Diagnostic{
+ URI: m.URI,
+ Range: rng,
+ Severity: protocol.SeverityWarning,
+ Source: source.ModTidyError,
+ Message: fmt.Sprintf("%s is not used in this module", req.Mod.Path),
+ SuggestedFixes: []source.SuggestedFix{source.SuggestedFixFromCommand(cmd, protocol.QuickFix)},
+ }, nil
+}
+
+// directnessDiagnostic extracts errors when a dependency is labeled indirect when
+// it should be direct and vice versa.
+func directnessDiagnostic(m *protocol.Mapper, req *modfile.Require, computeEdits source.DiffFunction) (*source.Diagnostic, error) {
+ rng, err := m.OffsetRange(req.Syntax.Start.Byte, req.Syntax.End.Byte)
+ if err != nil {
+ return nil, err
+ }
+ direction := "indirect"
+ if req.Indirect {
+ direction = "direct"
+
+ // If the dependency should be direct, just highlight the // indirect.
+ if comments := req.Syntax.Comment(); comments != nil && len(comments.Suffix) > 0 {
+ end := comments.Suffix[0].Start
+ end.LineRune += len(comments.Suffix[0].Token)
+ end.Byte += len(comments.Suffix[0].Token)
+ rng, err = m.OffsetRange(comments.Suffix[0].Start.Byte, end.Byte)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+ // If the dependency should be indirect, add the // indirect.
+ edits, err := switchDirectness(req, m, computeEdits)
+ if err != nil {
+ return nil, err
+ }
+ return &source.Diagnostic{
+ URI: m.URI,
+ Range: rng,
+ Severity: protocol.SeverityWarning,
+ Source: source.ModTidyError,
+ Message: fmt.Sprintf("%s should be %s", req.Mod.Path, direction),
+ SuggestedFixes: []source.SuggestedFix{{
+ Title: fmt.Sprintf("Change %s to %s", req.Mod.Path, direction),
+ Edits: map[span.URI][]protocol.TextEdit{
+ m.URI: edits,
+ },
+ ActionKind: protocol.QuickFix,
+ }},
+ }, nil
+}
+
+func missingModuleDiagnostic(pm *source.ParsedModule, req *modfile.Require) (*source.Diagnostic, error) {
+ var rng protocol.Range
+ // Default to the start of the file if there is no module declaration.
+ if pm.File != nil && pm.File.Module != nil && pm.File.Module.Syntax != nil {
+ start, end := pm.File.Module.Syntax.Span()
+ var err error
+ rng, err = pm.Mapper.OffsetRange(start.Byte, end.Byte)
+ if err != nil {
+ return nil, err
+ }
+ }
+ title := fmt.Sprintf("Add %s to your go.mod file", req.Mod.Path)
+ cmd, err := command.NewAddDependencyCommand(title, command.DependencyArgs{
+ URI: protocol.URIFromSpanURI(pm.Mapper.URI),
+ AddRequire: !req.Indirect,
+ GoCmdArgs: []string{req.Mod.Path + "@" + req.Mod.Version},
+ })
+ if err != nil {
+ return nil, err
+ }
+ return &source.Diagnostic{
+ URI: pm.Mapper.URI,
+ Range: rng,
+ Severity: protocol.SeverityError,
+ Source: source.ModTidyError,
+ Message: fmt.Sprintf("%s is not in your go.mod file", req.Mod.Path),
+ SuggestedFixes: []source.SuggestedFix{source.SuggestedFixFromCommand(cmd, protocol.QuickFix)},
+ }, nil
+}
+
+// switchDirectness gets the edits needed to change an indirect dependency to
+// direct and vice versa.
+func switchDirectness(req *modfile.Require, m *protocol.Mapper, computeEdits source.DiffFunction) ([]protocol.TextEdit, error) {
+ // We need a private copy of the parsed go.mod file, since we're going to
+ // modify it.
+ copied, err := modfile.Parse("", m.Content, nil)
+ if err != nil {
+ return nil, err
+ }
+ // Change the directness in the matching require statement. To avoid
+ // reordering the require statements, rewrite all of them.
+ var requires []*modfile.Require
+ seenVersions := make(map[string]string)
+ for _, r := range copied.Require {
+ if seen := seenVersions[r.Mod.Path]; seen != "" && seen != r.Mod.Version {
+ // Avoid a panic in SetRequire below, which panics on conflicting
+ // versions.
+ return nil, fmt.Errorf("%q has conflicting versions: %q and %q", r.Mod.Path, seen, r.Mod.Version)
+ }
+ seenVersions[r.Mod.Path] = r.Mod.Version
+ if r.Mod.Path == req.Mod.Path {
+ requires = append(requires, &modfile.Require{
+ Mod: r.Mod,
+ Syntax: r.Syntax,
+ Indirect: !r.Indirect,
+ })
+ continue
+ }
+ requires = append(requires, r)
+ }
+ copied.SetRequire(requires)
+ newContent, err := copied.Format()
+ if err != nil {
+ return nil, err
+ }
+ // Calculate the edits to be made due to the change.
+ edits := computeEdits(string(m.Content), string(newContent))
+ return source.ToProtocolEdits(m, edits)
+}
+
+// missingModuleForImport creates an error for a given import path that comes
+// from a missing module.
+func missingModuleForImport(pgf *source.ParsedGoFile, imp *ast.ImportSpec, req *modfile.Require, fixes []source.SuggestedFix) (*source.Diagnostic, error) {
+ if req.Syntax == nil {
+ return nil, fmt.Errorf("no syntax for %v", req)
+ }
+ rng, err := pgf.NodeRange(imp.Path)
+ if err != nil {
+ return nil, err
+ }
+ return &source.Diagnostic{
+ URI: pgf.URI,
+ Range: rng,
+ Severity: protocol.SeverityError,
+ Source: source.ModTidyError,
+ Message: fmt.Sprintf("%s is not in your go.mod file", req.Mod.Path),
+ SuggestedFixes: fixes,
+ }, nil
+}
+
+// parseImports parses the headers of the specified files and returns
+// the set of strings that appear in import declarations within
+// GoFiles. Errors are ignored.
+//
+// (We can't simply use Metadata.Imports because it is based on
+// CompiledGoFiles, after cgo processing.)
+//
+// TODO(rfindley): this should key off source.ImportPath.
+func parseImports(ctx context.Context, s *snapshot, files []source.FileHandle) (map[string]bool, error) {
+ pgfs, _, err := s.parseCache.parseFiles(ctx, source.ParseHeader, files...)
+ if err != nil { // e.g. context cancellation
+ return nil, err
+ }
+
+ seen := make(map[string]bool)
+ for _, pgf := range pgfs {
+ for _, spec := range pgf.File.Imports {
+ path, _ := strconv.Unquote(spec.Path.Value)
+ seen[path] = true
+ }
+ }
+ return seen, nil
+}
diff --git a/gopls/internal/lsp/cache/mod_vuln.go b/gopls/internal/lsp/cache/mod_vuln.go
new file mode 100644
index 000000000..88d1a1cb4
--- /dev/null
+++ b/gopls/internal/lsp/cache/mod_vuln.go
@@ -0,0 +1,75 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cache
+
+import (
+ "context"
+ "os"
+
+ "golang.org/x/tools/gopls/internal/govulncheck"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/gopls/internal/vulncheck"
+ "golang.org/x/tools/internal/memoize"
+)
+
+// ModVuln returns import vulnerability analysis for the given go.mod URI.
+// Concurrent requests are combined into a single command.
+func (s *snapshot) ModVuln(ctx context.Context, modURI span.URI) (*govulncheck.Result, error) {
+ s.mu.Lock()
+ entry, hit := s.modVulnHandles.Get(modURI)
+ s.mu.Unlock()
+
+ type modVuln struct {
+ result *govulncheck.Result
+ err error
+ }
+
+ // Cache miss?
+ if !hit {
+ // If the file handle is an overlay, it may not be written to disk.
+ // The go.mod file has to be on disk for vulncheck to work.
+ //
+ // TODO(hyangah): use overlays for vulncheck.
+ fh, err := s.GetFile(ctx, modURI)
+ if err != nil {
+ return nil, err
+ }
+ if _, ok := fh.(*Overlay); ok {
+ if info, _ := os.Stat(modURI.Filename()); info == nil {
+ return nil, source.ErrNoModOnDisk
+ }
+ }
+
+ handle := memoize.NewPromise("modVuln", func(ctx context.Context, arg interface{}) interface{} {
+ result, err := modVulnImpl(ctx, arg.(*snapshot), modURI)
+ return modVuln{result, err}
+ })
+
+ entry = handle
+ s.mu.Lock()
+ s.modVulnHandles.Set(modURI, entry, nil)
+ s.mu.Unlock()
+ }
+
+ // Await result.
+ v, err := s.awaitPromise(ctx, entry.(*memoize.Promise))
+ if err != nil {
+ return nil, err
+ }
+ res := v.(modVuln)
+ return res.result, res.err
+}
+
+func modVulnImpl(ctx context.Context, s *snapshot, uri span.URI) (*govulncheck.Result, error) {
+ if vulncheck.VulnerablePackages == nil {
+ return &govulncheck.Result{}, nil
+ }
+ fh, err := s.GetFile(ctx, uri)
+ if err != nil {
+ return nil, err
+ }
+ return vulncheck.VulnerablePackages(ctx, s, fh)
+}
diff --git a/internal/lsp/cache/os_darwin.go b/gopls/internal/lsp/cache/os_darwin.go
index 2c88be1fc..2c88be1fc 100644
--- a/internal/lsp/cache/os_darwin.go
+++ b/gopls/internal/lsp/cache/os_darwin.go
diff --git a/gopls/internal/lsp/cache/os_windows.go b/gopls/internal/lsp/cache/os_windows.go
new file mode 100644
index 000000000..2feded84d
--- /dev/null
+++ b/gopls/internal/lsp/cache/os_windows.go
@@ -0,0 +1,56 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cache
+
+import (
+ "fmt"
+ "path/filepath"
+ "syscall"
+)
+
+func init() {
+ checkPathCase = windowsCheckPathCase
+}
+
+func windowsCheckPathCase(path string) error {
+ // Back in the day, Windows used to have short and long filenames, and
+ // it still supports those APIs. GetLongPathName gets the real case for a
+ // path, so we can use it here. Inspired by
+ // http://stackoverflow.com/q/2113822.
+
+ // Short paths can be longer than long paths, and unicode, so be generous.
+ buflen := 4 * len(path)
+ namep, err := syscall.UTF16PtrFromString(path)
+ if err != nil {
+ return err
+ }
+ short := make([]uint16, buflen)
+ n, err := syscall.GetShortPathName(namep, &short[0], uint32(len(short)*2)) // buflen is in bytes.
+ if err != nil {
+ return err
+ }
+ if int(n) > len(short)*2 {
+ return fmt.Errorf("short buffer too short: %v vs %v*2", n, len(short))
+ }
+ long := make([]uint16, buflen)
+ n, err = syscall.GetLongPathName(&short[0], &long[0], uint32(len(long)*2))
+ if err != nil {
+ return err
+ }
+ if int(n) > len(long)*2 {
+ return fmt.Errorf("long buffer too short: %v vs %v*2", n, len(long))
+ }
+ longstr := syscall.UTF16ToString(long)
+
+ isRoot := func(p string) bool {
+ return p[len(p)-1] == filepath.Separator
+ }
+ for got, want := path, longstr; !isRoot(got) && !isRoot(want); got, want = filepath.Dir(got), filepath.Dir(want) {
+ if g, w := filepath.Base(got), filepath.Base(want); g != w {
+ return fmt.Errorf("case mismatch in path %q: component %q is listed by Windows as %q", path, g, w)
+ }
+ }
+ return nil
+}
diff --git a/gopls/internal/lsp/cache/parse.go b/gopls/internal/lsp/cache/parse.go
new file mode 100644
index 000000000..165470863
--- /dev/null
+++ b/gopls/internal/lsp/cache/parse.go
@@ -0,0 +1,900 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cache
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/scanner"
+ "go/token"
+ "path/filepath"
+ "reflect"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/safetoken"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/diff"
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/event/tag"
+)
+
+// ParseGo parses the file whose contents are provided by fh, using a cache.
+// The resulting tree may have beeen fixed up.
+func (s *snapshot) ParseGo(ctx context.Context, fh source.FileHandle, mode source.ParseMode) (*source.ParsedGoFile, error) {
+ pgfs, _, err := s.parseCache.parseFiles(ctx, mode, fh)
+ if err != nil {
+ return nil, err
+ }
+ return pgfs[0], nil
+}
+
+// parseGoImpl parses the Go source file whose content is provided by fh.
+func parseGoImpl(ctx context.Context, fset *token.FileSet, fh source.FileHandle, mode source.ParseMode) (*source.ParsedGoFile, error) {
+ ctx, done := event.Start(ctx, "cache.parseGo", tag.File.Of(fh.URI().Filename()))
+ defer done()
+
+ ext := filepath.Ext(fh.URI().Filename())
+ if ext != ".go" && ext != "" { // files generated by cgo have no extension
+ return nil, fmt.Errorf("cannot parse non-Go file %s", fh.URI())
+ }
+ src, err := fh.Read()
+ if err != nil {
+ return nil, err
+ }
+ return parseGoSrc(ctx, fset, fh.URI(), src, mode), nil
+}
+
+// parseGoSrc parses a buffer of Go source, repairing the tree if necessary.
+func parseGoSrc(ctx context.Context, fset *token.FileSet, uri span.URI, src []byte, mode source.ParseMode) (res *source.ParsedGoFile) {
+ parserMode := parser.AllErrors | parser.ParseComments
+ if mode == source.ParseHeader {
+ parserMode = parser.ImportsOnly | parser.ParseComments
+ }
+
+ file, err := parser.ParseFile(fset, uri.Filename(), src, parserMode)
+ var parseErr scanner.ErrorList
+ if err != nil {
+ // We passed a byte slice, so the only possible error is a parse error.
+ parseErr = err.(scanner.ErrorList)
+ }
+
+ tok := fset.File(file.Pos())
+ if tok == nil {
+ // file.Pos is the location of the package declaration (issue #53202). If there was
+ // none, we can't find the token.File that ParseFile created, and we
+ // have no choice but to recreate it.
+ tok = fset.AddFile(uri.Filename(), -1, len(src))
+ tok.SetLinesForContent(src)
+ }
+
+ fixed := false
+ // If there were parse errors, attempt to fix them up.
+ if parseErr != nil {
+ // Fix any badly parsed parts of the AST.
+ fixed = fixAST(file, tok, src)
+
+ for i := 0; i < 10; i++ {
+ // Fix certain syntax errors that render the file unparseable.
+ newSrc := fixSrc(file, tok, src)
+ if newSrc == nil {
+ break
+ }
+
+ // If we thought there was something to fix 10 times in a row,
+ // it is likely we got stuck in a loop somehow. Log out a diff
+ // of the last changes we made to aid in debugging.
+ if i == 9 {
+ unified := diff.Unified("before", "after", string(src), string(newSrc))
+ event.Log(ctx, fmt.Sprintf("fixSrc loop - last diff:\n%v", unified), tag.File.Of(tok.Name()))
+ }
+
+ newFile, _ := parser.ParseFile(fset, uri.Filename(), newSrc, parserMode)
+ if newFile != nil {
+ // Maintain the original parseError so we don't try formatting the doctored file.
+ file = newFile
+ src = newSrc
+ tok = fset.File(file.Pos())
+
+ fixed = fixAST(file, tok, src)
+ }
+ }
+ }
+
+ return &source.ParsedGoFile{
+ URI: uri,
+ Mode: mode,
+ Src: src,
+ Fixed: fixed,
+ File: file,
+ Tok: tok,
+ Mapper: protocol.NewMapper(uri, src),
+ ParseErr: parseErr,
+ }
+}
+
+// fixAST inspects the AST and potentially modifies any *ast.BadStmts so that it can be
+// type-checked more effectively.
+//
+// If fixAST returns true, the resulting AST is considered "fixed", meaning
+// positions have been mangled, and type checker errors may not make sense.
+func fixAST(n ast.Node, tok *token.File, src []byte) (fixed bool) {
+ var err error
+ walkASTWithParent(n, func(n, parent ast.Node) bool {
+ switch n := n.(type) {
+ case *ast.BadStmt:
+ if fixed = fixDeferOrGoStmt(n, parent, tok, src); fixed {
+ // Recursively fix in our fixed node.
+ _ = fixAST(parent, tok, src)
+ } else {
+ err = fmt.Errorf("unable to parse defer or go from *ast.BadStmt: %v", err)
+ }
+ return false
+ case *ast.BadExpr:
+ if fixed = fixArrayType(n, parent, tok, src); fixed {
+ // Recursively fix in our fixed node.
+ _ = fixAST(parent, tok, src)
+ return false
+ }
+
+ // Fix cases where parser interprets if/for/switch "init"
+ // statement as "cond" expression, e.g.:
+ //
+ // // "i := foo" is init statement, not condition.
+ // for i := foo
+ //
+ fixInitStmt(n, parent, tok, src)
+
+ return false
+ case *ast.SelectorExpr:
+ // Fix cases where a keyword prefix results in a phantom "_" selector, e.g.:
+ //
+ // foo.var<> // want to complete to "foo.variance"
+ //
+ fixPhantomSelector(n, tok, src)
+ return true
+
+ case *ast.BlockStmt:
+ switch parent.(type) {
+ case *ast.SwitchStmt, *ast.TypeSwitchStmt, *ast.SelectStmt:
+ // Adjust closing curly brace of empty switch/select
+ // statements so we can complete inside them.
+ fixEmptySwitch(n, tok, src)
+ }
+
+ return true
+ default:
+ return true
+ }
+ })
+ return fixed
+}
+
+// walkASTWithParent walks the AST rooted at n. The semantics are
+// similar to ast.Inspect except it does not call f(nil).
+func walkASTWithParent(n ast.Node, f func(n ast.Node, parent ast.Node) bool) {
+ var ancestors []ast.Node
+ ast.Inspect(n, func(n ast.Node) (recurse bool) {
+ defer func() {
+ if recurse {
+ ancestors = append(ancestors, n)
+ }
+ }()
+
+ if n == nil {
+ ancestors = ancestors[:len(ancestors)-1]
+ return false
+ }
+
+ var parent ast.Node
+ if len(ancestors) > 0 {
+ parent = ancestors[len(ancestors)-1]
+ }
+
+ return f(n, parent)
+ })
+}
+
+// fixSrc attempts to modify the file's source code to fix certain
+// syntax errors that leave the rest of the file unparsed.
+func fixSrc(f *ast.File, tf *token.File, src []byte) (newSrc []byte) {
+ walkASTWithParent(f, func(n, parent ast.Node) bool {
+ if newSrc != nil {
+ return false
+ }
+
+ switch n := n.(type) {
+ case *ast.BlockStmt:
+ newSrc = fixMissingCurlies(f, n, parent, tf, src)
+ case *ast.SelectorExpr:
+ newSrc = fixDanglingSelector(n, tf, src)
+ }
+
+ return newSrc == nil
+ })
+
+ return newSrc
+}
+
+// fixMissingCurlies adds in curly braces for block statements that
+// are missing curly braces. For example:
+//
+// if foo
+//
+// becomes
+//
+// if foo {}
+func fixMissingCurlies(f *ast.File, b *ast.BlockStmt, parent ast.Node, tok *token.File, src []byte) []byte {
+ // If the "{" is already in the source code, there isn't anything to
+ // fix since we aren't missing curlies.
+ if b.Lbrace.IsValid() {
+ braceOffset, err := safetoken.Offset(tok, b.Lbrace)
+ if err != nil {
+ return nil
+ }
+ if braceOffset < len(src) && src[braceOffset] == '{' {
+ return nil
+ }
+ }
+
+ parentLine := tok.Line(parent.Pos())
+
+ if parentLine >= tok.LineCount() {
+ // If we are the last line in the file, no need to fix anything.
+ return nil
+ }
+
+ // Insert curlies at the end of parent's starting line. The parent
+ // is the statement that contains the block, e.g. *ast.IfStmt. The
+ // block's Pos()/End() can't be relied upon because they are based
+ // on the (missing) curly braces. We assume the statement is a
+ // single line for now and try sticking the curly braces at the end.
+ insertPos := tok.LineStart(parentLine+1) - 1
+
+ // Scootch position backwards until it's not in a comment. For example:
+ //
+ // if foo<> // some amazing comment |
+ // someOtherCode()
+ //
+ // insertPos will be located at "|", so we back it out of the comment.
+ didSomething := true
+ for didSomething {
+ didSomething = false
+ for _, c := range f.Comments {
+ if c.Pos() < insertPos && insertPos <= c.End() {
+ insertPos = c.Pos()
+ didSomething = true
+ }
+ }
+ }
+
+ // Bail out if line doesn't end in an ident or ".". This is to avoid
+ // cases like below where we end up making things worse by adding
+ // curlies:
+ //
+ // if foo &&
+ // bar<>
+ switch precedingToken(insertPos, tok, src) {
+ case token.IDENT, token.PERIOD:
+ // ok
+ default:
+ return nil
+ }
+
+ var buf bytes.Buffer
+ buf.Grow(len(src) + 3)
+ offset, err := safetoken.Offset(tok, insertPos)
+ if err != nil {
+ return nil
+ }
+ buf.Write(src[:offset])
+
+ // Detect if we need to insert a semicolon to fix "for" loop situations like:
+ //
+ // for i := foo(); foo<>
+ //
+ // Just adding curlies is not sufficient to make things parse well.
+ if fs, ok := parent.(*ast.ForStmt); ok {
+ if _, ok := fs.Cond.(*ast.BadExpr); !ok {
+ if xs, ok := fs.Post.(*ast.ExprStmt); ok {
+ if _, ok := xs.X.(*ast.BadExpr); ok {
+ buf.WriteByte(';')
+ }
+ }
+ }
+ }
+
+ // Insert "{}" at insertPos.
+ buf.WriteByte('{')
+ buf.WriteByte('}')
+ buf.Write(src[offset:])
+ return buf.Bytes()
+}
+
+// fixEmptySwitch moves empty switch/select statements' closing curly
+// brace down one line. This allows us to properly detect incomplete
+// "case" and "default" keywords as inside the switch statement. For
+// example:
+//
+// switch {
+// def<>
+// }
+//
+// gets parsed like:
+//
+// switch {
+// }
+//
+// Later we manually pull out the "def" token, but we need to detect
+// that our "<>" position is inside the switch block. To do that we
+// move the curly brace so it looks like:
+//
+// switch {
+//
+// }
+func fixEmptySwitch(body *ast.BlockStmt, tok *token.File, src []byte) {
+ // We only care about empty switch statements.
+ if len(body.List) > 0 || !body.Rbrace.IsValid() {
+ return
+ }
+
+ // If the right brace is actually in the source code at the
+ // specified position, don't mess with it.
+ braceOffset, err := safetoken.Offset(tok, body.Rbrace)
+ if err != nil {
+ return
+ }
+ if braceOffset < len(src) && src[braceOffset] == '}' {
+ return
+ }
+
+ braceLine := tok.Line(body.Rbrace)
+ if braceLine >= tok.LineCount() {
+ // If we are the last line in the file, no need to fix anything.
+ return
+ }
+
+ // Move the right brace down one line.
+ body.Rbrace = tok.LineStart(braceLine + 1)
+}
+
+// fixDanglingSelector inserts real "_" selector expressions in place
+// of phantom "_" selectors. For example:
+//
+// func _() {
+// x.<>
+// }
+//
+// var x struct { i int }
+//
+// To fix completion at "<>", we insert a real "_" after the "." so the
+// following declaration of "x" can be parsed and type checked
+// normally.
+func fixDanglingSelector(s *ast.SelectorExpr, tf *token.File, src []byte) []byte {
+ if !isPhantomUnderscore(s.Sel, tf, src) {
+ return nil
+ }
+
+ if !s.X.End().IsValid() {
+ return nil
+ }
+
+ insertOffset, err := safetoken.Offset(tf, s.X.End())
+ if err != nil {
+ return nil
+ }
+ // Insert directly after the selector's ".".
+ insertOffset++
+ if src[insertOffset-1] != '.' {
+ return nil
+ }
+
+ var buf bytes.Buffer
+ buf.Grow(len(src) + 1)
+ buf.Write(src[:insertOffset])
+ buf.WriteByte('_')
+ buf.Write(src[insertOffset:])
+ return buf.Bytes()
+}
+
+// fixPhantomSelector tries to fix selector expressions with phantom
+// "_" selectors. In particular, we check if the selector is a
+// keyword, and if so we swap in an *ast.Ident with the keyword text. For example:
+//
+// foo.var
+//
+// yields a "_" selector instead of "var" since "var" is a keyword.
+//
+// TODO(rfindley): should this constitute an ast 'fix'?
+func fixPhantomSelector(sel *ast.SelectorExpr, tf *token.File, src []byte) {
+ if !isPhantomUnderscore(sel.Sel, tf, src) {
+ return
+ }
+
+ // Only consider selectors directly abutting the selector ".". This
+ // avoids false positives in cases like:
+ //
+ // foo. // don't think "var" is our selector
+ // var bar = 123
+ //
+ if sel.Sel.Pos() != sel.X.End()+1 {
+ return
+ }
+
+ maybeKeyword := readKeyword(sel.Sel.Pos(), tf, src)
+ if maybeKeyword == "" {
+ return
+ }
+
+ replaceNode(sel, sel.Sel, &ast.Ident{
+ Name: maybeKeyword,
+ NamePos: sel.Sel.Pos(),
+ })
+}
+
+// isPhantomUnderscore reports whether the given ident is a phantom
+// underscore. The parser sometimes inserts phantom underscores when
+// it encounters otherwise unparseable situations.
+func isPhantomUnderscore(id *ast.Ident, tok *token.File, src []byte) bool {
+ if id == nil || id.Name != "_" {
+ return false
+ }
+
+ // Phantom underscore means the underscore is not actually in the
+ // program text.
+ offset, err := safetoken.Offset(tok, id.Pos())
+ if err != nil {
+ return false
+ }
+ return len(src) <= offset || src[offset] != '_'
+}
+
+// fixInitStmt fixes cases where the parser misinterprets an
+// if/for/switch "init" statement as the "cond" conditional. In cases
+// like "if i := 0" the user hasn't typed the semicolon yet so the
+// parser is looking for the conditional expression. However, "i := 0"
+// are not valid expressions, so we get a BadExpr.
+//
+// fixInitStmt returns valid AST for the original source.
+func fixInitStmt(bad *ast.BadExpr, parent ast.Node, tok *token.File, src []byte) {
+ if !bad.Pos().IsValid() || !bad.End().IsValid() {
+ return
+ }
+
+ // Try to extract a statement from the BadExpr.
+ start, end, err := safetoken.Offsets(tok, bad.Pos(), bad.End()-1)
+ if err != nil {
+ return
+ }
+ stmtBytes := src[start : end+1]
+ stmt, err := parseStmt(bad.Pos(), stmtBytes)
+ if err != nil {
+ return
+ }
+
+ // If the parent statement doesn't already have an "init" statement,
+ // move the extracted statement into the "init" field and insert a
+ // dummy expression into the required "cond" field.
+ switch p := parent.(type) {
+ case *ast.IfStmt:
+ if p.Init != nil {
+ return
+ }
+ p.Init = stmt
+ p.Cond = &ast.Ident{
+ Name: "_",
+ NamePos: stmt.End(),
+ }
+ case *ast.ForStmt:
+ if p.Init != nil {
+ return
+ }
+ p.Init = stmt
+ p.Cond = &ast.Ident{
+ Name: "_",
+ NamePos: stmt.End(),
+ }
+ case *ast.SwitchStmt:
+ if p.Init != nil {
+ return
+ }
+ p.Init = stmt
+ p.Tag = nil
+ }
+}
+
+// readKeyword reads the keyword starting at pos, if any.
+func readKeyword(pos token.Pos, tok *token.File, src []byte) string {
+ var kwBytes []byte
+ offset, err := safetoken.Offset(tok, pos)
+ if err != nil {
+ return ""
+ }
+ for i := offset; i < len(src); i++ {
+ // Use a simplified identifier check since keywords are always lowercase ASCII.
+ if src[i] < 'a' || src[i] > 'z' {
+ break
+ }
+ kwBytes = append(kwBytes, src[i])
+
+ // Stop search at arbitrarily chosen too-long-for-a-keyword length.
+ if len(kwBytes) > 15 {
+ return ""
+ }
+ }
+
+ if kw := string(kwBytes); token.Lookup(kw).IsKeyword() {
+ return kw
+ }
+
+ return ""
+}
+
+// fixArrayType tries to parse an *ast.BadExpr into an *ast.ArrayType.
+// go/parser often turns lone array types like "[]int" into BadExprs
+// if it isn't expecting a type.
+func fixArrayType(bad *ast.BadExpr, parent ast.Node, tok *token.File, src []byte) bool {
+ // Our expected input is a bad expression that looks like "[]someExpr".
+
+ from := bad.Pos()
+ to := bad.End()
+
+ if !from.IsValid() || !to.IsValid() {
+ return false
+ }
+
+ exprBytes := make([]byte, 0, int(to-from)+3)
+ // Avoid doing tok.Offset(to) since that panics if badExpr ends at EOF.
+ // It also panics if the position is not in the range of the file, and
+ // badExprs may not necessarily have good positions, so check first.
+ fromOffset, toOffset, err := safetoken.Offsets(tok, from, to-1)
+ if err != nil {
+ return false
+ }
+ exprBytes = append(exprBytes, src[fromOffset:toOffset+1]...)
+ exprBytes = bytes.TrimSpace(exprBytes)
+
+ // If our expression ends in "]" (e.g. "[]"), add a phantom selector
+ // so we can complete directly after the "[]".
+ if len(exprBytes) > 0 && exprBytes[len(exprBytes)-1] == ']' {
+ exprBytes = append(exprBytes, '_')
+ }
+
+ // Add "{}" to turn our ArrayType into a CompositeLit. This is to
+ // handle the case of "[...]int" where we must make it a composite
+ // literal to be parseable.
+ exprBytes = append(exprBytes, '{', '}')
+
+ expr, err := parseExpr(from, exprBytes)
+ if err != nil {
+ return false
+ }
+
+ cl, _ := expr.(*ast.CompositeLit)
+ if cl == nil {
+ return false
+ }
+
+ at, _ := cl.Type.(*ast.ArrayType)
+ if at == nil {
+ return false
+ }
+
+ return replaceNode(parent, bad, at)
+}
+
+// precedingToken scans src to find the token preceding pos.
+func precedingToken(pos token.Pos, tok *token.File, src []byte) token.Token {
+ s := &scanner.Scanner{}
+ s.Init(tok, src, nil, 0)
+
+ var lastTok token.Token
+ for {
+ p, t, _ := s.Scan()
+ if t == token.EOF || p >= pos {
+ break
+ }
+
+ lastTok = t
+ }
+ return lastTok
+}
+
+// fixDeferOrGoStmt tries to parse an *ast.BadStmt into a defer or a go statement.
+//
+// go/parser packages a statement of the form "defer x." as an *ast.BadStmt because
+// it does not include a call expression. This means that go/types skips type-checking
+// this statement entirely, and we can't use the type information when completing.
+// Here, we try to generate a fake *ast.DeferStmt or *ast.GoStmt to put into the AST,
+// instead of the *ast.BadStmt.
+func fixDeferOrGoStmt(bad *ast.BadStmt, parent ast.Node, tok *token.File, src []byte) bool {
+ // Check if we have a bad statement containing either a "go" or "defer".
+ s := &scanner.Scanner{}
+ s.Init(tok, src, nil, 0)
+
+ var (
+ pos token.Pos
+ tkn token.Token
+ )
+ for {
+ if tkn == token.EOF {
+ return false
+ }
+ if pos >= bad.From {
+ break
+ }
+ pos, tkn, _ = s.Scan()
+ }
+
+ var stmt ast.Stmt
+ switch tkn {
+ case token.DEFER:
+ stmt = &ast.DeferStmt{
+ Defer: pos,
+ }
+ case token.GO:
+ stmt = &ast.GoStmt{
+ Go: pos,
+ }
+ default:
+ return false
+ }
+
+ var (
+ from, to, last token.Pos
+ lastToken token.Token
+ braceDepth int
+ phantomSelectors []token.Pos
+ )
+FindTo:
+ for {
+ to, tkn, _ = s.Scan()
+
+ if from == token.NoPos {
+ from = to
+ }
+
+ switch tkn {
+ case token.EOF:
+ break FindTo
+ case token.SEMICOLON:
+ // If we aren't in nested braces, end of statement means
+ // end of expression.
+ if braceDepth == 0 {
+ break FindTo
+ }
+ case token.LBRACE:
+ braceDepth++
+ }
+
+ // This handles the common dangling selector case. For example in
+ //
+ // defer fmt.
+ // y := 1
+ //
+ // we notice the dangling period and end our expression.
+ //
+ // If the previous token was a "." and we are looking at a "}",
+ // the period is likely a dangling selector and needs a phantom
+ // "_". Likewise if the current token is on a different line than
+ // the period, the period is likely a dangling selector.
+ if lastToken == token.PERIOD && (tkn == token.RBRACE || tok.Line(to) > tok.Line(last)) {
+ // Insert phantom "_" selector after the dangling ".".
+ phantomSelectors = append(phantomSelectors, last+1)
+ // If we aren't in a block then end the expression after the ".".
+ if braceDepth == 0 {
+ to = last + 1
+ break
+ }
+ }
+
+ lastToken = tkn
+ last = to
+
+ switch tkn {
+ case token.RBRACE:
+ braceDepth--
+ if braceDepth <= 0 {
+ if braceDepth == 0 {
+ // +1 to include the "}" itself.
+ to += 1
+ }
+ break FindTo
+ }
+ }
+ }
+
+ fromOffset, toOffset, err := safetoken.Offsets(tok, from, to)
+ if err != nil {
+ return false
+ }
+ if !from.IsValid() || fromOffset >= len(src) {
+ return false
+ }
+ if !to.IsValid() || toOffset >= len(src) {
+ return false
+ }
+
+ // Insert any phantom selectors needed to prevent dangling "." from messing
+ // up the AST.
+ exprBytes := make([]byte, 0, int(to-from)+len(phantomSelectors))
+ for i, b := range src[fromOffset:toOffset] {
+ if len(phantomSelectors) > 0 && from+token.Pos(i) == phantomSelectors[0] {
+ exprBytes = append(exprBytes, '_')
+ phantomSelectors = phantomSelectors[1:]
+ }
+ exprBytes = append(exprBytes, b)
+ }
+
+ if len(phantomSelectors) > 0 {
+ exprBytes = append(exprBytes, '_')
+ }
+
+ expr, err := parseExpr(from, exprBytes)
+ if err != nil {
+ return false
+ }
+
+ // Package the expression into a fake *ast.CallExpr and re-insert
+ // into the function.
+ call := &ast.CallExpr{
+ Fun: expr,
+ Lparen: to,
+ Rparen: to,
+ }
+
+ switch stmt := stmt.(type) {
+ case *ast.DeferStmt:
+ stmt.Call = call
+ case *ast.GoStmt:
+ stmt.Call = call
+ }
+
+ return replaceNode(parent, bad, stmt)
+}
+
+// parseStmt parses the statement in src and updates its position to
+// start at pos.
+func parseStmt(pos token.Pos, src []byte) (ast.Stmt, error) {
+ // Wrap our expression to make it a valid Go file we can pass to ParseFile.
+ fileSrc := bytes.Join([][]byte{
+ []byte("package fake;func _(){"),
+ src,
+ []byte("}"),
+ }, nil)
+
+ // Use ParseFile instead of ParseExpr because ParseFile has
+ // best-effort behavior, whereas ParseExpr fails hard on any error.
+ fakeFile, err := parser.ParseFile(token.NewFileSet(), "", fileSrc, 0)
+ if fakeFile == nil {
+ return nil, fmt.Errorf("error reading fake file source: %v", err)
+ }
+
+ // Extract our expression node from inside the fake file.
+ if len(fakeFile.Decls) == 0 {
+ return nil, fmt.Errorf("error parsing fake file: %v", err)
+ }
+
+ fakeDecl, _ := fakeFile.Decls[0].(*ast.FuncDecl)
+ if fakeDecl == nil || len(fakeDecl.Body.List) == 0 {
+ return nil, fmt.Errorf("no statement in %s: %v", src, err)
+ }
+
+ stmt := fakeDecl.Body.List[0]
+
+ // parser.ParseFile returns undefined positions.
+ // Adjust them for the current file.
+ offsetPositions(stmt, pos-1-(stmt.Pos()-1))
+
+ return stmt, nil
+}
+
+// parseExpr parses the expression in src and updates its position to
+// start at pos.
+func parseExpr(pos token.Pos, src []byte) (ast.Expr, error) {
+ stmt, err := parseStmt(pos, src)
+ if err != nil {
+ return nil, err
+ }
+
+ exprStmt, ok := stmt.(*ast.ExprStmt)
+ if !ok {
+ return nil, fmt.Errorf("no expr in %s: %v", src, err)
+ }
+
+ return exprStmt.X, nil
+}
+
+var tokenPosType = reflect.TypeOf(token.NoPos)
+
+// offsetPositions applies an offset to the positions in an ast.Node.
+func offsetPositions(n ast.Node, offset token.Pos) {
+ ast.Inspect(n, func(n ast.Node) bool {
+ if n == nil {
+ return false
+ }
+
+ v := reflect.ValueOf(n).Elem()
+
+ switch v.Kind() {
+ case reflect.Struct:
+ for i := 0; i < v.NumField(); i++ {
+ f := v.Field(i)
+ if f.Type() != tokenPosType {
+ continue
+ }
+
+ if !f.CanSet() {
+ continue
+ }
+
+ // Don't offset invalid positions: they should stay invalid.
+ if !token.Pos(f.Int()).IsValid() {
+ continue
+ }
+
+ f.SetInt(f.Int() + int64(offset))
+ }
+ }
+
+ return true
+ })
+}
+
+// replaceNode updates parent's child oldChild to be newChild. It
+// returns whether it replaced successfully.
+func replaceNode(parent, oldChild, newChild ast.Node) bool {
+ if parent == nil || oldChild == nil || newChild == nil {
+ return false
+ }
+
+ parentVal := reflect.ValueOf(parent).Elem()
+ if parentVal.Kind() != reflect.Struct {
+ return false
+ }
+
+ newChildVal := reflect.ValueOf(newChild)
+
+ tryReplace := func(v reflect.Value) bool {
+ if !v.CanSet() || !v.CanInterface() {
+ return false
+ }
+
+ // If the existing value is oldChild, we found our child. Make
+ // sure our newChild is assignable and then make the swap.
+ if v.Interface() == oldChild && newChildVal.Type().AssignableTo(v.Type()) {
+ v.Set(newChildVal)
+ return true
+ }
+
+ return false
+ }
+
+ // Loop over parent's struct fields.
+ for i := 0; i < parentVal.NumField(); i++ {
+ f := parentVal.Field(i)
+
+ switch f.Kind() {
+ // Check interface and pointer fields.
+ case reflect.Interface, reflect.Ptr:
+ if tryReplace(f) {
+ return true
+ }
+
+ // Search through any slice fields.
+ case reflect.Slice:
+ for i := 0; i < f.Len(); i++ {
+ if tryReplace(f.Index(i)) {
+ return true
+ }
+ }
+ }
+ }
+
+ return false
+}
diff --git a/gopls/internal/lsp/cache/parse_cache.go b/gopls/internal/lsp/cache/parse_cache.go
new file mode 100644
index 000000000..91ca091c0
--- /dev/null
+++ b/gopls/internal/lsp/cache/parse_cache.go
@@ -0,0 +1,298 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cache
+
+import (
+ "container/heap"
+ "context"
+ "go/token"
+ "runtime"
+ "sort"
+ "sync"
+
+ "golang.org/x/sync/errgroup"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/internal/memoize"
+)
+
+// This file contains an implementation of a bounded-size parse cache, that
+// offsets the base token.Pos value of each cached file so that they may be
+// later described by a single dedicated FileSet.
+//
+// This is achieved by tracking a monotonic offset in the token.Pos space, that
+// is incremented before parsing allow room for the resulting parsed file.
+
+// Keep 200 recently parsed files, based on the following rationale:
+// - One of the most important benefits of caching is avoiding re-parsing
+// everything in a package when working on a single file. No packages in
+// Kubernetes have > 200 files (only one has > 100).
+// - Experience has shown that ~1000 parsed files can use noticeable space.
+// 200 feels like a sweet spot between limiting cache size and optimizing
+// cache hits for low-latency operations.
+const parseCacheMaxFiles = 200
+
+// parsePadding is additional padding allocated between entries in the parse
+// cache to allow for increases in length (such as appending missing braces)
+// caused by fixAST.
+//
+// This is used to mitigate a chicken and egg problem: we must know the base
+// offset of the file we're about to parse, before we start parsing, and yet
+// src fixups may affect the actual size of the parsed content (and therefore
+// the offsets of subsequent files).
+//
+// When we encounter a file that no longer fits in its allocated space in the
+// fileset, we have no choice but to re-parse it. Leaving a generous padding
+// reduces the likelihood of this "slow path".
+//
+// This value is mutable for testing, so that we can exercise the slow path.
+var parsePadding = 1000 // mutable for testing
+
+// A parseCache holds a bounded number of recently accessed parsed Go files. As
+// new files are stored, older files may be evicted from the cache.
+//
+// The parseCache.parseFiles method exposes a batch API for parsing (and
+// caching) multiple files. This is necessary for type-checking, where files
+// must be parsed in a common fileset.
+type parseCache struct {
+ mu sync.Mutex
+ m map[parseKey]*parseCacheEntry
+ lru queue // min-atime priority queue of *parseCacheEntry
+ clock uint64 // clock time, incremented when the cache is updated
+ nextOffset token.Pos // token.Pos offset for the next parsed file
+}
+
+// parseKey uniquely identifies a parsed Go file.
+type parseKey struct {
+ file source.FileIdentity
+ mode source.ParseMode
+}
+
+type parseCacheEntry struct {
+ key parseKey
+ promise *memoize.Promise // memoize.Promise[*source.ParsedGoFile]
+ atime uint64 // clock time of last access
+ lruIndex int
+}
+
+// startParse prepares a parsing pass, using the following steps:
+// - search for cache hits
+// - create new promises for cache misses
+// - store as many new promises in the cache as space will allow
+//
+// The resulting slice has an entry for every given file handle, though some
+// entries may be nil if there was an error reading the file (in which case the
+// resulting error will be non-nil).
+func (c *parseCache) startParse(mode source.ParseMode, fhs ...source.FileHandle) ([]*memoize.Promise, error) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ // Any parsing pass increments the clock, as we'll update access times.
+ // (technically, if fhs is empty this isn't necessary, but that's a degenerate case).
+ //
+ // All entries parsed from a single call get the same access time.
+ c.clock++
+
+ // Read file data and collect cacheable files.
+ var (
+ data = make([][]byte, len(fhs)) // file content for each readable file
+ promises = make([]*memoize.Promise, len(fhs))
+ firstReadError error // first error from fh.Read, or nil
+ )
+ for i, fh := range fhs {
+ src, err := fh.Read()
+ if err != nil {
+ if firstReadError == nil {
+ firstReadError = err
+ }
+ continue
+ }
+ data[i] = src
+
+ key := parseKey{
+ file: fh.FileIdentity(),
+ mode: mode,
+ }
+
+ // Check for a cache hit.
+ if e, ok := c.m[key]; ok {
+ e.atime = c.clock
+ heap.Fix(&c.lru, e.lruIndex)
+ promises[i] = e.promise
+ continue
+ }
+
+ // ...otherwise, create a new promise to parse with a non-overlapping offset
+ fset := token.NewFileSet()
+ if c.nextOffset > 0 {
+ // Add a dummy file so that this parsed file does not overlap with others.
+ fset.AddFile("", 1, int(c.nextOffset))
+ }
+ c.nextOffset += token.Pos(len(src) + parsePadding + 1) // leave room for src fixes
+ fh := fh
+ promise := memoize.NewPromise(string(fh.URI()), func(ctx context.Context, _ interface{}) interface{} {
+ return parseGoSrc(ctx, fset, fh.URI(), src, mode)
+ })
+ promises[i] = promise
+
+ var e *parseCacheEntry
+ if len(c.lru) < parseCacheMaxFiles {
+ // add new entry
+ e = new(parseCacheEntry)
+ if c.m == nil {
+ c.m = make(map[parseKey]*parseCacheEntry)
+ }
+ } else {
+ // evict oldest entry
+ e = heap.Pop(&c.lru).(*parseCacheEntry)
+ delete(c.m, e.key)
+ }
+ e.key = key
+ e.promise = promise
+ e.atime = c.clock
+ c.m[e.key] = e
+ heap.Push(&c.lru, e)
+ }
+
+ if len(c.m) != len(c.lru) {
+ panic("map and LRU are inconsistent")
+ }
+
+ return promises, firstReadError
+}
+
+// parseFiles returns a ParsedGoFile for the given file handles in the
+// requested parse mode.
+//
+// If parseFiles returns an error, it still returns a slice,
+// but with a nil entry for each file that could not be parsed.
+//
+// The second result is a FileSet describing all resulting parsed files.
+//
+// For parsed files that already exists in the cache, access time will be
+// updated. For others, parseFiles will parse and store as many results in the
+// cache as space allows.
+func (c *parseCache) parseFiles(ctx context.Context, mode source.ParseMode, fhs ...source.FileHandle) ([]*source.ParsedGoFile, *token.FileSet, error) {
+ promises, firstReadError := c.startParse(mode, fhs...)
+
+ // Await all parsing.
+ var g errgroup.Group
+ g.SetLimit(runtime.GOMAXPROCS(-1)) // parsing is CPU-bound.
+ pgfs := make([]*source.ParsedGoFile, len(fhs))
+ for i, promise := range promises {
+ if promise == nil {
+ continue
+ }
+ i := i
+ promise := promise
+ g.Go(func() error {
+ result, err := promise.Get(ctx, nil)
+ if err != nil {
+ return err
+ }
+ pgfs[i] = result.(*source.ParsedGoFile)
+ return nil
+ })
+ }
+ if err := g.Wait(); err != nil {
+ return nil, nil, err
+ }
+
+ // Construct a token.FileSet mapping all parsed files, and update their
+ // Tok to the corresponding file in the new fileset.
+ //
+ // In the unlikely event that a parsed file no longer fits in its allocated
+ // space in the FileSet range, it will need to be re-parsed.
+
+ var tokenFiles []*token.File
+ fileIndex := make(map[*token.File]int) // to look up original indexes after sorting
+ for i, pgf := range pgfs {
+ if pgf == nil {
+ continue
+ }
+ fileIndex[pgf.Tok] = i
+ tokenFiles = append(tokenFiles, pgf.Tok)
+ }
+
+ sort.Slice(tokenFiles, func(i, j int) bool {
+ return tokenFiles[i].Base() < tokenFiles[j].Base()
+ })
+
+ var needReparse []int // files requiring reparsing
+ out := tokenFiles[:0]
+ for i, f := range tokenFiles {
+ if i < len(tokenFiles)-1 && f.Base()+f.Size() >= tokenFiles[i+1].Base() {
+ if f != tokenFiles[i+1] { // no need to re-parse duplicates
+ needReparse = append(needReparse, fileIndex[f])
+ }
+ } else {
+ out = append(out, f)
+ }
+ }
+ fset := source.FileSetFor(out...)
+
+ // Re-parse any remaining files using the stitched fileSet.
+ for _, i := range needReparse {
+ // Start from scratch, rather than using ParsedGoFile.Src, so that source
+ // fixing operates exactly the same (note that fixing stops after a limited
+ // number of tries).
+ fh := fhs[i]
+ src, err := fh.Read()
+ if err != nil {
+ if firstReadError == nil {
+ firstReadError = err
+ }
+ continue
+ }
+ pgfs[i] = parseGoSrc(ctx, fset, fh.URI(), src, mode)
+ }
+
+ // Ensure each PGF refers to a token.File from the new FileSet.
+ for i, pgf := range pgfs {
+ if pgf == nil {
+ continue
+ }
+ newTok := fset.File(token.Pos(pgf.Tok.Base()))
+ if newTok == nil {
+ panic("internal error: missing tok for " + pgf.URI)
+ }
+ if newTok.Base() != pgf.Tok.Base() || newTok.Size() != pgf.Tok.Size() {
+ panic("internal error: mismatching token.File in synthetic FileSet")
+ }
+ pgf2 := *pgf
+ pgf2.Tok = newTok
+ pgfs[i] = &pgf2
+ }
+
+ return pgfs, fset, firstReadError
+}
+
+// -- priority queue boilerplate --
+
+// queue is a min-atime prority queue of cache entries.
+type queue []*parseCacheEntry
+
+func (q queue) Len() int { return len(q) }
+
+func (q queue) Less(i, j int) bool { return q[i].atime < q[j].atime }
+
+func (q queue) Swap(i, j int) {
+ q[i], q[j] = q[j], q[i]
+ q[i].lruIndex = i
+ q[j].lruIndex = j
+}
+
+func (q *queue) Push(x interface{}) {
+ e := x.(*parseCacheEntry)
+ e.lruIndex = len(*q)
+ *q = append(*q, e)
+}
+
+func (q *queue) Pop() interface{} {
+ last := len(*q) - 1
+ e := (*q)[last]
+ (*q)[last] = nil // aid GC
+ *q = (*q)[:last]
+ return e
+}
diff --git a/gopls/internal/lsp/cache/parse_cache_test.go b/gopls/internal/lsp/cache/parse_cache_test.go
new file mode 100644
index 000000000..97bff8768
--- /dev/null
+++ b/gopls/internal/lsp/cache/parse_cache_test.go
@@ -0,0 +1,142 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cache
+
+import (
+ "context"
+ "fmt"
+ "go/token"
+ "testing"
+
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+)
+
+func TestParseCache(t *testing.T) {
+ ctx := context.Background()
+ uri := span.URI("file:///myfile")
+ fh := makeFakeFileHandle(uri, []byte("package p\n\nconst _ = \"foo\""))
+
+ var cache parseCache
+ pgfs1, _, err := cache.parseFiles(ctx, source.ParseFull, fh)
+ if err != nil {
+ t.Fatal(err)
+ }
+ pgf1 := pgfs1[0]
+ pgfs2, _, err := cache.parseFiles(ctx, source.ParseFull, fh)
+ pgf2 := pgfs2[0]
+ if err != nil {
+ t.Fatal(err)
+ }
+ if pgf1.File != pgf2.File {
+ t.Errorf("parseFiles(%q): unexpected cache miss on repeated call", uri)
+ }
+
+ // Fill up the cache with other files, but don't evict the file above.
+ files := []source.FileHandle{fh}
+ files = append(files, dummyFileHandles(parseCacheMaxFiles-1)...)
+ pgfs3, fset, err := cache.parseFiles(ctx, source.ParseFull, files...)
+ pgf3 := pgfs3[0]
+ if pgf3.File != pgf1.File {
+ t.Errorf("parseFiles(%q, ...): unexpected cache miss", uri)
+ }
+ if pgf3.Tok == pgf1.Tok {
+ t.Errorf("parseFiles(%q, ...): unexpectedly matching token file", uri)
+ }
+ if pgf3.Tok.Base() != pgf1.Tok.Base() || pgf3.Tok.Size() != pgf1.Tok.Size() {
+ t.Errorf("parseFiles(%q, ...): result.Tok has base: %d, size: %d, want (%d, %d)", uri, pgf3.Tok.Base(), pgf3.Tok.Size(), pgf1.Tok.Base(), pgf1.Tok.Size())
+ }
+ if tok := fset.File(token.Pos(pgf3.Tok.Base())); tok != pgf3.Tok {
+ t.Errorf("parseFiles(%q, ...): result.Tok not contained in FileSet", uri)
+ }
+
+ // Now overwrite the cache, after which we should get new results.
+ files = dummyFileHandles(parseCacheMaxFiles)
+ _, _, err = cache.parseFiles(ctx, source.ParseFull, files...)
+ if err != nil {
+ t.Fatal(err)
+ }
+ pgfs4, _, err := cache.parseFiles(ctx, source.ParseFull, fh)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if pgfs4[0].File == pgf1.File {
+ t.Errorf("parseFiles(%q): unexpected cache hit after overwriting cache", uri)
+ }
+}
+
+func TestParseCache_Reparsing(t *testing.T) {
+ defer func(padding int) {
+ parsePadding = padding
+ }(parsePadding)
+ parsePadding = 0
+
+ files := dummyFileHandles(parseCacheMaxFiles)
+ danglingSelector := []byte("package p\nfunc _() {\n\tx.\n}")
+ files = append(files, makeFakeFileHandle("file:///bad1", danglingSelector))
+ files = append(files, makeFakeFileHandle("file:///bad2", danglingSelector))
+
+ // Parsing should succeed even though we overflow the padding.
+ var cache parseCache
+ _, _, err := cache.parseFiles(context.Background(), source.ParseFull, files...)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestParseCache_Duplicates(t *testing.T) {
+ ctx := context.Background()
+ uri := span.URI("file:///myfile")
+ fh := makeFakeFileHandle(uri, []byte("package p\n\nconst _ = \"foo\""))
+
+ var cache parseCache
+ pgfs, _, err := cache.parseFiles(ctx, source.ParseFull, fh, fh)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if pgfs[0].File != pgfs[1].File {
+ t.Errorf("parseFiles(fh, fh): = [%p, %p], want duplicate files", pgfs[0].File, pgfs[1].File)
+ }
+}
+
+func dummyFileHandles(n int) []source.FileHandle {
+ var fhs []source.FileHandle
+ for i := 0; i < n; i++ {
+ uri := span.URI(fmt.Sprintf("file:///_%d", i))
+ src := []byte(fmt.Sprintf("package p\nvar _ = %d", i))
+ fhs = append(fhs, makeFakeFileHandle(uri, src))
+ }
+ return fhs
+}
+
+func makeFakeFileHandle(uri span.URI, src []byte) fakeFileHandle {
+ return fakeFileHandle{
+ uri: uri,
+ data: src,
+ hash: source.HashOf(src),
+ }
+}
+
+type fakeFileHandle struct {
+ source.FileHandle
+ uri span.URI
+ data []byte
+ hash source.Hash
+}
+
+func (h fakeFileHandle) URI() span.URI {
+ return h.uri
+}
+
+func (h fakeFileHandle) Read() ([]byte, error) {
+ return h.data, nil
+}
+
+func (h fakeFileHandle) FileIdentity() source.FileIdentity {
+ return source.FileIdentity{
+ URI: h.uri,
+ Hash: h.hash,
+ }
+}
diff --git a/gopls/internal/lsp/cache/parsemode_go116.go b/gopls/internal/lsp/cache/parsemode_go116.go
new file mode 100644
index 000000000..d365a9164
--- /dev/null
+++ b/gopls/internal/lsp/cache/parsemode_go116.go
@@ -0,0 +1,11 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.17
+// +build !go1.17
+
+package cache
+
+// The parser.SkipObjectResolution mode flag is not supported before Go 1.17.
+const skipObjectResolution = 0
diff --git a/gopls/internal/lsp/cache/parsemode_go117.go b/gopls/internal/lsp/cache/parsemode_go117.go
new file mode 100644
index 000000000..e2c9fb915
--- /dev/null
+++ b/gopls/internal/lsp/cache/parsemode_go117.go
@@ -0,0 +1,12 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.17
+// +build go1.17
+
+package cache
+
+import "go/parser"
+
+const skipObjectResolution = parser.SkipObjectResolution
diff --git a/gopls/internal/lsp/cache/pkg.go b/gopls/internal/lsp/cache/pkg.go
new file mode 100644
index 000000000..11346fc56
--- /dev/null
+++ b/gopls/internal/lsp/cache/pkg.go
@@ -0,0 +1,165 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cache
+
+import (
+ "context"
+ "fmt"
+ "go/ast"
+ "go/scanner"
+ "go/token"
+ "go/types"
+
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/lsp/source/methodsets"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/memoize"
+)
+
+// Convenient local aliases for typed strings.
+type (
+ PackageID = source.PackageID
+ PackagePath = source.PackagePath
+ PackageName = source.PackageName
+ ImportPath = source.ImportPath
+)
+
+// A Package is the union of snapshot-local information (Metadata) and shared
+// type-checking information (a syntaxPackage).
+//
+// TODO(rfindley): for now, we do not persist the post-processing of
+// loadDiagnostics, because the value of the snapshot.packages map is just the
+// package handle. Fix this.
+type Package struct {
+ m *source.Metadata
+ pkg *syntaxPackage
+}
+
+// syntaxPackage contains parse trees and type information for a package.
+type syntaxPackage struct {
+ // -- identifiers --
+ id PackageID
+ mode source.ParseMode
+
+ // -- outputs --
+ fset *token.FileSet // for now, same as the snapshot's FileSet
+ goFiles []*source.ParsedGoFile
+ compiledGoFiles []*source.ParsedGoFile
+ diagnostics []*source.Diagnostic
+ parseErrors []scanner.ErrorList
+ typeErrors []types.Error
+ types *types.Package
+ typesInfo *types.Info
+ importMap map[string]*types.Package // keys are PackagePaths
+ hasFixedFiles bool // if true, AST was sufficiently mangled that we should hide type errors
+ analyses memoize.Store // maps analyzer.Name to Promise[actionResult]
+ xrefs []byte
+ methodsets *methodsets.Index
+}
+
+func (p *Package) String() string { return string(p.m.ID) }
+
+func (p *Package) Metadata() *source.Metadata { return p.m }
+
+// A loadScope defines a package loading scope for use with go/packages.
+//
+// TODO(rfindley): move this to load.go.
+type loadScope interface {
+ aScope()
+}
+
+type (
+ fileLoadScope span.URI // load packages containing a file (including command-line-arguments)
+ packageLoadScope string // load a specific package (the value is its PackageID)
+ moduleLoadScope string // load packages in a specific module
+ viewLoadScope span.URI // load the workspace
+)
+
+// Implement the loadScope interface.
+func (fileLoadScope) aScope() {}
+func (packageLoadScope) aScope() {}
+func (moduleLoadScope) aScope() {}
+func (viewLoadScope) aScope() {}
+
+func (p *Package) ParseMode() source.ParseMode {
+ return p.pkg.mode
+}
+
+func (p *Package) CompiledGoFiles() []*source.ParsedGoFile {
+ return p.pkg.compiledGoFiles
+}
+
+func (p *Package) File(uri span.URI) (*source.ParsedGoFile, error) {
+ return p.pkg.File(uri)
+}
+
+func (pkg *syntaxPackage) File(uri span.URI) (*source.ParsedGoFile, error) {
+ for _, cgf := range pkg.compiledGoFiles {
+ if cgf.URI == uri {
+ return cgf, nil
+ }
+ }
+ for _, gf := range pkg.goFiles {
+ if gf.URI == uri {
+ return gf, nil
+ }
+ }
+ return nil, fmt.Errorf("no parsed file for %s in %v", uri, pkg.id)
+}
+
+func (p *Package) GetSyntax() []*ast.File {
+ var syntax []*ast.File
+ for _, pgf := range p.pkg.compiledGoFiles {
+ syntax = append(syntax, pgf.File)
+ }
+ return syntax
+}
+
+func (p *Package) FileSet() *token.FileSet {
+ return p.pkg.fset
+}
+
+func (p *Package) GetTypes() *types.Package {
+ return p.pkg.types
+}
+
+func (p *Package) GetTypesInfo() *types.Info {
+ return p.pkg.typesInfo
+}
+
+// DependencyTypes returns the type checker's symbol for the specified
+// package. It returns nil if path is not among the transitive
+// dependencies of p, or if no symbols from that package were
+// referenced during the type-checking of p.
+func (p *Package) DependencyTypes(path source.PackagePath) *types.Package {
+ if path == p.m.PkgPath {
+ return p.pkg.types
+ }
+ return p.pkg.importMap[string(path)]
+}
+
+func (p *Package) HasParseErrors() bool {
+ return len(p.pkg.parseErrors) != 0
+}
+
+func (p *Package) HasTypeErrors() bool {
+ return len(p.pkg.typeErrors) != 0
+}
+
+func (p *Package) DiagnosticsForFile(ctx context.Context, s source.Snapshot, uri span.URI) ([]*source.Diagnostic, error) {
+ var diags []*source.Diagnostic
+ for _, diag := range p.m.Diagnostics {
+ if diag.URI == uri {
+ diags = append(diags, diag)
+ }
+ }
+ for _, diag := range p.pkg.diagnostics {
+ if diag.URI == uri {
+ diags = append(diags, diag)
+ }
+ }
+
+ return diags, nil
+}
diff --git a/gopls/internal/lsp/cache/session.go b/gopls/internal/lsp/cache/session.go
new file mode 100644
index 000000000..f8d583542
--- /dev/null
+++ b/gopls/internal/lsp/cache/session.go
@@ -0,0 +1,730 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cache
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+
+ "golang.org/x/tools/gopls/internal/govulncheck"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/bug"
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/gocommand"
+ "golang.org/x/tools/internal/imports"
+ "golang.org/x/tools/internal/persistent"
+ "golang.org/x/tools/internal/xcontext"
+)
+
+type Session struct {
+ // Unique identifier for this session.
+ id string
+
+ // Immutable attributes shared across views.
+ cache *Cache // shared cache
+ gocmdRunner *gocommand.Runner // limits go command concurrency
+
+ optionsMu sync.Mutex
+ options *source.Options
+
+ viewMu sync.Mutex
+ views []*View
+ viewMap map[span.URI]*View // map of URI->best view
+
+ *overlayFS
+}
+
+// ID returns the unique identifier for this session on this server.
+func (s *Session) ID() string { return s.id }
+func (s *Session) String() string { return s.id }
+
+// Options returns a copy of the SessionOptions for this session.
+func (s *Session) Options() *source.Options {
+ s.optionsMu.Lock()
+ defer s.optionsMu.Unlock()
+ return s.options
+}
+
+// SetOptions sets the options of this session to new values.
+func (s *Session) SetOptions(options *source.Options) {
+ s.optionsMu.Lock()
+ defer s.optionsMu.Unlock()
+ s.options = options
+}
+
+// Shutdown the session and all views it has created.
+func (s *Session) Shutdown(ctx context.Context) {
+ var views []*View
+ s.viewMu.Lock()
+ views = append(views, s.views...)
+ s.views = nil
+ s.viewMap = nil
+ s.viewMu.Unlock()
+ for _, view := range views {
+ view.shutdown()
+ }
+ event.Log(ctx, "Shutdown session", KeyShutdownSession.Of(s))
+}
+
+// Cache returns the cache that created this session, for debugging only.
+func (s *Session) Cache() *Cache {
+ return s.cache
+}
+
+// NewView creates a new View, returning it and its first snapshot. If a
+// non-empty tempWorkspace directory is provided, the View will record a copy
+// of its gopls workspace module in that directory, so that client tooling
+// can execute in the same main module. On success it also returns a release
+// function that must be called when the Snapshot is no longer needed.
+func (s *Session) NewView(ctx context.Context, name string, folder span.URI, options *source.Options) (*View, source.Snapshot, func(), error) {
+ s.viewMu.Lock()
+ defer s.viewMu.Unlock()
+ for _, view := range s.views {
+ if span.SameExistingFile(view.folder, folder) {
+ return nil, nil, nil, source.ErrViewExists
+ }
+ }
+ view, snapshot, release, err := s.createView(ctx, name, folder, options, 0)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ s.views = append(s.views, view)
+ // we always need to drop the view map
+ s.viewMap = make(map[span.URI]*View)
+ return view, snapshot, release, nil
+}
+
+// TODO(rfindley): clarify that createView can never be cancelled (with the
+// possible exception of server shutdown).
+func (s *Session) createView(ctx context.Context, name string, folder span.URI, options *source.Options, seqID uint64) (*View, *snapshot, func(), error) {
+ index := atomic.AddInt64(&viewIndex, 1)
+
+ // Get immutable workspace information.
+ info, err := s.getWorkspaceInformation(ctx, folder, options)
+ if err != nil {
+ return nil, nil, func() {}, err
+ }
+
+ wsModFiles, wsModFilesErr := computeWorkspaceModFiles(ctx, info.gomod, info.effectiveGOWORK(), info.effectiveGO111MODULE(), s)
+
+ // We want a true background context and not a detached context here
+ // the spans need to be unrelated and no tag values should pollute it.
+ baseCtx := event.Detach(xcontext.Detach(ctx))
+ backgroundCtx, cancel := context.WithCancel(baseCtx)
+
+ v := &View{
+ id: strconv.FormatInt(index, 10),
+ gocmdRunner: s.gocmdRunner,
+ initialWorkspaceLoad: make(chan struct{}),
+ initializationSema: make(chan struct{}, 1),
+ options: options,
+ baseCtx: baseCtx,
+ name: name,
+ folder: folder,
+ moduleUpgrades: map[span.URI]map[string]string{},
+ vulns: map[span.URI]*govulncheck.Result{},
+ fs: s.overlayFS,
+ workspaceInformation: info,
+ }
+ v.importsState = &importsState{
+ ctx: backgroundCtx,
+ processEnv: &imports.ProcessEnv{
+ GocmdRunner: s.gocmdRunner,
+ SkipPathInScan: func(dir string) bool {
+ prefix := strings.TrimSuffix(string(v.folder), "/") + "/"
+ uri := strings.TrimSuffix(string(span.URIFromPath(dir)), "/")
+ if !strings.HasPrefix(uri+"/", prefix) {
+ return false
+ }
+ filterer := source.NewFilterer(options.DirectoryFilters)
+ rel := strings.TrimPrefix(uri, prefix)
+ disallow := filterer.Disallow(rel)
+ return disallow
+ },
+ },
+ }
+ v.snapshot = &snapshot{
+ sequenceID: seqID,
+ globalID: nextSnapshotID(),
+ view: v,
+ backgroundCtx: backgroundCtx,
+ cancel: cancel,
+ store: s.cache.store,
+ packages: persistent.NewMap(packageIDLessInterface),
+ meta: new(metadataGraph),
+ files: newFilesMap(),
+ parseCache: new(parseCache),
+ activePackages: persistent.NewMap(packageIDLessInterface),
+ symbolizeHandles: persistent.NewMap(uriLessInterface),
+ analyses: persistent.NewMap(analysisKeyLessInterface),
+ workspacePackages: make(map[PackageID]PackagePath),
+ unloadableFiles: make(map[span.URI]struct{}),
+ parseModHandles: persistent.NewMap(uriLessInterface),
+ parseWorkHandles: persistent.NewMap(uriLessInterface),
+ modTidyHandles: persistent.NewMap(uriLessInterface),
+ modVulnHandles: persistent.NewMap(uriLessInterface),
+ modWhyHandles: persistent.NewMap(uriLessInterface),
+ knownSubdirs: newKnownDirsSet(),
+ workspaceModFiles: wsModFiles,
+ workspaceModFilesErr: wsModFilesErr,
+ }
+ // Save one reference in the view.
+ v.releaseSnapshot = v.snapshot.Acquire()
+
+ // Record the environment of the newly created view in the log.
+ event.Log(ctx, viewEnv(v))
+
+ // Initialize the view without blocking.
+ initCtx, initCancel := context.WithCancel(xcontext.Detach(ctx))
+ v.initCancelFirstAttempt = initCancel
+ snapshot := v.snapshot
+
+ // Pass a second reference to the background goroutine.
+ bgRelease := snapshot.Acquire()
+ go func() {
+ defer bgRelease()
+ snapshot.initialize(initCtx, true)
+ }()
+
+ // Return a third reference to the caller.
+ return v, snapshot, snapshot.Acquire(), nil
+}
+
+// View returns a view with a matching name, if the session has one.
+func (s *Session) View(name string) *View {
+ s.viewMu.Lock()
+ defer s.viewMu.Unlock()
+ for _, view := range s.views {
+ if view.Name() == name {
+ return view
+ }
+ }
+ return nil
+}
+
+// ViewOf returns a view corresponding to the given URI.
+// If the file is not already associated with a view, pick one using some heuristics.
+func (s *Session) ViewOf(uri span.URI) (*View, error) {
+ s.viewMu.Lock()
+ defer s.viewMu.Unlock()
+ return s.viewOfLocked(uri)
+}
+
+// Precondition: caller holds s.viewMu lock.
+func (s *Session) viewOfLocked(uri span.URI) (*View, error) {
+ // Check if we already know this file.
+ if v, found := s.viewMap[uri]; found {
+ return v, nil
+ }
+ // Pick the best view for this file and memoize the result.
+ if len(s.views) == 0 {
+ return nil, fmt.Errorf("no views in session")
+ }
+ s.viewMap[uri] = bestViewForURI(uri, s.views)
+ return s.viewMap[uri], nil
+}
+
+func (s *Session) Views() []*View {
+ s.viewMu.Lock()
+ defer s.viewMu.Unlock()
+ result := make([]*View, len(s.views))
+ copy(result, s.views)
+ return result
+}
+
+// bestViewForURI returns the most closely matching view for the given URI
+// out of the given set of views.
+func bestViewForURI(uri span.URI, views []*View) *View {
+ // we need to find the best view for this file
+ var longest *View
+ for _, view := range views {
+ if longest != nil && len(longest.Folder()) > len(view.Folder()) {
+ continue
+ }
+ // TODO(rfindley): this should consider the workspace layout (i.e.
+ // go.work).
+ if view.contains(uri) {
+ longest = view
+ }
+ }
+ if longest != nil {
+ return longest
+ }
+ // Try our best to return a view that knows the file.
+ for _, view := range views {
+ if view.knownFile(uri) {
+ return view
+ }
+ }
+ // TODO: are there any more heuristics we can use?
+ return views[0]
+}
+
+// RemoveView removes the view v from the session
+func (s *Session) RemoveView(view *View) {
+ s.viewMu.Lock()
+ defer s.viewMu.Unlock()
+ i := s.dropView(view)
+ if i == -1 { // error reported elsewhere
+ return
+ }
+ // delete this view... we don't care about order but we do want to make
+ // sure we can garbage collect the view
+ s.views = removeElement(s.views, i)
+}
+
+// updateView recreates the view with the given options.
+//
+// If the resulting error is non-nil, the view may or may not have already been
+// dropped from the session.
+func (s *Session) updateView(ctx context.Context, view *View, options *source.Options) (*View, error) {
+ s.viewMu.Lock()
+ defer s.viewMu.Unlock()
+
+ return s.updateViewLocked(ctx, view, options)
+}
+
+func (s *Session) updateViewLocked(ctx context.Context, view *View, options *source.Options) (*View, error) {
+ // Preserve the snapshot ID if we are recreating the view.
+ view.snapshotMu.Lock()
+ if view.snapshot == nil {
+ view.snapshotMu.Unlock()
+ panic("updateView called after View was already shut down")
+ }
+ seqID := view.snapshot.sequenceID // Preserve sequence IDs when updating a view in place.
+ view.snapshotMu.Unlock()
+
+ i := s.dropView(view)
+ if i == -1 {
+ return nil, fmt.Errorf("view %q not found", view.id)
+ }
+
+ v, _, release, err := s.createView(ctx, view.name, view.folder, options, seqID)
+ release()
+
+ if err != nil {
+ // we have dropped the old view, but could not create the new one
+ // this should not happen and is very bad, but we still need to clean
+ // up the view array if it happens
+ s.views = removeElement(s.views, i)
+ return nil, err
+ }
+ // substitute the new view into the array where the old view was
+ s.views[i] = v
+ return v, nil
+}
+
+// removeElement removes the ith element from the slice replacing it with the last element.
+// TODO(adonovan): generics, someday.
+func removeElement(slice []*View, index int) []*View {
+ last := len(slice) - 1
+ slice[index] = slice[last]
+ slice[last] = nil // aid GC
+ return slice[:last]
+}
+
+// dropView removes v from the set of views for the receiver s and calls
+// v.shutdown, returning the index of v in s.views (if found), or -1 if v was
+// not found. s.viewMu must be held while calling this function.
+func (s *Session) dropView(v *View) int {
+ // we always need to drop the view map
+ s.viewMap = make(map[span.URI]*View)
+ for i := range s.views {
+ if v == s.views[i] {
+ // we found the view, drop it and return the index it was found at
+ s.views[i] = nil
+ v.shutdown()
+ return i
+ }
+ }
+ // TODO(rfindley): it looks wrong that we don't shutdown v in this codepath.
+ // We should never get here.
+ bug.Reportf("tried to drop nonexistent view %q", v.id)
+ return -1
+}
+
+func (s *Session) ModifyFiles(ctx context.Context, changes []source.FileModification) error {
+ _, release, err := s.DidModifyFiles(ctx, changes)
+ release()
+ return err
+}
+
+// TODO(rfindley): fileChange seems redundant with source.FileModification.
+// De-dupe into a common representation for changes.
+type fileChange struct {
+ content []byte
+ exists bool
+ fileHandle source.FileHandle
+
+ // isUnchanged indicates whether the file action is one that does not
+ // change the actual contents of the file. Opens and closes should not
+ // be treated like other changes, since the file content doesn't change.
+ isUnchanged bool
+}
+
+// DidModifyFiles reports a file modification to the session. It returns
+// the new snapshots after the modifications have been applied, paired with
+// the affected file URIs for those snapshots.
+// On success, it returns a release function that
+// must be called when the snapshots are no longer needed.
+//
+// TODO(rfindley): what happens if this function fails? It must leave us in a
+// broken state, which we should surface to the user, probably as a request to
+// restart gopls.
+func (s *Session) DidModifyFiles(ctx context.Context, changes []source.FileModification) (map[source.Snapshot][]span.URI, func(), error) {
+ s.viewMu.Lock()
+ defer s.viewMu.Unlock()
+
+ // Update overlays.
+ //
+ // TODO(rfindley): I think we do this while holding viewMu to prevent views
+ // from seeing the updated file content before they have processed
+ // invalidations, which could lead to a partial view of the changes (i.e.
+ // spurious diagnostics). However, any such view would immediately be
+ // invalidated here, so it is possible that we could update overlays before
+ // acquiring viewMu.
+ if err := s.updateOverlays(ctx, changes); err != nil {
+ return nil, nil, err
+ }
+
+ // Re-create views whose definition may have changed.
+ //
+ // checkViews controls whether to re-evaluate view definitions when
+ // collecting views below. Any addition or deletion of a go.mod or go.work
+ // file may have affected the definition of the view.
+ checkViews := false
+
+ for _, c := range changes {
+ if isGoMod(c.URI) || isGoWork(c.URI) {
+ // Change, InvalidateMetadata, and UnknownFileAction actions do not cause
+ // us to re-evaluate views.
+ redoViews := (c.Action != source.Change &&
+ c.Action != source.InvalidateMetadata &&
+ c.Action != source.UnknownFileAction)
+
+ if redoViews {
+ checkViews = true
+ break
+ }
+ }
+ }
+
+ if checkViews {
+ for _, view := range s.views {
+ // TODO(rfindley): can we avoid running the go command (go env)
+ // synchronously to change processing? Can we assume that the env did not
+ // change, and derive go.work using a combination of the configured
+ // GOWORK value and filesystem?
+ info, err := s.getWorkspaceInformation(ctx, view.folder, view.Options())
+ if err != nil {
+ // Catastrophic failure, equivalent to a failure of session
+ // initialization and therefore should almost never happen. One
+ // scenario where this failure mode could occur is if some file
+ // permissions have changed preventing us from reading go.mod
+ // files.
+ //
+ // TODO(rfindley): consider surfacing this error more loudly. We
+ // could report a bug, but it's not really a bug.
+ event.Error(ctx, "fetching workspace information", err)
+ }
+
+ if info != view.workspaceInformation {
+ _, err := s.updateViewLocked(ctx, view, view.Options())
+ if err != nil {
+ // More catastrophic failure. The view may or may not still exist.
+ // The best we can do is log and move on.
+ event.Error(ctx, "recreating view", err)
+ }
+ }
+ }
+ }
+
+ // Collect information about views affected by these changes.
+ views := make(map[*View]map[span.URI]*fileChange)
+ affectedViews := map[span.URI][]*View{}
+ // forceReloadMetadata records whether any change is the magic
+ // source.InvalidateMetadata action.
+ forceReloadMetadata := false
+ for _, c := range changes {
+ if c.Action == source.InvalidateMetadata {
+ forceReloadMetadata = true
+ }
+ // Build the list of affected views.
+ var changedViews []*View
+ for _, view := range s.views {
+ // Don't propagate changes that are outside of the view's scope
+ // or knowledge.
+ if !view.relevantChange(c) {
+ continue
+ }
+ changedViews = append(changedViews, view)
+ }
+ // If the change is not relevant to any view, but the change is
+ // happening in the editor, assign it the most closely matching view.
+ if len(changedViews) == 0 {
+ if c.OnDisk {
+ continue
+ }
+ bestView, err := s.viewOfLocked(c.URI)
+ if err != nil {
+ return nil, nil, err
+ }
+ changedViews = append(changedViews, bestView)
+ }
+ affectedViews[c.URI] = changedViews
+
+ isUnchanged := c.Action == source.Open || c.Action == source.Close
+
+ // Apply the changes to all affected views.
+ for _, view := range changedViews {
+ // Make sure that the file is added to the view's seenFiles set.
+ view.markKnown(c.URI)
+ if _, ok := views[view]; !ok {
+ views[view] = make(map[span.URI]*fileChange)
+ }
+ fh, err := s.GetFile(ctx, c.URI)
+ if err != nil {
+ return nil, nil, err
+ }
+ content, err := fh.Read()
+ if err != nil {
+ // Ignore the error: the file may be deleted.
+ content = nil
+ }
+ views[view][c.URI] = &fileChange{
+ content: content,
+ exists: err == nil,
+ fileHandle: fh,
+ isUnchanged: isUnchanged,
+ }
+ }
+ }
+
+ var releases []func()
+ viewToSnapshot := map[*View]*snapshot{}
+ for view, changed := range views {
+ snapshot, release := view.invalidateContent(ctx, changed, forceReloadMetadata)
+ releases = append(releases, release)
+ viewToSnapshot[view] = snapshot
+ }
+
+ // The release function is called when the
+ // returned URIs no longer need to be valid.
+ release := func() {
+ for _, release := range releases {
+ release()
+ }
+ }
+
+ // We only want to diagnose each changed file once, in the view to which
+ // it "most" belongs. We do this by picking the best view for each URI,
+ // and then aggregating the set of snapshots and their URIs (to avoid
+ // diagnosing the same snapshot multiple times).
+ snapshotURIs := map[source.Snapshot][]span.URI{}
+ for _, mod := range changes {
+ viewSlice, ok := affectedViews[mod.URI]
+ if !ok || len(viewSlice) == 0 {
+ continue
+ }
+ view := bestViewForURI(mod.URI, viewSlice)
+ snapshot, ok := viewToSnapshot[view]
+ if !ok {
+ panic(fmt.Sprintf("no snapshot for view %s", view.Folder()))
+ }
+ snapshotURIs[snapshot] = append(snapshotURIs[snapshot], mod.URI)
+ }
+
+ return snapshotURIs, release, nil
+}
+
+// ExpandModificationsToDirectories returns the set of changes with the
+// directory changes removed and expanded to include all of the files in
+// the directory.
+func (s *Session) ExpandModificationsToDirectories(ctx context.Context, changes []source.FileModification) []source.FileModification {
+ var snapshots []*snapshot
+ s.viewMu.Lock()
+ for _, v := range s.views {
+ snapshot, release, err := v.getSnapshot()
+ if err != nil {
+ continue // view is shut down; continue with others
+ }
+ defer release()
+ snapshots = append(snapshots, snapshot)
+ }
+ s.viewMu.Unlock()
+
+ knownDirs := knownDirectories(ctx, snapshots)
+ defer knownDirs.Destroy()
+
+ var result []source.FileModification
+ for _, c := range changes {
+ if !knownDirs.Contains(c.URI) {
+ result = append(result, c)
+ continue
+ }
+ affectedFiles := knownFilesInDir(ctx, snapshots, c.URI)
+ var fileChanges []source.FileModification
+ for uri := range affectedFiles {
+ fileChanges = append(fileChanges, source.FileModification{
+ URI: uri,
+ Action: c.Action,
+ LanguageID: "",
+ OnDisk: c.OnDisk,
+ // changes to directories cannot include text or versions
+ })
+ }
+ result = append(result, fileChanges...)
+ }
+ return result
+}
+
+// knownDirectories returns all of the directories known to the given
+// snapshots, including workspace directories and their subdirectories.
+// It is responsibility of the caller to destroy the returned set.
+func knownDirectories(ctx context.Context, snapshots []*snapshot) knownDirsSet {
+ result := newKnownDirsSet()
+ for _, snapshot := range snapshots {
+ dirs := snapshot.dirs(ctx)
+ for _, dir := range dirs {
+ result.Insert(dir)
+ }
+ knownSubdirs := snapshot.getKnownSubdirs(dirs)
+ result.SetAll(knownSubdirs)
+ knownSubdirs.Destroy()
+ }
+ return result
+}
+
+// knownFilesInDir returns the files known to the snapshots in the session.
+// It does not respect symlinks.
+func knownFilesInDir(ctx context.Context, snapshots []*snapshot, dir span.URI) map[span.URI]struct{} {
+ files := map[span.URI]struct{}{}
+
+ for _, snapshot := range snapshots {
+ for _, uri := range snapshot.knownFilesInDir(ctx, dir) {
+ files[uri] = struct{}{}
+ }
+ }
+ return files
+}
+
+// Precondition: caller holds s.viewMu lock.
+// TODO(rfindley): move this to fs_overlay.go.
+func (fs *overlayFS) updateOverlays(ctx context.Context, changes []source.FileModification) error {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+
+ for _, c := range changes {
+ // Don't update overlays for metadata invalidations.
+ if c.Action == source.InvalidateMetadata {
+ continue
+ }
+
+ o, ok := fs.overlays[c.URI]
+
+ // If the file is not opened in an overlay and the change is on disk,
+ // there's no need to update an overlay. If there is an overlay, we
+ // may need to update the overlay's saved value.
+ if !ok && c.OnDisk {
+ continue
+ }
+
+ // Determine the file kind on open, otherwise, assume it has been cached.
+ var kind source.FileKind
+ switch c.Action {
+ case source.Open:
+ kind = source.FileKindForLang(c.LanguageID)
+ default:
+ if !ok {
+ return fmt.Errorf("updateOverlays: modifying unopened overlay %v", c.URI)
+ }
+ kind = o.kind
+ }
+
+ // Closing a file just deletes its overlay.
+ if c.Action == source.Close {
+ delete(fs.overlays, c.URI)
+ continue
+ }
+
+ // If the file is on disk, check if its content is the same as in the
+ // overlay. Saves and on-disk file changes don't come with the file's
+ // content.
+ text := c.Text
+ if text == nil && (c.Action == source.Save || c.OnDisk) {
+ if !ok {
+ return fmt.Errorf("no known content for overlay for %s", c.Action)
+ }
+ text = o.content
+ }
+ // On-disk changes don't come with versions.
+ version := c.Version
+ if c.OnDisk || c.Action == source.Save {
+ version = o.version
+ }
+ hash := source.HashOf(text)
+ var sameContentOnDisk bool
+ switch c.Action {
+ case source.Delete:
+ // Do nothing. sameContentOnDisk should be false.
+ case source.Save:
+ // Make sure the version and content (if present) is the same.
+ if false && o.version != version { // Client no longer sends the version
+ return fmt.Errorf("updateOverlays: saving %s at version %v, currently at %v", c.URI, c.Version, o.version)
+ }
+ if c.Text != nil && o.hash != hash {
+ return fmt.Errorf("updateOverlays: overlay %s changed on save", c.URI)
+ }
+ sameContentOnDisk = true
+ default:
+ fh, err := fs.delegate.GetFile(ctx, c.URI)
+ if err != nil {
+ return err
+ }
+ _, readErr := fh.Read()
+ sameContentOnDisk = (readErr == nil && fh.FileIdentity().Hash == hash)
+ }
+ o = &Overlay{
+ uri: c.URI,
+ version: version,
+ content: text,
+ kind: kind,
+ hash: hash,
+ saved: sameContentOnDisk,
+ }
+
+ // NOTE: previous versions of this code checked here that the overlay had a
+ // view and file kind (but we don't know why).
+
+ fs.overlays[c.URI] = o
+ }
+
+ return nil
+}
+
+// FileWatchingGlobPatterns returns glob patterns to watch every directory
+// known by the view. For views within a module, this is the module root,
+// any directory in the module root, and any replace targets.
+func (s *Session) FileWatchingGlobPatterns(ctx context.Context) map[string]struct{} {
+ s.viewMu.Lock()
+ defer s.viewMu.Unlock()
+ patterns := map[string]struct{}{}
+ for _, view := range s.views {
+ snapshot, release, err := view.getSnapshot()
+ if err != nil {
+ continue // view is shut down; continue with others
+ }
+ for k, v := range snapshot.fileWatchingGlobPatterns(ctx) {
+ patterns[k] = v
+ }
+ release()
+ }
+ return patterns
+}
diff --git a/gopls/internal/lsp/cache/snapshot.go b/gopls/internal/lsp/cache/snapshot.go
new file mode 100644
index 000000000..f81c54adb
--- /dev/null
+++ b/gopls/internal/lsp/cache/snapshot.go
@@ -0,0 +1,2214 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cache
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "go/ast"
+ "go/token"
+ "go/types"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "unsafe"
+
+ "golang.org/x/sync/errgroup"
+ "golang.org/x/tools/go/packages"
+ "golang.org/x/tools/go/types/objectpath"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/lsp/source/methodsets"
+ "golang.org/x/tools/gopls/internal/lsp/source/xrefs"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/bug"
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/event/tag"
+ "golang.org/x/tools/internal/gocommand"
+ "golang.org/x/tools/internal/memoize"
+ "golang.org/x/tools/internal/packagesinternal"
+ "golang.org/x/tools/internal/persistent"
+ "golang.org/x/tools/internal/typesinternal"
+)
+
+type snapshot struct {
+ sequenceID uint64
+ globalID source.GlobalSnapshotID
+
+ // TODO(rfindley): the snapshot holding a reference to the view poses
+ // lifecycle problems: a view may be shut down and waiting for work
+ // associated with this snapshot to complete. While most accesses of the view
+ // are benign (options or workspace information), this is not formalized and
+ // it is wrong for the snapshot to use a shutdown view.
+ //
+ // Fix this by passing options and workspace information to the snapshot,
+ // both of which should be immutable for the snapshot.
+ view *View
+
+ cancel func()
+ backgroundCtx context.Context
+
+ store *memoize.Store // cache of handles shared by all snapshots
+
+ refcount sync.WaitGroup // number of references
+ destroyedBy *string // atomically set to non-nil in Destroy once refcount = 0
+
+ // initialized reports whether the snapshot has been initialized. Concurrent
+ // initialization is guarded by the view.initializationSema. Each snapshot is
+ // initialized at most once: concurrent initialization is guarded by
+ // view.initializationSema.
+ initialized bool
+ // initializedErr holds the last error resulting from initialization. If
+ // initialization fails, we only retry when the the workspace modules change,
+ // to avoid too many go/packages calls.
+ initializedErr *source.CriticalError
+
+ // mu guards all of the maps in the snapshot, as well as the builtin URI.
+ mu sync.Mutex
+
+ // builtin pins the AST and package for builtin.go in memory.
+ builtin span.URI
+
+ // meta holds loaded metadata.
+ //
+ // meta is guarded by mu, but the metadataGraph itself is immutable.
+ // TODO(rfindley): in many places we hold mu while operating on meta, even
+ // though we only need to hold mu while reading the pointer.
+ meta *metadataGraph
+
+ // files maps file URIs to their corresponding FileHandles.
+ // It may invalidated when a file's content changes.
+ files filesMap
+
+ // parseCache holds an LRU cache of recently parsed files.
+ parseCache *parseCache
+
+ // symbolizeHandles maps each file URI to a handle for the future
+ // result of computing the symbols declared in that file.
+ symbolizeHandles *persistent.Map // from span.URI to *memoize.Promise[symbolizeResult]
+
+ // packages maps a packageKey to a *packageHandle.
+ // It may be invalidated when a file's content changes.
+ //
+ // Invariants to preserve:
+ // - packages.Get(id).meta == meta.metadata[id] for all ids
+ // - if a package is in packages, then all of its dependencies should also
+ // be in packages, unless there is a missing import
+ packages *persistent.Map // from packageID to *packageHandle
+
+ // activePackages maps a package ID to a memoized active package, or nil if
+ // the package is known not to be open.
+ //
+ // IDs not contained in the map are not known to be open or not open.
+ activePackages *persistent.Map // from packageID to *Package
+
+ // analyses maps an analysisKey (which identifies a package
+ // and a set of analyzers) to the handle for the future result
+ // of loading the package and analyzing it.
+ analyses *persistent.Map // from analysisKey to analysisPromise
+
+ // workspacePackages contains the workspace's packages, which are loaded
+ // when the view is created.
+ workspacePackages map[PackageID]PackagePath
+
+ // shouldLoad tracks packages that need to be reloaded, mapping a PackageID
+ // to the package paths that should be used to reload it
+ //
+ // When we try to load a package, we clear it from the shouldLoad map
+ // regardless of whether the load succeeded, to prevent endless loads.
+ shouldLoad map[PackageID][]PackagePath
+
+ // unloadableFiles keeps track of files that we've failed to load.
+ unloadableFiles map[span.URI]struct{}
+
+ // TODO(rfindley): rename the handles below to "promises". A promise is
+ // different from a handle (we mutate the package handle.)
+
+ // parseModHandles keeps track of any parseModHandles for the snapshot.
+ // The handles need not refer to only the view's go.mod file.
+ parseModHandles *persistent.Map // from span.URI to *memoize.Promise[parseModResult]
+
+ // parseWorkHandles keeps track of any parseWorkHandles for the snapshot.
+ // The handles need not refer to only the view's go.work file.
+ parseWorkHandles *persistent.Map // from span.URI to *memoize.Promise[parseWorkResult]
+
+ // Preserve go.mod-related handles to avoid garbage-collecting the results
+ // of various calls to the go command. The handles need not refer to only
+ // the view's go.mod file.
+ modTidyHandles *persistent.Map // from span.URI to *memoize.Promise[modTidyResult]
+ modWhyHandles *persistent.Map // from span.URI to *memoize.Promise[modWhyResult]
+ modVulnHandles *persistent.Map // from span.URI to *memoize.Promise[modVulnResult]
+
+ // knownSubdirs is the set of subdirectories in the workspace, used to
+ // create glob patterns for file watching.
+ knownSubdirs knownDirsSet
+ knownSubdirsPatternCache string
+ // unprocessedSubdirChanges are any changes that might affect the set of
+ // subdirectories in the workspace. They are not reflected to knownSubdirs
+ // during the snapshot cloning step as it can slow down cloning.
+ unprocessedSubdirChanges []*fileChange
+
+ // workspaceModFiles holds the set of mod files active in this snapshot.
+ //
+ // This is either empty, a single entry for the workspace go.mod file, or the
+ // set of mod files used by the workspace go.work file.
+ //
+ // This set is immutable inside the snapshot, and therefore is not guarded by mu.
+ workspaceModFiles map[span.URI]struct{}
+ workspaceModFilesErr error // error encountered computing workspaceModFiles
+}
+
+var globalSnapshotID uint64
+
+func nextSnapshotID() source.GlobalSnapshotID {
+ return source.GlobalSnapshotID(atomic.AddUint64(&globalSnapshotID, 1))
+}
+
+var _ memoize.RefCounted = (*snapshot)(nil) // snapshots are reference-counted
+
+// Acquire prevents the snapshot from being destroyed until the returned function is called.
+//
+// (s.Acquire().release() could instead be expressed as a pair of
+// method calls s.IncRef(); s.DecRef(). The latter has the advantage
+// that the DecRefs are fungible and don't require holding anything in
+// addition to the refcounted object s, but paradoxically that is also
+// an advantage of the current approach, which forces the caller to
+// consider the release function at every stage, making a reference
+// leak more obvious.)
+func (s *snapshot) Acquire() func() {
+ type uP = unsafe.Pointer
+ if destroyedBy := atomic.LoadPointer((*uP)(uP(&s.destroyedBy))); destroyedBy != nil {
+ log.Panicf("%d: acquire() after Destroy(%q)", s.globalID, *(*string)(destroyedBy))
+ }
+ s.refcount.Add(1)
+ return s.refcount.Done
+}
+
+func (s *snapshot) awaitPromise(ctx context.Context, p *memoize.Promise) (interface{}, error) {
+ return p.Get(ctx, s)
+}
+
+// destroy waits for all leases on the snapshot to expire then releases
+// any resources (reference counts and files) associated with it.
+// Snapshots being destroyed can be awaited using v.destroyWG.
+//
+// TODO(adonovan): move this logic into the release function returned
+// by Acquire when the reference count becomes zero. (This would cost
+// us the destroyedBy debug info, unless we add it to the signature of
+// memoize.RefCounted.Acquire.)
+//
+// The destroyedBy argument is used for debugging.
+//
+// v.snapshotMu must be held while calling this function, in order to preserve
+// the invariants described by the the docstring for v.snapshot.
+func (v *View) destroy(s *snapshot, destroyedBy string) {
+ v.snapshotWG.Add(1)
+ go func() {
+ defer v.snapshotWG.Done()
+ s.destroy(destroyedBy)
+ }()
+}
+
+func (s *snapshot) destroy(destroyedBy string) {
+ // Wait for all leases to end before commencing destruction.
+ s.refcount.Wait()
+
+ // Report bad state as a debugging aid.
+ // Not foolproof: another thread could acquire() at this moment.
+ type uP = unsafe.Pointer // looking forward to generics...
+ if old := atomic.SwapPointer((*uP)(uP(&s.destroyedBy)), uP(&destroyedBy)); old != nil {
+ log.Panicf("%d: Destroy(%q) after Destroy(%q)", s.globalID, destroyedBy, *(*string)(old))
+ }
+
+ s.packages.Destroy()
+ s.activePackages.Destroy()
+ s.analyses.Destroy()
+ s.files.Destroy()
+ s.knownSubdirs.Destroy()
+ s.symbolizeHandles.Destroy()
+ s.parseModHandles.Destroy()
+ s.parseWorkHandles.Destroy()
+ s.modTidyHandles.Destroy()
+ s.modVulnHandles.Destroy()
+ s.modWhyHandles.Destroy()
+}
+
+func (s *snapshot) SequenceID() uint64 {
+ return s.sequenceID
+}
+
+func (s *snapshot) GlobalID() source.GlobalSnapshotID {
+ return s.globalID
+}
+
+func (s *snapshot) View() source.View {
+ return s.view
+}
+
+func (s *snapshot) BackgroundContext() context.Context {
+ return s.backgroundCtx
+}
+
+func (s *snapshot) ModFiles() []span.URI {
+ var uris []span.URI
+ for modURI := range s.workspaceModFiles {
+ uris = append(uris, modURI)
+ }
+ return uris
+}
+
+func (s *snapshot) WorkFile() span.URI {
+ return s.view.effectiveGOWORK()
+}
+
+func (s *snapshot) Templates() map[span.URI]source.FileHandle {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ tmpls := map[span.URI]source.FileHandle{}
+ s.files.Range(func(k span.URI, fh source.FileHandle) {
+ if s.view.FileKind(fh) == source.Tmpl {
+ tmpls[k] = fh
+ }
+ })
+ return tmpls
+}
+
+func (s *snapshot) ValidBuildConfiguration() bool {
+ // Since we only really understand the `go` command, if the user has a
+ // different GOPACKAGESDRIVER, assume that their configuration is valid.
+ if s.view.hasGopackagesDriver {
+ return true
+ }
+ // Check if the user is working within a module or if we have found
+ // multiple modules in the workspace.
+ if len(s.workspaceModFiles) > 0 {
+ return true
+ }
+ // The user may have a multiple directories in their GOPATH.
+ // Check if the workspace is within any of them.
+ // TODO(rfindley): this should probably be subject to "if GO111MODULES = off {...}".
+ for _, gp := range filepath.SplitList(s.view.gopath) {
+ if source.InDir(filepath.Join(gp, "src"), s.view.folder.Filename()) {
+ return true
+ }
+ }
+ return false
+}
+
+// moduleMode reports whether the current snapshot uses Go modules.
+//
+// From https://go.dev/ref/mod, module mode is active if either of the
+// following hold:
+// - GO111MODULE=on
+// - GO111MODULE=auto and we are inside a module or have a GOWORK value.
+//
+// Additionally, this method returns false if GOPACKAGESDRIVER is set.
+//
+// TODO(rfindley): use this more widely.
+func (s *snapshot) moduleMode() bool {
+ // Since we only really understand the `go` command, if the user has a
+ // different GOPACKAGESDRIVER, assume that their configuration is valid.
+ if s.view.hasGopackagesDriver {
+ return false
+ }
+
+ switch s.view.effectiveGO111MODULE() {
+ case on:
+ return true
+ case off:
+ return false
+ default:
+ return len(s.workspaceModFiles) > 0 || s.view.gowork != ""
+ }
+}
+
+// workspaceMode describes the way in which the snapshot's workspace should
+// be loaded.
+//
+// TODO(rfindley): remove this, in favor of specific methods.
+func (s *snapshot) workspaceMode() workspaceMode {
+ var mode workspaceMode
+
+ // If the view has an invalid configuration, don't build the workspace
+ // module.
+ validBuildConfiguration := s.ValidBuildConfiguration()
+ if !validBuildConfiguration {
+ return mode
+ }
+ // If the view is not in a module and contains no modules, but still has a
+ // valid workspace configuration, do not create the workspace module.
+ // It could be using GOPATH or a different build system entirely.
+ if len(s.workspaceModFiles) == 0 && validBuildConfiguration {
+ return mode
+ }
+ mode |= moduleMode
+ options := s.view.Options()
+ if options.TempModfile {
+ mode |= tempModfile
+ }
+ return mode
+}
+
+// config returns the configuration used for the snapshot's interaction with
+// the go/packages API. It uses the given working directory.
+//
+// TODO(rstambler): go/packages requires that we do not provide overlays for
+// multiple modules in on config, so buildOverlay needs to filter overlays by
+// module.
+func (s *snapshot) config(ctx context.Context, inv *gocommand.Invocation) *packages.Config {
+ s.view.optionsMu.Lock()
+ verboseOutput := s.view.options.VerboseOutput
+ s.view.optionsMu.Unlock()
+
+ cfg := &packages.Config{
+ Context: ctx,
+ Dir: inv.WorkingDir,
+ Env: inv.Env,
+ BuildFlags: inv.BuildFlags,
+ Mode: packages.NeedName |
+ packages.NeedFiles |
+ packages.NeedCompiledGoFiles |
+ packages.NeedImports |
+ packages.NeedDeps |
+ packages.NeedTypesSizes |
+ packages.NeedModule |
+ packages.NeedEmbedFiles |
+ packages.LoadMode(packagesinternal.DepsErrors) |
+ packages.LoadMode(packagesinternal.ForTest),
+ Fset: nil, // we do our own parsing
+ Overlay: s.buildOverlay(),
+ ParseFile: func(*token.FileSet, string, []byte) (*ast.File, error) {
+ panic("go/packages must not be used to parse files")
+ },
+ Logf: func(format string, args ...interface{}) {
+ if verboseOutput {
+ event.Log(ctx, fmt.Sprintf(format, args...))
+ }
+ },
+ Tests: true,
+ }
+ packagesinternal.SetModFile(cfg, inv.ModFile)
+ packagesinternal.SetModFlag(cfg, inv.ModFlag)
+ // We want to type check cgo code if go/types supports it.
+ if typesinternal.SetUsesCgo(&types.Config{}) {
+ cfg.Mode |= packages.LoadMode(packagesinternal.TypecheckCgo)
+ }
+ packagesinternal.SetGoCmdRunner(cfg, s.view.gocmdRunner)
+ return cfg
+}
+
+func (s *snapshot) RunGoCommandDirect(ctx context.Context, mode source.InvocationFlags, inv *gocommand.Invocation) (*bytes.Buffer, error) {
+ _, inv, cleanup, err := s.goCommandInvocation(ctx, mode, inv)
+ if err != nil {
+ return nil, err
+ }
+ defer cleanup()
+
+ return s.view.gocmdRunner.Run(ctx, *inv)
+}
+
+func (s *snapshot) RunGoCommandPiped(ctx context.Context, mode source.InvocationFlags, inv *gocommand.Invocation, stdout, stderr io.Writer) error {
+ _, inv, cleanup, err := s.goCommandInvocation(ctx, mode, inv)
+ if err != nil {
+ return err
+ }
+ defer cleanup()
+ return s.view.gocmdRunner.RunPiped(ctx, *inv, stdout, stderr)
+}
+
+func (s *snapshot) RunGoCommands(ctx context.Context, allowNetwork bool, wd string, run func(invoke func(...string) (*bytes.Buffer, error)) error) (bool, []byte, []byte, error) {
+ var flags source.InvocationFlags
+ if s.workspaceMode()&tempModfile != 0 {
+ flags = source.WriteTemporaryModFile
+ } else {
+ flags = source.Normal
+ }
+ if allowNetwork {
+ flags |= source.AllowNetwork
+ }
+ tmpURI, inv, cleanup, err := s.goCommandInvocation(ctx, flags, &gocommand.Invocation{WorkingDir: wd})
+ if err != nil {
+ return false, nil, nil, err
+ }
+ defer cleanup()
+ invoke := func(args ...string) (*bytes.Buffer, error) {
+ inv.Verb = args[0]
+ inv.Args = args[1:]
+ return s.view.gocmdRunner.Run(ctx, *inv)
+ }
+ if err := run(invoke); err != nil {
+ return false, nil, nil, err
+ }
+ if flags.Mode() != source.WriteTemporaryModFile {
+ return false, nil, nil, nil
+ }
+ var modBytes, sumBytes []byte
+ modBytes, err = ioutil.ReadFile(tmpURI.Filename())
+ if err != nil && !os.IsNotExist(err) {
+ return false, nil, nil, err
+ }
+ sumBytes, err = ioutil.ReadFile(strings.TrimSuffix(tmpURI.Filename(), ".mod") + ".sum")
+ if err != nil && !os.IsNotExist(err) {
+ return false, nil, nil, err
+ }
+ return true, modBytes, sumBytes, nil
+}
+
+// goCommandInvocation populates inv with configuration for running go commands on the snapshot.
+//
+// TODO(rfindley): refactor this function to compose the required configuration
+// explicitly, rather than implicitly deriving it from flags and inv.
+//
+// TODO(adonovan): simplify cleanup mechanism. It's hard to see, but
+// it used only after call to tempModFile. Clarify that it is only
+// non-nil on success.
+func (s *snapshot) goCommandInvocation(ctx context.Context, flags source.InvocationFlags, inv *gocommand.Invocation) (tmpURI span.URI, updatedInv *gocommand.Invocation, cleanup func(), err error) {
+ s.view.optionsMu.Lock()
+ allowModfileModificationOption := s.view.options.AllowModfileModifications
+ allowNetworkOption := s.view.options.AllowImplicitNetworkAccess
+
+ // TODO(rfindley): this is very hard to follow, and may not even be doing the
+ // right thing: should inv.Env really trample view.options? Do we ever invoke
+ // this with a non-empty inv.Env?
+ //
+ // We should refactor to make it clearer that the correct env is being used.
+ inv.Env = append(append(append(os.Environ(), s.view.options.EnvSlice()...), inv.Env...), "GO111MODULE="+s.view.GO111MODULE())
+ inv.BuildFlags = append([]string{}, s.view.options.BuildFlags...)
+ s.view.optionsMu.Unlock()
+ cleanup = func() {} // fallback
+
+ // All logic below is for module mode.
+ if s.workspaceMode()&moduleMode == 0 {
+ return "", inv, cleanup, nil
+ }
+
+ mode, allowNetwork := flags.Mode(), flags.AllowNetwork()
+ if !allowNetwork && !allowNetworkOption {
+ inv.Env = append(inv.Env, "GOPROXY=off")
+ }
+
+ // What follows is rather complicated logic for how to actually run the go
+ // command. A word of warning: this is the result of various incremental
+ // features added to gopls, and varying behavior of the Go command across Go
+ // versions. It can surely be cleaned up significantly, but tread carefully.
+ //
+ // Roughly speaking we need to resolve four things:
+ // - the working directory.
+ // - the -mod flag
+ // - the -modfile flag
+ //
+ // These are dependent on a number of factors: whether we need to run in a
+ // synthetic workspace, whether flags are supported at the current go
+ // version, and what we're actually trying to achieve (the
+ // source.InvocationFlags).
+
+ var modURI span.URI
+ // Select the module context to use.
+ // If we're type checking, we need to use the workspace context, meaning
+ // the main (workspace) module. Otherwise, we should use the module for
+ // the passed-in working dir.
+ if mode == source.LoadWorkspace {
+ if s.view.effectiveGOWORK() == "" && s.view.gomod != "" {
+ modURI = s.view.gomod
+ }
+ } else {
+ modURI = s.GoModForFile(span.URIFromPath(inv.WorkingDir))
+ }
+
+ var modContent []byte
+ if modURI != "" {
+ modFH, err := s.GetFile(ctx, modURI)
+ if err != nil {
+ return "", nil, cleanup, err
+ }
+ modContent, err = modFH.Read()
+ if err != nil {
+ return "", nil, cleanup, err
+ }
+ }
+
+ // TODO(rfindley): in the case of go.work mode, modURI is empty and we fall
+ // back on the default behavior of vendorEnabled with an empty modURI. Figure
+ // out what is correct here and implement it explicitly.
+ vendorEnabled, err := s.vendorEnabled(ctx, modURI, modContent)
+ if err != nil {
+ return "", nil, cleanup, err
+ }
+
+ const mutableModFlag = "mod"
+ // If the mod flag isn't set, populate it based on the mode and workspace.
+ // TODO(rfindley): this doesn't make sense if we're not in module mode
+ if inv.ModFlag == "" {
+ switch mode {
+ case source.LoadWorkspace, source.Normal:
+ if vendorEnabled {
+ inv.ModFlag = "vendor"
+ } else if !allowModfileModificationOption {
+ inv.ModFlag = "readonly"
+ } else {
+ inv.ModFlag = mutableModFlag
+ }
+ case source.WriteTemporaryModFile:
+ inv.ModFlag = mutableModFlag
+ // -mod must be readonly when using go.work files - see issue #48941
+ inv.Env = append(inv.Env, "GOWORK=off")
+ }
+ }
+
+ // Only use a temp mod file if the modfile can actually be mutated.
+ needTempMod := inv.ModFlag == mutableModFlag
+ useTempMod := s.workspaceMode()&tempModfile != 0
+ if needTempMod && !useTempMod {
+ return "", nil, cleanup, source.ErrTmpModfileUnsupported
+ }
+
+ // We should use -modfile if:
+ // - the workspace mode supports it
+ // - we're using a go.work file on go1.18+, or we need a temp mod file (for
+ // example, if running go mod tidy in a go.work workspace)
+ //
+ // TODO(rfindley): this is very hard to follow. Refactor.
+ if !needTempMod && s.view.gowork != "" {
+ // Since we're running in the workspace root, the go command will resolve GOWORK automatically.
+ } else if useTempMod {
+ if modURI == "" {
+ return "", nil, cleanup, fmt.Errorf("no go.mod file found in %s", inv.WorkingDir)
+ }
+ modFH, err := s.GetFile(ctx, modURI)
+ if err != nil {
+ return "", nil, cleanup, err
+ }
+ // Use the go.sum if it happens to be available.
+ gosum := s.goSum(ctx, modURI)
+ tmpURI, cleanup, err = tempModFile(modFH, gosum)
+ if err != nil {
+ return "", nil, cleanup, err
+ }
+ inv.ModFile = tmpURI.Filename()
+ }
+
+ return tmpURI, inv, cleanup, nil
+}
+
+func (s *snapshot) buildOverlay() map[string][]byte {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ overlays := make(map[string][]byte)
+ s.files.Range(func(uri span.URI, fh source.FileHandle) {
+ overlay, ok := fh.(*Overlay)
+ if !ok {
+ return
+ }
+ if overlay.saved {
+ return
+ }
+ // TODO(rstambler): Make sure not to send overlays outside of the current view.
+ overlays[uri.Filename()] = overlay.content
+ })
+ return overlays
+}
+
+// Package data kinds, identifying various package data that may be stored in
+// the file cache.
+const (
+ xrefsKind = "xrefs"
+ methodSetsKind = "methodsets"
+ exportDataKind = "export"
+ diagnosticsKind = "diagnostics"
+)
+
+func (s *snapshot) PackageDiagnostics(ctx context.Context, ids ...PackageID) (map[span.URI][]*source.Diagnostic, error) {
+ // TODO(rfindley): opt: avoid unnecessary encode->decode after type-checking.
+ data, err := s.getPackageData(ctx, diagnosticsKind, ids, func(p *syntaxPackage) []byte {
+ return encodeDiagnostics(p.diagnostics)
+ })
+ perFile := make(map[span.URI][]*source.Diagnostic)
+ for _, data := range data {
+ if data != nil {
+ for _, diag := range data.m.Diagnostics {
+ perFile[diag.URI] = append(perFile[diag.URI], diag)
+ }
+ diags := decodeDiagnostics(data.data)
+ for _, diag := range diags {
+ perFile[diag.URI] = append(perFile[diag.URI], diag)
+ }
+ }
+ }
+ return perFile, err
+}
+
+func (s *snapshot) References(ctx context.Context, ids ...PackageID) ([]source.XrefIndex, error) {
+ data, err := s.getPackageData(ctx, xrefsKind, ids, func(p *syntaxPackage) []byte { return p.xrefs })
+ indexes := make([]source.XrefIndex, len(ids))
+ for i, data := range data {
+ if data != nil {
+ indexes[i] = XrefIndex{m: data.m, data: data.data}
+ }
+ }
+ return indexes, err
+}
+
+// An XrefIndex is a helper for looking up a package in a given package.
+type XrefIndex struct {
+ m *source.Metadata
+ data []byte
+}
+
+func (index XrefIndex) Lookup(targets map[PackagePath]map[objectpath.Path]struct{}) []protocol.Location {
+ return xrefs.Lookup(index.m, index.data, targets)
+}
+
+func (s *snapshot) MethodSets(ctx context.Context, ids ...PackageID) ([]*methodsets.Index, error) {
+ // TODO(rfindley): opt: avoid unnecessary encode->decode after type-checking.
+ data, err := s.getPackageData(ctx, methodSetsKind, ids, func(p *syntaxPackage) []byte {
+ return p.methodsets.Encode()
+ })
+ indexes := make([]*methodsets.Index, len(ids))
+ for i, data := range data {
+ if data != nil {
+ indexes[i] = methodsets.Decode(data.data)
+ } else if ids[i] == "unsafe" {
+ indexes[i] = &methodsets.Index{}
+ } else {
+ panic(fmt.Sprintf("nil data for %s", ids[i]))
+ }
+ }
+ return indexes, err
+}
+
+func (s *snapshot) MetadataForFile(ctx context.Context, uri span.URI) ([]*source.Metadata, error) {
+ s.mu.Lock()
+
+ // Start with the set of package associations derived from the last load.
+ ids := s.meta.ids[uri]
+
+ shouldLoad := false // whether any packages containing uri are marked 'shouldLoad'
+ for _, id := range ids {
+ if len(s.shouldLoad[id]) > 0 {
+ shouldLoad = true
+ }
+ }
+
+ // Check if uri is known to be unloadable.
+ _, unloadable := s.unloadableFiles[uri]
+
+ s.mu.Unlock()
+
+ // Reload if loading is likely to improve the package associations for uri:
+ // - uri is not contained in any valid packages
+ // - ...or one of the packages containing uri is marked 'shouldLoad'
+ // - ...but uri is not unloadable
+ if (shouldLoad || len(ids) == 0) && !unloadable {
+ scope := fileLoadScope(uri)
+ err := s.load(ctx, false, scope)
+
+ // Guard against failed loads due to context cancellation.
+ //
+ // Return the context error here as the current operation is no longer
+ // valid.
+ if ctxErr := ctx.Err(); ctxErr != nil {
+ return nil, ctxErr
+ }
+
+ // We must clear scopes after loading.
+ //
+ // TODO(rfindley): unlike reloadWorkspace, this is simply marking loaded
+ // packages as loaded. We could do this from snapshot.load and avoid
+ // raciness.
+ s.clearShouldLoad(scope)
+
+ // Don't return an error here, as we may still return stale IDs.
+ // Furthermore, the result of MetadataForFile should be consistent upon
+ // subsequent calls, even if the file is marked as unloadable.
+ if err != nil && !errors.Is(err, errNoPackages) {
+ event.Error(ctx, "MetadataForFile", err)
+ }
+ }
+
+ // Retrieve the metadata.
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ ids = s.meta.ids[uri]
+ metas := make([]*source.Metadata, len(ids))
+ for i, id := range ids {
+ metas[i] = s.meta.metadata[id]
+ if metas[i] == nil {
+ panic("nil metadata")
+ }
+ }
+ // Metadata is only ever added by loading,
+ // so if we get here and still have
+ // no IDs, uri is unloadable.
+ if !unloadable && len(ids) == 0 {
+ s.unloadableFiles[uri] = struct{}{}
+ }
+
+ // Sort packages "narrowest" to "widest" (in practice: non-tests before tests).
+ sort.Slice(metas, func(i, j int) bool {
+ return len(metas[i].CompiledGoFiles) < len(metas[j].CompiledGoFiles)
+ })
+
+ return metas, nil
+}
+
+func (s *snapshot) ReverseDependencies(ctx context.Context, id PackageID, transitive bool) (map[PackageID]*source.Metadata, error) {
+ if err := s.awaitLoaded(ctx); err != nil {
+ return nil, err
+ }
+ s.mu.Lock()
+ meta := s.meta
+ s.mu.Unlock()
+
+ var rdeps map[PackageID]*source.Metadata
+ if transitive {
+ rdeps = meta.reverseReflexiveTransitiveClosure(id)
+
+ // Remove the original package ID from the map.
+ // (Callers all want irreflexivity but it's easier
+ // to compute reflexively then subtract.)
+ delete(rdeps, id)
+
+ } else {
+ // direct reverse dependencies
+ rdeps = make(map[PackageID]*source.Metadata)
+ for _, rdepID := range meta.importedBy[id] {
+ if rdep := meta.metadata[rdepID]; rdep != nil {
+ rdeps[rdepID] = rdep
+ }
+ }
+ }
+
+ return rdeps, nil
+}
+
+func (s *snapshot) workspaceMetadata() (meta []*source.Metadata) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ for id := range s.workspacePackages {
+ meta = append(meta, s.meta.metadata[id])
+ }
+ return meta
+}
+
+// -- Active package tracking --
+//
+// We say a package is "active" if any of its files are open. After
+// type-checking we keep active packages in memory. The activePackages
+// peristent map does bookkeeping for the set of active packages.
+
+// getActivePackage returns a the memoized active package for id, if it exists.
+// If id is not active or has not yet been type-checked, it returns nil.
+func (s *snapshot) getActivePackage(id PackageID) *Package {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ if value, ok := s.activePackages.Get(id); ok {
+ return value.(*Package) // possibly nil, if we have already checked this id.
+ }
+ return nil
+}
+
+// memoizeActivePackage checks if pkg is active, and if so either records it in
+// the active packages map or returns the existing memoized active package for id.
+//
+// The resulting package is non-nil if and only if the specified package is open.
+func (s *snapshot) memoizeActivePackage(id PackageID, pkg *Package) (active *Package) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ if value, ok := s.activePackages.Get(id); ok {
+ return value.(*Package) // possibly nil, if we have already checked this id.
+ }
+
+ defer func() {
+ s.activePackages.Set(id, active, nil) // store the result either way: remember that pkg is not open
+ }()
+ for _, cgf := range pkg.Metadata().GoFiles {
+ if s.isOpenLocked(cgf) {
+ return pkg
+ }
+ }
+ for _, cgf := range pkg.Metadata().CompiledGoFiles {
+ if s.isOpenLocked(cgf) {
+ return pkg
+ }
+ }
+ return nil
+}
+
+func (s *snapshot) resetActivePackagesLocked() {
+ s.activePackages.Destroy()
+ s.activePackages = persistent.NewMap(packageIDLessInterface)
+}
+
+const fileExtensions = "go,mod,sum,work"
+
+func (s *snapshot) fileWatchingGlobPatterns(ctx context.Context) map[string]struct{} {
+ extensions := fileExtensions
+ for _, ext := range s.View().Options().TemplateExtensions {
+ extensions += "," + ext
+ }
+ // Work-around microsoft/vscode#100870 by making sure that we are,
+ // at least, watching the user's entire workspace. This will still be
+ // applied to every folder in the workspace.
+ patterns := map[string]struct{}{
+ fmt.Sprintf("**/*.{%s}", extensions): {},
+ }
+
+ // If GOWORK is outside the folder, ensure we are watching it.
+ gowork := s.view.effectiveGOWORK()
+ if gowork != "" && !source.InDir(s.view.folder.Filename(), gowork.Filename()) {
+ patterns[gowork.Filename()] = struct{}{}
+ }
+
+ // Add a pattern for each Go module in the workspace that is not within the view.
+ dirs := s.dirs(ctx)
+ for _, dir := range dirs {
+ dirName := dir.Filename()
+
+ // If the directory is within the view's folder, we're already watching
+ // it with the first pattern above.
+ if source.InDir(s.view.folder.Filename(), dirName) {
+ continue
+ }
+ // TODO(rstambler): If microsoft/vscode#3025 is resolved before
+ // microsoft/vscode#101042, we will need a work-around for Windows
+ // drive letter casing.
+ patterns[fmt.Sprintf("%s/**/*.{%s}", dirName, extensions)] = struct{}{}
+ }
+
+ // Some clients do not send notifications for changes to directories that
+ // contain Go code (golang/go#42348). To handle this, explicitly watch all
+ // of the directories in the workspace. We find them by adding the
+ // directories of every file in the snapshot's workspace directories.
+ // There may be thousands.
+ if pattern := s.getKnownSubdirsPattern(dirs); pattern != "" {
+ patterns[pattern] = struct{}{}
+ }
+
+ return patterns
+}
+
+func (s *snapshot) getKnownSubdirsPattern(wsDirs []span.URI) string {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ // First, process any pending changes and update the set of known
+ // subdirectories.
+ // It may change list of known subdirs and therefore invalidate the cache.
+ s.applyKnownSubdirsChangesLocked(wsDirs)
+
+ if s.knownSubdirsPatternCache == "" {
+ var builder strings.Builder
+ s.knownSubdirs.Range(func(uri span.URI) {
+ if builder.Len() == 0 {
+ builder.WriteString("{")
+ } else {
+ builder.WriteString(",")
+ }
+ builder.WriteString(uri.Filename())
+ })
+ if builder.Len() > 0 {
+ builder.WriteString("}")
+ s.knownSubdirsPatternCache = builder.String()
+ }
+ }
+
+ return s.knownSubdirsPatternCache
+}
+
+// collectAllKnownSubdirs collects all of the subdirectories within the
+// snapshot's workspace directories. None of the workspace directories are
+// included.
+func (s *snapshot) collectAllKnownSubdirs(ctx context.Context) {
+ dirs := s.dirs(ctx)
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ s.knownSubdirs.Destroy()
+ s.knownSubdirs = newKnownDirsSet()
+ s.knownSubdirsPatternCache = ""
+ s.files.Range(func(uri span.URI, fh source.FileHandle) {
+ s.addKnownSubdirLocked(uri, dirs)
+ })
+}
+
+func (s *snapshot) getKnownSubdirs(wsDirs []span.URI) knownDirsSet {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ // First, process any pending changes and update the set of known
+ // subdirectories.
+ s.applyKnownSubdirsChangesLocked(wsDirs)
+
+ return s.knownSubdirs.Clone()
+}
+
+func (s *snapshot) applyKnownSubdirsChangesLocked(wsDirs []span.URI) {
+ for _, c := range s.unprocessedSubdirChanges {
+ if c.isUnchanged {
+ continue
+ }
+ if !c.exists {
+ s.removeKnownSubdirLocked(c.fileHandle.URI())
+ } else {
+ s.addKnownSubdirLocked(c.fileHandle.URI(), wsDirs)
+ }
+ }
+ s.unprocessedSubdirChanges = nil
+}
+
+func (s *snapshot) addKnownSubdirLocked(uri span.URI, dirs []span.URI) {
+ dir := filepath.Dir(uri.Filename())
+ // First check if the directory is already known, because then we can
+ // return early.
+ if s.knownSubdirs.Contains(span.URIFromPath(dir)) {
+ return
+ }
+ var matched span.URI
+ for _, wsDir := range dirs {
+ if source.InDir(wsDir.Filename(), dir) {
+ matched = wsDir
+ break
+ }
+ }
+ // Don't watch any directory outside of the workspace directories.
+ if matched == "" {
+ return
+ }
+ for {
+ if dir == "" || dir == matched.Filename() {
+ break
+ }
+ uri := span.URIFromPath(dir)
+ if s.knownSubdirs.Contains(uri) {
+ break
+ }
+ s.knownSubdirs.Insert(uri)
+ dir = filepath.Dir(dir)
+ s.knownSubdirsPatternCache = ""
+ }
+}
+
+func (s *snapshot) removeKnownSubdirLocked(uri span.URI) {
+ dir := filepath.Dir(uri.Filename())
+ for dir != "" {
+ uri := span.URIFromPath(dir)
+ if !s.knownSubdirs.Contains(uri) {
+ break
+ }
+ if info, _ := os.Stat(dir); info == nil {
+ s.knownSubdirs.Remove(uri)
+ s.knownSubdirsPatternCache = ""
+ }
+ dir = filepath.Dir(dir)
+ }
+}
+
+// knownFilesInDir returns the files known to the given snapshot that are in
+// the given directory. It does not respect symlinks.
+func (s *snapshot) knownFilesInDir(ctx context.Context, dir span.URI) []span.URI {
+ var files []span.URI
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ s.files.Range(func(uri span.URI, fh source.FileHandle) {
+ if source.InDir(dir.Filename(), uri.Filename()) {
+ files = append(files, uri)
+ }
+ })
+ return files
+}
+
+func (s *snapshot) ActiveMetadata(ctx context.Context) ([]*source.Metadata, error) {
+ if err := s.awaitLoaded(ctx); err != nil {
+ return nil, err
+ }
+ return s.workspaceMetadata(), nil
+}
+
+// Symbols extracts and returns symbol information for every file contained in
+// a loaded package. It awaits snapshot loading.
+//
+// TODO(rfindley): move this to the top of cache/symbols.go
+func (s *snapshot) Symbols(ctx context.Context) (map[span.URI][]source.Symbol, error) {
+ if err := s.awaitLoaded(ctx); err != nil {
+ return nil, err
+ }
+
+ // Build symbols for all loaded Go files.
+ s.mu.Lock()
+ meta := s.meta
+ s.mu.Unlock()
+
+ goFiles := make(map[span.URI]struct{})
+ for _, m := range meta.metadata {
+ for _, uri := range m.GoFiles {
+ goFiles[uri] = struct{}{}
+ }
+ for _, uri := range m.CompiledGoFiles {
+ goFiles[uri] = struct{}{}
+ }
+ }
+
+ // Symbolize them in parallel.
+ var (
+ group errgroup.Group
+ nprocs = 2 * runtime.GOMAXPROCS(-1) // symbolize is a mix of I/O and CPU
+ resultMu sync.Mutex
+ result = make(map[span.URI][]source.Symbol)
+ )
+ group.SetLimit(nprocs)
+ for uri := range goFiles {
+ uri := uri
+ group.Go(func() error {
+ symbols, err := s.symbolize(ctx, uri)
+ if err != nil {
+ return err
+ }
+ resultMu.Lock()
+ result[uri] = symbols
+ resultMu.Unlock()
+ return nil
+ })
+ }
+ // Keep going on errors, but log the first failure.
+ // Partial results are better than no symbol results.
+ if err := group.Wait(); err != nil {
+ event.Error(ctx, "getting snapshot symbols", err)
+ }
+ return result, nil
+}
+
+func (s *snapshot) AllMetadata(ctx context.Context) ([]*source.Metadata, error) {
+ if err := s.awaitLoaded(ctx); err != nil {
+ return nil, err
+ }
+
+ s.mu.Lock()
+ g := s.meta
+ s.mu.Unlock()
+
+ meta := make([]*source.Metadata, 0, len(g.metadata))
+ for _, m := range g.metadata {
+ meta = append(meta, m)
+ }
+ return meta, nil
+}
+
+// TODO(rfindley): clarify that this is only active modules. Or update to just
+// use findRootPattern.
+func (s *snapshot) GoModForFile(uri span.URI) span.URI {
+ return moduleForURI(s.workspaceModFiles, uri)
+}
+
+func moduleForURI(modFiles map[span.URI]struct{}, uri span.URI) span.URI {
+ var match span.URI
+ for modURI := range modFiles {
+ if !source.InDir(span.Dir(modURI).Filename(), uri.Filename()) {
+ continue
+ }
+ if len(modURI) > len(match) {
+ match = modURI
+ }
+ }
+ return match
+}
+
+// nearestModFile finds the nearest go.mod file contained in the directory
+// containing uri, or a parent of that directory.
+//
+// The given uri must be a file, not a directory.
+func nearestModFile(ctx context.Context, uri span.URI, fs source.FileSource) (span.URI, error) {
+ // TODO(rfindley)
+ dir := filepath.Dir(uri.Filename())
+ mod, err := findRootPattern(ctx, dir, "go.mod", fs)
+ if err != nil {
+ return "", err
+ }
+ return span.URIFromPath(mod), nil
+}
+
+func (s *snapshot) Metadata(id PackageID) *source.Metadata {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.meta.metadata[id]
+}
+
+// clearShouldLoad clears package IDs that no longer need to be reloaded after
+// scopes has been loaded.
+func (s *snapshot) clearShouldLoad(scopes ...loadScope) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ for _, scope := range scopes {
+ switch scope := scope.(type) {
+ case packageLoadScope:
+ scopePath := PackagePath(scope)
+ var toDelete []PackageID
+ for id, pkgPaths := range s.shouldLoad {
+ for _, pkgPath := range pkgPaths {
+ if pkgPath == scopePath {
+ toDelete = append(toDelete, id)
+ }
+ }
+ }
+ for _, id := range toDelete {
+ delete(s.shouldLoad, id)
+ }
+ case fileLoadScope:
+ uri := span.URI(scope)
+ ids := s.meta.ids[uri]
+ for _, id := range ids {
+ delete(s.shouldLoad, id)
+ }
+ }
+ }
+}
+
+// noValidMetadataForURILocked reports whether there is any valid metadata for
+// the given URI.
+func (s *snapshot) noValidMetadataForURILocked(uri span.URI) bool {
+ for _, id := range s.meta.ids[uri] {
+ if _, ok := s.meta.metadata[id]; ok {
+ return false
+ }
+ }
+ return true
+}
+
+func (s *snapshot) isWorkspacePackage(id PackageID) bool {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ _, ok := s.workspacePackages[id]
+ return ok
+}
+
+func (s *snapshot) FindFile(uri span.URI) source.FileHandle {
+ s.view.markKnown(uri)
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ result, _ := s.files.Get(uri)
+ return result
+}
+
+// GetFile returns a File for the given URI. If the file is unknown it is added
+// to the managed set.
+//
+// GetFile succeeds even if the file does not exist. A non-nil error return
+// indicates some type of internal error, for example if ctx is cancelled.
+func (s *snapshot) GetFile(ctx context.Context, uri span.URI) (source.FileHandle, error) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ return lockedSnapshot{s}.GetFile(ctx, uri)
+}
+
+// A lockedSnapshot implements the source.FileSource interface while holding
+// the lock for the wrapped snapshot.
+type lockedSnapshot struct{ wrapped *snapshot }
+
+func (s lockedSnapshot) GetFile(ctx context.Context, uri span.URI) (source.FileHandle, error) {
+ s.wrapped.view.markKnown(uri)
+ if fh, ok := s.wrapped.files.Get(uri); ok {
+ return fh, nil
+ }
+
+ fh, err := s.wrapped.view.fs.GetFile(ctx, uri) // read the file
+ if err != nil {
+ return nil, err
+ }
+ s.wrapped.files.Set(uri, fh)
+ return fh, nil
+}
+
+func (s *snapshot) IsOpen(uri span.URI) bool {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.isOpenLocked(uri)
+
+}
+
+func (s *snapshot) openFiles() []source.FileHandle {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ var open []source.FileHandle
+ s.files.Range(func(uri span.URI, fh source.FileHandle) {
+ if isFileOpen(fh) {
+ open = append(open, fh)
+ }
+ })
+ return open
+}
+
+func (s *snapshot) isOpenLocked(uri span.URI) bool {
+ fh, _ := s.files.Get(uri)
+ return isFileOpen(fh)
+}
+
+func isFileOpen(fh source.FileHandle) bool {
+ _, open := fh.(*Overlay)
+ return open
+}
+
+func (s *snapshot) awaitLoaded(ctx context.Context) error {
+ loadErr := s.awaitLoadedAllErrors(ctx)
+
+ // TODO(rfindley): eliminate this function as part of simplifying
+ // CriticalErrors.
+ if loadErr != nil {
+ return loadErr.MainError
+ }
+ return nil
+}
+
+func (s *snapshot) GetCriticalError(ctx context.Context) *source.CriticalError {
+ // If we couldn't compute workspace mod files, then the load below is
+ // invalid.
+ //
+ // TODO(rfindley): is this a clear error to present to the user?
+ if s.workspaceModFilesErr != nil {
+ return &source.CriticalError{MainError: s.workspaceModFilesErr}
+ }
+
+ loadErr := s.awaitLoadedAllErrors(ctx)
+ if loadErr != nil && errors.Is(loadErr.MainError, context.Canceled) {
+ return nil
+ }
+
+ // Even if packages didn't fail to load, we still may want to show
+ // additional warnings.
+ if loadErr == nil {
+ active, _ := s.ActiveMetadata(ctx)
+ if msg := shouldShowAdHocPackagesWarning(s, active); msg != "" {
+ return &source.CriticalError{
+ MainError: errors.New(msg),
+ }
+ }
+ // Even if workspace packages were returned, there still may be an error
+ // with the user's workspace layout. Workspace packages that only have the
+ // ID "command-line-arguments" are usually a symptom of a bad workspace
+ // configuration.
+ //
+ // This heuristic is path-dependent: we only get command-line-arguments
+ // packages when we've loaded using file scopes, which only occurs
+ // on-demand or via orphaned file reloading.
+ //
+ // TODO(rfindley): re-evaluate this heuristic.
+ if containsCommandLineArguments(active) {
+ err, diags := s.workspaceLayoutError(ctx)
+ if err != nil {
+ if ctx.Err() != nil {
+ return nil // see the API documentation for source.Snapshot
+ }
+ return &source.CriticalError{
+ MainError: err,
+ Diagnostics: diags,
+ }
+ }
+ }
+ return nil
+ }
+
+ if errMsg := loadErr.MainError.Error(); strings.Contains(errMsg, "cannot find main module") || strings.Contains(errMsg, "go.mod file not found") {
+ err, diags := s.workspaceLayoutError(ctx)
+ if err != nil {
+ if ctx.Err() != nil {
+ return nil // see the API documentation for source.Snapshot
+ }
+ return &source.CriticalError{
+ MainError: err,
+ Diagnostics: diags,
+ }
+ }
+ }
+ return loadErr
+}
+
+// A portion of this text is expected by TestBrokenWorkspace_OutsideModule.
+const adHocPackagesWarning = `You are outside of a module and outside of $GOPATH/src.
+If you are using modules, please open your editor to a directory in your module.
+If you believe this warning is incorrect, please file an issue: https://github.com/golang/go/issues/new.`
+
+func shouldShowAdHocPackagesWarning(snapshot source.Snapshot, active []*source.Metadata) string {
+ if !snapshot.ValidBuildConfiguration() {
+ for _, m := range active {
+ // A blank entry in DepsByImpPath
+ // indicates a missing dependency.
+ for _, importID := range m.DepsByImpPath {
+ if importID == "" {
+ return adHocPackagesWarning
+ }
+ }
+ }
+ }
+ return ""
+}
+
+func containsCommandLineArguments(metas []*source.Metadata) bool {
+ for _, m := range metas {
+ if source.IsCommandLineArguments(m.ID) {
+ return true
+ }
+ }
+ return false
+}
+
+func (s *snapshot) awaitLoadedAllErrors(ctx context.Context) *source.CriticalError {
+ // Do not return results until the snapshot's view has been initialized.
+ s.AwaitInitialized(ctx)
+
+ // TODO(rfindley): Should we be more careful about returning the
+ // initialization error? Is it possible for the initialization error to be
+ // corrected without a successful reinitialization?
+ if err := s.getInitializationError(); err != nil {
+ return err
+ }
+
+ // TODO(rfindley): revisit this handling. Calling reloadWorkspace with a
+ // cancelled context should have the same effect, so this preemptive handling
+ // should not be necessary.
+ //
+ // Also: GetCriticalError ignores context cancellation errors. Should we be
+ // returning nil here?
+ if ctx.Err() != nil {
+ return &source.CriticalError{MainError: ctx.Err()}
+ }
+
+ // TODO(rfindley): reloading is not idempotent: if we try to reload or load
+ // orphaned files below and fail, we won't try again. For that reason, we
+ // could get different results from subsequent calls to this function, which
+ // may cause critical errors to be suppressed.
+
+ if err := s.reloadWorkspace(ctx); err != nil {
+ diags := s.extractGoCommandErrors(ctx, err)
+ return &source.CriticalError{
+ MainError: err,
+ Diagnostics: diags,
+ }
+ }
+
+ if err := s.reloadOrphanedOpenFiles(ctx); err != nil {
+ diags := s.extractGoCommandErrors(ctx, err)
+ return &source.CriticalError{
+ MainError: err,
+ Diagnostics: diags,
+ }
+ }
+ return nil
+}
+
+func (s *snapshot) getInitializationError() *source.CriticalError {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ return s.initializedErr
+}
+
+func (s *snapshot) AwaitInitialized(ctx context.Context) {
+ select {
+ case <-ctx.Done():
+ return
+ case <-s.view.initialWorkspaceLoad:
+ }
+ // We typically prefer to run something as intensive as the IWL without
+ // blocking. I'm not sure if there is a way to do that here.
+ s.initialize(ctx, false)
+}
+
+// reloadWorkspace reloads the metadata for all invalidated workspace packages.
+func (s *snapshot) reloadWorkspace(ctx context.Context) error {
+ var scopes []loadScope
+ var seen map[PackagePath]bool
+ s.mu.Lock()
+ for _, pkgPaths := range s.shouldLoad {
+ for _, pkgPath := range pkgPaths {
+ if seen == nil {
+ seen = make(map[PackagePath]bool)
+ }
+ if seen[pkgPath] {
+ continue
+ }
+ seen[pkgPath] = true
+ scopes = append(scopes, packageLoadScope(pkgPath))
+ }
+ }
+ s.mu.Unlock()
+
+ if len(scopes) == 0 {
+ return nil
+ }
+
+ // If the view's build configuration is invalid, we cannot reload by
+ // package path. Just reload the directory instead.
+ if !s.ValidBuildConfiguration() {
+ scopes = []loadScope{viewLoadScope("LOAD_INVALID_VIEW")}
+ }
+
+ err := s.load(ctx, false, scopes...)
+
+ // Unless the context was canceled, set "shouldLoad" to false for all
+ // of the metadata we attempted to load.
+ if !errors.Is(err, context.Canceled) {
+ s.clearShouldLoad(scopes...)
+ }
+
+ return err
+}
+
+func (s *snapshot) reloadOrphanedOpenFiles(ctx context.Context) error {
+ // When we load ./... or a package path directly, we may not get packages
+ // that exist only in overlays. As a workaround, we search all of the files
+ // available in the snapshot and reload their metadata individually using a
+ // file= query if the metadata is unavailable.
+ files := s.orphanedOpenFiles()
+
+ // Files without a valid package declaration can't be loaded. Don't try.
+ var scopes []loadScope
+ for _, file := range files {
+ pgf, err := s.ParseGo(ctx, file, source.ParseHeader)
+ if err != nil {
+ continue
+ }
+ if !pgf.File.Package.IsValid() {
+ continue
+ }
+
+ scopes = append(scopes, fileLoadScope(file.URI()))
+ }
+
+ if len(scopes) == 0 {
+ return nil
+ }
+
+ // The regtests match this exact log message, keep them in sync.
+ event.Log(ctx, "reloadOrphanedFiles reloading", tag.Query.Of(scopes))
+ err := s.load(ctx, false, scopes...)
+
+ // If we failed to load some files, i.e. they have no metadata,
+ // mark the failures so we don't bother retrying until the file's
+ // content changes.
+ //
+ // TODO(rstambler): This may be an overestimate if the load stopped
+ // early for an unrelated errors. Add a fallback?
+ //
+ // Check for context cancellation so that we don't incorrectly mark files
+ // as unloadable, but don't return before setting all workspace packages.
+ if ctx.Err() == nil && err != nil {
+ event.Error(ctx, "reloadOrphanedFiles: failed to load", err, tag.Query.Of(scopes))
+ s.mu.Lock()
+ for _, scope := range scopes {
+ uri := span.URI(scope.(fileLoadScope))
+ if s.noValidMetadataForURILocked(uri) {
+ s.unloadableFiles[uri] = struct{}{}
+ }
+ }
+ s.mu.Unlock()
+ }
+ return nil
+}
+
+func (s *snapshot) orphanedOpenFiles() []source.FileHandle {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ var files []source.FileHandle
+ s.files.Range(func(uri span.URI, fh source.FileHandle) {
+ // Only consider open files, which will be represented as overlays.
+ if _, isOverlay := fh.(*Overlay); !isOverlay {
+ return
+ }
+ // Don't try to reload metadata for go.mod files.
+ if s.view.FileKind(fh) != source.Go {
+ return
+ }
+ // If the URI doesn't belong to this view, then it's not in a workspace
+ // package and should not be reloaded directly.
+ if !source.InDir(s.view.folder.Filename(), uri.Filename()) {
+ return
+ }
+ // Don't reload metadata for files we've already deemed unloadable.
+ if _, ok := s.unloadableFiles[uri]; ok {
+ return
+ }
+ if s.noValidMetadataForURILocked(uri) {
+ files = append(files, fh)
+ }
+ })
+ return files
+}
+
+// TODO(golang/go#53756): this function needs to consider more than just the
+// absolute URI, for example:
+// - the position of /vendor/ with respect to the relevant module root
+// - whether or not go.work is in use (as vendoring isn't supported in workspace mode)
+//
+// Most likely, each call site of inVendor needs to be reconsidered to
+// understand and correctly implement the desired behavior.
+func inVendor(uri span.URI) bool {
+ _, after, found := cut(string(uri), "/vendor/")
+ // Only subdirectories of /vendor/ are considered vendored
+ // (/vendor/a/foo.go is vendored, /vendor/foo.go is not).
+ return found && strings.Contains(after, "/")
+}
+
+// TODO(adonovan): replace with strings.Cut when we can assume go1.18.
+func cut(s, sep string) (before, after string, found bool) {
+ if i := strings.Index(s, sep); i >= 0 {
+ return s[:i], s[i+len(sep):], true
+ }
+ return s, "", false
+}
+
+// unappliedChanges is a file source that handles an uncloned snapshot.
+type unappliedChanges struct {
+ originalSnapshot *snapshot
+ changes map[span.URI]*fileChange
+}
+
+func (ac *unappliedChanges) GetFile(ctx context.Context, uri span.URI) (source.FileHandle, error) {
+ if c, ok := ac.changes[uri]; ok {
+ return c.fileHandle, nil
+ }
+ return ac.originalSnapshot.GetFile(ctx, uri)
+}
+
+func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileChange, forceReloadMetadata bool) (*snapshot, func()) {
+ ctx, done := event.Start(ctx, "snapshot.clone")
+ defer done()
+
+ reinit := false
+ wsModFiles, wsModFilesErr := s.workspaceModFiles, s.workspaceModFilesErr
+
+ if workURI := s.view.effectiveGOWORK(); workURI != "" {
+ if change, ok := changes[workURI]; ok {
+ wsModFiles, wsModFilesErr = computeWorkspaceModFiles(ctx, s.view.gomod, workURI, s.view.effectiveGO111MODULE(), &unappliedChanges{
+ originalSnapshot: s,
+ changes: changes,
+ })
+ // TODO(rfindley): don't rely on 'isUnchanged' here. Use a content hash instead.
+ reinit = change.fileHandle.Saved() && !change.isUnchanged
+ }
+ }
+
+ // Reinitialize if any workspace mod file has changed on disk.
+ for uri, change := range changes {
+ if _, ok := wsModFiles[uri]; ok && change.fileHandle.Saved() && !change.isUnchanged {
+ reinit = true
+ }
+ }
+
+ // Finally, process sumfile changes that may affect loading.
+ for uri, change := range changes {
+ if !change.fileHandle.Saved() {
+ continue // like with go.mod files, we only reinit when things are saved
+ }
+ if filepath.Base(uri.Filename()) == "go.work.sum" && s.view.gowork != "" {
+ if filepath.Dir(uri.Filename()) == filepath.Dir(s.view.gowork) {
+ reinit = true
+ }
+ }
+ if filepath.Base(uri.Filename()) == "go.sum" {
+ dir := filepath.Dir(uri.Filename())
+ modURI := span.URIFromPath(filepath.Join(dir, "go.mod"))
+ if _, active := wsModFiles[modURI]; active {
+ reinit = true
+ }
+ }
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ // Changes to vendor tree may require reinitialization,
+ // either because of an initialization error
+ // (e.g. "inconsistent vendoring detected"), or because
+ // one or more modules may have moved into or out of the
+ // vendor tree after 'go mod vendor' or 'rm -fr vendor/'.
+ for uri := range changes {
+ if inVendor(uri) && s.initializedErr != nil ||
+ strings.HasSuffix(string(uri), "/vendor/modules.txt") {
+ reinit = true
+ break
+ }
+ }
+
+ bgCtx, cancel := context.WithCancel(bgCtx)
+ result := &snapshot{
+ sequenceID: s.sequenceID + 1,
+ globalID: nextSnapshotID(),
+ store: s.store,
+ view: s.view,
+ backgroundCtx: bgCtx,
+ cancel: cancel,
+ builtin: s.builtin,
+ initialized: s.initialized,
+ initializedErr: s.initializedErr,
+ packages: s.packages.Clone(),
+ activePackages: s.activePackages.Clone(),
+ analyses: s.analyses.Clone(),
+ files: s.files.Clone(),
+ parseCache: s.parseCache,
+ symbolizeHandles: s.symbolizeHandles.Clone(),
+ workspacePackages: make(map[PackageID]PackagePath, len(s.workspacePackages)),
+ unloadableFiles: make(map[span.URI]struct{}, len(s.unloadableFiles)),
+ parseModHandles: s.parseModHandles.Clone(),
+ parseWorkHandles: s.parseWorkHandles.Clone(),
+ modTidyHandles: s.modTidyHandles.Clone(),
+ modWhyHandles: s.modWhyHandles.Clone(),
+ modVulnHandles: s.modVulnHandles.Clone(),
+ knownSubdirs: s.knownSubdirs.Clone(),
+ workspaceModFiles: wsModFiles,
+ workspaceModFilesErr: wsModFilesErr,
+ }
+
+ // The snapshot should be initialized if either s was uninitialized, or we've
+ // detected a change that triggers reinitialization.
+ if reinit {
+ result.initialized = false
+ }
+
+ // Create a lease on the new snapshot.
+ // (Best to do this early in case the code below hides an
+ // incref/decref operation that might destroy it prematurely.)
+ release := result.Acquire()
+
+ // Copy the set of unloadable files.
+ //
+ // TODO(rfindley): this looks wrong. Shouldn't we clear unloadableFiles on
+ // changes to environment or workspace layout, or more generally on any
+ // metadata change?
+ //
+ // Maybe not, as major configuration changes cause a new view.
+ for k, v := range s.unloadableFiles {
+ result.unloadableFiles[k] = v
+ }
+
+ // Add all of the known subdirectories, but don't update them for the
+ // changed files. We need to rebuild the workspace module to know the
+ // true set of known subdirectories, but we don't want to do that in clone.
+ result.knownSubdirs = s.knownSubdirs.Clone()
+ result.knownSubdirsPatternCache = s.knownSubdirsPatternCache
+ for _, c := range changes {
+ result.unprocessedSubdirChanges = append(result.unprocessedSubdirChanges, c)
+ }
+
+ // directIDs keeps track of package IDs that have directly changed.
+ // Note: this is not a set, it's a map from id to invalidateMetadata.
+ directIDs := map[PackageID]bool{}
+
+ // Invalidate all package metadata if the workspace module has changed.
+ if reinit {
+ for k := range s.meta.metadata {
+ directIDs[k] = true
+ }
+ }
+
+ // Compute invalidations based on file changes.
+ anyImportDeleted := false // import deletions can resolve cycles
+ anyFileOpenedOrClosed := false // opened files affect workspace packages
+ anyFileAdded := false // adding a file can resolve missing dependencies
+
+ for uri, change := range changes {
+ // Invalidate go.mod-related handles.
+ result.modTidyHandles.Delete(uri)
+ result.modWhyHandles.Delete(uri)
+ result.modVulnHandles.Delete(uri)
+
+ // Invalidate handles for cached symbols.
+ result.symbolizeHandles.Delete(uri)
+
+ // The original FileHandle for this URI is cached on the snapshot.
+ originalFH, _ := s.files.Get(uri)
+ var originalOpen, newOpen bool
+ _, originalOpen = originalFH.(*Overlay)
+ _, newOpen = change.fileHandle.(*Overlay)
+ anyFileOpenedOrClosed = anyFileOpenedOrClosed || (originalOpen != newOpen)
+ anyFileAdded = anyFileAdded || (originalFH == nil && change.fileHandle != nil)
+
+ // If uri is a Go file, check if it has changed in a way that would
+ // invalidate metadata. Note that we can't use s.view.FileKind here,
+ // because the file type that matters is not what the *client* tells us,
+ // but what the Go command sees.
+ var invalidateMetadata, pkgFileChanged, importDeleted bool
+ if strings.HasSuffix(uri.Filename(), ".go") {
+ invalidateMetadata, pkgFileChanged, importDeleted = metadataChanges(ctx, s, originalFH, change.fileHandle)
+ }
+
+ invalidateMetadata = invalidateMetadata || forceReloadMetadata || reinit
+ anyImportDeleted = anyImportDeleted || importDeleted
+
+ // Mark all of the package IDs containing the given file.
+ filePackageIDs := invalidatedPackageIDs(uri, s.meta.ids, pkgFileChanged)
+ for id := range filePackageIDs {
+ directIDs[id] = directIDs[id] || invalidateMetadata // may insert 'false'
+ }
+
+ // Invalidate the previous modTidyHandle if any of the files have been
+ // saved or if any of the metadata has been invalidated.
+ if invalidateMetadata || fileWasSaved(originalFH, change.fileHandle) {
+ // TODO(maybe): Only delete mod handles for
+ // which the withoutURI is relevant.
+ // Requires reverse-engineering the go command. (!)
+ result.modTidyHandles.Clear()
+ result.modWhyHandles.Clear()
+ result.modVulnHandles.Clear()
+ }
+
+ result.parseModHandles.Delete(uri)
+ result.parseWorkHandles.Delete(uri)
+ // Handle the invalidated file; it may have new contents or not exist.
+ if !change.exists {
+ result.files.Delete(uri)
+ } else {
+ result.files.Set(uri, change.fileHandle)
+ }
+
+ // Make sure to remove the changed file from the unloadable set.
+ delete(result.unloadableFiles, uri)
+ }
+
+ // Deleting an import can cause list errors due to import cycles to be
+ // resolved. The best we can do without parsing the list error message is to
+ // hope that list errors may have been resolved by a deleted import.
+ //
+ // We could do better by parsing the list error message. We already do this
+ // to assign a better range to the list error, but for such critical
+ // functionality as metadata, it's better to be conservative until it proves
+ // impractical.
+ //
+ // We could also do better by looking at which imports were deleted and
+ // trying to find cycles they are involved in. This fails when the file goes
+ // from an unparseable state to a parseable state, as we don't have a
+ // starting point to compare with.
+ if anyImportDeleted {
+ for id, metadata := range s.meta.metadata {
+ if len(metadata.Errors) > 0 {
+ directIDs[id] = true
+ }
+ }
+ }
+
+ // Adding a file can resolve missing dependencies from existing packages.
+ //
+ // We could be smart here and try to guess which packages may have been
+ // fixed, but until that proves necessary, just invalidate metadata for any
+ // package with missing dependencies.
+ if anyFileAdded {
+ for id, metadata := range s.meta.metadata {
+ for _, impID := range metadata.DepsByImpPath {
+ if impID == "" { // missing import
+ directIDs[id] = true
+ break
+ }
+ }
+ }
+ }
+
+ // Invalidate reverse dependencies too.
+ // idsToInvalidate keeps track of transitive reverse dependencies.
+ // If an ID is present in the map, invalidate its types.
+ // If an ID's value is true, invalidate its metadata too.
+ idsToInvalidate := map[PackageID]bool{}
+ var addRevDeps func(PackageID, bool)
+ addRevDeps = func(id PackageID, invalidateMetadata bool) {
+ current, seen := idsToInvalidate[id]
+ newInvalidateMetadata := current || invalidateMetadata
+
+ // If we've already seen this ID, and the value of invalidate
+ // metadata has not changed, we can return early.
+ if seen && current == newInvalidateMetadata {
+ return
+ }
+ idsToInvalidate[id] = newInvalidateMetadata
+ for _, rid := range s.meta.importedBy[id] {
+ addRevDeps(rid, invalidateMetadata)
+ }
+ }
+ for id, invalidateMetadata := range directIDs {
+ addRevDeps(id, invalidateMetadata)
+ }
+
+ // Delete invalidated package type information.
+ for id := range idsToInvalidate {
+ result.packages.Delete(id)
+ result.activePackages.Delete(id)
+ }
+
+ // Delete invalidated analysis actions.
+ var actionsToDelete []analysisKey
+ result.analyses.Range(func(k, _ interface{}) {
+ key := k.(analysisKey)
+ if _, ok := idsToInvalidate[key.pkgid]; ok {
+ actionsToDelete = append(actionsToDelete, key)
+ }
+ })
+ for _, key := range actionsToDelete {
+ result.analyses.Delete(key)
+ }
+
+ // If a file has been deleted, we must delete metadata for all packages
+ // containing that file.
+ //
+ // TODO(rfindley): why not keep invalid metadata in this case? If we
+ // otherwise allow operate on invalid metadata, why not continue to do so,
+ // skipping the missing file?
+ skipID := map[PackageID]bool{}
+ for _, c := range changes {
+ if c.exists {
+ continue
+ }
+ // The file has been deleted.
+ if ids, ok := s.meta.ids[c.fileHandle.URI()]; ok {
+ for _, id := range ids {
+ skipID[id] = true
+ }
+ }
+ }
+
+ // Any packages that need loading in s still need loading in the new
+ // snapshot.
+ for k, v := range s.shouldLoad {
+ if result.shouldLoad == nil {
+ result.shouldLoad = make(map[PackageID][]PackagePath)
+ }
+ result.shouldLoad[k] = v
+ }
+
+ // Compute which metadata updates are required. We only need to invalidate
+ // packages directly containing the affected file, and only if it changed in
+ // a relevant way.
+ metadataUpdates := make(map[PackageID]*source.Metadata)
+ for k, v := range s.meta.metadata {
+ invalidateMetadata := idsToInvalidate[k]
+
+ // For metadata that has been newly invalidated, capture package paths
+ // requiring reloading in the shouldLoad map.
+ if invalidateMetadata && !source.IsCommandLineArguments(v.ID) {
+ if result.shouldLoad == nil {
+ result.shouldLoad = make(map[PackageID][]PackagePath)
+ }
+ needsReload := []PackagePath{v.PkgPath}
+ if v.ForTest != "" && v.ForTest != v.PkgPath {
+ // When reloading test variants, always reload their ForTest package as
+ // well. Otherwise, we may miss test variants in the resulting load.
+ //
+ // TODO(rfindley): is this actually sufficient? Is it possible that
+ // other test variants may be invalidated? Either way, we should
+ // determine exactly what needs to be reloaded here.
+ needsReload = append(needsReload, v.ForTest)
+ }
+ result.shouldLoad[k] = needsReload
+ }
+
+ // Check whether the metadata should be deleted.
+ if skipID[k] || invalidateMetadata {
+ metadataUpdates[k] = nil
+ continue
+ }
+ }
+
+ // Update metadata, if necessary.
+ result.meta = s.meta.Clone(metadataUpdates)
+
+ // Update workspace and active packages, if necessary.
+ if result.meta != s.meta || anyFileOpenedOrClosed {
+ result.workspacePackages = computeWorkspacePackagesLocked(result, result.meta)
+ result.resetActivePackagesLocked()
+ } else {
+ result.workspacePackages = s.workspacePackages
+ }
+
+ // Don't bother copying the importedBy graph,
+ // as it changes each time we update metadata.
+
+ // TODO(rfindley): consolidate the this workspace mode detection with
+ // workspace invalidation.
+ workspaceModeChanged := s.workspaceMode() != result.workspaceMode()
+
+ // If the snapshot's workspace mode has changed, the packages loaded using
+ // the previous mode are no longer relevant, so clear them out.
+ if workspaceModeChanged {
+ result.workspacePackages = map[PackageID]PackagePath{}
+ }
+ result.dumpWorkspace("clone")
+ return result, release
+}
+
+// invalidatedPackageIDs returns all packages invalidated by a change to uri.
+// If we haven't seen this URI before, we guess based on files in the same
+// directory. This is of course incorrect in build systems where packages are
+// not organized by directory.
+//
+// If packageFileChanged is set, the file is either a new file, or has a new
+// package name. In this case, all known packages in the directory will be
+// invalidated.
+func invalidatedPackageIDs(uri span.URI, known map[span.URI][]PackageID, packageFileChanged bool) map[PackageID]struct{} {
+ invalidated := make(map[PackageID]struct{})
+
+ // At a minimum, we invalidate packages known to contain uri.
+ for _, id := range known[uri] {
+ invalidated[id] = struct{}{}
+ }
+
+ // If the file didn't move to a new package, we should only invalidate the
+ // packages it is currently contained inside.
+ if !packageFileChanged && len(invalidated) > 0 {
+ return invalidated
+ }
+
+ // This is a file we don't yet know about, or which has moved packages. Guess
+ // relevant packages by considering files in the same directory.
+
+ // Cache of FileInfo to avoid unnecessary stats for multiple files in the
+ // same directory.
+ stats := make(map[string]struct {
+ os.FileInfo
+ error
+ })
+ getInfo := func(dir string) (os.FileInfo, error) {
+ if res, ok := stats[dir]; ok {
+ return res.FileInfo, res.error
+ }
+ fi, err := os.Stat(dir)
+ stats[dir] = struct {
+ os.FileInfo
+ error
+ }{fi, err}
+ return fi, err
+ }
+ dir := filepath.Dir(uri.Filename())
+ fi, err := getInfo(dir)
+ if err == nil {
+ // Aggregate all possibly relevant package IDs.
+ for knownURI, ids := range known {
+ knownDir := filepath.Dir(knownURI.Filename())
+ knownFI, err := getInfo(knownDir)
+ if err != nil {
+ continue
+ }
+ if os.SameFile(fi, knownFI) {
+ for _, id := range ids {
+ invalidated[id] = struct{}{}
+ }
+ }
+ }
+ }
+ return invalidated
+}
+
+// fileWasSaved reports whether the FileHandle passed in has been saved. It
+// accomplishes this by checking to see if the original and current FileHandles
+// are both overlays, and if the current FileHandle is saved while the original
+// FileHandle was not saved.
+func fileWasSaved(originalFH, currentFH source.FileHandle) bool {
+ c, ok := currentFH.(*Overlay)
+ if !ok || c == nil {
+ return true
+ }
+ o, ok := originalFH.(*Overlay)
+ if !ok || o == nil {
+ return c.saved
+ }
+ return !o.saved && c.saved
+}
+
+// metadataChanges detects features of the change from oldFH->newFH that may
+// affect package metadata.
+//
+// It uses lockedSnapshot to access cached parse information. lockedSnapshot
+// must be locked.
+//
+// The result parameters have the following meaning:
+// - invalidate means that package metadata for packages containing the file
+// should be invalidated.
+// - pkgFileChanged means that the file->package associates for the file have
+// changed (possibly because the file is new, or because its package name has
+// changed).
+// - importDeleted means that an import has been deleted, or we can't
+// determine if an import was deleted due to errors.
+func metadataChanges(ctx context.Context, lockedSnapshot *snapshot, oldFH, newFH source.FileHandle) (invalidate, pkgFileChanged, importDeleted bool) {
+ if oldFH == nil || newFH == nil { // existential changes
+ changed := (oldFH == nil) != (newFH == nil)
+ return changed, changed, (newFH == nil) // we don't know if an import was deleted
+ }
+
+ // If the file hasn't changed, there's no need to reload.
+ if oldFH.FileIdentity() == newFH.FileIdentity() {
+ return false, false, false
+ }
+
+ // Parse headers to compare package names and imports.
+ oldHeads, _, oldErr := lockedSnapshot.parseCache.parseFiles(ctx, source.ParseHeader, oldFH)
+ newHeads, _, newErr := lockedSnapshot.parseCache.parseFiles(ctx, source.ParseHeader, newFH)
+
+ if oldErr != nil || newErr != nil {
+ // TODO(rfindley): we can get here if newFH does not exist. There is
+ // asymmetry, in that newFH may be non-nil even if the underlying file does
+ // not exist.
+ //
+ // We should not produce a non-nil filehandle for a file that does not exist.
+ errChanged := (oldErr == nil) != (newErr == nil)
+ return errChanged, errChanged, (newErr != nil) // we don't know if an import was deleted
+ }
+
+ oldHead := oldHeads[0]
+ newHead := newHeads[0]
+
+ // `go list` fails completely if the file header cannot be parsed. If we go
+ // from a non-parsing state to a parsing state, we should reload.
+ if oldHead.ParseErr != nil && newHead.ParseErr == nil {
+ return true, true, true // We don't know what changed, so fall back on full invalidation.
+ }
+
+ // If a package name has changed, the set of package imports may have changed
+ // in ways we can't detect here. Assume an import has been deleted.
+ if oldHead.File.Name.Name != newHead.File.Name.Name {
+ return true, true, true
+ }
+
+ // Check whether package imports have changed. Only consider potentially
+ // valid imports paths.
+ oldImports := validImports(oldHead.File.Imports)
+ newImports := validImports(newHead.File.Imports)
+
+ for path := range newImports {
+ if _, ok := oldImports[path]; ok {
+ delete(oldImports, path)
+ } else {
+ invalidate = true // a new, potentially valid import was added
+ }
+ }
+
+ if len(oldImports) > 0 {
+ invalidate = true
+ importDeleted = true
+ }
+
+ // If the change does not otherwise invalidate metadata, get the full ASTs in
+ // order to check magic comments.
+ //
+ // Note: if this affects performance we can probably avoid parsing in the
+ // common case by first scanning the source for potential comments.
+ if !invalidate {
+ origFulls, _, oldErr := lockedSnapshot.parseCache.parseFiles(ctx, source.ParseFull, oldFH)
+ newFulls, _, newErr := lockedSnapshot.parseCache.parseFiles(ctx, source.ParseFull, newFH)
+ if oldErr == nil && newErr == nil {
+ invalidate = magicCommentsChanged(origFulls[0].File, newFulls[0].File)
+ } else {
+ // At this point, we shouldn't ever fail to produce a ParsedGoFile, as
+ // we're already past header parsing.
+ bug.Reportf("metadataChanges: unparseable file %v (old error: %v, new error: %v)", oldFH.URI(), oldErr, newErr)
+ }
+ }
+
+ return invalidate, pkgFileChanged, importDeleted
+}
+
+func magicCommentsChanged(original *ast.File, current *ast.File) bool {
+ oldComments := extractMagicComments(original)
+ newComments := extractMagicComments(current)
+ if len(oldComments) != len(newComments) {
+ return true
+ }
+ for i := range oldComments {
+ if oldComments[i] != newComments[i] {
+ return true
+ }
+ }
+ return false
+}
+
+// validImports extracts the set of valid import paths from imports.
+func validImports(imports []*ast.ImportSpec) map[string]struct{} {
+ m := make(map[string]struct{})
+ for _, spec := range imports {
+ if path := spec.Path.Value; validImportPath(path) {
+ m[path] = struct{}{}
+ }
+ }
+ return m
+}
+
+func validImportPath(path string) bool {
+ path, err := strconv.Unquote(path)
+ if err != nil {
+ return false
+ }
+ if path == "" {
+ return false
+ }
+ if path[len(path)-1] == '/' {
+ return false
+ }
+ return true
+}
+
+var buildConstraintOrEmbedRe = regexp.MustCompile(`^//(go:embed|go:build|\s*\+build).*`)
+
+// extractMagicComments finds magic comments that affect metadata in f.
+func extractMagicComments(f *ast.File) []string {
+ var results []string
+ for _, cg := range f.Comments {
+ for _, c := range cg.List {
+ if buildConstraintOrEmbedRe.MatchString(c.Text) {
+ results = append(results, c.Text)
+ }
+ }
+ }
+ return results
+}
+
+func (s *snapshot) BuiltinFile(ctx context.Context) (*source.ParsedGoFile, error) {
+ s.AwaitInitialized(ctx)
+
+ s.mu.Lock()
+ builtin := s.builtin
+ s.mu.Unlock()
+
+ if builtin == "" {
+ return nil, fmt.Errorf("no builtin package for view %s", s.view.name)
+ }
+
+ fh, err := s.GetFile(ctx, builtin)
+ if err != nil {
+ return nil, err
+ }
+ return s.ParseGo(ctx, fh, source.ParseFull)
+}
+
+func (s *snapshot) IsBuiltin(ctx context.Context, uri span.URI) bool {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ // We should always get the builtin URI in a canonical form, so use simple
+ // string comparison here. span.CompareURI is too expensive.
+ return uri == s.builtin
+}
+
+func (s *snapshot) setBuiltin(path string) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ s.builtin = span.URIFromPath(path)
+}
diff --git a/gopls/internal/lsp/cache/standalone_go115.go b/gopls/internal/lsp/cache/standalone_go115.go
new file mode 100644
index 000000000..79569ae10
--- /dev/null
+++ b/gopls/internal/lsp/cache/standalone_go115.go
@@ -0,0 +1,14 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.16
+// +build !go1.16
+
+package cache
+
+// isStandaloneFile returns false, as the 'standaloneTags' setting is
+// unsupported on Go 1.15 and earlier.
+func isStandaloneFile(src []byte, standaloneTags []string) bool {
+ return false
+}
diff --git a/gopls/internal/lsp/cache/standalone_go116.go b/gopls/internal/lsp/cache/standalone_go116.go
new file mode 100644
index 000000000..2f72d5f54
--- /dev/null
+++ b/gopls/internal/lsp/cache/standalone_go116.go
@@ -0,0 +1,50 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.16
+// +build go1.16
+
+package cache
+
+import (
+ "go/build/constraint"
+ "go/parser"
+ "go/token"
+)
+
+// isStandaloneFile reports whether a file with the given contents should be
+// considered a 'standalone main file', meaning a package that consists of only
+// a single file.
+func isStandaloneFile(src []byte, standaloneTags []string) bool {
+ f, err := parser.ParseFile(token.NewFileSet(), "", src, parser.PackageClauseOnly|parser.ParseComments)
+ if err != nil {
+ return false
+ }
+
+ if f.Name == nil || f.Name.Name != "main" {
+ return false
+ }
+
+ for _, cg := range f.Comments {
+ // Even with PackageClauseOnly the parser consumes the semicolon following
+ // the package clause, so we must guard against comments that come after
+ // the package name.
+ if cg.Pos() > f.Name.Pos() {
+ continue
+ }
+ for _, comment := range cg.List {
+ if c, err := constraint.Parse(comment.Text); err == nil {
+ if tag, ok := c.(*constraint.TagExpr); ok {
+ for _, t := range standaloneTags {
+ if t == tag.Tag {
+ return true
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return false
+}
diff --git a/gopls/internal/lsp/cache/standalone_go116_test.go b/gopls/internal/lsp/cache/standalone_go116_test.go
new file mode 100644
index 000000000..9adf01e6c
--- /dev/null
+++ b/gopls/internal/lsp/cache/standalone_go116_test.go
@@ -0,0 +1,96 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.16
+// +build go1.16
+
+package cache
+
+import (
+ "testing"
+)
+
+func TestIsStandaloneFile(t *testing.T) {
+ tests := []struct {
+ desc string
+ contents string
+ standaloneTags []string
+ want bool
+ }{
+ {
+ "new syntax",
+ "//go:build ignore\n\npackage main\n",
+ []string{"ignore"},
+ true,
+ },
+ {
+ "legacy syntax",
+ "// +build ignore\n\npackage main\n",
+ []string{"ignore"},
+ true,
+ },
+ {
+ "multiple tags",
+ "//go:build ignore\n\npackage main\n",
+ []string{"exclude", "ignore"},
+ true,
+ },
+ {
+ "invalid tag",
+ "// +build ignore\n\npackage main\n",
+ []string{"script"},
+ false,
+ },
+ {
+ "non-main package",
+ "//go:build ignore\n\npackage p\n",
+ []string{"ignore"},
+ false,
+ },
+ {
+ "alternate tag",
+ "// +build script\n\npackage main\n",
+ []string{"script"},
+ true,
+ },
+ {
+ "both syntax",
+ "//go:build ignore\n// +build ignore\n\npackage main\n",
+ []string{"ignore"},
+ true,
+ },
+ {
+ "after comments",
+ "// A non-directive comment\n//go:build ignore\n\npackage main\n",
+ []string{"ignore"},
+ true,
+ },
+ {
+ "after package decl",
+ "package main //go:build ignore\n",
+ []string{"ignore"},
+ false,
+ },
+ {
+ "on line after package decl",
+ "package main\n\n//go:build ignore\n",
+ []string{"ignore"},
+ false,
+ },
+ {
+ "combined with other expressions",
+ "\n\n//go:build ignore || darwin\n\npackage main\n",
+ []string{"ignore"},
+ false,
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.desc, func(t *testing.T) {
+ if got := isStandaloneFile([]byte(test.contents), test.standaloneTags); got != test.want {
+ t.Errorf("isStandaloneFile(%q, %v) = %t, want %t", test.contents, test.standaloneTags, got, test.want)
+ }
+ })
+ }
+}
diff --git a/gopls/internal/lsp/cache/symbols.go b/gopls/internal/lsp/cache/symbols.go
new file mode 100644
index 000000000..b6ffffb3a
--- /dev/null
+++ b/gopls/internal/lsp/cache/symbols.go
@@ -0,0 +1,213 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cache
+
+import (
+ "context"
+ "go/ast"
+ "go/token"
+ "go/types"
+ "strings"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/memoize"
+)
+
+// symbolize returns the result of symbolizing the file identified by uri, using a cache.
+func (s *snapshot) symbolize(ctx context.Context, uri span.URI) ([]source.Symbol, error) {
+
+ s.mu.Lock()
+ entry, hit := s.symbolizeHandles.Get(uri)
+ s.mu.Unlock()
+
+ type symbolizeResult struct {
+ symbols []source.Symbol
+ err error
+ }
+
+ // Cache miss?
+ if !hit {
+ fh, err := s.GetFile(ctx, uri)
+ if err != nil {
+ return nil, err
+ }
+ type symbolHandleKey source.Hash
+ key := symbolHandleKey(fh.FileIdentity().Hash)
+ promise, release := s.store.Promise(key, func(ctx context.Context, arg interface{}) interface{} {
+ symbols, err := symbolizeImpl(ctx, arg.(*snapshot), fh)
+ return symbolizeResult{symbols, err}
+ })
+
+ entry = promise
+
+ s.mu.Lock()
+ s.symbolizeHandles.Set(uri, entry, func(_, _ interface{}) { release() })
+ s.mu.Unlock()
+ }
+
+ // Await result.
+ v, err := s.awaitPromise(ctx, entry.(*memoize.Promise))
+ if err != nil {
+ return nil, err
+ }
+ res := v.(symbolizeResult)
+ return res.symbols, res.err
+}
+
+// symbolizeImpl reads and parses a file and extracts symbols from it.
+// It may use a parsed file already present in the cache but
+// otherwise does not populate the cache.
+func symbolizeImpl(ctx context.Context, snapshot *snapshot, fh source.FileHandle) ([]source.Symbol, error) {
+ pgfs, _, err := snapshot.parseCache.parseFiles(ctx, source.ParseFull, fh)
+ if err != nil {
+ return nil, err
+ }
+
+ w := &symbolWalker{
+ tokFile: pgfs[0].Tok,
+ mapper: pgfs[0].Mapper,
+ }
+ w.fileDecls(pgfs[0].File.Decls)
+
+ return w.symbols, w.firstError
+}
+
+type symbolWalker struct {
+ // for computing positions
+ tokFile *token.File
+ mapper *protocol.Mapper
+
+ symbols []source.Symbol
+ firstError error
+}
+
+func (w *symbolWalker) atNode(node ast.Node, name string, kind protocol.SymbolKind, path ...*ast.Ident) {
+ var b strings.Builder
+ for _, ident := range path {
+ if ident != nil {
+ b.WriteString(ident.Name)
+ b.WriteString(".")
+ }
+ }
+ b.WriteString(name)
+
+ rng, err := w.mapper.NodeRange(w.tokFile, node)
+ if err != nil {
+ w.error(err)
+ return
+ }
+ sym := source.Symbol{
+ Name: b.String(),
+ Kind: kind,
+ Range: rng,
+ }
+ w.symbols = append(w.symbols, sym)
+}
+
+func (w *symbolWalker) error(err error) {
+ if err != nil && w.firstError == nil {
+ w.firstError = err
+ }
+}
+
+func (w *symbolWalker) fileDecls(decls []ast.Decl) {
+ for _, decl := range decls {
+ switch decl := decl.(type) {
+ case *ast.FuncDecl:
+ kind := protocol.Function
+ var recv *ast.Ident
+ if decl.Recv.NumFields() > 0 {
+ kind = protocol.Method
+ recv = unpackRecv(decl.Recv.List[0].Type)
+ }
+ w.atNode(decl.Name, decl.Name.Name, kind, recv)
+ case *ast.GenDecl:
+ for _, spec := range decl.Specs {
+ switch spec := spec.(type) {
+ case *ast.TypeSpec:
+ kind := guessKind(spec)
+ w.atNode(spec.Name, spec.Name.Name, kind)
+ w.walkType(spec.Type, spec.Name)
+ case *ast.ValueSpec:
+ for _, name := range spec.Names {
+ kind := protocol.Variable
+ if decl.Tok == token.CONST {
+ kind = protocol.Constant
+ }
+ w.atNode(name, name.Name, kind)
+ }
+ }
+ }
+ }
+ }
+}
+
+func guessKind(spec *ast.TypeSpec) protocol.SymbolKind {
+ switch spec.Type.(type) {
+ case *ast.InterfaceType:
+ return protocol.Interface
+ case *ast.StructType:
+ return protocol.Struct
+ case *ast.FuncType:
+ return protocol.Function
+ }
+ return protocol.Class
+}
+
+func unpackRecv(rtyp ast.Expr) *ast.Ident {
+ // Extract the receiver identifier. Lifted from go/types/resolver.go
+L:
+ for {
+ switch t := rtyp.(type) {
+ case *ast.ParenExpr:
+ rtyp = t.X
+ case *ast.StarExpr:
+ rtyp = t.X
+ default:
+ break L
+ }
+ }
+ if name, _ := rtyp.(*ast.Ident); name != nil {
+ return name
+ }
+ return nil
+}
+
+// walkType processes symbols related to a type expression. path is path of
+// nested type identifiers to the type expression.
+func (w *symbolWalker) walkType(typ ast.Expr, path ...*ast.Ident) {
+ switch st := typ.(type) {
+ case *ast.StructType:
+ for _, field := range st.Fields.List {
+ w.walkField(field, protocol.Field, protocol.Field, path...)
+ }
+ case *ast.InterfaceType:
+ for _, field := range st.Methods.List {
+ w.walkField(field, protocol.Interface, protocol.Method, path...)
+ }
+ }
+}
+
+// walkField processes symbols related to the struct field or interface method.
+//
+// unnamedKind and namedKind are the symbol kinds if the field is resp. unnamed
+// or named. path is the path of nested identifiers containing the field.
+func (w *symbolWalker) walkField(field *ast.Field, unnamedKind, namedKind protocol.SymbolKind, path ...*ast.Ident) {
+ if len(field.Names) == 0 {
+ switch typ := field.Type.(type) {
+ case *ast.SelectorExpr:
+ // embedded qualified type
+ w.atNode(field, typ.Sel.Name, unnamedKind, path...)
+ default:
+ w.atNode(field, types.ExprString(field.Type), unnamedKind, path...)
+ }
+ }
+ for _, name := range field.Names {
+ w.atNode(name, name.Name, namedKind, path...)
+ w.walkType(field.Type, append(path, name)...)
+ }
+}
diff --git a/gopls/internal/lsp/cache/view.go b/gopls/internal/lsp/cache/view.go
new file mode 100644
index 000000000..bc106d131
--- /dev/null
+++ b/gopls/internal/lsp/cache/view.go
@@ -0,0 +1,1142 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cache implements the caching layer for gopls.
+package cache
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+ "reflect"
+ "regexp"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/mod/modfile"
+ "golang.org/x/mod/semver"
+ exec "golang.org/x/sys/execabs"
+ "golang.org/x/tools/gopls/internal/govulncheck"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/gocommand"
+ "golang.org/x/tools/internal/imports"
+ "golang.org/x/tools/internal/xcontext"
+)
+
+type View struct {
+ id string
+
+ gocmdRunner *gocommand.Runner // limits go command concurrency
+
+ // baseCtx is the context handed to NewView. This is the parent of all
+ // background contexts created for this view.
+ baseCtx context.Context
+
+ // name is the user-specified name of this view.
+ name string
+
+ optionsMu sync.Mutex
+ options *source.Options
+
+ // Workspace information. The fields below are immutable, and together with
+ // options define the build list. Any change to these fields results in a new
+ // View.
+ folder span.URI // user-specified workspace folder
+ workspaceInformation // Go environment information
+
+ importsState *importsState
+
+ // moduleUpgrades tracks known upgrades for module paths in each modfile.
+ // Each modfile has a map of module name to upgrade version.
+ moduleUpgradesMu sync.Mutex
+ moduleUpgrades map[span.URI]map[string]string
+
+ // vulns maps each go.mod file's URI to its known vulnerabilities.
+ vulnsMu sync.Mutex
+ vulns map[span.URI]*govulncheck.Result
+
+ // fs is the file source used to populate this view.
+ fs source.FileSource
+
+ // seenFiles tracks files that the view has accessed.
+ // TODO(golang/go#57558): this notion is fundamentally problematic, and
+ // should be removed.
+ knownFilesMu sync.Mutex
+ knownFiles map[span.URI]bool
+
+ // initCancelFirstAttempt can be used to terminate the view's first
+ // attempt at initialization.
+ initCancelFirstAttempt context.CancelFunc
+
+ // Track the latest snapshot via the snapshot field, guarded by snapshotMu.
+ //
+ // Invariant: whenever the snapshot field is overwritten, destroy(snapshot)
+ // is called on the previous (overwritten) snapshot while snapshotMu is held,
+ // incrementing snapshotWG. During shutdown the final snapshot is
+ // overwritten with nil and destroyed, guaranteeing that all observed
+ // snapshots have been destroyed via the destroy method, and snapshotWG may
+ // be waited upon to let these destroy operations complete.
+ snapshotMu sync.Mutex
+ snapshot *snapshot // latest snapshot; nil after shutdown has been called
+ releaseSnapshot func() // called when snapshot is no longer needed
+ snapshotWG sync.WaitGroup // refcount for pending destroy operations
+
+ // initialWorkspaceLoad is closed when the first workspace initialization has
+ // completed. If we failed to load, we only retry if the go.mod file changes,
+ // to avoid too many go/packages calls.
+ initialWorkspaceLoad chan struct{}
+
+ // initializationSema is used limit concurrent initialization of snapshots in
+ // the view. We use a channel instead of a mutex to avoid blocking when a
+ // context is canceled.
+ //
+ // This field (along with snapshot.initialized) guards against duplicate
+ // initialization of snapshots. Do not change it without adjusting snapshot
+ // accordingly.
+ initializationSema chan struct{}
+}
+
+// workspaceInformation holds the defining features of the View workspace.
+//
+// This type is compared to see if the View needs to be reconstructed.
+type workspaceInformation struct {
+ // `go env` variables that need to be tracked by gopls.
+ goEnv
+
+ // gomod holds the relevant go.mod file for this workspace.
+ gomod span.URI
+
+ // The Go version in use: X in Go 1.X.
+ goversion int
+
+ // The complete output of the go version command.
+ // (Call gocommand.ParseGoVersionOutput to extract a version
+ // substring such as go1.19.1 or go1.20-rc.1, go1.21-abcdef01.)
+ goversionOutput string
+
+ // hasGopackagesDriver is true if the user has a value set for the
+ // GOPACKAGESDRIVER environment variable or a gopackagesdriver binary on
+ // their machine.
+ hasGopackagesDriver bool
+}
+
+// effectiveGO111MODULE reports the value of GO111MODULE effective in the go
+// command at this go version, assuming at least Go 1.16.
+func (w workspaceInformation) effectiveGO111MODULE() go111module {
+ switch w.GO111MODULE() {
+ case "off":
+ return off
+ case "on", "":
+ return on
+ default:
+ return auto
+ }
+}
+
+// effectiveGOWORK returns the effective GOWORK value for this workspace, if
+// any, in URI form.
+func (w workspaceInformation) effectiveGOWORK() span.URI {
+ if w.gowork == "off" || w.gowork == "" {
+ return ""
+ }
+ return span.URIFromPath(w.gowork)
+}
+
+// GO111MODULE returns the value of GO111MODULE to use for running the go
+// command. It differs from the user's environment in order to allow for the
+// more forgiving default value "auto" when using recent go versions.
+//
+// TODO(rfindley): it is probably not worthwhile diverging from the go command
+// here. The extra forgiveness may be nice, but breaks the invariant that
+// running the go command from the command line produces the same build list.
+//
+// Put differently: we shouldn't go out of our way to make GOPATH work, when
+// the go command does not.
+func (w workspaceInformation) GO111MODULE() string {
+ if w.go111module == "" {
+ return "auto"
+ }
+ return w.go111module
+}
+
+type go111module int
+
+const (
+ off = go111module(iota)
+ auto
+ on
+)
+
+// goEnv holds important environment variables that gopls cares about.
+type goEnv struct {
+ gocache, gopath, goroot, goprivate, gomodcache, gowork, goflags string
+
+ // go111module holds the value of GO111MODULE as reported by go env.
+ //
+ // Don't use this value directly, because we choose to use a different
+ // default (auto) on Go 1.16 and later, to avoid spurious errors. Use
+ // the effectiveGO111MODULE method instead.
+ go111module string
+}
+
+// loadGoEnv loads `go env` values into the receiver, using the provided user
+// environment and go command runner.
+func (env *goEnv) load(ctx context.Context, folder string, configEnv []string, runner *gocommand.Runner) error {
+ vars := env.vars()
+
+ // We can save ~200 ms by requesting only the variables we care about.
+ args := []string{"-json"}
+ for k := range vars {
+ args = append(args, k)
+ }
+
+ inv := gocommand.Invocation{
+ Verb: "env",
+ Args: args,
+ Env: configEnv,
+ WorkingDir: folder,
+ }
+ stdout, err := runner.Run(ctx, inv)
+ if err != nil {
+ return err
+ }
+ envMap := make(map[string]string)
+ if err := json.Unmarshal(stdout.Bytes(), &envMap); err != nil {
+ return fmt.Errorf("internal error unmarshaling JSON from 'go env': %w", err)
+ }
+ for key, ptr := range vars {
+ *ptr = envMap[key]
+ }
+
+ return nil
+}
+
+func (env goEnv) String() string {
+ var vars []string
+ for govar, ptr := range env.vars() {
+ vars = append(vars, fmt.Sprintf("%s=%s", govar, *ptr))
+ }
+ sort.Strings(vars)
+ return "[" + strings.Join(vars, ", ") + "]"
+}
+
+// vars returns a map from Go environment variable to field value containing it.
+func (env *goEnv) vars() map[string]*string {
+ return map[string]*string{
+ "GOCACHE": &env.gocache,
+ "GOPATH": &env.gopath,
+ "GOROOT": &env.goroot,
+ "GOPRIVATE": &env.goprivate,
+ "GOMODCACHE": &env.gomodcache,
+ "GO111MODULE": &env.go111module,
+ "GOWORK": &env.gowork,
+ "GOFLAGS": &env.goflags,
+ }
+}
+
+// workspaceMode holds various flags defining how the gopls workspace should
+// behave. They may be derived from the environment, user configuration, or
+// depend on the Go version.
+//
+// TODO(rfindley): remove workspace mode, in favor of explicit checks.
+type workspaceMode int
+
+const (
+ moduleMode workspaceMode = 1 << iota
+
+ // tempModfile indicates whether or not the -modfile flag should be used.
+ tempModfile
+)
+
+func (v *View) ID() string { return v.id }
+
+// tempModFile creates a temporary go.mod file based on the contents
+// of the given go.mod file. On success, it is the caller's
+// responsibility to call the cleanup function when the file is no
+// longer needed.
+func tempModFile(modFh source.FileHandle, gosum []byte) (tmpURI span.URI, cleanup func(), err error) {
+ filenameHash := source.Hashf("%s", modFh.URI().Filename())
+ tmpMod, err := ioutil.TempFile("", fmt.Sprintf("go.%s.*.mod", filenameHash))
+ if err != nil {
+ return "", nil, err
+ }
+ defer tmpMod.Close()
+
+ tmpURI = span.URIFromPath(tmpMod.Name())
+ tmpSumName := sumFilename(tmpURI)
+
+ content, err := modFh.Read()
+ if err != nil {
+ return "", nil, err
+ }
+
+ if _, err := tmpMod.Write(content); err != nil {
+ return "", nil, err
+ }
+
+ // We use a distinct name here to avoid subtlety around the fact
+ // that both 'return' and 'defer' update the "cleanup" variable.
+ doCleanup := func() {
+ _ = os.Remove(tmpSumName)
+ _ = os.Remove(tmpURI.Filename())
+ }
+
+ // Be careful to clean up if we return an error from this function.
+ defer func() {
+ if err != nil {
+ doCleanup()
+ cleanup = nil
+ }
+ }()
+
+ // Create an analogous go.sum, if one exists.
+ if gosum != nil {
+ if err := ioutil.WriteFile(tmpSumName, gosum, 0655); err != nil {
+ return "", nil, err
+ }
+ }
+
+ return tmpURI, doCleanup, nil
+}
+
+// Name returns the user visible name of this view.
+func (v *View) Name() string {
+ return v.name
+}
+
+// Folder returns the folder at the base of this view.
+func (v *View) Folder() span.URI {
+ return v.folder
+}
+
+func (v *View) Options() *source.Options {
+ v.optionsMu.Lock()
+ defer v.optionsMu.Unlock()
+ return v.options
+}
+
+func (v *View) FileKind(fh source.FileHandle) source.FileKind {
+ // The kind of an unsaved buffer comes from the
+ // TextDocumentItem.LanguageID field in the didChange event,
+ // not from the file name. They may differ.
+ if o, ok := fh.(*Overlay); ok {
+ if o.kind != source.UnknownKind {
+ return o.kind
+ }
+ }
+
+ fext := filepath.Ext(fh.URI().Filename())
+ switch fext {
+ case ".go":
+ return source.Go
+ case ".mod":
+ return source.Mod
+ case ".sum":
+ return source.Sum
+ case ".work":
+ return source.Work
+ }
+ exts := v.Options().TemplateExtensions
+ for _, ext := range exts {
+ if fext == ext || fext == "."+ext {
+ return source.Tmpl
+ }
+ }
+ // and now what? This should never happen, but it does for cgo before go1.15
+ return source.Go
+}
+
+func minorOptionsChange(a, b *source.Options) bool {
+ // Check if any of the settings that modify our understanding of files have
+ // been changed.
+ if !reflect.DeepEqual(a.Env, b.Env) {
+ return false
+ }
+ if !reflect.DeepEqual(a.DirectoryFilters, b.DirectoryFilters) {
+ return false
+ }
+ if !reflect.DeepEqual(a.StandaloneTags, b.StandaloneTags) {
+ return false
+ }
+ if a.ExpandWorkspaceToModule != b.ExpandWorkspaceToModule {
+ return false
+ }
+ if a.MemoryMode != b.MemoryMode {
+ return false
+ }
+ aBuildFlags := make([]string, len(a.BuildFlags))
+ bBuildFlags := make([]string, len(b.BuildFlags))
+ copy(aBuildFlags, a.BuildFlags)
+ copy(bBuildFlags, b.BuildFlags)
+ sort.Strings(aBuildFlags)
+ sort.Strings(bBuildFlags)
+ // the rest of the options are benign
+ return reflect.DeepEqual(aBuildFlags, bBuildFlags)
+}
+
+// SetViewOptions sets the options of the given view to new values. Calling
+// this may cause the view to be invalidated and a replacement view added to
+// the session. If so the new view will be returned, otherwise the original one
+// will be returned.
+func (s *Session) SetViewOptions(ctx context.Context, v *View, options *source.Options) (*View, error) {
+ // no need to rebuild the view if the options were not materially changed
+ v.optionsMu.Lock()
+ if minorOptionsChange(v.options, options) {
+ v.options = options
+ v.optionsMu.Unlock()
+ return v, nil
+ }
+ v.optionsMu.Unlock()
+ newView, err := s.updateView(ctx, v, options)
+ return newView, err
+}
+
+// viewEnv returns a string describing the environment of a newly created view.
+//
+// It must not be called concurrently with any other view methods.
+func viewEnv(v *View) string {
+ env := v.options.EnvSlice()
+ buildFlags := append([]string{}, v.options.BuildFlags...)
+
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, `go info for %v
+(go dir %s)
+(go version %s)
+(valid build configuration = %v)
+(build flags: %v)
+(selected go env: %v)
+`,
+ v.folder.Filename(),
+ v.workingDir().Filename(),
+ strings.TrimRight(v.workspaceInformation.goversionOutput, "\n"),
+ v.snapshot.ValidBuildConfiguration(),
+ buildFlags,
+ v.goEnv,
+ )
+
+ for _, v := range env {
+ s := strings.SplitN(v, "=", 2)
+ if len(s) != 2 {
+ continue
+ }
+ }
+
+ return buf.String()
+}
+
+func (s *snapshot) RunProcessEnvFunc(ctx context.Context, fn func(*imports.Options) error) error {
+ return s.view.importsState.runProcessEnvFunc(ctx, s, fn)
+}
+
+// separated out from its sole use in locateTemplateFiles for testability
+func fileHasExtension(path string, suffixes []string) bool {
+ ext := filepath.Ext(path)
+ if ext != "" && ext[0] == '.' {
+ ext = ext[1:]
+ }
+ for _, s := range suffixes {
+ if s != "" && ext == s {
+ return true
+ }
+ }
+ return false
+}
+
+// locateTemplateFiles ensures that the snapshot has mapped template files
+// within the workspace folder.
+func (s *snapshot) locateTemplateFiles(ctx context.Context) {
+ if len(s.view.Options().TemplateExtensions) == 0 {
+ return
+ }
+ suffixes := s.view.Options().TemplateExtensions
+
+ searched := 0
+ filterFunc := s.view.filterFunc()
+ err := filepath.WalkDir(s.view.folder.Filename(), func(path string, entry os.DirEntry, err error) error {
+ if err != nil {
+ return err
+ }
+ if entry.IsDir() {
+ return nil
+ }
+ if fileLimit > 0 && searched > fileLimit {
+ return errExhausted
+ }
+ searched++
+ if !fileHasExtension(path, suffixes) {
+ return nil
+ }
+ uri := span.URIFromPath(path)
+ if filterFunc(uri) {
+ return nil
+ }
+ // Get the file in order to include it in the snapshot.
+ // TODO(golang/go#57558): it is fundamentally broken to track files in this
+ // way; we may lose them if configuration or layout changes cause a view to
+ // be recreated.
+ //
+ // Furthermore, this operation must ignore errors, including context
+ // cancellation, or risk leaving the snapshot in an undefined state.
+ s.GetFile(ctx, uri)
+ return nil
+ })
+ if err != nil {
+ event.Error(ctx, "searching for template files failed", err)
+ }
+}
+
+func (v *View) contains(uri span.URI) bool {
+ // If we've expanded the go dir to a parent directory, consider if the
+ // expanded dir contains the uri.
+ // TODO(rfindley): should we ignore the root here? It is not provided by the
+ // user. It would be better to explicitly consider the set of active modules
+ // wherever relevant.
+ inGoDir := false
+ if source.InDir(v.workingDir().Filename(), v.folder.Filename()) {
+ inGoDir = source.InDir(v.workingDir().Filename(), uri.Filename())
+ }
+ inFolder := source.InDir(v.folder.Filename(), uri.Filename())
+
+ if !inGoDir && !inFolder {
+ return false
+ }
+
+ return !v.filterFunc()(uri)
+}
+
+// filterFunc returns a func that reports whether uri is filtered by the currently configured
+// directoryFilters.
+func (v *View) filterFunc() func(span.URI) bool {
+ filterer := buildFilterer(v.folder.Filename(), v.gomodcache, v.Options())
+ return func(uri span.URI) bool {
+ // Only filter relative to the configured root directory.
+ if source.InDir(v.folder.Filename(), uri.Filename()) {
+ return pathExcludedByFilter(strings.TrimPrefix(uri.Filename(), v.folder.Filename()), filterer)
+ }
+ return false
+ }
+}
+
+func (v *View) relevantChange(c source.FileModification) bool {
+ // If the file is known to the view, the change is relevant.
+ if v.knownFile(c.URI) {
+ return true
+ }
+ // The go.work file may not be "known" because we first access it through the
+ // session. As a result, treat changes to the view's go.work file as always
+ // relevant, even if they are only on-disk changes.
+ //
+ // TODO(rfindley): Make sure the go.work files are always known
+ // to the view.
+ if c.URI == v.effectiveGOWORK() {
+ return true
+ }
+
+ // Note: CL 219202 filtered out on-disk changes here that were not known to
+ // the view, but this introduces a race when changes arrive before the view
+ // is initialized (and therefore, before it knows about files). Since that CL
+ // had neither test nor associated issue, and cited only emacs behavior, this
+ // logic was deleted.
+
+ return v.contains(c.URI)
+}
+
+func (v *View) markKnown(uri span.URI) {
+ v.knownFilesMu.Lock()
+ defer v.knownFilesMu.Unlock()
+ if v.knownFiles == nil {
+ v.knownFiles = make(map[span.URI]bool)
+ }
+ v.knownFiles[uri] = true
+}
+
+// knownFile reports whether the specified valid URI (or an alias) is known to the view.
+func (v *View) knownFile(uri span.URI) bool {
+ v.knownFilesMu.Lock()
+ defer v.knownFilesMu.Unlock()
+ return v.knownFiles[uri]
+}
+
+// shutdown releases resources associated with the view, and waits for ongoing
+// work to complete.
+func (v *View) shutdown() {
+ // Cancel the initial workspace load if it is still running.
+ v.initCancelFirstAttempt()
+
+ v.snapshotMu.Lock()
+ if v.snapshot != nil {
+ v.releaseSnapshot()
+ v.destroy(v.snapshot, "View.shutdown")
+ v.snapshot = nil
+ v.releaseSnapshot = nil
+ }
+ v.snapshotMu.Unlock()
+
+ v.snapshotWG.Wait()
+}
+
+func (s *snapshot) IgnoredFile(uri span.URI) bool {
+ filename := uri.Filename()
+ var prefixes []string
+ if len(s.workspaceModFiles) == 0 {
+ for _, entry := range filepath.SplitList(s.view.gopath) {
+ prefixes = append(prefixes, filepath.Join(entry, "src"))
+ }
+ } else {
+ prefixes = append(prefixes, s.view.gomodcache)
+ for m := range s.workspaceModFiles {
+ prefixes = append(prefixes, span.Dir(m).Filename())
+ }
+ }
+ for _, prefix := range prefixes {
+ if strings.HasPrefix(filename, prefix) {
+ return checkIgnored(filename[len(prefix):])
+ }
+ }
+ return false
+}
+
+// checkIgnored implements go list's exclusion rules.
+// Quoting “go help list”:
+//
+// Directory and file names that begin with "." or "_" are ignored
+// by the go tool, as are directories named "testdata".
+func checkIgnored(suffix string) bool {
+ for _, component := range strings.Split(suffix, string(filepath.Separator)) {
+ if len(component) == 0 {
+ continue
+ }
+ if component[0] == '.' || component[0] == '_' || component == "testdata" {
+ return true
+ }
+ }
+ return false
+}
+
+func (v *View) Snapshot() (source.Snapshot, func(), error) {
+ return v.getSnapshot()
+}
+
+func (v *View) getSnapshot() (*snapshot, func(), error) {
+ v.snapshotMu.Lock()
+ defer v.snapshotMu.Unlock()
+ if v.snapshot == nil {
+ return nil, nil, errors.New("view is shutdown")
+ }
+ return v.snapshot, v.snapshot.Acquire(), nil
+}
+
+func (s *snapshot) initialize(ctx context.Context, firstAttempt bool) {
+ select {
+ case <-ctx.Done():
+ return
+ case s.view.initializationSema <- struct{}{}:
+ }
+
+ defer func() {
+ <-s.view.initializationSema
+ }()
+
+ s.mu.Lock()
+ initialized := s.initialized
+ s.mu.Unlock()
+
+ if initialized {
+ return
+ }
+
+ s.loadWorkspace(ctx, firstAttempt)
+ s.collectAllKnownSubdirs(ctx)
+}
+
+func (s *snapshot) loadWorkspace(ctx context.Context, firstAttempt bool) (loadErr error) {
+ // A failure is retryable if it may have been due to context cancellation,
+ // and this is not the initial workspace load (firstAttempt==true).
+ //
+ // The IWL runs on a detached context with a long (~10m) timeout, so
+ // if the context was canceled we consider loading to have failed
+ // permanently.
+ retryableFailure := func() bool {
+ return loadErr != nil && ctx.Err() != nil && !firstAttempt
+ }
+ defer func() {
+ if !retryableFailure() {
+ s.mu.Lock()
+ s.initialized = true
+ s.mu.Unlock()
+ }
+ if firstAttempt {
+ close(s.view.initialWorkspaceLoad)
+ }
+ }()
+
+ // TODO(rFindley): we should only locate template files on the first attempt,
+ // or guard it via a different mechanism.
+ s.locateTemplateFiles(ctx)
+
+ // Collect module paths to load by parsing go.mod files. If a module fails to
+ // parse, capture the parsing failure as a critical diagnostic.
+ var scopes []loadScope // scopes to load
+ var modDiagnostics []*source.Diagnostic // diagnostics for broken go.mod files
+ addError := func(uri span.URI, err error) {
+ modDiagnostics = append(modDiagnostics, &source.Diagnostic{
+ URI: uri,
+ Severity: protocol.SeverityError,
+ Source: source.ListError,
+ Message: err.Error(),
+ })
+ }
+
+ if len(s.workspaceModFiles) > 0 {
+ for modURI := range s.workspaceModFiles {
+ // Be careful not to add context cancellation errors as critical module
+ // errors.
+ fh, err := s.GetFile(ctx, modURI)
+ if err != nil {
+ if ctx.Err() == nil {
+ addError(modURI, err)
+ }
+ continue
+ }
+ parsed, err := s.ParseMod(ctx, fh)
+ if err != nil {
+ if ctx.Err() == nil {
+ addError(modURI, err)
+ }
+ continue
+ }
+ if parsed.File == nil || parsed.File.Module == nil {
+ addError(modURI, fmt.Errorf("no module path for %s", modURI))
+ continue
+ }
+ path := parsed.File.Module.Mod.Path
+ scopes = append(scopes, moduleLoadScope(path))
+ }
+ } else {
+ scopes = append(scopes, viewLoadScope("LOAD_VIEW"))
+ }
+
+ // If we're loading anything, ensure we also load builtin,
+ // since it provides fake definitions (and documentation)
+ // for types like int that are used everywhere.
+ if len(scopes) > 0 {
+ scopes = append(scopes, packageLoadScope("builtin"))
+ }
+ loadErr = s.load(ctx, true, scopes...)
+
+ if retryableFailure() {
+ return loadErr
+ }
+
+ var criticalErr *source.CriticalError
+ switch {
+ case loadErr != nil && ctx.Err() != nil:
+ event.Error(ctx, fmt.Sprintf("initial workspace load: %v", loadErr), loadErr)
+ criticalErr = &source.CriticalError{
+ MainError: loadErr,
+ }
+ case loadErr != nil:
+ event.Error(ctx, "initial workspace load failed", loadErr)
+ extractedDiags := s.extractGoCommandErrors(ctx, loadErr)
+ criticalErr = &source.CriticalError{
+ MainError: loadErr,
+ Diagnostics: append(modDiagnostics, extractedDiags...),
+ }
+ case len(modDiagnostics) == 1:
+ criticalErr = &source.CriticalError{
+ MainError: fmt.Errorf(modDiagnostics[0].Message),
+ Diagnostics: modDiagnostics,
+ }
+ case len(modDiagnostics) > 1:
+ criticalErr = &source.CriticalError{
+ MainError: fmt.Errorf("error loading module names"),
+ Diagnostics: modDiagnostics,
+ }
+ }
+
+ // Lock the snapshot when setting the initialized error.
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ s.initializedErr = criticalErr
+ return loadErr
+}
+
+// invalidateContent invalidates the content of a Go file,
+// including any position and type information that depends on it.
+//
+// invalidateContent returns a non-nil snapshot for the new content, along with
+// a callback which the caller must invoke to release that snapshot.
+func (v *View) invalidateContent(ctx context.Context, changes map[span.URI]*fileChange, forceReloadMetadata bool) (*snapshot, func()) {
+ // Detach the context so that content invalidation cannot be canceled.
+ ctx = xcontext.Detach(ctx)
+
+ // This should be the only time we hold the view's snapshot lock for any period of time.
+ v.snapshotMu.Lock()
+ defer v.snapshotMu.Unlock()
+
+ prevSnapshot, prevReleaseSnapshot := v.snapshot, v.releaseSnapshot
+
+ if prevSnapshot == nil {
+ panic("invalidateContent called after shutdown")
+ }
+
+ // Cancel all still-running previous requests, since they would be
+ // operating on stale data.
+ prevSnapshot.cancel()
+
+ // Do not clone a snapshot until its view has finished initializing.
+ prevSnapshot.AwaitInitialized(ctx)
+
+ // Save one lease of the cloned snapshot in the view.
+ v.snapshot, v.releaseSnapshot = prevSnapshot.clone(ctx, v.baseCtx, changes, forceReloadMetadata)
+
+ prevReleaseSnapshot()
+ v.destroy(prevSnapshot, "View.invalidateContent")
+
+ // Return a second lease to the caller.
+ return v.snapshot, v.snapshot.Acquire()
+}
+
+func (s *Session) getWorkspaceInformation(ctx context.Context, folder span.URI, options *source.Options) (workspaceInformation, error) {
+ if err := checkPathCase(folder.Filename()); err != nil {
+ return workspaceInformation{}, fmt.Errorf("invalid workspace folder path: %w; check that the casing of the configured workspace folder path agrees with the casing reported by the operating system", err)
+ }
+ var err error
+ var info workspaceInformation
+ inv := gocommand.Invocation{
+ WorkingDir: folder.Filename(),
+ Env: options.EnvSlice(),
+ }
+ info.goversion, err = gocommand.GoVersion(ctx, inv, s.gocmdRunner)
+ if err != nil {
+ return info, err
+ }
+ info.goversionOutput, err = gocommand.GoVersionOutput(ctx, inv, s.gocmdRunner)
+ if err != nil {
+ return info, err
+ }
+ if err := info.goEnv.load(ctx, folder.Filename(), options.EnvSlice(), s.gocmdRunner); err != nil {
+ return info, err
+ }
+ // The value of GOPACKAGESDRIVER is not returned through the go command.
+ gopackagesdriver := os.Getenv("GOPACKAGESDRIVER")
+ // A user may also have a gopackagesdriver binary on their machine, which
+ // works the same way as setting GOPACKAGESDRIVER.
+ tool, _ := exec.LookPath("gopackagesdriver")
+ info.hasGopackagesDriver = gopackagesdriver != "off" && (gopackagesdriver != "" || tool != "")
+
+ // filterFunc is the path filter function for this workspace folder. Notably,
+ // it is relative to folder (which is specified by the user), not root.
+ filterFunc := pathExcludedByFilterFunc(folder.Filename(), info.gomodcache, options)
+ info.gomod, err = findWorkspaceModFile(ctx, folder, s, filterFunc)
+ if err != nil {
+ return info, err
+ }
+
+ return info, nil
+}
+
+// findWorkspaceModFile searches for a single go.mod file relative to the given
+// folder URI, using the following algorithm:
+// 1. if there is a go.mod file in a parent directory, return it
+// 2. else, if there is exactly one nested module, return it
+// 3. else, return ""
+func findWorkspaceModFile(ctx context.Context, folderURI span.URI, fs source.FileSource, excludePath func(string) bool) (span.URI, error) {
+ folder := folderURI.Filename()
+ match, err := findRootPattern(ctx, folder, "go.mod", fs)
+ if err != nil {
+ if ctxErr := ctx.Err(); ctxErr != nil {
+ return "", ctxErr
+ }
+ return "", err
+ }
+ if match != "" {
+ return span.URIFromPath(match), nil
+ }
+
+ // ...else we should check if there's exactly one nested module.
+ all, err := findModules(folderURI, excludePath, 2)
+ if err == errExhausted {
+ // Fall-back behavior: if we don't find any modules after searching 10000
+ // files, assume there are none.
+ event.Log(ctx, fmt.Sprintf("stopped searching for modules after %d files", fileLimit))
+ return "", nil
+ }
+ if err != nil {
+ return "", err
+ }
+ if len(all) == 1 {
+ // range to access first element.
+ for uri := range all {
+ return uri, nil
+ }
+ }
+ return "", nil
+}
+
+// workingDir returns the directory from which to run Go commands.
+//
+// The only case where this should matter is if we've narrowed the workspace to
+// a singular nested module. In that case, the go command won't be able to find
+// the module unless we tell it the nested directory.
+func (v *View) workingDir() span.URI {
+ // Note: if gowork is in use, this will default to the workspace folder. In
+ // the past, we would instead use the folder containing go.work. This should
+ // not make a difference, and in fact may improve go list error messages.
+ //
+ // TODO(golang/go#57514): eliminate the expandWorkspaceToModule setting
+ // entirely.
+ if v.Options().ExpandWorkspaceToModule && v.gomod != "" {
+ return span.Dir(v.gomod)
+ }
+ return v.folder
+}
+
+// findRootPattern looks for files with the given basename in dir or any parent
+// directory of dir, using the provided FileSource. It returns the first match,
+// starting from dir and search parents.
+//
+// The resulting string is either the file path of a matching file with the
+// given basename, or "" if none was found.
+func findRootPattern(ctx context.Context, dir, basename string, fs source.FileSource) (string, error) {
+ for dir != "" {
+ target := filepath.Join(dir, basename)
+ exists, err := fileExists(ctx, span.URIFromPath(target), fs)
+ if err != nil {
+ return "", err // not readable or context cancelled
+ }
+ if exists {
+ return target, nil
+ }
+ // Trailing separators must be trimmed, otherwise filepath.Split is a noop.
+ next, _ := filepath.Split(strings.TrimRight(dir, string(filepath.Separator)))
+ if next == dir {
+ break
+ }
+ dir = next
+ }
+ return "", nil
+}
+
+// OS-specific path case check, for case-insensitive filesystems.
+var checkPathCase = defaultCheckPathCase
+
+func defaultCheckPathCase(path string) error {
+ return nil
+}
+
+func (v *View) IsGoPrivatePath(target string) bool {
+ return globsMatchPath(v.goprivate, target)
+}
+
+func (v *View) ModuleUpgrades(modfile span.URI) map[string]string {
+ v.moduleUpgradesMu.Lock()
+ defer v.moduleUpgradesMu.Unlock()
+
+ upgrades := map[string]string{}
+ for mod, ver := range v.moduleUpgrades[modfile] {
+ upgrades[mod] = ver
+ }
+ return upgrades
+}
+
+func (v *View) RegisterModuleUpgrades(modfile span.URI, upgrades map[string]string) {
+ // Return early if there are no upgrades.
+ if len(upgrades) == 0 {
+ return
+ }
+
+ v.moduleUpgradesMu.Lock()
+ defer v.moduleUpgradesMu.Unlock()
+
+ m := v.moduleUpgrades[modfile]
+ if m == nil {
+ m = make(map[string]string)
+ v.moduleUpgrades[modfile] = m
+ }
+ for mod, ver := range upgrades {
+ m[mod] = ver
+ }
+}
+
+func (v *View) ClearModuleUpgrades(modfile span.URI) {
+ v.moduleUpgradesMu.Lock()
+ defer v.moduleUpgradesMu.Unlock()
+
+ delete(v.moduleUpgrades, modfile)
+}
+
+const maxGovulncheckResultAge = 1 * time.Hour // Invalidate results older than this limit.
+var timeNow = time.Now // for testing
+
+func (v *View) Vulnerabilities(modfiles ...span.URI) map[span.URI]*govulncheck.Result {
+ m := make(map[span.URI]*govulncheck.Result)
+ now := timeNow()
+ v.vulnsMu.Lock()
+ defer v.vulnsMu.Unlock()
+
+ if len(modfiles) == 0 { // empty means all modfiles
+ for modfile := range v.vulns {
+ modfiles = append(modfiles, modfile)
+ }
+ }
+ for _, modfile := range modfiles {
+ vuln := v.vulns[modfile]
+ if vuln != nil && now.Sub(vuln.AsOf) > maxGovulncheckResultAge {
+ v.vulns[modfile] = nil // same as SetVulnerabilities(modfile, nil)
+ vuln = nil
+ }
+ m[modfile] = vuln
+ }
+ return m
+}
+
+func (v *View) SetVulnerabilities(modfile span.URI, vulns *govulncheck.Result) {
+ v.vulnsMu.Lock()
+ defer v.vulnsMu.Unlock()
+
+ v.vulns[modfile] = vulns
+}
+
+func (v *View) GoVersion() int {
+ return v.workspaceInformation.goversion
+}
+
+func (v *View) GoVersionString() string {
+ return gocommand.ParseGoVersionOutput(v.workspaceInformation.goversionOutput)
+}
+
+// Copied from
+// https://cs.opensource.google/go/go/+/master:src/cmd/go/internal/str/path.go;l=58;drc=2910c5b4a01a573ebc97744890a07c1a3122c67a
+func globsMatchPath(globs, target string) bool {
+ for globs != "" {
+ // Extract next non-empty glob in comma-separated list.
+ var glob string
+ if i := strings.Index(globs, ","); i >= 0 {
+ glob, globs = globs[:i], globs[i+1:]
+ } else {
+ glob, globs = globs, ""
+ }
+ if glob == "" {
+ continue
+ }
+
+ // A glob with N+1 path elements (N slashes) needs to be matched
+ // against the first N+1 path elements of target,
+ // which end just before the N+1'th slash.
+ n := strings.Count(glob, "/")
+ prefix := target
+ // Walk target, counting slashes, truncating at the N+1'th slash.
+ for i := 0; i < len(target); i++ {
+ if target[i] == '/' {
+ if n == 0 {
+ prefix = target[:i]
+ break
+ }
+ n--
+ }
+ }
+ if n > 0 {
+ // Not enough prefix elements.
+ continue
+ }
+ matched, _ := path.Match(glob, prefix)
+ if matched {
+ return true
+ }
+ }
+ return false
+}
+
+var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`)
+
+// TODO(rstambler): Consolidate modURI and modContent back into a FileHandle
+// after we have a version of the workspace go.mod file on disk. Getting a
+// FileHandle from the cache for temporary files is problematic, since we
+// cannot delete it.
+func (s *snapshot) vendorEnabled(ctx context.Context, modURI span.URI, modContent []byte) (bool, error) {
+ // Legacy GOPATH workspace?
+ if s.workspaceMode()&moduleMode == 0 {
+ return false, nil
+ }
+
+ // Explicit -mod flag?
+ matches := modFlagRegexp.FindStringSubmatch(s.view.goflags)
+ if len(matches) != 0 {
+ modFlag := matches[1]
+ if modFlag != "" {
+ // Don't override an explicit '-mod=vendor' argument.
+ // We do want to override '-mod=readonly': it would break various module code lenses,
+ // and on 1.16 we know -modfile is available, so we won't mess with go.mod anyway.
+ return modFlag == "vendor", nil
+ }
+ }
+
+ modFile, err := modfile.Parse(modURI.Filename(), modContent, nil)
+ if err != nil {
+ return false, err
+ }
+
+ // No vendor directory?
+ // TODO(golang/go#57514): this is wrong if the working dir is not the module
+ // root.
+ if fi, err := os.Stat(filepath.Join(s.view.workingDir().Filename(), "vendor")); err != nil || !fi.IsDir() {
+ return false, nil
+ }
+
+ // Vendoring enabled by default by go declaration in go.mod?
+ vendorEnabled := modFile.Go != nil && modFile.Go.Version != "" && semver.Compare("v"+modFile.Go.Version, "v1.14") >= 0
+ return vendorEnabled, nil
+}
+
+// TODO(rfindley): clean up the redundancy of allFilesExcluded,
+// pathExcludedByFilterFunc, pathExcludedByFilter, view.filterFunc...
+func allFilesExcluded(files []string, filterFunc func(span.URI) bool) bool {
+ for _, f := range files {
+ uri := span.URIFromPath(f)
+ if !filterFunc(uri) {
+ return false
+ }
+ }
+ return true
+}
+
+func pathExcludedByFilterFunc(folder, gomodcache string, opts *source.Options) func(string) bool {
+ filterer := buildFilterer(folder, gomodcache, opts)
+ return func(path string) bool {
+ return pathExcludedByFilter(path, filterer)
+ }
+}
+
+// pathExcludedByFilter reports whether the path (relative to the workspace
+// folder) should be excluded by the configured directory filters.
+//
+// TODO(rfindley): passing root and gomodcache here makes it confusing whether
+// path should be absolute or relative, and has already caused at least one
+// bug.
+func pathExcludedByFilter(path string, filterer *source.Filterer) bool {
+ path = strings.TrimPrefix(filepath.ToSlash(path), "/")
+ return filterer.Disallow(path)
+}
+
+func buildFilterer(folder, gomodcache string, opts *source.Options) *source.Filterer {
+ filters := opts.DirectoryFilters
+
+ if pref := strings.TrimPrefix(gomodcache, folder); pref != gomodcache {
+ modcacheFilter := "-" + strings.TrimPrefix(filepath.ToSlash(pref), "/")
+ filters = append(filters, modcacheFilter)
+ }
+ return source.NewFilterer(filters)
+}
diff --git a/gopls/internal/lsp/cache/view_test.go b/gopls/internal/lsp/cache/view_test.go
new file mode 100644
index 000000000..9e6d23bb8
--- /dev/null
+++ b/gopls/internal/lsp/cache/view_test.go
@@ -0,0 +1,278 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package cache
+
+import (
+ "context"
+ "encoding/json"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "testing"
+ "time"
+
+ "github.com/google/go-cmp/cmp"
+ "golang.org/x/tools/gopls/internal/govulncheck"
+ "golang.org/x/tools/gopls/internal/lsp/fake"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+)
+
+func TestCaseInsensitiveFilesystem(t *testing.T) {
+ base, err := ioutil.TempDir("", t.Name())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ inner := filepath.Join(base, "a/B/c/DEFgh")
+ if err := os.MkdirAll(inner, 0777); err != nil {
+ t.Fatal(err)
+ }
+ file := filepath.Join(inner, "f.go")
+ if err := ioutil.WriteFile(file, []byte("hi"), 0777); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := os.Stat(filepath.Join(inner, "F.go")); err != nil {
+ t.Skip("filesystem is case-sensitive")
+ }
+
+ tests := []struct {
+ path string
+ err bool
+ }{
+ {file, false},
+ {filepath.Join(inner, "F.go"), true},
+ {filepath.Join(base, "a/b/c/defgh/f.go"), true},
+ }
+ for _, tt := range tests {
+ err := checkPathCase(tt.path)
+ if err != nil != tt.err {
+ t.Errorf("checkPathCase(%q) = %v, wanted error: %v", tt.path, err, tt.err)
+ }
+ }
+}
+
+func TestFindWorkspaceModFile(t *testing.T) {
+ workspace := `
+-- a/go.mod --
+module a
+-- a/x/x.go
+package x
+-- a/x/y/y.go
+package x
+-- b/go.mod --
+module b
+-- b/c/go.mod --
+module bc
+-- d/gopls.mod --
+module d-goplsworkspace
+-- d/e/go.mod --
+module de
+-- f/g/go.mod --
+module fg
+`
+ dir, err := fake.Tempdir(fake.UnpackTxt(workspace))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dir)
+
+ tests := []struct {
+ folder, want string
+ }{
+ {"", ""}, // no module at root, and more than one nested module
+ {"a", "a/go.mod"},
+ {"a/x", "a/go.mod"},
+ {"a/x/y", "a/go.mod"},
+ {"b/c", "b/c/go.mod"},
+ {"d", "d/e/go.mod"},
+ {"d/e", "d/e/go.mod"},
+ {"f", "f/g/go.mod"},
+ }
+
+ for _, test := range tests {
+ ctx := context.Background()
+ rel := fake.RelativeTo(dir)
+ folderURI := span.URIFromPath(rel.AbsPath(test.folder))
+ excludeNothing := func(string) bool { return false }
+ got, err := findWorkspaceModFile(ctx, folderURI, New(nil), excludeNothing)
+ if err != nil {
+ t.Fatal(err)
+ }
+ want := span.URI("")
+ if test.want != "" {
+ want = span.URIFromPath(rel.AbsPath(test.want))
+ }
+ if got != want {
+ t.Errorf("findWorkspaceModFile(%q) = %q, want %q", test.folder, got, want)
+ }
+ }
+}
+
+func TestInVendor(t *testing.T) {
+ for _, tt := range []struct {
+ path string
+ inVendor bool
+ }{
+ {"foo/vendor/x.go", false},
+ {"foo/vendor/x/x.go", true},
+ {"foo/x.go", false},
+ {"foo/vendor/foo.txt", false},
+ {"foo/vendor/modules.txt", false},
+ } {
+ if got := inVendor(span.URIFromPath(tt.path)); got != tt.inVendor {
+ t.Errorf("expected %s inVendor %v, got %v", tt.path, tt.inVendor, got)
+ }
+ }
+}
+
+func TestFilters(t *testing.T) {
+ tests := []struct {
+ filters []string
+ included []string
+ excluded []string
+ }{
+ {
+ included: []string{"x"},
+ },
+ {
+ filters: []string{"-"},
+ excluded: []string{"x", "x/a"},
+ },
+ {
+ filters: []string{"-x", "+y"},
+ included: []string{"y", "y/a", "z"},
+ excluded: []string{"x", "x/a"},
+ },
+ {
+ filters: []string{"-x", "+x/y", "-x/y/z"},
+ included: []string{"x/y", "x/y/a", "a"},
+ excluded: []string{"x", "x/a", "x/y/z/a"},
+ },
+ {
+ filters: []string{"+foobar", "-foo"},
+ included: []string{"foobar", "foobar/a"},
+ excluded: []string{"foo", "foo/a"},
+ },
+ }
+
+ for _, tt := range tests {
+ filterer := source.NewFilterer(tt.filters)
+ for _, inc := range tt.included {
+ if pathExcludedByFilter(inc, filterer) {
+ t.Errorf("filters %q excluded %v, wanted included", tt.filters, inc)
+ }
+ }
+ for _, exc := range tt.excluded {
+ if !pathExcludedByFilter(exc, filterer) {
+ t.Errorf("filters %q included %v, wanted excluded", tt.filters, exc)
+ }
+ }
+ }
+}
+
+func TestSuffixes(t *testing.T) {
+ type file struct {
+ path string
+ want bool
+ }
+ type cases struct {
+ option []string
+ files []file
+ }
+ tests := []cases{
+ {[]string{"tmpl", "gotmpl"}, []file{ // default
+ {"foo", false},
+ {"foo.tmpl", true},
+ {"foo.gotmpl", true},
+ {"tmpl", false},
+ {"tmpl.go", false}},
+ },
+ {[]string{"tmpl", "gotmpl", "html", "gohtml"}, []file{
+ {"foo.gotmpl", true},
+ {"foo.html", true},
+ {"foo.gohtml", true},
+ {"html", false}},
+ },
+ {[]string{"tmpl", "gotmpl", ""}, []file{ // possible user mistake
+ {"foo.gotmpl", true},
+ {"foo.go", false},
+ {"foo", false}},
+ },
+ }
+ for _, a := range tests {
+ suffixes := a.option
+ for _, b := range a.files {
+ got := fileHasExtension(b.path, suffixes)
+ if got != b.want {
+ t.Errorf("got %v, want %v, option %q, file %q (%+v)",
+ got, b.want, a.option, b.path, b)
+ }
+ }
+ }
+}
+
+func TestView_Vulnerabilities(t *testing.T) {
+ // TODO(hyangah): use t.Cleanup when we get rid of go1.13 legacy CI.
+ defer func() { timeNow = time.Now }()
+
+ now := time.Now()
+
+ view := &View{
+ vulns: make(map[span.URI]*govulncheck.Result),
+ }
+ file1, file2 := span.URIFromPath("f1/go.mod"), span.URIFromPath("f2/go.mod")
+
+ vuln1 := &govulncheck.Result{AsOf: now.Add(-(maxGovulncheckResultAge * 3) / 4)} // already ~3/4*maxGovulncheckResultAge old
+ view.SetVulnerabilities(file1, vuln1)
+
+ vuln2 := &govulncheck.Result{AsOf: now} // fresh.
+ view.SetVulnerabilities(file2, vuln2)
+
+ t.Run("fresh", func(t *testing.T) {
+ got := view.Vulnerabilities()
+ want := map[span.URI]*govulncheck.Result{
+ file1: vuln1,
+ file2: vuln2,
+ }
+
+ if diff := cmp.Diff(toJSON(want), toJSON(got)); diff != "" {
+ t.Errorf("view.Vulnerabilities() mismatch (-want +got):\n%s", diff)
+ }
+ })
+
+ // maxGovulncheckResultAge/2 later
+ timeNow = func() time.Time { return now.Add(maxGovulncheckResultAge / 2) }
+ t.Run("after30min", func(t *testing.T) {
+ got := view.Vulnerabilities()
+ want := map[span.URI]*govulncheck.Result{
+ file1: nil, // expired.
+ file2: vuln2,
+ }
+
+ if diff := cmp.Diff(toJSON(want), toJSON(got)); diff != "" {
+ t.Errorf("view.Vulnerabilities() mismatch (-want +got):\n%s", diff)
+ }
+ })
+
+ // maxGovulncheckResultAge later
+ timeNow = func() time.Time { return now.Add(maxGovulncheckResultAge + time.Minute) }
+
+ t.Run("after1hr", func(t *testing.T) {
+ got := view.Vulnerabilities()
+ want := map[span.URI]*govulncheck.Result{
+ file1: nil,
+ file2: nil,
+ }
+
+ if diff := cmp.Diff(toJSON(want), toJSON(got)); diff != "" {
+ t.Errorf("view.Vulnerabilities() mismatch (-want +got):\n%s", diff)
+ }
+ })
+}
+
+func toJSON(x interface{}) string {
+ b, _ := json.MarshalIndent(x, "", " ")
+ return string(b)
+}
diff --git a/gopls/internal/lsp/cache/workspace.go b/gopls/internal/lsp/cache/workspace.go
new file mode 100644
index 000000000..e9845e89b
--- /dev/null
+++ b/gopls/internal/lsp/cache/workspace.go
@@ -0,0 +1,177 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cache
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ "golang.org/x/mod/modfile"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+)
+
+// TODO(rfindley): now that experimentalWorkspaceModule is gone, this file can
+// be massively cleaned up and/or removed.
+
+// computeWorkspaceModFiles computes the set of workspace mod files based on the
+// value of go.mod, go.work, and GO111MODULE.
+func computeWorkspaceModFiles(ctx context.Context, gomod, gowork span.URI, go111module go111module, fs source.FileSource) (map[span.URI]struct{}, error) {
+ if go111module == off {
+ return nil, nil
+ }
+ if gowork != "" {
+ fh, err := fs.GetFile(ctx, gowork)
+ if err != nil {
+ return nil, err
+ }
+ content, err := fh.Read()
+ if err != nil {
+ return nil, err
+ }
+ filename := gowork.Filename()
+ dir := filepath.Dir(filename)
+ workFile, err := modfile.ParseWork(filename, content, nil)
+ if err != nil {
+ return nil, fmt.Errorf("parsing go.work: %w", err)
+ }
+ modFiles := make(map[span.URI]struct{})
+ for _, use := range workFile.Use {
+ modDir := filepath.FromSlash(use.Path)
+ if !filepath.IsAbs(modDir) {
+ modDir = filepath.Join(dir, modDir)
+ }
+ modURI := span.URIFromPath(filepath.Join(modDir, "go.mod"))
+ modFiles[modURI] = struct{}{}
+ }
+ return modFiles, nil
+ }
+ if gomod != "" {
+ return map[span.URI]struct{}{gomod: {}}, nil
+ }
+ return nil, nil
+}
+
+// dirs returns the workspace directories for the loaded modules.
+//
+// A workspace directory is, roughly speaking, a directory for which we care
+// about file changes. This is used for the purpose of registering file
+// watching patterns, and expanding directory modifications to their adjacent
+// files.
+//
+// TODO(rfindley): move this to snapshot.go.
+// TODO(rfindley): can we make this abstraction simpler and/or more accurate?
+func (s *snapshot) dirs(ctx context.Context) []span.URI {
+ dirSet := make(map[span.URI]struct{})
+
+ // Dirs should, at the very least, contain the working directory and folder.
+ dirSet[s.view.workingDir()] = struct{}{}
+ dirSet[s.view.folder] = struct{}{}
+
+ // Additionally, if e.g. go.work indicates other workspace modules, we should
+ // include their directories too.
+ if s.workspaceModFilesErr == nil {
+ for modFile := range s.workspaceModFiles {
+ dir := filepath.Dir(modFile.Filename())
+ dirSet[span.URIFromPath(dir)] = struct{}{}
+ }
+ }
+ var dirs []span.URI
+ for d := range dirSet {
+ dirs = append(dirs, d)
+ }
+ sort.Slice(dirs, func(i, j int) bool { return dirs[i] < dirs[j] })
+ return dirs
+}
+
+// isGoMod reports if uri is a go.mod file.
+func isGoMod(uri span.URI) bool {
+ return filepath.Base(uri.Filename()) == "go.mod"
+}
+
+// isGoWork reports if uri is a go.work file.
+func isGoWork(uri span.URI) bool {
+ return filepath.Base(uri.Filename()) == "go.work"
+}
+
+// fileExists reports if the file uri exists within source.
+func fileExists(ctx context.Context, uri span.URI, source source.FileSource) (bool, error) {
+ fh, err := source.GetFile(ctx, uri)
+ if err != nil {
+ return false, err
+ }
+ return fileHandleExists(fh)
+}
+
+// fileHandleExists reports if the file underlying fh actually exits.
+func fileHandleExists(fh source.FileHandle) (bool, error) {
+ _, err := fh.Read()
+ if err == nil {
+ return true, nil
+ }
+ if os.IsNotExist(err) {
+ return false, nil
+ }
+ return false, err
+}
+
+// errExhausted is returned by findModules if the file scan limit is reached.
+var errExhausted = errors.New("exhausted")
+
+// Limit go.mod search to 1 million files. As a point of reference,
+// Kubernetes has 22K files (as of 2020-11-24).
+const fileLimit = 1000000
+
+// findModules recursively walks the root directory looking for go.mod files,
+// returning the set of modules it discovers. If modLimit is non-zero,
+// searching stops once modLimit modules have been found.
+//
+// TODO(rfindley): consider overlays.
+func findModules(root span.URI, excludePath func(string) bool, modLimit int) (map[span.URI]struct{}, error) {
+ // Walk the view's folder to find all modules in the view.
+ modFiles := make(map[span.URI]struct{})
+ searched := 0
+ errDone := errors.New("done")
+ err := filepath.Walk(root.Filename(), func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ // Probably a permission error. Keep looking.
+ return filepath.SkipDir
+ }
+ // For any path that is not the workspace folder, check if the path
+ // would be ignored by the go command. Vendor directories also do not
+ // contain workspace modules.
+ if info.IsDir() && path != root.Filename() {
+ suffix := strings.TrimPrefix(path, root.Filename())
+ switch {
+ case checkIgnored(suffix),
+ strings.Contains(filepath.ToSlash(suffix), "/vendor/"),
+ excludePath(suffix):
+ return filepath.SkipDir
+ }
+ }
+ // We're only interested in go.mod files.
+ uri := span.URIFromPath(path)
+ if isGoMod(uri) {
+ modFiles[uri] = struct{}{}
+ }
+ if modLimit > 0 && len(modFiles) >= modLimit {
+ return errDone
+ }
+ searched++
+ if fileLimit > 0 && searched >= fileLimit {
+ return errExhausted
+ }
+ return nil
+ })
+ if err == errDone {
+ return modFiles, nil
+ }
+ return modFiles, err
+}
diff --git a/gopls/internal/lsp/call_hierarchy.go b/gopls/internal/lsp/call_hierarchy.go
new file mode 100644
index 000000000..79eeb25cc
--- /dev/null
+++ b/gopls/internal/lsp/call_hierarchy.go
@@ -0,0 +1,42 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsp
+
+import (
+ "context"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+)
+
+func (s *Server) prepareCallHierarchy(ctx context.Context, params *protocol.CallHierarchyPrepareParams) ([]protocol.CallHierarchyItem, error) {
+ snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.Go)
+ defer release()
+ if !ok {
+ return nil, err
+ }
+
+ return source.PrepareCallHierarchy(ctx, snapshot, fh, params.Position)
+}
+
+func (s *Server) incomingCalls(ctx context.Context, params *protocol.CallHierarchyIncomingCallsParams) ([]protocol.CallHierarchyIncomingCall, error) {
+ snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.Item.URI, source.Go)
+ defer release()
+ if !ok {
+ return nil, err
+ }
+
+ return source.IncomingCalls(ctx, snapshot, fh, params.Item.Range.Start)
+}
+
+func (s *Server) outgoingCalls(ctx context.Context, params *protocol.CallHierarchyOutgoingCallsParams) ([]protocol.CallHierarchyOutgoingCall, error) {
+ snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.Item.URI, source.Go)
+ defer release()
+ if !ok {
+ return nil, err
+ }
+
+ return source.OutgoingCalls(ctx, snapshot, fh, params.Item.Range.Start)
+}
diff --git a/gopls/internal/lsp/cmd/call_hierarchy.go b/gopls/internal/lsp/cmd/call_hierarchy.go
new file mode 100644
index 000000000..eb5d29de8
--- /dev/null
+++ b/gopls/internal/lsp/cmd/call_hierarchy.go
@@ -0,0 +1,142 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmd
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "strings"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/tool"
+)
+
+// callHierarchy implements the callHierarchy verb for gopls.
+type callHierarchy struct {
+ app *Application
+}
+
+func (c *callHierarchy) Name() string { return "call_hierarchy" }
+func (c *callHierarchy) Parent() string { return c.app.Name() }
+func (c *callHierarchy) Usage() string { return "<position>" }
+func (c *callHierarchy) ShortHelp() string { return "display selected identifier's call hierarchy" }
+func (c *callHierarchy) DetailedHelp(f *flag.FlagSet) {
+ fmt.Fprint(f.Output(), `
+Example:
+
+ $ # 1-indexed location (:line:column or :#offset) of the target identifier
+ $ gopls call_hierarchy helper/helper.go:8:6
+ $ gopls call_hierarchy helper/helper.go:#53
+`)
+ printFlagDefaults(f)
+}
+
+func (c *callHierarchy) Run(ctx context.Context, args ...string) error {
+ if len(args) != 1 {
+ return tool.CommandLineErrorf("call_hierarchy expects 1 argument (position)")
+ }
+
+ conn, err := c.app.connect(ctx)
+ if err != nil {
+ return err
+ }
+ defer conn.terminate(ctx)
+
+ from := span.Parse(args[0])
+ file := conn.openFile(ctx, from.URI())
+ if file.err != nil {
+ return file.err
+ }
+
+ loc, err := file.mapper.SpanLocation(from)
+ if err != nil {
+ return err
+ }
+
+ p := protocol.CallHierarchyPrepareParams{
+ TextDocumentPositionParams: protocol.LocationTextDocumentPositionParams(loc),
+ }
+
+ callItems, err := conn.PrepareCallHierarchy(ctx, &p)
+ if err != nil {
+ return err
+ }
+ if len(callItems) == 0 {
+ return fmt.Errorf("function declaration identifier not found at %v", args[0])
+ }
+
+ for _, item := range callItems {
+ incomingCalls, err := conn.IncomingCalls(ctx, &protocol.CallHierarchyIncomingCallsParams{Item: item})
+ if err != nil {
+ return err
+ }
+ for i, call := range incomingCalls {
+ // From the spec: CallHierarchyIncomingCall.FromRanges is relative to
+ // the caller denoted by CallHierarchyIncomingCall.from.
+ printString, err := callItemPrintString(ctx, conn, call.From, call.From.URI, call.FromRanges)
+ if err != nil {
+ return err
+ }
+ fmt.Printf("caller[%d]: %s\n", i, printString)
+ }
+
+ printString, err := callItemPrintString(ctx, conn, item, "", nil)
+ if err != nil {
+ return err
+ }
+ fmt.Printf("identifier: %s\n", printString)
+
+ outgoingCalls, err := conn.OutgoingCalls(ctx, &protocol.CallHierarchyOutgoingCallsParams{Item: item})
+ if err != nil {
+ return err
+ }
+ for i, call := range outgoingCalls {
+ // From the spec: CallHierarchyOutgoingCall.FromRanges is the range
+ // relative to the caller, e.g the item passed to
+ printString, err := callItemPrintString(ctx, conn, call.To, item.URI, call.FromRanges)
+ if err != nil {
+ return err
+ }
+ fmt.Printf("callee[%d]: %s\n", i, printString)
+ }
+ }
+
+ return nil
+}
+
+// callItemPrintString returns a protocol.CallHierarchyItem object represented as a string.
+// item and call ranges (protocol.Range) are converted to user friendly spans (1-indexed).
+func callItemPrintString(ctx context.Context, conn *connection, item protocol.CallHierarchyItem, callsURI protocol.DocumentURI, calls []protocol.Range) (string, error) {
+ itemFile := conn.openFile(ctx, item.URI.SpanURI())
+ if itemFile.err != nil {
+ return "", itemFile.err
+ }
+ itemSpan, err := itemFile.mapper.LocationSpan(protocol.Location{URI: item.URI, Range: item.Range})
+ if err != nil {
+ return "", err
+ }
+
+ callsFile := conn.openFile(ctx, callsURI.SpanURI())
+ if callsURI != "" && callsFile.err != nil {
+ return "", callsFile.err
+ }
+ var callRanges []string
+ for _, rng := range calls {
+ call, err := callsFile.mapper.RangeSpan(rng)
+ if err != nil {
+ return "", err
+ }
+ callRange := fmt.Sprintf("%d:%d-%d", call.Start().Line(), call.Start().Column(), call.End().Column())
+ callRanges = append(callRanges, callRange)
+ }
+
+ printString := fmt.Sprintf("function %s in %v", item.Name, itemSpan)
+ if len(calls) > 0 {
+ printString = fmt.Sprintf("ranges %s in %s from/to %s", strings.Join(callRanges, ", "), callsURI.SpanURI().Filename(), printString)
+ }
+ return printString, nil
+}
diff --git a/gopls/internal/lsp/cmd/capabilities_test.go b/gopls/internal/lsp/cmd/capabilities_test.go
new file mode 100644
index 000000000..753d9bf39
--- /dev/null
+++ b/gopls/internal/lsp/cmd/capabilities_test.go
@@ -0,0 +1,166 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmd
+
+import (
+ "context"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "golang.org/x/tools/gopls/internal/lsp"
+ "golang.org/x/tools/gopls/internal/lsp/cache"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+)
+
+// TestCapabilities does some minimal validation of the server's adherence to the LSP.
+// The checks in the test are added as changes are made and errors noticed.
+func TestCapabilities(t *testing.T) {
+ tmpDir, err := ioutil.TempDir("", "fake")
+ if err != nil {
+ t.Fatal(err)
+ }
+ tmpFile := filepath.Join(tmpDir, "fake.go")
+ if err := ioutil.WriteFile(tmpFile, []byte(""), 0775); err != nil {
+ t.Fatal(err)
+ }
+ if err := ioutil.WriteFile(filepath.Join(tmpDir, "go.mod"), []byte("module fake\n\ngo 1.12\n"), 0775); err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpDir)
+
+ app := New("gopls-test", tmpDir, os.Environ(), nil)
+ c := newConnection(app)
+ ctx := context.Background()
+ defer c.terminate(ctx)
+
+ params := &protocol.ParamInitialize{}
+ params.RootURI = protocol.URIFromPath(c.Client.app.wd)
+ params.Capabilities.Workspace.Configuration = true
+
+ // Send an initialize request to the server.
+ c.Server = lsp.NewServer(cache.NewSession(ctx, cache.New(nil), app.options), c.Client)
+ result, err := c.Server.Initialize(ctx, params)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Validate initialization result.
+ if err := validateCapabilities(result); err != nil {
+ t.Error(err)
+ }
+ // Complete initialization of server.
+ if err := c.Server.Initialized(ctx, &protocol.InitializedParams{}); err != nil {
+ t.Fatal(err)
+ }
+
+ // Open the file on the server side.
+ uri := protocol.URIFromPath(tmpFile)
+ if err := c.Server.DidOpen(ctx, &protocol.DidOpenTextDocumentParams{
+ TextDocument: protocol.TextDocumentItem{
+ URI: uri,
+ LanguageID: "go",
+ Version: 1,
+ Text: `package main; func main() {};`,
+ },
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ // If we are sending a full text change, the change.Range must be nil.
+ // It is not enough for the Change to be empty, as that is ambiguous.
+ if err := c.Server.DidChange(ctx, &protocol.DidChangeTextDocumentParams{
+ TextDocument: protocol.VersionedTextDocumentIdentifier{
+ TextDocumentIdentifier: protocol.TextDocumentIdentifier{
+ URI: uri,
+ },
+ Version: 2,
+ },
+ ContentChanges: []protocol.TextDocumentContentChangeEvent{
+ {
+ Range: nil,
+ Text: `package main; func main() { fmt.Println("") }`,
+ },
+ },
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ // Send a code action request to validate expected types.
+ actions, err := c.Server.CodeAction(ctx, &protocol.CodeActionParams{
+ TextDocument: protocol.TextDocumentIdentifier{
+ URI: uri,
+ },
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ for _, action := range actions {
+ // Validate that an empty command is sent along with import organization responses.
+ if action.Kind == protocol.SourceOrganizeImports && action.Command != nil {
+ t.Errorf("unexpected command for import organization")
+ }
+ }
+
+ if err := c.Server.DidSave(ctx, &protocol.DidSaveTextDocumentParams{
+ TextDocument: protocol.TextDocumentIdentifier{
+ URI: uri,
+ },
+ // LSP specifies that a file can be saved with optional text, so this field must be nil.
+ Text: nil,
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ // Send a completion request to validate expected types.
+ list, err := c.Server.Completion(ctx, &protocol.CompletionParams{
+ TextDocumentPositionParams: protocol.TextDocumentPositionParams{
+ TextDocument: protocol.TextDocumentIdentifier{
+ URI: uri,
+ },
+ Position: protocol.Position{
+ Line: 0,
+ Character: 28,
+ },
+ },
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ for _, item := range list.Items {
+ // All other completion items should have nil commands.
+ // An empty command will be treated as a command with the name '' by VS Code.
+ // This causes VS Code to report errors to users about invalid commands.
+ if item.Command != nil {
+ t.Errorf("unexpected command for completion item")
+ }
+ // The item's TextEdit must be a pointer, as VS Code considers TextEdits
+ // that don't contain the cursor position to be invalid.
+ var textEdit interface{} = item.TextEdit
+ if _, ok := textEdit.(*protocol.TextEdit); !ok {
+ t.Errorf("textEdit is not a *protocol.TextEdit, instead it is %T", textEdit)
+ }
+ }
+ if err := c.Server.Shutdown(ctx); err != nil {
+ t.Fatal(err)
+ }
+ if err := c.Server.Exit(ctx); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func validateCapabilities(result *protocol.InitializeResult) error {
+ // If the client sends "false" for RenameProvider.PrepareSupport,
+ // the server must respond with a boolean.
+ if v, ok := result.Capabilities.RenameProvider.(bool); !ok {
+ return fmt.Errorf("RenameProvider must be a boolean if PrepareSupport is false (got %T)", v)
+ }
+ // The same goes for CodeActionKind.ValueSet.
+ if v, ok := result.Capabilities.CodeActionProvider.(bool); !ok {
+ return fmt.Errorf("CodeActionSupport must be a boolean if CodeActionKind.ValueSet has length 0 (got %T)", v)
+ }
+ return nil
+}
diff --git a/gopls/internal/lsp/cmd/check.go b/gopls/internal/lsp/cmd/check.go
new file mode 100644
index 000000000..cf081ca26
--- /dev/null
+++ b/gopls/internal/lsp/cmd/check.go
@@ -0,0 +1,73 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmd
+
+import (
+ "context"
+ "flag"
+ "fmt"
+
+ "golang.org/x/tools/gopls/internal/span"
+)
+
+// check implements the check verb for gopls.
+type check struct {
+ app *Application
+}
+
+func (c *check) Name() string { return "check" }
+func (c *check) Parent() string { return c.app.Name() }
+func (c *check) Usage() string { return "<filename>" }
+func (c *check) ShortHelp() string { return "show diagnostic results for the specified file" }
+func (c *check) DetailedHelp(f *flag.FlagSet) {
+ fmt.Fprint(f.Output(), `
+Example: show the diagnostic results of this file:
+
+ $ gopls check internal/lsp/cmd/check.go
+`)
+ printFlagDefaults(f)
+}
+
+// Run performs the check on the files specified by args and prints the
+// results to stdout.
+func (c *check) Run(ctx context.Context, args ...string) error {
+ if len(args) == 0 {
+ // no files, so no results
+ return nil
+ }
+ checking := map[span.URI]*cmdFile{}
+ var uris []span.URI
+ // now we ready to kick things off
+ conn, err := c.app.connect(ctx)
+ if err != nil {
+ return err
+ }
+ defer conn.terminate(ctx)
+ for _, arg := range args {
+ uri := span.URIFromPath(arg)
+ uris = append(uris, uri)
+ file := conn.openFile(ctx, uri)
+ if file.err != nil {
+ return file.err
+ }
+ checking[uri] = file
+ }
+ if err := conn.diagnoseFiles(ctx, uris); err != nil {
+ return err
+ }
+ conn.Client.filesMu.Lock()
+ defer conn.Client.filesMu.Unlock()
+
+ for _, file := range checking {
+ for _, d := range file.diagnostics {
+ spn, err := file.mapper.RangeSpan(d.Range)
+ if err != nil {
+ return fmt.Errorf("Could not convert position %v for %q", d.Range, d.Message)
+ }
+ fmt.Printf("%v: %v\n", spn, d.Message)
+ }
+ }
+ return nil
+}
diff --git a/gopls/internal/lsp/cmd/cmd.go b/gopls/internal/lsp/cmd/cmd.go
new file mode 100644
index 000000000..a48eb16d3
--- /dev/null
+++ b/gopls/internal/lsp/cmd/cmd.go
@@ -0,0 +1,640 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cmd handles the gopls command line.
+// It contains a handler for each of the modes, along with all the flag handling
+// and the command line output format.
+package cmd
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+ "text/tabwriter"
+ "time"
+
+ "golang.org/x/tools/gopls/internal/lsp"
+ "golang.org/x/tools/gopls/internal/lsp/cache"
+ "golang.org/x/tools/gopls/internal/lsp/debug"
+ "golang.org/x/tools/gopls/internal/lsp/lsprpc"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/jsonrpc2"
+ "golang.org/x/tools/internal/tool"
+ "golang.org/x/tools/internal/xcontext"
+)
+
+// Application is the main application as passed to tool.Main
+// It handles the main command line parsing and dispatch to the sub commands.
+type Application struct {
+ // Core application flags
+
+ // Embed the basic profiling flags supported by the tool package
+ tool.Profile
+
+ // We include the server configuration directly for now, so the flags work
+ // even without the verb.
+ // TODO: Remove this when we stop allowing the serve verb by default.
+ Serve Serve
+
+ // the options configuring function to invoke when building a server
+ options func(*source.Options)
+
+ // The name of the binary, used in help and telemetry.
+ name string
+
+ // The working directory to run commands in.
+ wd string
+
+ // The environment variables to use.
+ env []string
+
+ // Support for remote LSP server.
+ Remote string `flag:"remote" help:"forward all commands to a remote lsp specified by this flag. With no special prefix, this is assumed to be a TCP address. If prefixed by 'unix;', the subsequent address is assumed to be a unix domain socket. If 'auto', or prefixed by 'auto;', the remote address is automatically resolved based on the executing environment."`
+
+ // Verbose enables verbose logging.
+ Verbose bool `flag:"v,verbose" help:"verbose output"`
+
+ // VeryVerbose enables a higher level of verbosity in logging output.
+ VeryVerbose bool `flag:"vv,veryverbose" help:"very verbose output"`
+
+ // Control ocagent export of telemetry
+ OCAgent string `flag:"ocagent" help:"the address of the ocagent (e.g. http://localhost:55678), or off"`
+
+ // PrepareOptions is called to update the options when a new view is built.
+ // It is primarily to allow the behavior of gopls to be modified by hooks.
+ PrepareOptions func(*source.Options)
+}
+
+func (app *Application) verbose() bool {
+ return app.Verbose || app.VeryVerbose
+}
+
+// New returns a new Application ready to run.
+func New(name, wd string, env []string, options func(*source.Options)) *Application {
+ if wd == "" {
+ wd, _ = os.Getwd()
+ }
+ app := &Application{
+ options: options,
+ name: name,
+ wd: wd,
+ env: env,
+ OCAgent: "off", //TODO: Remove this line to default the exporter to on
+
+ Serve: Serve{
+ RemoteListenTimeout: 1 * time.Minute,
+ },
+ }
+ app.Serve.app = app
+ return app
+}
+
+// Name implements tool.Application returning the binary name.
+func (app *Application) Name() string { return app.name }
+
+// Usage implements tool.Application returning empty extra argument usage.
+func (app *Application) Usage() string { return "" }
+
+// ShortHelp implements tool.Application returning the main binary help.
+func (app *Application) ShortHelp() string {
+ return ""
+}
+
+// DetailedHelp implements tool.Application returning the main binary help.
+// This includes the short help for all the sub commands.
+func (app *Application) DetailedHelp(f *flag.FlagSet) {
+ w := tabwriter.NewWriter(f.Output(), 0, 0, 2, ' ', 0)
+ defer w.Flush()
+
+ fmt.Fprint(w, `
+gopls is a Go language server.
+
+It is typically used with an editor to provide language features. When no
+command is specified, gopls will default to the 'serve' command. The language
+features can also be accessed via the gopls command-line interface.
+
+Usage:
+ gopls help [<subject>]
+
+Command:
+`)
+ fmt.Fprint(w, "\nMain\t\n")
+ for _, c := range app.mainCommands() {
+ fmt.Fprintf(w, " %s\t%s\n", c.Name(), c.ShortHelp())
+ }
+ fmt.Fprint(w, "\t\nFeatures\t\n")
+ for _, c := range app.featureCommands() {
+ fmt.Fprintf(w, " %s\t%s\n", c.Name(), c.ShortHelp())
+ }
+ fmt.Fprint(w, "\nflags:\n")
+ printFlagDefaults(f)
+}
+
+// this is a slightly modified version of flag.PrintDefaults to give us control
+func printFlagDefaults(s *flag.FlagSet) {
+ var flags [][]*flag.Flag
+ seen := map[flag.Value]int{}
+ s.VisitAll(func(f *flag.Flag) {
+ if i, ok := seen[f.Value]; !ok {
+ seen[f.Value] = len(flags)
+ flags = append(flags, []*flag.Flag{f})
+ } else {
+ flags[i] = append(flags[i], f)
+ }
+ })
+ for _, entry := range flags {
+ sort.SliceStable(entry, func(i, j int) bool {
+ return len(entry[i].Name) < len(entry[j].Name)
+ })
+ var b strings.Builder
+ for i, f := range entry {
+ switch i {
+ case 0:
+ b.WriteString(" -")
+ default:
+ b.WriteString(",-")
+ }
+ b.WriteString(f.Name)
+ }
+
+ f := entry[0]
+ name, usage := flag.UnquoteUsage(f)
+ if len(name) > 0 {
+ b.WriteString("=")
+ b.WriteString(name)
+ }
+ // Boolean flags of one ASCII letter are so common we
+ // treat them specially, putting their usage on the same line.
+ if b.Len() <= 4 { // space, space, '-', 'x'.
+ b.WriteString("\t")
+ } else {
+ // Four spaces before the tab triggers good alignment
+ // for both 4- and 8-space tab stops.
+ b.WriteString("\n \t")
+ }
+ b.WriteString(strings.ReplaceAll(usage, "\n", "\n \t"))
+ if !isZeroValue(f, f.DefValue) {
+ if reflect.TypeOf(f.Value).Elem().Name() == "stringValue" {
+ fmt.Fprintf(&b, " (default %q)", f.DefValue)
+ } else {
+ fmt.Fprintf(&b, " (default %v)", f.DefValue)
+ }
+ }
+ fmt.Fprint(s.Output(), b.String(), "\n")
+ }
+}
+
+// isZeroValue is copied from the flags package
+func isZeroValue(f *flag.Flag, value string) bool {
+ // Build a zero value of the flag's Value type, and see if the
+ // result of calling its String method equals the value passed in.
+ // This works unless the Value type is itself an interface type.
+ typ := reflect.TypeOf(f.Value)
+ var z reflect.Value
+ if typ.Kind() == reflect.Ptr {
+ z = reflect.New(typ.Elem())
+ } else {
+ z = reflect.Zero(typ)
+ }
+ return value == z.Interface().(flag.Value).String()
+}
+
+// Run takes the args after top level flag processing, and invokes the correct
+// sub command as specified by the first argument.
+// If no arguments are passed it will invoke the server sub command, as a
+// temporary measure for compatibility.
+func (app *Application) Run(ctx context.Context, args ...string) error {
+ ctx = debug.WithInstance(ctx, app.wd, app.OCAgent)
+ if len(args) == 0 {
+ s := flag.NewFlagSet(app.Name(), flag.ExitOnError)
+ return tool.Run(ctx, s, &app.Serve, args)
+ }
+ command, args := args[0], args[1:]
+ for _, c := range app.Commands() {
+ if c.Name() == command {
+ s := flag.NewFlagSet(app.Name(), flag.ExitOnError)
+ return tool.Run(ctx, s, c, args)
+ }
+ }
+ return tool.CommandLineErrorf("Unknown command %v", command)
+}
+
+// Commands returns the set of commands supported by the gopls tool on the
+// command line.
+// The command is specified by the first non flag argument.
+func (app *Application) Commands() []tool.Application {
+ var commands []tool.Application
+ commands = append(commands, app.mainCommands()...)
+ commands = append(commands, app.featureCommands()...)
+ return commands
+}
+
+func (app *Application) mainCommands() []tool.Application {
+ return []tool.Application{
+ &app.Serve,
+ &version{app: app},
+ &bug{app: app},
+ &help{app: app},
+ &apiJSON{app: app},
+ &licenses{app: app},
+ }
+}
+
+func (app *Application) featureCommands() []tool.Application {
+ return []tool.Application{
+ &callHierarchy{app: app},
+ &check{app: app},
+ &definition{app: app},
+ &foldingRanges{app: app},
+ &format{app: app},
+ &highlight{app: app},
+ &implementation{app: app},
+ &imports{app: app},
+ newRemote(app, ""),
+ newRemote(app, "inspect"),
+ &links{app: app},
+ &prepareRename{app: app},
+ &references{app: app},
+ &rename{app: app},
+ &semtok{app: app},
+ &signature{app: app},
+ &suggestedFix{app: app},
+ &symbols{app: app},
+ &workspaceSymbol{app: app},
+ &vulncheck{app: app},
+ }
+}
+
+var (
+ internalMu sync.Mutex
+ internalConnections = make(map[string]*connection)
+)
+
+func (app *Application) connect(ctx context.Context) (*connection, error) {
+ switch {
+ case app.Remote == "":
+ connection := newConnection(app)
+ connection.Server = lsp.NewServer(cache.NewSession(ctx, cache.New(nil), app.options), connection.Client)
+ ctx = protocol.WithClient(ctx, connection.Client)
+ return connection, connection.initialize(ctx, app.options)
+ case strings.HasPrefix(app.Remote, "internal@"):
+ internalMu.Lock()
+ defer internalMu.Unlock()
+ opts := source.DefaultOptions().Clone()
+ if app.options != nil {
+ app.options(opts)
+ }
+ key := fmt.Sprintf("%s %v %v %v", app.wd, opts.PreferredContentFormat, opts.HierarchicalDocumentSymbolSupport, opts.SymbolMatcher)
+ if c := internalConnections[key]; c != nil {
+ return c, nil
+ }
+ remote := app.Remote[len("internal@"):]
+ ctx := xcontext.Detach(ctx) //TODO:a way of shutting down the internal server
+ connection, err := app.connectRemote(ctx, remote)
+ if err != nil {
+ return nil, err
+ }
+ internalConnections[key] = connection
+ return connection, nil
+ default:
+ return app.connectRemote(ctx, app.Remote)
+ }
+}
+
+// CloseTestConnections terminates shared connections used in command tests. It
+// should only be called from tests.
+func CloseTestConnections(ctx context.Context) {
+ for _, c := range internalConnections {
+ c.Shutdown(ctx)
+ c.Exit(ctx)
+ }
+}
+
+func (app *Application) connectRemote(ctx context.Context, remote string) (*connection, error) {
+ connection := newConnection(app)
+ conn, err := lsprpc.ConnectToRemote(ctx, remote)
+ if err != nil {
+ return nil, err
+ }
+ stream := jsonrpc2.NewHeaderStream(conn)
+ cc := jsonrpc2.NewConn(stream)
+ connection.Server = protocol.ServerDispatcher(cc)
+ ctx = protocol.WithClient(ctx, connection.Client)
+ cc.Go(ctx,
+ protocol.Handlers(
+ protocol.ClientHandler(connection.Client,
+ jsonrpc2.MethodNotFound)))
+ return connection, connection.initialize(ctx, app.options)
+}
+
+var matcherString = map[source.SymbolMatcher]string{
+ source.SymbolFuzzy: "fuzzy",
+ source.SymbolCaseSensitive: "caseSensitive",
+ source.SymbolCaseInsensitive: "caseInsensitive",
+}
+
+func (c *connection) initialize(ctx context.Context, options func(*source.Options)) error {
+ params := &protocol.ParamInitialize{}
+ params.RootURI = protocol.URIFromPath(c.Client.app.wd)
+ params.Capabilities.Workspace.Configuration = true
+
+ // Make sure to respect configured options when sending initialize request.
+ opts := source.DefaultOptions().Clone()
+ if options != nil {
+ options(opts)
+ }
+ // If you add an additional option here, you must update the map key in connect.
+ params.Capabilities.TextDocument.Hover = &protocol.HoverClientCapabilities{
+ ContentFormat: []protocol.MarkupKind{opts.PreferredContentFormat},
+ }
+ params.Capabilities.TextDocument.DocumentSymbol.HierarchicalDocumentSymbolSupport = opts.HierarchicalDocumentSymbolSupport
+ params.Capabilities.TextDocument.SemanticTokens = protocol.SemanticTokensClientCapabilities{}
+ params.Capabilities.TextDocument.SemanticTokens.Formats = []protocol.TokenFormat{"relative"}
+ params.Capabilities.TextDocument.SemanticTokens.Requests.Range.Value = true
+ params.Capabilities.TextDocument.SemanticTokens.Requests.Full.Value = true
+ params.Capabilities.TextDocument.SemanticTokens.TokenTypes = lsp.SemanticTypes()
+ params.Capabilities.TextDocument.SemanticTokens.TokenModifiers = lsp.SemanticModifiers()
+ params.InitializationOptions = map[string]interface{}{
+ "symbolMatcher": matcherString[opts.SymbolMatcher],
+ }
+ if _, err := c.Server.Initialize(ctx, params); err != nil {
+ return err
+ }
+ if err := c.Server.Initialized(ctx, &protocol.InitializedParams{}); err != nil {
+ return err
+ }
+ return nil
+}
+
+type connection struct {
+ protocol.Server
+ Client *cmdClient
+}
+
+type cmdClient struct {
+ protocol.Server
+ app *Application
+
+ diagnosticsMu sync.Mutex
+ diagnosticsDone chan struct{}
+
+ filesMu sync.Mutex
+ files map[span.URI]*cmdFile
+}
+
+type cmdFile struct {
+ uri span.URI
+ mapper *protocol.Mapper
+ err error
+ open bool
+ diagnostics []protocol.Diagnostic
+}
+
+func newConnection(app *Application) *connection {
+ return &connection{
+ Client: &cmdClient{
+ app: app,
+ files: make(map[span.URI]*cmdFile),
+ },
+ }
+}
+
+// fileURI converts a DocumentURI to a file:// span.URI, panicking if it's not a file.
+func fileURI(uri protocol.DocumentURI) span.URI {
+ sURI := uri.SpanURI()
+ if !sURI.IsFile() {
+ panic(fmt.Sprintf("%q is not a file URI", uri))
+ }
+ return sURI
+}
+
+func (c *cmdClient) CodeLensRefresh(context.Context) error { return nil }
+
+func (c *cmdClient) LogTrace(context.Context, *protocol.LogTraceParams) error { return nil }
+
+func (c *cmdClient) ShowMessage(ctx context.Context, p *protocol.ShowMessageParams) error { return nil }
+
+func (c *cmdClient) ShowMessageRequest(ctx context.Context, p *protocol.ShowMessageRequestParams) (*protocol.MessageActionItem, error) {
+ return nil, nil
+}
+
+func (c *cmdClient) LogMessage(ctx context.Context, p *protocol.LogMessageParams) error {
+ switch p.Type {
+ case protocol.Error:
+ log.Print("Error:", p.Message)
+ case protocol.Warning:
+ log.Print("Warning:", p.Message)
+ case protocol.Info:
+ if c.app.verbose() {
+ log.Print("Info:", p.Message)
+ }
+ case protocol.Log:
+ if c.app.verbose() {
+ log.Print("Log:", p.Message)
+ }
+ default:
+ if c.app.verbose() {
+ log.Print(p.Message)
+ }
+ }
+ return nil
+}
+
+func (c *cmdClient) Event(ctx context.Context, t *interface{}) error { return nil }
+
+func (c *cmdClient) RegisterCapability(ctx context.Context, p *protocol.RegistrationParams) error {
+ return nil
+}
+
+func (c *cmdClient) UnregisterCapability(ctx context.Context, p *protocol.UnregistrationParams) error {
+ return nil
+}
+
+func (c *cmdClient) WorkspaceFolders(ctx context.Context) ([]protocol.WorkspaceFolder, error) {
+ return nil, nil
+}
+
+func (c *cmdClient) Configuration(ctx context.Context, p *protocol.ParamConfiguration) ([]interface{}, error) {
+ results := make([]interface{}, len(p.Items))
+ for i, item := range p.Items {
+ if item.Section != "gopls" {
+ continue
+ }
+ env := map[string]interface{}{}
+ for _, value := range c.app.env {
+ l := strings.SplitN(value, "=", 2)
+ if len(l) != 2 {
+ continue
+ }
+ env[l[0]] = l[1]
+ }
+ m := map[string]interface{}{
+ "env": env,
+ "analyses": map[string]bool{
+ "fillreturns": true,
+ "nonewvars": true,
+ "noresultvalues": true,
+ "undeclaredname": true,
+ },
+ }
+ if c.app.VeryVerbose {
+ m["verboseOutput"] = true
+ }
+ results[i] = m
+ }
+ return results, nil
+}
+
+func (c *cmdClient) ApplyEdit(ctx context.Context, p *protocol.ApplyWorkspaceEditParams) (*protocol.ApplyWorkspaceEditResult, error) {
+ return &protocol.ApplyWorkspaceEditResult{Applied: false, FailureReason: "not implemented"}, nil
+}
+
+func (c *cmdClient) PublishDiagnostics(ctx context.Context, p *protocol.PublishDiagnosticsParams) error {
+ if p.URI == "gopls://diagnostics-done" {
+ close(c.diagnosticsDone)
+ }
+ // Don't worry about diagnostics without versions.
+ if p.Version == 0 {
+ return nil
+ }
+
+ c.filesMu.Lock()
+ defer c.filesMu.Unlock()
+
+ file := c.getFile(ctx, fileURI(p.URI))
+ file.diagnostics = p.Diagnostics
+ return nil
+}
+
+func (c *cmdClient) Progress(context.Context, *protocol.ProgressParams) error {
+ return nil
+}
+
+func (c *cmdClient) ShowDocument(context.Context, *protocol.ShowDocumentParams) (*protocol.ShowDocumentResult, error) {
+ return nil, nil
+}
+
+func (c *cmdClient) WorkDoneProgressCreate(context.Context, *protocol.WorkDoneProgressCreateParams) error {
+ return nil
+}
+
+func (c *cmdClient) DiagnosticRefresh(context.Context) error {
+ return nil
+}
+
+func (c *cmdClient) InlayHintRefresh(context.Context) error {
+ return nil
+}
+
+func (c *cmdClient) SemanticTokensRefresh(context.Context) error {
+ return nil
+}
+
+func (c *cmdClient) InlineValueRefresh(context.Context) error {
+ return nil
+}
+
+func (c *cmdClient) getFile(ctx context.Context, uri span.URI) *cmdFile {
+ file, found := c.files[uri]
+ if !found || file.err != nil {
+ file = &cmdFile{
+ uri: uri,
+ }
+ c.files[uri] = file
+ }
+ if file.mapper == nil {
+ content, err := ioutil.ReadFile(uri.Filename())
+ if err != nil {
+ file.err = fmt.Errorf("getFile: %v: %v", uri, err)
+ return file
+ }
+ file.mapper = protocol.NewMapper(uri, content)
+ }
+ return file
+}
+
+func (c *cmdClient) openFile(ctx context.Context, uri span.URI) *cmdFile {
+ c.filesMu.Lock()
+ defer c.filesMu.Unlock()
+
+ file := c.getFile(ctx, uri)
+ if file.err != nil || file.open {
+ return file
+ }
+ file.open = true
+ return file
+}
+
+func (c *connection) openFile(ctx context.Context, uri span.URI) *cmdFile {
+ file := c.Client.openFile(ctx, uri)
+ if file.err != nil {
+ return file
+ }
+
+ p := &protocol.DidOpenTextDocumentParams{
+ TextDocument: protocol.TextDocumentItem{
+ URI: protocol.URIFromSpanURI(uri),
+ LanguageID: "go",
+ Version: 1,
+ Text: string(file.mapper.Content),
+ },
+ }
+ if err := c.Server.DidOpen(ctx, p); err != nil {
+ file.err = fmt.Errorf("%v: %v", uri, err)
+ }
+ return file
+}
+
+func (c *connection) semanticTokens(ctx context.Context, p *protocol.SemanticTokensRangeParams) (*protocol.SemanticTokens, error) {
+ // use range to avoid limits on full
+ resp, err := c.Server.SemanticTokensRange(ctx, p)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *connection) diagnoseFiles(ctx context.Context, files []span.URI) error {
+ var untypedFiles []interface{}
+ for _, file := range files {
+ untypedFiles = append(untypedFiles, string(file))
+ }
+ c.Client.diagnosticsMu.Lock()
+ defer c.Client.diagnosticsMu.Unlock()
+
+ c.Client.diagnosticsDone = make(chan struct{})
+ _, err := c.Server.NonstandardRequest(ctx, "gopls/diagnoseFiles", map[string]interface{}{"files": untypedFiles})
+ if err != nil {
+ close(c.Client.diagnosticsDone)
+ return err
+ }
+
+ <-c.Client.diagnosticsDone
+ return nil
+}
+
+func (c *connection) terminate(ctx context.Context) {
+ if strings.HasPrefix(c.Client.app.Remote, "internal@") {
+ // internal connections need to be left alive for the next test
+ return
+ }
+ //TODO: do we need to handle errors on these calls?
+ c.Shutdown(ctx)
+ //TODO: right now calling exit terminates the process, we should rethink that
+ //server.Exit(ctx)
+}
+
+// Implement io.Closer.
+func (c *cmdClient) Close() error {
+ return nil
+}
diff --git a/gopls/internal/lsp/cmd/definition.go b/gopls/internal/lsp/cmd/definition.go
new file mode 100644
index 000000000..952f43b51
--- /dev/null
+++ b/gopls/internal/lsp/cmd/definition.go
@@ -0,0 +1,132 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmd
+
+import (
+ "context"
+ "encoding/json"
+ "flag"
+ "fmt"
+ "os"
+ "strings"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/tool"
+)
+
+// A Definition is the result of a 'definition' query.
+type Definition struct {
+ Span span.Span `json:"span"` // span of the definition
+ Description string `json:"description"` // description of the denoted object
+}
+
+// These constant is printed in the help, and then used in a test to verify the
+// help is still valid.
+// They refer to "Set" in "flag.FlagSet" from the DetailedHelp method below.
+const (
+ exampleLine = 44
+ exampleColumn = 47
+ exampleOffset = 1270
+)
+
+// definition implements the definition verb for gopls.
+type definition struct {
+ app *Application
+
+ JSON bool `flag:"json" help:"emit output in JSON format"`
+ MarkdownSupported bool `flag:"markdown" help:"support markdown in responses"`
+}
+
+func (d *definition) Name() string { return "definition" }
+func (d *definition) Parent() string { return d.app.Name() }
+func (d *definition) Usage() string { return "[definition-flags] <position>" }
+func (d *definition) ShortHelp() string { return "show declaration of selected identifier" }
+func (d *definition) DetailedHelp(f *flag.FlagSet) {
+ fmt.Fprintf(f.Output(), `
+Example: show the definition of the identifier at syntax at offset %[1]v in this file (flag.FlagSet):
+
+ $ gopls definition internal/lsp/cmd/definition.go:%[1]v:%[2]v
+ $ gopls definition internal/lsp/cmd/definition.go:#%[3]v
+
+definition-flags:
+`, exampleLine, exampleColumn, exampleOffset)
+ printFlagDefaults(f)
+}
+
+// Run performs the definition query as specified by args and prints the
+// results to stdout.
+func (d *definition) Run(ctx context.Context, args ...string) error {
+ if len(args) != 1 {
+ return tool.CommandLineErrorf("definition expects 1 argument")
+ }
+ // Plaintext makes more sense for the command line.
+ opts := d.app.options
+ d.app.options = func(o *source.Options) {
+ if opts != nil {
+ opts(o)
+ }
+ o.PreferredContentFormat = protocol.PlainText
+ if d.MarkdownSupported {
+ o.PreferredContentFormat = protocol.Markdown
+ }
+ }
+ conn, err := d.app.connect(ctx)
+ if err != nil {
+ return err
+ }
+ defer conn.terminate(ctx)
+ from := span.Parse(args[0])
+ file := conn.openFile(ctx, from.URI())
+ if file.err != nil {
+ return file.err
+ }
+ loc, err := file.mapper.SpanLocation(from)
+ if err != nil {
+ return err
+ }
+ p := protocol.DefinitionParams{
+ TextDocumentPositionParams: protocol.LocationTextDocumentPositionParams(loc),
+ }
+ locs, err := conn.Definition(ctx, &p)
+ if err != nil {
+ return fmt.Errorf("%v: %v", from, err)
+ }
+
+ if len(locs) == 0 {
+ return fmt.Errorf("%v: not an identifier", from)
+ }
+ q := protocol.HoverParams{
+ TextDocumentPositionParams: protocol.LocationTextDocumentPositionParams(loc),
+ }
+ hover, err := conn.Hover(ctx, &q)
+ if err != nil {
+ return fmt.Errorf("%v: %v", from, err)
+ }
+ if hover == nil {
+ return fmt.Errorf("%v: not an identifier", from)
+ }
+ file = conn.openFile(ctx, fileURI(locs[0].URI))
+ if file.err != nil {
+ return fmt.Errorf("%v: %v", from, file.err)
+ }
+ definition, err := file.mapper.LocationSpan(locs[0])
+ if err != nil {
+ return fmt.Errorf("%v: %v", from, err)
+ }
+ description := strings.TrimSpace(hover.Contents.Value)
+ result := &Definition{
+ Span: definition,
+ Description: description,
+ }
+ if d.JSON {
+ enc := json.NewEncoder(os.Stdout)
+ enc.SetIndent("", "\t")
+ return enc.Encode(result)
+ }
+ fmt.Printf("%v: defined here as %s", result.Span, result.Description)
+ return nil
+}
diff --git a/gopls/internal/lsp/cmd/folding_range.go b/gopls/internal/lsp/cmd/folding_range.go
new file mode 100644
index 000000000..68d93a3fb
--- /dev/null
+++ b/gopls/internal/lsp/cmd/folding_range.go
@@ -0,0 +1,73 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmd
+
+import (
+ "context"
+ "flag"
+ "fmt"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/tool"
+)
+
+// foldingRanges implements the folding_ranges verb for gopls
+type foldingRanges struct {
+ app *Application
+}
+
+func (r *foldingRanges) Name() string { return "folding_ranges" }
+func (r *foldingRanges) Parent() string { return r.app.Name() }
+func (r *foldingRanges) Usage() string { return "<file>" }
+func (r *foldingRanges) ShortHelp() string { return "display selected file's folding ranges" }
+func (r *foldingRanges) DetailedHelp(f *flag.FlagSet) {
+ fmt.Fprint(f.Output(), `
+Example:
+
+ $ gopls folding_ranges helper/helper.go
+`)
+ printFlagDefaults(f)
+}
+
+func (r *foldingRanges) Run(ctx context.Context, args ...string) error {
+ if len(args) != 1 {
+ return tool.CommandLineErrorf("folding_ranges expects 1 argument (file)")
+ }
+
+ conn, err := r.app.connect(ctx)
+ if err != nil {
+ return err
+ }
+ defer conn.terminate(ctx)
+
+ from := span.Parse(args[0])
+ file := conn.openFile(ctx, from.URI())
+ if file.err != nil {
+ return file.err
+ }
+
+ p := protocol.FoldingRangeParams{
+ TextDocument: protocol.TextDocumentIdentifier{
+ URI: protocol.URIFromSpanURI(from.URI()),
+ },
+ }
+
+ ranges, err := conn.FoldingRange(ctx, &p)
+ if err != nil {
+ return err
+ }
+
+ for _, r := range ranges {
+ fmt.Printf("%v:%v-%v:%v\n",
+ r.StartLine+1,
+ r.StartCharacter+1,
+ r.EndLine+1,
+ r.EndCharacter+1,
+ )
+ }
+
+ return nil
+}
diff --git a/gopls/internal/lsp/cmd/format.go b/gopls/internal/lsp/cmd/format.go
new file mode 100644
index 000000000..1602dec67
--- /dev/null
+++ b/gopls/internal/lsp/cmd/format.go
@@ -0,0 +1,110 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmd
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "os"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/diff"
+)
+
+// format implements the format verb for gopls.
+type format struct {
+ Diff bool `flag:"d,diff" help:"display diffs instead of rewriting files"`
+ Write bool `flag:"w,write" help:"write result to (source) file instead of stdout"`
+ List bool `flag:"l,list" help:"list files whose formatting differs from gofmt's"`
+
+ app *Application
+}
+
+func (c *format) Name() string { return "format" }
+func (c *format) Parent() string { return c.app.Name() }
+func (c *format) Usage() string { return "[format-flags] <filerange>" }
+func (c *format) ShortHelp() string { return "format the code according to the go standard" }
+func (c *format) DetailedHelp(f *flag.FlagSet) {
+ fmt.Fprint(f.Output(), `
+The arguments supplied may be simple file names, or ranges within files.
+
+Example: reformat this file:
+
+ $ gopls format -w internal/lsp/cmd/check.go
+
+format-flags:
+`)
+ printFlagDefaults(f)
+}
+
+// Run performs the check on the files specified by args and prints the
+// results to stdout.
+func (c *format) Run(ctx context.Context, args ...string) error {
+ if len(args) == 0 {
+ // no files, so no results
+ return nil
+ }
+ // now we ready to kick things off
+ conn, err := c.app.connect(ctx)
+ if err != nil {
+ return err
+ }
+ defer conn.terminate(ctx)
+ for _, arg := range args {
+ spn := span.Parse(arg)
+ file := conn.openFile(ctx, spn.URI())
+ if file.err != nil {
+ return file.err
+ }
+ filename := spn.URI().Filename()
+ loc, err := file.mapper.SpanLocation(spn)
+ if err != nil {
+ return err
+ }
+ if loc.Range.Start != loc.Range.End {
+ return fmt.Errorf("only full file formatting supported")
+ }
+ p := protocol.DocumentFormattingParams{
+ TextDocument: protocol.TextDocumentIdentifier{URI: loc.URI},
+ }
+ edits, err := conn.Formatting(ctx, &p)
+ if err != nil {
+ return fmt.Errorf("%v: %v", spn, err)
+ }
+ formatted, sedits, err := source.ApplyProtocolEdits(file.mapper, edits)
+ if err != nil {
+ return fmt.Errorf("%v: %v", spn, err)
+ }
+ printIt := true
+ if c.List {
+ printIt = false
+ if len(edits) > 0 {
+ fmt.Println(filename)
+ }
+ }
+ if c.Write {
+ printIt = false
+ if len(edits) > 0 {
+ ioutil.WriteFile(filename, formatted, 0644)
+ }
+ }
+ if c.Diff {
+ printIt = false
+ unified, err := diff.ToUnified(filename+".orig", filename, string(file.mapper.Content), sedits)
+ if err != nil {
+ return err
+ }
+ fmt.Print(unified)
+ }
+ if printIt {
+ os.Stdout.Write(formatted)
+ }
+ }
+ return nil
+}
diff --git a/gopls/internal/lsp/cmd/help_test.go b/gopls/internal/lsp/cmd/help_test.go
new file mode 100644
index 000000000..6bd3c8c50
--- /dev/null
+++ b/gopls/internal/lsp/cmd/help_test.go
@@ -0,0 +1,58 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmd_test
+
+import (
+ "bytes"
+ "context"
+ "flag"
+ "io/ioutil"
+ "path/filepath"
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+ "golang.org/x/tools/gopls/internal/lsp/cmd"
+ "golang.org/x/tools/internal/testenv"
+ "golang.org/x/tools/internal/tool"
+)
+
+//go:generate go test -run Help -update-help-files
+
+var updateHelpFiles = flag.Bool("update-help-files", false, "Write out the help files instead of checking them")
+
+const appName = "gopls"
+
+func TestHelpFiles(t *testing.T) {
+ testenv.NeedsGoBuild(t) // This is a lie. We actually need the source code.
+ app := cmd.New(appName, "", nil, nil)
+ ctx := context.Background()
+ for _, page := range append(app.Commands(), app) {
+ t.Run(page.Name(), func(t *testing.T) {
+ var buf bytes.Buffer
+ s := flag.NewFlagSet(page.Name(), flag.ContinueOnError)
+ s.SetOutput(&buf)
+ tool.Run(ctx, s, page, []string{"-h"})
+ name := page.Name()
+ if name == appName {
+ name = "usage"
+ }
+ helpFile := filepath.Join("usage", name+".hlp")
+ got := buf.Bytes()
+ if *updateHelpFiles {
+ if err := ioutil.WriteFile(helpFile, got, 0666); err != nil {
+ t.Errorf("Failed writing %v: %v", helpFile, err)
+ }
+ return
+ }
+ want, err := ioutil.ReadFile(helpFile)
+ if err != nil {
+ t.Fatalf("Missing help file %q", helpFile)
+ }
+ if diff := cmp.Diff(string(want), string(got)); diff != "" {
+ t.Errorf("Help file %q did not match, run with -update-help-files to fix (-want +got)\n%s", helpFile, diff)
+ }
+ })
+ }
+}
diff --git a/gopls/internal/lsp/cmd/highlight.go b/gopls/internal/lsp/cmd/highlight.go
new file mode 100644
index 000000000..60c04b2d4
--- /dev/null
+++ b/gopls/internal/lsp/cmd/highlight.go
@@ -0,0 +1,82 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmd
+
+import (
+ "context"
+ "flag"
+ "fmt"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/tool"
+)
+
+// highlight implements the highlight verb for gopls.
+type highlight struct {
+ app *Application
+}
+
+func (r *highlight) Name() string { return "highlight" }
+func (r *highlight) Parent() string { return r.app.Name() }
+func (r *highlight) Usage() string { return "<position>" }
+func (r *highlight) ShortHelp() string { return "display selected identifier's highlights" }
+func (r *highlight) DetailedHelp(f *flag.FlagSet) {
+ fmt.Fprint(f.Output(), `
+Example:
+
+ $ # 1-indexed location (:line:column or :#offset) of the target identifier
+ $ gopls highlight helper/helper.go:8:6
+ $ gopls highlight helper/helper.go:#53
+`)
+ printFlagDefaults(f)
+}
+
+func (r *highlight) Run(ctx context.Context, args ...string) error {
+ if len(args) != 1 {
+ return tool.CommandLineErrorf("highlight expects 1 argument (position)")
+ }
+
+ conn, err := r.app.connect(ctx)
+ if err != nil {
+ return err
+ }
+ defer conn.terminate(ctx)
+
+ from := span.Parse(args[0])
+ file := conn.openFile(ctx, from.URI())
+ if file.err != nil {
+ return file.err
+ }
+
+ loc, err := file.mapper.SpanLocation(from)
+ if err != nil {
+ return err
+ }
+
+ p := protocol.DocumentHighlightParams{
+ TextDocumentPositionParams: protocol.LocationTextDocumentPositionParams(loc),
+ }
+ highlights, err := conn.DocumentHighlight(ctx, &p)
+ if err != nil {
+ return err
+ }
+
+ var results []span.Span
+ for _, h := range highlights {
+ s, err := file.mapper.RangeSpan(h.Range)
+ if err != nil {
+ return err
+ }
+ results = append(results, s)
+ }
+ // Sort results to make tests deterministic since DocumentHighlight uses a map.
+ span.SortSpans(results)
+
+ for _, s := range results {
+ fmt.Println(s)
+ }
+ return nil
+}
diff --git a/gopls/internal/lsp/cmd/implementation.go b/gopls/internal/lsp/cmd/implementation.go
new file mode 100644
index 000000000..bb5b1c24e
--- /dev/null
+++ b/gopls/internal/lsp/cmd/implementation.go
@@ -0,0 +1,84 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmd
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "sort"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/tool"
+)
+
+// implementation implements the implementation verb for gopls
+type implementation struct {
+ app *Application
+}
+
+func (i *implementation) Name() string { return "implementation" }
+func (i *implementation) Parent() string { return i.app.Name() }
+func (i *implementation) Usage() string { return "<position>" }
+func (i *implementation) ShortHelp() string { return "display selected identifier's implementation" }
+func (i *implementation) DetailedHelp(f *flag.FlagSet) {
+ fmt.Fprint(f.Output(), `
+Example:
+
+ $ # 1-indexed location (:line:column or :#offset) of the target identifier
+ $ gopls implementation helper/helper.go:8:6
+ $ gopls implementation helper/helper.go:#53
+`)
+ printFlagDefaults(f)
+}
+
+func (i *implementation) Run(ctx context.Context, args ...string) error {
+ if len(args) != 1 {
+ return tool.CommandLineErrorf("implementation expects 1 argument (position)")
+ }
+
+ conn, err := i.app.connect(ctx)
+ if err != nil {
+ return err
+ }
+ defer conn.terminate(ctx)
+
+ from := span.Parse(args[0])
+ file := conn.openFile(ctx, from.URI())
+ if file.err != nil {
+ return file.err
+ }
+
+ loc, err := file.mapper.SpanLocation(from)
+ if err != nil {
+ return err
+ }
+
+ p := protocol.ImplementationParams{
+ TextDocumentPositionParams: protocol.LocationTextDocumentPositionParams(loc),
+ }
+ implementations, err := conn.Implementation(ctx, &p)
+ if err != nil {
+ return err
+ }
+
+ var spans []string
+ for _, impl := range implementations {
+ f := conn.openFile(ctx, fileURI(impl.URI))
+ span, err := f.mapper.LocationSpan(impl)
+ if err != nil {
+ return err
+ }
+ spans = append(spans, fmt.Sprint(span))
+ }
+ sort.Strings(spans)
+
+ for _, s := range spans {
+ fmt.Println(s)
+ }
+
+ return nil
+}
diff --git a/gopls/internal/lsp/cmd/imports.go b/gopls/internal/lsp/cmd/imports.go
new file mode 100644
index 000000000..99141da59
--- /dev/null
+++ b/gopls/internal/lsp/cmd/imports.go
@@ -0,0 +1,105 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmd
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "os"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/diff"
+ "golang.org/x/tools/internal/tool"
+)
+
+// imports implements the import verb for gopls.
+type imports struct {
+ Diff bool `flag:"d,diff" help:"display diffs instead of rewriting files"`
+ Write bool `flag:"w,write" help:"write result to (source) file instead of stdout"`
+
+ app *Application
+}
+
+func (t *imports) Name() string { return "imports" }
+func (t *imports) Parent() string { return t.app.Name() }
+func (t *imports) Usage() string { return "[imports-flags] <filename>" }
+func (t *imports) ShortHelp() string { return "updates import statements" }
+func (t *imports) DetailedHelp(f *flag.FlagSet) {
+ fmt.Fprintf(f.Output(), `
+Example: update imports statements in a file:
+
+ $ gopls imports -w internal/lsp/cmd/check.go
+
+imports-flags:
+`)
+ printFlagDefaults(f)
+}
+
+// Run performs diagnostic checks on the file specified and either;
+// - if -w is specified, updates the file in place;
+// - if -d is specified, prints out unified diffs of the changes; or
+// - otherwise, prints the new versions to stdout.
+func (t *imports) Run(ctx context.Context, args ...string) error {
+ if len(args) != 1 {
+ return tool.CommandLineErrorf("imports expects 1 argument")
+ }
+ conn, err := t.app.connect(ctx)
+ if err != nil {
+ return err
+ }
+ defer conn.terminate(ctx)
+
+ from := span.Parse(args[0])
+ uri := from.URI()
+ file := conn.openFile(ctx, uri)
+ if file.err != nil {
+ return file.err
+ }
+ actions, err := conn.CodeAction(ctx, &protocol.CodeActionParams{
+ TextDocument: protocol.TextDocumentIdentifier{
+ URI: protocol.URIFromSpanURI(uri),
+ },
+ })
+ if err != nil {
+ return fmt.Errorf("%v: %v", from, err)
+ }
+ var edits []protocol.TextEdit
+ for _, a := range actions {
+ if a.Title != "Organize Imports" {
+ continue
+ }
+ for _, c := range a.Edit.DocumentChanges {
+ if c.TextDocumentEdit != nil {
+ if fileURI(c.TextDocumentEdit.TextDocument.URI) == uri {
+ edits = append(edits, c.TextDocumentEdit.Edits...)
+ }
+ }
+ }
+ }
+ newContent, sedits, err := source.ApplyProtocolEdits(file.mapper, edits)
+ if err != nil {
+ return fmt.Errorf("%v: %v", edits, err)
+ }
+ filename := file.uri.Filename()
+ switch {
+ case t.Write:
+ if len(edits) > 0 {
+ ioutil.WriteFile(filename, newContent, 0644)
+ }
+ case t.Diff:
+ unified, err := diff.ToUnified(filename+".orig", filename, string(file.mapper.Content), sedits)
+ if err != nil {
+ return err
+ }
+ fmt.Print(unified)
+ default:
+ os.Stdout.Write(newContent)
+ }
+ return nil
+}
diff --git a/gopls/internal/lsp/cmd/info.go b/gopls/internal/lsp/cmd/info.go
new file mode 100644
index 000000000..68ef40ffb
--- /dev/null
+++ b/gopls/internal/lsp/cmd/info.go
@@ -0,0 +1,246 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmd
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "flag"
+ "fmt"
+ "net/url"
+ "os"
+ "strings"
+
+ "golang.org/x/tools/gopls/internal/lsp/browser"
+ "golang.org/x/tools/gopls/internal/lsp/debug"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/internal/tool"
+)
+
+// help implements the help command.
+type help struct {
+ app *Application
+}
+
+func (h *help) Name() string { return "help" }
+func (h *help) Parent() string { return h.app.Name() }
+func (h *help) Usage() string { return "" }
+func (h *help) ShortHelp() string { return "print usage information for subcommands" }
+func (h *help) DetailedHelp(f *flag.FlagSet) {
+ fmt.Fprint(f.Output(), `
+
+Examples:
+$ gopls help # main gopls help message
+$ gopls help remote # help on 'remote' command
+$ gopls help remote sessions # help on 'remote sessions' subcommand
+`)
+ printFlagDefaults(f)
+}
+
+// Run prints help information about a subcommand.
+func (h *help) Run(ctx context.Context, args ...string) error {
+ find := func(cmds []tool.Application, name string) tool.Application {
+ for _, cmd := range cmds {
+ if cmd.Name() == name {
+ return cmd
+ }
+ }
+ return nil
+ }
+
+ // Find the subcommand denoted by args (empty => h.app).
+ var cmd tool.Application = h.app
+ for i, arg := range args {
+ cmd = find(getSubcommands(cmd), arg)
+ if cmd == nil {
+ return tool.CommandLineErrorf(
+ "no such subcommand: %s", strings.Join(args[:i+1], " "))
+ }
+ }
+
+ // 'gopls help cmd subcmd' is equivalent to 'gopls cmd subcmd -h'.
+ // The flag package prints the usage information (defined by tool.Run)
+ // when it sees the -h flag.
+ fs := flag.NewFlagSet(cmd.Name(), flag.ExitOnError)
+ return tool.Run(ctx, fs, h.app, append(args[:len(args):len(args)], "-h"))
+}
+
+// version implements the version command.
+type version struct {
+ JSON bool `flag:"json" help:"outputs in json format."`
+
+ app *Application
+}
+
+func (v *version) Name() string { return "version" }
+func (v *version) Parent() string { return v.app.Name() }
+func (v *version) Usage() string { return "" }
+func (v *version) ShortHelp() string { return "print the gopls version information" }
+func (v *version) DetailedHelp(f *flag.FlagSet) {
+ fmt.Fprint(f.Output(), ``)
+ printFlagDefaults(f)
+}
+
+// Run prints version information to stdout.
+func (v *version) Run(ctx context.Context, args ...string) error {
+ var mode = debug.PlainText
+ if v.JSON {
+ mode = debug.JSON
+ }
+
+ return debug.PrintVersionInfo(ctx, os.Stdout, v.app.verbose(), mode)
+}
+
+// bug implements the bug command.
+type bug struct {
+ app *Application
+}
+
+func (b *bug) Name() string { return "bug" }
+func (b *bug) Parent() string { return b.app.Name() }
+func (b *bug) Usage() string { return "" }
+func (b *bug) ShortHelp() string { return "report a bug in gopls" }
+func (b *bug) DetailedHelp(f *flag.FlagSet) {
+ fmt.Fprint(f.Output(), ``)
+ printFlagDefaults(f)
+}
+
+const goplsBugPrefix = "x/tools/gopls: <DESCRIBE THE PROBLEM>"
+const goplsBugHeader = `ATTENTION: Please answer these questions BEFORE submitting your issue. Thanks!
+
+#### What did you do?
+If possible, provide a recipe for reproducing the error.
+A complete runnable program is good.
+A link on play.golang.org is better.
+A failing unit test is the best.
+
+#### What did you expect to see?
+
+
+#### What did you see instead?
+
+
+`
+
+// Run collects some basic information and then prepares an issue ready to
+// be reported.
+func (b *bug) Run(ctx context.Context, args ...string) error {
+ buf := &bytes.Buffer{}
+ fmt.Fprint(buf, goplsBugHeader)
+ debug.PrintVersionInfo(ctx, buf, true, debug.Markdown)
+ body := buf.String()
+ title := strings.Join(args, " ")
+ if !strings.HasPrefix(title, goplsBugPrefix) {
+ title = goplsBugPrefix + title
+ }
+ if !browser.Open("https://github.com/golang/go/issues/new?title=" + url.QueryEscape(title) + "&body=" + url.QueryEscape(body)) {
+ fmt.Print("Please file a new issue at golang.org/issue/new using this template:\n\n")
+ fmt.Print(body)
+ }
+ return nil
+}
+
+type apiJSON struct {
+ app *Application
+}
+
+func (j *apiJSON) Name() string { return "api-json" }
+func (j *apiJSON) Parent() string { return j.app.Name() }
+func (j *apiJSON) Usage() string { return "" }
+func (j *apiJSON) ShortHelp() string { return "print json describing gopls API" }
+func (j *apiJSON) DetailedHelp(f *flag.FlagSet) {
+ fmt.Fprint(f.Output(), ``)
+ printFlagDefaults(f)
+}
+
+func (j *apiJSON) Run(ctx context.Context, args ...string) error {
+ js, err := json.MarshalIndent(source.GeneratedAPIJSON, "", "\t")
+ if err != nil {
+ return err
+ }
+ fmt.Fprint(os.Stdout, string(js))
+ return nil
+}
+
+type licenses struct {
+ app *Application
+}
+
+func (l *licenses) Name() string { return "licenses" }
+func (l *licenses) Parent() string { return l.app.Name() }
+func (l *licenses) Usage() string { return "" }
+func (l *licenses) ShortHelp() string { return "print licenses of included software" }
+func (l *licenses) DetailedHelp(f *flag.FlagSet) {
+ fmt.Fprint(f.Output(), ``)
+ printFlagDefaults(f)
+}
+
+const licensePreamble = `
+gopls is made available under the following BSD-style license:
+
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+gopls implements the LSP specification, which is made available under the following license:
+
+Copyright (c) Microsoft Corporation
+
+All rights reserved.
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy,
+modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
+OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+gopls also includes software made available under these licenses:
+`
+
+func (l *licenses) Run(ctx context.Context, args ...string) error {
+ opts := source.DefaultOptions()
+ l.app.options(opts)
+ txt := licensePreamble
+ if opts.LicensesText == "" {
+ txt += "(development gopls, license information not available)"
+ } else {
+ txt += opts.LicensesText
+ }
+ fmt.Fprint(os.Stdout, txt)
+ return nil
+}
diff --git a/gopls/internal/lsp/cmd/links.go b/gopls/internal/lsp/cmd/links.go
new file mode 100644
index 000000000..b5413bba5
--- /dev/null
+++ b/gopls/internal/lsp/cmd/links.go
@@ -0,0 +1,77 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmd
+
+import (
+ "context"
+ "encoding/json"
+ "flag"
+ "fmt"
+ "os"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/tool"
+)
+
+// links implements the links verb for gopls.
+type links struct {
+ JSON bool `flag:"json" help:"emit document links in JSON format"`
+
+ app *Application
+}
+
+func (l *links) Name() string { return "links" }
+func (l *links) Parent() string { return l.app.Name() }
+func (l *links) Usage() string { return "[links-flags] <filename>" }
+func (l *links) ShortHelp() string { return "list links in a file" }
+func (l *links) DetailedHelp(f *flag.FlagSet) {
+ fmt.Fprintf(f.Output(), `
+Example: list links contained within a file:
+
+ $ gopls links internal/lsp/cmd/check.go
+
+links-flags:
+`)
+ printFlagDefaults(f)
+}
+
+// Run finds all the links within a document
+// - if -json is specified, outputs location range and uri
+// - otherwise, prints the a list of unique links
+func (l *links) Run(ctx context.Context, args ...string) error {
+ if len(args) != 1 {
+ return tool.CommandLineErrorf("links expects 1 argument")
+ }
+ conn, err := l.app.connect(ctx)
+ if err != nil {
+ return err
+ }
+ defer conn.terminate(ctx)
+
+ from := span.Parse(args[0])
+ uri := from.URI()
+ file := conn.openFile(ctx, uri)
+ if file.err != nil {
+ return file.err
+ }
+ results, err := conn.DocumentLink(ctx, &protocol.DocumentLinkParams{
+ TextDocument: protocol.TextDocumentIdentifier{
+ URI: protocol.URIFromSpanURI(uri),
+ },
+ })
+ if err != nil {
+ return fmt.Errorf("%v: %v", from, err)
+ }
+ if l.JSON {
+ enc := json.NewEncoder(os.Stdout)
+ enc.SetIndent("", "\t")
+ return enc.Encode(results)
+ }
+ for _, v := range results {
+ fmt.Println(v.Target)
+ }
+ return nil
+}
diff --git a/gopls/internal/lsp/cmd/prepare_rename.go b/gopls/internal/lsp/cmd/prepare_rename.go
new file mode 100644
index 000000000..5e9d732fb
--- /dev/null
+++ b/gopls/internal/lsp/cmd/prepare_rename.go
@@ -0,0 +1,80 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmd
+
+import (
+ "context"
+ "errors"
+ "flag"
+ "fmt"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/tool"
+)
+
+// prepareRename implements the prepare_rename verb for gopls.
+type prepareRename struct {
+ app *Application
+}
+
+func (r *prepareRename) Name() string { return "prepare_rename" }
+func (r *prepareRename) Parent() string { return r.app.Name() }
+func (r *prepareRename) Usage() string { return "<position>" }
+func (r *prepareRename) ShortHelp() string { return "test validity of a rename operation at location" }
+func (r *prepareRename) DetailedHelp(f *flag.FlagSet) {
+ fmt.Fprint(f.Output(), `
+Example:
+
+ $ # 1-indexed location (:line:column or :#offset) of the target identifier
+ $ gopls prepare_rename helper/helper.go:8:6
+ $ gopls prepare_rename helper/helper.go:#53
+`)
+ printFlagDefaults(f)
+}
+
+// ErrInvalidRenamePosition is returned when prepareRename is run at a position that
+// is not a candidate for renaming.
+var ErrInvalidRenamePosition = errors.New("request is not valid at the given position")
+
+func (r *prepareRename) Run(ctx context.Context, args ...string) error {
+ if len(args) != 1 {
+ return tool.CommandLineErrorf("prepare_rename expects 1 argument (file)")
+ }
+
+ conn, err := r.app.connect(ctx)
+ if err != nil {
+ return err
+ }
+ defer conn.terminate(ctx)
+
+ from := span.Parse(args[0])
+ file := conn.openFile(ctx, from.URI())
+ if file.err != nil {
+ return file.err
+ }
+ loc, err := file.mapper.SpanLocation(from)
+ if err != nil {
+ return err
+ }
+ p := protocol.PrepareRenameParams{
+ TextDocumentPositionParams: protocol.LocationTextDocumentPositionParams(loc),
+ }
+ result, err := conn.PrepareRename(ctx, &p)
+ if err != nil {
+ return fmt.Errorf("prepare_rename failed: %w", err)
+ }
+ if result == nil {
+ return ErrInvalidRenamePosition
+ }
+
+ s, err := file.mapper.RangeSpan(result.Range)
+ if err != nil {
+ return err
+ }
+
+ fmt.Println(s)
+ return nil
+}
diff --git a/gopls/internal/lsp/cmd/references.go b/gopls/internal/lsp/cmd/references.go
new file mode 100644
index 000000000..6db5ce34e
--- /dev/null
+++ b/gopls/internal/lsp/cmd/references.go
@@ -0,0 +1,89 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmd
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "sort"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/tool"
+)
+
+// references implements the references verb for gopls
+type references struct {
+ IncludeDeclaration bool `flag:"d,declaration" help:"include the declaration of the specified identifier in the results"`
+
+ app *Application
+}
+
+func (r *references) Name() string { return "references" }
+func (r *references) Parent() string { return r.app.Name() }
+func (r *references) Usage() string { return "[references-flags] <position>" }
+func (r *references) ShortHelp() string { return "display selected identifier's references" }
+func (r *references) DetailedHelp(f *flag.FlagSet) {
+ fmt.Fprint(f.Output(), `
+Example:
+
+ $ # 1-indexed location (:line:column or :#offset) of the target identifier
+ $ gopls references helper/helper.go:8:6
+ $ gopls references helper/helper.go:#53
+
+references-flags:
+`)
+ printFlagDefaults(f)
+}
+
+func (r *references) Run(ctx context.Context, args ...string) error {
+ if len(args) != 1 {
+ return tool.CommandLineErrorf("references expects 1 argument (position)")
+ }
+
+ conn, err := r.app.connect(ctx)
+ if err != nil {
+ return err
+ }
+ defer conn.terminate(ctx)
+
+ from := span.Parse(args[0])
+ file := conn.openFile(ctx, from.URI())
+ if file.err != nil {
+ return file.err
+ }
+ loc, err := file.mapper.SpanLocation(from)
+ if err != nil {
+ return err
+ }
+ p := protocol.ReferenceParams{
+ Context: protocol.ReferenceContext{
+ IncludeDeclaration: r.IncludeDeclaration,
+ },
+ TextDocumentPositionParams: protocol.LocationTextDocumentPositionParams(loc),
+ }
+ locations, err := conn.References(ctx, &p)
+ if err != nil {
+ return err
+ }
+ var spans []string
+ for _, l := range locations {
+ f := conn.openFile(ctx, fileURI(l.URI))
+ // convert location to span for user-friendly 1-indexed line
+ // and column numbers
+ span, err := f.mapper.LocationSpan(l)
+ if err != nil {
+ return err
+ }
+ spans = append(spans, fmt.Sprint(span))
+ }
+
+ sort.Strings(spans)
+ for _, s := range spans {
+ fmt.Println(s)
+ }
+ return nil
+}
diff --git a/gopls/internal/lsp/cmd/remote.go b/gopls/internal/lsp/cmd/remote.go
new file mode 100644
index 000000000..684981cff
--- /dev/null
+++ b/gopls/internal/lsp/cmd/remote.go
@@ -0,0 +1,164 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmd
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "flag"
+ "fmt"
+ "log"
+ "os"
+
+ "golang.org/x/tools/gopls/internal/lsp/command"
+ "golang.org/x/tools/gopls/internal/lsp/lsprpc"
+)
+
+type remote struct {
+ app *Application
+ subcommands
+
+ // For backward compatibility, allow aliasing this command (it was previously
+ // called 'inspect').
+ //
+ // TODO(rFindley): delete this after allowing some transition time in case
+ // there were any users of 'inspect' (I suspect not).
+ alias string
+}
+
+func newRemote(app *Application, alias string) *remote {
+ return &remote{
+ app: app,
+ subcommands: subcommands{
+ &listSessions{app: app},
+ &startDebugging{app: app},
+ },
+ alias: alias,
+ }
+}
+
+func (r *remote) Name() string {
+ if r.alias != "" {
+ return r.alias
+ }
+ return "remote"
+}
+
+func (r *remote) Parent() string { return r.app.Name() }
+
+func (r *remote) ShortHelp() string {
+ short := "interact with the gopls daemon"
+ if r.alias != "" {
+ short += " (deprecated: use 'remote')"
+ }
+ return short
+}
+
+// listSessions is an inspect subcommand to list current sessions.
+type listSessions struct {
+ app *Application
+}
+
+func (c *listSessions) Name() string { return "sessions" }
+func (c *listSessions) Parent() string { return c.app.Name() }
+func (c *listSessions) Usage() string { return "" }
+func (c *listSessions) ShortHelp() string {
+ return "print information about current gopls sessions"
+}
+
+const listSessionsExamples = `
+Examples:
+
+1) list sessions for the default daemon:
+
+$ gopls -remote=auto remote sessions
+or just
+$ gopls remote sessions
+
+2) list sessions for a specific daemon:
+
+$ gopls -remote=localhost:8082 remote sessions
+`
+
+func (c *listSessions) DetailedHelp(f *flag.FlagSet) {
+ fmt.Fprint(f.Output(), listSessionsExamples)
+ printFlagDefaults(f)
+}
+
+func (c *listSessions) Run(ctx context.Context, args ...string) error {
+ remote := c.app.Remote
+ if remote == "" {
+ remote = "auto"
+ }
+ state, err := lsprpc.QueryServerState(ctx, remote)
+ if err != nil {
+ return err
+ }
+ v, err := json.MarshalIndent(state, "", "\t")
+ if err != nil {
+ log.Fatal(err)
+ }
+ os.Stdout.Write(v)
+ return nil
+}
+
+type startDebugging struct {
+ app *Application
+}
+
+func (c *startDebugging) Name() string { return "debug" }
+func (c *startDebugging) Usage() string { return "[host:port]" }
+func (c *startDebugging) ShortHelp() string {
+ return "start the debug server"
+}
+
+const startDebuggingExamples = `
+Examples:
+
+1) start a debug server for the default daemon, on an arbitrary port:
+
+$ gopls -remote=auto remote debug
+or just
+$ gopls remote debug
+
+2) start for a specific daemon, on a specific port:
+
+$ gopls -remote=localhost:8082 remote debug localhost:8083
+`
+
+func (c *startDebugging) DetailedHelp(f *flag.FlagSet) {
+ fmt.Fprint(f.Output(), startDebuggingExamples)
+ printFlagDefaults(f)
+}
+
+func (c *startDebugging) Run(ctx context.Context, args ...string) error {
+ if len(args) > 1 {
+ fmt.Fprintln(os.Stderr, c.Usage())
+ return errors.New("invalid usage")
+ }
+ remote := c.app.Remote
+ if remote == "" {
+ remote = "auto"
+ }
+ debugAddr := ""
+ if len(args) > 0 {
+ debugAddr = args[0]
+ }
+ debugArgs := command.DebuggingArgs{
+ Addr: debugAddr,
+ }
+ var result command.DebuggingResult
+ if err := lsprpc.ExecuteCommand(ctx, remote, command.StartDebugging.ID(), debugArgs, &result); err != nil {
+ return err
+ }
+ if len(result.URLs) == 0 {
+ return errors.New("no debugging URLs")
+ }
+ for _, url := range result.URLs {
+ fmt.Printf("debugging on %s\n", url)
+ }
+ return nil
+}
diff --git a/gopls/internal/lsp/cmd/rename.go b/gopls/internal/lsp/cmd/rename.go
new file mode 100644
index 000000000..14c789cbb
--- /dev/null
+++ b/gopls/internal/lsp/cmd/rename.go
@@ -0,0 +1,130 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmd
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/diff"
+ "golang.org/x/tools/internal/tool"
+)
+
+// rename implements the rename verb for gopls.
+type rename struct {
+ Diff bool `flag:"d,diff" help:"display diffs instead of rewriting files"`
+ Write bool `flag:"w,write" help:"write result to (source) file instead of stdout"`
+ Preserve bool `flag:"preserve" help:"preserve original files"`
+
+ app *Application
+}
+
+func (r *rename) Name() string { return "rename" }
+func (r *rename) Parent() string { return r.app.Name() }
+func (r *rename) Usage() string { return "[rename-flags] <position> <name>" }
+func (r *rename) ShortHelp() string { return "rename selected identifier" }
+func (r *rename) DetailedHelp(f *flag.FlagSet) {
+ fmt.Fprint(f.Output(), `
+Example:
+
+ $ # 1-based location (:line:column or :#position) of the thing to change
+ $ gopls rename helper/helper.go:8:6 Foo
+ $ gopls rename helper/helper.go:#53 Foo
+
+rename-flags:
+`)
+ printFlagDefaults(f)
+}
+
+// Run renames the specified identifier and either;
+// - if -w is specified, updates the file(s) in place;
+// - if -d is specified, prints out unified diffs of the changes; or
+// - otherwise, prints the new versions to stdout.
+func (r *rename) Run(ctx context.Context, args ...string) error {
+ if len(args) != 2 {
+ return tool.CommandLineErrorf("definition expects 2 arguments (position, new name)")
+ }
+ conn, err := r.app.connect(ctx)
+ if err != nil {
+ return err
+ }
+ defer conn.terminate(ctx)
+
+ from := span.Parse(args[0])
+ file := conn.openFile(ctx, from.URI())
+ if file.err != nil {
+ return file.err
+ }
+ loc, err := file.mapper.SpanLocation(from)
+ if err != nil {
+ return err
+ }
+ p := protocol.RenameParams{
+ TextDocument: protocol.TextDocumentIdentifier{URI: loc.URI},
+ Position: loc.Range.Start,
+ NewName: args[1],
+ }
+ edit, err := conn.Rename(ctx, &p)
+ if err != nil {
+ return err
+ }
+ var orderedURIs []string
+ edits := map[span.URI][]protocol.TextEdit{}
+ for _, c := range edit.DocumentChanges {
+ if c.TextDocumentEdit != nil {
+ uri := fileURI(c.TextDocumentEdit.TextDocument.URI)
+ edits[uri] = append(edits[uri], c.TextDocumentEdit.Edits...)
+ orderedURIs = append(orderedURIs, string(uri))
+ }
+ }
+ sort.Strings(orderedURIs)
+ changeCount := len(orderedURIs)
+
+ for _, u := range orderedURIs {
+ uri := span.URIFromURI(u)
+ cmdFile := conn.openFile(ctx, uri)
+ filename := cmdFile.uri.Filename()
+
+ newContent, renameEdits, err := source.ApplyProtocolEdits(cmdFile.mapper, edits[uri])
+ if err != nil {
+ return fmt.Errorf("%v: %v", edits, err)
+ }
+
+ switch {
+ case r.Write:
+ fmt.Fprintln(os.Stderr, filename)
+ if r.Preserve {
+ if err := os.Rename(filename, filename+".orig"); err != nil {
+ return fmt.Errorf("%v: %v", edits, err)
+ }
+ }
+ ioutil.WriteFile(filename, newContent, 0644)
+ case r.Diff:
+ unified, err := diff.ToUnified(filename+".orig", filename, string(cmdFile.mapper.Content), renameEdits)
+ if err != nil {
+ return err
+ }
+ fmt.Print(unified)
+ default:
+ if len(orderedURIs) > 1 {
+ fmt.Printf("%s:\n", filepath.Base(filename))
+ }
+ os.Stdout.Write(newContent)
+ if changeCount > 1 { // if this wasn't last change, print newline
+ fmt.Println()
+ }
+ changeCount -= 1
+ }
+ }
+ return nil
+}
diff --git a/gopls/internal/lsp/cmd/semantictokens.go b/gopls/internal/lsp/cmd/semantictokens.go
new file mode 100644
index 000000000..6747e4687
--- /dev/null
+++ b/gopls/internal/lsp/cmd/semantictokens.go
@@ -0,0 +1,225 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmd
+
+import (
+ "bytes"
+ "context"
+ "flag"
+ "fmt"
+ "go/parser"
+ "go/token"
+ "io/ioutil"
+ "log"
+ "os"
+ "unicode/utf8"
+
+ "golang.org/x/tools/gopls/internal/lsp"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+)
+
+// generate semantic tokens and interpolate them in the file
+
+// The output is the input file decorated with comments showing the
+// syntactic tokens. The comments are stylized:
+// /*<arrow><length>,<token type>,[<modifiers]*/
+// For most occurrences, the comment comes just before the token it
+// describes, and arrow is a right arrow. If the token is inside a string
+// the comment comes just after the string, and the arrow is a left arrow.
+// <length> is the length of the token in runes, <token type> is one
+// of the supported semantic token types, and <modifiers. is a
+// (possibly empty) list of token type modifiers.
+
+// There are 3 coordinate systems for lines and character offsets in lines
+// LSP (what's returned from semanticTokens()):
+// 0-based: the first line is line 0, the first character of a line
+// is character 0, and characters are counted as UTF-16 code points
+// gopls (and Go error messages):
+// 1-based: the first line is line1, the first character of a line
+// is character 0, and characters are counted as bytes
+// internal (as used in marks, and lines:=bytes.Split(buf, '\n'))
+// 0-based: lines and character positions are 1 less than in
+// the gopls coordinate system
+
+type semtok struct {
+ app *Application
+}
+
+var colmap *protocol.Mapper
+
+func (c *semtok) Name() string { return "semtok" }
+func (c *semtok) Parent() string { return c.app.Name() }
+func (c *semtok) Usage() string { return "<filename>" }
+func (c *semtok) ShortHelp() string { return "show semantic tokens for the specified file" }
+func (c *semtok) DetailedHelp(f *flag.FlagSet) {
+ fmt.Fprint(f.Output(), `
+Example: show the semantic tokens for this file:
+
+ $ gopls semtok internal/lsp/cmd/semtok.go
+`)
+ printFlagDefaults(f)
+}
+
+// Run performs the semtok on the files specified by args and prints the
+// results to stdout in the format described above.
+func (c *semtok) Run(ctx context.Context, args ...string) error {
+ if len(args) != 1 {
+ return fmt.Errorf("expected one file name, got %d", len(args))
+ }
+ // perhaps simpler if app had just had a FlagSet member
+ origOptions := c.app.options
+ c.app.options = func(opts *source.Options) {
+ origOptions(opts)
+ opts.SemanticTokens = true
+ }
+ conn, err := c.app.connect(ctx)
+ if err != nil {
+ return err
+ }
+ defer conn.terminate(ctx)
+ uri := span.URIFromPath(args[0])
+ file := conn.openFile(ctx, uri)
+ if file.err != nil {
+ return file.err
+ }
+
+ buf, err := ioutil.ReadFile(args[0])
+ if err != nil {
+ return err
+ }
+ lines := bytes.Split(buf, []byte{'\n'})
+ p := &protocol.SemanticTokensRangeParams{
+ TextDocument: protocol.TextDocumentIdentifier{
+ URI: protocol.URIFromSpanURI(uri),
+ },
+ Range: protocol.Range{Start: protocol.Position{Line: 0, Character: 0},
+ End: protocol.Position{
+ Line: uint32(len(lines) - 1),
+ Character: uint32(len(lines[len(lines)-1]))},
+ },
+ }
+ resp, err := conn.semanticTokens(ctx, p)
+ if err != nil {
+ return err
+ }
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, args[0], buf, 0)
+ if err != nil {
+ log.Printf("parsing %s failed %v", args[0], err)
+ return err
+ }
+ tok := fset.File(f.Pos())
+ if tok == nil {
+ // can't happen; just parsed this file
+ return fmt.Errorf("can't find %s in fset", args[0])
+ }
+ colmap = protocol.NewMapper(uri, buf)
+ err = decorate(file.uri.Filename(), resp.Data)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+type mark struct {
+ line, offset int // 1-based, from RangeSpan
+ len int // bytes, not runes
+ typ string
+ mods []string
+}
+
+// prefixes for semantic token comments
+const (
+ SemanticLeft = "/*⇐"
+ SemanticRight = "/*⇒"
+)
+
+func markLine(m mark, lines [][]byte) {
+ l := lines[m.line-1] // mx is 1-based
+ length := utf8.RuneCount(l[m.offset-1 : m.offset-1+m.len])
+ splitAt := m.offset - 1
+ insert := ""
+ if m.typ == "namespace" && m.offset-1+m.len < len(l) && l[m.offset-1+m.len] == '"' {
+ // it is the last component of an import spec
+ // cannot put a comment inside a string
+ insert = fmt.Sprintf("%s%d,namespace,[]*/", SemanticLeft, length)
+ splitAt = m.offset + m.len
+ } else {
+ // be careful not to generate //*
+ spacer := ""
+ if splitAt-1 >= 0 && l[splitAt-1] == '/' {
+ spacer = " "
+ }
+ insert = fmt.Sprintf("%s%s%d,%s,%v*/", spacer, SemanticRight, length, m.typ, m.mods)
+ }
+ x := append([]byte(insert), l[splitAt:]...)
+ l = append(l[:splitAt], x...)
+ lines[m.line-1] = l
+}
+
+func decorate(file string, result []uint32) error {
+ buf, err := ioutil.ReadFile(file)
+ if err != nil {
+ return err
+ }
+ marks := newMarks(result)
+ if len(marks) == 0 {
+ return nil
+ }
+ lines := bytes.Split(buf, []byte{'\n'})
+ for i := len(marks) - 1; i >= 0; i-- {
+ mx := marks[i]
+ markLine(mx, lines)
+ }
+ os.Stdout.Write(bytes.Join(lines, []byte{'\n'}))
+ return nil
+}
+
+func newMarks(d []uint32) []mark {
+ ans := []mark{}
+ // the following two loops could be merged, at the cost
+ // of making the logic slightly more complicated to understand
+ // first, convert from deltas to absolute, in LSP coordinates
+ lspLine := make([]uint32, len(d)/5)
+ lspChar := make([]uint32, len(d)/5)
+ var line, char uint32
+ for i := 0; 5*i < len(d); i++ {
+ lspLine[i] = line + d[5*i+0]
+ if d[5*i+0] > 0 {
+ char = 0
+ }
+ lspChar[i] = char + d[5*i+1]
+ char = lspChar[i]
+ line = lspLine[i]
+ }
+ // second, convert to gopls coordinates
+ for i := 0; 5*i < len(d); i++ {
+ pr := protocol.Range{
+ Start: protocol.Position{
+ Line: lspLine[i],
+ Character: lspChar[i],
+ },
+ End: protocol.Position{
+ Line: lspLine[i],
+ Character: lspChar[i] + d[5*i+2],
+ },
+ }
+ spn, err := colmap.RangeSpan(pr)
+ if err != nil {
+ log.Fatal(err)
+ }
+ m := mark{
+ line: spn.Start().Line(),
+ offset: spn.Start().Column(),
+ len: spn.End().Column() - spn.Start().Column(),
+ typ: lsp.SemType(int(d[5*i+3])),
+ mods: lsp.SemMods(int(d[5*i+4])),
+ }
+ ans = append(ans, m)
+ }
+ return ans
+}
diff --git a/gopls/internal/lsp/cmd/serve.go b/gopls/internal/lsp/cmd/serve.go
new file mode 100644
index 000000000..df42e7983
--- /dev/null
+++ b/gopls/internal/lsp/cmd/serve.go
@@ -0,0 +1,130 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmd
+
+import (
+ "context"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "time"
+
+ "golang.org/x/tools/gopls/internal/lsp/cache"
+ "golang.org/x/tools/gopls/internal/lsp/debug"
+ "golang.org/x/tools/gopls/internal/lsp/lsprpc"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/internal/fakenet"
+ "golang.org/x/tools/internal/jsonrpc2"
+ "golang.org/x/tools/internal/tool"
+)
+
+// Serve is a struct that exposes the configurable parts of the LSP server as
+// flags, in the right form for tool.Main to consume.
+type Serve struct {
+ Logfile string `flag:"logfile" help:"filename to log to. if value is \"auto\", then logging to a default output file is enabled"`
+ Mode string `flag:"mode" help:"no effect"`
+ Port int `flag:"port" help:"port on which to run gopls for debugging purposes"`
+ Address string `flag:"listen" help:"address on which to listen for remote connections. If prefixed by 'unix;', the subsequent address is assumed to be a unix domain socket. Otherwise, TCP is used."`
+ IdleTimeout time.Duration `flag:"listen.timeout" help:"when used with -listen, shut down the server when there are no connected clients for this duration"`
+ Trace bool `flag:"rpc.trace" help:"print the full rpc trace in lsp inspector format"`
+ Debug string `flag:"debug" help:"serve debug information on the supplied address"`
+
+ RemoteListenTimeout time.Duration `flag:"remote.listen.timeout" help:"when used with -remote=auto, the -listen.timeout value used to start the daemon"`
+ RemoteDebug string `flag:"remote.debug" help:"when used with -remote=auto, the -debug value used to start the daemon"`
+ RemoteLogfile string `flag:"remote.logfile" help:"when used with -remote=auto, the -logfile value used to start the daemon"`
+
+ app *Application
+}
+
+func (s *Serve) Name() string { return "serve" }
+func (s *Serve) Parent() string { return s.app.Name() }
+func (s *Serve) Usage() string { return "[server-flags]" }
+func (s *Serve) ShortHelp() string {
+ return "run a server for Go code using the Language Server Protocol"
+}
+func (s *Serve) DetailedHelp(f *flag.FlagSet) {
+ fmt.Fprint(f.Output(), ` gopls [flags] [server-flags]
+
+The server communicates using JSONRPC2 on stdin and stdout, and is intended to be run directly as
+a child of an editor process.
+
+server-flags:
+`)
+ printFlagDefaults(f)
+}
+
+func (s *Serve) remoteArgs(network, address string) []string {
+ args := []string{"serve",
+ "-listen", fmt.Sprintf(`%s;%s`, network, address),
+ }
+ if s.RemoteDebug != "" {
+ args = append(args, "-debug", s.RemoteDebug)
+ }
+ if s.RemoteListenTimeout != 0 {
+ args = append(args, "-listen.timeout", s.RemoteListenTimeout.String())
+ }
+ if s.RemoteLogfile != "" {
+ args = append(args, "-logfile", s.RemoteLogfile)
+ }
+ return args
+}
+
+// Run configures a server based on the flags, and then runs it.
+// It blocks until the server shuts down.
+func (s *Serve) Run(ctx context.Context, args ...string) error {
+ if len(args) > 0 {
+ return tool.CommandLineErrorf("server does not take arguments, got %v", args)
+ }
+
+ di := debug.GetInstance(ctx)
+ isDaemon := s.Address != "" || s.Port != 0
+ if di != nil {
+ closeLog, err := di.SetLogFile(s.Logfile, isDaemon)
+ if err != nil {
+ return err
+ }
+ defer closeLog()
+ di.ServerAddress = s.Address
+ di.MonitorMemory(ctx)
+ di.Serve(ctx, s.Debug)
+ }
+ var ss jsonrpc2.StreamServer
+ if s.app.Remote != "" {
+ var err error
+ ss, err = lsprpc.NewForwarder(s.app.Remote, s.remoteArgs)
+ if err != nil {
+ return fmt.Errorf("creating forwarder: %w", err)
+ }
+ } else {
+ ss = lsprpc.NewStreamServer(cache.New(nil), isDaemon, s.app.options)
+ }
+
+ var network, addr string
+ if s.Address != "" {
+ network, addr = lsprpc.ParseAddr(s.Address)
+ }
+ if s.Port != 0 {
+ network = "tcp"
+ addr = fmt.Sprintf(":%v", s.Port)
+ }
+ if addr != "" {
+ log.Printf("Gopls daemon: listening on %s network, address %s...", network, addr)
+ defer log.Printf("Gopls daemon: exiting")
+ return jsonrpc2.ListenAndServe(ctx, network, addr, ss, s.IdleTimeout)
+ }
+ stream := jsonrpc2.NewHeaderStream(fakenet.NewConn("stdio", os.Stdin, os.Stdout))
+ if s.Trace && di != nil {
+ stream = protocol.LoggingStream(stream, di.LogWriter)
+ }
+ conn := jsonrpc2.NewConn(stream)
+ err := ss.ServeStream(ctx, conn)
+ if errors.Is(err, io.EOF) {
+ return nil
+ }
+ return err
+}
diff --git a/gopls/internal/lsp/cmd/signature.go b/gopls/internal/lsp/cmd/signature.go
new file mode 100644
index 000000000..4d47cd2d4
--- /dev/null
+++ b/gopls/internal/lsp/cmd/signature.go
@@ -0,0 +1,88 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmd
+
+import (
+ "context"
+ "flag"
+ "fmt"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/tool"
+)
+
+// signature implements the signature verb for gopls
+type signature struct {
+ app *Application
+}
+
+func (r *signature) Name() string { return "signature" }
+func (r *signature) Parent() string { return r.app.Name() }
+func (r *signature) Usage() string { return "<position>" }
+func (r *signature) ShortHelp() string { return "display selected identifier's signature" }
+func (r *signature) DetailedHelp(f *flag.FlagSet) {
+ fmt.Fprint(f.Output(), `
+Example:
+
+ $ # 1-indexed location (:line:column or :#offset) of the target identifier
+ $ gopls signature helper/helper.go:8:6
+ $ gopls signature helper/helper.go:#53
+`)
+ printFlagDefaults(f)
+}
+
+func (r *signature) Run(ctx context.Context, args ...string) error {
+ if len(args) != 1 {
+ return tool.CommandLineErrorf("signature expects 1 argument (position)")
+ }
+
+ conn, err := r.app.connect(ctx)
+ if err != nil {
+ return err
+ }
+ defer conn.terminate(ctx)
+
+ from := span.Parse(args[0])
+ file := conn.openFile(ctx, from.URI())
+ if file.err != nil {
+ return file.err
+ }
+
+ loc, err := file.mapper.SpanLocation(from)
+ if err != nil {
+ return err
+ }
+
+ p := protocol.SignatureHelpParams{
+ TextDocumentPositionParams: protocol.LocationTextDocumentPositionParams(loc),
+ }
+
+ s, err := conn.SignatureHelp(ctx, &p)
+ if err != nil {
+ return err
+ }
+
+ if s == nil || len(s.Signatures) == 0 {
+ return tool.CommandLineErrorf("%v: not a function", from)
+ }
+
+ // there is only ever one possible signature,
+ // see toProtocolSignatureHelp in lsp/signature_help.go
+ signature := s.Signatures[0]
+ fmt.Printf("%s\n", signature.Label)
+ switch x := signature.Documentation.Value.(type) {
+ case string:
+ if x != "" {
+ fmt.Printf("\n%s\n", x)
+ }
+ case protocol.MarkupContent:
+ if x.Value != "" {
+ fmt.Printf("\n%s\n", x.Value)
+ }
+ }
+
+ return nil
+}
diff --git a/gopls/internal/lsp/cmd/subcommands.go b/gopls/internal/lsp/cmd/subcommands.go
new file mode 100644
index 000000000..e30c42b85
--- /dev/null
+++ b/gopls/internal/lsp/cmd/subcommands.go
@@ -0,0 +1,59 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmd
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "text/tabwriter"
+
+ "golang.org/x/tools/internal/tool"
+)
+
+// subcommands is a helper that may be embedded for commands that delegate to
+// subcommands.
+type subcommands []tool.Application
+
+func (s subcommands) DetailedHelp(f *flag.FlagSet) {
+ w := tabwriter.NewWriter(f.Output(), 0, 0, 2, ' ', 0)
+ defer w.Flush()
+ fmt.Fprint(w, "\nSubcommand:\n")
+ for _, c := range s {
+ fmt.Fprintf(w, " %s\t%s\n", c.Name(), c.ShortHelp())
+ }
+ printFlagDefaults(f)
+}
+
+func (s subcommands) Usage() string { return "<subcommand> [arg]..." }
+
+func (s subcommands) Run(ctx context.Context, args ...string) error {
+ if len(args) == 0 {
+ return tool.CommandLineErrorf("must provide subcommand")
+ }
+ command, args := args[0], args[1:]
+ for _, c := range s {
+ if c.Name() == command {
+ s := flag.NewFlagSet(c.Name(), flag.ExitOnError)
+ return tool.Run(ctx, s, c, args)
+ }
+ }
+ return tool.CommandLineErrorf("unknown subcommand %v", command)
+}
+
+func (s subcommands) Commands() []tool.Application { return s }
+
+// getSubcommands returns the subcommands of a given Application.
+func getSubcommands(a tool.Application) []tool.Application {
+ // This interface is satisfied both by tool.Applications
+ // that embed subcommands, and by *cmd.Application.
+ type hasCommands interface {
+ Commands() []tool.Application
+ }
+ if sub, ok := a.(hasCommands); ok {
+ return sub.Commands()
+ }
+ return nil
+}
diff --git a/gopls/internal/lsp/cmd/suggested_fix.go b/gopls/internal/lsp/cmd/suggested_fix.go
new file mode 100644
index 000000000..60571e290
--- /dev/null
+++ b/gopls/internal/lsp/cmd/suggested_fix.go
@@ -0,0 +1,167 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmd
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "os"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/diff"
+ "golang.org/x/tools/internal/tool"
+)
+
+// suggestedFix implements the fix verb for gopls.
+type suggestedFix struct {
+ Diff bool `flag:"d,diff" help:"display diffs instead of rewriting files"`
+ Write bool `flag:"w,write" help:"write result to (source) file instead of stdout"`
+ All bool `flag:"a,all" help:"apply all fixes, not just preferred fixes"`
+
+ app *Application
+}
+
+func (s *suggestedFix) Name() string { return "fix" }
+func (s *suggestedFix) Parent() string { return s.app.Name() }
+func (s *suggestedFix) Usage() string { return "[fix-flags] <filename>" }
+func (s *suggestedFix) ShortHelp() string { return "apply suggested fixes" }
+func (s *suggestedFix) DetailedHelp(f *flag.FlagSet) {
+ fmt.Fprintf(f.Output(), `
+Example: apply suggested fixes for this file
+ $ gopls fix -w internal/lsp/cmd/check.go
+
+fix-flags:
+`)
+ printFlagDefaults(f)
+}
+
+// Run performs diagnostic checks on the file specified and either;
+// - if -w is specified, updates the file in place;
+// - if -d is specified, prints out unified diffs of the changes; or
+// - otherwise, prints the new versions to stdout.
+func (s *suggestedFix) Run(ctx context.Context, args ...string) error {
+ if len(args) < 1 {
+ return tool.CommandLineErrorf("fix expects at least 1 argument")
+ }
+ conn, err := s.app.connect(ctx)
+ if err != nil {
+ return err
+ }
+ defer conn.terminate(ctx)
+
+ from := span.Parse(args[0])
+ uri := from.URI()
+ file := conn.openFile(ctx, uri)
+ if file.err != nil {
+ return file.err
+ }
+
+ if err := conn.diagnoseFiles(ctx, []span.URI{uri}); err != nil {
+ return err
+ }
+ conn.Client.filesMu.Lock()
+ defer conn.Client.filesMu.Unlock()
+
+ codeActionKinds := []protocol.CodeActionKind{protocol.QuickFix}
+ if len(args) > 1 {
+ codeActionKinds = []protocol.CodeActionKind{}
+ for _, k := range args[1:] {
+ codeActionKinds = append(codeActionKinds, protocol.CodeActionKind(k))
+ }
+ }
+
+ rng, err := file.mapper.SpanRange(from)
+ if err != nil {
+ return err
+ }
+ p := protocol.CodeActionParams{
+ TextDocument: protocol.TextDocumentIdentifier{
+ URI: protocol.URIFromSpanURI(uri),
+ },
+ Context: protocol.CodeActionContext{
+ Only: codeActionKinds,
+ Diagnostics: file.diagnostics,
+ },
+ Range: rng,
+ }
+ actions, err := conn.CodeAction(ctx, &p)
+ if err != nil {
+ return fmt.Errorf("%v: %v", from, err)
+ }
+ var edits []protocol.TextEdit
+ for _, a := range actions {
+ if a.Command != nil {
+ return fmt.Errorf("ExecuteCommand is not yet supported on the command line")
+ }
+ if !a.IsPreferred && !s.All {
+ continue
+ }
+ if !from.HasPosition() {
+ for _, c := range a.Edit.DocumentChanges {
+ if c.TextDocumentEdit != nil {
+ if fileURI(c.TextDocumentEdit.TextDocument.URI) == uri {
+ edits = append(edits, c.TextDocumentEdit.Edits...)
+ }
+ }
+ }
+ continue
+ }
+ // If the span passed in has a position, then we need to find
+ // the codeaction that has the same range as the passed in span.
+ for _, diag := range a.Diagnostics {
+ spn, err := file.mapper.RangeSpan(diag.Range)
+ if err != nil {
+ continue
+ }
+ if span.ComparePoint(from.Start(), spn.Start()) == 0 {
+ for _, c := range a.Edit.DocumentChanges {
+ if c.TextDocumentEdit != nil {
+ if fileURI(c.TextDocumentEdit.TextDocument.URI) == uri {
+ edits = append(edits, c.TextDocumentEdit.Edits...)
+ }
+ }
+ }
+ break
+ }
+ }
+
+ // If suggested fix is not a diagnostic, still must collect edits.
+ if len(a.Diagnostics) == 0 {
+ for _, c := range a.Edit.DocumentChanges {
+ if c.TextDocumentEdit != nil {
+ if fileURI(c.TextDocumentEdit.TextDocument.URI) == uri {
+ edits = append(edits, c.TextDocumentEdit.Edits...)
+ }
+ }
+ }
+ }
+ }
+
+ newContent, sedits, err := source.ApplyProtocolEdits(file.mapper, edits)
+ if err != nil {
+ return fmt.Errorf("%v: %v", edits, err)
+ }
+
+ filename := file.uri.Filename()
+ switch {
+ case s.Write:
+ if len(edits) > 0 {
+ ioutil.WriteFile(filename, newContent, 0644)
+ }
+ case s.Diff:
+ diffs, err := diff.ToUnified(filename+".orig", filename, string(file.mapper.Content), sedits)
+ if err != nil {
+ return err
+ }
+ fmt.Print(diffs)
+ default:
+ os.Stdout.Write(newContent)
+ }
+ return nil
+}
diff --git a/gopls/internal/lsp/cmd/symbols.go b/gopls/internal/lsp/cmd/symbols.go
new file mode 100644
index 000000000..3ecdff801
--- /dev/null
+++ b/gopls/internal/lsp/cmd/symbols.go
@@ -0,0 +1,116 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmd
+
+import (
+ "context"
+ "encoding/json"
+ "flag"
+ "fmt"
+ "sort"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/tool"
+)
+
+// symbols implements the symbols verb for gopls
+type symbols struct {
+ app *Application
+}
+
+func (r *symbols) Name() string { return "symbols" }
+func (r *symbols) Parent() string { return r.app.Name() }
+func (r *symbols) Usage() string { return "<file>" }
+func (r *symbols) ShortHelp() string { return "display selected file's symbols" }
+func (r *symbols) DetailedHelp(f *flag.FlagSet) {
+ fmt.Fprint(f.Output(), `
+Example:
+ $ gopls symbols helper/helper.go
+`)
+ printFlagDefaults(f)
+}
+func (r *symbols) Run(ctx context.Context, args ...string) error {
+ if len(args) != 1 {
+ return tool.CommandLineErrorf("symbols expects 1 argument (position)")
+ }
+
+ conn, err := r.app.connect(ctx)
+ if err != nil {
+ return err
+ }
+ defer conn.terminate(ctx)
+
+ from := span.Parse(args[0])
+ p := protocol.DocumentSymbolParams{
+ TextDocument: protocol.TextDocumentIdentifier{
+ URI: protocol.URIFromSpanURI(from.URI()),
+ },
+ }
+ symbols, err := conn.DocumentSymbol(ctx, &p)
+ if err != nil {
+ return err
+ }
+ for _, s := range symbols {
+ if m, ok := s.(map[string]interface{}); ok {
+ s, err = mapToSymbol(m)
+ if err != nil {
+ return err
+ }
+ }
+ switch t := s.(type) {
+ case protocol.DocumentSymbol:
+ printDocumentSymbol(t)
+ case protocol.SymbolInformation:
+ printSymbolInformation(t)
+ }
+ }
+ return nil
+}
+
+func mapToSymbol(m map[string]interface{}) (interface{}, error) {
+ b, err := json.Marshal(m)
+ if err != nil {
+ return nil, err
+ }
+
+ if _, ok := m["selectionRange"]; ok {
+ var s protocol.DocumentSymbol
+ if err := json.Unmarshal(b, &s); err != nil {
+ return nil, err
+ }
+ return s, nil
+ }
+
+ var s protocol.SymbolInformation
+ if err := json.Unmarshal(b, &s); err != nil {
+ return nil, err
+ }
+ return s, nil
+}
+
+func printDocumentSymbol(s protocol.DocumentSymbol) {
+ fmt.Printf("%s %s %s\n", s.Name, s.Kind, positionToString(s.SelectionRange))
+ // Sort children for consistency
+ sort.Slice(s.Children, func(i, j int) bool {
+ return s.Children[i].Name < s.Children[j].Name
+ })
+ for _, c := range s.Children {
+ fmt.Printf("\t%s %s %s\n", c.Name, c.Kind, positionToString(c.SelectionRange))
+ }
+}
+
+func printSymbolInformation(s protocol.SymbolInformation) {
+ fmt.Printf("%s %s %s\n", s.Name, s.Kind, positionToString(s.Location.Range))
+}
+
+func positionToString(r protocol.Range) string {
+ return fmt.Sprintf("%v:%v-%v:%v",
+ r.Start.Line+1,
+ r.Start.Character+1,
+ r.End.Line+1,
+ r.End.Character+1,
+ )
+}
diff --git a/gopls/internal/lsp/cmd/test/cmdtest.go b/gopls/internal/lsp/cmd/test/cmdtest.go
new file mode 100644
index 000000000..7f8a13b76
--- /dev/null
+++ b/gopls/internal/lsp/cmd/test/cmdtest.go
@@ -0,0 +1,6 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cmdtest contains the test suite for the command line behavior of gopls.
+package cmdtest
diff --git a/gopls/internal/lsp/cmd/test/integration_test.go b/gopls/internal/lsp/cmd/test/integration_test.go
new file mode 100644
index 000000000..dd4f0ff82
--- /dev/null
+++ b/gopls/internal/lsp/cmd/test/integration_test.go
@@ -0,0 +1,898 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package cmdtest
+
+// This file defines integration tests of each gopls subcommand that
+// fork+exec the command in a separate process.
+//
+// (Rather than execute 'go build gopls' during the test, we reproduce
+// the main entrypoint in the test executable.)
+//
+// The purpose of this test is to exercise client-side logic such as
+// argument parsing and formatting of LSP RPC responses, not server
+// behavior; see lsp_test for that.
+//
+// All tests run in parallel.
+//
+// TODO(adonovan):
+// - Use markers to represent positions in the input and in assertions.
+// - Coverage of cross-cutting things like cwd, enviro, span parsing, etc.
+// - Subcommands that accept -write and -diff flags should implement
+// them consistently wrt the default behavior; factor their tests.
+// - Add missing test for 'vulncheck' subcommand.
+// - Add tests for client-only commands: serve, bug, help, api-json, licenses.
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "testing"
+
+ exec "golang.org/x/sys/execabs"
+ "golang.org/x/tools/gopls/internal/hooks"
+ "golang.org/x/tools/gopls/internal/lsp/cmd"
+ "golang.org/x/tools/gopls/internal/lsp/debug"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/internal/bug"
+ "golang.org/x/tools/internal/testenv"
+ "golang.org/x/tools/internal/tool"
+ "golang.org/x/tools/txtar"
+)
+
+// TestVersion tests the 'version' subcommand (../info.go).
+func TestVersion(t *testing.T) {
+ t.Parallel()
+
+ tree := writeTree(t, "")
+
+ // There's not much we can robustly assert about the actual version.
+ const want = debug.Version // e.g. "master"
+
+ // basic
+ {
+ res := gopls(t, tree, "version")
+ res.checkExit(true)
+ res.checkStdout(want)
+ }
+
+ // -json flag
+ {
+ res := gopls(t, tree, "version", "-json")
+ res.checkExit(true)
+ var v debug.ServerVersion
+ if res.toJSON(&v) {
+ if v.Version != want {
+ t.Errorf("expected Version %q, got %q (%v)", want, v.Version, res)
+ }
+ }
+ }
+}
+
+// TestCheck tests the 'check' subcommand (../check.go).
+func TestCheck(t *testing.T) {
+ t.Parallel()
+
+ tree := writeTree(t, `
+-- go.mod --
+module example.com
+go 1.18
+
+-- a.go --
+package a
+import "fmt"
+var _ = fmt.Sprintf("%s", 123)
+
+-- b.go --
+package a
+import "fmt"
+var _ = fmt.Sprintf("%d", "123")
+`)
+
+ // no files
+ {
+ res := gopls(t, tree, "check")
+ res.checkExit(true)
+ if res.stdout != "" {
+ t.Errorf("unexpected output: %v", res)
+ }
+ }
+
+ // one file
+ {
+ res := gopls(t, tree, "check", "./a.go")
+ res.checkExit(true)
+ res.checkStdout("fmt.Sprintf format %s has arg 123 of wrong type int")
+ }
+
+ // two files
+ {
+ res := gopls(t, tree, "check", "./a.go", "./b.go")
+ res.checkExit(true)
+ res.checkStdout(`a.go:.* fmt.Sprintf format %s has arg 123 of wrong type int`)
+ res.checkStdout(`b.go:.* fmt.Sprintf format %d has arg "123" of wrong type string`)
+ }
+}
+
+// TestCallHierarchy tests the 'call_hierarchy' subcommand (../call_hierarchy.go).
+func TestCallHierarchy(t *testing.T) {
+ t.Parallel()
+
+ tree := writeTree(t, `
+-- go.mod --
+module example.com
+go 1.18
+
+-- a.go --
+package a
+func f() {}
+func g() {
+ f()
+}
+func h() {
+ f()
+ f()
+}
+`)
+ // missing position
+ {
+ res := gopls(t, tree, "call_hierarchy")
+ res.checkExit(false)
+ res.checkStderr("expects 1 argument")
+ }
+ // wrong place
+ {
+ res := gopls(t, tree, "call_hierarchy", "a.go:1")
+ res.checkExit(false)
+ res.checkStderr("identifier not found")
+ }
+ // f is called once from g and twice from h.
+ {
+ res := gopls(t, tree, "call_hierarchy", "a.go:2:6")
+ res.checkExit(true)
+ // We use regexp '.' as an OS-agnostic path separator.
+ res.checkStdout("ranges 7:2-3, 8:2-3 in ..a.go from/to function h in ..a.go:6:6-7")
+ res.checkStdout("ranges 4:2-3 in ..a.go from/to function g in ..a.go:3:6-7")
+ res.checkStdout("identifier: function f in ..a.go:2:6-7")
+ }
+}
+
+// TestDefinition tests the 'definition' subcommand (../definition.go).
+func TestDefinition(t *testing.T) {
+ t.Parallel()
+
+ tree := writeTree(t, `
+-- go.mod --
+module example.com
+go 1.18
+
+-- a.go --
+package a
+import "fmt"
+func f() {
+ fmt.Println()
+}
+func g() {
+ f()
+}
+`)
+ // missing position
+ {
+ res := gopls(t, tree, "definition")
+ res.checkExit(false)
+ res.checkStderr("expects 1 argument")
+ }
+ // intra-package
+ {
+ res := gopls(t, tree, "definition", "a.go:7:2") // "f()"
+ res.checkExit(true)
+ res.checkStdout("a.go:3:6-7: defined here as func f")
+ }
+ // cross-package
+ {
+ res := gopls(t, tree, "definition", "a.go:4:7") // "Println"
+ res.checkExit(true)
+ res.checkStdout("print.go.* defined here as func fmt.Println")
+ res.checkStdout("Println formats using the default formats for its operands")
+ }
+ // -json and -markdown
+ {
+ res := gopls(t, tree, "definition", "-json", "-markdown", "a.go:4:7")
+ res.checkExit(true)
+ var defn cmd.Definition
+ if res.toJSON(&defn) {
+ if !strings.HasPrefix(defn.Description, "```go\nfunc fmt.Println") {
+ t.Errorf("Description does not start with markdown code block. Got: %s", defn.Description)
+ }
+ }
+ }
+}
+
+// TestFoldingRanges tests the 'folding_ranges' subcommand (../folding_range.go).
+func TestFoldingRanges(t *testing.T) {
+ t.Parallel()
+
+ tree := writeTree(t, `
+-- go.mod --
+module example.com
+go 1.18
+
+-- a.go --
+package a
+func f(x int) {
+ // hello
+}
+`)
+ // missing filename
+ {
+ res := gopls(t, tree, "folding_ranges")
+ res.checkExit(false)
+ res.checkStderr("expects 1 argument")
+ }
+ // success
+ {
+ res := gopls(t, tree, "folding_ranges", "a.go")
+ res.checkExit(true)
+ res.checkStdout("2:8-2:13") // params (x int)
+ res.checkStdout("2:16-4:1") // body { ... }
+ }
+}
+
+// TestFormat tests the 'format' subcommand (../format.go).
+func TestFormat(t *testing.T) {
+ t.Parallel()
+
+ tree := writeTree(t, `
+-- a.go --
+package a ; func f ( ) { }
+`)
+ const want = `package a
+
+func f() {}
+`
+
+ // no files => nop
+ {
+ res := gopls(t, tree, "format")
+ res.checkExit(true)
+ }
+ // default => print formatted result
+ {
+ res := gopls(t, tree, "format", "a.go")
+ res.checkExit(true)
+ if res.stdout != want {
+ t.Errorf("format: got <<%s>>, want <<%s>>", res.stdout, want)
+ }
+ }
+ // start/end position not supported (unless equal to start/end of file)
+ {
+ res := gopls(t, tree, "format", "a.go:1-2")
+ res.checkExit(false)
+ res.checkStderr("only full file formatting supported")
+ }
+ // -list: show only file names
+ {
+ res := gopls(t, tree, "format", "-list", "a.go")
+ res.checkExit(true)
+ res.checkStdout("a.go")
+ }
+ // -diff prints a unified diff
+ {
+ res := gopls(t, tree, "format", "-diff", "a.go")
+ res.checkExit(true)
+ // We omit the filenames as they vary by OS.
+ want := `
+-package a ; func f ( ) { }
++package a
++
++func f() {}
+`
+ res.checkStdout(regexp.QuoteMeta(want))
+ }
+ // -write updates the file
+ {
+ res := gopls(t, tree, "format", "-write", "a.go")
+ res.checkExit(true)
+ res.checkStdout("^$") // empty
+ checkContent(t, filepath.Join(tree, "a.go"), want)
+ }
+}
+
+// TestHighlight tests the 'highlight' subcommand (../highlight.go).
+func TestHighlight(t *testing.T) {
+ t.Parallel()
+
+ tree := writeTree(t, `
+-- a.go --
+package a
+import "fmt"
+func f() {
+ fmt.Println()
+ fmt.Println()
+}
+`)
+
+ // no arguments
+ {
+ res := gopls(t, tree, "highlight")
+ res.checkExit(false)
+ res.checkStderr("expects 1 argument")
+ }
+ // all occurrences of Println
+ {
+ res := gopls(t, tree, "highlight", "a.go:4:7")
+ res.checkExit(true)
+ res.checkStdout("a.go:4:6-13")
+ res.checkStdout("a.go:5:6-13")
+ }
+}
+
+// TestImplementations tests the 'implementation' subcommand (../implementation.go).
+func TestImplementations(t *testing.T) {
+ t.Parallel()
+
+ tree := writeTree(t, `
+-- a.go --
+package a
+import "fmt"
+type T int
+func (T) String() string { return "" }
+`)
+
+ // no arguments
+ {
+ res := gopls(t, tree, "implementation")
+ res.checkExit(false)
+ res.checkStderr("expects 1 argument")
+ }
+ // T.String
+ {
+ res := gopls(t, tree, "implementation", "a.go:4:10")
+ res.checkExit(true)
+ // TODO(adonovan): extract and check the content of the reported ranges?
+ // We use regexp '.' as an OS-agnostic path separator.
+ res.checkStdout("fmt.print.go:") // fmt.Stringer.String
+ res.checkStdout("runtime.error.go:") // runtime.stringer.String
+ }
+}
+
+// TestImports tests the 'imports' subcommand (../imports.go).
+func TestImports(t *testing.T) {
+ t.Parallel()
+
+ tree := writeTree(t, `
+-- a.go --
+package a
+func _() {
+ fmt.Println()
+}
+`)
+
+ want := `
+package a
+
+import "fmt"
+func _() {
+ fmt.Println()
+}
+`[1:]
+
+ // no arguments
+ {
+ res := gopls(t, tree, "imports")
+ res.checkExit(false)
+ res.checkStderr("expects 1 argument")
+ }
+ // default: print with imports
+ {
+ res := gopls(t, tree, "imports", "a.go")
+ res.checkExit(true)
+ if res.stdout != want {
+ t.Errorf("format: got <<%s>>, want <<%s>>", res.stdout, want)
+ }
+ }
+ // -diff: show a unified diff
+ {
+ res := gopls(t, tree, "imports", "-diff", "a.go")
+ res.checkExit(true)
+ res.checkStdout(regexp.QuoteMeta(`+import "fmt"`))
+ }
+ // -write: update file
+ {
+ res := gopls(t, tree, "imports", "-write", "a.go")
+ res.checkExit(true)
+ checkContent(t, filepath.Join(tree, "a.go"), want)
+ }
+}
+
+// TestLinks tests the 'links' subcommand (../links.go).
+func TestLinks(t *testing.T) {
+ t.Parallel()
+
+ tree := writeTree(t, `
+-- a.go --
+// Link in package doc: https://pkg.go.dev/
+package a
+
+// Link in internal comment: https://go.dev/cl
+
+// Doc comment link: https://blog.go.dev/
+func f() {}
+`)
+ // no arguments
+ {
+ res := gopls(t, tree, "links")
+ res.checkExit(false)
+ res.checkStderr("expects 1 argument")
+ }
+ // success
+ {
+ res := gopls(t, tree, "links", "a.go")
+ res.checkExit(true)
+ res.checkStdout("https://go.dev/cl")
+ res.checkStdout("https://pkg.go.dev")
+ res.checkStdout("https://blog.go.dev/")
+ }
+ // -json
+ {
+ res := gopls(t, tree, "links", "-json", "a.go")
+ res.checkExit(true)
+ res.checkStdout("https://pkg.go.dev")
+ res.checkStdout("https://go.dev/cl")
+ res.checkStdout("https://blog.go.dev/") // at 5:21-5:41
+ var links []protocol.DocumentLink
+ if res.toJSON(&links) {
+ // Check just one of the three locations.
+ if got, want := fmt.Sprint(links[2].Range), "5:21-5:41"; got != want {
+ t.Errorf("wrong link location: got %v, want %v", got, want)
+ }
+ }
+ }
+}
+
+// TestReferences tests the 'references' subcommand (../references.go).
+func TestReferences(t *testing.T) {
+ t.Parallel()
+
+ tree := writeTree(t, `
+-- go.mod --
+module example.com
+go 1.18
+
+-- a.go --
+package a
+import "fmt"
+func f() {
+ fmt.Println()
+}
+
+-- b.go --
+package a
+import "fmt"
+func g() {
+ fmt.Println()
+}
+`)
+ // no arguments
+ {
+ res := gopls(t, tree, "references")
+ res.checkExit(false)
+ res.checkStderr("expects 1 argument")
+ }
+ // fmt.Println
+ {
+ res := gopls(t, tree, "references", "a.go:4:10")
+ res.checkExit(true)
+ res.checkStdout("a.go:4:6-13")
+ res.checkStdout("b.go:4:6-13")
+ }
+}
+
+// TestSignature tests the 'signature' subcommand (../signature.go).
+func TestSignature(t *testing.T) {
+ t.Parallel()
+
+ tree := writeTree(t, `
+-- go.mod --
+module example.com
+go 1.18
+
+-- a.go --
+package a
+import "fmt"
+func f() {
+ fmt.Println(123)
+}
+`)
+ // no arguments
+ {
+ res := gopls(t, tree, "signature")
+ res.checkExit(false)
+ res.checkStderr("expects 1 argument")
+ }
+ // at 123 inside fmt.Println() call
+ {
+ res := gopls(t, tree, "signature", "a.go:4:15")
+ res.checkExit(true)
+ res.checkStdout("Println\\(a ...")
+ res.checkStdout("Println formats using the default formats...")
+ }
+}
+
+// TestPrepareRename tests the 'prepare_rename' subcommand (../prepare_rename.go).
+func TestPrepareRename(t *testing.T) {
+ t.Parallel()
+
+ tree := writeTree(t, `
+-- go.mod --
+module example.com
+go 1.18
+
+-- a.go --
+package a
+func oldname() {}
+`)
+ // no arguments
+ {
+ res := gopls(t, tree, "prepare_rename")
+ res.checkExit(false)
+ res.checkStderr("expects 1 argument")
+ }
+ // in 'package' keyword
+ {
+ res := gopls(t, tree, "prepare_rename", "a.go:1:3")
+ res.checkExit(false)
+ res.checkStderr("request is not valid at the given position")
+ }
+ // in 'package' identifier (not supported by client)
+ {
+ res := gopls(t, tree, "prepare_rename", "a.go:1:9")
+ res.checkExit(false)
+ res.checkStderr("can't rename package")
+ }
+ // in func oldname
+ {
+ res := gopls(t, tree, "prepare_rename", "a.go:2:9")
+ res.checkExit(true)
+ res.checkStdout("a.go:2:6-13") // all of "oldname"
+ }
+}
+
+// TestRename tests the 'rename' subcommand (../rename.go).
+func TestRename(t *testing.T) {
+ t.Parallel()
+
+ tree := writeTree(t, `
+-- go.mod --
+module example.com
+go 1.18
+
+-- a.go --
+package a
+func oldname() {}
+`)
+ // no arguments
+ {
+ res := gopls(t, tree, "rename")
+ res.checkExit(false)
+ res.checkStderr("expects 2 arguments")
+ }
+ // missing newname
+ {
+ res := gopls(t, tree, "rename", "a.go:1:3")
+ res.checkExit(false)
+ res.checkStderr("expects 2 arguments")
+ }
+ // in 'package' keyword
+ {
+ res := gopls(t, tree, "rename", "a.go:1:3", "newname")
+ res.checkExit(false)
+ res.checkStderr("no identifier found")
+ }
+ // in 'package' identifier
+ {
+ res := gopls(t, tree, "rename", "a.go:1:9", "newname")
+ res.checkExit(false)
+ res.checkStderr(`cannot rename package: module path .* same as the package path, so .* no effect`)
+ }
+ // success, func oldname (and -diff)
+ {
+ res := gopls(t, tree, "rename", "-diff", "a.go:2:9", "newname")
+ res.checkExit(true)
+ res.checkStdout(regexp.QuoteMeta("-func oldname() {}"))
+ res.checkStdout(regexp.QuoteMeta("+func newname() {}"))
+ }
+}
+
+// TestSymbols tests the 'symbols' subcommand (../symbols.go).
+func TestSymbols(t *testing.T) {
+ t.Parallel()
+
+ tree := writeTree(t, `
+-- go.mod --
+module example.com
+go 1.18
+
+-- a.go --
+package a
+func f()
+var v int
+const c = 0
+`)
+ // no files
+ {
+ res := gopls(t, tree, "symbols")
+ res.checkExit(false)
+ res.checkStderr("expects 1 argument")
+ }
+ // success
+ {
+ res := gopls(t, tree, "symbols", "a.go:123:456") // (line/col ignored)
+ res.checkExit(true)
+ res.checkStdout("f Function 2:6-2:7")
+ res.checkStdout("v Variable 3:5-3:6")
+ res.checkStdout("c Constant 4:7-4:8")
+ }
+}
+
+// TestSemtok tests the 'semtok' subcommand (../semantictokens.go).
+func TestSemtok(t *testing.T) {
+ t.Parallel()
+
+ tree := writeTree(t, `
+-- go.mod --
+module example.com
+go 1.18
+
+-- a.go --
+package a
+func f()
+var v int
+const c = 0
+`)
+ // no files
+ {
+ res := gopls(t, tree, "semtok")
+ res.checkExit(false)
+ res.checkStderr("expected one file name")
+ }
+ // success
+ {
+ res := gopls(t, tree, "semtok", "a.go")
+ res.checkExit(true)
+ got := res.stdout
+ want := `
+/*⇒7,keyword,[]*/package /*⇒1,namespace,[]*/a
+/*⇒4,keyword,[]*/func /*⇒1,function,[definition]*/f()
+/*⇒3,keyword,[]*/var /*⇒1,variable,[definition]*/v /*⇒3,type,[defaultLibrary]*/int
+/*⇒5,keyword,[]*/const /*⇒1,variable,[definition readonly]*/c = /*⇒1,number,[]*/0
+`[1:]
+ if got != want {
+ t.Errorf("semtok: got <<%s>>, want <<%s>>", got, want)
+ }
+ }
+}
+
+// TestFix tests the 'fix' subcommand (../suggested_fix.go).
+func TestFix(t *testing.T) {
+ t.Parallel()
+
+ tree := writeTree(t, `
+-- go.mod --
+module example.com
+go 1.18
+
+-- a.go --
+package a
+var _ error = T(0)
+type T int
+func f() (int, string) { return }
+`)
+ want := `
+package a
+var _ error = T(0)
+type T int
+func f() (int, string) { return 0, "" }
+`[1:]
+
+ // no arguments
+ {
+ res := gopls(t, tree, "fix")
+ res.checkExit(false)
+ res.checkStderr("expects at least 1 argument")
+ }
+ // success (-a enables fillreturns)
+ {
+ res := gopls(t, tree, "fix", "-a", "a.go")
+ res.checkExit(true)
+ got := res.stdout
+ if got != want {
+ t.Errorf("fix: got <<%s>>, want <<%s>>", got, want)
+ }
+ }
+ // TODO(adonovan): more tests:
+ // - -write, -diff: factor with imports, format, rename.
+ // - without -all flag
+ // - args[2:] is an optional list of protocol.CodeActionKind enum values.
+ // - a span argument with a range causes filtering.
+}
+
+// TestWorkspaceSymbol tests the 'workspace_symbol' subcommand (../workspace_symbol.go).
+func TestWorkspaceSymbol(t *testing.T) {
+ t.Parallel()
+
+ tree := writeTree(t, `
+-- go.mod --
+module example.com
+go 1.18
+
+-- a.go --
+package a
+func someFunctionName()
+`)
+ // no files
+ {
+ res := gopls(t, tree, "workspace_symbol")
+ res.checkExit(false)
+ res.checkStderr("expects 1 argument")
+ }
+ // success
+ {
+ res := gopls(t, tree, "workspace_symbol", "meFun")
+ res.checkExit(true)
+ res.checkStdout("a.go:2:6-22 someFunctionName Function")
+ }
+}
+
+// -- test framework --
+
+func TestMain(m *testing.M) {
+ switch os.Getenv("ENTRYPOINT") {
+ case "goplsMain":
+ goplsMain()
+ default:
+ os.Exit(m.Run())
+ }
+}
+
+// This function is a stand-in for gopls.main in ../../../../main.go.
+func goplsMain() {
+ bug.PanicOnBugs = true // (not in the production command)
+ tool.Main(context.Background(), cmd.New("gopls", "", nil, hooks.Options), os.Args[1:])
+}
+
+// writeTree extracts a txtar archive into a new directory and returns its path.
+func writeTree(t *testing.T, archive string) string {
+ root := t.TempDir()
+
+ // This unfortunate step is required because gopls output
+ // expands symbolic links it its input file names (arguably it
+ // should not), and on macOS the temp dir is in /var -> private/var.
+ root, err := filepath.EvalSymlinks(root)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for _, f := range txtar.Parse([]byte(archive)).Files {
+ filename := filepath.Join(root, f.Name)
+ if err := os.MkdirAll(filepath.Dir(filename), 0777); err != nil {
+ t.Fatal(err)
+ }
+ if err := os.WriteFile(filename, f.Data, 0666); err != nil {
+ t.Fatal(err)
+ }
+ }
+ return root
+}
+
+// gopls executes gopls in a child process.
+func gopls(t *testing.T, dir string, args ...string) *result {
+ testenv.NeedsTool(t, "go")
+
+ // Catch inadvertent use of dir=".", which would make
+ // the ReplaceAll below unpredictable.
+ if !filepath.IsAbs(dir) {
+ t.Fatalf("dir is not absolute: %s", dir)
+ }
+
+ cmd := exec.Command(os.Args[0], args...)
+ cmd.Env = append(os.Environ(), "ENTRYPOINT=goplsMain")
+ cmd.Dir = dir
+ cmd.Stdout = new(bytes.Buffer)
+ cmd.Stderr = new(bytes.Buffer)
+
+ cmdErr := cmd.Run()
+
+ stdout := strings.ReplaceAll(fmt.Sprint(cmd.Stdout), dir, ".")
+ stderr := strings.ReplaceAll(fmt.Sprint(cmd.Stderr), dir, ".")
+ exitcode := 0
+ if cmdErr != nil {
+ if exitErr, ok := cmdErr.(*exec.ExitError); ok {
+ exitcode = exitErr.ExitCode()
+ } else {
+ stderr = cmdErr.Error() // (execve failure)
+ exitcode = -1
+ }
+ }
+ res := &result{
+ t: t,
+ command: "gopls " + strings.Join(args, " "),
+ exitcode: exitcode,
+ stdout: stdout,
+ stderr: stderr,
+ }
+ if false {
+ t.Log(res)
+ }
+ return res
+}
+
+// A result holds the result of a gopls invocation, and provides assertion helpers.
+type result struct {
+ t *testing.T
+ command string
+ exitcode int
+ stdout, stderr string
+}
+
+func (res *result) String() string {
+ return fmt.Sprintf("%s: exit=%d stdout=<<%s>> stderr=<<%s>>",
+ res.command, res.exitcode, res.stdout, res.stderr)
+}
+
+// checkExit asserts that gopls returned the expected exit code.
+func (res *result) checkExit(success bool) {
+ res.t.Helper()
+ if (res.exitcode == 0) != success {
+ res.t.Errorf("%s: exited with code %d, want success: %t (%s)",
+ res.command, res.exitcode, success, res)
+ }
+}
+
+// checkStdout asserts that the gopls standard output matches the pattern.
+func (res *result) checkStdout(pattern string) {
+ res.t.Helper()
+ res.checkOutput(pattern, "stdout", res.stdout)
+}
+
+// checkStderr asserts that the gopls standard error matches the pattern.
+func (res *result) checkStderr(pattern string) {
+ res.t.Helper()
+ res.checkOutput(pattern, "stderr", res.stderr)
+}
+
+func (res *result) checkOutput(pattern, name, content string) {
+ res.t.Helper()
+ if match, err := regexp.MatchString(pattern, content); err != nil {
+ res.t.Errorf("invalid regexp: %v", err)
+ } else if !match {
+ res.t.Errorf("%s: %s does not match [%s]; got <<%s>>",
+ res.command, name, pattern, content)
+ }
+}
+
+// toJSON decodes res.stdout as JSON into to *ptr and reports its success.
+func (res *result) toJSON(ptr interface{}) bool {
+ if err := json.Unmarshal([]byte(res.stdout), ptr); err != nil {
+ res.t.Errorf("invalid JSON %v", err)
+ return false
+ }
+ return true
+}
+
+// checkContent checks that the contents of the file are as expected.
+func checkContent(t *testing.T, filename, want string) {
+ data, err := os.ReadFile(filename)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ if got := string(data); got != want {
+ t.Errorf("content of %s is <<%s>>, want <<%s>>", filename, got, want)
+ }
+}
diff --git a/internal/lsp/cmd/usage/api-json.hlp b/gopls/internal/lsp/cmd/usage/api-json.hlp
index cb9fbfbea..cb9fbfbea 100644
--- a/internal/lsp/cmd/usage/api-json.hlp
+++ b/gopls/internal/lsp/cmd/usage/api-json.hlp
diff --git a/internal/lsp/cmd/usage/bug.hlp b/gopls/internal/lsp/cmd/usage/bug.hlp
index 772d54d5f..772d54d5f 100644
--- a/internal/lsp/cmd/usage/bug.hlp
+++ b/gopls/internal/lsp/cmd/usage/bug.hlp
diff --git a/internal/lsp/cmd/usage/call_hierarchy.hlp b/gopls/internal/lsp/cmd/usage/call_hierarchy.hlp
index 07fccc828..07fccc828 100644
--- a/internal/lsp/cmd/usage/call_hierarchy.hlp
+++ b/gopls/internal/lsp/cmd/usage/call_hierarchy.hlp
diff --git a/internal/lsp/cmd/usage/check.hlp b/gopls/internal/lsp/cmd/usage/check.hlp
index ba89588d5..ba89588d5 100644
--- a/internal/lsp/cmd/usage/check.hlp
+++ b/gopls/internal/lsp/cmd/usage/check.hlp
diff --git a/internal/lsp/cmd/usage/definition.hlp b/gopls/internal/lsp/cmd/usage/definition.hlp
index 500e6c9a4..500e6c9a4 100644
--- a/internal/lsp/cmd/usage/definition.hlp
+++ b/gopls/internal/lsp/cmd/usage/definition.hlp
diff --git a/internal/lsp/cmd/usage/fix.hlp b/gopls/internal/lsp/cmd/usage/fix.hlp
index 4789a6c5b..4789a6c5b 100644
--- a/internal/lsp/cmd/usage/fix.hlp
+++ b/gopls/internal/lsp/cmd/usage/fix.hlp
diff --git a/internal/lsp/cmd/usage/folding_ranges.hlp b/gopls/internal/lsp/cmd/usage/folding_ranges.hlp
index 4af2da615..4af2da615 100644
--- a/internal/lsp/cmd/usage/folding_ranges.hlp
+++ b/gopls/internal/lsp/cmd/usage/folding_ranges.hlp
diff --git a/internal/lsp/cmd/usage/format.hlp b/gopls/internal/lsp/cmd/usage/format.hlp
index 7ef0bbe43..7ef0bbe43 100644
--- a/internal/lsp/cmd/usage/format.hlp
+++ b/gopls/internal/lsp/cmd/usage/format.hlp
diff --git a/gopls/internal/lsp/cmd/usage/help.hlp b/gopls/internal/lsp/cmd/usage/help.hlp
new file mode 100644
index 000000000..f0ff44a4d
--- /dev/null
+++ b/gopls/internal/lsp/cmd/usage/help.hlp
@@ -0,0 +1,10 @@
+print usage information for subcommands
+
+Usage:
+ gopls [flags] help
+
+
+Examples:
+$ gopls help # main gopls help message
+$ gopls help remote # help on 'remote' command
+$ gopls help remote sessions # help on 'remote sessions' subcommand
diff --git a/internal/lsp/cmd/usage/highlight.hlp b/gopls/internal/lsp/cmd/usage/highlight.hlp
index e128eb7de..e128eb7de 100644
--- a/internal/lsp/cmd/usage/highlight.hlp
+++ b/gopls/internal/lsp/cmd/usage/highlight.hlp
diff --git a/internal/lsp/cmd/usage/implementation.hlp b/gopls/internal/lsp/cmd/usage/implementation.hlp
index 09414f190..09414f190 100644
--- a/internal/lsp/cmd/usage/implementation.hlp
+++ b/gopls/internal/lsp/cmd/usage/implementation.hlp
diff --git a/internal/lsp/cmd/usage/imports.hlp b/gopls/internal/lsp/cmd/usage/imports.hlp
index 295f4daa2..295f4daa2 100644
--- a/internal/lsp/cmd/usage/imports.hlp
+++ b/gopls/internal/lsp/cmd/usage/imports.hlp
diff --git a/internal/lsp/cmd/usage/inspect.hlp b/gopls/internal/lsp/cmd/usage/inspect.hlp
index 3d0a0f3c4..3d0a0f3c4 100644
--- a/internal/lsp/cmd/usage/inspect.hlp
+++ b/gopls/internal/lsp/cmd/usage/inspect.hlp
diff --git a/internal/lsp/cmd/usage/licenses.hlp b/gopls/internal/lsp/cmd/usage/licenses.hlp
index ab60ebc2f..ab60ebc2f 100644
--- a/internal/lsp/cmd/usage/licenses.hlp
+++ b/gopls/internal/lsp/cmd/usage/licenses.hlp
diff --git a/internal/lsp/cmd/usage/links.hlp b/gopls/internal/lsp/cmd/usage/links.hlp
index 7f7612ce7..7f7612ce7 100644
--- a/internal/lsp/cmd/usage/links.hlp
+++ b/gopls/internal/lsp/cmd/usage/links.hlp
diff --git a/internal/lsp/cmd/usage/prepare_rename.hlp b/gopls/internal/lsp/cmd/usage/prepare_rename.hlp
index 7f8a6f32d..7f8a6f32d 100644
--- a/internal/lsp/cmd/usage/prepare_rename.hlp
+++ b/gopls/internal/lsp/cmd/usage/prepare_rename.hlp
diff --git a/internal/lsp/cmd/usage/references.hlp b/gopls/internal/lsp/cmd/usage/references.hlp
index c55ef0337..c55ef0337 100644
--- a/internal/lsp/cmd/usage/references.hlp
+++ b/gopls/internal/lsp/cmd/usage/references.hlp
diff --git a/internal/lsp/cmd/usage/remote.hlp b/gopls/internal/lsp/cmd/usage/remote.hlp
index dd6034f46..dd6034f46 100644
--- a/internal/lsp/cmd/usage/remote.hlp
+++ b/gopls/internal/lsp/cmd/usage/remote.hlp
diff --git a/internal/lsp/cmd/usage/rename.hlp b/gopls/internal/lsp/cmd/usage/rename.hlp
index ae58cbf60..ae58cbf60 100644
--- a/internal/lsp/cmd/usage/rename.hlp
+++ b/gopls/internal/lsp/cmd/usage/rename.hlp
diff --git a/internal/lsp/cmd/usage/semtok.hlp b/gopls/internal/lsp/cmd/usage/semtok.hlp
index 459ed596c..459ed596c 100644
--- a/internal/lsp/cmd/usage/semtok.hlp
+++ b/gopls/internal/lsp/cmd/usage/semtok.hlp
diff --git a/internal/lsp/cmd/usage/serve.hlp b/gopls/internal/lsp/cmd/usage/serve.hlp
index 370cbce83..370cbce83 100644
--- a/internal/lsp/cmd/usage/serve.hlp
+++ b/gopls/internal/lsp/cmd/usage/serve.hlp
diff --git a/internal/lsp/cmd/usage/signature.hlp b/gopls/internal/lsp/cmd/usage/signature.hlp
index f9fd0bfb7..f9fd0bfb7 100644
--- a/internal/lsp/cmd/usage/signature.hlp
+++ b/gopls/internal/lsp/cmd/usage/signature.hlp
diff --git a/internal/lsp/cmd/usage/symbols.hlp b/gopls/internal/lsp/cmd/usage/symbols.hlp
index 2aa36aa84..2aa36aa84 100644
--- a/internal/lsp/cmd/usage/symbols.hlp
+++ b/gopls/internal/lsp/cmd/usage/symbols.hlp
diff --git a/gopls/internal/lsp/cmd/usage/usage.hlp b/gopls/internal/lsp/cmd/usage/usage.hlp
new file mode 100644
index 000000000..404750b7d
--- /dev/null
+++ b/gopls/internal/lsp/cmd/usage/usage.hlp
@@ -0,0 +1,77 @@
+
+gopls is a Go language server.
+
+It is typically used with an editor to provide language features. When no
+command is specified, gopls will default to the 'serve' command. The language
+features can also be accessed via the gopls command-line interface.
+
+Usage:
+ gopls help [<subject>]
+
+Command:
+
+Main
+ serve run a server for Go code using the Language Server Protocol
+ version print the gopls version information
+ bug report a bug in gopls
+ help print usage information for subcommands
+ api-json print json describing gopls API
+ licenses print licenses of included software
+
+Features
+ call_hierarchy display selected identifier's call hierarchy
+ check show diagnostic results for the specified file
+ definition show declaration of selected identifier
+ folding_ranges display selected file's folding ranges
+ format format the code according to the go standard
+ highlight display selected identifier's highlights
+ implementation display selected identifier's implementation
+ imports updates import statements
+ remote interact with the gopls daemon
+ inspect interact with the gopls daemon (deprecated: use 'remote')
+ links list links in a file
+ prepare_rename test validity of a rename operation at location
+ references display selected identifier's references
+ rename rename selected identifier
+ semtok show semantic tokens for the specified file
+ signature display selected identifier's signature
+ fix apply suggested fixes
+ symbols display selected file's symbols
+ workspace_symbol search symbols in workspace
+ vulncheck run experimental vulncheck analysis (experimental: under development)
+
+flags:
+ -debug=string
+ serve debug information on the supplied address
+ -listen=string
+ address on which to listen for remote connections. If prefixed by 'unix;', the subsequent address is assumed to be a unix domain socket. Otherwise, TCP is used.
+ -listen.timeout=duration
+ when used with -listen, shut down the server when there are no connected clients for this duration
+ -logfile=string
+ filename to log to. if value is "auto", then logging to a default output file is enabled
+ -mode=string
+ no effect
+ -ocagent=string
+ the address of the ocagent (e.g. http://localhost:55678), or off (default "off")
+ -port=int
+ port on which to run gopls for debugging purposes
+ -profile.cpu=string
+ write CPU profile to this file
+ -profile.mem=string
+ write memory profile to this file
+ -profile.trace=string
+ write trace log to this file
+ -remote=string
+ forward all commands to a remote lsp specified by this flag. With no special prefix, this is assumed to be a TCP address. If prefixed by 'unix;', the subsequent address is assumed to be a unix domain socket. If 'auto', or prefixed by 'auto;', the remote address is automatically resolved based on the executing environment.
+ -remote.debug=string
+ when used with -remote=auto, the -debug value used to start the daemon
+ -remote.listen.timeout=duration
+ when used with -remote=auto, the -listen.timeout value used to start the daemon (default 1m0s)
+ -remote.logfile=string
+ when used with -remote=auto, the -logfile value used to start the daemon
+ -rpc.trace
+ print the full rpc trace in lsp inspector format
+ -v,-verbose
+ verbose output
+ -vv,-veryverbose
+ very verbose output
diff --git a/internal/lsp/cmd/usage/version.hlp b/gopls/internal/lsp/cmd/usage/version.hlp
index 3a09ddedf..3a09ddedf 100644
--- a/internal/lsp/cmd/usage/version.hlp
+++ b/gopls/internal/lsp/cmd/usage/version.hlp
diff --git a/gopls/internal/lsp/cmd/usage/vulncheck.hlp b/gopls/internal/lsp/cmd/usage/vulncheck.hlp
new file mode 100644
index 000000000..4fbe573e2
--- /dev/null
+++ b/gopls/internal/lsp/cmd/usage/vulncheck.hlp
@@ -0,0 +1,17 @@
+run experimental vulncheck analysis (experimental: under development)
+
+Usage:
+ gopls [flags] vulncheck
+
+ WARNING: this command is experimental.
+
+ By default, the command outputs a JSON-encoded
+ golang.org/x/tools/gopls/internal/lsp/command.VulncheckResult
+ message.
+ Example:
+ $ gopls vulncheck <packages>
+
+ -config
+ If true, the command reads a JSON-encoded package load configuration from stdin
+ -summary
+ If true, outputs a JSON-encoded govulnchecklib.Summary JSON
diff --git a/internal/lsp/cmd/usage/workspace_symbol.hlp b/gopls/internal/lsp/cmd/usage/workspace_symbol.hlp
index a61b47b33..a61b47b33 100644
--- a/internal/lsp/cmd/usage/workspace_symbol.hlp
+++ b/gopls/internal/lsp/cmd/usage/workspace_symbol.hlp
diff --git a/gopls/internal/lsp/cmd/vulncheck.go b/gopls/internal/lsp/cmd/vulncheck.go
new file mode 100644
index 000000000..5c851b66e
--- /dev/null
+++ b/gopls/internal/lsp/cmd/vulncheck.go
@@ -0,0 +1,84 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmd
+
+import (
+ "context"
+ "encoding/json"
+ "flag"
+ "fmt"
+ "os"
+
+ "golang.org/x/tools/go/packages"
+ vulnchecklib "golang.org/x/tools/gopls/internal/vulncheck"
+ "golang.org/x/tools/internal/tool"
+)
+
+// vulncheck implements the vulncheck command.
+type vulncheck struct {
+ Config bool `flag:"config" help:"If true, the command reads a JSON-encoded package load configuration from stdin"`
+ AsSummary bool `flag:"summary" help:"If true, outputs a JSON-encoded govulnchecklib.Summary JSON"`
+ app *Application
+}
+
+type pkgLoadConfig struct {
+ // BuildFlags is a list of command-line flags to be passed through to
+ // the build system's query tool.
+ BuildFlags []string
+
+ // If Tests is set, the loader includes related test packages.
+ Tests bool
+}
+
+// TODO(hyangah): document pkgLoadConfig
+
+func (v *vulncheck) Name() string { return "vulncheck" }
+func (v *vulncheck) Parent() string { return v.app.Name() }
+func (v *vulncheck) Usage() string { return "" }
+func (v *vulncheck) ShortHelp() string {
+ return "run experimental vulncheck analysis (experimental: under development)"
+}
+func (v *vulncheck) DetailedHelp(f *flag.FlagSet) {
+ fmt.Fprint(f.Output(), `
+ WARNING: this command is experimental.
+
+ By default, the command outputs a JSON-encoded
+ golang.org/x/tools/gopls/internal/lsp/command.VulncheckResult
+ message.
+ Example:
+ $ gopls vulncheck <packages>
+
+`)
+ printFlagDefaults(f)
+}
+
+func (v *vulncheck) Run(ctx context.Context, args ...string) error {
+ if vulnchecklib.Main == nil {
+ return fmt.Errorf("vulncheck command is available only in gopls compiled with go1.18 or newer")
+ }
+
+ // TODO(hyangah): what's wrong with allowing multiple targets?
+ if len(args) > 1 {
+ return tool.CommandLineErrorf("vulncheck accepts at most one package pattern")
+ }
+ var cfg pkgLoadConfig
+ if v.Config {
+ if err := json.NewDecoder(os.Stdin).Decode(&cfg); err != nil {
+ return tool.CommandLineErrorf("failed to parse cfg: %v", err)
+ }
+ }
+ loadCfg := packages.Config{
+ Context: ctx,
+ Tests: cfg.Tests,
+ BuildFlags: cfg.BuildFlags,
+ // inherit the current process's cwd and env.
+ }
+
+ if err := vulnchecklib.Main(loadCfg, args...); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+ return nil
+}
diff --git a/gopls/internal/lsp/cmd/workspace_symbol.go b/gopls/internal/lsp/cmd/workspace_symbol.go
new file mode 100644
index 000000000..0c7160af3
--- /dev/null
+++ b/gopls/internal/lsp/cmd/workspace_symbol.go
@@ -0,0 +1,85 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmd
+
+import (
+ "context"
+ "flag"
+ "fmt"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/internal/tool"
+)
+
+// workspaceSymbol implements the workspace_symbol verb for gopls.
+type workspaceSymbol struct {
+ Matcher string `flag:"matcher" help:"specifies the type of matcher: fuzzy, caseSensitive, or caseInsensitive.\nThe default is caseInsensitive."`
+
+ app *Application
+}
+
+func (r *workspaceSymbol) Name() string { return "workspace_symbol" }
+func (r *workspaceSymbol) Parent() string { return r.app.Name() }
+func (r *workspaceSymbol) Usage() string { return "[workspace_symbol-flags] <query>" }
+func (r *workspaceSymbol) ShortHelp() string { return "search symbols in workspace" }
+func (r *workspaceSymbol) DetailedHelp(f *flag.FlagSet) {
+ fmt.Fprint(f.Output(), `
+Example:
+
+ $ gopls workspace_symbol -matcher fuzzy 'wsymbols'
+
+workspace_symbol-flags:
+`)
+ printFlagDefaults(f)
+}
+
+func (r *workspaceSymbol) Run(ctx context.Context, args ...string) error {
+ if len(args) != 1 {
+ return tool.CommandLineErrorf("workspace_symbol expects 1 argument")
+ }
+
+ opts := r.app.options
+ r.app.options = func(o *source.Options) {
+ if opts != nil {
+ opts(o)
+ }
+ switch r.Matcher {
+ case "fuzzy":
+ o.SymbolMatcher = source.SymbolFuzzy
+ case "caseSensitive":
+ o.SymbolMatcher = source.SymbolCaseSensitive
+ case "fastfuzzy":
+ o.SymbolMatcher = source.SymbolFastFuzzy
+ default:
+ o.SymbolMatcher = source.SymbolCaseInsensitive
+ }
+ }
+
+ conn, err := r.app.connect(ctx)
+ if err != nil {
+ return err
+ }
+ defer conn.terminate(ctx)
+
+ p := protocol.WorkspaceSymbolParams{
+ Query: args[0],
+ }
+
+ symbols, err := conn.Symbol(ctx, &p)
+ if err != nil {
+ return err
+ }
+ for _, s := range symbols {
+ f := conn.openFile(ctx, fileURI(s.Location.URI))
+ span, err := f.mapper.LocationSpan(s.Location)
+ if err != nil {
+ return err
+ }
+ fmt.Printf("%s %s %s\n", span, s.Name, s.Kind)
+ }
+
+ return nil
+}
diff --git a/gopls/internal/lsp/code_action.go b/gopls/internal/lsp/code_action.go
new file mode 100644
index 000000000..3864648bc
--- /dev/null
+++ b/gopls/internal/lsp/code_action.go
@@ -0,0 +1,481 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsp
+
+import (
+ "context"
+ "fmt"
+ "sort"
+ "strings"
+
+ "golang.org/x/tools/gopls/internal/lsp/command"
+ "golang.org/x/tools/gopls/internal/lsp/mod"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/event/tag"
+ "golang.org/x/tools/internal/imports"
+)
+
+func (s *Server) codeAction(ctx context.Context, params *protocol.CodeActionParams) ([]protocol.CodeAction, error) {
+ snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.UnknownKind)
+ defer release()
+ if !ok {
+ return nil, err
+ }
+ uri := fh.URI()
+
+ // Determine the supported actions for this file kind.
+ kind := snapshot.View().FileKind(fh)
+ supportedCodeActions, ok := snapshot.View().Options().SupportedCodeActions[kind]
+ if !ok {
+ return nil, fmt.Errorf("no supported code actions for %v file kind", kind)
+ }
+
+ // The Only field of the context specifies which code actions the client wants.
+ // If Only is empty, assume that the client wants all of the non-explicit code actions.
+ var wanted map[protocol.CodeActionKind]bool
+
+ // Explicit Code Actions are opt-in and shouldn't be returned to the client unless
+ // requested using Only.
+ // TODO: Add other CodeLenses such as GoGenerate, RegenerateCgo, etc..
+ explicit := map[protocol.CodeActionKind]bool{
+ protocol.GoTest: true,
+ }
+
+ if len(params.Context.Only) == 0 {
+ wanted = supportedCodeActions
+ } else {
+ wanted = make(map[protocol.CodeActionKind]bool)
+ for _, only := range params.Context.Only {
+ for k, v := range supportedCodeActions {
+ if only == k || strings.HasPrefix(string(k), string(only)+".") {
+ wanted[k] = wanted[k] || v
+ }
+ }
+ wanted[only] = wanted[only] || explicit[only]
+ }
+ }
+ if len(supportedCodeActions) == 0 {
+ return nil, nil // not an error if there are none supported
+ }
+ if len(wanted) == 0 {
+ return nil, fmt.Errorf("no supported code action to execute for %s, wanted %v", uri, params.Context.Only)
+ }
+
+ var codeActions []protocol.CodeAction
+ switch kind {
+ case source.Mod:
+ if diagnostics := params.Context.Diagnostics; len(diagnostics) > 0 {
+ diags, err := mod.ModDiagnostics(ctx, snapshot, fh)
+ if source.IsNonFatalGoModError(err) {
+ return nil, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+ udiags, err := mod.ModUpgradeDiagnostics(ctx, snapshot, fh)
+ if err != nil {
+ return nil, err
+ }
+ quickFixes, err := codeActionsMatchingDiagnostics(ctx, snapshot, diagnostics, append(diags, udiags...))
+ if err != nil {
+ return nil, err
+ }
+ codeActions = append(codeActions, quickFixes...)
+
+ vdiags, err := mod.ModVulnerabilityDiagnostics(ctx, snapshot, fh)
+ if err != nil {
+ return nil, err
+ }
+ // Group vulnerabilities by location and then limit which code actions we return
+ // for each location.
+ m := make(map[protocol.Range][]*source.Diagnostic)
+ for _, v := range vdiags {
+ m[v.Range] = append(m[v.Range], v)
+ }
+ for _, sdiags := range m {
+ quickFixes, err = codeActionsMatchingDiagnostics(ctx, snapshot, diagnostics, sdiags)
+ if err != nil {
+ return nil, err
+ }
+ quickFixes = mod.SelectUpgradeCodeActions(quickFixes)
+ codeActions = append(codeActions, quickFixes...)
+ }
+ }
+ case source.Go:
+ // Don't suggest fixes for generated files, since they are generally
+ // not useful and some editors may apply them automatically on save.
+ if source.IsGenerated(ctx, snapshot, uri) {
+ return nil, nil
+ }
+ diagnostics := params.Context.Diagnostics
+
+ // First, process any missing imports and pair them with the
+ // diagnostics they fix.
+ if wantQuickFixes := wanted[protocol.QuickFix] && len(diagnostics) > 0; wantQuickFixes || wanted[protocol.SourceOrganizeImports] {
+ importEdits, importEditsPerFix, err := source.AllImportsFixes(ctx, snapshot, fh)
+ if err != nil {
+ event.Error(ctx, "imports fixes", err, tag.File.Of(fh.URI().Filename()))
+ }
+ // Separate this into a set of codeActions per diagnostic, where
+ // each action is the addition, removal, or renaming of one import.
+ if wantQuickFixes {
+ for _, importFix := range importEditsPerFix {
+ fixes := importDiagnostics(importFix.Fix, diagnostics)
+ if len(fixes) == 0 {
+ continue
+ }
+ codeActions = append(codeActions, protocol.CodeAction{
+ Title: importFixTitle(importFix.Fix),
+ Kind: protocol.QuickFix,
+ Edit: &protocol.WorkspaceEdit{
+ DocumentChanges: documentChanges(fh, importFix.Edits),
+ },
+ Diagnostics: fixes,
+ })
+ }
+ }
+
+ // Send all of the import edits as one code action if the file is
+ // being organized.
+ if wanted[protocol.SourceOrganizeImports] && len(importEdits) > 0 {
+ codeActions = append(codeActions, protocol.CodeAction{
+ Title: "Organize Imports",
+ Kind: protocol.SourceOrganizeImports,
+ Edit: &protocol.WorkspaceEdit{
+ DocumentChanges: documentChanges(fh, importEdits),
+ },
+ })
+ }
+ }
+ if ctx.Err() != nil {
+ return nil, ctx.Err()
+ }
+
+ // Type-check the package and also run analysis,
+ // then combine their diagnostics.
+ pkg, _, err := source.PackageForFile(ctx, snapshot, fh.URI(), source.NarrowestPackage)
+ if err != nil {
+ return nil, err
+ }
+ pkgDiags, err := pkg.DiagnosticsForFile(ctx, snapshot, uri)
+ if err != nil {
+ return nil, err
+ }
+ analysisDiags, err := source.Analyze(ctx, snapshot, pkg.Metadata().ID, true)
+ if err != nil {
+ return nil, err
+ }
+ var fileDiags []*source.Diagnostic
+ source.CombineDiagnostics(pkgDiags, analysisDiags[uri], &fileDiags, &fileDiags)
+
+ // Split diagnostics into fixes, which must match incoming diagnostics,
+ // and non-fixes, which must match the requested range. Build actions
+ // for all of them.
+ var fixDiags, nonFixDiags []*source.Diagnostic
+ for _, d := range fileDiags {
+ if len(d.SuggestedFixes) == 0 {
+ continue
+ }
+ var isFix bool
+ for _, fix := range d.SuggestedFixes {
+ if fix.ActionKind == protocol.QuickFix || fix.ActionKind == protocol.SourceFixAll {
+ isFix = true
+ break
+ }
+ }
+ if isFix {
+ fixDiags = append(fixDiags, d)
+ } else {
+ nonFixDiags = append(nonFixDiags, d)
+ }
+ }
+
+ fixActions, err := codeActionsMatchingDiagnostics(ctx, snapshot, diagnostics, fixDiags)
+ if err != nil {
+ return nil, err
+ }
+ codeActions = append(codeActions, fixActions...)
+
+ for _, nonfix := range nonFixDiags {
+ // For now, only show diagnostics for matching lines. Maybe we should
+ // alter this behavior in the future, depending on the user experience.
+ if !protocol.Intersect(nonfix.Range, params.Range) {
+ continue
+ }
+ actions, err := codeActionsForDiagnostic(ctx, snapshot, nonfix, nil)
+ if err != nil {
+ return nil, err
+ }
+ codeActions = append(codeActions, actions...)
+ }
+
+ if wanted[protocol.RefactorExtract] {
+ fixes, err := extractionFixes(ctx, snapshot, uri, params.Range)
+ if err != nil {
+ return nil, err
+ }
+ codeActions = append(codeActions, fixes...)
+ }
+
+ if wanted[protocol.GoTest] {
+ fixes, err := goTest(ctx, snapshot, uri, params.Range)
+ if err != nil {
+ return nil, err
+ }
+ codeActions = append(codeActions, fixes...)
+ }
+
+ default:
+ // Unsupported file kind for a code action.
+ return nil, nil
+ }
+
+ var filtered []protocol.CodeAction
+ for _, action := range codeActions {
+ if wanted[action.Kind] {
+ filtered = append(filtered, action)
+ }
+ }
+ return filtered, nil
+}
+
+func (s *Server) getSupportedCodeActions() []protocol.CodeActionKind {
+ allCodeActionKinds := make(map[protocol.CodeActionKind]struct{})
+ for _, kinds := range s.session.Options().SupportedCodeActions {
+ for kind := range kinds {
+ allCodeActionKinds[kind] = struct{}{}
+ }
+ }
+ var result []protocol.CodeActionKind
+ for kind := range allCodeActionKinds {
+ result = append(result, kind)
+ }
+ sort.Slice(result, func(i, j int) bool {
+ return result[i] < result[j]
+ })
+ return result
+}
+
+func importFixTitle(fix *imports.ImportFix) string {
+ var str string
+ switch fix.FixType {
+ case imports.AddImport:
+ str = fmt.Sprintf("Add import: %s %q", fix.StmtInfo.Name, fix.StmtInfo.ImportPath)
+ case imports.DeleteImport:
+ str = fmt.Sprintf("Delete import: %s %q", fix.StmtInfo.Name, fix.StmtInfo.ImportPath)
+ case imports.SetImportName:
+ str = fmt.Sprintf("Rename import: %s %q", fix.StmtInfo.Name, fix.StmtInfo.ImportPath)
+ }
+ return str
+}
+
+func importDiagnostics(fix *imports.ImportFix, diagnostics []protocol.Diagnostic) (results []protocol.Diagnostic) {
+ for _, diagnostic := range diagnostics {
+ switch {
+ // "undeclared name: X" may be an unresolved import.
+ case strings.HasPrefix(diagnostic.Message, "undeclared name: "):
+ ident := strings.TrimPrefix(diagnostic.Message, "undeclared name: ")
+ if ident == fix.IdentName {
+ results = append(results, diagnostic)
+ }
+ // "undefined: X" may be an unresolved import at Go 1.20+.
+ case strings.HasPrefix(diagnostic.Message, "undefined: "):
+ ident := strings.TrimPrefix(diagnostic.Message, "undefined: ")
+ if ident == fix.IdentName {
+ results = append(results, diagnostic)
+ }
+ // "could not import: X" may be an invalid import.
+ case strings.HasPrefix(diagnostic.Message, "could not import: "):
+ ident := strings.TrimPrefix(diagnostic.Message, "could not import: ")
+ if ident == fix.IdentName {
+ results = append(results, diagnostic)
+ }
+ // "X imported but not used" is an unused import.
+ // "X imported but not used as Y" is an unused import.
+ case strings.Contains(diagnostic.Message, " imported but not used"):
+ idx := strings.Index(diagnostic.Message, " imported but not used")
+ importPath := diagnostic.Message[:idx]
+ if importPath == fmt.Sprintf("%q", fix.StmtInfo.ImportPath) {
+ results = append(results, diagnostic)
+ }
+ }
+ }
+ return results
+}
+
+func extractionFixes(ctx context.Context, snapshot source.Snapshot, uri span.URI, rng protocol.Range) ([]protocol.CodeAction, error) {
+ if rng.Start == rng.End {
+ return nil, nil
+ }
+ fh, err := snapshot.GetFile(ctx, uri)
+ if err != nil {
+ return nil, err
+ }
+ pgf, err := snapshot.ParseGo(ctx, fh, source.ParseFull)
+ if err != nil {
+ return nil, fmt.Errorf("getting file for Identifier: %w", err)
+ }
+ start, end, err := pgf.RangePos(rng)
+ if err != nil {
+ return nil, err
+ }
+ puri := protocol.URIFromSpanURI(uri)
+ var commands []protocol.Command
+ if _, ok, methodOk, _ := source.CanExtractFunction(pgf.Tok, start, end, pgf.Src, pgf.File); ok {
+ cmd, err := command.NewApplyFixCommand("Extract function", command.ApplyFixArgs{
+ URI: puri,
+ Fix: source.ExtractFunction,
+ Range: rng,
+ })
+ if err != nil {
+ return nil, err
+ }
+ commands = append(commands, cmd)
+ if methodOk {
+ cmd, err := command.NewApplyFixCommand("Extract method", command.ApplyFixArgs{
+ URI: puri,
+ Fix: source.ExtractMethod,
+ Range: rng,
+ })
+ if err != nil {
+ return nil, err
+ }
+ commands = append(commands, cmd)
+ }
+ }
+ if _, _, ok, _ := source.CanExtractVariable(start, end, pgf.File); ok {
+ cmd, err := command.NewApplyFixCommand("Extract variable", command.ApplyFixArgs{
+ URI: puri,
+ Fix: source.ExtractVariable,
+ Range: rng,
+ })
+ if err != nil {
+ return nil, err
+ }
+ commands = append(commands, cmd)
+ }
+ var actions []protocol.CodeAction
+ for i := range commands {
+ actions = append(actions, protocol.CodeAction{
+ Title: commands[i].Title,
+ Kind: protocol.RefactorExtract,
+ Command: &commands[i],
+ })
+ }
+ return actions, nil
+}
+
+func documentChanges(fh source.FileHandle, edits []protocol.TextEdit) []protocol.DocumentChanges {
+ return []protocol.DocumentChanges{
+ {
+ TextDocumentEdit: &protocol.TextDocumentEdit{
+ TextDocument: protocol.OptionalVersionedTextDocumentIdentifier{
+ Version: fh.Version(),
+ TextDocumentIdentifier: protocol.TextDocumentIdentifier{
+ URI: protocol.URIFromSpanURI(fh.URI()),
+ },
+ },
+ Edits: edits,
+ },
+ },
+ }
+}
+
+func codeActionsMatchingDiagnostics(ctx context.Context, snapshot source.Snapshot, pdiags []protocol.Diagnostic, sdiags []*source.Diagnostic) ([]protocol.CodeAction, error) {
+ var actions []protocol.CodeAction
+ for _, sd := range sdiags {
+ var diag *protocol.Diagnostic
+ for _, pd := range pdiags {
+ if sameDiagnostic(pd, sd) {
+ diag = &pd
+ break
+ }
+ }
+ if diag == nil {
+ continue
+ }
+ diagActions, err := codeActionsForDiagnostic(ctx, snapshot, sd, diag)
+ if err != nil {
+ return nil, err
+ }
+ actions = append(actions, diagActions...)
+
+ }
+ return actions, nil
+}
+
+func codeActionsForDiagnostic(ctx context.Context, snapshot source.Snapshot, sd *source.Diagnostic, pd *protocol.Diagnostic) ([]protocol.CodeAction, error) {
+ var actions []protocol.CodeAction
+ for _, fix := range sd.SuggestedFixes {
+ var changes []protocol.DocumentChanges
+ for uri, edits := range fix.Edits {
+ fh, err := snapshot.GetFile(ctx, uri)
+ if err != nil {
+ return nil, err
+ }
+ changes = append(changes, documentChanges(fh, edits)...)
+ }
+ action := protocol.CodeAction{
+ Title: fix.Title,
+ Kind: fix.ActionKind,
+ Edit: &protocol.WorkspaceEdit{
+ DocumentChanges: changes,
+ },
+ Command: fix.Command,
+ }
+ if pd != nil {
+ action.Diagnostics = []protocol.Diagnostic{*pd}
+ }
+ actions = append(actions, action)
+ }
+ return actions, nil
+}
+
+func sameDiagnostic(pd protocol.Diagnostic, sd *source.Diagnostic) bool {
+ return pd.Message == strings.TrimSpace(sd.Message) && // extra space may have been trimmed when converting to protocol.Diagnostic
+ protocol.CompareRange(pd.Range, sd.Range) == 0 && pd.Source == string(sd.Source)
+}
+
+func goTest(ctx context.Context, snapshot source.Snapshot, uri span.URI, rng protocol.Range) ([]protocol.CodeAction, error) {
+ fh, err := snapshot.GetFile(ctx, uri)
+ if err != nil {
+ return nil, err
+ }
+ fns, err := source.TestsAndBenchmarks(ctx, snapshot, fh)
+ if err != nil {
+ return nil, err
+ }
+
+ var tests, benchmarks []string
+ for _, fn := range fns.Tests {
+ if !protocol.Intersect(fn.Rng, rng) {
+ continue
+ }
+ tests = append(tests, fn.Name)
+ }
+ for _, fn := range fns.Benchmarks {
+ if !protocol.Intersect(fn.Rng, rng) {
+ continue
+ }
+ benchmarks = append(benchmarks, fn.Name)
+ }
+
+ if len(tests) == 0 && len(benchmarks) == 0 {
+ return nil, nil
+ }
+
+ cmd, err := command.NewTestCommand("Run tests and benchmarks", protocol.URIFromSpanURI(uri), tests, benchmarks)
+ if err != nil {
+ return nil, err
+ }
+ return []protocol.CodeAction{{
+ Title: cmd.Title,
+ Kind: protocol.GoTest,
+ Command: &cmd,
+ }}, nil
+}
diff --git a/gopls/internal/lsp/code_lens.go b/gopls/internal/lsp/code_lens.go
new file mode 100644
index 000000000..f554e798c
--- /dev/null
+++ b/gopls/internal/lsp/code_lens.go
@@ -0,0 +1,57 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsp
+
+import (
+ "context"
+ "fmt"
+ "sort"
+
+ "golang.org/x/tools/gopls/internal/lsp/command"
+ "golang.org/x/tools/gopls/internal/lsp/mod"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/internal/event"
+)
+
+func (s *Server) codeLens(ctx context.Context, params *protocol.CodeLensParams) ([]protocol.CodeLens, error) {
+ snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.UnknownKind)
+ defer release()
+ if !ok {
+ return nil, err
+ }
+ var lenses map[command.Command]source.LensFunc
+ switch snapshot.View().FileKind(fh) {
+ case source.Mod:
+ lenses = mod.LensFuncs()
+ case source.Go:
+ lenses = source.LensFuncs()
+ default:
+ // Unsupported file kind for a code lens.
+ return nil, nil
+ }
+ var result []protocol.CodeLens
+ for cmd, lf := range lenses {
+ if !snapshot.View().Options().Codelenses[string(cmd)] {
+ continue
+ }
+ added, err := lf(ctx, snapshot, fh)
+ // Code lens is called on every keystroke, so we should just operate in
+ // a best-effort mode, ignoring errors.
+ if err != nil {
+ event.Error(ctx, fmt.Sprintf("code lens %s failed", cmd), err)
+ continue
+ }
+ result = append(result, added...)
+ }
+ sort.Slice(result, func(i, j int) bool {
+ a, b := result[i], result[j]
+ if cmp := protocol.CompareRange(a.Range, b.Range); cmp != 0 {
+ return cmp < 0
+ }
+ return a.Command.Command < b.Command.Command
+ })
+ return result, nil
+}
diff --git a/gopls/internal/lsp/command.go b/gopls/internal/lsp/command.go
new file mode 100644
index 000000000..75e5ef8b5
--- /dev/null
+++ b/gopls/internal/lsp/command.go
@@ -0,0 +1,964 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsp
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "sort"
+ "strings"
+ "time"
+
+ "golang.org/x/mod/modfile"
+ "golang.org/x/tools/go/ast/astutil"
+ "golang.org/x/tools/gopls/internal/govulncheck"
+ "golang.org/x/tools/gopls/internal/lsp/command"
+ "golang.org/x/tools/gopls/internal/lsp/debug"
+ "golang.org/x/tools/gopls/internal/lsp/progress"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/gopls/internal/vulncheck"
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/gocommand"
+ "golang.org/x/tools/internal/xcontext"
+)
+
+func (s *Server) executeCommand(ctx context.Context, params *protocol.ExecuteCommandParams) (interface{}, error) {
+ var found bool
+ for _, name := range s.session.Options().SupportedCommands {
+ if name == params.Command {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return nil, fmt.Errorf("%s is not a supported command", params.Command)
+ }
+
+ handler := &commandHandler{
+ s: s,
+ params: params,
+ }
+ return command.Dispatch(ctx, params, handler)
+}
+
+type commandHandler struct {
+ s *Server
+ params *protocol.ExecuteCommandParams
+}
+
+// commandConfig configures common command set-up and execution.
+type commandConfig struct {
+ async bool // whether to run the command asynchronously. Async commands can only return errors.
+ requireSave bool // whether all files must be saved for the command to work
+ progress string // title to use for progress reporting. If empty, no progress will be reported.
+ forURI protocol.DocumentURI // URI to resolve to a snapshot. If unset, snapshot will be nil.
+}
+
+// commandDeps is evaluated from a commandConfig. Note that not all fields may
+// be populated, depending on which configuration is set. See comments in-line
+// for details.
+type commandDeps struct {
+ snapshot source.Snapshot // present if cfg.forURI was set
+ fh source.FileHandle // present if cfg.forURI was set
+ work *progress.WorkDone // present cfg.progress was set
+}
+
+type commandFunc func(context.Context, commandDeps) error
+
+// run performs command setup for command execution, and invokes the given run
+// function. If cfg.async is set, run executes the given func in a separate
+// goroutine, and returns as soon as setup is complete and the goroutine is
+// scheduled.
+//
+// Invariant: if the resulting error is non-nil, the given run func will
+// (eventually) be executed exactly once.
+func (c *commandHandler) run(ctx context.Context, cfg commandConfig, run commandFunc) (err error) {
+ if cfg.requireSave {
+ var unsaved []string
+ for _, overlay := range c.s.session.Overlays() {
+ if !overlay.Saved() {
+ unsaved = append(unsaved, overlay.URI().Filename())
+ }
+ }
+ if len(unsaved) > 0 {
+ return fmt.Errorf("All files must be saved first (unsaved: %v).", unsaved)
+ }
+ }
+ var deps commandDeps
+ if cfg.forURI != "" {
+ var ok bool
+ var release func()
+ deps.snapshot, deps.fh, ok, release, err = c.s.beginFileRequest(ctx, cfg.forURI, source.UnknownKind)
+ defer release()
+ if !ok {
+ if err != nil {
+ return err
+ }
+ return fmt.Errorf("invalid file URL: %v", cfg.forURI)
+ }
+ }
+ ctx, cancel := context.WithCancel(xcontext.Detach(ctx))
+ if cfg.progress != "" {
+ deps.work = c.s.progress.Start(ctx, cfg.progress, "Running...", c.params.WorkDoneToken, cancel)
+ }
+ runcmd := func() error {
+ defer cancel()
+ err := run(ctx, deps)
+ if deps.work != nil {
+ switch {
+ case errors.Is(err, context.Canceled):
+ deps.work.End(ctx, "canceled")
+ case err != nil:
+ event.Error(ctx, "command error", err)
+ deps.work.End(ctx, "failed")
+ default:
+ deps.work.End(ctx, "completed")
+ }
+ }
+ return err
+ }
+ if cfg.async {
+ go func() {
+ if err := runcmd(); err != nil {
+ if showMessageErr := c.s.client.ShowMessage(ctx, &protocol.ShowMessageParams{
+ Type: protocol.Error,
+ Message: err.Error(),
+ }); showMessageErr != nil {
+ event.Error(ctx, fmt.Sprintf("failed to show message: %q", err.Error()), showMessageErr)
+ }
+ }
+ }()
+ return nil
+ }
+ return runcmd()
+}
+
+func (c *commandHandler) ApplyFix(ctx context.Context, args command.ApplyFixArgs) error {
+ return c.run(ctx, commandConfig{
+ // Note: no progress here. Applying fixes should be quick.
+ forURI: args.URI,
+ }, func(ctx context.Context, deps commandDeps) error {
+ edits, err := source.ApplyFix(ctx, args.Fix, deps.snapshot, deps.fh, args.Range)
+ if err != nil {
+ return err
+ }
+ var changes []protocol.DocumentChanges
+ for _, edit := range edits {
+ edit := edit
+ changes = append(changes, protocol.DocumentChanges{
+ TextDocumentEdit: &edit,
+ })
+ }
+ r, err := c.s.client.ApplyEdit(ctx, &protocol.ApplyWorkspaceEditParams{
+ Edit: protocol.WorkspaceEdit{
+ DocumentChanges: changes,
+ },
+ })
+ if err != nil {
+ return err
+ }
+ if !r.Applied {
+ return errors.New(r.FailureReason)
+ }
+ return nil
+ })
+}
+
+func (c *commandHandler) RegenerateCgo(ctx context.Context, args command.URIArg) error {
+ return c.run(ctx, commandConfig{
+ progress: "Regenerating Cgo",
+ }, func(ctx context.Context, deps commandDeps) error {
+ mod := source.FileModification{
+ URI: args.URI.SpanURI(),
+ Action: source.InvalidateMetadata,
+ }
+ return c.s.didModifyFiles(ctx, []source.FileModification{mod}, FromRegenerateCgo)
+ })
+}
+
+func (c *commandHandler) CheckUpgrades(ctx context.Context, args command.CheckUpgradesArgs) error {
+ return c.run(ctx, commandConfig{
+ forURI: args.URI,
+ progress: "Checking for upgrades",
+ }, func(ctx context.Context, deps commandDeps) error {
+ upgrades, err := c.s.getUpgrades(ctx, deps.snapshot, args.URI.SpanURI(), args.Modules)
+ if err != nil {
+ return err
+ }
+ deps.snapshot.View().RegisterModuleUpgrades(args.URI.SpanURI(), upgrades)
+ // Re-diagnose the snapshot to publish the new module diagnostics.
+ c.s.diagnoseSnapshot(deps.snapshot, nil, false)
+ return nil
+ })
+}
+
+func (c *commandHandler) AddDependency(ctx context.Context, args command.DependencyArgs) error {
+ return c.GoGetModule(ctx, args)
+}
+
+func (c *commandHandler) UpgradeDependency(ctx context.Context, args command.DependencyArgs) error {
+ return c.GoGetModule(ctx, args)
+}
+
+func (c *commandHandler) ResetGoModDiagnostics(ctx context.Context, args command.ResetGoModDiagnosticsArgs) error {
+ return c.run(ctx, commandConfig{
+ forURI: args.URI,
+ }, func(ctx context.Context, deps commandDeps) error {
+ // Clear all diagnostics coming from the upgrade check source and vulncheck.
+ // This will clear the diagnostics in all go.mod files, but they
+ // will be re-calculated when the snapshot is diagnosed again.
+ if args.DiagnosticSource == "" || args.DiagnosticSource == string(source.UpgradeNotification) {
+ deps.snapshot.View().ClearModuleUpgrades(args.URI.SpanURI())
+ c.s.clearDiagnosticSource(modCheckUpgradesSource)
+ }
+
+ if args.DiagnosticSource == "" || args.DiagnosticSource == string(source.Govulncheck) {
+ deps.snapshot.View().SetVulnerabilities(args.URI.SpanURI(), nil)
+ c.s.clearDiagnosticSource(modVulncheckSource)
+ }
+
+ // Re-diagnose the snapshot to remove the diagnostics.
+ c.s.diagnoseSnapshot(deps.snapshot, nil, false)
+ return nil
+ })
+}
+
+func (c *commandHandler) GoGetModule(ctx context.Context, args command.DependencyArgs) error {
+ return c.run(ctx, commandConfig{
+ progress: "Running go get",
+ forURI: args.URI,
+ }, func(ctx context.Context, deps commandDeps) error {
+ return c.s.runGoModUpdateCommands(ctx, deps.snapshot, args.URI.SpanURI(), func(invoke func(...string) (*bytes.Buffer, error)) error {
+ return runGoGetModule(invoke, args.AddRequire, args.GoCmdArgs)
+ })
+ })
+}
+
+// TODO(rFindley): UpdateGoSum, Tidy, and Vendor could probably all be one command.
+func (c *commandHandler) UpdateGoSum(ctx context.Context, args command.URIArgs) error {
+ return c.run(ctx, commandConfig{
+ progress: "Updating go.sum",
+ }, func(ctx context.Context, deps commandDeps) error {
+ for _, uri := range args.URIs {
+ snapshot, fh, ok, release, err := c.s.beginFileRequest(ctx, uri, source.UnknownKind)
+ defer release()
+ if !ok {
+ return err
+ }
+ if err := c.s.runGoModUpdateCommands(ctx, snapshot, fh.URI(), func(invoke func(...string) (*bytes.Buffer, error)) error {
+ _, err := invoke("list", "all")
+ return err
+ }); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+}
+
+func (c *commandHandler) Tidy(ctx context.Context, args command.URIArgs) error {
+ return c.run(ctx, commandConfig{
+ requireSave: true,
+ progress: "Running go mod tidy",
+ }, func(ctx context.Context, deps commandDeps) error {
+ for _, uri := range args.URIs {
+ snapshot, fh, ok, release, err := c.s.beginFileRequest(ctx, uri, source.UnknownKind)
+ defer release()
+ if !ok {
+ return err
+ }
+ if err := c.s.runGoModUpdateCommands(ctx, snapshot, fh.URI(), func(invoke func(...string) (*bytes.Buffer, error)) error {
+ _, err := invoke("mod", "tidy")
+ return err
+ }); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+}
+
+func (c *commandHandler) Vendor(ctx context.Context, args command.URIArg) error {
+ return c.run(ctx, commandConfig{
+ requireSave: true,
+ progress: "Running go mod vendor",
+ forURI: args.URI,
+ }, func(ctx context.Context, deps commandDeps) error {
+ // Use RunGoCommandPiped here so that we don't compete with any other go
+ // command invocations. go mod vendor deletes modules.txt before recreating
+ // it, and therefore can run into file locking issues on Windows if that
+ // file is in use by another process, such as go list.
+ //
+ // If golang/go#44119 is resolved, go mod vendor will instead modify
+ // modules.txt in-place. In that case we could theoretically allow this
+ // command to run concurrently.
+ err := deps.snapshot.RunGoCommandPiped(ctx, source.Normal|source.AllowNetwork, &gocommand.Invocation{
+ Verb: "mod",
+ Args: []string{"vendor"},
+ WorkingDir: filepath.Dir(args.URI.SpanURI().Filename()),
+ }, &bytes.Buffer{}, &bytes.Buffer{})
+ return err
+ })
+}
+
+func (c *commandHandler) EditGoDirective(ctx context.Context, args command.EditGoDirectiveArgs) error {
+ return c.run(ctx, commandConfig{
+ requireSave: true, // if go.mod isn't saved it could cause a problem
+ forURI: args.URI,
+ }, func(ctx context.Context, deps commandDeps) error {
+ snapshot, fh, ok, release, err := c.s.beginFileRequest(ctx, args.URI, source.UnknownKind)
+ defer release()
+ if !ok {
+ return err
+ }
+ if err := c.s.runGoModUpdateCommands(ctx, snapshot, fh.URI(), func(invoke func(...string) (*bytes.Buffer, error)) error {
+ _, err := invoke("mod", "edit", "-go", args.Version)
+ return err
+ }); err != nil {
+ return err
+ }
+ return nil
+ })
+}
+
+func (c *commandHandler) RemoveDependency(ctx context.Context, args command.RemoveDependencyArgs) error {
+ return c.run(ctx, commandConfig{
+ progress: "Removing dependency",
+ forURI: args.URI,
+ }, func(ctx context.Context, deps commandDeps) error {
+ // If the module is tidied apart from the one unused diagnostic, we can
+ // run `go get module@none`, and then run `go mod tidy`. Otherwise, we
+ // must make textual edits.
+ // TODO(rstambler): In Go 1.17+, we will be able to use the go command
+ // without checking if the module is tidy.
+ if args.OnlyDiagnostic {
+ return c.s.runGoModUpdateCommands(ctx, deps.snapshot, args.URI.SpanURI(), func(invoke func(...string) (*bytes.Buffer, error)) error {
+ if err := runGoGetModule(invoke, false, []string{args.ModulePath + "@none"}); err != nil {
+ return err
+ }
+ _, err := invoke("mod", "tidy")
+ return err
+ })
+ }
+ pm, err := deps.snapshot.ParseMod(ctx, deps.fh)
+ if err != nil {
+ return err
+ }
+ edits, err := dropDependency(deps.snapshot, pm, args.ModulePath)
+ if err != nil {
+ return err
+ }
+ response, err := c.s.client.ApplyEdit(ctx, &protocol.ApplyWorkspaceEditParams{
+ Edit: protocol.WorkspaceEdit{
+ DocumentChanges: []protocol.DocumentChanges{
+ {
+ TextDocumentEdit: &protocol.TextDocumentEdit{
+ TextDocument: protocol.OptionalVersionedTextDocumentIdentifier{
+ Version: deps.fh.Version(),
+ TextDocumentIdentifier: protocol.TextDocumentIdentifier{
+ URI: protocol.URIFromSpanURI(deps.fh.URI()),
+ },
+ },
+ Edits: edits,
+ },
+ },
+ },
+ },
+ })
+ if err != nil {
+ return err
+ }
+ if !response.Applied {
+ return fmt.Errorf("edits not applied because of %s", response.FailureReason)
+ }
+ return nil
+ })
+}
+
+// dropDependency returns the edits to remove the given require from the go.mod
+// file.
+func dropDependency(snapshot source.Snapshot, pm *source.ParsedModule, modulePath string) ([]protocol.TextEdit, error) {
+ // We need a private copy of the parsed go.mod file, since we're going to
+ // modify it.
+ copied, err := modfile.Parse("", pm.Mapper.Content, nil)
+ if err != nil {
+ return nil, err
+ }
+ if err := copied.DropRequire(modulePath); err != nil {
+ return nil, err
+ }
+ copied.Cleanup()
+ newContent, err := copied.Format()
+ if err != nil {
+ return nil, err
+ }
+ // Calculate the edits to be made due to the change.
+ diff := snapshot.View().Options().ComputeEdits(string(pm.Mapper.Content), string(newContent))
+ return source.ToProtocolEdits(pm.Mapper, diff)
+}
+
+func (c *commandHandler) Test(ctx context.Context, uri protocol.DocumentURI, tests, benchmarks []string) error {
+ return c.RunTests(ctx, command.RunTestsArgs{
+ URI: uri,
+ Tests: tests,
+ Benchmarks: benchmarks,
+ })
+}
+
+func (c *commandHandler) RunTests(ctx context.Context, args command.RunTestsArgs) error {
+ return c.run(ctx, commandConfig{
+ async: true,
+ progress: "Running go test",
+ requireSave: true,
+ forURI: args.URI,
+ }, func(ctx context.Context, deps commandDeps) error {
+ if err := c.runTests(ctx, deps.snapshot, deps.work, args.URI, args.Tests, args.Benchmarks); err != nil {
+ return fmt.Errorf("running tests failed: %w", err)
+ }
+ return nil
+ })
+}
+
+func (c *commandHandler) runTests(ctx context.Context, snapshot source.Snapshot, work *progress.WorkDone, uri protocol.DocumentURI, tests, benchmarks []string) error {
+ // TODO: fix the error reporting when this runs async.
+ metas, err := snapshot.MetadataForFile(ctx, uri.SpanURI())
+ if err != nil {
+ return err
+ }
+ metas = source.RemoveIntermediateTestVariants(metas)
+ if len(metas) == 0 {
+ return fmt.Errorf("package could not be found for file: %s", uri.SpanURI().Filename())
+ }
+ pkgPath := string(metas[0].ForTest)
+
+ // create output
+ buf := &bytes.Buffer{}
+ ew := progress.NewEventWriter(ctx, "test")
+ out := io.MultiWriter(ew, progress.NewWorkDoneWriter(ctx, work), buf)
+
+ // Run `go test -run Func` on each test.
+ var failedTests int
+ for _, funcName := range tests {
+ inv := &gocommand.Invocation{
+ Verb: "test",
+ Args: []string{pkgPath, "-v", "-count=1", "-run", fmt.Sprintf("^%s$", funcName)},
+ WorkingDir: filepath.Dir(uri.SpanURI().Filename()),
+ }
+ if err := snapshot.RunGoCommandPiped(ctx, source.Normal, inv, out, out); err != nil {
+ if errors.Is(err, context.Canceled) {
+ return err
+ }
+ failedTests++
+ }
+ }
+
+ // Run `go test -run=^$ -bench Func` on each test.
+ var failedBenchmarks int
+ for _, funcName := range benchmarks {
+ inv := &gocommand.Invocation{
+ Verb: "test",
+ Args: []string{pkgPath, "-v", "-run=^$", "-bench", fmt.Sprintf("^%s$", funcName)},
+ WorkingDir: filepath.Dir(uri.SpanURI().Filename()),
+ }
+ if err := snapshot.RunGoCommandPiped(ctx, source.Normal, inv, out, out); err != nil {
+ if errors.Is(err, context.Canceled) {
+ return err
+ }
+ failedBenchmarks++
+ }
+ }
+
+ var title string
+ if len(tests) > 0 && len(benchmarks) > 0 {
+ title = "tests and benchmarks"
+ } else if len(tests) > 0 {
+ title = "tests"
+ } else if len(benchmarks) > 0 {
+ title = "benchmarks"
+ } else {
+ return errors.New("No functions were provided")
+ }
+ message := fmt.Sprintf("all %s passed", title)
+ if failedTests > 0 && failedBenchmarks > 0 {
+ message = fmt.Sprintf("%d / %d tests failed and %d / %d benchmarks failed", failedTests, len(tests), failedBenchmarks, len(benchmarks))
+ } else if failedTests > 0 {
+ message = fmt.Sprintf("%d / %d tests failed", failedTests, len(tests))
+ } else if failedBenchmarks > 0 {
+ message = fmt.Sprintf("%d / %d benchmarks failed", failedBenchmarks, len(benchmarks))
+ }
+ if failedTests > 0 || failedBenchmarks > 0 {
+ message += "\n" + buf.String()
+ }
+
+ return c.s.client.ShowMessage(ctx, &protocol.ShowMessageParams{
+ Type: protocol.Info,
+ Message: message,
+ })
+}
+
+func (c *commandHandler) Generate(ctx context.Context, args command.GenerateArgs) error {
+ title := "Running go generate ."
+ if args.Recursive {
+ title = "Running go generate ./..."
+ }
+ return c.run(ctx, commandConfig{
+ requireSave: true,
+ progress: title,
+ forURI: args.Dir,
+ }, func(ctx context.Context, deps commandDeps) error {
+ er := progress.NewEventWriter(ctx, "generate")
+
+ pattern := "."
+ if args.Recursive {
+ pattern = "./..."
+ }
+ inv := &gocommand.Invocation{
+ Verb: "generate",
+ Args: []string{"-x", pattern},
+ WorkingDir: args.Dir.SpanURI().Filename(),
+ }
+ stderr := io.MultiWriter(er, progress.NewWorkDoneWriter(ctx, deps.work))
+ if err := deps.snapshot.RunGoCommandPiped(ctx, source.Normal, inv, er, stderr); err != nil {
+ return err
+ }
+ return nil
+ })
+}
+
+func (c *commandHandler) GoGetPackage(ctx context.Context, args command.GoGetPackageArgs) error {
+ return c.run(ctx, commandConfig{
+ forURI: args.URI,
+ progress: "Running go get",
+ }, func(ctx context.Context, deps commandDeps) error {
+ // Run on a throwaway go.mod, otherwise it'll write to the real one.
+ stdout, err := deps.snapshot.RunGoCommandDirect(ctx, source.WriteTemporaryModFile|source.AllowNetwork, &gocommand.Invocation{
+ Verb: "list",
+ Args: []string{"-f", "{{.Module.Path}}@{{.Module.Version}}", args.Pkg},
+ WorkingDir: filepath.Dir(args.URI.SpanURI().Filename()),
+ })
+ if err != nil {
+ return err
+ }
+ ver := strings.TrimSpace(stdout.String())
+ return c.s.runGoModUpdateCommands(ctx, deps.snapshot, args.URI.SpanURI(), func(invoke func(...string) (*bytes.Buffer, error)) error {
+ if args.AddRequire {
+ if err := addModuleRequire(invoke, []string{ver}); err != nil {
+ return err
+ }
+ }
+ _, err := invoke(append([]string{"get", "-d"}, args.Pkg)...)
+ return err
+ })
+ })
+}
+
+func (s *Server) runGoModUpdateCommands(ctx context.Context, snapshot source.Snapshot, uri span.URI, run func(invoke func(...string) (*bytes.Buffer, error)) error) error {
+ tmpModfile, newModBytes, newSumBytes, err := snapshot.RunGoCommands(ctx, true, filepath.Dir(uri.Filename()), run)
+ if err != nil {
+ return err
+ }
+ if !tmpModfile {
+ return nil
+ }
+ modURI := snapshot.GoModForFile(uri)
+ sumURI := span.URIFromPath(strings.TrimSuffix(modURI.Filename(), ".mod") + ".sum")
+ modEdits, err := applyFileEdits(ctx, snapshot, modURI, newModBytes)
+ if err != nil {
+ return err
+ }
+ sumEdits, err := applyFileEdits(ctx, snapshot, sumURI, newSumBytes)
+ if err != nil {
+ return err
+ }
+ changes := append(sumEdits, modEdits...)
+ if len(changes) == 0 {
+ return nil
+ }
+ var documentChanges []protocol.DocumentChanges
+ for _, change := range changes {
+ change := change
+ documentChanges = append(documentChanges, protocol.DocumentChanges{
+ TextDocumentEdit: &change,
+ })
+ }
+ response, err := s.client.ApplyEdit(ctx, &protocol.ApplyWorkspaceEditParams{
+ Edit: protocol.WorkspaceEdit{
+ DocumentChanges: documentChanges,
+ },
+ })
+ if err != nil {
+ return err
+ }
+ if !response.Applied {
+ return fmt.Errorf("edits not applied because of %s", response.FailureReason)
+ }
+ return nil
+}
+
+func applyFileEdits(ctx context.Context, snapshot source.Snapshot, uri span.URI, newContent []byte) ([]protocol.TextDocumentEdit, error) {
+ fh, err := snapshot.GetFile(ctx, uri)
+ if err != nil {
+ return nil, err
+ }
+ oldContent, err := fh.Read()
+ if err != nil && !os.IsNotExist(err) {
+ return nil, err
+ }
+ if bytes.Equal(oldContent, newContent) {
+ return nil, nil
+ }
+
+ // Sending a workspace edit to a closed file causes VS Code to open the
+ // file and leave it unsaved. We would rather apply the changes directly,
+ // especially to go.sum, which should be mostly invisible to the user.
+ if !snapshot.IsOpen(uri) {
+ err := ioutil.WriteFile(uri.Filename(), newContent, 0666)
+ return nil, err
+ }
+
+ m := protocol.NewMapper(fh.URI(), oldContent)
+ diff := snapshot.View().Options().ComputeEdits(string(oldContent), string(newContent))
+ edits, err := source.ToProtocolEdits(m, diff)
+ if err != nil {
+ return nil, err
+ }
+ return []protocol.TextDocumentEdit{{
+ TextDocument: protocol.OptionalVersionedTextDocumentIdentifier{
+ Version: fh.Version(),
+ TextDocumentIdentifier: protocol.TextDocumentIdentifier{
+ URI: protocol.URIFromSpanURI(uri),
+ },
+ },
+ Edits: edits,
+ }}, nil
+}
+
+func runGoGetModule(invoke func(...string) (*bytes.Buffer, error), addRequire bool, args []string) error {
+ if addRequire {
+ if err := addModuleRequire(invoke, args); err != nil {
+ return err
+ }
+ }
+ _, err := invoke(append([]string{"get", "-d"}, args...)...)
+ return err
+}
+
+func addModuleRequire(invoke func(...string) (*bytes.Buffer, error), args []string) error {
+ // Using go get to create a new dependency results in an
+ // `// indirect` comment we may not want. The only way to avoid it
+ // is to add the require as direct first. Then we can use go get to
+ // update go.sum and tidy up.
+ _, err := invoke(append([]string{"mod", "edit", "-require"}, args...)...)
+ return err
+}
+
+func (s *Server) getUpgrades(ctx context.Context, snapshot source.Snapshot, uri span.URI, modules []string) (map[string]string, error) {
+ stdout, err := snapshot.RunGoCommandDirect(ctx, source.Normal|source.AllowNetwork, &gocommand.Invocation{
+ Verb: "list",
+ Args: append([]string{"-m", "-u", "-json"}, modules...),
+ WorkingDir: filepath.Dir(uri.Filename()),
+ ModFlag: "readonly",
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ upgrades := map[string]string{}
+ for dec := json.NewDecoder(stdout); dec.More(); {
+ mod := &gocommand.ModuleJSON{}
+ if err := dec.Decode(mod); err != nil {
+ return nil, err
+ }
+ if mod.Update == nil {
+ continue
+ }
+ upgrades[mod.Path] = mod.Update.Version
+ }
+ return upgrades, nil
+}
+
+func (c *commandHandler) GCDetails(ctx context.Context, uri protocol.DocumentURI) error {
+ return c.ToggleGCDetails(ctx, command.URIArg{URI: uri})
+}
+
+func (c *commandHandler) ToggleGCDetails(ctx context.Context, args command.URIArg) error {
+ return c.run(ctx, commandConfig{
+ requireSave: true,
+ progress: "Toggling GC Details",
+ forURI: args.URI,
+ }, func(ctx context.Context, deps commandDeps) error {
+ metas, err := deps.snapshot.MetadataForFile(ctx, deps.fh.URI())
+ if err != nil {
+ return err
+ }
+ id := metas[0].ID // 0 => narrowest package
+ c.s.gcOptimizationDetailsMu.Lock()
+ if _, ok := c.s.gcOptimizationDetails[id]; ok {
+ delete(c.s.gcOptimizationDetails, id)
+ c.s.clearDiagnosticSource(gcDetailsSource)
+ } else {
+ c.s.gcOptimizationDetails[id] = struct{}{}
+ }
+ c.s.gcOptimizationDetailsMu.Unlock()
+ c.s.diagnoseSnapshot(deps.snapshot, nil, false)
+ return nil
+ })
+}
+
+func (c *commandHandler) ListKnownPackages(ctx context.Context, args command.URIArg) (command.ListKnownPackagesResult, error) {
+ var result command.ListKnownPackagesResult
+ err := c.run(ctx, commandConfig{
+ progress: "Listing packages",
+ forURI: args.URI,
+ }, func(ctx context.Context, deps commandDeps) error {
+ pkgs, err := source.KnownPackagePaths(ctx, deps.snapshot, deps.fh)
+ for _, pkg := range pkgs {
+ result.Packages = append(result.Packages, string(pkg))
+ }
+ return err
+ })
+ return result, err
+}
+
+func (c *commandHandler) ListImports(ctx context.Context, args command.URIArg) (command.ListImportsResult, error) {
+ var result command.ListImportsResult
+ err := c.run(ctx, commandConfig{
+ forURI: args.URI,
+ }, func(ctx context.Context, deps commandDeps) error {
+ fh, err := deps.snapshot.GetFile(ctx, args.URI.SpanURI())
+ if err != nil {
+ return err
+ }
+ pgf, err := deps.snapshot.ParseGo(ctx, fh, source.ParseHeader)
+ if err != nil {
+ return err
+ }
+ fset := source.FileSetFor(pgf.Tok)
+ for _, group := range astutil.Imports(fset, pgf.File) {
+ for _, imp := range group {
+ if imp.Path == nil {
+ continue
+ }
+ var name string
+ if imp.Name != nil {
+ name = imp.Name.Name
+ }
+ result.Imports = append(result.Imports, command.FileImport{
+ Path: string(source.UnquoteImportPath(imp)),
+ Name: name,
+ })
+ }
+ }
+ metas, err := deps.snapshot.MetadataForFile(ctx, args.URI.SpanURI())
+ if err != nil {
+ return err // e.g. cancelled
+ }
+ if len(metas) == 0 {
+ return fmt.Errorf("no package containing %v", args.URI.SpanURI())
+ }
+ for pkgPath := range metas[0].DepsByPkgPath { // 0 => narrowest package
+ result.PackageImports = append(result.PackageImports,
+ command.PackageImport{Path: string(pkgPath)})
+ }
+ sort.Slice(result.PackageImports, func(i, j int) bool {
+ return result.PackageImports[i].Path < result.PackageImports[j].Path
+ })
+ return nil
+ })
+ return result, err
+}
+
+func (c *commandHandler) AddImport(ctx context.Context, args command.AddImportArgs) error {
+ return c.run(ctx, commandConfig{
+ progress: "Adding import",
+ forURI: args.URI,
+ }, func(ctx context.Context, deps commandDeps) error {
+ edits, err := source.AddImport(ctx, deps.snapshot, deps.fh, args.ImportPath)
+ if err != nil {
+ return fmt.Errorf("could not add import: %v", err)
+ }
+ if _, err := c.s.client.ApplyEdit(ctx, &protocol.ApplyWorkspaceEditParams{
+ Edit: protocol.WorkspaceEdit{
+ DocumentChanges: documentChanges(deps.fh, edits),
+ },
+ }); err != nil {
+ return fmt.Errorf("could not apply import edits: %v", err)
+ }
+ return nil
+ })
+}
+
+func (c *commandHandler) StartDebugging(ctx context.Context, args command.DebuggingArgs) (result command.DebuggingResult, _ error) {
+ addr := args.Addr
+ if addr == "" {
+ addr = "localhost:0"
+ }
+ di := debug.GetInstance(ctx)
+ if di == nil {
+ return result, errors.New("internal error: server has no debugging instance")
+ }
+ listenedAddr, err := di.Serve(ctx, addr)
+ if err != nil {
+ return result, fmt.Errorf("starting debug server: %w", err)
+ }
+ result.URLs = []string{"http://" + listenedAddr}
+ return result, nil
+}
+
+// Copy of pkgLoadConfig defined in internal/lsp/cmd/vulncheck.go
+// TODO(hyangah): decide where to define this.
+type pkgLoadConfig struct {
+ // BuildFlags is a list of command-line flags to be passed through to
+ // the build system's query tool.
+ BuildFlags []string
+
+ // If Tests is set, the loader includes related test packages.
+ Tests bool
+}
+
+func (c *commandHandler) FetchVulncheckResult(ctx context.Context, arg command.URIArg) (map[protocol.DocumentURI]*govulncheck.Result, error) {
+ ret := map[protocol.DocumentURI]*govulncheck.Result{}
+ err := c.run(ctx, commandConfig{forURI: arg.URI}, func(ctx context.Context, deps commandDeps) error {
+ if deps.snapshot.View().Options().Vulncheck == source.ModeVulncheckImports {
+ for _, modfile := range deps.snapshot.ModFiles() {
+ res, err := deps.snapshot.ModVuln(ctx, modfile)
+ if err != nil {
+ return err
+ }
+ ret[protocol.URIFromSpanURI(modfile)] = res
+ }
+ }
+ // Overwrite if there is any govulncheck-based result.
+ for modfile, result := range deps.snapshot.View().Vulnerabilities() {
+ ret[protocol.URIFromSpanURI(modfile)] = result
+ }
+ return nil
+ })
+ return ret, err
+}
+
+func (c *commandHandler) RunGovulncheck(ctx context.Context, args command.VulncheckArgs) (command.RunVulncheckResult, error) {
+ if args.URI == "" {
+ return command.RunVulncheckResult{}, errors.New("VulncheckArgs is missing URI field")
+ }
+
+ // Return the workdone token so that clients can identify when this
+ // vulncheck invocation is complete.
+ //
+ // Since the run function executes asynchronously, we use a channel to
+ // synchronize the start of the run and return the token.
+ tokenChan := make(chan protocol.ProgressToken, 1)
+ err := c.run(ctx, commandConfig{
+ async: true, // need to be async to be cancellable
+ progress: "govulncheck",
+ requireSave: true,
+ forURI: args.URI,
+ }, func(ctx context.Context, deps commandDeps) error {
+ tokenChan <- deps.work.Token()
+
+ view := deps.snapshot.View()
+ opts := view.Options()
+ // quickly test if gopls is compiled to support govulncheck
+ // by checking vulncheck.Main. Alternatively, we can continue and
+ // let the `gopls vulncheck` command fail. This is lighter-weight.
+ if vulncheck.Main == nil {
+ return errors.New("vulncheck feature is not available")
+ }
+
+ cmd := exec.CommandContext(ctx, os.Args[0], "vulncheck", "-config", args.Pattern)
+ cmd.Dir = filepath.Dir(args.URI.SpanURI().Filename())
+
+ var viewEnv []string
+ if e := opts.EnvSlice(); e != nil {
+ viewEnv = append(os.Environ(), e...)
+ }
+ cmd.Env = viewEnv
+
+ // stdin: gopls vulncheck expects JSON-encoded configuration from STDIN when -config flag is set.
+ var stdin bytes.Buffer
+ cmd.Stdin = &stdin
+
+ if err := json.NewEncoder(&stdin).Encode(pkgLoadConfig{
+ BuildFlags: opts.BuildFlags,
+ // TODO(hyangah): add `tests` flag in command.VulncheckArgs
+ }); err != nil {
+ return fmt.Errorf("failed to pass package load config: %v", err)
+ }
+
+ // stderr: stream gopls vulncheck's STDERR as progress reports
+ er := progress.NewEventWriter(ctx, "vulncheck")
+ stderr := io.MultiWriter(er, progress.NewWorkDoneWriter(ctx, deps.work))
+ cmd.Stderr = stderr
+ // TODO: can we stream stdout?
+ stdout, err := cmd.Output()
+ if err != nil {
+ return fmt.Errorf("failed to run govulncheck: %v", err)
+ }
+
+ var result govulncheck.Result
+ if err := json.Unmarshal(stdout, &result); err != nil {
+ // TODO: for easy debugging, log the failed stdout somewhere?
+ return fmt.Errorf("failed to parse govulncheck output: %v", err)
+ }
+ result.Mode = govulncheck.ModeGovulncheck
+ result.AsOf = time.Now()
+ deps.snapshot.View().SetVulnerabilities(args.URI.SpanURI(), &result)
+
+ c.s.diagnoseSnapshot(deps.snapshot, nil, false)
+ vulns := result.Vulns
+ affecting := make([]string, 0, len(vulns))
+ for _, v := range vulns {
+ if v.IsCalled() {
+ affecting = append(affecting, v.OSV.ID)
+ }
+ }
+ if len(affecting) == 0 {
+ return c.s.client.ShowMessage(ctx, &protocol.ShowMessageParams{
+ Type: protocol.Info,
+ Message: "No vulnerabilities found",
+ })
+ }
+ sort.Strings(affecting)
+ return c.s.client.ShowMessage(ctx, &protocol.ShowMessageParams{
+ Type: protocol.Warning,
+ Message: fmt.Sprintf("Found %v", strings.Join(affecting, ", ")),
+ })
+ })
+ if err != nil {
+ return command.RunVulncheckResult{}, err
+ }
+ select {
+ case <-ctx.Done():
+ return command.RunVulncheckResult{}, ctx.Err()
+ case token := <-tokenChan:
+ return command.RunVulncheckResult{Token: token}, nil
+ }
+}
+
+// MemStats implements the MemStats command. It returns an error as a
+// future-proof API, but the resulting error is currently always nil.
+func (c *commandHandler) MemStats(ctx context.Context) (command.MemStatsResult, error) {
+ // GC a few times for stable results.
+ runtime.GC()
+ runtime.GC()
+ runtime.GC()
+ var m runtime.MemStats
+ runtime.ReadMemStats(&m)
+ return command.MemStatsResult{
+ HeapAlloc: m.HeapAlloc,
+ HeapInUse: m.HeapInuse,
+ }, nil
+}
diff --git a/gopls/internal/lsp/command/command_gen.go b/gopls/internal/lsp/command/command_gen.go
new file mode 100644
index 000000000..a768a5ae2
--- /dev/null
+++ b/gopls/internal/lsp/command/command_gen.go
@@ -0,0 +1,509 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Don't include this file during code generation, or it will break the build
+// if existing interface methods have been modified.
+//go:build !generate
+// +build !generate
+
+package command
+
+// Code generated by generate.go. DO NOT EDIT.
+
+import (
+ "context"
+ "fmt"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+)
+
+const (
+ AddDependency Command = "add_dependency"
+ AddImport Command = "add_import"
+ ApplyFix Command = "apply_fix"
+ CheckUpgrades Command = "check_upgrades"
+ EditGoDirective Command = "edit_go_directive"
+ FetchVulncheckResult Command = "fetch_vulncheck_result"
+ GCDetails Command = "gc_details"
+ Generate Command = "generate"
+ GoGetPackage Command = "go_get_package"
+ ListImports Command = "list_imports"
+ ListKnownPackages Command = "list_known_packages"
+ MemStats Command = "mem_stats"
+ RegenerateCgo Command = "regenerate_cgo"
+ RemoveDependency Command = "remove_dependency"
+ ResetGoModDiagnostics Command = "reset_go_mod_diagnostics"
+ RunGovulncheck Command = "run_govulncheck"
+ RunTests Command = "run_tests"
+ StartDebugging Command = "start_debugging"
+ Test Command = "test"
+ Tidy Command = "tidy"
+ ToggleGCDetails Command = "toggle_gc_details"
+ UpdateGoSum Command = "update_go_sum"
+ UpgradeDependency Command = "upgrade_dependency"
+ Vendor Command = "vendor"
+)
+
+var Commands = []Command{
+ AddDependency,
+ AddImport,
+ ApplyFix,
+ CheckUpgrades,
+ EditGoDirective,
+ FetchVulncheckResult,
+ GCDetails,
+ Generate,
+ GoGetPackage,
+ ListImports,
+ ListKnownPackages,
+ MemStats,
+ RegenerateCgo,
+ RemoveDependency,
+ ResetGoModDiagnostics,
+ RunGovulncheck,
+ RunTests,
+ StartDebugging,
+ Test,
+ Tidy,
+ ToggleGCDetails,
+ UpdateGoSum,
+ UpgradeDependency,
+ Vendor,
+}
+
+func Dispatch(ctx context.Context, params *protocol.ExecuteCommandParams, s Interface) (interface{}, error) {
+ switch params.Command {
+ case "gopls.add_dependency":
+ var a0 DependencyArgs
+ if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
+ return nil, err
+ }
+ return nil, s.AddDependency(ctx, a0)
+ case "gopls.add_import":
+ var a0 AddImportArgs
+ if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
+ return nil, err
+ }
+ return nil, s.AddImport(ctx, a0)
+ case "gopls.apply_fix":
+ var a0 ApplyFixArgs
+ if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
+ return nil, err
+ }
+ return nil, s.ApplyFix(ctx, a0)
+ case "gopls.check_upgrades":
+ var a0 CheckUpgradesArgs
+ if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
+ return nil, err
+ }
+ return nil, s.CheckUpgrades(ctx, a0)
+ case "gopls.edit_go_directive":
+ var a0 EditGoDirectiveArgs
+ if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
+ return nil, err
+ }
+ return nil, s.EditGoDirective(ctx, a0)
+ case "gopls.fetch_vulncheck_result":
+ var a0 URIArg
+ if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
+ return nil, err
+ }
+ return s.FetchVulncheckResult(ctx, a0)
+ case "gopls.gc_details":
+ var a0 protocol.DocumentURI
+ if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
+ return nil, err
+ }
+ return nil, s.GCDetails(ctx, a0)
+ case "gopls.generate":
+ var a0 GenerateArgs
+ if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
+ return nil, err
+ }
+ return nil, s.Generate(ctx, a0)
+ case "gopls.go_get_package":
+ var a0 GoGetPackageArgs
+ if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
+ return nil, err
+ }
+ return nil, s.GoGetPackage(ctx, a0)
+ case "gopls.list_imports":
+ var a0 URIArg
+ if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
+ return nil, err
+ }
+ return s.ListImports(ctx, a0)
+ case "gopls.list_known_packages":
+ var a0 URIArg
+ if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
+ return nil, err
+ }
+ return s.ListKnownPackages(ctx, a0)
+ case "gopls.mem_stats":
+ return s.MemStats(ctx)
+ case "gopls.regenerate_cgo":
+ var a0 URIArg
+ if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
+ return nil, err
+ }
+ return nil, s.RegenerateCgo(ctx, a0)
+ case "gopls.remove_dependency":
+ var a0 RemoveDependencyArgs
+ if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
+ return nil, err
+ }
+ return nil, s.RemoveDependency(ctx, a0)
+ case "gopls.reset_go_mod_diagnostics":
+ var a0 ResetGoModDiagnosticsArgs
+ if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
+ return nil, err
+ }
+ return nil, s.ResetGoModDiagnostics(ctx, a0)
+ case "gopls.run_govulncheck":
+ var a0 VulncheckArgs
+ if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
+ return nil, err
+ }
+ return s.RunGovulncheck(ctx, a0)
+ case "gopls.run_tests":
+ var a0 RunTestsArgs
+ if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
+ return nil, err
+ }
+ return nil, s.RunTests(ctx, a0)
+ case "gopls.start_debugging":
+ var a0 DebuggingArgs
+ if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
+ return nil, err
+ }
+ return s.StartDebugging(ctx, a0)
+ case "gopls.test":
+ var a0 protocol.DocumentURI
+ var a1 []string
+ var a2 []string
+ if err := UnmarshalArgs(params.Arguments, &a0, &a1, &a2); err != nil {
+ return nil, err
+ }
+ return nil, s.Test(ctx, a0, a1, a2)
+ case "gopls.tidy":
+ var a0 URIArgs
+ if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
+ return nil, err
+ }
+ return nil, s.Tidy(ctx, a0)
+ case "gopls.toggle_gc_details":
+ var a0 URIArg
+ if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
+ return nil, err
+ }
+ return nil, s.ToggleGCDetails(ctx, a0)
+ case "gopls.update_go_sum":
+ var a0 URIArgs
+ if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
+ return nil, err
+ }
+ return nil, s.UpdateGoSum(ctx, a0)
+ case "gopls.upgrade_dependency":
+ var a0 DependencyArgs
+ if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
+ return nil, err
+ }
+ return nil, s.UpgradeDependency(ctx, a0)
+ case "gopls.vendor":
+ var a0 URIArg
+ if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
+ return nil, err
+ }
+ return nil, s.Vendor(ctx, a0)
+ }
+ return nil, fmt.Errorf("unsupported command %q", params.Command)
+}
+
+func NewAddDependencyCommand(title string, a0 DependencyArgs) (protocol.Command, error) {
+ args, err := MarshalArgs(a0)
+ if err != nil {
+ return protocol.Command{}, err
+ }
+ return protocol.Command{
+ Title: title,
+ Command: "gopls.add_dependency",
+ Arguments: args,
+ }, nil
+}
+
+func NewAddImportCommand(title string, a0 AddImportArgs) (protocol.Command, error) {
+ args, err := MarshalArgs(a0)
+ if err != nil {
+ return protocol.Command{}, err
+ }
+ return protocol.Command{
+ Title: title,
+ Command: "gopls.add_import",
+ Arguments: args,
+ }, nil
+}
+
+func NewApplyFixCommand(title string, a0 ApplyFixArgs) (protocol.Command, error) {
+ args, err := MarshalArgs(a0)
+ if err != nil {
+ return protocol.Command{}, err
+ }
+ return protocol.Command{
+ Title: title,
+ Command: "gopls.apply_fix",
+ Arguments: args,
+ }, nil
+}
+
+func NewCheckUpgradesCommand(title string, a0 CheckUpgradesArgs) (protocol.Command, error) {
+ args, err := MarshalArgs(a0)
+ if err != nil {
+ return protocol.Command{}, err
+ }
+ return protocol.Command{
+ Title: title,
+ Command: "gopls.check_upgrades",
+ Arguments: args,
+ }, nil
+}
+
+func NewEditGoDirectiveCommand(title string, a0 EditGoDirectiveArgs) (protocol.Command, error) {
+ args, err := MarshalArgs(a0)
+ if err != nil {
+ return protocol.Command{}, err
+ }
+ return protocol.Command{
+ Title: title,
+ Command: "gopls.edit_go_directive",
+ Arguments: args,
+ }, nil
+}
+
+func NewFetchVulncheckResultCommand(title string, a0 URIArg) (protocol.Command, error) {
+ args, err := MarshalArgs(a0)
+ if err != nil {
+ return protocol.Command{}, err
+ }
+ return protocol.Command{
+ Title: title,
+ Command: "gopls.fetch_vulncheck_result",
+ Arguments: args,
+ }, nil
+}
+
+func NewGCDetailsCommand(title string, a0 protocol.DocumentURI) (protocol.Command, error) {
+ args, err := MarshalArgs(a0)
+ if err != nil {
+ return protocol.Command{}, err
+ }
+ return protocol.Command{
+ Title: title,
+ Command: "gopls.gc_details",
+ Arguments: args,
+ }, nil
+}
+
+func NewGenerateCommand(title string, a0 GenerateArgs) (protocol.Command, error) {
+ args, err := MarshalArgs(a0)
+ if err != nil {
+ return protocol.Command{}, err
+ }
+ return protocol.Command{
+ Title: title,
+ Command: "gopls.generate",
+ Arguments: args,
+ }, nil
+}
+
+func NewGoGetPackageCommand(title string, a0 GoGetPackageArgs) (protocol.Command, error) {
+ args, err := MarshalArgs(a0)
+ if err != nil {
+ return protocol.Command{}, err
+ }
+ return protocol.Command{
+ Title: title,
+ Command: "gopls.go_get_package",
+ Arguments: args,
+ }, nil
+}
+
+func NewListImportsCommand(title string, a0 URIArg) (protocol.Command, error) {
+ args, err := MarshalArgs(a0)
+ if err != nil {
+ return protocol.Command{}, err
+ }
+ return protocol.Command{
+ Title: title,
+ Command: "gopls.list_imports",
+ Arguments: args,
+ }, nil
+}
+
+func NewListKnownPackagesCommand(title string, a0 URIArg) (protocol.Command, error) {
+ args, err := MarshalArgs(a0)
+ if err != nil {
+ return protocol.Command{}, err
+ }
+ return protocol.Command{
+ Title: title,
+ Command: "gopls.list_known_packages",
+ Arguments: args,
+ }, nil
+}
+
+func NewMemStatsCommand(title string) (protocol.Command, error) {
+ args, err := MarshalArgs()
+ if err != nil {
+ return protocol.Command{}, err
+ }
+ return protocol.Command{
+ Title: title,
+ Command: "gopls.mem_stats",
+ Arguments: args,
+ }, nil
+}
+
+func NewRegenerateCgoCommand(title string, a0 URIArg) (protocol.Command, error) {
+ args, err := MarshalArgs(a0)
+ if err != nil {
+ return protocol.Command{}, err
+ }
+ return protocol.Command{
+ Title: title,
+ Command: "gopls.regenerate_cgo",
+ Arguments: args,
+ }, nil
+}
+
+func NewRemoveDependencyCommand(title string, a0 RemoveDependencyArgs) (protocol.Command, error) {
+ args, err := MarshalArgs(a0)
+ if err != nil {
+ return protocol.Command{}, err
+ }
+ return protocol.Command{
+ Title: title,
+ Command: "gopls.remove_dependency",
+ Arguments: args,
+ }, nil
+}
+
+func NewResetGoModDiagnosticsCommand(title string, a0 ResetGoModDiagnosticsArgs) (protocol.Command, error) {
+ args, err := MarshalArgs(a0)
+ if err != nil {
+ return protocol.Command{}, err
+ }
+ return protocol.Command{
+ Title: title,
+ Command: "gopls.reset_go_mod_diagnostics",
+ Arguments: args,
+ }, nil
+}
+
+func NewRunGovulncheckCommand(title string, a0 VulncheckArgs) (protocol.Command, error) {
+ args, err := MarshalArgs(a0)
+ if err != nil {
+ return protocol.Command{}, err
+ }
+ return protocol.Command{
+ Title: title,
+ Command: "gopls.run_govulncheck",
+ Arguments: args,
+ }, nil
+}
+
+func NewRunTestsCommand(title string, a0 RunTestsArgs) (protocol.Command, error) {
+ args, err := MarshalArgs(a0)
+ if err != nil {
+ return protocol.Command{}, err
+ }
+ return protocol.Command{
+ Title: title,
+ Command: "gopls.run_tests",
+ Arguments: args,
+ }, nil
+}
+
+func NewStartDebuggingCommand(title string, a0 DebuggingArgs) (protocol.Command, error) {
+ args, err := MarshalArgs(a0)
+ if err != nil {
+ return protocol.Command{}, err
+ }
+ return protocol.Command{
+ Title: title,
+ Command: "gopls.start_debugging",
+ Arguments: args,
+ }, nil
+}
+
+func NewTestCommand(title string, a0 protocol.DocumentURI, a1 []string, a2 []string) (protocol.Command, error) {
+ args, err := MarshalArgs(a0, a1, a2)
+ if err != nil {
+ return protocol.Command{}, err
+ }
+ return protocol.Command{
+ Title: title,
+ Command: "gopls.test",
+ Arguments: args,
+ }, nil
+}
+
+func NewTidyCommand(title string, a0 URIArgs) (protocol.Command, error) {
+ args, err := MarshalArgs(a0)
+ if err != nil {
+ return protocol.Command{}, err
+ }
+ return protocol.Command{
+ Title: title,
+ Command: "gopls.tidy",
+ Arguments: args,
+ }, nil
+}
+
+func NewToggleGCDetailsCommand(title string, a0 URIArg) (protocol.Command, error) {
+ args, err := MarshalArgs(a0)
+ if err != nil {
+ return protocol.Command{}, err
+ }
+ return protocol.Command{
+ Title: title,
+ Command: "gopls.toggle_gc_details",
+ Arguments: args,
+ }, nil
+}
+
+func NewUpdateGoSumCommand(title string, a0 URIArgs) (protocol.Command, error) {
+ args, err := MarshalArgs(a0)
+ if err != nil {
+ return protocol.Command{}, err
+ }
+ return protocol.Command{
+ Title: title,
+ Command: "gopls.update_go_sum",
+ Arguments: args,
+ }, nil
+}
+
+func NewUpgradeDependencyCommand(title string, a0 DependencyArgs) (protocol.Command, error) {
+ args, err := MarshalArgs(a0)
+ if err != nil {
+ return protocol.Command{}, err
+ }
+ return protocol.Command{
+ Title: title,
+ Command: "gopls.upgrade_dependency",
+ Arguments: args,
+ }, nil
+}
+
+func NewVendorCommand(title string, a0 URIArg) (protocol.Command, error) {
+ args, err := MarshalArgs(a0)
+ if err != nil {
+ return protocol.Command{}, err
+ }
+ return protocol.Command{
+ Title: title,
+ Command: "gopls.vendor",
+ Arguments: args,
+ }, nil
+}
diff --git a/gopls/internal/lsp/command/commandmeta/meta.go b/gopls/internal/lsp/command/commandmeta/meta.go
new file mode 100644
index 000000000..bf85c4faa
--- /dev/null
+++ b/gopls/internal/lsp/command/commandmeta/meta.go
@@ -0,0 +1,259 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package commandmeta provides metadata about LSP commands, by analyzing the
+// command.Interface type.
+package commandmeta
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "go/types"
+ "reflect"
+ "strings"
+ "unicode"
+
+ "golang.org/x/tools/go/ast/astutil"
+ "golang.org/x/tools/go/packages"
+ "golang.org/x/tools/gopls/internal/lsp/command"
+)
+
+type Command struct {
+ MethodName string
+ Name string
+ // TODO(rFindley): I think Title can actually be eliminated. In all cases
+ // where we use it, there is probably a more appropriate contextual title.
+ Title string
+ Doc string
+ Args []*Field
+ Result *Field
+}
+
+func (c *Command) ID() string {
+ return command.ID(c.Name)
+}
+
+type Field struct {
+ Name string
+ Doc string
+ JSONTag string
+ Type types.Type
+ FieldMod string
+ // In some circumstances, we may want to recursively load additional field
+ // descriptors for fields of struct types, documenting their internals.
+ Fields []*Field
+}
+
+func Load() (*packages.Package, []*Command, error) {
+ pkgs, err := packages.Load(
+ &packages.Config{
+ Mode: packages.NeedTypes | packages.NeedTypesInfo | packages.NeedSyntax | packages.NeedImports | packages.NeedDeps,
+ BuildFlags: []string{"-tags=generate"},
+ },
+ "golang.org/x/tools/gopls/internal/lsp/command",
+ )
+ if err != nil {
+ return nil, nil, fmt.Errorf("packages.Load: %v", err)
+ }
+ pkg := pkgs[0]
+ if len(pkg.Errors) > 0 {
+ return pkg, nil, pkg.Errors[0]
+ }
+
+ // For a bit of type safety, use reflection to get the interface name within
+ // the package scope.
+ it := reflect.TypeOf((*command.Interface)(nil)).Elem()
+ obj := pkg.Types.Scope().Lookup(it.Name()).Type().Underlying().(*types.Interface)
+
+ // Load command metadata corresponding to each interface method.
+ var commands []*Command
+ loader := fieldLoader{make(map[types.Object]*Field)}
+ for i := 0; i < obj.NumMethods(); i++ {
+ m := obj.Method(i)
+ c, err := loader.loadMethod(pkg, m)
+ if err != nil {
+ return nil, nil, fmt.Errorf("loading %s: %v", m.Name(), err)
+ }
+ commands = append(commands, c)
+ }
+ return pkg, commands, nil
+}
+
+// fieldLoader loads field information, memoizing results to prevent infinite
+// recursion.
+type fieldLoader struct {
+ loaded map[types.Object]*Field
+}
+
+var universeError = types.Universe.Lookup("error").Type()
+
+func (l *fieldLoader) loadMethod(pkg *packages.Package, m *types.Func) (*Command, error) {
+ node, err := findField(pkg, m.Pos())
+ if err != nil {
+ return nil, err
+ }
+ title, doc := splitDoc(node.Doc.Text())
+ c := &Command{
+ MethodName: m.Name(),
+ Name: lspName(m.Name()),
+ Doc: doc,
+ Title: title,
+ }
+ sig := m.Type().Underlying().(*types.Signature)
+ rlen := sig.Results().Len()
+ if rlen > 2 || rlen == 0 {
+ return nil, fmt.Errorf("must have 1 or 2 returns, got %d", rlen)
+ }
+ finalResult := sig.Results().At(rlen - 1)
+ if !types.Identical(finalResult.Type(), universeError) {
+ return nil, fmt.Errorf("final return must be error")
+ }
+ if rlen == 2 {
+ obj := sig.Results().At(0)
+ c.Result, err = l.loadField(pkg, obj, "", "")
+ if err != nil {
+ return nil, err
+ }
+ }
+ for i := 0; i < sig.Params().Len(); i++ {
+ obj := sig.Params().At(i)
+ fld, err := l.loadField(pkg, obj, "", "")
+ if err != nil {
+ return nil, err
+ }
+ if i == 0 {
+ // Lazy check that the first argument is a context. We could relax this,
+ // but then the generated code gets more complicated.
+ if named, ok := fld.Type.(*types.Named); !ok || named.Obj().Name() != "Context" || named.Obj().Pkg().Path() != "context" {
+ return nil, fmt.Errorf("first method parameter must be context.Context")
+ }
+ // Skip the context argument, as it is implied.
+ continue
+ }
+ c.Args = append(c.Args, fld)
+ }
+ return c, nil
+}
+
+func (l *fieldLoader) loadField(pkg *packages.Package, obj *types.Var, doc, tag string) (*Field, error) {
+ if existing, ok := l.loaded[obj]; ok {
+ return existing, nil
+ }
+ fld := &Field{
+ Name: obj.Name(),
+ Doc: strings.TrimSpace(doc),
+ Type: obj.Type(),
+ JSONTag: reflect.StructTag(tag).Get("json"),
+ }
+ under := fld.Type.Underlying()
+ // Quick-and-dirty handling for various underlying types.
+ switch p := under.(type) {
+ case *types.Pointer:
+ under = p.Elem().Underlying()
+ case *types.Array:
+ under = p.Elem().Underlying()
+ fld.FieldMod = fmt.Sprintf("[%d]", p.Len())
+ case *types.Slice:
+ under = p.Elem().Underlying()
+ fld.FieldMod = "[]"
+ }
+
+ if s, ok := under.(*types.Struct); ok {
+ for i := 0; i < s.NumFields(); i++ {
+ obj2 := s.Field(i)
+ pkg2 := pkg
+ if obj2.Pkg() != pkg2.Types {
+ pkg2, ok = pkg.Imports[obj2.Pkg().Path()]
+ if !ok {
+ return nil, fmt.Errorf("missing import for %q: %q", pkg.ID, obj2.Pkg().Path())
+ }
+ }
+ node, err := findField(pkg2, obj2.Pos())
+ if err != nil {
+ return nil, err
+ }
+ tag := s.Tag(i)
+ structField, err := l.loadField(pkg2, obj2, node.Doc.Text(), tag)
+ if err != nil {
+ return nil, err
+ }
+ fld.Fields = append(fld.Fields, structField)
+ }
+ }
+ return fld, nil
+}
+
+// splitDoc parses a command doc string to separate the title from normal
+// documentation.
+//
+// The doc comment should be of the form: "MethodName: Title\nDocumentation"
+func splitDoc(text string) (title, doc string) {
+ docParts := strings.SplitN(text, "\n", 2)
+ titleParts := strings.SplitN(docParts[0], ":", 2)
+ if len(titleParts) > 1 {
+ title = strings.TrimSpace(titleParts[1])
+ }
+ if len(docParts) > 1 {
+ doc = strings.TrimSpace(docParts[1])
+ }
+ return title, doc
+}
+
+// lspName returns the normalized command name to use in the LSP.
+func lspName(methodName string) string {
+ words := splitCamel(methodName)
+ for i := range words {
+ words[i] = strings.ToLower(words[i])
+ }
+ return strings.Join(words, "_")
+}
+
+// splitCamel splits s into words, according to camel-case word boundaries.
+// Initialisms are grouped as a single word.
+//
+// For example:
+//
+// "RunTests" -> []string{"Run", "Tests"}
+// "GCDetails" -> []string{"GC", "Details"}
+func splitCamel(s string) []string {
+ var words []string
+ for len(s) > 0 {
+ last := strings.LastIndexFunc(s, unicode.IsUpper)
+ if last < 0 {
+ last = 0
+ }
+ if last == len(s)-1 {
+ // Group initialisms as a single word.
+ last = 1 + strings.LastIndexFunc(s[:last], func(r rune) bool { return !unicode.IsUpper(r) })
+ }
+ words = append(words, s[last:])
+ s = s[:last]
+ }
+ for i := 0; i < len(words)/2; i++ {
+ j := len(words) - i - 1
+ words[i], words[j] = words[j], words[i]
+ }
+ return words
+}
+
+// findField finds the struct field or interface method positioned at pos,
+// within the AST.
+func findField(pkg *packages.Package, pos token.Pos) (*ast.Field, error) {
+ fset := pkg.Fset
+ var file *ast.File
+ for _, f := range pkg.Syntax {
+ if fset.File(f.Pos()).Name() == fset.File(pos).Name() {
+ file = f
+ break
+ }
+ }
+ if file == nil {
+ return nil, fmt.Errorf("no file for pos %v", pos)
+ }
+ path, _ := astutil.PathEnclosingInterval(file, pos, pos)
+ // This is fragile, but in the cases we care about, the field will be in
+ // path[1].
+ return path[1].(*ast.Field), nil
+}
diff --git a/gopls/internal/lsp/command/gen/gen.go b/gopls/internal/lsp/command/gen/gen.go
new file mode 100644
index 000000000..29428699e
--- /dev/null
+++ b/gopls/internal/lsp/command/gen/gen.go
@@ -0,0 +1,155 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package gen is used to generate command bindings from the gopls command
+// interface.
+package gen
+
+import (
+ "bytes"
+ "fmt"
+ "go/types"
+ "text/template"
+
+ "golang.org/x/tools/internal/imports"
+ "golang.org/x/tools/gopls/internal/lsp/command/commandmeta"
+)
+
+const src = `// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Don't include this file during code generation, or it will break the build
+// if existing interface methods have been modified.
+//go:build !generate
+// +build !generate
+
+package command
+
+// Code generated by generate.go. DO NOT EDIT.
+
+import (
+ {{range $k, $v := .Imports -}}
+ "{{$k}}"
+ {{end}}
+)
+
+const (
+{{- range .Commands}}
+ {{.MethodName}} Command = "{{.Name}}"
+{{- end}}
+)
+
+var Commands = []Command {
+{{- range .Commands}}
+ {{.MethodName}},
+{{- end}}
+}
+
+func Dispatch(ctx context.Context, params *protocol.ExecuteCommandParams, s Interface) (interface{}, error) {
+ switch params.Command {
+ {{- range .Commands}}
+ case "{{.ID}}":
+ {{- if .Args -}}
+ {{- range $i, $v := .Args}}
+ var a{{$i}} {{typeString $v.Type}}
+ {{- end}}
+ if err := UnmarshalArgs(params.Arguments{{range $i, $v := .Args}}, &a{{$i}}{{end}}); err != nil {
+ return nil, err
+ }
+ {{end -}}
+ return {{if not .Result}}nil, {{end}}s.{{.MethodName}}(ctx{{range $i, $v := .Args}}, a{{$i}}{{end}})
+ {{- end}}
+ }
+ return nil, fmt.Errorf("unsupported command %q", params.Command)
+}
+{{- range .Commands}}
+
+func New{{.MethodName}}Command(title string, {{range $i, $v := .Args}}{{if $i}}, {{end}}a{{$i}} {{typeString $v.Type}}{{end}}) (protocol.Command, error) {
+ args, err := MarshalArgs({{range $i, $v := .Args}}{{if $i}}, {{end}}a{{$i}}{{end}})
+ if err != nil {
+ return protocol.Command{}, err
+ }
+ return protocol.Command{
+ Title: title,
+ Command: "{{.ID}}",
+ Arguments: args,
+ }, nil
+}
+{{end}}
+`
+
+type data struct {
+ Imports map[string]bool
+ Commands []*commandmeta.Command
+}
+
+func Generate() ([]byte, error) {
+ pkg, cmds, err := commandmeta.Load()
+ if err != nil {
+ return nil, fmt.Errorf("loading command data: %v", err)
+ }
+ qf := func(p *types.Package) string {
+ if p == pkg.Types {
+ return ""
+ }
+ return p.Name()
+ }
+ tmpl, err := template.New("").Funcs(template.FuncMap{
+ "typeString": func(t types.Type) string {
+ return types.TypeString(t, qf)
+ },
+ }).Parse(src)
+ if err != nil {
+ return nil, err
+ }
+ d := data{
+ Commands: cmds,
+ Imports: map[string]bool{
+ "context": true,
+ "fmt": true,
+ "golang.org/x/tools/gopls/internal/lsp/protocol": true,
+ },
+ }
+ const thispkg = "golang.org/x/tools/gopls/internal/lsp/command"
+ for _, c := range d.Commands {
+ for _, arg := range c.Args {
+ pth := pkgPath(arg.Type)
+ if pth != "" && pth != thispkg {
+ d.Imports[pth] = true
+ }
+ }
+ if c.Result != nil {
+ pth := pkgPath(c.Result.Type)
+ if pth != "" && pth != thispkg {
+ d.Imports[pth] = true
+ }
+ }
+ }
+
+ var buf bytes.Buffer
+ if err := tmpl.Execute(&buf, d); err != nil {
+ return nil, fmt.Errorf("executing: %v", err)
+ }
+
+ opts := &imports.Options{
+ AllErrors: true,
+ FormatOnly: true,
+ Comments: true,
+ }
+ content, err := imports.Process("", buf.Bytes(), opts)
+ if err != nil {
+ return nil, fmt.Errorf("goimports: %v", err)
+ }
+ return content, nil
+}
+
+func pkgPath(t types.Type) string {
+ if n, ok := t.(*types.Named); ok {
+ if pkg := n.Obj().Pkg(); pkg != nil {
+ return pkg.Path()
+ }
+ }
+ return ""
+}
diff --git a/gopls/internal/lsp/command/generate.go b/gopls/internal/lsp/command/generate.go
new file mode 100644
index 000000000..b7907e60f
--- /dev/null
+++ b/gopls/internal/lsp/command/generate.go
@@ -0,0 +1,25 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+// +build ignore
+
+package main
+
+import (
+ "log"
+ "os"
+
+ "golang.org/x/tools/gopls/internal/lsp/command/gen"
+)
+
+func main() {
+ content, err := gen.Generate()
+ if err != nil {
+ log.Fatal(err)
+ }
+ if err := os.WriteFile("command_gen.go", content, 0644); err != nil {
+ log.Fatal(err)
+ }
+}
diff --git a/gopls/internal/lsp/command/interface.go b/gopls/internal/lsp/command/interface.go
new file mode 100644
index 000000000..ea279e83c
--- /dev/null
+++ b/gopls/internal/lsp/command/interface.go
@@ -0,0 +1,410 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run -tags=generate generate.go
+
+// Package command defines the interface provided by gopls for the
+// workspace/executeCommand LSP request.
+//
+// This interface is fully specified by the Interface type, provided it
+// conforms to the restrictions outlined in its doc string.
+//
+// Bindings for server-side command dispatch and client-side serialization are
+// also provided by this package, via code generation.
+package command
+
+import (
+ "context"
+
+ "golang.org/x/tools/gopls/internal/govulncheck"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+)
+
+// Interface defines the interface gopls exposes for the
+// workspace/executeCommand request.
+//
+// This interface is used to generate marshaling/unmarshaling code, dispatch,
+// and documentation, and so has some additional restrictions:
+// 1. All method arguments must be JSON serializable.
+// 2. Methods must return either error or (T, error), where T is a
+// JSON serializable type.
+// 3. The first line of the doc string is special. Everything after the colon
+// is considered the command 'Title'.
+// TODO(rFindley): reconsider this -- Title may be unnecessary.
+type Interface interface {
+ // ApplyFix: Apply a fix
+ //
+ // Applies a fix to a region of source code.
+ ApplyFix(context.Context, ApplyFixArgs) error
+ // Test: Run test(s) (legacy)
+ //
+ // Runs `go test` for a specific set of test or benchmark functions.
+ Test(context.Context, protocol.DocumentURI, []string, []string) error
+
+ // TODO: deprecate Test in favor of RunTests below.
+
+ // Test: Run test(s)
+ //
+ // Runs `go test` for a specific set of test or benchmark functions.
+ RunTests(context.Context, RunTestsArgs) error
+
+ // Generate: Run go generate
+ //
+ // Runs `go generate` for a given directory.
+ Generate(context.Context, GenerateArgs) error
+
+ // RegenerateCgo: Regenerate cgo
+ //
+ // Regenerates cgo definitions.
+ RegenerateCgo(context.Context, URIArg) error
+
+ // Tidy: Run go mod tidy
+ //
+ // Runs `go mod tidy` for a module.
+ Tidy(context.Context, URIArgs) error
+
+ // Vendor: Run go mod vendor
+ //
+ // Runs `go mod vendor` for a module.
+ Vendor(context.Context, URIArg) error
+
+ // EditGoDirective: Run go mod edit -go=version
+ //
+ // Runs `go mod edit -go=version` for a module.
+ EditGoDirective(context.Context, EditGoDirectiveArgs) error
+
+ // UpdateGoSum: Update go.sum
+ //
+ // Updates the go.sum file for a module.
+ UpdateGoSum(context.Context, URIArgs) error
+
+ // CheckUpgrades: Check for upgrades
+ //
+ // Checks for module upgrades.
+ CheckUpgrades(context.Context, CheckUpgradesArgs) error
+
+ // AddDependency: Add a dependency
+ //
+ // Adds a dependency to the go.mod file for a module.
+ AddDependency(context.Context, DependencyArgs) error
+
+ // UpgradeDependency: Upgrade a dependency
+ //
+ // Upgrades a dependency in the go.mod file for a module.
+ UpgradeDependency(context.Context, DependencyArgs) error
+
+ // RemoveDependency: Remove a dependency
+ //
+ // Removes a dependency from the go.mod file of a module.
+ RemoveDependency(context.Context, RemoveDependencyArgs) error
+
+ // ResetGoModDiagnostics: Reset go.mod diagnostics
+ //
+ // Reset diagnostics in the go.mod file of a module.
+ ResetGoModDiagnostics(context.Context, ResetGoModDiagnosticsArgs) error
+
+ // GoGetPackage: go get a package
+ //
+ // Runs `go get` to fetch a package.
+ GoGetPackage(context.Context, GoGetPackageArgs) error
+
+ // GCDetails: Toggle gc_details
+ //
+ // Toggle the calculation of gc annotations.
+ GCDetails(context.Context, protocol.DocumentURI) error
+
+ // TODO: deprecate GCDetails in favor of ToggleGCDetails below.
+
+ // ToggleGCDetails: Toggle gc_details
+ //
+ // Toggle the calculation of gc annotations.
+ ToggleGCDetails(context.Context, URIArg) error
+
+ // ListKnownPackages: List known packages
+ //
+ // Retrieve a list of packages that are importable from the given URI.
+ ListKnownPackages(context.Context, URIArg) (ListKnownPackagesResult, error)
+
+ // ListImports: List imports of a file and its package
+ //
+ // Retrieve a list of imports in the given Go file, and the package it
+ // belongs to.
+ ListImports(context.Context, URIArg) (ListImportsResult, error)
+
+ // AddImport: Add an import
+ //
+ // Ask the server to add an import path to a given Go file. The method will
+ // call applyEdit on the client so that clients don't have to apply the edit
+ // themselves.
+ AddImport(context.Context, AddImportArgs) error
+
+ // StartDebugging: Start the gopls debug server
+ //
+ // Start the gopls debug server if it isn't running, and return the debug
+ // address.
+ StartDebugging(context.Context, DebuggingArgs) (DebuggingResult, error)
+
+ // RunGovulncheck: Run govulncheck.
+ //
+ // Run vulnerability check (`govulncheck`).
+ RunGovulncheck(context.Context, VulncheckArgs) (RunVulncheckResult, error)
+
+ // FetchVulncheckResult: Get known vulncheck result
+ //
+ // Fetch the result of latest vulnerability check (`govulncheck`).
+ FetchVulncheckResult(context.Context, URIArg) (map[protocol.DocumentURI]*govulncheck.Result, error)
+
+ // MemStats: fetch memory statistics
+ //
+ // Call runtime.GC multiple times and return memory statistics as reported by
+ // runtime.MemStats.
+ //
+ // This command is used for benchmarking, and may change in the future.
+ MemStats(context.Context) (MemStatsResult, error)
+}
+
+type RunTestsArgs struct {
+ // The test file containing the tests to run.
+ URI protocol.DocumentURI
+
+ // Specific test names to run, e.g. TestFoo.
+ Tests []string
+
+ // Specific benchmarks to run, e.g. BenchmarkFoo.
+ Benchmarks []string
+}
+
+type GenerateArgs struct {
+ // URI for the directory to generate.
+ Dir protocol.DocumentURI
+
+ // Whether to generate recursively (go generate ./...)
+ Recursive bool
+}
+
+// TODO(rFindley): document the rest of these once the docgen is fleshed out.
+
+type ApplyFixArgs struct {
+ // The fix to apply.
+ Fix string
+ // The file URI for the document to fix.
+ URI protocol.DocumentURI
+ // The document range to scan for fixes.
+ Range protocol.Range
+}
+
+type URIArg struct {
+ // The file URI.
+ URI protocol.DocumentURI
+}
+
+type URIArgs struct {
+ // The file URIs.
+ URIs []protocol.DocumentURI
+}
+
+type CheckUpgradesArgs struct {
+ // The go.mod file URI.
+ URI protocol.DocumentURI
+ // The modules to check.
+ Modules []string
+}
+
+type DependencyArgs struct {
+ // The go.mod file URI.
+ URI protocol.DocumentURI
+ // Additional args to pass to the go command.
+ GoCmdArgs []string
+ // Whether to add a require directive.
+ AddRequire bool
+}
+
+type RemoveDependencyArgs struct {
+ // The go.mod file URI.
+ URI protocol.DocumentURI
+ // The module path to remove.
+ ModulePath string
+ OnlyDiagnostic bool
+}
+
+type EditGoDirectiveArgs struct {
+ // Any document URI within the relevant module.
+ URI protocol.DocumentURI
+ // The version to pass to `go mod edit -go`.
+ Version string
+}
+
+type GoGetPackageArgs struct {
+ // Any document URI within the relevant module.
+ URI protocol.DocumentURI
+ // The package to go get.
+ Pkg string
+ AddRequire bool
+}
+
+type AddImportArgs struct {
+ // ImportPath is the target import path that should
+ // be added to the URI file
+ ImportPath string
+ // URI is the file that the ImportPath should be
+ // added to
+ URI protocol.DocumentURI
+}
+
+type ListKnownPackagesResult struct {
+ // Packages is a list of packages relative
+ // to the URIArg passed by the command request.
+ // In other words, it omits paths that are already
+ // imported or cannot be imported due to compiler
+ // restrictions.
+ Packages []string
+}
+
+type ListImportsResult struct {
+ // Imports is a list of imports in the requested file.
+ Imports []FileImport
+
+ // PackageImports is a list of all imports in the requested file's package.
+ PackageImports []PackageImport
+}
+
+type FileImport struct {
+ // Path is the import path of the import.
+ Path string
+ // Name is the name of the import, e.g. `foo` in `import foo "strings"`.
+ Name string
+}
+
+type PackageImport struct {
+ // Path is the import path of the import.
+ Path string
+}
+
+type DebuggingArgs struct {
+ // Optional: the address (including port) for the debug server to listen on.
+ // If not provided, the debug server will bind to "localhost:0", and the
+ // full debug URL will be contained in the result.
+ //
+ // If there is more than one gopls instance along the serving path (i.e. you
+ // are using a daemon), each gopls instance will attempt to start debugging.
+ // If Addr specifies a port, only the daemon will be able to bind to that
+ // port, and each intermediate gopls instance will fail to start debugging.
+ // For this reason it is recommended not to specify a port (or equivalently,
+ // to specify ":0").
+ //
+ // If the server was already debugging this field has no effect, and the
+ // result will contain the previously configured debug URL(s).
+ Addr string
+}
+
+type DebuggingResult struct {
+ // The URLs to use to access the debug servers, for all gopls instances in
+ // the serving path. For the common case of a single gopls instance (i.e. no
+ // daemon), this will be exactly one address.
+ //
+ // In the case of one or more gopls instances forwarding the LSP to a daemon,
+ // URLs will contain debug addresses for each server in the serving path, in
+ // serving order. The daemon debug address will be the last entry in the
+ // slice. If any intermediate gopls instance fails to start debugging, no
+ // error will be returned but the debug URL for that server in the URLs slice
+ // will be empty.
+ URLs []string
+}
+
+type ResetGoModDiagnosticsArgs struct {
+ URIArg
+
+ // Optional: source of the diagnostics to reset.
+ // If not set, all resettable go.mod diagnostics will be cleared.
+ DiagnosticSource string
+}
+
+type VulncheckArgs struct {
+ // Any document in the directory from which govulncheck will run.
+ URI protocol.DocumentURI
+
+ // Package pattern. E.g. "", ".", "./...".
+ Pattern string
+
+ // TODO: -tests
+}
+
+// RunVulncheckResult holds the result of asynchronously starting the vulncheck
+// command.
+type RunVulncheckResult struct {
+ // Token holds the progress token for LSP workDone reporting of the vulncheck
+ // invocation.
+ Token protocol.ProgressToken
+}
+
+type VulncheckResult struct {
+ Vuln []Vuln
+
+ // TODO: Text string format output?
+}
+
+// CallStack models a trace of function calls starting
+// with a client function or method and ending with a
+// call to a vulnerable symbol.
+type CallStack []StackEntry
+
+// StackEntry models an element of a call stack.
+type StackEntry struct {
+ // See golang.org/x/exp/vulncheck.StackEntry.
+
+ // User-friendly representation of function/method names.
+ // e.g. package.funcName, package.(recvType).methodName, ...
+ Name string
+ URI protocol.DocumentURI
+ Pos protocol.Position // Start position. (0-based. Column is always 0)
+}
+
+// Vuln models an osv.Entry and representative call stacks.
+// TODO: deprecate
+type Vuln struct {
+ // ID is the vulnerability ID (osv.Entry.ID).
+ // https://ossf.github.io/osv-schema/#id-modified-fields
+ ID string
+ // Details is the description of the vulnerability (osv.Entry.Details).
+ // https://ossf.github.io/osv-schema/#summary-details-fields
+ Details string `json:",omitempty"`
+ // Aliases are alternative IDs of the vulnerability.
+ // https://ossf.github.io/osv-schema/#aliases-field
+ Aliases []string `json:",omitempty"`
+
+ // Symbol is the name of the detected vulnerable function or method.
+ // Can be empty if the vulnerability exists in required modules, but no vulnerable symbols are used.
+ Symbol string `json:",omitempty"`
+ // PkgPath is the package path of the detected Symbol.
+ // Can be empty if the vulnerability exists in required modules, but no vulnerable packages are used.
+ PkgPath string `json:",omitempty"`
+ // ModPath is the module path corresponding to PkgPath.
+ // TODO: how do we specify standard library's vulnerability?
+ ModPath string `json:",omitempty"`
+
+ // URL is the URL for more info about the information.
+ // Either the database specific URL or the one of the URLs
+ // included in osv.Entry.References.
+ URL string `json:",omitempty"`
+
+ // Current is the current module version.
+ CurrentVersion string `json:",omitempty"`
+
+ // Fixed is the minimum module version that contains the fix.
+ FixedVersion string `json:",omitempty"`
+
+ // Example call stacks.
+ CallStacks []CallStack `json:",omitempty"`
+
+ // Short description of each call stack in CallStacks.
+ CallStackSummaries []string `json:",omitempty"`
+
+ // TODO: import graph & module graph.
+}
+
+// MemStatsResult holds selected fields from runtime.MemStats.
+type MemStatsResult struct {
+ HeapAlloc uint64
+ HeapInUse uint64
+}
diff --git a/gopls/internal/lsp/command/interface_test.go b/gopls/internal/lsp/command/interface_test.go
new file mode 100644
index 000000000..e602293a1
--- /dev/null
+++ b/gopls/internal/lsp/command/interface_test.go
@@ -0,0 +1,31 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package command_test
+
+import (
+ "io/ioutil"
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+ "golang.org/x/tools/gopls/internal/lsp/command/gen"
+ "golang.org/x/tools/internal/testenv"
+)
+
+func TestGenerated(t *testing.T) {
+ testenv.NeedsGoBuild(t) // This is a lie. We actually need the source code.
+
+ onDisk, err := ioutil.ReadFile("command_gen.go")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ generated, err := gen.Generate()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if diff := cmp.Diff(string(generated), string(onDisk)); diff != "" {
+ t.Errorf("command_gen.go is stale -- regenerate (-generated +on disk)\n%s", diff)
+ }
+}
diff --git a/gopls/internal/lsp/command/util.go b/gopls/internal/lsp/command/util.go
new file mode 100644
index 000000000..011c3413c
--- /dev/null
+++ b/gopls/internal/lsp/command/util.go
@@ -0,0 +1,63 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package command
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+// ID returns the command name for use in the LSP.
+func ID(name string) string {
+ return "gopls." + name
+}
+
+type Command string
+
+func (c Command) ID() string {
+ return ID(string(c))
+}
+
+// MarshalArgs encodes the given arguments to json.RawMessages. This function
+// is used to construct arguments to a protocol.Command.
+//
+// Example usage:
+//
+// jsonArgs, err := MarshalArgs(1, "hello", true, StructuredArg{42, 12.6})
+func MarshalArgs(args ...interface{}) ([]json.RawMessage, error) {
+ var out []json.RawMessage
+ for _, arg := range args {
+ argJSON, err := json.Marshal(arg)
+ if err != nil {
+ return nil, err
+ }
+ out = append(out, argJSON)
+ }
+ return out, nil
+}
+
+// UnmarshalArgs decodes the given json.RawMessages to the variables provided
+// by args. Each element of args should be a pointer.
+//
+// Example usage:
+//
+// var (
+// num int
+// str string
+// bul bool
+// structured StructuredArg
+// )
+// err := UnmarshalArgs(args, &num, &str, &bul, &structured)
+func UnmarshalArgs(jsonArgs []json.RawMessage, args ...interface{}) error {
+ if len(args) != len(jsonArgs) {
+ return fmt.Errorf("DecodeArgs: expected %d input arguments, got %d JSON arguments", len(args), len(jsonArgs))
+ }
+ for i, arg := range args {
+ if err := json.Unmarshal(jsonArgs[i], arg); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/gopls/internal/lsp/completion.go b/gopls/internal/lsp/completion.go
new file mode 100644
index 000000000..3052a5cbf
--- /dev/null
+++ b/gopls/internal/lsp/completion.go
@@ -0,0 +1,140 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsp
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/lsp/source/completion"
+ "golang.org/x/tools/gopls/internal/lsp/template"
+ "golang.org/x/tools/gopls/internal/lsp/work"
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/event/tag"
+)
+
+func (s *Server) completion(ctx context.Context, params *protocol.CompletionParams) (*protocol.CompletionList, error) {
+ snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.UnknownKind)
+ defer release()
+ if !ok {
+ return nil, err
+ }
+ var candidates []completion.CompletionItem
+ var surrounding *completion.Selection
+ switch snapshot.View().FileKind(fh) {
+ case source.Go:
+ candidates, surrounding, err = completion.Completion(ctx, snapshot, fh, params.Position, params.Context)
+ case source.Mod:
+ candidates, surrounding = nil, nil
+ case source.Work:
+ cl, err := work.Completion(ctx, snapshot, fh, params.Position)
+ if err != nil {
+ break
+ }
+ return cl, nil
+ case source.Tmpl:
+ var cl *protocol.CompletionList
+ cl, err = template.Completion(ctx, snapshot, fh, params.Position, params.Context)
+ if err != nil {
+ break // use common error handling, candidates==nil
+ }
+ return cl, nil
+ }
+ if err != nil {
+ event.Error(ctx, "no completions found", err, tag.Position.Of(params.Position))
+ }
+ if candidates == nil {
+ return &protocol.CompletionList{
+ IsIncomplete: true,
+ Items: []protocol.CompletionItem{},
+ }, nil
+ }
+
+ rng, err := surrounding.Range()
+ if err != nil {
+ return nil, err
+ }
+
+ // When using deep completions/fuzzy matching, report results as incomplete so
+ // client fetches updated completions after every key stroke.
+ options := snapshot.View().Options()
+ incompleteResults := options.DeepCompletion || options.Matcher == source.Fuzzy
+
+ items := toProtocolCompletionItems(candidates, rng, options)
+
+ return &protocol.CompletionList{
+ IsIncomplete: incompleteResults,
+ Items: items,
+ }, nil
+}
+
+func toProtocolCompletionItems(candidates []completion.CompletionItem, rng protocol.Range, options *source.Options) []protocol.CompletionItem {
+ var (
+ items = make([]protocol.CompletionItem, 0, len(candidates))
+ numDeepCompletionsSeen int
+ )
+ for i, candidate := range candidates {
+ // Limit the number of deep completions to not overwhelm the user in cases
+ // with dozens of deep completion matches.
+ if candidate.Depth > 0 {
+ if !options.DeepCompletion {
+ continue
+ }
+ if numDeepCompletionsSeen >= completion.MaxDeepCompletions {
+ continue
+ }
+ numDeepCompletionsSeen++
+ }
+ insertText := candidate.InsertText
+ if options.InsertTextFormat == protocol.SnippetTextFormat {
+ insertText = candidate.Snippet()
+ }
+
+ // This can happen if the client has snippets disabled but the
+ // candidate only supports snippet insertion.
+ if insertText == "" {
+ continue
+ }
+
+ doc := &protocol.Or_CompletionItem_documentation{
+ Value: protocol.MarkupContent{
+ Kind: protocol.Markdown,
+ Value: source.CommentToMarkdown(candidate.Documentation, options),
+ },
+ }
+ if options.PreferredContentFormat != protocol.Markdown {
+ doc.Value = candidate.Documentation
+ }
+ item := protocol.CompletionItem{
+ Label: candidate.Label,
+ Detail: candidate.Detail,
+ Kind: candidate.Kind,
+ TextEdit: &protocol.TextEdit{
+ NewText: insertText,
+ Range: rng,
+ },
+ InsertTextFormat: &options.InsertTextFormat,
+ AdditionalTextEdits: candidate.AdditionalTextEdits,
+ // This is a hack so that the client sorts completion results in the order
+ // according to their score. This can be removed upon the resolution of
+ // https://github.com/Microsoft/language-server-protocol/issues/348.
+ SortText: fmt.Sprintf("%05d", i),
+
+ // Trim operators (VSCode doesn't like weird characters in
+ // filterText).
+ FilterText: strings.TrimLeft(candidate.InsertText, "&*"),
+
+ Preselect: i == 0,
+ Documentation: doc,
+ Tags: candidate.Tags,
+ Deprecated: candidate.Deprecated,
+ }
+ items = append(items, item)
+ }
+ return items
+}
diff --git a/gopls/internal/lsp/completion_test.go b/gopls/internal/lsp/completion_test.go
new file mode 100644
index 000000000..cd3bcec99
--- /dev/null
+++ b/gopls/internal/lsp/completion_test.go
@@ -0,0 +1,176 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsp
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/lsp/source/completion"
+ "golang.org/x/tools/gopls/internal/lsp/tests"
+ "golang.org/x/tools/gopls/internal/span"
+)
+
+func (r *runner) Completion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) {
+ got := r.callCompletion(t, src, func(opts *source.Options) {
+ opts.DeepCompletion = false
+ opts.Matcher = source.CaseInsensitive
+ opts.CompleteUnimported = false
+ opts.InsertTextFormat = protocol.SnippetTextFormat
+ opts.LiteralCompletions = strings.Contains(string(src.URI()), "literal")
+ opts.ExperimentalPostfixCompletions = strings.Contains(string(src.URI()), "postfix")
+ })
+ got = tests.FilterBuiltins(src, got)
+ want := expected(t, test, items)
+ if diff := tests.DiffCompletionItems(want, got); diff != "" {
+ t.Errorf("mismatching completion items (-want +got):\n%s", diff)
+ }
+}
+
+func (r *runner) CompletionSnippet(t *testing.T, src span.Span, expected tests.CompletionSnippet, placeholders bool, items tests.CompletionItems) {
+ list := r.callCompletion(t, src, func(opts *source.Options) {
+ opts.UsePlaceholders = placeholders
+ opts.DeepCompletion = true
+ opts.Matcher = source.Fuzzy
+ opts.CompleteUnimported = false
+ })
+ got := tests.FindItem(list, *items[expected.CompletionItem])
+ want := expected.PlainSnippet
+ if placeholders {
+ want = expected.PlaceholderSnippet
+ }
+ if diff := tests.DiffSnippets(want, got); diff != "" {
+ t.Errorf("%s", diff)
+ }
+}
+
+func (r *runner) UnimportedCompletion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) {
+ got := r.callCompletion(t, src, func(opts *source.Options) {})
+ got = tests.FilterBuiltins(src, got)
+ want := expected(t, test, items)
+ if diff := tests.CheckCompletionOrder(want, got, false); diff != "" {
+ t.Errorf("%s", diff)
+ }
+}
+
+func (r *runner) DeepCompletion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) {
+ got := r.callCompletion(t, src, func(opts *source.Options) {
+ opts.DeepCompletion = true
+ opts.Matcher = source.CaseInsensitive
+ opts.CompleteUnimported = false
+ })
+ got = tests.FilterBuiltins(src, got)
+ want := expected(t, test, items)
+ if diff := tests.DiffCompletionItems(want, got); diff != "" {
+ t.Errorf("mismatching completion items (-want +got):\n%s", diff)
+ }
+}
+
+func (r *runner) FuzzyCompletion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) {
+ got := r.callCompletion(t, src, func(opts *source.Options) {
+ opts.DeepCompletion = true
+ opts.Matcher = source.Fuzzy
+ opts.CompleteUnimported = false
+ })
+ got = tests.FilterBuiltins(src, got)
+ want := expected(t, test, items)
+ if diff := tests.DiffCompletionItems(want, got); diff != "" {
+ t.Errorf("mismatching completion items (-want +got):\n%s", diff)
+ }
+}
+
+func (r *runner) CaseSensitiveCompletion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) {
+ got := r.callCompletion(t, src, func(opts *source.Options) {
+ opts.Matcher = source.CaseSensitive
+ opts.CompleteUnimported = false
+ })
+ got = tests.FilterBuiltins(src, got)
+ want := expected(t, test, items)
+ if diff := tests.DiffCompletionItems(want, got); diff != "" {
+ t.Errorf("mismatching completion items (-want +got):\n%s", diff)
+ }
+}
+
+func (r *runner) RankCompletion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) {
+ got := r.callCompletion(t, src, func(opts *source.Options) {
+ opts.DeepCompletion = true
+ opts.Matcher = source.Fuzzy
+ opts.CompleteUnimported = false
+ opts.LiteralCompletions = true
+ opts.ExperimentalPostfixCompletions = true
+ })
+ want := expected(t, test, items)
+ if msg := tests.CheckCompletionOrder(want, got, true); msg != "" {
+ t.Errorf("%s", msg)
+ }
+}
+
+func expected(t *testing.T, test tests.Completion, items tests.CompletionItems) []protocol.CompletionItem {
+ t.Helper()
+
+ toProtocolCompletionItem := func(item *completion.CompletionItem) protocol.CompletionItem {
+ pItem := protocol.CompletionItem{
+ Label: item.Label,
+ Kind: item.Kind,
+ Detail: item.Detail,
+ Documentation: &protocol.Or_CompletionItem_documentation{
+ Value: item.Documentation,
+ },
+ InsertText: item.InsertText,
+ TextEdit: &protocol.TextEdit{
+ NewText: item.Snippet(),
+ },
+ // Negate score so best score has lowest sort text like real API.
+ SortText: fmt.Sprint(-item.Score),
+ }
+ if pItem.InsertText == "" {
+ pItem.InsertText = pItem.Label
+ }
+ return pItem
+ }
+
+ var want []protocol.CompletionItem
+ for _, pos := range test.CompletionItems {
+ want = append(want, toProtocolCompletionItem(items[pos]))
+ }
+ return want
+}
+
+func (r *runner) callCompletion(t *testing.T, src span.Span, options func(*source.Options)) []protocol.CompletionItem {
+ t.Helper()
+
+ view, err := r.server.session.ViewOf(src.URI())
+ if err != nil {
+ t.Fatal(err)
+ }
+ original := view.Options()
+ modified := view.Options().Clone()
+ options(modified)
+ view, err = r.server.session.SetViewOptions(r.ctx, view, modified)
+ if err != nil {
+ t.Error(err)
+ return nil
+ }
+ defer r.server.session.SetViewOptions(r.ctx, view, original)
+
+ list, err := r.server.Completion(r.ctx, &protocol.CompletionParams{
+ TextDocumentPositionParams: protocol.TextDocumentPositionParams{
+ TextDocument: protocol.TextDocumentIdentifier{
+ URI: protocol.URIFromSpanURI(src.URI()),
+ },
+ Position: protocol.Position{
+ Line: uint32(src.Start().Line() - 1),
+ Character: uint32(src.Start().Column() - 1),
+ },
+ },
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ return list.Items
+}
diff --git a/internal/lsp/debounce.go b/gopls/internal/lsp/debounce.go
index 06f411471..06f411471 100644
--- a/internal/lsp/debounce.go
+++ b/gopls/internal/lsp/debounce.go
diff --git a/internal/lsp/debounce_test.go b/gopls/internal/lsp/debounce_test.go
index b5597faf5..b5597faf5 100644
--- a/internal/lsp/debounce_test.go
+++ b/gopls/internal/lsp/debounce_test.go
diff --git a/internal/lsp/debug/buildinfo_go1.12.go b/gopls/internal/lsp/debug/buildinfo_go1.12.go
index 2f360dbfc..2f360dbfc 100644
--- a/internal/lsp/debug/buildinfo_go1.12.go
+++ b/gopls/internal/lsp/debug/buildinfo_go1.12.go
diff --git a/internal/lsp/debug/buildinfo_go1.18.go b/gopls/internal/lsp/debug/buildinfo_go1.18.go
index 4121c4bc9..4121c4bc9 100644
--- a/internal/lsp/debug/buildinfo_go1.18.go
+++ b/gopls/internal/lsp/debug/buildinfo_go1.18.go
diff --git a/gopls/internal/lsp/debug/info.go b/gopls/internal/lsp/debug/info.go
new file mode 100644
index 000000000..00752e6f9
--- /dev/null
+++ b/gopls/internal/lsp/debug/info.go
@@ -0,0 +1,254 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package debug exports debug information for gopls.
+package debug
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "reflect"
+ "runtime"
+ "runtime/debug"
+ "sort"
+ "strings"
+
+ "golang.org/x/tools/gopls/internal/lsp/source"
+)
+
+type PrintMode int
+
+const (
+ PlainText = PrintMode(iota)
+ Markdown
+ HTML
+ JSON
+)
+
+// Version is a manually-updated mechanism for tracking versions.
+const Version = "master"
+
+// ServerVersion is the format used by gopls to report its version to the
+// client. This format is structured so that the client can parse it easily.
+type ServerVersion struct {
+ *BuildInfo
+ Version string
+}
+
+// VersionInfo returns the build info for the gopls process. If it was not
+// built in module mode, we return a GOPATH-specific message with the
+// hardcoded version.
+func VersionInfo() *ServerVersion {
+ if info, ok := readBuildInfo(); ok {
+ return getVersion(info)
+ }
+ buildInfo := &BuildInfo{}
+ // go1.17 or earlier, part of s.BuildInfo are embedded fields.
+ buildInfo.Path = "gopls, built in GOPATH mode"
+ buildInfo.GoVersion = runtime.Version()
+ return &ServerVersion{
+ Version: Version,
+ BuildInfo: buildInfo,
+ }
+}
+
+func getVersion(info *BuildInfo) *ServerVersion {
+ return &ServerVersion{
+ Version: Version,
+ BuildInfo: info,
+ }
+}
+
+// PrintServerInfo writes HTML debug info to w for the Instance.
+func (i *Instance) PrintServerInfo(ctx context.Context, w io.Writer) {
+ section(w, HTML, "Server Instance", func() {
+ fmt.Fprintf(w, "Start time: %v\n", i.StartTime)
+ fmt.Fprintf(w, "LogFile: %s\n", i.Logfile)
+ fmt.Fprintf(w, "Working directory: %s\n", i.Workdir)
+ fmt.Fprintf(w, "Address: %s\n", i.ServerAddress)
+ fmt.Fprintf(w, "Debug address: %s\n", i.DebugAddress())
+ })
+ PrintVersionInfo(ctx, w, true, HTML)
+ section(w, HTML, "Command Line", func() {
+ fmt.Fprintf(w, "<a href=/debug/pprof/cmdline>cmdline</a>")
+ })
+}
+
+// PrintVersionInfo writes version information to w, using the output format
+// specified by mode. verbose controls whether additional information is
+// written, including section headers.
+func PrintVersionInfo(_ context.Context, w io.Writer, verbose bool, mode PrintMode) error {
+ info := VersionInfo()
+ if mode == JSON {
+ return printVersionInfoJSON(w, info)
+ }
+
+ if !verbose {
+ printBuildInfo(w, info, false, mode)
+ return nil
+ }
+ section(w, mode, "Build info", func() {
+ printBuildInfo(w, info, true, mode)
+ })
+ return nil
+}
+
+func printVersionInfoJSON(w io.Writer, info *ServerVersion) error {
+ js, err := json.MarshalIndent(info, "", "\t")
+ if err != nil {
+ return err
+ }
+ _, err = fmt.Fprint(w, string(js))
+ return err
+}
+
+func section(w io.Writer, mode PrintMode, title string, body func()) {
+ switch mode {
+ case PlainText:
+ fmt.Fprintln(w, title)
+ fmt.Fprintln(w, strings.Repeat("-", len(title)))
+ body()
+ case Markdown:
+ fmt.Fprintf(w, "#### %s\n\n```\n", title)
+ body()
+ fmt.Fprintf(w, "```\n")
+ case HTML:
+ fmt.Fprintf(w, "<h3>%s</h3>\n<pre>\n", title)
+ body()
+ fmt.Fprint(w, "</pre>\n")
+ }
+}
+
+func printBuildInfo(w io.Writer, info *ServerVersion, verbose bool, mode PrintMode) {
+ fmt.Fprintf(w, "%v %v\n", info.Path, Version)
+ printModuleInfo(w, info.Main, mode)
+ if !verbose {
+ return
+ }
+ for _, dep := range info.Deps {
+ printModuleInfo(w, *dep, mode)
+ }
+ fmt.Fprintf(w, "go: %v\n", info.GoVersion)
+}
+
+func printModuleInfo(w io.Writer, m debug.Module, _ PrintMode) {
+ fmt.Fprintf(w, " %s@%s", m.Path, m.Version)
+ if m.Sum != "" {
+ fmt.Fprintf(w, " %s", m.Sum)
+ }
+ if m.Replace != nil {
+ fmt.Fprintf(w, " => %v", m.Replace.Path)
+ }
+ fmt.Fprintf(w, "\n")
+}
+
+type field struct {
+ index []int
+}
+
+var fields []field
+
+// find all the options. The presumption is that the Options are nested structs
+// and that pointers don't need to be dereferenced
+func swalk(t reflect.Type, ix []int, indent string) {
+ switch t.Kind() {
+ case reflect.Struct:
+ for i := 0; i < t.NumField(); i++ {
+ fld := t.Field(i)
+ ixx := append(append([]int{}, ix...), i)
+ swalk(fld.Type, ixx, indent+". ")
+ }
+ default:
+ // everything is either a struct or a field (that's an assumption about Options)
+ fields = append(fields, field{ix})
+ }
+}
+
+type sessionOption struct {
+ Name string
+ Type string
+ Current string
+ Default string
+}
+
+func showOptions(o *source.Options) []sessionOption {
+ var out []sessionOption
+ t := reflect.TypeOf(*o)
+ swalk(t, []int{}, "")
+ v := reflect.ValueOf(*o)
+ do := reflect.ValueOf(*source.DefaultOptions())
+ for _, f := range fields {
+ val := v.FieldByIndex(f.index)
+ def := do.FieldByIndex(f.index)
+ tx := t.FieldByIndex(f.index)
+ is := strVal(val)
+ was := strVal(def)
+ out = append(out, sessionOption{
+ Name: tx.Name,
+ Type: tx.Type.String(),
+ Current: is,
+ Default: was,
+ })
+ }
+ sort.Slice(out, func(i, j int) bool {
+ rd := out[i].Current == out[i].Default
+ ld := out[j].Current == out[j].Default
+ if rd != ld {
+ return ld
+ }
+ return out[i].Name < out[j].Name
+ })
+ return out
+}
+
+func strVal(val reflect.Value) string {
+ switch val.Kind() {
+ case reflect.Bool:
+ return fmt.Sprintf("%v", val.Interface())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return fmt.Sprintf("%v", val.Interface())
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return fmt.Sprintf("%v", val.Interface())
+ case reflect.Uintptr, reflect.UnsafePointer:
+ return fmt.Sprintf("0x%x", val.Pointer())
+ case reflect.Complex64, reflect.Complex128:
+ return fmt.Sprintf("%v", val.Complex())
+ case reflect.Array, reflect.Slice:
+ ans := []string{}
+ for i := 0; i < val.Len(); i++ {
+ ans = append(ans, strVal(val.Index(i)))
+ }
+ sort.Strings(ans)
+ return fmt.Sprintf("%v", ans)
+ case reflect.Chan, reflect.Func, reflect.Ptr:
+ return val.Kind().String()
+ case reflect.Struct:
+ var x source.Analyzer
+ if val.Type() != reflect.TypeOf(x) {
+ return val.Kind().String()
+ }
+ // this is sort of ugly, but usable
+ str := val.FieldByName("Analyzer").Elem().FieldByName("Doc").String()
+ ix := strings.Index(str, "\n")
+ if ix == -1 {
+ ix = len(str)
+ }
+ return str[:ix]
+ case reflect.String:
+ return fmt.Sprintf("%q", val.Interface())
+ case reflect.Map:
+ ans := []string{}
+ iter := val.MapRange()
+ for iter.Next() {
+ k := iter.Key()
+ v := iter.Value()
+ ans = append(ans, fmt.Sprintf("%s:%s, ", strVal(k), strVal(v)))
+ }
+ sort.Strings(ans)
+ return fmt.Sprintf("%v", ans)
+ }
+ return fmt.Sprintf("??%s??", val.Type())
+}
diff --git a/internal/lsp/debug/info_test.go b/gopls/internal/lsp/debug/info_test.go
index 5a5362841..5a5362841 100644
--- a/internal/lsp/debug/info_test.go
+++ b/gopls/internal/lsp/debug/info_test.go
diff --git a/gopls/internal/lsp/debug/log/log.go b/gopls/internal/lsp/debug/log/log.go
new file mode 100644
index 000000000..e3eaa106f
--- /dev/null
+++ b/gopls/internal/lsp/debug/log/log.go
@@ -0,0 +1,43 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package log provides helper methods for exporting log events to the
+// internal/event package.
+package log
+
+import (
+ "context"
+ "fmt"
+
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/event/label"
+ "golang.org/x/tools/internal/event/tag"
+)
+
+// Level parameterizes log severity.
+type Level int
+
+const (
+ _ Level = iota
+ Error
+ Warning
+ Info
+ Debug
+ Trace
+)
+
+// Log exports a log event labeled with level l.
+func (l Level) Log(ctx context.Context, msg string) {
+ event.Log(ctx, msg, tag.Level.Of(int(l)))
+}
+
+// Logf formats and exports a log event labeled with level l.
+func (l Level) Logf(ctx context.Context, format string, args ...interface{}) {
+ l.Log(ctx, fmt.Sprintf(format, args...))
+}
+
+// LabeledLevel extracts the labeled log l
+func LabeledLevel(lm label.Map) Level {
+ return Level(tag.Level.Get(lm))
+}
diff --git a/gopls/internal/lsp/debug/metrics.go b/gopls/internal/lsp/debug/metrics.go
new file mode 100644
index 000000000..c8da803d6
--- /dev/null
+++ b/gopls/internal/lsp/debug/metrics.go
@@ -0,0 +1,58 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package debug
+
+import (
+ "golang.org/x/tools/internal/event/export/metric"
+ "golang.org/x/tools/internal/event/label"
+ "golang.org/x/tools/internal/event/tag"
+)
+
+var (
+ // the distributions we use for histograms
+ bytesDistribution = []int64{1 << 10, 1 << 11, 1 << 12, 1 << 14, 1 << 16, 1 << 20}
+ millisecondsDistribution = []float64{0.1, 0.5, 1, 2, 5, 10, 50, 100, 500, 1000, 5000, 10000, 50000, 100000}
+
+ receivedBytes = metric.HistogramInt64{
+ Name: "received_bytes",
+ Description: "Distribution of received bytes, by method.",
+ Keys: []label.Key{tag.RPCDirection, tag.Method},
+ Buckets: bytesDistribution,
+ }
+
+ sentBytes = metric.HistogramInt64{
+ Name: "sent_bytes",
+ Description: "Distribution of sent bytes, by method.",
+ Keys: []label.Key{tag.RPCDirection, tag.Method},
+ Buckets: bytesDistribution,
+ }
+
+ latency = metric.HistogramFloat64{
+ Name: "latency",
+ Description: "Distribution of latency in milliseconds, by method.",
+ Keys: []label.Key{tag.RPCDirection, tag.Method},
+ Buckets: millisecondsDistribution,
+ }
+
+ started = metric.Scalar{
+ Name: "started",
+ Description: "Count of RPCs started by method.",
+ Keys: []label.Key{tag.RPCDirection, tag.Method},
+ }
+
+ completed = metric.Scalar{
+ Name: "completed",
+ Description: "Count of RPCs completed by method and status.",
+ Keys: []label.Key{tag.RPCDirection, tag.Method, tag.StatusCode},
+ }
+)
+
+func registerMetrics(m *metric.Config) {
+ receivedBytes.Record(m, tag.ReceivedBytes)
+ sentBytes.Record(m, tag.SentBytes)
+ latency.Record(m, tag.Latency)
+ started.Count(m, tag.Started)
+ completed.Count(m, tag.Latency)
+}
diff --git a/gopls/internal/lsp/debug/rpc.go b/gopls/internal/lsp/debug/rpc.go
new file mode 100644
index 000000000..561002147
--- /dev/null
+++ b/gopls/internal/lsp/debug/rpc.go
@@ -0,0 +1,239 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package debug
+
+import (
+ "context"
+ "fmt"
+ "html/template"
+ "net/http"
+ "sort"
+ "sync"
+ "time"
+
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/event/core"
+ "golang.org/x/tools/internal/event/export"
+ "golang.org/x/tools/internal/event/label"
+ "golang.org/x/tools/internal/event/tag"
+)
+
+var RPCTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(`
+{{define "title"}}RPC Information{{end}}
+{{define "body"}}
+ <H2>Inbound</H2>
+ {{template "rpcSection" .Inbound}}
+ <H2>Outbound</H2>
+ {{template "rpcSection" .Outbound}}
+{{end}}
+{{define "rpcSection"}}
+ {{range .}}<P>
+ <b>{{.Method}}</b> {{.Started}} <a href="/trace/{{.Method}}">traces</a> ({{.InProgress}} in progress)
+ <br>
+ <i>Latency</i> {{with .Latency}}{{.Mean}} ({{.Min}}<{{.Max}}){{end}}
+ <i>By bucket</i> 0s {{range .Latency.Values}}{{if gt .Count 0}}<b>{{.Count}}</b> {{.Limit}} {{end}}{{end}}
+ <br>
+ <i>Received</i> {{.Received}} (avg. {{.ReceivedMean}})
+ <i>Sent</i> {{.Sent}} (avg. {{.SentMean}})
+ <br>
+ <i>Result codes</i> {{range .Codes}}{{.Key}}={{.Count}} {{end}}
+ </P>
+ {{end}}
+{{end}}
+`))
+
+type Rpcs struct { // exported for testing
+ mu sync.Mutex
+ Inbound []*rpcStats // stats for incoming lsp rpcs sorted by method name
+ Outbound []*rpcStats // stats for outgoing lsp rpcs sorted by method name
+}
+
+type rpcStats struct {
+ Method string
+ Started int64
+ Completed int64
+
+ Latency rpcTimeHistogram
+ Received byteUnits
+ Sent byteUnits
+ Codes []*rpcCodeBucket
+}
+
+type rpcTimeHistogram struct {
+ Sum timeUnits
+ Count int64
+ Min timeUnits
+ Max timeUnits
+ Values []rpcTimeBucket
+}
+
+type rpcTimeBucket struct {
+ Limit timeUnits
+ Count int64
+}
+
+type rpcCodeBucket struct {
+ Key string
+ Count int64
+}
+
+func (r *Rpcs) ProcessEvent(ctx context.Context, ev core.Event, lm label.Map) context.Context {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ switch {
+ case event.IsStart(ev):
+ if _, stats := r.getRPCSpan(ctx, ev); stats != nil {
+ stats.Started++
+ }
+ case event.IsEnd(ev):
+ span, stats := r.getRPCSpan(ctx, ev)
+ if stats != nil {
+ endRPC(ctx, ev, span, stats)
+ }
+ case event.IsMetric(ev):
+ sent := byteUnits(tag.SentBytes.Get(lm))
+ rec := byteUnits(tag.ReceivedBytes.Get(lm))
+ if sent != 0 || rec != 0 {
+ if _, stats := r.getRPCSpan(ctx, ev); stats != nil {
+ stats.Sent += sent
+ stats.Received += rec
+ }
+ }
+ }
+ return ctx
+}
+
+func endRPC(ctx context.Context, ev core.Event, span *export.Span, stats *rpcStats) {
+ // update the basic counts
+ stats.Completed++
+
+ // get and record the status code
+ if status := getStatusCode(span); status != "" {
+ var b *rpcCodeBucket
+ for c, entry := range stats.Codes {
+ if entry.Key == status {
+ b = stats.Codes[c]
+ break
+ }
+ }
+ if b == nil {
+ b = &rpcCodeBucket{Key: status}
+ stats.Codes = append(stats.Codes, b)
+ sort.Slice(stats.Codes, func(i int, j int) bool {
+ return stats.Codes[i].Key < stats.Codes[j].Key
+ })
+ }
+ b.Count++
+ }
+
+ // calculate latency if this was an rpc span
+ elapsedTime := span.Finish().At().Sub(span.Start().At())
+ latencyMillis := timeUnits(elapsedTime) / timeUnits(time.Millisecond)
+ if stats.Latency.Count == 0 {
+ stats.Latency.Min = latencyMillis
+ stats.Latency.Max = latencyMillis
+ } else {
+ if stats.Latency.Min > latencyMillis {
+ stats.Latency.Min = latencyMillis
+ }
+ if stats.Latency.Max < latencyMillis {
+ stats.Latency.Max = latencyMillis
+ }
+ }
+ stats.Latency.Count++
+ stats.Latency.Sum += latencyMillis
+ for i := range stats.Latency.Values {
+ if stats.Latency.Values[i].Limit > latencyMillis {
+ stats.Latency.Values[i].Count++
+ break
+ }
+ }
+}
+
+func (r *Rpcs) getRPCSpan(ctx context.Context, ev core.Event) (*export.Span, *rpcStats) {
+ // get the span
+ span := export.GetSpan(ctx)
+ if span == nil {
+ return nil, nil
+ }
+ // use the span start event look up the correct stats block
+ // we do this because it prevents us matching a sub span
+ return span, r.getRPCStats(span.Start())
+}
+
+func (r *Rpcs) getRPCStats(lm label.Map) *rpcStats {
+ method := tag.Method.Get(lm)
+ if method == "" {
+ return nil
+ }
+ set := &r.Inbound
+ if tag.RPCDirection.Get(lm) != tag.Inbound {
+ set = &r.Outbound
+ }
+ // get the record for this method
+ index := sort.Search(len(*set), func(i int) bool {
+ return (*set)[i].Method >= method
+ })
+
+ if index < len(*set) && (*set)[index].Method == method {
+ return (*set)[index]
+ }
+
+ old := *set
+ *set = make([]*rpcStats, len(old)+1)
+ copy(*set, old[:index])
+ copy((*set)[index+1:], old[index:])
+ stats := &rpcStats{Method: method}
+ stats.Latency.Values = make([]rpcTimeBucket, len(millisecondsDistribution))
+ for i, m := range millisecondsDistribution {
+ stats.Latency.Values[i].Limit = timeUnits(m)
+ }
+ (*set)[index] = stats
+ return stats
+}
+
+func (s *rpcStats) InProgress() int64 { return s.Started - s.Completed }
+func (s *rpcStats) SentMean() byteUnits { return s.Sent / byteUnits(s.Started) }
+func (s *rpcStats) ReceivedMean() byteUnits { return s.Received / byteUnits(s.Started) }
+
+func (h *rpcTimeHistogram) Mean() timeUnits { return h.Sum / timeUnits(h.Count) }
+
+func getStatusCode(span *export.Span) string {
+ for _, ev := range span.Events() {
+ if status := tag.StatusCode.Get(ev); status != "" {
+ return status
+ }
+ }
+ return ""
+}
+
+func (r *Rpcs) getData(req *http.Request) interface{} {
+ return r
+}
+
+func units(v float64, suffixes []string) string {
+ s := ""
+ for _, s = range suffixes {
+ n := v / 1000
+ if n < 1 {
+ break
+ }
+ v = n
+ }
+ return fmt.Sprintf("%.2f%s", v, s)
+}
+
+type timeUnits float64
+
+func (v timeUnits) String() string {
+ v = v * 1000 * 1000
+ return units(float64(v), []string{"ns", "μs", "ms", "s"})
+}
+
+type byteUnits float64
+
+func (v byteUnits) String() string {
+ return units(float64(v), []string{"B", "KB", "MB", "GB", "TB"})
+}
diff --git a/gopls/internal/lsp/debug/serve.go b/gopls/internal/lsp/debug/serve.go
new file mode 100644
index 000000000..4bbcbcb30
--- /dev/null
+++ b/gopls/internal/lsp/debug/serve.go
@@ -0,0 +1,909 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package debug
+
+import (
+ "archive/zip"
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "html/template"
+ "io"
+ stdlog "log"
+ "net"
+ "net/http"
+ "net/http/pprof"
+ "os"
+ "path"
+ "path/filepath"
+ "runtime"
+ rpprof "runtime/pprof"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/tools/gopls/internal/lsp/cache"
+ "golang.org/x/tools/gopls/internal/lsp/debug/log"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/internal/bug"
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/event/core"
+ "golang.org/x/tools/internal/event/export"
+ "golang.org/x/tools/internal/event/export/metric"
+ "golang.org/x/tools/internal/event/export/ocagent"
+ "golang.org/x/tools/internal/event/export/prometheus"
+ "golang.org/x/tools/internal/event/keys"
+ "golang.org/x/tools/internal/event/label"
+ "golang.org/x/tools/internal/event/tag"
+)
+
+type contextKeyType int
+
+const (
+ instanceKey contextKeyType = iota
+ traceKey
+)
+
+// An Instance holds all debug information associated with a gopls instance.
+type Instance struct {
+ Logfile string
+ StartTime time.Time
+ ServerAddress string
+ Workdir string
+ OCAgentConfig string
+
+ LogWriter io.Writer
+
+ exporter event.Exporter
+
+ ocagent *ocagent.Exporter
+ prometheus *prometheus.Exporter
+ rpcs *Rpcs
+ traces *traces
+ State *State
+
+ serveMu sync.Mutex
+ debugAddress string
+ listenedDebugAddress string
+}
+
+// State holds debugging information related to the server state.
+type State struct {
+ mu sync.Mutex
+ clients []*Client
+ servers []*Server
+}
+
+func (st *State) Bugs() []bug.Bug {
+ return bug.List()
+}
+
+// Caches returns the set of Cache objects currently being served.
+func (st *State) Caches() []*cache.Cache {
+ var caches []*cache.Cache
+ seen := make(map[string]struct{})
+ for _, client := range st.Clients() {
+ cache := client.Session.Cache()
+ if _, found := seen[cache.ID()]; found {
+ continue
+ }
+ seen[cache.ID()] = struct{}{}
+ caches = append(caches, cache)
+ }
+ return caches
+}
+
+// Cache returns the Cache that matches the supplied id.
+func (st *State) Cache(id string) *cache.Cache {
+ for _, c := range st.Caches() {
+ if c.ID() == id {
+ return c
+ }
+ }
+ return nil
+}
+
+// Sessions returns the set of Session objects currently being served.
+func (st *State) Sessions() []*cache.Session {
+ var sessions []*cache.Session
+ for _, client := range st.Clients() {
+ sessions = append(sessions, client.Session)
+ }
+ return sessions
+}
+
+// Session returns the Session that matches the supplied id.
+func (st *State) Session(id string) *cache.Session {
+ for _, s := range st.Sessions() {
+ if s.ID() == id {
+ return s
+ }
+ }
+ return nil
+}
+
+// Views returns the set of View objects currently being served.
+func (st *State) Views() []*cache.View {
+ var views []*cache.View
+ for _, s := range st.Sessions() {
+ views = append(views, s.Views()...)
+ }
+ return views
+}
+
+// View returns the View that matches the supplied id.
+func (st *State) View(id string) *cache.View {
+ for _, v := range st.Views() {
+ if v.ID() == id {
+ return v
+ }
+ }
+ return nil
+}
+
+// Clients returns the set of Clients currently being served.
+func (st *State) Clients() []*Client {
+ st.mu.Lock()
+ defer st.mu.Unlock()
+ clients := make([]*Client, len(st.clients))
+ copy(clients, st.clients)
+ return clients
+}
+
+// Client returns the Client matching the supplied id.
+func (st *State) Client(id string) *Client {
+ for _, c := range st.Clients() {
+ if c.Session.ID() == id {
+ return c
+ }
+ }
+ return nil
+}
+
+// Servers returns the set of Servers the instance is currently connected to.
+func (st *State) Servers() []*Server {
+ st.mu.Lock()
+ defer st.mu.Unlock()
+ servers := make([]*Server, len(st.servers))
+ copy(servers, st.servers)
+ return servers
+}
+
+// A Client is an incoming connection from a remote client.
+type Client struct {
+ Session *cache.Session
+ DebugAddress string
+ Logfile string
+ GoplsPath string
+ ServerID string
+ Service protocol.Server
+}
+
+// A Server is an outgoing connection to a remote LSP server.
+type Server struct {
+ ID string
+ DebugAddress string
+ Logfile string
+ GoplsPath string
+ ClientID string
+}
+
+// addClient adds a client to the set being served.
+func (st *State) addClient(session *cache.Session) {
+ st.mu.Lock()
+ defer st.mu.Unlock()
+ st.clients = append(st.clients, &Client{Session: session})
+}
+
+// dropClient removes a client from the set being served.
+func (st *State) dropClient(session *cache.Session) {
+ st.mu.Lock()
+ defer st.mu.Unlock()
+ for i, c := range st.clients {
+ if c.Session == session {
+ copy(st.clients[i:], st.clients[i+1:])
+ st.clients[len(st.clients)-1] = nil
+ st.clients = st.clients[:len(st.clients)-1]
+ return
+ }
+ }
+}
+
+// updateServer updates a server to the set being queried. In practice, there should
+// be at most one remote server.
+func (st *State) updateServer(server *Server) {
+ st.mu.Lock()
+ defer st.mu.Unlock()
+ for i, existing := range st.servers {
+ if existing.ID == server.ID {
+ // Replace, rather than mutate, to avoid a race.
+ newServers := make([]*Server, len(st.servers))
+ copy(newServers, st.servers[:i])
+ newServers[i] = server
+ copy(newServers[i+1:], st.servers[i+1:])
+ st.servers = newServers
+ return
+ }
+ }
+ st.servers = append(st.servers, server)
+}
+
+// dropServer drops a server from the set being queried.
+func (st *State) dropServer(id string) {
+ st.mu.Lock()
+ defer st.mu.Unlock()
+ for i, s := range st.servers {
+ if s.ID == id {
+ copy(st.servers[i:], st.servers[i+1:])
+ st.servers[len(st.servers)-1] = nil
+ st.servers = st.servers[:len(st.servers)-1]
+ return
+ }
+ }
+}
+
+// an http.ResponseWriter that filters writes
+type filterResponse struct {
+ w http.ResponseWriter
+ edit func([]byte) []byte
+}
+
+func (c filterResponse) Header() http.Header {
+ return c.w.Header()
+}
+
+func (c filterResponse) Write(buf []byte) (int, error) {
+ ans := c.edit(buf)
+ return c.w.Write(ans)
+}
+
+func (c filterResponse) WriteHeader(n int) {
+ c.w.WriteHeader(n)
+}
+
+// replace annoying nuls by spaces
+func cmdline(w http.ResponseWriter, r *http.Request) {
+ fake := filterResponse{
+ w: w,
+ edit: func(buf []byte) []byte {
+ return bytes.ReplaceAll(buf, []byte{0}, []byte{' '})
+ },
+ }
+ pprof.Cmdline(fake, r)
+}
+
+func (i *Instance) getCache(r *http.Request) interface{} {
+ return i.State.Cache(path.Base(r.URL.Path))
+}
+
+func (i *Instance) getSession(r *http.Request) interface{} {
+ return i.State.Session(path.Base(r.URL.Path))
+}
+
+func (i *Instance) getClient(r *http.Request) interface{} {
+ return i.State.Client(path.Base(r.URL.Path))
+}
+
+func (i *Instance) getServer(r *http.Request) interface{} {
+ i.State.mu.Lock()
+ defer i.State.mu.Unlock()
+ id := path.Base(r.URL.Path)
+ for _, s := range i.State.servers {
+ if s.ID == id {
+ return s
+ }
+ }
+ return nil
+}
+
+func (i *Instance) getView(r *http.Request) interface{} {
+ return i.State.View(path.Base(r.URL.Path))
+}
+
+func (i *Instance) getFile(r *http.Request) interface{} {
+ identifier := path.Base(r.URL.Path)
+ sid := path.Base(path.Dir(r.URL.Path))
+ s := i.State.Session(sid)
+ if s == nil {
+ return nil
+ }
+ for _, o := range s.Overlays() {
+ // TODO(adonovan): understand and document this comparison.
+ if o.FileIdentity().Hash.String() == identifier {
+ return o
+ }
+ }
+ return nil
+}
+
+func (i *Instance) getInfo(r *http.Request) interface{} {
+ buf := &bytes.Buffer{}
+ i.PrintServerInfo(r.Context(), buf)
+ return template.HTML(buf.String())
+}
+
+func (i *Instance) AddService(s protocol.Server, session *cache.Session) {
+ for _, c := range i.State.clients {
+ if c.Session == session {
+ c.Service = s
+ return
+ }
+ }
+ stdlog.Printf("unable to find a Client to add the protocol.Server to")
+}
+
+func getMemory(_ *http.Request) interface{} {
+ var m runtime.MemStats
+ runtime.ReadMemStats(&m)
+ return m
+}
+
+func init() {
+ event.SetExporter(makeGlobalExporter(os.Stderr))
+}
+
+func GetInstance(ctx context.Context) *Instance {
+ if ctx == nil {
+ return nil
+ }
+ v := ctx.Value(instanceKey)
+ if v == nil {
+ return nil
+ }
+ return v.(*Instance)
+}
+
+// WithInstance creates debug instance ready for use using the supplied
+// configuration and stores it in the returned context.
+func WithInstance(ctx context.Context, workdir, agent string) context.Context {
+ i := &Instance{
+ StartTime: time.Now(),
+ Workdir: workdir,
+ OCAgentConfig: agent,
+ }
+ i.LogWriter = os.Stderr
+ ocConfig := ocagent.Discover()
+ //TODO: we should not need to adjust the discovered configuration
+ ocConfig.Address = i.OCAgentConfig
+ i.ocagent = ocagent.Connect(ocConfig)
+ i.prometheus = prometheus.New()
+ i.rpcs = &Rpcs{}
+ i.traces = &traces{}
+ i.State = &State{}
+ i.exporter = makeInstanceExporter(i)
+ return context.WithValue(ctx, instanceKey, i)
+}
+
+// SetLogFile sets the logfile for use with this instance.
+func (i *Instance) SetLogFile(logfile string, isDaemon bool) (func(), error) {
+ // TODO: probably a better solution for deferring closure to the caller would
+ // be for the debug instance to itself be closed, but this fixes the
+ // immediate bug of logs not being captured.
+ closeLog := func() {}
+ if logfile != "" {
+ if logfile == "auto" {
+ if isDaemon {
+ logfile = filepath.Join(os.TempDir(), fmt.Sprintf("gopls-daemon-%d.log", os.Getpid()))
+ } else {
+ logfile = filepath.Join(os.TempDir(), fmt.Sprintf("gopls-%d.log", os.Getpid()))
+ }
+ }
+ f, err := os.Create(logfile)
+ if err != nil {
+ return nil, fmt.Errorf("unable to create log file: %w", err)
+ }
+ closeLog = func() {
+ defer f.Close()
+ }
+ stdlog.SetOutput(io.MultiWriter(os.Stderr, f))
+ i.LogWriter = f
+ }
+ i.Logfile = logfile
+ return closeLog, nil
+}
+
+// Serve starts and runs a debug server in the background on the given addr.
+// It also logs the port the server starts on, to allow for :0 auto assigned
+// ports.
+func (i *Instance) Serve(ctx context.Context, addr string) (string, error) {
+ stdlog.SetFlags(stdlog.Lshortfile)
+ if addr == "" {
+ return "", nil
+ }
+ i.serveMu.Lock()
+ defer i.serveMu.Unlock()
+
+ if i.listenedDebugAddress != "" {
+ // Already serving. Return the bound address.
+ return i.listenedDebugAddress, nil
+ }
+
+ i.debugAddress = addr
+ listener, err := net.Listen("tcp", i.debugAddress)
+ if err != nil {
+ return "", err
+ }
+ i.listenedDebugAddress = listener.Addr().String()
+
+ port := listener.Addr().(*net.TCPAddr).Port
+ if strings.HasSuffix(i.debugAddress, ":0") {
+ stdlog.Printf("debug server listening at http://localhost:%d", port)
+ }
+ event.Log(ctx, "Debug serving", tag.Port.Of(port))
+ go func() {
+ mux := http.NewServeMux()
+ mux.HandleFunc("/", render(MainTmpl, func(*http.Request) interface{} { return i }))
+ mux.HandleFunc("/debug/", render(DebugTmpl, nil))
+ mux.HandleFunc("/debug/pprof/", pprof.Index)
+ mux.HandleFunc("/debug/pprof/cmdline", cmdline)
+ mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
+ mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
+ mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
+ if i.prometheus != nil {
+ mux.HandleFunc("/metrics/", i.prometheus.Serve)
+ }
+ if i.rpcs != nil {
+ mux.HandleFunc("/rpc/", render(RPCTmpl, i.rpcs.getData))
+ }
+ if i.traces != nil {
+ mux.HandleFunc("/trace/", render(TraceTmpl, i.traces.getData))
+ }
+ mux.HandleFunc("/cache/", render(CacheTmpl, i.getCache))
+ mux.HandleFunc("/session/", render(SessionTmpl, i.getSession))
+ mux.HandleFunc("/view/", render(ViewTmpl, i.getView))
+ mux.HandleFunc("/client/", render(ClientTmpl, i.getClient))
+ mux.HandleFunc("/server/", render(ServerTmpl, i.getServer))
+ mux.HandleFunc("/file/", render(FileTmpl, i.getFile))
+ mux.HandleFunc("/info", render(InfoTmpl, i.getInfo))
+ mux.HandleFunc("/memory", render(MemoryTmpl, getMemory))
+
+ // Internal debugging helpers.
+ mux.HandleFunc("/_dogc", func(w http.ResponseWriter, r *http.Request) {
+ runtime.GC()
+ runtime.GC()
+ runtime.GC()
+ http.Error(w, "OK", 200)
+ })
+ mux.HandleFunc("/_makeabug", func(w http.ResponseWriter, r *http.Request) {
+ bug.Report("bug here", nil)
+ http.Error(w, "made a bug", http.StatusOK)
+ })
+
+ if err := http.Serve(listener, mux); err != nil {
+ event.Error(ctx, "Debug server failed", err)
+ return
+ }
+ event.Log(ctx, "Debug server finished")
+ }()
+ return i.listenedDebugAddress, nil
+}
+
+func (i *Instance) DebugAddress() string {
+ i.serveMu.Lock()
+ defer i.serveMu.Unlock()
+ return i.debugAddress
+}
+
+func (i *Instance) ListenedDebugAddress() string {
+ i.serveMu.Lock()
+ defer i.serveMu.Unlock()
+ return i.listenedDebugAddress
+}
+
+// MonitorMemory starts recording memory statistics each second.
+func (i *Instance) MonitorMemory(ctx context.Context) {
+ tick := time.NewTicker(time.Second)
+ nextThresholdGiB := uint64(1)
+ go func() {
+ for {
+ <-tick.C
+ var mem runtime.MemStats
+ runtime.ReadMemStats(&mem)
+ if mem.HeapAlloc < nextThresholdGiB*1<<30 {
+ continue
+ }
+ if err := i.writeMemoryDebug(nextThresholdGiB, true); err != nil {
+ event.Error(ctx, "writing memory debug info", err)
+ }
+ if err := i.writeMemoryDebug(nextThresholdGiB, false); err != nil {
+ event.Error(ctx, "writing memory debug info", err)
+ }
+ event.Log(ctx, fmt.Sprintf("Wrote memory usage debug info to %v", os.TempDir()))
+ nextThresholdGiB++
+ }
+ }()
+}
+
+func (i *Instance) writeMemoryDebug(threshold uint64, withNames bool) error {
+ suffix := "withnames"
+ if !withNames {
+ suffix = "nonames"
+ }
+
+ filename := fmt.Sprintf("gopls.%d-%dGiB-%s.zip", os.Getpid(), threshold, suffix)
+ zipf, err := os.OpenFile(filepath.Join(os.TempDir(), filename), os.O_CREATE|os.O_RDWR, 0644)
+ if err != nil {
+ return err
+ }
+ zipw := zip.NewWriter(zipf)
+
+ f, err := zipw.Create("heap.pb.gz")
+ if err != nil {
+ return err
+ }
+ if err := rpprof.Lookup("heap").WriteTo(f, 0); err != nil {
+ return err
+ }
+
+ f, err = zipw.Create("goroutines.txt")
+ if err != nil {
+ return err
+ }
+ if err := rpprof.Lookup("goroutine").WriteTo(f, 1); err != nil {
+ return err
+ }
+
+ if err := zipw.Close(); err != nil {
+ return err
+ }
+ return zipf.Close()
+}
+
+func makeGlobalExporter(stderr io.Writer) event.Exporter {
+ p := export.Printer{}
+ var pMu sync.Mutex
+ return func(ctx context.Context, ev core.Event, lm label.Map) context.Context {
+ i := GetInstance(ctx)
+
+ if event.IsLog(ev) {
+ // Don't log context cancellation errors.
+ if err := keys.Err.Get(ev); errors.Is(err, context.Canceled) {
+ return ctx
+ }
+ // Make sure any log messages without an instance go to stderr.
+ if i == nil {
+ pMu.Lock()
+ p.WriteEvent(stderr, ev, lm)
+ pMu.Unlock()
+ }
+ level := log.LabeledLevel(lm)
+ // Exclude trace logs from LSP logs.
+ if level < log.Trace {
+ ctx = protocol.LogEvent(ctx, ev, lm, messageType(level))
+ }
+ }
+ if i == nil {
+ return ctx
+ }
+ return i.exporter(ctx, ev, lm)
+ }
+}
+
+func messageType(l log.Level) protocol.MessageType {
+ switch l {
+ case log.Error:
+ return protocol.Error
+ case log.Warning:
+ return protocol.Warning
+ case log.Debug:
+ return protocol.Log
+ }
+ return protocol.Info
+}
+
+func makeInstanceExporter(i *Instance) event.Exporter {
+ exporter := func(ctx context.Context, ev core.Event, lm label.Map) context.Context {
+ if i.ocagent != nil {
+ ctx = i.ocagent.ProcessEvent(ctx, ev, lm)
+ }
+ if i.prometheus != nil {
+ ctx = i.prometheus.ProcessEvent(ctx, ev, lm)
+ }
+ if i.rpcs != nil {
+ ctx = i.rpcs.ProcessEvent(ctx, ev, lm)
+ }
+ if i.traces != nil {
+ ctx = i.traces.ProcessEvent(ctx, ev, lm)
+ }
+ if event.IsLog(ev) {
+ if s := cache.KeyCreateSession.Get(ev); s != nil {
+ i.State.addClient(s)
+ }
+ if sid := tag.NewServer.Get(ev); sid != "" {
+ i.State.updateServer(&Server{
+ ID: sid,
+ Logfile: tag.Logfile.Get(ev),
+ DebugAddress: tag.DebugAddress.Get(ev),
+ GoplsPath: tag.GoplsPath.Get(ev),
+ ClientID: tag.ClientID.Get(ev),
+ })
+ }
+ if s := cache.KeyShutdownSession.Get(ev); s != nil {
+ i.State.dropClient(s)
+ }
+ if sid := tag.EndServer.Get(ev); sid != "" {
+ i.State.dropServer(sid)
+ }
+ if s := cache.KeyUpdateSession.Get(ev); s != nil {
+ if c := i.State.Client(s.ID()); c != nil {
+ c.DebugAddress = tag.DebugAddress.Get(ev)
+ c.Logfile = tag.Logfile.Get(ev)
+ c.ServerID = tag.ServerID.Get(ev)
+ c.GoplsPath = tag.GoplsPath.Get(ev)
+ }
+ }
+ }
+ return ctx
+ }
+ // StdTrace must be above export.Spans below (by convention, export
+ // middleware applies its wrapped exporter last).
+ exporter = StdTrace(exporter)
+ metrics := metric.Config{}
+ registerMetrics(&metrics)
+ exporter = metrics.Exporter(exporter)
+ exporter = export.Spans(exporter)
+ exporter = export.Labels(exporter)
+ return exporter
+}
+
+type dataFunc func(*http.Request) interface{}
+
+func render(tmpl *template.Template, fun dataFunc) func(http.ResponseWriter, *http.Request) {
+ return func(w http.ResponseWriter, r *http.Request) {
+ var data interface{}
+ if fun != nil {
+ data = fun(r)
+ }
+ if err := tmpl.Execute(w, data); err != nil {
+ event.Error(context.Background(), "", err)
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ }
+ }
+}
+
+func commas(s string) string {
+ for i := len(s); i > 3; {
+ i -= 3
+ s = s[:i] + "," + s[i:]
+ }
+ return s
+}
+
+func fuint64(v uint64) string {
+ return commas(strconv.FormatUint(v, 10))
+}
+
+func fuint32(v uint32) string {
+ return commas(strconv.FormatUint(uint64(v), 10))
+}
+
+func fcontent(v []byte) string {
+ return string(v)
+}
+
+var BaseTemplate = template.Must(template.New("").Parse(`
+<html>
+<head>
+<title>{{template "title" .}}</title>
+<style>
+.profile-name{
+ display:inline-block;
+ width:6rem;
+}
+td.value {
+ text-align: right;
+}
+ul.events {
+ list-style-type: none;
+}
+
+</style>
+{{block "head" .}}{{end}}
+</head>
+<body>
+<a href="/">Main</a>
+<a href="/info">Info</a>
+<a href="/memory">Memory</a>
+<a href="/metrics">Metrics</a>
+<a href="/rpc">RPC</a>
+<a href="/trace">Trace</a>
+<hr>
+<h1>{{template "title" .}}</h1>
+{{block "body" .}}
+Unknown page
+{{end}}
+</body>
+</html>
+
+{{define "cachelink"}}<a href="/cache/{{.}}">Cache {{.}}</a>{{end}}
+{{define "clientlink"}}<a href="/client/{{.}}">Client {{.}}</a>{{end}}
+{{define "serverlink"}}<a href="/server/{{.}}">Server {{.}}</a>{{end}}
+{{define "sessionlink"}}<a href="/session/{{.}}">Session {{.}}</a>{{end}}
+{{define "viewlink"}}<a href="/view/{{.}}">View {{.}}</a>{{end}}
+`)).Funcs(template.FuncMap{
+ "fuint64": fuint64,
+ "fuint32": fuint32,
+ "fcontent": fcontent,
+ "localAddress": func(s string) string {
+ // Try to translate loopback addresses to localhost, both for cosmetics and
+ // because unspecified ipv6 addresses can break links on Windows.
+ //
+ // TODO(rfindley): In the future, it would be better not to assume the
+ // server is running on localhost, and instead construct this address using
+ // the remote host.
+ host, port, err := net.SplitHostPort(s)
+ if err != nil {
+ return s
+ }
+ ip := net.ParseIP(host)
+ if ip == nil {
+ return s
+ }
+ if ip.IsLoopback() || ip.IsUnspecified() {
+ return "localhost:" + port
+ }
+ return s
+ },
+ "options": func(s *cache.Session) []sessionOption {
+ return showOptions(s.Options())
+ },
+})
+
+var MainTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(`
+{{define "title"}}GoPls server information{{end}}
+{{define "body"}}
+<h2>Caches</h2>
+<ul>{{range .State.Caches}}<li>{{template "cachelink" .ID}}</li>{{end}}</ul>
+<h2>Sessions</h2>
+<ul>{{range .State.Sessions}}<li>{{template "sessionlink" .ID}} from {{template "cachelink" .Cache.ID}}</li>{{end}}</ul>
+<h2>Clients</h2>
+<ul>{{range .State.Clients}}<li>{{template "clientlink" .Session.ID}}</li>{{end}}</ul>
+<h2>Servers</h2>
+<ul>{{range .State.Servers}}<li>{{template "serverlink" .ID}}</li>{{end}}</ul>
+<h2>Bug reports</h2>
+<dl>{{range .State.Bugs}}<dt>{{.Key}}</dt><dd>{{.Description}}</dd>{{end}}</dl>
+{{end}}
+`))
+
+var InfoTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(`
+{{define "title"}}GoPls version information{{end}}
+{{define "body"}}
+{{.}}
+{{end}}
+`))
+
+var MemoryTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(`
+{{define "title"}}GoPls memory usage{{end}}
+{{define "head"}}<meta http-equiv="refresh" content="5">{{end}}
+{{define "body"}}
+<h2>Stats</h2>
+<table>
+<tr><td class="label">Allocated bytes</td><td class="value">{{fuint64 .HeapAlloc}}</td></tr>
+<tr><td class="label">Total allocated bytes</td><td class="value">{{fuint64 .TotalAlloc}}</td></tr>
+<tr><td class="label">System bytes</td><td class="value">{{fuint64 .Sys}}</td></tr>
+<tr><td class="label">Heap system bytes</td><td class="value">{{fuint64 .HeapSys}}</td></tr>
+<tr><td class="label">Malloc calls</td><td class="value">{{fuint64 .Mallocs}}</td></tr>
+<tr><td class="label">Frees</td><td class="value">{{fuint64 .Frees}}</td></tr>
+<tr><td class="label">Idle heap bytes</td><td class="value">{{fuint64 .HeapIdle}}</td></tr>
+<tr><td class="label">In use bytes</td><td class="value">{{fuint64 .HeapInuse}}</td></tr>
+<tr><td class="label">Released to system bytes</td><td class="value">{{fuint64 .HeapReleased}}</td></tr>
+<tr><td class="label">Heap object count</td><td class="value">{{fuint64 .HeapObjects}}</td></tr>
+<tr><td class="label">Stack in use bytes</td><td class="value">{{fuint64 .StackInuse}}</td></tr>
+<tr><td class="label">Stack from system bytes</td><td class="value">{{fuint64 .StackSys}}</td></tr>
+<tr><td class="label">Bucket hash bytes</td><td class="value">{{fuint64 .BuckHashSys}}</td></tr>
+<tr><td class="label">GC metadata bytes</td><td class="value">{{fuint64 .GCSys}}</td></tr>
+<tr><td class="label">Off heap bytes</td><td class="value">{{fuint64 .OtherSys}}</td></tr>
+</table>
+<h2>By size</h2>
+<table>
+<tr><th>Size</th><th>Mallocs</th><th>Frees</th></tr>
+{{range .BySize}}<tr><td class="value">{{fuint32 .Size}}</td><td class="value">{{fuint64 .Mallocs}}</td><td class="value">{{fuint64 .Frees}}</td></tr>{{end}}
+</table>
+{{end}}
+`))
+
+var DebugTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(`
+{{define "title"}}GoPls Debug pages{{end}}
+{{define "body"}}
+<a href="/debug/pprof">Profiling</a>
+{{end}}
+`))
+
+var CacheTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(`
+{{define "title"}}Cache {{.ID}}{{end}}
+{{define "body"}}
+<h2>memoize.Store entries</h2>
+<ul>{{range $k,$v := .MemStats}}<li>{{$k}} - {{$v}}</li>{{end}}</ul>
+{{end}}
+`))
+
+var ClientTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(`
+{{define "title"}}Client {{.Session.ID}}{{end}}
+{{define "body"}}
+Using session: <b>{{template "sessionlink" .Session.ID}}</b><br>
+{{if .DebugAddress}}Debug this client at: <a href="http://{{localAddress .DebugAddress}}">{{localAddress .DebugAddress}}</a><br>{{end}}
+Logfile: {{.Logfile}}<br>
+Gopls Path: {{.GoplsPath}}<br>
+<h2>Diagnostics</h2>
+{{/*Service: []protocol.Server; each server has map[uri]fileReports;
+ each fileReport: map[diagnosticSoure]diagnosticReport
+ diagnosticSource is one of 5 source
+ diagnosticReport: snapshotID and map[hash]*source.Diagnostic
+ sourceDiagnostic: struct {
+ Range protocol.Range
+ Message string
+ Source string
+ Code string
+ CodeHref string
+ Severity protocol.DiagnosticSeverity
+ Tags []protocol.DiagnosticTag
+
+ Related []RelatedInformation
+ }
+ RelatedInformation: struct {
+ URI span.URI
+ Range protocol.Range
+ Message string
+ }
+ */}}
+<ul>{{range $k, $v := .Service.Diagnostics}}<li>{{$k}}:<ol>{{range $v}}<li>{{.}}</li>{{end}}</ol></li>{{end}}</ul>
+{{end}}
+`))
+
+var ServerTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(`
+{{define "title"}}Server {{.ID}}{{end}}
+{{define "body"}}
+{{if .DebugAddress}}Debug this server at: <a href="http://{{localAddress .DebugAddress}}">{{localAddress .DebugAddress}}</a><br>{{end}}
+Logfile: {{.Logfile}}<br>
+Gopls Path: {{.GoplsPath}}<br>
+{{end}}
+`))
+
+var SessionTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(`
+{{define "title"}}Session {{.ID}}{{end}}
+{{define "body"}}
+From: <b>{{template "cachelink" .Cache.ID}}</b><br>
+<h2>Views</h2>
+<ul>{{range .Views}}<li>{{.Name}} is {{template "viewlink" .ID}} in {{.Folder}}</li>{{end}}</ul>
+<h2>Overlays</h2>
+{{$session := .}}
+<ul>{{range .Overlays}}
+<li>
+<a href="/file/{{$session.ID}}/{{.FileIdentity.Hash}}">{{.FileIdentity.URI}}</a>
+</li>{{end}}</ul>
+<h2>Options</h2>
+{{range options .}}
+<p><b>{{.Name}}</b> {{.Type}}</p>
+<p><i>default:</i> {{.Default}}</p>
+{{if ne .Default .Current}}<p><i>current:</i> {{.Current}}</p>{{end}}
+{{end}}
+{{end}}
+`))
+
+var ViewTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(`
+{{define "title"}}View {{.ID}}{{end}}
+{{define "body"}}
+Name: <b>{{.Name}}</b><br>
+Folder: <b>{{.Folder}}</b><br>
+<h2>Environment</h2>
+<ul>{{range .Options.Env}}<li>{{.}}</li>{{end}}</ul>
+{{end}}
+`))
+
+var FileTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(`
+{{define "title"}}Overlay {{.FileIdentity.Hash}}{{end}}
+{{define "body"}}
+{{with .}}
+ URI: <b>{{.URI}}</b><br>
+ Identifier: <b>{{.FileIdentity.Hash}}</b><br>
+ Version: <b>{{.Version}}</b><br>
+ Kind: <b>{{.Kind}}</b><br>
+{{end}}
+<h3>Contents</h3>
+<pre>{{fcontent .Read}}</pre>
+{{end}}
+`))
diff --git a/gopls/internal/lsp/debug/trace.go b/gopls/internal/lsp/debug/trace.go
new file mode 100644
index 000000000..bb402cfaa
--- /dev/null
+++ b/gopls/internal/lsp/debug/trace.go
@@ -0,0 +1,233 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package debug
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "html/template"
+ "net/http"
+ "runtime/trace"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/event/core"
+ "golang.org/x/tools/internal/event/export"
+ "golang.org/x/tools/internal/event/label"
+)
+
+var TraceTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(`
+{{define "title"}}Trace Information{{end}}
+{{define "body"}}
+ {{range .Traces}}<a href="/trace/{{.Name}}">{{.Name}}</a> last: {{.Last.Duration}}, longest: {{.Longest.Duration}}<br>{{end}}
+ {{if .Selected}}
+ <H2>{{.Selected.Name}}</H2>
+ {{if .Selected.Last}}<H3>Last</H3><ul>{{template "details" .Selected.Last}}</ul>{{end}}
+ {{if .Selected.Longest}}<H3>Longest</H3><ul>{{template "details" .Selected.Longest}}</ul>{{end}}
+ {{end}}
+{{end}}
+{{define "details"}}
+ <li>{{.Offset}} {{.Name}} {{.Duration}} {{.Tags}}</li>
+ {{if .Events}}<ul class=events>{{range .Events}}<li>{{.Offset}} {{.Tags}}</li>{{end}}</ul>{{end}}
+ {{if .Children}}<ul>{{range .Children}}{{template "details" .}}{{end}}</ul>{{end}}
+{{end}}
+`))
+
+type traces struct {
+ mu sync.Mutex
+ sets map[string]*traceSet
+ unfinished map[export.SpanContext]*traceData
+}
+
+type TraceResults struct { // exported for testing
+ Traces []*traceSet
+ Selected *traceSet
+}
+
+type traceSet struct {
+ Name string
+ Last *traceData
+ Longest *traceData
+}
+
+type traceData struct {
+ TraceID export.TraceID
+ SpanID export.SpanID
+ ParentID export.SpanID
+ Name string
+ Start time.Time
+ Finish time.Time
+ Offset time.Duration
+ Duration time.Duration
+ Tags string
+ Events []traceEvent
+ Children []*traceData
+}
+
+type traceEvent struct {
+ Time time.Time
+ Offset time.Duration
+ Tags string
+}
+
+func StdTrace(exporter event.Exporter) event.Exporter {
+ return func(ctx context.Context, ev core.Event, lm label.Map) context.Context {
+ span := export.GetSpan(ctx)
+ if span == nil {
+ return exporter(ctx, ev, lm)
+ }
+ switch {
+ case event.IsStart(ev):
+ if span.ParentID.IsValid() {
+ region := trace.StartRegion(ctx, span.Name)
+ ctx = context.WithValue(ctx, traceKey, region)
+ } else {
+ var task *trace.Task
+ ctx, task = trace.NewTask(ctx, span.Name)
+ ctx = context.WithValue(ctx, traceKey, task)
+ }
+ // Log the start event as it may contain useful labels.
+ msg := formatEvent(ctx, ev, lm)
+ trace.Log(ctx, "start", msg)
+ case event.IsLog(ev):
+ category := ""
+ if event.IsError(ev) {
+ category = "error"
+ }
+ msg := formatEvent(ctx, ev, lm)
+ trace.Log(ctx, category, msg)
+ case event.IsEnd(ev):
+ if v := ctx.Value(traceKey); v != nil {
+ v.(interface{ End() }).End()
+ }
+ }
+ return exporter(ctx, ev, lm)
+ }
+}
+
+func formatEvent(ctx context.Context, ev core.Event, lm label.Map) string {
+ buf := &bytes.Buffer{}
+ p := export.Printer{}
+ p.WriteEvent(buf, ev, lm)
+ return buf.String()
+}
+
+func (t *traces) ProcessEvent(ctx context.Context, ev core.Event, lm label.Map) context.Context {
+ span := export.GetSpan(ctx)
+ if span == nil {
+ return ctx
+ }
+
+ switch {
+ case event.IsStart(ev):
+ // Just starting: add it to the unfinished map.
+ // Allocate before the critical section.
+ td := &traceData{
+ TraceID: span.ID.TraceID,
+ SpanID: span.ID.SpanID,
+ ParentID: span.ParentID,
+ Name: span.Name,
+ Start: span.Start().At(),
+ Tags: renderLabels(span.Start()),
+ }
+
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ if t.sets == nil {
+ t.sets = make(map[string]*traceSet)
+ t.unfinished = make(map[export.SpanContext]*traceData)
+ }
+ t.unfinished[span.ID] = td
+ // and wire up parents if we have them
+ if !span.ParentID.IsValid() {
+ return ctx
+ }
+ parentID := export.SpanContext{TraceID: span.ID.TraceID, SpanID: span.ParentID}
+ parent, found := t.unfinished[parentID]
+ if !found {
+ // trace had an invalid parent, so it cannot itself be valid
+ return ctx
+ }
+ parent.Children = append(parent.Children, td)
+
+ case event.IsEnd(ev):
+ // Finishing: must be already in the map.
+ // Allocate events before the critical section.
+ events := span.Events()
+ tdEvents := make([]traceEvent, len(events))
+ for i, event := range events {
+ tdEvents[i] = traceEvent{
+ Time: event.At(),
+ Tags: renderLabels(event),
+ }
+ }
+
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ td, found := t.unfinished[span.ID]
+ if !found {
+ return ctx // if this happens we are in a bad place
+ }
+ delete(t.unfinished, span.ID)
+
+ td.Finish = span.Finish().At()
+ td.Duration = span.Finish().At().Sub(span.Start().At())
+ td.Events = tdEvents
+
+ set, ok := t.sets[span.Name]
+ if !ok {
+ set = &traceSet{Name: span.Name}
+ t.sets[span.Name] = set
+ }
+ set.Last = td
+ if set.Longest == nil || set.Last.Duration > set.Longest.Duration {
+ set.Longest = set.Last
+ }
+ if !td.ParentID.IsValid() {
+ fillOffsets(td, td.Start)
+ }
+ }
+ return ctx
+}
+
+func (t *traces) getData(req *http.Request) interface{} {
+ if len(t.sets) == 0 {
+ return nil
+ }
+ data := TraceResults{}
+ data.Traces = make([]*traceSet, 0, len(t.sets))
+ for _, set := range t.sets {
+ data.Traces = append(data.Traces, set)
+ }
+ sort.Slice(data.Traces, func(i, j int) bool { return data.Traces[i].Name < data.Traces[j].Name })
+ if bits := strings.SplitN(req.URL.Path, "/trace/", 2); len(bits) > 1 {
+ data.Selected = t.sets[bits[1]]
+ }
+ return data
+}
+
+func fillOffsets(td *traceData, start time.Time) {
+ td.Offset = td.Start.Sub(start)
+ for i := range td.Events {
+ td.Events[i].Offset = td.Events[i].Time.Sub(start)
+ }
+ for _, child := range td.Children {
+ fillOffsets(child, start)
+ }
+}
+
+func renderLabels(labels label.List) string {
+ buf := &bytes.Buffer{}
+ for index := 0; labels.Valid(index); index++ {
+ if l := labels.Label(index); l.Valid() {
+ fmt.Fprintf(buf, "%v ", l)
+ }
+ }
+ return buf.String()
+}
diff --git a/gopls/internal/lsp/definition.go b/gopls/internal/lsp/definition.go
new file mode 100644
index 000000000..6259d4dbb
--- /dev/null
+++ b/gopls/internal/lsp/definition.go
@@ -0,0 +1,52 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsp
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/lsp/template"
+)
+
+func (s *Server) definition(ctx context.Context, params *protocol.DefinitionParams) ([]protocol.Location, error) {
+ // TODO(rfindley): definition requests should be multiplexed across all views.
+ snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.UnknownKind)
+ defer release()
+ if !ok {
+ return nil, err
+ }
+ switch kind := snapshot.View().FileKind(fh); kind {
+ case source.Tmpl:
+ return template.Definition(snapshot, fh, params.Position)
+ case source.Go:
+ // Partial support for jumping from linkname directive (position at 2nd argument).
+ locations, err := source.LinknameDefinition(ctx, snapshot, fh, params.Position)
+ if !errors.Is(err, source.ErrNoLinkname) {
+ return locations, err
+ }
+ return source.Definition(ctx, snapshot, fh, params.Position)
+ default:
+ return nil, fmt.Errorf("can't find definitions for file type %s", kind)
+ }
+}
+
+func (s *Server) typeDefinition(ctx context.Context, params *protocol.TypeDefinitionParams) ([]protocol.Location, error) {
+ // TODO(rfindley): type definition requests should be multiplexed across all views.
+ snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.Go)
+ defer release()
+ if !ok {
+ return nil, err
+ }
+ switch kind := snapshot.View().FileKind(fh); kind {
+ case source.Go:
+ return source.TypeDefinition(ctx, snapshot, fh, params.Position)
+ default:
+ return nil, fmt.Errorf("can't find type definitions for file type %s", kind)
+ }
+}
diff --git a/gopls/internal/lsp/diagnostics.go b/gopls/internal/lsp/diagnostics.go
new file mode 100644
index 000000000..99319bc49
--- /dev/null
+++ b/gopls/internal/lsp/diagnostics.go
@@ -0,0 +1,764 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsp
+
+import (
+ "context"
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/sync/errgroup"
+ "golang.org/x/tools/gopls/internal/lsp/debug/log"
+ "golang.org/x/tools/gopls/internal/lsp/mod"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/lsp/template"
+ "golang.org/x/tools/gopls/internal/lsp/work"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/event/tag"
+ "golang.org/x/tools/internal/xcontext"
+)
+
+// diagnosticSource differentiates different sources of diagnostics.
+type diagnosticSource int
+
+const (
+ modSource diagnosticSource = iota
+ gcDetailsSource
+ analysisSource
+ typeCheckSource
+ orphanedSource
+ workSource
+ modCheckUpgradesSource
+ modVulncheckSource // source.Govulncheck + source.Vulncheck
+)
+
+// A diagnosticReport holds results for a single diagnostic source.
+type diagnosticReport struct {
+ snapshotID source.GlobalSnapshotID // global snapshot ID on which the report was computed
+ publishedHash string // last published hash for this (URI, source)
+ diags map[string]*source.Diagnostic
+}
+
+// fileReports holds a collection of diagnostic reports for a single file, as
+// well as the hash of the last published set of diagnostics.
+type fileReports struct {
+ // publishedSnapshotID is the last snapshot ID for which we have "published"
+ // diagnostics (though the publishDiagnostics notification may not have
+ // actually been sent, if nothing changed).
+ //
+ // Specifically, publishedSnapshotID is updated to a later snapshot ID when
+ // we either:
+ // (1) publish diagnostics for the file for a snapshot, or
+ // (2) determine that published diagnostics are valid for a new snapshot.
+ //
+ // Notably publishedSnapshotID may not match the snapshot id on individual reports in
+ // the reports map:
+ // - we may have published partial diagnostics from only a subset of
+ // diagnostic sources for which new results have been computed, or
+ // - we may have started computing reports for an even new snapshot, but not
+ // yet published.
+ //
+ // This prevents gopls from publishing stale diagnostics.
+ publishedSnapshotID source.GlobalSnapshotID
+
+ // publishedHash is a hash of the latest diagnostics published for the file.
+ publishedHash string
+
+ // If set, mustPublish marks diagnostics as needing publication, independent
+ // of whether their publishedHash has changed.
+ mustPublish bool
+
+ // The last stored diagnostics for each diagnostic source.
+ reports map[diagnosticSource]diagnosticReport
+}
+
+func (d diagnosticSource) String() string {
+ switch d {
+ case modSource:
+ return "FromSource"
+ case gcDetailsSource:
+ return "FromGCDetails"
+ case analysisSource:
+ return "FromAnalysis"
+ case typeCheckSource:
+ return "FromTypeChecking"
+ case orphanedSource:
+ return "FromOrphans"
+ case workSource:
+ return "FromGoWork"
+ case modCheckUpgradesSource:
+ return "FromCheckForUpgrades"
+ case modVulncheckSource:
+ return "FromModVulncheck"
+ default:
+ return fmt.Sprintf("From?%d?", d)
+ }
+}
+
+// hashDiagnostics computes a hash to identify diags.
+func hashDiagnostics(diags ...*source.Diagnostic) string {
+ source.SortDiagnostics(diags)
+ h := sha256.New()
+ for _, d := range diags {
+ for _, t := range d.Tags {
+ fmt.Fprintf(h, "%s", t)
+ }
+ for _, r := range d.Related {
+ fmt.Fprintf(h, "%s%s%s", r.Location.URI.SpanURI(), r.Message, r.Location.Range)
+ }
+ fmt.Fprintf(h, "%s%s%s%s", d.Message, d.Range, d.Severity, d.Source)
+ }
+ return fmt.Sprintf("%x", h.Sum(nil))
+}
+
+func (s *Server) diagnoseDetached(snapshot source.Snapshot) {
+ ctx := snapshot.BackgroundContext()
+ ctx = xcontext.Detach(ctx)
+ s.diagnose(ctx, snapshot, false)
+ s.publishDiagnostics(ctx, true, snapshot)
+}
+
+func (s *Server) diagnoseSnapshots(snapshots map[source.Snapshot][]span.URI, onDisk bool) {
+ var diagnosticWG sync.WaitGroup
+ for snapshot, uris := range snapshots {
+ diagnosticWG.Add(1)
+ go func(snapshot source.Snapshot, uris []span.URI) {
+ defer diagnosticWG.Done()
+ s.diagnoseSnapshot(snapshot, uris, onDisk)
+ }(snapshot, uris)
+ }
+ diagnosticWG.Wait()
+}
+
+func (s *Server) diagnoseSnapshot(snapshot source.Snapshot, changedURIs []span.URI, onDisk bool) {
+ ctx := snapshot.BackgroundContext()
+ ctx, done := event.Start(ctx, "Server.diagnoseSnapshot", source.SnapshotLabels(snapshot)...)
+ defer done()
+
+ delay := snapshot.View().Options().DiagnosticsDelay
+ if delay > 0 {
+ // 2-phase diagnostics.
+ //
+ // The first phase just parses and type-checks (but
+ // does not analyze) packages directly affected by
+ // file modifications.
+ //
+ // The second phase runs analysis on the entire snapshot,
+ // and is debounced by the configured delay.
+ s.diagnoseChangedFiles(ctx, snapshot, changedURIs, onDisk)
+ s.publishDiagnostics(ctx, false, snapshot)
+
+ // We debounce diagnostics separately for each view, using the snapshot
+ // local ID as logical ordering.
+ //
+ // TODO(rfindley): it would be cleaner to simply put the diagnostic
+ // debouncer on the view, and remove the "key" argument to debouncing.
+ if ok := <-s.diagDebouncer.debounce(snapshot.View().Name(), snapshot.SequenceID(), time.After(delay)); ok {
+ s.diagnose(ctx, snapshot, false)
+ s.publishDiagnostics(ctx, true, snapshot)
+ }
+ return
+ }
+
+ // Ignore possible workspace configuration warnings in the normal flow.
+ s.diagnose(ctx, snapshot, false)
+ s.publishDiagnostics(ctx, true, snapshot)
+}
+
+func (s *Server) diagnoseChangedFiles(ctx context.Context, snapshot source.Snapshot, uris []span.URI, onDisk bool) {
+ ctx, done := event.Start(ctx, "Server.diagnoseChangedFiles", source.SnapshotLabels(snapshot)...)
+ defer done()
+
+ // TODO(adonovan): safety: refactor so that group.Go is called
+ // in a second loop, so that if we should later add an early
+ // return to the first loop, we don't leak goroutines.
+ var group errgroup.Group
+ seen := make(map[*source.Metadata]bool)
+ for _, uri := range uris {
+ // If the change is only on-disk and the file is not open, don't
+ // directly request its package. It may not be a workspace package.
+ if onDisk && !snapshot.IsOpen(uri) {
+ continue
+ }
+ // If the file is not known to the snapshot (e.g., if it was deleted),
+ // don't diagnose it.
+ if snapshot.FindFile(uri) == nil {
+ continue
+ }
+
+ // Don't request type-checking for builtin.go: it's not a real package.
+ if snapshot.IsBuiltin(ctx, uri) {
+ continue
+ }
+
+ // Find all packages that include this file and diagnose them in parallel.
+ metas, err := snapshot.MetadataForFile(ctx, uri)
+ if err != nil {
+ // TODO(findleyr): we should probably do something with the error here,
+ // but as of now this can fail repeatedly if load fails, so can be too
+ // noisy to log (and we'll handle things later in the slow pass).
+ continue
+ }
+ for _, m := range metas {
+ if m.IsIntermediateTestVariant() {
+ continue
+ }
+ if !seen[m] {
+ seen[m] = true
+ m := m
+ group.Go(func() error {
+ s.diagnosePkg(ctx, snapshot, m, false)
+ return nil // error result is ignored
+ })
+ }
+ }
+ }
+ group.Wait() // ignore error
+}
+
+// diagnose is a helper function for running diagnostics with a given context.
+// Do not call it directly. forceAnalysis is only true for testing purposes.
+func (s *Server) diagnose(ctx context.Context, snapshot source.Snapshot, forceAnalysis bool) {
+ ctx, done := event.Start(ctx, "Server.diagnose", source.SnapshotLabels(snapshot)...)
+ defer done()
+
+ // Wait for a free diagnostics slot.
+ // TODO(adonovan): opt: shouldn't it be the analysis implementation's
+ // job to de-dup and limit resource consumption? In any case this
+ // this function spends most its time waiting for awaitLoaded, at
+ // least initially.
+ select {
+ case <-ctx.Done():
+ return
+ case s.diagnosticsSema <- struct{}{}:
+ }
+ defer func() {
+ <-s.diagnosticsSema
+ }()
+
+ // common code for dispatching diagnostics
+ store := func(dsource diagnosticSource, operation string, diagsByFile map[span.URI][]*source.Diagnostic, err error, merge bool) {
+ if err != nil {
+ event.Error(ctx, "warning: while "+operation, err, source.SnapshotLabels(snapshot)...)
+ }
+ for uri, diags := range diagsByFile {
+ if uri == "" {
+ event.Error(ctx, "missing URI while "+operation, fmt.Errorf("empty URI"), tag.Directory.Of(snapshot.View().Folder().Filename()))
+ continue
+ }
+ s.storeDiagnostics(snapshot, uri, dsource, diags, merge)
+ }
+ }
+
+ // Diagnose go.mod upgrades.
+ upgradeReports, upgradeErr := mod.UpgradeDiagnostics(ctx, snapshot)
+ if ctx.Err() != nil {
+ log.Trace.Log(ctx, "diagnose cancelled")
+ return
+ }
+ store(modCheckUpgradesSource, "diagnosing go.mod upgrades", upgradeReports, upgradeErr, true)
+
+ // Diagnose go.work file.
+ workReports, workErr := work.Diagnostics(ctx, snapshot)
+ if ctx.Err() != nil {
+ log.Trace.Log(ctx, "diagnose cancelled")
+ return
+ }
+ store(workSource, "diagnosing go.work file", workReports, workErr, true)
+
+ // Diagnose go.mod file.
+ // (This step demands type checking of all active packages:
+ // the bottleneck in the startup sequence for a big workspace.)
+ modReports, modErr := mod.Diagnostics(ctx, snapshot)
+ if ctx.Err() != nil {
+ log.Trace.Log(ctx, "diagnose cancelled")
+ return
+ }
+ store(modSource, "diagnosing go.mod file", modReports, modErr, true)
+
+ // Diagnose vulnerabilities.
+ vulnReports, vulnErr := mod.VulnerabilityDiagnostics(ctx, snapshot)
+ if ctx.Err() != nil {
+ log.Trace.Log(ctx, "diagnose cancelled")
+ return
+ }
+ store(modVulncheckSource, "diagnosing vulnerabilities", vulnReports, vulnErr, false)
+
+ activeMetas, activeErr := snapshot.ActiveMetadata(ctx)
+ if s.shouldIgnoreError(ctx, snapshot, activeErr) {
+ return
+ }
+ criticalErr := snapshot.GetCriticalError(ctx)
+ if ctx.Err() != nil { // must check ctx after GetCriticalError
+ return
+ }
+
+ // Show the error as a progress error report so that it appears in the
+ // status bar. If a client doesn't support progress reports, the error
+ // will still be shown as a ShowMessage. If there is no error, any running
+ // error progress reports will be closed.
+ s.showCriticalErrorStatus(ctx, snapshot, criticalErr)
+
+ // Diagnose template (.tmpl) files.
+ for _, f := range snapshot.Templates() {
+ diags := template.Diagnose(f)
+ s.storeDiagnostics(snapshot, f.URI(), typeCheckSource, diags, true)
+ }
+
+ // If there are no workspace packages, there is nothing to diagnose and
+ // there are no orphaned files.
+ if len(activeMetas) == 0 {
+ return
+ }
+
+ // Run go/analysis diagnosis of packages in parallel.
+ // TODO(adonovan): opt: it may be more efficient to
+ // have diagnosePkg take a set of packages.
+ //
+ // TODO(adonovan): opt: since the new analysis driver does its
+ // own type checking, we could strength-reduce pkg to
+ // PackageID and get this step started as soon as the set of
+ // active package IDs are known, without waiting for them to load.
+ var (
+ wg sync.WaitGroup
+ seen = map[span.URI]struct{}{}
+ )
+ for _, m := range activeMetas {
+ for _, uri := range m.CompiledGoFiles {
+ seen[uri] = struct{}{}
+ }
+
+ wg.Add(1)
+ go func(m *source.Metadata) {
+ defer wg.Done()
+ s.diagnosePkg(ctx, snapshot, m, forceAnalysis)
+ }(m)
+ }
+ wg.Wait()
+
+ // Orphaned files.
+ // Confirm that every opened file belongs to a package (if any exist in
+ // the workspace). Otherwise, add a diagnostic to the file.
+ for _, o := range s.session.Overlays() {
+ if _, ok := seen[o.URI()]; ok {
+ continue
+ }
+ diagnostic := s.checkForOrphanedFile(ctx, snapshot, o)
+ if diagnostic == nil {
+ continue
+ }
+ s.storeDiagnostics(snapshot, o.URI(), orphanedSource, []*source.Diagnostic{diagnostic}, true)
+ }
+}
+
+func (s *Server) diagnosePkg(ctx context.Context, snapshot source.Snapshot, m *source.Metadata, alwaysAnalyze bool) {
+ ctx, done := event.Start(ctx, "Server.diagnosePkg", append(source.SnapshotLabels(snapshot), tag.Package.Of(string(m.ID)))...)
+ defer done()
+ enableDiagnostics := false
+ includeAnalysis := alwaysAnalyze // only run analyses for packages with open files
+ for _, uri := range m.CompiledGoFiles {
+ enableDiagnostics = enableDiagnostics || !snapshot.IgnoredFile(uri)
+ includeAnalysis = includeAnalysis || snapshot.IsOpen(uri)
+ }
+ // Don't show any diagnostics on ignored files.
+ if !enableDiagnostics {
+ return
+ }
+
+ diags, err := snapshot.PackageDiagnostics(ctx, m.ID)
+ if err != nil {
+ event.Error(ctx, "warning: diagnostics failed", err, append(source.SnapshotLabels(snapshot), tag.Package.Of(string(m.ID)))...)
+ return
+ }
+
+ // Get diagnostics from analysis framework.
+ // This includes type-error analyzers, which suggest fixes to compiler errors.
+ var analysisDiags map[span.URI][]*source.Diagnostic
+ if includeAnalysis {
+ diags, err := source.Analyze(ctx, snapshot, m.ID, false)
+ if err != nil {
+ event.Error(ctx, "warning: analyzing package", err, append(source.SnapshotLabels(snapshot), tag.Package.Of(string(m.ID)))...)
+ return
+ }
+ analysisDiags = diags
+ }
+
+ // For each file, update the server's diagnostics state.
+ for _, uri := range m.CompiledGoFiles {
+ // builtin.go exists only for documentation purposes and
+ // is not valid Go code. Don't report distracting errors.
+ if snapshot.IsBuiltin(ctx, uri) {
+ continue
+ }
+
+ pkgDiags := diags[uri]
+ var tdiags, adiags []*source.Diagnostic
+ source.CombineDiagnostics(pkgDiags, analysisDiags[uri], &tdiags, &adiags)
+ s.storeDiagnostics(snapshot, uri, typeCheckSource, tdiags, true)
+ s.storeDiagnostics(snapshot, uri, analysisSource, adiags, true)
+ }
+
+ // If gc optimization details are requested, add them to the
+ // diagnostic reports.
+ s.gcOptimizationDetailsMu.Lock()
+ _, enableGCDetails := s.gcOptimizationDetails[m.ID]
+ s.gcOptimizationDetailsMu.Unlock()
+ if enableGCDetails {
+ gcReports, err := source.GCOptimizationDetails(ctx, snapshot, m)
+ if err != nil {
+ event.Error(ctx, "warning: gc details", err, append(source.SnapshotLabels(snapshot), tag.Package.Of(string(m.ID)))...)
+ }
+ s.gcOptimizationDetailsMu.Lock()
+ _, enableGCDetails := s.gcOptimizationDetails[m.ID]
+
+ // NOTE(golang/go#44826): hold the gcOptimizationDetails lock, and re-check
+ // whether gc optimization details are enabled, while storing gc_details
+ // results. This ensures that the toggling of GC details and clearing of
+ // diagnostics does not race with storing the results here.
+ if enableGCDetails {
+ for uri, diags := range gcReports {
+ fh := snapshot.FindFile(uri)
+ // Don't publish gc details for unsaved buffers, since the underlying
+ // logic operates on the file on disk.
+ if fh == nil || !fh.Saved() {
+ continue
+ }
+ s.storeDiagnostics(snapshot, uri, gcDetailsSource, diags, true)
+ }
+ }
+ s.gcOptimizationDetailsMu.Unlock()
+ }
+}
+
+// mustPublishDiagnostics marks the uri as needing publication, independent of
+// whether the published contents have changed.
+//
+// This can be used for ensuring gopls publishes diagnostics after certain file
+// events.
+func (s *Server) mustPublishDiagnostics(uri span.URI) {
+ s.diagnosticsMu.Lock()
+ defer s.diagnosticsMu.Unlock()
+
+ if s.diagnostics[uri] == nil {
+ s.diagnostics[uri] = &fileReports{
+ publishedHash: hashDiagnostics(), // Hash for 0 diagnostics.
+ reports: map[diagnosticSource]diagnosticReport{},
+ }
+ }
+ s.diagnostics[uri].mustPublish = true
+}
+
+// storeDiagnostics stores results from a single diagnostic source. If merge is
+// true, it merges results into any existing results for this snapshot.
+//
+// TODO(hyangah): investigate whether we can unconditionally overwrite previous report.diags
+// with the new diags and eliminate the need for the `merge` flag.
+func (s *Server) storeDiagnostics(snapshot source.Snapshot, uri span.URI, dsource diagnosticSource, diags []*source.Diagnostic, merge bool) {
+ // Safeguard: ensure that the file actually exists in the snapshot
+ // (see golang.org/issues/38602).
+ fh := snapshot.FindFile(uri)
+ if fh == nil {
+ return
+ }
+
+ s.diagnosticsMu.Lock()
+ defer s.diagnosticsMu.Unlock()
+ if s.diagnostics[uri] == nil {
+ s.diagnostics[uri] = &fileReports{
+ publishedHash: hashDiagnostics(), // Hash for 0 diagnostics.
+ reports: map[diagnosticSource]diagnosticReport{},
+ }
+ }
+ report := s.diagnostics[uri].reports[dsource]
+ // Don't set obsolete diagnostics.
+ if report.snapshotID > snapshot.GlobalID() {
+ return
+ }
+ if report.diags == nil || report.snapshotID != snapshot.GlobalID() || !merge {
+ report.diags = map[string]*source.Diagnostic{}
+ }
+ report.snapshotID = snapshot.GlobalID()
+ for _, d := range diags {
+ report.diags[hashDiagnostics(d)] = d
+ }
+ s.diagnostics[uri].reports[dsource] = report
+}
+
+// clearDiagnosticSource clears all diagnostics for a given source type. It is
+// necessary for cases where diagnostics have been invalidated by something
+// other than a snapshot change, for example when gc_details is toggled.
+func (s *Server) clearDiagnosticSource(dsource diagnosticSource) {
+ s.diagnosticsMu.Lock()
+ defer s.diagnosticsMu.Unlock()
+ for _, reports := range s.diagnostics {
+ delete(reports.reports, dsource)
+ }
+}
+
+const WorkspaceLoadFailure = "Error loading workspace"
+
+// showCriticalErrorStatus shows the error as a progress report.
+// If the error is nil, it clears any existing error progress report.
+func (s *Server) showCriticalErrorStatus(ctx context.Context, snapshot source.Snapshot, err *source.CriticalError) {
+ s.criticalErrorStatusMu.Lock()
+ defer s.criticalErrorStatusMu.Unlock()
+
+ // Remove all newlines so that the error message can be formatted in a
+ // status bar.
+ var errMsg string
+ if err != nil {
+ event.Error(ctx, "errors loading workspace", err.MainError, source.SnapshotLabels(snapshot)...)
+ for _, d := range err.Diagnostics {
+ s.storeDiagnostics(snapshot, d.URI, modSource, []*source.Diagnostic{d}, true)
+ }
+ errMsg = strings.ReplaceAll(err.MainError.Error(), "\n", " ")
+ }
+
+ if s.criticalErrorStatus == nil {
+ if errMsg != "" {
+ s.criticalErrorStatus = s.progress.Start(ctx, WorkspaceLoadFailure, errMsg, nil, nil)
+ }
+ return
+ }
+
+ // If an error is already shown to the user, update it or mark it as
+ // resolved.
+ if errMsg == "" {
+ s.criticalErrorStatus.End(ctx, "Done.")
+ s.criticalErrorStatus = nil
+ } else {
+ s.criticalErrorStatus.Report(ctx, errMsg, 0)
+ }
+}
+
+// checkForOrphanedFile checks that the given URIs can be mapped to packages.
+// If they cannot and the workspace is not otherwise unloaded, it also surfaces
+// a warning, suggesting that the user check the file for build tags.
+func (s *Server) checkForOrphanedFile(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) *source.Diagnostic {
+ // TODO(rfindley): this function may fail to produce a diagnostic for a
+ // variety of reasons, some of which should probably not be ignored. For
+ // example, should this function be tolerant of the case where fh does not
+ // exist, or does not have a package name?
+ //
+ // It would be better to panic or report a bug in several of the cases below,
+ // so that we can move toward guaranteeing we show the user a meaningful
+ // error whenever it makes sense.
+ if snapshot.View().FileKind(fh) != source.Go {
+ return nil
+ }
+ // builtin files won't have a package, but they are never orphaned.
+ if snapshot.IsBuiltin(ctx, fh.URI()) {
+ return nil
+ }
+
+ // This call has the effect of inserting fh into snapshot.files,
+ // where for better or worse (actually: just worse) it influences
+ // the sets of open, known, and orphaned files.
+ snapshot.GetFile(ctx, fh.URI())
+
+ metas, _ := snapshot.MetadataForFile(ctx, fh.URI())
+ if len(metas) > 0 || ctx.Err() != nil {
+ return nil // no package, or cancelled
+ }
+ // Inv: file does not belong to a package we know about.
+ pgf, err := snapshot.ParseGo(ctx, fh, source.ParseHeader)
+ if err != nil {
+ return nil
+ }
+ if !pgf.File.Name.Pos().IsValid() {
+ return nil
+ }
+ rng, err := pgf.NodeRange(pgf.File.Name)
+ if err != nil {
+ return nil
+ }
+ // If the file no longer has a name ending in .go, this diagnostic is wrong
+ if filepath.Ext(fh.URI().Filename()) != ".go" {
+ return nil
+ }
+ // TODO(rstambler): We should be able to parse the build tags in the
+ // file and show a more specific error message. For now, put the diagnostic
+ // on the package declaration.
+ return &source.Diagnostic{
+ URI: fh.URI(),
+ Range: rng,
+ Severity: protocol.SeverityWarning,
+ Source: source.ListError,
+ Message: fmt.Sprintf(`No packages found for open file %s: %v.
+If this file contains build tags, try adding "-tags=<build tag>" to your gopls "buildFlags" configuration (see (https://github.com/golang/tools/blob/master/gopls/doc/settings.md#buildflags-string).
+Otherwise, see the troubleshooting guidelines for help investigating (https://github.com/golang/tools/blob/master/gopls/doc/troubleshooting.md).
+`, fh.URI().Filename(), err),
+ }
+}
+
+// publishDiagnostics collects and publishes any unpublished diagnostic reports.
+func (s *Server) publishDiagnostics(ctx context.Context, final bool, snapshot source.Snapshot) {
+ ctx, done := event.Start(ctx, "Server.publishDiagnostics", source.SnapshotLabels(snapshot)...)
+ defer done()
+
+ s.diagnosticsMu.Lock()
+ defer s.diagnosticsMu.Unlock()
+
+ for uri, r := range s.diagnostics {
+ // Global snapshot IDs are monotonic, so we use them to enforce an ordering
+ // for diagnostics.
+ //
+ // If we've already delivered diagnostics for a future snapshot for this
+ // file, do not deliver them. See golang/go#42837 for an example of why
+ // this is necessary.
+ //
+ // TODO(rfindley): even using a global snapshot ID, this mechanism is
+ // potentially racy: elsewhere in the code (e.g. invalidateContent) we
+ // allow for multiple views track a given file. In this case, we should
+ // either only report diagnostics for snapshots from the "best" view of a
+ // URI, or somehow merge diagnostics from multiple views.
+ if r.publishedSnapshotID > snapshot.GlobalID() {
+ continue
+ }
+
+ anyReportsChanged := false
+ reportHashes := map[diagnosticSource]string{}
+ var diags []*source.Diagnostic
+ for dsource, report := range r.reports {
+ if report.snapshotID != snapshot.GlobalID() {
+ continue
+ }
+ var reportDiags []*source.Diagnostic
+ for _, d := range report.diags {
+ diags = append(diags, d)
+ reportDiags = append(reportDiags, d)
+ }
+ hash := hashDiagnostics(reportDiags...)
+ if hash != report.publishedHash {
+ anyReportsChanged = true
+ }
+ reportHashes[dsource] = hash
+ }
+
+ if !final && !anyReportsChanged {
+ // Don't invalidate existing reports on the client if we haven't got any
+ // new information.
+ continue
+ }
+
+ source.SortDiagnostics(diags)
+ hash := hashDiagnostics(diags...)
+ if hash == r.publishedHash && !r.mustPublish {
+ // Update snapshotID to be the latest snapshot for which this diagnostic
+ // hash is valid.
+ r.publishedSnapshotID = snapshot.GlobalID()
+ continue
+ }
+ var version int32
+ if fh := snapshot.FindFile(uri); fh != nil { // file may have been deleted
+ version = fh.Version()
+ }
+ if err := s.client.PublishDiagnostics(ctx, &protocol.PublishDiagnosticsParams{
+ Diagnostics: toProtocolDiagnostics(diags),
+ URI: protocol.URIFromSpanURI(uri),
+ Version: version,
+ }); err == nil {
+ r.publishedHash = hash
+ r.mustPublish = false // diagnostics have been successfully published
+ r.publishedSnapshotID = snapshot.GlobalID()
+ for dsource, hash := range reportHashes {
+ report := r.reports[dsource]
+ report.publishedHash = hash
+ r.reports[dsource] = report
+ }
+ } else {
+ if ctx.Err() != nil {
+ // Publish may have failed due to a cancelled context.
+ log.Trace.Log(ctx, "publish cancelled")
+ return
+ }
+ event.Error(ctx, "publishReports: failed to deliver diagnostic", err, tag.URI.Of(uri))
+ }
+ }
+}
+
+func toProtocolDiagnostics(diagnostics []*source.Diagnostic) []protocol.Diagnostic {
+ reports := []protocol.Diagnostic{}
+ for _, diag := range diagnostics {
+ pdiag := protocol.Diagnostic{
+ // diag.Message might start with \n or \t
+ Message: strings.TrimSpace(diag.Message),
+ Range: diag.Range,
+ Severity: diag.Severity,
+ Source: string(diag.Source),
+ Tags: diag.Tags,
+ RelatedInformation: diag.Related,
+ }
+ if diag.Code != "" {
+ pdiag.Code = diag.Code
+ }
+ if diag.CodeHref != "" {
+ pdiag.CodeDescription = &protocol.CodeDescription{Href: diag.CodeHref}
+ }
+ reports = append(reports, pdiag)
+ }
+ return reports
+}
+
+func (s *Server) shouldIgnoreError(ctx context.Context, snapshot source.Snapshot, err error) bool {
+ if err == nil { // if there is no error at all
+ return false
+ }
+ if errors.Is(err, context.Canceled) {
+ return true
+ }
+ // If the folder has no Go code in it, we shouldn't spam the user with a warning.
+ var hasGo bool
+ _ = filepath.Walk(snapshot.View().Folder().Filename(), func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ if !strings.HasSuffix(info.Name(), ".go") {
+ return nil
+ }
+ hasGo = true
+ return errors.New("done")
+ })
+ return !hasGo
+}
+
+// Diagnostics formattedfor the debug server
+// (all the relevant fields of Server are private)
+// (The alternative is to export them)
+func (s *Server) Diagnostics() map[string][]string {
+ ans := make(map[string][]string)
+ s.diagnosticsMu.Lock()
+ defer s.diagnosticsMu.Unlock()
+ for k, v := range s.diagnostics {
+ fn := k.Filename()
+ for typ, d := range v.reports {
+ if len(d.diags) == 0 {
+ continue
+ }
+ for _, dx := range d.diags {
+ ans[fn] = append(ans[fn], auxStr(dx, d, typ))
+ }
+ }
+ }
+ return ans
+}
+
+func auxStr(v *source.Diagnostic, d diagnosticReport, typ diagnosticSource) string {
+ // Tags? RelatedInformation?
+ msg := fmt.Sprintf("(%s)%q(source:%q,code:%q,severity:%s,snapshot:%d,type:%s)",
+ v.Range, v.Message, v.Source, v.Code, v.Severity, d.snapshotID, typ)
+ for _, r := range v.Related {
+ msg += fmt.Sprintf(" [%s:%s,%q]", r.Location.URI.SpanURI().Filename(), r.Location.Range, r.Message)
+ }
+ return msg
+}
diff --git a/gopls/internal/lsp/fake/client.go b/gopls/internal/lsp/fake/client.go
new file mode 100644
index 000000000..bb9bda0fa
--- /dev/null
+++ b/gopls/internal/lsp/fake/client.go
@@ -0,0 +1,187 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fake
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+
+ "golang.org/x/tools/gopls/internal/lsp/glob"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+)
+
+// ClientHooks are a set of optional hooks called during handling of
+// the corresponding client method (see protocol.Client for the the
+// LSP server-to-client RPCs) in order to make test expectations
+// awaitable.
+type ClientHooks struct {
+ OnLogMessage func(context.Context, *protocol.LogMessageParams) error
+ OnDiagnostics func(context.Context, *protocol.PublishDiagnosticsParams) error
+ OnWorkDoneProgressCreate func(context.Context, *protocol.WorkDoneProgressCreateParams) error
+ OnProgress func(context.Context, *protocol.ProgressParams) error
+ OnShowMessage func(context.Context, *protocol.ShowMessageParams) error
+ OnShowMessageRequest func(context.Context, *protocol.ShowMessageRequestParams) error
+ OnRegisterCapability func(context.Context, *protocol.RegistrationParams) error
+ OnUnregisterCapability func(context.Context, *protocol.UnregistrationParams) error
+ OnApplyEdit func(context.Context, *protocol.ApplyWorkspaceEditParams) error
+}
+
+// Client is an adapter that converts an *Editor into an LSP Client. It mostly
+// delegates functionality to hooks that can be configured by tests.
+type Client struct {
+ editor *Editor
+ hooks ClientHooks
+ skipApplyEdits bool // don't apply edits from ApplyEdit downcalls to Editor
+}
+
+func (c *Client) CodeLensRefresh(context.Context) error { return nil }
+
+func (c *Client) InlayHintRefresh(context.Context) error { return nil }
+
+func (c *Client) DiagnosticRefresh(context.Context) error { return nil }
+
+func (c *Client) InlineValueRefresh(context.Context) error { return nil }
+
+func (c *Client) SemanticTokensRefresh(context.Context) error { return nil }
+
+func (c *Client) LogTrace(context.Context, *protocol.LogTraceParams) error { return nil }
+
+func (c *Client) ShowMessage(ctx context.Context, params *protocol.ShowMessageParams) error {
+ if c.hooks.OnShowMessage != nil {
+ return c.hooks.OnShowMessage(ctx, params)
+ }
+ return nil
+}
+
+func (c *Client) ShowMessageRequest(ctx context.Context, params *protocol.ShowMessageRequestParams) (*protocol.MessageActionItem, error) {
+ if c.hooks.OnShowMessageRequest != nil {
+ if err := c.hooks.OnShowMessageRequest(ctx, params); err != nil {
+ return nil, err
+ }
+ }
+ if len(params.Actions) == 0 || len(params.Actions) > 1 {
+ return nil, fmt.Errorf("fake editor cannot handle multiple action items")
+ }
+ return &params.Actions[0], nil
+}
+
+func (c *Client) LogMessage(ctx context.Context, params *protocol.LogMessageParams) error {
+ if c.hooks.OnLogMessage != nil {
+ return c.hooks.OnLogMessage(ctx, params)
+ }
+ return nil
+}
+
+func (c *Client) Event(ctx context.Context, event *interface{}) error {
+ return nil
+}
+
+func (c *Client) PublishDiagnostics(ctx context.Context, params *protocol.PublishDiagnosticsParams) error {
+ if c.hooks.OnDiagnostics != nil {
+ return c.hooks.OnDiagnostics(ctx, params)
+ }
+ return nil
+}
+
+func (c *Client) WorkspaceFolders(context.Context) ([]protocol.WorkspaceFolder, error) {
+ return []protocol.WorkspaceFolder{}, nil
+}
+
+func (c *Client) Configuration(_ context.Context, p *protocol.ParamConfiguration) ([]interface{}, error) {
+ results := make([]interface{}, len(p.Items))
+ for i, item := range p.Items {
+ if item.Section == "gopls" {
+ c.editor.mu.Lock()
+ results[i] = c.editor.settingsLocked()
+ c.editor.mu.Unlock()
+ }
+ }
+ return results, nil
+}
+
+func (c *Client) RegisterCapability(ctx context.Context, params *protocol.RegistrationParams) error {
+ if c.hooks.OnRegisterCapability != nil {
+ if err := c.hooks.OnRegisterCapability(ctx, params); err != nil {
+ return err
+ }
+ }
+ // Update file watching patterns.
+ //
+ // TODO(rfindley): We could verify more here, like verify that the
+ // registration ID is distinct, and that the capability is not currently
+ // registered.
+ for _, registration := range params.Registrations {
+ if registration.Method == "workspace/didChangeWatchedFiles" {
+ // Marshal and unmarshal to interpret RegisterOptions as
+ // DidChangeWatchedFilesRegistrationOptions.
+ raw, err := json.Marshal(registration.RegisterOptions)
+ if err != nil {
+ return fmt.Errorf("marshaling registration options: %v", err)
+ }
+ var opts protocol.DidChangeWatchedFilesRegistrationOptions
+ if err := json.Unmarshal(raw, &opts); err != nil {
+ return fmt.Errorf("unmarshaling registration options: %v", err)
+ }
+ var globs []*glob.Glob
+ for _, watcher := range opts.Watchers {
+ // TODO(rfindley): honor the watch kind.
+ g, err := glob.Parse(watcher.GlobPattern)
+ if err != nil {
+ return fmt.Errorf("error parsing glob pattern %q: %v", watcher.GlobPattern, err)
+ }
+ globs = append(globs, g)
+ }
+ c.editor.mu.Lock()
+ c.editor.watchPatterns = globs
+ c.editor.mu.Unlock()
+ }
+ }
+ return nil
+}
+
+func (c *Client) UnregisterCapability(ctx context.Context, params *protocol.UnregistrationParams) error {
+ if c.hooks.OnUnregisterCapability != nil {
+ return c.hooks.OnUnregisterCapability(ctx, params)
+ }
+ return nil
+}
+
+func (c *Client) Progress(ctx context.Context, params *protocol.ProgressParams) error {
+ if c.hooks.OnProgress != nil {
+ return c.hooks.OnProgress(ctx, params)
+ }
+ return nil
+}
+
+func (c *Client) WorkDoneProgressCreate(ctx context.Context, params *protocol.WorkDoneProgressCreateParams) error {
+ if c.hooks.OnWorkDoneProgressCreate != nil {
+ return c.hooks.OnWorkDoneProgressCreate(ctx, params)
+ }
+ return nil
+}
+
+func (c *Client) ShowDocument(context.Context, *protocol.ShowDocumentParams) (*protocol.ShowDocumentResult, error) {
+ return nil, nil
+}
+
+func (c *Client) ApplyEdit(ctx context.Context, params *protocol.ApplyWorkspaceEditParams) (*protocol.ApplyWorkspaceEditResult, error) {
+ if len(params.Edit.Changes) != 0 {
+ return &protocol.ApplyWorkspaceEditResult{FailureReason: "Edit.Changes is unsupported"}, nil
+ }
+ if c.hooks.OnApplyEdit != nil {
+ if err := c.hooks.OnApplyEdit(ctx, params); err != nil {
+ return nil, err
+ }
+ }
+ if !c.skipApplyEdits {
+ for _, change := range params.Edit.DocumentChanges {
+ if err := c.editor.applyDocumentChange(ctx, change); err != nil {
+ return nil, err
+ }
+ }
+ }
+ return &protocol.ApplyWorkspaceEditResult{Applied: true}, nil
+}
diff --git a/internal/lsp/fake/doc.go b/gopls/internal/lsp/fake/doc.go
index 6051781de..6051781de 100644
--- a/internal/lsp/fake/doc.go
+++ b/gopls/internal/lsp/fake/doc.go
diff --git a/gopls/internal/lsp/fake/edit.go b/gopls/internal/lsp/fake/edit.go
new file mode 100644
index 000000000..40762f2ff
--- /dev/null
+++ b/gopls/internal/lsp/fake/edit.go
@@ -0,0 +1,51 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fake
+
+import (
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/internal/diff"
+)
+
+// NewEdit creates an edit replacing all content between the 0-based
+// (startLine, startColumn) and (endLine, endColumn) with text.
+//
+// Columns measure UTF-16 codes.
+func NewEdit(startLine, startColumn, endLine, endColumn uint32, text string) protocol.TextEdit {
+ return protocol.TextEdit{
+ Range: protocol.Range{
+ Start: protocol.Position{Line: startLine, Character: startColumn},
+ End: protocol.Position{Line: endLine, Character: endColumn},
+ },
+ NewText: text,
+ }
+}
+
+func EditToChangeEvent(e protocol.TextEdit) protocol.TextDocumentContentChangeEvent {
+ var rng protocol.Range = e.Range
+ return protocol.TextDocumentContentChangeEvent{
+ Range: &rng,
+ Text: e.NewText,
+ }
+}
+
+// applyEdits applies the edits to a file with the specified lines,
+// and returns a new slice containing the lines of the patched file.
+// It is a wrapper around diff.Apply; see that function for preconditions.
+func applyEdits(mapper *protocol.Mapper, edits []protocol.TextEdit, windowsLineEndings bool) ([]byte, error) {
+ diffEdits, err := source.FromProtocolEdits(mapper, edits)
+ if err != nil {
+ return nil, err
+ }
+ patched, err := diff.ApplyBytes(mapper.Content, diffEdits)
+ if err != nil {
+ return nil, err
+ }
+ if windowsLineEndings {
+ patched = toWindowsLineEndings(patched)
+ }
+ return patched, nil
+}
diff --git a/gopls/internal/lsp/fake/edit_test.go b/gopls/internal/lsp/fake/edit_test.go
new file mode 100644
index 000000000..97e2c73e4
--- /dev/null
+++ b/gopls/internal/lsp/fake/edit_test.go
@@ -0,0 +1,96 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fake
+
+import (
+ "testing"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+)
+
+func TestApplyEdits(t *testing.T) {
+ tests := []struct {
+ label string
+ content string
+ edits []protocol.TextEdit
+ want string
+ wantErr bool
+ }{
+ {
+ label: "empty content",
+ },
+ {
+ label: "empty edit",
+ content: "hello",
+ edits: []protocol.TextEdit{},
+ want: "hello",
+ },
+ {
+ label: "unicode edit",
+ content: "hello, 日本語",
+ edits: []protocol.TextEdit{
+ NewEdit(0, 7, 0, 10, "world"),
+ },
+ want: "hello, world",
+ },
+ {
+ label: "range edit",
+ content: "ABC\nDEF\nGHI\nJKL",
+ edits: []protocol.TextEdit{
+ NewEdit(1, 1, 2, 3, "12\n345"),
+ },
+ want: "ABC\nD12\n345\nJKL",
+ },
+ {
+ label: "regression test for issue #57627",
+ content: "go 1.18\nuse moda/a",
+ edits: []protocol.TextEdit{
+ NewEdit(1, 0, 1, 0, "\n"),
+ NewEdit(2, 0, 2, 0, "\n"),
+ },
+ want: "go 1.18\n\nuse moda/a\n",
+ },
+ {
+ label: "end before start",
+ content: "ABC\nDEF\nGHI\nJKL",
+ edits: []protocol.TextEdit{
+ NewEdit(2, 3, 1, 1, "12\n345"),
+ },
+ wantErr: true,
+ },
+ {
+ label: "out of bounds line",
+ content: "ABC\nDEF\nGHI\nJKL",
+ edits: []protocol.TextEdit{
+ NewEdit(1, 1, 4, 3, "12\n345"),
+ },
+ wantErr: true,
+ },
+ {
+ label: "out of bounds column",
+ content: "ABC\nDEF\nGHI\nJKL",
+ edits: []protocol.TextEdit{
+ NewEdit(1, 4, 2, 3, "12\n345"),
+ },
+ wantErr: true,
+ },
+ }
+
+ for _, test := range tests {
+ test := test
+ t.Run(test.label, func(t *testing.T) {
+ got, err := applyEdits(protocol.NewMapper("", []byte(test.content)), test.edits, false)
+ if (err != nil) != test.wantErr {
+ t.Errorf("got err %v, want error: %t", err, test.wantErr)
+ }
+ if err != nil {
+ return
+ }
+ if got := string(got); got != test.want {
+ t.Errorf("got %q, want %q", got, test.want)
+ }
+ })
+ }
+}
diff --git a/gopls/internal/lsp/fake/editor.go b/gopls/internal/lsp/fake/editor.go
new file mode 100644
index 000000000..aed7b8daf
--- /dev/null
+++ b/gopls/internal/lsp/fake/editor.go
@@ -0,0 +1,1464 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fake
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "path"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "sync"
+
+ "golang.org/x/tools/gopls/internal/lsp/command"
+ "golang.org/x/tools/gopls/internal/lsp/glob"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/jsonrpc2"
+ "golang.org/x/tools/internal/jsonrpc2/servertest"
+ "golang.org/x/tools/internal/xcontext"
+)
+
+// Editor is a fake editor client. It keeps track of client state and can be
+// used for writing LSP tests.
+type Editor struct {
+
+ // Server, client, and sandbox are concurrency safe and written only
+ // at construction time, so do not require synchronization.
+ Server protocol.Server
+ cancelConn func()
+ serverConn jsonrpc2.Conn
+ client *Client
+ sandbox *Sandbox
+ defaultEnv map[string]string
+
+ // TODO(adonovan): buffers should be keyed by protocol.DocumentURI.
+ mu sync.Mutex
+ config EditorConfig // editor configuration
+ buffers map[string]buffer // open buffers (relative path -> buffer content)
+ serverCapabilities protocol.ServerCapabilities // capabilities / options
+ watchPatterns []*glob.Glob // glob patterns to watch
+
+ // Call metrics for the purpose of expectations. This is done in an ad-hoc
+ // manner for now. Perhaps in the future we should do something more
+ // systematic. Guarded with a separate mutex as calls may need to be accessed
+ // asynchronously via callbacks into the Editor.
+ callsMu sync.Mutex
+ calls CallCounts
+}
+
+// CallCounts tracks the number of protocol notifications of different types.
+type CallCounts struct {
+ DidOpen, DidChange, DidSave, DidChangeWatchedFiles, DidClose uint64
+}
+
+// buffer holds information about an open buffer in the editor.
+type buffer struct {
+ version int // monotonic version; incremented on edits
+ path string // relative path in the workspace
+ mapper *protocol.Mapper // buffer content
+ dirty bool // if true, content is unsaved (TODO(rfindley): rename this field)
+}
+
+func (b buffer) text() string {
+ return string(b.mapper.Content)
+}
+
+// EditorConfig configures the editor's LSP session. This is similar to
+// source.UserOptions, but we use a separate type here so that we expose only
+// that configuration which we support.
+//
+// The zero value for EditorConfig should correspond to its defaults.
+type EditorConfig struct {
+ // Env holds environment variables to apply on top of the default editor
+ // environment. When applying these variables, the special string
+ // $SANDBOX_WORKDIR is replaced by the absolute path to the sandbox working
+ // directory.
+ Env map[string]string
+
+ // WorkspaceFolders is the workspace folders to configure on the LSP server,
+ // relative to the sandbox workdir.
+ //
+ // As a special case, if WorkspaceFolders is nil the editor defaults to
+ // configuring a single workspace folder corresponding to the workdir root.
+ // To explicitly send no workspace folders, use an empty (non-nil) slice.
+ WorkspaceFolders []string
+
+ // Whether to edit files with windows line endings.
+ WindowsLineEndings bool
+
+ // Map of language ID -> regexp to match, used to set the file type of new
+ // buffers. Applied as an overlay on top of the following defaults:
+ // "go" -> ".*\.go"
+ // "go.mod" -> "go\.mod"
+ // "go.sum" -> "go\.sum"
+ // "gotmpl" -> ".*tmpl"
+ FileAssociations map[string]string
+
+ // Settings holds user-provided configuration for the LSP server.
+ Settings map[string]interface{}
+}
+
+// NewEditor creates a new Editor.
+func NewEditor(sandbox *Sandbox, config EditorConfig) *Editor {
+ return &Editor{
+ buffers: make(map[string]buffer),
+ sandbox: sandbox,
+ defaultEnv: sandbox.GoEnv(),
+ config: config,
+ }
+}
+
+// Connect configures the editor to communicate with an LSP server on conn. It
+// is not concurrency safe, and should be called at most once, before using the
+// editor.
+//
+// It returns the editor, so that it may be called as follows:
+//
+// editor, err := NewEditor(s).Connect(ctx, conn, hooks)
+func (e *Editor) Connect(ctx context.Context, connector servertest.Connector, hooks ClientHooks, skipApplyEdits bool) (*Editor, error) {
+ bgCtx, cancelConn := context.WithCancel(xcontext.Detach(ctx))
+ conn := connector.Connect(bgCtx)
+ e.cancelConn = cancelConn
+
+ e.serverConn = conn
+ e.Server = protocol.ServerDispatcher(conn)
+ e.client = &Client{editor: e, hooks: hooks, skipApplyEdits: skipApplyEdits}
+ conn.Go(bgCtx,
+ protocol.Handlers(
+ protocol.ClientHandler(e.client,
+ jsonrpc2.MethodNotFound)))
+
+ if err := e.initialize(ctx); err != nil {
+ return nil, err
+ }
+ e.sandbox.Workdir.AddWatcher(e.onFileChanges)
+ return e, nil
+}
+
+func (e *Editor) Stats() CallCounts {
+ e.callsMu.Lock()
+ defer e.callsMu.Unlock()
+ return e.calls
+}
+
+// Shutdown issues the 'shutdown' LSP notification.
+func (e *Editor) Shutdown(ctx context.Context) error {
+ if e.Server != nil {
+ if err := e.Server.Shutdown(ctx); err != nil {
+ return fmt.Errorf("Shutdown: %w", err)
+ }
+ }
+ return nil
+}
+
+// Exit issues the 'exit' LSP notification.
+func (e *Editor) Exit(ctx context.Context) error {
+ if e.Server != nil {
+ // Not all LSP clients issue the exit RPC, but we do so here to ensure that
+ // we gracefully handle it on multi-session servers.
+ if err := e.Server.Exit(ctx); err != nil {
+ return fmt.Errorf("Exit: %w", err)
+ }
+ }
+ return nil
+}
+
+// Close issues the shutdown and exit sequence an editor should.
+func (e *Editor) Close(ctx context.Context) error {
+ if err := e.Shutdown(ctx); err != nil {
+ return err
+ }
+ if err := e.Exit(ctx); err != nil {
+ return err
+ }
+ defer func() {
+ e.cancelConn()
+ }()
+
+ // called close on the editor should result in the connection closing
+ select {
+ case <-e.serverConn.Done():
+ // connection closed itself
+ return nil
+ case <-ctx.Done():
+ return fmt.Errorf("connection not closed: %w", ctx.Err())
+ }
+}
+
+// Client returns the LSP client for this editor.
+func (e *Editor) Client() *Client {
+ return e.client
+}
+
+// settingsLocked builds the settings map for use in LSP settings RPCs.
+//
+// e.mu must be held while calling this function.
+func (e *Editor) settingsLocked() map[string]interface{} {
+ env := make(map[string]string)
+ for k, v := range e.defaultEnv {
+ env[k] = v
+ }
+ for k, v := range e.config.Env {
+ env[k] = v
+ }
+ for k, v := range env {
+ v = strings.ReplaceAll(v, "$SANDBOX_WORKDIR", e.sandbox.Workdir.RootURI().SpanURI().Filename())
+ env[k] = v
+ }
+
+ settings := map[string]interface{}{
+ "env": env,
+
+ // Use verbose progress reporting so that regtests can assert on
+ // asynchronous operations being completed (such as diagnosing a snapshot).
+ "verboseWorkDoneProgress": true,
+
+ // Set a generous completion budget, so that tests don't flake because
+ // completions are too slow.
+ "completionBudget": "10s",
+
+ // Shorten the diagnostic delay to speed up test execution (else we'd add
+ // the default delay to each assertion about diagnostics)
+ "diagnosticsDelay": "10ms",
+ }
+
+ for k, v := range e.config.Settings {
+ if k == "env" {
+ panic("must not provide env via the EditorConfig.Settings field: use the EditorConfig.Env field instead")
+ }
+ settings[k] = v
+ }
+
+ return settings
+}
+
+func (e *Editor) initialize(ctx context.Context) error {
+ params := &protocol.ParamInitialize{}
+ params.ClientInfo = &protocol.Msg_XInitializeParams_clientInfo{}
+ params.ClientInfo.Name = "fakeclient"
+ params.ClientInfo.Version = "v1.0.0"
+ e.mu.Lock()
+ params.WorkspaceFolders = e.makeWorkspaceFoldersLocked()
+ params.InitializationOptions = e.settingsLocked()
+ e.mu.Unlock()
+ params.Capabilities.Workspace.Configuration = true
+ params.Capabilities.Window.WorkDoneProgress = true
+
+ // TODO: set client capabilities
+ params.Capabilities.TextDocument.Completion.CompletionItem.TagSupport.ValueSet = []protocol.CompletionItemTag{protocol.ComplDeprecated}
+
+ params.Capabilities.TextDocument.Completion.CompletionItem.SnippetSupport = true
+ params.Capabilities.TextDocument.SemanticTokens.Requests.Full.Value = true
+ // copied from lsp/semantic.go to avoid import cycle in tests
+ params.Capabilities.TextDocument.SemanticTokens.TokenTypes = []string{
+ "namespace", "type", "class", "enum", "interface",
+ "struct", "typeParameter", "parameter", "variable", "property", "enumMember",
+ "event", "function", "method", "macro", "keyword", "modifier", "comment",
+ "string", "number", "regexp", "operator",
+ }
+ params.Capabilities.TextDocument.SemanticTokens.TokenModifiers = []string{
+ "declaration", "definition", "readonly", "static",
+ "deprecated", "abstract", "async", "modification", "documentation", "defaultLibrary",
+ }
+
+ // This is a bit of a hack, since the fake editor doesn't actually support
+ // watching changed files that match a specific glob pattern. However, the
+ // editor does send didChangeWatchedFiles notifications, so set this to
+ // true.
+ params.Capabilities.Workspace.DidChangeWatchedFiles.DynamicRegistration = true
+ params.Capabilities.Workspace.WorkspaceEdit = &protocol.WorkspaceEditClientCapabilities{
+ ResourceOperations: []protocol.ResourceOperationKind{
+ "rename",
+ },
+ }
+
+ trace := protocol.TraceValues("messages")
+ params.Trace = &trace
+ // TODO: support workspace folders.
+ if e.Server != nil {
+ resp, err := e.Server.Initialize(ctx, params)
+ if err != nil {
+ return fmt.Errorf("initialize: %w", err)
+ }
+ e.mu.Lock()
+ e.serverCapabilities = resp.Capabilities
+ e.mu.Unlock()
+
+ if err := e.Server.Initialized(ctx, &protocol.InitializedParams{}); err != nil {
+ return fmt.Errorf("initialized: %w", err)
+ }
+ }
+ // TODO: await initial configuration here, or expect gopls to manage that?
+ return nil
+}
+
+// makeWorkspaceFoldersLocked creates a slice of workspace folders to use for
+// this editing session, based on the editor configuration.
+//
+// e.mu must be held while calling this function.
+func (e *Editor) makeWorkspaceFoldersLocked() (folders []protocol.WorkspaceFolder) {
+ paths := e.config.WorkspaceFolders
+ if len(paths) == 0 {
+ paths = append(paths, string(e.sandbox.Workdir.RelativeTo))
+ }
+
+ for _, path := range paths {
+ uri := string(e.sandbox.Workdir.URI(path))
+ folders = append(folders, protocol.WorkspaceFolder{
+ URI: uri,
+ Name: filepath.Base(uri),
+ })
+ }
+
+ return folders
+}
+
+// onFileChanges is registered to be called by the Workdir on any writes that
+// go through the Workdir API. It is called synchronously by the Workdir.
+func (e *Editor) onFileChanges(ctx context.Context, evts []protocol.FileEvent) {
+ if e.Server == nil {
+ return
+ }
+
+ // e may be locked when onFileChanges is called, but it is important that we
+ // synchronously increment this counter so that we can subsequently assert on
+ // the number of expected DidChangeWatchedFiles calls.
+ e.callsMu.Lock()
+ e.calls.DidChangeWatchedFiles++
+ e.callsMu.Unlock()
+
+ // Since e may be locked, we must run this mutation asynchronously.
+ go func() {
+ e.mu.Lock()
+ defer e.mu.Unlock()
+ for _, evt := range evts {
+ // Always send an on-disk change, even for events that seem useless
+ // because they're shadowed by an open buffer.
+ path := e.sandbox.Workdir.URIToPath(evt.URI)
+ if buf, ok := e.buffers[path]; ok {
+ // Following VS Code, don't honor deletions or changes to dirty buffers.
+ if buf.dirty || evt.Type == protocol.Deleted {
+ continue
+ }
+
+ content, err := e.sandbox.Workdir.ReadFile(path)
+ if err != nil {
+ continue // A race with some other operation.
+ }
+ // No need to update if the buffer content hasn't changed.
+ if string(content) == buf.text() {
+ continue
+ }
+ // During shutdown, this call will fail. Ignore the error.
+ _ = e.setBufferContentLocked(ctx, path, false, content, nil)
+ }
+ }
+ var matchedEvts []protocol.FileEvent
+ for _, evt := range evts {
+ filename := filepath.ToSlash(evt.URI.SpanURI().Filename())
+ for _, g := range e.watchPatterns {
+ if g.Match(filename) {
+ matchedEvts = append(matchedEvts, evt)
+ break
+ }
+ }
+ }
+
+ // TODO(rfindley): don't send notifications while locked.
+ e.Server.DidChangeWatchedFiles(ctx, &protocol.DidChangeWatchedFilesParams{
+ Changes: matchedEvts,
+ })
+ }()
+}
+
+// OpenFile creates a buffer for the given workdir-relative file.
+//
+// If the file is already open, it is a no-op.
+func (e *Editor) OpenFile(ctx context.Context, path string) error {
+ if e.HasBuffer(path) {
+ return nil
+ }
+ content, err := e.sandbox.Workdir.ReadFile(path)
+ if err != nil {
+ return err
+ }
+ if e.Config().WindowsLineEndings {
+ content = toWindowsLineEndings(content)
+ }
+ return e.createBuffer(ctx, path, false, content)
+}
+
+// toWindowsLineEndings checks whether content has windows line endings.
+//
+// If so, it returns content unmodified. If not, it returns a new byte slice modified to use CRLF line endings.
+func toWindowsLineEndings(content []byte) []byte {
+ abnormal := false
+ for i, b := range content {
+ if b == '\n' && (i == 0 || content[i-1] != '\r') {
+ abnormal = true
+ break
+ }
+ }
+ if !abnormal {
+ return content
+ }
+ var buf bytes.Buffer
+ for i, b := range content {
+ if b == '\n' && (i == 0 || content[i-1] != '\r') {
+ buf.WriteByte('\r')
+ }
+ buf.WriteByte(b)
+ }
+ return buf.Bytes()
+}
+
+// CreateBuffer creates a new unsaved buffer corresponding to the workdir path,
+// containing the given textual content.
+func (e *Editor) CreateBuffer(ctx context.Context, path, content string) error {
+ return e.createBuffer(ctx, path, true, []byte(content))
+}
+
+func (e *Editor) createBuffer(ctx context.Context, path string, dirty bool, content []byte) error {
+ e.mu.Lock()
+
+ if _, ok := e.buffers[path]; ok {
+ e.mu.Unlock()
+ return fmt.Errorf("buffer %q already exists", path)
+ }
+
+ uri := e.sandbox.Workdir.URI(path).SpanURI()
+ buf := buffer{
+ version: 1,
+ path: path,
+ mapper: protocol.NewMapper(uri, content),
+ dirty: dirty,
+ }
+ e.buffers[path] = buf
+
+ item := e.textDocumentItem(buf)
+ e.mu.Unlock()
+
+ return e.sendDidOpen(ctx, item)
+}
+
+// textDocumentItem builds a protocol.TextDocumentItem for the given buffer.
+//
+// Precondition: e.mu must be held.
+func (e *Editor) textDocumentItem(buf buffer) protocol.TextDocumentItem {
+ return protocol.TextDocumentItem{
+ URI: e.sandbox.Workdir.URI(buf.path),
+ LanguageID: languageID(buf.path, e.config.FileAssociations),
+ Version: int32(buf.version),
+ Text: buf.text(),
+ }
+}
+
+func (e *Editor) sendDidOpen(ctx context.Context, item protocol.TextDocumentItem) error {
+ if e.Server != nil {
+ if err := e.Server.DidOpen(ctx, &protocol.DidOpenTextDocumentParams{
+ TextDocument: item,
+ }); err != nil {
+ return fmt.Errorf("DidOpen: %w", err)
+ }
+ e.callsMu.Lock()
+ e.calls.DidOpen++
+ e.callsMu.Unlock()
+ }
+ return nil
+}
+
+var defaultFileAssociations = map[string]*regexp.Regexp{
+ "go": regexp.MustCompile(`^.*\.go$`), // '$' is important: don't match .gotmpl!
+ "go.mod": regexp.MustCompile(`^go\.mod$`),
+ "go.sum": regexp.MustCompile(`^go(\.work)?\.sum$`),
+ "go.work": regexp.MustCompile(`^go\.work$`),
+ "gotmpl": regexp.MustCompile(`^.*tmpl$`),
+}
+
+// languageID returns the language identifier for the path p given the user
+// configured fileAssociations.
+func languageID(p string, fileAssociations map[string]string) string {
+ base := path.Base(p)
+ for lang, re := range fileAssociations {
+ re := regexp.MustCompile(re)
+ if re.MatchString(base) {
+ return lang
+ }
+ }
+ for lang, re := range defaultFileAssociations {
+ if re.MatchString(base) {
+ return lang
+ }
+ }
+ return ""
+}
+
+// CloseBuffer removes the current buffer (regardless of whether it is saved).
+func (e *Editor) CloseBuffer(ctx context.Context, path string) error {
+ e.mu.Lock()
+ _, ok := e.buffers[path]
+ if !ok {
+ e.mu.Unlock()
+ return ErrUnknownBuffer
+ }
+ delete(e.buffers, path)
+ e.mu.Unlock()
+
+ return e.sendDidClose(ctx, e.TextDocumentIdentifier(path))
+}
+
+func (e *Editor) sendDidClose(ctx context.Context, doc protocol.TextDocumentIdentifier) error {
+ if e.Server != nil {
+ if err := e.Server.DidClose(ctx, &protocol.DidCloseTextDocumentParams{
+ TextDocument: doc,
+ }); err != nil {
+ return fmt.Errorf("DidClose: %w", err)
+ }
+ e.callsMu.Lock()
+ e.calls.DidClose++
+ e.callsMu.Unlock()
+ }
+ return nil
+}
+
+func (e *Editor) TextDocumentIdentifier(path string) protocol.TextDocumentIdentifier {
+ return protocol.TextDocumentIdentifier{
+ URI: e.sandbox.Workdir.URI(path),
+ }
+}
+
+// SaveBuffer writes the content of the buffer specified by the given path to
+// the filesystem.
+func (e *Editor) SaveBuffer(ctx context.Context, path string) error {
+ if err := e.OrganizeImports(ctx, path); err != nil {
+ return fmt.Errorf("organizing imports before save: %w", err)
+ }
+ if err := e.FormatBuffer(ctx, path); err != nil {
+ return fmt.Errorf("formatting before save: %w", err)
+ }
+ return e.SaveBufferWithoutActions(ctx, path)
+}
+
+func (e *Editor) SaveBufferWithoutActions(ctx context.Context, path string) error {
+ e.mu.Lock()
+ defer e.mu.Unlock()
+ buf, ok := e.buffers[path]
+ if !ok {
+ return fmt.Errorf(fmt.Sprintf("unknown buffer: %q", path))
+ }
+ content := buf.text()
+ includeText := false
+ syncOptions, ok := e.serverCapabilities.TextDocumentSync.(protocol.TextDocumentSyncOptions)
+ if ok {
+ includeText = syncOptions.Save.IncludeText
+ }
+
+ docID := e.TextDocumentIdentifier(buf.path)
+ if e.Server != nil {
+ if err := e.Server.WillSave(ctx, &protocol.WillSaveTextDocumentParams{
+ TextDocument: docID,
+ Reason: protocol.Manual,
+ }); err != nil {
+ return fmt.Errorf("WillSave: %w", err)
+ }
+ }
+ if err := e.sandbox.Workdir.WriteFile(ctx, path, content); err != nil {
+ return fmt.Errorf("writing %q: %w", path, err)
+ }
+
+ buf.dirty = false
+ e.buffers[path] = buf
+
+ if e.Server != nil {
+ params := &protocol.DidSaveTextDocumentParams{
+ TextDocument: docID,
+ }
+ if includeText {
+ params.Text = &content
+ }
+ if err := e.Server.DidSave(ctx, params); err != nil {
+ return fmt.Errorf("DidSave: %w", err)
+ }
+ e.callsMu.Lock()
+ e.calls.DidSave++
+ e.callsMu.Unlock()
+ }
+ return nil
+}
+
+// ErrNoMatch is returned if a regexp search fails.
+var (
+ ErrNoMatch = errors.New("no match")
+ ErrUnknownBuffer = errors.New("unknown buffer")
+)
+
+// regexpLocation returns the location of the first occurrence of either re
+// or its singular subgroup. It returns ErrNoMatch if the regexp doesn't match.
+func regexpLocation(mapper *protocol.Mapper, re string) (protocol.Location, error) {
+ var start, end int
+ rec, err := regexp.Compile(re)
+ if err != nil {
+ return protocol.Location{}, err
+ }
+ indexes := rec.FindSubmatchIndex(mapper.Content)
+ if indexes == nil {
+ return protocol.Location{}, ErrNoMatch
+ }
+ switch len(indexes) {
+ case 2:
+ // no subgroups: return the range of the regexp expression
+ start, end = indexes[0], indexes[1]
+ case 4:
+ // one subgroup: return its range
+ start, end = indexes[2], indexes[3]
+ default:
+ return protocol.Location{}, fmt.Errorf("invalid search regexp %q: expect either 0 or 1 subgroups, got %d", re, len(indexes)/2-1)
+ }
+ return mapper.OffsetLocation(start, end)
+}
+
+// RegexpSearch returns the Location of the first match for re in the buffer
+// bufName. For convenience, RegexpSearch supports the following two modes:
+// 1. If re has no subgroups, return the position of the match for re itself.
+// 2. If re has one subgroup, return the position of the first subgroup.
+//
+// It returns an error re is invalid, has more than one subgroup, or doesn't
+// match the buffer.
+func (e *Editor) RegexpSearch(bufName, re string) (protocol.Location, error) {
+ e.mu.Lock()
+ buf, ok := e.buffers[bufName]
+ e.mu.Unlock()
+ if !ok {
+ return protocol.Location{}, ErrUnknownBuffer
+ }
+ return regexpLocation(buf.mapper, re)
+}
+
+// RegexpReplace edits the buffer corresponding to path by replacing the first
+// instance of re, or its first subgroup, with the replace text. See
+// RegexpSearch for more explanation of these two modes.
+// It returns an error if re is invalid, has more than one subgroup, or doesn't
+// match the buffer.
+func (e *Editor) RegexpReplace(ctx context.Context, path, re, replace string) error {
+ e.mu.Lock()
+ defer e.mu.Unlock()
+ buf, ok := e.buffers[path]
+ if !ok {
+ return ErrUnknownBuffer
+ }
+ loc, err := regexpLocation(buf.mapper, re)
+ if err != nil {
+ return err
+ }
+ edits := []protocol.TextEdit{{
+ Range: loc.Range,
+ NewText: replace,
+ }}
+ patched, err := applyEdits(buf.mapper, edits, e.config.WindowsLineEndings)
+ if err != nil {
+ return fmt.Errorf("editing %q: %v", path, err)
+ }
+ return e.setBufferContentLocked(ctx, path, true, patched, edits)
+}
+
+// EditBuffer applies the given test edits to the buffer identified by path.
+func (e *Editor) EditBuffer(ctx context.Context, path string, edits []protocol.TextEdit) error {
+ e.mu.Lock()
+ defer e.mu.Unlock()
+ return e.editBufferLocked(ctx, path, edits)
+}
+
+func (e *Editor) SetBufferContent(ctx context.Context, path, content string) error {
+ e.mu.Lock()
+ defer e.mu.Unlock()
+ return e.setBufferContentLocked(ctx, path, true, []byte(content), nil)
+}
+
+// HasBuffer reports whether the file name is open in the editor.
+func (e *Editor) HasBuffer(name string) bool {
+ e.mu.Lock()
+ defer e.mu.Unlock()
+ _, ok := e.buffers[name]
+ return ok
+}
+
+// BufferText returns the content of the buffer with the given name, or "" if
+// the file at that path is not open. The second return value reports whether
+// the file is open.
+func (e *Editor) BufferText(name string) (string, bool) {
+ e.mu.Lock()
+ defer e.mu.Unlock()
+ buf, ok := e.buffers[name]
+ if !ok {
+ return "", false
+ }
+ return buf.text(), true
+}
+
+// Mapper returns the protocol.Mapper for the given buffer name, if it is open.
+func (e *Editor) Mapper(name string) (*protocol.Mapper, error) {
+ e.mu.Lock()
+ defer e.mu.Unlock()
+ buf, ok := e.buffers[name]
+ if !ok {
+ return nil, fmt.Errorf("no mapper for %q", name)
+ }
+ return buf.mapper, nil
+}
+
+// BufferVersion returns the current version of the buffer corresponding to
+// name (or 0 if it is not being edited).
+func (e *Editor) BufferVersion(name string) int {
+ e.mu.Lock()
+ defer e.mu.Unlock()
+ return e.buffers[name].version
+}
+
+func (e *Editor) editBufferLocked(ctx context.Context, path string, edits []protocol.TextEdit) error {
+ buf, ok := e.buffers[path]
+ if !ok {
+ return fmt.Errorf("unknown buffer %q", path)
+ }
+ content, err := applyEdits(buf.mapper, edits, e.config.WindowsLineEndings)
+ if err != nil {
+ return fmt.Errorf("editing %q: %v; edits:\n%v", path, err, edits)
+ }
+ return e.setBufferContentLocked(ctx, path, true, content, edits)
+}
+
+func (e *Editor) setBufferContentLocked(ctx context.Context, path string, dirty bool, content []byte, fromEdits []protocol.TextEdit) error {
+ buf, ok := e.buffers[path]
+ if !ok {
+ return fmt.Errorf("unknown buffer %q", path)
+ }
+ buf.mapper = protocol.NewMapper(buf.mapper.URI, content)
+ buf.version++
+ buf.dirty = dirty
+ e.buffers[path] = buf
+ // A simple heuristic: if there is only one edit, send it incrementally.
+ // Otherwise, send the entire content.
+ var evts []protocol.TextDocumentContentChangeEvent
+ if len(fromEdits) == 1 {
+ evts = append(evts, EditToChangeEvent(fromEdits[0]))
+ } else {
+ evts = append(evts, protocol.TextDocumentContentChangeEvent{
+ Text: buf.text(),
+ })
+ }
+ params := &protocol.DidChangeTextDocumentParams{
+ TextDocument: protocol.VersionedTextDocumentIdentifier{
+ Version: int32(buf.version),
+ TextDocumentIdentifier: e.TextDocumentIdentifier(buf.path),
+ },
+ ContentChanges: evts,
+ }
+ if e.Server != nil {
+ if err := e.Server.DidChange(ctx, params); err != nil {
+ return fmt.Errorf("DidChange: %w", err)
+ }
+ e.callsMu.Lock()
+ e.calls.DidChange++
+ e.callsMu.Unlock()
+ }
+ return nil
+}
+
+// GoToDefinition jumps to the definition of the symbol at the given position
+// in an open buffer. It returns the location of the resulting jump.
+//
+// TODO(rfindley): rename to "Definition", to be consistent with LSP
+// terminology.
+func (e *Editor) GoToDefinition(ctx context.Context, loc protocol.Location) (protocol.Location, error) {
+ if err := e.checkBufferLocation(loc); err != nil {
+ return protocol.Location{}, err
+ }
+ params := &protocol.DefinitionParams{}
+ params.TextDocument.URI = loc.URI
+ params.Position = loc.Range.Start
+
+ resp, err := e.Server.Definition(ctx, params)
+ if err != nil {
+ return protocol.Location{}, fmt.Errorf("definition: %w", err)
+ }
+ return e.extractFirstLocation(ctx, resp)
+}
+
+// GoToTypeDefinition jumps to the type definition of the symbol at the given location
+// in an open buffer.
+func (e *Editor) GoToTypeDefinition(ctx context.Context, loc protocol.Location) (protocol.Location, error) {
+ if err := e.checkBufferLocation(loc); err != nil {
+ return protocol.Location{}, err
+ }
+ params := &protocol.TypeDefinitionParams{}
+ params.TextDocument.URI = loc.URI
+ params.Position = loc.Range.Start
+
+ resp, err := e.Server.TypeDefinition(ctx, params)
+ if err != nil {
+ return protocol.Location{}, fmt.Errorf("type definition: %w", err)
+ }
+ return e.extractFirstLocation(ctx, resp)
+}
+
+// extractFirstLocation returns the first location.
+// It opens the file if needed.
+func (e *Editor) extractFirstLocation(ctx context.Context, locs []protocol.Location) (protocol.Location, error) {
+ if len(locs) == 0 {
+ return protocol.Location{}, nil
+ }
+
+ newPath := e.sandbox.Workdir.URIToPath(locs[0].URI)
+ if !e.HasBuffer(newPath) {
+ if err := e.OpenFile(ctx, newPath); err != nil {
+ return protocol.Location{}, fmt.Errorf("OpenFile: %w", err)
+ }
+ }
+ return locs[0], nil
+}
+
+// Symbol performs a workspace symbol search using query
+func (e *Editor) Symbol(ctx context.Context, query string) ([]protocol.SymbolInformation, error) {
+ params := &protocol.WorkspaceSymbolParams{Query: query}
+ return e.Server.Symbol(ctx, params)
+}
+
+// OrganizeImports requests and performs the source.organizeImports codeAction.
+func (e *Editor) OrganizeImports(ctx context.Context, path string) error {
+ loc := protocol.Location{URI: e.sandbox.Workdir.URI(path)} // zero Range => whole file
+ _, err := e.applyCodeActions(ctx, loc, nil, protocol.SourceOrganizeImports)
+ return err
+}
+
+// RefactorRewrite requests and performs the source.refactorRewrite codeAction.
+func (e *Editor) RefactorRewrite(ctx context.Context, loc protocol.Location) error {
+ applied, err := e.applyCodeActions(ctx, loc, nil, protocol.RefactorRewrite)
+ if err != nil {
+ return err
+ }
+ if applied == 0 {
+ return fmt.Errorf("no refactorings were applied")
+ }
+ return nil
+}
+
+// ApplyQuickFixes requests and performs the quickfix codeAction.
+func (e *Editor) ApplyQuickFixes(ctx context.Context, loc protocol.Location, diagnostics []protocol.Diagnostic) error {
+ applied, err := e.applyCodeActions(ctx, loc, diagnostics, protocol.SourceFixAll, protocol.QuickFix)
+ if applied == 0 {
+ return fmt.Errorf("no quick fixes were applied")
+ }
+ return err
+}
+
+// ApplyCodeAction applies the given code action.
+func (e *Editor) ApplyCodeAction(ctx context.Context, action protocol.CodeAction) error {
+ if action.Edit != nil {
+ for _, change := range action.Edit.DocumentChanges {
+ if change.TextDocumentEdit != nil {
+ path := e.sandbox.Workdir.URIToPath(change.TextDocumentEdit.TextDocument.URI)
+ if int32(e.buffers[path].version) != change.TextDocumentEdit.TextDocument.Version {
+ // Skip edits for old versions.
+ continue
+ }
+ if err := e.EditBuffer(ctx, path, change.TextDocumentEdit.Edits); err != nil {
+ return fmt.Errorf("editing buffer %q: %w", path, err)
+ }
+ }
+ }
+ }
+ // Execute any commands. The specification says that commands are
+ // executed after edits are applied.
+ if action.Command != nil {
+ if _, err := e.ExecuteCommand(ctx, &protocol.ExecuteCommandParams{
+ Command: action.Command.Command,
+ Arguments: action.Command.Arguments,
+ }); err != nil {
+ return err
+ }
+ }
+ // Some commands may edit files on disk.
+ return e.sandbox.Workdir.CheckForFileChanges(ctx)
+}
+
+// GetQuickFixes returns the available quick fix code actions.
+func (e *Editor) GetQuickFixes(ctx context.Context, loc protocol.Location, diagnostics []protocol.Diagnostic) ([]protocol.CodeAction, error) {
+ return e.getCodeActions(ctx, loc, diagnostics, protocol.QuickFix, protocol.SourceFixAll)
+}
+
+func (e *Editor) applyCodeActions(ctx context.Context, loc protocol.Location, diagnostics []protocol.Diagnostic, only ...protocol.CodeActionKind) (int, error) {
+ actions, err := e.getCodeActions(ctx, loc, diagnostics, only...)
+ if err != nil {
+ return 0, err
+ }
+ applied := 0
+ for _, action := range actions {
+ if action.Title == "" {
+ return 0, fmt.Errorf("empty title for code action")
+ }
+ var match bool
+ for _, o := range only {
+ if action.Kind == o {
+ match = true
+ break
+ }
+ }
+ if !match {
+ continue
+ }
+ applied++
+ if err := e.ApplyCodeAction(ctx, action); err != nil {
+ return 0, err
+ }
+ }
+ return applied, nil
+}
+
+func (e *Editor) getCodeActions(ctx context.Context, loc protocol.Location, diagnostics []protocol.Diagnostic, only ...protocol.CodeActionKind) ([]protocol.CodeAction, error) {
+ if e.Server == nil {
+ return nil, nil
+ }
+ params := &protocol.CodeActionParams{}
+ params.TextDocument.URI = loc.URI
+ params.Context.Only = only
+ params.Range = loc.Range // may be zero => whole file
+ if diagnostics != nil {
+ params.Context.Diagnostics = diagnostics
+ }
+ return e.Server.CodeAction(ctx, params)
+}
+
+func (e *Editor) ExecuteCommand(ctx context.Context, params *protocol.ExecuteCommandParams) (interface{}, error) {
+ if e.Server == nil {
+ return nil, nil
+ }
+ var match bool
+ if e.serverCapabilities.ExecuteCommandProvider != nil {
+ // Ensure that this command was actually listed as a supported command.
+ for _, command := range e.serverCapabilities.ExecuteCommandProvider.Commands {
+ if command == params.Command {
+ match = true
+ break
+ }
+ }
+ }
+ if !match {
+ return nil, fmt.Errorf("unsupported command %q", params.Command)
+ }
+ result, err := e.Server.ExecuteCommand(ctx, params)
+ if err != nil {
+ return nil, err
+ }
+ // Some commands use the go command, which writes directly to disk.
+ // For convenience, check for those changes.
+ if err := e.sandbox.Workdir.CheckForFileChanges(ctx); err != nil {
+ return nil, fmt.Errorf("checking for file changes: %v", err)
+ }
+ return result, nil
+}
+
+// FormatBuffer gofmts a Go file.
+func (e *Editor) FormatBuffer(ctx context.Context, path string) error {
+ if e.Server == nil {
+ return nil
+ }
+ e.mu.Lock()
+ version := e.buffers[path].version
+ e.mu.Unlock()
+ params := &protocol.DocumentFormattingParams{}
+ params.TextDocument.URI = e.sandbox.Workdir.URI(path)
+ edits, err := e.Server.Formatting(ctx, params)
+ if err != nil {
+ return fmt.Errorf("textDocument/formatting: %w", err)
+ }
+ e.mu.Lock()
+ defer e.mu.Unlock()
+ if versionAfter := e.buffers[path].version; versionAfter != version {
+ return fmt.Errorf("before receipt of formatting edits, buffer version changed from %d to %d", version, versionAfter)
+ }
+ if len(edits) == 0 {
+ return nil
+ }
+ return e.editBufferLocked(ctx, path, edits)
+}
+
+func (e *Editor) checkBufferLocation(loc protocol.Location) error {
+ e.mu.Lock()
+ defer e.mu.Unlock()
+ path := e.sandbox.Workdir.URIToPath(loc.URI)
+ buf, ok := e.buffers[path]
+ if !ok {
+ return fmt.Errorf("buffer %q is not open", path)
+ }
+
+ _, _, err := buf.mapper.RangeOffsets(loc.Range)
+ return err
+}
+
+// RunGenerate runs `go generate` non-recursively in the workdir-relative dir
+// path. It does not report any resulting file changes as a watched file
+// change, so must be followed by a call to Workdir.CheckForFileChanges once
+// the generate command has completed.
+// TODO(rFindley): this shouldn't be necessary anymore. Delete it.
+func (e *Editor) RunGenerate(ctx context.Context, dir string) error {
+ if e.Server == nil {
+ return nil
+ }
+ absDir := e.sandbox.Workdir.AbsPath(dir)
+ cmd, err := command.NewGenerateCommand("", command.GenerateArgs{
+ Dir: protocol.URIFromSpanURI(span.URIFromPath(absDir)),
+ Recursive: false,
+ })
+ if err != nil {
+ return err
+ }
+ params := &protocol.ExecuteCommandParams{
+ Command: cmd.Command,
+ Arguments: cmd.Arguments,
+ }
+ if _, err := e.ExecuteCommand(ctx, params); err != nil {
+ return fmt.Errorf("running generate: %v", err)
+ }
+ // Unfortunately we can't simply poll the workdir for file changes here,
+ // because server-side command may not have completed. In regtests, we can
+ // Await this state change, but here we must delegate that responsibility to
+ // the caller.
+ return nil
+}
+
+// CodeLens executes a codelens request on the server.
+func (e *Editor) CodeLens(ctx context.Context, path string) ([]protocol.CodeLens, error) {
+ if e.Server == nil {
+ return nil, nil
+ }
+ e.mu.Lock()
+ _, ok := e.buffers[path]
+ e.mu.Unlock()
+ if !ok {
+ return nil, fmt.Errorf("buffer %q is not open", path)
+ }
+ params := &protocol.CodeLensParams{
+ TextDocument: e.TextDocumentIdentifier(path),
+ }
+ lens, err := e.Server.CodeLens(ctx, params)
+ if err != nil {
+ return nil, err
+ }
+ return lens, nil
+}
+
+// Completion executes a completion request on the server.
+func (e *Editor) Completion(ctx context.Context, loc protocol.Location) (*protocol.CompletionList, error) {
+ if e.Server == nil {
+ return nil, nil
+ }
+ path := e.sandbox.Workdir.URIToPath(loc.URI)
+ e.mu.Lock()
+ _, ok := e.buffers[path]
+ e.mu.Unlock()
+ if !ok {
+ return nil, fmt.Errorf("buffer %q is not open", path)
+ }
+ params := &protocol.CompletionParams{
+ TextDocumentPositionParams: protocol.LocationTextDocumentPositionParams(loc),
+ }
+ completions, err := e.Server.Completion(ctx, params)
+ if err != nil {
+ return nil, err
+ }
+ return completions, nil
+}
+
+// AcceptCompletion accepts a completion for the given item at the given
+// position.
+func (e *Editor) AcceptCompletion(ctx context.Context, loc protocol.Location, item protocol.CompletionItem) error {
+ if e.Server == nil {
+ return nil
+ }
+ e.mu.Lock()
+ defer e.mu.Unlock()
+ path := e.sandbox.Workdir.URIToPath(loc.URI)
+ _, ok := e.buffers[path]
+ if !ok {
+ return fmt.Errorf("buffer %q is not open", path)
+ }
+ return e.editBufferLocked(ctx, path, append([]protocol.TextEdit{
+ *item.TextEdit,
+ }, item.AdditionalTextEdits...))
+}
+
+// Symbols executes a workspace/symbols request on the server.
+func (e *Editor) Symbols(ctx context.Context, sym string) ([]protocol.SymbolInformation, error) {
+ if e.Server == nil {
+ return nil, nil
+ }
+ params := &protocol.WorkspaceSymbolParams{Query: sym}
+ ans, err := e.Server.Symbol(ctx, params)
+ return ans, err
+}
+
+// CodeLens executes a codelens request on the server.
+func (e *Editor) InlayHint(ctx context.Context, path string) ([]protocol.InlayHint, error) {
+ if e.Server == nil {
+ return nil, nil
+ }
+ e.mu.Lock()
+ _, ok := e.buffers[path]
+ e.mu.Unlock()
+ if !ok {
+ return nil, fmt.Errorf("buffer %q is not open", path)
+ }
+ params := &protocol.InlayHintParams{
+ TextDocument: e.TextDocumentIdentifier(path),
+ }
+ hints, err := e.Server.InlayHint(ctx, params)
+ if err != nil {
+ return nil, err
+ }
+ return hints, nil
+}
+
+// References returns references to the object at loc, as returned by
+// the connected LSP server. If no server is connected, it returns (nil, nil).
+func (e *Editor) References(ctx context.Context, loc protocol.Location) ([]protocol.Location, error) {
+ if e.Server == nil {
+ return nil, nil
+ }
+ path := e.sandbox.Workdir.URIToPath(loc.URI)
+ e.mu.Lock()
+ _, ok := e.buffers[path]
+ e.mu.Unlock()
+ if !ok {
+ return nil, fmt.Errorf("buffer %q is not open", path)
+ }
+ params := &protocol.ReferenceParams{
+ TextDocumentPositionParams: protocol.LocationTextDocumentPositionParams(loc),
+ Context: protocol.ReferenceContext{
+ IncludeDeclaration: true,
+ },
+ }
+ locations, err := e.Server.References(ctx, params)
+ if err != nil {
+ return nil, err
+ }
+ return locations, nil
+}
+
+// Rename performs a rename of the object at loc to newName, using the
+// connected LSP server. If no server is connected, it returns nil.
+func (e *Editor) Rename(ctx context.Context, loc protocol.Location, newName string) error {
+ if e.Server == nil {
+ return nil
+ }
+ path := e.sandbox.Workdir.URIToPath(loc.URI)
+
+ // Verify that PrepareRename succeeds.
+ prepareParams := &protocol.PrepareRenameParams{}
+ prepareParams.TextDocument = e.TextDocumentIdentifier(path)
+ prepareParams.Position = loc.Range.Start
+ if _, err := e.Server.PrepareRename(ctx, prepareParams); err != nil {
+ return fmt.Errorf("preparing rename: %v", err)
+ }
+
+ params := &protocol.RenameParams{
+ TextDocument: e.TextDocumentIdentifier(path),
+ Position: loc.Range.Start,
+ NewName: newName,
+ }
+ wsEdits, err := e.Server.Rename(ctx, params)
+ if err != nil {
+ return err
+ }
+ for _, change := range wsEdits.DocumentChanges {
+ if err := e.applyDocumentChange(ctx, change); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Implementations returns implementations for the object at loc, as
+// returned by the connected LSP server. If no server is connected, it returns
+// (nil, nil).
+func (e *Editor) Implementations(ctx context.Context, loc protocol.Location) ([]protocol.Location, error) {
+ if e.Server == nil {
+ return nil, nil
+ }
+ path := e.sandbox.Workdir.URIToPath(loc.URI)
+ e.mu.Lock()
+ _, ok := e.buffers[path]
+ e.mu.Unlock()
+ if !ok {
+ return nil, fmt.Errorf("buffer %q is not open", path)
+ }
+ params := &protocol.ImplementationParams{
+ TextDocumentPositionParams: protocol.LocationTextDocumentPositionParams(loc),
+ }
+ return e.Server.Implementation(ctx, params)
+}
+
+func (e *Editor) SignatureHelp(ctx context.Context, loc protocol.Location) (*protocol.SignatureHelp, error) {
+ if e.Server == nil {
+ return nil, nil
+ }
+ path := e.sandbox.Workdir.URIToPath(loc.URI)
+ e.mu.Lock()
+ _, ok := e.buffers[path]
+ e.mu.Unlock()
+ if !ok {
+ return nil, fmt.Errorf("buffer %q is not open", path)
+ }
+ params := &protocol.SignatureHelpParams{
+ TextDocumentPositionParams: protocol.LocationTextDocumentPositionParams(loc),
+ }
+ return e.Server.SignatureHelp(ctx, params)
+}
+
+func (e *Editor) RenameFile(ctx context.Context, oldPath, newPath string) error {
+ closed, opened, err := e.renameBuffers(ctx, oldPath, newPath)
+ if err != nil {
+ return err
+ }
+
+ for _, c := range closed {
+ if err := e.sendDidClose(ctx, c); err != nil {
+ return err
+ }
+ }
+ for _, o := range opened {
+ if err := e.sendDidOpen(ctx, o); err != nil {
+ return err
+ }
+ }
+
+ // Finally, perform the renaming on disk.
+ if err := e.sandbox.Workdir.RenameFile(ctx, oldPath, newPath); err != nil {
+ return fmt.Errorf("renaming sandbox file: %w", err)
+ }
+ return nil
+}
+
+// renameBuffers renames in-memory buffers affected by the renaming of
+// oldPath->newPath, returning the resulting text documents that must be closed
+// and opened over the LSP.
+func (e *Editor) renameBuffers(ctx context.Context, oldPath, newPath string) (closed []protocol.TextDocumentIdentifier, opened []protocol.TextDocumentItem, _ error) {
+ e.mu.Lock()
+ defer e.mu.Unlock()
+
+ // In case either oldPath or newPath is absolute, convert to absolute paths
+ // before checking for containment.
+ oldAbs := e.sandbox.Workdir.AbsPath(oldPath)
+ newAbs := e.sandbox.Workdir.AbsPath(newPath)
+
+ // Collect buffers that are affected by the given file or directory renaming.
+ buffersToRename := make(map[string]string) // old path -> new path
+
+ for path := range e.buffers {
+ abs := e.sandbox.Workdir.AbsPath(path)
+ if oldAbs == abs || source.InDir(oldAbs, abs) {
+ rel, err := filepath.Rel(oldAbs, abs)
+ if err != nil {
+ return nil, nil, fmt.Errorf("filepath.Rel(%q, %q): %v", oldAbs, abs, err)
+ }
+ nabs := filepath.Join(newAbs, rel)
+ newPath := e.sandbox.Workdir.RelPath(nabs)
+ buffersToRename[path] = newPath
+ }
+ }
+
+ // Update buffers, and build protocol changes.
+ for old, new := range buffersToRename {
+ buf := e.buffers[old]
+ delete(e.buffers, old)
+ buf.version = 1
+ buf.path = new
+ e.buffers[new] = buf
+
+ closed = append(closed, e.TextDocumentIdentifier(old))
+ opened = append(opened, e.textDocumentItem(buf))
+ }
+
+ return closed, opened, nil
+}
+
+func (e *Editor) applyDocumentChange(ctx context.Context, change protocol.DocumentChanges) error {
+ if change.RenameFile != nil {
+ oldPath := e.sandbox.Workdir.URIToPath(change.RenameFile.OldURI)
+ newPath := e.sandbox.Workdir.URIToPath(change.RenameFile.NewURI)
+
+ return e.RenameFile(ctx, oldPath, newPath)
+ }
+ if change.TextDocumentEdit != nil {
+ return e.applyTextDocumentEdit(ctx, *change.TextDocumentEdit)
+ }
+ panic("Internal error: one of RenameFile or TextDocumentEdit must be set")
+}
+
+func (e *Editor) applyTextDocumentEdit(ctx context.Context, change protocol.TextDocumentEdit) error {
+ path := e.sandbox.Workdir.URIToPath(change.TextDocument.URI)
+ if ver := int32(e.BufferVersion(path)); ver != change.TextDocument.Version {
+ return fmt.Errorf("buffer versions for %q do not match: have %d, editing %d", path, ver, change.TextDocument.Version)
+ }
+ if !e.HasBuffer(path) {
+ err := e.OpenFile(ctx, path)
+ if os.IsNotExist(err) {
+ // TODO: it's unclear if this is correct. Here we create the buffer (with
+ // version 1), then apply edits. Perhaps we should apply the edits before
+ // sending the didOpen notification.
+ e.CreateBuffer(ctx, path, "")
+ err = nil
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return e.EditBuffer(ctx, path, change.Edits)
+}
+
+// Config returns the current editor configuration.
+func (e *Editor) Config() EditorConfig {
+ e.mu.Lock()
+ defer e.mu.Unlock()
+ return e.config
+}
+
+// ChangeConfiguration sets the new editor configuration, and if applicable
+// sends a didChangeConfiguration notification.
+//
+// An error is returned if the change notification failed to send.
+func (e *Editor) ChangeConfiguration(ctx context.Context, newConfig EditorConfig) error {
+ e.mu.Lock()
+ e.config = newConfig
+ e.mu.Unlock() // don't hold e.mu during server calls
+ if e.Server != nil {
+ var params protocol.DidChangeConfigurationParams // empty: gopls ignores the Settings field
+ if err := e.Server.DidChangeConfiguration(ctx, &params); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// ChangeWorkspaceFolders sets the new workspace folders, and sends a
+// didChangeWorkspaceFolders notification to the server.
+//
+// The given folders must all be unique.
+func (e *Editor) ChangeWorkspaceFolders(ctx context.Context, folders []string) error {
+ // capture existing folders so that we can compute the change.
+ e.mu.Lock()
+ oldFolders := e.makeWorkspaceFoldersLocked()
+ e.config.WorkspaceFolders = folders
+ newFolders := e.makeWorkspaceFoldersLocked()
+ e.mu.Unlock()
+
+ if e.Server == nil {
+ return nil
+ }
+
+ var params protocol.DidChangeWorkspaceFoldersParams
+
+ // Keep track of old workspace folders that must be removed.
+ toRemove := make(map[protocol.URI]protocol.WorkspaceFolder)
+ for _, folder := range oldFolders {
+ toRemove[folder.URI] = folder
+ }
+
+ // Sanity check: if we see a folder twice the algorithm below doesn't work,
+ // so track seen folders to ensure that we panic in that case.
+ seen := make(map[protocol.URI]protocol.WorkspaceFolder)
+ for _, folder := range newFolders {
+ if _, ok := seen[folder.URI]; ok {
+ panic(fmt.Sprintf("folder %s seen twice", folder.URI))
+ }
+
+ // If this folder already exists, we don't want to remove it.
+ // Otherwise, we need to add it.
+ if _, ok := toRemove[folder.URI]; ok {
+ delete(toRemove, folder.URI)
+ } else {
+ params.Event.Added = append(params.Event.Added, folder)
+ }
+ }
+
+ for _, v := range toRemove {
+ params.Event.Removed = append(params.Event.Removed, v)
+ }
+
+ return e.Server.DidChangeWorkspaceFolders(ctx, &params)
+}
+
+// CodeAction executes a codeAction request on the server.
+// If loc.Range is zero, the whole file is implied.
+func (e *Editor) CodeAction(ctx context.Context, loc protocol.Location, diagnostics []protocol.Diagnostic) ([]protocol.CodeAction, error) {
+ if e.Server == nil {
+ return nil, nil
+ }
+ path := e.sandbox.Workdir.URIToPath(loc.URI)
+ e.mu.Lock()
+ _, ok := e.buffers[path]
+ e.mu.Unlock()
+ if !ok {
+ return nil, fmt.Errorf("buffer %q is not open", path)
+ }
+ params := &protocol.CodeActionParams{
+ TextDocument: e.TextDocumentIdentifier(path),
+ Context: protocol.CodeActionContext{
+ Diagnostics: diagnostics,
+ },
+ Range: loc.Range, // may be zero
+ }
+ lens, err := e.Server.CodeAction(ctx, params)
+ if err != nil {
+ return nil, err
+ }
+ return lens, nil
+}
+
+// Hover triggers a hover at the given position in an open buffer.
+func (e *Editor) Hover(ctx context.Context, loc protocol.Location) (*protocol.MarkupContent, protocol.Location, error) {
+ if err := e.checkBufferLocation(loc); err != nil {
+ return nil, protocol.Location{}, err
+ }
+ params := &protocol.HoverParams{}
+ params.TextDocument.URI = loc.URI
+ params.Position = loc.Range.Start
+
+ resp, err := e.Server.Hover(ctx, params)
+ if err != nil {
+ return nil, protocol.Location{}, fmt.Errorf("hover: %w", err)
+ }
+ if resp == nil {
+ return nil, protocol.Location{}, nil
+ }
+ return &resp.Contents, protocol.Location{URI: loc.URI, Range: resp.Range}, nil
+}
+
+func (e *Editor) DocumentLink(ctx context.Context, path string) ([]protocol.DocumentLink, error) {
+ if e.Server == nil {
+ return nil, nil
+ }
+ params := &protocol.DocumentLinkParams{}
+ params.TextDocument.URI = e.sandbox.Workdir.URI(path)
+ return e.Server.DocumentLink(ctx, params)
+}
+
+func (e *Editor) DocumentHighlight(ctx context.Context, loc protocol.Location) ([]protocol.DocumentHighlight, error) {
+ if e.Server == nil {
+ return nil, nil
+ }
+ if err := e.checkBufferLocation(loc); err != nil {
+ return nil, err
+ }
+ params := &protocol.DocumentHighlightParams{}
+ params.TextDocument.URI = loc.URI
+ params.Position = loc.Range.Start
+
+ return e.Server.DocumentHighlight(ctx, params)
+}
diff --git a/gopls/internal/lsp/fake/editor_test.go b/gopls/internal/lsp/fake/editor_test.go
new file mode 100644
index 000000000..cc8a14744
--- /dev/null
+++ b/gopls/internal/lsp/fake/editor_test.go
@@ -0,0 +1,61 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fake
+
+import (
+ "context"
+ "testing"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+)
+
+const exampleProgram = `
+-- go.mod --
+go 1.12
+-- main.go --
+package main
+
+import "fmt"
+
+func main() {
+ fmt.Println("Hello World.")
+}
+`
+
+func TestClientEditing(t *testing.T) {
+ ws, err := NewSandbox(&SandboxConfig{Files: UnpackTxt(exampleProgram)})
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer ws.Close()
+ ctx := context.Background()
+ editor := NewEditor(ws, EditorConfig{})
+ if err := editor.OpenFile(ctx, "main.go"); err != nil {
+ t.Fatal(err)
+ }
+ if err := editor.EditBuffer(ctx, "main.go", []protocol.TextEdit{
+ {
+ Range: protocol.Range{
+ Start: protocol.Position{Line: 5, Character: 14},
+ End: protocol.Position{Line: 5, Character: 26},
+ },
+ NewText: "Hola, mundo.",
+ },
+ }); err != nil {
+ t.Fatal(err)
+ }
+ got := editor.buffers["main.go"].text()
+ want := `package main
+
+import "fmt"
+
+func main() {
+ fmt.Println("Hola, mundo.")
+}
+`
+ if got != want {
+ t.Errorf("got text %q, want %q", got, want)
+ }
+}
diff --git a/internal/lsp/fake/proxy.go b/gopls/internal/lsp/fake/proxy.go
index 9e56efeb1..9e56efeb1 100644
--- a/internal/lsp/fake/proxy.go
+++ b/gopls/internal/lsp/fake/proxy.go
diff --git a/gopls/internal/lsp/fake/sandbox.go b/gopls/internal/lsp/fake/sandbox.go
new file mode 100644
index 000000000..a1557569b
--- /dev/null
+++ b/gopls/internal/lsp/fake/sandbox.go
@@ -0,0 +1,299 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fake
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "golang.org/x/tools/internal/gocommand"
+ "golang.org/x/tools/internal/robustio"
+ "golang.org/x/tools/internal/testenv"
+ "golang.org/x/tools/txtar"
+)
+
+// Sandbox holds a collection of temporary resources to use for working with Go
+// code in tests.
+type Sandbox struct {
+ gopath string
+ rootdir string
+ goproxy string
+ Workdir *Workdir
+ goCommandRunner gocommand.Runner
+}
+
+// SandboxConfig controls the behavior of a test sandbox. The zero value
+// defines a reasonable default.
+type SandboxConfig struct {
+ // RootDir sets the base directory to use when creating temporary
+ // directories. If not specified, defaults to a new temporary directory.
+ RootDir string
+ // Files holds a txtar-encoded archive of files to populate the initial state
+ // of the working directory.
+ //
+ // For convenience, the special substring "$SANDBOX_WORKDIR" is replaced with
+ // the sandbox's resolved working directory before writing files.
+ Files map[string][]byte
+ // InGoPath specifies that the working directory should be within the
+ // temporary GOPATH.
+ InGoPath bool
+ // Workdir configures the working directory of the Sandbox. It behaves as
+ // follows:
+ // - if set to an absolute path, use that path as the working directory.
+ // - if set to a relative path, create and use that path relative to the
+ // sandbox.
+ // - if unset, default to a the 'work' subdirectory of the sandbox.
+ //
+ // This option is incompatible with InGoPath or Files.
+ Workdir string
+ // ProxyFiles holds a txtar-encoded archive of files to populate a file-based
+ // Go proxy.
+ ProxyFiles map[string][]byte
+ // GOPROXY is the explicit GOPROXY value that should be used for the sandbox.
+ //
+ // This option is incompatible with ProxyFiles.
+ GOPROXY string
+}
+
+// NewSandbox creates a collection of named temporary resources, with a
+// working directory populated by the txtar-encoded content in srctxt, and a
+// file-based module proxy populated with the txtar-encoded content in
+// proxytxt.
+//
+// If rootDir is non-empty, it will be used as the root of temporary
+// directories created for the sandbox. Otherwise, a new temporary directory
+// will be used as root.
+//
+// TODO(rfindley): the sandbox abstraction doesn't seem to carry its weight.
+// Sandboxes should be composed out of their building-blocks, rather than via a
+// monolithic configuration.
+func NewSandbox(config *SandboxConfig) (_ *Sandbox, err error) {
+ if config == nil {
+ config = new(SandboxConfig)
+ }
+ if err := validateConfig(*config); err != nil {
+ return nil, fmt.Errorf("invalid SandboxConfig: %v", err)
+ }
+
+ sb := &Sandbox{}
+ defer func() {
+ // Clean up if we fail at any point in this constructor.
+ if err != nil {
+ sb.Close()
+ }
+ }()
+
+ rootDir := config.RootDir
+ if rootDir == "" {
+ rootDir, err = ioutil.TempDir(config.RootDir, "gopls-sandbox-")
+ if err != nil {
+ return nil, fmt.Errorf("creating temporary workdir: %v", err)
+ }
+ }
+ sb.rootdir = rootDir
+ sb.gopath = filepath.Join(sb.rootdir, "gopath")
+ if err := os.Mkdir(sb.gopath, 0755); err != nil {
+ return nil, err
+ }
+ if config.GOPROXY != "" {
+ sb.goproxy = config.GOPROXY
+ } else {
+ proxydir := filepath.Join(sb.rootdir, "proxy")
+ if err := os.Mkdir(proxydir, 0755); err != nil {
+ return nil, err
+ }
+ sb.goproxy, err = WriteProxy(proxydir, config.ProxyFiles)
+ if err != nil {
+ return nil, err
+ }
+ }
+ // Short-circuit writing the workdir if we're given an absolute path, since
+ // this is used for running in an existing directory.
+ // TODO(findleyr): refactor this to be less of a workaround.
+ if filepath.IsAbs(config.Workdir) {
+ sb.Workdir, err = NewWorkdir(config.Workdir, nil)
+ if err != nil {
+ return nil, err
+ }
+ return sb, nil
+ }
+ var workdir string
+ if config.Workdir == "" {
+ if config.InGoPath {
+ // Set the working directory as $GOPATH/src.
+ workdir = filepath.Join(sb.gopath, "src")
+ } else if workdir == "" {
+ workdir = filepath.Join(sb.rootdir, "work")
+ }
+ } else {
+ // relative path
+ workdir = filepath.Join(sb.rootdir, config.Workdir)
+ }
+ if err := os.MkdirAll(workdir, 0755); err != nil {
+ return nil, err
+ }
+ sb.Workdir, err = NewWorkdir(workdir, config.Files)
+ if err != nil {
+ return nil, err
+ }
+ return sb, nil
+}
+
+// Tempdir creates a new temp directory with the given txtar-encoded files. It
+// is the responsibility of the caller to call os.RemoveAll on the returned
+// file path when it is no longer needed.
+func Tempdir(files map[string][]byte) (string, error) {
+ dir, err := ioutil.TempDir("", "gopls-tempdir-")
+ if err != nil {
+ return "", err
+ }
+ for name, data := range files {
+ if err := writeFileData(name, data, RelativeTo(dir)); err != nil {
+ return "", fmt.Errorf("writing to tempdir: %w", err)
+ }
+ }
+ return dir, nil
+}
+
+func UnpackTxt(txt string) map[string][]byte {
+ dataMap := make(map[string][]byte)
+ archive := txtar.Parse([]byte(txt))
+ for _, f := range archive.Files {
+ if _, ok := dataMap[f.Name]; ok {
+ panic(fmt.Sprintf("found file %q twice", f.Name))
+ }
+ dataMap[f.Name] = f.Data
+ }
+ return dataMap
+}
+
+func validateConfig(config SandboxConfig) error {
+ if filepath.IsAbs(config.Workdir) && (len(config.Files) > 0 || config.InGoPath) {
+ return errors.New("absolute Workdir cannot be set in conjunction with Files or InGoPath")
+ }
+ if config.Workdir != "" && config.InGoPath {
+ return errors.New("Workdir cannot be set in conjunction with InGoPath")
+ }
+ if config.GOPROXY != "" && config.ProxyFiles != nil {
+ return errors.New("GOPROXY cannot be set in conjunction with ProxyFiles")
+ }
+ return nil
+}
+
+// splitModuleVersionPath extracts module information from files stored in the
+// directory structure modulePath@version/suffix.
+// For example:
+//
+// splitModuleVersionPath("mod.com@v1.2.3/package") = ("mod.com", "v1.2.3", "package")
+func splitModuleVersionPath(path string) (modulePath, version, suffix string) {
+ parts := strings.Split(path, "/")
+ var modulePathParts []string
+ for i, p := range parts {
+ if strings.Contains(p, "@") {
+ mv := strings.SplitN(p, "@", 2)
+ modulePathParts = append(modulePathParts, mv[0])
+ return strings.Join(modulePathParts, "/"), mv[1], strings.Join(parts[i+1:], "/")
+ }
+ modulePathParts = append(modulePathParts, p)
+ }
+ // Default behavior: this is just a module path.
+ return path, "", ""
+}
+
+func (sb *Sandbox) RootDir() string {
+ return sb.rootdir
+}
+
+// GOPATH returns the value of the Sandbox GOPATH.
+func (sb *Sandbox) GOPATH() string {
+ return sb.gopath
+}
+
+// GoEnv returns the default environment variables that can be used for
+// invoking Go commands in the sandbox.
+func (sb *Sandbox) GoEnv() map[string]string {
+ vars := map[string]string{
+ "GOPATH": sb.GOPATH(),
+ "GOPROXY": sb.goproxy,
+ "GO111MODULE": "",
+ "GOSUMDB": "off",
+ "GOPACKAGESDRIVER": "off",
+ }
+ if testenv.Go1Point() >= 5 {
+ vars["GOMODCACHE"] = ""
+ }
+ return vars
+}
+
+// goCommandInvocation returns a new gocommand.Invocation initialized with the
+// sandbox environment variables and working directory.
+func (sb *Sandbox) goCommandInvocation() gocommand.Invocation {
+ var vars []string
+ for k, v := range sb.GoEnv() {
+ vars = append(vars, fmt.Sprintf("%s=%s", k, v))
+ }
+ inv := gocommand.Invocation{
+ Env: vars,
+ }
+ // sb.Workdir may be nil if we exited the constructor with errors (we call
+ // Close to clean up any partial state from the constructor, which calls
+ // RunGoCommand).
+ if sb.Workdir != nil {
+ inv.WorkingDir = string(sb.Workdir.RelativeTo)
+ }
+ return inv
+}
+
+// RunGoCommand executes a go command in the sandbox. If checkForFileChanges is
+// true, the sandbox scans the working directory and emits file change events
+// for any file changes it finds.
+func (sb *Sandbox) RunGoCommand(ctx context.Context, dir, verb string, args []string, checkForFileChanges bool) error {
+ inv := sb.goCommandInvocation()
+ inv.Verb = verb
+ inv.Args = args
+ if dir != "" {
+ inv.WorkingDir = sb.Workdir.AbsPath(dir)
+ }
+ stdout, stderr, _, err := sb.goCommandRunner.RunRaw(ctx, inv)
+ if err != nil {
+ return fmt.Errorf("go command failed (stdout: %s) (stderr: %s): %v", stdout.String(), stderr.String(), err)
+ }
+ // Since running a go command may result in changes to workspace files,
+ // check if we need to send any any "watched" file events.
+ //
+ // TODO(rFindley): this side-effect can impact the usability of the sandbox
+ // for benchmarks. Consider refactoring.
+ if sb.Workdir != nil && checkForFileChanges {
+ if err := sb.Workdir.CheckForFileChanges(ctx); err != nil {
+ return fmt.Errorf("checking for file changes: %w", err)
+ }
+ }
+ return nil
+}
+
+// GoVersion checks the version of the go command.
+// It returns the X in Go 1.X.
+func (sb *Sandbox) GoVersion(ctx context.Context) (int, error) {
+ inv := sb.goCommandInvocation()
+ return gocommand.GoVersion(ctx, inv, &sb.goCommandRunner)
+}
+
+// Close removes all state associated with the sandbox.
+func (sb *Sandbox) Close() error {
+ var goCleanErr error
+ if sb.gopath != "" {
+ goCleanErr = sb.RunGoCommand(context.Background(), "", "clean", []string{"-modcache"}, false)
+ }
+ err := robustio.RemoveAll(sb.rootdir)
+ if err != nil || goCleanErr != nil {
+ return fmt.Errorf("error(s) cleaning sandbox: cleaning modcache: %v; removing files: %v", goCleanErr, err)
+ }
+ return nil
+}
diff --git a/gopls/internal/lsp/fake/workdir.go b/gopls/internal/lsp/fake/workdir.go
new file mode 100644
index 000000000..29344514d
--- /dev/null
+++ b/gopls/internal/lsp/fake/workdir.go
@@ -0,0 +1,438 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fake
+
+import (
+ "bytes"
+ "context"
+ "crypto/sha256"
+ "fmt"
+ "io/fs"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/robustio"
+)
+
+// RelativeTo is a helper for operations relative to a given directory.
+type RelativeTo string
+
+// AbsPath returns an absolute filesystem path for the workdir-relative path.
+func (r RelativeTo) AbsPath(path string) string {
+ fp := filepath.FromSlash(path)
+ if filepath.IsAbs(fp) {
+ return fp
+ }
+ return filepath.Join(string(r), filepath.FromSlash(path))
+}
+
+// RelPath returns a '/'-encoded path relative to the working directory (or an
+// absolute path if the file is outside of workdir)
+func (r RelativeTo) RelPath(fp string) string {
+ root := string(r)
+ if rel, err := filepath.Rel(root, fp); err == nil && !strings.HasPrefix(rel, "..") {
+ return filepath.ToSlash(rel)
+ }
+ return filepath.ToSlash(fp)
+}
+
+// writeFileData writes content to the relative path, replacing the special
+// token $SANDBOX_WORKDIR with the relative root given by rel. It does not
+// trigger any file events.
+func writeFileData(path string, content []byte, rel RelativeTo) error {
+ content = bytes.ReplaceAll(content, []byte("$SANDBOX_WORKDIR"), []byte(rel))
+ fp := rel.AbsPath(path)
+ if err := os.MkdirAll(filepath.Dir(fp), 0755); err != nil {
+ return fmt.Errorf("creating nested directory: %w", err)
+ }
+ backoff := 1 * time.Millisecond
+ for {
+ err := ioutil.WriteFile(fp, []byte(content), 0644)
+ if err != nil {
+ // This lock file violation is not handled by the robustio package, as it
+ // indicates a real race condition that could be avoided.
+ if isWindowsErrLockViolation(err) {
+ time.Sleep(backoff)
+ backoff *= 2
+ continue
+ }
+ return fmt.Errorf("writing %q: %w", path, err)
+ }
+ return nil
+ }
+}
+
+// isWindowsErrLockViolation reports whether err is ERROR_LOCK_VIOLATION
+// on Windows.
+var isWindowsErrLockViolation = func(err error) bool { return false }
+
+// Workdir is a temporary working directory for tests. It exposes file
+// operations in terms of relative paths, and fakes file watching by triggering
+// events on file operations.
+type Workdir struct {
+ RelativeTo
+
+ watcherMu sync.Mutex
+ watchers []func(context.Context, []protocol.FileEvent)
+
+ fileMu sync.Mutex
+ // File identities we know about, for the purpose of detecting changes.
+ //
+ // Since files is only used for detecting _changes_, we are tolerant of
+ // fileIDs that may have hash and mtime coming from different states of the
+ // file: if either are out of sync, then the next poll should detect a
+ // discrepancy. It is OK if we detect too many changes, but not OK if we miss
+ // changes.
+ //
+ // For that matter, this mechanism for detecting changes can still be flaky
+ // on platforms where mtime is very coarse (such as older versions of WSL).
+ // It would be much better to use a proper fs event library, but we can't
+ // currently import those into x/tools.
+ //
+ // TODO(golang/go#52284): replace this polling mechanism with a
+ // cross-platform library for filesystem notifications.
+ files map[string]fileID
+}
+
+// NewWorkdir writes the txtar-encoded file data in txt to dir, and returns a
+// Workir for operating on these files using
+func NewWorkdir(dir string, files map[string][]byte) (*Workdir, error) {
+ w := &Workdir{RelativeTo: RelativeTo(dir)}
+ for name, data := range files {
+ if err := writeFileData(name, data, w.RelativeTo); err != nil {
+ return nil, fmt.Errorf("writing to workdir: %w", err)
+ }
+ }
+ _, err := w.pollFiles() // poll files to populate the files map.
+ return w, err
+}
+
+// fileID identifies a file version on disk.
+type fileID struct {
+ mtime time.Time
+ hash string // empty if mtime is old enough to be reliabe; otherwise a file digest
+}
+
+func hashFile(data []byte) string {
+ return fmt.Sprintf("%x", sha256.Sum256(data))
+}
+
+// RootURI returns the root URI for this working directory of this scratch
+// environment.
+func (w *Workdir) RootURI() protocol.DocumentURI {
+ return toURI(string(w.RelativeTo))
+}
+
+// AddWatcher registers the given func to be called on any file change.
+func (w *Workdir) AddWatcher(watcher func(context.Context, []protocol.FileEvent)) {
+ w.watcherMu.Lock()
+ w.watchers = append(w.watchers, watcher)
+ w.watcherMu.Unlock()
+}
+
+// URI returns the URI to a the workdir-relative path.
+func (w *Workdir) URI(path string) protocol.DocumentURI {
+ return toURI(w.AbsPath(path))
+}
+
+// URIToPath converts a uri to a workdir-relative path (or an absolute path,
+// if the uri is outside of the workdir).
+func (w *Workdir) URIToPath(uri protocol.DocumentURI) string {
+ fp := uri.SpanURI().Filename()
+ return w.RelPath(fp)
+}
+
+func toURI(fp string) protocol.DocumentURI {
+ return protocol.DocumentURI(span.URIFromPath(fp))
+}
+
+// ReadFile reads a text file specified by a workdir-relative path.
+func (w *Workdir) ReadFile(path string) ([]byte, error) {
+ backoff := 1 * time.Millisecond
+ for {
+ b, err := ioutil.ReadFile(w.AbsPath(path))
+ if err != nil {
+ if runtime.GOOS == "plan9" && strings.HasSuffix(err.Error(), " exclusive use file already open") {
+ // Plan 9 enforces exclusive access to locked files.
+ // Give the owner time to unlock it and retry.
+ time.Sleep(backoff)
+ backoff *= 2
+ continue
+ }
+ return nil, err
+ }
+ return b, nil
+ }
+}
+
+// RegexpSearch searches the file corresponding to path for the first position
+// matching re.
+func (w *Workdir) RegexpSearch(path string, re string) (protocol.Location, error) {
+ content, err := w.ReadFile(path)
+ if err != nil {
+ return protocol.Location{}, err
+ }
+ mapper := protocol.NewMapper(w.URI(path).SpanURI(), content)
+ return regexpLocation(mapper, re)
+}
+
+// RemoveFile removes a workdir-relative file path and notifies watchers of the
+// change.
+func (w *Workdir) RemoveFile(ctx context.Context, path string) error {
+ fp := w.AbsPath(path)
+ if err := robustio.RemoveAll(fp); err != nil {
+ return fmt.Errorf("removing %q: %w", path, err)
+ }
+
+ return w.CheckForFileChanges(ctx)
+}
+
+// WriteFiles writes the text file content to workdir-relative paths and
+// notifies watchers of the changes.
+func (w *Workdir) WriteFiles(ctx context.Context, files map[string]string) error {
+ for path, content := range files {
+ fp := w.AbsPath(path)
+ _, err := os.Stat(fp)
+ if err != nil && !os.IsNotExist(err) {
+ return fmt.Errorf("checking if %q exists: %w", path, err)
+ }
+ if err := writeFileData(path, []byte(content), w.RelativeTo); err != nil {
+ return err
+ }
+ }
+ return w.CheckForFileChanges(ctx)
+}
+
+// WriteFile writes text file content to a workdir-relative path and notifies
+// watchers of the change.
+func (w *Workdir) WriteFile(ctx context.Context, path, content string) error {
+ return w.WriteFiles(ctx, map[string]string{path: content})
+}
+
+func (w *Workdir) fileEvent(path string, changeType protocol.FileChangeType) protocol.FileEvent {
+ return protocol.FileEvent{
+ URI: w.URI(path),
+ Type: changeType,
+ }
+}
+
+// RenameFile performs an on disk-renaming of the workdir-relative oldPath to
+// workdir-relative newPath, and notifies watchers of the changes.
+//
+// oldPath must either be a regular file or in the same directory as newPath.
+func (w *Workdir) RenameFile(ctx context.Context, oldPath, newPath string) error {
+ oldAbs := w.AbsPath(oldPath)
+ newAbs := w.AbsPath(newPath)
+
+ // For os.Rename, “OS-specific restrictions may apply when oldpath and newpath
+ // are in different directories.” If that applies here, we may fall back to
+ // ReadFile, WriteFile, and RemoveFile to perform the rename non-atomically.
+ //
+ // However, the fallback path only works for regular files: renaming a
+ // directory would be much more complex and isn't needed for our tests.
+ fallbackOk := false
+ if filepath.Dir(oldAbs) != filepath.Dir(newAbs) {
+ fi, err := os.Stat(oldAbs)
+ if err == nil && !fi.Mode().IsRegular() {
+ return &os.PathError{
+ Op: "RenameFile",
+ Path: oldPath,
+ Err: fmt.Errorf("%w: file is not regular and not in the same directory as %s", os.ErrInvalid, newPath),
+ }
+ }
+ fallbackOk = true
+ }
+
+ var renameErr error
+ const debugFallback = false
+ if fallbackOk && debugFallback {
+ renameErr = fmt.Errorf("%w: debugging fallback path", os.ErrInvalid)
+ } else {
+ renameErr = robustio.Rename(oldAbs, newAbs)
+ }
+ if renameErr != nil {
+ if !fallbackOk {
+ return renameErr // The OS-specific Rename restrictions do not apply.
+ }
+
+ content, err := w.ReadFile(oldPath)
+ if err != nil {
+ // If we can't even read the file, the error from Rename may be accurate.
+ return renameErr
+ }
+ fi, err := os.Stat(newAbs)
+ if err == nil {
+ if fi.IsDir() {
+ // “If newpath already exists and is not a directory, Rename replaces it.”
+ // But if it is a directory, maybe not?
+ return renameErr
+ }
+ // On most platforms, Rename replaces the named file with a new file,
+ // rather than overwriting the existing file it in place. Mimic that
+ // behavior here.
+ if err := robustio.RemoveAll(newAbs); err != nil {
+ // Maybe we don't have permission to replace newPath?
+ return renameErr
+ }
+ } else if !os.IsNotExist(err) {
+ // If the destination path already exists or there is some problem with it,
+ // the error from Rename may be accurate.
+ return renameErr
+ }
+ if writeErr := writeFileData(newPath, []byte(content), w.RelativeTo); writeErr != nil {
+ // At this point we have tried to actually write the file.
+ // If it still doesn't exist, assume that the error from Rename was accurate:
+ // for example, maybe we don't have permission to create the new path.
+ // Otherwise, return the error from the write, which may indicate some
+ // other problem (such as a full disk).
+ if _, statErr := os.Stat(newAbs); !os.IsNotExist(statErr) {
+ return writeErr
+ }
+ return renameErr
+ }
+ if err := robustio.RemoveAll(oldAbs); err != nil {
+ // If we failed to remove the old file, that may explain the Rename error too.
+ // Make a best effort to back out the write to the new path.
+ robustio.RemoveAll(newAbs)
+ return renameErr
+ }
+ }
+
+ return w.CheckForFileChanges(ctx)
+}
+
+// ListFiles returns a new sorted list of the relative paths of files in dir,
+// recursively.
+func (w *Workdir) ListFiles(dir string) ([]string, error) {
+ absDir := w.AbsPath(dir)
+ var paths []string
+ if err := filepath.Walk(absDir, func(fp string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ if info.Mode()&(fs.ModeDir|fs.ModeSymlink) == 0 {
+ paths = append(paths, w.RelPath(fp))
+ }
+ return nil
+ }); err != nil {
+ return nil, err
+ }
+ sort.Strings(paths)
+ return paths, nil
+}
+
+// CheckForFileChanges walks the working directory and checks for any files
+// that have changed since the last poll.
+func (w *Workdir) CheckForFileChanges(ctx context.Context) error {
+ evts, err := w.pollFiles()
+ if err != nil {
+ return err
+ }
+ if len(evts) == 0 {
+ return nil
+ }
+ w.watcherMu.Lock()
+ watchers := make([]func(context.Context, []protocol.FileEvent), len(w.watchers))
+ copy(watchers, w.watchers)
+ w.watcherMu.Unlock()
+ for _, w := range watchers {
+ w(ctx, evts)
+ }
+ return nil
+}
+
+// pollFiles updates w.files and calculates FileEvents corresponding to file
+// state changes since the last poll. It does not call sendEvents.
+func (w *Workdir) pollFiles() ([]protocol.FileEvent, error) {
+ w.fileMu.Lock()
+ defer w.fileMu.Unlock()
+
+ newFiles := make(map[string]fileID)
+ var evts []protocol.FileEvent
+ if err := filepath.Walk(string(w.RelativeTo), func(fp string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ // Skip directories and symbolic links (which may be links to directories).
+ //
+ // The latter matters for repos like Kubernetes, which use symlinks.
+ if info.Mode()&(fs.ModeDir|fs.ModeSymlink) != 0 {
+ return nil
+ }
+
+ // Opt: avoid reading the file if mtime is sufficently old to be reliable.
+ //
+ // If mtime is recent, it may not sufficiently identify the file contents:
+ // a subsequent write could result in the same mtime. For these cases, we
+ // must read the file contents.
+ id := fileID{mtime: info.ModTime()}
+ if time.Since(info.ModTime()) < 2*time.Second {
+ data, err := ioutil.ReadFile(fp)
+ if err != nil {
+ return err
+ }
+ id.hash = hashFile(data)
+ }
+ path := w.RelPath(fp)
+ newFiles[path] = id
+
+ if w.files != nil {
+ oldID, ok := w.files[path]
+ delete(w.files, path)
+ switch {
+ case !ok:
+ evts = append(evts, protocol.FileEvent{
+ URI: w.URI(path),
+ Type: protocol.Created,
+ })
+ case oldID != id:
+ changed := true
+
+ // Check whether oldID and id do not match because oldID was polled at
+ // a recent enough to time such as to require hashing.
+ //
+ // In this case, read the content to check whether the file actually
+ // changed.
+ if oldID.mtime.Equal(id.mtime) && oldID.hash != "" && id.hash == "" {
+ data, err := ioutil.ReadFile(fp)
+ if err != nil {
+ return err
+ }
+ if hashFile(data) == oldID.hash {
+ changed = false
+ }
+ }
+ if changed {
+ evts = append(evts, protocol.FileEvent{
+ URI: w.URI(path),
+ Type: protocol.Changed,
+ })
+ }
+ }
+ }
+
+ return nil
+ }); err != nil {
+ return nil, err
+ }
+
+ // Any remaining files must have been deleted.
+ for path := range w.files {
+ evts = append(evts, protocol.FileEvent{
+ URI: w.URI(path),
+ Type: protocol.Deleted,
+ })
+ }
+ w.files = newFiles
+ return evts, nil
+}
diff --git a/gopls/internal/lsp/fake/workdir_test.go b/gopls/internal/lsp/fake/workdir_test.go
new file mode 100644
index 000000000..fe89fa72d
--- /dev/null
+++ b/gopls/internal/lsp/fake/workdir_test.go
@@ -0,0 +1,220 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fake
+
+import (
+ "context"
+ "io/ioutil"
+ "os"
+ "sync"
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+)
+
+const sharedData = `
+-- go.mod --
+go 1.12
+-- nested/README.md --
+Hello World!
+`
+
+// newWorkdir sets up a temporary Workdir with the given txtar-encoded content.
+// It also configures an eventBuffer to receive file event notifications. These
+// notifications are sent synchronously for each operation, such that once a
+// workdir file operation has returned the caller can expect that any relevant
+// file notifications are present in the buffer.
+//
+// It is the caller's responsibility to call the returned cleanup function.
+func newWorkdir(t *testing.T, txt string) (*Workdir, *eventBuffer, func()) {
+ t.Helper()
+
+ tmpdir, err := ioutil.TempDir("", "goplstest-workdir-")
+ if err != nil {
+ t.Fatal(err)
+ }
+ wd, err := NewWorkdir(tmpdir, UnpackTxt(txt))
+ if err != nil {
+ t.Fatal(err)
+ }
+ cleanup := func() {
+ if err := os.RemoveAll(tmpdir); err != nil {
+ t.Error(err)
+ }
+ }
+
+ buf := new(eventBuffer)
+ wd.AddWatcher(buf.onEvents)
+ return wd, buf, cleanup
+}
+
+// eventBuffer collects events from a file watcher.
+type eventBuffer struct {
+ mu sync.Mutex
+ events []protocol.FileEvent
+}
+
+// onEvents collects adds events to the buffer; to be used with Workdir.AddWatcher.
+func (c *eventBuffer) onEvents(_ context.Context, events []protocol.FileEvent) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ c.events = append(c.events, events...)
+}
+
+// take empties the buffer, returning its previous contents.
+func (c *eventBuffer) take() []protocol.FileEvent {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ evts := c.events
+ c.events = nil
+ return evts
+}
+
+func TestWorkdir_ReadFile(t *testing.T) {
+ wd, _, cleanup := newWorkdir(t, sharedData)
+ defer cleanup()
+
+ got, err := wd.ReadFile("nested/README.md")
+ if err != nil {
+ t.Fatal(err)
+ }
+ want := "Hello World!\n"
+ if got := string(got); got != want {
+ t.Errorf("reading workdir file, got %q, want %q", got, want)
+ }
+}
+
+func TestWorkdir_WriteFile(t *testing.T) {
+ wd, events, cleanup := newWorkdir(t, sharedData)
+ defer cleanup()
+ ctx := context.Background()
+
+ tests := []struct {
+ path string
+ wantType protocol.FileChangeType
+ }{
+ {"data.txt", protocol.Created},
+ {"nested/README.md", protocol.Changed},
+ }
+
+ for _, test := range tests {
+ if err := wd.WriteFile(ctx, test.path, "42"); err != nil {
+ t.Fatal(err)
+ }
+ es := events.take()
+ if got := len(es); got != 1 {
+ t.Fatalf("len(events) = %d, want 1", got)
+ }
+ path := wd.URIToPath(es[0].URI)
+ if path != test.path {
+ t.Errorf("event path = %q, want %q", path, test.path)
+ }
+ if es[0].Type != test.wantType {
+ t.Errorf("event type = %v, want %v", es[0].Type, test.wantType)
+ }
+ got, err := wd.ReadFile(test.path)
+ if err != nil {
+ t.Fatal(err)
+ }
+ want := "42"
+ if got := string(got); got != want {
+ t.Errorf("ws.ReadFile(%q) = %q, want %q", test.path, got, want)
+ }
+ }
+}
+
+// Test for file notifications following file operations.
+func TestWorkdir_FileWatching(t *testing.T) {
+ wd, events, cleanup := newWorkdir(t, "")
+ defer cleanup()
+ ctx := context.Background()
+
+ must := func(err error) {
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ type changeMap map[string]protocol.FileChangeType
+ checkEvent := func(wantChanges changeMap) {
+ gotChanges := make(changeMap)
+ for _, e := range events.take() {
+ gotChanges[wd.URIToPath(e.URI)] = e.Type
+ }
+ if diff := cmp.Diff(wantChanges, gotChanges); diff != "" {
+ t.Errorf("mismatching file events (-want +got):\n%s", diff)
+ }
+ }
+
+ must(wd.WriteFile(ctx, "foo.go", "package foo"))
+ checkEvent(changeMap{"foo.go": protocol.Created})
+
+ must(wd.RenameFile(ctx, "foo.go", "bar.go"))
+ checkEvent(changeMap{"foo.go": protocol.Deleted, "bar.go": protocol.Created})
+
+ must(wd.RemoveFile(ctx, "bar.go"))
+ checkEvent(changeMap{"bar.go": protocol.Deleted})
+}
+
+func TestWorkdir_CheckForFileChanges(t *testing.T) {
+ t.Skip("broken on darwin-amd64-10_12")
+ wd, events, cleanup := newWorkdir(t, sharedData)
+ defer cleanup()
+ ctx := context.Background()
+
+ checkChange := func(wantPath string, wantType protocol.FileChangeType) {
+ if err := wd.CheckForFileChanges(ctx); err != nil {
+ t.Fatal(err)
+ }
+ ev := events.take()
+ if len(ev) == 0 {
+ t.Fatal("no file events received")
+ }
+ gotEvt := ev[0]
+ gotPath := wd.URIToPath(gotEvt.URI)
+ // Only check relative path and Type
+ if gotPath != wantPath || gotEvt.Type != wantType {
+ t.Errorf("file events: got %v, want {Path: %s, Type: %v}", gotEvt, wantPath, wantType)
+ }
+ }
+ // Sleep some positive amount of time to ensure a distinct mtime.
+ if err := writeFileData("go.mod", []byte("module foo.test\n"), wd.RelativeTo); err != nil {
+ t.Fatal(err)
+ }
+ checkChange("go.mod", protocol.Changed)
+ if err := writeFileData("newFile", []byte("something"), wd.RelativeTo); err != nil {
+ t.Fatal(err)
+ }
+ checkChange("newFile", protocol.Created)
+ fp := wd.AbsPath("newFile")
+ if err := os.Remove(fp); err != nil {
+ t.Fatal(err)
+ }
+ checkChange("newFile", protocol.Deleted)
+}
+
+func TestSplitModuleVersionPath(t *testing.T) {
+ tests := []struct {
+ path string
+ wantModule, wantVersion, wantSuffix string
+ }{
+ {"foo.com@v1.2.3/bar", "foo.com", "v1.2.3", "bar"},
+ {"foo.com/module@v1.2.3/bar", "foo.com/module", "v1.2.3", "bar"},
+ {"foo.com@v1.2.3", "foo.com", "v1.2.3", ""},
+ {"std@v1.14.0", "std", "v1.14.0", ""},
+ {"another/module/path", "another/module/path", "", ""},
+ }
+
+ for _, test := range tests {
+ module, version, suffix := splitModuleVersionPath(test.path)
+ if module != test.wantModule || version != test.wantVersion || suffix != test.wantSuffix {
+ t.Errorf("splitModuleVersionPath(%q) =\n\t(%q, %q, %q)\nwant\n\t(%q, %q, %q)",
+ test.path, module, version, suffix, test.wantModule, test.wantVersion, test.wantSuffix)
+ }
+ }
+}
diff --git a/gopls/internal/lsp/fake/workdir_windows.go b/gopls/internal/lsp/fake/workdir_windows.go
new file mode 100644
index 000000000..4d4f01527
--- /dev/null
+++ b/gopls/internal/lsp/fake/workdir_windows.go
@@ -0,0 +1,21 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fake
+
+import (
+ "errors"
+ "syscall"
+)
+
+func init() {
+ // constants copied from GOROOT/src/internal/syscall/windows/syscall_windows.go
+ const (
+ ERROR_LOCK_VIOLATION syscall.Errno = 33
+ )
+
+ isWindowsErrLockViolation = func(err error) bool {
+ return errors.Is(err, ERROR_LOCK_VIOLATION)
+ }
+}
diff --git a/gopls/internal/lsp/filecache/filecache.go b/gopls/internal/lsp/filecache/filecache.go
new file mode 100644
index 000000000..a51098579
--- /dev/null
+++ b/gopls/internal/lsp/filecache/filecache.go
@@ -0,0 +1,369 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The filecache package provides a file-based shared durable blob cache.
+//
+// The cache is a machine-global mapping from (kind string, key
+// [32]byte) to []byte, where kind is an identifier describing the
+// namespace or purpose (e.g. "analysis"), and key is a SHA-256 digest
+// of the recipe of the value. (It need not be the digest of the value
+// itself, so you can query the cache without knowing what value the
+// recipe would produce.)
+//
+// The space budget of the cache can be controlled by [SetBudget].
+// Cache entries may be evicted at any time or in any order.
+// Note that "du -sh $GOPLSCACHE" may report a disk usage
+// figure that is rather larger (e.g. 50%) than the budget because
+// it rounds up partial disk blocks.
+//
+// The Get and Set operations are concurrency-safe.
+package filecache
+
+import (
+ "bytes"
+ "crypto/sha256"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "path/filepath"
+ "sort"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/tools/internal/lockedfile"
+)
+
+// Get retrieves from the cache and returns a newly allocated
+// copy of the value most recently supplied to Set(kind, key),
+// possibly by another process.
+// Get returns ErrNotFound if the value was not found.
+func Get(kind string, key [32]byte) ([]byte, error) {
+ name := filename(kind, key)
+ data, err := lockedfile.Read(name)
+ if err != nil {
+ if errors.Is(err, os.ErrNotExist) {
+ return nil, ErrNotFound
+ }
+ return nil, err
+ }
+
+ // Verify that the Write was complete
+ // by checking the recorded length.
+ if len(data) < 8 {
+ return nil, ErrNotFound // cache entry is incomplete
+ }
+ if length := binary.LittleEndian.Uint64(data); int(length) != len(data)-8 {
+ return nil, ErrNotFound // cache entry is incomplete (or too long!)
+ }
+ data = data[8:]
+
+ // Update file time for use by LRU eviction.
+ // (This turns every read into a write operation.
+ // If this is a performance problem, we should
+ // touch the files aynchronously.)
+ //
+ // (Traditionally the access time would be updated
+ // automatically, but for efficiency most POSIX systems have
+ // for many years set the noatime mount option to avoid every
+ // open or read operation entailing a metadata write.)
+ now := time.Now()
+ if err := os.Chtimes(name, now, now); err != nil {
+ return nil, fmt.Errorf("failed to update access time: %w", err)
+ }
+
+ return data, nil
+}
+
+// ErrNotFound is the distinguished error
+// returned by Get when the key is not found.
+var ErrNotFound = fmt.Errorf("not found")
+
+// Set updates the value in the cache.
+func Set(kind string, key [32]byte, value []byte) error {
+ name := filename(kind, key)
+ if err := os.MkdirAll(filepath.Dir(name), 0700); err != nil {
+ return err
+ }
+
+ // In the unlikely event of a short write (e.g. ENOSPC)
+ // followed by process termination (e.g. a power cut), we
+ // don't want a reader to see a short file, so we record
+ // the expected length first and verify it in Get.
+ var length [8]byte
+ binary.LittleEndian.PutUint64(length[:], uint64(len(value)))
+ header := bytes.NewReader(length[:])
+ payload := bytes.NewReader(value)
+
+ // Windows doesn't support atomic rename--we tried MoveFile,
+ // MoveFileEx, ReplaceFileEx, and SetFileInformationByHandle
+ // of RenameFileInfo, all to no avail--so instead we use
+ // advisory file locking, which is only about 2x slower even
+ // on POSIX platforms with atomic rename.
+ return lockedfile.Write(name, io.MultiReader(header, payload), 0600)
+}
+
+var budget int64 = 1e9 // 1GB
+
+// SetBudget sets a soft limit on disk usage of the cache (in bytes)
+// and returns the previous value. Supplying a negative value queries
+// the current value without changing it.
+//
+// If two gopls processes have different budgets, the one with the
+// lower budget will collect garbage more actively, but both will
+// observe the effect.
+func SetBudget(new int64) (old int64) {
+ if new < 0 {
+ return atomic.LoadInt64(&budget)
+ }
+ return atomic.SwapInt64(&budget, new)
+}
+
+// --- implementation ----
+
+// filename returns the cache entry of the specified kind and key.
+//
+// A typical cache entry is a file name such as:
+//
+// $HOME/Library/Caches / gopls / VVVVVVVV / kind / KK / KKKK...KKKK
+//
+// The portions separated by spaces are as follows:
+// - The user's preferred cache directory; the default value varies by OS.
+// - The constant "gopls".
+// - The "version", 32 bits of the digest of the gopls executable.
+// - The kind or purpose of this cache subtree (e.g. "analysis").
+// - The first 8 bits of the key, to avoid huge directories.
+// - The full 256 bits of the key.
+//
+// Once a file is written its contents are never modified, though it
+// may be atomically replaced or removed.
+//
+// New versions of gopls are free to reorganize the contents of the
+// version directory as needs evolve. But all versions of gopls must
+// in perpetuity treat the "gopls" directory in a common fashion.
+//
+// In particular, each gopls process attempts to garbage collect
+// the entire gopls directory so that newer binaries can clean up
+// after older ones: in the development cycle especially, new
+// new versions may be created frequently.
+func filename(kind string, key [32]byte) string {
+ hex := fmt.Sprintf("%x", key)
+ return filepath.Join(getCacheDir(), kind, hex[:2], hex)
+}
+
+// getCacheDir returns the persistent cache directory of all processes
+// running this version of the gopls executable.
+//
+// It must incorporate the hash of the executable so that we needn't
+// worry about incompatible changes to the file format or changes to
+// the algorithm that produced the index.
+func getCacheDir() string {
+ cacheDirOnce.Do(func() {
+ // Use user's preferred cache directory.
+ userDir := os.Getenv("GOPLSCACHE")
+ if userDir == "" {
+ var err error
+ userDir, err = os.UserCacheDir()
+ if err != nil {
+ userDir = os.TempDir()
+ }
+ }
+ goplsDir := filepath.Join(userDir, "gopls")
+
+ // UserCacheDir may return a nonexistent directory
+ // (in which case we must create it, which may fail),
+ // or it may return a non-writable directory, in
+ // which case we should ideally respect the user's express
+ // wishes (e.g. XDG_CACHE_HOME) and not write somewhere else.
+ // Sadly UserCacheDir doesn't currently let us distinguish
+ // such intent from accidental misconfiguraton such as HOME=/
+ // in a CI builder. So, we check whether the gopls subdirectory
+ // can be created (or already exists) and not fall back to /tmp.
+ // See also https://github.com/golang/go/issues/57638.
+ if os.MkdirAll(goplsDir, 0700) != nil {
+ goplsDir = filepath.Join(os.TempDir(), "gopls")
+ }
+
+ // Start the garbage collector.
+ go gc(goplsDir)
+
+ // Compute the hash of this executable (~20ms) and create a subdirectory.
+ hash, err := hashExecutable()
+ if err != nil {
+ log.Fatalf("can't hash gopls executable: %v", err)
+ }
+ // Use only 32 bits of the digest to avoid unwieldy filenames.
+ // It's not an adversarial situation.
+ cacheDir = filepath.Join(goplsDir, fmt.Sprintf("%x", hash[:4]))
+ if err := os.MkdirAll(cacheDir, 0700); err != nil {
+ log.Fatalf("can't create cache: %v", err)
+ }
+ })
+ return cacheDir
+}
+
+var (
+ cacheDirOnce sync.Once
+ cacheDir string // only accessed by getCacheDir
+)
+
+func hashExecutable() (hash [32]byte, err error) {
+ exe, err := os.Executable()
+ if err != nil {
+ return hash, err
+ }
+ f, err := os.Open(exe)
+ if err != nil {
+ return hash, err
+ }
+ defer f.Close()
+ h := sha256.New()
+ if _, err := io.Copy(h, f); err != nil {
+ return hash, fmt.Errorf("can't read executable: %w", err)
+ }
+ h.Sum(hash[:0])
+ return hash, nil
+}
+
+// gc runs forever, periodically deleting files from the gopls
+// directory until the space budget is no longer exceeded, and also
+// deleting files older than the maximum age, regardless of budget.
+//
+// One gopls process may delete garbage created by a different gopls
+// process, possibly running a different version of gopls, possibly
+// running concurrently.
+func gc(goplsDir string) {
+ const period = 1 * time.Minute // period between collections
+ // Sleep statDelay*batchSize between stats to smooth out I/O.
+ //
+ // The constants below were chosen using the following heuristics:
+ // - 1GB of filecache is on the order of ~100-200k files, in which case
+ // 100μs delay per file introduces 10-20s of additional walk time, less
+ // than the 1m gc period.
+ // - Processing batches of stats at once is much more efficient than
+ // sleeping after every stat (due to OS optimizations).
+ const statDelay = 100 * time.Microsecond // average delay between stats, to smooth out I/O
+ const batchSize = 1000 // # of stats to process before sleeping
+ const maxAge = 5 * 24 * time.Hour // max time since last access before file is deleted
+
+ // The macOS filesystem is strikingly slow, at least on some machines.
+ // /usr/bin/find achieves only about 25,000 stats per second
+ // at full speed (no pause between items), meaning a large
+ // cache may take several minutes to scan.
+ // We must ensure that short-lived processes (crucially,
+ // tests) are able to make progress sweeping garbage.
+ //
+ // (gopls' caches should never actually get this big in
+ // practice: the example mentioned above resulted from a bug
+ // that caused filecache to fail to delete any files.)
+
+ const debug = false
+
+ // Names of all directories found in first pass; nil thereafter.
+ dirs := make(map[string]bool)
+
+ for {
+ // Enumerate all files in the cache.
+ type item struct {
+ path string
+ stat os.FileInfo
+ }
+ var files []item
+ start := time.Now()
+ var total int64 // bytes
+ _ = filepath.Walk(goplsDir, func(path string, stat os.FileInfo, err error) error {
+ if err != nil {
+ return nil // ignore errors
+ }
+ if stat.IsDir() {
+ // Collect (potentially empty) directories.
+ if dirs != nil {
+ dirs[path] = true
+ }
+ } else {
+ // Unconditionally delete files we haven't used in ages.
+ // (We do this here, not in the second loop, so that we
+ // perform age-based collection even in short-lived processes.)
+ age := time.Since(stat.ModTime())
+ if age > maxAge {
+ if debug {
+ log.Printf("age: deleting stale file %s (%dB, age %v)",
+ path, stat.Size(), age)
+ }
+ os.Remove(path) // ignore error
+ } else {
+ files = append(files, item{path, stat})
+ total += stat.Size()
+ if debug && len(files)%1000 == 0 {
+ log.Printf("filecache: checked %d files in %v", len(files), time.Since(start))
+ }
+ if len(files)%batchSize == 0 {
+ time.Sleep(batchSize * statDelay)
+ }
+ }
+ }
+ return nil
+ })
+
+ // Sort oldest files first.
+ sort.Slice(files, func(i, j int) bool {
+ return files[i].stat.ModTime().Before(files[j].stat.ModTime())
+ })
+
+ // Delete oldest files until we're under budget.
+ budget := atomic.LoadInt64(&budget)
+ for _, file := range files {
+ if total < budget {
+ break
+ }
+ if debug {
+ age := time.Since(file.stat.ModTime())
+ log.Printf("budget: deleting stale file %s (%dB, age %v)",
+ file.path, file.stat.Size(), age)
+ }
+ os.Remove(file.path) // ignore error
+ total -= file.stat.Size()
+ }
+
+ time.Sleep(period)
+
+ // Once only, delete all directories.
+ // This will succeed only for the empty ones,
+ // and ensures that stale directories (whose
+ // files have been deleted) are removed eventually.
+ // They don't take up much space but they do slow
+ // down the traversal.
+ //
+ // We do this after the sleep to minimize the
+ // race against Set, which may create a directory
+ // that is momentarily empty.
+ //
+ // (Test processes don't live that long, so
+ // this may not be reached on the CI builders.)
+ if dirs != nil {
+ dirnames := make([]string, 0, len(dirs))
+ for dir := range dirs {
+ dirnames = append(dirnames, dir)
+ }
+ dirs = nil
+
+ // Descending length order => children before parents.
+ sort.Slice(dirnames, func(i, j int) bool {
+ return len(dirnames[i]) > len(dirnames[j])
+ })
+ var deleted int
+ for _, dir := range dirnames {
+ if os.Remove(dir) == nil { // ignore error
+ deleted++
+ }
+ }
+ if debug {
+ log.Printf("deleted %d empty directories", deleted)
+ }
+ }
+ }
+}
diff --git a/gopls/internal/lsp/filecache/filecache_test.go b/gopls/internal/lsp/filecache/filecache_test.go
new file mode 100644
index 000000000..c96cb16eb
--- /dev/null
+++ b/gopls/internal/lsp/filecache/filecache_test.go
@@ -0,0 +1,215 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package filecache_test
+
+// This file defines tests of the API of the filecache package.
+//
+// Some properties (e.g. garbage collection) cannot be exercised
+// through the API, so this test does not attempt to do so.
+
+import (
+ "bytes"
+ cryptorand "crypto/rand"
+ "fmt"
+ "log"
+ mathrand "math/rand"
+ "os"
+ "os/exec"
+ "strconv"
+ "testing"
+
+ "golang.org/x/sync/errgroup"
+ "golang.org/x/tools/gopls/internal/lsp/filecache"
+)
+
+func TestBasics(t *testing.T) {
+ const kind = "TestBasics"
+ key := uniqueKey() // never used before
+ value := []byte("hello")
+
+ // Get of a never-seen key returns not found.
+ if _, err := filecache.Get(kind, key); err != filecache.ErrNotFound {
+ t.Errorf("Get of random key returned err=%q, want not found", err)
+ }
+
+ // Set of a never-seen key and a small value succeeds.
+ if err := filecache.Set(kind, key, value); err != nil {
+ t.Errorf("Set failed: %v", err)
+ }
+
+ // Get of the key returns a copy of the value.
+ if got, err := filecache.Get(kind, key); err != nil {
+ t.Errorf("Get after Set failed: %v", err)
+ } else if string(got) != string(value) {
+ t.Errorf("Get after Set returned different value: got %q, want %q", got, value)
+ }
+
+ // The kind is effectively part of the key.
+ if _, err := filecache.Get("different-kind", key); err != filecache.ErrNotFound {
+ t.Errorf("Get with wrong kind returned err=%q, want not found", err)
+ }
+}
+
+// TestConcurrency exercises concurrent access to the same entry.
+func TestConcurrency(t *testing.T) {
+ const kind = "TestConcurrency"
+ key := uniqueKey()
+ const N = 100 // concurrency level
+
+ // Construct N distinct values, each larger
+ // than a typical 4KB OS file buffer page.
+ var values [N][8192]byte
+ for i := range values {
+ if _, err := mathrand.Read(values[i][:]); err != nil {
+ t.Fatalf("rand: %v", err)
+ }
+ }
+
+ // get calls Get and verifies that the cache entry
+ // matches one of the values passed to Set.
+ get := func(mustBeFound bool) error {
+ got, err := filecache.Get(kind, key)
+ if err != nil {
+ if err == filecache.ErrNotFound && !mustBeFound {
+ return nil // not found
+ }
+ return err
+ }
+ for _, want := range values {
+ if bytes.Equal(want[:], got) {
+ return nil // a match
+ }
+ }
+ return fmt.Errorf("Get returned a value that was never Set")
+ }
+
+ // Perform N concurrent calls to Set and Get.
+ // All sets must succeed.
+ // All gets must return nothing, or one of the Set values;
+ // there is no third possibility.
+ var group errgroup.Group
+ for i := range values {
+ i := i
+ group.Go(func() error { return filecache.Set(kind, key, values[i][:]) })
+ group.Go(func() error { return get(false) })
+ }
+ if err := group.Wait(); err != nil {
+ t.Fatal(err)
+ }
+
+ // A final Get must report one of the values that was Set.
+ if err := get(true); err != nil {
+ t.Fatalf("final Get failed: %v", err)
+ }
+}
+
+const (
+ testIPCKind = "TestIPC"
+ testIPCValueA = "hello"
+ testIPCValueB = "world"
+)
+
+// TestIPC exercises interprocess communication through the cache.
+// It calls Set(A) in the parent, { Get(A); Set(B) } in the child
+// process, then Get(B) in the parent.
+func TestIPC(t *testing.T) {
+ keyA := uniqueKey()
+ keyB := uniqueKey()
+ value := []byte(testIPCValueA)
+
+ // Set keyA.
+ if err := filecache.Set(testIPCKind, keyA, value); err != nil {
+ t.Fatalf("Set: %v", err)
+ }
+
+ // Call ipcChild in a child process,
+ // passing it the keys in the environment
+ // (quoted, to avoid NUL termination of C strings).
+ // It will Get(A) then Set(B).
+ cmd := exec.Command(os.Args[0], os.Args[1:]...)
+ cmd.Env = append(os.Environ(),
+ "ENTRYPOINT=ipcChild",
+ fmt.Sprintf("KEYA=%q", keyA),
+ fmt.Sprintf("KEYB=%q", keyB))
+ cmd.Stdout = os.Stderr
+ cmd.Stderr = os.Stderr
+ if err := cmd.Run(); err != nil {
+ t.Fatal(err)
+ }
+
+ // Verify keyB.
+ got, err := filecache.Get(testIPCKind, keyB)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if string(got) != "world" {
+ t.Fatalf("Get(keyB) = %q, want %q", got, "world")
+ }
+}
+
+// We define our own main function so that portions of
+// some tests can run in a separate (child) process.
+func TestMain(m *testing.M) {
+ switch os.Getenv("ENTRYPOINT") {
+ case "ipcChild":
+ ipcChild()
+ default:
+ os.Exit(m.Run())
+ }
+}
+
+// ipcChild is the portion of TestIPC that runs in a child process.
+func ipcChild() {
+ getenv := func(name string) (key [32]byte) {
+ s, _ := strconv.Unquote(os.Getenv(name))
+ copy(key[:], []byte(s))
+ return
+ }
+
+ // Verify key A.
+ got, err := filecache.Get(testIPCKind, getenv("KEYA"))
+ if err != nil || string(got) != testIPCValueA {
+ log.Fatalf("child: Get(key) = %q, %v; want %q", got, err, testIPCValueA)
+ }
+
+ // Set key B.
+ if err := filecache.Set(testIPCKind, getenv("KEYB"), []byte(testIPCValueB)); err != nil {
+ log.Fatalf("child: Set(keyB) failed: %v", err)
+ }
+}
+
+// uniqueKey returns a key that has never been used before.
+func uniqueKey() (key [32]byte) {
+ if _, err := cryptorand.Read(key[:]); err != nil {
+ log.Fatalf("rand: %v", err)
+ }
+ return
+}
+
+func BenchmarkUncontendedGet(b *testing.B) {
+ const kind = "BenchmarkUncontendedGet"
+ key := uniqueKey()
+
+ var value [8192]byte
+ if _, err := mathrand.Read(value[:]); err != nil {
+ b.Fatalf("rand: %v", err)
+ }
+ if err := filecache.Set(kind, key, value[:]); err != nil {
+ b.Fatal(err)
+ }
+ b.ResetTimer()
+
+ var group errgroup.Group
+ group.SetLimit(50)
+ for i := 0; i < b.N; i++ {
+ group.Go(func() error {
+ _, err := filecache.Get(kind, key)
+ return err
+ })
+ }
+ if err := group.Wait(); err != nil {
+ b.Fatal(err)
+ }
+}
diff --git a/gopls/internal/lsp/folding_range.go b/gopls/internal/lsp/folding_range.go
new file mode 100644
index 000000000..3a29ce992
--- /dev/null
+++ b/gopls/internal/lsp/folding_range.go
@@ -0,0 +1,41 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsp
+
+import (
+ "context"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+)
+
+func (s *Server) foldingRange(ctx context.Context, params *protocol.FoldingRangeParams) ([]protocol.FoldingRange, error) {
+ snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.Go)
+ defer release()
+ if !ok {
+ return nil, err
+ }
+
+ ranges, err := source.FoldingRange(ctx, snapshot, fh, snapshot.View().Options().LineFoldingOnly)
+ if err != nil {
+ return nil, err
+ }
+ return toProtocolFoldingRanges(ranges)
+}
+
+func toProtocolFoldingRanges(ranges []*source.FoldingRangeInfo) ([]protocol.FoldingRange, error) {
+ result := make([]protocol.FoldingRange, 0, len(ranges))
+ for _, info := range ranges {
+ rng := info.MappedRange.Range()
+ result = append(result, protocol.FoldingRange{
+ StartLine: rng.Start.Line,
+ StartCharacter: rng.Start.Character,
+ EndLine: rng.End.Line,
+ EndCharacter: rng.End.Character,
+ Kind: string(info.Kind),
+ })
+ }
+ return result, nil
+}
diff --git a/gopls/internal/lsp/format.go b/gopls/internal/lsp/format.go
new file mode 100644
index 000000000..773a4690e
--- /dev/null
+++ b/gopls/internal/lsp/format.go
@@ -0,0 +1,31 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsp
+
+import (
+ "context"
+
+ "golang.org/x/tools/gopls/internal/lsp/mod"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/lsp/work"
+)
+
+func (s *Server) formatting(ctx context.Context, params *protocol.DocumentFormattingParams) ([]protocol.TextEdit, error) {
+ snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.UnknownKind)
+ defer release()
+ if !ok {
+ return nil, err
+ }
+ switch snapshot.View().FileKind(fh) {
+ case source.Mod:
+ return mod.Format(ctx, snapshot, fh)
+ case source.Go:
+ return source.Format(ctx, snapshot, fh)
+ case source.Work:
+ return work.Format(ctx, snapshot, fh)
+ }
+ return nil, nil
+}
diff --git a/gopls/internal/lsp/general.go b/gopls/internal/lsp/general.go
new file mode 100644
index 000000000..1d2d58b8b
--- /dev/null
+++ b/gopls/internal/lsp/general.go
@@ -0,0 +1,619 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsp
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "log"
+ "os"
+ "path"
+ "path/filepath"
+ "sort"
+ "strings"
+ "sync"
+
+ "golang.org/x/tools/gopls/internal/lsp/debug"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/bug"
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/jsonrpc2"
+)
+
+func (s *Server) initialize(ctx context.Context, params *protocol.ParamInitialize) (*protocol.InitializeResult, error) {
+ s.stateMu.Lock()
+ if s.state >= serverInitializing {
+ defer s.stateMu.Unlock()
+ return nil, fmt.Errorf("%w: initialize called while server in %v state", jsonrpc2.ErrInvalidRequest, s.state)
+ }
+ s.state = serverInitializing
+ s.stateMu.Unlock()
+
+ // For uniqueness, use the gopls PID rather than params.ProcessID (the client
+ // pid). Some clients might start multiple gopls servers, though they
+ // probably shouldn't.
+ pid := os.Getpid()
+ s.tempDir = filepath.Join(os.TempDir(), fmt.Sprintf("gopls-%d.%s", pid, s.session.ID()))
+ err := os.Mkdir(s.tempDir, 0700)
+ if err != nil {
+ // MkdirTemp could fail due to permissions issues. This is a problem with
+ // the user's environment, but should not block gopls otherwise behaving.
+ // All usage of s.tempDir should be predicated on having a non-empty
+ // s.tempDir.
+ event.Error(ctx, "creating temp dir", err)
+ s.tempDir = ""
+ }
+ s.progress.SetSupportsWorkDoneProgress(params.Capabilities.Window.WorkDoneProgress)
+
+ options := s.session.Options()
+ defer func() { s.session.SetOptions(options) }()
+
+ if err := s.handleOptionResults(ctx, source.SetOptions(options, params.InitializationOptions)); err != nil {
+ return nil, err
+ }
+ options.ForClientCapabilities(params.Capabilities)
+
+ if options.ShowBugReports {
+ // Report the next bug that occurs on the server.
+ bugCh := bug.Notify()
+ go func() {
+ b := <-bugCh
+ msg := &protocol.ShowMessageParams{
+ Type: protocol.Error,
+ Message: fmt.Sprintf("A bug occurred on the server: %s\nLocation:%s", b.Description, b.Key),
+ }
+ if err := s.eventuallyShowMessage(context.Background(), msg); err != nil {
+ log.Printf("error showing bug: %v", err)
+ }
+ }()
+ }
+
+ folders := params.WorkspaceFolders
+ if len(folders) == 0 {
+ if params.RootURI != "" {
+ folders = []protocol.WorkspaceFolder{{
+ URI: string(params.RootURI),
+ Name: path.Base(params.RootURI.SpanURI().Filename()),
+ }}
+ }
+ }
+ for _, folder := range folders {
+ uri := span.URIFromURI(folder.URI)
+ if !uri.IsFile() {
+ continue
+ }
+ s.pendingFolders = append(s.pendingFolders, folder)
+ }
+ // gopls only supports URIs with a file:// scheme, so if we have no
+ // workspace folders with a supported scheme, fail to initialize.
+ if len(folders) > 0 && len(s.pendingFolders) == 0 {
+ return nil, fmt.Errorf("unsupported URI schemes: %v (gopls only supports file URIs)", folders)
+ }
+
+ var codeActionProvider interface{} = true
+ if ca := params.Capabilities.TextDocument.CodeAction; len(ca.CodeActionLiteralSupport.CodeActionKind.ValueSet) > 0 {
+ // If the client has specified CodeActionLiteralSupport,
+ // send the code actions we support.
+ //
+ // Using CodeActionOptions is only valid if codeActionLiteralSupport is set.
+ codeActionProvider = &protocol.CodeActionOptions{
+ CodeActionKinds: s.getSupportedCodeActions(),
+ }
+ }
+ var renameOpts interface{} = true
+ if r := params.Capabilities.TextDocument.Rename; r != nil && r.PrepareSupport {
+ renameOpts = protocol.RenameOptions{
+ PrepareProvider: r.PrepareSupport,
+ }
+ }
+
+ versionInfo := debug.VersionInfo()
+
+ // golang/go#45732: Warn users who've installed sergi/go-diff@v1.2.0, since
+ // it will corrupt the formatting of their files.
+ for _, dep := range versionInfo.Deps {
+ if dep.Path == "github.com/sergi/go-diff" && dep.Version == "v1.2.0" {
+ if err := s.eventuallyShowMessage(ctx, &protocol.ShowMessageParams{
+ Message: `It looks like you have a bad gopls installation.
+Please reinstall gopls by running 'GO111MODULE=on go install golang.org/x/tools/gopls@latest'.
+See https://github.com/golang/go/issues/45732 for more information.`,
+ Type: protocol.Error,
+ }); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ goplsVersion, err := json.Marshal(versionInfo)
+ if err != nil {
+ return nil, err
+ }
+
+ return &protocol.InitializeResult{
+ Capabilities: protocol.ServerCapabilities{
+ CallHierarchyProvider: &protocol.Or_ServerCapabilities_callHierarchyProvider{Value: true},
+ CodeActionProvider: codeActionProvider,
+ CodeLensProvider: &protocol.CodeLensOptions{}, // must be non-nil to enable the code lens capability
+ CompletionProvider: &protocol.CompletionOptions{
+ TriggerCharacters: []string{"."},
+ },
+ DefinitionProvider: &protocol.Or_ServerCapabilities_definitionProvider{Value: true},
+ TypeDefinitionProvider: &protocol.Or_ServerCapabilities_typeDefinitionProvider{Value: true},
+ ImplementationProvider: &protocol.Or_ServerCapabilities_implementationProvider{Value: true},
+ DocumentFormattingProvider: &protocol.Or_ServerCapabilities_documentFormattingProvider{Value: true},
+ DocumentSymbolProvider: &protocol.Or_ServerCapabilities_documentSymbolProvider{Value: true},
+ WorkspaceSymbolProvider: &protocol.Or_ServerCapabilities_workspaceSymbolProvider{Value: true},
+ ExecuteCommandProvider: &protocol.ExecuteCommandOptions{
+ Commands: options.SupportedCommands,
+ },
+ FoldingRangeProvider: &protocol.Or_ServerCapabilities_foldingRangeProvider{Value: true},
+ HoverProvider: &protocol.Or_ServerCapabilities_hoverProvider{Value: true},
+ DocumentHighlightProvider: &protocol.Or_ServerCapabilities_documentHighlightProvider{Value: true},
+ DocumentLinkProvider: &protocol.DocumentLinkOptions{},
+ InlayHintProvider: protocol.InlayHintOptions{},
+ ReferencesProvider: &protocol.Or_ServerCapabilities_referencesProvider{Value: true},
+ RenameProvider: renameOpts,
+ SelectionRangeProvider: &protocol.Or_ServerCapabilities_selectionRangeProvider{Value: true},
+ SemanticTokensProvider: protocol.SemanticTokensOptions{
+ Range: &protocol.Or_SemanticTokensOptions_range{Value: true},
+ Full: &protocol.Or_SemanticTokensOptions_full{Value: true},
+ Legend: protocol.SemanticTokensLegend{
+ TokenTypes: s.session.Options().SemanticTypes,
+ TokenModifiers: s.session.Options().SemanticMods,
+ },
+ },
+ SignatureHelpProvider: &protocol.SignatureHelpOptions{
+ TriggerCharacters: []string{"(", ","},
+ },
+ TextDocumentSync: &protocol.TextDocumentSyncOptions{
+ Change: protocol.Incremental,
+ OpenClose: true,
+ Save: &protocol.SaveOptions{
+ IncludeText: false,
+ },
+ },
+ Workspace: &protocol.Workspace6Gn{
+ WorkspaceFolders: &protocol.WorkspaceFolders5Gn{
+ Supported: true,
+ ChangeNotifications: "workspace/didChangeWorkspaceFolders",
+ },
+ },
+ },
+ ServerInfo: &protocol.PServerInfoMsg_initialize{
+ Name: "gopls",
+ Version: string(goplsVersion),
+ },
+ }, nil
+}
+
+func (s *Server) initialized(ctx context.Context, params *protocol.InitializedParams) error {
+ s.stateMu.Lock()
+ if s.state >= serverInitialized {
+ defer s.stateMu.Unlock()
+ return fmt.Errorf("%w: initialized called while server in %v state", jsonrpc2.ErrInvalidRequest, s.state)
+ }
+ s.state = serverInitialized
+ s.stateMu.Unlock()
+
+ for _, not := range s.notifications {
+ s.client.ShowMessage(ctx, not)
+ }
+ s.notifications = nil
+
+ options := s.session.Options()
+ defer func() { s.session.SetOptions(options) }()
+
+ if err := s.addFolders(ctx, s.pendingFolders); err != nil {
+ return err
+ }
+ s.pendingFolders = nil
+ s.checkViewGoVersions()
+
+ var registrations []protocol.Registration
+ if options.ConfigurationSupported && options.DynamicConfigurationSupported {
+ registrations = append(registrations, protocol.Registration{
+ ID: "workspace/didChangeConfiguration",
+ Method: "workspace/didChangeConfiguration",
+ })
+ }
+ if len(registrations) > 0 {
+ if err := s.client.RegisterCapability(ctx, &protocol.RegistrationParams{
+ Registrations: registrations,
+ }); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// GoVersionTable maps Go versions to the gopls version in which support will
+// be deprecated, and the final gopls version supporting them without warnings.
+// Keep this in sync with gopls/README.md
+//
+// Must be sorted in ascending order of Go version.
+//
+// Mutable for testing.
+var GoVersionTable = []GoVersionSupport{
+ {12, "", "v0.7.5"},
+ {15, "v0.11.0", "v0.9.5"},
+}
+
+// GoVersionSupport holds information about end-of-life Go version support.
+type GoVersionSupport struct {
+ GoVersion int
+ DeprecatedVersion string // if unset, the version is already deprecated
+ InstallGoplsVersion string
+}
+
+// OldestSupportedGoVersion is the last X in Go 1.X that this version of gopls
+// supports.
+func OldestSupportedGoVersion() int {
+ return GoVersionTable[len(GoVersionTable)-1].GoVersion + 1
+}
+
+// versionMessage returns the warning/error message to display if the user is
+// on the given Go version, if any. The goVersion variable is the X in Go 1.X.
+//
+// If goVersion is invalid (< 0), it returns "", 0.
+func versionMessage(goVersion int) (string, protocol.MessageType) {
+ if goVersion < 0 {
+ return "", 0
+ }
+
+ for _, v := range GoVersionTable {
+ if goVersion <= v.GoVersion {
+ var msgBuilder strings.Builder
+
+ mType := protocol.Error
+ fmt.Fprintf(&msgBuilder, "Found Go version 1.%d", goVersion)
+ if v.DeprecatedVersion != "" {
+ // not deprecated yet, just a warning
+ fmt.Fprintf(&msgBuilder, ", which will be unsupported by gopls %s. ", v.DeprecatedVersion)
+ mType = protocol.Warning
+ } else {
+ fmt.Fprint(&msgBuilder, ", which is not supported by this version of gopls. ")
+ }
+ fmt.Fprintf(&msgBuilder, "Please upgrade to Go 1.%d or later and reinstall gopls. ", OldestSupportedGoVersion())
+ fmt.Fprintf(&msgBuilder, "If you can't upgrade and want this message to go away, please install gopls %s. ", v.InstallGoplsVersion)
+ fmt.Fprint(&msgBuilder, "See https://go.dev/s/gopls-support-policy for more details.")
+
+ return msgBuilder.String(), mType
+ }
+ }
+ return "", 0
+}
+
+// checkViewGoVersions checks whether any Go version used by a view is too old,
+// raising a showMessage notification if so.
+//
+// It should be called after views change.
+func (s *Server) checkViewGoVersions() {
+ oldestVersion := -1
+ for _, view := range s.session.Views() {
+ viewVersion := view.GoVersion()
+ if oldestVersion == -1 || viewVersion < oldestVersion {
+ oldestVersion = viewVersion
+ }
+ }
+
+ if msg, mType := versionMessage(oldestVersion); msg != "" {
+ s.eventuallyShowMessage(context.Background(), &protocol.ShowMessageParams{
+ Type: mType,
+ Message: msg,
+ })
+ }
+}
+
+func (s *Server) addFolders(ctx context.Context, folders []protocol.WorkspaceFolder) error {
+ originalViews := len(s.session.Views())
+ viewErrors := make(map[span.URI]error)
+
+ var ndiagnose sync.WaitGroup // number of unfinished diagnose calls
+ if s.session.Options().VerboseWorkDoneProgress {
+ work := s.progress.Start(ctx, DiagnosticWorkTitle(FromInitialWorkspaceLoad), "Calculating diagnostics for initial workspace load...", nil, nil)
+ defer func() {
+ go func() {
+ ndiagnose.Wait()
+ work.End(ctx, "Done.")
+ }()
+ }()
+ }
+ // Only one view gets to have a workspace.
+ var nsnapshots sync.WaitGroup // number of unfinished snapshot initializations
+ for _, folder := range folders {
+ uri := span.URIFromURI(folder.URI)
+ // Ignore non-file URIs.
+ if !uri.IsFile() {
+ continue
+ }
+ work := s.progress.Start(ctx, "Setting up workspace", "Loading packages...", nil, nil)
+ snapshot, release, err := s.addView(ctx, folder.Name, uri)
+ if err != nil {
+ if err == source.ErrViewExists {
+ continue
+ }
+ viewErrors[uri] = err
+ work.End(ctx, fmt.Sprintf("Error loading packages: %s", err))
+ continue
+ }
+ // Inv: release() must be called once.
+
+ // Initialize snapshot asynchronously.
+ initialized := make(chan struct{})
+ nsnapshots.Add(1)
+ go func() {
+ snapshot.AwaitInitialized(ctx)
+ work.End(ctx, "Finished loading packages.")
+ nsnapshots.Done()
+ close(initialized) // signal
+ }()
+
+ // Diagnose the newly created view asynchronously.
+ ndiagnose.Add(1)
+ go func() {
+ s.diagnoseDetached(snapshot)
+ <-initialized
+ release()
+ ndiagnose.Done()
+ }()
+ }
+
+ // Wait for snapshots to be initialized so that all files are known.
+ // (We don't need to wait for diagnosis to finish.)
+ nsnapshots.Wait()
+
+ // Register for file watching notifications, if they are supported.
+ if err := s.updateWatchedDirectories(ctx); err != nil {
+ event.Error(ctx, "failed to register for file watching notifications", err)
+ }
+
+ if len(viewErrors) > 0 {
+ errMsg := fmt.Sprintf("Error loading workspace folders (expected %v, got %v)\n", len(folders), len(s.session.Views())-originalViews)
+ for uri, err := range viewErrors {
+ errMsg += fmt.Sprintf("failed to load view for %s: %v\n", uri, err)
+ }
+ return s.client.ShowMessage(ctx, &protocol.ShowMessageParams{
+ Type: protocol.Error,
+ Message: errMsg,
+ })
+ }
+ return nil
+}
+
+// updateWatchedDirectories compares the current set of directories to watch
+// with the previously registered set of directories. If the set of directories
+// has changed, we unregister and re-register for file watching notifications.
+// updatedSnapshots is the set of snapshots that have been updated.
+func (s *Server) updateWatchedDirectories(ctx context.Context) error {
+ patterns := s.session.FileWatchingGlobPatterns(ctx)
+
+ s.watchedGlobPatternsMu.Lock()
+ defer s.watchedGlobPatternsMu.Unlock()
+
+ // Nothing to do if the set of workspace directories is unchanged.
+ if equalURISet(s.watchedGlobPatterns, patterns) {
+ return nil
+ }
+
+ // If the set of directories to watch has changed, register the updates and
+ // unregister the previously watched directories. This ordering avoids a
+ // period where no files are being watched. Still, if a user makes on-disk
+ // changes before these updates are complete, we may miss them for the new
+ // directories.
+ prevID := s.watchRegistrationCount - 1
+ if err := s.registerWatchedDirectoriesLocked(ctx, patterns); err != nil {
+ return err
+ }
+ if prevID >= 0 {
+ return s.client.UnregisterCapability(ctx, &protocol.UnregistrationParams{
+ Unregisterations: []protocol.Unregistration{{
+ ID: watchedFilesCapabilityID(prevID),
+ Method: "workspace/didChangeWatchedFiles",
+ }},
+ })
+ }
+ return nil
+}
+
+func watchedFilesCapabilityID(id int) string {
+ return fmt.Sprintf("workspace/didChangeWatchedFiles-%d", id)
+}
+
+func equalURISet(m1, m2 map[string]struct{}) bool {
+ if len(m1) != len(m2) {
+ return false
+ }
+ for k := range m1 {
+ _, ok := m2[k]
+ if !ok {
+ return false
+ }
+ }
+ return true
+}
+
+// registerWatchedDirectoriesLocked sends the workspace/didChangeWatchedFiles
+// registrations to the client and updates s.watchedDirectories.
+func (s *Server) registerWatchedDirectoriesLocked(ctx context.Context, patterns map[string]struct{}) error {
+ if !s.session.Options().DynamicWatchedFilesSupported {
+ return nil
+ }
+ for k := range s.watchedGlobPatterns {
+ delete(s.watchedGlobPatterns, k)
+ }
+ var watchers []protocol.FileSystemWatcher
+ val := protocol.WatchChange | protocol.WatchDelete | protocol.WatchCreate
+ for pattern := range patterns {
+ watchers = append(watchers, protocol.FileSystemWatcher{
+ GlobPattern: pattern,
+ Kind: &val,
+ })
+ }
+
+ if err := s.client.RegisterCapability(ctx, &protocol.RegistrationParams{
+ Registrations: []protocol.Registration{{
+ ID: watchedFilesCapabilityID(s.watchRegistrationCount),
+ Method: "workspace/didChangeWatchedFiles",
+ RegisterOptions: protocol.DidChangeWatchedFilesRegistrationOptions{
+ Watchers: watchers,
+ },
+ }},
+ }); err != nil {
+ return err
+ }
+ s.watchRegistrationCount++
+
+ for k, v := range patterns {
+ s.watchedGlobPatterns[k] = v
+ }
+ return nil
+}
+
+func (s *Server) fetchConfig(ctx context.Context, name string, folder span.URI, o *source.Options) error {
+ if !s.session.Options().ConfigurationSupported {
+ return nil
+ }
+ configs, err := s.client.Configuration(ctx, &protocol.ParamConfiguration{
+ Items: []protocol.ConfigurationItem{{
+ ScopeURI: string(folder),
+ Section: "gopls",
+ }},
+ },
+ )
+ if err != nil {
+ return fmt.Errorf("failed to get workspace configuration from client (%s): %v", folder, err)
+ }
+ for _, config := range configs {
+ if err := s.handleOptionResults(ctx, source.SetOptions(o, config)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (s *Server) eventuallyShowMessage(ctx context.Context, msg *protocol.ShowMessageParams) error {
+ s.stateMu.Lock()
+ defer s.stateMu.Unlock()
+ if s.state == serverInitialized {
+ return s.client.ShowMessage(ctx, msg)
+ }
+ s.notifications = append(s.notifications, msg)
+ return nil
+}
+
+func (s *Server) handleOptionResults(ctx context.Context, results source.OptionResults) error {
+ var warnings, errors []string
+ for _, result := range results {
+ switch result.Error.(type) {
+ case nil:
+ // nothing to do
+ case *source.SoftError:
+ warnings = append(warnings, result.Error.Error())
+ default:
+ errors = append(errors, result.Error.Error())
+ }
+ }
+
+ // Sort messages, but put errors first.
+ //
+ // Having stable content for the message allows clients to de-duplicate. This
+ // matters because we may send duplicate warnings for clients that support
+ // dynamic configuration: one for the initial settings, and then more for the
+ // individual view settings.
+ var msgs []string
+ msgType := protocol.Warning
+ if len(errors) > 0 {
+ msgType = protocol.Error
+ sort.Strings(errors)
+ msgs = append(msgs, errors...)
+ }
+ if len(warnings) > 0 {
+ sort.Strings(warnings)
+ msgs = append(msgs, warnings...)
+ }
+
+ if len(msgs) > 0 {
+ // Settings
+ combined := "Invalid settings: " + strings.Join(msgs, "; ")
+ params := &protocol.ShowMessageParams{
+ Type: msgType,
+ Message: combined,
+ }
+ return s.eventuallyShowMessage(ctx, params)
+ }
+
+ return nil
+}
+
+// beginFileRequest checks preconditions for a file-oriented request and routes
+// it to a snapshot.
+// We don't want to return errors for benign conditions like wrong file type,
+// so callers should do if !ok { return err } rather than if err != nil.
+// The returned cleanup function is non-nil even in case of false/error result.
+func (s *Server) beginFileRequest(ctx context.Context, pURI protocol.DocumentURI, expectKind source.FileKind) (source.Snapshot, source.FileHandle, bool, func(), error) {
+ uri := pURI.SpanURI()
+ if !uri.IsFile() {
+ // Not a file URI. Stop processing the request, but don't return an error.
+ return nil, nil, false, func() {}, nil
+ }
+ view, err := s.session.ViewOf(uri)
+ if err != nil {
+ return nil, nil, false, func() {}, err
+ }
+ snapshot, release, err := view.Snapshot()
+ if err != nil {
+ return nil, nil, false, func() {}, err
+ }
+ fh, err := snapshot.GetFile(ctx, uri)
+ if err != nil {
+ release()
+ return nil, nil, false, func() {}, err
+ }
+ if expectKind != source.UnknownKind && view.FileKind(fh) != expectKind {
+ // Wrong kind of file. Nothing to do.
+ release()
+ return nil, nil, false, func() {}, nil
+ }
+ return snapshot, fh, true, release, nil
+}
+
+// shutdown implements the 'shutdown' LSP handler. It releases resources
+// associated with the server and waits for all ongoing work to complete.
+func (s *Server) shutdown(ctx context.Context) error {
+ s.stateMu.Lock()
+ defer s.stateMu.Unlock()
+ if s.state < serverInitialized {
+ event.Log(ctx, "server shutdown without initialization")
+ }
+ if s.state != serverShutDown {
+ // drop all the active views
+ s.session.Shutdown(ctx)
+ s.state = serverShutDown
+ if s.tempDir != "" {
+ if err := os.RemoveAll(s.tempDir); err != nil {
+ event.Error(ctx, "removing temp dir", err)
+ }
+ }
+ }
+ return nil
+}
+
+func (s *Server) exit(ctx context.Context) error {
+ s.stateMu.Lock()
+ defer s.stateMu.Unlock()
+
+ s.client.Close()
+
+ if s.state != serverShutDown {
+ // TODO: We should be able to do better than this.
+ os.Exit(1)
+ }
+ // we don't terminate the process on a normal exit, we just allow it to
+ // close naturally if needed after the connection is closed.
+ return nil
+}
diff --git a/gopls/internal/lsp/general_test.go b/gopls/internal/lsp/general_test.go
new file mode 100644
index 000000000..a0312ba1b
--- /dev/null
+++ b/gopls/internal/lsp/general_test.go
@@ -0,0 +1,44 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsp
+
+import (
+ "strings"
+ "testing"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+)
+
+func TestVersionMessage(t *testing.T) {
+ tests := []struct {
+ goVersion int
+ wantContains []string // string fragments that we expect to see
+ wantType protocol.MessageType
+ }{
+ {-1, nil, 0},
+ {12, []string{"1.12", "not supported", "upgrade to Go 1.16", "install gopls v0.7.5"}, protocol.Error},
+ {13, []string{"1.13", "will be unsupported by gopls v0.11.0", "upgrade to Go 1.16", "install gopls v0.9.5"}, protocol.Warning},
+ {15, []string{"1.15", "will be unsupported by gopls v0.11.0", "upgrade to Go 1.16", "install gopls v0.9.5"}, protocol.Warning},
+ {16, nil, 0},
+ }
+
+ for _, test := range tests {
+ gotMsg, gotType := versionMessage(test.goVersion)
+
+ if len(test.wantContains) == 0 && gotMsg != "" {
+ t.Errorf("versionMessage(%d) = %q, want \"\"", test.goVersion, gotMsg)
+ }
+
+ for _, want := range test.wantContains {
+ if !strings.Contains(gotMsg, want) {
+ t.Errorf("versionMessage(%d) = %q, want containing %q", test.goVersion, gotMsg, want)
+ }
+ }
+
+ if gotType != test.wantType {
+ t.Errorf("versionMessage(%d) = returned message type %d, want %d", test.goVersion, gotType, test.wantType)
+ }
+ }
+}
diff --git a/gopls/internal/lsp/glob/glob.go b/gopls/internal/lsp/glob/glob.go
new file mode 100644
index 000000000..a540ebefa
--- /dev/null
+++ b/gopls/internal/lsp/glob/glob.go
@@ -0,0 +1,349 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package glob implements an LSP-compliant glob pattern matcher for testing.
+package glob
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+ "unicode/utf8"
+)
+
+// A Glob is an LSP-compliant glob pattern, as defined by the spec:
+// https://microsoft.github.io/language-server-protocol/specifications/lsp/3.17/specification/#documentFilter
+//
+// NOTE: this implementation is currently only intended for testing. In order
+// to make it production ready, we'd need to:
+// - verify it against the VS Code implementation
+// - add more tests
+// - microbenchmark, likely avoiding the element interface
+// - resolve the question of what is meant by "character". If it's a UTF-16
+// code (as we suspect) it'll be a bit more work.
+//
+// Quoting from the spec:
+// Glob patterns can have the following syntax:
+// - `*` to match one or more characters in a path segment
+// - `?` to match on one character in a path segment
+// - `**` to match any number of path segments, including none
+// - `{}` to group sub patterns into an OR expression. (e.g. `**/*.{ts,js}`
+// matches all TypeScript and JavaScript files)
+// - `[]` to declare a range of characters to match in a path segment
+// (e.g., `example.[0-9]` to match on `example.0`, `example.1`, …)
+// - `[!...]` to negate a range of characters to match in a path segment
+// (e.g., `example.[!0-9]` to match on `example.a`, `example.b`, but
+// not `example.0`)
+//
+// Expanding on this:
+// - '/' matches one or more literal slashes.
+// - any other character matches itself literally.
+type Glob struct {
+ elems []element // pattern elements
+}
+
+// Parse builds a Glob for the given pattern, returning an error if the pattern
+// is invalid.
+func Parse(pattern string) (*Glob, error) {
+ g, _, err := parse(pattern, false)
+ return g, err
+}
+
+func parse(pattern string, nested bool) (*Glob, string, error) {
+ g := new(Glob)
+ for len(pattern) > 0 {
+ switch pattern[0] {
+ case '/':
+ pattern = pattern[1:]
+ g.elems = append(g.elems, slash{})
+
+ case '*':
+ if len(pattern) > 1 && pattern[1] == '*' {
+ if (len(g.elems) > 0 && g.elems[len(g.elems)-1] != slash{}) || (len(pattern) > 2 && pattern[2] != '/') {
+ return nil, "", errors.New("** may only be adjacent to '/'")
+ }
+ pattern = pattern[2:]
+ g.elems = append(g.elems, starStar{})
+ break
+ }
+ pattern = pattern[1:]
+ g.elems = append(g.elems, star{})
+
+ case '?':
+ pattern = pattern[1:]
+ g.elems = append(g.elems, anyChar{})
+
+ case '{':
+ var gs group
+ for pattern[0] != '}' {
+ pattern = pattern[1:]
+ g, pat, err := parse(pattern, true)
+ if err != nil {
+ return nil, "", err
+ }
+ if len(pat) == 0 {
+ return nil, "", errors.New("unmatched '{'")
+ }
+ pattern = pat
+ gs = append(gs, g)
+ }
+ pattern = pattern[1:]
+ g.elems = append(g.elems, gs)
+
+ case '}', ',':
+ if nested {
+ return g, pattern, nil
+ }
+ pattern = g.parseLiteral(pattern, false)
+
+ case '[':
+ pattern = pattern[1:]
+ if len(pattern) == 0 {
+ return nil, "", errBadRange
+ }
+ negate := false
+ if pattern[0] == '!' {
+ pattern = pattern[1:]
+ negate = true
+ }
+ low, sz, err := readRangeRune(pattern)
+ if err != nil {
+ return nil, "", err
+ }
+ pattern = pattern[sz:]
+ if len(pattern) == 0 || pattern[0] != '-' {
+ return nil, "", errBadRange
+ }
+ pattern = pattern[1:]
+ high, sz, err := readRangeRune(pattern)
+ if err != nil {
+ return nil, "", err
+ }
+ pattern = pattern[sz:]
+ if len(pattern) == 0 || pattern[0] != ']' {
+ return nil, "", errBadRange
+ }
+ pattern = pattern[1:]
+ g.elems = append(g.elems, charRange{negate, low, high})
+
+ default:
+ pattern = g.parseLiteral(pattern, nested)
+ }
+ }
+ return g, "", nil
+}
+
+// helper for decoding a rune in range elements, e.g. [a-z]
+func readRangeRune(input string) (rune, int, error) {
+ r, sz := utf8.DecodeRuneInString(input)
+ var err error
+ if r == utf8.RuneError {
+ // See the documentation for DecodeRuneInString.
+ switch sz {
+ case 0:
+ err = errBadRange
+ case 1:
+ err = errInvalidUTF8
+ }
+ }
+ return r, sz, err
+}
+
+var (
+ errBadRange = errors.New("'[' patterns must be of the form [x-y]")
+ errInvalidUTF8 = errors.New("invalid UTF-8 encoding")
+)
+
+func (g *Glob) parseLiteral(pattern string, nested bool) string {
+ var specialChars string
+ if nested {
+ specialChars = "*?{[/},"
+ } else {
+ specialChars = "*?{[/"
+ }
+ end := strings.IndexAny(pattern, specialChars)
+ if end == -1 {
+ end = len(pattern)
+ }
+ g.elems = append(g.elems, literal(pattern[:end]))
+ return pattern[end:]
+}
+
+func (g *Glob) String() string {
+ var b strings.Builder
+ for _, e := range g.elems {
+ fmt.Fprint(&b, e)
+ }
+ return b.String()
+}
+
+// element holds a glob pattern element, as defined below.
+type element fmt.Stringer
+
+// element types.
+type (
+ slash struct{} // One or more '/' separators
+ literal string // string literal, not containing /, *, ?, {}, or []
+ star struct{} // *
+ anyChar struct{} // ?
+ starStar struct{} // **
+ group []*Glob // {foo, bar, ...} grouping
+ charRange struct { // [a-z] character range
+ negate bool
+ low, high rune
+ }
+)
+
+func (s slash) String() string { return "/" }
+func (l literal) String() string { return string(l) }
+func (s star) String() string { return "*" }
+func (a anyChar) String() string { return "?" }
+func (s starStar) String() string { return "**" }
+func (g group) String() string {
+ var parts []string
+ for _, g := range g {
+ parts = append(parts, g.String())
+ }
+ return "{" + strings.Join(parts, ",") + "}"
+}
+func (r charRange) String() string {
+ return "[" + string(r.low) + "-" + string(r.high) + "]"
+}
+
+// Match reports whether the input string matches the glob pattern.
+func (g *Glob) Match(input string) bool {
+ return match(g.elems, input)
+}
+
+func match(elems []element, input string) (ok bool) {
+ var elem interface{}
+ for len(elems) > 0 {
+ elem, elems = elems[0], elems[1:]
+ switch elem := elem.(type) {
+ case slash:
+ if len(input) == 0 || input[0] != '/' {
+ return false
+ }
+ for input[0] == '/' {
+ input = input[1:]
+ }
+
+ case starStar:
+ // Special cases:
+ // - **/a matches "a"
+ // - **/ matches everything
+ //
+ // Note that if ** is followed by anything, it must be '/' (this is
+ // enforced by Parse).
+ if len(elems) > 0 {
+ elems = elems[1:]
+ }
+
+ // A trailing ** matches anything.
+ if len(elems) == 0 {
+ return true
+ }
+
+ // Backtracking: advance pattern segments until the remaining pattern
+ // elements match.
+ for len(input) != 0 {
+ if match(elems, input) {
+ return true
+ }
+ _, input = split(input)
+ }
+ return false
+
+ case literal:
+ if !strings.HasPrefix(input, string(elem)) {
+ return false
+ }
+ input = input[len(elem):]
+
+ case star:
+ var segInput string
+ segInput, input = split(input)
+
+ elemEnd := len(elems)
+ for i, e := range elems {
+ if e == (slash{}) {
+ elemEnd = i
+ break
+ }
+ }
+ segElems := elems[:elemEnd]
+ elems = elems[elemEnd:]
+
+ // A trailing * matches the entire segment.
+ if len(segElems) == 0 {
+ break
+ }
+
+ // Backtracking: advance characters until remaining subpattern elements
+ // match.
+ matched := false
+ for i := range segInput {
+ if match(segElems, segInput[i:]) {
+ matched = true
+ break
+ }
+ }
+ if !matched {
+ return false
+ }
+
+ case anyChar:
+ if len(input) == 0 || input[0] == '/' {
+ return false
+ }
+ input = input[1:]
+
+ case group:
+ // Append remaining pattern elements to each group member looking for a
+ // match.
+ var branch []element
+ for _, m := range elem {
+ branch = branch[:0]
+ branch = append(branch, m.elems...)
+ branch = append(branch, elems...)
+ if match(branch, input) {
+ return true
+ }
+ }
+ return false
+
+ case charRange:
+ if len(input) == 0 || input[0] == '/' {
+ return false
+ }
+ c, sz := utf8.DecodeRuneInString(input)
+ if c < elem.low || c > elem.high {
+ return false
+ }
+ input = input[sz:]
+
+ default:
+ panic(fmt.Sprintf("segment type %T not implemented", elem))
+ }
+ }
+
+ return len(input) == 0
+}
+
+// split returns the portion before and after the first slash
+// (or sequence of consecutive slashes). If there is no slash
+// it returns (input, nil).
+func split(input string) (first, rest string) {
+ i := strings.IndexByte(input, '/')
+ if i < 0 {
+ return input, ""
+ }
+ first = input[:i]
+ for j := i; j < len(input); j++ {
+ if input[j] != '/' {
+ return first, input[j:]
+ }
+ }
+ return first, ""
+}
diff --git a/gopls/internal/lsp/glob/glob_test.go b/gopls/internal/lsp/glob/glob_test.go
new file mode 100644
index 000000000..df602624d
--- /dev/null
+++ b/gopls/internal/lsp/glob/glob_test.go
@@ -0,0 +1,118 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package glob_test
+
+import (
+ "testing"
+
+ "golang.org/x/tools/gopls/internal/lsp/glob"
+)
+
+func TestParseErrors(t *testing.T) {
+ tests := []string{
+ "***",
+ "ab{c",
+ "[]",
+ "[a-]",
+ "ab{c{d}",
+ }
+
+ for _, test := range tests {
+ _, err := glob.Parse(test)
+ if err == nil {
+ t.Errorf("Parse(%q) succeeded unexpectedly", test)
+ }
+ }
+}
+
+func TestMatch(t *testing.T) {
+ tests := []struct {
+ pattern, input string
+ want bool
+ }{
+ // Basic cases.
+ {"", "", true},
+ {"", "a", false},
+ {"", "/", false},
+ {"abc", "abc", true},
+
+ // ** behavior
+ {"**", "abc", true},
+ {"**/abc", "abc", true},
+ {"**", "abc/def", true},
+ {"{a/**/c,a/**/d}", "a/b/c", true},
+ {"{a/**/c,a/**/d}", "a/b/c/d", true},
+ {"{a/**/c,a/**/e}", "a/b/c/d", false},
+ {"{a/**/c,a/**/e,a/**/d}", "a/b/c/d", true},
+ {"{/a/**/c,a/**/e,a/**/d}", "a/b/c/d", true},
+ {"{/a/**/c,a/**/e,a/**/d}", "/a/b/c/d", false},
+ {"{/a/**/c,a/**/e,a/**/d}", "/a/b/c", true},
+ {"{/a/**/e,a/**/e,a/**/d}", "/a/b/c", false},
+
+ // * and ? behavior
+ {"/*", "/a", true},
+ {"*", "foo", true},
+ {"*o", "foo", true},
+ {"*o", "foox", false},
+ {"f*o", "foo", true},
+ {"f*o", "fo", true},
+ {"fo?", "foo", true},
+ {"fo?", "fox", true},
+ {"fo?", "fooo", false},
+ {"fo?", "fo", false},
+ {"?", "a", true},
+ {"?", "ab", false},
+ {"?", "", false},
+ {"*?", "", false},
+ {"?b", "ab", true},
+ {"?c", "ab", false},
+
+ // {} behavior
+ {"ab{c,d}e", "abce", true},
+ {"ab{c,d}e", "abde", true},
+ {"ab{c,d}e", "abxe", false},
+ {"ab{c,d}e", "abe", false},
+ {"{a,b}c", "ac", true},
+ {"{a,b}c", "bc", true},
+ {"{a,b}c", "ab", false},
+ {"a{b,c}", "ab", true},
+ {"a{b,c}", "ac", true},
+ {"a{b,c}", "bc", false},
+ {"ab{c{1,2},d}e", "abc1e", true},
+ {"ab{c{1,2},d}e", "abde", true},
+ {"ab{c{1,2},d}e", "abc1f", false},
+ {"ab{c{1,2},d}e", "abce", false},
+ {"ab{c[}-~]}d", "abc}d", true},
+ {"ab{c[}-~]}d", "abc~d", true},
+ {"ab{c[}-~],y}d", "abcxd", false},
+ {"ab{c[}-~],y}d", "abyd", true},
+ {"ab{c[}-~],y}d", "abd", false},
+ {"{a/b/c,d/e/f}", "a/b/c", true},
+ {"/ab{/c,d}e", "/ab/ce", true},
+ {"/ab{/c,d}e", "/ab/cf", false},
+
+ // [-] behavior
+ {"[a-c]", "a", true},
+ {"[a-c]", "b", true},
+ {"[a-c]", "c", true},
+ {"[a-c]", "d", false},
+ {"[a-c]", " ", false},
+
+ // Realistic examples.
+ {"**/*.{ts,js}", "path/to/foo.ts", true},
+ {"**/*.{ts,js}", "path/to/foo.js", true},
+ {"**/*.{ts,js}", "path/to/foo.go", false},
+ }
+
+ for _, test := range tests {
+ g, err := glob.Parse(test.pattern)
+ if err != nil {
+ t.Fatalf("New(%q) failed unexpectedly: %v", test.pattern, err)
+ }
+ if got := g.Match(test.input); got != test.want {
+ t.Errorf("New(%q).Match(%q) = %t, want %t", test.pattern, test.input, got, test.want)
+ }
+ }
+}
diff --git a/gopls/internal/lsp/helper/README.md b/gopls/internal/lsp/helper/README.md
new file mode 100644
index 000000000..512427992
--- /dev/null
+++ b/gopls/internal/lsp/helper/README.md
@@ -0,0 +1,35 @@
+# Generate server_gen.go
+
+`helper` generates the file `../server_gen.go` (in package
+`internal/lsp`) which contains stub declarations of server methods.
+
+To invoke it, run `go generate` in the `gopls/internal/lsp` directory.
+
+It is derived from `gopls/internal/lsp/protocol/tsserver.go`, which
+itself is generated from the protocol downloaded from VSCode, so be
+sure to run `go generate` in the protocol first. Or run `go generate
+./...` twice in the gopls directory.
+
+It decides what stubs are needed and their signatures
+by looking at the `Server` interface (`-t` flag). These all look somewhat like
+`Resolve(context.Context, *CompletionItem) (*CompletionItem, error)`.
+
+It then parses the `lsp` directory (`-u` flag) to see if there is a corresponding
+implementation function (which in this case would be named `resolve`). If so
+it discovers the parameter names needed, and generates (in `server_gen.go`) code
+like
+
+``` go
+func (s *Server) resolve(ctx context.Context, params *protocol.CompletionItem) (*protocol.CompletionItem, error) {
+ return s.resolve(ctx, params)
+}
+```
+
+If `resolve` is not defined (and it is not), then the body of the generated function is
+
+```go
+ return nil, notImplemented("resolve")
+```
+
+So to add a capability currently not implemented, just define it somewhere in `lsp`.
+In this case, just define `func (s *Server) resolve(...)` and re-generate `server_gen.go`.
diff --git a/gopls/internal/lsp/helper/helper.go b/gopls/internal/lsp/helper/helper.go
new file mode 100644
index 000000000..84ac7aa7b
--- /dev/null
+++ b/gopls/internal/lsp/helper/helper.go
@@ -0,0 +1,264 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The helper command generates the declaration of the concrete
+// 'server' type that implements the abstract Server interface defined
+// in protocol/tsserver.go (which is itself generated from the LSP
+// protocol).
+//
+// To run, invoke "go generate" in the parent (lsp) directory.
+//
+// TODO(adonovan): merge this into the main LSP generator.
+package main
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "go/ast"
+ "go/format"
+ "go/parser"
+ "go/token"
+ "log"
+ "os"
+ "sort"
+ "strings"
+ "text/template"
+)
+
+var (
+ typ = flag.String("t", "Server", "generate code for this type")
+ def = flag.String("d", "", "the file the type is defined in") // this relies on punning
+ use = flag.String("u", "", "look for uses in this package")
+ out = flag.String("o", "", "where to write the generated file")
+)
+
+func main() {
+ log.SetFlags(log.Lshortfile)
+ flag.Parse()
+ if *typ == "" || *def == "" || *use == "" || *out == "" {
+ flag.PrintDefaults()
+ os.Exit(1)
+ }
+ // read the type definition and see what methods we're looking for
+ doTypes()
+
+ // parse the package and see which methods are defined
+ doUses()
+
+ output()
+}
+
+// replace "\\\n" with nothing before using
+var tmpl = `// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsp
+
+// code generated by helper. DO NOT EDIT.
+
+import (
+ "context"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+)
+
+{{range $key, $v := .Stuff}}
+func (s *{{$.Type}}) {{$v.Name}}({{.Param}}) {{.Result}} {
+ {{if ne .Found ""}} return s.{{.Internal}}({{.Invoke}})\
+ {{else}}return {{if lt 1 (len .Results)}}nil, {{end}}notImplemented("{{.Name}}"){{end}}
+}
+{{end}}
+`
+
+func output() {
+ // put in empty param names as needed
+ for _, t := range types {
+ if t.paramnames == nil {
+ t.paramnames = make([]string, len(t.paramtypes))
+ }
+ for i, p := range t.paramtypes {
+ cm := ""
+ if i > 0 {
+ cm = ", "
+ }
+ t.Param += fmt.Sprintf("%s%s %s", cm, t.paramnames[i], p)
+ this := t.paramnames[i]
+ if this == "_" {
+ this = "nil"
+ }
+ t.Invoke += fmt.Sprintf("%s%s", cm, this)
+ }
+ if len(t.Results) > 1 {
+ t.Result = "("
+ }
+ for i, r := range t.Results {
+ cm := ""
+ if i > 0 {
+ cm = ", "
+ }
+ t.Result += fmt.Sprintf("%s%s", cm, r)
+ }
+ if len(t.Results) > 1 {
+ t.Result += ")"
+ }
+ }
+
+ fd, err := os.Create(*out)
+ if err != nil {
+ log.Fatal(err)
+ }
+ t, err := template.New("foo").Parse(tmpl)
+ if err != nil {
+ log.Fatal(err)
+ }
+ type par struct {
+ Type string
+ Stuff []*Function
+ }
+ p := par{*typ, types}
+ if false { // debugging the template
+ t.Execute(os.Stderr, &p)
+ }
+ buf := bytes.NewBuffer(nil)
+ err = t.Execute(buf, &p)
+ if err != nil {
+ log.Fatal(err)
+ }
+ ans, err := format.Source(bytes.Replace(buf.Bytes(), []byte("\\\n"), []byte{}, -1))
+ if err != nil {
+ log.Fatal(err)
+ }
+ fd.Write(ans)
+}
+
+func doUses() {
+ fset := token.NewFileSet()
+ pkgs, err := parser.ParseDir(fset, *use, nil, 0)
+ if err != nil {
+ log.Fatalf("%q:%v", *use, err)
+ }
+ pkg := pkgs["lsp"] // CHECK
+ files := pkg.Files
+ for fname, f := range files {
+ for _, d := range f.Decls {
+ fd, ok := d.(*ast.FuncDecl)
+ if !ok {
+ continue
+ }
+ nm := fd.Name.String()
+ if ast.IsExported(nm) {
+ // we're looking for things like didChange
+ continue
+ }
+ if fx, ok := byname[nm]; ok {
+ if fx.Found != "" {
+ log.Fatalf("found %s in %s and %s", fx.Internal, fx.Found, fname)
+ }
+ fx.Found = fname
+ // and the Paramnames
+ ft := fd.Type
+ for _, f := range ft.Params.List {
+ nm := ""
+ if len(f.Names) > 0 {
+ nm = f.Names[0].String()
+ if nm == "_" {
+ nm = "_gen"
+ }
+ }
+ fx.paramnames = append(fx.paramnames, nm)
+ }
+ }
+ }
+ }
+ if false {
+ for i, f := range types {
+ log.Printf("%d %s %s", i, f.Internal, f.Found)
+ }
+ }
+}
+
+type Function struct {
+ Name string
+ Internal string // first letter lower case
+ paramtypes []string
+ paramnames []string
+ Results []string
+ Param string
+ Result string // do it in code, easier than in a template
+ Invoke string
+ Found string // file it was found in
+}
+
+var types []*Function
+var byname = map[string]*Function{} // internal names
+
+func doTypes() {
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, *def, nil, 0)
+ if err != nil {
+ log.Fatal(err)
+ }
+ fd, err := os.Create("/tmp/ast")
+ if err != nil {
+ log.Fatal(err)
+ }
+ ast.Fprint(fd, fset, f, ast.NotNilFilter)
+ ast.Inspect(f, inter)
+ sort.Slice(types, func(i, j int) bool { return types[i].Name < types[j].Name })
+ if false {
+ for i, f := range types {
+ log.Printf("%d %s(%v) %v", i, f.Name, f.paramtypes, f.Results)
+ }
+ }
+}
+
+func inter(n ast.Node) bool {
+ x, ok := n.(*ast.TypeSpec)
+ if !ok || x.Name.Name != *typ {
+ return true
+ }
+ m := x.Type.(*ast.InterfaceType).Methods.List
+ for _, fld := range m {
+ fn := fld.Type.(*ast.FuncType)
+ p := fn.Params.List
+ r := fn.Results.List
+ fx := &Function{
+ Name: fld.Names[0].String(),
+ }
+ fx.Internal = strings.ToLower(fx.Name[:1]) + fx.Name[1:]
+ for _, f := range p {
+ fx.paramtypes = append(fx.paramtypes, whatis(f.Type))
+ }
+ for _, f := range r {
+ fx.Results = append(fx.Results, whatis(f.Type))
+ }
+ types = append(types, fx)
+ byname[fx.Internal] = fx
+ }
+ return false
+}
+
+func whatis(x ast.Expr) string {
+ switch n := x.(type) {
+ case *ast.SelectorExpr:
+ return whatis(n.X) + "." + n.Sel.String()
+ case *ast.StarExpr:
+ return "*" + whatis(n.X)
+ case *ast.Ident:
+ if ast.IsExported(n.Name) {
+ // these are from package protocol
+ return "protocol." + n.Name
+ }
+ return n.Name
+ case *ast.ArrayType:
+ return "[]" + whatis(n.Elt)
+ case *ast.InterfaceType:
+ return "interface{}"
+ default:
+ log.Fatalf("Fatal %T", x)
+ return fmt.Sprintf("%T", x)
+ }
+}
diff --git a/gopls/internal/lsp/highlight.go b/gopls/internal/lsp/highlight.go
new file mode 100644
index 000000000..290444ec9
--- /dev/null
+++ b/gopls/internal/lsp/highlight.go
@@ -0,0 +1,45 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsp
+
+import (
+ "context"
+
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/event/tag"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/lsp/template"
+)
+
+func (s *Server) documentHighlight(ctx context.Context, params *protocol.DocumentHighlightParams) ([]protocol.DocumentHighlight, error) {
+ snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.Go)
+ defer release()
+ if !ok {
+ return nil, err
+ }
+
+ if snapshot.View().FileKind(fh) == source.Tmpl {
+ return template.Highlight(ctx, snapshot, fh, params.Position)
+ }
+
+ rngs, err := source.Highlight(ctx, snapshot, fh, params.Position)
+ if err != nil {
+ event.Error(ctx, "no highlight", err, tag.URI.Of(params.TextDocument.URI))
+ }
+ return toProtocolHighlight(rngs), nil
+}
+
+func toProtocolHighlight(rngs []protocol.Range) []protocol.DocumentHighlight {
+ result := make([]protocol.DocumentHighlight, 0, len(rngs))
+ kind := protocol.Text
+ for _, rng := range rngs {
+ result = append(result, protocol.DocumentHighlight{
+ Kind: kind,
+ Range: rng,
+ })
+ }
+ return result
+}
diff --git a/gopls/internal/lsp/hover.go b/gopls/internal/lsp/hover.go
new file mode 100644
index 000000000..2d1aae7d5
--- /dev/null
+++ b/gopls/internal/lsp/hover.go
@@ -0,0 +1,34 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsp
+
+import (
+ "context"
+
+ "golang.org/x/tools/gopls/internal/lsp/mod"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/lsp/template"
+ "golang.org/x/tools/gopls/internal/lsp/work"
+)
+
+func (s *Server) hover(ctx context.Context, params *protocol.HoverParams) (*protocol.Hover, error) {
+ snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.UnknownKind)
+ defer release()
+ if !ok {
+ return nil, err
+ }
+ switch snapshot.View().FileKind(fh) {
+ case source.Mod:
+ return mod.Hover(ctx, snapshot, fh, params.Position)
+ case source.Go:
+ return source.Hover(ctx, snapshot, fh, params.Position)
+ case source.Tmpl:
+ return template.Hover(ctx, snapshot, fh, params.Position)
+ case source.Work:
+ return work.Hover(ctx, snapshot, fh, params.Position)
+ }
+ return nil, nil
+}
diff --git a/gopls/internal/lsp/implementation.go b/gopls/internal/lsp/implementation.go
new file mode 100644
index 000000000..0eb82652e
--- /dev/null
+++ b/gopls/internal/lsp/implementation.go
@@ -0,0 +1,21 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsp
+
+import (
+ "context"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+)
+
+func (s *Server) implementation(ctx context.Context, params *protocol.ImplementationParams) ([]protocol.Location, error) {
+ snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.Go)
+ defer release()
+ if !ok {
+ return nil, err
+ }
+ return source.Implementation(ctx, snapshot, fh, params.Position)
+}
diff --git a/gopls/internal/lsp/inlay_hint.go b/gopls/internal/lsp/inlay_hint.go
new file mode 100644
index 000000000..6aceecb0d
--- /dev/null
+++ b/gopls/internal/lsp/inlay_hint.go
@@ -0,0 +1,21 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsp
+
+import (
+ "context"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+)
+
+func (s *Server) inlayHint(ctx context.Context, params *protocol.InlayHintParams) ([]protocol.InlayHint, error) {
+ snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.Go)
+ defer release()
+ if !ok {
+ return nil, err
+ }
+ return source.InlayHint(ctx, snapshot, fh, params.Range)
+}
diff --git a/gopls/internal/lsp/link.go b/gopls/internal/lsp/link.go
new file mode 100644
index 000000000..2713715cd
--- /dev/null
+++ b/gopls/internal/lsp/link.go
@@ -0,0 +1,278 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsp
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "go/ast"
+ "go/token"
+ "net/url"
+ "regexp"
+ "strings"
+ "sync"
+
+ "golang.org/x/mod/modfile"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/safetoken"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/event/tag"
+)
+
+func (s *Server) documentLink(ctx context.Context, params *protocol.DocumentLinkParams) (links []protocol.DocumentLink, err error) {
+ snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.UnknownKind)
+ defer release()
+ if !ok {
+ return nil, err
+ }
+ switch snapshot.View().FileKind(fh) {
+ case source.Mod:
+ links, err = modLinks(ctx, snapshot, fh)
+ case source.Go:
+ links, err = goLinks(ctx, snapshot, fh)
+ }
+ // Don't return errors for document links.
+ if err != nil {
+ event.Error(ctx, "failed to compute document links", err, tag.URI.Of(fh.URI()))
+ return nil, nil
+ }
+ return links, nil
+}
+
+func modLinks(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) ([]protocol.DocumentLink, error) {
+ pm, err := snapshot.ParseMod(ctx, fh)
+ if err != nil {
+ return nil, err
+ }
+
+ var links []protocol.DocumentLink
+ for _, req := range pm.File.Require {
+ if req.Syntax == nil {
+ continue
+ }
+ // See golang/go#36998: don't link to modules matching GOPRIVATE.
+ if snapshot.View().IsGoPrivatePath(req.Mod.Path) {
+ continue
+ }
+ dep := []byte(req.Mod.Path)
+ start, end := req.Syntax.Start.Byte, req.Syntax.End.Byte
+ i := bytes.Index(pm.Mapper.Content[start:end], dep)
+ if i == -1 {
+ continue
+ }
+ // Shift the start position to the location of the
+ // dependency within the require statement.
+ target := source.BuildLink(snapshot.View().Options().LinkTarget, "mod/"+req.Mod.String(), "")
+ l, err := toProtocolLink(pm.Mapper, target, start+i, start+i+len(dep))
+ if err != nil {
+ return nil, err
+ }
+ links = append(links, l)
+ }
+ // TODO(ridersofrohan): handle links for replace and exclude directives.
+ if syntax := pm.File.Syntax; syntax == nil {
+ return links, nil
+ }
+
+ // Get all the links that are contained in the comments of the file.
+ urlRegexp := snapshot.View().Options().URLRegexp
+ for _, expr := range pm.File.Syntax.Stmt {
+ comments := expr.Comment()
+ if comments == nil {
+ continue
+ }
+ for _, section := range [][]modfile.Comment{comments.Before, comments.Suffix, comments.After} {
+ for _, comment := range section {
+ l, err := findLinksInString(urlRegexp, comment.Token, comment.Start.Byte, pm.Mapper)
+ if err != nil {
+ return nil, err
+ }
+ links = append(links, l...)
+ }
+ }
+ }
+ return links, nil
+}
+
+// goLinks returns the set of hyperlink annotations for the specified Go file.
+func goLinks(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) ([]protocol.DocumentLink, error) {
+ view := snapshot.View()
+
+ pgf, err := snapshot.ParseGo(ctx, fh, source.ParseFull)
+ if err != nil {
+ return nil, err
+ }
+
+ var links []protocol.DocumentLink
+
+ // Create links for import specs.
+ if view.Options().ImportShortcut.ShowLinks() {
+
+ // If links are to pkg.go.dev, append module version suffixes.
+ // This requires the import map from the package metadata. Ignore errors.
+ var depsByImpPath map[source.ImportPath]source.PackageID
+ if strings.ToLower(view.Options().LinkTarget) == "pkg.go.dev" {
+ if metas, _ := snapshot.MetadataForFile(ctx, fh.URI()); len(metas) > 0 {
+ depsByImpPath = metas[0].DepsByImpPath // 0 => narrowest package
+ }
+ }
+
+ for _, imp := range pgf.File.Imports {
+ importPath := source.UnquoteImportPath(imp)
+ if importPath == "" {
+ continue // bad import
+ }
+ // See golang/go#36998: don't link to modules matching GOPRIVATE.
+ if view.IsGoPrivatePath(string(importPath)) {
+ continue
+ }
+
+ urlPath := string(importPath)
+
+ // For pkg.go.dev, append module version suffix to package import path.
+ if m := snapshot.Metadata(depsByImpPath[importPath]); m != nil && m.Module != nil && m.Module.Path != "" && m.Module.Version != "" {
+ urlPath = strings.Replace(urlPath, m.Module.Path, m.Module.Path+"@"+m.Module.Version, 1)
+ }
+
+ start, end, err := safetoken.Offsets(pgf.Tok, imp.Path.Pos(), imp.Path.End())
+ if err != nil {
+ return nil, err
+ }
+ targetURL := source.BuildLink(view.Options().LinkTarget, urlPath, "")
+ // Account for the quotation marks in the positions.
+ l, err := toProtocolLink(pgf.Mapper, targetURL, start+len(`"`), end-len(`"`))
+ if err != nil {
+ return nil, err
+ }
+ links = append(links, l)
+ }
+ }
+
+ urlRegexp := snapshot.View().Options().URLRegexp
+
+ // Gather links found in string literals.
+ var str []*ast.BasicLit
+ ast.Inspect(pgf.File, func(node ast.Node) bool {
+ switch n := node.(type) {
+ case *ast.ImportSpec:
+ return false // don't process import strings again
+ case *ast.BasicLit:
+ if n.Kind == token.STRING {
+ str = append(str, n)
+ }
+ }
+ return true
+ })
+ for _, s := range str {
+ strOffset, err := safetoken.Offset(pgf.Tok, s.Pos())
+ if err != nil {
+ return nil, err
+ }
+ l, err := findLinksInString(urlRegexp, s.Value, strOffset, pgf.Mapper)
+ if err != nil {
+ return nil, err
+ }
+ links = append(links, l...)
+ }
+
+ // Gather links found in comments.
+ for _, commentGroup := range pgf.File.Comments {
+ for _, comment := range commentGroup.List {
+ commentOffset, err := safetoken.Offset(pgf.Tok, comment.Pos())
+ if err != nil {
+ return nil, err
+ }
+ l, err := findLinksInString(urlRegexp, comment.Text, commentOffset, pgf.Mapper)
+ if err != nil {
+ return nil, err
+ }
+ links = append(links, l...)
+ }
+ }
+
+ return links, nil
+}
+
+// acceptedSchemes controls the schemes that URLs must have to be shown to the
+// user. Other schemes can't be opened by LSP clients, so linkifying them is
+// distracting. See golang/go#43990.
+var acceptedSchemes = map[string]bool{
+ "http": true,
+ "https": true,
+}
+
+// urlRegexp is the user-supplied regular expression to match URL.
+// srcOffset is the start offset of 'src' within m's file.
+func findLinksInString(urlRegexp *regexp.Regexp, src string, srcOffset int, m *protocol.Mapper) ([]protocol.DocumentLink, error) {
+ var links []protocol.DocumentLink
+ for _, index := range urlRegexp.FindAllIndex([]byte(src), -1) {
+ start, end := index[0], index[1]
+ link := src[start:end]
+ linkURL, err := url.Parse(link)
+ // Fallback: Linkify IP addresses as suggested in golang/go#18824.
+ if err != nil {
+ linkURL, err = url.Parse("//" + link)
+ // Not all potential links will be valid, so don't return this error.
+ if err != nil {
+ continue
+ }
+ }
+ // If the URL has no scheme, use https.
+ if linkURL.Scheme == "" {
+ linkURL.Scheme = "https"
+ }
+ if !acceptedSchemes[linkURL.Scheme] {
+ continue
+ }
+
+ l, err := toProtocolLink(m, linkURL.String(), srcOffset+start, srcOffset+end)
+ if err != nil {
+ return nil, err
+ }
+ links = append(links, l)
+ }
+ // Handle golang/go#1234-style links.
+ r := getIssueRegexp()
+ for _, index := range r.FindAllIndex([]byte(src), -1) {
+ start, end := index[0], index[1]
+ matches := r.FindStringSubmatch(src)
+ if len(matches) < 4 {
+ continue
+ }
+ org, repo, number := matches[1], matches[2], matches[3]
+ targetURL := fmt.Sprintf("https://github.com/%s/%s/issues/%s", org, repo, number)
+ l, err := toProtocolLink(m, targetURL, srcOffset+start, srcOffset+end)
+ if err != nil {
+ return nil, err
+ }
+ links = append(links, l)
+ }
+ return links, nil
+}
+
+func getIssueRegexp() *regexp.Regexp {
+ once.Do(func() {
+ issueRegexp = regexp.MustCompile(`(\w+)/([\w-]+)#([0-9]+)`)
+ })
+ return issueRegexp
+}
+
+var (
+ once sync.Once
+ issueRegexp *regexp.Regexp
+)
+
+func toProtocolLink(m *protocol.Mapper, targetURL string, start, end int) (protocol.DocumentLink, error) {
+ rng, err := m.OffsetRange(start, end)
+ if err != nil {
+ return protocol.DocumentLink{}, err
+ }
+ return protocol.DocumentLink{
+ Range: rng,
+ Target: targetURL,
+ }, nil
+}
diff --git a/gopls/internal/lsp/lsp_test.go b/gopls/internal/lsp/lsp_test.go
new file mode 100644
index 000000000..75c46fe31
--- /dev/null
+++ b/gopls/internal/lsp/lsp_test.go
@@ -0,0 +1,1360 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsp
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "sort"
+ "strings"
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/google/go-cmp/cmp/cmpopts"
+ "golang.org/x/tools/gopls/internal/lsp/cache"
+ "golang.org/x/tools/gopls/internal/lsp/command"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/lsp/tests"
+ "golang.org/x/tools/gopls/internal/lsp/tests/compare"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/bug"
+ "golang.org/x/tools/internal/diff"
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/testenv"
+)
+
+func TestMain(m *testing.M) {
+ bug.PanicOnBugs = true
+ testenv.ExitIfSmallMachine()
+
+ // Set the global exporter to nil so that we don't log to stderr. This avoids
+ // a lot of misleading noise in test output.
+ //
+ // TODO(rfindley): investigate whether we can/should capture logs scoped to
+ // individual tests by passing in a context with a local exporter.
+ event.SetExporter(nil)
+
+ os.Exit(m.Run())
+}
+
+// TestLSP runs the marker tests in files beneath testdata/ using
+// implementations of each of the marker operations (e.g. @codelens) that
+// make LSP RPCs (e.g. textDocument/codeLens) to a gopls server.
+func TestLSP(t *testing.T) {
+ tests.RunTests(t, "testdata", true, testLSP)
+}
+
+func testLSP(t *testing.T, datum *tests.Data) {
+ ctx := tests.Context(t)
+
+ session := cache.NewSession(ctx, cache.New(nil), nil)
+ options := source.DefaultOptions().Clone()
+ tests.DefaultOptions(options)
+ session.SetOptions(options)
+ options.SetEnvSlice(datum.Config.Env)
+ view, snapshot, release, err := session.NewView(ctx, datum.Config.Dir, span.URIFromPath(datum.Config.Dir), options)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ defer session.RemoveView(view)
+
+ // Enable type error analyses for tests.
+ // TODO(golang/go#38212): Delete this once they are enabled by default.
+ tests.EnableAllAnalyzers(options)
+ session.SetViewOptions(ctx, view, options)
+
+ // Enable all inlay hints for tests.
+ tests.EnableAllInlayHints(options)
+
+ // Only run the -modfile specific tests in module mode with Go 1.14 or above.
+ datum.ModfileFlagAvailable = len(snapshot.ModFiles()) > 0 && testenv.Go1Point() >= 14
+ release()
+
+ // Open all files for performance reasons. This is done because gopls only
+ // keeps active packages in memory for open files.
+ //
+ // In practice clients will only send document-oriented requests for open
+ // files.
+ var modifications []source.FileModification
+ for _, module := range datum.Exported.Modules {
+ for name := range module.Files {
+ filename := datum.Exported.File(module.Name, name)
+ if filepath.Ext(filename) != ".go" {
+ continue
+ }
+ content, err := datum.Exported.FileContents(filename)
+ if err != nil {
+ t.Fatal(err)
+ }
+ modifications = append(modifications, source.FileModification{
+ URI: span.URIFromPath(filename),
+ Action: source.Open,
+ Version: -1,
+ Text: content,
+ LanguageID: "go",
+ })
+ }
+ }
+ for filename, content := range datum.Config.Overlay {
+ if filepath.Ext(filename) != ".go" {
+ continue
+ }
+ modifications = append(modifications, source.FileModification{
+ URI: span.URIFromPath(filename),
+ Action: source.Open,
+ Version: -1,
+ Text: content,
+ LanguageID: "go",
+ })
+ }
+ if err := session.ModifyFiles(ctx, modifications); err != nil {
+ t.Fatal(err)
+ }
+ r := &runner{
+ data: datum,
+ ctx: ctx,
+ normalizers: tests.CollectNormalizers(datum.Exported),
+ editRecv: make(chan map[span.URI][]byte, 1),
+ }
+
+ r.server = NewServer(session, testClient{runner: r})
+ tests.Run(t, r, datum)
+}
+
+// runner implements tests.Tests by making LSP RPCs to a gopls server.
+type runner struct {
+ server *Server
+ data *tests.Data
+ diagnostics map[span.URI][]*source.Diagnostic
+ ctx context.Context
+ normalizers []tests.Normalizer
+ editRecv chan map[span.URI][]byte
+}
+
+// testClient stubs any client functions that may be called by LSP functions.
+type testClient struct {
+ protocol.Client
+ runner *runner
+}
+
+func (c testClient) Close() error {
+ return nil
+}
+
+// Trivially implement PublishDiagnostics so that we can call
+// server.publishReports below to de-dup sent diagnostics.
+func (c testClient) PublishDiagnostics(context.Context, *protocol.PublishDiagnosticsParams) error {
+ return nil
+}
+
+func (c testClient) ShowMessage(context.Context, *protocol.ShowMessageParams) error {
+ return nil
+}
+
+func (c testClient) ApplyEdit(ctx context.Context, params *protocol.ApplyWorkspaceEditParams) (*protocol.ApplyWorkspaceEditResult, error) {
+ res, err := applyTextDocumentEdits(c.runner, params.Edit.DocumentChanges)
+ if err != nil {
+ return nil, err
+ }
+ c.runner.editRecv <- res
+ return &protocol.ApplyWorkspaceEditResult{Applied: true}, nil
+}
+
+func (r *runner) CallHierarchy(t *testing.T, spn span.Span, expectedCalls *tests.CallHierarchyResult) {
+ mapper, err := r.data.Mapper(spn.URI())
+ if err != nil {
+ t.Fatal(err)
+ }
+ loc, err := mapper.SpanLocation(spn)
+ if err != nil {
+ t.Fatalf("failed for %v: %v", spn, err)
+ }
+
+ params := &protocol.CallHierarchyPrepareParams{
+ TextDocumentPositionParams: protocol.LocationTextDocumentPositionParams(loc),
+ }
+
+ items, err := r.server.PrepareCallHierarchy(r.ctx, params)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(items) == 0 {
+ t.Fatalf("expected call hierarchy item to be returned for identifier at %v\n", loc.Range)
+ }
+
+ callLocation := protocol.Location{
+ URI: items[0].URI,
+ Range: items[0].Range,
+ }
+ if callLocation != loc {
+ t.Fatalf("expected server.PrepareCallHierarchy to return identifier at %v but got %v\n", loc, callLocation)
+ }
+
+ incomingCalls, err := r.server.IncomingCalls(r.ctx, &protocol.CallHierarchyIncomingCallsParams{Item: items[0]})
+ if err != nil {
+ t.Error(err)
+ }
+ var incomingCallItems []protocol.CallHierarchyItem
+ for _, item := range incomingCalls {
+ incomingCallItems = append(incomingCallItems, item.From)
+ }
+ msg := tests.DiffCallHierarchyItems(incomingCallItems, expectedCalls.IncomingCalls)
+ if msg != "" {
+ t.Error(fmt.Sprintf("incoming calls: %s", msg))
+ }
+
+ outgoingCalls, err := r.server.OutgoingCalls(r.ctx, &protocol.CallHierarchyOutgoingCallsParams{Item: items[0]})
+ if err != nil {
+ t.Error(err)
+ }
+ var outgoingCallItems []protocol.CallHierarchyItem
+ for _, item := range outgoingCalls {
+ outgoingCallItems = append(outgoingCallItems, item.To)
+ }
+ msg = tests.DiffCallHierarchyItems(outgoingCallItems, expectedCalls.OutgoingCalls)
+ if msg != "" {
+ t.Error(fmt.Sprintf("outgoing calls: %s", msg))
+ }
+}
+
+func (r *runner) CodeLens(t *testing.T, uri span.URI, want []protocol.CodeLens) {
+ if !strings.HasSuffix(uri.Filename(), "go.mod") {
+ return
+ }
+ got, err := r.server.codeLens(r.ctx, &protocol.CodeLensParams{
+ TextDocument: protocol.TextDocumentIdentifier{
+ URI: protocol.DocumentURI(uri),
+ },
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if diff := tests.DiffCodeLens(uri, want, got); diff != "" {
+ t.Errorf("%s: %s", uri, diff)
+ }
+}
+
+func (r *runner) Diagnostics(t *testing.T, uri span.URI, want []*source.Diagnostic) {
+ // Get the diagnostics for this view if we have not done it before.
+ v := r.server.session.View(r.data.Config.Dir)
+ r.collectDiagnostics(v)
+ tests.CompareDiagnostics(t, uri, want, r.diagnostics[uri])
+}
+
+func (r *runner) FoldingRanges(t *testing.T, spn span.Span) {
+ uri := spn.URI()
+ view, err := r.server.session.ViewOf(uri)
+ if err != nil {
+ t.Fatal(err)
+ }
+ original := view.Options()
+ modified := original
+ defer r.server.session.SetViewOptions(r.ctx, view, original)
+
+ for _, test := range []struct {
+ lineFoldingOnly bool
+ prefix string
+ }{
+ {false, "foldingRange"},
+ {true, "foldingRange-lineFolding"},
+ } {
+ modified.LineFoldingOnly = test.lineFoldingOnly
+ view, err = r.server.session.SetViewOptions(r.ctx, view, modified)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+ ranges, err := r.server.FoldingRange(r.ctx, &protocol.FoldingRangeParams{
+ TextDocument: protocol.TextDocumentIdentifier{
+ URI: protocol.URIFromSpanURI(uri),
+ },
+ })
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+ r.foldingRanges(t, test.prefix, uri, ranges)
+ }
+}
+
+func (r *runner) foldingRanges(t *testing.T, prefix string, uri span.URI, ranges []protocol.FoldingRange) {
+ m, err := r.data.Mapper(uri)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Fold all ranges.
+ nonOverlapping := nonOverlappingRanges(ranges)
+ for i, rngs := range nonOverlapping {
+ got, err := foldRanges(m, string(m.Content), rngs)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+ tag := fmt.Sprintf("%s-%d", prefix, i)
+ want := string(r.data.Golden(t, tag, uri.Filename(), func() ([]byte, error) {
+ return []byte(got), nil
+ }))
+
+ if want != got {
+ t.Errorf("%s: foldingRanges failed for %s, expected:\n%v\ngot:\n%v", tag, uri.Filename(), want, got)
+ }
+ }
+
+ // Filter by kind.
+ kinds := []protocol.FoldingRangeKind{protocol.Imports, protocol.Comment}
+ for _, kind := range kinds {
+ var kindOnly []protocol.FoldingRange
+ for _, fRng := range ranges {
+ if fRng.Kind == string(kind) {
+ kindOnly = append(kindOnly, fRng)
+ }
+ }
+
+ nonOverlapping := nonOverlappingRanges(kindOnly)
+ for i, rngs := range nonOverlapping {
+ got, err := foldRanges(m, string(m.Content), rngs)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+ tag := fmt.Sprintf("%s-%s-%d", prefix, kind, i)
+ want := string(r.data.Golden(t, tag, uri.Filename(), func() ([]byte, error) {
+ return []byte(got), nil
+ }))
+
+ if want != got {
+ t.Errorf("%s: foldingRanges failed for %s, expected:\n%v\ngot:\n%v", tag, uri.Filename(), want, got)
+ }
+ }
+
+ }
+}
+
+func nonOverlappingRanges(ranges []protocol.FoldingRange) (res [][]protocol.FoldingRange) {
+ for _, fRng := range ranges {
+ setNum := len(res)
+ for i := 0; i < len(res); i++ {
+ canInsert := true
+ for _, rng := range res[i] {
+ if conflict(rng, fRng) {
+ canInsert = false
+ break
+ }
+ }
+ if canInsert {
+ setNum = i
+ break
+ }
+ }
+ if setNum == len(res) {
+ res = append(res, []protocol.FoldingRange{})
+ }
+ res[setNum] = append(res[setNum], fRng)
+ }
+ return res
+}
+
+func conflict(a, b protocol.FoldingRange) bool {
+ // a start position is <= b start positions
+ return (a.StartLine < b.StartLine || (a.StartLine == b.StartLine && a.StartCharacter <= b.StartCharacter)) &&
+ (a.EndLine > b.StartLine || (a.EndLine == b.StartLine && a.EndCharacter > b.StartCharacter))
+}
+
+func foldRanges(m *protocol.Mapper, contents string, ranges []protocol.FoldingRange) (string, error) {
+ foldedText := "<>"
+ res := contents
+ // Apply the edits from the end of the file forward
+ // to preserve the offsets
+ // TODO(adonovan): factor to use diff.ApplyEdits, which validates the input.
+ for i := len(ranges) - 1; i >= 0; i-- {
+ r := ranges[i]
+ start, err := m.PositionPoint(protocol.Position{Line: r.StartLine, Character: r.StartCharacter})
+ if err != nil {
+ return "", err
+ }
+ end, err := m.PositionPoint(protocol.Position{Line: r.EndLine, Character: r.EndCharacter})
+ if err != nil {
+ return "", err
+ }
+ res = res[:start.Offset()] + foldedText + res[end.Offset():]
+ }
+ return res, nil
+}
+
+func (r *runner) Format(t *testing.T, spn span.Span) {
+ uri := spn.URI()
+ filename := uri.Filename()
+ gofmted := r.data.Golden(t, "gofmt", filename, func() ([]byte, error) {
+ cmd := exec.Command("gofmt", filename)
+ out, _ := cmd.Output() // ignore error, sometimes we have intentionally ungofmt-able files
+ return out, nil
+ })
+
+ edits, err := r.server.Formatting(r.ctx, &protocol.DocumentFormattingParams{
+ TextDocument: protocol.TextDocumentIdentifier{
+ URI: protocol.URIFromSpanURI(uri),
+ },
+ })
+ if err != nil {
+ if len(gofmted) > 0 {
+ t.Error(err)
+ }
+ return
+ }
+ m, err := r.data.Mapper(uri)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, _, err := source.ApplyProtocolEdits(m, edits)
+ if err != nil {
+ t.Error(err)
+ }
+ if diff := compare.Bytes(gofmted, got); diff != "" {
+ t.Errorf("format failed for %s (-want +got):\n%s", filename, diff)
+ }
+}
+
+func (r *runner) SemanticTokens(t *testing.T, spn span.Span) {
+ uri := spn.URI()
+ filename := uri.Filename()
+ // this is called solely for coverage in semantic.go
+ _, err := r.server.semanticTokensFull(r.ctx, &protocol.SemanticTokensParams{
+ TextDocument: protocol.TextDocumentIdentifier{
+ URI: protocol.URIFromSpanURI(uri),
+ },
+ })
+ if err != nil {
+ t.Errorf("%v for %s", err, filename)
+ }
+ _, err = r.server.semanticTokensRange(r.ctx, &protocol.SemanticTokensRangeParams{
+ TextDocument: protocol.TextDocumentIdentifier{
+ URI: protocol.URIFromSpanURI(uri),
+ },
+ // any legal range. Just to exercise the call.
+ Range: protocol.Range{
+ Start: protocol.Position{
+ Line: 0,
+ Character: 0,
+ },
+ End: protocol.Position{
+ Line: 2,
+ Character: 0,
+ },
+ },
+ })
+ if err != nil {
+ t.Errorf("%v for Range %s", err, filename)
+ }
+}
+
+func (r *runner) Import(t *testing.T, spn span.Span) {
+ // Invokes textDocument/codeAction and applies all the "goimports" edits.
+
+ uri := spn.URI()
+ filename := uri.Filename()
+ actions, err := r.server.CodeAction(r.ctx, &protocol.CodeActionParams{
+ TextDocument: protocol.TextDocumentIdentifier{
+ URI: protocol.URIFromSpanURI(uri),
+ },
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ m, err := r.data.Mapper(uri)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got := m.Content
+ if len(actions) > 0 {
+ res, err := applyTextDocumentEdits(r, actions[0].Edit.DocumentChanges)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got = res[uri]
+ }
+ want := r.data.Golden(t, "goimports", filename, func() ([]byte, error) {
+ return got, nil
+ })
+ if diff := compare.Bytes(want, got); diff != "" {
+ t.Errorf("import failed for %s:\n%s", filename, diff)
+ }
+}
+
+func (r *runner) SuggestedFix(t *testing.T, spn span.Span, actionKinds []tests.SuggestedFix, expectedActions int) {
+ uri := spn.URI()
+ view, err := r.server.session.ViewOf(uri)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ m, err := r.data.Mapper(uri)
+ if err != nil {
+ t.Fatal(err)
+ }
+ rng, err := m.SpanRange(spn)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Get the diagnostics for this view if we have not done it before.
+ r.collectDiagnostics(view)
+ var diagnostics []protocol.Diagnostic
+ for _, d := range r.diagnostics[uri] {
+ // Compare the start positions rather than the entire range because
+ // some diagnostics have a range with the same start and end position (8:1-8:1).
+ // The current marker functionality prevents us from having a range of 0 length.
+ if protocol.ComparePosition(d.Range.Start, rng.Start) == 0 {
+ diagnostics = append(diagnostics, toProtocolDiagnostics([]*source.Diagnostic{d})...)
+ break
+ }
+ }
+ var codeActionKinds []protocol.CodeActionKind
+ for _, k := range actionKinds {
+ codeActionKinds = append(codeActionKinds, protocol.CodeActionKind(k.ActionKind))
+ }
+ allActions, err := r.server.CodeAction(r.ctx, &protocol.CodeActionParams{
+ TextDocument: protocol.TextDocumentIdentifier{
+ URI: protocol.URIFromSpanURI(uri),
+ },
+ Range: rng,
+ Context: protocol.CodeActionContext{
+ Only: codeActionKinds,
+ Diagnostics: diagnostics,
+ },
+ })
+ if err != nil {
+ t.Fatalf("CodeAction %s failed: %v", spn, err)
+ }
+ var actions []protocol.CodeAction
+ for _, action := range allActions {
+ for _, fix := range actionKinds {
+ if strings.Contains(action.Title, fix.Title) {
+ actions = append(actions, action)
+ break
+ }
+ }
+
+ }
+ if len(actions) != expectedActions {
+ var summaries []string
+ for _, a := range actions {
+ summaries = append(summaries, fmt.Sprintf("%q (%s)", a.Title, a.Kind))
+ }
+ t.Fatalf("CodeAction(...): got %d code actions (%v), want %d", len(actions), summaries, expectedActions)
+ }
+ action := actions[0]
+ var match bool
+ for _, k := range codeActionKinds {
+ if action.Kind == k {
+ match = true
+ break
+ }
+ }
+ if !match {
+ t.Fatalf("unexpected kind for code action %s, got %v, want one of %v", action.Title, action.Kind, codeActionKinds)
+ }
+ var res map[span.URI][]byte
+ if cmd := action.Command; cmd != nil {
+ _, err := r.server.ExecuteCommand(r.ctx, &protocol.ExecuteCommandParams{
+ Command: action.Command.Command,
+ Arguments: action.Command.Arguments,
+ })
+ if err != nil {
+ t.Fatalf("error converting command %q to edits: %v", action.Command.Command, err)
+ }
+ res = <-r.editRecv
+ } else {
+ res, err = applyTextDocumentEdits(r, action.Edit.DocumentChanges)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+ for u, got := range res {
+ want := r.data.Golden(t, "suggestedfix_"+tests.SpanName(spn), u.Filename(), func() ([]byte, error) {
+ return got, nil
+ })
+ if diff := compare.Bytes(want, got); diff != "" {
+ t.Errorf("suggested fixes failed for %s:\n%s", u.Filename(), diff)
+ }
+ }
+}
+
+func (r *runner) FunctionExtraction(t *testing.T, start span.Span, end span.Span) {
+ uri := start.URI()
+ m, err := r.data.Mapper(uri)
+ if err != nil {
+ t.Fatal(err)
+ }
+ spn := span.New(start.URI(), start.Start(), end.End())
+ rng, err := m.SpanRange(spn)
+ if err != nil {
+ t.Fatal(err)
+ }
+ actionsRaw, err := r.server.CodeAction(r.ctx, &protocol.CodeActionParams{
+ TextDocument: protocol.TextDocumentIdentifier{
+ URI: protocol.URIFromSpanURI(uri),
+ },
+ Range: rng,
+ Context: protocol.CodeActionContext{
+ Only: []protocol.CodeActionKind{"refactor.extract"},
+ },
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ var actions []protocol.CodeAction
+ for _, action := range actionsRaw {
+ if action.Command.Title == "Extract function" {
+ actions = append(actions, action)
+ }
+ }
+ // Hack: We assume that we only get one code action per range.
+ // TODO(rstambler): Support multiple code actions per test.
+ if len(actions) == 0 || len(actions) > 1 {
+ t.Fatalf("unexpected number of code actions, want 1, got %v", len(actions))
+ }
+ _, err = r.server.ExecuteCommand(r.ctx, &protocol.ExecuteCommandParams{
+ Command: actions[0].Command.Command,
+ Arguments: actions[0].Command.Arguments,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ res := <-r.editRecv
+ for u, got := range res {
+ want := r.data.Golden(t, "functionextraction_"+tests.SpanName(spn), u.Filename(), func() ([]byte, error) {
+ return got, nil
+ })
+ if diff := compare.Bytes(want, got); diff != "" {
+ t.Errorf("function extraction failed for %s:\n%s", u.Filename(), diff)
+ }
+ }
+}
+
+func (r *runner) MethodExtraction(t *testing.T, start span.Span, end span.Span) {
+ uri := start.URI()
+ m, err := r.data.Mapper(uri)
+ if err != nil {
+ t.Fatal(err)
+ }
+ spn := span.New(start.URI(), start.Start(), end.End())
+ rng, err := m.SpanRange(spn)
+ if err != nil {
+ t.Fatal(err)
+ }
+ actionsRaw, err := r.server.CodeAction(r.ctx, &protocol.CodeActionParams{
+ TextDocument: protocol.TextDocumentIdentifier{
+ URI: protocol.URIFromSpanURI(uri),
+ },
+ Range: rng,
+ Context: protocol.CodeActionContext{
+ Only: []protocol.CodeActionKind{"refactor.extract"},
+ },
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ var actions []protocol.CodeAction
+ for _, action := range actionsRaw {
+ if action.Command.Title == "Extract method" {
+ actions = append(actions, action)
+ }
+ }
+ // Hack: We assume that we only get one matching code action per range.
+ // TODO(rstambler): Support multiple code actions per test.
+ if len(actions) == 0 || len(actions) > 1 {
+ t.Fatalf("unexpected number of code actions, want 1, got %v", len(actions))
+ }
+ _, err = r.server.ExecuteCommand(r.ctx, &protocol.ExecuteCommandParams{
+ Command: actions[0].Command.Command,
+ Arguments: actions[0].Command.Arguments,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ res := <-r.editRecv
+ for u, got := range res {
+ want := r.data.Golden(t, "methodextraction_"+tests.SpanName(spn), u.Filename(), func() ([]byte, error) {
+ return got, nil
+ })
+ if diff := compare.Bytes(want, got); diff != "" {
+ t.Errorf("method extraction failed for %s:\n%s", u.Filename(), diff)
+ }
+ }
+}
+
+// TODO(rfindley): This handler needs more work. The output is still a bit hard
+// to read (range diffs do not format nicely), and it is too entangled with hover.
+func (r *runner) Definition(t *testing.T, _ span.Span, d tests.Definition) {
+ sm, err := r.data.Mapper(d.Src.URI())
+ if err != nil {
+ t.Fatal(err)
+ }
+ loc, err := sm.SpanLocation(d.Src)
+ if err != nil {
+ t.Fatalf("failed for %v: %v", d.Src, err)
+ }
+ tdpp := protocol.LocationTextDocumentPositionParams(loc)
+ var got []protocol.Location
+ var hover *protocol.Hover
+ if d.IsType {
+ params := &protocol.TypeDefinitionParams{
+ TextDocumentPositionParams: tdpp,
+ }
+ got, err = r.server.TypeDefinition(r.ctx, params)
+ } else {
+ params := &protocol.DefinitionParams{
+ TextDocumentPositionParams: tdpp,
+ }
+ got, err = r.server.Definition(r.ctx, params)
+ if err != nil {
+ t.Fatalf("failed for %v: %+v", d.Src, err)
+ }
+ v := &protocol.HoverParams{
+ TextDocumentPositionParams: tdpp,
+ }
+ hover, err = r.server.Hover(r.ctx, v)
+ }
+ if err != nil {
+ t.Fatalf("failed for %v: %v", d.Src, err)
+ }
+ dm, err := r.data.Mapper(d.Def.URI())
+ if err != nil {
+ t.Fatal(err)
+ }
+ def, err := dm.SpanLocation(d.Def)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !d.OnlyHover {
+ want := []protocol.Location{def}
+ if diff := cmp.Diff(want, got); diff != "" {
+ t.Fatalf("Definition(%s) mismatch (-want +got):\n%s", d.Src, diff)
+ }
+ }
+ didSomething := false
+ if hover != nil {
+ didSomething = true
+ tag := fmt.Sprintf("%s-hoverdef", d.Name)
+ want := string(r.data.Golden(t, tag, d.Src.URI().Filename(), func() ([]byte, error) {
+ return []byte(hover.Contents.Value), nil
+ }))
+ got := hover.Contents.Value
+ if diff := tests.DiffMarkdown(want, got); diff != "" {
+ t.Errorf("%s: markdown mismatch:\n%s", d.Src, diff)
+ }
+ }
+ if !d.OnlyHover {
+ didSomething = true
+ locURI := got[0].URI.SpanURI()
+ lm, err := r.data.Mapper(locURI)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if def, err := lm.LocationSpan(got[0]); err != nil {
+ t.Fatalf("failed for %v: %v", got[0], err)
+ } else if def != d.Def {
+ t.Errorf("for %v got %v want %v", d.Src, def, d.Def)
+ }
+ }
+ if !didSomething {
+ t.Errorf("no tests ran for %s", d.Src.URI())
+ }
+}
+
+func (r *runner) Implementation(t *testing.T, spn span.Span, wantSpans []span.Span) {
+ sm, err := r.data.Mapper(spn.URI())
+ if err != nil {
+ t.Fatal(err)
+ }
+ loc, err := sm.SpanLocation(spn)
+ if err != nil {
+ t.Fatal(err)
+ }
+ gotImpls, err := r.server.Implementation(r.ctx, &protocol.ImplementationParams{
+ TextDocumentPositionParams: protocol.LocationTextDocumentPositionParams(loc),
+ })
+ if err != nil {
+ t.Fatalf("Server.Implementation(%s): %v", spn, err)
+ }
+ gotLocs, err := tests.LocationsToSpans(r.data, gotImpls)
+ if err != nil {
+ t.Fatal(err)
+ }
+ sanitize := func(s string) string {
+ return strings.ReplaceAll(s, r.data.Config.Dir, "gopls/internal/lsp/testdata")
+ }
+ want := sanitize(tests.SortAndFormatSpans(wantSpans))
+ got := sanitize(tests.SortAndFormatSpans(gotLocs))
+ if got != want {
+ t.Errorf("implementations(%s):\n%s", sanitize(fmt.Sprint(spn)), diff.Unified("want", "got", want, got))
+ }
+}
+
+func (r *runner) Highlight(t *testing.T, src span.Span, spans []span.Span) {
+ m, err := r.data.Mapper(src.URI())
+ if err != nil {
+ t.Fatal(err)
+ }
+ loc, err := m.SpanLocation(src)
+ if err != nil {
+ t.Fatal(err)
+ }
+ params := &protocol.DocumentHighlightParams{
+ TextDocumentPositionParams: protocol.LocationTextDocumentPositionParams(loc),
+ }
+ highlights, err := r.server.DocumentHighlight(r.ctx, params)
+ if err != nil {
+ t.Fatalf("DocumentHighlight(%v) failed: %v", params, err)
+ }
+ var got []protocol.Range
+ for _, h := range highlights {
+ got = append(got, h.Range)
+ }
+
+ var want []protocol.Range
+ for _, s := range spans {
+ rng, err := m.SpanRange(s)
+ if err != nil {
+ t.Fatalf("Mapper.SpanRange(%v) failed: %v", s, err)
+ }
+ want = append(want, rng)
+ }
+
+ sortRanges := func(s []protocol.Range) {
+ sort.Slice(s, func(i, j int) bool {
+ return protocol.CompareRange(s[i], s[j]) < 0
+ })
+ }
+
+ sortRanges(got)
+ sortRanges(want)
+
+ if diff := cmp.Diff(want, got); diff != "" {
+ t.Errorf("DocumentHighlight(%v) mismatch (-want +got):\n%s", src, diff)
+ }
+}
+
+func (r *runner) References(t *testing.T, src span.Span, itemList []span.Span) {
+ // This test is substantially the same as (*runner).References in source/source_test.go.
+ // TODO(adonovan): Factor (and remove fluff). Where should the common code live?
+
+ sm, err := r.data.Mapper(src.URI())
+ if err != nil {
+ t.Fatal(err)
+ }
+ loc, err := sm.SpanLocation(src)
+ if err != nil {
+ t.Fatalf("failed for %v: %v", src, err)
+ }
+ for _, includeDeclaration := range []bool{true, false} {
+ t.Run(fmt.Sprintf("refs-declaration-%v", includeDeclaration), func(t *testing.T) {
+ want := make(map[protocol.Location]bool)
+ for i, pos := range itemList {
+ // We don't want the first result if we aren't including the declaration.
+ // TODO(adonovan): don't assume a single declaration:
+ // there may be >1 if corresponding methods are considered.
+ if i == 0 && !includeDeclaration {
+ continue
+ }
+ m, err := r.data.Mapper(pos.URI())
+ if err != nil {
+ t.Fatal(err)
+ }
+ loc, err := m.SpanLocation(pos)
+ if err != nil {
+ t.Fatalf("failed for %v: %v", src, err)
+ }
+ want[loc] = true
+ }
+ params := &protocol.ReferenceParams{
+ TextDocumentPositionParams: protocol.LocationTextDocumentPositionParams(loc),
+ Context: protocol.ReferenceContext{
+ IncludeDeclaration: includeDeclaration,
+ },
+ }
+ got, err := r.server.References(r.ctx, params)
+ if err != nil {
+ t.Fatalf("failed for %v: %v", src, err)
+ }
+
+ sanitize := func(s string) string {
+ // In practice, CONFIGDIR means "gopls/internal/lsp/testdata".
+ return strings.ReplaceAll(s, r.data.Config.Dir, "CONFIGDIR")
+ }
+ formatLocation := func(loc protocol.Location) string {
+ return fmt.Sprintf("%s:%d.%d-%d.%d",
+ sanitize(string(loc.URI)),
+ loc.Range.Start.Line+1,
+ loc.Range.Start.Character+1,
+ loc.Range.End.Line+1,
+ loc.Range.End.Character+1)
+ }
+ toSlice := func(set map[protocol.Location]bool) []protocol.Location {
+ // TODO(adonovan): use generic maps.Keys(), someday.
+ list := make([]protocol.Location, 0, len(set))
+ for key := range set {
+ list = append(list, key)
+ }
+ return list
+ }
+ toString := func(locs []protocol.Location) string {
+ // TODO(adonovan): use generic JoinValues(locs, formatLocation).
+ strs := make([]string, len(locs))
+ for i, loc := range locs {
+ strs[i] = formatLocation(loc)
+ }
+ sort.Strings(strs)
+ return strings.Join(strs, "\n")
+ }
+ gotStr := toString(got)
+ wantStr := toString(toSlice(want))
+ if gotStr != wantStr {
+ t.Errorf("incorrect references (got %d, want %d) at %s:\n%s",
+ len(got), len(want),
+ formatLocation(loc),
+ diff.Unified("want", "got", wantStr, gotStr))
+ }
+ })
+ }
+}
+
+func (r *runner) InlayHints(t *testing.T, spn span.Span) {
+ uri := spn.URI()
+ filename := uri.Filename()
+
+ hints, err := r.server.InlayHint(r.ctx, &protocol.InlayHintParams{
+ TextDocument: protocol.TextDocumentIdentifier{
+ URI: protocol.URIFromSpanURI(uri),
+ },
+ // TODO: add Range
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Map inlay hints to text edits.
+ edits := make([]protocol.TextEdit, len(hints))
+ for i, hint := range hints {
+ var paddingLeft, paddingRight string
+ if hint.PaddingLeft {
+ paddingLeft = " "
+ }
+ if hint.PaddingRight {
+ paddingRight = " "
+ }
+ edits[i] = protocol.TextEdit{
+ Range: protocol.Range{Start: hint.Position, End: hint.Position},
+ NewText: fmt.Sprintf("<%s%s%s>", paddingLeft, hint.Label[0].Value, paddingRight),
+ }
+ }
+
+ m, err := r.data.Mapper(uri)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, _, err := source.ApplyProtocolEdits(m, edits)
+ if err != nil {
+ t.Error(err)
+ }
+
+ withinlayHints := r.data.Golden(t, "inlayHint", filename, func() ([]byte, error) {
+ return got, nil
+ })
+
+ if !bytes.Equal(withinlayHints, got) {
+ t.Errorf("inlay hints failed for %s, expected:\n%s\ngot:\n%s", filename, withinlayHints, got)
+ }
+}
+
+func (r *runner) Rename(t *testing.T, spn span.Span, newText string) {
+ tag := fmt.Sprintf("%s-rename", newText)
+
+ uri := spn.URI()
+ filename := uri.Filename()
+ sm, err := r.data.Mapper(uri)
+ if err != nil {
+ t.Fatal(err)
+ }
+ loc, err := sm.SpanLocation(spn)
+ if err != nil {
+ t.Fatalf("failed for %v: %v", spn, err)
+ }
+
+ wedit, err := r.server.Rename(r.ctx, &protocol.RenameParams{
+ TextDocument: protocol.TextDocumentIdentifier{URI: loc.URI},
+ Position: loc.Range.Start,
+ NewName: newText,
+ })
+ if err != nil {
+ renamed := string(r.data.Golden(t, tag, filename, func() ([]byte, error) {
+ return []byte(err.Error()), nil
+ }))
+ if err.Error() != renamed {
+ t.Errorf("%s: rename failed for %s, expected:\n%v\ngot:\n%v\n", spn, newText, renamed, err)
+ }
+ return
+ }
+ res, err := applyTextDocumentEdits(r, wedit.DocumentChanges)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var orderedURIs []string
+ for uri := range res {
+ orderedURIs = append(orderedURIs, string(uri))
+ }
+ sort.Strings(orderedURIs)
+
+ // Print the name and content of each modified file,
+ // concatenated, and compare against the golden.
+ var buf bytes.Buffer
+ for i := 0; i < len(res); i++ {
+ if i != 0 {
+ buf.WriteByte('\n')
+ }
+ uri := span.URIFromURI(orderedURIs[i])
+ if len(res) > 1 {
+ buf.WriteString(filepath.Base(uri.Filename()))
+ buf.WriteString(":\n")
+ }
+ buf.Write(res[uri])
+ }
+ got := buf.Bytes()
+ want := r.data.Golden(t, tag, filename, func() ([]byte, error) {
+ return got, nil
+ })
+ if diff := compare.Bytes(want, got); diff != "" {
+ t.Errorf("rename failed for %s:\n%s", newText, diff)
+ }
+}
+
+func (r *runner) PrepareRename(t *testing.T, src span.Span, want *source.PrepareItem) {
+ m, err := r.data.Mapper(src.URI())
+ if err != nil {
+ t.Fatal(err)
+ }
+ loc, err := m.SpanLocation(src)
+ if err != nil {
+ t.Fatalf("failed for %v: %v", src, err)
+ }
+ params := &protocol.PrepareRenameParams{
+ TextDocumentPositionParams: protocol.LocationTextDocumentPositionParams(loc),
+ }
+ got, err := r.server.PrepareRename(context.Background(), params)
+ if err != nil {
+ t.Errorf("prepare rename failed for %v: got error: %v", src, err)
+ return
+ }
+
+ // TODO(rfindley): can we consolidate on a single representation for
+ // PrepareRename results, and use cmp.Diff here?
+
+ // PrepareRename may fail with no error if there was no object found at the
+ // position.
+ if got == nil {
+ if want.Text != "" { // expected an ident.
+ t.Errorf("prepare rename failed for %v: got nil", src)
+ }
+ return
+ }
+ if got.Range.Start == got.Range.End {
+ // Special case for 0-length ranges. Marks can't specify a 0-length range,
+ // so just compare the start.
+ if got.Range.Start != want.Range.Start {
+ t.Errorf("prepare rename failed: incorrect point, got %v want %v", got.Range.Start, want.Range.Start)
+ }
+ } else {
+ if got.Range != want.Range {
+ t.Errorf("prepare rename failed: incorrect range got %v want %v", got.Range, want.Range)
+ }
+ }
+ if got.Placeholder != want.Text {
+ t.Errorf("prepare rename failed: incorrect text got %v want %v", got.Placeholder, want.Text)
+ }
+}
+
+func applyTextDocumentEdits(r *runner, edits []protocol.DocumentChanges) (map[span.URI][]byte, error) {
+ res := make(map[span.URI][]byte)
+ for _, docEdits := range edits {
+ if docEdits.TextDocumentEdit != nil {
+ uri := docEdits.TextDocumentEdit.TextDocument.URI.SpanURI()
+ var m *protocol.Mapper
+ // If we have already edited this file, we use the edited version (rather than the
+ // file in its original state) so that we preserve our initial changes.
+ if content, ok := res[uri]; ok {
+ m = protocol.NewMapper(uri, content)
+ } else {
+ var err error
+ if m, err = r.data.Mapper(uri); err != nil {
+ return nil, err
+ }
+ }
+ patched, _, err := source.ApplyProtocolEdits(m, docEdits.TextDocumentEdit.Edits)
+ if err != nil {
+ return nil, err
+ }
+ res[uri] = patched
+ }
+ }
+ return res, nil
+}
+
+func (r *runner) Symbols(t *testing.T, uri span.URI, expectedSymbols []protocol.DocumentSymbol) {
+ params := &protocol.DocumentSymbolParams{
+ TextDocument: protocol.TextDocumentIdentifier{
+ URI: protocol.URIFromSpanURI(uri),
+ },
+ }
+ got, err := r.server.DocumentSymbol(r.ctx, params)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ symbols := make([]protocol.DocumentSymbol, len(got))
+ for i, s := range got {
+ s, ok := s.(protocol.DocumentSymbol)
+ if !ok {
+ t.Fatalf("%v: wanted []DocumentSymbols but got %v", uri, got)
+ }
+ symbols[i] = s
+ }
+
+ // Sort by position to make it easier to find errors.
+ sortSymbols := func(s []protocol.DocumentSymbol) {
+ sort.Slice(s, func(i, j int) bool {
+ return protocol.CompareRange(s[i].SelectionRange, s[j].SelectionRange) < 0
+ })
+ }
+ sortSymbols(expectedSymbols)
+ sortSymbols(symbols)
+
+ // Ignore 'Range' here as it is difficult (impossible?) to express
+ // multi-line ranges in the packagestest framework.
+ ignoreRange := cmpopts.IgnoreFields(protocol.DocumentSymbol{}, "Range")
+ if diff := cmp.Diff(expectedSymbols, symbols, ignoreRange); diff != "" {
+ t.Errorf("mismatching symbols (-want +got)\n%s", diff)
+ }
+}
+
+func (r *runner) WorkspaceSymbols(t *testing.T, uri span.URI, query string, typ tests.WorkspaceSymbolsTestType) {
+ matcher := tests.WorkspaceSymbolsTestTypeToMatcher(typ)
+
+ original := r.server.session.Options()
+ modified := original
+ modified.SymbolMatcher = matcher
+ r.server.session.SetOptions(modified)
+ defer r.server.session.SetOptions(original)
+
+ params := &protocol.WorkspaceSymbolParams{
+ Query: query,
+ }
+ gotSymbols, err := r.server.Symbol(r.ctx, params)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := tests.WorkspaceSymbolsString(r.ctx, r.data, uri, gotSymbols)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got = filepath.ToSlash(tests.Normalize(got, r.normalizers))
+ want := string(r.data.Golden(t, fmt.Sprintf("workspace_symbol-%s-%s", strings.ToLower(string(matcher)), query), uri.Filename(), func() ([]byte, error) {
+ return []byte(got), nil
+ }))
+ if diff := compare.Text(want, got); diff != "" {
+ t.Error(diff)
+ }
+}
+
+func (r *runner) SignatureHelp(t *testing.T, spn span.Span, want *protocol.SignatureHelp) {
+ m, err := r.data.Mapper(spn.URI())
+ if err != nil {
+ t.Fatal(err)
+ }
+ loc, err := m.SpanLocation(spn)
+ if err != nil {
+ t.Fatalf("failed for %v: %v", loc, err)
+ }
+ params := &protocol.SignatureHelpParams{
+ TextDocumentPositionParams: protocol.LocationTextDocumentPositionParams(loc),
+ }
+ got, err := r.server.SignatureHelp(r.ctx, params)
+ if err != nil {
+ // Only fail if we got an error we did not expect.
+ if want != nil {
+ t.Fatal(err)
+ }
+ return
+ }
+ if want == nil {
+ if got != nil {
+ t.Errorf("expected no signature, got %v", got)
+ }
+ return
+ }
+ if got == nil {
+ t.Fatalf("expected %v, got nil", want)
+ }
+ if diff := tests.DiffSignatures(spn, want, got); diff != "" {
+ t.Error(diff)
+ }
+}
+
+func (r *runner) Link(t *testing.T, uri span.URI, wantLinks []tests.Link) {
+ m, err := r.data.Mapper(uri)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := r.server.DocumentLink(r.ctx, &protocol.DocumentLinkParams{
+ TextDocument: protocol.TextDocumentIdentifier{
+ URI: protocol.URIFromSpanURI(uri),
+ },
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if diff := tests.DiffLinks(m, wantLinks, got); diff != "" {
+ t.Error(diff)
+ }
+}
+
+func (r *runner) AddImport(t *testing.T, uri span.URI, expectedImport string) {
+ cmd, err := command.NewListKnownPackagesCommand("List Known Packages", command.URIArg{
+ URI: protocol.URIFromSpanURI(uri),
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ resp, err := r.server.executeCommand(r.ctx, &protocol.ExecuteCommandParams{
+ Command: cmd.Command,
+ Arguments: cmd.Arguments,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ res := resp.(command.ListKnownPackagesResult)
+ var hasPkg bool
+ for _, p := range res.Packages {
+ if p == expectedImport {
+ hasPkg = true
+ break
+ }
+ }
+ if !hasPkg {
+ t.Fatalf("%s: got %v packages\nwant contains %q", command.ListKnownPackages, res.Packages, expectedImport)
+ }
+ cmd, err = command.NewAddImportCommand("Add Imports", command.AddImportArgs{
+ URI: protocol.URIFromSpanURI(uri),
+ ImportPath: expectedImport,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = r.server.executeCommand(r.ctx, &protocol.ExecuteCommandParams{
+ Command: cmd.Command,
+ Arguments: cmd.Arguments,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ got := (<-r.editRecv)[uri]
+ want := r.data.Golden(t, "addimport", uri.Filename(), func() ([]byte, error) {
+ return []byte(got), nil
+ })
+ if want == nil {
+ t.Fatalf("golden file %q not found", uri.Filename())
+ }
+ if diff := compare.Bytes(want, got); diff != "" {
+ t.Errorf("%s mismatch\n%s", command.AddImport, diff)
+ }
+}
+
+func (r *runner) SelectionRanges(t *testing.T, spn span.Span) {
+ uri := spn.URI()
+ sm, err := r.data.Mapper(uri)
+ if err != nil {
+ t.Fatal(err)
+ }
+ loc, err := sm.SpanLocation(spn)
+ if err != nil {
+ t.Error(err)
+ }
+
+ ranges, err := r.server.selectionRange(r.ctx, &protocol.SelectionRangeParams{
+ TextDocument: protocol.TextDocumentIdentifier{
+ URI: protocol.URIFromSpanURI(uri),
+ },
+ Positions: []protocol.Position{loc.Range.Start},
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ sb := &strings.Builder{}
+ for i, path := range ranges {
+ fmt.Fprintf(sb, "Ranges %d: ", i)
+ rng := path
+ for {
+ s, e, err := sm.RangeOffsets(rng.Range)
+ if err != nil {
+ t.Error(err)
+ }
+
+ var snippet string
+ if e-s < 30 {
+ snippet = string(sm.Content[s:e])
+ } else {
+ snippet = string(sm.Content[s:s+15]) + "..." + string(sm.Content[e-15:e])
+ }
+
+ fmt.Fprintf(sb, "\n\t%v %q", rng.Range, strings.ReplaceAll(snippet, "\n", "\\n"))
+
+ if rng.Parent == nil {
+ break
+ }
+ rng = *rng.Parent
+ }
+ sb.WriteRune('\n')
+ }
+ got := sb.String()
+
+ testName := "selectionrange_" + tests.SpanName(spn)
+ want := r.data.Golden(t, testName, uri.Filename(), func() ([]byte, error) {
+ return []byte(got), nil
+ })
+ if want == nil {
+ t.Fatalf("golden file %q not found", uri.Filename())
+ }
+ if diff := compare.Text(got, string(want)); diff != "" {
+ t.Errorf("%s mismatch\n%s", testName, diff)
+ }
+}
+
+func (r *runner) collectDiagnostics(view *cache.View) {
+ if r.diagnostics != nil {
+ return
+ }
+ r.diagnostics = make(map[span.URI][]*source.Diagnostic)
+
+ snapshot, release, err := view.Snapshot()
+ if err != nil {
+ panic(err)
+ }
+ defer release()
+
+ // Always run diagnostics with analysis.
+ r.server.diagnose(r.ctx, snapshot, true)
+ for uri, reports := range r.server.diagnostics {
+ for _, report := range reports.reports {
+ for _, d := range report.diags {
+ r.diagnostics[uri] = append(r.diagnostics[uri], d)
+ }
+ }
+ }
+}
diff --git a/gopls/internal/lsp/lsprpc/autostart_default.go b/gopls/internal/lsp/lsprpc/autostart_default.go
new file mode 100644
index 000000000..20b974728
--- /dev/null
+++ b/gopls/internal/lsp/lsprpc/autostart_default.go
@@ -0,0 +1,39 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsprpc
+
+import (
+ "fmt"
+
+ exec "golang.org/x/sys/execabs"
+)
+
+var (
+ daemonize = func(*exec.Cmd) {}
+ autoNetworkAddress = autoNetworkAddressDefault
+ verifyRemoteOwnership = verifyRemoteOwnershipDefault
+)
+
+func runRemote(cmd *exec.Cmd) error {
+ daemonize(cmd)
+ if err := cmd.Start(); err != nil {
+ return fmt.Errorf("starting remote gopls: %w", err)
+ }
+ return nil
+}
+
+// autoNetworkAddressDefault returns the default network and address for the
+// automatically-started gopls remote. See autostart_posix.go for more
+// information.
+func autoNetworkAddressDefault(goplsPath, id string) (network string, address string) {
+ if id != "" {
+ panic("identified remotes are not supported on windows")
+ }
+ return "tcp", "localhost:37374"
+}
+
+func verifyRemoteOwnershipDefault(network, address string) (bool, error) {
+ return true, nil
+}
diff --git a/gopls/internal/lsp/lsprpc/autostart_posix.go b/gopls/internal/lsp/lsprpc/autostart_posix.go
new file mode 100644
index 000000000..90cc72ddf
--- /dev/null
+++ b/gopls/internal/lsp/lsprpc/autostart_posix.go
@@ -0,0 +1,97 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package lsprpc
+
+import (
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "log"
+ "os"
+ "os/user"
+ "path/filepath"
+ "strconv"
+ "syscall"
+
+ exec "golang.org/x/sys/execabs"
+)
+
+func init() {
+ daemonize = daemonizePosix
+ autoNetworkAddress = autoNetworkAddressPosix
+ verifyRemoteOwnership = verifyRemoteOwnershipPosix
+}
+
+func daemonizePosix(cmd *exec.Cmd) {
+ cmd.SysProcAttr = &syscall.SysProcAttr{
+ Setsid: true,
+ }
+}
+
+// autoNetworkAddressPosix resolves an id on the 'auto' pseduo-network to a
+// real network and address. On unix, this uses unix domain sockets.
+func autoNetworkAddressPosix(goplsPath, id string) (network string, address string) {
+ // Especially when doing local development or testing, it's important that
+ // the remote gopls instance we connect to is running the same binary as our
+ // forwarder. So we encode a short hash of the binary path into the daemon
+ // socket name. If possible, we also include the buildid in this hash, to
+ // account for long-running processes where the binary has been subsequently
+ // rebuilt.
+ h := sha256.New()
+ cmd := exec.Command("go", "tool", "buildid", goplsPath)
+ cmd.Stdout = h
+ var pathHash []byte
+ if err := cmd.Run(); err == nil {
+ pathHash = h.Sum(nil)
+ } else {
+ log.Printf("error getting current buildid: %v", err)
+ sum := sha256.Sum256([]byte(goplsPath))
+ pathHash = sum[:]
+ }
+ shortHash := fmt.Sprintf("%x", pathHash)[:6]
+ user := os.Getenv("USER")
+ if user == "" {
+ user = "shared"
+ }
+ basename := filepath.Base(goplsPath)
+ idComponent := ""
+ if id != "" {
+ idComponent = "-" + id
+ }
+ runtimeDir := os.TempDir()
+ if xdg := os.Getenv("XDG_RUNTIME_DIR"); xdg != "" {
+ runtimeDir = xdg
+ }
+ return "unix", filepath.Join(runtimeDir, fmt.Sprintf("%s-%s-daemon.%s%s", basename, shortHash, user, idComponent))
+}
+
+func verifyRemoteOwnershipPosix(network, address string) (bool, error) {
+ if network != "unix" {
+ return true, nil
+ }
+ fi, err := os.Stat(address)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return true, nil
+ }
+ return false, fmt.Errorf("checking socket owner: %w", err)
+ }
+ stat, ok := fi.Sys().(*syscall.Stat_t)
+ if !ok {
+ return false, errors.New("fi.Sys() is not a Stat_t")
+ }
+ user, err := user.Current()
+ if err != nil {
+ return false, fmt.Errorf("checking current user: %w", err)
+ }
+ uid, err := strconv.ParseUint(user.Uid, 10, 32)
+ if err != nil {
+ return false, fmt.Errorf("parsing current UID: %w", err)
+ }
+ return stat.Uid == uint32(uid), nil
+}
diff --git a/gopls/internal/lsp/lsprpc/binder.go b/gopls/internal/lsp/lsprpc/binder.go
new file mode 100644
index 000000000..01e59f7bb
--- /dev/null
+++ b/gopls/internal/lsp/lsprpc/binder.go
@@ -0,0 +1,148 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsprpc
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/internal/event"
+ jsonrpc2_v2 "golang.org/x/tools/internal/jsonrpc2_v2"
+ "golang.org/x/tools/internal/xcontext"
+)
+
+// The BinderFunc type adapts a bind function to implement the jsonrpc2.Binder
+// interface.
+type BinderFunc func(ctx context.Context, conn *jsonrpc2_v2.Connection) jsonrpc2_v2.ConnectionOptions
+
+func (f BinderFunc) Bind(ctx context.Context, conn *jsonrpc2_v2.Connection) jsonrpc2_v2.ConnectionOptions {
+ return f(ctx, conn)
+}
+
+// Middleware defines a transformation of jsonrpc2 Binders, that may be
+// composed to build jsonrpc2 servers.
+type Middleware func(jsonrpc2_v2.Binder) jsonrpc2_v2.Binder
+
+// A ServerFunc is used to construct an LSP server for a given client.
+type ServerFunc func(context.Context, protocol.ClientCloser) protocol.Server
+
+// ServerBinder binds incoming connections to a new server.
+type ServerBinder struct {
+ newServer ServerFunc
+}
+
+func NewServerBinder(newServer ServerFunc) *ServerBinder {
+ return &ServerBinder{newServer: newServer}
+}
+
+func (b *ServerBinder) Bind(ctx context.Context, conn *jsonrpc2_v2.Connection) jsonrpc2_v2.ConnectionOptions {
+ client := protocol.ClientDispatcherV2(conn)
+ server := b.newServer(ctx, client)
+ serverHandler := protocol.ServerHandlerV2(server)
+ // Wrap the server handler to inject the client into each request context, so
+ // that log events are reflected back to the client.
+ wrapped := jsonrpc2_v2.HandlerFunc(func(ctx context.Context, req *jsonrpc2_v2.Request) (interface{}, error) {
+ ctx = protocol.WithClient(ctx, client)
+ return serverHandler.Handle(ctx, req)
+ })
+ preempter := &canceler{
+ conn: conn,
+ }
+ return jsonrpc2_v2.ConnectionOptions{
+ Handler: wrapped,
+ Preempter: preempter,
+ }
+}
+
+type canceler struct {
+ conn *jsonrpc2_v2.Connection
+}
+
+func (c *canceler) Preempt(ctx context.Context, req *jsonrpc2_v2.Request) (interface{}, error) {
+ if req.Method != "$/cancelRequest" {
+ return nil, jsonrpc2_v2.ErrNotHandled
+ }
+ var params protocol.CancelParams
+ if err := json.Unmarshal(req.Params, &params); err != nil {
+ return nil, fmt.Errorf("%w: %v", jsonrpc2_v2.ErrParse, err)
+ }
+ var id jsonrpc2_v2.ID
+ switch raw := params.ID.(type) {
+ case float64:
+ id = jsonrpc2_v2.Int64ID(int64(raw))
+ case string:
+ id = jsonrpc2_v2.StringID(raw)
+ default:
+ return nil, fmt.Errorf("%w: invalid ID type %T", jsonrpc2_v2.ErrParse, params.ID)
+ }
+ c.conn.Cancel(id)
+ return nil, nil
+}
+
+type ForwardBinder struct {
+ dialer jsonrpc2_v2.Dialer
+ onBind func(*jsonrpc2_v2.Connection)
+}
+
+func NewForwardBinder(dialer jsonrpc2_v2.Dialer) *ForwardBinder {
+ return &ForwardBinder{
+ dialer: dialer,
+ }
+}
+
+func (b *ForwardBinder) Bind(ctx context.Context, conn *jsonrpc2_v2.Connection) (opts jsonrpc2_v2.ConnectionOptions) {
+ client := protocol.ClientDispatcherV2(conn)
+ clientBinder := NewClientBinder(func(context.Context, protocol.Server) protocol.Client { return client })
+
+ serverConn, err := jsonrpc2_v2.Dial(context.Background(), b.dialer, clientBinder)
+ if err != nil {
+ return jsonrpc2_v2.ConnectionOptions{
+ Handler: jsonrpc2_v2.HandlerFunc(func(context.Context, *jsonrpc2_v2.Request) (interface{}, error) {
+ return nil, fmt.Errorf("%w: %v", jsonrpc2_v2.ErrInternal, err)
+ }),
+ }
+ }
+
+ if b.onBind != nil {
+ b.onBind(serverConn)
+ }
+ server := protocol.ServerDispatcherV2(serverConn)
+ preempter := &canceler{
+ conn: conn,
+ }
+ detached := xcontext.Detach(ctx)
+ go func() {
+ conn.Wait()
+ if err := serverConn.Close(); err != nil {
+ event.Log(detached, fmt.Sprintf("closing remote connection: %v", err))
+ }
+ }()
+ return jsonrpc2_v2.ConnectionOptions{
+ Handler: protocol.ServerHandlerV2(server),
+ Preempter: preempter,
+ }
+}
+
+// A ClientFunc is used to construct an LSP client for a given server.
+type ClientFunc func(context.Context, protocol.Server) protocol.Client
+
+// ClientBinder binds an LSP client to an incoming connection.
+type ClientBinder struct {
+ newClient ClientFunc
+}
+
+func NewClientBinder(newClient ClientFunc) *ClientBinder {
+ return &ClientBinder{newClient}
+}
+
+func (b *ClientBinder) Bind(ctx context.Context, conn *jsonrpc2_v2.Connection) jsonrpc2_v2.ConnectionOptions {
+ server := protocol.ServerDispatcherV2(conn)
+ client := b.newClient(ctx, server)
+ return jsonrpc2_v2.ConnectionOptions{
+ Handler: protocol.ClientHandlerV2(client),
+ }
+}
diff --git a/gopls/internal/lsp/lsprpc/binder_test.go b/gopls/internal/lsp/lsprpc/binder_test.go
new file mode 100644
index 000000000..3315c3eb7
--- /dev/null
+++ b/gopls/internal/lsp/lsprpc/binder_test.go
@@ -0,0 +1,147 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsprpc_test
+
+import (
+ "context"
+ "regexp"
+ "strings"
+ "testing"
+ "time"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ jsonrpc2_v2 "golang.org/x/tools/internal/jsonrpc2_v2"
+
+ . "golang.org/x/tools/gopls/internal/lsp/lsprpc"
+)
+
+type TestEnv struct {
+ Conns []*jsonrpc2_v2.Connection
+ Servers []*jsonrpc2_v2.Server
+}
+
+func (e *TestEnv) Shutdown(t *testing.T) {
+ for _, s := range e.Servers {
+ s.Shutdown()
+ }
+ for _, c := range e.Conns {
+ if err := c.Close(); err != nil {
+ t.Error(err)
+ }
+ }
+ for _, s := range e.Servers {
+ if err := s.Wait(); err != nil {
+ t.Error(err)
+ }
+ }
+}
+
+func (e *TestEnv) serve(ctx context.Context, t *testing.T, server jsonrpc2_v2.Binder) (jsonrpc2_v2.Listener, *jsonrpc2_v2.Server) {
+ l, err := jsonrpc2_v2.NetPipeListener(ctx)
+ if err != nil {
+ t.Fatal(err)
+ }
+ s := jsonrpc2_v2.NewServer(ctx, l, server)
+ e.Servers = append(e.Servers, s)
+ return l, s
+}
+
+func (e *TestEnv) dial(ctx context.Context, t *testing.T, dialer jsonrpc2_v2.Dialer, client jsonrpc2_v2.Binder, forwarded bool) *jsonrpc2_v2.Connection {
+ if forwarded {
+ l, _ := e.serve(ctx, t, NewForwardBinder(dialer))
+ dialer = l.Dialer()
+ }
+ conn, err := jsonrpc2_v2.Dial(ctx, dialer, client)
+ if err != nil {
+ t.Fatal(err)
+ }
+ e.Conns = append(e.Conns, conn)
+ return conn
+}
+
+func staticClientBinder(client protocol.Client) jsonrpc2_v2.Binder {
+ f := func(context.Context, protocol.Server) protocol.Client { return client }
+ return NewClientBinder(f)
+}
+
+func staticServerBinder(server protocol.Server) jsonrpc2_v2.Binder {
+ f := func(ctx context.Context, client protocol.ClientCloser) protocol.Server {
+ return server
+ }
+ return NewServerBinder(f)
+}
+
+func TestClientLoggingV2(t *testing.T) {
+ ctx := context.Background()
+
+ for name, forwarded := range map[string]bool{
+ "forwarded": true,
+ "standalone": false,
+ } {
+ t.Run(name, func(t *testing.T) {
+ client := FakeClient{Logs: make(chan string, 10)}
+ env := new(TestEnv)
+ defer env.Shutdown(t)
+ l, _ := env.serve(ctx, t, staticServerBinder(PingServer{}))
+ conn := env.dial(ctx, t, l.Dialer(), staticClientBinder(client), forwarded)
+
+ if err := protocol.ServerDispatcherV2(conn).DidOpen(ctx, &protocol.DidOpenTextDocumentParams{}); err != nil {
+ t.Errorf("DidOpen: %v", err)
+ }
+ select {
+ case got := <-client.Logs:
+ want := "ping"
+ matched, err := regexp.MatchString(want, got)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !matched {
+ t.Errorf("got log %q, want a log containing %q", got, want)
+ }
+ case <-time.After(1 * time.Second):
+ t.Error("timeout waiting for client log")
+ }
+ })
+ }
+}
+
+func TestRequestCancellationV2(t *testing.T) {
+ ctx := context.Background()
+
+ for name, forwarded := range map[string]bool{
+ "forwarded": true,
+ "standalone": false,
+ } {
+ t.Run(name, func(t *testing.T) {
+ server := WaitableServer{
+ Started: make(chan struct{}),
+ Completed: make(chan error),
+ }
+ env := new(TestEnv)
+ defer env.Shutdown(t)
+ l, _ := env.serve(ctx, t, staticServerBinder(server))
+ client := FakeClient{Logs: make(chan string, 10)}
+ conn := env.dial(ctx, t, l.Dialer(), staticClientBinder(client), forwarded)
+
+ sd := protocol.ServerDispatcherV2(conn)
+ ctx, cancel := context.WithCancel(ctx)
+
+ result := make(chan error)
+ go func() {
+ _, err := sd.Hover(ctx, &protocol.HoverParams{})
+ result <- err
+ }()
+ // Wait for the Hover request to start.
+ <-server.Started
+ cancel()
+ if err := <-result; err == nil {
+ t.Error("nil error for cancelled Hover(), want non-nil")
+ }
+ if err := <-server.Completed; err == nil || !strings.Contains(err.Error(), "cancelled hover") {
+ t.Errorf("Hover(): unexpected server-side error %v", err)
+ }
+ })
+ }
+}
diff --git a/gopls/internal/lsp/lsprpc/commandinterceptor.go b/gopls/internal/lsp/lsprpc/commandinterceptor.go
new file mode 100644
index 000000000..607ee9c9e
--- /dev/null
+++ b/gopls/internal/lsp/lsprpc/commandinterceptor.go
@@ -0,0 +1,44 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsprpc
+
+import (
+ "context"
+ "encoding/json"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ jsonrpc2_v2 "golang.org/x/tools/internal/jsonrpc2_v2"
+)
+
+// HandlerMiddleware is a middleware that only modifies the jsonrpc2 handler.
+type HandlerMiddleware func(jsonrpc2_v2.Handler) jsonrpc2_v2.Handler
+
+// BindHandler transforms a HandlerMiddleware into a Middleware.
+func BindHandler(hmw HandlerMiddleware) Middleware {
+ return Middleware(func(binder jsonrpc2_v2.Binder) jsonrpc2_v2.Binder {
+ return BinderFunc(func(ctx context.Context, conn *jsonrpc2_v2.Connection) jsonrpc2_v2.ConnectionOptions {
+ opts := binder.Bind(ctx, conn)
+ opts.Handler = hmw(opts.Handler)
+ return opts
+ })
+ })
+}
+
+func CommandInterceptor(command string, run func(*protocol.ExecuteCommandParams) (interface{}, error)) Middleware {
+ return BindHandler(func(delegate jsonrpc2_v2.Handler) jsonrpc2_v2.Handler {
+ return jsonrpc2_v2.HandlerFunc(func(ctx context.Context, req *jsonrpc2_v2.Request) (interface{}, error) {
+ if req.Method == "workspace/executeCommand" {
+ var params protocol.ExecuteCommandParams
+ if err := json.Unmarshal(req.Params, &params); err == nil {
+ if params.Command == command {
+ return run(&params)
+ }
+ }
+ }
+
+ return delegate.Handle(ctx, req)
+ })
+ })
+}
diff --git a/gopls/internal/lsp/lsprpc/commandinterceptor_test.go b/gopls/internal/lsp/lsprpc/commandinterceptor_test.go
new file mode 100644
index 000000000..555f15130
--- /dev/null
+++ b/gopls/internal/lsp/lsprpc/commandinterceptor_test.go
@@ -0,0 +1,42 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsprpc_test
+
+import (
+ "context"
+ "testing"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+
+ . "golang.org/x/tools/gopls/internal/lsp/lsprpc"
+)
+
+func TestCommandInterceptor(t *testing.T) {
+ const command = "foo"
+ caught := false
+ intercept := func(_ *protocol.ExecuteCommandParams) (interface{}, error) {
+ caught = true
+ return map[string]interface{}{}, nil
+ }
+
+ ctx := context.Background()
+ env := new(TestEnv)
+ defer env.Shutdown(t)
+ mw := CommandInterceptor(command, intercept)
+ l, _ := env.serve(ctx, t, mw(noopBinder))
+ conn := env.dial(ctx, t, l.Dialer(), noopBinder, false)
+
+ params := &protocol.ExecuteCommandParams{
+ Command: command,
+ }
+ var res interface{}
+ err := conn.Call(ctx, "workspace/executeCommand", params).Await(ctx, &res)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !caught {
+ t.Errorf("workspace/executeCommand was not intercepted")
+ }
+}
diff --git a/gopls/internal/lsp/lsprpc/dialer.go b/gopls/internal/lsp/lsprpc/dialer.go
new file mode 100644
index 000000000..37e0c5680
--- /dev/null
+++ b/gopls/internal/lsp/lsprpc/dialer.go
@@ -0,0 +1,114 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsprpc
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "net"
+ "os"
+ "time"
+
+ exec "golang.org/x/sys/execabs"
+ "golang.org/x/tools/internal/event"
+)
+
+// AutoNetwork is the pseudo network type used to signal that gopls should use
+// automatic discovery to resolve a remote address.
+const AutoNetwork = "auto"
+
+// An AutoDialer is a jsonrpc2 dialer that understands the 'auto' network.
+type AutoDialer struct {
+ network, addr string // the 'real' network and address
+ isAuto bool // whether the server is on the 'auto' network
+
+ executable string
+ argFunc func(network, addr string) []string
+}
+
+func NewAutoDialer(rawAddr string, argFunc func(network, addr string) []string) (*AutoDialer, error) {
+ d := AutoDialer{
+ argFunc: argFunc,
+ }
+ d.network, d.addr = ParseAddr(rawAddr)
+ if d.network == AutoNetwork {
+ d.isAuto = true
+ bin, err := os.Executable()
+ if err != nil {
+ return nil, fmt.Errorf("getting executable: %w", err)
+ }
+ d.executable = bin
+ d.network, d.addr = autoNetworkAddress(bin, d.addr)
+ }
+ return &d, nil
+}
+
+// Dial implements the jsonrpc2.Dialer interface.
+func (d *AutoDialer) Dial(ctx context.Context) (io.ReadWriteCloser, error) {
+ conn, err := d.dialNet(ctx)
+ return conn, err
+}
+
+// TODO(rFindley): remove this once we no longer need to integrate with v1 of
+// the jsonrpc2 package.
+func (d *AutoDialer) dialNet(ctx context.Context) (net.Conn, error) {
+ // Attempt to verify that we own the remote. This is imperfect, but if we can
+ // determine that the remote is owned by a different user, we should fail.
+ ok, err := verifyRemoteOwnership(d.network, d.addr)
+ if err != nil {
+ // If the ownership check itself failed, we fail open but log an error to
+ // the user.
+ event.Error(ctx, "unable to check daemon socket owner, failing open", err)
+ } else if !ok {
+ // We successfully checked that the socket is not owned by us, we fail
+ // closed.
+ return nil, fmt.Errorf("socket %q is owned by a different user", d.addr)
+ }
+ const dialTimeout = 1 * time.Second
+ // Try dialing our remote once, in case it is already running.
+ netConn, err := net.DialTimeout(d.network, d.addr, dialTimeout)
+ if err == nil {
+ return netConn, nil
+ }
+ if d.isAuto && d.argFunc != nil {
+ if d.network == "unix" {
+ // Sometimes the socketfile isn't properly cleaned up when the server
+ // shuts down. Since we have already tried and failed to dial this
+ // address, it should *usually* be safe to remove the socket before
+ // binding to the address.
+ // TODO(rfindley): there is probably a race here if multiple server
+ // instances are simultaneously starting up.
+ if _, err := os.Stat(d.addr); err == nil {
+ if err := os.Remove(d.addr); err != nil {
+ return nil, fmt.Errorf("removing remote socket file: %w", err)
+ }
+ }
+ }
+ args := d.argFunc(d.network, d.addr)
+ cmd := exec.Command(d.executable, args...)
+ if err := runRemote(cmd); err != nil {
+ return nil, err
+ }
+ }
+
+ const retries = 5
+ // It can take some time for the newly started server to bind to our address,
+ // so we retry for a bit.
+ for retry := 0; retry < retries; retry++ {
+ startDial := time.Now()
+ netConn, err = net.DialTimeout(d.network, d.addr, dialTimeout)
+ if err == nil {
+ return netConn, nil
+ }
+ event.Log(ctx, fmt.Sprintf("failed attempt #%d to connect to remote: %v\n", retry+2, err))
+ // In case our failure was a fast-failure, ensure we wait at least
+ // f.dialTimeout before trying again.
+ if retry != retries-1 {
+ time.Sleep(dialTimeout - time.Since(startDial))
+ }
+ }
+ return nil, fmt.Errorf("dialing remote: %w", err)
+}
diff --git a/gopls/internal/lsp/lsprpc/goenv.go b/gopls/internal/lsp/lsprpc/goenv.go
new file mode 100644
index 000000000..c316ea07c
--- /dev/null
+++ b/gopls/internal/lsp/lsprpc/goenv.go
@@ -0,0 +1,96 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsprpc
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "os"
+
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/gocommand"
+ jsonrpc2_v2 "golang.org/x/tools/internal/jsonrpc2_v2"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+)
+
+func GoEnvMiddleware() (Middleware, error) {
+ return BindHandler(func(delegate jsonrpc2_v2.Handler) jsonrpc2_v2.Handler {
+ return jsonrpc2_v2.HandlerFunc(func(ctx context.Context, req *jsonrpc2_v2.Request) (interface{}, error) {
+ if req.Method == "initialize" {
+ if err := addGoEnvToInitializeRequestV2(ctx, req); err != nil {
+ event.Error(ctx, "adding go env to initialize", err)
+ }
+ }
+ return delegate.Handle(ctx, req)
+ })
+ }), nil
+}
+
+func addGoEnvToInitializeRequestV2(ctx context.Context, req *jsonrpc2_v2.Request) error {
+ var params protocol.ParamInitialize
+ if err := json.Unmarshal(req.Params, &params); err != nil {
+ return err
+ }
+ var opts map[string]interface{}
+ switch v := params.InitializationOptions.(type) {
+ case nil:
+ opts = make(map[string]interface{})
+ case map[string]interface{}:
+ opts = v
+ default:
+ return fmt.Errorf("unexpected type for InitializationOptions: %T", v)
+ }
+ envOpt, ok := opts["env"]
+ if !ok {
+ envOpt = make(map[string]interface{})
+ }
+ env, ok := envOpt.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("env option is %T, expected a map", envOpt)
+ }
+ goenv, err := getGoEnv(ctx, env)
+ if err != nil {
+ return err
+ }
+ // We don't want to propagate GOWORK unless explicitly set since that could mess with
+ // path inference during cmd/go invocations, see golang/go#51825.
+ _, goworkSet := os.LookupEnv("GOWORK")
+ for govar, value := range goenv {
+ if govar == "GOWORK" && !goworkSet {
+ continue
+ }
+ env[govar] = value
+ }
+ opts["env"] = env
+ params.InitializationOptions = opts
+ raw, err := json.Marshal(params)
+ if err != nil {
+ return fmt.Errorf("marshaling updated options: %v", err)
+ }
+ req.Params = json.RawMessage(raw)
+ return nil
+}
+
+func getGoEnv(ctx context.Context, env map[string]interface{}) (map[string]string, error) {
+ var runEnv []string
+ for k, v := range env {
+ runEnv = append(runEnv, fmt.Sprintf("%s=%s", k, v))
+ }
+ runner := gocommand.Runner{}
+ output, err := runner.Run(ctx, gocommand.Invocation{
+ Verb: "env",
+ Args: []string{"-json"},
+ Env: runEnv,
+ })
+ if err != nil {
+ return nil, err
+ }
+ envmap := make(map[string]string)
+ if err := json.Unmarshal(output.Bytes(), &envmap); err != nil {
+ return nil, err
+ }
+ return envmap, nil
+}
diff --git a/gopls/internal/lsp/lsprpc/goenv_test.go b/gopls/internal/lsp/lsprpc/goenv_test.go
new file mode 100644
index 000000000..5edd64fbe
--- /dev/null
+++ b/gopls/internal/lsp/lsprpc/goenv_test.go
@@ -0,0 +1,65 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsprpc_test
+
+import (
+ "context"
+ "testing"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+
+ . "golang.org/x/tools/gopls/internal/lsp/lsprpc"
+)
+
+type initServer struct {
+ protocol.Server
+
+ params *protocol.ParamInitialize
+}
+
+func (s *initServer) Initialize(ctx context.Context, params *protocol.ParamInitialize) (*protocol.InitializeResult, error) {
+ s.params = params
+ return &protocol.InitializeResult{}, nil
+}
+
+func TestGoEnvMiddleware(t *testing.T) {
+ ctx := context.Background()
+
+ server := &initServer{}
+ env := new(TestEnv)
+ defer env.Shutdown(t)
+ l, _ := env.serve(ctx, t, staticServerBinder(server))
+ mw, err := GoEnvMiddleware()
+ if err != nil {
+ t.Fatal(err)
+ }
+ binder := mw(NewForwardBinder(l.Dialer()))
+ l, _ = env.serve(ctx, t, binder)
+ conn := env.dial(ctx, t, l.Dialer(), noopBinder, true)
+ dispatch := protocol.ServerDispatcherV2(conn)
+ initParams := &protocol.ParamInitialize{}
+ initParams.InitializationOptions = map[string]interface{}{
+ "env": map[string]interface{}{
+ "GONOPROXY": "example.com",
+ },
+ }
+ if _, err := dispatch.Initialize(ctx, initParams); err != nil {
+ t.Fatal(err)
+ }
+
+ if server.params == nil {
+ t.Fatalf("initialize params are unset")
+ }
+ envOpts := server.params.InitializationOptions.(map[string]interface{})["env"].(map[string]interface{})
+
+ // Check for an arbitrary Go variable. It should be set.
+ if _, ok := envOpts["GOPRIVATE"]; !ok {
+ t.Errorf("Go environment variable GOPRIVATE unset in initialization options")
+ }
+ // Check that the variable present in our user config was not overwritten.
+ if got, want := envOpts["GONOPROXY"], "example.com"; got != want {
+ t.Errorf("GONOPROXY=%q, want %q", got, want)
+ }
+}
diff --git a/gopls/internal/lsp/lsprpc/lsprpc.go b/gopls/internal/lsp/lsprpc/lsprpc.go
new file mode 100644
index 000000000..6b02cf5aa
--- /dev/null
+++ b/gopls/internal/lsp/lsprpc/lsprpc.go
@@ -0,0 +1,543 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package lsprpc implements a jsonrpc2.StreamServer that may be used to
+// serve the LSP on a jsonrpc2 channel.
+package lsprpc
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "log"
+ "net"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/tools/gopls/internal/lsp"
+ "golang.org/x/tools/gopls/internal/lsp/cache"
+ "golang.org/x/tools/gopls/internal/lsp/command"
+ "golang.org/x/tools/gopls/internal/lsp/debug"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/event/tag"
+ "golang.org/x/tools/internal/jsonrpc2"
+)
+
+// Unique identifiers for client/server.
+var serverIndex int64
+
+// The StreamServer type is a jsonrpc2.StreamServer that handles incoming
+// streams as a new LSP session, using a shared cache.
+type StreamServer struct {
+ cache *cache.Cache
+ // daemon controls whether or not to log new connections.
+ daemon bool
+
+ // optionsOverrides is passed to newly created sessions.
+ optionsOverrides func(*source.Options)
+
+ // serverForTest may be set to a test fake for testing.
+ serverForTest protocol.Server
+}
+
+// NewStreamServer creates a StreamServer using the shared cache. If
+// withTelemetry is true, each session is instrumented with telemetry that
+// records RPC statistics.
+func NewStreamServer(cache *cache.Cache, daemon bool, optionsFunc func(*source.Options)) *StreamServer {
+ return &StreamServer{cache: cache, daemon: daemon, optionsOverrides: optionsFunc}
+}
+
+func (s *StreamServer) Binder() *ServerBinder {
+ newServer := func(ctx context.Context, client protocol.ClientCloser) protocol.Server {
+ session := cache.NewSession(ctx, s.cache, s.optionsOverrides)
+ server := s.serverForTest
+ if server == nil {
+ server = lsp.NewServer(session, client)
+ if instance := debug.GetInstance(ctx); instance != nil {
+ instance.AddService(server, session)
+ }
+ }
+ return server
+ }
+ return NewServerBinder(newServer)
+}
+
+// ServeStream implements the jsonrpc2.StreamServer interface, by handling
+// incoming streams using a new lsp server.
+func (s *StreamServer) ServeStream(ctx context.Context, conn jsonrpc2.Conn) error {
+ client := protocol.ClientDispatcher(conn)
+ session := cache.NewSession(ctx, s.cache, s.optionsOverrides)
+ server := s.serverForTest
+ if server == nil {
+ server = lsp.NewServer(session, client)
+ if instance := debug.GetInstance(ctx); instance != nil {
+ instance.AddService(server, session)
+ }
+ }
+ // Clients may or may not send a shutdown message. Make sure the server is
+ // shut down.
+ // TODO(rFindley): this shutdown should perhaps be on a disconnected context.
+ defer func() {
+ if err := server.Shutdown(ctx); err != nil {
+ event.Error(ctx, "error shutting down", err)
+ }
+ }()
+ executable, err := os.Executable()
+ if err != nil {
+ log.Printf("error getting gopls path: %v", err)
+ executable = ""
+ }
+ ctx = protocol.WithClient(ctx, client)
+ conn.Go(ctx,
+ protocol.Handlers(
+ handshaker(session, executable, s.daemon,
+ protocol.ServerHandler(server,
+ jsonrpc2.MethodNotFound))))
+ if s.daemon {
+ log.Printf("Session %s: connected", session.ID())
+ defer log.Printf("Session %s: exited", session.ID())
+ }
+ <-conn.Done()
+ return conn.Err()
+}
+
+// A Forwarder is a jsonrpc2.StreamServer that handles an LSP stream by
+// forwarding it to a remote. This is used when the gopls process started by
+// the editor is in the `-remote` mode, which means it finds and connects to a
+// separate gopls daemon. In these cases, we still want the forwarder gopls to
+// be instrumented with telemetry, and want to be able to in some cases hijack
+// the jsonrpc2 connection with the daemon.
+type Forwarder struct {
+ dialer *AutoDialer
+
+ mu sync.Mutex
+ // Hold on to the server connection so that we can redo the handshake if any
+ // information changes.
+ serverConn jsonrpc2.Conn
+ serverID string
+}
+
+// NewForwarder creates a new Forwarder, ready to forward connections to the
+// remote server specified by rawAddr. If provided and rawAddr indicates an
+// 'automatic' address (starting with 'auto;'), argFunc may be used to start a
+// remote server for the auto-discovered address.
+func NewForwarder(rawAddr string, argFunc func(network, address string) []string) (*Forwarder, error) {
+ dialer, err := NewAutoDialer(rawAddr, argFunc)
+ if err != nil {
+ return nil, err
+ }
+ fwd := &Forwarder{
+ dialer: dialer,
+ }
+ return fwd, nil
+}
+
+// QueryServerState queries the server state of the current server.
+func QueryServerState(ctx context.Context, addr string) (*ServerState, error) {
+ serverConn, err := dialRemote(ctx, addr)
+ if err != nil {
+ return nil, err
+ }
+ var state ServerState
+ if err := protocol.Call(ctx, serverConn, sessionsMethod, nil, &state); err != nil {
+ return nil, fmt.Errorf("querying server state: %w", err)
+ }
+ return &state, nil
+}
+
+// dialRemote is used for making calls into the gopls daemon. addr should be a
+// URL, possibly on the synthetic 'auto' network (e.g. tcp://..., unix://...,
+// or auto://...).
+func dialRemote(ctx context.Context, addr string) (jsonrpc2.Conn, error) {
+ network, address := ParseAddr(addr)
+ if network == AutoNetwork {
+ gp, err := os.Executable()
+ if err != nil {
+ return nil, fmt.Errorf("getting gopls path: %w", err)
+ }
+ network, address = autoNetworkAddress(gp, address)
+ }
+ netConn, err := net.DialTimeout(network, address, 5*time.Second)
+ if err != nil {
+ return nil, fmt.Errorf("dialing remote: %w", err)
+ }
+ serverConn := jsonrpc2.NewConn(jsonrpc2.NewHeaderStream(netConn))
+ serverConn.Go(ctx, jsonrpc2.MethodNotFound)
+ return serverConn, nil
+}
+
+func ExecuteCommand(ctx context.Context, addr string, id string, request, result interface{}) error {
+ serverConn, err := dialRemote(ctx, addr)
+ if err != nil {
+ return err
+ }
+ args, err := command.MarshalArgs(request)
+ if err != nil {
+ return err
+ }
+ params := protocol.ExecuteCommandParams{
+ Command: id,
+ Arguments: args,
+ }
+ return protocol.Call(ctx, serverConn, "workspace/executeCommand", params, result)
+}
+
+// ServeStream dials the forwarder remote and binds the remote to serve the LSP
+// on the incoming stream.
+func (f *Forwarder) ServeStream(ctx context.Context, clientConn jsonrpc2.Conn) error {
+ client := protocol.ClientDispatcher(clientConn)
+
+ netConn, err := f.dialer.dialNet(ctx)
+ if err != nil {
+ return fmt.Errorf("forwarder: connecting to remote: %w", err)
+ }
+ serverConn := jsonrpc2.NewConn(jsonrpc2.NewHeaderStream(netConn))
+ server := protocol.ServerDispatcher(serverConn)
+
+ // Forward between connections.
+ serverConn.Go(ctx,
+ protocol.Handlers(
+ protocol.ClientHandler(client,
+ jsonrpc2.MethodNotFound)))
+
+ // Don't run the clientConn yet, so that we can complete the handshake before
+ // processing any client messages.
+
+ // Do a handshake with the server instance to exchange debug information.
+ index := atomic.AddInt64(&serverIndex, 1)
+ f.mu.Lock()
+ f.serverConn = serverConn
+ f.serverID = strconv.FormatInt(index, 10)
+ f.mu.Unlock()
+ f.handshake(ctx)
+ clientConn.Go(ctx,
+ protocol.Handlers(
+ f.handler(
+ protocol.ServerHandler(server,
+ jsonrpc2.MethodNotFound))))
+
+ select {
+ case <-serverConn.Done():
+ clientConn.Close()
+ case <-clientConn.Done():
+ serverConn.Close()
+ }
+
+ err = nil
+ if serverConn.Err() != nil {
+ err = fmt.Errorf("remote disconnected: %v", serverConn.Err())
+ } else if clientConn.Err() != nil {
+ err = fmt.Errorf("client disconnected: %v", clientConn.Err())
+ }
+ event.Log(ctx, fmt.Sprintf("forwarder: exited with error: %v", err))
+ return err
+}
+
+// TODO(rfindley): remove this handshaking in favor of middleware.
+func (f *Forwarder) handshake(ctx context.Context) {
+ // This call to os.Executable is redundant, and will be eliminated by the
+ // transition to the V2 API.
+ goplsPath, err := os.Executable()
+ if err != nil {
+ event.Error(ctx, "getting executable for handshake", err)
+ goplsPath = ""
+ }
+ var (
+ hreq = handshakeRequest{
+ ServerID: f.serverID,
+ GoplsPath: goplsPath,
+ }
+ hresp handshakeResponse
+ )
+ if di := debug.GetInstance(ctx); di != nil {
+ hreq.Logfile = di.Logfile
+ hreq.DebugAddr = di.ListenedDebugAddress()
+ }
+ if err := protocol.Call(ctx, f.serverConn, handshakeMethod, hreq, &hresp); err != nil {
+ // TODO(rfindley): at some point in the future we should return an error
+ // here. Handshakes have become functional in nature.
+ event.Error(ctx, "forwarder: gopls handshake failed", err)
+ }
+ if hresp.GoplsPath != goplsPath {
+ event.Error(ctx, "", fmt.Errorf("forwarder: gopls path mismatch: forwarder is %q, remote is %q", goplsPath, hresp.GoplsPath))
+ }
+ event.Log(ctx, "New server",
+ tag.NewServer.Of(f.serverID),
+ tag.Logfile.Of(hresp.Logfile),
+ tag.DebugAddress.Of(hresp.DebugAddr),
+ tag.GoplsPath.Of(hresp.GoplsPath),
+ tag.ClientID.Of(hresp.SessionID),
+ )
+}
+
+func ConnectToRemote(ctx context.Context, addr string) (net.Conn, error) {
+ dialer, err := NewAutoDialer(addr, nil)
+ if err != nil {
+ return nil, err
+ }
+ return dialer.dialNet(ctx)
+}
+
+// handler intercepts messages to the daemon to enrich them with local
+// information.
+func (f *Forwarder) handler(handler jsonrpc2.Handler) jsonrpc2.Handler {
+ return func(ctx context.Context, reply jsonrpc2.Replier, r jsonrpc2.Request) error {
+ // Intercept certain messages to add special handling.
+ switch r.Method() {
+ case "initialize":
+ if newr, err := addGoEnvToInitializeRequest(ctx, r); err == nil {
+ r = newr
+ } else {
+ log.Printf("unable to add local env to initialize request: %v", err)
+ }
+ case "workspace/executeCommand":
+ var params protocol.ExecuteCommandParams
+ if err := json.Unmarshal(r.Params(), &params); err == nil {
+ if params.Command == command.StartDebugging.ID() {
+ var args command.DebuggingArgs
+ if err := command.UnmarshalArgs(params.Arguments, &args); err == nil {
+ reply = f.replyWithDebugAddress(ctx, reply, args)
+ } else {
+ event.Error(ctx, "unmarshaling debugging args", err)
+ }
+ }
+ } else {
+ event.Error(ctx, "intercepting executeCommand request", err)
+ }
+ }
+ // The gopls workspace environment defaults to the process environment in
+ // which gopls daemon was started. To avoid discrepancies in Go environment
+ // between the editor and daemon, inject any unset variables in `go env`
+ // into the options sent by initialize.
+ //
+ // See also golang.org/issue/37830.
+ return handler(ctx, reply, r)
+ }
+}
+
+// addGoEnvToInitializeRequest builds a new initialize request in which we set
+// any environment variables output by `go env` and not already present in the
+// request.
+//
+// It returns an error if r is not an initialize request, or is otherwise
+// malformed.
+func addGoEnvToInitializeRequest(ctx context.Context, r jsonrpc2.Request) (jsonrpc2.Request, error) {
+ var params protocol.ParamInitialize
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return nil, err
+ }
+ var opts map[string]interface{}
+ switch v := params.InitializationOptions.(type) {
+ case nil:
+ opts = make(map[string]interface{})
+ case map[string]interface{}:
+ opts = v
+ default:
+ return nil, fmt.Errorf("unexpected type for InitializationOptions: %T", v)
+ }
+ envOpt, ok := opts["env"]
+ if !ok {
+ envOpt = make(map[string]interface{})
+ }
+ env, ok := envOpt.(map[string]interface{})
+ if !ok {
+ return nil, fmt.Errorf(`env option is %T, expected a map`, envOpt)
+ }
+ goenv, err := getGoEnv(ctx, env)
+ if err != nil {
+ return nil, err
+ }
+ // We don't want to propagate GOWORK unless explicitly set since that could mess with
+ // path inference during cmd/go invocations, see golang/go#51825.
+ _, goworkSet := os.LookupEnv("GOWORK")
+ for govar, value := range goenv {
+ if govar == "GOWORK" && !goworkSet {
+ continue
+ }
+ env[govar] = value
+ }
+ opts["env"] = env
+ params.InitializationOptions = opts
+ call, ok := r.(*jsonrpc2.Call)
+ if !ok {
+ return nil, fmt.Errorf("%T is not a *jsonrpc2.Call", r)
+ }
+ return jsonrpc2.NewCall(call.ID(), "initialize", params)
+}
+
+func (f *Forwarder) replyWithDebugAddress(outerCtx context.Context, r jsonrpc2.Replier, args command.DebuggingArgs) jsonrpc2.Replier {
+ di := debug.GetInstance(outerCtx)
+ if di == nil {
+ event.Log(outerCtx, "no debug instance to start")
+ return r
+ }
+ return func(ctx context.Context, result interface{}, outerErr error) error {
+ if outerErr != nil {
+ return r(ctx, result, outerErr)
+ }
+ // Enrich the result with our own debugging information. Since we're an
+ // intermediary, the jsonrpc2 package has deserialized the result into
+ // maps, by default. Re-do the unmarshalling.
+ raw, err := json.Marshal(result)
+ if err != nil {
+ event.Error(outerCtx, "marshaling intermediate command result", err)
+ return r(ctx, result, err)
+ }
+ var modified command.DebuggingResult
+ if err := json.Unmarshal(raw, &modified); err != nil {
+ event.Error(outerCtx, "unmarshaling intermediate command result", err)
+ return r(ctx, result, err)
+ }
+ addr := args.Addr
+ if addr == "" {
+ addr = "localhost:0"
+ }
+ addr, err = di.Serve(outerCtx, addr)
+ if err != nil {
+ event.Error(outerCtx, "starting debug server", err)
+ return r(ctx, result, outerErr)
+ }
+ urls := []string{"http://" + addr}
+ modified.URLs = append(urls, modified.URLs...)
+ go f.handshake(ctx)
+ return r(ctx, modified, nil)
+ }
+}
+
+// A handshakeRequest identifies a client to the LSP server.
+type handshakeRequest struct {
+ // ServerID is the ID of the server on the client. This should usually be 0.
+ ServerID string `json:"serverID"`
+ // Logfile is the location of the clients log file.
+ Logfile string `json:"logfile"`
+ // DebugAddr is the client debug address.
+ DebugAddr string `json:"debugAddr"`
+ // GoplsPath is the path to the Gopls binary running the current client
+ // process.
+ GoplsPath string `json:"goplsPath"`
+}
+
+// A handshakeResponse is returned by the LSP server to tell the LSP client
+// information about its session.
+type handshakeResponse struct {
+ // SessionID is the server session associated with the client.
+ SessionID string `json:"sessionID"`
+ // Logfile is the location of the server logs.
+ Logfile string `json:"logfile"`
+ // DebugAddr is the server debug address.
+ DebugAddr string `json:"debugAddr"`
+ // GoplsPath is the path to the Gopls binary running the current server
+ // process.
+ GoplsPath string `json:"goplsPath"`
+}
+
+// ClientSession identifies a current client LSP session on the server. Note
+// that it looks similar to handshakeResposne, but in fact 'Logfile' and
+// 'DebugAddr' now refer to the client.
+type ClientSession struct {
+ SessionID string `json:"sessionID"`
+ Logfile string `json:"logfile"`
+ DebugAddr string `json:"debugAddr"`
+}
+
+// ServerState holds information about the gopls daemon process, including its
+// debug information and debug information of all of its current connected
+// clients.
+type ServerState struct {
+ Logfile string `json:"logfile"`
+ DebugAddr string `json:"debugAddr"`
+ GoplsPath string `json:"goplsPath"`
+ CurrentClientID string `json:"currentClientID"`
+ Clients []ClientSession `json:"clients"`
+}
+
+const (
+ handshakeMethod = "gopls/handshake"
+ sessionsMethod = "gopls/sessions"
+)
+
+func handshaker(session *cache.Session, goplsPath string, logHandshakes bool, handler jsonrpc2.Handler) jsonrpc2.Handler {
+ return func(ctx context.Context, reply jsonrpc2.Replier, r jsonrpc2.Request) error {
+ switch r.Method() {
+ case handshakeMethod:
+ // We log.Printf in this handler, rather than event.Log when we want logs
+ // to go to the daemon log rather than being reflected back to the
+ // client.
+ var req handshakeRequest
+ if err := json.Unmarshal(r.Params(), &req); err != nil {
+ if logHandshakes {
+ log.Printf("Error processing handshake for session %s: %v", session.ID(), err)
+ }
+ sendError(ctx, reply, err)
+ return nil
+ }
+ if logHandshakes {
+ log.Printf("Session %s: got handshake. Logfile: %q, Debug addr: %q", session.ID(), req.Logfile, req.DebugAddr)
+ }
+ event.Log(ctx, "Handshake session update",
+ cache.KeyUpdateSession.Of(session),
+ tag.DebugAddress.Of(req.DebugAddr),
+ tag.Logfile.Of(req.Logfile),
+ tag.ServerID.Of(req.ServerID),
+ tag.GoplsPath.Of(req.GoplsPath),
+ )
+ resp := handshakeResponse{
+ SessionID: session.ID(),
+ GoplsPath: goplsPath,
+ }
+ if di := debug.GetInstance(ctx); di != nil {
+ resp.Logfile = di.Logfile
+ resp.DebugAddr = di.ListenedDebugAddress()
+ }
+ return reply(ctx, resp, nil)
+
+ case sessionsMethod:
+ resp := ServerState{
+ GoplsPath: goplsPath,
+ CurrentClientID: session.ID(),
+ }
+ if di := debug.GetInstance(ctx); di != nil {
+ resp.Logfile = di.Logfile
+ resp.DebugAddr = di.ListenedDebugAddress()
+ for _, c := range di.State.Clients() {
+ resp.Clients = append(resp.Clients, ClientSession{
+ SessionID: c.Session.ID(),
+ Logfile: c.Logfile,
+ DebugAddr: c.DebugAddress,
+ })
+ }
+ }
+ return reply(ctx, resp, nil)
+ }
+ return handler(ctx, reply, r)
+ }
+}
+
+func sendError(ctx context.Context, reply jsonrpc2.Replier, err error) {
+ err = fmt.Errorf("%v: %w", err, jsonrpc2.ErrParse)
+ if err := reply(ctx, nil, err); err != nil {
+ event.Error(ctx, "", err)
+ }
+}
+
+// ParseAddr parses the address of a gopls remote.
+// TODO(rFindley): further document this syntax, and allow URI-style remote
+// addresses such as "auto://...".
+func ParseAddr(listen string) (network string, address string) {
+ // Allow passing just -remote=auto, as a shorthand for using automatic remote
+ // resolution.
+ if listen == AutoNetwork {
+ return AutoNetwork, ""
+ }
+ if parts := strings.SplitN(listen, ";", 2); len(parts) == 2 {
+ return parts[0], parts[1]
+ }
+ return "tcp", listen
+}
diff --git a/gopls/internal/lsp/lsprpc/lsprpc_test.go b/gopls/internal/lsp/lsprpc/lsprpc_test.go
new file mode 100644
index 000000000..3ec1a13d2
--- /dev/null
+++ b/gopls/internal/lsp/lsprpc/lsprpc_test.go
@@ -0,0 +1,345 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsprpc
+
+import (
+ "context"
+ "errors"
+ "regexp"
+ "strings"
+ "testing"
+ "time"
+
+ "golang.org/x/tools/gopls/internal/lsp/cache"
+ "golang.org/x/tools/gopls/internal/lsp/debug"
+ "golang.org/x/tools/gopls/internal/lsp/fake"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/jsonrpc2"
+ "golang.org/x/tools/internal/jsonrpc2/servertest"
+)
+
+type FakeClient struct {
+ protocol.Client
+
+ Logs chan string
+}
+
+func (c FakeClient) LogMessage(ctx context.Context, params *protocol.LogMessageParams) error {
+ c.Logs <- params.Message
+ return nil
+}
+
+// fakeServer is intended to be embedded in the test fakes below, to trivially
+// implement Shutdown.
+type fakeServer struct {
+ protocol.Server
+}
+
+func (fakeServer) Shutdown(ctx context.Context) error {
+ return nil
+}
+
+type PingServer struct{ fakeServer }
+
+func (s PingServer) DidOpen(ctx context.Context, params *protocol.DidOpenTextDocumentParams) error {
+ event.Log(ctx, "ping")
+ return nil
+}
+
+func TestClientLogging(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ server := PingServer{}
+ client := FakeClient{Logs: make(chan string, 10)}
+
+ ctx = debug.WithInstance(ctx, "", "")
+ ss := NewStreamServer(cache.New(nil), false, nil)
+ ss.serverForTest = server
+ ts := servertest.NewPipeServer(ss, nil)
+ defer checkClose(t, ts.Close)
+ cc := ts.Connect(ctx)
+ cc.Go(ctx, protocol.ClientHandler(client, jsonrpc2.MethodNotFound))
+
+ if err := protocol.ServerDispatcher(cc).DidOpen(ctx, &protocol.DidOpenTextDocumentParams{}); err != nil {
+ t.Errorf("DidOpen: %v", err)
+ }
+
+ select {
+ case got := <-client.Logs:
+ want := "ping"
+ matched, err := regexp.MatchString(want, got)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !matched {
+ t.Errorf("got log %q, want a log containing %q", got, want)
+ }
+ case <-time.After(1 * time.Second):
+ t.Error("timeout waiting for client log")
+ }
+}
+
+// WaitableServer instruments LSP request so that we can control their timing.
+// The requests chosen are arbitrary: we simply needed one that blocks, and
+// another that doesn't.
+type WaitableServer struct {
+ fakeServer
+
+ Started chan struct{}
+ Completed chan error
+}
+
+func (s WaitableServer) Hover(ctx context.Context, _ *protocol.HoverParams) (_ *protocol.Hover, err error) {
+ s.Started <- struct{}{}
+ defer func() {
+ s.Completed <- err
+ }()
+ select {
+ case <-ctx.Done():
+ return nil, errors.New("cancelled hover")
+ case <-time.After(10 * time.Second):
+ }
+ return &protocol.Hover{}, nil
+}
+
+func (s WaitableServer) ResolveCompletionItem(_ context.Context, item *protocol.CompletionItem) (*protocol.CompletionItem, error) {
+ return item, nil
+}
+
+func checkClose(t *testing.T, closer func() error) {
+ t.Helper()
+ if err := closer(); err != nil {
+ t.Errorf("closing: %v", err)
+ }
+}
+
+func setupForwarding(ctx context.Context, t *testing.T, s protocol.Server) (direct, forwarded servertest.Connector, cleanup func()) {
+ t.Helper()
+ serveCtx := debug.WithInstance(ctx, "", "")
+ ss := NewStreamServer(cache.New(nil), false, nil)
+ ss.serverForTest = s
+ tsDirect := servertest.NewTCPServer(serveCtx, ss, nil)
+
+ forwarder, err := NewForwarder("tcp;"+tsDirect.Addr, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ tsForwarded := servertest.NewPipeServer(forwarder, nil)
+ return tsDirect, tsForwarded, func() {
+ checkClose(t, tsDirect.Close)
+ checkClose(t, tsForwarded.Close)
+ }
+}
+
+func TestRequestCancellation(t *testing.T) {
+ ctx := context.Background()
+ server := WaitableServer{
+ Started: make(chan struct{}),
+ Completed: make(chan error),
+ }
+ tsDirect, tsForwarded, cleanup := setupForwarding(ctx, t, server)
+ defer cleanup()
+ tests := []struct {
+ serverType string
+ ts servertest.Connector
+ }{
+ {"direct", tsDirect},
+ {"forwarder", tsForwarded},
+ }
+
+ for _, test := range tests {
+ t.Run(test.serverType, func(t *testing.T) {
+ cc := test.ts.Connect(ctx)
+ sd := protocol.ServerDispatcher(cc)
+ cc.Go(ctx,
+ protocol.Handlers(
+ jsonrpc2.MethodNotFound))
+
+ ctx := context.Background()
+ ctx, cancel := context.WithCancel(ctx)
+
+ result := make(chan error)
+ go func() {
+ _, err := sd.Hover(ctx, &protocol.HoverParams{})
+ result <- err
+ }()
+ // Wait for the Hover request to start.
+ <-server.Started
+ cancel()
+ if err := <-result; err == nil {
+ t.Error("nil error for cancelled Hover(), want non-nil")
+ }
+ if err := <-server.Completed; err == nil || !strings.Contains(err.Error(), "cancelled hover") {
+ t.Errorf("Hover(): unexpected server-side error %v", err)
+ }
+ })
+ }
+}
+
+const exampleProgram = `
+-- go.mod --
+module mod
+
+go 1.12
+-- main.go --
+package main
+
+import "fmt"
+
+func main() {
+ fmt.Println("Hello World.")
+}`
+
+func TestDebugInfoLifecycle(t *testing.T) {
+ sb, err := fake.NewSandbox(&fake.SandboxConfig{Files: fake.UnpackTxt(exampleProgram)})
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ if err := sb.Close(); err != nil {
+ // TODO(golang/go#38490): we can't currently make this an error because
+ // it fails on Windows: the workspace directory is still locked by a
+ // separate Go process.
+ // Once we have a reliable way to wait for proper shutdown, make this an
+ // error.
+ t.Logf("closing workspace failed: %v", err)
+ }
+ }()
+
+ baseCtx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ clientCtx := debug.WithInstance(baseCtx, "", "")
+ serverCtx := debug.WithInstance(baseCtx, "", "")
+
+ cache := cache.New(nil)
+ ss := NewStreamServer(cache, false, nil)
+ tsBackend := servertest.NewTCPServer(serverCtx, ss, nil)
+
+ forwarder, err := NewForwarder("tcp;"+tsBackend.Addr, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ tsForwarder := servertest.NewPipeServer(forwarder, nil)
+
+ const skipApplyEdits = false
+ ed1, err := fake.NewEditor(sb, fake.EditorConfig{}).Connect(clientCtx, tsForwarder, fake.ClientHooks{}, skipApplyEdits)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer ed1.Close(clientCtx)
+ ed2, err := fake.NewEditor(sb, fake.EditorConfig{}).Connect(baseCtx, tsBackend, fake.ClientHooks{}, skipApplyEdits)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer ed2.Close(baseCtx)
+
+ serverDebug := debug.GetInstance(serverCtx)
+ if got, want := len(serverDebug.State.Clients()), 2; got != want {
+ t.Errorf("len(server:Clients) = %d, want %d", got, want)
+ }
+ if got, want := len(serverDebug.State.Sessions()), 2; got != want {
+ t.Errorf("len(server:Sessions) = %d, want %d", got, want)
+ }
+ clientDebug := debug.GetInstance(clientCtx)
+ if got, want := len(clientDebug.State.Servers()), 1; got != want {
+ t.Errorf("len(client:Servers) = %d, want %d", got, want)
+ }
+ // Close one of the connections to verify that the client and session were
+ // dropped.
+ if err := ed1.Close(clientCtx); err != nil {
+ t.Fatal(err)
+ }
+ /*TODO: at this point we have verified the editor is closed
+ However there is no way currently to wait for all associated go routines to
+ go away, and we need to wait for those to trigger the client drop
+ for now we just give it a little bit of time, but we need to fix this
+ in a principled way
+ */
+ start := time.Now()
+ delay := time.Millisecond
+ const maxWait = time.Second
+ for len(serverDebug.State.Clients()) > 1 {
+ if time.Since(start) > maxWait {
+ break
+ }
+ time.Sleep(delay)
+ delay *= 2
+ }
+ if got, want := len(serverDebug.State.Clients()), 1; got != want {
+ t.Errorf("len(server:Clients) = %d, want %d", got, want)
+ }
+ if got, want := len(serverDebug.State.Sessions()), 1; got != want {
+ t.Errorf("len(server:Sessions()) = %d, want %d", got, want)
+ }
+}
+
+type initServer struct {
+ fakeServer
+
+ params *protocol.ParamInitialize
+}
+
+func (s *initServer) Initialize(ctx context.Context, params *protocol.ParamInitialize) (*protocol.InitializeResult, error) {
+ s.params = params
+ return &protocol.InitializeResult{}, nil
+}
+
+func TestEnvForwarding(t *testing.T) {
+ ctx := context.Background()
+
+ server := &initServer{}
+ _, tsForwarded, cleanup := setupForwarding(ctx, t, server)
+ defer cleanup()
+
+ conn := tsForwarded.Connect(ctx)
+ conn.Go(ctx, jsonrpc2.MethodNotFound)
+ dispatch := protocol.ServerDispatcher(conn)
+ initParams := &protocol.ParamInitialize{}
+ initParams.InitializationOptions = map[string]interface{}{
+ "env": map[string]interface{}{
+ "GONOPROXY": "example.com",
+ },
+ }
+ _, err := dispatch.Initialize(ctx, initParams)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if server.params == nil {
+ t.Fatalf("initialize params are unset")
+ }
+ env := server.params.InitializationOptions.(map[string]interface{})["env"].(map[string]interface{})
+
+ // Check for an arbitrary Go variable. It should be set.
+ if _, ok := env["GOPRIVATE"]; !ok {
+ t.Errorf("Go environment variable GOPRIVATE unset in initialization options")
+ }
+ // Check that the variable present in our user config was not overwritten.
+ if v := env["GONOPROXY"]; v != "example.com" {
+ t.Errorf("GONOPROXY environment variable was overwritten")
+ }
+}
+
+func TestListenParsing(t *testing.T) {
+ tests := []struct {
+ input, wantNetwork, wantAddr string
+ }{
+ {"127.0.0.1:0", "tcp", "127.0.0.1:0"},
+ {"unix;/tmp/sock", "unix", "/tmp/sock"},
+ {"auto", "auto", ""},
+ {"auto;foo", "auto", "foo"},
+ }
+
+ for _, test := range tests {
+ gotNetwork, gotAddr := ParseAddr(test.input)
+ if gotNetwork != test.wantNetwork {
+ t.Errorf("network = %q, want %q", gotNetwork, test.wantNetwork)
+ }
+ if gotAddr != test.wantAddr {
+ t.Errorf("addr = %q, want %q", gotAddr, test.wantAddr)
+ }
+ }
+}
diff --git a/gopls/internal/lsp/lsprpc/middleware.go b/gopls/internal/lsp/lsprpc/middleware.go
new file mode 100644
index 000000000..50089cde7
--- /dev/null
+++ b/gopls/internal/lsp/lsprpc/middleware.go
@@ -0,0 +1,142 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsprpc
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "sync"
+
+ "golang.org/x/tools/internal/event"
+ jsonrpc2_v2 "golang.org/x/tools/internal/jsonrpc2_v2"
+)
+
+// Metadata holds arbitrary data transferred between jsonrpc2 peers.
+type Metadata map[string]interface{}
+
+// PeerInfo holds information about a peering between jsonrpc2 servers.
+type PeerInfo struct {
+ // RemoteID is the identity of the current server on its peer.
+ RemoteID int64
+
+ // LocalID is the identity of the peer on the server.
+ LocalID int64
+
+ // IsClient reports whether the peer is a client. If false, the peer is a
+ // server.
+ IsClient bool
+
+ // Metadata holds arbitrary information provided by the peer.
+ Metadata Metadata
+}
+
+// Handshaker handles both server and client handshaking over jsonrpc2. To
+// instrument server-side handshaking, use Handshaker.Middleware. To instrument
+// client-side handshaking, call Handshaker.ClientHandshake for any new
+// client-side connections.
+type Handshaker struct {
+ // Metadata will be shared with peers via handshaking.
+ Metadata Metadata
+
+ mu sync.Mutex
+ prevID int64
+ peers map[int64]PeerInfo
+}
+
+// Peers returns the peer info this handshaker knows about by way of either the
+// server-side handshake middleware, or client-side handshakes.
+func (h *Handshaker) Peers() []PeerInfo {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+
+ var c []PeerInfo
+ for _, v := range h.peers {
+ c = append(c, v)
+ }
+ return c
+}
+
+// Middleware is a jsonrpc2 middleware function to augment connection binding
+// to handle the handshake method, and record disconnections.
+func (h *Handshaker) Middleware(inner jsonrpc2_v2.Binder) jsonrpc2_v2.Binder {
+ return BinderFunc(func(ctx context.Context, conn *jsonrpc2_v2.Connection) jsonrpc2_v2.ConnectionOptions {
+ opts := inner.Bind(ctx, conn)
+
+ localID := h.nextID()
+ info := &PeerInfo{
+ RemoteID: localID,
+ Metadata: h.Metadata,
+ }
+
+ // Wrap the delegated handler to accept the handshake.
+ delegate := opts.Handler
+ opts.Handler = jsonrpc2_v2.HandlerFunc(func(ctx context.Context, req *jsonrpc2_v2.Request) (interface{}, error) {
+ if req.Method == handshakeMethod {
+ var peerInfo PeerInfo
+ if err := json.Unmarshal(req.Params, &peerInfo); err != nil {
+ return nil, fmt.Errorf("%w: unmarshaling client info: %v", jsonrpc2_v2.ErrInvalidParams, err)
+ }
+ peerInfo.LocalID = localID
+ peerInfo.IsClient = true
+ h.recordPeer(peerInfo)
+ return info, nil
+ }
+ return delegate.Handle(ctx, req)
+ })
+
+ // Record the dropped client.
+ go h.cleanupAtDisconnect(conn, localID)
+
+ return opts
+ })
+}
+
+// ClientHandshake performs a client-side handshake with the server at the
+// other end of conn, recording the server's peer info and watching for conn's
+// disconnection.
+func (h *Handshaker) ClientHandshake(ctx context.Context, conn *jsonrpc2_v2.Connection) {
+ localID := h.nextID()
+ info := &PeerInfo{
+ RemoteID: localID,
+ Metadata: h.Metadata,
+ }
+
+ call := conn.Call(ctx, handshakeMethod, info)
+ var serverInfo PeerInfo
+ if err := call.Await(ctx, &serverInfo); err != nil {
+ event.Error(ctx, "performing handshake", err)
+ return
+ }
+ serverInfo.LocalID = localID
+ h.recordPeer(serverInfo)
+
+ go h.cleanupAtDisconnect(conn, localID)
+}
+
+func (h *Handshaker) nextID() int64 {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+
+ h.prevID++
+ return h.prevID
+}
+
+func (h *Handshaker) cleanupAtDisconnect(conn *jsonrpc2_v2.Connection, peerID int64) {
+ conn.Wait()
+
+ h.mu.Lock()
+ defer h.mu.Unlock()
+ delete(h.peers, peerID)
+}
+
+func (h *Handshaker) recordPeer(info PeerInfo) {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+ if h.peers == nil {
+ h.peers = make(map[int64]PeerInfo)
+ }
+ h.peers[info.LocalID] = info
+}
diff --git a/gopls/internal/lsp/lsprpc/middleware_test.go b/gopls/internal/lsp/lsprpc/middleware_test.go
new file mode 100644
index 000000000..c528eae5c
--- /dev/null
+++ b/gopls/internal/lsp/lsprpc/middleware_test.go
@@ -0,0 +1,93 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsprpc_test
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "testing"
+ "time"
+
+ . "golang.org/x/tools/gopls/internal/lsp/lsprpc"
+ jsonrpc2_v2 "golang.org/x/tools/internal/jsonrpc2_v2"
+)
+
+var noopBinder = BinderFunc(func(context.Context, *jsonrpc2_v2.Connection) jsonrpc2_v2.ConnectionOptions {
+ return jsonrpc2_v2.ConnectionOptions{}
+})
+
+func TestHandshakeMiddleware(t *testing.T) {
+ sh := &Handshaker{
+ Metadata: Metadata{
+ "answer": 42,
+ },
+ }
+ ctx := context.Background()
+ env := new(TestEnv)
+ defer env.Shutdown(t)
+ l, _ := env.serve(ctx, t, sh.Middleware(noopBinder))
+ conn := env.dial(ctx, t, l.Dialer(), noopBinder, false)
+ ch := &Handshaker{
+ Metadata: Metadata{
+ "question": 6 * 9,
+ },
+ }
+
+ check := func(connected bool) error {
+ clients := sh.Peers()
+ servers := ch.Peers()
+ want := 0
+ if connected {
+ want = 1
+ }
+ if got := len(clients); got != want {
+ return fmt.Errorf("got %d clients on the server, want %d", got, want)
+ }
+ if got := len(servers); got != want {
+ return fmt.Errorf("got %d servers on the client, want %d", got, want)
+ }
+ if !connected {
+ return nil
+ }
+ client := clients[0]
+ server := servers[0]
+ if _, ok := client.Metadata["question"]; !ok {
+ return errors.New("no client metadata")
+ }
+ if _, ok := server.Metadata["answer"]; !ok {
+ return errors.New("no server metadata")
+ }
+ if client.LocalID != server.RemoteID {
+ return fmt.Errorf("client.LocalID == %d, server.PeerID == %d", client.LocalID, server.RemoteID)
+ }
+ if client.RemoteID != server.LocalID {
+ return fmt.Errorf("client.PeerID == %d, server.LocalID == %d", client.RemoteID, server.LocalID)
+ }
+ return nil
+ }
+
+ if err := check(false); err != nil {
+ t.Fatalf("before handshake: %v", err)
+ }
+ ch.ClientHandshake(ctx, conn)
+ if err := check(true); err != nil {
+ t.Fatalf("after handshake: %v", err)
+ }
+ conn.Close()
+ // Wait for up to ~2s for connections to get cleaned up.
+ delay := 25 * time.Millisecond
+ for retries := 3; retries >= 0; retries-- {
+ time.Sleep(delay)
+ err := check(false)
+ if err == nil {
+ return
+ }
+ if retries == 0 {
+ t.Fatalf("after closing connection: %v", err)
+ }
+ delay *= 4
+ }
+}
diff --git a/gopls/internal/lsp/mod/code_lens.go b/gopls/internal/lsp/mod/code_lens.go
new file mode 100644
index 000000000..b93ac44f1
--- /dev/null
+++ b/gopls/internal/lsp/mod/code_lens.go
@@ -0,0 +1,191 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mod
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "golang.org/x/mod/modfile"
+ "golang.org/x/tools/gopls/internal/lsp/command"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+)
+
+// LensFuncs returns the supported lensFuncs for go.mod files.
+func LensFuncs() map[command.Command]source.LensFunc {
+ return map[command.Command]source.LensFunc{
+ command.UpgradeDependency: upgradeLenses,
+ command.Tidy: tidyLens,
+ command.Vendor: vendorLens,
+ command.RunGovulncheck: vulncheckLenses,
+ }
+}
+
+func upgradeLenses(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) ([]protocol.CodeLens, error) {
+ pm, err := snapshot.ParseMod(ctx, fh)
+ if err != nil || pm.File == nil {
+ return nil, err
+ }
+ uri := protocol.URIFromSpanURI(fh.URI())
+ reset, err := command.NewResetGoModDiagnosticsCommand("Reset go.mod diagnostics", command.ResetGoModDiagnosticsArgs{URIArg: command.URIArg{URI: uri}})
+ if err != nil {
+ return nil, err
+ }
+ // Put the `Reset go.mod diagnostics` codelens on the module statement.
+ modrng, err := moduleStmtRange(fh, pm)
+ if err != nil {
+ return nil, err
+ }
+ lenses := []protocol.CodeLens{{Range: modrng, Command: &reset}}
+ if len(pm.File.Require) == 0 {
+ // Nothing to upgrade.
+ return lenses, nil
+ }
+ var requires []string
+ for _, req := range pm.File.Require {
+ requires = append(requires, req.Mod.Path)
+ }
+ checkUpgrade, err := command.NewCheckUpgradesCommand("Check for upgrades", command.CheckUpgradesArgs{
+ URI: uri,
+ Modules: requires,
+ })
+ if err != nil {
+ return nil, err
+ }
+ upgradeTransitive, err := command.NewUpgradeDependencyCommand("Upgrade transitive dependencies", command.DependencyArgs{
+ URI: uri,
+ AddRequire: false,
+ GoCmdArgs: []string{"-d", "-u", "-t", "./..."},
+ })
+ if err != nil {
+ return nil, err
+ }
+ upgradeDirect, err := command.NewUpgradeDependencyCommand("Upgrade direct dependencies", command.DependencyArgs{
+ URI: uri,
+ AddRequire: false,
+ GoCmdArgs: append([]string{"-d"}, requires...),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Put the upgrade code lenses above the first require block or statement.
+ rng, err := firstRequireRange(fh, pm)
+ if err != nil {
+ return nil, err
+ }
+
+ return append(lenses, []protocol.CodeLens{
+ {Range: rng, Command: &checkUpgrade},
+ {Range: rng, Command: &upgradeTransitive},
+ {Range: rng, Command: &upgradeDirect},
+ }...), nil
+}
+
+func tidyLens(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) ([]protocol.CodeLens, error) {
+ pm, err := snapshot.ParseMod(ctx, fh)
+ if err != nil || pm.File == nil {
+ return nil, err
+ }
+ uri := protocol.URIFromSpanURI(fh.URI())
+ cmd, err := command.NewTidyCommand("Run go mod tidy", command.URIArgs{URIs: []protocol.DocumentURI{uri}})
+ if err != nil {
+ return nil, err
+ }
+ rng, err := moduleStmtRange(fh, pm)
+ if err != nil {
+ return nil, err
+ }
+ return []protocol.CodeLens{{
+ Range: rng,
+ Command: &cmd,
+ }}, nil
+}
+
+func vendorLens(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) ([]protocol.CodeLens, error) {
+ pm, err := snapshot.ParseMod(ctx, fh)
+ if err != nil || pm.File == nil {
+ return nil, err
+ }
+ if len(pm.File.Require) == 0 {
+ // Nothing to vendor.
+ return nil, nil
+ }
+ rng, err := moduleStmtRange(fh, pm)
+ if err != nil {
+ return nil, err
+ }
+ title := "Create vendor directory"
+ uri := protocol.URIFromSpanURI(fh.URI())
+ cmd, err := command.NewVendorCommand(title, command.URIArg{URI: uri})
+ if err != nil {
+ return nil, err
+ }
+ // Change the message depending on whether or not the module already has a
+ // vendor directory.
+ vendorDir := filepath.Join(filepath.Dir(fh.URI().Filename()), "vendor")
+ if info, _ := os.Stat(vendorDir); info != nil && info.IsDir() {
+ title = "Sync vendor directory"
+ }
+ return []protocol.CodeLens{{Range: rng, Command: &cmd}}, nil
+}
+
+func moduleStmtRange(fh source.FileHandle, pm *source.ParsedModule) (protocol.Range, error) {
+ if pm.File == nil || pm.File.Module == nil || pm.File.Module.Syntax == nil {
+ return protocol.Range{}, fmt.Errorf("no module statement in %s", fh.URI())
+ }
+ syntax := pm.File.Module.Syntax
+ return pm.Mapper.OffsetRange(syntax.Start.Byte, syntax.End.Byte)
+}
+
+// firstRequireRange returns the range for the first "require" in the given
+// go.mod file. This is either a require block or an individual require line.
+func firstRequireRange(fh source.FileHandle, pm *source.ParsedModule) (protocol.Range, error) {
+ if len(pm.File.Require) == 0 {
+ return protocol.Range{}, fmt.Errorf("no requires in the file %s", fh.URI())
+ }
+ var start, end modfile.Position
+ for _, stmt := range pm.File.Syntax.Stmt {
+ if b, ok := stmt.(*modfile.LineBlock); ok && len(b.Token) == 1 && b.Token[0] == "require" {
+ start, end = b.Span()
+ break
+ }
+ }
+
+ firstRequire := pm.File.Require[0].Syntax
+ if start.Byte == 0 || firstRequire.Start.Byte < start.Byte {
+ start, end = firstRequire.Start, firstRequire.End
+ }
+ return pm.Mapper.OffsetRange(start.Byte, end.Byte)
+}
+
+func vulncheckLenses(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) ([]protocol.CodeLens, error) {
+ pm, err := snapshot.ParseMod(ctx, fh)
+ if err != nil || pm.File == nil {
+ return nil, err
+ }
+ // Place the codelenses near the module statement.
+ // A module may not have the require block,
+ // but vulnerabilities can exist in standard libraries.
+ uri := protocol.URIFromSpanURI(fh.URI())
+ rng, err := moduleStmtRange(fh, pm)
+ if err != nil {
+ return nil, err
+ }
+
+ vulncheck, err := command.NewRunGovulncheckCommand("Run govulncheck", command.VulncheckArgs{
+ URI: uri,
+ Pattern: "./...",
+ })
+ if err != nil {
+ return nil, err
+ }
+ return []protocol.CodeLens{
+ {Range: rng, Command: &vulncheck},
+ }, nil
+}
diff --git a/gopls/internal/lsp/mod/diagnostics.go b/gopls/internal/lsp/mod/diagnostics.go
new file mode 100644
index 000000000..746a14e91
--- /dev/null
+++ b/gopls/internal/lsp/mod/diagnostics.go
@@ -0,0 +1,561 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package mod provides core features related to go.mod file
+// handling for use by Go editors and tools.
+package mod
+
+import (
+ "context"
+ "fmt"
+ "sort"
+ "strings"
+
+ "golang.org/x/mod/modfile"
+ "golang.org/x/mod/semver"
+ "golang.org/x/tools/gopls/internal/govulncheck"
+ "golang.org/x/tools/gopls/internal/lsp/command"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/vuln/osv"
+)
+
+// Diagnostics returns diagnostics for the modules in the workspace.
+//
+// It waits for completion of type-checking of all active packages.
+func Diagnostics(ctx context.Context, snapshot source.Snapshot) (map[span.URI][]*source.Diagnostic, error) {
+ ctx, done := event.Start(ctx, "mod.Diagnostics", source.SnapshotLabels(snapshot)...)
+ defer done()
+
+ return collectDiagnostics(ctx, snapshot, ModDiagnostics)
+}
+
+// UpgradeDiagnostics returns upgrade diagnostics for the modules in the
+// workspace with known upgrades.
+func UpgradeDiagnostics(ctx context.Context, snapshot source.Snapshot) (map[span.URI][]*source.Diagnostic, error) {
+ ctx, done := event.Start(ctx, "mod.UpgradeDiagnostics", source.SnapshotLabels(snapshot)...)
+ defer done()
+
+ return collectDiagnostics(ctx, snapshot, ModUpgradeDiagnostics)
+}
+
+// VulnerabilityDiagnostics returns vulnerability diagnostics for the active modules in the
+// workspace with known vulnerabilities.
+func VulnerabilityDiagnostics(ctx context.Context, snapshot source.Snapshot) (map[span.URI][]*source.Diagnostic, error) {
+ ctx, done := event.Start(ctx, "mod.VulnerabilityDiagnostics", source.SnapshotLabels(snapshot)...)
+ defer done()
+
+ return collectDiagnostics(ctx, snapshot, ModVulnerabilityDiagnostics)
+}
+
+func collectDiagnostics(ctx context.Context, snapshot source.Snapshot, diagFn func(context.Context, source.Snapshot, source.FileHandle) ([]*source.Diagnostic, error)) (map[span.URI][]*source.Diagnostic, error) {
+ reports := make(map[span.URI][]*source.Diagnostic)
+ for _, uri := range snapshot.ModFiles() {
+ fh, err := snapshot.GetFile(ctx, uri)
+ if err != nil {
+ return nil, err
+ }
+ reports[fh.URI()] = []*source.Diagnostic{}
+ diagnostics, err := diagFn(ctx, snapshot, fh)
+ if err != nil {
+ return nil, err
+ }
+ for _, d := range diagnostics {
+ fh, err := snapshot.GetFile(ctx, d.URI)
+ if err != nil {
+ return nil, err
+ }
+ reports[fh.URI()] = append(reports[fh.URI()], d)
+ }
+ }
+ return reports, nil
+}
+
+// ModDiagnostics waits for completion of type-checking of all active
+// packages, then returns diagnostics from diagnosing the packages in
+// the workspace and from tidying the go.mod file.
+func ModDiagnostics(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) (diagnostics []*source.Diagnostic, err error) {
+ pm, err := snapshot.ParseMod(ctx, fh)
+ if err != nil {
+ if pm == nil || len(pm.ParseErrors) == 0 {
+ return nil, err
+ }
+ return pm.ParseErrors, nil
+ }
+
+ // Packages in the workspace can contribute diagnostics to go.mod files.
+ // TODO(rfindley): Try to avoid type checking all packages in the workspace here,
+ // for every go.mod file. If gc_details is enabled, it looks like this could lead to extra
+ // go command invocations (as gc details is not memoized).
+ active, err := snapshot.ActiveMetadata(ctx)
+ if err != nil && !source.IsNonFatalGoModError(err) {
+ event.Error(ctx, fmt.Sprintf("workspace packages: diagnosing %s", pm.URI), err)
+ }
+ if err == nil {
+ // Note: the call to PackageDiagnostics below may be the first operation
+ // after the initial metadata load, and therefore result in type-checking
+ // or loading many packages.
+ ids := make([]source.PackageID, len(active))
+ for i, meta := range active {
+ ids[i] = meta.ID
+ }
+ diags, err := snapshot.PackageDiagnostics(ctx, ids...)
+ if err != nil {
+ return nil, err
+ }
+ diagnostics = append(diagnostics, diags[fh.URI()]...)
+ }
+
+ tidied, err := snapshot.ModTidy(ctx, pm)
+ if err != nil && !source.IsNonFatalGoModError(err) {
+ event.Error(ctx, fmt.Sprintf("tidy: diagnosing %s", pm.URI), err)
+ }
+ if err == nil {
+ for _, d := range tidied.Diagnostics {
+ if d.URI != fh.URI() {
+ continue
+ }
+ diagnostics = append(diagnostics, d)
+ }
+ }
+ return diagnostics, nil
+}
+
+// ModUpgradeDiagnostics adds upgrade quick fixes for individual modules if the upgrades
+// are recorded in the view.
+func ModUpgradeDiagnostics(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) (upgradeDiagnostics []*source.Diagnostic, err error) {
+ pm, err := snapshot.ParseMod(ctx, fh)
+ if err != nil {
+ // Don't return an error if there are parse error diagnostics to be shown, but also do not
+ // continue since we won't be able to show the upgrade diagnostics.
+ if pm != nil && len(pm.ParseErrors) != 0 {
+ return nil, nil
+ }
+ return nil, err
+ }
+
+ upgrades := snapshot.View().ModuleUpgrades(fh.URI())
+ for _, req := range pm.File.Require {
+ ver, ok := upgrades[req.Mod.Path]
+ if !ok || req.Mod.Version == ver {
+ continue
+ }
+ rng, err := pm.Mapper.OffsetRange(req.Syntax.Start.Byte, req.Syntax.End.Byte)
+ if err != nil {
+ return nil, err
+ }
+ // Upgrade to the exact version we offer the user, not the most recent.
+ title := fmt.Sprintf("%s%v", upgradeCodeActionPrefix, ver)
+ cmd, err := command.NewUpgradeDependencyCommand(title, command.DependencyArgs{
+ URI: protocol.URIFromSpanURI(fh.URI()),
+ AddRequire: false,
+ GoCmdArgs: []string{req.Mod.Path + "@" + ver},
+ })
+ if err != nil {
+ return nil, err
+ }
+ upgradeDiagnostics = append(upgradeDiagnostics, &source.Diagnostic{
+ URI: fh.URI(),
+ Range: rng,
+ Severity: protocol.SeverityInformation,
+ Source: source.UpgradeNotification,
+ Message: fmt.Sprintf("%v can be upgraded", req.Mod.Path),
+ SuggestedFixes: []source.SuggestedFix{source.SuggestedFixFromCommand(cmd, protocol.QuickFix)},
+ })
+ }
+
+ return upgradeDiagnostics, nil
+}
+
+const upgradeCodeActionPrefix = "Upgrade to "
+
+// ModVulnerabilityDiagnostics adds diagnostics for vulnerabilities in individual modules
+// if the vulnerability is recorded in the view.
+func ModVulnerabilityDiagnostics(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) (vulnDiagnostics []*source.Diagnostic, err error) {
+ pm, err := snapshot.ParseMod(ctx, fh)
+ if err != nil {
+ // Don't return an error if there are parse error diagnostics to be shown, but also do not
+ // continue since we won't be able to show the vulnerability diagnostics.
+ if pm != nil && len(pm.ParseErrors) != 0 {
+ return nil, nil
+ }
+ return nil, err
+ }
+
+ diagSource := source.Govulncheck
+ vs := snapshot.View().Vulnerabilities(fh.URI())[fh.URI()]
+ if vs == nil && snapshot.View().Options().Vulncheck == source.ModeVulncheckImports {
+ vs, err = snapshot.ModVuln(ctx, fh.URI())
+ if err != nil {
+ return nil, err
+ }
+ diagSource = source.Vulncheck
+ }
+ if vs == nil || len(vs.Vulns) == 0 {
+ return nil, nil
+ }
+
+ suggestRunOrResetGovulncheck, err := suggestGovulncheckAction(diagSource == source.Govulncheck, fh.URI())
+ if err != nil {
+ // must not happen
+ return nil, err // TODO: bug report
+ }
+ type modVuln struct {
+ mod *govulncheck.Module
+ vuln *govulncheck.Vuln
+ }
+ vulnsByModule := make(map[string][]modVuln)
+ for _, vuln := range vs.Vulns {
+ for _, mod := range vuln.Modules {
+ vulnsByModule[mod.Path] = append(vulnsByModule[mod.Path], modVuln{mod, vuln})
+ }
+ }
+
+ for _, req := range pm.File.Require {
+ vulns := vulnsByModule[req.Mod.Path]
+ if len(vulns) == 0 {
+ continue
+ }
+ // note: req.Syntax is the line corresponding to 'require', which means
+ // req.Syntax.Start can point to the beginning of the "require" keyword
+ // for a single line require (e.g. "require golang.org/x/mod v0.0.0").
+ start := req.Syntax.Start.Byte
+ if len(req.Syntax.Token) == 3 {
+ start += len("require ")
+ }
+ rng, err := pm.Mapper.OffsetRange(start, req.Syntax.End.Byte)
+ if err != nil {
+ return nil, err
+ }
+ // Map affecting vulns to 'warning' level diagnostics,
+ // others to 'info' level diagnostics.
+ // Fixes will include only the upgrades for warning level diagnostics.
+ var warningFixes, infoFixes []source.SuggestedFix
+ var warning, info []string
+ var relatedInfo []protocol.DiagnosticRelatedInformation
+ for _, mv := range vulns {
+ mod, vuln := mv.mod, mv.vuln
+ // It is possible that the source code was changed since the last
+ // govulncheck run and information in the `vulns` info is stale.
+ // For example, imagine that a user is in the middle of updating
+ // problematic modules detected by the govulncheck run by applying
+ // quick fixes. Stale diagnostics can be confusing and prevent the
+ // user from quickly locating the next module to fix.
+ // Ideally we should rerun the analysis with the updated module
+ // dependencies or any other code changes, but we are not yet
+ // in the position of automatically triggering the analysis
+ // (govulncheck can take a while). We also don't know exactly what
+ // part of source code was changed since `vulns` was computed.
+ // As a heuristic, we assume that a user upgrades the affecting
+ // module to the version with the fix or the latest one, and if the
+ // version in the require statement is equal to or higher than the
+ // fixed version, skip generating a diagnostic about the vulnerability.
+ // Eventually, the user has to rerun govulncheck.
+ if mod.FixedVersion != "" && semver.IsValid(req.Mod.Version) && semver.Compare(mod.FixedVersion, req.Mod.Version) <= 0 {
+ continue
+ }
+ if !vuln.IsCalled() {
+ info = append(info, vuln.OSV.ID)
+ } else {
+ warning = append(warning, vuln.OSV.ID)
+ relatedInfo = append(relatedInfo, listRelatedInfo(ctx, snapshot, vuln)...)
+ }
+ // Upgrade to the exact version we offer the user, not the most recent.
+ if fixedVersion := mod.FixedVersion; semver.IsValid(fixedVersion) && semver.Compare(req.Mod.Version, fixedVersion) < 0 {
+ cmd, err := getUpgradeCodeAction(fh, req, fixedVersion)
+ if err != nil {
+ return nil, err // TODO: bug report
+ }
+ sf := source.SuggestedFixFromCommand(cmd, protocol.QuickFix)
+ if !vuln.IsCalled() {
+ infoFixes = append(infoFixes, sf)
+ } else {
+ warningFixes = append(warningFixes, sf)
+ }
+ }
+ }
+
+ if len(warning) == 0 && len(info) == 0 {
+ continue
+ }
+ // Add an upgrade for module@latest.
+ // TODO(suzmue): verify if latest is the same as fixedVersion.
+ latest, err := getUpgradeCodeAction(fh, req, "latest")
+ if err != nil {
+ return nil, err // TODO: bug report
+ }
+ sf := source.SuggestedFixFromCommand(latest, protocol.QuickFix)
+ if len(warningFixes) > 0 {
+ warningFixes = append(warningFixes, sf)
+ }
+ if len(infoFixes) > 0 {
+ infoFixes = append(infoFixes, sf)
+ }
+
+ sort.Strings(warning)
+ sort.Strings(info)
+
+ if len(warning) > 0 {
+ warningFixes = append(warningFixes, suggestRunOrResetGovulncheck)
+ vulnDiagnostics = append(vulnDiagnostics, &source.Diagnostic{
+ URI: fh.URI(),
+ Range: rng,
+ Severity: protocol.SeverityWarning,
+ Source: diagSource,
+ Message: getVulnMessage(req.Mod.Path, warning, true, diagSource == source.Govulncheck),
+ SuggestedFixes: warningFixes,
+ Related: relatedInfo,
+ })
+ }
+ if len(info) > 0 {
+ infoFixes = append(infoFixes, suggestRunOrResetGovulncheck)
+ vulnDiagnostics = append(vulnDiagnostics, &source.Diagnostic{
+ URI: fh.URI(),
+ Range: rng,
+ Severity: protocol.SeverityInformation,
+ Source: diagSource,
+ Message: getVulnMessage(req.Mod.Path, info, false, diagSource == source.Govulncheck),
+ SuggestedFixes: infoFixes,
+ Related: relatedInfo,
+ })
+ }
+ }
+
+ // TODO(hyangah): place this diagnostic on the `go` directive or `toolchain` directive
+ // after https://go.dev/issue/57001.
+ const diagnoseStdLib = false
+ if diagnoseStdLib {
+ // Add standard library vulnerabilities.
+ stdlibVulns := vulnsByModule["stdlib"]
+ if len(stdlibVulns) == 0 {
+ return vulnDiagnostics, nil
+ }
+
+ // Put the standard library diagnostic on the module declaration.
+ rng, err := pm.Mapper.OffsetRange(pm.File.Module.Syntax.Start.Byte, pm.File.Module.Syntax.End.Byte)
+ if err != nil {
+ return vulnDiagnostics, nil // TODO: bug report
+ }
+
+ stdlib := stdlibVulns[0].mod.FoundVersion
+ var warning, info []string
+ var relatedInfo []protocol.DiagnosticRelatedInformation
+ for _, mv := range stdlibVulns {
+ vuln := mv.vuln
+ stdlib = mv.mod.FoundVersion
+ if !vuln.IsCalled() {
+ info = append(info, vuln.OSV.ID)
+ } else {
+ warning = append(warning, vuln.OSV.ID)
+ relatedInfo = append(relatedInfo, listRelatedInfo(ctx, snapshot, vuln)...)
+ }
+ }
+ if len(warning) > 0 {
+ fixes := []source.SuggestedFix{suggestRunOrResetGovulncheck}
+ vulnDiagnostics = append(vulnDiagnostics, &source.Diagnostic{
+ URI: fh.URI(),
+ Range: rng,
+ Severity: protocol.SeverityWarning,
+ Source: diagSource,
+ Message: getVulnMessage(stdlib, warning, true, diagSource == source.Govulncheck),
+ SuggestedFixes: fixes,
+ Related: relatedInfo,
+ })
+ }
+ if len(info) > 0 {
+ fixes := []source.SuggestedFix{suggestRunOrResetGovulncheck}
+ vulnDiagnostics = append(vulnDiagnostics, &source.Diagnostic{
+ URI: fh.URI(),
+ Range: rng,
+ Severity: protocol.SeverityInformation,
+ Source: diagSource,
+ Message: getVulnMessage(stdlib, info, false, diagSource == source.Govulncheck),
+ SuggestedFixes: fixes,
+ Related: relatedInfo,
+ })
+ }
+ }
+
+ return vulnDiagnostics, nil
+}
+
+// suggestGovulncheckAction returns a code action that suggests either run govulncheck
+// for more accurate investigation (if the present vulncheck diagnostics are based on
+// analysis less accurate than govulncheck) or reset the existing govulncheck result
+// (if the present vulncheck diagnostics are already based on govulncheck run).
+func suggestGovulncheckAction(fromGovulncheck bool, uri span.URI) (source.SuggestedFix, error) {
+ if fromGovulncheck {
+ resetVulncheck, err := command.NewResetGoModDiagnosticsCommand("Reset govulncheck result", command.ResetGoModDiagnosticsArgs{
+ URIArg: command.URIArg{URI: protocol.DocumentURI(uri)},
+ DiagnosticSource: string(source.Govulncheck),
+ })
+ if err != nil {
+ return source.SuggestedFix{}, err
+ }
+ return source.SuggestedFixFromCommand(resetVulncheck, protocol.QuickFix), nil
+ }
+ vulncheck, err := command.NewRunGovulncheckCommand("Run govulncheck to verify", command.VulncheckArgs{
+ URI: protocol.DocumentURI(uri),
+ Pattern: "./...",
+ })
+ if err != nil {
+ return source.SuggestedFix{}, err
+ }
+ return source.SuggestedFixFromCommand(vulncheck, protocol.QuickFix), nil
+}
+
+func getVulnMessage(mod string, vulns []string, used, fromGovulncheck bool) string {
+ var b strings.Builder
+ if used {
+ switch len(vulns) {
+ case 1:
+ fmt.Fprintf(&b, "%v has a vulnerability used in the code: %v.", mod, vulns[0])
+ default:
+ fmt.Fprintf(&b, "%v has vulnerabilities used in the code: %v.", mod, strings.Join(vulns, ", "))
+ }
+ } else {
+ if fromGovulncheck {
+ switch len(vulns) {
+ case 1:
+ fmt.Fprintf(&b, "%v has a vulnerability %v that is not used in the code.", mod, vulns[0])
+ default:
+ fmt.Fprintf(&b, "%v has known vulnerabilities %v that are not used in the code.", mod, strings.Join(vulns, ", "))
+ }
+ } else {
+ switch len(vulns) {
+ case 1:
+ fmt.Fprintf(&b, "%v has a vulnerability %v.", mod, vulns[0])
+ default:
+ fmt.Fprintf(&b, "%v has known vulnerabilities %v.", mod, strings.Join(vulns, ", "))
+ }
+ }
+ }
+ return b.String()
+}
+
+func listRelatedInfo(ctx context.Context, snapshot source.Snapshot, vuln *govulncheck.Vuln) []protocol.DiagnosticRelatedInformation {
+ var ri []protocol.DiagnosticRelatedInformation
+ for _, m := range vuln.Modules {
+ for _, p := range m.Packages {
+ for _, c := range p.CallStacks {
+ if len(c.Frames) == 0 {
+ continue
+ }
+ entry := c.Frames[0]
+ pos := entry.Position
+ if pos.Filename == "" {
+ continue // token.Position Filename is an optional field.
+ }
+ uri := span.URIFromPath(pos.Filename)
+ startPos := protocol.Position{
+ Line: uint32(pos.Line) - 1,
+ // We need to read the file contents to precisesly map
+ // token.Position (pos) to the UTF16-based column offset
+ // protocol.Position requires. That can be expensive.
+ // We need this related info to just help users to open
+ // the entry points of the callstack and once the file is
+ // open, we will compute the precise location based on the
+ // open file contents. So, use the beginning of the line
+ // as the position here instead of precise UTF16-based
+ // position computation.
+ Character: 0,
+ }
+ ri = append(ri, protocol.DiagnosticRelatedInformation{
+ Location: protocol.Location{
+ URI: protocol.URIFromSpanURI(uri),
+ Range: protocol.Range{
+ Start: startPos,
+ End: startPos,
+ },
+ },
+ Message: fmt.Sprintf("[%v] %v -> %v.%v", vuln.OSV.ID, entry.Name(), p.Path, c.Symbol),
+ })
+ }
+ }
+ }
+ return ri
+}
+
+func formatMessage(v *govulncheck.Vuln) string {
+ details := []byte(v.OSV.Details)
+ // Remove any new lines that are not preceded or followed by a new line.
+ for i, r := range details {
+ if r == '\n' && i > 0 && details[i-1] != '\n' && i+1 < len(details) && details[i+1] != '\n' {
+ details[i] = ' '
+ }
+ }
+ return strings.TrimSpace(strings.Replace(string(details), "\n\n", "\n\n ", -1))
+}
+
+// href returns the url for the vulnerability information.
+// Eventually we should retrieve the url embedded in the osv.Entry.
+// While vuln.go.dev is under development, this always returns
+// the page in pkg.go.dev.
+func href(vuln *osv.Entry) string {
+ return fmt.Sprintf("https://pkg.go.dev/vuln/%s", vuln.ID)
+}
+
+func getUpgradeCodeAction(fh source.FileHandle, req *modfile.Require, version string) (protocol.Command, error) {
+ cmd, err := command.NewUpgradeDependencyCommand(upgradeTitle(version), command.DependencyArgs{
+ URI: protocol.URIFromSpanURI(fh.URI()),
+ AddRequire: false,
+ GoCmdArgs: []string{req.Mod.Path + "@" + version},
+ })
+ if err != nil {
+ return protocol.Command{}, err
+ }
+ return cmd, nil
+}
+
+func upgradeTitle(fixedVersion string) string {
+ title := fmt.Sprintf("%s%v", upgradeCodeActionPrefix, fixedVersion)
+ return title
+}
+
+// SelectUpgradeCodeActions takes a list of code actions for a required module
+// and returns a more selective list of upgrade code actions,
+// where the code actions have been deduped. Code actions unrelated to upgrade
+// are deduplicated by the name.
+func SelectUpgradeCodeActions(actions []protocol.CodeAction) []protocol.CodeAction {
+ if len(actions) <= 1 {
+ return actions // return early if no sorting necessary
+ }
+ var versionedUpgrade, latestUpgrade, resetAction protocol.CodeAction
+ var chosenVersionedUpgrade string
+ var selected []protocol.CodeAction
+
+ seen := make(map[string]bool)
+
+ for _, action := range actions {
+ if strings.HasPrefix(action.Title, upgradeCodeActionPrefix) {
+ if v := getUpgradeVersion(action); v == "latest" && latestUpgrade.Title == "" {
+ latestUpgrade = action
+ } else if versionedUpgrade.Title == "" || semver.Compare(v, chosenVersionedUpgrade) > 0 {
+ chosenVersionedUpgrade = v
+ versionedUpgrade = action
+ }
+ } else if strings.HasPrefix(action.Title, "Reset govulncheck") {
+ resetAction = action
+ } else if !seen[action.Command.Title] {
+ seen[action.Command.Title] = true
+ selected = append(selected, action)
+ }
+ }
+ if versionedUpgrade.Title != "" {
+ selected = append(selected, versionedUpgrade)
+ }
+ if latestUpgrade.Title != "" {
+ selected = append(selected, latestUpgrade)
+ }
+ if resetAction.Title != "" {
+ selected = append(selected, resetAction)
+ }
+ return selected
+}
+
+func getUpgradeVersion(p protocol.CodeAction) string {
+ return strings.TrimPrefix(p.Title, upgradeCodeActionPrefix)
+}
diff --git a/gopls/internal/lsp/mod/format.go b/gopls/internal/lsp/mod/format.go
new file mode 100644
index 000000000..9c3942ee0
--- /dev/null
+++ b/gopls/internal/lsp/mod/format.go
@@ -0,0 +1,30 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mod
+
+import (
+ "context"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/internal/event"
+)
+
+func Format(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) ([]protocol.TextEdit, error) {
+ ctx, done := event.Start(ctx, "mod.Format")
+ defer done()
+
+ pm, err := snapshot.ParseMod(ctx, fh)
+ if err != nil {
+ return nil, err
+ }
+ formatted, err := pm.File.Format()
+ if err != nil {
+ return nil, err
+ }
+ // Calculate the edits to be made due to the change.
+ diffs := snapshot.View().Options().ComputeEdits(string(pm.Mapper.Content), string(formatted))
+ return source.ToProtocolEdits(pm.Mapper, diffs)
+}
diff --git a/gopls/internal/lsp/mod/hover.go b/gopls/internal/lsp/mod/hover.go
new file mode 100644
index 000000000..fbd3c0000
--- /dev/null
+++ b/gopls/internal/lsp/mod/hover.go
@@ -0,0 +1,358 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mod
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "sort"
+ "strings"
+
+ "golang.org/x/mod/modfile"
+ "golang.org/x/mod/semver"
+ "golang.org/x/tools/gopls/internal/govulncheck"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/internal/event"
+)
+
+func Hover(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle, position protocol.Position) (*protocol.Hover, error) {
+ var found bool
+ for _, uri := range snapshot.ModFiles() {
+ if fh.URI() == uri {
+ found = true
+ break
+ }
+ }
+
+ // We only provide hover information for the view's go.mod files.
+ if !found {
+ return nil, nil
+ }
+
+ ctx, done := event.Start(ctx, "mod.Hover")
+ defer done()
+
+ // Get the position of the cursor.
+ pm, err := snapshot.ParseMod(ctx, fh)
+ if err != nil {
+ return nil, fmt.Errorf("getting modfile handle: %w", err)
+ }
+ offset, err := pm.Mapper.PositionOffset(position)
+ if err != nil {
+ return nil, fmt.Errorf("computing cursor position: %w", err)
+ }
+
+ // If the cursor position is on a module statement
+ if hover, ok := hoverOnModuleStatement(ctx, pm, offset, snapshot, fh); ok {
+ return hover, nil
+ }
+ return hoverOnRequireStatement(ctx, pm, offset, snapshot, fh)
+}
+
+func hoverOnRequireStatement(ctx context.Context, pm *source.ParsedModule, offset int, snapshot source.Snapshot, fh source.FileHandle) (*protocol.Hover, error) {
+ // Confirm that the cursor is at the position of a require statement.
+ var req *modfile.Require
+ var startOffset, endOffset int
+ for _, r := range pm.File.Require {
+ dep := []byte(r.Mod.Path)
+ s, e := r.Syntax.Start.Byte, r.Syntax.End.Byte
+ i := bytes.Index(pm.Mapper.Content[s:e], dep)
+ if i == -1 {
+ continue
+ }
+ // Shift the start position to the location of the
+ // dependency within the require statement.
+ startOffset, endOffset = s+i, e
+ if startOffset <= offset && offset <= endOffset {
+ req = r
+ break
+ }
+ }
+ // TODO(hyangah): find position for info about vulnerabilities in Go
+
+ // The cursor position is not on a require statement.
+ if req == nil {
+ return nil, nil
+ }
+
+ // Get the vulnerability info.
+ fromGovulncheck := true
+ vs := snapshot.View().Vulnerabilities(fh.URI())[fh.URI()]
+ if vs == nil && snapshot.View().Options().Vulncheck == source.ModeVulncheckImports {
+ var err error
+ vs, err = snapshot.ModVuln(ctx, fh.URI())
+ if err != nil {
+ return nil, err
+ }
+ fromGovulncheck = false
+ }
+ affecting, nonaffecting := lookupVulns(vs, req.Mod.Path, req.Mod.Version)
+
+ // Get the `go mod why` results for the given file.
+ why, err := snapshot.ModWhy(ctx, fh)
+ if err != nil {
+ return nil, err
+ }
+ explanation, ok := why[req.Mod.Path]
+ if !ok {
+ return nil, nil
+ }
+
+ // Get the range to highlight for the hover.
+ // TODO(hyangah): adjust the hover range to include the version number
+ // to match the diagnostics' range.
+ rng, err := pm.Mapper.OffsetRange(startOffset, endOffset)
+ if err != nil {
+ return nil, err
+ }
+ options := snapshot.View().Options()
+ isPrivate := snapshot.View().IsGoPrivatePath(req.Mod.Path)
+ header := formatHeader(req.Mod.Path, options)
+ explanation = formatExplanation(explanation, req, options, isPrivate)
+ vulns := formatVulnerabilities(req.Mod.Path, affecting, nonaffecting, options, fromGovulncheck)
+
+ return &protocol.Hover{
+ Contents: protocol.MarkupContent{
+ Kind: options.PreferredContentFormat,
+ Value: header + vulns + explanation,
+ },
+ Range: rng,
+ }, nil
+}
+
+func hoverOnModuleStatement(ctx context.Context, pm *source.ParsedModule, offset int, snapshot source.Snapshot, fh source.FileHandle) (*protocol.Hover, bool) {
+ module := pm.File.Module
+ if module == nil {
+ return nil, false // no module stmt
+ }
+ if offset < module.Syntax.Start.Byte || offset > module.Syntax.End.Byte {
+ return nil, false // cursor not in module stmt
+ }
+
+ rng, err := pm.Mapper.OffsetRange(module.Syntax.Start.Byte, module.Syntax.End.Byte)
+ if err != nil {
+ return nil, false
+ }
+ fromGovulncheck := true
+ vs := snapshot.View().Vulnerabilities(fh.URI())[fh.URI()]
+
+ if vs == nil && snapshot.View().Options().Vulncheck == source.ModeVulncheckImports {
+ vs, err = snapshot.ModVuln(ctx, fh.URI())
+ if err != nil {
+ return nil, false
+ }
+ fromGovulncheck = false
+ }
+ modpath := "stdlib"
+ goVersion := snapshot.View().GoVersionString()
+ affecting, nonaffecting := lookupVulns(vs, modpath, goVersion)
+ options := snapshot.View().Options()
+ vulns := formatVulnerabilities(modpath, affecting, nonaffecting, options, fromGovulncheck)
+
+ return &protocol.Hover{
+ Contents: protocol.MarkupContent{
+ Kind: options.PreferredContentFormat,
+ Value: vulns,
+ },
+ Range: rng,
+ }, true
+}
+
+func formatHeader(modpath string, options *source.Options) string {
+ var b strings.Builder
+ // Write the heading as an H3.
+ b.WriteString("#### " + modpath)
+ if options.PreferredContentFormat == protocol.Markdown {
+ b.WriteString("\n\n")
+ } else {
+ b.WriteRune('\n')
+ }
+ return b.String()
+}
+
+func lookupVulns(vulns *govulncheck.Result, modpath, version string) (affecting, nonaffecting []*govulncheck.Vuln) {
+ if vulns == nil {
+ return nil, nil
+ }
+ for _, vuln := range vulns.Vulns {
+ for _, mod := range vuln.Modules {
+ if mod.Path != modpath {
+ continue
+ }
+ // It is possible that the source code was changed since the last
+ // govulncheck run and information in the `vulns` info is stale.
+ // For example, imagine that a user is in the middle of updating
+ // problematic modules detected by the govulncheck run by applying
+ // quick fixes. Stale diagnostics can be confusing and prevent the
+ // user from quickly locating the next module to fix.
+ // Ideally we should rerun the analysis with the updated module
+ // dependencies or any other code changes, but we are not yet
+ // in the position of automatically triggering the analysis
+ // (govulncheck can take a while). We also don't know exactly what
+ // part of source code was changed since `vulns` was computed.
+ // As a heuristic, we assume that a user upgrades the affecting
+ // module to the version with the fix or the latest one, and if the
+ // version in the require statement is equal to or higher than the
+ // fixed version, skip the vulnerability information in the hover.
+ // Eventually, the user has to rerun govulncheck.
+ if mod.FixedVersion != "" && semver.IsValid(version) && semver.Compare(mod.FixedVersion, version) <= 0 {
+ continue
+ }
+ if vuln.IsCalled() {
+ affecting = append(affecting, vuln)
+ } else {
+ nonaffecting = append(nonaffecting, vuln)
+ }
+ }
+ }
+ sort.Slice(nonaffecting, func(i, j int) bool { return nonaffecting[i].OSV.ID < nonaffecting[j].OSV.ID })
+ sort.Slice(affecting, func(i, j int) bool { return affecting[i].OSV.ID < affecting[j].OSV.ID })
+ return affecting, nonaffecting
+}
+
+func formatVulnerabilities(modPath string, affecting, nonaffecting []*govulncheck.Vuln, options *source.Options, fromGovulncheck bool) string {
+ if len(affecting) == 0 && len(nonaffecting) == 0 {
+ return ""
+ }
+
+ // TODO(hyangah): can we use go templates to generate hover messages?
+ // Then, we can use a different template for markdown case.
+ useMarkdown := options.PreferredContentFormat == protocol.Markdown
+
+ var b strings.Builder
+
+ if len(affecting) > 0 {
+ // TODO(hyangah): make the message more eyecatching (icon/codicon/color)
+ if len(affecting) == 1 {
+ b.WriteString(fmt.Sprintf("\n**WARNING:** Found %d reachable vulnerability.\n", len(affecting)))
+ } else {
+ b.WriteString(fmt.Sprintf("\n**WARNING:** Found %d reachable vulnerabilities.\n", len(affecting)))
+ }
+ }
+ for _, v := range affecting {
+ fix := fixedVersionInfo(v, modPath)
+ pkgs := vulnerablePkgsInfo(v, modPath, useMarkdown)
+
+ if useMarkdown {
+ fmt.Fprintf(&b, "- [**%v**](%v) %v%v%v\n", v.OSV.ID, href(v.OSV), formatMessage(v), pkgs, fix)
+ } else {
+ fmt.Fprintf(&b, " - [%v] %v (%v) %v%v\n", v.OSV.ID, formatMessage(v), href(v.OSV), pkgs, fix)
+ }
+ }
+ if len(nonaffecting) > 0 {
+ if fromGovulncheck {
+ fmt.Fprintf(&b, "\n**Note:** The project imports packages with known vulnerabilities, but does not call the vulnerable code.\n")
+ } else {
+ fmt.Fprintf(&b, "\n**Note:** The project imports packages with known vulnerabilities. Use `govulncheck` to check if the project uses vulnerable symbols.\n")
+ }
+ }
+ for _, v := range nonaffecting {
+ fix := fixedVersionInfo(v, modPath)
+ pkgs := vulnerablePkgsInfo(v, modPath, useMarkdown)
+ if useMarkdown {
+ fmt.Fprintf(&b, "- [%v](%v) %v%v%v\n", v.OSV.ID, href(v.OSV), formatMessage(v), pkgs, fix)
+ } else {
+ fmt.Fprintf(&b, " - [%v] %v (%v) %v%v\n", v.OSV.ID, formatMessage(v), href(v.OSV), pkgs, fix)
+ }
+ }
+ b.WriteString("\n")
+ return b.String()
+}
+
+func vulnerablePkgsInfo(v *govulncheck.Vuln, modPath string, useMarkdown bool) string {
+ var b bytes.Buffer
+ for _, m := range v.Modules {
+ if m.Path != modPath {
+ continue
+ }
+ if c := len(m.Packages); c == 1 {
+ b.WriteString("\n Vulnerable package is:")
+ } else if c > 1 {
+ b.WriteString("\n Vulnerable packages are:")
+ }
+ for _, pkg := range m.Packages {
+ if useMarkdown {
+ b.WriteString("\n * `")
+ } else {
+ b.WriteString("\n ")
+ }
+ b.WriteString(pkg.Path)
+ if useMarkdown {
+ b.WriteString("`")
+ }
+ }
+ }
+ if b.Len() == 0 {
+ return ""
+ }
+ return b.String()
+}
+func fixedVersionInfo(v *govulncheck.Vuln, modPath string) string {
+ fix := "\n\n **No fix is available.**"
+ for _, m := range v.Modules {
+ if m.Path != modPath {
+ continue
+ }
+ if m.FixedVersion != "" {
+ fix = "\n\n Fixed in " + m.FixedVersion + "."
+ }
+ break
+ }
+ return fix
+}
+
+func formatExplanation(text string, req *modfile.Require, options *source.Options, isPrivate bool) string {
+ text = strings.TrimSuffix(text, "\n")
+ splt := strings.Split(text, "\n")
+ length := len(splt)
+
+ var b strings.Builder
+
+ // If the explanation is 2 lines, then it is of the form:
+ // # golang.org/x/text/encoding
+ // (main module does not need package golang.org/x/text/encoding)
+ if length == 2 {
+ b.WriteString(splt[1])
+ return b.String()
+ }
+
+ imp := splt[length-1] // import path
+ reference := imp
+ // See golang/go#36998: don't link to modules matching GOPRIVATE.
+ if !isPrivate && options.PreferredContentFormat == protocol.Markdown {
+ target := imp
+ if strings.ToLower(options.LinkTarget) == "pkg.go.dev" {
+ target = strings.Replace(target, req.Mod.Path, req.Mod.String(), 1)
+ }
+ reference = fmt.Sprintf("[%s](%s)", imp, source.BuildLink(options.LinkTarget, target, ""))
+ }
+ b.WriteString("This module is necessary because " + reference + " is imported in")
+
+ // If the explanation is 3 lines, then it is of the form:
+ // # golang.org/x/tools
+ // modtest
+ // golang.org/x/tools/go/packages
+ if length == 3 {
+ msg := fmt.Sprintf(" `%s`.", splt[1])
+ b.WriteString(msg)
+ return b.String()
+ }
+
+ // If the explanation is more than 3 lines, then it is of the form:
+ // # golang.org/x/text/language
+ // rsc.io/quote
+ // rsc.io/sampler
+ // golang.org/x/text/language
+ b.WriteString(":\n```text")
+ dash := ""
+ for _, imp := range splt[1 : length-1] {
+ dash += "-"
+ b.WriteString("\n" + dash + " " + imp)
+ }
+ b.WriteString("\n```")
+ return b.String()
+}
diff --git a/gopls/internal/lsp/mod/mod_test.go b/gopls/internal/lsp/mod/mod_test.go
new file mode 100644
index 000000000..c2aa1af6f
--- /dev/null
+++ b/gopls/internal/lsp/mod/mod_test.go
@@ -0,0 +1,57 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mod
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "golang.org/x/tools/gopls/internal/lsp/cache"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/lsp/tests"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/testenv"
+)
+
+func TestMain(m *testing.M) {
+ testenv.ExitIfSmallMachine()
+ os.Exit(m.Run())
+}
+
+func TestModfileRemainsUnchanged(t *testing.T) {
+ ctx := tests.Context(t)
+ session := cache.NewSession(ctx, cache.New(nil), nil)
+ options := source.DefaultOptions().Clone()
+ tests.DefaultOptions(options)
+ options.TempModfile = true
+ options.Env = map[string]string{"GOPACKAGESDRIVER": "off", "GOROOT": ""}
+
+ // Make sure to copy the test directory to a temporary directory so we do not
+ // modify the test code or add go.sum files when we run the tests.
+ folder, err := tests.CopyFolderToTempDir(filepath.Join("testdata", "unchanged"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(folder)
+
+ before, err := ioutil.ReadFile(filepath.Join(folder, "go.mod"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, _, release, err := session.NewView(ctx, "diagnostics_test", span.URIFromPath(folder), options)
+ if err != nil {
+ t.Fatal(err)
+ }
+ release()
+ after, err := ioutil.ReadFile(filepath.Join(folder, "go.mod"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if string(before) != string(after) {
+ t.Errorf("the real go.mod file was changed even when tempModfile=true")
+ }
+}
diff --git a/internal/lsp/mod/testdata/unchanged/go.mod b/gopls/internal/lsp/mod/testdata/unchanged/go.mod
index e3d13cebe..e3d13cebe 100644
--- a/internal/lsp/mod/testdata/unchanged/go.mod
+++ b/gopls/internal/lsp/mod/testdata/unchanged/go.mod
diff --git a/internal/lsp/mod/testdata/unchanged/main.go b/gopls/internal/lsp/mod/testdata/unchanged/main.go
index b258445f4..b258445f4 100644
--- a/internal/lsp/mod/testdata/unchanged/main.go
+++ b/gopls/internal/lsp/mod/testdata/unchanged/main.go
diff --git a/gopls/internal/lsp/progress/progress.go b/gopls/internal/lsp/progress/progress.go
new file mode 100644
index 000000000..32ac91186
--- /dev/null
+++ b/gopls/internal/lsp/progress/progress.go
@@ -0,0 +1,271 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package progress
+
+import (
+ "context"
+ "fmt"
+ "math/rand"
+ "strconv"
+ "strings"
+ "sync"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/event/tag"
+ "golang.org/x/tools/internal/xcontext"
+)
+
+type Tracker struct {
+ client protocol.Client
+ supportsWorkDoneProgress bool
+
+ mu sync.Mutex
+ inProgress map[protocol.ProgressToken]*WorkDone
+}
+
+func NewTracker(client protocol.Client) *Tracker {
+ return &Tracker{
+ client: client,
+ inProgress: make(map[protocol.ProgressToken]*WorkDone),
+ }
+}
+
+func (tracker *Tracker) SetSupportsWorkDoneProgress(b bool) {
+ tracker.supportsWorkDoneProgress = b
+}
+
+// Start notifies the client of work being done on the server. It uses either
+// ShowMessage RPCs or $/progress messages, depending on the capabilities of
+// the client. The returned WorkDone handle may be used to report incremental
+// progress, and to report work completion. In particular, it is an error to
+// call start and not call end(...) on the returned WorkDone handle.
+//
+// If token is empty, a token will be randomly generated.
+//
+// The progress item is considered cancellable if the given cancel func is
+// non-nil. In this case, cancel is called when the work done
+//
+// Example:
+//
+// func Generate(ctx) (err error) {
+// ctx, cancel := context.WithCancel(ctx)
+// defer cancel()
+// work := s.progress.start(ctx, "generate", "running go generate", cancel)
+// defer func() {
+// if err != nil {
+// work.end(ctx, fmt.Sprintf("generate failed: %v", err))
+// } else {
+// work.end(ctx, "done")
+// }
+// }()
+// // Do the work...
+// }
+func (t *Tracker) Start(ctx context.Context, title, message string, token protocol.ProgressToken, cancel func()) *WorkDone {
+ ctx = xcontext.Detach(ctx) // progress messages should not be cancelled
+ wd := &WorkDone{
+ client: t.client,
+ token: token,
+ cancel: cancel,
+ }
+ if !t.supportsWorkDoneProgress {
+ // Previous iterations of this fallback attempted to retain cancellation
+ // support by using ShowMessageCommand with a 'Cancel' button, but this is
+ // not ideal as the 'Cancel' dialog stays open even after the command
+ // completes.
+ //
+ // Just show a simple message. Clients can implement workDone progress
+ // reporting to get cancellation support.
+ if err := wd.client.ShowMessage(ctx, &protocol.ShowMessageParams{
+ Type: protocol.Log,
+ Message: message,
+ }); err != nil {
+ event.Error(ctx, "showing start message for "+title, err)
+ }
+ return wd
+ }
+ if wd.token == nil {
+ token = strconv.FormatInt(rand.Int63(), 10)
+ err := wd.client.WorkDoneProgressCreate(ctx, &protocol.WorkDoneProgressCreateParams{
+ Token: token,
+ })
+ if err != nil {
+ wd.err = err
+ event.Error(ctx, "starting work for "+title, err)
+ return wd
+ }
+ wd.token = token
+ }
+ // At this point we have a token that the client knows about. Store the token
+ // before starting work.
+ t.mu.Lock()
+ t.inProgress[wd.token] = wd
+ t.mu.Unlock()
+ wd.cleanup = func() {
+ t.mu.Lock()
+ delete(t.inProgress, token)
+ t.mu.Unlock()
+ }
+ err := wd.client.Progress(ctx, &protocol.ProgressParams{
+ Token: wd.token,
+ Value: &protocol.WorkDoneProgressBegin{
+ Kind: "begin",
+ Cancellable: wd.cancel != nil,
+ Message: message,
+ Title: title,
+ },
+ })
+ if err != nil {
+ event.Error(ctx, "progress begin", err)
+ }
+ return wd
+}
+
+func (t *Tracker) Cancel(token protocol.ProgressToken) error {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ wd, ok := t.inProgress[token]
+ if !ok {
+ return fmt.Errorf("token %q not found in progress", token)
+ }
+ if wd.cancel == nil {
+ return fmt.Errorf("work %q is not cancellable", token)
+ }
+ wd.doCancel()
+ return nil
+}
+
+// WorkDone represents a unit of work that is reported to the client via the
+// progress API.
+type WorkDone struct {
+ client protocol.Client
+ // If token is nil, this workDone object uses the ShowMessage API, rather
+ // than $/progress.
+ token protocol.ProgressToken
+ // err is set if progress reporting is broken for some reason (for example,
+ // if there was an initial error creating a token).
+ err error
+
+ cancelMu sync.Mutex
+ cancelled bool
+ cancel func()
+
+ cleanup func()
+}
+
+func (wd *WorkDone) Token() protocol.ProgressToken {
+ return wd.token
+}
+
+func (wd *WorkDone) doCancel() {
+ wd.cancelMu.Lock()
+ defer wd.cancelMu.Unlock()
+ if !wd.cancelled {
+ wd.cancel()
+ }
+}
+
+// Report reports an update on WorkDone report back to the client.
+func (wd *WorkDone) Report(ctx context.Context, message string, percentage float64) {
+ ctx = xcontext.Detach(ctx) // progress messages should not be cancelled
+ if wd == nil {
+ return
+ }
+ wd.cancelMu.Lock()
+ cancelled := wd.cancelled
+ wd.cancelMu.Unlock()
+ if cancelled {
+ return
+ }
+ if wd.err != nil || wd.token == nil {
+ // Not using the workDone API, so we do nothing. It would be far too spammy
+ // to send incremental messages.
+ return
+ }
+ message = strings.TrimSuffix(message, "\n")
+ err := wd.client.Progress(ctx, &protocol.ProgressParams{
+ Token: wd.token,
+ Value: &protocol.WorkDoneProgressReport{
+ Kind: "report",
+ // Note that in the LSP spec, the value of Cancellable may be changed to
+ // control whether the cancel button in the UI is enabled. Since we don't
+ // yet use this feature, the value is kept constant here.
+ Cancellable: wd.cancel != nil,
+ Message: message,
+ Percentage: uint32(percentage),
+ },
+ })
+ if err != nil {
+ event.Error(ctx, "reporting progress", err)
+ }
+}
+
+// End reports a workdone completion back to the client.
+func (wd *WorkDone) End(ctx context.Context, message string) {
+ ctx = xcontext.Detach(ctx) // progress messages should not be cancelled
+ if wd == nil {
+ return
+ }
+ var err error
+ switch {
+ case wd.err != nil:
+ // There is a prior error.
+ case wd.token == nil:
+ // We're falling back to message-based reporting.
+ err = wd.client.ShowMessage(ctx, &protocol.ShowMessageParams{
+ Type: protocol.Info,
+ Message: message,
+ })
+ default:
+ err = wd.client.Progress(ctx, &protocol.ProgressParams{
+ Token: wd.token,
+ Value: &protocol.WorkDoneProgressEnd{
+ Kind: "end",
+ Message: message,
+ },
+ })
+ }
+ if err != nil {
+ event.Error(ctx, "ending work", err)
+ }
+ if wd.cleanup != nil {
+ wd.cleanup()
+ }
+}
+
+// EventWriter writes every incoming []byte to
+// event.Print with the operation=generate tag
+// to distinguish its logs from others.
+type EventWriter struct {
+ ctx context.Context
+ operation string
+}
+
+func NewEventWriter(ctx context.Context, operation string) *EventWriter {
+ return &EventWriter{ctx: ctx, operation: operation}
+}
+
+func (ew *EventWriter) Write(p []byte) (n int, err error) {
+ event.Log(ew.ctx, string(p), tag.Operation.Of(ew.operation))
+ return len(p), nil
+}
+
+// WorkDoneWriter wraps a workDone handle to provide a Writer interface,
+// so that workDone reporting can more easily be hooked into commands.
+type WorkDoneWriter struct {
+ // In order to implement the io.Writer interface, we must close over ctx.
+ ctx context.Context
+ wd *WorkDone
+}
+
+func NewWorkDoneWriter(ctx context.Context, wd *WorkDone) *WorkDoneWriter {
+ return &WorkDoneWriter{ctx: ctx, wd: wd}
+}
+
+func (wdw *WorkDoneWriter) Write(p []byte) (n int, err error) {
+ wdw.wd.Report(wdw.ctx, string(p), 0)
+ // Don't fail just because of a failure to report progress.
+ return len(p), nil
+}
diff --git a/gopls/internal/lsp/progress/progress_test.go b/gopls/internal/lsp/progress/progress_test.go
new file mode 100644
index 000000000..ef87eba12
--- /dev/null
+++ b/gopls/internal/lsp/progress/progress_test.go
@@ -0,0 +1,161 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package progress
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "testing"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+)
+
+type fakeClient struct {
+ protocol.Client
+
+ token protocol.ProgressToken
+
+ mu sync.Mutex
+ created, begun, reported, messages, ended int
+}
+
+func (c *fakeClient) checkToken(token protocol.ProgressToken) {
+ if token == nil {
+ panic("nil token in progress message")
+ }
+ if c.token != nil && c.token != token {
+ panic(fmt.Errorf("invalid token in progress message: got %v, want %v", token, c.token))
+ }
+}
+
+func (c *fakeClient) WorkDoneProgressCreate(ctx context.Context, params *protocol.WorkDoneProgressCreateParams) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.checkToken(params.Token)
+ c.created++
+ return nil
+}
+
+func (c *fakeClient) Progress(ctx context.Context, params *protocol.ProgressParams) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.checkToken(params.Token)
+ switch params.Value.(type) {
+ case *protocol.WorkDoneProgressBegin:
+ c.begun++
+ case *protocol.WorkDoneProgressReport:
+ c.reported++
+ case *protocol.WorkDoneProgressEnd:
+ c.ended++
+ default:
+ panic(fmt.Errorf("unknown progress value %T", params.Value))
+ }
+ return nil
+}
+
+func (c *fakeClient) ShowMessage(context.Context, *protocol.ShowMessageParams) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.messages++
+ return nil
+}
+
+func setup(token protocol.ProgressToken) (context.Context, *Tracker, *fakeClient) {
+ c := &fakeClient{}
+ tracker := NewTracker(c)
+ tracker.SetSupportsWorkDoneProgress(true)
+ return context.Background(), tracker, c
+}
+
+func TestProgressTracker_Reporting(t *testing.T) {
+ for _, test := range []struct {
+ name string
+ supported bool
+ token protocol.ProgressToken
+ wantReported, wantCreated, wantBegun, wantEnded int
+ wantMessages int
+ }{
+ {
+ name: "unsupported",
+ wantMessages: 2,
+ },
+ {
+ name: "random token",
+ supported: true,
+ wantCreated: 1,
+ wantBegun: 1,
+ wantReported: 1,
+ wantEnded: 1,
+ },
+ {
+ name: "string token",
+ supported: true,
+ token: "token",
+ wantBegun: 1,
+ wantReported: 1,
+ wantEnded: 1,
+ },
+ {
+ name: "numeric token",
+ supported: true,
+ token: 1,
+ wantReported: 1,
+ wantBegun: 1,
+ wantEnded: 1,
+ },
+ } {
+ test := test
+ t.Run(test.name, func(t *testing.T) {
+ ctx, tracker, client := setup(test.token)
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+ tracker.supportsWorkDoneProgress = test.supported
+ work := tracker.Start(ctx, "work", "message", test.token, nil)
+ client.mu.Lock()
+ gotCreated, gotBegun := client.created, client.begun
+ client.mu.Unlock()
+ if gotCreated != test.wantCreated {
+ t.Errorf("got %d created tokens, want %d", gotCreated, test.wantCreated)
+ }
+ if gotBegun != test.wantBegun {
+ t.Errorf("got %d work begun, want %d", gotBegun, test.wantBegun)
+ }
+ // Ignore errors: this is just testing the reporting behavior.
+ work.Report(ctx, "report", 50)
+ client.mu.Lock()
+ gotReported := client.reported
+ client.mu.Unlock()
+ if gotReported != test.wantReported {
+ t.Errorf("got %d progress reports, want %d", gotReported, test.wantCreated)
+ }
+ work.End(ctx, "done")
+ client.mu.Lock()
+ gotEnded, gotMessages := client.ended, client.messages
+ client.mu.Unlock()
+ if gotEnded != test.wantEnded {
+ t.Errorf("got %d ended reports, want %d", gotEnded, test.wantEnded)
+ }
+ if gotMessages != test.wantMessages {
+ t.Errorf("got %d messages, want %d", gotMessages, test.wantMessages)
+ }
+ })
+ }
+}
+
+func TestProgressTracker_Cancellation(t *testing.T) {
+ for _, token := range []protocol.ProgressToken{nil, 1, "a"} {
+ ctx, tracker, _ := setup(token)
+ var canceled bool
+ cancel := func() { canceled = true }
+ work := tracker.Start(ctx, "work", "message", token, cancel)
+ if err := tracker.Cancel(work.Token()); err != nil {
+ t.Fatal(err)
+ }
+ if !canceled {
+ t.Errorf("tracker.cancel(...): cancel not called")
+ }
+ }
+}
diff --git a/internal/lsp/protocol/codeactionkind.go b/gopls/internal/lsp/protocol/codeactionkind.go
index 9a95800fb..9a95800fb 100644
--- a/internal/lsp/protocol/codeactionkind.go
+++ b/gopls/internal/lsp/protocol/codeactionkind.go
diff --git a/internal/lsp/protocol/context.go b/gopls/internal/lsp/protocol/context.go
index 487e4dfe5..487e4dfe5 100644
--- a/internal/lsp/protocol/context.go
+++ b/gopls/internal/lsp/protocol/context.go
diff --git a/gopls/internal/lsp/protocol/doc.go b/gopls/internal/lsp/protocol/doc.go
new file mode 100644
index 000000000..4a7f90439
--- /dev/null
+++ b/gopls/internal/lsp/protocol/doc.go
@@ -0,0 +1,18 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run ./generate
+
+// Package protocol contains the structs that map directly to the
+// request and response messages of the Language Server Protocol.
+//
+// It is a literal transcription, with unmodified comments, and only the changes
+// required to make it go code.
+// Names are uppercased to export them.
+// All fields have JSON tags added to correct the names.
+// Fields marked with a ? are also marked as "omitempty"
+// Fields that are "|| null" are made pointers
+// Fields that are string or number are left as string
+// Fields that are type "number" are made float64
+package protocol
diff --git a/gopls/internal/lsp/protocol/enums.go b/gopls/internal/lsp/protocol/enums.go
new file mode 100644
index 000000000..82398e221
--- /dev/null
+++ b/gopls/internal/lsp/protocol/enums.go
@@ -0,0 +1,231 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protocol
+
+import (
+ "fmt"
+)
+
+var (
+ namesTextDocumentSyncKind [int(Incremental) + 1]string
+ namesMessageType [int(Log) + 1]string
+ namesFileChangeType [int(Deleted) + 1]string
+ namesWatchKind [int(WatchDelete) + 1]string
+ namesCompletionTriggerKind [int(TriggerForIncompleteCompletions) + 1]string
+ namesDiagnosticSeverity [int(SeverityHint) + 1]string
+ namesDiagnosticTag [int(Unnecessary) + 1]string
+ namesCompletionItemKind [int(TypeParameterCompletion) + 1]string
+ namesInsertTextFormat [int(SnippetTextFormat) + 1]string
+ namesDocumentHighlightKind [int(Write) + 1]string
+ namesSymbolKind [int(TypeParameter) + 1]string
+ namesTextDocumentSaveReason [int(FocusOut) + 1]string
+)
+
+func init() {
+ namesTextDocumentSyncKind[int(None)] = "None"
+ namesTextDocumentSyncKind[int(Full)] = "Full"
+ namesTextDocumentSyncKind[int(Incremental)] = "Incremental"
+
+ namesMessageType[int(Error)] = "Error"
+ namesMessageType[int(Warning)] = "Warning"
+ namesMessageType[int(Info)] = "Info"
+ namesMessageType[int(Log)] = "Log"
+
+ namesFileChangeType[int(Created)] = "Created"
+ namesFileChangeType[int(Changed)] = "Changed"
+ namesFileChangeType[int(Deleted)] = "Deleted"
+
+ namesWatchKind[int(WatchCreate)] = "WatchCreate"
+ namesWatchKind[int(WatchChange)] = "WatchChange"
+ namesWatchKind[int(WatchDelete)] = "WatchDelete"
+
+ namesCompletionTriggerKind[int(Invoked)] = "Invoked"
+ namesCompletionTriggerKind[int(TriggerCharacter)] = "TriggerCharacter"
+ namesCompletionTriggerKind[int(TriggerForIncompleteCompletions)] = "TriggerForIncompleteCompletions"
+
+ namesDiagnosticSeverity[int(SeverityError)] = "Error"
+ namesDiagnosticSeverity[int(SeverityWarning)] = "Warning"
+ namesDiagnosticSeverity[int(SeverityInformation)] = "Information"
+ namesDiagnosticSeverity[int(SeverityHint)] = "Hint"
+
+ namesDiagnosticTag[int(Unnecessary)] = "Unnecessary"
+
+ namesCompletionItemKind[int(TextCompletion)] = "text"
+ namesCompletionItemKind[int(MethodCompletion)] = "method"
+ namesCompletionItemKind[int(FunctionCompletion)] = "func"
+ namesCompletionItemKind[int(ConstructorCompletion)] = "constructor"
+ namesCompletionItemKind[int(FieldCompletion)] = "field"
+ namesCompletionItemKind[int(VariableCompletion)] = "var"
+ namesCompletionItemKind[int(ClassCompletion)] = "type"
+ namesCompletionItemKind[int(InterfaceCompletion)] = "interface"
+ namesCompletionItemKind[int(ModuleCompletion)] = "package"
+ namesCompletionItemKind[int(PropertyCompletion)] = "property"
+ namesCompletionItemKind[int(UnitCompletion)] = "unit"
+ namesCompletionItemKind[int(ValueCompletion)] = "value"
+ namesCompletionItemKind[int(EnumCompletion)] = "enum"
+ namesCompletionItemKind[int(KeywordCompletion)] = "keyword"
+ namesCompletionItemKind[int(SnippetCompletion)] = "snippet"
+ namesCompletionItemKind[int(ColorCompletion)] = "color"
+ namesCompletionItemKind[int(FileCompletion)] = "file"
+ namesCompletionItemKind[int(ReferenceCompletion)] = "reference"
+ namesCompletionItemKind[int(FolderCompletion)] = "folder"
+ namesCompletionItemKind[int(EnumMemberCompletion)] = "enumMember"
+ namesCompletionItemKind[int(ConstantCompletion)] = "const"
+ namesCompletionItemKind[int(StructCompletion)] = "struct"
+ namesCompletionItemKind[int(EventCompletion)] = "event"
+ namesCompletionItemKind[int(OperatorCompletion)] = "operator"
+ namesCompletionItemKind[int(TypeParameterCompletion)] = "typeParam"
+
+ namesInsertTextFormat[int(PlainTextTextFormat)] = "PlainText"
+ namesInsertTextFormat[int(SnippetTextFormat)] = "Snippet"
+
+ namesDocumentHighlightKind[int(Text)] = "Text"
+ namesDocumentHighlightKind[int(Read)] = "Read"
+ namesDocumentHighlightKind[int(Write)] = "Write"
+
+ namesSymbolKind[int(File)] = "File"
+ namesSymbolKind[int(Module)] = "Module"
+ namesSymbolKind[int(Namespace)] = "Namespace"
+ namesSymbolKind[int(Package)] = "Package"
+ namesSymbolKind[int(Class)] = "Class"
+ namesSymbolKind[int(Method)] = "Method"
+ namesSymbolKind[int(Property)] = "Property"
+ namesSymbolKind[int(Field)] = "Field"
+ namesSymbolKind[int(Constructor)] = "Constructor"
+ namesSymbolKind[int(Enum)] = "Enum"
+ namesSymbolKind[int(Interface)] = "Interface"
+ namesSymbolKind[int(Function)] = "Function"
+ namesSymbolKind[int(Variable)] = "Variable"
+ namesSymbolKind[int(Constant)] = "Constant"
+ namesSymbolKind[int(String)] = "String"
+ namesSymbolKind[int(Number)] = "Number"
+ namesSymbolKind[int(Boolean)] = "Boolean"
+ namesSymbolKind[int(Array)] = "Array"
+ namesSymbolKind[int(Object)] = "Object"
+ namesSymbolKind[int(Key)] = "Key"
+ namesSymbolKind[int(Null)] = "Null"
+ namesSymbolKind[int(EnumMember)] = "EnumMember"
+ namesSymbolKind[int(Struct)] = "Struct"
+ namesSymbolKind[int(Event)] = "Event"
+ namesSymbolKind[int(Operator)] = "Operator"
+ namesSymbolKind[int(TypeParameter)] = "TypeParameter"
+
+ namesTextDocumentSaveReason[int(Manual)] = "Manual"
+ namesTextDocumentSaveReason[int(AfterDelay)] = "AfterDelay"
+ namesTextDocumentSaveReason[int(FocusOut)] = "FocusOut"
+}
+
+func formatEnum(f fmt.State, c rune, i int, names []string, unknown string) {
+ s := ""
+ if i >= 0 && i < len(names) {
+ s = names[i]
+ }
+ if s != "" {
+ fmt.Fprint(f, s)
+ } else {
+ fmt.Fprintf(f, "%s(%d)", unknown, i)
+ }
+}
+
+func parseEnum(s string, names []string) int {
+ for i, name := range names {
+ if s == name {
+ return i
+ }
+ }
+ return 0
+}
+
+func (e TextDocumentSyncKind) Format(f fmt.State, c rune) {
+ formatEnum(f, c, int(e), namesTextDocumentSyncKind[:], "TextDocumentSyncKind")
+}
+
+func ParseTextDocumentSyncKind(s string) TextDocumentSyncKind {
+ return TextDocumentSyncKind(parseEnum(s, namesTextDocumentSyncKind[:]))
+}
+
+func (e MessageType) Format(f fmt.State, c rune) {
+ formatEnum(f, c, int(e), namesMessageType[:], "MessageType")
+}
+
+func ParseMessageType(s string) MessageType {
+ return MessageType(parseEnum(s, namesMessageType[:]))
+}
+
+func (e FileChangeType) Format(f fmt.State, c rune) {
+ formatEnum(f, c, int(e), namesFileChangeType[:], "FileChangeType")
+}
+
+func ParseFileChangeType(s string) FileChangeType {
+ return FileChangeType(parseEnum(s, namesFileChangeType[:]))
+}
+
+func ParseWatchKind(s string) WatchKind {
+ return WatchKind(parseEnum(s, namesWatchKind[:]))
+}
+
+func (e CompletionTriggerKind) Format(f fmt.State, c rune) {
+ formatEnum(f, c, int(e), namesCompletionTriggerKind[:], "CompletionTriggerKind")
+}
+
+func ParseCompletionTriggerKind(s string) CompletionTriggerKind {
+ return CompletionTriggerKind(parseEnum(s, namesCompletionTriggerKind[:]))
+}
+
+func (e DiagnosticSeverity) Format(f fmt.State, c rune) {
+ formatEnum(f, c, int(e), namesDiagnosticSeverity[:], "DiagnosticSeverity")
+}
+
+func ParseDiagnosticSeverity(s string) DiagnosticSeverity {
+ return DiagnosticSeverity(parseEnum(s, namesDiagnosticSeverity[:]))
+}
+
+func (e DiagnosticTag) Format(f fmt.State, c rune) {
+ formatEnum(f, c, int(e), namesDiagnosticTag[:], "DiagnosticTag")
+}
+
+func ParseDiagnosticTag(s string) DiagnosticTag {
+ return DiagnosticTag(parseEnum(s, namesDiagnosticTag[:]))
+}
+
+func (e CompletionItemKind) Format(f fmt.State, c rune) {
+ formatEnum(f, c, int(e), namesCompletionItemKind[:], "CompletionItemKind")
+}
+
+func ParseCompletionItemKind(s string) CompletionItemKind {
+ return CompletionItemKind(parseEnum(s, namesCompletionItemKind[:]))
+}
+
+func (e InsertTextFormat) Format(f fmt.State, c rune) {
+ formatEnum(f, c, int(e), namesInsertTextFormat[:], "InsertTextFormat")
+}
+
+func ParseInsertTextFormat(s string) InsertTextFormat {
+ return InsertTextFormat(parseEnum(s, namesInsertTextFormat[:]))
+}
+
+func (e DocumentHighlightKind) Format(f fmt.State, c rune) {
+ formatEnum(f, c, int(e), namesDocumentHighlightKind[:], "DocumentHighlightKind")
+}
+
+func ParseDocumentHighlightKind(s string) DocumentHighlightKind {
+ return DocumentHighlightKind(parseEnum(s, namesDocumentHighlightKind[:]))
+}
+
+func (e SymbolKind) Format(f fmt.State, c rune) {
+ formatEnum(f, c, int(e), namesSymbolKind[:], "SymbolKind")
+}
+
+func ParseSymbolKind(s string) SymbolKind {
+ return SymbolKind(parseEnum(s, namesSymbolKind[:]))
+}
+
+func (e TextDocumentSaveReason) Format(f fmt.State, c rune) {
+ formatEnum(f, c, int(e), namesTextDocumentSaveReason[:], "TextDocumentSaveReason")
+}
+
+func ParseTextDocumentSaveReason(s string) TextDocumentSaveReason {
+ return TextDocumentSaveReason(parseEnum(s, namesTextDocumentSaveReason[:]))
+}
diff --git a/gopls/internal/lsp/protocol/generate/README.md b/gopls/internal/lsp/protocol/generate/README.md
new file mode 100644
index 000000000..c8047f32b
--- /dev/null
+++ b/gopls/internal/lsp/protocol/generate/README.md
@@ -0,0 +1,136 @@
+# LSP Support for gopls
+
+## The protocol
+
+The LSP protocol exchanges json-encoded messages between the client and the server.
+(gopls is the server.) The messages are either Requests, which require Responses, or
+Notifications, which generate no response. Each Request or Notification has a method name
+such as "textDocument/hover" that indicates its meaning and determines which function in the server will handle it.
+The protocol is described in a
+[web page](https://microsoft.github.io/language-server-protocol/specifications/lsp/3.18/specification/),
+in words, and in a json file (metaModel.json) available either linked towards the bottom of the
+web page, or in the vscode-languageserver-node repository. This code uses the latter so the
+exact version can be tied to a githash. By default, the command will download the `github.com/microsoft/vscode-languageserver-node` repository to a temporary directory.
+
+The specification has five sections
+1. Requests, which describe the Request and Response types for request methods (e.g., *textDocument/didChange*),
+2. Notifications, which describe the Request types for notification methods,
+3. Structures, which describe named struct-like types,
+4. TypeAliases, which describe type aliases,
+5. Enumerations, which describe named constants.
+
+Requests and Notifications are tagged with a Method (e.g., `"textDocument/hover"`).
+The specification does not specify the names of the functions that handle the messages. These
+names are specified by the `methodNames` map. Enumerations generate Go `const`s, but
+in Typescript they are scoped to namespaces, while in Go they are scoped to a package, so the Go names
+may need to be modified to avoid name collisions. (See the `disambiguate` map, and its use.)
+
+Finally, the specified types are Typescript types, which are quite different from Go types.
+
+### Optionality
+The specification can mark fields in structs as Optional. The client distinguishes between missing
+fields and `null` fields in some cases. The Go translation for an optional type
+should be making sure the field's value
+can be `nil`, and adding the json tag `,omitempty`. The former condition would be satisfied by
+adding `*` to the field's type if the type is not a reference type.
+
+### Types
+The specification uses a number of different types, only a few of which correspond directly to Go types.
+The specification's types are "base", "reference", "map", "literal", "stringLiteral", "tuple", "and", "or".
+The "base" types correspond directly to Go types, although some Go types needs to be chosen for `URI` and `DocumentUri`. (The "base" types`RegExp`, `BooleanLiteral`, `NumericLiteral` never occur.)
+
+"reference" types are the struct-like types in the Structures section of the specification. The given
+names are suitable for Go to use, except the code needs to change names like `_Initialze` to `XInitialize` so
+they are exported for json marshaling and unmarshaling.
+
+"map" types are just like Go. (The key type in all of them is `DocumentUri`.)
+
+"stringLiteral" types are types whose type name and value are a single string. The chosen Go equivalent
+is to make the type `string` and the value a constant. (The alternative would be to generate a new
+named type, which seemed redundant.)
+
+"literal" types are like Go anonymous structs, so they have to be given a name. (All instances
+of the remaining types have to be given names. One approach is to construct the name from the components
+of the type, but this leads to misleading punning, and is unstable if components are added. The other approach
+is to construct the name from the context of the definition, that is, from the types it is defined within.
+For instance `Lit__InitializeParams_clientInfo` is the "literal" type at the
+`clientInfo` field in the `_InitializeParams`
+struct. Although this choice is sensitive to the ordering of the components, the code uses this approach,
+presuming that reordering components is an unlikely protocol change.)
+
+"tuple" types are generated as Go structs. (There is only one, with two `uint32` fields.)
+
+"and" types are Go structs with embedded type names. (There is only one, `And_Param_workspace_configuration`.)
+
+"or" types are the most complicated. There are a lot of them and there is no simple Go equivalent.
+They are defined as structs with a single `Value interface{}` field and custom json marshaling
+and unmarshaling code. Users can assign anything to `Value` but the type will be checked, and
+correctly marshaled, by the custom marshaling code. The unmarshaling code checks types, so `Value`
+will have one of the permitted types. (`nil` is always allowed.) There are about 40 "or" types that
+have a single non-null component, and these are converted to the component type.
+
+## Processing
+The code parses the json specification file, and scans all the types. It assigns names, as described
+above, to the types that are unnamed in the specification, and constructs Go equivalents as required.
+(Most of this code is in typenames.go.)
+
+There are four output files. tsclient.go and tsserver.go contain the definition and implementation
+of the `protocol.Client` and `protocol.Server` types and the code that dispatches on the Method
+of the Request or Notification. tsjson.go contains the custom marshaling and unmarshaling code.
+And tsprotocol.go contains the type and const definitions.
+
+### Accommodating gopls
+As the code generates output, mostly in generateoutput.go and main.go,
+it makes adjustments so that no changes are required to the existing Go code.
+(Organizing the computation this way makes the code's structure simpler, but results in
+a lot of unused types.)
+There are three major classes of these adjustments, and leftover special cases.
+
+The first major
+adjustment is to change generated type names to the ones gopls expects. Some of these don't change the
+semantics of the type, just the name.
+But for historical reasons a lot of them replace "or" types by a single
+component of the type. (Until fairly recently Go only saw or used only one of components.)
+The `goplsType` map in tables.go controls this process.
+
+The second major adjustment is to the types of fields of structs, which is done using the
+`renameProp` map in tables.go.
+
+The third major adjustment handles optionality, controlling `*` and `,omitempty` placement when
+the default rules don't match what gopls is expecting. (The map is `goplsStar`, also in tables.go)
+(If the intermediate components in expressions of the form `A.B.C.S` were optional, the code would need
+a lot of useless checking for nils. Typescript has a language construct to avoid most checks.)
+
+Then there are some additional special cases. There are a few places with adjustments to avoid
+recursive types. For instance `LSPArray` is `[]LSPAny`, but `LSPAny` is an "or" type including `LSPArray`.
+The solution is to make `LSPAny` an `interface{}`. Another instance is `_InitializeParams.trace`
+whose type is an "or" of 3 stringLiterals, which just becomes a `string`.
+
+### Checking
+`TestAll(t *testing.T)` checks that there are no unexpected fields in the json specification.
+
+While the code is executing, it checks that all the entries in the maps in tables.go are used.
+It also checks that the entries in `renameProp` and `goplsStar` are not redundant.
+
+As a one-time check on the first release of this code, diff-ing the existing and generated tsclient.go
+and tsserver.go code results in only whitespace and comment diffs. The existing and generated
+tsprotocol.go differ in whitespace and comments, and in a substantial number of new type definitions
+that the older, more heuristic, code did not generate. (And the unused type `_InitializeParams` differs
+slightly between the new and the old, and is not worth fixing.)
+
+### Some history
+The original stub code was written by hand, but with the protocol under active development, that
+couldn't last. The web page existed before the json specification, but it lagged the implementation
+and was hard to process by machine. So the earlier version of the generating code was written in Typescript, and
+used the Typescript compiler's API to parse the protocol code in the repository.
+It then used a set of heuristics
+to pick out the elements of the protocol, and another set of overlapping heuristics to create the Go code.
+The output was functional, but idiosyncratic, and the code was fragile and barely maintainable.
+
+### The future
+Most of the adjustments using the maps in tables.go could be removed by making changes, mostly to names,
+in the gopls code. Using more "or" types in gopls requires more elaborate, but stereotyped, changes.
+But even without all the adjustments, making this its own module would face problems; a number of
+dependencies would have to be factored out. And, it is fragile. The custom unmarshaling code knows what
+types it expects. A design that return an 'any' on unexpected types would match the json
+'ignore unexpected values' philosophy better, but the the Go code would need extra checking.
diff --git a/gopls/internal/lsp/protocol/generate/generate.go b/gopls/internal/lsp/protocol/generate/generate.go
new file mode 100644
index 000000000..0496b7d06
--- /dev/null
+++ b/gopls/internal/lsp/protocol/generate/generate.go
@@ -0,0 +1,121 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.19
+// +build go1.19
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "strings"
+)
+
+// a newType is a type that needs a name and a definition
+// These are the various types that the json specification doesn't name
+type newType struct {
+ name string
+ properties Properties // for struct/literal types
+ items []*Type // for other types ("and", "tuple")
+ line int
+ kind string // Or, And, Tuple, Lit, Map
+ typ *Type
+}
+
+func generateDoc(out *bytes.Buffer, doc string) {
+ if doc == "" {
+ return
+ }
+
+ if !strings.Contains(doc, "\n") {
+ fmt.Fprintf(out, "// %s\n", doc)
+ return
+ }
+ var list bool
+ for _, line := range strings.Split(doc, "\n") {
+ // Lists in metaModel.json start with a dash.
+ // To make a go doc list they have to be preceded
+ // by a blank line, and indented.
+ // (see type TextDccumentFilter in protocol.go)
+ if len(line) > 0 && line[0] == '-' {
+ if !list {
+ list = true
+ fmt.Fprintf(out, "//\n")
+ }
+ fmt.Fprintf(out, "// %s\n", line)
+ } else {
+ if len(line) == 0 {
+ list = false
+ }
+ fmt.Fprintf(out, "// %s\n", line)
+ }
+ }
+}
+
+// decide if a property is optional, and if it needs a *
+// return ",omitempty" if it is optional, and "*" if it needs a pointer
+func propStar(name string, t NameType, gotype string) (string, string) {
+ var opt, star string
+ if t.Optional {
+ star = "*"
+ opt = ",omitempty"
+ }
+ if strings.HasPrefix(gotype, "[]") || strings.HasPrefix(gotype, "map[") {
+ star = "" // passed by reference, so no need for *
+ } else {
+ switch gotype {
+ case "bool", "uint32", "int32", "string", "interface{}":
+ star = "" // gopls compatibility if t.Optional
+ }
+ }
+ ostar, oopt := star, opt
+ if newStar, ok := goplsStar[prop{name, t.Name}]; ok {
+ switch newStar {
+ case nothing:
+ star, opt = "", ""
+ case wantStar:
+ star, opt = "*", ""
+ case wantOpt:
+ star, opt = "", ",omitempty"
+ case wantOptStar:
+ star, opt = "*", ",omitempty"
+ }
+ if star == ostar && opt == oopt { // no change
+ log.Printf("goplsStar[ {%q, %q} ](%d) useless %s/%s %s/%s", name, t.Name, t.Line, ostar, star, oopt, opt)
+ }
+ usedGoplsStar[prop{name, t.Name}] = true
+ }
+
+ return opt, star
+}
+
+func goName(s string) string {
+ // Go naming conventions
+ if strings.HasSuffix(s, "Id") {
+ s = s[:len(s)-len("Id")] + "ID"
+ } else if strings.HasSuffix(s, "Uri") {
+ s = s[:len(s)-3] + "URI"
+ } else if s == "uri" {
+ s = "URI"
+ } else if s == "id" {
+ s = "ID"
+ }
+
+ // renames for temporary GOPLS compatibility
+ if news := goplsType[s]; news != "" {
+ usedGoplsType[s] = true
+ s = news
+ }
+ // Names beginning _ are not exported
+ if strings.HasPrefix(s, "_") {
+ s = strings.Replace(s, "_", "X", 1)
+ }
+ if s != "string" { // base types are unchanged (textDocuemnt/diagnostic)
+ // Title is deprecated, but a) s is only one word, b) replacement is too heavy-weight
+ s = strings.Title(s)
+ }
+ return s
+}
diff --git a/gopls/internal/lsp/protocol/generate/main.go b/gopls/internal/lsp/protocol/generate/main.go
new file mode 100644
index 000000000..d1114911e
--- /dev/null
+++ b/gopls/internal/lsp/protocol/generate/main.go
@@ -0,0 +1,387 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.19
+// +build go1.19
+
+// The generate command generates Go declarations from VSCode's
+// description of the Language Server Protocol.
+//
+// To run it, type 'go generate' in the parent (protocol) directory.
+package main
+
+import (
+ "bytes"
+ "encoding/json"
+ "flag"
+ "fmt"
+ "go/format"
+ "log"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+)
+
+const vscodeRepo = "https://github.com/microsoft/vscode-languageserver-node"
+
+// lspGitRef names a branch or tag in vscodeRepo.
+// It implicitly determines the protocol version of the LSP used by gopls.
+// For example, tag release/protocol/3.17.3 of the repo defines protocol version 3.17.0.
+// (Point releases are reflected in the git tag version even when they are cosmetic
+// and don't change the protocol.)
+var lspGitRef = "release/protocol/3.17.3-next.6"
+
+var (
+ repodir = flag.String("d", "", "directory containing clone of "+vscodeRepo)
+ outputdir = flag.String("o", ".", "output directory")
+ // PJW: not for real code
+ cmpdir = flag.String("c", "", "directory of earlier code")
+ doboth = flag.String("b", "", "generate and compare")
+)
+
+func main() {
+ log.SetFlags(log.Lshortfile) // log file name and line number, not time
+ flag.Parse()
+
+ processinline()
+}
+
+func processinline() {
+ // A local repository may be specified during debugging.
+ // The default behavior is to download the canonical version.
+ if *repodir == "" {
+ tmpdir, err := os.MkdirTemp("", "")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir) // ignore error
+
+ // Clone the repository.
+ cmd := exec.Command("git", "clone", "--quiet", "--depth=1", "-c", "advice.detachedHead=false", vscodeRepo, "--branch="+lspGitRef, "--single-branch", tmpdir)
+ cmd.Stdout = os.Stderr
+ cmd.Stderr = os.Stderr
+ if err := cmd.Run(); err != nil {
+ log.Fatal(err)
+ }
+
+ *repodir = tmpdir
+ } else {
+ lspGitRef = fmt.Sprintf("(not git, local dir %s)", *repodir)
+ }
+
+ model := parse(filepath.Join(*repodir, "protocol/metaModel.json"))
+
+ findTypeNames(model)
+ generateOutput(model)
+
+ fileHdr = fileHeader(model)
+
+ // write the files
+ writeclient()
+ writeserver()
+ writeprotocol()
+ writejsons()
+
+ checkTables()
+}
+
+// common file header for output files
+var fileHdr string
+
+func writeclient() {
+ out := new(bytes.Buffer)
+ fmt.Fprintln(out, fileHdr)
+ out.WriteString(
+ `import (
+ "context"
+ "encoding/json"
+
+ "golang.org/x/tools/internal/jsonrpc2"
+)
+`)
+ out.WriteString("type Client interface {\n")
+ for _, k := range cdecls.keys() {
+ out.WriteString(cdecls[k])
+ }
+ out.WriteString("}\n\n")
+ out.WriteString("func clientDispatch(ctx context.Context, client Client, reply jsonrpc2.Replier, r jsonrpc2.Request) (bool, error) {\n")
+ out.WriteString("\tswitch r.Method() {\n")
+ for _, k := range ccases.keys() {
+ out.WriteString(ccases[k])
+ }
+ out.WriteString(("\tdefault:\n\t\treturn false, nil\n\t}\n}\n\n"))
+ for _, k := range cfuncs.keys() {
+ out.WriteString(cfuncs[k])
+ }
+
+ x, err := format.Source(out.Bytes())
+ if err != nil {
+ os.WriteFile("/tmp/a.go", out.Bytes(), 0644)
+ log.Fatalf("tsclient.go: %v", err)
+ }
+
+ if err := os.WriteFile(filepath.Join(*outputdir, "tsclient.go"), x, 0644); err != nil {
+ log.Fatalf("%v writing tsclient.go", err)
+ }
+}
+
+func writeserver() {
+ out := new(bytes.Buffer)
+ fmt.Fprintln(out, fileHdr)
+ out.WriteString(
+ `import (
+ "context"
+ "encoding/json"
+
+ "golang.org/x/tools/internal/jsonrpc2"
+)
+`)
+ out.WriteString("type Server interface {\n")
+ for _, k := range sdecls.keys() {
+ out.WriteString(sdecls[k])
+ }
+ out.WriteString(` NonstandardRequest(ctx context.Context, method string, params interface{}) (interface{}, error)
+}
+
+func serverDispatch(ctx context.Context, server Server, reply jsonrpc2.Replier, r jsonrpc2.Request) (bool, error) {
+ switch r.Method() {
+`)
+ for _, k := range scases.keys() {
+ out.WriteString(scases[k])
+ }
+ out.WriteString(("\tdefault:\n\t\treturn false, nil\n\t}\n}\n\n"))
+ for _, k := range sfuncs.keys() {
+ out.WriteString(sfuncs[k])
+ }
+ out.WriteString(`func (s *serverDispatcher) NonstandardRequest(ctx context.Context, method string, params interface{}) (interface{}, error) {
+ var result interface{}
+ if err := s.sender.Call(ctx, method, params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+`)
+
+ x, err := format.Source(out.Bytes())
+ if err != nil {
+ os.WriteFile("/tmp/a.go", out.Bytes(), 0644)
+ log.Fatalf("tsserver.go: %v", err)
+ }
+
+ if err := os.WriteFile(filepath.Join(*outputdir, "tsserver.go"), x, 0644); err != nil {
+ log.Fatalf("%v writing tsserver.go", err)
+ }
+}
+
+func writeprotocol() {
+ out := new(bytes.Buffer)
+ fmt.Fprintln(out, fileHdr)
+ out.WriteString("import \"encoding/json\"\n\n")
+
+ // The followiing are unneeded, but make the new code a superset of the old
+ hack := func(newer, existing string) {
+ if _, ok := types[existing]; !ok {
+ log.Fatalf("types[%q] not found", existing)
+ }
+ types[newer] = strings.Replace(types[existing], existing, newer, 1)
+ }
+ hack("ConfigurationParams", "ParamConfiguration")
+ hack("InitializeParams", "ParamInitialize")
+ hack("PreviousResultId", "PreviousResultID")
+ hack("WorkspaceFoldersServerCapabilities", "WorkspaceFolders5Gn")
+ hack("_InitializeParams", "XInitializeParams")
+ // and some aliases to make the new code contain the old
+ types["PrepareRename2Gn"] = "type PrepareRename2Gn = Msg_PrepareRename2Gn // (alias) line 13927\n"
+ types["PrepareRenameResult"] = "type PrepareRenameResult = Msg_PrepareRename2Gn // (alias) line 13927\n"
+ for _, k := range types.keys() {
+ if k == "WatchKind" {
+ types[k] = "type WatchKind = uint32 // line 13505" // strict gopls compatibility needs the '='
+ }
+ out.WriteString(types[k])
+ }
+
+ out.WriteString("\nconst (\n")
+ for _, k := range consts.keys() {
+ out.WriteString(consts[k])
+ }
+ out.WriteString(")\n\n")
+ x, err := format.Source(out.Bytes())
+ if err != nil {
+ os.WriteFile("/tmp/a.go", out.Bytes(), 0644)
+ log.Fatalf("tsprotocol.go: %v", err)
+ }
+ if err := os.WriteFile(filepath.Join(*outputdir, "tsprotocol.go"), x, 0644); err != nil {
+ log.Fatalf("%v writing tsprotocol.go", err)
+ }
+}
+
+func writejsons() {
+ out := new(bytes.Buffer)
+ fmt.Fprintln(out, fileHdr)
+ out.WriteString("import \"encoding/json\"\n\n")
+ out.WriteString("import \"fmt\"\n")
+
+ out.WriteString(`
+// UnmarshalError indicates that a JSON value did not conform to
+// one of the expected cases of an LSP union type.
+type UnmarshalError struct {
+ msg string
+}
+
+func (e UnmarshalError) Error() string {
+ return e.msg
+}
+`)
+
+ for _, k := range jsons.keys() {
+ out.WriteString(jsons[k])
+ }
+ x, err := format.Source(out.Bytes())
+ if err != nil {
+ os.WriteFile("/tmp/a.go", out.Bytes(), 0644)
+ log.Fatalf("tsjson.go: %v", err)
+ }
+ if err := os.WriteFile(filepath.Join(*outputdir, "tsjson.go"), x, 0644); err != nil {
+ log.Fatalf("%v writing tsjson.go", err)
+ }
+}
+
+// create the common file header for the output files
+func fileHeader(model Model) string {
+ fname := filepath.Join(*repodir, ".git", "HEAD")
+ buf, err := os.ReadFile(fname)
+ if err != nil {
+ log.Fatal(err)
+ }
+ buf = bytes.TrimSpace(buf)
+ var githash string
+ if len(buf) == 40 {
+ githash = string(buf[:40])
+ } else if bytes.HasPrefix(buf, []byte("ref: ")) {
+ fname = filepath.Join(*repodir, ".git", string(buf[5:]))
+ buf, err = os.ReadFile(fname)
+ if err != nil {
+ log.Fatal(err)
+ }
+ githash = string(buf[:40])
+ } else {
+ log.Fatalf("githash cannot be recovered from %s", fname)
+ }
+
+ format := `// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated for LSP. DO NOT EDIT.
+
+package protocol
+
+// Code generated from %[1]s at ref %[2]s (hash %[3]s).
+// %[4]s/blob/%[2]s/%[1]s
+// LSP metaData.version = %[5]s.
+
+`
+ return fmt.Sprintf(format,
+ "protocol/metaModel.json", // 1
+ lspGitRef, // 2
+ githash, // 3
+ vscodeRepo, // 4
+ model.Version.Version) // 5
+}
+
+func parse(fname string) Model {
+ buf, err := os.ReadFile(fname)
+ if err != nil {
+ log.Fatal(err)
+ }
+ buf = addLineNumbers(buf)
+ var model Model
+ if err := json.Unmarshal(buf, &model); err != nil {
+ log.Fatal(err)
+ }
+ return model
+}
+
+// Type.Value has to be treated specially for literals and maps
+func (t *Type) UnmarshalJSON(data []byte) error {
+ // First unmarshal only the unambiguous fields.
+ var x struct {
+ Kind string `json:"kind"`
+ Items []*Type `json:"items"`
+ Element *Type `json:"element"`
+ Name string `json:"name"`
+ Key *Type `json:"key"`
+ Value any `json:"value"`
+ Line int `json:"line"`
+ }
+ if err := json.Unmarshal(data, &x); err != nil {
+ return err
+ }
+ *t = Type{
+ Kind: x.Kind,
+ Items: x.Items,
+ Element: x.Element,
+ Name: x.Name,
+ Value: x.Value,
+ Line: x.Line,
+ }
+
+ // Then unmarshal the 'value' field based on the kind.
+ // This depends on Unmarshal ignoring fields it doesn't know about.
+ switch x.Kind {
+ case "map":
+ var x struct {
+ Key *Type `json:"key"`
+ Value *Type `json:"value"`
+ }
+ if err := json.Unmarshal(data, &x); err != nil {
+ return fmt.Errorf("Type.kind=map: %v", err)
+ }
+ t.Key = x.Key
+ t.Value = x.Value
+
+ case "literal":
+ var z struct {
+ Value ParseLiteral `json:"value"`
+ }
+
+ if err := json.Unmarshal(data, &z); err != nil {
+ return fmt.Errorf("Type.kind=literal: %v", err)
+ }
+ t.Value = z.Value
+
+ case "base", "reference", "array", "and", "or", "tuple",
+ "stringLiteral":
+ // no-op. never seen integerLiteral or booleanLiteral.
+
+ default:
+ return fmt.Errorf("cannot decode Type.kind %q: %s", x.Kind, data)
+ }
+ return nil
+}
+
+// which table entries were not used
+func checkTables() {
+ for k := range disambiguate {
+ if !usedDisambiguate[k] {
+ log.Printf("disambiguate[%v] unused", k)
+ }
+ }
+ for k := range renameProp {
+ if !usedRenameProp[k] {
+ log.Printf("renameProp {%q, %q} unused", k[0], k[1])
+ }
+ }
+ for k := range goplsStar {
+ if !usedGoplsStar[k] {
+ log.Printf("goplsStar {%q, %q} unused", k[0], k[1])
+ }
+ }
+ for k := range goplsType {
+ if !usedGoplsType[k] {
+ log.Printf("unused goplsType[%q]->%s", k, goplsType[k])
+ }
+ }
+}
diff --git a/gopls/internal/lsp/protocol/generate/main_test.go b/gopls/internal/lsp/protocol/generate/main_test.go
new file mode 100644
index 000000000..f887066ee
--- /dev/null
+++ b/gopls/internal/lsp/protocol/generate/main_test.go
@@ -0,0 +1,118 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.19
+// +build go1.19
+
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "os"
+ "testing"
+)
+
+// These tests require the result of
+//"git clone https://github.com/microsoft/vscode-languageserver-node" in the HOME directory
+
+// this is not a test, but a way to get code coverage,
+// (in vscode, just run the test with "go.coverOnSingleTest": true)
+func TestAll(t *testing.T) {
+ t.Skip("needs vscode-languageserver-node repository")
+ log.SetFlags(log.Lshortfile)
+ main()
+}
+
+// check that the parsed file includes all the information
+// from the json file. This test will fail if the spec
+// introduces new fields. (one can test this test by
+// commenting out the version field in Model.)
+func TestParseContents(t *testing.T) {
+ t.Skip("needs vscode-languageserver-node repository")
+ log.SetFlags(log.Lshortfile)
+
+ // compute our parse of the specification
+ dir := os.Getenv("HOME") + "/vscode-languageserver-node"
+ fname := dir + "/protocol/metaModel.json"
+ v := parse(fname)
+ out, err := json.Marshal(v)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var our interface{}
+ if err := json.Unmarshal(out, &our); err != nil {
+ t.Fatal(err)
+ }
+
+ // process the json file
+ buf, err := os.ReadFile(fname)
+ if err != nil {
+ t.Fatalf("could not read metaModel.json: %v", err)
+ }
+ var raw interface{}
+ if err := json.Unmarshal(buf, &raw); err != nil {
+ t.Fatal(err)
+ }
+
+ // convert to strings showing the fields
+ them := flatten(raw)
+ us := flatten(our)
+
+ // everything in them should be in us
+ lesser := make(sortedMap[bool])
+ for _, s := range them {
+ lesser[s] = true
+ }
+ greater := make(sortedMap[bool]) // set of fields we have
+ for _, s := range us {
+ greater[s] = true
+ }
+ for _, k := range lesser.keys() { // set if fields they have
+ if !greater[k] {
+ t.Errorf("missing %s", k)
+ }
+ }
+}
+
+// flatten(nil) = "nil"
+// flatten(v string) = fmt.Sprintf("%q", v)
+// flatten(v float64)= fmt.Sprintf("%g", v)
+// flatten(v bool) = fmt.Sprintf("%v", v)
+// flatten(v []any) = []string{"[0]"flatten(v[0]), "[1]"flatten(v[1]), ...}
+// flatten(v map[string]any) = {"key1": flatten(v["key1"]), "key2": flatten(v["key2"]), ...}
+func flatten(x any) []string {
+ switch v := x.(type) {
+ case nil:
+ return []string{"nil"}
+ case string:
+ return []string{fmt.Sprintf("%q", v)}
+ case float64:
+ return []string{fmt.Sprintf("%g", v)}
+ case bool:
+ return []string{fmt.Sprintf("%v", v)}
+ case []any:
+ var ans []string
+ for i, x := range v {
+ idx := fmt.Sprintf("[%.3d]", i)
+ for _, s := range flatten(x) {
+ ans = append(ans, idx+s)
+ }
+ }
+ return ans
+ case map[string]any:
+ var ans []string
+ for k, x := range v {
+ idx := fmt.Sprintf("%q:", k)
+ for _, s := range flatten(x) {
+ ans = append(ans, idx+s)
+ }
+ }
+ return ans
+ default:
+ log.Fatalf("unexpected type %T", x)
+ return nil
+ }
+}
diff --git a/gopls/internal/lsp/protocol/generate/output.go b/gopls/internal/lsp/protocol/generate/output.go
new file mode 100644
index 000000000..18dd6ea3f
--- /dev/null
+++ b/gopls/internal/lsp/protocol/generate/output.go
@@ -0,0 +1,420 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.19
+// +build go1.19
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "sort"
+ "strings"
+)
+
+var (
+ // tsclient.go has 3 sections
+ cdecls = make(sortedMap[string])
+ ccases = make(sortedMap[string])
+ cfuncs = make(sortedMap[string])
+ // tsserver.go has 3 sections
+ sdecls = make(sortedMap[string])
+ scases = make(sortedMap[string])
+ sfuncs = make(sortedMap[string])
+ // tsprotocol.go has 2 sections
+ types = make(sortedMap[string])
+ consts = make(sortedMap[string])
+ // tsjson has 1 section
+ jsons = make(sortedMap[string])
+)
+
+func generateOutput(model Model) {
+ for _, r := range model.Requests {
+ genDecl(r.Method, r.Params, r.Result, r.Direction)
+ genCase(r.Method, r.Params, r.Result, r.Direction)
+ genFunc(r.Method, r.Params, r.Result, r.Direction, false)
+ }
+ for _, n := range model.Notifications {
+ if n.Method == "$/cancelRequest" {
+ continue // handled internally by jsonrpc2
+ }
+ genDecl(n.Method, n.Params, nil, n.Direction)
+ genCase(n.Method, n.Params, nil, n.Direction)
+ genFunc(n.Method, n.Params, nil, n.Direction, true)
+ }
+ genStructs(model)
+ genAliases(model)
+ genGenTypes() // generate the unnamed types
+ genConsts(model)
+ genMarshal()
+}
+
+func genDecl(method string, param, result *Type, dir string) {
+ fname := methodNames[method]
+ p := ""
+ if notNil(param) {
+ p = ", *" + goplsName(param)
+ }
+ ret := "error"
+ if notNil(result) {
+ tp := goplsName(result)
+ if !hasNilValue(tp) {
+ tp = "*" + tp
+ }
+ ret = fmt.Sprintf("(%s, error)", tp)
+ }
+ // special gopls compatibility case (PJW: still needed?)
+ switch method {
+ case "workspace/configuration":
+ // was And_Param_workspace_configuration, but the type substitution doesn't work,
+ // as ParamConfiguration is embedded in And_Param_workspace_configuration
+ p = ", *ParamConfiguration"
+ ret = "([]LSPAny, error)"
+ }
+ msg := fmt.Sprintf("\t%s(context.Context%s) %s // %s\n", fname, p, ret, method)
+ switch dir {
+ case "clientToServer":
+ sdecls[method] = msg
+ case "serverToClient":
+ cdecls[method] = msg
+ case "both":
+ sdecls[method] = msg
+ cdecls[method] = msg
+ default:
+ log.Fatalf("impossible direction %q", dir)
+ }
+}
+
+func genCase(method string, param, result *Type, dir string) {
+ out := new(bytes.Buffer)
+ fmt.Fprintf(out, "\tcase %q:\n", method)
+ var p string
+ fname := methodNames[method]
+ if notNil(param) {
+ nm := goplsName(param)
+ if method == "workspace/configuration" { // gopls compatibility
+ // was And_Param_workspace_configuration, which contains ParamConfiguration
+ // so renaming the type leads to circular definitions
+ nm = "ParamConfiguration" // gopls compatibility
+ }
+ fmt.Fprintf(out, "\t\tvar params %s\n", nm)
+ fmt.Fprintf(out, "\t\tif err := json.Unmarshal(r.Params(), &params); err != nil {\n")
+ fmt.Fprintf(out, "\t\t\treturn true, sendParseError(ctx, reply, err)\n\t\t}\n")
+ p = ", &params"
+ }
+ if notNil(result) {
+ fmt.Fprintf(out, "\t\tresp, err := %%s.%s(ctx%s)\n", fname, p)
+ out.WriteString("\t\tif err != nil {\n")
+ out.WriteString("\t\t\treturn true, reply(ctx, nil, err)\n")
+ out.WriteString("\t\t}\n")
+ out.WriteString("\t\treturn true, reply(ctx, resp, nil)\n")
+ } else {
+ fmt.Fprintf(out, "\t\terr := %%s.%s(ctx%s)\n", fname, p)
+ out.WriteString("\t\treturn true, reply(ctx, nil, err)\n")
+ }
+ msg := out.String()
+ switch dir {
+ case "clientToServer":
+ scases[method] = fmt.Sprintf(msg, "server")
+ case "serverToClient":
+ ccases[method] = fmt.Sprintf(msg, "client")
+ case "both":
+ scases[method] = fmt.Sprintf(msg, "server")
+ ccases[method] = fmt.Sprintf(msg, "client")
+ default:
+ log.Fatalf("impossible direction %q", dir)
+ }
+}
+
+func genFunc(method string, param, result *Type, dir string, isnotify bool) {
+ out := new(bytes.Buffer)
+ var p, r string
+ var goResult string
+ if notNil(param) {
+ p = ", params *" + goplsName(param)
+ }
+ if notNil(result) {
+ goResult = goplsName(result)
+ if !hasNilValue(goResult) {
+ goResult = "*" + goResult
+ }
+ r = fmt.Sprintf("(%s, error)", goResult)
+ } else {
+ r = "error"
+ }
+ // special gopls compatibility case
+ switch method {
+ case "workspace/configuration":
+ // was And_Param_workspace_configuration, but the type substitution doesn't work,
+ // as ParamConfiguration is embedded in And_Param_workspace_configuration
+ p = ", params *ParamConfiguration"
+ r = "([]LSPAny, error)"
+ goResult = "[]LSPAny"
+ }
+ fname := methodNames[method]
+ fmt.Fprintf(out, "func (s *%%sDispatcher) %s(ctx context.Context%s) %s {\n",
+ fname, p, r)
+
+ if !notNil(result) {
+ if isnotify {
+ if notNil(param) {
+ fmt.Fprintf(out, "\treturn s.sender.Notify(ctx, %q, params)\n", method)
+ } else {
+ fmt.Fprintf(out, "\treturn s.sender.Notify(ctx, %q, nil)\n", method)
+ }
+ } else {
+ if notNil(param) {
+ fmt.Fprintf(out, "\treturn s.sender.Call(ctx, %q, params, nil)\n", method)
+ } else {
+ fmt.Fprintf(out, "\treturn s.sender.Call(ctx, %q, nil, nil)\n", method)
+ }
+ }
+ } else {
+ fmt.Fprintf(out, "\tvar result %s\n", goResult)
+ if isnotify {
+ if notNil(param) {
+ fmt.Fprintf(out, "\ts.sender.Notify(ctx, %q, params)\n", method)
+ } else {
+ fmt.Fprintf(out, "\t\tif err := s.sender.Notify(ctx, %q, nil); err != nil {\n", method)
+ }
+ } else {
+ if notNil(param) {
+ fmt.Fprintf(out, "\t\tif err := s.sender.Call(ctx, %q, params, &result); err != nil {\n", method)
+ } else {
+ fmt.Fprintf(out, "\t\tif err := s.sender.Call(ctx, %q, nil, &result); err != nil {\n", method)
+ }
+ }
+ fmt.Fprintf(out, "\t\treturn nil, err\n\t}\n\treturn result, nil\n")
+ }
+ out.WriteString("}\n")
+ msg := out.String()
+ switch dir {
+ case "clientToServer":
+ sfuncs[method] = fmt.Sprintf(msg, "server")
+ case "serverToClient":
+ cfuncs[method] = fmt.Sprintf(msg, "client")
+ case "both":
+ sfuncs[method] = fmt.Sprintf(msg, "server")
+ cfuncs[method] = fmt.Sprintf(msg, "client")
+ default:
+ log.Fatalf("impossible direction %q", dir)
+ }
+}
+
+func genStructs(model Model) {
+ structures := make(map[string]*Structure) // for expanding Extends
+ for _, s := range model.Structures {
+ structures[s.Name] = s
+ }
+ for _, s := range model.Structures {
+ out := new(bytes.Buffer)
+ generateDoc(out, s.Documentation)
+ nm := goName(s.Name)
+ if nm == "string" { // an unacceptable strut name
+ // a weird case, and needed only so the generated code contains the old gopls code
+ nm = "DocumentDiagnosticParams"
+ }
+ fmt.Fprintf(out, "type %s struct { // line %d\n", nm, s.Line)
+ // for gpls compatibilitye, embed most extensions, but expand the rest some day
+ props := append([]NameType{}, s.Properties...)
+ if s.Name == "SymbolInformation" { // but expand this one
+ for _, ex := range s.Extends {
+ fmt.Fprintf(out, "\t// extends %s\n", ex.Name)
+ props = append(props, structures[ex.Name].Properties...)
+ }
+ genProps(out, props, nm)
+ } else {
+ genProps(out, props, nm)
+ for _, ex := range s.Extends {
+ fmt.Fprintf(out, "\t%s\n", goName(ex.Name))
+ }
+ }
+ for _, ex := range s.Mixins {
+ fmt.Fprintf(out, "\t%s\n", goName(ex.Name))
+ }
+ out.WriteString("}\n")
+ types[nm] = out.String()
+ }
+ // base types
+ types["DocumentURI"] = "type DocumentURI string\n"
+ types["URI"] = "type URI = string\n"
+
+ types["LSPAny"] = "type LSPAny = interface{}\n"
+ // A special case, the only previously existing Or type
+ types["DocumentDiagnosticReport"] = "type DocumentDiagnosticReport = Or_DocumentDiagnosticReport // (alias) line 13909\n"
+
+}
+
+func genProps(out *bytes.Buffer, props []NameType, name string) {
+ for _, p := range props {
+ tp := goplsName(p.Type)
+ if newNm, ok := renameProp[prop{name, p.Name}]; ok {
+ usedRenameProp[prop{name, p.Name}] = true
+ if tp == newNm {
+ log.Printf("renameProp useless {%q, %q} for %s", name, p.Name, tp)
+ }
+ tp = newNm
+ }
+ // it's a pointer if it is optional, or for gopls compatibility
+ opt, star := propStar(name, p, tp)
+ json := fmt.Sprintf(" `json:\"%s%s\"`", p.Name, opt)
+ generateDoc(out, p.Documentation)
+ fmt.Fprintf(out, "\t%s %s%s %s\n", goName(p.Name), star, tp, json)
+ }
+}
+
+func genAliases(model Model) {
+ for _, ta := range model.TypeAliases {
+ out := new(bytes.Buffer)
+ generateDoc(out, ta.Documentation)
+ nm := goName(ta.Name)
+ if nm != ta.Name {
+ continue // renamed the type, e.g., "DocumentDiagnosticReport", an or-type to "string"
+ }
+ tp := goplsName(ta.Type)
+ fmt.Fprintf(out, "type %s = %s // (alias) line %d\n", nm, tp, ta.Line)
+ types[nm] = out.String()
+ }
+}
+
+func genGenTypes() {
+ for _, nt := range genTypes {
+ out := new(bytes.Buffer)
+ nm := goplsName(nt.typ)
+ switch nt.kind {
+ case "literal":
+ fmt.Fprintf(out, "// created for Literal (%s)\n", nt.name)
+ fmt.Fprintf(out, "type %s struct { // line %d\n", nm, nt.line+1)
+ genProps(out, nt.properties, nt.name) // systematic name, not gopls name; is this a good choice?
+ case "or":
+ if !strings.HasPrefix(nm, "Or") {
+ // It was replaced by a narrower type defined elsewhere
+ continue
+ }
+ names := []string{}
+ for _, t := range nt.items {
+ if notNil(t) {
+ names = append(names, goplsName(t))
+ }
+ }
+ sort.Strings(names)
+ fmt.Fprintf(out, "// created for Or %v\n", names)
+ fmt.Fprintf(out, "type %s struct { // line %d\n", nm, nt.line+1)
+ fmt.Fprintf(out, "\tValue interface{} `json:\"value\"`\n")
+ case "and":
+ fmt.Fprintf(out, "// created for And\n")
+ fmt.Fprintf(out, "type %s struct { // line %d\n", nm, nt.line+1)
+ for _, x := range nt.items {
+ nm := goplsName(x)
+ fmt.Fprintf(out, "\t%s\n", nm)
+ }
+ case "tuple": // there's only this one
+ nt.name = "UIntCommaUInt"
+ fmt.Fprintf(out, "//created for Tuple\ntype %s struct { // line %d\n", nm, nt.line+1)
+ fmt.Fprintf(out, "\tFld0 uint32 `json:\"fld0\"`\n")
+ fmt.Fprintf(out, "\tFld1 uint32 `json:\"fld1\"`\n")
+ default:
+ log.Fatalf("%s not handled", nt.kind)
+ }
+ out.WriteString("}\n")
+ types[nm] = out.String()
+ }
+}
+func genConsts(model Model) {
+ for _, e := range model.Enumerations {
+ out := new(bytes.Buffer)
+ generateDoc(out, e.Documentation)
+ tp := goplsName(e.Type)
+ nm := goName(e.Name)
+ fmt.Fprintf(out, "type %s %s // line %d\n", nm, tp, e.Line)
+ types[nm] = out.String()
+ vals := new(bytes.Buffer)
+ generateDoc(vals, e.Documentation)
+ for _, v := range e.Values {
+ generateDoc(vals, v.Documentation)
+ nm := goName(v.Name)
+ more, ok := disambiguate[e.Name]
+ if ok {
+ usedDisambiguate[e.Name] = true
+ nm = more.prefix + nm + more.suffix
+ nm = goName(nm) // stringType
+ }
+ var val string
+ switch v := v.Value.(type) {
+ case string:
+ val = fmt.Sprintf("%q", v)
+ case float64:
+ val = fmt.Sprintf("%d", int(v))
+ default:
+ log.Fatalf("impossible type %T", v)
+ }
+ fmt.Fprintf(vals, "\t%s %s = %s // line %d\n", nm, e.Name, val, v.Line)
+ }
+ consts[nm] = vals.String()
+ }
+}
+func genMarshal() {
+ for _, nt := range genTypes {
+ nm := goplsName(nt.typ)
+ if !strings.HasPrefix(nm, "Or") {
+ continue
+ }
+ names := []string{}
+ for _, t := range nt.items {
+ if notNil(t) {
+ names = append(names, goplsName(t))
+ }
+ }
+ sort.Strings(names)
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "// from line %d\n", nt.line)
+ fmt.Fprintf(&buf, "func (t %s) MarshalJSON() ([]byte, error) {\n", nm)
+ buf.WriteString("\tswitch x := t.Value.(type){\n")
+ for _, nmx := range names {
+ fmt.Fprintf(&buf, "\tcase %s:\n", nmx)
+ fmt.Fprintf(&buf, "\t\treturn json.Marshal(x)\n")
+ }
+ buf.WriteString("\tcase nil:\n\t\treturn []byte(\"null\"), nil\n\t}\n")
+ fmt.Fprintf(&buf, "\treturn nil, fmt.Errorf(\"type %%T not one of %v\", t)\n", names)
+ buf.WriteString("}\n\n")
+
+ fmt.Fprintf(&buf, "func (t *%s) UnmarshalJSON(x []byte) error {\n", nm)
+ buf.WriteString("\tif string(x) == \"null\" {\n\t\tt.Value = nil\n\t\t\treturn nil\n\t}\n")
+ for i, nmx := range names {
+ fmt.Fprintf(&buf, "\tvar h%d %s\n", i, nmx)
+ fmt.Fprintf(&buf, "\tif err := json.Unmarshal(x, &h%d); err == nil {\n\t\tt.Value = h%d\n\t\t\treturn nil\n\t\t}\n", i, i)
+ }
+ fmt.Fprintf(&buf, "return &UnmarshalError{\"unmarshal failed to match one of %v\"}", names)
+ buf.WriteString("}\n\n")
+ jsons[nm] = buf.String()
+ }
+}
+
+func goplsName(t *Type) string {
+ nm := typeNames[t]
+ // translate systematic name to gopls name
+ if newNm, ok := goplsType[nm]; ok {
+ usedGoplsType[nm] = true
+ nm = newNm
+ }
+ return nm
+}
+
+func notNil(t *Type) bool { // shutdwon is the special case that needs this
+ return t != nil && (t.Kind != "base" || t.Name != "null")
+}
+
+func hasNilValue(t string) bool {
+ // this may be unreliable, and need a supplementary table
+ if strings.HasPrefix(t, "[]") || strings.HasPrefix(t, "*") {
+ return true
+ }
+ if t == "interface{}" || t == "any" {
+ return true
+ }
+ // that's all the cases that occur currently
+ return false
+}
diff --git a/gopls/internal/lsp/protocol/generate/tables.go b/gopls/internal/lsp/protocol/generate/tables.go
new file mode 100644
index 000000000..126301a05
--- /dev/null
+++ b/gopls/internal/lsp/protocol/generate/tables.go
@@ -0,0 +1,327 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.19
+// +build go1.19
+
+package main
+
+// prop combines the name of a property with the name of the structure it is in.
+type prop [2]string
+
+const (
+ nothing = iota
+ wantStar
+ wantOpt
+ wantOptStar
+)
+
+// goplsStar records the optionality of each field in the protocol.
+// The comments are vague hints as to why removing the line is not trivial.
+// A.B.C.D means that one of B or C would change to a pointer
+// so a test or initialization would be needed
+var goplsStar = map[prop]int{
+ {"ClientCapabilities", "textDocument"}: wantOpt, // A.B.C.D at fake/editor.go:255
+ {"ClientCapabilities", "window"}: wantOpt, // regtest failures
+ {"ClientCapabilities", "workspace"}: wantOpt, // regtest failures
+ {"CodeAction", "kind"}: wantOpt, // A.B.C.D
+
+ {"CodeActionClientCapabilities", "codeActionLiteralSupport"}: wantOpt, // regtest failures
+
+ {"CompletionClientCapabilities", "completionItem"}: wantOpt, // A.B.C.D
+ {"CompletionClientCapabilities", "insertTextMode"}: wantOpt, // A.B.C.D
+ {"CompletionItem", "kind"}: wantOpt, // need temporary variables
+ {"CompletionParams", "context"}: wantOpt, // needs nil checks
+
+ {"Diagnostic", "severity"}: wantOpt, // nil checks or more careful thought
+ {"DidSaveTextDocumentParams", "text"}: wantOptStar, // capabilities_test.go:112 logic
+ {"DocumentHighlight", "kind"}: wantOpt, // need temporary variables
+ {"Hover", "range"}: wantOpt, // complex expressions
+ {"InlayHint", "kind"}: wantOpt, // temporary variables
+
+ {"Lit_CompletionClientCapabilities_completionItem", "tagSupport"}: nothing, // A.B.C.
+ {"Lit_SemanticTokensClientCapabilities_requests", "full"}: nothing, // A.B.C.D
+ {"Lit_SemanticTokensClientCapabilities_requests", "range"}: nothing, // A.B.C.D
+ {"Lit_SemanticTokensClientCapabilities_requests_full_Item1", "delta"}: nothing, // A.B.C.D
+ {"Lit_SemanticTokensOptions_full_Item1", "delta"}: nothing, // A.B.C.
+
+ {"Lit_TextDocumentContentChangeEvent_Item0", "range"}: wantStar, // == nil test
+
+ {"TextDocumentClientCapabilities", "codeAction"}: wantOpt, // A.B.C.D
+ {"TextDocumentClientCapabilities", "completion"}: wantOpt, // A.B.C.D
+ {"TextDocumentClientCapabilities", "documentSymbol"}: wantOpt, // A.B.C.D
+ {"TextDocumentClientCapabilities", "publishDiagnostics"}: wantOpt, //A.B.C.D
+ {"TextDocumentClientCapabilities", "semanticTokens"}: wantOpt, // A.B.C.D
+ {"TextDocumentSyncOptions", "change"}: wantOpt, // &constant
+ {"WorkDoneProgressParams", "workDoneToken"}: wantOpt, // regtest
+ {"WorkspaceClientCapabilities", "didChangeConfiguration"}: wantOpt, // A.B.C.D
+ {"WorkspaceClientCapabilities", "didChangeWatchedFiles"}: wantOpt, // A.B.C.D
+}
+
+// keep track of which entries in goplsStar are used
+var usedGoplsStar = make(map[prop]bool)
+
+// For gopls compatibility, use a different, typically more restrictive, type for some fields.
+var renameProp = map[prop]string{
+ {"CancelParams", "id"}: "interface{}",
+ {"Command", "arguments"}: "[]json.RawMessage",
+ {"CompletionItem", "textEdit"}: "TextEdit",
+ {"Diagnostic", "code"}: "interface{}",
+
+ {"DocumentDiagnosticReportPartialResult", "relatedDocuments"}: "map[DocumentURI]interface{}",
+
+ {"ExecuteCommandParams", "arguments"}: "[]json.RawMessage",
+ {"FoldingRange", "kind"}: "string",
+ {"Hover", "contents"}: "MarkupContent",
+ {"InlayHint", "label"}: "[]InlayHintLabelPart",
+
+ {"RelatedFullDocumentDiagnosticReport", "relatedDocuments"}: "map[DocumentURI]interface{}",
+ {"RelatedUnchangedDocumentDiagnosticReport", "relatedDocuments"}: "map[DocumentURI]interface{}",
+
+ // PJW: this one is tricky.
+ {"ServerCapabilities", "codeActionProvider"}: "interface{}",
+
+ {"ServerCapabilities", "inlayHintProvider"}: "interface{}",
+ // slightly tricky
+ {"ServerCapabilities", "renameProvider"}: "interface{}",
+ // slightly tricky
+ {"ServerCapabilities", "semanticTokensProvider"}: "interface{}",
+ // slightly tricky
+ {"ServerCapabilities", "textDocumentSync"}: "interface{}",
+ {"TextDocumentEdit", "edits"}: "[]TextEdit",
+ {"TextDocumentSyncOptions", "save"}: "SaveOptions",
+ {"WorkspaceEdit", "documentChanges"}: "[]DocumentChanges",
+}
+
+// which entries of renameProp were used
+var usedRenameProp = make(map[prop]bool)
+
+type adjust struct {
+ prefix, suffix string
+}
+
+// disambiguate specifies prefixes or suffixes to add to all values of
+// some enum types to avoid name conflicts
+var disambiguate = map[string]adjust{
+ "CodeActionTriggerKind": {"CodeAction", ""},
+ "CompletionItemKind": {"", "Completion"},
+ "CompletionItemTag": {"Compl", ""},
+ "DiagnosticSeverity": {"Severity", ""},
+ "DocumentDiagnosticReportKind": {"Diagnostic", ""},
+ "FileOperationPatternKind": {"", "Pattern"},
+ "InsertTextFormat": {"", "TextFormat"},
+ "SemanticTokenModifiers": {"Mod", ""},
+ "SemanticTokenTypes": {"", "Type"},
+ "SignatureHelpTriggerKind": {"Sig", ""},
+ "SymbolTag": {"", "Symbol"},
+ "WatchKind": {"Watch", ""},
+}
+
+// which entries of disambiguate got used
+var usedDisambiguate = make(map[string]bool)
+
+// for gopls compatibility, replace generated type names with existing ones
+var goplsType = map[string]string{
+ "And_RegOpt_textDocument_colorPresentation": "WorkDoneProgressOptionsAndTextDocumentRegistrationOptions",
+ "ConfigurationParams": "ParamConfiguration",
+ "DocumentDiagnosticParams": "string",
+ "DocumentDiagnosticReport": "string",
+ "DocumentUri": "DocumentURI",
+ "InitializeParams": "ParamInitialize",
+ "LSPAny": "interface{}",
+
+ "Lit_CodeActionClientCapabilities_codeActionLiteralSupport": "PCodeActionLiteralSupportPCodeAction",
+ "Lit_CodeActionClientCapabilities_codeActionLiteralSupport_codeActionKind": "FCodeActionKindPCodeActionLiteralSupport",
+
+ "Lit_CodeActionClientCapabilities_resolveSupport": "PResolveSupportPCodeAction",
+ "Lit_CodeAction_disabled": "PDisabledMsg_textDocument_codeAction",
+ "Lit_CompletionClientCapabilities_completionItem": "PCompletionItemPCompletion",
+ "Lit_CompletionClientCapabilities_completionItemKind": "PCompletionItemKindPCompletion",
+
+ "Lit_CompletionClientCapabilities_completionItem_insertTextModeSupport": "FInsertTextModeSupportPCompletionItem",
+
+ "Lit_CompletionClientCapabilities_completionItem_resolveSupport": "FResolveSupportPCompletionItem",
+ "Lit_CompletionClientCapabilities_completionItem_tagSupport": "FTagSupportPCompletionItem",
+
+ "Lit_CompletionClientCapabilities_completionList": "PCompletionListPCompletion",
+ "Lit_CompletionList_itemDefaults": "PItemDefaultsMsg_textDocument_completion",
+ "Lit_CompletionList_itemDefaults_editRange_Item1": "FEditRangePItemDefaults",
+ "Lit_CompletionOptions_completionItem": "PCompletionItemPCompletionProvider",
+ "Lit_DocumentSymbolClientCapabilities_symbolKind": "PSymbolKindPDocumentSymbol",
+ "Lit_DocumentSymbolClientCapabilities_tagSupport": "PTagSupportPDocumentSymbol",
+ "Lit_FoldingRangeClientCapabilities_foldingRange": "PFoldingRangePFoldingRange",
+ "Lit_FoldingRangeClientCapabilities_foldingRangeKind": "PFoldingRangeKindPFoldingRange",
+ "Lit_GeneralClientCapabilities_staleRequestSupport": "PStaleRequestSupportPGeneral",
+ "Lit_InitializeResult_serverInfo": "PServerInfoMsg_initialize",
+ "Lit_InlayHintClientCapabilities_resolveSupport": "PResolveSupportPInlayHint",
+ "Lit_MarkedString_Item1": "Msg_MarkedString",
+ "Lit_NotebookDocumentChangeEvent_cells": "PCellsPChange",
+ "Lit_NotebookDocumentChangeEvent_cells_structure": "FStructurePCells",
+ "Lit_NotebookDocumentFilter_Item0": "Msg_NotebookDocumentFilter",
+
+ "Lit_NotebookDocumentSyncOptions_notebookSelector_Elem_Item0": "PNotebookSelectorPNotebookDocumentSync",
+
+ "Lit_PrepareRenameResult_Item1": "Msg_PrepareRename2Gn",
+
+ "Lit_PublishDiagnosticsClientCapabilities_tagSupport": "PTagSupportPPublishDiagnostics",
+ "Lit_SemanticTokensClientCapabilities_requests": "PRequestsPSemanticTokens",
+ "Lit_SemanticTokensClientCapabilities_requests_full_Item1": "FFullPRequests",
+ "Lit_SemanticTokensClientCapabilities_requests_range_Item1": "FRangePRequests",
+
+ "Lit_SemanticTokensOptions_full_Item1": "PFullESemanticTokensOptions",
+ "Lit_SemanticTokensOptions_range_Item1": "PRangeESemanticTokensOptions",
+ "Lit_ServerCapabilities_workspace": "Workspace6Gn",
+
+ "Lit_ShowMessageRequestClientCapabilities_messageActionItem": "PMessageActionItemPShowMessage",
+ "Lit_SignatureHelpClientCapabilities_signatureInformation": "PSignatureInformationPSignatureHelp",
+
+ "Lit_SignatureHelpClientCapabilities_signatureInformation_parameterInformation": "FParameterInformationPSignatureInformation",
+
+ "Lit_TextDocumentContentChangeEvent_Item0": "Msg_TextDocumentContentChangeEvent",
+ "Lit_TextDocumentFilter_Item0": "Msg_TextDocumentFilter",
+ "Lit_TextDocumentFilter_Item1": "Msg_TextDocumentFilter",
+ "Lit_WorkspaceEditClientCapabilities_changeAnnotationSupport": "PChangeAnnotationSupportPWorkspaceEdit",
+ "Lit_WorkspaceSymbolClientCapabilities_resolveSupport": "PResolveSupportPSymbol",
+ "Lit_WorkspaceSymbolClientCapabilities_symbolKind": "PSymbolKindPSymbol",
+ "Lit_WorkspaceSymbolClientCapabilities_tagSupport": "PTagSupportPSymbol",
+ "Lit_WorkspaceSymbol_location_Item1": "PLocationMsg_workspace_symbol",
+ "Lit__InitializeParams_clientInfo": "Msg_XInitializeParams_clientInfo",
+ "Or_CompletionList_itemDefaults_editRange": "OrFEditRangePItemDefaults",
+ "Or_Declaration": "[]Location",
+ "Or_DidChangeConfigurationRegistrationOptions_section": "OrPSection_workspace_didChangeConfiguration",
+ "Or_GlobPattern": "string",
+ "Or_InlayHintLabelPart_tooltip": "OrPTooltipPLabel",
+ "Or_InlayHint_tooltip": "OrPTooltip_textDocument_inlayHint",
+ "Or_LSPAny": "interface{}",
+ "Or_NotebookDocumentFilter": "Msg_NotebookDocumentFilter",
+ "Or_NotebookDocumentSyncOptions_notebookSelector_Elem": "PNotebookSelectorPNotebookDocumentSync",
+
+ "Or_NotebookDocumentSyncOptions_notebookSelector_Elem_Item0_notebook": "OrFNotebookPNotebookSelector",
+
+ "Or_ParameterInformation_documentation": "string",
+ "Or_ParameterInformation_label": "string",
+ "Or_PrepareRenameResult": "Msg_PrepareRename2Gn",
+ "Or_ProgressToken": "interface{}",
+ "Or_Result_textDocument_completion": "CompletionList",
+ "Or_Result_textDocument_declaration": "Or_textDocument_declaration",
+ "Or_Result_textDocument_definition": "[]Location",
+ "Or_Result_textDocument_documentSymbol": "[]interface{}",
+ "Or_Result_textDocument_implementation": "[]Location",
+ "Or_Result_textDocument_semanticTokens_full_delta": "interface{}",
+ "Or_Result_textDocument_typeDefinition": "[]Location",
+ "Or_Result_workspace_symbol": "[]SymbolInformation",
+ "Or_TextDocumentContentChangeEvent": "Msg_TextDocumentContentChangeEvent",
+ "Or_TextDocumentFilter": "Msg_TextDocumentFilter",
+ "Or_WorkspaceFoldersServerCapabilities_changeNotifications": "string",
+ "Or_WorkspaceSymbol_location": "OrPLocation_workspace_symbol",
+ "PrepareRenameResult": "PrepareRename2Gn",
+ "Tuple_ParameterInformation_label_Item1": "UIntCommaUInt",
+ "WorkspaceFoldersServerCapabilities": "WorkspaceFolders5Gn",
+ "[]LSPAny": "[]interface{}",
+ "[]Or_NotebookDocumentSyncOptions_notebookSelector_Elem": "[]PNotebookSelectorPNotebookDocumentSync",
+ "[]Or_Result_textDocument_codeAction_Item0_Elem": "[]CodeAction",
+ "[]PreviousResultId": "[]PreviousResultID",
+ "[]uinteger": "[]uint32",
+ "boolean": "bool",
+ "decimal": "float64",
+ "integer": "int32",
+ "map[DocumentUri][]TextEdit": "map[DocumentURI][]TextEdit",
+ "uinteger": "uint32",
+}
+
+var usedGoplsType = make(map[string]bool)
+
+// methodNames is a map from the method to the name of the function that handles it
+var methodNames = map[string]string{
+ "$/cancelRequest": "CancelRequest",
+ "$/logTrace": "LogTrace",
+ "$/progress": "Progress",
+ "$/setTrace": "SetTrace",
+ "callHierarchy/incomingCalls": "IncomingCalls",
+ "callHierarchy/outgoingCalls": "OutgoingCalls",
+ "client/registerCapability": "RegisterCapability",
+ "client/unregisterCapability": "UnregisterCapability",
+ "codeAction/resolve": "ResolveCodeAction",
+ "codeLens/resolve": "ResolveCodeLens",
+ "completionItem/resolve": "ResolveCompletionItem",
+ "documentLink/resolve": "ResolveDocumentLink",
+ "exit": "Exit",
+ "initialize": "Initialize",
+ "initialized": "Initialized",
+ "inlayHint/resolve": "Resolve",
+ "notebookDocument/didChange": "DidChangeNotebookDocument",
+ "notebookDocument/didClose": "DidCloseNotebookDocument",
+ "notebookDocument/didOpen": "DidOpenNotebookDocument",
+ "notebookDocument/didSave": "DidSaveNotebookDocument",
+ "shutdown": "Shutdown",
+ "telemetry/event": "Event",
+ "textDocument/codeAction": "CodeAction",
+ "textDocument/codeLens": "CodeLens",
+ "textDocument/colorPresentation": "ColorPresentation",
+ "textDocument/completion": "Completion",
+ "textDocument/declaration": "Declaration",
+ "textDocument/definition": "Definition",
+ "textDocument/diagnostic": "Diagnostic",
+ "textDocument/didChange": "DidChange",
+ "textDocument/didClose": "DidClose",
+ "textDocument/didOpen": "DidOpen",
+ "textDocument/didSave": "DidSave",
+ "textDocument/documentColor": "DocumentColor",
+ "textDocument/documentHighlight": "DocumentHighlight",
+ "textDocument/documentLink": "DocumentLink",
+ "textDocument/documentSymbol": "DocumentSymbol",
+ "textDocument/foldingRange": "FoldingRange",
+ "textDocument/formatting": "Formatting",
+ "textDocument/hover": "Hover",
+ "textDocument/implementation": "Implementation",
+ "textDocument/inlayHint": "InlayHint",
+ "textDocument/inlineValue": "InlineValue",
+ "textDocument/linkedEditingRange": "LinkedEditingRange",
+ "textDocument/moniker": "Moniker",
+ "textDocument/onTypeFormatting": "OnTypeFormatting",
+ "textDocument/prepareCallHierarchy": "PrepareCallHierarchy",
+ "textDocument/prepareRename": "PrepareRename",
+ "textDocument/prepareTypeHierarchy": "PrepareTypeHierarchy",
+ "textDocument/publishDiagnostics": "PublishDiagnostics",
+ "textDocument/rangeFormatting": "RangeFormatting",
+ "textDocument/references": "References",
+ "textDocument/rename": "Rename",
+ "textDocument/selectionRange": "SelectionRange",
+ "textDocument/semanticTokens/full": "SemanticTokensFull",
+ "textDocument/semanticTokens/full/delta": "SemanticTokensFullDelta",
+ "textDocument/semanticTokens/range": "SemanticTokensRange",
+ "textDocument/signatureHelp": "SignatureHelp",
+ "textDocument/typeDefinition": "TypeDefinition",
+ "textDocument/willSave": "WillSave",
+ "textDocument/willSaveWaitUntil": "WillSaveWaitUntil",
+ "typeHierarchy/subtypes": "Subtypes",
+ "typeHierarchy/supertypes": "Supertypes",
+ "window/logMessage": "LogMessage",
+ "window/showDocument": "ShowDocument",
+ "window/showMessage": "ShowMessage",
+ "window/showMessageRequest": "ShowMessageRequest",
+ "window/workDoneProgress/cancel": "WorkDoneProgressCancel",
+ "window/workDoneProgress/create": "WorkDoneProgressCreate",
+ "workspace/applyEdit": "ApplyEdit",
+ "workspace/codeLens/refresh": "CodeLensRefresh",
+ "workspace/configuration": "Configuration",
+ "workspace/diagnostic": "DiagnosticWorkspace",
+ "workspace/diagnostic/refresh": "DiagnosticRefresh",
+ "workspace/didChangeConfiguration": "DidChangeConfiguration",
+ "workspace/didChangeWatchedFiles": "DidChangeWatchedFiles",
+ "workspace/didChangeWorkspaceFolders": "DidChangeWorkspaceFolders",
+ "workspace/didCreateFiles": "DidCreateFiles",
+ "workspace/didDeleteFiles": "DidDeleteFiles",
+ "workspace/didRenameFiles": "DidRenameFiles",
+ "workspace/executeCommand": "ExecuteCommand",
+ "workspace/inlayHint/refresh": "InlayHintRefresh",
+ "workspace/inlineValue/refresh": "InlineValueRefresh",
+ "workspace/semanticTokens/refresh": "SemanticTokensRefresh",
+ "workspace/symbol": "Symbol",
+ "workspace/willCreateFiles": "WillCreateFiles",
+ "workspace/willDeleteFiles": "WillDeleteFiles",
+ "workspace/willRenameFiles": "WillRenameFiles",
+ "workspace/workspaceFolders": "WorkspaceFolders",
+ "workspaceSymbol/resolve": "ResolveWorkspaceSymbol",
+}
diff --git a/gopls/internal/lsp/protocol/generate/typenames.go b/gopls/internal/lsp/protocol/generate/typenames.go
new file mode 100644
index 000000000..8bacdd2a1
--- /dev/null
+++ b/gopls/internal/lsp/protocol/generate/typenames.go
@@ -0,0 +1,184 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.19
+// +build go1.19
+
+package main
+
+import (
+ "fmt"
+ "log"
+ "strings"
+)
+
+var typeNames = make(map[*Type]string)
+var genTypes []*newType
+
+func findTypeNames(model Model) {
+ for _, s := range model.Structures {
+ for _, e := range s.Extends {
+ nameType(e, nil) // all references
+ }
+ for _, m := range s.Mixins {
+ nameType(m, nil) // all references
+ }
+ for _, p := range s.Properties {
+ nameType(p.Type, []string{s.Name, p.Name})
+ }
+ }
+ for _, t := range model.Enumerations {
+ nameType(t.Type, []string{t.Name})
+ }
+ for _, t := range model.TypeAliases {
+ nameType(t.Type, []string{t.Name})
+ }
+ for _, r := range model.Requests {
+ nameType(r.Params, []string{"Param", r.Method})
+ nameType(r.Result, []string{"Result", r.Method})
+ nameType(r.RegistrationOptions, []string{"RegOpt", r.Method})
+ }
+ for _, n := range model.Notifications {
+ nameType(n.Params, []string{"Param", n.Method})
+ nameType(n.RegistrationOptions, []string{"RegOpt", n.Method})
+ }
+}
+
+// nameType populates typeNames[t] with the computed name of the type.
+// path is the list of enclosing constructs in the JSON model.
+func nameType(t *Type, path []string) string {
+ if t == nil || typeNames[t] != "" {
+ return ""
+ }
+ switch t.Kind {
+ case "base":
+ typeNames[t] = t.Name
+ return t.Name
+ case "reference":
+ typeNames[t] = t.Name
+ return t.Name
+ case "array":
+ nm := "[]" + nameType(t.Element, append(path, "Elem"))
+ typeNames[t] = nm
+ return nm
+ case "map":
+ key := nameType(t.Key, nil) // never a generated type
+ value := nameType(t.Value.(*Type), append(path, "Value"))
+ nm := "map[" + key + "]" + value
+ typeNames[t] = nm
+ return nm
+ // generated types
+ case "and":
+ nm := nameFromPath("And", path)
+ typeNames[t] = nm
+ for _, it := range t.Items {
+ nameType(it, append(path, "Item"))
+ }
+ genTypes = append(genTypes, &newType{
+ name: nm,
+ typ: t,
+ kind: "and",
+ items: t.Items,
+ line: t.Line,
+ })
+ return nm
+ case "literal":
+ nm := nameFromPath("Lit", path)
+ typeNames[t] = nm
+ for _, p := range t.Value.(ParseLiteral).Properties {
+ nameType(p.Type, append(path, p.Name))
+ }
+ genTypes = append(genTypes, &newType{
+ name: nm,
+ typ: t,
+ kind: "literal",
+ properties: t.Value.(ParseLiteral).Properties,
+ line: t.Line,
+ })
+ return nm
+ case "tuple":
+ nm := nameFromPath("Tuple", path)
+ typeNames[t] = nm
+ for _, it := range t.Items {
+ nameType(it, append(path, "Item"))
+ }
+ genTypes = append(genTypes, &newType{
+ name: nm,
+ typ: t,
+ kind: "tuple",
+ items: t.Items,
+ line: t.Line,
+ })
+ return nm
+ case "or":
+ nm := nameFromPath("Or", path)
+ typeNames[t] = nm
+ for i, it := range t.Items {
+ // these names depend on the ordering within the "or" type
+ nameType(it, append(path, fmt.Sprintf("Item%d", i)))
+ }
+ // this code handles an "or" of stringLiterals (_InitializeParams.trace)
+ names := make(map[string]int)
+ msg := ""
+ for _, it := range t.Items {
+ if line, ok := names[typeNames[it]]; ok {
+ // duplicate component names are bad
+ msg += fmt.Sprintf("lines %d %d dup, %s for %s\n", line, it.Line, typeNames[it], nm)
+ }
+ names[typeNames[it]] = t.Line
+ }
+ // this code handles an "or" of stringLiterals (_InitializeParams.trace)
+ if len(names) == 1 {
+ var solekey string
+ for k := range names {
+ solekey = k // the sole name
+ }
+ if solekey == "string" { // _InitializeParams.trace
+ typeNames[t] = "string"
+ return "string"
+ }
+ // otherwise unexpected
+ log.Printf("unexpected: single-case 'or' type has non-string key %s: %s", nm, solekey)
+ log.Fatal(msg)
+ } else if len(names) == 2 {
+ // if one of the names is null, just use the other, rather than generating an "or".
+ // This removes about 40 types from the generated code. An entry in goplsStar
+ // could be added to handle the null case, if necessary.
+ newNm := ""
+ sawNull := false
+ for k := range names {
+ if k == "null" {
+ sawNull = true
+ } else {
+ newNm = k
+ }
+ }
+ if sawNull {
+ typeNames[t] = newNm
+ return newNm
+ }
+ }
+ genTypes = append(genTypes, &newType{
+ name: nm,
+ typ: t,
+ kind: "or",
+ items: t.Items,
+ line: t.Line,
+ })
+ return nm
+ case "stringLiteral": // a single type, like 'kind' or 'rename'
+ typeNames[t] = "string"
+ return "string"
+ default:
+ log.Fatalf("nameType: %T unexpected, line:%d path:%v", t, t.Line, path)
+ panic("unreachable in nameType")
+ }
+}
+
+func nameFromPath(prefix string, path []string) string {
+ nm := prefix + "_" + strings.Join(path, "_")
+ // methods have slashes
+ nm = strings.ReplaceAll(nm, "/", "_")
+ return nm
+}
diff --git a/gopls/internal/lsp/protocol/generate/types.go b/gopls/internal/lsp/protocol/generate/types.go
new file mode 100644
index 000000000..0d01ae43c
--- /dev/null
+++ b/gopls/internal/lsp/protocol/generate/types.go
@@ -0,0 +1,170 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.19
+// +build go1.19
+
+package main
+
+import (
+ "fmt"
+ "sort"
+)
+
+// Model contains the parsed version of the spec
+type Model struct {
+ Version Metadata `json:"metaData"`
+ Requests []*Request `json:"requests"`
+ Notifications []*Notification `json:"notifications"`
+ Structures []*Structure `json:"structures"`
+ Enumerations []*Enumeration `json:"enumerations"`
+ TypeAliases []*TypeAlias `json:"typeAliases"`
+ Line int `json:"line"`
+}
+
+// Metadata is information about the version of the spec
+type Metadata struct {
+ Version string `json:"version"`
+ Line int `json:"line"`
+}
+
+// A Request is the parsed version of an LSP request
+type Request struct {
+ Documentation string `json:"documentation"`
+ ErrorData *Type `json:"errorData"`
+ Direction string `json:"messageDirection"`
+ Method string `json:"method"`
+ Params *Type `json:"params"`
+ PartialResult *Type `json:"partialResult"`
+ Proposed bool `json:"proposed"`
+ RegistrationMethod string `json:"registrationMethod"`
+ RegistrationOptions *Type `json:"registrationOptions"`
+ Result *Type `json:"result"`
+ Since string `json:"since"`
+ Line int `json:"line"`
+}
+
+// A Notificatin is the parsed version of an LSP notification
+type Notification struct {
+ Documentation string `json:"documentation"`
+ Direction string `json:"messageDirection"`
+ Method string `json:"method"`
+ Params *Type `json:"params"`
+ Proposed bool `json:"proposed"`
+ RegistrationMethod string `json:"registrationMethod"`
+ RegistrationOptions *Type `json:"registrationOptions"`
+ Since string `json:"since"`
+ Line int `json:"line"`
+}
+
+// A Structure is the parsed version of an LSP structure from the spec
+type Structure struct {
+ Documentation string `json:"documentation"`
+ Extends []*Type `json:"extends"`
+ Mixins []*Type `json:"mixins"`
+ Name string `json:"name"`
+ Properties []NameType `json:"properties"`
+ Proposed bool `json:"proposed"`
+ Since string `json:"since"`
+ Line int `json:"line"`
+}
+
+// An enumeration is the parsed version of an LSP enumeration from the spec
+type Enumeration struct {
+ Documentation string `json:"documentation"`
+ Name string `json:"name"`
+ Proposed bool `json:"proposed"`
+ Since string `json:"since"`
+ SupportsCustomValues bool `json:"supportsCustomValues"`
+ Type *Type `json:"type"`
+ Values []NameValue `json:"values"`
+ Line int `json:"line"`
+}
+
+// A TypeAlias is the parsed version of an LSP type alias from the spec
+type TypeAlias struct {
+ Documentation string `json:"documentation"`
+ Deprecated string `json:"deprecated"`
+ Name string `json:"name"`
+ Proposed bool `json:"proposed"`
+ Since string `json:"since"`
+ Type *Type `json:"type"`
+ Line int `json:"line"`
+}
+
+// A NameValue describes an enumeration constant
+type NameValue struct {
+ Documentation string `json:"documentation"`
+ Name string `json:"name"`
+ Proposed bool `json:"proposed"`
+ Since string `json:"since"`
+ Value any `json:"value"` // number or string
+ Line int `json:"line"`
+}
+
+// A Type is the parsed version of an LSP type from the spec,
+// or a Type the code constructs
+type Type struct {
+ Kind string `json:"kind"` // -- which kind goes with which field --
+ Items []*Type `json:"items"` // "and", "or", "tuple"
+ Element *Type `json:"element"` // "array"
+ Name string `json:"name"` // "base", "reference"
+ Key *Type `json:"key"` // "map"
+ Value any `json:"value"` // "map", "stringLiteral", "literal"
+ Line int `json:"line"` // JSON source line
+}
+
+// ParsedLiteral is Type.Value when Type.Kind is "literal"
+type ParseLiteral struct {
+ Properties `json:"properties"`
+}
+
+// A NameType represents the name and type of a structure element
+type NameType struct {
+ Name string `json:"name"`
+ Type *Type `json:"type"`
+ Optional bool `json:"optional"`
+ Documentation string `json:"documentation"`
+ Deprecated string `json:"deprecated"`
+ Since string `json:"since"`
+ Proposed bool `json:"proposed"`
+ Line int `json:"line"`
+}
+
+// Properties are the collection of structure fields
+type Properties []NameType
+
+// addLineNumbers adds a "line" field to each object in the JSON.
+func addLineNumbers(buf []byte) []byte {
+ var ans []byte
+ // In the specification .json file, the delimiter '{' is
+ // always followed by a newline. There are other {s embedded in strings.
+ // json.Token does not return \n, or :, or , so using it would
+ // require parsing the json to reconstruct the missing information.
+ for linecnt, i := 1, 0; i < len(buf); i++ {
+ ans = append(ans, buf[i])
+ switch buf[i] {
+ case '{':
+ if buf[i+1] == '\n' {
+ ans = append(ans, fmt.Sprintf(`"line": %d, `, linecnt)...)
+ // warning: this would fail if the spec file had
+ // `"value": {\n}`, but it does not, as comma is a separator.
+ }
+ case '\n':
+ linecnt++
+ }
+ }
+ return ans
+}
+
+type sortedMap[T any] map[string]T
+
+func (s sortedMap[T]) keys() []string {
+ var keys []string
+ for k := range s {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ return keys
+}
diff --git a/internal/lsp/protocol/log.go b/gopls/internal/lsp/protocol/log.go
index fdcbb7a8d..fdcbb7a8d 100644
--- a/internal/lsp/protocol/log.go
+++ b/gopls/internal/lsp/protocol/log.go
diff --git a/gopls/internal/lsp/protocol/mapper.go b/gopls/internal/lsp/protocol/mapper.go
new file mode 100644
index 000000000..d61524d83
--- /dev/null
+++ b/gopls/internal/lsp/protocol/mapper.go
@@ -0,0 +1,529 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protocol
+
+// This file defines Mapper, which wraps a file content buffer
+// ([]byte) and provides efficient conversion between every kind of
+// position representation.
+//
+// gopls uses four main representations of position:
+//
+// 1. byte offsets, e.g. (start, end int), starting from zero.
+//
+// 2. go/token notation. Use these types when interacting directly
+// with the go/* syntax packages:
+//
+// token.Pos
+// token.FileSet
+// token.File
+//
+// Because File.Offset and File.Pos panic on invalid inputs,
+// we do not call them directly and instead use the safetoken package
+// for these conversions. This is enforced by a static check.
+//
+// Beware also that the methods of token.File have two bugs for which
+// safetoken contains workarounds:
+// - #57490, whereby the parser may create ast.Nodes during error
+// recovery whose computed positions are out of bounds (EOF+1).
+// - #41029, whereby the wrong line number is returned for the EOF position.
+//
+// 3. the span package.
+//
+// span.Point = (line, col8, offset).
+// span.Span = (uri URI, start, end span.Point)
+//
+// Line and column are 1-based.
+// Columns are measured in bytes (UTF-8 codes).
+// All fields are optional.
+//
+// These types are useful as intermediate conversions of validated
+// ranges (though MappedRange is superior as it is self contained
+// and universally convertible). Since their fields are optional
+// they are also useful for parsing user-provided positions (e.g. in
+// the CLI) before we have access to file contents.
+//
+// 4. protocol, the LSP RPC message format.
+//
+// protocol.Position = (Line, Character uint32)
+// protocol.Range = (start, end Position)
+// protocol.Location = (URI, protocol.Range)
+//
+// Line and Character are 0-based.
+// Characters (columns) are measured in UTF-16 codes.
+//
+// protocol.Mapper holds the (URI, Content) of a file, enabling
+// efficient mapping between byte offsets, span ranges, and
+// protocol ranges.
+//
+// protocol.MappedRange holds a protocol.Mapper and valid (start,
+// end int) byte offsets, enabling infallible, efficient conversion
+// to any other format.
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/token"
+ "path/filepath"
+ "sort"
+ "strings"
+ "sync"
+ "unicode/utf8"
+
+ "golang.org/x/tools/gopls/internal/lsp/safetoken"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/bug"
+)
+
+// A Mapper wraps the content of a file and provides mapping
+// between byte offsets and notations of position such as:
+//
+// - (line, col8) pairs, where col8 is a 1-based UTF-8 column number
+// (bytes), as used by the go/token and span packages.
+//
+// - (line, col16) pairs, where col16 is a 1-based UTF-16 column
+// number, as used by the LSP protocol.
+//
+// All conversion methods are named "FromTo", where From and To are the two types.
+// For example, the PointPosition method converts from a Point to a Position.
+//
+// Mapper does not intrinsically depend on go/token-based
+// representations. Use safetoken to map between token.Pos <=> byte
+// offsets, or the convenience methods such as PosPosition,
+// NodePosition, or NodeRange.
+//
+// See overview comments at top of this file.
+type Mapper struct {
+ URI span.URI
+ Content []byte
+
+ // Line-number information is requested only for a tiny
+ // fraction of Mappers, so we compute it lazily.
+ // Call initLines() before accessing fields below.
+ linesOnce sync.Once
+ lineStart []int // byte offset of start of ith line (0-based); last=EOF iff \n-terminated
+ nonASCII bool
+
+ // TODO(adonovan): adding an extra lineStart entry for EOF
+ // might simplify every method that accesses it. Try it out.
+}
+
+// NewMapper creates a new mapper for the given URI and content.
+func NewMapper(uri span.URI, content []byte) *Mapper {
+ return &Mapper{URI: uri, Content: content}
+}
+
+// initLines populates the lineStart table.
+func (m *Mapper) initLines() {
+ m.linesOnce.Do(func() {
+ nlines := bytes.Count(m.Content, []byte("\n"))
+ m.lineStart = make([]int, 1, nlines+1) // initially []int{0}
+ for offset, b := range m.Content {
+ if b == '\n' {
+ m.lineStart = append(m.lineStart, offset+1)
+ }
+ if b >= utf8.RuneSelf {
+ m.nonASCII = true
+ }
+ }
+ })
+}
+
+// -- conversions from span (UTF-8) domain --
+
+// SpanLocation converts a (UTF-8) span to a protocol (UTF-16) range.
+// Precondition: the URIs of SpanLocation and Mapper match.
+func (m *Mapper) SpanLocation(s span.Span) (Location, error) {
+ rng, err := m.SpanRange(s)
+ if err != nil {
+ return Location{}, err
+ }
+ return m.RangeLocation(rng), nil
+}
+
+// SpanRange converts a (UTF-8) span to a protocol (UTF-16) range.
+// Precondition: the URIs of Span and Mapper match.
+func (m *Mapper) SpanRange(s span.Span) (Range, error) {
+ // Assert that we aren't using the wrong mapper.
+ // We check only the base name, and case insensitively,
+ // because we can't assume clean paths, no symbolic links,
+ // case-sensitive directories. The authoritative answer
+ // requires querying the file system, and we don't want
+ // to do that.
+ if !strings.EqualFold(filepath.Base(string(m.URI)), filepath.Base(string(s.URI()))) {
+ return Range{}, bug.Errorf("mapper is for file %q instead of %q", m.URI, s.URI())
+ }
+ start, err := m.PointPosition(s.Start())
+ if err != nil {
+ return Range{}, fmt.Errorf("start: %w", err)
+ }
+ end, err := m.PointPosition(s.End())
+ if err != nil {
+ return Range{}, fmt.Errorf("end: %w", err)
+ }
+ return Range{Start: start, End: end}, nil
+}
+
+// PointPosition converts a valid span (UTF-8) point to a protocol (UTF-16) position.
+func (m *Mapper) PointPosition(p span.Point) (Position, error) {
+ if p.HasPosition() {
+ line, col8 := p.Line()-1, p.Column()-1 // both 0-based
+ m.initLines()
+ if line >= len(m.lineStart) {
+ return Position{}, fmt.Errorf("line number %d out of range (max %d)", line, len(m.lineStart))
+ }
+ offset := m.lineStart[line]
+ end := offset + col8
+
+ // Validate column.
+ if end > len(m.Content) {
+ return Position{}, fmt.Errorf("column is beyond end of file")
+ } else if line+1 < len(m.lineStart) && end >= m.lineStart[line+1] {
+ return Position{}, fmt.Errorf("column is beyond end of line")
+ }
+
+ char := UTF16Len(m.Content[offset:end])
+ return Position{Line: uint32(line), Character: uint32(char)}, nil
+ }
+ if p.HasOffset() {
+ return m.OffsetPosition(p.Offset())
+ }
+ return Position{}, fmt.Errorf("point has neither offset nor line/column")
+}
+
+// -- conversions from byte offsets --
+
+// OffsetLocation converts a byte-offset interval to a protocol (UTF-16) location.
+func (m *Mapper) OffsetLocation(start, end int) (Location, error) {
+ rng, err := m.OffsetRange(start, end)
+ if err != nil {
+ return Location{}, err
+ }
+ return m.RangeLocation(rng), nil
+}
+
+// OffsetRange converts a byte-offset interval to a protocol (UTF-16) range.
+func (m *Mapper) OffsetRange(start, end int) (Range, error) {
+ if start > end {
+ return Range{}, fmt.Errorf("start offset (%d) > end (%d)", start, end)
+ }
+ startPosition, err := m.OffsetPosition(start)
+ if err != nil {
+ return Range{}, fmt.Errorf("start: %v", err)
+ }
+ endPosition, err := m.OffsetPosition(end)
+ if err != nil {
+ return Range{}, fmt.Errorf("end: %v", err)
+ }
+ return Range{Start: startPosition, End: endPosition}, nil
+}
+
+// OffsetSpan converts a byte-offset interval to a (UTF-8) span.
+// The resulting span contains line, column, and offset information.
+func (m *Mapper) OffsetSpan(start, end int) (span.Span, error) {
+ if start > end {
+ return span.Span{}, fmt.Errorf("start offset (%d) > end (%d)", start, end)
+ }
+ startPoint, err := m.OffsetPoint(start)
+ if err != nil {
+ return span.Span{}, fmt.Errorf("start: %v", err)
+ }
+ endPoint, err := m.OffsetPoint(end)
+ if err != nil {
+ return span.Span{}, fmt.Errorf("end: %v", err)
+ }
+ return span.New(m.URI, startPoint, endPoint), nil
+}
+
+// OffsetPosition converts a byte offset to a protocol (UTF-16) position.
+func (m *Mapper) OffsetPosition(offset int) (Position, error) {
+ if !(0 <= offset && offset <= len(m.Content)) {
+ return Position{}, fmt.Errorf("invalid offset %d (want 0-%d)", offset, len(m.Content))
+ }
+ // No error may be returned after this point,
+ // even if the offset does not fall at a rune boundary.
+ // (See panic in MappedRange.Range reachable.)
+
+ line, col16 := m.lineCol16(offset)
+ return Position{Line: uint32(line), Character: uint32(col16)}, nil
+}
+
+// lineCol16 converts a valid byte offset to line and UTF-16 column numbers, both 0-based.
+func (m *Mapper) lineCol16(offset int) (int, int) {
+ line, start, cr := m.line(offset)
+ var col16 int
+ if m.nonASCII {
+ col16 = UTF16Len(m.Content[start:offset])
+ } else {
+ col16 = offset - start
+ }
+ if cr {
+ col16-- // retreat from \r at line end
+ }
+ return line, col16
+}
+
+// lineCol8 converts a valid byte offset to line and UTF-8 column numbers, both 0-based.
+func (m *Mapper) lineCol8(offset int) (int, int) {
+ line, start, cr := m.line(offset)
+ col8 := offset - start
+ if cr {
+ col8-- // retreat from \r at line end
+ }
+ return line, col8
+}
+
+// line returns:
+// - the 0-based index of the line that encloses the (valid) byte offset;
+// - the start offset of that line; and
+// - whether the offset denotes a carriage return (\r) at line end.
+func (m *Mapper) line(offset int) (int, int, bool) {
+ m.initLines()
+ // In effect, binary search returns a 1-based result.
+ line := sort.Search(len(m.lineStart), func(i int) bool {
+ return offset < m.lineStart[i]
+ })
+
+ // Adjustment for line-endings: \r|\n is the same as |\r\n.
+ var eol int
+ if line == len(m.lineStart) {
+ eol = len(m.Content) // EOF
+ } else {
+ eol = m.lineStart[line] - 1
+ }
+ cr := offset == eol && offset > 0 && m.Content[offset-1] == '\r'
+
+ line-- // 0-based
+
+ return line, m.lineStart[line], cr
+}
+
+// OffsetPoint converts a byte offset to a span (UTF-8) point.
+// The resulting point contains line, column, and offset information.
+func (m *Mapper) OffsetPoint(offset int) (span.Point, error) {
+ if !(0 <= offset && offset <= len(m.Content)) {
+ return span.Point{}, fmt.Errorf("invalid offset %d (want 0-%d)", offset, len(m.Content))
+ }
+ line, col8 := m.lineCol8(offset)
+ return span.NewPoint(line+1, col8+1, offset), nil
+}
+
+// OffsetMappedRange returns a MappedRange for the given byte offsets.
+// A MappedRange can be converted to any other form.
+func (m *Mapper) OffsetMappedRange(start, end int) (MappedRange, error) {
+ if !(0 <= start && start <= end && end <= len(m.Content)) {
+ return MappedRange{}, fmt.Errorf("invalid offsets (%d, %d) (file %s has size %d)", start, end, m.URI, len(m.Content))
+ }
+ return MappedRange{m, start, end}, nil
+}
+
+// -- conversions from protocol (UTF-16) domain --
+
+// LocationSpan converts a protocol (UTF-16) Location to a (UTF-8) span.
+// Precondition: the URIs of Location and Mapper match.
+func (m *Mapper) LocationSpan(l Location) (span.Span, error) {
+ // TODO(adonovan): check that l.URI matches m.URI.
+ return m.RangeSpan(l.Range)
+}
+
+// RangeSpan converts a protocol (UTF-16) range to a (UTF-8) span.
+// The resulting span has valid Positions and Offsets.
+func (m *Mapper) RangeSpan(r Range) (span.Span, error) {
+ start, end, err := m.RangeOffsets(r)
+ if err != nil {
+ return span.Span{}, err
+ }
+ return m.OffsetSpan(start, end)
+}
+
+// RangeOffsets converts a protocol (UTF-16) range to start/end byte offsets.
+func (m *Mapper) RangeOffsets(r Range) (int, int, error) {
+ start, err := m.PositionOffset(r.Start)
+ if err != nil {
+ return 0, 0, err
+ }
+ end, err := m.PositionOffset(r.End)
+ if err != nil {
+ return 0, 0, err
+ }
+ return start, end, nil
+}
+
+// PositionOffset converts a protocol (UTF-16) position to a byte offset.
+func (m *Mapper) PositionOffset(p Position) (int, error) {
+ m.initLines()
+
+ // Validate line number.
+ if p.Line > uint32(len(m.lineStart)) {
+ return 0, fmt.Errorf("line number %d out of range 0-%d", p.Line, len(m.lineStart))
+ } else if p.Line == uint32(len(m.lineStart)) {
+ if p.Character == 0 {
+ return len(m.Content), nil // EOF
+ }
+ return 0, fmt.Errorf("column is beyond end of file")
+ }
+
+ offset := m.lineStart[p.Line]
+ content := m.Content[offset:] // rest of file from start of enclosing line
+
+ // Advance bytes up to the required number of UTF-16 codes.
+ col8 := 0
+ for col16 := 0; col16 < int(p.Character); col16++ {
+ r, sz := utf8.DecodeRune(content)
+ if sz == 0 {
+ return 0, fmt.Errorf("column is beyond end of file")
+ }
+ if r == '\n' {
+ return 0, fmt.Errorf("column is beyond end of line")
+ }
+ if sz == 1 && r == utf8.RuneError {
+ return 0, fmt.Errorf("buffer contains invalid UTF-8 text")
+ }
+ content = content[sz:]
+
+ if r >= 0x10000 {
+ col16++ // rune was encoded by a pair of surrogate UTF-16 codes
+
+ if col16 == int(p.Character) {
+ break // requested position is in the middle of a rune
+ }
+ }
+ col8 += sz
+ }
+ return offset + col8, nil
+}
+
+// PositionPoint converts a protocol (UTF-16) position to a span (UTF-8) point.
+// The resulting point has a valid Position and Offset.
+func (m *Mapper) PositionPoint(p Position) (span.Point, error) {
+ offset, err := m.PositionOffset(p)
+ if err != nil {
+ return span.Point{}, err
+ }
+ line, col8 := m.lineCol8(offset)
+
+ return span.NewPoint(line+1, col8+1, offset), nil
+}
+
+// -- go/token domain convenience methods --
+
+// PosPosition converts a token pos to a protocol (UTF-16) position.
+func (m *Mapper) PosPosition(tf *token.File, pos token.Pos) (Position, error) {
+ offset, err := safetoken.Offset(tf, pos)
+ if err != nil {
+ return Position{}, err
+ }
+ return m.OffsetPosition(offset)
+}
+
+// PosLocation converts a token range to a protocol (UTF-16) location.
+func (m *Mapper) PosLocation(tf *token.File, start, end token.Pos) (Location, error) {
+ startOffset, endOffset, err := safetoken.Offsets(tf, start, end)
+ if err != nil {
+ return Location{}, err
+ }
+ rng, err := m.OffsetRange(startOffset, endOffset)
+ if err != nil {
+ return Location{}, err
+ }
+ return m.RangeLocation(rng), nil
+}
+
+// PosRange converts a token range to a protocol (UTF-16) range.
+func (m *Mapper) PosRange(tf *token.File, start, end token.Pos) (Range, error) {
+ startOffset, endOffset, err := safetoken.Offsets(tf, start, end)
+ if err != nil {
+ return Range{}, err
+ }
+ return m.OffsetRange(startOffset, endOffset)
+}
+
+// NodeRange converts a syntax node range to a protocol (UTF-16) range.
+func (m *Mapper) NodeRange(tf *token.File, node ast.Node) (Range, error) {
+ return m.PosRange(tf, node.Pos(), node.End())
+}
+
+// RangeLocation pairs a protocol Range with its URI, in a Location.
+func (m *Mapper) RangeLocation(rng Range) Location {
+ return Location{URI: URIFromSpanURI(m.URI), Range: rng}
+}
+
+// PosMappedRange returns a MappedRange for the given token.Pos range.
+func (m *Mapper) PosMappedRange(tf *token.File, start, end token.Pos) (MappedRange, error) {
+ startOffset, endOffset, err := safetoken.Offsets(tf, start, end)
+ if err != nil {
+ return MappedRange{}, nil
+ }
+ return m.OffsetMappedRange(startOffset, endOffset)
+}
+
+// NodeMappedRange returns a MappedRange for the given node range.
+func (m *Mapper) NodeMappedRange(tf *token.File, node ast.Node) (MappedRange, error) {
+ return m.PosMappedRange(tf, node.Pos(), node.End())
+}
+
+// -- MappedRange --
+
+// A MappedRange represents a valid byte-offset range of a file.
+// Through its Mapper it can be converted into other forms such
+// as protocol.Range or span.Span.
+//
+// Construct one by calling Mapper.OffsetMappedRange with start/end offsets.
+// From the go/token domain, call safetoken.Offsets first,
+// or use a helper such as ParsedGoFile.MappedPosRange.
+//
+// Two MappedRanges produced the same Mapper are equal if and only if they
+// denote the same range. Two MappedRanges produced by different Mappers
+// are unequal even when they represent the same range of the same file.
+type MappedRange struct {
+ Mapper *Mapper
+ start, end int // valid byte offsets: 0 <= start <= end <= len(Mapper.Content)
+}
+
+// Offsets returns the (start, end) byte offsets of this range.
+func (mr MappedRange) Offsets() (start, end int) { return mr.start, mr.end }
+
+// -- convenience functions --
+
+// URI returns the URI of the range's file.
+func (mr MappedRange) URI() span.URI {
+ return mr.Mapper.URI
+}
+
+// Range returns the range in protocol (UTF-16) form.
+func (mr MappedRange) Range() Range {
+ rng, err := mr.Mapper.OffsetRange(mr.start, mr.end)
+ if err != nil {
+ panic(err) // can't happen
+ }
+ return rng
+}
+
+// Location returns the range in protocol location (UTF-16) form.
+func (mr MappedRange) Location() Location {
+ return mr.Mapper.RangeLocation(mr.Range())
+}
+
+// Span returns the range in span (UTF-8) form.
+func (mr MappedRange) Span() span.Span {
+ spn, err := mr.Mapper.OffsetSpan(mr.start, mr.end)
+ if err != nil {
+ panic(err) // can't happen
+ }
+ return spn
+}
+
+// String formats the range in span (UTF-8) notation.
+func (mr MappedRange) String() string {
+ return fmt.Sprint(mr.Span())
+}
+
+// LocationTextDocumentPositionParams converts its argument to its result.
+func LocationTextDocumentPositionParams(loc Location) TextDocumentPositionParams {
+ return TextDocumentPositionParams{
+ TextDocument: TextDocumentIdentifier{URI: loc.URI},
+ Position: loc.Range.Start,
+ }
+}
diff --git a/gopls/internal/lsp/protocol/mapper_test.go b/gopls/internal/lsp/protocol/mapper_test.go
new file mode 100644
index 000000000..0780491de
--- /dev/null
+++ b/gopls/internal/lsp/protocol/mapper_test.go
@@ -0,0 +1,441 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protocol_test
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/span"
+)
+
+// This file tests Mapper's logic for converting between
+// span.Point and UTF-16 columns. (The strange form attests to an
+// earlier abstraction.)
+
+// 𐐀 is U+10400 = [F0 90 90 80] in UTF-8, [D801 DC00] in UTF-16.
+var funnyString = []byte("𐐀23\n𐐀45")
+
+var toUTF16Tests = []struct {
+ scenario string
+ input []byte
+ line int // 1-indexed count
+ col int // 1-indexed byte position in line
+ offset int // 0-indexed byte offset into input
+ resUTF16col int // 1-indexed UTF-16 col number
+ pre string // everything before the cursor on the line
+ post string // everything from the cursor onwards
+ err string // expected error string in call to ToUTF16Column
+ issue *bool
+}{
+ {
+ scenario: "cursor missing content",
+ input: nil,
+ offset: -1,
+ err: "point has neither offset nor line/column",
+ },
+ {
+ scenario: "cursor missing position",
+ input: funnyString,
+ line: -1,
+ col: -1,
+ offset: -1,
+ err: "point has neither offset nor line/column",
+ },
+ {
+ scenario: "zero length input; cursor at first col, first line",
+ input: []byte(""),
+ line: 1,
+ col: 1,
+ offset: 0,
+ resUTF16col: 1,
+ },
+ {
+ scenario: "cursor before funny character; first line",
+ input: funnyString,
+ line: 1,
+ col: 1,
+ offset: 0,
+ resUTF16col: 1,
+ pre: "",
+ post: "𐐀23",
+ },
+ {
+ scenario: "cursor after funny character; first line",
+ input: funnyString,
+ line: 1,
+ col: 5, // 4 + 1 (1-indexed)
+ offset: 4, // (unused since we have line+col)
+ resUTF16col: 3, // 2 + 1 (1-indexed)
+ pre: "𐐀",
+ post: "23",
+ },
+ {
+ scenario: "cursor after last character on first line",
+ input: funnyString,
+ line: 1,
+ col: 7, // 4 + 1 + 1 + 1 (1-indexed)
+ offset: 6, // 4 + 1 + 1 (unused since we have line+col)
+ resUTF16col: 5, // 2 + 1 + 1 + 1 (1-indexed)
+ pre: "𐐀23",
+ post: "",
+ },
+ {
+ scenario: "cursor before funny character; second line",
+ input: funnyString,
+ line: 2,
+ col: 1,
+ offset: 7, // length of first line (unused since we have line+col)
+ resUTF16col: 1,
+ pre: "",
+ post: "𐐀45",
+ },
+ {
+ scenario: "cursor after funny character; second line",
+ input: funnyString,
+ line: 1,
+ col: 5, // 4 + 1 (1-indexed)
+ offset: 11, // 7 (length of first line) + 4 (unused since we have line+col)
+ resUTF16col: 3, // 2 + 1 (1-indexed)
+ pre: "𐐀",
+ post: "45",
+ },
+ {
+ scenario: "cursor after last character on second line",
+ input: funnyString,
+ line: 2,
+ col: 7, // 4 + 1 + 1 + 1 (1-indexed)
+ offset: 13, // 7 (length of first line) + 4 + 1 + 1 (unused since we have line+col)
+ resUTF16col: 5, // 2 + 1 + 1 + 1 (1-indexed)
+ pre: "𐐀45",
+ post: "",
+ },
+ {
+ scenario: "cursor beyond end of file",
+ input: funnyString,
+ line: 2,
+ col: 8, // 4 + 1 + 1 + 1 + 1 (1-indexed)
+ offset: 14, // 4 + 1 + 1 + 1 (unused since we have line+col)
+ err: "column is beyond end of file",
+ },
+}
+
+var fromUTF16Tests = []struct {
+ scenario string
+ input []byte
+ line int // 1-indexed line number (isn't actually used)
+ utf16col int // 1-indexed UTF-16 col number
+ resCol int // 1-indexed byte position in line
+ resOffset int // 0-indexed byte offset into input
+ pre string // everything before the cursor on the line
+ post string // everything from the cursor onwards
+ err string // expected error string in call to ToUTF16Column
+}{
+ {
+ scenario: "zero length input; cursor at first col, first line",
+ input: []byte(""),
+ line: 1,
+ utf16col: 1,
+ resCol: 1,
+ resOffset: 0,
+ pre: "",
+ post: "",
+ },
+ {
+ scenario: "cursor before funny character",
+ input: funnyString,
+ line: 1,
+ utf16col: 1,
+ resCol: 1,
+ resOffset: 0,
+ pre: "",
+ post: "𐐀23",
+ },
+ {
+ scenario: "cursor after funny character",
+ input: funnyString,
+ line: 1,
+ utf16col: 3,
+ resCol: 5,
+ resOffset: 4,
+ pre: "𐐀",
+ post: "23",
+ },
+ {
+ scenario: "cursor after last character on line",
+ input: funnyString,
+ line: 1,
+ utf16col: 5,
+ resCol: 7,
+ resOffset: 6,
+ pre: "𐐀23",
+ post: "",
+ },
+ {
+ scenario: "cursor beyond last character on line",
+ input: funnyString,
+ line: 1,
+ utf16col: 6,
+ resCol: 7,
+ resOffset: 6,
+ pre: "𐐀23",
+ post: "",
+ err: "column is beyond end of line",
+ },
+ {
+ scenario: "cursor before funny character; second line",
+ input: funnyString,
+ line: 2,
+ utf16col: 1,
+ resCol: 1,
+ resOffset: 7,
+ pre: "",
+ post: "𐐀45",
+ },
+ {
+ scenario: "cursor after funny character; second line",
+ input: funnyString,
+ line: 2,
+ utf16col: 3, // 2 + 1 (1-indexed)
+ resCol: 5, // 4 + 1 (1-indexed)
+ resOffset: 11, // 7 (length of first line) + 4
+ pre: "𐐀",
+ post: "45",
+ },
+ {
+ scenario: "cursor after last character on second line",
+ input: funnyString,
+ line: 2,
+ utf16col: 5, // 2 + 1 + 1 + 1 (1-indexed)
+ resCol: 7, // 4 + 1 + 1 + 1 (1-indexed)
+ resOffset: 13, // 7 (length of first line) + 4 + 1 + 1
+ pre: "𐐀45",
+ post: "",
+ },
+ {
+ scenario: "cursor beyond end of file",
+ input: funnyString,
+ line: 2,
+ utf16col: 6, // 2 + 1 + 1 + 1 + 1(1-indexed)
+ resCol: 8, // 4 + 1 + 1 + 1 + 1 (1-indexed)
+ resOffset: 14, // 7 (length of first line) + 4 + 1 + 1 + 1
+ err: "column is beyond end of file",
+ },
+}
+
+func TestToUTF16(t *testing.T) {
+ for _, e := range toUTF16Tests {
+ t.Run(e.scenario, func(t *testing.T) {
+ if e.issue != nil && !*e.issue {
+ t.Skip("expected to fail")
+ }
+ p := span.NewPoint(e.line, e.col, e.offset)
+ m := protocol.NewMapper("", e.input)
+ pos, err := m.PointPosition(p)
+ if err != nil {
+ if err.Error() != e.err {
+ t.Fatalf("expected error %v; got %v", e.err, err)
+ }
+ return
+ }
+ if e.err != "" {
+ t.Fatalf("unexpected success; wanted %v", e.err)
+ }
+ got := int(pos.Character) + 1
+ if got != e.resUTF16col {
+ t.Fatalf("expected result %v; got %v", e.resUTF16col, got)
+ }
+ pre, post := getPrePost(e.input, p.Offset())
+ if string(pre) != e.pre {
+ t.Fatalf("expected #%d pre %q; got %q", p.Offset(), e.pre, pre)
+ }
+ if string(post) != e.post {
+ t.Fatalf("expected #%d, post %q; got %q", p.Offset(), e.post, post)
+ }
+ })
+ }
+}
+
+func TestFromUTF16(t *testing.T) {
+ for _, e := range fromUTF16Tests {
+ t.Run(e.scenario, func(t *testing.T) {
+ m := protocol.NewMapper("", []byte(e.input))
+ p, err := m.PositionPoint(protocol.Position{
+ Line: uint32(e.line - 1),
+ Character: uint32(e.utf16col - 1),
+ })
+ if err != nil {
+ if err.Error() != e.err {
+ t.Fatalf("expected error %v; got %v", e.err, err)
+ }
+ return
+ }
+ if e.err != "" {
+ t.Fatalf("unexpected success; wanted %v", e.err)
+ }
+ if p.Column() != e.resCol {
+ t.Fatalf("expected resulting col %v; got %v", e.resCol, p.Column())
+ }
+ if p.Offset() != e.resOffset {
+ t.Fatalf("expected resulting offset %v; got %v", e.resOffset, p.Offset())
+ }
+ pre, post := getPrePost(e.input, p.Offset())
+ if string(pre) != e.pre {
+ t.Fatalf("expected #%d pre %q; got %q", p.Offset(), e.pre, pre)
+ }
+ if string(post) != e.post {
+ t.Fatalf("expected #%d post %q; got %q", p.Offset(), e.post, post)
+ }
+ })
+ }
+}
+
+func getPrePost(content []byte, offset int) (string, string) {
+ pre, post := string(content)[:offset], string(content)[offset:]
+ if i := strings.LastIndex(pre, "\n"); i >= 0 {
+ pre = pre[i+1:]
+ }
+ if i := strings.IndexRune(post, '\n'); i >= 0 {
+ post = post[:i]
+ }
+ return pre, post
+}
+
+// -- these are the historical lsppos tests --
+
+type testCase struct {
+ content string // input text
+ substrOrOffset interface{} // explicit integer offset, or a substring
+ wantLine, wantChar int // expected LSP position information
+}
+
+// offset returns the test case byte offset
+func (c testCase) offset() int {
+ switch x := c.substrOrOffset.(type) {
+ case int:
+ return x
+ case string:
+ i := strings.Index(c.content, x)
+ if i < 0 {
+ panic(fmt.Sprintf("%q does not contain substring %q", c.content, x))
+ }
+ return i
+ }
+ panic("substrOrIndex must be an integer or string")
+}
+
+var tests = []testCase{
+ {"a𐐀b", "a", 0, 0},
+ {"a𐐀b", "𐐀", 0, 1},
+ {"a𐐀b", "b", 0, 3},
+ {"a𐐀b\n", "\n", 0, 4},
+ {"a𐐀b\r\n", "\n", 0, 4}, // \r|\n is not a valid position, so we move back to the end of the first line.
+ {"a𐐀b\r\nx", "x", 1, 0},
+ {"a𐐀b\r\nx\ny", "y", 2, 0},
+
+ // Testing EOL and EOF positions
+ {"", 0, 0, 0}, // 0th position of an empty buffer is (0, 0)
+ {"abc", "c", 0, 2},
+ {"abc", 3, 0, 3},
+ {"abc\n", "\n", 0, 3},
+ {"abc\n", 4, 1, 0}, // position after a newline is on the next line
+}
+
+func TestLineChar(t *testing.T) {
+ for _, test := range tests {
+ m := protocol.NewMapper("", []byte(test.content))
+ offset := test.offset()
+ posn, _ := m.OffsetPosition(offset)
+ gotLine, gotChar := int(posn.Line), int(posn.Character)
+ if gotLine != test.wantLine || gotChar != test.wantChar {
+ t.Errorf("LineChar(%d) = (%d,%d), want (%d,%d)", offset, gotLine, gotChar, test.wantLine, test.wantChar)
+ }
+ }
+}
+
+func TestInvalidOffset(t *testing.T) {
+ content := []byte("a𐐀b\r\nx\ny")
+ m := protocol.NewMapper("", content)
+ for _, offset := range []int{-1, 100} {
+ posn, err := m.OffsetPosition(offset)
+ if err == nil {
+ t.Errorf("OffsetPosition(%d) = %s, want error", offset, posn)
+ }
+ }
+}
+
+func TestPosition(t *testing.T) {
+ for _, test := range tests {
+ m := protocol.NewMapper("", []byte(test.content))
+ offset := test.offset()
+ got, err := m.OffsetPosition(offset)
+ if err != nil {
+ t.Errorf("OffsetPosition(%d) failed: %v", offset, err)
+ continue
+ }
+ want := protocol.Position{Line: uint32(test.wantLine), Character: uint32(test.wantChar)}
+ if got != want {
+ t.Errorf("Position(%d) = %v, want %v", offset, got, want)
+ }
+ }
+}
+
+func TestRange(t *testing.T) {
+ for _, test := range tests {
+ m := protocol.NewMapper("", []byte(test.content))
+ offset := test.offset()
+ got, err := m.OffsetRange(0, offset)
+ if err != nil {
+ t.Fatal(err)
+ }
+ want := protocol.Range{
+ End: protocol.Position{Line: uint32(test.wantLine), Character: uint32(test.wantChar)},
+ }
+ if got != want {
+ t.Errorf("Range(%d) = %v, want %v", offset, got, want)
+ }
+ }
+}
+
+func TestBytesOffset(t *testing.T) {
+ tests := []struct {
+ text string
+ pos protocol.Position
+ want int
+ }{
+ // U+10400 encodes as [F0 90 90 80] in UTF-8 and [D801 DC00] in UTF-16.
+ {text: `a𐐀b`, pos: protocol.Position{Line: 0, Character: 0}, want: 0},
+ {text: `a𐐀b`, pos: protocol.Position{Line: 0, Character: 1}, want: 1},
+ {text: `a𐐀b`, pos: protocol.Position{Line: 0, Character: 2}, want: 1},
+ {text: `a𐐀b`, pos: protocol.Position{Line: 0, Character: 3}, want: 5},
+ {text: `a𐐀b`, pos: protocol.Position{Line: 0, Character: 4}, want: 6},
+ {text: `a𐐀b`, pos: protocol.Position{Line: 0, Character: 5}, want: -1},
+ {text: "aaa\nbbb\n", pos: protocol.Position{Line: 0, Character: 3}, want: 3},
+ {text: "aaa\nbbb\n", pos: protocol.Position{Line: 0, Character: 4}, want: -1},
+ {text: "aaa\nbbb\n", pos: protocol.Position{Line: 1, Character: 0}, want: 4},
+ {text: "aaa\nbbb\n", pos: protocol.Position{Line: 1, Character: 3}, want: 7},
+ {text: "aaa\nbbb\n", pos: protocol.Position{Line: 1, Character: 4}, want: -1},
+ {text: "aaa\nbbb\n", pos: protocol.Position{Line: 2, Character: 0}, want: 8},
+ {text: "aaa\nbbb\n", pos: protocol.Position{Line: 2, Character: 1}, want: -1},
+ {text: "aaa\nbbb\n\n", pos: protocol.Position{Line: 2, Character: 0}, want: 8},
+ }
+
+ for i, test := range tests {
+ fname := fmt.Sprintf("test %d", i)
+ uri := span.URIFromPath(fname)
+ mapper := protocol.NewMapper(uri, []byte(test.text))
+ got, err := mapper.PositionPoint(test.pos)
+ if err != nil && test.want != -1 {
+ t.Errorf("%d: unexpected error: %v", i, err)
+ }
+ if err == nil && got.Offset() != test.want {
+ t.Errorf("want %d for %q(Line:%d,Character:%d), but got %d", test.want, test.text, int(test.pos.Line), int(test.pos.Character), got.Offset())
+ }
+ }
+}
+
+// -- end --
diff --git a/gopls/internal/lsp/protocol/protocol.go b/gopls/internal/lsp/protocol/protocol.go
new file mode 100644
index 000000000..7ca8f2bc6
--- /dev/null
+++ b/gopls/internal/lsp/protocol/protocol.go
@@ -0,0 +1,284 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protocol
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/jsonrpc2"
+ jsonrpc2_v2 "golang.org/x/tools/internal/jsonrpc2_v2"
+ "golang.org/x/tools/internal/xcontext"
+)
+
+var (
+ // RequestCancelledError should be used when a request is cancelled early.
+ RequestCancelledError = jsonrpc2.NewError(-32800, "JSON RPC cancelled")
+ RequestCancelledErrorV2 = jsonrpc2_v2.NewError(-32800, "JSON RPC cancelled")
+)
+
+type ClientCloser interface {
+ Client
+ io.Closer
+}
+
+type connSender interface {
+ io.Closer
+
+ Notify(ctx context.Context, method string, params interface{}) error
+ Call(ctx context.Context, method string, params, result interface{}) error
+}
+
+type clientDispatcher struct {
+ sender connSender
+}
+
+func (c *clientDispatcher) Close() error {
+ return c.sender.Close()
+}
+
+// ClientDispatcher returns a Client that dispatches LSP requests across the
+// given jsonrpc2 connection.
+func ClientDispatcher(conn jsonrpc2.Conn) ClientCloser {
+ return &clientDispatcher{sender: clientConn{conn}}
+}
+
+type clientConn struct {
+ conn jsonrpc2.Conn
+}
+
+func (c clientConn) Close() error {
+ return c.conn.Close()
+}
+
+func (c clientConn) Notify(ctx context.Context, method string, params interface{}) error {
+ return c.conn.Notify(ctx, method, params)
+}
+
+func (c clientConn) Call(ctx context.Context, method string, params interface{}, result interface{}) error {
+ id, err := c.conn.Call(ctx, method, params, result)
+ if ctx.Err() != nil {
+ cancelCall(ctx, c, id)
+ }
+ return err
+}
+
+func ClientDispatcherV2(conn *jsonrpc2_v2.Connection) ClientCloser {
+ return &clientDispatcher{clientConnV2{conn}}
+}
+
+type clientConnV2 struct {
+ conn *jsonrpc2_v2.Connection
+}
+
+func (c clientConnV2) Close() error {
+ return c.conn.Close()
+}
+
+func (c clientConnV2) Notify(ctx context.Context, method string, params interface{}) error {
+ return c.conn.Notify(ctx, method, params)
+}
+
+func (c clientConnV2) Call(ctx context.Context, method string, params interface{}, result interface{}) error {
+ call := c.conn.Call(ctx, method, params)
+ err := call.Await(ctx, result)
+ if ctx.Err() != nil {
+ detached := xcontext.Detach(ctx)
+ c.conn.Notify(detached, "$/cancelRequest", &CancelParams{ID: call.ID().Raw()})
+ }
+ return err
+}
+
+// ServerDispatcher returns a Server that dispatches LSP requests across the
+// given jsonrpc2 connection.
+func ServerDispatcher(conn jsonrpc2.Conn) Server {
+ return &serverDispatcher{sender: clientConn{conn}}
+}
+
+func ServerDispatcherV2(conn *jsonrpc2_v2.Connection) Server {
+ return &serverDispatcher{sender: clientConnV2{conn}}
+}
+
+type serverDispatcher struct {
+ sender connSender
+}
+
+func ClientHandler(client Client, handler jsonrpc2.Handler) jsonrpc2.Handler {
+ return func(ctx context.Context, reply jsonrpc2.Replier, req jsonrpc2.Request) error {
+ if ctx.Err() != nil {
+ ctx := xcontext.Detach(ctx)
+ return reply(ctx, nil, RequestCancelledError)
+ }
+ handled, err := clientDispatch(ctx, client, reply, req)
+ if handled || err != nil {
+ return err
+ }
+ return handler(ctx, reply, req)
+ }
+}
+
+func ClientHandlerV2(client Client) jsonrpc2_v2.Handler {
+ return jsonrpc2_v2.HandlerFunc(func(ctx context.Context, req *jsonrpc2_v2.Request) (interface{}, error) {
+ if ctx.Err() != nil {
+ return nil, RequestCancelledErrorV2
+ }
+ req1 := req2to1(req)
+ var (
+ result interface{}
+ resErr error
+ )
+ replier := func(_ context.Context, res interface{}, err error) error {
+ if err != nil {
+ resErr = err
+ return nil
+ }
+ result = res
+ return nil
+ }
+ _, err := clientDispatch(ctx, client, replier, req1)
+ if err != nil {
+ return nil, err
+ }
+ return result, resErr
+ })
+}
+
+func ServerHandler(server Server, handler jsonrpc2.Handler) jsonrpc2.Handler {
+ return func(ctx context.Context, reply jsonrpc2.Replier, req jsonrpc2.Request) error {
+ if ctx.Err() != nil {
+ ctx := xcontext.Detach(ctx)
+ return reply(ctx, nil, RequestCancelledError)
+ }
+ handled, err := serverDispatch(ctx, server, reply, req)
+ if handled || err != nil {
+ return err
+ }
+ //TODO: This code is wrong, it ignores handler and assumes non standard
+ // request handles everything
+ // non standard request should just be a layered handler.
+ var params interface{}
+ if err := json.Unmarshal(req.Params(), &params); err != nil {
+ return sendParseError(ctx, reply, err)
+ }
+ resp, err := server.NonstandardRequest(ctx, req.Method(), params)
+ return reply(ctx, resp, err)
+
+ }
+}
+
+func ServerHandlerV2(server Server) jsonrpc2_v2.Handler {
+ return jsonrpc2_v2.HandlerFunc(func(ctx context.Context, req *jsonrpc2_v2.Request) (interface{}, error) {
+ if ctx.Err() != nil {
+ return nil, RequestCancelledErrorV2
+ }
+ req1 := req2to1(req)
+ var (
+ result interface{}
+ resErr error
+ )
+ replier := func(_ context.Context, res interface{}, err error) error {
+ if err != nil {
+ resErr = err
+ return nil
+ }
+ result = res
+ return nil
+ }
+ _, err := serverDispatch(ctx, server, replier, req1)
+ if err != nil {
+ return nil, err
+ }
+ return result, resErr
+ })
+}
+
+func req2to1(req2 *jsonrpc2_v2.Request) jsonrpc2.Request {
+ if req2.ID.IsValid() {
+ raw := req2.ID.Raw()
+ var idv1 jsonrpc2.ID
+ switch v := raw.(type) {
+ case int64:
+ idv1 = jsonrpc2.NewIntID(v)
+ case string:
+ idv1 = jsonrpc2.NewStringID(v)
+ default:
+ panic(fmt.Sprintf("unsupported ID type %T", raw))
+ }
+ req1, err := jsonrpc2.NewCall(idv1, req2.Method, req2.Params)
+ if err != nil {
+ panic(err)
+ }
+ return req1
+ }
+ req1, err := jsonrpc2.NewNotification(req2.Method, req2.Params)
+ if err != nil {
+ panic(err)
+ }
+ return req1
+}
+
+func Handlers(handler jsonrpc2.Handler) jsonrpc2.Handler {
+ return CancelHandler(
+ jsonrpc2.AsyncHandler(
+ jsonrpc2.MustReplyHandler(handler)))
+}
+
+func CancelHandler(handler jsonrpc2.Handler) jsonrpc2.Handler {
+ handler, canceller := jsonrpc2.CancelHandler(handler)
+ return func(ctx context.Context, reply jsonrpc2.Replier, req jsonrpc2.Request) error {
+ if req.Method() != "$/cancelRequest" {
+ // TODO(iancottrell): See if we can generate a reply for the request to be cancelled
+ // at the point of cancellation rather than waiting for gopls to naturally reply.
+ // To do that, we need to keep track of whether a reply has been sent already and
+ // be careful about racing between the two paths.
+ // TODO(iancottrell): Add a test that watches the stream and verifies the response
+ // for the cancelled request flows.
+ replyWithDetachedContext := func(ctx context.Context, resp interface{}, err error) error {
+ // https://microsoft.github.io/language-server-protocol/specifications/specification-current/#cancelRequest
+ if ctx.Err() != nil && err == nil {
+ err = RequestCancelledError
+ }
+ ctx = xcontext.Detach(ctx)
+ return reply(ctx, resp, err)
+ }
+ return handler(ctx, replyWithDetachedContext, req)
+ }
+ var params CancelParams
+ if err := json.Unmarshal(req.Params(), &params); err != nil {
+ return sendParseError(ctx, reply, err)
+ }
+ if n, ok := params.ID.(float64); ok {
+ canceller(jsonrpc2.NewIntID(int64(n)))
+ } else if s, ok := params.ID.(string); ok {
+ canceller(jsonrpc2.NewStringID(s))
+ } else {
+ return sendParseError(ctx, reply, fmt.Errorf("request ID %v malformed", params.ID))
+ }
+ return reply(ctx, nil, nil)
+ }
+}
+
+func Call(ctx context.Context, conn jsonrpc2.Conn, method string, params interface{}, result interface{}) error {
+ id, err := conn.Call(ctx, method, params, result)
+ if ctx.Err() != nil {
+ cancelCall(ctx, clientConn{conn}, id)
+ }
+ return err
+}
+
+func cancelCall(ctx context.Context, sender connSender, id jsonrpc2.ID) {
+ ctx = xcontext.Detach(ctx)
+ ctx, done := event.Start(ctx, "protocol.canceller")
+ defer done()
+ // Note that only *jsonrpc2.ID implements json.Marshaler.
+ sender.Notify(ctx, "$/cancelRequest", &CancelParams{ID: &id})
+}
+
+func sendParseError(ctx context.Context, reply jsonrpc2.Replier, err error) error {
+ return reply(ctx, nil, fmt.Errorf("%w: %s", jsonrpc2.ErrParse, err))
+}
diff --git a/gopls/internal/lsp/protocol/span.go b/gopls/internal/lsp/protocol/span.go
new file mode 100644
index 000000000..d484f8f74
--- /dev/null
+++ b/gopls/internal/lsp/protocol/span.go
@@ -0,0 +1,118 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protocol
+
+import (
+ "fmt"
+ "unicode/utf8"
+
+ "golang.org/x/tools/gopls/internal/span"
+)
+
+func URIFromSpanURI(uri span.URI) DocumentURI {
+ return DocumentURI(uri) // simple conversion
+}
+
+func URIFromPath(path string) DocumentURI {
+ return URIFromSpanURI(span.URIFromPath(path)) // normalizing conversion
+}
+
+func (u DocumentURI) SpanURI() span.URI {
+ return span.URIFromURI(string(u)) // normalizing conversion
+}
+
+func IsPoint(r Range) bool {
+ return r.Start.Line == r.End.Line && r.Start.Character == r.End.Character
+}
+
+// CompareLocation defines a three-valued comparison over locations,
+// lexicographically ordered by (URI, Range).
+func CompareLocation(x, y Location) int {
+ if x.URI != y.URI {
+ if x.URI < y.URI {
+ return -1
+ } else {
+ return +1
+ }
+ }
+ return CompareRange(x.Range, y.Range)
+}
+
+// CompareRange returns -1 if a is before b, 0 if a == b, and 1 if a is after b.
+//
+// A range a is defined to be 'before' b if a.Start is before b.Start, or
+// a.Start == b.Start and a.End is before b.End.
+func CompareRange(a, b Range) int {
+ if r := ComparePosition(a.Start, b.Start); r != 0 {
+ return r
+ }
+ return ComparePosition(a.End, b.End)
+}
+
+// ComparePosition returns -1 if a is before b, 0 if a == b, and 1 if a is after b.
+func ComparePosition(a, b Position) int {
+ if a.Line != b.Line {
+ if a.Line < b.Line {
+ return -1
+ } else {
+ return +1
+ }
+ }
+ if a.Character != b.Character {
+ if a.Character < b.Character {
+ return -1
+ } else {
+ return +1
+ }
+ }
+ return 0
+}
+
+func Intersect(a, b Range) bool {
+ if a.Start.Line > b.End.Line || a.End.Line < b.Start.Line {
+ return false
+ }
+ return !((a.Start.Line == b.End.Line) && a.Start.Character > b.End.Character ||
+ (a.End.Line == b.Start.Line) && a.End.Character < b.Start.Character)
+}
+
+// Format implements fmt.Formatter.
+//
+// Note: Formatter is implemented instead of Stringer (presumably) for
+// performance reasons, though it is not clear that it matters in practice.
+func (r Range) Format(f fmt.State, _ rune) {
+ fmt.Fprintf(f, "%v-%v", r.Start, r.End)
+}
+
+// Format implements fmt.Formatter.
+//
+// See Range.Format for discussion of why the Formatter interface is
+// implemented rather than Stringer.
+func (p Position) Format(f fmt.State, _ rune) {
+ fmt.Fprintf(f, "%v:%v", p.Line, p.Character)
+}
+
+// -- implementation helpers --
+
+// UTF16Len returns the number of codes in the UTF-16 transcoding of s.
+func UTF16Len(s []byte) int {
+ var n int
+ for len(s) > 0 {
+ n++
+
+ // Fast path for ASCII.
+ if s[0] < 0x80 {
+ s = s[1:]
+ continue
+ }
+
+ r, size := utf8.DecodeRune(s)
+ if r >= 0x10000 {
+ n++ // surrogate pair
+ }
+ s = s[size:]
+ }
+ return n
+}
diff --git a/gopls/internal/lsp/protocol/tsclient.go b/gopls/internal/lsp/protocol/tsclient.go
new file mode 100644
index 000000000..cfafecfdc
--- /dev/null
+++ b/gopls/internal/lsp/protocol/tsclient.go
@@ -0,0 +1,249 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated for LSP. DO NOT EDIT.
+
+package protocol
+
+// Code generated from protocol/metaModel.json at ref release/protocol/3.17.3-next.6 (hash 56c23c557e3568a9f56f42435fd5a80f9458957f).
+// https://github.com/microsoft/vscode-languageserver-node/blob/release/protocol/3.17.3-next.6/protocol/metaModel.json
+// LSP metaData.version = 3.17.0.
+
+import (
+ "context"
+ "encoding/json"
+
+ "golang.org/x/tools/internal/jsonrpc2"
+)
+
+type Client interface {
+ LogTrace(context.Context, *LogTraceParams) error // $/logTrace
+ Progress(context.Context, *ProgressParams) error // $/progress
+ RegisterCapability(context.Context, *RegistrationParams) error // client/registerCapability
+ UnregisterCapability(context.Context, *UnregistrationParams) error // client/unregisterCapability
+ Event(context.Context, *interface{}) error // telemetry/event
+ PublishDiagnostics(context.Context, *PublishDiagnosticsParams) error // textDocument/publishDiagnostics
+ LogMessage(context.Context, *LogMessageParams) error // window/logMessage
+ ShowDocument(context.Context, *ShowDocumentParams) (*ShowDocumentResult, error) // window/showDocument
+ ShowMessage(context.Context, *ShowMessageParams) error // window/showMessage
+ ShowMessageRequest(context.Context, *ShowMessageRequestParams) (*MessageActionItem, error) // window/showMessageRequest
+ WorkDoneProgressCreate(context.Context, *WorkDoneProgressCreateParams) error // window/workDoneProgress/create
+ ApplyEdit(context.Context, *ApplyWorkspaceEditParams) (*ApplyWorkspaceEditResult, error) // workspace/applyEdit
+ CodeLensRefresh(context.Context) error // workspace/codeLens/refresh
+ Configuration(context.Context, *ParamConfiguration) ([]LSPAny, error) // workspace/configuration
+ DiagnosticRefresh(context.Context) error // workspace/diagnostic/refresh
+ InlayHintRefresh(context.Context) error // workspace/inlayHint/refresh
+ InlineValueRefresh(context.Context) error // workspace/inlineValue/refresh
+ SemanticTokensRefresh(context.Context) error // workspace/semanticTokens/refresh
+ WorkspaceFolders(context.Context) ([]WorkspaceFolder, error) // workspace/workspaceFolders
+}
+
+func clientDispatch(ctx context.Context, client Client, reply jsonrpc2.Replier, r jsonrpc2.Request) (bool, error) {
+ switch r.Method() {
+ case "$/logTrace":
+ var params LogTraceParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ err := client.LogTrace(ctx, &params)
+ return true, reply(ctx, nil, err)
+ case "$/progress":
+ var params ProgressParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ err := client.Progress(ctx, &params)
+ return true, reply(ctx, nil, err)
+ case "client/registerCapability":
+ var params RegistrationParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ err := client.RegisterCapability(ctx, &params)
+ return true, reply(ctx, nil, err)
+ case "client/unregisterCapability":
+ var params UnregistrationParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ err := client.UnregisterCapability(ctx, &params)
+ return true, reply(ctx, nil, err)
+ case "telemetry/event":
+ var params interface{}
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ err := client.Event(ctx, &params)
+ return true, reply(ctx, nil, err)
+ case "textDocument/publishDiagnostics":
+ var params PublishDiagnosticsParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ err := client.PublishDiagnostics(ctx, &params)
+ return true, reply(ctx, nil, err)
+ case "window/logMessage":
+ var params LogMessageParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ err := client.LogMessage(ctx, &params)
+ return true, reply(ctx, nil, err)
+ case "window/showDocument":
+ var params ShowDocumentParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := client.ShowDocument(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "window/showMessage":
+ var params ShowMessageParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ err := client.ShowMessage(ctx, &params)
+ return true, reply(ctx, nil, err)
+ case "window/showMessageRequest":
+ var params ShowMessageRequestParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := client.ShowMessageRequest(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "window/workDoneProgress/create":
+ var params WorkDoneProgressCreateParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ err := client.WorkDoneProgressCreate(ctx, &params)
+ return true, reply(ctx, nil, err)
+ case "workspace/applyEdit":
+ var params ApplyWorkspaceEditParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := client.ApplyEdit(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "workspace/codeLens/refresh":
+ err := client.CodeLensRefresh(ctx)
+ return true, reply(ctx, nil, err)
+ case "workspace/configuration":
+ var params ParamConfiguration
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := client.Configuration(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "workspace/diagnostic/refresh":
+ err := client.DiagnosticRefresh(ctx)
+ return true, reply(ctx, nil, err)
+ case "workspace/inlayHint/refresh":
+ err := client.InlayHintRefresh(ctx)
+ return true, reply(ctx, nil, err)
+ case "workspace/inlineValue/refresh":
+ err := client.InlineValueRefresh(ctx)
+ return true, reply(ctx, nil, err)
+ case "workspace/semanticTokens/refresh":
+ err := client.SemanticTokensRefresh(ctx)
+ return true, reply(ctx, nil, err)
+ case "workspace/workspaceFolders":
+ resp, err := client.WorkspaceFolders(ctx)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ default:
+ return false, nil
+ }
+}
+
+func (s *clientDispatcher) LogTrace(ctx context.Context, params *LogTraceParams) error {
+ return s.sender.Notify(ctx, "$/logTrace", params)
+}
+func (s *clientDispatcher) Progress(ctx context.Context, params *ProgressParams) error {
+ return s.sender.Notify(ctx, "$/progress", params)
+}
+func (s *clientDispatcher) RegisterCapability(ctx context.Context, params *RegistrationParams) error {
+ return s.sender.Call(ctx, "client/registerCapability", params, nil)
+}
+func (s *clientDispatcher) UnregisterCapability(ctx context.Context, params *UnregistrationParams) error {
+ return s.sender.Call(ctx, "client/unregisterCapability", params, nil)
+}
+func (s *clientDispatcher) Event(ctx context.Context, params *interface{}) error {
+ return s.sender.Notify(ctx, "telemetry/event", params)
+}
+func (s *clientDispatcher) PublishDiagnostics(ctx context.Context, params *PublishDiagnosticsParams) error {
+ return s.sender.Notify(ctx, "textDocument/publishDiagnostics", params)
+}
+func (s *clientDispatcher) LogMessage(ctx context.Context, params *LogMessageParams) error {
+ return s.sender.Notify(ctx, "window/logMessage", params)
+}
+func (s *clientDispatcher) ShowDocument(ctx context.Context, params *ShowDocumentParams) (*ShowDocumentResult, error) {
+ var result *ShowDocumentResult
+ if err := s.sender.Call(ctx, "window/showDocument", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *clientDispatcher) ShowMessage(ctx context.Context, params *ShowMessageParams) error {
+ return s.sender.Notify(ctx, "window/showMessage", params)
+}
+func (s *clientDispatcher) ShowMessageRequest(ctx context.Context, params *ShowMessageRequestParams) (*MessageActionItem, error) {
+ var result *MessageActionItem
+ if err := s.sender.Call(ctx, "window/showMessageRequest", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *clientDispatcher) WorkDoneProgressCreate(ctx context.Context, params *WorkDoneProgressCreateParams) error {
+ return s.sender.Call(ctx, "window/workDoneProgress/create", params, nil)
+}
+func (s *clientDispatcher) ApplyEdit(ctx context.Context, params *ApplyWorkspaceEditParams) (*ApplyWorkspaceEditResult, error) {
+ var result *ApplyWorkspaceEditResult
+ if err := s.sender.Call(ctx, "workspace/applyEdit", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *clientDispatcher) CodeLensRefresh(ctx context.Context) error {
+ return s.sender.Call(ctx, "workspace/codeLens/refresh", nil, nil)
+}
+func (s *clientDispatcher) Configuration(ctx context.Context, params *ParamConfiguration) ([]LSPAny, error) {
+ var result []LSPAny
+ if err := s.sender.Call(ctx, "workspace/configuration", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *clientDispatcher) DiagnosticRefresh(ctx context.Context) error {
+ return s.sender.Call(ctx, "workspace/diagnostic/refresh", nil, nil)
+}
+func (s *clientDispatcher) InlayHintRefresh(ctx context.Context) error {
+ return s.sender.Call(ctx, "workspace/inlayHint/refresh", nil, nil)
+}
+func (s *clientDispatcher) InlineValueRefresh(ctx context.Context) error {
+ return s.sender.Call(ctx, "workspace/inlineValue/refresh", nil, nil)
+}
+func (s *clientDispatcher) SemanticTokensRefresh(ctx context.Context) error {
+ return s.sender.Call(ctx, "workspace/semanticTokens/refresh", nil, nil)
+}
+func (s *clientDispatcher) WorkspaceFolders(ctx context.Context) ([]WorkspaceFolder, error) {
+ var result []WorkspaceFolder
+ if err := s.sender.Call(ctx, "workspace/workspaceFolders", nil, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
diff --git a/gopls/internal/lsp/protocol/tsdocument_changes.go b/gopls/internal/lsp/protocol/tsdocument_changes.go
new file mode 100644
index 000000000..2c7a524e1
--- /dev/null
+++ b/gopls/internal/lsp/protocol/tsdocument_changes.go
@@ -0,0 +1,42 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protocol
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+// DocumentChanges is a union of a file edit and directory rename operations
+// for package renaming feature. At most one field of this struct is non-nil.
+type DocumentChanges struct {
+ TextDocumentEdit *TextDocumentEdit
+ RenameFile *RenameFile
+}
+
+func (d *DocumentChanges) UnmarshalJSON(data []byte) error {
+ var m map[string]interface{}
+
+ if err := json.Unmarshal(data, &m); err != nil {
+ return err
+ }
+
+ if _, ok := m["textDocument"]; ok {
+ d.TextDocumentEdit = new(TextDocumentEdit)
+ return json.Unmarshal(data, d.TextDocumentEdit)
+ }
+
+ d.RenameFile = new(RenameFile)
+ return json.Unmarshal(data, d.RenameFile)
+}
+
+func (d *DocumentChanges) MarshalJSON() ([]byte, error) {
+ if d.TextDocumentEdit != nil {
+ return json.Marshal(d.TextDocumentEdit)
+ } else if d.RenameFile != nil {
+ return json.Marshal(d.RenameFile)
+ }
+ return nil, fmt.Errorf("Empty DocumentChanges union value")
+}
diff --git a/gopls/internal/lsp/protocol/tsjson.go b/gopls/internal/lsp/protocol/tsjson.go
new file mode 100644
index 000000000..320fa0838
--- /dev/null
+++ b/gopls/internal/lsp/protocol/tsjson.go
@@ -0,0 +1,1997 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated for LSP. DO NOT EDIT.
+
+package protocol
+
+// Code generated from protocol/metaModel.json at ref release/protocol/3.17.3-next.6 (hash 56c23c557e3568a9f56f42435fd5a80f9458957f).
+// https://github.com/microsoft/vscode-languageserver-node/blob/release/protocol/3.17.3-next.6/protocol/metaModel.json
+// LSP metaData.version = 3.17.0.
+
+import "encoding/json"
+
+import "fmt"
+
+// UnmarshalError indicates that a JSON value did not conform to
+// one of the expected cases of an LSP union type.
+type UnmarshalError struct {
+ msg string
+}
+
+func (e UnmarshalError) Error() string {
+ return e.msg
+}
+
+// from line 4769
+func (t OrFEditRangePItemDefaults) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case FEditRangePItemDefaults:
+ return json.Marshal(x)
+ case Range:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [FEditRangePItemDefaults Range]", t)
+}
+
+func (t *OrFEditRangePItemDefaults) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 FEditRangePItemDefaults
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 Range
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [FEditRangePItemDefaults Range]"}
+}
+
+// from line 9811
+func (t OrFNotebookPNotebookSelector) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case NotebookDocumentFilter:
+ return json.Marshal(x)
+ case string:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [NotebookDocumentFilter string]", t)
+}
+
+func (t *OrFNotebookPNotebookSelector) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 NotebookDocumentFilter
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 string
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [NotebookDocumentFilter string]"}
+}
+
+// from line 5520
+func (t OrPLocation_workspace_symbol) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case Location:
+ return json.Marshal(x)
+ case PLocationMsg_workspace_symbol:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [Location PLocationMsg_workspace_symbol]", t)
+}
+
+func (t *OrPLocation_workspace_symbol) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 Location
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 PLocationMsg_workspace_symbol
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [Location PLocationMsg_workspace_symbol]"}
+}
+
+// from line 4163
+func (t OrPSection_workspace_didChangeConfiguration) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case []string:
+ return json.Marshal(x)
+ case string:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [[]string string]", t)
+}
+
+func (t *OrPSection_workspace_didChangeConfiguration) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 []string
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 string
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [[]string string]"}
+}
+
+// from line 7075
+func (t OrPTooltipPLabel) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case MarkupContent:
+ return json.Marshal(x)
+ case string:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [MarkupContent string]", t)
+}
+
+func (t *OrPTooltipPLabel) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 MarkupContent
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 string
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [MarkupContent string]"}
+}
+
+// from line 3699
+func (t OrPTooltip_textDocument_inlayHint) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case MarkupContent:
+ return json.Marshal(x)
+ case string:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [MarkupContent string]", t)
+}
+
+func (t *OrPTooltip_textDocument_inlayHint) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 MarkupContent
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 string
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [MarkupContent string]"}
+}
+
+// from line 6184
+func (t Or_CancelParams_id) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case int32:
+ return json.Marshal(x)
+ case string:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [int32 string]", t)
+}
+
+func (t *Or_CancelParams_id) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 int32
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 string
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [int32 string]"}
+}
+
+// from line 4582
+func (t Or_CompletionItem_documentation) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case MarkupContent:
+ return json.Marshal(x)
+ case string:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [MarkupContent string]", t)
+}
+
+func (t *Or_CompletionItem_documentation) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 MarkupContent
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 string
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [MarkupContent string]"}
+}
+
+// from line 4665
+func (t Or_CompletionItem_textEdit) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case InsertReplaceEdit:
+ return json.Marshal(x)
+ case TextEdit:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [InsertReplaceEdit TextEdit]", t)
+}
+
+func (t *Or_CompletionItem_textEdit) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 InsertReplaceEdit
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 TextEdit
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [InsertReplaceEdit TextEdit]"}
+}
+
+// from line 13753
+func (t Or_Definition) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case Location:
+ return json.Marshal(x)
+ case []Location:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [Location []Location]", t)
+}
+
+func (t *Or_Definition) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 Location
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 []Location
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [Location []Location]"}
+}
+
+// from line 8547
+func (t Or_Diagnostic_code) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case int32:
+ return json.Marshal(x)
+ case string:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [int32 string]", t)
+}
+
+func (t *Or_Diagnostic_code) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 int32
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 string
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [int32 string]"}
+}
+
+// from line 13885
+func (t Or_DocumentDiagnosticReport) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case RelatedFullDocumentDiagnosticReport:
+ return json.Marshal(x)
+ case RelatedUnchangedDocumentDiagnosticReport:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [RelatedFullDocumentDiagnosticReport RelatedUnchangedDocumentDiagnosticReport]", t)
+}
+
+func (t *Or_DocumentDiagnosticReport) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 RelatedFullDocumentDiagnosticReport
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 RelatedUnchangedDocumentDiagnosticReport
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [RelatedFullDocumentDiagnosticReport RelatedUnchangedDocumentDiagnosticReport]"}
+}
+
+// from line 3822
+func (t Or_DocumentDiagnosticReportPartialResult_relatedDocuments_Value) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case FullDocumentDiagnosticReport:
+ return json.Marshal(x)
+ case UnchangedDocumentDiagnosticReport:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [FullDocumentDiagnosticReport UnchangedDocumentDiagnosticReport]", t)
+}
+
+func (t *Or_DocumentDiagnosticReportPartialResult_relatedDocuments_Value) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 FullDocumentDiagnosticReport
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 UnchangedDocumentDiagnosticReport
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [FullDocumentDiagnosticReport UnchangedDocumentDiagnosticReport]"}
+}
+
+// from line 14095
+func (t Or_DocumentFilter) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case NotebookCellTextDocumentFilter:
+ return json.Marshal(x)
+ case TextDocumentFilter:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [NotebookCellTextDocumentFilter TextDocumentFilter]", t)
+}
+
+func (t *Or_DocumentFilter) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 NotebookCellTextDocumentFilter
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 TextDocumentFilter
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [NotebookCellTextDocumentFilter TextDocumentFilter]"}
+}
+
+// from line 4891
+func (t Or_Hover_contents) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case MarkedString:
+ return json.Marshal(x)
+ case MarkupContent:
+ return json.Marshal(x)
+ case []MarkedString:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [MarkedString MarkupContent []MarkedString]", t)
+}
+
+func (t *Or_Hover_contents) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 MarkedString
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 MarkupContent
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ var h2 []MarkedString
+ if err := json.Unmarshal(x, &h2); err == nil {
+ t.Value = h2
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [MarkedString MarkupContent []MarkedString]"}
+}
+
+// from line 3658
+func (t Or_InlayHint_label) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case []InlayHintLabelPart:
+ return json.Marshal(x)
+ case string:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [[]InlayHintLabelPart string]", t)
+}
+
+func (t *Or_InlayHint_label) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 []InlayHintLabelPart
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 string
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [[]InlayHintLabelPart string]"}
+}
+
+// from line 13863
+func (t Or_InlineValue) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case InlineValueEvaluatableExpression:
+ return json.Marshal(x)
+ case InlineValueText:
+ return json.Marshal(x)
+ case InlineValueVariableLookup:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [InlineValueEvaluatableExpression InlineValueText InlineValueVariableLookup]", t)
+}
+
+func (t *Or_InlineValue) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 InlineValueEvaluatableExpression
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 InlineValueText
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ var h2 InlineValueVariableLookup
+ if err := json.Unmarshal(x, &h2); err == nil {
+ t.Value = h2
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [InlineValueEvaluatableExpression InlineValueText InlineValueVariableLookup]"}
+}
+
+// from line 14060
+func (t Or_MarkedString) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case Msg_MarkedString:
+ return json.Marshal(x)
+ case string:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [Msg_MarkedString string]", t)
+}
+
+func (t *Or_MarkedString) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 Msg_MarkedString
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 string
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [Msg_MarkedString string]"}
+}
+
+// from line 10118
+func (t Or_NotebookCellTextDocumentFilter_notebook) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case NotebookDocumentFilter:
+ return json.Marshal(x)
+ case string:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [NotebookDocumentFilter string]", t)
+}
+
+func (t *Or_NotebookCellTextDocumentFilter_notebook) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 NotebookDocumentFilter
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 string
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [NotebookDocumentFilter string]"}
+}
+
+// from line 9857
+func (t Or_NotebookDocumentSyncOptions_notebookSelector_Elem_Item1_notebook) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case NotebookDocumentFilter:
+ return json.Marshal(x)
+ case string:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [NotebookDocumentFilter string]", t)
+}
+
+func (t *Or_NotebookDocumentSyncOptions_notebookSelector_Elem_Item1_notebook) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 NotebookDocumentFilter
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 string
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [NotebookDocumentFilter string]"}
+}
+
+// from line 7168
+func (t Or_RelatedFullDocumentDiagnosticReport_relatedDocuments_Value) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case FullDocumentDiagnosticReport:
+ return json.Marshal(x)
+ case UnchangedDocumentDiagnosticReport:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [FullDocumentDiagnosticReport UnchangedDocumentDiagnosticReport]", t)
+}
+
+func (t *Or_RelatedFullDocumentDiagnosticReport_relatedDocuments_Value) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 FullDocumentDiagnosticReport
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 UnchangedDocumentDiagnosticReport
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [FullDocumentDiagnosticReport UnchangedDocumentDiagnosticReport]"}
+}
+
+// from line 7207
+func (t Or_RelatedUnchangedDocumentDiagnosticReport_relatedDocuments_Value) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case FullDocumentDiagnosticReport:
+ return json.Marshal(x)
+ case UnchangedDocumentDiagnosticReport:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [FullDocumentDiagnosticReport UnchangedDocumentDiagnosticReport]", t)
+}
+
+func (t *Or_RelatedUnchangedDocumentDiagnosticReport_relatedDocuments_Value) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 FullDocumentDiagnosticReport
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 UnchangedDocumentDiagnosticReport
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [FullDocumentDiagnosticReport UnchangedDocumentDiagnosticReport]"}
+}
+
+// from line 10741
+func (t Or_RelativePattern_baseUri) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case URI:
+ return json.Marshal(x)
+ case WorkspaceFolder:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [URI WorkspaceFolder]", t)
+}
+
+func (t *Or_RelativePattern_baseUri) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 URI
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 WorkspaceFolder
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [URI WorkspaceFolder]"}
+}
+
+// from line 1371
+func (t Or_Result_textDocument_codeAction_Item0_Elem) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case CodeAction:
+ return json.Marshal(x)
+ case Command:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [CodeAction Command]", t)
+}
+
+func (t *Or_Result_textDocument_codeAction_Item0_Elem) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 CodeAction
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 Command
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [CodeAction Command]"}
+}
+
+// from line 12197
+func (t Or_SemanticTokensClientCapabilities_requests_full) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case FFullPRequests:
+ return json.Marshal(x)
+ case bool:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [FFullPRequests bool]", t)
+}
+
+func (t *Or_SemanticTokensClientCapabilities_requests_full) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 FFullPRequests
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 bool
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [FFullPRequests bool]"}
+}
+
+// from line 12177
+func (t Or_SemanticTokensClientCapabilities_requests_range) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case FRangePRequests:
+ return json.Marshal(x)
+ case bool:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [FRangePRequests bool]", t)
+}
+
+func (t *Or_SemanticTokensClientCapabilities_requests_range) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 FRangePRequests
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 bool
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [FRangePRequests bool]"}
+}
+
+// from line 6579
+func (t Or_SemanticTokensOptions_full) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case PFullESemanticTokensOptions:
+ return json.Marshal(x)
+ case bool:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [PFullESemanticTokensOptions bool]", t)
+}
+
+func (t *Or_SemanticTokensOptions_full) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 PFullESemanticTokensOptions
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 bool
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [PFullESemanticTokensOptions bool]"}
+}
+
+// from line 6559
+func (t Or_SemanticTokensOptions_range) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case PRangeESemanticTokensOptions:
+ return json.Marshal(x)
+ case bool:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [PRangeESemanticTokensOptions bool]", t)
+}
+
+func (t *Or_SemanticTokensOptions_range) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 PRangeESemanticTokensOptions
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 bool
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [PRangeESemanticTokensOptions bool]"}
+}
+
+// from line 8227
+func (t Or_ServerCapabilities_callHierarchyProvider) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case CallHierarchyOptions:
+ return json.Marshal(x)
+ case CallHierarchyRegistrationOptions:
+ return json.Marshal(x)
+ case bool:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [CallHierarchyOptions CallHierarchyRegistrationOptions bool]", t)
+}
+
+func (t *Or_ServerCapabilities_callHierarchyProvider) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 CallHierarchyOptions
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 CallHierarchyRegistrationOptions
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ var h2 bool
+ if err := json.Unmarshal(x, &h2); err == nil {
+ t.Value = h2
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [CallHierarchyOptions CallHierarchyRegistrationOptions bool]"}
+}
+
+// from line 8035
+func (t Or_ServerCapabilities_codeActionProvider) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case CodeActionOptions:
+ return json.Marshal(x)
+ case bool:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [CodeActionOptions bool]", t)
+}
+
+func (t *Or_ServerCapabilities_codeActionProvider) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 CodeActionOptions
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 bool
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [CodeActionOptions bool]"}
+}
+
+// from line 8071
+func (t Or_ServerCapabilities_colorProvider) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case DocumentColorOptions:
+ return json.Marshal(x)
+ case DocumentColorRegistrationOptions:
+ return json.Marshal(x)
+ case bool:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [DocumentColorOptions DocumentColorRegistrationOptions bool]", t)
+}
+
+func (t *Or_ServerCapabilities_colorProvider) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 DocumentColorOptions
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 DocumentColorRegistrationOptions
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ var h2 bool
+ if err := json.Unmarshal(x, &h2); err == nil {
+ t.Value = h2
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [DocumentColorOptions DocumentColorRegistrationOptions bool]"}
+}
+
+// from line 7897
+func (t Or_ServerCapabilities_declarationProvider) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case DeclarationOptions:
+ return json.Marshal(x)
+ case DeclarationRegistrationOptions:
+ return json.Marshal(x)
+ case bool:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [DeclarationOptions DeclarationRegistrationOptions bool]", t)
+}
+
+func (t *Or_ServerCapabilities_declarationProvider) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 DeclarationOptions
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 DeclarationRegistrationOptions
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ var h2 bool
+ if err := json.Unmarshal(x, &h2); err == nil {
+ t.Value = h2
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [DeclarationOptions DeclarationRegistrationOptions bool]"}
+}
+
+// from line 7919
+func (t Or_ServerCapabilities_definitionProvider) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case DefinitionOptions:
+ return json.Marshal(x)
+ case bool:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [DefinitionOptions bool]", t)
+}
+
+func (t *Or_ServerCapabilities_definitionProvider) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 DefinitionOptions
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 bool
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [DefinitionOptions bool]"}
+}
+
+// from line 8384
+func (t Or_ServerCapabilities_diagnosticProvider) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case DiagnosticOptions:
+ return json.Marshal(x)
+ case DiagnosticRegistrationOptions:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [DiagnosticOptions DiagnosticRegistrationOptions]", t)
+}
+
+func (t *Or_ServerCapabilities_diagnosticProvider) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 DiagnosticOptions
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 DiagnosticRegistrationOptions
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [DiagnosticOptions DiagnosticRegistrationOptions]"}
+}
+
+// from line 8111
+func (t Or_ServerCapabilities_documentFormattingProvider) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case DocumentFormattingOptions:
+ return json.Marshal(x)
+ case bool:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [DocumentFormattingOptions bool]", t)
+}
+
+func (t *Or_ServerCapabilities_documentFormattingProvider) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 DocumentFormattingOptions
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 bool
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [DocumentFormattingOptions bool]"}
+}
+
+// from line 7999
+func (t Or_ServerCapabilities_documentHighlightProvider) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case DocumentHighlightOptions:
+ return json.Marshal(x)
+ case bool:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [DocumentHighlightOptions bool]", t)
+}
+
+func (t *Or_ServerCapabilities_documentHighlightProvider) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 DocumentHighlightOptions
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 bool
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [DocumentHighlightOptions bool]"}
+}
+
+// from line 8129
+func (t Or_ServerCapabilities_documentRangeFormattingProvider) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case DocumentRangeFormattingOptions:
+ return json.Marshal(x)
+ case bool:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [DocumentRangeFormattingOptions bool]", t)
+}
+
+func (t *Or_ServerCapabilities_documentRangeFormattingProvider) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 DocumentRangeFormattingOptions
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 bool
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [DocumentRangeFormattingOptions bool]"}
+}
+
+// from line 8017
+func (t Or_ServerCapabilities_documentSymbolProvider) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case DocumentSymbolOptions:
+ return json.Marshal(x)
+ case bool:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [DocumentSymbolOptions bool]", t)
+}
+
+func (t *Or_ServerCapabilities_documentSymbolProvider) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 DocumentSymbolOptions
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 bool
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [DocumentSymbolOptions bool]"}
+}
+
+// from line 8174
+func (t Or_ServerCapabilities_foldingRangeProvider) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case FoldingRangeOptions:
+ return json.Marshal(x)
+ case FoldingRangeRegistrationOptions:
+ return json.Marshal(x)
+ case bool:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [FoldingRangeOptions FoldingRangeRegistrationOptions bool]", t)
+}
+
+func (t *Or_ServerCapabilities_foldingRangeProvider) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 FoldingRangeOptions
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 FoldingRangeRegistrationOptions
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ var h2 bool
+ if err := json.Unmarshal(x, &h2); err == nil {
+ t.Value = h2
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [FoldingRangeOptions FoldingRangeRegistrationOptions bool]"}
+}
+
+// from line 7870
+func (t Or_ServerCapabilities_hoverProvider) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case HoverOptions:
+ return json.Marshal(x)
+ case bool:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [HoverOptions bool]", t)
+}
+
+func (t *Or_ServerCapabilities_hoverProvider) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 HoverOptions
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 bool
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [HoverOptions bool]"}
+}
+
+// from line 7959
+func (t Or_ServerCapabilities_implementationProvider) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case ImplementationOptions:
+ return json.Marshal(x)
+ case ImplementationRegistrationOptions:
+ return json.Marshal(x)
+ case bool:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [ImplementationOptions ImplementationRegistrationOptions bool]", t)
+}
+
+func (t *Or_ServerCapabilities_implementationProvider) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 ImplementationOptions
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 ImplementationRegistrationOptions
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ var h2 bool
+ if err := json.Unmarshal(x, &h2); err == nil {
+ t.Value = h2
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [ImplementationOptions ImplementationRegistrationOptions bool]"}
+}
+
+// from line 8361
+func (t Or_ServerCapabilities_inlayHintProvider) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case InlayHintOptions:
+ return json.Marshal(x)
+ case InlayHintRegistrationOptions:
+ return json.Marshal(x)
+ case bool:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [InlayHintOptions InlayHintRegistrationOptions bool]", t)
+}
+
+func (t *Or_ServerCapabilities_inlayHintProvider) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 InlayHintOptions
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 InlayHintRegistrationOptions
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ var h2 bool
+ if err := json.Unmarshal(x, &h2); err == nil {
+ t.Value = h2
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [InlayHintOptions InlayHintRegistrationOptions bool]"}
+}
+
+// from line 8338
+func (t Or_ServerCapabilities_inlineValueProvider) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case InlineValueOptions:
+ return json.Marshal(x)
+ case InlineValueRegistrationOptions:
+ return json.Marshal(x)
+ case bool:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [InlineValueOptions InlineValueRegistrationOptions bool]", t)
+}
+
+func (t *Or_ServerCapabilities_inlineValueProvider) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 InlineValueOptions
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 InlineValueRegistrationOptions
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ var h2 bool
+ if err := json.Unmarshal(x, &h2); err == nil {
+ t.Value = h2
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [InlineValueOptions InlineValueRegistrationOptions bool]"}
+}
+
+// from line 8250
+func (t Or_ServerCapabilities_linkedEditingRangeProvider) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case LinkedEditingRangeOptions:
+ return json.Marshal(x)
+ case LinkedEditingRangeRegistrationOptions:
+ return json.Marshal(x)
+ case bool:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [LinkedEditingRangeOptions LinkedEditingRangeRegistrationOptions bool]", t)
+}
+
+func (t *Or_ServerCapabilities_linkedEditingRangeProvider) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 LinkedEditingRangeOptions
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 LinkedEditingRangeRegistrationOptions
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ var h2 bool
+ if err := json.Unmarshal(x, &h2); err == nil {
+ t.Value = h2
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [LinkedEditingRangeOptions LinkedEditingRangeRegistrationOptions bool]"}
+}
+
+// from line 8292
+func (t Or_ServerCapabilities_monikerProvider) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case MonikerOptions:
+ return json.Marshal(x)
+ case MonikerRegistrationOptions:
+ return json.Marshal(x)
+ case bool:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [MonikerOptions MonikerRegistrationOptions bool]", t)
+}
+
+func (t *Or_ServerCapabilities_monikerProvider) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 MonikerOptions
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 MonikerRegistrationOptions
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ var h2 bool
+ if err := json.Unmarshal(x, &h2); err == nil {
+ t.Value = h2
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [MonikerOptions MonikerRegistrationOptions bool]"}
+}
+
+// from line 7842
+func (t Or_ServerCapabilities_notebookDocumentSync) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case NotebookDocumentSyncOptions:
+ return json.Marshal(x)
+ case NotebookDocumentSyncRegistrationOptions:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [NotebookDocumentSyncOptions NotebookDocumentSyncRegistrationOptions]", t)
+}
+
+func (t *Or_ServerCapabilities_notebookDocumentSync) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 NotebookDocumentSyncOptions
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 NotebookDocumentSyncRegistrationOptions
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [NotebookDocumentSyncOptions NotebookDocumentSyncRegistrationOptions]"}
+}
+
+// from line 7981
+func (t Or_ServerCapabilities_referencesProvider) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case ReferenceOptions:
+ return json.Marshal(x)
+ case bool:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [ReferenceOptions bool]", t)
+}
+
+func (t *Or_ServerCapabilities_referencesProvider) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 ReferenceOptions
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 bool
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [ReferenceOptions bool]"}
+}
+
+// from line 8156
+func (t Or_ServerCapabilities_renameProvider) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case RenameOptions:
+ return json.Marshal(x)
+ case bool:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [RenameOptions bool]", t)
+}
+
+func (t *Or_ServerCapabilities_renameProvider) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 RenameOptions
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 bool
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [RenameOptions bool]"}
+}
+
+// from line 8196
+func (t Or_ServerCapabilities_selectionRangeProvider) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case SelectionRangeOptions:
+ return json.Marshal(x)
+ case SelectionRangeRegistrationOptions:
+ return json.Marshal(x)
+ case bool:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [SelectionRangeOptions SelectionRangeRegistrationOptions bool]", t)
+}
+
+func (t *Or_ServerCapabilities_selectionRangeProvider) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 SelectionRangeOptions
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 SelectionRangeRegistrationOptions
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ var h2 bool
+ if err := json.Unmarshal(x, &h2); err == nil {
+ t.Value = h2
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [SelectionRangeOptions SelectionRangeRegistrationOptions bool]"}
+}
+
+// from line 8273
+func (t Or_ServerCapabilities_semanticTokensProvider) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case SemanticTokensOptions:
+ return json.Marshal(x)
+ case SemanticTokensRegistrationOptions:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [SemanticTokensOptions SemanticTokensRegistrationOptions]", t)
+}
+
+func (t *Or_ServerCapabilities_semanticTokensProvider) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 SemanticTokensOptions
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 SemanticTokensRegistrationOptions
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [SemanticTokensOptions SemanticTokensRegistrationOptions]"}
+}
+
+// from line 7824
+func (t Or_ServerCapabilities_textDocumentSync) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case TextDocumentSyncKind:
+ return json.Marshal(x)
+ case TextDocumentSyncOptions:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [TextDocumentSyncKind TextDocumentSyncOptions]", t)
+}
+
+func (t *Or_ServerCapabilities_textDocumentSync) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 TextDocumentSyncKind
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 TextDocumentSyncOptions
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [TextDocumentSyncKind TextDocumentSyncOptions]"}
+}
+
+// from line 7937
+func (t Or_ServerCapabilities_typeDefinitionProvider) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case TypeDefinitionOptions:
+ return json.Marshal(x)
+ case TypeDefinitionRegistrationOptions:
+ return json.Marshal(x)
+ case bool:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [TypeDefinitionOptions TypeDefinitionRegistrationOptions bool]", t)
+}
+
+func (t *Or_ServerCapabilities_typeDefinitionProvider) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 TypeDefinitionOptions
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 TypeDefinitionRegistrationOptions
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ var h2 bool
+ if err := json.Unmarshal(x, &h2); err == nil {
+ t.Value = h2
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [TypeDefinitionOptions TypeDefinitionRegistrationOptions bool]"}
+}
+
+// from line 8315
+func (t Or_ServerCapabilities_typeHierarchyProvider) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case TypeHierarchyOptions:
+ return json.Marshal(x)
+ case TypeHierarchyRegistrationOptions:
+ return json.Marshal(x)
+ case bool:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [TypeHierarchyOptions TypeHierarchyRegistrationOptions bool]", t)
+}
+
+func (t *Or_ServerCapabilities_typeHierarchyProvider) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 TypeHierarchyOptions
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 TypeHierarchyRegistrationOptions
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ var h2 bool
+ if err := json.Unmarshal(x, &h2); err == nil {
+ t.Value = h2
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [TypeHierarchyOptions TypeHierarchyRegistrationOptions bool]"}
+}
+
+// from line 8093
+func (t Or_ServerCapabilities_workspaceSymbolProvider) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case WorkspaceSymbolOptions:
+ return json.Marshal(x)
+ case bool:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [WorkspaceSymbolOptions bool]", t)
+}
+
+func (t *Or_ServerCapabilities_workspaceSymbolProvider) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 WorkspaceSymbolOptions
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 bool
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [WorkspaceSymbolOptions bool]"}
+}
+
+// from line 8841
+func (t Or_SignatureInformation_documentation) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case MarkupContent:
+ return json.Marshal(x)
+ case string:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [MarkupContent string]", t)
+}
+
+func (t *Or_SignatureInformation_documentation) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 MarkupContent
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 string
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [MarkupContent string]"}
+}
+
+// from line 6692
+func (t Or_TextDocumentEdit_edits_Elem) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case AnnotatedTextEdit:
+ return json.Marshal(x)
+ case TextEdit:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [AnnotatedTextEdit TextEdit]", t)
+}
+
+func (t *Or_TextDocumentEdit_edits_Elem) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 AnnotatedTextEdit
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 TextEdit
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [AnnotatedTextEdit TextEdit]"}
+}
+
+// from line 9777
+func (t Or_TextDocumentSyncOptions_save) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case SaveOptions:
+ return json.Marshal(x)
+ case bool:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [SaveOptions bool]", t)
+}
+
+func (t *Or_TextDocumentSyncOptions_save) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 SaveOptions
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 bool
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [SaveOptions bool]"}
+}
+
+// from line 13986
+func (t Or_WorkspaceDocumentDiagnosticReport) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case WorkspaceFullDocumentDiagnosticReport:
+ return json.Marshal(x)
+ case WorkspaceUnchangedDocumentDiagnosticReport:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [WorkspaceFullDocumentDiagnosticReport WorkspaceUnchangedDocumentDiagnosticReport]", t)
+}
+
+func (t *Or_WorkspaceDocumentDiagnosticReport) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 WorkspaceFullDocumentDiagnosticReport
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 WorkspaceUnchangedDocumentDiagnosticReport
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [WorkspaceFullDocumentDiagnosticReport WorkspaceUnchangedDocumentDiagnosticReport]"}
+}
+
+// from line 3219
+func (t Or_WorkspaceEdit_documentChanges_Elem) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case CreateFile:
+ return json.Marshal(x)
+ case DeleteFile:
+ return json.Marshal(x)
+ case RenameFile:
+ return json.Marshal(x)
+ case TextDocumentEdit:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [CreateFile DeleteFile RenameFile TextDocumentEdit]", t)
+}
+
+func (t *Or_WorkspaceEdit_documentChanges_Elem) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 CreateFile
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 DeleteFile
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ var h2 RenameFile
+ if err := json.Unmarshal(x, &h2); err == nil {
+ t.Value = h2
+ return nil
+ }
+ var h3 TextDocumentEdit
+ if err := json.Unmarshal(x, &h3); err == nil {
+ t.Value = h3
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [CreateFile DeleteFile RenameFile TextDocumentEdit]"}
+}
+
+// from line 248
+func (t Or_textDocument_declaration) MarshalJSON() ([]byte, error) {
+ switch x := t.Value.(type) {
+ case Declaration:
+ return json.Marshal(x)
+ case []DeclarationLink:
+ return json.Marshal(x)
+ case nil:
+ return []byte("null"), nil
+ }
+ return nil, fmt.Errorf("type %T not one of [Declaration []DeclarationLink]", t)
+}
+
+func (t *Or_textDocument_declaration) UnmarshalJSON(x []byte) error {
+ if string(x) == "null" {
+ t.Value = nil
+ return nil
+ }
+ var h0 Declaration
+ if err := json.Unmarshal(x, &h0); err == nil {
+ t.Value = h0
+ return nil
+ }
+ var h1 []DeclarationLink
+ if err := json.Unmarshal(x, &h1); err == nil {
+ t.Value = h1
+ return nil
+ }
+ return &UnmarshalError{"unmarshal failed to match one of [Declaration []DeclarationLink]"}
+}
diff --git a/gopls/internal/lsp/protocol/tsprotocol.go b/gopls/internal/lsp/protocol/tsprotocol.go
new file mode 100644
index 000000000..e639a57ab
--- /dev/null
+++ b/gopls/internal/lsp/protocol/tsprotocol.go
@@ -0,0 +1,5450 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated for LSP. DO NOT EDIT.
+
+package protocol
+
+// Code generated from protocol/metaModel.json at ref release/protocol/3.17.3-next.6 (hash 56c23c557e3568a9f56f42435fd5a80f9458957f).
+// https://github.com/microsoft/vscode-languageserver-node/blob/release/protocol/3.17.3-next.6/protocol/metaModel.json
+// LSP metaData.version = 3.17.0.
+
+import "encoding/json"
+
+// A special text edit with an additional change annotation.
+//
+// @since 3.16.0.
+type AnnotatedTextEdit struct { // line 9372
+ // The actual identifier of the change annotation
+ AnnotationID ChangeAnnotationIdentifier `json:"annotationId"`
+ TextEdit
+}
+
+// The parameters passed via a apply workspace edit request.
+type ApplyWorkspaceEditParams struct { // line 5984
+ // An optional label of the workspace edit. This label is
+ // presented in the user interface for example on an undo
+ // stack to undo the workspace edit.
+ Label string `json:"label,omitempty"`
+ // The edits to apply.
+ Edit WorkspaceEdit `json:"edit"`
+}
+
+// The result returned from the apply workspace edit request.
+//
+// @since 3.17 renamed from ApplyWorkspaceEditResponse
+type ApplyWorkspaceEditResult struct { // line 6007
+ // Indicates whether the edit was applied or not.
+ Applied bool `json:"applied"`
+ // An optional textual description for why the edit was not applied.
+ // This may be used by the server for diagnostic logging or to provide
+ // a suitable error for a request that triggered the edit.
+ FailureReason string `json:"failureReason,omitempty"`
+ // Depending on the client's failure handling strategy `failedChange` might
+ // contain the index of the change that failed. This property is only available
+ // if the client signals a `failureHandlingStrategy` in its client capabilities.
+ FailedChange uint32 `json:"failedChange,omitempty"`
+}
+
+// A base for all symbol information.
+type BaseSymbolInformation struct { // line 8966
+ // The name of this symbol.
+ Name string `json:"name"`
+ // The kind of this symbol.
+ Kind SymbolKind `json:"kind"`
+ // Tags for this symbol.
+ //
+ // @since 3.16.0
+ Tags []SymbolTag `json:"tags,omitempty"`
+ // The name of the symbol containing this symbol. This information is for
+ // user interface purposes (e.g. to render a qualifier in the user interface
+ // if necessary). It can't be used to re-infer a hierarchy for the document
+ // symbols.
+ ContainerName string `json:"containerName,omitempty"`
+}
+
+// @since 3.16.0
+type CallHierarchyClientCapabilities struct { // line 12141
+ // Whether implementation supports dynamic registration. If this is set to `true`
+ // the client supports the new `(TextDocumentRegistrationOptions & StaticRegistrationOptions)`
+ // return value for the corresponding server capability as well.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+}
+
+// Represents an incoming call, e.g. a caller of a method or constructor.
+//
+// @since 3.16.0
+type CallHierarchyIncomingCall struct { // line 2779
+ // The item that makes the call.
+ From CallHierarchyItem `json:"from"`
+ // The ranges at which the calls appear. This is relative to the caller
+ // denoted by {@link CallHierarchyIncomingCall.from `this.from`}.
+ FromRanges []Range `json:"fromRanges"`
+}
+
+// The parameter of a `callHierarchy/incomingCalls` request.
+//
+// @since 3.16.0
+type CallHierarchyIncomingCallsParams struct { // line 2755
+ Item CallHierarchyItem `json:"item"`
+ WorkDoneProgressParams
+ PartialResultParams
+}
+
+// Represents programming constructs like functions or constructors in the context
+// of call hierarchy.
+//
+// @since 3.16.0
+type CallHierarchyItem struct { // line 2656
+ // The name of this item.
+ Name string `json:"name"`
+ // The kind of this item.
+ Kind SymbolKind `json:"kind"`
+ // Tags for this item.
+ Tags []SymbolTag `json:"tags,omitempty"`
+ // More detail for this item, e.g. the signature of a function.
+ Detail string `json:"detail,omitempty"`
+ // The resource identifier of this item.
+ URI DocumentURI `json:"uri"`
+ // The range enclosing this symbol not including leading/trailing whitespace but everything else, e.g. comments and code.
+ Range Range `json:"range"`
+ // The range that should be selected and revealed when this symbol is being picked, e.g. the name of a function.
+ // Must be contained by the {@link CallHierarchyItem.range `range`}.
+ SelectionRange Range `json:"selectionRange"`
+ // A data entry field that is preserved between a call hierarchy prepare and
+ // incoming calls or outgoing calls requests.
+ Data interface{} `json:"data,omitempty"`
+}
+
+// Call hierarchy options used during static registration.
+//
+// @since 3.16.0
+type CallHierarchyOptions struct { // line 6534
+ WorkDoneProgressOptions
+}
+
+// Represents an outgoing call, e.g. calling a getter from a method or a method from a constructor etc.
+//
+// @since 3.16.0
+type CallHierarchyOutgoingCall struct { // line 2829
+ // The item that is called.
+ To CallHierarchyItem `json:"to"`
+ // The range at which this item is called. This is the range relative to the caller, e.g the item
+ // passed to {@link CallHierarchyItemProvider.provideCallHierarchyOutgoingCalls `provideCallHierarchyOutgoingCalls`}
+ // and not {@link CallHierarchyOutgoingCall.to `this.to`}.
+ FromRanges []Range `json:"fromRanges"`
+}
+
+// The parameter of a `callHierarchy/outgoingCalls` request.
+//
+// @since 3.16.0
+type CallHierarchyOutgoingCallsParams struct { // line 2805
+ Item CallHierarchyItem `json:"item"`
+ WorkDoneProgressParams
+ PartialResultParams
+}
+
+// The parameter of a `textDocument/prepareCallHierarchy` request.
+//
+// @since 3.16.0
+type CallHierarchyPrepareParams struct { // line 2638
+ TextDocumentPositionParams
+ WorkDoneProgressParams
+}
+
+// Call hierarchy options used during static or dynamic registration.
+//
+// @since 3.16.0
+type CallHierarchyRegistrationOptions struct { // line 2733
+ TextDocumentRegistrationOptions
+ CallHierarchyOptions
+ StaticRegistrationOptions
+}
+type CancelParams struct { // line 6179
+ // The request id to cancel.
+ ID interface{} `json:"id"`
+}
+
+// Additional information that describes document changes.
+//
+// @since 3.16.0
+type ChangeAnnotation struct { // line 6831
+ // A human-readable string describing the actual change. The string
+ // is rendered prominent in the user interface.
+ Label string `json:"label"`
+ // A flag which indicates that user confirmation is needed
+ // before applying the change.
+ NeedsConfirmation bool `json:"needsConfirmation,omitempty"`
+ // A human-readable string which is rendered less prominent in
+ // the user interface.
+ Description string `json:"description,omitempty"`
+}
+
+// An identifier to refer to a change annotation stored with a workspace edit.
+type ChangeAnnotationIdentifier = string // (alias) line 13976
+// Defines the capabilities provided by the client.
+type ClientCapabilities struct { // line 9674
+ // Workspace specific client capabilities.
+ Workspace WorkspaceClientCapabilities `json:"workspace,omitempty"`
+ // Text document specific client capabilities.
+ TextDocument TextDocumentClientCapabilities `json:"textDocument,omitempty"`
+ // Capabilities specific to the notebook document support.
+ //
+ // @since 3.17.0
+ NotebookDocument *NotebookDocumentClientCapabilities `json:"notebookDocument,omitempty"`
+ // Window specific client capabilities.
+ Window WindowClientCapabilities `json:"window,omitempty"`
+ // General client capabilities.
+ //
+ // @since 3.16.0
+ General *GeneralClientCapabilities `json:"general,omitempty"`
+ // Experimental client capabilities.
+ Experimental interface{} `json:"experimental,omitempty"`
+}
+
+// A code action represents a change that can be performed in code, e.g. to fix a problem or
+// to refactor code.
+//
+// A CodeAction must set either `edit` and/or a `command`. If both are supplied, the `edit` is applied first, then the `command` is executed.
+type CodeAction struct { // line 5382
+ // A short, human-readable, title for this code action.
+ Title string `json:"title"`
+ // The kind of the code action.
+ //
+ // Used to filter code actions.
+ Kind CodeActionKind `json:"kind,omitempty"`
+ // The diagnostics that this code action resolves.
+ Diagnostics []Diagnostic `json:"diagnostics,omitempty"`
+ // Marks this as a preferred action. Preferred actions are used by the `auto fix` command and can be targeted
+ // by keybindings.
+ //
+ // A quick fix should be marked preferred if it properly addresses the underlying error.
+ // A refactoring should be marked preferred if it is the most reasonable choice of actions to take.
+ //
+ // @since 3.15.0
+ IsPreferred bool `json:"isPreferred,omitempty"`
+ // Marks that the code action cannot currently be applied.
+ //
+ // Clients should follow the following guidelines regarding disabled code actions:
+ //
+ // - Disabled code actions are not shown in automatic [lightbulbs](https://code.visualstudio.com/docs/editor/editingevolved#_code-action)
+ // code action menus.
+ //
+ // - Disabled actions are shown as faded out in the code action menu when the user requests a more specific type
+ // of code action, such as refactorings.
+ //
+ // - If the user has a [keybinding](https://code.visualstudio.com/docs/editor/refactoring#_keybindings-for-code-actions)
+ // that auto applies a code action and only disabled code actions are returned, the client should show the user an
+ // error message with `reason` in the editor.
+ //
+ // @since 3.16.0
+ Disabled *PDisabledMsg_textDocument_codeAction `json:"disabled,omitempty"`
+ // The workspace edit this code action performs.
+ Edit *WorkspaceEdit `json:"edit,omitempty"`
+ // A command this code action executes. If a code action
+ // provides an edit and a command, first the edit is
+ // executed and then the command.
+ Command *Command `json:"command,omitempty"`
+ // A data entry field that is preserved on a code action between
+ // a `textDocument/codeAction` and a `codeAction/resolve` request.
+ //
+ // @since 3.16.0
+ Data interface{} `json:"data,omitempty"`
+}
+
+// The Client Capabilities of a {@link CodeActionRequest}.
+type CodeActionClientCapabilities struct { // line 11721
+ // Whether code action supports dynamic registration.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+ // The client support code action literals of type `CodeAction` as a valid
+ // response of the `textDocument/codeAction` request. If the property is not
+ // set the request can only return `Command` literals.
+ //
+ // @since 3.8.0
+ CodeActionLiteralSupport PCodeActionLiteralSupportPCodeAction `json:"codeActionLiteralSupport,omitempty"`
+ // Whether code action supports the `isPreferred` property.
+ //
+ // @since 3.15.0
+ IsPreferredSupport bool `json:"isPreferredSupport,omitempty"`
+ // Whether code action supports the `disabled` property.
+ //
+ // @since 3.16.0
+ DisabledSupport bool `json:"disabledSupport,omitempty"`
+ // Whether code action supports the `data` property which is
+ // preserved between a `textDocument/codeAction` and a
+ // `codeAction/resolve` request.
+ //
+ // @since 3.16.0
+ DataSupport bool `json:"dataSupport,omitempty"`
+ // Whether the client supports resolving additional code action
+ // properties via a separate `codeAction/resolve` request.
+ //
+ // @since 3.16.0
+ ResolveSupport *PResolveSupportPCodeAction `json:"resolveSupport,omitempty"`
+ // Whether the client honors the change annotations in
+ // text edits and resource operations returned via the
+ // `CodeAction#edit` property by for example presenting
+ // the workspace edit in the user interface and asking
+ // for confirmation.
+ //
+ // @since 3.16.0
+ HonorsChangeAnnotations bool `json:"honorsChangeAnnotations,omitempty"`
+}
+
+// Contains additional diagnostic information about the context in which
+// a {@link CodeActionProvider.provideCodeActions code action} is run.
+type CodeActionContext struct { // line 9032
+ // An array of diagnostics known on the client side overlapping the range provided to the
+ // `textDocument/codeAction` request. They are provided so that the server knows which
+ // errors are currently presented to the user for the given range. There is no guarantee
+ // that these accurately reflect the error state of the resource. The primary parameter
+ // to compute code actions is the provided range.
+ Diagnostics []Diagnostic `json:"diagnostics"`
+ // Requested kind of actions to return.
+ //
+ // Actions not of this kind are filtered out by the client before being shown. So servers
+ // can omit computing them.
+ Only []CodeActionKind `json:"only,omitempty"`
+ // The reason why code actions were requested.
+ //
+ // @since 3.17.0
+ TriggerKind *CodeActionTriggerKind `json:"triggerKind,omitempty"`
+}
+
+// A set of predefined code action kinds
+type CodeActionKind string // line 13326
+// Provider options for a {@link CodeActionRequest}.
+type CodeActionOptions struct { // line 9071
+ // CodeActionKinds that this server may return.
+ //
+ // The list of kinds may be generic, such as `CodeActionKind.Refactor`, or the server
+ // may list out every specific kind they provide.
+ CodeActionKinds []CodeActionKind `json:"codeActionKinds,omitempty"`
+ // The server provides support to resolve additional
+ // information for a code action.
+ //
+ // @since 3.16.0
+ ResolveProvider bool `json:"resolveProvider,omitempty"`
+ WorkDoneProgressOptions
+}
+
+// The parameters of a {@link CodeActionRequest}.
+type CodeActionParams struct { // line 5308
+ // The document in which the command was invoked.
+ TextDocument TextDocumentIdentifier `json:"textDocument"`
+ // The range for which the command was invoked.
+ Range Range `json:"range"`
+ // Context carrying additional information.
+ Context CodeActionContext `json:"context"`
+ WorkDoneProgressParams
+ PartialResultParams
+}
+
+// Registration options for a {@link CodeActionRequest}.
+type CodeActionRegistrationOptions struct { // line 5476
+ TextDocumentRegistrationOptions
+ CodeActionOptions
+}
+
+// The reason why code actions were requested.
+//
+// @since 3.17.0
+type CodeActionTriggerKind uint32 // line 13606
+// Structure to capture a description for an error code.
+//
+// @since 3.16.0
+type CodeDescription struct { // line 10026
+ // An URI to open with more information about the diagnostic error.
+ Href URI `json:"href"`
+}
+
+// A code lens represents a {@link Command command} that should be shown along with
+// source text, like the number of references, a way to run tests, etc.
+//
+// A code lens is _unresolved_ when no command is associated to it. For performance
+// reasons the creation of a code lens and resolving should be done in two stages.
+type CodeLens struct { // line 5599
+ // The range in which this code lens is valid. Should only span a single line.
+ Range Range `json:"range"`
+ // The command this code lens represents.
+ Command *Command `json:"command,omitempty"`
+ // A data entry field that is preserved on a code lens item between
+ // a {@link CodeLensRequest} and a [CodeLensResolveRequest]
+ // (#CodeLensResolveRequest)
+ Data interface{} `json:"data,omitempty"`
+}
+
+// The client capabilities of a {@link CodeLensRequest}.
+type CodeLensClientCapabilities struct { // line 11835
+ // Whether code lens supports dynamic registration.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+}
+
+// Code Lens provider options of a {@link CodeLensRequest}.
+type CodeLensOptions struct { // line 9127
+ // Code lens has a resolve provider as well.
+ ResolveProvider bool `json:"resolveProvider,omitempty"`
+ WorkDoneProgressOptions
+}
+
+// The parameters of a {@link CodeLensRequest}.
+type CodeLensParams struct { // line 5575
+ // The document to request code lens for.
+ TextDocument TextDocumentIdentifier `json:"textDocument"`
+ WorkDoneProgressParams
+ PartialResultParams
+}
+
+// Registration options for a {@link CodeLensRequest}.
+type CodeLensRegistrationOptions struct { // line 5631
+ TextDocumentRegistrationOptions
+ CodeLensOptions
+}
+
+// @since 3.16.0
+type CodeLensWorkspaceClientCapabilities struct { // line 10993
+ // Whether the client implementation supports a refresh request sent from the
+ // server to the client.
+ //
+ // Note that this event is global and will force the client to refresh all
+ // code lenses currently shown. It should be used with absolute care and is
+ // useful for situation where a server for example detect a project wide
+ // change that requires such a calculation.
+ RefreshSupport bool `json:"refreshSupport,omitempty"`
+}
+
+// Represents a color in RGBA space.
+type Color struct { // line 6433
+ // The red component of this color in the range [0-1].
+ Red float64 `json:"red"`
+ // The green component of this color in the range [0-1].
+ Green float64 `json:"green"`
+ // The blue component of this color in the range [0-1].
+ Blue float64 `json:"blue"`
+ // The alpha component of this color in the range [0-1].
+ Alpha float64 `json:"alpha"`
+}
+
+// Represents a color range from a document.
+type ColorInformation struct { // line 2239
+ // The range in the document where this color appears.
+ Range Range `json:"range"`
+ // The actual color value for this color range.
+ Color Color `json:"color"`
+}
+type ColorPresentation struct { // line 2321
+ // The label of this color presentation. It will be shown on the color
+ // picker header. By default this is also the text that is inserted when selecting
+ // this color presentation.
+ Label string `json:"label"`
+ // An {@link TextEdit edit} which is applied to a document when selecting
+ // this presentation for the color. When `falsy` the {@link ColorPresentation.label label}
+ // is used.
+ TextEdit *TextEdit `json:"textEdit,omitempty"`
+ // An optional array of additional {@link TextEdit text edits} that are applied when
+ // selecting this color presentation. Edits must not overlap with the main {@link ColorPresentation.textEdit edit} nor with themselves.
+ AdditionalTextEdits []TextEdit `json:"additionalTextEdits,omitempty"`
+}
+
+// Parameters for a {@link ColorPresentationRequest}.
+type ColorPresentationParams struct { // line 2281
+ // The text document.
+ TextDocument TextDocumentIdentifier `json:"textDocument"`
+ // The color to request presentations for.
+ Color Color `json:"color"`
+ // The range where the color would be inserted. Serves as a context.
+ Range Range `json:"range"`
+ WorkDoneProgressParams
+ PartialResultParams
+}
+
+// Represents a reference to a command. Provides a title which
+// will be used to represent a command in the UI and, optionally,
+// an array of arguments which will be passed to the command handler
+// function when invoked.
+type Command struct { // line 5348
+ // Title of the command, like `save`.
+ Title string `json:"title"`
+ // The identifier of the actual command handler.
+ Command string `json:"command"`
+ // Arguments that the command handler should be
+ // invoked with.
+ Arguments []json.RawMessage `json:"arguments,omitempty"`
+}
+
+// Completion client capabilities
+type CompletionClientCapabilities struct { // line 11168
+ // Whether completion supports dynamic registration.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+ // The client supports the following `CompletionItem` specific
+ // capabilities.
+ CompletionItem PCompletionItemPCompletion `json:"completionItem,omitempty"`
+ CompletionItemKind *PCompletionItemKindPCompletion `json:"completionItemKind,omitempty"`
+ // Defines how the client handles whitespace and indentation
+ // when accepting a completion item that uses multi line
+ // text in either `insertText` or `textEdit`.
+ //
+ // @since 3.17.0
+ InsertTextMode InsertTextMode `json:"insertTextMode,omitempty"`
+ // The client supports to send additional context information for a
+ // `textDocument/completion` request.
+ ContextSupport bool `json:"contextSupport,omitempty"`
+ // The client supports the following `CompletionList` specific
+ // capabilities.
+ //
+ // @since 3.17.0
+ CompletionList *PCompletionListPCompletion `json:"completionList,omitempty"`
+}
+
+// Contains additional information about the context in which a completion request is triggered.
+type CompletionContext struct { // line 8628
+ // How the completion was triggered.
+ TriggerKind CompletionTriggerKind `json:"triggerKind"`
+ // The trigger character (a single character) that has trigger code complete.
+ // Is undefined if `triggerKind !== CompletionTriggerKind.TriggerCharacter`
+ TriggerCharacter string `json:"triggerCharacter,omitempty"`
+}
+
+// A completion item represents a text snippet that is
+// proposed to complete text that is being typed.
+type CompletionItem struct { // line 4528
+ // The label of this completion item.
+ //
+ // The label property is also by default the text that
+ // is inserted when selecting this completion.
+ //
+ // If label details are provided the label itself should
+ // be an unqualified name of the completion item.
+ Label string `json:"label"`
+ // Additional details for the label
+ //
+ // @since 3.17.0
+ LabelDetails *CompletionItemLabelDetails `json:"labelDetails,omitempty"`
+ // The kind of this completion item. Based of the kind
+ // an icon is chosen by the editor.
+ Kind CompletionItemKind `json:"kind,omitempty"`
+ // Tags for this completion item.
+ //
+ // @since 3.15.0
+ Tags []CompletionItemTag `json:"tags,omitempty"`
+ // A human-readable string with additional information
+ // about this item, like type or symbol information.
+ Detail string `json:"detail,omitempty"`
+ // A human-readable string that represents a doc-comment.
+ Documentation *Or_CompletionItem_documentation `json:"documentation,omitempty"`
+ // Indicates if this item is deprecated.
+ // @deprecated Use `tags` instead.
+ Deprecated bool `json:"deprecated,omitempty"`
+ // Select this item when showing.
+ //
+ // *Note* that only one completion item can be selected and that the
+ // tool / client decides which item that is. The rule is that the *first*
+ // item of those that match best is selected.
+ Preselect bool `json:"preselect,omitempty"`
+ // A string that should be used when comparing this item
+ // with other items. When `falsy` the {@link CompletionItem.label label}
+ // is used.
+ SortText string `json:"sortText,omitempty"`
+ // A string that should be used when filtering a set of
+ // completion items. When `falsy` the {@link CompletionItem.label label}
+ // is used.
+ FilterText string `json:"filterText,omitempty"`
+ // A string that should be inserted into a document when selecting
+ // this completion. When `falsy` the {@link CompletionItem.label label}
+ // is used.
+ //
+ // The `insertText` is subject to interpretation by the client side.
+ // Some tools might not take the string literally. For example
+ // VS Code when code complete is requested in this example
+ // `con<cursor position>` and a completion item with an `insertText` of
+ // `console` is provided it will only insert `sole`. Therefore it is
+ // recommended to use `textEdit` instead since it avoids additional client
+ // side interpretation.
+ InsertText string `json:"insertText,omitempty"`
+ // The format of the insert text. The format applies to both the
+ // `insertText` property and the `newText` property of a provided
+ // `textEdit`. If omitted defaults to `InsertTextFormat.PlainText`.
+ //
+ // Please note that the insertTextFormat doesn't apply to
+ // `additionalTextEdits`.
+ InsertTextFormat *InsertTextFormat `json:"insertTextFormat,omitempty"`
+ // How whitespace and indentation is handled during completion
+ // item insertion. If not provided the clients default value depends on
+ // the `textDocument.completion.insertTextMode` client capability.
+ //
+ // @since 3.16.0
+ InsertTextMode *InsertTextMode `json:"insertTextMode,omitempty"`
+ // An {@link TextEdit edit} which is applied to a document when selecting
+ // this completion. When an edit is provided the value of
+ // {@link CompletionItem.insertText insertText} is ignored.
+ //
+ // Most editors support two different operations when accepting a completion
+ // item. One is to insert a completion text and the other is to replace an
+ // existing text with a completion text. Since this can usually not be
+ // predetermined by a server it can report both ranges. Clients need to
+ // signal support for `InsertReplaceEdits` via the
+ // `textDocument.completion.insertReplaceSupport` client capability
+ // property.
+ //
+ // *Note 1:* The text edit's range as well as both ranges from an insert
+ // replace edit must be a [single line] and they must contain the position
+ // at which completion has been requested.
+ // *Note 2:* If an `InsertReplaceEdit` is returned the edit's insert range
+ // must be a prefix of the edit's replace range, that means it must be
+ // contained and starting at the same position.
+ //
+ // @since 3.16.0 additional type `InsertReplaceEdit`
+ TextEdit *TextEdit `json:"textEdit,omitempty"`
+ // The edit text used if the completion item is part of a CompletionList and
+ // CompletionList defines an item default for the text edit range.
+ //
+ // Clients will only honor this property if they opt into completion list
+ // item defaults using the capability `completionList.itemDefaults`.
+ //
+ // If not provided and a list's default range is provided the label
+ // property is used as a text.
+ //
+ // @since 3.17.0
+ TextEditText string `json:"textEditText,omitempty"`
+ // An optional array of additional {@link TextEdit text edits} that are applied when
+ // selecting this completion. Edits must not overlap (including the same insert position)
+ // with the main {@link CompletionItem.textEdit edit} nor with themselves.
+ //
+ // Additional text edits should be used to change text unrelated to the current cursor position
+ // (for example adding an import statement at the top of the file if the completion item will
+ // insert an unqualified type).
+ AdditionalTextEdits []TextEdit `json:"additionalTextEdits,omitempty"`
+ // An optional set of characters that when pressed while this completion is active will accept it first and
+ // then type that character. *Note* that all commit characters should have `length=1` and that superfluous
+ // characters will be ignored.
+ CommitCharacters []string `json:"commitCharacters,omitempty"`
+ // An optional {@link Command command} that is executed *after* inserting this completion. *Note* that
+ // additional modifications to the current document should be described with the
+ // {@link CompletionItem.additionalTextEdits additionalTextEdits}-property.
+ Command *Command `json:"command,omitempty"`
+ // A data entry field that is preserved on a completion item between a
+ // {@link CompletionRequest} and a {@link CompletionResolveRequest}.
+ Data interface{} `json:"data,omitempty"`
+}
+
+// The kind of a completion entry.
+type CompletionItemKind uint32 // line 13134
+// Additional details for a completion item label.
+//
+// @since 3.17.0
+type CompletionItemLabelDetails struct { // line 8651
+ // An optional string which is rendered less prominently directly after {@link CompletionItem.label label},
+ // without any spacing. Should be used for function signatures and type annotations.
+ Detail string `json:"detail,omitempty"`
+ // An optional string which is rendered less prominently after {@link CompletionItem.detail}. Should be used
+ // for fully qualified names and file paths.
+ Description string `json:"description,omitempty"`
+}
+
+// Completion item tags are extra annotations that tweak the rendering of a completion
+// item.
+//
+// @since 3.15.0
+type CompletionItemTag uint32 // line 13244
+// Represents a collection of {@link CompletionItem completion items} to be presented
+// in the editor.
+type CompletionList struct { // line 4737
+ // This list it not complete. Further typing results in recomputing this list.
+ //
+ // Recomputed lists have all their items replaced (not appended) in the
+ // incomplete completion sessions.
+ IsIncomplete bool `json:"isIncomplete"`
+ // In many cases the items of an actual completion result share the same
+ // value for properties like `commitCharacters` or the range of a text
+ // edit. A completion list can therefore define item defaults which will
+ // be used if a completion item itself doesn't specify the value.
+ //
+ // If a completion list specifies a default value and a completion item
+ // also specifies a corresponding value the one from the item is used.
+ //
+ // Servers are only allowed to return default values if the client
+ // signals support for this via the `completionList.itemDefaults`
+ // capability.
+ //
+ // @since 3.17.0
+ ItemDefaults *PItemDefaultsMsg_textDocument_completion `json:"itemDefaults,omitempty"`
+ // The completion items.
+ Items []CompletionItem `json:"items"`
+}
+
+// Completion options.
+type CompletionOptions struct { // line 8707
+ // Most tools trigger completion request automatically without explicitly requesting
+ // it using a keyboard shortcut (e.g. Ctrl+Space). Typically they do so when the user
+ // starts to type an identifier. For example if the user types `c` in a JavaScript file
+ // code complete will automatically pop up present `console` besides others as a
+ // completion item. Characters that make up identifiers don't need to be listed here.
+ //
+ // If code complete should automatically be trigger on characters not being valid inside
+ // an identifier (for example `.` in JavaScript) list them in `triggerCharacters`.
+ TriggerCharacters []string `json:"triggerCharacters,omitempty"`
+ // The list of all possible characters that commit a completion. This field can be used
+ // if clients don't support individual commit characters per completion item. See
+ // `ClientCapabilities.textDocument.completion.completionItem.commitCharactersSupport`
+ //
+ // If a server provides both `allCommitCharacters` and commit characters on an individual
+ // completion item the ones on the completion item win.
+ //
+ // @since 3.2.0
+ AllCommitCharacters []string `json:"allCommitCharacters,omitempty"`
+ // The server provides support to resolve additional
+ // information for a completion item.
+ ResolveProvider bool `json:"resolveProvider,omitempty"`
+ // The server supports the following `CompletionItem` specific
+ // capabilities.
+ //
+ // @since 3.17.0
+ CompletionItem *PCompletionItemPCompletionProvider `json:"completionItem,omitempty"`
+ WorkDoneProgressOptions
+}
+
+// Completion parameters
+type CompletionParams struct { // line 4497
+ // The completion context. This is only available it the client specifies
+ // to send this using the client capability `textDocument.completion.contextSupport === true`
+ Context CompletionContext `json:"context,omitempty"`
+ TextDocumentPositionParams
+ WorkDoneProgressParams
+ PartialResultParams
+}
+
+// Registration options for a {@link CompletionRequest}.
+type CompletionRegistrationOptions struct { // line 4854
+ TextDocumentRegistrationOptions
+ CompletionOptions
+}
+
+// How a completion was triggered
+type CompletionTriggerKind uint32 // line 13555
+type ConfigurationItem struct { // line 6396
+ // The scope to get the configuration section for.
+ ScopeURI string `json:"scopeUri,omitempty"`
+ // The configuration section asked for.
+ Section string `json:"section,omitempty"`
+}
+
+// The parameters of a configuration request.
+type ConfigurationParams struct { // line 2199
+ Items []ConfigurationItem `json:"items"`
+}
+
+// Create file operation.
+type CreateFile struct { // line 6712
+ // A create
+ Kind string `json:"kind"`
+ // The resource to create.
+ URI DocumentURI `json:"uri"`
+ // Additional options
+ Options *CreateFileOptions `json:"options,omitempty"`
+ ResourceOperation
+}
+
+// Options to create a file.
+type CreateFileOptions struct { // line 9417
+ // Overwrite existing file. Overwrite wins over `ignoreIfExists`
+ Overwrite bool `json:"overwrite,omitempty"`
+ // Ignore if exists.
+ IgnoreIfExists bool `json:"ignoreIfExists,omitempty"`
+}
+
+// The parameters sent in notifications/requests for user-initiated creation of
+// files.
+//
+// @since 3.16.0
+type CreateFilesParams struct { // line 3175
+ // An array of all files/folders created in this operation.
+ Files []FileCreate `json:"files"`
+}
+
+// The declaration of a symbol representation as one or many {@link Location locations}.
+type Declaration = []Location // (alias) line 13833
+// @since 3.14.0
+type DeclarationClientCapabilities struct { // line 11509
+ // Whether declaration supports dynamic registration. If this is set to `true`
+ // the client supports the new `DeclarationRegistrationOptions` return value
+ // for the corresponding server capability as well.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+ // The client supports additional metadata in the form of declaration links.
+ LinkSupport bool `json:"linkSupport,omitempty"`
+}
+
+// Information about where a symbol is declared.
+//
+// Provides additional metadata over normal {@link Location location} declarations, including the range of
+// the declaring symbol.
+//
+// Servers should prefer returning `DeclarationLink` over `Declaration` if supported
+// by the client.
+type DeclarationLink = LocationLink // (alias) line 13853
+type DeclarationOptions struct { // line 6491
+ WorkDoneProgressOptions
+}
+type DeclarationParams struct { // line 2494
+ TextDocumentPositionParams
+ WorkDoneProgressParams
+ PartialResultParams
+}
+type DeclarationRegistrationOptions struct { // line 2514
+ DeclarationOptions
+ TextDocumentRegistrationOptions
+ StaticRegistrationOptions
+}
+
+// The definition of a symbol represented as one or many {@link Location locations}.
+// For most programming languages there is only one location at which a symbol is
+// defined.
+//
+// Servers should prefer returning `DefinitionLink` over `Definition` if supported
+// by the client.
+type Definition = Or_Definition // (alias) line 13751
+// Client Capabilities for a {@link DefinitionRequest}.
+type DefinitionClientCapabilities struct { // line 11534
+ // Whether definition supports dynamic registration.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+ // The client supports additional metadata in the form of definition links.
+ //
+ // @since 3.14.0
+ LinkSupport bool `json:"linkSupport,omitempty"`
+}
+
+// Information about where a symbol is defined.
+//
+// Provides additional metadata over normal {@link Location location} definitions, including the range of
+// the defining symbol
+type DefinitionLink = LocationLink // (alias) line 13771
+// Server Capabilities for a {@link DefinitionRequest}.
+type DefinitionOptions struct { // line 8919
+ WorkDoneProgressOptions
+}
+
+// Parameters for a {@link DefinitionRequest}.
+type DefinitionParams struct { // line 5018
+ TextDocumentPositionParams
+ WorkDoneProgressParams
+ PartialResultParams
+}
+
+// Registration options for a {@link DefinitionRequest}.
+type DefinitionRegistrationOptions struct { // line 5039
+ TextDocumentRegistrationOptions
+ DefinitionOptions
+}
+
+// Delete file operation
+type DeleteFile struct { // line 6794
+ // A delete
+ Kind string `json:"kind"`
+ // The file to delete.
+ URI DocumentURI `json:"uri"`
+ // Delete options.
+ Options *DeleteFileOptions `json:"options,omitempty"`
+ ResourceOperation
+}
+
+// Delete file options
+type DeleteFileOptions struct { // line 9465
+ // Delete the content recursively if a folder is denoted.
+ Recursive bool `json:"recursive,omitempty"`
+ // Ignore the operation if the file doesn't exist.
+ IgnoreIfNotExists bool `json:"ignoreIfNotExists,omitempty"`
+}
+
+// The parameters sent in notifications/requests for user-initiated deletes of
+// files.
+//
+// @since 3.16.0
+type DeleteFilesParams struct { // line 3300
+ // An array of all files/folders deleted in this operation.
+ Files []FileDelete `json:"files"`
+}
+
+// Represents a diagnostic, such as a compiler error or warning. Diagnostic objects
+// are only valid in the scope of a resource.
+type Diagnostic struct { // line 8525
+ // The range at which the message applies
+ Range Range `json:"range"`
+ // The diagnostic's severity. Can be omitted. If omitted it is up to the
+ // client to interpret diagnostics as error, warning, info or hint.
+ Severity DiagnosticSeverity `json:"severity,omitempty"`
+ // The diagnostic's code, which usually appear in the user interface.
+ Code interface{} `json:"code,omitempty"`
+ // An optional property to describe the error code.
+ // Requires the code field (above) to be present/not null.
+ //
+ // @since 3.16.0
+ CodeDescription *CodeDescription `json:"codeDescription,omitempty"`
+ // A human-readable string describing the source of this
+ // diagnostic, e.g. 'typescript' or 'super lint'. It usually
+ // appears in the user interface.
+ Source string `json:"source,omitempty"`
+ // The diagnostic's message. It usually appears in the user interface
+ Message string `json:"message"`
+ // Additional metadata about the diagnostic.
+ //
+ // @since 3.15.0
+ Tags []DiagnosticTag `json:"tags,omitempty"`
+ // An array of related diagnostic information, e.g. when symbol-names within
+ // a scope collide all definitions can be marked via this property.
+ RelatedInformation []DiagnosticRelatedInformation `json:"relatedInformation,omitempty"`
+ // A data entry field that is preserved between a `textDocument/publishDiagnostics`
+ // notification and `textDocument/codeAction` request.
+ //
+ // @since 3.16.0
+ Data interface{} `json:"data,omitempty"`
+}
+
+// Client capabilities specific to diagnostic pull requests.
+//
+// @since 3.17.0
+type DiagnosticClientCapabilities struct { // line 12408
+ // Whether implementation supports dynamic registration. If this is set to `true`
+ // the client supports the new `(TextDocumentRegistrationOptions & StaticRegistrationOptions)`
+ // return value for the corresponding server capability as well.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+ // Whether the clients supports related documents for document diagnostic pulls.
+ RelatedDocumentSupport bool `json:"relatedDocumentSupport,omitempty"`
+}
+
+// Diagnostic options.
+//
+// @since 3.17.0
+type DiagnosticOptions struct { // line 7293
+ // An optional identifier under which the diagnostics are
+ // managed by the client.
+ Identifier string `json:"identifier,omitempty"`
+ // Whether the language has inter file dependencies meaning that
+ // editing code in one file can result in a different diagnostic
+ // set in another file. Inter file dependencies are common for
+ // most programming languages and typically uncommon for linters.
+ InterFileDependencies bool `json:"interFileDependencies"`
+ // The server provides support for workspace diagnostics as well.
+ WorkspaceDiagnostics bool `json:"workspaceDiagnostics"`
+ WorkDoneProgressOptions
+}
+
+// Diagnostic registration options.
+//
+// @since 3.17.0
+type DiagnosticRegistrationOptions struct { // line 3855
+ TextDocumentRegistrationOptions
+ DiagnosticOptions
+ StaticRegistrationOptions
+}
+
+// Represents a related message and source code location for a diagnostic. This should be
+// used to point to code locations that cause or related to a diagnostics, e.g when duplicating
+// a symbol in a scope.
+type DiagnosticRelatedInformation struct { // line 10041
+ // The location of this related diagnostic information.
+ Location Location `json:"location"`
+ // The message of this related diagnostic information.
+ Message string `json:"message"`
+}
+
+// Cancellation data returned from a diagnostic request.
+//
+// @since 3.17.0
+type DiagnosticServerCancellationData struct { // line 3841
+ RetriggerRequest bool `json:"retriggerRequest"`
+}
+
+// The diagnostic's severity.
+type DiagnosticSeverity uint32 // line 13504
+// The diagnostic tags.
+//
+// @since 3.15.0
+type DiagnosticTag uint32 // line 13534
+// Workspace client capabilities specific to diagnostic pull requests.
+//
+// @since 3.17.0
+type DiagnosticWorkspaceClientCapabilities struct { // line 11111
+ // Whether the client implementation supports a refresh request sent from
+ // the server to the client.
+ //
+ // Note that this event is global and will force the client to refresh all
+ // pulled diagnostics currently shown. It should be used with absolute care and
+ // is useful for situation where a server for example detects a project wide
+ // change that requires such a calculation.
+ RefreshSupport bool `json:"refreshSupport,omitempty"`
+}
+type DidChangeConfigurationClientCapabilities struct { // line 10837
+ // Did change configuration notification supports dynamic registration.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+}
+
+// The parameters of a change configuration notification.
+type DidChangeConfigurationParams struct { // line 4144
+ // The actual changed settings
+ Settings interface{} `json:"settings"`
+}
+type DidChangeConfigurationRegistrationOptions struct { // line 4158
+ Section *OrPSection_workspace_didChangeConfiguration `json:"section,omitempty"`
+}
+
+// The params sent in a change notebook document notification.
+//
+// @since 3.17.0
+type DidChangeNotebookDocumentParams struct { // line 3974
+ // The notebook document that did change. The version number points
+ // to the version after all provided changes have been applied. If
+ // only the text document content of a cell changes the notebook version
+ // doesn't necessarily have to change.
+ NotebookDocument VersionedNotebookDocumentIdentifier `json:"notebookDocument"`
+ // The actual changes to the notebook document.
+ //
+ // The changes describe single state changes to the notebook document.
+ // So if there are two changes c1 (at array index 0) and c2 (at array
+ // index 1) for a notebook in state S then c1 moves the notebook from
+ // S to S' and c2 from S' to S''. So c1 is computed on the state S and
+ // c2 is computed on the state S'.
+ //
+ // To mirror the content of a notebook using change events use the following approach:
+ //
+ // - start with the same initial content
+ // - apply the 'notebookDocument/didChange' notifications in the order you receive them.
+ // - apply the `NotebookChangeEvent`s in a single notification in the order
+ // you receive them.
+ Change NotebookDocumentChangeEvent `json:"change"`
+}
+
+// The change text document notification's parameters.
+type DidChangeTextDocumentParams struct { // line 4287
+ // The document that did change. The version number points
+ // to the version after all provided content changes have
+ // been applied.
+ TextDocument VersionedTextDocumentIdentifier `json:"textDocument"`
+ // The actual content changes. The content changes describe single state changes
+ // to the document. So if there are two content changes c1 (at array index 0) and
+ // c2 (at array index 1) for a document in state S then c1 moves the document from
+ // S to S' and c2 from S' to S''. So c1 is computed on the state S and c2 is computed
+ // on the state S'.
+ //
+ // To mirror the content of a document using change events use the following approach:
+ //
+ // - start with the same initial content
+ // - apply the 'textDocument/didChange' notifications in the order you receive them.
+ // - apply the `TextDocumentContentChangeEvent`s in a single notification in the order
+ // you receive them.
+ ContentChanges []TextDocumentContentChangeEvent `json:"contentChanges"`
+}
+type DidChangeWatchedFilesClientCapabilities struct { // line 10851
+ // Did change watched files notification supports dynamic registration. Please note
+ // that the current protocol doesn't support static configuration for file changes
+ // from the server side.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+ // Whether the client has support for {@link RelativePattern relative pattern}
+ // or not.
+ //
+ // @since 3.17.0
+ RelativePatternSupport bool `json:"relativePatternSupport,omitempty"`
+}
+
+// The watched files change notification's parameters.
+type DidChangeWatchedFilesParams struct { // line 4428
+ // The actual file events.
+ Changes []FileEvent `json:"changes"`
+}
+
+// Describe options to be used when registered for text document change events.
+type DidChangeWatchedFilesRegistrationOptions struct { // line 4445
+ // The watchers to register.
+ Watchers []FileSystemWatcher `json:"watchers"`
+}
+
+// The parameters of a `workspace/didChangeWorkspaceFolders` notification.
+type DidChangeWorkspaceFoldersParams struct { // line 2185
+ // The actual workspace folder change event.
+ Event WorkspaceFoldersChangeEvent `json:"event"`
+}
+
+// The params sent in a close notebook document notification.
+//
+// @since 3.17.0
+type DidCloseNotebookDocumentParams struct { // line 4012
+ // The notebook document that got closed.
+ NotebookDocument NotebookDocumentIdentifier `json:"notebookDocument"`
+ // The text documents that represent the content
+ // of a notebook cell that got closed.
+ CellTextDocuments []TextDocumentIdentifier `json:"cellTextDocuments"`
+}
+
+// The parameters sent in a close text document notification
+type DidCloseTextDocumentParams struct { // line 4332
+ // The document that was closed.
+ TextDocument TextDocumentIdentifier `json:"textDocument"`
+}
+
+// The params sent in an open notebook document notification.
+//
+// @since 3.17.0
+type DidOpenNotebookDocumentParams struct { // line 3948
+ // The notebook document that got opened.
+ NotebookDocument NotebookDocument `json:"notebookDocument"`
+ // The text documents that represent the content
+ // of a notebook cell.
+ CellTextDocuments []TextDocumentItem `json:"cellTextDocuments"`
+}
+
+// The parameters sent in an open text document notification
+type DidOpenTextDocumentParams struct { // line 4273
+ // The document that was opened.
+ TextDocument TextDocumentItem `json:"textDocument"`
+}
+
+// The params sent in a save notebook document notification.
+//
+// @since 3.17.0
+type DidSaveNotebookDocumentParams struct { // line 3997
+ // The notebook document that got saved.
+ NotebookDocument NotebookDocumentIdentifier `json:"notebookDocument"`
+}
+
+// The parameters sent in a save text document notification
+type DidSaveTextDocumentParams struct { // line 4346
+ // The document that was saved.
+ TextDocument TextDocumentIdentifier `json:"textDocument"`
+ // Optional the content when saved. Depends on the includeText value
+ // when the save notification was requested.
+ Text *string `json:"text,omitempty"`
+}
+type DocumentColorClientCapabilities struct { // line 11875
+ // Whether implementation supports dynamic registration. If this is set to `true`
+ // the client supports the new `DocumentColorRegistrationOptions` return value
+ // for the corresponding server capability as well.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+}
+type DocumentColorOptions struct { // line 6471
+ WorkDoneProgressOptions
+}
+
+// Parameters for a {@link DocumentColorRequest}.
+type DocumentColorParams struct { // line 2215
+ // The text document.
+ TextDocument TextDocumentIdentifier `json:"textDocument"`
+ WorkDoneProgressParams
+ PartialResultParams
+}
+type DocumentColorRegistrationOptions struct { // line 2261
+ TextDocumentRegistrationOptions
+ DocumentColorOptions
+ StaticRegistrationOptions
+}
+
+// Parameters of the document diagnostic request.
+//
+// @since 3.17.0
+type DocumentDiagnosticParams struct { // line 3768
+ // The text document.
+ TextDocument TextDocumentIdentifier `json:"textDocument"`
+ // The additional identifier provided during registration.
+ Identifier string `json:"identifier,omitempty"`
+ // The result id of a previous response if provided.
+ PreviousResultID string `json:"previousResultId,omitempty"`
+ WorkDoneProgressParams
+ PartialResultParams
+}
+type DocumentDiagnosticReport = Or_DocumentDiagnosticReport // (alias) line 13909
+// The document diagnostic report kinds.
+//
+// @since 3.17.0
+type DocumentDiagnosticReportKind string // line 12722
+// A partial result for a document diagnostic report.
+//
+// @since 3.17.0
+type DocumentDiagnosticReportPartialResult struct { // line 3811
+ RelatedDocuments map[DocumentURI]interface{} `json:"relatedDocuments"`
+}
+
+// A document filter describes a top level text document or
+// a notebook cell document.
+//
+// @since 3.17.0 - proposed support for NotebookCellTextDocumentFilter.
+type DocumentFilter = Or_DocumentFilter // (alias) line 14093
+// Client capabilities of a {@link DocumentFormattingRequest}.
+type DocumentFormattingClientCapabilities struct { // line 11889
+ // Whether formatting supports dynamic registration.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+}
+
+// Provider options for a {@link DocumentFormattingRequest}.
+type DocumentFormattingOptions struct { // line 9221
+ WorkDoneProgressOptions
+}
+
+// The parameters of a {@link DocumentFormattingRequest}.
+type DocumentFormattingParams struct { // line 5727
+ // The document to format.
+ TextDocument TextDocumentIdentifier `json:"textDocument"`
+ // The format options.
+ Options FormattingOptions `json:"options"`
+ WorkDoneProgressParams
+}
+
+// Registration options for a {@link DocumentFormattingRequest}.
+type DocumentFormattingRegistrationOptions struct { // line 5755
+ TextDocumentRegistrationOptions
+ DocumentFormattingOptions
+}
+
+// A document highlight is a range inside a text document which deserves
+// special attention. Usually a document highlight is visualized by changing
+// the background color of its range.
+type DocumentHighlight struct { // line 5119
+ // The range this highlight applies to.
+ Range Range `json:"range"`
+ // The highlight kind, default is {@link DocumentHighlightKind.Text text}.
+ Kind DocumentHighlightKind `json:"kind,omitempty"`
+}
+
+// Client Capabilities for a {@link DocumentHighlightRequest}.
+type DocumentHighlightClientCapabilities struct { // line 11624
+ // Whether document highlight supports dynamic registration.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+}
+
+// A document highlight kind.
+type DocumentHighlightKind uint32 // line 13301
+// Provider options for a {@link DocumentHighlightRequest}.
+type DocumentHighlightOptions struct { // line 8955
+ WorkDoneProgressOptions
+}
+
+// Parameters for a {@link DocumentHighlightRequest}.
+type DocumentHighlightParams struct { // line 5098
+ TextDocumentPositionParams
+ WorkDoneProgressParams
+ PartialResultParams
+}
+
+// Registration options for a {@link DocumentHighlightRequest}.
+type DocumentHighlightRegistrationOptions struct { // line 5142
+ TextDocumentRegistrationOptions
+ DocumentHighlightOptions
+}
+
+// A document link is a range in a text document that links to an internal or external resource, like another
+// text document or a web site.
+type DocumentLink struct { // line 5670
+ // The range this link applies to.
+ Range Range `json:"range"`
+ // The uri this link points to. If missing a resolve request is sent later.
+ Target string `json:"target,omitempty"`
+ // The tooltip text when you hover over this link.
+ //
+ // If a tooltip is provided, is will be displayed in a string that includes instructions on how to
+ // trigger the link, such as `{0} (ctrl + click)`. The specific instructions vary depending on OS,
+ // user settings, and localization.
+ //
+ // @since 3.15.0
+ Tooltip string `json:"tooltip,omitempty"`
+ // A data entry field that is preserved on a document link between a
+ // DocumentLinkRequest and a DocumentLinkResolveRequest.
+ Data interface{} `json:"data,omitempty"`
+}
+
+// The client capabilities of a {@link DocumentLinkRequest}.
+type DocumentLinkClientCapabilities struct { // line 11850
+ // Whether document link supports dynamic registration.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+ // Whether the client supports the `tooltip` property on `DocumentLink`.
+ //
+ // @since 3.15.0
+ TooltipSupport bool `json:"tooltipSupport,omitempty"`
+}
+
+// Provider options for a {@link DocumentLinkRequest}.
+type DocumentLinkOptions struct { // line 9148
+ // Document links have a resolve provider as well.
+ ResolveProvider bool `json:"resolveProvider,omitempty"`
+ WorkDoneProgressOptions
+}
+
+// The parameters of a {@link DocumentLinkRequest}.
+type DocumentLinkParams struct { // line 5646
+ // The document to provide document links for.
+ TextDocument TextDocumentIdentifier `json:"textDocument"`
+ WorkDoneProgressParams
+ PartialResultParams
+}
+
+// Registration options for a {@link DocumentLinkRequest}.
+type DocumentLinkRegistrationOptions struct { // line 5712
+ TextDocumentRegistrationOptions
+ DocumentLinkOptions
+}
+
+// Client capabilities of a {@link DocumentOnTypeFormattingRequest}.
+type DocumentOnTypeFormattingClientCapabilities struct { // line 11919
+ // Whether on type formatting supports dynamic registration.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+}
+
+// Provider options for a {@link DocumentOnTypeFormattingRequest}.
+type DocumentOnTypeFormattingOptions struct { // line 9243
+ // A character on which formatting should be triggered, like `{`.
+ FirstTriggerCharacter string `json:"firstTriggerCharacter"`
+ // More trigger characters.
+ MoreTriggerCharacter []string `json:"moreTriggerCharacter,omitempty"`
+}
+
+// The parameters of a {@link DocumentOnTypeFormattingRequest}.
+type DocumentOnTypeFormattingParams struct { // line 5821
+ // The document to format.
+ TextDocument TextDocumentIdentifier `json:"textDocument"`
+ // The position around which the on type formatting should happen.
+ // This is not necessarily the exact position where the character denoted
+ // by the property `ch` got typed.
+ Position Position `json:"position"`
+ // The character that has been typed that triggered the formatting
+ // on type request. That is not necessarily the last character that
+ // got inserted into the document since the client could auto insert
+ // characters as well (e.g. like automatic brace completion).
+ Ch string `json:"ch"`
+ // The formatting options.
+ Options FormattingOptions `json:"options"`
+}
+
+// Registration options for a {@link DocumentOnTypeFormattingRequest}.
+type DocumentOnTypeFormattingRegistrationOptions struct { // line 5859
+ TextDocumentRegistrationOptions
+ DocumentOnTypeFormattingOptions
+}
+
+// Client capabilities of a {@link DocumentRangeFormattingRequest}.
+type DocumentRangeFormattingClientCapabilities struct { // line 11904
+ // Whether range formatting supports dynamic registration.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+}
+
+// Provider options for a {@link DocumentRangeFormattingRequest}.
+type DocumentRangeFormattingOptions struct { // line 9232
+ WorkDoneProgressOptions
+}
+
+// The parameters of a {@link DocumentRangeFormattingRequest}.
+type DocumentRangeFormattingParams struct { // line 5770
+ // The document to format.
+ TextDocument TextDocumentIdentifier `json:"textDocument"`
+ // The range to format
+ Range Range `json:"range"`
+ // The format options
+ Options FormattingOptions `json:"options"`
+ WorkDoneProgressParams
+}
+
+// Registration options for a {@link DocumentRangeFormattingRequest}.
+type DocumentRangeFormattingRegistrationOptions struct { // line 5806
+ TextDocumentRegistrationOptions
+ DocumentRangeFormattingOptions
+}
+
+// A document selector is the combination of one or many document filters.
+//
+// @sample `let sel:DocumentSelector = [{ language: 'typescript' }, { language: 'json', pattern: '**∕tsconfig.json' }]`;
+//
+// The use of a string as a document filter is deprecated @since 3.16.0.
+type DocumentSelector = []DocumentFilter // (alias) line 13948
+// Represents programming constructs like variables, classes, interfaces etc.
+// that appear in a document. Document symbols can be hierarchical and they
+// have two ranges: one that encloses its definition and one that points to
+// its most interesting range, e.g. the range of an identifier.
+type DocumentSymbol struct { // line 5211
+ // The name of this symbol. Will be displayed in the user interface and therefore must not be
+ // an empty string or a string only consisting of white spaces.
+ Name string `json:"name"`
+ // More detail for this symbol, e.g the signature of a function.
+ Detail string `json:"detail,omitempty"`
+ // The kind of this symbol.
+ Kind SymbolKind `json:"kind"`
+ // Tags for this document symbol.
+ //
+ // @since 3.16.0
+ Tags []SymbolTag `json:"tags,omitempty"`
+ // Indicates if this symbol is deprecated.
+ //
+ // @deprecated Use tags instead
+ Deprecated bool `json:"deprecated,omitempty"`
+ // The range enclosing this symbol not including leading/trailing whitespace but everything else
+ // like comments. This information is typically used to determine if the clients cursor is
+ // inside the symbol to reveal in the symbol in the UI.
+ Range Range `json:"range"`
+ // The range that should be selected and revealed when this symbol is being picked, e.g the name of a function.
+ // Must be contained by the `range`.
+ SelectionRange Range `json:"selectionRange"`
+ // Children of this symbol, e.g. properties of a class.
+ Children []DocumentSymbol `json:"children,omitempty"`
+}
+
+// Client Capabilities for a {@link DocumentSymbolRequest}.
+type DocumentSymbolClientCapabilities struct { // line 11639
+ // Whether document symbol supports dynamic registration.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+ // Specific capabilities for the `SymbolKind` in the
+ // `textDocument/documentSymbol` request.
+ SymbolKind *PSymbolKindPDocumentSymbol `json:"symbolKind,omitempty"`
+ // The client supports hierarchical document symbols.
+ HierarchicalDocumentSymbolSupport bool `json:"hierarchicalDocumentSymbolSupport,omitempty"`
+ // The client supports tags on `SymbolInformation`. Tags are supported on
+ // `DocumentSymbol` if `hierarchicalDocumentSymbolSupport` is set to true.
+ // Clients supporting tags have to handle unknown tags gracefully.
+ //
+ // @since 3.16.0
+ TagSupport *PTagSupportPDocumentSymbol `json:"tagSupport,omitempty"`
+ // The client supports an additional label presented in the UI when
+ // registering a document symbol provider.
+ //
+ // @since 3.16.0
+ LabelSupport bool `json:"labelSupport,omitempty"`
+}
+
+// Provider options for a {@link DocumentSymbolRequest}.
+type DocumentSymbolOptions struct { // line 9010
+ // A human-readable string that is shown when multiple outlines trees
+ // are shown for the same document.
+ //
+ // @since 3.16.0
+ Label string `json:"label,omitempty"`
+ WorkDoneProgressOptions
+}
+
+// Parameters for a {@link DocumentSymbolRequest}.
+type DocumentSymbolParams struct { // line 5157
+ // The text document.
+ TextDocument TextDocumentIdentifier `json:"textDocument"`
+ WorkDoneProgressParams
+ PartialResultParams
+}
+
+// Registration options for a {@link DocumentSymbolRequest}.
+type DocumentSymbolRegistrationOptions struct { // line 5293
+ TextDocumentRegistrationOptions
+ DocumentSymbolOptions
+}
+type DocumentURI string
+
+// Predefined error codes.
+type ErrorCodes int32 // line 12743
+// The client capabilities of a {@link ExecuteCommandRequest}.
+type ExecuteCommandClientCapabilities struct { // line 10962
+ // Execute command supports dynamic registration.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+}
+
+// The server capabilities of a {@link ExecuteCommandRequest}.
+type ExecuteCommandOptions struct { // line 9291
+ // The commands to be executed on the server
+ Commands []string `json:"commands"`
+ WorkDoneProgressOptions
+}
+
+// The parameters of a {@link ExecuteCommandRequest}.
+type ExecuteCommandParams struct { // line 5941
+ // The identifier of the actual command handler.
+ Command string `json:"command"`
+ // Arguments that the command should be invoked with.
+ Arguments []json.RawMessage `json:"arguments,omitempty"`
+ WorkDoneProgressParams
+}
+
+// Registration options for a {@link ExecuteCommandRequest}.
+type ExecuteCommandRegistrationOptions struct { // line 5973
+ ExecuteCommandOptions
+}
+type ExecutionSummary struct { // line 10162
+ // A strict monotonically increasing value
+ // indicating the execution order of a cell
+ // inside a notebook.
+ ExecutionOrder uint32 `json:"executionOrder"`
+ // Whether the execution was successful or
+ // not if known by the client.
+ Success bool `json:"success,omitempty"`
+}
+
+// created for Literal (Lit_CodeActionClientCapabilities_codeActionLiteralSupport_codeActionKind)
+type FCodeActionKindPCodeActionLiteralSupport struct { // line 11742
+ // The code action kind values the client supports. When this
+ // property exists the client also guarantees that it will
+ // handle values outside its set gracefully and falls back
+ // to a default value when unknown.
+ ValueSet []CodeActionKind `json:"valueSet"`
+}
+
+// created for Literal (Lit_CompletionList_itemDefaults_editRange_Item1)
+type FEditRangePItemDefaults struct { // line 4777
+ Insert Range `json:"insert"`
+ Replace Range `json:"replace"`
+}
+
+// created for Literal (Lit_SemanticTokensClientCapabilities_requests_full_Item1)
+type FFullPRequests struct { // line 12205
+ // The client will send the `textDocument/semanticTokens/full/delta` request if
+ // the server provides a corresponding handler.
+ Delta bool `json:"delta"`
+}
+
+// created for Literal (Lit_CompletionClientCapabilities_completionItem_insertTextModeSupport)
+type FInsertTextModeSupportPCompletionItem struct { // line 11295
+ ValueSet []InsertTextMode `json:"valueSet"`
+}
+
+// created for Literal (Lit_SignatureHelpClientCapabilities_signatureInformation_parameterInformation)
+type FParameterInformationPSignatureInformation struct { // line 11461
+ // The client supports processing label offsets instead of a
+ // simple label string.
+ //
+ // @since 3.14.0
+ LabelOffsetSupport bool `json:"labelOffsetSupport,omitempty"`
+}
+
+// created for Literal (Lit_SemanticTokensClientCapabilities_requests_range_Item1)
+type FRangePRequests struct { // line 12185
+}
+
+// created for Literal (Lit_CompletionClientCapabilities_completionItem_resolveSupport)
+type FResolveSupportPCompletionItem struct { // line 11271
+ // The properties that a client can resolve lazily.
+ Properties []string `json:"properties"`
+}
+
+// created for Literal (Lit_NotebookDocumentChangeEvent_cells_structure)
+type FStructurePCells struct { // line 7487
+ // The change to the cell array.
+ Array NotebookCellArrayChange `json:"array"`
+ // Additional opened cell text documents.
+ DidOpen []TextDocumentItem `json:"didOpen,omitempty"`
+ // Additional closed cell text documents.
+ DidClose []TextDocumentIdentifier `json:"didClose,omitempty"`
+}
+
+// created for Literal (Lit_CompletionClientCapabilities_completionItem_tagSupport)
+type FTagSupportPCompletionItem struct { // line 11237
+ // The tags supported by the client.
+ ValueSet []CompletionItemTag `json:"valueSet"`
+}
+type FailureHandlingKind string // line 13693
+// The file event type
+type FileChangeType uint32 // line 13454
+// Represents information on a file/folder create.
+//
+// @since 3.16.0
+type FileCreate struct { // line 6662
+ // A file:// URI for the location of the file/folder being created.
+ URI string `json:"uri"`
+}
+
+// Represents information on a file/folder delete.
+//
+// @since 3.16.0
+type FileDelete struct { // line 6911
+ // A file:// URI for the location of the file/folder being deleted.
+ URI string `json:"uri"`
+}
+
+// An event describing a file change.
+type FileEvent struct { // line 8480
+ // The file's uri.
+ URI DocumentURI `json:"uri"`
+ // The change type.
+ Type FileChangeType `json:"type"`
+}
+
+// Capabilities relating to events from file operations by the user in the client.
+//
+// These events do not come from the file system, they come from user operations
+// like renaming a file in the UI.
+//
+// @since 3.16.0
+type FileOperationClientCapabilities struct { // line 11009
+ // Whether the client supports dynamic registration for file requests/notifications.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+ // The client has support for sending didCreateFiles notifications.
+ DidCreate bool `json:"didCreate,omitempty"`
+ // The client has support for sending willCreateFiles requests.
+ WillCreate bool `json:"willCreate,omitempty"`
+ // The client has support for sending didRenameFiles notifications.
+ DidRename bool `json:"didRename,omitempty"`
+ // The client has support for sending willRenameFiles requests.
+ WillRename bool `json:"willRename,omitempty"`
+ // The client has support for sending didDeleteFiles notifications.
+ DidDelete bool `json:"didDelete,omitempty"`
+ // The client has support for sending willDeleteFiles requests.
+ WillDelete bool `json:"willDelete,omitempty"`
+}
+
+// A filter to describe in which file operation requests or notifications
+// the server is interested in receiving.
+//
+// @since 3.16.0
+type FileOperationFilter struct { // line 6864
+ // A Uri scheme like `file` or `untitled`.
+ Scheme string `json:"scheme,omitempty"`
+ // The actual file operation pattern.
+ Pattern FileOperationPattern `json:"pattern"`
+}
+
+// Options for notifications/requests for user operations on files.
+//
+// @since 3.16.0
+type FileOperationOptions struct { // line 9965
+ // The server is interested in receiving didCreateFiles notifications.
+ DidCreate *FileOperationRegistrationOptions `json:"didCreate,omitempty"`
+ // The server is interested in receiving willCreateFiles requests.
+ WillCreate *FileOperationRegistrationOptions `json:"willCreate,omitempty"`
+ // The server is interested in receiving didRenameFiles notifications.
+ DidRename *FileOperationRegistrationOptions `json:"didRename,omitempty"`
+ // The server is interested in receiving willRenameFiles requests.
+ WillRename *FileOperationRegistrationOptions `json:"willRename,omitempty"`
+ // The server is interested in receiving didDeleteFiles file notifications.
+ DidDelete *FileOperationRegistrationOptions `json:"didDelete,omitempty"`
+ // The server is interested in receiving willDeleteFiles file requests.
+ WillDelete *FileOperationRegistrationOptions `json:"willDelete,omitempty"`
+}
+
+// A pattern to describe in which file operation requests or notifications
+// the server is interested in receiving.
+//
+// @since 3.16.0
+type FileOperationPattern struct { // line 9489
+ // The glob pattern to match. Glob patterns can have the following syntax:
+ //
+ // - `*` to match one or more characters in a path segment
+ // - `?` to match on one character in a path segment
+ // - `**` to match any number of path segments, including none
+ // - `{}` to group sub patterns into an OR expression. (e.g. `**​/*.{ts,js}` matches all TypeScript and JavaScript files)
+ // - `[]` to declare a range of characters to match in a path segment (e.g., `example.[0-9]` to match on `example.0`, `example.1`, …)
+ // - `[!...]` to negate a range of characters to match in a path segment (e.g., `example.[!0-9]` to match on `example.a`, `example.b`, but not `example.0`)
+ Glob string `json:"glob"`
+ // Whether to match files or folders with this pattern.
+ //
+ // Matches both if undefined.
+ Matches *FileOperationPatternKind `json:"matches,omitempty"`
+ // Additional options used during matching.
+ Options *FileOperationPatternOptions `json:"options,omitempty"`
+}
+
+// A pattern kind describing if a glob pattern matches a file a folder or
+// both.
+//
+// @since 3.16.0
+type FileOperationPatternKind string // line 13627
+// Matching options for the file operation pattern.
+//
+// @since 3.16.0
+type FileOperationPatternOptions struct { // line 10146
+ // The pattern should be matched ignoring casing.
+ IgnoreCase bool `json:"ignoreCase,omitempty"`
+}
+
+// The options to register for file operations.
+//
+// @since 3.16.0
+type FileOperationRegistrationOptions struct { // line 3264
+ // The actual filters.
+ Filters []FileOperationFilter `json:"filters"`
+}
+
+// Represents information on a file/folder rename.
+//
+// @since 3.16.0
+type FileRename struct { // line 6888
+ // A file:// URI for the original location of the file/folder being renamed.
+ OldURI string `json:"oldUri"`
+ // A file:// URI for the new location of the file/folder being renamed.
+ NewURI string `json:"newUri"`
+}
+type FileSystemWatcher struct { // line 8502
+ // The glob pattern to watch. See {@link GlobPattern glob pattern} for more detail.
+ //
+ // @since 3.17.0 support for relative patterns.
+ GlobPattern GlobPattern `json:"globPattern"`
+ // The kind of events of interest. If omitted it defaults
+ // to WatchKind.Create | WatchKind.Change | WatchKind.Delete
+ // which is 7.
+ Kind *WatchKind `json:"kind,omitempty"`
+}
+
+// Represents a folding range. To be valid, start and end line must be bigger than zero and smaller
+// than the number of lines in the document. Clients are free to ignore invalid ranges.
+type FoldingRange struct { // line 2415
+ // The zero-based start line of the range to fold. The folded area starts after the line's last character.
+ // To be valid, the end must be zero or larger and smaller than the number of lines in the document.
+ StartLine uint32 `json:"startLine"`
+ // The zero-based character offset from where the folded range starts. If not defined, defaults to the length of the start line.
+ StartCharacter uint32 `json:"startCharacter,omitempty"`
+ // The zero-based end line of the range to fold. The folded area ends with the line's last character.
+ // To be valid, the end must be zero or larger and smaller than the number of lines in the document.
+ EndLine uint32 `json:"endLine"`
+ // The zero-based character offset before the folded range ends. If not defined, defaults to the length of the end line.
+ EndCharacter uint32 `json:"endCharacter,omitempty"`
+ // Describes the kind of the folding range such as `comment' or 'region'. The kind
+ // is used to categorize folding ranges and used by commands like 'Fold all comments'.
+ // See {@link FoldingRangeKind} for an enumeration of standardized kinds.
+ Kind string `json:"kind,omitempty"`
+ // The text that the client should show when the specified range is
+ // collapsed. If not defined or not supported by the client, a default
+ // will be chosen by the client.
+ //
+ // @since 3.17.0
+ CollapsedText string `json:"collapsedText,omitempty"`
+}
+type FoldingRangeClientCapabilities struct { // line 11978
+ // Whether implementation supports dynamic registration for folding range
+ // providers. If this is set to `true` the client supports the new
+ // `FoldingRangeRegistrationOptions` return value for the corresponding
+ // server capability as well.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+ // The maximum number of folding ranges that the client prefers to receive
+ // per document. The value serves as a hint, servers are free to follow the
+ // limit.
+ RangeLimit uint32 `json:"rangeLimit,omitempty"`
+ // If set, the client signals that it only supports folding complete lines.
+ // If set, client will ignore specified `startCharacter` and `endCharacter`
+ // properties in a FoldingRange.
+ LineFoldingOnly bool `json:"lineFoldingOnly,omitempty"`
+ // Specific options for the folding range kind.
+ //
+ // @since 3.17.0
+ FoldingRangeKind *PFoldingRangeKindPFoldingRange `json:"foldingRangeKind,omitempty"`
+ // Specific options for the folding range.
+ //
+ // @since 3.17.0
+ FoldingRange *PFoldingRangePFoldingRange `json:"foldingRange,omitempty"`
+}
+
+// A set of predefined range kinds.
+type FoldingRangeKind string // line 12815
+type FoldingRangeOptions struct { // line 6481
+ WorkDoneProgressOptions
+}
+
+// Parameters for a {@link FoldingRangeRequest}.
+type FoldingRangeParams struct { // line 2391
+ // The text document.
+ TextDocument TextDocumentIdentifier `json:"textDocument"`
+ WorkDoneProgressParams
+ PartialResultParams
+}
+type FoldingRangeRegistrationOptions struct { // line 2474
+ TextDocumentRegistrationOptions
+ FoldingRangeOptions
+ StaticRegistrationOptions
+}
+
+// Value-object describing what options formatting should use.
+type FormattingOptions struct { // line 9169
+ // Size of a tab in spaces.
+ TabSize uint32 `json:"tabSize"`
+ // Prefer spaces over tabs.
+ InsertSpaces bool `json:"insertSpaces"`
+ // Trim trailing whitespace on a line.
+ //
+ // @since 3.15.0
+ TrimTrailingWhitespace bool `json:"trimTrailingWhitespace,omitempty"`
+ // Insert a newline character at the end of the file if one does not exist.
+ //
+ // @since 3.15.0
+ InsertFinalNewline bool `json:"insertFinalNewline,omitempty"`
+ // Trim all newlines after the final newline at the end of the file.
+ //
+ // @since 3.15.0
+ TrimFinalNewlines bool `json:"trimFinalNewlines,omitempty"`
+}
+
+// A diagnostic report with a full set of problems.
+//
+// @since 3.17.0
+type FullDocumentDiagnosticReport struct { // line 7235
+ // A full document diagnostic report.
+ Kind string `json:"kind"`
+ // An optional result id. If provided it will
+ // be sent on the next diagnostic request for the
+ // same document.
+ ResultID string `json:"resultId,omitempty"`
+ // The actual items.
+ Items []Diagnostic `json:"items"`
+}
+
+// General client capabilities.
+//
+// @since 3.16.0
+type GeneralClientCapabilities struct { // line 10664
+ // Client capability that signals how the client
+ // handles stale requests (e.g. a request
+ // for which the client will not process the response
+ // anymore since the information is outdated).
+ //
+ // @since 3.17.0
+ StaleRequestSupport *PStaleRequestSupportPGeneral `json:"staleRequestSupport,omitempty"`
+ // Client capabilities specific to regular expressions.
+ //
+ // @since 3.16.0
+ RegularExpressions *RegularExpressionsClientCapabilities `json:"regularExpressions,omitempty"`
+ // Client capabilities specific to the client's markdown parser.
+ //
+ // @since 3.16.0
+ Markdown *MarkdownClientCapabilities `json:"markdown,omitempty"`
+ // The position encodings supported by the client. Client and server
+ // have to agree on the same position encoding to ensure that offsets
+ // (e.g. character position in a line) are interpreted the same on both
+ // sides.
+ //
+ // To keep the protocol backwards compatible the following applies: if
+ // the value 'utf-16' is missing from the array of position encodings
+ // servers can assume that the client supports UTF-16. UTF-16 is
+ // therefore a mandatory encoding.
+ //
+ // If omitted it defaults to ['utf-16'].
+ //
+ // Implementation considerations: since the conversion from one encoding
+ // into another requires the content of the file / line the conversion
+ // is best done where the file is read which is usually on the server
+ // side.
+ //
+ // @since 3.17.0
+ PositionEncodings []PositionEncodingKind `json:"positionEncodings,omitempty"`
+}
+
+// The glob pattern. Either a string pattern or a relative pattern.
+//
+// @since 3.17.0
+type GlobPattern = string // (alias) line 14127
+// The result of a hover request.
+type Hover struct { // line 4886
+ // The hover's content
+ Contents MarkupContent `json:"contents"`
+ // An optional range inside the text document that is used to
+ // visualize the hover, e.g. by changing the background color.
+ Range Range `json:"range,omitempty"`
+}
+type HoverClientCapabilities struct { // line 11402
+ // Whether hover supports dynamic registration.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+ // Client supports the following content formats for the content
+ // property. The order describes the preferred format of the client.
+ ContentFormat []MarkupKind `json:"contentFormat,omitempty"`
+}
+
+// Hover options.
+type HoverOptions struct { // line 8776
+ WorkDoneProgressOptions
+}
+
+// Parameters for a {@link HoverRequest}.
+type HoverParams struct { // line 4869
+ TextDocumentPositionParams
+ WorkDoneProgressParams
+}
+
+// Registration options for a {@link HoverRequest}.
+type HoverRegistrationOptions struct { // line 4925
+ TextDocumentRegistrationOptions
+ HoverOptions
+}
+
+// @since 3.6.0
+type ImplementationClientCapabilities struct { // line 11583
+ // Whether implementation supports dynamic registration. If this is set to `true`
+ // the client supports the new `ImplementationRegistrationOptions` return value
+ // for the corresponding server capability as well.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+ // The client supports additional metadata in the form of definition links.
+ //
+ // @since 3.14.0
+ LinkSupport bool `json:"linkSupport,omitempty"`
+}
+type ImplementationOptions struct { // line 6333
+ WorkDoneProgressOptions
+}
+type ImplementationParams struct { // line 2063
+ TextDocumentPositionParams
+ WorkDoneProgressParams
+ PartialResultParams
+}
+type ImplementationRegistrationOptions struct { // line 2103
+ TextDocumentRegistrationOptions
+ ImplementationOptions
+ StaticRegistrationOptions
+}
+
+// The data type of the ResponseError if the
+// initialize request fails.
+type InitializeError struct { // line 4126
+ // Indicates whether the client execute the following retry logic:
+ // (1) show the message provided by the ResponseError to the user
+ // (2) user selects retry or cancel
+ // (3) if user selected retry the initialize method is sent again.
+ Retry bool `json:"retry"`
+}
+type InitializeParams struct { // line 4068
+ XInitializeParams
+ WorkspaceFoldersInitializeParams
+}
+
+// The result returned from an initialize request.
+type InitializeResult struct { // line 4082
+ // The capabilities the language server provides.
+ Capabilities ServerCapabilities `json:"capabilities"`
+ // Information about the server.
+ //
+ // @since 3.15.0
+ ServerInfo *PServerInfoMsg_initialize `json:"serverInfo,omitempty"`
+}
+type InitializedParams struct { // line 4140
+}
+
+// Inlay hint information.
+//
+// @since 3.17.0
+type InlayHint struct { // line 3645
+ // The position of this hint.
+ Position Position `json:"position"`
+ // The label of this hint. A human readable string or an array of
+ // InlayHintLabelPart label parts.
+ //
+ // *Note* that neither the string nor the label part can be empty.
+ Label []InlayHintLabelPart `json:"label"`
+ // The kind of this hint. Can be omitted in which case the client
+ // should fall back to a reasonable default.
+ Kind InlayHintKind `json:"kind,omitempty"`
+ // Optional text edits that are performed when accepting this inlay hint.
+ //
+ // *Note* that edits are expected to change the document so that the inlay
+ // hint (or its nearest variant) is now part of the document and the inlay
+ // hint itself is now obsolete.
+ TextEdits []TextEdit `json:"textEdits,omitempty"`
+ // The tooltip text when you hover over this item.
+ Tooltip *OrPTooltip_textDocument_inlayHint `json:"tooltip,omitempty"`
+ // Render padding before the hint.
+ //
+ // Note: Padding should use the editor's background color, not the
+ // background color of the hint itself. That means padding can be used
+ // to visually align/separate an inlay hint.
+ PaddingLeft bool `json:"paddingLeft,omitempty"`
+ // Render padding after the hint.
+ //
+ // Note: Padding should use the editor's background color, not the
+ // background color of the hint itself. That means padding can be used
+ // to visually align/separate an inlay hint.
+ PaddingRight bool `json:"paddingRight,omitempty"`
+ // A data entry field that is preserved on an inlay hint between
+ // a `textDocument/inlayHint` and a `inlayHint/resolve` request.
+ Data interface{} `json:"data,omitempty"`
+}
+
+// Inlay hint client capabilities.
+//
+// @since 3.17.0
+type InlayHintClientCapabilities struct { // line 12369
+ // Whether inlay hints support dynamic registration.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+ // Indicates which properties a client can resolve lazily on an inlay
+ // hint.
+ ResolveSupport *PResolveSupportPInlayHint `json:"resolveSupport,omitempty"`
+}
+
+// Inlay hint kinds.
+//
+// @since 3.17.0
+type InlayHintKind uint32 // line 13033
+// An inlay hint label part allows for interactive and composite labels
+// of inlay hints.
+//
+// @since 3.17.0
+type InlayHintLabelPart struct { // line 7062
+ // The value of this label part.
+ Value string `json:"value"`
+ // The tooltip text when you hover over this label part. Depending on
+ // the client capability `inlayHint.resolveSupport` clients might resolve
+ // this property late using the resolve request.
+ Tooltip *OrPTooltipPLabel `json:"tooltip,omitempty"`
+ // An optional source code location that represents this
+ // label part.
+ //
+ // The editor will use this location for the hover and for code navigation
+ // features: This part will become a clickable link that resolves to the
+ // definition of the symbol at the given location (not necessarily the
+ // location itself), it shows the hover that shows at the given location,
+ // and it shows a context menu with further code navigation commands.
+ //
+ // Depending on the client capability `inlayHint.resolveSupport` clients
+ // might resolve this property late using the resolve request.
+ Location *Location `json:"location,omitempty"`
+ // An optional command for this label part.
+ //
+ // Depending on the client capability `inlayHint.resolveSupport` clients
+ // might resolve this property late using the resolve request.
+ Command *Command `json:"command,omitempty"`
+}
+
+// Inlay hint options used during static registration.
+//
+// @since 3.17.0
+type InlayHintOptions struct { // line 7135
+ // The server provides support to resolve additional
+ // information for an inlay hint item.
+ ResolveProvider bool `json:"resolveProvider,omitempty"`
+ WorkDoneProgressOptions
+}
+
+// A parameter literal used in inlay hint requests.
+//
+// @since 3.17.0
+type InlayHintParams struct { // line 3616
+ // The text document.
+ TextDocument TextDocumentIdentifier `json:"textDocument"`
+ // The document range for which inlay hints should be computed.
+ Range Range `json:"range"`
+ WorkDoneProgressParams
+}
+
+// Inlay hint options used during static or dynamic registration.
+//
+// @since 3.17.0
+type InlayHintRegistrationOptions struct { // line 3746
+ InlayHintOptions
+ TextDocumentRegistrationOptions
+ StaticRegistrationOptions
+}
+
+// Client workspace capabilities specific to inlay hints.
+//
+// @since 3.17.0
+type InlayHintWorkspaceClientCapabilities struct { // line 11095
+ // Whether the client implementation supports a refresh request sent from
+ // the server to the client.
+ //
+ // Note that this event is global and will force the client to refresh all
+ // inlay hints currently shown. It should be used with absolute care and
+ // is useful for situation where a server for example detects a project wide
+ // change that requires such a calculation.
+ RefreshSupport bool `json:"refreshSupport,omitempty"`
+}
+
+// Inline value information can be provided by different means:
+//
+// - directly as a text value (class InlineValueText).
+// - as a name to use for a variable lookup (class InlineValueVariableLookup)
+// - as an evaluatable expression (class InlineValueEvaluatableExpression)
+//
+// The InlineValue types combines all inline value types into one type.
+//
+// @since 3.17.0
+type InlineValue = Or_InlineValue // (alias) line 13861
+// Client capabilities specific to inline values.
+//
+// @since 3.17.0
+type InlineValueClientCapabilities struct { // line 12353
+ // Whether implementation supports dynamic registration for inline value providers.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+}
+
+// @since 3.17.0
+type InlineValueContext struct { // line 6948
+ // The stack frame (as a DAP Id) where the execution has stopped.
+ FrameID int32 `json:"frameId"`
+ // The document range where execution has stopped.
+ // Typically the end position of the range denotes the line where the inline values are shown.
+ StoppedLocation Range `json:"stoppedLocation"`
+}
+
+// Provide an inline value through an expression evaluation.
+// If only a range is specified, the expression will be extracted from the underlying document.
+// An optional expression can be used to override the extracted expression.
+//
+// @since 3.17.0
+type InlineValueEvaluatableExpression struct { // line 7026
+ // The document range for which the inline value applies.
+ // The range is used to extract the evaluatable expression from the underlying document.
+ Range Range `json:"range"`
+ // If specified the expression overrides the extracted expression.
+ Expression string `json:"expression,omitempty"`
+}
+
+// Inline value options used during static registration.
+//
+// @since 3.17.0
+type InlineValueOptions struct { // line 7050
+ WorkDoneProgressOptions
+}
+
+// A parameter literal used in inline value requests.
+//
+// @since 3.17.0
+type InlineValueParams struct { // line 3557
+ // The text document.
+ TextDocument TextDocumentIdentifier `json:"textDocument"`
+ // The document range for which inline values should be computed.
+ Range Range `json:"range"`
+ // Additional information about the context in which inline values were
+ // requested.
+ Context InlineValueContext `json:"context"`
+ WorkDoneProgressParams
+}
+
+// Inline value options used during static or dynamic registration.
+//
+// @since 3.17.0
+type InlineValueRegistrationOptions struct { // line 3594
+ InlineValueOptions
+ TextDocumentRegistrationOptions
+ StaticRegistrationOptions
+}
+
+// Provide inline value as text.
+//
+// @since 3.17.0
+type InlineValueText struct { // line 6971
+ // The document range for which the inline value applies.
+ Range Range `json:"range"`
+ // The text of the inline value.
+ Text string `json:"text"`
+}
+
+// Provide inline value through a variable lookup.
+// If only a range is specified, the variable name will be extracted from the underlying document.
+// An optional variable name can be used to override the extracted name.
+//
+// @since 3.17.0
+type InlineValueVariableLookup struct { // line 6994
+ // The document range for which the inline value applies.
+ // The range is used to extract the variable name from the underlying document.
+ Range Range `json:"range"`
+ // If specified the name of the variable to look up.
+ VariableName string `json:"variableName,omitempty"`
+ // How to perform the lookup.
+ CaseSensitiveLookup bool `json:"caseSensitiveLookup"`
+}
+
+// Client workspace capabilities specific to inline values.
+//
+// @since 3.17.0
+type InlineValueWorkspaceClientCapabilities struct { // line 11079
+ // Whether the client implementation supports a refresh request sent from the
+ // server to the client.
+ //
+ // Note that this event is global and will force the client to refresh all
+ // inline values currently shown. It should be used with absolute care and is
+ // useful for situation where a server for example detects a project wide
+ // change that requires such a calculation.
+ RefreshSupport bool `json:"refreshSupport,omitempty"`
+}
+
+// A special text edit to provide an insert and a replace operation.
+//
+// @since 3.16.0
+type InsertReplaceEdit struct { // line 8676
+ // The string to be inserted.
+ NewText string `json:"newText"`
+ // The range if the insert is requested
+ Insert Range `json:"insert"`
+ // The range if the replace is requested.
+ Replace Range `json:"replace"`
+}
+
+// Defines whether the insert text in a completion item should be interpreted as
+// plain text or a snippet.
+type InsertTextFormat uint32 // line 13260
+// How whitespace and indentation is handled during completion
+// item insertion.
+//
+// @since 3.16.0
+type InsertTextMode uint32 // line 13280
+type LSPAny = interface{}
+
+// LSP arrays.
+// @since 3.17.0
+type LSPArray = []interface{} // (alias) line 13779
+type LSPErrorCodes int32 // line 12783
+// LSP object definition.
+// @since 3.17.0
+type LSPObject = map[string]LSPAny // (alias) line 14111
+// Client capabilities for the linked editing range request.
+//
+// @since 3.16.0
+type LinkedEditingRangeClientCapabilities struct { // line 12305
+ // Whether implementation supports dynamic registration. If this is set to `true`
+ // the client supports the new `(TextDocumentRegistrationOptions & StaticRegistrationOptions)`
+ // return value for the corresponding server capability as well.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+}
+type LinkedEditingRangeOptions struct { // line 6652
+ WorkDoneProgressOptions
+}
+type LinkedEditingRangeParams struct { // line 3112
+ TextDocumentPositionParams
+ WorkDoneProgressParams
+}
+type LinkedEditingRangeRegistrationOptions struct { // line 3155
+ TextDocumentRegistrationOptions
+ LinkedEditingRangeOptions
+ StaticRegistrationOptions
+}
+
+// The result of a linked editing range request.
+//
+// @since 3.16.0
+type LinkedEditingRanges struct { // line 3128
+ // A list of ranges that can be edited together. The ranges must have
+ // identical length and contain identical text content. The ranges cannot overlap.
+ Ranges []Range `json:"ranges"`
+ // An optional word pattern (regular expression) that describes valid contents for
+ // the given ranges. If no pattern is provided, the client configuration's word
+ // pattern will be used.
+ WordPattern string `json:"wordPattern,omitempty"`
+}
+
+// created for Literal (Lit_NotebookDocumentChangeEvent_cells_textContent_Elem)
+type Lit_NotebookDocumentChangeEvent_cells_textContent_Elem struct { // line 7545
+ Document VersionedTextDocumentIdentifier `json:"document"`
+ Changes []TextDocumentContentChangeEvent `json:"changes"`
+}
+
+// created for Literal (Lit_NotebookDocumentFilter_Item1)
+type Lit_NotebookDocumentFilter_Item1 struct { // line 14293
+ // The type of the enclosing notebook.
+ NotebookType string `json:"notebookType,omitempty"`
+ // A Uri {@link Uri.scheme scheme}, like `file` or `untitled`.
+ Scheme string `json:"scheme"`
+ // A glob pattern.
+ Pattern string `json:"pattern,omitempty"`
+}
+
+// created for Literal (Lit_NotebookDocumentFilter_Item2)
+type Lit_NotebookDocumentFilter_Item2 struct { // line 14326
+ // The type of the enclosing notebook.
+ NotebookType string `json:"notebookType,omitempty"`
+ // A Uri {@link Uri.scheme scheme}, like `file` or `untitled`.
+ Scheme string `json:"scheme,omitempty"`
+ // A glob pattern.
+ Pattern string `json:"pattern"`
+}
+
+// created for Literal (Lit_NotebookDocumentSyncOptions_notebookSelector_Elem_Item0_cells_Elem)
+type Lit_NotebookDocumentSyncOptions_notebookSelector_Elem_Item0_cells_Elem struct { // line 9831
+ Language string `json:"language"`
+}
+
+// created for Literal (Lit_NotebookDocumentSyncOptions_notebookSelector_Elem_Item1)
+type Lit_NotebookDocumentSyncOptions_notebookSelector_Elem_Item1 struct { // line 9852
+ // The notebook to be synced If a string
+ // value is provided it matches against the
+ // notebook type. '*' matches every notebook.
+ Notebook *Or_NotebookDocumentSyncOptions_notebookSelector_Elem_Item1_notebook `json:"notebook,omitempty"`
+ // The cells of the matching notebook to be synced.
+ Cells []Lit_NotebookDocumentSyncOptions_notebookSelector_Elem_Item1_cells_Elem `json:"cells"`
+}
+
+// created for Literal (Lit_NotebookDocumentSyncOptions_notebookSelector_Elem_Item1_cells_Elem)
+type Lit_NotebookDocumentSyncOptions_notebookSelector_Elem_Item1_cells_Elem struct { // line 9878
+ Language string `json:"language"`
+}
+
+// created for Literal (Lit_PrepareRenameResult_Item2)
+type Lit_PrepareRenameResult_Item2 struct { // line 13932
+ DefaultBehavior bool `json:"defaultBehavior"`
+}
+
+// created for Literal (Lit_TextDocumentContentChangeEvent_Item1)
+type Lit_TextDocumentContentChangeEvent_Item1 struct { // line 14040
+ // The new text of the whole document.
+ Text string `json:"text"`
+}
+
+// created for Literal (Lit_TextDocumentFilter_Item2)
+type Lit_TextDocumentFilter_Item2 struct { // line 14217
+ // A language id, like `typescript`.
+ Language string `json:"language,omitempty"`
+ // A Uri {@link Uri.scheme scheme}, like `file` or `untitled`.
+ Scheme string `json:"scheme,omitempty"`
+ // A glob pattern, like `*.{ts,js}`.
+ Pattern string `json:"pattern"`
+}
+
+// Represents a location inside a resource, such as a line
+// inside a text file.
+type Location struct { // line 2083
+ URI DocumentURI `json:"uri"`
+ Range Range `json:"range"`
+}
+
+// Represents the connection of two locations. Provides additional metadata over normal {@link Location locations},
+// including an origin range.
+type LocationLink struct { // line 6272
+ // Span of the origin of this link.
+ //
+ // Used as the underlined span for mouse interaction. Defaults to the word range at
+ // the definition position.
+ OriginSelectionRange *Range `json:"originSelectionRange,omitempty"`
+ // The target resource identifier of this link.
+ TargetURI DocumentURI `json:"targetUri"`
+ // The full target range of this link. If the target for example is a symbol then target range is the
+ // range enclosing this symbol not including leading/trailing whitespace but everything else
+ // like comments. This information is typically used to highlight the range in the editor.
+ TargetRange Range `json:"targetRange"`
+ // The range that should be selected and revealed when this link is being followed, e.g the name of a function.
+ // Must be contained by the `targetRange`. See also `DocumentSymbol#range`
+ TargetSelectionRange Range `json:"targetSelectionRange"`
+}
+
+// The log message parameters.
+type LogMessageParams struct { // line 4251
+ // The message type. See {@link MessageType}
+ Type MessageType `json:"type"`
+ // The actual message.
+ Message string `json:"message"`
+}
+type LogTraceParams struct { // line 6159
+ Message string `json:"message"`
+ Verbose string `json:"verbose,omitempty"`
+}
+
+// Client capabilities specific to the used markdown parser.
+//
+// @since 3.16.0
+type MarkdownClientCapabilities struct { // line 12524
+ // The name of the parser.
+ Parser string `json:"parser"`
+ // The version of the parser.
+ Version string `json:"version,omitempty"`
+ // A list of HTML tags that the client allows / supports in
+ // Markdown.
+ //
+ // @since 3.17.0
+ AllowedTags []string `json:"allowedTags,omitempty"`
+}
+
+// MarkedString can be used to render human readable text. It is either a markdown string
+// or a code-block that provides a language and a code snippet. The language identifier
+// is semantically equal to the optional language identifier in fenced code blocks in GitHub
+// issues. See https://help.github.com/articles/creating-and-highlighting-code-blocks/#syntax-highlighting
+//
+// The pair of a language and a value is an equivalent to markdown:
+// ```${language}
+// ${value}
+// ```
+//
+// Note that markdown strings will be sanitized - that means html will be escaped.
+// @deprecated use MarkupContent instead.
+type MarkedString = Or_MarkedString // (alias) line 14058
+// A `MarkupContent` literal represents a string value which content is interpreted base on its
+// kind flag. Currently the protocol supports `plaintext` and `markdown` as markup kinds.
+//
+// If the kind is `markdown` then the value can contain fenced code blocks like in GitHub issues.
+// See https://help.github.com/articles/creating-and-highlighting-code-blocks/#syntax-highlighting
+//
+// Here is an example how such a string can be constructed using JavaScript / TypeScript:
+// ```ts
+//
+// let markdown: MarkdownContent = {
+// kind: MarkupKind.Markdown,
+// value: [
+// '# Header',
+// 'Some text',
+// '```typescript',
+// 'someCode();',
+// '```'
+// ].join('\n')
+// };
+//
+// ```
+//
+// *Please Note* that clients might sanitize the return markdown. A client could decide to
+// remove HTML from the markdown to avoid script execution.
+type MarkupContent struct { // line 7113
+ // The type of the Markup
+ Kind MarkupKind `json:"kind"`
+ // The content itself
+ Value string `json:"value"`
+}
+
+// Describes the content type that a client supports in various
+// result literals like `Hover`, `ParameterInfo` or `CompletionItem`.
+//
+// Please note that `MarkupKinds` must not start with a `$`. This kinds
+// are reserved for internal usage.
+type MarkupKind string // line 13407
+type MessageActionItem struct { // line 4238
+ // A short title like 'Retry', 'Open Log' etc.
+ Title string `json:"title"`
+}
+
+// The message type
+type MessageType uint32 // line 13054
+// Moniker definition to match LSIF 0.5 moniker definition.
+//
+// @since 3.16.0
+type Moniker struct { // line 3338
+ // The scheme of the moniker. For example tsc or .Net
+ Scheme string `json:"scheme"`
+ // The identifier of the moniker. The value is opaque in LSIF however
+ // schema owners are allowed to define the structure if they want.
+ Identifier string `json:"identifier"`
+ // The scope in which the moniker is unique
+ Unique UniquenessLevel `json:"unique"`
+ // The moniker kind if known.
+ Kind *MonikerKind `json:"kind,omitempty"`
+}
+
+// Client capabilities specific to the moniker request.
+//
+// @since 3.16.0
+type MonikerClientCapabilities struct { // line 12321
+ // Whether moniker supports dynamic registration. If this is set to `true`
+ // the client supports the new `MonikerRegistrationOptions` return value
+ // for the corresponding server capability as well.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+}
+
+// The moniker kind.
+//
+// @since 3.16.0
+type MonikerKind string // line 13007
+type MonikerOptions struct { // line 6926
+ WorkDoneProgressOptions
+}
+type MonikerParams struct { // line 3318
+ TextDocumentPositionParams
+ WorkDoneProgressParams
+ PartialResultParams
+}
+type MonikerRegistrationOptions struct { // line 3378
+ TextDocumentRegistrationOptions
+ MonikerOptions
+}
+
+// created for Literal (Lit_MarkedString_Item1)
+type Msg_MarkedString struct { // line 14068
+ Language string `json:"language"`
+ Value string `json:"value"`
+}
+
+// created for Literal (Lit_NotebookDocumentFilter_Item0)
+type Msg_NotebookDocumentFilter struct { // line 14260
+ // The type of the enclosing notebook.
+ NotebookType string `json:"notebookType"`
+ // A Uri {@link Uri.scheme scheme}, like `file` or `untitled`.
+ Scheme string `json:"scheme,omitempty"`
+ // A glob pattern.
+ Pattern string `json:"pattern,omitempty"`
+}
+
+// created for Literal (Lit_PrepareRenameResult_Item1)
+type Msg_PrepareRename2Gn struct { // line 13911
+ Range Range `json:"range"`
+ Placeholder string `json:"placeholder"`
+}
+
+// created for Literal (Lit_TextDocumentContentChangeEvent_Item0)
+type Msg_TextDocumentContentChangeEvent struct { // line 14008
+ // The range of the document that changed.
+ Range *Range `json:"range"`
+ // The optional length of the range that got replaced.
+ //
+ // @deprecated use range instead.
+ RangeLength uint32 `json:"rangeLength,omitempty"`
+ // The new text for the provided range.
+ Text string `json:"text"`
+}
+
+// created for Literal (Lit_TextDocumentFilter_Item1)
+type Msg_TextDocumentFilter struct { // line 14184
+ // A language id, like `typescript`.
+ Language string `json:"language,omitempty"`
+ // A Uri {@link Uri.scheme scheme}, like `file` or `untitled`.
+ Scheme string `json:"scheme"`
+ // A glob pattern, like `*.{ts,js}`.
+ Pattern string `json:"pattern,omitempty"`
+}
+
+// created for Literal (Lit__InitializeParams_clientInfo)
+type Msg_XInitializeParams_clientInfo struct { // line 7673
+ // The name of the client as defined by the client.
+ Name string `json:"name"`
+ // The client's version as defined by the client.
+ Version string `json:"version,omitempty"`
+}
+
+// A notebook cell.
+//
+// A cell's document URI must be unique across ALL notebook
+// cells and can therefore be used to uniquely identify a
+// notebook cell or the cell's text document.
+//
+// @since 3.17.0
+type NotebookCell struct { // line 9598
+ // The cell's kind
+ Kind NotebookCellKind `json:"kind"`
+ // The URI of the cell's text document
+ // content.
+ Document DocumentURI `json:"document"`
+ // Additional metadata stored with the cell.
+ //
+ // Note: should always be an object literal (e.g. LSPObject)
+ Metadata *LSPObject `json:"metadata,omitempty"`
+ // Additional execution summary information
+ // if supported by the client.
+ ExecutionSummary *ExecutionSummary `json:"executionSummary,omitempty"`
+}
+
+// A change describing how to move a `NotebookCell`
+// array from state S to S'.
+//
+// @since 3.17.0
+type NotebookCellArrayChange struct { // line 9639
+ // The start oftest of the cell that changed.
+ Start uint32 `json:"start"`
+ // The deleted cells
+ DeleteCount uint32 `json:"deleteCount"`
+ // The new cells, if any
+ Cells []NotebookCell `json:"cells,omitempty"`
+}
+
+// A notebook cell kind.
+//
+// @since 3.17.0
+type NotebookCellKind uint32 // line 13648
+// A notebook cell text document filter denotes a cell text
+// document by different properties.
+//
+// @since 3.17.0
+type NotebookCellTextDocumentFilter struct { // line 10113
+ // A filter that matches against the notebook
+ // containing the notebook cell. If a string
+ // value is provided it matches against the
+ // notebook type. '*' matches every notebook.
+ Notebook Or_NotebookCellTextDocumentFilter_notebook `json:"notebook"`
+ // A language id like `python`.
+ //
+ // Will be matched against the language id of the
+ // notebook cell document. '*' matches every language.
+ Language string `json:"language,omitempty"`
+}
+
+// A notebook document.
+//
+// @since 3.17.0
+type NotebookDocument struct { // line 7354
+ // The notebook document's uri.
+ URI URI `json:"uri"`
+ // The type of the notebook.
+ NotebookType string `json:"notebookType"`
+ // The version number of this document (it will increase after each
+ // change, including undo/redo).
+ Version int32 `json:"version"`
+ // Additional metadata stored with the notebook
+ // document.
+ //
+ // Note: should always be an object literal (e.g. LSPObject)
+ Metadata *LSPObject `json:"metadata,omitempty"`
+ // The cells of a notebook.
+ Cells []NotebookCell `json:"cells"`
+}
+
+// A change event for a notebook document.
+//
+// @since 3.17.0
+type NotebookDocumentChangeEvent struct { // line 7466
+ // The changed meta data if any.
+ //
+ // Note: should always be an object literal (e.g. LSPObject)
+ Metadata *LSPObject `json:"metadata,omitempty"`
+ // Changes to cells
+ Cells *PCellsPChange `json:"cells,omitempty"`
+}
+
+// Capabilities specific to the notebook document support.
+//
+// @since 3.17.0
+type NotebookDocumentClientCapabilities struct { // line 10613
+ // Capabilities specific to notebook document synchronization
+ //
+ // @since 3.17.0
+ Synchronization NotebookDocumentSyncClientCapabilities `json:"synchronization"`
+}
+
+// A notebook document filter denotes a notebook document by
+// different properties. The properties will be match
+// against the notebook's URI (same as with documents)
+//
+// @since 3.17.0
+type NotebookDocumentFilter = Msg_NotebookDocumentFilter // (alias) line 14254
+// A literal to identify a notebook document in the client.
+//
+// @since 3.17.0
+type NotebookDocumentIdentifier struct { // line 7582
+ // The notebook document's uri.
+ URI URI `json:"uri"`
+}
+
+// Notebook specific client capabilities.
+//
+// @since 3.17.0
+type NotebookDocumentSyncClientCapabilities struct { // line 12433
+ // Whether implementation supports dynamic registration. If this is
+ // set to `true` the client supports the new
+ // `(TextDocumentRegistrationOptions & StaticRegistrationOptions)`
+ // return value for the corresponding server capability as well.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+ // The client supports sending execution summary data per cell.
+ ExecutionSummarySupport bool `json:"executionSummarySupport,omitempty"`
+}
+
+// Options specific to a notebook plus its cells
+// to be synced to the server.
+//
+// If a selector provides a notebook document
+// filter but no cell selector all cells of a
+// matching notebook document will be synced.
+//
+// If a selector provides no notebook document
+// filter but only a cell selector all notebook
+// document that contain at least one matching
+// cell will be synced.
+//
+// @since 3.17.0
+type NotebookDocumentSyncOptions struct { // line 9795
+ // The notebooks to be synced
+ NotebookSelector []PNotebookSelectorPNotebookDocumentSync `json:"notebookSelector"`
+ // Whether save notification should be forwarded to
+ // the server. Will only be honored if mode === `notebook`.
+ Save bool `json:"save,omitempty"`
+}
+
+// Registration options specific to a notebook.
+//
+// @since 3.17.0
+type NotebookDocumentSyncRegistrationOptions struct { // line 9915
+ NotebookDocumentSyncOptions
+ StaticRegistrationOptions
+}
+
+// A text document identifier to optionally denote a specific version of a text document.
+type OptionalVersionedTextDocumentIdentifier struct { // line 9343
+ // The version number of this document. If a versioned text document identifier
+ // is sent from the server to the client and the file is not open in the editor
+ // (the server has not received an open notification before) the server can send
+ // `null` to indicate that the version is unknown and the content on disk is the
+ // truth (as specified with document content ownership).
+ Version int32 `json:"version"`
+ TextDocumentIdentifier
+}
+
+// created for Or [FEditRangePItemDefaults Range]
+type OrFEditRangePItemDefaults struct { // line 4770
+ Value interface{} `json:"value"`
+}
+
+// created for Or [NotebookDocumentFilter string]
+type OrFNotebookPNotebookSelector struct { // line 9812
+ Value interface{} `json:"value"`
+}
+
+// created for Or [Location PLocationMsg_workspace_symbol]
+type OrPLocation_workspace_symbol struct { // line 5521
+ Value interface{} `json:"value"`
+}
+
+// created for Or [[]string string]
+type OrPSection_workspace_didChangeConfiguration struct { // line 4164
+ Value interface{} `json:"value"`
+}
+
+// created for Or [MarkupContent string]
+type OrPTooltipPLabel struct { // line 7076
+ Value interface{} `json:"value"`
+}
+
+// created for Or [MarkupContent string]
+type OrPTooltip_textDocument_inlayHint struct { // line 3700
+ Value interface{} `json:"value"`
+}
+
+// created for Or [int32 string]
+type Or_CancelParams_id struct { // line 6185
+ Value interface{} `json:"value"`
+}
+
+// created for Or [MarkupContent string]
+type Or_CompletionItem_documentation struct { // line 4583
+ Value interface{} `json:"value"`
+}
+
+// created for Or [InsertReplaceEdit TextEdit]
+type Or_CompletionItem_textEdit struct { // line 4666
+ Value interface{} `json:"value"`
+}
+
+// created for Or [Location []Location]
+type Or_Definition struct { // line 13754
+ Value interface{} `json:"value"`
+}
+
+// created for Or [int32 string]
+type Or_Diagnostic_code struct { // line 8548
+ Value interface{} `json:"value"`
+}
+
+// created for Or [RelatedFullDocumentDiagnosticReport RelatedUnchangedDocumentDiagnosticReport]
+type Or_DocumentDiagnosticReport struct { // line 13886
+ Value interface{} `json:"value"`
+}
+
+// created for Or [FullDocumentDiagnosticReport UnchangedDocumentDiagnosticReport]
+type Or_DocumentDiagnosticReportPartialResult_relatedDocuments_Value struct { // line 3823
+ Value interface{} `json:"value"`
+}
+
+// created for Or [NotebookCellTextDocumentFilter TextDocumentFilter]
+type Or_DocumentFilter struct { // line 14096
+ Value interface{} `json:"value"`
+}
+
+// created for Or [MarkedString MarkupContent []MarkedString]
+type Or_Hover_contents struct { // line 4892
+ Value interface{} `json:"value"`
+}
+
+// created for Or [[]InlayHintLabelPart string]
+type Or_InlayHint_label struct { // line 3659
+ Value interface{} `json:"value"`
+}
+
+// created for Or [InlineValueEvaluatableExpression InlineValueText InlineValueVariableLookup]
+type Or_InlineValue struct { // line 13864
+ Value interface{} `json:"value"`
+}
+
+// created for Or [Msg_MarkedString string]
+type Or_MarkedString struct { // line 14061
+ Value interface{} `json:"value"`
+}
+
+// created for Or [NotebookDocumentFilter string]
+type Or_NotebookCellTextDocumentFilter_notebook struct { // line 10119
+ Value interface{} `json:"value"`
+}
+
+// created for Or [NotebookDocumentFilter string]
+type Or_NotebookDocumentSyncOptions_notebookSelector_Elem_Item1_notebook struct { // line 9858
+ Value interface{} `json:"value"`
+}
+
+// created for Or [FullDocumentDiagnosticReport UnchangedDocumentDiagnosticReport]
+type Or_RelatedFullDocumentDiagnosticReport_relatedDocuments_Value struct { // line 7169
+ Value interface{} `json:"value"`
+}
+
+// created for Or [FullDocumentDiagnosticReport UnchangedDocumentDiagnosticReport]
+type Or_RelatedUnchangedDocumentDiagnosticReport_relatedDocuments_Value struct { // line 7208
+ Value interface{} `json:"value"`
+}
+
+// created for Or [URI WorkspaceFolder]
+type Or_RelativePattern_baseUri struct { // line 10742
+ Value interface{} `json:"value"`
+}
+
+// created for Or [CodeAction Command]
+type Or_Result_textDocument_codeAction_Item0_Elem struct { // line 1372
+ Value interface{} `json:"value"`
+}
+
+// created for Or [FFullPRequests bool]
+type Or_SemanticTokensClientCapabilities_requests_full struct { // line 12198
+ Value interface{} `json:"value"`
+}
+
+// created for Or [FRangePRequests bool]
+type Or_SemanticTokensClientCapabilities_requests_range struct { // line 12178
+ Value interface{} `json:"value"`
+}
+
+// created for Or [PFullESemanticTokensOptions bool]
+type Or_SemanticTokensOptions_full struct { // line 6580
+ Value interface{} `json:"value"`
+}
+
+// created for Or [PRangeESemanticTokensOptions bool]
+type Or_SemanticTokensOptions_range struct { // line 6560
+ Value interface{} `json:"value"`
+}
+
+// created for Or [CallHierarchyOptions CallHierarchyRegistrationOptions bool]
+type Or_ServerCapabilities_callHierarchyProvider struct { // line 8228
+ Value interface{} `json:"value"`
+}
+
+// created for Or [CodeActionOptions bool]
+type Or_ServerCapabilities_codeActionProvider struct { // line 8036
+ Value interface{} `json:"value"`
+}
+
+// created for Or [DocumentColorOptions DocumentColorRegistrationOptions bool]
+type Or_ServerCapabilities_colorProvider struct { // line 8072
+ Value interface{} `json:"value"`
+}
+
+// created for Or [DeclarationOptions DeclarationRegistrationOptions bool]
+type Or_ServerCapabilities_declarationProvider struct { // line 7898
+ Value interface{} `json:"value"`
+}
+
+// created for Or [DefinitionOptions bool]
+type Or_ServerCapabilities_definitionProvider struct { // line 7920
+ Value interface{} `json:"value"`
+}
+
+// created for Or [DiagnosticOptions DiagnosticRegistrationOptions]
+type Or_ServerCapabilities_diagnosticProvider struct { // line 8385
+ Value interface{} `json:"value"`
+}
+
+// created for Or [DocumentFormattingOptions bool]
+type Or_ServerCapabilities_documentFormattingProvider struct { // line 8112
+ Value interface{} `json:"value"`
+}
+
+// created for Or [DocumentHighlightOptions bool]
+type Or_ServerCapabilities_documentHighlightProvider struct { // line 8000
+ Value interface{} `json:"value"`
+}
+
+// created for Or [DocumentRangeFormattingOptions bool]
+type Or_ServerCapabilities_documentRangeFormattingProvider struct { // line 8130
+ Value interface{} `json:"value"`
+}
+
+// created for Or [DocumentSymbolOptions bool]
+type Or_ServerCapabilities_documentSymbolProvider struct { // line 8018
+ Value interface{} `json:"value"`
+}
+
+// created for Or [FoldingRangeOptions FoldingRangeRegistrationOptions bool]
+type Or_ServerCapabilities_foldingRangeProvider struct { // line 8175
+ Value interface{} `json:"value"`
+}
+
+// created for Or [HoverOptions bool]
+type Or_ServerCapabilities_hoverProvider struct { // line 7871
+ Value interface{} `json:"value"`
+}
+
+// created for Or [ImplementationOptions ImplementationRegistrationOptions bool]
+type Or_ServerCapabilities_implementationProvider struct { // line 7960
+ Value interface{} `json:"value"`
+}
+
+// created for Or [InlayHintOptions InlayHintRegistrationOptions bool]
+type Or_ServerCapabilities_inlayHintProvider struct { // line 8362
+ Value interface{} `json:"value"`
+}
+
+// created for Or [InlineValueOptions InlineValueRegistrationOptions bool]
+type Or_ServerCapabilities_inlineValueProvider struct { // line 8339
+ Value interface{} `json:"value"`
+}
+
+// created for Or [LinkedEditingRangeOptions LinkedEditingRangeRegistrationOptions bool]
+type Or_ServerCapabilities_linkedEditingRangeProvider struct { // line 8251
+ Value interface{} `json:"value"`
+}
+
+// created for Or [MonikerOptions MonikerRegistrationOptions bool]
+type Or_ServerCapabilities_monikerProvider struct { // line 8293
+ Value interface{} `json:"value"`
+}
+
+// created for Or [NotebookDocumentSyncOptions NotebookDocumentSyncRegistrationOptions]
+type Or_ServerCapabilities_notebookDocumentSync struct { // line 7843
+ Value interface{} `json:"value"`
+}
+
+// created for Or [ReferenceOptions bool]
+type Or_ServerCapabilities_referencesProvider struct { // line 7982
+ Value interface{} `json:"value"`
+}
+
+// created for Or [RenameOptions bool]
+type Or_ServerCapabilities_renameProvider struct { // line 8157
+ Value interface{} `json:"value"`
+}
+
+// created for Or [SelectionRangeOptions SelectionRangeRegistrationOptions bool]
+type Or_ServerCapabilities_selectionRangeProvider struct { // line 8197
+ Value interface{} `json:"value"`
+}
+
+// created for Or [SemanticTokensOptions SemanticTokensRegistrationOptions]
+type Or_ServerCapabilities_semanticTokensProvider struct { // line 8274
+ Value interface{} `json:"value"`
+}
+
+// created for Or [TextDocumentSyncKind TextDocumentSyncOptions]
+type Or_ServerCapabilities_textDocumentSync struct { // line 7825
+ Value interface{} `json:"value"`
+}
+
+// created for Or [TypeDefinitionOptions TypeDefinitionRegistrationOptions bool]
+type Or_ServerCapabilities_typeDefinitionProvider struct { // line 7938
+ Value interface{} `json:"value"`
+}
+
+// created for Or [TypeHierarchyOptions TypeHierarchyRegistrationOptions bool]
+type Or_ServerCapabilities_typeHierarchyProvider struct { // line 8316
+ Value interface{} `json:"value"`
+}
+
+// created for Or [WorkspaceSymbolOptions bool]
+type Or_ServerCapabilities_workspaceSymbolProvider struct { // line 8094
+ Value interface{} `json:"value"`
+}
+
+// created for Or [MarkupContent string]
+type Or_SignatureInformation_documentation struct { // line 8842
+ Value interface{} `json:"value"`
+}
+
+// created for Or [AnnotatedTextEdit TextEdit]
+type Or_TextDocumentEdit_edits_Elem struct { // line 6693
+ Value interface{} `json:"value"`
+}
+
+// created for Or [SaveOptions bool]
+type Or_TextDocumentSyncOptions_save struct { // line 9778
+ Value interface{} `json:"value"`
+}
+
+// created for Or [WorkspaceFullDocumentDiagnosticReport WorkspaceUnchangedDocumentDiagnosticReport]
+type Or_WorkspaceDocumentDiagnosticReport struct { // line 13987
+ Value interface{} `json:"value"`
+}
+
+// created for Or [CreateFile DeleteFile RenameFile TextDocumentEdit]
+type Or_WorkspaceEdit_documentChanges_Elem struct { // line 3220
+ Value interface{} `json:"value"`
+}
+
+// created for Or [Declaration []DeclarationLink]
+type Or_textDocument_declaration struct { // line 249
+ Value interface{} `json:"value"`
+}
+
+// created for Literal (Lit_NotebookDocumentChangeEvent_cells)
+type PCellsPChange struct { // line 7481
+ // Changes to the cell structure to add or
+ // remove cells.
+ Structure *FStructurePCells `json:"structure,omitempty"`
+ // Changes to notebook cells properties like its
+ // kind, execution summary or metadata.
+ Data []NotebookCell `json:"data,omitempty"`
+ // Changes to the text content of notebook cells.
+ TextContent []Lit_NotebookDocumentChangeEvent_cells_textContent_Elem `json:"textContent,omitempty"`
+}
+
+// created for Literal (Lit_WorkspaceEditClientCapabilities_changeAnnotationSupport)
+type PChangeAnnotationSupportPWorkspaceEdit struct { // line 10816
+ // Whether the client groups edits with equal labels into tree nodes,
+ // for instance all edits labelled with "Changes in Strings" would
+ // be a tree node.
+ GroupsOnLabel bool `json:"groupsOnLabel,omitempty"`
+}
+
+// created for Literal (Lit_CodeActionClientCapabilities_codeActionLiteralSupport)
+type PCodeActionLiteralSupportPCodeAction struct { // line 11736
+ // The code action kind is support with the following value
+ // set.
+ CodeActionKind FCodeActionKindPCodeActionLiteralSupport `json:"codeActionKind"`
+}
+
+// created for Literal (Lit_CompletionClientCapabilities_completionItemKind)
+type PCompletionItemKindPCompletion struct { // line 11334
+ // The completion item kind values the client supports. When this
+ // property exists the client also guarantees that it will
+ // handle values outside its set gracefully and falls back
+ // to a default value when unknown.
+ //
+ // If this property is not present the client only supports
+ // the completion items kinds from `Text` to `Reference` as defined in
+ // the initial version of the protocol.
+ ValueSet []CompletionItemKind `json:"valueSet,omitempty"`
+}
+
+// created for Literal (Lit_CompletionClientCapabilities_completionItem)
+type PCompletionItemPCompletion struct { // line 11183
+ // Client supports snippets as insert text.
+ //
+ // A snippet can define tab stops and placeholders with `$1`, `$2`
+ // and `${3:foo}`. `$0` defines the final tab stop, it defaults to
+ // the end of the snippet. Placeholders with equal identifiers are linked,
+ // that is typing in one will update others too.
+ SnippetSupport bool `json:"snippetSupport,omitempty"`
+ // Client supports commit characters on a completion item.
+ CommitCharactersSupport bool `json:"commitCharactersSupport,omitempty"`
+ // Client supports the following content formats for the documentation
+ // property. The order describes the preferred format of the client.
+ DocumentationFormat []MarkupKind `json:"documentationFormat,omitempty"`
+ // Client supports the deprecated property on a completion item.
+ DeprecatedSupport bool `json:"deprecatedSupport,omitempty"`
+ // Client supports the preselect property on a completion item.
+ PreselectSupport bool `json:"preselectSupport,omitempty"`
+ // Client supports the tag property on a completion item. Clients supporting
+ // tags have to handle unknown tags gracefully. Clients especially need to
+ // preserve unknown tags when sending a completion item back to the server in
+ // a resolve call.
+ //
+ // @since 3.15.0
+ TagSupport FTagSupportPCompletionItem `json:"tagSupport"`
+ // Client support insert replace edit to control different behavior if a
+ // completion item is inserted in the text or should replace text.
+ //
+ // @since 3.16.0
+ InsertReplaceSupport bool `json:"insertReplaceSupport,omitempty"`
+ // Indicates which properties a client can resolve lazily on a completion
+ // item. Before version 3.16.0 only the predefined properties `documentation`
+ // and `details` could be resolved lazily.
+ //
+ // @since 3.16.0
+ ResolveSupport *FResolveSupportPCompletionItem `json:"resolveSupport,omitempty"`
+ // The client supports the `insertTextMode` property on
+ // a completion item to override the whitespace handling mode
+ // as defined by the client (see `insertTextMode`).
+ //
+ // @since 3.16.0
+ InsertTextModeSupport *FInsertTextModeSupportPCompletionItem `json:"insertTextModeSupport,omitempty"`
+ // The client has support for completion item label
+ // details (see also `CompletionItemLabelDetails`).
+ //
+ // @since 3.17.0
+ LabelDetailsSupport bool `json:"labelDetailsSupport,omitempty"`
+}
+
+// created for Literal (Lit_CompletionOptions_completionItem)
+type PCompletionItemPCompletionProvider struct { // line 8747
+ // The server has support for completion item label
+ // details (see also `CompletionItemLabelDetails`) when
+ // receiving a completion item in a resolve call.
+ //
+ // @since 3.17.0
+ LabelDetailsSupport bool `json:"labelDetailsSupport,omitempty"`
+}
+
+// created for Literal (Lit_CompletionClientCapabilities_completionList)
+type PCompletionListPCompletion struct { // line 11376
+ // The client supports the following itemDefaults on
+ // a completion list.
+ //
+ // The value lists the supported property names of the
+ // `CompletionList.itemDefaults` object. If omitted
+ // no properties are supported.
+ //
+ // @since 3.17.0
+ ItemDefaults []string `json:"itemDefaults,omitempty"`
+}
+
+// created for Literal (Lit_CodeAction_disabled)
+type PDisabledMsg_textDocument_codeAction struct { // line 5427
+ // Human readable description of why the code action is currently disabled.
+ //
+ // This is displayed in the code actions UI.
+ Reason string `json:"reason"`
+}
+
+// created for Literal (Lit_FoldingRangeClientCapabilities_foldingRangeKind)
+type PFoldingRangeKindPFoldingRange struct { // line 12011
+ // The folding range kind values the client supports. When this
+ // property exists the client also guarantees that it will
+ // handle values outside its set gracefully and falls back
+ // to a default value when unknown.
+ ValueSet []FoldingRangeKind `json:"valueSet,omitempty"`
+}
+
+// created for Literal (Lit_FoldingRangeClientCapabilities_foldingRange)
+type PFoldingRangePFoldingRange struct { // line 12036
+ // If set, the client signals that it supports setting collapsedText on
+ // folding ranges to display custom labels instead of the default text.
+ //
+ // @since 3.17.0
+ CollapsedText bool `json:"collapsedText,omitempty"`
+}
+
+// created for Literal (Lit_SemanticTokensOptions_full_Item1)
+type PFullESemanticTokensOptions struct { // line 6587
+ // The server supports deltas for full documents.
+ Delta bool `json:"delta"`
+}
+
+// created for Literal (Lit_CompletionList_itemDefaults)
+type PItemDefaultsMsg_textDocument_completion struct { // line 4751
+ // A default commit character set.
+ //
+ // @since 3.17.0
+ CommitCharacters []string `json:"commitCharacters,omitempty"`
+ // A default edit range.
+ //
+ // @since 3.17.0
+ EditRange *OrFEditRangePItemDefaults `json:"editRange,omitempty"`
+ // A default insert text format.
+ //
+ // @since 3.17.0
+ InsertTextFormat *InsertTextFormat `json:"insertTextFormat,omitempty"`
+ // A default insert text mode.
+ //
+ // @since 3.17.0
+ InsertTextMode *InsertTextMode `json:"insertTextMode,omitempty"`
+ // A default data value.
+ //
+ // @since 3.17.0
+ Data interface{} `json:"data,omitempty"`
+}
+
+// created for Literal (Lit_WorkspaceSymbol_location_Item1)
+type PLocationMsg_workspace_symbol struct { // line 5528
+ URI DocumentURI `json:"uri"`
+}
+
+// created for Literal (Lit_ShowMessageRequestClientCapabilities_messageActionItem)
+type PMessageActionItemPShowMessage struct { // line 12464
+ // Whether the client supports additional attributes which
+ // are preserved and send back to the server in the
+ // request's response.
+ AdditionalPropertiesSupport bool `json:"additionalPropertiesSupport,omitempty"`
+}
+
+// created for Literal (Lit_NotebookDocumentSyncOptions_notebookSelector_Elem_Item0)
+type PNotebookSelectorPNotebookDocumentSync struct { // line 9806
+ // The notebook to be synced If a string
+ // value is provided it matches against the
+ // notebook type. '*' matches every notebook.
+ Notebook OrFNotebookPNotebookSelector `json:"notebook"`
+ // The cells of the matching notebook to be synced.
+ Cells []Lit_NotebookDocumentSyncOptions_notebookSelector_Elem_Item0_cells_Elem `json:"cells,omitempty"`
+}
+
+// created for Literal (Lit_SemanticTokensOptions_range_Item1)
+type PRangeESemanticTokensOptions struct { // line 6567
+}
+
+// created for Literal (Lit_SemanticTokensClientCapabilities_requests)
+type PRequestsPSemanticTokens struct { // line 12172
+ // The client will send the `textDocument/semanticTokens/range` request if
+ // the server provides a corresponding handler.
+ Range Or_SemanticTokensClientCapabilities_requests_range `json:"range"`
+ // The client will send the `textDocument/semanticTokens/full` request if
+ // the server provides a corresponding handler.
+ Full Or_SemanticTokensClientCapabilities_requests_full `json:"full"`
+}
+
+// created for Literal (Lit_CodeActionClientCapabilities_resolveSupport)
+type PResolveSupportPCodeAction struct { // line 11801
+ // The properties that a client can resolve lazily.
+ Properties []string `json:"properties"`
+}
+
+// created for Literal (Lit_InlayHintClientCapabilities_resolveSupport)
+type PResolveSupportPInlayHint struct { // line 12384
+ // The properties that a client can resolve lazily.
+ Properties []string `json:"properties"`
+}
+
+// created for Literal (Lit_WorkspaceSymbolClientCapabilities_resolveSupport)
+type PResolveSupportPSymbol struct { // line 10938
+ // The properties that a client can resolve lazily. Usually
+ // `location.range`
+ Properties []string `json:"properties"`
+}
+
+// created for Literal (Lit_InitializeResult_serverInfo)
+type PServerInfoMsg_initialize struct { // line 4096
+ // The name of the server as defined by the server.
+ Name string `json:"name"`
+ // The server's version as defined by the server.
+ Version string `json:"version,omitempty"`
+}
+
+// created for Literal (Lit_SignatureHelpClientCapabilities_signatureInformation)
+type PSignatureInformationPSignatureHelp struct { // line 11443
+ // Client supports the following content formats for the documentation
+ // property. The order describes the preferred format of the client.
+ DocumentationFormat []MarkupKind `json:"documentationFormat,omitempty"`
+ // Client capabilities specific to parameter information.
+ ParameterInformation *FParameterInformationPSignatureInformation `json:"parameterInformation,omitempty"`
+ // The client supports the `activeParameter` property on `SignatureInformation`
+ // literal.
+ //
+ // @since 3.16.0
+ ActiveParameterSupport bool `json:"activeParameterSupport,omitempty"`
+}
+
+// created for Literal (Lit_GeneralClientCapabilities_staleRequestSupport)
+type PStaleRequestSupportPGeneral struct { // line 10670
+ // The client will actively cancel the request.
+ Cancel bool `json:"cancel"`
+ // The list of requests for which the client
+ // will retry the request if it receives a
+ // response with error code `ContentModified`
+ RetryOnContentModified []string `json:"retryOnContentModified"`
+}
+
+// created for Literal (Lit_DocumentSymbolClientCapabilities_symbolKind)
+type PSymbolKindPDocumentSymbol struct { // line 11654
+ // The symbol kind values the client supports. When this
+ // property exists the client also guarantees that it will
+ // handle values outside its set gracefully and falls back
+ // to a default value when unknown.
+ //
+ // If this property is not present the client only supports
+ // the symbol kinds from `File` to `Array` as defined in
+ // the initial version of the protocol.
+ ValueSet []SymbolKind `json:"valueSet,omitempty"`
+}
+
+// created for Literal (Lit_WorkspaceSymbolClientCapabilities_symbolKind)
+type PSymbolKindPSymbol struct { // line 10890
+ // The symbol kind values the client supports. When this
+ // property exists the client also guarantees that it will
+ // handle values outside its set gracefully and falls back
+ // to a default value when unknown.
+ //
+ // If this property is not present the client only supports
+ // the symbol kinds from `File` to `Array` as defined in
+ // the initial version of the protocol.
+ ValueSet []SymbolKind `json:"valueSet,omitempty"`
+}
+
+// created for Literal (Lit_DocumentSymbolClientCapabilities_tagSupport)
+type PTagSupportPDocumentSymbol struct { // line 11687
+ // The tags supported by the client.
+ ValueSet []SymbolTag `json:"valueSet"`
+}
+
+// created for Literal (Lit_PublishDiagnosticsClientCapabilities_tagSupport)
+type PTagSupportPPublishDiagnostics struct { // line 12087
+ // The tags supported by the client.
+ ValueSet []DiagnosticTag `json:"valueSet"`
+}
+
+// created for Literal (Lit_WorkspaceSymbolClientCapabilities_tagSupport)
+type PTagSupportPSymbol struct { // line 10914
+ // The tags supported by the client.
+ ValueSet []SymbolTag `json:"valueSet"`
+}
+
+// The parameters of a configuration request.
+type ParamConfiguration struct { // line 2199
+ Items []ConfigurationItem `json:"items"`
+}
+type ParamInitialize struct { // line 4068
+ XInitializeParams
+ WorkspaceFoldersInitializeParams
+}
+
+// Represents a parameter of a callable-signature. A parameter can
+// have a label and a doc-comment.
+type ParameterInformation struct { // line 10063
+ // The label of this parameter information.
+ //
+ // Either a string or an inclusive start and exclusive end offsets within its containing
+ // signature label. (see SignatureInformation.label). The offsets are based on a UTF-16
+ // string representation as `Position` and `Range` does.
+ //
+ // *Note*: a label of type string should be a substring of its containing signature label.
+ // Its intended use case is to highlight the parameter label part in the `SignatureInformation.label`.
+ Label string `json:"label"`
+ // The human-readable doc-comment of this parameter. Will be shown
+ // in the UI but can be omitted.
+ Documentation string `json:"documentation,omitempty"`
+}
+type PartialResultParams struct { // line 6258
+ // An optional token that a server can use to report partial results (e.g. streaming) to
+ // the client.
+ PartialResultToken *ProgressToken `json:"partialResultToken,omitempty"`
+}
+
+// The glob pattern to watch relative to the base path. Glob patterns can have the following syntax:
+//
+// - `*` to match one or more characters in a path segment
+// - `?` to match on one character in a path segment
+// - `**` to match any number of path segments, including none
+// - `{}` to group conditions (e.g. `**​/*.{ts,js}` matches all TypeScript and JavaScript files)
+// - `[]` to declare a range of characters to match in a path segment (e.g., `example.[0-9]` to match on `example.0`, `example.1`, …)
+// - `[!...]` to negate a range of characters to match in a path segment (e.g., `example.[!0-9]` to match on `example.a`, `example.b`, but not `example.0`)
+//
+// @since 3.17.0
+type Pattern = string // (alias) line 14363
+// Position in a text document expressed as zero-based line and character
+// offset. Prior to 3.17 the offsets were always based on a UTF-16 string
+// representation. So a string of the form `a𐐀b` the character offset of the
+// character `a` is 0, the character offset of `𐐀` is 1 and the character
+// offset of b is 3 since `𐐀` is represented using two code units in UTF-16.
+// Since 3.17 clients and servers can agree on a different string encoding
+// representation (e.g. UTF-8). The client announces it's supported encoding
+// via the client capability [`general.positionEncodings`](#clientCapabilities).
+// The value is an array of position encodings the client supports, with
+// decreasing preference (e.g. the encoding at index `0` is the most preferred
+// one). To stay backwards compatible the only mandatory encoding is UTF-16
+// represented via the string `utf-16`. The server can pick one of the
+// encodings offered by the client and signals that encoding back to the
+// client via the initialize result's property
+// [`capabilities.positionEncoding`](#serverCapabilities). If the string value
+// `utf-16` is missing from the client's capability `general.positionEncodings`
+// servers can safely assume that the client supports UTF-16. If the server
+// omits the position encoding in its initialize result the encoding defaults
+// to the string value `utf-16`. Implementation considerations: since the
+// conversion from one encoding into another requires the content of the
+// file / line the conversion is best done where the file is read which is
+// usually on the server side.
+//
+// Positions are line end character agnostic. So you can not specify a position
+// that denotes `\r|\n` or `\n|` where `|` represents the character offset.
+//
+// @since 3.17.0 - support for negotiated position encoding.
+type Position struct { // line 6501
+ // Line position in a document (zero-based).
+ //
+ // If a line number is greater than the number of lines in a document, it defaults back to the number of lines in the document.
+ // If a line number is negative, it defaults to 0.
+ Line uint32 `json:"line"`
+ // Character offset on a line in a document (zero-based).
+ //
+ // The meaning of this offset is determined by the negotiated
+ // `PositionEncodingKind`.
+ //
+ // If the character value is greater than the line length it defaults back to the
+ // line length.
+ Character uint32 `json:"character"`
+}
+
+// A set of predefined position encoding kinds.
+//
+// @since 3.17.0
+type PositionEncodingKind string // line 13427
+type PrepareRename2Gn = Msg_PrepareRename2Gn // (alias) line 13927
+type PrepareRenameParams struct { // line 5925
+ TextDocumentPositionParams
+ WorkDoneProgressParams
+}
+type PrepareRenameResult = Msg_PrepareRename2Gn // (alias) line 13927
+type PrepareSupportDefaultBehavior uint32 // line 13722
+// A previous result id in a workspace pull request.
+//
+// @since 3.17.0
+type PreviousResultID struct { // line 7331
+ // The URI for which the client knowns a
+ // result id.
+ URI DocumentURI `json:"uri"`
+ // The value of the previous result id.
+ Value string `json:"value"`
+}
+
+// A previous result id in a workspace pull request.
+//
+// @since 3.17.0
+type PreviousResultId struct { // line 7331
+ // The URI for which the client knowns a
+ // result id.
+ URI DocumentURI `json:"uri"`
+ // The value of the previous result id.
+ Value string `json:"value"`
+}
+type ProgressParams struct { // line 6201
+ // The progress token provided by the client or server.
+ Token ProgressToken `json:"token"`
+ // The progress data.
+ Value interface{} `json:"value"`
+}
+type ProgressToken = interface{} // (alias) line 13960
+// The publish diagnostic client capabilities.
+type PublishDiagnosticsClientCapabilities struct { // line 12072
+ // Whether the clients accepts diagnostics with related information.
+ RelatedInformation bool `json:"relatedInformation,omitempty"`
+ // Client supports the tag property to provide meta data about a diagnostic.
+ // Clients supporting tags have to handle unknown tags gracefully.
+ //
+ // @since 3.15.0
+ TagSupport *PTagSupportPPublishDiagnostics `json:"tagSupport,omitempty"`
+ // Whether the client interprets the version property of the
+ // `textDocument/publishDiagnostics` notification's parameter.
+ //
+ // @since 3.15.0
+ VersionSupport bool `json:"versionSupport,omitempty"`
+ // Client supports a codeDescription property
+ //
+ // @since 3.16.0
+ CodeDescriptionSupport bool `json:"codeDescriptionSupport,omitempty"`
+ // Whether code action supports the `data` property which is
+ // preserved between a `textDocument/publishDiagnostics` and
+ // `textDocument/codeAction` request.
+ //
+ // @since 3.16.0
+ DataSupport bool `json:"dataSupport,omitempty"`
+}
+
+// The publish diagnostic notification's parameters.
+type PublishDiagnosticsParams struct { // line 4462
+ // The URI for which diagnostic information is reported.
+ URI DocumentURI `json:"uri"`
+ // Optional the version number of the document the diagnostics are published for.
+ //
+ // @since 3.15.0
+ Version int32 `json:"version,omitempty"`
+ // An array of diagnostic information items.
+ Diagnostics []Diagnostic `json:"diagnostics"`
+}
+
+// A range in a text document expressed as (zero-based) start and end positions.
+//
+// If you want to specify a range that contains a line including the line ending
+// character(s) then use an end position denoting the start of the next line.
+// For example:
+// ```ts
+//
+// {
+// start: { line: 5, character: 23 }
+// end : { line 6, character : 0 }
+// }
+//
+// ```
+type Range struct { // line 6311
+ // The range's start position.
+ Start Position `json:"start"`
+ // The range's end position.
+ End Position `json:"end"`
+}
+
+// Client Capabilities for a {@link ReferencesRequest}.
+type ReferenceClientCapabilities struct { // line 11609
+ // Whether references supports dynamic registration.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+}
+
+// Value-object that contains additional information when
+// requesting references.
+type ReferenceContext struct { // line 8930
+ // Include the declaration of the current symbol.
+ IncludeDeclaration bool `json:"includeDeclaration"`
+}
+
+// Reference options.
+type ReferenceOptions struct { // line 8944
+ WorkDoneProgressOptions
+}
+
+// Parameters for a {@link ReferencesRequest}.
+type ReferenceParams struct { // line 5054
+ Context ReferenceContext `json:"context"`
+ TextDocumentPositionParams
+ WorkDoneProgressParams
+ PartialResultParams
+}
+
+// Registration options for a {@link ReferencesRequest}.
+type ReferenceRegistrationOptions struct { // line 5083
+ TextDocumentRegistrationOptions
+ ReferenceOptions
+}
+
+// General parameters to to register for an notification or to register a provider.
+type Registration struct { // line 7597
+ // The id used to register the request. The id can be used to deregister
+ // the request again.
+ ID string `json:"id"`
+ // The method / capability to register for.
+ Method string `json:"method"`
+ // Options necessary for the registration.
+ RegisterOptions interface{} `json:"registerOptions,omitempty"`
+}
+type RegistrationParams struct { // line 4038
+ Registrations []Registration `json:"registrations"`
+}
+
+// Client capabilities specific to regular expressions.
+//
+// @since 3.16.0
+type RegularExpressionsClientCapabilities struct { // line 12500
+ // The engine's name.
+ Engine string `json:"engine"`
+ // The engine's version.
+ Version string `json:"version,omitempty"`
+}
+
+// A full diagnostic report with a set of related documents.
+//
+// @since 3.17.0
+type RelatedFullDocumentDiagnosticReport struct { // line 7157
+ // Diagnostics of related documents. This information is useful
+ // in programming languages where code in a file A can generate
+ // diagnostics in a file B which A depends on. An example of
+ // such a language is C/C++ where marco definitions in a file
+ // a.cpp and result in errors in a header file b.hpp.
+ //
+ // @since 3.17.0
+ RelatedDocuments map[DocumentURI]interface{} `json:"relatedDocuments,omitempty"`
+ FullDocumentDiagnosticReport
+}
+
+// An unchanged diagnostic report with a set of related documents.
+//
+// @since 3.17.0
+type RelatedUnchangedDocumentDiagnosticReport struct { // line 7196
+ // Diagnostics of related documents. This information is useful
+ // in programming languages where code in a file A can generate
+ // diagnostics in a file B which A depends on. An example of
+ // such a language is C/C++ where marco definitions in a file
+ // a.cpp and result in errors in a header file b.hpp.
+ //
+ // @since 3.17.0
+ RelatedDocuments map[DocumentURI]interface{} `json:"relatedDocuments,omitempty"`
+ UnchangedDocumentDiagnosticReport
+}
+
+// A relative pattern is a helper to construct glob patterns that are matched
+// relatively to a base URI. The common value for a `baseUri` is a workspace
+// folder root, but it can be another absolute URI as well.
+//
+// @since 3.17.0
+type RelativePattern struct { // line 10736
+ // A workspace folder or a base URI to which this pattern will be matched
+ // against relatively.
+ BaseURI Or_RelativePattern_baseUri `json:"baseUri"`
+ // The actual glob pattern;
+ Pattern Pattern `json:"pattern"`
+}
+type RenameClientCapabilities struct { // line 11934
+ // Whether rename supports dynamic registration.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+ // Client supports testing for validity of rename operations
+ // before execution.
+ //
+ // @since 3.12.0
+ PrepareSupport bool `json:"prepareSupport,omitempty"`
+ // Client supports the default behavior result.
+ //
+ // The value indicates the default behavior used by the
+ // client.
+ //
+ // @since 3.16.0
+ PrepareSupportDefaultBehavior *PrepareSupportDefaultBehavior `json:"prepareSupportDefaultBehavior,omitempty"`
+ // Whether the client honors the change annotations in
+ // text edits and resource operations returned via the
+ // rename request's workspace edit by for example presenting
+ // the workspace edit in the user interface and asking
+ // for confirmation.
+ //
+ // @since 3.16.0
+ HonorsChangeAnnotations bool `json:"honorsChangeAnnotations,omitempty"`
+}
+
+// Rename file operation
+type RenameFile struct { // line 6749
+ // A rename
+ Kind string `json:"kind"`
+ // The old (existing) location.
+ OldURI DocumentURI `json:"oldUri"`
+ // The new location.
+ NewURI DocumentURI `json:"newUri"`
+ // Rename options.
+ Options *RenameFileOptions `json:"options,omitempty"`
+ ResourceOperation
+}
+
+// Rename file options
+type RenameFileOptions struct { // line 9441
+ // Overwrite target if existing. Overwrite wins over `ignoreIfExists`
+ Overwrite bool `json:"overwrite,omitempty"`
+ // Ignores if target exists.
+ IgnoreIfExists bool `json:"ignoreIfExists,omitempty"`
+}
+
+// The parameters sent in notifications/requests for user-initiated renames of
+// files.
+//
+// @since 3.16.0
+type RenameFilesParams struct { // line 3282
+ // An array of all files/folders renamed in this operation. When a folder is renamed, only
+ // the folder will be included, and not its children.
+ Files []FileRename `json:"files"`
+}
+
+// Provider options for a {@link RenameRequest}.
+type RenameOptions struct { // line 9269
+ // Renames should be checked and tested before being executed.
+ //
+ // @since version 3.12.0
+ PrepareProvider bool `json:"prepareProvider,omitempty"`
+ WorkDoneProgressOptions
+}
+
+// The parameters of a {@link RenameRequest}.
+type RenameParams struct { // line 5874
+ // The document to rename.
+ TextDocument TextDocumentIdentifier `json:"textDocument"`
+ // The position at which this request was sent.
+ Position Position `json:"position"`
+ // The new name of the symbol. If the given name is not valid the
+ // request must return a {@link ResponseError} with an
+ // appropriate message set.
+ NewName string `json:"newName"`
+ WorkDoneProgressParams
+}
+
+// Registration options for a {@link RenameRequest}.
+type RenameRegistrationOptions struct { // line 5910
+ TextDocumentRegistrationOptions
+ RenameOptions
+}
+
+// A generic resource operation.
+type ResourceOperation struct { // line 9393
+ // The resource operation kind.
+ Kind string `json:"kind"`
+ // An optional annotation identifier describing the operation.
+ //
+ // @since 3.16.0
+ AnnotationID *ChangeAnnotationIdentifier `json:"annotationId,omitempty"`
+}
+type ResourceOperationKind string // line 13669
+// Save options.
+type SaveOptions struct { // line 8465
+ // The client is supposed to include the content on save.
+ IncludeText bool `json:"includeText,omitempty"`
+}
+
+// A selection range represents a part of a selection hierarchy. A selection range
+// may have a parent selection range that contains it.
+type SelectionRange struct { // line 2569
+ // The {@link Range range} of this selection range.
+ Range Range `json:"range"`
+ // The parent selection range containing this range. Therefore `parent.range` must contain `this.range`.
+ Parent *SelectionRange `json:"parent,omitempty"`
+}
+type SelectionRangeClientCapabilities struct { // line 12058
+ // Whether implementation supports dynamic registration for selection range providers. If this is set to `true`
+ // the client supports the new `SelectionRangeRegistrationOptions` return value for the corresponding server
+ // capability as well.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+}
+type SelectionRangeOptions struct { // line 6524
+ WorkDoneProgressOptions
+}
+
+// A parameter literal used in selection range requests.
+type SelectionRangeParams struct { // line 2534
+ // The text document.
+ TextDocument TextDocumentIdentifier `json:"textDocument"`
+ // The positions inside the text document.
+ Positions []Position `json:"positions"`
+ WorkDoneProgressParams
+ PartialResultParams
+}
+type SelectionRangeRegistrationOptions struct { // line 2592
+ SelectionRangeOptions
+ TextDocumentRegistrationOptions
+ StaticRegistrationOptions
+}
+
+// A set of predefined token modifiers. This set is not fixed
+// an clients can specify additional token types via the
+// corresponding client capabilities.
+//
+// @since 3.16.0
+type SemanticTokenModifiers string // line 12670
+// A set of predefined token types. This set is not fixed
+// an clients can specify additional token types via the
+// corresponding client capabilities.
+//
+// @since 3.16.0
+type SemanticTokenTypes string // line 12563
+// @since 3.16.0
+type SemanticTokens struct { // line 2880
+ // An optional result id. If provided and clients support delta updating
+ // the client will include the result id in the next semantic token request.
+ // A server can then instead of computing all semantic tokens again simply
+ // send a delta.
+ ResultID string `json:"resultId,omitempty"`
+ // The actual tokens.
+ Data []uint32 `json:"data"`
+}
+
+// @since 3.16.0
+type SemanticTokensClientCapabilities struct { // line 12157
+ // Whether implementation supports dynamic registration. If this is set to `true`
+ // the client supports the new `(TextDocumentRegistrationOptions & StaticRegistrationOptions)`
+ // return value for the corresponding server capability as well.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+ // Which requests the client supports and might send to the server
+ // depending on the server's capability. Please note that clients might not
+ // show semantic tokens or degrade some of the user experience if a range
+ // or full request is advertised by the client but not provided by the
+ // server. If for example the client capability `requests.full` and
+ // `request.range` are both set to true but the server only provides a
+ // range provider the client might not render a minimap correctly or might
+ // even decide to not show any semantic tokens at all.
+ Requests PRequestsPSemanticTokens `json:"requests"`
+ // The token types that the client supports.
+ TokenTypes []string `json:"tokenTypes"`
+ // The token modifiers that the client supports.
+ TokenModifiers []string `json:"tokenModifiers"`
+ // The token formats the clients supports.
+ Formats []TokenFormat `json:"formats"`
+ // Whether the client supports tokens that can overlap each other.
+ OverlappingTokenSupport bool `json:"overlappingTokenSupport,omitempty"`
+ // Whether the client supports tokens that can span multiple lines.
+ MultilineTokenSupport bool `json:"multilineTokenSupport,omitempty"`
+ // Whether the client allows the server to actively cancel a
+ // semantic token request, e.g. supports returning
+ // LSPErrorCodes.ServerCancelled. If a server does the client
+ // needs to retrigger the request.
+ //
+ // @since 3.17.0
+ ServerCancelSupport bool `json:"serverCancelSupport,omitempty"`
+ // Whether the client uses semantic tokens to augment existing
+ // syntax tokens. If set to `true` client side created syntax
+ // tokens and semantic tokens are both used for colorization. If
+ // set to `false` the client only uses the returned semantic tokens
+ // for colorization.
+ //
+ // If the value is `undefined` then the client behavior is not
+ // specified.
+ //
+ // @since 3.17.0
+ AugmentsSyntaxTokens bool `json:"augmentsSyntaxTokens,omitempty"`
+}
+
+// @since 3.16.0
+type SemanticTokensDelta struct { // line 2979
+ ResultID string `json:"resultId,omitempty"`
+ // The semantic token edits to transform a previous result into a new result.
+ Edits []SemanticTokensEdit `json:"edits"`
+}
+
+// @since 3.16.0
+type SemanticTokensDeltaParams struct { // line 2946
+ // The text document.
+ TextDocument TextDocumentIdentifier `json:"textDocument"`
+ // The result id of a previous response. The result Id can either point to a full response
+ // or a delta response depending on what was received last.
+ PreviousResultID string `json:"previousResultId"`
+ WorkDoneProgressParams
+ PartialResultParams
+}
+
+// @since 3.16.0
+type SemanticTokensDeltaPartialResult struct { // line 3005
+ Edits []SemanticTokensEdit `json:"edits"`
+}
+
+// @since 3.16.0
+type SemanticTokensEdit struct { // line 6617
+ // The start offset of the edit.
+ Start uint32 `json:"start"`
+ // The count of elements to remove.
+ DeleteCount uint32 `json:"deleteCount"`
+ // The elements to insert.
+ Data []uint32 `json:"data,omitempty"`
+}
+
+// @since 3.16.0
+type SemanticTokensLegend struct { // line 9314
+ // The token types a server uses.
+ TokenTypes []string `json:"tokenTypes"`
+ // The token modifiers a server uses.
+ TokenModifiers []string `json:"tokenModifiers"`
+}
+
+// @since 3.16.0
+type SemanticTokensOptions struct { // line 6546
+ // The legend used by the server
+ Legend SemanticTokensLegend `json:"legend"`
+ // Server supports providing semantic tokens for a specific range
+ // of a document.
+ Range *Or_SemanticTokensOptions_range `json:"range,omitempty"`
+ // Server supports providing semantic tokens for a full document.
+ Full *Or_SemanticTokensOptions_full `json:"full,omitempty"`
+ WorkDoneProgressOptions
+}
+
+// @since 3.16.0
+type SemanticTokensParams struct { // line 2855
+ // The text document.
+ TextDocument TextDocumentIdentifier `json:"textDocument"`
+ WorkDoneProgressParams
+ PartialResultParams
+}
+
+// @since 3.16.0
+type SemanticTokensPartialResult struct { // line 2907
+ Data []uint32 `json:"data"`
+}
+
+// @since 3.16.0
+type SemanticTokensRangeParams struct { // line 3022
+ // The text document.
+ TextDocument TextDocumentIdentifier `json:"textDocument"`
+ // The range the semantic tokens are requested for.
+ Range Range `json:"range"`
+ WorkDoneProgressParams
+ PartialResultParams
+}
+
+// @since 3.16.0
+type SemanticTokensRegistrationOptions struct { // line 2924
+ TextDocumentRegistrationOptions
+ SemanticTokensOptions
+ StaticRegistrationOptions
+}
+
+// @since 3.16.0
+type SemanticTokensWorkspaceClientCapabilities struct { // line 10977
+ // Whether the client implementation supports a refresh request sent from
+ // the server to the client.
+ //
+ // Note that this event is global and will force the client to refresh all
+ // semantic tokens currently shown. It should be used with absolute care
+ // and is useful for situation where a server for example detects a project
+ // wide change that requires such a calculation.
+ RefreshSupport bool `json:"refreshSupport,omitempty"`
+}
+
+// Defines the capabilities provided by a language
+// server.
+type ServerCapabilities struct { // line 7809
+ // The position encoding the server picked from the encodings offered
+ // by the client via the client capability `general.positionEncodings`.
+ //
+ // If the client didn't provide any position encodings the only valid
+ // value that a server can return is 'utf-16'.
+ //
+ // If omitted it defaults to 'utf-16'.
+ //
+ // @since 3.17.0
+ PositionEncoding *PositionEncodingKind `json:"positionEncoding,omitempty"`
+ // Defines how text documents are synced. Is either a detailed structure
+ // defining each notification or for backwards compatibility the
+ // TextDocumentSyncKind number.
+ TextDocumentSync interface{} `json:"textDocumentSync,omitempty"`
+ // Defines how notebook documents are synced.
+ //
+ // @since 3.17.0
+ NotebookDocumentSync *Or_ServerCapabilities_notebookDocumentSync `json:"notebookDocumentSync,omitempty"`
+ // The server provides completion support.
+ CompletionProvider *CompletionOptions `json:"completionProvider,omitempty"`
+ // The server provides hover support.
+ HoverProvider *Or_ServerCapabilities_hoverProvider `json:"hoverProvider,omitempty"`
+ // The server provides signature help support.
+ SignatureHelpProvider *SignatureHelpOptions `json:"signatureHelpProvider,omitempty"`
+ // The server provides Goto Declaration support.
+ DeclarationProvider *Or_ServerCapabilities_declarationProvider `json:"declarationProvider,omitempty"`
+ // The server provides goto definition support.
+ DefinitionProvider *Or_ServerCapabilities_definitionProvider `json:"definitionProvider,omitempty"`
+ // The server provides Goto Type Definition support.
+ TypeDefinitionProvider *Or_ServerCapabilities_typeDefinitionProvider `json:"typeDefinitionProvider,omitempty"`
+ // The server provides Goto Implementation support.
+ ImplementationProvider *Or_ServerCapabilities_implementationProvider `json:"implementationProvider,omitempty"`
+ // The server provides find references support.
+ ReferencesProvider *Or_ServerCapabilities_referencesProvider `json:"referencesProvider,omitempty"`
+ // The server provides document highlight support.
+ DocumentHighlightProvider *Or_ServerCapabilities_documentHighlightProvider `json:"documentHighlightProvider,omitempty"`
+ // The server provides document symbol support.
+ DocumentSymbolProvider *Or_ServerCapabilities_documentSymbolProvider `json:"documentSymbolProvider,omitempty"`
+ // The server provides code actions. CodeActionOptions may only be
+ // specified if the client states that it supports
+ // `codeActionLiteralSupport` in its initial `initialize` request.
+ CodeActionProvider interface{} `json:"codeActionProvider,omitempty"`
+ // The server provides code lens.
+ CodeLensProvider *CodeLensOptions `json:"codeLensProvider,omitempty"`
+ // The server provides document link support.
+ DocumentLinkProvider *DocumentLinkOptions `json:"documentLinkProvider,omitempty"`
+ // The server provides color provider support.
+ ColorProvider *Or_ServerCapabilities_colorProvider `json:"colorProvider,omitempty"`
+ // The server provides workspace symbol support.
+ WorkspaceSymbolProvider *Or_ServerCapabilities_workspaceSymbolProvider `json:"workspaceSymbolProvider,omitempty"`
+ // The server provides document formatting.
+ DocumentFormattingProvider *Or_ServerCapabilities_documentFormattingProvider `json:"documentFormattingProvider,omitempty"`
+ // The server provides document range formatting.
+ DocumentRangeFormattingProvider *Or_ServerCapabilities_documentRangeFormattingProvider `json:"documentRangeFormattingProvider,omitempty"`
+ // The server provides document formatting on typing.
+ DocumentOnTypeFormattingProvider *DocumentOnTypeFormattingOptions `json:"documentOnTypeFormattingProvider,omitempty"`
+ // The server provides rename support. RenameOptions may only be
+ // specified if the client states that it supports
+ // `prepareSupport` in its initial `initialize` request.
+ RenameProvider interface{} `json:"renameProvider,omitempty"`
+ // The server provides folding provider support.
+ FoldingRangeProvider *Or_ServerCapabilities_foldingRangeProvider `json:"foldingRangeProvider,omitempty"`
+ // The server provides selection range support.
+ SelectionRangeProvider *Or_ServerCapabilities_selectionRangeProvider `json:"selectionRangeProvider,omitempty"`
+ // The server provides execute command support.
+ ExecuteCommandProvider *ExecuteCommandOptions `json:"executeCommandProvider,omitempty"`
+ // The server provides call hierarchy support.
+ //
+ // @since 3.16.0
+ CallHierarchyProvider *Or_ServerCapabilities_callHierarchyProvider `json:"callHierarchyProvider,omitempty"`
+ // The server provides linked editing range support.
+ //
+ // @since 3.16.0
+ LinkedEditingRangeProvider *Or_ServerCapabilities_linkedEditingRangeProvider `json:"linkedEditingRangeProvider,omitempty"`
+ // The server provides semantic tokens support.
+ //
+ // @since 3.16.0
+ SemanticTokensProvider interface{} `json:"semanticTokensProvider,omitempty"`
+ // The server provides moniker support.
+ //
+ // @since 3.16.0
+ MonikerProvider *Or_ServerCapabilities_monikerProvider `json:"monikerProvider,omitempty"`
+ // The server provides type hierarchy support.
+ //
+ // @since 3.17.0
+ TypeHierarchyProvider *Or_ServerCapabilities_typeHierarchyProvider `json:"typeHierarchyProvider,omitempty"`
+ // The server provides inline values.
+ //
+ // @since 3.17.0
+ InlineValueProvider *Or_ServerCapabilities_inlineValueProvider `json:"inlineValueProvider,omitempty"`
+ // The server provides inlay hints.
+ //
+ // @since 3.17.0
+ InlayHintProvider interface{} `json:"inlayHintProvider,omitempty"`
+ // The server has support for pull model diagnostics.
+ //
+ // @since 3.17.0
+ DiagnosticProvider *Or_ServerCapabilities_diagnosticProvider `json:"diagnosticProvider,omitempty"`
+ // Workspace specific server capabilities.
+ Workspace *Workspace6Gn `json:"workspace,omitempty"`
+ // Experimental server capabilities.
+ Experimental interface{} `json:"experimental,omitempty"`
+}
+type SetTraceParams struct { // line 6147
+ Value TraceValues `json:"value"`
+}
+
+// Client capabilities for the showDocument request.
+//
+// @since 3.16.0
+type ShowDocumentClientCapabilities struct { // line 12485
+ // The client has support for the showDocument
+ // request.
+ Support bool `json:"support"`
+}
+
+// Params to show a document.
+//
+// @since 3.16.0
+type ShowDocumentParams struct { // line 3055
+ // The document uri to show.
+ URI URI `json:"uri"`
+ // Indicates to show the resource in an external program.
+ // To show for example `https://code.visualstudio.com/`
+ // in the default WEB browser set `external` to `true`.
+ External bool `json:"external,omitempty"`
+ // An optional property to indicate whether the editor
+ // showing the document should take focus or not.
+ // Clients might ignore this property if an external
+ // program is started.
+ TakeFocus bool `json:"takeFocus,omitempty"`
+ // An optional selection range if the document is a text
+ // document. Clients might ignore the property if an
+ // external program is started or the file is not a text
+ // file.
+ Selection *Range `json:"selection,omitempty"`
+}
+
+// The result of a showDocument request.
+//
+// @since 3.16.0
+type ShowDocumentResult struct { // line 3097
+ // A boolean indicating if the show was successful.
+ Success bool `json:"success"`
+}
+
+// The parameters of a notification message.
+type ShowMessageParams struct { // line 4183
+ // The message type. See {@link MessageType}
+ Type MessageType `json:"type"`
+ // The actual message.
+ Message string `json:"message"`
+}
+
+// Show message request client capabilities
+type ShowMessageRequestClientCapabilities struct { // line 12458
+ // Capabilities specific to the `MessageActionItem` type.
+ MessageActionItem *PMessageActionItemPShowMessage `json:"messageActionItem,omitempty"`
+}
+type ShowMessageRequestParams struct { // line 4205
+ // The message type. See {@link MessageType}
+ Type MessageType `json:"type"`
+ // The actual message.
+ Message string `json:"message"`
+ // The message action items to present.
+ Actions []MessageActionItem `json:"actions,omitempty"`
+}
+
+// Signature help represents the signature of something
+// callable. There can be multiple signature but only one
+// active and only one active parameter.
+type SignatureHelp struct { // line 4968
+ // One or more signatures.
+ Signatures []SignatureInformation `json:"signatures"`
+ // The active signature. If omitted or the value lies outside the
+ // range of `signatures` the value defaults to zero or is ignored if
+ // the `SignatureHelp` has no signatures.
+ //
+ // Whenever possible implementors should make an active decision about
+ // the active signature and shouldn't rely on a default value.
+ //
+ // In future version of the protocol this property might become
+ // mandatory to better express this.
+ ActiveSignature uint32 `json:"activeSignature,omitempty"`
+ // The active parameter of the active signature. If omitted or the value
+ // lies outside the range of `signatures[activeSignature].parameters`
+ // defaults to 0 if the active signature has parameters. If
+ // the active signature has no parameters it is ignored.
+ // In future version of the protocol this property might become
+ // mandatory to better express the active parameter if the
+ // active signature does have any.
+ ActiveParameter uint32 `json:"activeParameter,omitempty"`
+}
+
+// Client Capabilities for a {@link SignatureHelpRequest}.
+type SignatureHelpClientCapabilities struct { // line 11428
+ // Whether signature help supports dynamic registration.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+ // The client supports the following `SignatureInformation`
+ // specific properties.
+ SignatureInformation *PSignatureInformationPSignatureHelp `json:"signatureInformation,omitempty"`
+ // The client supports to send additional context information for a
+ // `textDocument/signatureHelp` request. A client that opts into
+ // contextSupport will also support the `retriggerCharacters` on
+ // `SignatureHelpOptions`.
+ //
+ // @since 3.15.0
+ ContextSupport bool `json:"contextSupport,omitempty"`
+}
+
+// Additional information about the context in which a signature help request was triggered.
+//
+// @since 3.15.0
+type SignatureHelpContext struct { // line 8787
+ // Action that caused signature help to be triggered.
+ TriggerKind SignatureHelpTriggerKind `json:"triggerKind"`
+ // Character that caused signature help to be triggered.
+ //
+ // This is undefined when `triggerKind !== SignatureHelpTriggerKind.TriggerCharacter`
+ TriggerCharacter string `json:"triggerCharacter,omitempty"`
+ // `true` if signature help was already showing when it was triggered.
+ //
+ // Retriggers occurs when the signature help is already active and can be caused by actions such as
+ // typing a trigger character, a cursor move, or document content changes.
+ IsRetrigger bool `json:"isRetrigger"`
+ // The currently active `SignatureHelp`.
+ //
+ // The `activeSignatureHelp` has its `SignatureHelp.activeSignature` field updated based on
+ // the user navigating through available signatures.
+ ActiveSignatureHelp *SignatureHelp `json:"activeSignatureHelp,omitempty"`
+}
+
+// Server Capabilities for a {@link SignatureHelpRequest}.
+type SignatureHelpOptions struct { // line 8882
+ // List of characters that trigger signature help automatically.
+ TriggerCharacters []string `json:"triggerCharacters,omitempty"`
+ // List of characters that re-trigger signature help.
+ //
+ // These trigger characters are only active when signature help is already showing. All trigger characters
+ // are also counted as re-trigger characters.
+ //
+ // @since 3.15.0
+ RetriggerCharacters []string `json:"retriggerCharacters,omitempty"`
+ WorkDoneProgressOptions
+}
+
+// Parameters for a {@link SignatureHelpRequest}.
+type SignatureHelpParams struct { // line 4940
+ // The signature help context. This is only available if the client specifies
+ // to send this using the client capability `textDocument.signatureHelp.contextSupport === true`
+ //
+ // @since 3.15.0
+ Context *SignatureHelpContext `json:"context,omitempty"`
+ TextDocumentPositionParams
+ WorkDoneProgressParams
+}
+
+// Registration options for a {@link SignatureHelpRequest}.
+type SignatureHelpRegistrationOptions struct { // line 5003
+ TextDocumentRegistrationOptions
+ SignatureHelpOptions
+}
+
+// How a signature help was triggered.
+//
+// @since 3.15.0
+type SignatureHelpTriggerKind uint32 // line 13580
+// Represents the signature of something callable. A signature
+// can have a label, like a function-name, a doc-comment, and
+// a set of parameters.
+type SignatureInformation struct { // line 8828
+ // The label of this signature. Will be shown in
+ // the UI.
+ Label string `json:"label"`
+ // The human-readable doc-comment of this signature. Will be shown
+ // in the UI but can be omitted.
+ Documentation *Or_SignatureInformation_documentation `json:"documentation,omitempty"`
+ // The parameters of this signature.
+ Parameters []ParameterInformation `json:"parameters,omitempty"`
+ // The index of the active parameter.
+ //
+ // If provided, this is used in place of `SignatureHelp.activeParameter`.
+ //
+ // @since 3.16.0
+ ActiveParameter uint32 `json:"activeParameter,omitempty"`
+}
+
+// Static registration options to be returned in the initialize
+// request.
+type StaticRegistrationOptions struct { // line 6343
+ // The id used to register the request. The id can be used to deregister
+ // the request again. See also Registration#id.
+ ID string `json:"id,omitempty"`
+}
+
+// Represents information about programming constructs like variables, classes,
+// interfaces etc.
+type SymbolInformation struct { // line 5181
+ // extends BaseSymbolInformation
+ // Indicates if this symbol is deprecated.
+ //
+ // @deprecated Use tags instead
+ Deprecated bool `json:"deprecated,omitempty"`
+ // The location of this symbol. The location's range is used by a tool
+ // to reveal the location in the editor. If the symbol is selected in the
+ // tool the range's start information is used to position the cursor. So
+ // the range usually spans more than the actual symbol's name and does
+ // normally include things like visibility modifiers.
+ //
+ // The range doesn't have to denote a node range in the sense of an abstract
+ // syntax tree. It can therefore not be used to re-construct a hierarchy of
+ // the symbols.
+ Location Location `json:"location"`
+ // The name of this symbol.
+ Name string `json:"name"`
+ // The kind of this symbol.
+ Kind SymbolKind `json:"kind"`
+ // Tags for this symbol.
+ //
+ // @since 3.16.0
+ Tags []SymbolTag `json:"tags,omitempty"`
+ // The name of the symbol containing this symbol. This information is for
+ // user interface purposes (e.g. to render a qualifier in the user interface
+ // if necessary). It can't be used to re-infer a hierarchy for the document
+ // symbols.
+ ContainerName string `json:"containerName,omitempty"`
+}
+
+// A symbol kind.
+type SymbolKind uint32 // line 12841
+// Symbol tags are extra annotations that tweak the rendering of a symbol.
+//
+// @since 3.16
+type SymbolTag uint32 // line 12955
+// Describe options to be used when registered for text document change events.
+type TextDocumentChangeRegistrationOptions struct { // line 4312
+ // How documents are synced to the server.
+ SyncKind TextDocumentSyncKind `json:"syncKind"`
+ TextDocumentRegistrationOptions
+}
+
+// Text document specific client capabilities.
+type TextDocumentClientCapabilities struct { // line 10323
+ // Defines which synchronization capabilities the client supports.
+ Synchronization *TextDocumentSyncClientCapabilities `json:"synchronization,omitempty"`
+ // Capabilities specific to the `textDocument/completion` request.
+ Completion CompletionClientCapabilities `json:"completion,omitempty"`
+ // Capabilities specific to the `textDocument/hover` request.
+ Hover *HoverClientCapabilities `json:"hover,omitempty"`
+ // Capabilities specific to the `textDocument/signatureHelp` request.
+ SignatureHelp *SignatureHelpClientCapabilities `json:"signatureHelp,omitempty"`
+ // Capabilities specific to the `textDocument/declaration` request.
+ //
+ // @since 3.14.0
+ Declaration *DeclarationClientCapabilities `json:"declaration,omitempty"`
+ // Capabilities specific to the `textDocument/definition` request.
+ Definition *DefinitionClientCapabilities `json:"definition,omitempty"`
+ // Capabilities specific to the `textDocument/typeDefinition` request.
+ //
+ // @since 3.6.0
+ TypeDefinition *TypeDefinitionClientCapabilities `json:"typeDefinition,omitempty"`
+ // Capabilities specific to the `textDocument/implementation` request.
+ //
+ // @since 3.6.0
+ Implementation *ImplementationClientCapabilities `json:"implementation,omitempty"`
+ // Capabilities specific to the `textDocument/references` request.
+ References *ReferenceClientCapabilities `json:"references,omitempty"`
+ // Capabilities specific to the `textDocument/documentHighlight` request.
+ DocumentHighlight *DocumentHighlightClientCapabilities `json:"documentHighlight,omitempty"`
+ // Capabilities specific to the `textDocument/documentSymbol` request.
+ DocumentSymbol DocumentSymbolClientCapabilities `json:"documentSymbol,omitempty"`
+ // Capabilities specific to the `textDocument/codeAction` request.
+ CodeAction CodeActionClientCapabilities `json:"codeAction,omitempty"`
+ // Capabilities specific to the `textDocument/codeLens` request.
+ CodeLens *CodeLensClientCapabilities `json:"codeLens,omitempty"`
+ // Capabilities specific to the `textDocument/documentLink` request.
+ DocumentLink *DocumentLinkClientCapabilities `json:"documentLink,omitempty"`
+ // Capabilities specific to the `textDocument/documentColor` and the
+ // `textDocument/colorPresentation` request.
+ //
+ // @since 3.6.0
+ ColorProvider *DocumentColorClientCapabilities `json:"colorProvider,omitempty"`
+ // Capabilities specific to the `textDocument/formatting` request.
+ Formatting *DocumentFormattingClientCapabilities `json:"formatting,omitempty"`
+ // Capabilities specific to the `textDocument/rangeFormatting` request.
+ RangeFormatting *DocumentRangeFormattingClientCapabilities `json:"rangeFormatting,omitempty"`
+ // Capabilities specific to the `textDocument/onTypeFormatting` request.
+ OnTypeFormatting *DocumentOnTypeFormattingClientCapabilities `json:"onTypeFormatting,omitempty"`
+ // Capabilities specific to the `textDocument/rename` request.
+ Rename *RenameClientCapabilities `json:"rename,omitempty"`
+ // Capabilities specific to the `textDocument/foldingRange` request.
+ //
+ // @since 3.10.0
+ FoldingRange *FoldingRangeClientCapabilities `json:"foldingRange,omitempty"`
+ // Capabilities specific to the `textDocument/selectionRange` request.
+ //
+ // @since 3.15.0
+ SelectionRange *SelectionRangeClientCapabilities `json:"selectionRange,omitempty"`
+ // Capabilities specific to the `textDocument/publishDiagnostics` notification.
+ PublishDiagnostics PublishDiagnosticsClientCapabilities `json:"publishDiagnostics,omitempty"`
+ // Capabilities specific to the various call hierarchy requests.
+ //
+ // @since 3.16.0
+ CallHierarchy *CallHierarchyClientCapabilities `json:"callHierarchy,omitempty"`
+ // Capabilities specific to the various semantic token request.
+ //
+ // @since 3.16.0
+ SemanticTokens SemanticTokensClientCapabilities `json:"semanticTokens,omitempty"`
+ // Capabilities specific to the `textDocument/linkedEditingRange` request.
+ //
+ // @since 3.16.0
+ LinkedEditingRange *LinkedEditingRangeClientCapabilities `json:"linkedEditingRange,omitempty"`
+ // Client capabilities specific to the `textDocument/moniker` request.
+ //
+ // @since 3.16.0
+ Moniker *MonikerClientCapabilities `json:"moniker,omitempty"`
+ // Capabilities specific to the various type hierarchy requests.
+ //
+ // @since 3.17.0
+ TypeHierarchy *TypeHierarchyClientCapabilities `json:"typeHierarchy,omitempty"`
+ // Capabilities specific to the `textDocument/inlineValue` request.
+ //
+ // @since 3.17.0
+ InlineValue *InlineValueClientCapabilities `json:"inlineValue,omitempty"`
+ // Capabilities specific to the `textDocument/inlayHint` request.
+ //
+ // @since 3.17.0
+ InlayHint *InlayHintClientCapabilities `json:"inlayHint,omitempty"`
+ // Capabilities specific to the diagnostic pull model.
+ //
+ // @since 3.17.0
+ Diagnostic *DiagnosticClientCapabilities `json:"diagnostic,omitempty"`
+}
+
+// An event describing a change to a text document. If only a text is provided
+// it is considered to be the full content of the document.
+type TextDocumentContentChangeEvent = Msg_TextDocumentContentChangeEvent // (alias) line 14002
+// Describes textual changes on a text document. A TextDocumentEdit describes all changes
+// on a document version Si and after they are applied move the document to version Si+1.
+// So the creator of a TextDocumentEdit doesn't need to sort the array of edits or do any
+// kind of ordering. However the edits must be non overlapping.
+type TextDocumentEdit struct { // line 6677
+ // The text document to change.
+ TextDocument OptionalVersionedTextDocumentIdentifier `json:"textDocument"`
+ // The edits to be applied.
+ //
+ // @since 3.16.0 - support for AnnotatedTextEdit. This is guarded using a
+ // client capability.
+ Edits []TextEdit `json:"edits"`
+}
+
+// A document filter denotes a document by different properties like
+// the {@link TextDocument.languageId language}, the {@link Uri.scheme scheme} of
+// its resource, or a glob-pattern that is applied to the {@link TextDocument.fileName path}.
+//
+// Glob patterns can have the following syntax:
+//
+// - `*` to match one or more characters in a path segment
+// - `?` to match on one character in a path segment
+// - `**` to match any number of path segments, including none
+// - `{}` to group sub patterns into an OR expression. (e.g. `**​/*.{ts,js}` matches all TypeScript and JavaScript files)
+// - `[]` to declare a range of characters to match in a path segment (e.g., `example.[0-9]` to match on `example.0`, `example.1`, …)
+// - `[!...]` to negate a range of characters to match in a path segment (e.g., `example.[!0-9]` to match on `example.a`, `example.b`, but not `example.0`)
+//
+// @sample A language filter that applies to typescript files on disk: `{ language: 'typescript', scheme: 'file' }`
+// @sample A language filter that applies to all package.json paths: `{ language: 'json', pattern: '**package.json' }`
+//
+// @since 3.17.0
+type TextDocumentFilter = Msg_TextDocumentFilter // (alias) line 14145
+// A literal to identify a text document in the client.
+type TextDocumentIdentifier struct { // line 6419
+ // The text document's uri.
+ URI DocumentURI `json:"uri"`
+}
+
+// An item to transfer a text document from the client to the
+// server.
+type TextDocumentItem struct { // line 7405
+ // The text document's uri.
+ URI DocumentURI `json:"uri"`
+ // The text document's language identifier.
+ LanguageID string `json:"languageId"`
+ // The version number of this document (it will increase after each
+ // change, including undo/redo).
+ Version int32 `json:"version"`
+ // The content of the opened text document.
+ Text string `json:"text"`
+}
+
+// A parameter literal used in requests to pass a text document and a position inside that
+// document.
+type TextDocumentPositionParams struct { // line 6222
+ // The text document.
+ TextDocument TextDocumentIdentifier `json:"textDocument"`
+ // The position inside the text document.
+ Position Position `json:"position"`
+}
+
+// General text document registration options.
+type TextDocumentRegistrationOptions struct { // line 2368
+ // A document selector to identify the scope of the registration. If set to null
+ // the document selector provided on the client side will be used.
+ DocumentSelector DocumentSelector `json:"documentSelector"`
+}
+
+// Represents reasons why a text document is saved.
+type TextDocumentSaveReason uint32 // line 13109
+// Save registration options.
+type TextDocumentSaveRegistrationOptions struct { // line 4369
+ TextDocumentRegistrationOptions
+ SaveOptions
+}
+type TextDocumentSyncClientCapabilities struct { // line 11127
+ // Whether text document synchronization supports dynamic registration.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+ // The client supports sending will save notifications.
+ WillSave bool `json:"willSave,omitempty"`
+ // The client supports sending a will save request and
+ // waits for a response providing text edits which will
+ // be applied to the document before it is saved.
+ WillSaveWaitUntil bool `json:"willSaveWaitUntil,omitempty"`
+ // The client supports did save notifications.
+ DidSave bool `json:"didSave,omitempty"`
+}
+
+// Defines how the host (editor) should sync
+// document changes to the language server.
+type TextDocumentSyncKind uint32 // line 13084
+type TextDocumentSyncOptions struct { // line 9736
+ // Open and close notifications are sent to the server. If omitted open close notification should not
+ // be sent.
+ OpenClose bool `json:"openClose,omitempty"`
+ // Change notifications are sent to the server. See TextDocumentSyncKind.None, TextDocumentSyncKind.Full
+ // and TextDocumentSyncKind.Incremental. If omitted it defaults to TextDocumentSyncKind.None.
+ Change TextDocumentSyncKind `json:"change,omitempty"`
+ // If present will save notifications are sent to the server. If omitted the notification should not be
+ // sent.
+ WillSave bool `json:"willSave,omitempty"`
+ // If present will save wait until requests are sent to the server. If omitted the request should not be
+ // sent.
+ WillSaveWaitUntil bool `json:"willSaveWaitUntil,omitempty"`
+ // If present save notifications are sent to the server. If omitted the notification should not be
+ // sent.
+ Save *SaveOptions `json:"save,omitempty"`
+}
+
+// A text edit applicable to a text document.
+type TextEdit struct { // line 4406
+ // The range of the text document to be manipulated. To insert
+ // text into a document create a range where start === end.
+ Range Range `json:"range"`
+ // The string to be inserted. For delete operations use an
+ // empty string.
+ NewText string `json:"newText"`
+}
+type TokenFormat string // line 13736
+type TraceValues string // line 13383
+// Since 3.6.0
+type TypeDefinitionClientCapabilities struct { // line 11559
+ // Whether implementation supports dynamic registration. If this is set to `true`
+ // the client supports the new `TypeDefinitionRegistrationOptions` return value
+ // for the corresponding server capability as well.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+ // The client supports additional metadata in the form of definition links.
+ //
+ // Since 3.14.0
+ LinkSupport bool `json:"linkSupport,omitempty"`
+}
+type TypeDefinitionOptions struct { // line 6358
+ WorkDoneProgressOptions
+}
+type TypeDefinitionParams struct { // line 2123
+ TextDocumentPositionParams
+ WorkDoneProgressParams
+ PartialResultParams
+}
+type TypeDefinitionRegistrationOptions struct { // line 2143
+ TextDocumentRegistrationOptions
+ TypeDefinitionOptions
+ StaticRegistrationOptions
+}
+
+// @since 3.17.0
+type TypeHierarchyClientCapabilities struct { // line 12337
+ // Whether implementation supports dynamic registration. If this is set to `true`
+ // the client supports the new `(TextDocumentRegistrationOptions & StaticRegistrationOptions)`
+ // return value for the corresponding server capability as well.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+}
+
+// @since 3.17.0
+type TypeHierarchyItem struct { // line 3410
+ // The name of this item.
+ Name string `json:"name"`
+ // The kind of this item.
+ Kind SymbolKind `json:"kind"`
+ // Tags for this item.
+ Tags []SymbolTag `json:"tags,omitempty"`
+ // More detail for this item, e.g. the signature of a function.
+ Detail string `json:"detail,omitempty"`
+ // The resource identifier of this item.
+ URI DocumentURI `json:"uri"`
+ // The range enclosing this symbol not including leading/trailing whitespace
+ // but everything else, e.g. comments and code.
+ Range Range `json:"range"`
+ // The range that should be selected and revealed when this symbol is being
+ // picked, e.g. the name of a function. Must be contained by the
+ // {@link TypeHierarchyItem.range `range`}.
+ SelectionRange Range `json:"selectionRange"`
+ // A data entry field that is preserved between a type hierarchy prepare and
+ // supertypes or subtypes requests. It could also be used to identify the
+ // type hierarchy in the server, helping improve the performance on
+ // resolving supertypes and subtypes.
+ Data interface{} `json:"data,omitempty"`
+}
+
+// Type hierarchy options used during static registration.
+//
+// @since 3.17.0
+type TypeHierarchyOptions struct { // line 6936
+ WorkDoneProgressOptions
+}
+
+// The parameter of a `textDocument/prepareTypeHierarchy` request.
+//
+// @since 3.17.0
+type TypeHierarchyPrepareParams struct { // line 3392
+ TextDocumentPositionParams
+ WorkDoneProgressParams
+}
+
+// Type hierarchy options used during static or dynamic registration.
+//
+// @since 3.17.0
+type TypeHierarchyRegistrationOptions struct { // line 3487
+ TextDocumentRegistrationOptions
+ TypeHierarchyOptions
+ StaticRegistrationOptions
+}
+
+// The parameter of a `typeHierarchy/subtypes` request.
+//
+// @since 3.17.0
+type TypeHierarchySubtypesParams struct { // line 3533
+ Item TypeHierarchyItem `json:"item"`
+ WorkDoneProgressParams
+ PartialResultParams
+}
+
+// The parameter of a `typeHierarchy/supertypes` request.
+//
+// @since 3.17.0
+type TypeHierarchySupertypesParams struct { // line 3509
+ Item TypeHierarchyItem `json:"item"`
+ WorkDoneProgressParams
+ PartialResultParams
+}
+
+// created for Tuple
+type UIntCommaUInt struct { // line 10076
+ Fld0 uint32 `json:"fld0"`
+ Fld1 uint32 `json:"fld1"`
+}
+type URI = string
+
+// A diagnostic report indicating that the last returned
+// report is still accurate.
+//
+// @since 3.17.0
+type UnchangedDocumentDiagnosticReport struct { // line 7270
+ // A document diagnostic report indicating
+ // no changes to the last result. A server can
+ // only return `unchanged` if result ids are
+ // provided.
+ Kind string `json:"kind"`
+ // A result id which will be sent on the next
+ // diagnostic request for the same document.
+ ResultID string `json:"resultId"`
+}
+
+// Moniker uniqueness level to define scope of the moniker.
+//
+// @since 3.16.0
+type UniquenessLevel string // line 12971
+// General parameters to unregister a request or notification.
+type Unregistration struct { // line 7628
+ // The id used to unregister the request or notification. Usually an id
+ // provided during the register request.
+ ID string `json:"id"`
+ // The method to unregister for.
+ Method string `json:"method"`
+}
+type UnregistrationParams struct { // line 4053
+ Unregisterations []Unregistration `json:"unregisterations"`
+}
+
+// A versioned notebook document identifier.
+//
+// @since 3.17.0
+type VersionedNotebookDocumentIdentifier struct { // line 7443
+ // The version number of this notebook document.
+ Version int32 `json:"version"`
+ // The notebook document's uri.
+ URI URI `json:"uri"`
+}
+
+// A text document identifier to denote a specific version of a text document.
+type VersionedTextDocumentIdentifier struct { // line 8445
+ // The version number of this document.
+ Version int32 `json:"version"`
+ TextDocumentIdentifier
+}
+type WatchKind = uint32 // line 13505// The parameters sent in a will save text document notification.
+type WillSaveTextDocumentParams struct { // line 4384
+ // The document that will be saved.
+ TextDocument TextDocumentIdentifier `json:"textDocument"`
+ // The 'TextDocumentSaveReason'.
+ Reason TextDocumentSaveReason `json:"reason"`
+}
+type WindowClientCapabilities struct { // line 10629
+ // It indicates whether the client supports server initiated
+ // progress using the `window/workDoneProgress/create` request.
+ //
+ // The capability also controls Whether client supports handling
+ // of progress notifications. If set servers are allowed to report a
+ // `workDoneProgress` property in the request specific server
+ // capabilities.
+ //
+ // @since 3.15.0
+ WorkDoneProgress bool `json:"workDoneProgress,omitempty"`
+ // Capabilities specific to the showMessage request.
+ //
+ // @since 3.16.0
+ ShowMessage *ShowMessageRequestClientCapabilities `json:"showMessage,omitempty"`
+ // Capabilities specific to the showDocument request.
+ //
+ // @since 3.16.0
+ ShowDocument *ShowDocumentClientCapabilities `json:"showDocument,omitempty"`
+}
+type WorkDoneProgressBegin struct { // line 6040
+ Kind string `json:"kind"`
+ // Mandatory title of the progress operation. Used to briefly inform about
+ // the kind of operation being performed.
+ //
+ // Examples: "Indexing" or "Linking dependencies".
+ Title string `json:"title"`
+ // Controls if a cancel button should show to allow the user to cancel the
+ // long running operation. Clients that don't support cancellation are allowed
+ // to ignore the setting.
+ Cancellable bool `json:"cancellable,omitempty"`
+ // Optional, more detailed associated progress message. Contains
+ // complementary information to the `title`.
+ //
+ // Examples: "3/25 files", "project/src/module2", "node_modules/some_dep".
+ // If unset, the previous progress message (if any) is still valid.
+ Message string `json:"message,omitempty"`
+ // Optional progress percentage to display (value 100 is considered 100%).
+ // If not provided infinite progress is assumed and clients are allowed
+ // to ignore the `percentage` value in subsequent in report notifications.
+ //
+ // The value should be steadily rising. Clients are free to ignore values
+ // that are not following this rule. The value range is [0, 100].
+ Percentage uint32 `json:"percentage,omitempty"`
+}
+type WorkDoneProgressCancelParams struct { // line 2625
+ // The token to be used to report progress.
+ Token ProgressToken `json:"token"`
+}
+type WorkDoneProgressCreateParams struct { // line 2612
+ // The token to be used to report progress.
+ Token ProgressToken `json:"token"`
+}
+type WorkDoneProgressEnd struct { // line 6126
+ Kind string `json:"kind"`
+ // Optional, a final message indicating to for example indicate the outcome
+ // of the operation.
+ Message string `json:"message,omitempty"`
+}
+type WorkDoneProgressOptions struct { // line 2355
+ WorkDoneProgress bool `json:"workDoneProgress,omitempty"`
+}
+
+// created for And
+type WorkDoneProgressOptionsAndTextDocumentRegistrationOptions struct { // line 196
+ WorkDoneProgressOptions
+ TextDocumentRegistrationOptions
+}
+type WorkDoneProgressParams struct { // line 6244
+ // An optional token that a server can use to report work done progress.
+ WorkDoneToken ProgressToken `json:"workDoneToken,omitempty"`
+}
+type WorkDoneProgressReport struct { // line 6087
+ Kind string `json:"kind"`
+ // Controls enablement state of a cancel button.
+ //
+ // Clients that don't support cancellation or don't support controlling the button's
+ // enablement state are allowed to ignore the property.
+ Cancellable bool `json:"cancellable,omitempty"`
+ // Optional, more detailed associated progress message. Contains
+ // complementary information to the `title`.
+ //
+ // Examples: "3/25 files", "project/src/module2", "node_modules/some_dep".
+ // If unset, the previous progress message (if any) is still valid.
+ Message string `json:"message,omitempty"`
+ // Optional progress percentage to display (value 100 is considered 100%).
+ // If not provided infinite progress is assumed and clients are allowed
+ // to ignore the `percentage` value in subsequent in report notifications.
+ //
+ // The value should be steadily rising. Clients are free to ignore values
+ // that are not following this rule. The value range is [0, 100]
+ Percentage uint32 `json:"percentage,omitempty"`
+}
+
+// created for Literal (Lit_ServerCapabilities_workspace)
+type Workspace6Gn struct { // line 8404
+ // The server supports workspace folder.
+ //
+ // @since 3.6.0
+ WorkspaceFolders *WorkspaceFolders5Gn `json:"workspaceFolders,omitempty"`
+ // The server is interested in notifications/requests for operations on files.
+ //
+ // @since 3.16.0
+ FileOperations *FileOperationOptions `json:"fileOperations,omitempty"`
+}
+
+// Workspace specific client capabilities.
+type WorkspaceClientCapabilities struct { // line 10184
+ // The client supports applying batch edits
+ // to the workspace by supporting the request
+ // 'workspace/applyEdit'
+ ApplyEdit bool `json:"applyEdit,omitempty"`
+ // Capabilities specific to `WorkspaceEdit`s.
+ WorkspaceEdit *WorkspaceEditClientCapabilities `json:"workspaceEdit,omitempty"`
+ // Capabilities specific to the `workspace/didChangeConfiguration` notification.
+ DidChangeConfiguration DidChangeConfigurationClientCapabilities `json:"didChangeConfiguration,omitempty"`
+ // Capabilities specific to the `workspace/didChangeWatchedFiles` notification.
+ DidChangeWatchedFiles DidChangeWatchedFilesClientCapabilities `json:"didChangeWatchedFiles,omitempty"`
+ // Capabilities specific to the `workspace/symbol` request.
+ Symbol *WorkspaceSymbolClientCapabilities `json:"symbol,omitempty"`
+ // Capabilities specific to the `workspace/executeCommand` request.
+ ExecuteCommand *ExecuteCommandClientCapabilities `json:"executeCommand,omitempty"`
+ // The client has support for workspace folders.
+ //
+ // @since 3.6.0
+ WorkspaceFolders bool `json:"workspaceFolders,omitempty"`
+ // The client supports `workspace/configuration` requests.
+ //
+ // @since 3.6.0
+ Configuration bool `json:"configuration,omitempty"`
+ // Capabilities specific to the semantic token requests scoped to the
+ // workspace.
+ //
+ // @since 3.16.0.
+ SemanticTokens *SemanticTokensWorkspaceClientCapabilities `json:"semanticTokens,omitempty"`
+ // Capabilities specific to the code lens requests scoped to the
+ // workspace.
+ //
+ // @since 3.16.0.
+ CodeLens *CodeLensWorkspaceClientCapabilities `json:"codeLens,omitempty"`
+ // The client has support for file notifications/requests for user operations on files.
+ //
+ // Since 3.16.0
+ FileOperations *FileOperationClientCapabilities `json:"fileOperations,omitempty"`
+ // Capabilities specific to the inline values requests scoped to the
+ // workspace.
+ //
+ // @since 3.17.0.
+ InlineValue *InlineValueWorkspaceClientCapabilities `json:"inlineValue,omitempty"`
+ // Capabilities specific to the inlay hint requests scoped to the
+ // workspace.
+ //
+ // @since 3.17.0.
+ InlayHint *InlayHintWorkspaceClientCapabilities `json:"inlayHint,omitempty"`
+ // Capabilities specific to the diagnostic requests scoped to the
+ // workspace.
+ //
+ // @since 3.17.0.
+ Diagnostics *DiagnosticWorkspaceClientCapabilities `json:"diagnostics,omitempty"`
+}
+
+// Parameters of the workspace diagnostic request.
+//
+// @since 3.17.0
+type WorkspaceDiagnosticParams struct { // line 3877
+ // The additional identifier provided during registration.
+ Identifier string `json:"identifier,omitempty"`
+ // The currently known diagnostic reports with their
+ // previous result ids.
+ PreviousResultIds []PreviousResultID `json:"previousResultIds"`
+ WorkDoneProgressParams
+ PartialResultParams
+}
+
+// A workspace diagnostic report.
+//
+// @since 3.17.0
+type WorkspaceDiagnosticReport struct { // line 3914
+ Items []WorkspaceDocumentDiagnosticReport `json:"items"`
+}
+
+// A partial result for a workspace diagnostic report.
+//
+// @since 3.17.0
+type WorkspaceDiagnosticReportPartialResult struct { // line 3931
+ Items []WorkspaceDocumentDiagnosticReport `json:"items"`
+}
+
+// A workspace diagnostic document report.
+//
+// @since 3.17.0
+type WorkspaceDocumentDiagnosticReport = Or_WorkspaceDocumentDiagnosticReport // (alias) line 13984
+// A workspace edit represents changes to many resources managed in the workspace. The edit
+// should either provide `changes` or `documentChanges`. If documentChanges are present
+// they are preferred over `changes` if the client can handle versioned document edits.
+//
+// Since version 3.13.0 a workspace edit can contain resource operations as well. If resource
+// operations are present clients need to execute the operations in the order in which they
+// are provided. So a workspace edit for example can consist of the following two changes:
+// (1) a create file a.txt and (2) a text document edit which insert text into file a.txt.
+//
+// An invalid sequence (e.g. (1) delete file a.txt and (2) insert text into file a.txt) will
+// cause failure of the operation. How the client recovers from the failure is described by
+// the client capability: `workspace.workspaceEdit.failureHandling`
+type WorkspaceEdit struct { // line 3193
+ // Holds changes to existing resources.
+ Changes map[DocumentURI][]TextEdit `json:"changes,omitempty"`
+ // Depending on the client capability `workspace.workspaceEdit.resourceOperations` document changes
+ // are either an array of `TextDocumentEdit`s to express changes to n different text documents
+ // where each text document edit addresses a specific version of a text document. Or it can contain
+ // above `TextDocumentEdit`s mixed with create, rename and delete file / folder operations.
+ //
+ // Whether a client supports versioned document edits is expressed via
+ // `workspace.workspaceEdit.documentChanges` client capability.
+ //
+ // If a client neither supports `documentChanges` nor `workspace.workspaceEdit.resourceOperations` then
+ // only plain `TextEdit`s using the `changes` property are supported.
+ DocumentChanges []DocumentChanges `json:"documentChanges,omitempty"`
+ // A map of change annotations that can be referenced in `AnnotatedTextEdit`s or create, rename and
+ // delete file / folder operations.
+ //
+ // Whether clients honor this property depends on the client capability `workspace.changeAnnotationSupport`.
+ //
+ // @since 3.16.0
+ ChangeAnnotations map[ChangeAnnotationIdentifier]ChangeAnnotation `json:"changeAnnotations,omitempty"`
+}
+type WorkspaceEditClientCapabilities struct { // line 10768
+ // The client supports versioned document changes in `WorkspaceEdit`s
+ DocumentChanges bool `json:"documentChanges,omitempty"`
+ // The resource operations the client supports. Clients should at least
+ // support 'create', 'rename' and 'delete' files and folders.
+ //
+ // @since 3.13.0
+ ResourceOperations []ResourceOperationKind `json:"resourceOperations,omitempty"`
+ // The failure handling strategy of a client if applying the workspace edit
+ // fails.
+ //
+ // @since 3.13.0
+ FailureHandling *FailureHandlingKind `json:"failureHandling,omitempty"`
+ // Whether the client normalizes line endings to the client specific
+ // setting.
+ // If set to `true` the client will normalize line ending characters
+ // in a workspace edit to the client-specified new line
+ // character.
+ //
+ // @since 3.16.0
+ NormalizesLineEndings bool `json:"normalizesLineEndings,omitempty"`
+ // Whether the client in general supports change annotations on text edits,
+ // create file, rename file and delete file changes.
+ //
+ // @since 3.16.0
+ ChangeAnnotationSupport *PChangeAnnotationSupportPWorkspaceEdit `json:"changeAnnotationSupport,omitempty"`
+}
+
+// A workspace folder inside a client.
+type WorkspaceFolder struct { // line 2163
+ // The associated URI for this workspace folder.
+ URI URI `json:"uri"`
+ // The name of the workspace folder. Used to refer to this
+ // workspace folder in the user interface.
+ Name string `json:"name"`
+}
+type WorkspaceFolders5Gn struct { // line 9933
+ // The server has support for workspace folders
+ Supported bool `json:"supported,omitempty"`
+ // Whether the server wants to receive workspace folder
+ // change notifications.
+ //
+ // If a string is provided the string is treated as an ID
+ // under which the notification is registered on the client
+ // side. The ID can be used to unregister for these events
+ // using the `client/unregisterCapability` request.
+ ChangeNotifications string `json:"changeNotifications,omitempty"`
+}
+
+// The workspace folder change event.
+type WorkspaceFoldersChangeEvent struct { // line 6368
+ // The array of added workspace folders
+ Added []WorkspaceFolder `json:"added"`
+ // The array of the removed workspace folders
+ Removed []WorkspaceFolder `json:"removed"`
+}
+type WorkspaceFoldersInitializeParams struct { // line 7782
+ // The workspace folders configured in the client when the server starts.
+ //
+ // This property is only available if the client supports workspace folders.
+ // It can be `null` if the client supports workspace folders but none are
+ // configured.
+ //
+ // @since 3.6.0
+ WorkspaceFolders []WorkspaceFolder `json:"workspaceFolders,omitempty"`
+}
+type WorkspaceFoldersServerCapabilities struct { // line 9933
+ // The server has support for workspace folders
+ Supported bool `json:"supported,omitempty"`
+ // Whether the server wants to receive workspace folder
+ // change notifications.
+ //
+ // If a string is provided the string is treated as an ID
+ // under which the notification is registered on the client
+ // side. The ID can be used to unregister for these events
+ // using the `client/unregisterCapability` request.
+ ChangeNotifications string `json:"changeNotifications,omitempty"`
+}
+
+// A full document diagnostic report for a workspace diagnostic result.
+//
+// @since 3.17.0
+type WorkspaceFullDocumentDiagnosticReport struct { // line 9522
+ // The URI for which diagnostic information is reported.
+ URI DocumentURI `json:"uri"`
+ // The version number for which the diagnostics are reported.
+ // If the document is not marked as open `null` can be provided.
+ Version int32 `json:"version"`
+ FullDocumentDiagnosticReport
+}
+
+// A special workspace symbol that supports locations without a range.
+//
+// See also SymbolInformation.
+//
+// @since 3.17.0
+type WorkspaceSymbol struct { // line 5515
+ // The location of the symbol. Whether a server is allowed to
+ // return a location without a range depends on the client
+ // capability `workspace.symbol.resolveSupport`.
+ //
+ // See SymbolInformation#location for more details.
+ Location OrPLocation_workspace_symbol `json:"location"`
+ // A data entry field that is preserved on a workspace symbol between a
+ // workspace symbol request and a workspace symbol resolve request.
+ Data interface{} `json:"data,omitempty"`
+ BaseSymbolInformation
+}
+
+// Client capabilities for a {@link WorkspaceSymbolRequest}.
+type WorkspaceSymbolClientCapabilities struct { // line 10875
+ // Symbol request supports dynamic registration.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+ // Specific capabilities for the `SymbolKind` in the `workspace/symbol` request.
+ SymbolKind *PSymbolKindPSymbol `json:"symbolKind,omitempty"`
+ // The client supports tags on `SymbolInformation`.
+ // Clients supporting tags have to handle unknown tags gracefully.
+ //
+ // @since 3.16.0
+ TagSupport *PTagSupportPSymbol `json:"tagSupport,omitempty"`
+ // The client support partial workspace symbols. The client will send the
+ // request `workspaceSymbol/resolve` to the server to resolve additional
+ // properties.
+ //
+ // @since 3.17.0
+ ResolveSupport *PResolveSupportPSymbol `json:"resolveSupport,omitempty"`
+}
+
+// Server capabilities for a {@link WorkspaceSymbolRequest}.
+type WorkspaceSymbolOptions struct { // line 9105
+ // The server provides support to resolve additional
+ // information for a workspace symbol.
+ //
+ // @since 3.17.0
+ ResolveProvider bool `json:"resolveProvider,omitempty"`
+ WorkDoneProgressOptions
+}
+
+// The parameters of a {@link WorkspaceSymbolRequest}.
+type WorkspaceSymbolParams struct { // line 5491
+ // A query string to filter symbols by. Clients may send an empty
+ // string here to request all symbols.
+ Query string `json:"query"`
+ WorkDoneProgressParams
+ PartialResultParams
+}
+
+// Registration options for a {@link WorkspaceSymbolRequest}.
+type WorkspaceSymbolRegistrationOptions struct { // line 5564
+ WorkspaceSymbolOptions
+}
+
+// An unchanged document diagnostic report for a workspace diagnostic result.
+//
+// @since 3.17.0
+type WorkspaceUnchangedDocumentDiagnosticReport struct { // line 9560
+ // The URI for which diagnostic information is reported.
+ URI DocumentURI `json:"uri"`
+ // The version number for which the diagnostics are reported.
+ // If the document is not marked as open `null` can be provided.
+ Version int32 `json:"version"`
+ UnchangedDocumentDiagnosticReport
+}
+
+// The initialize parameters
+type XInitializeParams struct { // line 7650
+ // The process Id of the parent process that started
+ // the server.
+ //
+ // Is `null` if the process has not been started by another process.
+ // If the parent process is not alive then the server should exit.
+ ProcessID int32 `json:"processId"`
+ // Information about the client
+ //
+ // @since 3.15.0
+ ClientInfo *Msg_XInitializeParams_clientInfo `json:"clientInfo,omitempty"`
+ // The locale the client is currently showing the user interface
+ // in. This must not necessarily be the locale of the operating
+ // system.
+ //
+ // Uses IETF language tags as the value's syntax
+ // (See https://en.wikipedia.org/wiki/IETF_language_tag)
+ //
+ // @since 3.16.0
+ Locale string `json:"locale,omitempty"`
+ // The rootPath of the workspace. Is null
+ // if no folder is open.
+ //
+ // @deprecated in favour of rootUri.
+ RootPath string `json:"rootPath,omitempty"`
+ // The rootUri of the workspace. Is null if no
+ // folder is open. If both `rootPath` and `rootUri` are set
+ // `rootUri` wins.
+ //
+ // @deprecated in favour of workspaceFolders.
+ RootURI DocumentURI `json:"rootUri"`
+ // The capabilities provided by the client (editor or tool)
+ Capabilities ClientCapabilities `json:"capabilities"`
+ // User provided initialization options.
+ InitializationOptions interface{} `json:"initializationOptions,omitempty"`
+ // The initial trace setting. If omitted trace is disabled ('off').
+ Trace *TraceValues `json:"trace,omitempty"`
+ WorkDoneProgressParams
+}
+
+// The initialize parameters
+type _InitializeParams struct { // line 7650
+ // The process Id of the parent process that started
+ // the server.
+ //
+ // Is `null` if the process has not been started by another process.
+ // If the parent process is not alive then the server should exit.
+ ProcessID int32 `json:"processId"`
+ // Information about the client
+ //
+ // @since 3.15.0
+ ClientInfo *Msg_XInitializeParams_clientInfo `json:"clientInfo,omitempty"`
+ // The locale the client is currently showing the user interface
+ // in. This must not necessarily be the locale of the operating
+ // system.
+ //
+ // Uses IETF language tags as the value's syntax
+ // (See https://en.wikipedia.org/wiki/IETF_language_tag)
+ //
+ // @since 3.16.0
+ Locale string `json:"locale,omitempty"`
+ // The rootPath of the workspace. Is null
+ // if no folder is open.
+ //
+ // @deprecated in favour of rootUri.
+ RootPath string `json:"rootPath,omitempty"`
+ // The rootUri of the workspace. Is null if no
+ // folder is open. If both `rootPath` and `rootUri` are set
+ // `rootUri` wins.
+ //
+ // @deprecated in favour of workspaceFolders.
+ RootURI DocumentURI `json:"rootUri"`
+ // The capabilities provided by the client (editor or tool)
+ Capabilities ClientCapabilities `json:"capabilities"`
+ // User provided initialization options.
+ InitializationOptions interface{} `json:"initializationOptions,omitempty"`
+ // The initial trace setting. If omitted trace is disabled ('off').
+ Trace *TraceValues `json:"trace,omitempty"`
+ WorkDoneProgressParams
+}
+
+const (
+ // A set of predefined code action kinds
+ // Empty kind.
+ Empty CodeActionKind = "" // line 13333
+ // Base kind for quickfix actions: 'quickfix'
+ QuickFix CodeActionKind = "quickfix" // line 13338
+ // Base kind for refactoring actions: 'refactor'
+ Refactor CodeActionKind = "refactor" // line 13343
+ // Base kind for refactoring extraction actions: 'refactor.extract'
+ //
+ // Example extract actions:
+ //
+ //
+ // - Extract method
+ // - Extract function
+ // - Extract variable
+ // - Extract interface from class
+ // - ...
+ RefactorExtract CodeActionKind = "refactor.extract" // line 13348
+ // Base kind for refactoring inline actions: 'refactor.inline'
+ //
+ // Example inline actions:
+ //
+ //
+ // - Inline function
+ // - Inline variable
+ // - Inline constant
+ // - ...
+ RefactorInline CodeActionKind = "refactor.inline" // line 13353
+ // Base kind for refactoring rewrite actions: 'refactor.rewrite'
+ //
+ // Example rewrite actions:
+ //
+ //
+ // - Convert JavaScript function to class
+ // - Add or remove parameter
+ // - Encapsulate field
+ // - Make method static
+ // - Move method to base class
+ // - ...
+ RefactorRewrite CodeActionKind = "refactor.rewrite" // line 13358
+ // Base kind for source actions: `source`
+ //
+ // Source code actions apply to the entire file.
+ Source CodeActionKind = "source" // line 13363
+ // Base kind for an organize imports source action: `source.organizeImports`
+ SourceOrganizeImports CodeActionKind = "source.organizeImports" // line 13368
+ // Base kind for auto-fix source actions: `source.fixAll`.
+ //
+ // Fix all actions automatically fix errors that have a clear fix that do not require user input.
+ // They should not suppress errors or perform unsafe fixes such as generating new types or classes.
+ //
+ // @since 3.15.0
+ SourceFixAll CodeActionKind = "source.fixAll" // line 13373
+ // The reason why code actions were requested.
+ //
+ // @since 3.17.0
+ // Code actions were explicitly requested by the user or by an extension.
+ CodeActionInvoked CodeActionTriggerKind = 1 // line 13613
+ // Code actions were requested automatically.
+ //
+ // This typically happens when current selection in a file changes, but can
+ // also be triggered when file content changes.
+ CodeActionAutomatic CodeActionTriggerKind = 2 // line 13618
+ // The kind of a completion entry.
+ TextCompletion CompletionItemKind = 1 // line 13141
+ MethodCompletion CompletionItemKind = 2 // line 13145
+ FunctionCompletion CompletionItemKind = 3 // line 13149
+ ConstructorCompletion CompletionItemKind = 4 // line 13153
+ FieldCompletion CompletionItemKind = 5 // line 13157
+ VariableCompletion CompletionItemKind = 6 // line 13161
+ ClassCompletion CompletionItemKind = 7 // line 13165
+ InterfaceCompletion CompletionItemKind = 8 // line 13169
+ ModuleCompletion CompletionItemKind = 9 // line 13173
+ PropertyCompletion CompletionItemKind = 10 // line 13177
+ UnitCompletion CompletionItemKind = 11 // line 13181
+ ValueCompletion CompletionItemKind = 12 // line 13185
+ EnumCompletion CompletionItemKind = 13 // line 13189
+ KeywordCompletion CompletionItemKind = 14 // line 13193
+ SnippetCompletion CompletionItemKind = 15 // line 13197
+ ColorCompletion CompletionItemKind = 16 // line 13201
+ FileCompletion CompletionItemKind = 17 // line 13205
+ ReferenceCompletion CompletionItemKind = 18 // line 13209
+ FolderCompletion CompletionItemKind = 19 // line 13213
+ EnumMemberCompletion CompletionItemKind = 20 // line 13217
+ ConstantCompletion CompletionItemKind = 21 // line 13221
+ StructCompletion CompletionItemKind = 22 // line 13225
+ EventCompletion CompletionItemKind = 23 // line 13229
+ OperatorCompletion CompletionItemKind = 24 // line 13233
+ TypeParameterCompletion CompletionItemKind = 25 // line 13237
+ // Completion item tags are extra annotations that tweak the rendering of a completion
+ // item.
+ //
+ // @since 3.15.0
+ // Render a completion as obsolete, usually using a strike-out.
+ ComplDeprecated CompletionItemTag = 1 // line 13251
+ // How a completion was triggered
+ // Completion was triggered by typing an identifier (24x7 code
+ // complete), manual invocation (e.g Ctrl+Space) or via API.
+ Invoked CompletionTriggerKind = 1 // line 13562
+ // Completion was triggered by a trigger character specified by
+ // the `triggerCharacters` properties of the `CompletionRegistrationOptions`.
+ TriggerCharacter CompletionTriggerKind = 2 // line 13567
+ // Completion was re-triggered as current completion list is incomplete
+ TriggerForIncompleteCompletions CompletionTriggerKind = 3 // line 13572
+ // The diagnostic's severity.
+ // Reports an error.
+ SeverityError DiagnosticSeverity = 1 // line 13511
+ // Reports a warning.
+ SeverityWarning DiagnosticSeverity = 2 // line 13516
+ // Reports an information.
+ SeverityInformation DiagnosticSeverity = 3 // line 13521
+ // Reports a hint.
+ SeverityHint DiagnosticSeverity = 4 // line 13526
+ // The diagnostic tags.
+ //
+ // @since 3.15.0
+ // Unused or unnecessary code.
+ //
+ // Clients are allowed to render diagnostics with this tag faded out instead of having
+ // an error squiggle.
+ Unnecessary DiagnosticTag = 1 // line 13541
+ // Deprecated or obsolete code.
+ //
+ // Clients are allowed to rendered diagnostics with this tag strike through.
+ Deprecated DiagnosticTag = 2 // line 13546
+ // The document diagnostic report kinds.
+ //
+ // @since 3.17.0
+ // A diagnostic report with a full
+ // set of problems.
+ DiagnosticFull DocumentDiagnosticReportKind = "full" // line 12729
+ // A report indicating that the last
+ // returned report is still accurate.
+ DiagnosticUnchanged DocumentDiagnosticReportKind = "unchanged" // line 12734
+ // A document highlight kind.
+ // A textual occurrence.
+ Text DocumentHighlightKind = 1 // line 13308
+ // Read-access of a symbol, like reading a variable.
+ Read DocumentHighlightKind = 2 // line 13313
+ // Write-access of a symbol, like writing to a variable.
+ Write DocumentHighlightKind = 3 // line 13318
+ // Predefined error codes.
+ ParseError ErrorCodes = -32700 // line 12750
+ InvalidRequest ErrorCodes = -32600 // line 12754
+ MethodNotFound ErrorCodes = -32601 // line 12758
+ InvalidParams ErrorCodes = -32602 // line 12762
+ InternalError ErrorCodes = -32603 // line 12766
+ // Error code indicating that a server received a notification or
+ // request before the server has received the `initialize` request.
+ ServerNotInitialized ErrorCodes = -32002 // line 12770
+ UnknownErrorCode ErrorCodes = -32001 // line 12775
+ // Applying the workspace change is simply aborted if one of the changes provided
+ // fails. All operations executed before the failing operation stay executed.
+ Abort FailureHandlingKind = "abort" // line 13700
+ // All operations are executed transactional. That means they either all
+ // succeed or no changes at all are applied to the workspace.
+ Transactional FailureHandlingKind = "transactional" // line 13705
+ // If the workspace edit contains only textual file changes they are executed transactional.
+ // If resource changes (create, rename or delete file) are part of the change the failure
+ // handling strategy is abort.
+ TextOnlyTransactional FailureHandlingKind = "textOnlyTransactional" // line 13710
+ // The client tries to undo the operations already executed. But there is no
+ // guarantee that this is succeeding.
+ Undo FailureHandlingKind = "undo" // line 13715
+ // The file event type
+ // The file got created.
+ Created FileChangeType = 1 // line 13461
+ // The file got changed.
+ Changed FileChangeType = 2 // line 13466
+ // The file got deleted.
+ Deleted FileChangeType = 3 // line 13471
+ // A pattern kind describing if a glob pattern matches a file a folder or
+ // both.
+ //
+ // @since 3.16.0
+ // The pattern matches a file only.
+ FilePattern FileOperationPatternKind = "file" // line 13634
+ // The pattern matches a folder only.
+ FolderPattern FileOperationPatternKind = "folder" // line 13639
+ // A set of predefined range kinds.
+ // Folding range for a comment
+ Comment FoldingRangeKind = "comment" // line 12822
+ // Folding range for an import or include
+ Imports FoldingRangeKind = "imports" // line 12827
+ // Folding range for a region (e.g. `#region`)
+ Region FoldingRangeKind = "region" // line 12832
+ // Inlay hint kinds.
+ //
+ // @since 3.17.0
+ // An inlay hint that for a type annotation.
+ Type InlayHintKind = 1 // line 13040
+ // An inlay hint that is for a parameter.
+ Parameter InlayHintKind = 2 // line 13045
+ // Defines whether the insert text in a completion item should be interpreted as
+ // plain text or a snippet.
+ // The primary text to be inserted is treated as a plain string.
+ PlainTextTextFormat InsertTextFormat = 1 // line 13267
+ // The primary text to be inserted is treated as a snippet.
+ //
+ // A snippet can define tab stops and placeholders with `$1`, `$2`
+ // and `${3:foo}`. `$0` defines the final tab stop, it defaults to
+ // the end of the snippet. Placeholders with equal identifiers are linked,
+ // that is typing in one will update others too.
+ //
+ // See also: https://microsoft.github.io/language-server-protocol/specifications/specification-current/#snippet_syntax
+ SnippetTextFormat InsertTextFormat = 2 // line 13272
+ // How whitespace and indentation is handled during completion
+ // item insertion.
+ //
+ // @since 3.16.0
+ // The insertion or replace strings is taken as it is. If the
+ // value is multi line the lines below the cursor will be
+ // inserted using the indentation defined in the string value.
+ // The client will not apply any kind of adjustments to the
+ // string.
+ AsIs InsertTextMode = 1 // line 13287
+ // The editor adjusts leading whitespace of new lines so that
+ // they match the indentation up to the cursor of the line for
+ // which the item is accepted.
+ //
+ // Consider a line like this: <2tabs><cursor><3tabs>foo. Accepting a
+ // multi line completion item is indented using 2 tabs and all
+ // following lines inserted will be indented using 2 tabs as well.
+ AdjustIndentation InsertTextMode = 2 // line 13292
+ // A request failed but it was syntactically correct, e.g the
+ // method name was known and the parameters were valid. The error
+ // message should contain human readable information about why
+ // the request failed.
+ //
+ // @since 3.17.0
+ RequestFailed LSPErrorCodes = -32803 // line 12790
+ // The server cancelled the request. This error code should
+ // only be used for requests that explicitly support being
+ // server cancellable.
+ //
+ // @since 3.17.0
+ ServerCancelled LSPErrorCodes = -32802 // line 12796
+ // The server detected that the content of a document got
+ // modified outside normal conditions. A server should
+ // NOT send this error code if it detects a content change
+ // in it unprocessed messages. The result even computed
+ // on an older state might still be useful for the client.
+ //
+ // If a client decides that a result is not of any use anymore
+ // the client should cancel the request.
+ ContentModified LSPErrorCodes = -32801 // line 12802
+ // The client has canceled a request and a server as detected
+ // the cancel.
+ RequestCancelled LSPErrorCodes = -32800 // line 12807
+ // Describes the content type that a client supports in various
+ // result literals like `Hover`, `ParameterInfo` or `CompletionItem`.
+ //
+ // Please note that `MarkupKinds` must not start with a `$`. This kinds
+ // are reserved for internal usage.
+ // Plain text is supported as a content format
+ PlainText MarkupKind = "plaintext" // line 13414
+ // Markdown is supported as a content format
+ Markdown MarkupKind = "markdown" // line 13419
+ // The message type
+ // An error message.
+ Error MessageType = 1 // line 13061
+ // A warning message.
+ Warning MessageType = 2 // line 13066
+ // An information message.
+ Info MessageType = 3 // line 13071
+ // A log message.
+ Log MessageType = 4 // line 13076
+ // The moniker kind.
+ //
+ // @since 3.16.0
+ // The moniker represent a symbol that is imported into a project
+ Import MonikerKind = "import" // line 13014
+ // The moniker represents a symbol that is exported from a project
+ Export MonikerKind = "export" // line 13019
+ // The moniker represents a symbol that is local to a project (e.g. a local
+ // variable of a function, a class not visible outside the project, ...)
+ Local MonikerKind = "local" // line 13024
+ // A notebook cell kind.
+ //
+ // @since 3.17.0
+ // A markup-cell is formatted source that is used for display.
+ Markup NotebookCellKind = 1 // line 13655
+ // A code-cell is source code.
+ Code NotebookCellKind = 2 // line 13660
+ // A set of predefined position encoding kinds.
+ //
+ // @since 3.17.0
+ // Character offsets count UTF-8 code units.
+ UTF8 PositionEncodingKind = "utf-8" // line 13434
+ // Character offsets count UTF-16 code units.
+ //
+ // This is the default and must always be supported
+ // by servers
+ UTF16 PositionEncodingKind = "utf-16" // line 13439
+ // Character offsets count UTF-32 code units.
+ //
+ // Implementation note: these are the same as Unicode code points,
+ // so this `PositionEncodingKind` may also be used for an
+ // encoding-agnostic representation of character offsets.
+ UTF32 PositionEncodingKind = "utf-32" // line 13444
+ // The client's default behavior is to select the identifier
+ // according the to language's syntax rule.
+ Identifier PrepareSupportDefaultBehavior = 1 // line 13729
+ // Supports creating new files and folders.
+ Create ResourceOperationKind = "create" // line 13676
+ // Supports renaming existing files and folders.
+ Rename ResourceOperationKind = "rename" // line 13681
+ // Supports deleting existing files and folders.
+ Delete ResourceOperationKind = "delete" // line 13686
+ // A set of predefined token modifiers. This set is not fixed
+ // an clients can specify additional token types via the
+ // corresponding client capabilities.
+ //
+ // @since 3.16.0
+ ModDeclaration SemanticTokenModifiers = "declaration" // line 12677
+ ModDefinition SemanticTokenModifiers = "definition" // line 12681
+ ModReadonly SemanticTokenModifiers = "readonly" // line 12685
+ ModStatic SemanticTokenModifiers = "static" // line 12689
+ ModDeprecated SemanticTokenModifiers = "deprecated" // line 12693
+ ModAbstract SemanticTokenModifiers = "abstract" // line 12697
+ ModAsync SemanticTokenModifiers = "async" // line 12701
+ ModModification SemanticTokenModifiers = "modification" // line 12705
+ ModDocumentation SemanticTokenModifiers = "documentation" // line 12709
+ ModDefaultLibrary SemanticTokenModifiers = "defaultLibrary" // line 12713
+ // A set of predefined token types. This set is not fixed
+ // an clients can specify additional token types via the
+ // corresponding client capabilities.
+ //
+ // @since 3.16.0
+ NamespaceType SemanticTokenTypes = "namespace" // line 12570
+ // Represents a generic type. Acts as a fallback for types which can't be mapped to
+ // a specific type like class or enum.
+ TypeType SemanticTokenTypes = "type" // line 12574
+ ClassType SemanticTokenTypes = "class" // line 12579
+ EnumType SemanticTokenTypes = "enum" // line 12583
+ InterfaceType SemanticTokenTypes = "interface" // line 12587
+ StructType SemanticTokenTypes = "struct" // line 12591
+ TypeParameterType SemanticTokenTypes = "typeParameter" // line 12595
+ ParameterType SemanticTokenTypes = "parameter" // line 12599
+ VariableType SemanticTokenTypes = "variable" // line 12603
+ PropertyType SemanticTokenTypes = "property" // line 12607
+ EnumMemberType SemanticTokenTypes = "enumMember" // line 12611
+ EventType SemanticTokenTypes = "event" // line 12615
+ FunctionType SemanticTokenTypes = "function" // line 12619
+ MethodType SemanticTokenTypes = "method" // line 12623
+ MacroType SemanticTokenTypes = "macro" // line 12627
+ KeywordType SemanticTokenTypes = "keyword" // line 12631
+ ModifierType SemanticTokenTypes = "modifier" // line 12635
+ CommentType SemanticTokenTypes = "comment" // line 12639
+ StringType SemanticTokenTypes = "string" // line 12643
+ NumberType SemanticTokenTypes = "number" // line 12647
+ RegexpType SemanticTokenTypes = "regexp" // line 12651
+ OperatorType SemanticTokenTypes = "operator" // line 12655
+ // @since 3.17.0
+ DecoratorType SemanticTokenTypes = "decorator" // line 12659
+ // How a signature help was triggered.
+ //
+ // @since 3.15.0
+ // Signature help was invoked manually by the user or by a command.
+ SigInvoked SignatureHelpTriggerKind = 1 // line 13587
+ // Signature help was triggered by a trigger character.
+ SigTriggerCharacter SignatureHelpTriggerKind = 2 // line 13592
+ // Signature help was triggered by the cursor moving or by the document content changing.
+ SigContentChange SignatureHelpTriggerKind = 3 // line 13597
+ // A symbol kind.
+ File SymbolKind = 1 // line 12848
+ Module SymbolKind = 2 // line 12852
+ Namespace SymbolKind = 3 // line 12856
+ Package SymbolKind = 4 // line 12860
+ Class SymbolKind = 5 // line 12864
+ Method SymbolKind = 6 // line 12868
+ Property SymbolKind = 7 // line 12872
+ Field SymbolKind = 8 // line 12876
+ Constructor SymbolKind = 9 // line 12880
+ Enum SymbolKind = 10 // line 12884
+ Interface SymbolKind = 11 // line 12888
+ Function SymbolKind = 12 // line 12892
+ Variable SymbolKind = 13 // line 12896
+ Constant SymbolKind = 14 // line 12900
+ String SymbolKind = 15 // line 12904
+ Number SymbolKind = 16 // line 12908
+ Boolean SymbolKind = 17 // line 12912
+ Array SymbolKind = 18 // line 12916
+ Object SymbolKind = 19 // line 12920
+ Key SymbolKind = 20 // line 12924
+ Null SymbolKind = 21 // line 12928
+ EnumMember SymbolKind = 22 // line 12932
+ Struct SymbolKind = 23 // line 12936
+ Event SymbolKind = 24 // line 12940
+ Operator SymbolKind = 25 // line 12944
+ TypeParameter SymbolKind = 26 // line 12948
+ // Symbol tags are extra annotations that tweak the rendering of a symbol.
+ //
+ // @since 3.16
+ // Render a symbol as obsolete, usually using a strike-out.
+ DeprecatedSymbol SymbolTag = 1 // line 12962
+ // Represents reasons why a text document is saved.
+ // Manually triggered, e.g. by the user pressing save, by starting debugging,
+ // or by an API call.
+ Manual TextDocumentSaveReason = 1 // line 13116
+ // Automatic after a delay.
+ AfterDelay TextDocumentSaveReason = 2 // line 13121
+ // When the editor lost focus.
+ FocusOut TextDocumentSaveReason = 3 // line 13126
+ // Defines how the host (editor) should sync
+ // document changes to the language server.
+ // Documents should not be synced at all.
+ None TextDocumentSyncKind = 0 // line 13091
+ // Documents are synced by always sending the full content
+ // of the document.
+ Full TextDocumentSyncKind = 1 // line 13096
+ // Documents are synced by sending the full content on open.
+ // After that only incremental updates to the document are
+ // send.
+ Incremental TextDocumentSyncKind = 2 // line 13101
+ Relative TokenFormat = "relative" // line 13743
+ // Turn tracing off.
+ Off TraceValues = "off" // line 13390
+ // Trace messages only.
+ Messages TraceValues = "messages" // line 13395
+ // Verbose message tracing.
+ Verbose TraceValues = "verbose" // line 13400
+ // Moniker uniqueness level to define scope of the moniker.
+ //
+ // @since 3.16.0
+ // The moniker is only unique inside a document
+ Document UniquenessLevel = "document" // line 12978
+ // The moniker is unique inside a project for which a dump got created
+ Project UniquenessLevel = "project" // line 12983
+ // The moniker is unique inside the group to which a project belongs
+ Group UniquenessLevel = "group" // line 12988
+ // The moniker is unique inside the moniker scheme.
+ Scheme UniquenessLevel = "scheme" // line 12993
+ // The moniker is globally unique
+ Global UniquenessLevel = "global" // line 12998
+ // Interested in create events.
+ WatchCreate WatchKind = 1 // line 13486
+ // Interested in change events
+ WatchChange WatchKind = 2 // line 13491
+ // Interested in delete events
+ WatchDelete WatchKind = 4 // line 13496
+)
diff --git a/gopls/internal/lsp/protocol/tsserver.go b/gopls/internal/lsp/protocol/tsserver.go
new file mode 100644
index 000000000..e02e1fdb9
--- /dev/null
+++ b/gopls/internal/lsp/protocol/tsserver.go
@@ -0,0 +1,1160 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated for LSP. DO NOT EDIT.
+
+package protocol
+
+// Code generated from protocol/metaModel.json at ref release/protocol/3.17.3-next.6 (hash 56c23c557e3568a9f56f42435fd5a80f9458957f).
+// https://github.com/microsoft/vscode-languageserver-node/blob/release/protocol/3.17.3-next.6/protocol/metaModel.json
+// LSP metaData.version = 3.17.0.
+
+import (
+ "context"
+ "encoding/json"
+
+ "golang.org/x/tools/internal/jsonrpc2"
+)
+
+type Server interface {
+ Progress(context.Context, *ProgressParams) error // $/progress
+ SetTrace(context.Context, *SetTraceParams) error // $/setTrace
+ IncomingCalls(context.Context, *CallHierarchyIncomingCallsParams) ([]CallHierarchyIncomingCall, error) // callHierarchy/incomingCalls
+ OutgoingCalls(context.Context, *CallHierarchyOutgoingCallsParams) ([]CallHierarchyOutgoingCall, error) // callHierarchy/outgoingCalls
+ ResolveCodeAction(context.Context, *CodeAction) (*CodeAction, error) // codeAction/resolve
+ ResolveCodeLens(context.Context, *CodeLens) (*CodeLens, error) // codeLens/resolve
+ ResolveCompletionItem(context.Context, *CompletionItem) (*CompletionItem, error) // completionItem/resolve
+ ResolveDocumentLink(context.Context, *DocumentLink) (*DocumentLink, error) // documentLink/resolve
+ Exit(context.Context) error // exit
+ Initialize(context.Context, *ParamInitialize) (*InitializeResult, error) // initialize
+ Initialized(context.Context, *InitializedParams) error // initialized
+ Resolve(context.Context, *InlayHint) (*InlayHint, error) // inlayHint/resolve
+ DidChangeNotebookDocument(context.Context, *DidChangeNotebookDocumentParams) error // notebookDocument/didChange
+ DidCloseNotebookDocument(context.Context, *DidCloseNotebookDocumentParams) error // notebookDocument/didClose
+ DidOpenNotebookDocument(context.Context, *DidOpenNotebookDocumentParams) error // notebookDocument/didOpen
+ DidSaveNotebookDocument(context.Context, *DidSaveNotebookDocumentParams) error // notebookDocument/didSave
+ Shutdown(context.Context) error // shutdown
+ CodeAction(context.Context, *CodeActionParams) ([]CodeAction, error) // textDocument/codeAction
+ CodeLens(context.Context, *CodeLensParams) ([]CodeLens, error) // textDocument/codeLens
+ ColorPresentation(context.Context, *ColorPresentationParams) ([]ColorPresentation, error) // textDocument/colorPresentation
+ Completion(context.Context, *CompletionParams) (*CompletionList, error) // textDocument/completion
+ Declaration(context.Context, *DeclarationParams) (*Or_textDocument_declaration, error) // textDocument/declaration
+ Definition(context.Context, *DefinitionParams) ([]Location, error) // textDocument/definition
+ Diagnostic(context.Context, *string) (*string, error) // textDocument/diagnostic
+ DidChange(context.Context, *DidChangeTextDocumentParams) error // textDocument/didChange
+ DidClose(context.Context, *DidCloseTextDocumentParams) error // textDocument/didClose
+ DidOpen(context.Context, *DidOpenTextDocumentParams) error // textDocument/didOpen
+ DidSave(context.Context, *DidSaveTextDocumentParams) error // textDocument/didSave
+ DocumentColor(context.Context, *DocumentColorParams) ([]ColorInformation, error) // textDocument/documentColor
+ DocumentHighlight(context.Context, *DocumentHighlightParams) ([]DocumentHighlight, error) // textDocument/documentHighlight
+ DocumentLink(context.Context, *DocumentLinkParams) ([]DocumentLink, error) // textDocument/documentLink
+ DocumentSymbol(context.Context, *DocumentSymbolParams) ([]interface{}, error) // textDocument/documentSymbol
+ FoldingRange(context.Context, *FoldingRangeParams) ([]FoldingRange, error) // textDocument/foldingRange
+ Formatting(context.Context, *DocumentFormattingParams) ([]TextEdit, error) // textDocument/formatting
+ Hover(context.Context, *HoverParams) (*Hover, error) // textDocument/hover
+ Implementation(context.Context, *ImplementationParams) ([]Location, error) // textDocument/implementation
+ InlayHint(context.Context, *InlayHintParams) ([]InlayHint, error) // textDocument/inlayHint
+ InlineValue(context.Context, *InlineValueParams) ([]InlineValue, error) // textDocument/inlineValue
+ LinkedEditingRange(context.Context, *LinkedEditingRangeParams) (*LinkedEditingRanges, error) // textDocument/linkedEditingRange
+ Moniker(context.Context, *MonikerParams) ([]Moniker, error) // textDocument/moniker
+ OnTypeFormatting(context.Context, *DocumentOnTypeFormattingParams) ([]TextEdit, error) // textDocument/onTypeFormatting
+ PrepareCallHierarchy(context.Context, *CallHierarchyPrepareParams) ([]CallHierarchyItem, error) // textDocument/prepareCallHierarchy
+ PrepareRename(context.Context, *PrepareRenameParams) (*PrepareRename2Gn, error) // textDocument/prepareRename
+ PrepareTypeHierarchy(context.Context, *TypeHierarchyPrepareParams) ([]TypeHierarchyItem, error) // textDocument/prepareTypeHierarchy
+ RangeFormatting(context.Context, *DocumentRangeFormattingParams) ([]TextEdit, error) // textDocument/rangeFormatting
+ References(context.Context, *ReferenceParams) ([]Location, error) // textDocument/references
+ Rename(context.Context, *RenameParams) (*WorkspaceEdit, error) // textDocument/rename
+ SelectionRange(context.Context, *SelectionRangeParams) ([]SelectionRange, error) // textDocument/selectionRange
+ SemanticTokensFull(context.Context, *SemanticTokensParams) (*SemanticTokens, error) // textDocument/semanticTokens/full
+ SemanticTokensFullDelta(context.Context, *SemanticTokensDeltaParams) (interface{}, error) // textDocument/semanticTokens/full/delta
+ SemanticTokensRange(context.Context, *SemanticTokensRangeParams) (*SemanticTokens, error) // textDocument/semanticTokens/range
+ SignatureHelp(context.Context, *SignatureHelpParams) (*SignatureHelp, error) // textDocument/signatureHelp
+ TypeDefinition(context.Context, *TypeDefinitionParams) ([]Location, error) // textDocument/typeDefinition
+ WillSave(context.Context, *WillSaveTextDocumentParams) error // textDocument/willSave
+ WillSaveWaitUntil(context.Context, *WillSaveTextDocumentParams) ([]TextEdit, error) // textDocument/willSaveWaitUntil
+ Subtypes(context.Context, *TypeHierarchySubtypesParams) ([]TypeHierarchyItem, error) // typeHierarchy/subtypes
+ Supertypes(context.Context, *TypeHierarchySupertypesParams) ([]TypeHierarchyItem, error) // typeHierarchy/supertypes
+ WorkDoneProgressCancel(context.Context, *WorkDoneProgressCancelParams) error // window/workDoneProgress/cancel
+ DiagnosticWorkspace(context.Context, *WorkspaceDiagnosticParams) (*WorkspaceDiagnosticReport, error) // workspace/diagnostic
+ DidChangeConfiguration(context.Context, *DidChangeConfigurationParams) error // workspace/didChangeConfiguration
+ DidChangeWatchedFiles(context.Context, *DidChangeWatchedFilesParams) error // workspace/didChangeWatchedFiles
+ DidChangeWorkspaceFolders(context.Context, *DidChangeWorkspaceFoldersParams) error // workspace/didChangeWorkspaceFolders
+ DidCreateFiles(context.Context, *CreateFilesParams) error // workspace/didCreateFiles
+ DidDeleteFiles(context.Context, *DeleteFilesParams) error // workspace/didDeleteFiles
+ DidRenameFiles(context.Context, *RenameFilesParams) error // workspace/didRenameFiles
+ ExecuteCommand(context.Context, *ExecuteCommandParams) (interface{}, error) // workspace/executeCommand
+ Symbol(context.Context, *WorkspaceSymbolParams) ([]SymbolInformation, error) // workspace/symbol
+ WillCreateFiles(context.Context, *CreateFilesParams) (*WorkspaceEdit, error) // workspace/willCreateFiles
+ WillDeleteFiles(context.Context, *DeleteFilesParams) (*WorkspaceEdit, error) // workspace/willDeleteFiles
+ WillRenameFiles(context.Context, *RenameFilesParams) (*WorkspaceEdit, error) // workspace/willRenameFiles
+ ResolveWorkspaceSymbol(context.Context, *WorkspaceSymbol) (*WorkspaceSymbol, error) // workspaceSymbol/resolve
+ NonstandardRequest(ctx context.Context, method string, params interface{}) (interface{}, error)
+}
+
+func serverDispatch(ctx context.Context, server Server, reply jsonrpc2.Replier, r jsonrpc2.Request) (bool, error) {
+ switch r.Method() {
+ case "$/progress":
+ var params ProgressParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ err := server.Progress(ctx, &params)
+ return true, reply(ctx, nil, err)
+ case "$/setTrace":
+ var params SetTraceParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ err := server.SetTrace(ctx, &params)
+ return true, reply(ctx, nil, err)
+ case "callHierarchy/incomingCalls":
+ var params CallHierarchyIncomingCallsParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.IncomingCalls(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "callHierarchy/outgoingCalls":
+ var params CallHierarchyOutgoingCallsParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.OutgoingCalls(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "codeAction/resolve":
+ var params CodeAction
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.ResolveCodeAction(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "codeLens/resolve":
+ var params CodeLens
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.ResolveCodeLens(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "completionItem/resolve":
+ var params CompletionItem
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.ResolveCompletionItem(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "documentLink/resolve":
+ var params DocumentLink
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.ResolveDocumentLink(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "exit":
+ err := server.Exit(ctx)
+ return true, reply(ctx, nil, err)
+ case "initialize":
+ var params ParamInitialize
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.Initialize(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "initialized":
+ var params InitializedParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ err := server.Initialized(ctx, &params)
+ return true, reply(ctx, nil, err)
+ case "inlayHint/resolve":
+ var params InlayHint
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.Resolve(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "notebookDocument/didChange":
+ var params DidChangeNotebookDocumentParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ err := server.DidChangeNotebookDocument(ctx, &params)
+ return true, reply(ctx, nil, err)
+ case "notebookDocument/didClose":
+ var params DidCloseNotebookDocumentParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ err := server.DidCloseNotebookDocument(ctx, &params)
+ return true, reply(ctx, nil, err)
+ case "notebookDocument/didOpen":
+ var params DidOpenNotebookDocumentParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ err := server.DidOpenNotebookDocument(ctx, &params)
+ return true, reply(ctx, nil, err)
+ case "notebookDocument/didSave":
+ var params DidSaveNotebookDocumentParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ err := server.DidSaveNotebookDocument(ctx, &params)
+ return true, reply(ctx, nil, err)
+ case "shutdown":
+ err := server.Shutdown(ctx)
+ return true, reply(ctx, nil, err)
+ case "textDocument/codeAction":
+ var params CodeActionParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.CodeAction(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "textDocument/codeLens":
+ var params CodeLensParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.CodeLens(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "textDocument/colorPresentation":
+ var params ColorPresentationParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.ColorPresentation(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "textDocument/completion":
+ var params CompletionParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.Completion(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "textDocument/declaration":
+ var params DeclarationParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.Declaration(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "textDocument/definition":
+ var params DefinitionParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.Definition(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "textDocument/diagnostic":
+ var params string
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.Diagnostic(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "textDocument/didChange":
+ var params DidChangeTextDocumentParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ err := server.DidChange(ctx, &params)
+ return true, reply(ctx, nil, err)
+ case "textDocument/didClose":
+ var params DidCloseTextDocumentParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ err := server.DidClose(ctx, &params)
+ return true, reply(ctx, nil, err)
+ case "textDocument/didOpen":
+ var params DidOpenTextDocumentParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ err := server.DidOpen(ctx, &params)
+ return true, reply(ctx, nil, err)
+ case "textDocument/didSave":
+ var params DidSaveTextDocumentParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ err := server.DidSave(ctx, &params)
+ return true, reply(ctx, nil, err)
+ case "textDocument/documentColor":
+ var params DocumentColorParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.DocumentColor(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "textDocument/documentHighlight":
+ var params DocumentHighlightParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.DocumentHighlight(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "textDocument/documentLink":
+ var params DocumentLinkParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.DocumentLink(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "textDocument/documentSymbol":
+ var params DocumentSymbolParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.DocumentSymbol(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "textDocument/foldingRange":
+ var params FoldingRangeParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.FoldingRange(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "textDocument/formatting":
+ var params DocumentFormattingParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.Formatting(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "textDocument/hover":
+ var params HoverParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.Hover(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "textDocument/implementation":
+ var params ImplementationParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.Implementation(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "textDocument/inlayHint":
+ var params InlayHintParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.InlayHint(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "textDocument/inlineValue":
+ var params InlineValueParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.InlineValue(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "textDocument/linkedEditingRange":
+ var params LinkedEditingRangeParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.LinkedEditingRange(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "textDocument/moniker":
+ var params MonikerParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.Moniker(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "textDocument/onTypeFormatting":
+ var params DocumentOnTypeFormattingParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.OnTypeFormatting(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "textDocument/prepareCallHierarchy":
+ var params CallHierarchyPrepareParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.PrepareCallHierarchy(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "textDocument/prepareRename":
+ var params PrepareRenameParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.PrepareRename(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "textDocument/prepareTypeHierarchy":
+ var params TypeHierarchyPrepareParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.PrepareTypeHierarchy(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "textDocument/rangeFormatting":
+ var params DocumentRangeFormattingParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.RangeFormatting(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "textDocument/references":
+ var params ReferenceParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.References(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "textDocument/rename":
+ var params RenameParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.Rename(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "textDocument/selectionRange":
+ var params SelectionRangeParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.SelectionRange(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "textDocument/semanticTokens/full":
+ var params SemanticTokensParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.SemanticTokensFull(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "textDocument/semanticTokens/full/delta":
+ var params SemanticTokensDeltaParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.SemanticTokensFullDelta(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "textDocument/semanticTokens/range":
+ var params SemanticTokensRangeParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.SemanticTokensRange(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "textDocument/signatureHelp":
+ var params SignatureHelpParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.SignatureHelp(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "textDocument/typeDefinition":
+ var params TypeDefinitionParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.TypeDefinition(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "textDocument/willSave":
+ var params WillSaveTextDocumentParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ err := server.WillSave(ctx, &params)
+ return true, reply(ctx, nil, err)
+ case "textDocument/willSaveWaitUntil":
+ var params WillSaveTextDocumentParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.WillSaveWaitUntil(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "typeHierarchy/subtypes":
+ var params TypeHierarchySubtypesParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.Subtypes(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "typeHierarchy/supertypes":
+ var params TypeHierarchySupertypesParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.Supertypes(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "window/workDoneProgress/cancel":
+ var params WorkDoneProgressCancelParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ err := server.WorkDoneProgressCancel(ctx, &params)
+ return true, reply(ctx, nil, err)
+ case "workspace/diagnostic":
+ var params WorkspaceDiagnosticParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.DiagnosticWorkspace(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "workspace/didChangeConfiguration":
+ var params DidChangeConfigurationParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ err := server.DidChangeConfiguration(ctx, &params)
+ return true, reply(ctx, nil, err)
+ case "workspace/didChangeWatchedFiles":
+ var params DidChangeWatchedFilesParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ err := server.DidChangeWatchedFiles(ctx, &params)
+ return true, reply(ctx, nil, err)
+ case "workspace/didChangeWorkspaceFolders":
+ var params DidChangeWorkspaceFoldersParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ err := server.DidChangeWorkspaceFolders(ctx, &params)
+ return true, reply(ctx, nil, err)
+ case "workspace/didCreateFiles":
+ var params CreateFilesParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ err := server.DidCreateFiles(ctx, &params)
+ return true, reply(ctx, nil, err)
+ case "workspace/didDeleteFiles":
+ var params DeleteFilesParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ err := server.DidDeleteFiles(ctx, &params)
+ return true, reply(ctx, nil, err)
+ case "workspace/didRenameFiles":
+ var params RenameFilesParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ err := server.DidRenameFiles(ctx, &params)
+ return true, reply(ctx, nil, err)
+ case "workspace/executeCommand":
+ var params ExecuteCommandParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.ExecuteCommand(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "workspace/symbol":
+ var params WorkspaceSymbolParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.Symbol(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "workspace/willCreateFiles":
+ var params CreateFilesParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.WillCreateFiles(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "workspace/willDeleteFiles":
+ var params DeleteFilesParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.WillDeleteFiles(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "workspace/willRenameFiles":
+ var params RenameFilesParams
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.WillRenameFiles(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ case "workspaceSymbol/resolve":
+ var params WorkspaceSymbol
+ if err := json.Unmarshal(r.Params(), &params); err != nil {
+ return true, sendParseError(ctx, reply, err)
+ }
+ resp, err := server.ResolveWorkspaceSymbol(ctx, &params)
+ if err != nil {
+ return true, reply(ctx, nil, err)
+ }
+ return true, reply(ctx, resp, nil)
+ default:
+ return false, nil
+ }
+}
+
+func (s *serverDispatcher) Progress(ctx context.Context, params *ProgressParams) error {
+ return s.sender.Notify(ctx, "$/progress", params)
+}
+func (s *serverDispatcher) SetTrace(ctx context.Context, params *SetTraceParams) error {
+ return s.sender.Notify(ctx, "$/setTrace", params)
+}
+func (s *serverDispatcher) IncomingCalls(ctx context.Context, params *CallHierarchyIncomingCallsParams) ([]CallHierarchyIncomingCall, error) {
+ var result []CallHierarchyIncomingCall
+ if err := s.sender.Call(ctx, "callHierarchy/incomingCalls", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) OutgoingCalls(ctx context.Context, params *CallHierarchyOutgoingCallsParams) ([]CallHierarchyOutgoingCall, error) {
+ var result []CallHierarchyOutgoingCall
+ if err := s.sender.Call(ctx, "callHierarchy/outgoingCalls", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) ResolveCodeAction(ctx context.Context, params *CodeAction) (*CodeAction, error) {
+ var result *CodeAction
+ if err := s.sender.Call(ctx, "codeAction/resolve", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) ResolveCodeLens(ctx context.Context, params *CodeLens) (*CodeLens, error) {
+ var result *CodeLens
+ if err := s.sender.Call(ctx, "codeLens/resolve", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) ResolveCompletionItem(ctx context.Context, params *CompletionItem) (*CompletionItem, error) {
+ var result *CompletionItem
+ if err := s.sender.Call(ctx, "completionItem/resolve", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) ResolveDocumentLink(ctx context.Context, params *DocumentLink) (*DocumentLink, error) {
+ var result *DocumentLink
+ if err := s.sender.Call(ctx, "documentLink/resolve", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) Exit(ctx context.Context) error {
+ return s.sender.Notify(ctx, "exit", nil)
+}
+func (s *serverDispatcher) Initialize(ctx context.Context, params *ParamInitialize) (*InitializeResult, error) {
+ var result *InitializeResult
+ if err := s.sender.Call(ctx, "initialize", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) Initialized(ctx context.Context, params *InitializedParams) error {
+ return s.sender.Notify(ctx, "initialized", params)
+}
+func (s *serverDispatcher) Resolve(ctx context.Context, params *InlayHint) (*InlayHint, error) {
+ var result *InlayHint
+ if err := s.sender.Call(ctx, "inlayHint/resolve", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) DidChangeNotebookDocument(ctx context.Context, params *DidChangeNotebookDocumentParams) error {
+ return s.sender.Notify(ctx, "notebookDocument/didChange", params)
+}
+func (s *serverDispatcher) DidCloseNotebookDocument(ctx context.Context, params *DidCloseNotebookDocumentParams) error {
+ return s.sender.Notify(ctx, "notebookDocument/didClose", params)
+}
+func (s *serverDispatcher) DidOpenNotebookDocument(ctx context.Context, params *DidOpenNotebookDocumentParams) error {
+ return s.sender.Notify(ctx, "notebookDocument/didOpen", params)
+}
+func (s *serverDispatcher) DidSaveNotebookDocument(ctx context.Context, params *DidSaveNotebookDocumentParams) error {
+ return s.sender.Notify(ctx, "notebookDocument/didSave", params)
+}
+func (s *serverDispatcher) Shutdown(ctx context.Context) error {
+ return s.sender.Call(ctx, "shutdown", nil, nil)
+}
+func (s *serverDispatcher) CodeAction(ctx context.Context, params *CodeActionParams) ([]CodeAction, error) {
+ var result []CodeAction
+ if err := s.sender.Call(ctx, "textDocument/codeAction", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) CodeLens(ctx context.Context, params *CodeLensParams) ([]CodeLens, error) {
+ var result []CodeLens
+ if err := s.sender.Call(ctx, "textDocument/codeLens", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) ColorPresentation(ctx context.Context, params *ColorPresentationParams) ([]ColorPresentation, error) {
+ var result []ColorPresentation
+ if err := s.sender.Call(ctx, "textDocument/colorPresentation", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) Completion(ctx context.Context, params *CompletionParams) (*CompletionList, error) {
+ var result *CompletionList
+ if err := s.sender.Call(ctx, "textDocument/completion", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) Declaration(ctx context.Context, params *DeclarationParams) (*Or_textDocument_declaration, error) {
+ var result *Or_textDocument_declaration
+ if err := s.sender.Call(ctx, "textDocument/declaration", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) Definition(ctx context.Context, params *DefinitionParams) ([]Location, error) {
+ var result []Location
+ if err := s.sender.Call(ctx, "textDocument/definition", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) Diagnostic(ctx context.Context, params *string) (*string, error) {
+ var result *string
+ if err := s.sender.Call(ctx, "textDocument/diagnostic", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) DidChange(ctx context.Context, params *DidChangeTextDocumentParams) error {
+ return s.sender.Notify(ctx, "textDocument/didChange", params)
+}
+func (s *serverDispatcher) DidClose(ctx context.Context, params *DidCloseTextDocumentParams) error {
+ return s.sender.Notify(ctx, "textDocument/didClose", params)
+}
+func (s *serverDispatcher) DidOpen(ctx context.Context, params *DidOpenTextDocumentParams) error {
+ return s.sender.Notify(ctx, "textDocument/didOpen", params)
+}
+func (s *serverDispatcher) DidSave(ctx context.Context, params *DidSaveTextDocumentParams) error {
+ return s.sender.Notify(ctx, "textDocument/didSave", params)
+}
+func (s *serverDispatcher) DocumentColor(ctx context.Context, params *DocumentColorParams) ([]ColorInformation, error) {
+ var result []ColorInformation
+ if err := s.sender.Call(ctx, "textDocument/documentColor", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) DocumentHighlight(ctx context.Context, params *DocumentHighlightParams) ([]DocumentHighlight, error) {
+ var result []DocumentHighlight
+ if err := s.sender.Call(ctx, "textDocument/documentHighlight", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) DocumentLink(ctx context.Context, params *DocumentLinkParams) ([]DocumentLink, error) {
+ var result []DocumentLink
+ if err := s.sender.Call(ctx, "textDocument/documentLink", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) DocumentSymbol(ctx context.Context, params *DocumentSymbolParams) ([]interface{}, error) {
+ var result []interface{}
+ if err := s.sender.Call(ctx, "textDocument/documentSymbol", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) FoldingRange(ctx context.Context, params *FoldingRangeParams) ([]FoldingRange, error) {
+ var result []FoldingRange
+ if err := s.sender.Call(ctx, "textDocument/foldingRange", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) Formatting(ctx context.Context, params *DocumentFormattingParams) ([]TextEdit, error) {
+ var result []TextEdit
+ if err := s.sender.Call(ctx, "textDocument/formatting", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) Hover(ctx context.Context, params *HoverParams) (*Hover, error) {
+ var result *Hover
+ if err := s.sender.Call(ctx, "textDocument/hover", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) Implementation(ctx context.Context, params *ImplementationParams) ([]Location, error) {
+ var result []Location
+ if err := s.sender.Call(ctx, "textDocument/implementation", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) InlayHint(ctx context.Context, params *InlayHintParams) ([]InlayHint, error) {
+ var result []InlayHint
+ if err := s.sender.Call(ctx, "textDocument/inlayHint", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) InlineValue(ctx context.Context, params *InlineValueParams) ([]InlineValue, error) {
+ var result []InlineValue
+ if err := s.sender.Call(ctx, "textDocument/inlineValue", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) LinkedEditingRange(ctx context.Context, params *LinkedEditingRangeParams) (*LinkedEditingRanges, error) {
+ var result *LinkedEditingRanges
+ if err := s.sender.Call(ctx, "textDocument/linkedEditingRange", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) Moniker(ctx context.Context, params *MonikerParams) ([]Moniker, error) {
+ var result []Moniker
+ if err := s.sender.Call(ctx, "textDocument/moniker", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) OnTypeFormatting(ctx context.Context, params *DocumentOnTypeFormattingParams) ([]TextEdit, error) {
+ var result []TextEdit
+ if err := s.sender.Call(ctx, "textDocument/onTypeFormatting", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) PrepareCallHierarchy(ctx context.Context, params *CallHierarchyPrepareParams) ([]CallHierarchyItem, error) {
+ var result []CallHierarchyItem
+ if err := s.sender.Call(ctx, "textDocument/prepareCallHierarchy", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) PrepareRename(ctx context.Context, params *PrepareRenameParams) (*PrepareRename2Gn, error) {
+ var result *PrepareRename2Gn
+ if err := s.sender.Call(ctx, "textDocument/prepareRename", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) PrepareTypeHierarchy(ctx context.Context, params *TypeHierarchyPrepareParams) ([]TypeHierarchyItem, error) {
+ var result []TypeHierarchyItem
+ if err := s.sender.Call(ctx, "textDocument/prepareTypeHierarchy", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) RangeFormatting(ctx context.Context, params *DocumentRangeFormattingParams) ([]TextEdit, error) {
+ var result []TextEdit
+ if err := s.sender.Call(ctx, "textDocument/rangeFormatting", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) References(ctx context.Context, params *ReferenceParams) ([]Location, error) {
+ var result []Location
+ if err := s.sender.Call(ctx, "textDocument/references", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) Rename(ctx context.Context, params *RenameParams) (*WorkspaceEdit, error) {
+ var result *WorkspaceEdit
+ if err := s.sender.Call(ctx, "textDocument/rename", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) SelectionRange(ctx context.Context, params *SelectionRangeParams) ([]SelectionRange, error) {
+ var result []SelectionRange
+ if err := s.sender.Call(ctx, "textDocument/selectionRange", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) SemanticTokensFull(ctx context.Context, params *SemanticTokensParams) (*SemanticTokens, error) {
+ var result *SemanticTokens
+ if err := s.sender.Call(ctx, "textDocument/semanticTokens/full", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) SemanticTokensFullDelta(ctx context.Context, params *SemanticTokensDeltaParams) (interface{}, error) {
+ var result interface{}
+ if err := s.sender.Call(ctx, "textDocument/semanticTokens/full/delta", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) SemanticTokensRange(ctx context.Context, params *SemanticTokensRangeParams) (*SemanticTokens, error) {
+ var result *SemanticTokens
+ if err := s.sender.Call(ctx, "textDocument/semanticTokens/range", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) SignatureHelp(ctx context.Context, params *SignatureHelpParams) (*SignatureHelp, error) {
+ var result *SignatureHelp
+ if err := s.sender.Call(ctx, "textDocument/signatureHelp", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) TypeDefinition(ctx context.Context, params *TypeDefinitionParams) ([]Location, error) {
+ var result []Location
+ if err := s.sender.Call(ctx, "textDocument/typeDefinition", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) WillSave(ctx context.Context, params *WillSaveTextDocumentParams) error {
+ return s.sender.Notify(ctx, "textDocument/willSave", params)
+}
+func (s *serverDispatcher) WillSaveWaitUntil(ctx context.Context, params *WillSaveTextDocumentParams) ([]TextEdit, error) {
+ var result []TextEdit
+ if err := s.sender.Call(ctx, "textDocument/willSaveWaitUntil", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) Subtypes(ctx context.Context, params *TypeHierarchySubtypesParams) ([]TypeHierarchyItem, error) {
+ var result []TypeHierarchyItem
+ if err := s.sender.Call(ctx, "typeHierarchy/subtypes", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) Supertypes(ctx context.Context, params *TypeHierarchySupertypesParams) ([]TypeHierarchyItem, error) {
+ var result []TypeHierarchyItem
+ if err := s.sender.Call(ctx, "typeHierarchy/supertypes", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) WorkDoneProgressCancel(ctx context.Context, params *WorkDoneProgressCancelParams) error {
+ return s.sender.Notify(ctx, "window/workDoneProgress/cancel", params)
+}
+func (s *serverDispatcher) DiagnosticWorkspace(ctx context.Context, params *WorkspaceDiagnosticParams) (*WorkspaceDiagnosticReport, error) {
+ var result *WorkspaceDiagnosticReport
+ if err := s.sender.Call(ctx, "workspace/diagnostic", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) DidChangeConfiguration(ctx context.Context, params *DidChangeConfigurationParams) error {
+ return s.sender.Notify(ctx, "workspace/didChangeConfiguration", params)
+}
+func (s *serverDispatcher) DidChangeWatchedFiles(ctx context.Context, params *DidChangeWatchedFilesParams) error {
+ return s.sender.Notify(ctx, "workspace/didChangeWatchedFiles", params)
+}
+func (s *serverDispatcher) DidChangeWorkspaceFolders(ctx context.Context, params *DidChangeWorkspaceFoldersParams) error {
+ return s.sender.Notify(ctx, "workspace/didChangeWorkspaceFolders", params)
+}
+func (s *serverDispatcher) DidCreateFiles(ctx context.Context, params *CreateFilesParams) error {
+ return s.sender.Notify(ctx, "workspace/didCreateFiles", params)
+}
+func (s *serverDispatcher) DidDeleteFiles(ctx context.Context, params *DeleteFilesParams) error {
+ return s.sender.Notify(ctx, "workspace/didDeleteFiles", params)
+}
+func (s *serverDispatcher) DidRenameFiles(ctx context.Context, params *RenameFilesParams) error {
+ return s.sender.Notify(ctx, "workspace/didRenameFiles", params)
+}
+func (s *serverDispatcher) ExecuteCommand(ctx context.Context, params *ExecuteCommandParams) (interface{}, error) {
+ var result interface{}
+ if err := s.sender.Call(ctx, "workspace/executeCommand", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) Symbol(ctx context.Context, params *WorkspaceSymbolParams) ([]SymbolInformation, error) {
+ var result []SymbolInformation
+ if err := s.sender.Call(ctx, "workspace/symbol", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) WillCreateFiles(ctx context.Context, params *CreateFilesParams) (*WorkspaceEdit, error) {
+ var result *WorkspaceEdit
+ if err := s.sender.Call(ctx, "workspace/willCreateFiles", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) WillDeleteFiles(ctx context.Context, params *DeleteFilesParams) (*WorkspaceEdit, error) {
+ var result *WorkspaceEdit
+ if err := s.sender.Call(ctx, "workspace/willDeleteFiles", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) WillRenameFiles(ctx context.Context, params *RenameFilesParams) (*WorkspaceEdit, error) {
+ var result *WorkspaceEdit
+ if err := s.sender.Call(ctx, "workspace/willRenameFiles", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) ResolveWorkspaceSymbol(ctx context.Context, params *WorkspaceSymbol) (*WorkspaceSymbol, error) {
+ var result *WorkspaceSymbol
+ if err := s.sender.Call(ctx, "workspaceSymbol/resolve", params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+func (s *serverDispatcher) NonstandardRequest(ctx context.Context, method string, params interface{}) (interface{}, error) {
+ var result interface{}
+ if err := s.sender.Call(ctx, method, params, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
diff --git a/gopls/internal/lsp/references.go b/gopls/internal/lsp/references.go
new file mode 100644
index 000000000..190c15741
--- /dev/null
+++ b/gopls/internal/lsp/references.go
@@ -0,0 +1,25 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsp
+
+import (
+ "context"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/lsp/template"
+)
+
+func (s *Server) references(ctx context.Context, params *protocol.ReferenceParams) ([]protocol.Location, error) {
+ snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.UnknownKind)
+ defer release()
+ if !ok {
+ return nil, err
+ }
+ if snapshot.View().FileKind(fh) == source.Tmpl {
+ return template.References(ctx, snapshot, fh, params)
+ }
+ return source.References(ctx, snapshot, fh, params.Position, params.Context.IncludeDeclaration)
+}
diff --git a/gopls/internal/lsp/regtest/doc.go b/gopls/internal/lsp/regtest/doc.go
new file mode 100644
index 000000000..4f4c7c020
--- /dev/null
+++ b/gopls/internal/lsp/regtest/doc.go
@@ -0,0 +1,157 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package regtest provides a framework for writing gopls regression tests.
+//
+// User reported regressions are often expressed in terms of editor
+// interactions. For example: "When I open my editor in this directory,
+// navigate to this file, and change this line, I get a diagnostic that doesn't
+// make sense". In these cases reproducing, diagnosing, and writing a test to
+// protect against this regression can be difficult.
+//
+// The regtest package provides an API for developers to express these types of
+// user interactions in ordinary Go tests, validate them, and run them in a
+// variety of execution modes.
+//
+// # Test package setup
+//
+// The regression test package uses a couple of uncommon patterns to reduce
+// boilerplate in test bodies. First, it is intended to be imported as "." so
+// that helpers do not need to be qualified. Second, it requires some setup
+// that is currently implemented in the regtest.Main function, which must be
+// invoked by TestMain. Therefore, a minimal regtest testing package looks
+// like this:
+//
+// package lsptests
+//
+// import (
+// "fmt"
+// "testing"
+//
+// "golang.org/x/tools/gopls/internal/hooks"
+// . "golang.org/x/tools/gopls/internal/lsp/regtest"
+// )
+//
+// func TestMain(m *testing.M) {
+// Main(m, hooks.Options)
+// }
+//
+// # Writing a simple regression test
+//
+// To run a regression test use the regtest.Run function, which accepts a
+// txtar-encoded archive defining the initial workspace state. This function
+// sets up the workspace in a temporary directory, creates a fake text editor,
+// starts gopls, and initializes an LSP session. It then invokes the provided
+// test function with an *Env handle encapsulating the newly created
+// environment. Because gopls may be run in various modes (as a sidecar or
+// daemon process, with different settings), the test runner may perform this
+// process multiple times, re-running the test function each time with a new
+// environment.
+//
+// func TestOpenFile(t *testing.T) {
+// const files = `
+// -- go.mod --
+// module mod.com
+//
+// go 1.12
+// -- foo.go --
+// package foo
+// `
+// Run(t, files, func(t *testing.T, env *Env) {
+// env.OpenFile("foo.go")
+// })
+// }
+//
+// # Configuring Regtest Execution
+//
+// The regtest package exposes several options that affect the setup process
+// described above. To use these options, use the WithOptions function:
+//
+// WithOptions(opts...).Run(...)
+//
+// See options.go for a full list of available options.
+//
+// # Operating on editor state
+//
+// To operate on editor state within the test body, the Env type provides
+// access to the workspace directory (Env.SandBox), text editor (Env.Editor),
+// LSP server (Env.Server), and 'awaiter' (Env.Awaiter).
+//
+// In most cases, operations on these primitive building blocks of the
+// regression test environment expect a Context (which should be a child of
+// env.Ctx), and return an error. To avoid boilerplate, the Env exposes a set
+// of wrappers in wrappers.go for use in scripting:
+//
+// env.CreateBuffer("c/c.go", "")
+// env.EditBuffer("c/c.go", fake.Edit{
+// Text: `package c`,
+// })
+//
+// These wrappers thread through Env.Ctx, and call t.Fatal on any errors.
+//
+// # Expressing expectations
+//
+// The general pattern for a regression test is to script interactions with the
+// fake editor and sandbox, and assert that gopls behaves correctly after each
+// state change. Unfortunately, this is complicated by the fact that state
+// changes are communicated to gopls via unidirectional client->server
+// notifications (didOpen, didChange, etc.), and resulting gopls behavior such
+// as diagnostics, logs, or messages is communicated back via server->client
+// notifications. Therefore, within regression tests we must be able to say "do
+// this, and then eventually gopls should do that". To achieve this, the
+// regtest package provides a framework for expressing conditions that must
+// eventually be met, in terms of the Expectation type.
+//
+// To express the assertion that "eventually gopls must meet these
+// expectations", use env.Await(...):
+//
+// env.RegexpReplace("x/x.go", `package x`, `package main`)
+// env.Await(env.DiagnosticAtRegexp("x/main.go", `fmt`))
+//
+// Await evaluates the provided expectations atomically, whenever the client
+// receives a state-changing notification from gopls. See expectation.go for a
+// full list of available expectations.
+//
+// A fundamental problem with this model is that if gopls never meets the
+// provided expectations, the test runner will hang until the test timeout
+// (which defaults to 10m). There are two ways to work around this poor
+// behavior:
+//
+// 1. Use a precondition to define precisely when we expect conditions to be
+// met. Gopls provides the OnceMet(precondition, expectations...) pattern
+// to express ("once this precondition is met, the following expectations
+// must all hold"). To instrument preconditions, gopls uses verbose
+// progress notifications to inform the client about ongoing work (see
+// CompletedWork). The most common precondition is to wait for gopls to be
+// done processing all change notifications, for which the regtest package
+// provides the AfterChange helper. For example:
+//
+// // We expect diagnostics to be cleared after gopls is done processing the
+// // didSave notification.
+// env.SaveBuffer("a/go.mod")
+// env.AfterChange(EmptyDiagnostics("a/go.mod"))
+//
+// 2. Set a shorter timeout during development, if you expect to be breaking
+// tests. By setting the environment variable GOPLS_REGTEST_TIMEOUT=5s,
+// regression tests will time out after 5 seconds.
+//
+// # Tips & Tricks
+//
+// Here are some tips and tricks for working with regression tests:
+//
+// 1. Set the environment variable GOPLS_REGTEST_TIMEOUT=5s during development.
+// 2. Run tests with -short. This will only run regression tests in the
+// default gopls execution mode.
+// 3. Use capture groups to narrow regexp positions. All regular-expression
+// based positions (such as DiagnosticAtRegexp) will match the position of
+// the first capture group, if any are provided. This can be used to
+// identify a specific position in the code for a pattern that may occur in
+// multiple places. For example `var (mu) sync.Mutex` matches the position
+// of "mu" within the variable declaration.
+// 4. Read diagnostics into a variable to implement more complicated
+// assertions about diagnostic state in the editor. To do this, use the
+// pattern OnceMet(precondition, ReadDiagnostics("file.go", &d)) to capture
+// the current diagnostics as soon as the precondition is met. This is
+// preferable to accessing the diagnostics directly, as it avoids races.
+package regtest
diff --git a/gopls/internal/lsp/regtest/env.go b/gopls/internal/lsp/regtest/env.go
new file mode 100644
index 000000000..91335bdab
--- /dev/null
+++ b/gopls/internal/lsp/regtest/env.go
@@ -0,0 +1,391 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package regtest
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "sync"
+ "testing"
+
+ "golang.org/x/tools/gopls/internal/lsp/fake"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/internal/jsonrpc2/servertest"
+)
+
+// Env holds the building blocks of an editor testing environment, providing
+// wrapper methods that hide the boilerplate of plumbing contexts and checking
+// errors.
+type Env struct {
+ T testing.TB // TODO(rfindley): rename to TB
+ Ctx context.Context
+
+ // Most tests should not need to access the scratch area, editor, server, or
+ // connection, but they are available if needed.
+ Sandbox *fake.Sandbox
+ Server servertest.Connector
+
+ // Editor is owned by the Env, and shut down
+ Editor *fake.Editor
+
+ Awaiter *Awaiter
+}
+
+// An Awaiter keeps track of relevant LSP state, so that it may be asserted
+// upon with Expectations.
+//
+// Wire it into a fake.Editor using Awaiter.Hooks().
+//
+// TODO(rfindley): consider simply merging Awaiter with the fake.Editor. It
+// probably is not worth its own abstraction.
+type Awaiter struct {
+ workdir *fake.Workdir
+
+ mu sync.Mutex
+ // For simplicity, each waiter gets a unique ID.
+ nextWaiterID int
+ state State
+ waiters map[int]*condition
+}
+
+func NewAwaiter(workdir *fake.Workdir) *Awaiter {
+ return &Awaiter{
+ workdir: workdir,
+ state: State{
+ diagnostics: make(map[string]*protocol.PublishDiagnosticsParams),
+ work: make(map[protocol.ProgressToken]*workProgress),
+ },
+ waiters: make(map[int]*condition),
+ }
+}
+
+// Hooks returns LSP client hooks required for awaiting asynchronous expectations.
+func (a *Awaiter) Hooks() fake.ClientHooks {
+ return fake.ClientHooks{
+ OnDiagnostics: a.onDiagnostics,
+ OnLogMessage: a.onLogMessage,
+ OnWorkDoneProgressCreate: a.onWorkDoneProgressCreate,
+ OnProgress: a.onProgress,
+ OnShowMessage: a.onShowMessage,
+ OnShowMessageRequest: a.onShowMessageRequest,
+ OnRegisterCapability: a.onRegisterCapability,
+ OnUnregisterCapability: a.onUnregisterCapability,
+ OnApplyEdit: a.onApplyEdit,
+ }
+}
+
+// State encapsulates the server state TODO: explain more
+type State struct {
+ // diagnostics are a map of relative path->diagnostics params
+ diagnostics map[string]*protocol.PublishDiagnosticsParams
+ logs []*protocol.LogMessageParams
+ showMessage []*protocol.ShowMessageParams
+ showMessageRequest []*protocol.ShowMessageRequestParams
+
+ registrations []*protocol.RegistrationParams
+ registeredCapabilities map[string]protocol.Registration
+ unregistrations []*protocol.UnregistrationParams
+ documentChanges []protocol.DocumentChanges // collected from ApplyEdit downcalls
+
+ // outstandingWork is a map of token->work summary. All tokens are assumed to
+ // be string, though the spec allows for numeric tokens as well. When work
+ // completes, it is deleted from this map.
+ work map[protocol.ProgressToken]*workProgress
+}
+
+// outstandingWork counts started but not complete work items by title.
+func (s State) outstandingWork() map[string]uint64 {
+ outstanding := make(map[string]uint64)
+ for _, work := range s.work {
+ if !work.complete {
+ outstanding[work.title]++
+ }
+ }
+ return outstanding
+}
+
+// completedWork counts complete work items by title.
+func (s State) completedWork() map[string]uint64 {
+ completed := make(map[string]uint64)
+ for _, work := range s.work {
+ if work.complete {
+ completed[work.title]++
+ }
+ }
+ return completed
+}
+
+// startedWork counts started (and possibly complete) work items.
+func (s State) startedWork() map[string]uint64 {
+ started := make(map[string]uint64)
+ for _, work := range s.work {
+ started[work.title]++
+ }
+ return started
+}
+
+type workProgress struct {
+ title, msg, endMsg string
+ percent float64
+ complete bool // seen 'end'.
+}
+
+// This method, provided for debugging, accesses mutable fields without a lock,
+// so it must not be called concurrent with any State mutation.
+func (s State) String() string {
+ var b strings.Builder
+ b.WriteString("#### log messages (see RPC logs for full text):\n")
+ for _, msg := range s.logs {
+ summary := fmt.Sprintf("%v: %q", msg.Type, msg.Message)
+ if len(summary) > 60 {
+ summary = summary[:57] + "..."
+ }
+ // Some logs are quite long, and since they should be reproduced in the RPC
+ // logs on any failure we include here just a short summary.
+ fmt.Fprint(&b, "\t"+summary+"\n")
+ }
+ b.WriteString("\n")
+ b.WriteString("#### diagnostics:\n")
+ for name, params := range s.diagnostics {
+ fmt.Fprintf(&b, "\t%s (version %d):\n", name, int(params.Version))
+ for _, d := range params.Diagnostics {
+ fmt.Fprintf(&b, "\t\t(%d, %d) [%s]: %s\n", int(d.Range.Start.Line), int(d.Range.Start.Character), d.Source, d.Message)
+ }
+ }
+ b.WriteString("\n")
+ b.WriteString("#### outstanding work:\n")
+ for token, state := range s.work {
+ if state.complete {
+ continue
+ }
+ name := state.title
+ if name == "" {
+ name = fmt.Sprintf("!NO NAME(token: %s)", token)
+ }
+ fmt.Fprintf(&b, "\t%s: %.2f\n", name, state.percent)
+ }
+ b.WriteString("#### completed work:\n")
+ for name, count := range s.completedWork() {
+ fmt.Fprintf(&b, "\t%s: %d\n", name, count)
+ }
+ return b.String()
+}
+
+// A condition is satisfied when all expectations are simultaneously
+// met. At that point, the 'met' channel is closed. On any failure, err is set
+// and the failed channel is closed.
+type condition struct {
+ expectations []Expectation
+ verdict chan Verdict
+}
+
+func (a *Awaiter) onApplyEdit(_ context.Context, params *protocol.ApplyWorkspaceEditParams) error {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+
+ a.state.documentChanges = append(a.state.documentChanges, params.Edit.DocumentChanges...)
+ a.checkConditionsLocked()
+ return nil
+}
+
+func (a *Awaiter) onDiagnostics(_ context.Context, d *protocol.PublishDiagnosticsParams) error {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+
+ pth := a.workdir.URIToPath(d.URI)
+ a.state.diagnostics[pth] = d
+ a.checkConditionsLocked()
+ return nil
+}
+
+func (a *Awaiter) onShowMessage(_ context.Context, m *protocol.ShowMessageParams) error {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+
+ a.state.showMessage = append(a.state.showMessage, m)
+ a.checkConditionsLocked()
+ return nil
+}
+
+func (a *Awaiter) onShowMessageRequest(_ context.Context, m *protocol.ShowMessageRequestParams) error {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+
+ a.state.showMessageRequest = append(a.state.showMessageRequest, m)
+ a.checkConditionsLocked()
+ return nil
+}
+
+func (a *Awaiter) onLogMessage(_ context.Context, m *protocol.LogMessageParams) error {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+
+ a.state.logs = append(a.state.logs, m)
+ a.checkConditionsLocked()
+ return nil
+}
+
+func (a *Awaiter) onWorkDoneProgressCreate(_ context.Context, m *protocol.WorkDoneProgressCreateParams) error {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+
+ a.state.work[m.Token] = &workProgress{}
+ return nil
+}
+
+func (a *Awaiter) onProgress(_ context.Context, m *protocol.ProgressParams) error {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ work, ok := a.state.work[m.Token]
+ if !ok {
+ panic(fmt.Sprintf("got progress report for unknown report %v: %v", m.Token, m))
+ }
+ v := m.Value.(map[string]interface{})
+ switch kind := v["kind"]; kind {
+ case "begin":
+ work.title = v["title"].(string)
+ if msg, ok := v["message"]; ok {
+ work.msg = msg.(string)
+ }
+ case "report":
+ if pct, ok := v["percentage"]; ok {
+ work.percent = pct.(float64)
+ }
+ if msg, ok := v["message"]; ok {
+ work.msg = msg.(string)
+ }
+ case "end":
+ work.complete = true
+ if msg, ok := v["message"]; ok {
+ work.endMsg = msg.(string)
+ }
+ }
+ a.checkConditionsLocked()
+ return nil
+}
+
+func (a *Awaiter) onRegisterCapability(_ context.Context, m *protocol.RegistrationParams) error {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+
+ a.state.registrations = append(a.state.registrations, m)
+ if a.state.registeredCapabilities == nil {
+ a.state.registeredCapabilities = make(map[string]protocol.Registration)
+ }
+ for _, reg := range m.Registrations {
+ a.state.registeredCapabilities[reg.Method] = reg
+ }
+ a.checkConditionsLocked()
+ return nil
+}
+
+func (a *Awaiter) onUnregisterCapability(_ context.Context, m *protocol.UnregistrationParams) error {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+
+ a.state.unregistrations = append(a.state.unregistrations, m)
+ a.checkConditionsLocked()
+ return nil
+}
+
+func (a *Awaiter) checkConditionsLocked() {
+ for id, condition := range a.waiters {
+ if v, _ := checkExpectations(a.state, condition.expectations); v != Unmet {
+ delete(a.waiters, id)
+ condition.verdict <- v
+ }
+ }
+}
+
+// takeDocumentChanges returns any accumulated document changes (from
+// server ApplyEdit RPC downcalls) and resets the list.
+func (a *Awaiter) takeDocumentChanges() []protocol.DocumentChanges {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+
+ res := a.state.documentChanges
+ a.state.documentChanges = nil
+ return res
+}
+
+// checkExpectations reports whether s meets all expectations.
+func checkExpectations(s State, expectations []Expectation) (Verdict, string) {
+ finalVerdict := Met
+ var summary strings.Builder
+ for _, e := range expectations {
+ v := e.Check(s)
+ if v > finalVerdict {
+ finalVerdict = v
+ }
+ fmt.Fprintf(&summary, "%v: %s\n", v, e.Description)
+ }
+ return finalVerdict, summary.String()
+}
+
+// Await blocks until the given expectations are all simultaneously met.
+//
+// Generally speaking Await should be avoided because it blocks indefinitely if
+// gopls ends up in a state where the expectations are never going to be met.
+// Use AfterChange or OnceMet instead, so that the runner knows when to stop
+// waiting.
+func (e *Env) Await(expectations ...Expectation) {
+ e.T.Helper()
+ if err := e.Awaiter.Await(e.Ctx, expectations...); err != nil {
+ e.T.Fatal(err)
+ }
+}
+
+// OnceMet blocks until the precondition is met by the state or becomes
+// unmeetable. If it was met, OnceMet checks that the state meets all
+// expectations in mustMeets.
+func (e *Env) OnceMet(precondition Expectation, mustMeets ...Expectation) {
+ e.Await(OnceMet(precondition, mustMeets...))
+}
+
+// Await waits for all expectations to simultaneously be met. It should only be
+// called from the main test goroutine.
+func (a *Awaiter) Await(ctx context.Context, expectations ...Expectation) error {
+ a.mu.Lock()
+ // Before adding the waiter, we check if the condition is currently met or
+ // failed to avoid a race where the condition was realized before Await was
+ // called.
+ switch verdict, summary := checkExpectations(a.state, expectations); verdict {
+ case Met:
+ a.mu.Unlock()
+ return nil
+ case Unmeetable:
+ err := fmt.Errorf("unmeetable expectations:\n%s\nstate:\n%v", summary, a.state)
+ a.mu.Unlock()
+ return err
+ }
+ cond := &condition{
+ expectations: expectations,
+ verdict: make(chan Verdict),
+ }
+ a.waiters[a.nextWaiterID] = cond
+ a.nextWaiterID++
+ a.mu.Unlock()
+
+ var err error
+ select {
+ case <-ctx.Done():
+ err = ctx.Err()
+ case v := <-cond.verdict:
+ if v != Met {
+ err = fmt.Errorf("condition has final verdict %v", v)
+ }
+ }
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ _, summary := checkExpectations(a.state, expectations)
+
+ // Debugging an unmet expectation can be tricky, so we put some effort into
+ // nicely formatting the failure.
+ if err != nil {
+ return fmt.Errorf("waiting on:\n%s\nerr:%v\n\nstate:\n%v", summary, err, a.state)
+ }
+ return nil
+}
diff --git a/gopls/internal/lsp/regtest/env_test.go b/gopls/internal/lsp/regtest/env_test.go
new file mode 100644
index 000000000..e334faa90
--- /dev/null
+++ b/gopls/internal/lsp/regtest/env_test.go
@@ -0,0 +1,66 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package regtest
+
+import (
+ "context"
+ "encoding/json"
+ "testing"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+)
+
+func TestProgressUpdating(t *testing.T) {
+ a := &Awaiter{
+ state: State{
+ work: make(map[protocol.ProgressToken]*workProgress),
+ },
+ }
+ ctx := context.Background()
+ if err := a.onWorkDoneProgressCreate(ctx, &protocol.WorkDoneProgressCreateParams{
+ Token: "foo",
+ }); err != nil {
+ t.Fatal(err)
+ }
+ if err := a.onWorkDoneProgressCreate(ctx, &protocol.WorkDoneProgressCreateParams{
+ Token: "bar",
+ }); err != nil {
+ t.Fatal(err)
+ }
+ updates := []struct {
+ token string
+ value interface{}
+ }{
+ {"foo", protocol.WorkDoneProgressBegin{Kind: "begin", Title: "foo work"}},
+ {"bar", protocol.WorkDoneProgressBegin{Kind: "begin", Title: "bar work"}},
+ {"foo", protocol.WorkDoneProgressEnd{Kind: "end"}},
+ {"bar", protocol.WorkDoneProgressReport{Kind: "report", Percentage: 42}},
+ }
+ for _, update := range updates {
+ params := &protocol.ProgressParams{
+ Token: update.token,
+ Value: update.value,
+ }
+ data, err := json.Marshal(params)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var unmarshaled protocol.ProgressParams
+ if err := json.Unmarshal(data, &unmarshaled); err != nil {
+ t.Fatal(err)
+ }
+ if err := a.onProgress(ctx, &unmarshaled); err != nil {
+ t.Fatal(err)
+ }
+ }
+ if !a.state.work["foo"].complete {
+ t.Error("work entry \"foo\" is incomplete, want complete")
+ }
+ got := *a.state.work["bar"]
+ want := workProgress{title: "bar work", percent: 42}
+ if got != want {
+ t.Errorf("work progress for \"bar\": %v, want %v", got, want)
+ }
+}
diff --git a/gopls/internal/lsp/regtest/expectation.go b/gopls/internal/lsp/regtest/expectation.go
new file mode 100644
index 000000000..9d9f023d9
--- /dev/null
+++ b/gopls/internal/lsp/regtest/expectation.go
@@ -0,0 +1,769 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package regtest
+
+import (
+ "fmt"
+ "regexp"
+ "sort"
+ "strings"
+
+ "golang.org/x/tools/gopls/internal/lsp"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+)
+
+var (
+ // InitialWorkspaceLoad is an expectation that the workspace initial load has
+ // completed. It is verified via workdone reporting.
+ InitialWorkspaceLoad = CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromInitialWorkspaceLoad), 1, false)
+)
+
+// A Verdict is the result of checking an expectation against the current
+// editor state.
+type Verdict int
+
+// Order matters for the following constants: verdicts are sorted in order of
+// decisiveness.
+const (
+ // Met indicates that an expectation is satisfied by the current state.
+ Met Verdict = iota
+ // Unmet indicates that an expectation is not currently met, but could be met
+ // in the future.
+ Unmet
+ // Unmeetable indicates that an expectation cannot be satisfied in the
+ // future.
+ Unmeetable
+)
+
+func (v Verdict) String() string {
+ switch v {
+ case Met:
+ return "Met"
+ case Unmet:
+ return "Unmet"
+ case Unmeetable:
+ return "Unmeetable"
+ }
+ return fmt.Sprintf("unrecognized verdict %d", v)
+}
+
+// An Expectation is an expected property of the state of the LSP client.
+// The Check function reports whether the property is met.
+//
+// Expectations are combinators. By composing them, tests may express
+// complex expectations in terms of simpler ones.
+//
+// TODO(rfindley): as expectations are combined, it becomes harder to identify
+// why they failed. A better signature for Check would be
+//
+// func(State) (Verdict, string)
+//
+// returning a reason for the verdict that can be composed similarly to
+// descriptions.
+type Expectation struct {
+ Check func(State) Verdict
+
+ // Description holds a noun-phrase identifying what the expectation checks.
+ //
+ // TODO(rfindley): revisit existing descriptions to ensure they compose nicely.
+ Description string
+}
+
+// OnceMet returns an Expectation that, once the precondition is met, asserts
+// that mustMeet is met.
+func OnceMet(precondition Expectation, mustMeets ...Expectation) Expectation {
+ check := func(s State) Verdict {
+ switch pre := precondition.Check(s); pre {
+ case Unmeetable:
+ return Unmeetable
+ case Met:
+ for _, mustMeet := range mustMeets {
+ verdict := mustMeet.Check(s)
+ if verdict != Met {
+ return Unmeetable
+ }
+ }
+ return Met
+ default:
+ return Unmet
+ }
+ }
+ description := describeExpectations(mustMeets...)
+ return Expectation{
+ Check: check,
+ Description: fmt.Sprintf("once %q is met, must have:\n%s", precondition.Description, description),
+ }
+}
+
+func describeExpectations(expectations ...Expectation) string {
+ var descriptions []string
+ for _, e := range expectations {
+ descriptions = append(descriptions, e.Description)
+ }
+ return strings.Join(descriptions, "\n")
+}
+
+// AnyOf returns an expectation that is satisfied when any of the given
+// expectations is met.
+func AnyOf(anyOf ...Expectation) Expectation {
+ check := func(s State) Verdict {
+ for _, e := range anyOf {
+ verdict := e.Check(s)
+ if verdict == Met {
+ return Met
+ }
+ }
+ return Unmet
+ }
+ description := describeExpectations(anyOf...)
+ return Expectation{
+ Check: check,
+ Description: fmt.Sprintf("Any of:\n%s", description),
+ }
+}
+
+// AllOf expects that all given expectations are met.
+//
+// TODO(rfindley): the problem with these types of combinators (OnceMet, AnyOf
+// and AllOf) is that we lose the information of *why* they failed: the Awaiter
+// is not smart enough to look inside.
+//
+// Refactor the API such that the Check function is responsible for explaining
+// why an expectation failed. This should allow us to significantly improve
+// test output: we won't need to summarize state at all, as the verdict
+// explanation itself should describe clearly why the expectation not met.
+func AllOf(allOf ...Expectation) Expectation {
+ check := func(s State) Verdict {
+ verdict := Met
+ for _, e := range allOf {
+ if v := e.Check(s); v > verdict {
+ verdict = v
+ }
+ }
+ return verdict
+ }
+ description := describeExpectations(allOf...)
+ return Expectation{
+ Check: check,
+ Description: fmt.Sprintf("All of:\n%s", description),
+ }
+}
+
+// ReadDiagnostics is an Expectation that stores the current diagnostics for
+// fileName in into, whenever it is evaluated.
+//
+// It can be used in combination with OnceMet or AfterChange to capture the
+// state of diagnostics when other expectations are satisfied.
+func ReadDiagnostics(fileName string, into *protocol.PublishDiagnosticsParams) Expectation {
+ check := func(s State) Verdict {
+ diags, ok := s.diagnostics[fileName]
+ if !ok {
+ return Unmeetable
+ }
+ *into = *diags
+ return Met
+ }
+ return Expectation{
+ Check: check,
+ Description: fmt.Sprintf("read diagnostics for %q", fileName),
+ }
+}
+
+// ReadAllDiagnostics is an expectation that stores all published diagnostics
+// into the provided map, whenever it is evaluated.
+//
+// It can be used in combination with OnceMet or AfterChange to capture the
+// state of diagnostics when other expectations are satisfied.
+func ReadAllDiagnostics(into *map[string]*protocol.PublishDiagnosticsParams) Expectation {
+ check := func(s State) Verdict {
+ allDiags := make(map[string]*protocol.PublishDiagnosticsParams)
+ for name, diags := range s.diagnostics {
+ allDiags[name] = diags
+ }
+ *into = allDiags
+ return Met
+ }
+ return Expectation{
+ Check: check,
+ Description: "read all diagnostics",
+ }
+}
+
+// NoOutstandingWork asserts that there is no work initiated using the LSP
+// $/progress API that has not completed.
+func NoOutstandingWork() Expectation {
+ check := func(s State) Verdict {
+ if len(s.outstandingWork()) == 0 {
+ return Met
+ }
+ return Unmet
+ }
+ return Expectation{
+ Check: check,
+ Description: "no outstanding work",
+ }
+}
+
+// NoShownMessage asserts that the editor has not received a ShowMessage.
+func NoShownMessage(subString string) Expectation {
+ check := func(s State) Verdict {
+ for _, m := range s.showMessage {
+ if strings.Contains(m.Message, subString) {
+ return Unmeetable
+ }
+ }
+ return Met
+ }
+ return Expectation{
+ Check: check,
+ Description: fmt.Sprintf("no ShowMessage received containing %q", subString),
+ }
+}
+
+// ShownMessage asserts that the editor has received a ShowMessageRequest
+// containing the given substring.
+func ShownMessage(containing string) Expectation {
+ check := func(s State) Verdict {
+ for _, m := range s.showMessage {
+ if strings.Contains(m.Message, containing) {
+ return Met
+ }
+ }
+ return Unmet
+ }
+ return Expectation{
+ Check: check,
+ Description: "received ShowMessage",
+ }
+}
+
+// ShowMessageRequest asserts that the editor has received a ShowMessageRequest
+// with an action item that has the given title.
+func ShowMessageRequest(title string) Expectation {
+ check := func(s State) Verdict {
+ if len(s.showMessageRequest) == 0 {
+ return Unmet
+ }
+ // Only check the most recent one.
+ m := s.showMessageRequest[len(s.showMessageRequest)-1]
+ if len(m.Actions) == 0 || len(m.Actions) > 1 {
+ return Unmet
+ }
+ if m.Actions[0].Title == title {
+ return Met
+ }
+ return Unmet
+ }
+ return Expectation{
+ Check: check,
+ Description: "received ShowMessageRequest",
+ }
+}
+
+// DoneDiagnosingChanges expects that diagnostics are complete from common
+// change notifications: didOpen, didChange, didSave, didChangeWatchedFiles,
+// and didClose.
+//
+// This can be used when multiple notifications may have been sent, such as
+// when a didChange is immediately followed by a didSave. It is insufficient to
+// simply await NoOutstandingWork, because the LSP client has no control over
+// when the server starts processing a notification. Therefore, we must keep
+// track of
+func (e *Env) DoneDiagnosingChanges() Expectation {
+ stats := e.Editor.Stats()
+ statsBySource := map[lsp.ModificationSource]uint64{
+ lsp.FromDidOpen: stats.DidOpen,
+ lsp.FromDidChange: stats.DidChange,
+ lsp.FromDidSave: stats.DidSave,
+ lsp.FromDidChangeWatchedFiles: stats.DidChangeWatchedFiles,
+ lsp.FromDidClose: stats.DidClose,
+ }
+
+ var expected []lsp.ModificationSource
+ for k, v := range statsBySource {
+ if v > 0 {
+ expected = append(expected, k)
+ }
+ }
+
+ // Sort for stability.
+ sort.Slice(expected, func(i, j int) bool {
+ return expected[i] < expected[j]
+ })
+
+ var all []Expectation
+ for _, source := range expected {
+ all = append(all, CompletedWork(lsp.DiagnosticWorkTitle(source), statsBySource[source], true))
+ }
+
+ return AllOf(all...)
+}
+
+// AfterChange expects that the given expectations will be met after all
+// state-changing notifications have been processed by the server.
+//
+// It awaits the completion of all anticipated work before checking the given
+// expectations.
+func (e *Env) AfterChange(expectations ...Expectation) {
+ e.T.Helper()
+ e.OnceMet(
+ e.DoneDiagnosingChanges(),
+ expectations...,
+ )
+}
+
+// DoneWithOpen expects all didOpen notifications currently sent by the editor
+// to be completely processed.
+func (e *Env) DoneWithOpen() Expectation {
+ opens := e.Editor.Stats().DidOpen
+ return CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromDidOpen), opens, true)
+}
+
+// StartedChange expects that the server has at least started processing all
+// didChange notifications sent from the client.
+func (e *Env) StartedChange() Expectation {
+ changes := e.Editor.Stats().DidChange
+ return StartedWork(lsp.DiagnosticWorkTitle(lsp.FromDidChange), changes)
+}
+
+// DoneWithChange expects all didChange notifications currently sent by the
+// editor to be completely processed.
+func (e *Env) DoneWithChange() Expectation {
+ changes := e.Editor.Stats().DidChange
+ return CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromDidChange), changes, true)
+}
+
+// DoneWithSave expects all didSave notifications currently sent by the editor
+// to be completely processed.
+func (e *Env) DoneWithSave() Expectation {
+ saves := e.Editor.Stats().DidSave
+ return CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromDidSave), saves, true)
+}
+
+// StartedChangeWatchedFiles expects that the server has at least started
+// processing all didChangeWatchedFiles notifications sent from the client.
+func (e *Env) StartedChangeWatchedFiles() Expectation {
+ changes := e.Editor.Stats().DidChangeWatchedFiles
+ return StartedWork(lsp.DiagnosticWorkTitle(lsp.FromDidChangeWatchedFiles), changes)
+}
+
+// DoneWithChangeWatchedFiles expects all didChangeWatchedFiles notifications
+// currently sent by the editor to be completely processed.
+func (e *Env) DoneWithChangeWatchedFiles() Expectation {
+ changes := e.Editor.Stats().DidChangeWatchedFiles
+ return CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromDidChangeWatchedFiles), changes, true)
+}
+
+// DoneWithClose expects all didClose notifications currently sent by the
+// editor to be completely processed.
+func (e *Env) DoneWithClose() Expectation {
+ changes := e.Editor.Stats().DidClose
+ return CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromDidClose), changes, true)
+}
+
+// StartedWork expect a work item to have been started >= atLeast times.
+//
+// See CompletedWork.
+func StartedWork(title string, atLeast uint64) Expectation {
+ check := func(s State) Verdict {
+ if s.startedWork()[title] >= atLeast {
+ return Met
+ }
+ return Unmet
+ }
+ return Expectation{
+ Check: check,
+ Description: fmt.Sprintf("started work %q at least %d time(s)", title, atLeast),
+ }
+}
+
+// CompletedWork expects a work item to have been completed >= atLeast times.
+//
+// Since the Progress API doesn't include any hidden metadata, we must use the
+// progress notification title to identify the work we expect to be completed.
+func CompletedWork(title string, count uint64, atLeast bool) Expectation {
+ check := func(s State) Verdict {
+ completed := s.completedWork()
+ if completed[title] == count || atLeast && completed[title] > count {
+ return Met
+ }
+ return Unmet
+ }
+ desc := fmt.Sprintf("completed work %q %v times", title, count)
+ if atLeast {
+ desc = fmt.Sprintf("completed work %q at least %d time(s)", title, count)
+ }
+ return Expectation{
+ Check: check,
+ Description: desc,
+ }
+}
+
+type WorkStatus struct {
+ // Last seen message from either `begin` or `report` progress.
+ Msg string
+ // Message sent with `end` progress message.
+ EndMsg string
+}
+
+// CompletedProgress expects that workDone progress is complete for the given
+// progress token. When non-nil WorkStatus is provided, it will be filled
+// when the expectation is met.
+//
+// If the token is not a progress token that the client has seen, this
+// expectation is Unmeetable.
+func CompletedProgress(token protocol.ProgressToken, into *WorkStatus) Expectation {
+ check := func(s State) Verdict {
+ work, ok := s.work[token]
+ if !ok {
+ return Unmeetable // TODO(rfindley): refactor to allow the verdict to explain this result
+ }
+ if work.complete {
+ if into != nil {
+ into.Msg = work.msg
+ into.EndMsg = work.endMsg
+ }
+ return Met
+ }
+ return Unmet
+ }
+ desc := fmt.Sprintf("completed work for token %v", token)
+ return Expectation{
+ Check: check,
+ Description: desc,
+ }
+}
+
+// OutstandingWork expects a work item to be outstanding. The given title must
+// be an exact match, whereas the given msg must only be contained in the work
+// item's message.
+func OutstandingWork(title, msg string) Expectation {
+ check := func(s State) Verdict {
+ for _, work := range s.work {
+ if work.complete {
+ continue
+ }
+ if work.title == title && strings.Contains(work.msg, msg) {
+ return Met
+ }
+ }
+ return Unmet
+ }
+ return Expectation{
+ Check: check,
+ Description: fmt.Sprintf("outstanding work: %q containing %q", title, msg),
+ }
+}
+
+// NoErrorLogs asserts that the client has not received any log messages of
+// error severity.
+func NoErrorLogs() Expectation {
+ return NoLogMatching(protocol.Error, "")
+}
+
+// LogMatching asserts that the client has received a log message
+// of type typ matching the regexp re a certain number of times.
+//
+// The count argument specifies the expected number of matching logs. If
+// atLeast is set, this is a lower bound, otherwise there must be exactly count
+// matching logs.
+func LogMatching(typ protocol.MessageType, re string, count int, atLeast bool) Expectation {
+ rec, err := regexp.Compile(re)
+ if err != nil {
+ panic(err)
+ }
+ check := func(state State) Verdict {
+ var found int
+ for _, msg := range state.logs {
+ if msg.Type == typ && rec.Match([]byte(msg.Message)) {
+ found++
+ }
+ }
+ // Check for an exact or "at least" match.
+ if found == count || (found >= count && atLeast) {
+ return Met
+ }
+ return Unmet
+ }
+ desc := fmt.Sprintf("log message matching %q expected %v times", re, count)
+ if atLeast {
+ desc = fmt.Sprintf("log message matching %q expected at least %v times", re, count)
+ }
+ return Expectation{
+ Check: check,
+ Description: desc,
+ }
+}
+
+// NoLogMatching asserts that the client has not received a log message
+// of type typ matching the regexp re. If re is an empty string, any log
+// message is considered a match.
+func NoLogMatching(typ protocol.MessageType, re string) Expectation {
+ var r *regexp.Regexp
+ if re != "" {
+ var err error
+ r, err = regexp.Compile(re)
+ if err != nil {
+ panic(err)
+ }
+ }
+ check := func(state State) Verdict {
+ for _, msg := range state.logs {
+ if msg.Type != typ {
+ continue
+ }
+ if r == nil || r.Match([]byte(msg.Message)) {
+ return Unmeetable
+ }
+ }
+ return Met
+ }
+ return Expectation{
+ Check: check,
+ Description: fmt.Sprintf("no log message matching %q", re),
+ }
+}
+
+// FileWatchMatching expects that a file registration matches re.
+func FileWatchMatching(re string) Expectation {
+ return Expectation{
+ Check: checkFileWatch(re, Met, Unmet),
+ Description: fmt.Sprintf("file watch matching %q", re),
+ }
+}
+
+// NoFileWatchMatching expects that no file registration matches re.
+func NoFileWatchMatching(re string) Expectation {
+ return Expectation{
+ Check: checkFileWatch(re, Unmet, Met),
+ Description: fmt.Sprintf("no file watch matching %q", re),
+ }
+}
+
+func checkFileWatch(re string, onMatch, onNoMatch Verdict) func(State) Verdict {
+ rec := regexp.MustCompile(re)
+ return func(s State) Verdict {
+ r := s.registeredCapabilities["workspace/didChangeWatchedFiles"]
+ watchers := jsonProperty(r.RegisterOptions, "watchers").([]interface{})
+ for _, watcher := range watchers {
+ pattern := jsonProperty(watcher, "globPattern").(string)
+ if rec.MatchString(pattern) {
+ return onMatch
+ }
+ }
+ return onNoMatch
+ }
+}
+
+// jsonProperty extracts a value from a path of JSON property names, assuming
+// the default encoding/json unmarshaling to the empty interface (i.e.: that
+// JSON objects are unmarshalled as map[string]interface{})
+//
+// For example, if obj is unmarshalled from the following json:
+//
+// {
+// "foo": { "bar": 3 }
+// }
+//
+// Then jsonProperty(obj, "foo", "bar") will be 3.
+func jsonProperty(obj interface{}, path ...string) interface{} {
+ if len(path) == 0 || obj == nil {
+ return obj
+ }
+ m := obj.(map[string]interface{})
+ return jsonProperty(m[path[0]], path[1:]...)
+}
+
+// RegistrationMatching asserts that the client has received a capability
+// registration matching the given regexp.
+//
+// TODO(rfindley): remove this once TestWatchReplaceTargets has been revisited.
+//
+// Deprecated: use (No)FileWatchMatching
+func RegistrationMatching(re string) Expectation {
+ rec := regexp.MustCompile(re)
+ check := func(s State) Verdict {
+ for _, p := range s.registrations {
+ for _, r := range p.Registrations {
+ if rec.Match([]byte(r.Method)) {
+ return Met
+ }
+ }
+ }
+ return Unmet
+ }
+ return Expectation{
+ Check: check,
+ Description: fmt.Sprintf("registration matching %q", re),
+ }
+}
+
+// UnregistrationMatching asserts that the client has received an
+// unregistration whose ID matches the given regexp.
+func UnregistrationMatching(re string) Expectation {
+ rec := regexp.MustCompile(re)
+ check := func(s State) Verdict {
+ for _, p := range s.unregistrations {
+ for _, r := range p.Unregisterations {
+ if rec.Match([]byte(r.Method)) {
+ return Met
+ }
+ }
+ }
+ return Unmet
+ }
+ return Expectation{
+ Check: check,
+ Description: fmt.Sprintf("unregistration matching %q", re),
+ }
+}
+
+// Diagnostics asserts that there is at least one diagnostic matching the given
+// filters.
+func Diagnostics(filters ...DiagnosticFilter) Expectation {
+ check := func(s State) Verdict {
+ diags := flattenDiagnostics(s)
+ for _, filter := range filters {
+ var filtered []flatDiagnostic
+ for _, d := range diags {
+ if filter.check(d.name, d.diag) {
+ filtered = append(filtered, d)
+ }
+ }
+ if len(filtered) == 0 {
+ // TODO(rfindley): if/when expectations describe their own failure, we
+ // can provide more useful information here as to which filter caused
+ // the failure.
+ return Unmet
+ }
+ diags = filtered
+ }
+ return Met
+ }
+ var descs []string
+ for _, filter := range filters {
+ descs = append(descs, filter.desc)
+ }
+ return Expectation{
+ Check: check,
+ Description: "any diagnostics " + strings.Join(descs, ", "),
+ }
+}
+
+// NoDiagnostics asserts that there are no diagnostics matching the given
+// filters. Notably, if no filters are supplied this assertion checks that
+// there are no diagnostics at all, for any file.
+func NoDiagnostics(filters ...DiagnosticFilter) Expectation {
+ check := func(s State) Verdict {
+ diags := flattenDiagnostics(s)
+ for _, filter := range filters {
+ var filtered []flatDiagnostic
+ for _, d := range diags {
+ if filter.check(d.name, d.diag) {
+ filtered = append(filtered, d)
+ }
+ }
+ diags = filtered
+ }
+ if len(diags) > 0 {
+ return Unmet
+ }
+ return Met
+ }
+ var descs []string
+ for _, filter := range filters {
+ descs = append(descs, filter.desc)
+ }
+ return Expectation{
+ Check: check,
+ Description: "no diagnostics " + strings.Join(descs, ", "),
+ }
+}
+
+type flatDiagnostic struct {
+ name string
+ diag protocol.Diagnostic
+}
+
+func flattenDiagnostics(state State) []flatDiagnostic {
+ var result []flatDiagnostic
+ for name, diags := range state.diagnostics {
+ for _, diag := range diags.Diagnostics {
+ result = append(result, flatDiagnostic{name, diag})
+ }
+ }
+ return result
+}
+
+// -- Diagnostic filters --
+
+// A DiagnosticFilter filters the set of diagnostics, for assertion with
+// Diagnostics or NoDiagnostics.
+type DiagnosticFilter struct {
+ desc string
+ check func(name string, _ protocol.Diagnostic) bool
+}
+
+// ForFile filters to diagnostics matching the sandbox-relative file name.
+func ForFile(name string) DiagnosticFilter {
+ return DiagnosticFilter{
+ desc: fmt.Sprintf("for file %q", name),
+ check: func(diagName string, _ protocol.Diagnostic) bool {
+ return diagName == name
+ },
+ }
+}
+
+// FromSource filters to diagnostics matching the given diagnostics source.
+func FromSource(source string) DiagnosticFilter {
+ return DiagnosticFilter{
+ desc: fmt.Sprintf("with source %q", source),
+ check: func(_ string, d protocol.Diagnostic) bool {
+ return d.Source == source
+ },
+ }
+}
+
+// AtRegexp filters to diagnostics in the file with sandbox-relative path name,
+// at the first position matching the given regexp pattern.
+//
+// TODO(rfindley): pass in the editor to expectations, so that they may depend
+// on editor state and AtRegexp can be a function rather than a method.
+func (e *Env) AtRegexp(name, pattern string) DiagnosticFilter {
+ loc := e.RegexpSearch(name, pattern)
+ return DiagnosticFilter{
+ desc: fmt.Sprintf("at the first position matching %#q in %q", pattern, name),
+ check: func(diagName string, d protocol.Diagnostic) bool {
+ return diagName == name && d.Range.Start == loc.Range.Start
+ },
+ }
+}
+
+// AtPosition filters to diagnostics at location name:line:character, for a
+// sandbox-relative path name.
+//
+// Line and character are 0-based, and character measures UTF-16 codes.
+//
+// Note: prefer the more readable AtRegexp.
+func AtPosition(name string, line, character uint32) DiagnosticFilter {
+ pos := protocol.Position{Line: line, Character: character}
+ return DiagnosticFilter{
+ desc: fmt.Sprintf("at %s:%d:%d", name, line, character),
+ check: func(diagName string, d protocol.Diagnostic) bool {
+ return diagName == name && d.Range.Start == pos
+ },
+ }
+}
+
+// WithMessage filters to diagnostics whose message contains the given
+// substring.
+func WithMessage(substring string) DiagnosticFilter {
+ return DiagnosticFilter{
+ desc: fmt.Sprintf("with message containing %q", substring),
+ check: func(_ string, d protocol.Diagnostic) bool {
+ return strings.Contains(d.Message, substring)
+ },
+ }
+}
diff --git a/gopls/internal/lsp/regtest/marker.go b/gopls/internal/lsp/regtest/marker.go
new file mode 100644
index 000000000..ba0e388fa
--- /dev/null
+++ b/gopls/internal/lsp/regtest/marker.go
@@ -0,0 +1,1273 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package regtest
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "flag"
+ "fmt"
+ "go/token"
+ "io/fs"
+ "os"
+ "path"
+ "path/filepath"
+ "reflect"
+ "regexp"
+ "sort"
+ "strings"
+ "testing"
+
+ "golang.org/x/tools/go/expect"
+ "golang.org/x/tools/gopls/internal/hooks"
+ "golang.org/x/tools/gopls/internal/lsp/cache"
+ "golang.org/x/tools/gopls/internal/lsp/debug"
+ "golang.org/x/tools/gopls/internal/lsp/fake"
+ "golang.org/x/tools/gopls/internal/lsp/lsprpc"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/safetoken"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/lsp/tests"
+ "golang.org/x/tools/gopls/internal/lsp/tests/compare"
+ "golang.org/x/tools/internal/jsonrpc2"
+ "golang.org/x/tools/internal/jsonrpc2/servertest"
+ "golang.org/x/tools/internal/testenv"
+ "golang.org/x/tools/txtar"
+)
+
+var update = flag.Bool("update", false, "if set, update test data during marker tests")
+
+// RunMarkerTests runs "marker" tests in the given test data directory.
+//
+// A marker test uses the '//@' marker syntax of the x/tools/go/expect package
+// to annotate source code with various information such as locations and
+// arguments of LSP operations to be executed by the test. The syntax following
+// '@' is parsed as a comma-separated list of ordinary Go function calls, for
+// example
+//
+// //@foo(a, "b", 3),bar(0)
+//
+// and delegates to a corresponding function to perform LSP-related operations.
+// See the Marker types documentation below for a list of supported markers.
+//
+// Each call argument is converted to the type of the corresponding parameter of
+// the designated function. The conversion logic may use the surrounding context,
+// such as the position or nearby text. See the Argument conversion section below
+// for the full set of special conversions. As a special case, the blank
+// identifier '_' is treated as the zero value of the parameter type.
+//
+// The test runner collects test cases by searching the given directory for
+// files with the .txt extension. Each file is interpreted as a txtar archive,
+// which is extracted to a temporary directory. The relative path to the .txt
+// file is used as the subtest name. The preliminary section of the file
+// (before the first archive entry) is a free-form comment.
+//
+// These tests were inspired by (and in many places copied from) a previous
+// iteration of the marker tests built on top of the packagestest framework.
+// Key design decisions motivating this reimplementation are as follows:
+// - The old tests had a single global session, causing interaction at a
+// distance and several awkward workarounds.
+// - The old tests could not be safely parallelized, because certain tests
+// manipulated the server options
+// - Relatedly, the old tests did not have a logic grouping of assertions into
+// a single unit, resulting in clusters of files serving clusters of
+// entangled assertions.
+// - The old tests used locations in the source as test names and as the
+// identity of golden content, meaning that a single edit could change the
+// name of an arbitrary number of subtests, and making it difficult to
+// manually edit golden content.
+// - The old tests did not hew closely to LSP concepts, resulting in, for
+// example, each marker implementation doing its own position
+// transformations, and inventing its own mechanism for configuration.
+// - The old tests had an ad-hoc session initialization process. The regtest
+// environment has had more time devoted to its initialization, and has a
+// more convenient API.
+// - The old tests lacked documentation, and often had failures that were hard
+// to understand. By starting from scratch, we can revisit these aspects.
+//
+// # Special files
+//
+// There are three types of file within the test archive that are given special
+// treatment by the test runner:
+// - "flags": this file is treated as a whitespace-separated list of flags
+// that configure the MarkerTest instance. For example, -min_go=go1.18 sets
+// the minimum required Go version for the test.
+// TODO(rfindley): support flag values containing whitespace.
+// - "settings.json": this file is parsed as JSON, and used as the
+// session configuration (see gopls/doc/settings.md)
+// - "env": this file is parsed as a list of VAR=VALUE fields specifying the
+// editor environment.
+// - Golden files: Within the archive, file names starting with '@' are
+// treated as "golden" content, and are not written to disk, but instead are
+// made available to test methods expecting an argument of type *Golden,
+// using the identifier following '@'. For example, if the first parameter of
+// Foo were of type *Golden, the test runner would convert the identifier a
+// in the call @foo(a, "b", 3) into a *Golden by collecting golden file
+// data starting with "@a/".
+//
+// # Marker types
+//
+// The following markers are supported within marker tests:
+//
+// - diag(location, regexp): specifies an expected diagnostic matching the
+// given regexp at the given location. The test runner requires
+// a 1:1 correspondence between observed diagnostics and diag annotations
+//
+// - def(src, dst location): perform a textDocument/definition request at
+// the src location, and check the the result points to the dst location.
+//
+// - hover(src, dst location, g Golden): perform a textDocument/hover at the
+// src location, and checks that the result is the dst location, with hover
+// content matching "hover.md" in the golden data g.
+//
+// - loc(name, location): specifies the name for a location in the source. These
+// locations may be referenced by other markers.
+//
+// - rename(location, new, golden): specifies a renaming of the
+// identifier at the specified location to the new name.
+// The golden directory contains the transformed files.
+//
+// - renameerr(location, new, wantError): specifies a renaming that
+// fails with an error that matches the expectation.
+//
+// - suggestedfix(location, regexp, kind, golden): like diag, the location and
+// regexp identify an expected diagnostic. This diagnostic must
+// to have exactly one associated code action of the specified kind.
+// This action is executed for its editing effects on the source files.
+// Like rename, the golden directory contains the expected transformed files.
+//
+// # Argument conversion
+//
+// Marker arguments are first parsed by the go/expect package, which accepts
+// the following tokens as defined by the Go spec:
+// - string, int64, float64, and rune literals
+// - true and false
+// - nil
+// - identifiers (type expect.Identifier)
+// - regular expressions, denoted the two tokens re"abc" (type *regexp.Regexp)
+//
+// These values are passed as arguments to the corresponding parameter of the
+// test function. Additional value conversions may occur for these argument ->
+// parameter type pairs:
+// - string->regexp: the argument is parsed as a regular expressions.
+// - string->location: the argument is converted to the location of the first
+// instance of the argument in the partial line preceding the note.
+// - regexp->location: the argument is converted to the location of the first
+// match for the argument in the partial line preceding the note. If the
+// regular expression contains exactly one subgroup, the position of the
+// subgroup is used rather than the position of the submatch.
+// - name->location: the argument is replaced by the named location.
+// - name->Golden: the argument is used to look up golden content prefixed by
+// @<argument>.
+// - {string,regexp,identifier}->wantError: a wantError type specifies
+// an expected error message, either in the form of a substring that
+// must be present, a regular expression that it must match, or an
+// identifier (e.g. foo) such that the archive entry @foo
+// exists and contains the exact expected error.
+//
+// # Example
+//
+// Here is a complete example:
+//
+// -- a.go --
+// package a
+//
+// const abc = 0x2a //@hover("b", "abc", abc),hover(" =", "abc", abc)
+// -- @abc/hover.md --
+// ```go
+// const abc untyped int = 42
+// ```
+//
+// @hover("b", "abc", abc),hover(" =", "abc", abc)
+//
+// In this example, the @hover annotation tells the test runner to run the
+// hoverMarker function, which has parameters:
+//
+// (mark marker, src, dsc protocol.Location, g *Golden).
+//
+// The first argument holds the test context, including fake editor with open
+// files, and sandboxed directory.
+//
+// Argument converters translate the "b" and "abc" arguments into locations by
+// interpreting each one as a regular expression and finding the location of
+// its first match on the preceding portion of the line, and the abc identifier
+// into a dictionary of golden content containing "hover.md". Then the
+// hoverMarker method executes a textDocument/hover LSP request at the src
+// position, and ensures the result spans "abc", with the markdown content from
+// hover.md. (Note that the markdown content includes the expect annotation as
+// the doc comment.)
+//
+// The next hover on the same line asserts the same result, but initiates the
+// hover immediately after "abc" in the source. This tests that we find the
+// preceding identifier when hovering.
+//
+// # Updating golden files
+//
+// To update golden content in the test archive, it is easier to regenerate
+// content automatically rather than edit it by hand. To do this, run the
+// tests with the -update flag. Only tests that actually run will be updated.
+//
+// In some cases, golden content will vary by Go version (for example, gopls
+// produces different markdown at Go versions before the 1.19 go/doc update).
+// By convention, the golden content in test archives should match the output
+// at Go tip. Each test function can normalize golden content for older Go
+// versions.
+//
+// Note that -update does not cause missing @diag or @loc markers to be added.
+//
+// # TODO
+//
+// This API is a work-in-progress, as we migrate existing marker tests from
+// internal/lsp/tests.
+//
+// Remaining TODO:
+// - parallelize/optimize test execution
+// - reorganize regtest packages (and rename to just 'test'?)
+// - Rename the files .txtar.
+//
+// Existing marker tests to port:
+// - CallHierarchy
+// - CodeLens
+// - Diagnostics
+// - CompletionItems
+// - Completions
+// - CompletionSnippets
+// - UnimportedCompletions
+// - DeepCompletions
+// - FuzzyCompletions
+// - CaseSensitiveCompletions
+// - RankCompletions
+// - FoldingRanges
+// - Formats
+// - Imports
+// - SemanticTokens
+// - SuggestedFixes
+// - FunctionExtractions
+// - MethodExtractions
+// - Definitions
+// - Implementations
+// - Highlights
+// - References
+// - Renames
+// - PrepareRenames
+// - Symbols
+// - InlayHints
+// - WorkspaceSymbols
+// - Signatures
+// - Links
+// - AddImport
+// - SelectionRanges
+func RunMarkerTests(t *testing.T, dir string) {
+ // The marker tests must be able to run go/packages.Load.
+ testenv.NeedsGoPackages(t)
+
+ tests, err := loadMarkerTests(dir)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Opt: use a shared cache.
+ // TODO(rfindley): opt: use a memoize store with no eviction.
+ cache := cache.New(nil)
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ // TODO(rfindley): it may be more useful to have full support for build
+ // constraints.
+ if test.minGoVersion != "" {
+ var go1point int
+ if _, err := fmt.Sscanf(test.minGoVersion, "go1.%d", &go1point); err != nil {
+ t.Fatalf("parsing -min_go version: %v", err)
+ }
+ testenv.NeedsGo1Point(t, 18)
+ }
+ config := fake.EditorConfig{
+ Settings: test.settings,
+ Env: test.env,
+ }
+ run := &markerTestRun{
+ test: test,
+ env: newEnv(t, cache, test.files, config),
+
+ locations: make(map[expect.Identifier]protocol.Location),
+ diags: make(map[protocol.Location][]protocol.Diagnostic),
+ }
+ // TODO(rfindley): make it easier to clean up the regtest environment.
+ defer run.env.Editor.Shutdown(context.Background()) // ignore error
+ defer run.env.Sandbox.Close() // ignore error
+
+ // Open all files so that we operate consistently with LSP clients, and
+ // (pragmatically) so that we have a Mapper available via the fake
+ // editor.
+ //
+ // This also allows avoiding mutating the editor state in tests.
+ for file := range test.files {
+ run.env.OpenFile(file)
+ }
+
+ // Pre-process locations.
+ var markers []marker
+ for _, note := range test.notes {
+ mark := marker{run: run, note: note}
+ switch note.Name {
+ case "loc":
+ mark.execute()
+ default:
+ markers = append(markers, mark)
+ }
+ }
+
+ // Wait for the didOpen notifications to be processed, then collect
+ // diagnostics.
+ var diags map[string]*protocol.PublishDiagnosticsParams
+ run.env.AfterChange(ReadAllDiagnostics(&diags))
+ for path, params := range diags {
+ uri := run.env.Sandbox.Workdir.URI(path)
+ for _, diag := range params.Diagnostics {
+ loc := protocol.Location{
+ URI: uri,
+ Range: diag.Range,
+ }
+ run.diags[loc] = append(run.diags[loc], diag)
+ }
+ }
+
+ // Invoke each remaining marker in the test.
+ for _, mark := range markers {
+ mark.execute()
+ }
+
+ // Any remaining (un-eliminated) diagnostics are an error.
+ for loc, diags := range run.diags {
+ for _, diag := range diags {
+ t.Errorf("%s: unexpected diagnostic: %q", run.fmtLoc(loc), diag.Message)
+ }
+ }
+
+ formatted, err := formatTest(test)
+ if err != nil {
+ t.Errorf("formatTest: %v", err)
+ } else if *update {
+ filename := filepath.Join(dir, test.name)
+ if err := os.WriteFile(filename, formatted, 0644); err != nil {
+ t.Error(err)
+ }
+ } else {
+ // On go 1.19 and later, verify that the testdata has not changed.
+ //
+ // On earlier Go versions, the golden test data varies due to different
+ // markdown escaping.
+ //
+ // Only check this if the test hasn't already failed, otherwise we'd
+ // report duplicate mismatches of golden data.
+ if testenv.Go1Point() >= 19 && !t.Failed() {
+ // Otherwise, verify that formatted content matches.
+ if diff := compare.NamedText("formatted", "on-disk", string(formatted), string(test.content)); diff != "" {
+ t.Errorf("formatted test does not match on-disk content:\n%s", diff)
+ }
+ }
+ }
+ })
+ }
+}
+
+// A marker holds state for the execution of a single @marker
+// annotation in the source.
+type marker struct {
+ run *markerTestRun
+ note *expect.Note
+}
+
+// errorf reports an error with a prefix indicating the position of the marker note.
+func (mark marker) errorf(format string, args ...interface{}) {
+ msg := fmt.Sprintf(format, args...)
+ // TODO(adonovan): consider using fmt.Fprintf(os.Stderr)+t.Fail instead of
+ // t.Errorf to avoid reporting uninteresting positions in the Go source of
+ // the driver. However, this loses the order of stderr wrt "FAIL: TestFoo"
+ // subtest dividers.
+ mark.run.env.T.Errorf("%s: %s", mark.run.fmtPos(mark.note.Pos), msg)
+}
+
+// execute invokes the marker's function with the arguments from note.
+func (mark marker) execute() {
+ fn, ok := markerFuncs[mark.note.Name]
+ if !ok {
+ mark.errorf("no marker function named %s", mark.note.Name)
+ return
+ }
+
+ // The first converter corresponds to the *Env argument.
+ // All others must be converted from the marker syntax.
+ if got, want := len(mark.note.Args), len(fn.converters); got != want {
+ mark.errorf("got %d arguments to %s, expect %d", got, mark.note.Name, want)
+ return
+ }
+
+ args := []reflect.Value{reflect.ValueOf(mark)}
+ for i, in := range mark.note.Args {
+ // Special handling for the blank identifier: treat it as the zero
+ // value.
+ if ident, ok := in.(expect.Identifier); ok && ident == "_" {
+ zero := reflect.Zero(fn.paramTypes[i])
+ args = append(args, zero)
+ continue
+ }
+ out, err := fn.converters[i](mark, in)
+ if err != nil {
+ mark.errorf("converting argument #%d of %s (%v): %v", i, mark.note.Name, in, err)
+ return
+ }
+ args = append(args, reflect.ValueOf(out))
+ }
+
+ fn.fn.Call(args)
+}
+
+// Supported marker functions.
+//
+// Each marker function must accept a marker as its first argument, with
+// subsequent arguments converted from the marker arguments.
+//
+// Marker funcs should not mutate the test environment (e.g. via opening files
+// or applying edits in the editor).
+var markerFuncs = map[string]markerFunc{
+ "def": makeMarkerFunc(defMarker),
+ "diag": makeMarkerFunc(diagMarker),
+ "hover": makeMarkerFunc(hoverMarker),
+ "loc": makeMarkerFunc(locMarker),
+ "rename": makeMarkerFunc(renameMarker),
+ "renameerr": makeMarkerFunc(renameErrMarker),
+ "suggestedfix": makeMarkerFunc(suggestedfixMarker),
+}
+
+// markerTest holds all the test data extracted from a test txtar archive.
+//
+// See the documentation for RunMarkerTests for more information on the archive
+// format.
+type markerTest struct {
+ name string // relative path to the txtar file in the testdata dir
+ fset *token.FileSet // fileset used for parsing notes
+ content []byte // raw test content
+ archive *txtar.Archive // original test archive
+ settings map[string]interface{} // gopls settings
+ env map[string]string // editor environment
+ files map[string][]byte // data files from the archive (excluding special files)
+ notes []*expect.Note // extracted notes from data files
+ golden map[string]*Golden // extracted golden content, by identifier name
+
+ // flags holds flags extracted from the special "flags" archive file.
+ flags []string
+ // Parsed flags values.
+ minGoVersion string
+}
+
+// flagSet returns the flagset used for parsing the special "flags" file in the
+// test archive.
+func (t *markerTest) flagSet() *flag.FlagSet {
+ flags := flag.NewFlagSet(t.name, flag.ContinueOnError)
+ flags.StringVar(&t.minGoVersion, "min_go", "", "if set, the minimum go1.X version required for this test")
+ return flags
+}
+
+func (t *markerTest) getGolden(id string) *Golden {
+ golden, ok := t.golden[id]
+ // If there was no golden content for this identifier, we must create one
+ // to handle the case where -update is set: we need a place to store
+ // the updated content.
+ if !ok {
+ golden = &Golden{id: id}
+
+ // TODO(adonovan): the separation of markerTest (the
+ // static aspects) from markerTestRun (the dynamic
+ // ones) is evidently bogus because here we modify
+ // markerTest during execution. Let's merge the two.
+ t.golden[id] = golden
+ }
+ return golden
+}
+
+// Golden holds extracted golden content for a single @<name> prefix.
+//
+// When -update is set, golden captures the updated golden contents for later
+// writing.
+type Golden struct {
+ id string
+ data map[string][]byte // key "" => @id itself
+ updated map[string][]byte
+}
+
+// Get returns golden content for the given name, which corresponds to the
+// relative path following the golden prefix @<name>/. For example, to access
+// the content of @foo/path/to/result.json from the Golden associated with
+// @foo, name should be "path/to/result.json".
+//
+// If -update is set, the given update function will be called to get the
+// updated golden content that should be written back to testdata.
+//
+// Marker functions must use this method instead of accessing data entries
+// directly otherwise the -update operation will delete those entries.
+//
+// TODO(rfindley): rethink the logic here. We may want to separate Get and Set,
+// and not delete golden content that isn't set.
+func (g *Golden) Get(t testing.TB, name string, updated []byte) ([]byte, bool) {
+ if existing, ok := g.updated[name]; ok {
+ // Multiple tests may reference the same golden data, but if they do they
+ // must agree about its expected content.
+ if diff := compare.NamedText("existing", "updated", string(existing), string(updated)); diff != "" {
+ t.Errorf("conflicting updates for golden data %s/%s:\n%s", g.id, name, diff)
+ }
+ }
+ if g.updated == nil {
+ g.updated = make(map[string][]byte)
+ }
+ g.updated[name] = updated
+ if *update {
+ return updated, true
+ }
+
+ res, ok := g.data[name]
+ return res, ok
+}
+
+// loadMarkerTests walks the given dir looking for .txt files, which it
+// interprets as a txtar archive.
+//
+// See the documentation for RunMarkerTests for more details on the test data
+// archive.
+//
+// TODO(rfindley): this test could sanity check the results. For example, it is
+// too easy to write "// @" instead of "//@", which we will happy skip silently.
+func loadMarkerTests(dir string) ([]*markerTest, error) {
+ var tests []*markerTest
+ err := filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
+ if strings.HasSuffix(path, ".txt") {
+ content, err := os.ReadFile(path)
+ if err != nil {
+ return err
+ }
+ name := strings.TrimPrefix(path, dir+string(filepath.Separator))
+ test, err := loadMarkerTest(name, content)
+ if err != nil {
+ return fmt.Errorf("%s: %v", path, err)
+ }
+ tests = append(tests, test)
+ }
+ return nil
+ })
+ return tests, err
+}
+
+func loadMarkerTest(name string, content []byte) (*markerTest, error) {
+ archive := txtar.Parse(content)
+ test := &markerTest{
+ name: name,
+ fset: token.NewFileSet(),
+ content: content,
+ archive: archive,
+ files: make(map[string][]byte),
+ golden: make(map[string]*Golden),
+ }
+ for _, file := range archive.Files {
+ switch {
+ case file.Name == "flags":
+ test.flags = strings.Fields(string(file.Data))
+ if err := test.flagSet().Parse(test.flags); err != nil {
+ return nil, fmt.Errorf("parsing flags: %v", err)
+ }
+
+ case file.Name == "settings.json":
+ if err := json.Unmarshal(file.Data, &test.settings); err != nil {
+ return nil, err
+ }
+
+ case file.Name == "env":
+ test.env = make(map[string]string)
+ fields := strings.Fields(string(file.Data))
+ for _, field := range fields {
+ // TODO: use strings.Cut once we are on 1.18+.
+ key, value, ok := cut(field, "=")
+ if !ok {
+ return nil, fmt.Errorf("env vars must be formatted as var=value, got %q", field)
+ }
+ test.env[key] = value
+ }
+
+ case strings.HasPrefix(file.Name, "@"): // golden content
+ id, name, _ := cut(file.Name[len("@"):], "/")
+ // Note that a file.Name of just "@id" gives (id, name) = ("id", "").
+ if _, ok := test.golden[id]; !ok {
+ test.golden[id] = &Golden{
+ id: id,
+ data: make(map[string][]byte),
+ }
+ }
+ test.golden[id].data[name] = file.Data
+
+ default: // ordinary file content
+ notes, err := expect.Parse(test.fset, file.Name, file.Data)
+ if err != nil {
+ return nil, fmt.Errorf("parsing notes in %q: %v", file.Name, err)
+ }
+ test.notes = append(test.notes, notes...)
+ test.files[file.Name] = file.Data
+ }
+ }
+
+ return test, nil
+}
+
+// cut is a copy of strings.Cut.
+//
+// TODO: once we only support Go 1.18+, just use strings.Cut.
+func cut(s, sep string) (before, after string, found bool) {
+ if i := strings.Index(s, sep); i >= 0 {
+ return s[:i], s[i+len(sep):], true
+ }
+ return s, "", false
+}
+
+// formatTest formats the test as a txtar archive.
+func formatTest(test *markerTest) ([]byte, error) {
+ arch := &txtar.Archive{
+ Comment: test.archive.Comment,
+ }
+
+ updatedGolden := make(map[string][]byte)
+ for id, g := range test.golden {
+ for name, data := range g.updated {
+ filename := "@" + path.Join(id, name) // name may be ""
+ updatedGolden[filename] = data
+ }
+ }
+
+ // Preserve the original ordering of archive files.
+ for _, file := range test.archive.Files {
+ switch file.Name {
+ // Preserve configuration files exactly as they were. They must have parsed
+ // if we got this far.
+ case "flags", "settings.json", "env":
+ arch.Files = append(arch.Files, file)
+ default:
+ if _, ok := test.files[file.Name]; ok { // ordinary file
+ arch.Files = append(arch.Files, file)
+ } else if data, ok := updatedGolden[file.Name]; ok { // golden file
+ arch.Files = append(arch.Files, txtar.File{Name: file.Name, Data: data})
+ delete(updatedGolden, file.Name)
+ }
+ }
+ }
+
+ // ...followed by any new golden files.
+ var newGoldenFiles []txtar.File
+ for filename, data := range updatedGolden {
+ newGoldenFiles = append(newGoldenFiles, txtar.File{Name: filename, Data: data})
+ }
+ // Sort new golden files lexically.
+ sort.Slice(newGoldenFiles, func(i, j int) bool {
+ return newGoldenFiles[i].Name < newGoldenFiles[j].Name
+ })
+ arch.Files = append(arch.Files, newGoldenFiles...)
+
+ return txtar.Format(arch), nil
+}
+
+// newEnv creates a new environment for a marker test.
+//
+// TODO(rfindley): simplify and refactor the construction of testing
+// environments across regtests, marker tests, and benchmarks.
+func newEnv(t *testing.T, cache *cache.Cache, files map[string][]byte, config fake.EditorConfig) *Env {
+ sandbox, err := fake.NewSandbox(&fake.SandboxConfig{
+ RootDir: t.TempDir(),
+ GOPROXY: "https://proxy.golang.org",
+ Files: files,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Put a debug instance in the context to prevent logging to stderr.
+ // See associated TODO in runner.go: we should revisit this pattern.
+ ctx := context.Background()
+ ctx = debug.WithInstance(ctx, "", "off")
+
+ awaiter := NewAwaiter(sandbox.Workdir)
+ ss := lsprpc.NewStreamServer(cache, false, hooks.Options)
+ server := servertest.NewPipeServer(ss, jsonrpc2.NewRawStream)
+ const skipApplyEdits = true // capture edits but don't apply them
+ editor, err := fake.NewEditor(sandbox, config).Connect(ctx, server, awaiter.Hooks(), skipApplyEdits)
+ if err != nil {
+ sandbox.Close() // ignore error
+ t.Fatal(err)
+ }
+ if err := awaiter.Await(ctx, InitialWorkspaceLoad); err != nil {
+ sandbox.Close() // ignore error
+ t.Fatal(err)
+ }
+ return &Env{
+ T: t,
+ Ctx: ctx,
+ Editor: editor,
+ Sandbox: sandbox,
+ Awaiter: awaiter,
+ }
+}
+
+// A markerFunc is a reflectively callable @mark implementation function.
+type markerFunc struct {
+ fn reflect.Value // the func to invoke
+ paramTypes []reflect.Type // parameter types, for zero values
+ converters []converter // to convert non-blank arguments
+}
+
+// A markerTestRun holds the state of one run of a marker test archive.
+type markerTestRun struct {
+ test *markerTest
+ env *Env
+
+ // Collected information.
+ // Each @diag/@suggestedfix marker eliminates an entry from diags.
+ locations map[expect.Identifier]protocol.Location
+ diags map[protocol.Location][]protocol.Diagnostic
+}
+
+// fmtLoc formats the given pos in the context of the test, using
+// archive-relative paths for files and including the line number in the full
+// archive file.
+func (run *markerTestRun) fmtPos(pos token.Pos) string {
+ file := run.test.fset.File(pos)
+ if file == nil {
+ run.env.T.Errorf("position %d not in test fileset", pos)
+ return "<invalid location>"
+ }
+ m, err := run.env.Editor.Mapper(file.Name())
+ if err != nil {
+ run.env.T.Errorf("%s", err)
+ return "<invalid location>"
+ }
+ loc, err := m.PosLocation(file, pos, pos)
+ if err != nil {
+ run.env.T.Errorf("Mapper(%s).PosLocation failed: %v", file.Name(), err)
+ }
+ return run.fmtLoc(loc)
+}
+
+// fmtLoc formats the given location in the context of the test, using
+// archive-relative paths for files and including the line number in the full
+// archive file.
+func (run *markerTestRun) fmtLoc(loc protocol.Location) string {
+ if loc == (protocol.Location{}) {
+ return "<missing location>"
+ }
+ lines := bytes.Count(run.test.archive.Comment, []byte("\n"))
+ var name string
+ for _, f := range run.test.archive.Files {
+ lines++ // -- separator --
+ uri := run.env.Sandbox.Workdir.URI(f.Name)
+ if uri == loc.URI {
+ name = f.Name
+ break
+ }
+ lines += bytes.Count(f.Data, []byte("\n"))
+ }
+ if name == "" {
+ run.env.T.Errorf("unable to find %s in test archive", loc)
+ return "<invalid location>"
+ }
+ m, err := run.env.Editor.Mapper(name)
+ if err != nil {
+ run.env.T.Errorf("internal error: %v", err)
+ return "<invalid location>"
+ }
+ s, err := m.LocationSpan(loc)
+ if err != nil {
+ run.env.T.Errorf("error formatting location %s: %v", loc, err)
+ return "<invalid location>"
+ }
+
+ innerSpan := fmt.Sprintf("%d:%d", s.Start().Line(), s.Start().Column()) // relative to the embedded file
+ outerSpan := fmt.Sprintf("%d:%d", lines+s.Start().Line(), s.Start().Column()) // relative to the archive file
+ if s.Start() != s.End() {
+ if s.End().Line() == s.Start().Line() {
+ innerSpan += fmt.Sprintf("-%d", s.End().Column())
+ outerSpan += fmt.Sprintf("-%d", s.End().Column())
+ } else {
+ innerSpan += fmt.Sprintf("-%d:%d", s.End().Line(), s.End().Column())
+ innerSpan += fmt.Sprintf("-%d:%d", lines+s.End().Line(), s.End().Column())
+ }
+ }
+
+ return fmt.Sprintf("%s:%s (%s:%s)", name, innerSpan, run.test.name, outerSpan)
+}
+
+// makeMarkerFunc uses reflection to create a markerFunc for the given func value.
+func makeMarkerFunc(fn interface{}) markerFunc {
+ mi := markerFunc{
+ fn: reflect.ValueOf(fn),
+ }
+ mtyp := mi.fn.Type()
+ if mtyp.NumIn() == 0 || mtyp.In(0) != markerType {
+ panic(fmt.Sprintf("marker function %#v must accept marker as its first argument", mi.fn))
+ }
+ if mtyp.NumOut() != 0 {
+ panic(fmt.Sprintf("marker function %#v must not have results", mi.fn))
+ }
+ for a := 1; a < mtyp.NumIn(); a++ {
+ in := mtyp.In(a)
+ mi.paramTypes = append(mi.paramTypes, in)
+ c := makeConverter(in)
+ mi.converters = append(mi.converters, c)
+ }
+ return mi
+}
+
+// ---- converters ----
+
+// converter is the signature of argument converters.
+// A converter should return an error rather than calling marker.errorf().
+type converter func(marker, interface{}) (interface{}, error)
+
+// Types with special conversions.
+var (
+ goldenType = reflect.TypeOf(&Golden{})
+ locationType = reflect.TypeOf(protocol.Location{})
+ markerType = reflect.TypeOf(marker{})
+ regexpType = reflect.TypeOf(&regexp.Regexp{})
+ wantErrorType = reflect.TypeOf(wantError{})
+)
+
+func makeConverter(paramType reflect.Type) converter {
+ switch paramType {
+ case goldenType:
+ return goldenConverter
+ case locationType:
+ return locationConverter
+ case wantErrorType:
+ return wantErrorConverter
+ default:
+ return func(_ marker, arg interface{}) (interface{}, error) {
+ if argType := reflect.TypeOf(arg); argType != paramType {
+ return nil, fmt.Errorf("cannot convert type %s to %s", argType, paramType)
+ }
+ return arg, nil
+ }
+ }
+}
+
+// locationConverter converts a string argument into the protocol location
+// corresponding to the first position of the string in the line preceding the
+// note.
+func locationConverter(mark marker, arg interface{}) (interface{}, error) {
+ switch arg := arg.(type) {
+ case string:
+ startOff, preceding, m, err := linePreceding(mark.run, mark.note.Pos)
+ if err != nil {
+ return protocol.Location{}, err
+ }
+ idx := bytes.Index(preceding, []byte(arg))
+ if idx < 0 {
+ return nil, fmt.Errorf("substring %q not found in %q", arg, preceding)
+ }
+ off := startOff + idx
+ return m.OffsetLocation(off, off+len(arg))
+ case *regexp.Regexp:
+ return findRegexpInLine(mark.run, mark.note.Pos, arg)
+ case expect.Identifier:
+ loc, ok := mark.run.locations[arg]
+ if !ok {
+ return nil, fmt.Errorf("no location named %q", arg)
+ }
+ return loc, nil
+ default:
+ return nil, fmt.Errorf("cannot convert argument type %T to location (must be a string to match the preceding line)", arg)
+ }
+}
+
+// findRegexpInLine searches the partial line preceding pos for a match for the
+// regular expression re, returning a location spanning the first match. If re
+// contains exactly one subgroup, the position of this subgroup match is
+// returned rather than the position of the full match.
+func findRegexpInLine(run *markerTestRun, pos token.Pos, re *regexp.Regexp) (protocol.Location, error) {
+ startOff, preceding, m, err := linePreceding(run, pos)
+ if err != nil {
+ return protocol.Location{}, err
+ }
+
+ matches := re.FindSubmatchIndex(preceding)
+ if len(matches) == 0 {
+ return protocol.Location{}, fmt.Errorf("no match for regexp %q found in %q", re, string(preceding))
+ }
+ var start, end int
+ switch len(matches) {
+ case 2:
+ // no subgroups: return the range of the regexp expression
+ start, end = matches[0], matches[1]
+ case 4:
+ // one subgroup: return its range
+ start, end = matches[2], matches[3]
+ default:
+ return protocol.Location{}, fmt.Errorf("invalid location regexp %q: expect either 0 or 1 subgroups, got %d", re, len(matches)/2-1)
+ }
+
+ return m.OffsetLocation(start+startOff, end+startOff)
+}
+
+func linePreceding(run *markerTestRun, pos token.Pos) (int, []byte, *protocol.Mapper, error) {
+ file := run.test.fset.File(pos)
+ posn := safetoken.Position(file, pos)
+ lineStart := file.LineStart(posn.Line)
+ startOff, endOff, err := safetoken.Offsets(file, lineStart, pos)
+ if err != nil {
+ return 0, nil, nil, err
+ }
+ m, err := run.env.Editor.Mapper(file.Name())
+ if err != nil {
+ return 0, nil, nil, err
+ }
+ return startOff, m.Content[startOff:endOff], m, nil
+}
+
+// wantErrorConverter converts a string, regexp, or identifier
+// argument into a wantError. The string is a substring of the
+// expected error, the regexp is a pattern than matches the expected
+// error, and the identifier is a golden file containing the expected
+// error.
+func wantErrorConverter(mark marker, arg interface{}) (interface{}, error) {
+ switch arg := arg.(type) {
+ case string:
+ return wantError{substr: arg}, nil
+ case *regexp.Regexp:
+ return wantError{pattern: arg}, nil
+ case expect.Identifier:
+ golden := mark.run.test.getGolden(string(arg))
+ return wantError{golden: golden}, nil
+ default:
+ return nil, fmt.Errorf("cannot convert %T to wantError (want: string, regexp, or identifier)", arg)
+ }
+}
+
+// A wantError represents an expectation of a specific error message.
+//
+// It may be indicated in one of three ways, in 'expect' notation:
+// - an identifier 'foo', to compare with the contents of the golden section @foo;
+// - a pattern expression re"ab.*c", to match against a regular expression;
+// - a string literal "abc", to check for a substring.
+type wantError struct {
+ golden *Golden
+ pattern *regexp.Regexp
+ substr string
+}
+
+func (we wantError) String() string {
+ if we.golden != nil {
+ return fmt.Sprintf("error from @%s entry", we.golden.id)
+ } else if we.pattern != nil {
+ return fmt.Sprintf("error matching %#q", we.pattern)
+ } else {
+ return fmt.Sprintf("error with substring %q", we.substr)
+ }
+}
+
+// check asserts that 'err' matches the wantError's expectations.
+func (we wantError) check(mark marker, err error) {
+ if err == nil {
+ mark.errorf("@%s succeeded unexpectedly, want %v", mark.note.Name, we)
+ return
+ }
+ got := err.Error()
+
+ if we.golden != nil {
+ // Error message must match @id golden file.
+ wantBytes, ok := we.golden.Get(mark.run.env.T, "", []byte(got))
+ if !ok {
+ mark.errorf("@%s: missing @%s entry", mark.note.Name, we.golden.id)
+ return
+ }
+ want := strings.TrimSpace(string(wantBytes))
+ if got != want {
+ // (ignore leading/trailing space)
+ mark.errorf("@%s failed with wrong error: got:\n%s\nwant:\n%s\ndiff:\n%s",
+ mark.note.Name, got, want, compare.Text(want, got))
+ }
+
+ } else if we.pattern != nil {
+ // Error message must match regular expression pattern.
+ if !we.pattern.MatchString(got) {
+ mark.errorf("got error %q, does not match pattern %#q", got, we.pattern)
+ }
+
+ } else if !strings.Contains(got, we.substr) {
+ // Error message must contain expected substring.
+ mark.errorf("got error %q, want substring %q", got, we.substr)
+ }
+}
+
+// goldenConverter converts an identifier into the Golden directory of content
+// prefixed by @<ident> in the test archive file.
+func goldenConverter(mark marker, arg interface{}) (interface{}, error) {
+ switch arg := arg.(type) {
+ case expect.Identifier:
+ return mark.run.test.getGolden(string(arg)), nil
+ default:
+ return nil, fmt.Errorf("invalid input type %T: golden key must be an identifier", arg)
+ }
+}
+
+// checkChangedFiles compares the files changed by an operation with their expected (golden) state.
+func checkChangedFiles(mark marker, changed map[string][]byte, golden *Golden) {
+ // Check changed files match expectations.
+ for filename, got := range changed {
+ if want, ok := golden.Get(mark.run.env.T, filename, got); !ok {
+ mark.errorf("%s: unexpected change to file %s; got:\n%s",
+ mark.note.Name, filename, got)
+
+ } else if string(got) != string(want) {
+ mark.errorf("%s: wrong file content for %s: got:\n%s\nwant:\n%s\ndiff:\n%s",
+ mark.note.Name, filename, got, want,
+ compare.Bytes(want, got))
+ }
+ }
+
+ // Report unmet expectations.
+ for filename := range golden.data {
+ if _, ok := changed[filename]; !ok {
+ want, _ := golden.Get(mark.run.env.T, filename, nil)
+ mark.errorf("%s: missing change to file %s; want:\n%s",
+ mark.note.Name, filename, want)
+ }
+ }
+}
+
+// ---- marker functions ----
+
+// defMarker implements the @godef marker, running textDocument/definition at
+// the given src location and asserting that there is exactly one resulting
+// location, matching dst.
+//
+// TODO(rfindley): support a variadic destination set.
+func defMarker(mark marker, src, dst protocol.Location) {
+ got := mark.run.env.GoToDefinition(src)
+ if got != dst {
+ mark.errorf("definition location does not match:\n\tgot: %s\n\twant %s",
+ mark.run.fmtLoc(got), mark.run.fmtLoc(dst))
+ }
+}
+
+// hoverMarker implements the @hover marker, running textDocument/hover at the
+// given src location and asserting that the resulting hover is over the dst
+// location (typically a span surrounding src), and that the markdown content
+// matches the golden content.
+func hoverMarker(mark marker, src, dst protocol.Location, golden *Golden) {
+ content, gotDst := mark.run.env.Hover(src)
+ if gotDst != dst {
+ mark.errorf("hover location does not match:\n\tgot: %s\n\twant %s)", mark.run.fmtLoc(gotDst), mark.run.fmtLoc(dst))
+ }
+ gotMD := ""
+ if content != nil {
+ gotMD = content.Value
+ }
+ wantMD := ""
+ if golden != nil {
+ wantBytes, _ := golden.Get(mark.run.env.T, "hover.md", []byte(gotMD))
+ wantMD = string(wantBytes)
+ }
+ // Normalize newline termination: archive files can't express non-newline
+ // terminated files.
+ if strings.HasSuffix(wantMD, "\n") && !strings.HasSuffix(gotMD, "\n") {
+ gotMD += "\n"
+ }
+ if diff := tests.DiffMarkdown(wantMD, gotMD); diff != "" {
+ mark.errorf("hover markdown mismatch (-want +got):\n%s", diff)
+ }
+}
+
+// locMarker implements the @loc marker. It is executed before other
+// markers, so that locations are available.
+func locMarker(mark marker, name expect.Identifier, loc protocol.Location) {
+ mark.run.locations[name] = loc
+}
+
+// diagMarker implements the @diag marker. It eliminates diagnostics from
+// the observed set in mark.test.
+func diagMarker(mark marker, loc protocol.Location, re *regexp.Regexp) {
+ if _, err := removeDiagnostic(mark, loc, re); err != nil {
+ mark.errorf("%v", err)
+ }
+}
+
+func removeDiagnostic(mark marker, loc protocol.Location, re *regexp.Regexp) (protocol.Diagnostic, error) {
+ diags := mark.run.diags[loc]
+ for i, diag := range diags {
+ if re.MatchString(diag.Message) {
+ mark.run.diags[loc] = append(diags[:i], diags[i+1:]...)
+ return diag, nil
+ }
+ }
+ return protocol.Diagnostic{}, fmt.Errorf("no diagnostic matches %q", re)
+}
+
+// renameMarker implements the @rename(location, new, golden) marker.
+func renameMarker(mark marker, loc protocol.Location, newName expect.Identifier, golden *Golden) {
+ changed, err := rename(mark.run.env, loc, string(newName))
+ if err != nil {
+ mark.errorf("rename failed: %v. (Use @renameerr for expected errors.)", err)
+ return
+ }
+ checkChangedFiles(mark, changed, golden)
+}
+
+// renameErrMarker implements the @renamererr(location, new, error) marker.
+func renameErrMarker(mark marker, loc protocol.Location, newName expect.Identifier, wantErr wantError) {
+ _, err := rename(mark.run.env, loc, string(newName))
+ wantErr.check(mark, err)
+}
+
+// rename returns the new contents of the files that would be modified
+// by renaming the identifier at loc to newName.
+func rename(env *Env, loc protocol.Location, newName string) (map[string][]byte, error) {
+ // We call Server.Rename directly, instead of
+ // env.Editor.Rename(env.Ctx, loc, newName)
+ // to isolate Rename from PrepareRename, and because we don't
+ // want to modify the file system in a scenario with multiple
+ // @rename markers.
+
+ editMap, err := env.Editor.Server.Rename(env.Ctx, &protocol.RenameParams{
+ TextDocument: protocol.TextDocumentIdentifier{URI: loc.URI},
+ Position: loc.Range.Start,
+ NewName: string(newName),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return applyDocumentChanges(env, editMap.DocumentChanges)
+}
+
+// applyDocumentChanges returns the effect of applying the document
+// changes to the contents of the Editor buffers. The actual editor
+// buffers are unchanged.
+func applyDocumentChanges(env *Env, changes []protocol.DocumentChanges) (map[string][]byte, error) {
+ result := make(map[string][]byte)
+ for _, change := range changes {
+ if change.RenameFile != nil {
+ // rename
+ oldFile := env.Sandbox.Workdir.URIToPath(change.RenameFile.OldURI)
+ newFile := env.Sandbox.Workdir.URIToPath(change.RenameFile.NewURI)
+ mapper, err := env.Editor.Mapper(oldFile)
+ if err != nil {
+ return nil, err
+ }
+ result[newFile] = mapper.Content
+
+ } else {
+ // edit
+ filename := env.Sandbox.Workdir.URIToPath(change.TextDocumentEdit.TextDocument.URI)
+ mapper, err := env.Editor.Mapper(filename)
+ if err != nil {
+ return nil, err
+ }
+ patched, _, err := source.ApplyProtocolEdits(mapper, change.TextDocumentEdit.Edits)
+ if err != nil {
+ return nil, err
+ }
+ result[filename] = patched
+ }
+ }
+
+ return result, nil
+}
+
+// suggestedfixMarker implements the @suggestedfix(location, regexp,
+// kind, golden) marker. It acts like @diag(location, regexp), to set
+// the expectation of a diagnostic, but then it applies the first code
+// action of the specified kind suggested by the matched diagnostic.
+func suggestedfixMarker(mark marker, loc protocol.Location, re *regexp.Regexp, actionKind string, golden *Golden) {
+ // Find and remove the matching diagnostic.
+ diag, err := removeDiagnostic(mark, loc, re)
+ if err != nil {
+ mark.errorf("%v", err)
+ return
+ }
+
+ // Apply the fix it suggests.
+ changed, err := suggestedfix(mark.run.env, loc, diag, actionKind)
+ if err != nil {
+ mark.errorf("suggestedfix failed: %v. (Use @suggestedfixerr for expected errors.)", err)
+ return
+ }
+
+ // Check the file state.
+ checkChangedFiles(mark, changed, golden)
+}
+
+func suggestedfix(env *Env, loc protocol.Location, diag protocol.Diagnostic, actionKind string) (map[string][]byte, error) {
+
+ // Request all code actions that apply to the diagnostic.
+ // (The protocol supports filtering using Context.Only={actionKind}
+ // but we can give a better error if we don't filter.)
+ actions, err := env.Editor.Server.CodeAction(env.Ctx, &protocol.CodeActionParams{
+ TextDocument: protocol.TextDocumentIdentifier{URI: loc.URI},
+ Range: diag.Range,
+ Context: protocol.CodeActionContext{
+ Only: nil, // => all kinds
+ Diagnostics: []protocol.Diagnostic{diag},
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Find the sole candidates CodeAction of the specified kind (e.g. refactor.rewrite).
+ var candidates []protocol.CodeAction
+ for _, act := range actions {
+ if act.Kind == protocol.CodeActionKind(actionKind) {
+ candidates = append(candidates, act)
+ }
+ }
+ if len(candidates) != 1 {
+ for _, act := range actions {
+ env.T.Logf("found CodeAction Kind=%s Title=%q", act.Kind, act.Title)
+ }
+ return nil, fmt.Errorf("found %d CodeActions of kind %s for this diagnostic, want 1", len(candidates), actionKind)
+ }
+ action := candidates[0]
+
+ // An action may specify an edit and/or a command, to be
+ // applied in that order. But since applyDocumentChanges(env,
+ // action.Edit.DocumentChanges) doesn't compose, for now we
+ // assert that all commands used in the @suggestedfix tests
+ // return only a command.
+ if action.Edit.DocumentChanges != nil {
+ env.T.Errorf("internal error: discarding unexpected CodeAction{Kind=%s, Title=%q}.Edit.DocumentChanges", action.Kind, action.Title)
+ }
+ if action.Command == nil {
+ return nil, fmt.Errorf("missing CodeAction{Kind=%s, Title=%q}.Command", action.Kind, action.Title)
+ }
+
+ // This is a typical CodeAction command:
+ //
+ // Title: "Implement error"
+ // Command: gopls.apply_fix
+ // Arguments: [{"Fix":"stub_methods","URI":".../a.go","Range":...}}]
+ //
+ // The client makes an ExecuteCommand RPC to the server,
+ // which dispatches it to the ApplyFix handler.
+ // ApplyFix dispatches to the "stub_methods" suggestedfix hook (the meat).
+ // The server then makes an ApplyEdit RPC to the client,
+ // whose Awaiter hook gathers the edits instead of applying them.
+
+ _ = env.Awaiter.takeDocumentChanges() // reset (assuming Env is confined to this thread)
+
+ if _, err := env.Editor.Server.ExecuteCommand(env.Ctx, &protocol.ExecuteCommandParams{
+ Command: action.Command.Command,
+ Arguments: action.Command.Arguments,
+ }); err != nil {
+ env.T.Fatalf("error converting command %q to edits: %v", action.Command.Command, err)
+ }
+
+ return applyDocumentChanges(env, env.Awaiter.takeDocumentChanges())
+}
+
+// TODO(adonovan): suggestedfixerr
diff --git a/gopls/internal/lsp/regtest/options.go b/gopls/internal/lsp/regtest/options.go
new file mode 100644
index 000000000..3820e96b3
--- /dev/null
+++ b/gopls/internal/lsp/regtest/options.go
@@ -0,0 +1,105 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package regtest
+
+import "golang.org/x/tools/gopls/internal/lsp/fake"
+
+type runConfig struct {
+ editor fake.EditorConfig
+ sandbox fake.SandboxConfig
+ modes Mode
+ skipHooks bool
+}
+
+// A RunOption augments the behavior of the test runner.
+type RunOption interface {
+ set(*runConfig)
+}
+
+type optionSetter func(*runConfig)
+
+func (f optionSetter) set(opts *runConfig) {
+ f(opts)
+}
+
+// ProxyFiles configures a file proxy using the given txtar-encoded string.
+func ProxyFiles(txt string) RunOption {
+ return optionSetter(func(opts *runConfig) {
+ opts.sandbox.ProxyFiles = fake.UnpackTxt(txt)
+ })
+}
+
+// Modes configures the execution modes that the test should run in.
+//
+// By default, modes are configured by the test runner. If this option is set,
+// it overrides the set of default modes and the test runs in exactly these
+// modes.
+func Modes(modes Mode) RunOption {
+ return optionSetter(func(opts *runConfig) {
+ if opts.modes != 0 {
+ panic("modes set more than once")
+ }
+ opts.modes = modes
+ })
+}
+
+// WindowsLineEndings configures the editor to use windows line endings.
+func WindowsLineEndings() RunOption {
+ return optionSetter(func(opts *runConfig) {
+ opts.editor.WindowsLineEndings = true
+ })
+}
+
+// Settings is a RunOption that sets user-provided configuration for the LSP
+// server.
+//
+// As a special case, the env setting must not be provided via Settings: use
+// EnvVars instead.
+type Settings map[string]interface{}
+
+func (s Settings) set(opts *runConfig) {
+ if opts.editor.Settings == nil {
+ opts.editor.Settings = make(map[string]interface{})
+ }
+ for k, v := range s {
+ opts.editor.Settings[k] = v
+ }
+}
+
+// WorkspaceFolders configures the workdir-relative workspace folders to send
+// to the LSP server. By default the editor sends a single workspace folder
+// corresponding to the workdir root. To explicitly configure no workspace
+// folders, use WorkspaceFolders with no arguments.
+func WorkspaceFolders(relFolders ...string) RunOption {
+ if len(relFolders) == 0 {
+ // Use an empty non-nil slice to signal explicitly no folders.
+ relFolders = []string{}
+ }
+ return optionSetter(func(opts *runConfig) {
+ opts.editor.WorkspaceFolders = relFolders
+ })
+}
+
+// EnvVars sets environment variables for the LSP session. When applying these
+// variables to the session, the special string $SANDBOX_WORKDIR is replaced by
+// the absolute path to the sandbox working directory.
+type EnvVars map[string]string
+
+func (e EnvVars) set(opts *runConfig) {
+ if opts.editor.Env == nil {
+ opts.editor.Env = make(map[string]string)
+ }
+ for k, v := range e {
+ opts.editor.Env[k] = v
+ }
+}
+
+// InGOPATH configures the workspace working directory to be GOPATH, rather
+// than a separate working directory for use with modules.
+func InGOPATH() RunOption {
+ return optionSetter(func(opts *runConfig) {
+ opts.sandbox.InGoPath = true
+ })
+}
diff --git a/gopls/internal/lsp/regtest/regtest.go b/gopls/internal/lsp/regtest/regtest.go
new file mode 100644
index 000000000..5a5232f3e
--- /dev/null
+++ b/gopls/internal/lsp/regtest/regtest.go
@@ -0,0 +1,153 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package regtest
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "runtime"
+ "testing"
+ "time"
+
+ "golang.org/x/tools/gopls/internal/lsp/cmd"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/internal/gocommand"
+ "golang.org/x/tools/internal/memoize"
+ "golang.org/x/tools/internal/testenv"
+ "golang.org/x/tools/internal/tool"
+)
+
+var (
+ runSubprocessTests = flag.Bool("enable_gopls_subprocess_tests", false, "run regtests against a gopls subprocess")
+ goplsBinaryPath = flag.String("gopls_test_binary", "", "path to the gopls binary for use as a remote, for use with the -enable_gopls_subprocess_tests flag")
+ regtestTimeout = flag.Duration("regtest_timeout", defaultRegtestTimeout(), "if nonzero, default timeout for each regtest; defaults to GOPLS_REGTEST_TIMEOUT")
+ skipCleanup = flag.Bool("regtest_skip_cleanup", false, "whether to skip cleaning up temp directories")
+ printGoroutinesOnFailure = flag.Bool("regtest_print_goroutines", false, "whether to print goroutines info on failure")
+ printLogs = flag.Bool("regtest_print_logs", false, "whether to print LSP logs")
+)
+
+func defaultRegtestTimeout() time.Duration {
+ s := os.Getenv("GOPLS_REGTEST_TIMEOUT")
+ if s == "" {
+ return 0
+ }
+ d, err := time.ParseDuration(s)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "invalid GOPLS_REGTEST_TIMEOUT %q: %v\n", s, err)
+ os.Exit(2)
+ }
+ return d
+}
+
+var runner *Runner
+
+type regtestRunner interface {
+ Run(t *testing.T, files string, f TestFunc)
+}
+
+func Run(t *testing.T, files string, f TestFunc) {
+ runner.Run(t, files, f)
+}
+
+func WithOptions(opts ...RunOption) configuredRunner {
+ return configuredRunner{opts: opts}
+}
+
+type configuredRunner struct {
+ opts []RunOption
+}
+
+func (r configuredRunner) Run(t *testing.T, files string, f TestFunc) {
+ runner.Run(t, files, f, r.opts...)
+}
+
+type RunMultiple []struct {
+ Name string
+ Runner regtestRunner
+}
+
+func (r RunMultiple) Run(t *testing.T, files string, f TestFunc) {
+ for _, runner := range r {
+ t.Run(runner.Name, func(t *testing.T) {
+ runner.Runner.Run(t, files, f)
+ })
+ }
+}
+
+// DefaultModes returns the default modes to run for each regression test (they
+// may be reconfigured by the tests themselves).
+func DefaultModes() Mode {
+ modes := Default
+ if !testing.Short() {
+ modes |= Experimental | Forwarded
+ }
+ if *runSubprocessTests {
+ modes |= SeparateProcess
+ }
+ return modes
+}
+
+// Main sets up and tears down the shared regtest state.
+func Main(m *testing.M, hook func(*source.Options)) {
+ // golang/go#54461: enable additional debugging around hanging Go commands.
+ gocommand.DebugHangingGoCommands = true
+
+ // If this magic environment variable is set, run gopls instead of the test
+ // suite. See the documentation for runTestAsGoplsEnvvar for more details.
+ if os.Getenv(runTestAsGoplsEnvvar) == "true" {
+ tool.Main(context.Background(), cmd.New("gopls", "", nil, hook), os.Args[1:])
+ os.Exit(0)
+ }
+
+ testenv.ExitIfSmallMachine()
+
+ // Disable GOPACKAGESDRIVER, as it can cause spurious test failures.
+ os.Setenv("GOPACKAGESDRIVER", "off")
+
+ flag.Parse()
+
+ runner = &Runner{
+ DefaultModes: DefaultModes(),
+ Timeout: *regtestTimeout,
+ PrintGoroutinesOnFailure: *printGoroutinesOnFailure,
+ SkipCleanup: *skipCleanup,
+ OptionsHook: hook,
+ store: memoize.NewStore(memoize.NeverEvict),
+ }
+
+ runner.goplsPath = *goplsBinaryPath
+ if runner.goplsPath == "" {
+ var err error
+ runner.goplsPath, err = os.Executable()
+ if err != nil {
+ panic(fmt.Sprintf("finding test binary path: %v", err))
+ }
+ }
+
+ dir, err := ioutil.TempDir("", "gopls-regtest-")
+ if err != nil {
+ panic(fmt.Errorf("creating regtest temp directory: %v", err))
+ }
+ runner.tempDir = dir
+
+ var code int
+ defer func() {
+ if err := runner.Close(); err != nil {
+ fmt.Fprintf(os.Stderr, "closing test runner: %v\n", err)
+ // Regtest cleanup is broken in go1.12 and earlier, and sometimes flakes on
+ // Windows due to file locking, but this is OK for our CI.
+ //
+ // Fail on go1.13+, except for windows and android which have shutdown problems.
+ if testenv.Go1Point() >= 13 && runtime.GOOS != "windows" && runtime.GOOS != "android" {
+ os.Exit(1)
+ }
+ }
+ os.Exit(code)
+ }()
+ code = m.Run()
+}
diff --git a/gopls/internal/lsp/regtest/runner.go b/gopls/internal/lsp/regtest/runner.go
new file mode 100644
index 000000000..57f541c63
--- /dev/null
+++ b/gopls/internal/lsp/regtest/runner.go
@@ -0,0 +1,437 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package regtest
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "os"
+ "path/filepath"
+ "runtime"
+ "runtime/pprof"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ exec "golang.org/x/sys/execabs"
+
+ "golang.org/x/tools/gopls/internal/lsp/cache"
+ "golang.org/x/tools/gopls/internal/lsp/debug"
+ "golang.org/x/tools/gopls/internal/lsp/fake"
+ "golang.org/x/tools/gopls/internal/lsp/lsprpc"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/internal/jsonrpc2"
+ "golang.org/x/tools/internal/jsonrpc2/servertest"
+ "golang.org/x/tools/internal/memoize"
+ "golang.org/x/tools/internal/testenv"
+ "golang.org/x/tools/internal/xcontext"
+)
+
+// Mode is a bitmask that defines for which execution modes a test should run.
+//
+// Each mode controls several aspects of gopls' configuration:
+// - Which server options to use for gopls sessions
+// - Whether to use a shared cache
+// - Whether to use a shared server
+// - Whether to run the server in-process or in a separate process
+//
+// The behavior of each mode with respect to these aspects is summarized below.
+// TODO(rfindley, cleanup): rather than using arbitrary names for these modes,
+// we can compose them explicitly out of the features described here, allowing
+// individual tests more freedom in constructing problematic execution modes.
+// For example, a test could assert on a certain behavior when running with
+// experimental options on a separate process. Moreover, we could unify 'Modes'
+// with 'Options', and use RunMultiple rather than a hard-coded loop through
+// modes.
+//
+// Mode | Options | Shared Cache? | Shared Server? | In-process?
+// ---------------------------------------------------------------------------
+// Default | Default | Y | N | Y
+// Forwarded | Default | Y | Y | Y
+// SeparateProcess | Default | Y | Y | N
+// Experimental | Experimental | N | N | Y
+type Mode int
+
+const (
+ // Default mode runs gopls with the default options, communicating over pipes
+ // to emulate the lsp sidecar execution mode, which communicates over
+ // stdin/stdout.
+ //
+ // It uses separate servers for each test, but a shared cache, to avoid
+ // duplicating work when processing GOROOT.
+ Default Mode = 1 << iota
+
+ // Forwarded uses the default options, but forwards connections to a shared
+ // in-process gopls server.
+ Forwarded
+
+ // SeparateProcess uses the default options, but forwards connection to an
+ // external gopls daemon.
+ //
+ // Only supported on GOOS=linux.
+ SeparateProcess
+
+ // Experimental enables all of the experimental configurations that are
+ // being developed, and runs gopls in sidecar mode.
+ //
+ // It uses a separate cache for each test, to exercise races that may only
+ // appear with cache misses.
+ Experimental
+)
+
+func (m Mode) String() string {
+ switch m {
+ case Default:
+ return "default"
+ case Forwarded:
+ return "forwarded"
+ case SeparateProcess:
+ return "separate process"
+ case Experimental:
+ return "experimental"
+ default:
+ return "unknown mode"
+ }
+}
+
+// A Runner runs tests in gopls execution environments, as specified by its
+// modes. For modes that share state (for example, a shared cache or common
+// remote), any tests that execute on the same Runner will share the same
+// state.
+type Runner struct {
+ // Configuration
+ DefaultModes Mode // modes to run for each test
+ Timeout time.Duration // per-test timeout, if set
+ PrintGoroutinesOnFailure bool // whether to dump goroutines on test failure
+ SkipCleanup bool // if set, don't delete test data directories when the test exits
+ OptionsHook func(*source.Options) // if set, use these options when creating gopls sessions
+
+ // Immutable state shared across test invocations
+ goplsPath string // path to the gopls executable (for SeparateProcess mode)
+ tempDir string // shared parent temp directory
+ store *memoize.Store // shared store
+
+ // Lazily allocated resources
+ tsOnce sync.Once
+ ts *servertest.TCPServer // shared in-process test server ("forwarded" mode)
+
+ startRemoteOnce sync.Once
+ remoteSocket string // unix domain socket for shared daemon ("separate process" mode)
+ remoteErr error
+ cancelRemote func()
+}
+
+type TestFunc func(t *testing.T, env *Env)
+
+// Run executes the test function in the default configured gopls execution
+// modes. For each a test run, a new workspace is created containing the
+// un-txtared files specified by filedata.
+func (r *Runner) Run(t *testing.T, files string, test TestFunc, opts ...RunOption) {
+ // TODO(rfindley): this function has gotten overly complicated, and warrants
+ // refactoring.
+ t.Helper()
+ checkBuilder(t)
+ testenv.NeedsGoPackages(t)
+
+ tests := []struct {
+ name string
+ mode Mode
+ getServer func(func(*source.Options)) jsonrpc2.StreamServer
+ }{
+ {"default", Default, r.defaultServer},
+ {"forwarded", Forwarded, r.forwardedServer},
+ {"separate_process", SeparateProcess, r.separateProcessServer},
+ {"experimental", Experimental, r.experimentalServer},
+ }
+
+ for _, tc := range tests {
+ tc := tc
+ var config runConfig
+ for _, opt := range opts {
+ opt.set(&config)
+ }
+ modes := r.DefaultModes
+ if config.modes != 0 {
+ modes = config.modes
+ }
+ if modes&tc.mode == 0 {
+ continue
+ }
+
+ t.Run(tc.name, func(t *testing.T) {
+ // TODO(rfindley): once jsonrpc2 shutdown is fixed, we should not leak
+ // goroutines in this test function.
+ // stacktest.NoLeak(t)
+
+ ctx := context.Background()
+ if r.Timeout != 0 {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeout(ctx, r.Timeout)
+ defer cancel()
+ } else if d, ok := testenv.Deadline(t); ok {
+ timeout := time.Until(d) * 19 / 20 // Leave an arbitrary 5% for cleanup.
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeout(ctx, timeout)
+ defer cancel()
+ }
+
+ // TODO(rfindley): do we need an instance at all? Can it be removed?
+ ctx = debug.WithInstance(ctx, "", "off")
+
+ rootDir := filepath.Join(r.tempDir, filepath.FromSlash(t.Name()))
+ if err := os.MkdirAll(rootDir, 0755); err != nil {
+ t.Fatal(err)
+ }
+
+ files := fake.UnpackTxt(files)
+ if config.editor.WindowsLineEndings {
+ for name, data := range files {
+ files[name] = bytes.ReplaceAll(data, []byte("\n"), []byte("\r\n"))
+ }
+ }
+ config.sandbox.Files = files
+ config.sandbox.RootDir = rootDir
+ sandbox, err := fake.NewSandbox(&config.sandbox)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ if !r.SkipCleanup {
+ if err := sandbox.Close(); err != nil {
+ pprof.Lookup("goroutine").WriteTo(os.Stderr, 1)
+ t.Errorf("closing the sandbox: %v", err)
+ }
+ }
+ }()
+
+ ss := tc.getServer(r.OptionsHook)
+
+ framer := jsonrpc2.NewRawStream
+ ls := &loggingFramer{}
+ framer = ls.framer(jsonrpc2.NewRawStream)
+ ts := servertest.NewPipeServer(ss, framer)
+
+ awaiter := NewAwaiter(sandbox.Workdir)
+ const skipApplyEdits = false
+ editor, err := fake.NewEditor(sandbox, config.editor).Connect(ctx, ts, awaiter.Hooks(), skipApplyEdits)
+ if err != nil {
+ t.Fatal(err)
+ }
+ env := &Env{
+ T: t,
+ Ctx: ctx,
+ Sandbox: sandbox,
+ Editor: editor,
+ Server: ts,
+ Awaiter: awaiter,
+ }
+ defer func() {
+ if t.Failed() && r.PrintGoroutinesOnFailure {
+ pprof.Lookup("goroutine").WriteTo(os.Stderr, 1)
+ }
+ if t.Failed() || *printLogs {
+ ls.printBuffers(t.Name(), os.Stderr)
+ }
+ // For tests that failed due to a timeout, don't fail to shutdown
+ // because ctx is done.
+ //
+ // There is little point to setting an arbitrary timeout for closing
+ // the editor: in general we want to clean up before proceeding to the
+ // next test, and if there is a deadlock preventing closing it will
+ // eventually be handled by the `go test` timeout.
+ if err := editor.Close(xcontext.Detach(ctx)); err != nil {
+ t.Errorf("closing editor: %v", err)
+ }
+ }()
+ // Always await the initial workspace load.
+ env.Await(InitialWorkspaceLoad)
+ test(t, env)
+ })
+ }
+}
+
+// longBuilders maps builders that are skipped when -short is set to a
+// (possibly empty) justification.
+var longBuilders = map[string]string{
+ "openbsd-amd64-64": "golang.org/issues/42789",
+ "openbsd-386-64": "golang.org/issues/42789",
+ "openbsd-386-68": "golang.org/issues/42789",
+ "openbsd-amd64-68": "golang.org/issues/42789",
+ "darwin-amd64-10_12": "",
+ "freebsd-amd64-race": "",
+ "illumos-amd64": "",
+ "netbsd-arm-bsiegert": "",
+ "solaris-amd64-oraclerel": "",
+ "windows-arm-zx2c4": "",
+}
+
+func checkBuilder(t *testing.T) {
+ t.Helper()
+ builder := os.Getenv("GO_BUILDER_NAME")
+ if reason, ok := longBuilders[builder]; ok && testing.Short() {
+ if reason != "" {
+ t.Skipf("Skipping %s with -short due to %s", builder, reason)
+ } else {
+ t.Skipf("Skipping %s with -short", builder)
+ }
+ }
+}
+
+type loggingFramer struct {
+ mu sync.Mutex
+ buf *safeBuffer
+}
+
+// safeBuffer is a threadsafe buffer for logs.
+type safeBuffer struct {
+ mu sync.Mutex
+ buf bytes.Buffer
+}
+
+func (b *safeBuffer) Write(p []byte) (int, error) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ return b.buf.Write(p)
+}
+
+func (s *loggingFramer) framer(f jsonrpc2.Framer) jsonrpc2.Framer {
+ return func(nc net.Conn) jsonrpc2.Stream {
+ s.mu.Lock()
+ framed := false
+ if s.buf == nil {
+ s.buf = &safeBuffer{buf: bytes.Buffer{}}
+ framed = true
+ }
+ s.mu.Unlock()
+ stream := f(nc)
+ if framed {
+ return protocol.LoggingStream(stream, s.buf)
+ }
+ return stream
+ }
+}
+
+func (s *loggingFramer) printBuffers(testname string, w io.Writer) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ if s.buf == nil {
+ return
+ }
+ fmt.Fprintf(os.Stderr, "#### Start Gopls Test Logs for %q\n", testname)
+ s.buf.mu.Lock()
+ io.Copy(w, &s.buf.buf)
+ s.buf.mu.Unlock()
+ fmt.Fprintf(os.Stderr, "#### End Gopls Test Logs for %q\n", testname)
+}
+
+// defaultServer handles the Default execution mode.
+func (r *Runner) defaultServer(optsHook func(*source.Options)) jsonrpc2.StreamServer {
+ return lsprpc.NewStreamServer(cache.New(r.store), false, optsHook)
+}
+
+// experimentalServer handles the Experimental execution mode.
+func (r *Runner) experimentalServer(optsHook func(*source.Options)) jsonrpc2.StreamServer {
+ options := func(o *source.Options) {
+ optsHook(o)
+ o.EnableAllExperiments()
+ }
+ return lsprpc.NewStreamServer(cache.New(nil), false, options)
+}
+
+// forwardedServer handles the Forwarded execution mode.
+func (r *Runner) forwardedServer(optsHook func(*source.Options)) jsonrpc2.StreamServer {
+ r.tsOnce.Do(func() {
+ ctx := context.Background()
+ ctx = debug.WithInstance(ctx, "", "off")
+ ss := lsprpc.NewStreamServer(cache.New(nil), false, optsHook)
+ r.ts = servertest.NewTCPServer(ctx, ss, nil)
+ })
+ return newForwarder("tcp", r.ts.Addr)
+}
+
+// runTestAsGoplsEnvvar triggers TestMain to run gopls instead of running
+// tests. It's a trick to allow tests to find a binary to use to start a gopls
+// subprocess.
+const runTestAsGoplsEnvvar = "_GOPLS_TEST_BINARY_RUN_AS_GOPLS"
+
+// separateProcessServer handles the SeparateProcess execution mode.
+func (r *Runner) separateProcessServer(optsHook func(*source.Options)) jsonrpc2.StreamServer {
+ if runtime.GOOS != "linux" {
+ panic("separate process execution mode is only supported on linux")
+ }
+
+ r.startRemoteOnce.Do(func() {
+ socketDir, err := ioutil.TempDir(r.tempDir, "gopls-regtest-socket")
+ if err != nil {
+ r.remoteErr = err
+ return
+ }
+ r.remoteSocket = filepath.Join(socketDir, "gopls-test-daemon")
+
+ // The server should be killed by when the test runner exits, but to be
+ // conservative also set a listen timeout.
+ args := []string{"serve", "-listen", "unix;" + r.remoteSocket, "-listen.timeout", "1m"}
+
+ ctx, cancel := context.WithCancel(context.Background())
+ cmd := exec.CommandContext(ctx, r.goplsPath, args...)
+ cmd.Env = append(os.Environ(), runTestAsGoplsEnvvar+"=true")
+
+ // Start the external gopls process. This is still somewhat racy, as we
+ // don't know when gopls binds to the socket, but the gopls forwarder
+ // client has built-in retry behavior that should mostly mitigate this
+ // problem (and if it doesn't, we probably want to improve the retry
+ // behavior).
+ if err := cmd.Start(); err != nil {
+ cancel()
+ r.remoteSocket = ""
+ r.remoteErr = err
+ } else {
+ r.cancelRemote = cancel
+ // Spin off a goroutine to wait, so that we free up resources when the
+ // server exits.
+ go cmd.Wait()
+ }
+ })
+
+ return newForwarder("unix", r.remoteSocket)
+}
+
+func newForwarder(network, address string) *lsprpc.Forwarder {
+ server, err := lsprpc.NewForwarder(network+";"+address, nil)
+ if err != nil {
+ // This should never happen, as we are passing an explicit address.
+ panic(fmt.Sprintf("internal error: unable to create forwarder: %v", err))
+ }
+ return server
+}
+
+// Close cleans up resource that have been allocated to this workspace.
+func (r *Runner) Close() error {
+ var errmsgs []string
+ if r.ts != nil {
+ if err := r.ts.Close(); err != nil {
+ errmsgs = append(errmsgs, err.Error())
+ }
+ }
+ if r.cancelRemote != nil {
+ r.cancelRemote()
+ }
+ if !r.SkipCleanup {
+ if err := os.RemoveAll(r.tempDir); err != nil {
+ errmsgs = append(errmsgs, err.Error())
+ }
+ }
+ if len(errmsgs) > 0 {
+ return fmt.Errorf("errors closing the test runner:\n\t%s", strings.Join(errmsgs, "\n\t"))
+ }
+ return nil
+}
diff --git a/gopls/internal/lsp/regtest/wrappers.go b/gopls/internal/lsp/regtest/wrappers.go
new file mode 100644
index 000000000..0315c6de3
--- /dev/null
+++ b/gopls/internal/lsp/regtest/wrappers.go
@@ -0,0 +1,489 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package regtest
+
+import (
+ "encoding/json"
+ "path"
+
+ "golang.org/x/tools/gopls/internal/lsp/command"
+ "golang.org/x/tools/gopls/internal/lsp/fake"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/internal/xcontext"
+)
+
+// RemoveWorkspaceFile deletes a file on disk but does nothing in the
+// editor. It calls t.Fatal on any error.
+func (e *Env) RemoveWorkspaceFile(name string) {
+ e.T.Helper()
+ if err := e.Sandbox.Workdir.RemoveFile(e.Ctx, name); err != nil {
+ e.T.Fatal(err)
+ }
+}
+
+// ReadWorkspaceFile reads a file from the workspace, calling t.Fatal on any
+// error.
+func (e *Env) ReadWorkspaceFile(name string) string {
+ e.T.Helper()
+ content, err := e.Sandbox.Workdir.ReadFile(name)
+ if err != nil {
+ e.T.Fatal(err)
+ }
+ return string(content)
+}
+
+// WriteWorkspaceFile writes a file to disk but does nothing in the editor.
+// It calls t.Fatal on any error.
+func (e *Env) WriteWorkspaceFile(name, content string) {
+ e.T.Helper()
+ if err := e.Sandbox.Workdir.WriteFile(e.Ctx, name, content); err != nil {
+ e.T.Fatal(err)
+ }
+}
+
+// WriteWorkspaceFiles deletes a file on disk but does nothing in the
+// editor. It calls t.Fatal on any error.
+func (e *Env) WriteWorkspaceFiles(files map[string]string) {
+ e.T.Helper()
+ if err := e.Sandbox.Workdir.WriteFiles(e.Ctx, files); err != nil {
+ e.T.Fatal(err)
+ }
+}
+
+// ListFiles lists relative paths to files in the given directory.
+// It calls t.Fatal on any error.
+func (e *Env) ListFiles(dir string) []string {
+ e.T.Helper()
+ paths, err := e.Sandbox.Workdir.ListFiles(dir)
+ if err != nil {
+ e.T.Fatal(err)
+ }
+ return paths
+}
+
+// OpenFile opens a file in the editor, calling t.Fatal on any error.
+func (e *Env) OpenFile(name string) {
+ e.T.Helper()
+ if err := e.Editor.OpenFile(e.Ctx, name); err != nil {
+ e.T.Fatal(err)
+ }
+}
+
+// CreateBuffer creates a buffer in the editor, calling t.Fatal on any error.
+func (e *Env) CreateBuffer(name string, content string) {
+ e.T.Helper()
+ if err := e.Editor.CreateBuffer(e.Ctx, name, content); err != nil {
+ e.T.Fatal(err)
+ }
+}
+
+// BufferText returns the current buffer contents for the file with the given
+// relative path, calling t.Fatal if the file is not open in a buffer.
+func (e *Env) BufferText(name string) string {
+ e.T.Helper()
+ text, ok := e.Editor.BufferText(name)
+ if !ok {
+ e.T.Fatalf("buffer %q is not open", name)
+ }
+ return text
+}
+
+// CloseBuffer closes an editor buffer without saving, calling t.Fatal on any
+// error.
+func (e *Env) CloseBuffer(name string) {
+ e.T.Helper()
+ if err := e.Editor.CloseBuffer(e.Ctx, name); err != nil {
+ e.T.Fatal(err)
+ }
+}
+
+// EditBuffer applies edits to an editor buffer, calling t.Fatal on any error.
+func (e *Env) EditBuffer(name string, edits ...protocol.TextEdit) {
+ e.T.Helper()
+ if err := e.Editor.EditBuffer(e.Ctx, name, edits); err != nil {
+ e.T.Fatal(err)
+ }
+}
+
+func (e *Env) SetBufferContent(name string, content string) {
+ e.T.Helper()
+ if err := e.Editor.SetBufferContent(e.Ctx, name, content); err != nil {
+ e.T.Fatal(err)
+ }
+}
+
+// RegexpSearch returns the starting position of the first match for re in the
+// buffer specified by name, calling t.Fatal on any error. It first searches
+// for the position in open buffers, then in workspace files.
+func (e *Env) RegexpSearch(name, re string) protocol.Location {
+ e.T.Helper()
+ loc, err := e.Editor.RegexpSearch(name, re)
+ if err == fake.ErrUnknownBuffer {
+ loc, err = e.Sandbox.Workdir.RegexpSearch(name, re)
+ }
+ if err != nil {
+ e.T.Fatalf("RegexpSearch: %v, %v for %q", name, err, re)
+ }
+ return loc
+}
+
+// RegexpReplace replaces the first group in the first match of regexpStr with
+// the replace text, calling t.Fatal on any error.
+func (e *Env) RegexpReplace(name, regexpStr, replace string) {
+ e.T.Helper()
+ if err := e.Editor.RegexpReplace(e.Ctx, name, regexpStr, replace); err != nil {
+ e.T.Fatalf("RegexpReplace: %v", err)
+ }
+}
+
+// SaveBuffer saves an editor buffer, calling t.Fatal on any error.
+func (e *Env) SaveBuffer(name string) {
+ e.T.Helper()
+ if err := e.Editor.SaveBuffer(e.Ctx, name); err != nil {
+ e.T.Fatal(err)
+ }
+}
+
+func (e *Env) SaveBufferWithoutActions(name string) {
+ e.T.Helper()
+ if err := e.Editor.SaveBufferWithoutActions(e.Ctx, name); err != nil {
+ e.T.Fatal(err)
+ }
+}
+
+// GoToDefinition goes to definition in the editor, calling t.Fatal on any
+// error. It returns the path and position of the resulting jump.
+func (e *Env) GoToDefinition(loc protocol.Location) protocol.Location {
+ e.T.Helper()
+ loc, err := e.Editor.GoToDefinition(e.Ctx, loc)
+ if err != nil {
+ e.T.Fatal(err)
+ }
+ return loc
+}
+
+// FormatBuffer formats the editor buffer, calling t.Fatal on any error.
+func (e *Env) FormatBuffer(name string) {
+ e.T.Helper()
+ if err := e.Editor.FormatBuffer(e.Ctx, name); err != nil {
+ e.T.Fatal(err)
+ }
+}
+
+// OrganizeImports processes the source.organizeImports codeAction, calling
+// t.Fatal on any error.
+func (e *Env) OrganizeImports(name string) {
+ e.T.Helper()
+ if err := e.Editor.OrganizeImports(e.Ctx, name); err != nil {
+ e.T.Fatal(err)
+ }
+}
+
+// ApplyQuickFixes processes the quickfix codeAction, calling t.Fatal on any error.
+func (e *Env) ApplyQuickFixes(path string, diagnostics []protocol.Diagnostic) {
+ e.T.Helper()
+ loc := protocol.Location{URI: e.Sandbox.Workdir.URI(path)} // zero Range => whole file
+ if err := e.Editor.ApplyQuickFixes(e.Ctx, loc, diagnostics); err != nil {
+ e.T.Fatal(err)
+ }
+}
+
+// ApplyCodeAction applies the given code action.
+func (e *Env) ApplyCodeAction(action protocol.CodeAction) {
+ e.T.Helper()
+ if err := e.Editor.ApplyCodeAction(e.Ctx, action); err != nil {
+ e.T.Fatal(err)
+ }
+}
+
+// GetQuickFixes returns the available quick fix code actions.
+func (e *Env) GetQuickFixes(path string, diagnostics []protocol.Diagnostic) []protocol.CodeAction {
+ e.T.Helper()
+ loc := protocol.Location{URI: e.Sandbox.Workdir.URI(path)} // zero Range => whole file
+ actions, err := e.Editor.GetQuickFixes(e.Ctx, loc, diagnostics)
+ if err != nil {
+ e.T.Fatal(err)
+ }
+ return actions
+}
+
+// Hover in the editor, calling t.Fatal on any error.
+func (e *Env) Hover(loc protocol.Location) (*protocol.MarkupContent, protocol.Location) {
+ e.T.Helper()
+ c, loc, err := e.Editor.Hover(e.Ctx, loc)
+ if err != nil {
+ e.T.Fatal(err)
+ }
+ return c, loc
+}
+
+func (e *Env) DocumentLink(name string) []protocol.DocumentLink {
+ e.T.Helper()
+ links, err := e.Editor.DocumentLink(e.Ctx, name)
+ if err != nil {
+ e.T.Fatal(err)
+ }
+ return links
+}
+
+func (e *Env) DocumentHighlight(loc protocol.Location) []protocol.DocumentHighlight {
+ e.T.Helper()
+ highlights, err := e.Editor.DocumentHighlight(e.Ctx, loc)
+ if err != nil {
+ e.T.Fatal(err)
+ }
+ return highlights
+}
+
+// RunGenerate runs "go generate" in the given dir, calling t.Fatal on any error.
+// It waits for the generate command to complete and checks for file changes
+// before returning.
+func (e *Env) RunGenerate(dir string) {
+ e.T.Helper()
+ if err := e.Editor.RunGenerate(e.Ctx, dir); err != nil {
+ e.T.Fatal(err)
+ }
+ e.Await(NoOutstandingWork())
+ // Ideally the fake.Workspace would handle all synthetic file watching, but
+ // we help it out here as we need to wait for the generate command to
+ // complete before checking the filesystem.
+ e.CheckForFileChanges()
+}
+
+// RunGoCommand runs the given command in the sandbox's default working
+// directory.
+func (e *Env) RunGoCommand(verb string, args ...string) {
+ e.T.Helper()
+ if err := e.Sandbox.RunGoCommand(e.Ctx, "", verb, args, true); err != nil {
+ e.T.Fatal(err)
+ }
+}
+
+// RunGoCommandInDir is like RunGoCommand, but executes in the given
+// relative directory of the sandbox.
+func (e *Env) RunGoCommandInDir(dir, verb string, args ...string) {
+ e.T.Helper()
+ if err := e.Sandbox.RunGoCommand(e.Ctx, dir, verb, args, true); err != nil {
+ e.T.Fatal(err)
+ }
+}
+
+// GoVersion checks the version of the go command.
+// It returns the X in Go 1.X.
+func (e *Env) GoVersion() int {
+ e.T.Helper()
+ v, err := e.Sandbox.GoVersion(e.Ctx)
+ if err != nil {
+ e.T.Fatal(err)
+ }
+ return v
+}
+
+// DumpGoSum prints the correct go.sum contents for dir in txtar format,
+// for use in creating regtests.
+func (e *Env) DumpGoSum(dir string) {
+ e.T.Helper()
+
+ if err := e.Sandbox.RunGoCommand(e.Ctx, dir, "list", []string{"-mod=mod", "..."}, true); err != nil {
+ e.T.Fatal(err)
+ }
+ sumFile := path.Join(dir, "/go.sum")
+ e.T.Log("\n\n-- " + sumFile + " --\n" + e.ReadWorkspaceFile(sumFile))
+ e.T.Fatal("see contents above")
+}
+
+// CheckForFileChanges triggers a manual poll of the workspace for any file
+// changes since creation, or since last polling. It is a workaround for the
+// lack of true file watching support in the fake workspace.
+func (e *Env) CheckForFileChanges() {
+ e.T.Helper()
+ if err := e.Sandbox.Workdir.CheckForFileChanges(e.Ctx); err != nil {
+ e.T.Fatal(err)
+ }
+}
+
+// CodeLens calls textDocument/codeLens for the given path, calling t.Fatal on
+// any error.
+func (e *Env) CodeLens(path string) []protocol.CodeLens {
+ e.T.Helper()
+ lens, err := e.Editor.CodeLens(e.Ctx, path)
+ if err != nil {
+ e.T.Fatal(err)
+ }
+ return lens
+}
+
+// ExecuteCodeLensCommand executes the command for the code lens matching the
+// given command name.
+func (e *Env) ExecuteCodeLensCommand(path string, cmd command.Command, result interface{}) {
+ e.T.Helper()
+ lenses := e.CodeLens(path)
+ var lens protocol.CodeLens
+ var found bool
+ for _, l := range lenses {
+ if l.Command.Command == cmd.ID() {
+ lens = l
+ found = true
+ }
+ }
+ if !found {
+ e.T.Fatalf("found no command with the ID %s", cmd.ID())
+ }
+ e.ExecuteCommand(&protocol.ExecuteCommandParams{
+ Command: lens.Command.Command,
+ Arguments: lens.Command.Arguments,
+ }, result)
+}
+
+func (e *Env) ExecuteCommand(params *protocol.ExecuteCommandParams, result interface{}) {
+ e.T.Helper()
+ response, err := e.Editor.ExecuteCommand(e.Ctx, params)
+ if err != nil {
+ e.T.Fatal(err)
+ }
+ if result == nil {
+ return
+ }
+ // Hack: The result of an executeCommand request will be unmarshaled into
+ // maps. Re-marshal and unmarshal into the type we expect.
+ //
+ // This could be improved by generating a jsonrpc2 command client from the
+ // command.Interface, but that should only be done if we're consolidating
+ // this part of the tsprotocol generation.
+ data, err := json.Marshal(response)
+ if err != nil {
+ e.T.Fatal(err)
+ }
+ if err := json.Unmarshal(data, result); err != nil {
+ e.T.Fatal(err)
+ }
+}
+
+// InlayHints calls textDocument/inlayHints for the given path, calling t.Fatal on
+// any error.
+func (e *Env) InlayHints(path string) []protocol.InlayHint {
+ e.T.Helper()
+ hints, err := e.Editor.InlayHint(e.Ctx, path)
+ if err != nil {
+ e.T.Fatal(err)
+ }
+ return hints
+}
+
+// Symbol calls workspace/symbol
+func (e *Env) Symbol(query string) []protocol.SymbolInformation {
+ e.T.Helper()
+ ans, err := e.Editor.Symbols(e.Ctx, query)
+ if err != nil {
+ e.T.Fatal(err)
+ }
+ return ans
+}
+
+// References wraps Editor.References, calling t.Fatal on any error.
+func (e *Env) References(loc protocol.Location) []protocol.Location {
+ e.T.Helper()
+ locations, err := e.Editor.References(e.Ctx, loc)
+ if err != nil {
+ e.T.Fatal(err)
+ }
+ return locations
+}
+
+// Rename wraps Editor.Rename, calling t.Fatal on any error.
+func (e *Env) Rename(loc protocol.Location, newName string) {
+ e.T.Helper()
+ if err := e.Editor.Rename(e.Ctx, loc, newName); err != nil {
+ e.T.Fatal(err)
+ }
+}
+
+// Implementations wraps Editor.Implementations, calling t.Fatal on any error.
+func (e *Env) Implementations(loc protocol.Location) []protocol.Location {
+ e.T.Helper()
+ locations, err := e.Editor.Implementations(e.Ctx, loc)
+ if err != nil {
+ e.T.Fatal(err)
+ }
+ return locations
+}
+
+// RenameFile wraps Editor.RenameFile, calling t.Fatal on any error.
+func (e *Env) RenameFile(oldPath, newPath string) {
+ e.T.Helper()
+ if err := e.Editor.RenameFile(e.Ctx, oldPath, newPath); err != nil {
+ e.T.Fatal(err)
+ }
+}
+
+// SignatureHelp wraps Editor.SignatureHelp, calling t.Fatal on error
+func (e *Env) SignatureHelp(loc protocol.Location) *protocol.SignatureHelp {
+ e.T.Helper()
+ sighelp, err := e.Editor.SignatureHelp(e.Ctx, loc)
+ if err != nil {
+ e.T.Fatal(err)
+ }
+ return sighelp
+}
+
+// Completion executes a completion request on the server.
+func (e *Env) Completion(loc protocol.Location) *protocol.CompletionList {
+ e.T.Helper()
+ completions, err := e.Editor.Completion(e.Ctx, loc)
+ if err != nil {
+ e.T.Fatal(err)
+ }
+ return completions
+}
+
+// AcceptCompletion accepts a completion for the given item at the given
+// position.
+func (e *Env) AcceptCompletion(loc protocol.Location, item protocol.CompletionItem) {
+ e.T.Helper()
+ if err := e.Editor.AcceptCompletion(e.Ctx, loc, item); err != nil {
+ e.T.Fatal(err)
+ }
+}
+
+// CodeAction calls testDocument/codeAction for the given path, and calls
+// t.Fatal if there are errors.
+func (e *Env) CodeAction(path string, diagnostics []protocol.Diagnostic) []protocol.CodeAction {
+ e.T.Helper()
+ loc := protocol.Location{URI: e.Sandbox.Workdir.URI(path)} // no Range => whole file
+ actions, err := e.Editor.CodeAction(e.Ctx, loc, diagnostics)
+ if err != nil {
+ e.T.Fatal(err)
+ }
+ return actions
+}
+
+// ChangeConfiguration updates the editor config, calling t.Fatal on any error.
+func (e *Env) ChangeConfiguration(newConfig fake.EditorConfig) {
+ e.T.Helper()
+ if err := e.Editor.ChangeConfiguration(e.Ctx, newConfig); err != nil {
+ e.T.Fatal(err)
+ }
+}
+
+// ChangeWorkspaceFolders updates the editor workspace folders, calling t.Fatal
+// on any error.
+func (e *Env) ChangeWorkspaceFolders(newFolders ...string) {
+ e.T.Helper()
+ if err := e.Editor.ChangeWorkspaceFolders(e.Ctx, newFolders); err != nil {
+ e.T.Fatal(err)
+ }
+}
+
+// Close shuts down the editor session and cleans up the sandbox directory,
+// calling t.Error on any error.
+func (e *Env) Close() {
+ ctx := xcontext.Detach(e.Ctx)
+ if err := e.Editor.Close(ctx); err != nil {
+ e.T.Errorf("closing editor: %v", err)
+ }
+ if err := e.Sandbox.Close(); err != nil {
+ e.T.Errorf("cleaning up sandbox: %v", err)
+ }
+}
diff --git a/gopls/internal/lsp/rename.go b/gopls/internal/lsp/rename.go
new file mode 100644
index 000000000..7111e92dc
--- /dev/null
+++ b/gopls/internal/lsp/rename.go
@@ -0,0 +1,78 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsp
+
+import (
+ "context"
+ "path/filepath"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+)
+
+func (s *Server) rename(ctx context.Context, params *protocol.RenameParams) (*protocol.WorkspaceEdit, error) {
+ snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.Go)
+ defer release()
+ if !ok {
+ return nil, err
+ }
+ // Because we don't handle directory renaming within source.Rename, source.Rename returns
+ // boolean value isPkgRenaming to determine whether an DocumentChanges of type RenameFile should
+ // be added to the return protocol.WorkspaceEdit value.
+ edits, isPkgRenaming, err := source.Rename(ctx, snapshot, fh, params.Position, params.NewName)
+ if err != nil {
+ return nil, err
+ }
+
+ var docChanges []protocol.DocumentChanges
+ for uri, e := range edits {
+ fh, err := snapshot.GetFile(ctx, uri)
+ if err != nil {
+ return nil, err
+ }
+ docChanges = append(docChanges, documentChanges(fh, e)...)
+ }
+ if isPkgRenaming {
+ // Update the last component of the file's enclosing directory.
+ oldBase := filepath.Dir(fh.URI().Filename())
+ newURI := filepath.Join(filepath.Dir(oldBase), params.NewName)
+ docChanges = append(docChanges, protocol.DocumentChanges{
+ RenameFile: &protocol.RenameFile{
+ Kind: "rename",
+ OldURI: protocol.URIFromPath(oldBase),
+ NewURI: protocol.URIFromPath(newURI),
+ },
+ })
+ }
+ return &protocol.WorkspaceEdit{
+ DocumentChanges: docChanges,
+ }, nil
+}
+
+// prepareRename implements the textDocument/prepareRename handler. It may
+// return (nil, nil) if there is no rename at the cursor position, but it is
+// not desirable to display an error to the user.
+//
+// TODO(rfindley): why wouldn't we want to show an error to the user, if the
+// user initiated a rename request at the cursor?
+func (s *Server) prepareRename(ctx context.Context, params *protocol.PrepareRenameParams) (*protocol.PrepareRename2Gn, error) {
+ snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.Go)
+ defer release()
+ if !ok {
+ return nil, err
+ }
+ // Do not return errors here, as it adds clutter.
+ // Returning a nil result means there is not a valid rename.
+ item, usererr, err := source.PrepareRename(ctx, snapshot, fh, params.Position)
+ if err != nil {
+ // Return usererr here rather than err, to avoid cluttering the UI with
+ // internal error details.
+ return nil, usererr
+ }
+ return &protocol.PrepareRename2Gn{
+ Range: item.Range,
+ Placeholder: item.Text,
+ }, nil
+}
diff --git a/gopls/internal/lsp/reset_golden.sh b/gopls/internal/lsp/reset_golden.sh
new file mode 100755
index 000000000..ff7f4d082
--- /dev/null
+++ b/gopls/internal/lsp/reset_golden.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+#
+# Copyright 2022 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+#
+# Updates the *.golden files ... to match the tests' current behavior.
+
+set -eu
+
+GO117BIN="go1.17.9"
+
+command -v $GO117BIN >/dev/null 2>&1 || {
+ go install golang.org/dl/$GO117BIN@latest
+ $GO117BIN download
+}
+
+find ./internal/lsp/testdata -name *.golden ! -name summary*.txt.golden -delete
+# Here we intentionally do not run the ./internal/lsp/source tests with
+# -golden. Eventually these tests will be deleted, and in the meantime they are
+# redundant with the ./internal/lsp tests.
+#
+# Note: go1.17.9 tests must be run *before* go tests, as by convention the
+# golden output should match the output of gopls built with the most recent
+# version of Go. If output differs at 1.17, tests must be tolerant of the 1.17
+# output.
+$GO117BIN test ./internal/lsp -golden
+go test ./internal/lsp -golden
+$GO117BIN test ./test -golden
+go test ./test -golden
diff --git a/gopls/internal/lsp/safetoken/safetoken.go b/gopls/internal/lsp/safetoken/safetoken.go
new file mode 100644
index 000000000..29cc1b1c6
--- /dev/null
+++ b/gopls/internal/lsp/safetoken/safetoken.go
@@ -0,0 +1,122 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package safetoken provides wrappers around methods in go/token,
+// that return errors rather than panicking.
+//
+// It also provides a central place for workarounds in the underlying
+// packages. The use of this package's functions instead of methods of
+// token.File (such as Offset, Position, and PositionFor) is mandatory
+// throughout the gopls codebase and enforced by a static check.
+package safetoken
+
+import (
+ "fmt"
+ "go/token"
+)
+
+// Offset returns f.Offset(pos), but first checks that the file
+// contains the pos.
+//
+// The definition of "contains" here differs from that of token.File
+// in order to work around a bug in the parser (issue #57490): during
+// error recovery, the parser may create syntax nodes whose computed
+// End position is 1 byte beyond EOF, which would cause
+// token.File.Offset to panic. The workaround is that this function
+// accepts a Pos that is exactly 1 byte beyond EOF and maps it to the
+// EOF offset.
+func Offset(f *token.File, pos token.Pos) (int, error) {
+ if !inRange(f, pos) {
+ // Accept a Pos that is 1 byte beyond EOF,
+ // and map it to the EOF offset.
+ // (Workaround for #57490.)
+ if int(pos) == f.Base()+f.Size()+1 {
+ return f.Size(), nil
+ }
+
+ return -1, fmt.Errorf("pos %d is not in range [%d:%d] of file %s",
+ pos, f.Base(), f.Base()+f.Size(), f.Name())
+ }
+ return int(pos) - f.Base(), nil
+}
+
+// Offsets returns Offset(start) and Offset(end).
+func Offsets(f *token.File, start, end token.Pos) (int, int, error) {
+ startOffset, err := Offset(f, start)
+ if err != nil {
+ return 0, 0, fmt.Errorf("start: %v", err)
+ }
+ endOffset, err := Offset(f, end)
+ if err != nil {
+ return 0, 0, fmt.Errorf("end: %v", err)
+ }
+ return startOffset, endOffset, nil
+}
+
+// Pos returns f.Pos(offset), but first checks that the offset is
+// non-negative and not larger than the size of the file.
+func Pos(f *token.File, offset int) (token.Pos, error) {
+ if !(0 <= offset && offset <= f.Size()) {
+ return token.NoPos, fmt.Errorf("offset %d is not in range for file %s of size %d", offset, f.Name(), f.Size())
+ }
+ return token.Pos(f.Base() + offset), nil
+}
+
+// inRange reports whether file f contains position pos,
+// according to the invariants of token.File.
+//
+// This function is not public because of the ambiguity it would
+// create w.r.t. the definition of "contains". Use Offset instead.
+func inRange(f *token.File, pos token.Pos) bool {
+ return token.Pos(f.Base()) <= pos && pos <= token.Pos(f.Base()+f.Size())
+}
+
+// Position returns the Position for the pos value in the given file.
+//
+// p must be NoPos, a valid Pos in the range of f, or exactly 1 byte
+// beyond the end of f. (See [Offset] for explanation.)
+// Any other value causes a panic.
+//
+// Line directives (//line comments) are ignored.
+func Position(f *token.File, pos token.Pos) token.Position {
+ // Work around issue #57490.
+ if int(pos) == f.Base()+f.Size()+1 {
+ pos--
+ }
+
+ // TODO(adonovan): centralize the workaround for
+ // golang/go#41029 (newline at EOF) here too.
+
+ return f.PositionFor(pos, false)
+}
+
+// StartPosition converts a start Pos in the FileSet into a Position.
+//
+// Call this function only if start represents the start of a token or
+// parse tree, such as the result of Node.Pos(). If start is the end of
+// an interval, such as Node.End(), call EndPosition instead, as it
+// may need the correction described at [Position].
+func StartPosition(fset *token.FileSet, start token.Pos) (_ token.Position) {
+ if f := fset.File(start); f != nil {
+ return Position(f, start)
+ }
+ return
+}
+
+// EndPosition converts an end Pos in the FileSet into a Position.
+//
+// Call this function only if pos represents the end of
+// a non-empty interval, such as the result of Node.End().
+func EndPosition(fset *token.FileSet, end token.Pos) (_ token.Position) {
+ if f := fset.File(end); f != nil && int(end) > f.Base() {
+ return Position(f, end)
+ }
+
+ // Work around issue #57490.
+ if f := fset.File(end - 1); f != nil {
+ return Position(f, end)
+ }
+
+ return
+}
diff --git a/gopls/internal/lsp/safetoken/safetoken_test.go b/gopls/internal/lsp/safetoken/safetoken_test.go
new file mode 100644
index 000000000..afd569472
--- /dev/null
+++ b/gopls/internal/lsp/safetoken/safetoken_test.go
@@ -0,0 +1,121 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package safetoken_test
+
+import (
+ "fmt"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "os"
+ "testing"
+
+ "golang.org/x/tools/go/packages"
+ "golang.org/x/tools/gopls/internal/lsp/safetoken"
+ "golang.org/x/tools/internal/testenv"
+)
+
+func TestWorkaroundIssue57490(t *testing.T) {
+ // During error recovery the parser synthesizes various close
+ // tokens at EOF, causing the End position of incomplete
+ // syntax nodes, computed as Rbrace+len("}"), to be beyond EOF.
+ src := `package p; func f() { var x struct`
+ fset := token.NewFileSet()
+ file, _ := parser.ParseFile(fset, "a.go", src, 0)
+ tf := fset.File(file.Pos())
+
+ // Add another file to the FileSet.
+ file2, _ := parser.ParseFile(fset, "b.go", "package q", 0)
+
+ // This is the ambiguity of #57490...
+ if file.End() != file2.Pos() {
+ t.Errorf("file.End() %d != %d file2.Pos()", file.End(), file2.Pos())
+ }
+ // ...which causes these statements to panic.
+ if false {
+ tf.Offset(file.End()) // panic: invalid Pos value 36 (should be in [1, 35])
+ tf.Position(file.End()) // panic: invalid Pos value 36 (should be in [1, 35])
+ }
+
+ // The offset of the EOF position is the file size.
+ offset, err := safetoken.Offset(tf, file.End()-1)
+ if err != nil || offset != tf.Size() {
+ t.Errorf("Offset(EOF) = (%d, %v), want token.File.Size %d", offset, err, tf.Size())
+ }
+
+ // The offset of the file.End() position, 1 byte beyond EOF,
+ // is also the size of the file.
+ offset, err = safetoken.Offset(tf, file.End())
+ if err != nil || offset != tf.Size() {
+ t.Errorf("Offset(ast.File.End()) = (%d, %v), want token.File.Size %d", offset, err, tf.Size())
+ }
+
+ if got, want := safetoken.Position(tf, file.End()).String(), "a.go:1:35"; got != want {
+ t.Errorf("Position(ast.File.End()) = %s, want %s", got, want)
+ }
+
+ if got, want := safetoken.EndPosition(fset, file.End()).String(), "a.go:1:35"; got != want {
+ t.Errorf("EndPosition(ast.File.End()) = %s, want %s", got, want)
+ }
+
+ // Note that calling StartPosition on an end may yield the wrong file:
+ if got, want := safetoken.StartPosition(fset, file.End()).String(), "b.go:1:1"; got != want {
+ t.Errorf("StartPosition(ast.File.End()) = %s, want %s", got, want)
+ }
+}
+
+// To reduce the risk of panic, or bugs for which this package
+// provides a workaround, this test statically reports references to
+// forbidden methods of token.File or FileSet throughout gopls and
+// suggests alternatives.
+func TestGoplsSourceDoesNotCallTokenFileMethods(t *testing.T) {
+ testenv.NeedsGoPackages(t)
+
+ pkgs, err := packages.Load(&packages.Config{
+ Mode: packages.NeedName | packages.NeedModule | packages.NeedCompiledGoFiles | packages.NeedTypes | packages.NeedTypesInfo | packages.NeedSyntax | packages.NeedImports | packages.NeedDeps,
+ }, "go/token", "golang.org/x/tools/gopls/...")
+ if err != nil {
+ t.Fatal(err)
+ }
+ var tokenPkg *packages.Package
+ for _, pkg := range pkgs {
+ if pkg.PkgPath == "go/token" {
+ tokenPkg = pkg
+ break
+ }
+ }
+ if tokenPkg == nil {
+ t.Fatal("missing package go/token")
+ }
+
+ File := tokenPkg.Types.Scope().Lookup("File")
+ FileSet := tokenPkg.Types.Scope().Lookup("FileSet")
+
+ alternative := make(map[types.Object]string)
+ setAlternative := func(recv types.Object, old, new string) {
+ oldMethod, _, _ := types.LookupFieldOrMethod(recv.Type(), true, recv.Pkg(), old)
+ alternative[oldMethod] = new
+ }
+ setAlternative(File, "Offset", "safetoken.Offset")
+ setAlternative(File, "Position", "safetoken.Position")
+ setAlternative(File, "PositionFor", "safetoken.Position")
+ setAlternative(FileSet, "Position", "safetoken.StartPosition or EndPosition")
+ setAlternative(FileSet, "PositionFor", "safetoken.StartPosition or EndPosition")
+
+ for _, pkg := range pkgs {
+ switch pkg.PkgPath {
+ case "go/token", "golang.org/x/tools/gopls/internal/lsp/safetoken":
+ continue // allow calls within these packages
+ }
+
+ for ident, obj := range pkg.TypesInfo.Uses {
+ if alt, ok := alternative[obj]; ok {
+ posn := safetoken.StartPosition(pkg.Fset, ident.Pos())
+ fmt.Fprintf(os.Stderr, "%s: forbidden use of %v; use %s instead.\n", posn, obj, alt)
+ t.Fail()
+ }
+ }
+ }
+}
diff --git a/gopls/internal/lsp/selection_range.go b/gopls/internal/lsp/selection_range.go
new file mode 100644
index 000000000..5b3fd31e9
--- /dev/null
+++ b/gopls/internal/lsp/selection_range.go
@@ -0,0 +1,69 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsp
+
+import (
+ "context"
+
+ "golang.org/x/tools/go/ast/astutil"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/internal/event"
+)
+
+// selectionRange defines the textDocument/selectionRange feature,
+// which, given a list of positions within a file,
+// reports a linked list of enclosing syntactic blocks, innermost first.
+//
+// See https://microsoft.github.io/language-server-protocol/specifications/lsp/3.17/specification/#textDocument_selectionRange.
+//
+// This feature can be used by a client to implement "expand selection" in a
+// language-aware fashion. Multiple input positions are supported to allow
+// for multiple cursors, and the entire path up to the whole document is
+// returned for each cursor to avoid multiple round-trips when the user is
+// likely to issue this command multiple times in quick succession.
+func (s *Server) selectionRange(ctx context.Context, params *protocol.SelectionRangeParams) ([]protocol.SelectionRange, error) {
+ ctx, done := event.Start(ctx, "lsp.Server.documentSymbol")
+ defer done()
+
+ snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.UnknownKind)
+ defer release()
+ if !ok {
+ return nil, err
+ }
+
+ pgf, err := snapshot.ParseGo(ctx, fh, source.ParseFull)
+ if err != nil {
+ return nil, err
+ }
+
+ result := make([]protocol.SelectionRange, len(params.Positions))
+ for i, protocolPos := range params.Positions {
+ pos, err := pgf.PositionPos(protocolPos)
+ if err != nil {
+ return nil, err
+ }
+
+ path, _ := astutil.PathEnclosingInterval(pgf.File, pos, pos)
+
+ tail := &result[i] // tail of the Parent linked list, built head first
+
+ for j, node := range path {
+ rng, err := pgf.NodeRange(node)
+ if err != nil {
+ return nil, err
+ }
+
+ // Add node to tail.
+ if j > 0 {
+ tail.Parent = &protocol.SelectionRange{}
+ tail = tail.Parent
+ }
+ tail.Range = rng
+ }
+ }
+
+ return result, nil
+}
diff --git a/gopls/internal/lsp/semantic.go b/gopls/internal/lsp/semantic.go
new file mode 100644
index 000000000..46f9483c6
--- /dev/null
+++ b/gopls/internal/lsp/semantic.go
@@ -0,0 +1,1003 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsp
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "go/ast"
+ "go/token"
+ "go/types"
+ "log"
+ "path/filepath"
+ "sort"
+ "strings"
+ "time"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/safetoken"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/lsp/template"
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/typeparams"
+)
+
+// The LSP says that errors for the semantic token requests should only be returned
+// for exceptions (a word not otherwise defined). This code treats a too-large file
+// as an exception. On parse errors, the code does what it can.
+
+// reject full semantic token requests for large files
+const maxFullFileSize int = 100000
+
+// to control comprehensive logging of decisions (gopls semtok foo.go > /dev/null shows log output)
+// semDebug should NEVER be true in checked-in code
+const semDebug = false
+
+func (s *Server) semanticTokensFull(ctx context.Context, p *protocol.SemanticTokensParams) (*protocol.SemanticTokens, error) {
+ ret, err := s.computeSemanticTokens(ctx, p.TextDocument, nil)
+ return ret, err
+}
+
+func (s *Server) semanticTokensFullDelta(ctx context.Context, p *protocol.SemanticTokensDeltaParams) (interface{}, error) {
+ return nil, fmt.Errorf("implement SemanticTokensFullDelta")
+}
+
+func (s *Server) semanticTokensRange(ctx context.Context, p *protocol.SemanticTokensRangeParams) (*protocol.SemanticTokens, error) {
+ ret, err := s.computeSemanticTokens(ctx, p.TextDocument, &p.Range)
+ return ret, err
+}
+
+func (s *Server) semanticTokensRefresh(ctx context.Context) error {
+ // in the code, but not in the protocol spec
+ return fmt.Errorf("implement SemanticTokensRefresh")
+}
+
+func (s *Server) computeSemanticTokens(ctx context.Context, td protocol.TextDocumentIdentifier, rng *protocol.Range) (*protocol.SemanticTokens, error) {
+ ans := protocol.SemanticTokens{
+ Data: []uint32{},
+ }
+ snapshot, fh, ok, release, err := s.beginFileRequest(ctx, td.URI, source.UnknownKind)
+ defer release()
+ if !ok {
+ return nil, err
+ }
+ vv := snapshot.View()
+ if !vv.Options().SemanticTokens {
+ // return an error, so if the option changes
+ // the client won't remember the wrong answer
+ return nil, fmt.Errorf("semantictokens are disabled")
+ }
+ kind := snapshot.View().FileKind(fh)
+ if kind == source.Tmpl {
+ // this is a little cumbersome to avoid both exporting 'encoded' and its methods
+ // and to avoid import cycles
+ e := &encoded{
+ ctx: ctx,
+ metadataSource: snapshot,
+ rng: rng,
+ tokTypes: s.session.Options().SemanticTypes,
+ tokMods: s.session.Options().SemanticMods,
+ }
+ add := func(line, start uint32, len uint32) {
+ e.add(line, start, len, tokMacro, nil)
+ }
+ data := func() []uint32 {
+ return e.Data()
+ }
+ return template.SemanticTokens(ctx, snapshot, fh.URI(), add, data)
+ }
+ if kind != source.Go {
+ return nil, nil
+ }
+ pkg, pgf, err := source.PackageForFile(ctx, snapshot, fh.URI(), source.NarrowestPackage)
+ if err != nil {
+ return nil, err
+ }
+
+ if rng == nil && len(pgf.Src) > maxFullFileSize {
+ err := fmt.Errorf("semantic tokens: file %s too large for full (%d>%d)",
+ fh.URI().Filename(), len(pgf.Src), maxFullFileSize)
+ return nil, err
+ }
+ e := &encoded{
+ ctx: ctx,
+ metadataSource: snapshot,
+ pgf: pgf,
+ rng: rng,
+ ti: pkg.GetTypesInfo(),
+ pkg: pkg,
+ fset: pkg.FileSet(),
+ tokTypes: s.session.Options().SemanticTypes,
+ tokMods: s.session.Options().SemanticMods,
+ noStrings: vv.Options().NoSemanticString,
+ noNumbers: vv.Options().NoSemanticNumber,
+ }
+ if err := e.init(); err != nil {
+ // e.init should never return an error, unless there's some
+ // seemingly impossible race condition
+ return nil, err
+ }
+ e.semantics()
+ ans.Data = e.Data()
+ // For delta requests, but we've never seen any.
+ ans.ResultID = fmt.Sprintf("%v", time.Now())
+ return &ans, nil
+}
+
+func (e *encoded) semantics() {
+ f := e.pgf.File
+ // may not be in range, but harmless
+ e.token(f.Package, len("package"), tokKeyword, nil)
+ e.token(f.Name.NamePos, len(f.Name.Name), tokNamespace, nil)
+ inspect := func(n ast.Node) bool {
+ return e.inspector(n)
+ }
+ for _, d := range f.Decls {
+ // only look at the decls that overlap the range
+ start, end := d.Pos(), d.End()
+ if end <= e.start || start >= e.end {
+ continue
+ }
+ ast.Inspect(d, inspect)
+ }
+ for _, cg := range f.Comments {
+ for _, c := range cg.List {
+ if !strings.Contains(c.Text, "\n") {
+ e.token(c.Pos(), len(c.Text), tokComment, nil)
+ continue
+ }
+ e.multiline(c.Pos(), c.End(), c.Text, tokComment)
+ }
+ }
+}
+
+type tokenType string
+
+const (
+ tokNamespace tokenType = "namespace"
+ tokType tokenType = "type"
+ tokInterface tokenType = "interface"
+ tokTypeParam tokenType = "typeParameter"
+ tokParameter tokenType = "parameter"
+ tokVariable tokenType = "variable"
+ tokMethod tokenType = "method"
+ tokFunction tokenType = "function"
+ tokKeyword tokenType = "keyword"
+ tokComment tokenType = "comment"
+ tokString tokenType = "string"
+ tokNumber tokenType = "number"
+ tokOperator tokenType = "operator"
+
+ tokMacro tokenType = "macro" // for templates
+)
+
+func (e *encoded) token(start token.Pos, leng int, typ tokenType, mods []string) {
+ if !start.IsValid() {
+ // This is not worth reporting. TODO(pjw): does it still happen?
+ return
+ }
+ if start >= e.end || start+token.Pos(leng) <= e.start {
+ return
+ }
+ // want a line and column from start (in LSP coordinates). Ignore line directives.
+ lspRange, err := e.pgf.PosRange(start, start+token.Pos(leng))
+ if err != nil {
+ event.Error(e.ctx, "failed to convert to range", err)
+ return
+ }
+ if lspRange.End.Line != lspRange.Start.Line {
+ // this happens if users are typing at the end of the file, but report nothing
+ return
+ }
+ // token is all on one line
+ length := lspRange.End.Character - lspRange.Start.Character
+ e.add(lspRange.Start.Line, lspRange.Start.Character, length, typ, mods)
+}
+
+func (e *encoded) add(line, start uint32, len uint32, tok tokenType, mod []string) {
+ x := semItem{line, start, len, tok, mod}
+ e.items = append(e.items, x)
+}
+
+// semItem represents a token found walking the parse tree
+type semItem struct {
+ line, start uint32
+ len uint32
+ typeStr tokenType
+ mods []string
+}
+
+type encoded struct {
+ // the generated data
+ items []semItem
+
+ noStrings bool
+ noNumbers bool
+
+ ctx context.Context
+ // metadataSource is used to resolve imports
+ metadataSource source.MetadataSource
+ tokTypes, tokMods []string
+ pgf *source.ParsedGoFile
+ rng *protocol.Range
+ ti *types.Info
+ pkg source.Package
+ fset *token.FileSet
+ // allowed starting and ending token.Pos, set by init
+ // used to avoid looking at declarations not in range
+ start, end token.Pos
+ // path from the root of the parse tree, used for debugging
+ stack []ast.Node
+}
+
+// convert the stack to a string, for debugging
+func (e *encoded) strStack() string {
+ msg := []string{"["}
+ for i := len(e.stack) - 1; i >= 0; i-- {
+ s := e.stack[i]
+ msg = append(msg, fmt.Sprintf("%T", s)[5:])
+ }
+ if len(e.stack) > 0 {
+ loc := e.stack[len(e.stack)-1].Pos()
+ if _, err := safetoken.Offset(e.pgf.Tok, loc); err != nil {
+ msg = append(msg, fmt.Sprintf("invalid position %v for %s", loc, e.pgf.URI))
+ } else {
+ add := safetoken.Position(e.pgf.Tok, loc)
+ nm := filepath.Base(add.Filename)
+ msg = append(msg, fmt.Sprintf("(%s:%d,col:%d)", nm, add.Line, add.Column))
+ }
+ }
+ msg = append(msg, "]")
+ return strings.Join(msg, " ")
+}
+
+// find the line in the source
+func (e *encoded) srcLine(x ast.Node) string {
+ file := e.pgf.Tok
+ line := file.Line(x.Pos())
+ start, err := safetoken.Offset(file, file.LineStart(line))
+ if err != nil {
+ return ""
+ }
+ end := start
+ for ; end < len(e.pgf.Src) && e.pgf.Src[end] != '\n'; end++ {
+
+ }
+ ans := e.pgf.Src[start:end]
+ return string(ans)
+}
+
+func (e *encoded) inspector(n ast.Node) bool {
+ pop := func() {
+ e.stack = e.stack[:len(e.stack)-1]
+ }
+ if n == nil {
+ pop()
+ return true
+ }
+ e.stack = append(e.stack, n)
+ switch x := n.(type) {
+ case *ast.ArrayType:
+ case *ast.AssignStmt:
+ e.token(x.TokPos, len(x.Tok.String()), tokOperator, nil)
+ case *ast.BasicLit:
+ if strings.Contains(x.Value, "\n") {
+ // has to be a string.
+ e.multiline(x.Pos(), x.End(), x.Value, tokString)
+ break
+ }
+ ln := len(x.Value)
+ what := tokNumber
+ if x.Kind == token.STRING {
+ what = tokString
+ }
+ e.token(x.Pos(), ln, what, nil)
+ case *ast.BinaryExpr:
+ e.token(x.OpPos, len(x.Op.String()), tokOperator, nil)
+ case *ast.BlockStmt:
+ case *ast.BranchStmt:
+ e.token(x.TokPos, len(x.Tok.String()), tokKeyword, nil)
+ // There's no semantic encoding for labels
+ case *ast.CallExpr:
+ if x.Ellipsis != token.NoPos {
+ e.token(x.Ellipsis, len("..."), tokOperator, nil)
+ }
+ case *ast.CaseClause:
+ iam := "case"
+ if x.List == nil {
+ iam = "default"
+ }
+ e.token(x.Case, len(iam), tokKeyword, nil)
+ case *ast.ChanType:
+ // chan | chan <- | <- chan
+ switch {
+ case x.Arrow == token.NoPos:
+ e.token(x.Begin, len("chan"), tokKeyword, nil)
+ case x.Arrow == x.Begin:
+ e.token(x.Arrow, 2, tokOperator, nil)
+ pos := e.findKeyword("chan", x.Begin+2, x.Value.Pos())
+ e.token(pos, len("chan"), tokKeyword, nil)
+ case x.Arrow != x.Begin:
+ e.token(x.Begin, len("chan"), tokKeyword, nil)
+ e.token(x.Arrow, 2, tokOperator, nil)
+ }
+ case *ast.CommClause:
+ iam := len("case")
+ if x.Comm == nil {
+ iam = len("default")
+ }
+ e.token(x.Case, iam, tokKeyword, nil)
+ case *ast.CompositeLit:
+ case *ast.DeclStmt:
+ case *ast.DeferStmt:
+ e.token(x.Defer, len("defer"), tokKeyword, nil)
+ case *ast.Ellipsis:
+ e.token(x.Ellipsis, len("..."), tokOperator, nil)
+ case *ast.EmptyStmt:
+ case *ast.ExprStmt:
+ case *ast.Field:
+ case *ast.FieldList:
+ case *ast.ForStmt:
+ e.token(x.For, len("for"), tokKeyword, nil)
+ case *ast.FuncDecl:
+ case *ast.FuncLit:
+ case *ast.FuncType:
+ if x.Func != token.NoPos {
+ e.token(x.Func, len("func"), tokKeyword, nil)
+ }
+ case *ast.GenDecl:
+ e.token(x.TokPos, len(x.Tok.String()), tokKeyword, nil)
+ case *ast.GoStmt:
+ e.token(x.Go, len("go"), tokKeyword, nil)
+ case *ast.Ident:
+ e.ident(x)
+ case *ast.IfStmt:
+ e.token(x.If, len("if"), tokKeyword, nil)
+ if x.Else != nil {
+ // x.Body.End() or x.Body.End()+1, not that it matters
+ pos := e.findKeyword("else", x.Body.End(), x.Else.Pos())
+ e.token(pos, len("else"), tokKeyword, nil)
+ }
+ case *ast.ImportSpec:
+ e.importSpec(x)
+ pop()
+ return false
+ case *ast.IncDecStmt:
+ e.token(x.TokPos, len(x.Tok.String()), tokOperator, nil)
+ case *ast.IndexExpr:
+ case *typeparams.IndexListExpr:
+ case *ast.InterfaceType:
+ e.token(x.Interface, len("interface"), tokKeyword, nil)
+ case *ast.KeyValueExpr:
+ case *ast.LabeledStmt:
+ case *ast.MapType:
+ e.token(x.Map, len("map"), tokKeyword, nil)
+ case *ast.ParenExpr:
+ case *ast.RangeStmt:
+ e.token(x.For, len("for"), tokKeyword, nil)
+ // x.TokPos == token.NoPos is legal (for range foo {})
+ offset := x.TokPos
+ if offset == token.NoPos {
+ offset = x.For
+ }
+ pos := e.findKeyword("range", offset, x.X.Pos())
+ e.token(pos, len("range"), tokKeyword, nil)
+ case *ast.ReturnStmt:
+ e.token(x.Return, len("return"), tokKeyword, nil)
+ case *ast.SelectStmt:
+ e.token(x.Select, len("select"), tokKeyword, nil)
+ case *ast.SelectorExpr:
+ case *ast.SendStmt:
+ e.token(x.Arrow, len("<-"), tokOperator, nil)
+ case *ast.SliceExpr:
+ case *ast.StarExpr:
+ e.token(x.Star, len("*"), tokOperator, nil)
+ case *ast.StructType:
+ e.token(x.Struct, len("struct"), tokKeyword, nil)
+ case *ast.SwitchStmt:
+ e.token(x.Switch, len("switch"), tokKeyword, nil)
+ case *ast.TypeAssertExpr:
+ if x.Type == nil {
+ pos := e.findKeyword("type", x.Lparen, x.Rparen)
+ e.token(pos, len("type"), tokKeyword, nil)
+ }
+ case *ast.TypeSpec:
+ case *ast.TypeSwitchStmt:
+ e.token(x.Switch, len("switch"), tokKeyword, nil)
+ case *ast.UnaryExpr:
+ e.token(x.OpPos, len(x.Op.String()), tokOperator, nil)
+ case *ast.ValueSpec:
+ // things only seen with parsing or type errors, so ignore them
+ case *ast.BadDecl, *ast.BadExpr, *ast.BadStmt:
+ return true
+ // not going to see these
+ case *ast.File, *ast.Package:
+ e.unexpected(fmt.Sprintf("implement %T %s", x, safetoken.Position(e.pgf.Tok, x.Pos())))
+ // other things we knowingly ignore
+ case *ast.Comment, *ast.CommentGroup:
+ pop()
+ return false
+ default:
+ e.unexpected(fmt.Sprintf("failed to implement %T", x))
+ }
+ return true
+}
+
+func (e *encoded) ident(x *ast.Ident) {
+ if e.ti == nil {
+ what, mods := e.unkIdent(x)
+ if what != "" {
+ e.token(x.Pos(), len(x.String()), what, mods)
+ }
+ if semDebug {
+ log.Printf(" nil %s/nil/nil %q %v %s", x.String(), what, mods, e.strStack())
+ }
+ return
+ }
+ def := e.ti.Defs[x]
+ if def != nil {
+ what, mods := e.definitionFor(x, def)
+ if what != "" {
+ e.token(x.Pos(), len(x.String()), what, mods)
+ }
+ if semDebug {
+ log.Printf(" for %s/%T/%T got %s %v (%s)", x.String(), def, def.Type(), what, mods, e.strStack())
+ }
+ return
+ }
+ use := e.ti.Uses[x]
+ tok := func(pos token.Pos, lng int, tok tokenType, mods []string) {
+ e.token(pos, lng, tok, mods)
+ q := "nil"
+ if use != nil {
+ q = fmt.Sprintf("%T", use.Type())
+ }
+ if semDebug {
+ log.Printf(" use %s/%T/%s got %s %v (%s)", x.String(), use, q, tok, mods, e.strStack())
+ }
+ }
+
+ switch y := use.(type) {
+ case nil:
+ what, mods := e.unkIdent(x)
+ if what != "" {
+ tok(x.Pos(), len(x.String()), what, mods)
+ } else if semDebug {
+ // tok() wasn't called, so didn't log
+ log.Printf(" nil %s/%T/nil %q %v (%s)", x.String(), use, what, mods, e.strStack())
+ }
+ return
+ case *types.Builtin:
+ tok(x.NamePos, len(x.Name), tokFunction, []string{"defaultLibrary"})
+ case *types.Const:
+ mods := []string{"readonly"}
+ tt := y.Type()
+ if _, ok := tt.(*types.Basic); ok {
+ tok(x.Pos(), len(x.String()), tokVariable, mods)
+ break
+ }
+ if ttx, ok := tt.(*types.Named); ok {
+ if x.String() == "iota" {
+ e.unexpected(fmt.Sprintf("iota:%T", ttx))
+ }
+ if _, ok := ttx.Underlying().(*types.Basic); ok {
+ tok(x.Pos(), len(x.String()), tokVariable, mods)
+ break
+ }
+ e.unexpected(fmt.Sprintf("%q/%T", x.String(), tt))
+ }
+ // can this happen? Don't think so
+ e.unexpected(fmt.Sprintf("%s %T %#v", x.String(), tt, tt))
+ case *types.Func:
+ tok(x.Pos(), len(x.Name), tokFunction, nil)
+ case *types.Label:
+ // nothing to map it to
+ case *types.Nil:
+ // nil is a predeclared identifier
+ tok(x.Pos(), len("nil"), tokVariable, []string{"readonly", "defaultLibrary"})
+ case *types.PkgName:
+ tok(x.Pos(), len(x.Name), tokNamespace, nil)
+ case *types.TypeName: // could be a tokTpeParam
+ var mods []string
+ if _, ok := y.Type().(*types.Basic); ok {
+ mods = []string{"defaultLibrary"}
+ } else if _, ok := y.Type().(*typeparams.TypeParam); ok {
+ tok(x.Pos(), len(x.String()), tokTypeParam, mods)
+ break
+ }
+ tok(x.Pos(), len(x.String()), tokType, mods)
+ case *types.Var:
+ if isSignature(y) {
+ tok(x.Pos(), len(x.Name), tokFunction, nil)
+ } else if e.isParam(use.Pos()) {
+ // variable, unless use.pos is the pos of a Field in an ancestor FuncDecl
+ // or FuncLit and then it's a parameter
+ tok(x.Pos(), len(x.Name), tokParameter, nil)
+ } else {
+ tok(x.Pos(), len(x.Name), tokVariable, nil)
+ }
+
+ default:
+ // can't happen
+ if use == nil {
+ msg := fmt.Sprintf("%#v %#v %#v", x, e.ti.Defs[x], e.ti.Uses[x])
+ e.unexpected(msg)
+ }
+ if use.Type() != nil {
+ e.unexpected(fmt.Sprintf("%s %T/%T,%#v", x.String(), use, use.Type(), use))
+ } else {
+ e.unexpected(fmt.Sprintf("%s %T", x.String(), use))
+ }
+ }
+}
+
+func (e *encoded) isParam(pos token.Pos) bool {
+ for i := len(e.stack) - 1; i >= 0; i-- {
+ switch n := e.stack[i].(type) {
+ case *ast.FuncDecl:
+ for _, f := range n.Type.Params.List {
+ for _, id := range f.Names {
+ if id.Pos() == pos {
+ return true
+ }
+ }
+ }
+ case *ast.FuncLit:
+ for _, f := range n.Type.Params.List {
+ for _, id := range f.Names {
+ if id.Pos() == pos {
+ return true
+ }
+ }
+ }
+ }
+ }
+ return false
+}
+
+func isSignature(use types.Object) bool {
+ if _, ok := use.(*types.Var); !ok {
+ return false
+ }
+ v := use.Type()
+ if v == nil {
+ return false
+ }
+ if _, ok := v.(*types.Signature); ok {
+ return true
+ }
+ return false
+}
+
+// both e.ti.Defs and e.ti.Uses are nil. use the parse stack.
+// a lot of these only happen when the package doesn't compile
+// but in that case it is all best-effort from the parse tree
+func (e *encoded) unkIdent(x *ast.Ident) (tokenType, []string) {
+ def := []string{"definition"}
+ n := len(e.stack) - 2 // parent of Ident
+ if n < 0 {
+ e.unexpected("no stack?")
+ return "", nil
+ }
+ switch nd := e.stack[n].(type) {
+ case *ast.BinaryExpr, *ast.UnaryExpr, *ast.ParenExpr, *ast.StarExpr,
+ *ast.IncDecStmt, *ast.SliceExpr, *ast.ExprStmt, *ast.IndexExpr,
+ *ast.ReturnStmt, *ast.ChanType, *ast.SendStmt,
+ *ast.ForStmt, // possibly incomplete
+ *ast.IfStmt, /* condition */
+ *ast.KeyValueExpr: // either key or value
+ return tokVariable, nil
+ case *typeparams.IndexListExpr:
+ return tokVariable, nil
+ case *ast.Ellipsis:
+ return tokType, nil
+ case *ast.CaseClause:
+ if n-2 >= 0 {
+ if _, ok := e.stack[n-2].(*ast.TypeSwitchStmt); ok {
+ return tokType, nil
+ }
+ }
+ return tokVariable, nil
+ case *ast.ArrayType:
+ if x == nd.Len {
+ // or maybe a Type Param, but we can't just from the parse tree
+ return tokVariable, nil
+ } else {
+ return tokType, nil
+ }
+ case *ast.MapType:
+ return tokType, nil
+ case *ast.CallExpr:
+ if x == nd.Fun {
+ return tokFunction, nil
+ }
+ return tokVariable, nil
+ case *ast.SwitchStmt:
+ return tokVariable, nil
+ case *ast.TypeAssertExpr:
+ if x == nd.X {
+ return tokVariable, nil
+ } else if x == nd.Type {
+ return tokType, nil
+ }
+ case *ast.ValueSpec:
+ for _, p := range nd.Names {
+ if p == x {
+ return tokVariable, def
+ }
+ }
+ for _, p := range nd.Values {
+ if p == x {
+ return tokVariable, nil
+ }
+ }
+ return tokType, nil
+ case *ast.SelectorExpr: // e.ti.Selections[nd] is nil, so no help
+ if n-1 >= 0 {
+ if ce, ok := e.stack[n-1].(*ast.CallExpr); ok {
+ // ... CallExpr SelectorExpr Ident (_.x())
+ if ce.Fun == nd && nd.Sel == x {
+ return tokFunction, nil
+ }
+ }
+ }
+ return tokVariable, nil
+ case *ast.AssignStmt:
+ for _, p := range nd.Lhs {
+ // x := ..., or x = ...
+ if p == x {
+ if nd.Tok != token.DEFINE {
+ def = nil
+ }
+ return tokVariable, def // '_' in _ = ...
+ }
+ }
+ // RHS, = x
+ return tokVariable, nil
+ case *ast.TypeSpec: // it's a type if it is either the Name or the Type
+ if x == nd.Type {
+ def = nil
+ }
+ return tokType, def
+ case *ast.Field:
+ // ident could be type in a field, or a method in an interface type, or a variable
+ if x == nd.Type {
+ return tokType, nil
+ }
+ if n-2 >= 0 {
+ _, okit := e.stack[n-2].(*ast.InterfaceType)
+ _, okfl := e.stack[n-1].(*ast.FieldList)
+ if okit && okfl {
+ return tokMethod, def
+ }
+ }
+ return tokVariable, nil
+ case *ast.LabeledStmt, *ast.BranchStmt:
+ // nothing to report
+ case *ast.CompositeLit:
+ if nd.Type == x {
+ return tokType, nil
+ }
+ return tokVariable, nil
+ case *ast.RangeStmt:
+ if nd.Tok != token.DEFINE {
+ def = nil
+ }
+ return tokVariable, def
+ case *ast.FuncDecl:
+ return tokFunction, def
+ default:
+ msg := fmt.Sprintf("%T undexpected: %s %s%q", nd, x.Name, e.strStack(), e.srcLine(x))
+ e.unexpected(msg)
+ }
+ return "", nil
+}
+
+func isDeprecated(n *ast.CommentGroup) bool {
+ if n == nil {
+ return false
+ }
+ for _, c := range n.List {
+ if strings.HasPrefix(c.Text, "// Deprecated") {
+ return true
+ }
+ }
+ return false
+}
+
+func (e *encoded) definitionFor(x *ast.Ident, def types.Object) (tokenType, []string) {
+ // PJW: def == types.Label? probably a nothing
+ // PJW: look into replacing these syntactic tests with types more generally
+ mods := []string{"definition"}
+ for i := len(e.stack) - 1; i >= 0; i-- {
+ s := e.stack[i]
+ switch y := s.(type) {
+ case *ast.AssignStmt, *ast.RangeStmt:
+ if x.Name == "_" {
+ return "", nil // not really a variable
+ }
+ return tokVariable, mods
+ case *ast.GenDecl:
+ if isDeprecated(y.Doc) {
+ mods = append(mods, "deprecated")
+ }
+ if y.Tok == token.CONST {
+ mods = append(mods, "readonly")
+ }
+ return tokVariable, mods
+ case *ast.FuncDecl:
+ // If x is immediately under a FuncDecl, it is a function or method
+ if i == len(e.stack)-2 {
+ if isDeprecated(y.Doc) {
+ mods = append(mods, "deprecated")
+ }
+ if y.Recv != nil {
+ return tokMethod, mods
+ }
+ return tokFunction, mods
+ }
+ // if x < ... < FieldList < FuncDecl, this is the receiver, a variable
+ // PJW: maybe not. it might be a typeparameter in the type of the receiver
+ if _, ok := e.stack[i+1].(*ast.FieldList); ok {
+ if _, ok := def.(*types.TypeName); ok {
+ return tokTypeParam, mods
+ }
+ return tokVariable, nil
+ }
+ // if x < ... < FieldList < FuncType < FuncDecl, this is a param
+ return tokParameter, mods
+ case *ast.FuncType: // is it in the TypeParams?
+ if isTypeParam(x, y) {
+ return tokTypeParam, mods
+ }
+ return tokParameter, mods
+ case *ast.InterfaceType:
+ return tokMethod, mods
+ case *ast.TypeSpec:
+ // GenDecl/Typespec/FuncType/FieldList/Field/Ident
+ // (type A func(b uint64)) (err error)
+ // b and err should not be tokType, but tokVaraible
+ // and in GenDecl/TpeSpec/StructType/FieldList/Field/Ident
+ // (type A struct{b uint64}
+ // but on type B struct{C}), C is a type, but is not being defined.
+ // GenDecl/TypeSpec/FieldList/Field/Ident is a typeParam
+ if _, ok := e.stack[i+1].(*ast.FieldList); ok {
+ return tokTypeParam, mods
+ }
+ fldm := e.stack[len(e.stack)-2]
+ if fld, ok := fldm.(*ast.Field); ok {
+ // if len(fld.names) == 0 this is a tokType, being used
+ if len(fld.Names) == 0 {
+ return tokType, nil
+ }
+ return tokVariable, mods
+ }
+ return tokType, mods
+ }
+ }
+ // can't happen
+ msg := fmt.Sprintf("failed to find the decl for %s", safetoken.Position(e.pgf.Tok, x.Pos()))
+ e.unexpected(msg)
+ return "", []string{""}
+}
+
+func isTypeParam(x *ast.Ident, y *ast.FuncType) bool {
+ tp := typeparams.ForFuncType(y)
+ if tp == nil {
+ return false
+ }
+ for _, p := range tp.List {
+ for _, n := range p.Names {
+ if x == n {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+func (e *encoded) multiline(start, end token.Pos, val string, tok tokenType) {
+ f := e.fset.File(start)
+ // the hard part is finding the lengths of lines. include the \n
+ leng := func(line int) int {
+ n := f.LineStart(line)
+ if line >= f.LineCount() {
+ return f.Size() - int(n)
+ }
+ return int(f.LineStart(line+1) - n)
+ }
+ spos := safetoken.StartPosition(e.fset, start)
+ epos := safetoken.EndPosition(e.fset, end)
+ sline := spos.Line
+ eline := epos.Line
+ // first line is from spos.Column to end
+ e.token(start, leng(sline)-spos.Column, tok, nil) // leng(sline)-1 - (spos.Column-1)
+ for i := sline + 1; i < eline; i++ {
+ // intermediate lines are from 1 to end
+ e.token(f.LineStart(i), leng(i)-1, tok, nil) // avoid the newline
+ }
+ // last line is from 1 to epos.Column
+ e.token(f.LineStart(eline), epos.Column-1, tok, nil) // columns are 1-based
+}
+
+// findKeyword finds a keyword rather than guessing its location
+func (e *encoded) findKeyword(keyword string, start, end token.Pos) token.Pos {
+ offset := int(start) - e.pgf.Tok.Base()
+ last := int(end) - e.pgf.Tok.Base()
+ buf := e.pgf.Src
+ idx := bytes.Index(buf[offset:last], []byte(keyword))
+ if idx != -1 {
+ return start + token.Pos(idx)
+ }
+ //(in unparsable programs: type _ <-<-chan int)
+ e.unexpected(fmt.Sprintf("not found:%s %v", keyword, safetoken.StartPosition(e.fset, start)))
+ return token.NoPos
+}
+
+func (e *encoded) init() error {
+ e.start = token.Pos(e.pgf.Tok.Base())
+ e.end = e.start + token.Pos(e.pgf.Tok.Size())
+ if e.rng == nil {
+ return nil
+ }
+ span, err := e.pgf.Mapper.RangeSpan(*e.rng)
+ if err != nil {
+ return fmt.Errorf("range span (%w) error for %s", err, e.pgf.File.Name)
+ }
+ e.end = e.start + token.Pos(span.End().Offset())
+ e.start += token.Pos(span.Start().Offset())
+ return nil
+}
+
+func (e *encoded) Data() []uint32 {
+ // binary operators, at least, will be out of order
+ sort.Slice(e.items, func(i, j int) bool {
+ if e.items[i].line != e.items[j].line {
+ return e.items[i].line < e.items[j].line
+ }
+ return e.items[i].start < e.items[j].start
+ })
+ typeMap, modMap := e.maps()
+ // each semantic token needs five values
+ // (see Integer Encoding for Tokens in the LSP spec)
+ x := make([]uint32, 5*len(e.items))
+ var j int
+ var last semItem
+ for i := 0; i < len(e.items); i++ {
+ item := e.items[i]
+ typ, ok := typeMap[item.typeStr]
+ if !ok {
+ continue // client doesn't want typeStr
+ }
+ if item.typeStr == tokString && e.noStrings {
+ continue
+ }
+ if item.typeStr == tokNumber && e.noNumbers {
+ continue
+ }
+ if j == 0 {
+ x[0] = e.items[0].line
+ } else {
+ x[j] = item.line - last.line
+ }
+ x[j+1] = item.start
+ if j > 0 && x[j] == 0 {
+ x[j+1] = item.start - last.start
+ }
+ x[j+2] = item.len
+ x[j+3] = uint32(typ)
+ mask := 0
+ for _, s := range item.mods {
+ // modMap[s] is 0 if the client doesn't want this modifier
+ mask |= modMap[s]
+ }
+ x[j+4] = uint32(mask)
+ j += 5
+ last = item
+ }
+ return x[:j]
+}
+
+func (e *encoded) importSpec(d *ast.ImportSpec) {
+ // a local package name or the last component of the Path
+ if d.Name != nil {
+ nm := d.Name.String()
+ if nm != "_" && nm != "." {
+ e.token(d.Name.Pos(), len(nm), tokNamespace, nil)
+ }
+ return // don't mark anything for . or _
+ }
+ importPath := source.UnquoteImportPath(d)
+ if importPath == "" {
+ return
+ }
+ // Import strings are implementation defined. Try to match with parse information.
+ depID := e.pkg.Metadata().DepsByImpPath[importPath]
+ if depID == "" {
+ return
+ }
+ depMD := e.metadataSource.Metadata(depID)
+ if depMD == nil {
+ // unexpected, but impact is that maybe some import is not colored
+ return
+ }
+ // Check whether the original literal contains the package's declared name.
+ j := strings.LastIndex(d.Path.Value, string(depMD.Name))
+ if j == -1 {
+ // Package name does not match import path, so there is nothing to report.
+ return
+ }
+ // Report virtual declaration at the position of the substring.
+ start := d.Path.Pos() + token.Pos(j)
+ e.token(start, len(depMD.Name), tokNamespace, nil)
+}
+
+// log unexpected state
+func (e *encoded) unexpected(msg string) {
+ if semDebug {
+ panic(msg)
+ }
+ event.Error(e.ctx, e.strStack(), errors.New(msg))
+}
+
+// SemType returns a string equivalent of the type, for gopls semtok
+func SemType(n int) string {
+ tokTypes := SemanticTypes()
+ tokMods := SemanticModifiers()
+ if n >= 0 && n < len(tokTypes) {
+ return tokTypes[n]
+ }
+ // not found for some reason
+ return fmt.Sprintf("?%d[%d,%d]?", n, len(tokTypes), len(tokMods))
+}
+
+// SemMods returns the []string equivalent of the mods, for gopls semtok.
+func SemMods(n int) []string {
+ tokMods := SemanticModifiers()
+ mods := []string{}
+ for i := 0; i < len(tokMods); i++ {
+ if (n & (1 << uint(i))) != 0 {
+ mods = append(mods, tokMods[i])
+ }
+ }
+ return mods
+}
+
+func (e *encoded) maps() (map[tokenType]int, map[string]int) {
+ tmap := make(map[tokenType]int)
+ mmap := make(map[string]int)
+ for i, t := range e.tokTypes {
+ tmap[tokenType(t)] = i
+ }
+ for i, m := range e.tokMods {
+ mmap[m] = 1 << uint(i) // go 1.12 compatibility
+ }
+ return tmap, mmap
+}
+
+// SemanticTypes to use in case there is no client, as in the command line, or tests
+func SemanticTypes() []string {
+ return semanticTypes[:]
+}
+
+// SemanticModifiers to use in case there is no client.
+func SemanticModifiers() []string {
+ return semanticModifiers[:]
+}
+
+var (
+ semanticTypes = [...]string{
+ "namespace", "type", "class", "enum", "interface",
+ "struct", "typeParameter", "parameter", "variable", "property", "enumMember",
+ "event", "function", "method", "macro", "keyword", "modifier", "comment",
+ "string", "number", "regexp", "operator",
+ }
+ semanticModifiers = [...]string{
+ "declaration", "definition", "readonly", "static",
+ "deprecated", "abstract", "async", "modification", "documentation", "defaultLibrary",
+ }
+)
diff --git a/gopls/internal/lsp/server.go b/gopls/internal/lsp/server.go
new file mode 100644
index 000000000..13441c40b
--- /dev/null
+++ b/gopls/internal/lsp/server.go
@@ -0,0 +1,158 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run ./helper -d protocol/tsserver.go -o server_gen.go -u .
+
+// Package lsp implements LSP for gopls.
+package lsp
+
+import (
+ "context"
+ "fmt"
+ "sync"
+
+ "golang.org/x/tools/gopls/internal/lsp/cache"
+ "golang.org/x/tools/gopls/internal/lsp/progress"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/jsonrpc2"
+)
+
+const concurrentAnalyses = 1
+
+// NewServer creates an LSP server and binds it to handle incoming client
+// messages on on the supplied stream.
+func NewServer(session *cache.Session, client protocol.ClientCloser) *Server {
+ return &Server{
+ diagnostics: map[span.URI]*fileReports{},
+ gcOptimizationDetails: make(map[source.PackageID]struct{}),
+ watchedGlobPatterns: make(map[string]struct{}),
+ changedFiles: make(map[span.URI]struct{}),
+ session: session,
+ client: client,
+ diagnosticsSema: make(chan struct{}, concurrentAnalyses),
+ progress: progress.NewTracker(client),
+ diagDebouncer: newDebouncer(),
+ }
+}
+
+type serverState int
+
+const (
+ serverCreated = serverState(iota)
+ serverInitializing // set once the server has received "initialize" request
+ serverInitialized // set once the server has received "initialized" request
+ serverShutDown
+)
+
+func (s serverState) String() string {
+ switch s {
+ case serverCreated:
+ return "created"
+ case serverInitializing:
+ return "initializing"
+ case serverInitialized:
+ return "initialized"
+ case serverShutDown:
+ return "shutDown"
+ }
+ return fmt.Sprintf("(unknown state: %d)", int(s))
+}
+
+// Server implements the protocol.Server interface.
+type Server struct {
+ client protocol.ClientCloser
+
+ stateMu sync.Mutex
+ state serverState
+ // notifications generated before serverInitialized
+ notifications []*protocol.ShowMessageParams
+
+ session *cache.Session
+
+ tempDir string
+
+ // changedFiles tracks files for which there has been a textDocument/didChange.
+ changedFilesMu sync.Mutex
+ changedFiles map[span.URI]struct{}
+
+ // folders is only valid between initialize and initialized, and holds the
+ // set of folders to build views for when we are ready
+ pendingFolders []protocol.WorkspaceFolder
+
+ // watchedGlobPatterns is the set of glob patterns that we have requested
+ // the client watch on disk. It will be updated as the set of directories
+ // that the server should watch changes.
+ watchedGlobPatternsMu sync.Mutex
+ watchedGlobPatterns map[string]struct{}
+ watchRegistrationCount int
+
+ diagnosticsMu sync.Mutex
+ diagnostics map[span.URI]*fileReports
+
+ // gcOptimizationDetails describes the packages for which we want
+ // optimization details to be included in the diagnostics. The key is the
+ // ID of the package.
+ gcOptimizationDetailsMu sync.Mutex
+ gcOptimizationDetails map[source.PackageID]struct{}
+
+ // diagnosticsSema limits the concurrency of diagnostics runs, which can be
+ // expensive.
+ diagnosticsSema chan struct{}
+
+ progress *progress.Tracker
+
+ // diagDebouncer is used for debouncing diagnostics.
+ diagDebouncer *debouncer
+
+ // When the workspace fails to load, we show its status through a progress
+ // report with an error message.
+ criticalErrorStatusMu sync.Mutex
+ criticalErrorStatus *progress.WorkDone
+}
+
+func (s *Server) workDoneProgressCancel(ctx context.Context, params *protocol.WorkDoneProgressCancelParams) error {
+ return s.progress.Cancel(params.Token)
+}
+
+func (s *Server) nonstandardRequest(ctx context.Context, method string, params interface{}) (interface{}, error) {
+ switch method {
+ case "gopls/diagnoseFiles":
+ paramMap := params.(map[string]interface{})
+ // TODO(adonovan): opt: parallelize FileDiagnostics(URI...), either
+ // by calling it in multiple goroutines or, better, by making
+ // the relevant APIs accept a set of URIs/packages.
+ for _, file := range paramMap["files"].([]interface{}) {
+ snapshot, fh, ok, release, err := s.beginFileRequest(ctx, protocol.DocumentURI(file.(string)), source.UnknownKind)
+ defer release()
+ if !ok {
+ return nil, err
+ }
+
+ fileID, diagnostics, err := source.FileDiagnostics(ctx, snapshot, fh.URI())
+ if err != nil {
+ return nil, err
+ }
+ if err := s.client.PublishDiagnostics(ctx, &protocol.PublishDiagnosticsParams{
+ URI: protocol.URIFromSpanURI(fh.URI()),
+ Diagnostics: toProtocolDiagnostics(diagnostics),
+ Version: fileID.Version(),
+ }); err != nil {
+ return nil, err
+ }
+ }
+ if err := s.client.PublishDiagnostics(ctx, &protocol.PublishDiagnosticsParams{
+ URI: "gopls://diagnostics-done",
+ }); err != nil {
+ return nil, err
+ }
+ return struct{}{}, nil
+ }
+ return nil, notImplemented(method)
+}
+
+func notImplemented(method string) error {
+ return fmt.Errorf("%w: %q not yet implemented", jsonrpc2.ErrMethodNotFound, method)
+}
diff --git a/gopls/internal/lsp/server_gen.go b/gopls/internal/lsp/server_gen.go
new file mode 100644
index 000000000..2c6e9954d
--- /dev/null
+++ b/gopls/internal/lsp/server_gen.go
@@ -0,0 +1,301 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsp
+
+// code generated by helper. DO NOT EDIT.
+
+import (
+ "context"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+)
+
+func (s *Server) CodeAction(ctx context.Context, params *protocol.CodeActionParams) ([]protocol.CodeAction, error) {
+ return s.codeAction(ctx, params)
+}
+
+func (s *Server) CodeLens(ctx context.Context, params *protocol.CodeLensParams) ([]protocol.CodeLens, error) {
+ return s.codeLens(ctx, params)
+}
+
+func (s *Server) ColorPresentation(context.Context, *protocol.ColorPresentationParams) ([]protocol.ColorPresentation, error) {
+ return nil, notImplemented("ColorPresentation")
+}
+
+func (s *Server) Completion(ctx context.Context, params *protocol.CompletionParams) (*protocol.CompletionList, error) {
+ return s.completion(ctx, params)
+}
+
+func (s *Server) Declaration(context.Context, *protocol.DeclarationParams) (*protocol.Or_textDocument_declaration, error) {
+ return nil, notImplemented("Declaration")
+}
+
+func (s *Server) Definition(ctx context.Context, params *protocol.DefinitionParams) ([]protocol.Location, error) {
+ return s.definition(ctx, params)
+}
+
+func (s *Server) Diagnostic(context.Context, *string) (*string, error) {
+ return nil, notImplemented("Diagnostic")
+}
+
+func (s *Server) DiagnosticWorkspace(context.Context, *protocol.WorkspaceDiagnosticParams) (*protocol.WorkspaceDiagnosticReport, error) {
+ return nil, notImplemented("DiagnosticWorkspace")
+}
+
+func (s *Server) DidChange(ctx context.Context, params *protocol.DidChangeTextDocumentParams) error {
+ return s.didChange(ctx, params)
+}
+
+func (s *Server) DidChangeConfiguration(ctx context.Context, _gen *protocol.DidChangeConfigurationParams) error {
+ return s.didChangeConfiguration(ctx, _gen)
+}
+
+func (s *Server) DidChangeNotebookDocument(context.Context, *protocol.DidChangeNotebookDocumentParams) error {
+ return notImplemented("DidChangeNotebookDocument")
+}
+
+func (s *Server) DidChangeWatchedFiles(ctx context.Context, params *protocol.DidChangeWatchedFilesParams) error {
+ return s.didChangeWatchedFiles(ctx, params)
+}
+
+func (s *Server) DidChangeWorkspaceFolders(ctx context.Context, params *protocol.DidChangeWorkspaceFoldersParams) error {
+ return s.didChangeWorkspaceFolders(ctx, params)
+}
+
+func (s *Server) DidClose(ctx context.Context, params *protocol.DidCloseTextDocumentParams) error {
+ return s.didClose(ctx, params)
+}
+
+func (s *Server) DidCloseNotebookDocument(context.Context, *protocol.DidCloseNotebookDocumentParams) error {
+ return notImplemented("DidCloseNotebookDocument")
+}
+
+func (s *Server) DidCreateFiles(context.Context, *protocol.CreateFilesParams) error {
+ return notImplemented("DidCreateFiles")
+}
+
+func (s *Server) DidDeleteFiles(context.Context, *protocol.DeleteFilesParams) error {
+ return notImplemented("DidDeleteFiles")
+}
+
+func (s *Server) DidOpen(ctx context.Context, params *protocol.DidOpenTextDocumentParams) error {
+ return s.didOpen(ctx, params)
+}
+
+func (s *Server) DidOpenNotebookDocument(context.Context, *protocol.DidOpenNotebookDocumentParams) error {
+ return notImplemented("DidOpenNotebookDocument")
+}
+
+func (s *Server) DidRenameFiles(context.Context, *protocol.RenameFilesParams) error {
+ return notImplemented("DidRenameFiles")
+}
+
+func (s *Server) DidSave(ctx context.Context, params *protocol.DidSaveTextDocumentParams) error {
+ return s.didSave(ctx, params)
+}
+
+func (s *Server) DidSaveNotebookDocument(context.Context, *protocol.DidSaveNotebookDocumentParams) error {
+ return notImplemented("DidSaveNotebookDocument")
+}
+
+func (s *Server) DocumentColor(context.Context, *protocol.DocumentColorParams) ([]protocol.ColorInformation, error) {
+ return nil, notImplemented("DocumentColor")
+}
+
+func (s *Server) DocumentHighlight(ctx context.Context, params *protocol.DocumentHighlightParams) ([]protocol.DocumentHighlight, error) {
+ return s.documentHighlight(ctx, params)
+}
+
+func (s *Server) DocumentLink(ctx context.Context, params *protocol.DocumentLinkParams) ([]protocol.DocumentLink, error) {
+ return s.documentLink(ctx, params)
+}
+
+func (s *Server) DocumentSymbol(ctx context.Context, params *protocol.DocumentSymbolParams) ([]interface{}, error) {
+ return s.documentSymbol(ctx, params)
+}
+
+func (s *Server) ExecuteCommand(ctx context.Context, params *protocol.ExecuteCommandParams) (interface{}, error) {
+ return s.executeCommand(ctx, params)
+}
+
+func (s *Server) Exit(ctx context.Context) error {
+ return s.exit(ctx)
+}
+
+func (s *Server) FoldingRange(ctx context.Context, params *protocol.FoldingRangeParams) ([]protocol.FoldingRange, error) {
+ return s.foldingRange(ctx, params)
+}
+
+func (s *Server) Formatting(ctx context.Context, params *protocol.DocumentFormattingParams) ([]protocol.TextEdit, error) {
+ return s.formatting(ctx, params)
+}
+
+func (s *Server) Hover(ctx context.Context, params *protocol.HoverParams) (*protocol.Hover, error) {
+ return s.hover(ctx, params)
+}
+
+func (s *Server) Implementation(ctx context.Context, params *protocol.ImplementationParams) ([]protocol.Location, error) {
+ return s.implementation(ctx, params)
+}
+
+func (s *Server) IncomingCalls(ctx context.Context, params *protocol.CallHierarchyIncomingCallsParams) ([]protocol.CallHierarchyIncomingCall, error) {
+ return s.incomingCalls(ctx, params)
+}
+
+func (s *Server) Initialize(ctx context.Context, params *protocol.ParamInitialize) (*protocol.InitializeResult, error) {
+ return s.initialize(ctx, params)
+}
+
+func (s *Server) Initialized(ctx context.Context, params *protocol.InitializedParams) error {
+ return s.initialized(ctx, params)
+}
+
+func (s *Server) InlayHint(ctx context.Context, params *protocol.InlayHintParams) ([]protocol.InlayHint, error) {
+ return s.inlayHint(ctx, params)
+}
+
+func (s *Server) InlineValue(context.Context, *protocol.InlineValueParams) ([]protocol.InlineValue, error) {
+ return nil, notImplemented("InlineValue")
+}
+
+func (s *Server) LinkedEditingRange(context.Context, *protocol.LinkedEditingRangeParams) (*protocol.LinkedEditingRanges, error) {
+ return nil, notImplemented("LinkedEditingRange")
+}
+
+func (s *Server) Moniker(context.Context, *protocol.MonikerParams) ([]protocol.Moniker, error) {
+ return nil, notImplemented("Moniker")
+}
+
+func (s *Server) NonstandardRequest(ctx context.Context, method string, params interface{}) (interface{}, error) {
+ return s.nonstandardRequest(ctx, method, params)
+}
+
+func (s *Server) OnTypeFormatting(context.Context, *protocol.DocumentOnTypeFormattingParams) ([]protocol.TextEdit, error) {
+ return nil, notImplemented("OnTypeFormatting")
+}
+
+func (s *Server) OutgoingCalls(ctx context.Context, params *protocol.CallHierarchyOutgoingCallsParams) ([]protocol.CallHierarchyOutgoingCall, error) {
+ return s.outgoingCalls(ctx, params)
+}
+
+func (s *Server) PrepareCallHierarchy(ctx context.Context, params *protocol.CallHierarchyPrepareParams) ([]protocol.CallHierarchyItem, error) {
+ return s.prepareCallHierarchy(ctx, params)
+}
+
+func (s *Server) PrepareRename(ctx context.Context, params *protocol.PrepareRenameParams) (*protocol.PrepareRename2Gn, error) {
+ return s.prepareRename(ctx, params)
+}
+
+func (s *Server) PrepareTypeHierarchy(context.Context, *protocol.TypeHierarchyPrepareParams) ([]protocol.TypeHierarchyItem, error) {
+ return nil, notImplemented("PrepareTypeHierarchy")
+}
+
+func (s *Server) Progress(context.Context, *protocol.ProgressParams) error {
+ return notImplemented("Progress")
+}
+
+func (s *Server) RangeFormatting(context.Context, *protocol.DocumentRangeFormattingParams) ([]protocol.TextEdit, error) {
+ return nil, notImplemented("RangeFormatting")
+}
+
+func (s *Server) References(ctx context.Context, params *protocol.ReferenceParams) ([]protocol.Location, error) {
+ return s.references(ctx, params)
+}
+
+func (s *Server) Rename(ctx context.Context, params *protocol.RenameParams) (*protocol.WorkspaceEdit, error) {
+ return s.rename(ctx, params)
+}
+
+func (s *Server) Resolve(context.Context, *protocol.InlayHint) (*protocol.InlayHint, error) {
+ return nil, notImplemented("Resolve")
+}
+
+func (s *Server) ResolveCodeAction(context.Context, *protocol.CodeAction) (*protocol.CodeAction, error) {
+ return nil, notImplemented("ResolveCodeAction")
+}
+
+func (s *Server) ResolveCodeLens(context.Context, *protocol.CodeLens) (*protocol.CodeLens, error) {
+ return nil, notImplemented("ResolveCodeLens")
+}
+
+func (s *Server) ResolveCompletionItem(context.Context, *protocol.CompletionItem) (*protocol.CompletionItem, error) {
+ return nil, notImplemented("ResolveCompletionItem")
+}
+
+func (s *Server) ResolveDocumentLink(context.Context, *protocol.DocumentLink) (*protocol.DocumentLink, error) {
+ return nil, notImplemented("ResolveDocumentLink")
+}
+
+func (s *Server) ResolveWorkspaceSymbol(context.Context, *protocol.WorkspaceSymbol) (*protocol.WorkspaceSymbol, error) {
+ return nil, notImplemented("ResolveWorkspaceSymbol")
+}
+
+func (s *Server) SelectionRange(ctx context.Context, params *protocol.SelectionRangeParams) ([]protocol.SelectionRange, error) {
+ return s.selectionRange(ctx, params)
+}
+
+func (s *Server) SemanticTokensFull(ctx context.Context, p *protocol.SemanticTokensParams) (*protocol.SemanticTokens, error) {
+ return s.semanticTokensFull(ctx, p)
+}
+
+func (s *Server) SemanticTokensFullDelta(ctx context.Context, p *protocol.SemanticTokensDeltaParams) (interface{}, error) {
+ return s.semanticTokensFullDelta(ctx, p)
+}
+
+func (s *Server) SemanticTokensRange(ctx context.Context, p *protocol.SemanticTokensRangeParams) (*protocol.SemanticTokens, error) {
+ return s.semanticTokensRange(ctx, p)
+}
+
+func (s *Server) SetTrace(context.Context, *protocol.SetTraceParams) error {
+ return notImplemented("SetTrace")
+}
+
+func (s *Server) Shutdown(ctx context.Context) error {
+ return s.shutdown(ctx)
+}
+
+func (s *Server) SignatureHelp(ctx context.Context, params *protocol.SignatureHelpParams) (*protocol.SignatureHelp, error) {
+ return s.signatureHelp(ctx, params)
+}
+
+func (s *Server) Subtypes(context.Context, *protocol.TypeHierarchySubtypesParams) ([]protocol.TypeHierarchyItem, error) {
+ return nil, notImplemented("Subtypes")
+}
+
+func (s *Server) Supertypes(context.Context, *protocol.TypeHierarchySupertypesParams) ([]protocol.TypeHierarchyItem, error) {
+ return nil, notImplemented("Supertypes")
+}
+
+func (s *Server) Symbol(ctx context.Context, params *protocol.WorkspaceSymbolParams) ([]protocol.SymbolInformation, error) {
+ return s.symbol(ctx, params)
+}
+
+func (s *Server) TypeDefinition(ctx context.Context, params *protocol.TypeDefinitionParams) ([]protocol.Location, error) {
+ return s.typeDefinition(ctx, params)
+}
+
+func (s *Server) WillCreateFiles(context.Context, *protocol.CreateFilesParams) (*protocol.WorkspaceEdit, error) {
+ return nil, notImplemented("WillCreateFiles")
+}
+
+func (s *Server) WillDeleteFiles(context.Context, *protocol.DeleteFilesParams) (*protocol.WorkspaceEdit, error) {
+ return nil, notImplemented("WillDeleteFiles")
+}
+
+func (s *Server) WillRenameFiles(context.Context, *protocol.RenameFilesParams) (*protocol.WorkspaceEdit, error) {
+ return nil, notImplemented("WillRenameFiles")
+}
+
+func (s *Server) WillSave(context.Context, *protocol.WillSaveTextDocumentParams) error {
+ return notImplemented("WillSave")
+}
+
+func (s *Server) WillSaveWaitUntil(context.Context, *protocol.WillSaveTextDocumentParams) ([]protocol.TextEdit, error) {
+ return nil, notImplemented("WillSaveWaitUntil")
+}
+
+func (s *Server) WorkDoneProgressCancel(ctx context.Context, params *protocol.WorkDoneProgressCancelParams) error {
+ return s.workDoneProgressCancel(ctx, params)
+}
diff --git a/gopls/internal/lsp/signature_help.go b/gopls/internal/lsp/signature_help.go
new file mode 100644
index 000000000..b623f78ea
--- /dev/null
+++ b/gopls/internal/lsp/signature_help.go
@@ -0,0 +1,31 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsp
+
+import (
+ "context"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/event/tag"
+)
+
+func (s *Server) signatureHelp(ctx context.Context, params *protocol.SignatureHelpParams) (*protocol.SignatureHelp, error) {
+ snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.Go)
+ defer release()
+ if !ok {
+ return nil, err
+ }
+ info, activeParameter, err := source.SignatureHelp(ctx, snapshot, fh, params.Position)
+ if err != nil {
+ event.Error(ctx, "no signature help", err, tag.Position.Of(params.Position))
+ return nil, nil // sic? There could be many reasons for failure.
+ }
+ return &protocol.SignatureHelp{
+ Signatures: []protocol.SignatureInformation{*info},
+ ActiveParameter: uint32(activeParameter),
+ }, nil
+}
diff --git a/gopls/internal/lsp/snippet/snippet_builder.go b/gopls/internal/lsp/snippet/snippet_builder.go
new file mode 100644
index 000000000..fa63e8d83
--- /dev/null
+++ b/gopls/internal/lsp/snippet/snippet_builder.go
@@ -0,0 +1,111 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package snippet implements the specification for the LSP snippet format.
+//
+// Snippets are "tab stop" templates returned as an optional attribute of LSP
+// completion candidates. As the user presses tab, they cycle through a series of
+// tab stops defined in the snippet. Each tab stop can optionally have placeholder
+// text, which can be pre-selected by editors. For a full description of syntax
+// and features, see "Snippet Syntax" at
+// https://microsoft.github.io/language-server-protocol/specifications/specification-3-14/#textDocument_completion.
+//
+// A typical snippet looks like "foo(${1:i int}, ${2:s string})".
+package snippet
+
+import (
+ "fmt"
+ "strings"
+)
+
+// A Builder is used to build an LSP snippet piecemeal.
+// The zero value is ready to use. Do not copy a non-zero Builder.
+type Builder struct {
+ // currentTabStop is the index of the previous tab stop. The
+ // next tab stop will be currentTabStop+1.
+ currentTabStop int
+ sb strings.Builder
+}
+
+// Escape characters defined in https://microsoft.github.io/language-server-protocol/specifications/specification-3-14/#textDocument_completion under "Grammar".
+var replacer = strings.NewReplacer(
+ `\`, `\\`,
+ `}`, `\}`,
+ `$`, `\$`,
+)
+
+func (b *Builder) WriteText(s string) {
+ replacer.WriteString(&b.sb, s)
+}
+
+func (b *Builder) PrependText(s string) {
+ rawSnip := b.String()
+ b.sb.Reset()
+ b.WriteText(s)
+ b.sb.WriteString(rawSnip)
+}
+
+func (b *Builder) Write(data []byte) (int, error) {
+ return b.sb.Write(data)
+}
+
+// WritePlaceholder writes a tab stop and placeholder value to the Builder.
+// The callback style allows for creating nested placeholders. To write an
+// empty tab stop, provide a nil callback.
+func (b *Builder) WritePlaceholder(fn func(*Builder)) {
+ fmt.Fprintf(&b.sb, "${%d:", b.nextTabStop())
+ if fn != nil {
+ fn(b)
+ }
+ b.sb.WriteByte('}')
+}
+
+// WriteFinalTabstop marks where cursor ends up after the user has
+// cycled through all the normal tab stops. It defaults to the
+// character after the snippet.
+func (b *Builder) WriteFinalTabstop() {
+ fmt.Fprint(&b.sb, "$0")
+}
+
+// In addition to '\', '}', and '$', snippet choices also use '|' and ',' as
+// meta characters, so they must be escaped within the choices.
+var choiceReplacer = strings.NewReplacer(
+ `\`, `\\`,
+ `}`, `\}`,
+ `$`, `\$`,
+ `|`, `\|`,
+ `,`, `\,`,
+)
+
+// WriteChoice writes a tab stop and list of text choices to the Builder.
+// The user's editor will prompt the user to choose one of the choices.
+func (b *Builder) WriteChoice(choices []string) {
+ fmt.Fprintf(&b.sb, "${%d|", b.nextTabStop())
+ for i, c := range choices {
+ if i != 0 {
+ b.sb.WriteByte(',')
+ }
+ choiceReplacer.WriteString(&b.sb, c)
+ }
+ b.sb.WriteString("|}")
+}
+
+// String returns the built snippet string.
+func (b *Builder) String() string {
+ return b.sb.String()
+}
+
+// Clone returns a copy of b.
+func (b *Builder) Clone() *Builder {
+ var clone Builder
+ clone.sb.WriteString(b.String())
+ return &clone
+}
+
+// nextTabStop returns the next tab stop index for a new placeholder.
+func (b *Builder) nextTabStop() int {
+ // Tab stops start from 1, so increment before returning.
+ b.currentTabStop++
+ return b.currentTabStop
+}
diff --git a/internal/lsp/snippet/snippet_builder_test.go b/gopls/internal/lsp/snippet/snippet_builder_test.go
index bc814b16d..bc814b16d 100644
--- a/internal/lsp/snippet/snippet_builder_test.go
+++ b/gopls/internal/lsp/snippet/snippet_builder_test.go
diff --git a/gopls/internal/lsp/source/add_import.go b/gopls/internal/lsp/source/add_import.go
new file mode 100644
index 000000000..cd8ec7ab7
--- /dev/null
+++ b/gopls/internal/lsp/source/add_import.go
@@ -0,0 +1,26 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package source
+
+import (
+ "context"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/internal/imports"
+)
+
+// AddImport adds a single import statement to the given file
+func AddImport(ctx context.Context, snapshot Snapshot, fh FileHandle, importPath string) ([]protocol.TextEdit, error) {
+ pgf, err := snapshot.ParseGo(ctx, fh, ParseFull)
+ if err != nil {
+ return nil, err
+ }
+ return ComputeOneImportFixEdits(snapshot, pgf, &imports.ImportFix{
+ StmtInfo: imports.ImportInfo{
+ ImportPath: importPath,
+ },
+ FixType: imports.AddImport,
+ })
+}
diff --git a/gopls/internal/lsp/source/api_json.go b/gopls/internal/lsp/source/api_json.go
new file mode 100755
index 000000000..e655eb01b
--- /dev/null
+++ b/gopls/internal/lsp/source/api_json.go
@@ -0,0 +1,1118 @@
+// Code generated by "golang.org/x/tools/gopls/doc/generate"; DO NOT EDIT.
+
+package source
+
+var GeneratedAPIJSON = &APIJSON{
+ Options: map[string][]*OptionJSON{
+ "User": {
+ {
+ Name: "buildFlags",
+ Type: "[]string",
+ Doc: "buildFlags is the set of flags passed on to the build system when invoked.\nIt is applied to queries like `go list`, which is used when discovering files.\nThe most common use is to set `-tags`.\n",
+ Default: "[]",
+ Hierarchy: "build",
+ },
+ {
+ Name: "env",
+ Type: "map[string]string",
+ Doc: "env adds environment variables to external commands run by `gopls`, most notably `go list`.\n",
+ Default: "{}",
+ Hierarchy: "build",
+ },
+ {
+ Name: "directoryFilters",
+ Type: "[]string",
+ Doc: "directoryFilters can be used to exclude unwanted directories from the\nworkspace. By default, all directories are included. Filters are an\noperator, `+` to include and `-` to exclude, followed by a path prefix\nrelative to the workspace folder. They are evaluated in order, and\nthe last filter that applies to a path controls whether it is included.\nThe path prefix can be empty, so an initial `-` excludes everything.\n\nDirectoryFilters also supports the `**` operator to match 0 or more directories.\n\nExamples:\n\nExclude node_modules at current depth: `-node_modules`\n\nExclude node_modules at any depth: `-**/node_modules`\n\nInclude only project_a: `-` (exclude everything), `+project_a`\n\nInclude only project_a, but not node_modules inside it: `-`, `+project_a`, `-project_a/node_modules`\n",
+ Default: "[\"-**/node_modules\"]",
+ Hierarchy: "build",
+ },
+ {
+ Name: "templateExtensions",
+ Type: "[]string",
+ Doc: "templateExtensions gives the extensions of file names that are treateed\nas template files. (The extension\nis the part of the file name after the final dot.)\n",
+ Default: "[]",
+ Hierarchy: "build",
+ },
+ {
+ Name: "memoryMode",
+ Type: "enum",
+ Doc: "memoryMode controls the tradeoff `gopls` makes between memory usage and\ncorrectness.\n\nValues other than `Normal` are untested and may break in surprising ways.\n",
+ EnumValues: []EnumValue{
+ {
+ Value: "\"DegradeClosed\"",
+ Doc: "`\"DegradeClosed\"`: In DegradeClosed mode, `gopls` will collect less information about\npackages without open files. As a result, features like Find\nReferences and Rename will miss results in such packages.\n",
+ },
+ {Value: "\"Normal\""},
+ },
+ Default: "\"Normal\"",
+ Status: "experimental",
+ Hierarchy: "build",
+ },
+ {
+ Name: "expandWorkspaceToModule",
+ Type: "bool",
+ Doc: "expandWorkspaceToModule instructs `gopls` to adjust the scope of the\nworkspace to find the best available module root. `gopls` first looks for\na go.mod file in any parent directory of the workspace folder, expanding\nthe scope to that directory if it exists. If no viable parent directory is\nfound, gopls will check if there is exactly one child directory containing\na go.mod file, narrowing the scope to that directory if it exists.\n",
+ Default: "true",
+ Status: "experimental",
+ Hierarchy: "build",
+ },
+ {
+ Name: "allowModfileModifications",
+ Type: "bool",
+ Doc: "allowModfileModifications disables -mod=readonly, allowing imports from\nout-of-scope modules. This option will eventually be removed.\n",
+ Default: "false",
+ Status: "experimental",
+ Hierarchy: "build",
+ },
+ {
+ Name: "allowImplicitNetworkAccess",
+ Type: "bool",
+ Doc: "allowImplicitNetworkAccess disables GOPROXY=off, allowing implicit module\ndownloads rather than requiring user action. This option will eventually\nbe removed.\n",
+ Default: "false",
+ Status: "experimental",
+ Hierarchy: "build",
+ },
+ {
+ Name: "standaloneTags",
+ Type: "[]string",
+ Doc: "standaloneTags specifies a set of build constraints that identify\nindividual Go source files that make up the entire main package of an\nexecutable.\n\nA common example of standalone main files is the convention of using the\ndirective `//go:build ignore` to denote files that are not intended to be\nincluded in any package, for example because they are invoked directly by\nthe developer using `go run`.\n\nGopls considers a file to be a standalone main file if and only if it has\npackage name \"main\" and has a build directive of the exact form\n\"//go:build tag\" or \"// +build tag\", where tag is among the list of tags\nconfigured by this setting. Notably, if the build constraint is more\ncomplicated than a simple tag (such as the composite constraint\n`//go:build tag && go1.18`), the file is not considered to be a standalone\nmain file.\n\nThis setting is only supported when gopls is built with Go 1.16 or later.\n",
+ Default: "[\"ignore\"]",
+ Hierarchy: "build",
+ },
+ {
+ Name: "hoverKind",
+ Type: "enum",
+ Doc: "hoverKind controls the information that appears in the hover text.\nSingleLine and Structured are intended for use only by authors of editor plugins.\n",
+ EnumValues: []EnumValue{
+ {Value: "\"FullDocumentation\""},
+ {Value: "\"NoDocumentation\""},
+ {Value: "\"SingleLine\""},
+ {
+ Value: "\"Structured\"",
+ Doc: "`\"Structured\"` is an experimental setting that returns a structured hover format.\nThis format separates the signature from the documentation, so that the client\ncan do more manipulation of these fields.\n\nThis should only be used by clients that support this behavior.\n",
+ },
+ {Value: "\"SynopsisDocumentation\""},
+ },
+ Default: "\"FullDocumentation\"",
+ Hierarchy: "ui.documentation",
+ },
+ {
+ Name: "linkTarget",
+ Type: "string",
+ Doc: "linkTarget controls where documentation links go.\nIt might be one of:\n\n* `\"godoc.org\"`\n* `\"pkg.go.dev\"`\n\nIf company chooses to use its own `godoc.org`, its address can be used as well.\n\nModules matching the GOPRIVATE environment variable will not have\ndocumentation links in hover.\n",
+ Default: "\"pkg.go.dev\"",
+ Hierarchy: "ui.documentation",
+ },
+ {
+ Name: "linksInHover",
+ Type: "bool",
+ Doc: "linksInHover toggles the presence of links to documentation in hover.\n",
+ Default: "true",
+ Hierarchy: "ui.documentation",
+ },
+ {
+ Name: "usePlaceholders",
+ Type: "bool",
+ Doc: "placeholders enables placeholders for function parameters or struct\nfields in completion responses.\n",
+ Default: "false",
+ Hierarchy: "ui.completion",
+ },
+ {
+ Name: "completionBudget",
+ Type: "time.Duration",
+ Doc: "completionBudget is the soft latency goal for completion requests. Most\nrequests finish in a couple milliseconds, but in some cases deep\ncompletions can take much longer. As we use up our budget we\ndynamically reduce the search scope to ensure we return timely\nresults. Zero means unlimited.\n",
+ Default: "\"100ms\"",
+ Status: "debug",
+ Hierarchy: "ui.completion",
+ },
+ {
+ Name: "matcher",
+ Type: "enum",
+ Doc: "matcher sets the algorithm that is used when calculating completion\ncandidates.\n",
+ EnumValues: []EnumValue{
+ {Value: "\"CaseInsensitive\""},
+ {Value: "\"CaseSensitive\""},
+ {Value: "\"Fuzzy\""},
+ },
+ Default: "\"Fuzzy\"",
+ Status: "advanced",
+ Hierarchy: "ui.completion",
+ },
+ {
+ Name: "experimentalPostfixCompletions",
+ Type: "bool",
+ Doc: "experimentalPostfixCompletions enables artificial method snippets\nsuch as \"someSlice.sort!\".\n",
+ Default: "true",
+ Status: "experimental",
+ Hierarchy: "ui.completion",
+ },
+ {
+ Name: "importShortcut",
+ Type: "enum",
+ Doc: "importShortcut specifies whether import statements should link to\ndocumentation or go to definitions.\n",
+ EnumValues: []EnumValue{
+ {Value: "\"Both\""},
+ {Value: "\"Definition\""},
+ {Value: "\"Link\""},
+ },
+ Default: "\"Both\"",
+ Hierarchy: "ui.navigation",
+ },
+ {
+ Name: "symbolMatcher",
+ Type: "enum",
+ Doc: "symbolMatcher sets the algorithm that is used when finding workspace symbols.\n",
+ EnumValues: []EnumValue{
+ {Value: "\"CaseInsensitive\""},
+ {Value: "\"CaseSensitive\""},
+ {Value: "\"FastFuzzy\""},
+ {Value: "\"Fuzzy\""},
+ },
+ Default: "\"FastFuzzy\"",
+ Status: "advanced",
+ Hierarchy: "ui.navigation",
+ },
+ {
+ Name: "symbolStyle",
+ Type: "enum",
+ Doc: "symbolStyle controls how symbols are qualified in symbol responses.\n\nExample Usage:\n\n```json5\n\"gopls\": {\n...\n \"symbolStyle\": \"Dynamic\",\n...\n}\n```\n",
+ EnumValues: []EnumValue{
+ {
+ Value: "\"Dynamic\"",
+ Doc: "`\"Dynamic\"` uses whichever qualifier results in the highest scoring\nmatch for the given symbol query. Here a \"qualifier\" is any \"/\" or \".\"\ndelimited suffix of the fully qualified symbol. i.e. \"to/pkg.Foo.Field\" or\njust \"Foo.Field\".\n",
+ },
+ {
+ Value: "\"Full\"",
+ Doc: "`\"Full\"` is fully qualified symbols, i.e.\n\"path/to/pkg.Foo.Field\".\n",
+ },
+ {
+ Value: "\"Package\"",
+ Doc: "`\"Package\"` is package qualified symbols i.e.\n\"pkg.Foo.Field\".\n",
+ },
+ },
+ Default: "\"Dynamic\"",
+ Status: "advanced",
+ Hierarchy: "ui.navigation",
+ },
+ {
+ Name: "analyses",
+ Type: "map[string]bool",
+ Doc: "analyses specify analyses that the user would like to enable or disable.\nA map of the names of analysis passes that should be enabled/disabled.\nA full list of analyzers that gopls uses can be found in\n[analyzers.md](https://github.com/golang/tools/blob/master/gopls/doc/analyzers.md).\n\nExample Usage:\n\n```json5\n...\n\"analyses\": {\n \"unreachable\": false, // Disable the unreachable analyzer.\n \"unusedparams\": true // Enable the unusedparams analyzer.\n}\n...\n```\n",
+ EnumKeys: EnumKeys{
+ ValueType: "bool",
+ Keys: []EnumKey{
+ {
+ Name: "\"asmdecl\"",
+ Doc: "report mismatches between assembly files and Go declarations",
+ Default: "true",
+ },
+ {
+ Name: "\"assign\"",
+ Doc: "check for useless assignments\n\nThis checker reports assignments of the form x = x or a[i] = a[i].\nThese are almost always useless, and even when they aren't they are\nusually a mistake.",
+ Default: "true",
+ },
+ {
+ Name: "\"atomic\"",
+ Doc: "check for common mistakes using the sync/atomic package\n\nThe atomic checker looks for assignment statements of the form:\n\n\tx = atomic.AddUint64(&x, 1)\n\nwhich are not atomic.",
+ Default: "true",
+ },
+ {
+ Name: "\"atomicalign\"",
+ Doc: "check for non-64-bits-aligned arguments to sync/atomic functions",
+ Default: "true",
+ },
+ {
+ Name: "\"bools\"",
+ Doc: "check for common mistakes involving boolean operators",
+ Default: "true",
+ },
+ {
+ Name: "\"buildtag\"",
+ Doc: "check //go:build and // +build directives",
+ Default: "true",
+ },
+ {
+ Name: "\"cgocall\"",
+ Doc: "detect some violations of the cgo pointer passing rules\n\nCheck for invalid cgo pointer passing.\nThis looks for code that uses cgo to call C code passing values\nwhose types are almost always invalid according to the cgo pointer\nsharing rules.\nSpecifically, it warns about attempts to pass a Go chan, map, func,\nor slice to C, either directly, or via a pointer, array, or struct.",
+ Default: "true",
+ },
+ {
+ Name: "\"composites\"",
+ Doc: "check for unkeyed composite literals\n\nThis analyzer reports a diagnostic for composite literals of struct\ntypes imported from another package that do not use the field-keyed\nsyntax. Such literals are fragile because the addition of a new field\n(even if unexported) to the struct will cause compilation to fail.\n\nAs an example,\n\n\terr = &net.DNSConfigError{err}\n\nshould be replaced by:\n\n\terr = &net.DNSConfigError{Err: err}\n",
+ Default: "true",
+ },
+ {
+ Name: "\"copylocks\"",
+ Doc: "check for locks erroneously passed by value\n\nInadvertently copying a value containing a lock, such as sync.Mutex or\nsync.WaitGroup, may cause both copies to malfunction. Generally such\nvalues should be referred to through a pointer.",
+ Default: "true",
+ },
+ {
+ Name: "\"deepequalerrors\"",
+ Doc: "check for calls of reflect.DeepEqual on error values\n\nThe deepequalerrors checker looks for calls of the form:\n\n reflect.DeepEqual(err1, err2)\n\nwhere err1 and err2 are errors. Using reflect.DeepEqual to compare\nerrors is discouraged.",
+ Default: "true",
+ },
+ {
+ Name: "\"directive\"",
+ Doc: "check Go toolchain directives such as //go:debug\n\nThis analyzer checks for problems with known Go toolchain directives\nin all Go source files in a package directory, even those excluded by\n//go:build constraints, and all non-Go source files too.\n\nFor //go:debug (see https://go.dev/doc/godebug), the analyzer checks\nthat the directives are placed only in Go source files, only above the\npackage comment, and only in package main or *_test.go files.\n\nSupport for other known directives may be added in the future.\n\nThis analyzer does not check //go:build, which is handled by the\nbuildtag analyzer.\n",
+ Default: "true",
+ },
+ {
+ Name: "\"embed\"",
+ Doc: "check for //go:embed directive import\n\nThis analyzer checks that the embed package is imported when source code contains //go:embed comment directives.\nThe embed package must be imported for //go:embed directives to function.import _ \"embed\".",
+ Default: "true",
+ },
+ {
+ Name: "\"errorsas\"",
+ Doc: "report passing non-pointer or non-error values to errors.As\n\nThe errorsas analysis reports calls to errors.As where the type\nof the second argument is not a pointer to a type implementing error.",
+ Default: "true",
+ },
+ {
+ Name: "\"fieldalignment\"",
+ Doc: "find structs that would use less memory if their fields were sorted\n\nThis analyzer find structs that can be rearranged to use less memory, and provides\na suggested edit with the most compact order.\n\nNote that there are two different diagnostics reported. One checks struct size,\nand the other reports \"pointer bytes\" used. Pointer bytes is how many bytes of the\nobject that the garbage collector has to potentially scan for pointers, for example:\n\n\tstruct { uint32; string }\n\nhave 16 pointer bytes because the garbage collector has to scan up through the string's\ninner pointer.\n\n\tstruct { string; *uint32 }\n\nhas 24 pointer bytes because it has to scan further through the *uint32.\n\n\tstruct { string; uint32 }\n\nhas 8 because it can stop immediately after the string pointer.\n\nBe aware that the most compact order is not always the most efficient.\nIn rare cases it may cause two variables each updated by its own goroutine\nto occupy the same CPU cache line, inducing a form of memory contention\nknown as \"false sharing\" that slows down both goroutines.\n",
+ Default: "false",
+ },
+ {
+ Name: "\"httpresponse\"",
+ Doc: "check for mistakes using HTTP responses\n\nA common mistake when using the net/http package is to defer a function\ncall to close the http.Response Body before checking the error that\ndetermines whether the response is valid:\n\n\tresp, err := http.Head(url)\n\tdefer resp.Body.Close()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t// (defer statement belongs here)\n\nThis checker helps uncover latent nil dereference bugs by reporting a\ndiagnostic for such mistakes.",
+ Default: "true",
+ },
+ {
+ Name: "\"ifaceassert\"",
+ Doc: "detect impossible interface-to-interface type assertions\n\nThis checker flags type assertions v.(T) and corresponding type-switch cases\nin which the static type V of v is an interface that cannot possibly implement\nthe target interface T. This occurs when V and T contain methods with the same\nname but different signatures. Example:\n\n\tvar v interface {\n\t\tRead()\n\t}\n\t_ = v.(io.Reader)\n\nThe Read method in v has a different signature than the Read method in\nio.Reader, so this assertion cannot succeed.\n",
+ Default: "true",
+ },
+ {
+ Name: "\"infertypeargs\"",
+ Doc: "check for unnecessary type arguments in call expressions\n\nExplicit type arguments may be omitted from call expressions if they can be\ninferred from function arguments, or from other type arguments:\n\n\tfunc f[T any](T) {}\n\t\n\tfunc _() {\n\t\tf[string](\"foo\") // string could be inferred\n\t}\n",
+ Default: "true",
+ },
+ {
+ Name: "\"loopclosure\"",
+ Doc: "check references to loop variables from within nested functions\n\nThis analyzer reports places where a function literal references the\niteration variable of an enclosing loop, and the loop calls the function\nin such a way (e.g. with go or defer) that it may outlive the loop\niteration and possibly observe the wrong value of the variable.\n\nIn this example, all the deferred functions run after the loop has\ncompleted, so all observe the final value of v.\n\n for _, v := range list {\n defer func() {\n use(v) // incorrect\n }()\n }\n\nOne fix is to create a new variable for each iteration of the loop:\n\n for _, v := range list {\n v := v // new var per iteration\n defer func() {\n use(v) // ok\n }()\n }\n\nThe next example uses a go statement and has a similar problem.\nIn addition, it has a data race because the loop updates v\nconcurrent with the goroutines accessing it.\n\n for _, v := range elem {\n go func() {\n use(v) // incorrect, and a data race\n }()\n }\n\nA fix is the same as before. The checker also reports problems\nin goroutines started by golang.org/x/sync/errgroup.Group.\nA hard-to-spot variant of this form is common in parallel tests:\n\n func Test(t *testing.T) {\n for _, test := range tests {\n t.Run(test.name, func(t *testing.T) {\n t.Parallel()\n use(test) // incorrect, and a data race\n })\n }\n }\n\nThe t.Parallel() call causes the rest of the function to execute\nconcurrent with the loop.\n\nThe analyzer reports references only in the last statement,\nas it is not deep enough to understand the effects of subsequent\nstatements that might render the reference benign.\n(\"Last statement\" is defined recursively in compound\nstatements such as if, switch, and select.)\n\nSee: https://golang.org/doc/go_faq.html#closures_and_goroutines",
+ Default: "true",
+ },
+ {
+ Name: "\"lostcancel\"",
+ Doc: "check cancel func returned by context.WithCancel is called\n\nThe cancellation function returned by context.WithCancel, WithTimeout,\nand WithDeadline must be called or the new context will remain live\nuntil its parent context is cancelled.\n(The background context is never cancelled.)",
+ Default: "true",
+ },
+ {
+ Name: "\"nilfunc\"",
+ Doc: "check for useless comparisons between functions and nil\n\nA useless comparison is one like f == nil as opposed to f() == nil.",
+ Default: "true",
+ },
+ {
+ Name: "\"nilness\"",
+ Doc: "check for redundant or impossible nil comparisons\n\nThe nilness checker inspects the control-flow graph of each function in\na package and reports nil pointer dereferences, degenerate nil\npointers, and panics with nil values. A degenerate comparison is of the form\nx==nil or x!=nil where x is statically known to be nil or non-nil. These are\noften a mistake, especially in control flow related to errors. Panics with nil\nvalues are checked because they are not detectable by\n\n\tif r := recover(); r != nil {\n\nThis check reports conditions such as:\n\n\tif f == nil { // impossible condition (f is a function)\n\t}\n\nand:\n\n\tp := &v\n\t...\n\tif p != nil { // tautological condition\n\t}\n\nand:\n\n\tif p == nil {\n\t\tprint(*p) // nil dereference\n\t}\n\nand:\n\n\tif p == nil {\n\t\tpanic(p)\n\t}\n",
+ Default: "false",
+ },
+ {
+ Name: "\"printf\"",
+ Doc: "check consistency of Printf format strings and arguments\n\nThe check applies to known functions (for example, those in package fmt)\nas well as any detected wrappers of known functions.\n\nA function that wants to avail itself of printf checking but is not\nfound by this analyzer's heuristics (for example, due to use of\ndynamic calls) can insert a bogus call:\n\n\tif false {\n\t\t_ = fmt.Sprintf(format, args...) // enable printf checking\n\t}\n\nThe -funcs flag specifies a comma-separated list of names of additional\nknown formatting functions or methods. If the name contains a period,\nit must denote a specific function using one of the following forms:\n\n\tdir/pkg.Function\n\tdir/pkg.Type.Method\n\t(*dir/pkg.Type).Method\n\nOtherwise the name is interpreted as a case-insensitive unqualified\nidentifier such as \"errorf\". Either way, if a listed name ends in f, the\nfunction is assumed to be Printf-like, taking a format string before the\nargument list. Otherwise it is assumed to be Print-like, taking a list\nof arguments with no format string.\n",
+ Default: "true",
+ },
+ {
+ Name: "\"shadow\"",
+ Doc: "check for possible unintended shadowing of variables\n\nThis analyzer check for shadowed variables.\nA shadowed variable is a variable declared in an inner scope\nwith the same name and type as a variable in an outer scope,\nand where the outer variable is mentioned after the inner one\nis declared.\n\n(This definition can be refined; the module generates too many\nfalse positives and is not yet enabled by default.)\n\nFor example:\n\n\tfunc BadRead(f *os.File, buf []byte) error {\n\t\tvar err error\n\t\tfor {\n\t\t\tn, err := f.Read(buf) // shadows the function variable 'err'\n\t\t\tif err != nil {\n\t\t\t\tbreak // causes return of wrong value\n\t\t\t}\n\t\t\tfoo(buf)\n\t\t}\n\t\treturn err\n\t}\n",
+ Default: "false",
+ },
+ {
+ Name: "\"shift\"",
+ Doc: "check for shifts that equal or exceed the width of the integer",
+ Default: "true",
+ },
+ {
+ Name: "\"simplifycompositelit\"",
+ Doc: "check for composite literal simplifications\n\nAn array, slice, or map composite literal of the form:\n\t[]T{T{}, T{}}\nwill be simplified to:\n\t[]T{{}, {}}\n\nThis is one of the simplifications that \"gofmt -s\" applies.",
+ Default: "true",
+ },
+ {
+ Name: "\"simplifyrange\"",
+ Doc: "check for range statement simplifications\n\nA range of the form:\n\tfor x, _ = range v {...}\nwill be simplified to:\n\tfor x = range v {...}\n\nA range of the form:\n\tfor _ = range v {...}\nwill be simplified to:\n\tfor range v {...}\n\nThis is one of the simplifications that \"gofmt -s\" applies.",
+ Default: "true",
+ },
+ {
+ Name: "\"simplifyslice\"",
+ Doc: "check for slice simplifications\n\nA slice expression of the form:\n\ts[a:len(s)]\nwill be simplified to:\n\ts[a:]\n\nThis is one of the simplifications that \"gofmt -s\" applies.",
+ Default: "true",
+ },
+ {
+ Name: "\"sortslice\"",
+ Doc: "check the argument type of sort.Slice\n\nsort.Slice requires an argument of a slice type. Check that\nthe interface{} value passed to sort.Slice is actually a slice.",
+ Default: "true",
+ },
+ {
+ Name: "\"stdmethods\"",
+ Doc: "check signature of methods of well-known interfaces\n\nSometimes a type may be intended to satisfy an interface but may fail to\ndo so because of a mistake in its method signature.\nFor example, the result of this WriteTo method should be (int64, error),\nnot error, to satisfy io.WriterTo:\n\n\ttype myWriterTo struct{...}\n func (myWriterTo) WriteTo(w io.Writer) error { ... }\n\nThis check ensures that each method whose name matches one of several\nwell-known interface methods from the standard library has the correct\nsignature for that interface.\n\nChecked method names include:\n\tFormat GobEncode GobDecode MarshalJSON MarshalXML\n\tPeek ReadByte ReadFrom ReadRune Scan Seek\n\tUnmarshalJSON UnreadByte UnreadRune WriteByte\n\tWriteTo\n",
+ Default: "true",
+ },
+ {
+ Name: "\"stringintconv\"",
+ Doc: "check for string(int) conversions\n\nThis checker flags conversions of the form string(x) where x is an integer\n(but not byte or rune) type. Such conversions are discouraged because they\nreturn the UTF-8 representation of the Unicode code point x, and not a decimal\nstring representation of x as one might expect. Furthermore, if x denotes an\ninvalid code point, the conversion cannot be statically rejected.\n\nFor conversions that intend on using the code point, consider replacing them\nwith string(rune(x)). Otherwise, strconv.Itoa and its equivalents return the\nstring representation of the value in the desired base.\n",
+ Default: "true",
+ },
+ {
+ Name: "\"structtag\"",
+ Doc: "check that struct field tags conform to reflect.StructTag.Get\n\nAlso report certain struct tags (json, xml) used with unexported fields.",
+ Default: "true",
+ },
+ {
+ Name: "\"testinggoroutine\"",
+ Doc: "report calls to (*testing.T).Fatal from goroutines started by a test.\n\nFunctions that abruptly terminate a test, such as the Fatal, Fatalf, FailNow, and\nSkip{,f,Now} methods of *testing.T, must be called from the test goroutine itself.\nThis checker detects calls to these functions that occur within a goroutine\nstarted by the test. For example:\n\nfunc TestFoo(t *testing.T) {\n go func() {\n t.Fatal(\"oops\") // error: (*T).Fatal called from non-test goroutine\n }()\n}\n",
+ Default: "true",
+ },
+ {
+ Name: "\"tests\"",
+ Doc: "check for common mistaken usages of tests and examples\n\nThe tests checker walks Test, Benchmark and Example functions checking\nmalformed names, wrong signatures and examples documenting non-existent\nidentifiers.\n\nPlease see the documentation for package testing in golang.org/pkg/testing\nfor the conventions that are enforced for Tests, Benchmarks, and Examples.",
+ Default: "true",
+ },
+ {
+ Name: "\"timeformat\"",
+ Doc: "check for calls of (time.Time).Format or time.Parse with 2006-02-01\n\nThe timeformat checker looks for time formats with the 2006-02-01 (yyyy-dd-mm)\nformat. Internationally, \"yyyy-dd-mm\" does not occur in common calendar date\nstandards, and so it is more likely that 2006-01-02 (yyyy-mm-dd) was intended.\n",
+ Default: "true",
+ },
+ {
+ Name: "\"unmarshal\"",
+ Doc: "report passing non-pointer or non-interface values to unmarshal\n\nThe unmarshal analysis reports calls to functions such as json.Unmarshal\nin which the argument type is not a pointer or an interface.",
+ Default: "true",
+ },
+ {
+ Name: "\"unreachable\"",
+ Doc: "check for unreachable code\n\nThe unreachable analyzer finds statements that execution can never reach\nbecause they are preceded by an return statement, a call to panic, an\ninfinite loop, or similar constructs.",
+ Default: "true",
+ },
+ {
+ Name: "\"unsafeptr\"",
+ Doc: "check for invalid conversions of uintptr to unsafe.Pointer\n\nThe unsafeptr analyzer reports likely incorrect uses of unsafe.Pointer\nto convert integers to pointers. A conversion from uintptr to\nunsafe.Pointer is invalid if it implies that there is a uintptr-typed\nword in memory that holds a pointer value, because that word will be\ninvisible to stack copying and to the garbage collector.",
+ Default: "true",
+ },
+ {
+ Name: "\"unusedparams\"",
+ Doc: "check for unused parameters of functions\n\nThe unusedparams analyzer checks functions to see if there are\nany parameters that are not being used.\n\nTo reduce false positives it ignores:\n- methods\n- parameters that do not have a name or are underscored\n- functions in test files\n- functions with empty bodies or those with just a return stmt",
+ Default: "false",
+ },
+ {
+ Name: "\"unusedresult\"",
+ Doc: "check for unused results of calls to some functions\n\nSome functions like fmt.Errorf return a result and have no side effects,\nso it is always a mistake to discard the result. This analyzer reports\ncalls to certain functions in which the result of the call is ignored.\n\nThe set of functions may be controlled using flags.",
+ Default: "true",
+ },
+ {
+ Name: "\"unusedwrite\"",
+ Doc: "checks for unused writes\n\nThe analyzer reports instances of writes to struct fields and\narrays that are never read. Specifically, when a struct object\nor an array is copied, its elements are copied implicitly by\nthe compiler, and any element write to this copy does nothing\nwith the original object.\n\nFor example:\n\n\ttype T struct { x int }\n\tfunc f(input []T) {\n\t\tfor i, v := range input { // v is a copy\n\t\t\tv.x = i // unused write to field x\n\t\t}\n\t}\n\nAnother example is about non-pointer receiver:\n\n\ttype T struct { x int }\n\tfunc (t T) f() { // t is a copy\n\t\tt.x = i // unused write to field x\n\t}\n",
+ Default: "false",
+ },
+ {
+ Name: "\"useany\"",
+ Doc: "check for constraints that could be simplified to \"any\"",
+ Default: "false",
+ },
+ {
+ Name: "\"fillreturns\"",
+ Doc: "suggest fixes for errors due to an incorrect number of return values\n\nThis checker provides suggested fixes for type errors of the\ntype \"wrong number of return values (want %d, got %d)\". For example:\n\tfunc m() (int, string, *bool, error) {\n\t\treturn\n\t}\nwill turn into\n\tfunc m() (int, string, *bool, error) {\n\t\treturn 0, \"\", nil, nil\n\t}\n\nThis functionality is similar to https://github.com/sqs/goreturns.\n",
+ Default: "true",
+ },
+ {
+ Name: "\"nonewvars\"",
+ Doc: "suggested fixes for \"no new vars on left side of :=\"\n\nThis checker provides suggested fixes for type errors of the\ntype \"no new vars on left side of :=\". For example:\n\tz := 1\n\tz := 2\nwill turn into\n\tz := 1\n\tz = 2\n",
+ Default: "true",
+ },
+ {
+ Name: "\"noresultvalues\"",
+ Doc: "suggested fixes for unexpected return values\n\nThis checker provides suggested fixes for type errors of the\ntype \"no result values expected\" or \"too many return values\".\nFor example:\n\tfunc z() { return nil }\nwill turn into\n\tfunc z() { return }\n",
+ Default: "true",
+ },
+ {
+ Name: "\"undeclaredname\"",
+ Doc: "suggested fixes for \"undeclared name: <>\"\n\nThis checker provides suggested fixes for type errors of the\ntype \"undeclared name: <>\". It will either insert a new statement,\nsuch as:\n\n\"<> := \"\n\nor a new function declaration, such as:\n\nfunc <>(inferred parameters) {\n\tpanic(\"implement me!\")\n}\n",
+ Default: "true",
+ },
+ {
+ Name: "\"unusedvariable\"",
+ Doc: "check for unused variables\n\nThe unusedvariable analyzer suggests fixes for unused variables errors.\n",
+ Default: "false",
+ },
+ {
+ Name: "\"fillstruct\"",
+ Doc: "note incomplete struct initializations\n\nThis analyzer provides diagnostics for any struct literals that do not have\nany fields initialized. Because the suggested fix for this analysis is\nexpensive to compute, callers should compute it separately, using the\nSuggestedFix function below.\n",
+ Default: "true",
+ },
+ {
+ Name: "\"stubmethods\"",
+ Doc: "stub methods analyzer\n\nThis analyzer generates method stubs for concrete types\nin order to implement a target interface",
+ Default: "true",
+ },
+ },
+ },
+ Default: "{}",
+ Hierarchy: "ui.diagnostic",
+ },
+ {
+ Name: "staticcheck",
+ Type: "bool",
+ Doc: "staticcheck enables additional analyses from staticcheck.io.\nThese analyses are documented on\n[Staticcheck's website](https://staticcheck.io/docs/checks/).\n",
+ Default: "false",
+ Status: "experimental",
+ Hierarchy: "ui.diagnostic",
+ },
+ {
+ Name: "annotations",
+ Type: "map[string]bool",
+ Doc: "annotations specifies the various kinds of optimization diagnostics\nthat should be reported by the gc_details command.\n",
+ EnumKeys: EnumKeys{
+ ValueType: "bool",
+ Keys: []EnumKey{
+ {
+ Name: "\"bounds\"",
+ Doc: "`\"bounds\"` controls bounds checking diagnostics.\n",
+ Default: "true",
+ },
+ {
+ Name: "\"escape\"",
+ Doc: "`\"escape\"` controls diagnostics about escape choices.\n",
+ Default: "true",
+ },
+ {
+ Name: "\"inline\"",
+ Doc: "`\"inline\"` controls diagnostics about inlining choices.\n",
+ Default: "true",
+ },
+ {
+ Name: "\"nil\"",
+ Doc: "`\"nil\"` controls nil checks.\n",
+ Default: "true",
+ },
+ },
+ },
+ Default: "{\"bounds\":true,\"escape\":true,\"inline\":true,\"nil\":true}",
+ Status: "experimental",
+ Hierarchy: "ui.diagnostic",
+ },
+ {
+ Name: "vulncheck",
+ Type: "enum",
+ Doc: "vulncheck enables vulnerability scanning.\n",
+ EnumValues: []EnumValue{
+ {
+ Value: "\"Imports\"",
+ Doc: "`\"Imports\"`: In Imports mode, `gopls` will report vulnerabilities that affect packages\ndirectly and indirectly used by the analyzed main module.\n",
+ },
+ {
+ Value: "\"Off\"",
+ Doc: "`\"Off\"`: Disable vulnerability analysis.\n",
+ },
+ },
+ Default: "\"Off\"",
+ Status: "experimental",
+ Hierarchy: "ui.diagnostic",
+ },
+ {
+ Name: "diagnosticsDelay",
+ Type: "time.Duration",
+ Doc: "diagnosticsDelay controls the amount of time that gopls waits\nafter the most recent file modification before computing deep diagnostics.\nSimple diagnostics (parsing and type-checking) are always run immediately\non recently modified packages.\n\nThis option must be set to a valid duration string, for example `\"250ms\"`.\n",
+ Default: "\"250ms\"",
+ Status: "advanced",
+ Hierarchy: "ui.diagnostic",
+ },
+ {
+ Name: "hints",
+ Type: "map[string]bool",
+ Doc: "hints specify inlay hints that users want to see. A full list of hints\nthat gopls uses can be found in\n[inlayHints.md](https://github.com/golang/tools/blob/master/gopls/doc/inlayHints.md).\n",
+ EnumKeys: EnumKeys{Keys: []EnumKey{
+ {
+ Name: "\"assignVariableTypes\"",
+ Doc: "Enable/disable inlay hints for variable types in assign statements:\n```go\n\ti/* int*/, j/* int*/ := 0, len(r)-1\n```",
+ Default: "false",
+ },
+ {
+ Name: "\"compositeLiteralFields\"",
+ Doc: "Enable/disable inlay hints for composite literal field names:\n```go\n\t{/*in: */\"Hello, world\", /*want: */\"dlrow ,olleH\"}\n```",
+ Default: "false",
+ },
+ {
+ Name: "\"compositeLiteralTypes\"",
+ Doc: "Enable/disable inlay hints for composite literal types:\n```go\n\tfor _, c := range []struct {\n\t\tin, want string\n\t}{\n\t\t/*struct{ in string; want string }*/{\"Hello, world\", \"dlrow ,olleH\"},\n\t}\n```",
+ Default: "false",
+ },
+ {
+ Name: "\"constantValues\"",
+ Doc: "Enable/disable inlay hints for constant values:\n```go\n\tconst (\n\t\tKindNone Kind = iota/* = 0*/\n\t\tKindPrint/* = 1*/\n\t\tKindPrintf/* = 2*/\n\t\tKindErrorf/* = 3*/\n\t)\n```",
+ Default: "false",
+ },
+ {
+ Name: "\"functionTypeParameters\"",
+ Doc: "Enable/disable inlay hints for implicit type parameters on generic functions:\n```go\n\tmyFoo/*[int, string]*/(1, \"hello\")\n```",
+ Default: "false",
+ },
+ {
+ Name: "\"parameterNames\"",
+ Doc: "Enable/disable inlay hints for parameter names:\n```go\n\tparseInt(/* str: */ \"123\", /* radix: */ 8)\n```",
+ Default: "false",
+ },
+ {
+ Name: "\"rangeVariableTypes\"",
+ Doc: "Enable/disable inlay hints for variable types in range statements:\n```go\n\tfor k/* int*/, v/* string*/ := range []string{} {\n\t\tfmt.Println(k, v)\n\t}\n```",
+ Default: "false",
+ },
+ }},
+ Default: "{}",
+ Status: "experimental",
+ Hierarchy: "ui.inlayhint",
+ },
+ {
+ Name: "codelenses",
+ Type: "map[string]bool",
+ Doc: "codelenses overrides the enabled/disabled state of code lenses. See the\n\"Code Lenses\" section of the\n[Settings page](https://github.com/golang/tools/blob/master/gopls/doc/settings.md#code-lenses)\nfor the list of supported lenses.\n\nExample Usage:\n\n```json5\n\"gopls\": {\n...\n \"codelenses\": {\n \"generate\": false, // Don't show the `go generate` lens.\n \"gc_details\": true // Show a code lens toggling the display of gc's choices.\n }\n...\n}\n```\n",
+ EnumKeys: EnumKeys{
+ ValueType: "bool",
+ Keys: []EnumKey{
+ {
+ Name: "\"gc_details\"",
+ Doc: "Toggle the calculation of gc annotations.",
+ Default: "false",
+ },
+ {
+ Name: "\"generate\"",
+ Doc: "Runs `go generate` for a given directory.",
+ Default: "true",
+ },
+ {
+ Name: "\"regenerate_cgo\"",
+ Doc: "Regenerates cgo definitions.",
+ Default: "true",
+ },
+ {
+ Name: "\"run_govulncheck\"",
+ Doc: "Run vulnerability check (`govulncheck`).",
+ Default: "false",
+ },
+ {
+ Name: "\"test\"",
+ Doc: "Runs `go test` for a specific set of test or benchmark functions.",
+ Default: "false",
+ },
+ {
+ Name: "\"tidy\"",
+ Doc: "Runs `go mod tidy` for a module.",
+ Default: "true",
+ },
+ {
+ Name: "\"upgrade_dependency\"",
+ Doc: "Upgrades a dependency in the go.mod file for a module.",
+ Default: "true",
+ },
+ {
+ Name: "\"vendor\"",
+ Doc: "Runs `go mod vendor` for a module.",
+ Default: "true",
+ },
+ },
+ },
+ Default: "{\"gc_details\":false,\"generate\":true,\"regenerate_cgo\":true,\"tidy\":true,\"upgrade_dependency\":true,\"vendor\":true}",
+ Hierarchy: "ui",
+ },
+ {
+ Name: "semanticTokens",
+ Type: "bool",
+ Doc: "semanticTokens controls whether the LSP server will send\nsemantic tokens to the client.\n",
+ Default: "false",
+ Status: "experimental",
+ Hierarchy: "ui",
+ },
+ {
+ Name: "noSemanticString",
+ Type: "bool",
+ Doc: "noSemanticString turns off the sending of the semantic token 'string'\n",
+ Default: "false",
+ Status: "experimental",
+ Hierarchy: "ui",
+ },
+ {
+ Name: "noSemanticNumber",
+ Type: "bool",
+ Doc: "noSemanticNumber turns off the sending of the semantic token 'number'\n",
+ Default: "false",
+ Status: "experimental",
+ Hierarchy: "ui",
+ },
+ {
+ Name: "local",
+ Type: "string",
+ Doc: "local is the equivalent of the `goimports -local` flag, which puts\nimports beginning with this string after third-party packages. It should\nbe the prefix of the import path whose imports should be grouped\nseparately.\n",
+ Default: "\"\"",
+ Hierarchy: "formatting",
+ },
+ {
+ Name: "gofumpt",
+ Type: "bool",
+ Doc: "gofumpt indicates if we should run gofumpt formatting.\n",
+ Default: "false",
+ Hierarchy: "formatting",
+ },
+ {
+ Name: "verboseOutput",
+ Type: "bool",
+ Doc: "verboseOutput enables additional debug logging.\n",
+ Default: "false",
+ Status: "debug",
+ },
+ },
+ },
+ Commands: []*CommandJSON{
+ {
+ Command: "gopls.add_dependency",
+ Title: "Add a dependency",
+ Doc: "Adds a dependency to the go.mod file for a module.",
+ ArgDoc: "{\n\t// The go.mod file URI.\n\t\"URI\": string,\n\t// Additional args to pass to the go command.\n\t\"GoCmdArgs\": []string,\n\t// Whether to add a require directive.\n\t\"AddRequire\": bool,\n}",
+ },
+ {
+ Command: "gopls.add_import",
+ Title: "Add an import",
+ Doc: "Ask the server to add an import path to a given Go file. The method will\ncall applyEdit on the client so that clients don't have to apply the edit\nthemselves.",
+ ArgDoc: "{\n\t// ImportPath is the target import path that should\n\t// be added to the URI file\n\t\"ImportPath\": string,\n\t// URI is the file that the ImportPath should be\n\t// added to\n\t\"URI\": string,\n}",
+ },
+ {
+ Command: "gopls.apply_fix",
+ Title: "Apply a fix",
+ Doc: "Applies a fix to a region of source code.",
+ ArgDoc: "{\n\t// The fix to apply.\n\t\"Fix\": string,\n\t// The file URI for the document to fix.\n\t\"URI\": string,\n\t// The document range to scan for fixes.\n\t\"Range\": {\n\t\t\"start\": {\n\t\t\t\"line\": uint32,\n\t\t\t\"character\": uint32,\n\t\t},\n\t\t\"end\": {\n\t\t\t\"line\": uint32,\n\t\t\t\"character\": uint32,\n\t\t},\n\t},\n}",
+ },
+ {
+ Command: "gopls.check_upgrades",
+ Title: "Check for upgrades",
+ Doc: "Checks for module upgrades.",
+ ArgDoc: "{\n\t// The go.mod file URI.\n\t\"URI\": string,\n\t// The modules to check.\n\t\"Modules\": []string,\n}",
+ },
+ {
+ Command: "gopls.edit_go_directive",
+ Title: "Run go mod edit -go=version",
+ Doc: "Runs `go mod edit -go=version` for a module.",
+ ArgDoc: "{\n\t// Any document URI within the relevant module.\n\t\"URI\": string,\n\t// The version to pass to `go mod edit -go`.\n\t\"Version\": string,\n}",
+ },
+ {
+ Command: "gopls.fetch_vulncheck_result",
+ Title: "Get known vulncheck result",
+ Doc: "Fetch the result of latest vulnerability check (`govulncheck`).",
+ ArgDoc: "{\n\t// The file URI.\n\t\"URI\": string,\n}",
+ ResultDoc: "map[golang.org/x/tools/gopls/internal/lsp/protocol.DocumentURI]*golang.org/x/tools/gopls/internal/govulncheck.Result",
+ },
+ {
+ Command: "gopls.gc_details",
+ Title: "Toggle gc_details",
+ Doc: "Toggle the calculation of gc annotations.",
+ ArgDoc: "string",
+ },
+ {
+ Command: "gopls.generate",
+ Title: "Run go generate",
+ Doc: "Runs `go generate` for a given directory.",
+ ArgDoc: "{\n\t// URI for the directory to generate.\n\t\"Dir\": string,\n\t// Whether to generate recursively (go generate ./...)\n\t\"Recursive\": bool,\n}",
+ },
+ {
+ Command: "gopls.go_get_package",
+ Title: "go get a package",
+ Doc: "Runs `go get` to fetch a package.",
+ ArgDoc: "{\n\t// Any document URI within the relevant module.\n\t\"URI\": string,\n\t// The package to go get.\n\t\"Pkg\": string,\n\t\"AddRequire\": bool,\n}",
+ },
+ {
+ Command: "gopls.list_imports",
+ Title: "List imports of a file and its package",
+ Doc: "Retrieve a list of imports in the given Go file, and the package it\nbelongs to.",
+ ArgDoc: "{\n\t// The file URI.\n\t\"URI\": string,\n}",
+ ResultDoc: "{\n\t// Imports is a list of imports in the requested file.\n\t\"Imports\": []{\n\t\t\"Path\": string,\n\t\t\"Name\": string,\n\t},\n\t// PackageImports is a list of all imports in the requested file's package.\n\t\"PackageImports\": []{\n\t\t\"Path\": string,\n\t},\n}",
+ },
+ {
+ Command: "gopls.list_known_packages",
+ Title: "List known packages",
+ Doc: "Retrieve a list of packages that are importable from the given URI.",
+ ArgDoc: "{\n\t// The file URI.\n\t\"URI\": string,\n}",
+ ResultDoc: "{\n\t// Packages is a list of packages relative\n\t// to the URIArg passed by the command request.\n\t// In other words, it omits paths that are already\n\t// imported or cannot be imported due to compiler\n\t// restrictions.\n\t\"Packages\": []string,\n}",
+ },
+ {
+ Command: "gopls.mem_stats",
+ Title: "fetch memory statistics",
+ Doc: "Call runtime.GC multiple times and return memory statistics as reported by\nruntime.MemStats.\n\nThis command is used for benchmarking, and may change in the future.",
+ ResultDoc: "{\n\t\"HeapAlloc\": uint64,\n\t\"HeapInUse\": uint64,\n}",
+ },
+ {
+ Command: "gopls.regenerate_cgo",
+ Title: "Regenerate cgo",
+ Doc: "Regenerates cgo definitions.",
+ ArgDoc: "{\n\t// The file URI.\n\t\"URI\": string,\n}",
+ },
+ {
+ Command: "gopls.remove_dependency",
+ Title: "Remove a dependency",
+ Doc: "Removes a dependency from the go.mod file of a module.",
+ ArgDoc: "{\n\t// The go.mod file URI.\n\t\"URI\": string,\n\t// The module path to remove.\n\t\"ModulePath\": string,\n\t\"OnlyDiagnostic\": bool,\n}",
+ },
+ {
+ Command: "gopls.reset_go_mod_diagnostics",
+ Title: "Reset go.mod diagnostics",
+ Doc: "Reset diagnostics in the go.mod file of a module.",
+ ArgDoc: "{\n\t\"URIArg\": {\n\t\t\"URI\": string,\n\t},\n\t// Optional: source of the diagnostics to reset.\n\t// If not set, all resettable go.mod diagnostics will be cleared.\n\t\"DiagnosticSource\": string,\n}",
+ },
+ {
+ Command: "gopls.run_govulncheck",
+ Title: "Run govulncheck.",
+ Doc: "Run vulnerability check (`govulncheck`).",
+ ArgDoc: "{\n\t// Any document in the directory from which govulncheck will run.\n\t\"URI\": string,\n\t// Package pattern. E.g. \"\", \".\", \"./...\".\n\t\"Pattern\": string,\n}",
+ ResultDoc: "{\n\t// Token holds the progress token for LSP workDone reporting of the vulncheck\n\t// invocation.\n\t\"Token\": interface{},\n}",
+ },
+ {
+ Command: "gopls.run_tests",
+ Title: "Run test(s)",
+ Doc: "Runs `go test` for a specific set of test or benchmark functions.",
+ ArgDoc: "{\n\t// The test file containing the tests to run.\n\t\"URI\": string,\n\t// Specific test names to run, e.g. TestFoo.\n\t\"Tests\": []string,\n\t// Specific benchmarks to run, e.g. BenchmarkFoo.\n\t\"Benchmarks\": []string,\n}",
+ },
+ {
+ Command: "gopls.start_debugging",
+ Title: "Start the gopls debug server",
+ Doc: "Start the gopls debug server if it isn't running, and return the debug\naddress.",
+ ArgDoc: "{\n\t// Optional: the address (including port) for the debug server to listen on.\n\t// If not provided, the debug server will bind to \"localhost:0\", and the\n\t// full debug URL will be contained in the result.\n\t// \n\t// If there is more than one gopls instance along the serving path (i.e. you\n\t// are using a daemon), each gopls instance will attempt to start debugging.\n\t// If Addr specifies a port, only the daemon will be able to bind to that\n\t// port, and each intermediate gopls instance will fail to start debugging.\n\t// For this reason it is recommended not to specify a port (or equivalently,\n\t// to specify \":0\").\n\t// \n\t// If the server was already debugging this field has no effect, and the\n\t// result will contain the previously configured debug URL(s).\n\t\"Addr\": string,\n}",
+ ResultDoc: "{\n\t// The URLs to use to access the debug servers, for all gopls instances in\n\t// the serving path. For the common case of a single gopls instance (i.e. no\n\t// daemon), this will be exactly one address.\n\t// \n\t// In the case of one or more gopls instances forwarding the LSP to a daemon,\n\t// URLs will contain debug addresses for each server in the serving path, in\n\t// serving order. The daemon debug address will be the last entry in the\n\t// slice. If any intermediate gopls instance fails to start debugging, no\n\t// error will be returned but the debug URL for that server in the URLs slice\n\t// will be empty.\n\t\"URLs\": []string,\n}",
+ },
+ {
+ Command: "gopls.test",
+ Title: "Run test(s) (legacy)",
+ Doc: "Runs `go test` for a specific set of test or benchmark functions.",
+ ArgDoc: "string,\n[]string,\n[]string",
+ },
+ {
+ Command: "gopls.tidy",
+ Title: "Run go mod tidy",
+ Doc: "Runs `go mod tidy` for a module.",
+ ArgDoc: "{\n\t// The file URIs.\n\t\"URIs\": []string,\n}",
+ },
+ {
+ Command: "gopls.toggle_gc_details",
+ Title: "Toggle gc_details",
+ Doc: "Toggle the calculation of gc annotations.",
+ ArgDoc: "{\n\t// The file URI.\n\t\"URI\": string,\n}",
+ },
+ {
+ Command: "gopls.update_go_sum",
+ Title: "Update go.sum",
+ Doc: "Updates the go.sum file for a module.",
+ ArgDoc: "{\n\t// The file URIs.\n\t\"URIs\": []string,\n}",
+ },
+ {
+ Command: "gopls.upgrade_dependency",
+ Title: "Upgrade a dependency",
+ Doc: "Upgrades a dependency in the go.mod file for a module.",
+ ArgDoc: "{\n\t// The go.mod file URI.\n\t\"URI\": string,\n\t// Additional args to pass to the go command.\n\t\"GoCmdArgs\": []string,\n\t// Whether to add a require directive.\n\t\"AddRequire\": bool,\n}",
+ },
+ {
+ Command: "gopls.vendor",
+ Title: "Run go mod vendor",
+ Doc: "Runs `go mod vendor` for a module.",
+ ArgDoc: "{\n\t// The file URI.\n\t\"URI\": string,\n}",
+ },
+ },
+ Lenses: []*LensJSON{
+ {
+ Lens: "gc_details",
+ Title: "Toggle gc_details",
+ Doc: "Toggle the calculation of gc annotations.",
+ },
+ {
+ Lens: "generate",
+ Title: "Run go generate",
+ Doc: "Runs `go generate` for a given directory.",
+ },
+ {
+ Lens: "regenerate_cgo",
+ Title: "Regenerate cgo",
+ Doc: "Regenerates cgo definitions.",
+ },
+ {
+ Lens: "run_govulncheck",
+ Title: "Run govulncheck.",
+ Doc: "Run vulnerability check (`govulncheck`).",
+ },
+ {
+ Lens: "test",
+ Title: "Run test(s) (legacy)",
+ Doc: "Runs `go test` for a specific set of test or benchmark functions.",
+ },
+ {
+ Lens: "tidy",
+ Title: "Run go mod tidy",
+ Doc: "Runs `go mod tidy` for a module.",
+ },
+ {
+ Lens: "upgrade_dependency",
+ Title: "Upgrade a dependency",
+ Doc: "Upgrades a dependency in the go.mod file for a module.",
+ },
+ {
+ Lens: "vendor",
+ Title: "Run go mod vendor",
+ Doc: "Runs `go mod vendor` for a module.",
+ },
+ },
+ Analyzers: []*AnalyzerJSON{
+ {
+ Name: "asmdecl",
+ Doc: "report mismatches between assembly files and Go declarations",
+ Default: true,
+ },
+ {
+ Name: "assign",
+ Doc: "check for useless assignments\n\nThis checker reports assignments of the form x = x or a[i] = a[i].\nThese are almost always useless, and even when they aren't they are\nusually a mistake.",
+ Default: true,
+ },
+ {
+ Name: "atomic",
+ Doc: "check for common mistakes using the sync/atomic package\n\nThe atomic checker looks for assignment statements of the form:\n\n\tx = atomic.AddUint64(&x, 1)\n\nwhich are not atomic.",
+ Default: true,
+ },
+ {
+ Name: "atomicalign",
+ Doc: "check for non-64-bits-aligned arguments to sync/atomic functions",
+ Default: true,
+ },
+ {
+ Name: "bools",
+ Doc: "check for common mistakes involving boolean operators",
+ Default: true,
+ },
+ {
+ Name: "buildtag",
+ Doc: "check //go:build and // +build directives",
+ Default: true,
+ },
+ {
+ Name: "cgocall",
+ Doc: "detect some violations of the cgo pointer passing rules\n\nCheck for invalid cgo pointer passing.\nThis looks for code that uses cgo to call C code passing values\nwhose types are almost always invalid according to the cgo pointer\nsharing rules.\nSpecifically, it warns about attempts to pass a Go chan, map, func,\nor slice to C, either directly, or via a pointer, array, or struct.",
+ Default: true,
+ },
+ {
+ Name: "composites",
+ Doc: "check for unkeyed composite literals\n\nThis analyzer reports a diagnostic for composite literals of struct\ntypes imported from another package that do not use the field-keyed\nsyntax. Such literals are fragile because the addition of a new field\n(even if unexported) to the struct will cause compilation to fail.\n\nAs an example,\n\n\terr = &net.DNSConfigError{err}\n\nshould be replaced by:\n\n\terr = &net.DNSConfigError{Err: err}\n",
+ Default: true,
+ },
+ {
+ Name: "copylocks",
+ Doc: "check for locks erroneously passed by value\n\nInadvertently copying a value containing a lock, such as sync.Mutex or\nsync.WaitGroup, may cause both copies to malfunction. Generally such\nvalues should be referred to through a pointer.",
+ Default: true,
+ },
+ {
+ Name: "deepequalerrors",
+ Doc: "check for calls of reflect.DeepEqual on error values\n\nThe deepequalerrors checker looks for calls of the form:\n\n reflect.DeepEqual(err1, err2)\n\nwhere err1 and err2 are errors. Using reflect.DeepEqual to compare\nerrors is discouraged.",
+ Default: true,
+ },
+ {
+ Name: "directive",
+ Doc: "check Go toolchain directives such as //go:debug\n\nThis analyzer checks for problems with known Go toolchain directives\nin all Go source files in a package directory, even those excluded by\n//go:build constraints, and all non-Go source files too.\n\nFor //go:debug (see https://go.dev/doc/godebug), the analyzer checks\nthat the directives are placed only in Go source files, only above the\npackage comment, and only in package main or *_test.go files.\n\nSupport for other known directives may be added in the future.\n\nThis analyzer does not check //go:build, which is handled by the\nbuildtag analyzer.\n",
+ Default: true,
+ },
+ {
+ Name: "embed",
+ Doc: "check for //go:embed directive import\n\nThis analyzer checks that the embed package is imported when source code contains //go:embed comment directives.\nThe embed package must be imported for //go:embed directives to function.import _ \"embed\".",
+ Default: true,
+ },
+ {
+ Name: "errorsas",
+ Doc: "report passing non-pointer or non-error values to errors.As\n\nThe errorsas analysis reports calls to errors.As where the type\nof the second argument is not a pointer to a type implementing error.",
+ Default: true,
+ },
+ {
+ Name: "fieldalignment",
+ Doc: "find structs that would use less memory if their fields were sorted\n\nThis analyzer find structs that can be rearranged to use less memory, and provides\na suggested edit with the most compact order.\n\nNote that there are two different diagnostics reported. One checks struct size,\nand the other reports \"pointer bytes\" used. Pointer bytes is how many bytes of the\nobject that the garbage collector has to potentially scan for pointers, for example:\n\n\tstruct { uint32; string }\n\nhave 16 pointer bytes because the garbage collector has to scan up through the string's\ninner pointer.\n\n\tstruct { string; *uint32 }\n\nhas 24 pointer bytes because it has to scan further through the *uint32.\n\n\tstruct { string; uint32 }\n\nhas 8 because it can stop immediately after the string pointer.\n\nBe aware that the most compact order is not always the most efficient.\nIn rare cases it may cause two variables each updated by its own goroutine\nto occupy the same CPU cache line, inducing a form of memory contention\nknown as \"false sharing\" that slows down both goroutines.\n",
+ },
+ {
+ Name: "httpresponse",
+ Doc: "check for mistakes using HTTP responses\n\nA common mistake when using the net/http package is to defer a function\ncall to close the http.Response Body before checking the error that\ndetermines whether the response is valid:\n\n\tresp, err := http.Head(url)\n\tdefer resp.Body.Close()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t// (defer statement belongs here)\n\nThis checker helps uncover latent nil dereference bugs by reporting a\ndiagnostic for such mistakes.",
+ Default: true,
+ },
+ {
+ Name: "ifaceassert",
+ Doc: "detect impossible interface-to-interface type assertions\n\nThis checker flags type assertions v.(T) and corresponding type-switch cases\nin which the static type V of v is an interface that cannot possibly implement\nthe target interface T. This occurs when V and T contain methods with the same\nname but different signatures. Example:\n\n\tvar v interface {\n\t\tRead()\n\t}\n\t_ = v.(io.Reader)\n\nThe Read method in v has a different signature than the Read method in\nio.Reader, so this assertion cannot succeed.\n",
+ Default: true,
+ },
+ {
+ Name: "infertypeargs",
+ Doc: "check for unnecessary type arguments in call expressions\n\nExplicit type arguments may be omitted from call expressions if they can be\ninferred from function arguments, or from other type arguments:\n\n\tfunc f[T any](T) {}\n\t\n\tfunc _() {\n\t\tf[string](\"foo\") // string could be inferred\n\t}\n",
+ Default: true,
+ },
+ {
+ Name: "loopclosure",
+ Doc: "check references to loop variables from within nested functions\n\nThis analyzer reports places where a function literal references the\niteration variable of an enclosing loop, and the loop calls the function\nin such a way (e.g. with go or defer) that it may outlive the loop\niteration and possibly observe the wrong value of the variable.\n\nIn this example, all the deferred functions run after the loop has\ncompleted, so all observe the final value of v.\n\n for _, v := range list {\n defer func() {\n use(v) // incorrect\n }()\n }\n\nOne fix is to create a new variable for each iteration of the loop:\n\n for _, v := range list {\n v := v // new var per iteration\n defer func() {\n use(v) // ok\n }()\n }\n\nThe next example uses a go statement and has a similar problem.\nIn addition, it has a data race because the loop updates v\nconcurrent with the goroutines accessing it.\n\n for _, v := range elem {\n go func() {\n use(v) // incorrect, and a data race\n }()\n }\n\nA fix is the same as before. The checker also reports problems\nin goroutines started by golang.org/x/sync/errgroup.Group.\nA hard-to-spot variant of this form is common in parallel tests:\n\n func Test(t *testing.T) {\n for _, test := range tests {\n t.Run(test.name, func(t *testing.T) {\n t.Parallel()\n use(test) // incorrect, and a data race\n })\n }\n }\n\nThe t.Parallel() call causes the rest of the function to execute\nconcurrent with the loop.\n\nThe analyzer reports references only in the last statement,\nas it is not deep enough to understand the effects of subsequent\nstatements that might render the reference benign.\n(\"Last statement\" is defined recursively in compound\nstatements such as if, switch, and select.)\n\nSee: https://golang.org/doc/go_faq.html#closures_and_goroutines",
+ Default: true,
+ },
+ {
+ Name: "lostcancel",
+ Doc: "check cancel func returned by context.WithCancel is called\n\nThe cancellation function returned by context.WithCancel, WithTimeout,\nand WithDeadline must be called or the new context will remain live\nuntil its parent context is cancelled.\n(The background context is never cancelled.)",
+ Default: true,
+ },
+ {
+ Name: "nilfunc",
+ Doc: "check for useless comparisons between functions and nil\n\nA useless comparison is one like f == nil as opposed to f() == nil.",
+ Default: true,
+ },
+ {
+ Name: "nilness",
+ Doc: "check for redundant or impossible nil comparisons\n\nThe nilness checker inspects the control-flow graph of each function in\na package and reports nil pointer dereferences, degenerate nil\npointers, and panics with nil values. A degenerate comparison is of the form\nx==nil or x!=nil where x is statically known to be nil or non-nil. These are\noften a mistake, especially in control flow related to errors. Panics with nil\nvalues are checked because they are not detectable by\n\n\tif r := recover(); r != nil {\n\nThis check reports conditions such as:\n\n\tif f == nil { // impossible condition (f is a function)\n\t}\n\nand:\n\n\tp := &v\n\t...\n\tif p != nil { // tautological condition\n\t}\n\nand:\n\n\tif p == nil {\n\t\tprint(*p) // nil dereference\n\t}\n\nand:\n\n\tif p == nil {\n\t\tpanic(p)\n\t}\n",
+ },
+ {
+ Name: "printf",
+ Doc: "check consistency of Printf format strings and arguments\n\nThe check applies to known functions (for example, those in package fmt)\nas well as any detected wrappers of known functions.\n\nA function that wants to avail itself of printf checking but is not\nfound by this analyzer's heuristics (for example, due to use of\ndynamic calls) can insert a bogus call:\n\n\tif false {\n\t\t_ = fmt.Sprintf(format, args...) // enable printf checking\n\t}\n\nThe -funcs flag specifies a comma-separated list of names of additional\nknown formatting functions or methods. If the name contains a period,\nit must denote a specific function using one of the following forms:\n\n\tdir/pkg.Function\n\tdir/pkg.Type.Method\n\t(*dir/pkg.Type).Method\n\nOtherwise the name is interpreted as a case-insensitive unqualified\nidentifier such as \"errorf\". Either way, if a listed name ends in f, the\nfunction is assumed to be Printf-like, taking a format string before the\nargument list. Otherwise it is assumed to be Print-like, taking a list\nof arguments with no format string.\n",
+ Default: true,
+ },
+ {
+ Name: "shadow",
+ Doc: "check for possible unintended shadowing of variables\n\nThis analyzer check for shadowed variables.\nA shadowed variable is a variable declared in an inner scope\nwith the same name and type as a variable in an outer scope,\nand where the outer variable is mentioned after the inner one\nis declared.\n\n(This definition can be refined; the module generates too many\nfalse positives and is not yet enabled by default.)\n\nFor example:\n\n\tfunc BadRead(f *os.File, buf []byte) error {\n\t\tvar err error\n\t\tfor {\n\t\t\tn, err := f.Read(buf) // shadows the function variable 'err'\n\t\t\tif err != nil {\n\t\t\t\tbreak // causes return of wrong value\n\t\t\t}\n\t\t\tfoo(buf)\n\t\t}\n\t\treturn err\n\t}\n",
+ },
+ {
+ Name: "shift",
+ Doc: "check for shifts that equal or exceed the width of the integer",
+ Default: true,
+ },
+ {
+ Name: "simplifycompositelit",
+ Doc: "check for composite literal simplifications\n\nAn array, slice, or map composite literal of the form:\n\t[]T{T{}, T{}}\nwill be simplified to:\n\t[]T{{}, {}}\n\nThis is one of the simplifications that \"gofmt -s\" applies.",
+ Default: true,
+ },
+ {
+ Name: "simplifyrange",
+ Doc: "check for range statement simplifications\n\nA range of the form:\n\tfor x, _ = range v {...}\nwill be simplified to:\n\tfor x = range v {...}\n\nA range of the form:\n\tfor _ = range v {...}\nwill be simplified to:\n\tfor range v {...}\n\nThis is one of the simplifications that \"gofmt -s\" applies.",
+ Default: true,
+ },
+ {
+ Name: "simplifyslice",
+ Doc: "check for slice simplifications\n\nA slice expression of the form:\n\ts[a:len(s)]\nwill be simplified to:\n\ts[a:]\n\nThis is one of the simplifications that \"gofmt -s\" applies.",
+ Default: true,
+ },
+ {
+ Name: "sortslice",
+ Doc: "check the argument type of sort.Slice\n\nsort.Slice requires an argument of a slice type. Check that\nthe interface{} value passed to sort.Slice is actually a slice.",
+ Default: true,
+ },
+ {
+ Name: "stdmethods",
+ Doc: "check signature of methods of well-known interfaces\n\nSometimes a type may be intended to satisfy an interface but may fail to\ndo so because of a mistake in its method signature.\nFor example, the result of this WriteTo method should be (int64, error),\nnot error, to satisfy io.WriterTo:\n\n\ttype myWriterTo struct{...}\n func (myWriterTo) WriteTo(w io.Writer) error { ... }\n\nThis check ensures that each method whose name matches one of several\nwell-known interface methods from the standard library has the correct\nsignature for that interface.\n\nChecked method names include:\n\tFormat GobEncode GobDecode MarshalJSON MarshalXML\n\tPeek ReadByte ReadFrom ReadRune Scan Seek\n\tUnmarshalJSON UnreadByte UnreadRune WriteByte\n\tWriteTo\n",
+ Default: true,
+ },
+ {
+ Name: "stringintconv",
+ Doc: "check for string(int) conversions\n\nThis checker flags conversions of the form string(x) where x is an integer\n(but not byte or rune) type. Such conversions are discouraged because they\nreturn the UTF-8 representation of the Unicode code point x, and not a decimal\nstring representation of x as one might expect. Furthermore, if x denotes an\ninvalid code point, the conversion cannot be statically rejected.\n\nFor conversions that intend on using the code point, consider replacing them\nwith string(rune(x)). Otherwise, strconv.Itoa and its equivalents return the\nstring representation of the value in the desired base.\n",
+ Default: true,
+ },
+ {
+ Name: "structtag",
+ Doc: "check that struct field tags conform to reflect.StructTag.Get\n\nAlso report certain struct tags (json, xml) used with unexported fields.",
+ Default: true,
+ },
+ {
+ Name: "testinggoroutine",
+ Doc: "report calls to (*testing.T).Fatal from goroutines started by a test.\n\nFunctions that abruptly terminate a test, such as the Fatal, Fatalf, FailNow, and\nSkip{,f,Now} methods of *testing.T, must be called from the test goroutine itself.\nThis checker detects calls to these functions that occur within a goroutine\nstarted by the test. For example:\n\nfunc TestFoo(t *testing.T) {\n go func() {\n t.Fatal(\"oops\") // error: (*T).Fatal called from non-test goroutine\n }()\n}\n",
+ Default: true,
+ },
+ {
+ Name: "tests",
+ Doc: "check for common mistaken usages of tests and examples\n\nThe tests checker walks Test, Benchmark and Example functions checking\nmalformed names, wrong signatures and examples documenting non-existent\nidentifiers.\n\nPlease see the documentation for package testing in golang.org/pkg/testing\nfor the conventions that are enforced for Tests, Benchmarks, and Examples.",
+ Default: true,
+ },
+ {
+ Name: "timeformat",
+ Doc: "check for calls of (time.Time).Format or time.Parse with 2006-02-01\n\nThe timeformat checker looks for time formats with the 2006-02-01 (yyyy-dd-mm)\nformat. Internationally, \"yyyy-dd-mm\" does not occur in common calendar date\nstandards, and so it is more likely that 2006-01-02 (yyyy-mm-dd) was intended.\n",
+ Default: true,
+ },
+ {
+ Name: "unmarshal",
+ Doc: "report passing non-pointer or non-interface values to unmarshal\n\nThe unmarshal analysis reports calls to functions such as json.Unmarshal\nin which the argument type is not a pointer or an interface.",
+ Default: true,
+ },
+ {
+ Name: "unreachable",
+ Doc: "check for unreachable code\n\nThe unreachable analyzer finds statements that execution can never reach\nbecause they are preceded by an return statement, a call to panic, an\ninfinite loop, or similar constructs.",
+ Default: true,
+ },
+ {
+ Name: "unsafeptr",
+ Doc: "check for invalid conversions of uintptr to unsafe.Pointer\n\nThe unsafeptr analyzer reports likely incorrect uses of unsafe.Pointer\nto convert integers to pointers. A conversion from uintptr to\nunsafe.Pointer is invalid if it implies that there is a uintptr-typed\nword in memory that holds a pointer value, because that word will be\ninvisible to stack copying and to the garbage collector.",
+ Default: true,
+ },
+ {
+ Name: "unusedparams",
+ Doc: "check for unused parameters of functions\n\nThe unusedparams analyzer checks functions to see if there are\nany parameters that are not being used.\n\nTo reduce false positives it ignores:\n- methods\n- parameters that do not have a name or are underscored\n- functions in test files\n- functions with empty bodies or those with just a return stmt",
+ },
+ {
+ Name: "unusedresult",
+ Doc: "check for unused results of calls to some functions\n\nSome functions like fmt.Errorf return a result and have no side effects,\nso it is always a mistake to discard the result. This analyzer reports\ncalls to certain functions in which the result of the call is ignored.\n\nThe set of functions may be controlled using flags.",
+ Default: true,
+ },
+ {
+ Name: "unusedwrite",
+ Doc: "checks for unused writes\n\nThe analyzer reports instances of writes to struct fields and\narrays that are never read. Specifically, when a struct object\nor an array is copied, its elements are copied implicitly by\nthe compiler, and any element write to this copy does nothing\nwith the original object.\n\nFor example:\n\n\ttype T struct { x int }\n\tfunc f(input []T) {\n\t\tfor i, v := range input { // v is a copy\n\t\t\tv.x = i // unused write to field x\n\t\t}\n\t}\n\nAnother example is about non-pointer receiver:\n\n\ttype T struct { x int }\n\tfunc (t T) f() { // t is a copy\n\t\tt.x = i // unused write to field x\n\t}\n",
+ },
+ {
+ Name: "useany",
+ Doc: "check for constraints that could be simplified to \"any\"",
+ },
+ {
+ Name: "fillreturns",
+ Doc: "suggest fixes for errors due to an incorrect number of return values\n\nThis checker provides suggested fixes for type errors of the\ntype \"wrong number of return values (want %d, got %d)\". For example:\n\tfunc m() (int, string, *bool, error) {\n\t\treturn\n\t}\nwill turn into\n\tfunc m() (int, string, *bool, error) {\n\t\treturn 0, \"\", nil, nil\n\t}\n\nThis functionality is similar to https://github.com/sqs/goreturns.\n",
+ Default: true,
+ },
+ {
+ Name: "nonewvars",
+ Doc: "suggested fixes for \"no new vars on left side of :=\"\n\nThis checker provides suggested fixes for type errors of the\ntype \"no new vars on left side of :=\". For example:\n\tz := 1\n\tz := 2\nwill turn into\n\tz := 1\n\tz = 2\n",
+ Default: true,
+ },
+ {
+ Name: "noresultvalues",
+ Doc: "suggested fixes for unexpected return values\n\nThis checker provides suggested fixes for type errors of the\ntype \"no result values expected\" or \"too many return values\".\nFor example:\n\tfunc z() { return nil }\nwill turn into\n\tfunc z() { return }\n",
+ Default: true,
+ },
+ {
+ Name: "undeclaredname",
+ Doc: "suggested fixes for \"undeclared name: <>\"\n\nThis checker provides suggested fixes for type errors of the\ntype \"undeclared name: <>\". It will either insert a new statement,\nsuch as:\n\n\"<> := \"\n\nor a new function declaration, such as:\n\nfunc <>(inferred parameters) {\n\tpanic(\"implement me!\")\n}\n",
+ Default: true,
+ },
+ {
+ Name: "unusedvariable",
+ Doc: "check for unused variables\n\nThe unusedvariable analyzer suggests fixes for unused variables errors.\n",
+ },
+ {
+ Name: "fillstruct",
+ Doc: "note incomplete struct initializations\n\nThis analyzer provides diagnostics for any struct literals that do not have\nany fields initialized. Because the suggested fix for this analysis is\nexpensive to compute, callers should compute it separately, using the\nSuggestedFix function below.\n",
+ Default: true,
+ },
+ {
+ Name: "stubmethods",
+ Doc: "stub methods analyzer\n\nThis analyzer generates method stubs for concrete types\nin order to implement a target interface",
+ Default: true,
+ },
+ },
+ Hints: []*HintJSON{
+ {
+ Name: "assignVariableTypes",
+ Doc: "Enable/disable inlay hints for variable types in assign statements:\n```go\n\ti/* int*/, j/* int*/ := 0, len(r)-1\n```",
+ },
+ {
+ Name: "compositeLiteralFields",
+ Doc: "Enable/disable inlay hints for composite literal field names:\n```go\n\t{/*in: */\"Hello, world\", /*want: */\"dlrow ,olleH\"}\n```",
+ },
+ {
+ Name: "compositeLiteralTypes",
+ Doc: "Enable/disable inlay hints for composite literal types:\n```go\n\tfor _, c := range []struct {\n\t\tin, want string\n\t}{\n\t\t/*struct{ in string; want string }*/{\"Hello, world\", \"dlrow ,olleH\"},\n\t}\n```",
+ },
+ {
+ Name: "constantValues",
+ Doc: "Enable/disable inlay hints for constant values:\n```go\n\tconst (\n\t\tKindNone Kind = iota/* = 0*/\n\t\tKindPrint/* = 1*/\n\t\tKindPrintf/* = 2*/\n\t\tKindErrorf/* = 3*/\n\t)\n```",
+ },
+ {
+ Name: "functionTypeParameters",
+ Doc: "Enable/disable inlay hints for implicit type parameters on generic functions:\n```go\n\tmyFoo/*[int, string]*/(1, \"hello\")\n```",
+ },
+ {
+ Name: "parameterNames",
+ Doc: "Enable/disable inlay hints for parameter names:\n```go\n\tparseInt(/* str: */ \"123\", /* radix: */ 8)\n```",
+ },
+ {
+ Name: "rangeVariableTypes",
+ Doc: "Enable/disable inlay hints for variable types in range statements:\n```go\n\tfor k/* int*/, v/* string*/ := range []string{} {\n\t\tfmt.Println(k, v)\n\t}\n```",
+ },
+ },
+}
diff --git a/gopls/internal/lsp/source/call_hierarchy.go b/gopls/internal/lsp/source/call_hierarchy.go
new file mode 100644
index 000000000..2bdf7df40
--- /dev/null
+++ b/gopls/internal/lsp/source/call_hierarchy.go
@@ -0,0 +1,311 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package source
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "go/ast"
+ "go/token"
+ "go/types"
+ "path/filepath"
+
+ "golang.org/x/tools/go/ast/astutil"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/safetoken"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/bug"
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/event/tag"
+)
+
+// PrepareCallHierarchy returns an array of CallHierarchyItem for a file and the position within the file.
+func PrepareCallHierarchy(ctx context.Context, snapshot Snapshot, fh FileHandle, pp protocol.Position) ([]protocol.CallHierarchyItem, error) {
+ ctx, done := event.Start(ctx, "source.PrepareCallHierarchy")
+ defer done()
+
+ pkg, pgf, err := PackageForFile(ctx, snapshot, fh.URI(), NarrowestPackage)
+ if err != nil {
+ return nil, err
+ }
+ pos, err := pgf.PositionPos(pp)
+ if err != nil {
+ return nil, err
+ }
+
+ _, obj, _ := referencedObject(pkg, pgf, pos)
+ if obj == nil {
+ return nil, nil
+ }
+
+ if _, ok := obj.Type().Underlying().(*types.Signature); !ok {
+ return nil, nil
+ }
+
+ declLoc, err := mapPosition(ctx, pkg.FileSet(), snapshot, obj.Pos(), adjustedObjEnd(obj))
+ if err != nil {
+ return nil, err
+ }
+ rng := declLoc.Range
+
+ callHierarchyItem := protocol.CallHierarchyItem{
+ Name: obj.Name(),
+ Kind: protocol.Function,
+ Tags: []protocol.SymbolTag{},
+ Detail: fmt.Sprintf("%s • %s", obj.Pkg().Path(), filepath.Base(declLoc.URI.SpanURI().Filename())),
+ URI: declLoc.URI,
+ Range: rng,
+ SelectionRange: rng,
+ }
+ return []protocol.CallHierarchyItem{callHierarchyItem}, nil
+}
+
+// IncomingCalls returns an array of CallHierarchyIncomingCall for a file and the position within the file.
+func IncomingCalls(ctx context.Context, snapshot Snapshot, fh FileHandle, pos protocol.Position) ([]protocol.CallHierarchyIncomingCall, error) {
+ ctx, done := event.Start(ctx, "source.IncomingCalls")
+ defer done()
+
+ refs, err := references(ctx, snapshot, fh, pos, false)
+ if err != nil {
+ if errors.Is(err, ErrNoIdentFound) || errors.Is(err, errNoObjectFound) {
+ return nil, nil
+ }
+ return nil, err
+ }
+
+ // Group references by their enclosing function declaration.
+ incomingCalls := make(map[protocol.Location]*protocol.CallHierarchyIncomingCall)
+ for _, ref := range refs {
+ callItem, err := enclosingNodeCallItem(ctx, snapshot, ref.pkgPath, ref.location)
+ if err != nil {
+ event.Error(ctx, "error getting enclosing node", err, tag.Method.Of(string(ref.pkgPath)))
+ continue
+ }
+ loc := protocol.Location{
+ URI: callItem.URI,
+ Range: callItem.Range,
+ }
+ call, ok := incomingCalls[loc]
+ if !ok {
+ call = &protocol.CallHierarchyIncomingCall{From: callItem}
+ incomingCalls[loc] = call
+ }
+ call.FromRanges = append(call.FromRanges, ref.location.Range)
+ }
+
+ // Flatten the map of pointers into a slice of values.
+ incomingCallItems := make([]protocol.CallHierarchyIncomingCall, 0, len(incomingCalls))
+ for _, callItem := range incomingCalls {
+ incomingCallItems = append(incomingCallItems, *callItem)
+ }
+ return incomingCallItems, nil
+}
+
+// enclosingNodeCallItem creates a CallHierarchyItem representing the function call at loc.
+func enclosingNodeCallItem(ctx context.Context, snapshot Snapshot, pkgPath PackagePath, loc protocol.Location) (protocol.CallHierarchyItem, error) {
+ // Parse the file containing the reference.
+ fh, err := snapshot.GetFile(ctx, loc.URI.SpanURI())
+ if err != nil {
+ return protocol.CallHierarchyItem{}, err
+ }
+ // TODO(adonovan): opt: before parsing, trim the bodies of functions
+ // that don't contain the reference, using either a scanner-based
+ // implementation such as https://go.dev/play/p/KUrObH1YkX8
+ // (~31% speedup), or a byte-oriented implementation (2x speedup).
+ pgf, err := snapshot.ParseGo(ctx, fh, ParseFull)
+ if err != nil {
+ return protocol.CallHierarchyItem{}, err
+ }
+ start, end, err := pgf.RangePos(loc.Range)
+ if err != nil {
+ return protocol.CallHierarchyItem{}, err
+ }
+
+ // Find the enclosing function, if any, and the number of func literals in between.
+ var funcDecl *ast.FuncDecl
+ var funcLit *ast.FuncLit // innermost function literal
+ var litCount int
+ path, _ := astutil.PathEnclosingInterval(pgf.File, start, end)
+outer:
+ for _, node := range path {
+ switch n := node.(type) {
+ case *ast.FuncDecl:
+ funcDecl = n
+ break outer
+ case *ast.FuncLit:
+ litCount++
+ if litCount > 1 {
+ continue
+ }
+ funcLit = n
+ }
+ }
+
+ nameIdent := path[len(path)-1].(*ast.File).Name
+ kind := protocol.Package
+ if funcDecl != nil {
+ nameIdent = funcDecl.Name
+ kind = protocol.Function
+ }
+
+ nameStart, nameEnd := nameIdent.Pos(), nameIdent.End()
+ if funcLit != nil {
+ nameStart, nameEnd = funcLit.Type.Func, funcLit.Type.Params.Pos()
+ kind = protocol.Function
+ }
+ rng, err := pgf.PosRange(nameStart, nameEnd)
+ if err != nil {
+ return protocol.CallHierarchyItem{}, err
+ }
+
+ name := nameIdent.Name
+ for i := 0; i < litCount; i++ {
+ name += ".func()"
+ }
+
+ return protocol.CallHierarchyItem{
+ Name: name,
+ Kind: kind,
+ Tags: []protocol.SymbolTag{},
+ Detail: fmt.Sprintf("%s • %s", pkgPath, filepath.Base(fh.URI().Filename())),
+ URI: loc.URI,
+ Range: rng,
+ SelectionRange: rng,
+ }, nil
+}
+
+// OutgoingCalls returns an array of CallHierarchyOutgoingCall for a file and the position within the file.
+func OutgoingCalls(ctx context.Context, snapshot Snapshot, fh FileHandle, pp protocol.Position) ([]protocol.CallHierarchyOutgoingCall, error) {
+ ctx, done := event.Start(ctx, "source.OutgoingCalls")
+ defer done()
+
+ pkg, pgf, err := PackageForFile(ctx, snapshot, fh.URI(), NarrowestPackage)
+ if err != nil {
+ return nil, err
+ }
+ pos, err := pgf.PositionPos(pp)
+ if err != nil {
+ return nil, err
+ }
+
+ _, obj, _ := referencedObject(pkg, pgf, pos)
+ if obj == nil {
+ return nil, nil
+ }
+
+ if _, ok := obj.Type().Underlying().(*types.Signature); !ok {
+ return nil, nil
+ }
+
+ // Skip builtins.
+ if obj.Pkg() == nil {
+ return nil, nil
+ }
+
+ if !obj.Pos().IsValid() {
+ return nil, bug.Errorf("internal error: object %s.%s missing position", obj.Pkg().Path(), obj.Name())
+ }
+
+ declFile := pkg.FileSet().File(obj.Pos())
+ if declFile == nil {
+ return nil, bug.Errorf("file not found for %d", obj.Pos())
+ }
+
+ uri := span.URIFromPath(declFile.Name())
+ offset, err := safetoken.Offset(declFile, obj.Pos())
+ if err != nil {
+ return nil, err
+ }
+
+ // Use TypecheckFull as we want to inspect the body of the function declaration.
+ declPkg, declPGF, err := PackageForFile(ctx, snapshot, uri, NarrowestPackage)
+ if err != nil {
+ return nil, err
+ }
+
+ declPos, err := safetoken.Pos(declPGF.Tok, offset)
+ if err != nil {
+ return nil, err
+ }
+
+ declNode, _, _ := findDeclInfo([]*ast.File{declPGF.File}, declPos)
+ if declNode == nil {
+ // TODO(rfindley): why don't we return an error here, or even bug.Errorf?
+ return nil, nil
+ // return nil, bug.Errorf("failed to find declaration for object %s.%s", obj.Pkg().Path(), obj.Name())
+ }
+
+ type callRange struct {
+ start, end token.Pos
+ }
+ callRanges := []callRange{}
+ ast.Inspect(declNode, func(n ast.Node) bool {
+ if call, ok := n.(*ast.CallExpr); ok {
+ var start, end token.Pos
+ switch n := call.Fun.(type) {
+ case *ast.SelectorExpr:
+ start, end = n.Sel.NamePos, call.Lparen
+ case *ast.Ident:
+ start, end = n.NamePos, call.Lparen
+ case *ast.FuncLit:
+ // while we don't add the function literal as an 'outgoing' call
+ // we still want to traverse into it
+ return true
+ default:
+ // ignore any other kind of call expressions
+ // for ex: direct function literal calls since that's not an 'outgoing' call
+ return false
+ }
+ callRanges = append(callRanges, callRange{start: start, end: end})
+ }
+ return true
+ })
+
+ outgoingCalls := map[token.Pos]*protocol.CallHierarchyOutgoingCall{}
+ for _, callRange := range callRanges {
+ _, obj, _ := referencedObject(declPkg, declPGF, callRange.start)
+ if obj == nil {
+ continue
+ }
+
+ // ignore calls to builtin functions
+ if obj.Pkg() == nil {
+ continue
+ }
+
+ outgoingCall, ok := outgoingCalls[obj.Pos()]
+ if !ok {
+ loc, err := mapPosition(ctx, declPkg.FileSet(), snapshot, obj.Pos(), obj.Pos()+token.Pos(len(obj.Name())))
+ if err != nil {
+ return nil, err
+ }
+ outgoingCall = &protocol.CallHierarchyOutgoingCall{
+ To: protocol.CallHierarchyItem{
+ Name: obj.Name(),
+ Kind: protocol.Function,
+ Tags: []protocol.SymbolTag{},
+ Detail: fmt.Sprintf("%s • %s", obj.Pkg().Path(), filepath.Base(loc.URI.SpanURI().Filename())),
+ URI: loc.URI,
+ Range: loc.Range,
+ SelectionRange: loc.Range,
+ },
+ }
+ outgoingCalls[obj.Pos()] = outgoingCall
+ }
+
+ rng, err := declPGF.PosRange(callRange.start, callRange.end)
+ if err != nil {
+ return nil, err
+ }
+ outgoingCall.FromRanges = append(outgoingCall.FromRanges, rng)
+ }
+
+ outgoingCallItems := make([]protocol.CallHierarchyOutgoingCall, 0, len(outgoingCalls))
+ for _, callItem := range outgoingCalls {
+ outgoingCallItems = append(outgoingCallItems, *callItem)
+ }
+ return outgoingCallItems, nil
+}
diff --git a/gopls/internal/lsp/source/code_lens.go b/gopls/internal/lsp/source/code_lens.go
new file mode 100644
index 000000000..ef1c3aa54
--- /dev/null
+++ b/gopls/internal/lsp/source/code_lens.go
@@ -0,0 +1,248 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package source
+
+import (
+ "context"
+ "go/ast"
+ "go/token"
+ "go/types"
+ "path/filepath"
+ "regexp"
+ "strings"
+
+ "golang.org/x/tools/gopls/internal/lsp/command"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/span"
+)
+
+type LensFunc func(context.Context, Snapshot, FileHandle) ([]protocol.CodeLens, error)
+
+// LensFuncs returns the supported lensFuncs for Go files.
+func LensFuncs() map[command.Command]LensFunc {
+ return map[command.Command]LensFunc{
+ command.Generate: goGenerateCodeLens,
+ command.Test: runTestCodeLens,
+ command.RegenerateCgo: regenerateCgoLens,
+ command.GCDetails: toggleDetailsCodeLens,
+ }
+}
+
+var (
+ testRe = regexp.MustCompile("^Test[^a-z]")
+ benchmarkRe = regexp.MustCompile("^Benchmark[^a-z]")
+)
+
+func runTestCodeLens(ctx context.Context, snapshot Snapshot, fh FileHandle) ([]protocol.CodeLens, error) {
+ codeLens := make([]protocol.CodeLens, 0)
+
+ fns, err := TestsAndBenchmarks(ctx, snapshot, fh)
+ if err != nil {
+ return nil, err
+ }
+ puri := protocol.URIFromSpanURI(fh.URI())
+ for _, fn := range fns.Tests {
+ cmd, err := command.NewTestCommand("run test", puri, []string{fn.Name}, nil)
+ if err != nil {
+ return nil, err
+ }
+ rng := protocol.Range{Start: fn.Rng.Start, End: fn.Rng.Start}
+ codeLens = append(codeLens, protocol.CodeLens{Range: rng, Command: &cmd})
+ }
+
+ for _, fn := range fns.Benchmarks {
+ cmd, err := command.NewTestCommand("run benchmark", puri, nil, []string{fn.Name})
+ if err != nil {
+ return nil, err
+ }
+ rng := protocol.Range{Start: fn.Rng.Start, End: fn.Rng.Start}
+ codeLens = append(codeLens, protocol.CodeLens{Range: rng, Command: &cmd})
+ }
+
+ if len(fns.Benchmarks) > 0 {
+ pgf, err := snapshot.ParseGo(ctx, fh, ParseFull)
+ if err != nil {
+ return nil, err
+ }
+ // add a code lens to the top of the file which runs all benchmarks in the file
+ rng, err := pgf.PosRange(pgf.File.Package, pgf.File.Package)
+ if err != nil {
+ return nil, err
+ }
+ var benches []string
+ for _, fn := range fns.Benchmarks {
+ benches = append(benches, fn.Name)
+ }
+ cmd, err := command.NewTestCommand("run file benchmarks", puri, nil, benches)
+ if err != nil {
+ return nil, err
+ }
+ codeLens = append(codeLens, protocol.CodeLens{Range: rng, Command: &cmd})
+ }
+ return codeLens, nil
+}
+
+type testFn struct {
+ Name string
+ Rng protocol.Range
+}
+
+type testFns struct {
+ Tests []testFn
+ Benchmarks []testFn
+}
+
+func TestsAndBenchmarks(ctx context.Context, snapshot Snapshot, fh FileHandle) (testFns, error) {
+ var out testFns
+
+ if !strings.HasSuffix(fh.URI().Filename(), "_test.go") {
+ return out, nil
+ }
+ pkg, pgf, err := PackageForFile(ctx, snapshot, fh.URI(), NarrowestPackage)
+ if err != nil {
+ return out, err
+ }
+
+ for _, d := range pgf.File.Decls {
+ fn, ok := d.(*ast.FuncDecl)
+ if !ok {
+ continue
+ }
+
+ rng, err := pgf.NodeRange(fn)
+ if err != nil {
+ return out, err
+ }
+
+ if matchTestFunc(fn, pkg, testRe, "T") {
+ out.Tests = append(out.Tests, testFn{fn.Name.Name, rng})
+ }
+
+ if matchTestFunc(fn, pkg, benchmarkRe, "B") {
+ out.Benchmarks = append(out.Benchmarks, testFn{fn.Name.Name, rng})
+ }
+ }
+
+ return out, nil
+}
+
+func matchTestFunc(fn *ast.FuncDecl, pkg Package, nameRe *regexp.Regexp, paramID string) bool {
+ // Make sure that the function name matches a test function.
+ if !nameRe.MatchString(fn.Name.Name) {
+ return false
+ }
+ info := pkg.GetTypesInfo()
+ if info == nil {
+ return false
+ }
+ obj := info.ObjectOf(fn.Name)
+ if obj == nil {
+ return false
+ }
+ sig, ok := obj.Type().(*types.Signature)
+ if !ok {
+ return false
+ }
+ // Test functions should have only one parameter.
+ if sig.Params().Len() != 1 {
+ return false
+ }
+
+ // Check the type of the only parameter
+ paramTyp, ok := sig.Params().At(0).Type().(*types.Pointer)
+ if !ok {
+ return false
+ }
+ named, ok := paramTyp.Elem().(*types.Named)
+ if !ok {
+ return false
+ }
+ namedObj := named.Obj()
+ if namedObj.Pkg().Path() != "testing" {
+ return false
+ }
+ return namedObj.Id() == paramID
+}
+
+func goGenerateCodeLens(ctx context.Context, snapshot Snapshot, fh FileHandle) ([]protocol.CodeLens, error) {
+ pgf, err := snapshot.ParseGo(ctx, fh, ParseFull)
+ if err != nil {
+ return nil, err
+ }
+ const ggDirective = "//go:generate"
+ for _, c := range pgf.File.Comments {
+ for _, l := range c.List {
+ if !strings.HasPrefix(l.Text, ggDirective) {
+ continue
+ }
+ rng, err := pgf.PosRange(l.Pos(), l.Pos()+token.Pos(len(ggDirective)))
+ if err != nil {
+ return nil, err
+ }
+ dir := protocol.URIFromSpanURI(span.URIFromPath(filepath.Dir(fh.URI().Filename())))
+ nonRecursiveCmd, err := command.NewGenerateCommand("run go generate", command.GenerateArgs{Dir: dir, Recursive: false})
+ if err != nil {
+ return nil, err
+ }
+ recursiveCmd, err := command.NewGenerateCommand("run go generate ./...", command.GenerateArgs{Dir: dir, Recursive: true})
+ if err != nil {
+ return nil, err
+ }
+ return []protocol.CodeLens{
+ {Range: rng, Command: &recursiveCmd},
+ {Range: rng, Command: &nonRecursiveCmd},
+ }, nil
+
+ }
+ }
+ return nil, nil
+}
+
+func regenerateCgoLens(ctx context.Context, snapshot Snapshot, fh FileHandle) ([]protocol.CodeLens, error) {
+ pgf, err := snapshot.ParseGo(ctx, fh, ParseFull)
+ if err != nil {
+ return nil, err
+ }
+ var c *ast.ImportSpec
+ for _, imp := range pgf.File.Imports {
+ if imp.Path.Value == `"C"` {
+ c = imp
+ }
+ }
+ if c == nil {
+ return nil, nil
+ }
+ rng, err := pgf.NodeRange(c)
+ if err != nil {
+ return nil, err
+ }
+ puri := protocol.URIFromSpanURI(fh.URI())
+ cmd, err := command.NewRegenerateCgoCommand("regenerate cgo definitions", command.URIArg{URI: puri})
+ if err != nil {
+ return nil, err
+ }
+ return []protocol.CodeLens{{Range: rng, Command: &cmd}}, nil
+}
+
+func toggleDetailsCodeLens(ctx context.Context, snapshot Snapshot, fh FileHandle) ([]protocol.CodeLens, error) {
+ pgf, err := snapshot.ParseGo(ctx, fh, ParseFull)
+ if err != nil {
+ return nil, err
+ }
+ if !pgf.File.Package.IsValid() {
+ // Without a package name we have nowhere to put the codelens, so give up.
+ return nil, nil
+ }
+ rng, err := pgf.PosRange(pgf.File.Package, pgf.File.Package)
+ if err != nil {
+ return nil, err
+ }
+ puri := protocol.URIFromSpanURI(fh.URI())
+ cmd, err := command.NewGCDetailsCommand("Toggle gc annotation details", puri)
+ if err != nil {
+ return nil, err
+ }
+ return []protocol.CodeLens{{Range: rng, Command: &cmd}}, nil
+}
diff --git a/gopls/internal/lsp/source/comment.go b/gopls/internal/lsp/source/comment.go
new file mode 100644
index 000000000..beed328ae
--- /dev/null
+++ b/gopls/internal/lsp/source/comment.go
@@ -0,0 +1,384 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.19
+// +build !go1.19
+
+package source
+
+import (
+ "bytes"
+ "io"
+ "regexp"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// CommentToMarkdown converts comment text to formatted markdown.
+// The comment was prepared by DocReader,
+// so it is known not to have leading, trailing blank lines
+// nor to have trailing spaces at the end of lines.
+// The comment markers have already been removed.
+//
+// Each line is converted into a markdown line and empty lines are just converted to
+// newlines. Heading are prefixed with `### ` to make it a markdown heading.
+//
+// A span of indented lines retains a 4 space prefix block, with the common indent
+// prefix removed unless empty, in which case it will be converted to a newline.
+//
+// URLs in the comment text are converted into links.
+func CommentToMarkdown(text string, _ *Options) string {
+ buf := &bytes.Buffer{}
+ commentToMarkdown(buf, text)
+ return buf.String()
+}
+
+var (
+ mdNewline = []byte("\n")
+ mdHeader = []byte("### ")
+ mdIndent = []byte(" ")
+ mdLinkStart = []byte("[")
+ mdLinkDiv = []byte("](")
+ mdLinkEnd = []byte(")")
+)
+
+func commentToMarkdown(w io.Writer, text string) {
+ blocks := blocks(text)
+ for i, b := range blocks {
+ switch b.op {
+ case opPara:
+ for _, line := range b.lines {
+ emphasize(w, line, true)
+ }
+ case opHead:
+ // The header block can consist of only one line.
+ // However, check the number of lines, just in case.
+ if len(b.lines) == 0 {
+ // Skip this block.
+ continue
+ }
+ header := b.lines[0]
+
+ w.Write(mdHeader)
+ commentEscape(w, header, true)
+ // Header doesn't end with \n unlike the lines of other blocks.
+ w.Write(mdNewline)
+ case opPre:
+ for _, line := range b.lines {
+ if isBlank(line) {
+ w.Write(mdNewline)
+ continue
+ }
+ w.Write(mdIndent)
+ w.Write([]byte(line))
+ }
+ }
+
+ if i < len(blocks)-1 {
+ w.Write(mdNewline)
+ }
+ }
+}
+
+const (
+ ulquo = "“"
+ urquo = "”"
+)
+
+var (
+ markdownEscape = regexp.MustCompile(`([\\\x60*{}[\]()#+\-.!_>~|"$%&'\/:;<=?@^])`)
+
+ unicodeQuoteReplacer = strings.NewReplacer("``", ulquo, "''", urquo)
+)
+
+// commentEscape escapes comment text for markdown. If nice is set,
+// also turn double ` and ' into “ and ”.
+func commentEscape(w io.Writer, text string, nice bool) {
+ if nice {
+ text = convertQuotes(text)
+ }
+ text = escapeRegex(text)
+ w.Write([]byte(text))
+}
+
+func convertQuotes(text string) string {
+ return unicodeQuoteReplacer.Replace(text)
+}
+
+func escapeRegex(text string) string {
+ return markdownEscape.ReplaceAllString(text, `\$1`)
+}
+
+func emphasize(w io.Writer, line string, nice bool) {
+ for {
+ m := matchRx.FindStringSubmatchIndex(line)
+ if m == nil {
+ break
+ }
+ // m >= 6 (two parenthesized sub-regexps in matchRx, 1st one is urlRx)
+
+ // write text before match
+ commentEscape(w, line[0:m[0]], nice)
+
+ // adjust match for URLs
+ match := line[m[0]:m[1]]
+ if strings.Contains(match, "://") {
+ m0, m1 := m[0], m[1]
+ for _, s := range []string{"()", "{}", "[]"} {
+ open, close := s[:1], s[1:] // E.g., "(" and ")"
+ // require opening parentheses before closing parentheses (#22285)
+ if i := strings.Index(match, close); i >= 0 && i < strings.Index(match, open) {
+ m1 = m0 + i
+ match = line[m0:m1]
+ }
+ // require balanced pairs of parentheses (#5043)
+ for i := 0; strings.Count(match, open) != strings.Count(match, close) && i < 10; i++ {
+ m1 = strings.LastIndexAny(line[:m1], s)
+ match = line[m0:m1]
+ }
+ }
+ if m1 != m[1] {
+ // redo matching with shortened line for correct indices
+ m = matchRx.FindStringSubmatchIndex(line[:m[0]+len(match)])
+ }
+ }
+
+ // Following code has been modified from go/doc since words is always
+ // nil. All html formatting has also been transformed into markdown formatting
+
+ // analyze match
+ url := ""
+ if m[2] >= 0 {
+ url = match
+ }
+
+ // write match
+ if len(url) > 0 {
+ w.Write(mdLinkStart)
+ }
+
+ commentEscape(w, match, nice)
+
+ if len(url) > 0 {
+ w.Write(mdLinkDiv)
+ w.Write([]byte(urlReplacer.Replace(url)))
+ w.Write(mdLinkEnd)
+ }
+
+ // advance
+ line = line[m[1]:]
+ }
+ commentEscape(w, line, nice)
+}
+
+// Everything from here on is a copy of go/doc/comment.go
+
+const (
+ // Regexp for Go identifiers
+ identRx = `[\pL_][\pL_0-9]*`
+
+ // Regexp for URLs
+ // Match parens, and check later for balance - see #5043, #22285
+ // Match .,:;?! within path, but not at end - see #18139, #16565
+ // This excludes some rare yet valid urls ending in common punctuation
+ // in order to allow sentences ending in URLs.
+
+ // protocol (required) e.g. http
+ protoPart = `(https?|ftp|file|gopher|mailto|nntp)`
+ // host (required) e.g. www.example.com or [::1]:8080
+ hostPart = `([a-zA-Z0-9_@\-.\[\]:]+)`
+ // path+query+fragment (optional) e.g. /path/index.html?q=foo#bar
+ pathPart = `([.,:;?!]*[a-zA-Z0-9$'()*+&#=@~_/\-\[\]%])*`
+
+ urlRx = protoPart + `://` + hostPart + pathPart
+)
+
+var (
+ matchRx = regexp.MustCompile(`(` + urlRx + `)|(` + identRx + `)`)
+ urlReplacer = strings.NewReplacer(`(`, `\(`, `)`, `\)`)
+)
+
+func indentLen(s string) int {
+ i := 0
+ for i < len(s) && (s[i] == ' ' || s[i] == '\t') {
+ i++
+ }
+ return i
+}
+
+func isBlank(s string) bool {
+ return len(s) == 0 || (len(s) == 1 && s[0] == '\n')
+}
+
+func commonPrefix(a, b string) string {
+ i := 0
+ for i < len(a) && i < len(b) && a[i] == b[i] {
+ i++
+ }
+ return a[0:i]
+}
+
+func unindent(block []string) {
+ if len(block) == 0 {
+ return
+ }
+
+ // compute maximum common white prefix
+ prefix := block[0][0:indentLen(block[0])]
+ for _, line := range block {
+ if !isBlank(line) {
+ prefix = commonPrefix(prefix, line)
+ }
+ }
+ n := len(prefix)
+
+ // remove
+ for i, line := range block {
+ if !isBlank(line) {
+ block[i] = line[n:]
+ }
+ }
+}
+
+// heading returns the trimmed line if it passes as a section heading;
+// otherwise it returns the empty string.
+func heading(line string) string {
+ line = strings.TrimSpace(line)
+ if len(line) == 0 {
+ return ""
+ }
+
+ // a heading must start with an uppercase letter
+ r, _ := utf8.DecodeRuneInString(line)
+ if !unicode.IsLetter(r) || !unicode.IsUpper(r) {
+ return ""
+ }
+
+ // it must end in a letter or digit:
+ r, _ = utf8.DecodeLastRuneInString(line)
+ if !unicode.IsLetter(r) && !unicode.IsDigit(r) {
+ return ""
+ }
+
+ // exclude lines with illegal characters. we allow "(),"
+ if strings.ContainsAny(line, ";:!?+*/=[]{}_^°&§~%#@<\">\\") {
+ return ""
+ }
+
+ // allow "'" for possessive "'s" only
+ for b := line; ; {
+ i := strings.IndexRune(b, '\'')
+ if i < 0 {
+ break
+ }
+ if i+1 >= len(b) || b[i+1] != 's' || (i+2 < len(b) && b[i+2] != ' ') {
+ return "" // not followed by "s "
+ }
+ b = b[i+2:]
+ }
+
+ // allow "." when followed by non-space
+ for b := line; ; {
+ i := strings.IndexRune(b, '.')
+ if i < 0 {
+ break
+ }
+ if i+1 >= len(b) || b[i+1] == ' ' {
+ return "" // not followed by non-space
+ }
+ b = b[i+1:]
+ }
+
+ return line
+}
+
+type op int
+
+const (
+ opPara op = iota
+ opHead
+ opPre
+)
+
+type block struct {
+ op op
+ lines []string
+}
+
+func blocks(text string) []block {
+ var (
+ out []block
+ para []string
+
+ lastWasBlank = false
+ lastWasHeading = false
+ )
+
+ close := func() {
+ if para != nil {
+ out = append(out, block{opPara, para})
+ para = nil
+ }
+ }
+
+ lines := strings.SplitAfter(text, "\n")
+ unindent(lines)
+ for i := 0; i < len(lines); {
+ line := lines[i]
+ if isBlank(line) {
+ // close paragraph
+ close()
+ i++
+ lastWasBlank = true
+ continue
+ }
+ if indentLen(line) > 0 {
+ // close paragraph
+ close()
+
+ // count indented or blank lines
+ j := i + 1
+ for j < len(lines) && (isBlank(lines[j]) || indentLen(lines[j]) > 0) {
+ j++
+ }
+ // but not trailing blank lines
+ for j > i && isBlank(lines[j-1]) {
+ j--
+ }
+ pre := lines[i:j]
+ i = j
+
+ unindent(pre)
+
+ // put those lines in a pre block
+ out = append(out, block{opPre, pre})
+ lastWasHeading = false
+ continue
+ }
+
+ if lastWasBlank && !lastWasHeading && i+2 < len(lines) &&
+ isBlank(lines[i+1]) && !isBlank(lines[i+2]) && indentLen(lines[i+2]) == 0 {
+ // current line is non-blank, surrounded by blank lines
+ // and the next non-blank line is not indented: this
+ // might be a heading.
+ if head := heading(line); head != "" {
+ close()
+ out = append(out, block{opHead, []string{head}})
+ i += 2
+ lastWasHeading = true
+ continue
+ }
+ }
+
+ // open paragraph
+ lastWasBlank = false
+ lastWasHeading = false
+ para = append(para, lines[i])
+ i++
+ }
+ close()
+
+ return out
+}
diff --git a/gopls/internal/lsp/source/comment_go118_test.go b/gopls/internal/lsp/source/comment_go118_test.go
new file mode 100644
index 000000000..60bd14b9f
--- /dev/null
+++ b/gopls/internal/lsp/source/comment_go118_test.go
@@ -0,0 +1,371 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.19
+// +build !go1.19
+
+package source
+
+import (
+ "bytes"
+ "reflect"
+ "strings"
+ "testing"
+)
+
+// This file is a copy of go/doc/comment_test.go with the exception for
+// the test cases for TestEmphasize and TestCommentEscape
+
+var headingTests = []struct {
+ line string
+ ok bool
+}{
+ {"Section", true},
+ {"A typical usage", true},
+ {"ΔΛΞ is Greek", true},
+ {"Foo 42", true},
+ {"", false},
+ {"section", false},
+ {"A typical usage:", false},
+ {"This code:", false},
+ {"δ is Greek", false},
+ {"Foo §", false},
+ {"Fermat's Last Sentence", true},
+ {"Fermat's", true},
+ {"'sX", false},
+ {"Ted 'Too' Bar", false},
+ {"Use n+m", false},
+ {"Scanning:", false},
+ {"N:M", false},
+}
+
+func TestIsHeading(t *testing.T) {
+ for _, tt := range headingTests {
+ if h := heading(tt.line); (len(h) > 0) != tt.ok {
+ t.Errorf("isHeading(%q) = %v, want %v", tt.line, h, tt.ok)
+ }
+ }
+}
+
+var blocksTests = []struct {
+ in string
+ out []block
+ text string
+}{
+ {
+ in: `Para 1.
+Para 1 line 2.
+
+Para 2.
+
+Section
+
+Para 3.
+
+ pre
+ pre1
+
+Para 4.
+
+ pre
+ pre1
+
+ pre2
+
+Para 5.
+
+
+ pre
+
+
+ pre1
+ pre2
+
+Para 6.
+ pre
+ pre2
+`,
+ out: []block{
+ {opPara, []string{"Para 1.\n", "Para 1 line 2.\n"}},
+ {opPara, []string{"Para 2.\n"}},
+ {opHead, []string{"Section"}},
+ {opPara, []string{"Para 3.\n"}},
+ {opPre, []string{"pre\n", "pre1\n"}},
+ {opPara, []string{"Para 4.\n"}},
+ {opPre, []string{"pre\n", "pre1\n", "\n", "pre2\n"}},
+ {opPara, []string{"Para 5.\n"}},
+ {opPre, []string{"pre\n", "\n", "\n", "pre1\n", "pre2\n"}},
+ {opPara, []string{"Para 6.\n"}},
+ {opPre, []string{"pre\n", "pre2\n"}},
+ },
+ text: `. Para 1. Para 1 line 2.
+
+. Para 2.
+
+
+. Section
+
+. Para 3.
+
+$ pre
+$ pre1
+
+. Para 4.
+
+$ pre
+$ pre1
+
+$ pre2
+
+. Para 5.
+
+$ pre
+
+
+$ pre1
+$ pre2
+
+. Para 6.
+
+$ pre
+$ pre2
+`,
+ },
+ {
+ in: "Para.\n\tshould not be ``escaped''",
+ out: []block{
+ {opPara, []string{"Para.\n"}},
+ {opPre, []string{"should not be ``escaped''"}},
+ },
+ text: ". Para.\n\n$ should not be ``escaped''",
+ },
+ {
+ in: "// A very long line of 46 char for line wrapping.",
+ out: []block{
+ {opPara, []string{"// A very long line of 46 char for line wrapping."}},
+ },
+ text: `. // A very long line of 46 char for line
+. // wrapping.
+`,
+ },
+ {
+ in: `/* A very long line of 46 char for line wrapping.
+A very long line of 46 char for line wrapping. */`,
+ out: []block{
+ {opPara, []string{"/* A very long line of 46 char for line wrapping.\n", "A very long line of 46 char for line wrapping. */"}},
+ },
+ text: `. /* A very long line of 46 char for line
+. wrapping. A very long line of 46 char
+. for line wrapping. */
+`,
+ },
+}
+
+func TestBlocks(t *testing.T) {
+ for i, tt := range blocksTests {
+ b := blocks(tt.in)
+ if !reflect.DeepEqual(b, tt.out) {
+ t.Errorf("#%d: mismatch\nhave: %v\nwant: %v", i, b, tt.out)
+ }
+ }
+}
+
+// This has been modified from go/doc to use markdown links instead of html ones
+// and use markdown escaping instead oh html
+var emphasizeTests = []struct {
+ in, out string
+}{
+ {"", ""},
+ {"http://[::1]:8080/foo.txt", `[http\:\/\/\[\:\:1\]\:8080\/foo\.txt](http://[::1]:8080/foo.txt)`},
+ {"before (https://www.google.com) after", `before \([https\:\/\/www\.google\.com](https://www.google.com)\) after`},
+ {"before https://www.google.com:30/x/y/z:b::c. After", `before [https\:\/\/www\.google\.com\:30\/x\/y\/z\:b\:\:c](https://www.google.com:30/x/y/z:b::c)\. After`},
+ {"http://www.google.com/path/:;!-/?query=%34b#093124", `[http\:\/\/www\.google\.com\/path\/\:\;\!\-\/\?query\=\%34b\#093124](http://www.google.com/path/:;!-/?query=%34b#093124)`},
+ {"http://www.google.com/path/:;!-/?query=%34bar#093124", `[http\:\/\/www\.google\.com\/path\/\:\;\!\-\/\?query\=\%34bar\#093124](http://www.google.com/path/:;!-/?query=%34bar#093124)`},
+ {"http://www.google.com/index.html! After", `[http\:\/\/www\.google\.com\/index\.html](http://www.google.com/index.html)\! After`},
+ {"http://www.google.com/", `[http\:\/\/www\.google\.com\/](http://www.google.com/)`},
+ {"https://www.google.com/", `[https\:\/\/www\.google\.com\/](https://www.google.com/)`},
+ {"http://www.google.com/path.", `[http\:\/\/www\.google\.com\/path](http://www.google.com/path)\.`},
+ {"http://en.wikipedia.org/wiki/Camellia_(cipher)", `[http\:\/\/en\.wikipedia\.org\/wiki\/Camellia\_\(cipher\)](http://en.wikipedia.org/wiki/Camellia_\(cipher\))`},
+ {"(http://www.google.com/)", `\([http\:\/\/www\.google\.com\/](http://www.google.com/)\)`},
+ {"http://gmail.com)", `[http\:\/\/gmail\.com](http://gmail.com)\)`},
+ {"((http://gmail.com))", `\(\([http\:\/\/gmail\.com](http://gmail.com)\)\)`},
+ {"http://gmail.com ((http://gmail.com)) ()", `[http\:\/\/gmail\.com](http://gmail.com) \(\([http\:\/\/gmail\.com](http://gmail.com)\)\) \(\)`},
+ {"Foo bar http://example.com/ quux!", `Foo bar [http\:\/\/example\.com\/](http://example.com/) quux\!`},
+ {"Hello http://example.com/%2f/ /world.", `Hello [http\:\/\/example\.com\/\%2f\/](http://example.com/%2f/) \/world\.`},
+ {"Lorem http: ipsum //host/path", `Lorem http\: ipsum \/\/host\/path`},
+ {"javascript://is/not/linked", `javascript\:\/\/is\/not\/linked`},
+ {"http://foo", `[http\:\/\/foo](http://foo)`},
+ {"art by [[https://www.example.com/person/][Person Name]]", `art by \[\[[https\:\/\/www\.example\.com\/person\/](https://www.example.com/person/)\]\[Person Name\]\]`},
+ {"please visit (http://golang.org/)", `please visit \([http\:\/\/golang\.org\/](http://golang.org/)\)`},
+ {"please visit http://golang.org/hello())", `please visit [http\:\/\/golang\.org\/hello\(\)](http://golang.org/hello\(\))\)`},
+ {"http://git.qemu.org/?p=qemu.git;a=blob;f=qapi-schema.json;hb=HEAD", `[http\:\/\/git\.qemu\.org\/\?p\=qemu\.git\;a\=blob\;f\=qapi\-schema\.json\;hb\=HEAD](http://git.qemu.org/?p=qemu.git;a=blob;f=qapi-schema.json;hb=HEAD)`},
+ {"https://foo.bar/bal/x(])", `[https\:\/\/foo\.bar\/bal\/x\(](https://foo.bar/bal/x\()\]\)`},
+ {"foo [ http://bar(])", `foo \[ [http\:\/\/bar\(](http://bar\()\]\)`},
+}
+
+func TestEmphasize(t *testing.T) {
+ for i, tt := range emphasizeTests {
+ var buf bytes.Buffer
+ emphasize(&buf, tt.in, true)
+ out := buf.String()
+ if out != tt.out {
+ t.Errorf("#%d: mismatch\nhave: %v\nwant: %v", i, out, tt.out)
+ }
+ }
+}
+
+func TestCommentEscape(t *testing.T) {
+ //ldquo -> ulquo and rdquo -> urquo
+ commentTests := []struct {
+ in, out string
+ }{
+ {"typically invoked as ``go tool asm'',", "typically invoked as " + ulquo + "go tool asm" + urquo + ","},
+ {"For more detail, run ``go help test'' and ``go help testflag''", "For more detail, run " + ulquo + "go help test" + urquo + " and " + ulquo + "go help testflag" + urquo}}
+ for i, tt := range commentTests {
+ var buf strings.Builder
+ commentEscape(&buf, tt.in, true)
+ out := buf.String()
+ if out != tt.out {
+ t.Errorf("#%d: mismatch\nhave: %q\nwant: %q", i, out, tt.out)
+ }
+ }
+}
+
+func TestCommentToMarkdown(t *testing.T) {
+ tests := []struct {
+ in, out string
+ }{
+ {
+ in: "F declaration.\n",
+ out: "F declaration\\.\n",
+ },
+ {
+ in: `
+F declaration. Lorem ipsum dolor sit amet.
+Etiam mattis eros at orci mollis molestie.
+`,
+ out: `
+F declaration\. Lorem ipsum dolor sit amet\.
+Etiam mattis eros at orci mollis molestie\.
+`,
+ },
+ {
+ in: `
+F declaration.
+
+Lorem ipsum dolor sit amet.
+Sed id dui turpis.
+
+
+
+
+Aenean tempus velit non auctor eleifend.
+Aenean efficitur a sem id ultricies.
+
+
+Phasellus efficitur mauris et viverra bibendum.
+`,
+ out: `
+F declaration\.
+
+Lorem ipsum dolor sit amet\.
+Sed id dui turpis\.
+
+Aenean tempus velit non auctor eleifend\.
+Aenean efficitur a sem id ultricies\.
+
+Phasellus efficitur mauris et viverra bibendum\.
+`,
+ },
+ {
+ in: `
+F declaration.
+
+Aenean tempus velit non auctor eleifend.
+
+Section
+
+Lorem ipsum dolor sit amet, consectetur adipiscing elit.
+
+ func foo() {}
+
+
+ func bar() {}
+
+Fusce lorem lacus.
+
+ func foo() {}
+
+ func bar() {}
+
+Maecenas in lobortis lectus.
+
+ func foo() {}
+
+ func bar() {}
+
+Phasellus efficitur mauris et viverra bibendum.
+`,
+ out: `
+F declaration\.
+
+Aenean tempus velit non auctor eleifend\.
+
+### Section
+
+Lorem ipsum dolor sit amet, consectetur adipiscing elit\.
+
+ func foo() {}
+
+
+ func bar() {}
+
+Fusce lorem lacus\.
+
+ func foo() {}
+
+ func bar() {}
+
+Maecenas in lobortis lectus\.
+
+ func foo() {}
+
+ func bar() {}
+
+Phasellus efficitur mauris et viverra bibendum\.
+`,
+ },
+ {
+ in: `
+F declaration.
+
+ func foo() {
+ fmt.Println("foo")
+ }
+ func bar() {
+ fmt.Println("bar")
+ }
+`,
+ out: `
+F declaration\.
+
+ func foo() {
+ fmt.Println("foo")
+ }
+ func bar() {
+ fmt.Println("bar")
+ }
+`,
+ },
+ }
+ for i, tt := range tests {
+ // Comments start with new lines for better readability. So, we should trim them.
+ tt.in = strings.TrimPrefix(tt.in, "\n")
+ tt.out = strings.TrimPrefix(tt.out, "\n")
+
+ if out := CommentToMarkdown(tt.in, nil); out != tt.out {
+ t.Errorf("#%d: mismatch\nhave: %q\nwant: %q", i, out, tt.out)
+ }
+ }
+}
diff --git a/gopls/internal/lsp/source/comment_go119.go b/gopls/internal/lsp/source/comment_go119.go
new file mode 100644
index 000000000..c379a4a4f
--- /dev/null
+++ b/gopls/internal/lsp/source/comment_go119.go
@@ -0,0 +1,56 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.19
+// +build go1.19
+
+package source
+
+// Starting with go1.19, the formatting of comments has changed, and there
+// is a new package (go/doc/comment) for processing them.
+// As long as gopls has to compile under earlier versions, tests
+// have to pass with both the old and new code, which produce
+// slightly different results. (cmd/test/definition.go, source/comment_test.go,
+// and source/source_test.go) Each of the test files checks the results
+// with a function, tests.CheckSameMarkdown, that accepts both the old and the new
+// results. (The old code escapes many characters the new code does not,
+// and the new code sometimes adds a blank line.)
+
+// When gopls no longer needs to compile with go1.18, the old comment.go should
+// be replaced by this file, the golden test files should be updated.
+// (and checkSameMarkdown() could be replaced by a simple comparison.)
+
+import (
+ "fmt"
+ "go/doc/comment"
+)
+
+// CommentToMarkdown converts comment text to formatted markdown.
+// The comment was prepared by DocReader,
+// so it is known not to have leading, trailing blank lines
+// nor to have trailing spaces at the end of lines.
+// The comment markers have already been removed.
+func CommentToMarkdown(text string, options *Options) string {
+ var p comment.Parser
+ doc := p.Parse(text)
+ var pr comment.Printer
+ // The default produces {#Hdr-...} tags for headings.
+ // vscode displays thems, which is undesirable.
+ // The godoc for comment.Printer says the tags
+ // avoid a security problem.
+ pr.HeadingID = func(*comment.Heading) string { return "" }
+ pr.DocLinkURL = func(link *comment.DocLink) string {
+ msg := fmt.Sprintf("https://%s/%s", options.LinkTarget, link.ImportPath)
+ if link.Name != "" {
+ msg += "#"
+ if link.Recv != "" {
+ msg += link.Recv + "."
+ }
+ msg += link.Name
+ }
+ return msg
+ }
+ easy := pr.Markdown(doc)
+ return string(easy)
+}
diff --git a/internal/lsp/source/completion/builtin.go b/gopls/internal/lsp/source/completion/builtin.go
index 39732d864..39732d864 100644
--- a/internal/lsp/source/completion/builtin.go
+++ b/gopls/internal/lsp/source/completion/builtin.go
diff --git a/gopls/internal/lsp/source/completion/completion.go b/gopls/internal/lsp/source/completion/completion.go
new file mode 100644
index 000000000..f8c7654f6
--- /dev/null
+++ b/gopls/internal/lsp/source/completion/completion.go
@@ -0,0 +1,3252 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package completion provides core functionality for code completion in Go
+// editors and tools.
+package completion
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "go/ast"
+ "go/constant"
+ "go/parser"
+ "go/scanner"
+ "go/token"
+ "go/types"
+ "math"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+ "unicode"
+
+ "golang.org/x/sync/errgroup"
+ "golang.org/x/tools/go/ast/astutil"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/safetoken"
+ "golang.org/x/tools/gopls/internal/lsp/snippet"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/fuzzy"
+ "golang.org/x/tools/internal/imports"
+ "golang.org/x/tools/internal/typeparams"
+)
+
+// A CompletionItem represents a possible completion suggested by the algorithm.
+type CompletionItem struct {
+
+ // Invariant: CompletionItem does not refer to syntax or types.
+
+ // Label is the primary text the user sees for this completion item.
+ Label string
+
+ // Detail is supplemental information to present to the user.
+ // This often contains the type or return type of the completion item.
+ Detail string
+
+ // InsertText is the text to insert if this item is selected.
+ // Any of the prefix that has already been typed is not trimmed.
+ // The insert text does not contain snippets.
+ InsertText string
+
+ Kind protocol.CompletionItemKind
+ Tags []protocol.CompletionItemTag
+ Deprecated bool // Deprecated, prefer Tags if available
+
+ // An optional array of additional TextEdits that are applied when
+ // selecting this completion.
+ //
+ // Additional text edits should be used to change text unrelated to the current cursor position
+ // (for example adding an import statement at the top of the file if the completion item will
+ // insert an unqualified type).
+ AdditionalTextEdits []protocol.TextEdit
+
+ // Depth is how many levels were searched to find this completion.
+ // For example when completing "foo<>", "fooBar" is depth 0, and
+ // "fooBar.Baz" is depth 1.
+ Depth int
+
+ // Score is the internal relevance score.
+ // A higher score indicates that this completion item is more relevant.
+ Score float64
+
+ // snippet is the LSP snippet for the completion item. The LSP
+ // specification contains details about LSP snippets. For example, a
+ // snippet for a function with the following signature:
+ //
+ // func foo(a, b, c int)
+ //
+ // would be:
+ //
+ // foo(${1:a int}, ${2: b int}, ${3: c int})
+ //
+ // If Placeholders is false in the CompletionOptions, the above
+ // snippet would instead be:
+ //
+ // foo(${1:})
+ snippet *snippet.Builder
+
+ // Documentation is the documentation for the completion item.
+ Documentation string
+
+ // isSlice reports whether the underlying type of the object
+ // from which this candidate was derived is a slice.
+ // (Used to complete append() calls.)
+ isSlice bool
+}
+
+// completionOptions holds completion specific configuration.
+type completionOptions struct {
+ unimported bool
+ documentation bool
+ fullDocumentation bool
+ placeholders bool
+ literal bool
+ snippets bool
+ postfix bool
+ matcher source.Matcher
+ budget time.Duration
+}
+
+// Snippet is a convenience returns the snippet if available, otherwise
+// the InsertText.
+// used for an item, depending on if the callee wants placeholders or not.
+func (i *CompletionItem) Snippet() string {
+ if i.snippet != nil {
+ return i.snippet.String()
+ }
+ return i.InsertText
+}
+
+// Scoring constants are used for weighting the relevance of different candidates.
+const (
+ // stdScore is the base score for all completion items.
+ stdScore float64 = 1.0
+
+ // highScore indicates a very relevant completion item.
+ highScore float64 = 10.0
+
+ // lowScore indicates an irrelevant or not useful completion item.
+ lowScore float64 = 0.01
+)
+
+// matcher matches a candidate's label against the user input. The
+// returned score reflects the quality of the match. A score of zero
+// indicates no match, and a score of one means a perfect match.
+type matcher interface {
+ Score(candidateLabel string) (score float32)
+}
+
+// prefixMatcher implements case sensitive prefix matching.
+type prefixMatcher string
+
+func (pm prefixMatcher) Score(candidateLabel string) float32 {
+ if strings.HasPrefix(candidateLabel, string(pm)) {
+ return 1
+ }
+ return -1
+}
+
+// insensitivePrefixMatcher implements case insensitive prefix matching.
+type insensitivePrefixMatcher string
+
+func (ipm insensitivePrefixMatcher) Score(candidateLabel string) float32 {
+ if strings.HasPrefix(strings.ToLower(candidateLabel), string(ipm)) {
+ return 1
+ }
+ return -1
+}
+
+// completer contains the necessary information for a single completion request.
+type completer struct {
+ snapshot source.Snapshot
+ pkg source.Package
+ qf types.Qualifier // for qualifying typed expressions
+ mq source.MetadataQualifier // for syntactic qualifying
+ opts *completionOptions
+
+ // completionContext contains information about the trigger for this
+ // completion request.
+ completionContext completionContext
+
+ // fh is a handle to the file associated with this completion request.
+ fh source.FileHandle
+
+ // filename is the name of the file associated with this completion request.
+ filename string
+
+ // file is the AST of the file associated with this completion request.
+ file *ast.File
+
+ // (tokFile, pos) is the position at which the request was triggered.
+ tokFile *token.File
+ pos token.Pos
+
+ // path is the path of AST nodes enclosing the position.
+ path []ast.Node
+
+ // seen is the map that ensures we do not return duplicate results.
+ seen map[types.Object]bool
+
+ // items is the list of completion items returned.
+ items []CompletionItem
+
+ // completionCallbacks is a list of callbacks to collect completions that
+ // require expensive operations. This includes operations where we search
+ // through the entire module cache.
+ completionCallbacks []func(opts *imports.Options) error
+
+ // surrounding describes the identifier surrounding the position.
+ surrounding *Selection
+
+ // inference contains information we've inferred about ideal
+ // candidates such as the candidate's type.
+ inference candidateInference
+
+ // enclosingFunc contains information about the function enclosing
+ // the position.
+ enclosingFunc *funcInfo
+
+ // enclosingCompositeLiteral contains information about the composite literal
+ // enclosing the position.
+ enclosingCompositeLiteral *compLitInfo
+
+ // deepState contains the current state of our deep completion search.
+ deepState deepCompletionState
+
+ // matcher matches the candidates against the surrounding prefix.
+ matcher matcher
+
+ // methodSetCache caches the types.NewMethodSet call, which is relatively
+ // expensive and can be called many times for the same type while searching
+ // for deep completions.
+ methodSetCache map[methodSetKey]*types.MethodSet
+
+ // mapper converts the positions in the file from which the completion originated.
+ mapper *protocol.Mapper
+
+ // startTime is when we started processing this completion request. It does
+ // not include any time the request spent in the queue.
+ startTime time.Time
+
+ // scopes contains all scopes defined by nodes in our path,
+ // including nil values for nodes that don't defined a scope. It
+ // also includes our package scope and the universal scope at the
+ // end.
+ scopes []*types.Scope
+}
+
+// funcInfo holds info about a function object.
+type funcInfo struct {
+ // sig is the function declaration enclosing the position.
+ sig *types.Signature
+
+ // body is the function's body.
+ body *ast.BlockStmt
+}
+
+type compLitInfo struct {
+ // cl is the *ast.CompositeLit enclosing the position.
+ cl *ast.CompositeLit
+
+ // clType is the type of cl.
+ clType types.Type
+
+ // kv is the *ast.KeyValueExpr enclosing the position, if any.
+ kv *ast.KeyValueExpr
+
+ // inKey is true if we are certain the position is in the key side
+ // of a key-value pair.
+ inKey bool
+
+ // maybeInFieldName is true if inKey is false and it is possible
+ // we are completing a struct field name. For example,
+ // "SomeStruct{<>}" will be inKey=false, but maybeInFieldName=true
+ // because we _could_ be completing a field name.
+ maybeInFieldName bool
+}
+
+type importInfo struct {
+ importPath string
+ name string
+}
+
+type methodSetKey struct {
+ typ types.Type
+ addressable bool
+}
+
+type completionContext struct {
+ // triggerCharacter is the character used to trigger completion at current
+ // position, if any.
+ triggerCharacter string
+
+ // triggerKind is information about how a completion was triggered.
+ triggerKind protocol.CompletionTriggerKind
+
+ // commentCompletion is true if we are completing a comment.
+ commentCompletion bool
+
+ // packageCompletion is true if we are completing a package name.
+ packageCompletion bool
+}
+
+// A Selection represents the cursor position and surrounding identifier.
+type Selection struct {
+ content string
+ tokFile *token.File
+ start, end, cursor token.Pos // relative to rng.TokFile
+ mapper *protocol.Mapper
+}
+
+func (p Selection) Content() string {
+ return p.content
+}
+
+func (p Selection) Range() (protocol.Range, error) {
+ return p.mapper.PosRange(p.tokFile, p.start, p.end)
+}
+
+func (p Selection) Prefix() string {
+ return p.content[:p.cursor-p.start]
+}
+
+func (p Selection) Suffix() string {
+ return p.content[p.cursor-p.start:]
+}
+
+func (c *completer) setSurrounding(ident *ast.Ident) {
+ if c.surrounding != nil {
+ return
+ }
+ if !(ident.Pos() <= c.pos && c.pos <= ident.End()) {
+ return
+ }
+
+ c.surrounding = &Selection{
+ content: ident.Name,
+ cursor: c.pos,
+ // Overwrite the prefix only.
+ tokFile: c.tokFile,
+ start: ident.Pos(),
+ end: ident.End(),
+ mapper: c.mapper,
+ }
+
+ c.setMatcherFromPrefix(c.surrounding.Prefix())
+}
+
+func (c *completer) setMatcherFromPrefix(prefix string) {
+ switch c.opts.matcher {
+ case source.Fuzzy:
+ c.matcher = fuzzy.NewMatcher(prefix)
+ case source.CaseSensitive:
+ c.matcher = prefixMatcher(prefix)
+ default:
+ c.matcher = insensitivePrefixMatcher(strings.ToLower(prefix))
+ }
+}
+
+func (c *completer) getSurrounding() *Selection {
+ if c.surrounding == nil {
+ c.surrounding = &Selection{
+ content: "",
+ cursor: c.pos,
+ tokFile: c.tokFile,
+ start: c.pos,
+ end: c.pos,
+ mapper: c.mapper,
+ }
+ }
+ return c.surrounding
+}
+
+// candidate represents a completion candidate.
+type candidate struct {
+ // obj is the types.Object to complete to.
+ // TODO(adonovan): eliminate dependence on go/types throughout this struct.
+ obj types.Object
+
+ // score is used to rank candidates.
+ score float64
+
+ // name is the deep object name path, e.g. "foo.bar"
+ name string
+
+ // detail is additional information about this item. If not specified,
+ // defaults to type string for the object.
+ detail string
+
+ // path holds the path from the search root (excluding the candidate
+ // itself) for a deep candidate.
+ path []types.Object
+
+ // pathInvokeMask is a bit mask tracking whether each entry in path
+ // should be formatted with "()" (i.e. whether it is a function
+ // invocation).
+ pathInvokeMask uint16
+
+ // mods contains modifications that should be applied to the
+ // candidate when inserted. For example, "foo" may be inserted as
+ // "*foo" or "foo()".
+ mods []typeModKind
+
+ // addressable is true if a pointer can be taken to the candidate.
+ addressable bool
+
+ // convertTo is a type that this candidate should be cast to. For
+ // example, if convertTo is float64, "foo" should be formatted as
+ // "float64(foo)".
+ convertTo types.Type
+
+ // imp is the import that needs to be added to this package in order
+ // for this candidate to be valid. nil if no import needed.
+ imp *importInfo
+}
+
+func (c candidate) hasMod(mod typeModKind) bool {
+ for _, m := range c.mods {
+ if m == mod {
+ return true
+ }
+ }
+ return false
+}
+
+// ErrIsDefinition is an error that informs the user they got no
+// completions because they tried to complete the name of a new object
+// being defined.
+type ErrIsDefinition struct {
+ objStr string
+}
+
+func (e ErrIsDefinition) Error() string {
+ msg := "this is a definition"
+ if e.objStr != "" {
+ msg += " of " + e.objStr
+ }
+ return msg
+}
+
+// Completion returns a list of possible candidates for completion, given a
+// a file and a position.
+//
+// The selection is computed based on the preceding identifier and can be used by
+// the client to score the quality of the completion. For instance, some clients
+// may tolerate imperfect matches as valid completion results, since users may make typos.
+func Completion(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle, protoPos protocol.Position, protoContext protocol.CompletionContext) ([]CompletionItem, *Selection, error) {
+ ctx, done := event.Start(ctx, "completion.Completion")
+ defer done()
+
+ startTime := time.Now()
+
+ pkg, pgf, err := source.PackageForFile(ctx, snapshot, fh.URI(), source.NarrowestPackage)
+ if err != nil || pgf.File.Package == token.NoPos {
+ // If we can't parse this file or find position for the package
+ // keyword, it may be missing a package declaration. Try offering
+ // suggestions for the package declaration.
+ // Note that this would be the case even if the keyword 'package' is
+ // present but no package name exists.
+ items, surrounding, innerErr := packageClauseCompletions(ctx, snapshot, fh, protoPos)
+ if innerErr != nil {
+ // return the error for GetParsedFile since it's more relevant in this situation.
+ return nil, nil, fmt.Errorf("getting file %s for Completion: %w (package completions: %v)", fh.URI(), err, innerErr)
+ }
+ return items, surrounding, nil
+ }
+ pos, err := pgf.PositionPos(protoPos)
+ if err != nil {
+ return nil, nil, err
+ }
+ // Completion is based on what precedes the cursor.
+ // Find the path to the position before pos.
+ path, _ := astutil.PathEnclosingInterval(pgf.File, pos-1, pos-1)
+ if path == nil {
+ return nil, nil, fmt.Errorf("cannot find node enclosing position")
+ }
+
+ // Check if completion at this position is valid. If not, return early.
+ switch n := path[0].(type) {
+ case *ast.BasicLit:
+ // Skip completion inside literals except for ImportSpec
+ if len(path) > 1 {
+ if _, ok := path[1].(*ast.ImportSpec); ok {
+ break
+ }
+ }
+ return nil, nil, nil
+ case *ast.CallExpr:
+ if n.Ellipsis.IsValid() && pos > n.Ellipsis && pos <= n.Ellipsis+token.Pos(len("...")) {
+ // Don't offer completions inside or directly after "...". For
+ // example, don't offer completions at "<>" in "foo(bar...<>").
+ return nil, nil, nil
+ }
+ case *ast.Ident:
+ // reject defining identifiers
+ if obj, ok := pkg.GetTypesInfo().Defs[n]; ok {
+ if v, ok := obj.(*types.Var); ok && v.IsField() && v.Embedded() {
+ // An anonymous field is also a reference to a type.
+ } else if pgf.File.Name == n {
+ // Don't skip completions if Ident is for package name.
+ break
+ } else {
+ objStr := ""
+ if obj != nil {
+ qual := types.RelativeTo(pkg.GetTypes())
+ objStr = types.ObjectString(obj, qual)
+ }
+ ans, sel := definition(path, obj, pgf)
+ if ans != nil {
+ sort.Slice(ans, func(i, j int) bool {
+ return ans[i].Score > ans[j].Score
+ })
+ return ans, sel, nil
+ }
+ return nil, nil, ErrIsDefinition{objStr: objStr}
+ }
+ }
+ }
+
+ // Collect all surrounding scopes, innermost first.
+ scopes := source.CollectScopes(pkg.GetTypesInfo(), path, pos)
+ scopes = append(scopes, pkg.GetTypes().Scope(), types.Universe)
+
+ opts := snapshot.View().Options()
+ c := &completer{
+ pkg: pkg,
+ snapshot: snapshot,
+ qf: source.Qualifier(pgf.File, pkg.GetTypes(), pkg.GetTypesInfo()),
+ mq: source.MetadataQualifierForFile(snapshot, pgf.File, pkg.Metadata()),
+ completionContext: completionContext{
+ triggerCharacter: protoContext.TriggerCharacter,
+ triggerKind: protoContext.TriggerKind,
+ },
+ fh: fh,
+ filename: fh.URI().Filename(),
+ tokFile: pgf.Tok,
+ file: pgf.File,
+ path: path,
+ pos: pos,
+ seen: make(map[types.Object]bool),
+ enclosingFunc: enclosingFunction(path, pkg.GetTypesInfo()),
+ enclosingCompositeLiteral: enclosingCompositeLiteral(path, pos, pkg.GetTypesInfo()),
+ deepState: deepCompletionState{
+ enabled: opts.DeepCompletion,
+ },
+ opts: &completionOptions{
+ matcher: opts.Matcher,
+ unimported: opts.CompleteUnimported,
+ documentation: opts.CompletionDocumentation && opts.HoverKind != source.NoDocumentation,
+ fullDocumentation: opts.HoverKind == source.FullDocumentation,
+ placeholders: opts.UsePlaceholders,
+ literal: opts.LiteralCompletions && opts.InsertTextFormat == protocol.SnippetTextFormat,
+ budget: opts.CompletionBudget,
+ snippets: opts.InsertTextFormat == protocol.SnippetTextFormat,
+ postfix: opts.ExperimentalPostfixCompletions,
+ },
+ // default to a matcher that always matches
+ matcher: prefixMatcher(""),
+ methodSetCache: make(map[methodSetKey]*types.MethodSet),
+ mapper: pgf.Mapper,
+ startTime: startTime,
+ scopes: scopes,
+ }
+
+ var cancel context.CancelFunc
+ if c.opts.budget == 0 {
+ ctx, cancel = context.WithCancel(ctx)
+ } else {
+ // timeoutDuration is the completion budget remaining. If less than
+ // 10ms, set to 10ms
+ timeoutDuration := time.Until(c.startTime.Add(c.opts.budget))
+ if timeoutDuration < 10*time.Millisecond {
+ timeoutDuration = 10 * time.Millisecond
+ }
+ ctx, cancel = context.WithTimeout(ctx, timeoutDuration)
+ }
+ defer cancel()
+
+ if surrounding := c.containingIdent(pgf.Src); surrounding != nil {
+ c.setSurrounding(surrounding)
+ }
+
+ c.inference = expectedCandidate(ctx, c)
+
+ err = c.collectCompletions(ctx)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Deep search collected candidates and their members for more candidates.
+ c.deepSearch(ctx)
+
+ for _, callback := range c.completionCallbacks {
+ if err := c.snapshot.RunProcessEnvFunc(ctx, callback); err != nil {
+ return nil, nil, err
+ }
+ }
+
+ // Search candidates populated by expensive operations like
+ // unimportedMembers etc. for more completion items.
+ c.deepSearch(ctx)
+
+ // Statement candidates offer an entire statement in certain contexts, as
+ // opposed to a single object. Add statement candidates last because they
+ // depend on other candidates having already been collected.
+ c.addStatementCandidates()
+
+ c.sortItems()
+ return c.items, c.getSurrounding(), nil
+}
+
+// collectCompletions adds possible completion candidates to either the deep
+// search queue or completion items directly for different completion contexts.
+func (c *completer) collectCompletions(ctx context.Context) error {
+ // Inside import blocks, return completions for unimported packages.
+ for _, importSpec := range c.file.Imports {
+ if !(importSpec.Path.Pos() <= c.pos && c.pos <= importSpec.Path.End()) {
+ continue
+ }
+ return c.populateImportCompletions(ctx, importSpec)
+ }
+
+ // Inside comments, offer completions for the name of the relevant symbol.
+ for _, comment := range c.file.Comments {
+ if comment.Pos() < c.pos && c.pos <= comment.End() {
+ c.populateCommentCompletions(ctx, comment)
+ return nil
+ }
+ }
+
+ // Struct literals are handled entirely separately.
+ if c.wantStructFieldCompletions() {
+ // If we are definitely completing a struct field name, deep completions
+ // don't make sense.
+ if c.enclosingCompositeLiteral.inKey {
+ c.deepState.enabled = false
+ }
+ return c.structLiteralFieldName(ctx)
+ }
+
+ if lt := c.wantLabelCompletion(); lt != labelNone {
+ c.labels(lt)
+ return nil
+ }
+
+ if c.emptySwitchStmt() {
+ // Empty switch statements only admit "default" and "case" keywords.
+ c.addKeywordItems(map[string]bool{}, highScore, CASE, DEFAULT)
+ return nil
+ }
+
+ switch n := c.path[0].(type) {
+ case *ast.Ident:
+ if c.file.Name == n {
+ return c.packageNameCompletions(ctx, c.fh.URI(), n)
+ } else if sel, ok := c.path[1].(*ast.SelectorExpr); ok && sel.Sel == n {
+ // Is this the Sel part of a selector?
+ return c.selector(ctx, sel)
+ }
+ return c.lexical(ctx)
+ // The function name hasn't been typed yet, but the parens are there:
+ // recv.‸(arg)
+ case *ast.TypeAssertExpr:
+ // Create a fake selector expression.
+ return c.selector(ctx, &ast.SelectorExpr{X: n.X})
+ case *ast.SelectorExpr:
+ return c.selector(ctx, n)
+ // At the file scope, only keywords are allowed.
+ case *ast.BadDecl, *ast.File:
+ c.addKeywordCompletions()
+ default:
+ // fallback to lexical completions
+ return c.lexical(ctx)
+ }
+
+ return nil
+}
+
+// containingIdent returns the *ast.Ident containing pos, if any. It
+// synthesizes an *ast.Ident to allow completion in the face of
+// certain syntax errors.
+func (c *completer) containingIdent(src []byte) *ast.Ident {
+ // In the normal case, our leaf AST node is the identifier being completed.
+ if ident, ok := c.path[0].(*ast.Ident); ok {
+ return ident
+ }
+
+ pos, tkn, lit := c.scanToken(src)
+ if !pos.IsValid() {
+ return nil
+ }
+
+ fakeIdent := &ast.Ident{Name: lit, NamePos: pos}
+
+ if _, isBadDecl := c.path[0].(*ast.BadDecl); isBadDecl {
+ // You don't get *ast.Idents at the file level, so look for bad
+ // decls and use the manually extracted token.
+ return fakeIdent
+ } else if c.emptySwitchStmt() {
+ // Only keywords are allowed in empty switch statements.
+ // *ast.Idents are not parsed, so we must use the manually
+ // extracted token.
+ return fakeIdent
+ } else if tkn.IsKeyword() {
+ // Otherwise, manually extract the prefix if our containing token
+ // is a keyword. This improves completion after an "accidental
+ // keyword", e.g. completing to "variance" in "someFunc(var<>)".
+ return fakeIdent
+ }
+
+ return nil
+}
+
+// scanToken scans pgh's contents for the token containing pos.
+func (c *completer) scanToken(contents []byte) (token.Pos, token.Token, string) {
+ tok := c.pkg.FileSet().File(c.pos)
+
+ var s scanner.Scanner
+ s.Init(tok, contents, nil, 0)
+ for {
+ tknPos, tkn, lit := s.Scan()
+ if tkn == token.EOF || tknPos >= c.pos {
+ return token.NoPos, token.ILLEGAL, ""
+ }
+
+ if len(lit) > 0 && tknPos <= c.pos && c.pos <= tknPos+token.Pos(len(lit)) {
+ return tknPos, tkn, lit
+ }
+ }
+}
+
+func (c *completer) sortItems() {
+ sort.SliceStable(c.items, func(i, j int) bool {
+ // Sort by score first.
+ if c.items[i].Score != c.items[j].Score {
+ return c.items[i].Score > c.items[j].Score
+ }
+
+ // Then sort by label so order stays consistent. This also has the
+ // effect of preferring shorter candidates.
+ return c.items[i].Label < c.items[j].Label
+ })
+}
+
+// emptySwitchStmt reports whether pos is in an empty switch or select
+// statement.
+func (c *completer) emptySwitchStmt() bool {
+ block, ok := c.path[0].(*ast.BlockStmt)
+ if !ok || len(block.List) > 0 || len(c.path) == 1 {
+ return false
+ }
+
+ switch c.path[1].(type) {
+ case *ast.SwitchStmt, *ast.TypeSwitchStmt, *ast.SelectStmt:
+ return true
+ default:
+ return false
+ }
+}
+
+// populateImportCompletions yields completions for an import path around the cursor.
+//
+// Completions are suggested at the directory depth of the given import path so
+// that we don't overwhelm the user with a large list of possibilities. As an
+// example, a completion for the prefix "golang" results in "golang.org/".
+// Completions for "golang.org/" yield its subdirectories
+// (i.e. "golang.org/x/"). The user is meant to accept completion suggestions
+// until they reach a complete import path.
+func (c *completer) populateImportCompletions(ctx context.Context, searchImport *ast.ImportSpec) error {
+ if !strings.HasPrefix(searchImport.Path.Value, `"`) {
+ return nil
+ }
+
+ // deepSearch is not valuable for import completions.
+ c.deepState.enabled = false
+
+ importPath := searchImport.Path.Value
+
+ // Extract the text between the quotes (if any) in an import spec.
+ // prefix is the part of import path before the cursor.
+ prefixEnd := c.pos - searchImport.Path.Pos()
+ prefix := strings.Trim(importPath[:prefixEnd], `"`)
+
+ // The number of directories in the import path gives us the depth at
+ // which to search.
+ depth := len(strings.Split(prefix, "/")) - 1
+
+ content := importPath
+ start, end := searchImport.Path.Pos(), searchImport.Path.End()
+ namePrefix, nameSuffix := `"`, `"`
+ // If a starting quote is present, adjust surrounding to either after the
+ // cursor or after the first slash (/), except if cursor is at the starting
+ // quote. Otherwise we provide a completion including the starting quote.
+ if strings.HasPrefix(importPath, `"`) && c.pos > searchImport.Path.Pos() {
+ content = content[1:]
+ start++
+ if depth > 0 {
+ // Adjust textEdit start to replacement range. For ex: if current
+ // path was "golang.or/x/to<>ols/internal/", where <> is the cursor
+ // position, start of the replacement range would be after
+ // "golang.org/x/".
+ path := strings.SplitAfter(prefix, "/")
+ numChars := len(strings.Join(path[:len(path)-1], ""))
+ content = content[numChars:]
+ start += token.Pos(numChars)
+ }
+ namePrefix = ""
+ }
+
+ // We won't provide an ending quote if one is already present, except if
+ // cursor is after the ending quote but still in import spec. This is
+ // because cursor has to be in our textEdit range.
+ if strings.HasSuffix(importPath, `"`) && c.pos < searchImport.Path.End() {
+ end--
+ content = content[:len(content)-1]
+ nameSuffix = ""
+ }
+
+ c.surrounding = &Selection{
+ content: content,
+ cursor: c.pos,
+ tokFile: c.tokFile,
+ start: start,
+ end: end,
+ mapper: c.mapper,
+ }
+
+ seenImports := make(map[string]struct{})
+ for _, importSpec := range c.file.Imports {
+ if importSpec.Path.Value == importPath {
+ continue
+ }
+ seenImportPath, err := strconv.Unquote(importSpec.Path.Value)
+ if err != nil {
+ return err
+ }
+ seenImports[seenImportPath] = struct{}{}
+ }
+
+ var mu sync.Mutex // guard c.items locally, since searchImports is called in parallel
+ seen := make(map[string]struct{})
+ searchImports := func(pkg imports.ImportFix) {
+ path := pkg.StmtInfo.ImportPath
+ if _, ok := seenImports[path]; ok {
+ return
+ }
+
+ // Any package path containing fewer directories than the search
+ // prefix is not a match.
+ pkgDirList := strings.Split(path, "/")
+ if len(pkgDirList) < depth+1 {
+ return
+ }
+ pkgToConsider := strings.Join(pkgDirList[:depth+1], "/")
+
+ name := pkgDirList[depth]
+ // if we're adding an opening quote to completion too, set name to full
+ // package path since we'll need to overwrite that range.
+ if namePrefix == `"` {
+ name = pkgToConsider
+ }
+
+ score := pkg.Relevance
+ if len(pkgDirList)-1 == depth {
+ score *= highScore
+ } else {
+ // For incomplete package paths, add a terminal slash to indicate that the
+ // user should keep triggering completions.
+ name += "/"
+ pkgToConsider += "/"
+ }
+
+ if _, ok := seen[pkgToConsider]; ok {
+ return
+ }
+ seen[pkgToConsider] = struct{}{}
+
+ mu.Lock()
+ defer mu.Unlock()
+
+ name = namePrefix + name + nameSuffix
+ obj := types.NewPkgName(0, nil, name, types.NewPackage(pkgToConsider, name))
+ c.deepState.enqueue(candidate{
+ obj: obj,
+ detail: fmt.Sprintf("%q", pkgToConsider),
+ score: score,
+ })
+ }
+
+ c.completionCallbacks = append(c.completionCallbacks, func(opts *imports.Options) error {
+ return imports.GetImportPaths(ctx, searchImports, prefix, c.filename, c.pkg.GetTypes().Name(), opts.Env)
+ })
+ return nil
+}
+
+// populateCommentCompletions yields completions for comments preceding or in declarations.
+func (c *completer) populateCommentCompletions(ctx context.Context, comment *ast.CommentGroup) {
+ // If the completion was triggered by a period, ignore it. These types of
+ // completions will not be useful in comments.
+ if c.completionContext.triggerCharacter == "." {
+ return
+ }
+
+ // Using the comment position find the line after
+ file := c.pkg.FileSet().File(comment.End())
+ if file == nil {
+ return
+ }
+
+ // Deep completion doesn't work properly in comments since we don't
+ // have a type object to complete further.
+ c.deepState.enabled = false
+ c.completionContext.commentCompletion = true
+
+ // Documentation isn't useful in comments, since it might end up being the
+ // comment itself.
+ c.opts.documentation = false
+
+ commentLine := file.Line(comment.End())
+
+ // comment is valid, set surrounding as word boundaries around cursor
+ c.setSurroundingForComment(comment)
+
+ // Using the next line pos, grab and parse the exported symbol on that line
+ for _, n := range c.file.Decls {
+ declLine := file.Line(n.Pos())
+ // if the comment is not in, directly above or on the same line as a declaration
+ if declLine != commentLine && declLine != commentLine+1 &&
+ !(n.Pos() <= comment.Pos() && comment.End() <= n.End()) {
+ continue
+ }
+ switch node := n.(type) {
+ // handle const, vars, and types
+ case *ast.GenDecl:
+ for _, spec := range node.Specs {
+ switch spec := spec.(type) {
+ case *ast.ValueSpec:
+ for _, name := range spec.Names {
+ if name.String() == "_" {
+ continue
+ }
+ obj := c.pkg.GetTypesInfo().ObjectOf(name)
+ c.deepState.enqueue(candidate{obj: obj, score: stdScore})
+ }
+ case *ast.TypeSpec:
+ // add TypeSpec fields to completion
+ switch typeNode := spec.Type.(type) {
+ case *ast.StructType:
+ c.addFieldItems(ctx, typeNode.Fields)
+ case *ast.FuncType:
+ c.addFieldItems(ctx, typeNode.Params)
+ c.addFieldItems(ctx, typeNode.Results)
+ case *ast.InterfaceType:
+ c.addFieldItems(ctx, typeNode.Methods)
+ }
+
+ if spec.Name.String() == "_" {
+ continue
+ }
+
+ obj := c.pkg.GetTypesInfo().ObjectOf(spec.Name)
+ // Type name should get a higher score than fields but not highScore by default
+ // since field near a comment cursor gets a highScore
+ score := stdScore * 1.1
+ // If type declaration is on the line after comment, give it a highScore.
+ if declLine == commentLine+1 {
+ score = highScore
+ }
+
+ c.deepState.enqueue(candidate{obj: obj, score: score})
+ }
+ }
+ // handle functions
+ case *ast.FuncDecl:
+ c.addFieldItems(ctx, node.Recv)
+ c.addFieldItems(ctx, node.Type.Params)
+ c.addFieldItems(ctx, node.Type.Results)
+
+ // collect receiver struct fields
+ if node.Recv != nil {
+ for _, fields := range node.Recv.List {
+ for _, name := range fields.Names {
+ obj := c.pkg.GetTypesInfo().ObjectOf(name)
+ if obj == nil {
+ continue
+ }
+
+ recvType := obj.Type().Underlying()
+ if ptr, ok := recvType.(*types.Pointer); ok {
+ recvType = ptr.Elem()
+ }
+ recvStruct, ok := recvType.Underlying().(*types.Struct)
+ if !ok {
+ continue
+ }
+ for i := 0; i < recvStruct.NumFields(); i++ {
+ field := recvStruct.Field(i)
+ c.deepState.enqueue(candidate{obj: field, score: lowScore})
+ }
+ }
+ }
+ }
+
+ if node.Name.String() == "_" {
+ continue
+ }
+
+ obj := c.pkg.GetTypesInfo().ObjectOf(node.Name)
+ if obj == nil || obj.Pkg() != nil && obj.Pkg() != c.pkg.GetTypes() {
+ continue
+ }
+
+ c.deepState.enqueue(candidate{obj: obj, score: highScore})
+ }
+ }
+}
+
+// sets word boundaries surrounding a cursor for a comment
+func (c *completer) setSurroundingForComment(comments *ast.CommentGroup) {
+ var cursorComment *ast.Comment
+ for _, comment := range comments.List {
+ if c.pos >= comment.Pos() && c.pos <= comment.End() {
+ cursorComment = comment
+ break
+ }
+ }
+ // if cursor isn't in the comment
+ if cursorComment == nil {
+ return
+ }
+
+ // index of cursor in comment text
+ cursorOffset := int(c.pos - cursorComment.Pos())
+ start, end := cursorOffset, cursorOffset
+ for start > 0 && isValidIdentifierChar(cursorComment.Text[start-1]) {
+ start--
+ }
+ for end < len(cursorComment.Text) && isValidIdentifierChar(cursorComment.Text[end]) {
+ end++
+ }
+
+ c.surrounding = &Selection{
+ content: cursorComment.Text[start:end],
+ cursor: c.pos,
+ tokFile: c.tokFile,
+ start: token.Pos(int(cursorComment.Slash) + start),
+ end: token.Pos(int(cursorComment.Slash) + end),
+ mapper: c.mapper,
+ }
+ c.setMatcherFromPrefix(c.surrounding.Prefix())
+}
+
+// isValidIdentifierChar returns true if a byte is a valid go identifier
+// character, i.e. unicode letter or digit or underscore.
+func isValidIdentifierChar(char byte) bool {
+ charRune := rune(char)
+ return unicode.In(charRune, unicode.Letter, unicode.Digit) || char == '_'
+}
+
+// adds struct fields, interface methods, function declaration fields to completion
+func (c *completer) addFieldItems(ctx context.Context, fields *ast.FieldList) {
+ if fields == nil {
+ return
+ }
+
+ cursor := c.surrounding.cursor
+ for _, field := range fields.List {
+ for _, name := range field.Names {
+ if name.String() == "_" {
+ continue
+ }
+ obj := c.pkg.GetTypesInfo().ObjectOf(name)
+ if obj == nil {
+ continue
+ }
+
+ // if we're in a field comment/doc, score that field as more relevant
+ score := stdScore
+ if field.Comment != nil && field.Comment.Pos() <= cursor && cursor <= field.Comment.End() {
+ score = highScore
+ } else if field.Doc != nil && field.Doc.Pos() <= cursor && cursor <= field.Doc.End() {
+ score = highScore
+ }
+
+ c.deepState.enqueue(candidate{obj: obj, score: score})
+ }
+ }
+}
+
+func (c *completer) wantStructFieldCompletions() bool {
+ clInfo := c.enclosingCompositeLiteral
+ if clInfo == nil {
+ return false
+ }
+
+ return clInfo.isStruct() && (clInfo.inKey || clInfo.maybeInFieldName)
+}
+
+func (c *completer) wantTypeName() bool {
+ return !c.completionContext.commentCompletion && c.inference.typeName.wantTypeName
+}
+
+// See https://golang.org/issue/36001. Unimported completions are expensive.
+const (
+ maxUnimportedPackageNames = 5
+ unimportedMemberTarget = 100
+)
+
+// selector finds completions for the specified selector expression.
+func (c *completer) selector(ctx context.Context, sel *ast.SelectorExpr) error {
+ c.inference.objChain = objChain(c.pkg.GetTypesInfo(), sel.X)
+
+ // True selector?
+ if tv, ok := c.pkg.GetTypesInfo().Types[sel.X]; ok {
+ c.methodsAndFields(tv.Type, tv.Addressable(), nil, c.deepState.enqueue)
+ c.addPostfixSnippetCandidates(ctx, sel)
+ return nil
+ }
+
+ id, ok := sel.X.(*ast.Ident)
+ if !ok {
+ return nil
+ }
+
+ // Treat sel as a qualified identifier.
+ var filter func(*source.Metadata) bool
+ needImport := false
+ if pkgName, ok := c.pkg.GetTypesInfo().Uses[id].(*types.PkgName); ok {
+ // Qualified identifier with import declaration.
+ imp := pkgName.Imported()
+
+ // Known direct dependency? Expand using type information.
+ if _, ok := c.pkg.Metadata().DepsByPkgPath[source.PackagePath(imp.Path())]; ok {
+ c.packageMembers(imp, stdScore, nil, c.deepState.enqueue)
+ return nil
+ }
+
+ // Imported declaration with missing type information.
+ // Fall through to shallow completion of unimported package members.
+ // Match candidate packages by path.
+ // TODO(adonovan): simplify by merging with else case and matching on name only?
+ filter = func(m *source.Metadata) bool {
+ return strings.TrimPrefix(string(m.PkgPath), "vendor/") == imp.Path()
+ }
+ } else {
+ // Qualified identifier without import declaration.
+ // Match candidate packages by name.
+ filter = func(m *source.Metadata) bool {
+ return string(m.Name) == id.Name
+ }
+ needImport = true
+ }
+
+ // Search unimported packages.
+ if !c.opts.unimported {
+ return nil // feature disabled
+ }
+
+ // The deep completion algorithm is exceedingly complex and
+ // deeply coupled to the now obsolete notions that all
+ // token.Pos values can be interpreted by as a single FileSet
+ // belonging to the Snapshot and that all types.Object values
+ // are canonicalized by a single types.Importer mapping.
+ // These invariants are no longer true now that gopls uses
+ // an incremental approach, parsing and type-checking each
+ // package separately.
+ //
+ // Consequently, completion of symbols defined in packages that
+ // are not currently imported by the query file cannot use the
+ // deep completion machinery which is based on type information.
+ // Instead it must use only syntax information from a quick
+ // parse of top-level declarations (but not function bodies).
+ //
+ // TODO(adonovan): rewrite the deep completion machinery to
+ // not assume global Pos/Object realms and then use export
+ // data instead of the quick parse approach taken here.
+
+ // First, we search among packages in the workspace.
+ // We'll use a fast parse to extract package members
+ // from those that match the name/path criterion.
+ all, err := c.snapshot.AllMetadata(ctx)
+ if err != nil {
+ return err
+ }
+ var paths []string
+ known := make(map[source.PackagePath][]*source.Metadata) // may include test variant
+ for _, m := range all {
+ if m.IsIntermediateTestVariant() || m.Name == "main" || !filter(m) {
+ continue
+ }
+ known[m.PkgPath] = append(known[m.PkgPath], m)
+ paths = append(paths, string(m.PkgPath))
+ }
+
+ // Rank import paths as goimports would.
+ var relevances map[string]float64
+ if len(paths) > 0 {
+ if err := c.snapshot.RunProcessEnvFunc(ctx, func(opts *imports.Options) error {
+ var err error
+ relevances, err = imports.ScoreImportPaths(ctx, opts.Env, paths)
+ return err
+ }); err != nil {
+ return err
+ }
+ sort.Slice(paths, func(i, j int) bool {
+ return relevances[paths[i]] > relevances[paths[j]]
+ })
+ }
+
+ // quickParse does a quick parse of a single file of package m,
+ // extracts exported package members and adds candidates to c.items.
+ var itemsMu sync.Mutex // guards c.items
+ var enough int32 // atomic bool
+ quickParse := func(uri span.URI, m *source.Metadata) error {
+ if atomic.LoadInt32(&enough) != 0 {
+ return nil
+ }
+
+ fh, err := c.snapshot.GetFile(ctx, uri)
+ if err != nil {
+ return err
+ }
+ content, err := fh.Read()
+ if err != nil {
+ return err
+ }
+ path := string(m.PkgPath)
+ forEachPackageMember(content, func(tok token.Token, id *ast.Ident, fn *ast.FuncDecl) {
+ if atomic.LoadInt32(&enough) != 0 {
+ return
+ }
+
+ if !id.IsExported() ||
+ sel.Sel.Name != "_" && !strings.HasPrefix(id.Name, sel.Sel.Name) {
+ return // not a match
+ }
+
+ // The only detail is the kind and package: `var (from "example.com/foo")`
+ // TODO(adonovan): pretty-print FuncDecl.FuncType or TypeSpec.Type?
+ item := CompletionItem{
+ Label: id.Name,
+ Detail: fmt.Sprintf("%s (from %q)", strings.ToLower(tok.String()), m.PkgPath),
+ InsertText: id.Name,
+ Score: unimportedScore(relevances[path]),
+ }
+ switch tok {
+ case token.FUNC:
+ item.Kind = protocol.FunctionCompletion
+ case token.VAR:
+ item.Kind = protocol.VariableCompletion
+ case token.CONST:
+ item.Kind = protocol.ConstantCompletion
+ case token.TYPE:
+ // Without types, we can't distinguish Class from Interface.
+ item.Kind = protocol.ClassCompletion
+ }
+
+ if needImport {
+ imp := &importInfo{importPath: path}
+ if imports.ImportPathToAssumedName(path) != string(m.Name) {
+ imp.name = string(m.Name)
+ }
+ item.AdditionalTextEdits, _ = c.importEdits(imp)
+ }
+
+ // For functions, add a parameter snippet.
+ if fn != nil {
+ var sn snippet.Builder
+ sn.WriteText(id.Name)
+ sn.WriteText("(")
+ var nparams int
+ for _, field := range fn.Type.Params.List {
+ if field.Names != nil {
+ nparams += len(field.Names)
+ } else {
+ nparams++
+ }
+ }
+ for i := 0; i < nparams; i++ {
+ if i > 0 {
+ sn.WriteText(", ")
+ }
+ sn.WritePlaceholder(nil)
+ }
+ sn.WriteText(")")
+ item.snippet = &sn
+ }
+
+ itemsMu.Lock()
+ c.items = append(c.items, item)
+ if len(c.items) >= unimportedMemberTarget {
+ atomic.StoreInt32(&enough, 1)
+ }
+ itemsMu.Unlock()
+ })
+ return nil
+ }
+
+ // Extract the package-level candidates using a quick parse.
+ var g errgroup.Group
+ for _, path := range paths {
+ for _, m := range known[source.PackagePath(path)] {
+ m := m
+ for _, uri := range m.CompiledGoFiles {
+ uri := uri
+ g.Go(func() error {
+ return quickParse(uri, m)
+ })
+ }
+ }
+ }
+ if err := g.Wait(); err != nil {
+ return err
+ }
+
+ // In addition, we search in the module cache using goimports.
+ ctx, cancel := context.WithCancel(ctx)
+ var mu sync.Mutex
+ add := func(pkgExport imports.PackageExport) {
+ mu.Lock()
+ defer mu.Unlock()
+ // TODO(adonovan): what if the actual package has a vendor/ prefix?
+ if _, ok := known[source.PackagePath(pkgExport.Fix.StmtInfo.ImportPath)]; ok {
+ return // We got this one above.
+ }
+
+ // Continue with untyped proposals.
+ pkg := types.NewPackage(pkgExport.Fix.StmtInfo.ImportPath, pkgExport.Fix.IdentName)
+ for _, export := range pkgExport.Exports {
+ score := unimportedScore(pkgExport.Fix.Relevance)
+ c.deepState.enqueue(candidate{
+ obj: types.NewVar(0, pkg, export, nil),
+ score: score,
+ imp: &importInfo{
+ importPath: pkgExport.Fix.StmtInfo.ImportPath,
+ name: pkgExport.Fix.StmtInfo.Name,
+ },
+ })
+ }
+ if len(c.items) >= unimportedMemberTarget {
+ cancel()
+ }
+ }
+
+ c.completionCallbacks = append(c.completionCallbacks, func(opts *imports.Options) error {
+ defer cancel()
+ return imports.GetPackageExports(ctx, add, id.Name, c.filename, c.pkg.GetTypes().Name(), opts.Env)
+ })
+ return nil
+}
+
+// unimportedScore returns a score for an unimported package that is generally
+// lower than other candidates.
+func unimportedScore(relevance float64) float64 {
+ return (stdScore + .1*relevance) / 2
+}
+
+func (c *completer) packageMembers(pkg *types.Package, score float64, imp *importInfo, cb func(candidate)) {
+ scope := pkg.Scope()
+ for _, name := range scope.Names() {
+ obj := scope.Lookup(name)
+ cb(candidate{
+ obj: obj,
+ score: score,
+ imp: imp,
+ addressable: isVar(obj),
+ })
+ }
+}
+
+func (c *completer) methodsAndFields(typ types.Type, addressable bool, imp *importInfo, cb func(candidate)) {
+ mset := c.methodSetCache[methodSetKey{typ, addressable}]
+ if mset == nil {
+ if addressable && !types.IsInterface(typ) && !isPointer(typ) {
+ // Add methods of *T, which includes methods with receiver T.
+ mset = types.NewMethodSet(types.NewPointer(typ))
+ } else {
+ // Add methods of T.
+ mset = types.NewMethodSet(typ)
+ }
+ c.methodSetCache[methodSetKey{typ, addressable}] = mset
+ }
+
+ if isStarTestingDotF(typ) && addressable {
+ // is that a sufficient test? (or is more care needed?)
+ if c.fuzz(typ, mset, imp, cb, c.pkg.FileSet()) {
+ return
+ }
+ }
+
+ for i := 0; i < mset.Len(); i++ {
+ cb(candidate{
+ obj: mset.At(i).Obj(),
+ score: stdScore,
+ imp: imp,
+ addressable: addressable || isPointer(typ),
+ })
+ }
+
+ // Add fields of T.
+ eachField(typ, func(v *types.Var) {
+ cb(candidate{
+ obj: v,
+ score: stdScore - 0.01,
+ imp: imp,
+ addressable: addressable || isPointer(typ),
+ })
+ })
+}
+
+// isStarTestingDotF reports whether typ is *testing.F.
+func isStarTestingDotF(typ types.Type) bool {
+ ptr, _ := typ.(*types.Pointer)
+ if ptr == nil {
+ return false
+ }
+ named, _ := ptr.Elem().(*types.Named)
+ if named == nil {
+ return false
+ }
+ obj := named.Obj()
+ // obj.Pkg is nil for the error type.
+ return obj != nil && obj.Pkg() != nil && obj.Pkg().Path() == "testing" && obj.Name() == "F"
+}
+
+// lexical finds completions in the lexical environment.
+func (c *completer) lexical(ctx context.Context) error {
+ var (
+ builtinIota = types.Universe.Lookup("iota")
+ builtinNil = types.Universe.Lookup("nil")
+
+ // TODO(rfindley): only allow "comparable" where it is valid (in constraint
+ // position or embedded in interface declarations).
+ // builtinComparable = types.Universe.Lookup("comparable")
+ )
+
+ // Track seen variables to avoid showing completions for shadowed variables.
+ // This works since we look at scopes from innermost to outermost.
+ seen := make(map[string]struct{})
+
+ // Process scopes innermost first.
+ for i, scope := range c.scopes {
+ if scope == nil {
+ continue
+ }
+
+ Names:
+ for _, name := range scope.Names() {
+ declScope, obj := scope.LookupParent(name, c.pos)
+ if declScope != scope {
+ continue // Name was declared in some enclosing scope, or not at all.
+ }
+
+ // If obj's type is invalid, find the AST node that defines the lexical block
+ // containing the declaration of obj. Don't resolve types for packages.
+ if !isPkgName(obj) && !typeIsValid(obj.Type()) {
+ // Match the scope to its ast.Node. If the scope is the package scope,
+ // use the *ast.File as the starting node.
+ var node ast.Node
+ if i < len(c.path) {
+ node = c.path[i]
+ } else if i == len(c.path) { // use the *ast.File for package scope
+ node = c.path[i-1]
+ }
+ if node != nil {
+ if resolved := resolveInvalid(c.pkg.FileSet(), obj, node, c.pkg.GetTypesInfo()); resolved != nil {
+ obj = resolved
+ }
+ }
+ }
+
+ // Don't use LHS of decl in RHS.
+ for _, ident := range enclosingDeclLHS(c.path) {
+ if obj.Pos() == ident.Pos() {
+ continue Names
+ }
+ }
+
+ // Don't suggest "iota" outside of const decls.
+ if obj == builtinIota && !c.inConstDecl() {
+ continue
+ }
+
+ // Rank outer scopes lower than inner.
+ score := stdScore * math.Pow(.99, float64(i))
+
+ // Dowrank "nil" a bit so it is ranked below more interesting candidates.
+ if obj == builtinNil {
+ score /= 2
+ }
+
+ // If we haven't already added a candidate for an object with this name.
+ if _, ok := seen[obj.Name()]; !ok {
+ seen[obj.Name()] = struct{}{}
+ c.deepState.enqueue(candidate{
+ obj: obj,
+ score: score,
+ addressable: isVar(obj),
+ })
+ }
+ }
+ }
+
+ if c.inference.objType != nil {
+ if named, _ := source.Deref(c.inference.objType).(*types.Named); named != nil {
+ // If we expected a named type, check the type's package for
+ // completion items. This is useful when the current file hasn't
+ // imported the type's package yet.
+
+ if named.Obj() != nil && named.Obj().Pkg() != nil {
+ pkg := named.Obj().Pkg()
+
+ // Make sure the package name isn't already in use by another
+ // object, and that this file doesn't import the package yet.
+ // TODO(adonovan): what if pkg.Path has vendor/ prefix?
+ if _, ok := seen[pkg.Name()]; !ok && pkg != c.pkg.GetTypes() && !alreadyImports(c.file, source.ImportPath(pkg.Path())) {
+ seen[pkg.Name()] = struct{}{}
+ obj := types.NewPkgName(0, nil, pkg.Name(), pkg)
+ imp := &importInfo{
+ importPath: pkg.Path(),
+ }
+ if imports.ImportPathToAssumedName(pkg.Path()) != pkg.Name() {
+ imp.name = pkg.Name()
+ }
+ c.deepState.enqueue(candidate{
+ obj: obj,
+ score: stdScore,
+ imp: imp,
+ })
+ }
+ }
+ }
+ }
+
+ if c.opts.unimported {
+ if err := c.unimportedPackages(ctx, seen); err != nil {
+ return err
+ }
+ }
+
+ if c.inference.typeName.isTypeParam {
+ // If we are completing a type param, offer each structural type.
+ // This ensures we suggest "[]int" and "[]float64" for a constraint
+ // with type union "[]int | []float64".
+ if t, _ := c.inference.objType.(*types.Interface); t != nil {
+ terms, _ := typeparams.InterfaceTermSet(t)
+ for _, term := range terms {
+ c.injectType(ctx, term.Type())
+ }
+ }
+ } else {
+ c.injectType(ctx, c.inference.objType)
+ }
+
+ // Add keyword completion items appropriate in the current context.
+ c.addKeywordCompletions()
+
+ return nil
+}
+
+// injectType manufactures candidates based on the given type. This is
+// intended for types not discoverable via lexical search, such as
+// composite and/or generic types. For example, if the type is "[]int",
+// this method makes sure you get candidates "[]int{}" and "[]int"
+// (the latter applies when completing a type name).
+func (c *completer) injectType(ctx context.Context, t types.Type) {
+ if t == nil {
+ return
+ }
+
+ t = source.Deref(t)
+
+ // If we have an expected type and it is _not_ a named type, handle
+ // it specially. Non-named types like "[]int" will never be
+ // considered via a lexical search, so we need to directly inject
+ // them. Also allow generic types since lexical search does not
+ // infer instantiated versions of them.
+ if named, _ := t.(*types.Named); named == nil || typeparams.ForNamed(named).Len() > 0 {
+ // If our expected type is "[]int", this will add a literal
+ // candidate of "[]int{}".
+ c.literal(ctx, t, nil)
+
+ if _, isBasic := t.(*types.Basic); !isBasic {
+ // If we expect a non-basic type name (e.g. "[]int"), hack up
+ // a named type whose name is literally "[]int". This allows
+ // us to reuse our object based completion machinery.
+ fakeNamedType := candidate{
+ obj: types.NewTypeName(token.NoPos, nil, types.TypeString(t, c.qf), t),
+ score: stdScore,
+ }
+ // Make sure the type name matches before considering
+ // candidate. This cuts down on useless candidates.
+ if c.matchingTypeName(&fakeNamedType) {
+ c.deepState.enqueue(fakeNamedType)
+ }
+ }
+ }
+}
+
+func (c *completer) unimportedPackages(ctx context.Context, seen map[string]struct{}) error {
+ var prefix string
+ if c.surrounding != nil {
+ prefix = c.surrounding.Prefix()
+ }
+
+ // Don't suggest unimported packages if we have absolutely nothing
+ // to go on.
+ if prefix == "" {
+ return nil
+ }
+
+ count := 0
+
+ // Search packages across the entire workspace.
+ all, err := c.snapshot.AllMetadata(ctx)
+ if err != nil {
+ return err
+ }
+ pkgNameByPath := make(map[source.PackagePath]string)
+ var paths []string // actually PackagePaths
+ for _, m := range all {
+ if m.ForTest != "" {
+ continue // skip all test variants
+ }
+ if m.Name == "main" {
+ continue // main is non-importable
+ }
+ if !strings.HasPrefix(string(m.Name), prefix) {
+ continue // not a match
+ }
+ paths = append(paths, string(m.PkgPath))
+ pkgNameByPath[m.PkgPath] = string(m.Name)
+ }
+
+ // Rank candidates using goimports' algorithm.
+ var relevances map[string]float64
+ if len(paths) != 0 {
+ if err := c.snapshot.RunProcessEnvFunc(ctx, func(opts *imports.Options) error {
+ var err error
+ relevances, err = imports.ScoreImportPaths(ctx, opts.Env, paths)
+ return err
+ }); err != nil {
+ return err
+ }
+ }
+ sort.Slice(paths, func(i, j int) bool {
+ if relevances[paths[i]] != relevances[paths[j]] {
+ return relevances[paths[i]] > relevances[paths[j]]
+ }
+
+ // Fall back to lexical sort to keep truncated set of candidates
+ // in a consistent order.
+ return paths[i] < paths[j]
+ })
+
+ for _, path := range paths {
+ name := pkgNameByPath[source.PackagePath(path)]
+ if _, ok := seen[name]; ok {
+ continue
+ }
+ imp := &importInfo{
+ importPath: path,
+ }
+ if imports.ImportPathToAssumedName(path) != name {
+ imp.name = name
+ }
+ if count >= maxUnimportedPackageNames {
+ return nil
+ }
+ c.deepState.enqueue(candidate{
+ // Pass an empty *types.Package to disable deep completions.
+ obj: types.NewPkgName(0, nil, name, types.NewPackage(path, name)),
+ score: unimportedScore(relevances[path]),
+ imp: imp,
+ })
+ count++
+ }
+
+ ctx, cancel := context.WithCancel(ctx)
+
+ var mu sync.Mutex
+ add := func(pkg imports.ImportFix) {
+ mu.Lock()
+ defer mu.Unlock()
+ if _, ok := seen[pkg.IdentName]; ok {
+ return
+ }
+ if _, ok := relevances[pkg.StmtInfo.ImportPath]; ok {
+ return
+ }
+
+ if count >= maxUnimportedPackageNames {
+ cancel()
+ return
+ }
+
+ // Do not add the unimported packages to seen, since we can have
+ // multiple packages of the same name as completion suggestions, since
+ // only one will be chosen.
+ obj := types.NewPkgName(0, nil, pkg.IdentName, types.NewPackage(pkg.StmtInfo.ImportPath, pkg.IdentName))
+ c.deepState.enqueue(candidate{
+ obj: obj,
+ score: unimportedScore(pkg.Relevance),
+ imp: &importInfo{
+ importPath: pkg.StmtInfo.ImportPath,
+ name: pkg.StmtInfo.Name,
+ },
+ })
+ count++
+ }
+ c.completionCallbacks = append(c.completionCallbacks, func(opts *imports.Options) error {
+ defer cancel()
+ return imports.GetAllCandidates(ctx, add, prefix, c.filename, c.pkg.GetTypes().Name(), opts.Env)
+ })
+ return nil
+}
+
+// alreadyImports reports whether f has an import with the specified path.
+func alreadyImports(f *ast.File, path source.ImportPath) bool {
+ for _, s := range f.Imports {
+ if source.UnquoteImportPath(s) == path {
+ return true
+ }
+ }
+ return false
+}
+
+func (c *completer) inConstDecl() bool {
+ for _, n := range c.path {
+ if decl, ok := n.(*ast.GenDecl); ok && decl.Tok == token.CONST {
+ return true
+ }
+ }
+ return false
+}
+
+// structLiteralFieldName finds completions for struct field names inside a struct literal.
+func (c *completer) structLiteralFieldName(ctx context.Context) error {
+ clInfo := c.enclosingCompositeLiteral
+
+ // Mark fields of the composite literal that have already been set,
+ // except for the current field.
+ addedFields := make(map[*types.Var]bool)
+ for _, el := range clInfo.cl.Elts {
+ if kvExpr, ok := el.(*ast.KeyValueExpr); ok {
+ if clInfo.kv == kvExpr {
+ continue
+ }
+
+ if key, ok := kvExpr.Key.(*ast.Ident); ok {
+ if used, ok := c.pkg.GetTypesInfo().Uses[key]; ok {
+ if usedVar, ok := used.(*types.Var); ok {
+ addedFields[usedVar] = true
+ }
+ }
+ }
+ }
+ }
+
+ deltaScore := 0.0001
+ switch t := clInfo.clType.(type) {
+ case *types.Struct:
+ for i := 0; i < t.NumFields(); i++ {
+ field := t.Field(i)
+ if !addedFields[field] {
+ c.deepState.enqueue(candidate{
+ obj: field,
+ score: highScore - float64(i)*deltaScore,
+ })
+ }
+ }
+
+ // Add lexical completions if we aren't certain we are in the key part of a
+ // key-value pair.
+ if clInfo.maybeInFieldName {
+ return c.lexical(ctx)
+ }
+ default:
+ return c.lexical(ctx)
+ }
+
+ return nil
+}
+
+func (cl *compLitInfo) isStruct() bool {
+ _, ok := cl.clType.(*types.Struct)
+ return ok
+}
+
+// enclosingCompositeLiteral returns information about the composite literal enclosing the
+// position.
+func enclosingCompositeLiteral(path []ast.Node, pos token.Pos, info *types.Info) *compLitInfo {
+ for _, n := range path {
+ switch n := n.(type) {
+ case *ast.CompositeLit:
+ // The enclosing node will be a composite literal if the user has just
+ // opened the curly brace (e.g. &x{<>) or the completion request is triggered
+ // from an already completed composite literal expression (e.g. &x{foo: 1, <>})
+ //
+ // The position is not part of the composite literal unless it falls within the
+ // curly braces (e.g. "foo.Foo<>Struct{}").
+ if !(n.Lbrace < pos && pos <= n.Rbrace) {
+ // Keep searching since we may yet be inside a composite literal.
+ // For example "Foo{B: Ba<>{}}".
+ break
+ }
+
+ tv, ok := info.Types[n]
+ if !ok {
+ return nil
+ }
+
+ clInfo := compLitInfo{
+ cl: n,
+ clType: source.Deref(tv.Type).Underlying(),
+ }
+
+ var (
+ expr ast.Expr
+ hasKeys bool
+ )
+ for _, el := range n.Elts {
+ // Remember the expression that the position falls in, if any.
+ if el.Pos() <= pos && pos <= el.End() {
+ expr = el
+ }
+
+ if kv, ok := el.(*ast.KeyValueExpr); ok {
+ hasKeys = true
+ // If expr == el then we know the position falls in this expression,
+ // so also record kv as the enclosing *ast.KeyValueExpr.
+ if expr == el {
+ clInfo.kv = kv
+ break
+ }
+ }
+ }
+
+ if clInfo.kv != nil {
+ // If in a *ast.KeyValueExpr, we know we are in the key if the position
+ // is to the left of the colon (e.g. "Foo{F<>: V}".
+ clInfo.inKey = pos <= clInfo.kv.Colon
+ } else if hasKeys {
+ // If we aren't in a *ast.KeyValueExpr but the composite literal has
+ // other *ast.KeyValueExprs, we must be on the key side of a new
+ // *ast.KeyValueExpr (e.g. "Foo{F: V, <>}").
+ clInfo.inKey = true
+ } else {
+ switch clInfo.clType.(type) {
+ case *types.Struct:
+ if len(n.Elts) == 0 {
+ // If the struct literal is empty, next could be a struct field
+ // name or an expression (e.g. "Foo{<>}" could become "Foo{F:}"
+ // or "Foo{someVar}").
+ clInfo.maybeInFieldName = true
+ } else if len(n.Elts) == 1 {
+ // If there is one expression and the position is in that expression
+ // and the expression is an identifier, we may be writing a field
+ // name or an expression (e.g. "Foo{F<>}").
+ _, clInfo.maybeInFieldName = expr.(*ast.Ident)
+ }
+ case *types.Map:
+ // If we aren't in a *ast.KeyValueExpr we must be adding a new key
+ // to the map.
+ clInfo.inKey = true
+ }
+ }
+
+ return &clInfo
+ default:
+ if breaksExpectedTypeInference(n, pos) {
+ return nil
+ }
+ }
+ }
+
+ return nil
+}
+
+// enclosingFunction returns the signature and body of the function
+// enclosing the given position.
+func enclosingFunction(path []ast.Node, info *types.Info) *funcInfo {
+ for _, node := range path {
+ switch t := node.(type) {
+ case *ast.FuncDecl:
+ if obj, ok := info.Defs[t.Name]; ok {
+ return &funcInfo{
+ sig: obj.Type().(*types.Signature),
+ body: t.Body,
+ }
+ }
+ case *ast.FuncLit:
+ if typ, ok := info.Types[t]; ok {
+ if sig, _ := typ.Type.(*types.Signature); sig == nil {
+ // golang/go#49397: it should not be possible, but we somehow arrived
+ // here with a non-signature type, most likely due to AST mangling
+ // such that node.Type is not a FuncType.
+ return nil
+ }
+ return &funcInfo{
+ sig: typ.Type.(*types.Signature),
+ body: t.Body,
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func (c *completer) expectedCompositeLiteralType() types.Type {
+ clInfo := c.enclosingCompositeLiteral
+ switch t := clInfo.clType.(type) {
+ case *types.Slice:
+ if clInfo.inKey {
+ return types.Typ[types.UntypedInt]
+ }
+ return t.Elem()
+ case *types.Array:
+ if clInfo.inKey {
+ return types.Typ[types.UntypedInt]
+ }
+ return t.Elem()
+ case *types.Map:
+ if clInfo.inKey {
+ return t.Key()
+ }
+ return t.Elem()
+ case *types.Struct:
+ // If we are completing a key (i.e. field name), there is no expected type.
+ if clInfo.inKey {
+ return nil
+ }
+
+ // If we are in a key-value pair, but not in the key, then we must be on the
+ // value side. The expected type of the value will be determined from the key.
+ if clInfo.kv != nil {
+ if key, ok := clInfo.kv.Key.(*ast.Ident); ok {
+ for i := 0; i < t.NumFields(); i++ {
+ if field := t.Field(i); field.Name() == key.Name {
+ return field.Type()
+ }
+ }
+ }
+ } else {
+ // If we aren't in a key-value pair and aren't in the key, we must be using
+ // implicit field names.
+
+ // The order of the literal fields must match the order in the struct definition.
+ // Find the element that the position belongs to and suggest that field's type.
+ if i := exprAtPos(c.pos, clInfo.cl.Elts); i < t.NumFields() {
+ return t.Field(i).Type()
+ }
+ }
+ }
+ return nil
+}
+
+// typeMod represents an operator that changes the expected type.
+type typeMod struct {
+ mod typeModKind
+ arrayLen int64
+}
+
+type typeModKind int
+
+const (
+ dereference typeModKind = iota // pointer indirection: "*"
+ reference // adds level of pointer: "&" for values, "*" for type names
+ chanRead // channel read operator: "<-"
+ sliceType // make a slice type: "[]" in "[]int"
+ arrayType // make an array type: "[2]" in "[2]int"
+ invoke // make a function call: "()" in "foo()"
+ takeSlice // take slice of array: "[:]" in "foo[:]"
+ takeDotDotDot // turn slice into variadic args: "..." in "foo..."
+ index // index into slice/array: "[0]" in "foo[0]"
+)
+
+type objKind int
+
+const (
+ kindAny objKind = 0
+ kindArray objKind = 1 << iota
+ kindSlice
+ kindChan
+ kindMap
+ kindStruct
+ kindString
+ kindInt
+ kindBool
+ kindBytes
+ kindPtr
+ kindFloat
+ kindComplex
+ kindError
+ kindStringer
+ kindFunc
+)
+
+// penalizedObj represents an object that should be disfavored as a
+// completion candidate.
+type penalizedObj struct {
+ // objChain is the full "chain", e.g. "foo.bar().baz" becomes
+ // []types.Object{foo, bar, baz}.
+ objChain []types.Object
+ // penalty is score penalty in the range (0, 1).
+ penalty float64
+}
+
+// candidateInference holds information we have inferred about a type that can be
+// used at the current position.
+type candidateInference struct {
+ // objType is the desired type of an object used at the query position.
+ objType types.Type
+
+ // objKind is a mask of expected kinds of types such as "map", "slice", etc.
+ objKind objKind
+
+ // variadic is true if we are completing the initial variadic
+ // parameter. For example:
+ // append([]T{}, <>) // objType=T variadic=true
+ // append([]T{}, T{}, <>) // objType=T variadic=false
+ variadic bool
+
+ // modifiers are prefixes such as "*", "&" or "<-" that influence how
+ // a candidate type relates to the expected type.
+ modifiers []typeMod
+
+ // convertibleTo is a type our candidate type must be convertible to.
+ convertibleTo types.Type
+
+ // typeName holds information about the expected type name at
+ // position, if any.
+ typeName typeNameInference
+
+ // assignees are the types that would receive a function call's
+ // results at the position. For example:
+ //
+ // foo := 123
+ // foo, bar := <>
+ //
+ // at "<>", the assignees are [int, <invalid>].
+ assignees []types.Type
+
+ // variadicAssignees is true if we could be completing an inner
+ // function call that fills out an outer function call's variadic
+ // params. For example:
+ //
+ // func foo(int, ...string) {}
+ //
+ // foo(<>) // variadicAssignees=true
+ // foo(bar<>) // variadicAssignees=true
+ // foo(bar, baz<>) // variadicAssignees=false
+ variadicAssignees bool
+
+ // penalized holds expressions that should be disfavored as
+ // candidates. For example, it tracks expressions already used in a
+ // switch statement's other cases. Each expression is tracked using
+ // its entire object "chain" allowing differentiation between
+ // "a.foo" and "b.foo" when "a" and "b" are the same type.
+ penalized []penalizedObj
+
+ // objChain contains the chain of objects representing the
+ // surrounding *ast.SelectorExpr. For example, if we are completing
+ // "foo.bar.ba<>", objChain will contain []types.Object{foo, bar}.
+ objChain []types.Object
+}
+
+// typeNameInference holds information about the expected type name at
+// position.
+type typeNameInference struct {
+ // wantTypeName is true if we expect the name of a type.
+ wantTypeName bool
+
+ // modifiers are prefixes such as "*", "&" or "<-" that influence how
+ // a candidate type relates to the expected type.
+ modifiers []typeMod
+
+ // assertableFrom is a type that must be assertable to our candidate type.
+ assertableFrom types.Type
+
+ // wantComparable is true if we want a comparable type.
+ wantComparable bool
+
+ // seenTypeSwitchCases tracks types that have already been used by
+ // the containing type switch.
+ seenTypeSwitchCases []types.Type
+
+ // compLitType is true if we are completing a composite literal type
+ // name, e.g "foo<>{}".
+ compLitType bool
+
+ // isTypeParam is true if we are completing a type instantiation parameter
+ isTypeParam bool
+}
+
+// expectedCandidate returns information about the expected candidate
+// for an expression at the query position.
+func expectedCandidate(ctx context.Context, c *completer) (inf candidateInference) {
+ inf.typeName = expectTypeName(c)
+
+ if c.enclosingCompositeLiteral != nil {
+ inf.objType = c.expectedCompositeLiteralType()
+ }
+
+Nodes:
+ for i, node := range c.path {
+ switch node := node.(type) {
+ case *ast.BinaryExpr:
+ // Determine if query position comes from left or right of op.
+ e := node.X
+ if c.pos < node.OpPos {
+ e = node.Y
+ }
+ if tv, ok := c.pkg.GetTypesInfo().Types[e]; ok {
+ switch node.Op {
+ case token.LAND, token.LOR:
+ // Don't infer "bool" type for "&&" or "||". Often you want
+ // to compose a boolean expression from non-boolean
+ // candidates.
+ default:
+ inf.objType = tv.Type
+ }
+ break Nodes
+ }
+ case *ast.AssignStmt:
+ // Only rank completions if you are on the right side of the token.
+ if c.pos > node.TokPos {
+ i := exprAtPos(c.pos, node.Rhs)
+ if i >= len(node.Lhs) {
+ i = len(node.Lhs) - 1
+ }
+ if tv, ok := c.pkg.GetTypesInfo().Types[node.Lhs[i]]; ok {
+ inf.objType = tv.Type
+ }
+
+ // If we have a single expression on the RHS, record the LHS
+ // assignees so we can favor multi-return function calls with
+ // matching result values.
+ if len(node.Rhs) <= 1 {
+ for _, lhs := range node.Lhs {
+ inf.assignees = append(inf.assignees, c.pkg.GetTypesInfo().TypeOf(lhs))
+ }
+ } else {
+ // Otherwise, record our single assignee, even if its type is
+ // not available. We use this info to downrank functions
+ // with the wrong number of result values.
+ inf.assignees = append(inf.assignees, c.pkg.GetTypesInfo().TypeOf(node.Lhs[i]))
+ }
+ }
+ return inf
+ case *ast.ValueSpec:
+ if node.Type != nil && c.pos > node.Type.End() {
+ inf.objType = c.pkg.GetTypesInfo().TypeOf(node.Type)
+ }
+ return inf
+ case *ast.CallExpr:
+ // Only consider CallExpr args if position falls between parens.
+ if node.Lparen < c.pos && c.pos <= node.Rparen {
+ // For type conversions like "int64(foo)" we can only infer our
+ // desired type is convertible to int64.
+ if typ := typeConversion(node, c.pkg.GetTypesInfo()); typ != nil {
+ inf.convertibleTo = typ
+ break Nodes
+ }
+
+ sig, _ := c.pkg.GetTypesInfo().Types[node.Fun].Type.(*types.Signature)
+
+ if sig != nil && typeparams.ForSignature(sig).Len() > 0 {
+ // If we are completing a generic func call, re-check the call expression.
+ // This allows type param inference to work in cases like:
+ //
+ // func foo[T any](T) {}
+ // foo[int](<>) // <- get "int" completions instead of "T"
+ //
+ // TODO: remove this after https://go.dev/issue/52503
+ info := &types.Info{Types: make(map[ast.Expr]types.TypeAndValue)}
+ types.CheckExpr(c.pkg.FileSet(), c.pkg.GetTypes(), node.Fun.Pos(), node.Fun, info)
+ sig, _ = info.Types[node.Fun].Type.(*types.Signature)
+ }
+
+ if sig != nil {
+ inf = c.expectedCallParamType(inf, node, sig)
+ }
+
+ if funIdent, ok := node.Fun.(*ast.Ident); ok {
+ obj := c.pkg.GetTypesInfo().ObjectOf(funIdent)
+
+ if obj != nil && obj.Parent() == types.Universe {
+ // Defer call to builtinArgType so we can provide it the
+ // inferred type from its parent node.
+ defer func() {
+ inf = c.builtinArgType(obj, node, inf)
+ inf.objKind = c.builtinArgKind(ctx, obj, node)
+ }()
+
+ // The expected type of builtin arguments like append() is
+ // the expected type of the builtin call itself. For
+ // example:
+ //
+ // var foo []int = append(<>)
+ //
+ // To find the expected type at <> we "skip" the append()
+ // node and get the expected type one level up, which is
+ // []int.
+ continue Nodes
+ }
+ }
+
+ return inf
+ }
+ case *ast.ReturnStmt:
+ if c.enclosingFunc != nil {
+ sig := c.enclosingFunc.sig
+ // Find signature result that corresponds to our return statement.
+ if resultIdx := exprAtPos(c.pos, node.Results); resultIdx < len(node.Results) {
+ if resultIdx < sig.Results().Len() {
+ inf.objType = sig.Results().At(resultIdx).Type()
+ }
+ }
+ }
+ return inf
+ case *ast.CaseClause:
+ if swtch, ok := findSwitchStmt(c.path[i+1:], c.pos, node).(*ast.SwitchStmt); ok {
+ if tv, ok := c.pkg.GetTypesInfo().Types[swtch.Tag]; ok {
+ inf.objType = tv.Type
+
+ // Record which objects have already been used in the case
+ // statements so we don't suggest them again.
+ for _, cc := range swtch.Body.List {
+ for _, caseExpr := range cc.(*ast.CaseClause).List {
+ // Don't record the expression we are currently completing.
+ if caseExpr.Pos() < c.pos && c.pos <= caseExpr.End() {
+ continue
+ }
+
+ if objs := objChain(c.pkg.GetTypesInfo(), caseExpr); len(objs) > 0 {
+ inf.penalized = append(inf.penalized, penalizedObj{objChain: objs, penalty: 0.1})
+ }
+ }
+ }
+ }
+ }
+ return inf
+ case *ast.SliceExpr:
+ // Make sure position falls within the brackets (e.g. "foo[a:<>]").
+ if node.Lbrack < c.pos && c.pos <= node.Rbrack {
+ inf.objType = types.Typ[types.UntypedInt]
+ }
+ return inf
+ case *ast.IndexExpr:
+ // Make sure position falls within the brackets (e.g. "foo[<>]").
+ if node.Lbrack < c.pos && c.pos <= node.Rbrack {
+ if tv, ok := c.pkg.GetTypesInfo().Types[node.X]; ok {
+ switch t := tv.Type.Underlying().(type) {
+ case *types.Map:
+ inf.objType = t.Key()
+ case *types.Slice, *types.Array:
+ inf.objType = types.Typ[types.UntypedInt]
+ }
+
+ if ct := expectedConstraint(tv.Type, 0); ct != nil {
+ inf.objType = ct
+ inf.typeName.wantTypeName = true
+ inf.typeName.isTypeParam = true
+ }
+ }
+ }
+ return inf
+ case *typeparams.IndexListExpr:
+ if node.Lbrack < c.pos && c.pos <= node.Rbrack {
+ if tv, ok := c.pkg.GetTypesInfo().Types[node.X]; ok {
+ if ct := expectedConstraint(tv.Type, exprAtPos(c.pos, node.Indices)); ct != nil {
+ inf.objType = ct
+ inf.typeName.wantTypeName = true
+ inf.typeName.isTypeParam = true
+ }
+ }
+ }
+ return inf
+ case *ast.SendStmt:
+ // Make sure we are on right side of arrow (e.g. "foo <- <>").
+ if c.pos > node.Arrow+1 {
+ if tv, ok := c.pkg.GetTypesInfo().Types[node.Chan]; ok {
+ if ch, ok := tv.Type.Underlying().(*types.Chan); ok {
+ inf.objType = ch.Elem()
+ }
+ }
+ }
+ return inf
+ case *ast.RangeStmt:
+ if source.NodeContains(node.X, c.pos) {
+ inf.objKind |= kindSlice | kindArray | kindMap | kindString
+ if node.Value == nil {
+ inf.objKind |= kindChan
+ }
+ }
+ return inf
+ case *ast.StarExpr:
+ inf.modifiers = append(inf.modifiers, typeMod{mod: dereference})
+ case *ast.UnaryExpr:
+ switch node.Op {
+ case token.AND:
+ inf.modifiers = append(inf.modifiers, typeMod{mod: reference})
+ case token.ARROW:
+ inf.modifiers = append(inf.modifiers, typeMod{mod: chanRead})
+ }
+ case *ast.DeferStmt, *ast.GoStmt:
+ inf.objKind |= kindFunc
+ return inf
+ default:
+ if breaksExpectedTypeInference(node, c.pos) {
+ return inf
+ }
+ }
+ }
+
+ return inf
+}
+
+func (c *completer) expectedCallParamType(inf candidateInference, node *ast.CallExpr, sig *types.Signature) candidateInference {
+ numParams := sig.Params().Len()
+ if numParams == 0 {
+ return inf
+ }
+
+ exprIdx := exprAtPos(c.pos, node.Args)
+
+ // If we have one or zero arg expressions, we may be
+ // completing to a function call that returns multiple
+ // values, in turn getting passed in to the surrounding
+ // call. Record the assignees so we can favor function
+ // calls that return matching values.
+ if len(node.Args) <= 1 && exprIdx == 0 {
+ for i := 0; i < sig.Params().Len(); i++ {
+ inf.assignees = append(inf.assignees, sig.Params().At(i).Type())
+ }
+
+ // Record that we may be completing into variadic parameters.
+ inf.variadicAssignees = sig.Variadic()
+ }
+
+ // Make sure not to run past the end of expected parameters.
+ if exprIdx >= numParams {
+ inf.objType = sig.Params().At(numParams - 1).Type()
+ } else {
+ inf.objType = sig.Params().At(exprIdx).Type()
+ }
+
+ if sig.Variadic() && exprIdx >= (numParams-1) {
+ // If we are completing a variadic param, deslice the variadic type.
+ inf.objType = deslice(inf.objType)
+ // Record whether we are completing the initial variadic param.
+ inf.variadic = exprIdx == numParams-1 && len(node.Args) <= numParams
+
+ // Check if we can infer object kind from printf verb.
+ inf.objKind |= printfArgKind(c.pkg.GetTypesInfo(), node, exprIdx)
+ }
+
+ // If our expected type is an uninstantiated generic type param,
+ // swap to the constraint which will do a decent job filtering
+ // candidates.
+ if tp, _ := inf.objType.(*typeparams.TypeParam); tp != nil {
+ inf.objType = tp.Constraint()
+ }
+
+ return inf
+}
+
+func expectedConstraint(t types.Type, idx int) types.Type {
+ var tp *typeparams.TypeParamList
+ if named, _ := t.(*types.Named); named != nil {
+ tp = typeparams.ForNamed(named)
+ } else if sig, _ := t.Underlying().(*types.Signature); sig != nil {
+ tp = typeparams.ForSignature(sig)
+ }
+ if tp == nil || idx >= tp.Len() {
+ return nil
+ }
+ return tp.At(idx).Constraint()
+}
+
+// objChain decomposes e into a chain of objects if possible. For
+// example, "foo.bar().baz" will yield []types.Object{foo, bar, baz}.
+// If any part can't be turned into an object, return nil.
+func objChain(info *types.Info, e ast.Expr) []types.Object {
+ var objs []types.Object
+
+ for e != nil {
+ switch n := e.(type) {
+ case *ast.Ident:
+ obj := info.ObjectOf(n)
+ if obj == nil {
+ return nil
+ }
+ objs = append(objs, obj)
+ e = nil
+ case *ast.SelectorExpr:
+ obj := info.ObjectOf(n.Sel)
+ if obj == nil {
+ return nil
+ }
+ objs = append(objs, obj)
+ e = n.X
+ case *ast.CallExpr:
+ if len(n.Args) > 0 {
+ return nil
+ }
+ e = n.Fun
+ default:
+ return nil
+ }
+ }
+
+ // Reverse order so the layout matches the syntactic order.
+ for i := 0; i < len(objs)/2; i++ {
+ objs[i], objs[len(objs)-1-i] = objs[len(objs)-1-i], objs[i]
+ }
+
+ return objs
+}
+
+// applyTypeModifiers applies the list of type modifiers to a type.
+// It returns nil if the modifiers could not be applied.
+func (ci candidateInference) applyTypeModifiers(typ types.Type, addressable bool) types.Type {
+ for _, mod := range ci.modifiers {
+ switch mod.mod {
+ case dereference:
+ // For every "*" indirection operator, remove a pointer layer
+ // from candidate type.
+ if ptr, ok := typ.Underlying().(*types.Pointer); ok {
+ typ = ptr.Elem()
+ } else {
+ return nil
+ }
+ case reference:
+ // For every "&" address operator, add another pointer layer to
+ // candidate type, if the candidate is addressable.
+ if addressable {
+ typ = types.NewPointer(typ)
+ } else {
+ return nil
+ }
+ case chanRead:
+ // For every "<-" operator, remove a layer of channelness.
+ if ch, ok := typ.(*types.Chan); ok {
+ typ = ch.Elem()
+ } else {
+ return nil
+ }
+ }
+ }
+
+ return typ
+}
+
+// applyTypeNameModifiers applies the list of type modifiers to a type name.
+func (ci candidateInference) applyTypeNameModifiers(typ types.Type) types.Type {
+ for _, mod := range ci.typeName.modifiers {
+ switch mod.mod {
+ case reference:
+ typ = types.NewPointer(typ)
+ case arrayType:
+ typ = types.NewArray(typ, mod.arrayLen)
+ case sliceType:
+ typ = types.NewSlice(typ)
+ }
+ }
+ return typ
+}
+
+// matchesVariadic returns true if we are completing a variadic
+// parameter and candType is a compatible slice type.
+func (ci candidateInference) matchesVariadic(candType types.Type) bool {
+ return ci.variadic && ci.objType != nil && assignableTo(candType, types.NewSlice(ci.objType))
+}
+
+// findSwitchStmt returns an *ast.CaseClause's corresponding *ast.SwitchStmt or
+// *ast.TypeSwitchStmt. path should start from the case clause's first ancestor.
+func findSwitchStmt(path []ast.Node, pos token.Pos, c *ast.CaseClause) ast.Stmt {
+ // Make sure position falls within a "case <>:" clause.
+ if exprAtPos(pos, c.List) >= len(c.List) {
+ return nil
+ }
+ // A case clause is always nested within a block statement in a switch statement.
+ if len(path) < 2 {
+ return nil
+ }
+ if _, ok := path[0].(*ast.BlockStmt); !ok {
+ return nil
+ }
+ switch s := path[1].(type) {
+ case *ast.SwitchStmt:
+ return s
+ case *ast.TypeSwitchStmt:
+ return s
+ default:
+ return nil
+ }
+}
+
+// breaksExpectedTypeInference reports if an expression node's type is unrelated
+// to its child expression node types. For example, "Foo{Bar: x.Baz(<>)}" should
+// expect a function argument, not a composite literal value.
+func breaksExpectedTypeInference(n ast.Node, pos token.Pos) bool {
+ switch n := n.(type) {
+ case *ast.CompositeLit:
+ // Doesn't break inference if pos is in type name.
+ // For example: "Foo<>{Bar: 123}"
+ return !source.NodeContains(n.Type, pos)
+ case *ast.CallExpr:
+ // Doesn't break inference if pos is in func name.
+ // For example: "Foo<>(123)"
+ return !source.NodeContains(n.Fun, pos)
+ case *ast.FuncLit, *ast.IndexExpr, *ast.SliceExpr:
+ return true
+ default:
+ return false
+ }
+}
+
+// expectTypeName returns information about the expected type name at position.
+func expectTypeName(c *completer) typeNameInference {
+ var inf typeNameInference
+
+Nodes:
+ for i, p := range c.path {
+ switch n := p.(type) {
+ case *ast.FieldList:
+ // Expect a type name if pos is in a FieldList. This applies to
+ // FuncType params/results, FuncDecl receiver, StructType, and
+ // InterfaceType. We don't need to worry about the field name
+ // because completion bails out early if pos is in an *ast.Ident
+ // that defines an object.
+ inf.wantTypeName = true
+ break Nodes
+ case *ast.CaseClause:
+ // Expect type names in type switch case clauses.
+ if swtch, ok := findSwitchStmt(c.path[i+1:], c.pos, n).(*ast.TypeSwitchStmt); ok {
+ // The case clause types must be assertable from the type switch parameter.
+ ast.Inspect(swtch.Assign, func(n ast.Node) bool {
+ if ta, ok := n.(*ast.TypeAssertExpr); ok {
+ inf.assertableFrom = c.pkg.GetTypesInfo().TypeOf(ta.X)
+ return false
+ }
+ return true
+ })
+ inf.wantTypeName = true
+
+ // Track the types that have already been used in this
+ // switch's case statements so we don't recommend them.
+ for _, e := range swtch.Body.List {
+ for _, typeExpr := range e.(*ast.CaseClause).List {
+ // Skip if type expression contains pos. We don't want to
+ // count it as already used if the user is completing it.
+ if typeExpr.Pos() < c.pos && c.pos <= typeExpr.End() {
+ continue
+ }
+
+ if t := c.pkg.GetTypesInfo().TypeOf(typeExpr); t != nil {
+ inf.seenTypeSwitchCases = append(inf.seenTypeSwitchCases, t)
+ }
+ }
+ }
+
+ break Nodes
+ }
+ return typeNameInference{}
+ case *ast.TypeAssertExpr:
+ // Expect type names in type assert expressions.
+ if n.Lparen < c.pos && c.pos <= n.Rparen {
+ // The type in parens must be assertable from the expression type.
+ inf.assertableFrom = c.pkg.GetTypesInfo().TypeOf(n.X)
+ inf.wantTypeName = true
+ break Nodes
+ }
+ return typeNameInference{}
+ case *ast.StarExpr:
+ inf.modifiers = append(inf.modifiers, typeMod{mod: reference})
+ case *ast.CompositeLit:
+ // We want a type name if position is in the "Type" part of a
+ // composite literal (e.g. "Foo<>{}").
+ if n.Type != nil && n.Type.Pos() <= c.pos && c.pos <= n.Type.End() {
+ inf.wantTypeName = true
+ inf.compLitType = true
+
+ if i < len(c.path)-1 {
+ // Track preceding "&" operator. Technically it applies to
+ // the composite literal and not the type name, but if
+ // affects our type completion nonetheless.
+ if u, ok := c.path[i+1].(*ast.UnaryExpr); ok && u.Op == token.AND {
+ inf.modifiers = append(inf.modifiers, typeMod{mod: reference})
+ }
+ }
+ }
+ break Nodes
+ case *ast.ArrayType:
+ // If we are inside the "Elt" part of an array type, we want a type name.
+ if n.Elt.Pos() <= c.pos && c.pos <= n.Elt.End() {
+ inf.wantTypeName = true
+ if n.Len == nil {
+ // No "Len" expression means a slice type.
+ inf.modifiers = append(inf.modifiers, typeMod{mod: sliceType})
+ } else {
+ // Try to get the array type using the constant value of "Len".
+ tv, ok := c.pkg.GetTypesInfo().Types[n.Len]
+ if ok && tv.Value != nil && tv.Value.Kind() == constant.Int {
+ if arrayLen, ok := constant.Int64Val(tv.Value); ok {
+ inf.modifiers = append(inf.modifiers, typeMod{mod: arrayType, arrayLen: arrayLen})
+ }
+ }
+ }
+
+ // ArrayTypes can be nested, so keep going if our parent is an
+ // ArrayType.
+ if i < len(c.path)-1 {
+ if _, ok := c.path[i+1].(*ast.ArrayType); ok {
+ continue Nodes
+ }
+ }
+
+ break Nodes
+ }
+ case *ast.MapType:
+ inf.wantTypeName = true
+ if n.Key != nil {
+ inf.wantComparable = source.NodeContains(n.Key, c.pos)
+ } else {
+ // If the key is empty, assume we are completing the key if
+ // pos is directly after the "map[".
+ inf.wantComparable = c.pos == n.Pos()+token.Pos(len("map["))
+ }
+ break Nodes
+ case *ast.ValueSpec:
+ inf.wantTypeName = source.NodeContains(n.Type, c.pos)
+ break Nodes
+ case *ast.TypeSpec:
+ inf.wantTypeName = source.NodeContains(n.Type, c.pos)
+ default:
+ if breaksExpectedTypeInference(p, c.pos) {
+ return typeNameInference{}
+ }
+ }
+ }
+
+ return inf
+}
+
+func (c *completer) fakeObj(T types.Type) *types.Var {
+ return types.NewVar(token.NoPos, c.pkg.GetTypes(), "", T)
+}
+
+// derivableTypes iterates types you can derive from t. For example,
+// from "foo" we might derive "&foo", and "foo()".
+func derivableTypes(t types.Type, addressable bool, f func(t types.Type, addressable bool, mod typeModKind) bool) bool {
+ switch t := t.Underlying().(type) {
+ case *types.Signature:
+ // If t is a func type with a single result, offer the result type.
+ if t.Results().Len() == 1 && f(t.Results().At(0).Type(), false, invoke) {
+ return true
+ }
+ case *types.Array:
+ if f(t.Elem(), true, index) {
+ return true
+ }
+ // Try converting array to slice.
+ if f(types.NewSlice(t.Elem()), false, takeSlice) {
+ return true
+ }
+ case *types.Pointer:
+ if f(t.Elem(), false, dereference) {
+ return true
+ }
+ case *types.Slice:
+ if f(t.Elem(), true, index) {
+ return true
+ }
+ case *types.Map:
+ if f(t.Elem(), false, index) {
+ return true
+ }
+ case *types.Chan:
+ if f(t.Elem(), false, chanRead) {
+ return true
+ }
+ }
+
+ // Check if c is addressable and a pointer to c matches our type inference.
+ if addressable && f(types.NewPointer(t), false, reference) {
+ return true
+ }
+
+ return false
+}
+
+// anyCandType reports whether f returns true for any candidate type
+// derivable from c. It searches up to three levels of type
+// modification. For example, given "foo" we could discover "***foo"
+// or "*foo()".
+func (c *candidate) anyCandType(f func(t types.Type, addressable bool) bool) bool {
+ if c.obj == nil || c.obj.Type() == nil {
+ return false
+ }
+
+ const maxDepth = 3
+
+ var searchTypes func(t types.Type, addressable bool, mods []typeModKind) bool
+ searchTypes = func(t types.Type, addressable bool, mods []typeModKind) bool {
+ if f(t, addressable) {
+ if len(mods) > 0 {
+ newMods := make([]typeModKind, len(mods)+len(c.mods))
+ copy(newMods, mods)
+ copy(newMods[len(mods):], c.mods)
+ c.mods = newMods
+ }
+ return true
+ }
+
+ if len(mods) == maxDepth {
+ return false
+ }
+
+ return derivableTypes(t, addressable, func(t types.Type, addressable bool, mod typeModKind) bool {
+ return searchTypes(t, addressable, append(mods, mod))
+ })
+ }
+
+ return searchTypes(c.obj.Type(), c.addressable, make([]typeModKind, 0, maxDepth))
+}
+
+// matchingCandidate reports whether cand matches our type inferences.
+// It mutates cand's score in certain cases.
+func (c *completer) matchingCandidate(cand *candidate) bool {
+ if c.completionContext.commentCompletion {
+ return false
+ }
+
+ // Bail out early if we are completing a field name in a composite literal.
+ if v, ok := cand.obj.(*types.Var); ok && v.IsField() && c.wantStructFieldCompletions() {
+ return true
+ }
+
+ if isTypeName(cand.obj) {
+ return c.matchingTypeName(cand)
+ } else if c.wantTypeName() {
+ // If we want a type, a non-type object never matches.
+ return false
+ }
+
+ if c.inference.candTypeMatches(cand) {
+ return true
+ }
+
+ candType := cand.obj.Type()
+ if candType == nil {
+ return false
+ }
+
+ if sig, ok := candType.Underlying().(*types.Signature); ok {
+ if c.inference.assigneesMatch(cand, sig) {
+ // Invoke the candidate if its results are multi-assignable.
+ cand.mods = append(cand.mods, invoke)
+ return true
+ }
+ }
+
+ // Default to invoking *types.Func candidates. This is so function
+ // completions in an empty statement (or other cases with no expected type)
+ // are invoked by default.
+ if isFunc(cand.obj) {
+ cand.mods = append(cand.mods, invoke)
+ }
+
+ return false
+}
+
+// candTypeMatches reports whether cand makes a good completion
+// candidate given the candidate inference. cand's score may be
+// mutated to downrank the candidate in certain situations.
+func (ci *candidateInference) candTypeMatches(cand *candidate) bool {
+ var (
+ expTypes = make([]types.Type, 0, 2)
+ variadicType types.Type
+ )
+ if ci.objType != nil {
+ expTypes = append(expTypes, ci.objType)
+
+ if ci.variadic {
+ variadicType = types.NewSlice(ci.objType)
+ expTypes = append(expTypes, variadicType)
+ }
+ }
+
+ return cand.anyCandType(func(candType types.Type, addressable bool) bool {
+ // Take into account any type modifiers on the expected type.
+ candType = ci.applyTypeModifiers(candType, addressable)
+ if candType == nil {
+ return false
+ }
+
+ if ci.convertibleTo != nil && convertibleTo(candType, ci.convertibleTo) {
+ return true
+ }
+
+ for _, expType := range expTypes {
+ if isEmptyInterface(expType) {
+ continue
+ }
+
+ matches := ci.typeMatches(expType, candType)
+ if !matches {
+ // If candType doesn't otherwise match, consider if we can
+ // convert candType directly to expType.
+ if considerTypeConversion(candType, expType, cand.path) {
+ cand.convertTo = expType
+ // Give a major score penalty so we always prefer directly
+ // assignable candidates, all else equal.
+ cand.score *= 0.5
+ return true
+ }
+
+ continue
+ }
+
+ if expType == variadicType {
+ cand.mods = append(cand.mods, takeDotDotDot)
+ }
+
+ // Lower candidate score for untyped conversions. This avoids
+ // ranking untyped constants above candidates with an exact type
+ // match. Don't lower score of builtin constants, e.g. "true".
+ if isUntyped(candType) && !types.Identical(candType, expType) && cand.obj.Parent() != types.Universe {
+ // Bigger penalty for deep completions into other packages to
+ // avoid random constants from other packages popping up all
+ // the time.
+ if len(cand.path) > 0 && isPkgName(cand.path[0]) {
+ cand.score *= 0.5
+ } else {
+ cand.score *= 0.75
+ }
+ }
+
+ return true
+ }
+
+ // If we don't have a specific expected type, fall back to coarser
+ // object kind checks.
+ if ci.objType == nil || isEmptyInterface(ci.objType) {
+ // If we were able to apply type modifiers to our candidate type,
+ // count that as a match. For example:
+ //
+ // var foo chan int
+ // <-fo<>
+ //
+ // We were able to apply the "<-" type modifier to "foo", so "foo"
+ // matches.
+ if len(ci.modifiers) > 0 {
+ return true
+ }
+
+ // If we didn't have an exact type match, check if our object kind
+ // matches.
+ if ci.kindMatches(candType) {
+ if ci.objKind == kindFunc {
+ cand.mods = append(cand.mods, invoke)
+ }
+ return true
+ }
+ }
+
+ return false
+ })
+}
+
+// considerTypeConversion returns true if we should offer a completion
+// automatically converting "from" to "to".
+func considerTypeConversion(from, to types.Type, path []types.Object) bool {
+ // Don't offer to convert deep completions from other packages.
+ // Otherwise there are many random package level consts/vars that
+ // pop up as candidates all the time.
+ if len(path) > 0 && isPkgName(path[0]) {
+ return false
+ }
+
+ if _, ok := from.(*typeparams.TypeParam); ok {
+ return false
+ }
+
+ if !convertibleTo(from, to) {
+ return false
+ }
+
+ // Don't offer to convert ints to strings since that probably
+ // doesn't do what the user wants.
+ if isBasicKind(from, types.IsInteger) && isBasicKind(to, types.IsString) {
+ return false
+ }
+
+ return true
+}
+
+// typeMatches reports whether an object of candType makes a good
+// completion candidate given the expected type expType.
+func (ci *candidateInference) typeMatches(expType, candType types.Type) bool {
+ // Handle untyped values specially since AssignableTo gives false negatives
+ // for them (see https://golang.org/issue/32146).
+ if candBasic, ok := candType.Underlying().(*types.Basic); ok {
+ if expBasic, ok := expType.Underlying().(*types.Basic); ok {
+ // Note that the candidate and/or the expected can be untyped.
+ // In "fo<> == 100" the expected type is untyped, and the
+ // candidate could also be an untyped constant.
+
+ // Sort by is_untyped and then by is_int to simplify below logic.
+ a, b := candBasic.Info(), expBasic.Info()
+ if a&types.IsUntyped == 0 || (b&types.IsInteger > 0 && b&types.IsUntyped > 0) {
+ a, b = b, a
+ }
+
+ // If at least one is untyped...
+ if a&types.IsUntyped > 0 {
+ switch {
+ // Untyped integers are compatible with floats.
+ case a&types.IsInteger > 0 && b&types.IsFloat > 0:
+ return true
+
+ // Check if their constant kind (bool|int|float|complex|string) matches.
+ // This doesn't take into account the constant value, so there will be some
+ // false positives due to integer sign and overflow.
+ case a&types.IsConstType == b&types.IsConstType:
+ return true
+ }
+ }
+ }
+ }
+
+ // AssignableTo covers the case where the types are equal, but also handles
+ // cases like assigning a concrete type to an interface type.
+ return assignableTo(candType, expType)
+}
+
+// kindMatches reports whether candType's kind matches our expected
+// kind (e.g. slice, map, etc.).
+func (ci *candidateInference) kindMatches(candType types.Type) bool {
+ return ci.objKind > 0 && ci.objKind&candKind(candType) > 0
+}
+
+// assigneesMatch reports whether an invocation of sig matches the
+// number and type of any assignees.
+func (ci *candidateInference) assigneesMatch(cand *candidate, sig *types.Signature) bool {
+ if len(ci.assignees) == 0 {
+ return false
+ }
+
+ // Uniresult functions are always usable and are handled by the
+ // normal, non-assignees type matching logic.
+ if sig.Results().Len() == 1 {
+ return false
+ }
+
+ // Don't prefer completing into func(...interface{}) calls since all
+ // functions would match.
+ if ci.variadicAssignees && len(ci.assignees) == 1 && isEmptyInterface(deslice(ci.assignees[0])) {
+ return false
+ }
+
+ var numberOfResultsCouldMatch bool
+ if ci.variadicAssignees {
+ numberOfResultsCouldMatch = sig.Results().Len() >= len(ci.assignees)-1
+ } else {
+ numberOfResultsCouldMatch = sig.Results().Len() == len(ci.assignees)
+ }
+
+ // If our signature doesn't return the right number of values, it's
+ // not a match, so downrank it. For example:
+ //
+ // var foo func() (int, int)
+ // a, b, c := <> // downrank "foo()" since it only returns two values
+ if !numberOfResultsCouldMatch {
+ cand.score /= 2
+ return false
+ }
+
+ // If at least one assignee has a valid type, and all valid
+ // assignees match the corresponding sig result value, the signature
+ // is a match.
+ allMatch := false
+ for i := 0; i < sig.Results().Len(); i++ {
+ var assignee types.Type
+
+ // If we are completing into variadic parameters, deslice the
+ // expected variadic type.
+ if ci.variadicAssignees && i >= len(ci.assignees)-1 {
+ assignee = ci.assignees[len(ci.assignees)-1]
+ if elem := deslice(assignee); elem != nil {
+ assignee = elem
+ }
+ } else {
+ assignee = ci.assignees[i]
+ }
+
+ if assignee == nil || assignee == types.Typ[types.Invalid] {
+ continue
+ }
+
+ allMatch = ci.typeMatches(assignee, sig.Results().At(i).Type())
+ if !allMatch {
+ break
+ }
+ }
+ return allMatch
+}
+
+func (c *completer) matchingTypeName(cand *candidate) bool {
+ if !c.wantTypeName() {
+ return false
+ }
+
+ typeMatches := func(candType types.Type) bool {
+ // Take into account any type name modifier prefixes.
+ candType = c.inference.applyTypeNameModifiers(candType)
+
+ if from := c.inference.typeName.assertableFrom; from != nil {
+ // Don't suggest the starting type in type assertions. For example,
+ // if "foo" is an io.Writer, don't suggest "foo.(io.Writer)".
+ if types.Identical(from, candType) {
+ return false
+ }
+
+ if intf, ok := from.Underlying().(*types.Interface); ok {
+ if !types.AssertableTo(intf, candType) {
+ return false
+ }
+ }
+ }
+
+ if c.inference.typeName.wantComparable && !types.Comparable(candType) {
+ return false
+ }
+
+ // Skip this type if it has already been used in another type
+ // switch case.
+ for _, seen := range c.inference.typeName.seenTypeSwitchCases {
+ if types.Identical(candType, seen) {
+ return false
+ }
+ }
+
+ // We can expect a type name and have an expected type in cases like:
+ //
+ // var foo []int
+ // foo = []i<>
+ //
+ // Where our expected type is "[]int", and we expect a type name.
+ if c.inference.objType != nil {
+ return assignableTo(candType, c.inference.objType)
+ }
+
+ // Default to saying any type name is a match.
+ return true
+ }
+
+ t := cand.obj.Type()
+
+ if typeMatches(t) {
+ return true
+ }
+
+ if !types.IsInterface(t) && typeMatches(types.NewPointer(t)) {
+ if c.inference.typeName.compLitType {
+ // If we are completing a composite literal type as in
+ // "foo<>{}", to make a pointer we must prepend "&".
+ cand.mods = append(cand.mods, reference)
+ } else {
+ // If we are completing a normal type name such as "foo<>", to
+ // make a pointer we must prepend "*".
+ cand.mods = append(cand.mods, dereference)
+ }
+ return true
+ }
+
+ return false
+}
+
+var (
+ // "interface { Error() string }" (i.e. error)
+ errorIntf = types.Universe.Lookup("error").Type().Underlying().(*types.Interface)
+
+ // "interface { String() string }" (i.e. fmt.Stringer)
+ stringerIntf = types.NewInterfaceType([]*types.Func{
+ types.NewFunc(token.NoPos, nil, "String", types.NewSignature(
+ nil,
+ nil,
+ types.NewTuple(types.NewParam(token.NoPos, nil, "", types.Typ[types.String])),
+ false,
+ )),
+ }, nil).Complete()
+
+ byteType = types.Universe.Lookup("byte").Type()
+)
+
+// candKind returns the objKind of candType, if any.
+func candKind(candType types.Type) objKind {
+ var kind objKind
+
+ switch t := candType.Underlying().(type) {
+ case *types.Array:
+ kind |= kindArray
+ if t.Elem() == byteType {
+ kind |= kindBytes
+ }
+ case *types.Slice:
+ kind |= kindSlice
+ if t.Elem() == byteType {
+ kind |= kindBytes
+ }
+ case *types.Chan:
+ kind |= kindChan
+ case *types.Map:
+ kind |= kindMap
+ case *types.Pointer:
+ kind |= kindPtr
+
+ // Some builtins handle array pointers as arrays, so just report a pointer
+ // to an array as an array.
+ if _, isArray := t.Elem().Underlying().(*types.Array); isArray {
+ kind |= kindArray
+ }
+ case *types.Basic:
+ switch info := t.Info(); {
+ case info&types.IsString > 0:
+ kind |= kindString
+ case info&types.IsInteger > 0:
+ kind |= kindInt
+ case info&types.IsFloat > 0:
+ kind |= kindFloat
+ case info&types.IsComplex > 0:
+ kind |= kindComplex
+ case info&types.IsBoolean > 0:
+ kind |= kindBool
+ }
+ case *types.Signature:
+ return kindFunc
+ }
+
+ if types.Implements(candType, errorIntf) {
+ kind |= kindError
+ }
+
+ if types.Implements(candType, stringerIntf) {
+ kind |= kindStringer
+ }
+
+ return kind
+}
+
+// innermostScope returns the innermost scope for c.pos.
+func (c *completer) innermostScope() *types.Scope {
+ for _, s := range c.scopes {
+ if s != nil {
+ return s
+ }
+ }
+ return nil
+}
+
+// isSlice reports whether the object's underlying type is a slice.
+func isSlice(obj types.Object) bool {
+ if obj != nil && obj.Type() != nil {
+ if _, ok := obj.Type().Underlying().(*types.Slice); ok {
+ return true
+ }
+ }
+ return false
+}
+
+// forEachPackageMember calls f(tok, id, fn) for each package-level
+// TYPE/VAR/CONST/FUNC declaration in the Go source file, based on a
+// quick partial parse. fn is non-nil only for function declarations.
+// The AST position information is garbage.
+func forEachPackageMember(content []byte, f func(tok token.Token, id *ast.Ident, fn *ast.FuncDecl)) {
+ purged := purgeFuncBodies(content)
+ file, _ := parser.ParseFile(token.NewFileSet(), "", purged, 0)
+ for _, decl := range file.Decls {
+ switch decl := decl.(type) {
+ case *ast.GenDecl:
+ for _, spec := range decl.Specs {
+ switch spec := spec.(type) {
+ case *ast.ValueSpec: // var/const
+ for _, id := range spec.Names {
+ f(decl.Tok, id, nil)
+ }
+ case *ast.TypeSpec:
+ f(decl.Tok, spec.Name, nil)
+ }
+ }
+ case *ast.FuncDecl:
+ if decl.Recv == nil {
+ f(token.FUNC, decl.Name, decl)
+ }
+ }
+ }
+}
+
+// purgeFuncBodies returns a copy of src in which the contents of each
+// outermost {...} region except struct and interface types have been
+// deleted. It does not preserve newlines. This reduces the amount of
+// work required to parse the top-level declarations.
+func purgeFuncBodies(src []byte) []byte {
+ // Destroy the content of any {...}-bracketed regions that are
+ // not immediately preceded by a "struct" or "interface"
+ // token. That includes function bodies, composite literals,
+ // switch/select bodies, and all blocks of statements.
+ // This will lead to non-void functions that don't have return
+ // statements, which of course is a type error, but that's ok.
+
+ var out bytes.Buffer
+ file := token.NewFileSet().AddFile("", -1, len(src))
+ var sc scanner.Scanner
+ sc.Init(file, src, nil, 0)
+ var prev token.Token
+ var cursor int // last consumed src offset
+ var braces []token.Pos // stack of unclosed braces or -1 for struct/interface type
+ for {
+ pos, tok, _ := sc.Scan()
+ if tok == token.EOF {
+ break
+ }
+ switch tok {
+ case token.COMMENT:
+ // TODO(adonovan): opt: skip, to save an estimated 20% of time.
+
+ case token.LBRACE:
+ if prev == token.STRUCT || prev == token.INTERFACE {
+ pos = -1
+ }
+ braces = append(braces, pos)
+
+ case token.RBRACE:
+ if last := len(braces) - 1; last >= 0 {
+ top := braces[last]
+ braces = braces[:last]
+ if top < 0 {
+ // struct/interface type: leave alone
+ } else if len(braces) == 0 { // toplevel only
+ // Delete {...} body.
+ start, _ := safetoken.Offset(file, top)
+ end, _ := safetoken.Offset(file, pos)
+ out.Write(src[cursor : start+len("{")])
+ cursor = end
+ }
+ }
+ }
+ prev = tok
+ }
+ out.Write(src[cursor:])
+ return out.Bytes()
+}
diff --git a/gopls/internal/lsp/source/completion/deep_completion.go b/gopls/internal/lsp/source/completion/deep_completion.go
new file mode 100644
index 000000000..a72d56191
--- /dev/null
+++ b/gopls/internal/lsp/source/completion/deep_completion.go
@@ -0,0 +1,362 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package completion
+
+import (
+ "context"
+ "go/types"
+ "strings"
+ "time"
+)
+
+// MaxDeepCompletions limits deep completion results because in most cases
+// there are too many to be useful.
+const MaxDeepCompletions = 3
+
+// deepCompletionState stores our state as we search for deep completions.
+// "deep completion" refers to searching into objects' fields and methods to
+// find more completion candidates.
+type deepCompletionState struct {
+ // enabled indicates whether deep completion is permitted.
+ enabled bool
+
+ // queueClosed is used to disable adding new sub-fields to search queue
+ // once we're running out of our time budget.
+ queueClosed bool
+
+ // thisQueue holds the current breadth first search queue.
+ thisQueue []candidate
+
+ // nextQueue holds the next breadth first search iteration's queue.
+ nextQueue []candidate
+
+ // highScores tracks the highest deep candidate scores we have found
+ // so far. This is used to avoid work for low scoring deep candidates.
+ highScores [MaxDeepCompletions]float64
+
+ // candidateCount is the count of unique deep candidates encountered
+ // so far.
+ candidateCount int
+}
+
+// enqueue adds a candidate to the search queue.
+func (s *deepCompletionState) enqueue(cand candidate) {
+ s.nextQueue = append(s.nextQueue, cand)
+}
+
+// dequeue removes and returns the leftmost element from the search queue.
+func (s *deepCompletionState) dequeue() *candidate {
+ var cand *candidate
+ cand, s.thisQueue = &s.thisQueue[len(s.thisQueue)-1], s.thisQueue[:len(s.thisQueue)-1]
+ return cand
+}
+
+// scorePenalty computes a deep candidate score penalty. A candidate is
+// penalized based on depth to favor shallower candidates. We also give a
+// slight bonus to unexported objects and a slight additional penalty to
+// function objects.
+func (s *deepCompletionState) scorePenalty(cand *candidate) float64 {
+ var deepPenalty float64
+ for _, dc := range cand.path {
+ deepPenalty++
+
+ if !dc.Exported() {
+ deepPenalty -= 0.1
+ }
+
+ if _, isSig := dc.Type().Underlying().(*types.Signature); isSig {
+ deepPenalty += 0.1
+ }
+ }
+
+ // Normalize penalty to a max depth of 10.
+ return deepPenalty / 10
+}
+
+// isHighScore returns whether score is among the top MaxDeepCompletions deep
+// candidate scores encountered so far. If so, it adds score to highScores,
+// possibly displacing an existing high score.
+func (s *deepCompletionState) isHighScore(score float64) bool {
+ // Invariant: s.highScores is sorted with highest score first. Unclaimed
+ // positions are trailing zeros.
+
+ // If we beat an existing score then take its spot.
+ for i, deepScore := range s.highScores {
+ if score <= deepScore {
+ continue
+ }
+
+ if deepScore != 0 && i != len(s.highScores)-1 {
+ // If this wasn't an empty slot then we need to scooch everyone
+ // down one spot.
+ copy(s.highScores[i+1:], s.highScores[i:])
+ }
+ s.highScores[i] = score
+ return true
+ }
+
+ return false
+}
+
+// newPath returns path from search root for an object following a given
+// candidate.
+func (s *deepCompletionState) newPath(cand candidate, obj types.Object) []types.Object {
+ path := make([]types.Object, len(cand.path)+1)
+ copy(path, cand.path)
+ path[len(path)-1] = obj
+
+ return path
+}
+
+// deepSearch searches a candidate and its subordinate objects for completion
+// items if deep completion is enabled and adds the valid candidates to
+// completion items.
+func (c *completer) deepSearch(ctx context.Context) {
+ defer func() {
+ // We can return early before completing the search, so be sure to
+ // clear out our queues to not impact any further invocations.
+ c.deepState.thisQueue = c.deepState.thisQueue[:0]
+ c.deepState.nextQueue = c.deepState.nextQueue[:0]
+ }()
+
+ for len(c.deepState.nextQueue) > 0 {
+ c.deepState.thisQueue, c.deepState.nextQueue = c.deepState.nextQueue, c.deepState.thisQueue[:0]
+
+ outer:
+ for _, cand := range c.deepState.thisQueue {
+ obj := cand.obj
+
+ if obj == nil {
+ continue
+ }
+
+ // At the top level, dedupe by object.
+ if len(cand.path) == 0 {
+ if c.seen[obj] {
+ continue
+ }
+ c.seen[obj] = true
+ }
+
+ // If obj is not accessible because it lives in another package and is
+ // not exported, don't treat it as a completion candidate unless it's
+ // a package completion candidate.
+ if !c.completionContext.packageCompletion &&
+ obj.Pkg() != nil && obj.Pkg() != c.pkg.GetTypes() && !obj.Exported() {
+ continue
+ }
+
+ // If we want a type name, don't offer non-type name candidates.
+ // However, do offer package names since they can contain type names,
+ // and do offer any candidate without a type since we aren't sure if it
+ // is a type name or not (i.e. unimported candidate).
+ if c.wantTypeName() && obj.Type() != nil && !isTypeName(obj) && !isPkgName(obj) {
+ continue
+ }
+
+ // When searching deep, make sure we don't have a cycle in our chain.
+ // We don't dedupe by object because we want to allow both "foo.Baz"
+ // and "bar.Baz" even though "Baz" is represented the same types.Object
+ // in both.
+ for _, seenObj := range cand.path {
+ if seenObj == obj {
+ continue outer
+ }
+ }
+
+ c.addCandidate(ctx, &cand)
+
+ c.deepState.candidateCount++
+ if c.opts.budget > 0 && c.deepState.candidateCount%100 == 0 {
+ spent := float64(time.Since(c.startTime)) / float64(c.opts.budget)
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ // If we are almost out of budgeted time, no further elements
+ // should be added to the queue. This ensures remaining time is
+ // used for processing current queue.
+ if !c.deepState.queueClosed && spent >= 0.85 {
+ c.deepState.queueClosed = true
+ }
+ }
+ }
+
+ // if deep search is disabled, don't add any more candidates.
+ if !c.deepState.enabled || c.deepState.queueClosed {
+ continue
+ }
+
+ // Searching members for a type name doesn't make sense.
+ if isTypeName(obj) {
+ continue
+ }
+ if obj.Type() == nil {
+ continue
+ }
+
+ // Don't search embedded fields because they were already included in their
+ // parent's fields.
+ if v, ok := obj.(*types.Var); ok && v.Embedded() {
+ continue
+ }
+
+ if sig, ok := obj.Type().Underlying().(*types.Signature); ok {
+ // If obj is a function that takes no arguments and returns one
+ // value, keep searching across the function call.
+ if sig.Params().Len() == 0 && sig.Results().Len() == 1 {
+ path := c.deepState.newPath(cand, obj)
+ // The result of a function call is not addressable.
+ c.methodsAndFields(sig.Results().At(0).Type(), false, cand.imp, func(newCand candidate) {
+ newCand.pathInvokeMask = cand.pathInvokeMask | (1 << uint64(len(cand.path)))
+ newCand.path = path
+ c.deepState.enqueue(newCand)
+ })
+ }
+ }
+
+ path := c.deepState.newPath(cand, obj)
+ switch obj := obj.(type) {
+ case *types.PkgName:
+ c.packageMembers(obj.Imported(), stdScore, cand.imp, func(newCand candidate) {
+ newCand.pathInvokeMask = cand.pathInvokeMask
+ newCand.path = path
+ c.deepState.enqueue(newCand)
+ })
+ default:
+ c.methodsAndFields(obj.Type(), cand.addressable, cand.imp, func(newCand candidate) {
+ newCand.pathInvokeMask = cand.pathInvokeMask
+ newCand.path = path
+ c.deepState.enqueue(newCand)
+ })
+ }
+ }
+ }
+}
+
+// addCandidate adds a completion candidate to suggestions, without searching
+// its members for more candidates.
+func (c *completer) addCandidate(ctx context.Context, cand *candidate) {
+ obj := cand.obj
+ if c.matchingCandidate(cand) {
+ cand.score *= highScore
+
+ if p := c.penalty(cand); p > 0 {
+ cand.score *= (1 - p)
+ }
+ } else if isTypeName(obj) {
+ // If obj is a *types.TypeName that didn't otherwise match, check
+ // if a literal object of this type makes a good candidate.
+
+ // We only care about named types (i.e. don't want builtin types).
+ if _, isNamed := obj.Type().(*types.Named); isNamed {
+ c.literal(ctx, obj.Type(), cand.imp)
+ }
+ }
+
+ // Lower score of method calls so we prefer fields and vars over calls.
+ if cand.hasMod(invoke) {
+ if sig, ok := obj.Type().Underlying().(*types.Signature); ok && sig.Recv() != nil {
+ cand.score *= 0.9
+ }
+ }
+
+ // Prefer private objects over public ones.
+ if !obj.Exported() && obj.Parent() != types.Universe {
+ cand.score *= 1.1
+ }
+
+ // Slight penalty for index modifier (e.g. changing "foo" to
+ // "foo[]") to curb false positives.
+ if cand.hasMod(index) {
+ cand.score *= 0.9
+ }
+
+ // Favor shallow matches by lowering score according to depth.
+ cand.score -= cand.score * c.deepState.scorePenalty(cand)
+
+ if cand.score < 0 {
+ cand.score = 0
+ }
+
+ cand.name = deepCandName(cand)
+ if item, err := c.item(ctx, *cand); err == nil {
+ c.items = append(c.items, item)
+ }
+}
+
+// deepCandName produces the full candidate name including any
+// ancestor objects. For example, "foo.bar().baz" for candidate "baz".
+func deepCandName(cand *candidate) string {
+ totalLen := len(cand.obj.Name())
+ for i, obj := range cand.path {
+ totalLen += len(obj.Name()) + 1
+ if cand.pathInvokeMask&(1<<uint16(i)) > 0 {
+ totalLen += 2
+ }
+ }
+
+ var buf strings.Builder
+ buf.Grow(totalLen)
+
+ for i, obj := range cand.path {
+ buf.WriteString(obj.Name())
+ if cand.pathInvokeMask&(1<<uint16(i)) > 0 {
+ buf.WriteByte('(')
+ buf.WriteByte(')')
+ }
+ buf.WriteByte('.')
+ }
+
+ buf.WriteString(cand.obj.Name())
+
+ return buf.String()
+}
+
+// penalty reports a score penalty for cand in the range (0, 1).
+// For example, a candidate is penalized if it has already been used
+// in another switch case statement.
+func (c *completer) penalty(cand *candidate) float64 {
+ for _, p := range c.inference.penalized {
+ if c.objChainMatches(cand, p.objChain) {
+ return p.penalty
+ }
+ }
+
+ return 0
+}
+
+// objChainMatches reports whether cand combined with the surrounding
+// object prefix matches chain.
+func (c *completer) objChainMatches(cand *candidate, chain []types.Object) bool {
+ // For example, when completing:
+ //
+ // foo.ba<>
+ //
+ // If we are considering the deep candidate "bar.baz", cand is baz,
+ // objChain is [foo] and deepChain is [bar]. We would match the
+ // chain [foo, bar, baz].
+ if len(chain) != len(c.inference.objChain)+len(cand.path)+1 {
+ return false
+ }
+
+ if chain[len(chain)-1] != cand.obj {
+ return false
+ }
+
+ for i, o := range c.inference.objChain {
+ if chain[i] != o {
+ return false
+ }
+ }
+
+ for i, o := range cand.path {
+ if chain[i+len(c.inference.objChain)] != o {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/internal/lsp/source/completion/deep_completion_test.go b/gopls/internal/lsp/source/completion/deep_completion_test.go
index 27009af1b..27009af1b 100644
--- a/internal/lsp/source/completion/deep_completion_test.go
+++ b/gopls/internal/lsp/source/completion/deep_completion_test.go
diff --git a/gopls/internal/lsp/source/completion/definition.go b/gopls/internal/lsp/source/completion/definition.go
new file mode 100644
index 000000000..d7f51f002
--- /dev/null
+++ b/gopls/internal/lsp/source/completion/definition.go
@@ -0,0 +1,160 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package completion
+
+import (
+ "go/ast"
+ "go/types"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/snippet"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+)
+
+// some function definitions in test files can be completed
+// So far, TestFoo(t *testing.T), TestMain(m *testing.M)
+// BenchmarkFoo(b *testing.B), FuzzFoo(f *testing.F)
+
+// path[0] is known to be *ast.Ident
+func definition(path []ast.Node, obj types.Object, pgf *source.ParsedGoFile) ([]CompletionItem, *Selection) {
+ if _, ok := obj.(*types.Func); !ok {
+ return nil, nil // not a function at all
+ }
+ if !strings.HasSuffix(pgf.URI.Filename(), "_test.go") {
+ return nil, nil // not a test file
+ }
+
+ name := path[0].(*ast.Ident).Name
+ if len(name) == 0 {
+ // can't happen
+ return nil, nil
+ }
+ start := path[0].Pos()
+ end := path[0].End()
+ sel := &Selection{
+ content: "",
+ cursor: start,
+ tokFile: pgf.Tok,
+ start: start,
+ end: end,
+ mapper: pgf.Mapper,
+ }
+ var ans []CompletionItem
+ var hasParens bool
+ n, ok := path[1].(*ast.FuncDecl)
+ if !ok {
+ return nil, nil // can't happen
+ }
+ if n.Recv != nil {
+ return nil, nil // a method, not a function
+ }
+ t := n.Type.Params
+ if t.Closing != t.Opening {
+ hasParens = true
+ }
+
+ // Always suggest TestMain, if possible
+ if strings.HasPrefix("TestMain", name) {
+ if hasParens {
+ ans = append(ans, defItem("TestMain", obj))
+ } else {
+ ans = append(ans, defItem("TestMain(m *testing.M)", obj))
+ }
+ }
+
+ // If a snippet is possible, suggest it
+ if strings.HasPrefix("Test", name) {
+ if hasParens {
+ ans = append(ans, defItem("Test", obj))
+ } else {
+ ans = append(ans, defSnippet("Test", "(t *testing.T)", obj))
+ }
+ return ans, sel
+ } else if strings.HasPrefix("Benchmark", name) {
+ if hasParens {
+ ans = append(ans, defItem("Benchmark", obj))
+ } else {
+ ans = append(ans, defSnippet("Benchmark", "(b *testing.B)", obj))
+ }
+ return ans, sel
+ } else if strings.HasPrefix("Fuzz", name) {
+ if hasParens {
+ ans = append(ans, defItem("Fuzz", obj))
+ } else {
+ ans = append(ans, defSnippet("Fuzz", "(f *testing.F)", obj))
+ }
+ return ans, sel
+ }
+
+ // Fill in the argument for what the user has already typed
+ if got := defMatches(name, "Test", path, "(t *testing.T)"); got != "" {
+ ans = append(ans, defItem(got, obj))
+ } else if got := defMatches(name, "Benchmark", path, "(b *testing.B)"); got != "" {
+ ans = append(ans, defItem(got, obj))
+ } else if got := defMatches(name, "Fuzz", path, "(f *testing.F)"); got != "" {
+ ans = append(ans, defItem(got, obj))
+ }
+ return ans, sel
+}
+
+// defMatches returns text for defItem, never for defSnippet
+func defMatches(name, pat string, path []ast.Node, arg string) string {
+ if !strings.HasPrefix(name, pat) {
+ return ""
+ }
+ c, _ := utf8.DecodeRuneInString(name[len(pat):])
+ if unicode.IsLower(c) {
+ return ""
+ }
+ fd, ok := path[1].(*ast.FuncDecl)
+ if !ok {
+ // we don't know what's going on
+ return ""
+ }
+ fp := fd.Type.Params
+ if len(fp.List) > 0 {
+ // signature already there, nothing to suggest
+ return ""
+ }
+ if fp.Opening != fp.Closing {
+ // nothing: completion works on words, not easy to insert arg
+ return ""
+ }
+ // suggesting signature too
+ return name + arg
+}
+
+func defSnippet(prefix, suffix string, obj types.Object) CompletionItem {
+ var sn snippet.Builder
+ sn.WriteText(prefix)
+ sn.WritePlaceholder(func(b *snippet.Builder) { b.WriteText("Xxx") })
+ sn.WriteText(suffix + " {\n\t")
+ sn.WriteFinalTabstop()
+ sn.WriteText("\n}")
+ return CompletionItem{
+ Label: prefix + "Xxx" + suffix,
+ Detail: "tab, type the rest of the name, then tab",
+ Kind: protocol.FunctionCompletion,
+ Depth: 0,
+ Score: 10,
+ snippet: &sn,
+ Documentation: prefix + " test function",
+ isSlice: isSlice(obj),
+ }
+}
+func defItem(val string, obj types.Object) CompletionItem {
+ return CompletionItem{
+ Label: val,
+ InsertText: val,
+ Kind: protocol.FunctionCompletion,
+ Depth: 0,
+ Score: 9, // prefer the snippets when available
+ Documentation: "complete the function name",
+ isSlice: isSlice(obj),
+ }
+}
diff --git a/gopls/internal/lsp/source/completion/format.go b/gopls/internal/lsp/source/completion/format.go
new file mode 100644
index 000000000..c2d2c0bc0
--- /dev/null
+++ b/gopls/internal/lsp/source/completion/format.go
@@ -0,0 +1,338 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package completion
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "go/ast"
+ "go/doc"
+ "go/types"
+ "strings"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/safetoken"
+ "golang.org/x/tools/gopls/internal/lsp/snippet"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/imports"
+ "golang.org/x/tools/internal/typeparams"
+)
+
+var (
+ errNoMatch = errors.New("not a surrounding match")
+ errLowScore = errors.New("not a high scoring candidate")
+)
+
+// item formats a candidate to a CompletionItem.
+func (c *completer) item(ctx context.Context, cand candidate) (CompletionItem, error) {
+ obj := cand.obj
+
+ // if the object isn't a valid match against the surrounding, return early.
+ matchScore := c.matcher.Score(cand.name)
+ if matchScore <= 0 {
+ return CompletionItem{}, errNoMatch
+ }
+ cand.score *= float64(matchScore)
+
+ // Ignore deep candidates that won't be in the MaxDeepCompletions anyway.
+ if len(cand.path) != 0 && !c.deepState.isHighScore(cand.score) {
+ return CompletionItem{}, errLowScore
+ }
+
+ // Handle builtin types separately.
+ if obj.Parent() == types.Universe {
+ return c.formatBuiltin(ctx, cand)
+ }
+
+ var (
+ label = cand.name
+ detail = types.TypeString(obj.Type(), c.qf)
+ insert = label
+ kind = protocol.TextCompletion
+ snip snippet.Builder
+ protocolEdits []protocol.TextEdit
+ )
+ if obj.Type() == nil {
+ detail = ""
+ }
+ if isTypeName(obj) && c.wantTypeParams() {
+ x := cand.obj.(*types.TypeName)
+ if named, ok := x.Type().(*types.Named); ok {
+ tp := typeparams.ForNamed(named)
+ label += source.FormatTypeParams(tp)
+ insert = label // maintain invariant above (label == insert)
+ }
+ }
+
+ snip.WriteText(insert)
+
+ switch obj := obj.(type) {
+ case *types.TypeName:
+ detail, kind = source.FormatType(obj.Type(), c.qf)
+ case *types.Const:
+ kind = protocol.ConstantCompletion
+ case *types.Var:
+ if _, ok := obj.Type().(*types.Struct); ok {
+ detail = "struct{...}" // for anonymous structs
+ } else if obj.IsField() {
+ var err error
+ detail, err = source.FormatVarType(ctx, c.snapshot, c.pkg, obj, c.qf, c.mq)
+ if err != nil {
+ return CompletionItem{}, err
+ }
+ }
+ if obj.IsField() {
+ kind = protocol.FieldCompletion
+ c.structFieldSnippet(cand, detail, &snip)
+ } else {
+ kind = protocol.VariableCompletion
+ }
+ if obj.Type() == nil {
+ break
+ }
+ case *types.Func:
+ sig, ok := obj.Type().Underlying().(*types.Signature)
+ if !ok {
+ break
+ }
+ kind = protocol.FunctionCompletion
+ if sig != nil && sig.Recv() != nil {
+ kind = protocol.MethodCompletion
+ }
+ case *types.PkgName:
+ kind = protocol.ModuleCompletion
+ detail = fmt.Sprintf("%q", obj.Imported().Path())
+ case *types.Label:
+ kind = protocol.ConstantCompletion
+ detail = "label"
+ }
+
+ var prefix string
+ for _, mod := range cand.mods {
+ switch mod {
+ case reference:
+ prefix = "&" + prefix
+ case dereference:
+ prefix = "*" + prefix
+ case chanRead:
+ prefix = "<-" + prefix
+ }
+ }
+
+ var (
+ suffix string
+ funcType = obj.Type()
+ )
+Suffixes:
+ for _, mod := range cand.mods {
+ switch mod {
+ case invoke:
+ if sig, ok := funcType.Underlying().(*types.Signature); ok {
+ s, err := source.NewSignature(ctx, c.snapshot, c.pkg, sig, nil, c.qf, c.mq)
+ if err != nil {
+ return CompletionItem{}, err
+ }
+ c.functionCallSnippet("", s.TypeParams(), s.Params(), &snip)
+ if sig.Results().Len() == 1 {
+ funcType = sig.Results().At(0).Type()
+ }
+ detail = "func" + s.Format()
+ }
+
+ if !c.opts.snippets {
+ // Without snippets the candidate will not include "()". Don't
+ // add further suffixes since they will be invalid. For
+ // example, with snippets "foo()..." would become "foo..."
+ // without snippets if we added the dotDotDot.
+ break Suffixes
+ }
+ case takeSlice:
+ suffix += "[:]"
+ case takeDotDotDot:
+ suffix += "..."
+ case index:
+ snip.WriteText("[")
+ snip.WritePlaceholder(nil)
+ snip.WriteText("]")
+ }
+ }
+
+ // If this candidate needs an additional import statement,
+ // add the additional text edits needed.
+ if cand.imp != nil {
+ addlEdits, err := c.importEdits(cand.imp)
+
+ if err != nil {
+ return CompletionItem{}, err
+ }
+
+ protocolEdits = append(protocolEdits, addlEdits...)
+ if kind != protocol.ModuleCompletion {
+ if detail != "" {
+ detail += " "
+ }
+ detail += fmt.Sprintf("(from %q)", cand.imp.importPath)
+ }
+ }
+
+ if cand.convertTo != nil {
+ typeName := types.TypeString(cand.convertTo, c.qf)
+
+ switch cand.convertTo.(type) {
+ // We need extra parens when casting to these types. For example,
+ // we need "(*int)(foo)", not "*int(foo)".
+ case *types.Pointer, *types.Signature:
+ typeName = "(" + typeName + ")"
+ }
+
+ prefix = typeName + "(" + prefix
+ suffix = ")"
+ }
+
+ if prefix != "" {
+ // If we are in a selector, add an edit to place prefix before selector.
+ if sel := enclosingSelector(c.path, c.pos); sel != nil {
+ edits, err := c.editText(sel.Pos(), sel.Pos(), prefix)
+ if err != nil {
+ return CompletionItem{}, err
+ }
+ protocolEdits = append(protocolEdits, edits...)
+ } else {
+ // If there is no selector, just stick the prefix at the start.
+ insert = prefix + insert
+ snip.PrependText(prefix)
+ }
+ }
+
+ if suffix != "" {
+ insert += suffix
+ snip.WriteText(suffix)
+ }
+
+ detail = strings.TrimPrefix(detail, "untyped ")
+ // override computed detail with provided detail, if something is provided.
+ if cand.detail != "" {
+ detail = cand.detail
+ }
+ item := CompletionItem{
+ Label: label,
+ InsertText: insert,
+ AdditionalTextEdits: protocolEdits,
+ Detail: detail,
+ Kind: kind,
+ Score: cand.score,
+ Depth: len(cand.path),
+ snippet: &snip,
+ isSlice: isSlice(obj),
+ }
+ // If the user doesn't want documentation for completion items.
+ if !c.opts.documentation {
+ return item, nil
+ }
+ pos := safetoken.StartPosition(c.pkg.FileSet(), obj.Pos())
+
+ // We ignore errors here, because some types, like "unsafe" or "error",
+ // may not have valid positions that we can use to get documentation.
+ if !pos.IsValid() {
+ return item, nil
+ }
+
+ comment, err := source.HoverDocForObject(ctx, c.snapshot, c.pkg.FileSet(), obj)
+ if err != nil {
+ event.Error(ctx, fmt.Sprintf("failed to find Hover for %q", obj.Name()), err)
+ return item, nil
+ }
+ if c.opts.fullDocumentation {
+ item.Documentation = comment.Text()
+ } else {
+ item.Documentation = doc.Synopsis(comment.Text())
+ }
+ // The desired pattern is `^// Deprecated`, but the prefix has been removed
+ // TODO(rfindley): It doesn't look like this does the right thing for
+ // multi-line comments.
+ if strings.HasPrefix(comment.Text(), "Deprecated") {
+ if c.snapshot.View().Options().CompletionTags {
+ item.Tags = []protocol.CompletionItemTag{protocol.ComplDeprecated}
+ } else if c.snapshot.View().Options().CompletionDeprecated {
+ item.Deprecated = true
+ }
+ }
+
+ return item, nil
+}
+
+// importEdits produces the text edits necessary to add the given import to the current file.
+func (c *completer) importEdits(imp *importInfo) ([]protocol.TextEdit, error) {
+ if imp == nil {
+ return nil, nil
+ }
+
+ pgf, err := c.pkg.File(span.URIFromPath(c.filename))
+ if err != nil {
+ return nil, err
+ }
+
+ return source.ComputeOneImportFixEdits(c.snapshot, pgf, &imports.ImportFix{
+ StmtInfo: imports.ImportInfo{
+ ImportPath: imp.importPath,
+ Name: imp.name,
+ },
+ // IdentName is unused on this path and is difficult to get.
+ FixType: imports.AddImport,
+ })
+}
+
+func (c *completer) formatBuiltin(ctx context.Context, cand candidate) (CompletionItem, error) {
+ obj := cand.obj
+ item := CompletionItem{
+ Label: obj.Name(),
+ InsertText: obj.Name(),
+ Score: cand.score,
+ }
+ switch obj.(type) {
+ case *types.Const:
+ item.Kind = protocol.ConstantCompletion
+ case *types.Builtin:
+ item.Kind = protocol.FunctionCompletion
+ sig, err := source.NewBuiltinSignature(ctx, c.snapshot, obj.Name())
+ if err != nil {
+ return CompletionItem{}, err
+ }
+ item.Detail = "func" + sig.Format()
+ item.snippet = &snippet.Builder{}
+ c.functionCallSnippet(obj.Name(), sig.TypeParams(), sig.Params(), item.snippet)
+ case *types.TypeName:
+ if types.IsInterface(obj.Type()) {
+ item.Kind = protocol.InterfaceCompletion
+ } else {
+ item.Kind = protocol.ClassCompletion
+ }
+ case *types.Nil:
+ item.Kind = protocol.VariableCompletion
+ }
+ return item, nil
+}
+
+// decide if the type params (if any) should be part of the completion
+// which only possible for types.Named and types.Signature
+// (so far, only in receivers, e.g.; func (s *GENERIC[K, V])..., which is a types.Named)
+func (c *completer) wantTypeParams() bool {
+ // Need to be lexically in a receiver, and a child of an IndexListExpr
+ // (but IndexListExpr only exists with go1.18)
+ start := c.path[0].Pos()
+ for i, nd := range c.path {
+ if fd, ok := nd.(*ast.FuncDecl); ok {
+ if i > 0 && fd.Recv != nil && start < fd.Recv.End() {
+ return true
+ } else {
+ return false
+ }
+ }
+ }
+ return false
+}
diff --git a/gopls/internal/lsp/source/completion/fuzz.go b/gopls/internal/lsp/source/completion/fuzz.go
new file mode 100644
index 000000000..08e7654c7
--- /dev/null
+++ b/gopls/internal/lsp/source/completion/fuzz.go
@@ -0,0 +1,142 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package completion
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "go/types"
+ "strings"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+)
+
+// golang/go#51089
+// *testing.F deserves special treatment as member use is constrained:
+// The arguments to f.Fuzz are determined by the arguments to a previous f.Add
+// Inside f.Fuzz only f.Failed and f.Name are allowed.
+// PJW: are there other packages where we can deduce usage constraints?
+
+// if we find fuzz completions, then return true, as those are the only completions to offer
+func (c *completer) fuzz(typ types.Type, mset *types.MethodSet, imp *importInfo, cb func(candidate), fset *token.FileSet) bool {
+ // 1. inside f.Fuzz? (only f.Failed and f.Name)
+ // 2. possible completing f.Fuzz?
+ // [Ident,SelectorExpr,Callexpr,ExprStmt,BlockiStmt,FuncDecl(Fuzz...)]
+ // 3. before f.Fuzz, same (for 2., offer choice when looking at an F)
+
+ // does the path contain FuncLit as arg to f.Fuzz CallExpr?
+ inside := false
+Loop:
+ for i, n := range c.path {
+ switch v := n.(type) {
+ case *ast.CallExpr:
+ if len(v.Args) != 1 {
+ continue Loop
+ }
+ if _, ok := v.Args[0].(*ast.FuncLit); !ok {
+ continue
+ }
+ if s, ok := v.Fun.(*ast.SelectorExpr); !ok || s.Sel.Name != "Fuzz" {
+ continue
+ }
+ if i > 2 { // avoid t.Fuzz itself in tests
+ inside = true
+ break Loop
+ }
+ }
+ }
+ if inside {
+ for i := 0; i < mset.Len(); i++ {
+ o := mset.At(i).Obj()
+ if o.Name() == "Failed" || o.Name() == "Name" {
+ cb(candidate{
+ obj: o,
+ score: stdScore,
+ imp: imp,
+ addressable: true,
+ })
+ }
+ }
+ return true
+ }
+ // if it could be t.Fuzz, look for the preceding t.Add
+ id, ok := c.path[0].(*ast.Ident)
+ if ok && strings.HasPrefix("Fuzz", id.Name) {
+ var add *ast.CallExpr
+ f := func(n ast.Node) bool {
+ if n == nil {
+ return true
+ }
+ call, ok := n.(*ast.CallExpr)
+ if !ok {
+ return true
+ }
+ s, ok := call.Fun.(*ast.SelectorExpr)
+ if !ok {
+ return true
+ }
+ if s.Sel.Name != "Add" {
+ return true
+ }
+ // Sel.X should be of type *testing.F
+ got := c.pkg.GetTypesInfo().Types[s.X]
+ if got.Type.String() == "*testing.F" {
+ add = call
+ }
+ return false // because we're done...
+ }
+ // look at the enclosing FuzzFoo functions
+ if len(c.path) < 2 {
+ return false
+ }
+ n := c.path[len(c.path)-2]
+ if _, ok := n.(*ast.FuncDecl); !ok {
+ // the path should start with ast.File, ast.FuncDecl, ...
+ // but it didn't, so give up
+ return false
+ }
+ ast.Inspect(n, f)
+ if add == nil {
+ // looks like f.Fuzz without a preceding f.Add.
+ // let the regular completion handle it.
+ return false
+ }
+
+ lbl := "Fuzz(func(t *testing.T"
+ for i, a := range add.Args {
+ info := c.pkg.GetTypesInfo().TypeOf(a)
+ if info == nil {
+ return false // How could this happen, but better safe than panic.
+ }
+ lbl += fmt.Sprintf(", %c %s", 'a'+i, info)
+ }
+ lbl += ")"
+ xx := CompletionItem{
+ Label: lbl,
+ InsertText: lbl,
+ Kind: protocol.FunctionCompletion,
+ Depth: 0,
+ Score: 10, // pretty confident the user should see this
+ Documentation: "argument types from f.Add",
+ isSlice: false,
+ }
+ c.items = append(c.items, xx)
+ for i := 0; i < mset.Len(); i++ {
+ o := mset.At(i).Obj()
+ if o.Name() != "Fuzz" {
+ cb(candidate{
+ obj: o,
+ score: stdScore,
+ imp: imp,
+ addressable: true,
+ })
+ }
+ }
+ return true // done
+ }
+ // let the standard processing take care of it instead
+ return false
+}
diff --git a/gopls/internal/lsp/source/completion/keywords.go b/gopls/internal/lsp/source/completion/keywords.go
new file mode 100644
index 000000000..a068ca2d5
--- /dev/null
+++ b/gopls/internal/lsp/source/completion/keywords.go
@@ -0,0 +1,154 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package completion
+
+import (
+ "go/ast"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+)
+
+const (
+ BREAK = "break"
+ CASE = "case"
+ CHAN = "chan"
+ CONST = "const"
+ CONTINUE = "continue"
+ DEFAULT = "default"
+ DEFER = "defer"
+ ELSE = "else"
+ FALLTHROUGH = "fallthrough"
+ FOR = "for"
+ FUNC = "func"
+ GO = "go"
+ GOTO = "goto"
+ IF = "if"
+ IMPORT = "import"
+ INTERFACE = "interface"
+ MAP = "map"
+ PACKAGE = "package"
+ RANGE = "range"
+ RETURN = "return"
+ SELECT = "select"
+ STRUCT = "struct"
+ SWITCH = "switch"
+ TYPE = "type"
+ VAR = "var"
+)
+
+// addKeywordCompletions offers keyword candidates appropriate at the position.
+func (c *completer) addKeywordCompletions() {
+ seen := make(map[string]bool)
+
+ if c.wantTypeName() && c.inference.objType == nil {
+ // If we want a type name but don't have an expected obj type,
+ // include "interface", "struct", "func", "chan", and "map".
+
+ // "interface" and "struct" are more common declaring named types.
+ // Give them a higher score if we are in a type declaration.
+ structIntf, funcChanMap := stdScore, highScore
+ if len(c.path) > 1 {
+ if _, namedDecl := c.path[1].(*ast.TypeSpec); namedDecl {
+ structIntf, funcChanMap = highScore, stdScore
+ }
+ }
+
+ c.addKeywordItems(seen, structIntf, STRUCT, INTERFACE)
+ c.addKeywordItems(seen, funcChanMap, FUNC, CHAN, MAP)
+ }
+
+ // If we are at the file scope, only offer decl keywords. We don't
+ // get *ast.Idents at the file scope because non-keyword identifiers
+ // turn into *ast.BadDecl, not *ast.Ident.
+ if len(c.path) == 1 || isASTFile(c.path[1]) {
+ c.addKeywordItems(seen, stdScore, TYPE, CONST, VAR, FUNC, IMPORT)
+ return
+ } else if _, ok := c.path[0].(*ast.Ident); !ok {
+ // Otherwise only offer keywords if the client is completing an identifier.
+ return
+ }
+
+ if len(c.path) > 2 {
+ // Offer "range" if we are in ast.ForStmt.Init. This is what the
+ // AST looks like before "range" is typed, e.g. "for i := r<>".
+ if loop, ok := c.path[2].(*ast.ForStmt); ok && source.NodeContains(loop.Init, c.pos) {
+ c.addKeywordItems(seen, stdScore, RANGE)
+ }
+ }
+
+ // Only suggest keywords if we are beginning a statement.
+ switch n := c.path[1].(type) {
+ case *ast.BlockStmt, *ast.ExprStmt:
+ // OK - our ident must be at beginning of statement.
+ case *ast.CommClause:
+ // Make sure we aren't in the Comm statement.
+ if !n.Colon.IsValid() || c.pos <= n.Colon {
+ return
+ }
+ case *ast.CaseClause:
+ // Make sure we aren't in the case List.
+ if !n.Colon.IsValid() || c.pos <= n.Colon {
+ return
+ }
+ default:
+ return
+ }
+
+ // Filter out keywords depending on scope
+ // Skip the first one because we want to look at the enclosing scopes
+ path := c.path[1:]
+ for i, n := range path {
+ switch node := n.(type) {
+ case *ast.CaseClause:
+ // only recommend "fallthrough" and "break" within the bodies of a case clause
+ if c.pos > node.Colon {
+ c.addKeywordItems(seen, stdScore, BREAK)
+ // "fallthrough" is only valid in switch statements.
+ // A case clause is always nested within a block statement in a switch statement,
+ // that block statement is nested within either a TypeSwitchStmt or a SwitchStmt.
+ if i+2 >= len(path) {
+ continue
+ }
+ if _, ok := path[i+2].(*ast.SwitchStmt); ok {
+ c.addKeywordItems(seen, stdScore, FALLTHROUGH)
+ }
+ }
+ case *ast.CommClause:
+ if c.pos > node.Colon {
+ c.addKeywordItems(seen, stdScore, BREAK)
+ }
+ case *ast.TypeSwitchStmt, *ast.SelectStmt, *ast.SwitchStmt:
+ c.addKeywordItems(seen, stdScore, CASE, DEFAULT)
+ case *ast.ForStmt, *ast.RangeStmt:
+ c.addKeywordItems(seen, stdScore, BREAK, CONTINUE)
+ // This is a bit weak, functions allow for many keywords
+ case *ast.FuncDecl:
+ if node.Body != nil && c.pos > node.Body.Lbrace {
+ c.addKeywordItems(seen, stdScore, DEFER, RETURN, FOR, GO, SWITCH, SELECT, IF, ELSE, VAR, CONST, GOTO, TYPE)
+ }
+ }
+ }
+}
+
+// addKeywordItems dedupes and adds completion items for the specified
+// keywords with the specified score.
+func (c *completer) addKeywordItems(seen map[string]bool, score float64, kws ...string) {
+ for _, kw := range kws {
+ if seen[kw] {
+ continue
+ }
+ seen[kw] = true
+
+ if matchScore := c.matcher.Score(kw); matchScore > 0 {
+ c.items = append(c.items, CompletionItem{
+ Label: kw,
+ Kind: protocol.KeywordCompletion,
+ InsertText: kw,
+ Score: score * float64(matchScore),
+ })
+ }
+ }
+}
diff --git a/internal/lsp/source/completion/labels.go b/gopls/internal/lsp/source/completion/labels.go
index e4fd961e3..e4fd961e3 100644
--- a/internal/lsp/source/completion/labels.go
+++ b/gopls/internal/lsp/source/completion/labels.go
diff --git a/gopls/internal/lsp/source/completion/literal.go b/gopls/internal/lsp/source/completion/literal.go
new file mode 100644
index 000000000..06ed559be
--- /dev/null
+++ b/gopls/internal/lsp/source/completion/literal.go
@@ -0,0 +1,592 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package completion
+
+import (
+ "context"
+ "fmt"
+ "go/types"
+ "strings"
+ "unicode"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/snippet"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/typeparams"
+)
+
+// literal generates composite literal, function literal, and make()
+// completion items.
+func (c *completer) literal(ctx context.Context, literalType types.Type, imp *importInfo) {
+ if !c.opts.literal {
+ return
+ }
+
+ expType := c.inference.objType
+
+ if c.inference.matchesVariadic(literalType) {
+ // Don't offer literal slice candidates for variadic arguments.
+ // For example, don't offer "[]interface{}{}" in "fmt.Print(<>)".
+ return
+ }
+
+ // Avoid literal candidates if the expected type is an empty
+ // interface. It isn't very useful to suggest a literal candidate of
+ // every possible type.
+ if expType != nil && isEmptyInterface(expType) {
+ return
+ }
+
+ // We handle unnamed literal completions explicitly before searching
+ // for candidates. Avoid named-type literal completions for
+ // unnamed-type expected type since that results in duplicate
+ // candidates. For example, in
+ //
+ // type mySlice []int
+ // var []int = <>
+ //
+ // don't offer "mySlice{}" since we have already added a candidate
+ // of "[]int{}".
+ if _, named := literalType.(*types.Named); named && expType != nil {
+ if _, named := source.Deref(expType).(*types.Named); !named {
+ return
+ }
+ }
+
+ // Check if an object of type literalType would match our expected type.
+ cand := candidate{
+ obj: c.fakeObj(literalType),
+ }
+
+ switch literalType.Underlying().(type) {
+ // These literal types are addressable (e.g. "&[]int{}"), others are
+ // not (e.g. can't do "&(func(){})").
+ case *types.Struct, *types.Array, *types.Slice, *types.Map:
+ cand.addressable = true
+ }
+
+ if !c.matchingCandidate(&cand) || cand.convertTo != nil {
+ return
+ }
+
+ var (
+ qf = c.qf
+ sel = enclosingSelector(c.path, c.pos)
+ )
+
+ // Don't qualify the type name if we are in a selector expression
+ // since the package name is already present.
+ if sel != nil {
+ qf = func(_ *types.Package) string { return "" }
+ }
+
+ snip, typeName := c.typeNameSnippet(literalType, qf)
+
+ // A type name of "[]int" doesn't work very will with the matcher
+ // since "[" isn't a valid identifier prefix. Here we strip off the
+ // slice (and array) prefix yielding just "int".
+ matchName := typeName
+ switch t := literalType.(type) {
+ case *types.Slice:
+ matchName = types.TypeString(t.Elem(), qf)
+ case *types.Array:
+ matchName = types.TypeString(t.Elem(), qf)
+ }
+
+ addlEdits, err := c.importEdits(imp)
+ if err != nil {
+ event.Error(ctx, "error adding import for literal candidate", err)
+ return
+ }
+
+ // If prefix matches the type name, client may want a composite literal.
+ if score := c.matcher.Score(matchName); score > 0 {
+ if cand.hasMod(reference) {
+ if sel != nil {
+ // If we are in a selector we must place the "&" before the selector.
+ // For example, "foo.B<>" must complete to "&foo.Bar{}", not
+ // "foo.&Bar{}".
+ edits, err := c.editText(sel.Pos(), sel.Pos(), "&")
+ if err != nil {
+ event.Error(ctx, "error making edit for literal pointer completion", err)
+ return
+ }
+ addlEdits = append(addlEdits, edits...)
+ } else {
+ // Otherwise we can stick the "&" directly before the type name.
+ typeName = "&" + typeName
+ snip.PrependText("&")
+ }
+ }
+
+ switch t := literalType.Underlying().(type) {
+ case *types.Struct, *types.Array, *types.Slice, *types.Map:
+ c.compositeLiteral(t, snip.Clone(), typeName, float64(score), addlEdits)
+ case *types.Signature:
+ // Add a literal completion for a signature type that implements
+ // an interface. For example, offer "http.HandlerFunc()" when
+ // expected type is "http.Handler".
+ if expType != nil && types.IsInterface(expType) {
+ c.basicLiteral(t, snip.Clone(), typeName, float64(score), addlEdits)
+ }
+ case *types.Basic:
+ // Add a literal completion for basic types that implement our
+ // expected interface (e.g. named string type http.Dir
+ // implements http.FileSystem), or are identical to our expected
+ // type (i.e. yielding a type conversion such as "float64()").
+ if expType != nil && (types.IsInterface(expType) || types.Identical(expType, literalType)) {
+ c.basicLiteral(t, snip.Clone(), typeName, float64(score), addlEdits)
+ }
+ }
+ }
+
+ // If prefix matches "make", client may want a "make()"
+ // invocation. We also include the type name to allow for more
+ // flexible fuzzy matching.
+ if score := c.matcher.Score("make." + matchName); !cand.hasMod(reference) && score > 0 {
+ switch literalType.Underlying().(type) {
+ case *types.Slice:
+ // The second argument to "make()" for slices is required, so default to "0".
+ c.makeCall(snip.Clone(), typeName, "0", float64(score), addlEdits)
+ case *types.Map, *types.Chan:
+ // Maps and channels don't require the second argument, so omit
+ // to keep things simple for now.
+ c.makeCall(snip.Clone(), typeName, "", float64(score), addlEdits)
+ }
+ }
+
+ // If prefix matches "func", client may want a function literal.
+ if score := c.matcher.Score("func"); !cand.hasMod(reference) && score > 0 && (expType == nil || !types.IsInterface(expType)) {
+ switch t := literalType.Underlying().(type) {
+ case *types.Signature:
+ c.functionLiteral(ctx, t, float64(score))
+ }
+ }
+}
+
+// literalCandidateScore is the base score for literal candidates.
+// Literal candidates match the expected type so they should be high
+// scoring, but we want them ranked below lexical objects of the
+// correct type, so scale down highScore.
+const literalCandidateScore = highScore / 2
+
+// functionLiteral adds a function literal completion item for the
+// given signature.
+func (c *completer) functionLiteral(ctx context.Context, sig *types.Signature, matchScore float64) {
+ snip := &snippet.Builder{}
+ snip.WriteText("func(")
+
+ // First we generate names for each param and keep a seen count so
+ // we know if we need to uniquify param names. For example,
+ // "func(int)" will become "func(i int)", but "func(int, int64)"
+ // will become "func(i1 int, i2 int64)".
+ var (
+ paramNames = make([]string, sig.Params().Len())
+ paramNameCount = make(map[string]int)
+ hasTypeParams bool
+ )
+ for i := 0; i < sig.Params().Len(); i++ {
+ var (
+ p = sig.Params().At(i)
+ name = p.Name()
+ )
+
+ if tp, _ := p.Type().(*typeparams.TypeParam); tp != nil && !c.typeParamInScope(tp) {
+ hasTypeParams = true
+ }
+
+ if name == "" {
+ // If the param has no name in the signature, guess a name based
+ // on the type. Use an empty qualifier to ignore the package.
+ // For example, we want to name "http.Request" "r", not "hr".
+ typeName, err := source.FormatVarType(ctx, c.snapshot, c.pkg, p,
+ func(p *types.Package) string { return "" },
+ func(source.PackageName, source.ImportPath, source.PackagePath) string { return "" })
+ if err != nil {
+ // In general, the only error we should encounter while formatting is
+ // context cancellation.
+ if ctx.Err() == nil {
+ event.Error(ctx, "formatting var type", err)
+ }
+ return
+ }
+ name = abbreviateTypeName(typeName)
+ }
+ paramNames[i] = name
+ if name != "_" {
+ paramNameCount[name]++
+ }
+ }
+
+ for n, c := range paramNameCount {
+ // Any names we saw more than once will need a unique suffix added
+ // on. Reset the count to 1 to act as the suffix for the first
+ // name.
+ if c >= 2 {
+ paramNameCount[n] = 1
+ } else {
+ delete(paramNameCount, n)
+ }
+ }
+
+ for i := 0; i < sig.Params().Len(); i++ {
+ if hasTypeParams && !c.opts.placeholders {
+ // If there are type params in the args then the user must
+ // choose the concrete types. If placeholders are disabled just
+ // drop them between the parens and let them fill things in.
+ snip.WritePlaceholder(nil)
+ break
+ }
+
+ if i > 0 {
+ snip.WriteText(", ")
+ }
+
+ var (
+ p = sig.Params().At(i)
+ name = paramNames[i]
+ )
+
+ // Uniquify names by adding on an incrementing numeric suffix.
+ if idx, found := paramNameCount[name]; found {
+ paramNameCount[name]++
+ name = fmt.Sprintf("%s%d", name, idx)
+ }
+
+ if name != p.Name() && c.opts.placeholders {
+ // If we didn't use the signature's param name verbatim then we
+ // may have chosen a poor name. Give the user a placeholder so
+ // they can easily fix the name.
+ snip.WritePlaceholder(func(b *snippet.Builder) {
+ b.WriteText(name)
+ })
+ } else {
+ snip.WriteText(name)
+ }
+
+ // If the following param's type is identical to this one, omit
+ // this param's type string. For example, emit "i, j int" instead
+ // of "i int, j int".
+ if i == sig.Params().Len()-1 || !types.Identical(p.Type(), sig.Params().At(i+1).Type()) {
+ snip.WriteText(" ")
+ typeStr, err := source.FormatVarType(ctx, c.snapshot, c.pkg, p, c.qf, c.mq)
+ if err != nil {
+ // In general, the only error we should encounter while formatting is
+ // context cancellation.
+ if ctx.Err() == nil {
+ event.Error(ctx, "formatting var type", err)
+ }
+ return
+ }
+ if sig.Variadic() && i == sig.Params().Len()-1 {
+ typeStr = strings.Replace(typeStr, "[]", "...", 1)
+ }
+
+ if tp, _ := p.Type().(*typeparams.TypeParam); tp != nil && !c.typeParamInScope(tp) {
+ snip.WritePlaceholder(func(snip *snippet.Builder) {
+ snip.WriteText(typeStr)
+ })
+ } else {
+ snip.WriteText(typeStr)
+ }
+ }
+ }
+ snip.WriteText(")")
+
+ results := sig.Results()
+ if results.Len() > 0 {
+ snip.WriteText(" ")
+ }
+
+ resultsNeedParens := results.Len() > 1 ||
+ results.Len() == 1 && results.At(0).Name() != ""
+
+ var resultHasTypeParams bool
+ for i := 0; i < results.Len(); i++ {
+ if tp, _ := results.At(i).Type().(*typeparams.TypeParam); tp != nil && !c.typeParamInScope(tp) {
+ resultHasTypeParams = true
+ }
+ }
+
+ if resultsNeedParens {
+ snip.WriteText("(")
+ }
+ for i := 0; i < results.Len(); i++ {
+ if resultHasTypeParams && !c.opts.placeholders {
+ // Leave an empty tabstop if placeholders are disabled and there
+ // are type args that need specificying.
+ snip.WritePlaceholder(nil)
+ break
+ }
+
+ if i > 0 {
+ snip.WriteText(", ")
+ }
+ r := results.At(i)
+ if name := r.Name(); name != "" {
+ snip.WriteText(name + " ")
+ }
+
+ text, err := source.FormatVarType(ctx, c.snapshot, c.pkg, r, c.qf, c.mq)
+ if err != nil {
+ // In general, the only error we should encounter while formatting is
+ // context cancellation.
+ if ctx.Err() == nil {
+ event.Error(ctx, "formatting var type", err)
+ }
+ return
+ }
+ if tp, _ := r.Type().(*typeparams.TypeParam); tp != nil && !c.typeParamInScope(tp) {
+ snip.WritePlaceholder(func(snip *snippet.Builder) {
+ snip.WriteText(text)
+ })
+ } else {
+ snip.WriteText(text)
+ }
+ }
+ if resultsNeedParens {
+ snip.WriteText(")")
+ }
+
+ snip.WriteText(" {")
+ snip.WriteFinalTabstop()
+ snip.WriteText("}")
+
+ c.items = append(c.items, CompletionItem{
+ Label: "func(...) {}",
+ Score: matchScore * literalCandidateScore,
+ Kind: protocol.VariableCompletion,
+ snippet: snip,
+ })
+}
+
+// conventionalAcronyms contains conventional acronyms for type names
+// in lower case. For example, "ctx" for "context" and "err" for "error".
+var conventionalAcronyms = map[string]string{
+ "context": "ctx",
+ "error": "err",
+ "tx": "tx",
+ "responsewriter": "w",
+}
+
+// abbreviateTypeName abbreviates type names into acronyms. For
+// example, "fooBar" is abbreviated "fb". Care is taken to ignore
+// non-identifier runes. For example, "[]int" becomes "i", and
+// "struct { i int }" becomes "s".
+func abbreviateTypeName(s string) string {
+ var (
+ b strings.Builder
+ useNextUpper bool
+ )
+
+ // Trim off leading non-letters. We trim everything between "[" and
+ // "]" to handle array types like "[someConst]int".
+ var inBracket bool
+ s = strings.TrimFunc(s, func(r rune) bool {
+ if inBracket {
+ inBracket = r != ']'
+ return true
+ }
+
+ if r == '[' {
+ inBracket = true
+ }
+
+ return !unicode.IsLetter(r)
+ })
+
+ if acr, ok := conventionalAcronyms[strings.ToLower(s)]; ok {
+ return acr
+ }
+
+ for i, r := range s {
+ // Stop if we encounter a non-identifier rune.
+ if !unicode.IsLetter(r) && !unicode.IsNumber(r) {
+ break
+ }
+
+ if i == 0 {
+ b.WriteRune(unicode.ToLower(r))
+ }
+
+ if unicode.IsUpper(r) {
+ if useNextUpper {
+ b.WriteRune(unicode.ToLower(r))
+ useNextUpper = false
+ }
+ } else {
+ useNextUpper = true
+ }
+ }
+
+ return b.String()
+}
+
+// compositeLiteral adds a composite literal completion item for the given typeName.
+func (c *completer) compositeLiteral(T types.Type, snip *snippet.Builder, typeName string, matchScore float64, edits []protocol.TextEdit) {
+ snip.WriteText("{")
+ // Don't put the tab stop inside the composite literal curlies "{}"
+ // for structs that have no accessible fields.
+ if strct, ok := T.(*types.Struct); !ok || fieldsAccessible(strct, c.pkg.GetTypes()) {
+ snip.WriteFinalTabstop()
+ }
+ snip.WriteText("}")
+
+ nonSnippet := typeName + "{}"
+
+ c.items = append(c.items, CompletionItem{
+ Label: nonSnippet,
+ InsertText: nonSnippet,
+ Score: matchScore * literalCandidateScore,
+ Kind: protocol.VariableCompletion,
+ AdditionalTextEdits: edits,
+ snippet: snip,
+ })
+}
+
+// basicLiteral adds a literal completion item for the given basic
+// type name typeName.
+func (c *completer) basicLiteral(T types.Type, snip *snippet.Builder, typeName string, matchScore float64, edits []protocol.TextEdit) {
+ // Never give type conversions like "untyped int()".
+ if isUntyped(T) {
+ return
+ }
+
+ snip.WriteText("(")
+ snip.WriteFinalTabstop()
+ snip.WriteText(")")
+
+ nonSnippet := typeName + "()"
+
+ c.items = append(c.items, CompletionItem{
+ Label: nonSnippet,
+ InsertText: nonSnippet,
+ Detail: T.String(),
+ Score: matchScore * literalCandidateScore,
+ Kind: protocol.VariableCompletion,
+ AdditionalTextEdits: edits,
+ snippet: snip,
+ })
+}
+
+// makeCall adds a completion item for a "make()" call given a specific type.
+func (c *completer) makeCall(snip *snippet.Builder, typeName string, secondArg string, matchScore float64, edits []protocol.TextEdit) {
+ // Keep it simple and don't add any placeholders for optional "make()" arguments.
+
+ snip.PrependText("make(")
+ if secondArg != "" {
+ snip.WriteText(", ")
+ snip.WritePlaceholder(func(b *snippet.Builder) {
+ if c.opts.placeholders {
+ b.WriteText(secondArg)
+ }
+ })
+ }
+ snip.WriteText(")")
+
+ var nonSnippet strings.Builder
+ nonSnippet.WriteString("make(" + typeName)
+ if secondArg != "" {
+ nonSnippet.WriteString(", ")
+ nonSnippet.WriteString(secondArg)
+ }
+ nonSnippet.WriteByte(')')
+
+ c.items = append(c.items, CompletionItem{
+ Label: nonSnippet.String(),
+ InsertText: nonSnippet.String(),
+ Score: matchScore * literalCandidateScore,
+ Kind: protocol.FunctionCompletion,
+ AdditionalTextEdits: edits,
+ snippet: snip,
+ })
+}
+
+// Create a snippet for a type name where type params become placeholders.
+func (c *completer) typeNameSnippet(literalType types.Type, qf types.Qualifier) (*snippet.Builder, string) {
+ var (
+ snip snippet.Builder
+ typeName string
+ named, _ = literalType.(*types.Named)
+ )
+
+ if named != nil && named.Obj() != nil && typeparams.ForNamed(named).Len() > 0 && !c.fullyInstantiated(named) {
+ // We are not "fully instantiated" meaning we have type params that must be specified.
+ if pkg := qf(named.Obj().Pkg()); pkg != "" {
+ typeName = pkg + "."
+ }
+
+ // We do this to get "someType" instead of "someType[T]".
+ typeName += named.Obj().Name()
+ snip.WriteText(typeName + "[")
+
+ if c.opts.placeholders {
+ for i := 0; i < typeparams.ForNamed(named).Len(); i++ {
+ if i > 0 {
+ snip.WriteText(", ")
+ }
+ snip.WritePlaceholder(func(snip *snippet.Builder) {
+ snip.WriteText(types.TypeString(typeparams.ForNamed(named).At(i), qf))
+ })
+ }
+ } else {
+ snip.WritePlaceholder(nil)
+ }
+ snip.WriteText("]")
+ typeName += "[...]"
+ } else {
+ // We don't have unspecified type params so use default type formatting.
+ typeName = types.TypeString(literalType, qf)
+ snip.WriteText(typeName)
+ }
+
+ return &snip, typeName
+}
+
+// fullyInstantiated reports whether all of t's type params have
+// specified type args.
+func (c *completer) fullyInstantiated(t *types.Named) bool {
+ tps := typeparams.ForNamed(t)
+ tas := typeparams.NamedTypeArgs(t)
+
+ if tps.Len() != tas.Len() {
+ return false
+ }
+
+ for i := 0; i < tas.Len(); i++ {
+ switch ta := tas.At(i).(type) {
+ case *typeparams.TypeParam:
+ // A *TypeParam only counts as specified if it is currently in
+ // scope (i.e. we are in a generic definition).
+ if !c.typeParamInScope(ta) {
+ return false
+ }
+ case *types.Named:
+ if !c.fullyInstantiated(ta) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// typeParamInScope returns whether tp's object is in scope at c.pos.
+// This tells you whether you are in a generic definition and can
+// assume tp has been specified.
+func (c *completer) typeParamInScope(tp *typeparams.TypeParam) bool {
+ obj := tp.Obj()
+ if obj == nil {
+ return false
+ }
+
+ scope := c.innermostScope()
+ if scope == nil {
+ return false
+ }
+
+ _, foundObj := scope.LookupParent(obj.Name(), c.pos)
+ return obj == foundObj
+}
diff --git a/gopls/internal/lsp/source/completion/package.go b/gopls/internal/lsp/source/completion/package.go
new file mode 100644
index 000000000..f3bc30688
--- /dev/null
+++ b/gopls/internal/lsp/source/completion/package.go
@@ -0,0 +1,351 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package completion
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/scanner"
+ "go/token"
+ "go/types"
+ "path/filepath"
+ "strings"
+ "unicode"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/safetoken"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/fuzzy"
+)
+
+// packageClauseCompletions offers completions for a package declaration when
+// one is not present in the given file.
+func packageClauseCompletions(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle, position protocol.Position) ([]CompletionItem, *Selection, error) {
+ // We know that the AST for this file will be empty due to the missing
+ // package declaration, but parse it anyway to get a mapper.
+ // TODO(adonovan): opt: there's no need to parse just to get a mapper.
+ pgf, err := snapshot.ParseGo(ctx, fh, source.ParseFull)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ offset, err := pgf.Mapper.PositionOffset(position)
+ if err != nil {
+ return nil, nil, err
+ }
+ surrounding, err := packageCompletionSurrounding(pgf, offset)
+ if err != nil {
+ return nil, nil, fmt.Errorf("invalid position for package completion: %w", err)
+ }
+
+ packageSuggestions, err := packageSuggestions(ctx, snapshot, fh.URI(), "")
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var items []CompletionItem
+ for _, pkg := range packageSuggestions {
+ insertText := fmt.Sprintf("package %s", pkg.name)
+ items = append(items, CompletionItem{
+ Label: insertText,
+ Kind: protocol.ModuleCompletion,
+ InsertText: insertText,
+ Score: pkg.score,
+ })
+ }
+
+ return items, surrounding, nil
+}
+
+// packageCompletionSurrounding returns surrounding for package completion if a
+// package completions can be suggested at a given cursor offset. A valid location
+// for package completion is above any declarations or import statements.
+func packageCompletionSurrounding(pgf *source.ParsedGoFile, offset int) (*Selection, error) {
+ m := pgf.Mapper
+ // If the file lacks a package declaration, the parser will return an empty
+ // AST. As a work-around, try to parse an expression from the file contents.
+ fset := token.NewFileSet()
+ expr, _ := parser.ParseExprFrom(fset, m.URI.Filename(), pgf.Src, parser.Mode(0))
+ if expr == nil {
+ return nil, fmt.Errorf("unparseable file (%s)", m.URI)
+ }
+ tok := fset.File(expr.Pos())
+ cursor := tok.Pos(offset)
+
+ // If we were able to parse out an identifier as the first expression from
+ // the file, it may be the beginning of a package declaration ("pack ").
+ // We can offer package completions if the cursor is in the identifier.
+ if name, ok := expr.(*ast.Ident); ok {
+ if cursor >= name.Pos() && cursor <= name.End() {
+ if !strings.HasPrefix(PACKAGE, name.Name) {
+ return nil, fmt.Errorf("cursor in non-matching ident")
+ }
+ return &Selection{
+ content: name.Name,
+ cursor: cursor,
+ tokFile: tok,
+ start: name.Pos(),
+ end: name.End(),
+ mapper: m,
+ }, nil
+ }
+ }
+
+ // The file is invalid, but it contains an expression that we were able to
+ // parse. We will use this expression to construct the cursor's
+ // "surrounding".
+
+ // First, consider the possibility that we have a valid "package" keyword
+ // with an empty package name ("package "). "package" is parsed as an
+ // *ast.BadDecl since it is a keyword. This logic would allow "package" to
+ // appear on any line of the file as long as it's the first code expression
+ // in the file.
+ lines := strings.Split(string(pgf.Src), "\n")
+ cursorLine := tok.Line(cursor)
+ if cursorLine <= 0 || cursorLine > len(lines) {
+ return nil, fmt.Errorf("invalid line number")
+ }
+ if safetoken.StartPosition(fset, expr.Pos()).Line == cursorLine {
+ words := strings.Fields(lines[cursorLine-1])
+ if len(words) > 0 && words[0] == PACKAGE {
+ content := PACKAGE
+ // Account for spaces if there are any.
+ if len(words) > 1 {
+ content += " "
+ }
+
+ start := expr.Pos()
+ end := token.Pos(int(expr.Pos()) + len(content) + 1)
+ // We have verified that we have a valid 'package' keyword as our
+ // first expression. Ensure that cursor is in this keyword or
+ // otherwise fallback to the general case.
+ if cursor >= start && cursor <= end {
+ return &Selection{
+ content: content,
+ cursor: cursor,
+ tokFile: tok,
+ start: start,
+ end: end,
+ mapper: m,
+ }, nil
+ }
+ }
+ }
+
+ // If the cursor is after the start of the expression, no package
+ // declaration will be valid.
+ if cursor > expr.Pos() {
+ return nil, fmt.Errorf("cursor after expression")
+ }
+
+ // If the cursor is in a comment, don't offer any completions.
+ if cursorInComment(tok, cursor, m.Content) {
+ return nil, fmt.Errorf("cursor in comment")
+ }
+
+ // The surrounding range in this case is the cursor.
+ return &Selection{
+ content: "",
+ tokFile: tok,
+ start: cursor,
+ end: cursor,
+ cursor: cursor,
+ mapper: m,
+ }, nil
+}
+
+func cursorInComment(file *token.File, cursor token.Pos, src []byte) bool {
+ var s scanner.Scanner
+ s.Init(file, src, func(_ token.Position, _ string) {}, scanner.ScanComments)
+ for {
+ pos, tok, lit := s.Scan()
+ if pos <= cursor && cursor <= token.Pos(int(pos)+len(lit)) {
+ return tok == token.COMMENT
+ }
+ if tok == token.EOF {
+ break
+ }
+ }
+ return false
+}
+
+// packageNameCompletions returns name completions for a package clause using
+// the current name as prefix.
+func (c *completer) packageNameCompletions(ctx context.Context, fileURI span.URI, name *ast.Ident) error {
+ cursor := int(c.pos - name.NamePos)
+ if cursor < 0 || cursor > len(name.Name) {
+ return errors.New("cursor is not in package name identifier")
+ }
+
+ c.completionContext.packageCompletion = true
+
+ prefix := name.Name[:cursor]
+ packageSuggestions, err := packageSuggestions(ctx, c.snapshot, fileURI, prefix)
+ if err != nil {
+ return err
+ }
+
+ for _, pkg := range packageSuggestions {
+ c.deepState.enqueue(pkg)
+ }
+ return nil
+}
+
+// packageSuggestions returns a list of packages from workspace packages that
+// have the given prefix and are used in the same directory as the given
+// file. This also includes test packages for these packages (<pkg>_test) and
+// the directory name itself.
+func packageSuggestions(ctx context.Context, snapshot source.Snapshot, fileURI span.URI, prefix string) (packages []candidate, err error) {
+ active, err := snapshot.ActiveMetadata(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ toCandidate := func(name string, score float64) candidate {
+ obj := types.NewPkgName(0, nil, name, types.NewPackage("", name))
+ return candidate{obj: obj, name: name, detail: name, score: score}
+ }
+
+ matcher := fuzzy.NewMatcher(prefix)
+
+ // Always try to suggest a main package
+ defer func() {
+ if score := float64(matcher.Score("main")); score > 0 {
+ packages = append(packages, toCandidate("main", score*lowScore))
+ }
+ }()
+
+ dirPath := filepath.Dir(fileURI.Filename())
+ dirName := filepath.Base(dirPath)
+ if !isValidDirName(dirName) {
+ return packages, nil
+ }
+ pkgName := convertDirNameToPkgName(dirName)
+
+ seenPkgs := make(map[source.PackageName]struct{})
+
+ // The `go` command by default only allows one package per directory but we
+ // support multiple package suggestions since gopls is build system agnostic.
+ for _, m := range active {
+ if m.Name == "main" || m.Name == "" {
+ continue
+ }
+ if _, ok := seenPkgs[m.Name]; ok {
+ continue
+ }
+
+ // Only add packages that are previously used in the current directory.
+ var relevantPkg bool
+ for _, uri := range m.CompiledGoFiles {
+ if filepath.Dir(uri.Filename()) == dirPath {
+ relevantPkg = true
+ break
+ }
+ }
+ if !relevantPkg {
+ continue
+ }
+
+ // Add a found package used in current directory as a high relevance
+ // suggestion and the test package for it as a medium relevance
+ // suggestion.
+ if score := float64(matcher.Score(string(m.Name))); score > 0 {
+ packages = append(packages, toCandidate(string(m.Name), score*highScore))
+ }
+ seenPkgs[m.Name] = struct{}{}
+
+ testPkgName := m.Name + "_test"
+ if _, ok := seenPkgs[testPkgName]; ok || strings.HasSuffix(string(m.Name), "_test") {
+ continue
+ }
+ if score := float64(matcher.Score(string(testPkgName))); score > 0 {
+ packages = append(packages, toCandidate(string(testPkgName), score*stdScore))
+ }
+ seenPkgs[testPkgName] = struct{}{}
+ }
+
+ // Add current directory name as a low relevance suggestion.
+ if _, ok := seenPkgs[pkgName]; !ok {
+ if score := float64(matcher.Score(string(pkgName))); score > 0 {
+ packages = append(packages, toCandidate(string(pkgName), score*lowScore))
+ }
+
+ testPkgName := pkgName + "_test"
+ if score := float64(matcher.Score(string(testPkgName))); score > 0 {
+ packages = append(packages, toCandidate(string(testPkgName), score*lowScore))
+ }
+ }
+
+ return packages, nil
+}
+
+// isValidDirName checks whether the passed directory name can be used in
+// a package path. Requirements for a package path can be found here:
+// https://golang.org/ref/mod#go-mod-file-ident.
+func isValidDirName(dirName string) bool {
+ if dirName == "" {
+ return false
+ }
+
+ for i, ch := range dirName {
+ if isLetter(ch) || isDigit(ch) {
+ continue
+ }
+ if i == 0 {
+ // Directory name can start only with '_'. '.' is not allowed in module paths.
+ // '-' and '~' are not allowed because elements of package paths must be
+ // safe command-line arguments.
+ if ch == '_' {
+ continue
+ }
+ } else {
+ // Modules path elements can't end with '.'
+ if isAllowedPunctuation(ch) && (i != len(dirName)-1 || ch != '.') {
+ continue
+ }
+ }
+
+ return false
+ }
+ return true
+}
+
+// convertDirNameToPkgName converts a valid directory name to a valid package name.
+// It leaves only letters and digits. All letters are mapped to lower case.
+func convertDirNameToPkgName(dirName string) source.PackageName {
+ var buf bytes.Buffer
+ for _, ch := range dirName {
+ switch {
+ case isLetter(ch):
+ buf.WriteRune(unicode.ToLower(ch))
+
+ case buf.Len() != 0 && isDigit(ch):
+ buf.WriteRune(ch)
+ }
+ }
+ return source.PackageName(buf.String())
+}
+
+// isLetter and isDigit allow only ASCII characters because
+// "Each path element is a non-empty string made of up ASCII letters,
+// ASCII digits, and limited ASCII punctuation"
+// (see https://golang.org/ref/mod#go-mod-file-ident).
+
+func isLetter(ch rune) bool {
+ return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z'
+}
+
+func isDigit(ch rune) bool {
+ return '0' <= ch && ch <= '9'
+}
+
+func isAllowedPunctuation(ch rune) bool {
+ return ch == '_' || ch == '-' || ch == '~' || ch == '.'
+}
diff --git a/gopls/internal/lsp/source/completion/package_test.go b/gopls/internal/lsp/source/completion/package_test.go
new file mode 100644
index 000000000..614359fa5
--- /dev/null
+++ b/gopls/internal/lsp/source/completion/package_test.go
@@ -0,0 +1,81 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package completion
+
+import (
+ "testing"
+
+ "golang.org/x/tools/gopls/internal/lsp/source"
+)
+
+func TestIsValidDirName(t *testing.T) {
+ tests := []struct {
+ dirName string
+ valid bool
+ }{
+ {dirName: "", valid: false},
+ //
+ {dirName: "a", valid: true},
+ {dirName: "abcdef", valid: true},
+ {dirName: "AbCdEf", valid: true},
+ //
+ {dirName: "1a35", valid: true},
+ {dirName: "a16", valid: true},
+ //
+ {dirName: "_a", valid: true},
+ {dirName: "a_", valid: true},
+ //
+ {dirName: "~a", valid: false},
+ {dirName: "a~", valid: true},
+ //
+ {dirName: "-a", valid: false},
+ {dirName: "a-", valid: true},
+ //
+ {dirName: ".a", valid: false},
+ {dirName: "a.", valid: false},
+ //
+ {dirName: "a~_b--c.-e", valid: true},
+ {dirName: "~a~_b--c.-e", valid: false},
+ {dirName: "a~_b--c.-e--~", valid: true},
+ {dirName: "a~_b--2134dc42.-e6--~", valid: true},
+ {dirName: "abc`def", valid: false},
+ {dirName: "тест", valid: false},
+ {dirName: "你好", valid: false},
+ }
+ for _, tt := range tests {
+ valid := isValidDirName(tt.dirName)
+ if tt.valid != valid {
+ t.Errorf("%s: expected %v, got %v", tt.dirName, tt.valid, valid)
+ }
+ }
+}
+
+func TestConvertDirNameToPkgName(t *testing.T) {
+ tests := []struct {
+ dirName string
+ pkgName source.PackageName
+ }{
+ {dirName: "a", pkgName: "a"},
+ {dirName: "abcdef", pkgName: "abcdef"},
+ {dirName: "AbCdEf", pkgName: "abcdef"},
+ {dirName: "1a35", pkgName: "a35"},
+ {dirName: "14a35", pkgName: "a35"},
+ {dirName: "a16", pkgName: "a16"},
+ {dirName: "_a", pkgName: "a"},
+ {dirName: "a_", pkgName: "a"},
+ {dirName: "a~", pkgName: "a"},
+ {dirName: "a-", pkgName: "a"},
+ {dirName: "a~_b--c.-e", pkgName: "abce"},
+ {dirName: "a~_b--c.-e--~", pkgName: "abce"},
+ {dirName: "a~_b--2134dc42.-e6--~", pkgName: "ab2134dc42e6"},
+ }
+ for _, tt := range tests {
+ pkgName := convertDirNameToPkgName(tt.dirName)
+ if tt.pkgName != pkgName {
+ t.Errorf("%s: expected %v, got %v", tt.dirName, tt.pkgName, pkgName)
+ continue
+ }
+ }
+}
diff --git a/gopls/internal/lsp/source/completion/postfix_snippets.go b/gopls/internal/lsp/source/completion/postfix_snippets.go
new file mode 100644
index 000000000..0737ec246
--- /dev/null
+++ b/gopls/internal/lsp/source/completion/postfix_snippets.go
@@ -0,0 +1,471 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package completion
+
+import (
+ "context"
+ "fmt"
+ "go/ast"
+ "go/token"
+ "go/types"
+ "log"
+ "reflect"
+ "strings"
+ "sync"
+ "text/template"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/snippet"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/imports"
+)
+
+// Postfix snippets are artificial methods that allow the user to
+// compose common operations in an "argument oriented" fashion. For
+// example, instead of "sort.Slice(someSlice, ...)" a user can expand
+// "someSlice.sort!".
+
+// postfixTmpl represents a postfix snippet completion candidate.
+type postfixTmpl struct {
+ // label is the completion candidate's label presented to the user.
+ label string
+
+ // details is passed along to the client as the candidate's details.
+ details string
+
+ // body is the template text. See postfixTmplArgs for details on the
+ // facilities available to the template.
+ body string
+
+ tmpl *template.Template
+}
+
+// postfixTmplArgs are the template execution arguments available to
+// the postfix snippet templates.
+type postfixTmplArgs struct {
+ // StmtOK is true if it is valid to replace the selector with a
+ // statement. For example:
+ //
+ // func foo() {
+ // bar.sort! // statement okay
+ //
+ // someMethod(bar.sort!) // statement not okay
+ // }
+ StmtOK bool
+
+ // X is the textual SelectorExpr.X. For example, when completing
+ // "foo.bar.print!", "X" is "foo.bar".
+ X string
+
+ // Obj is the types.Object of SelectorExpr.X, if any.
+ Obj types.Object
+
+ // Type is the type of "foo.bar" in "foo.bar.print!".
+ Type types.Type
+
+ scope *types.Scope
+ snip snippet.Builder
+ importIfNeeded func(pkgPath string, scope *types.Scope) (name string, edits []protocol.TextEdit, err error)
+ edits []protocol.TextEdit
+ qf types.Qualifier
+ varNames map[string]bool
+}
+
+var postfixTmpls = []postfixTmpl{{
+ label: "sort",
+ details: "sort.Slice()",
+ body: `{{if and (eq .Kind "slice") .StmtOK -}}
+{{.Import "sort"}}.Slice({{.X}}, func({{.VarName nil "i"}}, {{.VarName nil "j"}} int) bool {
+ {{.Cursor}}
+})
+{{- end}}`,
+}, {
+ label: "last",
+ details: "s[len(s)-1]",
+ body: `{{if and (eq .Kind "slice") .Obj -}}
+{{.X}}[len({{.X}})-1]
+{{- end}}`,
+}, {
+ label: "reverse",
+ details: "reverse slice",
+ body: `{{if and (eq .Kind "slice") .StmtOK -}}
+{{$i := .VarName nil "i"}}{{$j := .VarName nil "j" -}}
+for {{$i}}, {{$j}} := 0, len({{.X}})-1; {{$i}} < {{$j}}; {{$i}}, {{$j}} = {{$i}}+1, {{$j}}-1 {
+ {{.X}}[{{$i}}], {{.X}}[{{$j}}] = {{.X}}[{{$j}}], {{.X}}[{{$i}}]
+}
+{{end}}`,
+}, {
+ label: "range",
+ details: "range over slice",
+ body: `{{if and (eq .Kind "slice") .StmtOK -}}
+for {{.VarName nil "i"}}, {{.VarName .ElemType "v"}} := range {{.X}} {
+ {{.Cursor}}
+}
+{{- end}}`,
+}, {
+ label: "append",
+ details: "append and re-assign slice",
+ body: `{{if and (eq .Kind "slice") .StmtOK .Obj -}}
+{{.X}} = append({{.X}}, {{.Cursor}})
+{{- end}}`,
+}, {
+ label: "append",
+ details: "append to slice",
+ body: `{{if and (eq .Kind "slice") (not .StmtOK) -}}
+append({{.X}}, {{.Cursor}})
+{{- end}}`,
+}, {
+ label: "copy",
+ details: "duplicate slice",
+ body: `{{if and (eq .Kind "slice") .StmtOK .Obj -}}
+{{$v := (.VarName nil (printf "%sCopy" .X))}}{{$v}} := make([]{{.TypeName .ElemType}}, len({{.X}}))
+copy({{$v}}, {{.X}})
+{{end}}`,
+}, {
+ label: "range",
+ details: "range over map",
+ body: `{{if and (eq .Kind "map") .StmtOK -}}
+for {{.VarName .KeyType "k"}}, {{.VarName .ElemType "v"}} := range {{.X}} {
+ {{.Cursor}}
+}
+{{- end}}`,
+}, {
+ label: "clear",
+ details: "clear map contents",
+ body: `{{if and (eq .Kind "map") .StmtOK -}}
+{{$k := (.VarName .KeyType "k")}}for {{$k}} := range {{.X}} {
+ delete({{.X}}, {{$k}})
+}
+{{end}}`,
+}, {
+ label: "keys",
+ details: "create slice of keys",
+ body: `{{if and (eq .Kind "map") .StmtOK -}}
+{{$keysVar := (.VarName nil "keys")}}{{$keysVar}} := make([]{{.TypeName .KeyType}}, 0, len({{.X}}))
+{{$k := (.VarName .KeyType "k")}}for {{$k}} := range {{.X}} {
+ {{$keysVar}} = append({{$keysVar}}, {{$k}})
+}
+{{end}}`,
+}, {
+ label: "range",
+ details: "range over channel",
+ body: `{{if and (eq .Kind "chan") .StmtOK -}}
+for {{.VarName .ElemType "e"}} := range {{.X}} {
+ {{.Cursor}}
+}
+{{- end}}`,
+}, {
+ label: "var",
+ details: "assign to variables",
+ body: `{{if and (eq .Kind "tuple") .StmtOK -}}
+{{$a := .}}{{range $i, $v := .Tuple}}{{if $i}}, {{end}}{{$a.VarName $v.Type $v.Name}}{{end}} := {{.X}}
+{{- end}}`,
+}, {
+ label: "var",
+ details: "assign to variable",
+ body: `{{if and (ne .Kind "tuple") .StmtOK -}}
+{{.VarName .Type ""}} := {{.X}}
+{{- end}}`,
+}, {
+ label: "print",
+ details: "print to stdout",
+ body: `{{if and (ne .Kind "tuple") .StmtOK -}}
+{{.Import "fmt"}}.Printf("{{.EscapeQuotes .X}}: %v\n", {{.X}})
+{{- end}}`,
+}, {
+ label: "print",
+ details: "print to stdout",
+ body: `{{if and (eq .Kind "tuple") .StmtOK -}}
+{{.Import "fmt"}}.Println({{.X}})
+{{- end}}`,
+}, {
+ label: "split",
+ details: "split string",
+ body: `{{if (eq (.TypeName .Type) "string") -}}
+{{.Import "strings"}}.Split({{.X}}, "{{.Cursor}}")
+{{- end}}`,
+}, {
+ label: "join",
+ details: "join string slice",
+ body: `{{if and (eq .Kind "slice") (eq (.TypeName .ElemType) "string") -}}
+{{.Import "strings"}}.Join({{.X}}, "{{.Cursor}}")
+{{- end}}`,
+}}
+
+// Cursor indicates where the client's cursor should end up after the
+// snippet is done.
+func (a *postfixTmplArgs) Cursor() string {
+ a.snip.WriteFinalTabstop()
+ return ""
+}
+
+// Import makes sure the package corresponding to path is imported,
+// returning the identifier to use to refer to the package.
+func (a *postfixTmplArgs) Import(path string) (string, error) {
+ name, edits, err := a.importIfNeeded(path, a.scope)
+ if err != nil {
+ return "", fmt.Errorf("couldn't import %q: %w", path, err)
+ }
+ a.edits = append(a.edits, edits...)
+ return name, nil
+}
+
+func (a *postfixTmplArgs) EscapeQuotes(v string) string {
+ return strings.ReplaceAll(v, `"`, `\\"`)
+}
+
+// ElemType returns the Elem() type of xType, if applicable.
+func (a *postfixTmplArgs) ElemType() types.Type {
+ if e, _ := a.Type.(interface{ Elem() types.Type }); e != nil {
+ return e.Elem()
+ }
+ return nil
+}
+
+// Kind returns the underlying kind of type, e.g. "slice", "struct",
+// etc.
+func (a *postfixTmplArgs) Kind() string {
+ t := reflect.TypeOf(a.Type.Underlying())
+ return strings.ToLower(strings.TrimPrefix(t.String(), "*types."))
+}
+
+// KeyType returns the type of X's key. KeyType panics if X is not a
+// map.
+func (a *postfixTmplArgs) KeyType() types.Type {
+ return a.Type.Underlying().(*types.Map).Key()
+}
+
+// Tuple returns the tuple result vars if X is a call expression.
+func (a *postfixTmplArgs) Tuple() []*types.Var {
+ tuple, _ := a.Type.(*types.Tuple)
+ if tuple == nil {
+ return nil
+ }
+
+ typs := make([]*types.Var, 0, tuple.Len())
+ for i := 0; i < tuple.Len(); i++ {
+ typs = append(typs, tuple.At(i))
+ }
+ return typs
+}
+
+// TypeName returns the textual representation of type t.
+func (a *postfixTmplArgs) TypeName(t types.Type) (string, error) {
+ if t == nil || t == types.Typ[types.Invalid] {
+ return "", fmt.Errorf("invalid type: %v", t)
+ }
+ return types.TypeString(t, a.qf), nil
+}
+
+// VarName returns a suitable variable name for the type t. If t
+// implements the error interface, "err" is used. If t is not a named
+// type then nonNamedDefault is used. Otherwise a name is made by
+// abbreviating the type name. If the resultant name is already in
+// scope, an integer is appended to make a unique name.
+func (a *postfixTmplArgs) VarName(t types.Type, nonNamedDefault string) string {
+ if t == nil {
+ t = types.Typ[types.Invalid]
+ }
+
+ var name string
+ // go/types predicates are undefined on types.Typ[types.Invalid].
+ if !types.Identical(t, types.Typ[types.Invalid]) && types.Implements(t, errorIntf) {
+ name = "err"
+ } else if _, isNamed := source.Deref(t).(*types.Named); !isNamed {
+ name = nonNamedDefault
+ }
+
+ if name == "" {
+ name = types.TypeString(t, func(p *types.Package) string {
+ return ""
+ })
+ name = abbreviateTypeName(name)
+ }
+
+ if dot := strings.LastIndex(name, "."); dot > -1 {
+ name = name[dot+1:]
+ }
+
+ uniqueName := name
+ for i := 2; ; i++ {
+ if s, _ := a.scope.LookupParent(uniqueName, token.NoPos); s == nil && !a.varNames[uniqueName] {
+ break
+ }
+ uniqueName = fmt.Sprintf("%s%d", name, i)
+ }
+
+ a.varNames[uniqueName] = true
+
+ return uniqueName
+}
+
+func (c *completer) addPostfixSnippetCandidates(ctx context.Context, sel *ast.SelectorExpr) {
+ if !c.opts.postfix {
+ return
+ }
+
+ initPostfixRules()
+
+ if sel == nil || sel.Sel == nil {
+ return
+ }
+
+ selType := c.pkg.GetTypesInfo().TypeOf(sel.X)
+ if selType == nil {
+ return
+ }
+
+ // Skip empty tuples since there is no value to operate on.
+ if tuple, ok := selType.Underlying().(*types.Tuple); ok && tuple == nil {
+ return
+ }
+
+ tokFile := c.pkg.FileSet().File(c.pos)
+
+ // Only replace sel with a statement if sel is already a statement.
+ var stmtOK bool
+ for i, n := range c.path {
+ if n == sel && i < len(c.path)-1 {
+ switch p := c.path[i+1].(type) {
+ case *ast.ExprStmt:
+ stmtOK = true
+ case *ast.AssignStmt:
+ // In cases like:
+ //
+ // foo.<>
+ // bar = 123
+ //
+ // detect that "foo." makes up the entire statement since the
+ // apparent selector spans lines.
+ stmtOK = tokFile.Line(c.pos) < tokFile.Line(p.TokPos)
+ }
+ break
+ }
+ }
+
+ scope := c.pkg.GetTypes().Scope().Innermost(c.pos)
+ if scope == nil {
+ return
+ }
+
+ // afterDot is the position after selector dot, e.g. "|" in
+ // "foo.|print".
+ afterDot := sel.Sel.Pos()
+
+ // We must detect dangling selectors such as:
+ //
+ // foo.<>
+ // bar
+ //
+ // and adjust afterDot so that we don't mistakenly delete the
+ // newline thinking "bar" is part of our selector.
+ if startLine := tokFile.Line(sel.Pos()); startLine != tokFile.Line(afterDot) {
+ if tokFile.Line(c.pos) != startLine {
+ return
+ }
+ afterDot = c.pos
+ }
+
+ for _, rule := range postfixTmpls {
+ // When completing foo.print<>, "print" is naturally overwritten,
+ // but we need to also remove "foo." so the snippet has a clean
+ // slate.
+ edits, err := c.editText(sel.Pos(), afterDot, "")
+ if err != nil {
+ event.Error(ctx, "error calculating postfix edits", err)
+ return
+ }
+
+ tmplArgs := postfixTmplArgs{
+ X: source.FormatNode(c.pkg.FileSet(), sel.X),
+ StmtOK: stmtOK,
+ Obj: exprObj(c.pkg.GetTypesInfo(), sel.X),
+ Type: selType,
+ qf: c.qf,
+ importIfNeeded: c.importIfNeeded,
+ scope: scope,
+ varNames: make(map[string]bool),
+ }
+
+ // Feed the template straight into the snippet builder. This
+ // allows templates to build snippets as they are executed.
+ err = rule.tmpl.Execute(&tmplArgs.snip, &tmplArgs)
+ if err != nil {
+ event.Error(ctx, "error executing postfix template", err)
+ continue
+ }
+
+ if strings.TrimSpace(tmplArgs.snip.String()) == "" {
+ continue
+ }
+
+ score := c.matcher.Score(rule.label)
+ if score <= 0 {
+ continue
+ }
+
+ c.items = append(c.items, CompletionItem{
+ Label: rule.label + "!",
+ Detail: rule.details,
+ Score: float64(score) * 0.01,
+ Kind: protocol.SnippetCompletion,
+ snippet: &tmplArgs.snip,
+ AdditionalTextEdits: append(edits, tmplArgs.edits...),
+ })
+ }
+}
+
+var postfixRulesOnce sync.Once
+
+func initPostfixRules() {
+ postfixRulesOnce.Do(func() {
+ var idx int
+ for _, rule := range postfixTmpls {
+ var err error
+ rule.tmpl, err = template.New("postfix_snippet").Parse(rule.body)
+ if err != nil {
+ log.Panicf("error parsing postfix snippet template: %v", err)
+ }
+ postfixTmpls[idx] = rule
+ idx++
+ }
+ postfixTmpls = postfixTmpls[:idx]
+ })
+}
+
+// importIfNeeded returns the package identifier and any necessary
+// edits to import package pkgPath.
+func (c *completer) importIfNeeded(pkgPath string, scope *types.Scope) (string, []protocol.TextEdit, error) {
+ defaultName := imports.ImportPathToAssumedName(pkgPath)
+
+ // Check if file already imports pkgPath.
+ for _, s := range c.file.Imports {
+ // TODO(adonovan): what if pkgPath has a vendor/ suffix?
+ // This may be the cause of go.dev/issue/56291.
+ if source.UnquoteImportPath(s) == source.ImportPath(pkgPath) {
+ if s.Name == nil {
+ return defaultName, nil, nil
+ }
+ if s.Name.Name != "_" {
+ return s.Name.Name, nil, nil
+ }
+ }
+ }
+
+ // Give up if the package's name is already in use by another object.
+ if _, obj := scope.LookupParent(defaultName, token.NoPos); obj != nil {
+ return "", nil, fmt.Errorf("import name %q of %q already in use", defaultName, pkgPath)
+ }
+
+ edits, err := c.importEdits(&importInfo{
+ importPath: pkgPath,
+ })
+ if err != nil {
+ return "", nil, err
+ }
+
+ return defaultName, edits, nil
+}
diff --git a/gopls/internal/lsp/source/completion/printf.go b/gopls/internal/lsp/source/completion/printf.go
new file mode 100644
index 000000000..432011755
--- /dev/null
+++ b/gopls/internal/lsp/source/completion/printf.go
@@ -0,0 +1,172 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package completion
+
+import (
+ "go/ast"
+ "go/constant"
+ "go/types"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+// printfArgKind returns the expected objKind when completing a
+// printf-like operand. call is the printf-like function call, and
+// argIdx is the index of call.Args being completed.
+func printfArgKind(info *types.Info, call *ast.CallExpr, argIdx int) objKind {
+ // Printf-like function name must end in "f".
+ fn := exprObj(info, call.Fun)
+ if fn == nil || !strings.HasSuffix(fn.Name(), "f") {
+ return kindAny
+ }
+
+ sig, _ := fn.Type().(*types.Signature)
+ if sig == nil {
+ return kindAny
+ }
+
+ // Must be variadic and take at least two params.
+ numParams := sig.Params().Len()
+ if !sig.Variadic() || numParams < 2 || argIdx < numParams-1 {
+ return kindAny
+ }
+
+ // Param preceding variadic args must be a (format) string.
+ if !types.Identical(sig.Params().At(numParams-2).Type(), types.Typ[types.String]) {
+ return kindAny
+ }
+
+ // Format string must be a constant.
+ strArg := info.Types[call.Args[numParams-2]].Value
+ if strArg == nil || strArg.Kind() != constant.String {
+ return kindAny
+ }
+
+ return formatOperandKind(constant.StringVal(strArg), argIdx-(numParams-1)+1)
+}
+
+// formatOperandKind returns the objKind corresponding to format's
+// operandIdx'th operand.
+func formatOperandKind(format string, operandIdx int) objKind {
+ var (
+ prevOperandIdx int
+ kind = kindAny
+ )
+ for {
+ i := strings.Index(format, "%")
+ if i == -1 {
+ break
+ }
+
+ var operands []formatOperand
+ format, operands = parsePrintfVerb(format[i+1:], prevOperandIdx)
+
+ // Check if any this verb's operands correspond to our target
+ // operandIdx.
+ for _, v := range operands {
+ if v.idx == operandIdx {
+ if kind == kindAny {
+ kind = v.kind
+ } else if v.kind != kindAny {
+ // If multiple verbs refer to the same operand, take the
+ // intersection of their kinds.
+ kind &= v.kind
+ }
+ }
+
+ prevOperandIdx = v.idx
+ }
+ }
+ return kind
+}
+
+type formatOperand struct {
+ // idx is the one-based printf operand index.
+ idx int
+ // kind is a mask of expected kinds of objects for this operand.
+ kind objKind
+}
+
+// parsePrintfVerb parses the leading printf verb in f. The opening
+// "%" must already be trimmed from f. prevIdx is the previous
+// operand's index, or zero if this is the first verb. The format
+// string is returned with the leading verb removed. Multiple operands
+// can be returned in the case of dynamic widths such as "%*.*f".
+func parsePrintfVerb(f string, prevIdx int) (string, []formatOperand) {
+ var verbs []formatOperand
+
+ addVerb := func(k objKind) {
+ verbs = append(verbs, formatOperand{
+ idx: prevIdx + 1,
+ kind: k,
+ })
+ prevIdx++
+ }
+
+ for len(f) > 0 {
+ // Trim first rune off of f so we are guaranteed to make progress.
+ r, l := utf8.DecodeRuneInString(f)
+ f = f[l:]
+
+ // We care about three things:
+ // 1. The verb, which maps directly to object kind.
+ // 2. Explicit operand indices like "%[2]s".
+ // 3. Dynamic widths using "*".
+ switch r {
+ case '%':
+ return f, nil
+ case '*':
+ addVerb(kindInt)
+ continue
+ case '[':
+ // Parse operand index as in "%[2]s".
+ i := strings.Index(f, "]")
+ if i == -1 {
+ return f, nil
+ }
+
+ idx, err := strconv.Atoi(f[:i])
+ f = f[i+1:]
+ if err != nil {
+ return f, nil
+ }
+
+ prevIdx = idx - 1
+ continue
+ case 'v', 'T':
+ addVerb(kindAny)
+ case 't':
+ addVerb(kindBool)
+ case 'c', 'd', 'o', 'O', 'U':
+ addVerb(kindInt)
+ case 'e', 'E', 'f', 'F', 'g', 'G':
+ addVerb(kindFloat | kindComplex)
+ case 'b':
+ addVerb(kindInt | kindFloat | kindComplex | kindBytes)
+ case 'q', 's':
+ addVerb(kindString | kindBytes | kindStringer | kindError)
+ case 'x', 'X':
+ // Omit kindStringer and kindError though technically allowed.
+ addVerb(kindString | kindBytes | kindInt | kindFloat | kindComplex)
+ case 'p':
+ addVerb(kindPtr | kindSlice)
+ case 'w':
+ addVerb(kindError)
+ case '+', '-', '#', ' ', '.', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ // Flag or numeric width/precision value.
+ continue
+ default:
+ // Assume unrecognized rune is a custom fmt.Formatter verb.
+ addVerb(kindAny)
+ }
+
+ if len(verbs) > 0 {
+ break
+ }
+ }
+
+ return f, verbs
+}
diff --git a/internal/lsp/source/completion/printf_test.go b/gopls/internal/lsp/source/completion/printf_test.go
index 19d295b8d..19d295b8d 100644
--- a/internal/lsp/source/completion/printf_test.go
+++ b/gopls/internal/lsp/source/completion/printf_test.go
diff --git a/gopls/internal/lsp/source/completion/snippet.go b/gopls/internal/lsp/source/completion/snippet.go
new file mode 100644
index 000000000..f4ea767e9
--- /dev/null
+++ b/gopls/internal/lsp/source/completion/snippet.go
@@ -0,0 +1,116 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package completion
+
+import (
+ "go/ast"
+
+ "golang.org/x/tools/gopls/internal/lsp/safetoken"
+ "golang.org/x/tools/gopls/internal/lsp/snippet"
+)
+
+// structFieldSnippet calculates the snippet for struct literal field names.
+func (c *completer) structFieldSnippet(cand candidate, detail string, snip *snippet.Builder) {
+ if !c.wantStructFieldCompletions() {
+ return
+ }
+
+ // If we are in a deep completion then we can't be completing a field
+ // name (e.g. "Foo{f<>}" completing to "Foo{f.Bar}" should not generate
+ // a snippet).
+ if len(cand.path) > 0 {
+ return
+ }
+
+ clInfo := c.enclosingCompositeLiteral
+
+ // If we are already in a key-value expression, we don't want a snippet.
+ if clInfo.kv != nil {
+ return
+ }
+
+ // A plain snippet turns "Foo{Ba<>" into "Foo{Bar: <>".
+ snip.WriteText(": ")
+ snip.WritePlaceholder(func(b *snippet.Builder) {
+ // A placeholder snippet turns "Foo{Ba<>" into "Foo{Bar: <*int*>".
+ if c.opts.placeholders {
+ b.WriteText(detail)
+ }
+ })
+
+ fset := c.pkg.FileSet()
+
+ // If the cursor position is on a different line from the literal's opening brace,
+ // we are in a multiline literal. Ignore line directives.
+ if safetoken.StartPosition(fset, c.pos).Line != safetoken.StartPosition(fset, clInfo.cl.Lbrace).Line {
+ snip.WriteText(",")
+ }
+}
+
+// functionCallSnippet calculates the snippet for function calls.
+func (c *completer) functionCallSnippet(name string, tparams, params []string, snip *snippet.Builder) {
+ // If there is no suffix then we need to reuse existing call parens
+ // "()" if present. If there is an identifier suffix then we always
+ // need to include "()" since we don't overwrite the suffix.
+ if c.surrounding != nil && c.surrounding.Suffix() == "" && len(c.path) > 1 {
+ // If we are the left side (i.e. "Fun") part of a call expression,
+ // we don't want a snippet since there are already parens present.
+ switch n := c.path[1].(type) {
+ case *ast.CallExpr:
+ // The Lparen != Rparen check detects fudged CallExprs we
+ // inserted when fixing the AST. In this case, we do still need
+ // to insert the calling "()" parens.
+ if n.Fun == c.path[0] && n.Lparen != n.Rparen {
+ return
+ }
+ case *ast.SelectorExpr:
+ if len(c.path) > 2 {
+ if call, ok := c.path[2].(*ast.CallExpr); ok && call.Fun == c.path[1] && call.Lparen != call.Rparen {
+ return
+ }
+ }
+ }
+ }
+
+ snip.WriteText(name)
+
+ if len(tparams) > 0 {
+ snip.WriteText("[")
+ if c.opts.placeholders {
+ for i, tp := range tparams {
+ if i > 0 {
+ snip.WriteText(", ")
+ }
+ snip.WritePlaceholder(func(b *snippet.Builder) {
+ b.WriteText(tp)
+ })
+ }
+ } else {
+ snip.WritePlaceholder(nil)
+ }
+ snip.WriteText("]")
+ }
+
+ snip.WriteText("(")
+
+ if c.opts.placeholders {
+ // A placeholder snippet turns "someFun<>" into "someFunc(<*i int*>, *s string*)".
+ for i, p := range params {
+ if i > 0 {
+ snip.WriteText(", ")
+ }
+ snip.WritePlaceholder(func(b *snippet.Builder) {
+ b.WriteText(p)
+ })
+ }
+ } else {
+ // A plain snippet turns "someFun<>" into "someFunc(<>)".
+ if len(params) > 0 {
+ snip.WritePlaceholder(nil)
+ }
+ }
+
+ snip.WriteText(")")
+}
diff --git a/gopls/internal/lsp/source/completion/statements.go b/gopls/internal/lsp/source/completion/statements.go
new file mode 100644
index 000000000..707375fa1
--- /dev/null
+++ b/gopls/internal/lsp/source/completion/statements.go
@@ -0,0 +1,361 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package completion
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "go/types"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/snippet"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+)
+
+// addStatementCandidates adds full statement completion candidates
+// appropriate for the current context.
+func (c *completer) addStatementCandidates() {
+ c.addErrCheck()
+ c.addAssignAppend()
+}
+
+// addAssignAppend offers a completion candidate of the form:
+//
+// someSlice = append(someSlice, )
+//
+// It will offer the "append" completion in either of two situations:
+//
+// 1. Position is in RHS of assign, prefix matches "append", and
+// corresponding LHS object is a slice. For example,
+// "foo = ap<>" completes to "foo = append(foo, )".
+//
+// 2. Prefix is an ident or selector in an *ast.ExprStmt (i.e.
+// beginning of statement), and our best matching candidate is a
+// slice. For example: "foo.ba" completes to "foo.bar = append(foo.bar, )".
+func (c *completer) addAssignAppend() {
+ if len(c.path) < 3 {
+ return
+ }
+
+ ident, _ := c.path[0].(*ast.Ident)
+ if ident == nil {
+ return
+ }
+
+ var (
+ // sliceText is the full name of our slice object, e.g. "s.abc" in
+ // "s.abc = app<>".
+ sliceText string
+ // needsLHS is true if we need to prepend the LHS slice name and
+ // "=" to our candidate.
+ needsLHS = false
+ fset = c.pkg.FileSet()
+ )
+
+ switch n := c.path[1].(type) {
+ case *ast.AssignStmt:
+ // We are already in an assignment. Make sure our prefix matches "append".
+ if c.matcher.Score("append") <= 0 {
+ return
+ }
+
+ exprIdx := exprAtPos(c.pos, n.Rhs)
+ if exprIdx == len(n.Rhs) || exprIdx > len(n.Lhs)-1 {
+ return
+ }
+
+ lhsType := c.pkg.GetTypesInfo().TypeOf(n.Lhs[exprIdx])
+ if lhsType == nil {
+ return
+ }
+
+ // Make sure our corresponding LHS object is a slice.
+ if _, isSlice := lhsType.Underlying().(*types.Slice); !isSlice {
+ return
+ }
+
+ // The name or our slice is whatever's in the LHS expression.
+ sliceText = source.FormatNode(fset, n.Lhs[exprIdx])
+ case *ast.SelectorExpr:
+ // Make sure we are a selector at the beginning of a statement.
+ if _, parentIsExprtStmt := c.path[2].(*ast.ExprStmt); !parentIsExprtStmt {
+ return
+ }
+
+ // So far we only know the first part of our slice name. For
+ // example in "s.a<>" we only know our slice begins with "s."
+ // since the user could still be typing.
+ sliceText = source.FormatNode(fset, n.X) + "."
+ needsLHS = true
+ case *ast.ExprStmt:
+ needsLHS = true
+ default:
+ return
+ }
+
+ var (
+ label string
+ snip snippet.Builder
+ score = highScore
+ )
+
+ if needsLHS {
+ // Offer the long form assign + append candidate if our best
+ // candidate is a slice.
+ bestItem := c.topCandidate()
+ if bestItem == nil || !bestItem.isSlice {
+ return
+ }
+
+ // Don't rank the full form assign + append candidate above the
+ // slice itself.
+ score = bestItem.Score - 0.01
+
+ // Fill in rest of sliceText now that we have the object name.
+ sliceText += bestItem.Label
+
+ // Fill in the candidate's LHS bits.
+ label = fmt.Sprintf("%s = ", bestItem.Label)
+ snip.WriteText(label)
+ }
+
+ snip.WriteText(fmt.Sprintf("append(%s, ", sliceText))
+ snip.WritePlaceholder(nil)
+ snip.WriteText(")")
+
+ c.items = append(c.items, CompletionItem{
+ Label: label + fmt.Sprintf("append(%s, )", sliceText),
+ Kind: protocol.FunctionCompletion,
+ Score: score,
+ snippet: &snip,
+ })
+}
+
+// topCandidate returns the strictly highest scoring candidate
+// collected so far. If the top two candidates have the same score,
+// nil is returned.
+func (c *completer) topCandidate() *CompletionItem {
+ var bestItem, secondBestItem *CompletionItem
+ for i := range c.items {
+ if bestItem == nil || c.items[i].Score > bestItem.Score {
+ bestItem = &c.items[i]
+ } else if secondBestItem == nil || c.items[i].Score > secondBestItem.Score {
+ secondBestItem = &c.items[i]
+ }
+ }
+
+ // If secondBestItem has the same score, bestItem isn't
+ // the strict best.
+ if secondBestItem != nil && secondBestItem.Score == bestItem.Score {
+ return nil
+ }
+
+ return bestItem
+}
+
+// addErrCheck offers a completion candidate of the form:
+//
+// if err != nil {
+// return nil, err
+// }
+//
+// In the case of test functions, it offers a completion candidate of the form:
+//
+// if err != nil {
+// t.Fatal(err)
+// }
+//
+// The position must be in a function that returns an error, and the
+// statement preceding the position must be an assignment where the
+// final LHS object is an error. addErrCheck will synthesize
+// zero values as necessary to make the return statement valid.
+func (c *completer) addErrCheck() {
+ if len(c.path) < 2 || c.enclosingFunc == nil || !c.opts.placeholders {
+ return
+ }
+
+ var (
+ errorType = types.Universe.Lookup("error").Type()
+ result = c.enclosingFunc.sig.Results()
+ testVar = getTestVar(c.enclosingFunc, c.pkg)
+ isTest = testVar != ""
+ doesNotReturnErr = result.Len() == 0 || !types.Identical(result.At(result.Len()-1).Type(), errorType)
+ )
+ // Make sure our enclosing function is a Test func or returns an error.
+ if !isTest && doesNotReturnErr {
+ return
+ }
+
+ prevLine := prevStmt(c.pos, c.path)
+ if prevLine == nil {
+ return
+ }
+
+ // Make sure our preceding statement was as assignment.
+ assign, _ := prevLine.(*ast.AssignStmt)
+ if assign == nil || len(assign.Lhs) == 0 {
+ return
+ }
+
+ lastAssignee := assign.Lhs[len(assign.Lhs)-1]
+
+ // Make sure the final assignee is an error.
+ if !types.Identical(c.pkg.GetTypesInfo().TypeOf(lastAssignee), errorType) {
+ return
+ }
+
+ var (
+ // errVar is e.g. "err" in "foo, err := bar()".
+ errVar = source.FormatNode(c.pkg.FileSet(), lastAssignee)
+
+ // Whether we need to include the "if" keyword in our candidate.
+ needsIf = true
+ )
+
+ // If the returned error from the previous statement is "_", it is not a real object.
+ // If we don't have an error, and the function signature takes a testing.TB that is either ignored
+ // or an "_", then we also can't call t.Fatal(err).
+ if errVar == "_" {
+ return
+ }
+
+ // Below we try to detect if the user has already started typing "if
+ // err" so we can replace what they've typed with our complete
+ // statement.
+ switch n := c.path[0].(type) {
+ case *ast.Ident:
+ switch c.path[1].(type) {
+ case *ast.ExprStmt:
+ // This handles:
+ //
+ // f, err := os.Open("foo")
+ // i<>
+
+ // Make sure they are typing "if".
+ if c.matcher.Score("if") <= 0 {
+ return
+ }
+ case *ast.IfStmt:
+ // This handles:
+ //
+ // f, err := os.Open("foo")
+ // if er<>
+
+ // Make sure they are typing the error's name.
+ if c.matcher.Score(errVar) <= 0 {
+ return
+ }
+
+ needsIf = false
+ default:
+ return
+ }
+ case *ast.IfStmt:
+ // This handles:
+ //
+ // f, err := os.Open("foo")
+ // if <>
+
+ // Avoid false positives by ensuring the if's cond is a bad
+ // expression. For example, don't offer the completion in cases
+ // like "if <> somethingElse".
+ if _, bad := n.Cond.(*ast.BadExpr); !bad {
+ return
+ }
+
+ // If "if" is our direct prefix, we need to include it in our
+ // candidate since the existing "if" will be overwritten.
+ needsIf = c.pos == n.Pos()+token.Pos(len("if"))
+ }
+
+ // Build up a snippet that looks like:
+ //
+ // if err != nil {
+ // return <zero value>, ..., ${1:err}
+ // }
+ //
+ // We make the error a placeholder so it is easy to alter the error.
+ var snip snippet.Builder
+ if needsIf {
+ snip.WriteText("if ")
+ }
+ snip.WriteText(fmt.Sprintf("%s != nil {\n\t", errVar))
+
+ var label string
+ if isTest {
+ snip.WriteText(fmt.Sprintf("%s.Fatal(%s)", testVar, errVar))
+ label = fmt.Sprintf("%[1]s != nil { %[2]s.Fatal(%[1]s) }", errVar, testVar)
+ } else {
+ snip.WriteText("return ")
+ for i := 0; i < result.Len()-1; i++ {
+ snip.WriteText(formatZeroValue(result.At(i).Type(), c.qf))
+ snip.WriteText(", ")
+ }
+ snip.WritePlaceholder(func(b *snippet.Builder) {
+ b.WriteText(errVar)
+ })
+ label = fmt.Sprintf("%[1]s != nil { return %[1]s }", errVar)
+ }
+
+ snip.WriteText("\n}")
+
+ if needsIf {
+ label = "if " + label
+ }
+
+ c.items = append(c.items, CompletionItem{
+ Label: label,
+ // There doesn't seem to be a more appropriate kind.
+ Kind: protocol.KeywordCompletion,
+ Score: highScore,
+ snippet: &snip,
+ })
+}
+
+// getTestVar checks the function signature's input parameters and returns
+// the name of the first parameter that implements "testing.TB". For example,
+// func someFunc(t *testing.T) returns the string "t", func someFunc(b *testing.B)
+// returns "b" etc. An empty string indicates that the function signature
+// does not take a testing.TB parameter or does so but is ignored such
+// as func someFunc(*testing.T).
+func getTestVar(enclosingFunc *funcInfo, pkg source.Package) string {
+ if enclosingFunc == nil || enclosingFunc.sig == nil {
+ return ""
+ }
+
+ var testingPkg *types.Package
+ for _, p := range pkg.GetTypes().Imports() {
+ if p.Path() == "testing" {
+ testingPkg = p
+ break
+ }
+ }
+ if testingPkg == nil {
+ return ""
+ }
+ tbObj := testingPkg.Scope().Lookup("TB")
+ if tbObj == nil {
+ return ""
+ }
+ iface, ok := tbObj.Type().Underlying().(*types.Interface)
+ if !ok {
+ return ""
+ }
+
+ sig := enclosingFunc.sig
+ for i := 0; i < sig.Params().Len(); i++ {
+ param := sig.Params().At(i)
+ if param.Name() == "_" {
+ continue
+ }
+ if !types.Implements(param.Type(), iface) {
+ continue
+ }
+ return param.Name()
+ }
+
+ return ""
+}
diff --git a/gopls/internal/lsp/source/completion/util.go b/gopls/internal/lsp/source/completion/util.go
new file mode 100644
index 000000000..4b6ec09a0
--- /dev/null
+++ b/gopls/internal/lsp/source/completion/util.go
@@ -0,0 +1,344 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package completion
+
+import (
+ "go/ast"
+ "go/token"
+ "go/types"
+
+ "golang.org/x/tools/go/types/typeutil"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/safetoken"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/internal/diff"
+ "golang.org/x/tools/internal/typeparams"
+)
+
+// exprAtPos returns the index of the expression containing pos.
+func exprAtPos(pos token.Pos, args []ast.Expr) int {
+ for i, expr := range args {
+ if expr.Pos() <= pos && pos <= expr.End() {
+ return i
+ }
+ }
+ return len(args)
+}
+
+// eachField invokes fn for each field that can be selected from a
+// value of type T.
+func eachField(T types.Type, fn func(*types.Var)) {
+ // TODO(adonovan): this algorithm doesn't exclude ambiguous
+ // selections that match more than one field/method.
+ // types.NewSelectionSet should do that for us.
+
+ // for termination on recursive types
+ var seen typeutil.Map
+
+ var visit func(T types.Type)
+ visit = func(T types.Type) {
+ if T, ok := source.Deref(T).Underlying().(*types.Struct); ok {
+ if seen.At(T) != nil {
+ return
+ }
+
+ for i := 0; i < T.NumFields(); i++ {
+ f := T.Field(i)
+ fn(f)
+ if f.Anonymous() {
+ seen.Set(T, true)
+ visit(f.Type())
+ }
+ }
+ }
+ }
+ visit(T)
+}
+
+// typeIsValid reports whether typ doesn't contain any Invalid types.
+func typeIsValid(typ types.Type) bool {
+ // Check named types separately, because we don't want
+ // to call Underlying() on them to avoid problems with recursive types.
+ if _, ok := typ.(*types.Named); ok {
+ return true
+ }
+
+ switch typ := typ.Underlying().(type) {
+ case *types.Basic:
+ return typ.Kind() != types.Invalid
+ case *types.Array:
+ return typeIsValid(typ.Elem())
+ case *types.Slice:
+ return typeIsValid(typ.Elem())
+ case *types.Pointer:
+ return typeIsValid(typ.Elem())
+ case *types.Map:
+ return typeIsValid(typ.Key()) && typeIsValid(typ.Elem())
+ case *types.Chan:
+ return typeIsValid(typ.Elem())
+ case *types.Signature:
+ return typeIsValid(typ.Params()) && typeIsValid(typ.Results())
+ case *types.Tuple:
+ for i := 0; i < typ.Len(); i++ {
+ if !typeIsValid(typ.At(i).Type()) {
+ return false
+ }
+ }
+ return true
+ case *types.Struct, *types.Interface:
+ // Don't bother checking structs, interfaces for validity.
+ return true
+ default:
+ return false
+ }
+}
+
+// resolveInvalid traverses the node of the AST that defines the scope
+// containing the declaration of obj, and attempts to find a user-friendly
+// name for its invalid type. The resulting Object and its Type are fake.
+func resolveInvalid(fset *token.FileSet, obj types.Object, node ast.Node, info *types.Info) types.Object {
+ var resultExpr ast.Expr
+ ast.Inspect(node, func(node ast.Node) bool {
+ switch n := node.(type) {
+ case *ast.ValueSpec:
+ for _, name := range n.Names {
+ if info.Defs[name] == obj {
+ resultExpr = n.Type
+ }
+ }
+ return false
+ case *ast.Field: // This case handles parameters and results of a FuncDecl or FuncLit.
+ for _, name := range n.Names {
+ if info.Defs[name] == obj {
+ resultExpr = n.Type
+ }
+ }
+ return false
+ default:
+ return true
+ }
+ })
+ // Construct a fake type for the object and return a fake object with this type.
+ typename := source.FormatNode(fset, resultExpr)
+ typ := types.NewNamed(types.NewTypeName(token.NoPos, obj.Pkg(), typename, nil), types.Typ[types.Invalid], nil)
+ return types.NewVar(obj.Pos(), obj.Pkg(), obj.Name(), typ)
+}
+
+func isPointer(T types.Type) bool {
+ _, ok := T.(*types.Pointer)
+ return ok
+}
+
+func isVar(obj types.Object) bool {
+ _, ok := obj.(*types.Var)
+ return ok
+}
+
+func isTypeName(obj types.Object) bool {
+ _, ok := obj.(*types.TypeName)
+ return ok
+}
+
+func isFunc(obj types.Object) bool {
+ _, ok := obj.(*types.Func)
+ return ok
+}
+
+func isEmptyInterface(T types.Type) bool {
+ intf, _ := T.(*types.Interface)
+ return intf != nil && intf.NumMethods() == 0 && typeparams.IsMethodSet(intf)
+}
+
+func isUntyped(T types.Type) bool {
+ if basic, ok := T.(*types.Basic); ok {
+ return basic.Info()&types.IsUntyped > 0
+ }
+ return false
+}
+
+func isPkgName(obj types.Object) bool {
+ _, ok := obj.(*types.PkgName)
+ return ok
+}
+
+func isASTFile(n ast.Node) bool {
+ _, ok := n.(*ast.File)
+ return ok
+}
+
+func deslice(T types.Type) types.Type {
+ if slice, ok := T.Underlying().(*types.Slice); ok {
+ return slice.Elem()
+ }
+ return nil
+}
+
+// isSelector returns the enclosing *ast.SelectorExpr when pos is in the
+// selector.
+func enclosingSelector(path []ast.Node, pos token.Pos) *ast.SelectorExpr {
+ if len(path) == 0 {
+ return nil
+ }
+
+ if sel, ok := path[0].(*ast.SelectorExpr); ok {
+ return sel
+ }
+
+ if _, ok := path[0].(*ast.Ident); ok && len(path) > 1 {
+ if sel, ok := path[1].(*ast.SelectorExpr); ok && pos >= sel.Sel.Pos() {
+ return sel
+ }
+ }
+
+ return nil
+}
+
+// enclosingDeclLHS returns LHS idents from containing value spec or
+// assign statement.
+func enclosingDeclLHS(path []ast.Node) []*ast.Ident {
+ for _, n := range path {
+ switch n := n.(type) {
+ case *ast.ValueSpec:
+ return n.Names
+ case *ast.AssignStmt:
+ ids := make([]*ast.Ident, 0, len(n.Lhs))
+ for _, e := range n.Lhs {
+ if id, ok := e.(*ast.Ident); ok {
+ ids = append(ids, id)
+ }
+ }
+ return ids
+ }
+ }
+
+ return nil
+}
+
+// exprObj returns the types.Object associated with the *ast.Ident or
+// *ast.SelectorExpr e.
+func exprObj(info *types.Info, e ast.Expr) types.Object {
+ var ident *ast.Ident
+ switch expr := e.(type) {
+ case *ast.Ident:
+ ident = expr
+ case *ast.SelectorExpr:
+ ident = expr.Sel
+ default:
+ return nil
+ }
+
+ return info.ObjectOf(ident)
+}
+
+// typeConversion returns the type being converted to if call is a type
+// conversion expression.
+func typeConversion(call *ast.CallExpr, info *types.Info) types.Type {
+ // Type conversion (e.g. "float64(foo)").
+ if fun, _ := exprObj(info, call.Fun).(*types.TypeName); fun != nil {
+ return fun.Type()
+ }
+
+ return nil
+}
+
+// fieldsAccessible returns whether s has at least one field accessible by p.
+func fieldsAccessible(s *types.Struct, p *types.Package) bool {
+ for i := 0; i < s.NumFields(); i++ {
+ f := s.Field(i)
+ if f.Exported() || f.Pkg() == p {
+ return true
+ }
+ }
+ return false
+}
+
+// prevStmt returns the statement that precedes the statement containing pos.
+// For example:
+//
+// foo := 1
+// bar(1 + 2<>)
+//
+// If "<>" is pos, prevStmt returns "foo := 1"
+func prevStmt(pos token.Pos, path []ast.Node) ast.Stmt {
+ var blockLines []ast.Stmt
+ for i := 0; i < len(path) && blockLines == nil; i++ {
+ switch n := path[i].(type) {
+ case *ast.BlockStmt:
+ blockLines = n.List
+ case *ast.CommClause:
+ blockLines = n.Body
+ case *ast.CaseClause:
+ blockLines = n.Body
+ }
+ }
+
+ for i := len(blockLines) - 1; i >= 0; i-- {
+ if blockLines[i].End() < pos {
+ return blockLines[i]
+ }
+ }
+
+ return nil
+}
+
+// formatZeroValue produces Go code representing the zero value of T. It
+// returns the empty string if T is invalid.
+func formatZeroValue(T types.Type, qf types.Qualifier) string {
+ switch u := T.Underlying().(type) {
+ case *types.Basic:
+ switch {
+ case u.Info()&types.IsNumeric > 0:
+ return "0"
+ case u.Info()&types.IsString > 0:
+ return `""`
+ case u.Info()&types.IsBoolean > 0:
+ return "false"
+ default:
+ return ""
+ }
+ case *types.Pointer, *types.Interface, *types.Chan, *types.Map, *types.Slice, *types.Signature:
+ return "nil"
+ default:
+ return types.TypeString(T, qf) + "{}"
+ }
+}
+
+// isBasicKind returns whether t is a basic type of kind k.
+func isBasicKind(t types.Type, k types.BasicInfo) bool {
+ b, _ := t.Underlying().(*types.Basic)
+ return b != nil && b.Info()&k > 0
+}
+
+func (c *completer) editText(from, to token.Pos, newText string) ([]protocol.TextEdit, error) {
+ start, end, err := safetoken.Offsets(c.tokFile, from, to)
+ if err != nil {
+ return nil, err // can't happen: from/to came from c
+ }
+ return source.ToProtocolEdits(c.mapper, []diff.Edit{{
+ Start: start,
+ End: end,
+ New: newText,
+ }})
+}
+
+// assignableTo is like types.AssignableTo, but returns false if
+// either type is invalid.
+func assignableTo(x, to types.Type) bool {
+ if x == types.Typ[types.Invalid] || to == types.Typ[types.Invalid] {
+ return false
+ }
+
+ return types.AssignableTo(x, to)
+}
+
+// convertibleTo is like types.ConvertibleTo, but returns false if
+// either type is invalid.
+func convertibleTo(x, to types.Type) bool {
+ if x == types.Typ[types.Invalid] || to == types.Typ[types.Invalid] {
+ return false
+ }
+
+ return types.ConvertibleTo(x, to)
+}
diff --git a/internal/lsp/source/completion/util_test.go b/gopls/internal/lsp/source/completion/util_test.go
index c94d279fb..c94d279fb 100644
--- a/internal/lsp/source/completion/util_test.go
+++ b/gopls/internal/lsp/source/completion/util_test.go
diff --git a/gopls/internal/lsp/source/definition.go b/gopls/internal/lsp/source/definition.go
new file mode 100644
index 000000000..cb87eb061
--- /dev/null
+++ b/gopls/internal/lsp/source/definition.go
@@ -0,0 +1,229 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package source
+
+import (
+ "context"
+ "fmt"
+ "go/ast"
+ "go/token"
+ "go/types"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/bug"
+ "golang.org/x/tools/internal/event"
+)
+
+// Definition handles the textDocument/definition request for Go files.
+func Definition(ctx context.Context, snapshot Snapshot, fh FileHandle, position protocol.Position) ([]protocol.Location, error) {
+ ctx, done := event.Start(ctx, "source.Definition")
+ defer done()
+
+ pkg, pgf, err := PackageForFile(ctx, snapshot, fh.URI(), NarrowestPackage)
+ if err != nil {
+ return nil, err
+ }
+ pos, err := pgf.PositionPos(position)
+ if err != nil {
+ return nil, err
+ }
+
+ // Handle the case where the cursor is in an import.
+ importLocations, err := importDefinition(ctx, snapshot, pkg, pgf, pos)
+ if err != nil {
+ return nil, err
+ }
+ if len(importLocations) > 0 {
+ return importLocations, nil
+ }
+
+ // Handle the case where the cursor is in the package name.
+ // We use "<= End" to accept a query immediately after the package name.
+ if pgf.File != nil && pgf.File.Name.Pos() <= pos && pos <= pgf.File.Name.End() {
+ // If there's no package documentation, just use current file.
+ declFile := pgf
+ for _, pgf := range pkg.CompiledGoFiles() {
+ if pgf.File.Name != nil && pgf.File.Doc != nil {
+ declFile = pgf
+ break
+ }
+ }
+ loc, err := declFile.NodeLocation(declFile.File.Name)
+ if err != nil {
+ return nil, err
+ }
+ return []protocol.Location{loc}, nil
+ }
+
+ // The general case: the cursor is on an identifier.
+ _, obj, _ := referencedObject(pkg, pgf, pos)
+ if obj == nil {
+ return nil, nil
+ }
+
+ // Handle built-in identifiers.
+ if obj.Parent() == types.Universe {
+ builtin, err := snapshot.BuiltinFile(ctx)
+ if err != nil {
+ return nil, err
+ }
+ // Note that builtinObj is an ast.Object, not types.Object :)
+ builtinObj := builtin.File.Scope.Lookup(obj.Name())
+ if builtinObj == nil {
+ // Every builtin should have documentation.
+ return nil, bug.Errorf("internal error: no builtin object for %s", obj.Name())
+ }
+ decl, ok := builtinObj.Decl.(ast.Node)
+ if !ok {
+ return nil, bug.Errorf("internal error: no declaration for %s", obj.Name())
+ }
+ // The builtin package isn't in the dependency graph, so the usual
+ // utilities won't work here.
+ loc, err := builtin.PosLocation(decl.Pos(), decl.Pos()+token.Pos(len(obj.Name())))
+ if err != nil {
+ return nil, err
+ }
+ return []protocol.Location{loc}, nil
+ }
+
+ // Finally, map the object position.
+ var locs []protocol.Location
+ if !obj.Pos().IsValid() {
+ return nil, bug.Errorf("internal error: no position for %v", obj.Name())
+ }
+ loc, err := mapPosition(ctx, pkg.FileSet(), snapshot, obj.Pos(), adjustedObjEnd(obj))
+ if err != nil {
+ return nil, err
+ }
+ locs = append(locs, loc)
+ return locs, nil
+}
+
+// referencedObject returns the identifier and object referenced at the
+// specified position, which must be within the file pgf, for the purposes of
+// definition/hover/call hierarchy operations. It returns a nil object if no
+// object was found at the given position.
+//
+// If the returned identifier is a type-switch implicit (i.e. the x in x :=
+// e.(type)), the third result will be the type of the expression being
+// switched on (the type of e in the example). This facilitates workarounds for
+// limitations of the go/types API, which does not report an object for the
+// identifier x.
+//
+// For embedded fields, referencedObject returns the type name object rather
+// than the var (field) object.
+//
+// TODO(rfindley): this function exists to preserve the pre-existing behavior
+// of source.Identifier. Eliminate this helper in favor of sharing
+// functionality with objectsAt, after choosing suitable primitives.
+func referencedObject(pkg Package, pgf *ParsedGoFile, pos token.Pos) (*ast.Ident, types.Object, types.Type) {
+ path := pathEnclosingObjNode(pgf.File, pos)
+ if len(path) == 0 {
+ return nil, nil, nil
+ }
+ var obj types.Object
+ info := pkg.GetTypesInfo()
+ switch n := path[0].(type) {
+ case *ast.Ident:
+ obj = info.ObjectOf(n)
+ // If n is the var's declaring ident in a type switch
+ // [i.e. the x in x := foo.(type)], it will not have an object. In this
+ // case, set obj to the first implicit object (if any), and return the type
+ // of the expression being switched on.
+ //
+ // The type switch may have no case clauses and thus no
+ // implicit objects; this is a type error ("unused x"),
+ if obj == nil {
+ if implicits, typ := typeSwitchImplicits(info, path); len(implicits) > 0 {
+ return n, implicits[0], typ
+ }
+ }
+
+ // If the original position was an embedded field, we want to jump
+ // to the field's type definition, not the field's definition.
+ if v, ok := obj.(*types.Var); ok && v.Embedded() {
+ // types.Info.Uses contains the embedded field's *types.TypeName.
+ if typeName := info.Uses[n]; typeName != nil {
+ obj = typeName
+ }
+ }
+ return n, obj, nil
+ }
+ return nil, nil, nil
+}
+
+// importDefinition returns locations defining a package referenced by the
+// import spec containing pos.
+//
+// If pos is not inside an import spec, it returns nil, nil.
+func importDefinition(ctx context.Context, s Snapshot, pkg Package, pgf *ParsedGoFile, pos token.Pos) ([]protocol.Location, error) {
+ var imp *ast.ImportSpec
+ for _, spec := range pgf.File.Imports {
+ // We use "<= End" to accept a query immediately after an ImportSpec.
+ if spec.Path.Pos() <= pos && pos <= spec.Path.End() {
+ imp = spec
+ }
+ }
+ if imp == nil {
+ return nil, nil
+ }
+
+ importPath := UnquoteImportPath(imp)
+ impID := pkg.Metadata().DepsByImpPath[importPath]
+ if impID == "" {
+ return nil, fmt.Errorf("failed to resolve import %q", importPath)
+ }
+ impMetadata := s.Metadata(impID)
+ if impMetadata == nil {
+ return nil, fmt.Errorf("missing information for package %q", impID)
+ }
+
+ var locs []protocol.Location
+ for _, f := range impMetadata.CompiledGoFiles {
+ fh, err := s.GetFile(ctx, f)
+ if err != nil {
+ if ctx.Err() != nil {
+ return nil, ctx.Err()
+ }
+ continue
+ }
+ pgf, err := s.ParseGo(ctx, fh, ParseHeader)
+ if err != nil {
+ if ctx.Err() != nil {
+ return nil, ctx.Err()
+ }
+ continue
+ }
+ loc, err := pgf.NodeLocation(pgf.File)
+ if err != nil {
+ return nil, err
+ }
+ locs = append(locs, loc)
+ }
+
+ if len(locs) == 0 {
+ return nil, fmt.Errorf("package %q has no readable files", impID) // incl. unsafe
+ }
+
+ return locs, nil
+}
+
+// TODO(rfindley): avoid the duplicate column mapping here, by associating a
+// column mapper with each file handle.
+func mapPosition(ctx context.Context, fset *token.FileSet, s FileSource, start, end token.Pos) (protocol.Location, error) {
+ file := fset.File(start)
+ uri := span.URIFromPath(file.Name())
+ fh, err := s.GetFile(ctx, uri)
+ if err != nil {
+ return protocol.Location{}, err
+ }
+ content, err := fh.Read()
+ if err != nil {
+ return protocol.Location{}, err
+ }
+ m := protocol.NewMapper(fh.URI(), content)
+ return m.PosLocation(file, start, end)
+}
diff --git a/gopls/internal/lsp/source/diagnostics.go b/gopls/internal/lsp/source/diagnostics.go
new file mode 100644
index 000000000..e3f35988e
--- /dev/null
+++ b/gopls/internal/lsp/source/diagnostics.go
@@ -0,0 +1,138 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package source
+
+import (
+ "context"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/span"
+)
+
+type SuggestedFix struct {
+ Title string
+ Edits map[span.URI][]protocol.TextEdit
+ Command *protocol.Command
+ ActionKind protocol.CodeActionKind
+}
+
+// Analyze reports go/analysis-framework diagnostics in the specified package.
+func Analyze(ctx context.Context, snapshot Snapshot, pkgid PackageID, includeConvenience bool) (map[span.URI][]*Diagnostic, error) {
+ // Exit early if the context has been canceled. This also protects us
+ // from a race on Options, see golang/go#36699.
+ if ctx.Err() != nil {
+ return nil, ctx.Err()
+ }
+
+ options := snapshot.View().Options()
+ categories := []map[string]*Analyzer{
+ options.DefaultAnalyzers,
+ options.StaticcheckAnalyzers,
+ options.TypeErrorAnalyzers,
+ }
+ if includeConvenience { // e.g. for codeAction
+ categories = append(categories, options.ConvenienceAnalyzers) // e.g. fillstruct
+ }
+
+ var analyzers []*Analyzer
+ for _, cat := range categories {
+ for _, a := range cat {
+ analyzers = append(analyzers, a)
+ }
+ }
+
+ analysisDiagnostics, err := snapshot.Analyze(ctx, pkgid, analyzers)
+ if err != nil {
+ return nil, err
+ }
+
+ // Report diagnostics and errors from root analyzers.
+ reports := make(map[span.URI][]*Diagnostic)
+ for _, diag := range analysisDiagnostics {
+ reports[diag.URI] = append(reports[diag.URI], diag)
+ }
+ return reports, nil
+}
+
+// FileDiagnostics reports diagnostics in the specified file,
+// as used by the "gopls check" command.
+//
+// TODO(adonovan): factor in common with (*Server).codeAction, which
+// executes { PackageForFile; Analyze } too?
+//
+// TODO(adonovan): opt: this function is called in a loop from the
+// "gopls/diagnoseFiles" nonstandard request handler. It would be more
+// efficient to compute the set of packages and TypeCheck and
+// Analyze them all at once.
+func FileDiagnostics(ctx context.Context, snapshot Snapshot, uri span.URI) (FileHandle, []*Diagnostic, error) {
+ fh, err := snapshot.GetFile(ctx, uri)
+ if err != nil {
+ return nil, nil, err
+ }
+ pkg, _, err := PackageForFile(ctx, snapshot, uri, NarrowestPackage)
+ if err != nil {
+ return nil, nil, err
+ }
+ pkgDiags, err := pkg.DiagnosticsForFile(ctx, snapshot, uri)
+ if err != nil {
+ return nil, nil, err
+ }
+ adiags, err := Analyze(ctx, snapshot, pkg.Metadata().ID, false)
+ if err != nil {
+ return nil, nil, err
+ }
+ var fileDiags []*Diagnostic // combine load/parse/type + analysis diagnostics
+ CombineDiagnostics(pkgDiags, adiags[uri], &fileDiags, &fileDiags)
+ return fh, fileDiags, nil
+}
+
+// CombineDiagnostics combines and filters list/parse/type diagnostics from
+// tdiags with adiags, and appends the two lists to *outT and *outA,
+// respectively.
+//
+// Type-error analyzers produce diagnostics that are redundant
+// with type checker diagnostics, but more detailed (e.g. fixes).
+// Rather than report two diagnostics for the same problem,
+// we combine them by augmenting the type-checker diagnostic
+// and discarding the analyzer diagnostic.
+//
+// If an analysis diagnostic has the same range and message as
+// a list/parse/type diagnostic, the suggested fix information
+// (et al) of the latter is merged into a copy of the former.
+// This handles the case where a type-error analyzer suggests
+// a fix to a type error, and avoids duplication.
+//
+// The use of out-slices, though irregular, allows the caller to
+// easily choose whether to keep the results separate or combined.
+//
+// The arguments are not modified.
+func CombineDiagnostics(tdiags []*Diagnostic, adiags []*Diagnostic, outT, outA *[]*Diagnostic) {
+
+ // Build index of (list+parse+)type errors.
+ type key struct {
+ Range protocol.Range
+ message string
+ }
+ index := make(map[key]int) // maps (Range,Message) to index in tdiags slice
+ for i, diag := range tdiags {
+ index[key{diag.Range, diag.Message}] = i
+ }
+
+ // Filter out analysis diagnostics that match type errors,
+ // retaining their suggested fix (etc) fields.
+ for _, diag := range adiags {
+ if i, ok := index[key{diag.Range, diag.Message}]; ok {
+ copy := *tdiags[i]
+ copy.SuggestedFixes = diag.SuggestedFixes
+ copy.Tags = diag.Tags
+ tdiags[i] = &copy
+ continue
+ }
+
+ *outA = append(*outA, diag)
+ }
+
+ *outT = append(*outT, tdiags...)
+}
diff --git a/gopls/internal/lsp/source/extract.go b/gopls/internal/lsp/source/extract.go
new file mode 100644
index 000000000..56e8a5e23
--- /dev/null
+++ b/gopls/internal/lsp/source/extract.go
@@ -0,0 +1,1331 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package source
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/format"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "sort"
+ "strings"
+ "text/scanner"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/ast/astutil"
+ "golang.org/x/tools/gopls/internal/lsp/safetoken"
+ "golang.org/x/tools/internal/analysisinternal"
+ "golang.org/x/tools/internal/bug"
+)
+
+func extractVariable(fset *token.FileSet, start, end token.Pos, src []byte, file *ast.File, _ *types.Package, info *types.Info) (*analysis.SuggestedFix, error) {
+ tokFile := fset.File(file.Pos())
+ expr, path, ok, err := CanExtractVariable(start, end, file)
+ if !ok {
+ return nil, fmt.Errorf("extractVariable: cannot extract %s: %v", safetoken.StartPosition(fset, start), err)
+ }
+
+ // Create new AST node for extracted code.
+ var lhsNames []string
+ switch expr := expr.(type) {
+ // TODO: stricter rules for selectorExpr.
+ case *ast.BasicLit, *ast.CompositeLit, *ast.IndexExpr, *ast.SliceExpr,
+ *ast.UnaryExpr, *ast.BinaryExpr, *ast.SelectorExpr:
+ lhsName, _ := generateAvailableIdentifier(expr.Pos(), file, path, info, "x", 0)
+ lhsNames = append(lhsNames, lhsName)
+ case *ast.CallExpr:
+ tup, ok := info.TypeOf(expr).(*types.Tuple)
+ if !ok {
+ // If the call expression only has one return value, we can treat it the
+ // same as our standard extract variable case.
+ lhsName, _ := generateAvailableIdentifier(expr.Pos(), file, path, info, "x", 0)
+ lhsNames = append(lhsNames, lhsName)
+ break
+ }
+ idx := 0
+ for i := 0; i < tup.Len(); i++ {
+ // Generate a unique variable for each return value.
+ var lhsName string
+ lhsName, idx = generateAvailableIdentifier(expr.Pos(), file, path, info, "x", idx)
+ lhsNames = append(lhsNames, lhsName)
+ }
+ default:
+ return nil, fmt.Errorf("cannot extract %T", expr)
+ }
+
+ insertBeforeStmt := analysisinternal.StmtToInsertVarBefore(path)
+ if insertBeforeStmt == nil {
+ return nil, fmt.Errorf("cannot find location to insert extraction")
+ }
+ indent, err := calculateIndentation(src, tokFile, insertBeforeStmt)
+ if err != nil {
+ return nil, err
+ }
+ newLineIndent := "\n" + indent
+
+ lhs := strings.Join(lhsNames, ", ")
+ assignStmt := &ast.AssignStmt{
+ Lhs: []ast.Expr{ast.NewIdent(lhs)},
+ Tok: token.DEFINE,
+ Rhs: []ast.Expr{expr},
+ }
+ var buf bytes.Buffer
+ if err := format.Node(&buf, fset, assignStmt); err != nil {
+ return nil, err
+ }
+ assignment := strings.ReplaceAll(buf.String(), "\n", newLineIndent) + newLineIndent
+
+ return &analysis.SuggestedFix{
+ TextEdits: []analysis.TextEdit{
+ {
+ Pos: insertBeforeStmt.Pos(),
+ End: insertBeforeStmt.Pos(),
+ NewText: []byte(assignment),
+ },
+ {
+ Pos: start,
+ End: end,
+ NewText: []byte(lhs),
+ },
+ },
+ }, nil
+}
+
+// CanExtractVariable reports whether the code in the given range can be
+// extracted to a variable.
+func CanExtractVariable(start, end token.Pos, file *ast.File) (ast.Expr, []ast.Node, bool, error) {
+ if start == end {
+ return nil, nil, false, fmt.Errorf("start and end are equal")
+ }
+ path, _ := astutil.PathEnclosingInterval(file, start, end)
+ if len(path) == 0 {
+ return nil, nil, false, fmt.Errorf("no path enclosing interval")
+ }
+ for _, n := range path {
+ if _, ok := n.(*ast.ImportSpec); ok {
+ return nil, nil, false, fmt.Errorf("cannot extract variable in an import block")
+ }
+ }
+ node := path[0]
+ if start != node.Pos() || end != node.End() {
+ return nil, nil, false, fmt.Errorf("range does not map to an AST node")
+ }
+ expr, ok := node.(ast.Expr)
+ if !ok {
+ return nil, nil, false, fmt.Errorf("node is not an expression")
+ }
+ switch expr.(type) {
+ case *ast.BasicLit, *ast.CompositeLit, *ast.IndexExpr, *ast.CallExpr,
+ *ast.SliceExpr, *ast.UnaryExpr, *ast.BinaryExpr, *ast.SelectorExpr:
+ return expr, path, true, nil
+ }
+ return nil, nil, false, fmt.Errorf("cannot extract an %T to a variable", expr)
+}
+
+// Calculate indentation for insertion.
+// When inserting lines of code, we must ensure that the lines have consistent
+// formatting (i.e. the proper indentation). To do so, we observe the indentation on the
+// line of code on which the insertion occurs.
+func calculateIndentation(content []byte, tok *token.File, insertBeforeStmt ast.Node) (string, error) {
+ line := tok.Line(insertBeforeStmt.Pos())
+ lineOffset, stmtOffset, err := safetoken.Offsets(tok, tok.LineStart(line), insertBeforeStmt.Pos())
+ if err != nil {
+ return "", err
+ }
+ return string(content[lineOffset:stmtOffset]), nil
+}
+
+// generateAvailableIdentifier adjusts the new function name until there are no collisions in scope.
+// Possible collisions include other function and variable names. Returns the next index to check for prefix.
+func generateAvailableIdentifier(pos token.Pos, file *ast.File, path []ast.Node, info *types.Info, prefix string, idx int) (string, int) {
+ scopes := CollectScopes(info, path, pos)
+ return generateIdentifier(idx, prefix, func(name string) bool {
+ return file.Scope.Lookup(name) != nil || !isValidName(name, scopes)
+ })
+}
+
+func generateIdentifier(idx int, prefix string, hasCollision func(string) bool) (string, int) {
+ name := prefix
+ if idx != 0 {
+ name += fmt.Sprintf("%d", idx)
+ }
+ for hasCollision(name) {
+ idx++
+ name = fmt.Sprintf("%v%d", prefix, idx)
+ }
+ return name, idx + 1
+}
+
+// isValidName checks for variable collision in scope.
+func isValidName(name string, scopes []*types.Scope) bool {
+ for _, scope := range scopes {
+ if scope == nil {
+ continue
+ }
+ if scope.Lookup(name) != nil {
+ return false
+ }
+ }
+ return true
+}
+
+// returnVariable keeps track of the information we need to properly introduce a new variable
+// that we will return in the extracted function.
+type returnVariable struct {
+ // name is the identifier that is used on the left-hand side of the call to
+ // the extracted function.
+ name ast.Expr
+ // decl is the declaration of the variable. It is used in the type signature of the
+ // extracted function and for variable declarations.
+ decl *ast.Field
+ // zeroVal is the "zero value" of the type of the variable. It is used in a return
+ // statement in the extracted function.
+ zeroVal ast.Expr
+}
+
+// extractMethod refactors the selected block of code into a new method.
+func extractMethod(fset *token.FileSet, start, end token.Pos, src []byte, file *ast.File, pkg *types.Package, info *types.Info) (*analysis.SuggestedFix, error) {
+ return extractFunctionMethod(fset, start, end, src, file, pkg, info, true)
+}
+
+// extractFunction refactors the selected block of code into a new function.
+func extractFunction(fset *token.FileSet, start, end token.Pos, src []byte, file *ast.File, pkg *types.Package, info *types.Info) (*analysis.SuggestedFix, error) {
+ return extractFunctionMethod(fset, start, end, src, file, pkg, info, false)
+}
+
+// extractFunctionMethod refactors the selected block of code into a new function/method.
+// It also replaces the selected block of code with a call to the extracted
+// function. First, we manually adjust the selection range. We remove trailing
+// and leading whitespace characters to ensure the range is precisely bounded
+// by AST nodes. Next, we determine the variables that will be the parameters
+// and return values of the extracted function/method. Lastly, we construct the call
+// of the function/method and insert this call as well as the extracted function/method into
+// their proper locations.
+func extractFunctionMethod(fset *token.FileSet, start, end token.Pos, src []byte, file *ast.File, pkg *types.Package, info *types.Info, isMethod bool) (*analysis.SuggestedFix, error) {
+ errorPrefix := "extractFunction"
+ if isMethod {
+ errorPrefix = "extractMethod"
+ }
+
+ tok := fset.File(file.Pos())
+ if tok == nil {
+ return nil, bug.Errorf("no file for position")
+ }
+ p, ok, methodOk, err := CanExtractFunction(tok, start, end, src, file)
+ if (!ok && !isMethod) || (!methodOk && isMethod) {
+ return nil, fmt.Errorf("%s: cannot extract %s: %v", errorPrefix,
+ safetoken.StartPosition(fset, start), err)
+ }
+ tok, path, start, end, outer, node := p.tok, p.path, p.start, p.end, p.outer, p.node
+ fileScope := info.Scopes[file]
+ if fileScope == nil {
+ return nil, fmt.Errorf("%s: file scope is empty", errorPrefix)
+ }
+ pkgScope := fileScope.Parent()
+ if pkgScope == nil {
+ return nil, fmt.Errorf("%s: package scope is empty", errorPrefix)
+ }
+
+ // A return statement is non-nested if its parent node is equal to the parent node
+ // of the first node in the selection. These cases must be handled separately because
+ // non-nested return statements are guaranteed to execute.
+ var retStmts []*ast.ReturnStmt
+ var hasNonNestedReturn bool
+ startParent := findParent(outer, node)
+ ast.Inspect(outer, func(n ast.Node) bool {
+ if n == nil {
+ return false
+ }
+ if n.Pos() < start || n.End() > end {
+ return n.Pos() <= end
+ }
+ ret, ok := n.(*ast.ReturnStmt)
+ if !ok {
+ return true
+ }
+ if findParent(outer, n) == startParent {
+ hasNonNestedReturn = true
+ }
+ retStmts = append(retStmts, ret)
+ return false
+ })
+ containsReturnStatement := len(retStmts) > 0
+
+ // Now that we have determined the correct range for the selection block,
+ // we must determine the signature of the extracted function. We will then replace
+ // the block with an assignment statement that calls the extracted function with
+ // the appropriate parameters and return values.
+ variables, err := collectFreeVars(info, file, fileScope, pkgScope, start, end, path[0])
+ if err != nil {
+ return nil, err
+ }
+
+ var (
+ receiverUsed bool
+ receiver *ast.Field
+ receiverName string
+ receiverObj types.Object
+ )
+ if isMethod {
+ if outer == nil || outer.Recv == nil || len(outer.Recv.List) == 0 {
+ return nil, fmt.Errorf("%s: cannot extract need method receiver", errorPrefix)
+ }
+ receiver = outer.Recv.List[0]
+ if len(receiver.Names) == 0 || receiver.Names[0] == nil {
+ return nil, fmt.Errorf("%s: cannot extract need method receiver name", errorPrefix)
+ }
+ recvName := receiver.Names[0]
+ receiverName = recvName.Name
+ receiverObj = info.ObjectOf(recvName)
+ }
+
+ var (
+ params, returns []ast.Expr // used when calling the extracted function
+ paramTypes, returnTypes []*ast.Field // used in the signature of the extracted function
+ uninitialized []types.Object // vars we will need to initialize before the call
+ )
+
+ // Avoid duplicates while traversing vars and uninitialized.
+ seenVars := make(map[types.Object]ast.Expr)
+ seenUninitialized := make(map[types.Object]struct{})
+
+ // Some variables on the left-hand side of our assignment statement may be free. If our
+ // selection begins in the same scope in which the free variable is defined, we can
+ // redefine it in our assignment statement. See the following example, where 'b' and
+ // 'err' (both free variables) can be redefined in the second funcCall() while maintaining
+ // correctness.
+ //
+ //
+ // Not Redefined:
+ //
+ // a, err := funcCall()
+ // var b int
+ // b, err = funcCall()
+ //
+ // Redefined:
+ //
+ // a, err := funcCall()
+ // b, err := funcCall()
+ //
+ // We track the number of free variables that can be redefined to maintain our preference
+ // of using "x, y, z := fn()" style assignment statements.
+ var canRedefineCount int
+
+ // Each identifier in the selected block must become (1) a parameter to the
+ // extracted function, (2) a return value of the extracted function, or (3) a local
+ // variable in the extracted function. Determine the outcome(s) for each variable
+ // based on whether it is free, altered within the selected block, and used outside
+ // of the selected block.
+ for _, v := range variables {
+ if _, ok := seenVars[v.obj]; ok {
+ continue
+ }
+ if v.obj.Name() == "_" {
+ // The blank identifier is always a local variable
+ continue
+ }
+ typ := analysisinternal.TypeExpr(file, pkg, v.obj.Type())
+ if typ == nil {
+ return nil, fmt.Errorf("nil AST expression for type: %v", v.obj.Name())
+ }
+ seenVars[v.obj] = typ
+ identifier := ast.NewIdent(v.obj.Name())
+ // An identifier must meet three conditions to become a return value of the
+ // extracted function. (1) its value must be defined or reassigned within
+ // the selection (isAssigned), (2) it must be used at least once after the
+ // selection (isUsed), and (3) its first use after the selection
+ // cannot be its own reassignment or redefinition (objOverriden).
+ if v.obj.Parent() == nil {
+ return nil, fmt.Errorf("parent nil")
+ }
+ isUsed, firstUseAfter := objUsed(info, end, v.obj.Parent().End(), v.obj)
+ if v.assigned && isUsed && !varOverridden(info, firstUseAfter, v.obj, v.free, outer) {
+ returnTypes = append(returnTypes, &ast.Field{Type: typ})
+ returns = append(returns, identifier)
+ if !v.free {
+ uninitialized = append(uninitialized, v.obj)
+ } else if v.obj.Parent().Pos() == startParent.Pos() {
+ canRedefineCount++
+ }
+ }
+ // An identifier must meet two conditions to become a parameter of the
+ // extracted function. (1) it must be free (isFree), and (2) its first
+ // use within the selection cannot be its own definition (isDefined).
+ if v.free && !v.defined {
+ // Skip the selector for a method.
+ if isMethod && v.obj == receiverObj {
+ receiverUsed = true
+ continue
+ }
+ params = append(params, identifier)
+ paramTypes = append(paramTypes, &ast.Field{
+ Names: []*ast.Ident{identifier},
+ Type: typ,
+ })
+ }
+ }
+
+ // Find the function literal that encloses the selection. The enclosing function literal
+ // may not be the enclosing function declaration (i.e. 'outer'). For example, in the
+ // following block:
+ //
+ // func main() {
+ // ast.Inspect(node, func(n ast.Node) bool {
+ // v := 1 // this line extracted
+ // return true
+ // })
+ // }
+ //
+ // 'outer' is main(). However, the extracted selection most directly belongs to
+ // the anonymous function literal, the second argument of ast.Inspect(). We use the
+ // enclosing function literal to determine the proper return types for return statements
+ // within the selection. We still need the enclosing function declaration because this is
+ // the top-level declaration. We inspect the top-level declaration to look for variables
+ // as well as for code replacement.
+ enclosing := outer.Type
+ for _, p := range path {
+ if p == enclosing {
+ break
+ }
+ if fl, ok := p.(*ast.FuncLit); ok {
+ enclosing = fl.Type
+ break
+ }
+ }
+
+ // We put the selection in a constructed file. We can then traverse and edit
+ // the extracted selection without modifying the original AST.
+ startOffset, endOffset, err := safetoken.Offsets(tok, start, end)
+ if err != nil {
+ return nil, err
+ }
+ selection := src[startOffset:endOffset]
+ extractedBlock, err := parseBlockStmt(fset, selection)
+ if err != nil {
+ return nil, err
+ }
+
+ // We need to account for return statements in the selected block, as they will complicate
+ // the logical flow of the extracted function. See the following example, where ** denotes
+ // the range to be extracted.
+ //
+ // Before:
+ //
+ // func _() int {
+ // a := 1
+ // b := 2
+ // **if a == b {
+ // return a
+ // }**
+ // ...
+ // }
+ //
+ // After:
+ //
+ // func _() int {
+ // a := 1
+ // b := 2
+ // cond0, ret0 := x0(a, b)
+ // if cond0 {
+ // return ret0
+ // }
+ // ...
+ // }
+ //
+ // func x0(a int, b int) (bool, int) {
+ // if a == b {
+ // return true, a
+ // }
+ // return false, 0
+ // }
+ //
+ // We handle returns by adding an additional boolean return value to the extracted function.
+ // This bool reports whether the original function would have returned. Because the
+ // extracted selection contains a return statement, we must also add the types in the
+ // return signature of the enclosing function to the return signature of the
+ // extracted function. We then add an extra if statement checking this boolean value
+ // in the original function. If the condition is met, the original function should
+ // return a value, mimicking the functionality of the original return statement(s)
+ // in the selection.
+ //
+ // If there is a return that is guaranteed to execute (hasNonNestedReturns=true), then
+ // we don't need to include this additional condition check and can simply return.
+ //
+ // Before:
+ //
+ // func _() int {
+ // a := 1
+ // b := 2
+ // **if a == b {
+ // return a
+ // }
+ // return b**
+ // }
+ //
+ // After:
+ //
+ // func _() int {
+ // a := 1
+ // b := 2
+ // return x0(a, b)
+ // }
+ //
+ // func x0(a int, b int) int {
+ // if a == b {
+ // return a
+ // }
+ // return b
+ // }
+
+ var retVars []*returnVariable
+ var ifReturn *ast.IfStmt
+ if containsReturnStatement {
+ if !hasNonNestedReturn {
+ // The selected block contained return statements, so we have to modify the
+ // signature of the extracted function as described above. Adjust all of
+ // the return statements in the extracted function to reflect this change in
+ // signature.
+ if err := adjustReturnStatements(returnTypes, seenVars, fset, file,
+ pkg, extractedBlock); err != nil {
+ return nil, err
+ }
+ }
+ // Collect the additional return values and types needed to accommodate return
+ // statements in the selection. Update the type signature of the extracted
+ // function and construct the if statement that will be inserted in the enclosing
+ // function.
+ retVars, ifReturn, err = generateReturnInfo(enclosing, pkg, path, file, info, fset, start, hasNonNestedReturn)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Add a return statement to the end of the new function. This return statement must include
+ // the values for the types of the original extracted function signature and (if a return
+ // statement is present in the selection) enclosing function signature.
+ // This only needs to be done if the selections does not have a non-nested return, otherwise
+ // it already terminates with a return statement.
+ hasReturnValues := len(returns)+len(retVars) > 0
+ if hasReturnValues && !hasNonNestedReturn {
+ extractedBlock.List = append(extractedBlock.List, &ast.ReturnStmt{
+ Results: append(returns, getZeroVals(retVars)...),
+ })
+ }
+
+ // Construct the appropriate call to the extracted function.
+ // We must meet two conditions to use ":=" instead of '='. (1) there must be at least
+ // one variable on the lhs that is uninitialized (non-free) prior to the assignment.
+ // (2) all of the initialized (free) variables on the lhs must be able to be redefined.
+ sym := token.ASSIGN
+ canDefineCount := len(uninitialized) + canRedefineCount
+ canDefine := len(uninitialized)+len(retVars) > 0 && canDefineCount == len(returns)
+ if canDefine {
+ sym = token.DEFINE
+ }
+ var name, funName string
+ if isMethod {
+ name = "newMethod"
+ // TODO(suzmue): generate a name that does not conflict for "newMethod".
+ funName = name
+ } else {
+ name = "newFunction"
+ funName, _ = generateAvailableIdentifier(start, file, path, info, name, 0)
+ }
+ extractedFunCall := generateFuncCall(hasNonNestedReturn, hasReturnValues, params,
+ append(returns, getNames(retVars)...), funName, sym, receiverName)
+
+ // Build the extracted function.
+ newFunc := &ast.FuncDecl{
+ Name: ast.NewIdent(funName),
+ Type: &ast.FuncType{
+ Params: &ast.FieldList{List: paramTypes},
+ Results: &ast.FieldList{List: append(returnTypes, getDecls(retVars)...)},
+ },
+ Body: extractedBlock,
+ }
+ if isMethod {
+ var names []*ast.Ident
+ if receiverUsed {
+ names = append(names, ast.NewIdent(receiverName))
+ }
+ newFunc.Recv = &ast.FieldList{
+ List: []*ast.Field{{
+ Names: names,
+ Type: receiver.Type,
+ }},
+ }
+ }
+
+ // Create variable declarations for any identifiers that need to be initialized prior to
+ // calling the extracted function. We do not manually initialize variables if every return
+ // value is uninitialized. We can use := to initialize the variables in this situation.
+ var declarations []ast.Stmt
+ if canDefineCount != len(returns) {
+ declarations = initializeVars(uninitialized, retVars, seenUninitialized, seenVars)
+ }
+
+ var declBuf, replaceBuf, newFuncBuf, ifBuf, commentBuf bytes.Buffer
+ if err := format.Node(&declBuf, fset, declarations); err != nil {
+ return nil, err
+ }
+ if err := format.Node(&replaceBuf, fset, extractedFunCall); err != nil {
+ return nil, err
+ }
+ if ifReturn != nil {
+ if err := format.Node(&ifBuf, fset, ifReturn); err != nil {
+ return nil, err
+ }
+ }
+ if err := format.Node(&newFuncBuf, fset, newFunc); err != nil {
+ return nil, err
+ }
+ // Find all the comments within the range and print them to be put somewhere.
+ // TODO(suzmue): print these in the extracted function at the correct place.
+ for _, cg := range file.Comments {
+ if cg.Pos().IsValid() && cg.Pos() < end && cg.Pos() >= start {
+ for _, c := range cg.List {
+ fmt.Fprintln(&commentBuf, c.Text)
+ }
+ }
+ }
+
+ // We're going to replace the whole enclosing function,
+ // so preserve the text before and after the selected block.
+ outerStart, outerEnd, err := safetoken.Offsets(tok, outer.Pos(), outer.End())
+ if err != nil {
+ return nil, err
+ }
+ before := src[outerStart:startOffset]
+ after := src[endOffset:outerEnd]
+ indent, err := calculateIndentation(src, tok, node)
+ if err != nil {
+ return nil, err
+ }
+ newLineIndent := "\n" + indent
+
+ var fullReplacement strings.Builder
+ fullReplacement.Write(before)
+ if commentBuf.Len() > 0 {
+ comments := strings.ReplaceAll(commentBuf.String(), "\n", newLineIndent)
+ fullReplacement.WriteString(comments)
+ }
+ if declBuf.Len() > 0 { // add any initializations, if needed
+ initializations := strings.ReplaceAll(declBuf.String(), "\n", newLineIndent) +
+ newLineIndent
+ fullReplacement.WriteString(initializations)
+ }
+ fullReplacement.Write(replaceBuf.Bytes()) // call the extracted function
+ if ifBuf.Len() > 0 { // add the if statement below the function call, if needed
+ ifstatement := newLineIndent +
+ strings.ReplaceAll(ifBuf.String(), "\n", newLineIndent)
+ fullReplacement.WriteString(ifstatement)
+ }
+ fullReplacement.Write(after)
+ fullReplacement.WriteString("\n\n") // add newlines after the enclosing function
+ fullReplacement.Write(newFuncBuf.Bytes()) // insert the extracted function
+
+ return &analysis.SuggestedFix{
+ TextEdits: []analysis.TextEdit{{
+ Pos: outer.Pos(),
+ End: outer.End(),
+ NewText: []byte(fullReplacement.String()),
+ }},
+ }, nil
+}
+
+// adjustRangeForCommentsAndWhiteSpace adjusts the given range to exclude unnecessary leading or
+// trailing whitespace characters from selection as well as leading or trailing comments.
+// In the following example, each line of the if statement is indented once. There are also two
+// extra spaces after the sclosing bracket before the line break and a comment.
+//
+// \tif (true) {
+// \t _ = 1
+// \t} // hello \n
+//
+// By default, a valid range begins at 'if' and ends at the first whitespace character
+// after the '}'. But, users are likely to highlight full lines rather than adjusting
+// their cursors for whitespace. To support this use case, we must manually adjust the
+// ranges to match the correct AST node. In this particular example, we would adjust
+// rng.Start forward to the start of 'if' and rng.End backward to after '}'.
+func adjustRangeForCommentsAndWhiteSpace(tok *token.File, start, end token.Pos, content []byte, file *ast.File) (token.Pos, token.Pos, error) {
+ // Adjust the end of the range to after leading whitespace and comments.
+ prevStart := token.NoPos
+ startComment := sort.Search(len(file.Comments), func(i int) bool {
+ // Find the index for the first comment that ends after range start.
+ return file.Comments[i].End() > start
+ })
+ for prevStart != start {
+ prevStart = start
+ // If start is within a comment, move start to the end
+ // of the comment group.
+ if startComment < len(file.Comments) && file.Comments[startComment].Pos() <= start && start < file.Comments[startComment].End() {
+ start = file.Comments[startComment].End()
+ startComment++
+ }
+ // Move forwards to find a non-whitespace character.
+ offset, err := safetoken.Offset(tok, start)
+ if err != nil {
+ return 0, 0, err
+ }
+ for offset < len(content) && isGoWhiteSpace(content[offset]) {
+ offset++
+ }
+ start = tok.Pos(offset)
+ }
+
+ // Adjust the end of the range to before trailing whitespace and comments.
+ prevEnd := token.NoPos
+ endComment := sort.Search(len(file.Comments), func(i int) bool {
+ // Find the index for the first comment that ends after the range end.
+ return file.Comments[i].End() >= end
+ })
+ // Search will return n if not found, so we need to adjust if there are no
+ // comments that would match.
+ if endComment == len(file.Comments) {
+ endComment = -1
+ }
+ for prevEnd != end {
+ prevEnd = end
+ // If end is within a comment, move end to the start
+ // of the comment group.
+ if endComment >= 0 && file.Comments[endComment].Pos() < end && end <= file.Comments[endComment].End() {
+ end = file.Comments[endComment].Pos()
+ endComment--
+ }
+ // Move backwards to find a non-whitespace character.
+ offset, err := safetoken.Offset(tok, end)
+ if err != nil {
+ return 0, 0, err
+ }
+ for offset > 0 && isGoWhiteSpace(content[offset-1]) {
+ offset--
+ }
+ end = tok.Pos(offset)
+ }
+
+ return start, end, nil
+}
+
+// isGoWhiteSpace returns true if b is a considered white space in
+// Go as defined by scanner.GoWhitespace.
+func isGoWhiteSpace(b byte) bool {
+ return uint64(scanner.GoWhitespace)&(1<<uint(b)) != 0
+}
+
+// findParent finds the parent AST node of the given target node, if the target is a
+// descendant of the starting node.
+func findParent(start ast.Node, target ast.Node) ast.Node {
+ var parent ast.Node
+ analysisinternal.WalkASTWithParent(start, func(n, p ast.Node) bool {
+ if n == target {
+ parent = p
+ return false
+ }
+ return true
+ })
+ return parent
+}
+
+// variable describes the status of a variable within a selection.
+type variable struct {
+ obj types.Object
+
+ // free reports whether the variable is a free variable, meaning it should
+ // be a parameter to the extracted function.
+ free bool
+
+ // assigned reports whether the variable is assigned to in the selection.
+ assigned bool
+
+ // defined reports whether the variable is defined in the selection.
+ defined bool
+}
+
+// collectFreeVars maps each identifier in the given range to whether it is "free."
+// Given a range, a variable in that range is defined as "free" if it is declared
+// outside of the range and neither at the file scope nor package scope. These free
+// variables will be used as arguments in the extracted function. It also returns a
+// list of identifiers that may need to be returned by the extracted function.
+// Some of the code in this function has been adapted from tools/cmd/guru/freevars.go.
+func collectFreeVars(info *types.Info, file *ast.File, fileScope, pkgScope *types.Scope, start, end token.Pos, node ast.Node) ([]*variable, error) {
+ // id returns non-nil if n denotes an object that is referenced by the span
+ // and defined either within the span or in the lexical environment. The bool
+ // return value acts as an indicator for where it was defined.
+ id := func(n *ast.Ident) (types.Object, bool) {
+ obj := info.Uses[n]
+ if obj == nil {
+ return info.Defs[n], false
+ }
+ if obj.Name() == "_" {
+ return nil, false // exclude objects denoting '_'
+ }
+ if _, ok := obj.(*types.PkgName); ok {
+ return nil, false // imported package
+ }
+ if !(file.Pos() <= obj.Pos() && obj.Pos() <= file.End()) {
+ return nil, false // not defined in this file
+ }
+ scope := obj.Parent()
+ if scope == nil {
+ return nil, false // e.g. interface method, struct field
+ }
+ if scope == fileScope || scope == pkgScope {
+ return nil, false // defined at file or package scope
+ }
+ if start <= obj.Pos() && obj.Pos() <= end {
+ return obj, false // defined within selection => not free
+ }
+ return obj, true
+ }
+ // sel returns non-nil if n denotes a selection o.x.y that is referenced by the
+ // span and defined either within the span or in the lexical environment. The bool
+ // return value acts as an indicator for where it was defined.
+ var sel func(n *ast.SelectorExpr) (types.Object, bool)
+ sel = func(n *ast.SelectorExpr) (types.Object, bool) {
+ switch x := astutil.Unparen(n.X).(type) {
+ case *ast.SelectorExpr:
+ return sel(x)
+ case *ast.Ident:
+ return id(x)
+ }
+ return nil, false
+ }
+ seen := make(map[types.Object]*variable)
+ firstUseIn := make(map[types.Object]token.Pos)
+ var vars []types.Object
+ ast.Inspect(node, func(n ast.Node) bool {
+ if n == nil {
+ return false
+ }
+ if start <= n.Pos() && n.End() <= end {
+ var obj types.Object
+ var isFree, prune bool
+ switch n := n.(type) {
+ case *ast.Ident:
+ obj, isFree = id(n)
+ case *ast.SelectorExpr:
+ obj, isFree = sel(n)
+ prune = true
+ }
+ if obj != nil {
+ seen[obj] = &variable{
+ obj: obj,
+ free: isFree,
+ }
+ vars = append(vars, obj)
+ // Find the first time that the object is used in the selection.
+ first, ok := firstUseIn[obj]
+ if !ok || n.Pos() < first {
+ firstUseIn[obj] = n.Pos()
+ }
+ if prune {
+ return false
+ }
+ }
+ }
+ return n.Pos() <= end
+ })
+
+ // Find identifiers that are initialized or whose values are altered at some
+ // point in the selected block. For example, in a selected block from lines 2-4,
+ // variables x, y, and z are included in assigned. However, in a selected block
+ // from lines 3-4, only variables y and z are included in assigned.
+ //
+ // 1: var a int
+ // 2: var x int
+ // 3: y := 3
+ // 4: z := x + a
+ //
+ ast.Inspect(node, func(n ast.Node) bool {
+ if n == nil {
+ return false
+ }
+ if n.Pos() < start || n.End() > end {
+ return n.Pos() <= end
+ }
+ switch n := n.(type) {
+ case *ast.AssignStmt:
+ for _, assignment := range n.Lhs {
+ lhs, ok := assignment.(*ast.Ident)
+ if !ok {
+ continue
+ }
+ obj, _ := id(lhs)
+ if obj == nil {
+ continue
+ }
+ if _, ok := seen[obj]; !ok {
+ continue
+ }
+ seen[obj].assigned = true
+ if n.Tok != token.DEFINE {
+ continue
+ }
+ // Find identifiers that are defined prior to being used
+ // elsewhere in the selection.
+ // TODO: Include identifiers that are assigned prior to being
+ // used elsewhere in the selection. Then, change the assignment
+ // to a definition in the extracted function.
+ if firstUseIn[obj] != lhs.Pos() {
+ continue
+ }
+ // Ensure that the object is not used in its own re-definition.
+ // For example:
+ // var f float64
+ // f, e := math.Frexp(f)
+ for _, expr := range n.Rhs {
+ if referencesObj(info, expr, obj) {
+ continue
+ }
+ if _, ok := seen[obj]; !ok {
+ continue
+ }
+ seen[obj].defined = true
+ break
+ }
+ }
+ return false
+ case *ast.DeclStmt:
+ gen, ok := n.Decl.(*ast.GenDecl)
+ if !ok {
+ return false
+ }
+ for _, spec := range gen.Specs {
+ vSpecs, ok := spec.(*ast.ValueSpec)
+ if !ok {
+ continue
+ }
+ for _, vSpec := range vSpecs.Names {
+ obj, _ := id(vSpec)
+ if obj == nil {
+ continue
+ }
+ if _, ok := seen[obj]; !ok {
+ continue
+ }
+ seen[obj].assigned = true
+ }
+ }
+ return false
+ case *ast.IncDecStmt:
+ if ident, ok := n.X.(*ast.Ident); !ok {
+ return false
+ } else if obj, _ := id(ident); obj == nil {
+ return false
+ } else {
+ if _, ok := seen[obj]; !ok {
+ return false
+ }
+ seen[obj].assigned = true
+ }
+ }
+ return true
+ })
+ var variables []*variable
+ for _, obj := range vars {
+ v, ok := seen[obj]
+ if !ok {
+ return nil, fmt.Errorf("no seen types.Object for %v", obj)
+ }
+ variables = append(variables, v)
+ }
+ return variables, nil
+}
+
+// referencesObj checks whether the given object appears in the given expression.
+func referencesObj(info *types.Info, expr ast.Expr, obj types.Object) bool {
+ var hasObj bool
+ ast.Inspect(expr, func(n ast.Node) bool {
+ if n == nil {
+ return false
+ }
+ ident, ok := n.(*ast.Ident)
+ if !ok {
+ return true
+ }
+ objUse := info.Uses[ident]
+ if obj == objUse {
+ hasObj = true
+ return false
+ }
+ return false
+ })
+ return hasObj
+}
+
+type fnExtractParams struct {
+ tok *token.File
+ start, end token.Pos
+ path []ast.Node
+ outer *ast.FuncDecl
+ node ast.Node
+}
+
+// CanExtractFunction reports whether the code in the given range can be
+// extracted to a function.
+func CanExtractFunction(tok *token.File, start, end token.Pos, src []byte, file *ast.File) (*fnExtractParams, bool, bool, error) {
+ if start == end {
+ return nil, false, false, fmt.Errorf("start and end are equal")
+ }
+ var err error
+ start, end, err = adjustRangeForCommentsAndWhiteSpace(tok, start, end, src, file)
+ if err != nil {
+ return nil, false, false, err
+ }
+ path, _ := astutil.PathEnclosingInterval(file, start, end)
+ if len(path) == 0 {
+ return nil, false, false, fmt.Errorf("no path enclosing interval")
+ }
+ // Node that encloses the selection must be a statement.
+ // TODO: Support function extraction for an expression.
+ _, ok := path[0].(ast.Stmt)
+ if !ok {
+ return nil, false, false, fmt.Errorf("node is not a statement")
+ }
+
+ // Find the function declaration that encloses the selection.
+ var outer *ast.FuncDecl
+ for _, p := range path {
+ if p, ok := p.(*ast.FuncDecl); ok {
+ outer = p
+ break
+ }
+ }
+ if outer == nil {
+ return nil, false, false, fmt.Errorf("no enclosing function")
+ }
+
+ // Find the nodes at the start and end of the selection.
+ var startNode, endNode ast.Node
+ ast.Inspect(outer, func(n ast.Node) bool {
+ if n == nil {
+ return false
+ }
+ // Do not override 'start' with a node that begins at the same location
+ // but is nested further from 'outer'.
+ if startNode == nil && n.Pos() == start && n.End() <= end {
+ startNode = n
+ }
+ if endNode == nil && n.End() == end && n.Pos() >= start {
+ endNode = n
+ }
+ return n.Pos() <= end
+ })
+ if startNode == nil || endNode == nil {
+ return nil, false, false, fmt.Errorf("range does not map to AST nodes")
+ }
+ // If the region is a blockStmt, use the first and last nodes in the block
+ // statement.
+ // <rng.start>{ ... }<rng.end> => { <rng.start>...<rng.end> }
+ if blockStmt, ok := startNode.(*ast.BlockStmt); ok {
+ if len(blockStmt.List) == 0 {
+ return nil, false, false, fmt.Errorf("range maps to empty block statement")
+ }
+ startNode, endNode = blockStmt.List[0], blockStmt.List[len(blockStmt.List)-1]
+ start, end = startNode.Pos(), endNode.End()
+ }
+ return &fnExtractParams{
+ tok: tok,
+ start: start,
+ end: end,
+ path: path,
+ outer: outer,
+ node: startNode,
+ }, true, outer.Recv != nil, nil
+}
+
+// objUsed checks if the object is used within the range. It returns the first
+// occurrence of the object in the range, if it exists.
+func objUsed(info *types.Info, start, end token.Pos, obj types.Object) (bool, *ast.Ident) {
+ var firstUse *ast.Ident
+ for id, objUse := range info.Uses {
+ if obj != objUse {
+ continue
+ }
+ if id.Pos() < start || id.End() > end {
+ continue
+ }
+ if firstUse == nil || id.Pos() < firstUse.Pos() {
+ firstUse = id
+ }
+ }
+ return firstUse != nil, firstUse
+}
+
+// varOverridden traverses the given AST node until we find the given identifier. Then, we
+// examine the occurrence of the given identifier and check for (1) whether the identifier
+// is being redefined. If the identifier is free, we also check for (2) whether the identifier
+// is being reassigned. We will not include an identifier in the return statement of the
+// extracted function if it meets one of the above conditions.
+func varOverridden(info *types.Info, firstUse *ast.Ident, obj types.Object, isFree bool, node ast.Node) bool {
+ var isOverriden bool
+ ast.Inspect(node, func(n ast.Node) bool {
+ if n == nil {
+ return false
+ }
+ assignment, ok := n.(*ast.AssignStmt)
+ if !ok {
+ return true
+ }
+ // A free variable is initialized prior to the selection. We can always reassign
+ // this variable after the selection because it has already been defined.
+ // Conversely, a non-free variable is initialized within the selection. Thus, we
+ // cannot reassign this variable after the selection unless it is initialized and
+ // returned by the extracted function.
+ if !isFree && assignment.Tok == token.ASSIGN {
+ return false
+ }
+ for _, assigned := range assignment.Lhs {
+ ident, ok := assigned.(*ast.Ident)
+ // Check if we found the first use of the identifier.
+ if !ok || ident != firstUse {
+ continue
+ }
+ objUse := info.Uses[ident]
+ if objUse == nil || objUse != obj {
+ continue
+ }
+ // Ensure that the object is not used in its own definition.
+ // For example:
+ // var f float64
+ // f, e := math.Frexp(f)
+ for _, expr := range assignment.Rhs {
+ if referencesObj(info, expr, obj) {
+ return false
+ }
+ }
+ isOverriden = true
+ return false
+ }
+ return false
+ })
+ return isOverriden
+}
+
+// parseBlockStmt generates an AST file from the given text. We then return the portion of the
+// file that represents the text.
+func parseBlockStmt(fset *token.FileSet, src []byte) (*ast.BlockStmt, error) {
+ text := "package main\nfunc _() { " + string(src) + " }"
+ extract, err := parser.ParseFile(fset, "", text, 0)
+ if err != nil {
+ return nil, err
+ }
+ if len(extract.Decls) == 0 {
+ return nil, fmt.Errorf("parsed file does not contain any declarations")
+ }
+ decl, ok := extract.Decls[0].(*ast.FuncDecl)
+ if !ok {
+ return nil, fmt.Errorf("parsed file does not contain expected function declaration")
+ }
+ if decl.Body == nil {
+ return nil, fmt.Errorf("extracted function has no body")
+ }
+ return decl.Body, nil
+}
+
+// generateReturnInfo generates the information we need to adjust the return statements and
+// signature of the extracted function. We prepare names, signatures, and "zero values" that
+// represent the new variables. We also use this information to construct the if statement that
+// is inserted below the call to the extracted function.
+func generateReturnInfo(enclosing *ast.FuncType, pkg *types.Package, path []ast.Node, file *ast.File, info *types.Info, fset *token.FileSet, pos token.Pos, hasNonNestedReturns bool) ([]*returnVariable, *ast.IfStmt, error) {
+ var retVars []*returnVariable
+ var cond *ast.Ident
+ if !hasNonNestedReturns {
+ // Generate information for the added bool value.
+ name, _ := generateAvailableIdentifier(pos, file, path, info, "shouldReturn", 0)
+ cond = &ast.Ident{Name: name}
+ retVars = append(retVars, &returnVariable{
+ name: cond,
+ decl: &ast.Field{Type: ast.NewIdent("bool")},
+ zeroVal: ast.NewIdent("false"),
+ })
+ }
+ // Generate information for the values in the return signature of the enclosing function.
+ if enclosing.Results != nil {
+ idx := 0
+ for _, field := range enclosing.Results.List {
+ typ := info.TypeOf(field.Type)
+ if typ == nil {
+ return nil, nil, fmt.Errorf(
+ "failed type conversion, AST expression: %T", field.Type)
+ }
+ expr := analysisinternal.TypeExpr(file, pkg, typ)
+ if expr == nil {
+ return nil, nil, fmt.Errorf("nil AST expression")
+ }
+ var name string
+ name, idx = generateAvailableIdentifier(pos, file,
+ path, info, "returnValue", idx)
+ retVars = append(retVars, &returnVariable{
+ name: ast.NewIdent(name),
+ decl: &ast.Field{Type: expr},
+ zeroVal: analysisinternal.ZeroValue(file, pkg, typ),
+ })
+ }
+ }
+ var ifReturn *ast.IfStmt
+ if !hasNonNestedReturns {
+ // Create the return statement for the enclosing function. We must exclude the variable
+ // for the condition of the if statement (cond) from the return statement.
+ ifReturn = &ast.IfStmt{
+ Cond: cond,
+ Body: &ast.BlockStmt{
+ List: []ast.Stmt{&ast.ReturnStmt{Results: getNames(retVars)[1:]}},
+ },
+ }
+ }
+ return retVars, ifReturn, nil
+}
+
+// adjustReturnStatements adds "zero values" of the given types to each return statement
+// in the given AST node.
+func adjustReturnStatements(returnTypes []*ast.Field, seenVars map[types.Object]ast.Expr, fset *token.FileSet, file *ast.File, pkg *types.Package, extractedBlock *ast.BlockStmt) error {
+ var zeroVals []ast.Expr
+ // Create "zero values" for each type.
+ for _, returnType := range returnTypes {
+ var val ast.Expr
+ for obj, typ := range seenVars {
+ if typ != returnType.Type {
+ continue
+ }
+ val = analysisinternal.ZeroValue(file, pkg, obj.Type())
+ break
+ }
+ if val == nil {
+ return fmt.Errorf(
+ "could not find matching AST expression for %T", returnType.Type)
+ }
+ zeroVals = append(zeroVals, val)
+ }
+ // Add "zero values" to each return statement.
+ // The bool reports whether the enclosing function should return after calling the
+ // extracted function. We set the bool to 'true' because, if these return statements
+ // execute, the extracted function terminates early, and the enclosing function must
+ // return as well.
+ zeroVals = append(zeroVals, ast.NewIdent("true"))
+ ast.Inspect(extractedBlock, func(n ast.Node) bool {
+ if n == nil {
+ return false
+ }
+ if n, ok := n.(*ast.ReturnStmt); ok {
+ n.Results = append(zeroVals, n.Results...)
+ return false
+ }
+ return true
+ })
+ return nil
+}
+
+// generateFuncCall constructs a call expression for the extracted function, described by the
+// given parameters and return variables.
+func generateFuncCall(hasNonNestedReturn, hasReturnVals bool, params, returns []ast.Expr, name string, token token.Token, selector string) ast.Node {
+ var replace ast.Node
+ callExpr := &ast.CallExpr{
+ Fun: ast.NewIdent(name),
+ Args: params,
+ }
+ if selector != "" {
+ callExpr = &ast.CallExpr{
+ Fun: &ast.SelectorExpr{
+ X: ast.NewIdent(selector),
+ Sel: ast.NewIdent(name),
+ },
+ Args: params,
+ }
+ }
+ if hasReturnVals {
+ if hasNonNestedReturn {
+ // Create a return statement that returns the result of the function call.
+ replace = &ast.ReturnStmt{
+ Return: 0,
+ Results: []ast.Expr{callExpr},
+ }
+ } else {
+ // Assign the result of the function call.
+ replace = &ast.AssignStmt{
+ Lhs: returns,
+ Tok: token,
+ Rhs: []ast.Expr{callExpr},
+ }
+ }
+ } else {
+ replace = callExpr
+ }
+ return replace
+}
+
+// initializeVars creates variable declarations, if needed.
+// Our preference is to replace the selected block with an "x, y, z := fn()" style
+// assignment statement. We can use this style when all of the variables in the
+// extracted function's return statement are either not defined prior to the extracted block
+// or can be safely redefined. However, for example, if z is already defined
+// in a different scope, we replace the selected block with:
+//
+// var x int
+// var y string
+// x, y, z = fn()
+func initializeVars(uninitialized []types.Object, retVars []*returnVariable, seenUninitialized map[types.Object]struct{}, seenVars map[types.Object]ast.Expr) []ast.Stmt {
+ var declarations []ast.Stmt
+ for _, obj := range uninitialized {
+ if _, ok := seenUninitialized[obj]; ok {
+ continue
+ }
+ seenUninitialized[obj] = struct{}{}
+ valSpec := &ast.ValueSpec{
+ Names: []*ast.Ident{ast.NewIdent(obj.Name())},
+ Type: seenVars[obj],
+ }
+ genDecl := &ast.GenDecl{
+ Tok: token.VAR,
+ Specs: []ast.Spec{valSpec},
+ }
+ declarations = append(declarations, &ast.DeclStmt{Decl: genDecl})
+ }
+ // Each variable added from a return statement in the selection
+ // must be initialized.
+ for i, retVar := range retVars {
+ n := retVar.name.(*ast.Ident)
+ valSpec := &ast.ValueSpec{
+ Names: []*ast.Ident{n},
+ Type: retVars[i].decl.Type,
+ }
+ genDecl := &ast.GenDecl{
+ Tok: token.VAR,
+ Specs: []ast.Spec{valSpec},
+ }
+ declarations = append(declarations, &ast.DeclStmt{Decl: genDecl})
+ }
+ return declarations
+}
+
+// getNames returns the names from the given list of returnVariable.
+func getNames(retVars []*returnVariable) []ast.Expr {
+ var names []ast.Expr
+ for _, retVar := range retVars {
+ names = append(names, retVar.name)
+ }
+ return names
+}
+
+// getZeroVals returns the "zero values" from the given list of returnVariable.
+func getZeroVals(retVars []*returnVariable) []ast.Expr {
+ var zvs []ast.Expr
+ for _, retVar := range retVars {
+ zvs = append(zvs, retVar.zeroVal)
+ }
+ return zvs
+}
+
+// getDecls returns the declarations from the given list of returnVariable.
+func getDecls(retVars []*returnVariable) []*ast.Field {
+ var decls []*ast.Field
+ for _, retVar := range retVars {
+ decls = append(decls, retVar.decl)
+ }
+ return decls
+}
diff --git a/gopls/internal/lsp/source/fix.go b/gopls/internal/lsp/source/fix.go
new file mode 100644
index 000000000..2ed55c44d
--- /dev/null
+++ b/gopls/internal/lsp/source/fix.go
@@ -0,0 +1,138 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package source
+
+import (
+ "context"
+ "fmt"
+ "go/ast"
+ "go/token"
+ "go/types"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/fillstruct"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/undeclaredname"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/bug"
+)
+
+type (
+ // SuggestedFixFunc is a function used to get the suggested fixes for a given
+ // gopls command, some of which are provided by go/analysis.Analyzers. Some of
+ // the analyzers in internal/lsp/analysis are not efficient enough to include
+ // suggested fixes with their diagnostics, so we have to compute them
+ // separately. Such analyzers should provide a function with a signature of
+ // SuggestedFixFunc.
+ //
+ // The returned FileSet must map all token.Pos found in the suggested text
+ // edits.
+ SuggestedFixFunc func(ctx context.Context, snapshot Snapshot, fh FileHandle, pRng protocol.Range) (*token.FileSet, *analysis.SuggestedFix, error)
+ singleFileFixFunc func(fset *token.FileSet, start, end token.Pos, src []byte, file *ast.File, pkg *types.Package, info *types.Info) (*analysis.SuggestedFix, error)
+)
+
+const (
+ FillStruct = "fill_struct"
+ StubMethods = "stub_methods"
+ UndeclaredName = "undeclared_name"
+ ExtractVariable = "extract_variable"
+ ExtractFunction = "extract_function"
+ ExtractMethod = "extract_method"
+)
+
+// suggestedFixes maps a suggested fix command id to its handler.
+var suggestedFixes = map[string]SuggestedFixFunc{
+ FillStruct: singleFile(fillstruct.SuggestedFix),
+ UndeclaredName: singleFile(undeclaredname.SuggestedFix),
+ ExtractVariable: singleFile(extractVariable),
+ ExtractFunction: singleFile(extractFunction),
+ ExtractMethod: singleFile(extractMethod),
+ StubMethods: stubSuggestedFixFunc,
+}
+
+// singleFile calls analyzers that expect inputs for a single file
+func singleFile(sf singleFileFixFunc) SuggestedFixFunc {
+ return func(ctx context.Context, snapshot Snapshot, fh FileHandle, pRng protocol.Range) (*token.FileSet, *analysis.SuggestedFix, error) {
+ pkg, pgf, err := PackageForFile(ctx, snapshot, fh.URI(), NarrowestPackage)
+ if err != nil {
+ return nil, nil, err
+ }
+ start, end, err := pgf.RangePos(pRng)
+ if err != nil {
+ return nil, nil, err
+ }
+ fix, err := sf(pkg.FileSet(), start, end, pgf.Src, pgf.File, pkg.GetTypes(), pkg.GetTypesInfo())
+ return pkg.FileSet(), fix, err
+ }
+}
+
+func SuggestedFixFromCommand(cmd protocol.Command, kind protocol.CodeActionKind) SuggestedFix {
+ return SuggestedFix{
+ Title: cmd.Title,
+ Command: &cmd,
+ ActionKind: kind,
+ }
+}
+
+// ApplyFix applies the command's suggested fix to the given file and
+// range, returning the resulting edits.
+func ApplyFix(ctx context.Context, fix string, snapshot Snapshot, fh FileHandle, pRng protocol.Range) ([]protocol.TextDocumentEdit, error) {
+ handler, ok := suggestedFixes[fix]
+ if !ok {
+ return nil, fmt.Errorf("no suggested fix function for %s", fix)
+ }
+ fset, suggestion, err := handler(ctx, snapshot, fh, pRng)
+ if err != nil {
+ return nil, err
+ }
+ if suggestion == nil {
+ return nil, nil
+ }
+ editsPerFile := map[span.URI]*protocol.TextDocumentEdit{}
+ for _, edit := range suggestion.TextEdits {
+ tokFile := fset.File(edit.Pos)
+ if tokFile == nil {
+ return nil, bug.Errorf("no file for edit position")
+ }
+ end := edit.End
+ if !end.IsValid() {
+ end = edit.Pos
+ }
+ fh, err := snapshot.GetFile(ctx, span.URIFromPath(tokFile.Name()))
+ if err != nil {
+ return nil, err
+ }
+ te, ok := editsPerFile[fh.URI()]
+ if !ok {
+ te = &protocol.TextDocumentEdit{
+ TextDocument: protocol.OptionalVersionedTextDocumentIdentifier{
+ Version: fh.Version(),
+ TextDocumentIdentifier: protocol.TextDocumentIdentifier{
+ URI: protocol.URIFromSpanURI(fh.URI()),
+ },
+ },
+ }
+ editsPerFile[fh.URI()] = te
+ }
+ content, err := fh.Read()
+ if err != nil {
+ return nil, err
+ }
+ m := protocol.NewMapper(fh.URI(), content)
+ rng, err := m.PosRange(tokFile, edit.Pos, end)
+ if err != nil {
+ return nil, err
+ }
+ te.Edits = append(te.Edits, protocol.TextEdit{
+ Range: rng,
+ NewText: string(edit.NewText),
+ })
+ }
+ var edits []protocol.TextDocumentEdit
+ for _, edit := range editsPerFile {
+ edits = append(edits, *edit)
+ }
+ return edits, nil
+}
diff --git a/gopls/internal/lsp/source/folding_range.go b/gopls/internal/lsp/source/folding_range.go
new file mode 100644
index 000000000..41f7b5bf5
--- /dev/null
+++ b/gopls/internal/lsp/source/folding_range.go
@@ -0,0 +1,193 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package source
+
+import (
+ "context"
+ "go/ast"
+ "go/token"
+ "sort"
+ "strings"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/internal/bug"
+)
+
+// FoldingRangeInfo holds range and kind info of folding for an ast.Node
+type FoldingRangeInfo struct {
+ MappedRange protocol.MappedRange
+ Kind protocol.FoldingRangeKind
+}
+
+// FoldingRange gets all of the folding range for f.
+func FoldingRange(ctx context.Context, snapshot Snapshot, fh FileHandle, lineFoldingOnly bool) (ranges []*FoldingRangeInfo, err error) {
+ // TODO(suzmue): consider limiting the number of folding ranges returned, and
+ // implement a way to prioritize folding ranges in that case.
+ pgf, err := snapshot.ParseGo(ctx, fh, ParseFull)
+ if err != nil {
+ return nil, err
+ }
+
+ // With parse errors, we wouldn't be able to produce accurate folding info.
+ // LSP protocol (3.16) currently does not have a way to handle this case
+ // (https://github.com/microsoft/language-server-protocol/issues/1200).
+ // We cannot return an error either because we are afraid some editors
+ // may not handle errors nicely. As a workaround, we now return an empty
+ // result and let the client handle this case by double check the file
+ // contents (i.e. if the file is not empty and the folding range result
+ // is empty, raise an internal error).
+ if pgf.ParseErr != nil {
+ return nil, nil
+ }
+
+ // Get folding ranges for comments separately as they are not walked by ast.Inspect.
+ ranges = append(ranges, commentsFoldingRange(pgf)...)
+
+ visit := func(n ast.Node) bool {
+ rng := foldingRangeFunc(pgf, n, lineFoldingOnly)
+ if rng != nil {
+ ranges = append(ranges, rng)
+ }
+ return true
+ }
+ // Walk the ast and collect folding ranges.
+ ast.Inspect(pgf.File, visit)
+
+ sort.Slice(ranges, func(i, j int) bool {
+ irng := ranges[i].MappedRange.Range()
+ jrng := ranges[j].MappedRange.Range()
+ return protocol.CompareRange(irng, jrng) < 0
+ })
+
+ return ranges, nil
+}
+
+// foldingRangeFunc calculates the line folding range for ast.Node n
+func foldingRangeFunc(pgf *ParsedGoFile, n ast.Node, lineFoldingOnly bool) *FoldingRangeInfo {
+ // TODO(suzmue): include trailing empty lines before the closing
+ // parenthesis/brace.
+ var kind protocol.FoldingRangeKind
+ var start, end token.Pos
+ switch n := n.(type) {
+ case *ast.BlockStmt:
+ // Fold between positions of or lines between "{" and "}".
+ var startList, endList token.Pos
+ if num := len(n.List); num != 0 {
+ startList, endList = n.List[0].Pos(), n.List[num-1].End()
+ }
+ start, end = validLineFoldingRange(pgf.Tok, n.Lbrace, n.Rbrace, startList, endList, lineFoldingOnly)
+ case *ast.CaseClause:
+ // Fold from position of ":" to end.
+ start, end = n.Colon+1, n.End()
+ case *ast.CommClause:
+ // Fold from position of ":" to end.
+ start, end = n.Colon+1, n.End()
+ case *ast.CallExpr:
+ // Fold from position of "(" to position of ")".
+ start, end = n.Lparen+1, n.Rparen
+ case *ast.FieldList:
+ // Fold between positions of or lines between opening parenthesis/brace and closing parenthesis/brace.
+ var startList, endList token.Pos
+ if num := len(n.List); num != 0 {
+ startList, endList = n.List[0].Pos(), n.List[num-1].End()
+ }
+ start, end = validLineFoldingRange(pgf.Tok, n.Opening, n.Closing, startList, endList, lineFoldingOnly)
+ case *ast.GenDecl:
+ // If this is an import declaration, set the kind to be protocol.Imports.
+ if n.Tok == token.IMPORT {
+ kind = protocol.Imports
+ }
+ // Fold between positions of or lines between "(" and ")".
+ var startSpecs, endSpecs token.Pos
+ if num := len(n.Specs); num != 0 {
+ startSpecs, endSpecs = n.Specs[0].Pos(), n.Specs[num-1].End()
+ }
+ start, end = validLineFoldingRange(pgf.Tok, n.Lparen, n.Rparen, startSpecs, endSpecs, lineFoldingOnly)
+ case *ast.BasicLit:
+ // Fold raw string literals from position of "`" to position of "`".
+ if n.Kind == token.STRING && len(n.Value) >= 2 && n.Value[0] == '`' && n.Value[len(n.Value)-1] == '`' {
+ start, end = n.Pos(), n.End()
+ }
+ case *ast.CompositeLit:
+ // Fold between positions of or lines between "{" and "}".
+ var startElts, endElts token.Pos
+ if num := len(n.Elts); num != 0 {
+ startElts, endElts = n.Elts[0].Pos(), n.Elts[num-1].End()
+ }
+ start, end = validLineFoldingRange(pgf.Tok, n.Lbrace, n.Rbrace, startElts, endElts, lineFoldingOnly)
+ }
+
+ // Check that folding positions are valid.
+ if !start.IsValid() || !end.IsValid() {
+ return nil
+ }
+ // in line folding mode, do not fold if the start and end lines are the same.
+ if lineFoldingOnly && pgf.Tok.Line(start) == pgf.Tok.Line(end) {
+ return nil
+ }
+ mrng, err := pgf.PosMappedRange(start, end)
+ if err != nil {
+ bug.Errorf("%w", err) // can't happen
+ }
+ return &FoldingRangeInfo{
+ MappedRange: mrng,
+ Kind: kind,
+ }
+}
+
+// validLineFoldingRange returns start and end token.Pos for folding range if the range is valid.
+// returns token.NoPos otherwise, which fails token.IsValid check
+func validLineFoldingRange(tokFile *token.File, open, close, start, end token.Pos, lineFoldingOnly bool) (token.Pos, token.Pos) {
+ if lineFoldingOnly {
+ if !open.IsValid() || !close.IsValid() {
+ return token.NoPos, token.NoPos
+ }
+
+ // Don't want to fold if the start/end is on the same line as the open/close
+ // as an example, the example below should *not* fold:
+ // var x = [2]string{"d",
+ // "e" }
+ if tokFile.Line(open) == tokFile.Line(start) ||
+ tokFile.Line(close) == tokFile.Line(end) {
+ return token.NoPos, token.NoPos
+ }
+
+ return open + 1, end
+ }
+ return open + 1, close
+}
+
+// commentsFoldingRange returns the folding ranges for all comment blocks in file.
+// The folding range starts at the end of the first line of the comment block, and ends at the end of the
+// comment block and has kind protocol.Comment.
+func commentsFoldingRange(pgf *ParsedGoFile) (comments []*FoldingRangeInfo) {
+ tokFile := pgf.Tok
+ for _, commentGrp := range pgf.File.Comments {
+ startGrpLine, endGrpLine := tokFile.Line(commentGrp.Pos()), tokFile.Line(commentGrp.End())
+ if startGrpLine == endGrpLine {
+ // Don't fold single line comments.
+ continue
+ }
+
+ firstComment := commentGrp.List[0]
+ startPos, endLinePos := firstComment.Pos(), firstComment.End()
+ startCmmntLine, endCmmntLine := tokFile.Line(startPos), tokFile.Line(endLinePos)
+ if startCmmntLine != endCmmntLine {
+ // If the first comment spans multiple lines, then we want to have the
+ // folding range start at the end of the first line.
+ endLinePos = token.Pos(int(startPos) + len(strings.Split(firstComment.Text, "\n")[0]))
+ }
+ mrng, err := pgf.PosMappedRange(endLinePos, commentGrp.End())
+ if err != nil {
+ bug.Errorf("%w", err) // can't happen
+ }
+ comments = append(comments, &FoldingRangeInfo{
+ // Fold from the end of the first line comment to the end of the comment block.
+ MappedRange: mrng,
+ Kind: protocol.Comment,
+ })
+ }
+ return comments
+}
diff --git a/gopls/internal/lsp/source/format.go b/gopls/internal/lsp/source/format.go
new file mode 100644
index 000000000..b8206edc9
--- /dev/null
+++ b/gopls/internal/lsp/source/format.go
@@ -0,0 +1,391 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package source provides core features for use by Go editors and tools.
+package source
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "go/ast"
+ "go/format"
+ "go/parser"
+ "go/token"
+ "strings"
+ "text/scanner"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/safetoken"
+ "golang.org/x/tools/internal/diff"
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/imports"
+)
+
+// Format formats a file with a given range.
+func Format(ctx context.Context, snapshot Snapshot, fh FileHandle) ([]protocol.TextEdit, error) {
+ ctx, done := event.Start(ctx, "source.Format")
+ defer done()
+
+ // Generated files shouldn't be edited. So, don't format them
+ if IsGenerated(ctx, snapshot, fh.URI()) {
+ return nil, fmt.Errorf("can't format %q: file is generated", fh.URI().Filename())
+ }
+
+ pgf, err := snapshot.ParseGo(ctx, fh, ParseFull)
+ if err != nil {
+ return nil, err
+ }
+ // Even if this file has parse errors, it might still be possible to format it.
+ // Using format.Node on an AST with errors may result in code being modified.
+ // Attempt to format the source of this file instead.
+ if pgf.ParseErr != nil {
+ formatted, err := formatSource(ctx, fh)
+ if err != nil {
+ return nil, err
+ }
+ return computeTextEdits(ctx, snapshot, pgf, string(formatted))
+ }
+
+ // format.Node changes slightly from one release to another, so the version
+ // of Go used to build the LSP server will determine how it formats code.
+ // This should be acceptable for all users, who likely be prompted to rebuild
+ // the LSP server on each Go release.
+ buf := &bytes.Buffer{}
+ fset := FileSetFor(pgf.Tok)
+ if err := format.Node(buf, fset, pgf.File); err != nil {
+ return nil, err
+ }
+ formatted := buf.String()
+
+ // Apply additional formatting, if any is supported. Currently, the only
+ // supported additional formatter is gofumpt.
+ if format := snapshot.View().Options().GofumptFormat; snapshot.View().Options().Gofumpt && format != nil {
+ // gofumpt can customize formatting based on language version and module
+ // path, if available.
+ //
+ // Try to derive this information, but fall-back on the default behavior.
+ //
+ // TODO: under which circumstances can we fail to find module information?
+ // Can this, for example, result in inconsistent formatting across saves,
+ // due to pending calls to packages.Load?
+ var langVersion, modulePath string
+ mds, err := snapshot.MetadataForFile(ctx, fh.URI())
+ if err == nil && len(mds) > 0 {
+ if mi := mds[0].Module; mi != nil {
+ langVersion = mi.GoVersion
+ modulePath = mi.Path
+ }
+ }
+ b, err := format(ctx, langVersion, modulePath, buf.Bytes())
+ if err != nil {
+ return nil, err
+ }
+ formatted = string(b)
+ }
+ return computeTextEdits(ctx, snapshot, pgf, formatted)
+}
+
+func formatSource(ctx context.Context, fh FileHandle) ([]byte, error) {
+ _, done := event.Start(ctx, "source.formatSource")
+ defer done()
+
+ data, err := fh.Read()
+ if err != nil {
+ return nil, err
+ }
+ return format.Source(data)
+}
+
+type ImportFix struct {
+ Fix *imports.ImportFix
+ Edits []protocol.TextEdit
+}
+
+// AllImportsFixes formats f for each possible fix to the imports.
+// In addition to returning the result of applying all edits,
+// it returns a list of fixes that could be applied to the file, with the
+// corresponding TextEdits that would be needed to apply that fix.
+func AllImportsFixes(ctx context.Context, snapshot Snapshot, fh FileHandle) (allFixEdits []protocol.TextEdit, editsPerFix []*ImportFix, err error) {
+ ctx, done := event.Start(ctx, "source.AllImportsFixes")
+ defer done()
+
+ pgf, err := snapshot.ParseGo(ctx, fh, ParseFull)
+ if err != nil {
+ return nil, nil, err
+ }
+ if err := snapshot.RunProcessEnvFunc(ctx, func(opts *imports.Options) error {
+ allFixEdits, editsPerFix, err = computeImportEdits(snapshot, pgf, opts)
+ return err
+ }); err != nil {
+ return nil, nil, fmt.Errorf("AllImportsFixes: %v", err)
+ }
+ return allFixEdits, editsPerFix, nil
+}
+
+// computeImportEdits computes a set of edits that perform one or all of the
+// necessary import fixes.
+func computeImportEdits(snapshot Snapshot, pgf *ParsedGoFile, options *imports.Options) (allFixEdits []protocol.TextEdit, editsPerFix []*ImportFix, err error) {
+ filename := pgf.URI.Filename()
+
+ // Build up basic information about the original file.
+ allFixes, err := imports.FixImports(filename, pgf.Src, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ allFixEdits, err = computeFixEdits(snapshot, pgf, options, allFixes)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Apply all of the import fixes to the file.
+ // Add the edits for each fix to the result.
+ for _, fix := range allFixes {
+ edits, err := computeFixEdits(snapshot, pgf, options, []*imports.ImportFix{fix})
+ if err != nil {
+ return nil, nil, err
+ }
+ editsPerFix = append(editsPerFix, &ImportFix{
+ Fix: fix,
+ Edits: edits,
+ })
+ }
+ return allFixEdits, editsPerFix, nil
+}
+
+// ComputeOneImportFixEdits returns text edits for a single import fix.
+func ComputeOneImportFixEdits(snapshot Snapshot, pgf *ParsedGoFile, fix *imports.ImportFix) ([]protocol.TextEdit, error) {
+ options := &imports.Options{
+ LocalPrefix: snapshot.View().Options().Local,
+ // Defaults.
+ AllErrors: true,
+ Comments: true,
+ Fragment: true,
+ FormatOnly: false,
+ TabIndent: true,
+ TabWidth: 8,
+ }
+ return computeFixEdits(snapshot, pgf, options, []*imports.ImportFix{fix})
+}
+
+func computeFixEdits(snapshot Snapshot, pgf *ParsedGoFile, options *imports.Options, fixes []*imports.ImportFix) ([]protocol.TextEdit, error) {
+ // trim the original data to match fixedData
+ left, err := importPrefix(pgf.Src)
+ if err != nil {
+ return nil, err
+ }
+ extra := !strings.Contains(left, "\n") // one line may have more than imports
+ if extra {
+ left = string(pgf.Src)
+ }
+ if len(left) > 0 && left[len(left)-1] != '\n' {
+ left += "\n"
+ }
+ // Apply the fixes and re-parse the file so that we can locate the
+ // new imports.
+ flags := parser.ImportsOnly
+ if extra {
+ // used all of origData above, use all of it here too
+ flags = 0
+ }
+ fixedData, err := imports.ApplyFixes(fixes, "", pgf.Src, options, flags)
+ if err != nil {
+ return nil, err
+ }
+ if fixedData == nil || fixedData[len(fixedData)-1] != '\n' {
+ fixedData = append(fixedData, '\n') // ApplyFixes may miss the newline, go figure.
+ }
+ edits := snapshot.View().Options().ComputeEdits(left, string(fixedData))
+ return protocolEditsFromSource([]byte(left), edits)
+}
+
+// importPrefix returns the prefix of the given file content through the final
+// import statement. If there are no imports, the prefix is the package
+// statement and any comment groups below it.
+func importPrefix(src []byte) (string, error) {
+ fset := token.NewFileSet()
+ // do as little parsing as possible
+ f, err := parser.ParseFile(fset, "", src, parser.ImportsOnly|parser.ParseComments)
+ if err != nil { // This can happen if 'package' is misspelled
+ return "", fmt.Errorf("importPrefix: failed to parse: %s", err)
+ }
+ tok := fset.File(f.Pos())
+ var importEnd int
+ for _, d := range f.Decls {
+ if x, ok := d.(*ast.GenDecl); ok && x.Tok == token.IMPORT {
+ if e, err := safetoken.Offset(tok, d.End()); err != nil {
+ return "", fmt.Errorf("importPrefix: %s", err)
+ } else if e > importEnd {
+ importEnd = e
+ }
+ }
+ }
+
+ maybeAdjustToLineEnd := func(pos token.Pos, isCommentNode bool) int {
+ offset, err := safetoken.Offset(tok, pos)
+ if err != nil {
+ return -1
+ }
+
+ // Don't go past the end of the file.
+ if offset > len(src) {
+ offset = len(src)
+ }
+ // The go/ast package does not account for different line endings, and
+ // specifically, in the text of a comment, it will strip out \r\n line
+ // endings in favor of \n. To account for these differences, we try to
+ // return a position on the next line whenever possible.
+ switch line := tok.Line(tok.Pos(offset)); {
+ case line < tok.LineCount():
+ nextLineOffset, err := safetoken.Offset(tok, tok.LineStart(line+1))
+ if err != nil {
+ return -1
+ }
+ // If we found a position that is at the end of a line, move the
+ // offset to the start of the next line.
+ if offset+1 == nextLineOffset {
+ offset = nextLineOffset
+ }
+ case isCommentNode, offset+1 == tok.Size():
+ // If the last line of the file is a comment, or we are at the end
+ // of the file, the prefix is the entire file.
+ offset = len(src)
+ }
+ return offset
+ }
+ if importEnd == 0 {
+ pkgEnd := f.Name.End()
+ importEnd = maybeAdjustToLineEnd(pkgEnd, false)
+ }
+ for _, cgroup := range f.Comments {
+ for _, c := range cgroup.List {
+ if end, err := safetoken.Offset(tok, c.End()); err != nil {
+ return "", err
+ } else if end > importEnd {
+ startLine := safetoken.Position(tok, c.Pos()).Line
+ endLine := safetoken.Position(tok, c.End()).Line
+
+ // Work around golang/go#41197 by checking if the comment might
+ // contain "\r", and if so, find the actual end position of the
+ // comment by scanning the content of the file.
+ startOffset, err := safetoken.Offset(tok, c.Pos())
+ if err != nil {
+ return "", err
+ }
+ if startLine != endLine && bytes.Contains(src[startOffset:], []byte("\r")) {
+ if commentEnd := scanForCommentEnd(src[startOffset:]); commentEnd > 0 {
+ end = startOffset + commentEnd
+ }
+ }
+ importEnd = maybeAdjustToLineEnd(tok.Pos(end), true)
+ }
+ }
+ }
+ if importEnd > len(src) {
+ importEnd = len(src)
+ }
+ return string(src[:importEnd]), nil
+}
+
+// scanForCommentEnd returns the offset of the end of the multi-line comment
+// at the start of the given byte slice.
+func scanForCommentEnd(src []byte) int {
+ var s scanner.Scanner
+ s.Init(bytes.NewReader(src))
+ s.Mode ^= scanner.SkipComments
+
+ t := s.Scan()
+ if t == scanner.Comment {
+ return s.Pos().Offset
+ }
+ return 0
+}
+
+func computeTextEdits(ctx context.Context, snapshot Snapshot, pgf *ParsedGoFile, formatted string) ([]protocol.TextEdit, error) {
+ _, done := event.Start(ctx, "source.computeTextEdits")
+ defer done()
+
+ edits := snapshot.View().Options().ComputeEdits(string(pgf.Src), formatted)
+ return ToProtocolEdits(pgf.Mapper, edits)
+}
+
+// protocolEditsFromSource converts text edits to LSP edits using the original
+// source.
+func protocolEditsFromSource(src []byte, edits []diff.Edit) ([]protocol.TextEdit, error) {
+ m := protocol.NewMapper("", src)
+ var result []protocol.TextEdit
+ for _, edit := range edits {
+ rng, err := m.OffsetRange(edit.Start, edit.End)
+ if err != nil {
+ return nil, err
+ }
+
+ if rng.Start == rng.End && edit.New == "" {
+ // Degenerate case, which may result from a diff tool wanting to delete
+ // '\r' in line endings. Filter it out.
+ continue
+ }
+ result = append(result, protocol.TextEdit{
+ Range: rng,
+ NewText: edit.New,
+ })
+ }
+ return result, nil
+}
+
+// ToProtocolEdits converts diff.Edits to LSP TextEdits.
+// See https://microsoft.github.io/language-server-protocol/specifications/lsp/3.17/specification/#textEditArray
+func ToProtocolEdits(m *protocol.Mapper, edits []diff.Edit) ([]protocol.TextEdit, error) {
+ // LSP doesn't require TextEditArray to be sorted:
+ // this is the receiver's concern. But govim, and perhaps
+ // other clients have historically relied on the order.
+ edits = append([]diff.Edit(nil), edits...)
+ diff.SortEdits(edits)
+
+ result := make([]protocol.TextEdit, len(edits))
+ for i, edit := range edits {
+ rng, err := m.OffsetRange(edit.Start, edit.End)
+ if err != nil {
+ return nil, err
+ }
+ result[i] = protocol.TextEdit{
+ Range: rng,
+ NewText: edit.New,
+ }
+ }
+ return result, nil
+}
+
+// FromProtocolEdits converts LSP TextEdits to diff.Edits.
+// See https://microsoft.github.io/language-server-protocol/specifications/lsp/3.17/specification/#textEditArray
+func FromProtocolEdits(m *protocol.Mapper, edits []protocol.TextEdit) ([]diff.Edit, error) {
+ if edits == nil {
+ return nil, nil
+ }
+ result := make([]diff.Edit, len(edits))
+ for i, edit := range edits {
+ start, end, err := m.RangeOffsets(edit.Range)
+ if err != nil {
+ return nil, err
+ }
+ result[i] = diff.Edit{
+ Start: start,
+ End: end,
+ New: edit.NewText,
+ }
+ }
+ return result, nil
+}
+
+// ApplyProtocolEdits applies the patch (edits) to m.Content and returns the result.
+// It also returns the edits converted to diff-package form.
+func ApplyProtocolEdits(m *protocol.Mapper, edits []protocol.TextEdit) ([]byte, []diff.Edit, error) {
+ diffEdits, err := FromProtocolEdits(m, edits)
+ if err != nil {
+ return nil, nil, err
+ }
+ out, err := diff.ApplyBytes(m.Content, diffEdits)
+ return out, diffEdits, err
+}
diff --git a/gopls/internal/lsp/source/format_test.go b/gopls/internal/lsp/source/format_test.go
new file mode 100644
index 000000000..fac80c311
--- /dev/null
+++ b/gopls/internal/lsp/source/format_test.go
@@ -0,0 +1,75 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package source
+
+import (
+ "strings"
+ "testing"
+
+ "golang.org/x/tools/gopls/internal/lsp/tests/compare"
+)
+
+func TestImportPrefix(t *testing.T) {
+ for i, tt := range []struct {
+ input, want string
+ }{
+ {"package foo", "package foo"},
+ {"package foo\n", "package foo\n"},
+ {"package foo\n\nfunc f(){}\n", "package foo\n"},
+ {"package foo\n\nimport \"fmt\"\n", "package foo\n\nimport \"fmt\""},
+ {"package foo\nimport (\n\"fmt\"\n)\n", "package foo\nimport (\n\"fmt\"\n)"},
+ {"\n\n\npackage foo\n", "\n\n\npackage foo\n"},
+ {"// hi \n\npackage foo //xx\nfunc _(){}\n", "// hi \n\npackage foo //xx\n"},
+ {"package foo //hi\n", "package foo //hi\n"},
+ {"//hi\npackage foo\n//a\n\n//b\n", "//hi\npackage foo\n//a\n\n//b\n"},
+ {
+ "package a\n\nimport (\n \"fmt\"\n)\n//hi\n",
+ "package a\n\nimport (\n \"fmt\"\n)\n//hi\n",
+ },
+ {`package a /*hi*/`, `package a /*hi*/`},
+ {"package main\r\n\r\nimport \"go/types\"\r\n\r\n/*\r\n\r\n */\r\n", "package main\r\n\r\nimport \"go/types\"\r\n\r\n/*\r\n\r\n */\r\n"},
+ {"package x; import \"os\"; func f() {}\n\n", "package x; import \"os\""},
+ {"package x; func f() {fmt.Println()}\n\n", "package x"},
+ } {
+ got, err := importPrefix([]byte(tt.input))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if d := compare.Text(tt.want, got); d != "" {
+ t.Errorf("%d: failed for %q:\n%s", i, tt.input, d)
+ }
+ }
+}
+
+func TestCRLFFile(t *testing.T) {
+ for i, tt := range []struct {
+ input, want string
+ }{
+ {
+ input: `package main
+
+/*
+Hi description
+*/
+func Hi() {
+}
+`,
+ want: `package main
+
+/*
+Hi description
+*/`,
+ },
+ } {
+ got, err := importPrefix([]byte(strings.ReplaceAll(tt.input, "\n", "\r\n")))
+ if err != nil {
+ t.Fatal(err)
+ }
+ want := strings.ReplaceAll(tt.want, "\n", "\r\n")
+ if d := compare.Text(want, got); d != "" {
+ t.Errorf("%d: failed for %q:\n%s", i, tt.input, d)
+ }
+ }
+}
diff --git a/gopls/internal/lsp/source/gc_annotations.go b/gopls/internal/lsp/source/gc_annotations.go
new file mode 100644
index 000000000..fbdfc3f7b
--- /dev/null
+++ b/gopls/internal/lsp/source/gc_annotations.go
@@ -0,0 +1,221 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package source
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/gocommand"
+)
+
+type Annotation string
+
+const (
+ // Nil controls nil checks.
+ Nil Annotation = "nil"
+
+ // Escape controls diagnostics about escape choices.
+ Escape Annotation = "escape"
+
+ // Inline controls diagnostics about inlining choices.
+ Inline Annotation = "inline"
+
+ // Bounds controls bounds checking diagnostics.
+ Bounds Annotation = "bounds"
+)
+
+func GCOptimizationDetails(ctx context.Context, snapshot Snapshot, m *Metadata) (map[span.URI][]*Diagnostic, error) {
+ if len(m.CompiledGoFiles) == 0 {
+ return nil, nil
+ }
+ pkgDir := filepath.Dir(m.CompiledGoFiles[0].Filename())
+ outDir := filepath.Join(os.TempDir(), fmt.Sprintf("gopls-%d.details", os.Getpid()))
+
+ if err := os.MkdirAll(outDir, 0700); err != nil {
+ return nil, err
+ }
+ tmpFile, err := ioutil.TempFile(os.TempDir(), "gopls-x")
+ if err != nil {
+ return nil, err
+ }
+ defer os.Remove(tmpFile.Name())
+
+ outDirURI := span.URIFromPath(outDir)
+ // GC details doesn't handle Windows URIs in the form of "file:///C:/...",
+ // so rewrite them to "file://C:/...". See golang/go#41614.
+ if !strings.HasPrefix(outDir, "/") {
+ outDirURI = span.URI(strings.Replace(string(outDirURI), "file:///", "file://", 1))
+ }
+ inv := &gocommand.Invocation{
+ Verb: "build",
+ Args: []string{
+ fmt.Sprintf("-gcflags=-json=0,%s", outDirURI),
+ fmt.Sprintf("-o=%s", tmpFile.Name()),
+ ".",
+ },
+ WorkingDir: pkgDir,
+ }
+ _, err = snapshot.RunGoCommandDirect(ctx, Normal, inv)
+ if err != nil {
+ return nil, err
+ }
+ files, err := findJSONFiles(outDir)
+ if err != nil {
+ return nil, err
+ }
+ reports := make(map[span.URI][]*Diagnostic)
+ opts := snapshot.View().Options()
+ var parseError error
+ for _, fn := range files {
+ uri, diagnostics, err := parseDetailsFile(fn, opts)
+ if err != nil {
+ // expect errors for all the files, save 1
+ parseError = err
+ }
+ fh := snapshot.FindFile(uri)
+ if fh == nil {
+ continue
+ }
+ if pkgDir != filepath.Dir(fh.URI().Filename()) {
+ // https://github.com/golang/go/issues/42198
+ // sometimes the detail diagnostics generated for files
+ // outside the package can never be taken back.
+ continue
+ }
+ reports[fh.URI()] = diagnostics
+ }
+ return reports, parseError
+}
+
+func parseDetailsFile(filename string, options *Options) (span.URI, []*Diagnostic, error) {
+ buf, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return "", nil, err
+ }
+ var (
+ uri span.URI
+ i int
+ diagnostics []*Diagnostic
+ )
+ type metadata struct {
+ File string `json:"file,omitempty"`
+ }
+ for dec := json.NewDecoder(bytes.NewReader(buf)); dec.More(); {
+ // The first element always contains metadata.
+ if i == 0 {
+ i++
+ m := new(metadata)
+ if err := dec.Decode(m); err != nil {
+ return "", nil, err
+ }
+ if !strings.HasSuffix(m.File, ".go") {
+ continue // <autogenerated>
+ }
+ uri = span.URIFromPath(m.File)
+ continue
+ }
+ d := new(protocol.Diagnostic)
+ if err := dec.Decode(d); err != nil {
+ return "", nil, err
+ }
+ msg := d.Code.(string)
+ if msg != "" {
+ msg = fmt.Sprintf("%s(%s)", msg, d.Message)
+ }
+ if !showDiagnostic(msg, d.Source, options) {
+ continue
+ }
+ var related []protocol.DiagnosticRelatedInformation
+ for _, ri := range d.RelatedInformation {
+ // TODO(rfindley): The compiler uses LSP-like JSON to encode gc details,
+ // however the positions it uses are 1-based UTF-8:
+ // https://github.com/golang/go/blob/master/src/cmd/compile/internal/logopt/log_opts.go
+ //
+ // Here, we adjust for 0-based positions, but do not translate UTF-8 to UTF-16.
+ related = append(related, protocol.DiagnosticRelatedInformation{
+ Location: protocol.Location{
+ URI: ri.Location.URI,
+ Range: zeroIndexedRange(ri.Location.Range),
+ },
+ Message: ri.Message,
+ })
+ }
+ diagnostic := &Diagnostic{
+ URI: uri,
+ Range: zeroIndexedRange(d.Range),
+ Message: msg,
+ Severity: d.Severity,
+ Source: OptimizationDetailsError, // d.Source is always "go compiler" as of 1.16, use our own
+ Tags: d.Tags,
+ Related: related,
+ }
+ diagnostics = append(diagnostics, diagnostic)
+ i++
+ }
+ return uri, diagnostics, nil
+}
+
+// showDiagnostic reports whether a given diagnostic should be shown to the end
+// user, given the current options.
+func showDiagnostic(msg, source string, o *Options) bool {
+ if source != "go compiler" {
+ return false
+ }
+ if o.Annotations == nil {
+ return true
+ }
+ switch {
+ case strings.HasPrefix(msg, "canInline") ||
+ strings.HasPrefix(msg, "cannotInline") ||
+ strings.HasPrefix(msg, "inlineCall"):
+ return o.Annotations[Inline]
+ case strings.HasPrefix(msg, "escape") || msg == "leak":
+ return o.Annotations[Escape]
+ case strings.HasPrefix(msg, "nilcheck"):
+ return o.Annotations[Nil]
+ case strings.HasPrefix(msg, "isInBounds") ||
+ strings.HasPrefix(msg, "isSliceInBounds"):
+ return o.Annotations[Bounds]
+ }
+ return false
+}
+
+// The range produced by the compiler is 1-indexed, so subtract range by 1.
+func zeroIndexedRange(rng protocol.Range) protocol.Range {
+ return protocol.Range{
+ Start: protocol.Position{
+ Line: rng.Start.Line - 1,
+ Character: rng.Start.Character - 1,
+ },
+ End: protocol.Position{
+ Line: rng.End.Line - 1,
+ Character: rng.End.Character - 1,
+ },
+ }
+}
+
+func findJSONFiles(dir string) ([]string, error) {
+ ans := []string{}
+ f := func(path string, fi os.FileInfo, _ error) error {
+ if fi.IsDir() {
+ return nil
+ }
+ if strings.HasSuffix(path, ".json") {
+ ans = append(ans, path)
+ }
+ return nil
+ }
+ err := filepath.Walk(dir, f)
+ return ans, err
+}
diff --git a/gopls/internal/lsp/source/highlight.go b/gopls/internal/lsp/source/highlight.go
new file mode 100644
index 000000000..a190f4896
--- /dev/null
+++ b/gopls/internal/lsp/source/highlight.go
@@ -0,0 +1,484 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package source
+
+import (
+ "context"
+ "fmt"
+ "go/ast"
+ "go/token"
+ "go/types"
+ "strings"
+
+ "golang.org/x/tools/go/ast/astutil"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/internal/event"
+)
+
+func Highlight(ctx context.Context, snapshot Snapshot, fh FileHandle, position protocol.Position) ([]protocol.Range, error) {
+ ctx, done := event.Start(ctx, "source.Highlight")
+ defer done()
+
+ // We always want fully parsed files for highlight, regardless
+ // of whether the file belongs to a workspace package.
+ pkg, pgf, err := PackageForFile(ctx, snapshot, fh.URI(), NarrowestPackage)
+ if err != nil {
+ return nil, fmt.Errorf("getting package for Highlight: %w", err)
+ }
+
+ pos, err := pgf.PositionPos(position)
+ if err != nil {
+ return nil, err
+ }
+ path, _ := astutil.PathEnclosingInterval(pgf.File, pos, pos)
+ if len(path) == 0 {
+ return nil, fmt.Errorf("no enclosing position found for %v:%v", position.Line, position.Character)
+ }
+ // If start == end for astutil.PathEnclosingInterval, the 1-char interval
+ // following start is used instead. As a result, we might not get an exact
+ // match so we should check the 1-char interval to the left of the passed
+ // in position to see if that is an exact match.
+ if _, ok := path[0].(*ast.Ident); !ok {
+ if p, _ := astutil.PathEnclosingInterval(pgf.File, pos-1, pos-1); p != nil {
+ switch p[0].(type) {
+ case *ast.Ident, *ast.SelectorExpr:
+ path = p // use preceding ident/selector
+ }
+ }
+ }
+ result, err := highlightPath(path, pgf.File, pkg.GetTypesInfo())
+ if err != nil {
+ return nil, err
+ }
+ var ranges []protocol.Range
+ for rng := range result {
+ rng, err := pgf.PosRange(rng.start, rng.end)
+ if err != nil {
+ return nil, err
+ }
+ ranges = append(ranges, rng)
+ }
+ return ranges, nil
+}
+
+func highlightPath(path []ast.Node, file *ast.File, info *types.Info) (map[posRange]struct{}, error) {
+ result := make(map[posRange]struct{})
+ switch node := path[0].(type) {
+ case *ast.BasicLit:
+ if len(path) > 1 {
+ if _, ok := path[1].(*ast.ImportSpec); ok {
+ err := highlightImportUses(path, info, result)
+ return result, err
+ }
+ }
+ highlightFuncControlFlow(path, result)
+ case *ast.ReturnStmt, *ast.FuncDecl, *ast.FuncType:
+ highlightFuncControlFlow(path, result)
+ case *ast.Ident:
+ // Check if ident is inside return or func decl.
+ highlightFuncControlFlow(path, result)
+ highlightIdentifier(node, file, info, result)
+ case *ast.ForStmt, *ast.RangeStmt:
+ highlightLoopControlFlow(path, info, result)
+ case *ast.SwitchStmt:
+ highlightSwitchFlow(path, info, result)
+ case *ast.BranchStmt:
+ // BREAK can exit a loop, switch or select, while CONTINUE exit a loop so
+ // these need to be handled separately. They can also be embedded in any
+ // other loop/switch/select if they have a label. TODO: add support for
+ // GOTO and FALLTHROUGH as well.
+ switch node.Tok {
+ case token.BREAK:
+ if node.Label != nil {
+ highlightLabeledFlow(path, info, node, result)
+ } else {
+ highlightUnlabeledBreakFlow(path, info, result)
+ }
+ case token.CONTINUE:
+ if node.Label != nil {
+ highlightLabeledFlow(path, info, node, result)
+ } else {
+ highlightLoopControlFlow(path, info, result)
+ }
+ }
+ default:
+ // If the cursor is in an unidentified area, return empty results.
+ return nil, nil
+ }
+ return result, nil
+}
+
+type posRange struct {
+ start, end token.Pos
+}
+
+func highlightFuncControlFlow(path []ast.Node, result map[posRange]struct{}) {
+ var enclosingFunc ast.Node
+ var returnStmt *ast.ReturnStmt
+ var resultsList *ast.FieldList
+ inReturnList := false
+
+Outer:
+ // Reverse walk the path till we get to the func block.
+ for i, n := range path {
+ switch node := n.(type) {
+ case *ast.KeyValueExpr:
+ // If cursor is in a key: value expr, we don't want control flow highlighting
+ return
+ case *ast.CallExpr:
+ // If cursor is an arg in a callExpr, we don't want control flow highlighting.
+ if i > 0 {
+ for _, arg := range node.Args {
+ if arg == path[i-1] {
+ return
+ }
+ }
+ }
+ case *ast.Field:
+ inReturnList = true
+ case *ast.FuncLit:
+ enclosingFunc = n
+ resultsList = node.Type.Results
+ break Outer
+ case *ast.FuncDecl:
+ enclosingFunc = n
+ resultsList = node.Type.Results
+ break Outer
+ case *ast.ReturnStmt:
+ returnStmt = node
+ // If the cursor is not directly in a *ast.ReturnStmt, then
+ // we need to know if it is within one of the values that is being returned.
+ inReturnList = inReturnList || path[0] != returnStmt
+ }
+ }
+ // Cursor is not in a function.
+ if enclosingFunc == nil {
+ return
+ }
+ // If the cursor is on a "return" or "func" keyword, we should highlight all of the exit
+ // points of the function, including the "return" and "func" keywords.
+ highlightAllReturnsAndFunc := path[0] == returnStmt || path[0] == enclosingFunc
+ switch path[0].(type) {
+ case *ast.Ident, *ast.BasicLit:
+ // Cursor is in an identifier and not in a return statement or in the results list.
+ if returnStmt == nil && !inReturnList {
+ return
+ }
+ case *ast.FuncType:
+ highlightAllReturnsAndFunc = true
+ }
+ // The user's cursor may be within the return statement of a function,
+ // or within the result section of a function's signature.
+ // index := -1
+ var nodes []ast.Node
+ if returnStmt != nil {
+ for _, n := range returnStmt.Results {
+ nodes = append(nodes, n)
+ }
+ } else if resultsList != nil {
+ for _, n := range resultsList.List {
+ nodes = append(nodes, n)
+ }
+ }
+ _, index := nodeAtPos(nodes, path[0].Pos())
+
+ // Highlight the correct argument in the function declaration return types.
+ if resultsList != nil && -1 < index && index < len(resultsList.List) {
+ rng := posRange{
+ start: resultsList.List[index].Pos(),
+ end: resultsList.List[index].End(),
+ }
+ result[rng] = struct{}{}
+ }
+ // Add the "func" part of the func declaration.
+ if highlightAllReturnsAndFunc {
+ r := posRange{
+ start: enclosingFunc.Pos(),
+ end: enclosingFunc.Pos() + token.Pos(len("func")),
+ }
+ result[r] = struct{}{}
+ }
+ ast.Inspect(enclosingFunc, func(n ast.Node) bool {
+ // Don't traverse any other functions.
+ switch n.(type) {
+ case *ast.FuncDecl, *ast.FuncLit:
+ return enclosingFunc == n
+ }
+ ret, ok := n.(*ast.ReturnStmt)
+ if !ok {
+ return true
+ }
+ var toAdd ast.Node
+ // Add the entire return statement, applies when highlight the word "return" or "func".
+ if highlightAllReturnsAndFunc {
+ toAdd = n
+ }
+ // Add the relevant field within the entire return statement.
+ if -1 < index && index < len(ret.Results) {
+ toAdd = ret.Results[index]
+ }
+ if toAdd != nil {
+ result[posRange{start: toAdd.Pos(), end: toAdd.End()}] = struct{}{}
+ }
+ return false
+ })
+}
+
+// highlightUnlabeledBreakFlow highlights the innermost enclosing for/range/switch or swlect
+func highlightUnlabeledBreakFlow(path []ast.Node, info *types.Info, result map[posRange]struct{}) {
+ // Reverse walk the path until we find closest loop, select, or switch.
+ for _, n := range path {
+ switch n.(type) {
+ case *ast.ForStmt, *ast.RangeStmt:
+ highlightLoopControlFlow(path, info, result)
+ return // only highlight the innermost statement
+ case *ast.SwitchStmt:
+ highlightSwitchFlow(path, info, result)
+ return
+ case *ast.SelectStmt:
+ // TODO: add highlight when breaking a select.
+ return
+ }
+ }
+}
+
+// highlightLabeledFlow highlights the enclosing labeled for, range,
+// or switch statement denoted by a labeled break or continue stmt.
+func highlightLabeledFlow(path []ast.Node, info *types.Info, stmt *ast.BranchStmt, result map[posRange]struct{}) {
+ use := info.Uses[stmt.Label]
+ if use == nil {
+ return
+ }
+ for _, n := range path {
+ if label, ok := n.(*ast.LabeledStmt); ok && info.Defs[label.Label] == use {
+ switch label.Stmt.(type) {
+ case *ast.ForStmt, *ast.RangeStmt:
+ highlightLoopControlFlow([]ast.Node{label.Stmt, label}, info, result)
+ case *ast.SwitchStmt:
+ highlightSwitchFlow([]ast.Node{label.Stmt, label}, info, result)
+ }
+ return
+ }
+ }
+}
+
+func labelFor(path []ast.Node) *ast.Ident {
+ if len(path) > 1 {
+ if n, ok := path[1].(*ast.LabeledStmt); ok {
+ return n.Label
+ }
+ }
+ return nil
+}
+
+func highlightLoopControlFlow(path []ast.Node, info *types.Info, result map[posRange]struct{}) {
+ var loop ast.Node
+ var loopLabel *ast.Ident
+ stmtLabel := labelFor(path)
+Outer:
+ // Reverse walk the path till we get to the for loop.
+ for i := range path {
+ switch n := path[i].(type) {
+ case *ast.ForStmt, *ast.RangeStmt:
+ loopLabel = labelFor(path[i:])
+
+ if stmtLabel == nil || loopLabel == stmtLabel {
+ loop = n
+ break Outer
+ }
+ }
+ }
+ if loop == nil {
+ return
+ }
+
+ // Add the for statement.
+ rng := posRange{
+ start: loop.Pos(),
+ end: loop.Pos() + token.Pos(len("for")),
+ }
+ result[rng] = struct{}{}
+
+ // Traverse AST to find branch statements within the same for-loop.
+ ast.Inspect(loop, func(n ast.Node) bool {
+ switch n.(type) {
+ case *ast.ForStmt, *ast.RangeStmt:
+ return loop == n
+ case *ast.SwitchStmt, *ast.SelectStmt:
+ return false
+ }
+ b, ok := n.(*ast.BranchStmt)
+ if !ok {
+ return true
+ }
+ if b.Label == nil || info.Uses[b.Label] == info.Defs[loopLabel] {
+ result[posRange{start: b.Pos(), end: b.End()}] = struct{}{}
+ }
+ return true
+ })
+
+ // Find continue statements in the same loop or switches/selects.
+ ast.Inspect(loop, func(n ast.Node) bool {
+ switch n.(type) {
+ case *ast.ForStmt, *ast.RangeStmt:
+ return loop == n
+ }
+
+ if n, ok := n.(*ast.BranchStmt); ok && n.Tok == token.CONTINUE {
+ result[posRange{start: n.Pos(), end: n.End()}] = struct{}{}
+ }
+ return true
+ })
+
+ // We don't need to check other for loops if we aren't looking for labeled statements.
+ if loopLabel == nil {
+ return
+ }
+
+ // Find labeled branch statements in any loop.
+ ast.Inspect(loop, func(n ast.Node) bool {
+ b, ok := n.(*ast.BranchStmt)
+ if !ok {
+ return true
+ }
+ // statement with labels that matches the loop
+ if b.Label != nil && info.Uses[b.Label] == info.Defs[loopLabel] {
+ result[posRange{start: b.Pos(), end: b.End()}] = struct{}{}
+ }
+ return true
+ })
+}
+
+func highlightSwitchFlow(path []ast.Node, info *types.Info, result map[posRange]struct{}) {
+ var switchNode ast.Node
+ var switchNodeLabel *ast.Ident
+ stmtLabel := labelFor(path)
+Outer:
+ // Reverse walk the path till we get to the switch statement.
+ for i := range path {
+ switch n := path[i].(type) {
+ case *ast.SwitchStmt:
+ switchNodeLabel = labelFor(path[i:])
+ if stmtLabel == nil || switchNodeLabel == stmtLabel {
+ switchNode = n
+ break Outer
+ }
+ }
+ }
+ // Cursor is not in a switch statement
+ if switchNode == nil {
+ return
+ }
+
+ // Add the switch statement.
+ rng := posRange{
+ start: switchNode.Pos(),
+ end: switchNode.Pos() + token.Pos(len("switch")),
+ }
+ result[rng] = struct{}{}
+
+ // Traverse AST to find break statements within the same switch.
+ ast.Inspect(switchNode, func(n ast.Node) bool {
+ switch n.(type) {
+ case *ast.SwitchStmt:
+ return switchNode == n
+ case *ast.ForStmt, *ast.RangeStmt, *ast.SelectStmt:
+ return false
+ }
+
+ b, ok := n.(*ast.BranchStmt)
+ if !ok || b.Tok != token.BREAK {
+ return true
+ }
+
+ if b.Label == nil || info.Uses[b.Label] == info.Defs[switchNodeLabel] {
+ result[posRange{start: b.Pos(), end: b.End()}] = struct{}{}
+ }
+ return true
+ })
+
+ // We don't need to check other switches if we aren't looking for labeled statements.
+ if switchNodeLabel == nil {
+ return
+ }
+
+ // Find labeled break statements in any switch
+ ast.Inspect(switchNode, func(n ast.Node) bool {
+ b, ok := n.(*ast.BranchStmt)
+ if !ok || b.Tok != token.BREAK {
+ return true
+ }
+
+ if b.Label != nil && info.Uses[b.Label] == info.Defs[switchNodeLabel] {
+ result[posRange{start: b.Pos(), end: b.End()}] = struct{}{}
+ }
+
+ return true
+ })
+}
+
+func highlightImportUses(path []ast.Node, info *types.Info, result map[posRange]struct{}) error {
+ basicLit, ok := path[0].(*ast.BasicLit)
+ if !ok {
+ return fmt.Errorf("highlightImportUses called with an ast.Node of type %T", basicLit)
+ }
+ ast.Inspect(path[len(path)-1], func(node ast.Node) bool {
+ if imp, ok := node.(*ast.ImportSpec); ok && imp.Path == basicLit {
+ result[posRange{start: node.Pos(), end: node.End()}] = struct{}{}
+ return false
+ }
+ n, ok := node.(*ast.Ident)
+ if !ok {
+ return true
+ }
+ obj, ok := info.ObjectOf(n).(*types.PkgName)
+ if !ok {
+ return true
+ }
+ if !strings.Contains(basicLit.Value, obj.Name()) {
+ return true
+ }
+ result[posRange{start: n.Pos(), end: n.End()}] = struct{}{}
+ return false
+ })
+ return nil
+}
+
+func highlightIdentifier(id *ast.Ident, file *ast.File, info *types.Info, result map[posRange]struct{}) {
+ // TODO(rfindley): idObj may be nil. Note that returning early in this case
+ // causes tests to fail (because the nObj == idObj check below was succeeded
+ // for nil == nil!)
+ //
+ // Revisit this. If ObjectOf is nil, there are type errors, and it seems
+ // reasonable for identifier highlighting not to work.
+ idObj := info.ObjectOf(id)
+ pkgObj, isImported := idObj.(*types.PkgName)
+ ast.Inspect(file, func(node ast.Node) bool {
+ if imp, ok := node.(*ast.ImportSpec); ok && isImported {
+ highlightImport(pkgObj, imp, result)
+ }
+ n, ok := node.(*ast.Ident)
+ if !ok {
+ return true
+ }
+ if n.Name != id.Name {
+ return false
+ }
+ if nObj := info.ObjectOf(n); nObj == idObj {
+ result[posRange{start: n.Pos(), end: n.End()}] = struct{}{}
+ }
+ return false
+ })
+}
+
+func highlightImport(obj *types.PkgName, imp *ast.ImportSpec, result map[posRange]struct{}) {
+ if imp.Name != nil || imp.Path == nil {
+ return
+ }
+ if !strings.Contains(imp.Path.Value, obj.Name()) {
+ return
+ }
+ result[posRange{start: imp.Path.Pos(), end: imp.Path.End()}] = struct{}{}
+}
diff --git a/gopls/internal/lsp/source/hover.go b/gopls/internal/lsp/source/hover.go
new file mode 100644
index 000000000..136a3022b
--- /dev/null
+++ b/gopls/internal/lsp/source/hover.go
@@ -0,0 +1,951 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package source
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "go/ast"
+ "go/constant"
+ "go/doc"
+ "go/format"
+ "go/token"
+ "go/types"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "golang.org/x/text/unicode/runenames"
+ "golang.org/x/tools/go/ast/astutil"
+ "golang.org/x/tools/go/types/typeutil"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/safetoken"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/bug"
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/typeparams"
+)
+
+// HoverJSON contains information used by hover. It is also the JSON returned
+// for the "structured" hover format
+type HoverJSON struct {
+ // Synopsis is a single sentence synopsis of the symbol's documentation.
+ Synopsis string `json:"synopsis"`
+
+ // FullDocumentation is the symbol's full documentation.
+ FullDocumentation string `json:"fullDocumentation"`
+
+ // Signature is the symbol's signature.
+ Signature string `json:"signature"`
+
+ // SingleLine is a single line describing the symbol.
+ // This is recommended only for use in clients that show a single line for hover.
+ SingleLine string `json:"singleLine"`
+
+ // SymbolName is the human-readable name to use for the symbol in links.
+ SymbolName string `json:"symbolName"`
+
+ // LinkPath is the pkg.go.dev link for the given symbol.
+ // For example, the "go/ast" part of "pkg.go.dev/go/ast#Node".
+ LinkPath string `json:"linkPath"`
+
+ // LinkAnchor is the pkg.go.dev link anchor for the given symbol.
+ // For example, the "Node" part of "pkg.go.dev/go/ast#Node".
+ LinkAnchor string `json:"linkAnchor"`
+}
+
+// Hover implements the "textDocument/hover" RPC for Go files.
+func Hover(ctx context.Context, snapshot Snapshot, fh FileHandle, position protocol.Position) (*protocol.Hover, error) {
+ ctx, done := event.Start(ctx, "source.Hover")
+ defer done()
+
+ rng, h, err := hover(ctx, snapshot, fh, position)
+ if err != nil {
+ return nil, err
+ }
+ if h == nil {
+ return nil, nil
+ }
+ hover, err := formatHover(h, snapshot.View().Options())
+ if err != nil {
+ return nil, err
+ }
+ return &protocol.Hover{
+ Contents: protocol.MarkupContent{
+ Kind: snapshot.View().Options().PreferredContentFormat,
+ Value: hover,
+ },
+ Range: rng,
+ }, nil
+}
+
+// hover computes hover information at the given position. If we do not support
+// hovering at the position, it returns _, nil, nil: an error is only returned
+// if the position is valid but we fail to compute hover information.
+func hover(ctx context.Context, snapshot Snapshot, fh FileHandle, pp protocol.Position) (protocol.Range, *HoverJSON, error) {
+ pkg, pgf, err := PackageForFile(ctx, snapshot, fh.URI(), NarrowestPackage)
+ if err != nil {
+ return protocol.Range{}, nil, err
+ }
+ pos, err := pgf.PositionPos(pp)
+ if err != nil {
+ return protocol.Range{}, nil, err
+ }
+
+ // Handle hovering over import paths, which do not have an associated
+ // identifier.
+ for _, spec := range pgf.File.Imports {
+ // We are inclusive of the end point here to allow hovering when the cursor
+ // is just after the import path.
+ if spec.Path.Pos() <= pos && pos <= spec.Path.End() {
+ return hoverImport(ctx, snapshot, pkg, pgf, spec)
+ }
+ }
+
+ // Handle hovering over the package name, which does not have an associated
+ // object.
+ // As with import paths, we allow hovering just after the package name.
+ if pgf.File.Name != nil && pgf.File.Name.Pos() <= pos && pos <= pgf.File.Name.Pos() {
+ return hoverPackageName(pkg, pgf)
+ }
+
+ // Handle hovering over (non-import-path) literals.
+ if path, _ := astutil.PathEnclosingInterval(pgf.File, pos, pos); len(path) > 0 {
+ if lit, _ := path[0].(*ast.BasicLit); lit != nil {
+ return hoverLit(pgf, lit, pos)
+ }
+ }
+
+ // The general case: compute hover information for the object referenced by
+ // the identifier at pos.
+ ident, obj, selectedType := referencedObject(pkg, pgf, pos)
+ if obj == nil || ident == nil {
+ return protocol.Range{}, nil, nil // no object to hover
+ }
+
+ rng, err := pgf.NodeRange(ident)
+ if err != nil {
+ return protocol.Range{}, nil, err
+ }
+
+ // By convention, we qualify hover information relative to the package
+ // from which the request originated.
+ qf := Qualifier(pgf.File, pkg.GetTypes(), pkg.GetTypesInfo())
+
+ // Handle type switch identifiers as a special case, since they don't have an
+ // object.
+ //
+ // There's not much useful information to provide.
+ if selectedType != nil {
+ fakeObj := types.NewVar(obj.Pos(), obj.Pkg(), obj.Name(), selectedType)
+ signature := objectString(fakeObj, qf, nil)
+ return rng, &HoverJSON{
+ Signature: signature,
+ SingleLine: signature,
+ SymbolName: fakeObj.Name(),
+ }, nil
+ }
+
+ // Handle builtins, which don't have a package or position.
+ if obj.Pkg() == nil {
+ h, err := hoverBuiltin(ctx, snapshot, obj)
+ return rng, h, err
+ }
+
+ // For all other objects, consider the full syntax of their declaration in
+ // order to correctly compute their documentation, signature, and link.
+ declPGF, declPos, err := parseFull(ctx, snapshot, pkg.FileSet(), obj.Pos())
+ if err != nil {
+ return protocol.Range{}, nil, fmt.Errorf("re-parsing declaration of %s: %v", obj.Name(), err)
+ }
+ decl, spec, field := findDeclInfo([]*ast.File{declPGF.File}, declPos)
+ comment := chooseDocComment(decl, spec, field)
+ docText := comment.Text()
+
+ // By default, types.ObjectString provides a reasonable signature.
+ signature := objectString(obj, qf, nil)
+ // TODO(rfindley): we could do much better for inferred signatures.
+ if inferred := inferredSignature(pkg.GetTypesInfo(), ident); inferred != nil {
+ signature = objectString(obj, qf, inferred)
+ }
+
+ // For "objects defined by a type spec", the signature produced by
+ // objectString is insufficient:
+ // (1) large structs are formatted poorly, with no newlines
+ // (2) we lose inline comments
+ //
+ // Furthermore, we include a summary of their method set.
+ //
+ // TODO(rfindley): this should use FormatVarType to get proper qualification
+ // of identifiers, and we should revisit the formatting of method set.
+ _, isTypeName := obj.(*types.TypeName)
+ _, isTypeParam := obj.Type().(*typeparams.TypeParam)
+ if isTypeName && !isTypeParam {
+ spec, ok := spec.(*ast.TypeSpec)
+ if !ok {
+ return protocol.Range{}, nil, bug.Errorf("type name %q without type spec", obj.Name())
+ }
+ spec2 := *spec
+ // Don't duplicate comments when formatting type specs.
+ spec2.Doc = nil
+ spec2.Comment = nil
+ var b strings.Builder
+ b.WriteString("type ")
+ fset := FileSetFor(declPGF.Tok)
+ if err := format.Node(&b, fset, &spec2); err != nil {
+ return protocol.Range{}, nil, err
+ }
+
+ // Display the declared methods accessible from the identifier.
+ //
+ // (The format.Node call above displays any struct fields, public
+ // or private, in syntactic form. We choose not to recursively
+ // enumerate any fields and methods promoted from them.)
+ if !types.IsInterface(obj.Type()) {
+ sep := "\n\n"
+ for _, m := range typeutil.IntuitiveMethodSet(obj.Type(), nil) {
+ // Show direct methods that are either exported, or defined in the
+ // current package.
+ if (m.Obj().Exported() || m.Obj().Pkg() == pkg.GetTypes()) && len(m.Index()) == 1 {
+ b.WriteString(sep)
+ sep = "\n"
+ b.WriteString(objectString(m.Obj(), qf, nil))
+ }
+ }
+ }
+ signature = b.String()
+ }
+
+ // Compute link data (on pkg.go.dev or other documentation host).
+ //
+ // If linkPath is empty, the symbol is not linkable.
+ var (
+ linkName string // => link title, always non-empty
+ linkPath string // => link path
+ anchor string // link anchor
+ linkMeta *Metadata // metadata for the linked package
+ )
+ {
+ linkMeta = findFileInDeps(snapshot, pkg.Metadata(), declPGF.URI)
+ if linkMeta == nil {
+ return protocol.Range{}, nil, bug.Errorf("no metadata for %s", declPGF.URI)
+ }
+
+ // For package names, we simply link to their imported package.
+ if pkgName, ok := obj.(*types.PkgName); ok {
+ linkName = pkgName.Name()
+ linkPath = pkgName.Imported().Path()
+ impID := linkMeta.DepsByPkgPath[PackagePath(pkgName.Imported().Path())]
+ linkMeta = snapshot.Metadata(impID)
+ if linkMeta == nil {
+ return protocol.Range{}, nil, bug.Errorf("no metadata for %s", declPGF.URI)
+ }
+ } else {
+ // For all others, check whether the object is in the package scope, or
+ // an exported field or method of an object in the package scope.
+ //
+ // We try to match pkgsite's heuristics for what is linkable, and what is
+ // not.
+ var recv types.Object
+ switch obj := obj.(type) {
+ case *types.Func:
+ sig := obj.Type().(*types.Signature)
+ if sig.Recv() != nil {
+ tname := typeToObject(sig.Recv().Type())
+ if tname != nil { // beware typed nil
+ recv = tname
+ }
+ }
+ case *types.Var:
+ if obj.IsField() {
+ if spec, ok := spec.(*ast.TypeSpec); ok {
+ typeName := spec.Name
+ scopeObj, _ := obj.Pkg().Scope().Lookup(typeName.Name).(*types.TypeName)
+ if scopeObj != nil {
+ if st, _ := scopeObj.Type().Underlying().(*types.Struct); st != nil {
+ for i := 0; i < st.NumFields(); i++ {
+ if obj == st.Field(i) {
+ recv = scopeObj
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Even if the object is not available in package documentation, it may
+ // be embedded in a documented receiver. Detect this by searching
+ // enclosing selector expressions.
+ //
+ // TODO(rfindley): pkgsite doesn't document fields from embedding, just
+ // methods.
+ if recv == nil || !recv.Exported() {
+ path := pathEnclosingObjNode(pgf.File, pos)
+ if enclosing := searchForEnclosing(pkg.GetTypesInfo(), path); enclosing != nil {
+ recv = enclosing
+ } else {
+ recv = nil // note: just recv = ... could result in a typed nil.
+ }
+ }
+
+ pkg := obj.Pkg()
+ if recv != nil {
+ linkName = fmt.Sprintf("(%s.%s).%s", pkg.Name(), recv.Name(), obj.Name())
+ if obj.Exported() && recv.Exported() && pkg.Scope().Lookup(recv.Name()) == recv {
+ linkPath = pkg.Path()
+ anchor = fmt.Sprintf("%s.%s", recv.Name(), obj.Name())
+ }
+ } else {
+ linkName = fmt.Sprintf("%s.%s", pkg.Name(), obj.Name())
+ if obj.Exported() && pkg.Scope().Lookup(obj.Name()) == obj {
+ linkPath = pkg.Path()
+ anchor = obj.Name()
+ }
+ }
+ }
+ }
+
+ if snapshot.View().IsGoPrivatePath(linkPath) || linkMeta.ForTest != "" {
+ linkPath = ""
+ } else if linkMeta.Module != nil && linkMeta.Module.Version != "" {
+ mod := linkMeta.Module
+ linkPath = strings.Replace(linkPath, mod.Path, mod.Path+"@"+mod.Version, 1)
+ }
+
+ return rng, &HoverJSON{
+ Synopsis: doc.Synopsis(docText),
+ FullDocumentation: docText,
+ SingleLine: objectString(obj, qf, nil),
+ SymbolName: linkName,
+ Signature: signature,
+ LinkPath: linkPath,
+ LinkAnchor: anchor,
+ }, nil
+}
+
+// hoverBuiltin computes hover information when hovering over a builtin
+// identifier.
+func hoverBuiltin(ctx context.Context, snapshot Snapshot, obj types.Object) (*HoverJSON, error) {
+ // TODO(rfindley): link to the correct version of Go documentation.
+ builtin, err := snapshot.BuiltinFile(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO(rfindley): add a test for jump to definition of error.Error (which is
+ // probably failing, considering it lacks special handling).
+ if obj.Name() == "Error" {
+ signature := obj.String()
+ return &HoverJSON{
+ Signature: signature,
+ SingleLine: signature,
+ // TODO(rfindley): these are better than the current behavior.
+ // SymbolName: "(error).Error",
+ // LinkPath: "builtin",
+ // LinkAnchor: "error.Error",
+ }, nil
+ }
+
+ builtinObj := builtin.File.Scope.Lookup(obj.Name())
+ if builtinObj == nil {
+ // All builtins should have a declaration in the builtin file.
+ return nil, bug.Errorf("no builtin object for %s", obj.Name())
+ }
+ node, _ := builtinObj.Decl.(ast.Node)
+ if node == nil {
+ return nil, bug.Errorf("no declaration for %s", obj.Name())
+ }
+
+ var comment *ast.CommentGroup
+ path, _ := astutil.PathEnclosingInterval(builtin.File, node.Pos(), node.End())
+ for _, n := range path {
+ switch n := n.(type) {
+ case *ast.GenDecl:
+ // Separate documentation and signature.
+ comment = n.Doc
+ node2 := *n
+ node2.Doc = nil
+ node = &node2
+ case *ast.FuncDecl:
+ // Ditto.
+ comment = n.Doc
+ node2 := *n
+ node2.Doc = nil
+ node = &node2
+ }
+ }
+
+ signature := FormatNodeFile(builtin.Tok, node)
+ // Replace fake types with their common equivalent.
+ // TODO(rfindley): we should instead use obj.Type(), which would have the
+ // *actual* types of the builtin call.
+ signature = replacer.Replace(signature)
+
+ docText := comment.Text()
+ return &HoverJSON{
+ Synopsis: doc.Synopsis(docText),
+ FullDocumentation: docText,
+ Signature: signature,
+ SingleLine: obj.String(),
+ SymbolName: obj.Name(),
+ LinkPath: "builtin",
+ LinkAnchor: obj.Name(),
+ }, nil
+}
+
+// hoverImport computes hover information when hovering over the import path of
+// imp in the file pgf of pkg.
+//
+// If we do not have metadata for the hovered import, it returns _
+func hoverImport(ctx context.Context, snapshot Snapshot, pkg Package, pgf *ParsedGoFile, imp *ast.ImportSpec) (protocol.Range, *HoverJSON, error) {
+ rng, err := pgf.NodeRange(imp.Path)
+ if err != nil {
+ return protocol.Range{}, nil, err
+ }
+
+ importPath := UnquoteImportPath(imp)
+ if importPath == "" {
+ return protocol.Range{}, nil, fmt.Errorf("invalid import path")
+ }
+ impID := pkg.Metadata().DepsByImpPath[importPath]
+ if impID == "" {
+ return protocol.Range{}, nil, fmt.Errorf("no package data for import %q", importPath)
+ }
+ impMetadata := snapshot.Metadata(impID)
+ if impMetadata == nil {
+ return protocol.Range{}, nil, bug.Errorf("failed to resolve import ID %q", impID)
+ }
+
+ // Find the first file with a package doc comment.
+ var comment *ast.CommentGroup
+ for _, f := range impMetadata.CompiledGoFiles {
+ fh, err := snapshot.GetFile(ctx, f)
+ if err != nil {
+ if ctx.Err() != nil {
+ return protocol.Range{}, nil, ctx.Err()
+ }
+ continue
+ }
+ pgf, err := snapshot.ParseGo(ctx, fh, ParseHeader)
+ if err != nil {
+ if ctx.Err() != nil {
+ return protocol.Range{}, nil, ctx.Err()
+ }
+ continue
+ }
+ if pgf.File.Doc != nil {
+ comment = pgf.File.Doc
+ break
+ }
+ }
+
+ docText := comment.Text()
+ return rng, &HoverJSON{
+ Synopsis: doc.Synopsis(docText),
+ FullDocumentation: docText,
+ }, nil
+}
+
+// hoverPackageName computes hover information for the package name of the file
+// pgf in pkg.
+func hoverPackageName(pkg Package, pgf *ParsedGoFile) (protocol.Range, *HoverJSON, error) {
+ var comment *ast.CommentGroup
+ for _, pgf := range pkg.CompiledGoFiles() {
+ if pgf.File.Doc != nil {
+ comment = pgf.File.Doc
+ break
+ }
+ }
+ rng, err := pgf.NodeRange(pgf.File.Name)
+ if err != nil {
+ return protocol.Range{}, nil, err
+ }
+ docText := comment.Text()
+ return rng, &HoverJSON{
+ Synopsis: doc.Synopsis(docText),
+ FullDocumentation: docText,
+ // Note: including a signature is redundant, since the cursor is already on the
+ // package name.
+ }, nil
+}
+
+// hoverLit computes hover information when hovering over the basic literal lit
+// in the file pgf. The provided pos must be the exact position of the cursor,
+// as it is used to extract the hovered rune in strings.
+//
+// For example, hovering over "\u2211" in "foo \u2211 bar" yields:
+//
+// '∑', U+2211, N-ARY SUMMATION
+func hoverLit(pgf *ParsedGoFile, lit *ast.BasicLit, pos token.Pos) (protocol.Range, *HoverJSON, error) {
+ var r rune
+ var start, end token.Pos
+ // Extract a rune from the current position.
+ // 'Ω', "...Ω...", or 0x03A9 => 'Ω', U+03A9, GREEK CAPITAL LETTER OMEGA
+ switch lit.Kind {
+ case token.CHAR:
+ s, err := strconv.Unquote(lit.Value)
+ if err != nil {
+ // If the conversion fails, it's because of an invalid syntax, therefore
+ // there is no rune to be found.
+ return protocol.Range{}, nil, nil
+ }
+ r, _ = utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError {
+ return protocol.Range{}, nil, fmt.Errorf("rune error")
+ }
+ start, end = lit.Pos(), lit.End()
+ case token.INT:
+ // TODO(rfindley): add support for hex/octal/binary->int conversion here.
+
+ // It's an integer, scan only if it is a hex literal whose bitsize in
+ // ranging from 8 to 32.
+ if !(strings.HasPrefix(lit.Value, "0x") && len(lit.Value[2:]) >= 2 && len(lit.Value[2:]) <= 8) {
+ return protocol.Range{}, nil, nil
+ }
+ v, err := strconv.ParseUint(lit.Value[2:], 16, 32)
+ if err != nil {
+ return protocol.Range{}, nil, fmt.Errorf("parsing int: %v", err)
+ }
+ r = rune(v)
+ if r == utf8.RuneError {
+ return protocol.Range{}, nil, fmt.Errorf("rune error")
+ }
+ start, end = lit.Pos(), lit.End()
+ case token.STRING:
+ // It's a string, scan only if it contains a unicode escape sequence under or before the
+ // current cursor position.
+ litOffset, err := safetoken.Offset(pgf.Tok, lit.Pos())
+ if err != nil {
+ return protocol.Range{}, nil, err
+ }
+ offset, err := safetoken.Offset(pgf.Tok, pos)
+ if err != nil {
+ return protocol.Range{}, nil, err
+ }
+ for i := offset - litOffset; i > 0; i-- {
+ // Start at the cursor position and search backward for the beginning of a rune escape sequence.
+ rr, _ := utf8.DecodeRuneInString(lit.Value[i:])
+ if rr == utf8.RuneError {
+ return protocol.Range{}, nil, fmt.Errorf("rune error")
+ }
+ if rr == '\\' {
+ // Got the beginning, decode it.
+ var tail string
+ r, _, tail, err = strconv.UnquoteChar(lit.Value[i:], '"')
+ if err != nil {
+ // If the conversion fails, it's because of an invalid syntax,
+ // therefore is no rune to be found.
+ return protocol.Range{}, nil, nil
+ }
+ // Only the rune escape sequence part of the string has to be highlighted, recompute the range.
+ runeLen := len(lit.Value) - (int(i) + len(tail))
+ start = token.Pos(int(lit.Pos()) + int(i))
+ end = token.Pos(int(start) + runeLen)
+ break
+ }
+ }
+ }
+ if r == 0 {
+ return protocol.Range{}, nil, nil
+ }
+ rng, err := pgf.PosRange(start, end)
+ if err != nil {
+ return protocol.Range{}, nil, err
+ }
+
+ var desc string
+ runeName := runenames.Name(r)
+ if len(runeName) > 0 && runeName[0] == '<' {
+ // Check if the rune looks like an HTML tag. If so, trim the surrounding <>
+ // characters to work around https://github.com/microsoft/vscode/issues/124042.
+ runeName = strings.TrimRight(runeName[1:], ">")
+ }
+ if strconv.IsPrint(r) {
+ desc = fmt.Sprintf("'%s', U+%04X, %s", string(r), uint32(r), runeName)
+ } else {
+ desc = fmt.Sprintf("U+%04X, %s", uint32(r), runeName)
+ }
+ return rng, &HoverJSON{
+ Synopsis: desc,
+ FullDocumentation: desc,
+ }, nil
+}
+
+// objectString is a wrapper around the types.ObjectString function.
+// It handles adding more information to the object string.
+//
+// TODO(rfindley): this function does too much. We should lift the special
+// handling to callsites.
+func objectString(obj types.Object, qf types.Qualifier, inferred *types.Signature) string {
+ // If the signature type was inferred, prefer the inferred signature with a
+ // comment showing the generic signature.
+ if sig, _ := obj.Type().(*types.Signature); sig != nil && typeparams.ForSignature(sig).Len() > 0 && inferred != nil {
+ obj2 := types.NewFunc(obj.Pos(), obj.Pkg(), obj.Name(), inferred)
+ str := types.ObjectString(obj2, qf)
+ // Try to avoid overly long lines.
+ if len(str) > 60 {
+ str += "\n"
+ } else {
+ str += " "
+ }
+ str += "// " + types.TypeString(sig, qf)
+ return str
+ }
+ str := types.ObjectString(obj, qf)
+ switch obj := obj.(type) {
+ case *types.Const:
+ str = fmt.Sprintf("%s = %s", str, obj.Val())
+
+ // Try to add a formatted duration as an inline comment
+ typ, ok := obj.Type().(*types.Named)
+ if !ok {
+ break
+ }
+ pkg := typ.Obj().Pkg()
+ if pkg.Path() == "time" && typ.Obj().Name() == "Duration" {
+ if d, ok := constant.Int64Val(obj.Val()); ok {
+ str += " // " + time.Duration(d).String()
+ }
+ }
+ }
+ return str
+}
+
+// HoverDocForObject returns the best doc comment for obj (for which
+// fset provides file/line information).
+//
+// TODO(rfindley): there appears to be zero(!) tests for this functionality.
+func HoverDocForObject(ctx context.Context, snapshot Snapshot, fset *token.FileSet, obj types.Object) (*ast.CommentGroup, error) {
+ if _, isTypeName := obj.(*types.TypeName); isTypeName {
+ if _, isTypeParam := obj.Type().(*typeparams.TypeParam); isTypeParam {
+ return nil, nil
+ }
+ }
+
+ pgf, pos, err := parseFull(ctx, snapshot, fset, obj.Pos())
+ if err != nil {
+ return nil, fmt.Errorf("re-parsing: %v", err)
+ }
+
+ decl, spec, field := findDeclInfo([]*ast.File{pgf.File}, pos)
+ return chooseDocComment(decl, spec, field), nil
+}
+
+func chooseDocComment(decl ast.Decl, spec ast.Spec, field *ast.Field) *ast.CommentGroup {
+ if field != nil {
+ if field.Doc != nil {
+ return field.Doc
+ }
+ if field.Comment != nil {
+ return field.Comment
+ }
+ return nil
+ }
+ switch decl := decl.(type) {
+ case *ast.FuncDecl:
+ return decl.Doc
+ case *ast.GenDecl:
+ switch spec := spec.(type) {
+ case *ast.ValueSpec:
+ if spec.Doc != nil {
+ return spec.Doc
+ }
+ if decl.Doc != nil {
+ return decl.Doc
+ }
+ return spec.Comment
+ case *ast.TypeSpec:
+ if spec.Doc != nil {
+ return spec.Doc
+ }
+ if decl.Doc != nil {
+ return decl.Doc
+ }
+ return spec.Comment
+ }
+ }
+ return nil
+}
+
+// parseFull fully parses the file corresponding to position pos (for
+// which fset provides file/line information).
+//
+// It returns the resulting ParsedGoFile as well as new pos contained in the
+// parsed file.
+func parseFull(ctx context.Context, snapshot Snapshot, fset *token.FileSet, pos token.Pos) (*ParsedGoFile, token.Pos, error) {
+ f := fset.File(pos)
+ if f == nil {
+ return nil, 0, bug.Errorf("internal error: no file for position %d", pos)
+ }
+
+ uri := span.URIFromPath(f.Name())
+ fh, err := snapshot.GetFile(ctx, uri)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ pgf, err := snapshot.ParseGo(ctx, fh, ParseFull)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ offset, err := safetoken.Offset(f, pos)
+ if err != nil {
+ return nil, 0, bug.Errorf("offset out of bounds in %q", uri)
+ }
+
+ fullPos, err := safetoken.Pos(pgf.Tok, offset)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ return pgf, fullPos, nil
+}
+
+// extractFieldList recursively tries to extract a field list.
+// If it is not found, nil is returned.
+func extractFieldList(specType ast.Expr) *ast.FieldList {
+ switch t := specType.(type) {
+ case *ast.StructType:
+ return t.Fields
+ case *ast.InterfaceType:
+ return t.Methods
+ case *ast.ArrayType:
+ return extractFieldList(t.Elt)
+ case *ast.MapType:
+ // Map value has a greater chance to be a struct
+ if fields := extractFieldList(t.Value); fields != nil {
+ return fields
+ }
+ return extractFieldList(t.Key)
+ case *ast.ChanType:
+ return extractFieldList(t.Value)
+ }
+ return nil
+}
+
+func formatHover(h *HoverJSON, options *Options) (string, error) {
+ signature := formatSignature(h, options)
+
+ switch options.HoverKind {
+ case SingleLine:
+ return h.SingleLine, nil
+ case NoDocumentation:
+ return signature, nil
+ case Structured:
+ b, err := json.Marshal(h)
+ if err != nil {
+ return "", err
+ }
+ return string(b), nil
+ }
+
+ link := formatLink(h, options)
+ doc := formatDoc(h, options)
+
+ var b strings.Builder
+ parts := []string{signature, doc, link}
+ for i, el := range parts {
+ if el != "" {
+ b.WriteString(el)
+
+ // If any elements of the remainder of the list are non-empty,
+ // write an extra newline.
+ if anyNonEmpty(parts[i+1:]) {
+ if options.PreferredContentFormat == protocol.Markdown {
+ b.WriteString("\n\n")
+ } else {
+ b.WriteRune('\n')
+ }
+ }
+ }
+ }
+ return b.String(), nil
+}
+
+func formatSignature(h *HoverJSON, options *Options) string {
+ signature := h.Signature
+ if signature != "" && options.PreferredContentFormat == protocol.Markdown {
+ signature = fmt.Sprintf("```go\n%s\n```", signature)
+ }
+ return signature
+}
+
+func formatLink(h *HoverJSON, options *Options) string {
+ if !options.LinksInHover || options.LinkTarget == "" || h.LinkPath == "" {
+ return ""
+ }
+ plainLink := BuildLink(options.LinkTarget, h.LinkPath, h.LinkAnchor)
+ switch options.PreferredContentFormat {
+ case protocol.Markdown:
+ return fmt.Sprintf("[`%s` on %s](%s)", h.SymbolName, options.LinkTarget, plainLink)
+ case protocol.PlainText:
+ return ""
+ default:
+ return plainLink
+ }
+}
+
+// BuildLink constructs a URL with the given target, path, and anchor.
+func BuildLink(target, path, anchor string) string {
+ link := fmt.Sprintf("https://%s/%s", target, path)
+ if anchor == "" {
+ return link
+ }
+ return link + "#" + anchor
+}
+
+func formatDoc(h *HoverJSON, options *Options) string {
+ var doc string
+ switch options.HoverKind {
+ case SynopsisDocumentation:
+ doc = h.Synopsis
+ case FullDocumentation:
+ doc = h.FullDocumentation
+ }
+ if options.PreferredContentFormat == protocol.Markdown {
+ return CommentToMarkdown(doc, options)
+ }
+ return doc
+}
+
+func anyNonEmpty(x []string) bool {
+ for _, el := range x {
+ if el != "" {
+ return true
+ }
+ }
+ return false
+}
+
+// findDeclInfo returns the syntax nodes involved in the declaration of the
+// types.Object with position pos, searching the given list of file syntax
+// trees.
+//
+// Pos may be the position of the name-defining identifier in a FuncDecl,
+// ValueSpec, TypeSpec, Field, or as a special case the position of
+// Ellipsis.Elt in an ellipsis field.
+//
+// If found, the resulting decl, spec, and field will be the inner-most
+// instance of each node type surrounding pos.
+//
+// If field is non-nil, pos is the position of a field Var. If field is nil and
+// spec is non-nil, pos is the position of a Var, Const, or TypeName object. If
+// both field and spec are nil and decl is non-nil, pos is the position of a
+// Func object.
+//
+// It returns a nil decl if no object-defining node is found at pos.
+//
+// TODO(rfindley): this function has tricky semantics, and may be worth unit
+// testing and/or refactoring.
+func findDeclInfo(files []*ast.File, pos token.Pos) (decl ast.Decl, spec ast.Spec, field *ast.Field) {
+ // panic(found{}) breaks off the traversal and
+ // causes the function to return normally.
+ type found struct{}
+ defer func() {
+ switch x := recover().(type) {
+ case nil:
+ case found:
+ default:
+ panic(x)
+ }
+ }()
+
+ // Visit the files in search of the node at pos.
+ stack := make([]ast.Node, 0, 20)
+ // Allocate the closure once, outside the loop.
+ f := func(n ast.Node) bool {
+ if n != nil {
+ stack = append(stack, n) // push
+ } else {
+ stack = stack[:len(stack)-1] // pop
+ return false
+ }
+
+ // Skip subtrees (incl. files) that don't contain the search point.
+ if !(n.Pos() <= pos && pos < n.End()) {
+ return false
+ }
+
+ switch n := n.(type) {
+ case *ast.Field:
+ findEnclosingDeclAndSpec := func() {
+ for i := len(stack) - 1; i >= 0; i-- {
+ switch n := stack[i].(type) {
+ case ast.Spec:
+ spec = n
+ case ast.Decl:
+ decl = n
+ return
+ }
+ }
+ }
+
+ // Check each field name since you can have
+ // multiple names for the same type expression.
+ for _, id := range n.Names {
+ if id.Pos() == pos {
+ field = n
+ findEnclosingDeclAndSpec()
+ panic(found{})
+ }
+ }
+
+ // Check *ast.Field itself. This handles embedded
+ // fields which have no associated *ast.Ident name.
+ if n.Pos() == pos {
+ field = n
+ findEnclosingDeclAndSpec()
+ panic(found{})
+ }
+
+ // Also check "X" in "...X". This makes it easy to format variadic
+ // signature params properly.
+ //
+ // TODO(rfindley): I don't understand this comment. How does finding the
+ // field in this case make it easier to format variadic signature params?
+ if ell, ok := n.Type.(*ast.Ellipsis); ok && ell.Elt != nil && ell.Elt.Pos() == pos {
+ field = n
+ findEnclosingDeclAndSpec()
+ panic(found{})
+ }
+
+ case *ast.FuncDecl:
+ if n.Name.Pos() == pos {
+ decl = n
+ panic(found{})
+ }
+
+ case *ast.GenDecl:
+ for _, s := range n.Specs {
+ switch s := s.(type) {
+ case *ast.TypeSpec:
+ if s.Name.Pos() == pos {
+ decl = n
+ spec = s
+ panic(found{})
+ }
+ case *ast.ValueSpec:
+ for _, id := range s.Names {
+ if id.Pos() == pos {
+ decl = n
+ spec = s
+ panic(found{})
+ }
+ }
+ }
+ }
+ }
+ return true
+ }
+ for _, file := range files {
+ ast.Inspect(file, f)
+ }
+
+ return nil, nil, nil
+}
diff --git a/gopls/internal/lsp/source/identifier.go b/gopls/internal/lsp/source/identifier.go
new file mode 100644
index 000000000..15fe13a94
--- /dev/null
+++ b/gopls/internal/lsp/source/identifier.go
@@ -0,0 +1,174 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package source
+
+import (
+ "errors"
+ "go/ast"
+ "go/types"
+
+ "golang.org/x/tools/internal/typeparams"
+)
+
+// ErrNoIdentFound is error returned when no identifier is found at a particular position
+var ErrNoIdentFound = errors.New("no identifier found")
+
+// inferredSignature determines the resolved non-generic signature for an
+// identifier in an instantiation expression.
+//
+// If no such signature exists, it returns nil.
+func inferredSignature(info *types.Info, id *ast.Ident) *types.Signature {
+ inst := typeparams.GetInstances(info)[id]
+ sig, _ := inst.Type.(*types.Signature)
+ return sig
+}
+
+func searchForEnclosing(info *types.Info, path []ast.Node) *types.TypeName {
+ for _, n := range path {
+ switch n := n.(type) {
+ case *ast.SelectorExpr:
+ if sel, ok := info.Selections[n]; ok {
+ recv := Deref(sel.Recv())
+
+ // Keep track of the last exported type seen.
+ var exported *types.TypeName
+ if named, ok := recv.(*types.Named); ok && named.Obj().Exported() {
+ exported = named.Obj()
+ }
+ // We don't want the last element, as that's the field or
+ // method itself.
+ for _, index := range sel.Index()[:len(sel.Index())-1] {
+ if r, ok := recv.Underlying().(*types.Struct); ok {
+ recv = Deref(r.Field(index).Type())
+ if named, ok := recv.(*types.Named); ok && named.Obj().Exported() {
+ exported = named.Obj()
+ }
+ }
+ }
+ return exported
+ }
+ }
+ }
+ return nil
+}
+
+// typeToObject returns the relevant type name for the given type, after
+// unwrapping pointers, arrays, slices, channels, and function signatures with
+// a single non-error result.
+func typeToObject(typ types.Type) *types.TypeName {
+ switch typ := typ.(type) {
+ case *types.Named:
+ // TODO(rfindley): this should use typeparams.NamedTypeOrigin.
+ return typ.Obj()
+ case *types.Pointer:
+ return typeToObject(typ.Elem())
+ case *types.Array:
+ return typeToObject(typ.Elem())
+ case *types.Slice:
+ return typeToObject(typ.Elem())
+ case *types.Chan:
+ return typeToObject(typ.Elem())
+ case *types.Signature:
+ // Try to find a return value of a named type. If there's only one
+ // such value, jump to its type definition.
+ var res *types.TypeName
+
+ results := typ.Results()
+ for i := 0; i < results.Len(); i++ {
+ obj := typeToObject(results.At(i).Type())
+ if obj == nil || hasErrorType(obj) {
+ // Skip builtins.
+ continue
+ }
+ if res != nil {
+ // The function/method must have only one return value of a named type.
+ return nil
+ }
+
+ res = obj
+ }
+ return res
+ default:
+ return nil
+ }
+}
+
+func hasErrorType(obj types.Object) bool {
+ return types.IsInterface(obj.Type()) && obj.Pkg() == nil && obj.Name() == "error"
+}
+
+// typeSwitchImplicits returns all the implicit type switch objects that
+// correspond to the leaf *ast.Ident. It also returns the original type
+// associated with the identifier (outside of a case clause).
+func typeSwitchImplicits(info *types.Info, path []ast.Node) ([]types.Object, types.Type) {
+ ident, _ := path[0].(*ast.Ident)
+ if ident == nil {
+ return nil, nil
+ }
+
+ var (
+ ts *ast.TypeSwitchStmt
+ assign *ast.AssignStmt
+ cc *ast.CaseClause
+ obj = info.ObjectOf(ident)
+ )
+
+ // Walk our ancestors to determine if our leaf ident refers to a
+ // type switch variable, e.g. the "a" from "switch a := b.(type)".
+Outer:
+ for i := 1; i < len(path); i++ {
+ switch n := path[i].(type) {
+ case *ast.AssignStmt:
+ // Check if ident is the "a" in "a := foo.(type)". The "a" in
+ // this case has no types.Object, so check for ident equality.
+ if len(n.Lhs) == 1 && n.Lhs[0] == ident {
+ assign = n
+ }
+ case *ast.CaseClause:
+ // Check if ident is a use of "a" within a case clause. Each
+ // case clause implicitly maps "a" to a different types.Object,
+ // so check if ident's object is the case clause's implicit
+ // object.
+ if obj != nil && info.Implicits[n] == obj {
+ cc = n
+ }
+ case *ast.TypeSwitchStmt:
+ // Look for the type switch that owns our previously found
+ // *ast.AssignStmt or *ast.CaseClause.
+ if n.Assign == assign {
+ ts = n
+ break Outer
+ }
+
+ for _, stmt := range n.Body.List {
+ if stmt == cc {
+ ts = n
+ break Outer
+ }
+ }
+ }
+ }
+ if ts == nil {
+ return nil, nil
+ }
+ // Our leaf ident refers to a type switch variable. Fan out to the
+ // type switch's implicit case clause objects.
+ var objs []types.Object
+ for _, cc := range ts.Body.List {
+ if ccObj := info.Implicits[cc]; ccObj != nil {
+ objs = append(objs, ccObj)
+ }
+ }
+ // The right-hand side of a type switch should only have one
+ // element, and we need to track its type in order to generate
+ // hover information for implicit type switch variables.
+ var typ types.Type
+ if assign, ok := ts.Assign.(*ast.AssignStmt); ok && len(assign.Rhs) == 1 {
+ if rhs := assign.Rhs[0].(*ast.TypeAssertExpr); ok {
+ typ = info.TypeOf(rhs.X)
+ }
+ }
+ return objs, typ
+}
diff --git a/gopls/internal/lsp/source/identifier_test.go b/gopls/internal/lsp/source/identifier_test.go
new file mode 100644
index 000000000..7756fe402
--- /dev/null
+++ b/gopls/internal/lsp/source/identifier_test.go
@@ -0,0 +1,103 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package source
+
+import (
+ "bytes"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "testing"
+)
+
+func TestSearchForEnclosing(t *testing.T) {
+ tests := []struct {
+ desc string
+ // For convenience, consider the first occurrence of the identifier "X" in
+ // src.
+ src string
+ // By convention, "" means no type found.
+ wantTypeName string
+ }{
+ {
+ // TODO(rFindley): is this correct, or do we want to resolve I2 here?
+ desc: "embedded interface in interface",
+ src: `package a; var y = i1.X; type i1 interface {I2}; type I2 interface{X()}`,
+ wantTypeName: "",
+ },
+ {
+ desc: "embedded interface in struct",
+ src: `package a; var y = t.X; type t struct {I}; type I interface{X()}`,
+ wantTypeName: "I",
+ },
+ {
+ desc: "double embedding",
+ src: `package a; var y = t1.X; type t1 struct {t2}; type t2 struct {I}; type I interface{X()}`,
+ wantTypeName: "I",
+ },
+ }
+
+ for _, test := range tests {
+ test := test
+ t.Run(test.desc, func(t *testing.T) {
+ fset := token.NewFileSet()
+ file, err := parser.ParseFile(fset, "a.go", test.src, parser.AllErrors)
+ if err != nil {
+ t.Fatal(err)
+ }
+ column := 1 + bytes.IndexRune([]byte(test.src), 'X')
+ pos := posAt(1, column, fset, "a.go")
+ path := pathEnclosingObjNode(file, pos)
+ if path == nil {
+ t.Fatalf("no ident found at (1, %d)", column)
+ }
+ info := newInfo()
+ if _, err = (*types.Config)(nil).Check("p", fset, []*ast.File{file}, info); err != nil {
+ t.Fatal(err)
+ }
+ obj := searchForEnclosing(info, path)
+ if obj == nil {
+ if test.wantTypeName != "" {
+ t.Errorf("searchForEnclosing(...) = <nil>, want %q", test.wantTypeName)
+ }
+ return
+ }
+ if got := obj.Name(); got != test.wantTypeName {
+ t.Errorf("searchForEnclosing(...) = %q, want %q", got, test.wantTypeName)
+ }
+ })
+ }
+}
+
+// posAt returns the token.Pos corresponding to the 1-based (line, column)
+// coordinates in the file fname of fset.
+func posAt(line, column int, fset *token.FileSet, fname string) token.Pos {
+ var tok *token.File
+ fset.Iterate(func(tf *token.File) bool {
+ if tf.Name() == fname {
+ tok = tf
+ return false
+ }
+ return true
+ })
+ if tok == nil {
+ return token.NoPos
+ }
+ start := tok.LineStart(line)
+ return start + token.Pos(column-1)
+}
+
+// newInfo returns a types.Info with all maps populated.
+func newInfo() *types.Info {
+ return &types.Info{
+ Types: make(map[ast.Expr]types.TypeAndValue),
+ Defs: make(map[*ast.Ident]types.Object),
+ Uses: make(map[*ast.Ident]types.Object),
+ Implicits: make(map[ast.Node]types.Object),
+ Selections: make(map[*ast.SelectorExpr]*types.Selection),
+ Scopes: make(map[ast.Node]*types.Scope),
+ }
+}
diff --git a/gopls/internal/lsp/source/implementation.go b/gopls/internal/lsp/source/implementation.go
new file mode 100644
index 000000000..72ec90d28
--- /dev/null
+++ b/gopls/internal/lsp/source/implementation.go
@@ -0,0 +1,482 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package source
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "go/ast"
+ "go/token"
+ "go/types"
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+
+ "golang.org/x/sync/errgroup"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/safetoken"
+ "golang.org/x/tools/gopls/internal/lsp/source/methodsets"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/event"
+)
+
+// This file defines the new implementation of the 'implementation'
+// operator that does not require type-checker data structures for an
+// unbounded number of packages.
+//
+// TODO(adonovan):
+// - Audit to ensure robustness in face of type errors.
+// - Support 'error' and 'error.Error', which were also lacking from the old implementation.
+// - Eliminate false positives due to 'tricky' cases of the global algorithm.
+// - Ensure we have test coverage of:
+// type aliases
+// nil, PkgName, Builtin (all errors)
+// any (empty result)
+// method of unnamed interface type (e.g. var x interface { f() })
+// (the global algorithm may find implementations of this type
+// but will not include it in the index.)
+
+// Implementation returns a new sorted array of locations of
+// declarations of types that implement (or are implemented by) the
+// type referred to at the given position.
+//
+// If the position denotes a method, the computation is applied to its
+// receiver type and then its corresponding methods are returned.
+func Implementation(ctx context.Context, snapshot Snapshot, f FileHandle, pp protocol.Position) ([]protocol.Location, error) {
+ ctx, done := event.Start(ctx, "source.Implementation")
+ defer done()
+
+ locs, err := implementations2(ctx, snapshot, f, pp)
+ if err != nil {
+ return nil, err
+ }
+
+ // Sort and de-duplicate locations.
+ sort.Slice(locs, func(i, j int) bool {
+ return protocol.CompareLocation(locs[i], locs[j]) < 0
+ })
+ out := locs[:0]
+ for _, loc := range locs {
+ if len(out) == 0 || out[len(out)-1] != loc {
+ out = append(out, loc)
+ }
+ }
+ locs = out
+
+ return locs, nil
+}
+
+func implementations2(ctx context.Context, snapshot Snapshot, fh FileHandle, pp protocol.Position) ([]protocol.Location, error) {
+
+ // Type-check the query package, find the query identifier,
+ // and locate the type or method declaration it refers to.
+ declPosn, err := typeDeclPosition(ctx, snapshot, fh.URI(), pp)
+ if err != nil {
+ return nil, err
+ }
+
+ // Type-check the declaring package (incl. variants) for use
+ // by the "local" search, which uses type information to
+ // enumerate all types within the package that satisfy the
+ // query type, even those defined local to a function.
+ declURI := span.URIFromPath(declPosn.Filename)
+ declMetas, err := snapshot.MetadataForFile(ctx, declURI)
+ if err != nil {
+ return nil, err
+ }
+ if len(declMetas) == 0 {
+ return nil, fmt.Errorf("no packages for file %s", declURI)
+ }
+ ids := make([]PackageID, len(declMetas))
+ for i, m := range declMetas {
+ ids[i] = m.ID
+ }
+ localPkgs, err := snapshot.TypeCheck(ctx, ids...)
+ if err != nil {
+ return nil, err
+ }
+ // The narrowest package will do, since the local search is based
+ // on position and the global search is based on fingerprint.
+ // (Neither is based on object identity.)
+ declPkg := localPkgs[0]
+ declFile, err := declPkg.File(declURI)
+ if err != nil {
+ return nil, err // "can't happen"
+ }
+
+ // Find declaration of corresponding object
+ // in this package based on (URI, offset).
+ pos, err := safetoken.Pos(declFile.Tok, declPosn.Offset)
+ if err != nil {
+ return nil, err
+ }
+ // TODO(adonovan): simplify: use objectsAt?
+ path := pathEnclosingObjNode(declFile.File, pos)
+ if path == nil {
+ return nil, ErrNoIdentFound // checked earlier
+ }
+ id, ok := path[0].(*ast.Ident)
+ if !ok {
+ return nil, ErrNoIdentFound // checked earlier
+ }
+ obj := declPkg.GetTypesInfo().ObjectOf(id) // may be nil
+
+ // Is the selected identifier a type name or method?
+ // (For methods, report the corresponding method names.)
+ var queryType types.Type
+ var queryMethodID string
+ switch obj := obj.(type) {
+ case *types.TypeName:
+ queryType = obj.Type()
+ case *types.Func:
+ // For methods, use the receiver type, which may be anonymous.
+ if recv := obj.Type().(*types.Signature).Recv(); recv != nil {
+ queryType = recv.Type()
+ queryMethodID = obj.Id()
+ }
+ }
+ if queryType == nil {
+ return nil, fmt.Errorf("%s is not a type or method", id.Name)
+ }
+
+ // Compute the method-set fingerprint used as a key to the global search.
+ key, hasMethods := methodsets.KeyOf(queryType)
+ if !hasMethods {
+ // A type with no methods yields an empty result.
+ // (No point reporting that every type satisfies 'any'.)
+ return nil, nil
+ }
+
+ // The global search needs to look at every package in the workspace;
+ // see package ./methodsets.
+ //
+ // For now we do all the type checking before beginning the search.
+ // TODO(adonovan): opt: search in parallel topological order
+ // so that we can overlap index lookup with typechecking.
+ // I suspect a number of algorithms on the result of TypeCheck could
+ // be optimized by being applied as soon as each package is available.
+ globalMetas, err := snapshot.AllMetadata(ctx)
+ if err != nil {
+ return nil, err
+ }
+ globalIDs := make([]PackageID, 0, len(globalMetas))
+ for _, m := range globalMetas {
+ if m.PkgPath == declPkg.Metadata().PkgPath {
+ continue // declaring package is handled by local implementation
+ }
+ globalIDs = append(globalIDs, m.ID)
+ }
+ indexes, err := snapshot.MethodSets(ctx, globalIDs...)
+ if err != nil {
+ return nil, err
+ }
+
+ // Search local and global packages in parallel.
+ var (
+ group errgroup.Group
+ locsMu sync.Mutex
+ locs []protocol.Location
+ )
+ // local search
+ for _, localPkg := range localPkgs {
+ localPkg := localPkg
+ group.Go(func() error {
+ localLocs, err := localImplementations(ctx, snapshot, localPkg, queryType, queryMethodID)
+ if err != nil {
+ return err
+ }
+ locsMu.Lock()
+ locs = append(locs, localLocs...)
+ locsMu.Unlock()
+ return nil
+ })
+ }
+ // global search
+ for _, index := range indexes {
+ index := index
+ group.Go(func() error {
+ for _, res := range index.Search(key, queryMethodID) {
+ loc := res.Location
+ // Map offsets to protocol.Locations in parallel (may involve I/O).
+ group.Go(func() error {
+ ploc, err := offsetToLocation(ctx, snapshot, loc.Filename, loc.Start, loc.End)
+ if err != nil {
+ return err
+ }
+ locsMu.Lock()
+ locs = append(locs, ploc)
+ locsMu.Unlock()
+ return nil
+ })
+ }
+ return nil
+ })
+ }
+ if err := group.Wait(); err != nil {
+ return nil, err
+ }
+
+ return locs, nil
+}
+
+// offsetToLocation converts an offset-based position to a protocol.Location,
+// which requires reading the file.
+func offsetToLocation(ctx context.Context, snapshot Snapshot, filename string, start, end int) (protocol.Location, error) {
+ uri := span.URIFromPath(filename)
+ fh, err := snapshot.GetFile(ctx, uri)
+ if err != nil {
+ return protocol.Location{}, err // cancelled, perhaps
+ }
+ content, err := fh.Read()
+ if err != nil {
+ return protocol.Location{}, err // nonexistent or deleted ("can't happen")
+ }
+ m := protocol.NewMapper(uri, content)
+ return m.OffsetLocation(start, end)
+}
+
+// typeDeclPosition returns the position of the declaration of the
+// type (or one of its methods) referred to at (uri, ppos).
+func typeDeclPosition(ctx context.Context, snapshot Snapshot, uri span.URI, ppos protocol.Position) (token.Position, error) {
+ var noPosn token.Position
+
+ pkg, pgf, err := PackageForFile(ctx, snapshot, uri, WidestPackage)
+ if err != nil {
+ return noPosn, err
+ }
+ pos, err := pgf.PositionPos(ppos)
+ if err != nil {
+ return noPosn, err
+ }
+
+ // This function inherits the limitation of its predecessor in
+ // requiring the selection to be an identifier (of a type or
+ // method). But there's no fundamental reason why one could
+ // not pose this query about any selected piece of syntax that
+ // has a type and thus a method set.
+ // (If LSP was more thorough about passing text selections as
+ // intervals to queries, you could ask about the method set of a
+ // subexpression such as x.f().)
+
+ // TODO(adonovan): simplify: use objectsAt?
+ path := pathEnclosingObjNode(pgf.File, pos)
+ if path == nil {
+ return noPosn, ErrNoIdentFound
+ }
+ id, ok := path[0].(*ast.Ident)
+ if !ok {
+ return noPosn, ErrNoIdentFound
+ }
+
+ // Is the object a type or method? Reject other kinds.
+ obj := pkg.GetTypesInfo().Uses[id]
+ if obj == nil {
+ // Check uses first (unlike ObjectOf) so that T in
+ // struct{T} is treated as a reference to a type,
+ // not a declaration of a field.
+ obj = pkg.GetTypesInfo().Defs[id]
+ }
+ switch obj := obj.(type) {
+ case *types.TypeName:
+ // ok
+ case *types.Func:
+ if obj.Type().(*types.Signature).Recv() == nil {
+ return noPosn, fmt.Errorf("%s is a function, not a method", id.Name)
+ }
+ case nil:
+ return noPosn, fmt.Errorf("%s denotes unknown object", id.Name)
+ default:
+ // e.g. *types.Var -> "var".
+ kind := strings.ToLower(strings.TrimPrefix(reflect.TypeOf(obj).String(), "*types."))
+ return noPosn, fmt.Errorf("%s is a %s, not a type", id.Name, kind)
+ }
+
+ declPosn := safetoken.StartPosition(pkg.FileSet(), obj.Pos())
+ return declPosn, nil
+}
+
+// localImplementations searches within pkg for declarations of all
+// types that are assignable to/from the query type, and returns a new
+// unordered array of their locations.
+//
+// If methodID is non-empty, the function instead returns the location
+// of each type's method (if any) of that ID.
+//
+// ("Local" refers to the search within the same package, but this
+// function's results may include type declarations that are local to
+// a function body. The global search index excludes such types
+// because reliably naming such types is hard.)
+func localImplementations(ctx context.Context, snapshot Snapshot, pkg Package, queryType types.Type, methodID string) ([]protocol.Location, error) {
+ queryType = methodsets.EnsurePointer(queryType)
+
+ // Scan through all type declarations in the syntax.
+ var locs []protocol.Location
+ var methodLocs []methodsets.Location
+ for _, pgf := range pkg.CompiledGoFiles() {
+ ast.Inspect(pgf.File, func(n ast.Node) bool {
+ spec, ok := n.(*ast.TypeSpec)
+ if !ok {
+ return true // not a type declaration
+ }
+ def := pkg.GetTypesInfo().Defs[spec.Name]
+ if def == nil {
+ return true // "can't happen" for types
+ }
+ if def.(*types.TypeName).IsAlias() {
+ return true // skip type aliases to avoid duplicate reporting
+ }
+ candidateType := methodsets.EnsurePointer(def.Type())
+
+ // The historical behavior enshrined by this
+ // function rejects cases where both are
+ // (nontrivial) interface types?
+ // That seems like useful information.
+ // TODO(adonovan): UX: report I/I pairs too?
+ // The same question appears in the global algorithm (methodsets).
+ if !concreteImplementsIntf(candidateType, queryType) {
+ return true // not assignable
+ }
+
+ // Ignore types with empty method sets.
+ // (No point reporting that every type satisfies 'any'.)
+ mset := types.NewMethodSet(candidateType)
+ if mset.Len() == 0 {
+ return true
+ }
+
+ if methodID == "" {
+ // Found matching type.
+ locs = append(locs, mustLocation(pgf, spec.Name))
+ return true
+ }
+
+ // Find corresponding method.
+ //
+ // We can't use LookupFieldOrMethod because it requires
+ // the methodID's types.Package, which we don't know.
+ // We could recursively search pkg.Imports for it,
+ // but it's easier to walk the method set.
+ for i := 0; i < mset.Len(); i++ {
+ method := mset.At(i).Obj()
+ if method.Id() == methodID {
+ posn := safetoken.StartPosition(pkg.FileSet(), method.Pos())
+ methodLocs = append(methodLocs, methodsets.Location{
+ Filename: posn.Filename,
+ Start: posn.Offset,
+ End: posn.Offset + len(method.Name()),
+ })
+ break
+ }
+ }
+ return true
+ })
+ }
+
+ // Finally convert method positions to protocol form by reading the files.
+ for _, mloc := range methodLocs {
+ loc, err := offsetToLocation(ctx, snapshot, mloc.Filename, mloc.Start, mloc.End)
+ if err != nil {
+ return nil, err
+ }
+ locs = append(locs, loc)
+ }
+
+ return locs, nil
+}
+
+// concreteImplementsIntf returns true if a is an interface type implemented by
+// concrete type b, or vice versa.
+func concreteImplementsIntf(a, b types.Type) bool {
+ aIsIntf, bIsIntf := types.IsInterface(a), types.IsInterface(b)
+
+ // Make sure exactly one is an interface type.
+ if aIsIntf == bIsIntf {
+ return false
+ }
+
+ // Rearrange if needed so "a" is the concrete type.
+ if aIsIntf {
+ a, b = b, a
+ }
+
+ // TODO(adonovan): this should really use GenericAssignableTo
+ // to report (e.g.) "ArrayList[T] implements List[T]", but
+ // GenericAssignableTo doesn't work correctly on pointers to
+ // generic named types. Thus the legacy implementation and the
+ // "local" part of implementation2 fail to report generics.
+ // The global algorithm based on subsets does the right thing.
+ return types.AssignableTo(a, b)
+}
+
+var (
+ // TODO(adonovan): why do various RPC handlers related to
+ // IncomingCalls return (nil, nil) on the protocol in response
+ // to this error? That seems like a violation of the protocol.
+ // Is it perhaps a workaround for VSCode behavior?
+ errNoObjectFound = errors.New("no object found")
+)
+
+// pathEnclosingObjNode returns the AST path to the object-defining
+// node associated with pos. "Object-defining" means either an
+// *ast.Ident mapped directly to a types.Object or an ast.Node mapped
+// implicitly to a types.Object.
+func pathEnclosingObjNode(f *ast.File, pos token.Pos) []ast.Node {
+ var (
+ path []ast.Node
+ found bool
+ )
+
+ ast.Inspect(f, func(n ast.Node) bool {
+ if found {
+ return false
+ }
+
+ if n == nil {
+ path = path[:len(path)-1]
+ return false
+ }
+
+ path = append(path, n)
+
+ switch n := n.(type) {
+ case *ast.Ident:
+ // Include the position directly after identifier. This handles
+ // the common case where the cursor is right after the
+ // identifier the user is currently typing. Previously we
+ // handled this by calling astutil.PathEnclosingInterval twice,
+ // once for "pos" and once for "pos-1".
+ found = n.Pos() <= pos && pos <= n.End()
+ case *ast.ImportSpec:
+ if n.Path.Pos() <= pos && pos < n.Path.End() {
+ found = true
+ // If import spec has a name, add name to path even though
+ // position isn't in the name.
+ if n.Name != nil {
+ path = append(path, n.Name)
+ }
+ }
+ case *ast.StarExpr:
+ // Follow star expressions to the inner identifier.
+ if pos == n.Star {
+ pos = n.X.Pos()
+ }
+ }
+
+ return !found
+ })
+
+ if len(path) == 0 {
+ return nil
+ }
+
+ // Reverse path so leaf is first element.
+ for i := 0; i < len(path)/2; i++ {
+ path[i], path[len(path)-1-i] = path[len(path)-1-i], path[i]
+ }
+
+ return path
+}
diff --git a/gopls/internal/lsp/source/inlay_hint.go b/gopls/internal/lsp/source/inlay_hint.go
new file mode 100644
index 000000000..671d405dc
--- /dev/null
+++ b/gopls/internal/lsp/source/inlay_hint.go
@@ -0,0 +1,394 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package source
+
+import (
+ "context"
+ "fmt"
+ "go/ast"
+ "go/constant"
+ "go/token"
+ "go/types"
+ "strings"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/typeparams"
+)
+
+const (
+ maxLabelLength = 28
+)
+
+type InlayHintFunc func(node ast.Node, m *protocol.Mapper, tf *token.File, info *types.Info, q *types.Qualifier) []protocol.InlayHint
+
+type Hint struct {
+ Name string
+ Doc string
+ Run InlayHintFunc
+}
+
+const (
+ ParameterNames = "parameterNames"
+ AssignVariableTypes = "assignVariableTypes"
+ ConstantValues = "constantValues"
+ RangeVariableTypes = "rangeVariableTypes"
+ CompositeLiteralTypes = "compositeLiteralTypes"
+ CompositeLiteralFieldNames = "compositeLiteralFields"
+ FunctionTypeParameters = "functionTypeParameters"
+)
+
+var AllInlayHints = map[string]*Hint{
+ AssignVariableTypes: {
+ Name: AssignVariableTypes,
+ Doc: "Enable/disable inlay hints for variable types in assign statements:\n```go\n\ti/* int*/, j/* int*/ := 0, len(r)-1\n```",
+ Run: assignVariableTypes,
+ },
+ ParameterNames: {
+ Name: ParameterNames,
+ Doc: "Enable/disable inlay hints for parameter names:\n```go\n\tparseInt(/* str: */ \"123\", /* radix: */ 8)\n```",
+ Run: parameterNames,
+ },
+ ConstantValues: {
+ Name: ConstantValues,
+ Doc: "Enable/disable inlay hints for constant values:\n```go\n\tconst (\n\t\tKindNone Kind = iota/* = 0*/\n\t\tKindPrint/* = 1*/\n\t\tKindPrintf/* = 2*/\n\t\tKindErrorf/* = 3*/\n\t)\n```",
+ Run: constantValues,
+ },
+ RangeVariableTypes: {
+ Name: RangeVariableTypes,
+ Doc: "Enable/disable inlay hints for variable types in range statements:\n```go\n\tfor k/* int*/, v/* string*/ := range []string{} {\n\t\tfmt.Println(k, v)\n\t}\n```",
+ Run: rangeVariableTypes,
+ },
+ CompositeLiteralTypes: {
+ Name: CompositeLiteralTypes,
+ Doc: "Enable/disable inlay hints for composite literal types:\n```go\n\tfor _, c := range []struct {\n\t\tin, want string\n\t}{\n\t\t/*struct{ in string; want string }*/{\"Hello, world\", \"dlrow ,olleH\"},\n\t}\n```",
+ Run: compositeLiteralTypes,
+ },
+ CompositeLiteralFieldNames: {
+ Name: CompositeLiteralFieldNames,
+ Doc: "Enable/disable inlay hints for composite literal field names:\n```go\n\t{/*in: */\"Hello, world\", /*want: */\"dlrow ,olleH\"}\n```",
+ Run: compositeLiteralFields,
+ },
+ FunctionTypeParameters: {
+ Name: FunctionTypeParameters,
+ Doc: "Enable/disable inlay hints for implicit type parameters on generic functions:\n```go\n\tmyFoo/*[int, string]*/(1, \"hello\")\n```",
+ Run: funcTypeParams,
+ },
+}
+
+func InlayHint(ctx context.Context, snapshot Snapshot, fh FileHandle, pRng protocol.Range) ([]protocol.InlayHint, error) {
+ ctx, done := event.Start(ctx, "source.InlayHint")
+ defer done()
+
+ pkg, pgf, err := PackageForFile(ctx, snapshot, fh.URI(), NarrowestPackage)
+ if err != nil {
+ return nil, fmt.Errorf("getting file for InlayHint: %w", err)
+ }
+
+ // Collect a list of the inlay hints that are enabled.
+ inlayHintOptions := snapshot.View().Options().InlayHintOptions
+ var enabledHints []InlayHintFunc
+ for hint, enabled := range inlayHintOptions.Hints {
+ if !enabled {
+ continue
+ }
+ if h, ok := AllInlayHints[hint]; ok {
+ enabledHints = append(enabledHints, h.Run)
+ }
+ }
+ if len(enabledHints) == 0 {
+ return nil, nil
+ }
+
+ info := pkg.GetTypesInfo()
+ q := Qualifier(pgf.File, pkg.GetTypes(), info)
+
+ // Set the range to the full file if the range is not valid.
+ start, end := pgf.File.Pos(), pgf.File.End()
+ if pRng.Start.Line < pRng.End.Line || pRng.Start.Character < pRng.End.Character {
+ // Adjust start and end for the specified range.
+ var err error
+ start, end, err = pgf.RangePos(pRng)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ var hints []protocol.InlayHint
+ ast.Inspect(pgf.File, func(node ast.Node) bool {
+ // If not in range, we can stop looking.
+ if node == nil || node.End() < start || node.Pos() > end {
+ return false
+ }
+ for _, fn := range enabledHints {
+ hints = append(hints, fn(node, pgf.Mapper, pgf.Tok, info, &q)...)
+ }
+ return true
+ })
+ return hints, nil
+}
+
+func parameterNames(node ast.Node, m *protocol.Mapper, tf *token.File, info *types.Info, _ *types.Qualifier) []protocol.InlayHint {
+ callExpr, ok := node.(*ast.CallExpr)
+ if !ok {
+ return nil
+ }
+ signature, ok := info.TypeOf(callExpr.Fun).(*types.Signature)
+ if !ok {
+ return nil
+ }
+
+ var hints []protocol.InlayHint
+ for i, v := range callExpr.Args {
+ start, err := m.PosPosition(tf, v.Pos())
+ if err != nil {
+ continue
+ }
+ params := signature.Params()
+ // When a function has variadic params, we skip args after
+ // params.Len().
+ if i > params.Len()-1 {
+ break
+ }
+ param := params.At(i)
+ // param.Name is empty for built-ins like append
+ if param.Name() == "" {
+ continue
+ }
+ // Skip the parameter name hint if the arg matches the
+ // the parameter name.
+ if i, ok := v.(*ast.Ident); ok && i.Name == param.Name() {
+ continue
+ }
+
+ label := param.Name()
+ if signature.Variadic() && i == params.Len()-1 {
+ label = label + "..."
+ }
+ hints = append(hints, protocol.InlayHint{
+ Position: start,
+ Label: buildLabel(label + ":"),
+ Kind: protocol.Parameter,
+ PaddingRight: true,
+ })
+ }
+ return hints
+}
+
+func funcTypeParams(node ast.Node, m *protocol.Mapper, tf *token.File, info *types.Info, _ *types.Qualifier) []protocol.InlayHint {
+ ce, ok := node.(*ast.CallExpr)
+ if !ok {
+ return nil
+ }
+ id, ok := ce.Fun.(*ast.Ident)
+ if !ok {
+ return nil
+ }
+ inst := typeparams.GetInstances(info)[id]
+ if inst.TypeArgs == nil {
+ return nil
+ }
+ start, err := m.PosPosition(tf, id.End())
+ if err != nil {
+ return nil
+ }
+ var args []string
+ for i := 0; i < inst.TypeArgs.Len(); i++ {
+ args = append(args, inst.TypeArgs.At(i).String())
+ }
+ if len(args) == 0 {
+ return nil
+ }
+ return []protocol.InlayHint{{
+ Position: start,
+ Label: buildLabel("[" + strings.Join(args, ", ") + "]"),
+ Kind: protocol.Type,
+ }}
+}
+
+func assignVariableTypes(node ast.Node, m *protocol.Mapper, tf *token.File, info *types.Info, q *types.Qualifier) []protocol.InlayHint {
+ stmt, ok := node.(*ast.AssignStmt)
+ if !ok || stmt.Tok != token.DEFINE {
+ return nil
+ }
+
+ var hints []protocol.InlayHint
+ for _, v := range stmt.Lhs {
+ if h := variableType(v, m, tf, info, q); h != nil {
+ hints = append(hints, *h)
+ }
+ }
+ return hints
+}
+
+func rangeVariableTypes(node ast.Node, m *protocol.Mapper, tf *token.File, info *types.Info, q *types.Qualifier) []protocol.InlayHint {
+ rStmt, ok := node.(*ast.RangeStmt)
+ if !ok {
+ return nil
+ }
+ var hints []protocol.InlayHint
+ if h := variableType(rStmt.Key, m, tf, info, q); h != nil {
+ hints = append(hints, *h)
+ }
+ if h := variableType(rStmt.Value, m, tf, info, q); h != nil {
+ hints = append(hints, *h)
+ }
+ return hints
+}
+
+func variableType(e ast.Expr, m *protocol.Mapper, tf *token.File, info *types.Info, q *types.Qualifier) *protocol.InlayHint {
+ typ := info.TypeOf(e)
+ if typ == nil {
+ return nil
+ }
+ end, err := m.PosPosition(tf, e.End())
+ if err != nil {
+ return nil
+ }
+ return &protocol.InlayHint{
+ Position: end,
+ Label: buildLabel(types.TypeString(typ, *q)),
+ Kind: protocol.Type,
+ PaddingLeft: true,
+ }
+}
+
+func constantValues(node ast.Node, m *protocol.Mapper, tf *token.File, info *types.Info, _ *types.Qualifier) []protocol.InlayHint {
+ genDecl, ok := node.(*ast.GenDecl)
+ if !ok || genDecl.Tok != token.CONST {
+ return nil
+ }
+
+ var hints []protocol.InlayHint
+ for _, v := range genDecl.Specs {
+ spec, ok := v.(*ast.ValueSpec)
+ if !ok {
+ continue
+ }
+ end, err := m.PosPosition(tf, v.End())
+ if err != nil {
+ continue
+ }
+ // Show hints when values are missing or at least one value is not
+ // a basic literal.
+ showHints := len(spec.Values) == 0
+ checkValues := len(spec.Names) == len(spec.Values)
+ var values []string
+ for i, w := range spec.Names {
+ obj, ok := info.ObjectOf(w).(*types.Const)
+ if !ok || obj.Val().Kind() == constant.Unknown {
+ return nil
+ }
+ if checkValues {
+ switch spec.Values[i].(type) {
+ case *ast.BadExpr:
+ return nil
+ case *ast.BasicLit:
+ default:
+ if obj.Val().Kind() != constant.Bool {
+ showHints = true
+ }
+ }
+ }
+ values = append(values, fmt.Sprintf("%v", obj.Val()))
+ }
+ if !showHints || len(values) == 0 {
+ continue
+ }
+ hints = append(hints, protocol.InlayHint{
+ Position: end,
+ Label: buildLabel("= " + strings.Join(values, ", ")),
+ PaddingLeft: true,
+ })
+ }
+ return hints
+}
+
+func compositeLiteralFields(node ast.Node, m *protocol.Mapper, tf *token.File, info *types.Info, q *types.Qualifier) []protocol.InlayHint {
+ compLit, ok := node.(*ast.CompositeLit)
+ if !ok {
+ return nil
+ }
+ typ := info.TypeOf(compLit)
+ if typ == nil {
+ return nil
+ }
+ if t, ok := typ.(*types.Pointer); ok {
+ typ = t.Elem()
+ }
+ strct, ok := typ.Underlying().(*types.Struct)
+ if !ok {
+ return nil
+ }
+
+ var hints []protocol.InlayHint
+ var allEdits []protocol.TextEdit
+ for i, v := range compLit.Elts {
+ if _, ok := v.(*ast.KeyValueExpr); !ok {
+ start, err := m.PosPosition(tf, v.Pos())
+ if err != nil {
+ continue
+ }
+ if i > strct.NumFields()-1 {
+ break
+ }
+ hints = append(hints, protocol.InlayHint{
+ Position: start,
+ Label: buildLabel(strct.Field(i).Name() + ":"),
+ Kind: protocol.Parameter,
+ PaddingRight: true,
+ })
+ allEdits = append(allEdits, protocol.TextEdit{
+ Range: protocol.Range{Start: start, End: start},
+ NewText: strct.Field(i).Name() + ": ",
+ })
+ }
+ }
+ // It is not allowed to have a mix of keyed and unkeyed fields, so
+ // have the text edits add keys to all fields.
+ for i := range hints {
+ hints[i].TextEdits = allEdits
+ }
+ return hints
+}
+
+func compositeLiteralTypes(node ast.Node, m *protocol.Mapper, tf *token.File, info *types.Info, q *types.Qualifier) []protocol.InlayHint {
+ compLit, ok := node.(*ast.CompositeLit)
+ if !ok {
+ return nil
+ }
+ typ := info.TypeOf(compLit)
+ if typ == nil {
+ return nil
+ }
+ if compLit.Type != nil {
+ return nil
+ }
+ prefix := ""
+ if t, ok := typ.(*types.Pointer); ok {
+ typ = t.Elem()
+ prefix = "&"
+ }
+ // The type for this composite literal is implicit, add an inlay hint.
+ start, err := m.PosPosition(tf, compLit.Lbrace)
+ if err != nil {
+ return nil
+ }
+ return []protocol.InlayHint{{
+ Position: start,
+ Label: buildLabel(fmt.Sprintf("%s%s", prefix, types.TypeString(typ, *q))),
+ Kind: protocol.Type,
+ }}
+}
+
+func buildLabel(s string) []protocol.InlayHintLabelPart {
+ label := protocol.InlayHintLabelPart{
+ Value: s,
+ }
+ if len(s) > maxLabelLength+len("...") {
+ label.Value = s[:maxLabelLength] + "..."
+ }
+ return []protocol.InlayHintLabelPart{label}
+}
diff --git a/gopls/internal/lsp/source/known_packages.go b/gopls/internal/lsp/source/known_packages.go
new file mode 100644
index 000000000..07b4c30a8
--- /dev/null
+++ b/gopls/internal/lsp/source/known_packages.go
@@ -0,0 +1,140 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package source
+
+import (
+ "context"
+ "fmt"
+ "go/parser"
+ "go/token"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/imports"
+)
+
+// KnownPackagePaths returns a new list of package paths of all known
+// packages in the package graph that could potentially be imported by
+// the given file. The list is ordered lexicographically, except that
+// all dot-free paths (standard packages) appear before dotful ones.
+//
+// It is part of the gopls.list_known_packages command.
+func KnownPackagePaths(ctx context.Context, snapshot Snapshot, fh FileHandle) ([]PackagePath, error) {
+ // This algorithm is expressed in terms of Metadata, not Packages,
+ // so it doesn't cause or wait for type checking.
+
+ // Find a Metadata containing the file.
+ metas, err := snapshot.MetadataForFile(ctx, fh.URI())
+ if err != nil {
+ return nil, err // e.g. context cancelled
+ }
+ if len(metas) == 0 {
+ return nil, fmt.Errorf("no loaded package contain file %s", fh.URI())
+ }
+ current := metas[0] // pick one arbitrarily (they should all have the same package path)
+
+ // Parse the file's imports so we can compute which
+ // PackagePaths are imported by this specific file.
+ src, err := fh.Read()
+ if err != nil {
+ return nil, err
+ }
+ file, err := parser.ParseFile(token.NewFileSet(), fh.URI().Filename(), src, parser.ImportsOnly)
+ if err != nil {
+ return nil, err
+ }
+ imported := make(map[PackagePath]bool)
+ for _, imp := range file.Imports {
+ if id := current.DepsByImpPath[UnquoteImportPath(imp)]; id != "" {
+ if m := snapshot.Metadata(id); m != nil {
+ imported[m.PkgPath] = true
+ }
+ }
+ }
+
+ // Now find candidates among known packages.
+ knownPkgs, err := snapshot.AllMetadata(ctx)
+ if err != nil {
+ return nil, err
+ }
+ seen := make(map[PackagePath]bool)
+ for _, knownPkg := range knownPkgs {
+ // package main cannot be imported
+ if knownPkg.Name == "main" {
+ continue
+ }
+ // test packages cannot be imported
+ if knownPkg.ForTest != "" {
+ continue
+ }
+ // No need to import what the file already imports.
+ // This check is based on PackagePath, not PackageID,
+ // so that all test variants are filtered out too.
+ if imported[knownPkg.PkgPath] {
+ continue
+ }
+ // make sure internal packages are importable by the file
+ if !IsValidImport(current.PkgPath, knownPkg.PkgPath) {
+ continue
+ }
+ // naive check on cyclical imports
+ if isDirectlyCyclical(current, knownPkg) {
+ continue
+ }
+ // AllMetadata may have multiple variants of a pkg.
+ seen[knownPkg.PkgPath] = true
+ }
+
+ // Augment the set by invoking the goimports algorithm.
+ if err := snapshot.RunProcessEnvFunc(ctx, func(o *imports.Options) error {
+ ctx, cancel := context.WithTimeout(ctx, time.Millisecond*80)
+ defer cancel()
+ var seenMu sync.Mutex
+ wrapped := func(ifix imports.ImportFix) {
+ seenMu.Lock()
+ defer seenMu.Unlock()
+ // TODO(adonovan): what if the actual package path has a vendor/ prefix?
+ seen[PackagePath(ifix.StmtInfo.ImportPath)] = true
+ }
+ return imports.GetAllCandidates(ctx, wrapped, "", fh.URI().Filename(), string(current.Name), o.Env)
+ }); err != nil {
+ // If goimports failed, proceed with just the candidates from the metadata.
+ event.Error(ctx, "imports.GetAllCandidates", err)
+ }
+
+ // Sort lexicographically, but with std before non-std packages.
+ paths := make([]PackagePath, 0, len(seen))
+ for path := range seen {
+ paths = append(paths, path)
+ }
+ sort.Slice(paths, func(i, j int) bool {
+ importI, importJ := paths[i], paths[j]
+ iHasDot := strings.Contains(string(importI), ".")
+ jHasDot := strings.Contains(string(importJ), ".")
+ if iHasDot != jHasDot {
+ return jHasDot // dot-free paths (standard packages) compare less
+ }
+ return importI < importJ
+ })
+
+ return paths, nil
+}
+
+// isDirectlyCyclical checks if imported directly imports pkg.
+// It does not (yet) offer a full cyclical check because showing a user
+// a list of importable packages already generates a very large list
+// and having a few false positives in there could be worth the
+// performance snappiness.
+//
+// TODO(adonovan): ensure that metadata graph is always cyclic!
+// Many algorithms will get confused or even stuck in the
+// presence of cycles. Then replace this function by 'false'.
+func isDirectlyCyclical(pkg, imported *Metadata) bool {
+ _, ok := imported.DepsByPkgPath[pkg.PkgPath]
+ return ok
+}
diff --git a/gopls/internal/lsp/source/linkname.go b/gopls/internal/lsp/source/linkname.go
new file mode 100644
index 000000000..c8afcdf2d
--- /dev/null
+++ b/gopls/internal/lsp/source/linkname.go
@@ -0,0 +1,136 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package source
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "go/token"
+ "strings"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/safetoken"
+ "golang.org/x/tools/gopls/internal/span"
+)
+
+// ErrNoLinkname is returned by LinknameDefinition when no linkname
+// directive is found at a particular position.
+// As such it indicates that other definitions could be worth checking.
+var ErrNoLinkname = errors.New("no linkname directive found")
+
+// LinknameDefinition finds the definition of the linkname directive in fh at pos.
+// If there is no linkname directive at pos, returns ErrNoLinkname.
+func LinknameDefinition(ctx context.Context, snapshot Snapshot, fh FileHandle, pos protocol.Position) ([]protocol.Location, error) {
+ pkgPath, name := parseLinkname(ctx, snapshot, fh, pos)
+ if pkgPath == "" {
+ return nil, ErrNoLinkname
+ }
+ return findLinkname(ctx, snapshot, fh, pos, PackagePath(pkgPath), name)
+}
+
+// parseLinkname attempts to parse a go:linkname declaration at the given pos.
+// If successful, it returns the package path and object name referenced by the second
+// argument of the linkname directive.
+//
+// If the position is not in the second argument of a go:linkname directive, or parsing fails, it returns "", "".
+func parseLinkname(ctx context.Context, snapshot Snapshot, fh FileHandle, pos protocol.Position) (pkgPath, name string) {
+ pgf, err := snapshot.ParseGo(ctx, fh, ParseFull)
+ if err != nil {
+ return "", ""
+ }
+
+ span, err := pgf.Mapper.PositionPoint(pos)
+ if err != nil {
+ return "", ""
+ }
+ atLine := span.Line()
+ atColumn := span.Column()
+
+ // Looking for pkgpath in '//go:linkname f pkgpath.g'.
+ // (We ignore 1-arg linkname directives.)
+ directive, column := findLinknameOnLine(pgf, atLine)
+ parts := strings.Fields(directive)
+ if len(parts) != 3 {
+ return "", ""
+ }
+
+ // Inside 2nd arg [start, end]?
+ end := column + len(directive)
+ start := end - len(parts[2])
+ if !(start <= atColumn && atColumn <= end) {
+ return "", ""
+ }
+ linkname := parts[2]
+
+ // Split the pkg path from the name.
+ dot := strings.LastIndexByte(linkname, '.')
+ if dot < 0 {
+ return "", ""
+ }
+ return linkname[:dot], linkname[dot+1:]
+}
+
+// findLinknameOnLine returns the first linkname directive on line and the column it starts at.
+// Returns "", 0 if no linkname directive is found on the line.
+func findLinknameOnLine(pgf *ParsedGoFile, line int) (string, int) {
+ for _, grp := range pgf.File.Comments {
+ for _, com := range grp.List {
+ if strings.HasPrefix(com.Text, "//go:linkname") {
+ p := safetoken.Position(pgf.Tok, com.Pos())
+ if p.Line == line {
+ return com.Text, p.Column
+ }
+ }
+ }
+ }
+ return "", 0
+}
+
+// findLinkname searches dependencies of packages containing fh for an object
+// with linker name matching the given package path and name.
+func findLinkname(ctx context.Context, snapshot Snapshot, fh FileHandle, pos protocol.Position, pkgPath PackagePath, name string) ([]protocol.Location, error) {
+ // Typically the linkname refers to a forward dependency
+ // or a reverse dependency, but in general it may refer
+ // to any package in the workspace.
+ var pkgMeta *Metadata
+ metas, err := snapshot.AllMetadata(ctx)
+ if err != nil {
+ return nil, err
+ }
+ metas = RemoveIntermediateTestVariants(metas)
+ for _, meta := range metas {
+ if meta.PkgPath == pkgPath {
+ pkgMeta = meta
+ break
+ }
+ }
+ if pkgMeta == nil {
+ return nil, fmt.Errorf("cannot find package %q", pkgPath)
+ }
+
+ // When found, type check the desired package (snapshot.TypeCheck in TypecheckFull mode),
+ pkgs, err := snapshot.TypeCheck(ctx, pkgMeta.ID)
+ if err != nil {
+ return nil, err
+ }
+ pkg := pkgs[0]
+
+ obj := pkg.GetTypes().Scope().Lookup(name)
+ if obj == nil {
+ return nil, fmt.Errorf("package %q does not define %s", pkgPath, name)
+ }
+
+ objURI := safetoken.StartPosition(pkg.FileSet(), obj.Pos())
+ pgf, err := pkg.File(span.URIFromPath(objURI.Filename))
+ if err != nil {
+ return nil, err
+ }
+ loc, err := pgf.PosLocation(obj.Pos(), obj.Pos()+token.Pos(len(name)))
+ if err != nil {
+ return nil, err
+ }
+ return []protocol.Location{loc}, nil
+}
diff --git a/gopls/internal/lsp/source/methodsets/methodsets.go b/gopls/internal/lsp/source/methodsets/methodsets.go
new file mode 100644
index 000000000..af836a5a4
--- /dev/null
+++ b/gopls/internal/lsp/source/methodsets/methodsets.go
@@ -0,0 +1,508 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package methodsets defines an incremental, serializable index of
+// method-set information that allows efficient 'implements' queries
+// across packages of the workspace without using the type checker.
+//
+// This package provides only the "global" (all workspace) search; the
+// "local" search within a given package uses a different
+// implementation based on type-checker data structures for a single
+// package plus variants; see ../implementation2.go.
+// The local algorithm is more precise as it tests function-local types too.
+//
+// A global index of function-local types is challenging since they
+// may reference other local types, for which we would need to invent
+// stable names, an unsolved problem described in passing in Go issue
+// 57497. The global algorithm also does not index anonymous interface
+// types, even outside function bodies.
+//
+// Consequently, global results are not symmetric: applying the
+// operation twice may not get you back where you started.
+package methodsets
+
+// DESIGN
+//
+// See https://go.dev/cl/452060 for a minimal exposition of the algorithm.
+//
+// For each method, we compute a fingerprint: a string representing
+// the method name and type such that equal fingerprint strings mean
+// identical method types.
+//
+// For efficiency, the fingerprint is reduced to a single bit
+// of a uint64, so that the method set can be represented as
+// the union of those method bits (a uint64 bitmask).
+// Assignability thus reduces to a subset check on bitmasks
+// followed by equality checks on fingerprints.
+//
+// In earlier experiments, using 128-bit masks instead of 64 reduced
+// the number of candidates by about 2x. Using (like a Bloom filter) a
+// different hash function to compute a second 64-bit mask and
+// performing a second mask test reduced it by about 4x.
+// Neither had much effect on the running time, presumably because a
+// single 64-bit mask is quite effective. See CL 452060 for details.
+
+import (
+ "bytes"
+ "encoding/gob"
+ "fmt"
+ "go/token"
+ "go/types"
+ "hash/crc32"
+ "log"
+ "strconv"
+ "strings"
+
+ "golang.org/x/tools/go/types/objectpath"
+ "golang.org/x/tools/gopls/internal/lsp/safetoken"
+ "golang.org/x/tools/internal/typeparams"
+ "golang.org/x/tools/internal/typesinternal"
+)
+
+// An Index records the non-empty method sets of all package-level
+// types in a package in a form that permits assignability queries
+// without the type checker.
+type Index struct {
+ pkg gobPackage
+}
+
+// Decode decodes the given gob-encoded data as an Index.
+func Decode(data []byte) *Index {
+ var pkg gobPackage
+ mustDecode(data, &pkg)
+ return &Index{pkg}
+}
+
+// Encode encodes the receiver as gob-encoded data.
+func (index *Index) Encode() []byte {
+ return mustEncode(index.pkg)
+}
+
+func mustEncode(x interface{}) []byte {
+ var buf bytes.Buffer
+ if err := gob.NewEncoder(&buf).Encode(x); err != nil {
+ log.Fatalf("internal error encoding %T: %v", x, err)
+ }
+ return buf.Bytes()
+}
+
+func mustDecode(data []byte, ptr interface{}) {
+ if err := gob.NewDecoder(bytes.NewReader(data)).Decode(ptr); err != nil {
+ log.Fatalf("internal error decoding %T: %v", ptr, err)
+ }
+}
+
+// NewIndex returns a new index of method-set information for all
+// package-level types in the specified package.
+func NewIndex(fset *token.FileSet, pkg *types.Package) *Index {
+ return new(indexBuilder).build(fset, pkg)
+}
+
+// A Location records the extent of an identifier in byte-offset form.
+//
+// Conversion to protocol (UTF-16) form is done by the caller after a
+// search, not during index construction.
+type Location struct {
+ Filename string
+ Start, End int // byte offsets
+}
+
+// A Key represents the method set of a given type in a form suitable
+// to pass to the (*Index).Search method of many different Indexes.
+type Key struct {
+ mset gobMethodSet // note: lacks position information
+}
+
+// KeyOf returns the search key for the method sets of a given type.
+// It returns false if the type has no methods.
+func KeyOf(t types.Type) (Key, bool) {
+ mset := methodSetInfo(t, nil)
+ if mset.Mask == 0 {
+ return Key{}, false // no methods
+ }
+ return Key{mset}, true
+}
+
+// A Result reports a matching type or method in a method-set search.
+type Result struct {
+ Location Location // location of the type or method
+
+ // methods only:
+ PkgPath string // path of declaring package (may differ due to embedding)
+ ObjectPath objectpath.Path // path of method within declaring package
+}
+
+// Search reports each type that implements (or is implemented by) the
+// type that produced the search key. If methodID is nonempty, only
+// that method of each type is reported.
+//
+// The result does not include the error.Error method.
+// TODO(adonovan): give this special case a more systematic treatment.
+func (index *Index) Search(key Key, methodID string) []Result {
+ var results []Result
+ for _, candidate := range index.pkg.MethodSets {
+ // Traditionally this feature doesn't report
+ // interface/interface elements of the relation.
+ // I think that's a mistake.
+ // TODO(adonovan): UX: change it, here and in the local implementation.
+ if candidate.IsInterface && key.mset.IsInterface {
+ continue
+ }
+ if !satisfies(candidate, key.mset) && !satisfies(key.mset, candidate) {
+ continue
+ }
+
+ if candidate.Tricky {
+ // If any interface method is tricky then extra
+ // checking may be needed to eliminate a false positive.
+ // TODO(adonovan): implement it.
+ }
+
+ if methodID == "" {
+ results = append(results, Result{Location: index.location(candidate.Posn)})
+ } else {
+ for _, m := range candidate.Methods {
+ // Here we exploit knowledge of the shape of the fingerprint string.
+ if strings.HasPrefix(m.Fingerprint, methodID) &&
+ m.Fingerprint[len(methodID)] == '(' {
+
+ // Don't report error.Error among the results:
+ // it has no true source location, no package,
+ // and is excluded from the xrefs index.
+ if m.PkgPath == 0 || m.ObjectPath == 0 {
+ if methodID != "Error" {
+ panic("missing info for" + methodID)
+ }
+ continue
+ }
+
+ results = append(results, Result{
+ Location: index.location(m.Posn),
+ PkgPath: index.pkg.Strings[m.PkgPath],
+ ObjectPath: objectpath.Path(index.pkg.Strings[m.ObjectPath]),
+ })
+ break
+ }
+ }
+ }
+ }
+ return results
+}
+
+// satisfies does a fast check for whether x satisfies y.
+func satisfies(x, y gobMethodSet) bool {
+ return y.IsInterface && x.Mask&y.Mask == y.Mask && subset(y, x)
+}
+
+// subset reports whether method set x is a subset of y.
+func subset(x, y gobMethodSet) bool {
+outer:
+ for _, mx := range x.Methods {
+ for _, my := range y.Methods {
+ if mx.Sum == my.Sum && mx.Fingerprint == my.Fingerprint {
+ continue outer // found; try next x method
+ }
+ }
+ return false // method of x not found in y
+ }
+ return true // all methods of x found in y
+}
+
+func (index *Index) location(posn gobPosition) Location {
+ return Location{
+ Filename: index.pkg.Strings[posn.File],
+ Start: posn.Offset,
+ End: posn.Offset + posn.Len,
+ }
+}
+
+// An indexBuilder builds an index for a single package.
+type indexBuilder struct {
+ gobPackage
+ stringIndex map[string]int
+}
+
+// build adds to the index all package-level named types of the specified package.
+func (b *indexBuilder) build(fset *token.FileSet, pkg *types.Package) *Index {
+ _ = b.string("") // 0 => ""
+
+ objectPos := func(obj types.Object) gobPosition {
+ posn := safetoken.StartPosition(fset, obj.Pos())
+ return gobPosition{b.string(posn.Filename), posn.Offset, len(obj.Name())}
+ }
+
+ objectpathFor := typesinternal.NewObjectpathFunc()
+
+ // setindexInfo sets the (Posn, PkgPath, ObjectPath) fields for each method declaration.
+ setIndexInfo := func(m *gobMethod, method *types.Func) {
+ // error.Error has empty Position, PkgPath, and ObjectPath.
+ if method.Pkg() == nil {
+ return
+ }
+
+ m.Posn = objectPos(method)
+ m.PkgPath = b.string(method.Pkg().Path())
+
+ // Instantiations of generic methods don't have an
+ // object path, so we use the generic.
+ if p, err := objectpathFor(typeparams.OriginMethod(method)); err != nil {
+ panic(err) // can't happen for a method of a package-level type
+ } else {
+ m.ObjectPath = b.string(string(p))
+ }
+ }
+
+ // We ignore aliases, though in principle they could define a
+ // struct{...} or interface{...} type, or an instantiation of
+ // a generic, that has a novel method set.
+ scope := pkg.Scope()
+ for _, name := range scope.Names() {
+ if tname, ok := scope.Lookup(name).(*types.TypeName); ok && !tname.IsAlias() {
+ if mset := methodSetInfo(tname.Type(), setIndexInfo); mset.Mask != 0 {
+ mset.Posn = objectPos(tname)
+ // Only record types with non-trivial method sets.
+ b.MethodSets = append(b.MethodSets, mset)
+ }
+ }
+ }
+
+ return &Index{pkg: b.gobPackage}
+}
+
+// string returns a small integer that encodes the string.
+func (b *indexBuilder) string(s string) int {
+ i, ok := b.stringIndex[s]
+ if !ok {
+ i = len(b.Strings)
+ if b.stringIndex == nil {
+ b.stringIndex = make(map[string]int)
+ }
+ b.stringIndex[s] = i
+ b.Strings = append(b.Strings, s)
+ }
+ return i
+}
+
+// methodSetInfo returns the method-set fingerprint of a type.
+// It calls the optional setIndexInfo function for each gobMethod.
+// This is used during index construction, but not search (KeyOf),
+// to store extra information.
+func methodSetInfo(t types.Type, setIndexInfo func(*gobMethod, *types.Func)) gobMethodSet {
+ // For non-interface types, use *T
+ // (if T is not already a pointer)
+ // since it may have more methods.
+ mset := types.NewMethodSet(EnsurePointer(t))
+
+ // Convert the method set into a compact summary.
+ var mask uint64
+ tricky := false
+ methods := make([]gobMethod, mset.Len())
+ for i := 0; i < mset.Len(); i++ {
+ m := mset.At(i).Obj().(*types.Func)
+ fp, isTricky := fingerprint(m)
+ if isTricky {
+ tricky = true
+ }
+ sum := crc32.ChecksumIEEE([]byte(fp))
+ methods[i] = gobMethod{Fingerprint: fp, Sum: sum}
+ if setIndexInfo != nil {
+ setIndexInfo(&methods[i], m) // set Position, PkgPath, ObjectPath
+ }
+ mask |= 1 << uint64(((sum>>24)^(sum>>16)^(sum>>8)^sum)&0x3f)
+ }
+ return gobMethodSet{
+ IsInterface: types.IsInterface(t),
+ Tricky: tricky,
+ Mask: mask,
+ Methods: methods,
+ }
+}
+
+// EnsurePointer wraps T in a types.Pointer if T is a named, non-interface type.
+// This is useful to make sure you consider a named type's full method set.
+func EnsurePointer(T types.Type) types.Type {
+ if _, ok := T.(*types.Named); ok && !types.IsInterface(T) {
+ return types.NewPointer(T)
+ }
+
+ return T
+}
+
+// fingerprint returns an encoding of a method signature such that two
+// methods with equal encodings have identical types, except for a few
+// tricky types whose encodings may spuriously match and whose exact
+// identity computation requires the type checker to eliminate false
+// positives (which are rare). The boolean result indicates whether
+// the result was one of these tricky types.
+//
+// In the standard library, 99.8% of package-level types have a
+// non-tricky method-set. The most common exceptions are due to type
+// parameters.
+//
+// The fingerprint string starts with method.Id() + "(".
+func fingerprint(method *types.Func) (string, bool) {
+ var buf strings.Builder
+ tricky := false
+ var fprint func(t types.Type)
+ fprint = func(t types.Type) {
+ switch t := t.(type) {
+ case *types.Named:
+ tname := t.Obj()
+ if tname.Pkg() != nil {
+ buf.WriteString(strconv.Quote(tname.Pkg().Path()))
+ buf.WriteByte('.')
+ } else if tname.Name() != "error" {
+ panic(tname) // error is the only named type with no package
+ }
+ buf.WriteString(tname.Name())
+
+ case *types.Array:
+ fmt.Fprintf(&buf, "[%d]", t.Len())
+ fprint(t.Elem())
+
+ case *types.Slice:
+ buf.WriteString("[]")
+ fprint(t.Elem())
+
+ case *types.Pointer:
+ buf.WriteByte('*')
+ fprint(t.Elem())
+
+ case *types.Map:
+ buf.WriteString("map[")
+ fprint(t.Key())
+ buf.WriteByte(']')
+ fprint(t.Elem())
+
+ case *types.Chan:
+ switch t.Dir() {
+ case types.SendRecv:
+ buf.WriteString("chan ")
+ case types.SendOnly:
+ buf.WriteString("<-chan ")
+ case types.RecvOnly:
+ buf.WriteString("chan<- ")
+ }
+ fprint(t.Elem())
+
+ case *types.Tuple:
+ buf.WriteByte('(')
+ for i := 0; i < t.Len(); i++ {
+ if i > 0 {
+ buf.WriteByte(',')
+ }
+ fprint(t.At(i).Type())
+ }
+ buf.WriteByte(')')
+
+ case *types.Basic:
+ // Use canonical names for uint8 and int32 aliases.
+ switch t.Kind() {
+ case types.Byte:
+ buf.WriteString("byte")
+ case types.Rune:
+ buf.WriteString("rune")
+ default:
+ buf.WriteString(t.String())
+ }
+
+ case *types.Signature:
+ buf.WriteString("func")
+ fprint(t.Params())
+ if t.Variadic() {
+ buf.WriteString("...") // not quite Go syntax
+ }
+ fprint(t.Results())
+
+ case *types.Struct:
+ // Non-empty unnamed struct types in method
+ // signatures are vanishingly rare.
+ buf.WriteString("struct{")
+ for i := 0; i < t.NumFields(); i++ {
+ if i > 0 {
+ buf.WriteByte(';')
+ }
+ f := t.Field(i)
+ // This isn't quite right for embedded type aliases.
+ // (See types.TypeString(StructType) and #44410 for context.)
+ // But this is vanishingly rare.
+ if !f.Embedded() {
+ buf.WriteString(f.Id())
+ buf.WriteByte(' ')
+ }
+ fprint(f.Type())
+ if tag := t.Tag(i); tag != "" {
+ buf.WriteByte(' ')
+ buf.WriteString(strconv.Quote(tag))
+ }
+ }
+ buf.WriteString("}")
+
+ case *types.Interface:
+ if t.NumMethods() == 0 {
+ buf.WriteString("any") // common case
+ } else {
+ // Interface assignability is particularly
+ // tricky due to the possibility of recursion.
+ tricky = true
+ // We could still give more disambiguating precision
+ // than "..." if we wanted to.
+ buf.WriteString("interface{...}")
+ }
+
+ case *typeparams.TypeParam:
+ tricky = true
+ // TODO(adonovan): refine this by adding a numeric suffix
+ // indicating the index among the receiver type's parameters.
+ buf.WriteByte('?')
+
+ default: // incl. *types.Union
+ panic(t)
+ }
+ }
+
+ buf.WriteString(method.Id()) // e.g. "pkg.Type"
+ sig := method.Type().(*types.Signature)
+ fprint(sig.Params())
+ fprint(sig.Results())
+ return buf.String(), tricky
+}
+
+// -- serial format of index --
+
+// The cost of gob encoding and decoding for most packages in x/tools
+// is under 50us, with occasional peaks of around 1-3ms.
+// The encoded indexes are around 1KB-50KB.
+
+// A gobPackage records the method set of each package-level type for a single package.
+type gobPackage struct {
+ Strings []string // index of strings used by gobPosition.File, gobMethod.{Pkg,Object}Path
+ MethodSets []gobMethodSet
+}
+
+// A gobMethodSet records the method set of a single type.
+type gobMethodSet struct {
+ Posn gobPosition
+ IsInterface bool
+ Tricky bool // at least one method is tricky; assignability requires go/types
+ Mask uint64 // mask with 1 bit from each of methods[*].sum
+ Methods []gobMethod
+}
+
+// A gobMethod records the name, type, and position of a single method.
+type gobMethod struct {
+ Fingerprint string // string of form "methodID(params...)(results)"
+ Sum uint32 // checksum of fingerprint
+
+ // index records only (zero in KeyOf; also for index of error.Error).
+ Posn gobPosition // location of method declaration
+ PkgPath int // path of package containing method declaration
+ ObjectPath int // object path of method relative to PkgPath
+}
+
+// A gobPosition records the file, offset, and length of an identifier.
+type gobPosition struct {
+ File int // index into gobPackage.Strings
+ Offset, Len int // in bytes
+}
diff --git a/gopls/internal/lsp/source/options.go b/gopls/internal/lsp/source/options.go
new file mode 100644
index 000000000..a4ae51a47
--- /dev/null
+++ b/gopls/internal/lsp/source/options.go
@@ -0,0 +1,1631 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package source
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/asmdecl"
+ "golang.org/x/tools/go/analysis/passes/assign"
+ "golang.org/x/tools/go/analysis/passes/atomic"
+ "golang.org/x/tools/go/analysis/passes/atomicalign"
+ "golang.org/x/tools/go/analysis/passes/bools"
+ "golang.org/x/tools/go/analysis/passes/buildtag"
+ "golang.org/x/tools/go/analysis/passes/cgocall"
+ "golang.org/x/tools/go/analysis/passes/composite"
+ "golang.org/x/tools/go/analysis/passes/copylock"
+ "golang.org/x/tools/go/analysis/passes/deepequalerrors"
+ "golang.org/x/tools/go/analysis/passes/directive"
+ "golang.org/x/tools/go/analysis/passes/errorsas"
+ "golang.org/x/tools/go/analysis/passes/fieldalignment"
+ "golang.org/x/tools/go/analysis/passes/httpresponse"
+ "golang.org/x/tools/go/analysis/passes/ifaceassert"
+ "golang.org/x/tools/go/analysis/passes/loopclosure"
+ "golang.org/x/tools/go/analysis/passes/lostcancel"
+ "golang.org/x/tools/go/analysis/passes/nilfunc"
+ "golang.org/x/tools/go/analysis/passes/nilness"
+ "golang.org/x/tools/go/analysis/passes/printf"
+ "golang.org/x/tools/go/analysis/passes/shadow"
+ "golang.org/x/tools/go/analysis/passes/shift"
+ "golang.org/x/tools/go/analysis/passes/sortslice"
+ "golang.org/x/tools/go/analysis/passes/stdmethods"
+ "golang.org/x/tools/go/analysis/passes/stringintconv"
+ "golang.org/x/tools/go/analysis/passes/structtag"
+ "golang.org/x/tools/go/analysis/passes/testinggoroutine"
+ "golang.org/x/tools/go/analysis/passes/tests"
+ "golang.org/x/tools/go/analysis/passes/timeformat"
+ "golang.org/x/tools/go/analysis/passes/unmarshal"
+ "golang.org/x/tools/go/analysis/passes/unreachable"
+ "golang.org/x/tools/go/analysis/passes/unsafeptr"
+ "golang.org/x/tools/go/analysis/passes/unusedresult"
+ "golang.org/x/tools/go/analysis/passes/unusedwrite"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/embeddirective"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/fillreturns"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/fillstruct"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/infertypeargs"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/nonewvars"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/noresultvalues"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/simplifycompositelit"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/simplifyrange"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/simplifyslice"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/stubmethods"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/undeclaredname"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/unusedparams"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/unusedvariable"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/useany"
+ "golang.org/x/tools/gopls/internal/lsp/command"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/internal/diff"
+ "golang.org/x/tools/internal/diff/myers"
+)
+
+var (
+ optionsOnce sync.Once
+ defaultOptions *Options
+)
+
+// DefaultOptions is the options that are used for Gopls execution independent
+// of any externally provided configuration (LSP initialization, command
+// invocation, etc.).
+func DefaultOptions() *Options {
+ optionsOnce.Do(func() {
+ var commands []string
+ for _, c := range command.Commands {
+ commands = append(commands, c.ID())
+ }
+ defaultOptions = &Options{
+ ClientOptions: ClientOptions{
+ InsertTextFormat: protocol.PlainTextTextFormat,
+ PreferredContentFormat: protocol.Markdown,
+ ConfigurationSupported: true,
+ DynamicConfigurationSupported: true,
+ DynamicRegistrationSemanticTokensSupported: true,
+ DynamicWatchedFilesSupported: true,
+ LineFoldingOnly: false,
+ HierarchicalDocumentSymbolSupport: true,
+ },
+ ServerOptions: ServerOptions{
+ SupportedCodeActions: map[FileKind]map[protocol.CodeActionKind]bool{
+ Go: {
+ protocol.SourceFixAll: true,
+ protocol.SourceOrganizeImports: true,
+ protocol.QuickFix: true,
+ protocol.RefactorRewrite: true,
+ protocol.RefactorExtract: true,
+ },
+ Mod: {
+ protocol.SourceOrganizeImports: true,
+ protocol.QuickFix: true,
+ },
+ Work: {},
+ Sum: {},
+ Tmpl: {},
+ },
+ SupportedCommands: commands,
+ },
+ UserOptions: UserOptions{
+ BuildOptions: BuildOptions{
+ ExpandWorkspaceToModule: true,
+ MemoryMode: ModeNormal,
+ DirectoryFilters: []string{"-**/node_modules"},
+ TemplateExtensions: []string{},
+ StandaloneTags: []string{"ignore"},
+ },
+ UIOptions: UIOptions{
+ DiagnosticOptions: DiagnosticOptions{
+ DiagnosticsDelay: 250 * time.Millisecond,
+ Annotations: map[Annotation]bool{
+ Bounds: true,
+ Escape: true,
+ Inline: true,
+ Nil: true,
+ },
+ Vulncheck: ModeVulncheckOff,
+ },
+ InlayHintOptions: InlayHintOptions{},
+ DocumentationOptions: DocumentationOptions{
+ HoverKind: FullDocumentation,
+ LinkTarget: "pkg.go.dev",
+ LinksInHover: true,
+ },
+ NavigationOptions: NavigationOptions{
+ ImportShortcut: BothShortcuts,
+ SymbolMatcher: SymbolFastFuzzy,
+ SymbolStyle: DynamicSymbols,
+ },
+ CompletionOptions: CompletionOptions{
+ Matcher: Fuzzy,
+ CompletionBudget: 100 * time.Millisecond,
+ ExperimentalPostfixCompletions: true,
+ },
+ Codelenses: map[string]bool{
+ string(command.Generate): true,
+ string(command.RegenerateCgo): true,
+ string(command.Tidy): true,
+ string(command.GCDetails): false,
+ string(command.UpgradeDependency): true,
+ string(command.Vendor): true,
+ // TODO(hyangah): enable command.RunGovulncheck.
+ },
+ },
+ },
+ InternalOptions: InternalOptions{
+ LiteralCompletions: true,
+ TempModfile: true,
+ CompleteUnimported: true,
+ CompletionDocumentation: true,
+ DeepCompletion: true,
+ ChattyDiagnostics: true,
+ NewDiff: "both",
+ },
+ Hooks: Hooks{
+ // TODO(adonovan): switch to new diff.Strings implementation.
+ ComputeEdits: myers.ComputeEdits,
+ URLRegexp: urlRegexp(),
+ DefaultAnalyzers: defaultAnalyzers(),
+ TypeErrorAnalyzers: typeErrorAnalyzers(),
+ ConvenienceAnalyzers: convenienceAnalyzers(),
+ StaticcheckAnalyzers: map[string]*Analyzer{},
+ GoDiff: true,
+ },
+ }
+ })
+ return defaultOptions
+}
+
+// Options holds various configuration that affects Gopls execution, organized
+// by the nature or origin of the settings.
+type Options struct {
+ ClientOptions
+ ServerOptions
+ UserOptions
+ InternalOptions
+ Hooks
+}
+
+// ClientOptions holds LSP-specific configuration that is provided by the
+// client.
+type ClientOptions struct {
+ InsertTextFormat protocol.InsertTextFormat
+ ConfigurationSupported bool
+ DynamicConfigurationSupported bool
+ DynamicRegistrationSemanticTokensSupported bool
+ DynamicWatchedFilesSupported bool
+ PreferredContentFormat protocol.MarkupKind
+ LineFoldingOnly bool
+ HierarchicalDocumentSymbolSupport bool
+ SemanticTypes []string
+ SemanticMods []string
+ RelatedInformationSupported bool
+ CompletionTags bool
+ CompletionDeprecated bool
+ SupportedResourceOperations []protocol.ResourceOperationKind
+}
+
+// ServerOptions holds LSP-specific configuration that is provided by the
+// server.
+type ServerOptions struct {
+ SupportedCodeActions map[FileKind]map[protocol.CodeActionKind]bool
+ SupportedCommands []string
+}
+
+type BuildOptions struct {
+ // BuildFlags is the set of flags passed on to the build system when invoked.
+ // It is applied to queries like `go list`, which is used when discovering files.
+ // The most common use is to set `-tags`.
+ BuildFlags []string
+
+ // Env adds environment variables to external commands run by `gopls`, most notably `go list`.
+ Env map[string]string
+
+ // DirectoryFilters can be used to exclude unwanted directories from the
+ // workspace. By default, all directories are included. Filters are an
+ // operator, `+` to include and `-` to exclude, followed by a path prefix
+ // relative to the workspace folder. They are evaluated in order, and
+ // the last filter that applies to a path controls whether it is included.
+ // The path prefix can be empty, so an initial `-` excludes everything.
+ //
+ // DirectoryFilters also supports the `**` operator to match 0 or more directories.
+ //
+ // Examples:
+ //
+ // Exclude node_modules at current depth: `-node_modules`
+ //
+ // Exclude node_modules at any depth: `-**/node_modules`
+ //
+ // Include only project_a: `-` (exclude everything), `+project_a`
+ //
+ // Include only project_a, but not node_modules inside it: `-`, `+project_a`, `-project_a/node_modules`
+ DirectoryFilters []string
+
+ // TemplateExtensions gives the extensions of file names that are treateed
+ // as template files. (The extension
+ // is the part of the file name after the final dot.)
+ TemplateExtensions []string
+
+ // MemoryMode controls the tradeoff `gopls` makes between memory usage and
+ // correctness.
+ //
+ // Values other than `Normal` are untested and may break in surprising ways.
+ MemoryMode MemoryMode `status:"experimental"`
+
+ // ExpandWorkspaceToModule instructs `gopls` to adjust the scope of the
+ // workspace to find the best available module root. `gopls` first looks for
+ // a go.mod file in any parent directory of the workspace folder, expanding
+ // the scope to that directory if it exists. If no viable parent directory is
+ // found, gopls will check if there is exactly one child directory containing
+ // a go.mod file, narrowing the scope to that directory if it exists.
+ ExpandWorkspaceToModule bool `status:"experimental"`
+
+ // AllowModfileModifications disables -mod=readonly, allowing imports from
+ // out-of-scope modules. This option will eventually be removed.
+ AllowModfileModifications bool `status:"experimental"`
+
+ // AllowImplicitNetworkAccess disables GOPROXY=off, allowing implicit module
+ // downloads rather than requiring user action. This option will eventually
+ // be removed.
+ AllowImplicitNetworkAccess bool `status:"experimental"`
+
+ // StandaloneTags specifies a set of build constraints that identify
+ // individual Go source files that make up the entire main package of an
+ // executable.
+ //
+ // A common example of standalone main files is the convention of using the
+ // directive `//go:build ignore` to denote files that are not intended to be
+ // included in any package, for example because they are invoked directly by
+ // the developer using `go run`.
+ //
+ // Gopls considers a file to be a standalone main file if and only if it has
+ // package name "main" and has a build directive of the exact form
+ // "//go:build tag" or "// +build tag", where tag is among the list of tags
+ // configured by this setting. Notably, if the build constraint is more
+ // complicated than a simple tag (such as the composite constraint
+ // `//go:build tag && go1.18`), the file is not considered to be a standalone
+ // main file.
+ //
+ // This setting is only supported when gopls is built with Go 1.16 or later.
+ StandaloneTags []string
+}
+
+type UIOptions struct {
+ DocumentationOptions
+ CompletionOptions
+ NavigationOptions
+ DiagnosticOptions
+ InlayHintOptions
+
+ // Codelenses overrides the enabled/disabled state of code lenses. See the
+ // "Code Lenses" section of the
+ // [Settings page](https://github.com/golang/tools/blob/master/gopls/doc/settings.md#code-lenses)
+ // for the list of supported lenses.
+ //
+ // Example Usage:
+ //
+ // ```json5
+ // "gopls": {
+ // ...
+ // "codelenses": {
+ // "generate": false, // Don't show the `go generate` lens.
+ // "gc_details": true // Show a code lens toggling the display of gc's choices.
+ // }
+ // ...
+ // }
+ // ```
+ Codelenses map[string]bool
+
+ // SemanticTokens controls whether the LSP server will send
+ // semantic tokens to the client.
+ SemanticTokens bool `status:"experimental"`
+
+ // NoSemanticString turns off the sending of the semantic token 'string'
+ NoSemanticString bool `status:"experimental"`
+
+ // NoSemanticNumber turns off the sending of the semantic token 'number'
+ NoSemanticNumber bool `status:"experimental"`
+}
+
+type CompletionOptions struct {
+ // Placeholders enables placeholders for function parameters or struct
+ // fields in completion responses.
+ UsePlaceholders bool
+
+ // CompletionBudget is the soft latency goal for completion requests. Most
+ // requests finish in a couple milliseconds, but in some cases deep
+ // completions can take much longer. As we use up our budget we
+ // dynamically reduce the search scope to ensure we return timely
+ // results. Zero means unlimited.
+ CompletionBudget time.Duration `status:"debug"`
+
+ // Matcher sets the algorithm that is used when calculating completion
+ // candidates.
+ Matcher Matcher `status:"advanced"`
+
+ // ExperimentalPostfixCompletions enables artificial method snippets
+ // such as "someSlice.sort!".
+ ExperimentalPostfixCompletions bool `status:"experimental"`
+}
+
+type DocumentationOptions struct {
+ // HoverKind controls the information that appears in the hover text.
+ // SingleLine and Structured are intended for use only by authors of editor plugins.
+ HoverKind HoverKind
+
+ // LinkTarget controls where documentation links go.
+ // It might be one of:
+ //
+ // * `"godoc.org"`
+ // * `"pkg.go.dev"`
+ //
+ // If company chooses to use its own `godoc.org`, its address can be used as well.
+ //
+ // Modules matching the GOPRIVATE environment variable will not have
+ // documentation links in hover.
+ LinkTarget string
+
+ // LinksInHover toggles the presence of links to documentation in hover.
+ LinksInHover bool
+}
+
+type FormattingOptions struct {
+ // Local is the equivalent of the `goimports -local` flag, which puts
+ // imports beginning with this string after third-party packages. It should
+ // be the prefix of the import path whose imports should be grouped
+ // separately.
+ Local string
+
+ // Gofumpt indicates if we should run gofumpt formatting.
+ Gofumpt bool
+}
+
+type DiagnosticOptions struct {
+ // Analyses specify analyses that the user would like to enable or disable.
+ // A map of the names of analysis passes that should be enabled/disabled.
+ // A full list of analyzers that gopls uses can be found in
+ // [analyzers.md](https://github.com/golang/tools/blob/master/gopls/doc/analyzers.md).
+ //
+ // Example Usage:
+ //
+ // ```json5
+ // ...
+ // "analyses": {
+ // "unreachable": false, // Disable the unreachable analyzer.
+ // "unusedparams": true // Enable the unusedparams analyzer.
+ // }
+ // ...
+ // ```
+ Analyses map[string]bool
+
+ // Staticcheck enables additional analyses from staticcheck.io.
+ // These analyses are documented on
+ // [Staticcheck's website](https://staticcheck.io/docs/checks/).
+ Staticcheck bool `status:"experimental"`
+
+ // Annotations specifies the various kinds of optimization diagnostics
+ // that should be reported by the gc_details command.
+ Annotations map[Annotation]bool `status:"experimental"`
+
+ // Vulncheck enables vulnerability scanning.
+ Vulncheck VulncheckMode `status:"experimental"`
+
+ // DiagnosticsDelay controls the amount of time that gopls waits
+ // after the most recent file modification before computing deep diagnostics.
+ // Simple diagnostics (parsing and type-checking) are always run immediately
+ // on recently modified packages.
+ //
+ // This option must be set to a valid duration string, for example `"250ms"`.
+ DiagnosticsDelay time.Duration `status:"advanced"`
+}
+
+type InlayHintOptions struct {
+ // Hints specify inlay hints that users want to see. A full list of hints
+ // that gopls uses can be found in
+ // [inlayHints.md](https://github.com/golang/tools/blob/master/gopls/doc/inlayHints.md).
+ Hints map[string]bool `status:"experimental"`
+}
+
+type NavigationOptions struct {
+ // ImportShortcut specifies whether import statements should link to
+ // documentation or go to definitions.
+ ImportShortcut ImportShortcut
+
+ // SymbolMatcher sets the algorithm that is used when finding workspace symbols.
+ SymbolMatcher SymbolMatcher `status:"advanced"`
+
+ // SymbolStyle controls how symbols are qualified in symbol responses.
+ //
+ // Example Usage:
+ //
+ // ```json5
+ // "gopls": {
+ // ...
+ // "symbolStyle": "Dynamic",
+ // ...
+ // }
+ // ```
+ SymbolStyle SymbolStyle `status:"advanced"`
+}
+
+// UserOptions holds custom Gopls configuration (not part of the LSP) that is
+// modified by the client.
+type UserOptions struct {
+ BuildOptions
+ UIOptions
+ FormattingOptions
+
+ // VerboseOutput enables additional debug logging.
+ VerboseOutput bool `status:"debug"`
+}
+
+// EnvSlice returns Env as a slice of k=v strings.
+func (u *UserOptions) EnvSlice() []string {
+ var result []string
+ for k, v := range u.Env {
+ result = append(result, fmt.Sprintf("%v=%v", k, v))
+ }
+ return result
+}
+
+// SetEnvSlice sets Env from a slice of k=v strings.
+func (u *UserOptions) SetEnvSlice(env []string) {
+ u.Env = map[string]string{}
+ for _, kv := range env {
+ split := strings.SplitN(kv, "=", 2)
+ if len(split) != 2 {
+ continue
+ }
+ u.Env[split[0]] = split[1]
+ }
+}
+
+// DiffFunction is the type for a function that produces a set of edits that
+// convert from the before content to the after content.
+type DiffFunction func(before, after string) []diff.Edit
+
+// Hooks contains configuration that is provided to the Gopls command by the
+// main package.
+type Hooks struct {
+ // LicensesText holds third party licenses for software used by gopls.
+ LicensesText string
+
+ // GoDiff is used in gopls/hooks to get Myers' diff
+ GoDiff bool
+
+ // Whether staticcheck is supported.
+ StaticcheckSupported bool
+
+ // ComputeEdits is used to compute edits between file versions.
+ ComputeEdits DiffFunction
+
+ // URLRegexp is used to find potential URLs in comments/strings.
+ //
+ // Not all matches are shown to the user: if the matched URL is not detected
+ // as valid, it will be skipped.
+ URLRegexp *regexp.Regexp
+
+ // GofumptFormat allows the gopls module to wire-in a call to
+ // gofumpt/format.Source. langVersion and modulePath are used for some
+ // Gofumpt formatting rules -- see the Gofumpt documentation for details.
+ GofumptFormat func(ctx context.Context, langVersion, modulePath string, src []byte) ([]byte, error)
+
+ DefaultAnalyzers map[string]*Analyzer
+ TypeErrorAnalyzers map[string]*Analyzer
+ ConvenienceAnalyzers map[string]*Analyzer
+ StaticcheckAnalyzers map[string]*Analyzer
+}
+
+// InternalOptions contains settings that are not intended for use by the
+// average user. These may be settings used by tests or outdated settings that
+// will soon be deprecated. Some of these settings may not even be configurable
+// by the user.
+type InternalOptions struct {
+ // LiteralCompletions controls whether literal candidates such as
+ // "&someStruct{}" are offered. Tests disable this flag to simplify
+ // their expected values.
+ LiteralCompletions bool
+
+ // VerboseWorkDoneProgress controls whether the LSP server should send
+ // progress reports for all work done outside the scope of an RPC.
+ // Used by the regression tests.
+ VerboseWorkDoneProgress bool
+
+ // The following options were previously available to users, but they
+ // really shouldn't be configured by anyone other than "power users".
+
+ // CompletionDocumentation enables documentation with completion results.
+ CompletionDocumentation bool
+
+ // CompleteUnimported enables completion for packages that you do not
+ // currently import.
+ CompleteUnimported bool
+
+ // DeepCompletion enables the ability to return completions from deep
+ // inside relevant entities, rather than just the locally accessible ones.
+ //
+ // Consider this example:
+ //
+ // ```go
+ // package main
+ //
+ // import "fmt"
+ //
+ // type wrapString struct {
+ // str string
+ // }
+ //
+ // func main() {
+ // x := wrapString{"hello world"}
+ // fmt.Printf(<>)
+ // }
+ // ```
+ //
+ // At the location of the `<>` in this program, deep completion would suggest
+ // the result `x.str`.
+ DeepCompletion bool
+
+ // TempModfile controls the use of the -modfile flag in Go 1.14.
+ TempModfile bool
+
+ // ShowBugReports causes a message to be shown when the first bug is reported
+ // on the server.
+ // This option applies only during initialization.
+ ShowBugReports bool
+
+ // NewDiff controls the choice of the new diff implementation. It can be
+ // 'new', 'old', or 'both', which is the default. 'both' computes diffs with
+ // both algorithms, checks that the new algorithm has worked, and write some
+ // summary statistics to a file in os.TmpDir().
+ NewDiff string
+
+ // ChattyDiagnostics controls whether to report file diagnostics for each
+ // file change. If unset, gopls only reports diagnostics when they change, or
+ // when a file is opened or closed.
+ ChattyDiagnostics bool
+}
+
+type ImportShortcut string
+
+const (
+ BothShortcuts ImportShortcut = "Both"
+ LinkShortcut ImportShortcut = "Link"
+ DefinitionShortcut ImportShortcut = "Definition"
+)
+
+func (s ImportShortcut) ShowLinks() bool {
+ return s == BothShortcuts || s == LinkShortcut
+}
+
+func (s ImportShortcut) ShowDefinition() bool {
+ return s == BothShortcuts || s == DefinitionShortcut
+}
+
+type Matcher string
+
+const (
+ Fuzzy Matcher = "Fuzzy"
+ CaseInsensitive Matcher = "CaseInsensitive"
+ CaseSensitive Matcher = "CaseSensitive"
+)
+
+type SymbolMatcher string
+
+const (
+ SymbolFuzzy SymbolMatcher = "Fuzzy"
+ SymbolFastFuzzy SymbolMatcher = "FastFuzzy"
+ SymbolCaseInsensitive SymbolMatcher = "CaseInsensitive"
+ SymbolCaseSensitive SymbolMatcher = "CaseSensitive"
+)
+
+type SymbolStyle string
+
+const (
+ // PackageQualifiedSymbols is package qualified symbols i.e.
+ // "pkg.Foo.Field".
+ PackageQualifiedSymbols SymbolStyle = "Package"
+ // FullyQualifiedSymbols is fully qualified symbols, i.e.
+ // "path/to/pkg.Foo.Field".
+ FullyQualifiedSymbols SymbolStyle = "Full"
+ // DynamicSymbols uses whichever qualifier results in the highest scoring
+ // match for the given symbol query. Here a "qualifier" is any "/" or "."
+ // delimited suffix of the fully qualified symbol. i.e. "to/pkg.Foo.Field" or
+ // just "Foo.Field".
+ DynamicSymbols SymbolStyle = "Dynamic"
+)
+
+type HoverKind string
+
+const (
+ SingleLine HoverKind = "SingleLine"
+ NoDocumentation HoverKind = "NoDocumentation"
+ SynopsisDocumentation HoverKind = "SynopsisDocumentation"
+ FullDocumentation HoverKind = "FullDocumentation"
+
+ // Structured is an experimental setting that returns a structured hover format.
+ // This format separates the signature from the documentation, so that the client
+ // can do more manipulation of these fields.
+ //
+ // This should only be used by clients that support this behavior.
+ Structured HoverKind = "Structured"
+)
+
+type MemoryMode string
+
+const (
+ ModeNormal MemoryMode = "Normal"
+ // In DegradeClosed mode, `gopls` will collect less information about
+ // packages without open files. As a result, features like Find
+ // References and Rename will miss results in such packages.
+ ModeDegradeClosed MemoryMode = "DegradeClosed"
+)
+
+type VulncheckMode string
+
+const (
+ // Disable vulnerability analysis.
+ ModeVulncheckOff VulncheckMode = "Off"
+ // In Imports mode, `gopls` will report vulnerabilities that affect packages
+ // directly and indirectly used by the analyzed main module.
+ ModeVulncheckImports VulncheckMode = "Imports"
+
+ // TODO: VulncheckRequire, VulncheckCallgraph
+)
+
+type OptionResults []OptionResult
+
+type OptionResult struct {
+ Name string
+ Value interface{}
+ Error error
+}
+
+func SetOptions(options *Options, opts interface{}) OptionResults {
+ var results OptionResults
+ switch opts := opts.(type) {
+ case nil:
+ case map[string]interface{}:
+ // If the user's settings contains "allExperiments", set that first,
+ // and then let them override individual settings independently.
+ var enableExperiments bool
+ for name, value := range opts {
+ if b, ok := value.(bool); name == "allExperiments" && ok && b {
+ enableExperiments = true
+ options.EnableAllExperiments()
+ }
+ }
+ seen := map[string]struct{}{}
+ for name, value := range opts {
+ results = append(results, options.set(name, value, seen))
+ }
+ // Finally, enable any experimental features that are specified in
+ // maps, which allows users to individually toggle them on or off.
+ if enableExperiments {
+ options.enableAllExperimentMaps()
+ }
+ default:
+ results = append(results, OptionResult{
+ Value: opts,
+ Error: fmt.Errorf("Invalid options type %T", opts),
+ })
+ }
+ return results
+}
+
+func (o *Options) ForClientCapabilities(caps protocol.ClientCapabilities) {
+ // Check if the client supports snippets in completion items.
+ if caps.Workspace.WorkspaceEdit != nil {
+ o.SupportedResourceOperations = caps.Workspace.WorkspaceEdit.ResourceOperations
+ }
+ if c := caps.TextDocument.Completion; c.CompletionItem.SnippetSupport {
+ o.InsertTextFormat = protocol.SnippetTextFormat
+ }
+ // Check if the client supports configuration messages.
+ o.ConfigurationSupported = caps.Workspace.Configuration
+ o.DynamicConfigurationSupported = caps.Workspace.DidChangeConfiguration.DynamicRegistration
+ o.DynamicRegistrationSemanticTokensSupported = caps.TextDocument.SemanticTokens.DynamicRegistration
+ o.DynamicWatchedFilesSupported = caps.Workspace.DidChangeWatchedFiles.DynamicRegistration
+
+ // Check which types of content format are supported by this client.
+ if hover := caps.TextDocument.Hover; hover != nil && len(hover.ContentFormat) > 0 {
+ o.PreferredContentFormat = hover.ContentFormat[0]
+ }
+ // Check if the client supports only line folding.
+
+ if fr := caps.TextDocument.FoldingRange; fr != nil {
+ o.LineFoldingOnly = fr.LineFoldingOnly
+ }
+ // Check if the client supports hierarchical document symbols.
+ o.HierarchicalDocumentSymbolSupport = caps.TextDocument.DocumentSymbol.HierarchicalDocumentSymbolSupport
+
+ // Client's semantic tokens
+ o.SemanticTypes = caps.TextDocument.SemanticTokens.TokenTypes
+ o.SemanticMods = caps.TextDocument.SemanticTokens.TokenModifiers
+ // we don't need Requests, as we support full functionality
+ // we don't need Formats, as there is only one, for now
+
+ // Check if the client supports diagnostic related information.
+ o.RelatedInformationSupported = caps.TextDocument.PublishDiagnostics.RelatedInformation
+ // Check if the client completion support includes tags (preferred) or deprecation
+ if caps.TextDocument.Completion.CompletionItem.TagSupport.ValueSet != nil {
+ o.CompletionTags = true
+ } else if caps.TextDocument.Completion.CompletionItem.DeprecatedSupport {
+ o.CompletionDeprecated = true
+ }
+}
+
+func (o *Options) Clone() *Options {
+ // TODO(rfindley): has this function gone stale? It appears that there are
+ // settings that are incorrectly cloned here (such as TemplateExtensions).
+ result := &Options{
+ ClientOptions: o.ClientOptions,
+ InternalOptions: o.InternalOptions,
+ Hooks: Hooks{
+ GoDiff: o.GoDiff,
+ StaticcheckSupported: o.StaticcheckSupported,
+ ComputeEdits: o.ComputeEdits,
+ GofumptFormat: o.GofumptFormat,
+ URLRegexp: o.URLRegexp,
+ },
+ ServerOptions: o.ServerOptions,
+ UserOptions: o.UserOptions,
+ }
+ // Fully clone any slice or map fields. Only Hooks, ExperimentalOptions,
+ // and UserOptions can be modified.
+ copyStringMap := func(src map[string]bool) map[string]bool {
+ dst := make(map[string]bool)
+ for k, v := range src {
+ dst[k] = v
+ }
+ return dst
+ }
+ result.Analyses = copyStringMap(o.Analyses)
+ result.Codelenses = copyStringMap(o.Codelenses)
+
+ copySlice := func(src []string) []string {
+ dst := make([]string, len(src))
+ copy(dst, src)
+ return dst
+ }
+ result.SetEnvSlice(o.EnvSlice())
+ result.BuildFlags = copySlice(o.BuildFlags)
+ result.DirectoryFilters = copySlice(o.DirectoryFilters)
+ result.StandaloneTags = copySlice(o.StandaloneTags)
+
+ copyAnalyzerMap := func(src map[string]*Analyzer) map[string]*Analyzer {
+ dst := make(map[string]*Analyzer)
+ for k, v := range src {
+ dst[k] = v
+ }
+ return dst
+ }
+ result.DefaultAnalyzers = copyAnalyzerMap(o.DefaultAnalyzers)
+ result.TypeErrorAnalyzers = copyAnalyzerMap(o.TypeErrorAnalyzers)
+ result.ConvenienceAnalyzers = copyAnalyzerMap(o.ConvenienceAnalyzers)
+ result.StaticcheckAnalyzers = copyAnalyzerMap(o.StaticcheckAnalyzers)
+ return result
+}
+
+func (o *Options) AddStaticcheckAnalyzer(a *analysis.Analyzer, enabled bool, severity protocol.DiagnosticSeverity) {
+ o.StaticcheckAnalyzers[a.Name] = &Analyzer{
+ Analyzer: a,
+ Enabled: enabled,
+ Severity: severity,
+ }
+}
+
+// EnableAllExperiments turns on all of the experimental "off-by-default"
+// features offered by gopls. Any experimental features specified in maps
+// should be enabled in enableAllExperimentMaps.
+func (o *Options) EnableAllExperiments() {
+ o.SemanticTokens = true
+}
+
+func (o *Options) enableAllExperimentMaps() {
+ if _, ok := o.Codelenses[string(command.GCDetails)]; !ok {
+ o.Codelenses[string(command.GCDetails)] = true
+ }
+ if _, ok := o.Codelenses[string(command.RunGovulncheck)]; !ok {
+ o.Codelenses[string(command.RunGovulncheck)] = true
+ }
+ if _, ok := o.Analyses[unusedparams.Analyzer.Name]; !ok {
+ o.Analyses[unusedparams.Analyzer.Name] = true
+ }
+ if _, ok := o.Analyses[unusedvariable.Analyzer.Name]; !ok {
+ o.Analyses[unusedvariable.Analyzer.Name] = true
+ }
+}
+
+// validateDirectoryFilter validates if the filter string
+// - is not empty
+// - start with either + or -
+// - doesn't contain currently unsupported glob operators: *, ?
+func validateDirectoryFilter(ifilter string) (string, error) {
+ filter := fmt.Sprint(ifilter)
+ if filter == "" || (filter[0] != '+' && filter[0] != '-') {
+ return "", fmt.Errorf("invalid filter %v, must start with + or -", filter)
+ }
+ segs := strings.Split(filter[1:], "/")
+ unsupportedOps := [...]string{"?", "*"}
+ for _, seg := range segs {
+ if seg != "**" {
+ for _, op := range unsupportedOps {
+ if strings.Contains(seg, op) {
+ return "", fmt.Errorf("invalid filter %v, operator %v not supported. If you want to have this operator supported, consider filing an issue.", filter, op)
+ }
+ }
+ }
+ }
+
+ return strings.TrimRight(filepath.FromSlash(filter), "/"), nil
+}
+
+func (o *Options) set(name string, value interface{}, seen map[string]struct{}) OptionResult {
+ // Flatten the name in case we get options with a hierarchy.
+ split := strings.Split(name, ".")
+ name = split[len(split)-1]
+
+ result := OptionResult{Name: name, Value: value}
+ if _, ok := seen[name]; ok {
+ result.parseErrorf("duplicate configuration for %s", name)
+ }
+ seen[name] = struct{}{}
+
+ switch name {
+ case "env":
+ menv, ok := value.(map[string]interface{})
+ if !ok {
+ result.parseErrorf("invalid type %T, expect map", value)
+ break
+ }
+ if o.Env == nil {
+ o.Env = make(map[string]string)
+ }
+ for k, v := range menv {
+ o.Env[k] = fmt.Sprint(v)
+ }
+
+ case "buildFlags":
+ // TODO(rfindley): use asStringSlice.
+ iflags, ok := value.([]interface{})
+ if !ok {
+ result.parseErrorf("invalid type %T, expect list", value)
+ break
+ }
+ flags := make([]string, 0, len(iflags))
+ for _, flag := range iflags {
+ flags = append(flags, fmt.Sprintf("%s", flag))
+ }
+ o.BuildFlags = flags
+
+ case "directoryFilters":
+ // TODO(rfindley): use asStringSlice.
+ ifilters, ok := value.([]interface{})
+ if !ok {
+ result.parseErrorf("invalid type %T, expect list", value)
+ break
+ }
+ var filters []string
+ for _, ifilter := range ifilters {
+ filter, err := validateDirectoryFilter(fmt.Sprintf("%v", ifilter))
+ if err != nil {
+ result.parseErrorf("%v", err)
+ return result
+ }
+ filters = append(filters, strings.TrimRight(filepath.FromSlash(filter), "/"))
+ }
+ o.DirectoryFilters = filters
+
+ case "memoryMode":
+ if s, ok := result.asOneOf(
+ string(ModeNormal),
+ string(ModeDegradeClosed),
+ ); ok {
+ o.MemoryMode = MemoryMode(s)
+ }
+ case "completionDocumentation":
+ result.setBool(&o.CompletionDocumentation)
+ case "usePlaceholders":
+ result.setBool(&o.UsePlaceholders)
+ case "deepCompletion":
+ result.setBool(&o.DeepCompletion)
+ case "completeUnimported":
+ result.setBool(&o.CompleteUnimported)
+ case "completionBudget":
+ result.setDuration(&o.CompletionBudget)
+ case "matcher":
+ if s, ok := result.asOneOf(
+ string(Fuzzy),
+ string(CaseSensitive),
+ string(CaseInsensitive),
+ ); ok {
+ o.Matcher = Matcher(s)
+ }
+
+ case "symbolMatcher":
+ if s, ok := result.asOneOf(
+ string(SymbolFuzzy),
+ string(SymbolFastFuzzy),
+ string(SymbolCaseInsensitive),
+ string(SymbolCaseSensitive),
+ ); ok {
+ o.SymbolMatcher = SymbolMatcher(s)
+ }
+
+ case "symbolStyle":
+ if s, ok := result.asOneOf(
+ string(FullyQualifiedSymbols),
+ string(PackageQualifiedSymbols),
+ string(DynamicSymbols),
+ ); ok {
+ o.SymbolStyle = SymbolStyle(s)
+ }
+
+ case "hoverKind":
+ if s, ok := result.asOneOf(
+ string(NoDocumentation),
+ string(SingleLine),
+ string(SynopsisDocumentation),
+ string(FullDocumentation),
+ string(Structured),
+ ); ok {
+ o.HoverKind = HoverKind(s)
+ }
+
+ case "linkTarget":
+ result.setString(&o.LinkTarget)
+
+ case "linksInHover":
+ result.setBool(&o.LinksInHover)
+
+ case "importShortcut":
+ if s, ok := result.asOneOf(string(BothShortcuts), string(LinkShortcut), string(DefinitionShortcut)); ok {
+ o.ImportShortcut = ImportShortcut(s)
+ }
+
+ case "analyses":
+ result.setBoolMap(&o.Analyses)
+
+ case "hints":
+ result.setBoolMap(&o.Hints)
+
+ case "annotations":
+ result.setAnnotationMap(&o.Annotations)
+
+ case "vulncheck":
+ if s, ok := result.asOneOf(
+ string(ModeVulncheckOff),
+ string(ModeVulncheckImports),
+ ); ok {
+ o.Vulncheck = VulncheckMode(s)
+ }
+
+ case "codelenses", "codelens":
+ var lensOverrides map[string]bool
+ result.setBoolMap(&lensOverrides)
+ if result.Error == nil {
+ if o.Codelenses == nil {
+ o.Codelenses = make(map[string]bool)
+ }
+ for lens, enabled := range lensOverrides {
+ o.Codelenses[lens] = enabled
+ }
+ }
+
+ // codelens is deprecated, but still works for now.
+ // TODO(rstambler): Remove this for the gopls/v0.7.0 release.
+ if name == "codelens" {
+ result.deprecated("codelenses")
+ }
+
+ case "staticcheck":
+ if v, ok := result.asBool(); ok {
+ o.Staticcheck = v
+ if v && !o.StaticcheckSupported {
+ result.Error = fmt.Errorf("applying setting %q: staticcheck is not supported at %s;"+
+ " rebuild gopls with a more recent version of Go", result.Name, runtime.Version())
+ }
+ }
+
+ case "local":
+ result.setString(&o.Local)
+
+ case "verboseOutput":
+ result.setBool(&o.VerboseOutput)
+
+ case "verboseWorkDoneProgress":
+ result.setBool(&o.VerboseWorkDoneProgress)
+
+ case "tempModfile":
+ result.setBool(&o.TempModfile)
+
+ case "showBugReports":
+ result.setBool(&o.ShowBugReports)
+
+ case "gofumpt":
+ if v, ok := result.asBool(); ok {
+ o.Gofumpt = v
+ if v && o.GofumptFormat == nil {
+ result.Error = fmt.Errorf("applying setting %q: gofumpt is not supported at %s;"+
+ " rebuild gopls with a more recent version of Go", result.Name, runtime.Version())
+ }
+ }
+
+ case "semanticTokens":
+ result.setBool(&o.SemanticTokens)
+
+ case "noSemanticString":
+ result.setBool(&o.NoSemanticString)
+
+ case "noSemanticNumber":
+ result.setBool(&o.NoSemanticNumber)
+
+ case "expandWorkspaceToModule":
+ result.setBool(&o.ExpandWorkspaceToModule)
+
+ case "experimentalPostfixCompletions":
+ result.setBool(&o.ExperimentalPostfixCompletions)
+
+ case "experimentalWorkspaceModule":
+ result.deprecated("")
+
+ case "experimentalTemplateSupport": // TODO(pjw): remove after June 2022
+ result.deprecated("")
+
+ case "templateExtensions":
+ if iexts, ok := value.([]interface{}); ok {
+ ans := []string{}
+ for _, x := range iexts {
+ ans = append(ans, fmt.Sprint(x))
+ }
+ o.TemplateExtensions = ans
+ break
+ }
+ if value == nil {
+ o.TemplateExtensions = nil
+ break
+ }
+ result.parseErrorf("unexpected type %T not []string", value)
+
+ case "experimentalDiagnosticsDelay":
+ result.deprecated("diagnosticsDelay")
+
+ case "diagnosticsDelay":
+ result.setDuration(&o.DiagnosticsDelay)
+
+ case "experimentalWatchedFileDelay":
+ result.deprecated("")
+
+ case "experimentalPackageCacheKey":
+ result.deprecated("")
+
+ case "allowModfileModifications":
+ result.setBool(&o.AllowModfileModifications)
+
+ case "allowImplicitNetworkAccess":
+ result.setBool(&o.AllowImplicitNetworkAccess)
+
+ case "experimentalUseInvalidMetadata":
+ result.deprecated("")
+
+ case "standaloneTags":
+ result.setStringSlice(&o.StandaloneTags)
+
+ case "allExperiments":
+ // This setting should be handled before all of the other options are
+ // processed, so do nothing here.
+
+ case "newDiff":
+ result.setString(&o.NewDiff)
+
+ case "chattyDiagnostics":
+ result.setBool(&o.ChattyDiagnostics)
+
+ // Replaced settings.
+ case "experimentalDisabledAnalyses":
+ result.deprecated("analyses")
+
+ case "disableDeepCompletion":
+ result.deprecated("deepCompletion")
+
+ case "disableFuzzyMatching":
+ result.deprecated("fuzzyMatching")
+
+ case "wantCompletionDocumentation":
+ result.deprecated("completionDocumentation")
+
+ case "wantUnimportedCompletions":
+ result.deprecated("completeUnimported")
+
+ case "fuzzyMatching":
+ result.deprecated("matcher")
+
+ case "caseSensitiveCompletion":
+ result.deprecated("matcher")
+
+ // Deprecated settings.
+ case "wantSuggestedFixes":
+ result.deprecated("")
+
+ case "noIncrementalSync":
+ result.deprecated("")
+
+ case "watchFileChanges":
+ result.deprecated("")
+
+ case "go-diff":
+ result.deprecated("")
+
+ default:
+ result.unexpected()
+ }
+ return result
+}
+
+// parseErrorf reports an error parsing the current configuration value.
+func (r *OptionResult) parseErrorf(msg string, values ...interface{}) {
+ if false {
+ _ = fmt.Sprintf(msg, values...) // this causes vet to check this like printf
+ }
+ prefix := fmt.Sprintf("parsing setting %q: ", r.Name)
+ r.Error = fmt.Errorf(prefix+msg, values...)
+}
+
+// A SoftError is an error that does not affect the functionality of gopls.
+type SoftError struct {
+ msg string
+}
+
+func (e *SoftError) Error() string {
+ return e.msg
+}
+
+// softErrorf reports an error that does not affect the functionality of gopls
+// (a warning in the UI).
+// The formatted message will be shown to the user unmodified.
+func (r *OptionResult) softErrorf(format string, values ...interface{}) {
+ msg := fmt.Sprintf(format, values...)
+ r.Error = &SoftError{msg}
+}
+
+// deprecated reports the current setting as deprecated. If 'replacement' is
+// non-nil, it is suggested to the user.
+func (r *OptionResult) deprecated(replacement string) {
+ msg := fmt.Sprintf("gopls setting %q is deprecated", r.Name)
+ if replacement != "" {
+ msg = fmt.Sprintf("%s, use %q instead", msg, replacement)
+ }
+ r.Error = &SoftError{msg}
+}
+
+// unexpected reports that the current setting is not known to gopls.
+func (r *OptionResult) unexpected() {
+ r.Error = fmt.Errorf("unexpected gopls setting %q", r.Name)
+}
+
+func (r *OptionResult) asBool() (bool, bool) {
+ b, ok := r.Value.(bool)
+ if !ok {
+ r.parseErrorf("invalid type %T, expect bool", r.Value)
+ return false, false
+ }
+ return b, true
+}
+
+func (r *OptionResult) setBool(b *bool) {
+ if v, ok := r.asBool(); ok {
+ *b = v
+ }
+}
+
+func (r *OptionResult) setDuration(d *time.Duration) {
+ if v, ok := r.asString(); ok {
+ parsed, err := time.ParseDuration(v)
+ if err != nil {
+ r.parseErrorf("failed to parse duration %q: %v", v, err)
+ return
+ }
+ *d = parsed
+ }
+}
+
+func (r *OptionResult) setBoolMap(bm *map[string]bool) {
+ m := r.asBoolMap()
+ *bm = m
+}
+
+func (r *OptionResult) setAnnotationMap(bm *map[Annotation]bool) {
+ all := r.asBoolMap()
+ if all == nil {
+ return
+ }
+ // Default to everything enabled by default.
+ m := make(map[Annotation]bool)
+ for k, enabled := range all {
+ a, err := asOneOf(
+ k,
+ string(Nil),
+ string(Escape),
+ string(Inline),
+ string(Bounds),
+ )
+ if err != nil {
+ // In case of an error, process any legacy values.
+ switch k {
+ case "noEscape":
+ m[Escape] = false
+ r.parseErrorf(`"noEscape" is deprecated, set "Escape: false" instead`)
+ case "noNilcheck":
+ m[Nil] = false
+ r.parseErrorf(`"noNilcheck" is deprecated, set "Nil: false" instead`)
+ case "noInline":
+ m[Inline] = false
+ r.parseErrorf(`"noInline" is deprecated, set "Inline: false" instead`)
+ case "noBounds":
+ m[Bounds] = false
+ r.parseErrorf(`"noBounds" is deprecated, set "Bounds: false" instead`)
+ default:
+ r.parseErrorf("%v", err)
+ }
+ continue
+ }
+ m[Annotation(a)] = enabled
+ }
+ *bm = m
+}
+
+func (r *OptionResult) asBoolMap() map[string]bool {
+ all, ok := r.Value.(map[string]interface{})
+ if !ok {
+ r.parseErrorf("invalid type %T for map[string]bool option", r.Value)
+ return nil
+ }
+ m := make(map[string]bool)
+ for a, enabled := range all {
+ if e, ok := enabled.(bool); ok {
+ m[a] = e
+ } else {
+ r.parseErrorf("invalid type %T for map key %q", enabled, a)
+ return m
+ }
+ }
+ return m
+}
+
+func (r *OptionResult) asString() (string, bool) {
+ b, ok := r.Value.(string)
+ if !ok {
+ r.parseErrorf("invalid type %T, expect string", r.Value)
+ return "", false
+ }
+ return b, true
+}
+
+func (r *OptionResult) asStringSlice() ([]string, bool) {
+ iList, ok := r.Value.([]interface{})
+ if !ok {
+ r.parseErrorf("invalid type %T, expect list", r.Value)
+ return nil, false
+ }
+ var list []string
+ for _, elem := range iList {
+ s, ok := elem.(string)
+ if !ok {
+ r.parseErrorf("invalid element type %T, expect string", elem)
+ return nil, false
+ }
+ list = append(list, s)
+ }
+ return list, true
+}
+
+func (r *OptionResult) asOneOf(options ...string) (string, bool) {
+ s, ok := r.asString()
+ if !ok {
+ return "", false
+ }
+ s, err := asOneOf(s, options...)
+ if err != nil {
+ r.parseErrorf("%v", err)
+ }
+ return s, err == nil
+}
+
+func asOneOf(str string, options ...string) (string, error) {
+ lower := strings.ToLower(str)
+ for _, opt := range options {
+ if strings.ToLower(opt) == lower {
+ return opt, nil
+ }
+ }
+ return "", fmt.Errorf("invalid option %q for enum", str)
+}
+
+func (r *OptionResult) setString(s *string) {
+ if v, ok := r.asString(); ok {
+ *s = v
+ }
+}
+
+func (r *OptionResult) setStringSlice(s *[]string) {
+ if v, ok := r.asStringSlice(); ok {
+ *s = v
+ }
+}
+
+func typeErrorAnalyzers() map[string]*Analyzer {
+ return map[string]*Analyzer{
+ fillreturns.Analyzer.Name: {
+ Analyzer: fillreturns.Analyzer,
+ ActionKind: []protocol.CodeActionKind{protocol.SourceFixAll, protocol.QuickFix},
+ Enabled: true,
+ },
+ nonewvars.Analyzer.Name: {
+ Analyzer: nonewvars.Analyzer,
+ Enabled: true,
+ },
+ noresultvalues.Analyzer.Name: {
+ Analyzer: noresultvalues.Analyzer,
+ Enabled: true,
+ },
+ undeclaredname.Analyzer.Name: {
+ Analyzer: undeclaredname.Analyzer,
+ Fix: UndeclaredName,
+ Enabled: true,
+ },
+ unusedvariable.Analyzer.Name: {
+ Analyzer: unusedvariable.Analyzer,
+ Enabled: false,
+ },
+ }
+}
+
+func convenienceAnalyzers() map[string]*Analyzer {
+ return map[string]*Analyzer{
+ fillstruct.Analyzer.Name: {
+ Analyzer: fillstruct.Analyzer,
+ Fix: FillStruct,
+ Enabled: true,
+ ActionKind: []protocol.CodeActionKind{protocol.RefactorRewrite},
+ },
+ stubmethods.Analyzer.Name: {
+ Analyzer: stubmethods.Analyzer,
+ ActionKind: []protocol.CodeActionKind{protocol.RefactorRewrite},
+ Fix: StubMethods,
+ Enabled: true,
+ },
+ }
+}
+
+func defaultAnalyzers() map[string]*Analyzer {
+ return map[string]*Analyzer{
+ // The traditional vet suite:
+ asmdecl.Analyzer.Name: {Analyzer: asmdecl.Analyzer, Enabled: true},
+ assign.Analyzer.Name: {Analyzer: assign.Analyzer, Enabled: true},
+ atomic.Analyzer.Name: {Analyzer: atomic.Analyzer, Enabled: true},
+ bools.Analyzer.Name: {Analyzer: bools.Analyzer, Enabled: true},
+ buildtag.Analyzer.Name: {Analyzer: buildtag.Analyzer, Enabled: true},
+ cgocall.Analyzer.Name: {Analyzer: cgocall.Analyzer, Enabled: true},
+ composite.Analyzer.Name: {Analyzer: composite.Analyzer, Enabled: true},
+ copylock.Analyzer.Name: {Analyzer: copylock.Analyzer, Enabled: true},
+ directive.Analyzer.Name: {Analyzer: directive.Analyzer, Enabled: true},
+ errorsas.Analyzer.Name: {Analyzer: errorsas.Analyzer, Enabled: true},
+ httpresponse.Analyzer.Name: {Analyzer: httpresponse.Analyzer, Enabled: true},
+ ifaceassert.Analyzer.Name: {Analyzer: ifaceassert.Analyzer, Enabled: true},
+ loopclosure.Analyzer.Name: {Analyzer: loopclosure.Analyzer, Enabled: true},
+ lostcancel.Analyzer.Name: {Analyzer: lostcancel.Analyzer, Enabled: true},
+ nilfunc.Analyzer.Name: {Analyzer: nilfunc.Analyzer, Enabled: true},
+ printf.Analyzer.Name: {Analyzer: printf.Analyzer, Enabled: true},
+ shift.Analyzer.Name: {Analyzer: shift.Analyzer, Enabled: true},
+ stdmethods.Analyzer.Name: {Analyzer: stdmethods.Analyzer, Enabled: true},
+ stringintconv.Analyzer.Name: {Analyzer: stringintconv.Analyzer, Enabled: true},
+ structtag.Analyzer.Name: {Analyzer: structtag.Analyzer, Enabled: true},
+ tests.Analyzer.Name: {Analyzer: tests.Analyzer, Enabled: true},
+ unmarshal.Analyzer.Name: {Analyzer: unmarshal.Analyzer, Enabled: true},
+ unreachable.Analyzer.Name: {Analyzer: unreachable.Analyzer, Enabled: true},
+ unsafeptr.Analyzer.Name: {Analyzer: unsafeptr.Analyzer, Enabled: true},
+ unusedresult.Analyzer.Name: {Analyzer: unusedresult.Analyzer, Enabled: true},
+
+ // Non-vet analyzers:
+ atomicalign.Analyzer.Name: {Analyzer: atomicalign.Analyzer, Enabled: true},
+ deepequalerrors.Analyzer.Name: {Analyzer: deepequalerrors.Analyzer, Enabled: true},
+ fieldalignment.Analyzer.Name: {Analyzer: fieldalignment.Analyzer, Enabled: false},
+ nilness.Analyzer.Name: {Analyzer: nilness.Analyzer, Enabled: false},
+ shadow.Analyzer.Name: {Analyzer: shadow.Analyzer, Enabled: false},
+ sortslice.Analyzer.Name: {Analyzer: sortslice.Analyzer, Enabled: true},
+ testinggoroutine.Analyzer.Name: {Analyzer: testinggoroutine.Analyzer, Enabled: true},
+ unusedparams.Analyzer.Name: {Analyzer: unusedparams.Analyzer, Enabled: false},
+ unusedwrite.Analyzer.Name: {Analyzer: unusedwrite.Analyzer, Enabled: false},
+ useany.Analyzer.Name: {Analyzer: useany.Analyzer, Enabled: false},
+ infertypeargs.Analyzer.Name: {Analyzer: infertypeargs.Analyzer, Enabled: true},
+ embeddirective.Analyzer.Name: {Analyzer: embeddirective.Analyzer, Enabled: true},
+ timeformat.Analyzer.Name: {Analyzer: timeformat.Analyzer, Enabled: true},
+
+ // gofmt -s suite:
+ simplifycompositelit.Analyzer.Name: {
+ Analyzer: simplifycompositelit.Analyzer,
+ Enabled: true,
+ ActionKind: []protocol.CodeActionKind{protocol.SourceFixAll, protocol.QuickFix},
+ },
+ simplifyrange.Analyzer.Name: {
+ Analyzer: simplifyrange.Analyzer,
+ Enabled: true,
+ ActionKind: []protocol.CodeActionKind{protocol.SourceFixAll, protocol.QuickFix},
+ },
+ simplifyslice.Analyzer.Name: {
+ Analyzer: simplifyslice.Analyzer,
+ Enabled: true,
+ ActionKind: []protocol.CodeActionKind{protocol.SourceFixAll, protocol.QuickFix},
+ },
+ }
+}
+
+func urlRegexp() *regexp.Regexp {
+ // Ensure links are matched as full words, not anywhere.
+ re := regexp.MustCompile(`\b(http|ftp|https)://([\w_-]+(?:(?:\.[\w_-]+)+))([\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])?\b`)
+ re.Longest()
+ return re
+}
+
+type APIJSON struct {
+ Options map[string][]*OptionJSON
+ Commands []*CommandJSON
+ Lenses []*LensJSON
+ Analyzers []*AnalyzerJSON
+ Hints []*HintJSON
+}
+
+type OptionJSON struct {
+ Name string
+ Type string
+ Doc string
+ EnumKeys EnumKeys
+ EnumValues []EnumValue
+ Default string
+ Status string
+ Hierarchy string
+}
+
+func (o *OptionJSON) String() string {
+ return o.Name
+}
+
+func (o *OptionJSON) Write(w io.Writer) {
+ fmt.Fprintf(w, "**%v** *%v*\n\n", o.Name, o.Type)
+ writeStatus(w, o.Status)
+ enumValues := collectEnums(o)
+ fmt.Fprintf(w, "%v%v\nDefault: `%v`.\n\n", o.Doc, enumValues, o.Default)
+}
+
+func writeStatus(section io.Writer, status string) {
+ switch status {
+ case "":
+ case "advanced":
+ fmt.Fprint(section, "**This is an advanced setting and should not be configured by most `gopls` users.**\n\n")
+ case "debug":
+ fmt.Fprint(section, "**This setting is for debugging purposes only.**\n\n")
+ case "experimental":
+ fmt.Fprint(section, "**This setting is experimental and may be deleted.**\n\n")
+ default:
+ fmt.Fprintf(section, "**Status: %s.**\n\n", status)
+ }
+}
+
+var parBreakRE = regexp.MustCompile("\n{2,}")
+
+func collectEnums(opt *OptionJSON) string {
+ var b strings.Builder
+ write := func(name, doc string, index, len int) {
+ if doc != "" {
+ unbroken := parBreakRE.ReplaceAllString(doc, "\\\n")
+ fmt.Fprintf(&b, "* %s\n", strings.TrimSpace(unbroken))
+ } else {
+ fmt.Fprintf(&b, "* `%s`\n", name)
+ }
+ }
+ if len(opt.EnumValues) > 0 && opt.Type == "enum" {
+ b.WriteString("\nMust be one of:\n\n")
+ for i, val := range opt.EnumValues {
+ write(val.Value, val.Doc, i, len(opt.EnumValues))
+ }
+ } else if len(opt.EnumKeys.Keys) > 0 && shouldShowEnumKeysInSettings(opt.Name) {
+ b.WriteString("\nCan contain any of:\n\n")
+ for i, val := range opt.EnumKeys.Keys {
+ write(val.Name, val.Doc, i, len(opt.EnumKeys.Keys))
+ }
+ }
+ return b.String()
+}
+
+func shouldShowEnumKeysInSettings(name string) bool {
+ // These fields have too many possible options to print.
+ return !(name == "analyses" || name == "codelenses" || name == "hints")
+}
+
+type EnumKeys struct {
+ ValueType string
+ Keys []EnumKey
+}
+
+type EnumKey struct {
+ Name string
+ Doc string
+ Default string
+}
+
+type EnumValue struct {
+ Value string
+ Doc string
+}
+
+type CommandJSON struct {
+ Command string
+ Title string
+ Doc string
+ ArgDoc string
+ ResultDoc string
+}
+
+func (c *CommandJSON) String() string {
+ return c.Command
+}
+
+func (c *CommandJSON) Write(w io.Writer) {
+ fmt.Fprintf(w, "### **%v**\nIdentifier: `%v`\n\n%v\n\n", c.Title, c.Command, c.Doc)
+ if c.ArgDoc != "" {
+ fmt.Fprintf(w, "Args:\n\n```\n%s\n```\n\n", c.ArgDoc)
+ }
+ if c.ResultDoc != "" {
+ fmt.Fprintf(w, "Result:\n\n```\n%s\n```\n\n", c.ResultDoc)
+ }
+}
+
+type LensJSON struct {
+ Lens string
+ Title string
+ Doc string
+}
+
+func (l *LensJSON) String() string {
+ return l.Title
+}
+
+func (l *LensJSON) Write(w io.Writer) {
+ fmt.Fprintf(w, "%s (%s): %s", l.Title, l.Lens, l.Doc)
+}
+
+type AnalyzerJSON struct {
+ Name string
+ Doc string
+ Default bool
+}
+
+func (a *AnalyzerJSON) String() string {
+ return a.Name
+}
+
+func (a *AnalyzerJSON) Write(w io.Writer) {
+ fmt.Fprintf(w, "%s (%s): %v", a.Name, a.Doc, a.Default)
+}
+
+type HintJSON struct {
+ Name string
+ Doc string
+ Default bool
+}
+
+func (h *HintJSON) String() string {
+ return h.Name
+}
+
+func (h *HintJSON) Write(w io.Writer) {
+ fmt.Fprintf(w, "%s (%s): %v", h.Name, h.Doc, h.Default)
+}
diff --git a/gopls/internal/lsp/source/options_test.go b/gopls/internal/lsp/source/options_test.go
new file mode 100644
index 000000000..4fa6ecf15
--- /dev/null
+++ b/gopls/internal/lsp/source/options_test.go
@@ -0,0 +1,206 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package source
+
+import (
+ "testing"
+ "time"
+)
+
+func TestSetOption(t *testing.T) {
+ tests := []struct {
+ name string
+ value interface{}
+ wantError bool
+ check func(Options) bool
+ }{
+ {
+ name: "symbolStyle",
+ value: "Dynamic",
+ check: func(o Options) bool { return o.SymbolStyle == DynamicSymbols },
+ },
+ {
+ name: "symbolStyle",
+ value: "",
+ wantError: true,
+ check: func(o Options) bool { return o.SymbolStyle == "" },
+ },
+ {
+ name: "symbolStyle",
+ value: false,
+ wantError: true,
+ check: func(o Options) bool { return o.SymbolStyle == "" },
+ },
+ {
+ name: "symbolMatcher",
+ value: "caseInsensitive",
+ check: func(o Options) bool { return o.SymbolMatcher == SymbolCaseInsensitive },
+ },
+ {
+ name: "completionBudget",
+ value: "2s",
+ check: func(o Options) bool { return o.CompletionBudget == 2*time.Second },
+ },
+ {
+ name: "staticcheck",
+ value: true,
+ check: func(o Options) bool { return o.Staticcheck == true },
+ wantError: true, // o.StaticcheckSupported is unset
+ },
+ {
+ name: "codelenses",
+ value: map[string]interface{}{"generate": true},
+ check: func(o Options) bool { return o.Codelenses["generate"] },
+ },
+ {
+ name: "allExperiments",
+ value: true,
+ check: func(o Options) bool {
+ return true // just confirm that we handle this setting
+ },
+ },
+ {
+ name: "hoverKind",
+ value: "FullDocumentation",
+ check: func(o Options) bool {
+ return o.HoverKind == FullDocumentation
+ },
+ },
+ {
+ name: "hoverKind",
+ value: "NoDocumentation",
+ check: func(o Options) bool {
+ return o.HoverKind == NoDocumentation
+ },
+ },
+ {
+ name: "hoverKind",
+ value: "SingleLine",
+ check: func(o Options) bool {
+ return o.HoverKind == SingleLine
+ },
+ },
+ {
+ name: "hoverKind",
+ value: "Structured",
+ check: func(o Options) bool {
+ return o.HoverKind == Structured
+ },
+ },
+ {
+ name: "ui.documentation.hoverKind",
+ value: "Structured",
+ check: func(o Options) bool {
+ return o.HoverKind == Structured
+ },
+ },
+ {
+ name: "matcher",
+ value: "Fuzzy",
+ check: func(o Options) bool {
+ return o.Matcher == Fuzzy
+ },
+ },
+ {
+ name: "matcher",
+ value: "CaseSensitive",
+ check: func(o Options) bool {
+ return o.Matcher == CaseSensitive
+ },
+ },
+ {
+ name: "matcher",
+ value: "CaseInsensitive",
+ check: func(o Options) bool {
+ return o.Matcher == CaseInsensitive
+ },
+ },
+ {
+ name: "env",
+ value: map[string]interface{}{"testing": "true"},
+ check: func(o Options) bool {
+ v, found := o.Env["testing"]
+ return found && v == "true"
+ },
+ },
+ {
+ name: "env",
+ value: []string{"invalid", "input"},
+ wantError: true,
+ check: func(o Options) bool {
+ return o.Env == nil
+ },
+ },
+ {
+ name: "directoryFilters",
+ value: []interface{}{"-node_modules", "+project_a"},
+ check: func(o Options) bool {
+ return len(o.DirectoryFilters) == 2
+ },
+ },
+ {
+ name: "directoryFilters",
+ value: []interface{}{"invalid"},
+ wantError: true,
+ check: func(o Options) bool {
+ return len(o.DirectoryFilters) == 0
+ },
+ },
+ {
+ name: "directoryFilters",
+ value: []string{"-invalid", "+type"},
+ wantError: true,
+ check: func(o Options) bool {
+ return len(o.DirectoryFilters) == 0
+ },
+ },
+ {
+ name: "annotations",
+ value: map[string]interface{}{
+ "Nil": false,
+ "noBounds": true,
+ },
+ wantError: true,
+ check: func(o Options) bool {
+ return !o.Annotations[Nil] && !o.Annotations[Bounds]
+ },
+ },
+ {
+ name: "vulncheck",
+ value: []interface{}{"invalid"},
+ wantError: true,
+ check: func(o Options) bool {
+ return o.Vulncheck == "" // For invalid value, default to 'off'.
+ },
+ },
+ {
+ name: "vulncheck",
+ value: "Imports",
+ check: func(o Options) bool {
+ return o.Vulncheck == ModeVulncheckImports // For invalid value, default to 'off'.
+ },
+ },
+ {
+ name: "vulncheck",
+ value: "imports",
+ check: func(o Options) bool {
+ return o.Vulncheck == ModeVulncheckImports
+ },
+ },
+ }
+
+ for _, test := range tests {
+ var opts Options
+ result := opts.set(test.name, test.value, map[string]struct{}{})
+ if (result.Error != nil) != test.wantError {
+ t.Fatalf("Options.set(%q, %v): result.Error = %v, want error: %t", test.name, test.value, result.Error, test.wantError)
+ }
+ // TODO: this could be made much better using cmp.Diff, if that becomes
+ // available in this module.
+ if !test.check(opts) {
+ t.Errorf("Options.set(%q, %v): unexpected result %+v", test.name, test.value, opts)
+ }
+ }
+}
diff --git a/gopls/internal/lsp/source/references.go b/gopls/internal/lsp/source/references.go
new file mode 100644
index 000000000..3f8960180
--- /dev/null
+++ b/gopls/internal/lsp/source/references.go
@@ -0,0 +1,582 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package source
+
+// This file defines the 'references' query based on a serializable
+// index constructed during type checking, thus avoiding the need to
+// type-check packages at search time.
+//
+// See the ./xrefs/ subpackage for the index construction and lookup.
+//
+// This implementation does not intermingle objects from distinct
+// calls to TypeCheck.
+
+import (
+ "context"
+ "fmt"
+ "go/ast"
+ "go/token"
+ "go/types"
+ "sort"
+ "strings"
+ "sync"
+
+ "golang.org/x/sync/errgroup"
+ "golang.org/x/tools/go/types/objectpath"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/safetoken"
+ "golang.org/x/tools/gopls/internal/lsp/source/methodsets"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/bug"
+ "golang.org/x/tools/internal/event"
+)
+
+// References returns a list of all references (sorted with
+// definitions before uses) to the object denoted by the identifier at
+// the given file/position, searching the entire workspace.
+func References(ctx context.Context, snapshot Snapshot, fh FileHandle, pp protocol.Position, includeDeclaration bool) ([]protocol.Location, error) {
+ references, err := references(ctx, snapshot, fh, pp, includeDeclaration)
+ if err != nil {
+ return nil, err
+ }
+ locations := make([]protocol.Location, len(references))
+ for i, ref := range references {
+ locations[i] = ref.location
+ }
+ return locations, nil
+}
+
+// A reference describes an identifier that refers to the same
+// object as the subject of a References query.
+type reference struct {
+ isDeclaration bool
+ location protocol.Location
+ pkgPath PackagePath // of declaring package (same for all elements of the slice)
+}
+
+// references returns a list of all references (sorted with
+// definitions before uses) to the object denoted by the identifier at
+// the given file/position, searching the entire workspace.
+func references(ctx context.Context, snapshot Snapshot, f FileHandle, pp protocol.Position, includeDeclaration bool) ([]reference, error) {
+ ctx, done := event.Start(ctx, "source.References2")
+ defer done()
+
+ // Is the cursor within the package name declaration?
+ _, inPackageName, err := parsePackageNameDecl(ctx, snapshot, f, pp)
+ if err != nil {
+ return nil, err
+ }
+
+ var refs []reference
+ if inPackageName {
+ refs, err = packageReferences(ctx, snapshot, f.URI())
+ } else {
+ refs, err = ordinaryReferences(ctx, snapshot, f.URI(), pp)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ sort.Slice(refs, func(i, j int) bool {
+ x, y := refs[i], refs[j]
+ if x.isDeclaration != y.isDeclaration {
+ return x.isDeclaration // decls < refs
+ }
+ return protocol.CompareLocation(x.location, y.location) < 0
+ })
+
+ // De-duplicate by location, and optionally remove declarations.
+ out := refs[:0]
+ for _, ref := range refs {
+ if !includeDeclaration && ref.isDeclaration {
+ continue
+ }
+ if len(out) == 0 || out[len(out)-1].location != ref.location {
+ out = append(out, ref)
+ }
+ }
+ refs = out
+
+ return refs, nil
+}
+
+// packageReferences returns a list of references to the package
+// declaration of the specified name and uri by searching among the
+// import declarations of all packages that directly import the target
+// package.
+func packageReferences(ctx context.Context, snapshot Snapshot, uri span.URI) ([]reference, error) {
+ metas, err := snapshot.MetadataForFile(ctx, uri)
+ if err != nil {
+ return nil, err
+ }
+ if len(metas) == 0 {
+ return nil, fmt.Errorf("found no package containing %s", uri)
+ }
+
+ var refs []reference
+
+ // Find external references to the package declaration
+ // from each direct import of the package.
+ //
+ // The narrowest package is the most broadly imported,
+ // so we choose it for the external references.
+ //
+ // But if the file ends with _test.go then we need to
+ // find the package it is testing; there's no direct way
+ // to do that, so pick a file from the same package that
+ // doesn't end in _test.go and start over.
+ narrowest := metas[0]
+ if narrowest.ForTest != "" && strings.HasSuffix(string(uri), "_test.go") {
+ for _, f := range narrowest.CompiledGoFiles {
+ if !strings.HasSuffix(string(f), "_test.go") {
+ return packageReferences(ctx, snapshot, f)
+ }
+ }
+ // This package has no non-test files.
+ // Skip the search for external references.
+ // (Conceivably one could blank-import an empty package, but why?)
+ } else {
+ rdeps, err := snapshot.ReverseDependencies(ctx, narrowest.ID, false) // direct
+ if err != nil {
+ return nil, err
+ }
+ for _, rdep := range rdeps {
+ for _, uri := range rdep.CompiledGoFiles {
+ fh, err := snapshot.GetFile(ctx, uri)
+ if err != nil {
+ return nil, err
+ }
+ f, err := snapshot.ParseGo(ctx, fh, ParseHeader)
+ if err != nil {
+ return nil, err
+ }
+ for _, imp := range f.File.Imports {
+ if rdep.DepsByImpPath[UnquoteImportPath(imp)] == narrowest.ID {
+ refs = append(refs, reference{
+ isDeclaration: false,
+ location: mustLocation(f, imp),
+ pkgPath: narrowest.PkgPath,
+ })
+ }
+ }
+ }
+ }
+ }
+
+ // Find internal "references" to the package from
+ // of each package declaration in the target package itself.
+ //
+ // The widest package (possibly a test variant) has the
+ // greatest number of files and thus we choose it for the
+ // "internal" references.
+ widest := metas[len(metas)-1]
+ for _, uri := range widest.CompiledGoFiles {
+ fh, err := snapshot.GetFile(ctx, uri)
+ if err != nil {
+ return nil, err
+ }
+ f, err := snapshot.ParseGo(ctx, fh, ParseHeader)
+ if err != nil {
+ return nil, err
+ }
+ refs = append(refs, reference{
+ isDeclaration: true, // (one of many)
+ location: mustLocation(f, f.File.Name),
+ pkgPath: widest.PkgPath,
+ })
+ }
+
+ return refs, nil
+}
+
+// ordinaryReferences computes references for all ordinary objects (not package declarations).
+func ordinaryReferences(ctx context.Context, snapshot Snapshot, uri span.URI, pp protocol.Position) ([]reference, error) {
+ // Strategy: use the reference information computed by the
+ // type checker to find the declaration. First type-check this
+ // package to find the declaration, then type check the
+ // declaring package (which may be different), plus variants,
+ // to find local (in-package) references.
+ // Global references are satisfied by the index.
+
+ // Strictly speaking, a wider package could provide a different
+ // declaration (e.g. because the _test.go files can change the
+ // meaning of a field or method selection), but the narrower
+ // package reports the more broadly referenced object.
+ pkg, pgf, err := PackageForFile(ctx, snapshot, uri, NarrowestPackage)
+ if err != nil {
+ return nil, err
+ }
+
+ // Find the selected object (declaration or reference).
+ pos, err := pgf.PositionPos(pp)
+ if err != nil {
+ return nil, err
+ }
+ candidates, _, err := objectsAt(pkg.GetTypesInfo(), pgf.File, pos)
+ if err != nil {
+ return nil, err
+ }
+
+ // Pick first object arbitrarily.
+ // The case variables of a type switch have different
+ // types but that difference is immaterial here.
+ var obj types.Object
+ for obj = range candidates {
+ break
+ }
+ if obj == nil {
+ return nil, ErrNoIdentFound // can't happen
+ }
+
+ // nil, error, error.Error, iota, or other built-in?
+ if obj.Pkg() == nil {
+ // For some reason, existing tests require that iota has no references,
+ // nor an error. TODO(adonovan): do something more principled.
+ if obj.Name() == "iota" {
+ return nil, nil
+ }
+
+ return nil, fmt.Errorf("references to builtin %q are not supported", obj.Name())
+ }
+
+ // Find metadata of all packages containing the object's defining file.
+ // This may include the query pkg, and possibly other variants.
+ declPosn := safetoken.StartPosition(pkg.FileSet(), obj.Pos())
+ declURI := span.URIFromPath(declPosn.Filename)
+ variants, err := snapshot.MetadataForFile(ctx, declURI)
+ if err != nil {
+ return nil, err
+ }
+ if len(variants) == 0 {
+ return nil, fmt.Errorf("no packages for file %q", declURI) // can't happen
+ }
+
+ // Is object exported?
+ // If so, compute scope and targets of the global search.
+ var (
+ globalScope = make(map[PackageID]*Metadata)
+ globalTargets map[PackagePath]map[objectpath.Path]unit
+ )
+ // TODO(adonovan): what about generic functions. Need to consider both
+ // uninstantiated and instantiated. The latter have no objectpath. Use Origin?
+ if path, err := objectpath.For(obj); err == nil && obj.Exported() {
+ pkgPath := variants[0].PkgPath // (all variants have same package path)
+ globalTargets = map[PackagePath]map[objectpath.Path]unit{
+ pkgPath: {path: {}}, // primary target
+ }
+
+ // How far need we search?
+ // For package-level objects, we need only search the direct importers.
+ // For fields and methods, we must search transitively.
+ transitive := obj.Pkg().Scope().Lookup(obj.Name()) != obj
+
+ // The scope is the union of rdeps of each variant.
+ // (Each set is disjoint so there's no benefit to
+ // to combining the metadata graph traversals.)
+ for _, m := range variants {
+ rdeps, err := snapshot.ReverseDependencies(ctx, m.ID, transitive)
+ if err != nil {
+ return nil, err
+ }
+ for id, rdep := range rdeps {
+ globalScope[id] = rdep
+ }
+ }
+
+ // Is object a method?
+ //
+ // If so, expand the search so that the targets include
+ // all methods that correspond to it through interface
+ // satisfaction, and the scope includes the rdeps of
+ // the package that declares each corresponding type.
+ if recv := effectiveReceiver(obj); recv != nil {
+ if err := expandMethodSearch(ctx, snapshot, obj.(*types.Func), recv, globalScope, globalTargets); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ // The search functions will call report(loc) for each hit.
+ var (
+ refsMu sync.Mutex
+ refs []reference
+ )
+ report := func(loc protocol.Location, isDecl bool) {
+ ref := reference{
+ isDeclaration: isDecl,
+ location: loc,
+ pkgPath: pkg.Metadata().PkgPath,
+ }
+ refsMu.Lock()
+ refs = append(refs, ref)
+ refsMu.Unlock()
+ }
+
+ // Loop over the variants of the declaring package,
+ // and perform both the local (in-package) and global
+ // (cross-package) searches, in parallel.
+ //
+ // TODO(adonovan): opt: support LSP reference streaming. See:
+ // - https://github.com/microsoft/vscode-languageserver-node/pull/164
+ // - https://github.com/microsoft/language-server-protocol/pull/182
+ //
+ // Careful: this goroutine must not return before group.Wait.
+ var group errgroup.Group
+
+ // Compute local references for each variant.
+ for _, m := range variants {
+ // We want the ordinary importable package,
+ // plus any test-augmented variants, since
+ // declarations in _test.go files may change
+ // the reference of a selection, or even a
+ // field into a method or vice versa.
+ //
+ // But we don't need intermediate test variants,
+ // as their local references will be covered
+ // already by other variants.
+ if m.IsIntermediateTestVariant() {
+ continue
+ }
+ m := m
+ group.Go(func() error {
+ return localReferences(ctx, snapshot, declURI, declPosn.Offset, m, report)
+ })
+ }
+
+ // Compute global references for selected reverse dependencies.
+ group.Go(func() error {
+ var globalIDs []PackageID
+ for id := range globalScope {
+ globalIDs = append(globalIDs, id)
+ }
+ indexes, err := snapshot.References(ctx, globalIDs...)
+ if err != nil {
+ return err
+ }
+ for _, index := range indexes {
+ for _, loc := range index.Lookup(globalTargets) {
+ report(loc, false)
+ }
+ }
+ return nil
+ })
+
+ if err := group.Wait(); err != nil {
+ return nil, err
+ }
+ return refs, nil
+}
+
+// expandMethodSearch expands the scope and targets of a global search
+// for an exported method to include all methods that correspond to
+// it through interface satisfaction.
+//
+// recv is the method's effective receiver type, for method-set computations.
+func expandMethodSearch(ctx context.Context, snapshot Snapshot, method *types.Func, recv types.Type, scope map[PackageID]*Metadata, targets map[PackagePath]map[objectpath.Path]unit) error {
+ // Compute the method-set fingerprint used as a key to the global search.
+ key, hasMethods := methodsets.KeyOf(recv)
+ if !hasMethods {
+ return bug.Errorf("KeyOf(%s)={} yet %s is a method", recv, method)
+ }
+ metas, err := snapshot.AllMetadata(ctx)
+ if err != nil {
+ return err
+ }
+ allIDs := make([]PackageID, 0, len(metas))
+ for _, m := range metas {
+ allIDs = append(allIDs, m.ID)
+ }
+ // Search the methodset index of each package in the workspace.
+ indexes, err := snapshot.MethodSets(ctx, allIDs...)
+ if err != nil {
+ return err
+ }
+ var mu sync.Mutex // guards scope and targets
+ var group errgroup.Group
+ for i, index := range indexes {
+ i := i
+ index := index
+ group.Go(func() error {
+ // Consult index for matching methods.
+ results := index.Search(key, method.Name())
+ if len(results) == 0 {
+ return nil
+ }
+
+ // Expand global search scope to include rdeps of this pkg.
+ rdeps, err := snapshot.ReverseDependencies(ctx, allIDs[i], true)
+ if err != nil {
+ return err
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ for _, rdep := range rdeps {
+ scope[rdep.ID] = rdep
+ }
+
+ // Add each corresponding method the to set of global search targets.
+ for _, res := range results {
+ methodPkg := PackagePath(res.PkgPath)
+ opaths, ok := targets[methodPkg]
+ if !ok {
+ opaths = make(map[objectpath.Path]unit)
+ targets[methodPkg] = opaths
+ }
+ opaths[res.ObjectPath] = unit{}
+ }
+ return nil
+ })
+ }
+ return group.Wait()
+}
+
+// localReferences reports each reference to the object
+// declared at the specified URI/offset within its enclosing package m.
+func localReferences(ctx context.Context, snapshot Snapshot, declURI span.URI, declOffset int, m *Metadata, report func(loc protocol.Location, isDecl bool)) error {
+ pkgs, err := snapshot.TypeCheck(ctx, m.ID)
+ if err != nil {
+ return err
+ }
+ pkg := pkgs[0] // narrowest
+
+ // Find declaration of corresponding object
+ // in this package based on (URI, offset).
+ pgf, err := pkg.File(declURI)
+ if err != nil {
+ return err
+ }
+ pos, err := safetoken.Pos(pgf.Tok, declOffset)
+ if err != nil {
+ return err
+ }
+ targets, _, err := objectsAt(pkg.GetTypesInfo(), pgf.File, pos)
+ if err != nil {
+ return err // unreachable? (probably caught earlier)
+ }
+
+ // Report the locations of the declaration(s).
+ // TODO(adonovan): what about for corresponding methods? Add tests.
+ for _, node := range targets {
+ report(mustLocation(pgf, node), true)
+ }
+
+ // If we're searching for references to a method, broaden the
+ // search to include references to corresponding methods of
+ // mutually assignable receiver types.
+ // (We use a slice, but objectsAt never returns >1 methods.)
+ var methodRecvs []types.Type
+ var methodName string // name of an arbitrary target, iff a method
+ for obj := range targets {
+ if t := effectiveReceiver(obj); t != nil {
+ methodRecvs = append(methodRecvs, t)
+ methodName = obj.Name()
+ }
+ }
+
+ // matches reports whether obj either is or corresponds to a target.
+ // (Correspondence is defined as usual for interface methods.)
+ matches := func(obj types.Object) bool {
+ if targets[obj] != nil {
+ return true
+ } else if methodRecvs != nil && obj.Name() == methodName {
+ if orecv := effectiveReceiver(obj); orecv != nil {
+ for _, mrecv := range methodRecvs {
+ if concreteImplementsIntf(orecv, mrecv) {
+ return true
+ }
+ }
+ }
+ }
+ return false
+ }
+
+ // Scan through syntax looking for uses of one of the target objects.
+ for _, pgf := range pkg.CompiledGoFiles() {
+ ast.Inspect(pgf.File, func(n ast.Node) bool {
+ if id, ok := n.(*ast.Ident); ok {
+ if obj, ok := pkg.GetTypesInfo().Uses[id]; ok && matches(obj) {
+ report(mustLocation(pgf, id), false)
+ }
+ }
+ return true
+ })
+ }
+ return nil
+}
+
+// effectiveReceiver returns the effective receiver type for method-set
+// comparisons for obj, if it is a method, or nil otherwise.
+func effectiveReceiver(obj types.Object) types.Type {
+ if fn, ok := obj.(*types.Func); ok {
+ if recv := fn.Type().(*types.Signature).Recv(); recv != nil {
+ return methodsets.EnsurePointer(recv.Type())
+ }
+ }
+ return nil
+}
+
+// objectsAt returns the non-empty set of objects denoted (def or use)
+// by the specified position within a file syntax tree, or an error if
+// none were found.
+//
+// The result may contain more than one element because all case
+// variables of a type switch appear to be declared at the same
+// position.
+//
+// Each object is mapped to the syntax node that was treated as an
+// identifier, which is not always an ast.Ident. The second component
+// of the result is the innermost node enclosing pos.
+//
+// TODO(adonovan): factor in common with referencedObject.
+func objectsAt(info *types.Info, file *ast.File, pos token.Pos) (map[types.Object]ast.Node, ast.Node, error) {
+ path := pathEnclosingObjNode(file, pos)
+ if path == nil {
+ return nil, nil, ErrNoIdentFound
+ }
+
+ targets := make(map[types.Object]ast.Node)
+
+ switch leaf := path[0].(type) {
+ case *ast.Ident:
+ // If leaf represents an implicit type switch object or the type
+ // switch "assign" variable, expand to all of the type switch's
+ // implicit objects.
+ if implicits, _ := typeSwitchImplicits(info, path); len(implicits) > 0 {
+ for _, obj := range implicits {
+ targets[obj] = leaf
+ }
+ } else {
+ obj := info.ObjectOf(leaf)
+ if obj == nil {
+ return nil, nil, fmt.Errorf("%w for %q", errNoObjectFound, leaf.Name)
+ }
+ targets[obj] = leaf
+ }
+ case *ast.ImportSpec:
+ // Look up the implicit *types.PkgName.
+ obj := info.Implicits[leaf]
+ if obj == nil {
+ return nil, nil, fmt.Errorf("%w for import %s", errNoObjectFound, UnquoteImportPath(leaf))
+ }
+ targets[obj] = leaf
+ }
+
+ if len(targets) == 0 {
+ return nil, nil, fmt.Errorf("objectAt: internal error: no targets") // can't happen
+ }
+ return targets, path[0], nil
+}
+
+// mustLocation reports the location interval a syntax node,
+// which must belong to m.File.
+//
+// Safe for use only by references2 and implementations2.
+func mustLocation(pgf *ParsedGoFile, n ast.Node) protocol.Location {
+ loc, err := pgf.NodeLocation(n)
+ if err != nil {
+ panic(err) // can't happen in references2 or implementations2
+ }
+ return loc
+}
diff --git a/gopls/internal/lsp/source/rename.go b/gopls/internal/lsp/source/rename.go
new file mode 100644
index 000000000..c67f15ce5
--- /dev/null
+++ b/gopls/internal/lsp/source/rename.go
@@ -0,0 +1,1244 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package source
+
+// TODO(adonovan):
+//
+// - method of generic concrete type -> arbitrary instances of same
+//
+// - make satisfy work across packages.
+//
+// - tests, tests, tests:
+// - play with renamings in the k8s tree.
+// - generics
+// - error cases (e.g. conflicts)
+// - renaming a symbol declared in the module cache
+// (currently proceeds with half of the renaming!)
+// - make sure all tests have both a local and a cross-package analogue.
+// - look at coverage
+// - special cases: embedded fields, interfaces, test variants,
+// function-local things with uppercase names;
+// packages with type errors (currently 'satisfy' rejects them),
+// pakage with missing imports;
+//
+// - measure performance in k8s.
+//
+// - The original gorename tool assumed well-typedness, but the gopls feature
+// does no such check (which actually makes it much more useful).
+// Audit to ensure it is safe on ill-typed code.
+//
+// - Generics support was no doubt buggy before but incrementalization
+// may have exacerbated it. If the problem were just about objects,
+// defs and uses it would be fairly simple, but type assignability
+// comes into play in the 'satisfy' check for method renamings.
+// De-instantiating Vector[int] to Vector[T] changes its type.
+// We need to come up with a theory for the satisfy check that
+// works with generics, and across packages. We currently have no
+// simple way to pass types between packages (think: objectpath for
+// types), though presumably exportdata could be pressed into service.
+//
+// - FileID-based de-duplication of edits to different URIs for the same file.
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "go/ast"
+ "go/token"
+ "go/types"
+ "path"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+
+ "golang.org/x/mod/modfile"
+ "golang.org/x/tools/go/ast/astutil"
+ "golang.org/x/tools/go/types/objectpath"
+ "golang.org/x/tools/go/types/typeutil"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/safetoken"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/bug"
+ "golang.org/x/tools/internal/diff"
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/typeparams"
+ "golang.org/x/tools/refactor/satisfy"
+)
+
+// A renamer holds state of a single call to renameObj, which renames
+// an object (or several coupled objects) within a single type-checked
+// syntax package.
+type renamer struct {
+ pkg Package // the syntax package in which the renaming is applied
+ objsToUpdate map[types.Object]bool // records progress of calls to check
+ hadConflicts bool
+ conflicts []string
+ from, to string
+ satisfyConstraints map[satisfy.Constraint]bool
+ msets typeutil.MethodSetCache
+ changeMethods bool
+}
+
+// A PrepareItem holds the result of a "prepare rename" operation:
+// the source range and value of a selected identifier.
+type PrepareItem struct {
+ Range protocol.Range
+ Text string
+}
+
+// PrepareRename searches for a valid renaming at position pp.
+//
+// The returned usererr is intended to be displayed to the user to explain why
+// the prepare fails. Probably we could eliminate the redundancy in returning
+// two errors, but for now this is done defensively.
+func PrepareRename(ctx context.Context, snapshot Snapshot, f FileHandle, pp protocol.Position) (_ *PrepareItem, usererr, err error) {
+ ctx, done := event.Start(ctx, "source.PrepareRename")
+ defer done()
+
+ // Is the cursor within the package name declaration?
+ if pgf, inPackageName, err := parsePackageNameDecl(ctx, snapshot, f, pp); err != nil {
+ return nil, err, err
+ } else if inPackageName {
+ item, err := prepareRenamePackageName(ctx, snapshot, pgf)
+ return item, err, err
+ }
+
+ // Ordinary (non-package) renaming.
+ //
+ // Type-check the current package, locate the reference at the position,
+ // validate the object, and report its name and range.
+ //
+ // TODO(adonovan): in all cases below, we return usererr=nil,
+ // which means we return (nil, nil) at the protocol
+ // layer. This seems like a bug, or at best an exploitation of
+ // knowledge of VSCode-specific behavior. Can we avoid that?
+ pkg, pgf, err := PackageForFile(ctx, snapshot, f.URI(), NarrowestPackage)
+ if err != nil {
+ return nil, nil, err
+ }
+ pos, err := pgf.PositionPos(pp)
+ if err != nil {
+ return nil, nil, err
+ }
+ targets, node, err := objectsAt(pkg.GetTypesInfo(), pgf.File, pos)
+ if err != nil {
+ return nil, nil, err
+ }
+ var obj types.Object
+ for obj = range targets {
+ break // pick one arbitrarily
+ }
+ if err := checkRenamable(obj); err != nil {
+ return nil, nil, err
+ }
+ rng, err := pgf.NodeRange(node)
+ if err != nil {
+ return nil, nil, err
+ }
+ if _, isImport := node.(*ast.ImportSpec); isImport {
+ // We're not really renaming the import path.
+ rng.End = rng.Start
+ }
+ return &PrepareItem{
+ Range: rng,
+ Text: obj.Name(),
+ }, nil, nil
+}
+
+func prepareRenamePackageName(ctx context.Context, snapshot Snapshot, pgf *ParsedGoFile) (*PrepareItem, error) {
+ // Does the client support file renaming?
+ fileRenameSupported := false
+ for _, op := range snapshot.View().Options().SupportedResourceOperations {
+ if op == protocol.Rename {
+ fileRenameSupported = true
+ break
+ }
+ }
+ if !fileRenameSupported {
+ return nil, errors.New("can't rename package: LSP client does not support file renaming")
+ }
+
+ // Check validity of the metadata for the file's containing package.
+ fileMeta, err := snapshot.MetadataForFile(ctx, pgf.URI)
+ if err != nil {
+ return nil, err
+ }
+ if len(fileMeta) == 0 {
+ return nil, fmt.Errorf("no packages found for file %q", pgf.URI)
+ }
+ meta := fileMeta[0]
+ if meta.Name == "main" {
+ return nil, fmt.Errorf("can't rename package \"main\"")
+ }
+ if strings.HasSuffix(string(meta.Name), "_test") {
+ return nil, fmt.Errorf("can't rename x_test packages")
+ }
+ if meta.Module == nil {
+ return nil, fmt.Errorf("can't rename package: missing module information for package %q", meta.PkgPath)
+ }
+ if meta.Module.Path == string(meta.PkgPath) {
+ return nil, fmt.Errorf("can't rename package: package path %q is the same as module path %q", meta.PkgPath, meta.Module.Path)
+ }
+
+ // Return the location of the package declaration.
+ rng, err := pgf.NodeRange(pgf.File.Name)
+ if err != nil {
+ return nil, err
+ }
+ return &PrepareItem{
+ Range: rng,
+ Text: string(meta.Name),
+ }, nil
+}
+
+func checkRenamable(obj types.Object) error {
+ switch obj := obj.(type) {
+ case *types.Var:
+ if obj.Embedded() {
+ return fmt.Errorf("can't rename embedded fields: rename the type directly or name the field")
+ }
+ case *types.Builtin, *types.Nil:
+ return fmt.Errorf("%s is built in and cannot be renamed", obj.Name())
+ }
+ if obj.Pkg() == nil || obj.Pkg().Path() == "unsafe" {
+ // e.g. error.Error, unsafe.Pointer
+ return fmt.Errorf("%s is built in and cannot be renamed", obj.Name())
+ }
+ if obj.Name() == "_" {
+ return errors.New("can't rename \"_\"")
+ }
+ return nil
+}
+
+// Rename returns a map of TextEdits for each file modified when renaming a
+// given identifier within a package and a boolean value of true for renaming
+// package and false otherwise.
+func Rename(ctx context.Context, snapshot Snapshot, f FileHandle, pp protocol.Position, newName string) (map[span.URI][]protocol.TextEdit, bool, error) {
+ ctx, done := event.Start(ctx, "source.Rename")
+ defer done()
+
+ if !isValidIdentifier(newName) {
+ return nil, false, fmt.Errorf("invalid identifier to rename: %q", newName)
+ }
+
+ // Cursor within package name declaration?
+ _, inPackageName, err := parsePackageNameDecl(ctx, snapshot, f, pp)
+ if err != nil {
+ return nil, false, err
+ }
+
+ var editMap map[span.URI][]diff.Edit
+ if inPackageName {
+ editMap, err = renamePackageName(ctx, snapshot, f, PackageName(newName))
+ } else {
+ editMap, err = renameOrdinary(ctx, snapshot, f, pp, newName)
+ }
+ if err != nil {
+ return nil, false, err
+ }
+
+ // Convert edits to protocol form.
+ result := make(map[span.URI][]protocol.TextEdit)
+ for uri, edits := range editMap {
+ // Sort and de-duplicate edits.
+ //
+ // Overlapping edits may arise in local renamings (due
+ // to type switch implicits) and globals ones (due to
+ // processing multiple package variants).
+ //
+ // We assume renaming produces diffs that are all
+ // replacements (no adjacent insertions that might
+ // become reordered) and that are either identical or
+ // non-overlapping.
+ diff.SortEdits(edits)
+ filtered := edits[:0]
+ for i, edit := range edits {
+ if i == 0 || edit != filtered[len(filtered)-1] {
+ filtered = append(filtered, edit)
+ }
+ }
+ edits = filtered
+
+ // TODO(adonovan): the logic above handles repeat edits to the
+ // same file URI (e.g. as a member of package p and p_test) but
+ // is not sufficient to handle file-system level aliasing arising
+ // from symbolic or hard links. For that, we should use a
+ // robustio-FileID-keyed map.
+ // See https://go.dev/cl/457615 for example.
+ // This really occurs in practice, e.g. kubernetes has
+ // vendor/k8s.io/kubectl -> ../../staging/src/k8s.io/kubectl.
+ fh, err := snapshot.GetFile(ctx, uri)
+ if err != nil {
+ return nil, false, err
+ }
+ data, err := fh.Read()
+ if err != nil {
+ return nil, false, err
+ }
+ m := protocol.NewMapper(uri, data)
+ protocolEdits, err := ToProtocolEdits(m, edits)
+ if err != nil {
+ return nil, false, err
+ }
+ result[uri] = protocolEdits
+ }
+
+ return result, inPackageName, nil
+}
+
+// renameOrdinary renames an ordinary (non-package) name throughout the workspace.
+func renameOrdinary(ctx context.Context, snapshot Snapshot, f FileHandle, pp protocol.Position, newName string) (map[span.URI][]diff.Edit, error) {
+ // Type-check the referring package and locate the object(s).
+ // We choose the widest variant as, for non-exported
+ // identifiers, it is the only package we need.
+ pkg, pgf, err := PackageForFile(ctx, snapshot, f.URI(), WidestPackage)
+ if err != nil {
+ return nil, err
+ }
+ pos, err := pgf.PositionPos(pp)
+ if err != nil {
+ return nil, err
+ }
+ targets, _, err := objectsAt(pkg.GetTypesInfo(), pgf.File, pos)
+ if err != nil {
+ return nil, err
+ }
+
+ // Pick a representative object arbitrarily.
+ // (All share the same name, pos, and kind.)
+ var obj types.Object
+ for obj = range targets {
+ break
+ }
+ if obj.Name() == newName {
+ return nil, fmt.Errorf("old and new names are the same: %s", newName)
+ }
+ if err := checkRenamable(obj); err != nil {
+ return nil, err
+ }
+
+ // Find objectpath, if object is exported ("" otherwise).
+ var declObjPath objectpath.Path
+ if obj.Exported() {
+ // objectpath.For requires the origin of a generic
+ // function or type, not an instantiation (a bug?).
+ // Unfortunately we can't call {Func,TypeName}.Origin
+ // as these are not available in go/types@go1.18.
+ // So we take a scenic route.
+ switch obj.(type) { // avoid "obj :=" since cases reassign the var
+ case *types.TypeName:
+ if named, ok := obj.Type().(*types.Named); ok {
+ obj = named.Obj()
+ }
+ case *types.Func:
+ obj = funcOrigin(obj.(*types.Func))
+ case *types.Var:
+ // TODO(adonovan): do vars need the origin treatment too? (issue #58462)
+ }
+ if path, err := objectpath.For(obj); err == nil {
+ declObjPath = path
+ }
+ }
+
+ // Nonexported? Search locally.
+ if declObjPath == "" {
+ var objects []types.Object
+ for obj := range targets {
+ objects = append(objects, obj)
+ }
+ editMap, _, err := renameObjects(ctx, snapshot, newName, pkg, objects...)
+ return editMap, err
+ }
+
+ // Exported: search globally.
+ //
+ // For exported package-level var/const/func/type objects, the
+ // search scope is just the direct importers.
+ //
+ // For exported fields and methods, the scope is the
+ // transitive rdeps. (The exportedness of the field's struct
+ // or method's receiver is irrelevant.)
+ transitive := false
+ switch obj.(type) {
+ case *types.TypeName:
+ // Renaming an exported package-level type
+ // requires us to inspect all transitive rdeps
+ // in the event that the type is embedded.
+ //
+ // TODO(adonovan): opt: this is conservative
+ // but inefficient. Instead, expand the scope
+ // of the search only if we actually encounter
+ // an embedding of the type, and only then to
+ // the rdeps of the embedding package.
+ if obj.Parent() == obj.Pkg().Scope() {
+ transitive = true
+ }
+
+ case *types.Var:
+ if obj.(*types.Var).IsField() {
+ transitive = true // field
+ }
+
+ // TODO(adonovan): opt: process only packages that
+ // contain a reference (xrefs) to the target field.
+
+ case *types.Func:
+ if obj.Type().(*types.Signature).Recv() != nil {
+ transitive = true // method
+ }
+
+ // It's tempting to optimize by skipping
+ // packages that don't contain a reference to
+ // the method in the xrefs index, but we still
+ // need to apply the satisfy check to those
+ // packages to find assignment statements that
+ // might expands the scope of the renaming.
+ }
+
+ // Type-check all the packages to inspect.
+ declURI := span.URIFromPath(pkg.FileSet().File(obj.Pos()).Name())
+ pkgs, err := typeCheckReverseDependencies(ctx, snapshot, declURI, transitive)
+ if err != nil {
+ return nil, err
+ }
+
+ // Apply the renaming to the (initial) object.
+ declPkgPath := PackagePath(obj.Pkg().Path())
+ return renameExported(ctx, snapshot, pkgs, declPkgPath, declObjPath, newName)
+}
+
+// funcOrigin is a go1.18-portable implementation of (*types.Func).Origin.
+func funcOrigin(fn *types.Func) *types.Func {
+ // Method?
+ if fn.Type().(*types.Signature).Recv() != nil {
+ return typeparams.OriginMethod(fn)
+ }
+
+ // Package-level function?
+ // (Assume the origin has the same position.)
+ gen := fn.Pkg().Scope().Lookup(fn.Name())
+ if gen != nil && gen.Pos() == fn.Pos() {
+ return gen.(*types.Func)
+ }
+
+ return fn
+}
+
+// typeCheckReverseDependencies returns the type-checked packages for
+// the reverse dependencies of all packages variants containing
+// file declURI. The packages are in some topological order.
+//
+// It includes all variants (even intermediate test variants) for the
+// purposes of computing reverse dependencies, but discards ITVs for
+// the actual renaming work.
+//
+// (This neglects obscure edge cases where a _test.go file changes the
+// selectors used only in an ITV, but life is short. Also sin must be
+// punished.)
+func typeCheckReverseDependencies(ctx context.Context, snapshot Snapshot, declURI span.URI, transitive bool) ([]Package, error) {
+ variants, err := snapshot.MetadataForFile(ctx, declURI)
+ if err != nil {
+ return nil, err
+ }
+ allRdeps := make(map[PackageID]*Metadata)
+ for _, variant := range variants {
+ rdeps, err := snapshot.ReverseDependencies(ctx, variant.ID, transitive)
+ if err != nil {
+ return nil, err
+ }
+ allRdeps[variant.ID] = variant // include self
+ for id, meta := range rdeps {
+ allRdeps[id] = meta
+ }
+ }
+ var ids []PackageID
+ for id, meta := range allRdeps {
+ if meta.IsIntermediateTestVariant() {
+ continue
+ }
+ ids = append(ids, id)
+ }
+
+ // Sort the packages into some topological order of the
+ // (unfiltered) metadata graph.
+ SortPostOrder(snapshot, ids)
+
+ // Dependencies must be visited first since they can expand
+ // the search set. Ideally we would process the (filtered) set
+ // of packages in the parallel postorder of the snapshot's
+ // (unfiltered) metadata graph, but this is quite tricky
+ // without a good graph abstraction.
+ //
+ // For now, we visit packages sequentially in order of
+ // ascending height, like an inverted breadth-first search.
+ //
+ // Type checking is by far the dominant cost, so
+ // overlapping it with renaming may not be worthwhile.
+ return snapshot.TypeCheck(ctx, ids...)
+}
+
+// SortPostOrder sorts the IDs so that if x depends on y, then y appears before x.
+func SortPostOrder(meta MetadataSource, ids []PackageID) {
+ postorder := make(map[PackageID]int)
+ order := 0
+ var visit func(PackageID)
+ visit = func(id PackageID) {
+ if _, ok := postorder[id]; !ok {
+ postorder[id] = -1 // break recursion
+ if m := meta.Metadata(id); m != nil {
+ for _, depID := range m.DepsByPkgPath {
+ visit(depID)
+ }
+ }
+ order++
+ postorder[id] = order
+ }
+ }
+ for _, id := range ids {
+ visit(id)
+ }
+ sort.Slice(ids, func(i, j int) bool {
+ return postorder[ids[i]] < postorder[ids[j]]
+ })
+}
+
+// renameExported renames the object denoted by (pkgPath, objPath)
+// within the specified packages, along with any other objects that
+// must be renamed as a consequence. The slice of packages must be
+// topologically ordered.
+func renameExported(ctx context.Context, snapshot Snapshot, pkgs []Package, declPkgPath PackagePath, declObjPath objectpath.Path, newName string) (map[span.URI][]diff.Edit, error) {
+
+ // A target is a name for an object that is stable across types.Packages.
+ type target struct {
+ pkg PackagePath
+ obj objectpath.Path
+ }
+
+ // Populate the initial set of target objects.
+ // This set may grow as we discover the consequences of each renaming.
+ //
+ // TODO(adonovan): strictly, each cone of reverse dependencies
+ // of a single variant should have its own target map that
+ // monotonically expands as we go up the import graph, because
+ // declarations in test files can alter the set of
+ // package-level names and change the meaning of field and
+ // method selectors. So if we parallelize the graph
+ // visitation (see above), we should also compute the targets
+ // as a union of dependencies.
+ //
+ // Or we could decide that the logic below is fast enough not
+ // to need parallelism. In small measurements so far the
+ // type-checking step is about 95% and the renaming only 5%.
+ targets := map[target]bool{{declPkgPath, declObjPath}: true}
+
+ // Apply the renaming operation to each package.
+ allEdits := make(map[span.URI][]diff.Edit)
+ for _, pkg := range pkgs {
+
+ // Resolved target objects within package pkg.
+ var objects []types.Object
+ for t := range targets {
+ p := pkg.DependencyTypes(t.pkg)
+ if p == nil {
+ continue // indirect dependency of no consequence
+ }
+ obj, err := objectpath.Object(p, t.obj)
+ if err != nil {
+ // Though this can happen with regular export data
+ // due to trimming of inconsequential objects,
+ // it can't happen if we load dependencies from full
+ // syntax (as today) or shallow export data (soon),
+ // as both are complete.
+ bug.Reportf("objectpath.Object(%v, %v) failed: %v", p, t.obj, err)
+ continue
+ }
+ objects = append(objects, obj)
+ }
+ if len(objects) == 0 {
+ continue // no targets of consequence to this package
+ }
+
+ // Apply the renaming.
+ editMap, moreObjects, err := renameObjects(ctx, snapshot, newName, pkg, objects...)
+ if err != nil {
+ return nil, err
+ }
+
+ // It is safe to concatenate the edits as they are non-overlapping
+ // (or identical, in which case they will be de-duped by Rename).
+ for uri, edits := range editMap {
+ allEdits[uri] = append(allEdits[uri], edits...)
+ }
+
+ // Expand the search set?
+ for obj := range moreObjects {
+ objpath, err := objectpath.For(obj)
+ if err != nil {
+ continue // not exported
+ }
+ target := target{PackagePath(obj.Pkg().Path()), objpath}
+ targets[target] = true
+
+ // TODO(adonovan): methods requires dynamic
+ // programming of the product targets x
+ // packages as any package might add a new
+ // target (from a foward dep) as a
+ // consequence, and any target might imply a
+ // new set of rdeps. See golang/go#58461.
+ }
+ }
+
+ return allEdits, nil
+}
+
+// renamePackageName renames package declarations, imports, and go.mod files.
+func renamePackageName(ctx context.Context, s Snapshot, f FileHandle, newName PackageName) (map[span.URI][]diff.Edit, error) {
+ // Rename the package decl and all imports.
+ renamingEdits, err := renamePackage(ctx, s, f, newName)
+ if err != nil {
+ return nil, err
+ }
+
+ // Update the last component of the file's enclosing directory.
+ oldBase := filepath.Dir(f.URI().Filename())
+ newPkgDir := filepath.Join(filepath.Dir(oldBase), string(newName))
+
+ // Update any affected replace directives in go.mod files.
+ // TODO(adonovan): extract into its own function.
+ //
+ // TODO: should this operate on all go.mod files, irrespective of whether they are included in the workspace?
+ // Get all active mod files in the workspace
+ modFiles := s.ModFiles()
+ for _, m := range modFiles {
+ fh, err := s.GetFile(ctx, m)
+ if err != nil {
+ return nil, err
+ }
+ pm, err := s.ParseMod(ctx, fh)
+ if err != nil {
+ return nil, err
+ }
+
+ modFileDir := filepath.Dir(pm.URI.Filename())
+ affectedReplaces := []*modfile.Replace{}
+
+ // Check if any replace directives need to be fixed
+ for _, r := range pm.File.Replace {
+ if !strings.HasPrefix(r.New.Path, "/") && !strings.HasPrefix(r.New.Path, "./") && !strings.HasPrefix(r.New.Path, "../") {
+ continue
+ }
+
+ replacedPath := r.New.Path
+ if strings.HasPrefix(r.New.Path, "./") || strings.HasPrefix(r.New.Path, "../") {
+ replacedPath = filepath.Join(modFileDir, r.New.Path)
+ }
+
+ // TODO: Is there a risk of converting a '\' delimited replacement to a '/' delimited replacement?
+ if !strings.HasPrefix(filepath.ToSlash(replacedPath)+"/", filepath.ToSlash(oldBase)+"/") {
+ continue // not affected by the package renaming
+ }
+
+ affectedReplaces = append(affectedReplaces, r)
+ }
+
+ if len(affectedReplaces) == 0 {
+ continue
+ }
+ copied, err := modfile.Parse("", pm.Mapper.Content, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, r := range affectedReplaces {
+ replacedPath := r.New.Path
+ if strings.HasPrefix(r.New.Path, "./") || strings.HasPrefix(r.New.Path, "../") {
+ replacedPath = filepath.Join(modFileDir, r.New.Path)
+ }
+
+ suffix := strings.TrimPrefix(replacedPath, string(oldBase))
+
+ newReplacedPath, err := filepath.Rel(modFileDir, newPkgDir+suffix)
+ if err != nil {
+ return nil, err
+ }
+
+ newReplacedPath = filepath.ToSlash(newReplacedPath)
+
+ if !strings.HasPrefix(newReplacedPath, "/") && !strings.HasPrefix(newReplacedPath, "../") {
+ newReplacedPath = "./" + newReplacedPath
+ }
+
+ if err := copied.AddReplace(r.Old.Path, "", newReplacedPath, ""); err != nil {
+ return nil, err
+ }
+ }
+
+ copied.Cleanup()
+ newContent, err := copied.Format()
+ if err != nil {
+ return nil, err
+ }
+
+ // Calculate the edits to be made due to the change.
+ edits := s.View().Options().ComputeEdits(string(pm.Mapper.Content), string(newContent))
+ renamingEdits[pm.URI] = append(renamingEdits[pm.URI], edits...)
+ }
+
+ return renamingEdits, nil
+}
+
+// renamePackage computes all workspace edits required to rename the package
+// described by the given metadata, to newName, by renaming its package
+// directory.
+//
+// It updates package clauses and import paths for the renamed package as well
+// as any other packages affected by the directory renaming among packages
+// described by allMetadata.
+func renamePackage(ctx context.Context, s Snapshot, f FileHandle, newName PackageName) (map[span.URI][]diff.Edit, error) {
+ if strings.HasSuffix(string(newName), "_test") {
+ return nil, fmt.Errorf("cannot rename to _test package")
+ }
+
+ // We need metadata for the relevant package and module paths.
+ // These should be the same for all packages containing the file.
+ metas, err := s.MetadataForFile(ctx, f.URI())
+ if err != nil {
+ return nil, err
+ }
+ if len(metas) == 0 {
+ return nil, fmt.Errorf("no packages found for file %q", f.URI())
+ }
+ meta := metas[0] // narrowest
+
+ oldPkgPath := meta.PkgPath
+ if meta.Module == nil {
+ return nil, fmt.Errorf("cannot rename package: missing module information for package %q", meta.PkgPath)
+ }
+ modulePath := PackagePath(meta.Module.Path)
+ if modulePath == oldPkgPath {
+ return nil, fmt.Errorf("cannot rename package: module path %q is the same as the package path, so renaming the package directory would have no effect", modulePath)
+ }
+
+ newPathPrefix := path.Join(path.Dir(string(oldPkgPath)), string(newName))
+
+ // We must inspect all packages, not just direct importers,
+ // because we also rename subpackages, which may be unrelated.
+ // (If the renamed package imports a subpackage it may require
+ // edits to both its package and import decls.)
+ allMetadata, err := s.AllMetadata(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ // Rename package and import declarations in all relevant packages.
+ edits := make(map[span.URI][]diff.Edit)
+ for _, m := range allMetadata {
+ // Special case: x_test packages for the renamed package will not have the
+ // package path as as a dir prefix, but still need their package clauses
+ // renamed.
+ if m.PkgPath == oldPkgPath+"_test" {
+ if err := renamePackageClause(ctx, m, s, newName+"_test", edits); err != nil {
+ return nil, err
+ }
+ continue
+ }
+
+ // Subtle: check this condition before checking for valid module info
+ // below, because we should not fail this operation if unrelated packages
+ // lack module info.
+ if !strings.HasPrefix(string(m.PkgPath)+"/", string(oldPkgPath)+"/") {
+ continue // not affected by the package renaming
+ }
+
+ if m.Module == nil {
+ // This check will always fail under Bazel.
+ return nil, fmt.Errorf("cannot rename package: missing module information for package %q", m.PkgPath)
+ }
+
+ if modulePath != PackagePath(m.Module.Path) {
+ continue // don't edit imports if nested package and renaming package have different module paths
+ }
+
+ // Renaming a package consists of changing its import path and package name.
+ suffix := strings.TrimPrefix(string(m.PkgPath), string(oldPkgPath))
+ newPath := newPathPrefix + suffix
+
+ pkgName := m.Name
+ if m.PkgPath == oldPkgPath {
+ pkgName = PackageName(newName)
+
+ if err := renamePackageClause(ctx, m, s, newName, edits); err != nil {
+ return nil, err
+ }
+ }
+
+ imp := ImportPath(newPath) // TODO(adonovan): what if newPath has vendor/ prefix?
+ if err := renameImports(ctx, s, m, imp, pkgName, edits); err != nil {
+ return nil, err
+ }
+ }
+
+ return edits, nil
+}
+
+// renamePackageClause computes edits renaming the package clause of files in
+// the package described by the given metadata, to newName.
+//
+// Edits are written into the edits map.
+func renamePackageClause(ctx context.Context, m *Metadata, snapshot Snapshot, newName PackageName, edits map[span.URI][]diff.Edit) error {
+ // Rename internal references to the package in the renaming package.
+ for _, uri := range m.CompiledGoFiles {
+ fh, err := snapshot.GetFile(ctx, uri)
+ if err != nil {
+ return err
+ }
+ f, err := snapshot.ParseGo(ctx, fh, ParseHeader)
+ if err != nil {
+ return err
+ }
+ if f.File.Name == nil {
+ continue // no package declaration
+ }
+
+ edit, err := posEdit(f.Tok, f.File.Name.Pos(), f.File.Name.End(), string(newName))
+ if err != nil {
+ return err
+ }
+ edits[f.URI] = append(edits[f.URI], edit)
+ }
+
+ return nil
+}
+
+// renameImports computes the set of edits to imports resulting from renaming
+// the package described by the given metadata, to a package with import path
+// newPath and name newName.
+//
+// Edits are written into the edits map.
+func renameImports(ctx context.Context, snapshot Snapshot, m *Metadata, newPath ImportPath, newName PackageName, allEdits map[span.URI][]diff.Edit) error {
+ rdeps, err := snapshot.ReverseDependencies(ctx, m.ID, false) // find direct importers
+ if err != nil {
+ return err
+ }
+
+ // Pass 1: rename import paths in import declarations.
+ needsTypeCheck := make(map[PackageID][]span.URI)
+ for _, rdep := range rdeps {
+ if rdep.IsIntermediateTestVariant() {
+ continue // for renaming, these variants are redundant
+ }
+
+ for _, uri := range rdep.CompiledGoFiles {
+ fh, err := snapshot.GetFile(ctx, uri)
+ if err != nil {
+ return err
+ }
+ f, err := snapshot.ParseGo(ctx, fh, ParseHeader)
+ if err != nil {
+ return err
+ }
+ if f.File.Name == nil {
+ continue // no package declaration
+ }
+ for _, imp := range f.File.Imports {
+ if rdep.DepsByImpPath[UnquoteImportPath(imp)] != m.ID {
+ continue // not the import we're looking for
+ }
+
+ // If the import does not explicitly specify
+ // a local name, then we need to invoke the
+ // type checker to locate references to update.
+ //
+ // TODO(adonovan): is this actually true?
+ // Renaming an import with a local name can still
+ // cause conflicts: shadowing of built-ins, or of
+ // package-level decls in the same or another file.
+ if imp.Name == nil {
+ needsTypeCheck[rdep.ID] = append(needsTypeCheck[rdep.ID], uri)
+ }
+
+ // Create text edit for the import path (string literal).
+ edit, err := posEdit(f.Tok, imp.Path.Pos(), imp.Path.End(), strconv.Quote(string(newPath)))
+ if err != nil {
+ return err
+ }
+ allEdits[uri] = append(allEdits[uri], edit)
+ }
+ }
+ }
+
+ // If the imported package's name hasn't changed,
+ // we don't need to rename references within each file.
+ if newName == m.Name {
+ return nil
+ }
+
+ // Pass 2: rename local name (types.PkgName) of imported
+ // package throughout one or more files of the package.
+ ids := make([]PackageID, 0, len(needsTypeCheck))
+ for id := range needsTypeCheck {
+ ids = append(ids, id)
+ }
+ pkgs, err := snapshot.TypeCheck(ctx, ids...)
+ if err != nil {
+ return err
+ }
+ for i, id := range ids {
+ pkg := pkgs[i]
+ for _, uri := range needsTypeCheck[id] {
+ f, err := pkg.File(uri)
+ if err != nil {
+ return err
+ }
+ for _, imp := range f.File.Imports {
+ if imp.Name != nil {
+ continue // has explicit local name
+ }
+ if rdeps[id].DepsByImpPath[UnquoteImportPath(imp)] != m.ID {
+ continue // not the import we're looking for
+ }
+
+ pkgname := pkg.GetTypesInfo().Implicits[imp].(*types.PkgName)
+
+ pkgScope := pkg.GetTypes().Scope()
+ fileScope := pkg.GetTypesInfo().Scopes[f.File]
+
+ localName := string(newName)
+ try := 0
+
+ // Keep trying with fresh names until one succeeds.
+ //
+ // TODO(adonovan): fix: this loop is not sufficient to choose a name
+ // that is guaranteed to be conflict-free; renameObj may still fail.
+ // So the retry loop should be around renameObj, and we shouldn't
+ // bother with scopes here.
+ for fileScope.Lookup(localName) != nil || pkgScope.Lookup(localName) != nil {
+ try++
+ localName = fmt.Sprintf("%s%d", newName, try)
+ }
+
+ // renameObj detects various conflicts, including:
+ // - new name conflicts with a package-level decl in this file;
+ // - new name hides a package-level decl in another file that
+ // is actually referenced in this file;
+ // - new name hides a built-in that is actually referenced
+ // in this file;
+ // - a reference in this file to the old package name would
+ // become shadowed by an intervening declaration that
+ // uses the new name.
+ // It returns the edits if no conflict was detected.
+ editMap, _, err := renameObjects(ctx, snapshot, localName, pkg, pkgname)
+ if err != nil {
+ return err
+ }
+
+ // If the chosen local package name matches the package's
+ // new name, delete the change that would have inserted
+ // an explicit local name, which is always the lexically
+ // first change.
+ if localName == string(newName) {
+ edits, ok := editMap[uri]
+ if !ok {
+ return fmt.Errorf("internal error: no changes for %s", uri)
+ }
+ diff.SortEdits(edits)
+ editMap[uri] = edits[1:]
+ }
+ for uri, edits := range editMap {
+ allEdits[uri] = append(allEdits[uri], edits...)
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// renameObjects computes the edits to the type-checked syntax package pkg
+// required to rename a set of target objects to newName.
+//
+// It also returns the set of objects that were found (due to
+// corresponding methods and embedded fields) to require renaming as a
+// consequence of the requested renamings.
+//
+// It returns an error if the renaming would cause a conflict.
+func renameObjects(ctx context.Context, snapshot Snapshot, newName string, pkg Package, targets ...types.Object) (map[span.URI][]diff.Edit, map[types.Object]bool, error) {
+ r := renamer{
+ pkg: pkg,
+ objsToUpdate: make(map[types.Object]bool),
+ from: targets[0].Name(),
+ to: newName,
+ }
+
+ // A renaming initiated at an interface method indicates the
+ // intention to rename abstract and concrete methods as needed
+ // to preserve assignability.
+ // TODO(adonovan): pull this into the caller.
+ for _, obj := range targets {
+ if obj, ok := obj.(*types.Func); ok {
+ recv := obj.Type().(*types.Signature).Recv()
+ if recv != nil && types.IsInterface(recv.Type().Underlying()) {
+ r.changeMethods = true
+ break
+ }
+ }
+ }
+
+ // Check that the renaming of the identifier is ok.
+ for _, obj := range targets {
+ r.check(obj)
+ if len(r.conflicts) > 0 {
+ // Stop at first error.
+ return nil, nil, fmt.Errorf("%s", strings.Join(r.conflicts, "\n"))
+ }
+ }
+
+ editMap, err := r.update()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Remove initial targets so that only 'consequences' remain.
+ for _, obj := range targets {
+ delete(r.objsToUpdate, obj)
+ }
+ return editMap, r.objsToUpdate, nil
+}
+
+// Rename all references to the target objects.
+func (r *renamer) update() (map[span.URI][]diff.Edit, error) {
+ result := make(map[span.URI][]diff.Edit)
+
+ // shouldUpdate reports whether obj is one of (or an
+ // instantiation of one of) the target objects.
+ shouldUpdate := func(obj types.Object) bool {
+ if r.objsToUpdate[obj] {
+ return true
+ }
+ if fn, ok := obj.(*types.Func); ok && r.objsToUpdate[funcOrigin(fn)] {
+ return true
+ }
+ return false
+ }
+
+ // Find all identifiers in the package that define or use a
+ // renamed object. We iterate over info as it is more efficent
+ // than calling ast.Inspect for each of r.pkg.CompiledGoFiles().
+ type item struct {
+ node ast.Node // Ident, ImportSpec (obj=PkgName), or CaseClause (obj=Var)
+ obj types.Object
+ isDef bool
+ }
+ var items []item
+ info := r.pkg.GetTypesInfo()
+ for id, obj := range info.Uses {
+ if shouldUpdate(obj) {
+ items = append(items, item{id, obj, false})
+ }
+ }
+ for id, obj := range info.Defs {
+ if shouldUpdate(obj) {
+ items = append(items, item{id, obj, true})
+ }
+ }
+ for node, obj := range info.Implicits {
+ if shouldUpdate(obj) {
+ switch node.(type) {
+ case *ast.ImportSpec, *ast.CaseClause:
+ items = append(items, item{node, obj, true})
+ }
+ }
+ }
+ sort.Slice(items, func(i, j int) bool {
+ return items[i].node.Pos() < items[j].node.Pos()
+ })
+
+ // Update each identifier.
+ for _, item := range items {
+ pgf, ok := enclosingFile(r.pkg, item.node.Pos())
+ if !ok {
+ bug.Reportf("edit does not belong to syntax of package %q", r.pkg)
+ continue
+ }
+
+ // Renaming a types.PkgName may result in the addition or removal of an identifier,
+ // so we deal with this separately.
+ if pkgName, ok := item.obj.(*types.PkgName); ok && item.isDef {
+ edit, err := r.updatePkgName(pgf, pkgName)
+ if err != nil {
+ return nil, err
+ }
+ result[pgf.URI] = append(result[pgf.URI], edit)
+ continue
+ }
+
+ // Workaround the unfortunate lack of a Var object
+ // for x in "switch x := expr.(type) {}" by adjusting
+ // the case clause to the switch ident.
+ // This may result in duplicate edits, but we de-dup later.
+ if _, ok := item.node.(*ast.CaseClause); ok {
+ path, _ := astutil.PathEnclosingInterval(pgf.File, item.obj.Pos(), item.obj.Pos())
+ item.node = path[0].(*ast.Ident)
+ }
+
+ // Replace the identifier with r.to.
+ edit, err := posEdit(pgf.Tok, item.node.Pos(), item.node.End(), r.to)
+ if err != nil {
+ return nil, err
+ }
+
+ result[pgf.URI] = append(result[pgf.URI], edit)
+
+ if !item.isDef { // uses do not have doc comments to update.
+ continue
+ }
+
+ doc := docComment(pgf, item.node.(*ast.Ident))
+ if doc == nil {
+ continue
+ }
+
+ // Perform the rename in doc comments declared in the original package.
+ // go/parser strips out \r\n returns from the comment text, so go
+ // line-by-line through the comment text to get the correct positions.
+ docRegexp := regexp.MustCompile(`\b` + r.from + `\b`) // valid identifier => valid regexp
+ for _, comment := range doc.List {
+ if isDirective(comment.Text) {
+ continue
+ }
+ // TODO(adonovan): why are we looping over lines?
+ // Just run the loop body once over the entire multiline comment.
+ lines := strings.Split(comment.Text, "\n")
+ tokFile := pgf.Tok
+ commentLine := tokFile.Line(comment.Pos())
+ uri := span.URIFromPath(tokFile.Name())
+ for i, line := range lines {
+ lineStart := comment.Pos()
+ if i > 0 {
+ lineStart = tokFile.LineStart(commentLine + i)
+ }
+ for _, locs := range docRegexp.FindAllIndex([]byte(line), -1) {
+ edit, err := posEdit(tokFile, lineStart+token.Pos(locs[0]), lineStart+token.Pos(locs[1]), r.to)
+ if err != nil {
+ return nil, err // can't happen
+ }
+ result[uri] = append(result[uri], edit)
+ }
+ }
+ }
+ }
+
+ return result, nil
+}
+
+// docComment returns the doc for an identifier within the specified file.
+func docComment(pgf *ParsedGoFile, id *ast.Ident) *ast.CommentGroup {
+ nodes, _ := astutil.PathEnclosingInterval(pgf.File, id.Pos(), id.End())
+ for _, node := range nodes {
+ switch decl := node.(type) {
+ case *ast.FuncDecl:
+ return decl.Doc
+ case *ast.Field:
+ return decl.Doc
+ case *ast.GenDecl:
+ return decl.Doc
+ // For {Type,Value}Spec, if the doc on the spec is absent,
+ // search for the enclosing GenDecl
+ case *ast.TypeSpec:
+ if decl.Doc != nil {
+ return decl.Doc
+ }
+ case *ast.ValueSpec:
+ if decl.Doc != nil {
+ return decl.Doc
+ }
+ case *ast.Ident:
+ case *ast.AssignStmt:
+ // *ast.AssignStmt doesn't have an associated comment group.
+ // So, we try to find a comment just before the identifier.
+
+ // Try to find a comment group only for short variable declarations (:=).
+ if decl.Tok != token.DEFINE {
+ return nil
+ }
+
+ identLine := pgf.Tok.Line(id.Pos())
+ for _, comment := range nodes[len(nodes)-1].(*ast.File).Comments {
+ if comment.Pos() > id.Pos() {
+ // Comment is after the identifier.
+ continue
+ }
+
+ lastCommentLine := pgf.Tok.Line(comment.End())
+ if lastCommentLine+1 == identLine {
+ return comment
+ }
+ }
+ default:
+ return nil
+ }
+ }
+ return nil
+}
+
+// updatePkgName returns the updates to rename a pkgName in the import spec by
+// only modifying the package name portion of the import declaration.
+func (r *renamer) updatePkgName(pgf *ParsedGoFile, pkgName *types.PkgName) (diff.Edit, error) {
+ // Modify ImportSpec syntax to add or remove the Name as needed.
+ path, _ := astutil.PathEnclosingInterval(pgf.File, pkgName.Pos(), pkgName.Pos())
+ if len(path) < 2 {
+ return diff.Edit{}, fmt.Errorf("no path enclosing interval for %s", pkgName.Name())
+ }
+ spec, ok := path[1].(*ast.ImportSpec)
+ if !ok {
+ return diff.Edit{}, fmt.Errorf("failed to update PkgName for %s", pkgName.Name())
+ }
+
+ newText := ""
+ if pkgName.Imported().Name() != r.to {
+ newText = r.to + " "
+ }
+
+ // Replace the portion (possibly empty) of the spec before the path:
+ // local "path" or "path"
+ // -> <- -><-
+ return posEdit(pgf.Tok, spec.Pos(), spec.Path.Pos(), newText)
+}
+
+// parsePackageNameDecl is a convenience function that parses and
+// returns the package name declaration of file fh, and reports
+// whether the position ppos lies within it.
+//
+// Note: also used by references2.
+func parsePackageNameDecl(ctx context.Context, snapshot Snapshot, fh FileHandle, ppos protocol.Position) (*ParsedGoFile, bool, error) {
+ pgf, err := snapshot.ParseGo(ctx, fh, ParseHeader)
+ if err != nil {
+ return nil, false, err
+ }
+ // Careful: because we used ParseHeader,
+ // pgf.Pos(ppos) may be beyond EOF => (0, err).
+ pos, _ := pgf.PositionPos(ppos)
+ return pgf, pgf.File.Name.Pos() <= pos && pos <= pgf.File.Name.End(), nil
+}
+
+// enclosingFile returns the CompiledGoFile of pkg that contains the specified position.
+func enclosingFile(pkg Package, pos token.Pos) (*ParsedGoFile, bool) {
+ for _, pgf := range pkg.CompiledGoFiles() {
+ if pgf.File.Pos() <= pos && pos <= pgf.File.End() {
+ return pgf, true
+ }
+ }
+ return nil, false
+}
+
+// posEdit returns an edit to replace the (start, end) range of tf with 'new'.
+func posEdit(tf *token.File, start, end token.Pos, new string) (diff.Edit, error) {
+ startOffset, endOffset, err := safetoken.Offsets(tf, start, end)
+ if err != nil {
+ return diff.Edit{}, err
+ }
+ return diff.Edit{Start: startOffset, End: endOffset, New: new}, nil
+}
diff --git a/gopls/internal/lsp/source/rename_check.go b/gopls/internal/lsp/source/rename_check.go
new file mode 100644
index 000000000..a858bb7fa
--- /dev/null
+++ b/gopls/internal/lsp/source/rename_check.go
@@ -0,0 +1,921 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// Taken from golang.org/x/tools/refactor/rename.
+
+package source
+
+// This file defines the conflict-checking portion of the rename operation.
+//
+// The renamer works on a single package of type-checked syntax, and
+// is called in parallel for all necessary packages in the workspace,
+// possibly up to the transitive reverse dependencies of the
+// declaration. Finally the union of all edits and errors is computed.
+//
+// Renaming one object may entail renaming of others. For example:
+//
+// - An embedded field couples a Var (field) and a TypeName.
+// So, renaming either one requires renaming the other.
+// If the initial object is an embedded field, we must add its
+// TypeName (and its enclosing package) to the renaming set;
+// this is easily discovered at the outset.
+//
+// Conversely, if the initial object is a TypeName, we must observe
+// whether any of its references (from directly importing packages)
+// is coincident with an embedded field Var and, if so, initiate a
+// renaming of it.
+//
+// - A method of an interface type is coupled to all corresponding
+// methods of types that are assigned to the interface (as
+// discovered by the 'satisfy' pass). As a matter of usability, we
+// require that such renamings be initiated from the interface
+// method, not the concrete method.
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "go/types"
+ "path/filepath"
+ "reflect"
+ "strings"
+ "unicode"
+
+ "golang.org/x/tools/go/ast/astutil"
+ "golang.org/x/tools/gopls/internal/lsp/safetoken"
+ "golang.org/x/tools/refactor/satisfy"
+)
+
+// errorf reports an error (e.g. conflict) and prevents file modification.
+func (r *renamer) errorf(pos token.Pos, format string, args ...interface{}) {
+ // Conflict error messages in the old gorename tool (whence this
+ // logic originated) contain rich information associated with
+ // multiple source lines, such as:
+ //
+ // p/a.go:1:2: renaming "x" to "y" here
+ // p/b.go:3:4: \t would cause this reference to "y"
+ // p/c.go:5:5: \t to become shadowed by this intervening declaration.
+ //
+ // Unfortunately LSP provides no means to transmit the
+ // structure of this error, so we format the positions briefly
+ // using dir/file.go where dir is the base name of the parent
+ // directory.
+
+ var conflict strings.Builder
+
+ // Add prefix of (truncated) position.
+ if pos != token.NoPos {
+ // TODO(adonovan): skip position of first error if it is
+ // on the same line as the renaming itself.
+ posn := safetoken.StartPosition(r.pkg.FileSet(), pos).String()
+ segments := strings.Split(filepath.ToSlash(posn), "/")
+ if n := len(segments); n > 2 {
+ segments = segments[n-2:]
+ }
+ posn = strings.Join(segments, "/")
+ fmt.Fprintf(&conflict, "%s:", posn)
+
+ if !strings.HasPrefix(format, "\t") {
+ conflict.WriteByte(' ')
+ }
+ }
+
+ fmt.Fprintf(&conflict, format, args...)
+ r.conflicts = append(r.conflicts, conflict.String())
+}
+
+// check performs safety checks of the renaming of the 'from' object to r.to.
+func (r *renamer) check(from types.Object) {
+ if r.objsToUpdate[from] {
+ return
+ }
+ r.objsToUpdate[from] = true
+
+ // NB: order of conditions is important.
+ if from_, ok := from.(*types.PkgName); ok {
+ r.checkInFileBlock(from_)
+ } else if from_, ok := from.(*types.Label); ok {
+ r.checkLabel(from_)
+ } else if isPackageLevel(from) {
+ r.checkInPackageBlock(from)
+ } else if v, ok := from.(*types.Var); ok && v.IsField() {
+ r.checkStructField(v)
+ } else if f, ok := from.(*types.Func); ok && recv(f) != nil {
+ r.checkMethod(f)
+ } else if isLocal(from) {
+ r.checkInLexicalScope(from)
+ } else {
+ r.errorf(from.Pos(), "unexpected %s object %q (please report a bug)\n",
+ objectKind(from), from)
+ }
+}
+
+// checkInFileBlock performs safety checks for renames of objects in the file block,
+// i.e. imported package names.
+func (r *renamer) checkInFileBlock(from *types.PkgName) {
+ // Check import name is not "init".
+ if r.to == "init" {
+ r.errorf(from.Pos(), "%q is not a valid imported package name", r.to)
+ }
+
+ // Check for conflicts between file and package block.
+ if prev := from.Pkg().Scope().Lookup(r.to); prev != nil {
+ r.errorf(from.Pos(), "renaming this %s %q to %q would conflict",
+ objectKind(from), from.Name(), r.to)
+ r.errorf(prev.Pos(), "\twith this package member %s",
+ objectKind(prev))
+ return // since checkInPackageBlock would report redundant errors
+ }
+
+ // Check for conflicts in lexical scope.
+ r.checkInLexicalScope(from)
+}
+
+// checkInPackageBlock performs safety checks for renames of
+// func/var/const/type objects in the package block.
+func (r *renamer) checkInPackageBlock(from types.Object) {
+ // Check that there are no references to the name from another
+ // package if the renaming would make it unexported.
+ if typ := r.pkg.GetTypes(); typ != from.Pkg() && ast.IsExported(r.from) && !ast.IsExported(r.to) {
+ if id := someUse(r.pkg.GetTypesInfo(), from); id != nil {
+ r.checkExport(id, typ, from)
+ }
+ }
+
+ // Check that in the package block, "init" is a function, and never referenced.
+ if r.to == "init" {
+ kind := objectKind(from)
+ if kind == "func" {
+ // Reject if intra-package references to it exist.
+ for id, obj := range r.pkg.GetTypesInfo().Uses {
+ if obj == from {
+ r.errorf(from.Pos(),
+ "renaming this func %q to %q would make it a package initializer",
+ from.Name(), r.to)
+ r.errorf(id.Pos(), "\tbut references to it exist")
+ break
+ }
+ }
+ } else {
+ r.errorf(from.Pos(), "you cannot have a %s at package level named %q",
+ kind, r.to)
+ }
+ }
+
+ // Check for conflicts between package block and all file blocks.
+ for _, f := range r.pkg.GetSyntax() {
+ fileScope := r.pkg.GetTypesInfo().Scopes[f]
+ b, prev := fileScope.LookupParent(r.to, token.NoPos)
+ if b == fileScope {
+ r.errorf(from.Pos(), "renaming this %s %q to %q would conflict", objectKind(from), from.Name(), r.to)
+ var prevPos token.Pos
+ if prev != nil {
+ prevPos = prev.Pos()
+ }
+ r.errorf(prevPos, "\twith this %s", objectKind(prev))
+ return // since checkInPackageBlock would report redundant errors
+ }
+ }
+
+ // Check for conflicts in lexical scope.
+ r.checkInLexicalScope(from)
+}
+
+// checkInLexicalScope performs safety checks that a renaming does not
+// change the lexical reference structure of the specified package.
+//
+// For objects in lexical scope, there are three kinds of conflicts:
+// same-, sub-, and super-block conflicts. We will illustrate all three
+// using this example:
+//
+// var x int
+// var z int
+//
+// func f(y int) {
+// print(x)
+// print(y)
+// }
+//
+// Renaming x to z encounters a "same-block conflict", because an object
+// with the new name already exists, defined in the same lexical block
+// as the old object.
+//
+// Renaming x to y encounters a "sub-block conflict", because there exists
+// a reference to x from within (what would become) a hole in its scope.
+// The definition of y in an (inner) sub-block would cast a shadow in
+// the scope of the renamed variable.
+//
+// Renaming y to x encounters a "super-block conflict". This is the
+// converse situation: there is an existing definition of the new name
+// (x) in an (enclosing) super-block, and the renaming would create a
+// hole in its scope, within which there exist references to it. The
+// new name shadows the existing definition of x in the super-block.
+//
+// Removing the old name (and all references to it) is always safe, and
+// requires no checks.
+func (r *renamer) checkInLexicalScope(from types.Object) {
+ b := from.Parent() // the block defining the 'from' object
+ if b != nil {
+ toBlock, to := b.LookupParent(r.to, from.Parent().End())
+ if toBlock == b {
+ // same-block conflict
+ r.errorf(from.Pos(), "renaming this %s %q to %q",
+ objectKind(from), from.Name(), r.to)
+ r.errorf(to.Pos(), "\tconflicts with %s in same block",
+ objectKind(to))
+ return
+ } else if toBlock != nil {
+ // Check for super-block conflict.
+ // The name r.to is defined in a superblock.
+ // Is that name referenced from within this block?
+ forEachLexicalRef(r.pkg, to, func(id *ast.Ident, block *types.Scope) bool {
+ _, obj := block.LookupParent(from.Name(), id.Pos())
+ if obj == from {
+ // super-block conflict
+ r.errorf(from.Pos(), "renaming this %s %q to %q",
+ objectKind(from), from.Name(), r.to)
+ r.errorf(id.Pos(), "\twould shadow this reference")
+ r.errorf(to.Pos(), "\tto the %s declared here",
+ objectKind(to))
+ return false // stop
+ }
+ return true
+ })
+ }
+ }
+ // Check for sub-block conflict.
+ // Is there an intervening definition of r.to between
+ // the block defining 'from' and some reference to it?
+ forEachLexicalRef(r.pkg, from, func(id *ast.Ident, block *types.Scope) bool {
+ // Find the block that defines the found reference.
+ // It may be an ancestor.
+ fromBlock, _ := block.LookupParent(from.Name(), id.Pos())
+ // See what r.to would resolve to in the same scope.
+ toBlock, to := block.LookupParent(r.to, id.Pos())
+ if to != nil {
+ // sub-block conflict
+ if deeper(toBlock, fromBlock) {
+ r.errorf(from.Pos(), "renaming this %s %q to %q",
+ objectKind(from), from.Name(), r.to)
+ r.errorf(id.Pos(), "\twould cause this reference to become shadowed")
+ r.errorf(to.Pos(), "\tby this intervening %s definition",
+ objectKind(to))
+ return false // stop
+ }
+ }
+ return true
+ })
+
+ // Renaming a type that is used as an embedded field
+ // requires renaming the field too. e.g.
+ // type T int // if we rename this to U..
+ // var s struct {T}
+ // print(s.T) // ...this must change too
+ if _, ok := from.(*types.TypeName); ok {
+ for id, obj := range r.pkg.GetTypesInfo().Uses {
+ if obj == from {
+ if field := r.pkg.GetTypesInfo().Defs[id]; field != nil {
+ r.check(field)
+ }
+ }
+ }
+ }
+}
+
+// deeper reports whether block x is lexically deeper than y.
+func deeper(x, y *types.Scope) bool {
+ if x == y || x == nil {
+ return false
+ } else if y == nil {
+ return true
+ } else {
+ return deeper(x.Parent(), y.Parent())
+ }
+}
+
+// forEachLexicalRef calls fn(id, block) for each identifier id in package
+// pkg that is a reference to obj in lexical scope. block is the
+// lexical block enclosing the reference. If fn returns false the
+// iteration is terminated and findLexicalRefs returns false.
+func forEachLexicalRef(pkg Package, obj types.Object, fn func(id *ast.Ident, block *types.Scope) bool) bool {
+ ok := true
+ var stack []ast.Node
+
+ var visit func(n ast.Node) bool
+ visit = func(n ast.Node) bool {
+ if n == nil {
+ stack = stack[:len(stack)-1] // pop
+ return false
+ }
+ if !ok {
+ return false // bail out
+ }
+
+ stack = append(stack, n) // push
+ switch n := n.(type) {
+ case *ast.Ident:
+ if pkg.GetTypesInfo().Uses[n] == obj {
+ block := enclosingBlock(pkg.GetTypesInfo(), stack)
+ if !fn(n, block) {
+ ok = false
+ }
+ }
+ return visit(nil) // pop stack
+
+ case *ast.SelectorExpr:
+ // don't visit n.Sel
+ ast.Inspect(n.X, visit)
+ return visit(nil) // pop stack, don't descend
+
+ case *ast.CompositeLit:
+ // Handle recursion ourselves for struct literals
+ // so we don't visit field identifiers.
+ tv, ok := pkg.GetTypesInfo().Types[n]
+ if !ok {
+ return visit(nil) // pop stack, don't descend
+ }
+ if _, ok := Deref(tv.Type).Underlying().(*types.Struct); ok {
+ if n.Type != nil {
+ ast.Inspect(n.Type, visit)
+ }
+ for _, elt := range n.Elts {
+ if kv, ok := elt.(*ast.KeyValueExpr); ok {
+ ast.Inspect(kv.Value, visit)
+ } else {
+ ast.Inspect(elt, visit)
+ }
+ }
+ return visit(nil) // pop stack, don't descend
+ }
+ }
+ return true
+ }
+
+ for _, f := range pkg.GetSyntax() {
+ ast.Inspect(f, visit)
+ if len(stack) != 0 {
+ panic(stack)
+ }
+ if !ok {
+ break
+ }
+ }
+ return ok
+}
+
+// enclosingBlock returns the innermost block enclosing the specified
+// AST node, specified in the form of a path from the root of the file,
+// [file...n].
+func enclosingBlock(info *types.Info, stack []ast.Node) *types.Scope {
+ for i := range stack {
+ n := stack[len(stack)-1-i]
+ // For some reason, go/types always associates a
+ // function's scope with its FuncType.
+ // TODO(adonovan): feature or a bug?
+ switch f := n.(type) {
+ case *ast.FuncDecl:
+ n = f.Type
+ case *ast.FuncLit:
+ n = f.Type
+ }
+ if b := info.Scopes[n]; b != nil {
+ return b
+ }
+ }
+ panic("no Scope for *ast.File")
+}
+
+func (r *renamer) checkLabel(label *types.Label) {
+ // Check there are no identical labels in the function's label block.
+ // (Label blocks don't nest, so this is easy.)
+ if prev := label.Parent().Lookup(r.to); prev != nil {
+ r.errorf(label.Pos(), "renaming this label %q to %q", label.Name(), prev.Name())
+ r.errorf(prev.Pos(), "\twould conflict with this one")
+ }
+}
+
+// checkStructField checks that the field renaming will not cause
+// conflicts at its declaration, or ambiguity or changes to any selection.
+func (r *renamer) checkStructField(from *types.Var) {
+ // Check that the struct declaration is free of field conflicts,
+ // and field/method conflicts.
+
+ // go/types offers no easy way to get from a field (or interface
+ // method) to its declaring struct (or interface), so we must
+ // ascend the AST.
+ pgf, ok := enclosingFile(r.pkg, from.Pos())
+ if !ok {
+ return // not declared by syntax of this package
+ }
+ path, _ := astutil.PathEnclosingInterval(pgf.File, from.Pos(), from.Pos())
+ // path matches this pattern:
+ // [Ident SelectorExpr? StarExpr? Field FieldList StructType ParenExpr* ... File]
+
+ // Ascend to FieldList.
+ var i int
+ for {
+ if _, ok := path[i].(*ast.FieldList); ok {
+ break
+ }
+ i++
+ }
+ i++
+ tStruct := path[i].(*ast.StructType)
+ i++
+ // Ascend past parens (unlikely).
+ for {
+ _, ok := path[i].(*ast.ParenExpr)
+ if !ok {
+ break
+ }
+ i++
+ }
+ if spec, ok := path[i].(*ast.TypeSpec); ok {
+ // This struct is also a named type.
+ // We must check for direct (non-promoted) field/field
+ // and method/field conflicts.
+ named := r.pkg.GetTypesInfo().Defs[spec.Name].Type()
+ prev, indices, _ := types.LookupFieldOrMethod(named, true, r.pkg.GetTypes(), r.to)
+ if len(indices) == 1 {
+ r.errorf(from.Pos(), "renaming this field %q to %q",
+ from.Name(), r.to)
+ r.errorf(prev.Pos(), "\twould conflict with this %s",
+ objectKind(prev))
+ return // skip checkSelections to avoid redundant errors
+ }
+ } else {
+ // This struct is not a named type.
+ // We need only check for direct (non-promoted) field/field conflicts.
+ T := r.pkg.GetTypesInfo().Types[tStruct].Type.Underlying().(*types.Struct)
+ for i := 0; i < T.NumFields(); i++ {
+ if prev := T.Field(i); prev.Name() == r.to {
+ r.errorf(from.Pos(), "renaming this field %q to %q",
+ from.Name(), r.to)
+ r.errorf(prev.Pos(), "\twould conflict with this field")
+ return // skip checkSelections to avoid redundant errors
+ }
+ }
+ }
+
+ // Renaming an anonymous field requires renaming the type too. e.g.
+ // print(s.T) // if we rename T to U,
+ // type T int // this and
+ // var s struct {T} // this must change too.
+ if from.Anonymous() {
+ if named, ok := from.Type().(*types.Named); ok {
+ r.check(named.Obj())
+ } else if named, ok := Deref(from.Type()).(*types.Named); ok {
+ r.check(named.Obj())
+ }
+ }
+
+ // Check integrity of existing (field and method) selections.
+ r.checkSelections(from)
+}
+
+// checkSelections checks that all uses and selections that resolve to
+// the specified object would continue to do so after the renaming.
+func (r *renamer) checkSelections(from types.Object) {
+ pkg := r.pkg
+ typ := pkg.GetTypes()
+ {
+ if id := someUse(pkg.GetTypesInfo(), from); id != nil {
+ if !r.checkExport(id, typ, from) {
+ return
+ }
+ }
+
+ for syntax, sel := range pkg.GetTypesInfo().Selections {
+ // There may be extant selections of only the old
+ // name or only the new name, so we must check both.
+ // (If neither, the renaming is sound.)
+ //
+ // In both cases, we wish to compare the lengths
+ // of the implicit field path (Selection.Index)
+ // to see if the renaming would change it.
+ //
+ // If a selection that resolves to 'from', when renamed,
+ // would yield a path of the same or shorter length,
+ // this indicates ambiguity or a changed referent,
+ // analogous to same- or sub-block lexical conflict.
+ //
+ // If a selection using the name 'to' would
+ // yield a path of the same or shorter length,
+ // this indicates ambiguity or shadowing,
+ // analogous to same- or super-block lexical conflict.
+
+ // TODO(adonovan): fix: derive from Types[syntax.X].Mode
+ // TODO(adonovan): test with pointer, value, addressable value.
+ isAddressable := true
+
+ if sel.Obj() == from {
+ if obj, indices, _ := types.LookupFieldOrMethod(sel.Recv(), isAddressable, from.Pkg(), r.to); obj != nil {
+ // Renaming this existing selection of
+ // 'from' may block access to an existing
+ // type member named 'to'.
+ delta := len(indices) - len(sel.Index())
+ if delta > 0 {
+ continue // no ambiguity
+ }
+ r.selectionConflict(from, delta, syntax, obj)
+ return
+ }
+ } else if sel.Obj().Name() == r.to {
+ if obj, indices, _ := types.LookupFieldOrMethod(sel.Recv(), isAddressable, from.Pkg(), from.Name()); obj == from {
+ // Renaming 'from' may cause this existing
+ // selection of the name 'to' to change
+ // its meaning.
+ delta := len(indices) - len(sel.Index())
+ if delta > 0 {
+ continue // no ambiguity
+ }
+ r.selectionConflict(from, -delta, syntax, sel.Obj())
+ return
+ }
+ }
+ }
+ }
+}
+
+func (r *renamer) selectionConflict(from types.Object, delta int, syntax *ast.SelectorExpr, obj types.Object) {
+ r.errorf(from.Pos(), "renaming this %s %q to %q",
+ objectKind(from), from.Name(), r.to)
+
+ switch {
+ case delta < 0:
+ // analogous to sub-block conflict
+ r.errorf(syntax.Sel.Pos(),
+ "\twould change the referent of this selection")
+ r.errorf(obj.Pos(), "\tof this %s", objectKind(obj))
+ case delta == 0:
+ // analogous to same-block conflict
+ r.errorf(syntax.Sel.Pos(),
+ "\twould make this reference ambiguous")
+ r.errorf(obj.Pos(), "\twith this %s", objectKind(obj))
+ case delta > 0:
+ // analogous to super-block conflict
+ r.errorf(syntax.Sel.Pos(),
+ "\twould shadow this selection")
+ r.errorf(obj.Pos(), "\tof the %s declared here",
+ objectKind(obj))
+ }
+}
+
+// checkMethod performs safety checks for renaming a method.
+// There are three hazards:
+// - declaration conflicts
+// - selection ambiguity/changes
+// - entailed renamings of assignable concrete/interface types.
+//
+// We reject renamings initiated at concrete methods if it would
+// change the assignability relation. For renamings of abstract
+// methods, we rename all methods transitively coupled to it via
+// assignability.
+func (r *renamer) checkMethod(from *types.Func) {
+ // e.g. error.Error
+ if from.Pkg() == nil {
+ r.errorf(from.Pos(), "you cannot rename built-in method %s", from)
+ return
+ }
+
+ // ASSIGNABILITY: We reject renamings of concrete methods that
+ // would break a 'satisfy' constraint; but renamings of abstract
+ // methods are allowed to proceed, and we rename affected
+ // concrete and abstract methods as necessary. It is the
+ // initial method that determines the policy.
+
+ // Check for conflict at point of declaration.
+ // Check to ensure preservation of assignability requirements.
+ R := recv(from).Type()
+ if types.IsInterface(R) {
+ // Abstract method
+
+ // declaration
+ prev, _, _ := types.LookupFieldOrMethod(R, false, from.Pkg(), r.to)
+ if prev != nil {
+ r.errorf(from.Pos(), "renaming this interface method %q to %q",
+ from.Name(), r.to)
+ r.errorf(prev.Pos(), "\twould conflict with this method")
+ return
+ }
+
+ // Check all interfaces that embed this one for
+ // declaration conflicts too.
+ {
+ // Start with named interface types (better errors)
+ for _, obj := range r.pkg.GetTypesInfo().Defs {
+ if obj, ok := obj.(*types.TypeName); ok && types.IsInterface(obj.Type()) {
+ f, _, _ := types.LookupFieldOrMethod(
+ obj.Type(), false, from.Pkg(), from.Name())
+ if f == nil {
+ continue
+ }
+ t, _, _ := types.LookupFieldOrMethod(
+ obj.Type(), false, from.Pkg(), r.to)
+ if t == nil {
+ continue
+ }
+ r.errorf(from.Pos(), "renaming this interface method %q to %q",
+ from.Name(), r.to)
+ r.errorf(t.Pos(), "\twould conflict with this method")
+ r.errorf(obj.Pos(), "\tin named interface type %q", obj.Name())
+ }
+ }
+
+ // Now look at all literal interface types (includes named ones again).
+ for e, tv := range r.pkg.GetTypesInfo().Types {
+ if e, ok := e.(*ast.InterfaceType); ok {
+ _ = e
+ _ = tv.Type.(*types.Interface)
+ // TODO(adonovan): implement same check as above.
+ }
+ }
+ }
+
+ // assignability
+ //
+ // Find the set of concrete or abstract methods directly
+ // coupled to abstract method 'from' by some
+ // satisfy.Constraint, and rename them too.
+ for key := range r.satisfy() {
+ // key = (lhs, rhs) where lhs is always an interface.
+
+ lsel := r.msets.MethodSet(key.LHS).Lookup(from.Pkg(), from.Name())
+ if lsel == nil {
+ continue
+ }
+ rmethods := r.msets.MethodSet(key.RHS)
+ rsel := rmethods.Lookup(from.Pkg(), from.Name())
+ if rsel == nil {
+ continue
+ }
+
+ // If both sides have a method of this name,
+ // and one of them is m, the other must be coupled.
+ var coupled *types.Func
+ switch from {
+ case lsel.Obj():
+ coupled = rsel.Obj().(*types.Func)
+ case rsel.Obj():
+ coupled = lsel.Obj().(*types.Func)
+ default:
+ continue
+ }
+
+ // We must treat concrete-to-interface
+ // constraints like an implicit selection C.f of
+ // each interface method I.f, and check that the
+ // renaming leaves the selection unchanged and
+ // unambiguous.
+ //
+ // Fun fact: the implicit selection of C.f
+ // type I interface{f()}
+ // type C struct{I}
+ // func (C) g()
+ // var _ I = C{} // here
+ // yields abstract method I.f. This can make error
+ // messages less than obvious.
+ //
+ if !types.IsInterface(key.RHS) {
+ // The logic below was derived from checkSelections.
+
+ rtosel := rmethods.Lookup(from.Pkg(), r.to)
+ if rtosel != nil {
+ rto := rtosel.Obj().(*types.Func)
+ delta := len(rsel.Index()) - len(rtosel.Index())
+ if delta < 0 {
+ continue // no ambiguity
+ }
+
+ // TODO(adonovan): record the constraint's position.
+ keyPos := token.NoPos
+
+ r.errorf(from.Pos(), "renaming this method %q to %q",
+ from.Name(), r.to)
+ if delta == 0 {
+ // analogous to same-block conflict
+ r.errorf(keyPos, "\twould make the %s method of %s invoked via interface %s ambiguous",
+ r.to, key.RHS, key.LHS)
+ r.errorf(rto.Pos(), "\twith (%s).%s",
+ recv(rto).Type(), r.to)
+ } else {
+ // analogous to super-block conflict
+ r.errorf(keyPos, "\twould change the %s method of %s invoked via interface %s",
+ r.to, key.RHS, key.LHS)
+ r.errorf(coupled.Pos(), "\tfrom (%s).%s",
+ recv(coupled).Type(), r.to)
+ r.errorf(rto.Pos(), "\tto (%s).%s",
+ recv(rto).Type(), r.to)
+ }
+ return // one error is enough
+ }
+ }
+
+ if !r.changeMethods {
+ // This should be unreachable.
+ r.errorf(from.Pos(), "internal error: during renaming of abstract method %s", from)
+ r.errorf(coupled.Pos(), "\tchangedMethods=false, coupled method=%s", coupled)
+ r.errorf(from.Pos(), "\tPlease file a bug report")
+ return
+ }
+
+ // Rename the coupled method to preserve assignability.
+ r.check(coupled)
+ }
+ } else {
+ // Concrete method
+
+ // declaration
+ prev, indices, _ := types.LookupFieldOrMethod(R, true, from.Pkg(), r.to)
+ if prev != nil && len(indices) == 1 {
+ r.errorf(from.Pos(), "renaming this method %q to %q",
+ from.Name(), r.to)
+ r.errorf(prev.Pos(), "\twould conflict with this %s",
+ objectKind(prev))
+ return
+ }
+
+ // assignability
+ //
+ // Find the set of abstract methods coupled to concrete
+ // method 'from' by some satisfy.Constraint, and rename
+ // them too.
+ //
+ // Coupling may be indirect, e.g. I.f <-> C.f via type D.
+ //
+ // type I interface {f()}
+ // type C int
+ // type (C) f()
+ // type D struct{C}
+ // var _ I = D{}
+ //
+ for key := range r.satisfy() {
+ // key = (lhs, rhs) where lhs is always an interface.
+ if types.IsInterface(key.RHS) {
+ continue
+ }
+ rsel := r.msets.MethodSet(key.RHS).Lookup(from.Pkg(), from.Name())
+ if rsel == nil || rsel.Obj() != from {
+ continue // rhs does not have the method
+ }
+ lsel := r.msets.MethodSet(key.LHS).Lookup(from.Pkg(), from.Name())
+ if lsel == nil {
+ continue
+ }
+ imeth := lsel.Obj().(*types.Func)
+
+ // imeth is the abstract method (e.g. I.f)
+ // and key.RHS is the concrete coupling type (e.g. D).
+ if !r.changeMethods {
+ r.errorf(from.Pos(), "renaming this method %q to %q",
+ from.Name(), r.to)
+ var pos token.Pos
+ var iface string
+
+ I := recv(imeth).Type()
+ if named, ok := I.(*types.Named); ok {
+ pos = named.Obj().Pos()
+ iface = "interface " + named.Obj().Name()
+ } else {
+ pos = from.Pos()
+ iface = I.String()
+ }
+ r.errorf(pos, "\twould make %s no longer assignable to %s",
+ key.RHS, iface)
+ r.errorf(imeth.Pos(), "\t(rename %s.%s if you intend to change both types)",
+ I, from.Name())
+ return // one error is enough
+ }
+
+ // Rename the coupled interface method to preserve assignability.
+ r.check(imeth)
+ }
+ }
+
+ // Check integrity of existing (field and method) selections.
+ // We skip this if there were errors above, to avoid redundant errors.
+ r.checkSelections(from)
+}
+
+func (r *renamer) checkExport(id *ast.Ident, pkg *types.Package, from types.Object) bool {
+ // Reject cross-package references if r.to is unexported.
+ // (Such references may be qualified identifiers or field/method
+ // selections.)
+ if !ast.IsExported(r.to) && pkg != from.Pkg() {
+ r.errorf(from.Pos(),
+ "renaming %q to %q would make it unexported",
+ from.Name(), r.to)
+ r.errorf(id.Pos(), "\tbreaking references from packages such as %q",
+ pkg.Path())
+ return false
+ }
+ return true
+}
+
+// satisfy returns the set of interface satisfaction constraints.
+func (r *renamer) satisfy() map[satisfy.Constraint]bool {
+ if r.satisfyConstraints == nil {
+ // Compute on demand: it's expensive.
+ var f satisfy.Finder
+ pkg := r.pkg
+ {
+ // From satisfy.Finder documentation:
+ //
+ // The package must be free of type errors, and
+ // info.{Defs,Uses,Selections,Types} must have been populated by the
+ // type-checker.
+ //
+ // Only proceed if all packages have no errors.
+ if pkg.HasParseErrors() || pkg.HasTypeErrors() {
+ r.errorf(token.NoPos, // we don't have a position for this error.
+ "renaming %q to %q not possible because %q has errors",
+ r.from, r.to, pkg.Metadata().PkgPath)
+ return nil
+ }
+ f.Find(pkg.GetTypesInfo(), pkg.GetSyntax())
+ }
+ r.satisfyConstraints = f.Result
+ }
+ return r.satisfyConstraints
+}
+
+// -- helpers ----------------------------------------------------------
+
+// recv returns the method's receiver.
+func recv(meth *types.Func) *types.Var {
+ return meth.Type().(*types.Signature).Recv()
+}
+
+// someUse returns an arbitrary use of obj within info.
+func someUse(info *types.Info, obj types.Object) *ast.Ident {
+ for id, o := range info.Uses {
+ if o == obj {
+ return id
+ }
+ }
+ return nil
+}
+
+func objectKind(obj types.Object) string {
+ if obj == nil {
+ return "nil object"
+ }
+ switch obj := obj.(type) {
+ case *types.PkgName:
+ return "imported package name"
+ case *types.TypeName:
+ return "type"
+ case *types.Var:
+ if obj.IsField() {
+ return "field"
+ }
+ case *types.Func:
+ if obj.Type().(*types.Signature).Recv() != nil {
+ return "method"
+ }
+ }
+ // label, func, var, const
+ return strings.ToLower(strings.TrimPrefix(reflect.TypeOf(obj).String(), "*types."))
+}
+
+// NB: for renamings, blank is not considered valid.
+func isValidIdentifier(id string) bool {
+ if id == "" || id == "_" {
+ return false
+ }
+ for i, r := range id {
+ if !isLetter(r) && (i == 0 || !isDigit(r)) {
+ return false
+ }
+ }
+ return token.Lookup(id) == token.IDENT
+}
+
+// isLocal reports whether obj is local to some function.
+// Precondition: not a struct field or interface method.
+func isLocal(obj types.Object) bool {
+ // [... 5=stmt 4=func 3=file 2=pkg 1=universe]
+ var depth int
+ for scope := obj.Parent(); scope != nil; scope = scope.Parent() {
+ depth++
+ }
+ return depth >= 4
+}
+
+func isPackageLevel(obj types.Object) bool {
+ if obj == nil {
+ return false
+ }
+ return obj.Pkg().Scope().Lookup(obj.Name()) == obj
+}
+
+// -- Plundered from go/scanner: ---------------------------------------
+
+func isLetter(ch rune) bool {
+ return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
+}
+
+func isDigit(ch rune) bool {
+ return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
+}
diff --git a/gopls/internal/lsp/source/signature_help.go b/gopls/internal/lsp/source/signature_help.go
new file mode 100644
index 000000000..716de2dd9
--- /dev/null
+++ b/gopls/internal/lsp/source/signature_help.go
@@ -0,0 +1,185 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package source
+
+import (
+ "context"
+ "fmt"
+ "go/ast"
+ "go/token"
+ "go/types"
+ "strings"
+
+ "golang.org/x/tools/go/ast/astutil"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/internal/event"
+)
+
+func SignatureHelp(ctx context.Context, snapshot Snapshot, fh FileHandle, position protocol.Position) (*protocol.SignatureInformation, int, error) {
+ ctx, done := event.Start(ctx, "source.SignatureHelp")
+ defer done()
+
+ // We need full type-checking here, as we must type-check function bodies in
+ // order to provide signature help at the requested position.
+ pkg, pgf, err := PackageForFile(ctx, snapshot, fh.URI(), NarrowestPackage)
+ if err != nil {
+ return nil, 0, fmt.Errorf("getting file for SignatureHelp: %w", err)
+ }
+ pos, err := pgf.PositionPos(position)
+ if err != nil {
+ return nil, 0, err
+ }
+ // Find a call expression surrounding the query position.
+ var callExpr *ast.CallExpr
+ path, _ := astutil.PathEnclosingInterval(pgf.File, pos, pos)
+ if path == nil {
+ return nil, 0, fmt.Errorf("cannot find node enclosing position")
+ }
+FindCall:
+ for _, node := range path {
+ switch node := node.(type) {
+ case *ast.CallExpr:
+ if pos >= node.Lparen && pos <= node.Rparen {
+ callExpr = node
+ break FindCall
+ }
+ case *ast.FuncLit, *ast.FuncType:
+ // The user is within an anonymous function,
+ // which may be the parameter to the *ast.CallExpr.
+ // Don't show signature help in this case.
+ return nil, 0, fmt.Errorf("no signature help within a function declaration")
+ case *ast.BasicLit:
+ if node.Kind == token.STRING {
+ return nil, 0, fmt.Errorf("no signature help within a string literal")
+ }
+ }
+
+ }
+ if callExpr == nil || callExpr.Fun == nil {
+ return nil, 0, fmt.Errorf("cannot find an enclosing function")
+ }
+
+ qf := Qualifier(pgf.File, pkg.GetTypes(), pkg.GetTypesInfo())
+
+ // Get the object representing the function, if available.
+ // There is no object in certain cases such as calling a function returned by
+ // a function (e.g. "foo()()").
+ var obj types.Object
+ switch t := callExpr.Fun.(type) {
+ case *ast.Ident:
+ obj = pkg.GetTypesInfo().ObjectOf(t)
+ case *ast.SelectorExpr:
+ obj = pkg.GetTypesInfo().ObjectOf(t.Sel)
+ }
+
+ // Handle builtin functions separately.
+ if obj, ok := obj.(*types.Builtin); ok {
+ return builtinSignature(ctx, snapshot, callExpr, obj.Name(), pos)
+ }
+
+ // Get the type information for the function being called.
+ sigType := pkg.GetTypesInfo().TypeOf(callExpr.Fun)
+ if sigType == nil {
+ return nil, 0, fmt.Errorf("cannot get type for Fun %[1]T (%[1]v)", callExpr.Fun)
+ }
+
+ sig, _ := sigType.Underlying().(*types.Signature)
+ if sig == nil {
+ return nil, 0, fmt.Errorf("cannot find signature for Fun %[1]T (%[1]v)", callExpr.Fun)
+ }
+
+ activeParam := activeParameter(callExpr, sig.Params().Len(), sig.Variadic(), pos)
+
+ var (
+ name string
+ comment *ast.CommentGroup
+ )
+ if obj != nil {
+ d, err := HoverDocForObject(ctx, snapshot, pkg.FileSet(), obj)
+ if err != nil {
+ return nil, 0, err
+ }
+ name = obj.Name()
+ comment = d
+ } else {
+ name = "func"
+ }
+ mq := MetadataQualifierForFile(snapshot, pgf.File, pkg.Metadata())
+ s, err := NewSignature(ctx, snapshot, pkg, sig, comment, qf, mq)
+ if err != nil {
+ return nil, 0, err
+ }
+ paramInfo := make([]protocol.ParameterInformation, 0, len(s.params))
+ for _, p := range s.params {
+ paramInfo = append(paramInfo, protocol.ParameterInformation{Label: p})
+ }
+ return &protocol.SignatureInformation{
+ Label: name + s.Format(),
+ Documentation: stringToSigInfoDocumentation(s.doc, snapshot.View().Options()),
+ Parameters: paramInfo,
+ }, activeParam, nil
+}
+
+func builtinSignature(ctx context.Context, snapshot Snapshot, callExpr *ast.CallExpr, name string, pos token.Pos) (*protocol.SignatureInformation, int, error) {
+ sig, err := NewBuiltinSignature(ctx, snapshot, name)
+ if err != nil {
+ return nil, 0, err
+ }
+ paramInfo := make([]protocol.ParameterInformation, 0, len(sig.params))
+ for _, p := range sig.params {
+ paramInfo = append(paramInfo, protocol.ParameterInformation{Label: p})
+ }
+ activeParam := activeParameter(callExpr, len(sig.params), sig.variadic, pos)
+ return &protocol.SignatureInformation{
+ Label: sig.name + sig.Format(),
+ Documentation: stringToSigInfoDocumentation(sig.doc, snapshot.View().Options()),
+ Parameters: paramInfo,
+ }, activeParam, nil
+
+}
+
+func activeParameter(callExpr *ast.CallExpr, numParams int, variadic bool, pos token.Pos) (activeParam int) {
+ if len(callExpr.Args) == 0 {
+ return 0
+ }
+ // First, check if the position is even in the range of the arguments.
+ start, end := callExpr.Lparen, callExpr.Rparen
+ if !(start <= pos && pos <= end) {
+ return 0
+ }
+ for _, expr := range callExpr.Args {
+ if start == token.NoPos {
+ start = expr.Pos()
+ }
+ end = expr.End()
+ if start <= pos && pos <= end {
+ break
+ }
+ // Don't advance the active parameter for the last parameter of a variadic function.
+ if !variadic || activeParam < numParams-1 {
+ activeParam++
+ }
+ start = expr.Pos() + 1 // to account for commas
+ }
+ return activeParam
+}
+
+func stringToSigInfoDocumentation(s string, options *Options) *protocol.Or_SignatureInformation_documentation {
+ v := s
+ k := protocol.PlainText
+ if options.PreferredContentFormat == protocol.Markdown {
+ v = CommentToMarkdown(s, options)
+ // whether or not content is newline terminated may not matter for LSP clients,
+ // but our tests expect trailing newlines to be stripped.
+ v = strings.TrimSuffix(v, "\n") // TODO(pjw): change the golden files
+ k = protocol.Markdown
+ }
+ return &protocol.Or_SignatureInformation_documentation{
+ Value: protocol.MarkupContent{
+ Kind: k,
+ Value: v,
+ },
+ }
+}
diff --git a/gopls/internal/lsp/source/stub.go b/gopls/internal/lsp/source/stub.go
new file mode 100644
index 000000000..6bbc1dba2
--- /dev/null
+++ b/gopls/internal/lsp/source/stub.go
@@ -0,0 +1,238 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package source
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "go/format"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "io"
+ "path"
+ "strings"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/ast/astutil"
+ "golang.org/x/tools/gopls/internal/lsp/analysis/stubmethods"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/safetoken"
+ "golang.org/x/tools/internal/bug"
+ "golang.org/x/tools/internal/typeparams"
+)
+
+// stubSuggestedFixFunc returns a suggested fix to declare the missing
+// methods of the concrete type that is assigned to an interface type
+// at the cursor position.
+func stubSuggestedFixFunc(ctx context.Context, snapshot Snapshot, fh FileHandle, rng protocol.Range) (*token.FileSet, *analysis.SuggestedFix, error) {
+ pkg, pgf, err := PackageForFile(ctx, snapshot, fh.URI(), NarrowestPackage)
+ if err != nil {
+ return nil, nil, fmt.Errorf("GetTypedFile: %w", err)
+ }
+ start, end, err := pgf.RangePos(rng)
+ if err != nil {
+ return nil, nil, err
+ }
+ nodes, _ := astutil.PathEnclosingInterval(pgf.File, start, end)
+ si := stubmethods.GetStubInfo(pkg.FileSet(), pkg.GetTypesInfo(), nodes, start)
+ if si == nil {
+ return nil, nil, fmt.Errorf("nil interface request")
+ }
+ return stub(ctx, snapshot, si)
+}
+
+// stub returns a suggested fix to declare the missing methods of si.Concrete.
+func stub(ctx context.Context, snapshot Snapshot, si *stubmethods.StubInfo) (*token.FileSet, *analysis.SuggestedFix, error) {
+ // A function-local type cannot be stubbed
+ // since there's nowhere to put the methods.
+ conc := si.Concrete.Obj()
+ if conc.Parent() != conc.Pkg().Scope() {
+ return nil, nil, fmt.Errorf("local type %q cannot be stubbed", conc.Name())
+ }
+
+ // Parse the file declaring the concrete type.
+ declPGF, _, err := parseFull(ctx, snapshot, si.Fset, conc.Pos())
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to parse file %q declaring implementation type: %w", declPGF.URI, err)
+ }
+ if declPGF.Fixed {
+ return nil, nil, fmt.Errorf("file contains parse errors: %s", declPGF.URI)
+ }
+
+ // Build import environment for the declaring file.
+ importEnv := make(map[ImportPath]string) // value is local name
+ for _, imp := range declPGF.File.Imports {
+ importPath := UnquoteImportPath(imp)
+ var name string
+ if imp.Name != nil {
+ name = imp.Name.Name
+ if name == "_" {
+ continue
+ } else if name == "." {
+ name = "" // see types.Qualifier
+ }
+ } else {
+ // TODO(adonovan): may omit a vendor/ prefix; consult the Metadata.
+ name = path.Base(string(importPath))
+ }
+ importEnv[importPath] = name // latest alias wins
+ }
+
+ // Find subset of interface methods that the concrete type lacks.
+ var missing []*types.Func
+ ifaceType := si.Interface.Type().Underlying().(*types.Interface)
+ for i := 0; i < ifaceType.NumMethods(); i++ {
+ imethod := ifaceType.Method(i)
+ cmethod, _, _ := types.LookupFieldOrMethod(si.Concrete, si.Pointer, imethod.Pkg(), imethod.Name())
+ if cmethod == nil {
+ missing = append(missing, imethod)
+ continue
+ }
+
+ if _, ok := cmethod.(*types.Var); ok {
+ // len(LookupFieldOrMethod.index) = 1 => conflict, >1 => shadow.
+ return nil, nil, fmt.Errorf("adding method %s.%s would conflict with (or shadow) existing field",
+ conc.Name(), imethod.Name())
+ }
+
+ if !types.Identical(cmethod.Type(), imethod.Type()) {
+ return nil, nil, fmt.Errorf("method %s.%s already exists but has the wrong type: got %s, want %s",
+ conc.Name(), imethod.Name(), cmethod.Type(), imethod.Type())
+ }
+ }
+ if len(missing) == 0 {
+ return nil, nil, fmt.Errorf("no missing methods found")
+ }
+
+ // Create a package name qualifier that uses the
+ // locally appropriate imported package name.
+ // It records any needed new imports.
+ // TODO(adonovan): factor with source.FormatVarType, stubmethods.RelativeToFiles?
+ //
+ // Prior to CL 469155 this logic preserved any renaming
+ // imports from the file that declares the interface
+ // method--ostensibly the preferred name for imports of
+ // frequently renamed packages such as protobufs.
+ // Now we use the package's declared name. If this turns out
+ // to be a mistake, then use parseHeader(si.iface.Pos()).
+ //
+ type newImport struct{ name, importPath string }
+ var newImports []newImport // for AddNamedImport
+ qual := func(pkg *types.Package) string {
+ // TODO(adonovan): don't ignore vendor prefix.
+ importPath := ImportPath(pkg.Path())
+ name, ok := importEnv[importPath]
+ if !ok {
+ // Insert new import using package's declared name.
+ //
+ // TODO(adonovan): resolve conflict between declared
+ // name and existing file-level (declPGF.File.Imports)
+ // or package-level (si.Concrete.Pkg.Scope) decls by
+ // generating a fresh name.
+ name = pkg.Name()
+ importEnv[importPath] = name
+ new := newImport{importPath: string(importPath)}
+ // For clarity, use a renaming import whenever the
+ // local name does not match the path's last segment.
+ if name != path.Base(new.importPath) {
+ new.name = name
+ }
+ newImports = append(newImports, new)
+ }
+ return name
+ }
+
+ // Format interface name (used only in a comment).
+ iface := si.Interface.Name()
+ if ipkg := si.Interface.Pkg(); ipkg != nil && ipkg != conc.Pkg() {
+ iface = ipkg.Name() + "." + iface
+ }
+
+ // Pointer receiver?
+ var star string
+ if si.Pointer {
+ star = "*"
+ }
+
+ // Format the new methods.
+ var newMethods bytes.Buffer
+ for _, method := range missing {
+ fmt.Fprintf(&newMethods, `// %s implements %s
+func (%s%s%s) %s%s {
+ panic("unimplemented")
+}
+`,
+ method.Name(),
+ iface,
+ star,
+ si.Concrete.Obj().Name(),
+ FormatTypeParams(typeparams.ForNamed(si.Concrete)),
+ method.Name(),
+ strings.TrimPrefix(types.TypeString(method.Type(), qual), "func"))
+ }
+
+ // Compute insertion point for new methods:
+ // after the top-level declaration enclosing the (package-level) type.
+ insertOffset, err := safetoken.Offset(declPGF.Tok, declPGF.File.End())
+ if err != nil {
+ return nil, nil, bug.Errorf("internal error: end position outside file bounds: %v", err)
+ }
+ concOffset, err := safetoken.Offset(si.Fset.File(conc.Pos()), conc.Pos())
+ if err != nil {
+ return nil, nil, bug.Errorf("internal error: finding type decl offset: %v", err)
+ }
+ for _, decl := range declPGF.File.Decls {
+ declEndOffset, err := safetoken.Offset(declPGF.Tok, decl.End())
+ if err != nil {
+ return nil, nil, bug.Errorf("internal error: finding decl offset: %v", err)
+ }
+ if declEndOffset > concOffset {
+ insertOffset = declEndOffset
+ break
+ }
+ }
+
+ // Splice the new methods into the file content.
+ var buf bytes.Buffer
+ input := declPGF.Mapper.Content // unfixed content of file
+ buf.Write(input[:insertOffset])
+ buf.WriteByte('\n')
+ io.Copy(&buf, &newMethods)
+ buf.Write(input[insertOffset:])
+
+ // Re-parse the file.
+ fset := token.NewFileSet()
+ newF, err := parser.ParseFile(fset, declPGF.File.Name.Name, buf.Bytes(), parser.ParseComments)
+ if err != nil {
+ return nil, nil, fmt.Errorf("could not reparse file: %w", err)
+ }
+
+ // Splice the new imports into the syntax tree.
+ for _, imp := range newImports {
+ astutil.AddNamedImport(fset, newF, imp.name, imp.importPath)
+ }
+
+ // Pretty-print.
+ var output strings.Builder
+ if err := format.Node(&output, fset, newF); err != nil {
+ return nil, nil, fmt.Errorf("format.Node: %w", err)
+ }
+
+ // Report the diff.
+ diffs := snapshot.View().Options().ComputeEdits(string(input), output.String())
+ var edits []analysis.TextEdit
+ for _, edit := range diffs {
+ edits = append(edits, analysis.TextEdit{
+ Pos: declPGF.Tok.Pos(edit.Start),
+ End: declPGF.Tok.Pos(edit.End),
+ NewText: []byte(edit.New),
+ })
+ }
+ return FileSetFor(declPGF.Tok), // edits use declPGF.Tok
+ &analysis.SuggestedFix{TextEdits: edits},
+ nil
+}
diff --git a/gopls/internal/lsp/source/symbols.go b/gopls/internal/lsp/source/symbols.go
new file mode 100644
index 000000000..a5c015e0a
--- /dev/null
+++ b/gopls/internal/lsp/source/symbols.go
@@ -0,0 +1,227 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package source
+
+import (
+ "context"
+ "fmt"
+ "go/ast"
+ "go/token"
+ "go/types"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/internal/event"
+)
+
+func DocumentSymbols(ctx context.Context, snapshot Snapshot, fh FileHandle) ([]protocol.DocumentSymbol, error) {
+ ctx, done := event.Start(ctx, "source.DocumentSymbols")
+ defer done()
+
+ pgf, err := snapshot.ParseGo(ctx, fh, ParseFull)
+ if err != nil {
+ return nil, fmt.Errorf("getting file for DocumentSymbols: %w", err)
+ }
+
+ // Build symbols for file declarations. When encountering a declaration with
+ // errors (typically because positions are invalid), we skip the declaration
+ // entirely. VS Code fails to show any symbols if one of the top-level
+ // symbols is missing position information.
+ var symbols []protocol.DocumentSymbol
+ for _, decl := range pgf.File.Decls {
+ switch decl := decl.(type) {
+ case *ast.FuncDecl:
+ if decl.Name.Name == "_" {
+ continue
+ }
+ fs, err := funcSymbol(pgf.Mapper, pgf.Tok, decl)
+ if err == nil {
+ // If function is a method, prepend the type of the method.
+ if decl.Recv != nil && len(decl.Recv.List) > 0 {
+ fs.Name = fmt.Sprintf("(%s).%s", types.ExprString(decl.Recv.List[0].Type), fs.Name)
+ }
+ symbols = append(symbols, fs)
+ }
+ case *ast.GenDecl:
+ for _, spec := range decl.Specs {
+ switch spec := spec.(type) {
+ case *ast.TypeSpec:
+ if spec.Name.Name == "_" {
+ continue
+ }
+ ts, err := typeSymbol(pgf.Mapper, pgf.Tok, spec)
+ if err == nil {
+ symbols = append(symbols, ts)
+ }
+ case *ast.ValueSpec:
+ for _, name := range spec.Names {
+ if name.Name == "_" {
+ continue
+ }
+ vs, err := varSymbol(pgf.Mapper, pgf.Tok, spec, name, decl.Tok == token.CONST)
+ if err == nil {
+ symbols = append(symbols, vs)
+ }
+ }
+ }
+ }
+ }
+ }
+ return symbols, nil
+}
+
+func funcSymbol(m *protocol.Mapper, tf *token.File, decl *ast.FuncDecl) (protocol.DocumentSymbol, error) {
+ s := protocol.DocumentSymbol{
+ Name: decl.Name.Name,
+ Kind: protocol.Function,
+ }
+ if decl.Recv != nil {
+ s.Kind = protocol.Method
+ }
+ var err error
+ s.Range, err = m.NodeRange(tf, decl)
+ if err != nil {
+ return protocol.DocumentSymbol{}, err
+ }
+ s.SelectionRange, err = m.NodeRange(tf, decl.Name)
+ if err != nil {
+ return protocol.DocumentSymbol{}, err
+ }
+ s.Detail = types.ExprString(decl.Type)
+ return s, nil
+}
+
+func typeSymbol(m *protocol.Mapper, tf *token.File, spec *ast.TypeSpec) (protocol.DocumentSymbol, error) {
+ s := protocol.DocumentSymbol{
+ Name: spec.Name.Name,
+ }
+ var err error
+ s.Range, err = m.NodeRange(tf, spec)
+ if err != nil {
+ return protocol.DocumentSymbol{}, err
+ }
+ s.SelectionRange, err = m.NodeRange(tf, spec.Name)
+ if err != nil {
+ return protocol.DocumentSymbol{}, err
+ }
+ s.Kind, s.Detail, s.Children = typeDetails(m, tf, spec.Type)
+ return s, nil
+}
+
+func typeDetails(m *protocol.Mapper, tf *token.File, typExpr ast.Expr) (kind protocol.SymbolKind, detail string, children []protocol.DocumentSymbol) {
+ switch typExpr := typExpr.(type) {
+ case *ast.StructType:
+ kind = protocol.Struct
+ children = fieldListSymbols(m, tf, typExpr.Fields, protocol.Field)
+ if len(children) > 0 {
+ detail = "struct{...}"
+ } else {
+ detail = "struct{}"
+ }
+
+ // Find interface methods and embedded types.
+ case *ast.InterfaceType:
+ kind = protocol.Interface
+ children = fieldListSymbols(m, tf, typExpr.Methods, protocol.Method)
+ if len(children) > 0 {
+ detail = "interface{...}"
+ } else {
+ detail = "interface{}"
+ }
+
+ case *ast.FuncType:
+ kind = protocol.Function
+ detail = types.ExprString(typExpr)
+
+ default:
+ kind = protocol.Class // catch-all, for cases where we don't know the kind syntactically
+ detail = types.ExprString(typExpr)
+ }
+ return
+}
+
+func fieldListSymbols(m *protocol.Mapper, tf *token.File, fields *ast.FieldList, fieldKind protocol.SymbolKind) []protocol.DocumentSymbol {
+ if fields == nil {
+ return nil
+ }
+
+ var symbols []protocol.DocumentSymbol
+ for _, field := range fields.List {
+ detail, children := "", []protocol.DocumentSymbol(nil)
+ if field.Type != nil {
+ _, detail, children = typeDetails(m, tf, field.Type)
+ }
+ if len(field.Names) == 0 { // embedded interface or struct field
+ // By default, use the formatted type details as the name of this field.
+ // This handles potentially invalid syntax, as well as type embeddings in
+ // interfaces.
+ child := protocol.DocumentSymbol{
+ Name: detail,
+ Kind: protocol.Field, // consider all embeddings to be fields
+ Children: children,
+ }
+
+ // If the field is a valid embedding, promote the type name to field
+ // name.
+ selection := field.Type
+ if id := embeddedIdent(field.Type); id != nil {
+ child.Name = id.Name
+ child.Detail = detail
+ selection = id
+ }
+
+ if rng, err := m.NodeRange(tf, field.Type); err == nil {
+ child.Range = rng
+ }
+ if rng, err := m.NodeRange(tf, selection); err == nil {
+ child.SelectionRange = rng
+ }
+
+ symbols = append(symbols, child)
+ } else {
+ for _, name := range field.Names {
+ child := protocol.DocumentSymbol{
+ Name: name.Name,
+ Kind: fieldKind,
+ Detail: detail,
+ Children: children,
+ }
+
+ if rng, err := m.NodeRange(tf, field); err == nil {
+ child.Range = rng
+ }
+ if rng, err := m.NodeRange(tf, name); err == nil {
+ child.SelectionRange = rng
+ }
+
+ symbols = append(symbols, child)
+ }
+ }
+
+ }
+ return symbols
+}
+
+func varSymbol(m *protocol.Mapper, tf *token.File, spec *ast.ValueSpec, name *ast.Ident, isConst bool) (protocol.DocumentSymbol, error) {
+ s := protocol.DocumentSymbol{
+ Name: name.Name,
+ Kind: protocol.Variable,
+ }
+ if isConst {
+ s.Kind = protocol.Constant
+ }
+ var err error
+ s.Range, err = m.NodeRange(tf, spec)
+ if err != nil {
+ return protocol.DocumentSymbol{}, err
+ }
+ s.SelectionRange, err = m.NodeRange(tf, name)
+ if err != nil {
+ return protocol.DocumentSymbol{}, err
+ }
+ if spec.Type != nil { // type may be missing from the syntax
+ _, s.Detail, s.Children = typeDetails(m, tf, spec.Type)
+ }
+ return s, nil
+}
diff --git a/gopls/internal/lsp/source/type_definition.go b/gopls/internal/lsp/source/type_definition.go
new file mode 100644
index 000000000..104b7accf
--- /dev/null
+++ b/gopls/internal/lsp/source/type_definition.go
@@ -0,0 +1,55 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package source
+
+import (
+ "context"
+ "fmt"
+ "go/token"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/internal/event"
+)
+
+// TypeDefinition handles the textDocument/typeDefinition request for Go files.
+func TypeDefinition(ctx context.Context, snapshot Snapshot, fh FileHandle, position protocol.Position) ([]protocol.Location, error) {
+ ctx, done := event.Start(ctx, "source.TypeDefinition")
+ defer done()
+
+ pkg, pgf, err := PackageForFile(ctx, snapshot, fh.URI(), NarrowestPackage)
+ if err != nil {
+ return nil, err
+ }
+ pos, err := pgf.PositionPos(position)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO(rfindley): handle type switch implicits correctly here: if the user
+ // jumps to the type definition of x in x := y.(type), it makes sense to jump
+ // to the type of y.
+ _, obj, _ := referencedObject(pkg, pgf, pos)
+ if obj == nil {
+ return nil, nil
+ }
+
+ typObj := typeToObject(obj.Type())
+ if typObj == nil {
+ return nil, fmt.Errorf("no type definition for %s", obj.Name())
+ }
+
+ // Identifiers with the type "error" are a special case with no position.
+ if hasErrorType(typObj) {
+ // TODO(rfindley): we can do better here, returning a link to the builtin
+ // file.
+ return nil, nil
+ }
+
+ loc, err := mapPosition(ctx, pkg.FileSet(), snapshot, typObj.Pos(), typObj.Pos()+token.Pos(len(typObj.Name())))
+ if err != nil {
+ return nil, err
+ }
+ return []protocol.Location{loc}, nil
+}
diff --git a/gopls/internal/lsp/source/types_format.go b/gopls/internal/lsp/source/types_format.go
new file mode 100644
index 000000000..46e260212
--- /dev/null
+++ b/gopls/internal/lsp/source/types_format.go
@@ -0,0 +1,517 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package source
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "go/ast"
+ "go/doc"
+ "go/printer"
+ "go/token"
+ "go/types"
+ "strings"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/internal/bug"
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/event/tag"
+ "golang.org/x/tools/internal/typeparams"
+)
+
+// FormatType returns the detail and kind for a types.Type.
+func FormatType(typ types.Type, qf types.Qualifier) (detail string, kind protocol.CompletionItemKind) {
+ if types.IsInterface(typ) {
+ detail = "interface{...}"
+ kind = protocol.InterfaceCompletion
+ } else if _, ok := typ.(*types.Struct); ok {
+ detail = "struct{...}"
+ kind = protocol.StructCompletion
+ } else if typ != typ.Underlying() {
+ detail, kind = FormatType(typ.Underlying(), qf)
+ } else {
+ detail = types.TypeString(typ, qf)
+ kind = protocol.ClassCompletion
+ }
+ return detail, kind
+}
+
+type signature struct {
+ name, doc string
+ typeParams, params, results []string
+ variadic bool
+ needResultParens bool
+}
+
+func (s *signature) Format() string {
+ var b strings.Builder
+ b.WriteByte('(')
+ for i, p := range s.params {
+ if i > 0 {
+ b.WriteString(", ")
+ }
+ b.WriteString(p)
+ }
+ b.WriteByte(')')
+
+ // Add space between parameters and results.
+ if len(s.results) > 0 {
+ b.WriteByte(' ')
+ }
+ if s.needResultParens {
+ b.WriteByte('(')
+ }
+ for i, r := range s.results {
+ if i > 0 {
+ b.WriteString(", ")
+ }
+ b.WriteString(r)
+ }
+ if s.needResultParens {
+ b.WriteByte(')')
+ }
+ return b.String()
+}
+
+func (s *signature) TypeParams() []string {
+ return s.typeParams
+}
+
+func (s *signature) Params() []string {
+ return s.params
+}
+
+// NewBuiltinSignature returns signature for the builtin object with a given
+// name, if a builtin object with the name exists.
+func NewBuiltinSignature(ctx context.Context, s Snapshot, name string) (*signature, error) {
+ builtin, err := s.BuiltinFile(ctx)
+ if err != nil {
+ return nil, err
+ }
+ obj := builtin.File.Scope.Lookup(name)
+ if obj == nil {
+ return nil, fmt.Errorf("no builtin object for %s", name)
+ }
+ decl, ok := obj.Decl.(*ast.FuncDecl)
+ if !ok {
+ return nil, fmt.Errorf("no function declaration for builtin: %s", name)
+ }
+ if decl.Type == nil {
+ return nil, fmt.Errorf("no type for builtin decl %s", decl.Name)
+ }
+ var variadic bool
+ if decl.Type.Params.List != nil {
+ numParams := len(decl.Type.Params.List)
+ lastParam := decl.Type.Params.List[numParams-1]
+ if _, ok := lastParam.Type.(*ast.Ellipsis); ok {
+ variadic = true
+ }
+ }
+ fset := FileSetFor(builtin.Tok)
+ params, _ := formatFieldList(ctx, fset, decl.Type.Params, variadic)
+ results, needResultParens := formatFieldList(ctx, fset, decl.Type.Results, false)
+ d := decl.Doc.Text()
+ switch s.View().Options().HoverKind {
+ case SynopsisDocumentation:
+ d = doc.Synopsis(d)
+ case NoDocumentation:
+ d = ""
+ }
+ return &signature{
+ doc: d,
+ name: name,
+ needResultParens: needResultParens,
+ params: params,
+ results: results,
+ variadic: variadic,
+ }, nil
+}
+
+// replacer replaces some synthetic "type classes" used in the builtin file
+// with their most common constituent type.
+var replacer = strings.NewReplacer(
+ `ComplexType`, `complex128`,
+ `FloatType`, `float64`,
+ `IntegerType`, `int`,
+)
+
+func formatFieldList(ctx context.Context, fset *token.FileSet, list *ast.FieldList, variadic bool) ([]string, bool) {
+ if list == nil {
+ return nil, false
+ }
+ var writeResultParens bool
+ var result []string
+ for i := 0; i < len(list.List); i++ {
+ if i >= 1 {
+ writeResultParens = true
+ }
+ p := list.List[i]
+ cfg := printer.Config{Mode: printer.UseSpaces | printer.TabIndent, Tabwidth: 4}
+ b := &bytes.Buffer{}
+ if err := cfg.Fprint(b, fset, p.Type); err != nil {
+ event.Error(ctx, "unable to print type", nil, tag.Type.Of(p.Type))
+ continue
+ }
+ typ := replacer.Replace(b.String())
+ if len(p.Names) == 0 {
+ result = append(result, typ)
+ }
+ for _, name := range p.Names {
+ if name.Name != "" {
+ if i == 0 {
+ writeResultParens = true
+ }
+ result = append(result, fmt.Sprintf("%s %s", name.Name, typ))
+ } else {
+ result = append(result, typ)
+ }
+ }
+ }
+ if variadic {
+ result[len(result)-1] = strings.Replace(result[len(result)-1], "[]", "...", 1)
+ }
+ return result, writeResultParens
+}
+
+// FormatTypeParams turns TypeParamList into its Go representation, such as:
+// [T, Y]. Note that it does not print constraints as this is mainly used for
+// formatting type params in method receivers.
+func FormatTypeParams(tparams *typeparams.TypeParamList) string {
+ if tparams == nil || tparams.Len() == 0 {
+ return ""
+ }
+ var buf bytes.Buffer
+ buf.WriteByte('[')
+ for i := 0; i < tparams.Len(); i++ {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+ buf.WriteString(tparams.At(i).Obj().Name())
+ }
+ buf.WriteByte(']')
+ return buf.String()
+}
+
+// NewSignature returns formatted signature for a types.Signature struct.
+func NewSignature(ctx context.Context, s Snapshot, pkg Package, sig *types.Signature, comment *ast.CommentGroup, qf types.Qualifier, mq MetadataQualifier) (*signature, error) {
+ var tparams []string
+ tpList := typeparams.ForSignature(sig)
+ for i := 0; i < tpList.Len(); i++ {
+ tparam := tpList.At(i)
+ // TODO: is it possible to reuse the logic from FormatVarType here?
+ s := tparam.Obj().Name() + " " + tparam.Constraint().String()
+ tparams = append(tparams, s)
+ }
+
+ params := make([]string, 0, sig.Params().Len())
+ for i := 0; i < sig.Params().Len(); i++ {
+ el := sig.Params().At(i)
+ typ, err := FormatVarType(ctx, s, pkg, el, qf, mq)
+ if err != nil {
+ return nil, err
+ }
+ p := typ
+ if el.Name() != "" {
+ p = el.Name() + " " + typ
+ }
+ params = append(params, p)
+ }
+
+ var needResultParens bool
+ results := make([]string, 0, sig.Results().Len())
+ for i := 0; i < sig.Results().Len(); i++ {
+ if i >= 1 {
+ needResultParens = true
+ }
+ el := sig.Results().At(i)
+ typ, err := FormatVarType(ctx, s, pkg, el, qf, mq)
+ if err != nil {
+ return nil, err
+ }
+ if el.Name() == "" {
+ results = append(results, typ)
+ } else {
+ if i == 0 {
+ needResultParens = true
+ }
+ results = append(results, el.Name()+" "+typ)
+ }
+ }
+ var d string
+ if comment != nil {
+ d = comment.Text()
+ }
+ switch s.View().Options().HoverKind {
+ case SynopsisDocumentation:
+ d = doc.Synopsis(d)
+ case NoDocumentation:
+ d = ""
+ }
+ return &signature{
+ doc: d,
+ typeParams: tparams,
+ params: params,
+ results: results,
+ variadic: sig.Variadic(),
+ needResultParens: needResultParens,
+ }, nil
+}
+
+// FormatVarType formats a *types.Var, accounting for type aliases.
+// To do this, it looks in the AST of the file in which the object is declared.
+// On any errors, it always falls back to types.TypeString.
+//
+// TODO(rfindley): this function could return the actual name used in syntax,
+// for better parameter names.
+func FormatVarType(ctx context.Context, snapshot Snapshot, srcpkg Package, obj *types.Var, qf types.Qualifier, mq MetadataQualifier) (string, error) {
+ // TODO(rfindley): This looks wrong. The previous comment said:
+ // "If the given expr refers to a type parameter, then use the
+ // object's Type instead of the type parameter declaration. This helps
+ // format the instantiated type as opposed to the original undeclared
+ // generic type".
+ //
+ // But of course, if obj is a type param, we are formatting a generic type
+ // and not an instantiated type. Handling for instantiated types must be done
+ // at a higher level.
+ //
+ // Left this during refactoring in order to preserve pre-existing logic.
+ if typeparams.IsTypeParam(obj.Type()) {
+ return types.TypeString(obj.Type(), qf), nil
+ }
+
+ if obj.Pkg() == nil || !obj.Pos().IsValid() {
+ // This is defensive, though it is extremely unlikely we'll ever have a
+ // builtin var.
+ return types.TypeString(obj.Type(), qf), nil
+ }
+
+ targetpgf, pos, err := parseFull(ctx, snapshot, srcpkg.FileSet(), obj.Pos())
+ if err != nil {
+ return "", err // e.g. ctx cancelled
+ }
+
+ targetMeta := findFileInDeps(snapshot, srcpkg.Metadata(), targetpgf.URI)
+ if targetMeta == nil {
+ // If we have an object from type-checking, it should exist in a file in
+ // the forward transitive closure.
+ return "", bug.Errorf("failed to find file %q in deps of %q", targetpgf.URI, srcpkg.Metadata().ID)
+ }
+
+ decl, spec, field := findDeclInfo([]*ast.File{targetpgf.File}, pos)
+
+ // We can't handle type parameters correctly, so we fall back on TypeString
+ // for parameterized decls.
+ if decl, _ := decl.(*ast.FuncDecl); decl != nil {
+ if typeparams.ForFuncType(decl.Type).NumFields() > 0 {
+ return types.TypeString(obj.Type(), qf), nil // in generic function
+ }
+ if decl.Recv != nil && len(decl.Recv.List) > 0 {
+ if x, _, _, _ := typeparams.UnpackIndexExpr(decl.Recv.List[0].Type); x != nil {
+ return types.TypeString(obj.Type(), qf), nil // in method of generic type
+ }
+ }
+ }
+ if spec, _ := spec.(*ast.TypeSpec); spec != nil && typeparams.ForTypeSpec(spec).NumFields() > 0 {
+ return types.TypeString(obj.Type(), qf), nil // in generic type decl
+ }
+
+ if field == nil {
+ // TODO(rfindley): we should never reach here from an ordinary var, so
+ // should probably return an error here.
+ return types.TypeString(obj.Type(), qf), nil
+ }
+ expr := field.Type
+
+ rq := requalifier(snapshot, targetpgf.File, targetMeta, mq)
+
+ // The type names in the AST may not be correctly qualified.
+ // Determine the package name to use based on the package that originated
+ // the query and the package in which the type is declared.
+ // We then qualify the value by cloning the AST node and editing it.
+ expr = qualifyTypeExpr(expr, rq)
+
+ // If the request came from a different package than the one in which the
+ // types are defined, we may need to modify the qualifiers.
+ return FormatNodeFile(targetpgf.Tok, expr), nil
+}
+
+// qualifyTypeExpr clones the type expression expr after re-qualifying type
+// names using the given function, which accepts the current syntactic
+// qualifier (possibly "" for unqualified idents), and returns a new qualifier
+// (again, possibly "" if the identifier should be unqualified).
+//
+// The resulting expression may be inaccurate: without type-checking we don't
+// properly account for "." imported identifiers or builtins.
+//
+// TODO(rfindley): add many more tests for this function.
+func qualifyTypeExpr(expr ast.Expr, qf func(string) string) ast.Expr {
+ switch expr := expr.(type) {
+ case *ast.ArrayType:
+ return &ast.ArrayType{
+ Lbrack: expr.Lbrack,
+ Elt: qualifyTypeExpr(expr.Elt, qf),
+ Len: expr.Len,
+ }
+
+ case *ast.BinaryExpr:
+ if expr.Op != token.OR {
+ return expr
+ }
+ return &ast.BinaryExpr{
+ X: qualifyTypeExpr(expr.X, qf),
+ OpPos: expr.OpPos,
+ Op: expr.Op,
+ Y: qualifyTypeExpr(expr.Y, qf),
+ }
+
+ case *ast.ChanType:
+ return &ast.ChanType{
+ Arrow: expr.Arrow,
+ Begin: expr.Begin,
+ Dir: expr.Dir,
+ Value: qualifyTypeExpr(expr.Value, qf),
+ }
+
+ case *ast.Ellipsis:
+ return &ast.Ellipsis{
+ Ellipsis: expr.Ellipsis,
+ Elt: qualifyTypeExpr(expr.Elt, qf),
+ }
+
+ case *ast.FuncType:
+ return &ast.FuncType{
+ Func: expr.Func,
+ Params: qualifyFieldList(expr.Params, qf),
+ Results: qualifyFieldList(expr.Results, qf),
+ }
+
+ case *ast.Ident:
+ // Unqualified type (builtin, package local, or dot-imported).
+
+ // Don't qualify names that look like builtins.
+ //
+ // Without type-checking this may be inaccurate. It could be made accurate
+ // by doing syntactic object resolution for the entire package, but that
+ // does not seem worthwhile and we generally want to avoid using
+ // ast.Object, which may be inaccurate.
+ if obj := types.Universe.Lookup(expr.Name); obj != nil {
+ return expr
+ }
+
+ newName := qf("")
+ if newName != "" {
+ return &ast.SelectorExpr{
+ X: &ast.Ident{
+ NamePos: expr.Pos(),
+ Name: newName,
+ },
+ Sel: expr,
+ }
+ }
+ return expr
+
+ case *ast.IndexExpr:
+ return &ast.IndexExpr{
+ X: qualifyTypeExpr(expr.X, qf),
+ Lbrack: expr.Lbrack,
+ Index: qualifyTypeExpr(expr.Index, qf),
+ Rbrack: expr.Rbrack,
+ }
+
+ case *typeparams.IndexListExpr:
+ indices := make([]ast.Expr, len(expr.Indices))
+ for i, idx := range expr.Indices {
+ indices[i] = qualifyTypeExpr(idx, qf)
+ }
+ return &typeparams.IndexListExpr{
+ X: qualifyTypeExpr(expr.X, qf),
+ Lbrack: expr.Lbrack,
+ Indices: indices,
+ Rbrack: expr.Rbrack,
+ }
+
+ case *ast.InterfaceType:
+ return &ast.InterfaceType{
+ Interface: expr.Interface,
+ Methods: qualifyFieldList(expr.Methods, qf),
+ Incomplete: expr.Incomplete,
+ }
+
+ case *ast.MapType:
+ return &ast.MapType{
+ Map: expr.Map,
+ Key: qualifyTypeExpr(expr.Key, qf),
+ Value: qualifyTypeExpr(expr.Value, qf),
+ }
+
+ case *ast.ParenExpr:
+ return &ast.ParenExpr{
+ Lparen: expr.Lparen,
+ Rparen: expr.Rparen,
+ X: qualifyTypeExpr(expr.X, qf),
+ }
+
+ case *ast.SelectorExpr:
+ if id, ok := expr.X.(*ast.Ident); ok {
+ // qualified type
+ newName := qf(id.Name)
+ if newName == "" {
+ return expr.Sel
+ }
+ return &ast.SelectorExpr{
+ X: &ast.Ident{
+ NamePos: id.NamePos,
+ Name: newName,
+ },
+ Sel: expr.Sel,
+ }
+ }
+ return expr
+
+ case *ast.StarExpr:
+ return &ast.StarExpr{
+ Star: expr.Star,
+ X: qualifyTypeExpr(expr.X, qf),
+ }
+
+ case *ast.StructType:
+ return &ast.StructType{
+ Struct: expr.Struct,
+ Fields: qualifyFieldList(expr.Fields, qf),
+ Incomplete: expr.Incomplete,
+ }
+
+ default:
+ return expr
+ }
+}
+
+func qualifyFieldList(fl *ast.FieldList, qf func(string) string) *ast.FieldList {
+ if fl == nil {
+ return nil
+ }
+ if fl.List == nil {
+ return &ast.FieldList{
+ Closing: fl.Closing,
+ Opening: fl.Opening,
+ }
+ }
+ list := make([]*ast.Field, 0, len(fl.List))
+ for _, f := range fl.List {
+ list = append(list, &ast.Field{
+ Comment: f.Comment,
+ Doc: f.Doc,
+ Names: f.Names,
+ Tag: f.Tag,
+ Type: qualifyTypeExpr(f.Type, qf),
+ })
+ }
+ return &ast.FieldList{
+ Closing: fl.Closing,
+ Opening: fl.Opening,
+ List: list,
+ }
+}
diff --git a/gopls/internal/lsp/source/util.go b/gopls/internal/lsp/source/util.go
new file mode 100644
index 000000000..82cb8d075
--- /dev/null
+++ b/gopls/internal/lsp/source/util.go
@@ -0,0 +1,555 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package source
+
+import (
+ "context"
+ "go/ast"
+ "go/printer"
+ "go/token"
+ "go/types"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/safetoken"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/bug"
+ "golang.org/x/tools/internal/tokeninternal"
+ "golang.org/x/tools/internal/typeparams"
+)
+
+// IsGenerated gets and reads the file denoted by uri and reports
+// whether it contains a "generated file" comment as described at
+// https://golang.org/s/generatedcode.
+//
+// TODO(adonovan): opt: this function does too much.
+// Move snapshot.GetFile into the caller (most of which have already done it).
+func IsGenerated(ctx context.Context, snapshot Snapshot, uri span.URI) bool {
+ fh, err := snapshot.GetFile(ctx, uri)
+ if err != nil {
+ return false
+ }
+ pgf, err := snapshot.ParseGo(ctx, fh, ParseHeader)
+ if err != nil {
+ return false
+ }
+ for _, commentGroup := range pgf.File.Comments {
+ for _, comment := range commentGroup.List {
+ if matched := generatedRx.MatchString(comment.Text); matched {
+ // Check if comment is at the beginning of the line in source.
+ if safetoken.Position(pgf.Tok, comment.Slash).Column == 1 {
+ return true
+ }
+ }
+ }
+ }
+ return false
+}
+
+// adjustedObjEnd returns the end position of obj, possibly modified for
+// package names.
+//
+// TODO(rfindley): eliminate this function, by inlining it at callsites where
+// it makes sense.
+func adjustedObjEnd(obj types.Object) token.Pos {
+ nameLen := len(obj.Name())
+ if pkgName, ok := obj.(*types.PkgName); ok {
+ // An imported Go package has a package-local, unqualified name.
+ // When the name matches the imported package name, there is no
+ // identifier in the import spec with the local package name.
+ //
+ // For example:
+ // import "go/ast" // name "ast" matches package name
+ // import a "go/ast" // name "a" does not match package name
+ //
+ // When the identifier does not appear in the source, have the range
+ // of the object be the import path, including quotes.
+ if pkgName.Imported().Name() == pkgName.Name() {
+ nameLen = len(pkgName.Imported().Path()) + len(`""`)
+ }
+ }
+ return obj.Pos() + token.Pos(nameLen)
+}
+
+// Matches cgo generated comment as well as the proposed standard:
+//
+// https://golang.org/s/generatedcode
+var generatedRx = regexp.MustCompile(`// .*DO NOT EDIT\.?`)
+
+// FileKindForLang returns the file kind associated with the given language ID,
+// or UnknownKind if the language ID is not recognized.
+func FileKindForLang(langID string) FileKind {
+ switch langID {
+ case "go":
+ return Go
+ case "go.mod":
+ return Mod
+ case "go.sum":
+ return Sum
+ case "tmpl", "gotmpl":
+ return Tmpl
+ case "go.work":
+ return Work
+ default:
+ return UnknownKind
+ }
+}
+
+// nodeAtPos returns the index and the node whose position is contained inside
+// the node list.
+func nodeAtPos(nodes []ast.Node, pos token.Pos) (ast.Node, int) {
+ if nodes == nil {
+ return nil, -1
+ }
+ for i, node := range nodes {
+ if node.Pos() <= pos && pos <= node.End() {
+ return node, i
+ }
+ }
+ return nil, -1
+}
+
+// FormatNode returns the "pretty-print" output for an ast node.
+func FormatNode(fset *token.FileSet, n ast.Node) string {
+ var buf strings.Builder
+ if err := printer.Fprint(&buf, fset, n); err != nil {
+ return ""
+ }
+ return buf.String()
+}
+
+// FormatNodeFile is like FormatNode, but requires only the token.File for the
+// syntax containing the given ast node.
+func FormatNodeFile(file *token.File, n ast.Node) string {
+ fset := FileSetFor(file)
+ return FormatNode(fset, n)
+}
+
+// FileSetFor returns a new FileSet containing a sequence of new Files with
+// the same base, size, and line as the input files, for use in APIs that
+// require a FileSet.
+//
+// Precondition: the input files must be non-overlapping, and sorted in order
+// of their Base.
+func FileSetFor(files ...*token.File) *token.FileSet {
+ fset := token.NewFileSet()
+ for _, f := range files {
+ f2 := fset.AddFile(f.Name(), f.Base(), f.Size())
+ lines := tokeninternal.GetLines(f)
+ f2.SetLines(lines)
+ }
+ return fset
+}
+
+// Deref returns a pointer's element type, traversing as many levels as needed.
+// Otherwise it returns typ.
+//
+// It can return a pointer type for cyclic types (see golang/go#45510).
+func Deref(typ types.Type) types.Type {
+ var seen map[types.Type]struct{}
+ for {
+ p, ok := typ.Underlying().(*types.Pointer)
+ if !ok {
+ return typ
+ }
+ if _, ok := seen[p.Elem()]; ok {
+ return typ
+ }
+
+ typ = p.Elem()
+
+ if seen == nil {
+ seen = make(map[types.Type]struct{})
+ }
+ seen[typ] = struct{}{}
+ }
+}
+
+func SortDiagnostics(d []*Diagnostic) {
+ sort.Slice(d, func(i int, j int) bool {
+ return CompareDiagnostic(d[i], d[j]) < 0
+ })
+}
+
+func CompareDiagnostic(a, b *Diagnostic) int {
+ if r := protocol.CompareRange(a.Range, b.Range); r != 0 {
+ return r
+ }
+ if a.Source < b.Source {
+ return -1
+ }
+ if a.Source > b.Source {
+ return +1
+ }
+ if a.Message < b.Message {
+ return -1
+ }
+ if a.Message > b.Message {
+ return +1
+ }
+ return 0
+}
+
+// findFileInDeps finds package metadata containing URI in the transitive
+// dependencies of m. When using the Go command, the answer is unique.
+//
+// TODO(rfindley): refactor to share logic with findPackageInDeps?
+func findFileInDeps(s MetadataSource, m *Metadata, uri span.URI) *Metadata {
+ seen := make(map[PackageID]bool)
+ var search func(*Metadata) *Metadata
+ search = func(m *Metadata) *Metadata {
+ if seen[m.ID] {
+ return nil
+ }
+ seen[m.ID] = true
+ for _, cgf := range m.CompiledGoFiles {
+ if cgf == uri {
+ return m
+ }
+ }
+ for _, dep := range m.DepsByPkgPath {
+ m := s.Metadata(dep)
+ if m == nil {
+ bug.Reportf("nil metadata for %q", dep)
+ continue
+ }
+ if found := search(m); found != nil {
+ return found
+ }
+ }
+ return nil
+ }
+ return search(m)
+}
+
+// UnquoteImportPath returns the unquoted import path of s,
+// or "" if the path is not properly quoted.
+func UnquoteImportPath(s *ast.ImportSpec) ImportPath {
+ path, err := strconv.Unquote(s.Path.Value)
+ if err != nil {
+ return ""
+ }
+ return ImportPath(path)
+}
+
+// NodeContains returns true if a node encloses a given position pos.
+func NodeContains(n ast.Node, pos token.Pos) bool {
+ return n != nil && n.Pos() <= pos && pos <= n.End()
+}
+
+// CollectScopes returns all scopes in an ast path, ordered as innermost scope
+// first.
+func CollectScopes(info *types.Info, path []ast.Node, pos token.Pos) []*types.Scope {
+ // scopes[i], where i<len(path), is the possibly nil Scope of path[i].
+ var scopes []*types.Scope
+ for _, n := range path {
+ // Include *FuncType scope if pos is inside the function body.
+ switch node := n.(type) {
+ case *ast.FuncDecl:
+ if node.Body != nil && NodeContains(node.Body, pos) {
+ n = node.Type
+ }
+ case *ast.FuncLit:
+ if node.Body != nil && NodeContains(node.Body, pos) {
+ n = node.Type
+ }
+ }
+ scopes = append(scopes, info.Scopes[n])
+ }
+ return scopes
+}
+
+// Qualifier returns a function that appropriately formats a types.PkgName
+// appearing in a *ast.File.
+func Qualifier(f *ast.File, pkg *types.Package, info *types.Info) types.Qualifier {
+ // Construct mapping of import paths to their defined or implicit names.
+ imports := make(map[*types.Package]string)
+ for _, imp := range f.Imports {
+ var obj types.Object
+ if imp.Name != nil {
+ obj = info.Defs[imp.Name]
+ } else {
+ obj = info.Implicits[imp]
+ }
+ if pkgname, ok := obj.(*types.PkgName); ok {
+ imports[pkgname.Imported()] = pkgname.Name()
+ }
+ }
+ // Define qualifier to replace full package paths with names of the imports.
+ return func(p *types.Package) string {
+ if p == pkg {
+ return ""
+ }
+ if name, ok := imports[p]; ok {
+ if name == "." {
+ return ""
+ }
+ return name
+ }
+ return p.Name()
+ }
+}
+
+// requalifier returns a function that re-qualifies identifiers and qualified
+// identifiers contained in targetFile using the given metadata qualifier.
+func requalifier(s MetadataSource, targetFile *ast.File, targetMeta *Metadata, mq MetadataQualifier) func(string) string {
+ qm := map[string]string{
+ "": mq(targetMeta.Name, "", targetMeta.PkgPath),
+ }
+
+ // Construct mapping of import paths to their defined or implicit names.
+ for _, imp := range targetFile.Imports {
+ name, pkgName, impPath, pkgPath := importInfo(s, imp, targetMeta)
+
+ // Re-map the target name for the source file.
+ qm[name] = mq(pkgName, impPath, pkgPath)
+ }
+
+ return func(name string) string {
+ if newName, ok := qm[name]; ok {
+ return newName
+ }
+ return name
+ }
+}
+
+// A MetadataQualifier is a function that qualifies an identifier declared in a
+// package with the given package name, import path, and package path.
+//
+// In scenarios where metadata is missing the provided PackageName and
+// PackagePath may be empty, but ImportPath must always be non-empty.
+type MetadataQualifier func(PackageName, ImportPath, PackagePath) string
+
+// MetadataQualifierForFile returns a metadata qualifier that chooses the best
+// qualification of an imported package relative to the file f in package with
+// metadata m.
+func MetadataQualifierForFile(s MetadataSource, f *ast.File, m *Metadata) MetadataQualifier {
+ // Record local names for import paths.
+ localNames := make(map[ImportPath]string) // local names for imports in f
+ for _, imp := range f.Imports {
+ name, _, impPath, _ := importInfo(s, imp, m)
+ localNames[impPath] = name
+ }
+
+ // Record a package path -> import path mapping.
+ inverseDeps := make(map[PackageID]PackagePath)
+ for path, id := range m.DepsByPkgPath {
+ inverseDeps[id] = path
+ }
+ importsByPkgPath := make(map[PackagePath]ImportPath) // best import paths by pkgPath
+ for impPath, id := range m.DepsByImpPath {
+ if id == "" {
+ continue
+ }
+ pkgPath := inverseDeps[id]
+ _, hasPath := importsByPkgPath[pkgPath]
+ _, hasImp := localNames[impPath]
+ // In rare cases, there may be multiple import paths with the same package
+ // path. In such scenarios, prefer an import path that already exists in
+ // the file.
+ if !hasPath || hasImp {
+ importsByPkgPath[pkgPath] = impPath
+ }
+ }
+
+ return func(pkgName PackageName, impPath ImportPath, pkgPath PackagePath) string {
+ // If supplied, translate the package path to an import path in the source
+ // package.
+ if pkgPath != "" {
+ if srcImp := importsByPkgPath[pkgPath]; srcImp != "" {
+ impPath = srcImp
+ }
+ if pkgPath == m.PkgPath {
+ return ""
+ }
+ }
+ if localName, ok := localNames[impPath]; ok && impPath != "" {
+ return string(localName)
+ }
+ if pkgName != "" {
+ return string(pkgName)
+ }
+ idx := strings.LastIndexByte(string(impPath), '/')
+ return string(impPath[idx+1:])
+ }
+}
+
+// importInfo collects information about the import specified by imp,
+// extracting its file-local name, package name, import path, and package path.
+//
+// If metadata is missing for the import, the resulting package name and
+// package path may be empty, and the file local name may be guessed based on
+// the import path.
+//
+// Note: previous versions of this helper used a PackageID->PackagePath map
+// extracted from m, for extracting package path even in the case where
+// metadata for a dep was missing. This should not be necessary, as we should
+// always have metadata for IDs contained in DepsByPkgPath.
+func importInfo(s MetadataSource, imp *ast.ImportSpec, m *Metadata) (string, PackageName, ImportPath, PackagePath) {
+ var (
+ name string // local name
+ pkgName PackageName
+ impPath = UnquoteImportPath(imp)
+ pkgPath PackagePath
+ )
+
+ // If the import has a local name, use it.
+ if imp.Name != nil {
+ name = imp.Name.Name
+ }
+
+ // Try to find metadata for the import. If successful and there is no local
+ // name, the package name is the local name.
+ if depID := m.DepsByImpPath[impPath]; depID != "" {
+ if depm := s.Metadata(depID); depm != nil {
+ if name == "" {
+ name = string(depm.Name)
+ }
+ pkgName = depm.Name
+ pkgPath = depm.PkgPath
+ }
+ }
+
+ // If the local name is still unknown, guess it based on the import path.
+ if name == "" {
+ idx := strings.LastIndexByte(string(impPath), '/')
+ name = string(impPath[idx+1:])
+ }
+ return name, pkgName, impPath, pkgPath
+}
+
+// isDirective reports whether c is a comment directive.
+//
+// Copied and adapted from go/src/go/ast/ast.go.
+func isDirective(c string) bool {
+ if len(c) < 3 {
+ return false
+ }
+ if c[1] != '/' {
+ return false
+ }
+ //-style comment (no newline at the end)
+ c = c[2:]
+ if len(c) == 0 {
+ // empty line
+ return false
+ }
+ // "//line " is a line directive.
+ // (The // has been removed.)
+ if strings.HasPrefix(c, "line ") {
+ return true
+ }
+
+ // "//[a-z0-9]+:[a-z0-9]"
+ // (The // has been removed.)
+ colon := strings.Index(c, ":")
+ if colon <= 0 || colon+1 >= len(c) {
+ return false
+ }
+ for i := 0; i <= colon+1; i++ {
+ if i == colon {
+ continue
+ }
+ b := c[i]
+ if !('a' <= b && b <= 'z' || '0' <= b && b <= '9') {
+ return false
+ }
+ }
+ return true
+}
+
+// InDir checks whether path is in the file tree rooted at dir.
+// It checks only the lexical form of the file names.
+// It does not consider symbolic links.
+//
+// Copied from go/src/cmd/go/internal/search/search.go.
+func InDir(dir, path string) bool {
+ pv := strings.ToUpper(filepath.VolumeName(path))
+ dv := strings.ToUpper(filepath.VolumeName(dir))
+ path = path[len(pv):]
+ dir = dir[len(dv):]
+ switch {
+ default:
+ return false
+ case pv != dv:
+ return false
+ case len(path) == len(dir):
+ if path == dir {
+ return true
+ }
+ return false
+ case dir == "":
+ return path != ""
+ case len(path) > len(dir):
+ if dir[len(dir)-1] == filepath.Separator {
+ if path[:len(dir)] == dir {
+ return path[len(dir):] != ""
+ }
+ return false
+ }
+ if path[len(dir)] == filepath.Separator && path[:len(dir)] == dir {
+ if len(path) == len(dir)+1 {
+ return true
+ }
+ return path[len(dir)+1:] != ""
+ }
+ return false
+ }
+}
+
+// IsValidImport returns whether importPkgPath is importable
+// by pkgPath
+func IsValidImport(pkgPath, importPkgPath PackagePath) bool {
+ i := strings.LastIndex(string(importPkgPath), "/internal/")
+ if i == -1 {
+ return true
+ }
+ // TODO(rfindley): this looks wrong: IsCommandLineArguments is meant to
+ // operate on package IDs, not package paths.
+ if IsCommandLineArguments(PackageID(pkgPath)) {
+ return true
+ }
+ // TODO(rfindley): this is wrong. mod.testx/p should not be able to
+ // import mod.test/internal: https://go.dev/play/p/-Ca6P-E4V4q
+ return strings.HasPrefix(string(pkgPath), string(importPkgPath[:i]))
+}
+
+// IsCommandLineArguments reports whether a given value denotes
+// "command-line-arguments" package, which is a package with an unknown ID
+// created by the go command. It can have a test variant, which is why callers
+// should not check that a value equals "command-line-arguments" directly.
+func IsCommandLineArguments(id PackageID) bool {
+ return strings.Contains(string(id), "command-line-arguments")
+}
+
+// embeddedIdent returns the type name identifier for an embedding x, if x in a
+// valid embedding. Otherwise, it returns nil.
+//
+// Spec: An embedded field must be specified as a type name T or as a pointer
+// to a non-interface type name *T
+func embeddedIdent(x ast.Expr) *ast.Ident {
+ if star, ok := x.(*ast.StarExpr); ok {
+ x = star.X
+ }
+ switch ix := x.(type) { // check for instantiated receivers
+ case *ast.IndexExpr:
+ x = ix.X
+ case *typeparams.IndexListExpr:
+ x = ix.X
+ }
+ switch x := x.(type) {
+ case *ast.Ident:
+ return x
+ case *ast.SelectorExpr:
+ if _, ok := x.X.(*ast.Ident); ok {
+ return x.Sel
+ }
+ }
+ return nil
+}
diff --git a/gopls/internal/lsp/source/view.go b/gopls/internal/lsp/source/view.go
new file mode 100644
index 000000000..41bcbac4b
--- /dev/null
+++ b/gopls/internal/lsp/source/view.go
@@ -0,0 +1,857 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package source
+
+import (
+ "bytes"
+ "context"
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "go/ast"
+ "go/scanner"
+ "go/token"
+ "go/types"
+ "io"
+
+ "golang.org/x/mod/modfile"
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/packages"
+ "golang.org/x/tools/go/types/objectpath"
+ "golang.org/x/tools/gopls/internal/govulncheck"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/safetoken"
+ "golang.org/x/tools/gopls/internal/lsp/source/methodsets"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/event/label"
+ "golang.org/x/tools/internal/event/tag"
+ "golang.org/x/tools/internal/gocommand"
+ "golang.org/x/tools/internal/imports"
+ "golang.org/x/tools/internal/packagesinternal"
+)
+
+// A GlobalSnapshotID uniquely identifies a snapshot within this process and
+// increases monotonically with snapshot creation time.
+//
+// We use a distinct integral type for global IDs to help enforce correct
+// usage.
+type GlobalSnapshotID uint64
+
+// Snapshot represents the current state for the given view.
+type Snapshot interface {
+ // SequenceID is the sequence id of this snapshot within its containing
+ // view.
+ //
+ // Relative to their view sequence ids are monotonically increasing, but this
+ // does not hold globally: when new views are created their initial snapshot
+ // has sequence ID 0. For operations that span multiple views, use global
+ // IDs.
+ SequenceID() uint64
+
+ // GlobalID is a globally unique identifier for this snapshot. Global IDs are
+ // monotonic: subsequent snapshots will have higher global ID, though
+ // subsequent snapshots in a view may not have adjacent global IDs.
+ GlobalID() GlobalSnapshotID
+
+ // View returns the View associated with this snapshot.
+ View() View
+
+ // BackgroundContext returns a context used for all background processing
+ // on behalf of this snapshot.
+ BackgroundContext() context.Context
+
+ // ValidBuildConfiguration returns true if there is some error in the
+ // user's workspace. In particular, if they are both outside of a module
+ // and their GOPATH.
+ ValidBuildConfiguration() bool
+
+ // FindFile returns the FileHandle for the given URI, if it is already
+ // in the given snapshot.
+ FindFile(uri span.URI) FileHandle
+
+ // GetFile returns the FileHandle for a given URI, initializing it if it is
+ // not already part of the snapshot.
+ GetFile(ctx context.Context, uri span.URI) (FileHandle, error)
+
+ // AwaitInitialized waits until the snapshot's view is initialized.
+ AwaitInitialized(ctx context.Context)
+
+ // IsOpen returns whether the editor currently has a file open.
+ IsOpen(uri span.URI) bool
+
+ // IgnoredFile reports if a file would be ignored by a `go list` of the whole
+ // workspace.
+ IgnoredFile(uri span.URI) bool
+
+ // Templates returns the .tmpl files
+ Templates() map[span.URI]FileHandle
+
+ // ParseGo returns the parsed AST for the file.
+ // If the file is not available, returns nil and an error.
+ // Position information is added to FileSet().
+ ParseGo(ctx context.Context, fh FileHandle, mode ParseMode) (*ParsedGoFile, error)
+
+ // Analyze runs the specified analyzers on the given package at this snapshot.
+ Analyze(ctx context.Context, id PackageID, analyzers []*Analyzer) ([]*Diagnostic, error)
+
+ // RunGoCommandPiped runs the given `go` command, writing its output
+ // to stdout and stderr. Verb, Args, and WorkingDir must be specified.
+ //
+ // RunGoCommandPiped runs the command serially using gocommand.RunPiped,
+ // enforcing that this command executes exclusively to other commands on the
+ // server.
+ RunGoCommandPiped(ctx context.Context, mode InvocationFlags, inv *gocommand.Invocation, stdout, stderr io.Writer) error
+
+ // RunGoCommandDirect runs the given `go` command. Verb, Args, and
+ // WorkingDir must be specified.
+ RunGoCommandDirect(ctx context.Context, mode InvocationFlags, inv *gocommand.Invocation) (*bytes.Buffer, error)
+
+ // RunGoCommands runs a series of `go` commands that updates the go.mod
+ // and go.sum file for wd, and returns their updated contents.
+ RunGoCommands(ctx context.Context, allowNetwork bool, wd string, run func(invoke func(...string) (*bytes.Buffer, error)) error) (bool, []byte, []byte, error)
+
+ // RunProcessEnvFunc runs fn with the process env for this snapshot's view.
+ // Note: the process env contains cached module and filesystem state.
+ RunProcessEnvFunc(ctx context.Context, fn func(*imports.Options) error) error
+
+ // ModFiles are the go.mod files enclosed in the snapshot's view and known
+ // to the snapshot.
+ ModFiles() []span.URI
+
+ // ParseMod is used to parse go.mod files.
+ ParseMod(ctx context.Context, fh FileHandle) (*ParsedModule, error)
+
+ // ModWhy returns the results of `go mod why` for the module specified by
+ // the given go.mod file.
+ ModWhy(ctx context.Context, fh FileHandle) (map[string]string, error)
+
+ // ModTidy returns the results of `go mod tidy` for the module specified by
+ // the given go.mod file.
+ ModTidy(ctx context.Context, pm *ParsedModule) (*TidiedModule, error)
+
+ // ModVuln returns import vulnerability analysis for the given go.mod URI.
+ // Concurrent requests are combined into a single command.
+ ModVuln(ctx context.Context, modURI span.URI) (*govulncheck.Result, error)
+
+ // GoModForFile returns the URI of the go.mod file for the given URI.
+ GoModForFile(uri span.URI) span.URI
+
+ // WorkFile, if non-empty, is the go.work file for the workspace.
+ WorkFile() span.URI
+
+ // ParseWork is used to parse go.work files.
+ ParseWork(ctx context.Context, fh FileHandle) (*ParsedWorkFile, error)
+
+ // BuiltinFile returns information about the special builtin package.
+ BuiltinFile(ctx context.Context) (*ParsedGoFile, error)
+
+ // IsBuiltin reports whether uri is part of the builtin package.
+ IsBuiltin(ctx context.Context, uri span.URI) bool
+
+ // ReverseDependencies returns a new mapping whose entries are
+ // the ID and Metadata of each package in the workspace that
+ // directly or transitively depend on the package denoted by id,
+ // excluding id itself.
+ ReverseDependencies(ctx context.Context, id PackageID, transitive bool) (map[PackageID]*Metadata, error)
+
+ // ActiveMetadata returns a new, unordered slice containing
+ // metadata for all packages considered 'active' in the workspace.
+ //
+ // In normal memory mode, this is all workspace packages. In degraded memory
+ // mode, this is just the reverse transitive closure of open packages.
+ ActiveMetadata(ctx context.Context) ([]*Metadata, error)
+
+ // AllMetadata returns a new unordered array of metadata for all packages in the workspace.
+ AllMetadata(ctx context.Context) ([]*Metadata, error)
+
+ // Symbols returns all symbols in the snapshot.
+ Symbols(ctx context.Context) (map[span.URI][]Symbol, error)
+
+ // Metadata returns the metadata for the specified package,
+ // or nil if it was not found.
+ Metadata(id PackageID) *Metadata
+
+ // MetadataForFile returns a new slice containing metadata for each
+ // package containing the Go file identified by uri, ordered by the
+ // number of CompiledGoFiles (i.e. "narrowest" to "widest" package).
+ // The result may include tests and intermediate test variants of
+ // importable packages.
+ // It returns an error if the context was cancelled.
+ MetadataForFile(ctx context.Context, uri span.URI) ([]*Metadata, error)
+
+ // TypeCheck parses and type-checks the specified packages,
+ // and returns them in the same order as the ids.
+ // The resulting packages' types may belong to different importers,
+ // so types from different packages are incommensurable.
+ TypeCheck(ctx context.Context, ids ...PackageID) ([]Package, error)
+
+ // PackageDiagnostics returns diagnostics for files contained in specified
+ // packages.
+ //
+ // If these diagnostics cannot be loaded from cache, the requested packages
+ // may be type-checked.
+ PackageDiagnostics(ctx context.Context, ids ...PackageID) (map[span.URI][]*Diagnostic, error)
+
+ // References returns cross-references indexes for the specified packages.
+ //
+ // If these indexes cannot be loaded from cache, the requested packages may
+ // be type-checked.
+ References(ctx context.Context, ids ...PackageID) ([]XrefIndex, error)
+
+ // MethodSets returns method-set indexes for the specified packages.
+ //
+ // If these indexes cannot be loaded from cache, the requested packages may
+ // be type-checked.
+ MethodSets(ctx context.Context, ids ...PackageID) ([]*methodsets.Index, error)
+
+ // GetCriticalError returns any critical errors in the workspace.
+ //
+ // A nil result may mean success, or context cancellation.
+ GetCriticalError(ctx context.Context) *CriticalError
+}
+
+type XrefIndex interface {
+ Lookup(targets map[PackagePath]map[objectpath.Path]struct{}) (locs []protocol.Location)
+}
+
+// SnapshotLabels returns a new slice of labels that should be used for events
+// related to a snapshot.
+func SnapshotLabels(snapshot Snapshot) []label.Label {
+ return []label.Label{tag.Snapshot.Of(snapshot.SequenceID()), tag.Directory.Of(snapshot.View().Folder())}
+}
+
+// PackageForFile is a convenience function that selects a package to
+// which this file belongs (narrowest or widest), type-checks it in
+// the requested mode (full or workspace), and returns it, along with
+// the parse tree of that file.
+//
+// Type-checking is expensive. Call snapshot.ParseGo if all you need
+// is a parse tree, or snapshot.MetadataForFile if you only need metadata.
+func PackageForFile(ctx context.Context, snapshot Snapshot, uri span.URI, pkgSel PackageSelector) (Package, *ParsedGoFile, error) {
+ metas, err := snapshot.MetadataForFile(ctx, uri)
+ if err != nil {
+ return nil, nil, err
+ }
+ if len(metas) == 0 {
+ return nil, nil, fmt.Errorf("no package metadata for file %s", uri)
+ }
+ switch pkgSel {
+ case NarrowestPackage:
+ metas = metas[:1]
+ case WidestPackage:
+ metas = metas[len(metas)-1:]
+ }
+ pkgs, err := snapshot.TypeCheck(ctx, metas[0].ID)
+ if err != nil {
+ return nil, nil, err
+ }
+ pkg := pkgs[0]
+ pgf, err := pkg.File(uri)
+ if err != nil {
+ return nil, nil, err // "can't happen"
+ }
+ return pkg, pgf, err
+}
+
+// PackageSelector sets how a package is selected out from a set of packages
+// containing a given file.
+type PackageSelector int
+
+const (
+ // NarrowestPackage picks the "narrowest" package for a given file.
+ // By "narrowest" package, we mean the package with the fewest number of
+ // files that includes the given file. This solves the problem of test
+ // variants, as the test will have more files than the non-test package.
+ NarrowestPackage PackageSelector = iota
+
+ // WidestPackage returns the Package containing the most files.
+ // This is useful for something like diagnostics, where we'd prefer to
+ // offer diagnostics for as many files as possible.
+ WidestPackage
+)
+
+// InvocationFlags represents the settings of a particular go command invocation.
+// It is a mode, plus a set of flag bits.
+type InvocationFlags int
+
+const (
+ // Normal is appropriate for commands that might be run by a user and don't
+ // deliberately modify go.mod files, e.g. `go test`.
+ Normal InvocationFlags = iota
+ // WriteTemporaryModFile is for commands that need information from a
+ // modified version of the user's go.mod file, e.g. `go mod tidy` used to
+ // generate diagnostics.
+ WriteTemporaryModFile
+ // LoadWorkspace is for packages.Load, and other operations that should
+ // consider the whole workspace at once.
+ LoadWorkspace
+
+ // AllowNetwork is a flag bit that indicates the invocation should be
+ // allowed to access the network.
+ AllowNetwork InvocationFlags = 1 << 10
+)
+
+func (m InvocationFlags) Mode() InvocationFlags {
+ return m & (AllowNetwork - 1)
+}
+
+func (m InvocationFlags) AllowNetwork() bool {
+ return m&AllowNetwork != 0
+}
+
+// View represents a single workspace.
+// This is the level at which we maintain configuration like working directory
+// and build tags.
+type View interface {
+ // Name returns the name this view was constructed with.
+ Name() string
+
+ // Folder returns the folder with which this view was created.
+ Folder() span.URI
+
+ // Options returns a copy of the Options for this view.
+ Options() *Options
+
+ // Snapshot returns the current snapshot for the view, and a
+ // release function that must be called when the Snapshot is
+ // no longer needed.
+ //
+ // If the view is shut down, the resulting error will be non-nil, and the
+ // release function need not be called.
+ Snapshot() (Snapshot, func(), error)
+
+ // IsGoPrivatePath reports whether target is a private import path, as identified
+ // by the GOPRIVATE environment variable.
+ IsGoPrivatePath(path string) bool
+
+ // ModuleUpgrades returns known module upgrades for the dependencies of
+ // modfile.
+ ModuleUpgrades(modfile span.URI) map[string]string
+
+ // RegisterModuleUpgrades registers that upgrades exist for the given modules
+ // required by modfile.
+ RegisterModuleUpgrades(modfile span.URI, upgrades map[string]string)
+
+ // ClearModuleUpgrades clears all upgrades for the modules in modfile.
+ ClearModuleUpgrades(modfile span.URI)
+
+ // Vulnerabilities returns known vulnerabilities for the given modfile.
+ // TODO(suzmue): replace command.Vuln with a different type, maybe
+ // https://pkg.go.dev/golang.org/x/vuln/cmd/govulncheck/govulnchecklib#Summary?
+ Vulnerabilities(modfile ...span.URI) map[span.URI]*govulncheck.Result
+
+ // SetVulnerabilities resets the list of vulnerabilities that exists for the given modules
+ // required by modfile.
+ SetVulnerabilities(modfile span.URI, vulncheckResult *govulncheck.Result)
+
+ // FileKind returns the type of a file.
+ //
+ // We can't reliably deduce the kind from the file name alone,
+ // as some editors can be told to interpret a buffer as
+ // language different from the file name heuristic, e.g. that
+ // an .html file actually contains Go "html/template" syntax,
+ // or even that a .go file contains Python.
+ FileKind(FileHandle) FileKind
+
+ // GoVersion returns the configured Go version for this view.
+ GoVersion() int
+
+ // GoVersionString returns the go version string configured for this view.
+ // Unlike [GoVersion], this encodes the minor version and commit hash information.
+ GoVersionString() string
+}
+
+// A FileSource maps uris to FileHandles.
+type FileSource interface {
+ // GetFile returns the FileHandle for a given URI.
+ GetFile(ctx context.Context, uri span.URI) (FileHandle, error)
+}
+
+// A MetadataSource maps package IDs to metadata.
+//
+// TODO(rfindley): replace this with a concrete metadata graph, once it is
+// exposed from the snapshot.
+type MetadataSource interface {
+ // Metadata returns Metadata for the given package ID, or nil if it does not
+ // exist.
+ Metadata(PackageID) *Metadata
+}
+
+// A ParsedGoFile contains the results of parsing a Go file.
+type ParsedGoFile struct {
+ URI span.URI
+ Mode ParseMode
+ File *ast.File
+ Tok *token.File
+ // Source code used to build the AST. It may be different from the
+ // actual content of the file if we have fixed the AST.
+ Src []byte
+ Fixed bool
+ Mapper *protocol.Mapper // may map fixed Src, not file content
+ ParseErr scanner.ErrorList
+}
+
+// -- go/token domain convenience helpers --
+
+// PositionPos returns the token.Pos of protocol position p within the file.
+func (pgf *ParsedGoFile) PositionPos(p protocol.Position) (token.Pos, error) {
+ offset, err := pgf.Mapper.PositionOffset(p)
+ if err != nil {
+ return token.NoPos, err
+ }
+ return safetoken.Pos(pgf.Tok, offset)
+}
+
+// PosRange returns a protocol Range for the token.Pos interval in this file.
+func (pgf *ParsedGoFile) PosRange(start, end token.Pos) (protocol.Range, error) {
+ return pgf.Mapper.PosRange(pgf.Tok, start, end)
+}
+
+// PosMappedRange returns a MappedRange for the token.Pos interval in this file.
+// A MappedRange can be converted to any other form.
+func (pgf *ParsedGoFile) PosMappedRange(start, end token.Pos) (protocol.MappedRange, error) {
+ return pgf.Mapper.PosMappedRange(pgf.Tok, start, end)
+}
+
+// PosLocation returns a protocol Location for the token.Pos interval in this file.
+func (pgf *ParsedGoFile) PosLocation(start, end token.Pos) (protocol.Location, error) {
+ return pgf.Mapper.PosLocation(pgf.Tok, start, end)
+}
+
+// NodeRange returns a protocol Range for the ast.Node interval in this file.
+func (pgf *ParsedGoFile) NodeRange(node ast.Node) (protocol.Range, error) {
+ return pgf.Mapper.NodeRange(pgf.Tok, node)
+}
+
+// NodeMappedRange returns a MappedRange for the ast.Node interval in this file.
+// A MappedRange can be converted to any other form.
+func (pgf *ParsedGoFile) NodeMappedRange(node ast.Node) (protocol.MappedRange, error) {
+ return pgf.Mapper.NodeMappedRange(pgf.Tok, node)
+}
+
+// NodeLocation returns a protocol Location for the ast.Node interval in this file.
+func (pgf *ParsedGoFile) NodeLocation(node ast.Node) (protocol.Location, error) {
+ return pgf.Mapper.PosLocation(pgf.Tok, node.Pos(), node.End())
+}
+
+// RangePos parses a protocol Range back into the go/token domain.
+func (pgf *ParsedGoFile) RangePos(r protocol.Range) (token.Pos, token.Pos, error) {
+ start, end, err := pgf.Mapper.RangeOffsets(r)
+ if err != nil {
+ return token.NoPos, token.NoPos, err
+ }
+ return pgf.Tok.Pos(start), pgf.Tok.Pos(end), nil
+}
+
+// A ParsedModule contains the results of parsing a go.mod file.
+type ParsedModule struct {
+ URI span.URI
+ File *modfile.File
+ Mapper *protocol.Mapper
+ ParseErrors []*Diagnostic
+}
+
+// A ParsedWorkFile contains the results of parsing a go.work file.
+type ParsedWorkFile struct {
+ URI span.URI
+ File *modfile.WorkFile
+ Mapper *protocol.Mapper
+ ParseErrors []*Diagnostic
+}
+
+// A TidiedModule contains the results of running `go mod tidy` on a module.
+type TidiedModule struct {
+ // Diagnostics representing changes made by `go mod tidy`.
+ Diagnostics []*Diagnostic
+ // The bytes of the go.mod file after it was tidied.
+ TidiedContent []byte
+}
+
+// Metadata represents package metadata retrieved from go/packages.
+type Metadata struct {
+ ID PackageID
+ PkgPath PackagePath
+ Name PackageName
+ GoFiles []span.URI
+ CompiledGoFiles []span.URI
+ ForTest PackagePath // package path under test, or ""
+ TypesSizes types.Sizes
+ Errors []packages.Error
+ DepsByImpPath map[ImportPath]PackageID // may contain dups; empty ID => missing
+ DepsByPkgPath map[PackagePath]PackageID // values are unique and non-empty
+ Module *packages.Module
+ DepsErrors []*packagesinternal.PackageError
+ Diagnostics []*Diagnostic // processed diagnostics from 'go list'
+ LoadDir string // directory from which go/packages was run
+}
+
+func (m *Metadata) String() string { return string(m.ID) }
+
+// IsIntermediateTestVariant reports whether the given package is an
+// intermediate test variant, e.g. "net/http [net/url.test]".
+//
+// Such test variants arise when an x_test package (in this case net/url_test)
+// imports a package (in this case net/http) that itself imports the the
+// non-x_test package (in this case net/url).
+//
+// This is done so that the forward transitive closure of net/url_test has
+// only one package for the "net/url" import.
+// The intermediate test variant exists to hold the test variant import:
+//
+// net/url_test [net/url.test]
+//
+// | "net/http" -> net/http [net/url.test]
+// | "net/url" -> net/url [net/url.test]
+// | ...
+//
+// net/http [net/url.test]
+//
+// | "net/url" -> net/url [net/url.test]
+// | ...
+//
+// This restriction propagates throughout the import graph of net/http: for
+// every package imported by net/http that imports net/url, there must be an
+// intermediate test variant that instead imports "net/url [net/url.test]".
+//
+// As one can see from the example of net/url and net/http, intermediate test
+// variants can result in many additional packages that are essentially (but
+// not quite) identical. For this reason, we filter these variants wherever
+// possible.
+func (m *Metadata) IsIntermediateTestVariant() bool {
+ return m.ForTest != "" && m.ForTest != m.PkgPath && m.ForTest+"_test" != m.PkgPath
+}
+
+// RemoveIntermediateTestVariants removes intermediate test variants, modifying the array.
+func RemoveIntermediateTestVariants(metas []*Metadata) []*Metadata {
+ res := metas[:0]
+ for _, m := range metas {
+ if !m.IsIntermediateTestVariant() {
+ res = append(res, m)
+ }
+ }
+ return res
+}
+
+var ErrViewExists = errors.New("view already exists for session")
+
+// FileModification represents a modification to a file.
+type FileModification struct {
+ URI span.URI
+ Action FileAction
+
+ // OnDisk is true if a watched file is changed on disk.
+ // If true, Version will be -1 and Text will be nil.
+ OnDisk bool
+
+ // Version will be -1 and Text will be nil when they are not supplied,
+ // specifically on textDocument/didClose and for on-disk changes.
+ Version int32
+ Text []byte
+
+ // LanguageID is only sent from the language client on textDocument/didOpen.
+ LanguageID string
+}
+
+type FileAction int
+
+const (
+ UnknownFileAction = FileAction(iota)
+ Open
+ Change
+ Close
+ Save
+ Create
+ Delete
+ InvalidateMetadata
+)
+
+func (a FileAction) String() string {
+ switch a {
+ case Open:
+ return "Open"
+ case Change:
+ return "Change"
+ case Close:
+ return "Close"
+ case Save:
+ return "Save"
+ case Create:
+ return "Create"
+ case Delete:
+ return "Delete"
+ case InvalidateMetadata:
+ return "InvalidateMetadata"
+ default:
+ return "Unknown"
+ }
+}
+
+var ErrTmpModfileUnsupported = errors.New("-modfile is unsupported for this Go version")
+var ErrNoModOnDisk = errors.New("go.mod file is not on disk")
+
+func IsNonFatalGoModError(err error) bool {
+ return err == ErrTmpModfileUnsupported || err == ErrNoModOnDisk
+}
+
+// ParseMode controls the content of the AST produced when parsing a source file.
+type ParseMode int
+
+const (
+ // ParseHeader specifies that the main package declaration and imports are needed.
+ // This is the mode used when attempting to examine the package graph structure.
+ ParseHeader ParseMode = iota
+
+ // ParseFull specifies the full AST is needed.
+ // This is used for files of direct interest where the entire contents must
+ // be considered.
+ ParseFull
+)
+
+// A FileHandle is an interface to files tracked by the LSP session, which may
+// be either files read from disk, or open in the editor session (overlays).
+type FileHandle interface {
+ // URI is the URI for this file handle.
+ // TODO(rfindley): this is not actually well-defined. In some cases, there
+ // may be more than one URI that resolve to the same FileHandle. Which one is
+ // this?
+ URI() span.URI
+ // FileIdentity returns a FileIdentity for the file, even if there was an
+ // error reading it.
+ FileIdentity() FileIdentity
+ // Saved reports whether the file has the same content on disk.
+ // For on-disk files, this is trivially true.
+ Saved() bool
+ // Version returns the file version, as defined by the LSP client.
+ // For on-disk file handles, Version returns 0.
+ Version() int32
+ // Read reads the contents of a file.
+ // If the file is not available, returns a nil slice and an error.
+ Read() ([]byte, error)
+}
+
+// A Hash is a cryptographic digest of the contents of a file.
+// (Although at 32B it is larger than a 16B string header, it is smaller
+// and has better locality than the string header + 64B of hex digits.)
+type Hash [sha256.Size]byte
+
+// HashOf returns the hash of some data.
+func HashOf(data []byte) Hash {
+ return Hash(sha256.Sum256(data))
+}
+
+// Hashf returns the hash of a printf-formatted string.
+func Hashf(format string, args ...interface{}) Hash {
+ // Although this looks alloc-heavy, it is faster than using
+ // Fprintf on sha256.New() because the allocations don't escape.
+ return HashOf([]byte(fmt.Sprintf(format, args...)))
+}
+
+// String returns the digest as a string of hex digits.
+func (h Hash) String() string {
+ return fmt.Sprintf("%64x", [sha256.Size]byte(h))
+}
+
+// Less returns true if the given hash is less than the other.
+func (h Hash) Less(other Hash) bool {
+ return bytes.Compare(h[:], other[:]) < 0
+}
+
+// XORWith updates *h to *h XOR h2.
+func (h *Hash) XORWith(h2 Hash) {
+ // Small enough that we don't need crypto/subtle.XORBytes.
+ for i := range h {
+ h[i] ^= h2[i]
+ }
+}
+
+// FileIdentity uniquely identifies a file at a version from a FileSystem.
+type FileIdentity struct {
+ URI span.URI
+ Hash Hash // digest of file contents
+}
+
+func (id FileIdentity) String() string {
+ return fmt.Sprintf("%s%s", id.URI, id.Hash)
+}
+
+// FileKind describes the kind of the file in question.
+// It can be one of Go,mod, Sum, or Tmpl.
+type FileKind int
+
+const (
+ // UnknownKind is a file type we don't know about.
+ UnknownKind = FileKind(iota)
+
+ // Go is a normal go source file.
+ Go
+ // Mod is a go.mod file.
+ Mod
+ // Sum is a go.sum file.
+ Sum
+ // Tmpl is a template file.
+ Tmpl
+ // Work is a go.work file.
+ Work
+)
+
+func (k FileKind) String() string {
+ switch k {
+ case Go:
+ return "go"
+ case Mod:
+ return "go.mod"
+ case Sum:
+ return "go.sum"
+ case Tmpl:
+ return "tmpl"
+ case Work:
+ return "go.work"
+ default:
+ return fmt.Sprintf("internal error: unknown file kind %d", k)
+ }
+}
+
+// Analyzer represents a go/analysis analyzer with some boolean properties
+// that let the user know how to use the analyzer.
+type Analyzer struct {
+ Analyzer *analysis.Analyzer
+
+ // Enabled reports whether the analyzer is enabled. This value can be
+ // configured per-analysis in user settings. For staticcheck analyzers,
+ // the value of the Staticcheck setting overrides this field.
+ //
+ // Most clients should use the IsEnabled method.
+ Enabled bool
+
+ // Fix is the name of the suggested fix name used to invoke the suggested
+ // fixes for the analyzer. It is non-empty if we expect this analyzer to
+ // provide its fix separately from its diagnostics. That is, we should apply
+ // the analyzer's suggested fixes through a Command, not a TextEdit.
+ Fix string
+
+ // ActionKind is the kind of code action this analyzer produces. If
+ // unspecified the type defaults to quickfix.
+ ActionKind []protocol.CodeActionKind
+
+ // Severity is the severity set for diagnostics reported by this
+ // analyzer. If left unset it defaults to Warning.
+ Severity protocol.DiagnosticSeverity
+}
+
+func (a *Analyzer) String() string { return a.Analyzer.String() }
+
+// IsEnabled reports whether this analyzer is enabled by the given options.
+func (a Analyzer) IsEnabled(options *Options) bool {
+ // Staticcheck analyzers can only be enabled when staticcheck is on.
+ if _, ok := options.StaticcheckAnalyzers[a.Analyzer.Name]; ok {
+ if !options.Staticcheck {
+ return false
+ }
+ }
+ if enabled, ok := options.Analyses[a.Analyzer.Name]; ok {
+ return enabled
+ }
+ return a.Enabled
+}
+
+// Declare explicit types for package paths, names, and IDs to ensure that we
+// never use an ID where a path belongs, and vice versa. If we confused these,
+// it would result in confusing errors because package IDs often look like
+// package paths.
+type (
+ PackageID string // go list's unique identifier for a package (e.g. "vendor/example.com/foo [vendor/example.com/bar.test]")
+ PackagePath string // name used to prefix linker symbols (e.g. "vendor/example.com/foo")
+ PackageName string // identifier in 'package' declaration (e.g. "foo")
+ ImportPath string // path that appears in an import declaration (e.g. "example.com/foo")
+)
+
+// Package represents a Go package that has been parsed and type-checked.
+//
+// By design, there is no way to reach from a Package to the Package
+// representing one of its dependencies.
+//
+// Callers must not assume that two Packages share the same
+// token.FileSet or types.Importer and thus have commensurable
+// token.Pos values or types.Objects. Instead, use stable naming
+// schemes, such as (URI, byte offset) for positions, or (PackagePath,
+// objectpath.Path) for exported declarations.
+type Package interface {
+ Metadata() *Metadata
+
+ // Results of parsing:
+ FileSet() *token.FileSet
+ ParseMode() ParseMode
+ CompiledGoFiles() []*ParsedGoFile // (borrowed)
+ File(uri span.URI) (*ParsedGoFile, error)
+ GetSyntax() []*ast.File // (borrowed)
+ HasParseErrors() bool
+
+ // Results of type checking:
+ GetTypes() *types.Package
+ GetTypesInfo() *types.Info
+ DependencyTypes(PackagePath) *types.Package // nil for indirect dependency of no consequence
+ HasTypeErrors() bool
+ DiagnosticsForFile(ctx context.Context, s Snapshot, uri span.URI) ([]*Diagnostic, error)
+}
+
+type unit = struct{}
+
+// A CriticalError is a workspace-wide error that generally prevents gopls from
+// functioning correctly. In the presence of critical errors, other diagnostics
+// in the workspace may not make sense.
+type CriticalError struct {
+ // MainError is the primary error. Must be non-nil.
+ MainError error
+
+ // Diagnostics contains any supplemental (structured) diagnostics.
+ Diagnostics []*Diagnostic
+}
+
+// An Diagnostic corresponds to an LSP Diagnostic.
+// https://microsoft.github.io/language-server-protocol/specification#diagnostic
+type Diagnostic struct {
+ URI span.URI
+ Range protocol.Range
+ Severity protocol.DiagnosticSeverity
+ Code string
+ CodeHref string
+
+ // Source is a human-readable description of the source of the error.
+ // Diagnostics generated by an analysis.Analyzer set it to Analyzer.Name.
+ Source DiagnosticSource
+
+ Message string
+
+ Tags []protocol.DiagnosticTag
+ Related []protocol.DiagnosticRelatedInformation
+
+ // Fields below are used internally to generate quick fixes. They aren't
+ // part of the LSP spec and don't leave the server.
+ SuggestedFixes []SuggestedFix
+}
+
+func (d *Diagnostic) String() string {
+ return fmt.Sprintf("%v: %s", d.Range, d.Message)
+}
+
+type DiagnosticSource string
+
+const (
+ UnknownError DiagnosticSource = "<Unknown source>"
+ ListError DiagnosticSource = "go list"
+ ParseError DiagnosticSource = "syntax"
+ TypeError DiagnosticSource = "compiler"
+ ModTidyError DiagnosticSource = "go mod tidy"
+ OptimizationDetailsError DiagnosticSource = "optimizer details"
+ UpgradeNotification DiagnosticSource = "upgrade available"
+ Vulncheck DiagnosticSource = "vulncheck imports"
+ Govulncheck DiagnosticSource = "govulncheck"
+ TemplateError DiagnosticSource = "template"
+ WorkFileError DiagnosticSource = "go.work file"
+)
+
+func AnalyzerErrorKind(name string) DiagnosticSource {
+ return DiagnosticSource(name)
+}
diff --git a/gopls/internal/lsp/source/workspace_symbol.go b/gopls/internal/lsp/source/workspace_symbol.go
new file mode 100644
index 000000000..17c3a24fb
--- /dev/null
+++ b/gopls/internal/lsp/source/workspace_symbol.go
@@ -0,0 +1,632 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package source
+
+import (
+ "context"
+ "fmt"
+ "go/types"
+ "path"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "sort"
+ "strings"
+ "unicode"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/fuzzy"
+)
+
+// Symbol holds a precomputed symbol value. Note: we avoid using the
+// protocol.SymbolInformation struct here in order to reduce the size of each
+// symbol.
+type Symbol struct {
+ Name string
+ Kind protocol.SymbolKind
+ Range protocol.Range
+}
+
+// maxSymbols defines the maximum number of symbol results that should ever be
+// sent in response to a client.
+const maxSymbols = 100
+
+// WorkspaceSymbols matches symbols across all views using the given query,
+// according to the match semantics parameterized by matcherType and style.
+//
+// The workspace symbol method is defined in the spec as follows:
+//
+// The workspace symbol request is sent from the client to the server to
+// list project-wide symbols matching the query string.
+//
+// It is unclear what "project-wide" means here, but given the parameters of
+// workspace/symbol do not include any workspace identifier, then it has to be
+// assumed that "project-wide" means "across all workspaces". Hence why
+// WorkspaceSymbols receives the views []View.
+//
+// However, it then becomes unclear what it would mean to call WorkspaceSymbols
+// with a different configured SymbolMatcher per View. Therefore we assume that
+// Session level configuration will define the SymbolMatcher to be used for the
+// WorkspaceSymbols method.
+func WorkspaceSymbols(ctx context.Context, matcher SymbolMatcher, style SymbolStyle, views []View, query string) ([]protocol.SymbolInformation, error) {
+ ctx, done := event.Start(ctx, "source.WorkspaceSymbols")
+ defer done()
+ if query == "" {
+ return nil, nil
+ }
+
+ var s symbolizer
+ switch style {
+ case DynamicSymbols:
+ s = dynamicSymbolMatch
+ case FullyQualifiedSymbols:
+ s = fullyQualifiedSymbolMatch
+ case PackageQualifiedSymbols:
+ s = packageSymbolMatch
+ default:
+ panic(fmt.Errorf("unknown symbol style: %v", style))
+ }
+
+ return collectSymbols(ctx, views, matcher, s, query)
+}
+
+// A matcherFunc returns the index and score of a symbol match.
+//
+// See the comment for symbolCollector for more information.
+type matcherFunc func(chunks []string) (int, float64)
+
+// A symbolizer returns the best symbol match for a name with pkg, according to
+// some heuristic. The symbol name is passed as the slice nameParts of logical
+// name pieces. For example, for myType.field the caller can pass either
+// []string{"myType.field"} or []string{"myType.", "field"}.
+//
+// See the comment for symbolCollector for more information.
+//
+// The space argument is an empty slice with spare capacity that may be used
+// to allocate the result.
+type symbolizer func(space []string, name string, pkg *Metadata, m matcherFunc) ([]string, float64)
+
+func fullyQualifiedSymbolMatch(space []string, name string, pkg *Metadata, matcher matcherFunc) ([]string, float64) {
+ if _, score := dynamicSymbolMatch(space, name, pkg, matcher); score > 0 {
+ return append(space, string(pkg.PkgPath), ".", name), score
+ }
+ return nil, 0
+}
+
+func dynamicSymbolMatch(space []string, name string, pkg *Metadata, matcher matcherFunc) ([]string, float64) {
+ if IsCommandLineArguments(pkg.ID) {
+ // command-line-arguments packages have a non-sensical package path, so
+ // just use their package name.
+ return packageSymbolMatch(space, name, pkg, matcher)
+ }
+
+ var score float64
+
+ endsInPkgName := strings.HasSuffix(string(pkg.PkgPath), string(pkg.Name))
+
+ // If the package path does not end in the package name, we need to check the
+ // package-qualified symbol as an extra pass first.
+ if !endsInPkgName {
+ pkgQualified := append(space, string(pkg.Name), ".", name)
+ idx, score := matcher(pkgQualified)
+ nameStart := len(pkg.Name) + 1
+ if score > 0 {
+ // If our match is contained entirely within the unqualified portion,
+ // just return that.
+ if idx >= nameStart {
+ return append(space, name), score
+ }
+ // Lower the score for matches that include the package name.
+ return pkgQualified, score * 0.8
+ }
+ }
+
+ // Now try matching the fully qualified symbol.
+ fullyQualified := append(space, string(pkg.PkgPath), ".", name)
+ idx, score := matcher(fullyQualified)
+
+ // As above, check if we matched just the unqualified symbol name.
+ nameStart := len(pkg.PkgPath) + 1
+ if idx >= nameStart {
+ return append(space, name), score
+ }
+
+ // If our package path ends in the package name, we'll have skipped the
+ // initial pass above, so check if we matched just the package-qualified
+ // name.
+ if endsInPkgName && idx >= 0 {
+ pkgStart := len(pkg.PkgPath) - len(pkg.Name)
+ if idx >= pkgStart {
+ return append(space, string(pkg.Name), ".", name), score
+ }
+ }
+
+ // Our match was not contained within the unqualified or package qualified
+ // symbol. Return the fully qualified symbol but discount the score.
+ return fullyQualified, score * 0.6
+}
+
+func packageSymbolMatch(space []string, name string, pkg *Metadata, matcher matcherFunc) ([]string, float64) {
+ qualified := append(space, string(pkg.Name), ".", name)
+ if _, s := matcher(qualified); s > 0 {
+ return qualified, s
+ }
+ return nil, 0
+}
+
+func buildMatcher(matcher SymbolMatcher, query string) matcherFunc {
+ switch matcher {
+ case SymbolFuzzy:
+ return parseQuery(query, newFuzzyMatcher)
+ case SymbolFastFuzzy:
+ return parseQuery(query, func(query string) matcherFunc {
+ return fuzzy.NewSymbolMatcher(query).Match
+ })
+ case SymbolCaseSensitive:
+ return matchExact(query)
+ case SymbolCaseInsensitive:
+ q := strings.ToLower(query)
+ exact := matchExact(q)
+ wrapper := []string{""}
+ return func(chunks []string) (int, float64) {
+ s := strings.Join(chunks, "")
+ wrapper[0] = strings.ToLower(s)
+ return exact(wrapper)
+ }
+ }
+ panic(fmt.Errorf("unknown symbol matcher: %v", matcher))
+}
+
+func newFuzzyMatcher(query string) matcherFunc {
+ fm := fuzzy.NewMatcher(query)
+ return func(chunks []string) (int, float64) {
+ score := float64(fm.ScoreChunks(chunks))
+ ranges := fm.MatchedRanges()
+ if len(ranges) > 0 {
+ return ranges[0], score
+ }
+ return -1, score
+ }
+}
+
+// parseQuery parses a field-separated symbol query, extracting the special
+// characters listed below, and returns a matcherFunc corresponding to the AND
+// of all field queries.
+//
+// Special characters:
+//
+// ^ match exact prefix
+// $ match exact suffix
+// ' match exact
+//
+// In all three of these special queries, matches are 'smart-cased', meaning
+// they are case sensitive if the symbol query contains any upper-case
+// characters, and case insensitive otherwise.
+func parseQuery(q string, newMatcher func(string) matcherFunc) matcherFunc {
+ fields := strings.Fields(q)
+ if len(fields) == 0 {
+ return func([]string) (int, float64) { return -1, 0 }
+ }
+ var funcs []matcherFunc
+ for _, field := range fields {
+ var f matcherFunc
+ switch {
+ case strings.HasPrefix(field, "^"):
+ prefix := field[1:]
+ f = smartCase(prefix, func(chunks []string) (int, float64) {
+ s := strings.Join(chunks, "")
+ if strings.HasPrefix(s, prefix) {
+ return 0, 1
+ }
+ return -1, 0
+ })
+ case strings.HasPrefix(field, "'"):
+ exact := field[1:]
+ f = smartCase(exact, matchExact(exact))
+ case strings.HasSuffix(field, "$"):
+ suffix := field[0 : len(field)-1]
+ f = smartCase(suffix, func(chunks []string) (int, float64) {
+ s := strings.Join(chunks, "")
+ if strings.HasSuffix(s, suffix) {
+ return len(s) - len(suffix), 1
+ }
+ return -1, 0
+ })
+ default:
+ f = newMatcher(field)
+ }
+ funcs = append(funcs, f)
+ }
+ if len(funcs) == 1 {
+ return funcs[0]
+ }
+ return comboMatcher(funcs).match
+}
+
+func matchExact(exact string) matcherFunc {
+ return func(chunks []string) (int, float64) {
+ s := strings.Join(chunks, "")
+ if idx := strings.LastIndex(s, exact); idx >= 0 {
+ return idx, 1
+ }
+ return -1, 0
+ }
+}
+
+// smartCase returns a matcherFunc that is case-sensitive if q contains any
+// upper-case characters, and case-insensitive otherwise.
+func smartCase(q string, m matcherFunc) matcherFunc {
+ insensitive := strings.ToLower(q) == q
+ wrapper := []string{""}
+ return func(chunks []string) (int, float64) {
+ s := strings.Join(chunks, "")
+ if insensitive {
+ s = strings.ToLower(s)
+ }
+ wrapper[0] = s
+ return m(wrapper)
+ }
+}
+
+type comboMatcher []matcherFunc
+
+func (c comboMatcher) match(chunks []string) (int, float64) {
+ score := 1.0
+ first := 0
+ for _, f := range c {
+ idx, s := f(chunks)
+ if idx < first {
+ first = idx
+ }
+ score *= s
+ }
+ return first, score
+}
+
+// collectSymbols calls snapshot.Symbols to walk the syntax trees of
+// all files in the views' current snapshots, and returns a sorted,
+// scored list of symbols that best match the parameters.
+//
+// How it matches symbols is parameterized by two interfaces:
+// - A matcherFunc determines how well a string symbol matches a query. It
+// returns a non-negative score indicating the quality of the match. A score
+// of zero indicates no match.
+// - A symbolizer determines how we extract the symbol for an object. This
+// enables the 'symbolStyle' configuration option.
+func collectSymbols(ctx context.Context, views []View, matcherType SymbolMatcher, symbolizer symbolizer, query string) ([]protocol.SymbolInformation, error) {
+ // Extract symbols from all files.
+ var work []symbolFile
+ var roots []string
+ seen := make(map[span.URI]bool)
+ // TODO(adonovan): opt: parallelize this loop? How often is len > 1?
+ for _, v := range views {
+ snapshot, release, err := v.Snapshot()
+ if err != nil {
+ continue // view is shut down; continue with others
+ }
+ defer release()
+
+ // Use the root view URIs for determining (lexically)
+ // whether a URI is in any open workspace.
+ roots = append(roots, strings.TrimRight(string(v.Folder()), "/"))
+
+ filters := v.Options().DirectoryFilters
+ filterer := NewFilterer(filters)
+ folder := filepath.ToSlash(v.Folder().Filename())
+ symbols, err := snapshot.Symbols(ctx)
+ if err != nil {
+ return nil, err
+ }
+ for uri, syms := range symbols {
+ norm := filepath.ToSlash(uri.Filename())
+ nm := strings.TrimPrefix(norm, folder)
+ if filterer.Disallow(nm) {
+ continue
+ }
+ // Only scan each file once.
+ if seen[uri] {
+ continue
+ }
+ mds, err := snapshot.MetadataForFile(ctx, uri)
+ if err != nil {
+ event.Error(ctx, fmt.Sprintf("missing metadata for %q", uri), err)
+ continue
+ }
+ if len(mds) == 0 {
+ // TODO: should use the bug reporting API
+ continue
+ }
+ seen[uri] = true
+ work = append(work, symbolFile{uri, mds[0], syms})
+ }
+ }
+
+ // Match symbols in parallel.
+ // Each worker has its own symbolStore,
+ // which we merge at the end.
+ nmatchers := runtime.GOMAXPROCS(-1) // matching is CPU bound
+ results := make(chan *symbolStore)
+ for i := 0; i < nmatchers; i++ {
+ go func(i int) {
+ matcher := buildMatcher(matcherType, query)
+ store := new(symbolStore)
+ // Assign files to workers in round-robin fashion.
+ for j := i; j < len(work); j += nmatchers {
+ matchFile(store, symbolizer, matcher, roots, work[j])
+ }
+ results <- store
+ }(i)
+ }
+
+ // Gather and merge results as they arrive.
+ var unified symbolStore
+ for i := 0; i < nmatchers; i++ {
+ store := <-results
+ for _, syms := range store.res {
+ unified.store(syms)
+ }
+ }
+ return unified.results(), nil
+}
+
+type Filterer struct {
+ // Whether a filter is excluded depends on the operator (first char of the raw filter).
+ // Slices filters and excluded then should have the same length.
+ filters []*regexp.Regexp
+ excluded []bool
+}
+
+// NewFilterer computes regular expression form of all raw filters
+func NewFilterer(rawFilters []string) *Filterer {
+ var f Filterer
+ for _, filter := range rawFilters {
+ filter = path.Clean(filepath.ToSlash(filter))
+ // TODO(dungtuanle): fix: validate [+-] prefix.
+ op, prefix := filter[0], filter[1:]
+ // convertFilterToRegexp adds "/" at the end of prefix to handle cases where a filter is a prefix of another filter.
+ // For example, it prevents [+foobar, -foo] from excluding "foobar".
+ f.filters = append(f.filters, convertFilterToRegexp(filepath.ToSlash(prefix)))
+ f.excluded = append(f.excluded, op == '-')
+ }
+
+ return &f
+}
+
+// Disallow return true if the path is excluded from the filterer's filters.
+func (f *Filterer) Disallow(path string) bool {
+ // Ensure trailing but not leading slash.
+ path = strings.TrimPrefix(path, "/")
+ if !strings.HasSuffix(path, "/") {
+ path += "/"
+ }
+
+ // TODO(adonovan): opt: iterate in reverse and break at first match.
+ excluded := false
+ for i, filter := range f.filters {
+ if filter.MatchString(path) {
+ excluded = f.excluded[i] // last match wins
+ }
+ }
+ return excluded
+}
+
+// convertFilterToRegexp replaces glob-like operator substrings in a string file path to their equivalent regex forms.
+// Supporting glob-like operators:
+// - **: match zero or more complete path segments
+func convertFilterToRegexp(filter string) *regexp.Regexp {
+ if filter == "" {
+ return regexp.MustCompile(".*")
+ }
+ var ret strings.Builder
+ ret.WriteString("^")
+ segs := strings.Split(filter, "/")
+ for _, seg := range segs {
+ // Inv: seg != "" since path is clean.
+ if seg == "**" {
+ ret.WriteString(".*")
+ } else {
+ ret.WriteString(regexp.QuoteMeta(seg))
+ }
+ ret.WriteString("/")
+ }
+ pattern := ret.String()
+
+ // Remove unnecessary "^.*" prefix, which increased
+ // BenchmarkWorkspaceSymbols time by ~20% (even though
+ // filter CPU time increased by only by ~2.5%) when the
+ // default filter was changed to "**/node_modules".
+ pattern = strings.TrimPrefix(pattern, "^.*")
+
+ return regexp.MustCompile(pattern)
+}
+
+// symbolFile holds symbol information for a single file.
+type symbolFile struct {
+ uri span.URI
+ md *Metadata
+ syms []Symbol
+}
+
+// matchFile scans a symbol file and adds matching symbols to the store.
+func matchFile(store *symbolStore, symbolizer symbolizer, matcher matcherFunc, roots []string, i symbolFile) {
+ space := make([]string, 0, 3)
+ for _, sym := range i.syms {
+ symbolParts, score := symbolizer(space, sym.Name, i.md, matcher)
+
+ // Check if the score is too low before applying any downranking.
+ if store.tooLow(score) {
+ continue
+ }
+
+ // Factors to apply to the match score for the purpose of downranking
+ // results.
+ //
+ // These numbers were crudely calibrated based on trial-and-error using a
+ // small number of sample queries. Adjust as necessary.
+ //
+ // All factors are multiplicative, meaning if more than one applies they are
+ // multiplied together.
+ const (
+ // nonWorkspaceFactor is applied to symbols outside of any active
+ // workspace. Developers are less likely to want to jump to code that they
+ // are not actively working on.
+ nonWorkspaceFactor = 0.5
+ // nonWorkspaceUnexportedFactor is applied to unexported symbols outside of
+ // any active workspace. Since one wouldn't usually jump to unexported
+ // symbols to understand a package API, they are particularly irrelevant.
+ nonWorkspaceUnexportedFactor = 0.5
+ // every field or method nesting level to access the field decreases
+ // the score by a factor of 1.0 - depth*depthFactor, up to a depth of
+ // 3.
+ depthFactor = 0.2
+ )
+
+ startWord := true
+ exported := true
+ depth := 0.0
+ for _, r := range sym.Name {
+ if startWord && !unicode.IsUpper(r) {
+ exported = false
+ }
+ if r == '.' {
+ startWord = true
+ depth++
+ } else {
+ startWord = false
+ }
+ }
+
+ inWorkspace := false
+ for _, root := range roots {
+ if strings.HasPrefix(string(i.uri), root) {
+ inWorkspace = true
+ break
+ }
+ }
+
+ // Apply downranking based on workspace position.
+ if !inWorkspace {
+ score *= nonWorkspaceFactor
+ if !exported {
+ score *= nonWorkspaceUnexportedFactor
+ }
+ }
+
+ // Apply downranking based on symbol depth.
+ if depth > 3 {
+ depth = 3
+ }
+ score *= 1.0 - depth*depthFactor
+
+ if store.tooLow(score) {
+ continue
+ }
+
+ si := symbolInformation{
+ score: score,
+ symbol: strings.Join(symbolParts, ""),
+ kind: sym.Kind,
+ uri: i.uri,
+ rng: sym.Range,
+ container: string(i.md.PkgPath),
+ }
+ store.store(si)
+ }
+}
+
+type symbolStore struct {
+ res [maxSymbols]symbolInformation
+}
+
+// store inserts si into the sorted results, if si has a high enough score.
+func (sc *symbolStore) store(si symbolInformation) {
+ if sc.tooLow(si.score) {
+ return
+ }
+ insertAt := sort.Search(len(sc.res), func(i int) bool {
+ // Sort by score, then symbol length, and finally lexically.
+ if sc.res[i].score != si.score {
+ return sc.res[i].score < si.score
+ }
+ if len(sc.res[i].symbol) != len(si.symbol) {
+ return len(sc.res[i].symbol) > len(si.symbol)
+ }
+ return sc.res[i].symbol > si.symbol
+ })
+ if insertAt < len(sc.res)-1 {
+ copy(sc.res[insertAt+1:], sc.res[insertAt:len(sc.res)-1])
+ }
+ sc.res[insertAt] = si
+}
+
+func (sc *symbolStore) tooLow(score float64) bool {
+ return score <= sc.res[len(sc.res)-1].score
+}
+
+func (sc *symbolStore) results() []protocol.SymbolInformation {
+ var res []protocol.SymbolInformation
+ for _, si := range sc.res {
+ if si.score <= 0 {
+ return res
+ }
+ res = append(res, si.asProtocolSymbolInformation())
+ }
+ return res
+}
+
+func typeToKind(typ types.Type) protocol.SymbolKind {
+ switch typ := typ.Underlying().(type) {
+ case *types.Interface:
+ return protocol.Interface
+ case *types.Struct:
+ return protocol.Struct
+ case *types.Signature:
+ if typ.Recv() != nil {
+ return protocol.Method
+ }
+ return protocol.Function
+ case *types.Named:
+ return typeToKind(typ.Underlying())
+ case *types.Basic:
+ i := typ.Info()
+ switch {
+ case i&types.IsNumeric != 0:
+ return protocol.Number
+ case i&types.IsBoolean != 0:
+ return protocol.Boolean
+ case i&types.IsString != 0:
+ return protocol.String
+ }
+ }
+ return protocol.Variable
+}
+
+// symbolInformation is a cut-down version of protocol.SymbolInformation that
+// allows struct values of this type to be used as map keys.
+type symbolInformation struct {
+ score float64
+ symbol string
+ container string
+ kind protocol.SymbolKind
+ uri span.URI
+ rng protocol.Range
+}
+
+// asProtocolSymbolInformation converts s to a protocol.SymbolInformation value.
+//
+// TODO: work out how to handle tags if/when they are needed.
+func (s symbolInformation) asProtocolSymbolInformation() protocol.SymbolInformation {
+ return protocol.SymbolInformation{
+ Name: s.symbol,
+ Kind: s.kind,
+ Location: protocol.Location{
+ URI: protocol.URIFromSpanURI(s.uri),
+ Range: s.rng,
+ },
+ ContainerName: s.container,
+ }
+}
diff --git a/gopls/internal/lsp/source/workspace_symbol_test.go b/gopls/internal/lsp/source/workspace_symbol_test.go
new file mode 100644
index 000000000..24fb8b452
--- /dev/null
+++ b/gopls/internal/lsp/source/workspace_symbol_test.go
@@ -0,0 +1,136 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package source
+
+import (
+ "testing"
+)
+
+func TestParseQuery(t *testing.T) {
+ tests := []struct {
+ query, s string
+ wantMatch bool
+ }{
+ {"", "anything", false},
+ {"any", "anything", true},
+ {"any$", "anything", false},
+ {"ing$", "anything", true},
+ {"ing$", "anythinG", true},
+ {"inG$", "anything", false},
+ {"^any", "anything", true},
+ {"^any", "Anything", true},
+ {"^Any", "anything", false},
+ {"at", "anything", true},
+ // TODO: this appears to be a bug in the fuzzy matching algorithm. 'At'
+ // should cause a case-sensitive match.
+ // {"At", "anything", false},
+ {"At", "Anything", true},
+ {"'yth", "Anything", true},
+ {"'yti", "Anything", false},
+ {"'any 'thing", "Anything", true},
+ {"anythn nythg", "Anything", true},
+ {"ntx", "Anything", false},
+ {"anythn", "anything", true},
+ {"ing", "anything", true},
+ {"anythn nythgx", "anything", false},
+ }
+
+ for _, test := range tests {
+ matcher := parseQuery(test.query, newFuzzyMatcher)
+ if _, score := matcher([]string{test.s}); score > 0 != test.wantMatch {
+ t.Errorf("parseQuery(%q) match for %q: %.2g, want match: %t", test.query, test.s, score, test.wantMatch)
+ }
+ }
+}
+
+func TestFiltererDisallow(t *testing.T) {
+ tests := []struct {
+ filters []string
+ included []string
+ excluded []string
+ }{
+ {
+ []string{"+**/c.go"},
+ []string{"a/c.go", "a/b/c.go"},
+ []string{},
+ },
+ {
+ []string{"+a/**/c.go"},
+ []string{"a/b/c.go", "a/b/d/c.go", "a/c.go"},
+ []string{},
+ },
+ {
+ []string{"-a/c.go", "+a/**"},
+ []string{"a/c.go"},
+ []string{},
+ },
+ {
+ []string{"+a/**/c.go", "-**/c.go"},
+ []string{},
+ []string{"a/b/c.go"},
+ },
+ {
+ []string{"+a/**/c.go", "-a/**"},
+ []string{},
+ []string{"a/b/c.go"},
+ },
+ {
+ []string{"+**/c.go", "-a/**/c.go"},
+ []string{},
+ []string{"a/b/c.go"},
+ },
+ {
+ []string{"+foobar", "-foo"},
+ []string{"foobar", "foobar/a"},
+ []string{"foo", "foo/a"},
+ },
+ {
+ []string{"+", "-"},
+ []string{},
+ []string{"foobar", "foobar/a", "foo", "foo/a"},
+ },
+ {
+ []string{"-", "+"},
+ []string{"foobar", "foobar/a", "foo", "foo/a"},
+ []string{},
+ },
+ {
+ []string{"-a/**/b/**/c.go"},
+ []string{},
+ []string{"a/x/y/z/b/f/g/h/c.go"},
+ },
+ // tests for unsupported glob operators
+ {
+ []string{"+**/c.go", "-a/*/c.go"},
+ []string{"a/b/c.go"},
+ []string{},
+ },
+ {
+ []string{"+**/c.go", "-a/?/c.go"},
+ []string{"a/b/c.go"},
+ []string{},
+ },
+ {
+ []string{"-b"}, // should only filter paths prefixed with the "b" directory
+ []string{"a/b/c.go", "bb"},
+ []string{"b/c/d.go", "b"},
+ },
+ }
+
+ for _, test := range tests {
+ filterer := NewFilterer(test.filters)
+ for _, inc := range test.included {
+ if filterer.Disallow(inc) {
+ t.Errorf("Filters %v excluded %v, wanted included", test.filters, inc)
+ }
+ }
+
+ for _, exc := range test.excluded {
+ if !filterer.Disallow(exc) {
+ t.Errorf("Filters %v included %v, wanted excluded", test.filters, exc)
+ }
+ }
+ }
+}
diff --git a/gopls/internal/lsp/source/xrefs/xrefs.go b/gopls/internal/lsp/source/xrefs/xrefs.go
new file mode 100644
index 000000000..23c758266
--- /dev/null
+++ b/gopls/internal/lsp/source/xrefs/xrefs.go
@@ -0,0 +1,216 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package xrefs defines the serializable index of cross-package
+// references that is computed during type checking.
+//
+// See ../references2.go for the 'references' query.
+package xrefs
+
+import (
+ "bytes"
+ "encoding/gob"
+ "go/ast"
+ "go/types"
+ "log"
+ "sort"
+
+ "golang.org/x/tools/go/types/objectpath"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/internal/typesinternal"
+)
+
+// Index constructs a serializable index of outbound cross-references
+// for the specified type-checked package.
+func Index(files []*source.ParsedGoFile, pkg *types.Package, info *types.Info) []byte {
+ // pkgObjects maps each referenced package Q to a mapping:
+ // from each referenced symbol in Q to the ordered list
+ // of references to that symbol from this package.
+ // A nil types.Object indicates a reference
+ // to the package as a whole: an import.
+ pkgObjects := make(map[*types.Package]map[types.Object]*gobObject)
+
+ // getObjects returns the object-to-references mapping for a package.
+ getObjects := func(pkg *types.Package) map[types.Object]*gobObject {
+ objects, ok := pkgObjects[pkg]
+ if !ok {
+ objects = make(map[types.Object]*gobObject)
+ pkgObjects[pkg] = objects
+ }
+ return objects
+ }
+
+ objectpathFor := typesinternal.NewObjectpathFunc()
+
+ for fileIndex, pgf := range files {
+
+ nodeRange := func(n ast.Node) protocol.Range {
+ rng, err := pgf.PosRange(n.Pos(), n.End())
+ if err != nil {
+ panic(err) // can't fail
+ }
+ return rng
+ }
+
+ ast.Inspect(pgf.File, func(n ast.Node) bool {
+ switch n := n.(type) {
+ case *ast.Ident:
+ // Report a reference for each identifier that
+ // uses a symbol exported from another package.
+ // (The built-in error.Error method has no package.)
+ if n.IsExported() {
+ if obj, ok := info.Uses[n]; ok &&
+ obj.Pkg() != nil &&
+ obj.Pkg() != pkg {
+
+ objects := getObjects(obj.Pkg())
+ gobObj, ok := objects[obj]
+ if !ok {
+ path, err := objectpathFor(obj)
+ if err != nil {
+ // Capitalized but not exported
+ // (e.g. local const/var/type).
+ return true
+ }
+ gobObj = &gobObject{Path: path}
+ objects[obj] = gobObj
+ }
+
+ gobObj.Refs = append(gobObj.Refs, gobRef{
+ FileIndex: fileIndex,
+ Range: nodeRange(n),
+ })
+ }
+ }
+
+ case *ast.ImportSpec:
+ // Report a reference from each import path
+ // string to the imported package.
+ var obj types.Object
+ if n.Name != nil {
+ obj = info.Defs[n.Name]
+ } else {
+ obj = info.Implicits[n]
+ }
+ if obj == nil {
+ return true // missing import
+ }
+ objects := getObjects(obj.(*types.PkgName).Imported())
+ gobObj, ok := objects[nil]
+ if !ok {
+ gobObj = &gobObject{Path: ""}
+ objects[nil] = gobObj
+ }
+ gobObj.Refs = append(gobObj.Refs, gobRef{
+ FileIndex: fileIndex,
+ Range: nodeRange(n.Path),
+ })
+ }
+ return true
+ })
+ }
+
+ // Flatten the maps into slices, and sort for determinism.
+ var packages []*gobPackage
+ for p := range pkgObjects {
+ objects := pkgObjects[p]
+ gp := &gobPackage{
+ PkgPath: source.PackagePath(p.Path()),
+ Objects: make([]*gobObject, 0, len(objects)),
+ }
+ for _, gobObj := range objects {
+ gp.Objects = append(gp.Objects, gobObj)
+ }
+ sort.Slice(gp.Objects, func(i, j int) bool {
+ return gp.Objects[i].Path < gp.Objects[j].Path
+ })
+ packages = append(packages, gp)
+ }
+ sort.Slice(packages, func(i, j int) bool {
+ return packages[i].PkgPath < packages[j].PkgPath
+ })
+
+ return mustEncode(packages)
+}
+
+// Lookup searches a serialized index produced by an indexPackage
+// operation on m, and returns the locations of all references from m
+// to any object in the target set. Each object is denoted by a pair
+// of (package path, object path).
+func Lookup(m *source.Metadata, data []byte, targets map[source.PackagePath]map[objectpath.Path]struct{}) (locs []protocol.Location) {
+
+ // TODO(adonovan): opt: evaluate whether it would be faster to decode
+ // in two passes, first with struct { PkgPath string; Objects BLOB }
+ // to find the relevant record without decoding the Objects slice,
+ // then decode just the desired BLOB into a slice. BLOB would be a
+ // type whose Unmarshal method just retains (a copy of) the bytes.
+ var packages []gobPackage
+ mustDecode(data, &packages)
+
+ for _, gp := range packages {
+ if objectSet, ok := targets[gp.PkgPath]; ok {
+ for _, gobObj := range gp.Objects {
+ if _, ok := objectSet[gobObj.Path]; ok {
+ for _, ref := range gobObj.Refs {
+ uri := m.CompiledGoFiles[ref.FileIndex]
+ locs = append(locs, protocol.Location{
+ URI: protocol.URIFromSpanURI(uri),
+ Range: ref.Range,
+ })
+ }
+ }
+ }
+ }
+ }
+
+ return locs
+}
+
+// -- serialized representation --
+
+// The cross-reference index records the location of all references
+// from one package to symbols defined in other packages
+// (dependencies). It does not record within-package references.
+// The index for package P consists of a list of gopPackage records,
+// each enumerating references to symbols defined a single dependency, Q.
+
+// TODO(adonovan): opt: choose a more compact encoding. Gzip reduces
+// the gob output to about one third its size, so clearly there's room
+// to improve. The gobRef.Range field is the obvious place to begin.
+// Even a zero-length slice gob-encodes to ~285 bytes.
+
+// A gobPackage records the set of outgoing references from the index
+// package to symbols defined in a dependency package.
+type gobPackage struct {
+ PkgPath source.PackagePath // defining package (Q)
+ Objects []*gobObject // set of Q objects referenced by P
+}
+
+// A gobObject records all references to a particular symbol.
+type gobObject struct {
+ Path objectpath.Path // symbol name within package; "" => import of package itself
+ Refs []gobRef // locations of references within P, in lexical order
+}
+
+type gobRef struct {
+ FileIndex int // index of enclosing file within P's CompiledGoFiles
+ Range protocol.Range // source range of reference
+}
+
+// -- duplicated from ../../cache/analysis.go --
+
+func mustEncode(x interface{}) []byte {
+ var buf bytes.Buffer
+ if err := gob.NewEncoder(&buf).Encode(x); err != nil {
+ log.Fatalf("internal error encoding %T: %v", x, err)
+ }
+ return buf.Bytes()
+}
+
+func mustDecode(data []byte, ptr interface{}) {
+ if err := gob.NewDecoder(bytes.NewReader(data)).Decode(ptr); err != nil {
+ log.Fatalf("internal error decoding %T: %v", ptr, err)
+ }
+}
diff --git a/gopls/internal/lsp/symbols.go b/gopls/internal/lsp/symbols.go
new file mode 100644
index 000000000..40fa0d05e
--- /dev/null
+++ b/gopls/internal/lsp/symbols.go
@@ -0,0 +1,60 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsp
+
+import (
+ "context"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/lsp/template"
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/event/tag"
+)
+
+func (s *Server) documentSymbol(ctx context.Context, params *protocol.DocumentSymbolParams) ([]interface{}, error) {
+ ctx, done := event.Start(ctx, "lsp.Server.documentSymbol")
+ defer done()
+
+ snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.UnknownKind)
+ defer release()
+ if !ok {
+ return []interface{}{}, err
+ }
+ var docSymbols []protocol.DocumentSymbol
+ switch snapshot.View().FileKind(fh) {
+ case source.Tmpl:
+ docSymbols, err = template.DocumentSymbols(snapshot, fh)
+ case source.Go:
+ docSymbols, err = source.DocumentSymbols(ctx, snapshot, fh)
+ default:
+ return []interface{}{}, nil
+ }
+ if err != nil {
+ event.Error(ctx, "DocumentSymbols failed", err, tag.URI.Of(fh.URI()))
+ return []interface{}{}, nil
+ }
+ // Convert the symbols to an interface array.
+ // TODO: Remove this once the lsp deprecates SymbolInformation.
+ symbols := make([]interface{}, len(docSymbols))
+ for i, s := range docSymbols {
+ if snapshot.View().Options().HierarchicalDocumentSymbolSupport {
+ symbols[i] = s
+ continue
+ }
+ // If the client does not support hierarchical document symbols, then
+ // we need to be backwards compatible for now and return SymbolInformation.
+ symbols[i] = protocol.SymbolInformation{
+ Name: s.Name,
+ Kind: s.Kind,
+ Deprecated: s.Deprecated,
+ Location: protocol.Location{
+ URI: params.TextDocument.URI,
+ Range: s.Range,
+ },
+ }
+ }
+ return symbols, nil
+}
diff --git a/gopls/internal/lsp/template/completion.go b/gopls/internal/lsp/template/completion.go
new file mode 100644
index 000000000..292563a88
--- /dev/null
+++ b/gopls/internal/lsp/template/completion.go
@@ -0,0 +1,287 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "go/scanner"
+ "go/token"
+ "strings"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+)
+
+// information needed for completion
+type completer struct {
+ p *Parsed
+ pos protocol.Position
+ offset int // offset of the start of the Token
+ ctx protocol.CompletionContext
+ syms map[string]symbol
+}
+
+func Completion(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle, pos protocol.Position, context protocol.CompletionContext) (*protocol.CompletionList, error) {
+ all := New(snapshot.Templates())
+ var start int // the beginning of the Token (completed or not)
+ syms := make(map[string]symbol)
+ var p *Parsed
+ for fn, fc := range all.files {
+ // collect symbols from all template files
+ filterSyms(syms, fc.symbols)
+ if fn.Filename() != fh.URI().Filename() {
+ continue
+ }
+ if start = inTemplate(fc, pos); start == -1 {
+ return nil, nil
+ }
+ p = fc
+ }
+ if p == nil {
+ // this cannot happen unless the search missed a template file
+ return nil, fmt.Errorf("%s not found", fh.FileIdentity().URI.Filename())
+ }
+ c := completer{
+ p: p,
+ pos: pos,
+ offset: start + len(Left),
+ ctx: context,
+ syms: syms,
+ }
+ return c.complete()
+}
+
+func filterSyms(syms map[string]symbol, ns []symbol) {
+ for _, xsym := range ns {
+ switch xsym.kind {
+ case protocol.Method, protocol.Package, protocol.Boolean, protocol.Namespace,
+ protocol.Function:
+ syms[xsym.name] = xsym // we don't care which symbol we get
+ case protocol.Variable:
+ if xsym.name != "dot" {
+ syms[xsym.name] = xsym
+ }
+ case protocol.Constant:
+ if xsym.name == "nil" {
+ syms[xsym.name] = xsym
+ }
+ }
+ }
+}
+
+// return the starting position of the enclosing token, or -1 if none
+func inTemplate(fc *Parsed, pos protocol.Position) int {
+ // pos is the pos-th character. if the cursor is at the beginning
+ // of the file, pos is 0. That is, we've only seen characters before pos
+ // 1. pos might be in a Token, return tk.Start
+ // 2. pos might be after an elided but before a Token, return elided
+ // 3. return -1 for false
+ offset := fc.FromPosition(pos)
+ // this could be a binary search, as the tokens are ordered
+ for _, tk := range fc.tokens {
+ if tk.Start < offset && offset <= tk.End {
+ return tk.Start
+ }
+ }
+ for _, x := range fc.elided {
+ if x > offset {
+ // fc.elided is sorted
+ break
+ }
+ // If the interval [x,offset] does not contain Left or Right
+ // then provide completions. (do we need the test for Right?)
+ if !bytes.Contains(fc.buf[x:offset], []byte(Left)) && !bytes.Contains(fc.buf[x:offset], []byte(Right)) {
+ return x
+ }
+ }
+ return -1
+}
+
+var (
+ keywords = []string{"if", "with", "else", "block", "range", "template", "end}}", "end"}
+ globals = []string{"and", "call", "html", "index", "slice", "js", "len", "not", "or",
+ "urlquery", "printf", "println", "print", "eq", "ne", "le", "lt", "ge", "gt"}
+)
+
+// find the completions. start is the offset of either the Token enclosing pos, or where
+// the incomplete token starts.
+// The error return is always nil.
+func (c *completer) complete() (*protocol.CompletionList, error) {
+ ans := &protocol.CompletionList{IsIncomplete: true, Items: []protocol.CompletionItem{}}
+ start := c.p.FromPosition(c.pos)
+ sofar := c.p.buf[c.offset:start]
+ if len(sofar) == 0 || sofar[len(sofar)-1] == ' ' || sofar[len(sofar)-1] == '\t' {
+ return ans, nil
+ }
+ // sofar could be parsed by either c.analyzer() or scan(). The latter is precise
+ // and slower, but fast enough
+ words := scan(sofar)
+ // 1. if pattern starts $, show variables
+ // 2. if pattern starts ., show methods (and . by itself?)
+ // 3. if len(words) == 1, show firstWords (but if it were a |, show functions and globals)
+ // 4. ...? (parenthetical expressions, arguments, ...) (packages, namespaces, nil?)
+ if len(words) == 0 {
+ return nil, nil // if this happens, why were we called?
+ }
+ pattern := string(words[len(words)-1])
+ if pattern[0] == '$' {
+ // should we also return a raw "$"?
+ for _, s := range c.syms {
+ if s.kind == protocol.Variable && weakMatch(s.name, pattern) > 0 {
+ ans.Items = append(ans.Items, protocol.CompletionItem{
+ Label: s.name,
+ Kind: protocol.VariableCompletion,
+ Detail: "Variable",
+ })
+ }
+ }
+ return ans, nil
+ }
+ if pattern[0] == '.' {
+ for _, s := range c.syms {
+ if s.kind == protocol.Method && weakMatch("."+s.name, pattern) > 0 {
+ ans.Items = append(ans.Items, protocol.CompletionItem{
+ Label: s.name,
+ Kind: protocol.MethodCompletion,
+ Detail: "Method/member",
+ })
+ }
+ }
+ return ans, nil
+ }
+ // could we get completion attempts in strings or numbers, and if so, do we care?
+ // globals
+ for _, kw := range globals {
+ if weakMatch(kw, string(pattern)) != 0 {
+ ans.Items = append(ans.Items, protocol.CompletionItem{
+ Label: kw,
+ Kind: protocol.KeywordCompletion,
+ Detail: "Function",
+ })
+ }
+ }
+ // and functions
+ for _, s := range c.syms {
+ if s.kind == protocol.Function && weakMatch(s.name, pattern) != 0 {
+ ans.Items = append(ans.Items, protocol.CompletionItem{
+ Label: s.name,
+ Kind: protocol.FunctionCompletion,
+ Detail: "Function",
+ })
+ }
+ }
+ // keywords if we're at the beginning
+ if len(words) <= 1 || len(words[len(words)-2]) == 1 && words[len(words)-2][0] == '|' {
+ for _, kw := range keywords {
+ if weakMatch(kw, string(pattern)) != 0 {
+ ans.Items = append(ans.Items, protocol.CompletionItem{
+ Label: kw,
+ Kind: protocol.KeywordCompletion,
+ Detail: "keyword",
+ })
+ }
+ }
+ }
+ return ans, nil
+}
+
+// someday think about comments, strings, backslashes, etc
+// this would repeat some of the template parsing, but because the user is typing
+// there may be no parse tree here.
+// (go/scanner will report 2 tokens for $a, as $ is not a legal go identifier character)
+// (go/scanner is about 2.7 times more expensive)
+func (c *completer) analyze(buf []byte) [][]byte {
+ // we want to split on whitespace and before dots
+ var working []byte
+ var ans [][]byte
+ for _, ch := range buf {
+ if ch == '.' && len(working) > 0 {
+ ans = append(ans, working)
+ working = []byte{'.'}
+ continue
+ }
+ if ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' {
+ if len(working) > 0 {
+ ans = append(ans, working)
+ working = []byte{}
+ continue
+ }
+ }
+ working = append(working, ch)
+ }
+ if len(working) > 0 {
+ ans = append(ans, working)
+ }
+ ch := buf[len(buf)-1]
+ if ch == ' ' || ch == '\t' {
+ // avoid completing on whitespace
+ ans = append(ans, []byte{ch})
+ }
+ return ans
+}
+
+// version of c.analyze that uses go/scanner.
+func scan(buf []byte) []string {
+ fset := token.NewFileSet()
+ fp := fset.AddFile("", -1, len(buf))
+ var sc scanner.Scanner
+ sc.Init(fp, buf, func(pos token.Position, msg string) {}, scanner.ScanComments)
+ ans := make([]string, 0, 10) // preallocating gives a measurable savings
+ for {
+ _, tok, lit := sc.Scan() // tok is an int
+ if tok == token.EOF {
+ break // done
+ } else if tok == token.SEMICOLON && lit == "\n" {
+ continue // don't care, but probably can't happen
+ } else if tok == token.PERIOD {
+ ans = append(ans, ".") // lit is empty
+ } else if tok == token.IDENT && len(ans) > 0 && ans[len(ans)-1] == "." {
+ ans[len(ans)-1] = "." + lit
+ } else if tok == token.IDENT && len(ans) > 0 && ans[len(ans)-1] == "$" {
+ ans[len(ans)-1] = "$" + lit
+ } else if lit != "" {
+ ans = append(ans, lit)
+ }
+ }
+ return ans
+}
+
+// pattern is what the user has typed
+func weakMatch(choice, pattern string) float64 {
+ lower := strings.ToLower(choice)
+ // for now, use only lower-case everywhere
+ pattern = strings.ToLower(pattern)
+ // The first char has to match
+ if pattern[0] != lower[0] {
+ return 0
+ }
+ // If they start with ., then the second char has to match
+ from := 1
+ if pattern[0] == '.' {
+ if len(pattern) < 2 {
+ return 1 // pattern just a ., so it matches
+ }
+ if pattern[1] != lower[1] {
+ return 0
+ }
+ from = 2
+ }
+ // check that all the characters of pattern occur as a subsequence of choice
+ i, j := from, from
+ for ; i < len(lower) && j < len(pattern); j++ {
+ if pattern[j] == lower[i] {
+ i++
+ if i >= len(lower) {
+ return 0
+ }
+ }
+ }
+ if j < len(pattern) {
+ return 0
+ }
+ return 1
+}
diff --git a/gopls/internal/lsp/template/completion_test.go b/gopls/internal/lsp/template/completion_test.go
new file mode 100644
index 000000000..0fc478842
--- /dev/null
+++ b/gopls/internal/lsp/template/completion_test.go
@@ -0,0 +1,102 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "log"
+ "sort"
+ "strings"
+ "testing"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+)
+
+func init() {
+ log.SetFlags(log.Lshortfile)
+}
+
+type tparse struct {
+ marked string // ^ shows where to ask for completions. (The user just typed the following character.)
+ wanted []string // expected completions
+}
+
+// Test completions in templates that parse enough (if completion needs symbols)
+// Seen characters up to the ^
+func TestParsed(t *testing.T) {
+ var tests = []tparse{
+ {"{{x}}{{12. xx^", nil}, // https://github.com/golang/go/issues/50430
+ {`<table class="chroma" data-new-comment-url="{{if $.PageIsPullFiles}}{{$.Issue.HTMLURL}}/files/reviews/new_comment{{else}}{{$.CommitHTML}}/new_comment^{{end}}">`, nil},
+ {"{{i^f}}", []string{"index", "if"}},
+ {"{{if .}}{{e^ {{end}}", []string{"eq", "end}}", "else", "end"}},
+ {"{{foo}}{{f^", []string{"foo"}},
+ {"{{$^}}", []string{"$"}},
+ {"{{$x:=4}}{{$^", []string{"$x"}},
+ {"{{$x:=4}}{{$ ^ ", []string{}},
+ {"{{len .Modified}}{{.^Mo", []string{"Modified"}},
+ {"{{len .Modified}}{{.mf^", []string{"Modified"}},
+ {"{{$^ }}", []string{"$"}},
+ {"{{$a =3}}{{$^", []string{"$a"}},
+ // .two is not good here: fix someday
+ {`{{.Modified}}{{.^{{if $.one.two}}xxx{{end}}`, []string{"Modified", "one", "two"}},
+ {`{{.Modified}}{{.o^{{if $.one.two}}xxx{{end}}`, []string{"one"}},
+ {"{{.Modiifed}}{{.one.t^{{if $.one.two}}xxx{{end}}", []string{"two"}},
+ {`{{block "foo" .}}{{i^`, []string{"index", "if"}},
+ {"{{in^{{Internal}}", []string{"index", "Internal", "if"}},
+ // simple number has no completions
+ {"{{4^e", []string{}},
+ // simple string has no completions
+ {"{{`e^", []string{}},
+ {"{{`No i^", []string{}}, // example of why go/scanner is used
+ {"{{xavier}}{{12. x^", []string{"xavier"}},
+ }
+ for _, tx := range tests {
+ c := testCompleter(t, tx)
+ var v []string
+ if c != nil {
+ ans, _ := c.complete()
+ for _, a := range ans.Items {
+ v = append(v, a.Label)
+ }
+ }
+ if len(v) != len(tx.wanted) {
+ t.Errorf("%q: got %q, wanted %q %d,%d", tx.marked, v, tx.wanted, len(v), len(tx.wanted))
+ continue
+ }
+ sort.Strings(tx.wanted)
+ sort.Strings(v)
+ for i := 0; i < len(v); i++ {
+ if tx.wanted[i] != v[i] {
+ t.Errorf("%q at %d: got %v, wanted %v", tx.marked, i, v, tx.wanted)
+ break
+ }
+ }
+ }
+}
+
+func testCompleter(t *testing.T, tx tparse) *completer {
+ t.Helper()
+ // seen chars up to ^
+ col := strings.Index(tx.marked, "^")
+ buf := strings.Replace(tx.marked, "^", "", 1)
+ p := parseBuffer([]byte(buf))
+ pos := protocol.Position{Line: 0, Character: uint32(col)}
+ if p.ParseErr != nil {
+ log.Printf("%q: %v", tx.marked, p.ParseErr)
+ }
+ offset := inTemplate(p, pos)
+ if offset == -1 {
+ return nil
+ }
+ syms := make(map[string]symbol)
+ filterSyms(syms, p.symbols)
+ c := &completer{
+ p: p,
+ pos: protocol.Position{Line: 0, Character: uint32(col)},
+ offset: offset + len(Left),
+ ctx: protocol.CompletionContext{TriggerKind: protocol.Invoked},
+ syms: syms,
+ }
+ return c
+}
diff --git a/gopls/internal/lsp/template/highlight.go b/gopls/internal/lsp/template/highlight.go
new file mode 100644
index 000000000..1e06b9208
--- /dev/null
+++ b/gopls/internal/lsp/template/highlight.go
@@ -0,0 +1,96 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "context"
+ "fmt"
+ "regexp"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+)
+
+func Highlight(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle, loc protocol.Position) ([]protocol.DocumentHighlight, error) {
+ buf, err := fh.Read()
+ if err != nil {
+ return nil, err
+ }
+ p := parseBuffer(buf)
+ pos := p.FromPosition(loc)
+ var ans []protocol.DocumentHighlight
+ if p.ParseErr == nil {
+ for _, s := range p.symbols {
+ if s.start <= pos && pos < s.start+s.length {
+ return markSymbols(p, s)
+ }
+ }
+ }
+ // these tokens exist whether or not there was a parse error
+ // (symbols require a successful parse)
+ for _, tok := range p.tokens {
+ if tok.Start <= pos && pos < tok.End {
+ wordAt := findWordAt(p, pos)
+ if len(wordAt) > 0 {
+ return markWordInToken(p, wordAt)
+ }
+ }
+ }
+ // find the 'word' at pos, etc: someday
+ // until then we get the default action, which doesn't respect word boundaries
+ return ans, nil
+}
+
+func markSymbols(p *Parsed, sym symbol) ([]protocol.DocumentHighlight, error) {
+ var ans []protocol.DocumentHighlight
+ for _, s := range p.symbols {
+ if s.name == sym.name {
+ kind := protocol.Read
+ if s.vardef {
+ kind = protocol.Write
+ }
+ ans = append(ans, protocol.DocumentHighlight{
+ Range: p.Range(s.start, s.length),
+ Kind: kind,
+ })
+ }
+ }
+ return ans, nil
+}
+
+// A token is {{...}}, and this marks words in the token that equal the give word
+func markWordInToken(p *Parsed, wordAt string) ([]protocol.DocumentHighlight, error) {
+ var ans []protocol.DocumentHighlight
+ pat, err := regexp.Compile(fmt.Sprintf(`\b%s\b`, wordAt))
+ if err != nil {
+ return nil, fmt.Errorf("%q: unmatchable word (%v)", wordAt, err)
+ }
+ for _, tok := range p.tokens {
+ got := pat.FindAllIndex(p.buf[tok.Start:tok.End], -1)
+ for i := 0; i < len(got); i++ {
+ ans = append(ans, protocol.DocumentHighlight{
+ Range: p.Range(got[i][0], got[i][1]-got[i][0]),
+ Kind: protocol.Text,
+ })
+ }
+ }
+ return ans, nil
+}
+
+var wordRe = regexp.MustCompile(`[$]?\w+$`)
+var moreRe = regexp.MustCompile(`^[$]?\w+`)
+
+// findWordAt finds the word the cursor is in (meaning in or just before)
+func findWordAt(p *Parsed, pos int) string {
+ if pos >= len(p.buf) {
+ return "" // can't happen, as we are called with pos < tok.End
+ }
+ after := moreRe.Find(p.buf[pos:])
+ if len(after) == 0 {
+ return "" // end of the word
+ }
+ got := wordRe.Find(p.buf[:pos+len(after)])
+ return string(got)
+}
diff --git a/gopls/internal/lsp/template/implementations.go b/gopls/internal/lsp/template/implementations.go
new file mode 100644
index 000000000..ed9b986a7
--- /dev/null
+++ b/gopls/internal/lsp/template/implementations.go
@@ -0,0 +1,189 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "context"
+ "fmt"
+ "regexp"
+ "strconv"
+ "time"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+)
+
+// line number (1-based) and message
+var errRe = regexp.MustCompile(`template.*:(\d+): (.*)`)
+
+// Diagnose returns parse errors. There is only one.
+// The errors are not always helpful. For instance { {end}}
+// will likely point to the end of the file.
+func Diagnose(f source.FileHandle) []*source.Diagnostic {
+ // no need for skipTemplate check, as Diagnose is called on the
+ // snapshot's template files
+ buf, err := f.Read()
+ if err != nil {
+ // Is a Diagnostic with no Range useful? event.Error also?
+ msg := fmt.Sprintf("failed to read %s (%v)", f.URI().Filename(), err)
+ d := source.Diagnostic{Message: msg, Severity: protocol.SeverityError, URI: f.URI(),
+ Source: source.TemplateError}
+ return []*source.Diagnostic{&d}
+ }
+ p := parseBuffer(buf)
+ if p.ParseErr == nil {
+ return nil
+ }
+ unknownError := func(msg string) []*source.Diagnostic {
+ s := fmt.Sprintf("malformed template error %q: %s", p.ParseErr.Error(), msg)
+ d := source.Diagnostic{
+ Message: s, Severity: protocol.SeverityError, Range: p.Range(p.nls[0], 1),
+ URI: f.URI(), Source: source.TemplateError}
+ return []*source.Diagnostic{&d}
+ }
+ // errors look like `template: :40: unexpected "}" in operand`
+ // so the string needs to be parsed
+ matches := errRe.FindStringSubmatch(p.ParseErr.Error())
+ if len(matches) != 3 {
+ msg := fmt.Sprintf("expected 3 matches, got %d (%v)", len(matches), matches)
+ return unknownError(msg)
+ }
+ lineno, err := strconv.Atoi(matches[1])
+ if err != nil {
+ msg := fmt.Sprintf("couldn't convert %q to int, %v", matches[1], err)
+ return unknownError(msg)
+ }
+ msg := matches[2]
+ d := source.Diagnostic{Message: msg, Severity: protocol.SeverityError,
+ Source: source.TemplateError}
+ start := p.nls[lineno-1]
+ if lineno < len(p.nls) {
+ size := p.nls[lineno] - start
+ d.Range = p.Range(start, size)
+ } else {
+ d.Range = p.Range(start, 1)
+ }
+ return []*source.Diagnostic{&d}
+}
+
+// Definition finds the definitions of the symbol at loc. It
+// does not understand scoping (if any) in templates. This code is
+// for definitions, type definitions, and implementations.
+// Results only for variables and templates.
+func Definition(snapshot source.Snapshot, fh source.FileHandle, loc protocol.Position) ([]protocol.Location, error) {
+ x, _, err := symAtPosition(fh, loc)
+ if err != nil {
+ return nil, err
+ }
+ sym := x.name
+ ans := []protocol.Location{}
+ // PJW: this is probably a pattern to abstract
+ a := New(snapshot.Templates())
+ for k, p := range a.files {
+ for _, s := range p.symbols {
+ if !s.vardef || s.name != sym {
+ continue
+ }
+ ans = append(ans, protocol.Location{URI: protocol.DocumentURI(k), Range: p.Range(s.start, s.length)})
+ }
+ }
+ return ans, nil
+}
+
+func Hover(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle, position protocol.Position) (*protocol.Hover, error) {
+ sym, p, err := symAtPosition(fh, position)
+ if sym == nil || err != nil {
+ return nil, err
+ }
+ ans := protocol.Hover{Range: p.Range(sym.start, sym.length), Contents: protocol.MarkupContent{Kind: protocol.Markdown}}
+ switch sym.kind {
+ case protocol.Function:
+ ans.Contents.Value = fmt.Sprintf("function: %s", sym.name)
+ case protocol.Variable:
+ ans.Contents.Value = fmt.Sprintf("variable: %s", sym.name)
+ case protocol.Constant:
+ ans.Contents.Value = fmt.Sprintf("constant %s", sym.name)
+ case protocol.Method: // field or method
+ ans.Contents.Value = fmt.Sprintf("%s: field or method", sym.name)
+ case protocol.Package: // template use, template def (PJW: do we want two?)
+ ans.Contents.Value = fmt.Sprintf("template %s\n(add definition)", sym.name)
+ case protocol.Namespace:
+ ans.Contents.Value = fmt.Sprintf("template %s defined", sym.name)
+ case protocol.Number:
+ ans.Contents.Value = "number"
+ case protocol.String:
+ ans.Contents.Value = "string"
+ case protocol.Boolean:
+ ans.Contents.Value = "boolean"
+ default:
+ ans.Contents.Value = fmt.Sprintf("oops, sym=%#v", sym)
+ }
+ return &ans, nil
+}
+
+func References(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle, params *protocol.ReferenceParams) ([]protocol.Location, error) {
+ sym, _, err := symAtPosition(fh, params.Position)
+ if sym == nil || err != nil || sym.name == "" {
+ return nil, err
+ }
+ ans := []protocol.Location{}
+
+ a := New(snapshot.Templates())
+ for k, p := range a.files {
+ for _, s := range p.symbols {
+ if s.name != sym.name {
+ continue
+ }
+ if s.vardef && !params.Context.IncludeDeclaration {
+ continue
+ }
+ ans = append(ans, protocol.Location{URI: protocol.DocumentURI(k), Range: p.Range(s.start, s.length)})
+ }
+ }
+ // do these need to be sorted? (a.files is a map)
+ return ans, nil
+}
+
+func SemanticTokens(ctx context.Context, snapshot source.Snapshot, spn span.URI, add func(line, start, len uint32), d func() []uint32) (*protocol.SemanticTokens, error) {
+ fh, err := snapshot.GetFile(ctx, spn)
+ if err != nil {
+ return nil, err
+ }
+ buf, err := fh.Read()
+ if err != nil {
+ return nil, err
+ }
+ p := parseBuffer(buf)
+
+ for _, t := range p.Tokens() {
+ if t.Multiline {
+ la, ca := p.LineCol(t.Start)
+ lb, cb := p.LineCol(t.End)
+ add(la, ca, p.RuneCount(la, ca, 0))
+ for l := la + 1; l < lb; l++ {
+ add(l, 0, p.RuneCount(l, 0, 0))
+ }
+ add(lb, 0, p.RuneCount(lb, 0, cb))
+ continue
+ }
+ sz, err := p.TokenSize(t)
+ if err != nil {
+ return nil, err
+ }
+ line, col := p.LineCol(t.Start)
+ add(line, col, uint32(sz))
+ }
+ data := d()
+ ans := &protocol.SemanticTokens{
+ Data: data,
+ // for small cache, some day. for now, the LSP client ignores this
+ // (that is, when the LSP client starts returning these, we can cache)
+ ResultID: fmt.Sprintf("%v", time.Now()),
+ }
+ return ans, nil
+}
+
+// still need to do rename, etc
diff --git a/gopls/internal/lsp/template/parse.go b/gopls/internal/lsp/template/parse.go
new file mode 100644
index 000000000..a6befdcb9
--- /dev/null
+++ b/gopls/internal/lsp/template/parse.go
@@ -0,0 +1,508 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package template contains code for dealing with templates
+package template
+
+// template files are small enough that the code reprocesses them each time
+// this may be a bad choice for projects with lots of template files.
+
+// This file contains the parsing code, some debugging printing, and
+// implementations for Diagnose, Definition, Hover, References
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "log"
+ "regexp"
+ "runtime"
+ "sort"
+ "text/template"
+ "text/template/parse"
+ "unicode/utf8"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/event"
+)
+
+var (
+ Left = []byte("{{")
+ Right = []byte("}}")
+)
+
+type Parsed struct {
+ buf []byte //contents
+ lines [][]byte // needed?, other than for debugging?
+ elided []int // offsets where Left was replaced by blanks
+
+ // tokens are matched Left-Right pairs, computed before trying to parse
+ tokens []Token
+
+ // result of parsing
+ named []*template.Template // the template and embedded templates
+ ParseErr error
+ symbols []symbol
+ stack []parse.Node // used while computing symbols
+
+ // for mapping from offsets in buf to LSP coordinates
+ // See FromPosition() and LineCol()
+ nls []int // offset of newlines before each line (nls[0]==-1)
+ lastnl int // last line seen
+ check int // used to decide whether to use lastnl or search through nls
+ nonASCII bool // are there any non-ascii runes in buf?
+}
+
+// Token is a single {{...}}. More precisely, Left...Right
+type Token struct {
+ Start, End int // offset from start of template
+ Multiline bool
+}
+
+// All contains the Parse of all the template files
+type All struct {
+ files map[span.URI]*Parsed
+}
+
+// New returns the Parses of the snapshot's tmpl files
+// (maybe cache these, but then avoiding import cycles needs code rearrangements)
+func New(tmpls map[span.URI]source.FileHandle) *All {
+ all := make(map[span.URI]*Parsed)
+ for k, v := range tmpls {
+ buf, err := v.Read()
+ if err != nil { // PJW: decide what to do with these errors
+ log.Printf("failed to read %s (%v)", v.URI().Filename(), err)
+ continue
+ }
+ all[k] = parseBuffer(buf)
+ }
+ return &All{files: all}
+}
+
+func parseBuffer(buf []byte) *Parsed {
+ ans := &Parsed{
+ buf: buf,
+ check: -1,
+ nls: []int{-1},
+ }
+ if len(buf) == 0 {
+ return ans
+ }
+ // how to compute allAscii...
+ for _, b := range buf {
+ if b >= utf8.RuneSelf {
+ ans.nonASCII = true
+ break
+ }
+ }
+ if buf[len(buf)-1] != '\n' {
+ ans.buf = append(buf, '\n')
+ }
+ for i, p := range ans.buf {
+ if p == '\n' {
+ ans.nls = append(ans.nls, i)
+ }
+ }
+ ans.setTokens() // ans.buf may be a new []byte
+ ans.lines = bytes.Split(ans.buf, []byte{'\n'})
+ t, err := template.New("").Parse(string(ans.buf))
+ if err != nil {
+ funcs := make(template.FuncMap)
+ for t == nil && ans.ParseErr == nil {
+ // in 1.17 it may be possible to avoid getting this error
+ // template: :2: function "foo" not defined
+ matches := parseErrR.FindStringSubmatch(err.Error())
+ if len(matches) == 2 {
+ // suppress the error by giving it a function with the right name
+ funcs[matches[1]] = func() interface{} { return nil }
+ t, err = template.New("").Funcs(funcs).Parse(string(ans.buf))
+ continue
+ }
+ ans.ParseErr = err // unfixed error
+ return ans
+ }
+ }
+ ans.named = t.Templates()
+ // set the symbols
+ for _, t := range ans.named {
+ ans.stack = append(ans.stack, t.Root)
+ ans.findSymbols()
+ if t.Name() != "" {
+ // defining a template. The pos is just after {{define...}} (or {{block...}}?)
+ at, sz := ans.FindLiteralBefore(int(t.Root.Pos))
+ s := symbol{start: at, length: sz, name: t.Name(), kind: protocol.Namespace, vardef: true}
+ ans.symbols = append(ans.symbols, s)
+ }
+ }
+
+ sort.Slice(ans.symbols, func(i, j int) bool {
+ left, right := ans.symbols[i], ans.symbols[j]
+ if left.start != right.start {
+ return left.start < right.start
+ }
+ if left.vardef != right.vardef {
+ return left.vardef
+ }
+ return left.kind < right.kind
+ })
+ return ans
+}
+
+// FindLiteralBefore locates the first preceding string literal
+// returning its position and length in buf
+// or returns -1 if there is none.
+// Assume double-quoted string rather than backquoted string for now.
+func (p *Parsed) FindLiteralBefore(pos int) (int, int) {
+ left, right := -1, -1
+ for i := pos - 1; i >= 0; i-- {
+ if p.buf[i] != '"' {
+ continue
+ }
+ if right == -1 {
+ right = i
+ continue
+ }
+ left = i
+ break
+ }
+ if left == -1 {
+ return -1, 0
+ }
+ return left + 1, right - left - 1
+}
+
+var (
+ parseErrR = regexp.MustCompile(`template:.*function "([^"]+)" not defined`)
+)
+
+func (p *Parsed) setTokens() {
+ const (
+ // InRaw and InString only occur inside an action (SeenLeft)
+ Start = iota
+ InRaw
+ InString
+ SeenLeft
+ )
+ state := Start
+ var left, oldState int
+ for n := 0; n < len(p.buf); n++ {
+ c := p.buf[n]
+ switch state {
+ case InRaw:
+ if c == '`' {
+ state = oldState
+ }
+ case InString:
+ if c == '"' && !isEscaped(p.buf[:n]) {
+ state = oldState
+ }
+ case SeenLeft:
+ if c == '`' {
+ oldState = state // it's SeenLeft, but a little clearer this way
+ state = InRaw
+ continue
+ }
+ if c == '"' {
+ oldState = state
+ state = InString
+ continue
+ }
+ if bytes.HasPrefix(p.buf[n:], Right) {
+ right := n + len(Right)
+ tok := Token{Start: left,
+ End: right,
+ Multiline: bytes.Contains(p.buf[left:right], []byte{'\n'}),
+ }
+ p.tokens = append(p.tokens, tok)
+ state = Start
+ }
+ // If we see (unquoted) Left then the original left is probably the user
+ // typing. Suppress the original left
+ if bytes.HasPrefix(p.buf[n:], Left) {
+ p.elideAt(left)
+ left = n
+ n += len(Left) - 1 // skip the rest
+ }
+ case Start:
+ if bytes.HasPrefix(p.buf[n:], Left) {
+ left = n
+ state = SeenLeft
+ n += len(Left) - 1 // skip the rest (avoids {{{ bug)
+ }
+ }
+ }
+ // this error occurs after typing {{ at the end of the file
+ if state != Start {
+ // Unclosed Left. remove the Left at left
+ p.elideAt(left)
+ }
+}
+
+func (p *Parsed) elideAt(left int) {
+ if p.elided == nil {
+ // p.buf is the same buffer that v.Read() returns, so copy it.
+ // (otherwise the next time it's parsed, elided information is lost)
+ b := make([]byte, len(p.buf))
+ copy(b, p.buf)
+ p.buf = b
+ }
+ for i := 0; i < len(Left); i++ {
+ p.buf[left+i] = ' '
+ }
+ p.elided = append(p.elided, left)
+}
+
+// isEscaped reports whether the byte after buf is escaped
+func isEscaped(buf []byte) bool {
+ backSlashes := 0
+ for j := len(buf) - 1; j >= 0 && buf[j] == '\\'; j-- {
+ backSlashes++
+ }
+ return backSlashes%2 == 1
+}
+
+func (p *Parsed) Tokens() []Token {
+ return p.tokens
+}
+
+// TODO(adonovan): the next 100 lines could perhaps replaced by use of protocol.Mapper.
+
+func (p *Parsed) utf16len(buf []byte) int {
+ cnt := 0
+ if !p.nonASCII {
+ return len(buf)
+ }
+ // we need a utf16len(rune), but we don't have it
+ for _, r := range string(buf) {
+ cnt++
+ if r >= 1<<16 {
+ cnt++
+ }
+ }
+ return cnt
+}
+
+func (p *Parsed) TokenSize(t Token) (int, error) {
+ if t.Multiline {
+ return -1, fmt.Errorf("TokenSize called with Multiline token %#v", t)
+ }
+ ans := p.utf16len(p.buf[t.Start:t.End])
+ return ans, nil
+}
+
+// RuneCount counts runes in line l, from col s to e
+// (e==0 for end of line. called only for multiline tokens)
+func (p *Parsed) RuneCount(l, s, e uint32) uint32 {
+ start := p.nls[l] + 1 + int(s)
+ end := p.nls[l] + 1 + int(e)
+ if e == 0 || end > p.nls[l+1] {
+ end = p.nls[l+1]
+ }
+ return uint32(utf8.RuneCount(p.buf[start:end]))
+}
+
+// LineCol converts from a 0-based byte offset to 0-based line, col. col in runes
+func (p *Parsed) LineCol(x int) (uint32, uint32) {
+ if x < p.check {
+ p.lastnl = 0
+ }
+ p.check = x
+ for i := p.lastnl; i < len(p.nls); i++ {
+ if p.nls[i] <= x {
+ continue
+ }
+ p.lastnl = i
+ var count int
+ if i > 0 && x == p.nls[i-1] { // \n
+ count = 0
+ } else {
+ count = p.utf16len(p.buf[p.nls[i-1]+1 : x])
+ }
+ return uint32(i - 1), uint32(count)
+ }
+ if x == len(p.buf)-1 { // trailing \n
+ return uint32(len(p.nls) - 1), 0
+ }
+ // shouldn't happen
+ for i := 1; i < 4; i++ {
+ _, f, l, ok := runtime.Caller(i)
+ if !ok {
+ break
+ }
+ log.Printf("%d: %s:%d", i, f, l)
+ }
+
+ msg := fmt.Errorf("LineCol off the end, %d of %d, nls=%v, %q", x, len(p.buf), p.nls, p.buf[x:])
+ event.Error(context.Background(), "internal error", msg)
+ return 0, 0
+}
+
+// Position produces a protocol.Position from an offset in the template
+func (p *Parsed) Position(pos int) protocol.Position {
+ line, col := p.LineCol(pos)
+ return protocol.Position{Line: line, Character: col}
+}
+
+func (p *Parsed) Range(x, length int) protocol.Range {
+ line, col := p.LineCol(x)
+ ans := protocol.Range{
+ Start: protocol.Position{Line: line, Character: col},
+ End: protocol.Position{Line: line, Character: col + uint32(length)},
+ }
+ return ans
+}
+
+// FromPosition translates a protocol.Position into an offset into the template
+func (p *Parsed) FromPosition(x protocol.Position) int {
+ l, c := int(x.Line), int(x.Character)
+ if l >= len(p.nls) || p.nls[l]+1 >= len(p.buf) {
+ // paranoia to avoid panic. return the largest offset
+ return len(p.buf)
+ }
+ line := p.buf[p.nls[l]+1:]
+ cnt := 0
+ for w := range string(line) {
+ if cnt >= c {
+ return w + p.nls[l] + 1
+ }
+ cnt++
+ }
+ // do we get here? NO
+ pos := int(x.Character) + p.nls[int(x.Line)] + 1
+ event.Error(context.Background(), "internal error", fmt.Errorf("surprise %#v", x))
+ return pos
+}
+
+func symAtPosition(fh source.FileHandle, loc protocol.Position) (*symbol, *Parsed, error) {
+ buf, err := fh.Read()
+ if err != nil {
+ return nil, nil, err
+ }
+ p := parseBuffer(buf)
+ pos := p.FromPosition(loc)
+ syms := p.SymsAtPos(pos)
+ if len(syms) == 0 {
+ return nil, p, fmt.Errorf("no symbol found")
+ }
+ if len(syms) > 1 {
+ log.Printf("Hover: %d syms, not 1 %v", len(syms), syms)
+ }
+ sym := syms[0]
+ return &sym, p, nil
+}
+
+func (p *Parsed) SymsAtPos(pos int) []symbol {
+ ans := []symbol{}
+ for _, s := range p.symbols {
+ if s.start <= pos && pos < s.start+s.length {
+ ans = append(ans, s)
+ }
+ }
+ return ans
+}
+
+type wrNode struct {
+ p *Parsed
+ w io.Writer
+}
+
+// WriteNode is for debugging
+func (p *Parsed) WriteNode(w io.Writer, n parse.Node) {
+ wr := wrNode{p: p, w: w}
+ wr.writeNode(n, "")
+}
+
+func (wr wrNode) writeNode(n parse.Node, indent string) {
+ if n == nil {
+ return
+ }
+ at := func(pos parse.Pos) string {
+ line, col := wr.p.LineCol(int(pos))
+ return fmt.Sprintf("(%d)%v:%v", pos, line, col)
+ }
+ switch x := n.(type) {
+ case *parse.ActionNode:
+ fmt.Fprintf(wr.w, "%sActionNode at %s\n", indent, at(x.Pos))
+ wr.writeNode(x.Pipe, indent+". ")
+ case *parse.BoolNode:
+ fmt.Fprintf(wr.w, "%sBoolNode at %s, %v\n", indent, at(x.Pos), x.True)
+ case *parse.BranchNode:
+ fmt.Fprintf(wr.w, "%sBranchNode at %s\n", indent, at(x.Pos))
+ wr.writeNode(x.Pipe, indent+"Pipe. ")
+ wr.writeNode(x.List, indent+"List. ")
+ wr.writeNode(x.ElseList, indent+"Else. ")
+ case *parse.ChainNode:
+ fmt.Fprintf(wr.w, "%sChainNode at %s, %v\n", indent, at(x.Pos), x.Field)
+ case *parse.CommandNode:
+ fmt.Fprintf(wr.w, "%sCommandNode at %s, %d children\n", indent, at(x.Pos), len(x.Args))
+ for _, a := range x.Args {
+ wr.writeNode(a, indent+". ")
+ }
+ //case *parse.CommentNode: // 1.16
+ case *parse.DotNode:
+ fmt.Fprintf(wr.w, "%sDotNode at %s\n", indent, at(x.Pos))
+ case *parse.FieldNode:
+ fmt.Fprintf(wr.w, "%sFieldNode at %s, %v\n", indent, at(x.Pos), x.Ident)
+ case *parse.IdentifierNode:
+ fmt.Fprintf(wr.w, "%sIdentifierNode at %s, %v\n", indent, at(x.Pos), x.Ident)
+ case *parse.IfNode:
+ fmt.Fprintf(wr.w, "%sIfNode at %s\n", indent, at(x.Pos))
+ wr.writeNode(&x.BranchNode, indent+". ")
+ case *parse.ListNode:
+ if x == nil {
+ return // nil BranchNode.ElseList
+ }
+ fmt.Fprintf(wr.w, "%sListNode at %s, %d children\n", indent, at(x.Pos), len(x.Nodes))
+ for _, n := range x.Nodes {
+ wr.writeNode(n, indent+". ")
+ }
+ case *parse.NilNode:
+ fmt.Fprintf(wr.w, "%sNilNode at %s\n", indent, at(x.Pos))
+ case *parse.NumberNode:
+ fmt.Fprintf(wr.w, "%sNumberNode at %s, %s\n", indent, at(x.Pos), x.Text)
+ case *parse.PipeNode:
+ if x == nil {
+ return // {{template "xxx"}}
+ }
+ fmt.Fprintf(wr.w, "%sPipeNode at %s, %d vars, %d cmds, IsAssign:%v\n",
+ indent, at(x.Pos), len(x.Decl), len(x.Cmds), x.IsAssign)
+ for _, d := range x.Decl {
+ wr.writeNode(d, indent+"Decl. ")
+ }
+ for _, c := range x.Cmds {
+ wr.writeNode(c, indent+"Cmd. ")
+ }
+ case *parse.RangeNode:
+ fmt.Fprintf(wr.w, "%sRangeNode at %s\n", indent, at(x.Pos))
+ wr.writeNode(&x.BranchNode, indent+". ")
+ case *parse.StringNode:
+ fmt.Fprintf(wr.w, "%sStringNode at %s, %s\n", indent, at(x.Pos), x.Quoted)
+ case *parse.TemplateNode:
+ fmt.Fprintf(wr.w, "%sTemplateNode at %s, %s\n", indent, at(x.Pos), x.Name)
+ wr.writeNode(x.Pipe, indent+". ")
+ case *parse.TextNode:
+ fmt.Fprintf(wr.w, "%sTextNode at %s, len %d\n", indent, at(x.Pos), len(x.Text))
+ case *parse.VariableNode:
+ fmt.Fprintf(wr.w, "%sVariableNode at %s, %v\n", indent, at(x.Pos), x.Ident)
+ case *parse.WithNode:
+ fmt.Fprintf(wr.w, "%sWithNode at %s\n", indent, at(x.Pos))
+ wr.writeNode(&x.BranchNode, indent+". ")
+ }
+}
+
+var kindNames = []string{"", "File", "Module", "Namespace", "Package", "Class", "Method", "Property",
+ "Field", "Constructor", "Enum", "Interface", "Function", "Variable", "Constant", "String",
+ "Number", "Boolean", "Array", "Object", "Key", "Null", "EnumMember", "Struct", "Event",
+ "Operator", "TypeParameter"}
+
+func kindStr(k protocol.SymbolKind) string {
+ n := int(k)
+ if n < 1 || n >= len(kindNames) {
+ return fmt.Sprintf("?SymbolKind %d?", n)
+ }
+ return kindNames[n]
+}
diff --git a/internal/lsp/template/parse_test.go b/gopls/internal/lsp/template/parse_test.go
index 345f52347..345f52347 100644
--- a/internal/lsp/template/parse_test.go
+++ b/gopls/internal/lsp/template/parse_test.go
diff --git a/gopls/internal/lsp/template/symbols.go b/gopls/internal/lsp/template/symbols.go
new file mode 100644
index 000000000..24f9604c1
--- /dev/null
+++ b/gopls/internal/lsp/template/symbols.go
@@ -0,0 +1,230 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "text/template/parse"
+ "unicode/utf8"
+
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+)
+
+// in local coordinates, to be translated to protocol.DocumentSymbol
+type symbol struct {
+ start int // for sorting
+ length int // in runes (unicode code points)
+ name string
+ kind protocol.SymbolKind
+ vardef bool // is this a variable definition?
+ // do we care about selection range, or children?
+ // no children yet, and selection range is the same as range
+}
+
+func (s symbol) String() string {
+ return fmt.Sprintf("{%d,%d,%s,%s,%v}", s.start, s.length, s.name, s.kind, s.vardef)
+}
+
+// for FieldNode or VariableNode (or ChainNode?)
+func (p *Parsed) fields(flds []string, x parse.Node) []symbol {
+ ans := []symbol{}
+ // guessing that there are no embedded blanks allowed. The doc is unclear
+ lookfor := ""
+ switch x.(type) {
+ case *parse.FieldNode:
+ for _, f := range flds {
+ lookfor += "." + f // quadratic, but probably ok
+ }
+ case *parse.VariableNode:
+ lookfor = flds[0]
+ for i := 1; i < len(flds); i++ {
+ lookfor += "." + flds[i]
+ }
+ case *parse.ChainNode: // PJW, what are these?
+ for _, f := range flds {
+ lookfor += "." + f // quadratic, but probably ok
+ }
+ default:
+ // If these happen they will happen even if gopls is restarted
+ // and the users does the same thing, so it is better not to panic.
+ // context.Background() is used because we don't have access
+ // to any other context. [we could, but it would be complicated]
+ event.Log(context.Background(), fmt.Sprintf("%T unexpected in fields()", x))
+ return nil
+ }
+ if len(lookfor) == 0 {
+ event.Log(context.Background(), fmt.Sprintf("no strings in fields() %#v", x))
+ return nil
+ }
+ startsAt := int(x.Position())
+ ix := bytes.Index(p.buf[startsAt:], []byte(lookfor)) // HasPrefix? PJW?
+ if ix < 0 || ix > len(lookfor) { // lookfor expected to be at start (or so)
+ // probably golang.go/#43388, so back up
+ startsAt -= len(flds[0]) + 1
+ ix = bytes.Index(p.buf[startsAt:], []byte(lookfor)) // ix might be 1? PJW
+ if ix < 0 {
+ return ans
+ }
+ }
+ at := ix + startsAt
+ for _, f := range flds {
+ at += 1 // .
+ kind := protocol.Method
+ if f[0] == '$' {
+ kind = protocol.Variable
+ }
+ sym := symbol{name: f, kind: kind, start: at, length: utf8.RuneCount([]byte(f))}
+ if kind == protocol.Variable && len(p.stack) > 1 {
+ if pipe, ok := p.stack[len(p.stack)-2].(*parse.PipeNode); ok {
+ for _, y := range pipe.Decl {
+ if x == y {
+ sym.vardef = true
+ }
+ }
+ }
+ }
+ ans = append(ans, sym)
+ at += len(f)
+ }
+ return ans
+}
+
+func (p *Parsed) findSymbols() {
+ if len(p.stack) == 0 {
+ return
+ }
+ n := p.stack[len(p.stack)-1]
+ pop := func() {
+ p.stack = p.stack[:len(p.stack)-1]
+ }
+ if n == nil { // allowing nil simplifies the code
+ pop()
+ return
+ }
+ nxt := func(nd parse.Node) {
+ p.stack = append(p.stack, nd)
+ p.findSymbols()
+ }
+ switch x := n.(type) {
+ case *parse.ActionNode:
+ nxt(x.Pipe)
+ case *parse.BoolNode:
+ // need to compute the length from the value
+ msg := fmt.Sprintf("%v", x.True)
+ p.symbols = append(p.symbols, symbol{start: int(x.Pos), length: len(msg), kind: protocol.Boolean})
+ case *parse.BranchNode:
+ nxt(x.Pipe)
+ nxt(x.List)
+ nxt(x.ElseList)
+ case *parse.ChainNode:
+ p.symbols = append(p.symbols, p.fields(x.Field, x)...)
+ nxt(x.Node)
+ case *parse.CommandNode:
+ for _, a := range x.Args {
+ nxt(a)
+ }
+ //case *parse.CommentNode: // go 1.16
+ // log.Printf("implement %d", x.Type())
+ case *parse.DotNode:
+ sym := symbol{name: "dot", kind: protocol.Variable, start: int(x.Pos), length: 1}
+ p.symbols = append(p.symbols, sym)
+ case *parse.FieldNode:
+ p.symbols = append(p.symbols, p.fields(x.Ident, x)...)
+ case *parse.IdentifierNode:
+ sym := symbol{name: x.Ident, kind: protocol.Function, start: int(x.Pos),
+ length: utf8.RuneCount([]byte(x.Ident))}
+ p.symbols = append(p.symbols, sym)
+ case *parse.IfNode:
+ nxt(&x.BranchNode)
+ case *parse.ListNode:
+ if x != nil { // wretched typed nils. Node should have an IfNil
+ for _, nd := range x.Nodes {
+ nxt(nd)
+ }
+ }
+ case *parse.NilNode:
+ sym := symbol{name: "nil", kind: protocol.Constant, start: int(x.Pos), length: 3}
+ p.symbols = append(p.symbols, sym)
+ case *parse.NumberNode:
+ // no name; ascii
+ p.symbols = append(p.symbols, symbol{start: int(x.Pos), length: len(x.Text), kind: protocol.Number})
+ case *parse.PipeNode:
+ if x == nil { // {{template "foo"}}
+ return
+ }
+ for _, d := range x.Decl {
+ nxt(d)
+ }
+ for _, c := range x.Cmds {
+ nxt(c)
+ }
+ case *parse.RangeNode:
+ nxt(&x.BranchNode)
+ case *parse.StringNode:
+ // no name
+ sz := utf8.RuneCount([]byte(x.Text))
+ p.symbols = append(p.symbols, symbol{start: int(x.Pos), length: sz, kind: protocol.String})
+ case *parse.TemplateNode: // invoking a template
+ // x.Pos points to the quote before the name
+ p.symbols = append(p.symbols, symbol{name: x.Name, kind: protocol.Package, start: int(x.Pos) + 1,
+ length: utf8.RuneCount([]byte(x.Name))})
+ nxt(x.Pipe)
+ case *parse.TextNode:
+ if len(x.Text) == 1 && x.Text[0] == '\n' {
+ break
+ }
+ // nothing to report, but build one for hover
+ sz := utf8.RuneCount([]byte(x.Text))
+ p.symbols = append(p.symbols, symbol{start: int(x.Pos), length: sz, kind: protocol.Constant})
+ case *parse.VariableNode:
+ p.symbols = append(p.symbols, p.fields(x.Ident, x)...)
+ case *parse.WithNode:
+ nxt(&x.BranchNode)
+
+ }
+ pop()
+}
+
+// DocumentSymbols returns a hierarchy of the symbols defined in a template file.
+// (The hierarchy is flat. SymbolInformation might be better.)
+func DocumentSymbols(snapshot source.Snapshot, fh source.FileHandle) ([]protocol.DocumentSymbol, error) {
+ buf, err := fh.Read()
+ if err != nil {
+ return nil, err
+ }
+ p := parseBuffer(buf)
+ if p.ParseErr != nil {
+ return nil, p.ParseErr
+ }
+ var ans []protocol.DocumentSymbol
+ for _, s := range p.symbols {
+ if s.kind == protocol.Constant {
+ continue
+ }
+ d := kindStr(s.kind)
+ if d == "Namespace" {
+ d = "Template"
+ }
+ if s.vardef {
+ d += "(def)"
+ } else {
+ d += "(use)"
+ }
+ r := p.Range(s.start, s.length)
+ y := protocol.DocumentSymbol{
+ Name: s.name,
+ Detail: d,
+ Kind: s.kind,
+ Range: r,
+ SelectionRange: r, // or should this be the entire {{...}}?
+ }
+ ans = append(ans, y)
+ }
+ return ans, nil
+}
diff --git a/internal/lsp/testdata/%percent/perc%ent.go b/gopls/internal/lsp/testdata/%percent/perc%ent.go
index 93b5e5570..93b5e5570 100644
--- a/internal/lsp/testdata/%percent/perc%ent.go
+++ b/gopls/internal/lsp/testdata/%percent/perc%ent.go
diff --git a/internal/lsp/testdata/addimport/addimport.go.golden b/gopls/internal/lsp/testdata/addimport/addimport.go.golden
index 9605aa6f9..9605aa6f9 100644
--- a/internal/lsp/testdata/addimport/addimport.go.golden
+++ b/gopls/internal/lsp/testdata/addimport/addimport.go.golden
diff --git a/internal/lsp/testdata/addimport/addimport.go.in b/gopls/internal/lsp/testdata/addimport/addimport.go.in
index 07b454f52..07b454f52 100644
--- a/internal/lsp/testdata/addimport/addimport.go.in
+++ b/gopls/internal/lsp/testdata/addimport/addimport.go.in
diff --git a/internal/lsp/testdata/address/address.go b/gopls/internal/lsp/testdata/address/address.go
index 3f1c2fa8d..3f1c2fa8d 100644
--- a/internal/lsp/testdata/address/address.go
+++ b/gopls/internal/lsp/testdata/address/address.go
diff --git a/gopls/internal/lsp/testdata/analyzer/bad_test.go b/gopls/internal/lsp/testdata/analyzer/bad_test.go
new file mode 100644
index 000000000..b1724c666
--- /dev/null
+++ b/gopls/internal/lsp/testdata/analyzer/bad_test.go
@@ -0,0 +1,24 @@
+package analyzer
+
+import (
+ "fmt"
+ "sync"
+ "testing"
+ "time"
+)
+
+func Testbad(t *testing.T) { //@diag("", "tests", "Testbad has malformed name: first letter after 'Test' must not be lowercase", "warning")
+ var x sync.Mutex
+ _ = x //@diag("x", "copylocks", "assignment copies lock value to _: sync.Mutex", "warning")
+
+ printfWrapper("%s") //@diag(re`printfWrapper\(.*\)`, "printf", "golang.org/lsptests/analyzer.printfWrapper format %s reads arg #1, but call has 0 args", "warning")
+}
+
+func printfWrapper(format string, args ...interface{}) {
+ fmt.Printf(format, args...)
+}
+
+func _() {
+ now := time.Now()
+ fmt.Println(now.Format("2006-02-01")) //@diag("2006-02-01", "timeformat", "2006-02-01 should be 2006-01-02", "warning")
+}
diff --git a/internal/lsp/testdata/anon/anon.go.in b/gopls/internal/lsp/testdata/anon/anon.go.in
index 36611b268..36611b268 100644
--- a/internal/lsp/testdata/anon/anon.go.in
+++ b/gopls/internal/lsp/testdata/anon/anon.go.in
diff --git a/internal/lsp/testdata/append/append.go b/gopls/internal/lsp/testdata/append/append.go
index 2880e59db..2880e59db 100644
--- a/internal/lsp/testdata/append/append.go
+++ b/gopls/internal/lsp/testdata/append/append.go
diff --git a/internal/lsp/testdata/append/append2.go.in b/gopls/internal/lsp/testdata/append/append2.go.in
index 15bd357b2..15bd357b2 100644
--- a/internal/lsp/testdata/append/append2.go.in
+++ b/gopls/internal/lsp/testdata/append/append2.go.in
diff --git a/gopls/internal/lsp/testdata/arraytype/array_type.go.in b/gopls/internal/lsp/testdata/arraytype/array_type.go.in
new file mode 100644
index 000000000..ac1a3e782
--- /dev/null
+++ b/gopls/internal/lsp/testdata/arraytype/array_type.go.in
@@ -0,0 +1,50 @@
+package arraytype
+
+import (
+ "golang.org/lsptests/foo"
+)
+
+func _() {
+ var (
+ val string //@item(atVal, "val", "string", "var")
+ )
+
+ // disabled - see issue #54822
+ [] // complete(" //", PackageFoo)
+
+ []val //@complete(" //")
+
+ []foo.StructFoo //@complete(" //", StructFoo)
+
+ []foo.StructFoo(nil) //@complete("(", StructFoo)
+
+ []*foo.StructFoo //@complete(" //", StructFoo)
+
+ [...]foo.StructFoo //@complete(" //", StructFoo)
+
+ [2][][4]foo.StructFoo //@complete(" //", StructFoo)
+
+ []struct { f []foo.StructFoo } //@complete(" }", StructFoo)
+}
+
+func _() {
+ type myInt int //@item(atMyInt, "myInt", "int", "type")
+
+ var mark []myInt //@item(atMark, "mark", "[]myInt", "var")
+
+ var s []myInt //@item(atS, "s", "[]myInt", "var")
+ s = []m //@complete(" //", atMyInt)
+ // disabled - see issue #54822
+ s = [] // complete(" //", atMyInt, PackageFoo)
+
+ var a [1]myInt
+ a = [1]m //@complete(" //", atMyInt)
+
+ var ds [][]myInt
+ ds = [][]m //@complete(" //", atMyInt)
+}
+
+func _() {
+ var b [0]byte //@item(atByte, "b", "[0]byte", "var")
+ var _ []byte = b //@snippet(" //", atByte, "b[:]", "b[:]")
+}
diff --git a/gopls/internal/lsp/testdata/assign/assign.go.in b/gopls/internal/lsp/testdata/assign/assign.go.in
new file mode 100644
index 000000000..93a622c83
--- /dev/null
+++ b/gopls/internal/lsp/testdata/assign/assign.go.in
@@ -0,0 +1,26 @@
+package assign
+
+import "golang.org/lsptests/assign/internal/secret"
+
+func _() {
+ secret.Hello()
+ var (
+ myInt int //@item(assignInt, "myInt", "int", "var")
+ myStr string //@item(assignStr, "myStr", "string", "var")
+ )
+
+ var _ string = my //@rank(" //", assignStr, assignInt)
+ var _ string = //@rank(" //", assignStr, assignInt)
+}
+
+func _() {
+ var a string = a //@complete(" //")
+}
+
+func _() {
+ fooBar := fooBa //@complete(" //"),item(assignFooBar, "fooBar", "", "var")
+ abc, fooBar := 123, fooBa //@complete(" //", assignFooBar)
+ {
+ fooBar := fooBa //@complete(" //", assignFooBar)
+ }
+}
diff --git a/internal/lsp/testdata/assign/internal/secret/secret.go b/gopls/internal/lsp/testdata/assign/internal/secret/secret.go
index 5ee1554df..5ee1554df 100644
--- a/internal/lsp/testdata/assign/internal/secret/secret.go
+++ b/gopls/internal/lsp/testdata/assign/internal/secret/secret.go
diff --git a/gopls/internal/lsp/testdata/bad/bad0.go b/gopls/internal/lsp/testdata/bad/bad0.go
new file mode 100644
index 000000000..0f23a3911
--- /dev/null
+++ b/gopls/internal/lsp/testdata/bad/bad0.go
@@ -0,0 +1,24 @@
+//go:build go1.11
+// +build go1.11
+
+package bad
+
+import _ "golang.org/lsptests/assign/internal/secret" //@diag("\"golang.org/lsptests/assign/internal/secret\"", "compiler", "could not import golang.org/lsptests/assign/internal/secret \\(invalid use of internal package \"golang.org/lsptests/assign/internal/secret\"\\)", "error")
+
+func stuff() { //@item(stuff, "stuff", "func()", "func")
+ x := "heeeeyyyy"
+ random2(x) //@diag("x", "compiler", "cannot use x \\(variable of type string\\) as int value in argument to random2", "error")
+ random2(1) //@complete("dom", random, random2, random3)
+ y := 3 //@diag("y", "compiler", "y declared (and|but) not used", "error")
+}
+
+type bob struct { //@item(bob, "bob", "struct{...}", "struct")
+ x int
+}
+
+func _() {
+ var q int
+ _ = &bob{
+ f: q, //@diag("f: q", "compiler", "unknown field f in struct literal", "error")
+ }
+}
diff --git a/gopls/internal/lsp/testdata/bad/bad1.go b/gopls/internal/lsp/testdata/bad/bad1.go
new file mode 100644
index 000000000..13b3d0af6
--- /dev/null
+++ b/gopls/internal/lsp/testdata/bad/bad1.go
@@ -0,0 +1,34 @@
+//go:build go1.11
+// +build go1.11
+
+package bad
+
+// See #36637
+type stateFunc func() stateFunc //@item(stateFunc, "stateFunc", "func() stateFunc", "type")
+
+var a unknown //@item(global_a, "a", "unknown", "var"),diag("unknown", "compiler", "(undeclared name|undefined): unknown", "error")
+
+func random() int { //@item(random, "random", "func() int", "func")
+ //@complete("", global_a, bob, random, random2, random3, stateFunc, stuff)
+ return 0
+}
+
+func random2(y int) int { //@item(random2, "random2", "func(y int) int", "func"),item(bad_y_param, "y", "int", "var")
+ x := 6 //@item(x, "x", "int", "var"),diag("x", "compiler", "x declared (and|but) not used", "error")
+ var q blah //@item(q, "q", "blah", "var"),diag("q", "compiler", "q declared (and|but) not used", "error"),diag("blah", "compiler", "(undeclared name|undefined): blah", "error")
+ var t **blob //@item(t, "t", "**blob", "var"),diag("t", "compiler", "t declared (and|but) not used", "error"),diag("blob", "compiler", "(undeclared name|undefined): blob", "error")
+ //@complete("", q, t, x, bad_y_param, global_a, bob, random, random2, random3, stateFunc, stuff)
+
+ return y
+}
+
+func random3(y ...int) { //@item(random3, "random3", "func(y ...int)", "func"),item(y_variadic_param, "y", "[]int", "var")
+ //@complete("", y_variadic_param, global_a, bob, random, random2, random3, stateFunc, stuff)
+
+ var ch chan (favType1) //@item(ch, "ch", "chan (favType1)", "var"),diag("ch", "compiler", "ch declared (and|but) not used", "error"),diag("favType1", "compiler", "(undeclared name|undefined): favType1", "error")
+ var m map[keyType]int //@item(m, "m", "map[keyType]int", "var"),diag("m", "compiler", "m declared (and|but) not used", "error"),diag("keyType", "compiler", "(undeclared name|undefined): keyType", "error")
+ var arr []favType2 //@item(arr, "arr", "[]favType2", "var"),diag("arr", "compiler", "arr declared (and|but) not used", "error"),diag("favType2", "compiler", "(undeclared name|undefined): favType2", "error")
+ var fn1 func() badResult //@item(fn1, "fn1", "func() badResult", "var"),diag("fn1", "compiler", "fn1 declared (and|but) not used", "error"),diag("badResult", "compiler", "(undeclared name|undefined): badResult", "error")
+ var fn2 func(badParam) //@item(fn2, "fn2", "func(badParam)", "var"),diag("fn2", "compiler", "fn2 declared (and|but) not used", "error"),diag("badParam", "compiler", "(undeclared name|undefined): badParam", "error")
+ //@complete("", arr, ch, fn1, fn2, m, y_variadic_param, global_a, bob, random, random2, random3, stateFunc, stuff)
+}
diff --git a/gopls/internal/lsp/testdata/badstmt/badstmt.go.in b/gopls/internal/lsp/testdata/badstmt/badstmt.go.in
new file mode 100644
index 000000000..81aee201d
--- /dev/null
+++ b/gopls/internal/lsp/testdata/badstmt/badstmt.go.in
@@ -0,0 +1,29 @@
+package badstmt
+
+import (
+ "golang.org/lsptests/foo"
+)
+
+// The nonewvars expectation asserts that the go/analysis framework ran.
+// See comments in noparse.
+
+func _(x int) {
+ defer foo.F //@complete(" //", Foo),diag(" //", "syntax", "function must be invoked in defer statement|expression in defer must be function call", "error")
+ defer foo.F //@complete(" //", Foo)
+ x := 123 //@diag(":=", "nonewvars", "no new variables", "warning")
+}
+
+func _() {
+ switch true {
+ case true:
+ go foo.F //@complete(" //", Foo)
+ }
+}
+
+func _() {
+ defer func() {
+ foo.F //@complete(" //", Foo),snippet(" //", Foo, "Foo()", "Foo()")
+
+ foo. //@rank(" //", Foo)
+ }
+}
diff --git a/gopls/internal/lsp/testdata/badstmt/badstmt_2.go.in b/gopls/internal/lsp/testdata/badstmt/badstmt_2.go.in
new file mode 100644
index 000000000..6af9c35e3
--- /dev/null
+++ b/gopls/internal/lsp/testdata/badstmt/badstmt_2.go.in
@@ -0,0 +1,9 @@
+package badstmt
+
+import (
+ "golang.org/lsptests/foo"
+)
+
+func _() {
+ defer func() { foo. } //@rank(" }", Foo)
+}
diff --git a/gopls/internal/lsp/testdata/badstmt/badstmt_3.go.in b/gopls/internal/lsp/testdata/badstmt/badstmt_3.go.in
new file mode 100644
index 000000000..d135e2015
--- /dev/null
+++ b/gopls/internal/lsp/testdata/badstmt/badstmt_3.go.in
@@ -0,0 +1,9 @@
+package badstmt
+
+import (
+ "golang.org/lsptests/foo"
+)
+
+func _() {
+ go foo. //@rank(" //", Foo, IntFoo),snippet(" //", Foo, "Foo()", "Foo()")
+}
diff --git a/gopls/internal/lsp/testdata/badstmt/badstmt_4.go.in b/gopls/internal/lsp/testdata/badstmt/badstmt_4.go.in
new file mode 100644
index 000000000..6afd635ec
--- /dev/null
+++ b/gopls/internal/lsp/testdata/badstmt/badstmt_4.go.in
@@ -0,0 +1,11 @@
+package badstmt
+
+import (
+ "golang.org/lsptests/foo"
+)
+
+func _() {
+ go func() {
+ defer foo. //@rank(" //", Foo, IntFoo)
+ }
+}
diff --git a/gopls/internal/lsp/testdata/bar/bar.go.in b/gopls/internal/lsp/testdata/bar/bar.go.in
new file mode 100644
index 000000000..502bdf740
--- /dev/null
+++ b/gopls/internal/lsp/testdata/bar/bar.go.in
@@ -0,0 +1,47 @@
+// +build go1.11
+
+package bar
+
+import (
+ "golang.org/lsptests/foo" //@item(foo, "foo", "\"golang.org/lsptests/foo\"", "package")
+)
+
+func helper(i foo.IntFoo) {} //@item(helper, "helper", "func(i foo.IntFoo)", "func")
+
+func _() {
+ help //@complete("l", helper)
+ _ = foo.StructFoo{} //@complete("S", IntFoo, StructFoo)
+}
+
+// Bar is a function.
+func Bar() { //@item(Bar, "Bar", "func()", "func", "Bar is a function.")
+ foo.Foo() //@complete("F", Foo, IntFoo, StructFoo)
+ var _ foo.IntFoo //@complete("I", IntFoo, StructFoo)
+ foo.() //@complete("(", Foo, IntFoo, StructFoo)
+}
+
+func _() {
+ var Valentine int //@item(Valentine, "Valentine", "int", "var")
+
+ _ = foo.StructFoo{
+ Valu //@complete(" //", Value)
+ }
+ _ = foo.StructFoo{
+ Va //@complete("a", Value, Valentine)
+ }
+ _ = foo.StructFoo{
+ Value: 5, //@complete("a", Value)
+ }
+ _ = foo.StructFoo{
+ //@complete("", Value, Valentine, foo, helper, Bar)
+ }
+ _ = foo.StructFoo{
+ Value: Valen //@complete("le", Valentine)
+ }
+ _ = foo.StructFoo{
+ Value: //@complete(" //", Valentine, foo, helper, Bar)
+ }
+ _ = foo.StructFoo{
+ Value: //@complete(" ", Valentine, foo, helper, Bar)
+ }
+}
diff --git a/gopls/internal/lsp/testdata/basiclit/basiclit.go b/gopls/internal/lsp/testdata/basiclit/basiclit.go
new file mode 100644
index 000000000..ab895dc01
--- /dev/null
+++ b/gopls/internal/lsp/testdata/basiclit/basiclit.go
@@ -0,0 +1,13 @@
+package basiclit
+
+func _() {
+ var a int // something for lexical completions
+
+ _ = "hello." //@complete(".")
+
+ _ = 1 //@complete(" //")
+
+ _ = 1. //@complete(".")
+
+ _ = 'a' //@complete("' ")
+}
diff --git a/gopls/internal/lsp/testdata/baz/baz.go.in b/gopls/internal/lsp/testdata/baz/baz.go.in
new file mode 100644
index 000000000..94952e126
--- /dev/null
+++ b/gopls/internal/lsp/testdata/baz/baz.go.in
@@ -0,0 +1,33 @@
+// +build go1.11
+
+package baz
+
+import (
+ "golang.org/lsptests/bar"
+
+ f "golang.org/lsptests/foo"
+)
+
+var FooStruct f.StructFoo
+
+func Baz() {
+ defer bar.Bar() //@complete("B", Bar)
+ // TODO(rstambler): Test completion here.
+ defer bar.B
+ var x f.IntFoo //@complete("n", IntFoo),typdef("x", IntFoo)
+ bar.Bar() //@complete("B", Bar)
+}
+
+func _() {
+ bob := f.StructFoo{Value: 5}
+ if x := bob. //@complete(" //", Value)
+ switch true == false {
+ case true:
+ if x := bob. //@complete(" //", Value)
+ case false:
+ }
+ if x := bob.Va //@complete("a", Value)
+ switch true == true {
+ default:
+ }
+}
diff --git a/internal/lsp/testdata/builtins/builtin_args.go b/gopls/internal/lsp/testdata/builtins/builtin_args.go
index 052777fe9..052777fe9 100644
--- a/internal/lsp/testdata/builtins/builtin_args.go
+++ b/gopls/internal/lsp/testdata/builtins/builtin_args.go
diff --git a/gopls/internal/lsp/testdata/builtins/builtin_go117.go b/gopls/internal/lsp/testdata/builtins/builtin_go117.go
new file mode 100644
index 000000000..57abcde15
--- /dev/null
+++ b/gopls/internal/lsp/testdata/builtins/builtin_go117.go
@@ -0,0 +1,8 @@
+//go:build !go1.18
+// +build !go1.18
+
+package builtins
+
+func _() {
+ //@complete("", append, bool, byte, cap, close, complex, complex128, complex64, copy, delete, error, _false, float32, float64, imag, int, int16, int32, int64, int8, len, make, new, panic, print, println, real, recover, rune, string, _true, uint, uint16, uint32, uint64, uint8, uintptr, _nil)
+}
diff --git a/gopls/internal/lsp/testdata/builtins/builtin_go118.go b/gopls/internal/lsp/testdata/builtins/builtin_go118.go
new file mode 100644
index 000000000..dabffcc67
--- /dev/null
+++ b/gopls/internal/lsp/testdata/builtins/builtin_go118.go
@@ -0,0 +1,8 @@
+//go:build go1.18 && !go1.21
+// +build go1.18,!go1.21
+
+package builtins
+
+func _() {
+ //@complete("", any, append, bool, byte, cap, close, comparable, complex, complex128, complex64, copy, delete, error, _false, float32, float64, imag, int, int16, int32, int64, int8, len, make, new, panic, print, println, real, recover, rune, string, _true, uint, uint16, uint32, uint64, uint8, uintptr, _nil)
+}
diff --git a/gopls/internal/lsp/testdata/builtins/builtin_go121.go b/gopls/internal/lsp/testdata/builtins/builtin_go121.go
new file mode 100644
index 000000000..cb8e8fae3
--- /dev/null
+++ b/gopls/internal/lsp/testdata/builtins/builtin_go121.go
@@ -0,0 +1,8 @@
+//go:build go1.21
+// +build go1.21
+
+package builtins
+
+func _() {
+ //@complete("", any, append, bool, byte, cap, clear, close, comparable, complex, complex128, complex64, copy, delete, error, _false, float32, float64, imag, int, int16, int32, int64, int8, len, make, new, panic, print, println, real, recover, rune, string, _true, uint, uint16, uint32, uint64, uint8, uintptr, _nil)
+}
diff --git a/internal/lsp/testdata/builtins/builtin_types.go b/gopls/internal/lsp/testdata/builtins/builtin_types.go
index 93a4a7095..93a4a7095 100644
--- a/internal/lsp/testdata/builtins/builtin_types.go
+++ b/gopls/internal/lsp/testdata/builtins/builtin_types.go
diff --git a/gopls/internal/lsp/testdata/builtins/builtins.go b/gopls/internal/lsp/testdata/builtins/builtins.go
new file mode 100644
index 000000000..75c6e4183
--- /dev/null
+++ b/gopls/internal/lsp/testdata/builtins/builtins.go
@@ -0,0 +1,47 @@
+package builtins
+
+// Definitions of builtin completion items.
+
+/* any */ //@item(any, "any", "", "interface")
+/* Create markers for builtin types. Only for use by this test.
+/* append(slice []Type, elems ...Type) []Type */ //@item(append, "append", "func(slice []Type, elems ...Type) []Type", "func")
+/* bool */ //@item(bool, "bool", "", "type")
+/* byte */ //@item(byte, "byte", "", "type")
+/* cap(v Type) int */ //@item(cap, "cap", "func(v Type) int", "func")
+/* clear[T interface{ ~[]Type | ~map[Type]Type1 }](t T) */ //@item(clear, "clear", "func(t T)", "func")
+/* close(c chan<- Type) */ //@item(close, "close", "func(c chan<- Type)", "func")
+/* comparable */ //@item(comparable, "comparable", "", "interface")
+/* complex(r float64, i float64) */ //@item(complex, "complex", "func(r float64, i float64) complex128", "func")
+/* complex128 */ //@item(complex128, "complex128", "", "type")
+/* complex64 */ //@item(complex64, "complex64", "", "type")
+/* copy(dst []Type, src []Type) int */ //@item(copy, "copy", "func(dst []Type, src []Type) int", "func")
+/* delete(m map[Type]Type1, key Type) */ //@item(delete, "delete", "func(m map[Type]Type1, key Type)", "func")
+/* error */ //@item(error, "error", "", "interface")
+/* false */ //@item(_false, "false", "", "const")
+/* float32 */ //@item(float32, "float32", "", "type")
+/* float64 */ //@item(float64, "float64", "", "type")
+/* imag(c complex128) float64 */ //@item(imag, "imag", "func(c complex128) float64", "func")
+/* int */ //@item(int, "int", "", "type")
+/* int16 */ //@item(int16, "int16", "", "type")
+/* int32 */ //@item(int32, "int32", "", "type")
+/* int64 */ //@item(int64, "int64", "", "type")
+/* int8 */ //@item(int8, "int8", "", "type")
+/* iota */ //@item(iota, "iota", "", "const")
+/* len(v Type) int */ //@item(len, "len", "func(v Type) int", "func")
+/* make(t Type, size ...int) Type */ //@item(make, "make", "func(t Type, size ...int) Type", "func")
+/* new(Type) *Type */ //@item(new, "new", "func(Type) *Type", "func")
+/* nil */ //@item(_nil, "nil", "", "var")
+/* panic(v interface{}) */ //@item(panic, "panic", "func(v interface{})", "func")
+/* print(args ...Type) */ //@item(print, "print", "func(args ...Type)", "func")
+/* println(args ...Type) */ //@item(println, "println", "func(args ...Type)", "func")
+/* real(c complex128) float64 */ //@item(real, "real", "func(c complex128) float64", "func")
+/* recover() interface{} */ //@item(recover, "recover", "func() interface{}", "func")
+/* rune */ //@item(rune, "rune", "", "type")
+/* string */ //@item(string, "string", "", "type")
+/* true */ //@item(_true, "true", "", "const")
+/* uint */ //@item(uint, "uint", "", "type")
+/* uint16 */ //@item(uint16, "uint16", "", "type")
+/* uint32 */ //@item(uint32, "uint32", "", "type")
+/* uint64 */ //@item(uint64, "uint64", "", "type")
+/* uint8 */ //@item(uint8, "uint8", "", "type")
+/* uintptr */ //@item(uintptr, "uintptr", "", "type")
diff --git a/internal/lsp/testdata/builtins/constants.go b/gopls/internal/lsp/testdata/builtins/constants.go
index 7ad07bd1f..7ad07bd1f 100644
--- a/internal/lsp/testdata/builtins/constants.go
+++ b/gopls/internal/lsp/testdata/builtins/constants.go
diff --git a/gopls/internal/lsp/testdata/callhierarchy/callhierarchy.go b/gopls/internal/lsp/testdata/callhierarchy/callhierarchy.go
new file mode 100644
index 000000000..252e8054f
--- /dev/null
+++ b/gopls/internal/lsp/testdata/callhierarchy/callhierarchy.go
@@ -0,0 +1,70 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package callhierarchy
+
+import "golang.org/lsptests/callhierarchy/outgoing"
+
+func a() { //@mark(hierarchyA, "a")
+ D()
+}
+
+func b() { //@mark(hierarchyB, "b")
+ D()
+}
+
+// C is an exported function
+func C() { //@mark(hierarchyC, "C")
+ D()
+ D()
+}
+
+// To test hierarchy across function literals
+var x = func() { //@mark(hierarchyLiteral, "func"),mark(hierarchyLiteralOut, "x")
+ D()
+}
+
+// D is exported to test incoming/outgoing calls across packages
+func D() { //@mark(hierarchyD, "D"),incomingcalls(hierarchyD, hierarchyA, hierarchyB, hierarchyC, hierarchyLiteral, incomingA),outgoingcalls(hierarchyD, hierarchyE, hierarchyF, hierarchyG, hierarchyLiteralOut, outgoingB, hierarchyFoo, hierarchyH, hierarchyI, hierarchyJ, hierarchyK)
+ e()
+ x()
+ F()
+ outgoing.B()
+ foo := func() {} //@mark(hierarchyFoo, "foo"),incomingcalls(hierarchyFoo, hierarchyD),outgoingcalls(hierarchyFoo)
+ foo()
+
+ func() {
+ g()
+ }()
+
+ var i Interface = impl{}
+ i.H()
+ i.I()
+
+ s := Struct{}
+ s.J()
+ s.K()
+}
+
+func e() {} //@mark(hierarchyE, "e")
+
+// F is an exported function
+func F() {} //@mark(hierarchyF, "F")
+
+func g() {} //@mark(hierarchyG, "g")
+
+type Interface interface {
+ H() //@mark(hierarchyH, "H")
+ I() //@mark(hierarchyI, "I")
+}
+
+type impl struct{}
+
+func (i impl) H() {}
+func (i impl) I() {}
+
+type Struct struct {
+ J func() //@mark(hierarchyJ, "J")
+ K func() //@mark(hierarchyK, "K")
+}
diff --git a/gopls/internal/lsp/testdata/callhierarchy/incoming/incoming.go b/gopls/internal/lsp/testdata/callhierarchy/incoming/incoming.go
new file mode 100644
index 000000000..c629aa879
--- /dev/null
+++ b/gopls/internal/lsp/testdata/callhierarchy/incoming/incoming.go
@@ -0,0 +1,12 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package incoming
+
+import "golang.org/lsptests/callhierarchy"
+
+// A is exported to test incoming calls across packages
+func A() { //@mark(incomingA, "A")
+ callhierarchy.D()
+}
diff --git a/internal/lsp/testdata/callhierarchy/outgoing/outgoing.go b/gopls/internal/lsp/testdata/callhierarchy/outgoing/outgoing.go
index 74362d419..74362d419 100644
--- a/internal/lsp/testdata/callhierarchy/outgoing/outgoing.go
+++ b/gopls/internal/lsp/testdata/callhierarchy/outgoing/outgoing.go
diff --git a/internal/lsp/testdata/casesensitive/casesensitive.go b/gopls/internal/lsp/testdata/casesensitive/casesensitive.go
index 6f49d36ff..6f49d36ff 100644
--- a/internal/lsp/testdata/casesensitive/casesensitive.go
+++ b/gopls/internal/lsp/testdata/casesensitive/casesensitive.go
diff --git a/internal/lsp/testdata/cast/cast.go.in b/gopls/internal/lsp/testdata/cast/cast.go.in
index 7fe21903c..7fe21903c 100644
--- a/internal/lsp/testdata/cast/cast.go.in
+++ b/gopls/internal/lsp/testdata/cast/cast.go.in
diff --git a/internal/lsp/testdata/cgo/declarecgo.go b/gopls/internal/lsp/testdata/cgo/declarecgo.go
index c283cdfb2..c283cdfb2 100644
--- a/internal/lsp/testdata/cgo/declarecgo.go
+++ b/gopls/internal/lsp/testdata/cgo/declarecgo.go
diff --git a/gopls/internal/lsp/testdata/cgo/declarecgo.go.golden b/gopls/internal/lsp/testdata/cgo/declarecgo.go.golden
new file mode 100644
index 000000000..0d6fbb0ff
--- /dev/null
+++ b/gopls/internal/lsp/testdata/cgo/declarecgo.go.golden
@@ -0,0 +1,30 @@
+-- funccgoexample-definition --
+cgo/declarecgo.go:18:6-13: defined here as ```go
+func Example()
+```
+
+[`cgo.Example` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/cgo#Example)
+-- funccgoexample-definition-json --
+{
+ "span": {
+ "uri": "file://cgo/declarecgo.go",
+ "start": {
+ "line": 18,
+ "column": 6,
+ "offset": 151
+ },
+ "end": {
+ "line": 18,
+ "column": 13,
+ "offset": 158
+ }
+ },
+ "description": "```go\nfunc Example()\n```\n\n[`cgo.Example` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/cgo#Example)"
+}
+
+-- funccgoexample-hoverdef --
+```go
+func Example()
+```
+
+[`cgo.Example` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/cgo#Example)
diff --git a/internal/lsp/testdata/cgo/declarecgo_nocgo.go b/gopls/internal/lsp/testdata/cgo/declarecgo_nocgo.go
index a05c01257..a05c01257 100644
--- a/internal/lsp/testdata/cgo/declarecgo_nocgo.go
+++ b/gopls/internal/lsp/testdata/cgo/declarecgo_nocgo.go
diff --git a/gopls/internal/lsp/testdata/cgoimport/usecgo.go.golden b/gopls/internal/lsp/testdata/cgoimport/usecgo.go.golden
new file mode 100644
index 000000000..03fc22468
--- /dev/null
+++ b/gopls/internal/lsp/testdata/cgoimport/usecgo.go.golden
@@ -0,0 +1,30 @@
+-- funccgoexample-definition --
+cgo/declarecgo.go:18:6-13: defined here as ```go
+func cgo.Example()
+```
+
+[`cgo.Example` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/cgo#Example)
+-- funccgoexample-definition-json --
+{
+ "span": {
+ "uri": "file://cgo/declarecgo.go",
+ "start": {
+ "line": 18,
+ "column": 6,
+ "offset": 151
+ },
+ "end": {
+ "line": 18,
+ "column": 13,
+ "offset": 158
+ }
+ },
+ "description": "```go\nfunc cgo.Example()\n```\n\n[`cgo.Example` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/cgo#Example)"
+}
+
+-- funccgoexample-hoverdef --
+```go
+func cgo.Example()
+```
+
+[`cgo.Example` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/cgo#Example)
diff --git a/gopls/internal/lsp/testdata/cgoimport/usecgo.go.in b/gopls/internal/lsp/testdata/cgoimport/usecgo.go.in
new file mode 100644
index 000000000..414a739da
--- /dev/null
+++ b/gopls/internal/lsp/testdata/cgoimport/usecgo.go.in
@@ -0,0 +1,9 @@
+package cgoimport
+
+import (
+ "golang.org/lsptests/cgo"
+)
+
+func _() {
+ cgo.Example() //@godef("ample", funccgoexample),complete("ample", funccgoexample)
+}
diff --git a/internal/lsp/testdata/channel/channel.go b/gopls/internal/lsp/testdata/channel/channel.go
index d6bd311e3..d6bd311e3 100644
--- a/internal/lsp/testdata/channel/channel.go
+++ b/gopls/internal/lsp/testdata/channel/channel.go
diff --git a/internal/lsp/testdata/codelens/codelens_test.go b/gopls/internal/lsp/testdata/codelens/codelens_test.go
index f6c696416..f6c696416 100644
--- a/internal/lsp/testdata/codelens/codelens_test.go
+++ b/gopls/internal/lsp/testdata/codelens/codelens_test.go
diff --git a/internal/lsp/testdata/comment_completion/comment_completion.go.in b/gopls/internal/lsp/testdata/comment_completion/comment_completion.go.in
index dbca0ff17..dbca0ff17 100644
--- a/internal/lsp/testdata/comment_completion/comment_completion.go.in
+++ b/gopls/internal/lsp/testdata/comment_completion/comment_completion.go.in
diff --git a/internal/lsp/testdata/complit/complit.go.in b/gopls/internal/lsp/testdata/complit/complit.go.in
index e819810d8..e819810d8 100644
--- a/internal/lsp/testdata/complit/complit.go.in
+++ b/gopls/internal/lsp/testdata/complit/complit.go.in
diff --git a/internal/lsp/testdata/constant/constant.go b/gopls/internal/lsp/testdata/constant/constant.go
index c1c88e16e..c1c88e16e 100644
--- a/internal/lsp/testdata/constant/constant.go
+++ b/gopls/internal/lsp/testdata/constant/constant.go
diff --git a/internal/lsp/testdata/danglingstmt/dangling_for.go b/gopls/internal/lsp/testdata/danglingstmt/dangling_for.go
index a16d3bd88..a16d3bd88 100644
--- a/internal/lsp/testdata/danglingstmt/dangling_for.go
+++ b/gopls/internal/lsp/testdata/danglingstmt/dangling_for.go
diff --git a/internal/lsp/testdata/danglingstmt/dangling_for_init.go b/gopls/internal/lsp/testdata/danglingstmt/dangling_for_init.go
index e1130bc23..e1130bc23 100644
--- a/internal/lsp/testdata/danglingstmt/dangling_for_init.go
+++ b/gopls/internal/lsp/testdata/danglingstmt/dangling_for_init.go
diff --git a/internal/lsp/testdata/danglingstmt/dangling_for_init_cond.go b/gopls/internal/lsp/testdata/danglingstmt/dangling_for_init_cond.go
index fb0269f16..fb0269f16 100644
--- a/internal/lsp/testdata/danglingstmt/dangling_for_init_cond.go
+++ b/gopls/internal/lsp/testdata/danglingstmt/dangling_for_init_cond.go
diff --git a/internal/lsp/testdata/danglingstmt/dangling_for_init_cond_post.go b/gopls/internal/lsp/testdata/danglingstmt/dangling_for_init_cond_post.go
index 14f78d392..14f78d392 100644
--- a/internal/lsp/testdata/danglingstmt/dangling_for_init_cond_post.go
+++ b/gopls/internal/lsp/testdata/danglingstmt/dangling_for_init_cond_post.go
diff --git a/internal/lsp/testdata/danglingstmt/dangling_if.go b/gopls/internal/lsp/testdata/danglingstmt/dangling_if.go
index 91f145ada..91f145ada 100644
--- a/internal/lsp/testdata/danglingstmt/dangling_if.go
+++ b/gopls/internal/lsp/testdata/danglingstmt/dangling_if.go
diff --git a/internal/lsp/testdata/danglingstmt/dangling_if_eof.go b/gopls/internal/lsp/testdata/danglingstmt/dangling_if_eof.go
index 3454c9fa6..3454c9fa6 100644
--- a/internal/lsp/testdata/danglingstmt/dangling_if_eof.go
+++ b/gopls/internal/lsp/testdata/danglingstmt/dangling_if_eof.go
diff --git a/internal/lsp/testdata/danglingstmt/dangling_if_init.go b/gopls/internal/lsp/testdata/danglingstmt/dangling_if_init.go
index 887c31860..887c31860 100644
--- a/internal/lsp/testdata/danglingstmt/dangling_if_init.go
+++ b/gopls/internal/lsp/testdata/danglingstmt/dangling_if_init.go
diff --git a/internal/lsp/testdata/danglingstmt/dangling_if_init_cond.go b/gopls/internal/lsp/testdata/danglingstmt/dangling_if_init_cond.go
index 5371283e9..5371283e9 100644
--- a/internal/lsp/testdata/danglingstmt/dangling_if_init_cond.go
+++ b/gopls/internal/lsp/testdata/danglingstmt/dangling_if_init_cond.go
diff --git a/internal/lsp/testdata/danglingstmt/dangling_multiline_if.go b/gopls/internal/lsp/testdata/danglingstmt/dangling_multiline_if.go
index 2213777e1..2213777e1 100644
--- a/internal/lsp/testdata/danglingstmt/dangling_multiline_if.go
+++ b/gopls/internal/lsp/testdata/danglingstmt/dangling_multiline_if.go
diff --git a/internal/lsp/testdata/danglingstmt/dangling_selector_1.go b/gopls/internal/lsp/testdata/danglingstmt/dangling_selector_1.go
index 772152f7b..772152f7b 100644
--- a/internal/lsp/testdata/danglingstmt/dangling_selector_1.go
+++ b/gopls/internal/lsp/testdata/danglingstmt/dangling_selector_1.go
diff --git a/gopls/internal/lsp/testdata/danglingstmt/dangling_selector_2.go b/gopls/internal/lsp/testdata/danglingstmt/dangling_selector_2.go
new file mode 100644
index 000000000..8d4b15bff
--- /dev/null
+++ b/gopls/internal/lsp/testdata/danglingstmt/dangling_selector_2.go
@@ -0,0 +1,8 @@
+package danglingstmt
+
+import "golang.org/lsptests/foo"
+
+func _() {
+ foo. //@rank(" //", Foo)
+ var _ = []string{foo.} //@rank("}", Foo)
+}
diff --git a/internal/lsp/testdata/danglingstmt/dangling_switch_init.go b/gopls/internal/lsp/testdata/danglingstmt/dangling_switch_init.go
index 15da3ce10..15da3ce10 100644
--- a/internal/lsp/testdata/danglingstmt/dangling_switch_init.go
+++ b/gopls/internal/lsp/testdata/danglingstmt/dangling_switch_init.go
diff --git a/internal/lsp/testdata/danglingstmt/dangling_switch_init_tag.go b/gopls/internal/lsp/testdata/danglingstmt/dangling_switch_init_tag.go
index 20b825b2e..20b825b2e 100644
--- a/internal/lsp/testdata/danglingstmt/dangling_switch_init_tag.go
+++ b/gopls/internal/lsp/testdata/danglingstmt/dangling_switch_init_tag.go
diff --git a/gopls/internal/lsp/testdata/deep/deep.go b/gopls/internal/lsp/testdata/deep/deep.go
new file mode 100644
index 000000000..6908824f8
--- /dev/null
+++ b/gopls/internal/lsp/testdata/deep/deep.go
@@ -0,0 +1,142 @@
+package deep
+
+import "context"
+
+type deepA struct {
+ b deepB //@item(deepBField, "b", "deepB", "field")
+}
+
+type deepB struct {
+}
+
+func wantsDeepB(deepB) {}
+
+func _() {
+ var a deepA //@item(deepAVar, "a", "deepA", "var")
+ a.b //@item(deepABField, "a.b", "deepB", "field")
+ wantsDeepB(a) //@deep(")", deepABField, deepAVar)
+
+ deepA{a} //@snippet("}", deepABField, "a.b", "a.b")
+}
+
+func wantsContext(context.Context) {}
+
+func _() {
+ context.Background() //@item(ctxBackground, "context.Background", "func() context.Context", "func", "Background returns a non-nil, empty Context.")
+ context.TODO() //@item(ctxTODO, "context.TODO", "func() context.Context", "func", "TODO returns a non-nil, empty Context.")
+
+ wantsContext(c) //@rank(")", ctxBackground),rank(")", ctxTODO)
+}
+
+func _() {
+ var cork struct{ err error }
+ cork.err //@item(deepCorkErr, "cork.err", "error", "field")
+ context //@item(deepContextPkg, "context", "\"context\"", "package")
+ var _ error = co //@rank(" //", deepCorkErr, deepContextPkg)
+}
+
+func _() {
+ // deepCircle is circular.
+ type deepCircle struct {
+ *deepCircle
+ }
+ var circle deepCircle //@item(deepCircle, "circle", "deepCircle", "var")
+ circle.deepCircle //@item(deepCircleField, "circle.deepCircle", "*deepCircle", "field")
+ var _ deepCircle = circ //@deep(" //", deepCircle, deepCircleField),snippet(" //", deepCircleField, "*circle.deepCircle", "*circle.deepCircle")
+}
+
+func _() {
+ type deepEmbedC struct {
+ }
+ type deepEmbedB struct {
+ deepEmbedC
+ }
+ type deepEmbedA struct {
+ deepEmbedB
+ }
+
+ wantsC := func(deepEmbedC) {}
+
+ var a deepEmbedA //@item(deepEmbedA, "a", "deepEmbedA", "var")
+ a.deepEmbedB //@item(deepEmbedB, "a.deepEmbedB", "deepEmbedB", "field")
+ a.deepEmbedC //@item(deepEmbedC, "a.deepEmbedC", "deepEmbedC", "field")
+ wantsC(a) //@deep(")", deepEmbedC, deepEmbedA, deepEmbedB)
+}
+
+func _() {
+ type nested struct {
+ a int
+ n *nested //@item(deepNestedField, "n", "*nested", "field")
+ }
+
+ nested{
+ a: 123, //@deep(" //", deepNestedField)
+ }
+}
+
+func _() {
+ var a struct {
+ b struct {
+ c int
+ }
+ d int
+ }
+
+ a.d //@item(deepAD, "a.d", "int", "field")
+ a.b.c //@item(deepABC, "a.b.c", "int", "field")
+ a.b //@item(deepAB, "a.b", "struct{...}", "field")
+ a //@item(deepA, "a", "struct{...}", "var")
+
+ // "a.d" should be ranked above the deeper "a.b.c"
+ var i int
+ i = a //@deep(" //", deepAD, deepABC, deepA, deepAB)
+}
+
+type foo struct {
+ b bar
+}
+
+func (f foo) bar() bar {
+ return f.b
+}
+
+func (f foo) barPtr() *bar {
+ return &f.b
+}
+
+type bar struct{}
+
+func (b bar) valueReceiver() int {
+ return 0
+}
+
+func (b *bar) ptrReceiver() int {
+ return 0
+}
+
+func _() {
+ var (
+ i int
+ f foo
+ )
+
+ f.bar().valueReceiver //@item(deepBarValue, "f.bar().valueReceiver", "func() int", "method")
+ f.barPtr().ptrReceiver //@item(deepBarPtrPtr, "f.barPtr().ptrReceiver", "func() int", "method")
+ f.barPtr().valueReceiver //@item(deepBarPtrValue, "f.barPtr().valueReceiver", "func() int", "method")
+
+ i = fbar //@fuzzy(" //", deepBarValue, deepBarPtrPtr, deepBarPtrValue)
+}
+
+func (b baz) Thing() struct{ val int } {
+ return b.thing
+}
+
+type baz struct {
+ thing struct{ val int }
+}
+
+func (b baz) _() {
+ b.Thing().val //@item(deepBazMethVal, "b.Thing().val", "int", "field")
+ b.thing.val //@item(deepBazFieldVal, "b.thing.val", "int", "field")
+ var _ int = bval //@rank(" //", deepBazFieldVal, deepBazMethVal)
+}
diff --git a/gopls/internal/lsp/testdata/errors/errors.go b/gopls/internal/lsp/testdata/errors/errors.go
new file mode 100644
index 000000000..e14cde69e
--- /dev/null
+++ b/gopls/internal/lsp/testdata/errors/errors.go
@@ -0,0 +1,10 @@
+package errors
+
+import (
+ "golang.org/lsptests/types"
+)
+
+func _() {
+ bob.Bob() //@complete(".")
+ types.b //@complete(" //", Bob_interface)
+}
diff --git a/internal/lsp/testdata/extract/extract_function/extract_args_returns.go b/gopls/internal/lsp/testdata/extract/extract_function/extract_args_returns.go
index 63d24df00..63d24df00 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_args_returns.go
+++ b/gopls/internal/lsp/testdata/extract/extract_function/extract_args_returns.go
diff --git a/internal/lsp/testdata/extract/extract_function/extract_args_returns.go.golden b/gopls/internal/lsp/testdata/extract/extract_function/extract_args_returns.go.golden
index b15345e23..b15345e23 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_args_returns.go.golden
+++ b/gopls/internal/lsp/testdata/extract/extract_function/extract_args_returns.go.golden
diff --git a/internal/lsp/testdata/extract/extract_function/extract_basic.go b/gopls/internal/lsp/testdata/extract/extract_function/extract_basic.go
index 5e44de26f..5e44de26f 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_basic.go
+++ b/gopls/internal/lsp/testdata/extract/extract_function/extract_basic.go
diff --git a/internal/lsp/testdata/extract/extract_function/extract_basic.go.golden b/gopls/internal/lsp/testdata/extract/extract_function/extract_basic.go.golden
index 18adc4db4..18adc4db4 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_basic.go.golden
+++ b/gopls/internal/lsp/testdata/extract/extract_function/extract_basic.go.golden
diff --git a/gopls/internal/lsp/testdata/extract/extract_function/extract_basic_comment.go b/gopls/internal/lsp/testdata/extract/extract_function/extract_basic_comment.go
new file mode 100644
index 000000000..71f969e48
--- /dev/null
+++ b/gopls/internal/lsp/testdata/extract/extract_function/extract_basic_comment.go
@@ -0,0 +1,12 @@
+package extract
+
+func _() {
+ a := /* comment in the middle of a line */ 1 //@mark(exSt18, "a")
+ // Comment on its own line //@mark(exSt19, "Comment")
+ _ = 3 + 4 //@mark(exEn18, "4"),mark(exEn19, "4"),mark(exSt20, "_")
+ // Comment right after 3 + 4
+
+ // Comment after with space //@mark(exEn20, "Comment")
+
+ //@extractfunc(exSt18, exEn18),extractfunc(exSt19, exEn19),extractfunc(exSt20, exEn20)
+}
diff --git a/gopls/internal/lsp/testdata/extract/extract_function/extract_basic_comment.go.golden b/gopls/internal/lsp/testdata/extract/extract_function/extract_basic_comment.go.golden
new file mode 100644
index 000000000..1b2869ef7
--- /dev/null
+++ b/gopls/internal/lsp/testdata/extract/extract_function/extract_basic_comment.go.golden
@@ -0,0 +1,57 @@
+-- functionextraction_extract_basic_comment_4_2 --
+package extract
+
+func _() {
+ /* comment in the middle of a line */
+ //@mark(exSt18, "a")
+ // Comment on its own line //@mark(exSt19, "Comment")
+ newFunction() //@mark(exEn18, "4"),mark(exEn19, "4"),mark(exSt20, "_")
+ // Comment right after 3 + 4
+
+ // Comment after with space //@mark(exEn20, "Comment")
+
+ //@extractfunc(exSt18, exEn18),extractfunc(exSt19, exEn19),extractfunc(exSt20, exEn20)
+}
+
+func newFunction() {
+ a := 1
+
+ _ = 3 + 4
+}
+
+-- functionextraction_extract_basic_comment_5_5 --
+package extract
+
+func _() {
+ a := /* comment in the middle of a line */ 1 //@mark(exSt18, "a")
+ // Comment on its own line //@mark(exSt19, "Comment")
+ newFunction() //@mark(exEn18, "4"),mark(exEn19, "4"),mark(exSt20, "_")
+ // Comment right after 3 + 4
+
+ // Comment after with space //@mark(exEn20, "Comment")
+
+ //@extractfunc(exSt18, exEn18),extractfunc(exSt19, exEn19),extractfunc(exSt20, exEn20)
+}
+
+func newFunction() {
+ _ = 3 + 4
+}
+
+-- functionextraction_extract_basic_comment_6_2 --
+package extract
+
+func _() {
+ a := /* comment in the middle of a line */ 1 //@mark(exSt18, "a")
+ // Comment on its own line //@mark(exSt19, "Comment")
+ newFunction() //@mark(exEn18, "4"),mark(exEn19, "4"),mark(exSt20, "_")
+ // Comment right after 3 + 4
+
+ // Comment after with space //@mark(exEn20, "Comment")
+
+ //@extractfunc(exSt18, exEn18),extractfunc(exSt19, exEn19),extractfunc(exSt20, exEn20)
+}
+
+func newFunction() {
+ _ = 3 + 4
+}
+
diff --git a/internal/lsp/testdata/extract/extract_function/extract_issue_44813.go b/gopls/internal/lsp/testdata/extract/extract_function/extract_issue_44813.go
index 9713b9101..9713b9101 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_issue_44813.go
+++ b/gopls/internal/lsp/testdata/extract/extract_function/extract_issue_44813.go
diff --git a/internal/lsp/testdata/extract/extract_function/extract_issue_44813.go.golden b/gopls/internal/lsp/testdata/extract/extract_function/extract_issue_44813.go.golden
index 3198c9fa2..3198c9fa2 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_issue_44813.go.golden
+++ b/gopls/internal/lsp/testdata/extract/extract_function/extract_issue_44813.go.golden
diff --git a/internal/lsp/testdata/extract/extract_function/extract_redefine.go b/gopls/internal/lsp/testdata/extract/extract_function/extract_redefine.go
index 604f4757c..604f4757c 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_redefine.go
+++ b/gopls/internal/lsp/testdata/extract/extract_function/extract_redefine.go
diff --git a/internal/lsp/testdata/extract/extract_function/extract_redefine.go.golden b/gopls/internal/lsp/testdata/extract/extract_function/extract_redefine.go.golden
index e2ee217d1..e2ee217d1 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_redefine.go.golden
+++ b/gopls/internal/lsp/testdata/extract/extract_function/extract_redefine.go.golden
diff --git a/internal/lsp/testdata/extract/extract_function/extract_return_basic.go b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_basic.go
index 1ff24daeb..1ff24daeb 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_return_basic.go
+++ b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_basic.go
diff --git a/internal/lsp/testdata/extract/extract_function/extract_return_basic.go.golden b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_basic.go.golden
index 6103d1ee9..6103d1ee9 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_return_basic.go.golden
+++ b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_basic.go.golden
diff --git a/internal/lsp/testdata/extract/extract_function/extract_return_basic_nonnested.go b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_basic_nonnested.go
index 08573acdd..08573acdd 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_return_basic_nonnested.go
+++ b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_basic_nonnested.go
diff --git a/internal/lsp/testdata/extract/extract_function/extract_return_basic_nonnested.go.golden b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_basic_nonnested.go.golden
index 19e48da01..19e48da01 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_return_basic_nonnested.go.golden
+++ b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_basic_nonnested.go.golden
diff --git a/internal/lsp/testdata/extract/extract_function/extract_return_complex.go b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_complex.go
index 605c5ec2e..605c5ec2e 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_return_complex.go
+++ b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_complex.go
diff --git a/internal/lsp/testdata/extract/extract_function/extract_return_complex.go.golden b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_complex.go.golden
index 4d201227a..4d201227a 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_return_complex.go.golden
+++ b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_complex.go.golden
diff --git a/internal/lsp/testdata/extract/extract_function/extract_return_complex_nonnested.go b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_complex_nonnested.go
index 6b2a4d8c0..6b2a4d8c0 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_return_complex_nonnested.go
+++ b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_complex_nonnested.go
diff --git a/internal/lsp/testdata/extract/extract_function/extract_return_complex_nonnested.go.golden b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_complex_nonnested.go.golden
index de54b1534..de54b1534 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_return_complex_nonnested.go.golden
+++ b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_complex_nonnested.go.golden
diff --git a/internal/lsp/testdata/extract/extract_function/extract_return_func_lit.go b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_func_lit.go
index b3fb4fd21..b3fb4fd21 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_return_func_lit.go
+++ b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_func_lit.go
diff --git a/internal/lsp/testdata/extract/extract_function/extract_return_func_lit.go.golden b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_func_lit.go.golden
index 3af747c22..3af747c22 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_return_func_lit.go.golden
+++ b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_func_lit.go.golden
diff --git a/internal/lsp/testdata/extract/extract_function/extract_return_func_lit_nonnested.go b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_func_lit_nonnested.go
index c22db2a6d..c22db2a6d 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_return_func_lit_nonnested.go
+++ b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_func_lit_nonnested.go
diff --git a/internal/lsp/testdata/extract/extract_function/extract_return_func_lit_nonnested.go.golden b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_func_lit_nonnested.go.golden
index efa22ba2b..efa22ba2b 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_return_func_lit_nonnested.go.golden
+++ b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_func_lit_nonnested.go.golden
diff --git a/internal/lsp/testdata/extract/extract_function/extract_return_init.go b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_init.go
index c1994c1c1..c1994c1c1 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_return_init.go
+++ b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_init.go
diff --git a/internal/lsp/testdata/extract/extract_function/extract_return_init.go.golden b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_init.go.golden
index 31d1b2ddf..31d1b2ddf 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_return_init.go.golden
+++ b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_init.go.golden
diff --git a/internal/lsp/testdata/extract/extract_function/extract_return_init_nonnested.go b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_init_nonnested.go
index bb5ed083c..bb5ed083c 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_return_init_nonnested.go
+++ b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_init_nonnested.go
diff --git a/internal/lsp/testdata/extract/extract_function/extract_return_init_nonnested.go.golden b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_init_nonnested.go.golden
index 58bb57325..58bb57325 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_return_init_nonnested.go.golden
+++ b/gopls/internal/lsp/testdata/extract/extract_function/extract_return_init_nonnested.go.golden
diff --git a/internal/lsp/testdata/extract/extract_function/extract_scope.go b/gopls/internal/lsp/testdata/extract/extract_function/extract_scope.go
index 6cc141fd1..6cc141fd1 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_scope.go
+++ b/gopls/internal/lsp/testdata/extract/extract_function/extract_scope.go
diff --git a/internal/lsp/testdata/extract/extract_function/extract_scope.go.golden b/gopls/internal/lsp/testdata/extract/extract_function/extract_scope.go.golden
index a4803b4fe..a4803b4fe 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_scope.go.golden
+++ b/gopls/internal/lsp/testdata/extract/extract_function/extract_scope.go.golden
diff --git a/internal/lsp/testdata/extract/extract_function/extract_smart_initialization.go b/gopls/internal/lsp/testdata/extract/extract_function/extract_smart_initialization.go
index da2c669a8..da2c669a8 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_smart_initialization.go
+++ b/gopls/internal/lsp/testdata/extract/extract_function/extract_smart_initialization.go
diff --git a/internal/lsp/testdata/extract/extract_function/extract_smart_initialization.go.golden b/gopls/internal/lsp/testdata/extract/extract_function/extract_smart_initialization.go.golden
index 8be5040c4..8be5040c4 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_smart_initialization.go.golden
+++ b/gopls/internal/lsp/testdata/extract/extract_function/extract_smart_initialization.go.golden
diff --git a/internal/lsp/testdata/extract/extract_function/extract_smart_return.go b/gopls/internal/lsp/testdata/extract/extract_function/extract_smart_return.go
index 264d680e2..264d680e2 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_smart_return.go
+++ b/gopls/internal/lsp/testdata/extract/extract_function/extract_smart_return.go
diff --git a/internal/lsp/testdata/extract/extract_function/extract_smart_return.go.golden b/gopls/internal/lsp/testdata/extract/extract_function/extract_smart_return.go.golden
index fdf55ae6d..fdf55ae6d 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_smart_return.go.golden
+++ b/gopls/internal/lsp/testdata/extract/extract_function/extract_smart_return.go.golden
diff --git a/internal/lsp/testdata/extract/extract_function/extract_unnecessary_param.go b/gopls/internal/lsp/testdata/extract/extract_function/extract_unnecessary_param.go
index a6eb1f872..a6eb1f872 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_unnecessary_param.go
+++ b/gopls/internal/lsp/testdata/extract/extract_function/extract_unnecessary_param.go
diff --git a/internal/lsp/testdata/extract/extract_function/extract_unnecessary_param.go.golden b/gopls/internal/lsp/testdata/extract/extract_function/extract_unnecessary_param.go.golden
index 4374f3728..4374f3728 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_unnecessary_param.go.golden
+++ b/gopls/internal/lsp/testdata/extract/extract_function/extract_unnecessary_param.go.golden
diff --git a/internal/lsp/testdata/extract/extract_method/extract_basic.go b/gopls/internal/lsp/testdata/extract/extract_method/extract_basic.go
index c9a8d9dce..c9a8d9dce 100644
--- a/internal/lsp/testdata/extract/extract_method/extract_basic.go
+++ b/gopls/internal/lsp/testdata/extract/extract_method/extract_basic.go
diff --git a/gopls/internal/lsp/testdata/extract/extract_method/extract_basic.go.golden b/gopls/internal/lsp/testdata/extract/extract_method/extract_basic.go.golden
new file mode 100644
index 000000000..3310d973e
--- /dev/null
+++ b/gopls/internal/lsp/testdata/extract/extract_method/extract_basic.go.golden
@@ -0,0 +1,364 @@
+-- functionextraction_extract_basic_13_2 --
+package extract
+
+type A struct {
+ x int
+ y int
+}
+
+func (a *A) XLessThanYP() bool {
+ return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a *A) AddP() int {
+ sum := newFunction(a) //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+ return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func newFunction(a *A) int {
+ sum := a.x + a.y
+ return sum
+}
+
+func (a A) XLessThanY() bool {
+ return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a A) Add() int {
+ sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+ return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+-- functionextraction_extract_basic_14_2 --
+package extract
+
+type A struct {
+ x int
+ y int
+}
+
+func (a *A) XLessThanYP() bool {
+ return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a *A) AddP() int {
+ sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+ return newFunction(sum) //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func newFunction(sum int) int {
+ return sum
+}
+
+func (a A) XLessThanY() bool {
+ return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a A) Add() int {
+ sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+ return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+-- functionextraction_extract_basic_18_2 --
+package extract
+
+type A struct {
+ x int
+ y int
+}
+
+func (a *A) XLessThanYP() bool {
+ return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a *A) AddP() int {
+ sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+ return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func (a A) XLessThanY() bool {
+ return newFunction(a) //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func newFunction(a A) bool {
+ return a.x < a.y
+}
+
+func (a A) Add() int {
+ sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+ return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+-- functionextraction_extract_basic_22_2 --
+package extract
+
+type A struct {
+ x int
+ y int
+}
+
+func (a *A) XLessThanYP() bool {
+ return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a *A) AddP() int {
+ sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+ return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func (a A) XLessThanY() bool {
+ return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a A) Add() int {
+ sum := newFunction(a) //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+ return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func newFunction(a A) int {
+ sum := a.x + a.y
+ return sum
+}
+
+-- functionextraction_extract_basic_23_2 --
+package extract
+
+type A struct {
+ x int
+ y int
+}
+
+func (a *A) XLessThanYP() bool {
+ return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a *A) AddP() int {
+ sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+ return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func (a A) XLessThanY() bool {
+ return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a A) Add() int {
+ sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+ return newFunction(sum) //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func newFunction(sum int) int {
+ return sum
+}
+
+-- functionextraction_extract_basic_9_2 --
+package extract
+
+type A struct {
+ x int
+ y int
+}
+
+func (a *A) XLessThanYP() bool {
+ return newFunction(a) //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func newFunction(a *A) bool {
+ return a.x < a.y
+}
+
+func (a *A) AddP() int {
+ sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+ return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func (a A) XLessThanY() bool {
+ return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a A) Add() int {
+ sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+ return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+-- methodextraction_extract_basic_13_2 --
+package extract
+
+type A struct {
+ x int
+ y int
+}
+
+func (a *A) XLessThanYP() bool {
+ return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a *A) AddP() int {
+ sum := a.newMethod() //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+ return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func (a *A) newMethod() int {
+ sum := a.x + a.y
+ return sum
+}
+
+func (a A) XLessThanY() bool {
+ return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a A) Add() int {
+ sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+ return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+-- methodextraction_extract_basic_14_2 --
+package extract
+
+type A struct {
+ x int
+ y int
+}
+
+func (a *A) XLessThanYP() bool {
+ return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a *A) AddP() int {
+ sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+ return a.newMethod(sum) //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func (*A) newMethod(sum int) int {
+ return sum
+}
+
+func (a A) XLessThanY() bool {
+ return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a A) Add() int {
+ sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+ return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+-- methodextraction_extract_basic_18_2 --
+package extract
+
+type A struct {
+ x int
+ y int
+}
+
+func (a *A) XLessThanYP() bool {
+ return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a *A) AddP() int {
+ sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+ return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func (a A) XLessThanY() bool {
+ return a.newMethod() //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a A) newMethod() bool {
+ return a.x < a.y
+}
+
+func (a A) Add() int {
+ sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+ return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+-- methodextraction_extract_basic_22_2 --
+package extract
+
+type A struct {
+ x int
+ y int
+}
+
+func (a *A) XLessThanYP() bool {
+ return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a *A) AddP() int {
+ sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+ return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func (a A) XLessThanY() bool {
+ return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a A) Add() int {
+ sum := a.newMethod() //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+ return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func (a A) newMethod() int {
+ sum := a.x + a.y
+ return sum
+}
+
+-- methodextraction_extract_basic_23_2 --
+package extract
+
+type A struct {
+ x int
+ y int
+}
+
+func (a *A) XLessThanYP() bool {
+ return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a *A) AddP() int {
+ sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+ return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func (a A) XLessThanY() bool {
+ return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a A) Add() int {
+ sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+ return a.newMethod(sum) //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func (A) newMethod(sum int) int {
+ return sum
+}
+
+-- methodextraction_extract_basic_9_2 --
+package extract
+
+type A struct {
+ x int
+ y int
+}
+
+func (a *A) XLessThanYP() bool {
+ return a.newMethod() //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a *A) newMethod() bool {
+ return a.x < a.y
+}
+
+func (a *A) AddP() int {
+ sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+ return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func (a A) XLessThanY() bool {
+ return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a A) Add() int {
+ sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+ return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
diff --git a/gopls/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go b/gopls/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go
new file mode 100644
index 000000000..cbb70a04c
--- /dev/null
+++ b/gopls/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go
@@ -0,0 +1,6 @@
+package extract
+
+func _() {
+ var _ = 1 + 2 //@suggestedfix("1", "refactor.extract", "")
+ var _ = 3 + 4 //@suggestedfix("3 + 4", "refactor.extract", "")
+}
diff --git a/gopls/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go.golden b/gopls/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go.golden
new file mode 100644
index 000000000..3fd9b3287
--- /dev/null
+++ b/gopls/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go.golden
@@ -0,0 +1,18 @@
+-- suggestedfix_extract_basic_lit_4_10 --
+package extract
+
+func _() {
+ x := 1
+ var _ = x + 2 //@suggestedfix("1", "refactor.extract", "")
+ var _ = 3 + 4 //@suggestedfix("3 + 4", "refactor.extract", "")
+}
+
+-- suggestedfix_extract_basic_lit_5_10 --
+package extract
+
+func _() {
+ var _ = 1 + 2 //@suggestedfix("1", "refactor.extract", "")
+ x := 3 + 4
+ var _ = x //@suggestedfix("3 + 4", "refactor.extract", "")
+}
+
diff --git a/gopls/internal/lsp/testdata/extract/extract_variable/extract_func_call.go b/gopls/internal/lsp/testdata/extract/extract_variable/extract_func_call.go
new file mode 100644
index 000000000..a20b45f58
--- /dev/null
+++ b/gopls/internal/lsp/testdata/extract/extract_variable/extract_func_call.go
@@ -0,0 +1,9 @@
+package extract
+
+import "strconv"
+
+func _() {
+ x0 := append([]int{}, 1) //@suggestedfix("append([]int{}, 1)", "refactor.extract", "")
+ str := "1"
+ b, err := strconv.Atoi(str) //@suggestedfix("strconv.Atoi(str)", "refactor.extract", "")
+}
diff --git a/gopls/internal/lsp/testdata/extract/extract_variable/extract_func_call.go.golden b/gopls/internal/lsp/testdata/extract/extract_variable/extract_func_call.go.golden
new file mode 100644
index 000000000..d59c0ee99
--- /dev/null
+++ b/gopls/internal/lsp/testdata/extract/extract_variable/extract_func_call.go.golden
@@ -0,0 +1,24 @@
+-- suggestedfix_extract_func_call_6_8 --
+package extract
+
+import "strconv"
+
+func _() {
+ x := append([]int{}, 1)
+ x0 := x //@suggestedfix("append([]int{}, 1)", "refactor.extract", "")
+ str := "1"
+ b, err := strconv.Atoi(str) //@suggestedfix("strconv.Atoi(str)", "refactor.extract", "")
+}
+
+-- suggestedfix_extract_func_call_8_12 --
+package extract
+
+import "strconv"
+
+func _() {
+ x0 := append([]int{}, 1) //@suggestedfix("append([]int{}, 1)", "refactor.extract", "")
+ str := "1"
+ x, x1 := strconv.Atoi(str)
+ b, err := x, x1 //@suggestedfix("strconv.Atoi(str)", "refactor.extract", "")
+}
+
diff --git a/gopls/internal/lsp/testdata/extract/extract_variable/extract_scope.go b/gopls/internal/lsp/testdata/extract/extract_variable/extract_scope.go
new file mode 100644
index 000000000..c14ad7092
--- /dev/null
+++ b/gopls/internal/lsp/testdata/extract/extract_variable/extract_scope.go
@@ -0,0 +1,13 @@
+package extract
+
+import "go/ast"
+
+func _() {
+ x0 := 0
+ if true {
+ y := ast.CompositeLit{} //@suggestedfix("ast.CompositeLit{}", "refactor.extract", "")
+ }
+ if true {
+ x1 := !false //@suggestedfix("!false", "refactor.extract", "")
+ }
+}
diff --git a/gopls/internal/lsp/testdata/extract/extract_variable/extract_scope.go.golden b/gopls/internal/lsp/testdata/extract/extract_variable/extract_scope.go.golden
new file mode 100644
index 000000000..1c2f64b7d
--- /dev/null
+++ b/gopls/internal/lsp/testdata/extract/extract_variable/extract_scope.go.golden
@@ -0,0 +1,32 @@
+-- suggestedfix_extract_scope_11_9 --
+package extract
+
+import "go/ast"
+
+func _() {
+ x0 := 0
+ if true {
+ y := ast.CompositeLit{} //@suggestedfix("ast.CompositeLit{}", "refactor.extract", "")
+ }
+ if true {
+ x := !false
+ x1 := x //@suggestedfix("!false", "refactor.extract", "")
+ }
+}
+
+-- suggestedfix_extract_scope_8_8 --
+package extract
+
+import "go/ast"
+
+func _() {
+ x0 := 0
+ if true {
+ x := ast.CompositeLit{}
+ y := x //@suggestedfix("ast.CompositeLit{}", "refactor.extract", "")
+ }
+ if true {
+ x1 := !false //@suggestedfix("!false", "refactor.extract", "")
+ }
+}
+
diff --git a/internal/lsp/testdata/fieldlist/field_list.go b/gopls/internal/lsp/testdata/fieldlist/field_list.go
index e687defb1..e687defb1 100644
--- a/internal/lsp/testdata/fieldlist/field_list.go
+++ b/gopls/internal/lsp/testdata/fieldlist/field_list.go
diff --git a/gopls/internal/lsp/testdata/fillstruct/a.go b/gopls/internal/lsp/testdata/fillstruct/a.go
new file mode 100644
index 000000000..e1add2d47
--- /dev/null
+++ b/gopls/internal/lsp/testdata/fillstruct/a.go
@@ -0,0 +1,27 @@
+package fillstruct
+
+import (
+ "golang.org/lsptests/fillstruct/data"
+)
+
+type basicStruct struct {
+ foo int
+}
+
+var _ = basicStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type twoArgStruct struct {
+ foo int
+ bar string
+}
+
+var _ = twoArgStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type nestedStruct struct {
+ bar string
+ basic basicStruct
+}
+
+var _ = nestedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+var _ = data.B{} //@suggestedfix("}", "refactor.rewrite", "Fill")
diff --git a/gopls/internal/lsp/testdata/fillstruct/a.go.golden b/gopls/internal/lsp/testdata/fillstruct/a.go.golden
new file mode 100644
index 000000000..ca1db04ea
--- /dev/null
+++ b/gopls/internal/lsp/testdata/fillstruct/a.go.golden
@@ -0,0 +1,126 @@
+-- suggestedfix_a_11_21 --
+package fillstruct
+
+import (
+ "golang.org/lsptests/fillstruct/data"
+)
+
+type basicStruct struct {
+ foo int
+}
+
+var _ = basicStruct{
+ foo: 0,
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type twoArgStruct struct {
+ foo int
+ bar string
+}
+
+var _ = twoArgStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type nestedStruct struct {
+ bar string
+ basic basicStruct
+}
+
+var _ = nestedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+var _ = data.B{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+-- suggestedfix_a_18_22 --
+package fillstruct
+
+import (
+ "golang.org/lsptests/fillstruct/data"
+)
+
+type basicStruct struct {
+ foo int
+}
+
+var _ = basicStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type twoArgStruct struct {
+ foo int
+ bar string
+}
+
+var _ = twoArgStruct{
+ foo: 0,
+ bar: "",
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type nestedStruct struct {
+ bar string
+ basic basicStruct
+}
+
+var _ = nestedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+var _ = data.B{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+-- suggestedfix_a_25_22 --
+package fillstruct
+
+import (
+ "golang.org/lsptests/fillstruct/data"
+)
+
+type basicStruct struct {
+ foo int
+}
+
+var _ = basicStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type twoArgStruct struct {
+ foo int
+ bar string
+}
+
+var _ = twoArgStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type nestedStruct struct {
+ bar string
+ basic basicStruct
+}
+
+var _ = nestedStruct{
+ bar: "",
+ basic: basicStruct{},
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+var _ = data.B{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+-- suggestedfix_a_27_16 --
+package fillstruct
+
+import (
+ "golang.org/lsptests/fillstruct/data"
+)
+
+type basicStruct struct {
+ foo int
+}
+
+var _ = basicStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type twoArgStruct struct {
+ foo int
+ bar string
+}
+
+var _ = twoArgStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type nestedStruct struct {
+ bar string
+ basic basicStruct
+}
+
+var _ = nestedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+var _ = data.B{
+ ExportedInt: 0,
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
diff --git a/gopls/internal/lsp/testdata/fillstruct/a2.go b/gopls/internal/lsp/testdata/fillstruct/a2.go
new file mode 100644
index 000000000..b5e30a84f
--- /dev/null
+++ b/gopls/internal/lsp/testdata/fillstruct/a2.go
@@ -0,0 +1,29 @@
+package fillstruct
+
+type typedStruct struct {
+ m map[string]int
+ s []int
+ c chan int
+ c1 <-chan int
+ a [2]string
+}
+
+var _ = typedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type funStruct struct {
+ fn func(i int) int
+}
+
+var _ = funStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type funStructCompex struct {
+ fn func(i int, s string) (string, int)
+}
+
+var _ = funStructCompex{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type funStructEmpty struct {
+ fn func()
+}
+
+var _ = funStructEmpty{} //@suggestedfix("}", "refactor.rewrite", "Fill")
diff --git a/gopls/internal/lsp/testdata/fillstruct/a2.go.golden b/gopls/internal/lsp/testdata/fillstruct/a2.go.golden
new file mode 100644
index 000000000..2eca3e349
--- /dev/null
+++ b/gopls/internal/lsp/testdata/fillstruct/a2.go.golden
@@ -0,0 +1,139 @@
+-- suggestedfix_a2_11_21 --
+package fillstruct
+
+type typedStruct struct {
+ m map[string]int
+ s []int
+ c chan int
+ c1 <-chan int
+ a [2]string
+}
+
+var _ = typedStruct{
+ m: map[string]int{},
+ s: []int{},
+ c: make(chan int),
+ c1: make(<-chan int),
+ a: [2]string{},
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type funStruct struct {
+ fn func(i int) int
+}
+
+var _ = funStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type funStructCompex struct {
+ fn func(i int, s string) (string, int)
+}
+
+var _ = funStructCompex{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type funStructEmpty struct {
+ fn func()
+}
+
+var _ = funStructEmpty{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+-- suggestedfix_a2_17_19 --
+package fillstruct
+
+type typedStruct struct {
+ m map[string]int
+ s []int
+ c chan int
+ c1 <-chan int
+ a [2]string
+}
+
+var _ = typedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type funStruct struct {
+ fn func(i int) int
+}
+
+var _ = funStruct{
+ fn: func(i int) int {
+ },
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type funStructCompex struct {
+ fn func(i int, s string) (string, int)
+}
+
+var _ = funStructCompex{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type funStructEmpty struct {
+ fn func()
+}
+
+var _ = funStructEmpty{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+-- suggestedfix_a2_23_25 --
+package fillstruct
+
+type typedStruct struct {
+ m map[string]int
+ s []int
+ c chan int
+ c1 <-chan int
+ a [2]string
+}
+
+var _ = typedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type funStruct struct {
+ fn func(i int) int
+}
+
+var _ = funStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type funStructCompex struct {
+ fn func(i int, s string) (string, int)
+}
+
+var _ = funStructCompex{
+ fn: func(i int, s string) (string, int) {
+ },
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type funStructEmpty struct {
+ fn func()
+}
+
+var _ = funStructEmpty{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+-- suggestedfix_a2_29_24 --
+package fillstruct
+
+type typedStruct struct {
+ m map[string]int
+ s []int
+ c chan int
+ c1 <-chan int
+ a [2]string
+}
+
+var _ = typedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type funStruct struct {
+ fn func(i int) int
+}
+
+var _ = funStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type funStructCompex struct {
+ fn func(i int, s string) (string, int)
+}
+
+var _ = funStructCompex{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type funStructEmpty struct {
+ fn func()
+}
+
+var _ = funStructEmpty{
+ fn: func() {
+ },
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
diff --git a/gopls/internal/lsp/testdata/fillstruct/a3.go b/gopls/internal/lsp/testdata/fillstruct/a3.go
new file mode 100644
index 000000000..59cd9fa28
--- /dev/null
+++ b/gopls/internal/lsp/testdata/fillstruct/a3.go
@@ -0,0 +1,42 @@
+package fillstruct
+
+import (
+ "go/ast"
+ "go/token"
+)
+
+type Foo struct {
+ A int
+}
+
+type Bar struct {
+ X *Foo
+ Y *Foo
+}
+
+var _ = Bar{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type importedStruct struct {
+ m map[*ast.CompositeLit]ast.Field
+ s []ast.BadExpr
+ a [3]token.Token
+ c chan ast.EmptyStmt
+ fn func(ast_decl ast.DeclStmt) ast.Ellipsis
+ st ast.CompositeLit
+}
+
+var _ = importedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type pointerBuiltinStruct struct {
+ b *bool
+ s *string
+ i *int
+}
+
+var _ = pointerBuiltinStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+var _ = []ast.BasicLit{
+ {}, //@suggestedfix("}", "refactor.rewrite", "Fill")
+}
+
+var _ = []ast.BasicLit{{}} //@suggestedfix("}", "refactor.rewrite", "Fill")
diff --git a/gopls/internal/lsp/testdata/fillstruct/a3.go.golden b/gopls/internal/lsp/testdata/fillstruct/a3.go.golden
new file mode 100644
index 000000000..a7c7baa8d
--- /dev/null
+++ b/gopls/internal/lsp/testdata/fillstruct/a3.go.golden
@@ -0,0 +1,243 @@
+-- suggestedfix_a3_17_13 --
+package fillstruct
+
+import (
+ "go/ast"
+ "go/token"
+)
+
+type Foo struct {
+ A int
+}
+
+type Bar struct {
+ X *Foo
+ Y *Foo
+}
+
+var _ = Bar{
+ X: &Foo{},
+ Y: &Foo{},
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type importedStruct struct {
+ m map[*ast.CompositeLit]ast.Field
+ s []ast.BadExpr
+ a [3]token.Token
+ c chan ast.EmptyStmt
+ fn func(ast_decl ast.DeclStmt) ast.Ellipsis
+ st ast.CompositeLit
+}
+
+var _ = importedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type pointerBuiltinStruct struct {
+ b *bool
+ s *string
+ i *int
+}
+
+var _ = pointerBuiltinStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+var _ = []ast.BasicLit{
+ {}, //@suggestedfix("}", "refactor.rewrite", "Fill")
+}
+
+var _ = []ast.BasicLit{{}} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+-- suggestedfix_a3_28_24 --
+package fillstruct
+
+import (
+ "go/ast"
+ "go/token"
+)
+
+type Foo struct {
+ A int
+}
+
+type Bar struct {
+ X *Foo
+ Y *Foo
+}
+
+var _ = Bar{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type importedStruct struct {
+ m map[*ast.CompositeLit]ast.Field
+ s []ast.BadExpr
+ a [3]token.Token
+ c chan ast.EmptyStmt
+ fn func(ast_decl ast.DeclStmt) ast.Ellipsis
+ st ast.CompositeLit
+}
+
+var _ = importedStruct{
+ m: map[*ast.CompositeLit]ast.Field{},
+ s: []ast.BadExpr{},
+ a: [3]token.Token{},
+ c: make(chan ast.EmptyStmt),
+ fn: func(ast_decl ast.DeclStmt) ast.Ellipsis {
+ },
+ st: ast.CompositeLit{},
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type pointerBuiltinStruct struct {
+ b *bool
+ s *string
+ i *int
+}
+
+var _ = pointerBuiltinStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+var _ = []ast.BasicLit{
+ {}, //@suggestedfix("}", "refactor.rewrite", "Fill")
+}
+
+var _ = []ast.BasicLit{{}} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+-- suggestedfix_a3_36_30 --
+package fillstruct
+
+import (
+ "go/ast"
+ "go/token"
+)
+
+type Foo struct {
+ A int
+}
+
+type Bar struct {
+ X *Foo
+ Y *Foo
+}
+
+var _ = Bar{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type importedStruct struct {
+ m map[*ast.CompositeLit]ast.Field
+ s []ast.BadExpr
+ a [3]token.Token
+ c chan ast.EmptyStmt
+ fn func(ast_decl ast.DeclStmt) ast.Ellipsis
+ st ast.CompositeLit
+}
+
+var _ = importedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type pointerBuiltinStruct struct {
+ b *bool
+ s *string
+ i *int
+}
+
+var _ = pointerBuiltinStruct{
+ b: new(bool),
+ s: new(string),
+ i: new(int),
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+var _ = []ast.BasicLit{
+ {}, //@suggestedfix("}", "refactor.rewrite", "Fill")
+}
+
+var _ = []ast.BasicLit{{}} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+-- suggestedfix_a3_39_3 --
+package fillstruct
+
+import (
+ "go/ast"
+ "go/token"
+)
+
+type Foo struct {
+ A int
+}
+
+type Bar struct {
+ X *Foo
+ Y *Foo
+}
+
+var _ = Bar{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type importedStruct struct {
+ m map[*ast.CompositeLit]ast.Field
+ s []ast.BadExpr
+ a [3]token.Token
+ c chan ast.EmptyStmt
+ fn func(ast_decl ast.DeclStmt) ast.Ellipsis
+ st ast.CompositeLit
+}
+
+var _ = importedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type pointerBuiltinStruct struct {
+ b *bool
+ s *string
+ i *int
+}
+
+var _ = pointerBuiltinStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+var _ = []ast.BasicLit{
+ {
+ ValuePos: 0,
+ Kind: 0,
+ Value: "",
+ }, //@suggestedfix("}", "refactor.rewrite", "Fill")
+}
+
+var _ = []ast.BasicLit{{}} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+-- suggestedfix_a3_42_25 --
+package fillstruct
+
+import (
+ "go/ast"
+ "go/token"
+)
+
+type Foo struct {
+ A int
+}
+
+type Bar struct {
+ X *Foo
+ Y *Foo
+}
+
+var _ = Bar{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type importedStruct struct {
+ m map[*ast.CompositeLit]ast.Field
+ s []ast.BadExpr
+ a [3]token.Token
+ c chan ast.EmptyStmt
+ fn func(ast_decl ast.DeclStmt) ast.Ellipsis
+ st ast.CompositeLit
+}
+
+var _ = importedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type pointerBuiltinStruct struct {
+ b *bool
+ s *string
+ i *int
+}
+
+var _ = pointerBuiltinStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+var _ = []ast.BasicLit{
+ {}, //@suggestedfix("}", "refactor.rewrite", "Fill")
+}
+
+var _ = []ast.BasicLit{{
+ ValuePos: 0,
+ Kind: 0,
+ Value: "",
+}} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
diff --git a/gopls/internal/lsp/testdata/fillstruct/a4.go b/gopls/internal/lsp/testdata/fillstruct/a4.go
new file mode 100644
index 000000000..5f52a55fa
--- /dev/null
+++ b/gopls/internal/lsp/testdata/fillstruct/a4.go
@@ -0,0 +1,39 @@
+package fillstruct
+
+import "go/ast"
+
+type iStruct struct {
+ X int
+}
+
+type sStruct struct {
+ str string
+}
+
+type multiFill struct {
+ num int
+ strin string
+ arr []int
+}
+
+type assignStruct struct {
+ n ast.Node
+}
+
+func fill() {
+ var x int
+ var _ = iStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+ var s string
+ var _ = sStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+ var n int
+ _ = []int{}
+ if true {
+ arr := []int{1, 2}
+ }
+ var _ = multiFill{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+ var node *ast.CompositeLit
+ var _ = assignStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+}
diff --git a/gopls/internal/lsp/testdata/fillstruct/a4.go.golden b/gopls/internal/lsp/testdata/fillstruct/a4.go.golden
new file mode 100644
index 000000000..b1e376f05
--- /dev/null
+++ b/gopls/internal/lsp/testdata/fillstruct/a4.go.golden
@@ -0,0 +1,174 @@
+-- suggestedfix_a4_25_18 --
+package fillstruct
+
+import "go/ast"
+
+type iStruct struct {
+ X int
+}
+
+type sStruct struct {
+ str string
+}
+
+type multiFill struct {
+ num int
+ strin string
+ arr []int
+}
+
+type assignStruct struct {
+ n ast.Node
+}
+
+func fill() {
+ var x int
+ var _ = iStruct{
+ X: x,
+ } //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+ var s string
+ var _ = sStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+ var n int
+ _ = []int{}
+ if true {
+ arr := []int{1, 2}
+ }
+ var _ = multiFill{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+ var node *ast.CompositeLit
+ var _ = assignStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+}
+
+-- suggestedfix_a4_28_18 --
+package fillstruct
+
+import "go/ast"
+
+type iStruct struct {
+ X int
+}
+
+type sStruct struct {
+ str string
+}
+
+type multiFill struct {
+ num int
+ strin string
+ arr []int
+}
+
+type assignStruct struct {
+ n ast.Node
+}
+
+func fill() {
+ var x int
+ var _ = iStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+ var s string
+ var _ = sStruct{
+ str: s,
+ } //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+ var n int
+ _ = []int{}
+ if true {
+ arr := []int{1, 2}
+ }
+ var _ = multiFill{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+ var node *ast.CompositeLit
+ var _ = assignStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+}
+
+-- suggestedfix_a4_35_20 --
+package fillstruct
+
+import "go/ast"
+
+type iStruct struct {
+ X int
+}
+
+type sStruct struct {
+ str string
+}
+
+type multiFill struct {
+ num int
+ strin string
+ arr []int
+}
+
+type assignStruct struct {
+ n ast.Node
+}
+
+func fill() {
+ var x int
+ var _ = iStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+ var s string
+ var _ = sStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+ var n int
+ _ = []int{}
+ if true {
+ arr := []int{1, 2}
+ }
+ var _ = multiFill{
+ num: n,
+ strin: s,
+ arr: []int{},
+ } //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+ var node *ast.CompositeLit
+ var _ = assignStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+}
+
+-- suggestedfix_a4_38_23 --
+package fillstruct
+
+import "go/ast"
+
+type iStruct struct {
+ X int
+}
+
+type sStruct struct {
+ str string
+}
+
+type multiFill struct {
+ num int
+ strin string
+ arr []int
+}
+
+type assignStruct struct {
+ n ast.Node
+}
+
+func fill() {
+ var x int
+ var _ = iStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+ var s string
+ var _ = sStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+ var n int
+ _ = []int{}
+ if true {
+ arr := []int{1, 2}
+ }
+ var _ = multiFill{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+ var node *ast.CompositeLit
+ var _ = assignStruct{
+ n: node,
+ } //@suggestedfix("}", "refactor.rewrite", "Fill")
+}
+
diff --git a/internal/lsp/testdata/fillstruct/data/a.go b/gopls/internal/lsp/testdata/fillstruct/data/a.go
index 7ca37736b..7ca37736b 100644
--- a/internal/lsp/testdata/fillstruct/data/a.go
+++ b/gopls/internal/lsp/testdata/fillstruct/data/a.go
diff --git a/gopls/internal/lsp/testdata/fillstruct/fill_struct.go b/gopls/internal/lsp/testdata/fillstruct/fill_struct.go
new file mode 100644
index 000000000..3da904741
--- /dev/null
+++ b/gopls/internal/lsp/testdata/fillstruct/fill_struct.go
@@ -0,0 +1,26 @@
+package fillstruct
+
+type StructA struct {
+ unexportedIntField int
+ ExportedIntField int
+ MapA map[int]string
+ Array []int
+ StructB
+}
+
+type StructA2 struct {
+ B *StructB
+}
+
+type StructA3 struct {
+ B StructB
+}
+
+func fill() {
+ a := StructA{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+ b := StructA2{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+ c := StructA3{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+ if true {
+ _ = StructA3{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+ }
+}
diff --git a/gopls/internal/lsp/testdata/fillstruct/fill_struct.go.golden b/gopls/internal/lsp/testdata/fillstruct/fill_struct.go.golden
new file mode 100644
index 000000000..de01a40f0
--- /dev/null
+++ b/gopls/internal/lsp/testdata/fillstruct/fill_struct.go.golden
@@ -0,0 +1,124 @@
+-- suggestedfix_fill_struct_20_15 --
+package fillstruct
+
+type StructA struct {
+ unexportedIntField int
+ ExportedIntField int
+ MapA map[int]string
+ Array []int
+ StructB
+}
+
+type StructA2 struct {
+ B *StructB
+}
+
+type StructA3 struct {
+ B StructB
+}
+
+func fill() {
+ a := StructA{
+ unexportedIntField: 0,
+ ExportedIntField: 0,
+ MapA: map[int]string{},
+ Array: []int{},
+ StructB: StructB{},
+ } //@suggestedfix("}", "refactor.rewrite", "Fill")
+ b := StructA2{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+ c := StructA3{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+ if true {
+ _ = StructA3{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+ }
+}
+
+-- suggestedfix_fill_struct_21_16 --
+package fillstruct
+
+type StructA struct {
+ unexportedIntField int
+ ExportedIntField int
+ MapA map[int]string
+ Array []int
+ StructB
+}
+
+type StructA2 struct {
+ B *StructB
+}
+
+type StructA3 struct {
+ B StructB
+}
+
+func fill() {
+ a := StructA{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+ b := StructA2{
+ B: &StructB{},
+ } //@suggestedfix("}", "refactor.rewrite", "Fill")
+ c := StructA3{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+ if true {
+ _ = StructA3{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+ }
+}
+
+-- suggestedfix_fill_struct_22_16 --
+package fillstruct
+
+type StructA struct {
+ unexportedIntField int
+ ExportedIntField int
+ MapA map[int]string
+ Array []int
+ StructB
+}
+
+type StructA2 struct {
+ B *StructB
+}
+
+type StructA3 struct {
+ B StructB
+}
+
+func fill() {
+ a := StructA{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+ b := StructA2{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+ c := StructA3{
+ B: StructB{},
+ } //@suggestedfix("}", "refactor.rewrite", "Fill")
+ if true {
+ _ = StructA3{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+ }
+}
+
+-- suggestedfix_fill_struct_24_16 --
+package fillstruct
+
+type StructA struct {
+ unexportedIntField int
+ ExportedIntField int
+ MapA map[int]string
+ Array []int
+ StructB
+}
+
+type StructA2 struct {
+ B *StructB
+}
+
+type StructA3 struct {
+ B StructB
+}
+
+func fill() {
+ a := StructA{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+ b := StructA2{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+ c := StructA3{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+ if true {
+ _ = StructA3{
+ B: StructB{},
+ } //@suggestedfix("}", "refactor.rewrite", "Fill")
+ }
+}
+
diff --git a/gopls/internal/lsp/testdata/fillstruct/fill_struct_anon.go b/gopls/internal/lsp/testdata/fillstruct/fill_struct_anon.go
new file mode 100644
index 000000000..2c099a80e
--- /dev/null
+++ b/gopls/internal/lsp/testdata/fillstruct/fill_struct_anon.go
@@ -0,0 +1,14 @@
+package fillstruct
+
+type StructAnon struct {
+ a struct{}
+ b map[string]interface{}
+ c map[string]struct {
+ d int
+ e bool
+ }
+}
+
+func fill() {
+ _ := StructAnon{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+}
diff --git a/gopls/internal/lsp/testdata/fillstruct/fill_struct_anon.go.golden b/gopls/internal/lsp/testdata/fillstruct/fill_struct_anon.go.golden
new file mode 100644
index 000000000..7cc9ac23d
--- /dev/null
+++ b/gopls/internal/lsp/testdata/fillstruct/fill_struct_anon.go.golden
@@ -0,0 +1,20 @@
+-- suggestedfix_fill_struct_anon_13_18 --
+package fillstruct
+
+type StructAnon struct {
+ a struct{}
+ b map[string]interface{}
+ c map[string]struct {
+ d int
+ e bool
+ }
+}
+
+func fill() {
+ _ := StructAnon{
+ a: struct{}{},
+ b: map[string]interface{}{},
+ c: map[string]struct{d int; e bool}{},
+ } //@suggestedfix("}", "refactor.rewrite", "Fill")
+}
+
diff --git a/gopls/internal/lsp/testdata/fillstruct/fill_struct_nested.go b/gopls/internal/lsp/testdata/fillstruct/fill_struct_nested.go
new file mode 100644
index 000000000..ab7be5a7b
--- /dev/null
+++ b/gopls/internal/lsp/testdata/fillstruct/fill_struct_nested.go
@@ -0,0 +1,15 @@
+package fillstruct
+
+type StructB struct {
+ StructC
+}
+
+type StructC struct {
+ unexportedInt int
+}
+
+func nested() {
+ c := StructB{
+ StructC: StructC{}, //@suggestedfix("}", "refactor.rewrite", "Fill")
+ }
+}
diff --git a/gopls/internal/lsp/testdata/fillstruct/fill_struct_nested.go.golden b/gopls/internal/lsp/testdata/fillstruct/fill_struct_nested.go.golden
new file mode 100644
index 000000000..c902ee7f1
--- /dev/null
+++ b/gopls/internal/lsp/testdata/fillstruct/fill_struct_nested.go.golden
@@ -0,0 +1,19 @@
+-- suggestedfix_fill_struct_nested_13_20 --
+package fillstruct
+
+type StructB struct {
+ StructC
+}
+
+type StructC struct {
+ unexportedInt int
+}
+
+func nested() {
+ c := StructB{
+ StructC: StructC{
+ unexportedInt: 0,
+ }, //@suggestedfix("}", "refactor.rewrite", "Fill")
+ }
+}
+
diff --git a/gopls/internal/lsp/testdata/fillstruct/fill_struct_package.go b/gopls/internal/lsp/testdata/fillstruct/fill_struct_package.go
new file mode 100644
index 000000000..ef35627c8
--- /dev/null
+++ b/gopls/internal/lsp/testdata/fillstruct/fill_struct_package.go
@@ -0,0 +1,12 @@
+package fillstruct
+
+import (
+ h2 "net/http"
+
+ "golang.org/lsptests/fillstruct/data"
+)
+
+func unexported() {
+ a := data.B{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+ _ = h2.Client{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+}
diff --git a/gopls/internal/lsp/testdata/fillstruct/fill_struct_package.go.golden b/gopls/internal/lsp/testdata/fillstruct/fill_struct_package.go.golden
new file mode 100644
index 000000000..0cdbfc820
--- /dev/null
+++ b/gopls/internal/lsp/testdata/fillstruct/fill_struct_package.go.golden
@@ -0,0 +1,36 @@
+-- suggestedfix_fill_struct_package_10_14 --
+package fillstruct
+
+import (
+ h2 "net/http"
+
+ "golang.org/lsptests/fillstruct/data"
+)
+
+func unexported() {
+ a := data.B{
+ ExportedInt: 0,
+ } //@suggestedfix("}", "refactor.rewrite", "Fill")
+ _ = h2.Client{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+}
+
+-- suggestedfix_fill_struct_package_11_16 --
+package fillstruct
+
+import (
+ h2 "net/http"
+
+ "golang.org/lsptests/fillstruct/data"
+)
+
+func unexported() {
+ a := data.B{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+ _ = h2.Client{
+ Transport: nil,
+ CheckRedirect: func(req *h2.Request, via []*h2.Request) error {
+ },
+ Jar: nil,
+ Timeout: 0,
+ } //@suggestedfix("}", "refactor.rewrite", "Fill")
+}
+
diff --git a/gopls/internal/lsp/testdata/fillstruct/fill_struct_partial.go b/gopls/internal/lsp/testdata/fillstruct/fill_struct_partial.go
new file mode 100644
index 000000000..5de1722c7
--- /dev/null
+++ b/gopls/internal/lsp/testdata/fillstruct/fill_struct_partial.go
@@ -0,0 +1,24 @@
+package fillstruct
+
+type StructPartialA struct {
+ PrefilledInt int
+ UnfilledInt int
+ StructPartialB
+}
+
+type StructPartialB struct {
+ PrefilledInt int
+ UnfilledInt int
+}
+
+func fill() {
+ a := StructPartialA{
+ PrefilledInt: 5,
+ } //@suggestedfix("}", "refactor.rewrite", "Fill")
+ b := StructPartialB{
+ /* this comment should disappear */
+ PrefilledInt: 7, // This comment should be blown away.
+ /* As should
+ this one */
+ } //@suggestedfix("}", "refactor.rewrite", "Fill")
+}
diff --git a/gopls/internal/lsp/testdata/fillstruct/fill_struct_partial.go.golden b/gopls/internal/lsp/testdata/fillstruct/fill_struct_partial.go.golden
new file mode 100644
index 000000000..3aa437a03
--- /dev/null
+++ b/gopls/internal/lsp/testdata/fillstruct/fill_struct_partial.go.golden
@@ -0,0 +1,52 @@
+-- suggestedfix_fill_struct_partial_17_2 --
+package fillstruct
+
+type StructPartialA struct {
+ PrefilledInt int
+ UnfilledInt int
+ StructPartialB
+}
+
+type StructPartialB struct {
+ PrefilledInt int
+ UnfilledInt int
+}
+
+func fill() {
+ a := StructPartialA{
+ PrefilledInt: 5,
+ UnfilledInt: 0,
+ StructPartialB: StructPartialB{},
+ } //@suggestedfix("}", "refactor.rewrite", "Fill")
+ b := StructPartialB{
+ /* this comment should disappear */
+ PrefilledInt: 7, // This comment should be blown away.
+ /* As should
+ this one */
+ } //@suggestedfix("}", "refactor.rewrite", "Fill")
+}
+
+-- suggestedfix_fill_struct_partial_23_2 --
+package fillstruct
+
+type StructPartialA struct {
+ PrefilledInt int
+ UnfilledInt int
+ StructPartialB
+}
+
+type StructPartialB struct {
+ PrefilledInt int
+ UnfilledInt int
+}
+
+func fill() {
+ a := StructPartialA{
+ PrefilledInt: 5,
+ } //@suggestedfix("}", "refactor.rewrite", "Fill")
+ b := StructPartialB{
+ PrefilledInt: 7,
+ UnfilledInt: 0,
+ } //@suggestedfix("}", "refactor.rewrite", "Fill")
+}
+
diff --git a/gopls/internal/lsp/testdata/fillstruct/fill_struct_spaces.go b/gopls/internal/lsp/testdata/fillstruct/fill_struct_spaces.go
new file mode 100644
index 000000000..6a468cd54
--- /dev/null
+++ b/gopls/internal/lsp/testdata/fillstruct/fill_struct_spaces.go
@@ -0,0 +1,9 @@
+package fillstruct
+
+type StructD struct {
+ ExportedIntField int
+}
+
+func spaces() {
+ d := StructD{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+}
diff --git a/gopls/internal/lsp/testdata/fillstruct/fill_struct_spaces.go.golden b/gopls/internal/lsp/testdata/fillstruct/fill_struct_spaces.go.golden
new file mode 100644
index 000000000..590c91611
--- /dev/null
+++ b/gopls/internal/lsp/testdata/fillstruct/fill_struct_spaces.go.golden
@@ -0,0 +1,13 @@
+-- suggestedfix_fill_struct_spaces_8_15 --
+package fillstruct
+
+type StructD struct {
+ ExportedIntField int
+}
+
+func spaces() {
+ d := StructD{
+ ExportedIntField: 0,
+ } //@suggestedfix("}", "refactor.rewrite", "Fill")
+}
+
diff --git a/gopls/internal/lsp/testdata/fillstruct/fill_struct_unsafe.go b/gopls/internal/lsp/testdata/fillstruct/fill_struct_unsafe.go
new file mode 100644
index 000000000..f5e42a4f2
--- /dev/null
+++ b/gopls/internal/lsp/testdata/fillstruct/fill_struct_unsafe.go
@@ -0,0 +1,12 @@
+package fillstruct
+
+import "unsafe"
+
+type unsafeStruct struct {
+ x int
+ p unsafe.Pointer
+}
+
+func fill() {
+ _ := unsafeStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+}
diff --git a/gopls/internal/lsp/testdata/fillstruct/fill_struct_unsafe.go.golden b/gopls/internal/lsp/testdata/fillstruct/fill_struct_unsafe.go.golden
new file mode 100644
index 000000000..7e8e1952f
--- /dev/null
+++ b/gopls/internal/lsp/testdata/fillstruct/fill_struct_unsafe.go.golden
@@ -0,0 +1,17 @@
+-- suggestedfix_fill_struct_unsafe_11_20 --
+package fillstruct
+
+import "unsafe"
+
+type unsafeStruct struct {
+ x int
+ p unsafe.Pointer
+}
+
+func fill() {
+ _ := unsafeStruct{
+ x: 0,
+ p: nil,
+ } //@suggestedfix("}", "refactor.rewrite", "Fill")
+}
+
diff --git a/gopls/internal/lsp/testdata/fillstruct/typeparams.go b/gopls/internal/lsp/testdata/fillstruct/typeparams.go
new file mode 100644
index 000000000..c0b702f57
--- /dev/null
+++ b/gopls/internal/lsp/testdata/fillstruct/typeparams.go
@@ -0,0 +1,37 @@
+//go:build go1.18
+// +build go1.18
+
+package fillstruct
+
+type emptyStructWithTypeParams[A any] struct{}
+
+var _ = emptyStructWithTypeParams[int]{} // no suggested fix
+
+type basicStructWithTypeParams[T any] struct {
+ foo T
+}
+
+var _ = basicStructWithTypeParams[int]{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type twoArgStructWithTypeParams[F, B any] struct {
+ foo F
+ bar B
+}
+
+var _ = twoArgStructWithTypeParams[string, int]{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+var _ = twoArgStructWithTypeParams[int, string]{
+ bar: "bar",
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type nestedStructWithTypeParams struct {
+ bar string
+ basic basicStructWithTypeParams[int]
+}
+
+var _ = nestedStructWithTypeParams{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+func _[T any]() {
+ type S struct{ t T }
+ _ = S{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+}
diff --git a/gopls/internal/lsp/testdata/fillstruct/typeparams.go.golden b/gopls/internal/lsp/testdata/fillstruct/typeparams.go.golden
new file mode 100644
index 000000000..625df7577
--- /dev/null
+++ b/gopls/internal/lsp/testdata/fillstruct/typeparams.go.golden
@@ -0,0 +1,206 @@
+-- suggestedfix_typeparams_14_40 --
+//go:build go1.18
+// +build go1.18
+
+package fillstruct
+
+type emptyStructWithTypeParams[A any] struct{}
+
+var _ = emptyStructWithTypeParams[int]{} // no suggested fix
+
+type basicStructWithTypeParams[T any] struct {
+ foo T
+}
+
+var _ = basicStructWithTypeParams[int]{
+ foo: 0,
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type twoArgStructWithTypeParams[F, B any] struct {
+ foo F
+ bar B
+}
+
+var _ = twoArgStructWithTypeParams[string, int]{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+var _ = twoArgStructWithTypeParams[int, string]{
+ bar: "bar",
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type nestedStructWithTypeParams struct {
+ bar string
+ basic basicStructWithTypeParams[int]
+}
+
+var _ = nestedStructWithTypeParams{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+func _[T any]() {
+ type S struct{ t T }
+ _ = S{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+}
+
+-- suggestedfix_typeparams_21_49 --
+//go:build go1.18
+// +build go1.18
+
+package fillstruct
+
+type emptyStructWithTypeParams[A any] struct{}
+
+var _ = emptyStructWithTypeParams[int]{} // no suggested fix
+
+type basicStructWithTypeParams[T any] struct {
+ foo T
+}
+
+var _ = basicStructWithTypeParams[int]{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type twoArgStructWithTypeParams[F, B any] struct {
+ foo F
+ bar B
+}
+
+var _ = twoArgStructWithTypeParams[string, int]{
+ foo: "",
+ bar: 0,
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+var _ = twoArgStructWithTypeParams[int, string]{
+ bar: "bar",
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type nestedStructWithTypeParams struct {
+ bar string
+ basic basicStructWithTypeParams[int]
+}
+
+var _ = nestedStructWithTypeParams{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+func _[T any]() {
+ type S struct{ t T }
+ _ = S{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+}
+
+-- suggestedfix_typeparams_25_1 --
+//go:build go1.18
+// +build go1.18
+
+package fillstruct
+
+type emptyStructWithTypeParams[A any] struct{}
+
+var _ = emptyStructWithTypeParams[int]{} // no suggested fix
+
+type basicStructWithTypeParams[T any] struct {
+ foo T
+}
+
+var _ = basicStructWithTypeParams[int]{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type twoArgStructWithTypeParams[F, B any] struct {
+ foo F
+ bar B
+}
+
+var _ = twoArgStructWithTypeParams[string, int]{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+var _ = twoArgStructWithTypeParams[int, string]{
+ foo: 0,
+ bar: "bar",
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type nestedStructWithTypeParams struct {
+ bar string
+ basic basicStructWithTypeParams[int]
+}
+
+var _ = nestedStructWithTypeParams{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+func _[T any]() {
+ type S struct{ t T }
+ _ = S{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+}
+
+-- suggestedfix_typeparams_32_36 --
+//go:build go1.18
+// +build go1.18
+
+package fillstruct
+
+type emptyStructWithTypeParams[A any] struct{}
+
+var _ = emptyStructWithTypeParams[int]{} // no suggested fix
+
+type basicStructWithTypeParams[T any] struct {
+ foo T
+}
+
+var _ = basicStructWithTypeParams[int]{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type twoArgStructWithTypeParams[F, B any] struct {
+ foo F
+ bar B
+}
+
+var _ = twoArgStructWithTypeParams[string, int]{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+var _ = twoArgStructWithTypeParams[int, string]{
+ bar: "bar",
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type nestedStructWithTypeParams struct {
+ bar string
+ basic basicStructWithTypeParams[int]
+}
+
+var _ = nestedStructWithTypeParams{
+ bar: "",
+ basic: basicStructWithTypeParams{},
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+func _[T any]() {
+ type S struct{ t T }
+ _ = S{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+}
+
+-- suggestedfix_typeparams_36_8 --
+//go:build go1.18
+// +build go1.18
+
+package fillstruct
+
+type emptyStructWithTypeParams[A any] struct{}
+
+var _ = emptyStructWithTypeParams[int]{} // no suggested fix
+
+type basicStructWithTypeParams[T any] struct {
+ foo T
+}
+
+var _ = basicStructWithTypeParams[int]{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type twoArgStructWithTypeParams[F, B any] struct {
+ foo F
+ bar B
+}
+
+var _ = twoArgStructWithTypeParams[string, int]{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+var _ = twoArgStructWithTypeParams[int, string]{
+ bar: "bar",
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type nestedStructWithTypeParams struct {
+ bar string
+ basic basicStructWithTypeParams[int]
+}
+
+var _ = nestedStructWithTypeParams{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+func _[T any]() {
+ type S struct{ t T }
+ _ = S{
+ t: *new(T),
+ } //@suggestedfix("}", "refactor.rewrite", "Fill")
+}
+
diff --git a/internal/lsp/testdata/folding/a.go b/gopls/internal/lsp/testdata/folding/a.go
index e07d7e0bf..e07d7e0bf 100644
--- a/internal/lsp/testdata/folding/a.go
+++ b/gopls/internal/lsp/testdata/folding/a.go
diff --git a/gopls/internal/lsp/testdata/folding/a.go.golden b/gopls/internal/lsp/testdata/folding/a.go.golden
new file mode 100644
index 000000000..b04ca4dab
--- /dev/null
+++ b/gopls/internal/lsp/testdata/folding/a.go.golden
@@ -0,0 +1,722 @@
+-- foldingRange-0 --
+package folding //@fold("package")
+
+import (<>)
+
+import _ "os"
+
+// bar is a function.<>
+func bar(<>) string {<>}
+
+-- foldingRange-1 --
+package folding //@fold("package")
+
+import (
+ "fmt"
+ _ "log"
+)
+
+import _ "os"
+
+// bar is a function.
+// With a multiline doc comment.
+func bar() string {
+ /* This is a single line comment */
+ switch {<>}
+ /* This is a multiline<>
+
+ /* This is a multiline<>
+ _ = []int{<>}
+ _ = [2]string{<>}
+ _ = map[string]int{<>}
+ type T struct {<>}
+ _ = T{<>}
+ x, y := make(<>), make(<>)
+ select {<>}
+ // This is a multiline comment<>
+ return <>
+}
+
+-- foldingRange-2 --
+package folding //@fold("package")
+
+import (
+ "fmt"
+ _ "log"
+)
+
+import _ "os"
+
+// bar is a function.
+// With a multiline doc comment.
+func bar() string {
+ /* This is a single line comment */
+ switch {
+ case true:<>
+ case false:<>
+ default:<>
+ }
+ /* This is a multiline
+ block
+ comment */
+
+ /* This is a multiline
+ block
+ comment */
+ // Followed by another comment.
+ _ = []int{
+ 1,
+ 2,
+ 3,
+ }
+ _ = [2]string{"d",
+ "e",
+ }
+ _ = map[string]int{
+ "a": 1,
+ "b": 2,
+ "c": 3,
+ }
+ type T struct {
+ f string
+ g int
+ h string
+ }
+ _ = T{
+ f: "j",
+ g: 4,
+ h: "i",
+ }
+ x, y := make(chan bool), make(chan bool)
+ select {
+ case val := <-x:<>
+ case <-y:<>
+ default:<>
+ }
+ // This is a multiline comment
+ // that is not a doc comment.
+ return `
+this string
+is not indented`
+}
+
+-- foldingRange-3 --
+package folding //@fold("package")
+
+import (
+ "fmt"
+ _ "log"
+)
+
+import _ "os"
+
+// bar is a function.
+// With a multiline doc comment.
+func bar() string {
+ /* This is a single line comment */
+ switch {
+ case true:
+ if true {<>} else {<>}
+ case false:
+ fmt.Println(<>)
+ default:
+ fmt.Println(<>)
+ }
+ /* This is a multiline
+ block
+ comment */
+
+ /* This is a multiline
+ block
+ comment */
+ // Followed by another comment.
+ _ = []int{
+ 1,
+ 2,
+ 3,
+ }
+ _ = [2]string{"d",
+ "e",
+ }
+ _ = map[string]int{
+ "a": 1,
+ "b": 2,
+ "c": 3,
+ }
+ type T struct {
+ f string
+ g int
+ h string
+ }
+ _ = T{
+ f: "j",
+ g: 4,
+ h: "i",
+ }
+ x, y := make(chan bool), make(chan bool)
+ select {
+ case val := <-x:
+ if val {<>} else {<>}
+ case <-y:
+ fmt.Println(<>)
+ default:
+ fmt.Println(<>)
+ }
+ // This is a multiline comment
+ // that is not a doc comment.
+ return `
+this string
+is not indented`
+}
+
+-- foldingRange-4 --
+package folding //@fold("package")
+
+import (
+ "fmt"
+ _ "log"
+)
+
+import _ "os"
+
+// bar is a function.
+// With a multiline doc comment.
+func bar() string {
+ /* This is a single line comment */
+ switch {
+ case true:
+ if true {
+ fmt.Println(<>)
+ } else {
+ fmt.Println(<>)
+ }
+ case false:
+ fmt.Println("false")
+ default:
+ fmt.Println("default")
+ }
+ /* This is a multiline
+ block
+ comment */
+
+ /* This is a multiline
+ block
+ comment */
+ // Followed by another comment.
+ _ = []int{
+ 1,
+ 2,
+ 3,
+ }
+ _ = [2]string{"d",
+ "e",
+ }
+ _ = map[string]int{
+ "a": 1,
+ "b": 2,
+ "c": 3,
+ }
+ type T struct {
+ f string
+ g int
+ h string
+ }
+ _ = T{
+ f: "j",
+ g: 4,
+ h: "i",
+ }
+ x, y := make(chan bool), make(chan bool)
+ select {
+ case val := <-x:
+ if val {
+ fmt.Println(<>)
+ } else {
+ fmt.Println(<>)
+ }
+ case <-y:
+ fmt.Println("y")
+ default:
+ fmt.Println("default")
+ }
+ // This is a multiline comment
+ // that is not a doc comment.
+ return `
+this string
+is not indented`
+}
+
+-- foldingRange-comment-0 --
+package folding //@fold("package")
+
+import (
+ "fmt"
+ _ "log"
+)
+
+import _ "os"
+
+// bar is a function.<>
+func bar() string {
+ /* This is a single line comment */
+ switch {
+ case true:
+ if true {
+ fmt.Println("true")
+ } else {
+ fmt.Println("false")
+ }
+ case false:
+ fmt.Println("false")
+ default:
+ fmt.Println("default")
+ }
+ /* This is a multiline<>
+
+ /* This is a multiline<>
+ _ = []int{
+ 1,
+ 2,
+ 3,
+ }
+ _ = [2]string{"d",
+ "e",
+ }
+ _ = map[string]int{
+ "a": 1,
+ "b": 2,
+ "c": 3,
+ }
+ type T struct {
+ f string
+ g int
+ h string
+ }
+ _ = T{
+ f: "j",
+ g: 4,
+ h: "i",
+ }
+ x, y := make(chan bool), make(chan bool)
+ select {
+ case val := <-x:
+ if val {
+ fmt.Println("true from x")
+ } else {
+ fmt.Println("false from x")
+ }
+ case <-y:
+ fmt.Println("y")
+ default:
+ fmt.Println("default")
+ }
+ // This is a multiline comment<>
+ return `
+this string
+is not indented`
+}
+
+-- foldingRange-imports-0 --
+package folding //@fold("package")
+
+import (<>)
+
+import _ "os"
+
+// bar is a function.
+// With a multiline doc comment.
+func bar() string {
+ /* This is a single line comment */
+ switch {
+ case true:
+ if true {
+ fmt.Println("true")
+ } else {
+ fmt.Println("false")
+ }
+ case false:
+ fmt.Println("false")
+ default:
+ fmt.Println("default")
+ }
+ /* This is a multiline
+ block
+ comment */
+
+ /* This is a multiline
+ block
+ comment */
+ // Followed by another comment.
+ _ = []int{
+ 1,
+ 2,
+ 3,
+ }
+ _ = [2]string{"d",
+ "e",
+ }
+ _ = map[string]int{
+ "a": 1,
+ "b": 2,
+ "c": 3,
+ }
+ type T struct {
+ f string
+ g int
+ h string
+ }
+ _ = T{
+ f: "j",
+ g: 4,
+ h: "i",
+ }
+ x, y := make(chan bool), make(chan bool)
+ select {
+ case val := <-x:
+ if val {
+ fmt.Println("true from x")
+ } else {
+ fmt.Println("false from x")
+ }
+ case <-y:
+ fmt.Println("y")
+ default:
+ fmt.Println("default")
+ }
+ // This is a multiline comment
+ // that is not a doc comment.
+ return `
+this string
+is not indented`
+}
+
+-- foldingRange-lineFolding-0 --
+package folding //@fold("package")
+
+import (<>
+)
+
+import _ "os"
+
+// bar is a function.<>
+func bar() string {<>
+}
+
+-- foldingRange-lineFolding-1 --
+package folding //@fold("package")
+
+import (
+ "fmt"
+ _ "log"
+)
+
+import _ "os"
+
+// bar is a function.
+// With a multiline doc comment.
+func bar() string {
+ /* This is a single line comment */
+ switch {<>
+ }
+ /* This is a multiline<>
+
+ /* This is a multiline<>
+ _ = []int{<>,
+ }
+ _ = [2]string{"d",
+ "e",
+ }
+ _ = map[string]int{<>,
+ }
+ type T struct {<>
+ }
+ _ = T{<>,
+ }
+ x, y := make(chan bool), make(chan bool)
+ select {<>
+ }
+ // This is a multiline comment<>
+ return <>
+}
+
+-- foldingRange-lineFolding-2 --
+package folding //@fold("package")
+
+import (
+ "fmt"
+ _ "log"
+)
+
+import _ "os"
+
+// bar is a function.
+// With a multiline doc comment.
+func bar() string {
+ /* This is a single line comment */
+ switch {
+ case true:<>
+ case false:<>
+ default:<>
+ }
+ /* This is a multiline
+ block
+ comment */
+
+ /* This is a multiline
+ block
+ comment */
+ // Followed by another comment.
+ _ = []int{
+ 1,
+ 2,
+ 3,
+ }
+ _ = [2]string{"d",
+ "e",
+ }
+ _ = map[string]int{
+ "a": 1,
+ "b": 2,
+ "c": 3,
+ }
+ type T struct {
+ f string
+ g int
+ h string
+ }
+ _ = T{
+ f: "j",
+ g: 4,
+ h: "i",
+ }
+ x, y := make(chan bool), make(chan bool)
+ select {
+ case val := <-x:<>
+ case <-y:<>
+ default:<>
+ }
+ // This is a multiline comment
+ // that is not a doc comment.
+ return `
+this string
+is not indented`
+}
+
+-- foldingRange-lineFolding-3 --
+package folding //@fold("package")
+
+import (
+ "fmt"
+ _ "log"
+)
+
+import _ "os"
+
+// bar is a function.
+// With a multiline doc comment.
+func bar() string {
+ /* This is a single line comment */
+ switch {
+ case true:
+ if true {<>
+ } else {<>
+ }
+ case false:
+ fmt.Println("false")
+ default:
+ fmt.Println("default")
+ }
+ /* This is a multiline
+ block
+ comment */
+
+ /* This is a multiline
+ block
+ comment */
+ // Followed by another comment.
+ _ = []int{
+ 1,
+ 2,
+ 3,
+ }
+ _ = [2]string{"d",
+ "e",
+ }
+ _ = map[string]int{
+ "a": 1,
+ "b": 2,
+ "c": 3,
+ }
+ type T struct {
+ f string
+ g int
+ h string
+ }
+ _ = T{
+ f: "j",
+ g: 4,
+ h: "i",
+ }
+ x, y := make(chan bool), make(chan bool)
+ select {
+ case val := <-x:
+ if val {<>
+ } else {<>
+ }
+ case <-y:
+ fmt.Println("y")
+ default:
+ fmt.Println("default")
+ }
+ // This is a multiline comment
+ // that is not a doc comment.
+ return `
+this string
+is not indented`
+}
+
+-- foldingRange-lineFolding-comment-0 --
+package folding //@fold("package")
+
+import (
+ "fmt"
+ _ "log"
+)
+
+import _ "os"
+
+// bar is a function.<>
+func bar() string {
+ /* This is a single line comment */
+ switch {
+ case true:
+ if true {
+ fmt.Println("true")
+ } else {
+ fmt.Println("false")
+ }
+ case false:
+ fmt.Println("false")
+ default:
+ fmt.Println("default")
+ }
+ /* This is a multiline<>
+
+ /* This is a multiline<>
+ _ = []int{
+ 1,
+ 2,
+ 3,
+ }
+ _ = [2]string{"d",
+ "e",
+ }
+ _ = map[string]int{
+ "a": 1,
+ "b": 2,
+ "c": 3,
+ }
+ type T struct {
+ f string
+ g int
+ h string
+ }
+ _ = T{
+ f: "j",
+ g: 4,
+ h: "i",
+ }
+ x, y := make(chan bool), make(chan bool)
+ select {
+ case val := <-x:
+ if val {
+ fmt.Println("true from x")
+ } else {
+ fmt.Println("false from x")
+ }
+ case <-y:
+ fmt.Println("y")
+ default:
+ fmt.Println("default")
+ }
+ // This is a multiline comment<>
+ return `
+this string
+is not indented`
+}
+
+-- foldingRange-lineFolding-imports-0 --
+package folding //@fold("package")
+
+import (<>
+)
+
+import _ "os"
+
+// bar is a function.
+// With a multiline doc comment.
+func bar() string {
+ /* This is a single line comment */
+ switch {
+ case true:
+ if true {
+ fmt.Println("true")
+ } else {
+ fmt.Println("false")
+ }
+ case false:
+ fmt.Println("false")
+ default:
+ fmt.Println("default")
+ }
+ /* This is a multiline
+ block
+ comment */
+
+ /* This is a multiline
+ block
+ comment */
+ // Followed by another comment.
+ _ = []int{
+ 1,
+ 2,
+ 3,
+ }
+ _ = [2]string{"d",
+ "e",
+ }
+ _ = map[string]int{
+ "a": 1,
+ "b": 2,
+ "c": 3,
+ }
+ type T struct {
+ f string
+ g int
+ h string
+ }
+ _ = T{
+ f: "j",
+ g: 4,
+ h: "i",
+ }
+ x, y := make(chan bool), make(chan bool)
+ select {
+ case val := <-x:
+ if val {
+ fmt.Println("true from x")
+ } else {
+ fmt.Println("false from x")
+ }
+ case <-y:
+ fmt.Println("y")
+ default:
+ fmt.Println("default")
+ }
+ // This is a multiline comment
+ // that is not a doc comment.
+ return `
+this string
+is not indented`
+}
+
diff --git a/gopls/internal/lsp/testdata/folding/bad.go.golden b/gopls/internal/lsp/testdata/folding/bad.go.golden
new file mode 100644
index 000000000..ab274f75a
--- /dev/null
+++ b/gopls/internal/lsp/testdata/folding/bad.go.golden
@@ -0,0 +1,81 @@
+-- foldingRange-0 --
+package folding //@fold("package")
+
+import (<>)
+
+import (<>)
+
+// badBar is a function.
+func badBar(<>) string {<>}
+
+-- foldingRange-1 --
+package folding //@fold("package")
+
+import ( "fmt"
+ _ "log"
+)
+
+import (
+ _ "os" )
+
+// badBar is a function.
+func badBar() string { x := true
+ if x {<>} else {<>}
+ return
+}
+
+-- foldingRange-2 --
+package folding //@fold("package")
+
+import ( "fmt"
+ _ "log"
+)
+
+import (
+ _ "os" )
+
+// badBar is a function.
+func badBar() string { x := true
+ if x {
+ // This is the only foldable thing in this file when lineFoldingOnly
+ fmt.Println(<>)
+ } else {
+ fmt.Println(<>) }
+ return
+}
+
+-- foldingRange-imports-0 --
+package folding //@fold("package")
+
+import (<>)
+
+import (<>)
+
+// badBar is a function.
+func badBar() string { x := true
+ if x {
+ // This is the only foldable thing in this file when lineFoldingOnly
+ fmt.Println("true")
+ } else {
+ fmt.Println("false") }
+ return
+}
+
+-- foldingRange-lineFolding-0 --
+package folding //@fold("package")
+
+import ( "fmt"
+ _ "log"
+)
+
+import (
+ _ "os" )
+
+// badBar is a function.
+func badBar() string { x := true
+ if x {<>
+ } else {
+ fmt.Println("false") }
+ return
+}
+
diff --git a/internal/lsp/testdata/folding/bad.go.in b/gopls/internal/lsp/testdata/folding/bad.go.in
index 84fcb740f..84fcb740f 100644
--- a/internal/lsp/testdata/folding/bad.go.in
+++ b/gopls/internal/lsp/testdata/folding/bad.go.in
diff --git a/gopls/internal/lsp/testdata/foo/foo.go b/gopls/internal/lsp/testdata/foo/foo.go
new file mode 100644
index 000000000..66631c58c
--- /dev/null
+++ b/gopls/internal/lsp/testdata/foo/foo.go
@@ -0,0 +1,30 @@
+package foo //@mark(PackageFoo, "foo"),item(PackageFoo, "foo", "\"golang.org/lsptests/foo\"", "package")
+
+type StructFoo struct { //@item(StructFoo, "StructFoo", "struct{...}", "struct")
+ Value int //@item(Value, "Value", "int", "field")
+}
+
+// Pre-set this marker, as we don't have a "source" for it in this package.
+/* Error() */ //@item(Error, "Error", "func() string", "method")
+
+func Foo() { //@item(Foo, "Foo", "func()", "func")
+ var err error
+ err.Error() //@complete("E", Error)
+}
+
+func _() {
+ var sFoo StructFoo //@mark(sFoo1, "sFoo"),complete("t", StructFoo)
+ if x := sFoo; x.Value == 1 { //@mark(sFoo2, "sFoo"),complete("V", Value),typdef("sFoo", StructFoo),refs("sFo", sFoo1, sFoo2)
+ return
+ }
+}
+
+func _() {
+ shadowed := 123
+ {
+ shadowed := "hi" //@item(shadowed, "shadowed", "string", "var"),refs("shadowed", shadowed)
+ sha //@complete("a", shadowed)
+ }
+}
+
+type IntFoo int //@item(IntFoo, "IntFoo", "int", "type")
diff --git a/gopls/internal/lsp/testdata/format/bad_format.go.golden b/gopls/internal/lsp/testdata/format/bad_format.go.golden
new file mode 100644
index 000000000..f0c24d635
--- /dev/null
+++ b/gopls/internal/lsp/testdata/format/bad_format.go.golden
@@ -0,0 +1,21 @@
+-- gofmt --
+package format //@format("package")
+
+import (
+ "fmt"
+ "log"
+ "runtime"
+)
+
+func hello() {
+
+ var x int //@diag("x", "compiler", "x declared (and|but) not used", "error")
+}
+
+func hi() {
+ runtime.GOROOT()
+ fmt.Printf("")
+
+ log.Printf("")
+}
+
diff --git a/gopls/internal/lsp/testdata/format/bad_format.go.in b/gopls/internal/lsp/testdata/format/bad_format.go.in
new file mode 100644
index 000000000..995ec399a
--- /dev/null
+++ b/gopls/internal/lsp/testdata/format/bad_format.go.in
@@ -0,0 +1,22 @@
+package format //@format("package")
+
+import (
+ "runtime"
+ "fmt"
+ "log"
+)
+
+func hello() {
+
+
+
+
+ var x int //@diag("x", "compiler", "x declared (and|but) not used", "error")
+}
+
+func hi() {
+ runtime.GOROOT()
+ fmt.Printf("")
+
+ log.Printf("")
+}
diff --git a/internal/lsp/testdata/format/good_format.go b/gopls/internal/lsp/testdata/format/good_format.go
index 01cb1610c..01cb1610c 100644
--- a/internal/lsp/testdata/format/good_format.go
+++ b/gopls/internal/lsp/testdata/format/good_format.go
diff --git a/internal/lsp/testdata/format/good_format.go.golden b/gopls/internal/lsp/testdata/format/good_format.go.golden
index 99f47e2e8..99f47e2e8 100644
--- a/internal/lsp/testdata/format/good_format.go.golden
+++ b/gopls/internal/lsp/testdata/format/good_format.go.golden
diff --git a/internal/lsp/testdata/format/newline_format.go.golden b/gopls/internal/lsp/testdata/format/newline_format.go.golden
index 7c76afdd5..7c76afdd5 100644
--- a/internal/lsp/testdata/format/newline_format.go.golden
+++ b/gopls/internal/lsp/testdata/format/newline_format.go.golden
diff --git a/internal/lsp/testdata/format/newline_format.go.in b/gopls/internal/lsp/testdata/format/newline_format.go.in
index fe597b90b..fe597b90b 100644
--- a/internal/lsp/testdata/format/newline_format.go.in
+++ b/gopls/internal/lsp/testdata/format/newline_format.go.in
diff --git a/internal/lsp/testdata/format/one_line.go.golden b/gopls/internal/lsp/testdata/format/one_line.go.golden
index 4d11f84cb..4d11f84cb 100644
--- a/internal/lsp/testdata/format/one_line.go.golden
+++ b/gopls/internal/lsp/testdata/format/one_line.go.golden
diff --git a/internal/lsp/testdata/format/one_line.go.in b/gopls/internal/lsp/testdata/format/one_line.go.in
index 30f413755..30f413755 100644
--- a/internal/lsp/testdata/format/one_line.go.in
+++ b/gopls/internal/lsp/testdata/format/one_line.go.in
diff --git a/internal/lsp/testdata/func_rank/func_rank.go.in b/gopls/internal/lsp/testdata/func_rank/func_rank.go.in
index 905010b3d..905010b3d 100644
--- a/internal/lsp/testdata/func_rank/func_rank.go.in
+++ b/gopls/internal/lsp/testdata/func_rank/func_rank.go.in
diff --git a/internal/lsp/testdata/funcsig/func_sig.go b/gopls/internal/lsp/testdata/funcsig/func_sig.go
index 00f9b575d..00f9b575d 100644
--- a/internal/lsp/testdata/funcsig/func_sig.go
+++ b/gopls/internal/lsp/testdata/funcsig/func_sig.go
diff --git a/internal/lsp/testdata/funcvalue/func_value.go b/gopls/internal/lsp/testdata/funcvalue/func_value.go
index 913fcbcfe..913fcbcfe 100644
--- a/internal/lsp/testdata/funcvalue/func_value.go
+++ b/gopls/internal/lsp/testdata/funcvalue/func_value.go
diff --git a/internal/lsp/testdata/fuzzymatch/fuzzymatch.go b/gopls/internal/lsp/testdata/fuzzymatch/fuzzymatch.go
index 73268f553..73268f553 100644
--- a/internal/lsp/testdata/fuzzymatch/fuzzymatch.go
+++ b/gopls/internal/lsp/testdata/fuzzymatch/fuzzymatch.go
diff --git a/internal/lsp/testdata/generate/generate.go b/gopls/internal/lsp/testdata/generate/generate.go
index ae5e90d1a..ae5e90d1a 100644
--- a/internal/lsp/testdata/generate/generate.go
+++ b/gopls/internal/lsp/testdata/generate/generate.go
diff --git a/gopls/internal/lsp/testdata/generated/generated.go b/gopls/internal/lsp/testdata/generated/generated.go
new file mode 100644
index 000000000..c7adc1804
--- /dev/null
+++ b/gopls/internal/lsp/testdata/generated/generated.go
@@ -0,0 +1,7 @@
+package generated
+
+// Code generated by generator.go. DO NOT EDIT.
+
+func _() {
+ var y int //@diag("y", "compiler", "y declared (and|but) not used", "error")
+}
diff --git a/gopls/internal/lsp/testdata/generated/generator.go b/gopls/internal/lsp/testdata/generated/generator.go
new file mode 100644
index 000000000..8e2a4fab7
--- /dev/null
+++ b/gopls/internal/lsp/testdata/generated/generator.go
@@ -0,0 +1,5 @@
+package generated
+
+func _() {
+ var x int //@diag("x", "compiler", "x declared (and|but) not used", "error")
+}
diff --git a/gopls/internal/lsp/testdata/godef/a/a_x_test.go b/gopls/internal/lsp/testdata/godef/a/a_x_test.go
new file mode 100644
index 000000000..f166f0550
--- /dev/null
+++ b/gopls/internal/lsp/testdata/godef/a/a_x_test.go
@@ -0,0 +1,9 @@
+package a_test
+
+import (
+ "testing"
+)
+
+func TestA2(t *testing.T) { //@TestA2,godef(TestA2, TestA2)
+ Nonexistant() //@diag("Nonexistant", "compiler", "(undeclared name|undefined): Nonexistant", "error")
+}
diff --git a/internal/lsp/testdata/godef/a/a_x_test.go.golden b/gopls/internal/lsp/testdata/godef/a/a_x_test.go.golden
index 2e3064794..2e3064794 100644
--- a/internal/lsp/testdata/godef/a/a_x_test.go.golden
+++ b/gopls/internal/lsp/testdata/godef/a/a_x_test.go.golden
diff --git a/gopls/internal/lsp/testdata/godef/a/d.go b/gopls/internal/lsp/testdata/godef/a/d.go
new file mode 100644
index 000000000..a1d17ad0d
--- /dev/null
+++ b/gopls/internal/lsp/testdata/godef/a/d.go
@@ -0,0 +1,69 @@
+package a //@mark(a, "a "),hoverdef("a ", a)
+
+import "fmt"
+
+type Thing struct { //@Thing
+ Member string //@Member
+}
+
+var Other Thing //@Other
+
+func Things(val []string) []Thing { //@Things
+ return nil
+}
+
+func (t Thing) Method(i int) string { //@Method
+ return t.Member
+}
+
+func (t Thing) Method3() {
+}
+
+func (t *Thing) Method2(i int, j int) (error, string) {
+ return nil, t.Member
+}
+
+func (t *Thing) private() {
+}
+
+func useThings() {
+ t := Thing{ //@mark(aStructType, "ing")
+ Member: "string", //@mark(fMember, "ember")
+ }
+ fmt.Print(t.Member) //@mark(aMember, "ember")
+ fmt.Print(Other) //@mark(aVar, "ther")
+ Things() //@mark(aFunc, "ings")
+ t.Method() //@mark(aMethod, "eth")
+}
+
+type NextThing struct { //@NextThing
+ Thing
+ Value int
+}
+
+func (n NextThing) another() string {
+ return n.Member
+}
+
+// Shadows Thing.Method3
+func (n *NextThing) Method3() int {
+ return n.Value
+}
+
+var nextThing NextThing //@hoverdef("NextThing", NextThing)
+
+/*@
+godef(aStructType, Thing)
+godef(aMember, Member)
+godef(aVar, Other)
+godef(aFunc, Things)
+godef(aMethod, Method)
+godef(fMember, Member)
+godef(Member, Member)
+
+//param
+//package name
+//const
+//anon field
+
+*/
diff --git a/gopls/internal/lsp/testdata/godef/a/d.go.golden b/gopls/internal/lsp/testdata/godef/a/d.go.golden
new file mode 100644
index 000000000..ee687750c
--- /dev/null
+++ b/gopls/internal/lsp/testdata/godef/a/d.go.golden
@@ -0,0 +1,191 @@
+-- Member-definition --
+godef/a/d.go:6:2-8: defined here as ```go
+field Member string
+```
+
+@Member
+
+
+[`(a.Thing).Member` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Thing.Member)
+-- Member-definition-json --
+{
+ "span": {
+ "uri": "file://godef/a/d.go",
+ "start": {
+ "line": 6,
+ "column": 2,
+ "offset": 90
+ },
+ "end": {
+ "line": 6,
+ "column": 8,
+ "offset": 96
+ }
+ },
+ "description": "```go\nfield Member string\n```\n\n@Member\n\n\n[`(a.Thing).Member` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Thing.Member)"
+}
+
+-- Member-hoverdef --
+```go
+field Member string
+```
+
+@Member
+
+
+[`(a.Thing).Member` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Thing.Member)
+-- Method-definition --
+godef/a/d.go:15:16-22: defined here as ```go
+func (Thing).Method(i int) string
+```
+
+[`(a.Thing).Method` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Thing.Method)
+-- Method-definition-json --
+{
+ "span": {
+ "uri": "file://godef/a/d.go",
+ "start": {
+ "line": 15,
+ "column": 16,
+ "offset": 219
+ },
+ "end": {
+ "line": 15,
+ "column": 22,
+ "offset": 225
+ }
+ },
+ "description": "```go\nfunc (Thing).Method(i int) string\n```\n\n[`(a.Thing).Method` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Thing.Method)"
+}
+
+-- Method-hoverdef --
+```go
+func (Thing).Method(i int) string
+```
+
+[`(a.Thing).Method` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Thing.Method)
+-- NextThing-hoverdef --
+```go
+type NextThing struct {
+ Thing
+ Value int
+}
+
+func (*NextThing).Method3() int
+func (NextThing).another() string
+```
+
+[`a.NextThing` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#NextThing)
+-- Other-definition --
+godef/a/d.go:9:5-10: defined here as ```go
+var Other Thing
+```
+
+@Other
+
+
+[`a.Other` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Other)
+-- Other-definition-json --
+{
+ "span": {
+ "uri": "file://godef/a/d.go",
+ "start": {
+ "line": 9,
+ "column": 5,
+ "offset": 121
+ },
+ "end": {
+ "line": 9,
+ "column": 10,
+ "offset": 126
+ }
+ },
+ "description": "```go\nvar Other Thing\n```\n\n@Other\n\n\n[`a.Other` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Other)"
+}
+
+-- Other-hoverdef --
+```go
+var Other Thing
+```
+
+@Other
+
+
+[`a.Other` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Other)
+-- Thing-definition --
+godef/a/d.go:5:6-11: defined here as ```go
+type Thing struct {
+ Member string //@Member
+}
+
+func (Thing).Method(i int) string
+func (*Thing).Method2(i int, j int) (error, string)
+func (Thing).Method3()
+func (*Thing).private()
+```
+
+[`a.Thing` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Thing)
+-- Thing-definition-json --
+{
+ "span": {
+ "uri": "file://godef/a/d.go",
+ "start": {
+ "line": 5,
+ "column": 6,
+ "offset": 65
+ },
+ "end": {
+ "line": 5,
+ "column": 11,
+ "offset": 70
+ }
+ },
+ "description": "```go\ntype Thing struct {\n\tMember string //@Member\n}\n\nfunc (Thing).Method(i int) string\nfunc (*Thing).Method2(i int, j int) (error, string)\nfunc (Thing).Method3()\nfunc (*Thing).private()\n```\n\n[`a.Thing` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Thing)"
+}
+
+-- Thing-hoverdef --
+```go
+type Thing struct {
+ Member string //@Member
+}
+
+func (Thing).Method(i int) string
+func (*Thing).Method2(i int, j int) (error, string)
+func (Thing).Method3()
+func (*Thing).private()
+```
+
+[`a.Thing` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Thing)
+-- Things-definition --
+godef/a/d.go:11:6-12: defined here as ```go
+func Things(val []string) []Thing
+```
+
+[`a.Things` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Things)
+-- Things-definition-json --
+{
+ "span": {
+ "uri": "file://godef/a/d.go",
+ "start": {
+ "line": 11,
+ "column": 6,
+ "offset": 148
+ },
+ "end": {
+ "line": 11,
+ "column": 12,
+ "offset": 154
+ }
+ },
+ "description": "```go\nfunc Things(val []string) []Thing\n```\n\n[`a.Things` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Things)"
+}
+
+-- Things-hoverdef --
+```go
+func Things(val []string) []Thing
+```
+
+[`a.Things` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Things)
+-- a-hoverdef --
+Package a is a package for testing go to definition.
+
diff --git a/gopls/internal/lsp/testdata/godef/a/f.go b/gopls/internal/lsp/testdata/godef/a/f.go
new file mode 100644
index 000000000..10f88262a
--- /dev/null
+++ b/gopls/internal/lsp/testdata/godef/a/f.go
@@ -0,0 +1,16 @@
+// Package a is a package for testing go to definition.
+package a
+
+import "fmt"
+
+func TypeStuff() { //@Stuff
+ var x string
+
+ switch y := interface{}(x).(type) { //@mark(switchY, "y"),godef("y", switchY)
+ case int: //@mark(intY, "int")
+ fmt.Printf("%v", y) //@hoverdef("y", intY)
+ case string: //@mark(stringY, "string")
+ fmt.Printf("%v", y) //@hoverdef("y", stringY)
+ }
+
+}
diff --git a/internal/lsp/testdata/godef/a/f.go.golden b/gopls/internal/lsp/testdata/godef/a/f.go.golden
index a084356c0..a084356c0 100644
--- a/internal/lsp/testdata/godef/a/f.go.golden
+++ b/gopls/internal/lsp/testdata/godef/a/f.go.golden
diff --git a/internal/lsp/testdata/godef/a/g.go b/gopls/internal/lsp/testdata/godef/a/g.go
index dfef2fb80..dfef2fb80 100644
--- a/internal/lsp/testdata/godef/a/g.go
+++ b/gopls/internal/lsp/testdata/godef/a/g.go
diff --git a/gopls/internal/lsp/testdata/godef/a/g.go.golden b/gopls/internal/lsp/testdata/godef/a/g.go.golden
new file mode 100644
index 000000000..f7a2e1b07
--- /dev/null
+++ b/gopls/internal/lsp/testdata/godef/a/g.go.golden
@@ -0,0 +1,7 @@
+-- dur-hoverdef --
+```go
+const dur time.Duration = 910350000000 // 15m10.35s
+```
+
+dur is a constant of type time.Duration.
+
diff --git a/internal/lsp/testdata/godef/a/h.go b/gopls/internal/lsp/testdata/godef/a/h.go
index 5a5dcc678..5a5dcc678 100644
--- a/internal/lsp/testdata/godef/a/h.go
+++ b/gopls/internal/lsp/testdata/godef/a/h.go
diff --git a/gopls/internal/lsp/testdata/godef/a/h.go.golden b/gopls/internal/lsp/testdata/godef/a/h.go.golden
new file mode 100644
index 000000000..7cef9ee96
--- /dev/null
+++ b/gopls/internal/lsp/testdata/godef/a/h.go.golden
@@ -0,0 +1,161 @@
+-- arrD-hoverdef --
+```go
+field d int
+```
+
+d field
+
+-- arrE-hoverdef --
+```go
+field e struct{f int}
+```
+
+e nested struct
+
+-- arrF-hoverdef --
+```go
+field f int
+```
+
+f field of nested struct
+
+-- complexH-hoverdef --
+```go
+field h int
+```
+
+h field
+
+-- complexI-hoverdef --
+```go
+field i struct{j int}
+```
+
+i nested struct
+
+-- complexJ-hoverdef --
+```go
+field j int
+```
+
+j field of nested struct
+
+-- mapStructKeyX-hoverdef --
+```go
+field x []string
+```
+
+X key field
+
+-- mapStructKeyY-hoverdef --
+```go
+field y string
+```
+
+Y key field
+
+-- mapStructValueX-hoverdef --
+```go
+field x string
+```
+
+X value field
+
+-- nestedMap-hoverdef --
+```go
+field m map[string]float64
+```
+
+nested map
+
+-- nestedNumber-hoverdef --
+```go
+field number int64
+```
+
+nested number
+
+-- nestedString-hoverdef --
+```go
+field str string
+```
+
+nested string
+
+-- openMethod-hoverdef --
+```go
+func (interface).open() error
+```
+
+open method comment
+
+-- returnX-hoverdef --
+```go
+field x int
+```
+
+X coord
+
+-- returnY-hoverdef --
+```go
+field y int
+```
+
+Y coord
+
+-- structA-hoverdef --
+```go
+field a int
+```
+
+a field
+
+-- structB-hoverdef --
+```go
+field b struct{c int}
+```
+
+b nested struct
+
+-- structC-hoverdef --
+```go
+field c int
+```
+
+c field of nested struct
+
+-- testDescription-hoverdef --
+```go
+field desc string
+```
+
+test description
+
+-- testInput-hoverdef --
+```go
+field in map[string][]struct{key string; value interface{}}
+```
+
+test input
+
+-- testInputKey-hoverdef --
+```go
+field key string
+```
+
+test key
+
+-- testInputValue-hoverdef --
+```go
+field value interface{}
+```
+
+test value
+
+-- testResultValue-hoverdef --
+```go
+field value int
+```
+
+expected test value
+
diff --git a/gopls/internal/lsp/testdata/godef/b/e.go b/gopls/internal/lsp/testdata/godef/b/e.go
new file mode 100644
index 000000000..9c81cad31
--- /dev/null
+++ b/gopls/internal/lsp/testdata/godef/b/e.go
@@ -0,0 +1,31 @@
+package b
+
+import (
+ "fmt"
+
+ "golang.org/lsptests/godef/a"
+)
+
+func useThings() {
+ t := a.Thing{} //@mark(bStructType, "ing")
+ fmt.Print(t.Member) //@mark(bMember, "ember")
+ fmt.Print(a.Other) //@mark(bVar, "ther")
+ a.Things() //@mark(bFunc, "ings")
+}
+
+/*@
+godef(bStructType, Thing)
+godef(bMember, Member)
+godef(bVar, Other)
+godef(bFunc, Things)
+*/
+
+func _() {
+ var x interface{} //@mark(eInterface, "interface{}")
+ switch x := x.(type) { //@hoverdef("x", eInterface)
+ case string: //@mark(eString, "string")
+ fmt.Println(x) //@hoverdef("x", eString)
+ case int: //@mark(eInt, "int")
+ fmt.Println(x) //@hoverdef("x", eInt)
+ }
+}
diff --git a/gopls/internal/lsp/testdata/godef/b/e.go.golden b/gopls/internal/lsp/testdata/godef/b/e.go.golden
new file mode 100644
index 000000000..3d7d89797
--- /dev/null
+++ b/gopls/internal/lsp/testdata/godef/b/e.go.golden
@@ -0,0 +1,156 @@
+-- Member-definition --
+godef/a/d.go:6:2-8: defined here as ```go
+field Member string
+```
+
+@Member
+
+
+[`(a.Thing).Member` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Thing.Member)
+-- Member-definition-json --
+{
+ "span": {
+ "uri": "file://godef/a/d.go",
+ "start": {
+ "line": 6,
+ "column": 2,
+ "offset": 90
+ },
+ "end": {
+ "line": 6,
+ "column": 8,
+ "offset": 96
+ }
+ },
+ "description": "```go\nfield Member string\n```\n\n@Member\n\n\n[`(a.Thing).Member` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Thing.Member)"
+}
+
+-- Member-hoverdef --
+```go
+field Member string
+```
+
+@Member
+
+
+[`(a.Thing).Member` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Thing.Member)
+-- Other-definition --
+godef/a/d.go:9:5-10: defined here as ```go
+var a.Other a.Thing
+```
+
+@Other
+
+
+[`a.Other` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Other)
+-- Other-definition-json --
+{
+ "span": {
+ "uri": "file://godef/a/d.go",
+ "start": {
+ "line": 9,
+ "column": 5,
+ "offset": 121
+ },
+ "end": {
+ "line": 9,
+ "column": 10,
+ "offset": 126
+ }
+ },
+ "description": "```go\nvar a.Other a.Thing\n```\n\n@Other\n\n\n[`a.Other` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Other)"
+}
+
+-- Other-hoverdef --
+```go
+var a.Other a.Thing
+```
+
+@Other
+
+
+[`a.Other` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Other)
+-- Thing-definition --
+godef/a/d.go:5:6-11: defined here as ```go
+type Thing struct {
+ Member string //@Member
+}
+
+func (a.Thing).Method(i int) string
+func (*a.Thing).Method2(i int, j int) (error, string)
+func (a.Thing).Method3()
+```
+
+[`a.Thing` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Thing)
+-- Thing-definition-json --
+{
+ "span": {
+ "uri": "file://godef/a/d.go",
+ "start": {
+ "line": 5,
+ "column": 6,
+ "offset": 65
+ },
+ "end": {
+ "line": 5,
+ "column": 11,
+ "offset": 70
+ }
+ },
+ "description": "```go\ntype Thing struct {\n\tMember string //@Member\n}\n\nfunc (a.Thing).Method(i int) string\nfunc (*a.Thing).Method2(i int, j int) (error, string)\nfunc (a.Thing).Method3()\n```\n\n[`a.Thing` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Thing)"
+}
+
+-- Thing-hoverdef --
+```go
+type Thing struct {
+ Member string //@Member
+}
+
+func (a.Thing).Method(i int) string
+func (*a.Thing).Method2(i int, j int) (error, string)
+func (a.Thing).Method3()
+```
+
+[`a.Thing` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Thing)
+-- Things-definition --
+godef/a/d.go:11:6-12: defined here as ```go
+func a.Things(val []string) []a.Thing
+```
+
+[`a.Things` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Things)
+-- Things-definition-json --
+{
+ "span": {
+ "uri": "file://godef/a/d.go",
+ "start": {
+ "line": 11,
+ "column": 6,
+ "offset": 148
+ },
+ "end": {
+ "line": 11,
+ "column": 12,
+ "offset": 154
+ }
+ },
+ "description": "```go\nfunc a.Things(val []string) []a.Thing\n```\n\n[`a.Things` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Things)"
+}
+
+-- Things-hoverdef --
+```go
+func a.Things(val []string) []a.Thing
+```
+
+[`a.Things` on pkg.go.dev](https://pkg.go.dev/golang.org/lsptests/godef/a#Things)
+-- eInt-hoverdef --
+```go
+var x int
+```
+-- eInterface-hoverdef --
+```go
+var x interface{}
+```
+-- eString-hoverdef --
+```go
+var x string
+```
diff --git a/gopls/internal/lsp/testdata/godef/broken/unclosedIf.go.golden b/gopls/internal/lsp/testdata/godef/broken/unclosedIf.go.golden
new file mode 100644
index 000000000..9ce869848
--- /dev/null
+++ b/gopls/internal/lsp/testdata/godef/broken/unclosedIf.go.golden
@@ -0,0 +1,31 @@
+-- myUnclosedIf-definition --
+godef/broken/unclosedIf.go:7:7-19: defined here as ```go
+var myUnclosedIf string
+```
+
+@myUnclosedIf
+-- myUnclosedIf-definition-json --
+{
+ "span": {
+ "uri": "file://godef/broken/unclosedIf.go",
+ "start": {
+ "line": 7,
+ "column": 7,
+ "offset": 68
+ },
+ "end": {
+ "line": 7,
+ "column": 19,
+ "offset": 80
+ }
+ },
+ "description": "```go\nvar myUnclosedIf string\n```\n\n@myUnclosedIf"
+}
+
+-- myUnclosedIf-hoverdef --
+```go
+var myUnclosedIf string
+```
+
+@myUnclosedIf
+
diff --git a/internal/lsp/testdata/godef/broken/unclosedIf.go.in b/gopls/internal/lsp/testdata/godef/broken/unclosedIf.go.in
index 0f2cf1b1e..0f2cf1b1e 100644
--- a/internal/lsp/testdata/godef/broken/unclosedIf.go.in
+++ b/gopls/internal/lsp/testdata/godef/broken/unclosedIf.go.in
diff --git a/internal/lsp/testdata/good/good0.go b/gopls/internal/lsp/testdata/good/good0.go
index 89450a845..89450a845 100644
--- a/internal/lsp/testdata/good/good0.go
+++ b/gopls/internal/lsp/testdata/good/good0.go
diff --git a/gopls/internal/lsp/testdata/good/good1.go b/gopls/internal/lsp/testdata/good/good1.go
new file mode 100644
index 000000000..624d8147a
--- /dev/null
+++ b/gopls/internal/lsp/testdata/good/good1.go
@@ -0,0 +1,21 @@
+package good //@diag("package", "no_diagnostics", "", "error")
+
+import (
+ "golang.org/lsptests/types" //@item(types_import, "types", "\"golang.org/lsptests/types\"", "package")
+)
+
+func random() int { //@item(good_random, "random", "func() int", "func")
+ _ = "random() int" //@prepare("random", "", "")
+ y := 6 + 7 //@prepare("7", "", "")
+ return y //@prepare("return", "","")
+}
+
+func random2(y int) int { //@item(good_random2, "random2", "func(y int) int", "func"),item(good_y_param, "y", "int", "var")
+ //@complete("", good_y_param, types_import, good_random, good_random2, good_stuff)
+ var b types.Bob = &types.X{} //@prepare("ypes","types", "types")
+ if _, ok := b.(*types.X); ok { //@complete("X", X_struct, Y_struct, Bob_interface, CoolAlias)
+ _ = 0 // suppress "empty branch" diagnostic
+ }
+
+ return y
+}
diff --git a/internal/lsp/testdata/highlights/highlights.go b/gopls/internal/lsp/testdata/highlights/highlights.go
index 55ae68aa1..55ae68aa1 100644
--- a/internal/lsp/testdata/highlights/highlights.go
+++ b/gopls/internal/lsp/testdata/highlights/highlights.go
diff --git a/gopls/internal/lsp/testdata/implementation/implementation.go b/gopls/internal/lsp/testdata/implementation/implementation.go
new file mode 100644
index 000000000..4c1a22dd4
--- /dev/null
+++ b/gopls/internal/lsp/testdata/implementation/implementation.go
@@ -0,0 +1,37 @@
+package implementation
+
+import "golang.org/lsptests/implementation/other"
+
+type ImpP struct{} //@ImpP,implementations("ImpP", Laugher, OtherLaugher)
+
+func (*ImpP) Laugh() { //@mark(LaughP, "Laugh"),implementations("Laugh", Laugh, OtherLaugh)
+}
+
+type ImpS struct{} //@ImpS,implementations("ImpS", Laugher, OtherLaugher)
+
+func (ImpS) Laugh() { //@mark(LaughS, "Laugh"),implementations("Laugh", Laugh, OtherLaugh)
+}
+
+type Laugher interface { //@Laugher,implementations("Laugher", ImpP, OtherImpP, ImpS, OtherImpS, embedsImpP)
+ Laugh() //@Laugh,implementations("Laugh", LaughP, OtherLaughP, LaughS, OtherLaughS)
+}
+
+type Foo struct { //@implementations("Foo", Joker)
+ other.Foo
+}
+
+type Joker interface { //@Joker
+ Joke() //@Joke,implementations("Joke", ImpJoker)
+}
+
+type cryer int //@implementations("cryer", Cryer)
+
+func (cryer) Cry(other.CryType) {} //@mark(CryImpl, "Cry"),implementations("Cry", Cry)
+
+type Empty interface{} //@implementations("Empty")
+
+var _ interface{ Joke() } //@implementations("Joke", ImpJoker)
+
+type embedsImpP struct { //@embedsImpP
+ ImpP //@implementations("ImpP", Laugher, OtherLaugher)
+}
diff --git a/gopls/internal/lsp/testdata/implementation/implementation_generics.go b/gopls/internal/lsp/testdata/implementation/implementation_generics.go
new file mode 100644
index 000000000..1f02d166b
--- /dev/null
+++ b/gopls/internal/lsp/testdata/implementation/implementation_generics.go
@@ -0,0 +1,16 @@
+//go:build go1.18
+// +build go1.18
+
+package implementation
+
+// -- generics --
+
+type GenIface[T any] interface { //@mark(GenIface, "GenIface"),implementations("GenIface", GC)
+ F(int, string, T) //@mark(GenIfaceF, "F"),implementations("F", GCF)
+}
+
+type GenConc[U any] int //@mark(GenConc, "GenConc"),implementations("GenConc", GI)
+
+func (GenConc[V]) F(int, string, V) {} //@mark(GenConcF, "F"),implementations("F", GIF)
+
+type GenConcString struct{ GenConc[string] } //@mark(GenConcString, "GenConcString"),implementations(GenConcString, GIString)
diff --git a/internal/lsp/testdata/implementation/other/other.go b/gopls/internal/lsp/testdata/implementation/other/other.go
index aff825e91..aff825e91 100644
--- a/internal/lsp/testdata/implementation/other/other.go
+++ b/gopls/internal/lsp/testdata/implementation/other/other.go
diff --git a/gopls/internal/lsp/testdata/implementation/other/other_generics.go b/gopls/internal/lsp/testdata/implementation/other/other_generics.go
new file mode 100644
index 000000000..4b4c29f7d
--- /dev/null
+++ b/gopls/internal/lsp/testdata/implementation/other/other_generics.go
@@ -0,0 +1,16 @@
+//go:build go1.18
+// +build go1.18
+
+package other
+
+// -- generics (limited support) --
+
+type GI[T any] interface { //@mark(GI, "GI"),implementations("GI", GenConc)
+ F(int, string, T) //@mark(GIF, "F"),implementations("F", GenConcF)
+}
+
+type GIString GI[string] //@mark(GIString, "GIString"),implementations("GIString", GenConcString)
+
+type GC[U any] int //@mark(GC, "GC"),implementations("GC", GenIface)
+
+func (GC[V]) F(int, string, V) {} //@mark(GCF, "F"),implementations("F", GenIfaceF)
diff --git a/internal/lsp/testdata/implementation/other/other_test.go b/gopls/internal/lsp/testdata/implementation/other/other_test.go
index 846e0d591..846e0d591 100644
--- a/internal/lsp/testdata/implementation/other/other_test.go
+++ b/gopls/internal/lsp/testdata/implementation/other/other_test.go
diff --git a/gopls/internal/lsp/testdata/importedcomplit/imported_complit.go.in b/gopls/internal/lsp/testdata/importedcomplit/imported_complit.go.in
new file mode 100644
index 000000000..2f4cbada1
--- /dev/null
+++ b/gopls/internal/lsp/testdata/importedcomplit/imported_complit.go.in
@@ -0,0 +1,42 @@
+package importedcomplit
+
+import (
+ "golang.org/lsptests/foo"
+
+ // import completions
+ "fm" //@complete("\" //", fmtImport)
+ "go/pars" //@complete("\" //", parserImport)
+ "golang.org/lsptests/signa" //@complete("na\" //", signatureImport)
+ "golang.org/lspte" //@complete("\" //", lsptestsImport)
+ "crypto/elli" //@complete("\" //", cryptoImport)
+ "golang.org/lsptests/sign" //@complete("\" //", signatureImport)
+ "golang.org/lsptests/sign" //@complete("ests", lsptestsImport)
+ namedParser "go/pars" //@complete("\" //", parserImport)
+)
+
+func _() {
+ var V int //@item(icVVar, "V", "int", "var")
+ _ = foo.StructFoo{V} //@complete("}", Value, icVVar)
+}
+
+func _() {
+ var (
+ aa string //@item(icAAVar, "aa", "string", "var")
+ ab int //@item(icABVar, "ab", "int", "var")
+ )
+
+ _ = foo.StructFoo{a} //@complete("}", abVar, aaVar)
+
+ var s struct {
+ AA string //@item(icFieldAA, "AA", "string", "field")
+ AB int //@item(icFieldAB, "AB", "int", "field")
+ }
+
+ _ = foo.StructFoo{s.} //@complete("}", icFieldAB, icFieldAA)
+}
+
+/* "fmt" */ //@item(fmtImport, "fmt", "\"fmt\"", "package")
+/* "go/parser" */ //@item(parserImport, "parser", "\"go/parser\"", "package")
+/* "golang.org/lsptests/signature" */ //@item(signatureImport, "signature", "\"golang.org/lsptests/signature\"", "package")
+/* "golang.org/lsptests/" */ //@item(lsptestsImport, "lsptests/", "\"golang.org/lsptests/\"", "package")
+/* "crypto/elliptic" */ //@item(cryptoImport, "elliptic", "\"crypto/elliptic\"", "package")
diff --git a/internal/lsp/testdata/imports/add_import.go.golden b/gopls/internal/lsp/testdata/imports/add_import.go.golden
index 16af110a0..16af110a0 100644
--- a/internal/lsp/testdata/imports/add_import.go.golden
+++ b/gopls/internal/lsp/testdata/imports/add_import.go.golden
diff --git a/internal/lsp/testdata/imports/add_import.go.in b/gopls/internal/lsp/testdata/imports/add_import.go.in
index 7928e6f71..7928e6f71 100644
--- a/internal/lsp/testdata/imports/add_import.go.in
+++ b/gopls/internal/lsp/testdata/imports/add_import.go.in
diff --git a/internal/lsp/testdata/imports/good_imports.go.golden b/gopls/internal/lsp/testdata/imports/good_imports.go.golden
index 2abdae4d7..2abdae4d7 100644
--- a/internal/lsp/testdata/imports/good_imports.go.golden
+++ b/gopls/internal/lsp/testdata/imports/good_imports.go.golden
diff --git a/internal/lsp/testdata/imports/good_imports.go.in b/gopls/internal/lsp/testdata/imports/good_imports.go.in
index a03c06c6d..a03c06c6d 100644
--- a/internal/lsp/testdata/imports/good_imports.go.in
+++ b/gopls/internal/lsp/testdata/imports/good_imports.go.in
diff --git a/internal/lsp/testdata/imports/issue35458.go.golden b/gopls/internal/lsp/testdata/imports/issue35458.go.golden
index f0772606b..f0772606b 100644
--- a/internal/lsp/testdata/imports/issue35458.go.golden
+++ b/gopls/internal/lsp/testdata/imports/issue35458.go.golden
diff --git a/internal/lsp/testdata/imports/issue35458.go.in b/gopls/internal/lsp/testdata/imports/issue35458.go.in
index 7420c212c..7420c212c 100644
--- a/internal/lsp/testdata/imports/issue35458.go.in
+++ b/gopls/internal/lsp/testdata/imports/issue35458.go.in
diff --git a/internal/lsp/testdata/imports/multiple_blocks.go.golden b/gopls/internal/lsp/testdata/imports/multiple_blocks.go.golden
index d37a6c751..d37a6c751 100644
--- a/internal/lsp/testdata/imports/multiple_blocks.go.golden
+++ b/gopls/internal/lsp/testdata/imports/multiple_blocks.go.golden
diff --git a/internal/lsp/testdata/imports/multiple_blocks.go.in b/gopls/internal/lsp/testdata/imports/multiple_blocks.go.in
index 3f2fb99ea..3f2fb99ea 100644
--- a/internal/lsp/testdata/imports/multiple_blocks.go.in
+++ b/gopls/internal/lsp/testdata/imports/multiple_blocks.go.in
diff --git a/internal/lsp/testdata/imports/needs_imports.go.golden b/gopls/internal/lsp/testdata/imports/needs_imports.go.golden
index fd6032874..fd6032874 100644
--- a/internal/lsp/testdata/imports/needs_imports.go.golden
+++ b/gopls/internal/lsp/testdata/imports/needs_imports.go.golden
diff --git a/internal/lsp/testdata/imports/needs_imports.go.in b/gopls/internal/lsp/testdata/imports/needs_imports.go.in
index 949d56a64..949d56a64 100644
--- a/internal/lsp/testdata/imports/needs_imports.go.in
+++ b/gopls/internal/lsp/testdata/imports/needs_imports.go.in
diff --git a/internal/lsp/testdata/imports/remove_import.go.golden b/gopls/internal/lsp/testdata/imports/remove_import.go.golden
index 3df80882c..3df80882c 100644
--- a/internal/lsp/testdata/imports/remove_import.go.golden
+++ b/gopls/internal/lsp/testdata/imports/remove_import.go.golden
diff --git a/internal/lsp/testdata/imports/remove_import.go.in b/gopls/internal/lsp/testdata/imports/remove_import.go.in
index 09060bada..09060bada 100644
--- a/internal/lsp/testdata/imports/remove_import.go.in
+++ b/gopls/internal/lsp/testdata/imports/remove_import.go.in
diff --git a/internal/lsp/testdata/imports/remove_imports.go.golden b/gopls/internal/lsp/testdata/imports/remove_imports.go.golden
index 530c8c09f..530c8c09f 100644
--- a/internal/lsp/testdata/imports/remove_imports.go.golden
+++ b/gopls/internal/lsp/testdata/imports/remove_imports.go.golden
diff --git a/internal/lsp/testdata/imports/remove_imports.go.in b/gopls/internal/lsp/testdata/imports/remove_imports.go.in
index 44d065f25..44d065f25 100644
--- a/internal/lsp/testdata/imports/remove_imports.go.in
+++ b/gopls/internal/lsp/testdata/imports/remove_imports.go.in
diff --git a/internal/lsp/testdata/imports/two_lines.go.golden b/gopls/internal/lsp/testdata/imports/two_lines.go.golden
index ec118a4dd..ec118a4dd 100644
--- a/internal/lsp/testdata/imports/two_lines.go.golden
+++ b/gopls/internal/lsp/testdata/imports/two_lines.go.golden
diff --git a/internal/lsp/testdata/imports/two_lines.go.in b/gopls/internal/lsp/testdata/imports/two_lines.go.in
index eee534569..eee534569 100644
--- a/internal/lsp/testdata/imports/two_lines.go.in
+++ b/gopls/internal/lsp/testdata/imports/two_lines.go.in
diff --git a/internal/lsp/testdata/index/index.go b/gopls/internal/lsp/testdata/index/index.go
index a2656893c..a2656893c 100644
--- a/internal/lsp/testdata/index/index.go
+++ b/gopls/internal/lsp/testdata/index/index.go
diff --git a/gopls/internal/lsp/testdata/inlay_hint/composite_literals.go b/gopls/internal/lsp/testdata/inlay_hint/composite_literals.go
new file mode 100644
index 000000000..b05c95ec8
--- /dev/null
+++ b/gopls/internal/lsp/testdata/inlay_hint/composite_literals.go
@@ -0,0 +1,27 @@
+package inlayHint //@inlayHint("package")
+
+import "fmt"
+
+func fieldNames() {
+ for _, c := range []struct {
+ in, want string
+ }{
+ struct{ in, want string }{"Hello, world", "dlrow ,olleH"},
+ {"Hello, 世界", "界世 ,olleH"},
+ {"", ""},
+ } {
+ fmt.Println(c.in == c.want)
+ }
+}
+
+func fieldNamesPointers() {
+ for _, c := range []*struct {
+ in, want string
+ }{
+ &struct{ in, want string }{"Hello, world", "dlrow ,olleH"},
+ {"Hello, 世界", "界世 ,olleH"},
+ {"", ""},
+ } {
+ fmt.Println(c.in == c.want)
+ }
+}
diff --git a/gopls/internal/lsp/testdata/inlay_hint/composite_literals.go.golden b/gopls/internal/lsp/testdata/inlay_hint/composite_literals.go.golden
new file mode 100644
index 000000000..eb2febdb6
--- /dev/null
+++ b/gopls/internal/lsp/testdata/inlay_hint/composite_literals.go.golden
@@ -0,0 +1,29 @@
+-- inlayHint --
+package inlayHint //@inlayHint("package")
+
+import "fmt"
+
+func fieldNames() {
+ for _< int>, c< struct{in string; want string}> := range []struct {
+ in, want string
+ }{
+ struct{ in, want string }{<in: >"Hello, world", <want: >"dlrow ,olleH"},
+ <struct{in string; want string}>{<in: >"Hello, 世界", <want: >"界世 ,olleH"},
+ <struct{in string; want string}>{<in: >"", <want: >""},
+ } {
+ fmt.Println(<a...: >c.in == c.want)
+ }
+}
+
+func fieldNamesPointers() {
+ for _< int>, c< *struct{in string; want string}> := range []*struct {
+ in, want string
+ }{
+ &struct{ in, want string }{<in: >"Hello, world", <want: >"dlrow ,olleH"},
+ <&struct{in string; want string}>{<in: >"Hello, 世界", <want: >"界世 ,olleH"},
+ <&struct{in string; want string}>{<in: >"", <want: >""},
+ } {
+ fmt.Println(<a...: >c.in == c.want)
+ }
+}
+
diff --git a/gopls/internal/lsp/testdata/inlay_hint/constant_values.go b/gopls/internal/lsp/testdata/inlay_hint/constant_values.go
new file mode 100644
index 000000000..e3339b0f3
--- /dev/null
+++ b/gopls/internal/lsp/testdata/inlay_hint/constant_values.go
@@ -0,0 +1,45 @@
+package inlayHint //@inlayHint("package")
+
+const True = true
+
+type Kind int
+
+const (
+ KindNone Kind = iota
+ KindPrint
+ KindPrintf
+ KindErrorf
+)
+
+const (
+ u = iota * 4
+ v float64 = iota * 42
+ w = iota * 42
+)
+
+const (
+ a, b = 1, 2
+ c, d
+ e, f = 5 * 5, "hello" + "world"
+ g, h
+ i, j = true, f
+)
+
+// No hint
+const (
+ Int = 3
+ Float = 3.14
+ Bool = true
+ Rune = '3'
+ Complex = 2.7i
+ String = "Hello, world!"
+)
+
+var (
+ varInt = 3
+ varFloat = 3.14
+ varBool = true
+ varRune = '3' + '4'
+ varComplex = 2.7i
+ varString = "Hello, world!"
+)
diff --git a/gopls/internal/lsp/testdata/inlay_hint/constant_values.go.golden b/gopls/internal/lsp/testdata/inlay_hint/constant_values.go.golden
new file mode 100644
index 000000000..edc46debc
--- /dev/null
+++ b/gopls/internal/lsp/testdata/inlay_hint/constant_values.go.golden
@@ -0,0 +1,47 @@
+-- inlayHint --
+package inlayHint //@inlayHint("package")
+
+const True = true
+
+type Kind int
+
+const (
+ KindNone Kind = iota< = 0>
+ KindPrint< = 1>
+ KindPrintf< = 2>
+ KindErrorf< = 3>
+)
+
+const (
+ u = iota * 4< = 0>
+ v float64 = iota * 42< = 42>
+ w = iota * 42< = 84>
+)
+
+const (
+ a, b = 1, 2
+ c, d< = 1, 2>
+ e, f = 5 * 5, "hello" + "world"< = 25, "helloworld">
+ g, h< = 25, "helloworld">
+ i, j = true, f< = true, "helloworld">
+)
+
+// No hint
+const (
+ Int = 3
+ Float = 3.14
+ Bool = true
+ Rune = '3'
+ Complex = 2.7i
+ String = "Hello, world!"
+)
+
+var (
+ varInt = 3
+ varFloat = 3.14
+ varBool = true
+ varRune = '3' + '4'
+ varComplex = 2.7i
+ varString = "Hello, world!"
+)
+
diff --git a/gopls/internal/lsp/testdata/inlay_hint/parameter_names.go b/gopls/internal/lsp/testdata/inlay_hint/parameter_names.go
new file mode 100644
index 000000000..0d930e5d4
--- /dev/null
+++ b/gopls/internal/lsp/testdata/inlay_hint/parameter_names.go
@@ -0,0 +1,50 @@
+package inlayHint //@inlayHint("package")
+
+import "fmt"
+
+func hello(name string) string {
+ return "Hello " + name
+}
+
+func helloWorld() string {
+ return hello("World")
+}
+
+type foo struct{}
+
+func (*foo) bar(baz string, qux int) int {
+ if baz != "" {
+ return qux + 1
+ }
+ return qux
+}
+
+func kase(foo int, bar bool, baz ...string) {
+ fmt.Println(foo, bar, baz)
+}
+
+func kipp(foo string, bar, baz string) {
+ fmt.Println(foo, bar, baz)
+}
+
+func plex(foo, bar string, baz string) {
+ fmt.Println(foo, bar, baz)
+}
+
+func tars(foo string, bar, baz string) {
+ fmt.Println(foo, bar, baz)
+}
+
+func foobar() {
+ var x foo
+ x.bar("", 1)
+ kase(0, true, "c", "d", "e")
+ kipp("a", "b", "c")
+ plex("a", "b", "c")
+ tars("a", "b", "c")
+ foo, bar, baz := "a", "b", "c"
+ kipp(foo, bar, baz)
+ plex("a", bar, baz)
+ tars(foo+foo, (bar), "c")
+
+}
diff --git a/gopls/internal/lsp/testdata/inlay_hint/parameter_names.go.golden b/gopls/internal/lsp/testdata/inlay_hint/parameter_names.go.golden
new file mode 100644
index 000000000..4e93a4f92
--- /dev/null
+++ b/gopls/internal/lsp/testdata/inlay_hint/parameter_names.go.golden
@@ -0,0 +1,52 @@
+-- inlayHint --
+package inlayHint //@inlayHint("package")
+
+import "fmt"
+
+func hello(name string) string {
+ return "Hello " + name
+}
+
+func helloWorld() string {
+ return hello(<name: >"World")
+}
+
+type foo struct{}
+
+func (*foo) bar(baz string, qux int) int {
+ if baz != "" {
+ return qux + 1
+ }
+ return qux
+}
+
+func kase(foo int, bar bool, baz ...string) {
+ fmt.Println(<a...: >foo, bar, baz)
+}
+
+func kipp(foo string, bar, baz string) {
+ fmt.Println(<a...: >foo, bar, baz)
+}
+
+func plex(foo, bar string, baz string) {
+ fmt.Println(<a...: >foo, bar, baz)
+}
+
+func tars(foo string, bar, baz string) {
+ fmt.Println(<a...: >foo, bar, baz)
+}
+
+func foobar() {
+ var x foo
+ x.bar(<baz: >"", <qux: >1)
+ kase(<foo: >0, <bar: >true, <baz...: >"c", "d", "e")
+ kipp(<foo: >"a", <bar: >"b", <baz: >"c")
+ plex(<foo: >"a", <bar: >"b", <baz: >"c")
+ tars(<foo: >"a", <bar: >"b", <baz: >"c")
+ foo< string>, bar< string>, baz< string> := "a", "b", "c"
+ kipp(foo, bar, baz)
+ plex(<foo: >"a", bar, baz)
+ tars(<foo: >foo+foo, <bar: >(bar), <baz: >"c")
+
+}
+
diff --git a/gopls/internal/lsp/testdata/inlay_hint/type_params.go b/gopls/internal/lsp/testdata/inlay_hint/type_params.go
new file mode 100644
index 000000000..3a3c7e537
--- /dev/null
+++ b/gopls/internal/lsp/testdata/inlay_hint/type_params.go
@@ -0,0 +1,45 @@
+//go:build go1.18
+// +build go1.18
+
+package inlayHint //@inlayHint("package")
+
+func main() {
+ ints := map[string]int64{
+ "first": 34,
+ "second": 12,
+ }
+
+ floats := map[string]float64{
+ "first": 35.98,
+ "second": 26.99,
+ }
+
+ SumIntsOrFloats[string, int64](ints)
+ SumIntsOrFloats[string, float64](floats)
+
+ SumIntsOrFloats(ints)
+ SumIntsOrFloats(floats)
+
+ SumNumbers(ints)
+ SumNumbers(floats)
+}
+
+type Number interface {
+ int64 | float64
+}
+
+func SumIntsOrFloats[K comparable, V int64 | float64](m map[K]V) V {
+ var s V
+ for _, v := range m {
+ s += v
+ }
+ return s
+}
+
+func SumNumbers[K comparable, V Number](m map[K]V) V {
+ var s V
+ for _, v := range m {
+ s += v
+ }
+ return s
+}
diff --git a/gopls/internal/lsp/testdata/inlay_hint/type_params.go.golden b/gopls/internal/lsp/testdata/inlay_hint/type_params.go.golden
new file mode 100644
index 000000000..4819963b7
--- /dev/null
+++ b/gopls/internal/lsp/testdata/inlay_hint/type_params.go.golden
@@ -0,0 +1,47 @@
+-- inlayHint --
+//go:build go1.18
+// +build go1.18
+
+package inlayHint //@inlayHint("package")
+
+func main() {
+ ints< map[string]int64> := map[string]int64{
+ "first": 34,
+ "second": 12,
+ }
+
+ floats< map[string]float64> := map[string]float64{
+ "first": 35.98,
+ "second": 26.99,
+ }
+
+ SumIntsOrFloats[string, int64](<m: >ints)
+ SumIntsOrFloats[string, float64](<m: >floats)
+
+ SumIntsOrFloats<[string, int64]>(<m: >ints)
+ SumIntsOrFloats<[string, float64]>(<m: >floats)
+
+ SumNumbers<[string, int64]>(<m: >ints)
+ SumNumbers<[string, float64]>(<m: >floats)
+}
+
+type Number interface {
+ int64 | float64
+}
+
+func SumIntsOrFloats[K comparable, V int64 | float64](m map[K]V) V {
+ var s V
+ for _< K>, v< V> := range m {
+ s += v
+ }
+ return s
+}
+
+func SumNumbers[K comparable, V Number](m map[K]V) V {
+ var s V
+ for _< K>, v< V> := range m {
+ s += v
+ }
+ return s
+}
+
diff --git a/gopls/internal/lsp/testdata/inlay_hint/variable_types.go b/gopls/internal/lsp/testdata/inlay_hint/variable_types.go
new file mode 100644
index 000000000..219af7059
--- /dev/null
+++ b/gopls/internal/lsp/testdata/inlay_hint/variable_types.go
@@ -0,0 +1,20 @@
+package inlayHint //@inlayHint("package")
+
+func assignTypes() {
+ i, j := 0, len([]string{})-1
+ println(i, j)
+}
+
+func rangeTypes() {
+ for k, v := range []string{} {
+ println(k, v)
+ }
+}
+
+func funcLitType() {
+ myFunc := func(a string) string { return "" }
+}
+
+func compositeLitType() {
+ foo := map[string]interface{}{"": ""}
+}
diff --git a/gopls/internal/lsp/testdata/inlay_hint/variable_types.go.golden b/gopls/internal/lsp/testdata/inlay_hint/variable_types.go.golden
new file mode 100644
index 000000000..6039950d5
--- /dev/null
+++ b/gopls/internal/lsp/testdata/inlay_hint/variable_types.go.golden
@@ -0,0 +1,22 @@
+-- inlayHint --
+package inlayHint //@inlayHint("package")
+
+func assignTypes() {
+ i< int>, j< int> := 0, len([]string{})-1
+ println(i, j)
+}
+
+func rangeTypes() {
+ for k< int>, v< string> := range []string{} {
+ println(k, v)
+ }
+}
+
+func funcLitType() {
+ myFunc< func(a string) string> := func(a string) string { return "" }
+}
+
+func compositeLitType() {
+ foo< map[string]interface{}> := map[string]interface{}{"": ""}
+}
+
diff --git a/internal/lsp/testdata/interfacerank/interface_rank.go b/gopls/internal/lsp/testdata/interfacerank/interface_rank.go
index acb5a42e0..acb5a42e0 100644
--- a/internal/lsp/testdata/interfacerank/interface_rank.go
+++ b/gopls/internal/lsp/testdata/interfacerank/interface_rank.go
diff --git a/gopls/internal/lsp/testdata/issues/issue56505.go b/gopls/internal/lsp/testdata/issues/issue56505.go
new file mode 100644
index 000000000..8c641bfb8
--- /dev/null
+++ b/gopls/internal/lsp/testdata/issues/issue56505.go
@@ -0,0 +1,8 @@
+package issues
+
+// Test for golang/go#56505: completion on variables of type *error should not
+// panic.
+func _() {
+ var e *error
+ e.x //@complete(" //")
+}
diff --git a/internal/lsp/testdata/keywords/accidental_keywords.go.in b/gopls/internal/lsp/testdata/keywords/accidental_keywords.go.in
index 3833081c4..3833081c4 100644
--- a/internal/lsp/testdata/keywords/accidental_keywords.go.in
+++ b/gopls/internal/lsp/testdata/keywords/accidental_keywords.go.in
diff --git a/internal/lsp/testdata/keywords/empty_select.go b/gopls/internal/lsp/testdata/keywords/empty_select.go
index 17ca3ec9d..17ca3ec9d 100644
--- a/internal/lsp/testdata/keywords/empty_select.go
+++ b/gopls/internal/lsp/testdata/keywords/empty_select.go
diff --git a/internal/lsp/testdata/keywords/empty_switch.go b/gopls/internal/lsp/testdata/keywords/empty_switch.go
index 2004d5541..2004d5541 100644
--- a/internal/lsp/testdata/keywords/empty_switch.go
+++ b/gopls/internal/lsp/testdata/keywords/empty_switch.go
diff --git a/gopls/internal/lsp/testdata/keywords/keywords.go b/gopls/internal/lsp/testdata/keywords/keywords.go
new file mode 100644
index 000000000..0bcaa63bf
--- /dev/null
+++ b/gopls/internal/lsp/testdata/keywords/keywords.go
@@ -0,0 +1,100 @@
+package keywords
+
+//@rank("", type),rank("", func),rank("", var),rank("", const),rank("", import)
+
+func _() {
+ var test int //@rank(" //", int, interface)
+ var tChan chan int
+ var _ m //@complete(" //", map)
+ var _ f //@complete(" //", func)
+ var _ c //@complete(" //", chan)
+
+ var _ str //@rank(" //", string, struct)
+
+ type _ int //@rank(" //", interface, int)
+
+ type _ str //@rank(" //", struct, string)
+
+ switch test {
+ case 1: // TODO: trying to complete case here will break because the parser won't return *ast.Ident
+ b //@complete(" //", break)
+ case 2:
+ f //@complete(" //", fallthrough, for)
+ r //@complete(" //", return)
+ d //@complete(" //", default, defer)
+ c //@complete(" //", case, const)
+ }
+
+ switch test.(type) {
+ case fo: //@complete(":")
+ case int:
+ b //@complete(" //", break)
+ case int32:
+ f //@complete(" //", for)
+ d //@complete(" //", default, defer)
+ r //@complete(" //", return)
+ c //@complete(" //", case, const)
+ }
+
+ select {
+ case <-tChan:
+ b //@complete(" //", break)
+ c //@complete(" //", case, const)
+ }
+
+ for index := 0; index < test; index++ {
+ c //@complete(" //", const, continue)
+ b //@complete(" //", break)
+ }
+
+ for range []int{} {
+ c //@complete(" //", const, continue)
+ b //@complete(" //", break)
+ }
+
+ // Test function level keywords
+
+ //Using 2 characters to test because map output order is random
+ sw //@complete(" //", switch)
+ se //@complete(" //", select)
+
+ f //@complete(" //", for)
+ d //@complete(" //", defer)
+ g //@rank(" //", go),rank(" //", goto)
+ r //@complete(" //", return)
+ i //@complete(" //", if)
+ e //@complete(" //", else)
+ v //@complete(" //", var)
+ c //@complete(" //", const)
+
+ for i := r //@complete(" //", range)
+}
+
+/* package */ //@item(package, "package", "", "keyword")
+/* import */ //@item(import, "import", "", "keyword")
+/* func */ //@item(func, "func", "", "keyword")
+/* type */ //@item(type, "type", "", "keyword")
+/* var */ //@item(var, "var", "", "keyword")
+/* const */ //@item(const, "const", "", "keyword")
+/* break */ //@item(break, "break", "", "keyword")
+/* default */ //@item(default, "default", "", "keyword")
+/* case */ //@item(case, "case", "", "keyword")
+/* defer */ //@item(defer, "defer", "", "keyword")
+/* go */ //@item(go, "go", "", "keyword")
+/* for */ //@item(for, "for", "", "keyword")
+/* if */ //@item(if, "if", "", "keyword")
+/* else */ //@item(else, "else", "", "keyword")
+/* switch */ //@item(switch, "switch", "", "keyword")
+/* select */ //@item(select, "select", "", "keyword")
+/* fallthrough */ //@item(fallthrough, "fallthrough", "", "keyword")
+/* continue */ //@item(continue, "continue", "", "keyword")
+/* return */ //@item(return, "return", "", "keyword")
+/* var */ //@item(var, "var", "", "keyword")
+/* const */ //@item(const, "const", "", "keyword")
+/* goto */ //@item(goto, "goto", "", "keyword")
+/* struct */ //@item(struct, "struct", "", "keyword")
+/* interface */ //@item(interface, "interface", "", "keyword")
+/* map */ //@item(map, "map", "", "keyword")
+/* func */ //@item(func, "func", "", "keyword")
+/* chan */ //@item(chan, "chan", "", "keyword")
+/* range */ //@item(range, "range", "", "keyword")
diff --git a/internal/lsp/testdata/labels/labels.go b/gopls/internal/lsp/testdata/labels/labels.go
index b9effb6d0..b9effb6d0 100644
--- a/internal/lsp/testdata/labels/labels.go
+++ b/gopls/internal/lsp/testdata/labels/labels.go
diff --git a/gopls/internal/lsp/testdata/links/links.go b/gopls/internal/lsp/testdata/links/links.go
new file mode 100644
index 000000000..378134341
--- /dev/null
+++ b/gopls/internal/lsp/testdata/links/links.go
@@ -0,0 +1,26 @@
+package links
+
+import (
+ "fmt" //@link(`fmt`,"https://pkg.go.dev/fmt")
+
+ "golang.org/lsptests/foo" //@link(`golang.org/lsptests/foo`,`https://pkg.go.dev/golang.org/lsptests/foo`)
+
+ _ "database/sql" //@link(`database/sql`, `https://pkg.go.dev/database/sql`)
+)
+
+var (
+ _ fmt.Formatter
+ _ foo.StructFoo
+ _ errors.Formatter
+)
+
+// Foo function
+func Foo() string {
+ /*https://example.com/comment */ //@link("https://example.com/comment","https://example.com/comment")
+
+ url := "https://example.com/string_literal" //@link("https://example.com/string_literal","https://example.com/string_literal")
+ return url
+
+ // TODO(golang/go#1234): Link the relevant issue. //@link("golang/go#1234", "https://github.com/golang/go/issues/1234")
+ // TODO(microsoft/vscode-go#12): Another issue. //@link("microsoft/vscode-go#12", "https://github.com/microsoft/vscode-go/issues/12")
+}
diff --git a/internal/lsp/testdata/maps/maps.go.in b/gopls/internal/lsp/testdata/maps/maps.go.in
index eeb5576b0..eeb5576b0 100644
--- a/internal/lsp/testdata/maps/maps.go.in
+++ b/gopls/internal/lsp/testdata/maps/maps.go.in
diff --git a/gopls/internal/lsp/testdata/missingfunction/channels.go b/gopls/internal/lsp/testdata/missingfunction/channels.go
new file mode 100644
index 000000000..303770cd7
--- /dev/null
+++ b/gopls/internal/lsp/testdata/missingfunction/channels.go
@@ -0,0 +1,9 @@
+package missingfunction
+
+func channels(s string) {
+ undefinedChannels(c()) //@suggestedfix("undefinedChannels", "quickfix", "")
+}
+
+func c() (<-chan string, chan string) {
+ return make(<-chan string), make(chan string)
+}
diff --git a/gopls/internal/lsp/testdata/missingfunction/channels.go.golden b/gopls/internal/lsp/testdata/missingfunction/channels.go.golden
new file mode 100644
index 000000000..998ce589e
--- /dev/null
+++ b/gopls/internal/lsp/testdata/missingfunction/channels.go.golden
@@ -0,0 +1,15 @@
+-- suggestedfix_channels_4_2 --
+package missingfunction
+
+func channels(s string) {
+ undefinedChannels(c()) //@suggestedfix("undefinedChannels", "quickfix", "")
+}
+
+func undefinedChannels(ch1 <-chan string, ch2 chan string) {
+ panic("unimplemented")
+}
+
+func c() (<-chan string, chan string) {
+ return make(<-chan string), make(chan string)
+}
+
diff --git a/gopls/internal/lsp/testdata/missingfunction/consecutive_params.go b/gopls/internal/lsp/testdata/missingfunction/consecutive_params.go
new file mode 100644
index 000000000..f2fb3c041
--- /dev/null
+++ b/gopls/internal/lsp/testdata/missingfunction/consecutive_params.go
@@ -0,0 +1,6 @@
+package missingfunction
+
+func consecutiveParams() {
+ var s string
+ undefinedConsecutiveParams(s, s) //@suggestedfix("undefinedConsecutiveParams", "quickfix", "")
+}
diff --git a/gopls/internal/lsp/testdata/missingfunction/consecutive_params.go.golden b/gopls/internal/lsp/testdata/missingfunction/consecutive_params.go.golden
new file mode 100644
index 000000000..4b852ce14
--- /dev/null
+++ b/gopls/internal/lsp/testdata/missingfunction/consecutive_params.go.golden
@@ -0,0 +1,12 @@
+-- suggestedfix_consecutive_params_5_2 --
+package missingfunction
+
+func consecutiveParams() {
+ var s string
+ undefinedConsecutiveParams(s, s) //@suggestedfix("undefinedConsecutiveParams", "quickfix", "")
+}
+
+func undefinedConsecutiveParams(s1, s2 string) {
+ panic("unimplemented")
+}
+
diff --git a/gopls/internal/lsp/testdata/missingfunction/error_param.go b/gopls/internal/lsp/testdata/missingfunction/error_param.go
new file mode 100644
index 000000000..d0484f0ff
--- /dev/null
+++ b/gopls/internal/lsp/testdata/missingfunction/error_param.go
@@ -0,0 +1,6 @@
+package missingfunction
+
+func errorParam() {
+ var err error
+ undefinedErrorParam(err) //@suggestedfix("undefinedErrorParam", "quickfix", "")
+}
diff --git a/gopls/internal/lsp/testdata/missingfunction/error_param.go.golden b/gopls/internal/lsp/testdata/missingfunction/error_param.go.golden
new file mode 100644
index 000000000..de78646a5
--- /dev/null
+++ b/gopls/internal/lsp/testdata/missingfunction/error_param.go.golden
@@ -0,0 +1,12 @@
+-- suggestedfix_error_param_5_2 --
+package missingfunction
+
+func errorParam() {
+ var err error
+ undefinedErrorParam(err) //@suggestedfix("undefinedErrorParam", "quickfix", "")
+}
+
+func undefinedErrorParam(err error) {
+ panic("unimplemented")
+}
+
diff --git a/gopls/internal/lsp/testdata/missingfunction/literals.go b/gopls/internal/lsp/testdata/missingfunction/literals.go
new file mode 100644
index 000000000..0099b1a08
--- /dev/null
+++ b/gopls/internal/lsp/testdata/missingfunction/literals.go
@@ -0,0 +1,7 @@
+package missingfunction
+
+type T struct{}
+
+func literals() {
+ undefinedLiterals("hey compiler", T{}, &T{}) //@suggestedfix("undefinedLiterals", "quickfix", "")
+}
diff --git a/gopls/internal/lsp/testdata/missingfunction/literals.go.golden b/gopls/internal/lsp/testdata/missingfunction/literals.go.golden
new file mode 100644
index 000000000..cb85de4eb
--- /dev/null
+++ b/gopls/internal/lsp/testdata/missingfunction/literals.go.golden
@@ -0,0 +1,13 @@
+-- suggestedfix_literals_6_2 --
+package missingfunction
+
+type T struct{}
+
+func literals() {
+ undefinedLiterals("hey compiler", T{}, &T{}) //@suggestedfix("undefinedLiterals", "quickfix", "")
+}
+
+func undefinedLiterals(s string, t1 T, t2 *T) {
+ panic("unimplemented")
+}
+
diff --git a/gopls/internal/lsp/testdata/missingfunction/operation.go b/gopls/internal/lsp/testdata/missingfunction/operation.go
new file mode 100644
index 000000000..a4913ec10
--- /dev/null
+++ b/gopls/internal/lsp/testdata/missingfunction/operation.go
@@ -0,0 +1,7 @@
+package missingfunction
+
+import "time"
+
+func operation() {
+ undefinedOperation(10 * time.Second) //@suggestedfix("undefinedOperation", "quickfix", "")
+}
diff --git a/gopls/internal/lsp/testdata/missingfunction/operation.go.golden b/gopls/internal/lsp/testdata/missingfunction/operation.go.golden
new file mode 100644
index 000000000..6f9e6ffab
--- /dev/null
+++ b/gopls/internal/lsp/testdata/missingfunction/operation.go.golden
@@ -0,0 +1,13 @@
+-- suggestedfix_operation_6_2 --
+package missingfunction
+
+import "time"
+
+func operation() {
+ undefinedOperation(10 * time.Second) //@suggestedfix("undefinedOperation", "quickfix", "")
+}
+
+func undefinedOperation(duration time.Duration) {
+ panic("unimplemented")
+}
+
diff --git a/gopls/internal/lsp/testdata/missingfunction/selector.go b/gopls/internal/lsp/testdata/missingfunction/selector.go
new file mode 100644
index 000000000..93a040271
--- /dev/null
+++ b/gopls/internal/lsp/testdata/missingfunction/selector.go
@@ -0,0 +1,6 @@
+package missingfunction
+
+func selector() {
+ m := map[int]bool{}
+ undefinedSelector(m[1]) //@suggestedfix("undefinedSelector", "quickfix", "")
+}
diff --git a/gopls/internal/lsp/testdata/missingfunction/selector.go.golden b/gopls/internal/lsp/testdata/missingfunction/selector.go.golden
new file mode 100644
index 000000000..44e2dde3a
--- /dev/null
+++ b/gopls/internal/lsp/testdata/missingfunction/selector.go.golden
@@ -0,0 +1,12 @@
+-- suggestedfix_selector_5_2 --
+package missingfunction
+
+func selector() {
+ m := map[int]bool{}
+ undefinedSelector(m[1]) //@suggestedfix("undefinedSelector", "quickfix", "")
+}
+
+func undefinedSelector(b bool) {
+ panic("unimplemented")
+}
+
diff --git a/gopls/internal/lsp/testdata/missingfunction/slice.go b/gopls/internal/lsp/testdata/missingfunction/slice.go
new file mode 100644
index 000000000..48b1a52b3
--- /dev/null
+++ b/gopls/internal/lsp/testdata/missingfunction/slice.go
@@ -0,0 +1,5 @@
+package missingfunction
+
+func slice() {
+ undefinedSlice([]int{1, 2}) //@suggestedfix("undefinedSlice", "quickfix", "")
+}
diff --git a/gopls/internal/lsp/testdata/missingfunction/slice.go.golden b/gopls/internal/lsp/testdata/missingfunction/slice.go.golden
new file mode 100644
index 000000000..2a05d9a0f
--- /dev/null
+++ b/gopls/internal/lsp/testdata/missingfunction/slice.go.golden
@@ -0,0 +1,11 @@
+-- suggestedfix_slice_4_2 --
+package missingfunction
+
+func slice() {
+ undefinedSlice([]int{1, 2}) //@suggestedfix("undefinedSlice", "quickfix", "")
+}
+
+func undefinedSlice(i []int) {
+ panic("unimplemented")
+}
+
diff --git a/gopls/internal/lsp/testdata/missingfunction/tuple.go b/gopls/internal/lsp/testdata/missingfunction/tuple.go
new file mode 100644
index 000000000..4059ced98
--- /dev/null
+++ b/gopls/internal/lsp/testdata/missingfunction/tuple.go
@@ -0,0 +1,9 @@
+package missingfunction
+
+func tuple() {
+ undefinedTuple(b()) //@suggestedfix("undefinedTuple", "quickfix", "")
+}
+
+func b() (string, error) {
+ return "", nil
+}
diff --git a/gopls/internal/lsp/testdata/missingfunction/tuple.go.golden b/gopls/internal/lsp/testdata/missingfunction/tuple.go.golden
new file mode 100644
index 000000000..e1118a3f3
--- /dev/null
+++ b/gopls/internal/lsp/testdata/missingfunction/tuple.go.golden
@@ -0,0 +1,15 @@
+-- suggestedfix_tuple_4_2 --
+package missingfunction
+
+func tuple() {
+ undefinedTuple(b()) //@suggestedfix("undefinedTuple", "quickfix", "")
+}
+
+func undefinedTuple(s string, err error) {
+ panic("unimplemented")
+}
+
+func b() (string, error) {
+ return "", nil
+}
+
diff --git a/gopls/internal/lsp/testdata/missingfunction/unique_params.go b/gopls/internal/lsp/testdata/missingfunction/unique_params.go
new file mode 100644
index 000000000..00479bf75
--- /dev/null
+++ b/gopls/internal/lsp/testdata/missingfunction/unique_params.go
@@ -0,0 +1,7 @@
+package missingfunction
+
+func uniqueArguments() {
+ var s string
+ var i int
+ undefinedUniqueArguments(s, i, s) //@suggestedfix("undefinedUniqueArguments", "quickfix", "")
+}
diff --git a/gopls/internal/lsp/testdata/missingfunction/unique_params.go.golden b/gopls/internal/lsp/testdata/missingfunction/unique_params.go.golden
new file mode 100644
index 000000000..8d6352cde
--- /dev/null
+++ b/gopls/internal/lsp/testdata/missingfunction/unique_params.go.golden
@@ -0,0 +1,13 @@
+-- suggestedfix_unique_params_6_2 --
+package missingfunction
+
+func uniqueArguments() {
+ var s string
+ var i int
+ undefinedUniqueArguments(s, i, s) //@suggestedfix("undefinedUniqueArguments", "quickfix", "")
+}
+
+func undefinedUniqueArguments(s1 string, i int, s2 string) {
+ panic("unimplemented")
+}
+
diff --git a/internal/lsp/testdata/multireturn/multi_return.go.in b/gopls/internal/lsp/testdata/multireturn/multi_return.go.in
index c302f3815..c302f3815 100644
--- a/internal/lsp/testdata/multireturn/multi_return.go.in
+++ b/gopls/internal/lsp/testdata/multireturn/multi_return.go.in
diff --git a/gopls/internal/lsp/testdata/nested_complit/nested_complit.go.in b/gopls/internal/lsp/testdata/nested_complit/nested_complit.go.in
new file mode 100644
index 000000000..3ad2d213e
--- /dev/null
+++ b/gopls/internal/lsp/testdata/nested_complit/nested_complit.go.in
@@ -0,0 +1,15 @@
+package nested_complit
+
+type ncFoo struct {} //@item(structNCFoo, "ncFoo", "struct{...}", "struct")
+
+type ncBar struct { //@item(structNCBar, "ncBar", "struct{...}", "struct")
+ baz []ncFoo
+}
+
+func _() {
+ []ncFoo{} //@item(litNCFoo, "[]ncFoo{}", "", "var")
+ _ := ncBar{
+ // disabled - see issue #54822
+ baz: [] // complete(" //", structNCFoo, structNCBar)
+ }
+}
diff --git a/internal/lsp/testdata/nodisk/empty b/gopls/internal/lsp/testdata/nodisk/empty
index 0c10a42f9..0c10a42f9 100644
--- a/internal/lsp/testdata/nodisk/empty
+++ b/gopls/internal/lsp/testdata/nodisk/empty
diff --git a/gopls/internal/lsp/testdata/nodisk/nodisk.overlay.go b/gopls/internal/lsp/testdata/nodisk/nodisk.overlay.go
new file mode 100644
index 000000000..08aebd12f
--- /dev/null
+++ b/gopls/internal/lsp/testdata/nodisk/nodisk.overlay.go
@@ -0,0 +1,9 @@
+package nodisk
+
+import (
+ "golang.org/lsptests/foo"
+)
+
+func _() {
+ foo.Foo() //@complete("F", Foo, IntFoo, StructFoo)
+}
diff --git a/gopls/internal/lsp/testdata/noparse/noparse.go.in b/gopls/internal/lsp/testdata/noparse/noparse.go.in
new file mode 100644
index 000000000..8b0bfaa03
--- /dev/null
+++ b/gopls/internal/lsp/testdata/noparse/noparse.go.in
@@ -0,0 +1,24 @@
+package noparse
+
+// The type error was chosen carefully to exercise a type-error analyzer.
+// We use the 'nonewvars' analyzer because the other candidates are tricky:
+//
+// - The 'unusedvariable' analyzer is disabled by default, so it is not
+// consistently enabled across Test{LSP,CommandLine} tests, which
+// both process this file.
+// - The 'undeclaredname' analyzer depends on the text of the go/types
+// "undeclared name" error, which changed in go1.20.
+// - The 'noresultvalues' analyzer produces a diagnostic containing newlines,
+// which breaks the parser used by TestCommandLine.
+//
+// This comment is all that remains of my afternoon.
+
+func bye(x int) {
+ x := 123 //@diag(":=", "nonewvars", "no new variables", "warning")
+}
+
+func stuff() {
+
+}
+
+func .() {} //@diag(".", "syntax", "expected 'IDENT', found '.'", "error")
diff --git a/internal/lsp/testdata/noparse_format/noparse_format.go.golden b/gopls/internal/lsp/testdata/noparse_format/noparse_format.go.golden
index 0060c5c92..0060c5c92 100644
--- a/internal/lsp/testdata/noparse_format/noparse_format.go.golden
+++ b/gopls/internal/lsp/testdata/noparse_format/noparse_format.go.golden
diff --git a/gopls/internal/lsp/testdata/noparse_format/noparse_format.go.in b/gopls/internal/lsp/testdata/noparse_format/noparse_format.go.in
new file mode 100644
index 000000000..311a99aaf
--- /dev/null
+++ b/gopls/internal/lsp/testdata/noparse_format/noparse_format.go.in
@@ -0,0 +1,14 @@
+// +build go1.11
+
+package noparse_format //@format("package")
+
+// The nonewvars expectation asserts that the go/analysis framework ran.
+// See comments in badstmt.
+
+func what() {
+ var hi func()
+ if { hi() //@diag("{", "syntax", "missing condition in if statement", "error")
+ }
+ hi := nil //@diag(":=", "nonewvars", "no new variables", "warning")
+}
+
diff --git a/internal/lsp/testdata/noparse_format/parse_format.go.golden b/gopls/internal/lsp/testdata/noparse_format/parse_format.go.golden
index 667c90b22..667c90b22 100644
--- a/internal/lsp/testdata/noparse_format/parse_format.go.golden
+++ b/gopls/internal/lsp/testdata/noparse_format/parse_format.go.golden
diff --git a/internal/lsp/testdata/noparse_format/parse_format.go.in b/gopls/internal/lsp/testdata/noparse_format/parse_format.go.in
index 4b98cf8d0..4b98cf8d0 100644
--- a/internal/lsp/testdata/noparse_format/parse_format.go.in
+++ b/gopls/internal/lsp/testdata/noparse_format/parse_format.go.in
diff --git a/internal/lsp/testdata/printf/printf.go b/gopls/internal/lsp/testdata/printf/printf.go
index 6e56549c1..6e56549c1 100644
--- a/internal/lsp/testdata/printf/printf.go
+++ b/gopls/internal/lsp/testdata/printf/printf.go
diff --git a/internal/lsp/testdata/rank/assign_rank.go.in b/gopls/internal/lsp/testdata/rank/assign_rank.go.in
index 5c51910d4..5c51910d4 100644
--- a/internal/lsp/testdata/rank/assign_rank.go.in
+++ b/gopls/internal/lsp/testdata/rank/assign_rank.go.in
diff --git a/internal/lsp/testdata/rank/binexpr_rank.go.in b/gopls/internal/lsp/testdata/rank/binexpr_rank.go.in
index 60b2cc1bc..60b2cc1bc 100644
--- a/internal/lsp/testdata/rank/binexpr_rank.go.in
+++ b/gopls/internal/lsp/testdata/rank/binexpr_rank.go.in
diff --git a/internal/lsp/testdata/rank/boolexpr_rank.go b/gopls/internal/lsp/testdata/rank/boolexpr_rank.go
index fe512eee1..fe512eee1 100644
--- a/internal/lsp/testdata/rank/boolexpr_rank.go
+++ b/gopls/internal/lsp/testdata/rank/boolexpr_rank.go
diff --git a/internal/lsp/testdata/rank/convert_rank.go.in b/gopls/internal/lsp/testdata/rank/convert_rank.go.in
index c43004833..c43004833 100644
--- a/internal/lsp/testdata/rank/convert_rank.go.in
+++ b/gopls/internal/lsp/testdata/rank/convert_rank.go.in
diff --git a/internal/lsp/testdata/rank/struct/struct_rank.go b/gopls/internal/lsp/testdata/rank/struct/struct_rank.go
index e0bdd38a8..e0bdd38a8 100644
--- a/internal/lsp/testdata/rank/struct/struct_rank.go
+++ b/gopls/internal/lsp/testdata/rank/struct/struct_rank.go
diff --git a/internal/lsp/testdata/rank/switch_rank.go.in b/gopls/internal/lsp/testdata/rank/switch_rank.go.in
index b828528da..b828528da 100644
--- a/internal/lsp/testdata/rank/switch_rank.go.in
+++ b/gopls/internal/lsp/testdata/rank/switch_rank.go.in
diff --git a/internal/lsp/testdata/rank/type_assert_rank.go.in b/gopls/internal/lsp/testdata/rank/type_assert_rank.go.in
index 416541cdd..416541cdd 100644
--- a/internal/lsp/testdata/rank/type_assert_rank.go.in
+++ b/gopls/internal/lsp/testdata/rank/type_assert_rank.go.in
diff --git a/internal/lsp/testdata/rank/type_switch_rank.go.in b/gopls/internal/lsp/testdata/rank/type_switch_rank.go.in
index 1ed12b7c1..1ed12b7c1 100644
--- a/internal/lsp/testdata/rank/type_switch_rank.go.in
+++ b/gopls/internal/lsp/testdata/rank/type_switch_rank.go.in
diff --git a/gopls/internal/lsp/testdata/references/another/another.go b/gopls/internal/lsp/testdata/references/another/another.go
new file mode 100644
index 000000000..20e3ebca1
--- /dev/null
+++ b/gopls/internal/lsp/testdata/references/another/another.go
@@ -0,0 +1,13 @@
+// Package another has another type.
+package another
+
+import (
+ other "golang.org/lsptests/references/other"
+)
+
+func _() {
+ xes := other.GetXes()
+ for _, x := range xes { //@mark(defX, "x")
+ _ = x.Y //@mark(useX, "x"),mark(anotherXY, "Y"),refs("Y", typeXY, anotherXY, GetXesY),refs(".", defX, useX),refs("x", defX, useX)
+ }
+}
diff --git a/internal/lsp/testdata/references/interfaces/interfaces.go b/gopls/internal/lsp/testdata/references/interfaces/interfaces.go
index 6661dcc5d..6661dcc5d 100644
--- a/internal/lsp/testdata/references/interfaces/interfaces.go
+++ b/gopls/internal/lsp/testdata/references/interfaces/interfaces.go
diff --git a/gopls/internal/lsp/testdata/references/other/other.go b/gopls/internal/lsp/testdata/references/other/other.go
new file mode 100644
index 000000000..daac1a028
--- /dev/null
+++ b/gopls/internal/lsp/testdata/references/other/other.go
@@ -0,0 +1,19 @@
+package other
+
+import (
+ references "golang.org/lsptests/references"
+)
+
+func GetXes() []references.X {
+ return []references.X{
+ {
+ Y: 1, //@mark(GetXesY, "Y"),refs("Y", typeXY, GetXesY, anotherXY)
+ },
+ }
+}
+
+func _() {
+ references.Q = "hello" //@mark(assignExpQ, "Q")
+ bob := func(_ string) {}
+ bob(references.Q) //@mark(bobExpQ, "Q")
+}
diff --git a/gopls/internal/lsp/testdata/references/refs.go b/gopls/internal/lsp/testdata/references/refs.go
new file mode 100644
index 000000000..e7ff50494
--- /dev/null
+++ b/gopls/internal/lsp/testdata/references/refs.go
@@ -0,0 +1,53 @@
+// Package refs is a package used to test find references.
+package refs
+
+import "os" //@mark(osDecl, `"os"`),refs("os", osDecl, osUse)
+
+type i int //@mark(typeI, "i"),refs("i", typeI, argI, returnI, embeddedI)
+
+type X struct {
+ Y int //@mark(typeXY, "Y")
+}
+
+func _(_ i) []bool { //@mark(argI, "i")
+ return nil
+}
+
+func _(_ []byte) i { //@mark(returnI, "i")
+ return 0
+}
+
+var q string //@mark(declQ, "q"),refs("q", declQ, assignQ, bobQ)
+
+var Q string //@mark(declExpQ, "Q"),refs("Q", declExpQ, assignExpQ, bobExpQ)
+
+func _() {
+ q = "hello" //@mark(assignQ, "q")
+ bob := func(_ string) {}
+ bob(q) //@mark(bobQ, "q")
+}
+
+type e struct {
+ i //@mark(embeddedI, "i"),refs("i", embeddedI, embeddedIUse)
+}
+
+func _() {
+ _ = e{}.i //@mark(embeddedIUse, "i")
+}
+
+const (
+ foo = iota //@refs("iota")
+)
+
+func _(x interface{}) {
+ // We use the _ prefix because the markers inhabit a single
+ // namespace and yDecl is already used in ../highlights/highlights.go.
+ switch _y := x.(type) { //@mark(_yDecl, "_y"),refs("_y", _yDecl, _yInt, _yDefault)
+ case int:
+ println(_y) //@mark(_yInt, "_y"),refs("_y", _yDecl, _yInt, _yDefault)
+ default:
+ println(_y) //@mark(_yDefault, "_y")
+ }
+
+ os.Getwd() //@mark(osUse, "os")
+}
diff --git a/internal/lsp/testdata/references/refs_test.go b/gopls/internal/lsp/testdata/references/refs_test.go
index 08c0db1f0..08c0db1f0 100644
--- a/internal/lsp/testdata/references/refs_test.go
+++ b/gopls/internal/lsp/testdata/references/refs_test.go
diff --git a/internal/lsp/testdata/rename/a/random.go.golden b/gopls/internal/lsp/testdata/rename/a/random.go.golden
index 7459863ec..7459863ec 100644
--- a/internal/lsp/testdata/rename/a/random.go.golden
+++ b/gopls/internal/lsp/testdata/rename/a/random.go.golden
diff --git a/internal/lsp/testdata/rename/a/random.go.in b/gopls/internal/lsp/testdata/rename/a/random.go.in
index 069db27ba..069db27ba 100644
--- a/internal/lsp/testdata/rename/a/random.go.in
+++ b/gopls/internal/lsp/testdata/rename/a/random.go.in
diff --git a/internal/lsp/testdata/rename/b/b.go b/gopls/internal/lsp/testdata/rename/b/b.go
index 8455f035b..8455f035b 100644
--- a/internal/lsp/testdata/rename/b/b.go
+++ b/gopls/internal/lsp/testdata/rename/b/b.go
diff --git a/gopls/internal/lsp/testdata/rename/b/b.go.golden b/gopls/internal/lsp/testdata/rename/b/b.go.golden
new file mode 100644
index 000000000..add4049cd
--- /dev/null
+++ b/gopls/internal/lsp/testdata/rename/b/b.go.golden
@@ -0,0 +1,78 @@
+-- Bob-rename --
+package b
+
+var c int //@rename("int", "uint")
+
+func _() {
+ a := 1 //@rename("a", "error")
+ a = 2
+ _ = a
+}
+
+var (
+ // Hello there.
+ // Bob does the thing.
+ Bob int //@rename("Foo", "Bob")
+)
+
+/*
+Hello description
+*/
+func Hello() {} //@rename("Hello", "Goodbye")
+
+-- Goodbye-rename --
+b.go:
+package b
+
+var c int //@rename("int", "uint")
+
+func _() {
+ a := 1 //@rename("a", "error")
+ a = 2
+ _ = a
+}
+
+var (
+ // Hello there.
+ // Foo does the thing.
+ Foo int //@rename("Foo", "Bob")
+)
+
+/*
+Goodbye description
+*/
+func Goodbye() {} //@rename("Hello", "Goodbye")
+
+c.go:
+package c
+
+import "golang.org/lsptests/rename/b"
+
+func _() {
+ b.Goodbye() //@rename("Hello", "Goodbye")
+}
+
+-- error-rename --
+package b
+
+var c int //@rename("int", "uint")
+
+func _() {
+ error := 1 //@rename("a", "error")
+ error = 2
+ _ = error
+}
+
+var (
+ // Hello there.
+ // Foo does the thing.
+ Foo int //@rename("Foo", "Bob")
+)
+
+/*
+Hello description
+*/
+func Hello() {} //@rename("Hello", "Goodbye")
+
+-- uint-rename --
+int is built in and cannot be renamed
diff --git a/gopls/internal/lsp/testdata/rename/bad/bad.go.golden b/gopls/internal/lsp/testdata/rename/bad/bad.go.golden
new file mode 100644
index 000000000..1b27e1782
--- /dev/null
+++ b/gopls/internal/lsp/testdata/rename/bad/bad.go.golden
@@ -0,0 +1,2 @@
+-- rFunc-rename --
+renaming "sFunc" to "rFunc" not possible because "golang.org/lsptests/rename/bad" has errors
diff --git a/internal/lsp/testdata/rename/bad/bad.go.in b/gopls/internal/lsp/testdata/rename/bad/bad.go.in
index 56dbee74e..56dbee74e 100644
--- a/internal/lsp/testdata/rename/bad/bad.go.in
+++ b/gopls/internal/lsp/testdata/rename/bad/bad.go.in
diff --git a/internal/lsp/testdata/rename/bad/bad_test.go.in b/gopls/internal/lsp/testdata/rename/bad/bad_test.go.in
index e695db14b..e695db14b 100644
--- a/internal/lsp/testdata/rename/bad/bad_test.go.in
+++ b/gopls/internal/lsp/testdata/rename/bad/bad_test.go.in
diff --git a/gopls/internal/lsp/testdata/rename/c/c.go b/gopls/internal/lsp/testdata/rename/c/c.go
new file mode 100644
index 000000000..6332c78f3
--- /dev/null
+++ b/gopls/internal/lsp/testdata/rename/c/c.go
@@ -0,0 +1,7 @@
+package c
+
+import "golang.org/lsptests/rename/b"
+
+func _() {
+ b.Hello() //@rename("Hello", "Goodbye")
+}
diff --git a/gopls/internal/lsp/testdata/rename/c/c.go.golden b/gopls/internal/lsp/testdata/rename/c/c.go.golden
new file mode 100644
index 000000000..d56250693
--- /dev/null
+++ b/gopls/internal/lsp/testdata/rename/c/c.go.golden
@@ -0,0 +1,32 @@
+-- Goodbye-rename --
+b.go:
+package b
+
+var c int //@rename("int", "uint")
+
+func _() {
+ a := 1 //@rename("a", "error")
+ a = 2
+ _ = a
+}
+
+var (
+ // Hello there.
+ // Foo does the thing.
+ Foo int //@rename("Foo", "Bob")
+)
+
+/*
+Goodbye description
+*/
+func Goodbye() {} //@rename("Hello", "Goodbye")
+
+c.go:
+package c
+
+import "golang.org/lsptests/rename/b"
+
+func _() {
+ b.Goodbye() //@rename("Hello", "Goodbye")
+}
+
diff --git a/internal/lsp/testdata/rename/c/c2.go b/gopls/internal/lsp/testdata/rename/c/c2.go
index 4fc484a1a..4fc484a1a 100644
--- a/internal/lsp/testdata/rename/c/c2.go
+++ b/gopls/internal/lsp/testdata/rename/c/c2.go
diff --git a/internal/lsp/testdata/rename/c/c2.go.golden b/gopls/internal/lsp/testdata/rename/c/c2.go.golden
index e509227a9..e509227a9 100644
--- a/internal/lsp/testdata/rename/c/c2.go.golden
+++ b/gopls/internal/lsp/testdata/rename/c/c2.go.golden
diff --git a/internal/lsp/testdata/rename/crosspkg/another/another.go b/gopls/internal/lsp/testdata/rename/crosspkg/another/another.go
index 9b50af2cb..9b50af2cb 100644
--- a/internal/lsp/testdata/rename/crosspkg/another/another.go
+++ b/gopls/internal/lsp/testdata/rename/crosspkg/another/another.go
diff --git a/internal/lsp/testdata/rename/crosspkg/another/another.go.golden b/gopls/internal/lsp/testdata/rename/crosspkg/another/another.go.golden
index d3fccdaf1..d3fccdaf1 100644
--- a/internal/lsp/testdata/rename/crosspkg/another/another.go.golden
+++ b/gopls/internal/lsp/testdata/rename/crosspkg/another/another.go.golden
diff --git a/internal/lsp/testdata/rename/crosspkg/crosspkg.go b/gopls/internal/lsp/testdata/rename/crosspkg/crosspkg.go
index 8510bcfe0..8510bcfe0 100644
--- a/internal/lsp/testdata/rename/crosspkg/crosspkg.go
+++ b/gopls/internal/lsp/testdata/rename/crosspkg/crosspkg.go
diff --git a/gopls/internal/lsp/testdata/rename/crosspkg/crosspkg.go.golden b/gopls/internal/lsp/testdata/rename/crosspkg/crosspkg.go.golden
new file mode 100644
index 000000000..49ff7f841
--- /dev/null
+++ b/gopls/internal/lsp/testdata/rename/crosspkg/crosspkg.go.golden
@@ -0,0 +1,40 @@
+-- Dolphin-rename --
+crosspkg.go:
+package crosspkg
+
+func Dolphin() { //@rename("Foo", "Dolphin")
+
+}
+
+var Bar int //@rename("Bar", "Tomato")
+
+other.go:
+package other
+
+import "golang.org/lsptests/rename/crosspkg"
+
+func Other() {
+ crosspkg.Bar
+ crosspkg.Dolphin() //@rename("Foo", "Flamingo")
+}
+
+-- Tomato-rename --
+crosspkg.go:
+package crosspkg
+
+func Foo() { //@rename("Foo", "Dolphin")
+
+}
+
+var Tomato int //@rename("Bar", "Tomato")
+
+other.go:
+package other
+
+import "golang.org/lsptests/rename/crosspkg"
+
+func Other() {
+ crosspkg.Tomato
+ crosspkg.Foo() //@rename("Foo", "Flamingo")
+}
+
diff --git a/gopls/internal/lsp/testdata/rename/crosspkg/other/other.go b/gopls/internal/lsp/testdata/rename/crosspkg/other/other.go
new file mode 100644
index 000000000..5fd147da6
--- /dev/null
+++ b/gopls/internal/lsp/testdata/rename/crosspkg/other/other.go
@@ -0,0 +1,8 @@
+package other
+
+import "golang.org/lsptests/rename/crosspkg"
+
+func Other() {
+ crosspkg.Bar
+ crosspkg.Foo() //@rename("Foo", "Flamingo")
+}
diff --git a/gopls/internal/lsp/testdata/rename/crosspkg/other/other.go.golden b/gopls/internal/lsp/testdata/rename/crosspkg/other/other.go.golden
new file mode 100644
index 000000000..f7b4aaad4
--- /dev/null
+++ b/gopls/internal/lsp/testdata/rename/crosspkg/other/other.go.golden
@@ -0,0 +1,20 @@
+-- Flamingo-rename --
+crosspkg.go:
+package crosspkg
+
+func Flamingo() { //@rename("Foo", "Dolphin")
+
+}
+
+var Bar int //@rename("Bar", "Tomato")
+
+other.go:
+package other
+
+import "golang.org/lsptests/rename/crosspkg"
+
+func Other() {
+ crosspkg.Bar
+ crosspkg.Flamingo() //@rename("Foo", "Flamingo")
+}
+
diff --git a/internal/lsp/testdata/rename/generics/embedded.go b/gopls/internal/lsp/testdata/rename/generics/embedded.go
index b44bab880..b44bab880 100644
--- a/internal/lsp/testdata/rename/generics/embedded.go
+++ b/gopls/internal/lsp/testdata/rename/generics/embedded.go
diff --git a/internal/lsp/testdata/rename/generics/embedded.go.golden b/gopls/internal/lsp/testdata/rename/generics/embedded.go.golden
index faa9afb69..faa9afb69 100644
--- a/internal/lsp/testdata/rename/generics/embedded.go.golden
+++ b/gopls/internal/lsp/testdata/rename/generics/embedded.go.golden
diff --git a/internal/lsp/testdata/rename/generics/generics.go b/gopls/internal/lsp/testdata/rename/generics/generics.go
index 977589c0c..977589c0c 100644
--- a/internal/lsp/testdata/rename/generics/generics.go
+++ b/gopls/internal/lsp/testdata/rename/generics/generics.go
diff --git a/internal/lsp/testdata/rename/generics/generics.go.golden b/gopls/internal/lsp/testdata/rename/generics/generics.go.golden
index 7d39813e1..7d39813e1 100644
--- a/internal/lsp/testdata/rename/generics/generics.go.golden
+++ b/gopls/internal/lsp/testdata/rename/generics/generics.go.golden
diff --git a/internal/lsp/testdata/rename/generics/unions.go b/gopls/internal/lsp/testdata/rename/generics/unions.go
index c737b5c27..c737b5c27 100644
--- a/internal/lsp/testdata/rename/generics/unions.go
+++ b/gopls/internal/lsp/testdata/rename/generics/unions.go
diff --git a/internal/lsp/testdata/rename/generics/unions.go.golden b/gopls/internal/lsp/testdata/rename/generics/unions.go.golden
index 463289629..463289629 100644
--- a/internal/lsp/testdata/rename/generics/unions.go.golden
+++ b/gopls/internal/lsp/testdata/rename/generics/unions.go.golden
diff --git a/internal/lsp/testdata/rename/issue39614/issue39614.go.golden b/gopls/internal/lsp/testdata/rename/issue39614/issue39614.go.golden
index d87c58e83..d87c58e83 100644
--- a/internal/lsp/testdata/rename/issue39614/issue39614.go.golden
+++ b/gopls/internal/lsp/testdata/rename/issue39614/issue39614.go.golden
diff --git a/internal/lsp/testdata/rename/issue39614/issue39614.go.in b/gopls/internal/lsp/testdata/rename/issue39614/issue39614.go.in
index 8222db2c4..8222db2c4 100644
--- a/internal/lsp/testdata/rename/issue39614/issue39614.go.in
+++ b/gopls/internal/lsp/testdata/rename/issue39614/issue39614.go.in
diff --git a/internal/lsp/testdata/rename/issue42134/1.go b/gopls/internal/lsp/testdata/rename/issue42134/1.go
index 056f8476a..056f8476a 100644
--- a/internal/lsp/testdata/rename/issue42134/1.go
+++ b/gopls/internal/lsp/testdata/rename/issue42134/1.go
diff --git a/internal/lsp/testdata/rename/issue42134/1.go.golden b/gopls/internal/lsp/testdata/rename/issue42134/1.go.golden
index 266aeef4b..266aeef4b 100644
--- a/internal/lsp/testdata/rename/issue42134/1.go.golden
+++ b/gopls/internal/lsp/testdata/rename/issue42134/1.go.golden
diff --git a/internal/lsp/testdata/rename/issue42134/2.go b/gopls/internal/lsp/testdata/rename/issue42134/2.go
index e9f639575..e9f639575 100644
--- a/internal/lsp/testdata/rename/issue42134/2.go
+++ b/gopls/internal/lsp/testdata/rename/issue42134/2.go
diff --git a/internal/lsp/testdata/rename/issue42134/2.go.golden b/gopls/internal/lsp/testdata/rename/issue42134/2.go.golden
index 406a3833c..406a3833c 100644
--- a/internal/lsp/testdata/rename/issue42134/2.go.golden
+++ b/gopls/internal/lsp/testdata/rename/issue42134/2.go.golden
diff --git a/internal/lsp/testdata/rename/issue42134/3.go b/gopls/internal/lsp/testdata/rename/issue42134/3.go
index 7666f57d3..7666f57d3 100644
--- a/internal/lsp/testdata/rename/issue42134/3.go
+++ b/gopls/internal/lsp/testdata/rename/issue42134/3.go
diff --git a/internal/lsp/testdata/rename/issue42134/3.go.golden b/gopls/internal/lsp/testdata/rename/issue42134/3.go.golden
index cdcae1808..cdcae1808 100644
--- a/internal/lsp/testdata/rename/issue42134/3.go.golden
+++ b/gopls/internal/lsp/testdata/rename/issue42134/3.go.golden
diff --git a/internal/lsp/testdata/rename/issue42134/4.go b/gopls/internal/lsp/testdata/rename/issue42134/4.go
index c953520bc..c953520bc 100644
--- a/internal/lsp/testdata/rename/issue42134/4.go
+++ b/gopls/internal/lsp/testdata/rename/issue42134/4.go
diff --git a/internal/lsp/testdata/rename/issue42134/4.go.golden b/gopls/internal/lsp/testdata/rename/issue42134/4.go.golden
index 2086cf74c..2086cf74c 100644
--- a/internal/lsp/testdata/rename/issue42134/4.go.golden
+++ b/gopls/internal/lsp/testdata/rename/issue42134/4.go.golden
diff --git a/internal/lsp/testdata/rename/issue43616/issue43616.go.golden b/gopls/internal/lsp/testdata/rename/issue43616/issue43616.go.golden
index 34d03ba7a..34d03ba7a 100644
--- a/internal/lsp/testdata/rename/issue43616/issue43616.go.golden
+++ b/gopls/internal/lsp/testdata/rename/issue43616/issue43616.go.golden
diff --git a/internal/lsp/testdata/rename/issue43616/issue43616.go.in b/gopls/internal/lsp/testdata/rename/issue43616/issue43616.go.in
index aaad531b7..aaad531b7 100644
--- a/internal/lsp/testdata/rename/issue43616/issue43616.go.in
+++ b/gopls/internal/lsp/testdata/rename/issue43616/issue43616.go.in
diff --git a/internal/lsp/testdata/rename/shadow/shadow.go b/gopls/internal/lsp/testdata/rename/shadow/shadow.go
index 38329b4fe..38329b4fe 100644
--- a/internal/lsp/testdata/rename/shadow/shadow.go
+++ b/gopls/internal/lsp/testdata/rename/shadow/shadow.go
diff --git a/gopls/internal/lsp/testdata/rename/shadow/shadow.go.golden b/gopls/internal/lsp/testdata/rename/shadow/shadow.go.golden
new file mode 100644
index 000000000..a34b5c0fe
--- /dev/null
+++ b/gopls/internal/lsp/testdata/rename/shadow/shadow.go.golden
@@ -0,0 +1,51 @@
+-- a-rename --
+shadow/shadow.go:10:6: renaming this func "A" to "a"
+shadow/shadow.go:5:13: would cause this reference to become shadowed
+shadow/shadow.go:4:2: by this intervening var definition
+-- b-rename --
+package shadow
+
+func _() {
+ a := true
+ b, c, _ := A(), b(), D() //@rename("A", "a"),rename("B", "b"),rename("b", "c"),rename("D", "d")
+ d := false
+ _, _, _, _ = a, b, c, d
+}
+
+func A() int {
+ return 0
+}
+
+func b() int {
+ return 0
+}
+
+func D() int {
+ return 0
+}
+
+-- c-rename --
+shadow/shadow.go:5:2: renaming this var "b" to "c"
+shadow/shadow.go:5:5: conflicts with var in same block
+-- d-rename --
+package shadow
+
+func _() {
+ a := true
+ b, c, _ := A(), B(), d() //@rename("A", "a"),rename("B", "b"),rename("b", "c"),rename("D", "d")
+ d := false
+ _, _, _, _ = a, b, c, d
+}
+
+func A() int {
+ return 0
+}
+
+func B() int {
+ return 0
+}
+
+func d() int {
+ return 0
+}
+
diff --git a/internal/lsp/testdata/rename/testy/testy.go b/gopls/internal/lsp/testdata/rename/testy/testy.go
index e46dc06cd..e46dc06cd 100644
--- a/internal/lsp/testdata/rename/testy/testy.go
+++ b/gopls/internal/lsp/testdata/rename/testy/testy.go
diff --git a/internal/lsp/testdata/rename/testy/testy.go.golden b/gopls/internal/lsp/testdata/rename/testy/testy.go.golden
index 288dfee96..288dfee96 100644
--- a/internal/lsp/testdata/rename/testy/testy.go.golden
+++ b/gopls/internal/lsp/testdata/rename/testy/testy.go.golden
diff --git a/internal/lsp/testdata/rename/testy/testy_test.go b/gopls/internal/lsp/testdata/rename/testy/testy_test.go
index 3d86e8455..3d86e8455 100644
--- a/internal/lsp/testdata/rename/testy/testy_test.go
+++ b/gopls/internal/lsp/testdata/rename/testy/testy_test.go
diff --git a/internal/lsp/testdata/rename/testy/testy_test.go.golden b/gopls/internal/lsp/testdata/rename/testy/testy_test.go.golden
index 480c8e995..480c8e995 100644
--- a/internal/lsp/testdata/rename/testy/testy_test.go.golden
+++ b/gopls/internal/lsp/testdata/rename/testy/testy_test.go.golden
diff --git a/gopls/internal/lsp/testdata/rundespiteerrors/rundespiteerrors.go b/gopls/internal/lsp/testdata/rundespiteerrors/rundespiteerrors.go
new file mode 100644
index 000000000..783e9a55f
--- /dev/null
+++ b/gopls/internal/lsp/testdata/rundespiteerrors/rundespiteerrors.go
@@ -0,0 +1,14 @@
+package rundespiteerrors
+
+// This test verifies that analyzers without RunDespiteErrors are not
+// executed on a package containing type errors (see issue #54762).
+func _() {
+ // A type error.
+ _ = 1 + "" //@diag("1", "compiler", "mismatched types|cannot convert", "error")
+
+ // A violation of an analyzer for which RunDespiteErrors=false:
+ // no diagnostic is produced; the diag comment is merely illustrative.
+ for _ = range "" { //diag("for _", "simplifyrange", "simplify range expression", "warning")
+
+ }
+}
diff --git a/gopls/internal/lsp/testdata/selectionrange/foo.go b/gopls/internal/lsp/testdata/selectionrange/foo.go
new file mode 100644
index 000000000..1bf41340c
--- /dev/null
+++ b/gopls/internal/lsp/testdata/selectionrange/foo.go
@@ -0,0 +1,13 @@
+package foo
+
+import "time"
+
+func Bar(x, y int, t time.Time) int {
+ zs := []int{1, 2, 3} //@selectionrange("1")
+
+ for _, z := range zs {
+ x = x + z + y + zs[1] //@selectionrange("1")
+ }
+
+ return x + y //@selectionrange("+")
+}
diff --git a/gopls/internal/lsp/testdata/selectionrange/foo.go.golden b/gopls/internal/lsp/testdata/selectionrange/foo.go.golden
new file mode 100644
index 000000000..fe70b30b7
--- /dev/null
+++ b/gopls/internal/lsp/testdata/selectionrange/foo.go.golden
@@ -0,0 +1,29 @@
+-- selectionrange_foo_12_11 --
+Ranges 0:
+ 11:8-11:13 "x + y"
+ 11:1-11:13 "return x + y"
+ 4:36-12:1 "{\\n\tzs := []int{...ionrange(\"+\")\\n}"
+ 4:0-12:1 "func Bar(x, y i...ionrange(\"+\")\\n}"
+ 0:0-12:1 "package foo\\n\\nim...ionrange(\"+\")\\n}"
+
+-- selectionrange_foo_6_14 --
+Ranges 0:
+ 5:13-5:14 "1"
+ 5:7-5:21 "[]int{1, 2, 3}"
+ 5:1-5:21 "zs := []int{1, 2, 3}"
+ 4:36-12:1 "{\\n\tzs := []int{...ionrange(\"+\")\\n}"
+ 4:0-12:1 "func Bar(x, y i...ionrange(\"+\")\\n}"
+ 0:0-12:1 "package foo\\n\\nim...ionrange(\"+\")\\n}"
+
+-- selectionrange_foo_9_22 --
+Ranges 0:
+ 8:21-8:22 "1"
+ 8:18-8:23 "zs[1]"
+ 8:6-8:23 "x + z + y + zs[1]"
+ 8:2-8:23 "x = x + z + y + zs[1]"
+ 7:22-9:2 "{\\n\t\tx = x + z +...onrange(\"1\")\\n\t}"
+ 7:1-9:2 "for _, z := ran...onrange(\"1\")\\n\t}"
+ 4:36-12:1 "{\\n\tzs := []int{...ionrange(\"+\")\\n}"
+ 4:0-12:1 "func Bar(x, y i...ionrange(\"+\")\\n}"
+ 0:0-12:1 "package foo\\n\\nim...ionrange(\"+\")\\n}"
+
diff --git a/gopls/internal/lsp/testdata/selector/selector.go.in b/gopls/internal/lsp/testdata/selector/selector.go.in
new file mode 100644
index 000000000..b1498a08c
--- /dev/null
+++ b/gopls/internal/lsp/testdata/selector/selector.go.in
@@ -0,0 +1,66 @@
+// +build go1.11
+
+package selector
+
+import (
+ "golang.org/lsptests/bar"
+)
+
+type S struct {
+ B, A, C int //@item(Bf, "B", "int", "field"),item(Af, "A", "int", "field"),item(Cf, "C", "int", "field")
+}
+
+func _() {
+ _ = S{}.; //@complete(";", Af, Bf, Cf)
+}
+
+type bob struct { a int } //@item(a, "a", "int", "field")
+type george struct { b int }
+type jack struct { c int } //@item(c, "c", "int", "field")
+type jill struct { d int }
+
+func (b *bob) george() *george {} //@item(george, "george", "func() *george", "method")
+func (g *george) jack() *jack {}
+func (j *jack) jill() *jill {} //@item(jill, "jill", "func() *jill", "method")
+
+func _() {
+ b := &bob{}
+ y := b.george().
+ jack();
+ y.; //@complete(";", c, jill)
+}
+
+func _() {
+ bar. //@complete(" /", Bar)
+ x := 5
+
+ var b *bob
+ b. //@complete(" /", a, george)
+ y, z := 5, 6
+
+ b. //@complete(" /", a, george)
+ y, z, a, b, c := 5, 6
+}
+
+func _() {
+ bar. //@complete(" /", Bar)
+ bar.Bar()
+
+ bar. //@complete(" /", Bar)
+ go f()
+}
+
+func _() {
+ var b *bob
+ if y != b. //@complete(" /", a, george)
+ z := 5
+
+ if z + y + 1 + b. //@complete(" /", a, george)
+ r, s, t := 4, 5
+
+ if y != b. //@complete(" /", a, george)
+ z = 5
+
+ if z + y + 1 + b. //@complete(" /", a, george)
+ r = 4
+}
diff --git a/internal/lsp/testdata/semantic/README.md b/gopls/internal/lsp/testdata/semantic/README.md
index 00ec19af1..00ec19af1 100644
--- a/internal/lsp/testdata/semantic/README.md
+++ b/gopls/internal/lsp/testdata/semantic/README.md
diff --git a/internal/lsp/testdata/semantic/a.go b/gopls/internal/lsp/testdata/semantic/a.go
index 54d6c8a62..54d6c8a62 100644
--- a/internal/lsp/testdata/semantic/a.go
+++ b/gopls/internal/lsp/testdata/semantic/a.go
diff --git a/gopls/internal/lsp/testdata/semantic/a.go.golden b/gopls/internal/lsp/testdata/semantic/a.go.golden
new file mode 100644
index 000000000..047a031a7
--- /dev/null
+++ b/gopls/internal/lsp/testdata/semantic/a.go.golden
@@ -0,0 +1,83 @@
+-- semantic --
+/*⇒7,keyword,[]*/package /*⇒14,namespace,[]*/semantictokens /*⇒16,comment,[]*///@ semantic("")
+
+/*⇒6,keyword,[]*/import (
+ _ "encoding/utf8"
+ /*⇒3,namespace,[]*/utf "encoding/utf8"
+ "fmt"/*⇐3,namespace,[]*/ /*⇒19,comment,[]*///@ semantic("fmt")
+ . "fmt"
+ "unicode/utf8"/*⇐4,namespace,[]*/
+)
+
+/*⇒3,keyword,[]*/var (
+ /*⇒1,variable,[definition]*/a = /*⇒3,namespace,[]*/fmt./*⇒5,function,[]*/Print
+ /*⇒1,variable,[definition]*/b []/*⇒6,type,[defaultLibrary]*/string = []/*⇒6,type,[defaultLibrary]*/string{/*⇒5,string,[]*/"foo"}
+ /*⇒2,variable,[definition]*/c1 /*⇒4,keyword,[]*/chan /*⇒3,type,[defaultLibrary]*/int
+ /*⇒2,variable,[definition]*/c2 /*⇒2,operator,[]*/<-/*⇒4,keyword,[]*/chan /*⇒3,type,[defaultLibrary]*/int
+ /*⇒2,variable,[definition]*/c3 = /*⇒4,function,[defaultLibrary]*/make([]/*⇒4,keyword,[]*/chan/*⇒2,operator,[]*/<- /*⇒3,type,[defaultLibrary]*/int)
+ /*⇒1,variable,[definition]*/b = /*⇒1,type,[]*/A{/*⇒1,variable,[]*/X: /*⇒2,number,[]*/23}
+ /*⇒1,variable,[definition]*/m /*⇒3,keyword,[]*/map[/*⇒4,type,[defaultLibrary]*/bool][/*⇒1,number,[]*/3]/*⇒1,operator,[]*/*/*⇒7,type,[defaultLibrary]*/float64
+)
+
+/*⇒5,keyword,[]*/const (
+ /*⇒2,variable,[definition readonly]*/xx /*⇒1,type,[]*/F = /*⇒4,variable,[readonly]*/iota
+ /*⇒2,variable,[definition readonly]*/yy = /*⇒2,variable,[readonly]*/xx /*⇒1,operator,[]*/+ /*⇒1,number,[]*/3
+ /*⇒2,variable,[definition readonly]*/zz = /*⇒2,string,[]*/""
+ /*⇒2,variable,[definition readonly]*/ww = /*⇒6,string,[]*/"not " /*⇒1,operator,[]*/+ /*⇒2,variable,[readonly]*/zz
+)
+
+/*⇒4,keyword,[]*/type /*⇒1,type,[definition]*/A /*⇒6,keyword,[]*/struct {
+ /*⇒1,variable,[definition]*/X /*⇒3,type,[defaultLibrary]*/int /*⇒6,string,[]*/`foof`
+}
+/*⇒4,keyword,[]*/type /*⇒1,type,[definition]*/B /*⇒9,keyword,[]*/interface {
+ /*⇒1,type,[]*/A
+ /*⇒3,method,[definition]*/sad(/*⇒3,type,[defaultLibrary]*/int) /*⇒4,type,[defaultLibrary]*/bool
+}
+
+/*⇒4,keyword,[]*/type /*⇒1,type,[definition]*/F /*⇒3,type,[defaultLibrary]*/int
+
+/*⇒4,keyword,[]*/func (/*⇒1,variable,[]*/a /*⇒1,operator,[]*/*/*⇒1,type,[]*/A) /*⇒1,method,[definition]*/f() /*⇒4,type,[defaultLibrary]*/bool {
+ /*⇒3,keyword,[]*/var /*⇒1,variable,[definition]*/z /*⇒6,type,[defaultLibrary]*/string
+ /*⇒1,variable,[definition]*/x /*⇒2,operator,[]*/:= /*⇒5,string,[]*/"foo"
+ /*⇒1,variable,[]*/a(/*⇒1,variable,[]*/x)
+ /*⇒1,variable,[definition]*/y /*⇒2,operator,[]*/:= /*⇒5,string,[]*/"bar" /*⇒1,operator,[]*/+ /*⇒1,variable,[]*/x
+ /*⇒6,keyword,[]*/switch /*⇒1,variable,[]*/z {
+ /*⇒4,keyword,[]*/case /*⇒4,string,[]*/"xx":
+ /*⇒7,keyword,[]*/default:
+ }
+ /*⇒6,keyword,[]*/select {
+ /*⇒4,keyword,[]*/case /*⇒1,variable,[definition]*/z /*⇒2,operator,[]*/:= /*⇒2,operator,[]*/<-/*⇒2,variable,[]*/c3[/*⇒1,number,[]*/0]:
+ /*⇒7,keyword,[]*/default:
+ }
+ /*⇒3,keyword,[]*/for /*⇒1,variable,[definition]*/k, /*⇒1,variable,[definition]*/v := /*⇒5,keyword,[]*/range /*⇒1,variable,[]*/m {
+ /*⇒6,keyword,[]*/return (/*⇒1,operator,[]*/!/*⇒1,variable,[]*/k) /*⇒2,operator,[]*/&& /*⇒1,variable,[]*/v[/*⇒1,number,[]*/0] /*⇒2,operator,[]*/== /*⇒3,variable,[readonly defaultLibrary]*/nil
+ }
+ /*⇒2,variable,[]*/c2 /*⇒2,operator,[]*/<- /*⇒1,type,[]*/A./*⇒1,variable,[]*/X
+ /*⇒1,variable,[definition]*/w /*⇒2,operator,[]*/:= /*⇒1,variable,[]*/b[/*⇒1,number,[]*/4:]
+ /*⇒1,variable,[definition]*/j /*⇒2,operator,[]*/:= /*⇒3,function,[defaultLibrary]*/len(/*⇒1,variable,[]*/x)
+ /*⇒1,variable,[]*/j/*⇒2,operator,[]*/--
+ /*⇒1,variable,[definition]*/q /*⇒2,operator,[]*/:= []/*⇒9,keyword,[]*/interface{}{/*⇒1,variable,[]*/j, /*⇒3,number,[]*/23i, /*⇒1,operator,[]*/&/*⇒1,variable,[]*/y}
+ /*⇒1,function,[]*/g(/*⇒1,variable,[]*/q/*⇒3,operator,[]*/...)
+ /*⇒6,keyword,[]*/return /*⇒4,variable,[readonly]*/true
+}
+
+/*⇒4,keyword,[]*/func /*⇒1,function,[definition]*/g(/*⇒2,parameter,[definition]*/vv /*⇒3,operator,[]*/.../*⇒9,keyword,[]*/interface{}) {
+ /*⇒2,variable,[definition]*/ff /*⇒2,operator,[]*/:= /*⇒4,keyword,[]*/func() {}
+ /*⇒5,keyword,[]*/defer /*⇒2,function,[]*/ff()
+ /*⇒2,keyword,[]*/go /*⇒3,namespace,[]*/utf./*⇒9,function,[]*/RuneCount(/*⇒2,string,[]*/"")
+ /*⇒2,keyword,[]*/go /*⇒4,namespace,[]*/utf8./*⇒9,function,[]*/RuneCount(/*⇒2,parameter,[]*/vv.(/*⇒6,type,[]*/string))
+ /*⇒2,keyword,[]*/if /*⇒4,variable,[readonly]*/true {
+ } /*⇒4,keyword,[]*/else {
+ }
+/*⇒5,parameter,[definition]*/Never:
+ /*⇒3,keyword,[]*/for /*⇒1,variable,[definition]*/i /*⇒2,operator,[]*/:= /*⇒1,number,[]*/0; /*⇒1,variable,[]*/i /*⇒1,operator,[]*/< /*⇒2,number,[]*/10; {
+ /*⇒5,keyword,[]*/break Never
+ }
+ _, /*⇒2,variable,[definition]*/ok /*⇒2,operator,[]*/:= /*⇒2,parameter,[]*/vv[/*⇒1,number,[]*/0].(/*⇒1,type,[]*/A)
+ /*⇒2,keyword,[]*/if /*⇒1,operator,[]*/!/*⇒2,variable,[]*/ok {
+ /*⇒6,keyword,[]*/switch /*⇒1,variable,[definition]*/x /*⇒2,operator,[]*/:= /*⇒2,parameter,[]*/vv[/*⇒1,number,[]*/0].(/*⇒4,keyword,[]*/type) {
+ }
+ /*⇒4,keyword,[]*/goto Never
+ }
+}
+
diff --git a/gopls/internal/lsp/testdata/semantic/b.go b/gopls/internal/lsp/testdata/semantic/b.go
new file mode 100644
index 000000000..496b0863d
--- /dev/null
+++ b/gopls/internal/lsp/testdata/semantic/b.go
@@ -0,0 +1,38 @@
+package semantictokens //@ semantic("")
+
+func f(x ...interface{}) {
+}
+
+func weirⰀd() { /*😀*/ // comment
+ const (
+ snil = nil
+ nil = true
+ true = false
+ false = snil
+ cmd = `foof`
+ double = iota
+ iota = copy
+ four = (len(cmd)/2 < 5)
+ five = four
+ )
+ f(cmd, nil, double, iota)
+}
+
+/*
+
+multiline */ /*
+multiline
+*/
+type AA int
+type BB struct {
+ AA
+}
+type CC struct {
+ AA int
+}
+type D func(aa AA) (BB error)
+type E func(AA) BB
+
+var a chan<- chan int
+var b chan<- <-chan int
+var c <-chan <-chan int
diff --git a/gopls/internal/lsp/testdata/semantic/b.go.golden b/gopls/internal/lsp/testdata/semantic/b.go.golden
new file mode 100644
index 000000000..59071374b
--- /dev/null
+++ b/gopls/internal/lsp/testdata/semantic/b.go.golden
@@ -0,0 +1,40 @@
+-- semantic --
+/*⇒7,keyword,[]*/package /*⇒14,namespace,[]*/semantictokens /*⇒16,comment,[]*///@ semantic("")
+
+/*⇒4,keyword,[]*/func /*⇒1,function,[definition]*/f(/*⇒1,parameter,[definition]*/x /*⇒3,operator,[]*/.../*⇒9,keyword,[]*/interface{}) {
+}
+
+/*⇒4,keyword,[]*/func /*⇒6,function,[definition]*/weirⰀd() { /*⇒5,comment,[]*//*😀*/ /*⇒10,comment,[]*/// comment
+ /*⇒5,keyword,[]*/const (
+ /*⇒4,variable,[definition readonly]*/snil = /*⇒3,variable,[readonly defaultLibrary]*/nil
+ /*⇒3,variable,[definition readonly]*/nil = /*⇒4,variable,[readonly]*/true
+ /*⇒4,variable,[definition readonly]*/true = /*⇒5,variable,[readonly]*/false
+ /*⇒5,variable,[definition readonly]*/false = /*⇒4,variable,[readonly]*/snil
+ /*⇒3,variable,[definition readonly]*/cmd = /*⇒6,string,[]*/`foof`
+ /*⇒6,variable,[definition readonly]*/double = /*⇒4,variable,[readonly]*/iota
+ /*⇒4,variable,[definition readonly]*/iota = /*⇒4,function,[defaultLibrary]*/copy
+ /*⇒4,variable,[definition readonly]*/four = (/*⇒3,function,[defaultLibrary]*/len(/*⇒3,variable,[readonly]*/cmd)/*⇒1,operator,[]*// /*⇒1,number,[]*/2 /*⇒1,operator,[]*/< /*⇒1,number,[]*/5)
+ /*⇒4,variable,[definition readonly]*/five = /*⇒4,variable,[readonly]*/four
+ )
+ /*⇒1,function,[]*/f(/*⇒3,variable,[readonly]*/cmd, /*⇒3,variable,[readonly]*/nil, /*⇒6,variable,[readonly]*/double, /*⇒4,variable,[readonly]*/iota)
+}
+
+/*⇒2,comment,[]*//*
+/*⇒0,comment,[]*/
+/*⇒12,comment,[]*/multiline */ /*⇒2,comment,[]*//*
+/*⇒9,comment,[]*/multiline
+/*⇒2,comment,[]*/*/
+/*⇒4,keyword,[]*/type /*⇒2,type,[definition]*/AA /*⇒3,type,[defaultLibrary]*/int
+/*⇒4,keyword,[]*/type /*⇒2,type,[definition]*/BB /*⇒6,keyword,[]*/struct {
+ /*⇒2,type,[]*/AA
+}
+/*⇒4,keyword,[]*/type /*⇒2,type,[definition]*/CC /*⇒6,keyword,[]*/struct {
+ /*⇒2,variable,[definition]*/AA /*⇒3,type,[defaultLibrary]*/int
+}
+/*⇒4,keyword,[]*/type /*⇒1,type,[definition]*/D /*⇒4,keyword,[]*/func(/*⇒2,parameter,[definition]*/aa /*⇒2,type,[]*/AA) (/*⇒2,parameter,[definition]*/BB /*⇒5,type,[]*/error)
+/*⇒4,keyword,[]*/type /*⇒1,type,[definition]*/E /*⇒4,keyword,[]*/func(/*⇒2,type,[]*/AA) /*⇒2,type,[]*/BB
+
+/*⇒3,keyword,[]*/var /*⇒1,variable,[definition]*/a /*⇒4,keyword,[]*/chan/*⇒2,operator,[]*/<- /*⇒4,keyword,[]*/chan /*⇒3,type,[defaultLibrary]*/int
+/*⇒3,keyword,[]*/var /*⇒1,variable,[definition]*/b /*⇒4,keyword,[]*/chan/*⇒2,operator,[]*/<- /*⇒2,operator,[]*/<-/*⇒4,keyword,[]*/chan /*⇒3,type,[defaultLibrary]*/int
+/*⇒3,keyword,[]*/var /*⇒1,variable,[definition]*/c /*⇒2,operator,[]*/<-/*⇒4,keyword,[]*/chan /*⇒2,operator,[]*/<-/*⇒4,keyword,[]*/chan /*⇒3,type,[defaultLibrary]*/int
+
diff --git a/internal/lsp/testdata/semantic/semantic_test.go b/gopls/internal/lsp/testdata/semantic/semantic_test.go
index 63d59f666..63d59f666 100644
--- a/internal/lsp/testdata/semantic/semantic_test.go
+++ b/gopls/internal/lsp/testdata/semantic/semantic_test.go
diff --git a/internal/lsp/testdata/signature/signature.go b/gopls/internal/lsp/testdata/signature/signature.go
index 4e2b12bc4..4e2b12bc4 100644
--- a/internal/lsp/testdata/signature/signature.go
+++ b/gopls/internal/lsp/testdata/signature/signature.go
diff --git a/gopls/internal/lsp/testdata/signature/signature.go.golden b/gopls/internal/lsp/testdata/signature/signature.go.golden
new file mode 100644
index 000000000..90a4facf9
--- /dev/null
+++ b/gopls/internal/lsp/testdata/signature/signature.go.golden
@@ -0,0 +1,53 @@
+-- AliasMap(a map[*Alias]StringAlias) (b map[*Alias]StringAlias, c map[*Alias]StringAlias)-signature --
+AliasMap(a map[*Alias]StringAlias) (b map[*Alias]StringAlias, c map[*Alias]StringAlias)
+
+-- AliasSlice(a []*Alias) (b Alias)-signature --
+AliasSlice(a []*Alias) (b Alias)
+
+-- Bar(float64, ...byte)-signature --
+Bar(float64, ...byte)
+
+-- Foo(a string, b int) (c bool)-signature --
+Foo(a string, b int) (c bool)
+
+-- Next(n int) []byte-signature --
+Next(n int) []byte
+
+Next returns a slice containing the next n bytes from the buffer, advancing the buffer as if the bytes had been returned by Read.
+
+-- OtherAliasMap(a map[Alias]OtherAlias, b map[Alias]OtherAlias) map[Alias]OtherAlias-signature --
+OtherAliasMap(a map[Alias]OtherAlias, b map[Alias]OtherAlias) map[Alias]OtherAlias
+
+-- fn(hi string, there string) func(i int) rune-signature --
+fn(hi string, there string) func(i int) rune
+
+-- foo(e *json.Decoder) (*big.Int, error)-signature --
+foo(e *json.Decoder) (*big.Int, error)
+
+-- func(hi string, there string) func(i int) rune-signature --
+func(hi string, there string) func(i int) rune
+
+-- func(i int) rune-signature --
+func(i int) rune
+
+-- func(string, int) bool-signature --
+func(string, int) bool
+
+-- make(t Type, size ...int) Type-signature --
+make(t Type, size ...int) Type
+
+The make built-in function allocates and initializes an object of type slice, map, or chan (only).
+
+-- myFunc(foo int) string-signature --
+myFunc(foo int) string
+
+-- panic(v interface{})-signature --
+panic(v any)
+
+The panic built-in function stops normal execution of the current goroutine.
+
+-- println(args ...Type)-signature --
+println(args ...Type)
+
+The println built-in function formats its arguments in an implementation-specific way and writes the result to standard error.
+
diff --git a/internal/lsp/testdata/signature/signature2.go.golden b/gopls/internal/lsp/testdata/signature/signature2.go.golden
index e8102584f..e8102584f 100644
--- a/internal/lsp/testdata/signature/signature2.go.golden
+++ b/gopls/internal/lsp/testdata/signature/signature2.go.golden
diff --git a/internal/lsp/testdata/signature/signature2.go.in b/gopls/internal/lsp/testdata/signature/signature2.go.in
index 16355ffc0..16355ffc0 100644
--- a/internal/lsp/testdata/signature/signature2.go.in
+++ b/gopls/internal/lsp/testdata/signature/signature2.go.in
diff --git a/internal/lsp/testdata/signature/signature3.go.golden b/gopls/internal/lsp/testdata/signature/signature3.go.golden
index e8102584f..e8102584f 100644
--- a/internal/lsp/testdata/signature/signature3.go.golden
+++ b/gopls/internal/lsp/testdata/signature/signature3.go.golden
diff --git a/internal/lsp/testdata/signature/signature3.go.in b/gopls/internal/lsp/testdata/signature/signature3.go.in
index 032be1304..032be1304 100644
--- a/internal/lsp/testdata/signature/signature3.go.in
+++ b/gopls/internal/lsp/testdata/signature/signature3.go.in
diff --git a/gopls/internal/lsp/testdata/signature/signature_test.go b/gopls/internal/lsp/testdata/signature/signature_test.go
new file mode 100644
index 000000000..500247dbd
--- /dev/null
+++ b/gopls/internal/lsp/testdata/signature/signature_test.go
@@ -0,0 +1,13 @@
+package signature_test
+
+import (
+ "testing"
+
+ sig "golang.org/lsptests/signature"
+)
+
+func TestSignature(t *testing.T) {
+ sig.AliasSlice() //@signature(")", "AliasSlice(a []*sig.Alias) (b sig.Alias)", 0)
+ sig.AliasMap() //@signature(")", "AliasMap(a map[*sig.Alias]sig.StringAlias) (b map[*sig.Alias]sig.StringAlias, c map[*sig.Alias]sig.StringAlias)", 0)
+ sig.OtherAliasMap() //@signature(")", "OtherAliasMap(a map[sig.Alias]sig.OtherAlias, b map[sig.Alias]sig.OtherAlias) map[sig.Alias]sig.OtherAlias", 0)
+}
diff --git a/gopls/internal/lsp/testdata/signature/signature_test.go.golden b/gopls/internal/lsp/testdata/signature/signature_test.go.golden
new file mode 100644
index 000000000..9e6561ac5
--- /dev/null
+++ b/gopls/internal/lsp/testdata/signature/signature_test.go.golden
@@ -0,0 +1,9 @@
+-- AliasMap(a map[*sig.Alias]sig.StringAlias) (b map[*sig.Alias]sig.StringAlias, c map[*sig.Alias]sig.StringAlias)-signature --
+AliasMap(a map[*sig.Alias]sig.StringAlias) (b map[*sig.Alias]sig.StringAlias, c map[*sig.Alias]sig.StringAlias)
+
+-- AliasSlice(a []*sig.Alias) (b sig.Alias)-signature --
+AliasSlice(a []*sig.Alias) (b sig.Alias)
+
+-- OtherAliasMap(a map[sig.Alias]sig.OtherAlias, b map[sig.Alias]sig.OtherAlias) map[sig.Alias]sig.OtherAlias-signature --
+OtherAliasMap(a map[sig.Alias]sig.OtherAlias, b map[sig.Alias]sig.OtherAlias) map[sig.Alias]sig.OtherAlias
+
diff --git a/internal/lsp/testdata/snippets/func_snippets118.go.in b/gopls/internal/lsp/testdata/snippets/func_snippets118.go.in
index d4933689d..d4933689d 100644
--- a/internal/lsp/testdata/snippets/func_snippets118.go.in
+++ b/gopls/internal/lsp/testdata/snippets/func_snippets118.go.in
diff --git a/gopls/internal/lsp/testdata/snippets/literal.go b/gopls/internal/lsp/testdata/snippets/literal.go
new file mode 100644
index 000000000..fbb642f08
--- /dev/null
+++ b/gopls/internal/lsp/testdata/snippets/literal.go
@@ -0,0 +1,22 @@
+package snippets
+
+import (
+ "golang.org/lsptests/signature"
+ t "golang.org/lsptests/types"
+)
+
+type structy struct {
+ x signature.MyType
+}
+
+func X(_ map[signature.Alias]t.CoolAlias) (map[signature.Alias]t.CoolAlias) {
+ return nil
+}
+
+func _() {
+ X() //@signature(")", "X(_ map[signature.Alias]t.CoolAlias) map[signature.Alias]t.CoolAlias", 0)
+ _ = signature.MyType{} //@item(literalMyType, "signature.MyType{}", "", "var")
+ s := structy{
+ x: //@snippet(" //", literalMyType, "signature.MyType{\\}", "signature.MyType{\\}")
+ }
+} \ No newline at end of file
diff --git a/gopls/internal/lsp/testdata/snippets/literal.go.golden b/gopls/internal/lsp/testdata/snippets/literal.go.golden
new file mode 100644
index 000000000..c91e5e9e0
--- /dev/null
+++ b/gopls/internal/lsp/testdata/snippets/literal.go.golden
@@ -0,0 +1,3 @@
+-- X(_ map[signature.Alias]t.CoolAlias) map[signature.Alias]t.CoolAlias-signature --
+X(_ map[signature.Alias]t.CoolAlias) map[signature.Alias]t.CoolAlias
+
diff --git a/gopls/internal/lsp/testdata/snippets/literal_snippets.go.in b/gopls/internal/lsp/testdata/snippets/literal_snippets.go.in
new file mode 100644
index 000000000..c6e6c0fbd
--- /dev/null
+++ b/gopls/internal/lsp/testdata/snippets/literal_snippets.go.in
@@ -0,0 +1,233 @@
+package snippets
+
+import (
+ "bytes"
+ "context"
+ "go/ast"
+ "net/http"
+ "sort"
+
+ "golang.org/lsptests/foo"
+)
+
+func _() {
+ []int{} //@item(litIntSlice, "[]int{}", "", "var")
+ &[]int{} //@item(litIntSliceAddr, "&[]int{}", "", "var")
+ make([]int, 0) //@item(makeIntSlice, "make([]int, 0)", "", "func")
+
+ var _ *[]int = in //@snippet(" //", litIntSliceAddr, "&[]int{$0\\}", "&[]int{$0\\}")
+ var _ **[]int = in //@complete(" //")
+
+ var slice []int
+ slice = i //@snippet(" //", litIntSlice, "[]int{$0\\}", "[]int{$0\\}")
+ slice = m //@snippet(" //", makeIntSlice, "make([]int, ${1:})", "make([]int, ${1:0})")
+}
+
+func _() {
+ type namedInt []int
+
+ namedInt{} //@item(litNamedSlice, "namedInt{}", "", "var")
+ make(namedInt, 0) //@item(makeNamedSlice, "make(namedInt, 0)", "", "func")
+
+ var namedSlice namedInt
+ namedSlice = n //@snippet(" //", litNamedSlice, "namedInt{$0\\}", "namedInt{$0\\}")
+ namedSlice = m //@snippet(" //", makeNamedSlice, "make(namedInt, ${1:})", "make(namedInt, ${1:0})")
+}
+
+func _() {
+ make(chan int) //@item(makeChan, "make(chan int)", "", "func")
+
+ var ch chan int
+ ch = m //@snippet(" //", makeChan, "make(chan int)", "make(chan int)")
+}
+
+func _() {
+ map[string]struct{}{} //@item(litMap, "map[string]struct{}{}", "", "var")
+ make(map[string]struct{}) //@item(makeMap, "make(map[string]struct{})", "", "func")
+
+ var m map[string]struct{}
+ m = m //@snippet(" //", litMap, "map[string]struct{\\}{$0\\}", "map[string]struct{\\}{$0\\}")
+ m = m //@snippet(" //", makeMap, "make(map[string]struct{\\})", "make(map[string]struct{\\})")
+
+ struct{}{} //@item(litEmptyStruct, "struct{}{}", "", "var")
+
+ m["hi"] = s //@snippet(" //", litEmptyStruct, "struct{\\}{\\}", "struct{\\}{\\}")
+}
+
+func _() {
+ type myStruct struct{ i int } //@item(myStructType, "myStruct", "struct{...}", "struct")
+
+ myStruct{} //@item(litStruct, "myStruct{}", "", "var")
+ &myStruct{} //@item(litStructPtr, "&myStruct{}", "", "var")
+
+ var ms myStruct
+ ms = m //@snippet(" //", litStruct, "myStruct{$0\\}", "myStruct{$0\\}")
+
+ var msPtr *myStruct
+ msPtr = m //@snippet(" //", litStructPtr, "&myStruct{$0\\}", "&myStruct{$0\\}")
+
+ msPtr = &m //@snippet(" //", litStruct, "myStruct{$0\\}", "myStruct{$0\\}")
+
+ type myStructCopy struct { i int } //@item(myStructCopyType, "myStructCopy", "struct{...}", "struct")
+
+ // Don't offer literal completion for convertible structs.
+ ms = myStruct //@complete(" //", litStruct, myStructType, myStructCopyType)
+}
+
+type myImpl struct{}
+
+func (myImpl) foo() {}
+
+func (*myImpl) bar() {}
+
+type myBasicImpl string
+
+func (myBasicImpl) foo() {}
+
+func _() {
+ type myIntf interface {
+ foo()
+ }
+
+ myImpl{} //@item(litImpl, "myImpl{}", "", "var")
+
+ var mi myIntf
+ mi = m //@snippet(" //", litImpl, "myImpl{\\}", "myImpl{\\}")
+
+ myBasicImpl() //@item(litBasicImpl, "myBasicImpl()", "string", "var")
+
+ mi = m //@snippet(" //", litBasicImpl, "myBasicImpl($0)", "myBasicImpl($0)")
+
+ // only satisfied by pointer to myImpl
+ type myPtrIntf interface {
+ bar()
+ }
+
+ &myImpl{} //@item(litImplPtr, "&myImpl{}", "", "var")
+
+ var mpi myPtrIntf
+ mpi = m //@snippet(" //", litImplPtr, "&myImpl{\\}", "&myImpl{\\}")
+}
+
+func _() {
+ var s struct{ i []int } //@item(litSliceField, "i", "[]int", "field")
+ var foo []int
+ // no literal completions after selector
+ foo = s.i //@complete(" //", litSliceField)
+}
+
+func _() {
+ type myStruct struct{ i int } //@item(litMyStructType, "myStruct", "struct{...}", "struct")
+ myStruct{} //@item(litMyStruct, "myStruct{}", "", "var")
+
+ foo := func(s string, args ...myStruct) {}
+ // Don't give literal slice candidate for variadic arg.
+ // Do give literal candidates for variadic element.
+ foo("", myStruct) //@complete(")", litMyStruct, litMyStructType)
+}
+
+func _() {
+ Buffer{} //@item(litBuffer, "Buffer{}", "", "var")
+
+ var b *bytes.Buffer
+ b = bytes.Bu //@snippet(" //", litBuffer, "Buffer{\\}", "Buffer{\\}")
+}
+
+func _() {
+ _ = "func(...) {}" //@item(litFunc, "func(...) {}", "", "var")
+
+ sort.Slice(nil, fun) //@complete(")", litFunc),snippet(")", litFunc, "func(i, j int) bool {$0\\}", "func(i, j int) bool {$0\\}")
+
+ http.HandleFunc("", f) //@snippet(")", litFunc, "func(w http.ResponseWriter, r *http.Request) {$0\\}", "func(${1:w} http.ResponseWriter, ${2:r} *http.Request) {$0\\}")
+
+ // no literal "func" completions
+ http.Handle("", fun) //@complete(")")
+
+ http.HandlerFunc() //@item(handlerFunc, "http.HandlerFunc()", "", "var")
+ http.Handle("", h) //@snippet(")", handlerFunc, "http.HandlerFunc($0)", "http.HandlerFunc($0)")
+ http.Handle("", http.HandlerFunc()) //@snippet("))", litFunc, "func(w http.ResponseWriter, r *http.Request) {$0\\}", "func(${1:w} http.ResponseWriter, ${2:r} *http.Request) {$0\\}")
+
+ var namedReturn func(s string) (b bool)
+ namedReturn = f //@snippet(" //", litFunc, "func(s string) (b bool) {$0\\}", "func(s string) (b bool) {$0\\}")
+
+ var multiReturn func() (bool, int)
+ multiReturn = f //@snippet(" //", litFunc, "func() (bool, int) {$0\\}", "func() (bool, int) {$0\\}")
+
+ var multiNamedReturn func() (b bool, i int)
+ multiNamedReturn = f //@snippet(" //", litFunc, "func() (b bool, i int) {$0\\}", "func() (b bool, i int) {$0\\}")
+
+ var duplicateParams func(myImpl, int, myImpl)
+ duplicateParams = f //@snippet(" //", litFunc, "func(mi1 myImpl, i int, mi2 myImpl) {$0\\}", "func(${1:mi1} myImpl, ${2:i} int, ${3:mi2} myImpl) {$0\\}")
+
+ type aliasImpl = myImpl
+ var aliasParams func(aliasImpl) aliasImpl
+ aliasParams = f //@snippet(" //", litFunc, "func(ai aliasImpl) aliasImpl {$0\\}", "func(${1:ai} aliasImpl) aliasImpl {$0\\}")
+
+ const two = 2
+ var builtinTypes func([]int, [two]bool, map[string]string, struct{ i int }, interface{ foo() }, <-chan int)
+ builtinTypes = f //@snippet(" //", litFunc, "func(i1 []int, b [two]bool, m map[string]string, s struct{ i int \\}, i2 interface{ foo() \\}, c <-chan int) {$0\\}", "func(${1:i1} []int, ${2:b} [two]bool, ${3:m} map[string]string, ${4:s} struct{ i int \\}, ${5:i2} interface{ foo() \\}, ${6:c} <-chan int) {$0\\}")
+
+ var _ func(ast.Node) = f //@snippet(" //", litFunc, "func(n ast.Node) {$0\\}", "func(${1:n} ast.Node) {$0\\}")
+ var _ func(error) = f //@snippet(" //", litFunc, "func(err error) {$0\\}", "func(${1:err} error) {$0\\}")
+ var _ func(context.Context) = f //@snippet(" //", litFunc, "func(ctx context.Context) {$0\\}", "func(${1:ctx} context.Context) {$0\\}")
+
+ type context struct {}
+ var _ func(context) = f //@snippet(" //", litFunc, "func(ctx context) {$0\\}", "func(${1:ctx} context) {$0\\}")
+}
+
+func _() {
+ StructFoo{} //@item(litStructFoo, "StructFoo{}", "struct{...}", "struct")
+
+ var sfp *foo.StructFoo
+ // Don't insert the "&" before "StructFoo{}".
+ sfp = foo.Str //@snippet(" //", litStructFoo, "StructFoo{$0\\}", "StructFoo{$0\\}")
+
+ var sf foo.StructFoo
+ sf = foo.Str //@snippet(" //", litStructFoo, "StructFoo{$0\\}", "StructFoo{$0\\}")
+ sf = foo. //@snippet(" //", litStructFoo, "StructFoo{$0\\}", "StructFoo{$0\\}")
+}
+
+func _() {
+ float64() //@item(litFloat64, "float64()", "float64", "var")
+
+ // don't complete to "&float64()"
+ var _ *float64 = float64 //@complete(" //")
+
+ var f float64
+ f = fl //@complete(" //", litFloat64),snippet(" //", litFloat64, "float64($0)", "float64($0)")
+
+ type myInt int
+ myInt() //@item(litMyInt, "myInt()", "", "var")
+
+ var mi myInt
+ mi = my //@snippet(" //", litMyInt, "myInt($0)", "myInt($0)")
+}
+
+func _() {
+ type ptrStruct struct {
+ p *ptrStruct
+ }
+
+ ptrStruct{} //@item(litPtrStruct, "ptrStruct{}", "", "var")
+
+ ptrStruct{
+ p: &ptrSt, //@rank(",", litPtrStruct)
+ }
+
+ &ptrStruct{} //@item(litPtrStructPtr, "&ptrStruct{}", "", "var")
+
+ &ptrStruct{
+ p: ptrSt, //@rank(",", litPtrStructPtr)
+ }
+}
+
+func _() {
+ f := func(...[]int) {}
+ f() //@snippet(")", litIntSlice, "[]int{$0\\}", "[]int{$0\\}")
+}
+
+
+func _() {
+ // don't complete to "untyped int()"
+ []int{}[untyped] //@complete("] //")
+}
diff --git a/internal/lsp/testdata/snippets/literal_snippets118.go.in b/gopls/internal/lsp/testdata/snippets/literal_snippets118.go.in
index 8251a6384..8251a6384 100644
--- a/internal/lsp/testdata/snippets/literal_snippets118.go.in
+++ b/gopls/internal/lsp/testdata/snippets/literal_snippets118.go.in
diff --git a/internal/lsp/testdata/snippets/postfix.go b/gopls/internal/lsp/testdata/snippets/postfix.go
index d29694e83..d29694e83 100644
--- a/internal/lsp/testdata/snippets/postfix.go
+++ b/gopls/internal/lsp/testdata/snippets/postfix.go
diff --git a/internal/lsp/testdata/snippets/snippets.go.golden b/gopls/internal/lsp/testdata/snippets/snippets.go.golden
index 3f20ba50b..3f20ba50b 100644
--- a/internal/lsp/testdata/snippets/snippets.go.golden
+++ b/gopls/internal/lsp/testdata/snippets/snippets.go.golden
diff --git a/internal/lsp/testdata/snippets/snippets.go.in b/gopls/internal/lsp/testdata/snippets/snippets.go.in
index 58150c644..58150c644 100644
--- a/internal/lsp/testdata/snippets/snippets.go.in
+++ b/gopls/internal/lsp/testdata/snippets/snippets.go.in
diff --git a/internal/lsp/testdata/statements/append.go b/gopls/internal/lsp/testdata/statements/append.go
index 0eea85a28..0eea85a28 100644
--- a/internal/lsp/testdata/statements/append.go
+++ b/gopls/internal/lsp/testdata/statements/append.go
diff --git a/internal/lsp/testdata/statements/if_err_check_return.go b/gopls/internal/lsp/testdata/statements/if_err_check_return.go
index e82b78333..e82b78333 100644
--- a/internal/lsp/testdata/statements/if_err_check_return.go
+++ b/gopls/internal/lsp/testdata/statements/if_err_check_return.go
diff --git a/internal/lsp/testdata/statements/if_err_check_return_2.go b/gopls/internal/lsp/testdata/statements/if_err_check_return_2.go
index e2dce804f..e2dce804f 100644
--- a/internal/lsp/testdata/statements/if_err_check_return_2.go
+++ b/gopls/internal/lsp/testdata/statements/if_err_check_return_2.go
diff --git a/internal/lsp/testdata/statements/if_err_check_test.go b/gopls/internal/lsp/testdata/statements/if_err_check_test.go
index 6de587879..6de587879 100644
--- a/internal/lsp/testdata/statements/if_err_check_test.go
+++ b/gopls/internal/lsp/testdata/statements/if_err_check_test.go
diff --git a/internal/lsp/testdata/stub/other/other.go b/gopls/internal/lsp/testdata/stub/other/other.go
index ba3c1747a..ba3c1747a 100644
--- a/internal/lsp/testdata/stub/other/other.go
+++ b/gopls/internal/lsp/testdata/stub/other/other.go
diff --git a/gopls/internal/lsp/testdata/stub/stub_add_selector.go b/gopls/internal/lsp/testdata/stub/stub_add_selector.go
new file mode 100644
index 000000000..4037b7ad3
--- /dev/null
+++ b/gopls/internal/lsp/testdata/stub/stub_add_selector.go
@@ -0,0 +1,12 @@
+package stub
+
+import "io"
+
+// This file tests that if an interface
+// method references a type from its own package
+// then our implementation must add the import/package selector
+// in the concrete method if the concrete type is outside of the interface
+// package
+var _ io.ReaderFrom = &readerFrom{} //@suggestedfix("&readerFrom", "refactor.rewrite", "")
+
+type readerFrom struct{}
diff --git a/gopls/internal/lsp/testdata/stub/stub_add_selector.go.golden b/gopls/internal/lsp/testdata/stub/stub_add_selector.go.golden
new file mode 100644
index 000000000..8f08ca1ef
--- /dev/null
+++ b/gopls/internal/lsp/testdata/stub/stub_add_selector.go.golden
@@ -0,0 +1,19 @@
+-- suggestedfix_stub_add_selector_10_23 --
+package stub
+
+import "io"
+
+// This file tests that if an interface
+// method references a type from its own package
+// then our implementation must add the import/package selector
+// in the concrete method if the concrete type is outside of the interface
+// package
+var _ io.ReaderFrom = &readerFrom{} //@suggestedfix("&readerFrom", "refactor.rewrite", "")
+
+type readerFrom struct{}
+
+// ReadFrom implements io.ReaderFrom
+func (*readerFrom) ReadFrom(r io.Reader) (n int64, err error) {
+ panic("unimplemented")
+}
+
diff --git a/gopls/internal/lsp/testdata/stub/stub_assign.go b/gopls/internal/lsp/testdata/stub/stub_assign.go
new file mode 100644
index 000000000..d3f09313f
--- /dev/null
+++ b/gopls/internal/lsp/testdata/stub/stub_assign.go
@@ -0,0 +1,10 @@
+package stub
+
+import "io"
+
+func main() {
+ var br io.ByteWriter
+ br = &byteWriter{} //@suggestedfix("&", "refactor.rewrite", "")
+}
+
+type byteWriter struct{}
diff --git a/gopls/internal/lsp/testdata/stub/stub_assign.go.golden b/gopls/internal/lsp/testdata/stub/stub_assign.go.golden
new file mode 100644
index 000000000..f15354241
--- /dev/null
+++ b/gopls/internal/lsp/testdata/stub/stub_assign.go.golden
@@ -0,0 +1,17 @@
+-- suggestedfix_stub_assign_7_7 --
+package stub
+
+import "io"
+
+func main() {
+ var br io.ByteWriter
+ br = &byteWriter{} //@suggestedfix("&", "refactor.rewrite", "")
+}
+
+type byteWriter struct{}
+
+// WriteByte implements io.ByteWriter
+func (*byteWriter) WriteByte(c byte) error {
+ panic("unimplemented")
+}
+
diff --git a/gopls/internal/lsp/testdata/stub/stub_assign_multivars.go b/gopls/internal/lsp/testdata/stub/stub_assign_multivars.go
new file mode 100644
index 000000000..bd36d6833
--- /dev/null
+++ b/gopls/internal/lsp/testdata/stub/stub_assign_multivars.go
@@ -0,0 +1,11 @@
+package stub
+
+import "io"
+
+func main() {
+ var br io.ByteWriter
+ var i int
+ i, br = 1, &multiByteWriter{} //@suggestedfix("&", "refactor.rewrite", "")
+}
+
+type multiByteWriter struct{}
diff --git a/gopls/internal/lsp/testdata/stub/stub_assign_multivars.go.golden b/gopls/internal/lsp/testdata/stub/stub_assign_multivars.go.golden
new file mode 100644
index 000000000..425d11746
--- /dev/null
+++ b/gopls/internal/lsp/testdata/stub/stub_assign_multivars.go.golden
@@ -0,0 +1,18 @@
+-- suggestedfix_stub_assign_multivars_8_13 --
+package stub
+
+import "io"
+
+func main() {
+ var br io.ByteWriter
+ var i int
+ i, br = 1, &multiByteWriter{} //@suggestedfix("&", "refactor.rewrite", "")
+}
+
+type multiByteWriter struct{}
+
+// WriteByte implements io.ByteWriter
+func (*multiByteWriter) WriteByte(c byte) error {
+ panic("unimplemented")
+}
+
diff --git a/gopls/internal/lsp/testdata/stub/stub_call_expr.go b/gopls/internal/lsp/testdata/stub/stub_call_expr.go
new file mode 100644
index 000000000..0c3094665
--- /dev/null
+++ b/gopls/internal/lsp/testdata/stub/stub_call_expr.go
@@ -0,0 +1,13 @@
+package stub
+
+func main() {
+ check(&callExpr{}) //@suggestedfix("&", "refactor.rewrite", "")
+}
+
+func check(err error) {
+ if err != nil {
+ panic(err)
+ }
+}
+
+type callExpr struct{}
diff --git a/gopls/internal/lsp/testdata/stub/stub_call_expr.go.golden b/gopls/internal/lsp/testdata/stub/stub_call_expr.go.golden
new file mode 100644
index 000000000..c82d22440
--- /dev/null
+++ b/gopls/internal/lsp/testdata/stub/stub_call_expr.go.golden
@@ -0,0 +1,20 @@
+-- suggestedfix_stub_call_expr_4_8 --
+package stub
+
+func main() {
+ check(&callExpr{}) //@suggestedfix("&", "refactor.rewrite", "")
+}
+
+func check(err error) {
+ if err != nil {
+ panic(err)
+ }
+}
+
+type callExpr struct{}
+
+// Error implements error
+func (*callExpr) Error() string {
+ panic("unimplemented")
+}
+
diff --git a/gopls/internal/lsp/testdata/stub/stub_embedded.go b/gopls/internal/lsp/testdata/stub/stub_embedded.go
new file mode 100644
index 000000000..f66989e9f
--- /dev/null
+++ b/gopls/internal/lsp/testdata/stub/stub_embedded.go
@@ -0,0 +1,15 @@
+package stub
+
+import (
+ "io"
+ "sort"
+)
+
+var _ embeddedInterface = (*embeddedConcrete)(nil) //@suggestedfix("(", "refactor.rewrite", "")
+
+type embeddedConcrete struct{}
+
+type embeddedInterface interface {
+ sort.Interface
+ io.Reader
+}
diff --git a/gopls/internal/lsp/testdata/stub/stub_embedded.go.golden b/gopls/internal/lsp/testdata/stub/stub_embedded.go.golden
new file mode 100644
index 000000000..c1ec219e9
--- /dev/null
+++ b/gopls/internal/lsp/testdata/stub/stub_embedded.go.golden
@@ -0,0 +1,37 @@
+-- suggestedfix_stub_embedded_8_27 --
+package stub
+
+import (
+ "io"
+ "sort"
+)
+
+var _ embeddedInterface = (*embeddedConcrete)(nil) //@suggestedfix("(", "refactor.rewrite", "")
+
+type embeddedConcrete struct{}
+
+// Len implements embeddedInterface
+func (*embeddedConcrete) Len() int {
+ panic("unimplemented")
+}
+
+// Less implements embeddedInterface
+func (*embeddedConcrete) Less(i int, j int) bool {
+ panic("unimplemented")
+}
+
+// Read implements embeddedInterface
+func (*embeddedConcrete) Read(p []byte) (n int, err error) {
+ panic("unimplemented")
+}
+
+// Swap implements embeddedInterface
+func (*embeddedConcrete) Swap(i int, j int) {
+ panic("unimplemented")
+}
+
+type embeddedInterface interface {
+ sort.Interface
+ io.Reader
+}
+
diff --git a/gopls/internal/lsp/testdata/stub/stub_err.go b/gopls/internal/lsp/testdata/stub/stub_err.go
new file mode 100644
index 000000000..121f0e794
--- /dev/null
+++ b/gopls/internal/lsp/testdata/stub/stub_err.go
@@ -0,0 +1,7 @@
+package stub
+
+func main() {
+ var br error = &customErr{} //@suggestedfix("&", "refactor.rewrite", "")
+}
+
+type customErr struct{}
diff --git a/gopls/internal/lsp/testdata/stub/stub_err.go.golden b/gopls/internal/lsp/testdata/stub/stub_err.go.golden
new file mode 100644
index 000000000..0b441bdaa
--- /dev/null
+++ b/gopls/internal/lsp/testdata/stub/stub_err.go.golden
@@ -0,0 +1,14 @@
+-- suggestedfix_stub_err_4_17 --
+package stub
+
+func main() {
+ var br error = &customErr{} //@suggestedfix("&", "refactor.rewrite", "")
+}
+
+type customErr struct{}
+
+// Error implements error
+func (*customErr) Error() string {
+ panic("unimplemented")
+}
+
diff --git a/gopls/internal/lsp/testdata/stub/stub_function_return.go b/gopls/internal/lsp/testdata/stub/stub_function_return.go
new file mode 100644
index 000000000..41f17645e
--- /dev/null
+++ b/gopls/internal/lsp/testdata/stub/stub_function_return.go
@@ -0,0 +1,11 @@
+package stub
+
+import (
+ "io"
+)
+
+func newCloser() io.Closer {
+ return closer{} //@suggestedfix("c", "refactor.rewrite", "")
+}
+
+type closer struct{}
diff --git a/gopls/internal/lsp/testdata/stub/stub_function_return.go.golden b/gopls/internal/lsp/testdata/stub/stub_function_return.go.golden
new file mode 100644
index 000000000..e90712e69
--- /dev/null
+++ b/gopls/internal/lsp/testdata/stub/stub_function_return.go.golden
@@ -0,0 +1,18 @@
+-- suggestedfix_stub_function_return_8_9 --
+package stub
+
+import (
+ "io"
+)
+
+func newCloser() io.Closer {
+ return closer{} //@suggestedfix("c", "refactor.rewrite", "")
+}
+
+type closer struct{}
+
+// Close implements io.Closer
+func (closer) Close() error {
+ panic("unimplemented")
+}
+
diff --git a/gopls/internal/lsp/testdata/stub/stub_generic_receiver.go b/gopls/internal/lsp/testdata/stub/stub_generic_receiver.go
new file mode 100644
index 000000000..1c00569ea
--- /dev/null
+++ b/gopls/internal/lsp/testdata/stub/stub_generic_receiver.go
@@ -0,0 +1,15 @@
+//go:build go1.18
+// +build go1.18
+
+package stub
+
+import "io"
+
+// This file tests that that the stub method generator accounts for concrete
+// types that have type parameters defined.
+var _ io.ReaderFrom = &genReader[string, int]{} //@suggestedfix("&genReader", "refactor.rewrite", "Implement io.ReaderFrom")
+
+type genReader[T, Y any] struct {
+ T T
+ Y Y
+}
diff --git a/gopls/internal/lsp/testdata/stub/stub_generic_receiver.go.golden b/gopls/internal/lsp/testdata/stub/stub_generic_receiver.go.golden
new file mode 100644
index 000000000..97935d47e
--- /dev/null
+++ b/gopls/internal/lsp/testdata/stub/stub_generic_receiver.go.golden
@@ -0,0 +1,22 @@
+-- suggestedfix_stub_generic_receiver_10_23 --
+//go:build go1.18
+// +build go1.18
+
+package stub
+
+import "io"
+
+// This file tests that that the stub method generator accounts for concrete
+// types that have type parameters defined.
+var _ io.ReaderFrom = &genReader[string, int]{} //@suggestedfix("&genReader", "refactor.rewrite", "Implement io.ReaderFrom")
+
+type genReader[T, Y any] struct {
+ T T
+ Y Y
+}
+
+// ReadFrom implements io.ReaderFrom
+func (*genReader[T, Y]) ReadFrom(r io.Reader) (n int64, err error) {
+ panic("unimplemented")
+}
+
diff --git a/gopls/internal/lsp/testdata/stub/stub_ignored_imports.go b/gopls/internal/lsp/testdata/stub/stub_ignored_imports.go
new file mode 100644
index 000000000..ca95d2a71
--- /dev/null
+++ b/gopls/internal/lsp/testdata/stub/stub_ignored_imports.go
@@ -0,0 +1,18 @@
+package stub
+
+import (
+ "compress/zlib"
+ . "io"
+ _ "io"
+)
+
+// This file tests that dot-imports and underscore imports
+// are properly ignored and that a new import is added to
+// reference method types
+
+var (
+ _ Reader
+ _ zlib.Resetter = (*ignoredResetter)(nil) //@suggestedfix("(", "refactor.rewrite", "")
+)
+
+type ignoredResetter struct{}
diff --git a/gopls/internal/lsp/testdata/stub/stub_ignored_imports.go.golden b/gopls/internal/lsp/testdata/stub/stub_ignored_imports.go.golden
new file mode 100644
index 000000000..d4ab9d86a
--- /dev/null
+++ b/gopls/internal/lsp/testdata/stub/stub_ignored_imports.go.golden
@@ -0,0 +1,25 @@
+-- suggestedfix_stub_ignored_imports_15_20 --
+package stub
+
+import (
+ "compress/zlib"
+ . "io"
+ _ "io"
+)
+
+// This file tests that dot-imports and underscore imports
+// are properly ignored and that a new import is added to
+// reference method types
+
+var (
+ _ Reader
+ _ zlib.Resetter = (*ignoredResetter)(nil) //@suggestedfix("(", "refactor.rewrite", "")
+)
+
+type ignoredResetter struct{}
+
+// Reset implements zlib.Resetter
+func (*ignoredResetter) Reset(r Reader, dict []byte) error {
+ panic("unimplemented")
+}
+
diff --git a/gopls/internal/lsp/testdata/stub/stub_issue2606.go b/gopls/internal/lsp/testdata/stub/stub_issue2606.go
new file mode 100644
index 000000000..66ef2b24b
--- /dev/null
+++ b/gopls/internal/lsp/testdata/stub/stub_issue2606.go
@@ -0,0 +1,7 @@
+package stub
+
+type I interface{ error }
+
+type C int
+
+var _ I = C(0) //@suggestedfix("C", "refactor.rewrite", "")
diff --git a/gopls/internal/lsp/testdata/stub/stub_issue2606.go.golden b/gopls/internal/lsp/testdata/stub/stub_issue2606.go.golden
new file mode 100644
index 000000000..4db266346
--- /dev/null
+++ b/gopls/internal/lsp/testdata/stub/stub_issue2606.go.golden
@@ -0,0 +1,14 @@
+-- suggestedfix_stub_issue2606_7_11 --
+package stub
+
+type I interface{ error }
+
+type C int
+
+// Error implements I
+func (C) Error() string {
+ panic("unimplemented")
+}
+
+var _ I = C(0) //@suggestedfix("C", "refactor.rewrite", "")
+
diff --git a/gopls/internal/lsp/testdata/stub/stub_multi_var.go b/gopls/internal/lsp/testdata/stub/stub_multi_var.go
new file mode 100644
index 000000000..06702b222
--- /dev/null
+++ b/gopls/internal/lsp/testdata/stub/stub_multi_var.go
@@ -0,0 +1,11 @@
+package stub
+
+import "io"
+
+// This test ensures that a variable declaration that
+// has multiple values on the same line can still be
+// analyzed correctly to target the interface implementation
+// diagnostic.
+var one, two, three io.Reader = nil, &multiVar{}, nil //@suggestedfix("&", "refactor.rewrite", "")
+
+type multiVar struct{}
diff --git a/gopls/internal/lsp/testdata/stub/stub_multi_var.go.golden b/gopls/internal/lsp/testdata/stub/stub_multi_var.go.golden
new file mode 100644
index 000000000..804c7eec6
--- /dev/null
+++ b/gopls/internal/lsp/testdata/stub/stub_multi_var.go.golden
@@ -0,0 +1,18 @@
+-- suggestedfix_stub_multi_var_9_38 --
+package stub
+
+import "io"
+
+// This test ensures that a variable declaration that
+// has multiple values on the same line can still be
+// analyzed correctly to target the interface implementation
+// diagnostic.
+var one, two, three io.Reader = nil, &multiVar{}, nil //@suggestedfix("&", "refactor.rewrite", "")
+
+type multiVar struct{}
+
+// Read implements io.Reader
+func (*multiVar) Read(p []byte) (n int, err error) {
+ panic("unimplemented")
+}
+
diff --git a/gopls/internal/lsp/testdata/stub/stub_pointer.go b/gopls/internal/lsp/testdata/stub/stub_pointer.go
new file mode 100644
index 000000000..e9d8bc688
--- /dev/null
+++ b/gopls/internal/lsp/testdata/stub/stub_pointer.go
@@ -0,0 +1,9 @@
+package stub
+
+import "io"
+
+func getReaderFrom() io.ReaderFrom {
+ return &pointerImpl{} //@suggestedfix("&", "refactor.rewrite", "")
+}
+
+type pointerImpl struct{}
diff --git a/gopls/internal/lsp/testdata/stub/stub_pointer.go.golden b/gopls/internal/lsp/testdata/stub/stub_pointer.go.golden
new file mode 100644
index 000000000..a4d765dd4
--- /dev/null
+++ b/gopls/internal/lsp/testdata/stub/stub_pointer.go.golden
@@ -0,0 +1,16 @@
+-- suggestedfix_stub_pointer_6_9 --
+package stub
+
+import "io"
+
+func getReaderFrom() io.ReaderFrom {
+ return &pointerImpl{} //@suggestedfix("&", "refactor.rewrite", "")
+}
+
+type pointerImpl struct{}
+
+// ReadFrom implements io.ReaderFrom
+func (*pointerImpl) ReadFrom(r io.Reader) (n int64, err error) {
+ panic("unimplemented")
+}
+
diff --git a/gopls/internal/lsp/testdata/stub/stub_renamed_import.go b/gopls/internal/lsp/testdata/stub/stub_renamed_import.go
new file mode 100644
index 000000000..54dd59801
--- /dev/null
+++ b/gopls/internal/lsp/testdata/stub/stub_renamed_import.go
@@ -0,0 +1,11 @@
+package stub
+
+import (
+ "compress/zlib"
+ myio "io"
+)
+
+var _ zlib.Resetter = &myIO{} //@suggestedfix("&", "refactor.rewrite", "")
+var _ myio.Reader
+
+type myIO struct{}
diff --git a/gopls/internal/lsp/testdata/stub/stub_renamed_import.go.golden b/gopls/internal/lsp/testdata/stub/stub_renamed_import.go.golden
new file mode 100644
index 000000000..8182d2b36
--- /dev/null
+++ b/gopls/internal/lsp/testdata/stub/stub_renamed_import.go.golden
@@ -0,0 +1,18 @@
+-- suggestedfix_stub_renamed_import_8_23 --
+package stub
+
+import (
+ "compress/zlib"
+ myio "io"
+)
+
+var _ zlib.Resetter = &myIO{} //@suggestedfix("&", "refactor.rewrite", "")
+var _ myio.Reader
+
+type myIO struct{}
+
+// Reset implements zlib.Resetter
+func (*myIO) Reset(r myio.Reader, dict []byte) error {
+ panic("unimplemented")
+}
+
diff --git a/gopls/internal/lsp/testdata/stub/stub_renamed_import_iface.go b/gopls/internal/lsp/testdata/stub/stub_renamed_import_iface.go
new file mode 100644
index 000000000..0f1758685
--- /dev/null
+++ b/gopls/internal/lsp/testdata/stub/stub_renamed_import_iface.go
@@ -0,0 +1,13 @@
+package stub
+
+import (
+ "golang.org/lsptests/stub/other"
+)
+
+// This file tests that if an interface
+// method references an import from its own package
+// that the concrete type does not yet import, and that import happens
+// to be renamed, then we prefer the renaming of the interface.
+var _ other.Interface = &otherInterfaceImpl{} //@suggestedfix("&otherInterfaceImpl", "refactor.rewrite", "")
+
+type otherInterfaceImpl struct{}
diff --git a/gopls/internal/lsp/testdata/stub/stub_renamed_import_iface.go.golden b/gopls/internal/lsp/testdata/stub/stub_renamed_import_iface.go.golden
new file mode 100644
index 000000000..d9c621584
--- /dev/null
+++ b/gopls/internal/lsp/testdata/stub/stub_renamed_import_iface.go.golden
@@ -0,0 +1,22 @@
+-- suggestedfix_stub_renamed_import_iface_11_25 --
+package stub
+
+import (
+ "bytes"
+ "context"
+ "golang.org/lsptests/stub/other"
+)
+
+// This file tests that if an interface
+// method references an import from its own package
+// that the concrete type does not yet import, and that import happens
+// to be renamed, then we prefer the renaming of the interface.
+var _ other.Interface = &otherInterfaceImpl{} //@suggestedfix("&otherInterfaceImpl", "refactor.rewrite", "")
+
+type otherInterfaceImpl struct{}
+
+// Get implements other.Interface
+func (*otherInterfaceImpl) Get(context.Context) *bytes.Buffer {
+ panic("unimplemented")
+}
+
diff --git a/gopls/internal/lsp/testdata/stub/stub_stdlib.go b/gopls/internal/lsp/testdata/stub/stub_stdlib.go
new file mode 100644
index 000000000..463cf78a3
--- /dev/null
+++ b/gopls/internal/lsp/testdata/stub/stub_stdlib.go
@@ -0,0 +1,9 @@
+package stub
+
+import (
+ "io"
+)
+
+var _ io.Writer = writer{} //@suggestedfix("w", "refactor.rewrite", "")
+
+type writer struct{}
diff --git a/gopls/internal/lsp/testdata/stub/stub_stdlib.go.golden b/gopls/internal/lsp/testdata/stub/stub_stdlib.go.golden
new file mode 100644
index 000000000..55592501a
--- /dev/null
+++ b/gopls/internal/lsp/testdata/stub/stub_stdlib.go.golden
@@ -0,0 +1,16 @@
+-- suggestedfix_stub_stdlib_7_19 --
+package stub
+
+import (
+ "io"
+)
+
+var _ io.Writer = writer{} //@suggestedfix("w", "refactor.rewrite", "")
+
+type writer struct{}
+
+// Write implements io.Writer
+func (writer) Write(p []byte) (n int, err error) {
+ panic("unimplemented")
+}
+
diff --git a/gopls/internal/lsp/testdata/stub/stub_typedecl_group.go b/gopls/internal/lsp/testdata/stub/stub_typedecl_group.go
new file mode 100644
index 000000000..f82401faf
--- /dev/null
+++ b/gopls/internal/lsp/testdata/stub/stub_typedecl_group.go
@@ -0,0 +1,27 @@
+package stub
+
+// Regression test for Issue #56825: file corrupted by insertion of
+// methods after TypeSpec in a parenthesized TypeDecl.
+
+import "io"
+
+func newReadCloser() io.ReadCloser {
+ return rdcloser{} //@suggestedfix("rd", "refactor.rewrite", "")
+}
+
+type (
+ A int
+ rdcloser struct{}
+ B int
+)
+
+func _() {
+ // Local types can't be stubbed as there's nowhere to put the methods.
+ // The suggestedfix assertion can't express this yet. TODO(adonovan): support it.
+ type local struct{}
+ var _ io.ReadCloser = local{} // want error: `local type "local" cannot be stubbed`
+}
+
+type (
+ C int
+)
diff --git a/gopls/internal/lsp/testdata/stub/stub_typedecl_group.go.golden b/gopls/internal/lsp/testdata/stub/stub_typedecl_group.go.golden
new file mode 100644
index 000000000..0848789ea
--- /dev/null
+++ b/gopls/internal/lsp/testdata/stub/stub_typedecl_group.go.golden
@@ -0,0 +1,39 @@
+-- suggestedfix_stub_typedecl_group_9_9 --
+package stub
+
+// Regression test for Issue #56825: file corrupted by insertion of
+// methods after TypeSpec in a parenthesized TypeDecl.
+
+import "io"
+
+func newReadCloser() io.ReadCloser {
+ return rdcloser{} //@suggestedfix("rd", "refactor.rewrite", "")
+}
+
+type (
+ A int
+ rdcloser struct{}
+ B int
+)
+
+// Close implements io.ReadCloser
+func (rdcloser) Close() error {
+ panic("unimplemented")
+}
+
+// Read implements io.ReadCloser
+func (rdcloser) Read(p []byte) (n int, err error) {
+ panic("unimplemented")
+}
+
+func _() {
+ // Local types can't be stubbed as there's nowhere to put the methods.
+ // The suggestedfix assertion can't express this yet. TODO(adonovan): support it.
+ type local struct{}
+ var _ io.ReadCloser = local{} // want error: `local type "local" cannot be stubbed`
+}
+
+type (
+ C int
+)
+
diff --git a/gopls/internal/lsp/testdata/suggestedfix/has_suggested_fix.go b/gopls/internal/lsp/testdata/suggestedfix/has_suggested_fix.go
new file mode 100644
index 000000000..7ff524479
--- /dev/null
+++ b/gopls/internal/lsp/testdata/suggestedfix/has_suggested_fix.go
@@ -0,0 +1,11 @@
+package suggestedfix
+
+import (
+ "log"
+)
+
+func goodbye() {
+ s := "hiiiiiii"
+ s = s //@suggestedfix("s = s", "quickfix", "")
+ log.Print(s)
+}
diff --git a/gopls/internal/lsp/testdata/suggestedfix/has_suggested_fix.go.golden b/gopls/internal/lsp/testdata/suggestedfix/has_suggested_fix.go.golden
new file mode 100644
index 000000000..e7e84fc22
--- /dev/null
+++ b/gopls/internal/lsp/testdata/suggestedfix/has_suggested_fix.go.golden
@@ -0,0 +1,13 @@
+-- suggestedfix_has_suggested_fix_9_2 --
+package suggestedfix
+
+import (
+ "log"
+)
+
+func goodbye() {
+ s := "hiiiiiii"
+ //@suggestedfix("s = s", "quickfix", "")
+ log.Print(s)
+}
+
diff --git a/gopls/internal/lsp/testdata/summary.txt.golden b/gopls/internal/lsp/testdata/summary.txt.golden
new file mode 100644
index 000000000..985361ba7
--- /dev/null
+++ b/gopls/internal/lsp/testdata/summary.txt.golden
@@ -0,0 +1,32 @@
+-- summary --
+CallHierarchyCount = 2
+CodeLensCount = 5
+CompletionsCount = 263
+CompletionSnippetCount = 106
+UnimportedCompletionsCount = 5
+DeepCompletionsCount = 5
+FuzzyCompletionsCount = 8
+RankedCompletionsCount = 164
+CaseSensitiveCompletionsCount = 4
+DiagnosticsCount = 42
+FoldingRangesCount = 2
+FormatCount = 6
+ImportCount = 8
+SemanticTokenCount = 3
+SuggestedFixCount = 65
+FunctionExtractionCount = 27
+MethodExtractionCount = 6
+DefinitionsCount = 47
+TypeDefinitionsCount = 18
+HighlightsCount = 69
+InlayHintsCount = 4
+ReferencesCount = 30
+RenamesCount = 41
+PrepareRenamesCount = 7
+SymbolsCount = 1
+WorkspaceSymbolsCount = 20
+SignaturesCount = 33
+LinksCount = 7
+ImplementationsCount = 16
+SelectionRangesCount = 3
+
diff --git a/gopls/internal/lsp/testdata/summary_go1.18.txt.golden b/gopls/internal/lsp/testdata/summary_go1.18.txt.golden
new file mode 100644
index 000000000..9ae4d1364
--- /dev/null
+++ b/gopls/internal/lsp/testdata/summary_go1.18.txt.golden
@@ -0,0 +1,32 @@
+-- summary --
+CallHierarchyCount = 2
+CodeLensCount = 5
+CompletionsCount = 264
+CompletionSnippetCount = 115
+UnimportedCompletionsCount = 5
+DeepCompletionsCount = 5
+FuzzyCompletionsCount = 8
+RankedCompletionsCount = 174
+CaseSensitiveCompletionsCount = 4
+DiagnosticsCount = 42
+FoldingRangesCount = 2
+FormatCount = 6
+ImportCount = 8
+SemanticTokenCount = 3
+SuggestedFixCount = 71
+FunctionExtractionCount = 27
+MethodExtractionCount = 6
+DefinitionsCount = 47
+TypeDefinitionsCount = 18
+HighlightsCount = 69
+InlayHintsCount = 5
+ReferencesCount = 30
+RenamesCount = 48
+PrepareRenamesCount = 7
+SymbolsCount = 2
+WorkspaceSymbolsCount = 20
+SignaturesCount = 33
+LinksCount = 7
+ImplementationsCount = 26
+SelectionRangesCount = 3
+
diff --git a/gopls/internal/lsp/testdata/symbols/go1.18.go b/gopls/internal/lsp/testdata/symbols/go1.18.go
new file mode 100644
index 000000000..cdf99dc20
--- /dev/null
+++ b/gopls/internal/lsp/testdata/symbols/go1.18.go
@@ -0,0 +1,16 @@
+//go:build go1.18
+// +build go1.18
+
+package main
+
+type T[P any] struct { //@symbol("T", "T", "Struct", "struct{...}", "T", "")
+ F P //@symbol("F", "F", "Field", "P", "", "T")
+}
+
+type Constraint interface { //@symbol("Constraint", "Constraint", "Interface", "interface{...}", "Constraint", "")
+ ~int | struct{ int } //@symbol("~int | struct{int}", "~int | struct{ int }", "Field", "", "", "Constraint")
+
+ // TODO(rfindley): the selection range below is the entire interface field.
+ // Can we reduce it?
+ interface{ M() } //@symbol("interface{...}", "interface{ M() }", "Field", "", "iFaceField", "Constraint"), symbol("M", "M", "Method", "func()", "", "iFaceField")
+}
diff --git a/gopls/internal/lsp/testdata/symbols/go1.18.go.golden b/gopls/internal/lsp/testdata/symbols/go1.18.go.golden
new file mode 100644
index 000000000..5a0c1a94d
--- /dev/null
+++ b/gopls/internal/lsp/testdata/symbols/go1.18.go.golden
@@ -0,0 +1,7 @@
+-- symbols --
+T Struct 6:6-6:7
+ F Field 7:2-7:3
+Constraint Interface 10:6-10:16
+ interface{...} Field 15:2-15:18
+ ~int | struct{int} Field 11:2-11:22
+
diff --git a/gopls/internal/lsp/testdata/symbols/main.go b/gopls/internal/lsp/testdata/symbols/main.go
new file mode 100644
index 000000000..65e0869fd
--- /dev/null
+++ b/gopls/internal/lsp/testdata/symbols/main.go
@@ -0,0 +1,91 @@
+package main
+
+import (
+ "io"
+)
+
+// Each symbol marker in this file defines the following information:
+// symbol(name, selectionSpan, kind, detail, id, parentID)
+// - name: DocumentSymbol.Name
+// - selectionSpan: DocumentSymbol.SelectionRange
+// - kind: DocumentSymbol.Kind
+// - detail: DocumentSymbol.Detail
+// - id: if non-empty, a unique identifier for this symbol
+// - parentID: if non-empty, the id of the parent of this symbol
+//
+// This data in aggregate defines a set of document symbols and their
+// parent-child relationships, which is compared against the DocummentSymbols
+// response from gopls for the current file.
+//
+// TODO(rfindley): the symbol annotations here are complicated and difficult to
+// maintain. It would be simpler to just write out the full expected response
+// in the golden file, perhaps as raw JSON.
+
+var _ = 1
+
+var x = 42 //@symbol("x", "x", "Variable", "", "", "")
+
+var nested struct { //@symbol("nested", "nested", "Variable", "struct{...}", "nested", "")
+ nestedField struct { //@symbol("nestedField", "nestedField", "Field", "struct{...}", "nestedField", "nested")
+ f int //@symbol("f", "f", "Field", "int", "", "nestedField")
+ }
+}
+
+const y = 43 //@symbol("y", "y", "Constant", "", "", "")
+
+type Number int //@symbol("Number", "Number", "Class", "int", "", "")
+
+type Alias = string //@symbol("Alias", "Alias", "Class", "string", "", "")
+
+type NumberAlias = Number //@symbol("NumberAlias", "NumberAlias", "Class", "Number", "", "")
+
+type (
+ Boolean bool //@symbol("Boolean", "Boolean", "Class", "bool", "", "")
+ BoolAlias = bool //@symbol("BoolAlias", "BoolAlias", "Class", "bool", "", "")
+)
+
+type Foo struct { //@symbol("Foo", "Foo", "Struct", "struct{...}", "Foo", "")
+ Quux //@symbol("Quux", "Quux", "Field", "Quux", "", "Foo")
+ W io.Writer //@symbol("W", "W", "Field", "io.Writer", "", "Foo")
+ Bar int //@symbol("Bar", "Bar", "Field", "int", "", "Foo")
+ baz string //@symbol("baz", "baz", "Field", "string", "", "Foo")
+ funcField func(int) int //@symbol("funcField", "funcField", "Field", "func(int) int", "", "Foo")
+}
+
+type Quux struct { //@symbol("Quux", "Quux", "Struct", "struct{...}", "Quux", "")
+ X, Y float64 //@symbol("X", "X", "Field", "float64", "", "Quux"), symbol("Y", "Y", "Field", "float64", "", "Quux")
+}
+
+type EmptyStruct struct{} //@symbol("EmptyStruct", "EmptyStruct", "Struct", "struct{}", "", "")
+
+func (f Foo) Baz() string { //@symbol("(Foo).Baz", "Baz", "Method", "func() string", "", "")
+ return f.baz
+}
+
+func _() {}
+
+func (q *Quux) Do() {} //@symbol("(*Quux).Do", "Do", "Method", "func()", "", "")
+
+func main() { //@symbol("main", "main", "Function", "func()", "", "")
+}
+
+type Stringer interface { //@symbol("Stringer", "Stringer", "Interface", "interface{...}", "Stringer", "")
+ String() string //@symbol("String", "String", "Method", "func() string", "", "Stringer")
+}
+
+type ABer interface { //@symbol("ABer", "ABer", "Interface", "interface{...}", "ABer", "")
+ B() //@symbol("B", "B", "Method", "func()", "", "ABer")
+ A() string //@symbol("A", "A", "Method", "func() string", "", "ABer")
+}
+
+type WithEmbeddeds interface { //@symbol("WithEmbeddeds", "WithEmbeddeds", "Interface", "interface{...}", "WithEmbeddeds", "")
+ Do() //@symbol("Do", "Do", "Method", "func()", "", "WithEmbeddeds")
+ ABer //@symbol("ABer", "ABer", "Field", "ABer", "", "WithEmbeddeds")
+ io.Writer //@symbol("Writer", "Writer", "Field", "io.Writer", "", "WithEmbeddeds")
+}
+
+type EmptyInterface interface{} //@symbol("EmptyInterface", "EmptyInterface", "Interface", "interface{}", "", "")
+
+func Dunk() int { return 0 } //@symbol("Dunk", "Dunk", "Function", "func() int", "", "")
+
+func dunk() {} //@symbol("dunk", "dunk", "Function", "func()", "", "")
diff --git a/gopls/internal/lsp/testdata/symbols/main.go.golden b/gopls/internal/lsp/testdata/symbols/main.go.golden
new file mode 100644
index 000000000..98009b02d
--- /dev/null
+++ b/gopls/internal/lsp/testdata/symbols/main.go.golden
@@ -0,0 +1,36 @@
+-- symbols --
+x Variable 26:5-26:6
+nested Variable 28:5-28:11
+ nestedField Field 29:2-29:13
+y Constant 34:7-34:8
+Number Class 36:6-36:12
+Alias Class 38:6-38:11
+NumberAlias Class 40:6-40:17
+Boolean Class 43:2-43:9
+BoolAlias Class 44:2-44:11
+Foo Struct 47:6-47:9
+ Bar Field 50:2-50:5
+ Quux Field 48:2-48:6
+ W Field 49:2-49:3
+ baz Field 51:2-51:5
+ funcField Field 52:2-52:11
+Quux Struct 55:6-55:10
+ X Field 56:2-56:3
+ Y Field 56:5-56:6
+EmptyStruct Struct 59:6-59:17
+(Foo).Baz Method 61:14-61:17
+(*Quux).Do Method 67:16-67:18
+main Function 69:6-69:10
+Stringer Interface 72:6-72:14
+ String Method 73:2-73:8
+ABer Interface 76:6-76:10
+ A Method 78:2-78:3
+ B Method 77:2-77:3
+WithEmbeddeds Interface 81:6-81:19
+ ABer Field 83:2-83:6
+ Do Method 82:2-82:4
+ Writer Field 84:5-84:11
+EmptyInterface Interface 87:6-87:20
+Dunk Function 89:6-89:10
+dunk Function 91:6-91:10
+
diff --git a/internal/lsp/testdata/testy/testy.go b/gopls/internal/lsp/testdata/testy/testy.go
index 1a738d7d7..1a738d7d7 100644
--- a/internal/lsp/testdata/testy/testy.go
+++ b/gopls/internal/lsp/testdata/testy/testy.go
diff --git a/gopls/internal/lsp/testdata/testy/testy_test.go b/gopls/internal/lsp/testdata/testy/testy_test.go
new file mode 100644
index 000000000..a7e897840
--- /dev/null
+++ b/gopls/internal/lsp/testdata/testy/testy_test.go
@@ -0,0 +1,18 @@
+package testy
+
+import (
+ "testing"
+
+ sig "golang.org/lsptests/signature"
+ "golang.org/lsptests/snippets"
+)
+
+func TestSomething(t *testing.T) { //@item(TestSomething, "TestSomething(t *testing.T)", "", "func")
+ var x int //@mark(testyX, "x"),diag("x", "compiler", "x declared (and|but) not used", "error"),refs("x", testyX)
+ a() //@mark(testyA, "a")
+}
+
+func _() {
+ _ = snippets.X(nil) //@signature("nil", "X(_ map[sig.Alias]types.CoolAlias) map[sig.Alias]types.CoolAlias", 0)
+ var _ sig.Alias
+}
diff --git a/internal/lsp/testdata/testy/testy_test.go.golden b/gopls/internal/lsp/testdata/testy/testy_test.go.golden
index cafc380d0..cafc380d0 100644
--- a/internal/lsp/testdata/testy/testy_test.go.golden
+++ b/gopls/internal/lsp/testdata/testy/testy_test.go.golden
diff --git a/internal/lsp/testdata/typdef/typdef.go b/gopls/internal/lsp/testdata/typdef/typdef.go
index bd2ea4b00..bd2ea4b00 100644
--- a/internal/lsp/testdata/typdef/typdef.go
+++ b/gopls/internal/lsp/testdata/typdef/typdef.go
diff --git a/internal/lsp/testdata/typeassert/type_assert.go b/gopls/internal/lsp/testdata/typeassert/type_assert.go
index e24b68a07..e24b68a07 100644
--- a/internal/lsp/testdata/typeassert/type_assert.go
+++ b/gopls/internal/lsp/testdata/typeassert/type_assert.go
diff --git a/gopls/internal/lsp/testdata/typeerrors/noresultvalues.go b/gopls/internal/lsp/testdata/typeerrors/noresultvalues.go
new file mode 100644
index 000000000..729e7bbcc
--- /dev/null
+++ b/gopls/internal/lsp/testdata/typeerrors/noresultvalues.go
@@ -0,0 +1,5 @@
+package typeerrors
+
+func x() { return nil } //@suggestedfix("nil", "quickfix", "")
+
+func y() { return nil, "hello" } //@suggestedfix("nil", "quickfix", "")
diff --git a/gopls/internal/lsp/testdata/typeerrors/noresultvalues.go.golden b/gopls/internal/lsp/testdata/typeerrors/noresultvalues.go.golden
new file mode 100644
index 000000000..48409a0b7
--- /dev/null
+++ b/gopls/internal/lsp/testdata/typeerrors/noresultvalues.go.golden
@@ -0,0 +1,14 @@
+-- suggestedfix_noresultvalues_3_19 --
+package typeerrors
+
+func x() { return } //@suggestedfix("nil", "quickfix", "")
+
+func y() { return nil, "hello" } //@suggestedfix("nil", "quickfix", "")
+
+-- suggestedfix_noresultvalues_5_19 --
+package typeerrors
+
+func x() { return nil } //@suggestedfix("nil", "quickfix", "")
+
+func y() { return } //@suggestedfix("nil", "quickfix", "")
+
diff --git a/internal/lsp/testdata/typemods/type_mods.go b/gopls/internal/lsp/testdata/typemods/type_mods.go
index f5f0f8076..f5f0f8076 100644
--- a/internal/lsp/testdata/typemods/type_mods.go
+++ b/gopls/internal/lsp/testdata/typemods/type_mods.go
diff --git a/gopls/internal/lsp/testdata/typeparams/type_params.go b/gopls/internal/lsp/testdata/typeparams/type_params.go
new file mode 100644
index 000000000..21fc7049f
--- /dev/null
+++ b/gopls/internal/lsp/testdata/typeparams/type_params.go
@@ -0,0 +1,61 @@
+//go:build go1.18
+// +build go1.18
+
+package typeparams
+
+func one[a int | string]() {}
+func two[a int | string, b float64 | int]() {}
+
+func _() {
+ one[]() //@rank("]", string, float64)
+ two[]() //@rank("]", int, float64)
+ two[int, f]() //@rank("]", float64, float32)
+}
+
+func slices[a []int | []float64]() {} //@item(tpInts, "[]int", "[]int", "type"),item(tpFloats, "[]float64", "[]float64", "type")
+
+func _() {
+ slices[]() //@rank("]", tpInts),rank("]", tpFloats)
+}
+
+type s[a int | string] struct{}
+
+func _() {
+ s[]{} //@rank("]", int, float64)
+}
+
+func takesGeneric[a int | string](s[a]) {
+ "s[a]{}" //@item(tpInScopeLit, "s[a]{}", "", "var")
+ takesGeneric() //@rank(")", tpInScopeLit),snippet(")", tpInScopeLit, "s[a]{\\}", "s[a]{\\}")
+}
+
+func _() {
+ s[int]{} //@item(tpInstLit, "s[int]{}", "", "var")
+ takesGeneric[int]() //@rank(")", tpInstLit),snippet(")", tpInstLit, "s[int]{\\}", "s[int]{\\}")
+
+ "s[...]{}" //@item(tpUninstLit, "s[...]{}", "", "var")
+ takesGeneric() //@rank(")", tpUninstLit),snippet(")", tpUninstLit, "s[${1:}]{\\}", "s[${1:a}]{\\}")
+}
+
+func returnTP[A int | float64](a A) A { //@item(returnTP, "returnTP", "something", "func")
+ return a
+}
+
+func _() {
+ // disabled - see issue #54822
+ var _ int = returnTP // snippet(" //", returnTP, "returnTP[${1:}](${2:})", "returnTP[${1:A int|float64}](${2:a A})")
+
+ var aa int //@item(tpInt, "aa", "int", "var")
+ var ab float64 //@item(tpFloat, "ab", "float64", "var")
+ returnTP[int](a) //@rank(")", tpInt, tpFloat)
+}
+
+func takesFunc[T any](func(T) T) {
+ var _ func(t T) T = f //@snippet(" //", tpLitFunc, "func(t T) T {$0\\}", "func(t T) T {$0\\}")
+}
+
+func _() {
+ _ = "func(...) {}" //@item(tpLitFunc, "func(...) {}", "", "var")
+ takesFunc() //@snippet(")", tpLitFunc, "func(${1:}) ${2:} {$0\\}", "func(${1:t} ${2:T}) ${3:T} {$0\\}")
+ takesFunc[int]() //@snippet(")", tpLitFunc, "func(i int) int {$0\\}", "func(${1:i} int) int {$0\\}")
+}
diff --git a/internal/lsp/testdata/types/types.go b/gopls/internal/lsp/testdata/types/types.go
index c60d4b2e4..c60d4b2e4 100644
--- a/internal/lsp/testdata/types/types.go
+++ b/gopls/internal/lsp/testdata/types/types.go
diff --git a/gopls/internal/lsp/testdata/undeclared/var.go b/gopls/internal/lsp/testdata/undeclared/var.go
new file mode 100644
index 000000000..3fda582ce
--- /dev/null
+++ b/gopls/internal/lsp/testdata/undeclared/var.go
@@ -0,0 +1,14 @@
+package undeclared
+
+func m() int {
+ z, _ := 1+y, 11 //@diag("y", "compiler", "(undeclared name|undefined): y", "error"),suggestedfix("y", "quickfix", "")
+ if 100 < 90 {
+ z = 1
+ } else if 100 > n+2 { //@diag("n", "compiler", "(undeclared name|undefined): n", "error"),suggestedfix("n", "quickfix", "")
+ z = 4
+ }
+ for i < 200 { //@diag("i", "compiler", "(undeclared name|undefined): i", "error"),suggestedfix("i", "quickfix", "")
+ }
+ r() //@diag("r", "compiler", "(undeclared name|undefined): r", "error")
+ return z
+}
diff --git a/gopls/internal/lsp/testdata/undeclared/var.go.golden b/gopls/internal/lsp/testdata/undeclared/var.go.golden
new file mode 100644
index 000000000..de5cbb42f
--- /dev/null
+++ b/gopls/internal/lsp/testdata/undeclared/var.go.golden
@@ -0,0 +1,51 @@
+-- suggestedfix_var_10_6 --
+package undeclared
+
+func m() int {
+ z, _ := 1+y, 11 //@diag("y", "compiler", "(undeclared name|undefined): y", "error"),suggestedfix("y", "quickfix", "")
+ if 100 < 90 {
+ z = 1
+ } else if 100 > n+2 { //@diag("n", "compiler", "(undeclared name|undefined): n", "error"),suggestedfix("n", "quickfix", "")
+ z = 4
+ }
+ i :=
+ for i < 200 { //@diag("i", "compiler", "(undeclared name|undefined): i", "error"),suggestedfix("i", "quickfix", "")
+ }
+ r() //@diag("r", "compiler", "(undeclared name|undefined): r", "error")
+ return z
+}
+
+-- suggestedfix_var_4_12 --
+package undeclared
+
+func m() int {
+ y :=
+ z, _ := 1+y, 11 //@diag("y", "compiler", "(undeclared name|undefined): y", "error"),suggestedfix("y", "quickfix", "")
+ if 100 < 90 {
+ z = 1
+ } else if 100 > n+2 { //@diag("n", "compiler", "(undeclared name|undefined): n", "error"),suggestedfix("n", "quickfix", "")
+ z = 4
+ }
+ for i < 200 { //@diag("i", "compiler", "(undeclared name|undefined): i", "error"),suggestedfix("i", "quickfix", "")
+ }
+ r() //@diag("r", "compiler", "(undeclared name|undefined): r", "error")
+ return z
+}
+
+-- suggestedfix_var_7_18 --
+package undeclared
+
+func m() int {
+ z, _ := 1+y, 11 //@diag("y", "compiler", "(undeclared name|undefined): y", "error"),suggestedfix("y", "quickfix", "")
+ n :=
+ if 100 < 90 {
+ z = 1
+ } else if 100 > n+2 { //@diag("n", "compiler", "(undeclared name|undefined): n", "error"),suggestedfix("n", "quickfix", "")
+ z = 4
+ }
+ for i < 200 { //@diag("i", "compiler", "(undeclared name|undefined): i", "error"),suggestedfix("i", "quickfix", "")
+ }
+ r() //@diag("r", "compiler", "(undeclared name|undefined): r", "error")
+ return z
+}
+
diff --git a/gopls/internal/lsp/testdata/unimported/export_test.go b/gopls/internal/lsp/testdata/unimported/export_test.go
new file mode 100644
index 000000000..707768e1d
--- /dev/null
+++ b/gopls/internal/lsp/testdata/unimported/export_test.go
@@ -0,0 +1,3 @@
+package unimported
+
+var TestExport int //@item(testexport, "TestExport", "var (from \"golang.org/lsptests/unimported\")", "var")
diff --git a/gopls/internal/lsp/testdata/unimported/unimported.go.in b/gopls/internal/lsp/testdata/unimported/unimported.go.in
new file mode 100644
index 000000000..74d51ffe8
--- /dev/null
+++ b/gopls/internal/lsp/testdata/unimported/unimported.go.in
@@ -0,0 +1,23 @@
+package unimported
+
+func _() {
+ http //@unimported("p", nethttp)
+ // container/ring is extremely unlikely to be imported by anything, so shouldn't have type information.
+ ring.Ring //@unimported("Ring", ringring)
+ signature.Foo //@unimported("Foo", signaturefoo)
+
+ context.Bac //@unimported(" //", contextBackground)
+}
+
+// Create markers for unimported std lib packages. Only for use by this test.
+/* http */ //@item(nethttp, "http", "\"net/http\"", "package")
+
+/* ring.Ring */ //@item(ringring, "Ring", "(from \"container/ring\")", "var")
+
+/* signature.Foo */ //@item(signaturefoo, "Foo", "func (from \"golang.org/lsptests/signature\")", "func")
+
+/* context.Background */ //@item(contextBackground, "Background", "func (from \"context\")", "func")
+
+// Now that we no longer type-check imported completions,
+// we don't expect the context.Background().Err method (see golang/go#58663).
+/* context.Background().Err */ //@item(contextBackgroundErr, "Background().Err", "func (from \"context\")", "method")
diff --git a/gopls/internal/lsp/testdata/unimported/unimported_cand_type.go b/gopls/internal/lsp/testdata/unimported/unimported_cand_type.go
new file mode 100644
index 000000000..554c426a9
--- /dev/null
+++ b/gopls/internal/lsp/testdata/unimported/unimported_cand_type.go
@@ -0,0 +1,16 @@
+package unimported
+
+import (
+ _ "context"
+
+ "golang.org/lsptests/baz"
+ _ "golang.org/lsptests/signature" // provide type information for unimported completions in the other file
+)
+
+func _() {
+ foo.StructFoo{} //@item(litFooStructFoo, "foo.StructFoo{}", "struct{...}", "struct")
+
+ // We get the literal completion for "foo.StructFoo{}" even though we haven't
+ // imported "foo" yet.
+ baz.FooStruct = f //@snippet(" //", litFooStructFoo, "foo.StructFoo{$0\\}", "foo.StructFoo{$0\\}")
+}
diff --git a/internal/lsp/testdata/unimported/x_test.go b/gopls/internal/lsp/testdata/unimported/x_test.go
index 681dcb253..681dcb253 100644
--- a/internal/lsp/testdata/unimported/x_test.go
+++ b/gopls/internal/lsp/testdata/unimported/x_test.go
diff --git a/internal/lsp/testdata/unresolved/unresolved.go.in b/gopls/internal/lsp/testdata/unresolved/unresolved.go.in
index e1daecc2e..e1daecc2e 100644
--- a/internal/lsp/testdata/unresolved/unresolved.go.in
+++ b/gopls/internal/lsp/testdata/unresolved/unresolved.go.in
diff --git a/internal/lsp/testdata/unsafe/unsafe.go b/gopls/internal/lsp/testdata/unsafe/unsafe.go
index 5d5e43407..5d5e43407 100644
--- a/internal/lsp/testdata/unsafe/unsafe.go
+++ b/gopls/internal/lsp/testdata/unsafe/unsafe.go
diff --git a/internal/lsp/testdata/variadic/variadic.go.in b/gopls/internal/lsp/testdata/variadic/variadic.go.in
index 4787498ce..4787498ce 100644
--- a/internal/lsp/testdata/variadic/variadic.go.in
+++ b/gopls/internal/lsp/testdata/variadic/variadic.go.in
diff --git a/internal/lsp/testdata/variadic/variadic_intf.go b/gopls/internal/lsp/testdata/variadic/variadic_intf.go
index 6e23fc996..6e23fc996 100644
--- a/internal/lsp/testdata/variadic/variadic_intf.go
+++ b/gopls/internal/lsp/testdata/variadic/variadic_intf.go
diff --git a/gopls/internal/lsp/testdata/workspacesymbol/a/a.go b/gopls/internal/lsp/testdata/workspacesymbol/a/a.go
new file mode 100644
index 000000000..4ae9997a0
--- /dev/null
+++ b/gopls/internal/lsp/testdata/workspacesymbol/a/a.go
@@ -0,0 +1,9 @@
+package a
+
+var RandomGopherVariableA = "a"
+
+const RandomGopherConstantA = "a"
+
+const (
+ randomgopherinvariable = iota
+)
diff --git a/gopls/internal/lsp/testdata/workspacesymbol/a/a_test.go b/gopls/internal/lsp/testdata/workspacesymbol/a/a_test.go
new file mode 100644
index 000000000..0d97c50d6
--- /dev/null
+++ b/gopls/internal/lsp/testdata/workspacesymbol/a/a_test.go
@@ -0,0 +1,3 @@
+package a
+
+var RandomGopherTestVariableA = "a"
diff --git a/gopls/internal/lsp/testdata/workspacesymbol/a/a_x_test.go b/gopls/internal/lsp/testdata/workspacesymbol/a/a_x_test.go
new file mode 100644
index 000000000..747cd17ec
--- /dev/null
+++ b/gopls/internal/lsp/testdata/workspacesymbol/a/a_x_test.go
@@ -0,0 +1,3 @@
+package a_test
+
+var RandomGopherXTestVariableA = "a"
diff --git a/gopls/internal/lsp/testdata/workspacesymbol/b/b.go b/gopls/internal/lsp/testdata/workspacesymbol/b/b.go
new file mode 100644
index 000000000..b2e2092ee
--- /dev/null
+++ b/gopls/internal/lsp/testdata/workspacesymbol/b/b.go
@@ -0,0 +1,7 @@
+package b
+
+var RandomGopherVariableB = "b"
+
+type RandomGopherStructB struct {
+ Bar int
+}
diff --git a/internal/lsp/testdata/workspacesymbol/issue44806.go b/gopls/internal/lsp/testdata/workspacesymbol/issue44806.go
index 6a6e03a5f..6a6e03a5f 100644
--- a/internal/lsp/testdata/workspacesymbol/issue44806.go
+++ b/gopls/internal/lsp/testdata/workspacesymbol/issue44806.go
diff --git a/internal/lsp/testdata/workspacesymbol/main.go b/gopls/internal/lsp/testdata/workspacesymbol/main.go
index 36ec8f1a5..36ec8f1a5 100644
--- a/internal/lsp/testdata/workspacesymbol/main.go
+++ b/gopls/internal/lsp/testdata/workspacesymbol/main.go
diff --git a/internal/lsp/testdata/workspacesymbol/p/p.go b/gopls/internal/lsp/testdata/workspacesymbol/p/p.go
index 409cc3547..409cc3547 100644
--- a/internal/lsp/testdata/workspacesymbol/p/p.go
+++ b/gopls/internal/lsp/testdata/workspacesymbol/p/p.go
diff --git a/internal/lsp/testdata/workspacesymbol/query.go b/gopls/internal/lsp/testdata/workspacesymbol/query.go
index 883aae268..883aae268 100644
--- a/internal/lsp/testdata/workspacesymbol/query.go
+++ b/gopls/internal/lsp/testdata/workspacesymbol/query.go
diff --git a/internal/lsp/testdata/workspacesymbol/query.go.golden b/gopls/internal/lsp/testdata/workspacesymbol/query.go.golden
index 4c6d470f7..4c6d470f7 100644
--- a/internal/lsp/testdata/workspacesymbol/query.go.golden
+++ b/gopls/internal/lsp/testdata/workspacesymbol/query.go.golden
diff --git a/gopls/internal/lsp/tests/README.md b/gopls/internal/lsp/tests/README.md
new file mode 100644
index 000000000..07df28815
--- /dev/null
+++ b/gopls/internal/lsp/tests/README.md
@@ -0,0 +1,66 @@
+# Testing
+
+LSP has "marker tests" defined in `internal/lsp/testdata`, as well as
+traditional tests.
+
+## Marker tests
+
+Marker tests have a standard input file, like
+`internal/lsp/testdata/foo/bar.go`, and some may have a corresponding golden
+file, like `internal/lsp/testdata/foo/bar.go.golden`. The former is the "input"
+and the latter is the expected output.
+
+Each input file contains annotations like
+`//@suggestedfix("}", "refactor.rewrite", "Fill anonymous struct")`. These annotations are interpreted by
+test runners to perform certain actions. The expected output after those actions
+is encoded in the golden file.
+
+When tests are run, each annotation results in a new subtest, which is encoded
+in the golden file with a heading like,
+
+```bash
+-- suggestedfix_bar_11_21 --
+// expected contents go here
+-- suggestedfix_bar_13_20 --
+// expected contents go here
+```
+
+The format of these headings vary: they are defined by the
+[`Golden`](https://pkg.go.dev/golang.org/x/tools/gopls/internal/lsp/tests#Data.Golden)
+function for each annotation. In the case above, the format is: annotation
+name, file name, annotation line location, annotation character location.
+
+So, if `internal/lsp/testdata/foo/bar.go` has three `suggestedfix` annotations,
+the golden file should have three headers with `suggestedfix_bar_xx_yy`
+headings.
+
+To see a list of all available annotations, see the exported "expectations" in
+[tests.go](https://github.com/golang/tools/blob/299f270db45902e93469b1152fafed034bb3f033/internal/lsp/tests/tests.go#L418-L447).
+
+To run marker tests,
+
+```bash
+cd /path/to/tools
+
+# The marker tests are located in "internal/lsp", "internal/lsp/cmd, and
+# "internal/lsp/source".
+go test ./internal/lsp/...
+```
+
+There are quite a lot of marker tests, so to run one individually, pass the test
+path and heading into a -run argument:
+
+```bash
+cd /path/to/tools
+go test ./internal/lsp/... -v -run TestLSP/Modules/SuggestedFix/bar_11_21
+```
+
+## Resetting marker tests
+
+Sometimes, a change is made to lsp that requires a change to multiple golden
+files. When this happens, you can run,
+
+```bash
+cd /path/to/tools
+./internal/lsp/reset_golden.sh
+```
diff --git a/gopls/internal/lsp/tests/compare/text.go b/gopls/internal/lsp/tests/compare/text.go
new file mode 100644
index 000000000..4ce2f8c6b
--- /dev/null
+++ b/gopls/internal/lsp/tests/compare/text.go
@@ -0,0 +1,49 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package compare
+
+import (
+ "bytes"
+
+ "golang.org/x/tools/internal/diff"
+)
+
+// Text returns a formatted unified diff of the edits to go from want to
+// got, returning "" if and only if want == got.
+//
+// This function is intended for use in testing, and panics if any error occurs
+// while computing the diff. It is not sufficiently tested for production use.
+func Text(want, got string) string {
+ return NamedText("want", "got", want, got)
+}
+
+// NamedText is like text, but allows passing custom names of the 'want' and
+// 'got' content.
+func NamedText(wantName, gotName, want, got string) string {
+ if want == got {
+ return ""
+ }
+
+ // Add newlines to avoid verbose newline messages ("No newline at end of file").
+ unified := diff.Unified(wantName, gotName, want+"\n", got+"\n")
+
+ // Defensively assert that we get an actual diff, so that we guarantee the
+ // invariant that we return "" if and only if want == got.
+ //
+ // This is probably unnecessary, but convenient.
+ if unified == "" {
+ panic("empty diff for non-identical input")
+ }
+
+ return unified
+}
+
+// Bytes is like Text but using byte slices.
+func Bytes(want, got []byte) string {
+ if bytes.Equal(want, got) {
+ return "" // common case
+ }
+ return Text(string(want), string(got))
+}
diff --git a/gopls/internal/lsp/tests/compare/text_test.go b/gopls/internal/lsp/tests/compare/text_test.go
new file mode 100644
index 000000000..8f5af48bd
--- /dev/null
+++ b/gopls/internal/lsp/tests/compare/text_test.go
@@ -0,0 +1,28 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package compare_test
+
+import (
+ "testing"
+
+ "golang.org/x/tools/gopls/internal/lsp/tests/compare"
+)
+
+func TestText(t *testing.T) {
+ tests := []struct {
+ got, want, wantDiff string
+ }{
+ {"", "", ""},
+ {"equal", "equal", ""},
+ {"a", "b", "--- want\n+++ got\n@@ -1 +1 @@\n-b\n+a\n"},
+ {"a\nd\nc\n", "a\nb\nc\n", "--- want\n+++ got\n@@ -1,4 +1,4 @@\n a\n-b\n+d\n c\n \n"},
+ }
+
+ for _, test := range tests {
+ if gotDiff := compare.Text(test.want, test.got); gotDiff != test.wantDiff {
+ t.Errorf("compare.Text(%q, %q) =\n%q, want\n%q", test.want, test.got, gotDiff, test.wantDiff)
+ }
+ }
+}
diff --git a/gopls/internal/lsp/tests/markdown_go118.go b/gopls/internal/lsp/tests/markdown_go118.go
new file mode 100644
index 000000000..3701018bd
--- /dev/null
+++ b/gopls/internal/lsp/tests/markdown_go118.go
@@ -0,0 +1,69 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.19
+// +build !go1.19
+
+package tests
+
+import (
+ "regexp"
+ "strings"
+
+ "golang.org/x/tools/gopls/internal/lsp/tests/compare"
+)
+
+// DiffMarkdown compares two markdown strings produced by parsing go doc
+// comments.
+//
+// For go1.19 and later, markdown conversion is done using go/doc/comment.
+// Compared to the newer version, the older version has extra escapes, and
+// treats code blocks slightly differently.
+func DiffMarkdown(want, got string) string {
+ want = normalizeMarkdown(want)
+ got = normalizeMarkdown(got)
+ return compare.Text(want, got)
+}
+
+// normalizeMarkdown normalizes whitespace and escaping of the input string, to
+// eliminate differences between the Go 1.18 and Go 1.19 generated markdown for
+// doc comments. Note that it does not normalize to either the 1.18 or 1.19
+// formatting: it simplifies both so that they may be compared.
+//
+// This function may need to be adjusted as we encounter more differences in
+// the generated text.
+//
+// TODO(rfindley): this function doesn't correctly handle the case of
+// multi-line docstrings.
+func normalizeMarkdown(input string) string {
+ input = strings.TrimSpace(input)
+
+ // For simplicity, eliminate blank lines.
+ input = regexp.MustCompile("\n+").ReplaceAllString(input, "\n")
+
+ // Replace common escaped characters with their unescaped version.
+ //
+ // This list may not be exhaustive: it was just sufficient to make tests
+ // pass.
+ input = strings.NewReplacer(
+ `\\`, ``,
+ `\@`, `@`,
+ `\(`, `(`,
+ `\)`, `)`,
+ `\{`, `{`,
+ `\}`, `}`,
+ `\"`, `"`,
+ `\.`, `.`,
+ `\-`, `-`,
+ `\'`, `'`,
+ `\+`, `+`,
+ `\~`, `~`,
+ `\=`, `=`,
+ `\:`, `:`,
+ `\?`, `?`,
+ `\n\n\n`, `\n\n`, // Note that these are *escaped* newlines.
+ ).Replace(input)
+
+ return input
+}
diff --git a/gopls/internal/lsp/tests/markdown_go119.go b/gopls/internal/lsp/tests/markdown_go119.go
new file mode 100644
index 000000000..a7fcf1a42
--- /dev/null
+++ b/gopls/internal/lsp/tests/markdown_go119.go
@@ -0,0 +1,22 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.19
+// +build go1.19
+
+package tests
+
+import (
+ "golang.org/x/tools/gopls/internal/lsp/tests/compare"
+)
+
+// DiffMarkdown compares two markdown strings produced by parsing go doc
+// comments.
+//
+// For go1.19 and later, markdown conversion is done using go/doc/comment.
+// Compared to the newer version, the older version has extra escapes, and
+// treats code blocks slightly differently.
+func DiffMarkdown(want, got string) string {
+ return compare.Text(want, got)
+}
diff --git a/gopls/internal/lsp/tests/normalizer.go b/gopls/internal/lsp/tests/normalizer.go
new file mode 100644
index 000000000..9c5d7b9c8
--- /dev/null
+++ b/gopls/internal/lsp/tests/normalizer.go
@@ -0,0 +1,113 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tests
+
+import (
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "golang.org/x/tools/go/packages/packagestest"
+)
+
+type Normalizer struct {
+ path string
+ slashed string
+ escaped string
+ fragment string
+}
+
+func CollectNormalizers(exported *packagestest.Exported) []Normalizer {
+ // build the path normalizing patterns
+ var normalizers []Normalizer
+ for _, m := range exported.Modules {
+ for fragment := range m.Files {
+ n := Normalizer{
+ path: exported.File(m.Name, fragment),
+ fragment: fragment,
+ }
+ if n.slashed = filepath.ToSlash(n.path); n.slashed == n.path {
+ n.slashed = ""
+ }
+ quoted := strconv.Quote(n.path)
+ if n.escaped = quoted[1 : len(quoted)-1]; n.escaped == n.path {
+ n.escaped = ""
+ }
+ normalizers = append(normalizers, n)
+ }
+ }
+ return normalizers
+}
+
+// Normalize replaces all paths present in s with just the fragment portion
+// this is used to make golden files not depend on the temporary paths of the files
+func Normalize(s string, normalizers []Normalizer) string {
+ type entry struct {
+ path string
+ index int
+ fragment string
+ }
+ var match []entry
+ // collect the initial state of all the matchers
+ for _, n := range normalizers {
+ index := strings.Index(s, n.path)
+ if index >= 0 {
+ match = append(match, entry{n.path, index, n.fragment})
+ }
+ if n.slashed != "" {
+ index := strings.Index(s, n.slashed)
+ if index >= 0 {
+ match = append(match, entry{n.slashed, index, n.fragment})
+ }
+ }
+ if n.escaped != "" {
+ index := strings.Index(s, n.escaped)
+ if index >= 0 {
+ match = append(match, entry{n.escaped, index, n.fragment})
+ }
+ }
+ }
+ // result should be the same or shorter than the input
+ var b strings.Builder
+ last := 0
+ for {
+ // find the nearest path match to the start of the buffer
+ next := -1
+ nearest := len(s)
+ for i, c := range match {
+ if c.index >= 0 && nearest > c.index {
+ nearest = c.index
+ next = i
+ }
+ }
+ // if there are no matches, we copy the rest of the string and are done
+ if next < 0 {
+ b.WriteString(s[last:])
+ return b.String()
+ }
+ // we have a match
+ n := &match[next]
+ // copy up to the start of the match
+ b.WriteString(s[last:n.index])
+ // skip over the filename
+ last = n.index + len(n.path)
+
+ // Hack: In multi-module mode, we add a "testmodule/" prefix, so trim
+ // it from the fragment.
+ fragment := n.fragment
+ if strings.HasPrefix(fragment, "testmodule") {
+ split := strings.Split(filepath.ToSlash(fragment), "/")
+ fragment = filepath.FromSlash(strings.Join(split[1:], "/"))
+ }
+
+ // add in the fragment instead
+ b.WriteString(fragment)
+ // see what the next match for this path is
+ n.index = strings.Index(s[last:], n.path)
+ if n.index >= 0 {
+ n.index += last
+ }
+ }
+}
diff --git a/gopls/internal/lsp/tests/tests.go b/gopls/internal/lsp/tests/tests.go
new file mode 100644
index 000000000..5bf8a92d0
--- /dev/null
+++ b/gopls/internal/lsp/tests/tests.go
@@ -0,0 +1,1446 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package tests exports functionality to be used across a variety of gopls tests.
+package tests
+
+import (
+ "bytes"
+ "context"
+ "flag"
+ "fmt"
+ "go/ast"
+ "go/token"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "golang.org/x/tools/go/expect"
+ "golang.org/x/tools/go/packages"
+ "golang.org/x/tools/go/packages/packagestest"
+ "golang.org/x/tools/gopls/internal/lsp/command"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/safetoken"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/lsp/source/completion"
+ "golang.org/x/tools/gopls/internal/lsp/tests/compare"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/testenv"
+ "golang.org/x/tools/internal/typeparams"
+ "golang.org/x/tools/txtar"
+)
+
+const (
+ overlayFileSuffix = ".overlay"
+ goldenFileSuffix = ".golden"
+ inFileSuffix = ".in"
+
+ // The module path containing the testdata packages.
+ //
+ // Warning: the length of this module path matters, as we have bumped up
+ // against command-line limitations on windows (golang/go#54800).
+ testModule = "golang.org/lsptests"
+)
+
+var summaryFile = "summary.txt"
+
+func init() {
+ if typeparams.Enabled {
+ summaryFile = "summary_go1.18.txt"
+ }
+}
+
+var UpdateGolden = flag.Bool("golden", false, "Update golden files")
+
+// These type names apparently avoid the need to repeat the
+// type in the field name and the make() expression.
+type CallHierarchy = map[span.Span]*CallHierarchyResult
+type CodeLens = map[span.URI][]protocol.CodeLens
+type Diagnostics = map[span.URI][]*source.Diagnostic
+type CompletionItems = map[token.Pos]*completion.CompletionItem
+type Completions = map[span.Span][]Completion
+type CompletionSnippets = map[span.Span][]CompletionSnippet
+type UnimportedCompletions = map[span.Span][]Completion
+type DeepCompletions = map[span.Span][]Completion
+type FuzzyCompletions = map[span.Span][]Completion
+type CaseSensitiveCompletions = map[span.Span][]Completion
+type RankCompletions = map[span.Span][]Completion
+type FoldingRanges = []span.Span
+type Formats = []span.Span
+type Imports = []span.Span
+type SemanticTokens = []span.Span
+type SuggestedFixes = map[span.Span][]SuggestedFix
+type FunctionExtractions = map[span.Span]span.Span
+type MethodExtractions = map[span.Span]span.Span
+type Definitions = map[span.Span]Definition
+type Implementations = map[span.Span][]span.Span
+type Highlights = map[span.Span][]span.Span
+type References = map[span.Span][]span.Span
+type Renames = map[span.Span]string
+type PrepareRenames = map[span.Span]*source.PrepareItem
+type Symbols = map[span.URI][]*symbol
+type InlayHints = []span.Span
+type WorkspaceSymbols = map[WorkspaceSymbolsTestType]map[span.URI][]string
+type Signatures = map[span.Span]*protocol.SignatureHelp
+type Links = map[span.URI][]Link
+type AddImport = map[span.URI]string
+type SelectionRanges = []span.Span
+
+type Data struct {
+ Config packages.Config
+ Exported *packagestest.Exported
+ CallHierarchy CallHierarchy
+ CodeLens CodeLens
+ Diagnostics Diagnostics
+ CompletionItems CompletionItems
+ Completions Completions
+ CompletionSnippets CompletionSnippets
+ UnimportedCompletions UnimportedCompletions
+ DeepCompletions DeepCompletions
+ FuzzyCompletions FuzzyCompletions
+ CaseSensitiveCompletions CaseSensitiveCompletions
+ RankCompletions RankCompletions
+ FoldingRanges FoldingRanges
+ Formats Formats
+ Imports Imports
+ SemanticTokens SemanticTokens
+ SuggestedFixes SuggestedFixes
+ FunctionExtractions FunctionExtractions
+ MethodExtractions MethodExtractions
+ Definitions Definitions
+ Implementations Implementations
+ Highlights Highlights
+ References References
+ Renames Renames
+ InlayHints InlayHints
+ PrepareRenames PrepareRenames
+ Symbols Symbols
+ WorkspaceSymbols WorkspaceSymbols
+ Signatures Signatures
+ Links Links
+ AddImport AddImport
+ SelectionRanges SelectionRanges
+
+ fragments map[string]string
+ dir string
+ golden map[string]*Golden
+ mode string
+
+ ModfileFlagAvailable bool
+
+ mappersMu sync.Mutex
+ mappers map[span.URI]*protocol.Mapper
+}
+
+// The Tests interface abstracts the LSP-based implementation of the marker
+// test operators (such as @codelens) appearing in files beneath ../testdata/.
+//
+// TODO(adonovan): reduce duplication; see https://github.com/golang/go/issues/54845.
+// There is only one implementation (*runner in ../lsp_test.go), so
+// we can abolish the interface now.
+type Tests interface {
+ CallHierarchy(*testing.T, span.Span, *CallHierarchyResult)
+ CodeLens(*testing.T, span.URI, []protocol.CodeLens)
+ Diagnostics(*testing.T, span.URI, []*source.Diagnostic)
+ Completion(*testing.T, span.Span, Completion, CompletionItems)
+ CompletionSnippet(*testing.T, span.Span, CompletionSnippet, bool, CompletionItems)
+ UnimportedCompletion(*testing.T, span.Span, Completion, CompletionItems)
+ DeepCompletion(*testing.T, span.Span, Completion, CompletionItems)
+ FuzzyCompletion(*testing.T, span.Span, Completion, CompletionItems)
+ CaseSensitiveCompletion(*testing.T, span.Span, Completion, CompletionItems)
+ RankCompletion(*testing.T, span.Span, Completion, CompletionItems)
+ FoldingRanges(*testing.T, span.Span)
+ Format(*testing.T, span.Span)
+ Import(*testing.T, span.Span)
+ SemanticTokens(*testing.T, span.Span)
+ SuggestedFix(*testing.T, span.Span, []SuggestedFix, int)
+ FunctionExtraction(*testing.T, span.Span, span.Span)
+ MethodExtraction(*testing.T, span.Span, span.Span)
+ Definition(*testing.T, span.Span, Definition)
+ Implementation(*testing.T, span.Span, []span.Span)
+ Highlight(*testing.T, span.Span, []span.Span)
+ InlayHints(*testing.T, span.Span)
+ References(*testing.T, span.Span, []span.Span)
+ Rename(*testing.T, span.Span, string)
+ PrepareRename(*testing.T, span.Span, *source.PrepareItem)
+ Symbols(*testing.T, span.URI, []protocol.DocumentSymbol)
+ WorkspaceSymbols(*testing.T, span.URI, string, WorkspaceSymbolsTestType)
+ SignatureHelp(*testing.T, span.Span, *protocol.SignatureHelp)
+ Link(*testing.T, span.URI, []Link)
+ AddImport(*testing.T, span.URI, string)
+ SelectionRanges(*testing.T, span.Span)
+}
+
+type Definition struct {
+ Name string
+ IsType bool
+ OnlyHover bool
+ Src, Def span.Span
+}
+
+type CompletionTestType int
+
+const (
+ // Default runs the standard completion tests.
+ CompletionDefault = CompletionTestType(iota)
+
+ // Unimported tests the autocompletion of unimported packages.
+ CompletionUnimported
+
+ // Deep tests deep completion.
+ CompletionDeep
+
+ // Fuzzy tests deep completion and fuzzy matching.
+ CompletionFuzzy
+
+ // CaseSensitive tests case sensitive completion.
+ CompletionCaseSensitive
+
+ // CompletionRank candidates in test must be valid and in the right relative order.
+ CompletionRank
+)
+
+type WorkspaceSymbolsTestType int
+
+const (
+ // Default runs the standard workspace symbols tests.
+ WorkspaceSymbolsDefault = WorkspaceSymbolsTestType(iota)
+
+ // Fuzzy tests workspace symbols with fuzzy matching.
+ WorkspaceSymbolsFuzzy
+
+ // CaseSensitive tests workspace symbols with case sensitive.
+ WorkspaceSymbolsCaseSensitive
+)
+
+type Completion struct {
+ CompletionItems []token.Pos
+}
+
+type CompletionSnippet struct {
+ CompletionItem token.Pos
+ PlainSnippet string
+ PlaceholderSnippet string
+}
+
+type CallHierarchyResult struct {
+ IncomingCalls, OutgoingCalls []protocol.CallHierarchyItem
+}
+
+type Link struct {
+ Src span.Span
+ Target string
+ NotePosition token.Position
+}
+
+type SuggestedFix struct {
+ ActionKind, Title string
+}
+
+// A symbol holds a DocumentSymbol along with its parent-child edge.
+type symbol struct {
+ pSymbol protocol.DocumentSymbol
+ id, parentID string
+}
+
+type Golden struct {
+ Filename string
+ Archive *txtar.Archive
+ Modified bool
+}
+
+func Context(t testing.TB) context.Context {
+ return context.Background()
+}
+
+func DefaultOptions(o *source.Options) {
+ o.SupportedCodeActions = map[source.FileKind]map[protocol.CodeActionKind]bool{
+ source.Go: {
+ protocol.SourceOrganizeImports: true,
+ protocol.QuickFix: true,
+ protocol.RefactorRewrite: true,
+ protocol.RefactorExtract: true,
+ protocol.SourceFixAll: true,
+ },
+ source.Mod: {
+ protocol.SourceOrganizeImports: true,
+ },
+ source.Sum: {},
+ source.Work: {},
+ source.Tmpl: {},
+ }
+ o.UserOptions.Codelenses[string(command.Test)] = true
+ o.HoverKind = source.SynopsisDocumentation
+ o.InsertTextFormat = protocol.SnippetTextFormat
+ o.CompletionBudget = time.Minute
+ o.HierarchicalDocumentSymbolSupport = true
+ o.SemanticTokens = true
+ o.InternalOptions.NewDiff = "both"
+}
+
+func RunTests(t *testing.T, dataDir string, includeMultiModule bool, f func(*testing.T, *Data)) {
+ t.Helper()
+ modes := []string{"Modules", "GOPATH"}
+ if includeMultiModule {
+ modes = append(modes, "MultiModule")
+ }
+ for _, mode := range modes {
+ t.Run(mode, func(t *testing.T) {
+ datum := load(t, mode, dataDir)
+ t.Helper()
+ f(t, datum)
+ })
+ }
+}
+
+func load(t testing.TB, mode string, dir string) *Data {
+ datum := &Data{
+ CallHierarchy: make(CallHierarchy),
+ CodeLens: make(CodeLens),
+ Diagnostics: make(Diagnostics),
+ CompletionItems: make(CompletionItems),
+ Completions: make(Completions),
+ CompletionSnippets: make(CompletionSnippets),
+ UnimportedCompletions: make(UnimportedCompletions),
+ DeepCompletions: make(DeepCompletions),
+ FuzzyCompletions: make(FuzzyCompletions),
+ RankCompletions: make(RankCompletions),
+ CaseSensitiveCompletions: make(CaseSensitiveCompletions),
+ Definitions: make(Definitions),
+ Implementations: make(Implementations),
+ Highlights: make(Highlights),
+ References: make(References),
+ Renames: make(Renames),
+ PrepareRenames: make(PrepareRenames),
+ SuggestedFixes: make(SuggestedFixes),
+ FunctionExtractions: make(FunctionExtractions),
+ MethodExtractions: make(MethodExtractions),
+ Symbols: make(Symbols),
+ WorkspaceSymbols: make(WorkspaceSymbols),
+ Signatures: make(Signatures),
+ Links: make(Links),
+ AddImport: make(AddImport),
+
+ dir: dir,
+ fragments: map[string]string{},
+ golden: map[string]*Golden{},
+ mode: mode,
+ mappers: map[span.URI]*protocol.Mapper{},
+ }
+
+ if !*UpdateGolden {
+ summary := filepath.Join(filepath.FromSlash(dir), summaryFile+goldenFileSuffix)
+ if _, err := os.Stat(summary); os.IsNotExist(err) {
+ t.Fatalf("could not find golden file summary.txt in %#v", dir)
+ }
+ archive, err := txtar.ParseFile(summary)
+ if err != nil {
+ t.Fatalf("could not read golden file %v/%v: %v", dir, summary, err)
+ }
+ datum.golden[summaryFile] = &Golden{
+ Filename: summary,
+ Archive: archive,
+ }
+ }
+
+ files := packagestest.MustCopyFileTree(dir)
+ // Prune test cases that exercise generics.
+ if !typeparams.Enabled {
+ for name := range files {
+ if strings.Contains(name, "_generics") {
+ delete(files, name)
+ }
+ }
+ }
+ overlays := map[string][]byte{}
+ for fragment, operation := range files {
+ if trimmed := strings.TrimSuffix(fragment, goldenFileSuffix); trimmed != fragment {
+ delete(files, fragment)
+ goldFile := filepath.Join(dir, fragment)
+ archive, err := txtar.ParseFile(goldFile)
+ if err != nil {
+ t.Fatalf("could not read golden file %v: %v", fragment, err)
+ }
+ datum.golden[trimmed] = &Golden{
+ Filename: goldFile,
+ Archive: archive,
+ }
+ } else if trimmed := strings.TrimSuffix(fragment, inFileSuffix); trimmed != fragment {
+ delete(files, fragment)
+ files[trimmed] = operation
+ } else if index := strings.Index(fragment, overlayFileSuffix); index >= 0 {
+ delete(files, fragment)
+ partial := fragment[:index] + fragment[index+len(overlayFileSuffix):]
+ contents, err := ioutil.ReadFile(filepath.Join(dir, fragment))
+ if err != nil {
+ t.Fatal(err)
+ }
+ overlays[partial] = contents
+ }
+ }
+
+ modules := []packagestest.Module{
+ {
+ Name: testModule,
+ Files: files,
+ Overlay: overlays,
+ },
+ }
+ switch mode {
+ case "Modules":
+ datum.Exported = packagestest.Export(t, packagestest.Modules, modules)
+ case "GOPATH":
+ datum.Exported = packagestest.Export(t, packagestest.GOPATH, modules)
+ case "MultiModule":
+ files := map[string]interface{}{}
+ for k, v := range modules[0].Files {
+ files[filepath.Join("testmodule", k)] = v
+ }
+ modules[0].Files = files
+
+ overlays := map[string][]byte{}
+ for k, v := range modules[0].Overlay {
+ overlays[filepath.Join("testmodule", k)] = v
+ }
+ modules[0].Overlay = overlays
+
+ golden := map[string]*Golden{}
+ for k, v := range datum.golden {
+ if k == summaryFile {
+ golden[k] = v
+ } else {
+ golden[filepath.Join("testmodule", k)] = v
+ }
+ }
+ datum.golden = golden
+
+ datum.Exported = packagestest.Export(t, packagestest.Modules, modules)
+ default:
+ panic("unknown mode " + mode)
+ }
+
+ for _, m := range modules {
+ for fragment := range m.Files {
+ filename := datum.Exported.File(m.Name, fragment)
+ datum.fragments[filename] = fragment
+ }
+ }
+
+ // Turn off go/packages debug logging.
+ datum.Exported.Config.Logf = nil
+ datum.Config.Logf = nil
+
+ // Merge the exported.Config with the view.Config.
+ datum.Config = *datum.Exported.Config
+ datum.Config.Fset = token.NewFileSet()
+ datum.Config.Context = Context(nil)
+ datum.Config.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) {
+ panic("ParseFile should not be called")
+ }
+
+ // Do a first pass to collect special markers for completion and workspace symbols.
+ if err := datum.Exported.Expect(map[string]interface{}{
+ "item": func(name string, r packagestest.Range, _ []string) {
+ datum.Exported.Mark(name, r)
+ },
+ "symbol": func(name string, r packagestest.Range, _ []string) {
+ datum.Exported.Mark(name, r)
+ },
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ // Collect any data that needs to be used by subsequent tests.
+ if err := datum.Exported.Expect(map[string]interface{}{
+ "codelens": datum.collectCodeLens,
+ "diag": datum.collectDiagnostics,
+ "item": datum.collectCompletionItems,
+ "complete": datum.collectCompletions(CompletionDefault),
+ "unimported": datum.collectCompletions(CompletionUnimported),
+ "deep": datum.collectCompletions(CompletionDeep),
+ "fuzzy": datum.collectCompletions(CompletionFuzzy),
+ "casesensitive": datum.collectCompletions(CompletionCaseSensitive),
+ "rank": datum.collectCompletions(CompletionRank),
+ "snippet": datum.collectCompletionSnippets,
+ "fold": datum.collectFoldingRanges,
+ "format": datum.collectFormats,
+ "import": datum.collectImports,
+ "semantic": datum.collectSemanticTokens,
+ "godef": datum.collectDefinitions,
+ "implementations": datum.collectImplementations,
+ "typdef": datum.collectTypeDefinitions,
+ "hoverdef": datum.collectHoverDefinitions,
+ "highlight": datum.collectHighlights,
+ "inlayHint": datum.collectInlayHints,
+ "refs": datum.collectReferences,
+ "rename": datum.collectRenames,
+ "prepare": datum.collectPrepareRenames,
+ "symbol": datum.collectSymbols,
+ "signature": datum.collectSignatures,
+ "link": datum.collectLinks,
+ "suggestedfix": datum.collectSuggestedFixes,
+ "extractfunc": datum.collectFunctionExtractions,
+ "extractmethod": datum.collectMethodExtractions,
+ "incomingcalls": datum.collectIncomingCalls,
+ "outgoingcalls": datum.collectOutgoingCalls,
+ "addimport": datum.collectAddImports,
+ "selectionrange": datum.collectSelectionRanges,
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ // Collect names for the entries that require golden files.
+ if err := datum.Exported.Expect(map[string]interface{}{
+ "godef": datum.collectDefinitionNames,
+ "hoverdef": datum.collectDefinitionNames,
+ "workspacesymbol": datum.collectWorkspaceSymbols(WorkspaceSymbolsDefault),
+ "workspacesymbolfuzzy": datum.collectWorkspaceSymbols(WorkspaceSymbolsFuzzy),
+ "workspacesymbolcasesensitive": datum.collectWorkspaceSymbols(WorkspaceSymbolsCaseSensitive),
+ }); err != nil {
+ t.Fatal(err)
+ }
+ if mode == "MultiModule" {
+ if err := moveFile(filepath.Join(datum.Config.Dir, "go.mod"), filepath.Join(datum.Config.Dir, "testmodule/go.mod")); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ return datum
+}
+
+// moveFile moves the file at oldpath to newpath, by renaming if possible
+// or copying otherwise.
+func moveFile(oldpath, newpath string) (err error) {
+ renameErr := os.Rename(oldpath, newpath)
+ if renameErr == nil {
+ return nil
+ }
+
+ src, err := os.Open(oldpath)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ src.Close()
+ if err == nil {
+ err = os.Remove(oldpath)
+ }
+ }()
+
+ perm := os.ModePerm
+ fi, err := src.Stat()
+ if err == nil {
+ perm = fi.Mode().Perm()
+ }
+
+ dst, err := os.OpenFile(newpath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, perm)
+ if err != nil {
+ return err
+ }
+
+ _, err = io.Copy(dst, src)
+ if closeErr := dst.Close(); err == nil {
+ err = closeErr
+ }
+ return err
+}
+
+func Run(t *testing.T, tests Tests, data *Data) {
+ t.Helper()
+ checkData(t, data)
+
+ eachCompletion := func(t *testing.T, cases map[span.Span][]Completion, test func(*testing.T, span.Span, Completion, CompletionItems)) {
+ t.Helper()
+
+ for src, exp := range cases {
+ for i, e := range exp {
+ t.Run(SpanName(src)+"_"+strconv.Itoa(i), func(t *testing.T) {
+ t.Helper()
+ if strings.Contains(t.Name(), "cgo") {
+ testenv.NeedsTool(t, "cgo")
+ }
+ test(t, src, e, data.CompletionItems)
+ })
+ }
+
+ }
+ }
+
+ t.Run("CallHierarchy", func(t *testing.T) {
+ t.Helper()
+ for spn, callHierarchyResult := range data.CallHierarchy {
+ t.Run(SpanName(spn), func(t *testing.T) {
+ t.Helper()
+ tests.CallHierarchy(t, spn, callHierarchyResult)
+ })
+ }
+ })
+
+ t.Run("Completion", func(t *testing.T) {
+ t.Helper()
+ eachCompletion(t, data.Completions, tests.Completion)
+ })
+
+ t.Run("CompletionSnippets", func(t *testing.T) {
+ t.Helper()
+ for _, placeholders := range []bool{true, false} {
+ for src, expecteds := range data.CompletionSnippets {
+ for i, expected := range expecteds {
+ name := SpanName(src) + "_" + strconv.Itoa(i+1)
+ if placeholders {
+ name += "_placeholders"
+ }
+
+ t.Run(name, func(t *testing.T) {
+ t.Helper()
+ tests.CompletionSnippet(t, src, expected, placeholders, data.CompletionItems)
+ })
+ }
+ }
+ }
+ })
+
+ t.Run("UnimportedCompletion", func(t *testing.T) {
+ t.Helper()
+ eachCompletion(t, data.UnimportedCompletions, tests.UnimportedCompletion)
+ })
+
+ t.Run("DeepCompletion", func(t *testing.T) {
+ t.Helper()
+ eachCompletion(t, data.DeepCompletions, tests.DeepCompletion)
+ })
+
+ t.Run("FuzzyCompletion", func(t *testing.T) {
+ t.Helper()
+ eachCompletion(t, data.FuzzyCompletions, tests.FuzzyCompletion)
+ })
+
+ t.Run("CaseSensitiveCompletion", func(t *testing.T) {
+ t.Helper()
+ eachCompletion(t, data.CaseSensitiveCompletions, tests.CaseSensitiveCompletion)
+ })
+
+ t.Run("RankCompletions", func(t *testing.T) {
+ t.Helper()
+ eachCompletion(t, data.RankCompletions, tests.RankCompletion)
+ })
+
+ t.Run("CodeLens", func(t *testing.T) {
+ t.Helper()
+ for uri, want := range data.CodeLens {
+ // Check if we should skip this URI if the -modfile flag is not available.
+ if shouldSkip(data, uri) {
+ continue
+ }
+ t.Run(uriName(uri), func(t *testing.T) {
+ t.Helper()
+ tests.CodeLens(t, uri, want)
+ })
+ }
+ })
+
+ t.Run("Diagnostics", func(t *testing.T) {
+ t.Helper()
+ for uri, want := range data.Diagnostics {
+ // Check if we should skip this URI if the -modfile flag is not available.
+ if shouldSkip(data, uri) {
+ continue
+ }
+ t.Run(uriName(uri), func(t *testing.T) {
+ t.Helper()
+ tests.Diagnostics(t, uri, want)
+ })
+ }
+ })
+
+ t.Run("FoldingRange", func(t *testing.T) {
+ t.Helper()
+ for _, spn := range data.FoldingRanges {
+ t.Run(uriName(spn.URI()), func(t *testing.T) {
+ t.Helper()
+ tests.FoldingRanges(t, spn)
+ })
+ }
+ })
+
+ t.Run("Format", func(t *testing.T) {
+ t.Helper()
+ for _, spn := range data.Formats {
+ t.Run(uriName(spn.URI()), func(t *testing.T) {
+ t.Helper()
+ tests.Format(t, spn)
+ })
+ }
+ })
+
+ t.Run("Import", func(t *testing.T) {
+ t.Helper()
+ for _, spn := range data.Imports {
+ t.Run(uriName(spn.URI()), func(t *testing.T) {
+ t.Helper()
+ tests.Import(t, spn)
+ })
+ }
+ })
+
+ t.Run("SemanticTokens", func(t *testing.T) {
+ t.Helper()
+ for _, spn := range data.SemanticTokens {
+ t.Run(uriName(spn.URI()), func(t *testing.T) {
+ t.Helper()
+ tests.SemanticTokens(t, spn)
+ })
+ }
+ })
+
+ t.Run("SuggestedFix", func(t *testing.T) {
+ t.Helper()
+ for spn, actionKinds := range data.SuggestedFixes {
+ // Check if we should skip this spn if the -modfile flag is not available.
+ if shouldSkip(data, spn.URI()) {
+ continue
+ }
+ t.Run(SpanName(spn), func(t *testing.T) {
+ t.Helper()
+ tests.SuggestedFix(t, spn, actionKinds, 1)
+ })
+ }
+ })
+
+ t.Run("FunctionExtraction", func(t *testing.T) {
+ t.Helper()
+ for start, end := range data.FunctionExtractions {
+ // Check if we should skip this spn if the -modfile flag is not available.
+ if shouldSkip(data, start.URI()) {
+ continue
+ }
+ t.Run(SpanName(start), func(t *testing.T) {
+ t.Helper()
+ tests.FunctionExtraction(t, start, end)
+ })
+ }
+ })
+
+ t.Run("MethodExtraction", func(t *testing.T) {
+ t.Helper()
+ for start, end := range data.MethodExtractions {
+ // Check if we should skip this spn if the -modfile flag is not available.
+ if shouldSkip(data, start.URI()) {
+ continue
+ }
+ t.Run(SpanName(start), func(t *testing.T) {
+ t.Helper()
+ tests.MethodExtraction(t, start, end)
+ })
+ }
+ })
+
+ t.Run("Definition", func(t *testing.T) {
+ t.Helper()
+ for spn, d := range data.Definitions {
+ t.Run(SpanName(spn), func(t *testing.T) {
+ t.Helper()
+ if strings.Contains(t.Name(), "cgo") {
+ testenv.NeedsTool(t, "cgo")
+ }
+ tests.Definition(t, spn, d)
+ })
+ }
+ })
+
+ t.Run("Implementation", func(t *testing.T) {
+ t.Helper()
+ for spn, m := range data.Implementations {
+ t.Run(SpanName(spn), func(t *testing.T) {
+ t.Helper()
+ tests.Implementation(t, spn, m)
+ })
+ }
+ })
+
+ t.Run("Highlight", func(t *testing.T) {
+ t.Helper()
+ for pos, locations := range data.Highlights {
+ t.Run(SpanName(pos), func(t *testing.T) {
+ t.Helper()
+ tests.Highlight(t, pos, locations)
+ })
+ }
+ })
+
+ t.Run("InlayHints", func(t *testing.T) {
+ t.Helper()
+ for _, src := range data.InlayHints {
+ t.Run(SpanName(src), func(t *testing.T) {
+ t.Helper()
+ tests.InlayHints(t, src)
+ })
+ }
+ })
+
+ t.Run("References", func(t *testing.T) {
+ t.Helper()
+ for src, itemList := range data.References {
+ t.Run(SpanName(src), func(t *testing.T) {
+ t.Helper()
+ tests.References(t, src, itemList)
+ })
+ }
+ })
+
+ t.Run("Renames", func(t *testing.T) {
+ t.Helper()
+ for spn, newText := range data.Renames {
+ t.Run(uriName(spn.URI())+"_"+newText, func(t *testing.T) {
+ t.Helper()
+ tests.Rename(t, spn, newText)
+ })
+ }
+ })
+
+ t.Run("PrepareRenames", func(t *testing.T) {
+ t.Helper()
+ for src, want := range data.PrepareRenames {
+ t.Run(SpanName(src), func(t *testing.T) {
+ t.Helper()
+ tests.PrepareRename(t, src, want)
+ })
+ }
+ })
+
+ t.Run("Symbols", func(t *testing.T) {
+ t.Helper()
+ for uri, allSymbols := range data.Symbols {
+ byParent := make(map[string][]*symbol)
+ for _, sym := range allSymbols {
+ if sym.parentID != "" {
+ byParent[sym.parentID] = append(byParent[sym.parentID], sym)
+ }
+ }
+
+ // collectChildren does a depth-first traversal of the symbol tree,
+ // computing children of child nodes before returning to their parent.
+ // This is necessary as the Children field is slice of non-pointer types,
+ // and therefore we need to be careful to mutate children first before
+ // assigning them to their parent.
+ var collectChildren func(id string) []protocol.DocumentSymbol
+ collectChildren = func(id string) []protocol.DocumentSymbol {
+ children := byParent[id]
+ // delete from byParent before recursing, to ensure that
+ // collectChildren terminates even in the presence of cycles.
+ delete(byParent, id)
+ var result []protocol.DocumentSymbol
+ for _, child := range children {
+ child.pSymbol.Children = collectChildren(child.id)
+ result = append(result, child.pSymbol)
+ }
+ return result
+ }
+
+ var topLevel []protocol.DocumentSymbol
+ for _, sym := range allSymbols {
+ if sym.parentID == "" {
+ sym.pSymbol.Children = collectChildren(sym.id)
+ topLevel = append(topLevel, sym.pSymbol)
+ }
+ }
+
+ t.Run(uriName(uri), func(t *testing.T) {
+ t.Helper()
+ tests.Symbols(t, uri, topLevel)
+ })
+ }
+ })
+
+ t.Run("WorkspaceSymbols", func(t *testing.T) {
+ t.Helper()
+
+ for _, typ := range []WorkspaceSymbolsTestType{
+ WorkspaceSymbolsDefault,
+ WorkspaceSymbolsCaseSensitive,
+ WorkspaceSymbolsFuzzy,
+ } {
+ for uri, cases := range data.WorkspaceSymbols[typ] {
+ for _, query := range cases {
+ name := query
+ if name == "" {
+ name = "EmptyQuery"
+ }
+ t.Run(name, func(t *testing.T) {
+ t.Helper()
+ tests.WorkspaceSymbols(t, uri, query, typ)
+ })
+ }
+ }
+ }
+
+ })
+
+ t.Run("SignatureHelp", func(t *testing.T) {
+ t.Helper()
+ for spn, expectedSignature := range data.Signatures {
+ t.Run(SpanName(spn), func(t *testing.T) {
+ t.Helper()
+ tests.SignatureHelp(t, spn, expectedSignature)
+ })
+ }
+ })
+
+ t.Run("Link", func(t *testing.T) {
+ t.Helper()
+ for uri, wantLinks := range data.Links {
+ // If we are testing GOPATH, then we do not want links with the versions
+ // attached (pkg.go.dev/repoa/moda@v1.1.0/pkg), unless the file is a
+ // go.mod, then we can skip it altogether.
+ if data.Exported.Exporter == packagestest.GOPATH {
+ if strings.HasSuffix(uri.Filename(), ".mod") {
+ continue
+ }
+ re := regexp.MustCompile(`@v\d+\.\d+\.[\w-]+`)
+ for i, link := range wantLinks {
+ wantLinks[i].Target = re.ReplaceAllString(link.Target, "")
+ }
+ }
+ t.Run(uriName(uri), func(t *testing.T) {
+ t.Helper()
+ tests.Link(t, uri, wantLinks)
+ })
+ }
+ })
+
+ t.Run("AddImport", func(t *testing.T) {
+ t.Helper()
+ for uri, exp := range data.AddImport {
+ t.Run(uriName(uri), func(t *testing.T) {
+ tests.AddImport(t, uri, exp)
+ })
+ }
+ })
+
+ t.Run("SelectionRanges", func(t *testing.T) {
+ t.Helper()
+ for _, span := range data.SelectionRanges {
+ t.Run(SpanName(span), func(t *testing.T) {
+ tests.SelectionRanges(t, span)
+ })
+ }
+ })
+
+ if *UpdateGolden {
+ for _, golden := range data.golden {
+ if !golden.Modified {
+ continue
+ }
+ sort.Slice(golden.Archive.Files, func(i, j int) bool {
+ return golden.Archive.Files[i].Name < golden.Archive.Files[j].Name
+ })
+ if err := ioutil.WriteFile(golden.Filename, txtar.Format(golden.Archive), 0666); err != nil {
+ t.Fatal(err)
+ }
+ }
+ }
+}
+
+func checkData(t *testing.T, data *Data) {
+ buf := &bytes.Buffer{}
+ diagnosticsCount := 0
+ for _, want := range data.Diagnostics {
+ diagnosticsCount += len(want)
+ }
+ linksCount := 0
+ for _, want := range data.Links {
+ linksCount += len(want)
+ }
+ definitionCount := 0
+ typeDefinitionCount := 0
+ for _, d := range data.Definitions {
+ if d.IsType {
+ typeDefinitionCount++
+ } else {
+ definitionCount++
+ }
+ }
+
+ snippetCount := 0
+ for _, want := range data.CompletionSnippets {
+ snippetCount += len(want)
+ }
+
+ countCompletions := func(c map[span.Span][]Completion) (count int) {
+ for _, want := range c {
+ count += len(want)
+ }
+ return count
+ }
+
+ countCodeLens := func(c map[span.URI][]protocol.CodeLens) (count int) {
+ for _, want := range c {
+ count += len(want)
+ }
+ return count
+ }
+
+ countWorkspaceSymbols := func(c map[WorkspaceSymbolsTestType]map[span.URI][]string) (count int) {
+ for _, typs := range c {
+ for _, queries := range typs {
+ count += len(queries)
+ }
+ }
+ return count
+ }
+
+ fmt.Fprintf(buf, "CallHierarchyCount = %v\n", len(data.CallHierarchy))
+ fmt.Fprintf(buf, "CodeLensCount = %v\n", countCodeLens(data.CodeLens))
+ fmt.Fprintf(buf, "CompletionsCount = %v\n", countCompletions(data.Completions))
+ fmt.Fprintf(buf, "CompletionSnippetCount = %v\n", snippetCount)
+ fmt.Fprintf(buf, "UnimportedCompletionsCount = %v\n", countCompletions(data.UnimportedCompletions))
+ fmt.Fprintf(buf, "DeepCompletionsCount = %v\n", countCompletions(data.DeepCompletions))
+ fmt.Fprintf(buf, "FuzzyCompletionsCount = %v\n", countCompletions(data.FuzzyCompletions))
+ fmt.Fprintf(buf, "RankedCompletionsCount = %v\n", countCompletions(data.RankCompletions))
+ fmt.Fprintf(buf, "CaseSensitiveCompletionsCount = %v\n", countCompletions(data.CaseSensitiveCompletions))
+ fmt.Fprintf(buf, "DiagnosticsCount = %v\n", diagnosticsCount)
+ fmt.Fprintf(buf, "FoldingRangesCount = %v\n", len(data.FoldingRanges))
+ fmt.Fprintf(buf, "FormatCount = %v\n", len(data.Formats))
+ fmt.Fprintf(buf, "ImportCount = %v\n", len(data.Imports))
+ fmt.Fprintf(buf, "SemanticTokenCount = %v\n", len(data.SemanticTokens))
+ fmt.Fprintf(buf, "SuggestedFixCount = %v\n", len(data.SuggestedFixes))
+ fmt.Fprintf(buf, "FunctionExtractionCount = %v\n", len(data.FunctionExtractions))
+ fmt.Fprintf(buf, "MethodExtractionCount = %v\n", len(data.MethodExtractions))
+ fmt.Fprintf(buf, "DefinitionsCount = %v\n", definitionCount)
+ fmt.Fprintf(buf, "TypeDefinitionsCount = %v\n", typeDefinitionCount)
+ fmt.Fprintf(buf, "HighlightsCount = %v\n", len(data.Highlights))
+ fmt.Fprintf(buf, "InlayHintsCount = %v\n", len(data.InlayHints))
+ fmt.Fprintf(buf, "ReferencesCount = %v\n", len(data.References))
+ fmt.Fprintf(buf, "RenamesCount = %v\n", len(data.Renames))
+ fmt.Fprintf(buf, "PrepareRenamesCount = %v\n", len(data.PrepareRenames))
+ fmt.Fprintf(buf, "SymbolsCount = %v\n", len(data.Symbols))
+ fmt.Fprintf(buf, "WorkspaceSymbolsCount = %v\n", countWorkspaceSymbols(data.WorkspaceSymbols))
+ fmt.Fprintf(buf, "SignaturesCount = %v\n", len(data.Signatures))
+ fmt.Fprintf(buf, "LinksCount = %v\n", linksCount)
+ fmt.Fprintf(buf, "ImplementationsCount = %v\n", len(data.Implementations))
+ fmt.Fprintf(buf, "SelectionRangesCount = %v\n", len(data.SelectionRanges))
+
+ want := string(data.Golden(t, "summary", summaryFile, func() ([]byte, error) {
+ return buf.Bytes(), nil
+ }))
+ got := buf.String()
+ if want != got {
+ // These counters change when assertions are added or removed.
+ // They act as an independent safety net to ensure that the
+ // tests didn't spuriously pass because they did no work.
+ t.Errorf("test summary does not match:\n%s\n(Run with -golden to update golden file; also, there may be one per Go version.)", compare.Text(want, got))
+ }
+}
+
+func (data *Data) Mapper(uri span.URI) (*protocol.Mapper, error) {
+ data.mappersMu.Lock()
+ defer data.mappersMu.Unlock()
+
+ if _, ok := data.mappers[uri]; !ok {
+ content, err := data.Exported.FileContents(uri.Filename())
+ if err != nil {
+ return nil, err
+ }
+ data.mappers[uri] = protocol.NewMapper(uri, content)
+ }
+ return data.mappers[uri], nil
+}
+
+func (data *Data) Golden(t *testing.T, tag, target string, update func() ([]byte, error)) []byte {
+ t.Helper()
+ fragment, found := data.fragments[target]
+ if !found {
+ if filepath.IsAbs(target) {
+ t.Fatalf("invalid golden file fragment %v", target)
+ }
+ fragment = target
+ }
+ golden := data.golden[fragment]
+ if golden == nil {
+ if !*UpdateGolden {
+ t.Fatalf("could not find golden file %v: %v", fragment, tag)
+ }
+ golden = &Golden{
+ Filename: filepath.Join(data.dir, fragment+goldenFileSuffix),
+ Archive: &txtar.Archive{},
+ Modified: true,
+ }
+ data.golden[fragment] = golden
+ }
+ var file *txtar.File
+ for i := range golden.Archive.Files {
+ f := &golden.Archive.Files[i]
+ if f.Name == tag {
+ file = f
+ break
+ }
+ }
+ if *UpdateGolden {
+ if file == nil {
+ golden.Archive.Files = append(golden.Archive.Files, txtar.File{
+ Name: tag,
+ })
+ file = &golden.Archive.Files[len(golden.Archive.Files)-1]
+ }
+ contents, err := update()
+ if err != nil {
+ t.Fatalf("could not update golden file %v: %v", fragment, err)
+ }
+ file.Data = append(contents, '\n') // add trailing \n for txtar
+ golden.Modified = true
+
+ }
+ if file == nil {
+ t.Fatalf("could not find golden contents %v: %v", fragment, tag)
+ }
+ if len(file.Data) == 0 {
+ return file.Data
+ }
+ return file.Data[:len(file.Data)-1] // drop the trailing \n
+}
+
+func (data *Data) collectCodeLens(spn span.Span, title, cmd string) {
+ data.CodeLens[spn.URI()] = append(data.CodeLens[spn.URI()], protocol.CodeLens{
+ Range: data.mustRange(spn),
+ Command: &protocol.Command{
+ Title: title,
+ Command: cmd,
+ },
+ })
+}
+
+func (data *Data) collectDiagnostics(spn span.Span, msgSource, msgPattern, msgSeverity string) {
+ severity := protocol.SeverityError
+ switch msgSeverity {
+ case "error":
+ severity = protocol.SeverityError
+ case "warning":
+ severity = protocol.SeverityWarning
+ case "hint":
+ severity = protocol.SeverityHint
+ case "information":
+ severity = protocol.SeverityInformation
+ }
+
+ data.Diagnostics[spn.URI()] = append(data.Diagnostics[spn.URI()], &source.Diagnostic{
+ Range: data.mustRange(spn),
+ Severity: severity,
+ Source: source.DiagnosticSource(msgSource),
+ Message: msgPattern,
+ })
+}
+
+func (data *Data) collectCompletions(typ CompletionTestType) func(span.Span, []token.Pos) {
+ result := func(m map[span.Span][]Completion, src span.Span, expected []token.Pos) {
+ m[src] = append(m[src], Completion{
+ CompletionItems: expected,
+ })
+ }
+ switch typ {
+ case CompletionDeep:
+ return func(src span.Span, expected []token.Pos) {
+ result(data.DeepCompletions, src, expected)
+ }
+ case CompletionUnimported:
+ return func(src span.Span, expected []token.Pos) {
+ result(data.UnimportedCompletions, src, expected)
+ }
+ case CompletionFuzzy:
+ return func(src span.Span, expected []token.Pos) {
+ result(data.FuzzyCompletions, src, expected)
+ }
+ case CompletionRank:
+ return func(src span.Span, expected []token.Pos) {
+ result(data.RankCompletions, src, expected)
+ }
+ case CompletionCaseSensitive:
+ return func(src span.Span, expected []token.Pos) {
+ result(data.CaseSensitiveCompletions, src, expected)
+ }
+ default:
+ return func(src span.Span, expected []token.Pos) {
+ result(data.Completions, src, expected)
+ }
+ }
+}
+
+func (data *Data) collectCompletionItems(pos token.Pos, label, detail, kind string, args []string) {
+ var documentation string
+ if len(args) > 3 {
+ documentation = args[3]
+ }
+ data.CompletionItems[pos] = &completion.CompletionItem{
+ Label: label,
+ Detail: detail,
+ Kind: protocol.ParseCompletionItemKind(kind),
+ Documentation: documentation,
+ }
+}
+
+func (data *Data) collectFoldingRanges(spn span.Span) {
+ data.FoldingRanges = append(data.FoldingRanges, spn)
+}
+
+func (data *Data) collectFormats(spn span.Span) {
+ data.Formats = append(data.Formats, spn)
+}
+
+func (data *Data) collectImports(spn span.Span) {
+ data.Imports = append(data.Imports, spn)
+}
+
+func (data *Data) collectAddImports(spn span.Span, imp string) {
+ data.AddImport[spn.URI()] = imp
+}
+
+func (data *Data) collectSemanticTokens(spn span.Span) {
+ data.SemanticTokens = append(data.SemanticTokens, spn)
+}
+
+func (data *Data) collectSuggestedFixes(spn span.Span, actionKind, fix string) {
+ data.SuggestedFixes[spn] = append(data.SuggestedFixes[spn], SuggestedFix{actionKind, fix})
+}
+
+func (data *Data) collectFunctionExtractions(start span.Span, end span.Span) {
+ if _, ok := data.FunctionExtractions[start]; !ok {
+ data.FunctionExtractions[start] = end
+ }
+}
+
+func (data *Data) collectMethodExtractions(start span.Span, end span.Span) {
+ if _, ok := data.MethodExtractions[start]; !ok {
+ data.MethodExtractions[start] = end
+ }
+}
+
+func (data *Data) collectDefinitions(src, target span.Span) {
+ data.Definitions[src] = Definition{
+ Src: src,
+ Def: target,
+ }
+}
+
+func (data *Data) collectSelectionRanges(spn span.Span) {
+ data.SelectionRanges = append(data.SelectionRanges, spn)
+}
+
+func (data *Data) collectImplementations(src span.Span, targets []span.Span) {
+ data.Implementations[src] = targets
+}
+
+func (data *Data) collectIncomingCalls(src span.Span, calls []span.Span) {
+ for _, call := range calls {
+ rng := data.mustRange(call)
+ // we're only comparing protocol.range
+ if data.CallHierarchy[src] != nil {
+ data.CallHierarchy[src].IncomingCalls = append(data.CallHierarchy[src].IncomingCalls,
+ protocol.CallHierarchyItem{
+ URI: protocol.DocumentURI(call.URI()),
+ Range: rng,
+ })
+ } else {
+ data.CallHierarchy[src] = &CallHierarchyResult{
+ IncomingCalls: []protocol.CallHierarchyItem{
+ {URI: protocol.DocumentURI(call.URI()), Range: rng},
+ },
+ }
+ }
+ }
+}
+
+func (data *Data) collectOutgoingCalls(src span.Span, calls []span.Span) {
+ if data.CallHierarchy[src] == nil {
+ data.CallHierarchy[src] = &CallHierarchyResult{}
+ }
+ for _, call := range calls {
+ // we're only comparing protocol.range
+ data.CallHierarchy[src].OutgoingCalls = append(data.CallHierarchy[src].OutgoingCalls,
+ protocol.CallHierarchyItem{
+ URI: protocol.DocumentURI(call.URI()),
+ Range: data.mustRange(call),
+ })
+ }
+}
+
+func (data *Data) collectHoverDefinitions(src, target span.Span) {
+ data.Definitions[src] = Definition{
+ Src: src,
+ Def: target,
+ OnlyHover: true,
+ }
+}
+
+func (data *Data) collectTypeDefinitions(src, target span.Span) {
+ data.Definitions[src] = Definition{
+ Src: src,
+ Def: target,
+ IsType: true,
+ }
+}
+
+func (data *Data) collectDefinitionNames(src span.Span, name string) {
+ d := data.Definitions[src]
+ d.Name = name
+ data.Definitions[src] = d
+}
+
+func (data *Data) collectHighlights(src span.Span, expected []span.Span) {
+ // Declaring a highlight in a test file: @highlight(src, expected1, expected2)
+ data.Highlights[src] = append(data.Highlights[src], expected...)
+}
+
+func (data *Data) collectInlayHints(src span.Span) {
+ data.InlayHints = append(data.InlayHints, src)
+}
+
+func (data *Data) collectReferences(src span.Span, expected []span.Span) {
+ data.References[src] = expected
+}
+
+func (data *Data) collectRenames(src span.Span, newText string) {
+ data.Renames[src] = newText
+}
+
+func (data *Data) collectPrepareRenames(src, spn span.Span, placeholder string) {
+ data.PrepareRenames[src] = &source.PrepareItem{
+ Range: data.mustRange(spn),
+ Text: placeholder,
+ }
+}
+
+// collectSymbols is responsible for collecting @symbol annotations.
+func (data *Data) collectSymbols(name string, selectionRng span.Span, kind, detail, id, parentID string) {
+ // We don't set 'Range' here as it is difficult (impossible?) to express
+ // multi-line ranges in the packagestest framework.
+ uri := selectionRng.URI()
+ data.Symbols[uri] = append(data.Symbols[uri], &symbol{
+ pSymbol: protocol.DocumentSymbol{
+ Name: name,
+ Kind: protocol.ParseSymbolKind(kind),
+ SelectionRange: data.mustRange(selectionRng),
+ Detail: detail,
+ },
+ id: id,
+ parentID: parentID,
+ })
+}
+
+// mustRange converts spn into a protocol.Range, panicking on any error.
+func (data *Data) mustRange(spn span.Span) protocol.Range {
+ m, err := data.Mapper(spn.URI())
+ rng, err := m.SpanRange(spn)
+ if err != nil {
+ panic(fmt.Sprintf("converting span %s to range: %v", spn, err))
+ }
+ return rng
+}
+
+func (data *Data) collectWorkspaceSymbols(typ WorkspaceSymbolsTestType) func(*expect.Note, string) {
+ return func(note *expect.Note, query string) {
+ if data.WorkspaceSymbols[typ] == nil {
+ data.WorkspaceSymbols[typ] = make(map[span.URI][]string)
+ }
+ pos := safetoken.StartPosition(data.Exported.ExpectFileSet, note.Pos)
+ uri := span.URIFromPath(pos.Filename)
+ data.WorkspaceSymbols[typ][uri] = append(data.WorkspaceSymbols[typ][uri], query)
+ }
+}
+
+func (data *Data) collectSignatures(spn span.Span, signature string, activeParam int64) {
+ data.Signatures[spn] = &protocol.SignatureHelp{
+ Signatures: []protocol.SignatureInformation{
+ {
+ Label: signature,
+ },
+ },
+ ActiveParameter: uint32(activeParam),
+ }
+ // Hardcode special case to test the lack of a signature.
+ if signature == "" && activeParam == 0 {
+ data.Signatures[spn] = nil
+ }
+}
+
+func (data *Data) collectCompletionSnippets(spn span.Span, item token.Pos, plain, placeholder string) {
+ data.CompletionSnippets[spn] = append(data.CompletionSnippets[spn], CompletionSnippet{
+ CompletionItem: item,
+ PlainSnippet: plain,
+ PlaceholderSnippet: placeholder,
+ })
+}
+
+func (data *Data) collectLinks(spn span.Span, link string, note *expect.Note, fset *token.FileSet) {
+ position := safetoken.StartPosition(fset, note.Pos)
+ uri := spn.URI()
+ data.Links[uri] = append(data.Links[uri], Link{
+ Src: spn,
+ Target: link,
+ NotePosition: position,
+ })
+}
+
+func uriName(uri span.URI) string {
+ return filepath.Base(strings.TrimSuffix(uri.Filename(), ".go"))
+}
+
+// TODO(golang/go#54845): improve the formatting here to match standard
+// line:column position formatting.
+func SpanName(spn span.Span) string {
+ return fmt.Sprintf("%v_%v_%v", uriName(spn.URI()), spn.Start().Line(), spn.Start().Column())
+}
+
+func CopyFolderToTempDir(folder string) (string, error) {
+ if _, err := os.Stat(folder); err != nil {
+ return "", err
+ }
+ dst, err := ioutil.TempDir("", "modfile_test")
+ if err != nil {
+ return "", err
+ }
+ fds, err := ioutil.ReadDir(folder)
+ if err != nil {
+ return "", err
+ }
+ for _, fd := range fds {
+ srcfp := filepath.Join(folder, fd.Name())
+ stat, err := os.Stat(srcfp)
+ if err != nil {
+ return "", err
+ }
+ if !stat.Mode().IsRegular() {
+ return "", fmt.Errorf("cannot copy non regular file %s", srcfp)
+ }
+ contents, err := ioutil.ReadFile(srcfp)
+ if err != nil {
+ return "", err
+ }
+ if err := ioutil.WriteFile(filepath.Join(dst, fd.Name()), contents, stat.Mode()); err != nil {
+ return "", err
+ }
+ }
+ return dst, nil
+}
+
+func shouldSkip(data *Data, uri span.URI) bool {
+ if data.ModfileFlagAvailable {
+ return false
+ }
+ // If the -modfile flag is not available, then we do not want to run
+ // any tests on the go.mod file.
+ if strings.HasSuffix(uri.Filename(), ".mod") {
+ return true
+ }
+ // If the -modfile flag is not available, then we do not want to test any
+ // uri that contains "go mod tidy".
+ m, err := data.Mapper(uri)
+ return err == nil && strings.Contains(string(m.Content), ", \"go mod tidy\",")
+}
diff --git a/gopls/internal/lsp/tests/util.go b/gopls/internal/lsp/tests/util.go
new file mode 100644
index 000000000..fd65ecb55
--- /dev/null
+++ b/gopls/internal/lsp/tests/util.go
@@ -0,0 +1,547 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tests
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "go/token"
+ "path"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/google/go-cmp/cmp/cmpopts"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/lsp/source/completion"
+ "golang.org/x/tools/gopls/internal/lsp/tests/compare"
+ "golang.org/x/tools/gopls/internal/span"
+)
+
+var builtins = map[string]bool{
+ "append": true,
+ "cap": true,
+ "close": true,
+ "complex": true,
+ "copy": true,
+ "delete": true,
+ "error": true,
+ "false": true,
+ "imag": true,
+ "iota": true,
+ "len": true,
+ "make": true,
+ "new": true,
+ "nil": true,
+ "panic": true,
+ "print": true,
+ "println": true,
+ "real": true,
+ "recover": true,
+ "true": true,
+}
+
+// DiffLinks takes the links we got and checks if they are located within the source or a Note.
+// If the link is within a Note, the link is removed.
+// Returns an diff comment if there are differences and empty string if no diffs.
+func DiffLinks(mapper *protocol.Mapper, wantLinks []Link, gotLinks []protocol.DocumentLink) string {
+ var notePositions []token.Position
+ links := make(map[span.Span]string, len(wantLinks))
+ for _, link := range wantLinks {
+ links[link.Src] = link.Target
+ notePositions = append(notePositions, link.NotePosition)
+ }
+
+ var msg strings.Builder
+ for _, link := range gotLinks {
+ spn, err := mapper.RangeSpan(link.Range)
+ if err != nil {
+ return fmt.Sprintf("%v", err)
+ }
+ linkInNote := false
+ for _, notePosition := range notePositions {
+ // Drop the links found inside expectation notes arguments as this links are not collected by expect package.
+ if notePosition.Line == spn.Start().Line() &&
+ notePosition.Column <= spn.Start().Column() {
+ delete(links, spn)
+ linkInNote = true
+ }
+ }
+ if linkInNote {
+ continue
+ }
+
+ if target, ok := links[spn]; ok {
+ delete(links, spn)
+ if target != link.Target {
+ fmt.Fprintf(&msg, "%s: want link with target %q, got %q\n", spn, target, link.Target)
+ }
+ } else {
+ fmt.Fprintf(&msg, "%s: got unexpected link with target %q\n", spn, link.Target)
+ }
+ }
+ for spn, target := range links {
+ fmt.Fprintf(&msg, "%s: expected link with target %q is missing\n", spn, target)
+ }
+ return msg.String()
+}
+
+// CompareDiagnostics reports testing errors to t when the diagnostic set got
+// does not match want. If the sole expectation has source "no_diagnostics",
+// the test expects that no diagnostics were received for the given document.
+func CompareDiagnostics(t *testing.T, uri span.URI, want, got []*source.Diagnostic) {
+ t.Helper()
+ fileName := path.Base(string(uri))
+
+ // A special case to test that there are no diagnostics for a file.
+ if len(want) == 1 && want[0].Source == "no_diagnostics" {
+ want = nil
+ }
+
+ // Build a helper function to match an actual diagnostic to an overlapping
+ // expected diagnostic (if any).
+ unmatched := make([]*source.Diagnostic, len(want))
+ copy(unmatched, want)
+ source.SortDiagnostics(unmatched)
+ match := func(g *source.Diagnostic) *source.Diagnostic {
+ // Find the last expected diagnostic d for which start(d) < end(g), and
+ // check to see if it overlaps.
+ i := sort.Search(len(unmatched), func(i int) bool {
+ d := unmatched[i]
+ // See rangeOverlaps: if a range is a single point, we consider End to be
+ // included in the range...
+ if g.Range.Start == g.Range.End {
+ return protocol.ComparePosition(d.Range.Start, g.Range.End) > 0
+ }
+ // ...otherwise the end position of a range is not included.
+ return protocol.ComparePosition(d.Range.Start, g.Range.End) >= 0
+ })
+ if i == 0 {
+ return nil
+ }
+ w := unmatched[i-1]
+ if rangeOverlaps(w.Range, g.Range) {
+ unmatched = append(unmatched[:i-1], unmatched[i:]...)
+ return w
+ }
+ return nil
+ }
+
+ for _, g := range got {
+ w := match(g)
+ if w == nil {
+ t.Errorf("%s:%s: unexpected diagnostic %q", fileName, g.Range, g.Message)
+ continue
+ }
+ if match, err := regexp.MatchString(w.Message, g.Message); err != nil {
+ t.Errorf("%s:%s: invalid regular expression %q: %v", fileName, w.Range.Start, w.Message, err)
+ } else if !match {
+ t.Errorf("%s:%s: got Message %q, want match for pattern %q", fileName, g.Range.Start, g.Message, w.Message)
+ }
+ if w.Severity != g.Severity {
+ t.Errorf("%s:%s: got Severity %v, want %v", fileName, g.Range.Start, g.Severity, w.Severity)
+ }
+ if w.Source != g.Source {
+ t.Errorf("%s:%s: got Source %v, want %v", fileName, g.Range.Start, g.Source, w.Source)
+ }
+ }
+
+ for _, w := range unmatched {
+ t.Errorf("%s:%s: unmatched diagnostic pattern %q", fileName, w.Range, w.Message)
+ }
+}
+
+// rangeOverlaps reports whether r1 and r2 overlap.
+func rangeOverlaps(r1, r2 protocol.Range) bool {
+ if inRange(r2.Start, r1) || inRange(r1.Start, r2) {
+ return true
+ }
+ return false
+}
+
+// inRange reports whether p is contained within [r.Start, r.End), or if p ==
+// r.Start == r.End (special handling for the case where the range is a single
+// point).
+func inRange(p protocol.Position, r protocol.Range) bool {
+ if protocol.IsPoint(r) {
+ return protocol.ComparePosition(r.Start, p) == 0
+ }
+ if protocol.ComparePosition(r.Start, p) <= 0 && protocol.ComparePosition(p, r.End) < 0 {
+ return true
+ }
+ return false
+}
+
+func DiffCodeLens(uri span.URI, want, got []protocol.CodeLens) string {
+ sortCodeLens(want)
+ sortCodeLens(got)
+
+ if len(got) != len(want) {
+ return summarizeCodeLens(-1, uri, want, got, "different lengths got %v want %v", len(got), len(want))
+ }
+ for i, w := range want {
+ g := got[i]
+ if w.Command.Command != g.Command.Command {
+ return summarizeCodeLens(i, uri, want, got, "incorrect Command Name got %v want %v", g.Command.Command, w.Command.Command)
+ }
+ if w.Command.Title != g.Command.Title {
+ return summarizeCodeLens(i, uri, want, got, "incorrect Command Title got %v want %v", g.Command.Title, w.Command.Title)
+ }
+ if protocol.ComparePosition(w.Range.Start, g.Range.Start) != 0 {
+ return summarizeCodeLens(i, uri, want, got, "incorrect Start got %v want %v", g.Range.Start, w.Range.Start)
+ }
+ if !protocol.IsPoint(g.Range) { // Accept any 'want' range if the codelens returns a zero-length range.
+ if protocol.ComparePosition(w.Range.End, g.Range.End) != 0 {
+ return summarizeCodeLens(i, uri, want, got, "incorrect End got %v want %v", g.Range.End, w.Range.End)
+ }
+ }
+ }
+ return ""
+}
+
+func sortCodeLens(c []protocol.CodeLens) {
+ sort.Slice(c, func(i int, j int) bool {
+ if r := protocol.CompareRange(c[i].Range, c[j].Range); r != 0 {
+ return r < 0
+ }
+ if c[i].Command.Command < c[j].Command.Command {
+ return true
+ } else if c[i].Command.Command == c[j].Command.Command {
+ return c[i].Command.Title < c[j].Command.Title
+ } else {
+ return false
+ }
+ })
+}
+
+func summarizeCodeLens(i int, uri span.URI, want, got []protocol.CodeLens, reason string, args ...interface{}) string {
+ msg := &bytes.Buffer{}
+ fmt.Fprint(msg, "codelens failed")
+ if i >= 0 {
+ fmt.Fprintf(msg, " at %d", i)
+ }
+ fmt.Fprint(msg, " because of ")
+ fmt.Fprintf(msg, reason, args...)
+ fmt.Fprint(msg, ":\nexpected:\n")
+ for _, d := range want {
+ fmt.Fprintf(msg, " %s:%v: %s | %s\n", uri, d.Range, d.Command.Command, d.Command.Title)
+ }
+ fmt.Fprintf(msg, "got:\n")
+ for _, d := range got {
+ fmt.Fprintf(msg, " %s:%v: %s | %s\n", uri, d.Range, d.Command.Command, d.Command.Title)
+ }
+ return msg.String()
+}
+
+func DiffSignatures(spn span.Span, want, got *protocol.SignatureHelp) string {
+ decorate := func(f string, args ...interface{}) string {
+ return fmt.Sprintf("invalid signature at %s: %s", spn, fmt.Sprintf(f, args...))
+ }
+ if len(got.Signatures) != 1 {
+ return decorate("wanted 1 signature, got %d", len(got.Signatures))
+ }
+ if got.ActiveSignature != 0 {
+ return decorate("wanted active signature of 0, got %d", int(got.ActiveSignature))
+ }
+ if want.ActiveParameter != got.ActiveParameter {
+ return decorate("wanted active parameter of %d, got %d", want.ActiveParameter, int(got.ActiveParameter))
+ }
+ g := got.Signatures[0]
+ w := want.Signatures[0]
+ if diff := compare.Text(NormalizeAny(w.Label), NormalizeAny(g.Label)); diff != "" {
+ return decorate("mismatched labels:\n%s", diff)
+ }
+ var paramParts []string
+ for _, p := range g.Parameters {
+ paramParts = append(paramParts, p.Label)
+ }
+ paramsStr := strings.Join(paramParts, ", ")
+ if !strings.Contains(g.Label, paramsStr) {
+ return decorate("expected signature %q to contain params %q", g.Label, paramsStr)
+ }
+ return ""
+}
+
+// NormalizeAny replaces occurrences of interface{} in input with any.
+//
+// In Go 1.18, standard library functions were changed to use the 'any'
+// alias in place of interface{}, which affects their type string.
+func NormalizeAny(input string) string {
+ return strings.ReplaceAll(input, "interface{}", "any")
+}
+
+// DiffCallHierarchyItems returns the diff between expected and actual call locations for incoming/outgoing call hierarchies
+func DiffCallHierarchyItems(gotCalls []protocol.CallHierarchyItem, expectedCalls []protocol.CallHierarchyItem) string {
+ expected := make(map[protocol.Location]bool)
+ for _, call := range expectedCalls {
+ expected[protocol.Location{URI: call.URI, Range: call.Range}] = true
+ }
+
+ got := make(map[protocol.Location]bool)
+ for _, call := range gotCalls {
+ got[protocol.Location{URI: call.URI, Range: call.Range}] = true
+ }
+ if len(got) != len(expected) {
+ return fmt.Sprintf("expected %d calls but got %d", len(expected), len(got))
+ }
+ for spn := range got {
+ if !expected[spn] {
+ return fmt.Sprintf("incorrect calls, expected locations %v but got locations %v", expected, got)
+ }
+ }
+ return ""
+}
+
+func FilterBuiltins(src span.Span, items []protocol.CompletionItem) []protocol.CompletionItem {
+ var (
+ got []protocol.CompletionItem
+ wantBuiltins = strings.Contains(string(src.URI()), "builtins")
+ wantKeywords = strings.Contains(string(src.URI()), "keywords")
+ )
+ for _, item := range items {
+ if !wantBuiltins && isBuiltin(item.Label, item.Detail, item.Kind) {
+ continue
+ }
+
+ if !wantKeywords && token.Lookup(item.Label).IsKeyword() {
+ continue
+ }
+
+ got = append(got, item)
+ }
+ return got
+}
+
+func isBuiltin(label, detail string, kind protocol.CompletionItemKind) bool {
+ if detail == "" && kind == protocol.ClassCompletion {
+ return true
+ }
+ // Remaining builtin constants, variables, interfaces, and functions.
+ trimmed := label
+ if i := strings.Index(trimmed, "("); i >= 0 {
+ trimmed = trimmed[:i]
+ }
+ return builtins[trimmed]
+}
+
+func CheckCompletionOrder(want, got []protocol.CompletionItem, strictScores bool) string {
+ var (
+ matchedIdxs []int
+ lastGotIdx int
+ lastGotSort float64
+ inOrder = true
+ errorMsg = "completions out of order"
+ )
+ for _, w := range want {
+ var found bool
+ for i, g := range got {
+ if w.Label == g.Label && NormalizeAny(w.Detail) == NormalizeAny(g.Detail) && w.Kind == g.Kind {
+ matchedIdxs = append(matchedIdxs, i)
+ found = true
+
+ if i < lastGotIdx {
+ inOrder = false
+ }
+ lastGotIdx = i
+
+ sort, _ := strconv.ParseFloat(g.SortText, 64)
+ if strictScores && len(matchedIdxs) > 1 && sort <= lastGotSort {
+ inOrder = false
+ errorMsg = "candidate scores not strictly decreasing"
+ }
+ lastGotSort = sort
+
+ break
+ }
+ }
+ if !found {
+ return summarizeCompletionItems(-1, []protocol.CompletionItem{w}, got, "didn't find expected completion")
+ }
+ }
+
+ sort.Ints(matchedIdxs)
+ matched := make([]protocol.CompletionItem, 0, len(matchedIdxs))
+ for _, idx := range matchedIdxs {
+ matched = append(matched, got[idx])
+ }
+
+ if !inOrder {
+ return summarizeCompletionItems(-1, want, matched, errorMsg)
+ }
+
+ return ""
+}
+
+func DiffSnippets(want string, got *protocol.CompletionItem) string {
+ if want == "" {
+ if got != nil {
+ x := got.TextEdit
+ return fmt.Sprintf("expected no snippet but got %s", x.NewText)
+ }
+ } else {
+ if got == nil {
+ return fmt.Sprintf("couldn't find completion matching %q", want)
+ }
+ x := got.TextEdit
+ if want != x.NewText {
+ return fmt.Sprintf("expected snippet %q, got %q", want, x.NewText)
+ }
+ }
+ return ""
+}
+
+func FindItem(list []protocol.CompletionItem, want completion.CompletionItem) *protocol.CompletionItem {
+ for _, item := range list {
+ if item.Label == want.Label {
+ return &item
+ }
+ }
+ return nil
+}
+
+// DiffCompletionItems prints the diff between expected and actual completion
+// test results.
+//
+// The diff will be formatted using '-' and '+' for want and got, respectively.
+func DiffCompletionItems(want, got []protocol.CompletionItem) string {
+ // Many fields are not set in the "want" slice.
+ irrelevantFields := []string{
+ "AdditionalTextEdits",
+ "Documentation",
+ "TextEdit",
+ "SortText",
+ "Preselect",
+ "FilterText",
+ "InsertText",
+ "InsertTextFormat",
+ }
+ ignore := cmpopts.IgnoreFields(protocol.CompletionItem{}, irrelevantFields...)
+ normalizeAny := cmpopts.AcyclicTransformer("NormalizeAny", func(item protocol.CompletionItem) protocol.CompletionItem {
+ item.Detail = NormalizeAny(item.Detail)
+ return item
+ })
+ return cmp.Diff(want, got, ignore, normalizeAny)
+}
+
+func summarizeCompletionItems(i int, want, got []protocol.CompletionItem, reason string, args ...interface{}) string {
+ msg := &bytes.Buffer{}
+ fmt.Fprint(msg, "completion failed")
+ if i >= 0 {
+ fmt.Fprintf(msg, " at %d", i)
+ }
+ fmt.Fprint(msg, " because of ")
+ fmt.Fprintf(msg, reason, args...)
+ fmt.Fprint(msg, ":\nexpected:\n")
+ for _, d := range want {
+ fmt.Fprintf(msg, " %v\n", d)
+ }
+ fmt.Fprintf(msg, "got:\n")
+ for _, d := range got {
+ fmt.Fprintf(msg, " %v\n", d)
+ }
+ return msg.String()
+}
+
+func EnableAllAnalyzers(opts *source.Options) {
+ if opts.Analyses == nil {
+ opts.Analyses = make(map[string]bool)
+ }
+ for _, a := range opts.DefaultAnalyzers {
+ if !a.IsEnabled(opts) {
+ opts.Analyses[a.Analyzer.Name] = true
+ }
+ }
+ for _, a := range opts.TypeErrorAnalyzers {
+ if !a.IsEnabled(opts) {
+ opts.Analyses[a.Analyzer.Name] = true
+ }
+ }
+ for _, a := range opts.ConvenienceAnalyzers {
+ if !a.IsEnabled(opts) {
+ opts.Analyses[a.Analyzer.Name] = true
+ }
+ }
+ for _, a := range opts.StaticcheckAnalyzers {
+ if !a.IsEnabled(opts) {
+ opts.Analyses[a.Analyzer.Name] = true
+ }
+ }
+}
+
+func EnableAllInlayHints(opts *source.Options) {
+ if opts.Hints == nil {
+ opts.Hints = make(map[string]bool)
+ }
+ for name := range source.AllInlayHints {
+ opts.Hints[name] = true
+ }
+}
+
+func WorkspaceSymbolsString(ctx context.Context, data *Data, queryURI span.URI, symbols []protocol.SymbolInformation) (string, error) {
+ queryDir := filepath.Dir(queryURI.Filename())
+ var filtered []string
+ for _, s := range symbols {
+ uri := s.Location.URI.SpanURI()
+ dir := filepath.Dir(uri.Filename())
+ if !source.InDir(queryDir, dir) { // assume queries always issue from higher directories
+ continue
+ }
+ m, err := data.Mapper(uri)
+ if err != nil {
+ return "", err
+ }
+ spn, err := m.LocationSpan(s.Location)
+ if err != nil {
+ return "", err
+ }
+ filtered = append(filtered, fmt.Sprintf("%s %s %s", spn, s.Name, s.Kind))
+ }
+ sort.Strings(filtered)
+ return strings.Join(filtered, "\n") + "\n", nil
+}
+
+func WorkspaceSymbolsTestTypeToMatcher(typ WorkspaceSymbolsTestType) source.SymbolMatcher {
+ switch typ {
+ case WorkspaceSymbolsFuzzy:
+ return source.SymbolFuzzy
+ case WorkspaceSymbolsCaseSensitive:
+ return source.SymbolCaseSensitive
+ default:
+ return source.SymbolCaseInsensitive
+ }
+}
+
+// LocationsToSpans converts protocol location into span form for testing.
+func LocationsToSpans(data *Data, locs []protocol.Location) ([]span.Span, error) {
+ spans := make([]span.Span, len(locs))
+ for i, loc := range locs {
+ m, err := data.Mapper(loc.URI.SpanURI())
+ if err != nil {
+ return nil, err
+ }
+ spn, err := m.LocationSpan(loc)
+ if err != nil {
+ return nil, fmt.Errorf("failed for %v: %w", loc, err)
+ }
+ spans[i] = spn
+ }
+ return spans, nil
+}
+
+// SortAndFormatSpans sorts and formats a list of spans for use in an assertion.
+func SortAndFormatSpans(spans []span.Span) string {
+ span.SortSpans(spans)
+ var buf strings.Builder
+ for _, spn := range spans {
+ fmt.Fprintf(&buf, "%v\n", spn)
+ }
+ return buf.String()
+}
diff --git a/gopls/internal/lsp/tests/util_go118.go b/gopls/internal/lsp/tests/util_go118.go
new file mode 100644
index 000000000..6115342df
--- /dev/null
+++ b/gopls/internal/lsp/tests/util_go118.go
@@ -0,0 +1,13 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18
+// +build go1.18
+
+package tests
+
+func init() {
+ builtins["any"] = true
+ builtins["comparable"] = true
+}
diff --git a/gopls/internal/lsp/tests/util_go121.go b/gopls/internal/lsp/tests/util_go121.go
new file mode 100644
index 000000000..930658648
--- /dev/null
+++ b/gopls/internal/lsp/tests/util_go121.go
@@ -0,0 +1,12 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+// +build go1.21
+
+package tests
+
+func init() {
+ builtins["clear"] = true
+}
diff --git a/gopls/internal/lsp/text_synchronization.go b/gopls/internal/lsp/text_synchronization.go
new file mode 100644
index 000000000..b7be1e1ce
--- /dev/null
+++ b/gopls/internal/lsp/text_synchronization.go
@@ -0,0 +1,349 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsp
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "path/filepath"
+ "sync"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/jsonrpc2"
+)
+
+// ModificationSource identifies the originating cause of a file modification.
+type ModificationSource int
+
+const (
+ // FromDidOpen is a file modification caused by opening a file.
+ FromDidOpen = ModificationSource(iota)
+
+ // FromDidChange is a file modification caused by changing a file.
+ FromDidChange
+
+ // FromDidChangeWatchedFiles is a file modification caused by a change to a
+ // watched file.
+ FromDidChangeWatchedFiles
+
+ // FromDidSave is a file modification caused by a file save.
+ FromDidSave
+
+ // FromDidClose is a file modification caused by closing a file.
+ FromDidClose
+
+ // TODO: add FromDidChangeConfiguration, once configuration changes cause a
+ // new snapshot to be created.
+
+ // FromRegenerateCgo refers to file modifications caused by regenerating
+ // the cgo sources for the workspace.
+ FromRegenerateCgo
+
+ // FromInitialWorkspaceLoad refers to the loading of all packages in the
+ // workspace when the view is first created.
+ FromInitialWorkspaceLoad
+)
+
+func (m ModificationSource) String() string {
+ switch m {
+ case FromDidOpen:
+ return "opened files"
+ case FromDidChange:
+ return "changed files"
+ case FromDidChangeWatchedFiles:
+ return "files changed on disk"
+ case FromDidSave:
+ return "saved files"
+ case FromDidClose:
+ return "close files"
+ case FromRegenerateCgo:
+ return "regenerate cgo"
+ case FromInitialWorkspaceLoad:
+ return "initial workspace load"
+ default:
+ return "unknown file modification"
+ }
+}
+
+func (s *Server) didOpen(ctx context.Context, params *protocol.DidOpenTextDocumentParams) error {
+ uri := params.TextDocument.URI.SpanURI()
+ if !uri.IsFile() {
+ return nil
+ }
+ // There may not be any matching view in the current session. If that's
+ // the case, try creating a new view based on the opened file path.
+ //
+ // TODO(rstambler): This seems like it would continuously add new
+ // views, but it won't because ViewOf only returns an error when there
+ // are no views in the session. I don't know if that logic should go
+ // here, or if we can continue to rely on that implementation detail.
+ if _, err := s.session.ViewOf(uri); err != nil {
+ dir := filepath.Dir(uri.Filename())
+ if err := s.addFolders(ctx, []protocol.WorkspaceFolder{{
+ URI: string(protocol.URIFromPath(dir)),
+ Name: filepath.Base(dir),
+ }}); err != nil {
+ return err
+ }
+ }
+ return s.didModifyFiles(ctx, []source.FileModification{{
+ URI: uri,
+ Action: source.Open,
+ Version: params.TextDocument.Version,
+ Text: []byte(params.TextDocument.Text),
+ LanguageID: params.TextDocument.LanguageID,
+ }}, FromDidOpen)
+}
+
+func (s *Server) didChange(ctx context.Context, params *protocol.DidChangeTextDocumentParams) error {
+ uri := params.TextDocument.URI.SpanURI()
+ if !uri.IsFile() {
+ return nil
+ }
+
+ text, err := s.changedText(ctx, uri, params.ContentChanges)
+ if err != nil {
+ return err
+ }
+ c := source.FileModification{
+ URI: uri,
+ Action: source.Change,
+ Version: params.TextDocument.Version,
+ Text: text,
+ }
+ if err := s.didModifyFiles(ctx, []source.FileModification{c}, FromDidChange); err != nil {
+ return err
+ }
+ return s.warnAboutModifyingGeneratedFiles(ctx, uri)
+}
+
+// warnAboutModifyingGeneratedFiles shows a warning if a user tries to edit a
+// generated file for the first time.
+func (s *Server) warnAboutModifyingGeneratedFiles(ctx context.Context, uri span.URI) error {
+ s.changedFilesMu.Lock()
+ _, ok := s.changedFiles[uri]
+ if !ok {
+ s.changedFiles[uri] = struct{}{}
+ }
+ s.changedFilesMu.Unlock()
+
+ // This file has already been edited before.
+ if ok {
+ return nil
+ }
+
+ // Ideally, we should be able to specify that a generated file should
+ // be opened as read-only. Tell the user that they should not be
+ // editing a generated file.
+ view, err := s.session.ViewOf(uri)
+ if err != nil {
+ return err
+ }
+ snapshot, release, err := view.Snapshot()
+ if err != nil {
+ return err
+ }
+ isGenerated := source.IsGenerated(ctx, snapshot, uri)
+ release()
+
+ if !isGenerated {
+ return nil
+ }
+ return s.client.ShowMessage(ctx, &protocol.ShowMessageParams{
+ Message: fmt.Sprintf("Do not edit this file! %s is a generated file.", uri.Filename()),
+ Type: protocol.Warning,
+ })
+}
+
+func (s *Server) didChangeWatchedFiles(ctx context.Context, params *protocol.DidChangeWatchedFilesParams) error {
+ var modifications []source.FileModification
+ for _, change := range params.Changes {
+ uri := change.URI.SpanURI()
+ if !uri.IsFile() {
+ continue
+ }
+ action := changeTypeToFileAction(change.Type)
+ modifications = append(modifications, source.FileModification{
+ URI: uri,
+ Action: action,
+ OnDisk: true,
+ })
+ }
+ return s.didModifyFiles(ctx, modifications, FromDidChangeWatchedFiles)
+}
+
+func (s *Server) didSave(ctx context.Context, params *protocol.DidSaveTextDocumentParams) error {
+ uri := params.TextDocument.URI.SpanURI()
+ if !uri.IsFile() {
+ return nil
+ }
+ c := source.FileModification{
+ URI: uri,
+ Action: source.Save,
+ }
+ if params.Text != nil {
+ c.Text = []byte(*params.Text)
+ }
+ return s.didModifyFiles(ctx, []source.FileModification{c}, FromDidSave)
+}
+
+func (s *Server) didClose(ctx context.Context, params *protocol.DidCloseTextDocumentParams) error {
+ uri := params.TextDocument.URI.SpanURI()
+ if !uri.IsFile() {
+ return nil
+ }
+ return s.didModifyFiles(ctx, []source.FileModification{
+ {
+ URI: uri,
+ Action: source.Close,
+ Version: -1,
+ Text: nil,
+ },
+ }, FromDidClose)
+}
+
+func (s *Server) didModifyFiles(ctx context.Context, modifications []source.FileModification, cause ModificationSource) error {
+ // wg guards two conditions:
+ // 1. didModifyFiles is complete
+ // 2. the goroutine diagnosing changes on behalf of didModifyFiles is
+ // complete, if it was started
+ //
+ // Both conditions must be satisfied for the purpose of testing: we don't
+ // want to observe the completion of change processing until we have received
+ // all diagnostics as well as all server->client notifications done on behalf
+ // of this function.
+ var wg sync.WaitGroup
+ wg.Add(1)
+ defer wg.Done()
+
+ if s.session.Options().VerboseWorkDoneProgress {
+ work := s.progress.Start(ctx, DiagnosticWorkTitle(cause), "Calculating file diagnostics...", nil, nil)
+ go func() {
+ wg.Wait()
+ work.End(ctx, "Done.")
+ }()
+ }
+
+ onDisk := cause == FromDidChangeWatchedFiles
+
+ s.stateMu.Lock()
+ if s.state >= serverShutDown {
+ // This state check does not prevent races below, and exists only to
+ // produce a better error message. The actual race to the cache should be
+ // guarded by Session.viewMu.
+ s.stateMu.Unlock()
+ return errors.New("server is shut down")
+ }
+ s.stateMu.Unlock()
+
+ // If the set of changes included directories, expand those directories
+ // to their files.
+ modifications = s.session.ExpandModificationsToDirectories(ctx, modifications)
+
+ // Build a lookup map for file modifications, so that we can later join
+ // with the snapshot file associations.
+ modMap := make(map[span.URI]source.FileModification)
+ for _, mod := range modifications {
+ modMap[mod.URI] = mod
+ }
+
+ snapshots, release, err := s.session.DidModifyFiles(ctx, modifications)
+ if err != nil {
+ return err
+ }
+
+ // golang/go#50267: diagnostics should be re-sent after an open or close. For
+ // some clients, it may be helpful to re-send after each change.
+ for snapshot, uris := range snapshots {
+ for _, uri := range uris {
+ mod := modMap[uri]
+ if snapshot.View().Options().ChattyDiagnostics || mod.Action == source.Open || mod.Action == source.Close {
+ s.mustPublishDiagnostics(uri)
+ }
+ }
+ }
+
+ wg.Add(1)
+ go func() {
+ s.diagnoseSnapshots(snapshots, onDisk)
+ release()
+ wg.Done()
+ }()
+
+ // After any file modifications, we need to update our watched files,
+ // in case something changed. Compute the new set of directories to watch,
+ // and if it differs from the current set, send updated registrations.
+ return s.updateWatchedDirectories(ctx)
+}
+
+// DiagnosticWorkTitle returns the title of the diagnostic work resulting from a
+// file change originating from the given cause.
+func DiagnosticWorkTitle(cause ModificationSource) string {
+ return fmt.Sprintf("diagnosing %v", cause)
+}
+
+func (s *Server) changedText(ctx context.Context, uri span.URI, changes []protocol.TextDocumentContentChangeEvent) ([]byte, error) {
+ if len(changes) == 0 {
+ return nil, fmt.Errorf("%w: no content changes provided", jsonrpc2.ErrInternal)
+ }
+
+ // Check if the client sent the full content of the file.
+ // We accept a full content change even if the server expected incremental changes.
+ if len(changes) == 1 && changes[0].Range == nil && changes[0].RangeLength == 0 {
+ return []byte(changes[0].Text), nil
+ }
+ return s.applyIncrementalChanges(ctx, uri, changes)
+}
+
+func (s *Server) applyIncrementalChanges(ctx context.Context, uri span.URI, changes []protocol.TextDocumentContentChangeEvent) ([]byte, error) {
+ fh, err := s.session.GetFile(ctx, uri)
+ if err != nil {
+ return nil, err
+ }
+ content, err := fh.Read()
+ if err != nil {
+ return nil, fmt.Errorf("%w: file not found (%v)", jsonrpc2.ErrInternal, err)
+ }
+ for _, change := range changes {
+ // TODO(adonovan): refactor to use diff.Apply, which is robust w.r.t.
+ // out-of-order or overlapping changes---and much more efficient.
+
+ // Make sure to update mapper along with the content.
+ m := protocol.NewMapper(uri, content)
+ if change.Range == nil {
+ return nil, fmt.Errorf("%w: unexpected nil range for change", jsonrpc2.ErrInternal)
+ }
+ spn, err := m.RangeSpan(*change.Range)
+ if err != nil {
+ return nil, err
+ }
+ start, end := spn.Start().Offset(), spn.End().Offset()
+ if end < start {
+ return nil, fmt.Errorf("%w: invalid range for content change", jsonrpc2.ErrInternal)
+ }
+ var buf bytes.Buffer
+ buf.Write(content[:start])
+ buf.WriteString(change.Text)
+ buf.Write(content[end:])
+ content = buf.Bytes()
+ }
+ return content, nil
+}
+
+func changeTypeToFileAction(ct protocol.FileChangeType) source.FileAction {
+ switch ct {
+ case protocol.Changed:
+ return source.Change
+ case protocol.Created:
+ return source.Create
+ case protocol.Deleted:
+ return source.Delete
+ }
+ return source.UnknownFileAction
+}
diff --git a/gopls/internal/lsp/work/completion.go b/gopls/internal/lsp/work/completion.go
new file mode 100644
index 000000000..bcdc2d1f4
--- /dev/null
+++ b/gopls/internal/lsp/work/completion.go
@@ -0,0 +1,154 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package work
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/internal/event"
+)
+
+func Completion(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle, position protocol.Position) (*protocol.CompletionList, error) {
+ ctx, done := event.Start(ctx, "work.Completion")
+ defer done()
+
+ // Get the position of the cursor.
+ pw, err := snapshot.ParseWork(ctx, fh)
+ if err != nil {
+ return nil, fmt.Errorf("getting go.work file handle: %w", err)
+ }
+ cursor, err := pw.Mapper.PositionOffset(position)
+ if err != nil {
+ return nil, fmt.Errorf("computing cursor offset: %w", err)
+ }
+
+ // Find the use statement the user is in.
+ use, pathStart, _ := usePath(pw, cursor)
+ if use == nil {
+ return &protocol.CompletionList{}, nil
+ }
+ completingFrom := use.Path[:cursor-pathStart]
+
+ // We're going to find the completions of the user input
+ // (completingFrom) by doing a walk on the innermost directory
+ // of the given path, and comparing the found paths to make sure
+ // that they match the component of the path after the
+ // innermost directory.
+ //
+ // We'll maintain two paths when doing this: pathPrefixSlash
+ // is essentially the path the user typed in, and pathPrefixAbs
+ // is the path made absolute from the go.work directory.
+
+ pathPrefixSlash := completingFrom
+ pathPrefixAbs := filepath.FromSlash(pathPrefixSlash)
+ if !filepath.IsAbs(pathPrefixAbs) {
+ pathPrefixAbs = filepath.Join(filepath.Dir(pw.URI.Filename()), pathPrefixAbs)
+ }
+
+ // pathPrefixDir is the directory that will be walked to find matches.
+ // If pathPrefixSlash is not explicitly a directory boundary (is either equivalent to "." or
+ // ends in a separator) we need to examine its parent directory to find sibling files that
+ // match.
+ depthBound := 5
+ pathPrefixDir, pathPrefixBase := pathPrefixAbs, ""
+ pathPrefixSlashDir := pathPrefixSlash
+ if filepath.Clean(pathPrefixSlash) != "." && !strings.HasSuffix(pathPrefixSlash, "/") {
+ depthBound++
+ pathPrefixDir, pathPrefixBase = filepath.Split(pathPrefixAbs)
+ pathPrefixSlashDir = dirNonClean(pathPrefixSlash)
+ }
+
+ var completions []string
+ // Stop traversing deeper once we've hit 10k files to try to stay generally under 100ms.
+ const numSeenBound = 10000
+ var numSeen int
+ stopWalking := errors.New("hit numSeenBound")
+ err = filepath.Walk(pathPrefixDir, func(wpath string, info os.FileInfo, err error) error {
+ if numSeen > numSeenBound {
+ // Stop traversing if we hit bound.
+ return stopWalking
+ }
+ numSeen++
+
+ // rel is the path relative to pathPrefixDir.
+ // Make sure that it has pathPrefixBase as a prefix
+ // otherwise it won't match the beginning of the
+ // base component of the path the user typed in.
+ rel := strings.TrimPrefix(wpath[len(pathPrefixDir):], string(filepath.Separator))
+ if info.IsDir() && wpath != pathPrefixDir && !strings.HasPrefix(rel, pathPrefixBase) {
+ return filepath.SkipDir
+ }
+
+ // Check for a match (a module directory).
+ if filepath.Base(rel) == "go.mod" {
+ relDir := strings.TrimSuffix(dirNonClean(rel), string(os.PathSeparator))
+ completionPath := join(pathPrefixSlashDir, filepath.ToSlash(relDir))
+
+ if !strings.HasPrefix(completionPath, completingFrom) {
+ return nil
+ }
+ if strings.HasSuffix(completionPath, "/") {
+ // Don't suggest paths that end in "/". This happens
+ // when the input is a path that ends in "/" and
+ // the completion is empty.
+ return nil
+ }
+ completion := completionPath[len(completingFrom):]
+ if completingFrom == "" && !strings.HasPrefix(completion, "./") {
+ // Bias towards "./" prefixes.
+ completion = join(".", completion)
+ }
+
+ completions = append(completions, completion)
+ }
+
+ if depth := strings.Count(rel, string(filepath.Separator)); depth >= depthBound {
+ return filepath.SkipDir
+ }
+ return nil
+ })
+ if err != nil && !errors.Is(err, stopWalking) {
+ return nil, fmt.Errorf("walking to find completions: %w", err)
+ }
+
+ sort.Strings(completions)
+
+ var items []protocol.CompletionItem
+ for _, c := range completions {
+ items = append(items, protocol.CompletionItem{
+ Label: c,
+ InsertText: c,
+ })
+ }
+ return &protocol.CompletionList{Items: items}, nil
+}
+
+// dirNonClean is filepath.Dir, without the Clean at the end.
+func dirNonClean(path string) string {
+ vol := filepath.VolumeName(path)
+ i := len(path) - 1
+ for i >= len(vol) && !os.IsPathSeparator(path[i]) {
+ i--
+ }
+ return path[len(vol) : i+1]
+}
+
+func join(a, b string) string {
+ if a == "" {
+ return b
+ }
+ if b == "" {
+ return a
+ }
+ return strings.TrimSuffix(a, "/") + "/" + b
+}
diff --git a/gopls/internal/lsp/work/diagnostics.go b/gopls/internal/lsp/work/diagnostics.go
new file mode 100644
index 000000000..cbcc85055
--- /dev/null
+++ b/gopls/internal/lsp/work/diagnostics.go
@@ -0,0 +1,92 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package work
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "golang.org/x/mod/modfile"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/event"
+)
+
+func Diagnostics(ctx context.Context, snapshot source.Snapshot) (map[span.URI][]*source.Diagnostic, error) {
+ ctx, done := event.Start(ctx, "work.Diagnostics", source.SnapshotLabels(snapshot)...)
+ defer done()
+
+ reports := map[span.URI][]*source.Diagnostic{}
+ uri := snapshot.WorkFile()
+ if uri == "" {
+ return nil, nil
+ }
+ fh, err := snapshot.GetFile(ctx, uri)
+ if err != nil {
+ return nil, err
+ }
+ reports[fh.URI()] = []*source.Diagnostic{}
+ diagnostics, err := DiagnosticsForWork(ctx, snapshot, fh)
+ if err != nil {
+ return nil, err
+ }
+ for _, d := range diagnostics {
+ fh, err := snapshot.GetFile(ctx, d.URI)
+ if err != nil {
+ return nil, err
+ }
+ reports[fh.URI()] = append(reports[fh.URI()], d)
+ }
+
+ return reports, nil
+}
+
+func DiagnosticsForWork(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) ([]*source.Diagnostic, error) {
+ pw, err := snapshot.ParseWork(ctx, fh)
+ if err != nil {
+ if pw == nil || len(pw.ParseErrors) == 0 {
+ return nil, err
+ }
+ return pw.ParseErrors, nil
+ }
+
+ // Add diagnostic if a directory does not contain a module.
+ var diagnostics []*source.Diagnostic
+ for _, use := range pw.File.Use {
+ rng, err := pw.Mapper.OffsetRange(use.Syntax.Start.Byte, use.Syntax.End.Byte)
+ if err != nil {
+ return nil, err
+ }
+
+ modfh, err := snapshot.GetFile(ctx, modFileURI(pw, use))
+ if err != nil {
+ return nil, err
+ }
+ if _, err := modfh.Read(); err != nil && os.IsNotExist(err) {
+ diagnostics = append(diagnostics, &source.Diagnostic{
+ URI: fh.URI(),
+ Range: rng,
+ Severity: protocol.SeverityError,
+ Source: source.WorkFileError,
+ Message: fmt.Sprintf("directory %v does not contain a module", use.Path),
+ })
+ }
+ }
+ return diagnostics, nil
+}
+
+func modFileURI(pw *source.ParsedWorkFile, use *modfile.Use) span.URI {
+ workdir := filepath.Dir(pw.URI.Filename())
+
+ modroot := filepath.FromSlash(use.Path)
+ if !filepath.IsAbs(modroot) {
+ modroot = filepath.Join(workdir, modroot)
+ }
+
+ return span.URIFromPath(filepath.Join(modroot, "go.mod"))
+}
diff --git a/gopls/internal/lsp/work/format.go b/gopls/internal/lsp/work/format.go
new file mode 100644
index 000000000..e852eb4d2
--- /dev/null
+++ b/gopls/internal/lsp/work/format.go
@@ -0,0 +1,28 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package work
+
+import (
+ "context"
+
+ "golang.org/x/mod/modfile"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/internal/event"
+)
+
+func Format(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) ([]protocol.TextEdit, error) {
+ ctx, done := event.Start(ctx, "work.Format")
+ defer done()
+
+ pw, err := snapshot.ParseWork(ctx, fh)
+ if err != nil {
+ return nil, err
+ }
+ formatted := modfile.Format(pw.File.Syntax)
+ // Calculate the edits to be made due to the change.
+ diffs := snapshot.View().Options().ComputeEdits(string(pw.Mapper.Content), string(formatted))
+ return source.ToProtocolEdits(pw.Mapper, diffs)
+}
diff --git a/gopls/internal/lsp/work/hover.go b/gopls/internal/lsp/work/hover.go
new file mode 100644
index 000000000..1a1b299fd
--- /dev/null
+++ b/gopls/internal/lsp/work/hover.go
@@ -0,0 +1,89 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package work
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+
+ "golang.org/x/mod/modfile"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/internal/event"
+)
+
+func Hover(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle, position protocol.Position) (*protocol.Hover, error) {
+ // We only provide hover information for the view's go.work file.
+ if fh.URI() != snapshot.WorkFile() {
+ return nil, nil
+ }
+
+ ctx, done := event.Start(ctx, "work.Hover")
+ defer done()
+
+ // Get the position of the cursor.
+ pw, err := snapshot.ParseWork(ctx, fh)
+ if err != nil {
+ return nil, fmt.Errorf("getting go.work file handle: %w", err)
+ }
+ offset, err := pw.Mapper.PositionOffset(position)
+ if err != nil {
+ return nil, fmt.Errorf("computing cursor offset: %w", err)
+ }
+
+ // Confirm that the cursor is inside a use statement, and then find
+ // the position of the use statement's directory path.
+ use, pathStart, pathEnd := usePath(pw, offset)
+
+ // The cursor position is not on a use statement.
+ if use == nil {
+ return nil, nil
+ }
+
+ // Get the mod file denoted by the use.
+ modfh, err := snapshot.GetFile(ctx, modFileURI(pw, use))
+ if err != nil {
+ return nil, fmt.Errorf("getting modfile handle: %w", err)
+ }
+ pm, err := snapshot.ParseMod(ctx, modfh)
+ if err != nil {
+ return nil, fmt.Errorf("getting modfile handle: %w", err)
+ }
+ mod := pm.File.Module.Mod
+
+ // Get the range to highlight for the hover.
+ rng, err := pw.Mapper.OffsetRange(pathStart, pathEnd)
+ if err != nil {
+ return nil, err
+ }
+ options := snapshot.View().Options()
+ return &protocol.Hover{
+ Contents: protocol.MarkupContent{
+ Kind: options.PreferredContentFormat,
+ Value: mod.Path,
+ },
+ Range: rng,
+ }, nil
+}
+
+func usePath(pw *source.ParsedWorkFile, offset int) (use *modfile.Use, pathStart, pathEnd int) {
+ for _, u := range pw.File.Use {
+ path := []byte(u.Path)
+ s, e := u.Syntax.Start.Byte, u.Syntax.End.Byte
+ i := bytes.Index(pw.Mapper.Content[s:e], path)
+ if i == -1 {
+ // This should not happen.
+ continue
+ }
+ // Shift the start position to the location of the
+ // module directory within the use statement.
+ pathStart, pathEnd = s+i, s+i+len(path)
+ if pathStart <= offset && offset <= pathEnd {
+ return u, pathStart, pathEnd
+ }
+ }
+ return nil, 0, 0
+}
diff --git a/gopls/internal/lsp/workspace.go b/gopls/internal/lsp/workspace.go
new file mode 100644
index 000000000..7c02239e6
--- /dev/null
+++ b/gopls/internal/lsp/workspace.go
@@ -0,0 +1,95 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsp
+
+import (
+ "context"
+ "fmt"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/span"
+)
+
+func (s *Server) didChangeWorkspaceFolders(ctx context.Context, params *protocol.DidChangeWorkspaceFoldersParams) error {
+ event := params.Event
+ for _, folder := range event.Removed {
+ view := s.session.View(folder.Name)
+ if view != nil {
+ s.session.RemoveView(view)
+ } else {
+ return fmt.Errorf("view %s for %v not found", folder.Name, folder.URI)
+ }
+ }
+ return s.addFolders(ctx, event.Added)
+}
+
+// addView returns a Snapshot and a release function that must be
+// called when it is no longer needed.
+func (s *Server) addView(ctx context.Context, name string, uri span.URI) (source.Snapshot, func(), error) {
+ s.stateMu.Lock()
+ state := s.state
+ s.stateMu.Unlock()
+ if state < serverInitialized {
+ return nil, nil, fmt.Errorf("addView called before server initialized")
+ }
+ options := s.session.Options().Clone()
+ if err := s.fetchConfig(ctx, name, uri, options); err != nil {
+ return nil, nil, err
+ }
+ _, snapshot, release, err := s.session.NewView(ctx, name, uri, options)
+ return snapshot, release, err
+}
+
+func (s *Server) didChangeConfiguration(ctx context.Context, _ *protocol.DidChangeConfigurationParams) error {
+ // Apply any changes to the session-level settings.
+ options := s.session.Options().Clone()
+ if err := s.fetchConfig(ctx, "", "", options); err != nil {
+ return err
+ }
+ s.session.SetOptions(options)
+
+ // Go through each view, getting and updating its configuration.
+ for _, view := range s.session.Views() {
+ options := s.session.Options().Clone()
+ if err := s.fetchConfig(ctx, view.Name(), view.Folder(), options); err != nil {
+ return err
+ }
+ view, err := s.session.SetViewOptions(ctx, view, options)
+ if err != nil {
+ return err
+ }
+ go func() {
+ snapshot, release, err := view.Snapshot()
+ if err != nil {
+ return // view is shut down; no need to diagnose
+ }
+ defer release()
+ s.diagnoseDetached(snapshot)
+ }()
+ }
+
+ // An options change may have affected the detected Go version.
+ s.checkViewGoVersions()
+
+ return nil
+}
+
+func semanticTokenRegistration(tokenTypes, tokenModifiers []string) protocol.Registration {
+ return protocol.Registration{
+ ID: "textDocument/semanticTokens",
+ Method: "textDocument/semanticTokens",
+ RegisterOptions: &protocol.SemanticTokensOptions{
+ Legend: protocol.SemanticTokensLegend{
+ // TODO(pjw): trim these to what we use (and an unused one
+ // at position 0 of TokTypes, to catch typos)
+ TokenTypes: tokenTypes,
+ TokenModifiers: tokenModifiers,
+ },
+ Full: &protocol.Or_SemanticTokensOptions_full{Value: true},
+ Range: &protocol.Or_SemanticTokensOptions_range{Value: true},
+ },
+ }
+}
diff --git a/gopls/internal/lsp/workspace_symbol.go b/gopls/internal/lsp/workspace_symbol.go
new file mode 100644
index 000000000..88b3e8865
--- /dev/null
+++ b/gopls/internal/lsp/workspace_symbol.go
@@ -0,0 +1,32 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsp
+
+import (
+ "context"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/internal/event"
+)
+
+func (s *Server) symbol(ctx context.Context, params *protocol.WorkspaceSymbolParams) ([]protocol.SymbolInformation, error) {
+ ctx, done := event.Start(ctx, "lsp.Server.symbol")
+ defer done()
+
+ views := s.session.Views()
+ matcher := s.session.Options().SymbolMatcher
+ style := s.session.Options().SymbolStyle
+ // TODO(rfindley): it looks wrong that we need to pass views here.
+ //
+ // Evidence:
+ // - this is the only place we convert views to []source.View
+ // - workspace symbols is the only place where we call source.View.Snapshot
+ var sourceViews []source.View
+ for _, v := range views {
+ sourceViews = append(sourceViews, v)
+ }
+ return source.WorkspaceSymbols(ctx, matcher, style, sourceViews, params.Query)
+}
diff --git a/gopls/internal/regtest/bench/bench_test.go b/gopls/internal/regtest/bench/bench_test.go
index 61d4ae2cb..29e02ce6e 100644
--- a/gopls/internal/regtest/bench/bench_test.go
+++ b/gopls/internal/regtest/bench/bench_test.go
@@ -5,186 +5,245 @@
package bench
import (
+ "context"
"flag"
"fmt"
+ "io/ioutil"
+ "log"
"os"
- "runtime/pprof"
+ "os/exec"
+ "path/filepath"
+ "sync"
"testing"
+ "time"
"golang.org/x/tools/gopls/internal/hooks"
- "golang.org/x/tools/internal/lsp/fake"
- . "golang.org/x/tools/internal/lsp/regtest"
+ "golang.org/x/tools/gopls/internal/lsp/cmd"
+ "golang.org/x/tools/gopls/internal/lsp/fake"
+ "golang.org/x/tools/internal/bug"
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/fakenet"
+ "golang.org/x/tools/internal/jsonrpc2"
+ "golang.org/x/tools/internal/jsonrpc2/servertest"
+ "golang.org/x/tools/internal/tool"
+
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
+)
+
+var (
+ goplsPath = flag.String("gopls_path", "", "if set, use this gopls for testing; incompatible with -gopls_commit")
+
+ installGoplsOnce sync.Once // guards installing gopls at -gopls_commit
+ goplsCommit = flag.String("gopls_commit", "", "if set, install and use gopls at this commit for testing; incompatible with -gopls_path")
- "golang.org/x/tools/internal/lsp/protocol"
+ cpuProfile = flag.String("gopls_cpuprofile", "", "if set, the cpu profile file suffix; see \"Profiling\" in the package doc")
+ memProfile = flag.String("gopls_memprofile", "", "if set, the mem profile file suffix; see \"Profiling\" in the package doc")
+ trace = flag.String("gopls_trace", "", "if set, the trace file suffix; see \"Profiling\" in the package doc")
+
+ // If non-empty, tempDir is a temporary working dir that was created by this
+ // test suite.
+ makeTempDirOnce sync.Once // guards creation of the temp dir
+ tempDir string
)
-func TestMain(m *testing.M) {
- Main(m, hooks.Options)
-}
+// if runAsGopls is "true", run the gopls command instead of the testing.M.
+const runAsGopls = "_GOPLS_BENCH_RUN_AS_GOPLS"
-func benchmarkOptions(dir string) []RunOption {
- return []RunOption{
- // Run in an existing directory, since we're trying to simulate known cases
- // that cause gopls memory problems.
- InExistingDir(dir),
- // Skip logs as they buffer up memory unnaturally.
- SkipLogs(),
- // The Debug server only makes sense if running in singleton mode.
- Modes(Singleton),
- // Remove the default timeout. Individual tests should control their
- // own graceful termination.
- NoDefaultTimeout(),
-
- // Use the actual proxy, since we want our builds to succeed.
- GOPROXY("https://proxy.golang.org"),
+func TestMain(m *testing.M) {
+ bug.PanicOnBugs = true
+ if os.Getenv(runAsGopls) == "true" {
+ tool.Main(context.Background(), cmd.New("gopls", "", nil, hooks.Options), os.Args[1:])
+ os.Exit(0)
+ }
+ event.SetExporter(nil) // don't log to stderr
+ code := m.Run()
+ if err := cleanup(); err != nil {
+ fmt.Fprintf(os.Stderr, "cleaning up after benchmarks: %v\n", err)
+ if code == 0 {
+ code = 1
+ }
}
+ os.Exit(code)
}
-func printBenchmarkResults(result testing.BenchmarkResult) {
- fmt.Printf("BenchmarkStatistics\t%s\t%s\n", result.String(), result.MemString())
+// getTempDir returns the temporary directory to use for benchmark files,
+// creating it if necessary.
+func getTempDir() string {
+ makeTempDirOnce.Do(func() {
+ var err error
+ tempDir, err = ioutil.TempDir("", "gopls-bench")
+ if err != nil {
+ log.Fatal(err)
+ }
+ })
+ return tempDir
}
-var iwlOptions struct {
- workdir string
-}
+// shallowClone performs a shallow clone of repo into dir at the given
+// 'commitish' ref (any commit reference understood by git).
+//
+// The directory dir must not already exist.
+func shallowClone(dir, repo, commitish string) error {
+ if err := os.Mkdir(dir, 0750); err != nil {
+ return fmt.Errorf("creating dir for %s: %v", repo, err)
+ }
-func init() {
- flag.StringVar(&iwlOptions.workdir, "iwl_workdir", "", "if set, run IWL benchmark in this directory")
-}
+ // Set a timeout for git fetch. If this proves flaky, it can be removed.
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
+ defer cancel()
-func TestBenchmarkIWL(t *testing.T) {
- if iwlOptions.workdir == "" {
- t.Skip("-iwl_workdir not configured")
+ // Use a shallow fetch to download just the relevant commit.
+ shInit := fmt.Sprintf("git init && git fetch --depth=1 %q %q && git checkout FETCH_HEAD", repo, commitish)
+ initCmd := exec.CommandContext(ctx, "/bin/sh", "-c", shInit)
+ initCmd.Dir = dir
+ if output, err := initCmd.CombinedOutput(); err != nil {
+ return fmt.Errorf("checking out %s: %v\n%s", repo, err, output)
}
+ return nil
+}
- opts := stressTestOptions(iwlOptions.workdir)
- // Don't skip hooks, so that we can wait for IWL.
- opts = append(opts, SkipHooks(false))
-
- results := testing.Benchmark(func(b *testing.B) {
- for i := 0; i < b.N; i++ {
- WithOptions(opts...).Run(t, "", func(t *testing.T, env *Env) {})
- }
+// connectEditor connects a fake editor session in the given dir, using the
+// given editor config.
+func connectEditor(dir string, config fake.EditorConfig, ts servertest.Connector) (*fake.Sandbox, *fake.Editor, *Awaiter, error) {
+ s, err := fake.NewSandbox(&fake.SandboxConfig{
+ Workdir: dir,
+ GOPROXY: "https://proxy.golang.org",
})
+ if err != nil {
+ return nil, nil, nil, err
+ }
- printBenchmarkResults(results)
-}
-
-var symbolOptions struct {
- workdir, query, matcher, style string
- printResults bool
-}
+ a := NewAwaiter(s.Workdir)
+ const skipApplyEdits = false
+ editor, err := fake.NewEditor(s, config).Connect(context.Background(), ts, a.Hooks(), skipApplyEdits)
+ if err != nil {
+ return nil, nil, nil, err
+ }
-func init() {
- flag.StringVar(&symbolOptions.workdir, "symbol_workdir", "", "if set, run symbol benchmark in this directory")
- flag.StringVar(&symbolOptions.query, "symbol_query", "test", "symbol query to use in benchmark")
- flag.StringVar(&symbolOptions.matcher, "symbol_matcher", "", "symbol matcher to use in benchmark")
- flag.StringVar(&symbolOptions.style, "symbol_style", "", "symbol style to use in benchmark")
- flag.BoolVar(&symbolOptions.printResults, "symbol_print_results", false, "whether to print symbol query results")
+ return s, editor, a, nil
}
-func TestBenchmarkSymbols(t *testing.T) {
- if symbolOptions.workdir == "" {
- t.Skip("-symbol_workdir not configured")
+// newGoplsServer returns a connector that connects to a new gopls process.
+func newGoplsServer(name string) (servertest.Connector, error) {
+ if *goplsPath != "" && *goplsCommit != "" {
+ panic("can't set both -gopls_path and -gopls_commit")
}
-
- opts := benchmarkOptions(symbolOptions.workdir)
- conf := EditorConfig{}
- if symbolOptions.matcher != "" {
- conf.SymbolMatcher = &symbolOptions.matcher
+ var (
+ goplsPath = *goplsPath
+ env []string
+ )
+ if *goplsCommit != "" {
+ goplsPath = getInstalledGopls()
+ }
+ if goplsPath == "" {
+ var err error
+ goplsPath, err = os.Executable()
+ if err != nil {
+ return nil, err
+ }
+ env = []string{fmt.Sprintf("%s=true", runAsGopls)}
}
- if symbolOptions.style != "" {
- conf.SymbolStyle = &symbolOptions.style
+ var args []string
+ if *cpuProfile != "" {
+ args = append(args, fmt.Sprintf("-profile.cpu=%s", name+"."+*cpuProfile))
}
- opts = append(opts, conf)
+ if *memProfile != "" {
+ args = append(args, fmt.Sprintf("-profile.mem=%s", name+"."+*memProfile))
+ }
+ if *trace != "" {
+ args = append(args, fmt.Sprintf("-profile.trace=%s", name+"."+*trace))
+ }
+ return &SidecarServer{
+ goplsPath: goplsPath,
+ env: env,
+ args: args,
+ }, nil
+}
- WithOptions(opts...).Run(t, "", func(t *testing.T, env *Env) {
- // We can't Await in this test, since we have disabled hooks. Instead, run
- // one symbol request to completion to ensure all necessary cache entries
- // are populated.
- symbols, err := env.Editor.Server.Symbol(env.Ctx, &protocol.WorkspaceSymbolParams{
- Query: symbolOptions.query,
- })
- if err != nil {
- t.Fatal(err)
+// getInstalledGopls builds gopls at the given -gopls_commit, returning the
+// path to the gopls binary.
+func getInstalledGopls() string {
+ if *goplsCommit == "" {
+ panic("must provide -gopls_commit")
+ }
+ toolsDir := filepath.Join(getTempDir(), "gopls_build")
+ goplsPath := filepath.Join(toolsDir, "gopls", "gopls")
+
+ installGoplsOnce.Do(func() {
+ log.Printf("installing gopls: checking out x/tools@%s into %s\n", *goplsCommit, toolsDir)
+ if err := shallowClone(toolsDir, "https://go.googlesource.com/tools", *goplsCommit); err != nil {
+ log.Fatal(err)
}
- if symbolOptions.printResults {
- fmt.Println("Results:")
- for i := 0; i < len(symbols); i++ {
- fmt.Printf("\t%d. %s (%s)\n", i, symbols[i].Name, symbols[i].ContainerName)
- }
+ log.Println("installing gopls: building...")
+ bld := exec.Command("go", "build", ".")
+ bld.Dir = filepath.Join(toolsDir, "gopls")
+ if output, err := bld.CombinedOutput(); err != nil {
+ log.Fatalf("building gopls: %v\n%s", err, output)
}
- results := testing.Benchmark(func(b *testing.B) {
- for i := 0; i < b.N; i++ {
- if _, err := env.Editor.Server.Symbol(env.Ctx, &protocol.WorkspaceSymbolParams{
- Query: symbolOptions.query,
- }); err != nil {
- t.Fatal(err)
- }
- }
- })
- printBenchmarkResults(results)
+ // Confirm that the resulting path now exists.
+ if _, err := os.Stat(goplsPath); err != nil {
+ log.Fatalf("os.Stat(%s): %v", goplsPath, err)
+ }
})
+ return goplsPath
}
-var (
- benchDir = flag.String("didchange_dir", "", "If set, run benchmarks in this dir. Must also set regtest_bench_file.")
- benchFile = flag.String("didchange_file", "", "The file to modify")
- benchProfile = flag.String("didchange_cpuprof", "", "file to write cpu profiling data to")
-)
+// A SidecarServer starts (and connects to) a separate gopls process at the
+// given path.
+type SidecarServer struct {
+ goplsPath string
+ env []string // additional environment bindings
+ args []string // command-line arguments
+}
-// TestBenchmarkDidChange benchmarks modifications of a single file by making
-// synthetic modifications in a comment. It controls pacing by waiting for the
-// server to actually start processing the didChange notification before
-// proceeding. Notably it does not wait for diagnostics to complete.
-//
-// Run it by passing -didchange_dir and -didchange_file, where -didchange_dir
-// is the path to a workspace root, and -didchange_file is the
-// workspace-relative path to a file to modify. e.g.:
+// Connect creates new io.Pipes and binds them to the underlying StreamServer.
//
-// go test -run=TestBenchmarkDidChange \
-// -didchange_dir=path/to/kubernetes \
-// -didchange_file=pkg/util/hash/hash.go
-func TestBenchmarkDidChange(t *testing.T) {
- if *benchDir == "" {
- t.Skip("-didchange_dir is not set")
- }
- if *benchFile == "" {
- t.Fatal("-didchange_file must be set if -didchange_dir is set")
- }
-
- opts := benchmarkOptions(*benchDir)
- WithOptions(opts...).Run(t, "", func(_ *testing.T, env *Env) {
- env.OpenFile(*benchFile)
- env.Await(env.DoneWithOpen())
- // Insert the text we'll be modifying at the top of the file.
- env.EditBuffer(*benchFile, fake.Edit{Text: "// __REGTEST_PLACEHOLDER_0__\n"})
- result := testing.Benchmark(func(b *testing.B) {
- if *benchProfile != "" {
- profile, err := os.Create(*benchProfile)
- if err != nil {
- t.Fatal(err)
- }
- defer profile.Close()
- if err := pprof.StartCPUProfile(profile); err != nil {
- t.Fatal(err)
- }
- defer pprof.StopCPUProfile()
- }
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- env.EditBuffer(*benchFile, fake.Edit{
- Start: fake.Pos{Line: 0, Column: 0},
- End: fake.Pos{Line: 1, Column: 0},
- // Increment
- Text: fmt.Sprintf("// __REGTEST_PLACEHOLDER_%d__\n", i+1),
- })
- env.Await(StartedChange(uint64(i + 1)))
- }
- b.StopTimer()
- })
- printBenchmarkResults(result)
- })
+// It implements the servertest.Connector interface.
+func (s *SidecarServer) Connect(ctx context.Context) jsonrpc2.Conn {
+ // Note: don't use CommandContext here, as we want gopls to exit gracefully
+ // in order to write out profile data.
+ //
+ // We close the connection on context cancelation below.
+ cmd := exec.Command(s.goplsPath, s.args...)
+
+ stdin, err := cmd.StdinPipe()
+ if err != nil {
+ log.Fatal(err)
+ }
+ stdout, err := cmd.StdoutPipe()
+ if err != nil {
+ log.Fatal(err)
+ }
+ cmd.Stderr = os.Stderr
+ cmd.Env = append(os.Environ(), s.env...)
+ if err := cmd.Start(); err != nil {
+ log.Fatalf("starting gopls: %v", err)
+ }
+
+ go func() {
+ // If we don't log.Fatal here, benchmarks may hang indefinitely if gopls
+ // exits abnormally.
+ //
+ // TODO(rfindley): ideally we would shut down the connection gracefully,
+ // but that doesn't currently work.
+ if err := cmd.Wait(); err != nil {
+ log.Fatalf("gopls invocation failed with error: %v", err)
+ }
+ }()
+
+ clientStream := jsonrpc2.NewHeaderStream(fakenet.NewConn("stdio", stdout, stdin))
+ clientConn := jsonrpc2.NewConn(clientStream)
+
+ go func() {
+ select {
+ case <-ctx.Done():
+ clientConn.Close()
+ clientStream.Close()
+ case <-clientConn.Done():
+ }
+ }()
+
+ return clientConn
}
diff --git a/gopls/internal/regtest/bench/completion_bench_test.go b/gopls/internal/regtest/bench/completion_bench_test.go
deleted file mode 100644
index a8ef47c20..000000000
--- a/gopls/internal/regtest/bench/completion_bench_test.go
+++ /dev/null
@@ -1,186 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package bench
-
-import (
- "flag"
- "fmt"
- "strings"
- "testing"
-
- . "golang.org/x/tools/internal/lsp/regtest"
-
- "golang.org/x/tools/internal/lsp/fake"
-)
-
-// dummyCompletionFunction to test manually configured completion using CLI.
-func dummyCompletionFunction() { const s = "placeholder"; fmt.Printf("%s", s) }
-
-type completionBenchOptions struct {
- workdir, file, locationRegexp string
- printResults bool
- // hook to run edits before initial completion, not supported for manually
- // configured completions.
- preCompletionEdits func(*Env)
-}
-
-var completionOptions = completionBenchOptions{}
-
-func init() {
- flag.StringVar(&completionOptions.workdir, "completion_workdir", "", "directory to run completion benchmarks in")
- flag.StringVar(&completionOptions.file, "completion_file", "", "relative path to the file to complete in")
- flag.StringVar(&completionOptions.locationRegexp, "completion_regexp", "", "regexp location to complete at")
- flag.BoolVar(&completionOptions.printResults, "completion_print_results", false, "whether to print completion results")
-}
-
-func benchmarkCompletion(options completionBenchOptions, t *testing.T) {
- if completionOptions.workdir == "" {
- t.Skip("-completion_workdir not configured, skipping benchmark")
- }
-
- opts := stressTestOptions(options.workdir)
-
- // Completion gives bad results if IWL is not yet complete, so we must await
- // it first (and therefore need hooks).
- opts = append(opts, SkipHooks(false))
-
- WithOptions(opts...).Run(t, "", func(t *testing.T, env *Env) {
- env.OpenFile(options.file)
-
- // Run edits required for this completion.
- if options.preCompletionEdits != nil {
- options.preCompletionEdits(env)
- }
-
- // Run a completion to make sure the system is warm.
- pos := env.RegexpSearch(options.file, options.locationRegexp)
- completions := env.Completion(options.file, pos)
-
- if options.printResults {
- fmt.Println("Results:")
- for i := 0; i < len(completions.Items); i++ {
- fmt.Printf("\t%d. %v\n", i, completions.Items[i])
- }
- }
-
- results := testing.Benchmark(func(b *testing.B) {
- for i := 0; i < b.N; i++ {
- env.Completion(options.file, pos)
- }
- })
-
- printBenchmarkResults(results)
- })
-}
-
-// endPosInBuffer returns the position for last character in the buffer for
-// the given file.
-func endPosInBuffer(env *Env, name string) fake.Pos {
- buffer := env.Editor.BufferText(name)
- lines := strings.Split(buffer, "\n")
- numLines := len(lines)
-
- return fake.Pos{
- Line: numLines - 1,
- Column: len([]rune(lines[numLines-1])),
- }
-}
-
-// Benchmark completion at a specified file and location. When no CLI options
-// are specified, this test is skipped.
-// To Run (from x/tools/gopls) against the dummy function above:
-// go test -v ./internal/regtest/bench -run=TestBenchmarkConfiguredCompletion
-// -completion_workdir="$HOME/Developer/tools"
-// -completion_file="gopls/internal/regtest/completion_bench_test.go"
-// -completion_regexp="dummyCompletionFunction.*fmt\.Printf\(\"%s\", s(\))"
-func TestBenchmarkConfiguredCompletion(t *testing.T) {
- benchmarkCompletion(completionOptions, t)
-}
-
-// To run (from x/tools/gopls):
-// go test -v ./internal/regtest/bench -run TestBenchmark<>Completion
-// -completion_workdir="$HOME/Developer/tools"
-// where <> is one of the tests below. completion_workdir should be path to
-// x/tools on your system.
-
-// Benchmark struct completion in tools codebase.
-func TestBenchmarkStructCompletion(t *testing.T) {
- file := "internal/lsp/cache/session.go"
-
- preCompletionEdits := func(env *Env) {
- env.OpenFile(file)
- originalBuffer := env.Editor.BufferText(file)
- env.EditBuffer(file, fake.Edit{
- End: endPosInBuffer(env, file),
- Text: originalBuffer + "\nvar testVariable map[string]bool = Session{}.\n",
- })
- }
-
- benchmarkCompletion(completionBenchOptions{
- workdir: completionOptions.workdir,
- file: file,
- locationRegexp: `var testVariable map\[string\]bool = Session{}(\.)`,
- preCompletionEdits: preCompletionEdits,
- printResults: completionOptions.printResults,
- }, t)
-}
-
-// Benchmark import completion in tools codebase.
-func TestBenchmarkImportCompletion(t *testing.T) {
- benchmarkCompletion(completionBenchOptions{
- workdir: completionOptions.workdir,
- file: "internal/lsp/source/completion/completion.go",
- locationRegexp: `go\/()`,
- printResults: completionOptions.printResults,
- }, t)
-}
-
-// Benchmark slice completion in tools codebase.
-func TestBenchmarkSliceCompletion(t *testing.T) {
- file := "internal/lsp/cache/session.go"
-
- preCompletionEdits := func(env *Env) {
- env.OpenFile(file)
- originalBuffer := env.Editor.BufferText(file)
- env.EditBuffer(file, fake.Edit{
- End: endPosInBuffer(env, file),
- Text: originalBuffer + "\nvar testVariable []byte = \n",
- })
- }
-
- benchmarkCompletion(completionBenchOptions{
- workdir: completionOptions.workdir,
- file: file,
- locationRegexp: `var testVariable \[\]byte (=)`,
- preCompletionEdits: preCompletionEdits,
- printResults: completionOptions.printResults,
- }, t)
-}
-
-// Benchmark deep completion in function call in tools codebase.
-func TestBenchmarkFuncDeepCompletion(t *testing.T) {
- file := "internal/lsp/source/completion/completion.go"
- fileContent := `
-func (c *completer) _() {
- c.inference.kindMatches(c.)
-}
-`
- preCompletionEdits := func(env *Env) {
- env.OpenFile(file)
- originalBuffer := env.Editor.BufferText(file)
- env.EditBuffer(file, fake.Edit{
- End: endPosInBuffer(env, file),
- Text: originalBuffer + fileContent,
- })
- }
-
- benchmarkCompletion(completionBenchOptions{
- workdir: completionOptions.workdir,
- file: file,
- locationRegexp: `func \(c \*completer\) _\(\) {\n\tc\.inference\.kindMatches\((c)`,
- preCompletionEdits: preCompletionEdits,
- printResults: completionOptions.printResults,
- }, t)
-}
diff --git a/gopls/internal/regtest/bench/completion_test.go b/gopls/internal/regtest/bench/completion_test.go
new file mode 100644
index 000000000..2ddd8c11e
--- /dev/null
+++ b/gopls/internal/regtest/bench/completion_test.go
@@ -0,0 +1,173 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bench
+
+import (
+ "fmt"
+ "testing"
+
+ "golang.org/x/tools/gopls/internal/lsp/fake"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
+)
+
+// TODO(rfindley): update these completion tests to run on multiple repos.
+
+type completionBenchOptions struct {
+ file, locationRegexp string
+
+ // Hooks to run edits before initial completion
+ setup func(*Env) // run before the benchmark starts
+ beforeCompletion func(*Env) // run before each completion
+}
+
+func benchmarkCompletion(options completionBenchOptions, b *testing.B) {
+ repo := getRepo(b, "tools")
+ env := repo.newEnv(b, "completion.tools", fake.EditorConfig{})
+ defer env.Close()
+
+ // Run edits required for this completion.
+ if options.setup != nil {
+ options.setup(env)
+ }
+
+ // Run a completion to make sure the system is warm.
+ loc := env.RegexpSearch(options.file, options.locationRegexp)
+ completions := env.Completion(loc)
+
+ if testing.Verbose() {
+ fmt.Println("Results:")
+ for i := 0; i < len(completions.Items); i++ {
+ fmt.Printf("\t%d. %v\n", i, completions.Items[i])
+ }
+ }
+
+ b.Run("tools", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ if options.beforeCompletion != nil {
+ options.beforeCompletion(env)
+ }
+ env.Completion(loc)
+ }
+ })
+}
+
+// endRangeInBuffer returns the position for last character in the buffer for
+// the given file.
+func endRangeInBuffer(env *Env, name string) protocol.Range {
+ buffer := env.BufferText(name)
+ m := protocol.NewMapper("", []byte(buffer))
+ rng, err := m.OffsetRange(len(buffer), len(buffer))
+ if err != nil {
+ env.T.Fatal(err)
+ }
+ return rng
+}
+
+// Benchmark struct completion in tools codebase.
+func BenchmarkStructCompletion(b *testing.B) {
+ file := "internal/lsp/cache/session.go"
+
+ setup := func(env *Env) {
+ env.OpenFile(file)
+ env.EditBuffer(file, protocol.TextEdit{
+ Range: endRangeInBuffer(env, file),
+ NewText: "\nvar testVariable map[string]bool = Session{}.\n",
+ })
+ }
+
+ benchmarkCompletion(completionBenchOptions{
+ file: file,
+ locationRegexp: `var testVariable map\[string\]bool = Session{}(\.)`,
+ setup: setup,
+ }, b)
+}
+
+// Benchmark import completion in tools codebase.
+func BenchmarkImportCompletion(b *testing.B) {
+ const file = "internal/lsp/source/completion/completion.go"
+ benchmarkCompletion(completionBenchOptions{
+ file: file,
+ locationRegexp: `go\/()`,
+ setup: func(env *Env) { env.OpenFile(file) },
+ }, b)
+}
+
+// Benchmark slice completion in tools codebase.
+func BenchmarkSliceCompletion(b *testing.B) {
+ file := "internal/lsp/cache/session.go"
+
+ setup := func(env *Env) {
+ env.OpenFile(file)
+ env.EditBuffer(file, protocol.TextEdit{
+ Range: endRangeInBuffer(env, file),
+ NewText: "\nvar testVariable []byte = \n",
+ })
+ }
+
+ benchmarkCompletion(completionBenchOptions{
+ file: file,
+ locationRegexp: `var testVariable \[\]byte (=)`,
+ setup: setup,
+ }, b)
+}
+
+// Benchmark deep completion in function call in tools codebase.
+func BenchmarkFuncDeepCompletion(b *testing.B) {
+ file := "internal/lsp/source/completion/completion.go"
+ fileContent := `
+func (c *completer) _() {
+ c.inference.kindMatches(c.)
+}
+`
+ setup := func(env *Env) {
+ env.OpenFile(file)
+ originalBuffer := env.BufferText(file)
+ env.EditBuffer(file, protocol.TextEdit{
+ Range: endRangeInBuffer(env, file),
+ NewText: originalBuffer + fileContent,
+ })
+ }
+
+ benchmarkCompletion(completionBenchOptions{
+ file: file,
+ locationRegexp: `func \(c \*completer\) _\(\) {\n\tc\.inference\.kindMatches\((c)`,
+ setup: setup,
+ }, b)
+}
+
+// Benchmark completion following an arbitrary edit.
+//
+// Edits force type-checked packages to be invalidated, so we want to measure
+// how long it takes before completion results are available.
+func BenchmarkCompletionFollowingEdit(b *testing.B) {
+ file := "internal/lsp/source/completion/completion2.go"
+ fileContent := `
+package completion
+
+func (c *completer) _() {
+ c.inference.kindMatches(c.)
+ // __MAGIC_STRING_1
+}
+`
+ setup := func(env *Env) {
+ env.CreateBuffer(file, fileContent)
+ }
+
+ n := 1
+ beforeCompletion := func(env *Env) {
+ old := fmt.Sprintf("__MAGIC_STRING_%d", n)
+ new := fmt.Sprintf("__MAGIC_STRING_%d", n+1)
+ n++
+ env.RegexpReplace(file, old, new)
+ }
+
+ benchmarkCompletion(completionBenchOptions{
+ file: file,
+ locationRegexp: `func \(c \*completer\) _\(\) {\n\tc\.inference\.kindMatches\((c)`,
+ setup: setup,
+ beforeCompletion: beforeCompletion,
+ }, b)
+}
diff --git a/gopls/internal/regtest/bench/definition_test.go b/gopls/internal/regtest/bench/definition_test.go
new file mode 100644
index 000000000..a3e68f532
--- /dev/null
+++ b/gopls/internal/regtest/bench/definition_test.go
@@ -0,0 +1,39 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bench
+
+import (
+ "testing"
+)
+
+func BenchmarkDefinition(b *testing.B) {
+ tests := []struct {
+ repo string
+ file string
+ regexp string
+ }{
+ {"istio", "pkg/config/model.go", `gogotypes\.(MarshalAny)`},
+ {"kubernetes", "pkg/controller/lookup_cache.go", `hashutil\.(DeepHashObject)`},
+ {"kuma", "api/generic/insights.go", `proto\.(Message)`},
+ {"pkgsite", "internal/log/log.go", `derrors\.(Wrap)`},
+ {"starlark", "starlark/eval.go", "prog.compiled.(Encode)"},
+ {"tools", "internal/lsp/cache/check.go", `(snapshot)\) buildKey`},
+ }
+
+ for _, test := range tests {
+ b.Run(test.repo, func(b *testing.B) {
+ env := getRepo(b, test.repo).sharedEnv(b)
+ env.OpenFile(test.file)
+ loc := env.RegexpSearch(test.file, test.regexp)
+ env.Await(env.DoneWithOpen())
+ env.GoToDefinition(loc) // pre-warm the query, and open the target file
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ env.GoToDefinition(loc) // pre-warm the query
+ }
+ })
+ }
+}
diff --git a/gopls/internal/regtest/bench/didchange_test.go b/gopls/internal/regtest/bench/didchange_test.go
new file mode 100644
index 000000000..3a7cb10b6
--- /dev/null
+++ b/gopls/internal/regtest/bench/didchange_test.go
@@ -0,0 +1,99 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bench
+
+import (
+ "fmt"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "golang.org/x/tools/gopls/internal/lsp/fake"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+)
+
+// Use a global edit counter as bench function may execute multiple times, and
+// we want to avoid cache hits. Use time.Now to also avoid cache hits from the
+// shared file cache.
+var editID int64 = time.Now().UnixNano()
+
+var didChangeTests = []struct {
+ repo string
+ file string
+}{
+ {"istio", "pkg/fuzz/util.go"},
+ {"kubernetes", "pkg/controller/lookup_cache.go"},
+ {"kuma", "api/generic/insights.go"},
+ {"pkgsite", "internal/frontend/server.go"},
+ {"starlark", "starlark/eval.go"},
+ {"tools", "internal/lsp/cache/snapshot.go"},
+}
+
+// BenchmarkDidChange benchmarks modifications of a single file by making
+// synthetic modifications in a comment. It controls pacing by waiting for the
+// server to actually start processing the didChange notification before
+// proceeding. Notably it does not wait for diagnostics to complete.
+func BenchmarkDidChange(b *testing.B) {
+ for _, test := range didChangeTests {
+ b.Run(test.repo, func(b *testing.B) {
+ env := getRepo(b, test.repo).sharedEnv(b)
+ env.OpenFile(test.file)
+ // Insert the text we'll be modifying at the top of the file.
+ env.EditBuffer(test.file, protocol.TextEdit{NewText: "// __REGTEST_PLACEHOLDER_0__\n"})
+ env.AfterChange()
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ edits := atomic.AddInt64(&editID, 1)
+ env.EditBuffer(test.file, protocol.TextEdit{
+ Range: protocol.Range{
+ Start: protocol.Position{Line: 0, Character: 0},
+ End: protocol.Position{Line: 1, Character: 0},
+ },
+ // Increment the placeholder text, to ensure cache misses.
+ NewText: fmt.Sprintf("// __REGTEST_PLACEHOLDER_%d__\n", edits),
+ })
+ env.Await(env.StartedChange())
+ }
+ })
+ }
+}
+
+func BenchmarkDiagnoseChange(b *testing.B) {
+ for _, test := range didChangeTests {
+ b.Run(test.repo, func(b *testing.B) {
+ // Use a new env to avoid the diagnostic delay: we want to measure how
+ // long it takes to produce the diagnostics.
+ env := repos[test.repo].newEnv(b, "diagnoseChange", fake.EditorConfig{
+ Settings: map[string]interface{}{
+ "diagnosticsDelay": "0s",
+ },
+ })
+ env.OpenFile(test.file)
+ // Insert the text we'll be modifying at the top of the file.
+ env.EditBuffer(test.file, protocol.TextEdit{NewText: "// __REGTEST_PLACEHOLDER_0__\n"})
+ env.AfterChange()
+ b.ResetTimer()
+
+ // We must use an extra subtest layer here, so that we only set up the
+ // shared env once (otherwise we pay additional overhead and the profiling
+ // flags don't work).
+ b.Run("diagnose", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ edits := atomic.AddInt64(&editID, 1)
+ env.EditBuffer(test.file, protocol.TextEdit{
+ Range: protocol.Range{
+ Start: protocol.Position{Line: 0, Character: 0},
+ End: protocol.Position{Line: 1, Character: 0},
+ },
+ // Increment the placeholder text, to ensure cache misses.
+ NewText: fmt.Sprintf("// __REGTEST_PLACEHOLDER_%d__\n", edits),
+ })
+ env.AfterChange()
+ }
+ })
+ })
+ }
+}
diff --git a/gopls/internal/regtest/bench/doc.go b/gopls/internal/regtest/bench/doc.go
new file mode 100644
index 000000000..a9f2fbffa
--- /dev/null
+++ b/gopls/internal/regtest/bench/doc.go
@@ -0,0 +1,33 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The bench package implements benchmarks for various LSP operations.
+//
+// Benchmarks check out specific commits of popular and/or exemplary
+// repositories, and script an external gopls process via a fake text editor.
+// By default, benchmarks run the test executable as gopls (using a special
+// "gopls mode" environment variable). A different gopls binary may be used by
+// setting the -gopls_path or -gopls_commit flags.
+//
+// This package is a work in progress.
+//
+// # Profiling
+//
+// As benchmark functions run gopls in a separate process, the normal test
+// flags for profiling are not useful. Instead the -gopls_cpuprofile,
+// -gopls_memprofile, and -gopls_trace flags may be used to pass through
+// profiling flags to the gopls process. Each of these flags sets a suffix
+// for the respective gopls profiling flag, which is prefixed with a name
+// corresponding to the shared repository or (in some cases) benchmark name.
+// For example, settings -gopls_cpuprofile=cpu.out will result in profiles
+// named tools.cpu.out, BenchmarkInitialWorkspaceLoad.cpu.out, etc. Here,
+// tools.cpu.out is the cpu profile for the shared x/tools session, which may
+// be used by multiple benchmark functions, and BenchmarkInitialWorkspaceLoad
+// is the cpu profile for the last iteration of the initial workspace load
+// test, which starts a new editor session for each iteration.
+//
+// # TODO
+// - add more benchmarks, and more repositories
+// - improve this documentation
+package bench
diff --git a/gopls/internal/regtest/bench/hover_test.go b/gopls/internal/regtest/bench/hover_test.go
new file mode 100644
index 000000000..e89e03b33
--- /dev/null
+++ b/gopls/internal/regtest/bench/hover_test.go
@@ -0,0 +1,39 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bench
+
+import (
+ "testing"
+)
+
+func BenchmarkHover(b *testing.B) {
+ tests := []struct {
+ repo string
+ file string
+ regexp string
+ }{
+ {"istio", "pkg/config/model.go", `gogotypes\.(MarshalAny)`},
+ {"kubernetes", "pkg/apis/core/types.go", "type (Pod)"},
+ {"kuma", "api/generic/insights.go", `proto\.(Message)`},
+ {"pkgsite", "internal/log/log.go", `derrors\.(Wrap)`},
+ {"starlark", "starlark/eval.go", "prog.compiled.(Encode)"},
+ {"tools", "internal/lsp/cache/check.go", `(snapshot)\) buildKey`},
+ }
+
+ for _, test := range tests {
+ b.Run(test.repo, func(b *testing.B) {
+ env := getRepo(b, test.repo).sharedEnv(b)
+ env.OpenFile(test.file)
+ loc := env.RegexpSearch(test.file, test.regexp)
+ env.Await(env.DoneWithOpen())
+ env.Hover(loc) // pre-warm the query
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ env.Hover(loc) // pre-warm the query
+ }
+ })
+ }
+}
diff --git a/gopls/internal/regtest/bench/implementations_test.go b/gopls/internal/regtest/bench/implementations_test.go
new file mode 100644
index 000000000..219f42a37
--- /dev/null
+++ b/gopls/internal/regtest/bench/implementations_test.go
@@ -0,0 +1,37 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bench
+
+import "testing"
+
+func BenchmarkImplementations(b *testing.B) {
+ tests := []struct {
+ repo string
+ file string
+ regexp string
+ }{
+ {"istio", "pkg/config/mesh/watcher.go", `type (Watcher)`},
+ {"kubernetes", "pkg/controller/lookup_cache.go", `objectWithMeta`},
+ {"kuma", "api/generic/insights.go", `type (Insight)`},
+ {"pkgsite", "internal/datasource.go", `type (DataSource)`},
+ {"starlark", "syntax/syntax.go", `type (Expr)`},
+ {"tools", "internal/lsp/source/view.go", `type (Snapshot)`},
+ }
+
+ for _, test := range tests {
+ b.Run(test.repo, func(b *testing.B) {
+ env := getRepo(b, test.repo).sharedEnv(b)
+ env.OpenFile(test.file)
+ loc := env.RegexpSearch(test.file, test.regexp)
+ env.Await(env.DoneWithOpen())
+ env.Implementations(loc) // pre-warm the query
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ env.Implementations(loc)
+ }
+ })
+ }
+}
diff --git a/gopls/internal/regtest/bench/iwl_test.go b/gopls/internal/regtest/bench/iwl_test.go
new file mode 100644
index 000000000..352078e7f
--- /dev/null
+++ b/gopls/internal/regtest/bench/iwl_test.go
@@ -0,0 +1,77 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bench
+
+import (
+ "testing"
+
+ "golang.org/x/tools/gopls/internal/lsp/command"
+ "golang.org/x/tools/gopls/internal/lsp/fake"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
+)
+
+// BenchmarkInitialWorkspaceLoad benchmarks the initial workspace load time for
+// a new editing session.
+func BenchmarkInitialWorkspaceLoad(b *testing.B) {
+ if testing.Short() {
+ // TODO(rfindley): remove this skip once the released gopls version
+ // supports the memstats command.
+ b.Skip("temporarily skipping as baseline gopls versions do not support the memstats command")
+ }
+ tests := []struct {
+ repo string
+ file string
+ }{
+ {"tools", "internal/lsp/cache/snapshot.go"},
+ {"kubernetes", "pkg/controller/lookup_cache.go"},
+ {"pkgsite", "internal/frontend/server.go"},
+ {"starlark", "starlark/eval.go"},
+ {"istio", "pkg/fuzz/util.go"},
+ {"kuma", "api/generic/insights.go"},
+ }
+
+ for _, test := range tests {
+ b.Run(test.repo, func(b *testing.B) {
+ repo := getRepo(b, test.repo)
+ // get the (initialized) shared env to ensure the cache is warm.
+ // Reuse its GOPATH so that we get cache hits for things in the module
+ // cache.
+ sharedEnv := repo.sharedEnv(b)
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ doIWL(b, sharedEnv.Sandbox.GOPATH(), repo, test.file)
+ }
+ })
+ }
+}
+
+func doIWL(b *testing.B, gopath string, repo *repo, file string) {
+ // Exclude the time to set up the env from the benchmark time, as this may
+ // involve installing gopls and/or checking out the repo dir.
+ b.StopTimer()
+ config := fake.EditorConfig{Env: map[string]string{"GOPATH": gopath}}
+ env := repo.newEnv(b, "iwl."+repo.name, config)
+ defer env.Close()
+ b.StartTimer()
+
+ // Open an arbitrary file to ensure that gopls starts working.
+ //
+ // In the future, this may matter if gopls doesn't eagerly construct
+ // the workspace.
+ env.OpenFile(file)
+
+ env.Await(InitialWorkspaceLoad)
+ b.StopTimer()
+ params := &protocol.ExecuteCommandParams{
+ Command: command.MemStats.ID(),
+ }
+ var memstats command.MemStatsResult
+ env.ExecuteCommand(params, &memstats)
+ b.ReportMetric(float64(memstats.HeapAlloc), "alloc_bytes")
+ b.ReportMetric(float64(memstats.HeapInUse), "in_use_bytes")
+ b.StartTimer()
+}
diff --git a/gopls/internal/regtest/bench/references_test.go b/gopls/internal/regtest/bench/references_test.go
new file mode 100644
index 000000000..d47ea56a4
--- /dev/null
+++ b/gopls/internal/regtest/bench/references_test.go
@@ -0,0 +1,37 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bench
+
+import "testing"
+
+func BenchmarkReferences(b *testing.B) {
+ tests := []struct {
+ repo string
+ file string
+ regexp string
+ }{
+ {"istio", "pkg/config/model.go", "type (Meta)"},
+ {"kubernetes", "pkg/controller/lookup_cache.go", "type (objectWithMeta)"},
+ {"kuma", "pkg/events/interfaces.go", "type (Event)"},
+ {"pkgsite", "internal/log/log.go", "func (Infof)"},
+ {"starlark", "syntax/syntax.go", "type (Ident)"},
+ {"tools", "internal/lsp/source/view.go", "type (Snapshot)"},
+ }
+
+ for _, test := range tests {
+ b.Run(test.repo, func(b *testing.B) {
+ env := getRepo(b, test.repo).sharedEnv(b)
+ env.OpenFile(test.file)
+ loc := env.RegexpSearch(test.file, test.regexp)
+ env.Await(env.DoneWithOpen())
+ env.References(loc) // pre-warm the query
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ env.References(loc)
+ }
+ })
+ }
+}
diff --git a/gopls/internal/regtest/bench/rename_test.go b/gopls/internal/regtest/bench/rename_test.go
new file mode 100644
index 000000000..bd1ce9491
--- /dev/null
+++ b/gopls/internal/regtest/bench/rename_test.go
@@ -0,0 +1,44 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bench
+
+import (
+ "fmt"
+ "testing"
+)
+
+func BenchmarkRename(b *testing.B) {
+ tests := []struct {
+ repo string
+ file string
+ regexp string
+ baseName string
+ }{
+ {"kubernetes", "pkg/controller/lookup_cache.go", `hashutil\.(DeepHashObject)`, "DeepHashObject"},
+ {"kuma", "pkg/events/interfaces.go", `Delete`, "Delete"},
+ {"istio", "pkg/config/model.go", `(Namespace) string`, "Namespace"},
+ {"pkgsite", "internal/log/log.go", `func (Infof)`, "Infof"},
+ {"starlark", "starlark/eval.go", `Program\) (Filename)`, "Filename"},
+ {"tools", "internal/lsp/cache/snapshot.go", `meta \*(metadataGraph)`, "metadataGraph"},
+ }
+
+ for _, test := range tests {
+ names := 0 // bench function may execute multiple times
+ b.Run(test.repo, func(b *testing.B) {
+ env := getRepo(b, test.repo).sharedEnv(b)
+ env.OpenFile(test.file)
+ loc := env.RegexpSearch(test.file, test.regexp)
+ env.Await(env.DoneWithOpen())
+ env.Rename(loc, test.baseName+"X") // pre-warm the query
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ names++
+ newName := fmt.Sprintf("%s%d", test.baseName, names)
+ env.Rename(loc, newName)
+ }
+ })
+ }
+}
diff --git a/gopls/internal/regtest/bench/repo_test.go b/gopls/internal/regtest/bench/repo_test.go
new file mode 100644
index 000000000..5ca24ec90
--- /dev/null
+++ b/gopls/internal/regtest/bench/repo_test.go
@@ -0,0 +1,231 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bench
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "log"
+ "os"
+ "path/filepath"
+ "sync"
+ "testing"
+ "time"
+
+ "golang.org/x/tools/gopls/internal/lsp/fake"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
+)
+
+// repos holds shared repositories for use in benchmarks.
+//
+// These repos were selected to represent a variety of different types of
+// codebases.
+var repos = map[string]*repo{
+ // Used by x/benchmarks; large.
+ "istio": {
+ name: "istio",
+ url: "https://github.com/istio/istio",
+ commit: "1.17.0",
+ },
+
+ // Kubernetes is a large repo with many dependencies, and in the past has
+ // been about as large a repo as gopls could handle.
+ "kubernetes": {
+ name: "kubernetes",
+ url: "https://github.com/kubernetes/kubernetes",
+ commit: "v1.24.0",
+ },
+
+ // A large, industrial application.
+ "kuma": {
+ name: "kuma",
+ url: "https://github.com/kumahq/kuma",
+ commit: "2.1.1",
+ },
+
+ // x/pkgsite is familiar and represents a common use case (a webserver). It
+ // also has a number of static non-go files and template files.
+ "pkgsite": {
+ name: "pkgsite",
+ url: "https://go.googlesource.com/pkgsite",
+ commit: "81f6f8d4175ad0bf6feaa03543cc433f8b04b19b",
+ short: true,
+ },
+
+ // A tiny self-contained project.
+ "starlark": {
+ name: "starlark",
+ url: "https://github.com/google/starlark-go",
+ commit: "3f75dec8e4039385901a30981e3703470d77e027",
+ short: true,
+ },
+
+ // The current repository, which is medium-small and has very few dependencies.
+ "tools": {
+ name: "tools",
+ url: "https://go.googlesource.com/tools",
+ commit: "gopls/v0.9.0",
+ short: true,
+ },
+}
+
+// getRepo gets the requested repo, and skips the test if -short is set and
+// repo is not configured as a short repo.
+func getRepo(tb testing.TB, name string) *repo {
+ tb.Helper()
+ repo := repos[name]
+ if repo == nil {
+ tb.Fatalf("repo %s does not exist", name)
+ }
+ if !repo.short && testing.Short() {
+ tb.Skipf("large repo %s does not run whith -short", repo.name)
+ }
+ return repo
+}
+
+// A repo represents a working directory for a repository checked out at a
+// specific commit.
+//
+// Repos are used for sharing state across benchmarks that operate on the same
+// codebase.
+type repo struct {
+ // static configuration
+ name string // must be unique, used for subdirectory
+ url string // repo url
+ commit string // full commit hash or tag
+ short bool // whether this repo runs with -short
+
+ dirOnce sync.Once
+ dir string // directory contaning source code checked out to url@commit
+
+ // shared editor state
+ editorOnce sync.Once
+ editor *fake.Editor
+ sandbox *fake.Sandbox
+ awaiter *Awaiter
+}
+
+// getDir returns directory containing repo source code, creating it if
+// necessary. It is safe for concurrent use.
+func (r *repo) getDir() string {
+ r.dirOnce.Do(func() {
+ r.dir = filepath.Join(getTempDir(), r.name)
+ log.Printf("cloning %s@%s into %s", r.url, r.commit, r.dir)
+ if err := shallowClone(r.dir, r.url, r.commit); err != nil {
+ log.Fatal(err)
+ }
+ })
+ return r.dir
+}
+
+// sharedEnv returns a shared benchmark environment. It is safe for concurrent
+// use.
+//
+// Every call to sharedEnv uses the same editor and sandbox, as a means to
+// avoid reinitializing the editor for large repos. Calling repo.Close cleans
+// up the shared environment.
+//
+// Repos in the package-local Repos var are closed at the end of the test main
+// function.
+func (r *repo) sharedEnv(tb testing.TB) *Env {
+ r.editorOnce.Do(func() {
+ dir := r.getDir()
+
+ start := time.Now()
+ log.Printf("starting initial workspace load for %s", r.name)
+ ts, err := newGoplsServer(r.name)
+ if err != nil {
+ log.Fatal(err)
+ }
+ r.sandbox, r.editor, r.awaiter, err = connectEditor(dir, fake.EditorConfig{}, ts)
+ if err != nil {
+ log.Fatalf("connecting editor: %v", err)
+ }
+
+ if err := r.awaiter.Await(context.Background(), InitialWorkspaceLoad); err != nil {
+ log.Fatal(err)
+ }
+ log.Printf("initial workspace load (cold) for %s took %v", r.name, time.Since(start))
+ })
+
+ return &Env{
+ T: tb,
+ Ctx: context.Background(),
+ Editor: r.editor,
+ Sandbox: r.sandbox,
+ Awaiter: r.awaiter,
+ }
+}
+
+// newEnv returns a new Env connected to a new gopls process communicating
+// over stdin/stdout. It is safe for concurrent use.
+//
+// It is the caller's responsibility to call Close on the resulting Env when it
+// is no longer needed.
+func (r *repo) newEnv(tb testing.TB, name string, config fake.EditorConfig) *Env {
+ dir := r.getDir()
+
+ ts, err := newGoplsServer(name)
+ if err != nil {
+ tb.Fatal(err)
+ }
+ sandbox, editor, awaiter, err := connectEditor(dir, config, ts)
+ if err != nil {
+ log.Fatalf("connecting editor: %v", err)
+ }
+
+ return &Env{
+ T: tb,
+ Ctx: context.Background(),
+ Editor: editor,
+ Sandbox: sandbox,
+ Awaiter: awaiter,
+ }
+}
+
+// Close cleans up shared state referenced by the repo.
+func (r *repo) Close() error {
+ var errBuf bytes.Buffer
+ if r.editor != nil {
+ if err := r.editor.Close(context.Background()); err != nil {
+ fmt.Fprintf(&errBuf, "closing editor: %v", err)
+ }
+ }
+ if r.sandbox != nil {
+ if err := r.sandbox.Close(); err != nil {
+ fmt.Fprintf(&errBuf, "closing sandbox: %v", err)
+ }
+ }
+ if r.dir != "" {
+ if err := os.RemoveAll(r.dir); err != nil {
+ fmt.Fprintf(&errBuf, "cleaning dir: %v", err)
+ }
+ }
+ if errBuf.Len() > 0 {
+ return errors.New(errBuf.String())
+ }
+ return nil
+}
+
+// cleanup cleans up state that is shared across benchmark functions.
+func cleanup() error {
+ var errBuf bytes.Buffer
+ for _, repo := range repos {
+ if err := repo.Close(); err != nil {
+ fmt.Fprintf(&errBuf, "closing %q: %v", repo.name, err)
+ }
+ }
+ if tempDir != "" {
+ if err := os.RemoveAll(tempDir); err != nil {
+ fmt.Fprintf(&errBuf, "cleaning tempDir: %v", err)
+ }
+ }
+ if errBuf.Len() > 0 {
+ return errors.New(errBuf.String())
+ }
+ return nil
+}
diff --git a/gopls/internal/regtest/bench/stress_test.go b/gopls/internal/regtest/bench/stress_test.go
index f7e59faf9..15a2c9081 100644
--- a/gopls/internal/regtest/bench/stress_test.go
+++ b/gopls/internal/regtest/bench/stress_test.go
@@ -11,56 +11,84 @@ import (
"testing"
"time"
- . "golang.org/x/tools/internal/lsp/regtest"
+ "golang.org/x/tools/gopls/internal/hooks"
+ "golang.org/x/tools/gopls/internal/lsp/cache"
+ "golang.org/x/tools/gopls/internal/lsp/fake"
+ "golang.org/x/tools/gopls/internal/lsp/lsprpc"
+ "golang.org/x/tools/internal/jsonrpc2"
+ "golang.org/x/tools/internal/jsonrpc2/servertest"
)
-// Pilosa is a repository that has historically caused significant memory
-// problems for Gopls. We use it for a simple stress test that types
-// arbitrarily in a file with lots of dependents.
+// github.com/pilosa/pilosa is a repository that has historically caused
+// significant memory problems for Gopls. We use it for a simple stress test
+// that types arbitrarily in a file with lots of dependents.
var pilosaPath = flag.String("pilosa_path", "", "Path to a directory containing "+
"github.com/pilosa/pilosa, for stress testing. Do not set this unless you "+
"know what you're doing!")
-func stressTestOptions(dir string) []RunOption {
- opts := benchmarkOptions(dir)
- opts = append(opts, SkipHooks(true), DebugAddress(":8087"))
- return opts
-}
-
func TestPilosaStress(t *testing.T) {
+ // TODO(rfindley): revisit this test and make it is hermetic: it should check
+ // out pilosa into a directory.
+ //
+ // Note: This stress test has not been run recently, and may no longer
+ // function properly.
if *pilosaPath == "" {
t.Skip("-pilosa_path not configured")
}
- opts := stressTestOptions(*pilosaPath)
- WithOptions(opts...).Run(t, "", func(_ *testing.T, env *Env) {
- files := []string{
- "cmd.go",
- "internal/private.pb.go",
- "roaring/roaring.go",
- "roaring/roaring_internal_test.go",
- "server/handler_test.go",
- }
- for _, file := range files {
- env.OpenFile(file)
+ sandbox, err := fake.NewSandbox(&fake.SandboxConfig{
+ Workdir: *pilosaPath,
+ GOPROXY: "https://proxy.golang.org",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ server := lsprpc.NewStreamServer(cache.New(nil), false, hooks.Options)
+ ts := servertest.NewPipeServer(server, jsonrpc2.NewRawStream)
+ ctx := context.Background()
+
+ const skipApplyEdits = false
+ editor, err := fake.NewEditor(sandbox, fake.EditorConfig{}).Connect(ctx, ts, fake.ClientHooks{}, skipApplyEdits)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ files := []string{
+ "cmd.go",
+ "internal/private.pb.go",
+ "roaring/roaring.go",
+ "roaring/roaring_internal_test.go",
+ "server/handler_test.go",
+ }
+ for _, file := range files {
+ if err := editor.OpenFile(ctx, file); err != nil {
+ t.Fatal(err)
}
- ctx, cancel := context.WithTimeout(env.Ctx, 10*time.Minute)
- defer cancel()
+ }
+ ctx, cancel := context.WithTimeout(ctx, 10*time.Minute)
+ defer cancel()
- i := 1
- // MagicNumber is an identifier that occurs in roaring.go. Just change it
- // arbitrarily.
- env.RegexpReplace("roaring/roaring.go", "MagicNumber", fmt.Sprintf("MagicNumber%d", 1))
- for {
- select {
- case <-ctx.Done():
- return
- default:
- }
- env.RegexpReplace("roaring/roaring.go", fmt.Sprintf("MagicNumber%d", i), fmt.Sprintf("MagicNumber%d", i+1))
- time.Sleep(20 * time.Millisecond)
- i++
+ i := 1
+ // MagicNumber is an identifier that occurs in roaring.go. Just change it
+ // arbitrarily.
+ if err := editor.RegexpReplace(ctx, "roaring/roaring.go", "MagicNumber", fmt.Sprintf("MagicNumber%d", 1)); err != nil {
+ t.Fatal(err)
+ }
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ default:
}
- })
+ if err := editor.RegexpReplace(ctx, "roaring/roaring.go", fmt.Sprintf("MagicNumber%d", i), fmt.Sprintf("MagicNumber%d", i+1)); err != nil {
+ t.Fatal(err)
+ }
+ // Simulate (very fast) typing.
+ //
+ // Typing 80 wpm ~150ms per keystroke.
+ time.Sleep(150 * time.Millisecond)
+ i++
+ }
}
diff --git a/gopls/internal/regtest/bench/workspace_symbols_test.go b/gopls/internal/regtest/bench/workspace_symbols_test.go
new file mode 100644
index 000000000..975422ac6
--- /dev/null
+++ b/gopls/internal/regtest/bench/workspace_symbols_test.go
@@ -0,0 +1,37 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bench
+
+import (
+ "flag"
+ "fmt"
+ "testing"
+)
+
+var symbolQuery = flag.String("symbol_query", "test", "symbol query to use in benchmark")
+
+// BenchmarkWorkspaceSymbols benchmarks the time to execute a workspace symbols
+// request (controlled by the -symbol_query flag).
+func BenchmarkWorkspaceSymbols(b *testing.B) {
+ for name := range repos {
+ b.Run(name, func(b *testing.B) {
+ env := getRepo(b, name).sharedEnv(b)
+ symbols := env.Symbol(*symbolQuery) // warm the cache
+
+ if testing.Verbose() {
+ fmt.Println("Results:")
+ for i, symbol := range symbols {
+ fmt.Printf("\t%d. %s (%s)\n", i, symbol.Name, symbol.ContainerName)
+ }
+ }
+
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ env.Symbol(*symbolQuery)
+ }
+ })
+ }
+}
diff --git a/gopls/internal/regtest/codelens/codelens_test.go b/gopls/internal/regtest/codelens/codelens_test.go
index 3e1527114..79b5df0f5 100644
--- a/gopls/internal/regtest/codelens/codelens_test.go
+++ b/gopls/internal/regtest/codelens/codelens_test.go
@@ -6,21 +6,20 @@ package codelens
import (
"fmt"
- "runtime"
- "strings"
"testing"
"golang.org/x/tools/gopls/internal/hooks"
- . "golang.org/x/tools/internal/lsp/regtest"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
+ "golang.org/x/tools/gopls/internal/lsp/tests/compare"
+ "golang.org/x/tools/internal/bug"
- "golang.org/x/tools/internal/lsp/command"
- "golang.org/x/tools/internal/lsp/fake"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/tests"
+ "golang.org/x/tools/gopls/internal/lsp/command"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
"golang.org/x/tools/internal/testenv"
)
func TestMain(m *testing.M) {
+ bug.PanicOnBugs = true
Main(m, hooks.Options)
}
@@ -41,7 +40,7 @@ const (
Two
)
-//go:generate stringer -type=Number
+//` + `go:generate stringer -type=Number
`
tests := []struct {
label string
@@ -61,9 +60,7 @@ const (
for _, test := range tests {
t.Run(test.label, func(t *testing.T) {
WithOptions(
- EditorConfig{
- CodeLenses: test.enabled,
- },
+ Settings{"codelenses": test.enabled},
).Run(t, workspace, func(t *testing.T, env *Env) {
env.OpenFile("lib.go")
lens := env.CodeLens("lib.go")
@@ -78,8 +75,11 @@ const (
// This test confirms the full functionality of the code lenses for updating
// dependencies in a go.mod file. It checks for the code lens that suggests
// an update and then executes the command associated with that code lens. A
-// regression test for golang/go#39446.
+// regression test for golang/go#39446. It also checks that these code lenses
+// only affect the diagnostics and contents of the containing go.mod file.
func TestUpgradeCodelens(t *testing.T) {
+ testenv.NeedsGo1Point(t, 18) // uses go.work
+
const proxyWithLatest = `
-- golang.org/x/hello@v1.3.3/go.mod --
module golang.org/x/hello
@@ -100,16 +100,23 @@ var Goodbye error
`
const shouldUpdateDep = `
--- go.mod --
-module mod.com
+-- go.work --
+go 1.18
+
+use (
+ ./a
+ ./b
+)
+-- a/go.mod --
+module mod.com/a
go 1.14
require golang.org/x/hello v1.2.3
--- go.sum --
+-- a/go.sum --
golang.org/x/hello v1.2.3 h1:7Wesfkx/uBd+eFgPrq0irYj/1XfmbvLV8jZ/W7C2Dwg=
golang.org/x/hello v1.2.3/go.mod h1:OgtlzsxVMUUdsdQCIDYgaauCTH47B8T8vofouNJfzgY=
--- main.go --
+-- a/main.go --
package main
import "golang.org/x/hello/hi"
@@ -117,14 +124,41 @@ import "golang.org/x/hello/hi"
func main() {
_ = hi.Goodbye
}
+-- b/go.mod --
+module mod.com/b
+
+go 1.14
+
+require golang.org/x/hello v1.2.3
+-- b/go.sum --
+golang.org/x/hello v1.2.3 h1:7Wesfkx/uBd+eFgPrq0irYj/1XfmbvLV8jZ/W7C2Dwg=
+golang.org/x/hello v1.2.3/go.mod h1:OgtlzsxVMUUdsdQCIDYgaauCTH47B8T8vofouNJfzgY=
+-- b/main.go --
+package main
+
+import (
+ "golang.org/x/hello/hi"
+)
+
+func main() {
+ _ = hi.Goodbye
+}
`
- const wantGoMod = `module mod.com
+ const wantGoModA = `module mod.com/a
go 1.14
require golang.org/x/hello v1.3.3
`
+ // Applying the diagnostics or running the codelenses for a/go.mod
+ // should not change the contents of b/go.mod
+ const wantGoModB = `module mod.com/b
+
+go 1.14
+
+require golang.org/x/hello v1.2.3
+`
for _, commandTitle := range []string{
"Upgrade transitive dependencies",
@@ -134,10 +168,11 @@ require golang.org/x/hello v1.3.3
WithOptions(
ProxyFiles(proxyWithLatest),
).Run(t, shouldUpdateDep, func(t *testing.T, env *Env) {
- env.OpenFile("go.mod")
+ env.OpenFile("a/go.mod")
+ env.OpenFile("b/go.mod")
var lens protocol.CodeLens
var found bool
- for _, l := range env.CodeLens("go.mod") {
+ for _, l := range env.CodeLens("a/go.mod") {
if l.Command.Title == commandTitle {
lens = l
found = true
@@ -152,9 +187,12 @@ require golang.org/x/hello v1.3.3
}); err != nil {
t.Fatal(err)
}
- env.Await(env.DoneWithChangeWatchedFiles())
- if got := env.Editor.BufferText("go.mod"); got != wantGoMod {
- t.Fatalf("go.mod upgrade failed:\n%s", tests.Diff(t, wantGoMod, got))
+ env.AfterChange()
+ if got := env.BufferText("a/go.mod"); got != wantGoModA {
+ t.Fatalf("a/go.mod upgrade failed:\n%s", compare.Text(wantGoModA, got))
+ }
+ if got := env.BufferText("b/go.mod"); got != wantGoModB {
+ t.Fatalf("b/go.mod changed unexpectedly:\n%s", compare.Text(wantGoModB, got))
}
})
})
@@ -163,22 +201,36 @@ require golang.org/x/hello v1.3.3
t.Run(fmt.Sprintf("Upgrade individual dependency vendoring=%v", vendoring), func(t *testing.T) {
WithOptions(ProxyFiles(proxyWithLatest)).Run(t, shouldUpdateDep, func(t *testing.T, env *Env) {
if vendoring {
- env.RunGoCommand("mod", "vendor")
+ env.RunGoCommandInDir("a", "mod", "vendor")
}
- env.Await(env.DoneWithChangeWatchedFiles())
- env.OpenFile("go.mod")
- env.ExecuteCodeLensCommand("go.mod", command.CheckUpgrades)
+ env.AfterChange()
+ env.OpenFile("a/go.mod")
+ env.OpenFile("b/go.mod")
+ env.ExecuteCodeLensCommand("a/go.mod", command.CheckUpgrades, nil)
d := &protocol.PublishDiagnosticsParams{}
- env.Await(
- OnceMet(
- env.DiagnosticAtRegexpWithMessage("go.mod", `require`, "can be upgraded"),
- ReadDiagnostics("go.mod", d),
- ),
+ env.OnceMet(
+ Diagnostics(env.AtRegexp("a/go.mod", `require`), WithMessage("can be upgraded")),
+ ReadDiagnostics("a/go.mod", d),
+ // We do not want there to be a diagnostic for b/go.mod,
+ // but there may be some subtlety in timing here, where this
+ // should always succeed, but may not actually test the correct
+ // behavior.
+ NoDiagnostics(env.AtRegexp("b/go.mod", `require`)),
)
- env.ApplyQuickFixes("go.mod", d.Diagnostics)
- env.Await(env.DoneWithChangeWatchedFiles())
- if got := env.Editor.BufferText("go.mod"); got != wantGoMod {
- t.Fatalf("go.mod upgrade failed:\n%s", tests.Diff(t, wantGoMod, got))
+ // Check for upgrades in b/go.mod and then clear them.
+ env.ExecuteCodeLensCommand("b/go.mod", command.CheckUpgrades, nil)
+ env.Await(Diagnostics(env.AtRegexp("b/go.mod", `require`), WithMessage("can be upgraded")))
+ env.ExecuteCodeLensCommand("b/go.mod", command.ResetGoModDiagnostics, nil)
+ env.Await(NoDiagnostics(ForFile("b/go.mod")))
+
+ // Apply the diagnostics to a/go.mod.
+ env.ApplyQuickFixes("a/go.mod", d.Diagnostics)
+ env.AfterChange()
+ if got := env.BufferText("a/go.mod"); got != wantGoModA {
+ t.Fatalf("a/go.mod upgrade failed:\n%s", compare.Text(wantGoModA, got))
+ }
+ if got := env.BufferText("b/go.mod"); got != wantGoModB {
+ t.Fatalf("b/go.mod changed unexpectedly:\n%s", compare.Text(wantGoModB, got))
}
})
})
@@ -186,7 +238,6 @@ require golang.org/x/hello v1.3.3
}
func TestUnusedDependenciesCodelens(t *testing.T) {
- testenv.NeedsGo1Point(t, 14)
const proxy = `
-- golang.org/x/hello@v1.0.0/go.mod --
module golang.org/x/hello
@@ -230,9 +281,9 @@ func main() {
`
WithOptions(ProxyFiles(proxy)).Run(t, shouldRemoveDep, func(t *testing.T, env *Env) {
env.OpenFile("go.mod")
- env.ExecuteCodeLensCommand("go.mod", command.Tidy)
+ env.ExecuteCodeLensCommand("go.mod", command.Tidy, nil)
env.Await(env.DoneWithChangeWatchedFiles())
- got := env.Editor.BufferText("go.mod")
+ got := env.BufferText("go.mod")
const wantGoMod = `module mod.com
go 1.14
@@ -240,15 +291,13 @@ go 1.14
require golang.org/x/hello v1.0.0
`
if got != wantGoMod {
- t.Fatalf("go.mod tidy failed:\n%s", tests.Diff(t, wantGoMod, got))
+ t.Fatalf("go.mod tidy failed:\n%s", compare.Text(wantGoMod, got))
}
})
}
func TestRegenerateCgo(t *testing.T) {
testenv.NeedsTool(t, "cgo")
- testenv.NeedsGo1Point(t, 15)
-
const workspace = `
-- go.mod --
module example.com
@@ -269,85 +318,19 @@ func Foo() {
Run(t, workspace, func(t *testing.T, env *Env) {
// Open the file. We have a nonexistant symbol that will break cgo processing.
env.OpenFile("cgo.go")
- env.Await(env.DiagnosticAtRegexpWithMessage("cgo.go", ``, "go list failed to return CompiledGoFiles"))
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("cgo.go", ``), WithMessage("go list failed to return CompiledGoFiles")),
+ )
// Fix the C function name. We haven't regenerated cgo, so nothing should be fixed.
env.RegexpReplace("cgo.go", `int fortythree`, "int fortytwo")
env.SaveBuffer("cgo.go")
- env.Await(OnceMet(
- env.DoneWithSave(),
- env.DiagnosticAtRegexpWithMessage("cgo.go", ``, "go list failed to return CompiledGoFiles"),
- ))
-
- // Regenerate cgo, fixing the diagnostic.
- env.ExecuteCodeLensCommand("cgo.go", command.RegenerateCgo)
- env.Await(EmptyDiagnostics("cgo.go"))
- })
-}
-
-func TestGCDetails(t *testing.T) {
- testenv.NeedsGo1Point(t, 15)
- if runtime.GOOS == "android" {
- t.Skipf("the gc details code lens doesn't work on Android")
- }
-
- const mod = `
--- go.mod --
-module mod.com
-
-go 1.15
--- main.go --
-package main
-
-import "fmt"
-
-func main() {
- fmt.Println(42)
-}
-`
- WithOptions(
- EditorConfig{
- CodeLenses: map[string]bool{
- "gc_details": true,
- }},
- ).Run(t, mod, func(t *testing.T, env *Env) {
- env.OpenFile("main.go")
- env.ExecuteCodeLensCommand("main.go", command.GCDetails)
- d := &protocol.PublishDiagnosticsParams{}
- env.Await(
- OnceMet(
- DiagnosticAt("main.go", 5, 13),
- ReadDiagnostics("main.go", d),
- ),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("cgo.go", ``), WithMessage("go list failed to return CompiledGoFiles")),
)
- // Confirm that the diagnostics come from the gc details code lens.
- var found bool
- for _, d := range d.Diagnostics {
- if d.Severity != protocol.SeverityInformation {
- t.Fatalf("unexpected diagnostic severity %v, wanted Information", d.Severity)
- }
- if strings.Contains(d.Message, "42 escapes") {
- found = true
- }
- }
- if !found {
- t.Fatalf(`expected to find diagnostic with message "escape(42 escapes to heap)", found none`)
- }
-
- // Editing a buffer should cause gc_details diagnostics to disappear, since
- // they only apply to saved buffers.
- env.EditBuffer("main.go", fake.NewEdit(0, 0, 0, 0, "\n\n"))
- env.Await(EmptyDiagnostics("main.go"))
- // Saving a buffer should re-format back to the original state, and
- // re-enable the gc_details diagnostics.
- env.SaveBuffer("main.go")
- env.Await(DiagnosticAt("main.go", 5, 13))
-
- // Toggle the GC details code lens again so now it should be off.
- env.ExecuteCodeLensCommand("main.go", command.GCDetails)
- env.Await(
- EmptyDiagnostics("main.go"),
- )
+ // Regenerate cgo, fixing the diagnostic.
+ env.ExecuteCodeLensCommand("cgo.go", command.RegenerateCgo, nil)
+ env.Await(NoDiagnostics(ForFile("cgo.go")))
})
}
diff --git a/gopls/internal/regtest/codelens/gcdetails_test.go b/gopls/internal/regtest/codelens/gcdetails_test.go
new file mode 100644
index 000000000..e0642d652
--- /dev/null
+++ b/gopls/internal/regtest/codelens/gcdetails_test.go
@@ -0,0 +1,127 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package codelens
+
+import (
+ "runtime"
+ "strings"
+ "testing"
+
+ "golang.org/x/tools/gopls/internal/lsp/command"
+ "golang.org/x/tools/gopls/internal/lsp/fake"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
+ "golang.org/x/tools/internal/bug"
+)
+
+func TestGCDetails_Toggle(t *testing.T) {
+ if runtime.GOOS == "android" {
+ t.Skipf("the gc details code lens doesn't work on Android")
+ }
+
+ const mod = `
+-- go.mod --
+module mod.com
+
+go 1.15
+-- main.go --
+package main
+
+import "fmt"
+
+func main() {
+ fmt.Println(42)
+}
+`
+ WithOptions(
+ Settings{
+ "codelenses": map[string]bool{
+ "gc_details": true,
+ },
+ },
+ ).Run(t, mod, func(t *testing.T, env *Env) {
+ env.OpenFile("main.go")
+ env.ExecuteCodeLensCommand("main.go", command.GCDetails, nil)
+ d := &protocol.PublishDiagnosticsParams{}
+ env.OnceMet(
+ Diagnostics(AtPosition("main.go", 5, 13)),
+ ReadDiagnostics("main.go", d),
+ )
+ // Confirm that the diagnostics come from the gc details code lens.
+ var found bool
+ for _, d := range d.Diagnostics {
+ if d.Severity != protocol.SeverityInformation {
+ t.Fatalf("unexpected diagnostic severity %v, wanted Information", d.Severity)
+ }
+ if strings.Contains(d.Message, "42 escapes") {
+ found = true
+ }
+ }
+ if !found {
+ t.Fatalf(`expected to find diagnostic with message "escape(42 escapes to heap)", found none`)
+ }
+
+ // Editing a buffer should cause gc_details diagnostics to disappear, since
+ // they only apply to saved buffers.
+ env.EditBuffer("main.go", fake.NewEdit(0, 0, 0, 0, "\n\n"))
+ env.AfterChange(NoDiagnostics(ForFile("main.go")))
+
+ // Saving a buffer should re-format back to the original state, and
+ // re-enable the gc_details diagnostics.
+ env.SaveBuffer("main.go")
+ env.AfterChange(Diagnostics(AtPosition("main.go", 5, 13)))
+
+ // Toggle the GC details code lens again so now it should be off.
+ env.ExecuteCodeLensCommand("main.go", command.GCDetails, nil)
+ env.Await(NoDiagnostics(ForFile("main.go")))
+ })
+}
+
+// Test for the crasher in golang/go#54199
+func TestGCDetails_NewFile(t *testing.T) {
+ bug.PanicOnBugs = false
+ const src = `
+-- go.mod --
+module mod.test
+
+go 1.12
+`
+
+ WithOptions(
+ Settings{
+ "codelenses": map[string]bool{
+ "gc_details": true,
+ },
+ },
+ ).Run(t, src, func(t *testing.T, env *Env) {
+ env.CreateBuffer("p_test.go", "")
+
+ const gcDetailsCommand = "gopls." + string(command.GCDetails)
+
+ hasGCDetails := func() bool {
+ lenses := env.CodeLens("p_test.go") // should not crash
+ for _, lens := range lenses {
+ if lens.Command.Command == gcDetailsCommand {
+ return true
+ }
+ }
+ return false
+ }
+
+ // With an empty file, we shouldn't get the gc_details codelens because
+ // there is nowhere to position it (it needs a package name).
+ if hasGCDetails() {
+ t.Errorf("got the gc_details codelens for an empty file")
+ }
+
+ // Edit to provide a package name.
+ env.EditBuffer("p_test.go", fake.NewEdit(0, 0, 0, 0, "package p"))
+
+ // Now we should get the gc_details codelens.
+ if !hasGCDetails() {
+ t.Errorf("didn't get the gc_details codelens for a valid non-empty Go file")
+ }
+ })
+}
diff --git a/gopls/internal/regtest/completion/completion18_test.go b/gopls/internal/regtest/completion/completion18_test.go
index 9683e30c8..18e81bc4b 100644
--- a/gopls/internal/regtest/completion/completion18_test.go
+++ b/gopls/internal/regtest/completion/completion18_test.go
@@ -10,7 +10,8 @@ package completion
import (
"testing"
- . "golang.org/x/tools/internal/lsp/regtest"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
)
// test generic receivers
@@ -41,10 +42,10 @@ func (s SyncMap[XX,string]) g(v UU) {}
env.OpenFile("main.go")
env.Await(env.DoneWithOpen())
for _, tst := range tests {
- pos := env.RegexpSearch("main.go", tst.pat)
- pos.Column += len(tst.pat)
- completions := env.Completion("main.go", pos)
- result := compareCompletionResults(tst.want, completions.Items)
+ loc := env.RegexpSearch("main.go", tst.pat)
+ loc.Range.Start.Character += uint32(protocol.UTF16Len([]byte(tst.pat)))
+ completions := env.Completion(loc)
+ result := compareCompletionLabels(tst.want, completions.Items)
if result != "" {
t.Errorf("%s: wanted %v", result, tst.want)
for i, g := range completions.Items {
@@ -95,7 +96,7 @@ func FuzzHex(f *testing.F) {
tests := []struct {
file string
pat string
- offset int // from the beginning of pat to what the user just typed
+ offset uint32 // UTF16 length from the beginning of pat to what the user just typed
want []string
}{
{"a_test.go", "f.Ad", 3, []string{"Add"}},
@@ -108,10 +109,10 @@ func FuzzHex(f *testing.F) {
for _, test := range tests {
env.OpenFile(test.file)
env.Await(env.DoneWithOpen())
- pos := env.RegexpSearch(test.file, test.pat)
- pos.Column += test.offset // character user just typed? will type?
- completions := env.Completion(test.file, pos)
- result := compareCompletionResults(test.want, completions.Items)
+ loc := env.RegexpSearch(test.file, test.pat)
+ loc.Range.Start.Character += test.offset // character user just typed? will type?
+ completions := env.Completion(loc)
+ result := compareCompletionLabels(test.want, completions.Items)
if result != "" {
t.Errorf("pat %q %q", test.pat, result)
for i, it := range completions.Items {
diff --git a/gopls/internal/regtest/completion/completion_test.go b/gopls/internal/regtest/completion/completion_test.go
index c0b4736e7..81addba11 100644
--- a/gopls/internal/regtest/completion/completion_test.go
+++ b/gopls/internal/regtest/completion/completion_test.go
@@ -9,15 +9,17 @@ import (
"strings"
"testing"
+ "github.com/google/go-cmp/cmp"
"golang.org/x/tools/gopls/internal/hooks"
- . "golang.org/x/tools/internal/lsp/regtest"
-
- "golang.org/x/tools/internal/lsp/fake"
- "golang.org/x/tools/internal/lsp/protocol"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
+ "golang.org/x/tools/internal/bug"
"golang.org/x/tools/internal/testenv"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
)
func TestMain(m *testing.M) {
+ bug.PanicOnBugs = true
Main(m, hooks.Options)
}
@@ -41,7 +43,6 @@ const Name = "Hello"
`
func TestPackageCompletion(t *testing.T) {
- testenv.NeedsGo1Point(t, 14)
const files = `
-- go.mod --
module mod.com
@@ -172,41 +173,35 @@ package
Run(t, files, func(t *testing.T, env *Env) {
if tc.content != nil {
env.WriteWorkspaceFile(tc.filename, *tc.content)
- env.Await(
- env.DoneWithChangeWatchedFiles(),
- )
+ env.Await(env.DoneWithChangeWatchedFiles())
}
env.OpenFile(tc.filename)
- completions := env.Completion(tc.filename, env.RegexpSearch(tc.filename, tc.triggerRegexp))
+ completions := env.Completion(env.RegexpSearch(tc.filename, tc.triggerRegexp))
// Check that the completion item suggestions are in the range
- // of the file.
- lineCount := len(strings.Split(env.Editor.BufferText(tc.filename), "\n"))
+ // of the file. {Start,End}.Line are zero-based.
+ lineCount := len(strings.Split(env.BufferText(tc.filename), "\n"))
for _, item := range completions.Items {
- if start := int(item.TextEdit.Range.Start.Line); start >= lineCount {
- t.Fatalf("unexpected text edit range start line number: got %d, want less than %d", start, lineCount)
+ if start := int(item.TextEdit.Range.Start.Line); start > lineCount {
+ t.Fatalf("unexpected text edit range start line number: got %d, want <= %d", start, lineCount)
}
- if end := int(item.TextEdit.Range.End.Line); end >= lineCount {
- t.Fatalf("unexpected text edit range end line number: got %d, want less than %d", end, lineCount)
+ if end := int(item.TextEdit.Range.End.Line); end > lineCount {
+ t.Fatalf("unexpected text edit range end line number: got %d, want <= %d", end, lineCount)
}
}
if tc.want != nil {
- start, end := env.RegexpRange(tc.filename, tc.editRegexp)
- expectedRng := protocol.Range{
- Start: fake.Pos.ToProtocolPosition(start),
- End: fake.Pos.ToProtocolPosition(end),
- }
+ expectedLoc := env.RegexpSearch(tc.filename, tc.editRegexp)
for _, item := range completions.Items {
gotRng := item.TextEdit.Range
- if expectedRng != gotRng {
+ if expectedLoc.Range != gotRng {
t.Errorf("unexpected completion range for completion item %s: got %v, want %v",
- item.Label, gotRng, expectedRng)
+ item.Label, gotRng, expectedLoc.Range)
}
}
}
- diff := compareCompletionResults(tc.want, completions.Items)
+ diff := compareCompletionLabels(tc.want, completions.Items)
if diff != "" {
t.Error(diff)
}
@@ -228,23 +223,18 @@ package ma
want := []string{"ma", "ma_test", "main", "math", "math_test"}
Run(t, files, func(t *testing.T, env *Env) {
env.OpenFile("math/add.go")
- completions := env.Completion("math/add.go", fake.Pos{
- Line: 0,
- Column: 10,
- })
+ completions := env.Completion(env.RegexpSearch("math/add.go", "package ma()"))
- diff := compareCompletionResults(want, completions.Items)
+ diff := compareCompletionLabels(want, completions.Items)
if diff != "" {
t.Fatal(diff)
}
})
}
-func compareCompletionResults(want []string, gotItems []protocol.CompletionItem) string {
- if len(gotItems) != len(want) {
- return fmt.Sprintf("got %v completion(s), want %v", len(gotItems), len(want))
- }
-
+// TODO(rfindley): audit/clean up call sites for this helper, to ensure
+// consistent test errors.
+func compareCompletionLabels(want []string, gotItems []protocol.CompletionItem) string {
var got []string
for _, item := range gotItems {
got = append(got, item.Label)
@@ -254,18 +244,17 @@ func compareCompletionResults(want []string, gotItems []protocol.CompletionItem)
}
}
- for i, v := range got {
- if v != want[i] {
- return fmt.Sprintf("%d completion result not the same: got %q, want %q", i, v, want[i])
- }
+ if len(got) == 0 && len(want) == 0 {
+ return "" // treat nil and the empty slice as equivalent
}
+ if diff := cmp.Diff(want, got); diff != "" {
+ return fmt.Sprintf("completion item mismatch (-want +got):\n%s", diff)
+ }
return ""
}
func TestUnimportedCompletion(t *testing.T) {
- testenv.NeedsGo1Point(t, 14)
-
const mod = `
-- go.mod --
module mod.com
@@ -303,19 +292,19 @@ func _() {
// Trigger unimported completions for the example.com/blah package.
env.OpenFile("main.go")
env.Await(env.DoneWithOpen())
- pos := env.RegexpSearch("main.go", "ah")
- completions := env.Completion("main.go", pos)
+ loc := env.RegexpSearch("main.go", "ah")
+ completions := env.Completion(loc)
if len(completions.Items) == 0 {
t.Fatalf("no completion items")
}
- env.AcceptCompletion("main.go", pos, completions.Items[0])
+ env.AcceptCompletion(loc, completions.Items[0]) // adds blah import to main.go
env.Await(env.DoneWithChange())
// Trigger completions once again for the blah.<> selector.
env.RegexpReplace("main.go", "_ = blah", "_ = blah.")
env.Await(env.DoneWithChange())
- pos = env.RegexpSearch("main.go", "\n}")
- completions = env.Completion("main.go", pos)
+ loc = env.RegexpSearch("main.go", "\n}")
+ completions = env.Completion(loc)
if len(completions.Items) != 1 {
t.Fatalf("expected 1 completion item, got %v", len(completions.Items))
}
@@ -323,11 +312,11 @@ func _() {
if item.Label != "Name" {
t.Fatalf("expected completion item blah.Name, got %v", item.Label)
}
- env.AcceptCompletion("main.go", pos, item)
+ env.AcceptCompletion(loc, item)
// Await the diagnostics to add example.com/blah to the go.mod file.
- env.Await(
- env.DiagnosticAtRegexp("main.go", `"example.com/blah"`),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("main.go", `"example.com/blah"`)),
)
})
}
@@ -391,8 +380,8 @@ type S struct {
Run(t, files, func(t *testing.T, env *Env) {
env.OpenFile("foo.go")
- completions := env.Completion("foo.go", env.RegexpSearch("foo.go", `if s\.()`))
- diff := compareCompletionResults([]string{"i"}, completions.Items)
+ completions := env.Completion(env.RegexpSearch("foo.go", `if s\.()`))
+ diff := compareCompletionLabels([]string{"i"}, completions.Items)
if diff != "" {
t.Fatal(diff)
}
@@ -451,8 +440,8 @@ func _() {
{`var _ e = xxxx()`, []string{"xxxxc", "xxxxd", "xxxxe"}},
}
for _, tt := range tests {
- completions := env.Completion("main.go", env.RegexpSearch("main.go", tt.re))
- diff := compareCompletionResults(tt.want, completions.Items)
+ completions := env.Completion(env.RegexpSearch("main.go", tt.re))
+ diff := compareCompletionLabels(tt.want, completions.Items)
if diff != "" {
t.Errorf("%s: %s", tt.re, diff)
}
@@ -484,32 +473,30 @@ func doit() {
`
Run(t, files, func(t *testing.T, env *Env) {
env.OpenFile("prog.go")
- pos := env.RegexpSearch("prog.go", "if fooF")
- pos.Column += len("if fooF")
- completions := env.Completion("prog.go", pos)
- diff := compareCompletionResults([]string{"fooFunc"}, completions.Items)
+ loc := env.RegexpSearch("prog.go", "if fooF")
+ loc.Range.Start.Character += uint32(protocol.UTF16Len([]byte("if fooF")))
+ completions := env.Completion(loc)
+ diff := compareCompletionLabels([]string{"fooFunc"}, completions.Items)
if diff != "" {
t.Error(diff)
}
if completions.Items[0].Tags == nil {
- t.Errorf("expected Tags to show deprecation %#v", diff[0])
+ t.Errorf("expected Tags to show deprecation %#v", completions.Items[0].Tags)
}
- pos = env.RegexpSearch("prog.go", "= badP")
- pos.Column += len("= badP")
- completions = env.Completion("prog.go", pos)
- diff = compareCompletionResults([]string{"badPi"}, completions.Items)
+ loc = env.RegexpSearch("prog.go", "= badP")
+ loc.Range.Start.Character += uint32(protocol.UTF16Len([]byte("= badP")))
+ completions = env.Completion(loc)
+ diff = compareCompletionLabels([]string{"badPi"}, completions.Items)
if diff != "" {
t.Error(diff)
}
if completions.Items[0].Tags == nil {
- t.Errorf("expected Tags to show deprecation %#v", diff[0])
+ t.Errorf("expected Tags to show deprecation %#v", completions.Items[0].Tags)
}
})
}
func TestUnimportedCompletion_VSCodeIssue1489(t *testing.T) {
- testenv.NeedsGo1Point(t, 14)
-
const src = `
-- go.mod --
module mod.com
@@ -527,74 +514,195 @@ func main() {
}
`
WithOptions(
- EditorConfig{WindowsLineEndings: true},
+ WindowsLineEndings(),
).Run(t, src, func(t *testing.T, env *Env) {
- // Trigger unimported completions for the example.com/blah package.
+ // Trigger unimported completions for the mod.com package.
env.OpenFile("main.go")
env.Await(env.DoneWithOpen())
- pos := env.RegexpSearch("main.go", "Sqr()")
- completions := env.Completion("main.go", pos)
+ loc := env.RegexpSearch("main.go", "Sqr()")
+ completions := env.Completion(loc)
if len(completions.Items) == 0 {
t.Fatalf("no completion items")
}
- env.AcceptCompletion("main.go", pos, completions.Items[0])
+ env.AcceptCompletion(loc, completions.Items[0])
env.Await(env.DoneWithChange())
- got := env.Editor.BufferText("main.go")
+ got := env.BufferText("main.go")
want := "package main\r\n\r\nimport (\r\n\t\"fmt\"\r\n\t\"math\"\r\n)\r\n\r\nfunc main() {\r\n\tfmt.Println(\"a\")\r\n\tmath.Sqrt(${1:})\r\n}\r\n"
- if got != want {
- t.Errorf("unimported completion: got %q, want %q", got, want)
+ if diff := cmp.Diff(want, got); diff != "" {
+ t.Errorf("unimported completion (-want +got):\n%s", diff)
+ }
+ })
+}
+
+func TestPackageMemberCompletionAfterSyntaxError(t *testing.T) {
+ // This test documents the current broken behavior due to golang/go#58833.
+ const src = `
+-- go.mod --
+module mod.com
+
+go 1.14
+
+-- main.go --
+package main
+
+import "math"
+
+func main() {
+ math.Sqrt(,0)
+ math.Ldex
+}
+`
+ Run(t, src, func(t *testing.T, env *Env) {
+ env.OpenFile("main.go")
+ env.Await(env.DoneWithOpen())
+ loc := env.RegexpSearch("main.go", "Ldex()")
+ completions := env.Completion(loc)
+ if len(completions.Items) == 0 {
+ t.Fatalf("no completion items")
+ }
+ env.AcceptCompletion(loc, completions.Items[0])
+ env.Await(env.DoneWithChange())
+ got := env.BufferText("main.go")
+ // The completion of math.Ldex after the syntax error on the
+ // previous line is not "math.Ldexp" but "math.Ldexmath.Abs".
+ // (In VSCode, "Abs" wrongly appears in the completion menu.)
+ // This is a consequence of poor error recovery in the parser
+ // causing "math.Ldex" to become a BadExpr.
+ want := "package main\n\nimport \"math\"\n\nfunc main() {\n\tmath.Sqrt(,0)\n\tmath.Ldexmath.Abs(${1:})\n}\n"
+ if diff := cmp.Diff(want, got); diff != "" {
+ t.Errorf("unimported completion (-want +got):\n%s", diff)
}
})
}
func TestDefinition(t *testing.T) {
- stuff := `
+ testenv.NeedsGo1Point(t, 17) // in go1.16, The FieldList in func x is not empty
+ files := `
-- go.mod --
module mod.com
go 1.18
-- a_test.go --
package foo
-func T()
-func TestG()
-func TestM()
-func TestMi()
-func Ben()
-func Fuz()
-func Testx()
-func TestMe(t *testing.T)
-func BenchmarkFoo()
`
- // All those parentheses are needed for the completion code to see
- // later lines as being definitions
tests := []struct {
- pat string
- want []string
+ line string // the sole line in the buffer after the package statement
+ pat string // the pattern to search for
+ want []string // expected completions
}{
- {"T", []string{"TestXxx(t *testing.T)", "TestMain(m *testing.M)"}},
- {"TestM", []string{"TestMain(m *testing.M)", "TestM(t *testing.T)"}},
- {"TestMi", []string{"TestMi(t *testing.T)"}},
- {"TestG", []string{"TestG(t *testing.T)"}},
- {"B", []string{"BenchmarkXxx(b *testing.B)"}},
- {"BenchmarkFoo", []string{"BenchmarkFoo(b *testing.B)"}},
- {"F", []string{"FuzzXxx(f *testing.F)"}},
- {"Testx", nil},
- {"TestMe", []string{"TestMe"}},
+ {"func T", "T", []string{"TestXxx(t *testing.T)", "TestMain(m *testing.M)"}},
+ {"func T()", "T", []string{"TestMain", "Test"}},
+ {"func TestM", "TestM", []string{"TestMain(m *testing.M)", "TestM(t *testing.T)"}},
+ {"func TestM()", "TestM", []string{"TestMain"}},
+ {"func TestMi", "TestMi", []string{"TestMi(t *testing.T)"}},
+ {"func TestMi()", "TestMi", nil},
+ {"func TestG", "TestG", []string{"TestG(t *testing.T)"}},
+ {"func TestG(", "TestG", nil},
+ {"func Ben", "B", []string{"BenchmarkXxx(b *testing.B)"}},
+ {"func Ben(", "Ben", []string{"Benchmark"}},
+ {"func BenchmarkFoo", "BenchmarkFoo", []string{"BenchmarkFoo(b *testing.B)"}},
+ {"func BenchmarkFoo(", "BenchmarkFoo", nil},
+ {"func Fuz", "F", []string{"FuzzXxx(f *testing.F)"}},
+ {"func Fuz(", "Fuz", []string{"Fuzz"}},
+ {"func Testx", "Testx", nil},
+ {"func TestMe(t *testing.T)", "TestMe", nil},
+ {"func Te(t *testing.T)", "Te", []string{"TestMain", "Test"}},
}
fname := "a_test.go"
- Run(t, stuff, func(t *testing.T, env *Env) {
+ Run(t, files, func(t *testing.T, env *Env) {
env.OpenFile(fname)
env.Await(env.DoneWithOpen())
+ for _, test := range tests {
+ env.SetBufferContent(fname, "package foo\n"+test.line)
+ loc := env.RegexpSearch(fname, test.pat)
+ loc.Range.Start.Character += uint32(protocol.UTF16Len([]byte(test.pat)))
+ completions := env.Completion(loc)
+ if diff := compareCompletionLabels(test.want, completions.Items); diff != "" {
+ t.Error(diff)
+ }
+ }
+ })
+}
+
+// Test that completing a definition replaces source text when applied, golang/go#56852.
+// Note: With go <= 1.16 the completions does not add parameters and fails these tests.
+func TestDefinitionReplaceRange(t *testing.T) {
+ testenv.NeedsGo1Point(t, 17)
+
+ const mod = `
+-- go.mod --
+module mod.com
+
+go 1.17
+`
+
+ tests := []struct {
+ name string
+ before, after string
+ }{
+ {
+ name: "func TestMa",
+ before: `
+package foo_test
+
+func TestMa
+`,
+ after: `
+package foo_test
+
+func TestMain(m *testing.M)
+`,
+ },
+ {
+ name: "func TestSome",
+ before: `
+package foo_test
+
+func TestSome
+`,
+ after: `
+package foo_test
+
+func TestSome(t *testing.T)
+`,
+ },
+ {
+ name: "func Bench",
+ before: `
+package foo_test
+
+func Bench
+`,
+ // Note: Snippet with escaped }.
+ after: `
+package foo_test
+
+func Benchmark${1:Xxx}(b *testing.B) {
+ $0
+\}
+`,
+ },
+ }
+
+ Run(t, mod, func(t *testing.T, env *Env) {
+ env.CreateBuffer("foo_test.go", "")
+
for _, tst := range tests {
- pos := env.RegexpSearch(fname, tst.pat)
- pos.Column += len(tst.pat)
- completions := env.Completion(fname, pos)
- result := compareCompletionResults(tst.want, completions.Items)
- if result != "" {
- t.Errorf("%s failed: %s:%q", tst.pat, result, tst.want)
- for i, it := range completions.Items {
- t.Errorf("%d got %q %q", i, it.Label, it.Detail)
- }
+ tst.before = strings.Trim(tst.before, "\n")
+ tst.after = strings.Trim(tst.after, "\n")
+ env.SetBufferContent("foo_test.go", tst.before)
+
+ loc := env.RegexpSearch("foo_test.go", tst.name)
+ loc.Range.Start.Character = uint32(protocol.UTF16Len([]byte(tst.name)))
+ completions := env.Completion(loc)
+ if len(completions.Items) == 0 {
+ t.Fatalf("no completion items")
+ }
+
+ env.AcceptCompletion(loc, completions.Items[0])
+ env.Await(env.DoneWithChange())
+ if buf := env.BufferText("foo_test.go"); buf != tst.after {
+ t.Errorf("%s:incorrect completion: got %q, want %q", tst.name, buf, tst.after)
}
}
})
@@ -636,8 +744,8 @@ use ./dir/foobar/
{`use ./dir/foobar/()`, []string{}},
}
for _, tt := range tests {
- completions := env.Completion("go.work", env.RegexpSearch("go.work", tt.re))
- diff := compareCompletionResults(tt.want, completions.Items)
+ completions := env.Completion(env.RegexpSearch("go.work", tt.re))
+ diff := compareCompletionLabels(tt.want, completions.Items)
if diff != "" {
t.Errorf("%s: %s", tt.re, diff)
}
diff --git a/gopls/internal/regtest/completion/postfix_snippet_test.go b/gopls/internal/regtest/completion/postfix_snippet_test.go
index 2674d555c..df69703ee 100644
--- a/gopls/internal/regtest/completion/postfix_snippet_test.go
+++ b/gopls/internal/regtest/completion/postfix_snippet_test.go
@@ -8,13 +8,10 @@ import (
"strings"
"testing"
- . "golang.org/x/tools/internal/lsp/regtest"
- "golang.org/x/tools/internal/lsp/source"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
)
func TestPostfixSnippetCompletion(t *testing.T) {
- t.Skipf("skipping test due to suspected synchronization bug; see https://go.dev/issue/50707")
-
const mod = `
-- go.mod --
module mod.com
@@ -268,6 +265,27 @@ for k := range foo {
`,
},
{
+ name: "channel_range",
+ before: `
+package foo
+
+func _() {
+ foo := make(chan int)
+ foo.range
+}
+`,
+ after: `
+package foo
+
+func _() {
+ foo := make(chan int)
+ for e := range foo {
+ $0
+}
+}
+`,
+ },
+ {
name: "var",
before: `
package foo
@@ -379,7 +397,7 @@ func _() {
before: `
package foo
-func foo() []string {
+func foo() []string {
x := "test"
return x.split
}`,
@@ -388,7 +406,7 @@ package foo
import "strings"
-func foo() []string {
+func foo() []string {
x := "test"
return strings.Split(x, "$0")
}`,
@@ -414,26 +432,30 @@ func foo() string {
},
}
- r := WithOptions(Options(func(o *source.Options) {
- o.ExperimentalPostfixCompletions = true
- }))
+ r := WithOptions(
+ Settings{
+ "experimentalPostfixCompletions": true,
+ },
+ )
r.Run(t, mod, func(t *testing.T, env *Env) {
+ env.CreateBuffer("foo.go", "")
+
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
c.before = strings.Trim(c.before, "\n")
c.after = strings.Trim(c.after, "\n")
- env.CreateBuffer("foo.go", c.before)
+ env.SetBufferContent("foo.go", c.before)
- pos := env.RegexpSearch("foo.go", "\n}")
- completions := env.Completion("foo.go", pos)
+ loc := env.RegexpSearch("foo.go", "\n}")
+ completions := env.Completion(loc)
if len(completions.Items) != 1 {
t.Fatalf("expected one completion, got %v", completions.Items)
}
- env.AcceptCompletion("foo.go", pos, completions.Items[0])
+ env.AcceptCompletion(loc, completions.Items[0])
- if buf := env.Editor.BufferText("foo.go"); buf != c.after {
+ if buf := env.BufferText("foo.go"); buf != c.after {
t.Errorf("\nGOT:\n%s\nEXPECTED:\n%s", buf, c.after)
}
})
diff --git a/gopls/internal/regtest/debug/debug_test.go b/gopls/internal/regtest/debug/debug_test.go
new file mode 100644
index 000000000..f8efb8f5d
--- /dev/null
+++ b/gopls/internal/regtest/debug/debug_test.go
@@ -0,0 +1,30 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package debug
+
+import (
+ "testing"
+
+ "golang.org/x/tools/gopls/internal/hooks"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
+ "golang.org/x/tools/internal/bug"
+)
+
+func TestMain(m *testing.M) {
+ Main(m, hooks.Options)
+}
+
+func TestBugNotification(t *testing.T) {
+ // Verify that a properly configured session gets notified of a bug on the
+ // server.
+ WithOptions(
+ Modes(Default), // must be in-process to receive the bug report below
+ Settings{"showBugReports": true},
+ ).Run(t, "", func(t *testing.T, env *Env) {
+ const desc = "got a bug"
+ bug.Report(desc, nil)
+ env.Await(ShownMessage(desc))
+ })
+}
diff --git a/gopls/internal/regtest/diagnostics/analysis_test.go b/gopls/internal/regtest/diagnostics/analysis_test.go
new file mode 100644
index 000000000..308c25f13
--- /dev/null
+++ b/gopls/internal/regtest/diagnostics/analysis_test.go
@@ -0,0 +1,49 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package diagnostics
+
+import (
+ "testing"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
+)
+
+// Test for the timeformat analyzer, following golang/vscode-go#2406.
+//
+// This test checks that applying the suggested fix from the analyzer resolves
+// the diagnostic warning.
+func TestTimeFormatAnalyzer(t *testing.T) {
+ const files = `
+-- go.mod --
+module mod.com
+
+go 1.18
+-- main.go --
+package main
+
+import (
+ "fmt"
+ "time"
+)
+
+func main() {
+ now := time.Now()
+ fmt.Println(now.Format("2006-02-01"))
+}`
+
+ Run(t, files, func(t *testing.T, env *Env) {
+ env.OpenFile("main.go")
+
+ var d protocol.PublishDiagnosticsParams
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("main.go", "2006-02-01")),
+ ReadDiagnostics("main.go", &d),
+ )
+
+ env.ApplyQuickFixes("main.go", d.Diagnostics)
+ env.AfterChange(NoDiagnostics(ForFile("main.go")))
+ })
+}
diff --git a/gopls/internal/regtest/diagnostics/builtin_test.go b/gopls/internal/regtest/diagnostics/builtin_test.go
index 775e7ec0b..935a7f9b8 100644
--- a/gopls/internal/regtest/diagnostics/builtin_test.go
+++ b/gopls/internal/regtest/diagnostics/builtin_test.go
@@ -8,7 +8,7 @@ import (
"strings"
"testing"
- . "golang.org/x/tools/internal/lsp/regtest"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
)
func TestIssue44866(t *testing.T) {
@@ -26,13 +26,10 @@ const (
`
Run(t, src, func(t *testing.T, env *Env) {
env.OpenFile("a.go")
- name, _ := env.GoToDefinition("a.go", env.RegexpSearch("a.go", "iota"))
- if !strings.HasSuffix(name, "builtin.go") {
- t.Fatalf("jumped to %q, want builtin.go", name)
+ loc := env.GoToDefinition(env.RegexpSearch("a.go", "iota"))
+ if !strings.HasSuffix(string(loc.URI), "builtin.go") {
+ t.Fatalf("jumped to %q, want builtin.go", loc.URI)
}
- env.Await(OnceMet(
- env.DoneWithOpen(),
- NoDiagnostics("builtin.go"),
- ))
+ env.AfterChange(NoDiagnostics(ForFile("builtin.go")))
})
}
diff --git a/gopls/internal/regtest/diagnostics/diagnostics_test.go b/gopls/internal/regtest/diagnostics/diagnostics_test.go
index c18dfbf91..b4b962efb 100644
--- a/gopls/internal/regtest/diagnostics/diagnostics_test.go
+++ b/gopls/internal/regtest/diagnostics/diagnostics_test.go
@@ -11,15 +11,16 @@ import (
"testing"
"golang.org/x/tools/gopls/internal/hooks"
- . "golang.org/x/tools/internal/lsp/regtest"
-
- "golang.org/x/tools/internal/lsp"
- "golang.org/x/tools/internal/lsp/fake"
- "golang.org/x/tools/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp"
+ "golang.org/x/tools/gopls/internal/lsp/fake"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
+ "golang.org/x/tools/internal/bug"
"golang.org/x/tools/internal/testenv"
)
func TestMain(m *testing.M) {
+ bug.PanicOnBugs = true
Main(m, hooks.Options)
}
@@ -47,13 +48,8 @@ func TestDiagnosticErrorInEditedFile(t *testing.T) {
// diagnostic.
env.OpenFile("main.go")
env.RegexpReplace("main.go", "Printl(n)", "")
- env.Await(
- // Once we have gotten diagnostics for the change above, we should
- // satisfy the DiagnosticAtRegexp assertion.
- OnceMet(
- env.DoneWithChange(),
- env.DiagnosticAtRegexp("main.go", "Printl"),
- ),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("main.go", "Printl")),
// Assert that this test has sent no error logs to the client. This is not
// strictly necessary for testing this regression, but is included here
// as an example of using the NoErrorLogs() expectation. Feel free to
@@ -77,13 +73,9 @@ func m() {
log.Println()
}
`)
- env.Await(
- env.DiagnosticAtRegexp("main.go", "log"),
- )
+ env.AfterChange(Diagnostics(env.AtRegexp("main.go", "log")))
env.SaveBuffer("main.go")
- env.Await(
- EmptyDiagnostics("main.go"),
- )
+ env.AfterChange(NoDiagnostics(ForFile("main.go")))
})
}
@@ -94,7 +86,7 @@ const Foo = "abc
`
Run(t, brokenFile, func(t *testing.T, env *Env) {
env.CreateBuffer("broken.go", brokenFile)
- env.Await(env.DiagnosticAtRegexp("broken.go", "\"abc"))
+ env.AfterChange(Diagnostics(env.AtRegexp("broken.go", "\"abc")))
})
}
@@ -117,13 +109,16 @@ const a = 2
func TestDiagnosticClearingOnEdit(t *testing.T) {
Run(t, badPackage, func(t *testing.T, env *Env) {
env.OpenFile("b.go")
- env.Await(env.DiagnosticAtRegexp("a.go", "a = 1"), env.DiagnosticAtRegexp("b.go", "a = 2"))
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("a.go", "a = 1")),
+ Diagnostics(env.AtRegexp("b.go", "a = 2")),
+ )
// Fix the error by editing the const name in b.go to `b`.
env.RegexpReplace("b.go", "(a) = 2", "b")
- env.Await(
- EmptyDiagnostics("a.go"),
- EmptyDiagnostics("b.go"),
+ env.AfterChange(
+ NoDiagnostics(ForFile("a.go")),
+ NoDiagnostics(ForFile("b.go")),
)
})
}
@@ -131,10 +126,16 @@ func TestDiagnosticClearingOnEdit(t *testing.T) {
func TestDiagnosticClearingOnDelete_Issue37049(t *testing.T) {
Run(t, badPackage, func(t *testing.T, env *Env) {
env.OpenFile("a.go")
- env.Await(env.DiagnosticAtRegexp("a.go", "a = 1"), env.DiagnosticAtRegexp("b.go", "a = 2"))
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("a.go", "a = 1")),
+ Diagnostics(env.AtRegexp("b.go", "a = 2")),
+ )
env.RemoveWorkspaceFile("b.go")
- env.Await(EmptyDiagnostics("a.go"), EmptyDiagnostics("b.go"))
+ env.AfterChange(
+ NoDiagnostics(ForFile("a.go")),
+ NoDiagnostics(ForFile("b.go")),
+ )
})
}
@@ -143,16 +144,16 @@ func TestDiagnosticClearingOnClose(t *testing.T) {
env.CreateBuffer("c.go", `package consts
const a = 3`)
- env.Await(
- env.DiagnosticAtRegexp("a.go", "a = 1"),
- env.DiagnosticAtRegexp("b.go", "a = 2"),
- env.DiagnosticAtRegexp("c.go", "a = 3"),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("a.go", "a = 1")),
+ Diagnostics(env.AtRegexp("b.go", "a = 2")),
+ Diagnostics(env.AtRegexp("c.go", "a = 3")),
)
env.CloseBuffer("c.go")
- env.Await(
- env.DiagnosticAtRegexp("a.go", "a = 1"),
- env.DiagnosticAtRegexp("b.go", "a = 2"),
- EmptyDiagnostics("c.go"),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("a.go", "a = 1")),
+ Diagnostics(env.AtRegexp("b.go", "a = 2")),
+ NoDiagnostics(ForFile("c.go")),
)
})
}
@@ -164,20 +165,20 @@ func TestIssue37978(t *testing.T) {
env.CreateBuffer("c/c.go", "")
// Write the file contents with a missing import.
- env.EditBuffer("c/c.go", fake.Edit{
- Text: `package c
+ env.EditBuffer("c/c.go", protocol.TextEdit{
+ NewText: `package c
const a = http.MethodGet
`,
})
- env.Await(
- env.DiagnosticAtRegexp("c/c.go", "http.MethodGet"),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("c/c.go", "http.MethodGet")),
)
// Save file, which will organize imports, adding the expected import.
// Expect the diagnostics to clear.
env.SaveBuffer("c/c.go")
- env.Await(
- EmptyDiagnostics("c/c.go"),
+ env.AfterChange(
+ NoDiagnostics(ForFile("c/c.go")),
)
})
}
@@ -211,15 +212,15 @@ func TestA(t *testing.T) {
// not break the workspace.
func TestDeleteTestVariant(t *testing.T) {
Run(t, test38878, func(t *testing.T, env *Env) {
- env.Await(env.DiagnosticAtRegexp("a_test.go", `f\((3)\)`))
+ env.AfterChange(Diagnostics(env.AtRegexp("a_test.go", `f\((3)\)`)))
env.RemoveWorkspaceFile("a_test.go")
- env.Await(EmptyDiagnostics("a_test.go"))
+ env.AfterChange(NoDiagnostics(ForFile("a_test.go")))
// Make sure the test variant has been removed from the workspace by
// triggering a metadata load.
env.OpenFile("a.go")
env.RegexpReplace("a.go", `// import`, "import")
- env.Await(env.DiagnosticAtRegexp("a.go", `"fmt"`))
+ env.AfterChange(Diagnostics(env.AtRegexp("a.go", `"fmt"`)))
})
}
@@ -228,11 +229,9 @@ func TestDeleteTestVariant(t *testing.T) {
func TestDeleteTestVariant_DiskOnly(t *testing.T) {
Run(t, test38878, func(t *testing.T, env *Env) {
env.OpenFile("a_test.go")
- env.Await(DiagnosticAt("a_test.go", 5, 3))
+ env.AfterChange(Diagnostics(AtPosition("a_test.go", 5, 3)))
env.Sandbox.Workdir.RemoveFile(context.Background(), "a_test.go")
- env.Await(OnceMet(
- env.DoneWithChangeWatchedFiles(),
- DiagnosticAt("a_test.go", 5, 3)))
+ env.AfterChange(Diagnostics(AtPosition("a_test.go", 5, 3)))
})
}
@@ -258,23 +257,20 @@ func Hello() {
t.Run("manual", func(t *testing.T) {
Run(t, noMod, func(t *testing.T, env *Env) {
- env.Await(
- env.DiagnosticAtRegexp("main.go", `"mod.com/bob"`),
+ env.OnceMet(
+ InitialWorkspaceLoad,
+ Diagnostics(env.AtRegexp("main.go", `"mod.com/bob"`)),
)
env.CreateBuffer("go.mod", `module mod.com
go 1.12
`)
env.SaveBuffer("go.mod")
- env.Await(
- EmptyDiagnostics("main.go"),
- )
var d protocol.PublishDiagnosticsParams
- env.Await(
- OnceMet(
- env.DiagnosticAtRegexp("bob/bob.go", "x"),
- ReadDiagnostics("bob/bob.go", &d),
- ),
+ env.AfterChange(
+ NoDiagnostics(ForFile("main.go")),
+ Diagnostics(env.AtRegexp("bob/bob.go", "x")),
+ ReadDiagnostics("bob/bob.go", &d),
)
if len(d.Diagnostics) != 1 {
t.Fatalf("expected 1 diagnostic, got %v", len(d.Diagnostics))
@@ -283,30 +279,32 @@ func Hello() {
})
t.Run("initialized", func(t *testing.T) {
Run(t, noMod, func(t *testing.T, env *Env) {
- env.Await(
- env.DiagnosticAtRegexp("main.go", `"mod.com/bob"`),
+ env.OnceMet(
+ InitialWorkspaceLoad,
+ Diagnostics(env.AtRegexp("main.go", `"mod.com/bob"`)),
)
env.RunGoCommand("mod", "init", "mod.com")
- env.Await(
- EmptyDiagnostics("main.go"),
- env.DiagnosticAtRegexp("bob/bob.go", "x"),
+ env.AfterChange(
+ NoDiagnostics(ForFile("main.go")),
+ Diagnostics(env.AtRegexp("bob/bob.go", "x")),
)
})
})
t.Run("without workspace module", func(t *testing.T) {
WithOptions(
- Modes(Singleton),
+ Modes(Default),
).Run(t, noMod, func(t *testing.T, env *Env) {
- env.Await(
- env.DiagnosticAtRegexp("main.go", `"mod.com/bob"`),
+ env.OnceMet(
+ InitialWorkspaceLoad,
+ Diagnostics(env.AtRegexp("main.go", `"mod.com/bob"`)),
)
if err := env.Sandbox.RunGoCommand(env.Ctx, "", "mod", []string{"init", "mod.com"}, true); err != nil {
t.Fatal(err)
}
- env.Await(
- EmptyDiagnostics("main.go"),
- env.DiagnosticAtRegexp("bob/bob.go", "x"),
+ env.AfterChange(
+ NoDiagnostics(ForFile("main.go")),
+ Diagnostics(env.AtRegexp("bob/bob.go", "x")),
)
})
})
@@ -347,15 +345,15 @@ func TestHello(t *testing.T) {
Run(t, testPackage, func(t *testing.T, env *Env) {
env.OpenFile("lib_test.go")
- env.Await(
- DiagnosticAt("lib_test.go", 10, 2),
- DiagnosticAt("lib_test.go", 11, 2),
+ env.AfterChange(
+ Diagnostics(AtPosition("lib_test.go", 10, 2)),
+ Diagnostics(AtPosition("lib_test.go", 11, 2)),
)
env.OpenFile("lib.go")
env.RegexpReplace("lib.go", "_ = x", "var y int")
- env.Await(
- env.DiagnosticAtRegexp("lib.go", "y int"),
- EmptyDiagnostics("lib_test.go"),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("lib.go", "y int")),
+ NoDiagnostics(ForFile("lib_test.go")),
)
})
}
@@ -374,16 +372,8 @@ func main() {}
Run(t, packageChange, func(t *testing.T, env *Env) {
env.OpenFile("a.go")
env.RegexpReplace("a.go", "foo", "foox")
- env.Await(
- // When the bug reported in #38328 was present, we didn't get erroneous
- // file diagnostics until after the didChange message generated by the
- // package renaming was fully processed. Therefore, in order for this
- // test to actually exercise the bug, we must wait until that work has
- // completed.
- OnceMet(
- env.DoneWithChange(),
- NoDiagnostics("a.go"),
- ),
+ env.AfterChange(
+ NoDiagnostics(ForFile("a.go")),
)
})
}
@@ -430,8 +420,12 @@ func TestResolveDiagnosticWithDownload(t *testing.T) {
env.OpenFile("print.go")
// Check that gopackages correctly loaded this dependency. We should get a
// diagnostic for the wrong formatting type.
- // TODO: we should be able to easily also match the diagnostic message.
- env.Await(env.DiagnosticAtRegexp("print.go", "fmt.Printf"))
+ env.AfterChange(
+ Diagnostics(
+ env.AtRegexp("print.go", "fmt.Printf"),
+ WithMessage("wrong type int"),
+ ),
+ )
})
}
@@ -454,7 +448,9 @@ func Hello() {
`
Run(t, adHoc, func(t *testing.T, env *Env) {
env.OpenFile("b/b.go")
- env.Await(env.DiagnosticAtRegexp("b/b.go", "x"))
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("b/b.go", "x")),
+ )
})
}
@@ -469,16 +465,15 @@ func _() {
}
`
WithOptions(
- EditorConfig{
- Env: map[string]string{
- "GOPATH": "",
- "GO111MODULE": "off",
- },
- }).Run(t, files, func(t *testing.T, env *Env) {
+ EnvVars{
+ "GOPATH": "",
+ "GO111MODULE": "off",
+ },
+ ).Run(t, files, func(t *testing.T, env *Env) {
env.OpenFile("main.go")
- env.Await(env.DiagnosticAtRegexp("main.go", "fmt"))
+ env.AfterChange(Diagnostics(env.AtRegexp("main.go", "fmt")))
env.SaveBuffer("main.go")
- env.Await(EmptyDiagnostics("main.go"))
+ env.AfterChange(NoDiagnostics(ForFile("main.go")))
})
}
@@ -498,11 +493,12 @@ package x
var X = 0
`
- editorConfig := EditorConfig{Env: map[string]string{"GOFLAGS": "-tags=foo"}}
- WithOptions(editorConfig).Run(t, files, func(t *testing.T, env *Env) {
+ WithOptions(
+ EnvVars{"GOFLAGS": "-tags=foo"},
+ ).Run(t, files, func(t *testing.T, env *Env) {
env.OpenFile("main.go")
env.OrganizeImports("main.go")
- env.Await(EmptyDiagnostics("main.go"))
+ env.AfterChange(NoDiagnostics(ForFile("main.go")))
})
}
@@ -527,11 +523,9 @@ func _() {
Run(t, generated, func(t *testing.T, env *Env) {
env.OpenFile("main.go")
var d protocol.PublishDiagnosticsParams
- env.Await(
- OnceMet(
- DiagnosticAt("main.go", 5, 8),
- ReadDiagnostics("main.go", &d),
- ),
+ env.AfterChange(
+ Diagnostics(AtPosition("main.go", 5, 8)),
+ ReadDiagnostics("main.go", &d),
)
if fixes := env.GetQuickFixes("main.go", d.Diagnostics); len(fixes) != 0 {
t.Errorf("got quick fixes %v, wanted none", fixes)
@@ -541,7 +535,7 @@ func _() {
// Expect a module/GOPATH error if there is an error in the file at startup.
// Tests golang/go#37279.
-func TestShowCriticalError_Issue37279(t *testing.T) {
+func TestBrokenWorkspace_OutsideModule(t *testing.T) {
const noModule = `
-- a.go --
package foo
@@ -554,11 +548,13 @@ func f() {
`
Run(t, noModule, func(t *testing.T, env *Env) {
env.OpenFile("a.go")
- env.Await(
+ env.AfterChange(
+ // Expect the adHocPackagesWarning.
OutstandingWork(lsp.WorkspaceLoadFailure, "outside of a module"),
)
+ // Deleting the import dismisses the warning.
env.RegexpReplace("a.go", `import "mod.com/hello"`, "")
- env.Await(
+ env.AfterChange(
NoOutstandingWork(),
)
})
@@ -571,10 +567,11 @@ hi mom
`
for _, go111module := range []string{"on", "off", ""} {
t.Run(fmt.Sprintf("GO111MODULE_%v", go111module), func(t *testing.T) {
- WithOptions(EditorConfig{
- Env: map[string]string{"GO111MODULE": go111module},
- }).Run(t, files, func(t *testing.T, env *Env) {
- env.Await(
+ WithOptions(
+ EnvVars{"GO111MODULE": go111module},
+ ).Run(t, files, func(t *testing.T, env *Env) {
+ env.OnceMet(
+ InitialWorkspaceLoad,
NoOutstandingWork(),
)
})
@@ -603,24 +600,20 @@ func main() {
`
WithOptions(
InGOPATH(),
- EditorConfig{
- Env: map[string]string{
- "GO111MODULE": "off",
- },
- },
+ EnvVars{"GO111MODULE": "off"},
).Run(t, collision, func(t *testing.T, env *Env) {
env.OpenFile("x/x.go")
- env.Await(
- env.DiagnosticAtRegexpWithMessage("x/x.go", `^`, "found packages main (main.go) and x (x.go)"),
- env.DiagnosticAtRegexpWithMessage("x/main.go", `^`, "found packages main (main.go) and x (x.go)"),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("x/x.go", `^`), WithMessage("found packages main (main.go) and x (x.go)")),
+ Diagnostics(env.AtRegexp("x/main.go", `^`), WithMessage("found packages main (main.go) and x (x.go)")),
)
// We don't recover cleanly from the errors without good overlay support.
if testenv.Go1Point() >= 16 {
env.RegexpReplace("x/x.go", `package x`, `package main`)
- env.Await(OnceMet(
- env.DoneWithChange(),
- env.DiagnosticAtRegexpWithMessage("x/main.go", `fmt`, "undeclared name")))
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("x/main.go", `fmt`)),
+ )
}
})
}
@@ -638,9 +631,6 @@ var ErrHelpWanted error
// Test for golang/go#38211.
func Test_Issue38211(t *testing.T) {
- t.Skipf("Skipping flaky test: https://golang.org/issue/44098")
-
- testenv.NeedsGo1Point(t, 14)
const ardanLabs = `
-- go.mod --
module mod.com
@@ -663,49 +653,44 @@ func main() {
env.OpenFile("go.mod")
env.OpenFile("main.go")
var d protocol.PublishDiagnosticsParams
- env.Await(
- OnceMet(
- env.DiagnosticAtRegexp("main.go", `"github.com/ardanlabs/conf"`),
- ReadDiagnostics("main.go", &d),
- ),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("main.go", `"github.com/ardanlabs/conf"`)),
+ ReadDiagnostics("main.go", &d),
)
env.ApplyQuickFixes("main.go", d.Diagnostics)
env.SaveBuffer("go.mod")
- env.Await(
- EmptyDiagnostics("main.go"),
+ env.AfterChange(
+ NoDiagnostics(ForFile("main.go")),
)
// Comment out the line that depends on conf and expect a
// diagnostic and a fix to remove the import.
env.RegexpReplace("main.go", "_ = conf.ErrHelpWanted", "//_ = conf.ErrHelpWanted")
- env.Await(
- env.DiagnosticAtRegexp("main.go", `"github.com/ardanlabs/conf"`),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("main.go", `"github.com/ardanlabs/conf"`)),
)
env.SaveBuffer("main.go")
// Expect a diagnostic and fix to remove the dependency in the go.mod.
- env.Await(EmptyDiagnostics("main.go"))
- env.Await(
- OnceMet(
- env.DiagnosticAtRegexpWithMessage("go.mod", "require github.com/ardanlabs/conf", "not used in this module"),
- ReadDiagnostics("go.mod", &d),
- ),
+ env.AfterChange(
+ NoDiagnostics(ForFile("main.go")),
+ Diagnostics(env.AtRegexp("go.mod", "require github.com/ardanlabs/conf"), WithMessage("not used in this module")),
+ ReadDiagnostics("go.mod", &d),
)
env.ApplyQuickFixes("go.mod", d.Diagnostics)
env.SaveBuffer("go.mod")
- env.Await(
- EmptyDiagnostics("go.mod"),
+ env.AfterChange(
+ NoDiagnostics(ForFile("go.mod")),
)
// Uncomment the lines and expect a new diagnostic for the import.
env.RegexpReplace("main.go", "//_ = conf.ErrHelpWanted", "_ = conf.ErrHelpWanted")
env.SaveBuffer("main.go")
- env.Await(
- env.DiagnosticAtRegexp("main.go", `"github.com/ardanlabs/conf"`),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("main.go", `"github.com/ardanlabs/conf"`)),
)
})
}
// Test for golang/go#38207.
func TestNewModule_Issue38207(t *testing.T) {
- testenv.NeedsGo1Point(t, 14)
const emptyFile = `
-- go.mod --
module mod.com
@@ -726,22 +711,19 @@ func main() {
`)
env.SaveBuffer("main.go")
var d protocol.PublishDiagnosticsParams
- env.Await(
- OnceMet(
- env.DiagnosticAtRegexpWithMessage("main.go", `"github.com/ardanlabs/conf"`, "no required module"),
- ReadDiagnostics("main.go", &d),
- ),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("main.go", `"github.com/ardanlabs/conf"`), WithMessage("no required module")),
+ ReadDiagnostics("main.go", &d),
)
env.ApplyQuickFixes("main.go", d.Diagnostics)
- env.Await(
- EmptyDiagnostics("main.go"),
+ env.AfterChange(
+ NoDiagnostics(ForFile("main.go")),
)
})
}
// Test for golang/go#36960.
func TestNewFileBadImports_Issue36960(t *testing.T) {
- testenv.NeedsGo1Point(t, 14)
const simplePackage = `
-- go.mod --
module mod.com
@@ -760,15 +742,12 @@ func _() {
env.OpenFile("a/a1.go")
env.CreateBuffer("a/a2.go", ``)
env.SaveBufferWithoutActions("a/a2.go")
- env.Await(
- OnceMet(
- env.DoneWithSave(),
- NoDiagnostics("a/a1.go"),
- ),
+ env.AfterChange(
+ NoDiagnostics(ForFile("a/a1.go")),
)
env.EditBuffer("a/a2.go", fake.NewEdit(0, 0, 0, 0, `package a`))
- env.Await(
- OnceMet(env.DoneWithChange(), NoDiagnostics("a/a1.go")),
+ env.AfterChange(
+ NoDiagnostics(ForFile("a/a1.go")),
)
})
}
@@ -776,9 +755,6 @@ func _() {
// This test tries to replicate the workflow of a user creating a new x test.
// It also tests golang/go#39315.
func TestManuallyCreatingXTest(t *testing.T) {
- // Only for 1.15 because of golang/go#37971.
- testenv.NeedsGo1Point(t, 15)
-
// Create a package that already has a test variant (in-package test).
const testVariant = `
-- go.mod --
@@ -805,9 +781,9 @@ func TestHello(t *testing.T) {
// Open the file, triggering the workspace load.
// There are errors in the code to ensure all is working as expected.
env.OpenFile("hello/hello.go")
- env.Await(
- env.DiagnosticAtRegexp("hello/hello.go", "x"),
- env.DiagnosticAtRegexp("hello/hello_test.go", "x"),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("hello/hello.go", "x")),
+ Diagnostics(env.AtRegexp("hello/hello_test.go", "x")),
)
// Create an empty file with the intention of making it an x test.
@@ -834,20 +810,18 @@ func TestHello(t *testing.T) {
`))
// Expect a diagnostic for the missing import. Save, which should
// trigger import organization. The diagnostic should clear.
- env.Await(
- env.DiagnosticAtRegexp("hello/hello_x_test.go", "hello.Hello"),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("hello/hello_x_test.go", "hello.Hello")),
)
env.SaveBuffer("hello/hello_x_test.go")
- env.Await(
- EmptyDiagnostics("hello/hello_x_test.go"),
+ env.AfterChange(
+ NoDiagnostics(ForFile("hello/hello_x_test.go")),
)
})
}
// Reproduce golang/go#40690.
func TestCreateOnlyXTest(t *testing.T) {
- testenv.NeedsGo1Point(t, 13)
-
const mod = `
-- go.mod --
module mod.com
@@ -869,15 +843,13 @@ func TestX(t *testing.T) {
var x int
}
`)
- env.Await(
- env.DiagnosticAtRegexp("foo/bar_test.go", "x"),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("foo/bar_test.go", "x")),
)
})
}
func TestChangePackageName(t *testing.T) {
- t.Skip("This issue hasn't been fixed yet. See golang.org/issue/41061.")
-
const mod = `
-- go.mod --
module mod.com
@@ -890,17 +862,11 @@ package foo_
`
Run(t, mod, func(t *testing.T, env *Env) {
env.OpenFile("foo/bar_test.go")
+ env.AfterChange()
env.RegexpReplace("foo/bar_test.go", "package foo_", "package foo_test")
- env.SaveBuffer("foo/bar_test.go")
- env.Await(
- OnceMet(
- env.DoneWithSave(),
- NoDiagnostics("foo/bar_test.go"),
- ),
- OnceMet(
- env.DoneWithSave(),
- NoDiagnostics("foo/foo.go"),
- ),
+ env.AfterChange(
+ NoDiagnostics(ForFile("foo/bar_test.go")),
+ NoDiagnostics(ForFile("foo/foo.go")),
)
})
}
@@ -918,11 +884,9 @@ var _ = foo.Bar
`
Run(t, ws, func(t *testing.T, env *Env) {
env.OpenFile("_foo/x.go")
- env.Await(
- OnceMet(
- env.DoneWithOpen(),
- NoDiagnostics("_foo/x.go"),
- ))
+ env.AfterChange(
+ NoDiagnostics(ForFile("_foo/x.go")),
+ )
})
}
@@ -962,17 +926,15 @@ const C = a.A
// We should still get diagnostics for files that exist.
env.RegexpReplace("b/b.go", `a.A`, "a.Nonexistant")
- env.Await(env.DiagnosticAtRegexp("b/b.go", `Nonexistant`))
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("b/b.go", `Nonexistant`)),
+ )
})
}
// This is a copy of the scenario_default/quickfix_empty_files.txt test from
// govim. Reproduces golang/go#39646.
func TestQuickFixEmptyFiles(t *testing.T) {
- t.Skip("too flaky: golang/go#48773")
-
- testenv.NeedsGo1Point(t, 15)
-
const mod = `
-- go.mod --
module mod.com
@@ -1012,7 +974,9 @@ func main() {
Run(t, mod, func(t *testing.T, env *Env) {
writeGoVim(env, "p/p.go", p)
writeGoVim(env, "main.go", main)
- env.Await(env.DiagnosticAtRegexp("main.go", "5"))
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("main.go", "5")),
+ )
})
})
@@ -1041,16 +1005,16 @@ func TestDoIt(t *testing.T) {
p.DoIt(5)
}
`)
- env.Await(
- env.DiagnosticAtRegexp("main.go", "5"),
- env.DiagnosticAtRegexp("p/p_test.go", "5"),
- env.DiagnosticAtRegexp("p/x_test.go", "5"),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("main.go", "5")),
+ Diagnostics(env.AtRegexp("p/p_test.go", "5")),
+ Diagnostics(env.AtRegexp("p/x_test.go", "5")),
)
env.RegexpReplace("p/p.go", "s string", "i int")
- env.Await(
- EmptyDiagnostics("main.go"),
- EmptyDiagnostics("p/p_test.go"),
- EmptyDiagnostics("p/x_test.go"),
+ env.AfterChange(
+ NoDiagnostics(ForFile("main.go")),
+ NoDiagnostics(ForFile("p/p_test.go")),
+ NoDiagnostics(ForFile("p/x_test.go")),
)
})
})
@@ -1074,8 +1038,8 @@ func _() {
WorkspaceFolders(),
).Run(t, mod, func(t *testing.T, env *Env) {
env.OpenFile("a/a.go")
- env.Await(
- env.DiagnosticAtRegexp("a/a.go", "x"),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("a/a.go", "x")),
)
})
}
@@ -1098,8 +1062,6 @@ func Foo() {
}
`
Run(t, basic, func(t *testing.T, env *Env) {
- testenv.NeedsGo1Point(t, 16) // We can't recover cleanly from this case without good overlay support.
-
env.WriteWorkspaceFile("foo/foo_test.go", `package main
func main() {
@@ -1107,12 +1069,7 @@ func main() {
}`)
env.OpenFile("foo/foo_test.go")
env.RegexpReplace("foo/foo_test.go", `package main`, `package foo`)
- env.Await(
- OnceMet(
- env.DoneWithChange(),
- NoDiagnostics("foo/foo.go"),
- ),
- )
+ env.AfterChange(NoDiagnostics(ForFile("foo/foo.go")))
})
}
@@ -1129,16 +1086,9 @@ func main() {}
`
Run(t, basic, func(t *testing.T, env *Env) {
env.Editor.CreateBuffer(env.Ctx, "foo.go", `package main`)
- env.Await(
- env.DoneWithOpen(),
- )
+ env.AfterChange()
env.CloseBuffer("foo.go")
- env.Await(
- OnceMet(
- env.DoneWithClose(),
- NoLogMatching(protocol.Info, "packages=0"),
- ),
- )
+ env.AfterChange(NoLogMatching(protocol.Info, "packages=0"))
})
}
@@ -1178,16 +1128,14 @@ func main() {
var x int
}
`))
- env.Await(
- env.DiagnosticAtRegexp("main.go", "x"),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("main.go", "x")),
)
})
}
// Reproduces golang/go#39763.
func TestInvalidPackageName(t *testing.T) {
- testenv.NeedsGo1Point(t, 15)
-
const pkgDefault = `
-- go.mod --
module mod.com
@@ -1200,8 +1148,11 @@ func main() {}
`
Run(t, pkgDefault, func(t *testing.T, env *Env) {
env.OpenFile("main.go")
- env.Await(
- env.DiagnosticAtRegexpWithMessage("main.go", "default", "expected 'IDENT'"),
+ env.AfterChange(
+ Diagnostics(
+ env.AtRegexp("main.go", "default"),
+ WithMessage("expected 'IDENT'"),
+ ),
)
})
}
@@ -1228,17 +1179,17 @@ func main() {
WorkspaceFolders("a"),
).Run(t, mod, func(t *testing.T, env *Env) {
env.OpenFile("a/main.go")
- env.Await(
- env.DiagnosticAtRegexp("main.go", "x"),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("main.go", "x")),
)
})
WithOptions(
WorkspaceFolders("a"),
- LimitWorkspaceScope(),
+ Settings{"expandWorkspaceToModule": false},
).Run(t, mod, func(t *testing.T, env *Env) {
env.OpenFile("a/main.go")
- env.Await(
- NoDiagnostics("main.go"),
+ env.AfterChange(
+ NoDiagnostics(ForFile("main.go")),
)
})
}
@@ -1265,19 +1216,19 @@ func main() {
`
WithOptions(
- EditorConfig{EnableStaticcheck: true},
+ Settings{"staticcheck": true},
).Run(t, files, func(t *testing.T, env *Env) {
env.OpenFile("main.go")
var d protocol.PublishDiagnosticsParams
- env.Await(OnceMet(
- env.DiagnosticAtRegexpWithMessage("main.go", `t{"msg"}`, "redundant type"),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("main.go", `t{"msg"}`), WithMessage("redundant type")),
ReadDiagnostics("main.go", &d),
- ))
+ )
if tags := d.Diagnostics[0].Tags; len(tags) == 0 || tags[0] != protocol.Unnecessary {
t.Errorf("wanted Unnecessary tag on diagnostic, got %v", tags)
}
env.ApplyQuickFixes("main.go", d.Diagnostics)
- env.Await(EmptyDiagnostics("main.go"))
+ env.AfterChange(NoDiagnostics(ForFile("main.go")))
})
}
@@ -1300,23 +1251,23 @@ func main() {}
Run(t, dir, func(t *testing.T, env *Env) {
env.OpenFile("main.go")
env.OpenFile("other.go")
- x := env.DiagnosticsFor("main.go")
- if x == nil {
- t.Fatalf("expected 1 diagnostic, got none")
- }
- if len(x.Diagnostics) != 1 {
- t.Fatalf("main.go, got %d diagnostics, expected 1", len(x.Diagnostics))
+ var mainDiags, otherDiags protocol.PublishDiagnosticsParams
+ env.AfterChange(
+ ReadDiagnostics("main.go", &mainDiags),
+ ReadDiagnostics("other.go", &otherDiags),
+ )
+ if len(mainDiags.Diagnostics) != 1 {
+ t.Fatalf("main.go, got %d diagnostics, expected 1", len(mainDiags.Diagnostics))
}
- keep := x.Diagnostics[0]
- y := env.DiagnosticsFor("other.go")
- if len(y.Diagnostics) != 1 {
- t.Fatalf("other.go: got %d diagnostics, expected 1", len(y.Diagnostics))
+ keep := mainDiags.Diagnostics[0]
+ if len(otherDiags.Diagnostics) != 1 {
+ t.Fatalf("other.go: got %d diagnostics, expected 1", len(otherDiags.Diagnostics))
}
- if len(y.Diagnostics[0].RelatedInformation) != 1 {
- t.Fatalf("got %d RelatedInformations, expected 1", len(y.Diagnostics[0].RelatedInformation))
+ if len(otherDiags.Diagnostics[0].RelatedInformation) != 1 {
+ t.Fatalf("got %d RelatedInformations, expected 1", len(otherDiags.Diagnostics[0].RelatedInformation))
}
// check that the RelatedInformation matches the error from main.go
- c := y.Diagnostics[0].RelatedInformation[0]
+ c := otherDiags.Diagnostics[0].RelatedInformation[0]
if c.Location.Range != keep.Range {
t.Errorf("locations don't match. Got %v expected %v", c.Location.Range, keep.Range)
}
@@ -1324,9 +1275,6 @@ func main() {}
}
func TestNotifyOrphanedFiles(t *testing.T) {
- // Need GO111MODULE=on for this test to work with Go 1.12.
- testenv.NeedsGo1Point(t, 13)
-
const files = `
-- go.mod --
module mod.com
@@ -1338,8 +1286,8 @@ package a
func main() {
var x int
}
--- a/a_ignore.go --
-// +build ignore
+-- a/a_exclude.go --
+// +build exclude
package a
@@ -1349,17 +1297,21 @@ func _() {
`
Run(t, files, func(t *testing.T, env *Env) {
env.OpenFile("a/a.go")
- env.Await(
- env.DiagnosticAtRegexp("a/a.go", "x"),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("a/a.go", "x")),
)
- env.OpenFile("a/a_ignore.go")
- env.Await(
- DiagnosticAt("a/a_ignore.go", 2, 8),
+ env.OpenFile("a/a_exclude.go")
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("a/a_exclude.go", "package (a)")),
)
})
}
func TestEnableAllExperiments(t *testing.T) {
+ // Before the oldest supported Go version, gopls sends a warning to upgrade
+ // Go, which fails the expectation below.
+ testenv.NeedsGo1Point(t, lsp.OldestSupportedGoVersion())
+
const mod = `
-- go.mod --
module mod.com
@@ -1375,12 +1327,13 @@ func b(c bytes.Buffer) {
}
`
WithOptions(
- EditorConfig{
- AllExperiments: true,
- },
+ Settings{"allExperiments": true},
).Run(t, mod, func(t *testing.T, env *Env) {
// Confirm that the setting doesn't cause any warnings.
- env.Await(NoShowMessage())
+ env.OnceMet(
+ InitialWorkspaceLoad,
+ NoShownMessage(""), // empty substring to match any message
+ )
})
}
@@ -1430,11 +1383,9 @@ func main() {
}
`
Run(t, mod, func(t *testing.T, env *Env) {
- env.Await(
- OnceMet(
- InitialWorkspaceLoad,
- NoDiagnosticWithMessage("", "illegal character U+0023 '#'"),
- ),
+ env.OnceMet(
+ InitialWorkspaceLoad,
+ NoDiagnostics(WithMessage("illegal character U+0023 '#'")),
)
})
}
@@ -1444,8 +1395,6 @@ func main() {
// have no more complaints about it.
// https://github.com/golang/go/issues/41061
func TestRenamePackage(t *testing.T) {
- testenv.NeedsGo1Point(t, 16)
-
const proxy = `
-- example.com@v1.2.3/go.mod --
module example.com
@@ -1489,11 +1438,7 @@ package foo_
WithOptions(
ProxyFiles(proxy),
InGOPATH(),
- EditorConfig{
- Env: map[string]string{
- "GO111MODULE": "off",
- },
- },
+ EnvVars{"GO111MODULE": "off"},
).Run(t, contents, func(t *testing.T, env *Env) {
// Simulate typing character by character.
env.OpenFile("foo/foo_test.go")
@@ -1501,10 +1446,8 @@ package foo_
env.RegexpReplace("foo/foo_test.go", "_", "_t")
env.Await(env.DoneWithChange())
env.RegexpReplace("foo/foo_test.go", "_t", "_test")
- env.Await(env.DoneWithChange())
-
- env.Await(
- EmptyDiagnostics("foo/foo_test.go"),
+ env.AfterChange(
+ NoDiagnostics(ForFile("foo/foo_test.go")),
NoOutstandingWork(),
)
})
@@ -1513,9 +1456,6 @@ package foo_
// TestProgressBarErrors confirms that critical workspace load errors are shown
// and updated via progress reports.
func TestProgressBarErrors(t *testing.T) {
- t.Skip("too flaky: golang/go#46930")
- testenv.NeedsGo1Point(t, 14)
-
const pkg = `
-- go.mod --
modul mod.com
@@ -1526,7 +1466,7 @@ package main
`
Run(t, pkg, func(t *testing.T, env *Env) {
env.OpenFile("go.mod")
- env.Await(
+ env.AfterChange(
OutstandingWork(lsp.WorkspaceLoadFailure, "unknown directive"),
)
env.EditBuffer("go.mod", fake.NewEdit(0, 0, 3, 0, `module mod.com
@@ -1536,20 +1476,18 @@ go 1.hello
// As of golang/go#42529, go.mod changes do not reload the workspace until
// they are saved.
env.SaveBufferWithoutActions("go.mod")
- env.Await(
+ env.AfterChange(
OutstandingWork(lsp.WorkspaceLoadFailure, "invalid go version"),
)
env.RegexpReplace("go.mod", "go 1.hello", "go 1.12")
env.SaveBufferWithoutActions("go.mod")
- env.Await(
+ env.AfterChange(
NoOutstandingWork(),
)
})
}
func TestDeleteDirectory(t *testing.T) {
- testenv.NeedsGo1Point(t, 14)
-
const mod = `
-- bob/bob.go --
package bob
@@ -1559,7 +1497,7 @@ func Hello() {
}
-- go.mod --
module mod.com
--- main.go --
+-- cmd/main.go --
package main
import "mod.com/bob"
@@ -1569,11 +1507,15 @@ func main() {
}
`
Run(t, mod, func(t *testing.T, env *Env) {
+ env.OnceMet(
+ InitialWorkspaceLoad,
+ FileWatchMatching("bob"),
+ )
env.RemoveWorkspaceFile("bob")
- env.Await(
- env.DiagnosticAtRegexp("main.go", `"mod.com/bob"`),
- EmptyDiagnostics("bob/bob.go"),
- RegistrationMatching("didChangeWatchedFiles"),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("cmd/main.go", `"mod.com/bob"`)),
+ NoDiagnostics(ForFile("bob/bob.go")),
+ NoFileWatchMatching("bob"),
)
})
}
@@ -1612,10 +1554,11 @@ package c
import _ "mod.com/triple/a"
`
Run(t, mod, func(t *testing.T, env *Env) {
- env.Await(
- env.DiagnosticAtRegexpWithMessage("self/self.go", `_ "mod.com/self"`, "import cycle not allowed"),
- env.DiagnosticAtRegexpWithMessage("double/a/a.go", `_ "mod.com/double/b"`, "import cycle not allowed"),
- env.DiagnosticAtRegexpWithMessage("triple/a/a.go", `_ "mod.com/triple/b"`, "import cycle not allowed"),
+ env.OnceMet(
+ InitialWorkspaceLoad,
+ Diagnostics(env.AtRegexp("self/self.go", `_ "mod.com/self"`), WithMessage("import cycle not allowed")),
+ Diagnostics(env.AtRegexp("double/a/a.go", `_ "mod.com/double/b"`), WithMessage("import cycle not allowed")),
+ Diagnostics(env.AtRegexp("triple/a/a.go", `_ "mod.com/triple/b"`), WithMessage("import cycle not allowed")),
)
})
}
@@ -1646,19 +1589,26 @@ const B = a.B
Run(t, mod, func(t *testing.T, env *Env) {
env.OpenFile("a/a.go")
env.OpenFile("b/b.go")
- env.Await(env.DiagnosticAtRegexp("a/a.go", `"mod.test/b"`))
+ env.AfterChange(
+ // The Go command sometimes tells us about only one of the import cycle
+ // errors below. For robustness of this test, succeed if we get either.
+ //
+ // TODO(golang/go#52904): we should get *both* of these errors.
+ AnyOf(
+ Diagnostics(env.AtRegexp("a/a.go", `"mod.test/b"`), WithMessage("import cycle")),
+ Diagnostics(env.AtRegexp("b/b.go", `"mod.test/a"`), WithMessage("import cycle")),
+ ),
+ )
env.RegexpReplace("b/b.go", `const B = a\.B`, "")
env.SaveBuffer("b/b.go")
- env.Await(
- EmptyOrNoDiagnostics("a/a.go"),
- EmptyOrNoDiagnostics("b/b.go"),
+ env.AfterChange(
+ NoDiagnostics(ForFile("a/a.go")),
+ NoDiagnostics(ForFile("b/b.go")),
)
})
}
func TestBadImport(t *testing.T) {
- testenv.NeedsGo1Point(t, 14)
-
const mod = `
-- go.mod --
module mod.com
@@ -1673,80 +1623,21 @@ import (
`
t.Run("module", func(t *testing.T) {
Run(t, mod, func(t *testing.T, env *Env) {
- env.Await(
- env.DiagnosticAtRegexpWithMessage("main.go", `"nosuchpkg"`, `could not import nosuchpkg (no required module provides package "nosuchpkg"`),
+ env.OnceMet(
+ InitialWorkspaceLoad,
+ Diagnostics(env.AtRegexp("main.go", `"nosuchpkg"`), WithMessage(`could not import nosuchpkg (no required module provides package "nosuchpkg"`)),
)
})
})
t.Run("GOPATH", func(t *testing.T) {
WithOptions(
InGOPATH(),
- EditorConfig{
- Env: map[string]string{"GO111MODULE": "off"},
- },
- Modes(Singleton),
+ EnvVars{"GO111MODULE": "off"},
+ Modes(Default),
).Run(t, mod, func(t *testing.T, env *Env) {
- env.Await(
- env.DiagnosticAtRegexpWithMessage("main.go", `"nosuchpkg"`, `cannot find package "nosuchpkg" in any of`),
- )
- })
- })
-}
-
-func TestMultipleModules_Warning(t *testing.T) {
- const modules = `
--- a/go.mod --
-module a.com
-
-go 1.12
--- a/a.go --
-package a
--- b/go.mod --
-module b.com
-
-go 1.12
--- b/b.go --
-package b
-`
- for _, go111module := range []string{"on", "auto"} {
- t.Run("GO111MODULE="+go111module, func(t *testing.T) {
- WithOptions(
- Modes(Singleton),
- EditorConfig{
- Env: map[string]string{
- "GO111MODULE": go111module,
- },
- },
- ).Run(t, modules, func(t *testing.T, env *Env) {
- env.OpenFile("a/a.go")
- env.OpenFile("b/go.mod")
- env.Await(
- env.DiagnosticAtRegexp("a/a.go", "package a"),
- env.DiagnosticAtRegexp("b/go.mod", "module b.com"),
- OutstandingWork(lsp.WorkspaceLoadFailure, "gopls requires a module at the root of your workspace."),
- )
- })
- })
- }
-
- // Expect no warning if GO111MODULE=auto in a directory in GOPATH.
- t.Run("GOPATH_GO111MODULE_auto", func(t *testing.T) {
- WithOptions(
- Modes(Singleton),
- EditorConfig{
- Env: map[string]string{
- "GO111MODULE": "auto",
- },
- },
- InGOPATH(),
- ).Run(t, modules, func(t *testing.T, env *Env) {
- env.OpenFile("a/a.go")
- env.Await(
- OnceMet(
- env.DoneWithOpen(),
- NoDiagnostics("a/a.go"),
- ),
- NoOutstandingWork(),
+ env.OnceMet(
+ InitialWorkspaceLoad,
+ Diagnostics(env.AtRegexp("main.go", `"nosuchpkg"`), WithMessage(`cannot find package "nosuchpkg"`)),
)
})
})
@@ -1798,24 +1689,14 @@ func helloHelper() {}
`
WithOptions(
ProxyFiles(proxy),
- Modes(Singleton),
+ Modes(Default),
).Run(t, nested, func(t *testing.T, env *Env) {
// Expect a diagnostic in a nested module.
env.OpenFile("nested/hello/hello.go")
- didOpen := env.DoneWithOpen()
- env.Await(
- OnceMet(
- didOpen,
- env.DiagnosticAtRegexp("nested/hello/hello.go", "helloHelper"),
- ),
- OnceMet(
- didOpen,
- env.DiagnosticAtRegexpWithMessage("nested/hello/hello.go", "package hello", "nested module"),
- ),
- OnceMet(
- didOpen,
- OutstandingWork(lsp.WorkspaceLoadFailure, "nested module"),
- ),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("nested/hello/hello.go", "helloHelper")),
+ Diagnostics(env.AtRegexp("nested/hello/hello.go", "package hello"), WithMessage("nested module")),
+ OutstandingWork(lsp.WorkspaceLoadFailure, "nested module"),
)
})
}
@@ -1830,12 +1711,7 @@ func main() {}
Run(t, nomod, func(t *testing.T, env *Env) {
env.OpenFile("main.go")
env.RegexpReplace("main.go", "{}", "{ var x int; }") // simulate typing
- env.Await(
- OnceMet(
- env.DoneWithChange(),
- NoLogMatching(protocol.Info, "packages=1"),
- ),
- )
+ env.AfterChange(NoLogMatching(protocol.Info, "packages=1"))
})
}
@@ -1859,9 +1735,9 @@ var Bar = Foo
Run(t, files, func(t *testing.T, env *Env) {
env.OpenFile("foo.go")
- env.Await(env.DiagnosticAtRegexpWithMessage("bar.go", `Foo`, "undeclared name"))
+ env.AfterChange(Diagnostics(env.AtRegexp("bar.go", `Foo`)))
env.RegexpReplace("foo.go", `\+build`, "")
- env.Await(EmptyDiagnostics("bar.go"))
+ env.AfterChange(NoDiagnostics(ForFile("bar.go")))
})
}
@@ -1889,17 +1765,15 @@ package main
Run(t, files, func(t *testing.T, env *Env) {
env.OpenFile("main.go")
env.OpenFile("other.go")
- env.Await(
- env.DiagnosticAtRegexpWithMessage("main.go", "asdf", "undeclared name"),
- env.DiagnosticAtRegexpWithMessage("main.go", "fdas", "undeclared name"),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("main.go", "asdf")),
+ Diagnostics(env.AtRegexp("main.go", "fdas")),
)
env.SetBufferContent("other.go", "package main\n\nasdf")
// The new diagnostic in other.go should not suppress diagnostics in main.go.
- env.Await(
- OnceMet(
- env.DiagnosticAtRegexpWithMessage("other.go", "asdf", "expected declaration"),
- env.DiagnosticAtRegexpWithMessage("main.go", "asdf", "undeclared name"),
- ),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("other.go", "asdf"), WithMessage("expected declaration")),
+ Diagnostics(env.AtRegexp("main.go", "asdf")),
)
})
}
@@ -1918,42 +1792,8 @@ package main
env.Await(env.DoneWithOpen())
env.RegexpReplace("go.mod", "module", "modul")
env.SaveBufferWithoutActions("go.mod")
- env.Await(
- OnceMet(
- env.DoneWithSave(),
- NoLogMatching(protocol.Error, "initial workspace load failed"),
- ),
- )
- })
-}
-
-// Tests golang/go#45075: A panic in fillreturns broke diagnostics.
-// Expect an error log indicating that fillreturns panicked, as well type
-// errors for the broken code.
-func TestFillReturnsPanic(t *testing.T) {
- // At tip, the panic no longer reproduces.
- testenv.SkipAfterGo1Point(t, 16)
-
- const files = `
--- go.mod --
-module mod.com
-
-go 1.15
--- main.go --
-package main
-
-func foo() int {
- return x, nil
-}
-`
- Run(t, files, func(t *testing.T, env *Env) {
- env.OpenFile("main.go")
- env.Await(
- OnceMet(
- env.DoneWithOpen(),
- LogMatching(protocol.Error, `.*analysis fillreturns.*panicked.*`, 1, true),
- env.DiagnosticAtRegexpWithMessage("main.go", `return x`, "wrong number of return values"),
- ),
+ env.AfterChange(
+ NoLogMatching(protocol.Error, "initial workspace load failed"),
)
})
}
@@ -1973,168 +1813,8 @@ func main() {}
`
Run(t, files, func(t *testing.T, env *Env) {
env.OpenFile("go.mod")
- env.Await(
- OnceMet(
- env.DoneWithOpen(),
- LogMatching(protocol.Info, `.*query=\[builtin mod.com/...\].*`, 1, false),
- ),
- )
- })
-}
-
-func TestUseOfInvalidMetadata(t *testing.T) {
- testenv.NeedsGo1Point(t, 13)
-
- const mod = `
--- go.mod --
-module mod.com
-
-go 1.12
--- main.go --
-package main
-
-import (
- "mod.com/a"
- //"os"
-)
-
-func _() {
- a.Hello()
- os.Getenv("")
- //var x int
-}
--- a/a.go --
-package a
-
-func Hello() {}
-`
- WithOptions(
- EditorConfig{
- ExperimentalUseInvalidMetadata: true,
- },
- Modes(Singleton),
- ).Run(t, mod, func(t *testing.T, env *Env) {
- env.OpenFile("go.mod")
- env.RegexpReplace("go.mod", "module mod.com", "modul mod.com") // break the go.mod file
- env.SaveBufferWithoutActions("go.mod")
- env.Await(
- env.DiagnosticAtRegexp("go.mod", "modul"),
- )
- // Confirm that language features work with invalid metadata.
- env.OpenFile("main.go")
- file, pos := env.GoToDefinition("main.go", env.RegexpSearch("main.go", "Hello"))
- wantPos := env.RegexpSearch("a/a.go", "Hello")
- if file != "a/a.go" && pos != wantPos {
- t.Fatalf("expected a/a.go:%s, got %s:%s", wantPos, file, pos)
- }
- // Confirm that new diagnostics appear with invalid metadata by adding
- // an unused variable to the body of the function.
- env.RegexpReplace("main.go", "//var x int", "var x int")
- env.Await(
- env.DiagnosticAtRegexp("main.go", "x"),
- )
- // Add an import and confirm that we get a diagnostic for it, since the
- // metadata will not have been updated.
- env.RegexpReplace("main.go", "//\"os\"", "\"os\"")
- env.Await(
- env.DiagnosticAtRegexp("main.go", `"os"`),
- )
- // Fix the go.mod file and expect the diagnostic to resolve itself.
- env.RegexpReplace("go.mod", "modul mod.com", "module mod.com")
- env.SaveBuffer("go.mod")
- env.Await(
- env.DiagnosticAtRegexp("main.go", "x"),
- env.NoDiagnosticAtRegexp("main.go", `"os"`),
- EmptyDiagnostics("go.mod"),
- )
- })
-}
-
-func TestReloadInvalidMetadata(t *testing.T) {
- // We only use invalid metadata for Go versions > 1.12.
- testenv.NeedsGo1Point(t, 13)
-
- const mod = `
--- go.mod --
-module mod.com
-
-go 1.12
--- main.go --
-package main
-
-func _() {}
-`
- WithOptions(
- EditorConfig{
- ExperimentalUseInvalidMetadata: true,
- },
- // ExperimentalWorkspaceModule has a different failure mode for this
- // case.
- Modes(Singleton),
- ).Run(t, mod, func(t *testing.T, env *Env) {
- env.Await(
- OnceMet(
- InitialWorkspaceLoad,
- CompletedWork("Load", 1, false),
- ),
- )
-
- // Break the go.mod file on disk, expecting a reload.
- env.WriteWorkspaceFile("go.mod", `modul mod.com
-
-go 1.12
-`)
- env.Await(
- OnceMet(
- env.DoneWithChangeWatchedFiles(),
- env.DiagnosticAtRegexp("go.mod", "modul"),
- CompletedWork("Load", 1, false),
- ),
- )
-
- env.OpenFile("main.go")
- env.Await(env.DoneWithOpen())
- // The first edit after the go.mod file invalidation should cause a reload.
- // Any subsequent simple edits should not.
- content := `package main
-
-func main() {
- _ = 1
-}
-`
- env.EditBuffer("main.go", fake.NewEdit(0, 0, 3, 0, content))
- env.Await(
- OnceMet(
- env.DoneWithChange(),
- CompletedWork("Load", 2, false),
- NoLogMatching(protocol.Error, "error loading file"),
- ),
- )
- env.RegexpReplace("main.go", "_ = 1", "_ = 2")
- env.Await(
- OnceMet(
- env.DoneWithChange(),
- CompletedWork("Load", 2, false),
- NoLogMatching(protocol.Error, "error loading file"),
- ),
- )
- // Add an import to the main.go file and confirm that it does get
- // reloaded, but the reload fails, so we see a diagnostic on the new
- // "fmt" import.
- env.EditBuffer("main.go", fake.NewEdit(0, 0, 5, 0, `package main
-
-import "fmt"
-
-func main() {
- fmt.Println("")
-}
-`))
- env.Await(
- OnceMet(
- env.DoneWithChange(),
- env.DiagnosticAtRegexp("main.go", `"fmt"`),
- CompletedWork("Load", 3, false),
- ),
+ env.AfterChange(
+ LogMatching(protocol.Info, `.*query=\[builtin mod.com/...\].*`, 1, false),
)
})
}
@@ -2152,9 +1832,14 @@ package main
const C = 0b10
`
Run(t, files, func(t *testing.T, env *Env) {
- env.Await(env.DiagnosticAtRegexpWithMessage("main.go", `0b10`, "go1.13 or later"))
+ env.OnceMet(
+ InitialWorkspaceLoad,
+ Diagnostics(env.AtRegexp("main.go", `0b10`), WithMessage("go1.13 or later")),
+ )
env.WriteWorkspaceFile("go.mod", "module mod.com \n\ngo 1.13\n")
- env.Await(EmptyDiagnostics("main.go"))
+ env.AfterChange(
+ NoDiagnostics(ForFile("main.go")),
+ )
})
}
@@ -2174,11 +1859,10 @@ func F[T C](_ T) {
Run(t, files, func(t *testing.T, env *Env) {
var d protocol.PublishDiagnosticsParams
- env.Await(
- OnceMet(
- env.DiagnosticAtRegexpWithMessage("main.go", `C`, "undeclared name"),
- ReadDiagnostics("main.go", &d),
- ),
+ env.OnceMet(
+ InitialWorkspaceLoad,
+ Diagnostics(env.AtRegexp("main.go", `C`)),
+ ReadDiagnostics("main.go", &d),
)
if fixes := env.GetQuickFixes("main.go", d.Diagnostics); len(fixes) != 0 {
t.Errorf("got quick fixes %v, wanted none", fixes)
@@ -2201,17 +1885,15 @@ func F[T any](_ T) {
`
Run(t, files, func(_ *testing.T, env *Env) { // Create a new workspace-level directory and empty file.
var d protocol.PublishDiagnosticsParams
- env.Await(
- OnceMet(
- env.DiagnosticAtRegexpWithMessage("main.go", `T any`, "type parameters require"),
- ReadDiagnostics("main.go", &d),
- ),
+ env.OnceMet(
+ InitialWorkspaceLoad,
+ Diagnostics(env.AtRegexp("main.go", `T any`), WithMessage("type parameter")),
+ ReadDiagnostics("main.go", &d),
)
env.ApplyQuickFixes("main.go", d.Diagnostics)
-
- env.Await(
- EmptyDiagnostics("main.go"),
+ env.AfterChange(
+ NoDiagnostics(ForFile("main.go")),
)
})
}
@@ -2235,17 +1917,132 @@ func F[T any](_ T) {
`
Run(t, files, func(_ *testing.T, env *Env) { // Create a new workspace-level directory and empty file.
var d protocol.PublishDiagnosticsParams
- env.Await(
- OnceMet(
- env.DiagnosticAtRegexpWithMessage("main.go", `T any`, "type parameters require"),
- ReadDiagnostics("main.go", &d),
- ),
+
+ // We should have a diagnostic because generics are not supported at 1.16.
+ env.OnceMet(
+ InitialWorkspaceLoad,
+ Diagnostics(env.AtRegexp("main.go", `T any`), WithMessage("type parameter")),
+ ReadDiagnostics("main.go", &d),
)
+ // This diagnostic should have a quick fix to edit the go version.
env.ApplyQuickFixes("main.go", d.Diagnostics)
- env.Await(
- EmptyDiagnostics("main.go"),
+ // Once the edit is applied, the problematic diagnostics should be
+ // resolved.
+ env.AfterChange(
+ NoDiagnostics(ForFile("main.go")),
)
})
}
+
+// This test demonstrates that analysis facts are correctly propagated
+// across packages.
+func TestInterpackageAnalysis(t *testing.T) {
+ const src = `
+-- go.mod --
+module example.com
+-- a/a.go --
+package a
+
+import "example.com/b"
+
+func _() {
+ new(b.B).Printf("%d", "s") // printf error
+}
+
+-- b/b.go --
+package b
+
+import "example.com/c"
+
+type B struct{}
+
+func (B) Printf(format string, args ...interface{}) {
+ c.MyPrintf(format, args...)
+}
+
+-- c/c.go --
+package c
+
+import "fmt"
+
+func MyPrintf(format string, args ...interface{}) {
+ fmt.Printf(format, args...)
+}
+`
+ Run(t, src, func(t *testing.T, env *Env) {
+ env.OpenFile("a/a.go")
+ env.AfterChange(
+ Diagnostics(
+ env.AtRegexp("a/a.go", "new.*Printf"),
+ WithMessage("format %d has arg \"s\" of wrong type string"),
+ ),
+ )
+ })
+}
+
+// This test ensures that only Analyzers with RunDespiteErrors=true
+// are invoked on a package that would not compile, even if the errors
+// are distant and localized.
+func TestErrorsThatPreventAnalysis(t *testing.T) {
+ const src = `
+-- go.mod --
+module example.com
+-- a/a.go --
+package a
+
+import "fmt"
+import "sync"
+import _ "example.com/b"
+
+func _() {
+ // The copylocks analyzer (RunDespiteErrors, FactTypes={}) does run.
+ var mu sync.Mutex
+ mu2 := mu // copylocks error, reported
+ _ = &mu2
+
+ // The printf analyzer (!RunDespiteErrors, FactTypes!={}) does not run:
+ // (c, printf) failed because of type error in c
+ // (b, printf) and (a, printf) do not run because of failed prerequisites.
+ fmt.Printf("%d", "s") // printf error, unreported
+
+ // The bools analyzer (!RunDespiteErrors, FactTypes={}) does not run:
+ var cond bool
+ _ = cond != true && cond != true // bools error, unreported
+}
+
+-- b/b.go --
+package b
+
+import _ "example.com/c"
+
+-- c/c.go --
+package c
+
+var _ = 1 / "" // type error
+
+`
+ Run(t, src, func(t *testing.T, env *Env) {
+ var diags protocol.PublishDiagnosticsParams
+ env.OpenFile("a/a.go")
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("a/a.go", "mu2 := (mu)"), WithMessage("assignment copies lock value")),
+ ReadDiagnostics("a/a.go", &diags))
+
+ // Assert that there were no other diagnostics.
+ // In particular:
+ // - "fmt.Printf" does not trigger a [printf] finding;
+ // - "cond != true" does not trigger a [bools] finding.
+ //
+ // We use this check in preference to NoDiagnosticAtRegexp
+ // as it is robust in case of minor mistakes in the position
+ // regexp, and because it reports unexpected diagnostics.
+ if got, want := len(diags.Diagnostics), 1; got != want {
+ t.Errorf("got %d diagnostics in a/a.go, want %d:", got, want)
+ for i, diag := range diags.Diagnostics {
+ t.Logf("Diagnostics[%d] = %+v", i, diag)
+ }
+ }
+ })
+}
diff --git a/gopls/internal/regtest/diagnostics/golist_test.go b/gopls/internal/regtest/diagnostics/golist_test.go
new file mode 100644
index 000000000..85b35be02
--- /dev/null
+++ b/gopls/internal/regtest/diagnostics/golist_test.go
@@ -0,0 +1,71 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package diagnostics
+
+import (
+ "testing"
+
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/internal/testenv"
+)
+
+func TestGoListErrors(t *testing.T) {
+ testenv.NeedsTool(t, "cgo")
+
+ const src = `
+-- go.mod --
+module a.com
+
+go 1.18
+-- a/a.go --
+package a
+
+import
+-- c/c.go --
+package c
+
+/*
+int fortythree() { return 42; }
+*/
+import "C"
+
+func Foo() {
+ print(C.fortytwo())
+}
+-- p/p.go --
+package p
+
+import "a.com/q"
+
+const P = q.Q + 1
+-- q/q.go --
+package q
+
+import "a.com/p"
+
+const Q = p.P + 1
+`
+
+ Run(t, src, func(t *testing.T, env *Env) {
+ env.OnceMet(
+ InitialWorkspaceLoad,
+ Diagnostics(
+ env.AtRegexp("a/a.go", "import\n()"),
+ FromSource(string(source.ParseError)),
+ ),
+ Diagnostics(
+ AtPosition("c/c.go", 0, 0),
+ FromSource(string(source.ListError)),
+ WithMessage("may indicate failure to perform cgo processing"),
+ ),
+ Diagnostics(
+ env.AtRegexp("p/p.go", `"a.com/q"`),
+ FromSource(string(source.ListError)),
+ WithMessage("import cycle not allowed"),
+ ),
+ )
+ })
+}
diff --git a/gopls/internal/regtest/diagnostics/invalidation_test.go b/gopls/internal/regtest/diagnostics/invalidation_test.go
new file mode 100644
index 000000000..f5097f32d
--- /dev/null
+++ b/gopls/internal/regtest/diagnostics/invalidation_test.go
@@ -0,0 +1,111 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package diagnostics
+
+import (
+ "fmt"
+ "testing"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
+)
+
+// Test for golang/go#50267: diagnostics should be re-sent after a file is
+// opened.
+func TestDiagnosticsAreResentAfterCloseOrOpen(t *testing.T) {
+ const files = `
+-- go.mod --
+module mod.com
+
+go 1.16
+-- main.go --
+package main
+
+func _() {
+ x := 2
+}
+`
+ Run(t, files, func(_ *testing.T, env *Env) { // Create a new workspace-level directory and empty file.
+ env.OpenFile("main.go")
+ var afterOpen protocol.PublishDiagnosticsParams
+ env.AfterChange(
+ ReadDiagnostics("main.go", &afterOpen),
+ )
+ env.CloseBuffer("main.go")
+ var afterClose protocol.PublishDiagnosticsParams
+ env.AfterChange(
+ ReadDiagnostics("main.go", &afterClose),
+ )
+ if afterOpen.Version == afterClose.Version {
+ t.Errorf("publishDiagnostics: got the same version after closing (%d) as after opening", afterOpen.Version)
+ }
+ env.OpenFile("main.go")
+ var afterReopen protocol.PublishDiagnosticsParams
+ env.AfterChange(
+ ReadDiagnostics("main.go", &afterReopen),
+ )
+ if afterReopen.Version == afterClose.Version {
+ t.Errorf("pubslishDiagnostics: got the same version after reopening (%d) as after closing", afterClose.Version)
+ }
+ })
+}
+
+// Test for the "chattyDiagnostics" setting: we should get re-published
+// diagnostics after every file change, even if diagnostics did not change.
+func TestChattyDiagnostics(t *testing.T) {
+ const files = `
+-- go.mod --
+module mod.com
+
+go 1.16
+-- main.go --
+package main
+
+func _() {
+ x := 2
+}
+
+// Irrelevant comment #0
+`
+
+ WithOptions(
+ Settings{
+ "chattyDiagnostics": true,
+ },
+ ).Run(t, files, func(_ *testing.T, env *Env) { // Create a new workspace-level directory and empty file.
+
+ env.OpenFile("main.go")
+ var d protocol.PublishDiagnosticsParams
+ env.AfterChange(
+ ReadDiagnostics("main.go", &d),
+ )
+
+ if len(d.Diagnostics) != 1 {
+ t.Fatalf("len(Diagnostics) = %d, want 1", len(d.Diagnostics))
+ }
+ msg := d.Diagnostics[0].Message
+
+ for i := 0; i < 5; i++ {
+ before := d.Version
+ env.RegexpReplace("main.go", "Irrelevant comment #.", fmt.Sprintf("Irrelevant comment #%d", i))
+ env.AfterChange(
+ ReadDiagnostics("main.go", &d),
+ )
+
+ if d.Version == before {
+ t.Errorf("after change, got version %d, want new version", d.Version)
+ }
+
+ // As a sanity check, make sure we have the same diagnostic.
+ if len(d.Diagnostics) != 1 {
+ t.Fatalf("len(Diagnostics) = %d, want 1", len(d.Diagnostics))
+ }
+ newMsg := d.Diagnostics[0].Message
+ if newMsg != msg {
+ t.Errorf("after change, got message %q, want %q", newMsg, msg)
+ }
+ }
+ })
+}
diff --git a/gopls/internal/regtest/diagnostics/undeclared_test.go b/gopls/internal/regtest/diagnostics/undeclared_test.go
index 79f7d4267..ac5f598cc 100644
--- a/gopls/internal/regtest/diagnostics/undeclared_test.go
+++ b/gopls/internal/regtest/diagnostics/undeclared_test.go
@@ -7,8 +7,8 @@ package diagnostics
import (
"testing"
- "golang.org/x/tools/internal/lsp/protocol"
- . "golang.org/x/tools/internal/lsp/regtest"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
)
func TestUndeclaredDiagnostics(t *testing.T) {
@@ -44,23 +44,29 @@ func _() int {
// 'x' is undeclared, but still necessary.
env.OpenFile("a/a.go")
- env.Await(env.DiagnosticAtRegexp("a/a.go", "x"))
- diags := env.DiagnosticsFor("a/a.go")
- if got := len(diags.Diagnostics); got != 1 {
+ var adiags protocol.PublishDiagnosticsParams
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("a/a.go", "x")),
+ ReadDiagnostics("a/a.go", &adiags),
+ )
+ if got := len(adiags.Diagnostics); got != 1 {
t.Errorf("len(Diagnostics) = %d, want 1", got)
}
- if diag := diags.Diagnostics[0]; isUnnecessary(diag) {
+ if diag := adiags.Diagnostics[0]; isUnnecessary(diag) {
t.Errorf("%v tagged unnecessary, want necessary", diag)
}
// 'y = y' is pointless, and should be detected as unnecessary.
env.OpenFile("b/b.go")
- env.Await(env.DiagnosticAtRegexp("b/b.go", "y = y"))
- diags = env.DiagnosticsFor("b/b.go")
- if got := len(diags.Diagnostics); got != 1 {
+ var bdiags protocol.PublishDiagnosticsParams
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("b/b.go", "y = y")),
+ ReadDiagnostics("b/b.go", &bdiags),
+ )
+ if got := len(bdiags.Diagnostics); got != 1 {
t.Errorf("len(Diagnostics) = %d, want 1", got)
}
- if diag := diags.Diagnostics[0]; !isUnnecessary(diag) {
+ if diag := bdiags.Diagnostics[0]; !isUnnecessary(diag) {
t.Errorf("%v tagged necessary, want unnecessary", diag)
}
})
diff --git a/gopls/internal/regtest/inlayhints/inlayhints_test.go b/gopls/internal/regtest/inlayhints/inlayhints_test.go
new file mode 100644
index 000000000..d4caabe79
--- /dev/null
+++ b/gopls/internal/regtest/inlayhints/inlayhints_test.go
@@ -0,0 +1,69 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package inlayhint
+
+import (
+ "testing"
+
+ "golang.org/x/tools/gopls/internal/hooks"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/internal/bug"
+)
+
+func TestMain(m *testing.M) {
+ bug.PanicOnBugs = true
+ Main(m, hooks.Options)
+}
+
+func TestEnablingInlayHints(t *testing.T) {
+ const workspace = `
+-- go.mod --
+module inlayHint.test
+go 1.12
+-- lib.go --
+package lib
+type Number int
+const (
+ Zero Number = iota
+ One
+ Two
+)
+`
+ tests := []struct {
+ label string
+ enabled map[string]bool
+ wantInlayHint bool
+ }{
+ {
+ label: "default",
+ wantInlayHint: false,
+ },
+ {
+ label: "enable const",
+ enabled: map[string]bool{source.ConstantValues: true},
+ wantInlayHint: true,
+ },
+ {
+ label: "enable parameter names",
+ enabled: map[string]bool{source.ParameterNames: true},
+ wantInlayHint: false,
+ },
+ }
+ for _, test := range tests {
+ t.Run(test.label, func(t *testing.T) {
+ WithOptions(
+ Settings{
+ "hints": test.enabled,
+ },
+ ).Run(t, workspace, func(t *testing.T, env *Env) {
+ env.OpenFile("lib.go")
+ lens := env.InlayHints("lib.go")
+ if gotInlayHint := len(lens) > 0; gotInlayHint != test.wantInlayHint {
+ t.Errorf("got inlayHint: %t, want %t", gotInlayHint, test.wantInlayHint)
+ }
+ })
+ })
+ }
+}
diff --git a/gopls/internal/regtest/marker/marker_test.go b/gopls/internal/regtest/marker/marker_test.go
new file mode 100644
index 000000000..ac051a555
--- /dev/null
+++ b/gopls/internal/regtest/marker/marker_test.go
@@ -0,0 +1,21 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package marker
+
+import (
+ "testing"
+
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
+)
+
+// Note: we use a separate package for the marker tests so that we can easily
+// compare their performance to the existing marker tests in ./internal/lsp.
+
+// TestMarkers runs the marker tests from the testdata directory.
+//
+// See RunMarkerTests for details on how marker tests work.
+func TestMarkers(t *testing.T) {
+ RunMarkerTests(t, "testdata")
+}
diff --git a/gopls/internal/regtest/marker/testdata/definition/embed.txt b/gopls/internal/regtest/marker/testdata/definition/embed.txt
new file mode 100644
index 000000000..e28c7fed6
--- /dev/null
+++ b/gopls/internal/regtest/marker/testdata/definition/embed.txt
@@ -0,0 +1,254 @@
+This test checks definition and hover operations over embedded fields and methods.
+
+-- go.mod --
+module mod.com
+
+go 1.18
+
+-- a/a.go --
+package a
+
+type A string //@loc(AString, "A")
+
+func (_ A) Hi() {} //@loc(AHi, "Hi")
+
+type S struct {
+ Field int //@loc(SField, "Field")
+ R // embed a struct
+ H // embed an interface
+}
+
+type R struct {
+ Field2 int //@loc(RField2, "Field2")
+}
+
+func (_ R) Hey() {} //@loc(RHey, "Hey")
+
+type H interface { //@loc(H, "H")
+ Goodbye() //@loc(HGoodbye, "Goodbye")
+}
+
+type I interface { //@loc(I, "I")
+ B() //@loc(IB, "B")
+ J
+}
+
+type J interface { //@loc(J, "J")
+ Hello() //@loc(JHello, "Hello")
+}
+
+-- b/b.go --
+package b
+
+import "mod.com/a" //@loc(AImport, re"\".*\"")
+
+type embed struct {
+ F int //@loc(F, "F")
+}
+
+func (embed) M() //@loc(M, "M")
+
+type Embed struct {
+ embed
+ *a.A
+ a.I
+ a.S
+}
+
+func _() {
+ e := Embed{}
+ e.Hi() //@def("Hi", AHi),hover("Hi", "Hi", AHi)
+ e.B() //@def("B", IB),hover("B", "B", IB)
+ _ = e.Field //@def("Field", SField),hover("Field", "Field", SField)
+ _ = e.Field2 //@def("Field2", RField2),hover("Field2", "Field2", RField2)
+ e.Hello() //@def("Hello", JHello),hover("Hello", "Hello",JHello)
+ e.Hey() //@def("Hey", RHey),hover("Hey", "Hey", RHey)
+ e.Goodbye() //@def("Goodbye", HGoodbye),hover("Goodbye", "Goodbye", HGoodbye)
+ e.M() //@def("M", M),hover("M", "M", M)
+ _ = e.F //@def("F", F),hover("F", "F", F)
+}
+
+type aAlias = a.A //@loc(aAlias, "aAlias")
+
+type S1 struct { //@loc(S1, "S1")
+ F1 int //@loc(S1F1, "F1")
+ S2 //@loc(S1S2, "S2"),def("S2", S2),hover("S2", "S2", S2)
+ a.A //@def("A", AString),hover("A", "A", aA)
+ aAlias //@def("a", aAlias),hover("a", "aAlias", aAlias)
+}
+
+type S2 struct { //@loc(S2, "S2")
+ F1 string //@loc(S2F1, "F1")
+ F2 int //@loc(S2F2, "F2")
+ *a.A //@def("A", AString),def("a",AImport)
+}
+
+type S3 struct {
+ F1 struct {
+ a.A //@def("A", AString)
+ }
+}
+
+func Bar() {
+ var x S1 //@def("S1", S1),hover("S1", "S1", S1)
+ _ = x.S2 //@def("S2", S1S2),hover("S2", "S2", S1S2)
+ _ = x.F1 //@def("F1", S1F1),hover("F1", "F1", S1F1)
+ _ = x.F2 //@def("F2", S2F2),hover("F2", "F2", S2F2)
+ _ = x.S2.F1 //@def("F1", S2F1),hover("F1", "F1", S2F1)
+}
+
+-- b/c.go --
+package b
+
+var _ = S1{ //@def("S1", S1),hover("S1", "S1", S1)
+ F1: 99, //@def("F1", S1F1),hover("F1", "F1", S1F1)
+}
+
+-- @AHi/hover.md --
+```go
+func (a.A).Hi()
+```
+
+[`(a.A).Hi` on pkg.go.dev](https://pkg.go.dev/mod.com/a#A.Hi)
+-- @F/hover.md --
+```go
+field F int
+```
+
+@loc(F, "F")
+
+
+[`(b.Embed).F` on pkg.go.dev](https://pkg.go.dev/mod.com/b#Embed.F)
+-- @HGoodbye/hover.md --
+```go
+func (a.H).Goodbye()
+```
+
+@loc(HGoodbye, "Goodbye")
+
+
+[`(a.H).Goodbye` on pkg.go.dev](https://pkg.go.dev/mod.com/a#H.Goodbye)
+-- @IB/hover.md --
+```go
+func (a.I).B()
+```
+
+@loc(IB, "B")
+
+
+[`(a.I).B` on pkg.go.dev](https://pkg.go.dev/mod.com/a#I.B)
+-- @JHello/hover.md --
+```go
+func (a.J).Hello()
+```
+
+@loc(JHello, "Hello")
+
+
+[`(a.J).Hello` on pkg.go.dev](https://pkg.go.dev/mod.com/a#J.Hello)
+-- @M/hover.md --
+```go
+func (embed).M()
+```
+
+[`(b.Embed).M` on pkg.go.dev](https://pkg.go.dev/mod.com/b#Embed.M)
+-- @RField2/hover.md --
+```go
+field Field2 int
+```
+
+@loc(RField2, "Field2")
+
+
+[`(a.R).Field2` on pkg.go.dev](https://pkg.go.dev/mod.com/a#R.Field2)
+-- @RHey/hover.md --
+```go
+func (a.R).Hey()
+```
+
+[`(a.R).Hey` on pkg.go.dev](https://pkg.go.dev/mod.com/a#R.Hey)
+-- @S1/hover.md --
+```go
+type S1 struct {
+ F1 int //@loc(S1F1, "F1")
+ S2 //@loc(S1S2, "S2"),def("S2", S2),hover("S2", "S2", S2)
+ a.A //@def("A", AString),hover("A", "A", aA)
+ aAlias //@def("a", aAlias),hover("a", "aAlias", aAlias)
+}
+```
+
+[`b.S1` on pkg.go.dev](https://pkg.go.dev/mod.com/b#S1)
+-- @S1F1/hover.md --
+```go
+field F1 int
+```
+
+@loc(S1F1, "F1")
+
+
+[`(b.S1).F1` on pkg.go.dev](https://pkg.go.dev/mod.com/b#S1.F1)
+-- @S1S2/hover.md --
+```go
+field S2 S2
+```
+
+@loc(S1S2, "S2"),def("S2", S2),hover("S2", "S2", S2)
+
+
+[`(b.S1).S2` on pkg.go.dev](https://pkg.go.dev/mod.com/b#S1.S2)
+-- @S2/hover.md --
+```go
+type S2 struct {
+ F1 string //@loc(S2F1, "F1")
+ F2 int //@loc(S2F2, "F2")
+ *a.A //@def("A", AString),def("a",AImport)
+}
+```
+
+[`b.S2` on pkg.go.dev](https://pkg.go.dev/mod.com/b#S2)
+-- @S2F1/hover.md --
+```go
+field F1 string
+```
+
+@loc(S2F1, "F1")
+
+
+[`(b.S2).F1` on pkg.go.dev](https://pkg.go.dev/mod.com/b#S2.F1)
+-- @S2F2/hover.md --
+```go
+field F2 int
+```
+
+@loc(S2F2, "F2")
+
+
+[`(b.S2).F2` on pkg.go.dev](https://pkg.go.dev/mod.com/b#S2.F2)
+-- @SField/hover.md --
+```go
+field Field int
+```
+
+@loc(SField, "Field")
+
+
+[`(a.S).Field` on pkg.go.dev](https://pkg.go.dev/mod.com/a#S.Field)
+-- @aA/hover.md --
+```go
+type A string
+
+func (a.A).Hi()
+```
+
+@loc(AString, "A")
+
+
+[`a.A` on pkg.go.dev](https://pkg.go.dev/mod.com/a#A)
+-- @aAlias/hover.md --
+```go
+type aAlias = a.A
+
+func (a.A).Hi()
+```
+
+@loc(aAlias, "aAlias")
diff --git a/gopls/internal/regtest/marker/testdata/definition/import.txt b/gopls/internal/regtest/marker/testdata/definition/import.txt
new file mode 100644
index 000000000..9e5e5929a
--- /dev/null
+++ b/gopls/internal/regtest/marker/testdata/definition/import.txt
@@ -0,0 +1,52 @@
+This test checks definition and hover over imports.
+-- go.mod --
+module mod.com
+
+go 1.18
+-- foo/foo.go --
+package foo
+
+type Foo struct{}
+
+// DoFoo does foo.
+func DoFoo() {} //@loc(DoFoo, "DoFoo")
+-- bar/bar.go --
+package bar
+
+import (
+ myFoo "mod.com/foo" //@loc(myFoo, "myFoo")
+)
+
+var _ *myFoo.Foo //@def("myFoo", myFoo),hover("myFoo", "myFoo", myFoo)
+-- bar/dotimport.go --
+package bar
+
+import . "mod.com/foo"
+
+func _() {
+ // variable of type foo.Foo
+ var _ Foo //@hover("_", "_", FooVar)
+
+ DoFoo() //@hover("DoFoo", "DoFoo", DoFoo)
+}
+-- @DoFoo/hover.md --
+```go
+func DoFoo()
+```
+
+DoFoo does foo.
+
+
+[`foo.DoFoo` on pkg.go.dev](https://pkg.go.dev/mod.com/foo#DoFoo)
+-- @FooVar/hover.md --
+```go
+var _ Foo
+```
+
+variable of type foo.Foo
+-- @myFoo/hover.md --
+```go
+package myFoo ("mod.com/foo")
+```
+
+[`myFoo` on pkg.go.dev](https://pkg.go.dev/mod.com/foo)
diff --git a/gopls/internal/regtest/marker/testdata/definition/misc.txt b/gopls/internal/regtest/marker/testdata/definition/misc.txt
new file mode 100644
index 000000000..48f5d340c
--- /dev/null
+++ b/gopls/internal/regtest/marker/testdata/definition/misc.txt
@@ -0,0 +1,230 @@
+This test exercises miscellaneous definition and hover requests.
+-- go.mod --
+module mod.com
+
+go 1.16
+-- a.go --
+package a //@loc(aPackage, re"package (a)"),hover(aPackage, aPackage, aPackage)
+
+var (
+ // x is a variable.
+ x string //@loc(x, "x"),hover(x, x, hoverx)
+)
+
+// Constant block. When I hover on h, I should see this comment.
+const (
+ // When I hover on g, I should see this comment.
+ g = 1 //@hover("g", "g", hoverg)
+
+ h = 2 //@hover("h", "h", hoverh)
+)
+
+// z is a variable too.
+var z string //@loc(z, "z"),hover(z, z, hoverz)
+
+func AStuff() { //@loc(AStuff, "AStuff")
+ x := 5
+ Random2(x) //@def("dom2", Random2)
+ Random() //@def("()", Random)
+}
+
+type H interface { //@loc(H, "H")
+ Goodbye()
+}
+
+type I interface { //@loc(I, "I")
+ B()
+ J
+}
+
+type J interface { //@loc(J, "J")
+ Hello()
+}
+
+func _() {
+ // 1st type declaration block
+ type (
+ a struct { //@hover("a", "a", hoverDeclBlocka)
+ x string
+ }
+ )
+
+ // 2nd type declaration block
+ type (
+ // b has a comment
+ b struct{} //@hover("b", "b", hoverDeclBlockb)
+ )
+
+ // 3rd type declaration block
+ type (
+ // c is a struct
+ c struct { //@hover("c", "c", hoverDeclBlockc)
+ f string
+ }
+
+ d string //@hover("d", "d", hoverDeclBlockd)
+ )
+
+ type (
+ e struct { //@hover("e", "e", hoverDeclBlocke)
+ f float64
+ } // e has a comment
+ )
+}
+
+var (
+ hh H //@hover("H", "H", hoverH)
+ ii I //@hover("I", "I", hoverI)
+ jj J //@hover("J", "J", hoverJ)
+)
+-- a_test.go --
+package a
+
+import (
+ "testing"
+)
+
+func TestA(t *testing.T) { //@hover("TestA", "TestA", hoverTestA)
+}
+-- random.go --
+package a
+
+func Random() int { //@loc(Random, "Random")
+ y := 6 + 7
+ return y
+}
+
+func Random2(y int) int { //@loc(Random2, "Random2"),loc(RandomParamY, "y")
+ return y //@def("y", RandomParamY),hover("y", "y", hovery)
+}
+
+type Pos struct {
+ x, y int //@loc(PosX, "x"),loc(PosY, "y")
+}
+
+// Typ has a comment. Its fields do not.
+type Typ struct{ field string } //@loc(TypField, "field")
+
+func _() {
+ x := &Typ{}
+ _ = x.field //@def("field", TypField),hover("field", "field", hoverfield)
+}
+
+func (p *Pos) Sum() int { //@loc(PosSum, "Sum")
+ return p.x + p.y //@hover("x", "x", hoverpx)
+}
+
+func _() {
+ var p Pos
+ _ = p.Sum() //@def("()", PosSum),hover("()", `Sum`, hoverSum)
+}
+-- @aPackage/hover.md --
+-- @hoverDeclBlocka/hover.md --
+```go
+type a struct {
+ x string
+}
+```
+
+1st type declaration block
+-- @hoverDeclBlockb/hover.md --
+```go
+type b struct{}
+```
+
+b has a comment
+-- @hoverDeclBlockc/hover.md --
+```go
+type c struct {
+ f string
+}
+```
+
+c is a struct
+-- @hoverDeclBlockd/hover.md --
+```go
+type d string
+```
+
+3rd type declaration block
+-- @hoverDeclBlocke/hover.md --
+```go
+type e struct {
+ f float64
+}
+```
+
+e has a comment
+-- @hoverH/hover.md --
+```go
+type H interface {
+ Goodbye()
+}
+```
+
+[`a.H` on pkg.go.dev](https://pkg.go.dev/mod.com#H)
+-- @hoverI/hover.md --
+```go
+type I interface {
+ B()
+ J
+}
+```
+
+[`a.I` on pkg.go.dev](https://pkg.go.dev/mod.com#I)
+-- @hoverJ/hover.md --
+```go
+type J interface {
+ Hello()
+}
+```
+
+[`a.J` on pkg.go.dev](https://pkg.go.dev/mod.com#J)
+-- @hoverSum/hover.md --
+```go
+func (*Pos).Sum() int
+```
+
+[`(a.Pos).Sum` on pkg.go.dev](https://pkg.go.dev/mod.com#Pos.Sum)
+-- @hoverTestA/hover.md --
+```go
+func TestA(t *testing.T)
+```
+-- @hoverfield/hover.md --
+```go
+field field string
+```
+-- @hoverg/hover.md --
+```go
+const g untyped int = 1
+```
+
+When I hover on g, I should see this comment.
+-- @hoverh/hover.md --
+```go
+const h untyped int = 2
+```
+
+Constant block. When I hover on h, I should see this comment.
+-- @hoverpx/hover.md --
+```go
+field x int
+```
+
+@loc(PosX, "x"),loc(PosY, "y")
+-- @hoverx/hover.md --
+```go
+var x string
+```
+
+x is a variable.
+-- @hovery/hover.md --
+```go
+var y int
+```
+-- @hoverz/hover.md --
+```go
+var z string
+```
+
+z is a variable too.
diff --git a/gopls/internal/regtest/marker/testdata/hover/basiclit.txt b/gopls/internal/regtest/marker/testdata/hover/basiclit.txt
new file mode 100644
index 000000000..32527420d
--- /dev/null
+++ b/gopls/internal/regtest/marker/testdata/hover/basiclit.txt
@@ -0,0 +1,60 @@
+This test checks gopls behavior when hovering over basic literals.
+-- basiclit.go --
+package basiclit
+
+func _() {
+ _ = 'a' //@hover("'a'", "'a'", latinA)
+ _ = 0x61 //@hover("0x61", "0x61", latinA)
+
+ _ = '\u2211' //@hover("'\\u2211'", "'\\u2211'", summation)
+ _ = 0x2211 //@hover("0x2211", "0x2211", summation)
+ _ = "foo \u2211 bar" //@hover("\\u2211", "\\u2211", summation)
+
+ _ = '\a' //@hover("'\\a'", "'\\a'", control)
+ _ = "foo \a bar" //@hover("\\a", "\\a", control)
+
+ _ = '\U0001F30A' //@hover("'\\U0001F30A'", "'\\U0001F30A'", waterWave)
+ _ = 0x0001F30A //@hover("0x0001F30A", "0x0001F30A", waterWave)
+ _ = "foo \U0001F30A bar" //@hover("\\U0001F30A", "\\U0001F30A", waterWave)
+
+ _ = '\x7E' //@hover("'\\x7E'", "'\\x7E'", tilde)
+ _ = "foo \x7E bar" //@hover("\\x7E", "\\x7E", tilde)
+ _ = "foo \a bar" //@hover("\\a", "\\a", control)
+
+ _ = '\173' //@hover("'\\173'", "'\\173'", leftCurly)
+ _ = "foo \173 bar" //@hover("\\173","\\173", leftCurly)
+ _ = "foo \173 bar \u2211 baz" //@hover("\\173","\\173", leftCurly)
+ _ = "foo \173 bar \u2211 baz" //@hover("\\u2211","\\u2211", summation)
+ _ = "foo\173bar\u2211baz" //@hover("\\173","\\173", leftCurly)
+ _ = "foo\173bar\u2211baz" //@hover("\\u2211","\\u2211", summation)
+
+ // search for runes in string only if there is an escaped sequence
+ _ = "hello" //@hover(`"hello"`, _, _)
+
+ // incorrect escaped rune sequences
+ _ = '\0' //@hover("'\\0'", _, _),diag(re`\\0()'`, re"illegal character")
+ _ = '\u22111' //@hover("'\\u22111'", _, _)
+ _ = '\U00110000' //@hover("'\\U00110000'", _, _)
+ _ = '\u12e45'//@hover("'\\u12e45'", _, _)
+ _ = '\xa' //@hover("'\\xa'", _, _)
+ _ = 'aa' //@hover("'aa'", _, _)
+
+ // other basic lits
+ _ = 1 //@hover("1", _, _)
+ _ = 1.2 //@hover("1.2", _, _)
+ _ = 1.2i //@hover("1.2i", _, _)
+ _ = 0123 //@hover("0123", _, _)
+ _ = 0x1234567890 //@hover("0x1234567890", _, _)
+)
+-- @control/hover.md --
+U+0007, control
+-- @latinA/hover.md --
+'a', U+0061, LATIN SMALL LETTER A
+-- @leftCurly/hover.md --
+'{', U+007B, LEFT CURLY BRACKET
+-- @summation/hover.md --
+'∑', U+2211, N-ARY SUMMATION
+-- @tilde/hover.md --
+'~', U+007E, TILDE
+-- @waterWave/hover.md --
+'🌊', U+1F30A, WATER WAVE
diff --git a/gopls/internal/regtest/marker/testdata/hover/const.txt b/gopls/internal/regtest/marker/testdata/hover/const.txt
new file mode 100644
index 000000000..cdb0e51e2
--- /dev/null
+++ b/gopls/internal/regtest/marker/testdata/hover/const.txt
@@ -0,0 +1,18 @@
+This test checks hovering over constants.
+-- go.mod --
+module mod.com
+
+go 1.18
+-- c.go --
+package c
+
+const X = 0 //@hover("X", "X", bX)
+-- @bX/hover.md --
+```go
+const X untyped int = 0
+```
+
+@hover("X", "X", bX)
+
+
+[`c.X` on pkg.go.dev](https://pkg.go.dev/mod.com#X)
diff --git a/gopls/internal/regtest/marker/testdata/hover/generics.txt b/gopls/internal/regtest/marker/testdata/hover/generics.txt
new file mode 100644
index 000000000..673e860a3
--- /dev/null
+++ b/gopls/internal/regtest/marker/testdata/hover/generics.txt
@@ -0,0 +1,77 @@
+This file contains tests for hovering over generic Go code.
+
+-- flags --
+-min_go=go1.18
+
+-- go.mod --
+// A go.mod is require for correct pkgsite links.
+// TODO(rfindley): don't link to ad-hoc or command-line-arguments packages!
+module mod.com
+
+go 1.18
+
+-- generics.go --
+package generics
+
+type value[T any] struct { //hover("lue", "value", value),hover("T", "T", valueT)
+ val T //@hover("T", "T", valuevalT)
+ Q int //@hover("Q", "Q", valueQ)
+}
+
+type Value[T any] struct { //@hover("T", "T", ValueT)
+ val T //@hover("T", "T", ValuevalT)
+ Q int //@hover("Q", "Q", ValueQ)
+}
+
+// disabled - see issue #54822
+func F[P interface{ ~int | string }]() { // hover("P","P",Ptparam)
+ // disabled - see issue #54822
+ var _ P // hover("P","P",Pvar)
+}
+
+-- inferred.go --
+package generics
+
+func app[S interface{ ~[]E }, E interface{}](s S, e E) S {
+ return append(s, e)
+}
+
+func _() {
+ _ = app[[]int] //@hover("app", "app", appint)
+ _ = app[[]int, int] //@hover("app", "app", appint)
+ // TODO(rfindley): eliminate this diagnostic.
+ _ = app[[]int]([]int{}, 0) //@hover("app", "app", appint),diag("[[]int]", re"unnecessary type arguments")
+ _ = app([]int{}, 0) //@hover("app", "app", appint)
+}
+
+-- @ValueQ/hover.md --
+```go
+field Q int
+```
+
+@hover("Q", "Q", ValueQ)
+
+
+[`(generics.Value).Q` on pkg.go.dev](https://pkg.go.dev/mod.com#Value.Q)
+-- @ValueT/hover.md --
+```go
+type parameter T any
+```
+-- @ValuevalT/hover.md --
+```go
+type parameter T any
+```
+-- @appint/hover.md --
+```go
+func app(s []int, e int) []int // func[S interface{~[]E}, E interface{}](s S, e E) S
+```
+-- @valueQ/hover.md --
+```go
+field Q int
+```
+
+@hover("Q", "Q", valueQ)
+-- @valuevalT/hover.md --
+```go
+type parameter T any
+```
diff --git a/gopls/internal/regtest/marker/testdata/hover/goprivate.txt b/gopls/internal/regtest/marker/testdata/hover/goprivate.txt
new file mode 100644
index 000000000..4c309ef38
--- /dev/null
+++ b/gopls/internal/regtest/marker/testdata/hover/goprivate.txt
@@ -0,0 +1,27 @@
+This test checks that links in hover obey GOPRIVATE.
+-- env --
+GOPRIVATE=mod.com
+-- go.mod --
+module mod.com
+-- p.go --
+package p
+
+// T should not be linked, as it is private.
+type T struct{} //@hover("T", "T", T)
+-- lib/lib.go --
+package lib
+
+// GOPRIVATE should also match nested packages.
+type L struct{} //@hover("L", "L", L)
+-- @L/hover.md --
+```go
+type L struct{}
+```
+
+GOPRIVATE should also match nested packages.
+-- @T/hover.md --
+```go
+type T struct{}
+```
+
+T should not be linked, as it is private.
diff --git a/gopls/internal/regtest/marker/testdata/hover/hover.txt b/gopls/internal/regtest/marker/testdata/hover/hover.txt
new file mode 100644
index 000000000..f9cd3311b
--- /dev/null
+++ b/gopls/internal/regtest/marker/testdata/hover/hover.txt
@@ -0,0 +1,29 @@
+This test demonstrates some features of the new marker test runner.
+-- a.go --
+package a
+
+const abc = 0x2a //@hover("b", "abc", abc),hover(" =", "abc", abc)
+-- typeswitch.go --
+package a
+
+func _() {
+ var y interface{}
+ switch x := y.(type) { //@hover("x", "x", x)
+ case int:
+ println(x) //@hover("x", "x", xint),hover(")", "x", xint)
+ }
+}
+-- @abc/hover.md --
+```go
+const abc untyped int = 42
+```
+
+@hover("b", "abc", abc),hover(" =", "abc", abc)
+-- @x/hover.md --
+```go
+var x interface{}
+```
+-- @xint/hover.md --
+```go
+var x int
+```
diff --git a/gopls/internal/regtest/marker/testdata/hover/linkable.txt b/gopls/internal/regtest/marker/testdata/hover/linkable.txt
new file mode 100644
index 000000000..981716d84
--- /dev/null
+++ b/gopls/internal/regtest/marker/testdata/hover/linkable.txt
@@ -0,0 +1,120 @@
+This test checks that we correctly determine pkgsite links for various
+identifiers.
+
+We should only produce links that work, meaning the object is reachable via the
+package's public API.
+-- go.mod --
+module mod.com
+
+go 1.18
+-- p.go --
+package p
+
+type E struct {
+ Embed int
+}
+
+// T is in the package scope, and so should be linkable.
+type T struct{ //@hover("T", "T", T)
+ // Only exported fields should be linkable
+
+ f int //@hover("f", "f", f)
+ F int //@hover("F", "F", F)
+
+ E
+
+ // TODO(rfindley): is the link here correct? It ignores N.
+ N struct {
+ // Nested fields should also be linkable.
+ Nested int //@hover("Nested", "Nested", Nested)
+ }
+}
+// M is an exported method, and so should be linkable.
+func (T) M() {}
+
+// m is not exported, and so should not be linkable.
+func (T) m() {}
+
+func _() {
+ var t T
+
+ // Embedded fields should be linkable.
+ _ = t.Embed //@hover("Embed", "Embed", Embed)
+
+ // Local variables should not be linkable, even if they are capitalized.
+ var X int //@hover("X", "X", X)
+ _ = X
+
+ // Local types should not be linkable, even if they are capitalized.
+ type Local struct { //@hover("Local", "Local", Local)
+ E
+ }
+
+ // But the embedded field should still be linkable.
+ var l Local
+ _ = l.Embed //@hover("Embed", "Embed", Embed)
+}
+-- @Embed/hover.md --
+```go
+field Embed int
+```
+
+[`(p.E).Embed` on pkg.go.dev](https://pkg.go.dev/mod.com#E.Embed)
+-- @F/hover.md --
+```go
+field F int
+```
+
+@hover("F", "F", F)
+
+
+[`(p.T).F` on pkg.go.dev](https://pkg.go.dev/mod.com#T.F)
+-- @Local/hover.md --
+```go
+type Local struct {
+ E
+}
+```
+
+Local types should not be linkable, even if they are capitalized.
+-- @Nested/hover.md --
+```go
+field Nested int
+```
+
+Nested fields should also be linkable.
+-- @T/hover.md --
+```go
+type T struct {
+ f int //@hover("f", "f", f)
+ F int //@hover("F", "F", F)
+
+ E
+
+ // TODO(rfindley): is the link here correct? It ignores N.
+ N struct {
+ // Nested fields should also be linkable.
+ Nested int //@hover("Nested", "Nested", Nested)
+ }
+}
+
+func (T).M()
+func (T).m()
+```
+
+T is in the package scope, and so should be linkable.
+
+
+[`p.T` on pkg.go.dev](https://pkg.go.dev/mod.com#T)
+-- @X/hover.md --
+```go
+var X int
+```
+
+Local variables should not be linkable, even if they are capitalized.
+-- @f/hover.md --
+```go
+field f int
+```
+
+@hover("f", "f", f)
diff --git a/gopls/internal/regtest/marker/testdata/hover/linkable_generics.txt b/gopls/internal/regtest/marker/testdata/hover/linkable_generics.txt
new file mode 100644
index 000000000..be8e9e5cd
--- /dev/null
+++ b/gopls/internal/regtest/marker/testdata/hover/linkable_generics.txt
@@ -0,0 +1,145 @@
+This file contains tests for documentation links to generic code in hover.
+
+-- flags --
+-min_go=go1.18
+
+-- go.mod --
+module mod.com
+
+go 1.19
+
+-- a.go --
+package a
+
+import "mod.com/generic"
+
+func _() {
+ // Hovering over instantiated object should produce accurate type
+ // information, but link to the generic declarations.
+
+ var x generic.GT[int] //@hover("GT", "GT", xGT)
+ _ = x.F //@hover("x", "x", x),hover("F", "F", xF)
+
+ f := generic.GF[int] //@hover("GF", "GF", fGF)
+ _ = f //@hover("f", "f", f)
+}
+
+-- generic/generic.go --
+package generic
+
+// Hovering over type parameters should link to documentation.
+//
+// TODO(rfindley): should it? We should probably link to the type.
+type GT[P any] struct{ //@hover("GT", "GT", GT),hover("P", "P", GTP)
+ F P //@hover("F", "F", F),hover("P", "P", FP)
+}
+
+func (GT[P]) M(p P) { //@hover("GT", "GT", GTrecv),hover("M","M", M),hover(re"p (P)", re"p (P)", pP)
+}
+
+func GF[P any] (p P) { //@hover("GF", "GF", GF)
+}
+
+-- @F/hover.md --
+```go
+field F P
+```
+
+@hover("F", "F", F),hover("P", "P", FP)
+
+
+[`(generic.GT).F` on pkg.go.dev](https://pkg.go.dev/mod.com/generic#GT.F)
+-- @FP/hover.md --
+```go
+type parameter P any
+```
+-- @GF/hover.md --
+```go
+func GF[P any](p P)
+```
+
+[`generic.GF` on pkg.go.dev](https://pkg.go.dev/mod.com/generic#GF)
+-- @GT/hover.md --
+```go
+type GT[P any] struct {
+ F P //@hover("F", "F", F),hover("P", "P", FP)
+}
+
+func (GT[P]).M(p P)
+```
+
+Hovering over type parameters should link to documentation.
+
+TODO(rfindley): should it? We should probably link to the type.
+
+
+[`generic.GT` on pkg.go.dev](https://pkg.go.dev/mod.com/generic#GT)
+-- @GTP/hover.md --
+```go
+type parameter P any
+```
+-- @GTrecv/hover.md --
+```go
+type GT[P any] struct {
+ F P //@hover("F", "F", F),hover("P", "P", FP)
+}
+
+func (GT[P]).M(p P)
+```
+
+Hovering over type parameters should link to documentation.
+
+TODO(rfindley): should it? We should probably link to the type.
+
+
+[`generic.GT` on pkg.go.dev](https://pkg.go.dev/mod.com/generic#GT)
+-- @M/hover.md --
+```go
+func (GT[P]).M(p P)
+```
+
+[`(generic.GT).M` on pkg.go.dev](https://pkg.go.dev/mod.com/generic#GT.M)
+-- @f/hover.md --
+```go
+var f func(p int)
+```
+-- @fGF/hover.md --
+```go
+func generic.GF(p int) // func[P any](p P)
+```
+
+[`generic.GF` on pkg.go.dev](https://pkg.go.dev/mod.com/generic#GF)
+-- @pP/hover.md --
+```go
+type parameter P any
+```
+-- @x/hover.md --
+```go
+var x generic.GT[int]
+```
+
+@hover("GT", "GT", xGT)
+-- @xF/hover.md --
+```go
+field F int
+```
+
+@hover("F", "F", F),hover("P", "P", FP)
+
+
+[`(generic.GT).F` on pkg.go.dev](https://pkg.go.dev/mod.com/generic#GT.F)
+-- @xGT/hover.md --
+```go
+type GT[P any] struct {
+ F P //@hover("F", "F", F),hover("P", "P", FP)
+}
+
+func (generic.GT[P]).M(p P)
+```
+
+Hovering over type parameters should link to documentation.
+
+TODO(rfindley): should it? We should probably link to the type.
+
+
+[`generic.GT` on pkg.go.dev](https://pkg.go.dev/mod.com/generic#GT)
diff --git a/gopls/internal/regtest/marker/testdata/hover/std.txt b/gopls/internal/regtest/marker/testdata/hover/std.txt
new file mode 100644
index 000000000..a526b5211
--- /dev/null
+++ b/gopls/internal/regtest/marker/testdata/hover/std.txt
@@ -0,0 +1,80 @@
+This test checks hover results for built-in or standard library symbols.
+
+It uses synopsis documentation as full documentation for some of these
+built-ins varies across Go versions, where as it just so happens that the
+synopsis does not.
+
+In the future we may need to limit this test to the latest Go version to avoid
+documentation churn.
+-- settings.json --
+{
+ "hoverKind": "SynopsisDocumentation"
+}
+-- go.mod --
+module mod.com
+
+go 1.18
+-- std.go --
+package std
+
+import (
+ "fmt"
+ "go/types"
+ "sync"
+)
+
+func _() {
+ var err error //@loc(err, "err")
+ fmt.Printf("%v", err) //@def("err", err)
+
+ var _ string //@hover("string", "string", hoverstring)
+ _ = make([]int, 0) //@hover("make", "make", hovermake)
+
+ var mu sync.Mutex
+ mu.Lock() //@hover("Lock", "Lock", hoverLock)
+
+ var typ *types.Named //@hover("types", "types", hoverTypes)
+ typ.Obj().Name() //@hover("Name", "Name", hoverName)
+}
+-- @hoverLock/hover.md --
+```go
+func (*sync.Mutex).Lock()
+```
+
+Lock locks m.
+
+
+[`(sync.Mutex).Lock` on pkg.go.dev](https://pkg.go.dev/sync#Mutex.Lock)
+-- @hoverName/hover.md --
+```go
+func (*types.object).Name() string
+```
+
+Name returns the object's (package-local, unqualified) name.
+
+
+[`(types.TypeName).Name` on pkg.go.dev](https://pkg.go.dev/go/types#TypeName.Name)
+-- @hoverTypes/hover.md --
+```go
+package types ("go/types")
+```
+
+[`types` on pkg.go.dev](https://pkg.go.dev/go/types)
+-- @hovermake/hover.md --
+```go
+func make(t Type, size ...int) Type
+```
+
+The make built-in function allocates and initializes an object of type slice, map, or chan (only).
+
+
+[`make` on pkg.go.dev](https://pkg.go.dev/builtin#make)
+-- @hoverstring/hover.md --
+```go
+type string string
+```
+
+string is the set of all strings of 8-bit bytes, conventionally but not necessarily representing UTF-8-encoded text.
+
+
+[`string` on pkg.go.dev](https://pkg.go.dev/builtin#string)
diff --git a/gopls/internal/regtest/marker/testdata/rename/basic.txt b/gopls/internal/regtest/marker/testdata/rename/basic.txt
new file mode 100644
index 000000000..fe723cf9f
--- /dev/null
+++ b/gopls/internal/regtest/marker/testdata/rename/basic.txt
@@ -0,0 +1,22 @@
+This test performs basic coverage of 'rename' within a single package.
+
+-- basic.go --
+package p
+
+func f(x int) { println(x) } //@rename("x", y, param_x)
+
+-- @param_x/basic.go --
+package p
+
+func f(y int) { println(y) } //@rename("x", y, param_x)
+
+-- errors.go --
+package p
+
+func _(x []int) { //@renameerr("_", blank, `can't rename "_"`)
+ x = append(x, 1) //@renameerr("append", blank, "built in and cannot be renamed")
+ x = nil //@renameerr("nil", blank, "built in and cannot be renamed")
+ x = nil //@renameerr("x", x, "old and new names are the same: x")
+ _ = 1 //@renameerr("1", x, "no identifier found")
+}
+
diff --git a/gopls/internal/regtest/marker/testdata/rename/conflict.txt b/gopls/internal/regtest/marker/testdata/rename/conflict.txt
new file mode 100644
index 000000000..18438c8a8
--- /dev/null
+++ b/gopls/internal/regtest/marker/testdata/rename/conflict.txt
@@ -0,0 +1,59 @@
+This test exercises some renaming conflict scenarios
+and ensures that the errors are informative.
+
+-- go.mod --
+module example.com
+go 1.12
+
+-- super/p.go --
+package super
+
+var x int
+
+func f(y int) {
+ println(x)
+ println(y) //@renameerr("y", x, errSuperBlockConflict)
+}
+
+-- @errSuperBlockConflict --
+super/p.go:5:8: renaming this var "y" to "x"
+super/p.go:6:10: would shadow this reference
+super/p.go:3:5: to the var declared here
+-- sub/p.go --
+package sub
+
+var a int
+
+func f2(b int) {
+ println(a) //@renameerr("a", b, errSubBlockConflict)
+ println(b)
+}
+
+-- @errSubBlockConflict --
+sub/p.go:3:5: renaming this var "a" to "b"
+sub/p.go:6:10: would cause this reference to become shadowed
+sub/p.go:5:9: by this intervening var definition
+-- pkgname/p.go --
+package pkgname
+
+import e1 "errors" //@renameerr("e1", errors, errImportConflict)
+import "errors"
+
+var _ = errors.New
+var _ = e1.New
+
+-- @errImportConflict --
+pkgname/p.go:3:8: renaming this imported package name "e1" to "errors"
+pkgname/p.go:4:8: conflicts with imported package name in same block
+-- pkgname2/p1.go --
+package pkgname2
+var x int
+
+-- pkgname2/p2.go --
+package pkgname2
+import "errors" //@renameerr("errors", x, errImportConflict2)
+var _ = errors.New
+
+-- @errImportConflict2 --
+pkgname2/p2.go:2:8: renaming this imported package name "errors" to "x" would conflict
+pkgname2/p1.go:2:5: with this package member var
diff --git a/gopls/internal/regtest/marker/testdata/rename/embed.txt b/gopls/internal/regtest/marker/testdata/rename/embed.txt
new file mode 100644
index 000000000..68cf771bc
--- /dev/null
+++ b/gopls/internal/regtest/marker/testdata/rename/embed.txt
@@ -0,0 +1,36 @@
+This test exercises renaming of types used as embedded fields.
+
+-- go.mod --
+module example.com
+go 1.12
+
+-- a/a.go --
+package a
+
+type A int //@rename("A", A2, type)
+
+-- b/b.go --
+package b
+
+import "example.com/a"
+
+type B struct { a.A } //@renameerr("A", A3, errAnonField)
+
+var _ = new(B).A //@renameerr("A", A4, errAnonField)
+
+-- @errAnonField --
+can't rename embedded fields: rename the type directly or name the field
+-- @type/a/a.go --
+package a
+
+type A2 int //@rename("A", A2, type)
+
+-- @type/b/b.go --
+package b
+
+import "example.com/a"
+
+type B struct { a.A2 } //@renameerr("A", A3, errAnonField)
+
+var _ = new(B).A2 //@renameerr("A", A4, errAnonField)
+
diff --git a/gopls/internal/regtest/marker/testdata/rename/methods.txt b/gopls/internal/regtest/marker/testdata/rename/methods.txt
new file mode 100644
index 000000000..1bd985bcf
--- /dev/null
+++ b/gopls/internal/regtest/marker/testdata/rename/methods.txt
@@ -0,0 +1,67 @@
+This test exercises renaming of interface methods.
+
+The golden is currently wrong due to https://github.com/golang/go/issues/58506:
+the reference to B.F in package b should be renamed too.
+
+-- go.mod --
+module example.com
+go 1.12
+
+-- a/a.go --
+package a
+
+type A int
+
+func (A) F() {} //@renameerr("F", G, errAfToG)
+
+-- b/b.go --
+package b
+
+import "example.com/a"
+import "example.com/c"
+
+type B interface { F() } //@rename("F", G, BfToG)
+
+var _ B = a.A(0)
+var _ B = c.C(0)
+
+-- c/c.go --
+package c
+
+type C int
+
+func (C) F() {} //@renameerr("F", G, errCfToG)
+
+-- d/d.go --
+package d
+
+import "example.com/b"
+
+var _ = b.B.F
+
+-- @errAfToG --
+a/a.go:5:10: renaming this method "F" to "G"
+b/b.go:6:6: would make example.com/a.A no longer assignable to interface B
+b/b.go:6:20: (rename example.com/b.B.F if you intend to change both types)
+-- @BfToG/b/b.go --
+package b
+
+import "example.com/a"
+import "example.com/c"
+
+type B interface { G() } //@rename("F", G, BfToG)
+
+var _ B = a.A(0)
+var _ B = c.C(0)
+
+-- @BfToG/d/d.go --
+package d
+
+import "example.com/b"
+
+var _ = b.B.G
+
+-- @errCfToG --
+c/c.go:5:10: renaming this method "F" to "G"
+b/b.go:6:6: would make example.com/c.C no longer assignable to interface B
+b/b.go:6:20: (rename example.com/b.B.F if you intend to change both types)
diff --git a/gopls/internal/regtest/marker/testdata/rename/typeswitch.txt b/gopls/internal/regtest/marker/testdata/rename/typeswitch.txt
new file mode 100644
index 000000000..6743b99ef
--- /dev/null
+++ b/gopls/internal/regtest/marker/testdata/rename/typeswitch.txt
@@ -0,0 +1,26 @@
+This test covers the special case of renaming a type switch var.
+
+-- p.go --
+package p
+
+func _(x interface{}) {
+ switch y := x.(type) { //@rename("y", z, yToZ)
+ case string:
+ print(y) //@rename("y", z, yToZ)
+ default:
+ print(y) //@rename("y", z, yToZ)
+ }
+}
+
+-- @yToZ/p.go --
+package p
+
+func _(x interface{}) {
+ switch z := x.(type) { //@rename("y", z, yToZ)
+ case string:
+ print(z) //@rename("y", z, yToZ)
+ default:
+ print(z) //@rename("y", z, yToZ)
+ }
+}
+
diff --git a/gopls/internal/regtest/marker/testdata/stubmethods/basic.txt b/gopls/internal/regtest/marker/testdata/stubmethods/basic.txt
new file mode 100644
index 000000000..bb53e6767
--- /dev/null
+++ b/gopls/internal/regtest/marker/testdata/stubmethods/basic.txt
@@ -0,0 +1,24 @@
+This test exercises basic 'stub methods' functionality.
+
+-- go.mod --
+module example.com
+go 1.12
+
+-- a/a.go --
+package a
+
+type C int
+
+var _ error = C(0) //@suggestedfix(re"C.0.", re"missing method Error", "refactor.rewrite", stub)
+
+-- @stub/a/a.go --
+package a
+
+type C int
+
+// Error implements error
+func (C) Error() string {
+ panic("unimplemented")
+}
+
+var _ error = C(0) //@suggestedfix(re"C.0.", re"missing method Error", "refactor.rewrite", stub)
diff --git a/gopls/internal/regtest/misc/call_hierarchy_test.go b/gopls/internal/regtest/misc/call_hierarchy_test.go
index 9d98896ce..f0f5d4a41 100644
--- a/gopls/internal/regtest/misc/call_hierarchy_test.go
+++ b/gopls/internal/regtest/misc/call_hierarchy_test.go
@@ -6,8 +6,8 @@ package misc
import (
"testing"
- "golang.org/x/tools/internal/lsp/protocol"
- . "golang.org/x/tools/internal/lsp/regtest"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
)
// Test for golang/go#49125
@@ -23,11 +23,11 @@ package pkg
// TODO(rfindley): this could probably just be a marker test.
Run(t, files, func(t *testing.T, env *Env) {
env.OpenFile("p.go")
- pos := env.RegexpSearch("p.go", "pkg")
+ loc := env.RegexpSearch("p.go", "pkg")
var params protocol.CallHierarchyPrepareParams
- params.TextDocument.URI = env.Sandbox.Workdir.URI("p.go")
- params.Position = pos.ToProtocolPosition()
+ params.TextDocument.URI = loc.URI
+ params.Position = loc.Range.Start
// Check that this doesn't panic.
env.Editor.Server.PrepareCallHierarchy(env.Ctx, &params)
diff --git a/gopls/internal/regtest/misc/configuration_test.go b/gopls/internal/regtest/misc/configuration_test.go
index 17116adaa..6cbfe373e 100644
--- a/gopls/internal/regtest/misc/configuration_test.go
+++ b/gopls/internal/regtest/misc/configuration_test.go
@@ -7,17 +7,18 @@ package misc
import (
"testing"
- . "golang.org/x/tools/internal/lsp/regtest"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
- "golang.org/x/tools/internal/lsp/fake"
"golang.org/x/tools/internal/testenv"
)
// Test that enabling and disabling produces the expected results of showing
// and hiding staticcheck analysis results.
func TestChangeConfiguration(t *testing.T) {
- // Staticcheck only supports Go versions > 1.14.
- testenv.NeedsGo1Point(t, 15)
+ // Staticcheck only supports Go versions >= 1.19.
+ // Note: keep this in sync with TestStaticcheckWarning. Below this version we
+ // should get an error when setting staticcheck configuration.
+ testenv.NeedsGo1Point(t, 19)
const files = `
-- go.mod --
@@ -34,16 +35,125 @@ var FooErr = errors.New("foo")
`
Run(t, files, func(t *testing.T, env *Env) {
env.OpenFile("a/a.go")
+ env.AfterChange(
+ NoDiagnostics(ForFile("a/a.go")),
+ )
+ cfg := env.Editor.Config()
+ cfg.Settings = map[string]interface{}{
+ "staticcheck": true,
+ }
+ // TODO(rfindley): support waiting on diagnostics following a configuration
+ // change.
+ env.ChangeConfiguration(cfg)
env.Await(
- env.DoneWithOpen(),
- NoDiagnostics("a/a.go"),
+ Diagnostics(env.AtRegexp("a/a.go", "var (FooErr)")),
+ )
+ })
+}
+
+// TestMajorOptionsChange is like TestChangeConfiguration, but modifies an
+// an open buffer before making a major (but inconsequential) change that
+// causes gopls to recreate the view.
+//
+// Gopls should not get confused about buffer content when recreating the view.
+func TestMajorOptionsChange(t *testing.T) {
+ t.Skip("broken due to golang/go#57934")
+
+ testenv.NeedsGo1Point(t, 17)
+
+ const files = `
+-- go.mod --
+module mod.com
+
+go 1.12
+-- a/a.go --
+package a
+
+import "errors"
+
+var ErrFoo = errors.New("foo")
+`
+ Run(t, files, func(t *testing.T, env *Env) {
+ env.OpenFile("a/a.go")
+ // Introduce a staticcheck diagnostic. It should be detected when we enable
+ // staticcheck later.
+ env.RegexpReplace("a/a.go", "ErrFoo", "FooErr")
+ env.AfterChange(
+ NoDiagnostics(ForFile("a/a.go")),
)
- cfg := &fake.EditorConfig{}
- *cfg = env.Editor.Config
- cfg.EnableStaticcheck = true
- env.ChangeConfiguration(t, cfg)
+ cfg := env.Editor.Config()
+ // Any change to environment recreates the view, but this should not cause
+ // gopls to get confused about the content of a/a.go: we should get the
+ // staticcheck diagnostic below.
+ cfg.Env = map[string]string{
+ "AN_ARBITRARY_VAR": "FOO",
+ }
+ cfg.Settings = map[string]interface{}{
+ "staticcheck": true,
+ }
+ // TODO(rfindley): support waiting on diagnostics following a configuration
+ // change.
+ env.ChangeConfiguration(cfg)
env.Await(
- DiagnosticAt("a/a.go", 5, 4),
+ Diagnostics(env.AtRegexp("a/a.go", "var (FooErr)")),
+ )
+ })
+}
+
+func TestStaticcheckWarning(t *testing.T) {
+ // Note: keep this in sync with TestChangeConfiguration.
+ testenv.SkipAfterGo1Point(t, 16)
+
+ const files = `
+-- go.mod --
+module mod.com
+
+go 1.12
+-- a/a.go --
+package a
+
+import "errors"
+
+// FooErr should be called ErrFoo (ST1012)
+var FooErr = errors.New("foo")
+`
+
+ WithOptions(
+ Settings{"staticcheck": true},
+ ).Run(t, files, func(t *testing.T, env *Env) {
+ env.OnceMet(
+ InitialWorkspaceLoad,
+ ShownMessage("staticcheck is not supported"),
+ )
+ })
+}
+
+func TestGofumptWarning(t *testing.T) {
+ testenv.SkipAfterGo1Point(t, 17)
+
+ WithOptions(
+ Settings{"gofumpt": true},
+ ).Run(t, "", func(t *testing.T, env *Env) {
+ env.OnceMet(
+ InitialWorkspaceLoad,
+ ShownMessage("gofumpt is not supported"),
+ )
+ })
+}
+
+func TestDeprecatedSettings(t *testing.T) {
+ WithOptions(
+ Settings{
+ "experimentalUseInvalidMetadata": true,
+ "experimentalWatchedFileDelay": "1s",
+ "experimentalWorkspaceModule": true,
+ },
+ ).Run(t, "", func(t *testing.T, env *Env) {
+ env.OnceMet(
+ InitialWorkspaceLoad,
+ ShownMessage("experimentalWorkspaceModule"),
+ ShownMessage("experimentalUseInvalidMetadata"),
+ ShownMessage("experimentalWatchedFileDelay"),
)
})
}
diff --git a/gopls/internal/regtest/misc/debugserver_test.go b/gopls/internal/regtest/misc/debugserver_test.go
index c0df87070..519f79447 100644
--- a/gopls/internal/regtest/misc/debugserver_test.go
+++ b/gopls/internal/regtest/misc/debugserver_test.go
@@ -8,10 +8,10 @@ import (
"net/http"
"testing"
- "golang.org/x/tools/internal/lsp/command"
- "golang.org/x/tools/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/command"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
- . "golang.org/x/tools/internal/lsp/regtest"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
)
func TestStartDebugging(t *testing.T) {
diff --git a/gopls/internal/regtest/misc/definition_test.go b/gopls/internal/regtest/misc/definition_test.go
index 2f5a54820..c2dd67fc3 100644
--- a/gopls/internal/regtest/misc/definition_test.go
+++ b/gopls/internal/regtest/misc/definition_test.go
@@ -5,16 +5,15 @@
package misc
import (
+ "os"
"path"
+ "path/filepath"
"strings"
"testing"
- "golang.org/x/tools/internal/lsp/protocol"
- . "golang.org/x/tools/internal/lsp/regtest"
- "golang.org/x/tools/internal/testenv"
-
- "golang.org/x/tools/internal/lsp/fake"
- "golang.org/x/tools/internal/lsp/tests"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
+ "golang.org/x/tools/gopls/internal/lsp/tests/compare"
)
const internalDefinition = `
@@ -39,12 +38,150 @@ const message = "Hello World."
func TestGoToInternalDefinition(t *testing.T) {
Run(t, internalDefinition, func(t *testing.T, env *Env) {
env.OpenFile("main.go")
- name, pos := env.GoToDefinition("main.go", env.RegexpSearch("main.go", "message"))
+ loc := env.GoToDefinition(env.RegexpSearch("main.go", "message"))
+ name := env.Sandbox.Workdir.URIToPath(loc.URI)
if want := "const.go"; name != want {
t.Errorf("GoToDefinition: got file %q, want %q", name, want)
}
- if want := env.RegexpSearch("const.go", "message"); pos != want {
- t.Errorf("GoToDefinition: got position %v, want %v", pos, want)
+ if want := env.RegexpSearch("const.go", "message"); loc != want {
+ t.Errorf("GoToDefinition: got location %v, want %v", loc, want)
+ }
+ })
+}
+
+const linknameDefinition = `
+-- go.mod --
+module mod.com
+
+-- upper/upper.go --
+package upper
+
+import (
+ _ "unsafe"
+
+ _ "mod.com/middle"
+)
+
+//go:linkname foo mod.com/lower.bar
+func foo() string
+
+-- middle/middle.go --
+package middle
+
+import (
+ _ "mod.com/lower"
+)
+
+-- lower/lower.s --
+
+-- lower/lower.go --
+package lower
+
+func bar() string {
+ return "bar as foo"
+}`
+
+func TestGoToLinknameDefinition(t *testing.T) {
+ Run(t, linknameDefinition, func(t *testing.T, env *Env) {
+ env.OpenFile("upper/upper.go")
+
+ // Jump from directives 2nd arg.
+ start := env.RegexpSearch("upper/upper.go", `lower.bar`)
+ loc := env.GoToDefinition(start)
+ name := env.Sandbox.Workdir.URIToPath(loc.URI)
+ if want := "lower/lower.go"; name != want {
+ t.Errorf("GoToDefinition: got file %q, want %q", name, want)
+ }
+ if want := env.RegexpSearch("lower/lower.go", `bar`); loc != want {
+ t.Errorf("GoToDefinition: got position %v, want %v", loc, want)
+ }
+ })
+}
+
+const linknameDefinitionReverse = `
+-- go.mod --
+module mod.com
+
+-- upper/upper.s --
+
+-- upper/upper.go --
+package upper
+
+import (
+ _ "mod.com/middle"
+)
+
+func foo() string
+
+-- middle/middle.go --
+package middle
+
+import (
+ _ "mod.com/lower"
+)
+
+-- lower/lower.go --
+package lower
+
+import _ "unsafe"
+
+//go:linkname bar mod.com/upper.foo
+func bar() string {
+ return "bar as foo"
+}`
+
+func TestGoToLinknameDefinitionInReverseDep(t *testing.T) {
+ Run(t, linknameDefinitionReverse, func(t *testing.T, env *Env) {
+ env.OpenFile("lower/lower.go")
+
+ // Jump from directives 2nd arg.
+ start := env.RegexpSearch("lower/lower.go", `upper.foo`)
+ loc := env.GoToDefinition(start)
+ name := env.Sandbox.Workdir.URIToPath(loc.URI)
+ if want := "upper/upper.go"; name != want {
+ t.Errorf("GoToDefinition: got file %q, want %q", name, want)
+ }
+ if want := env.RegexpSearch("upper/upper.go", `foo`); loc != want {
+ t.Errorf("GoToDefinition: got position %v, want %v", loc, want)
+ }
+ })
+}
+
+// The linkname directive connects two packages not related in the import graph.
+const linknameDefinitionDisconnected = `
+-- go.mod --
+module mod.com
+
+-- a/a.go --
+package a
+
+import (
+ _ "unsafe"
+)
+
+//go:linkname foo mod.com/b.bar
+func foo() string
+
+-- b/b.go --
+package b
+
+func bar() string {
+ return "bar as foo"
+}`
+
+func TestGoToLinknameDefinitionDisconnected(t *testing.T) {
+ Run(t, linknameDefinitionDisconnected, func(t *testing.T, env *Env) {
+ env.OpenFile("a/a.go")
+
+ // Jump from directives 2nd arg.
+ start := env.RegexpSearch("a/a.go", `b.bar`)
+ loc := env.GoToDefinition(start)
+ name := env.Sandbox.Workdir.URIToPath(loc.URI)
+ if want := "b/b.go"; name != want {
+ t.Errorf("GoToDefinition: got file %q, want %q", name, want)
+ }
+ if want := env.RegexpSearch("b/b.go", `bar`); loc != want {
+ t.Errorf("GoToDefinition: got position %v, want %v", loc, want)
}
})
}
@@ -66,19 +203,21 @@ func main() {
func TestGoToStdlibDefinition_Issue37045(t *testing.T) {
Run(t, stdlibDefinition, func(t *testing.T, env *Env) {
env.OpenFile("main.go")
- name, pos := env.GoToDefinition("main.go", env.RegexpSearch("main.go", `fmt.(Printf)`))
+ loc := env.GoToDefinition(env.RegexpSearch("main.go", `fmt.(Printf)`))
+ name := env.Sandbox.Workdir.URIToPath(loc.URI)
if got, want := path.Base(name), "print.go"; got != want {
t.Errorf("GoToDefinition: got file %q, want %q", name, want)
}
// Test that we can jump to definition from outside our workspace.
// See golang.org/issues/37045.
- newName, newPos := env.GoToDefinition(name, pos)
+ newLoc := env.GoToDefinition(loc)
+ newName := env.Sandbox.Workdir.URIToPath(newLoc.URI)
if newName != name {
t.Errorf("GoToDefinition is not idempotent: got %q, want %q", newName, name)
}
- if newPos != pos {
- t.Errorf("GoToDefinition is not idempotent: got %v, want %v", newPos, pos)
+ if newLoc != loc {
+ t.Errorf("GoToDefinition is not idempotent: got %v, want %v", newLoc, loc)
}
})
}
@@ -86,24 +225,24 @@ func TestGoToStdlibDefinition_Issue37045(t *testing.T) {
func TestUnexportedStdlib_Issue40809(t *testing.T) {
Run(t, stdlibDefinition, func(t *testing.T, env *Env) {
env.OpenFile("main.go")
- name, _ := env.GoToDefinition("main.go", env.RegexpSearch("main.go", `fmt.(Printf)`))
- env.OpenFile(name)
+ loc := env.GoToDefinition(env.RegexpSearch("main.go", `fmt.(Printf)`))
+ name := env.Sandbox.Workdir.URIToPath(loc.URI)
- pos := env.RegexpSearch(name, `:=\s*(newPrinter)\(\)`)
+ loc = env.RegexpSearch(name, `:=\s*(newPrinter)\(\)`)
// Check that we can find references on a reference
- refs := env.References(name, pos)
+ refs := env.References(loc)
if len(refs) < 5 {
t.Errorf("expected 5+ references to newPrinter, found: %#v", refs)
}
- name, pos = env.GoToDefinition(name, pos)
- content, _ := env.Hover(name, pos)
+ loc = env.GoToDefinition(loc)
+ content, _ := env.Hover(loc)
if !strings.Contains(content.Value, "newPrinter") {
t.Fatal("definition of newPrinter went to the incorrect place")
}
// And on the definition too.
- refs = env.References(name, pos)
+ refs = env.References(loc)
if len(refs) < 5 {
t.Errorf("expected 5+ references to newPrinter, found: %#v", refs)
}
@@ -127,13 +266,13 @@ func main() {
}`
Run(t, mod, func(t *testing.T, env *Env) {
env.OpenFile("main.go")
- content, _ := env.Hover("main.go", env.RegexpSearch("main.go", "Error"))
+ content, _ := env.Hover(env.RegexpSearch("main.go", "Error"))
if content == nil {
t.Fatalf("nil hover content for Error")
}
want := "```go\nfunc (error).Error() string\n```"
if content.Value != want {
- t.Fatalf("hover failed:\n%s", tests.Diff(t, want, content.Value))
+ t.Fatalf("hover failed:\n%s", compare.Text(want, content.Value))
}
})
}
@@ -153,24 +292,19 @@ func main() {}
`
for _, tt := range []struct {
wantLinks int
- wantDef bool
importShortcut string
}{
- {1, false, "Link"},
- {0, true, "Definition"},
- {1, true, "Both"},
+ {1, "Link"},
+ {0, "Definition"},
+ {1, "Both"},
} {
t.Run(tt.importShortcut, func(t *testing.T) {
WithOptions(
- EditorConfig{
- ImportShortcut: tt.importShortcut,
- },
+ Settings{"importShortcut": tt.importShortcut},
).Run(t, mod, func(t *testing.T, env *Env) {
env.OpenFile("main.go")
- file, pos := env.GoToDefinition("main.go", env.RegexpSearch("main.go", `"fmt"`))
- if !tt.wantDef && (file != "" || pos != (fake.Pos{})) {
- t.Fatalf("expected no definition, got one: %s:%v", file, pos)
- } else if tt.wantDef && file == "" && pos == (fake.Pos{}) {
+ loc := env.GoToDefinition(env.RegexpSearch("main.go", `"fmt"`))
+ if loc == (protocol.Location{}) {
t.Fatalf("expected definition, got none")
}
links := env.DocumentLink("main.go")
@@ -217,7 +351,7 @@ func main() {}
Run(t, mod, func(t *testing.T, env *Env) {
env.OpenFile("main.go")
- _, pos, err := env.Editor.GoToTypeDefinition(env.Ctx, "main.go", env.RegexpSearch("main.go", tt.re))
+ loc, err := env.Editor.GoToTypeDefinition(env.Ctx, env.RegexpSearch("main.go", tt.re))
if tt.wantError {
if err == nil {
t.Fatal("expected error, got nil")
@@ -228,9 +362,9 @@ func main() {}
t.Fatalf("expected nil error, got %s", err)
}
- typePos := env.RegexpSearch("main.go", tt.wantTypeRe)
- if pos != typePos {
- t.Errorf("invalid pos: want %+v, got %+v", typePos, pos)
+ typeLoc := env.RegexpSearch("main.go", tt.wantTypeRe)
+ if loc != typeLoc {
+ t.Errorf("invalid pos: want %+v, got %+v", typeLoc, loc)
}
})
})
@@ -239,8 +373,6 @@ func main() {}
// Test for golang/go#47825.
func TestImportTestVariant(t *testing.T) {
- testenv.NeedsGo1Point(t, 13)
-
const mod = `
-- go.mod --
module mod.com
@@ -275,7 +407,7 @@ package client
`
Run(t, mod, func(t *testing.T, env *Env) {
env.OpenFile("client/client_role_test.go")
- env.GoToDefinition("client/client_role_test.go", env.RegexpSearch("client/client_role_test.go", "RoleSetup"))
+ env.GoToDefinition(env.RegexpSearch("client/client_role_test.go", "RoleSetup"))
})
}
@@ -289,3 +421,92 @@ func TestGoToCrashingDefinition_Issue49223(t *testing.T) {
env.Editor.Server.Definition(env.Ctx, params)
})
}
+
+// TestVendoringInvalidatesMetadata ensures that gopls uses the
+// correct metadata even after an external 'go mod vendor' command
+// causes packages to move; see issue #55995.
+// See also TestImplementationsInVendor, which tests the same fix.
+func TestVendoringInvalidatesMetadata(t *testing.T) {
+ t.Skip("golang/go#56169: file watching does not capture vendor dirs")
+
+ const proxy = `
+-- other.com/b@v1.0.0/go.mod --
+module other.com/b
+go 1.14
+
+-- other.com/b@v1.0.0/b.go --
+package b
+const K = 0
+`
+ const src = `
+-- go.mod --
+module example.com/a
+go 1.14
+require other.com/b v1.0.0
+
+-- go.sum --
+other.com/b v1.0.0 h1:1wb3PMGdet5ojzrKl+0iNksRLnOM9Jw+7amBNqmYwqk=
+other.com/b v1.0.0/go.mod h1:TgHQFucl04oGT+vrUm/liAzukYHNxCwKNkQZEyn3m9g=
+
+-- a.go --
+package a
+import "other.com/b"
+const _ = b.K
+
+`
+ WithOptions(
+ ProxyFiles(proxy),
+ Modes(Default), // fails in 'experimental' mode
+ ).Run(t, src, func(t *testing.T, env *Env) {
+ // Enable to debug go.sum mismatch, which may appear as
+ // "module lookup disabled by GOPROXY=off", confusingly.
+ if false {
+ env.DumpGoSum(".")
+ }
+
+ env.OpenFile("a.go")
+ refLoc := env.RegexpSearch("a.go", "K") // find "b.K" reference
+
+ // Initially, b.K is defined in the module cache.
+ gotLoc := env.GoToDefinition(refLoc)
+ gotFile := env.Sandbox.Workdir.URIToPath(gotLoc.URI)
+ wantCache := filepath.ToSlash(env.Sandbox.GOPATH()) + "/pkg/mod/other.com/b@v1.0.0/b.go"
+ if gotFile != wantCache {
+ t.Errorf("GoToDefinition, before: got file %q, want %q", gotFile, wantCache)
+ }
+
+ // Run 'go mod vendor' outside the editor.
+ if err := env.Sandbox.RunGoCommand(env.Ctx, ".", "mod", []string{"vendor"}, true); err != nil {
+ t.Fatalf("go mod vendor: %v", err)
+ }
+
+ // Synchronize changes to watched files.
+ env.Await(env.DoneWithChangeWatchedFiles())
+
+ // Now, b.K is defined in the vendor tree.
+ gotLoc = env.GoToDefinition(refLoc)
+ wantVendor := "vendor/other.com/b/b.go"
+ if gotFile != wantVendor {
+ t.Errorf("GoToDefinition, after go mod vendor: got file %q, want %q", gotFile, wantVendor)
+ }
+
+ // Delete the vendor tree.
+ if err := os.RemoveAll(env.Sandbox.Workdir.AbsPath("vendor")); err != nil {
+ t.Fatal(err)
+ }
+ // Notify the server of the deletion.
+ if err := env.Sandbox.Workdir.CheckForFileChanges(env.Ctx); err != nil {
+ t.Fatal(err)
+ }
+
+ // Synchronize again.
+ env.Await(env.DoneWithChangeWatchedFiles())
+
+ // b.K is once again defined in the module cache.
+ gotLoc = env.GoToDefinition(gotLoc)
+ gotFile = env.Sandbox.Workdir.URIToPath(gotLoc.URI)
+ if gotFile != wantCache {
+ t.Errorf("GoToDefinition, after rm -rf vendor: got file %q, want %q", gotFile, wantCache)
+ }
+ })
+}
diff --git a/gopls/internal/regtest/misc/embed_test.go b/gopls/internal/regtest/misc/embed_test.go
index 2e66d7866..021fbfcc0 100644
--- a/gopls/internal/regtest/misc/embed_test.go
+++ b/gopls/internal/regtest/misc/embed_test.go
@@ -6,12 +6,10 @@ package misc
import (
"testing"
- . "golang.org/x/tools/internal/lsp/regtest"
- "golang.org/x/tools/internal/testenv"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
)
func TestMissingPatternDiagnostic(t *testing.T) {
- testenv.NeedsGo1Point(t, 16)
const files = `
-- go.mod --
module example.com
@@ -30,8 +28,13 @@ var foo string
`
Run(t, files, func(t *testing.T, env *Env) {
env.OpenFile("x.go")
- env.Await(env.DiagnosticAtRegexpWithMessage("x.go", `NONEXISTENT`, "no matching files found"))
+ env.AfterChange(
+ Diagnostics(
+ env.AtRegexp("x.go", `NONEXISTENT`),
+ WithMessage("no matching files found"),
+ ),
+ )
env.RegexpReplace("x.go", `NONEXISTENT`, "x.go")
- env.Await(EmptyDiagnostics("x.go"))
+ env.AfterChange(NoDiagnostics(ForFile("x.go")))
})
}
diff --git a/gopls/internal/regtest/misc/extract_test.go b/gopls/internal/regtest/misc/extract_test.go
new file mode 100644
index 000000000..23efffbb7
--- /dev/null
+++ b/gopls/internal/regtest/misc/extract_test.go
@@ -0,0 +1,65 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package misc
+
+import (
+ "testing"
+
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
+ "golang.org/x/tools/gopls/internal/lsp/tests/compare"
+
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+)
+
+func TestExtractFunction(t *testing.T) {
+ const files = `
+-- go.mod --
+module mod.com
+
+go 1.12
+-- main.go --
+package main
+
+func Foo() int {
+ a := 5
+ return a
+}
+`
+ Run(t, files, func(t *testing.T, env *Env) {
+ env.OpenFile("main.go")
+ loc := env.RegexpSearch("main.go", `a := 5\n.*return a`)
+ actions, err := env.Editor.CodeAction(env.Ctx, loc, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Find the extract function code action.
+ var extractFunc *protocol.CodeAction
+ for _, action := range actions {
+ if action.Kind == protocol.RefactorExtract && action.Title == "Extract function" {
+ extractFunc = &action
+ break
+ }
+ }
+ if extractFunc == nil {
+ t.Fatal("could not find extract function action")
+ }
+
+ env.ApplyCodeAction(*extractFunc)
+ want := `package main
+
+func Foo() int {
+ return newFunction()
+}
+
+func newFunction() int {
+ a := 5
+ return a
+}
+`
+ if got := env.BufferText("main.go"); got != want {
+ t.Fatalf("TestFillStruct failed:\n%s", compare.Text(want, got))
+ }
+ })
+}
diff --git a/gopls/internal/regtest/misc/failures_test.go b/gopls/internal/regtest/misc/failures_test.go
index 23fccfd62..42aa3721a 100644
--- a/gopls/internal/regtest/misc/failures_test.go
+++ b/gopls/internal/regtest/misc/failures_test.go
@@ -7,12 +7,15 @@ package misc
import (
"testing"
- . "golang.org/x/tools/internal/lsp/regtest"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
+ "golang.org/x/tools/gopls/internal/lsp/tests/compare"
)
-// This test passes (TestHoverOnError in definition_test.go) without
-// the //line directive
+// This is a slight variant of TestHoverOnError in definition_test.go
+// that includes a line directive, which makes no difference since
+// gopls ignores line directives.
func TestHoverFailure(t *testing.T) {
+ t.Skip("line directives //line ")
const mod = `
-- go.mod --
module mod.com
@@ -29,19 +32,27 @@ func main() {
var err error
err.Error()
}`
- WithOptions(SkipLogs()).Run(t, mod, func(t *testing.T, env *Env) {
+ Run(t, mod, func(t *testing.T, env *Env) {
env.OpenFile("main.go")
- content, _ := env.Hover("main.go", env.RegexpSearch("main.go", "Error"))
- // without the //line comment content would be non-nil
- if content != nil {
- t.Fatalf("expected nil hover content for Error")
+ content, _ := env.Hover(env.RegexpSearch("main.go", "Error"))
+ if content == nil {
+ t.Fatalf("Hover('Error') returned nil")
+ }
+ want := "```go\nfunc (error).Error() string\n```"
+ if content.Value != want {
+ t.Fatalf("wrong Hover('Error') content:\n%s", compare.Text(want, content.Value))
}
})
}
-// badPackageDup contains a duplicate definition of the 'a' const.
-// this is from diagnostics_test.go,
-const badPackageDup = `
+// This test demonstrates a case where gopls is not at all confused by
+// line directives, because it completely ignores them.
+func TestFailingDiagnosticClearingOnEdit(t *testing.T) {
+ t.Skip("line directives //line ")
+ // badPackageDup contains a duplicate definition of the 'a' const.
+ // This is a minor variant of TestDiagnosticClearingOnEdit from
+ // diagnostics_test.go, with a line directive, which makes no difference.
+ const badPackageDup = `
-- go.mod --
module mod.com
@@ -56,15 +67,18 @@ package consts
const a = 2
`
-func TestFailingDiagnosticClearingOnEdit(t *testing.T) {
Run(t, badPackageDup, func(t *testing.T, env *Env) {
env.OpenFile("b.go")
- // no diagnostics for any files, but there should be
- env.Await(NoDiagnostics("a.go"), NoDiagnostics("b.go"))
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("b.go", `a = 2`), WithMessage("a redeclared")),
+ Diagnostics(env.AtRegexp("a.go", `a = 1`), WithMessage("other declaration")),
+ )
// Fix the error by editing the const name in b.go to `b`.
env.RegexpReplace("b.go", "(a) = 2", "b")
-
- // The diagnostics that weren't sent above should now be cleared.
+ env.AfterChange(
+ NoDiagnostics(ForFile("a.go")),
+ NoDiagnostics(ForFile("b.go")),
+ )
})
}
diff --git a/gopls/internal/regtest/misc/fix_test.go b/gopls/internal/regtest/misc/fix_test.go
index 8318ae557..7a5e530e3 100644
--- a/gopls/internal/regtest/misc/fix_test.go
+++ b/gopls/internal/regtest/misc/fix_test.go
@@ -7,10 +7,10 @@ package misc
import (
"testing"
- . "golang.org/x/tools/internal/lsp/regtest"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
+ "golang.org/x/tools/gopls/internal/lsp/tests/compare"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/tests"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
)
// A basic test for fillstruct, now that it uses a command.
@@ -34,11 +34,7 @@ func Foo() {
`
Run(t, basic, func(t *testing.T, env *Env) {
env.OpenFile("main.go")
- pos := env.RegexpSearch("main.go", "Info{}").ToProtocolPosition()
- if err := env.Editor.RefactorRewrite(env.Ctx, "main.go", &protocol.Range{
- Start: pos,
- End: pos,
- }); err != nil {
+ if err := env.Editor.RefactorRewrite(env.Ctx, env.RegexpSearch("main.go", "Info{}")); err != nil {
t.Fatal(err)
}
want := `package main
@@ -55,8 +51,8 @@ func Foo() {
}
}
`
- if got := env.Editor.BufferText("main.go"); got != want {
- t.Fatalf("TestFillStruct failed:\n%s", tests.Diff(t, want, got))
+ if got := env.BufferText("main.go"); got != want {
+ t.Fatalf("TestFillStruct failed:\n%s", compare.Text(want, got))
}
})
}
@@ -77,11 +73,11 @@ func Foo() error {
Run(t, files, func(t *testing.T, env *Env) {
env.OpenFile("main.go")
var d protocol.PublishDiagnosticsParams
- env.Await(OnceMet(
+ env.AfterChange(
// The error message here changed in 1.18; "return values" covers both forms.
- env.DiagnosticAtRegexpWithMessage("main.go", `return`, "return values"),
+ Diagnostics(env.AtRegexp("main.go", `return`), WithMessage("return values")),
ReadDiagnostics("main.go", &d),
- ))
+ )
codeActions := env.CodeAction("main.go", d.Diagnostics)
if len(codeActions) != 2 {
t.Fatalf("expected 2 code actions, got %v", len(codeActions))
@@ -102,6 +98,6 @@ func Foo() error {
t.Fatalf("expected fixall code action, got none")
}
env.ApplyQuickFixes("main.go", d.Diagnostics)
- env.Await(EmptyDiagnostics("main.go"))
+ env.AfterChange(NoDiagnostics(ForFile("main.go")))
})
}
diff --git a/gopls/internal/regtest/misc/formatting_test.go b/gopls/internal/regtest/misc/formatting_test.go
index 75d8f6224..ee8098cc9 100644
--- a/gopls/internal/regtest/misc/formatting_test.go
+++ b/gopls/internal/regtest/misc/formatting_test.go
@@ -8,9 +8,9 @@ import (
"strings"
"testing"
- . "golang.org/x/tools/internal/lsp/regtest"
-
- "golang.org/x/tools/internal/lsp/tests"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
+ "golang.org/x/tools/gopls/internal/lsp/tests/compare"
+ "golang.org/x/tools/internal/testenv"
)
const unformattedProgram = `
@@ -34,10 +34,10 @@ func TestFormatting(t *testing.T) {
Run(t, unformattedProgram, func(t *testing.T, env *Env) {
env.OpenFile("main.go")
env.FormatBuffer("main.go")
- got := env.Editor.BufferText("main.go")
+ got := env.BufferText("main.go")
want := env.ReadWorkspaceFile("main.go.golden")
if got != want {
- t.Errorf("unexpected formatting result:\n%s", tests.Diff(t, want, got))
+ t.Errorf("unexpected formatting result:\n%s", compare.Text(want, got))
}
})
}
@@ -56,10 +56,10 @@ func f() {}
Run(t, onelineProgram, func(t *testing.T, env *Env) {
env.OpenFile("a.go")
env.FormatBuffer("a.go")
- got := env.Editor.BufferText("a.go")
+ got := env.BufferText("a.go")
want := env.ReadWorkspaceFile("a.go.formatted")
if got != want {
- t.Errorf("unexpected formatting result:\n%s", tests.Diff(t, want, got))
+ t.Errorf("unexpected formatting result:\n%s", compare.Text(want, got))
}
})
}
@@ -80,10 +80,10 @@ func f() { fmt.Println() }
Run(t, onelineProgramA, func(t *testing.T, env *Env) {
env.OpenFile("a.go")
env.OrganizeImports("a.go")
- got := env.Editor.BufferText("a.go")
+ got := env.BufferText("a.go")
want := env.ReadWorkspaceFile("a.go.imported")
if got != want {
- t.Errorf("unexpected formatting result:\n%s", tests.Diff(t, want, got))
+ t.Errorf("unexpected formatting result:\n%s", compare.Text(want, got))
}
})
}
@@ -101,10 +101,10 @@ func f() {}
Run(t, onelineProgramB, func(t *testing.T, env *Env) {
env.OpenFile("a.go")
env.OrganizeImports("a.go")
- got := env.Editor.BufferText("a.go")
+ got := env.BufferText("a.go")
want := env.ReadWorkspaceFile("a.go.imported")
if got != want {
- t.Errorf("unexpected formatting result:\n%s", tests.Diff(t, want, got))
+ t.Errorf("unexpected formatting result:\n%s", compare.Text(want, got))
}
})
}
@@ -147,10 +147,10 @@ func TestOrganizeImports(t *testing.T) {
Run(t, disorganizedProgram, func(t *testing.T, env *Env) {
env.OpenFile("main.go")
env.OrganizeImports("main.go")
- got := env.Editor.BufferText("main.go")
+ got := env.BufferText("main.go")
want := env.ReadWorkspaceFile("main.go.organized")
if got != want {
- t.Errorf("unexpected formatting result:\n%s", tests.Diff(t, want, got))
+ t.Errorf("unexpected formatting result:\n%s", compare.Text(want, got))
}
})
}
@@ -159,10 +159,10 @@ func TestFormattingOnSave(t *testing.T) {
Run(t, disorganizedProgram, func(t *testing.T, env *Env) {
env.OpenFile("main.go")
env.SaveBuffer("main.go")
- got := env.Editor.BufferText("main.go")
+ got := env.BufferText("main.go")
want := env.ReadWorkspaceFile("main.go.formatted")
if got != want {
- t.Errorf("unexpected formatting result:\n%s", tests.Diff(t, want, got))
+ t.Errorf("unexpected formatting result:\n%s", compare.Text(want, got))
}
})
}
@@ -259,10 +259,10 @@ func main() {
env.CreateBuffer("main.go", crlf)
env.Await(env.DoneWithOpen())
env.OrganizeImports("main.go")
- got := env.Editor.BufferText("main.go")
+ got := env.BufferText("main.go")
got = strings.ReplaceAll(got, "\r\n", "\n") // convert everything to LF for simplicity
if tt.want != got {
- t.Errorf("unexpected content after save:\n%s", tests.Diff(t, tt.want, got))
+ t.Errorf("unexpected content after save:\n%s", compare.Text(tt.want, got))
}
})
})
@@ -303,6 +303,7 @@ func main() {
}
func TestGofumptFormatting(t *testing.T) {
+ testenv.NeedsGo1Point(t, 18)
// Exercise some gofumpt formatting rules:
// - No empty lines following an assignment operator
@@ -352,18 +353,16 @@ const Bar = 42
`
WithOptions(
- EditorConfig{
- Settings: map[string]interface{}{
- "gofumpt": true,
- },
+ Settings{
+ "gofumpt": true,
},
).Run(t, input, func(t *testing.T, env *Env) {
env.OpenFile("foo.go")
env.FormatBuffer("foo.go")
- got := env.Editor.BufferText("foo.go")
+ got := env.BufferText("foo.go")
want := env.ReadWorkspaceFile("foo.go.formatted")
if got != want {
- t.Errorf("unexpected formatting result:\n%s", tests.Diff(t, want, got))
+ t.Errorf("unexpected formatting result:\n%s", compare.Text(want, got))
}
})
}
diff --git a/gopls/internal/regtest/misc/generate_test.go b/gopls/internal/regtest/misc/generate_test.go
index 1dc22d737..547755fd2 100644
--- a/gopls/internal/regtest/misc/generate_test.go
+++ b/gopls/internal/regtest/misc/generate_test.go
@@ -12,12 +12,10 @@ package misc
import (
"testing"
- . "golang.org/x/tools/internal/lsp/regtest"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
)
func TestGenerateProgress(t *testing.T) {
- t.Skipf("skipping flaky test: https://golang.org/issue/49901")
-
const generatedWorkspace = `
-- go.mod --
module fake.test
@@ -40,12 +38,12 @@ func main() {
-- lib1/lib.go --
package lib1
-//go:generate go run ../generate.go lib1
+//` + `go:generate go run ../generate.go lib1
-- lib2/lib.go --
package lib2
-//go:generate go run ../generate.go lib2
+//` + `go:generate go run ../generate.go lib2
-- main.go --
package main
@@ -61,15 +59,14 @@ func main() {
`
Run(t, generatedWorkspace, func(t *testing.T, env *Env) {
- env.Await(
- env.DiagnosticAtRegexp("main.go", "lib1.(Answer)"),
+ env.OnceMet(
+ InitialWorkspaceLoad,
+ Diagnostics(env.AtRegexp("main.go", "lib1.(Answer)")),
)
env.RunGenerate("./lib1")
env.RunGenerate("./lib2")
- env.Await(
- OnceMet(
- env.DoneWithChangeWatchedFiles(),
- EmptyDiagnostics("main.go")),
+ env.AfterChange(
+ NoDiagnostics(ForFile("main.go")),
)
})
}
diff --git a/gopls/internal/regtest/misc/highlight_test.go b/gopls/internal/regtest/misc/highlight_test.go
index affbffd66..8835d608e 100644
--- a/gopls/internal/regtest/misc/highlight_test.go
+++ b/gopls/internal/regtest/misc/highlight_test.go
@@ -8,9 +8,8 @@ import (
"sort"
"testing"
- "golang.org/x/tools/internal/lsp/fake"
- "golang.org/x/tools/internal/lsp/protocol"
- . "golang.org/x/tools/internal/lsp/regtest"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
)
func TestWorkspacePackageHighlight(t *testing.T) {
@@ -31,9 +30,9 @@ func main() {
Run(t, mod, func(t *testing.T, env *Env) {
const file = "main.go"
env.OpenFile(file)
- _, pos := env.GoToDefinition(file, env.RegexpSearch(file, `var (A) string`))
+ loc := env.GoToDefinition(env.RegexpSearch(file, `var (A) string`))
- checkHighlights(env, file, pos, 3)
+ checkHighlights(env, loc, 3)
})
}
@@ -54,10 +53,11 @@ func main() {
Run(t, mod, func(t *testing.T, env *Env) {
env.OpenFile("main.go")
- file, _ := env.GoToDefinition("main.go", env.RegexpSearch("main.go", `fmt\.(Printf)`))
- pos := env.RegexpSearch(file, `func Printf\((format) string`)
+ defLoc := env.GoToDefinition(env.RegexpSearch("main.go", `fmt\.(Printf)`))
+ file := env.Sandbox.Workdir.URIToPath(defLoc.URI)
+ loc := env.RegexpSearch(file, `func Printf\((format) string`)
- checkHighlights(env, file, pos, 2)
+ checkHighlights(env, loc, 2)
})
}
@@ -113,26 +113,28 @@ func main() {}`
).Run(t, mod, func(t *testing.T, env *Env) {
env.OpenFile("main.go")
- file, _ := env.GoToDefinition("main.go", env.RegexpSearch("main.go", `"example.com/global"`))
- pos := env.RegexpSearch(file, `const (A)`)
- checkHighlights(env, file, pos, 4)
+ defLoc := env.GoToDefinition(env.RegexpSearch("main.go", `"example.com/global"`))
+ file := env.Sandbox.Workdir.URIToPath(defLoc.URI)
+ loc := env.RegexpSearch(file, `const (A)`)
+ checkHighlights(env, loc, 4)
- file, _ = env.GoToDefinition("main.go", env.RegexpSearch("main.go", `"example.com/local"`))
- pos = env.RegexpSearch(file, `const (b)`)
- checkHighlights(env, file, pos, 5)
+ defLoc = env.GoToDefinition(env.RegexpSearch("main.go", `"example.com/local"`))
+ file = env.Sandbox.Workdir.URIToPath(defLoc.URI)
+ loc = env.RegexpSearch(file, `const (b)`)
+ checkHighlights(env, loc, 5)
})
}
-func checkHighlights(env *Env, file string, pos fake.Pos, highlightCount int) {
+func checkHighlights(env *Env, loc protocol.Location, highlightCount int) {
t := env.T
t.Helper()
- highlights := env.DocumentHighlight(file, pos)
+ highlights := env.DocumentHighlight(loc)
if len(highlights) != highlightCount {
t.Fatalf("expected %v highlight(s), got %v", highlightCount, len(highlights))
}
- references := env.References(file, pos)
+ references := env.References(loc)
if len(highlights) != len(references) {
t.Fatalf("number of highlights and references is expected to be equal: %v != %v", len(highlights), len(references))
}
diff --git a/gopls/internal/regtest/misc/hover_test.go b/gopls/internal/regtest/misc/hover_test.go
index 04dc740b8..72a6e23eb 100644
--- a/gopls/internal/regtest/misc/hover_test.go
+++ b/gopls/internal/regtest/misc/hover_test.go
@@ -5,11 +5,13 @@
package misc
import (
+ "fmt"
"strings"
"testing"
- "golang.org/x/tools/internal/lsp/fake"
- . "golang.org/x/tools/internal/lsp/regtest"
+ "golang.org/x/tools/gopls/internal/lsp/fake"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
"golang.org/x/tools/internal/testenv"
)
@@ -59,21 +61,22 @@ func main() {
ProxyFiles(proxy),
).Run(t, mod, func(t *testing.T, env *Env) {
env.OpenFile("main.go")
- mixedPos := env.RegexpSearch("main.go", "Mixed")
- got, _ := env.Hover("main.go", mixedPos)
+ mixedLoc := env.RegexpSearch("main.go", "Mixed")
+ got, _ := env.Hover(mixedLoc)
if !strings.Contains(got.Value, "unexported") {
t.Errorf("Workspace hover: missing expected field 'unexported'. Got:\n%q", got.Value)
}
- cacheFile, _ := env.GoToDefinition("main.go", mixedPos)
- argPos := env.RegexpSearch(cacheFile, "printMixed.*(Mixed)")
- got, _ = env.Hover(cacheFile, argPos)
+ cacheLoc := env.GoToDefinition(mixedLoc)
+ cacheFile := env.Sandbox.Workdir.URIToPath(cacheLoc.URI)
+ argLoc := env.RegexpSearch(cacheFile, "printMixed.*(Mixed)")
+ got, _ = env.Hover(argLoc)
if !strings.Contains(got.Value, "unexported") {
t.Errorf("Non-workspace hover: missing expected field 'unexported'. Got:\n%q", got.Value)
}
- exportedFieldPos := env.RegexpSearch("main.go", "Exported")
- got, _ = env.Hover("main.go", exportedFieldPos)
+ exportedFieldLoc := env.RegexpSearch("main.go", "Exported")
+ got, _ = env.Hover(exportedFieldLoc)
if !strings.Contains(got.Value, "comment") {
t.Errorf("Workspace hover: missing comment for field 'Exported'. Got:\n%q", got.Value)
}
@@ -81,7 +84,12 @@ func main() {
}
func TestHoverIntLiteral(t *testing.T) {
- testenv.NeedsGo1Point(t, 13)
+ // TODO(rfindley): this behavior doesn't actually make sense for vars. It is
+ // misleading to format their value when it is (of course) variable.
+ //
+ // Instead, we should allow hovering on numeric literals.
+ t.Skip("golang/go#58220: broken due to new hover logic")
+
const source = `
-- main.go --
package main
@@ -98,13 +106,13 @@ func main() {
Run(t, source, func(t *testing.T, env *Env) {
env.OpenFile("main.go")
hexExpected := "58190"
- got, _ := env.Hover("main.go", env.RegexpSearch("main.go", "hex"))
+ got, _ := env.Hover(env.RegexpSearch("main.go", "hex"))
if got != nil && !strings.Contains(got.Value, hexExpected) {
t.Errorf("Hover: missing expected field '%s'. Got:\n%q", hexExpected, got.Value)
}
binExpected := "73"
- got, _ = env.Hover("main.go", env.RegexpSearch("main.go", "bigBin"))
+ got, _ = env.Hover(env.RegexpSearch("main.go", "bigBin"))
if got != nil && !strings.Contains(got.Value, binExpected) {
t.Errorf("Hover: missing expected field '%s'. Got:\n%q", binExpected, got.Value)
}
@@ -113,7 +121,8 @@ func main() {
// Tests that hovering does not trigger the panic in golang/go#48249.
func TestPanicInHoverBrokenCode(t *testing.T) {
- testenv.NeedsGo1Point(t, 13)
+ // Note: this test can not be expressed as a marker test, as it must use
+ // content without a trailing newline.
const source = `
-- main.go --
package main
@@ -121,7 +130,7 @@ package main
type Example struct`
Run(t, source, func(t *testing.T, env *Env) {
env.OpenFile("main.go")
- env.Editor.Hover(env.Ctx, "main.go", env.RegexpSearch("main.go", "Example"))
+ env.Editor.Hover(env.Ctx, env.RegexpSearch("main.go", "Example"))
})
}
@@ -137,6 +146,239 @@ package main
Run(t, files, func(t *testing.T, env *Env) {
env.OpenFile("main.go")
env.EditBuffer("main.go", fake.NewEdit(0, 0, 1, 0, "package main\nfunc main() {\nconst x = `\nfoo\n`\n}"))
- env.Editor.Hover(env.Ctx, "main.go", env.RegexpSearch("main.go", "foo"))
+ env.Editor.Hover(env.Ctx, env.RegexpSearch("main.go", "foo"))
+ })
+}
+
+func TestHoverImport(t *testing.T) {
+ const packageDoc1 = "Package lib1 hover documentation"
+ const packageDoc2 = "Package lib2 hover documentation"
+ tests := []struct {
+ hoverPackage string
+ want string
+ wantError bool
+ }{
+ {
+ "mod.com/lib1",
+ packageDoc1,
+ false,
+ },
+ {
+ "mod.com/lib2",
+ packageDoc2,
+ false,
+ },
+ {
+ "mod.com/lib3",
+ "",
+ false,
+ },
+ {
+ "mod.com/lib4",
+ "",
+ true,
+ },
+ }
+ source := fmt.Sprintf(`
+-- go.mod --
+module mod.com
+
+go 1.12
+-- lib1/a.go --
+// %s
+package lib1
+
+const C = 1
+
+-- lib1/b.go --
+package lib1
+
+const D = 1
+
+-- lib2/a.go --
+// %s
+package lib2
+
+const E = 1
+
+-- lib3/a.go --
+package lib3
+
+const F = 1
+
+-- main.go --
+package main
+
+import (
+ "mod.com/lib1"
+ "mod.com/lib2"
+ "mod.com/lib3"
+ "mod.com/lib4"
+)
+
+func main() {
+ println("Hello")
+}
+ `, packageDoc1, packageDoc2)
+ Run(t, source, func(t *testing.T, env *Env) {
+ env.OpenFile("main.go")
+ for _, test := range tests {
+ got, _, err := env.Editor.Hover(env.Ctx, env.RegexpSearch("main.go", test.hoverPackage))
+ if test.wantError {
+ if err == nil {
+ t.Errorf("Hover(%q) succeeded unexpectedly", test.hoverPackage)
+ }
+ } else if !strings.Contains(got.Value, test.want) {
+ t.Errorf("Hover(%q): got:\n%q\nwant:\n%q", test.hoverPackage, got.Value, test.want)
+ }
+ }
+ })
+}
+
+// for x/tools/gopls: unhandled named anchor on the hover #57048
+func TestHoverTags(t *testing.T) {
+ const source = `
+-- go.mod --
+module mod.com
+
+go 1.19
+
+-- lib/a.go --
+
+// variety of execution modes.
+//
+// # Test package setup
+//
+// The regression test package uses a couple of uncommon patterns to reduce
+package lib
+
+-- a.go --
+ package main
+ import "mod.com/lib"
+
+ const A = 1
+
+}
+`
+ Run(t, source, func(t *testing.T, env *Env) {
+ t.Run("tags", func(t *testing.T) {
+ env.OpenFile("a.go")
+ z := env.RegexpSearch("a.go", "lib")
+ t.Logf("%#v", z)
+ got, _ := env.Hover(env.RegexpSearch("a.go", "lib"))
+ if strings.Contains(got.Value, "{#hdr-") {
+ t.Errorf("Hover: got {#hdr- tag:\n%q", got)
+ }
+ })
+ })
+}
+
+// This is a regression test for Go issue #57625.
+func TestHoverModMissingModuleStmt(t *testing.T) {
+ const source = `
+-- go.mod --
+go 1.16
+`
+ Run(t, source, func(t *testing.T, env *Env) {
+ env.OpenFile("go.mod")
+ env.Hover(env.RegexpSearch("go.mod", "go")) // no panic
+ })
+}
+
+func TestHoverCompletionMarkdown(t *testing.T) {
+ testenv.NeedsGo1Point(t, 19)
+ const source = `
+-- go.mod --
+module mod.com
+go 1.19
+-- main.go --
+package main
+// Just says [hello].
+//
+// [hello]: https://en.wikipedia.org/wiki/Hello
+func Hello() string {
+ Hello() //Here
+ return "hello"
+}
+`
+ Run(t, source, func(t *testing.T, env *Env) {
+ // Hover, Completion, and SignatureHelp should all produce markdown
+ // check that the markdown for SignatureHelp and Completion are
+ // the same, and contained in that for Hover (up to trailing \n)
+ env.OpenFile("main.go")
+ loc := env.RegexpSearch("main.go", "func (Hello)")
+ hover, _ := env.Hover(loc)
+ hoverContent := hover.Value
+
+ loc = env.RegexpSearch("main.go", "//Here")
+ loc.Range.Start.Character -= 3 // Hello(_) //Here
+ completions := env.Completion(loc)
+ signatures := env.SignatureHelp(loc)
+
+ if len(completions.Items) != 1 {
+ t.Errorf("got %d completions, expected 1", len(completions.Items))
+ }
+ if len(signatures.Signatures) != 1 {
+ t.Errorf("got %d signatures, expected 1", len(signatures.Signatures))
+ }
+ item := completions.Items[0].Documentation.Value
+ var itemContent string
+ if x, ok := item.(protocol.MarkupContent); !ok || x.Kind != protocol.Markdown {
+ t.Fatalf("%#v is not markdown", item)
+ } else {
+ itemContent = strings.Trim(x.Value, "\n")
+ }
+ sig := signatures.Signatures[0].Documentation.Value
+ var sigContent string
+ if x, ok := sig.(protocol.MarkupContent); !ok || x.Kind != protocol.Markdown {
+ t.Fatalf("%#v is not markdown", item)
+ } else {
+ sigContent = x.Value
+ }
+ if itemContent != sigContent {
+ t.Errorf("item:%q not sig:%q", itemContent, sigContent)
+ }
+ if !strings.Contains(hoverContent, itemContent) {
+ t.Errorf("hover:%q does not containt sig;%q", hoverContent, sigContent)
+ }
})
}
+
+// Test that the generated markdown contains links for Go references.
+// https://github.com/golang/go/issues/58352
+func TestHoverLinks(t *testing.T) {
+ testenv.NeedsGo1Point(t, 19)
+ const input = `
+-- go.mod --
+go 1.19
+module mod.com
+-- main.go --
+package main
+// [fmt]
+var A int
+// [fmt.Println]
+var B int
+// [golang.org/x/tools/go/packages.Package.String]
+var C int
+`
+ var tests = []struct {
+ pat string
+ ans string
+ }{
+ {"A", "fmt"},
+ {"B", "fmt#Println"},
+ {"C", "golang.org/x/tools/go/packages#Package.String"},
+ }
+ for _, test := range tests {
+ Run(t, input, func(t *testing.T, env *Env) {
+ env.OpenFile("main.go")
+ loc := env.RegexpSearch("main.go", test.pat)
+ hover, _ := env.Hover(loc)
+ hoverContent := hover.Value
+ want := fmt.Sprintf("%s/%s", "https://pkg.go.dev", test.ans)
+ if !strings.Contains(hoverContent, want) {
+ t.Errorf("hover:%q does not contain link %q", hoverContent, want)
+ }
+ })
+ }
+}
diff --git a/gopls/internal/regtest/misc/import_test.go b/gopls/internal/regtest/misc/import_test.go
index d5b6bcf43..30986ba50 100644
--- a/gopls/internal/regtest/misc/import_test.go
+++ b/gopls/internal/regtest/misc/import_test.go
@@ -8,10 +8,10 @@ import (
"testing"
"github.com/google/go-cmp/cmp"
- "golang.org/x/tools/internal/lsp/command"
- "golang.org/x/tools/internal/lsp/protocol"
- . "golang.org/x/tools/internal/lsp/regtest"
- "golang.org/x/tools/internal/lsp/tests"
+ "golang.org/x/tools/gopls/internal/lsp/command"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
+ "golang.org/x/tools/gopls/internal/lsp/tests/compare"
)
func TestAddImport(t *testing.T) {
@@ -49,9 +49,9 @@ func main() {
Command: "gopls.add_import",
Arguments: cmd.Arguments,
}, nil)
- got := env.Editor.BufferText("main.go")
+ got := env.BufferText("main.go")
if got != want {
- t.Fatalf("gopls.add_import failed\n%s", tests.Diff(t, want, got))
+ t.Fatalf("gopls.add_import failed\n%s", compare.Text(want, got))
}
})
}
diff --git a/gopls/internal/regtest/misc/imports_test.go b/gopls/internal/regtest/misc/imports_test.go
index 4ae2be6bf..bea955220 100644
--- a/gopls/internal/regtest/misc/imports_test.go
+++ b/gopls/internal/regtest/misc/imports_test.go
@@ -11,9 +11,9 @@ import (
"strings"
"testing"
- . "golang.org/x/tools/internal/lsp/regtest"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
- "golang.org/x/tools/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
"golang.org/x/tools/internal/testenv"
)
@@ -47,7 +47,7 @@ func TestZ(t *testing.T) {
Run(t, needs, func(t *testing.T, env *Env) {
env.CreateBuffer("a_test.go", ntest)
env.SaveBuffer("a_test.go")
- got := env.Editor.BufferText("a_test.go")
+ got := env.BufferText("a_test.go")
if want != got {
t.Errorf("got\n%q, wanted\n%q", got, want)
}
@@ -76,7 +76,7 @@ func main() {
env.OrganizeImports("main.go")
actions := env.CodeAction("main.go", nil)
if len(actions) > 0 {
- got := env.Editor.BufferText("main.go")
+ got := env.BufferText("main.go")
t.Errorf("unexpected actions %#v", actions)
if got == vim1 {
t.Errorf("no changes")
@@ -146,23 +146,21 @@ import "example.com/x"
var _, _ = x.X, y.Y
`
- testenv.NeedsGo1Point(t, 15)
-
modcache, err := ioutil.TempDir("", "TestGOMODCACHE-modcache")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(modcache)
- editorConfig := EditorConfig{Env: map[string]string{"GOMODCACHE": modcache}}
WithOptions(
- editorConfig,
+ EnvVars{"GOMODCACHE": modcache},
ProxyFiles(proxy),
).Run(t, files, func(t *testing.T, env *Env) {
env.OpenFile("main.go")
- env.Await(env.DiagnosticAtRegexp("main.go", `y.Y`))
+ env.AfterChange(Diagnostics(env.AtRegexp("main.go", `y.Y`)))
env.SaveBuffer("main.go")
- env.Await(EmptyDiagnostics("main.go"))
- path, _ := env.GoToDefinition("main.go", env.RegexpSearch("main.go", `y.(Y)`))
+ env.AfterChange(NoDiagnostics(ForFile("main.go")))
+ loc := env.GoToDefinition(env.RegexpSearch("main.go", `y.(Y)`))
+ path := env.Sandbox.Workdir.URIToPath(loc.URI)
if !strings.HasPrefix(path, filepath.ToSlash(modcache)) {
t.Errorf("found module dependency outside of GOMODCACHE: got %v, wanted subdir of %v", path, filepath.ToSlash(modcache))
}
@@ -202,15 +200,59 @@ func TestA(t *testing.T) {
Run(t, pkg, func(t *testing.T, env *Env) {
env.OpenFile("a/a.go")
var d protocol.PublishDiagnosticsParams
- env.Await(
- OnceMet(
- env.DiagnosticAtRegexp("a/a.go", "os.Stat"),
- ReadDiagnostics("a/a.go", &d),
- ),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("a/a.go", "os.Stat")),
+ ReadDiagnostics("a/a.go", &d),
)
env.ApplyQuickFixes("a/a.go", d.Diagnostics)
- env.Await(
- EmptyDiagnostics("a/a.go"),
+ env.AfterChange(
+ NoDiagnostics(ForFile("a/a.go")),
)
})
}
+
+// Test for golang/go#52784
+func TestGoWorkImports(t *testing.T) {
+ testenv.NeedsGo1Point(t, 18)
+ const pkg = `
+-- go.work --
+go 1.19
+
+use (
+ ./caller
+ ./mod
+)
+-- caller/go.mod --
+module caller.com
+
+go 1.18
+
+require mod.com v0.0.0
+
+replace mod.com => ../mod
+-- caller/caller.go --
+package main
+
+func main() {
+ a.Test()
+}
+-- mod/go.mod --
+module mod.com
+
+go 1.18
+-- mod/a/a.go --
+package a
+
+func Test() {
+}
+`
+ Run(t, pkg, func(t *testing.T, env *Env) {
+ env.OpenFile("caller/caller.go")
+ env.AfterChange(Diagnostics(env.AtRegexp("caller/caller.go", "a.Test")))
+
+ // Saving caller.go should trigger goimports, which should find a.Test in
+ // the mod.com module, thanks to the go.work file.
+ env.SaveBuffer("caller/caller.go")
+ env.AfterChange(NoDiagnostics(ForFile("caller/caller.go")))
+ })
+}
diff --git a/gopls/internal/regtest/misc/leak_test.go b/gopls/internal/regtest/misc/leak_test.go
new file mode 100644
index 000000000..586ffcc41
--- /dev/null
+++ b/gopls/internal/regtest/misc/leak_test.go
@@ -0,0 +1,89 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package misc
+
+import (
+ "context"
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+ "golang.org/x/tools/gopls/internal/hooks"
+ "golang.org/x/tools/gopls/internal/lsp/cache"
+ "golang.org/x/tools/gopls/internal/lsp/debug"
+ "golang.org/x/tools/gopls/internal/lsp/fake"
+ "golang.org/x/tools/gopls/internal/lsp/lsprpc"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
+ "golang.org/x/tools/internal/jsonrpc2"
+ "golang.org/x/tools/internal/jsonrpc2/servertest"
+)
+
+// Test for golang/go#57222.
+func TestCacheLeak(t *testing.T) {
+ // TODO(rfindley): either fix this test with additional instrumentation, or
+ // delete it.
+ t.Skip("This test races with cache eviction.")
+ const files = `-- a.go --
+package a
+
+func _() {
+ println("1")
+}
+`
+ c := cache.New(nil)
+ env := setupEnv(t, files, c)
+ env.Await(InitialWorkspaceLoad)
+ env.OpenFile("a.go")
+
+ // Make a couple edits to stabilize cache state.
+ //
+ // For some reason, after only one edit we're left with two parsed files
+ // (perhaps because something had to ParseHeader). If this test proves flaky,
+ // we'll need to investigate exactly what is causing various parse modes to
+ // be present (or rewrite the test to be more tolerant, for example make ~100
+ // modifications and assert that we're within a few of where we're started).
+ env.RegexpReplace("a.go", "1", "2")
+ env.RegexpReplace("a.go", "2", "3")
+ env.AfterChange()
+
+ // Capture cache state, make an arbitrary change, and wait for gopls to do
+ // its work. Afterward, we should have the exact same number of parsed
+ before := c.MemStats()
+ env.RegexpReplace("a.go", "3", "4")
+ env.AfterChange()
+ after := c.MemStats()
+
+ if diff := cmp.Diff(before, after); diff != "" {
+ t.Errorf("store objects differ after change (-before +after)\n%s", diff)
+ }
+}
+
+// setupEnv creates a new sandbox environment for editing the txtar encoded
+// content of files. It uses a new gopls instance backed by the Cache c.
+func setupEnv(t *testing.T, files string, c *cache.Cache) *Env {
+ ctx := debug.WithInstance(context.Background(), "", "off")
+ server := lsprpc.NewStreamServer(c, false, hooks.Options)
+ ts := servertest.NewPipeServer(server, jsonrpc2.NewRawStream)
+ s, err := fake.NewSandbox(&fake.SandboxConfig{
+ Files: fake.UnpackTxt(files),
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ a := NewAwaiter(s.Workdir)
+ const skipApplyEdits = false
+ editor, err := fake.NewEditor(s, fake.EditorConfig{}).Connect(ctx, ts, a.Hooks(), skipApplyEdits)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ return &Env{
+ T: t,
+ Ctx: ctx,
+ Editor: editor,
+ Sandbox: s,
+ Awaiter: a,
+ }
+}
diff --git a/gopls/internal/regtest/misc/link_test.go b/gopls/internal/regtest/misc/link_test.go
index daea74250..8a64c54e2 100644
--- a/gopls/internal/regtest/misc/link_test.go
+++ b/gopls/internal/regtest/misc/link_test.go
@@ -8,13 +8,10 @@ import (
"strings"
"testing"
- . "golang.org/x/tools/internal/lsp/regtest"
-
- "golang.org/x/tools/internal/testenv"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
)
func TestHoverAndDocumentLink(t *testing.T) {
- testenv.NeedsGo1Point(t, 13)
const program = `
-- go.mod --
module mod.test
@@ -31,6 +28,8 @@ package main
import "import.test/pkg"
func main() {
+ // Issue 43990: this is not a link that most users can open from an LSP
+ // client: mongodb://not.a.link.com
println(pkg.Hello)
}`
@@ -50,36 +49,38 @@ const Hello = "Hello"
env.OpenFile("main.go")
env.OpenFile("go.mod")
- modLink := "https://pkg.go.dev/mod/import.test@v1.2.3?utm_source=gopls"
- pkgLink := "https://pkg.go.dev/import.test@v1.2.3/pkg?utm_source=gopls"
+ modLink := "https://pkg.go.dev/mod/import.test@v1.2.3"
+ pkgLink := "https://pkg.go.dev/import.test@v1.2.3/pkg"
// First, check that we get the expected links via hover and documentLink.
- content, _ := env.Hover("main.go", env.RegexpSearch("main.go", "pkg.Hello"))
+ content, _ := env.Hover(env.RegexpSearch("main.go", "pkg.Hello"))
if content == nil || !strings.Contains(content.Value, pkgLink) {
t.Errorf("hover: got %v in main.go, want contains %q", content, pkgLink)
}
- content, _ = env.Hover("go.mod", env.RegexpSearch("go.mod", "import.test"))
+ content, _ = env.Hover(env.RegexpSearch("go.mod", "import.test"))
if content == nil || !strings.Contains(content.Value, pkgLink) {
t.Errorf("hover: got %v in go.mod, want contains %q", content, pkgLink)
}
links := env.DocumentLink("main.go")
if len(links) != 1 || links[0].Target != pkgLink {
- t.Errorf("documentLink: got %v for main.go, want link to %q", links, pkgLink)
+ t.Errorf("documentLink: got links %+v for main.go, want one link with target %q", links, pkgLink)
}
links = env.DocumentLink("go.mod")
if len(links) != 1 || links[0].Target != modLink {
- t.Errorf("documentLink: got %v for go.mod, want link to %q", links, modLink)
+ t.Errorf("documentLink: got links %+v for go.mod, want one link with target %q", links, modLink)
}
// Then change the environment to make these links private.
- env.ChangeEnv(map[string]string{"GOPRIVATE": "import.test"})
+ cfg := env.Editor.Config()
+ cfg.Env = map[string]string{"GOPRIVATE": "import.test"}
+ env.ChangeConfiguration(cfg)
// Finally, verify that the links are gone.
- content, _ = env.Hover("main.go", env.RegexpSearch("main.go", "pkg.Hello"))
+ content, _ = env.Hover(env.RegexpSearch("main.go", "pkg.Hello"))
if content == nil || strings.Contains(content.Value, pkgLink) {
t.Errorf("hover: got %v in main.go, want non-empty hover without %q", content, pkgLink)
}
- content, _ = env.Hover("go.mod", env.RegexpSearch("go.mod", "import.test"))
+ content, _ = env.Hover(env.RegexpSearch("go.mod", "import.test"))
if content == nil || strings.Contains(content.Value, modLink) {
t.Errorf("hover: got %v in go.mod, want contains %q", content, modLink)
}
diff --git a/gopls/internal/regtest/misc/misc_test.go b/gopls/internal/regtest/misc/misc_test.go
index 3694b07fc..12aea697c 100644
--- a/gopls/internal/regtest/misc/misc_test.go
+++ b/gopls/internal/regtest/misc/misc_test.go
@@ -8,9 +8,11 @@ import (
"testing"
"golang.org/x/tools/gopls/internal/hooks"
- "golang.org/x/tools/internal/lsp/regtest"
+ "golang.org/x/tools/gopls/internal/lsp/regtest"
+ "golang.org/x/tools/internal/bug"
)
func TestMain(m *testing.M) {
+ bug.PanicOnBugs = true
regtest.Main(m, hooks.Options)
}
diff --git a/gopls/internal/regtest/misc/multiple_adhoc_test.go b/gopls/internal/regtest/misc/multiple_adhoc_test.go
index 5f803e4e3..981b74efc 100644
--- a/gopls/internal/regtest/misc/multiple_adhoc_test.go
+++ b/gopls/internal/regtest/misc/multiple_adhoc_test.go
@@ -7,7 +7,7 @@ package misc
import (
"testing"
- . "golang.org/x/tools/internal/lsp/regtest"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
)
func TestMultipleAdHocPackages(t *testing.T) {
@@ -30,14 +30,14 @@ func main() () {
}
`, func(t *testing.T, env *Env) {
env.OpenFile("a/a.go")
- if list := env.Completion("a/a.go", env.RegexpSearch("a/a.go", "Println")); list == nil || len(list.Items) == 0 {
+ if list := env.Completion(env.RegexpSearch("a/a.go", "Println")); list == nil || len(list.Items) == 0 {
t.Fatal("expected completions, got none")
}
env.OpenFile("a/b.go")
- if list := env.Completion("a/b.go", env.RegexpSearch("a/b.go", "Println")); list == nil || len(list.Items) == 0 {
+ if list := env.Completion(env.RegexpSearch("a/b.go", "Println")); list == nil || len(list.Items) == 0 {
t.Fatal("expected completions, got none")
}
- if list := env.Completion("a/a.go", env.RegexpSearch("a/a.go", "Println")); list == nil || len(list.Items) == 0 {
+ if list := env.Completion(env.RegexpSearch("a/a.go", "Println")); list == nil || len(list.Items) == 0 {
t.Fatal("expected completions, got none")
}
})
diff --git a/gopls/internal/regtest/misc/references_test.go b/gopls/internal/regtest/misc/references_test.go
index 768251680..e1f5d8e05 100644
--- a/gopls/internal/regtest/misc/references_test.go
+++ b/gopls/internal/regtest/misc/references_test.go
@@ -5,9 +5,15 @@
package misc
import (
+ "fmt"
+ "os"
+ "sort"
+ "strings"
"testing"
- . "golang.org/x/tools/internal/lsp/regtest"
+ "github.com/google/go-cmp/cmp"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
)
func TestStdlibReferences(t *testing.T) {
@@ -28,12 +34,13 @@ func main() {
Run(t, files, func(t *testing.T, env *Env) {
env.OpenFile("main.go")
- file, pos := env.GoToDefinition("main.go", env.RegexpSearch("main.go", `fmt.(Print)`))
- refs, err := env.Editor.References(env.Ctx, file, pos)
+ loc := env.GoToDefinition(env.RegexpSearch("main.go", `fmt.(Print)`))
+ refs, err := env.Editor.References(env.Ctx, loc)
if err != nil {
t.Fatal(err)
}
if len(refs) != 2 {
+ // TODO(adonovan): make this assertion less maintainer-hostile.
t.Fatalf("got %v reference(s), want 2", len(refs))
}
// The first reference is guaranteed to be the definition.
@@ -43,8 +50,10 @@ func main() {
})
}
-// This reproduces and tests golang/go#48400.
-func TestReferencesPanicOnError(t *testing.T) {
+// This is a regression test for golang/go#48400 (a panic).
+func TestReferencesOnErrorMethod(t *testing.T) {
+ // Ideally this would actually return the correct answer,
+ // instead of merely failing gracefully.
const files = `
-- go.mod --
module mod.com
@@ -70,14 +79,321 @@ func _() {
`
Run(t, files, func(t *testing.T, env *Env) {
env.OpenFile("main.go")
- file, pos := env.GoToDefinition("main.go", env.RegexpSearch("main.go", `Error`))
- refs, err := env.Editor.References(env.Ctx, file, pos)
- if err == nil {
- t.Fatalf("expected error for references, instead got %v", refs)
+ loc := env.GoToDefinition(env.RegexpSearch("main.go", `Error`))
+ refs, err := env.Editor.References(env.Ctx, loc)
+ if err != nil {
+ t.Fatalf("references on (*s).Error failed: %v", err)
+ }
+ // TODO(adonovan): this test is crying out for marker support in regtests.
+ var buf strings.Builder
+ for _, ref := range refs {
+ fmt.Fprintf(&buf, "%s %s\n", env.Sandbox.Workdir.URIToPath(ref.URI), ref.Range)
}
- wantErr := "no position for func (error).Error() string"
- if err.Error() != wantErr {
- t.Fatalf("expected error with message %s, instead got %s", wantErr, err.Error())
+ got := buf.String()
+ want := "main.go 8:10-8:15\n" + // (*s).Error decl
+ "main.go 14:7-14:12\n" // s.Error() call
+ if diff := cmp.Diff(want, got); diff != "" {
+ t.Errorf("unexpected references on (*s).Error (-want +got):\n%s", diff)
+ }
+ })
+}
+
+func TestPackageReferences(t *testing.T) {
+ tests := []struct {
+ packageName string
+ wantRefCount int
+ wantFiles []string
+ }{
+ {
+ "lib1",
+ 3,
+ []string{
+ "main.go",
+ "lib1/a.go",
+ "lib1/b.go",
+ },
+ },
+ {
+ "lib2",
+ 2,
+ []string{
+ "main.go",
+ "lib2/a.go",
+ },
+ },
+ }
+
+ const files = `
+-- go.mod --
+module mod.com
+
+go 1.18
+-- lib1/a.go --
+package lib1
+
+const A = 1
+
+-- lib1/b.go --
+package lib1
+
+const B = 1
+
+-- lib2/a.go --
+package lib2
+
+const C = 1
+
+-- main.go --
+package main
+
+import (
+ "mod.com/lib1"
+ "mod.com/lib2"
+)
+
+func main() {
+ println("Hello")
+}
+`
+ Run(t, files, func(t *testing.T, env *Env) {
+ for _, test := range tests {
+ file := fmt.Sprintf("%s/a.go", test.packageName)
+ env.OpenFile(file)
+ loc := env.RegexpSearch(file, test.packageName)
+ refs := env.References(loc)
+ if len(refs) != test.wantRefCount {
+ // TODO(adonovan): make this assertion less maintainer-hostile.
+ t.Fatalf("got %v reference(s), want %d", len(refs), test.wantRefCount)
+ }
+ var refURIs []string
+ for _, ref := range refs {
+ refURIs = append(refURIs, string(ref.URI))
+ }
+ for _, base := range test.wantFiles {
+ hasBase := false
+ for _, ref := range refURIs {
+ if strings.HasSuffix(ref, base) {
+ hasBase = true
+ break
+ }
+ }
+ if !hasBase {
+ t.Fatalf("got [%v], want reference ends with \"%v\"", strings.Join(refURIs, ","), base)
+ }
+ }
}
})
}
+
+// Test for golang/go#43144.
+//
+// Verify that we search for references and implementations in intermediate
+// test variants.
+func TestReferencesInTestVariants(t *testing.T) {
+ const files = `
+-- go.mod --
+module foo.mod
+
+go 1.12
+-- foo/foo.go --
+package foo
+
+import "foo.mod/bar"
+
+const Foo = 42
+
+type T int
+type InterfaceM interface{ M() }
+type InterfaceF interface{ F() }
+
+func _() {
+ _ = bar.Blah
+}
+
+-- foo/foo_test.go --
+package foo
+
+type Fer struct{}
+func (Fer) F() {}
+
+-- bar/bar.go --
+package bar
+
+var Blah = 123
+
+-- bar/bar_test.go --
+package bar
+
+type Mer struct{}
+func (Mer) M() {}
+
+func TestBar() {
+ _ = Blah
+}
+-- bar/bar_x_test.go --
+package bar_test
+
+import (
+ "foo.mod/bar"
+ "foo.mod/foo"
+)
+
+type Mer struct{}
+func (Mer) M() {}
+
+func _() {
+ _ = bar.Blah
+ _ = foo.Foo
+}
+`
+
+ Run(t, files, func(t *testing.T, env *Env) {
+ env.OpenFile("foo/foo.go")
+
+ // Helper to map locations relative file paths.
+ fileLocations := func(locs []protocol.Location) []string {
+ var got []string
+ for _, loc := range locs {
+ got = append(got, env.Sandbox.Workdir.URIToPath(loc.URI))
+ }
+ sort.Strings(got)
+ return got
+ }
+
+ refTests := []struct {
+ re string
+ wantRefs []string
+ }{
+ // Blah is referenced:
+ // - inside the foo.mod/bar (ordinary) package
+ // - inside the foo.mod/bar [foo.mod/bar.test] test variant package
+ // - from the foo.mod/bar_test [foo.mod/bar.test] x_test package
+ // - from the foo.mod/foo package
+ {"Blah", []string{"bar/bar.go", "bar/bar_test.go", "bar/bar_x_test.go", "foo/foo.go"}},
+
+ // Foo is referenced in bar_x_test.go via the intermediate test variant
+ // foo.mod/foo [foo.mod/bar.test].
+ {"Foo", []string{"bar/bar_x_test.go", "foo/foo.go"}},
+ }
+
+ for _, test := range refTests {
+ loc := env.RegexpSearch("foo/foo.go", test.re)
+ refs := env.References(loc)
+
+ got := fileLocations(refs)
+ if diff := cmp.Diff(test.wantRefs, got); diff != "" {
+ t.Errorf("References(%q) returned unexpected diff (-want +got):\n%s", test.re, diff)
+ }
+ }
+
+ implTests := []struct {
+ re string
+ wantImpls []string
+ }{
+ // InterfaceM is implemented both in foo.mod/bar [foo.mod/bar.test] (which
+ // doesn't import foo), and in foo.mod/bar_test [foo.mod/bar.test], which
+ // imports the test variant of foo.
+ {"InterfaceM", []string{"bar/bar_test.go", "bar/bar_x_test.go"}},
+
+ // A search within the ordinary package to should find implementations
+ // (Fer) within the augmented test package.
+ {"InterfaceF", []string{"foo/foo_test.go"}},
+ }
+
+ for _, test := range implTests {
+ loc := env.RegexpSearch("foo/foo.go", test.re)
+ impls := env.Implementations(loc)
+
+ got := fileLocations(impls)
+ if diff := cmp.Diff(test.wantImpls, got); diff != "" {
+ t.Errorf("Implementations(%q) returned unexpected diff (-want +got):\n%s", test.re, diff)
+ }
+ }
+ })
+}
+
+// This is a regression test for Issue #56169, in which interface
+// implementations in vendored modules were not found. The actual fix
+// was the same as for #55995; see TestVendoringInvalidatesMetadata.
+func TestImplementationsInVendor(t *testing.T) {
+ t.Skip("golang/go#56169: file watching does not capture vendor dirs")
+
+ const proxy = `
+-- other.com/b@v1.0.0/go.mod --
+module other.com/b
+go 1.14
+
+-- other.com/b@v1.0.0/b.go --
+package b
+type B int
+func (B) F() {}
+`
+ const src = `
+-- go.mod --
+module example.com/a
+go 1.14
+require other.com/b v1.0.0
+
+-- go.sum --
+other.com/b v1.0.0 h1:9WyCKS+BLAMRQM0CegP6zqP2beP+ShTbPaARpNY31II=
+other.com/b v1.0.0/go.mod h1:TgHQFucl04oGT+vrUm/liAzukYHNxCwKNkQZEyn3m9g=
+
+-- a.go --
+package a
+import "other.com/b"
+type I interface { F() }
+var _ b.B
+
+`
+ WithOptions(
+ ProxyFiles(proxy),
+ Modes(Default), // fails in 'experimental' mode
+ ).Run(t, src, func(t *testing.T, env *Env) {
+ // Enable to debug go.sum mismatch, which may appear as
+ // "module lookup disabled by GOPROXY=off", confusingly.
+ if false {
+ env.DumpGoSum(".")
+ }
+
+ checkVendor := func(locs []protocol.Location, wantVendor bool) {
+ if len(locs) != 1 {
+ t.Errorf("got %d locations, want 1", len(locs))
+ } else if strings.Contains(string(locs[0].URI), "/vendor/") != wantVendor {
+ t.Errorf("got location %s, wantVendor=%t", locs[0], wantVendor)
+ }
+ }
+
+ env.OpenFile("a.go")
+ refLoc := env.RegexpSearch("a.go", "I") // find "I" reference
+
+ // Initially, a.I has one implementation b.B in
+ // the module cache, not the vendor tree.
+ checkVendor(env.Implementations(refLoc), false)
+
+ // Run 'go mod vendor' outside the editor.
+ if err := env.Sandbox.RunGoCommand(env.Ctx, ".", "mod", []string{"vendor"}, true); err != nil {
+ t.Fatalf("go mod vendor: %v", err)
+ }
+
+ // Synchronize changes to watched files.
+ env.Await(env.DoneWithChangeWatchedFiles())
+
+ // Now, b.B is found in the vendor tree.
+ checkVendor(env.Implementations(refLoc), true)
+
+ // Delete the vendor tree.
+ if err := os.RemoveAll(env.Sandbox.Workdir.AbsPath("vendor")); err != nil {
+ t.Fatal(err)
+ }
+ // Notify the server of the deletion.
+ if err := env.Sandbox.Workdir.CheckForFileChanges(env.Ctx); err != nil {
+ t.Fatal(err)
+ }
+
+ // Synchronize again.
+ env.Await(env.DoneWithChangeWatchedFiles())
+
+ // b.B is once again defined in the module cache.
+ checkVendor(env.Implementations(refLoc), false)
+ })
+}
diff --git a/gopls/internal/regtest/misc/rename_test.go b/gopls/internal/regtest/misc/rename_test.go
index 121b70725..ebb02609d 100644
--- a/gopls/internal/regtest/misc/rename_test.go
+++ b/gopls/internal/regtest/misc/rename_test.go
@@ -5,12 +5,319 @@
package misc
import (
+ "fmt"
"strings"
"testing"
- . "golang.org/x/tools/internal/lsp/regtest"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
+ "golang.org/x/tools/gopls/internal/lsp/tests/compare"
+ "golang.org/x/tools/internal/testenv"
+)
+
+func TestPrepareRenameMainPackage(t *testing.T) {
+ const files = `
+-- go.mod --
+module mod.com
+
+go 1.18
+-- main.go --
+package main
+
+import (
+ "fmt"
+)
+
+func main() {
+ fmt.Println(1)
+}
+`
+ const wantErr = "can't rename package \"main\""
+ Run(t, files, func(t *testing.T, env *Env) {
+ env.OpenFile("main.go")
+ loc := env.RegexpSearch("main.go", `main`)
+ params := &protocol.PrepareRenameParams{
+ TextDocumentPositionParams: protocol.LocationTextDocumentPositionParams(loc),
+ }
+ _, err := env.Editor.Server.PrepareRename(env.Ctx, params)
+ if err == nil {
+ t.Errorf("missing can't rename package main error from PrepareRename")
+ }
+
+ if err.Error() != wantErr {
+ t.Errorf("got %v, want %v", err.Error(), wantErr)
+ }
+ })
+}
+
+// Test case for golang/go#56227
+func TestRenameWithUnsafeSlice(t *testing.T) {
+ testenv.NeedsGo1Point(t, 17) // unsafe.Slice was added in Go 1.17
+ const files = `
+-- go.mod --
+module mod.com
+
+go 1.18
+-- p.go --
+package p
+
+import "unsafe"
+
+type T struct{}
+
+func (T) M() {}
+
+func _() {
+ x := [3]int{1, 2, 3}
+ ptr := unsafe.Pointer(&x)
+ _ = unsafe.Slice((*int)(ptr), 3)
+}
+`
+
+ Run(t, files, func(t *testing.T, env *Env) {
+ env.OpenFile("p.go")
+ env.Rename(env.RegexpSearch("p.go", "M"), "N") // must not panic
+ })
+}
+
+func TestPrepareRenameWithNoPackageDeclaration(t *testing.T) {
+ const files = `
+go 1.14
+-- lib/a.go --
+import "fmt"
+
+const A = 1
+
+func bar() {
+ fmt.Println("Bar")
+}
+
+-- main.go --
+package main
+
+import "fmt"
+
+func main() {
+ fmt.Println("Hello")
+}
+`
+ Run(t, files, func(t *testing.T, env *Env) {
+ env.OpenFile("lib/a.go")
+ err := env.Editor.Rename(env.Ctx, env.RegexpSearch("lib/a.go", "fmt"), "fmt1")
+ if got, want := fmt.Sprint(err), "no identifier found"; got != want {
+ t.Errorf("Rename: got error %v, want %v", got, want)
+ }
+ })
+}
+
+func TestPrepareRenameFailWithUnknownModule(t *testing.T) {
+ testenv.NeedsGo1Point(t, 17)
+ const files = `
+go 1.14
+-- lib/a.go --
+package lib
+
+const A = 1
+
+-- main.go --
+package main
+
+import (
+ "mod.com/lib"
+)
+
+func main() {
+ println("Hello")
+}
+`
+ const wantErr = "can't rename package: missing module information for package"
+ Run(t, files, func(t *testing.T, env *Env) {
+ loc := env.RegexpSearch("lib/a.go", "lib")
+ params := &protocol.PrepareRenameParams{
+ TextDocumentPositionParams: protocol.LocationTextDocumentPositionParams(loc),
+ }
+ _, err := env.Editor.Server.PrepareRename(env.Ctx, params)
+ if err == nil || !strings.Contains(err.Error(), wantErr) {
+ t.Errorf("missing cannot rename packages with unknown module from PrepareRename")
+ }
+ })
+}
+
+// This test ensures that each import of a renamed package
+// is also renamed if it would otherwise create a conflict.
+func TestRenamePackageWithConflicts(t *testing.T) {
+ testenv.NeedsGo1Point(t, 17)
+ const files = `
+-- go.mod --
+module mod.com
+
+go 1.18
+-- lib/a.go --
+package lib
+
+const A = 1
+
+-- lib/nested/a.go --
+package nested
+
+const B = 1
+
+-- lib/x/a.go --
+package nested1
+
+const C = 1
+
+-- main.go --
+package main
+
+import (
+ "mod.com/lib"
+ "mod.com/lib/nested"
+ nested1 "mod.com/lib/x"
+)
+
+func main() {
+ println("Hello")
+}
+`
+ Run(t, files, func(t *testing.T, env *Env) {
+ env.OpenFile("lib/a.go")
+ env.Rename(env.RegexpSearch("lib/a.go", "lib"), "nested")
+
+ // Check if the new package name exists.
+ env.RegexpSearch("nested/a.go", "package nested")
+ env.RegexpSearch("main.go", `nested2 "mod.com/nested"`)
+ env.RegexpSearch("main.go", "mod.com/nested/nested")
+ env.RegexpSearch("main.go", `nested1 "mod.com/nested/x"`)
+ })
+}
+
+func TestRenamePackageWithAlias(t *testing.T) {
+ testenv.NeedsGo1Point(t, 17)
+ const files = `
+-- go.mod --
+module mod.com
+
+go 1.18
+-- lib/a.go --
+package lib
+
+const A = 1
+
+-- lib/nested/a.go --
+package nested
+
+const B = 1
+
+-- main.go --
+package main
+
+import (
+ "mod.com/lib"
+ lib1 "mod.com/lib/nested"
)
+func main() {
+ println("Hello")
+}
+`
+ Run(t, files, func(t *testing.T, env *Env) {
+ env.OpenFile("lib/a.go")
+ env.Rename(env.RegexpSearch("lib/a.go", "lib"), "nested")
+
+ // Check if the new package name exists.
+ env.RegexpSearch("nested/a.go", "package nested")
+ env.RegexpSearch("main.go", "mod.com/nested")
+ env.RegexpSearch("main.go", `lib1 "mod.com/nested/nested"`)
+ })
+}
+
+func TestRenamePackageWithDifferentDirectoryPath(t *testing.T) {
+ testenv.NeedsGo1Point(t, 17)
+ const files = `
+-- go.mod --
+module mod.com
+
+go 1.18
+-- lib/a.go --
+package lib
+
+const A = 1
+
+-- lib/nested/a.go --
+package foo
+
+const B = 1
+
+-- main.go --
+package main
+
+import (
+ "mod.com/lib"
+ foo "mod.com/lib/nested"
+)
+
+func main() {
+ println("Hello")
+}
+`
+ Run(t, files, func(t *testing.T, env *Env) {
+ env.OpenFile("lib/a.go")
+ env.Rename(env.RegexpSearch("lib/a.go", "lib"), "nested")
+
+ // Check if the new package name exists.
+ env.RegexpSearch("nested/a.go", "package nested")
+ env.RegexpSearch("main.go", "mod.com/nested")
+ env.RegexpSearch("main.go", `foo "mod.com/nested/nested"`)
+ })
+}
+
+func TestRenamePackage(t *testing.T) {
+ testenv.NeedsGo1Point(t, 17)
+ const files = `
+-- go.mod --
+module mod.com
+
+go 1.18
+-- lib/a.go --
+package lib
+
+const A = 1
+
+-- lib/b.go --
+package lib
+
+const B = 1
+
+-- lib/nested/a.go --
+package nested
+
+const C = 1
+
+-- main.go --
+package main
+
+import (
+ "mod.com/lib"
+ "mod.com/lib/nested"
+)
+
+func main() {
+ println("Hello")
+}
+`
+ Run(t, files, func(t *testing.T, env *Env) {
+ env.OpenFile("lib/a.go")
+ env.Rename(env.RegexpSearch("lib/a.go", "lib"), "lib1")
+
+ // Check if the new package name exists.
+ env.RegexpSearch("lib1/a.go", "package lib1")
+ env.RegexpSearch("lib1/b.go", "package lib1")
+ env.RegexpSearch("main.go", "mod.com/lib1")
+ env.RegexpSearch("main.go", "mod.com/lib1/nested")
+ })
+}
+
// Test for golang/go#47564.
func TestRenameInTestVariant(t *testing.T) {
const files = `
@@ -48,11 +355,581 @@ func main() {
Run(t, files, func(t *testing.T, env *Env) {
env.OpenFile("main.go")
- pos := env.RegexpSearch("main.go", `stringutil\.(Identity)`)
- env.Rename("main.go", pos, "Identityx")
- text := env.Editor.BufferText("stringutil/stringutil_test.go")
+ env.Rename(env.RegexpSearch("main.go", `stringutil\.(Identity)`), "Identityx")
+ env.OpenFile("stringutil/stringutil_test.go")
+ text := env.BufferText("stringutil/stringutil_test.go")
if !strings.Contains(text, "Identityx") {
t.Errorf("stringutil/stringutil_test.go: missing expected token `Identityx` after rename:\n%s", text)
}
})
}
+
+// This is a test that rename operation initiated by the editor function as expected.
+func TestRenameFileFromEditor(t *testing.T) {
+ const files = `
+-- go.mod --
+module mod.com
+
+go 1.16
+-- a/a.go --
+package a
+
+const X = 1
+-- a/x.go --
+package a
+
+const X = 2
+-- b/b.go --
+package b
+`
+
+ Run(t, files, func(t *testing.T, env *Env) {
+ // Rename files and verify that diagnostics are affected accordingly.
+
+ // Initially, we should have diagnostics on both X's, for their duplicate declaration.
+ env.OnceMet(
+ InitialWorkspaceLoad,
+ Diagnostics(env.AtRegexp("a/a.go", "X")),
+ Diagnostics(env.AtRegexp("a/x.go", "X")),
+ )
+
+ // Moving x.go should make the diagnostic go away.
+ env.RenameFile("a/x.go", "b/x.go")
+ env.AfterChange(
+ NoDiagnostics(ForFile("a/a.go")), // no more duplicate declarations
+ Diagnostics(env.AtRegexp("b/b.go", "package")), // as package names mismatch
+ )
+
+ // Renaming should also work on open buffers.
+ env.OpenFile("b/x.go")
+
+ // Moving x.go back to a/ should cause the diagnostics to reappear.
+ env.RenameFile("b/x.go", "a/x.go")
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("a/a.go", "X")),
+ Diagnostics(env.AtRegexp("a/x.go", "X")),
+ )
+
+ // Renaming the entire directory should move both the open and closed file.
+ env.RenameFile("a", "x")
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("x/a.go", "X")),
+ Diagnostics(env.AtRegexp("x/x.go", "X")),
+ )
+
+ // As a sanity check, verify that x/x.go is open.
+ if text := env.BufferText("x/x.go"); text == "" {
+ t.Fatal("got empty buffer for x/x.go")
+ }
+ })
+}
+
+func TestRenamePackage_Tests(t *testing.T) {
+ testenv.NeedsGo1Point(t, 17)
+ const files = `
+-- go.mod --
+module mod.com
+
+go 1.18
+-- lib/a.go --
+package lib
+
+const A = 1
+
+-- lib/b.go --
+package lib
+
+const B = 1
+
+-- lib/a_test.go --
+package lib_test
+
+import (
+ "mod.com/lib"
+ "fmt
+)
+
+const C = 1
+
+-- lib/b_test.go --
+package lib
+
+import (
+ "fmt
+)
+
+const D = 1
+
+-- lib/nested/a.go --
+package nested
+
+const D = 1
+
+-- main.go --
+package main
+
+import (
+ "mod.com/lib"
+ "mod.com/lib/nested"
+)
+
+func main() {
+ println("Hello")
+}
+`
+ Run(t, files, func(t *testing.T, env *Env) {
+ env.OpenFile("lib/a.go")
+ env.Rename(env.RegexpSearch("lib/a.go", "lib"), "lib1")
+
+ // Check if the new package name exists.
+ env.RegexpSearch("lib1/a.go", "package lib1")
+ env.RegexpSearch("lib1/b.go", "package lib1")
+ env.RegexpSearch("main.go", "mod.com/lib1")
+ env.RegexpSearch("main.go", "mod.com/lib1/nested")
+
+ // Check if the test package is renamed
+ env.RegexpSearch("lib1/a_test.go", "package lib1_test")
+ env.RegexpSearch("lib1/b_test.go", "package lib1")
+ })
+}
+
+func TestRenamePackage_NestedModule(t *testing.T) {
+ testenv.NeedsGo1Point(t, 18)
+ const files = `
+-- go.work --
+go 1.18
+use (
+ .
+ ./foo/bar
+ ./foo/baz
+)
+
+-- go.mod --
+module mod.com
+
+go 1.18
+
+require (
+ mod.com/foo/bar v0.0.0
+)
+
+replace (
+ mod.com/foo/bar => ./foo/bar
+ mod.com/foo/baz => ./foo/baz
+)
+-- foo/foo.go --
+package foo
+
+import "fmt"
+
+func Bar() {
+ fmt.Println("In foo before renamed to foox.")
+}
+
+-- foo/bar/go.mod --
+module mod.com/foo/bar
+
+-- foo/bar/bar.go --
+package bar
+
+const Msg = "Hi from package bar"
+
+-- foo/baz/go.mod --
+module mod.com/foo/baz
+
+-- foo/baz/baz.go --
+package baz
+
+const Msg = "Hi from package baz"
+
+-- main.go --
+package main
+
+import (
+ "fmt"
+ "mod.com/foo/bar"
+ "mod.com/foo/baz"
+ "mod.com/foo"
+)
+
+func main() {
+ foo.Bar()
+ fmt.Println(bar.Msg)
+ fmt.Println(baz.Msg)
+}
+`
+ Run(t, files, func(t *testing.T, env *Env) {
+ env.OpenFile("foo/foo.go")
+ env.Rename(env.RegexpSearch("foo/foo.go", "foo"), "foox")
+
+ env.RegexpSearch("foox/foo.go", "package foox")
+ env.OpenFile("foox/bar/bar.go")
+ env.OpenFile("foox/bar/go.mod")
+
+ env.RegexpSearch("main.go", "mod.com/foo/bar")
+ env.RegexpSearch("main.go", "mod.com/foox")
+ env.RegexpSearch("main.go", "foox.Bar()")
+
+ env.RegexpSearch("go.mod", "./foox/bar")
+ env.RegexpSearch("go.mod", "./foox/baz")
+ })
+}
+
+func TestRenamePackage_DuplicateImport(t *testing.T) {
+ testenv.NeedsGo1Point(t, 17)
+ const files = `
+-- go.mod --
+module mod.com
+
+go 1.18
+-- lib/a.go --
+package lib
+
+const A = 1
+
+-- lib/nested/a.go --
+package nested
+
+const B = 1
+
+-- main.go --
+package main
+
+import (
+ "mod.com/lib"
+ lib1 "mod.com/lib"
+ lib2 "mod.com/lib/nested"
+)
+
+func main() {
+ println("Hello")
+}
+`
+ Run(t, files, func(t *testing.T, env *Env) {
+ env.OpenFile("lib/a.go")
+ env.Rename(env.RegexpSearch("lib/a.go", "lib"), "nested")
+
+ // Check if the new package name exists.
+ env.RegexpSearch("nested/a.go", "package nested")
+ env.RegexpSearch("main.go", "mod.com/nested")
+ env.RegexpSearch("main.go", `lib1 "mod.com/nested"`)
+ env.RegexpSearch("main.go", `lib2 "mod.com/nested/nested"`)
+ })
+}
+
+func TestRenamePackage_DuplicateBlankImport(t *testing.T) {
+ testenv.NeedsGo1Point(t, 17)
+ const files = `
+-- go.mod --
+module mod.com
+
+go 1.18
+-- lib/a.go --
+package lib
+
+const A = 1
+
+-- lib/nested/a.go --
+package nested
+
+const B = 1
+
+-- main.go --
+package main
+
+import (
+ "mod.com/lib"
+ _ "mod.com/lib"
+ lib1 "mod.com/lib/nested"
+)
+
+func main() {
+ println("Hello")
+}
+`
+ Run(t, files, func(t *testing.T, env *Env) {
+ env.OpenFile("lib/a.go")
+ env.Rename(env.RegexpSearch("lib/a.go", "lib"), "nested")
+
+ // Check if the new package name exists.
+ env.RegexpSearch("nested/a.go", "package nested")
+ env.RegexpSearch("main.go", "mod.com/nested")
+ env.RegexpSearch("main.go", `_ "mod.com/nested"`)
+ env.RegexpSearch("main.go", `lib1 "mod.com/nested/nested"`)
+ })
+}
+
+func TestRenamePackage_TestVariant(t *testing.T) {
+ const files = `
+-- go.mod --
+module mod.com
+
+go 1.12
+-- foo/foo.go --
+package foo
+
+const Foo = 42
+-- bar/bar.go --
+package bar
+
+import "mod.com/foo"
+
+const Bar = foo.Foo
+-- bar/bar_test.go --
+package bar
+
+import "mod.com/foo"
+
+const Baz = foo.Foo
+-- testdata/bar/bar.go --
+package bar
+
+import "mod.com/foox"
+
+const Bar = foox.Foo
+-- testdata/bar/bar_test.go --
+package bar
+
+import "mod.com/foox"
+
+const Baz = foox.Foo
+`
+ Run(t, files, func(t *testing.T, env *Env) {
+ env.OpenFile("foo/foo.go")
+ env.Rename(env.RegexpSearch("foo/foo.go", "package (foo)"), "foox")
+
+ checkTestdata(t, env)
+ })
+}
+
+func TestRenamePackage_IntermediateTestVariant(t *testing.T) {
+ // In this test set up, we have the following import edges:
+ // bar_test -> baz -> foo -> bar
+ // bar_test -> foo -> bar
+ // bar_test -> bar
+ //
+ // As a consequence, bar_x_test.go is in the reverse closure of both
+ // `foo [bar.test]` and `baz [bar.test]`. This test confirms that we don't
+ // produce duplicate edits in this case.
+ const files = `
+-- go.mod --
+module foo.mod
+
+go 1.12
+-- foo/foo.go --
+package foo
+
+import "foo.mod/bar"
+
+const Foo = 42
+
+const _ = bar.Bar
+-- baz/baz.go --
+package baz
+
+import "foo.mod/foo"
+
+const Baz = foo.Foo
+-- bar/bar.go --
+package bar
+
+var Bar = 123
+-- bar/bar_test.go --
+package bar
+
+const _ = Bar
+-- bar/bar_x_test.go --
+package bar_test
+
+import (
+ "foo.mod/bar"
+ "foo.mod/baz"
+ "foo.mod/foo"
+)
+
+const _ = bar.Bar + baz.Baz + foo.Foo
+-- testdata/foox/foo.go --
+package foox
+
+import "foo.mod/bar"
+
+const Foo = 42
+
+const _ = bar.Bar
+-- testdata/baz/baz.go --
+package baz
+
+import "foo.mod/foox"
+
+const Baz = foox.Foo
+-- testdata/bar/bar_x_test.go --
+package bar_test
+
+import (
+ "foo.mod/bar"
+ "foo.mod/baz"
+ "foo.mod/foox"
+)
+
+const _ = bar.Bar + baz.Baz + foox.Foo
+`
+
+ Run(t, files, func(t *testing.T, env *Env) {
+ env.OpenFile("foo/foo.go")
+ env.Rename(env.RegexpSearch("foo/foo.go", "package (foo)"), "foox")
+
+ checkTestdata(t, env)
+ })
+}
+
+func TestRenamePackage_Nesting(t *testing.T) {
+ testenv.NeedsGo1Point(t, 17)
+ const files = `
+-- go.mod --
+module mod.com
+
+go 1.18
+-- lib/a.go --
+package lib
+
+import "mod.com/lib/nested"
+
+const A = 1 + nested.B
+-- lib/nested/a.go --
+package nested
+
+const B = 1
+-- other/other.go --
+package other
+
+import (
+ "mod.com/lib"
+ "mod.com/lib/nested"
+)
+
+const C = lib.A + nested.B
+-- testdata/libx/a.go --
+package libx
+
+import "mod.com/libx/nested"
+
+const A = 1 + nested.B
+-- testdata/other/other.go --
+package other
+
+import (
+ "mod.com/libx"
+ "mod.com/libx/nested"
+)
+
+const C = libx.A + nested.B
+`
+ Run(t, files, func(t *testing.T, env *Env) {
+ env.OpenFile("lib/a.go")
+ env.Rename(env.RegexpSearch("lib/a.go", "package (lib)"), "libx")
+
+ checkTestdata(t, env)
+ })
+}
+
+func TestRenamePackage_InvalidName(t *testing.T) {
+ testenv.NeedsGo1Point(t, 17)
+ const files = `
+-- go.mod --
+module mod.com
+
+go 1.18
+-- lib/a.go --
+package lib
+
+import "mod.com/lib/nested"
+
+const A = 1 + nested.B
+`
+
+ Run(t, files, func(t *testing.T, env *Env) {
+ env.OpenFile("lib/a.go")
+ loc := env.RegexpSearch("lib/a.go", "package (lib)")
+
+ for _, badName := range []string{"$$$", "lib_test"} {
+ if err := env.Editor.Rename(env.Ctx, loc, badName); err == nil {
+ t.Errorf("Rename(lib, libx) succeeded, want non-nil error")
+ }
+ }
+ })
+}
+
+func TestRenamePackage_InternalPackage(t *testing.T) {
+ testenv.NeedsGo1Point(t, 17)
+ const files = `
+-- go.mod --
+module mod.com
+
+go 1.18
+-- lib/a.go --
+package lib
+
+import (
+ "fmt"
+ "mod.com/lib/internal/x"
+)
+
+const A = 1
+
+func print() {
+ fmt.Println(x.B)
+}
+
+-- lib/internal/x/a.go --
+package x
+
+const B = 1
+
+-- main.go --
+package main
+
+import "mod.com/lib"
+
+func main() {
+ lib.print()
+}
+`
+ Run(t, files, func(t *testing.T, env *Env) {
+ env.OpenFile("lib/internal/x/a.go")
+ env.Rename(env.RegexpSearch("lib/internal/x/a.go", "x"), "utils")
+
+ // Check if the new package name exists.
+ env.RegexpSearch("lib/a.go", "mod.com/lib/internal/utils")
+ env.RegexpSearch("lib/a.go", "utils.B")
+
+ // Check if the test package is renamed
+ env.RegexpSearch("lib/internal/utils/a.go", "package utils")
+
+ env.OpenFile("lib/a.go")
+ env.Rename(env.RegexpSearch("lib/a.go", "lib"), "lib1")
+
+ // Check if the new package name exists.
+ env.RegexpSearch("lib1/a.go", "package lib1")
+ env.RegexpSearch("lib1/a.go", "mod.com/lib1/internal/utils")
+ env.RegexpSearch("main.go", `import "mod.com/lib1"`)
+ env.RegexpSearch("main.go", "lib1.print()")
+ })
+}
+
+// checkTestdata checks that current buffer contents match their corresponding
+// expected content in the testdata directory.
+func checkTestdata(t *testing.T, env *Env) {
+ t.Helper()
+ files := env.ListFiles("testdata")
+ if len(files) == 0 {
+ t.Fatal("no files in testdata directory")
+ }
+ for _, file := range files {
+ suffix := strings.TrimPrefix(file, "testdata/")
+ got := env.BufferText(suffix)
+ want := env.ReadWorkspaceFile(file)
+ if diff := compare.Text(want, got); diff != "" {
+ t.Errorf("Rename: unexpected buffer content for %s (-want +got):\n%s", suffix, diff)
+ }
+ }
+}
diff --git a/gopls/internal/regtest/misc/semantictokens_test.go b/gopls/internal/regtest/misc/semantictokens_test.go
index 79507876a..a96024b9c 100644
--- a/gopls/internal/regtest/misc/semantictokens_test.go
+++ b/gopls/internal/regtest/misc/semantictokens_test.go
@@ -5,10 +5,14 @@
package misc
import (
+ "strings"
"testing"
- "golang.org/x/tools/internal/lsp/protocol"
- . "golang.org/x/tools/internal/lsp/regtest"
+ "github.com/google/go-cmp/cmp"
+ "golang.org/x/tools/gopls/internal/lsp"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
+ "golang.org/x/tools/internal/typeparams"
)
func TestBadURICrash_VSCodeIssue1498(t *testing.T) {
@@ -25,10 +29,8 @@ func main() {}
`
WithOptions(
- Modes(Singleton),
- EditorConfig{
- AllExperiments: true,
- },
+ Modes(Default),
+ Settings{"allExperiments": true},
).Run(t, src, func(t *testing.T, env *Env) {
params := &protocol.SemanticTokensParams{}
const badURI = "http://foo"
@@ -42,3 +44,161 @@ func main() {}
}
})
}
+
+// fix bug involving type parameters and regular parameters
+// (golang/vscode-go#2527)
+func TestSemantic_2527(t *testing.T) {
+ if !typeparams.Enabled {
+ t.Skip("type parameters are needed for this test")
+ }
+ // these are the expected types of identifiers in text order
+ want := []result{
+ {"package", "keyword", ""},
+ {"foo", "namespace", ""},
+ {"func", "keyword", ""},
+ {"Add", "function", "definition deprecated"},
+ {"T", "typeParameter", "definition"},
+ {"int", "type", "defaultLibrary"},
+ {"target", "parameter", "definition"},
+ {"T", "typeParameter", ""},
+ {"l", "parameter", "definition"},
+ {"T", "typeParameter", ""},
+ {"T", "typeParameter", ""},
+ {"return", "keyword", ""},
+ {"append", "function", "defaultLibrary"},
+ {"l", "parameter", ""},
+ {"target", "parameter", ""},
+ {"for", "keyword", ""},
+ {"range", "keyword", ""},
+ {"l", "parameter", ""},
+ {"return", "keyword", ""},
+ {"nil", "variable", "readonly defaultLibrary"},
+ }
+ src := `
+-- go.mod --
+module example.com
+
+go 1.19
+-- main.go --
+package foo
+// Deprecated (for testing)
+func Add[T int](target T, l []T) []T {
+ return append(l, target)
+ for range l {} // test coverage
+ return nil
+}
+`
+ WithOptions(
+ Modes(Default),
+ Settings{"semanticTokens": true},
+ ).Run(t, src, func(t *testing.T, env *Env) {
+ env.OpenFile("main.go")
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("main.go", "for range")),
+ )
+ p := &protocol.SemanticTokensParams{
+ TextDocument: protocol.TextDocumentIdentifier{
+ URI: env.Sandbox.Workdir.URI("main.go"),
+ },
+ }
+ v, err := env.Editor.Server.SemanticTokensFull(env.Ctx, p)
+ if err != nil {
+ t.Fatal(err)
+ }
+ seen := interpret(v.Data, env.BufferText("main.go"))
+ if x := cmp.Diff(want, seen); x != "" {
+ t.Errorf("Semantic tokens do not match (-want +got):\n%s", x)
+ }
+ })
+
+}
+
+// fix inconsistency in TypeParameters
+// https://github.com/golang/go/issues/57619
+func TestSemantic_57619(t *testing.T) {
+ if !typeparams.Enabled {
+ t.Skip("type parameters are needed for this test")
+ }
+ src := `
+-- go.mod --
+module example.com
+
+go 1.19
+-- main.go --
+package foo
+type Smap[K int, V any] struct {
+ Store map[K]V
+}
+func (s *Smap[K, V]) Get(k K) (V, bool) {
+ v, ok := s.Store[k]
+ return v, ok
+}
+func New[K int, V any]() Smap[K, V] {
+ return Smap[K, V]{Store: make(map[K]V)}
+}
+`
+ WithOptions(
+ Modes(Default),
+ Settings{"semanticTokens": true},
+ ).Run(t, src, func(t *testing.T, env *Env) {
+ env.OpenFile("main.go")
+ p := &protocol.SemanticTokensParams{
+ TextDocument: protocol.TextDocumentIdentifier{
+ URI: env.Sandbox.Workdir.URI("main.go"),
+ },
+ }
+ v, err := env.Editor.Server.SemanticTokensFull(env.Ctx, p)
+ if err != nil {
+ t.Fatal(err)
+ }
+ seen := interpret(v.Data, env.BufferText("main.go"))
+ for i, s := range seen {
+ if (s.Token == "K" || s.Token == "V") && s.TokenType != "typeParameter" {
+ t.Errorf("%d: expected K and V to be type parameters, but got %v", i, s)
+ }
+ }
+ })
+}
+
+type result struct {
+ Token string
+ TokenType string
+ Mod string
+}
+
+// human-readable version of the semantic tokens
+// comment, string, number are elided
+// (and in the future, maybe elide other things, like operators)
+func interpret(x []uint32, contents string) []result {
+ lines := strings.Split(contents, "\n")
+ ans := []result{}
+ line, col := 1, 1
+ for i := 0; i < len(x); i += 5 {
+ line += int(x[i])
+ col += int(x[i+1])
+ if x[i] != 0 { // new line
+ col = int(x[i+1]) + 1 // 1-based column numbers
+ }
+ sz := x[i+2]
+ t := semanticTypes[x[i+3]]
+ if t == "comment" || t == "string" || t == "number" {
+ continue
+ }
+ l := x[i+4]
+ var mods []string
+ for i, mod := range semanticModifiers {
+ if l&(1<<i) != 0 {
+ mods = append(mods, mod)
+ }
+ }
+ // col is a utf-8 offset
+ tok := lines[line-1][col-1 : col-1+int(sz)]
+ ans = append(ans, result{tok, t, strings.Join(mods, " ")})
+ }
+ return ans
+}
+
+var (
+ semanticTypes = lsp.SemanticTypes()
+ semanticModifiers = lsp.SemanticModifiers()
+)
diff --git a/gopls/internal/regtest/misc/settings_test.go b/gopls/internal/regtest/misc/settings_test.go
index 7704c3c04..dd4042989 100644
--- a/gopls/internal/regtest/misc/settings_test.go
+++ b/gopls/internal/regtest/misc/settings_test.go
@@ -7,7 +7,7 @@ package misc
import (
"testing"
- . "golang.org/x/tools/internal/lsp/regtest"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
)
func TestEmptyDirectoryFilters_Issue51843(t *testing.T) {
@@ -24,11 +24,7 @@ func main() {
`
WithOptions(
- EditorConfig{
- Settings: map[string]interface{}{
- "directoryFilters": []string{""},
- },
- },
+ Settings{"directoryFilters": []string{""}},
).Run(t, src, func(t *testing.T, env *Env) {
// No need to do anything. Issue golang/go#51843 is triggered by the empty
// directory filter above.
diff --git a/gopls/internal/regtest/misc/shared_test.go b/gopls/internal/regtest/misc/shared_test.go
index 6861743ff..410a8d327 100644
--- a/gopls/internal/regtest/misc/shared_test.go
+++ b/gopls/internal/regtest/misc/shared_test.go
@@ -7,10 +7,13 @@ package misc
import (
"testing"
- . "golang.org/x/tools/internal/lsp/regtest"
+ "golang.org/x/tools/gopls/internal/lsp/fake"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
)
-const sharedProgram = `
+// Smoke test that simultaneous editing sessions in the same workspace works.
+func TestSimultaneousEdits(t *testing.T) {
+ const sharedProgram = `
-- go.mod --
module mod
@@ -24,20 +27,26 @@ func main() {
fmt.Println("Hello World.")
}`
-func runShared(t *testing.T, testFunc func(env1 *Env, env2 *Env)) {
- // Only run these tests in forwarded modes.
- modes := DefaultModes() & (Forwarded | SeparateProcess)
- WithOptions(Modes(modes)).Run(t, sharedProgram, func(t *testing.T, env1 *Env) {
+ WithOptions(
+ Modes(DefaultModes()&(Forwarded|SeparateProcess)),
+ ).Run(t, sharedProgram, func(t *testing.T, env1 *Env) {
// Create a second test session connected to the same workspace and server
// as the first.
- env2 := NewEnv(env1.Ctx, t, env1.Sandbox, env1.Server, env1.Editor.Config, true)
+ awaiter := NewAwaiter(env1.Sandbox.Workdir)
+ const skipApplyEdits = false
+ editor, err := fake.NewEditor(env1.Sandbox, env1.Editor.Config()).Connect(env1.Ctx, env1.Server, awaiter.Hooks(), skipApplyEdits)
+ if err != nil {
+ t.Fatal(err)
+ }
+ env2 := &Env{
+ T: t,
+ Ctx: env1.Ctx,
+ Sandbox: env1.Sandbox,
+ Server: env1.Server,
+ Editor: editor,
+ Awaiter: awaiter,
+ }
env2.Await(InitialWorkspaceLoad)
- testFunc(env1, env2)
- })
-}
-
-func TestSimultaneousEdits(t *testing.T) {
- runShared(t, func(env1 *Env, env2 *Env) {
// In editor #1, break fmt.Println as before.
env1.OpenFile("main.go")
env1.RegexpReplace("main.go", "Printl(n)", "")
@@ -46,19 +55,18 @@ func TestSimultaneousEdits(t *testing.T) {
env2.RegexpReplace("main.go", "\\)\n(})", "")
// Now check that we got different diagnostics in each environment.
- env1.Await(env1.DiagnosticAtRegexp("main.go", "Printl"))
- env2.Await(env2.DiagnosticAtRegexp("main.go", "$"))
- })
-}
+ env1.AfterChange(Diagnostics(env1.AtRegexp("main.go", "Printl")))
+ env2.AfterChange(Diagnostics(env2.AtRegexp("main.go", "$")))
-func TestShutdown(t *testing.T) {
- runShared(t, func(env1 *Env, env2 *Env) {
- if err := env1.Editor.Close(env1.Ctx); err != nil {
- t.Errorf("closing first editor: %v", err)
+ // Now close editor #2, and verify that operation in editor #1 is
+ // unaffected.
+ if err := env2.Editor.Close(env2.Ctx); err != nil {
+ t.Errorf("closing second editor: %v", err)
}
- // Now make an edit in editor #2 to trigger diagnostics.
- env2.OpenFile("main.go")
- env2.RegexpReplace("main.go", "\\)\n(})", "")
- env2.Await(env2.DiagnosticAtRegexp("main.go", "$"))
+
+ env1.RegexpReplace("main.go", "Printl", "Println")
+ env1.AfterChange(
+ NoDiagnostics(ForFile("main.go")),
+ )
})
}
diff --git a/gopls/internal/regtest/misc/signature_help_test.go b/gopls/internal/regtest/misc/signature_help_test.go
new file mode 100644
index 000000000..fd9f4f07a
--- /dev/null
+++ b/gopls/internal/regtest/misc/signature_help_test.go
@@ -0,0 +1,69 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package misc
+
+import (
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
+)
+
+func TestSignatureHelpInNonWorkspacePackage(t *testing.T) {
+ const files = `
+-- a/go.mod --
+module a.com
+
+go 1.18
+-- a/a/a.go --
+package a
+
+func DoSomething(int) {}
+
+func _() {
+ DoSomething()
+}
+-- b/go.mod --
+module b.com
+go 1.18
+
+require a.com v1.0.0
+
+replace a.com => ../a
+-- b/b/b.go --
+package b
+
+import "a.com/a"
+
+func _() {
+ a.DoSomething()
+}
+`
+
+ WithOptions(
+ WorkspaceFolders("a"),
+ ).Run(t, files, func(t *testing.T, env *Env) {
+ env.OpenFile("a/a/a.go")
+ env.OpenFile("b/b/b.go")
+ signatureHelp := func(filename string) *protocol.SignatureHelp {
+ loc := env.RegexpSearch(filename, `DoSomething\(()\)`)
+ var params protocol.SignatureHelpParams
+ params.TextDocument.URI = loc.URI
+ params.Position = loc.Range.Start
+ help, err := env.Editor.Server.SignatureHelp(env.Ctx, &params)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return help
+ }
+ ahelp := signatureHelp("a/a/a.go")
+ bhelp := signatureHelp("b/b/b.go")
+
+ if diff := cmp.Diff(ahelp, bhelp); diff != "" {
+ t.Fatal(diff)
+ }
+ })
+}
diff --git a/gopls/internal/regtest/misc/staticcheck_test.go b/gopls/internal/regtest/misc/staticcheck_test.go
new file mode 100644
index 000000000..fa049ab0e
--- /dev/null
+++ b/gopls/internal/regtest/misc/staticcheck_test.go
@@ -0,0 +1,110 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package misc
+
+import (
+ "testing"
+
+ "golang.org/x/tools/internal/testenv"
+
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
+)
+
+func TestStaticcheckGenerics(t *testing.T) {
+ testenv.NeedsGo1Point(t, 19) // generics were introduced in Go 1.18, staticcheck requires go1.19+
+
+ const files = `
+-- go.mod --
+module mod.com
+
+go 1.18
+-- a/a.go --
+package a
+
+import (
+ "errors"
+ "sort"
+ "strings"
+)
+
+func Zero[P any]() P {
+ var p P
+ return p
+}
+
+type Inst[P any] struct {
+ Field P
+}
+
+func testGenerics[P *T, T any](p P) {
+ // Calls to instantiated functions should not break checks.
+ slice := Zero[string]()
+ sort.Slice(slice, func(i, j int) bool {
+ return slice[i] < slice[j]
+ })
+
+ // Usage of instantiated fields should not break checks.
+ g := Inst[string]{"hello"}
+ g.Field = strings.TrimLeft(g.Field, "12234")
+
+ // Use of type parameters should not break checks.
+ var q P
+ p = q // SA4009: p is overwritten before its first use
+ q = &*p // SA4001: &* will be simplified
+}
+
+
+// FooErr should be called ErrFoo (ST1012)
+var FooErr error = errors.New("foo")
+`
+
+ WithOptions(
+ Settings{"staticcheck": true},
+ ).Run(t, files, func(t *testing.T, env *Env) {
+ env.OpenFile("a/a.go")
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("a/a.go", "sort.Slice"), FromSource("sortslice")),
+ Diagnostics(env.AtRegexp("a/a.go", "sort.Slice.(slice)"), FromSource("SA1028")),
+ Diagnostics(env.AtRegexp("a/a.go", "var (FooErr)"), FromSource("ST1012")),
+ Diagnostics(env.AtRegexp("a/a.go", `"12234"`), FromSource("SA1024")),
+ Diagnostics(env.AtRegexp("a/a.go", "testGenerics.*(p P)"), FromSource("SA4009")),
+ Diagnostics(env.AtRegexp("a/a.go", "q = (&\\*p)"), FromSource("SA4001")),
+ )
+ })
+}
+
+// Test for golang/go#56270: an analysis with related info should not panic if
+// analysis.RelatedInformation.End is not set.
+func TestStaticcheckRelatedInfo(t *testing.T) {
+ testenv.NeedsGo1Point(t, 19) // staticcheck is only supported at Go 1.19+
+ const files = `
+-- go.mod --
+module mod.test
+
+go 1.18
+-- p.go --
+package p
+
+import (
+ "fmt"
+)
+
+func Foo(enabled interface{}) {
+ if enabled, ok := enabled.(bool); ok {
+ } else {
+ _ = fmt.Sprintf("invalid type %T", enabled) // enabled is always bool here
+ }
+}
+`
+
+ WithOptions(
+ Settings{"staticcheck": true},
+ ).Run(t, files, func(t *testing.T, env *Env) {
+ env.OpenFile("p.go")
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("p.go", ", (enabled)"), FromSource("SA9008")),
+ )
+ })
+}
diff --git a/gopls/internal/regtest/misc/vendor_test.go b/gopls/internal/regtest/misc/vendor_test.go
index 0e615f281..4fcf1067a 100644
--- a/gopls/internal/regtest/misc/vendor_test.go
+++ b/gopls/internal/regtest/misc/vendor_test.go
@@ -5,13 +5,11 @@
package misc
import (
- "runtime"
"testing"
- . "golang.org/x/tools/internal/lsp/regtest"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/testenv"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
)
const basicProxy = `
@@ -26,11 +24,6 @@ var Goodbye error
`
func TestInconsistentVendoring(t *testing.T) {
- testenv.NeedsGo1Point(t, 14)
- if runtime.GOOS == "windows" {
- t.Skipf("skipping test due to flakiness on Windows: https://golang.org/issue/49646")
- }
-
const pkgThatUsesVendoring = `
-- go.mod --
module mod.com
@@ -53,21 +46,20 @@ func _() {
}
`
WithOptions(
- Modes(Singleton),
+ Modes(Default),
ProxyFiles(basicProxy),
).Run(t, pkgThatUsesVendoring, func(t *testing.T, env *Env) {
env.OpenFile("a/a1.go")
d := &protocol.PublishDiagnosticsParams{}
- env.Await(
- OnceMet(
- env.DiagnosticAtRegexpWithMessage("go.mod", "module mod.com", "Inconsistent vendoring"),
- ReadDiagnostics("go.mod", d),
- ),
+ env.OnceMet(
+ InitialWorkspaceLoad,
+ Diagnostics(env.AtRegexp("go.mod", "module mod.com"), WithMessage("Inconsistent vendoring")),
+ ReadDiagnostics("go.mod", d),
)
env.ApplyQuickFixes("go.mod", d.Diagnostics)
- env.Await(
- env.DiagnosticAtRegexpWithMessage("a/a1.go", `q int`, "not used"),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("a/a1.go", `q int`), WithMessage("not used")),
)
})
}
diff --git a/gopls/internal/regtest/misc/vuln_test.go b/gopls/internal/regtest/misc/vuln_test.go
index 94fde715c..8badc879e 100644
--- a/gopls/internal/regtest/misc/vuln_test.go
+++ b/gopls/internal/regtest/misc/vuln_test.go
@@ -2,17 +2,32 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build go1.18
+// +build go1.18
+
package misc
import (
+ "context"
+ "encoding/json"
+ "path/filepath"
+ "sort"
+ "strings"
"testing"
- "golang.org/x/tools/internal/lsp/command"
- "golang.org/x/tools/internal/lsp/protocol"
- . "golang.org/x/tools/internal/lsp/regtest"
+ "github.com/google/go-cmp/cmp"
+ "golang.org/x/tools/gopls/internal/govulncheck"
+ "golang.org/x/tools/gopls/internal/lsp/command"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
+ "golang.org/x/tools/gopls/internal/lsp/source"
+ "golang.org/x/tools/gopls/internal/lsp/tests/compare"
+ "golang.org/x/tools/gopls/internal/vulncheck"
+ "golang.org/x/tools/gopls/internal/vulncheck/vulntest"
+ "golang.org/x/tools/internal/testenv"
)
-func TestRunVulncheckExpError(t *testing.T) {
+func TestRunGovulncheckError(t *testing.T) {
const files = `
-- go.mod --
module mod.com
@@ -22,15 +37,15 @@ go 1.12
package foo
`
Run(t, files, func(t *testing.T, env *Env) {
- cmd, err := command.NewRunVulncheckExpCommand("Run Vulncheck Exp", command.VulncheckArgs{
- Dir: "/invalid/file/url", // invalid arg
+ cmd, err := command.NewRunGovulncheckCommand("Run Vulncheck Exp", command.VulncheckArgs{
+ URI: "/invalid/file/url", // invalid arg
})
if err != nil {
t.Fatal(err)
}
params := &protocol.ExecuteCommandParams{
- Command: command.RunVulncheckExp.ID(),
+ Command: command.RunGovulncheck.ID(),
Arguments: cmd.Arguments,
}
@@ -41,3 +56,922 @@ package foo
}
})
}
+
+func TestRunGovulncheckError2(t *testing.T) {
+ const files = `
+-- go.mod --
+module mod.com
+
+go 1.12
+-- foo.go --
+package foo
+
+func F() { // build error incomplete
+`
+ WithOptions(
+ EnvVars{
+ "_GOPLS_TEST_BINARY_RUN_AS_GOPLS": "true", // needed to run `gopls vulncheck`.
+ },
+ Settings{
+ "codelenses": map[string]bool{
+ "run_govulncheck": true,
+ },
+ },
+ ).Run(t, files, func(t *testing.T, env *Env) {
+ env.OpenFile("go.mod")
+ var result command.RunVulncheckResult
+ env.ExecuteCodeLensCommand("go.mod", command.RunGovulncheck, &result)
+ var ws WorkStatus
+ env.Await(
+ CompletedProgress(result.Token, &ws),
+ )
+ wantEndMsg, wantMsgPart := "failed", "failed to load packages due to errors"
+ if ws.EndMsg != "failed" || !strings.Contains(ws.Msg, wantMsgPart) {
+ t.Errorf("work status = %+v, want {EndMessage: %q, Message: %q}", ws, wantEndMsg, wantMsgPart)
+ }
+ })
+}
+
+const vulnsData = `
+-- GO-2022-01.yaml --
+modules:
+ - module: golang.org/amod
+ versions:
+ - introduced: 1.0.0
+ - fixed: 1.0.4
+ - introduced: 1.1.2
+ packages:
+ - package: golang.org/amod/avuln
+ symbols:
+ - VulnData.Vuln1
+ - VulnData.Vuln2
+description: >
+ vuln in amod
+references:
+ - href: pkg.go.dev/vuln/GO-2022-01
+-- GO-2022-03.yaml --
+modules:
+ - module: golang.org/amod
+ versions:
+ - introduced: 1.0.0
+ - fixed: 1.0.6
+ packages:
+ - package: golang.org/amod/avuln
+ symbols:
+ - nonExisting
+description: >
+ unaffecting vulnerability
+-- GO-2022-02.yaml --
+modules:
+ - module: golang.org/bmod
+ packages:
+ - package: golang.org/bmod/bvuln
+ symbols:
+ - Vuln
+description: |
+ vuln in bmod
+
+ This is a long description
+ of this vulnerability.
+references:
+ - href: pkg.go.dev/vuln/GO-2022-03
+-- GO-2022-04.yaml --
+modules:
+ - module: golang.org/bmod
+ packages:
+ - package: golang.org/bmod/unused
+ symbols:
+ - Vuln
+description: |
+ vuln in bmod/somtrhingelse
+references:
+ - href: pkg.go.dev/vuln/GO-2022-04
+-- GOSTDLIB.yaml --
+modules:
+ - module: stdlib
+ versions:
+ - introduced: 1.18.0
+ packages:
+ - package: archive/zip
+ symbols:
+ - OpenReader
+references:
+ - href: pkg.go.dev/vuln/GOSTDLIB
+`
+
+func TestRunGovulncheckStd(t *testing.T) {
+ testenv.NeedsGo1Point(t, 18)
+ const files = `
+-- go.mod --
+module mod.com
+
+go 1.18
+-- main.go --
+package main
+
+import (
+ "archive/zip"
+ "fmt"
+)
+
+func main() {
+ _, err := zip.OpenReader("file.zip") // vulnerability id: GOSTDLIB
+ fmt.Println(err)
+}
+`
+
+ db, err := vulntest.NewDatabase(context.Background(), []byte(vulnsData))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer db.Clean()
+ WithOptions(
+ EnvVars{
+ // Let the analyzer read vulnerabilities data from the testdata/vulndb.
+ "GOVULNDB": db.URI(),
+ // When fetchinging stdlib package vulnerability info,
+ // behave as if our go version is go1.18 for this testing.
+ // The default behavior is to run `go env GOVERSION` (which isn't mutable env var).
+ vulncheck.GoVersionForVulnTest: "go1.18",
+ "_GOPLS_TEST_BINARY_RUN_AS_GOPLS": "true", // needed to run `gopls vulncheck`.
+ },
+ Settings{
+ "codelenses": map[string]bool{
+ "run_govulncheck": true,
+ },
+ },
+ ).Run(t, files, func(t *testing.T, env *Env) {
+ env.OpenFile("go.mod")
+
+ // Test CodeLens is present.
+ lenses := env.CodeLens("go.mod")
+
+ const wantCommand = "gopls." + string(command.RunGovulncheck)
+ var gotCodelens = false
+ var lens protocol.CodeLens
+ for _, l := range lenses {
+ if l.Command.Command == wantCommand {
+ gotCodelens = true
+ lens = l
+ break
+ }
+ }
+ if !gotCodelens {
+ t.Fatal("got no vulncheck codelens")
+ }
+ // Run Command included in the codelens.
+ var result command.RunVulncheckResult
+ env.ExecuteCommand(&protocol.ExecuteCommandParams{
+ Command: lens.Command.Command,
+ Arguments: lens.Command.Arguments,
+ }, &result)
+
+ env.OnceMet(
+ CompletedProgress(result.Token, nil),
+ ShownMessage("Found GOSTDLIB"),
+ NoDiagnostics(ForFile("go.mod")),
+ )
+ testFetchVulncheckResult(t, env, map[string]fetchVulncheckResult{
+ "go.mod": {IDs: []string{"GOSTDLIB"}, Mode: govulncheck.ModeGovulncheck}})
+ })
+}
+
+func TestFetchVulncheckResultStd(t *testing.T) {
+ testenv.NeedsGo1Point(t, 18)
+ const files = `
+-- go.mod --
+module mod.com
+
+go 1.18
+-- main.go --
+package main
+
+import (
+ "archive/zip"
+ "fmt"
+)
+
+func main() {
+ _, err := zip.OpenReader("file.zip") // vulnerability id: GOSTDLIB
+ fmt.Println(err)
+}
+`
+
+ db, err := vulntest.NewDatabase(context.Background(), []byte(vulnsData))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer db.Clean()
+ WithOptions(
+ EnvVars{
+ // Let the analyzer read vulnerabilities data from the testdata/vulndb.
+ "GOVULNDB": db.URI(),
+ // When fetchinging stdlib package vulnerability info,
+ // behave as if our go version is go1.18 for this testing.
+ vulncheck.GoVersionForVulnTest: "go1.18",
+ "_GOPLS_TEST_BINARY_RUN_AS_GOPLS": "true", // needed to run `gopls vulncheck`.
+ },
+ Settings{"ui.diagnostic.vulncheck": "Imports"},
+ ).Run(t, files, func(t *testing.T, env *Env) {
+ env.OpenFile("go.mod")
+ env.AfterChange(
+ NoDiagnostics(ForFile("go.mod")),
+ // we don't publish diagnostics for standard library vulnerability yet.
+ )
+ testFetchVulncheckResult(t, env, map[string]fetchVulncheckResult{
+ "go.mod": {
+ IDs: []string{"GOSTDLIB"},
+ Mode: govulncheck.ModeImports,
+ },
+ })
+ })
+}
+
+type fetchVulncheckResult struct {
+ IDs []string
+ Mode govulncheck.AnalysisMode
+}
+
+func testFetchVulncheckResult(t *testing.T, env *Env, want map[string]fetchVulncheckResult) {
+ t.Helper()
+
+ var result map[protocol.DocumentURI]*govulncheck.Result
+ fetchCmd, err := command.NewFetchVulncheckResultCommand("fetch", command.URIArg{
+ URI: env.Sandbox.Workdir.URI("go.mod"),
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ env.ExecuteCommand(&protocol.ExecuteCommandParams{
+ Command: fetchCmd.Command,
+ Arguments: fetchCmd.Arguments,
+ }, &result)
+
+ for _, v := range want {
+ sort.Strings(v.IDs)
+ }
+ got := map[string]fetchVulncheckResult{}
+ for k, r := range result {
+ var osv []string
+ for _, v := range r.Vulns {
+ osv = append(osv, v.OSV.ID)
+ }
+ sort.Strings(osv)
+ modfile := env.Sandbox.Workdir.RelPath(k.SpanURI().Filename())
+ got[modfile] = fetchVulncheckResult{
+ IDs: osv,
+ Mode: r.Mode,
+ }
+ }
+ if diff := cmp.Diff(want, got); diff != "" {
+ t.Errorf("fetch vulnchheck result = got %v, want %v: diff %v", got, want, diff)
+ }
+}
+
+const workspace1 = `
+-- go.mod --
+module golang.org/entry
+
+go 1.18
+
+require golang.org/cmod v1.1.3
+
+require (
+ golang.org/amod v1.0.0 // indirect
+ golang.org/bmod v0.5.0 // indirect
+)
+-- go.sum --
+golang.org/amod v1.0.0 h1:EUQOI2m5NhQZijXZf8WimSnnWubaFNrrKUH/PopTN8k=
+golang.org/amod v1.0.0/go.mod h1:yvny5/2OtYFomKt8ax+WJGvN6pfN1pqjGnn7DQLUi6E=
+golang.org/bmod v0.5.0 h1:KgvUulMyMiYRB7suKA0x+DfWRVdeyPgVJvcishTH+ng=
+golang.org/bmod v0.5.0/go.mod h1:f6o+OhF66nz/0BBc/sbCsshyPRKMSxZIlG50B/bsM4c=
+golang.org/cmod v1.1.3 h1:PJ7rZFTk7xGAunBRDa0wDe7rZjZ9R/vr1S2QkVVCngQ=
+golang.org/cmod v1.1.3/go.mod h1:eCR8dnmvLYQomdeAZRCPgS5JJihXtqOQrpEkNj5feQA=
+-- x/x.go --
+package x
+
+import (
+ "golang.org/cmod/c"
+ "golang.org/entry/y"
+)
+
+func X() {
+ c.C1().Vuln1() // vuln use: X -> Vuln1
+}
+
+func CallY() {
+ y.Y() // vuln use: CallY -> y.Y -> bvuln.Vuln
+}
+
+-- y/y.go --
+package y
+
+import "golang.org/cmod/c"
+
+func Y() {
+ c.C2()() // vuln use: Y -> bvuln.Vuln
+}
+`
+
+// cmod/c imports amod/avuln and bmod/bvuln.
+const proxy1 = `
+-- golang.org/cmod@v1.1.3/go.mod --
+module golang.org/cmod
+
+go 1.12
+-- golang.org/cmod@v1.1.3/c/c.go --
+package c
+
+import (
+ "golang.org/amod/avuln"
+ "golang.org/bmod/bvuln"
+)
+
+type I interface {
+ Vuln1()
+}
+
+func C1() I {
+ v := avuln.VulnData{}
+ v.Vuln2() // vuln use
+ return v
+}
+
+func C2() func() {
+ return bvuln.Vuln
+}
+-- golang.org/amod@v1.0.0/go.mod --
+module golang.org/amod
+
+go 1.14
+-- golang.org/amod@v1.0.0/avuln/avuln.go --
+package avuln
+
+type VulnData struct {}
+func (v VulnData) Vuln1() {}
+func (v VulnData) Vuln2() {}
+-- golang.org/amod@v1.0.4/go.mod --
+module golang.org/amod
+
+go 1.14
+-- golang.org/amod@v1.0.4/avuln/avuln.go --
+package avuln
+
+type VulnData struct {}
+func (v VulnData) Vuln1() {}
+func (v VulnData) Vuln2() {}
+
+-- golang.org/bmod@v0.5.0/go.mod --
+module golang.org/bmod
+
+go 1.14
+-- golang.org/bmod@v0.5.0/bvuln/bvuln.go --
+package bvuln
+
+func Vuln() {
+ // something evil
+}
+-- golang.org/bmod@v0.5.0/unused/unused.go --
+package unused
+
+func Vuln() {
+ // something evil
+}
+-- golang.org/amod@v1.0.6/go.mod --
+module golang.org/amod
+
+go 1.14
+-- golang.org/amod@v1.0.6/avuln/avuln.go --
+package avuln
+
+type VulnData struct {}
+func (v VulnData) Vuln1() {}
+func (v VulnData) Vuln2() {}
+`
+
+func vulnTestEnv(vulnsDB, proxyData string) (*vulntest.DB, []RunOption, error) {
+ db, err := vulntest.NewDatabase(context.Background(), []byte(vulnsData))
+ if err != nil {
+ return nil, nil, nil
+ }
+ settings := Settings{
+ "codelenses": map[string]bool{
+ "run_govulncheck": true,
+ },
+ }
+ ev := EnvVars{
+ // Let the analyzer read vulnerabilities data from the testdata/vulndb.
+ "GOVULNDB": db.URI(),
+ // When fetching stdlib package vulnerability info,
+ // behave as if our go version is go1.18 for this testing.
+ // The default behavior is to run `go env GOVERSION` (which isn't mutable env var).
+ vulncheck.GoVersionForVulnTest: "go1.18",
+ "_GOPLS_TEST_BINARY_RUN_AS_GOPLS": "true", // needed to run `gopls vulncheck`.
+ "GOSUMDB": "off",
+ }
+ return db, []RunOption{ProxyFiles(proxyData), ev, settings}, nil
+}
+
+func TestRunVulncheckPackageDiagnostics(t *testing.T) {
+ testenv.NeedsGo1Point(t, 18)
+
+ db, opts0, err := vulnTestEnv(vulnsData, proxy1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer db.Clean()
+
+ checkVulncheckDiagnostics := func(env *Env, t *testing.T) {
+ env.OpenFile("go.mod")
+
+ gotDiagnostics := &protocol.PublishDiagnosticsParams{}
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("go.mod", `golang.org/amod`)),
+ ReadDiagnostics("go.mod", gotDiagnostics),
+ )
+
+ testFetchVulncheckResult(t, env, map[string]fetchVulncheckResult{
+ "go.mod": {
+ IDs: []string{"GO-2022-01", "GO-2022-02", "GO-2022-03"},
+ Mode: govulncheck.ModeImports,
+ },
+ })
+
+ wantVulncheckDiagnostics := map[string]vulnDiagExpectation{
+ "golang.org/amod": {
+ diagnostics: []vulnDiag{
+ {
+ msg: "golang.org/amod has known vulnerabilities GO-2022-01, GO-2022-03.",
+ severity: protocol.SeverityInformation,
+ source: string(source.Vulncheck),
+ codeActions: []string{
+ "Run govulncheck to verify",
+ "Upgrade to v1.0.6",
+ "Upgrade to latest",
+ },
+ },
+ },
+ codeActions: []string{
+ "Run govulncheck to verify",
+ "Upgrade to v1.0.6",
+ "Upgrade to latest",
+ },
+ hover: []string{"GO-2022-01", "Fixed in v1.0.4.", "GO-2022-03"},
+ },
+ "golang.org/bmod": {
+ diagnostics: []vulnDiag{
+ {
+ msg: "golang.org/bmod has a vulnerability GO-2022-02.",
+ severity: protocol.SeverityInformation,
+ source: string(source.Vulncheck),
+ codeActions: []string{
+ "Run govulncheck to verify",
+ },
+ },
+ },
+ codeActions: []string{
+ "Run govulncheck to verify",
+ },
+ hover: []string{"GO-2022-02", "This is a long description of this vulnerability.", "No fix is available."},
+ },
+ }
+
+ for pattern, want := range wantVulncheckDiagnostics {
+ modPathDiagnostics := testVulnDiagnostics(t, env, pattern, want, gotDiagnostics)
+
+ gotActions := env.CodeAction("go.mod", modPathDiagnostics)
+ if diff := diffCodeActions(gotActions, want.codeActions); diff != "" {
+ t.Errorf("code actions for %q do not match, got %v, want %v\n%v\n", pattern, gotActions, want.codeActions, diff)
+ continue
+ }
+ }
+ }
+
+ wantNoVulncheckDiagnostics := func(env *Env, t *testing.T) {
+ env.OpenFile("go.mod")
+
+ gotDiagnostics := &protocol.PublishDiagnosticsParams{}
+ env.AfterChange(
+ ReadDiagnostics("go.mod", gotDiagnostics),
+ )
+
+ if len(gotDiagnostics.Diagnostics) > 0 {
+ t.Errorf("Unexpected diagnostics: %v", stringify(gotDiagnostics))
+ }
+ testFetchVulncheckResult(t, env, map[string]fetchVulncheckResult{})
+ }
+
+ for _, tc := range []struct {
+ name string
+ setting Settings
+ wantDiagnostics bool
+ }{
+ {"imports", Settings{"ui.diagnostic.vulncheck": "Imports"}, true},
+ {"default", Settings{}, false},
+ {"invalid", Settings{"ui.diagnostic.vulncheck": "invalid"}, false},
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ // override the settings options to enable diagnostics
+ opts := append(opts0, tc.setting)
+ WithOptions(opts...).Run(t, workspace1, func(t *testing.T, env *Env) {
+ // TODO(hyangah): implement it, so we see GO-2022-01, GO-2022-02, and GO-2022-03.
+ // Check that the actions we get when including all diagnostics at a location return the same result
+ if tc.wantDiagnostics {
+ checkVulncheckDiagnostics(env, t)
+ } else {
+ wantNoVulncheckDiagnostics(env, t)
+ }
+
+ if tc.name == "imports" && tc.wantDiagnostics {
+ // test we get only govulncheck-based diagnostics after "run govulncheck".
+ var result command.RunVulncheckResult
+ env.ExecuteCodeLensCommand("go.mod", command.RunGovulncheck, &result)
+ gotDiagnostics := &protocol.PublishDiagnosticsParams{}
+ env.OnceMet(
+ CompletedProgress(result.Token, nil),
+ ShownMessage("Found"),
+ )
+ env.OnceMet(
+ Diagnostics(env.AtRegexp("go.mod", "golang.org/bmod")),
+ ReadDiagnostics("go.mod", gotDiagnostics),
+ )
+ // We expect only one diagnostic for GO-2022-02.
+ count := 0
+ for _, diag := range gotDiagnostics.Diagnostics {
+ if strings.Contains(diag.Message, "GO-2022-02") {
+ count++
+ if got, want := diag.Severity, protocol.SeverityWarning; got != want {
+ t.Errorf("Diagnostic for GO-2022-02 = %v, want %v", got, want)
+ }
+ }
+ }
+ if count != 1 {
+ t.Errorf("Unexpected number of diagnostics about GO-2022-02 = %v, want 1:\n%+v", count, stringify(gotDiagnostics))
+ }
+ }
+ })
+ })
+ }
+}
+
+func stringify(a interface{}) string {
+ data, _ := json.Marshal(a)
+ return string(data)
+}
+
+func TestRunVulncheckWarning(t *testing.T) {
+ testenv.NeedsGo1Point(t, 18)
+
+ db, opts, err := vulnTestEnv(vulnsData, proxy1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer db.Clean()
+ WithOptions(opts...).Run(t, workspace1, func(t *testing.T, env *Env) {
+ env.OpenFile("go.mod")
+
+ var result command.RunVulncheckResult
+ env.ExecuteCodeLensCommand("go.mod", command.RunGovulncheck, &result)
+ gotDiagnostics := &protocol.PublishDiagnosticsParams{}
+ env.OnceMet(
+ CompletedProgress(result.Token, nil),
+ ShownMessage("Found"),
+ )
+ // Vulncheck diagnostics asynchronous to the vulncheck command.
+ env.OnceMet(
+ Diagnostics(env.AtRegexp("go.mod", `golang.org/amod`)),
+ ReadDiagnostics("go.mod", gotDiagnostics),
+ )
+
+ testFetchVulncheckResult(t, env, map[string]fetchVulncheckResult{
+ "go.mod": {IDs: []string{"GO-2022-01", "GO-2022-02", "GO-2022-03"}, Mode: govulncheck.ModeGovulncheck},
+ })
+ env.OpenFile("x/x.go")
+ lineX := env.RegexpSearch("x/x.go", `c\.C1\(\)\.Vuln1\(\)`).Range.Start
+ env.OpenFile("y/y.go")
+ lineY := env.RegexpSearch("y/y.go", `c\.C2\(\)\(\)`).Range.Start
+ wantDiagnostics := map[string]vulnDiagExpectation{
+ "golang.org/amod": {
+ applyAction: "Upgrade to v1.0.6",
+ diagnostics: []vulnDiag{
+ {
+ msg: "golang.org/amod has a vulnerability used in the code: GO-2022-01.",
+ severity: protocol.SeverityWarning,
+ source: string(source.Govulncheck),
+ codeActions: []string{
+ "Upgrade to v1.0.4",
+ "Upgrade to latest",
+ "Reset govulncheck result",
+ },
+ relatedInfo: []vulnRelatedInfo{
+ {"x.go", uint32(lineX.Line), "[GO-2022-01]"}, // avuln.VulnData.Vuln1
+ {"x.go", uint32(lineX.Line), "[GO-2022-01]"}, // avuln.VulnData.Vuln2
+ },
+ },
+ {
+ msg: "golang.org/amod has a vulnerability GO-2022-03 that is not used in the code.",
+ severity: protocol.SeverityInformation,
+ source: string(source.Govulncheck),
+ codeActions: []string{
+ "Upgrade to v1.0.6",
+ "Upgrade to latest",
+ "Reset govulncheck result",
+ },
+ relatedInfo: []vulnRelatedInfo{
+ {"x.go", uint32(lineX.Line), "[GO-2022-01]"}, // avuln.VulnData.Vuln1
+ {"x.go", uint32(lineX.Line), "[GO-2022-01]"}, // avuln.VulnData.Vuln2
+ },
+ },
+ },
+ codeActions: []string{
+ "Upgrade to v1.0.6",
+ "Upgrade to latest",
+ "Reset govulncheck result",
+ },
+ hover: []string{"GO-2022-01", "Fixed in v1.0.4.", "GO-2022-03"},
+ },
+ "golang.org/bmod": {
+ diagnostics: []vulnDiag{
+ {
+ msg: "golang.org/bmod has a vulnerability used in the code: GO-2022-02.",
+ severity: protocol.SeverityWarning,
+ source: string(source.Govulncheck),
+ codeActions: []string{
+ "Reset govulncheck result", // no fix, but we should give an option to reset.
+ },
+ relatedInfo: []vulnRelatedInfo{
+ {"y.go", uint32(lineY.Line), "[GO-2022-02]"}, // bvuln.Vuln
+ },
+ },
+ },
+ codeActions: []string{
+ "Reset govulncheck result", // no fix, but we should give an option to reset.
+ },
+ hover: []string{"GO-2022-02", "This is a long description of this vulnerability.", "No fix is available."},
+ },
+ }
+
+ for mod, want := range wantDiagnostics {
+ modPathDiagnostics := testVulnDiagnostics(t, env, mod, want, gotDiagnostics)
+
+ // Check that the actions we get when including all diagnostics at a location return the same result
+ gotActions := env.CodeAction("go.mod", modPathDiagnostics)
+ if diff := diffCodeActions(gotActions, want.codeActions); diff != "" {
+ t.Errorf("code actions for %q do not match, expected %v, got %v\n%v\n", mod, want.codeActions, gotActions, diff)
+ continue
+ }
+
+ // Apply the code action matching applyAction.
+ if want.applyAction == "" {
+ continue
+ }
+ for _, action := range gotActions {
+ if action.Title == want.applyAction {
+ env.ApplyCodeAction(action)
+ break
+ }
+ }
+ }
+
+ env.Await(env.DoneWithChangeWatchedFiles())
+ wantGoMod := `module golang.org/entry
+
+go 1.18
+
+require golang.org/cmod v1.1.3
+
+require (
+ golang.org/amod v1.0.6 // indirect
+ golang.org/bmod v0.5.0 // indirect
+)
+`
+ if got := env.BufferText("go.mod"); got != wantGoMod {
+ t.Fatalf("go.mod vulncheck fix failed:\n%s", compare.Text(wantGoMod, got))
+ }
+ })
+}
+
+func diffCodeActions(gotActions []protocol.CodeAction, want []string) string {
+ var gotTitles []string
+ for _, ca := range gotActions {
+ gotTitles = append(gotTitles, ca.Title)
+ }
+ return cmp.Diff(want, gotTitles)
+}
+
+const workspace2 = `
+-- go.mod --
+module golang.org/entry
+
+go 1.18
+
+require golang.org/bmod v0.5.0
+
+-- go.sum --
+golang.org/bmod v0.5.0 h1:MT/ysNRGbCiURc5qThRFWaZ5+rK3pQRPo9w7dYZfMDk=
+golang.org/bmod v0.5.0/go.mod h1:k+zl+Ucu4yLIjndMIuWzD/MnOHy06wqr3rD++y0abVs=
+-- x/x.go --
+package x
+
+import "golang.org/bmod/bvuln"
+
+func F() {
+ // Calls a benign func in bvuln.
+ bvuln.OK()
+}
+`
+
+const proxy2 = `
+-- golang.org/bmod@v0.5.0/bvuln/bvuln.go --
+package bvuln
+
+func Vuln() {} // vulnerable.
+func OK() {} // ok.
+`
+
+func TestGovulncheckInfo(t *testing.T) {
+ testenv.NeedsGo1Point(t, 18)
+
+ db, opts, err := vulnTestEnv(vulnsData, proxy2)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer db.Clean()
+ WithOptions(opts...).Run(t, workspace2, func(t *testing.T, env *Env) {
+ env.OpenFile("go.mod")
+ var result command.RunVulncheckResult
+ env.ExecuteCodeLensCommand("go.mod", command.RunGovulncheck, &result)
+ gotDiagnostics := &protocol.PublishDiagnosticsParams{}
+ env.OnceMet(
+ CompletedProgress(result.Token, nil),
+ ShownMessage("No vulnerabilities found"), // only count affecting vulnerabilities.
+ )
+
+ // Vulncheck diagnostics asynchronous to the vulncheck command.
+ env.OnceMet(
+ Diagnostics(env.AtRegexp("go.mod", "golang.org/bmod")),
+ ReadDiagnostics("go.mod", gotDiagnostics),
+ )
+
+ testFetchVulncheckResult(t, env, map[string]fetchVulncheckResult{"go.mod": {IDs: []string{"GO-2022-02"}, Mode: govulncheck.ModeGovulncheck}})
+ // wantDiagnostics maps a module path in the require
+ // section of a go.mod to diagnostics that will be returned
+ // when running vulncheck.
+ wantDiagnostics := map[string]vulnDiagExpectation{
+ "golang.org/bmod": {
+ diagnostics: []vulnDiag{
+ {
+ msg: "golang.org/bmod has a vulnerability GO-2022-02 that is not used in the code.",
+ severity: protocol.SeverityInformation,
+ source: string(source.Govulncheck),
+ codeActions: []string{
+ "Reset govulncheck result",
+ },
+ },
+ },
+ codeActions: []string{
+ "Reset govulncheck result",
+ },
+ hover: []string{"GO-2022-02", "This is a long description of this vulnerability.", "No fix is available."},
+ },
+ }
+
+ var allActions []protocol.CodeAction
+ for mod, want := range wantDiagnostics {
+ modPathDiagnostics := testVulnDiagnostics(t, env, mod, want, gotDiagnostics)
+ // Check that the actions we get when including all diagnostics at a location return the same result
+ gotActions := env.CodeAction("go.mod", modPathDiagnostics)
+ allActions = append(allActions, gotActions...)
+ if diff := diffCodeActions(gotActions, want.codeActions); diff != "" {
+ t.Errorf("code actions for %q do not match, expected %v, got %v\n%v\n", mod, want.codeActions, gotActions, diff)
+ continue
+ }
+ }
+
+ // Clear Diagnostics by using one of the reset code actions.
+ var reset protocol.CodeAction
+ for _, a := range allActions {
+ if a.Title == "Reset govulncheck result" {
+ reset = a
+ break
+ }
+ }
+ if reset.Title != "Reset govulncheck result" {
+ t.Errorf("failed to find a 'Reset govulncheck result' code action, got %v", allActions)
+ }
+ env.ApplyCodeAction(reset)
+
+ env.Await(NoDiagnostics(ForFile("go.mod")))
+ })
+}
+
+// testVulnDiagnostics finds the require or module statement line for the requireMod in go.mod file
+// and runs checks if diagnostics and code actions associated with the line match expectation.
+func testVulnDiagnostics(t *testing.T, env *Env, pattern string, want vulnDiagExpectation, got *protocol.PublishDiagnosticsParams) []protocol.Diagnostic {
+ t.Helper()
+ loc := env.RegexpSearch("go.mod", pattern)
+ var modPathDiagnostics []protocol.Diagnostic
+ for _, w := range want.diagnostics {
+ // Find the diagnostics at loc.start.
+ var diag *protocol.Diagnostic
+ for _, g := range got.Diagnostics {
+ g := g
+ if g.Range.Start == loc.Range.Start && w.msg == g.Message {
+ modPathDiagnostics = append(modPathDiagnostics, g)
+ diag = &g
+ break
+ }
+ }
+ if diag == nil {
+ t.Errorf("no diagnostic at %q matching %q found\n", pattern, w.msg)
+ continue
+ }
+ if diag.Severity != w.severity || diag.Source != w.source {
+ t.Errorf("incorrect (severity, source) for %q, want (%s, %s) got (%s, %s)\n", w.msg, w.severity, w.source, diag.Severity, diag.Source)
+ }
+ sort.Slice(w.relatedInfo, func(i, j int) bool { return w.relatedInfo[i].less(w.relatedInfo[j]) })
+ if got, want := summarizeRelatedInfo(diag.RelatedInformation), w.relatedInfo; !cmp.Equal(got, want) {
+ t.Errorf("related info for %q do not match, want %v, got %v\n", w.msg, want, got)
+ }
+ // Check expected code actions appear.
+ gotActions := env.CodeAction("go.mod", []protocol.Diagnostic{*diag})
+ if diff := diffCodeActions(gotActions, w.codeActions); diff != "" {
+ t.Errorf("code actions for %q do not match, want %v, got %v\n%v\n", w.msg, w.codeActions, gotActions, diff)
+ continue
+ }
+ }
+ // Check that useful info is supplemented as hover.
+ if len(want.hover) > 0 {
+ hover, _ := env.Hover(loc)
+ for _, part := range want.hover {
+ if !strings.Contains(hover.Value, part) {
+ t.Errorf("hover contents for %q do not match, want %v, got %v\n", pattern, strings.Join(want.hover, ","), hover.Value)
+ break
+ }
+ }
+ }
+ return modPathDiagnostics
+}
+
+// summarizeRelatedInfo converts protocol.DiagnosticRelatedInformation to vulnRelatedInfo
+// that captures only the part that we want to test.
+func summarizeRelatedInfo(rinfo []protocol.DiagnosticRelatedInformation) []vulnRelatedInfo {
+ var res []vulnRelatedInfo
+ for _, r := range rinfo {
+ filename := filepath.Base(r.Location.URI.SpanURI().Filename())
+ message, _, _ := strings.Cut(r.Message, " ")
+ line := r.Location.Range.Start.Line
+ res = append(res, vulnRelatedInfo{filename, line, message})
+ }
+ sort.Slice(res, func(i, j int) bool {
+ return res[i].less(res[j])
+ })
+ return res
+}
+
+type vulnRelatedInfo struct {
+ Filename string
+ Line uint32
+ Message string
+}
+
+type vulnDiag struct {
+ msg string
+ severity protocol.DiagnosticSeverity
+ // codeActions is a list titles of code actions that we get with this
+ // diagnostics as the context.
+ codeActions []string
+ // relatedInfo is related info message prefixed by the file base.
+ // See summarizeRelatedInfo.
+ relatedInfo []vulnRelatedInfo
+ // diagnostic source.
+ source string
+}
+
+func (i vulnRelatedInfo) less(j vulnRelatedInfo) bool {
+ if i.Filename != j.Filename {
+ return i.Filename < j.Filename
+ }
+ if i.Line != j.Line {
+ return i.Line < j.Line
+ }
+ return i.Message < j.Message
+}
+
+// vulnDiagExpectation maps a module path in the require
+// section of a go.mod to diagnostics that will be returned
+// when running vulncheck.
+type vulnDiagExpectation struct {
+ // applyAction is the title of the code action to run for this module.
+ // If empty, no code actions will be executed.
+ applyAction string
+ // diagnostics is the list of diagnostics we expect at the require line for
+ // the module path.
+ diagnostics []vulnDiag
+ // codeActions is a list titles of code actions that we get with context
+ // diagnostics.
+ codeActions []string
+ // hover message is the list of expected hover message parts for this go.mod require line.
+ // all parts must appear in the hover message.
+ hover []string
+}
diff --git a/gopls/internal/regtest/misc/workspace_symbol_test.go b/gopls/internal/regtest/misc/workspace_symbol_test.go
index a21d47312..a492e1d49 100644
--- a/gopls/internal/regtest/misc/workspace_symbol_test.go
+++ b/gopls/internal/regtest/misc/workspace_symbol_test.go
@@ -7,16 +7,12 @@ package misc
import (
"testing"
- "golang.org/x/tools/internal/lsp/protocol"
- . "golang.org/x/tools/internal/lsp/regtest"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/testenv"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
+ "golang.org/x/tools/gopls/internal/lsp/source"
)
func TestWorkspaceSymbolMissingMetadata(t *testing.T) {
- // We get 2 symbols on 1.12, for some reason.
- testenv.NeedsGo1Point(t, 13)
-
const files = `
-- go.mod --
module mod.com
@@ -26,26 +22,27 @@ go 1.17
package p
const C1 = "a.go"
--- ignore.go --
+-- exclude.go --
-// +build ignore
+//go:build exclude
+// +build exclude
-package ignore
+package exclude
-const C2 = "ignore.go"
+const C2 = "exclude.go"
`
Run(t, files, func(t *testing.T, env *Env) {
env.OpenFile("a.go")
- syms := env.WorkspaceSymbol("C")
+ syms := env.Symbol("C")
if got, want := len(syms), 1; got != want {
t.Errorf("got %d symbols, want %d", got, want)
}
// Opening up an ignored file will result in an overlay with missing
// metadata, but this shouldn't break workspace symbols requests.
- env.OpenFile("ignore.go")
- syms = env.WorkspaceSymbol("C")
+ env.OpenFile("exclude.go")
+ syms = env.Symbol("C")
if got, want := len(syms), 1; got != want {
t.Errorf("got %d symbols, want %d", got, want)
}
@@ -72,9 +69,7 @@ const (
var symbolMatcher = string(source.SymbolFastFuzzy)
WithOptions(
- EditorConfig{
- SymbolMatcher: &symbolMatcher,
- },
+ Settings{"symbolMatcher": symbolMatcher},
).Run(t, files, func(t *testing.T, env *Env) {
want := []string{
"Foo", // prefer exact segment matches first
@@ -83,8 +78,8 @@ const (
"Fooey", // shorter than Fooest, Foobar
"Fooest",
}
- got := env.WorkspaceSymbol("Foo")
- compareSymbols(t, got, want)
+ got := env.Symbol("Foo")
+ compareSymbols(t, got, want...)
})
}
@@ -105,19 +100,17 @@ const (
var symbolMatcher = string(source.SymbolFastFuzzy)
WithOptions(
- EditorConfig{
- SymbolMatcher: &symbolMatcher,
- },
+ Settings{"symbolMatcher": symbolMatcher},
).Run(t, files, func(t *testing.T, env *Env) {
- compareSymbols(t, env.WorkspaceSymbol("ABC"), []string{"ABC", "AxxBxxCxx"})
- compareSymbols(t, env.WorkspaceSymbol("'ABC"), []string{"ABC"})
- compareSymbols(t, env.WorkspaceSymbol("^mod.com"), []string{"mod.com/a.ABC", "mod.com/a.AxxBxxCxx"})
- compareSymbols(t, env.WorkspaceSymbol("^mod.com Axx"), []string{"mod.com/a.AxxBxxCxx"})
- compareSymbols(t, env.WorkspaceSymbol("C$"), []string{"ABC"})
+ compareSymbols(t, env.Symbol("ABC"), "ABC", "AxxBxxCxx")
+ compareSymbols(t, env.Symbol("'ABC"), "ABC")
+ compareSymbols(t, env.Symbol("^mod.com"), "mod.com/a.ABC", "mod.com/a.AxxBxxCxx")
+ compareSymbols(t, env.Symbol("^mod.com Axx"), "mod.com/a.AxxBxxCxx")
+ compareSymbols(t, env.Symbol("C$"), "ABC")
})
}
-func compareSymbols(t *testing.T, got []protocol.SymbolInformation, want []string) {
+func compareSymbols(t *testing.T, got []protocol.SymbolInformation, want ...string) {
t.Helper()
if len(got) != len(want) {
t.Errorf("got %d symbols, want %d", len(got), len(want))
diff --git a/gopls/internal/regtest/modfile/modfile_test.go b/gopls/internal/regtest/modfile/modfile_test.go
index 868aa70aa..483118dd3 100644
--- a/gopls/internal/regtest/modfile/modfile_test.go
+++ b/gopls/internal/regtest/modfile/modfile_test.go
@@ -11,14 +11,16 @@ import (
"testing"
"golang.org/x/tools/gopls/internal/hooks"
- . "golang.org/x/tools/internal/lsp/regtest"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
+ "golang.org/x/tools/gopls/internal/lsp/tests/compare"
+ "golang.org/x/tools/internal/bug"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/tests"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
"golang.org/x/tools/internal/testenv"
)
func TestMain(m *testing.M) {
+ bug.PanicOnBugs = true
Main(m, hooks.Options)
}
@@ -65,8 +67,6 @@ const Name = "Hello"
`
func TestModFileModification(t *testing.T) {
- testenv.NeedsGo1Point(t, 14)
-
const untidyModule = `
-- a/go.mod --
module mod.com
@@ -92,54 +92,59 @@ func main() {
// modify the go.mod file.
goModContent := env.ReadWorkspaceFile("a/go.mod")
env.OpenFile("a/main.go")
- env.Await(
- env.DiagnosticAtRegexp("a/main.go", "\"example.com/blah\""),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("a/main.go", "\"example.com/blah\"")),
)
if got := env.ReadWorkspaceFile("a/go.mod"); got != goModContent {
- t.Fatalf("go.mod changed on disk:\n%s", tests.Diff(t, goModContent, got))
+ t.Fatalf("go.mod changed on disk:\n%s", compare.Text(goModContent, got))
}
// Save the buffer, which will format and organize imports.
// Confirm that the go.mod file still does not change.
env.SaveBuffer("a/main.go")
- env.Await(
- env.DiagnosticAtRegexp("a/main.go", "\"example.com/blah\""),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("a/main.go", "\"example.com/blah\"")),
)
if got := env.ReadWorkspaceFile("a/go.mod"); got != goModContent {
- t.Fatalf("go.mod changed on disk:\n%s", tests.Diff(t, goModContent, got))
+ t.Fatalf("go.mod changed on disk:\n%s", compare.Text(goModContent, got))
}
})
})
// Reproduce golang/go#40269 by deleting and recreating main.go.
t.Run("delete main.go", func(t *testing.T) {
- t.Skip("This test will be flaky until golang/go#40269 is resolved.")
-
runner.Run(t, untidyModule, func(t *testing.T, env *Env) {
goModContent := env.ReadWorkspaceFile("a/go.mod")
mainContent := env.ReadWorkspaceFile("a/main.go")
env.OpenFile("a/main.go")
env.SaveBuffer("a/main.go")
+ // Ensure that we're done processing all the changes caused by opening
+ // and saving above. If not, we may run into a file locking issue on
+ // windows.
+ //
+ // If this proves insufficient, env.RemoveWorkspaceFile can be updated to
+ // retry file lock errors on windows.
+ env.AfterChange()
env.RemoveWorkspaceFile("a/main.go")
- env.Await(
- env.DoneWithOpen(),
- env.DoneWithSave(),
- env.DoneWithChangeWatchedFiles(),
- )
- env.WriteWorkspaceFile("main.go", mainContent)
- env.Await(
- env.DiagnosticAtRegexp("main.go", "\"example.com/blah\""),
+ // TODO(rfindley): awaiting here shouldn't really be necessary. We should
+ // be consistent eventually.
+ //
+ // Probably this was meant to exercise a race with the change below.
+ env.AfterChange()
+
+ env.WriteWorkspaceFile("a/main.go", mainContent)
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("a/main.go", "\"example.com/blah\"")),
)
- if got := env.ReadWorkspaceFile("go.mod"); got != goModContent {
- t.Fatalf("go.mod changed on disk:\n%s", tests.Diff(t, goModContent, got))
+ if got := env.ReadWorkspaceFile("a/go.mod"); got != goModContent {
+ t.Fatalf("go.mod changed on disk:\n%s", compare.Text(goModContent, got))
}
})
})
}
func TestGoGetFix(t *testing.T) {
- testenv.NeedsGo1Point(t, 14)
const mod = `
-- a/go.mod --
module mod.com
@@ -170,11 +175,9 @@ require example.com v1.2.3
}
env.OpenFile("a/main.go")
var d protocol.PublishDiagnosticsParams
- env.Await(
- OnceMet(
- env.DiagnosticAtRegexp("a/main.go", `"example.com/blah"`),
- ReadDiagnostics("a/main.go", &d),
- ),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("a/main.go", `"example.com/blah"`)),
+ ReadDiagnostics("a/main.go", &d),
)
var goGetDiag protocol.Diagnostic
for _, diag := range d.Diagnostics {
@@ -184,14 +187,13 @@ require example.com v1.2.3
}
env.ApplyQuickFixes("a/main.go", []protocol.Diagnostic{goGetDiag})
if got := env.ReadWorkspaceFile("a/go.mod"); got != want {
- t.Fatalf("unexpected go.mod content:\n%s", tests.Diff(t, want, got))
+ t.Fatalf("unexpected go.mod content:\n%s", compare.Text(want, got))
}
})
}
// Tests that multiple missing dependencies gives good single fixes.
func TestMissingDependencyFixes(t *testing.T) {
- testenv.NeedsGo1Point(t, 14)
const mod = `
-- a/go.mod --
module mod.com
@@ -220,11 +222,9 @@ require random.org v1.2.3
}.Run(t, mod, func(t *testing.T, env *Env) {
env.OpenFile("a/main.go")
var d protocol.PublishDiagnosticsParams
- env.Await(
- OnceMet(
- env.DiagnosticAtRegexp("a/main.go", `"random.org/blah"`),
- ReadDiagnostics("a/main.go", &d),
- ),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("a/main.go", `"random.org/blah"`)),
+ ReadDiagnostics("a/main.go", &d),
)
var randomDiag protocol.Diagnostic
for _, diag := range d.Diagnostics {
@@ -234,7 +234,7 @@ require random.org v1.2.3
}
env.ApplyQuickFixes("a/main.go", []protocol.Diagnostic{randomDiag})
if got := env.ReadWorkspaceFile("a/go.mod"); got != want {
- t.Fatalf("unexpected go.mod content:\n%s", tests.Diff(t, want, got))
+ t.Fatalf("unexpected go.mod content:\n%s", compare.Text(want, got))
}
})
}
@@ -276,11 +276,9 @@ require random.org v1.2.3
}.Run(t, mod, func(t *testing.T, env *Env) {
env.OpenFile("a/main.go")
var d protocol.PublishDiagnosticsParams
- env.Await(
- OnceMet(
- env.DiagnosticAtRegexp("a/main.go", `"random.org/blah"`),
- ReadDiagnostics("a/main.go", &d),
- ),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("a/main.go", `"random.org/blah"`)),
+ ReadDiagnostics("a/main.go", &d),
)
var randomDiag protocol.Diagnostic
for _, diag := range d.Diagnostics {
@@ -290,14 +288,12 @@ require random.org v1.2.3
}
env.ApplyQuickFixes("a/main.go", []protocol.Diagnostic{randomDiag})
if got := env.ReadWorkspaceFile("a/go.mod"); got != want {
- t.Fatalf("unexpected go.mod content:\n%s", tests.Diff(t, want, got))
+ t.Fatalf("unexpected go.mod content:\n%s", compare.Text(want, got))
}
})
}
func TestIndirectDependencyFix(t *testing.T) {
- testenv.NeedsGo1Point(t, 14)
-
const mod = `
-- a/go.mod --
module mod.com
@@ -329,21 +325,18 @@ require example.com v1.2.3
}.Run(t, mod, func(t *testing.T, env *Env) {
env.OpenFile("a/go.mod")
var d protocol.PublishDiagnosticsParams
- env.Await(
- OnceMet(
- env.DiagnosticAtRegexp("a/go.mod", "// indirect"),
- ReadDiagnostics("a/go.mod", &d),
- ),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("a/go.mod", "// indirect")),
+ ReadDiagnostics("a/go.mod", &d),
)
env.ApplyQuickFixes("a/go.mod", d.Diagnostics)
- if got := env.Editor.BufferText("a/go.mod"); got != want {
- t.Fatalf("unexpected go.mod content:\n%s", tests.Diff(t, want, got))
+ if got := env.BufferText("a/go.mod"); got != want {
+ t.Fatalf("unexpected go.mod content:\n%s", compare.Text(want, got))
}
})
}
func TestUnusedDiag(t *testing.T) {
- testenv.NeedsGo1Point(t, 14)
const proxy = `
-- example.com@v1.0.0/x.go --
@@ -374,15 +367,13 @@ go 1.14
}.Run(t, files, func(t *testing.T, env *Env) {
env.OpenFile("a/go.mod")
var d protocol.PublishDiagnosticsParams
- env.Await(
- OnceMet(
- env.DiagnosticAtRegexp("a/go.mod", `require example.com`),
- ReadDiagnostics("a/go.mod", &d),
- ),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("a/go.mod", `require example.com`)),
+ ReadDiagnostics("a/go.mod", &d),
)
env.ApplyQuickFixes("a/go.mod", d.Diagnostics)
- if got := env.Editor.BufferText("a/go.mod"); got != want {
- t.Fatalf("unexpected go.mod content:\n%s", tests.Diff(t, want, got))
+ if got := env.BufferText("a/go.mod"); got != want {
+ t.Fatalf("unexpected go.mod content:\n%s", compare.Text(want, got))
}
})
}
@@ -390,7 +381,6 @@ go 1.14
// Test to reproduce golang/go#39041. It adds a new require to a go.mod file
// that already has an unused require.
func TestNewDepWithUnusedDep(t *testing.T) {
- testenv.NeedsGo1Point(t, 14)
const proxy = `
-- github.com/esimov/caire@v1.2.5/go.mod --
@@ -437,11 +427,9 @@ func _() {
}.Run(t, repro, func(t *testing.T, env *Env) {
env.OpenFile("a/main.go")
var d protocol.PublishDiagnosticsParams
- env.Await(
- OnceMet(
- env.DiagnosticAtRegexp("a/main.go", `"github.com/esimov/caire"`),
- ReadDiagnostics("a/main.go", &d),
- ),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("a/main.go", `"github.com/esimov/caire"`)),
+ ReadDiagnostics("a/main.go", &d),
)
env.ApplyQuickFixes("a/main.go", d.Diagnostics)
want := `module mod.com
@@ -454,7 +442,7 @@ require (
)
`
if got := env.ReadWorkspaceFile("a/go.mod"); got != want {
- t.Fatalf("TestNewDepWithUnusedDep failed:\n%s", tests.Diff(t, want, got))
+ t.Fatalf("TestNewDepWithUnusedDep failed:\n%s", compare.Text(want, got))
}
})
}
@@ -463,8 +451,6 @@ require (
// the file watching GlobPattern in the capability registration. See
// golang/go#39384.
func TestModuleChangesOnDisk(t *testing.T) {
- testenv.NeedsGo1Point(t, 14)
-
const mod = `
-- a/go.mod --
module mod.com
@@ -485,10 +471,13 @@ func main() {
{"default", WithOptions(ProxyFiles(proxy), WorkspaceFolders("a"))},
{"nested", WithOptions(ProxyFiles(proxy))},
}.Run(t, mod, func(t *testing.T, env *Env) {
- env.Await(env.DiagnosticAtRegexp("a/go.mod", "require"))
+ env.OnceMet(
+ InitialWorkspaceLoad,
+ Diagnostics(env.AtRegexp("a/go.mod", "require")),
+ )
env.RunGoCommandInDir("a", "mod", "tidy")
- env.Await(
- EmptyDiagnostics("a/go.mod"),
+ env.AfterChange(
+ NoDiagnostics(ForFile("a/go.mod")),
)
})
}
@@ -496,8 +485,6 @@ func main() {
// Tests golang/go#39784: a missing indirect dependency, necessary
// due to blah@v2.0.0's incomplete go.mod file.
func TestBadlyVersionedModule(t *testing.T) {
- testenv.NeedsGo1Point(t, 14)
-
const proxy = `
-- example.com/blah/@v/v1.0.0.mod --
module example.com
@@ -544,13 +531,15 @@ var _ = blah.Name
}.Run(t, files, func(t *testing.T, env *Env) {
env.OpenFile("a/main.go")
env.OpenFile("a/go.mod")
- env.Await(
+ var modDiags protocol.PublishDiagnosticsParams
+ env.AfterChange(
// We would like for the error to appear in the v2 module, but
// as of writing non-workspace packages are not diagnosed.
- env.DiagnosticAtRegexpWithMessage("a/main.go", `"example.com/blah/v2"`, "cannot find module providing"),
- env.DiagnosticAtRegexpWithMessage("a/go.mod", `require example.com/blah/v2`, "cannot find module providing"),
+ Diagnostics(env.AtRegexp("a/main.go", `"example.com/blah/v2"`), WithMessage("cannot find module providing")),
+ Diagnostics(env.AtRegexp("a/go.mod", `require example.com/blah/v2`), WithMessage("cannot find module providing")),
+ ReadDiagnostics("a/go.mod", &modDiags),
)
- env.ApplyQuickFixes("a/go.mod", env.DiagnosticsFor("a/go.mod").Diagnostics)
+ env.ApplyQuickFixes("a/go.mod", modDiags.Diagnostics)
const want = `module mod.com
go 1.12
@@ -561,9 +550,9 @@ require (
)
`
env.SaveBuffer("a/go.mod")
- env.Await(EmptyDiagnostics("a/main.go"))
- if got := env.Editor.BufferText("a/go.mod"); got != want {
- t.Fatalf("suggested fixes failed:\n%s", tests.Diff(t, want, got))
+ env.AfterChange(NoDiagnostics(ForFile("a/main.go")))
+ if got := env.BufferText("a/go.mod"); got != want {
+ t.Fatalf("suggested fixes failed:\n%s", compare.Text(want, got))
}
})
}
@@ -573,9 +562,6 @@ func TestUnknownRevision(t *testing.T) {
if runtime.GOOS == "plan9" {
t.Skipf("skipping test that fails for unknown reasons on plan9; see https://go.dev/issue/50477")
}
-
- testenv.NeedsGo1Point(t, 14)
-
const unknown = `
-- a/go.mod --
module mod.com
@@ -601,19 +587,17 @@ func main() {
t.Run("bad", func(t *testing.T) {
runner.Run(t, unknown, func(t *testing.T, env *Env) {
env.OpenFile("a/go.mod")
- env.Await(
- env.DiagnosticAtRegexp("a/go.mod", "example.com v1.2.2"),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("a/go.mod", "example.com v1.2.2")),
)
env.RegexpReplace("a/go.mod", "v1.2.2", "v1.2.3")
env.SaveBuffer("a/go.mod") // Save to trigger diagnostics.
d := protocol.PublishDiagnosticsParams{}
- env.Await(
- OnceMet(
- // Make sure the diagnostic mentions the new version -- the old diagnostic is in the same place.
- env.DiagnosticAtRegexpWithMessage("a/go.mod", "example.com v1.2.3", "example.com@v1.2.3"),
- ReadDiagnostics("a/go.mod", &d),
- ),
+ env.AfterChange(
+ // Make sure the diagnostic mentions the new version -- the old diagnostic is in the same place.
+ Diagnostics(env.AtRegexp("a/go.mod", "example.com v1.2.3"), WithMessage("example.com@v1.2.3")),
+ ReadDiagnostics("a/go.mod", &d),
)
qfs := env.GetQuickFixes("a/go.mod", d.Diagnostics)
if len(qfs) == 0 {
@@ -621,9 +605,9 @@ func main() {
}
env.ApplyCodeAction(qfs[0]) // Arbitrarily pick a single fix to apply. Applying all of them seems to cause trouble in this particular test.
env.SaveBuffer("a/go.mod") // Save to trigger diagnostics.
- env.Await(
- EmptyDiagnostics("a/go.mod"),
- env.DiagnosticAtRegexp("a/main.go", "x = "),
+ env.AfterChange(
+ NoDiagnostics(ForFile("a/go.mod")),
+ Diagnostics(env.AtRegexp("a/main.go", "x = ")),
)
})
})
@@ -652,18 +636,18 @@ func main() {
t.Run("good", func(t *testing.T) {
runner.Run(t, known, func(t *testing.T, env *Env) {
env.OpenFile("a/go.mod")
- env.Await(
- env.DiagnosticAtRegexp("a/main.go", "x = "),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("a/main.go", "x = ")),
)
env.RegexpReplace("a/go.mod", "v1.2.3", "v1.2.2")
env.Editor.SaveBuffer(env.Ctx, "a/go.mod") // go.mod changes must be on disk
- env.Await(
- env.DiagnosticAtRegexp("a/go.mod", "example.com v1.2.2"),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("a/go.mod", "example.com v1.2.2")),
)
env.RegexpReplace("a/go.mod", "v1.2.2", "v1.2.3")
env.Editor.SaveBuffer(env.Ctx, "a/go.mod") // go.mod changes must be on disk
- env.Await(
- env.DiagnosticAtRegexp("a/main.go", "x = "),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("a/main.go", "x = ")),
)
})
})
@@ -672,8 +656,6 @@ func main() {
// Confirm that an error in an indirect dependency of a requirement is surfaced
// as a diagnostic in the go.mod file.
func TestErrorInIndirectDependency(t *testing.T) {
- testenv.NeedsGo1Point(t, 14)
-
const badProxy = `
-- example.com@v1.2.3/go.mod --
module example.com
@@ -715,8 +697,8 @@ func main() {
{"nested", WithOptions(ProxyFiles(badProxy))},
}.Run(t, module, func(t *testing.T, env *Env) {
env.OpenFile("a/go.mod")
- env.Await(
- env.DiagnosticAtRegexp("a/go.mod", "require example.com v1.2.3"),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("a/go.mod", "require example.com v1.2.3")),
)
})
}
@@ -738,35 +720,37 @@ func main() {
}
`
WithOptions(
- EditorConfig{
- Env: map[string]string{
- "GOFLAGS": "-mod=readonly",
- },
- },
+ EnvVars{"GOFLAGS": "-mod=readonly"},
ProxyFiles(proxy),
- Modes(Singleton),
+ Modes(Default),
).Run(t, mod, func(t *testing.T, env *Env) {
env.OpenFile("main.go")
original := env.ReadWorkspaceFile("go.mod")
- env.Await(
- env.DiagnosticAtRegexp("main.go", `"example.com/blah"`),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("main.go", `"example.com/blah"`)),
)
got := env.ReadWorkspaceFile("go.mod")
if got != original {
- t.Fatalf("go.mod file modified:\n%s", tests.Diff(t, original, got))
+ t.Fatalf("go.mod file modified:\n%s", compare.Text(original, got))
}
env.RunGoCommand("get", "example.com/blah@v1.2.3")
env.RunGoCommand("mod", "tidy")
- env.Await(
- EmptyDiagnostics("main.go"),
+ env.AfterChange(
+ NoDiagnostics(ForFile("main.go")),
)
})
}
func TestMultiModuleModDiagnostics(t *testing.T) {
- testenv.NeedsGo1Point(t, 14)
-
+ testenv.NeedsGo1Point(t, 18) // uses go.work
const mod = `
+-- go.work --
+go 1.18
+
+use (
+ a
+ b
+)
-- a/go.mod --
module moda.com
@@ -799,17 +783,17 @@ func main() {
`
WithOptions(
ProxyFiles(workspaceProxy),
- Modes(Experimental),
).Run(t, mod, func(t *testing.T, env *Env) {
- env.Await(
- env.DiagnosticAtRegexpWithMessage("a/go.mod", "example.com v1.2.3", "is not used"),
+ env.AfterChange(
+ Diagnostics(
+ env.AtRegexp("a/go.mod", "example.com v1.2.3"),
+ WithMessage("is not used"),
+ ),
)
})
}
func TestModTidyWithBuildTags(t *testing.T) {
- testenv.NeedsGo1Point(t, 14)
-
const mod = `
-- go.mod --
module mod.com
@@ -828,12 +812,11 @@ func main() {
`
WithOptions(
ProxyFiles(workspaceProxy),
- EditorConfig{
- BuildFlags: []string{"-tags", "bob"},
- },
+ Settings{"buildFlags": []string{"-tags", "bob"}},
).Run(t, mod, func(t *testing.T, env *Env) {
- env.Await(
- env.DiagnosticAtRegexp("main.go", `"example.com/blah"`),
+ env.OnceMet(
+ InitialWorkspaceLoad,
+ Diagnostics(env.AtRegexp("main.go", `"example.com/blah"`)),
)
})
}
@@ -852,15 +835,13 @@ func main() {}
Run(t, mod, func(t *testing.T, env *Env) {
env.OpenFile("go.mod")
env.RegexpReplace("go.mod", "module", "modul")
- env.Await(
- env.DiagnosticAtRegexp("go.mod", "modul"),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("go.mod", "modul")),
)
})
}
func TestSumUpdateFixesDiagnostics(t *testing.T) {
- testenv.NeedsGo1Point(t, 14)
-
const mod = `
-- go.mod --
module mod.com
@@ -887,16 +868,17 @@ func main() {
).Run(t, mod, func(t *testing.T, env *Env) {
d := &protocol.PublishDiagnosticsParams{}
env.OpenFile("go.mod")
- env.Await(
- OnceMet(
- env.GoSumDiagnostic("go.mod", `example.com v1.2.3`),
- ReadDiagnostics("go.mod", d),
+ env.AfterChange(
+ Diagnostics(
+ env.AtRegexp("go.mod", `example.com v1.2.3`),
+ WithMessage("go.sum is out of sync"),
),
+ ReadDiagnostics("go.mod", d),
)
env.ApplyQuickFixes("go.mod", d.Diagnostics)
env.SaveBuffer("go.mod") // Save to trigger diagnostics.
- env.Await(
- EmptyDiagnostics("go.mod"),
+ env.AfterChange(
+ NoDiagnostics(ForFile("go.mod")),
)
})
}
@@ -924,20 +906,20 @@ func hello() {}
// TODO(rFindley) this doesn't work in multi-module workspace mode, because
// it keeps around the last parsing modfile. Update this test to also
// exercise the workspace module.
- Modes(Singleton),
+ Modes(Default),
).Run(t, mod, func(t *testing.T, env *Env) {
env.OpenFile("go.mod")
env.Await(env.DoneWithOpen())
env.RegexpReplace("go.mod", "module", "modul")
// Confirm that we still have metadata with only on-disk edits.
env.OpenFile("main.go")
- file, _ := env.GoToDefinition("main.go", env.RegexpSearch("main.go", "hello"))
- if filepath.Base(file) != "hello.go" {
- t.Fatalf("expected definition in hello.go, got %s", file)
+ loc := env.GoToDefinition(env.RegexpSearch("main.go", "hello"))
+ if filepath.Base(string(loc.URI)) != "hello.go" {
+ t.Fatalf("expected definition in hello.go, got %s", loc.URI)
}
// Confirm that we no longer have metadata when the file is saved.
env.SaveBufferWithoutActions("go.mod")
- _, _, err := env.Editor.GoToDefinition(env.Ctx, "main.go", env.RegexpSearch("main.go", "hello"))
+ _, err := env.Editor.GoToDefinition(env.Ctx, env.RegexpSearch("main.go", "hello"))
if err == nil {
t.Fatalf("expected error, got none")
}
@@ -945,8 +927,6 @@ func hello() {}
}
func TestRemoveUnusedDependency(t *testing.T) {
- testenv.NeedsGo1Point(t, 14)
-
const proxy = `
-- hasdep.com@v1.2.3/go.mod --
module hasdep.com
@@ -996,19 +976,17 @@ func main() {}
).Run(t, mod, func(t *testing.T, env *Env) {
env.OpenFile("go.mod")
d := &protocol.PublishDiagnosticsParams{}
- env.Await(
- OnceMet(
- env.DiagnosticAtRegexp("go.mod", "require hasdep.com v1.2.3"),
- ReadDiagnostics("go.mod", d),
- ),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("go.mod", "require hasdep.com v1.2.3")),
+ ReadDiagnostics("go.mod", d),
)
const want = `module mod.com
go 1.12
`
env.ApplyQuickFixes("go.mod", d.Diagnostics)
- if got := env.Editor.BufferText("go.mod"); got != want {
- t.Fatalf("unexpected content in go.mod:\n%s", tests.Diff(t, want, got))
+ if got := env.BufferText("go.mod"); got != want {
+ t.Fatalf("unexpected content in go.mod:\n%s", compare.Text(want, got))
}
})
})
@@ -1039,12 +1017,10 @@ func main() {}
).Run(t, mod, func(t *testing.T, env *Env) {
d := &protocol.PublishDiagnosticsParams{}
env.OpenFile("go.mod")
- pos := env.RegexpSearch("go.mod", "require hasdep.com v1.2.3")
- env.Await(
- OnceMet(
- DiagnosticAt("go.mod", pos.Line, pos.Column),
- ReadDiagnostics("go.mod", d),
- ),
+ pos := env.RegexpSearch("go.mod", "require hasdep.com v1.2.3").Range.Start
+ env.AfterChange(
+ Diagnostics(AtPosition("go.mod", pos.Line, pos.Character)),
+ ReadDiagnostics("go.mod", d),
)
const want = `module mod.com
@@ -1060,15 +1036,14 @@ require random.com v1.2.3
diagnostics = append(diagnostics, d)
}
env.ApplyQuickFixes("go.mod", diagnostics)
- if got := env.Editor.BufferText("go.mod"); got != want {
- t.Fatalf("unexpected content in go.mod:\n%s", tests.Diff(t, want, got))
+ if got := env.BufferText("go.mod"); got != want {
+ t.Fatalf("unexpected content in go.mod:\n%s", compare.Text(want, got))
}
})
})
}
func TestSumUpdateQuickFix(t *testing.T) {
- testenv.NeedsGo1Point(t, 14)
const mod = `
-- go.mod --
module mod.com
@@ -1092,29 +1067,28 @@ func main() {
`
WithOptions(
ProxyFiles(workspaceProxy),
- Modes(Singleton),
+ Modes(Default),
).Run(t, mod, func(t *testing.T, env *Env) {
env.OpenFile("go.mod")
params := &protocol.PublishDiagnosticsParams{}
- env.Await(
- OnceMet(
- env.GoSumDiagnostic("go.mod", "example.com"),
- ReadDiagnostics("go.mod", params),
+ env.AfterChange(
+ Diagnostics(
+ env.AtRegexp("go.mod", `example.com`),
+ WithMessage("go.sum is out of sync"),
),
+ ReadDiagnostics("go.mod", params),
)
env.ApplyQuickFixes("go.mod", params.Diagnostics)
const want = `example.com v1.2.3 h1:Yryq11hF02fEf2JlOS2eph+ICE2/ceevGV3C9dl5V/c=
example.com v1.2.3/go.mod h1:Y2Rc5rVWjWur0h3pd9aEvK5Pof8YKDANh9gHA2Maujo=
`
if got := env.ReadWorkspaceFile("go.sum"); got != want {
- t.Fatalf("unexpected go.sum contents:\n%s", tests.Diff(t, want, got))
+ t.Fatalf("unexpected go.sum contents:\n%s", compare.Text(want, got))
}
})
}
func TestDownloadDeps(t *testing.T) {
- testenv.NeedsGo1Point(t, 14)
-
const proxy = `
-- example.com@v1.2.3/go.mod --
module example.com
@@ -1161,24 +1135,26 @@ func main() {
`
WithOptions(
ProxyFiles(proxy),
- Modes(Singleton),
+ Modes(Default),
).Run(t, mod, func(t *testing.T, env *Env) {
env.OpenFile("main.go")
d := &protocol.PublishDiagnosticsParams{}
- env.Await(
- env.DiagnosticAtRegexpWithMessage("main.go", `"example.com/blah"`, `could not import example.com/blah (no required module provides package "example.com/blah")`),
+ env.AfterChange(
+ Diagnostics(
+ env.AtRegexp("main.go", `"example.com/blah"`),
+ WithMessage(`could not import example.com/blah (no required module provides package "example.com/blah")`),
+ ),
ReadDiagnostics("main.go", d),
)
env.ApplyQuickFixes("main.go", d.Diagnostics)
- env.Await(
- EmptyDiagnostics("main.go"),
- NoDiagnostics("go.mod"),
+ env.AfterChange(
+ NoDiagnostics(ForFile("main.go")),
+ NoDiagnostics(ForFile("go.mod")),
)
})
}
func TestInvalidGoVersion(t *testing.T) {
- testenv.NeedsGo1Point(t, 14) // Times out on 1.13 for reasons unclear. Not worth worrying about.
const files = `
-- go.mod --
module mod.com
@@ -1188,8 +1164,25 @@ go foo
package main
`
Run(t, files, func(t *testing.T, env *Env) {
- env.Await(env.DiagnosticAtRegexpWithMessage("go.mod", `go foo`, "invalid go version"))
+ env.OnceMet(
+ InitialWorkspaceLoad,
+ Diagnostics(env.AtRegexp("go.mod", `go foo`), WithMessage("invalid go version")),
+ )
env.WriteWorkspaceFile("go.mod", "module mod.com \n\ngo 1.12\n")
- env.Await(EmptyDiagnostics("go.mod"))
+ env.AfterChange(NoDiagnostics(ForFile("go.mod")))
+ })
+}
+
+// This is a regression test for a bug in the line-oriented implementation
+// of the "apply diffs" operation used by the fake editor.
+func TestIssue57627(t *testing.T) {
+ const files = `
+-- go.work --
+package main
+`
+ Run(t, files, func(t *testing.T, env *Env) {
+ env.OpenFile("go.work")
+ env.SetBufferContent("go.work", "go 1.18\nuse moda/a")
+ env.SaveBuffer("go.work") // doesn't fail
})
}
diff --git a/gopls/internal/regtest/template/template_test.go b/gopls/internal/regtest/template/template_test.go
index b0acdfeb8..48635643c 100644
--- a/gopls/internal/regtest/template/template_test.go
+++ b/gopls/internal/regtest/template/template_test.go
@@ -9,11 +9,13 @@ import (
"testing"
"golang.org/x/tools/gopls/internal/hooks"
- "golang.org/x/tools/internal/lsp/protocol"
- . "golang.org/x/tools/internal/lsp/regtest"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
+ "golang.org/x/tools/internal/bug"
)
func TestMain(m *testing.M) {
+ bug.PanicOnBugs = true
Main(m, hooks.Options)
}
@@ -33,11 +35,9 @@ go 1.17
{{end}}
`
WithOptions(
- EditorConfig{
- Settings: map[string]interface{}{
- "templateExtensions": []string{"tmpl"},
- "semanticTokens": true,
- },
+ Settings{
+ "templateExtensions": []string{"tmpl"},
+ "semanticTokens": true,
},
).Run(t, files, func(t *testing.T, env *Env) {
var p protocol.SemanticTokensParams
@@ -64,16 +64,19 @@ Hello {{}} <-- missing body
{{end}}
`
WithOptions(
- EditorConfig{
- Settings: map[string]interface{}{
- "templateExtensions": []string{"tmpl"},
- "semanticTokens": true,
- },
+ Settings{
+ "templateExtensions": []string{"tmpl"},
+ "semanticTokens": true,
},
).Run(t, files, func(t *testing.T, env *Env) {
// TODO: can we move this diagnostic onto {{}}?
- env.Await(env.DiagnosticAtRegexp("hello.tmpl", "()Hello {{}}"))
- d := env.DiagnosticsFor("hello.tmpl").Diagnostics // issue 50786: check for Source
+ var diags protocol.PublishDiagnosticsParams
+ env.OnceMet(
+ InitialWorkspaceLoad,
+ Diagnostics(env.AtRegexp("hello.tmpl", "()Hello {{}}")),
+ ReadDiagnostics("hello.tmpl", &diags),
+ )
+ d := diags.Diagnostics // issue 50786: check for Source
if len(d) != 1 {
t.Errorf("expected 1 diagnostic, got %d", len(d))
return
@@ -93,7 +96,7 @@ Hello {{}} <-- missing body
}
env.WriteWorkspaceFile("hello.tmpl", "{{range .Planets}}\nHello {{.}}\n{{end}}")
- env.Await(EmptyDiagnostics("hello.tmpl"))
+ env.AfterChange(NoDiagnostics(ForFile("hello.tmpl")))
})
}
@@ -110,16 +113,15 @@ B {{}} <-- missing body
`
WithOptions(
- EditorConfig{
- Settings: map[string]interface{}{
- "templateExtensions": []string{"tmpl"},
- },
- DirectoryFilters: []string{"-b"},
+ Settings{
+ "directoryFilters": []string{"-b"},
+ "templateExtensions": []string{"tmpl"},
},
).Run(t, files, func(t *testing.T, env *Env) {
- env.Await(
- OnceMet(env.DiagnosticAtRegexp("a/a.tmpl", "()A")),
- NoDiagnostics("b/b.tmpl"),
+ env.OnceMet(
+ InitialWorkspaceLoad,
+ Diagnostics(env.AtRegexp("a/a.tmpl", "()A")),
+ NoDiagnostics(ForFile("b/b.tmpl")),
)
})
}
@@ -134,16 +136,13 @@ go 1.12
Run(t, files, func(t *testing.T, env *Env) {
env.CreateBuffer("hello.tmpl", "")
- env.Await(
- OnceMet(
- env.DoneWithOpen(),
- NoDiagnostics("hello.tmpl"), // Don't get spurious errors for empty templates.
- ),
+ env.AfterChange(
+ NoDiagnostics(ForFile("hello.tmpl")), // Don't get spurious errors for empty templates.
)
env.SetBufferContent("hello.tmpl", "{{range .Planets}}\nHello {{}}\n{{end}}")
- env.Await(env.DiagnosticAtRegexp("hello.tmpl", "()Hello {{}}"))
+ env.Await(Diagnostics(env.AtRegexp("hello.tmpl", "()Hello {{}}")))
env.RegexpReplace("hello.tmpl", "{{}}", "{{.}}")
- env.Await(EmptyOrNoDiagnostics("hello.tmpl"))
+ env.Await(NoDiagnostics(ForFile("hello.tmpl")))
})
}
@@ -161,11 +160,15 @@ Hello {{}} <-- missing body
Run(t, files, func(t *testing.T, env *Env) {
env.OpenFile("hello.tmpl")
- env.Await(env.DiagnosticAtRegexp("hello.tmpl", "()Hello {{}}"))
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("hello.tmpl", "()Hello {{}}")),
+ )
// Since we don't have templateExtensions configured, closing hello.tmpl
// should make its diagnostics disappear.
env.CloseBuffer("hello.tmpl")
- env.Await(EmptyDiagnostics("hello.tmpl"))
+ env.AfterChange(
+ NoDiagnostics(ForFile("hello.tmpl")),
+ )
})
}
@@ -182,16 +185,14 @@ go 1.12
`
WithOptions(
- EditorConfig{
- Settings: map[string]interface{}{
- "templateExtensions": []string{"tmpl", "gotmpl"},
- },
+ Settings{
+ "templateExtensions": []string{"tmpl", "gotmpl"},
},
).Run(t, files, func(t *testing.T, env *Env) {
env.OpenFile("a.tmpl")
x := env.RegexpSearch("a.tmpl", `A`)
- file, pos := env.GoToDefinition("a.tmpl", x)
- refs := env.References(file, pos)
+ loc := env.GoToDefinition(x)
+ refs := env.References(loc)
if len(refs) != 2 {
t.Fatalf("got %v reference(s), want 2", len(refs))
}
@@ -204,9 +205,9 @@ go 1.12
}
}
- content, npos := env.Hover(file, pos)
- if pos != npos {
- t.Errorf("pos? got %v, wanted %v", npos, pos)
+ content, nloc := env.Hover(loc)
+ if loc != nloc {
+ t.Errorf("loc? got %v, wanted %v", nloc, loc)
}
if content.Value != "template A defined" {
t.Errorf("got %s, wanted 'template A defined", content.Value)
diff --git a/gopls/internal/regtest/watch/watch_test.go b/gopls/internal/regtest/watch/watch_test.go
index 5b432e18a..edb479a9c 100644
--- a/gopls/internal/regtest/watch/watch_test.go
+++ b/gopls/internal/regtest/watch/watch_test.go
@@ -8,14 +8,15 @@ import (
"testing"
"golang.org/x/tools/gopls/internal/hooks"
- . "golang.org/x/tools/internal/lsp/regtest"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
+ "golang.org/x/tools/internal/bug"
- "golang.org/x/tools/internal/lsp/fake"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/testenv"
+ "golang.org/x/tools/gopls/internal/lsp/fake"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
)
func TestMain(m *testing.M) {
+ bug.PanicOnBugs = true
Main(m, hooks.Options)
}
@@ -36,12 +37,13 @@ func _() {
// diagnostics are updated.
t.Run("unopened", func(t *testing.T) {
Run(t, pkg, func(t *testing.T, env *Env) {
- env.Await(
- env.DiagnosticAtRegexp("a/a.go", "x"),
+ env.OnceMet(
+ InitialWorkspaceLoad,
+ Diagnostics(env.AtRegexp("a/a.go", "x")),
)
env.WriteWorkspaceFile("a/a.go", `package a; func _() {};`)
- env.Await(
- EmptyDiagnostics("a/a.go"),
+ env.AfterChange(
+ NoDiagnostics(ForFile("a/a.go")),
)
})
})
@@ -54,13 +56,11 @@ func _() {
// Insert a trivial edit so that we don't automatically update the buffer
// (see CL 267577).
env.EditBuffer("a/a.go", fake.NewEdit(0, 0, 0, 0, " "))
- env.Await(env.DoneWithOpen())
+ env.AfterChange()
env.WriteWorkspaceFile("a/a.go", `package a; func _() {};`)
- env.Await(
- OnceMet(
- env.DoneWithChangeWatchedFiles(),
- env.DiagnosticAtRegexp("a/a.go", "x"),
- ))
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("a/a.go", "x")),
+ )
})
})
}
@@ -89,10 +89,10 @@ func _() {
`
Run(t, pkg, func(t *testing.T, env *Env) {
env.OpenFile("a/a.go")
- env.Await(env.DoneWithOpen())
+ env.AfterChange()
env.WriteWorkspaceFile("b/b.go", `package b; func B() {};`)
- env.Await(
- env.DiagnosticAtRegexp("a/a.go", "b.B"),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("a/a.go", "b.B")),
)
})
}
@@ -122,8 +122,9 @@ func _() {
}
`
Run(t, pkg, func(t *testing.T, env *Env) {
- env.Await(
- env.DiagnosticAtRegexp("a/a.go", "x"),
+ env.OnceMet(
+ InitialWorkspaceLoad,
+ Diagnostics(env.AtRegexp("a/a.go", "x")),
)
env.WriteWorkspaceFiles(map[string]string{
"b/b.go": `package b; func B() {};`,
@@ -135,9 +136,9 @@ func _() {
b.B()
}`,
})
- env.Await(
- EmptyDiagnostics("a/a.go"),
- NoDiagnostics("b/b.go"),
+ env.AfterChange(
+ NoDiagnostics(ForFile("a/a.go")),
+ NoDiagnostics(ForFile("b/b.go")),
)
})
}
@@ -166,10 +167,10 @@ func _() {
`
Run(t, pkg, func(t *testing.T, env *Env) {
env.OpenFile("a/a.go")
- env.Await(env.DoneWithOpen())
+ env.AfterChange()
env.RemoveWorkspaceFile("b/b.go")
- env.Await(
- env.DiagnosticAtRegexp("a/a.go", "\"mod.com/b\""),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("a/a.go", "\"mod.com/b\"")),
)
})
}
@@ -197,14 +198,13 @@ func _() {
}
`
Run(t, missing, func(t *testing.T, env *Env) {
- t.Skip("the initial workspace load fails and never retries")
-
- env.Await(
- env.DiagnosticAtRegexp("a/a.go", "\"mod.com/c\""),
+ env.OnceMet(
+ InitialWorkspaceLoad,
+ Diagnostics(env.AtRegexp("a/a.go", "\"mod.com/c\"")),
)
env.WriteWorkspaceFile("c/c.go", `package c; func C() {};`)
- env.Await(
- EmptyDiagnostics("c/c.go"),
+ env.AfterChange(
+ NoDiagnostics(ForFile("a/a.go")),
)
})
}
@@ -225,8 +225,8 @@ func _() {}
Run(t, original, func(t *testing.T, env *Env) {
env.WriteWorkspaceFile("c/c.go", `package c; func C() {};`)
env.WriteWorkspaceFile("a/a.go", `package a; import "mod.com/c"; func _() { c.C() }`)
- env.Await(
- NoDiagnostics("a/a.go"),
+ env.AfterChange(
+ NoDiagnostics(ForFile("a/a.go")),
)
})
}
@@ -246,12 +246,13 @@ func _() {
}
`
Run(t, pkg, func(t *testing.T, env *Env) {
- env.Await(
- env.DiagnosticAtRegexp("a/a.go", "hello"),
+ env.OnceMet(
+ InitialWorkspaceLoad,
+ Diagnostics(env.AtRegexp("a/a.go", "hello")),
)
env.WriteWorkspaceFile("a/a2.go", `package a; func hello() {};`)
- env.Await(
- EmptyDiagnostics("a/a.go"),
+ env.AfterChange(
+ NoDiagnostics(ForFile("a/a.go")),
)
})
}
@@ -322,15 +323,12 @@ func _() {
t.Run("method before implementation", func(t *testing.T) {
Run(t, pkg, func(t *testing.T, env *Env) {
env.WriteWorkspaceFile("b/b.go", newMethod)
- env.Await(
- OnceMet(
- env.DoneWithChangeWatchedFiles(),
- DiagnosticAt("a/a.go", 12, 12),
- ),
+ env.AfterChange(
+ Diagnostics(AtPosition("a/a.go", 12, 12)),
)
env.WriteWorkspaceFile("a/a.go", implementation)
- env.Await(
- EmptyDiagnostics("a/a.go"),
+ env.AfterChange(
+ NoDiagnostics(ForFile("a/a.go")),
)
})
})
@@ -338,15 +336,12 @@ func _() {
t.Run("implementation before method", func(t *testing.T) {
Run(t, pkg, func(t *testing.T, env *Env) {
env.WriteWorkspaceFile("a/a.go", implementation)
- env.Await(
- OnceMet(
- env.DoneWithChangeWatchedFiles(),
- NoDiagnostics("a/a.go"),
- ),
+ env.AfterChange(
+ NoDiagnostics(ForFile("a/a.go")),
)
env.WriteWorkspaceFile("b/b.go", newMethod)
- env.Await(
- NoDiagnostics("a/a.go"),
+ env.AfterChange(
+ NoDiagnostics(ForFile("a/a.go")),
)
})
})
@@ -357,12 +352,9 @@ func _() {
"a/a.go": implementation,
"b/b.go": newMethod,
})
- env.Await(
- OnceMet(
- env.DoneWithChangeWatchedFiles(),
- NoDiagnostics("a/a.go"),
- ),
- NoDiagnostics("b/b.go"),
+ env.AfterChange(
+ NoDiagnostics(ForFile("a/a.go")),
+ NoDiagnostics(ForFile("b/b.go")),
)
})
})
@@ -371,7 +363,6 @@ func _() {
// Tests golang/go#38498. Delete a file and then force a reload.
// Assert that we no longer try to load the file.
func TestDeleteFiles(t *testing.T) {
- testenv.NeedsGo1Point(t, 13) // Poor overlay support causes problems on 1.12.
const pkg = `
-- go.mod --
module mod.com
@@ -387,69 +378,57 @@ func _() {
package a
`
t.Run("close then delete", func(t *testing.T) {
- WithOptions(EditorConfig{
- VerboseOutput: true,
- }).Run(t, pkg, func(t *testing.T, env *Env) {
+ WithOptions(
+ Settings{"verboseOutput": true},
+ ).Run(t, pkg, func(t *testing.T, env *Env) {
env.OpenFile("a/a.go")
env.OpenFile("a/a_unneeded.go")
- env.Await(
- OnceMet(
- env.DoneWithOpen(),
- LogMatching(protocol.Info, "a_unneeded.go", 1, false),
- ),
+ env.AfterChange(
+ LogMatching(protocol.Info, "a_unneeded.go", 1, false),
)
// Close and delete the open file, mimicking what an editor would do.
env.CloseBuffer("a/a_unneeded.go")
env.RemoveWorkspaceFile("a/a_unneeded.go")
env.RegexpReplace("a/a.go", "var _ int", "fmt.Println(\"\")")
- env.Await(
- env.DiagnosticAtRegexp("a/a.go", "fmt"),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("a/a.go", "fmt")),
)
env.SaveBuffer("a/a.go")
- env.Await(
- OnceMet(
- env.DoneWithSave(),
- // There should only be one log message containing
- // a_unneeded.go, from the initial workspace load, which we
- // check for earlier. If there are more, there's a bug.
- LogMatching(protocol.Info, "a_unneeded.go", 1, false),
- ),
- EmptyDiagnostics("a/a.go"),
+ env.AfterChange(
+ // There should only be one log message containing
+ // a_unneeded.go, from the initial workspace load, which we
+ // check for earlier. If there are more, there's a bug.
+ LogMatching(protocol.Info, "a_unneeded.go", 1, false),
+ NoDiagnostics(ForFile("a/a.go")),
)
})
})
t.Run("delete then close", func(t *testing.T) {
WithOptions(
- EditorConfig{VerboseOutput: true},
+ Settings{"verboseOutput": true},
).Run(t, pkg, func(t *testing.T, env *Env) {
env.OpenFile("a/a.go")
env.OpenFile("a/a_unneeded.go")
- env.Await(
- OnceMet(
- env.DoneWithOpen(),
- LogMatching(protocol.Info, "a_unneeded.go", 1, false),
- ),
+ env.AfterChange(
+ LogMatching(protocol.Info, "a_unneeded.go", 1, false),
)
// Delete and then close the file.
env.RemoveWorkspaceFile("a/a_unneeded.go")
env.CloseBuffer("a/a_unneeded.go")
env.RegexpReplace("a/a.go", "var _ int", "fmt.Println(\"\")")
- env.Await(
- env.DiagnosticAtRegexp("a/a.go", "fmt"),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("a/a.go", "fmt")),
)
env.SaveBuffer("a/a.go")
- env.Await(
- OnceMet(
- env.DoneWithSave(),
- // There should only be one log message containing
- // a_unneeded.go, from the initial workspace load, which we
- // check for earlier. If there are more, there's a bug.
- LogMatching(protocol.Info, "a_unneeded.go", 1, false),
- ),
- EmptyDiagnostics("a/a.go"),
+ env.AfterChange(
+ // There should only be one log message containing
+ // a_unneeded.go, from the initial workspace load, which we
+ // check for earlier. If there are more, there's a bug.
+ LogMatching(protocol.Info, "a_unneeded.go", 1, false),
+ NoDiagnostics(ForFile("a/a.go")),
)
})
})
@@ -487,39 +466,11 @@ package a
func _() {}
`
Run(t, pkg, func(t *testing.T, env *Env) {
- env.ChangeFilesOnDisk([]fake.FileEvent{
- {
- Path: "a/a3.go",
- Content: `package a
-
-var Hello int
-`,
- ProtocolEvent: protocol.FileEvent{
- URI: env.Sandbox.Workdir.URI("a/a3.go"),
- Type: protocol.Created,
- },
- },
- {
- Path: "a/a1.go",
- ProtocolEvent: protocol.FileEvent{
- URI: env.Sandbox.Workdir.URI("a/a1.go"),
- Type: protocol.Deleted,
- },
- },
- {
- Path: "a/a2.go",
- Content: `package a; func _() {};`,
- ProtocolEvent: protocol.FileEvent{
- URI: env.Sandbox.Workdir.URI("a/a2.go"),
- Type: protocol.Changed,
- },
- },
- })
- env.Await(
- OnceMet(
- env.DoneWithChangeWatchedFiles(),
- NoDiagnostics("main.go"),
- ),
+ env.WriteWorkspaceFile("a/a3.go", "package a\n\nvar Hello int\n")
+ env.RemoveWorkspaceFile("a/a1.go")
+ env.WriteWorkspaceFile("a/a2.go", "package a; func _() {};")
+ env.AfterChange(
+ NoDiagnostics(ForFile("main.go")),
)
})
}
@@ -563,6 +514,9 @@ module mod.com
go 1.12
require example.com v1.2.2
+-- go.sum --
+example.com v1.2.3 h1:OnPPkx+rW63kj9pgILsu12MORKhSlnFa3DVRJq1HZ7g=
+example.com v1.2.3/go.mod h1:Y2Rc5rVWjWur0h3pd9aEvK5Pof8YKDANh9gHA2Maujo=
-- main.go --
package main
@@ -591,26 +545,24 @@ func main() {
}
`,
})
- env.Await(
+ env.AfterChange(
env.DoneWithChangeWatchedFiles(),
- NoDiagnostics("main.go"),
+ NoDiagnostics(ForFile("main.go")),
)
})
}
// Reproduces golang/go#40340.
-func TestSwitchFromGOPATHToModules(t *testing.T) {
- testenv.NeedsGo1Point(t, 13)
-
+func TestSwitchFromGOPATHToModuleMode(t *testing.T) {
const files = `
-- foo/blah/blah.go --
package blah
const Name = ""
--- foo/main.go --
+-- main.go --
package main
-import "blah"
+import "foo/blah"
func main() {
_ = blah.Name
@@ -618,29 +570,32 @@ func main() {
`
WithOptions(
InGOPATH(),
- EditorConfig{
- Env: map[string]string{
- "GO111MODULE": "auto",
- },
- },
- Modes(Experimental), // module is in a subdirectory
+ Modes(Default), // golang/go#57521: this test is temporarily failing in 'experimental' mode
+ EnvVars{"GO111MODULE": "auto"},
).Run(t, files, func(t *testing.T, env *Env) {
- env.OpenFile("foo/main.go")
- env.Await(env.DiagnosticAtRegexp("foo/main.go", `"blah"`))
- if err := env.Sandbox.RunGoCommand(env.Ctx, "foo", "mod", []string{"init", "mod.com"}, true); err != nil {
+ env.OpenFile("main.go")
+ env.AfterChange(
+ NoDiagnostics(ForFile("main.go")),
+ )
+ if err := env.Sandbox.RunGoCommand(env.Ctx, "", "mod", []string{"init", "mod.com"}, true); err != nil {
t.Fatal(err)
}
- env.RegexpReplace("foo/main.go", `"blah"`, `"mod.com/blah"`)
- env.Await(
- EmptyDiagnostics("foo/main.go"),
+
+ // TODO(golang/go#57558, golang/go#57512): file watching is asynchronous,
+ // and we must wait for the view to be reconstructed before touching
+ // main.go, so that the new view "knows" about main.go. This is a bug, but
+ // awaiting the change here avoids it.
+ env.AfterChange()
+
+ env.RegexpReplace("main.go", `"foo/blah"`, `"mod.com/foo/blah"`)
+ env.AfterChange(
+ NoDiagnostics(ForFile("main.go")),
)
})
}
// Reproduces golang/go#40487.
func TestSwitchFromModulesToGOPATH(t *testing.T) {
- testenv.NeedsGo1Point(t, 13)
-
const files = `
-- foo/go.mod --
module mod.com
@@ -661,23 +616,16 @@ func main() {
`
WithOptions(
InGOPATH(),
- EditorConfig{
- Env: map[string]string{
- "GO111MODULE": "auto",
- },
- },
+ EnvVars{"GO111MODULE": "auto"},
).Run(t, files, func(t *testing.T, env *Env) {
env.OpenFile("foo/main.go")
env.RemoveWorkspaceFile("foo/go.mod")
- env.Await(
- OnceMet(
- env.DoneWithChangeWatchedFiles(),
- env.DiagnosticAtRegexp("foo/main.go", `"mod.com/blah"`),
- ),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("foo/main.go", `"mod.com/blah"`)),
)
env.RegexpReplace("foo/main.go", `"mod.com/blah"`, `"foo/blah"`)
- env.Await(
- EmptyDiagnostics("foo/main.go"),
+ env.AfterChange(
+ NoDiagnostics(ForFile("foo/main.go")),
)
})
}
@@ -720,15 +668,9 @@ func TestAll(t *testing.T) {
}
`,
})
- env.Await(
- OnceMet(
- env.DoneWithChangeWatchedFiles(),
- NoDiagnostics("a/a.go"),
- ),
- OnceMet(
- env.DoneWithChangeWatchedFiles(),
- NoDiagnostics("a/a_test.go"),
- ),
+ env.AfterChange(
+ NoDiagnostics(ForFile("a/a.go")),
+ NoDiagnostics(ForFile("a/a_test.go")),
)
// Now, add a new file to the test variant and use its symbol in the
// original test file. Expect no diagnostics.
@@ -752,15 +694,9 @@ func hi() {}
func TestSomething(t *testing.T) {}
`,
})
- env.Await(
- OnceMet(
- env.DoneWithChangeWatchedFiles(),
- NoDiagnostics("a/a_test.go"),
- ),
- OnceMet(
- env.DoneWithChangeWatchedFiles(),
- NoDiagnostics("a/a2_test.go"),
- ),
+ env.AfterChange(
+ NoDiagnostics(ForFile("a/a_test.go")),
+ NoDiagnostics(ForFile("a/a2_test.go")),
)
})
}
diff --git a/gopls/internal/regtest/workspace/broken_test.go b/gopls/internal/regtest/workspace/broken_test.go
new file mode 100644
index 000000000..005a7e946
--- /dev/null
+++ b/gopls/internal/regtest/workspace/broken_test.go
@@ -0,0 +1,264 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package workspace
+
+import (
+ "strings"
+ "testing"
+
+ "golang.org/x/tools/gopls/internal/lsp"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
+ "golang.org/x/tools/internal/testenv"
+)
+
+// This file holds various tests for UX with respect to broken workspaces.
+//
+// TODO: consolidate other tests here.
+//
+// TODO: write more tests:
+// - an explicit GOWORK value that doesn't exist
+// - using modules and/or GOWORK inside of GOPATH?
+
+// Test for golang/go#53933
+func TestBrokenWorkspace_DuplicateModules(t *testing.T) {
+ testenv.NeedsGo1Point(t, 18)
+
+ // TODO(golang/go#57650): fix this feature.
+ t.Skip("we no longer detect duplicate modules")
+
+ // This proxy module content is replaced by the workspace, but is still
+ // required for module resolution to function in the Go command.
+ const proxy = `
+-- example.com/foo@v0.0.1/go.mod --
+module example.com/foo
+
+go 1.12
+`
+
+ const src = `
+-- go.work --
+go 1.18
+
+use (
+ ./package1
+ ./package1/vendor/example.com/foo
+ ./package2
+ ./package2/vendor/example.com/foo
+)
+
+-- package1/go.mod --
+module mod.test
+
+go 1.18
+
+require example.com/foo v0.0.1
+-- package1/main.go --
+package main
+
+import "example.com/foo"
+
+func main() {
+ _ = foo.CompleteMe
+}
+-- package1/vendor/example.com/foo/go.mod --
+module example.com/foo
+
+go 1.18
+-- package1/vendor/example.com/foo/foo.go --
+package foo
+
+const CompleteMe = 111
+-- package2/go.mod --
+module mod2.test
+
+go 1.18
+
+require example.com/foo v0.0.1
+-- package2/main.go --
+package main
+
+import "example.com/foo"
+
+func main() {
+ _ = foo.CompleteMe
+}
+-- package2/vendor/example.com/foo/go.mod --
+module example.com/foo
+
+go 1.18
+-- package2/vendor/example.com/foo/foo.go --
+package foo
+
+const CompleteMe = 222
+`
+
+ WithOptions(
+ ProxyFiles(proxy),
+ ).Run(t, src, func(t *testing.T, env *Env) {
+ env.OpenFile("package1/main.go")
+ env.Await(
+ OutstandingWork(lsp.WorkspaceLoadFailure, `found module "example.com/foo" multiple times in the workspace`),
+ )
+
+ // Remove the redundant vendored copy of example.com.
+ env.WriteWorkspaceFile("go.work", `go 1.18
+ use (
+ ./package1
+ ./package2
+ ./package2/vendor/example.com/foo
+ )
+ `)
+ env.Await(NoOutstandingWork())
+
+ // Check that definitions in package1 go to the copy vendored in package2.
+ location := env.GoToDefinition(env.RegexpSearch("package1/main.go", "CompleteMe")).URI.SpanURI().Filename()
+ const wantLocation = "package2/vendor/example.com/foo/foo.go"
+ if !strings.HasSuffix(location, wantLocation) {
+ t.Errorf("got definition of CompleteMe at %q, want %q", location, wantLocation)
+ }
+ })
+}
+
+// Test for golang/go#43186: correcting the module path should fix errors
+// without restarting gopls.
+func TestBrokenWorkspace_WrongModulePath(t *testing.T) {
+ const files = `
+-- go.mod --
+module mod.testx
+
+go 1.18
+-- p/internal/foo/foo.go --
+package foo
+
+const C = 1
+-- p/internal/bar/bar.go --
+package bar
+
+import "mod.test/p/internal/foo"
+
+const D = foo.C + 1
+-- p/internal/bar/bar_test.go --
+package bar_test
+
+import (
+ "mod.test/p/internal/foo"
+ . "mod.test/p/internal/bar"
+)
+
+const E = D + foo.C
+-- p/internal/baz/baz_test.go --
+package baz_test
+
+import (
+ named "mod.test/p/internal/bar"
+)
+
+const F = named.D - 3
+`
+
+ Run(t, files, func(t *testing.T, env *Env) {
+ env.OpenFile("p/internal/bar/bar.go")
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("p/internal/bar/bar.go", "\"mod.test/p/internal/foo\"")),
+ )
+ env.OpenFile("go.mod")
+ env.RegexpReplace("go.mod", "mod.testx", "mod.test")
+ env.SaveBuffer("go.mod") // saving triggers a reload
+ env.AfterChange(NoDiagnostics())
+ })
+}
+
+func TestMultipleModules_Warning(t *testing.T) {
+ msgForVersion := func(ver int) string {
+ if ver >= 18 {
+ return `gopls was not able to find modules in your workspace.`
+ } else {
+ return `gopls requires a module at the root of your workspace.`
+ }
+ }
+
+ const modules = `
+-- a/go.mod --
+module a.com
+
+go 1.12
+-- a/a.go --
+package a
+-- a/empty.go --
+// an empty file
+-- b/go.mod --
+module b.com
+
+go 1.12
+-- b/b.go --
+package b
+`
+ for _, go111module := range []string{"on", "auto"} {
+ t.Run("GO111MODULE="+go111module, func(t *testing.T) {
+ WithOptions(
+ Modes(Default),
+ EnvVars{"GO111MODULE": go111module},
+ ).Run(t, modules, func(t *testing.T, env *Env) {
+ ver := env.GoVersion()
+ msg := msgForVersion(ver)
+ env.OpenFile("a/a.go")
+ env.OpenFile("a/empty.go")
+ env.OpenFile("b/go.mod")
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("a/a.go", "package a")),
+ Diagnostics(env.AtRegexp("b/go.mod", "module b.com")),
+ OutstandingWork(lsp.WorkspaceLoadFailure, msg),
+ )
+
+ // Changing the workspace folders to the valid modules should resolve
+ // the workspace errors and diagnostics.
+ //
+ // TODO(rfindley): verbose work tracking doesn't follow changing the
+ // workspace folder, therefore we can't invoke AfterChange here.
+ env.ChangeWorkspaceFolders("a", "b")
+ env.Await(
+ NoDiagnostics(ForFile("a/a.go")),
+ NoDiagnostics(ForFile("b/go.mod")),
+ NoOutstandingWork(),
+ )
+
+ env.ChangeWorkspaceFolders(".")
+
+ // TODO(rfindley): when GO111MODULE=auto, we need to open or change a
+ // file here in order to detect a critical error. This is because gopls
+ // has forgotten about a/a.go, and therefore doesn't hit the heuristic
+ // "all packages are command-line-arguments".
+ //
+ // This is broken, and could be fixed by adjusting the heuristic to
+ // account for the scenario where there are *no* workspace packages, or
+ // (better) trying to get workspace packages for each open file. See
+ // also golang/go#54261.
+ env.OpenFile("b/b.go")
+ env.AfterChange(
+ // TODO(rfindley): fix these missing diagnostics.
+ // Diagnostics(env.AtRegexp("a/a.go", "package a")),
+ // Diagnostics(env.AtRegexp("b/go.mod", "module b.com")),
+ Diagnostics(env.AtRegexp("b/b.go", "package b")),
+ OutstandingWork(lsp.WorkspaceLoadFailure, msg),
+ )
+ })
+ })
+ }
+
+ // Expect no warning if GO111MODULE=auto in a directory in GOPATH.
+ t.Run("GOPATH_GO111MODULE_auto", func(t *testing.T) {
+ WithOptions(
+ Modes(Default),
+ EnvVars{"GO111MODULE": "auto"},
+ InGOPATH(),
+ ).Run(t, modules, func(t *testing.T, env *Env) {
+ env.OpenFile("a/a.go")
+ env.AfterChange(
+ NoDiagnostics(ForFile("a/a.go")),
+ NoOutstandingWork(),
+ )
+ })
+ })
+}
diff --git a/gopls/internal/regtest/workspace/directoryfilters_test.go b/gopls/internal/regtest/workspace/directoryfilters_test.go
new file mode 100644
index 000000000..6e2a15557
--- /dev/null
+++ b/gopls/internal/regtest/workspace/directoryfilters_test.go
@@ -0,0 +1,259 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package workspace
+
+import (
+ "sort"
+ "strings"
+ "testing"
+
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
+ "golang.org/x/tools/internal/testenv"
+)
+
+// This file contains regression tests for the directoryFilters setting.
+//
+// TODO:
+// - consolidate some of these tests into a single test
+// - add more tests for changing directory filters
+
+func TestDirectoryFilters(t *testing.T) {
+ WithOptions(
+ ProxyFiles(workspaceProxy),
+ WorkspaceFolders("pkg"),
+ Settings{
+ "directoryFilters": []string{"-inner"},
+ },
+ ).Run(t, workspaceModule, func(t *testing.T, env *Env) {
+ syms := env.Symbol("Hi")
+ sort.Slice(syms, func(i, j int) bool { return syms[i].ContainerName < syms[j].ContainerName })
+ for _, s := range syms {
+ if strings.Contains(s.ContainerName, "inner") {
+ t.Errorf("WorkspaceSymbol: found symbol %q with container %q, want \"inner\" excluded", s.Name, s.ContainerName)
+ }
+ }
+ })
+}
+
+func TestDirectoryFiltersLoads(t *testing.T) {
+ // exclude, and its error, should be excluded from the workspace.
+ const files = `
+-- go.mod --
+module example.com
+
+go 1.12
+-- exclude/exclude.go --
+package exclude
+
+const _ = Nonexistant
+`
+
+ WithOptions(
+ Settings{"directoryFilters": []string{"-exclude"}},
+ ).Run(t, files, func(t *testing.T, env *Env) {
+ env.OnceMet(
+ InitialWorkspaceLoad,
+ NoDiagnostics(ForFile("exclude/x.go")),
+ )
+ })
+}
+
+func TestDirectoryFiltersTransitiveDep(t *testing.T) {
+ // Even though exclude is excluded from the workspace, it should
+ // still be importable as a non-workspace package.
+ const files = `
+-- go.mod --
+module example.com
+
+go 1.12
+-- include/include.go --
+package include
+import "example.com/exclude"
+
+const _ = exclude.X
+-- exclude/exclude.go --
+package exclude
+
+const _ = Nonexistant // should be ignored, since this is a non-workspace package
+const X = 1
+`
+
+ WithOptions(
+ Settings{"directoryFilters": []string{"-exclude"}},
+ ).Run(t, files, func(t *testing.T, env *Env) {
+ env.OnceMet(
+ InitialWorkspaceLoad,
+ NoDiagnostics(ForFile("exclude/exclude.go")), // filtered out
+ NoDiagnostics(ForFile("include/include.go")), // successfully builds
+ )
+ })
+}
+
+func TestDirectoryFiltersWorkspaceModules(t *testing.T) {
+ // Define a module include.com which should be in the workspace, plus a
+ // module exclude.com which should be excluded and therefore come from
+ // the proxy.
+ const files = `
+-- include/go.mod --
+module include.com
+
+go 1.12
+
+require exclude.com v1.0.0
+
+-- include/go.sum --
+exclude.com v1.0.0 h1:Q5QSfDXY5qyNCBeUiWovUGqcLCRZKoTs9XdBeVz+w1I=
+exclude.com v1.0.0/go.mod h1:hFox2uDlNB2s2Jfd9tHlQVfgqUiLVTmh6ZKat4cvnj4=
+
+-- include/include.go --
+package include
+
+import "exclude.com"
+
+var _ = exclude.X // satisfied only by the workspace version
+-- exclude/go.mod --
+module exclude.com
+
+go 1.12
+-- exclude/exclude.go --
+package exclude
+
+const X = 1
+`
+ const proxy = `
+-- exclude.com@v1.0.0/go.mod --
+module exclude.com
+
+go 1.12
+-- exclude.com@v1.0.0/exclude.go --
+package exclude
+`
+ WithOptions(
+ Modes(Experimental),
+ ProxyFiles(proxy),
+ Settings{"directoryFilters": []string{"-exclude"}},
+ ).Run(t, files, func(t *testing.T, env *Env) {
+ env.Await(Diagnostics(env.AtRegexp("include/include.go", `exclude.(X)`)))
+ })
+}
+
+// Test for golang/go#46438: support for '**' in directory filters.
+func TestDirectoryFilters_Wildcard(t *testing.T) {
+ filters := []string{"-**/bye"}
+ WithOptions(
+ ProxyFiles(workspaceProxy),
+ WorkspaceFolders("pkg"),
+ Settings{
+ "directoryFilters": filters,
+ },
+ ).Run(t, workspaceModule, func(t *testing.T, env *Env) {
+ syms := env.Symbol("Bye")
+ sort.Slice(syms, func(i, j int) bool { return syms[i].ContainerName < syms[j].ContainerName })
+ for _, s := range syms {
+ if strings.Contains(s.ContainerName, "bye") {
+ t.Errorf("WorkspaceSymbol: found symbol %q with container %q with filters %v", s.Name, s.ContainerName, filters)
+ }
+ }
+ })
+}
+
+// Test for golang/go#52993: wildcard directoryFilters should apply to
+// goimports scanning as well.
+func TestDirectoryFilters_ImportScanning(t *testing.T) {
+ const files = `
+-- go.mod --
+module mod.test
+
+go 1.12
+-- main.go --
+package main
+
+func main() {
+ bye.Goodbye()
+}
+-- p/bye/bye.go --
+package bye
+
+func Goodbye() {}
+`
+
+ WithOptions(
+ Settings{
+ "directoryFilters": []string{"-**/bye"},
+ },
+ // This test breaks in 'Experimental' mode, because with
+ // experimentalWorkspaceModule set we the goimports scan behaves
+ // differently.
+ //
+ // Since this feature is going away (golang/go#52897), don't investigate.
+ Modes(Default),
+ ).Run(t, files, func(t *testing.T, env *Env) {
+ env.OpenFile("main.go")
+ beforeSave := env.BufferText("main.go")
+ env.OrganizeImports("main.go")
+ got := env.BufferText("main.go")
+ if got != beforeSave {
+ t.Errorf("after organizeImports code action, got modified buffer:\n%s", got)
+ }
+ })
+}
+
+// Test for golang/go#52993: non-wildcard directoryFilters should still be
+// applied relative to the workspace folder, not the module root.
+func TestDirectoryFilters_MultiRootImportScanning(t *testing.T) {
+ testenv.NeedsGo1Point(t, 18) // uses go.work
+
+ const files = `
+-- go.work --
+go 1.18
+
+use (
+ a
+ b
+)
+-- a/go.mod --
+module mod1.test
+
+go 1.18
+-- a/main.go --
+package main
+
+func main() {
+ hi.Hi()
+}
+-- a/hi/hi.go --
+package hi
+
+func Hi() {}
+-- b/go.mod --
+module mod2.test
+
+go 1.18
+-- b/main.go --
+package main
+
+func main() {
+ hi.Hi()
+}
+-- b/hi/hi.go --
+package hi
+
+func Hi() {}
+`
+
+ WithOptions(
+ Settings{
+ "directoryFilters": []string{"-hi"}, // this test fails with -**/hi
+ },
+ ).Run(t, files, func(t *testing.T, env *Env) {
+ env.OpenFile("a/main.go")
+ beforeSave := env.BufferText("a/main.go")
+ env.OrganizeImports("a/main.go")
+ got := env.BufferText("a/main.go")
+ if got == beforeSave {
+ t.Errorf("after organizeImports code action, got identical buffer:\n%s", got)
+ }
+ })
+}
diff --git a/gopls/internal/regtest/workspace/fromenv_test.go b/gopls/internal/regtest/workspace/fromenv_test.go
new file mode 100644
index 000000000..c05012d74
--- /dev/null
+++ b/gopls/internal/regtest/workspace/fromenv_test.go
@@ -0,0 +1,68 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package workspace
+
+import (
+ "testing"
+
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
+ "golang.org/x/tools/internal/testenv"
+)
+
+// Test that setting go.work via environment variables or settings works.
+func TestUseGoWorkOutsideTheWorkspace(t *testing.T) {
+ testenv.NeedsGo1Point(t, 18)
+ const files = `
+-- work/a/go.mod --
+module a.com
+
+go 1.12
+-- work/a/a.go --
+package a
+-- work/b/go.mod --
+module b.com
+
+go 1.12
+-- work/b/b.go --
+package b
+
+func _() {
+ x := 1 // unused
+}
+-- other/c/go.mod --
+module c.com
+
+go 1.18
+-- other/c/c.go --
+package c
+-- config/go.work --
+go 1.18
+
+use (
+ $SANDBOX_WORKDIR/work/a
+ $SANDBOX_WORKDIR/work/b
+ $SANDBOX_WORKDIR/other/c
+)
+`
+
+ WithOptions(
+ WorkspaceFolders("work"), // use a nested workspace dir, so that GOWORK is outside the workspace
+ EnvVars{"GOWORK": "$SANDBOX_WORKDIR/config/go.work"},
+ ).Run(t, files, func(t *testing.T, env *Env) {
+ // When we have an explicit GOWORK set, we should get a file watch request.
+ env.OnceMet(
+ InitialWorkspaceLoad,
+ FileWatchMatching(`other`),
+ FileWatchMatching(`config.go\.work`),
+ )
+ env.Await(FileWatchMatching(`config.go\.work`))
+ // Even though work/b is not open, we should get its diagnostics as it is
+ // included in the workspace.
+ env.OpenFile("work/a/a.go")
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("work/b/b.go", "x := 1"), WithMessage("not used")),
+ )
+ })
+}
diff --git a/gopls/internal/regtest/workspace/metadata_test.go b/gopls/internal/regtest/workspace/metadata_test.go
new file mode 100644
index 000000000..ac64b0758
--- /dev/null
+++ b/gopls/internal/regtest/workspace/metadata_test.go
@@ -0,0 +1,181 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package workspace
+
+import (
+ "strings"
+ "testing"
+
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
+ "golang.org/x/tools/internal/testenv"
+)
+
+// TODO(rfindley): move workspace tests related to metadata bugs into this
+// file.
+
+func TestFixImportDecl(t *testing.T) {
+ const src = `
+-- go.mod --
+module mod.test
+
+go 1.12
+-- p.go --
+package p
+
+import (
+ _ "fmt"
+
+const C = 42
+`
+
+ Run(t, src, func(t *testing.T, env *Env) {
+ env.OpenFile("p.go")
+ env.RegexpReplace("p.go", "\"fmt\"", "\"fmt\"\n)")
+ env.AfterChange(
+ NoDiagnostics(ForFile("p.go")),
+ )
+ })
+}
+
+// Test that moving ignoring a file via build constraints causes diagnostics to
+// be resolved.
+func TestIgnoreFile(t *testing.T) {
+ testenv.NeedsGo1Point(t, 17) // needs native overlays and support for go:build directives
+
+ const src = `
+-- go.mod --
+module mod.test
+
+go 1.12
+-- foo.go --
+package main
+
+func main() {}
+-- bar.go --
+package main
+
+func main() {}
+ `
+
+ WithOptions(
+ // TODO(golang/go#54180): we don't run in 'experimental' mode here, because
+ // with "experimentalUseInvalidMetadata", this test fails because the
+ // orphaned bar.go is diagnosed using stale metadata, and then not
+ // re-diagnosed when new metadata arrives.
+ //
+ // We could fix this by re-running diagnostics after a load, but should
+ // consider whether that is worthwhile.
+ Modes(Default),
+ ).Run(t, src, func(t *testing.T, env *Env) {
+ env.OpenFile("foo.go")
+ env.OpenFile("bar.go")
+ env.OnceMet(
+ env.DoneWithOpen(),
+ Diagnostics(env.AtRegexp("foo.go", "func (main)")),
+ Diagnostics(env.AtRegexp("bar.go", "func (main)")),
+ )
+
+ // Ignore bar.go. This should resolve diagnostics.
+ env.RegexpReplace("bar.go", "package main", "//go:build ignore\n\npackage main")
+
+ // To make this test pass with experimentalUseInvalidMetadata, we could make
+ // an arbitrary edit that invalidates the snapshot, at which point the
+ // orphaned diagnostics will be invalidated.
+ //
+ // But of course, this should not be necessary: we should invalidate stale
+ // information when fresh metadata arrives.
+ // env.RegexpReplace("foo.go", "package main", "package main // test")
+ env.AfterChange(
+ NoDiagnostics(ForFile("foo.go")),
+ NoDiagnostics(ForFile("bar.go")),
+ )
+
+ // If instead of 'ignore' (which gopls treats as a standalone package) we
+ // used a different build tag, we should get a warning about having no
+ // packages for bar.go
+ env.RegexpReplace("bar.go", "ignore", "excluded")
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("bar.go", "package (main)"), WithMessage("No packages")),
+ )
+ })
+}
+
+func TestReinitializeRepeatedly(t *testing.T) {
+ testenv.NeedsGo1Point(t, 18) // uses go.work
+
+ const multiModule = `
+-- go.work --
+go 1.18
+
+use (
+ moda/a
+ modb
+)
+-- moda/a/go.mod --
+module a.com
+
+require b.com v1.2.3
+-- moda/a/go.sum --
+b.com v1.2.3 h1:tXrlXP0rnjRpKNmkbLYoWBdq0ikb3C3bKK9//moAWBI=
+b.com v1.2.3/go.mod h1:D+J7pfFBZK5vdIdZEFquR586vKKIkqG7Qjw9AxG5BQ8=
+-- moda/a/a.go --
+package a
+
+import (
+ "b.com/b"
+)
+
+func main() {
+ var x int
+ _ = b.Hello()
+ // AAA
+}
+-- modb/go.mod --
+module b.com
+
+-- modb/b/b.go --
+package b
+
+func Hello() int {
+ var x int
+}
+`
+ WithOptions(
+ ProxyFiles(workspaceModuleProxy),
+ Settings{
+ // For this test, we want workspace diagnostics to start immediately
+ // during change processing.
+ "diagnosticsDelay": "0",
+ },
+ ).Run(t, multiModule, func(t *testing.T, env *Env) {
+ env.OpenFile("moda/a/a.go")
+ env.AfterChange()
+
+ // This test verifies that we fully process workspace reinitialization
+ // (which allows GOPROXY), even when the reinitialized snapshot is
+ // invalidated by subsequent changes.
+ //
+ // First, update go.work to remove modb. This will cause reinitialization
+ // to fetch b.com from the proxy.
+ env.WriteWorkspaceFile("go.work", "go 1.18\nuse moda/a")
+ // Next, wait for gopls to start processing the change. Because we've set
+ // diagnosticsDelay to zero, this will start diagnosing the workspace (and
+ // try to reinitialize on the snapshot context).
+ env.Await(env.StartedChangeWatchedFiles())
+ // Finally, immediately make a file change to cancel the previous
+ // operation. This is racy, but will usually cause initialization to be
+ // canceled.
+ env.RegexpReplace("moda/a/a.go", "AAA", "BBB")
+ env.AfterChange()
+ // Now, to satisfy a definition request, gopls will try to reload moda. But
+ // without access to the proxy (because this is no longer a
+ // reinitialization), this loading will fail.
+ loc := env.GoToDefinition(env.RegexpSearch("moda/a/a.go", "Hello"))
+ got := env.Sandbox.Workdir.URIToPath(loc.URI)
+ if want := "b.com@v1.2.3/b/b.go"; !strings.HasSuffix(got, want) {
+ t.Errorf("expected %s, got %v", want, got)
+ }
+ })
+}
diff --git a/gopls/internal/regtest/workspace/misspelling_test.go b/gopls/internal/regtest/workspace/misspelling_test.go
new file mode 100644
index 000000000..0419a1163
--- /dev/null
+++ b/gopls/internal/regtest/workspace/misspelling_test.go
@@ -0,0 +1,80 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package workspace
+
+import (
+ "runtime"
+ "testing"
+
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
+ "golang.org/x/tools/gopls/internal/lsp/tests/compare"
+)
+
+// Test for golang/go#57081.
+func TestFormattingMisspelledURI(t *testing.T) {
+ if runtime.GOOS != "windows" && runtime.GOOS != "darwin" {
+ t.Skip("golang/go#57081 only reproduces on case-insensitive filesystems.")
+ }
+ const files = `
+-- go.mod --
+module mod.test
+
+go 1.19
+-- foo.go --
+package foo
+
+const C = 2 // extra space is intentional
+`
+
+ Run(t, files, func(t *testing.T, env *Env) {
+ env.OpenFile("Foo.go")
+ env.FormatBuffer("Foo.go")
+ want := env.BufferText("Foo.go")
+
+ if want == "" {
+ t.Fatalf("Foo.go is empty")
+ }
+
+ // In golang/go#57081, we observed that if overlay cases don't match, gopls
+ // will find (and format) the on-disk contents rather than the overlay,
+ // resulting in invalid edits.
+ //
+ // Verify that this doesn't happen, by confirming that formatting is
+ // idempotent.
+ env.FormatBuffer("Foo.go")
+ got := env.BufferText("Foo.go")
+ if diff := compare.Text(want, got); diff != "" {
+ t.Errorf("invalid content after second formatting:\n%s", diff)
+ }
+ })
+}
+
+// Test that we can find packages for open files with different spelling on
+// case-insensitive file systems.
+func TestPackageForMisspelledURI(t *testing.T) {
+ t.Skip("golang/go#57081: this test fails because the Go command does not load Foo.go correctly")
+ if runtime.GOOS != "windows" && runtime.GOOS != "darwin" {
+ t.Skip("golang/go#57081 only reproduces on case-insensitive filesystems.")
+ }
+ const files = `
+-- go.mod --
+module mod.test
+
+go 1.19
+-- foo.go --
+package foo
+
+const C = D
+-- bar.go --
+package foo
+
+const D = 2
+`
+
+ Run(t, files, func(t *testing.T, env *Env) {
+ env.OpenFile("Foo.go")
+ env.AfterChange(NoDiagnostics())
+ })
+}
diff --git a/gopls/internal/regtest/workspace/standalone_test.go b/gopls/internal/regtest/workspace/standalone_test.go
new file mode 100644
index 000000000..e1021dfbc
--- /dev/null
+++ b/gopls/internal/regtest/workspace/standalone_test.go
@@ -0,0 +1,206 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package workspace
+
+import (
+ "sort"
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
+)
+
+func TestStandaloneFiles(t *testing.T) {
+ const files = `
+-- go.mod --
+module mod.test
+
+go 1.16
+-- lib/lib.go --
+package lib
+
+const C = 0
+
+type I interface {
+ M()
+}
+-- lib/ignore.go --
+//go:build ignore
+// +build ignore
+
+package main
+
+import (
+ "mod.test/lib"
+)
+
+const C = 1
+
+type Mer struct{}
+func (Mer) M()
+
+func main() {
+ println(lib.C + C)
+}
+`
+ WithOptions(
+ // On Go 1.17 and earlier, this test fails with
+ // experimentalWorkspaceModule. Not investigated, as
+ // experimentalWorkspaceModule will be removed.
+ Modes(Default),
+ ).Run(t, files, func(t *testing.T, env *Env) {
+ // Initially, gopls should not know about the standalone file as it hasn't
+ // been opened. Therefore, we should only find one symbol 'C'.
+ syms := env.Symbol("C")
+ if got, want := len(syms), 1; got != want {
+ t.Errorf("got %d symbols, want %d", got, want)
+ }
+
+ // Similarly, we should only find one reference to "C", and no
+ // implementations of I.
+ checkLocations := func(method string, gotLocations []protocol.Location, wantFiles ...string) {
+ var gotFiles []string
+ for _, l := range gotLocations {
+ gotFiles = append(gotFiles, env.Sandbox.Workdir.URIToPath(l.URI))
+ }
+ sort.Strings(gotFiles)
+ sort.Strings(wantFiles)
+ if diff := cmp.Diff(wantFiles, gotFiles); diff != "" {
+ t.Errorf("%s(...): unexpected locations (-want +got):\n%s", method, diff)
+ }
+ }
+
+ env.OpenFile("lib/lib.go")
+ env.AfterChange(NoDiagnostics())
+
+ // Replacing C with D should not cause any workspace diagnostics, since we
+ // haven't yet opened the standalone file.
+ env.RegexpReplace("lib/lib.go", "C", "D")
+ env.AfterChange(NoDiagnostics())
+ env.RegexpReplace("lib/lib.go", "D", "C")
+ env.AfterChange(NoDiagnostics())
+
+ refs := env.References(env.RegexpSearch("lib/lib.go", "C"))
+ checkLocations("References", refs, "lib/lib.go")
+
+ impls := env.Implementations(env.RegexpSearch("lib/lib.go", "I"))
+ checkLocations("Implementations", impls) // no implementations
+
+ // Opening the standalone file should not result in any diagnostics.
+ env.OpenFile("lib/ignore.go")
+ env.AfterChange(NoDiagnostics())
+
+ // Having opened the standalone file, we should find its symbols in the
+ // workspace.
+ syms = env.Symbol("C")
+ if got, want := len(syms), 2; got != want {
+ t.Fatalf("got %d symbols, want %d", got, want)
+ }
+
+ foundMainC := false
+ var symNames []string
+ for _, sym := range syms {
+ symNames = append(symNames, sym.Name)
+ if sym.Name == "main.C" {
+ foundMainC = true
+ }
+ }
+ if !foundMainC {
+ t.Errorf("WorkspaceSymbol(\"C\") = %v, want containing main.C", symNames)
+ }
+
+ // We should resolve workspace definitions in the standalone file.
+ fileLoc := env.GoToDefinition(env.RegexpSearch("lib/ignore.go", "lib.(C)"))
+ file := env.Sandbox.Workdir.URIToPath(fileLoc.URI)
+ if got, want := file, "lib/lib.go"; got != want {
+ t.Errorf("GoToDefinition(lib.C) = %v, want %v", got, want)
+ }
+
+ // ...as well as intra-file definitions
+ loc := env.GoToDefinition(env.RegexpSearch("lib/ignore.go", "\\+ (C)"))
+ wantLoc := env.RegexpSearch("lib/ignore.go", "const (C)")
+ if loc != wantLoc {
+ t.Errorf("GoToDefinition(C) = %v, want %v", loc, wantLoc)
+ }
+
+ // Renaming "lib.C" to "lib.D" should cause a diagnostic in the standalone
+ // file.
+ env.RegexpReplace("lib/lib.go", "C", "D")
+ env.AfterChange(Diagnostics(env.AtRegexp("lib/ignore.go", "lib.(C)")))
+
+ // Undoing the replacement should fix diagnostics
+ env.RegexpReplace("lib/lib.go", "D", "C")
+ env.AfterChange(NoDiagnostics())
+
+ // Now that our workspace has no errors, we should be able to find
+ // references and rename.
+ refs = env.References(env.RegexpSearch("lib/lib.go", "C"))
+ checkLocations("References", refs, "lib/lib.go", "lib/ignore.go")
+
+ impls = env.Implementations(env.RegexpSearch("lib/lib.go", "I"))
+ checkLocations("Implementations", impls, "lib/ignore.go")
+
+ // Renaming should rename in the standalone package.
+ env.Rename(env.RegexpSearch("lib/lib.go", "C"), "D")
+ env.RegexpSearch("lib/ignore.go", "lib.D")
+ })
+}
+
+func TestStandaloneFiles_Configuration(t *testing.T) {
+ const files = `
+-- go.mod --
+module mod.test
+
+go 1.18
+-- lib.go --
+package lib // without this package, files are loaded as command-line-arguments
+-- ignore.go --
+//go:build ignore
+// +build ignore
+
+package main
+
+// An arbitrary comment.
+
+func main() {}
+-- standalone.go --
+//go:build standalone
+// +build standalone
+
+package main
+
+func main() {}
+`
+
+ WithOptions(
+ Settings{
+ "standaloneTags": []string{"standalone", "script"},
+ },
+ ).Run(t, files, func(t *testing.T, env *Env) {
+ env.OpenFile("ignore.go")
+ env.OpenFile("standalone.go")
+
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("ignore.go", "package (main)")),
+ NoDiagnostics(ForFile("standalone.go")),
+ )
+
+ cfg := env.Editor.Config()
+ cfg.Settings = map[string]interface{}{
+ "standaloneTags": []string{"ignore"},
+ }
+ env.ChangeConfiguration(cfg)
+
+ // TODO(golang/go#56158): gopls does not purge previously published
+ // diagnostice when configuration changes.
+ env.RegexpReplace("ignore.go", "arbitrary", "meaningless")
+
+ env.AfterChange(
+ NoDiagnostics(ForFile("ignore.go")),
+ Diagnostics(env.AtRegexp("standalone.go", "package (main)")),
+ )
+ })
+}
diff --git a/gopls/internal/regtest/workspace/workspace_test.go b/gopls/internal/regtest/workspace/workspace_test.go
index ed2c9effd..0aff4713b 100644
--- a/gopls/internal/regtest/workspace/workspace_test.go
+++ b/gopls/internal/regtest/workspace/workspace_test.go
@@ -5,22 +5,25 @@
package workspace
import (
+ "context"
"fmt"
"path/filepath"
- "sort"
"strings"
"testing"
"golang.org/x/tools/gopls/internal/hooks"
- . "golang.org/x/tools/internal/lsp/regtest"
- "golang.org/x/tools/internal/lsp/source"
-
- "golang.org/x/tools/internal/lsp/fake"
- "golang.org/x/tools/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp"
+ "golang.org/x/tools/gopls/internal/lsp/fake"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/internal/bug"
+ "golang.org/x/tools/internal/gocommand"
"golang.org/x/tools/internal/testenv"
+
+ . "golang.org/x/tools/gopls/internal/lsp/regtest"
)
func TestMain(m *testing.M) {
+ bug.PanicOnBugs = true
Main(m, hooks.Options)
}
@@ -32,6 +35,8 @@ go 1.12
-- example.com@v1.2.3/blah/blah.go --
package blah
+import "fmt"
+
func SaySomething() {
fmt.Println("something")
}
@@ -59,7 +64,7 @@ require (
random.org v1.2.3
)
-- pkg/go.sum --
-example.com v1.2.3 h1:Yryq11hF02fEf2JlOS2eph+ICE2/ceevGV3C9dl5V/c=
+example.com v1.2.3 h1:veRD4tUnatQRgsULqULZPjeoBGFr2qBhevSCZllD2Ds=
example.com v1.2.3/go.mod h1:Y2Rc5rVWjWur0h3pd9aEvK5Pof8YKDANh9gHA2Maujo=
random.org v1.2.3 h1:+JE2Fkp7gS0zsHXGEQJ7hraom3pNTlkxC4b2qPfA+/Q=
random.org v1.2.3/go.mod h1:E9KM6+bBX2g5ykHZ9H27w16sWo3QwgonyjM44Dnej3I=
@@ -126,7 +131,7 @@ func TestReferences(t *testing.T) {
WithOptions(opts...).Run(t, workspaceModule, func(t *testing.T, env *Env) {
f := "pkg/inner/inner.go"
env.OpenFile(f)
- locations := env.References(f, env.RegexpSearch(f, `SaySomething`))
+ locations := env.References(env.RegexpSearch(f, `SaySomething`))
want := 3
if got := len(locations); got != want {
t.Fatalf("expected %v locations, got %v", want, got)
@@ -136,38 +141,6 @@ func TestReferences(t *testing.T) {
}
}
-// make sure that directory filters work
-func TestFilters(t *testing.T) {
- for _, tt := range []struct {
- name, rootPath string
- }{
- {
- name: "module root",
- rootPath: "pkg",
- },
- } {
- t.Run(tt.name, func(t *testing.T) {
- opts := []RunOption{ProxyFiles(workspaceProxy)}
- if tt.rootPath != "" {
- opts = append(opts, WorkspaceFolders(tt.rootPath))
- }
- f := func(o *source.Options) {
- o.DirectoryFilters = append(o.DirectoryFilters, "-inner")
- }
- opts = append(opts, Options(f))
- WithOptions(opts...).Run(t, workspaceModule, func(t *testing.T, env *Env) {
- syms := env.WorkspaceSymbol("Hi")
- sort.Slice(syms, func(i, j int) bool { return syms[i].ContainerName < syms[j].ContainerName })
- for i, s := range syms {
- if strings.Contains(s.ContainerName, "/inner") {
- t.Errorf("%s %v %s %s %d\n", s.Name, s.Kind, s.ContainerName, tt.name, i)
- }
- }
- })
- })
- }
-}
-
// Make sure that analysis diagnostics are cleared for the whole package when
// the only opened file is closed. This test was inspired by the experience in
// VS Code, where clicking on a reference result triggers a
@@ -178,12 +151,34 @@ func TestClearAnalysisDiagnostics(t *testing.T) {
WorkspaceFolders("pkg/inner"),
).Run(t, workspaceModule, func(t *testing.T, env *Env) {
env.OpenFile("pkg/main.go")
- env.Await(
- env.DiagnosticAtRegexp("pkg/main2.go", "fmt.Print"),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("pkg/main2.go", "fmt.Print")),
)
env.CloseBuffer("pkg/main.go")
- env.Await(
- EmptyDiagnostics("pkg/main2.go"),
+ env.AfterChange(
+ NoDiagnostics(ForFile("pkg/main2.go")),
+ )
+ })
+}
+
+// TestReloadOnlyOnce checks that changes to the go.mod file do not result in
+// redundant package loads (golang/go#54473).
+//
+// Note that this test may be fragile, as it depends on specific structure to
+// log messages around reinitialization. Nevertheless, it is important for
+// guarding against accidentally duplicate reloading.
+func TestReloadOnlyOnce(t *testing.T) {
+ WithOptions(
+ ProxyFiles(workspaceProxy),
+ WorkspaceFolders("pkg"),
+ ).Run(t, workspaceModule, func(t *testing.T, env *Env) {
+ dir := env.Sandbox.Workdir.URI("goodbye").SpanURI().Filename()
+ goModWithReplace := fmt.Sprintf(`%s
+replace random.org => %s
+`, env.ReadWorkspaceFile("pkg/go.mod"), dir)
+ env.WriteWorkspaceFile("pkg/go.mod", goModWithReplace)
+ env.AfterChange(
+ LogMatching(protocol.Info, `packages\.Load #\d+\n`, 2, false),
)
})
}
@@ -204,8 +199,7 @@ func TestWatchReplaceTargets(t *testing.T) {
replace random.org => %s
`, env.ReadWorkspaceFile("pkg/go.mod"), dir)
env.WriteWorkspaceFile("pkg/go.mod", goModWithReplace)
- env.Await(
- env.DoneWithChangeWatchedFiles(),
+ env.AfterChange(
UnregistrationMatching("didChangeWatchedFiles"),
RegistrationMatching("didChangeWatchedFiles"),
)
@@ -220,6 +214,8 @@ go 1.12
-- example.com@v1.2.3/blah/blah.go --
package blah
+import "fmt"
+
func SaySomething() {
fmt.Println("something")
}
@@ -234,6 +230,7 @@ func Hello() {}
`
func TestAutomaticWorkspaceModule_Interdependent(t *testing.T) {
+ testenv.NeedsGo1Point(t, 18) // uses go.work
const multiModule = `
-- moda/a/go.mod --
module a.com
@@ -265,19 +262,18 @@ func Hello() int {
`
WithOptions(
ProxyFiles(workspaceModuleProxy),
- Modes(Experimental),
).Run(t, multiModule, func(t *testing.T, env *Env) {
- env.Await(
- env.DiagnosticAtRegexp("moda/a/a.go", "x"),
- env.DiagnosticAtRegexp("modb/b/b.go", "x"),
- env.NoDiagnosticAtRegexp("moda/a/a.go", `"b.com/b"`),
+ env.RunGoCommand("work", "init")
+ env.RunGoCommand("work", "use", "-r", ".")
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("moda/a/a.go", "x")),
+ Diagnostics(env.AtRegexp("modb/b/b.go", "x")),
+ NoDiagnostics(env.AtRegexp("moda/a/a.go", `"b.com/b"`)),
)
})
}
-func TestMultiModuleWithExclude(t *testing.T) {
- testenv.NeedsGo1Point(t, 16)
-
+func TestModuleWithExclude(t *testing.T) {
const proxy = `
-- c.com@v1.2.3/go.mod --
module c.com
@@ -288,6 +284,8 @@ require b.com v1.2.3
-- c.com@v1.2.3/blah/blah.go --
package blah
+import "fmt"
+
func SaySomething() {
fmt.Println("something")
}
@@ -303,10 +301,6 @@ func Hello() {}
module b.com
go 1.12
--- b.com@v1.2.4/b/b.go --
-package b
-
-func Hello() {}
`
const multiModule = `
-- go.mod --
@@ -327,20 +321,29 @@ func main() {
`
WithOptions(
ProxyFiles(proxy),
- Modes(Experimental),
).Run(t, multiModule, func(t *testing.T, env *Env) {
- env.Await(
- env.DiagnosticAtRegexp("main.go", "x"),
+ env.OnceMet(
+ InitialWorkspaceLoad,
+ Diagnostics(env.AtRegexp("main.go", "x")),
)
})
}
// This change tests that the version of the module used changes after it has
// been deleted from the workspace.
+//
+// TODO(golang/go#55331): delete this placeholder along with experimental
+// workspace module.
func TestDeleteModule_Interdependent(t *testing.T) {
- t.Skip("Skipping due to golang/go#46375: race due to orphaned file reloading")
-
+ testenv.NeedsGo1Point(t, 18) // uses go.work
const multiModule = `
+-- go.work --
+go 1.18
+
+use (
+ moda/a
+ modb
+)
-- moda/a/go.mod --
module a.com
@@ -371,34 +374,25 @@ func Hello() int {
`
WithOptions(
ProxyFiles(workspaceModuleProxy),
- Modes(Experimental),
).Run(t, multiModule, func(t *testing.T, env *Env) {
env.OpenFile("moda/a/a.go")
env.Await(env.DoneWithOpen())
- original, _ := env.GoToDefinition("moda/a/a.go", env.RegexpSearch("moda/a/a.go", "Hello"))
+ originalLoc := env.GoToDefinition(env.RegexpSearch("moda/a/a.go", "Hello"))
+ original := env.Sandbox.Workdir.URIToPath(originalLoc.URI)
if want := "modb/b/b.go"; !strings.HasSuffix(original, want) {
t.Errorf("expected %s, got %v", want, original)
}
env.CloseBuffer(original)
- env.Await(env.DoneWithClose())
+ env.AfterChange()
env.RemoveWorkspaceFile("modb/b/b.go")
env.RemoveWorkspaceFile("modb/go.mod")
- env.Await(
- env.DoneWithChangeWatchedFiles(),
- )
+ env.WriteWorkspaceFile("go.work", "go 1.18\nuse moda/a")
+ env.AfterChange()
- d := protocol.PublishDiagnosticsParams{}
- env.Await(
- OnceMet(
- env.DiagnosticAtRegexpWithMessage("moda/a/go.mod", "require b.com v1.2.3", "b.com@v1.2.3 has not been downloaded"),
- ReadDiagnostics("moda/a/go.mod", &d),
- ),
- )
- env.ApplyQuickFixes("moda/a/go.mod", d.Diagnostics)
- env.Await(env.DoneWithChangeWatchedFiles())
- got, _ := env.GoToDefinition("moda/a/a.go", env.RegexpSearch("moda/a/a.go", "Hello"))
+ gotLoc := env.GoToDefinition(env.RegexpSearch("moda/a/a.go", "Hello"))
+ got := env.Sandbox.Workdir.URIToPath(gotLoc.URI)
if want := "b.com@v1.2.3/b/b.go"; !strings.HasSuffix(got, want) {
t.Errorf("expected %s, got %v", want, got)
}
@@ -408,7 +402,14 @@ func Hello() int {
// Tests that the version of the module used changes after it has been added
// to the workspace.
func TestCreateModule_Interdependent(t *testing.T) {
+ testenv.NeedsGo1Point(t, 18) // uses go.work
const multiModule = `
+-- go.work --
+go 1.18
+
+use (
+ moda/a
+)
-- moda/a/go.mod --
module a.com
@@ -429,16 +430,23 @@ func main() {
}
`
WithOptions(
- Modes(Experimental),
ProxyFiles(workspaceModuleProxy),
).Run(t, multiModule, func(t *testing.T, env *Env) {
env.OpenFile("moda/a/a.go")
- original, _ := env.GoToDefinition("moda/a/a.go", env.RegexpSearch("moda/a/a.go", "Hello"))
+ loc := env.GoToDefinition(env.RegexpSearch("moda/a/a.go", "Hello"))
+ original := env.Sandbox.Workdir.URIToPath(loc.URI)
if want := "b.com@v1.2.3/b/b.go"; !strings.HasSuffix(original, want) {
t.Errorf("expected %s, got %v", want, original)
}
env.CloseBuffer(original)
env.WriteWorkspaceFiles(map[string]string{
+ "go.work": `go 1.18
+
+use (
+ moda/a
+ modb
+)
+`,
"modb/go.mod": "module b.com",
"modb/b/b.go": `package b
@@ -447,13 +455,9 @@ func Hello() int {
}
`,
})
- env.Await(
- OnceMet(
- env.DoneWithChangeWatchedFiles(),
- env.DiagnosticAtRegexp("modb/b/b.go", "x"),
- ),
- )
- got, _ := env.GoToDefinition("moda/a/a.go", env.RegexpSearch("moda/a/a.go", "Hello"))
+ env.AfterChange(Diagnostics(env.AtRegexp("modb/b/b.go", "x")))
+ gotLoc := env.GoToDefinition(env.RegexpSearch("moda/a/a.go", "Hello"))
+ got := env.Sandbox.Workdir.URIToPath(gotLoc.URI)
if want := "modb/b/b.go"; !strings.HasSuffix(got, want) {
t.Errorf("expected %s, got %v", want, original)
}
@@ -463,7 +467,15 @@ func Hello() int {
// This test confirms that a gopls workspace can recover from initialization
// with one invalid module.
func TestOneBrokenModule(t *testing.T) {
+ testenv.NeedsGo1Point(t, 18) // uses go.work
const multiModule = `
+-- go.work --
+go 1.18
+
+use (
+ moda/a
+ modb
+)
-- moda/a/go.mod --
module a.com
@@ -492,162 +504,36 @@ func Hello() int {
`
WithOptions(
ProxyFiles(workspaceModuleProxy),
- Modes(Experimental),
).Run(t, multiModule, func(t *testing.T, env *Env) {
env.OpenFile("modb/go.mod")
- env.Await(
- OnceMet(
- env.DoneWithOpen(),
- DiagnosticAt("modb/go.mod", 0, 0),
- ),
+ env.AfterChange(
+ Diagnostics(AtPosition("modb/go.mod", 0, 0)),
)
env.RegexpReplace("modb/go.mod", "modul", "module")
env.SaveBufferWithoutActions("modb/go.mod")
- env.Await(
- env.DiagnosticAtRegexp("modb/b/b.go", "x"),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("modb/b/b.go", "x")),
)
})
}
-func TestUseGoplsMod(t *testing.T) {
- // This test validates certain functionality related to using a gopls.mod
- // file to specify workspace modules.
- testenv.NeedsGo1Point(t, 14)
- const multiModule = `
--- moda/a/go.mod --
-module a.com
-
-require b.com v1.2.3
--- moda/a/go.sum --
-b.com v1.2.3 h1:tXrlXP0rnjRpKNmkbLYoWBdq0ikb3C3bKK9//moAWBI=
-b.com v1.2.3/go.mod h1:D+J7pfFBZK5vdIdZEFquR586vKKIkqG7Qjw9AxG5BQ8=
--- moda/a/a.go --
-package a
-
-import (
- "b.com/b"
-)
-
-func main() {
- var x int
- _ = b.Hello()
-}
--- modb/go.mod --
-module b.com
-
-require example.com v1.2.3
--- modb/go.sum --
-example.com v1.2.3 h1:Yryq11hF02fEf2JlOS2eph+ICE2/ceevGV3C9dl5V/c=
-example.com v1.2.3/go.mod h1:Y2Rc5rVWjWur0h3pd9aEvK5Pof8YKDANh9gHA2Maujo=
--- modb/b/b.go --
-package b
-
-func Hello() int {
- var x int
-}
--- gopls.mod --
-module gopls-workspace
-
-require (
- a.com v0.0.0-goplsworkspace
- b.com v1.2.3
-)
-
-replace a.com => $SANDBOX_WORKDIR/moda/a
+// TestBadGoWork exercises the panic from golang/vscode-go#2121.
+func TestBadGoWork(t *testing.T) {
+ const files = `
+-- go.work --
+use ./bar
+-- bar/go.mod --
+module example.com/bar
`
- WithOptions(
- ProxyFiles(workspaceModuleProxy),
- Modes(Experimental),
- ).Run(t, multiModule, func(t *testing.T, env *Env) {
- // Initially, the gopls.mod should cause only the a.com module to be
- // loaded. Validate this by jumping to a definition in b.com and ensuring
- // that we go to the module cache.
- env.OpenFile("moda/a/a.go")
- env.Await(env.DoneWithOpen())
-
- // To verify which modules are loaded, we'll jump to the definition of
- // b.Hello.
- checkHelloLocation := func(want string) error {
- location, _ := env.GoToDefinition("moda/a/a.go", env.RegexpSearch("moda/a/a.go", "Hello"))
- if !strings.HasSuffix(location, want) {
- return fmt.Errorf("expected %s, got %v", want, location)
- }
- return nil
- }
-
- // Initially this should be in the module cache, as b.com is not replaced.
- if err := checkHelloLocation("b.com@v1.2.3/b/b.go"); err != nil {
- t.Fatal(err)
- }
-
- // Now, modify the gopls.mod file on disk to activate the b.com module in
- // the workspace.
- workdir := env.Sandbox.Workdir.RootURI().SpanURI().Filename()
- env.WriteWorkspaceFile("gopls.mod", fmt.Sprintf(`module gopls-workspace
-
-require (
- a.com v1.9999999.0-goplsworkspace
- b.com v1.9999999.0-goplsworkspace
-)
-
-replace a.com => %s/moda/a
-replace b.com => %s/modb
-`, workdir, workdir))
- env.Await(env.DoneWithChangeWatchedFiles())
- // Check that go.mod diagnostics picked up the newly active mod file.
- // The local version of modb has an extra dependency we need to download.
- env.OpenFile("modb/go.mod")
- env.Await(env.DoneWithOpen())
-
- var d protocol.PublishDiagnosticsParams
- env.Await(
- OnceMet(
- env.DiagnosticAtRegexpWithMessage("modb/go.mod", `require example.com v1.2.3`, "has not been downloaded"),
- ReadDiagnostics("modb/go.mod", &d),
- ),
- )
- env.ApplyQuickFixes("modb/go.mod", d.Diagnostics)
- env.Await(env.DiagnosticAtRegexp("modb/b/b.go", "x"))
- // Jumping to definition should now go to b.com in the workspace.
- if err := checkHelloLocation("modb/b/b.go"); err != nil {
- t.Fatal(err)
- }
-
- // Now, let's modify the gopls.mod *overlay* (not on disk), and verify that
- // this change is only picked up once it is saved.
- env.OpenFile("gopls.mod")
- env.Await(env.DoneWithOpen())
- env.SetBufferContent("gopls.mod", fmt.Sprintf(`module gopls-workspace
-
-require (
- a.com v0.0.0-goplsworkspace
-)
-
-replace a.com => %s/moda/a
-`, workdir))
-
- // Editing the gopls.mod removes modb from the workspace modules, and so
- // should clear outstanding diagnostics...
- env.Await(OnceMet(
- env.DoneWithChange(),
- EmptyDiagnostics("modb/go.mod"),
- ))
- // ...but does not yet cause a workspace reload, so we should still jump to modb.
- if err := checkHelloLocation("modb/b/b.go"); err != nil {
- t.Fatal(err)
- }
- // Saving should reload the workspace.
- env.SaveBufferWithoutActions("gopls.mod")
- if err := checkHelloLocation("b.com@v1.2.3/b/b.go"); err != nil {
- t.Fatal(err)
- }
+ Run(t, files, func(t *testing.T, env *Env) {
+ env.OpenFile("go.work")
})
}
func TestUseGoWork(t *testing.T) {
+ testenv.NeedsGo1Point(t, 18) // uses go.work
// This test validates certain functionality related to using a go.work
// file to specify workspace modules.
- testenv.NeedsGo1Point(t, 14)
const multiModule = `
-- moda/a/go.mod --
module a.com
@@ -690,7 +576,7 @@ use (
WithOptions(
ProxyFiles(workspaceModuleProxy),
).Run(t, multiModule, func(t *testing.T, env *Env) {
- // Initially, the gopls.mod should cause only the a.com module to be
+ // Initially, the go.work should cause only the a.com module to be
// loaded. Validate this by jumping to a definition in b.com and ensuring
// that we go to the module cache.
env.OpenFile("moda/a/a.go")
@@ -699,9 +585,10 @@ use (
// To verify which modules are loaded, we'll jump to the definition of
// b.Hello.
checkHelloLocation := func(want string) error {
- location, _ := env.GoToDefinition("moda/a/a.go", env.RegexpSearch("moda/a/a.go", "Hello"))
- if !strings.HasSuffix(location, want) {
- return fmt.Errorf("expected %s, got %v", want, location)
+ loc := env.GoToDefinition(env.RegexpSearch("moda/a/a.go", "Hello"))
+ file := env.Sandbox.Workdir.URIToPath(loc.URI)
+ if !strings.HasSuffix(file, want) {
+ return fmt.Errorf("expected %s, got %v", want, file)
}
return nil
}
@@ -711,7 +598,7 @@ use (
t.Fatal(err)
}
- // Now, modify the gopls.mod file on disk to activate the b.com module in
+ // Now, modify the go.work file on disk to activate the b.com module in
// the workspace.
env.WriteWorkspaceFile("go.work", `
go 1.17
@@ -721,57 +608,48 @@ use (
./modb
)
`)
- env.Await(env.DoneWithChangeWatchedFiles())
- // Check that go.mod diagnostics picked up the newly active mod file.
- // The local version of modb has an extra dependency we need to download.
- env.OpenFile("modb/go.mod")
- env.Await(env.DoneWithOpen())
- // TODO(golang/go#50862): the go command drops error messages when using
- // go.work, so we need to build our go.mod diagnostics in a different way.
- if testenv.Go1Point() < 18 {
- var d protocol.PublishDiagnosticsParams
- env.Await(
- OnceMet(
- env.DiagnosticAtRegexpWithMessage("modb/go.mod", `require example.com v1.2.3`, "has not been downloaded"),
- ReadDiagnostics("modb/go.mod", &d),
- ),
- )
- env.ApplyQuickFixes("modb/go.mod", d.Diagnostics)
- env.Await(env.DiagnosticAtRegexp("modb/b/b.go", "x"))
- }
+ // As of golang/go#54069, writing go.work to the workspace triggers a
+ // workspace reload.
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("modb/b/b.go", "x")),
+ )
// Jumping to definition should now go to b.com in the workspace.
if err := checkHelloLocation("modb/b/b.go"); err != nil {
t.Fatal(err)
}
- // Now, let's modify the gopls.mod *overlay* (not on disk), and verify that
+ // Now, let's modify the go.work *overlay* (not on disk), and verify that
// this change is only picked up once it is saved.
env.OpenFile("go.work")
- env.Await(env.DoneWithOpen())
+ env.AfterChange()
env.SetBufferContent("go.work", `go 1.17
use (
./moda/a
)`)
- // Editing the gopls.mod removes modb from the workspace modules, and so
- // should clear outstanding diagnostics...
- env.Await(OnceMet(
- env.DoneWithChange(),
- EmptyOrNoDiagnostics("modb/go.mod"),
- ))
- // ...but does not yet cause a workspace reload, so we should still jump to modb.
+ // Simply modifying the go.work file does not cause a reload, so we should
+ // still jump within the workspace.
+ //
+ // TODO: should editing the go.work above cause modb diagnostics to be
+ // suppressed?
+ env.Await(env.DoneWithChange())
if err := checkHelloLocation("modb/b/b.go"); err != nil {
t.Fatal(err)
}
+
// Saving should reload the workspace.
env.SaveBufferWithoutActions("go.work")
if err := checkHelloLocation("b.com@v1.2.3/b/b.go"); err != nil {
t.Fatal(err)
}
+ // This fails if guarded with a OnceMet(DoneWithSave(), ...), because it is
+ // debounced (and therefore not synchronous with the change).
+ env.Await(NoDiagnostics(ForFile("modb/go.mod")))
+
// Test Formatting.
env.SetBufferContent("go.work", `go 1.18
use (
@@ -797,6 +675,8 @@ use (
}
func TestUseGoWorkDiagnosticMissingModule(t *testing.T) {
+ testenv.NeedsGo1Point(t, 18) // uses go.work
+
const files = `
-- go.work --
go 1.18
@@ -807,8 +687,8 @@ module example.com/bar
`
Run(t, files, func(t *testing.T, env *Env) {
env.OpenFile("go.work")
- env.Await(
- env.DiagnosticAtRegexpWithMessage("go.work", "use", "directory ./foo does not contain a module"),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("go.work", "use"), WithMessage("directory ./foo does not contain a module")),
)
// The following tests is a regression test against an issue where we weren't
// copying the workFile struct field on workspace when a new one was created in
@@ -817,17 +697,18 @@ module example.com/bar
// struct, and then set the content back to the old contents to make sure
// the diagnostic still shows up.
env.SetBufferContent("go.work", "go 1.18 \n\n use ./bar\n")
- env.Await(
- env.NoDiagnosticAtRegexp("go.work", "use"),
+ env.AfterChange(
+ NoDiagnostics(env.AtRegexp("go.work", "use")),
)
env.SetBufferContent("go.work", "go 1.18 \n\n use ./foo\n")
- env.Await(
- env.DiagnosticAtRegexpWithMessage("go.work", "use", "directory ./foo does not contain a module"),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("go.work", "use"), WithMessage("directory ./foo does not contain a module")),
)
})
}
func TestUseGoWorkDiagnosticSyntaxError(t *testing.T) {
+ testenv.NeedsGo1Point(t, 18)
const files = `
-- go.work --
go 1.18
@@ -837,14 +718,16 @@ replace
`
Run(t, files, func(t *testing.T, env *Env) {
env.OpenFile("go.work")
- env.Await(
- env.DiagnosticAtRegexpWithMessage("go.work", "usa", "unknown directive: usa"),
- env.DiagnosticAtRegexpWithMessage("go.work", "replace", "usage: replace"),
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("go.work", "usa"), WithMessage("unknown directive: usa")),
+ Diagnostics(env.AtRegexp("go.work", "replace"), WithMessage("usage: replace")),
)
})
}
func TestUseGoWorkHover(t *testing.T) {
+ testenv.NeedsGo1Point(t, 18)
+
const files = `
-- go.work --
go 1.18
@@ -871,8 +754,7 @@ module example.com/bar/baz
}
for hoverRE, want := range tcs {
- pos := env.RegexpSearch("go.work", hoverRE)
- got, _ := env.Hover("go.work", pos)
+ got, _ := env.Hover(env.RegexpSearch("go.work", hoverRE))
if got.Value != want {
t.Errorf(`hover on %q: got %q, want %q`, hoverRE, got, want)
}
@@ -921,23 +803,22 @@ use (
).Run(t, workspace, func(t *testing.T, env *Env) {
env.OpenFile("moda/a/a.go")
env.Await(env.DoneWithOpen())
- location, _ := env.GoToDefinition("moda/a/a.go", env.RegexpSearch("moda/a/a.go", "Hello"))
+ loc := env.GoToDefinition(env.RegexpSearch("moda/a/a.go", "Hello"))
+ file := env.Sandbox.Workdir.URIToPath(loc.URI)
want := "modb/b/b.go"
- if !strings.HasSuffix(location, want) {
- t.Errorf("expected %s, got %v", want, location)
+ if !strings.HasSuffix(file, want) {
+ t.Errorf("expected %s, got %v", want, file)
}
})
}
func TestNonWorkspaceFileCreation(t *testing.T) {
- testenv.NeedsGo1Point(t, 13)
-
const files = `
--- go.mod --
+-- work/go.mod --
module mod.com
go 1.12
--- x.go --
+-- work/x.go --
package x
`
@@ -946,15 +827,41 @@ package foo
import "fmt"
var _ = fmt.Printf
`
- Run(t, files, func(t *testing.T, env *Env) {
- env.CreateBuffer("/tmp/foo.go", "")
- env.EditBuffer("/tmp/foo.go", fake.NewEdit(0, 0, 0, 0, code))
- env.GoToDefinition("/tmp/foo.go", env.RegexpSearch("/tmp/foo.go", `Printf`))
+ WithOptions(
+ WorkspaceFolders("work"), // so that outside/... is outside the workspace
+ ).Run(t, files, func(t *testing.T, env *Env) {
+ env.CreateBuffer("outside/foo.go", "")
+ env.EditBuffer("outside/foo.go", fake.NewEdit(0, 0, 0, 0, code))
+ env.GoToDefinition(env.RegexpSearch("outside/foo.go", `Printf`))
})
}
-func TestMultiModuleV2(t *testing.T) {
+func TestGoWork_V2Module(t *testing.T) {
+ testenv.NeedsGo1Point(t, 18) // uses go.work
+ // When using a go.work, we must have proxy content even if it is replaced.
+ const proxy = `
+-- b.com/v2@v2.1.9/go.mod --
+module b.com/v2
+
+go 1.12
+-- b.com/v2@v2.1.9/b/b.go --
+package b
+
+func Ciao()() int {
+ return 0
+}
+`
+
const multiModule = `
+-- go.work --
+go 1.18
+
+use (
+ moda/a
+ modb
+ modb/v2
+ modc
+)
-- moda/a/go.mod --
module a.com
@@ -997,122 +904,40 @@ func main() {
var x int
}
`
+
WithOptions(
- Modes(Experimental),
+ ProxyFiles(proxy),
).Run(t, multiModule, func(t *testing.T, env *Env) {
- env.Await(
- env.DiagnosticAtRegexp("moda/a/a.go", "x"),
- env.DiagnosticAtRegexp("modb/b/b.go", "x"),
- env.DiagnosticAtRegexp("modb/v2/b/b.go", "x"),
- env.DiagnosticAtRegexp("modc/main.go", "x"),
- )
- })
-}
-
-func TestDirectoryFiltersLoads(t *testing.T) {
- // exclude, and its error, should be excluded from the workspace.
- const files = `
--- go.mod --
-module example.com
-
-go 1.12
--- exclude/exclude.go --
-package exclude
-
-const _ = Nonexistant
-`
- cfg := EditorConfig{
- DirectoryFilters: []string{"-exclude"},
- }
- WithOptions(cfg).Run(t, files, func(t *testing.T, env *Env) {
- env.Await(NoDiagnostics("exclude/x.go"))
- })
-}
-
-func TestDirectoryFiltersTransitiveDep(t *testing.T) {
- // Even though exclude is excluded from the workspace, it should
- // still be importable as a non-workspace package.
- const files = `
--- go.mod --
-module example.com
-
-go 1.12
--- include/include.go --
-package include
-import "example.com/exclude"
-
-const _ = exclude.X
--- exclude/exclude.go --
-package exclude
-
-const _ = Nonexistant // should be ignored, since this is a non-workspace package
-const X = 1
-`
-
- cfg := EditorConfig{
- DirectoryFilters: []string{"-exclude"},
- }
- WithOptions(cfg).Run(t, files, func(t *testing.T, env *Env) {
- env.Await(
- NoDiagnostics("exclude/exclude.go"), // filtered out
- NoDiagnostics("include/include.go"), // successfully builds
+ env.OnceMet(
+ InitialWorkspaceLoad,
+ // TODO(rfindley): assert on the full set of diagnostics here. We
+ // should ensure that we don't have a diagnostic at b.Hi in a.go.
+ Diagnostics(env.AtRegexp("moda/a/a.go", "x")),
+ Diagnostics(env.AtRegexp("modb/b/b.go", "x")),
+ Diagnostics(env.AtRegexp("modb/v2/b/b.go", "x")),
+ Diagnostics(env.AtRegexp("modc/main.go", "x")),
)
})
}
-func TestDirectoryFiltersWorkspaceModules(t *testing.T) {
- // Define a module include.com which should be in the workspace, plus a
- // module exclude.com which should be excluded and therefore come from
- // the proxy.
- const files = `
--- include/go.mod --
-module include.com
-
-go 1.12
-
-require exclude.com v1.0.0
-
--- include/go.sum --
-exclude.com v1.0.0 h1:Q5QSfDXY5qyNCBeUiWovUGqcLCRZKoTs9XdBeVz+w1I=
-exclude.com v1.0.0/go.mod h1:hFox2uDlNB2s2Jfd9tHlQVfgqUiLVTmh6ZKat4cvnj4=
-
--- include/include.go --
-package include
-
-import "exclude.com"
-
-var _ = exclude.X // satisfied only by the workspace version
--- exclude/go.mod --
-module exclude.com
-
-go 1.12
--- exclude/exclude.go --
-package exclude
-
-const X = 1
-`
- const proxy = `
--- exclude.com@v1.0.0/go.mod --
-module exclude.com
-
-go 1.12
--- exclude.com@v1.0.0/exclude.go --
-package exclude
-`
- cfg := EditorConfig{
- DirectoryFilters: []string{"-exclude"},
- }
- WithOptions(cfg, Modes(Experimental), ProxyFiles(proxy)).Run(t, files, func(t *testing.T, env *Env) {
- env.Await(env.DiagnosticAtRegexp("include/include.go", `exclude.(X)`))
- })
-}
-
// Confirm that a fix for a tidy module will correct all modules in the
// workspace.
func TestMultiModule_OneBrokenModule(t *testing.T) {
- testenv.NeedsGo1Point(t, 15)
+ // In the earlier 'experimental workspace mode', gopls would aggregate go.sum
+ // entries for the workspace module, allowing it to correctly associate
+ // missing go.sum with diagnostics. With go.work files, this doesn't work:
+ // the go.command will happily write go.work.sum.
+ t.Skip("golang/go#57509: go.mod diagnostics do not work in go.work mode")
+ testenv.NeedsGo1Point(t, 18) // uses go.work
+ const files = `
+-- go.work --
+go 1.18
- const mod = `
+use (
+ a
+ b
+)
+-- go.work.sum --
-- a/go.mod --
module a.com
@@ -1139,15 +964,15 @@ func main() {
`
WithOptions(
ProxyFiles(workspaceProxy),
- Modes(Experimental),
- ).Run(t, mod, func(t *testing.T, env *Env) {
+ ).Run(t, files, func(t *testing.T, env *Env) {
params := &protocol.PublishDiagnosticsParams{}
env.OpenFile("b/go.mod")
- env.Await(
- OnceMet(
- env.GoSumDiagnostic("b/go.mod", `example.com v1.2.3`),
- ReadDiagnostics("b/go.mod", params),
+ env.AfterChange(
+ Diagnostics(
+ env.AtRegexp("go.mod", `example.com v1.2.3`),
+ WithMessage("go.sum is out of sync"),
),
+ ReadDiagnostics("b/go.mod", params),
)
for _, d := range params.Diagnostics {
if !strings.Contains(d.Message, "go.sum is out of sync") {
@@ -1159,8 +984,8 @@ func main() {
}
env.ApplyQuickFixes("b/go.mod", []protocol.Diagnostic{d})
}
- env.Await(
- EmptyDiagnostics("b/go.mod"),
+ env.AfterChange(
+ NoDiagnostics(ForFile("b/go.mod")),
)
})
}
@@ -1189,10 +1014,8 @@ go 1.12
package main
`
WithOptions(
- EditorConfig{Env: map[string]string{
- "GOPATH": filepath.FromSlash("$SANDBOX_WORKDIR/gopath"),
- }},
- Modes(Singleton),
+ EnvVars{"GOPATH": filepath.FromSlash("$SANDBOX_WORKDIR/gopath")},
+ Modes(Default),
).Run(t, mod, func(t *testing.T, env *Env) {
env.Await(
// Confirm that the build configuration is seen as valid,
@@ -1203,13 +1026,18 @@ package main
})
}
-func TestAddGoWork(t *testing.T) {
+func TestAddAndRemoveGoWork(t *testing.T) {
+ testenv.NeedsGo1Point(t, 18)
+ // Use a workspace with a module in the root directory to exercise the case
+ // where a go.work is added to the existing root directory. This verifies
+ // that we're detecting changes to the module source, not just the root
+ // directory.
const nomod = `
--- a/go.mod --
+-- go.mod --
module a.com
go 1.16
--- a/main.go --
+-- main.go --
package main
func main() {}
@@ -1223,21 +1051,213 @@ package main
func main() {}
`
WithOptions(
- Modes(Singleton),
+ Modes(Default),
).Run(t, nomod, func(t *testing.T, env *Env) {
- env.OpenFile("a/main.go")
+ env.OpenFile("main.go")
env.OpenFile("b/main.go")
- env.Await(
- DiagnosticAt("a/main.go", 0, 0),
- DiagnosticAt("b/main.go", 0, 0),
+ // Since b/main.go is not in the workspace, it should have a warning on its
+ // package declaration.
+ env.AfterChange(
+ NoDiagnostics(ForFile("main.go")),
+ Diagnostics(AtPosition("b/main.go", 0, 0)),
)
env.WriteWorkspaceFile("go.work", `go 1.16
use (
- a
+ .
b
)
`)
- env.Await(NoOutstandingDiagnostics())
+ env.AfterChange(NoDiagnostics())
+ // Removing the go.work file should put us back where we started.
+ env.RemoveWorkspaceFile("go.work")
+
+ // TODO(golang/go#57558, golang/go#57508): file watching is asynchronous,
+ // and we must wait for the view to be reconstructed before touching
+ // b/main.go, so that the new view "knows" about b/main.go. This is simply
+ // a bug, but awaiting the change here avoids it.
+ env.Await(env.DoneWithChangeWatchedFiles())
+
+ // TODO(rfindley): fix this bug: reopening b/main.go is necessary here
+ // because we no longer "see" the file in any view.
+ env.CloseBuffer("b/main.go")
+ env.OpenFile("b/main.go")
+
+ env.AfterChange(
+ NoDiagnostics(ForFile("main.go")),
+ Diagnostics(AtPosition("b/main.go", 0, 0)),
+ )
})
}
+
+// Tests the fix for golang/go#52500.
+func TestChangeTestVariant_Issue52500(t *testing.T) {
+ const src = `
+-- go.mod --
+module mod.test
+
+go 1.12
+-- main_test.go --
+package main_test
+
+type Server struct{}
+
+const mainConst = otherConst
+-- other_test.go --
+package main_test
+
+const otherConst = 0
+
+func (Server) Foo() {}
+`
+
+ Run(t, src, func(t *testing.T, env *Env) {
+ env.OpenFile("other_test.go")
+ env.RegexpReplace("other_test.go", "main_test", "main")
+
+ // For this test to function, it is necessary to wait on both of the
+ // expectations below: the bug is that when switching the package name in
+ // other_test.go from main->main_test, metadata for main_test is not marked
+ // as invalid. So we need to wait for the metadata of main_test.go to be
+ // updated before moving other_test.go back to the main_test package.
+ env.Await(
+ Diagnostics(env.AtRegexp("other_test.go", "Server")),
+ Diagnostics(env.AtRegexp("main_test.go", "otherConst")),
+ )
+ env.RegexpReplace("other_test.go", "main", "main_test")
+ env.AfterChange(
+ NoDiagnostics(ForFile("other_test.go")),
+ NoDiagnostics(ForFile("main_test.go")),
+ )
+
+ // This will cause a test failure if other_test.go is not in any package.
+ _ = env.GoToDefinition(env.RegexpSearch("other_test.go", "Server"))
+ })
+}
+
+// Test for golang/go#48929.
+func TestClearNonWorkspaceDiagnostics(t *testing.T) {
+ testenv.NeedsGo1Point(t, 18) // uses go.work
+
+ const ws = `
+-- go.work --
+go 1.18
+
+use (
+ ./b
+)
+-- a/go.mod --
+module a
+
+go 1.17
+-- a/main.go --
+package main
+
+func main() {
+ var V string
+}
+-- b/go.mod --
+module b
+
+go 1.17
+-- b/main.go --
+package b
+
+import (
+ _ "fmt"
+)
+`
+ Run(t, ws, func(t *testing.T, env *Env) {
+ env.OpenFile("b/main.go")
+ env.AfterChange(
+ NoDiagnostics(ForFile("a/main.go")),
+ )
+ env.OpenFile("a/main.go")
+ env.AfterChange(
+ Diagnostics(env.AtRegexp("a/main.go", "V"), WithMessage("not used")),
+ )
+ env.CloseBuffer("a/main.go")
+
+ // Make an arbitrary edit because gopls explicitly diagnoses a/main.go
+ // whenever it is "changed".
+ //
+ // TODO(rfindley): it should not be necessary to make another edit here.
+ // Gopls should be smart enough to avoid diagnosing a.
+ env.RegexpReplace("b/main.go", "package b", "package b // a package")
+ env.AfterChange(
+ NoDiagnostics(ForFile("a/main.go")),
+ )
+ })
+}
+
+// Test that we don't get a version warning when the Go version in PATH is
+// supported.
+func TestOldGoNotification_SupportedVersion(t *testing.T) {
+ v := goVersion(t)
+ if v < lsp.OldestSupportedGoVersion() {
+ t.Skipf("go version 1.%d is unsupported", v)
+ }
+
+ Run(t, "", func(t *testing.T, env *Env) {
+ env.OnceMet(
+ InitialWorkspaceLoad,
+ NoShownMessage("upgrade"),
+ )
+ })
+}
+
+// Test that we do get a version warning when the Go version in PATH is
+// unsupported, though this test may never execute if we stop running CI at
+// legacy Go versions (see also TestOldGoNotification_Fake)
+func TestOldGoNotification_UnsupportedVersion(t *testing.T) {
+ v := goVersion(t)
+ if v >= lsp.OldestSupportedGoVersion() {
+ t.Skipf("go version 1.%d is supported", v)
+ }
+
+ Run(t, "", func(t *testing.T, env *Env) {
+ env.Await(
+ // Note: cannot use OnceMet(InitialWorkspaceLoad, ...) here, as the
+ // upgrade message may race with the IWL.
+ ShownMessage("Please upgrade"),
+ )
+ })
+}
+
+func TestOldGoNotification_Fake(t *testing.T) {
+ // Get the Go version from path, and make sure it's unsupported.
+ //
+ // In the future we'll stop running CI on legacy Go versions. By mutating the
+ // oldest supported Go version here, we can at least ensure that the
+ // ShowMessage pop-up works.
+ ctx := context.Background()
+ goversion, err := gocommand.GoVersion(ctx, gocommand.Invocation{}, &gocommand.Runner{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func(t []lsp.GoVersionSupport) {
+ lsp.GoVersionTable = t
+ }(lsp.GoVersionTable)
+ lsp.GoVersionTable = []lsp.GoVersionSupport{
+ {GoVersion: goversion, InstallGoplsVersion: "v1.0.0"},
+ }
+
+ Run(t, "", func(t *testing.T, env *Env) {
+ env.Await(
+ // Note: cannot use OnceMet(InitialWorkspaceLoad, ...) here, as the
+ // upgrade message may race with the IWL.
+ ShownMessage("Please upgrade"),
+ )
+ })
+}
+
+// goVersion returns the version of the Go command in PATH.
+func goVersion(t *testing.T) int {
+ t.Helper()
+ ctx := context.Background()
+ goversion, err := gocommand.GoVersion(ctx, gocommand.Invocation{}, &gocommand.Runner{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ return goversion
+}
diff --git a/gopls/internal/span/parse.go b/gopls/internal/span/parse.go
new file mode 100644
index 000000000..715d5fe44
--- /dev/null
+++ b/gopls/internal/span/parse.go
@@ -0,0 +1,114 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package span
+
+import (
+ "path/filepath"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+// Parse returns the location represented by the input.
+// Only file paths are accepted, not URIs.
+// The returned span will be normalized, and thus if printed may produce a
+// different string.
+func Parse(input string) Span {
+ return ParseInDir(input, ".")
+}
+
+// ParseInDir is like Parse, but interprets paths relative to wd.
+func ParseInDir(input, wd string) Span {
+ uri := func(path string) URI {
+ if !filepath.IsAbs(path) {
+ path = filepath.Join(wd, path)
+ }
+ return URIFromPath(path)
+ }
+ // :0:0#0-0:0#0
+ valid := input
+ var hold, offset int
+ hadCol := false
+ suf := rstripSuffix(input)
+ if suf.sep == "#" {
+ offset = suf.num
+ suf = rstripSuffix(suf.remains)
+ }
+ if suf.sep == ":" {
+ valid = suf.remains
+ hold = suf.num
+ hadCol = true
+ suf = rstripSuffix(suf.remains)
+ }
+ switch {
+ case suf.sep == ":":
+ return New(uri(suf.remains), NewPoint(suf.num, hold, offset), Point{})
+ case suf.sep == "-":
+ // we have a span, fall out of the case to continue
+ default:
+ // separator not valid, rewind to either the : or the start
+ return New(uri(valid), NewPoint(hold, 0, offset), Point{})
+ }
+ // only the span form can get here
+ // at this point we still don't know what the numbers we have mean
+ // if have not yet seen a : then we might have either a line or a column depending
+ // on whether start has a column or not
+ // we build an end point and will fix it later if needed
+ end := NewPoint(suf.num, hold, offset)
+ hold, offset = 0, 0
+ suf = rstripSuffix(suf.remains)
+ if suf.sep == "#" {
+ offset = suf.num
+ suf = rstripSuffix(suf.remains)
+ }
+ if suf.sep != ":" {
+ // turns out we don't have a span after all, rewind
+ return New(uri(valid), end, Point{})
+ }
+ valid = suf.remains
+ hold = suf.num
+ suf = rstripSuffix(suf.remains)
+ if suf.sep != ":" {
+ // line#offset only
+ return New(uri(valid), NewPoint(hold, 0, offset), end)
+ }
+ // we have a column, so if end only had one number, it is also the column
+ if !hadCol {
+ end = NewPoint(suf.num, end.v.Line, end.v.Offset)
+ }
+ return New(uri(suf.remains), NewPoint(suf.num, hold, offset), end)
+}
+
+type suffix struct {
+ remains string
+ sep string
+ num int
+}
+
+func rstripSuffix(input string) suffix {
+ if len(input) == 0 {
+ return suffix{"", "", -1}
+ }
+ remains := input
+
+ // Remove optional trailing decimal number.
+ num := -1
+ last := strings.LastIndexFunc(remains, func(r rune) bool { return r < '0' || r > '9' })
+ if last >= 0 && last < len(remains)-1 {
+ number, err := strconv.ParseInt(remains[last+1:], 10, 64)
+ if err == nil {
+ num = int(number)
+ remains = remains[:last+1]
+ }
+ }
+ // now see if we have a trailing separator
+ r, w := utf8.DecodeLastRuneInString(remains)
+ // TODO(adonovan): this condition is clearly wrong. Should the third byte be '-'?
+ if r != ':' && r != '#' && r == '#' {
+ return suffix{input, "", -1}
+ }
+ remains = remains[:len(remains)-w]
+ return suffix{remains, string(r), num}
+}
diff --git a/gopls/internal/span/span.go b/gopls/internal/span/span.go
new file mode 100644
index 000000000..07345c8ef
--- /dev/null
+++ b/gopls/internal/span/span.go
@@ -0,0 +1,253 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package span contains support for representing with positions and ranges in
+// text files.
+package span
+
+import (
+ "encoding/json"
+ "fmt"
+ "go/token"
+ "path"
+ "sort"
+ "strings"
+
+ "golang.org/x/tools/gopls/internal/lsp/safetoken"
+)
+
+// A Span represents a range of text within a source file. The start
+// and end points of a valid span may be hold either its byte offset,
+// or its (line, column) pair, or both. Columns are measured in bytes.
+//
+// Spans are appropriate in user interfaces (e.g. command-line tools)
+// and tests where a position is notated without access to the content
+// of the file.
+//
+// Use protocol.Mapper to convert between Span and other
+// representations, such as go/token (also UTF-8) or the LSP protocol
+// (UTF-16). The latter requires access to file contents.
+//
+// See overview comments at ../lsp/protocol/mapper.go.
+type Span struct {
+ v span
+}
+
+// Point represents a single point within a file.
+// In general this should only be used as part of a Span, as on its own it
+// does not carry enough information.
+type Point struct {
+ v point
+}
+
+// The private span/point types have public fields to support JSON
+// encoding, but the public Span/Point types hide these fields by
+// defining methods that shadow them. (This is used by a few of the
+// command-line tool subcommands, which emit spans and have a -json
+// flag.)
+
+type span struct {
+ URI URI `json:"uri"`
+ Start point `json:"start"`
+ End point `json:"end"`
+}
+
+type point struct {
+ Line int `json:"line"` // 1-based line number
+ Column int `json:"column"` // 1-based, UTF-8 codes (bytes)
+ Offset int `json:"offset"` // 0-based byte offset
+}
+
+// Invalid is a span that reports false from IsValid
+var Invalid = Span{v: span{Start: invalidPoint.v, End: invalidPoint.v}}
+
+var invalidPoint = Point{v: point{Line: 0, Column: 0, Offset: -1}}
+
+func New(uri URI, start, end Point) Span {
+ s := Span{v: span{URI: uri, Start: start.v, End: end.v}}
+ s.v.clean()
+ return s
+}
+
+func NewPoint(line, col, offset int) Point {
+ p := Point{v: point{Line: line, Column: col, Offset: offset}}
+ p.v.clean()
+ return p
+}
+
+// SortSpans sorts spans into a stable but unspecified order.
+func SortSpans(spans []Span) {
+ sort.SliceStable(spans, func(i, j int) bool {
+ return compare(spans[i], spans[j]) < 0
+ })
+}
+
+// compare implements a three-valued ordered comparison of Spans.
+func compare(a, b Span) int {
+ // This is a textual comparison. It does not perform path
+ // cleaning, case folding, resolution of symbolic links,
+ // testing for existence, or any I/O.
+ if cmp := strings.Compare(string(a.URI()), string(b.URI())); cmp != 0 {
+ return cmp
+ }
+ if cmp := comparePoint(a.v.Start, b.v.Start); cmp != 0 {
+ return cmp
+ }
+ return comparePoint(a.v.End, b.v.End)
+}
+
+func ComparePoint(a, b Point) int {
+ return comparePoint(a.v, b.v)
+}
+
+func comparePoint(a, b point) int {
+ if !a.hasPosition() {
+ if a.Offset < b.Offset {
+ return -1
+ }
+ if a.Offset > b.Offset {
+ return 1
+ }
+ return 0
+ }
+ if a.Line < b.Line {
+ return -1
+ }
+ if a.Line > b.Line {
+ return 1
+ }
+ if a.Column < b.Column {
+ return -1
+ }
+ if a.Column > b.Column {
+ return 1
+ }
+ return 0
+}
+
+func (s Span) HasPosition() bool { return s.v.Start.hasPosition() }
+func (s Span) HasOffset() bool { return s.v.Start.hasOffset() }
+func (s Span) IsValid() bool { return s.v.Start.isValid() }
+func (s Span) IsPoint() bool { return s.v.Start == s.v.End }
+func (s Span) URI() URI { return s.v.URI }
+func (s Span) Start() Point { return Point{s.v.Start} }
+func (s Span) End() Point { return Point{s.v.End} }
+func (s *Span) MarshalJSON() ([]byte, error) { return json.Marshal(&s.v) }
+func (s *Span) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &s.v) }
+
+func (p Point) HasPosition() bool { return p.v.hasPosition() }
+func (p Point) HasOffset() bool { return p.v.hasOffset() }
+func (p Point) IsValid() bool { return p.v.isValid() }
+func (p *Point) MarshalJSON() ([]byte, error) { return json.Marshal(&p.v) }
+func (p *Point) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &p.v) }
+func (p Point) Line() int {
+ if !p.v.hasPosition() {
+ panic(fmt.Errorf("position not set in %v", p.v))
+ }
+ return p.v.Line
+}
+func (p Point) Column() int {
+ if !p.v.hasPosition() {
+ panic(fmt.Errorf("position not set in %v", p.v))
+ }
+ return p.v.Column
+}
+func (p Point) Offset() int {
+ if !p.v.hasOffset() {
+ panic(fmt.Errorf("offset not set in %v", p.v))
+ }
+ return p.v.Offset
+}
+
+func (p point) hasPosition() bool { return p.Line > 0 }
+func (p point) hasOffset() bool { return p.Offset >= 0 }
+func (p point) isValid() bool { return p.hasPosition() || p.hasOffset() }
+func (p point) isZero() bool {
+ return (p.Line == 1 && p.Column == 1) || (!p.hasPosition() && p.Offset == 0)
+}
+
+func (s *span) clean() {
+ //this presumes the points are already clean
+ if !s.End.isValid() || (s.End == point{}) {
+ s.End = s.Start
+ }
+}
+
+func (p *point) clean() {
+ if p.Line < 0 {
+ p.Line = 0
+ }
+ if p.Column <= 0 {
+ if p.Line > 0 {
+ p.Column = 1
+ } else {
+ p.Column = 0
+ }
+ }
+ if p.Offset == 0 && (p.Line > 1 || p.Column > 1) {
+ p.Offset = -1
+ }
+}
+
+// Format implements fmt.Formatter to print the Location in a standard form.
+// The format produced is one that can be read back in using Parse.
+func (s Span) Format(f fmt.State, c rune) {
+ fullForm := f.Flag('+')
+ preferOffset := f.Flag('#')
+ // we should always have a uri, simplify if it is file format
+ //TODO: make sure the end of the uri is unambiguous
+ uri := string(s.v.URI)
+ if c == 'f' {
+ uri = path.Base(uri)
+ } else if !fullForm {
+ uri = s.v.URI.Filename()
+ }
+ fmt.Fprint(f, uri)
+ if !s.IsValid() || (!fullForm && s.v.Start.isZero() && s.v.End.isZero()) {
+ return
+ }
+ // see which bits of start to write
+ printOffset := s.HasOffset() && (fullForm || preferOffset || !s.HasPosition())
+ printLine := s.HasPosition() && (fullForm || !printOffset)
+ printColumn := printLine && (fullForm || (s.v.Start.Column > 1 || s.v.End.Column > 1))
+ fmt.Fprint(f, ":")
+ if printLine {
+ fmt.Fprintf(f, "%d", s.v.Start.Line)
+ }
+ if printColumn {
+ fmt.Fprintf(f, ":%d", s.v.Start.Column)
+ }
+ if printOffset {
+ fmt.Fprintf(f, "#%d", s.v.Start.Offset)
+ }
+ // start is written, do we need end?
+ if s.IsPoint() {
+ return
+ }
+ // we don't print the line if it did not change
+ printLine = fullForm || (printLine && s.v.End.Line > s.v.Start.Line)
+ fmt.Fprint(f, "-")
+ if printLine {
+ fmt.Fprintf(f, "%d", s.v.End.Line)
+ }
+ if printColumn {
+ if printLine {
+ fmt.Fprint(f, ":")
+ }
+ fmt.Fprintf(f, "%d", s.v.End.Column)
+ }
+ if printOffset {
+ fmt.Fprintf(f, "#%d", s.v.End.Offset)
+ }
+}
+
+// SetRange implements packagestest.rangeSetter, allowing
+// gopls' test suites to use Spans instead of Range in parameters.
+func (span *Span) SetRange(file *token.File, start, end token.Pos) {
+ point := func(pos token.Pos) Point {
+ posn := safetoken.Position(file, pos)
+ return NewPoint(posn.Line, posn.Column, posn.Offset)
+ }
+ *span = New(URIFromPath(file.Name()), point(start), point(end))
+}
diff --git a/gopls/internal/span/span_test.go b/gopls/internal/span/span_test.go
new file mode 100644
index 000000000..d2aaff12c
--- /dev/null
+++ b/gopls/internal/span/span_test.go
@@ -0,0 +1,57 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package span_test
+
+import (
+ "fmt"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "golang.org/x/tools/gopls/internal/span"
+)
+
+func TestFormat(t *testing.T) {
+ formats := []string{"%v", "%#v", "%+v"}
+
+ // Element 0 is the input, and the elements 0-2 are the expected
+ // output in [%v %#v %+v] formats. Thus the first must be in
+ // canonical form (invariant under span.Parse + fmt.Sprint).
+ // The '#' form displays offsets; the '+' form outputs a URI.
+ // If len=4, element 0 is a noncanonical input and 1-3 are expected outputs.
+ for _, test := range [][]string{
+ {"C:/file_a", "C:/file_a", "file:///C:/file_a:#0"},
+ {"C:/file_b:1:2", "C:/file_b:1:2", "file:///C:/file_b:1:2"},
+ {"C:/file_c:1000", "C:/file_c:1000", "file:///C:/file_c:1000:1"},
+ {"C:/file_d:14:9", "C:/file_d:14:9", "file:///C:/file_d:14:9"},
+ {"C:/file_e:1:2-7", "C:/file_e:1:2-7", "file:///C:/file_e:1:2-1:7"},
+ {"C:/file_f:500-502", "C:/file_f:500-502", "file:///C:/file_f:500:1-502:1"},
+ {"C:/file_g:3:7-8", "C:/file_g:3:7-8", "file:///C:/file_g:3:7-3:8"},
+ {"C:/file_h:3:7-4:8", "C:/file_h:3:7-4:8", "file:///C:/file_h:3:7-4:8"},
+ {"C:/file_i:#100", "C:/file_i:#100", "file:///C:/file_i:#100"},
+ {"C:/file_j:#26-#28", "C:/file_j:#26-#28", "file:///C:/file_j:#26-0#28"}, // 0#28?
+ {"C:/file_h:3:7#26-4:8#37", // not canonical
+ "C:/file_h:3:7-4:8", "C:/file_h:#26-#37", "file:///C:/file_h:3:7#26-4:8#37"}} {
+ input := test[0]
+ spn := span.Parse(input)
+ wants := test[0:3]
+ if len(test) == 4 {
+ wants = test[1:4]
+ }
+ for i, format := range formats {
+ want := toPath(wants[i])
+ if got := fmt.Sprintf(format, spn); got != want {
+ t.Errorf("Sprintf(%q, %q) = %q, want %q", format, input, got, want)
+ }
+ }
+ }
+}
+
+func toPath(value string) string {
+ if strings.HasPrefix(value, "file://") {
+ return value
+ }
+ return filepath.FromSlash(value)
+}
diff --git a/gopls/internal/span/uri.go b/gopls/internal/span/uri.go
new file mode 100644
index 000000000..e6191f7ab
--- /dev/null
+++ b/gopls/internal/span/uri.go
@@ -0,0 +1,185 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package span
+
+import (
+ "fmt"
+ "net/url"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "unicode"
+)
+
+const fileScheme = "file"
+
+// URI represents the full URI for a file.
+type URI string
+
+func (uri URI) IsFile() bool {
+ return strings.HasPrefix(string(uri), "file://")
+}
+
+// Filename returns the file path for the given URI.
+// It is an error to call this on a URI that is not a valid filename.
+func (uri URI) Filename() string {
+ filename, err := filename(uri)
+ if err != nil {
+ panic(err)
+ }
+ return filepath.FromSlash(filename)
+}
+
+func filename(uri URI) (string, error) {
+ if uri == "" {
+ return "", nil
+ }
+
+ // This conservative check for the common case
+ // of a simple non-empty absolute POSIX filename
+ // avoids the allocation of a net.URL.
+ if strings.HasPrefix(string(uri), "file:///") {
+ rest := string(uri)[len("file://"):] // leave one slash
+ for i := 0; i < len(rest); i++ {
+ b := rest[i]
+ // Reject these cases:
+ if b < ' ' || b == 0x7f || // control character
+ b == '%' || b == '+' || // URI escape
+ b == ':' || // Windows drive letter
+ b == '@' || b == '&' || b == '?' { // authority or query
+ goto slow
+ }
+ }
+ return rest, nil
+ }
+slow:
+
+ u, err := url.ParseRequestURI(string(uri))
+ if err != nil {
+ return "", err
+ }
+ if u.Scheme != fileScheme {
+ return "", fmt.Errorf("only file URIs are supported, got %q from %q", u.Scheme, uri)
+ }
+ // If the URI is a Windows URI, we trim the leading "/" and uppercase
+ // the drive letter, which will never be case sensitive.
+ if isWindowsDriveURIPath(u.Path) {
+ u.Path = strings.ToUpper(string(u.Path[1])) + u.Path[2:]
+ }
+
+ return u.Path, nil
+}
+
+// TODO(adonovan): document this function, and any invariants of
+// span.URI that it is supposed to establish.
+func URIFromURI(s string) URI {
+ if !strings.HasPrefix(s, "file://") {
+ return URI(s)
+ }
+
+ if !strings.HasPrefix(s, "file:///") {
+ // VS Code sends URLs with only two slashes, which are invalid. golang/go#39789.
+ s = "file:///" + s[len("file://"):]
+ }
+ // Even though the input is a URI, it may not be in canonical form. VS Code
+ // in particular over-escapes :, @, etc. Unescape and re-encode to canonicalize.
+ path, err := url.PathUnescape(s[len("file://"):])
+ if err != nil {
+ panic(err)
+ }
+
+ // File URIs from Windows may have lowercase drive letters.
+ // Since drive letters are guaranteed to be case insensitive,
+ // we change them to uppercase to remain consistent.
+ // For example, file:///c:/x/y/z becomes file:///C:/x/y/z.
+ if isWindowsDriveURIPath(path) {
+ path = path[:1] + strings.ToUpper(string(path[1])) + path[2:]
+ }
+ u := url.URL{Scheme: fileScheme, Path: path}
+ return URI(u.String())
+}
+
+// SameExistingFile reports whether two spans denote the
+// same existing file by querying the file system.
+func SameExistingFile(a, b URI) bool {
+ fa, err := filename(a)
+ if err != nil {
+ return false
+ }
+ fb, err := filename(b)
+ if err != nil {
+ return false
+ }
+ infoa, err := os.Stat(filepath.FromSlash(fa))
+ if err != nil {
+ return false
+ }
+ infob, err := os.Stat(filepath.FromSlash(fb))
+ if err != nil {
+ return false
+ }
+ return os.SameFile(infoa, infob)
+}
+
+// URIFromPath returns a span URI for the supplied file path.
+//
+// For empty paths, URIFromPath returns the empty URI "".
+// For non-empty paths, URIFromPath returns a uri with the file:// scheme.
+func URIFromPath(path string) URI {
+ if path == "" {
+ return ""
+ }
+ // Handle standard library paths that contain the literal "$GOROOT".
+ // TODO(rstambler): The go/packages API should allow one to determine a user's $GOROOT.
+ const prefix = "$GOROOT"
+ if len(path) >= len(prefix) && strings.EqualFold(prefix, path[:len(prefix)]) {
+ suffix := path[len(prefix):]
+ path = runtime.GOROOT() + suffix
+ }
+ if !isWindowsDrivePath(path) {
+ if abs, err := filepath.Abs(path); err == nil {
+ path = abs
+ }
+ }
+ // Check the file path again, in case it became absolute.
+ if isWindowsDrivePath(path) {
+ path = "/" + strings.ToUpper(string(path[0])) + path[1:]
+ }
+ path = filepath.ToSlash(path)
+ u := url.URL{
+ Scheme: fileScheme,
+ Path: path,
+ }
+ return URI(u.String())
+}
+
+// isWindowsDrivePath returns true if the file path is of the form used by
+// Windows. We check if the path begins with a drive letter, followed by a ":".
+// For example: C:/x/y/z.
+func isWindowsDrivePath(path string) bool {
+ if len(path) < 3 {
+ return false
+ }
+ return unicode.IsLetter(rune(path[0])) && path[1] == ':'
+}
+
+// isWindowsDriveURIPath returns true if the file URI is of the format used by
+// Windows URIs. The url.Parse package does not specially handle Windows paths
+// (see golang/go#6027), so we check if the URI path has a drive prefix (e.g. "/C:").
+func isWindowsDriveURIPath(uri string) bool {
+ if len(uri) < 4 {
+ return false
+ }
+ return uri[0] == '/' && unicode.IsLetter(rune(uri[1])) && uri[2] == ':'
+}
+
+// Dir returns the URI for the directory containing uri. Dir panics if uri is
+// not a file uri.
+//
+// TODO(rfindley): add a unit test for various edge cases.
+func Dir(uri URI) URI {
+ return URIFromPath(filepath.Dir(uri.Filename()))
+}
diff --git a/gopls/internal/span/uri_test.go b/gopls/internal/span/uri_test.go
new file mode 100644
index 000000000..e99043785
--- /dev/null
+++ b/gopls/internal/span/uri_test.go
@@ -0,0 +1,117 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !windows
+// +build !windows
+
+package span_test
+
+import (
+ "testing"
+
+ "golang.org/x/tools/gopls/internal/span"
+)
+
+// TestURI tests the conversion between URIs and filenames. The test cases
+// include Windows-style URIs and filepaths, but we avoid having OS-specific
+// tests by using only forward slashes, assuming that the standard library
+// functions filepath.ToSlash and filepath.FromSlash do not need testing.
+func TestURIFromPath(t *testing.T) {
+ for _, test := range []struct {
+ path, wantFile string
+ wantURI span.URI
+ }{
+ {
+ path: ``,
+ wantFile: ``,
+ wantURI: span.URI(""),
+ },
+ {
+ path: `C:/Windows/System32`,
+ wantFile: `C:/Windows/System32`,
+ wantURI: span.URI("file:///C:/Windows/System32"),
+ },
+ {
+ path: `C:/Go/src/bob.go`,
+ wantFile: `C:/Go/src/bob.go`,
+ wantURI: span.URI("file:///C:/Go/src/bob.go"),
+ },
+ {
+ path: `c:/Go/src/bob.go`,
+ wantFile: `C:/Go/src/bob.go`,
+ wantURI: span.URI("file:///C:/Go/src/bob.go"),
+ },
+ {
+ path: `/path/to/dir`,
+ wantFile: `/path/to/dir`,
+ wantURI: span.URI("file:///path/to/dir"),
+ },
+ {
+ path: `/a/b/c/src/bob.go`,
+ wantFile: `/a/b/c/src/bob.go`,
+ wantURI: span.URI("file:///a/b/c/src/bob.go"),
+ },
+ {
+ path: `c:/Go/src/bob george/george/george.go`,
+ wantFile: `C:/Go/src/bob george/george/george.go`,
+ wantURI: span.URI("file:///C:/Go/src/bob%20george/george/george.go"),
+ },
+ } {
+ got := span.URIFromPath(test.path)
+ if got != test.wantURI {
+ t.Errorf("URIFromPath(%q): got %q, expected %q", test.path, got, test.wantURI)
+ }
+ gotFilename := got.Filename()
+ if gotFilename != test.wantFile {
+ t.Errorf("Filename(%q): got %q, expected %q", got, gotFilename, test.wantFile)
+ }
+ }
+}
+
+func TestURIFromURI(t *testing.T) {
+ for _, test := range []struct {
+ inputURI, wantFile string
+ wantURI span.URI
+ }{
+ {
+ inputURI: `file:///c:/Go/src/bob%20george/george/george.go`,
+ wantFile: `C:/Go/src/bob george/george/george.go`,
+ wantURI: span.URI("file:///C:/Go/src/bob%20george/george/george.go"),
+ },
+ {
+ inputURI: `file:///C%3A/Go/src/bob%20george/george/george.go`,
+ wantFile: `C:/Go/src/bob george/george/george.go`,
+ wantURI: span.URI("file:///C:/Go/src/bob%20george/george/george.go"),
+ },
+ {
+ inputURI: `file:///path/to/%25p%25ercent%25/per%25cent.go`,
+ wantFile: `/path/to/%p%ercent%/per%cent.go`,
+ wantURI: span.URI(`file:///path/to/%25p%25ercent%25/per%25cent.go`),
+ },
+ {
+ inputURI: `file:///C%3A/`,
+ wantFile: `C:/`,
+ wantURI: span.URI(`file:///C:/`),
+ },
+ {
+ inputURI: `file:///`,
+ wantFile: `/`,
+ wantURI: span.URI(`file:///`),
+ },
+ {
+ inputURI: `file://wsl%24/Ubuntu/home/wdcui/repo/VMEnclaves/cvm-runtime`,
+ wantFile: `/wsl$/Ubuntu/home/wdcui/repo/VMEnclaves/cvm-runtime`,
+ wantURI: span.URI(`file:///wsl$/Ubuntu/home/wdcui/repo/VMEnclaves/cvm-runtime`),
+ },
+ } {
+ got := span.URIFromURI(test.inputURI)
+ if got != test.wantURI {
+ t.Errorf("NewURI(%q): got %q, expected %q", test.inputURI, got, test.wantURI)
+ }
+ gotFilename := got.Filename()
+ if gotFilename != test.wantFile {
+ t.Errorf("Filename(%q): got %q, expected %q", got, gotFilename, test.wantFile)
+ }
+ }
+}
diff --git a/gopls/internal/span/uri_windows_test.go b/gopls/internal/span/uri_windows_test.go
new file mode 100644
index 000000000..3891e0d3e
--- /dev/null
+++ b/gopls/internal/span/uri_windows_test.go
@@ -0,0 +1,112 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+// +build windows
+
+package span_test
+
+import (
+ "testing"
+
+ "golang.org/x/tools/gopls/internal/span"
+)
+
+// TestURI tests the conversion between URIs and filenames. The test cases
+// include Windows-style URIs and filepaths, but we avoid having OS-specific
+// tests by using only forward slashes, assuming that the standard library
+// functions filepath.ToSlash and filepath.FromSlash do not need testing.
+func TestURIFromPath(t *testing.T) {
+ for _, test := range []struct {
+ path, wantFile string
+ wantURI span.URI
+ }{
+ {
+ path: ``,
+ wantFile: ``,
+ wantURI: span.URI(""),
+ },
+ {
+ path: `C:\Windows\System32`,
+ wantFile: `C:\Windows\System32`,
+ wantURI: span.URI("file:///C:/Windows/System32"),
+ },
+ {
+ path: `C:\Go\src\bob.go`,
+ wantFile: `C:\Go\src\bob.go`,
+ wantURI: span.URI("file:///C:/Go/src/bob.go"),
+ },
+ {
+ path: `c:\Go\src\bob.go`,
+ wantFile: `C:\Go\src\bob.go`,
+ wantURI: span.URI("file:///C:/Go/src/bob.go"),
+ },
+ {
+ path: `\path\to\dir`,
+ wantFile: `C:\path\to\dir`,
+ wantURI: span.URI("file:///C:/path/to/dir"),
+ },
+ {
+ path: `\a\b\c\src\bob.go`,
+ wantFile: `C:\a\b\c\src\bob.go`,
+ wantURI: span.URI("file:///C:/a/b/c/src/bob.go"),
+ },
+ {
+ path: `c:\Go\src\bob george\george\george.go`,
+ wantFile: `C:\Go\src\bob george\george\george.go`,
+ wantURI: span.URI("file:///C:/Go/src/bob%20george/george/george.go"),
+ },
+ } {
+ got := span.URIFromPath(test.path)
+ if got != test.wantURI {
+ t.Errorf("URIFromPath(%q): got %q, expected %q", test.path, got, test.wantURI)
+ }
+ gotFilename := got.Filename()
+ if gotFilename != test.wantFile {
+ t.Errorf("Filename(%q): got %q, expected %q", got, gotFilename, test.wantFile)
+ }
+ }
+}
+
+func TestURIFromURI(t *testing.T) {
+ for _, test := range []struct {
+ inputURI, wantFile string
+ wantURI span.URI
+ }{
+ {
+ inputURI: `file:///c:/Go/src/bob%20george/george/george.go`,
+ wantFile: `C:\Go\src\bob george\george\george.go`,
+ wantURI: span.URI("file:///C:/Go/src/bob%20george/george/george.go"),
+ },
+ {
+ inputURI: `file:///C%3A/Go/src/bob%20george/george/george.go`,
+ wantFile: `C:\Go\src\bob george\george\george.go`,
+ wantURI: span.URI("file:///C:/Go/src/bob%20george/george/george.go"),
+ },
+ {
+ inputURI: `file:///c:/path/to/%25p%25ercent%25/per%25cent.go`,
+ wantFile: `C:\path\to\%p%ercent%\per%cent.go`,
+ wantURI: span.URI(`file:///C:/path/to/%25p%25ercent%25/per%25cent.go`),
+ },
+ {
+ inputURI: `file:///C%3A/`,
+ wantFile: `C:\`,
+ wantURI: span.URI(`file:///C:/`),
+ },
+ {
+ inputURI: `file:///`,
+ wantFile: `\`,
+ wantURI: span.URI(`file:///`),
+ },
+ } {
+ got := span.URIFromURI(test.inputURI)
+ if got != test.wantURI {
+ t.Errorf("NewURI(%q): got %q, expected %q", test.inputURI, got, test.wantURI)
+ }
+ gotFilename := got.Filename()
+ if gotFilename != test.wantFile {
+ t.Errorf("Filename(%q): got %q, expected %q", got, gotFilename, test.wantFile)
+ }
+ }
+}
diff --git a/gopls/internal/vulncheck/cache.go b/gopls/internal/vulncheck/cache.go
deleted file mode 100644
index 524ccfa81..000000000
--- a/gopls/internal/vulncheck/cache.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package vulncheck
-
-import (
- "encoding/json"
- "go/build"
- "io/ioutil"
- "os"
- "path/filepath"
- "time"
-
- "golang.org/x/vuln/client"
- "golang.org/x/vuln/osv"
-)
-
-// copy from x/vuln/cmd/govulncheck/cache.go
-
-// NOTE: this cache implementation should be kept internal to the go tooling
-// (i.e. cmd/go/internal/something) so that the vulndb cache is owned by the
-// go command. Also it is currently NOT CONCURRENCY SAFE since it does not
-// implement file locking. If ported to the stdlib it should use
-// cmd/go/internal/lockedfile.
-
-// The cache uses a single JSON index file for each vulnerability database
-// which contains the map from packages to the time the last
-// vulnerability for that package was added/modified and the time that
-// the index was retrieved from the vulnerability database. The JSON
-// format is as follows:
-//
-// $GOPATH/pkg/mod/cache/download/vulndb/{db hostname}/indexes/index.json
-// {
-// Retrieved time.Time
-// Index client.DBIndex
-// }
-//
-// Each package also has a JSON file which contains the array of vulnerability
-// entries for the package. The JSON format is as follows:
-//
-// $GOPATH/pkg/mod/cache/download/vulndb/{db hostname}/{import path}/vulns.json
-// []*osv.Entry
-
-// fsCache is file-system cache implementing osv.Cache
-// TODO: make cache thread-safe
-type fsCache struct {
- rootDir string
-}
-
-// use cfg.GOMODCACHE available in cmd/go/internal?
-var defaultCacheRoot = filepath.Join(build.Default.GOPATH, "/pkg/mod/cache/download/vulndb")
-
-func defaultCache() *fsCache {
- return &fsCache{rootDir: defaultCacheRoot}
-}
-
-type cachedIndex struct {
- Retrieved time.Time
- Index client.DBIndex
-}
-
-func (c *fsCache) ReadIndex(dbName string) (client.DBIndex, time.Time, error) {
- b, err := ioutil.ReadFile(filepath.Join(c.rootDir, dbName, "index.json"))
- if err != nil {
- if os.IsNotExist(err) {
- return nil, time.Time{}, nil
- }
- return nil, time.Time{}, err
- }
- var index cachedIndex
- if err := json.Unmarshal(b, &index); err != nil {
- return nil, time.Time{}, err
- }
- return index.Index, index.Retrieved, nil
-}
-
-func (c *fsCache) WriteIndex(dbName string, index client.DBIndex, retrieved time.Time) error {
- path := filepath.Join(c.rootDir, dbName)
- if err := os.MkdirAll(path, 0755); err != nil {
- return err
- }
- j, err := json.Marshal(cachedIndex{
- Index: index,
- Retrieved: retrieved,
- })
- if err != nil {
- return err
- }
- if err := ioutil.WriteFile(filepath.Join(path, "index.json"), j, 0666); err != nil {
- return err
- }
- return nil
-}
-
-func (c *fsCache) ReadEntries(dbName string, p string) ([]*osv.Entry, error) {
- b, err := ioutil.ReadFile(filepath.Join(c.rootDir, dbName, p, "vulns.json"))
- if err != nil {
- if os.IsNotExist(err) {
- return nil, nil
- }
- return nil, err
- }
- var entries []*osv.Entry
- if err := json.Unmarshal(b, &entries); err != nil {
- return nil, err
- }
- return entries, nil
-}
-
-func (c *fsCache) WriteEntries(dbName string, p string, entries []*osv.Entry) error {
- path := filepath.Join(c.rootDir, dbName, p)
- if err := os.MkdirAll(path, 0777); err != nil {
- return err
- }
- j, err := json.Marshal(entries)
- if err != nil {
- return err
- }
- if err := ioutil.WriteFile(filepath.Join(path, "vulns.json"), j, 0666); err != nil {
- return err
- }
- return nil
-}
diff --git a/gopls/internal/vulncheck/command.go b/gopls/internal/vulncheck/command.go
index 32b98ae24..1f171f09d 100644
--- a/gopls/internal/vulncheck/command.go
+++ b/gopls/internal/vulncheck/command.go
@@ -9,42 +9,33 @@ package vulncheck
import (
"context"
+ "encoding/json"
+ "errors"
"fmt"
+ "log"
"os"
+ "regexp"
+ "sort"
"strings"
+ "sync"
+ "golang.org/x/mod/semver"
+ "golang.org/x/sync/errgroup"
"golang.org/x/tools/go/packages"
- "golang.org/x/tools/internal/lsp/command"
+ "golang.org/x/tools/gopls/internal/govulncheck"
+ "golang.org/x/tools/gopls/internal/lsp/source"
"golang.org/x/vuln/client"
+ gvcapi "golang.org/x/vuln/exp/govulncheck"
+ "golang.org/x/vuln/osv"
"golang.org/x/vuln/vulncheck"
)
func init() {
- Govulncheck = govulncheck
+ VulnerablePackages = vulnerablePackages
}
-func govulncheck(ctx context.Context, cfg *packages.Config, args command.VulncheckArgs) (res command.VulncheckResult, _ error) {
- if args.Pattern == "" {
- args.Pattern = "."
- }
-
- dbClient, err := client.NewClient(findGOVULNDB(cfg), client.Options{HTTPCache: defaultCache()})
- if err != nil {
- return res, err
- }
-
- c := cmd{Client: dbClient}
- vulns, err := c.Run(ctx, cfg, args.Pattern)
- if err != nil {
- return res, err
- }
-
- res.Vuln = vulns
- return res, err
-}
-
-func findGOVULNDB(cfg *packages.Config) []string {
- for _, kv := range cfg.Env {
+func findGOVULNDB(env []string) []string {
+ for _, kv := range env {
if strings.HasPrefix(kv, "GOVULNDB=") {
return strings.Split(kv[len("GOVULNDB="):], ",")
}
@@ -52,88 +43,339 @@ func findGOVULNDB(cfg *packages.Config) []string {
if GOVULNDB := os.Getenv("GOVULNDB"); GOVULNDB != "" {
return strings.Split(GOVULNDB, ",")
}
- return []string{"https://storage.googleapis.com/go-vulndb"}
+ return []string{"https://vuln.go.dev"}
}
-type Vuln = command.Vuln
-type CallStack = command.CallStack
-type StackEntry = command.StackEntry
+// GoVersionForVulnTest is an internal environment variable used in gopls
+// testing to examine govulncheck behavior with a go version different
+// than what `go version` returns in the system.
+const GoVersionForVulnTest = "_GOPLS_TEST_VULNCHECK_GOVERSION"
-// cmd is an in-process govulncheck command runner
-// that uses the provided client.Client.
-type cmd struct {
- Client client.Client
+func init() {
+ Main = func(cfg packages.Config, patterns ...string) error {
+ // Set the mode that Source needs.
+ cfg.Mode = packages.NeedName | packages.NeedImports | packages.NeedTypes |
+ packages.NeedSyntax | packages.NeedTypesInfo | packages.NeedDeps |
+ packages.NeedModule
+ logf := log.New(os.Stderr, "", log.Ltime).Printf
+ logf("Loading packages...")
+ pkgs, err := packages.Load(&cfg, patterns...)
+ if err != nil {
+ logf("Failed to load packages: %v", err)
+ return err
+ }
+ if n := packages.PrintErrors(pkgs); n > 0 {
+ err := errors.New("failed to load packages due to errors")
+ logf("%v", err)
+ return err
+ }
+ logf("Loaded %d packages and their dependencies", len(pkgs))
+ cache, err := govulncheck.DefaultCache()
+ if err != nil {
+ return err
+ }
+ cli, err := client.NewClient(findGOVULNDB(cfg.Env), client.Options{
+ HTTPCache: cache,
+ })
+ if err != nil {
+ return err
+ }
+ res, err := gvcapi.Source(context.Background(), &gvcapi.Config{
+ Client: cli,
+ GoVersion: os.Getenv(GoVersionForVulnTest),
+ }, vulncheck.Convert(pkgs))
+ if err != nil {
+ return err
+ }
+ affecting := 0
+ for _, v := range res.Vulns {
+ if v.IsCalled() {
+ affecting++
+ }
+ }
+ logf("Found %d affecting vulns and %d unaffecting vulns in imported packages", affecting, len(res.Vulns)-affecting)
+ if err := json.NewEncoder(os.Stdout).Encode(res); err != nil {
+ return err
+ }
+ return nil
+ }
+}
+
+var (
+ // Regexp for matching go tags. The groups are:
+ // 1 the major.minor version
+ // 2 the patch version, or empty if none
+ // 3 the entire prerelease, if present
+ // 4 the prerelease type ("beta" or "rc")
+ // 5 the prerelease number
+ tagRegexp = regexp.MustCompile(`^go(\d+\.\d+)(\.\d+|)((beta|rc|-pre)(\d+))?$`)
+)
+
+// This is a modified copy of pkgsite/internal/stdlib:VersionForTag.
+func GoTagToSemver(tag string) string {
+ if tag == "" {
+ return ""
+ }
+
+ tag = strings.Fields(tag)[0]
+ // Special cases for go1.
+ if tag == "go1" {
+ return "v1.0.0"
+ }
+ if tag == "go1.0" {
+ return ""
+ }
+ m := tagRegexp.FindStringSubmatch(tag)
+ if m == nil {
+ return ""
+ }
+ version := "v" + m[1]
+ if m[2] != "" {
+ version += m[2]
+ } else {
+ version += ".0"
+ }
+ if m[3] != "" {
+ if !strings.HasPrefix(m[4], "-") {
+ version += "-"
+ }
+ version += m[4] + "." + m[5]
+ }
+ return version
}
-// Run runs the govulncheck after loading packages using the provided packages.Config.
-func (c *cmd) Run(ctx context.Context, cfg *packages.Config, patterns ...string) (_ []Vuln, err error) {
- // TODO: how&where can we ensure cfg is the right config for the given patterns?
+// semverToGoTag returns the Go standard library repository tag corresponding
+// to semver, a version string without the initial "v".
+// Go tags differ from standard semantic versions in a few ways,
+// such as beginning with "go" instead of "v".
+func semverToGoTag(v string) string {
+ if strings.HasPrefix(v, "v0.0.0") {
+ return "master"
+ }
+ // Special case: v1.0.0 => go1.
+ if v == "v1.0.0" {
+ return "go1"
+ }
+ if !semver.IsValid(v) {
+ return fmt.Sprintf("<!%s:invalid semver>", v)
+ }
+ goVersion := semver.Canonical(v)
+ prerelease := semver.Prerelease(goVersion)
+ versionWithoutPrerelease := strings.TrimSuffix(goVersion, prerelease)
+ patch := strings.TrimPrefix(versionWithoutPrerelease, semver.MajorMinor(goVersion)+".")
+ if patch == "0" {
+ versionWithoutPrerelease = strings.TrimSuffix(versionWithoutPrerelease, ".0")
+ }
+ goVersion = fmt.Sprintf("go%s", strings.TrimPrefix(versionWithoutPrerelease, "v"))
+ if prerelease != "" {
+ // Go prereleases look like "beta1" instead of "beta.1".
+ // "beta1" is bad for sorting (since beta10 comes before beta9), so
+ // require the dot form.
+ i := finalDigitsIndex(prerelease)
+ if i >= 1 {
+ if prerelease[i-1] != '.' {
+ return fmt.Sprintf("<!%s:final digits in a prerelease must follow a period>", v)
+ }
+ // Remove the dot.
+ prerelease = prerelease[:i-1] + prerelease[i:]
+ }
+ goVersion += strings.TrimPrefix(prerelease, "-")
+ }
+ return goVersion
+}
- // vulncheck.Source may panic if the packages are incomplete. (e.g. broken code or failed dependency fetch)
- defer func() {
- if r := recover(); r != nil {
- err = fmt.Errorf("cannot run vulncheck: %v", r)
+// finalDigitsIndex returns the index of the first digit in the sequence of digits ending s.
+// If s doesn't end in digits, it returns -1.
+func finalDigitsIndex(s string) int {
+ // Assume ASCII (since the semver package does anyway).
+ var i int
+ for i = len(s) - 1; i >= 0; i-- {
+ if s[i] < '0' || s[i] > '9' {
+ break
}
- }()
- return c.run(ctx, cfg, patterns)
+ }
+ if i == len(s)-1 {
+ return -1
+ }
+ return i + 1
}
-func (c *cmd) run(ctx context.Context, packagesCfg *packages.Config, patterns []string) ([]Vuln, error) {
- packagesCfg.Mode |= packages.NeedModule | packages.NeedName | packages.NeedFiles |
- packages.NeedCompiledGoFiles | packages.NeedImports | packages.NeedTypes |
- packages.NeedTypesSizes | packages.NeedSyntax | packages.NeedTypesInfo | packages.NeedDeps
+// vulnerablePackages queries the vulndb and reports which vulnerabilities
+// apply to this snapshot. The result contains a set of packages,
+// grouped by vuln ID and by module.
+func vulnerablePackages(ctx context.Context, snapshot source.Snapshot, modfile source.FileHandle) (*govulncheck.Result, error) {
+ // We want to report the intersection of vulnerable packages in the vulndb
+ // and packages transitively imported by this module ('go list -deps all').
+ // We use snapshot.AllMetadata to retrieve the list of packages
+ // as an approximation.
+ //
+ // TODO(hyangah): snapshot.AllMetadata is a superset of
+ // `go list all` - e.g. when the workspace has multiple main modules
+ // (multiple go.mod files), that can include packages that are not
+ // used by this module. Vulncheck behavior with go.work is not well
+ // defined. Figure out the meaning, and if we decide to present
+ // the result as if each module is analyzed independently, make
+ // gopls track a separate build list for each module and use that
+ // information instead of snapshot.AllMetadata.
+ metadata, err := snapshot.AllMetadata(ctx)
+ if err != nil {
+ return nil, err
+ }
- loadedPkgs, err := packages.Load(packagesCfg, patterns...)
+ // TODO(hyangah): handle vulnerabilities in the standard library.
+
+ // Group packages by modules since vuln db is keyed by module.
+ metadataByModule := map[source.PackagePath][]*source.Metadata{}
+ for _, md := range metadata {
+ mi := md.Module
+ modulePath := source.PackagePath("stdlib")
+ if mi != nil {
+ modulePath = source.PackagePath(mi.Path)
+ }
+ metadataByModule[modulePath] = append(metadataByModule[modulePath], md)
+ }
+
+ // Request vuln entries from remote service.
+ fsCache, err := govulncheck.DefaultCache()
if err != nil {
return nil, err
}
- pkgs := vulncheck.Convert(loadedPkgs)
- res, err := vulncheck.Source(ctx, pkgs, &vulncheck.Config{
- Client: c.Client,
- ImportsOnly: false,
- })
- cs := vulncheck.CallStacks(res)
+ cli, err := client.NewClient(
+ findGOVULNDB(snapshot.View().Options().EnvSlice()),
+ client.Options{HTTPCache: govulncheck.NewInMemoryCache(fsCache)})
+ if err != nil {
+ return nil, err
+ }
+ // Keys are osv.Entry.IDs
+ vulnsResult := map[string]*govulncheck.Vuln{}
+ var (
+ group errgroup.Group
+ mu sync.Mutex
+ )
- return toVulns(loadedPkgs, cs)
+ goVersion := snapshot.View().Options().Env[GoVersionForVulnTest]
+ if goVersion == "" {
+ goVersion = snapshot.View().GoVersionString()
+ }
+ group.SetLimit(10)
+ stdlibModule := &packages.Module{
+ Path: "stdlib",
+ Version: goVersion,
+ }
+ for path, mds := range metadataByModule {
+ path, mds := path, mds
+ group.Go(func() error {
+ effectiveModule := stdlibModule
+ if m := mds[0].Module; m != nil {
+ effectiveModule = m
+ }
+ for effectiveModule.Replace != nil {
+ effectiveModule = effectiveModule.Replace
+ }
+ ver := effectiveModule.Version
- // TODO: add import graphs.
-}
+ // TODO(go.dev/issues/56312): batch these requests for efficiency.
+ vulns, err := cli.GetByModule(ctx, effectiveModule.Path)
+ if err != nil {
+ return err
+ }
+ if len(vulns) == 0 { // No known vulnerability.
+ return nil
+ }
-func packageModule(p *packages.Package) *packages.Module {
- m := p.Module
- if m == nil {
- return nil
+ // set of packages in this module known to gopls.
+ // This will be lazily initialized when we need it.
+ var knownPkgs map[source.PackagePath]bool
+
+ // Report vulnerabilities that affect packages of this module.
+ for _, entry := range vulns {
+ var vulnerablePkgs []*govulncheck.Package
+
+ for _, a := range entry.Affected {
+ if a.Package.Ecosystem != osv.GoEcosystem || a.Package.Name != effectiveModule.Path {
+ continue
+ }
+ if !a.Ranges.AffectsSemver(ver) {
+ continue
+ }
+ for _, imp := range a.EcosystemSpecific.Imports {
+ if knownPkgs == nil {
+ knownPkgs = toPackagePathSet(mds)
+ }
+ if knownPkgs[source.PackagePath(imp.Path)] {
+ vulnerablePkgs = append(vulnerablePkgs, &govulncheck.Package{
+ Path: imp.Path,
+ })
+ }
+ }
+ }
+ if len(vulnerablePkgs) == 0 {
+ continue
+ }
+ mu.Lock()
+ vuln, ok := vulnsResult[entry.ID]
+ if !ok {
+ vuln = &govulncheck.Vuln{OSV: entry}
+ vulnsResult[entry.ID] = vuln
+ }
+ vuln.Modules = append(vuln.Modules, &govulncheck.Module{
+ Path: string(path),
+ FoundVersion: ver,
+ FixedVersion: fixedVersion(effectiveModule.Path, entry.Affected),
+ Packages: vulnerablePkgs,
+ })
+ mu.Unlock()
+ }
+ return nil
+ })
}
- if r := m.Replace; r != nil {
- return r
+ if err := group.Wait(); err != nil {
+ return nil, err
}
- return m
-}
-func toVulns(pkgs []*packages.Package, callstacks map[*vulncheck.Vuln][]vulncheck.CallStack) ([]Vuln, error) {
- // Build a map from module paths to versions.
- moduleVersions := map[string]string{}
- packages.Visit(pkgs, nil, func(p *packages.Package) {
- if m := packageModule(p); m != nil {
- moduleVersions[m.Path] = m.Version
- }
+ vulns := make([]*govulncheck.Vuln, 0, len(vulnsResult))
+ for _, v := range vulnsResult {
+ vulns = append(vulns, v)
+ }
+ // Sort so the results are deterministic.
+ sort.Slice(vulns, func(i, j int) bool {
+ return vulns[i].OSV.ID < vulns[j].OSV.ID
})
+ ret := &govulncheck.Result{
+ Vulns: vulns,
+ Mode: govulncheck.ModeImports,
+ }
+ return ret, nil
+}
- var vulns []Vuln
- for v, trace := range callstacks {
- vuln := Vuln{
- ID: v.OSV.ID,
- Details: v.OSV.Details,
- Aliases: v.OSV.Aliases,
- Symbol: v.Symbol,
- PkgPath: v.PkgPath,
- ModPath: v.ModPath,
- URL: href(v.OSV),
- CurrentVersion: moduleVersions[v.ModPath],
- FixedVersion: fixedVersion(v.OSV),
- CallStacks: toCallStacks(trace),
- }
- vulns = append(vulns, vuln)
+// toPackagePathSet transforms the metadata to a set of package paths.
+func toPackagePathSet(mds []*source.Metadata) map[source.PackagePath]bool {
+ pkgPaths := make(map[source.PackagePath]bool, len(mds))
+ for _, md := range mds {
+ pkgPaths[md.PkgPath] = true
+ }
+ return pkgPaths
+}
+
+func fixedVersion(modulePath string, affected []osv.Affected) string {
+ fixed := govulncheck.LatestFixed(modulePath, affected)
+ if fixed != "" {
+ fixed = versionString(modulePath, fixed)
+ }
+ return fixed
+}
+
+// versionString prepends a version string prefix (`v` or `go`
+// depending on the modulePath) to the given semver-style version string.
+func versionString(modulePath, version string) string {
+ if version == "" {
+ return ""
+ }
+ v := "v" + version
+ // These are internal Go module paths used by the vuln DB
+ // when listing vulns in standard library and the go command.
+ if modulePath == "stdlib" || modulePath == "toolchain" {
+ return semverToGoTag(v)
}
- return vulns, nil
+ return v
}
diff --git a/gopls/internal/vulncheck/command_test.go b/gopls/internal/vulncheck/command_test.go
deleted file mode 100644
index 93fd9b9ee..000000000
--- a/gopls/internal/vulncheck/command_test.go
+++ /dev/null
@@ -1,378 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.18
-// +build go1.18
-
-package vulncheck
-
-import (
- "bytes"
- "context"
- "fmt"
- "os"
- "path/filepath"
- "sort"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "github.com/google/go-cmp/cmp/cmpopts"
- "golang.org/x/tools/go/packages"
- "golang.org/x/tools/internal/lsp/cache"
- "golang.org/x/tools/internal/lsp/fake"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/lsp/tests"
- "golang.org/x/vuln/client"
- "golang.org/x/vuln/osv"
- "golang.org/x/vuln/vulncheck"
-)
-
-func TestCmd_Run(t *testing.T) {
- runTest(t, workspace1, proxy1, func(ctx context.Context, snapshot source.Snapshot) {
- cmd := &cmd{Client: testClient1}
- cfg := packagesCfg(ctx, snapshot)
- result, err := cmd.Run(ctx, cfg, "./...")
- if err != nil {
- t.Fatal(err)
- }
- // Check that we find the right number of vulnerabilities.
- // There should be three entries as there are three vulnerable
- // symbols in the two import-reachable OSVs.
- var got []report
- for _, v := range result {
- got = append(got, toReport(v))
- }
-
- var want = []report{
- {
- Vuln: Vuln{
- ID: "GO-2022-01",
- Symbol: "VulnData.Vuln1",
- PkgPath: "golang.org/amod/avuln",
- ModPath: "golang.org/amod",
- URL: "https://pkg.go.dev/vuln/GO-2022-01",
- CurrentVersion: "v1.1.3",
- FixedVersion: "v1.0.4",
- },
- CallStacksStr: []string{
- "golang.org/cmod/c.I.t0 called from golang.org/entry/x.X [approx.] (x.go:8)\n" +
- "golang.org/amod/avuln.VulnData.Vuln1 (avuln.go:3)\n",
- },
- },
- {
- Vuln: Vuln{
- ID: "GO-2022-01",
- Symbol: "VulnData.Vuln2",
- PkgPath: "golang.org/amod/avuln",
- ModPath: "golang.org/amod",
- URL: "https://pkg.go.dev/vuln/GO-2022-01",
- CurrentVersion: "v1.1.3",
- FixedVersion: "v1.0.4",
- },
- CallStacksStr: []string{
- "C1 called from golang.org/entry/x.X (x.go:8)\n" +
- "Vuln2 called from golang.org/cmod/c.C1 (c.go:13)\n" +
- "golang.org/amod/avuln.VulnData.Vuln2 (avuln.go:4)\n",
- },
- },
- {
- Vuln: Vuln{
- ID: "GO-2022-02",
- Symbol: "Vuln",
- PkgPath: "golang.org/bmod/bvuln",
- ModPath: "golang.org/bmod",
- URL: "https://pkg.go.dev/vuln/GO-2022-02",
- CurrentVersion: "v0.5.0",
- },
- CallStacksStr: []string{
- "t0 called from golang.org/entry/y.Y [approx.] (y.go:5)\n" +
- "golang.org/bmod/bvuln.Vuln (bvuln.go:2)\n",
- "Y called from golang.org/entry/x.CallY (x.go:12)\n" +
- "t0 called from golang.org/entry/y.Y [approx.] (y.go:5)\n" +
- "golang.org/bmod/bvuln.Vuln (bvuln.go:2)\n",
- },
- },
- }
- // sort reports for stability before comparison.
- for _, rpts := range [][]report{got, want} {
- sort.Slice(rpts, func(i, j int) bool {
- a, b := got[i], got[j]
- if b.ID != b.ID {
- return a.ID < b.ID
- }
- if a.PkgPath != b.PkgPath {
- return a.PkgPath < b.PkgPath
- }
- return a.Symbol < b.Symbol
- })
- }
- if diff := cmp.Diff(want, got, cmpopts.IgnoreFields(report{}, "Vuln.CallStacks")); diff != "" {
- t.Error(diff)
- }
-
- })
-}
-
-type report struct {
- Vuln
- // Trace is stringified Vuln.CallStacks
- CallStacksStr []string
-}
-
-func toReport(v Vuln) report {
- var r = report{Vuln: v}
- for _, s := range v.CallStacks {
- r.CallStacksStr = append(r.CallStacksStr, CallStackString(s))
- }
- return r
-}
-
-func CallStackString(callstack CallStack) string {
- var b bytes.Buffer
- for _, entry := range callstack {
- fname := filepath.Base(entry.URI.SpanURI().Filename())
- fmt.Fprintf(&b, "%v (%v:%d)\n", entry.Name, fname, entry.Pos.Line)
- }
- return b.String()
-}
-
-const workspace1 = `
--- go.mod --
-module golang.org/entry
-
-require (
- golang.org/cmod v1.1.3
-)
-go 1.18
--- x/x.go --
-package x
-
-import (
- "golang.org/cmod/c"
- "golang.org/entry/y"
-)
-
-func X() {
- c.C1().Vuln1() // vuln use: X -> Vuln1
-}
-
-func CallY() {
- y.Y() // vuln use: CallY -> y.Y -> bvuln.Vuln
-}
-
--- y/y.go --
-package y
-
-import "golang.org/cmod/c"
-
-func Y() {
- c.C2()() // vuln use: Y -> bvuln.Vuln
-}
-`
-
-const proxy1 = `
--- golang.org/cmod@v1.1.3/go.mod --
-module golang.org/cmod
-
-go 1.12
--- golang.org/cmod@v1.1.3/c/c.go --
-package c
-
-import (
- "golang.org/amod/avuln"
- "golang.org/bmod/bvuln"
-)
-
-type I interface {
- Vuln1()
-}
-
-func C1() I {
- v := avuln.VulnData{}
- v.Vuln2() // vuln use
- return v
-}
-
-func C2() func() {
- return bvuln.Vuln
-}
--- golang.org/amod@v1.1.3/go.mod --
-module golang.org/amod
-
-go 1.14
--- golang.org/amod@v1.1.3/avuln/avuln.go --
-package avuln
-
-type VulnData struct {}
-func (v VulnData) Vuln1() {}
-func (v VulnData) Vuln2() {}
--- golang.org/bmod@v0.5.0/go.mod --
-module golang.org/bmod
-
-go 1.14
--- golang.org/bmod@v0.5.0/bvuln/bvuln.go --
-package bvuln
-
-func Vuln() {
- // something evil
-}
-`
-
-// testClient contains the following test vulnerabilities
-// golang.org/amod/avuln.{VulnData.Vuln1, vulnData.Vuln2}
-// golang.org/bmod/bvuln.{Vuln}
-var testClient1 = &mockClient{
- ret: map[string][]*osv.Entry{
- "golang.org/amod": {
- {
- ID: "GO-2022-01",
- References: []osv.Reference{
- {
- Type: "href",
- URL: "pkg.go.dev/vuln/GO-2022-01",
- },
- },
- Affected: []osv.Affected{{
- Package: osv.Package{Name: "golang.org/amod/avuln"},
- Ranges: osv.Affects{{Type: osv.TypeSemver, Events: []osv.RangeEvent{{Introduced: "1.0.0"}, {Fixed: "1.0.4"}, {Introduced: "1.1.2"}}}},
- EcosystemSpecific: osv.EcosystemSpecific{Symbols: []string{"VulnData.Vuln1", "VulnData.Vuln2"}},
- }},
- },
- },
- "golang.org/bmod": {
- {
- ID: "GO-2022-02",
- Affected: []osv.Affected{{
- Package: osv.Package{Name: "golang.org/bmod/bvuln"},
- Ranges: osv.Affects{{Type: osv.TypeSemver}},
- EcosystemSpecific: osv.EcosystemSpecific{Symbols: []string{"Vuln"}},
- }},
- },
- },
- },
-}
-
-var goldenReport1 = []string{`
-{
- ID: "GO-2022-01",
- Symbol: "VulnData.Vuln1",
- PkgPath: "golang.org/amod/avuln",
- ModPath: "golang.org/amod",
- URL: "https://pkg.go.dev/vuln/GO-2022-01",
- CurrentVersion "v1.1.3",
- FixedVersion "v1.0.4",
- "call_stacks": [
- "golang.org/cmod/c.I.t0 called from golang.org/entry/x.X [approx.] (x.go:8)\ngolang.org/amod/avuln.VulnData.Vuln1 (avuln.go:3)\n\n"
- ]
-}
-`,
- `
-{
- "id": "GO-2022-02",
- "symbol": "Vuln",
- "pkg_path": "golang.org/bmod/bvuln",
- "mod_path": "golang.org/bmod",
- "url": "https://pkg.go.dev/vuln/GO-2022-02",
- "current_version": "v0.5.0",
- "call_stacks": [
- "t0 called from golang.org/entry/y.Y [approx.] (y.go:5)\ngolang.org/bmod/bvuln.Vuln (bvuln.go:2)\n\n",
- "Y called from golang.org/entry/x.CallY (x.go:12)\nt0 called from golang.org/entry/y.Y [approx.] (y.go:5)\ngolang.org/bmod/bvuln.Vuln (bvuln.go:2)\n\n"
- ]
-}
-`,
- `
-{
- "id": "GO-2022-01",
- "symbol": "VulnData.Vuln2",
- "pkg_path": "golang.org/amod/avuln",
- "mod_path": "golang.org/amod",
- "url": "https://pkg.go.dev/vuln/GO-2022-01",
- "current_version": "v1.1.3",
- FixedVersion: "v1.0.4",
- "call_stacks": [
- "C1 called from golang.org/entry/x.X (x.go:8)\nVuln2 called from golang.org/cmod/c.C1 (c.go:13)\ngolang.org/amod/avuln.VulnData.Vuln2 (avuln.go:4)\n\n"
- ]
-}
-`,
-}
-
-type mockClient struct {
- client.Client
- ret map[string][]*osv.Entry
-}
-
-func (mc *mockClient) GetByModule(ctx context.Context, a string) ([]*osv.Entry, error) {
- return mc.ret[a], nil
-}
-
-func runTest(t *testing.T, workspaceData, proxyData string, test func(context.Context, source.Snapshot)) {
- ws, err := fake.NewSandbox(&fake.SandboxConfig{
- Files: fake.UnpackTxt(workspaceData),
- ProxyFiles: fake.UnpackTxt(proxyData),
- })
- if err != nil {
- t.Fatal(err)
- }
- defer ws.Close()
-
- ctx := tests.Context(t)
-
- // get the module cache populated and the go.sum file at the root auto-generated.
- dir := ws.Workdir.RootURI().SpanURI().Filename()
- if err := ws.RunGoCommand(ctx, dir, "list", []string{"-mod=mod", "..."}, true); err != nil {
- t.Fatal(err)
- }
-
- cache := cache.New(nil)
- session := cache.NewSession(ctx)
- options := source.DefaultOptions().Clone()
- tests.DefaultOptions(options)
- session.SetOptions(options)
- envs := []string{}
- for k, v := range ws.GoEnv() {
- envs = append(envs, k+"="+v)
- }
- options.SetEnvSlice(envs)
- name := ws.RootDir()
- folder := ws.Workdir.RootURI().SpanURI()
- view, snapshot, release, err := session.NewView(ctx, name, folder, options)
- if err != nil {
- t.Fatal(err)
- }
- defer release()
- defer view.Shutdown(ctx)
-
- test(ctx, snapshot)
-}
-
-func sortStrs(s []string) []string {
- sort.Strings(s)
- return s
-}
-
-func pkgPaths(pkgs []*vulncheck.Package) []string {
- var r []string
- for _, p := range pkgs {
- r = append(r, p.PkgPath)
- }
- return sortStrs(r)
-}
-
-// TODO: expose this as a method of Snapshot.
-func packagesCfg(ctx context.Context, snapshot source.Snapshot) *packages.Config {
- view := snapshot.View()
- viewBuildFlags := view.Options().BuildFlags
- var viewEnv []string
- if e := view.Options().EnvSlice(); e != nil {
- viewEnv = append(os.Environ(), e...)
- }
- return &packages.Config{
- // Mode will be set by cmd.Run.
- Context: ctx,
- Tests: true,
- BuildFlags: viewBuildFlags,
- Env: viewEnv,
- Dir: view.Folder().Filename(),
- }
-}
diff --git a/gopls/internal/vulncheck/util.go b/gopls/internal/vulncheck/util.go
deleted file mode 100644
index a85b55bb0..000000000
--- a/gopls/internal/vulncheck/util.go
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.18
-// +build go1.18
-
-package vulncheck
-
-import (
- "fmt"
- "go/token"
- "strings"
-
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/vuln/osv"
- "golang.org/x/vuln/vulncheck"
-)
-
-// fixedVersion returns the semantic version of the module
-// version with a fix. The semantic version is
-// as defined by SemVer 2.0.0, with no leading “v” prefix.
-// Returns an empty string if there is no reported fix.
-func fixedVersion(info *osv.Entry) string {
- var fixed string
- for _, a := range info.Affected {
- for _, r := range a.Ranges {
- if r.Type != "SEMVER" {
- continue
- }
- for _, e := range r.Events {
- if e.Fixed != "" {
- // assuming the later entry has higher semver.
- // TODO: check assumption.
- fixed = "v" + e.Fixed
- }
- }
- }
- }
- return fixed
-}
-
-const maxNumCallStacks = 64
-
-func toCallStacks(src []vulncheck.CallStack) []CallStack {
- if len(src) > maxNumCallStacks {
- src = src[:maxNumCallStacks]
- }
- var dest []CallStack
- for _, s := range src {
- dest = append(dest, toCallStack(s))
- }
- return dest
-}
-
-func toCallStack(src vulncheck.CallStack) CallStack {
- var dest []StackEntry
- for _, e := range src {
- dest = append(dest, toStackEntry(e))
- }
- return dest
-}
-
-func toStackEntry(src vulncheck.StackEntry) StackEntry {
- f, call := src.Function, src.Call
- pos := f.Pos
- desc := funcName(f)
- if src.Call != nil {
- pos = src.Call.Pos
- desc = funcNameInCallSite(call) + " called from " + desc
- if !call.Resolved {
- // In case of a statically unresolved call site, communicate to the client
- // that this was approximately resolved to f
-
- desc += " [approx.]"
- }
- }
- return StackEntry{
- Name: desc,
- URI: filenameToURI(pos),
- Pos: posToPosition(pos),
- }
-}
-
-func funcName(fn *vulncheck.FuncNode) string {
- return strings.TrimPrefix(fn.String(), "*")
-}
-
-func funcNameInCallSite(call *vulncheck.CallSite) string {
- if call.RecvType == "" {
- return call.Name
- }
- return fmt.Sprintf("%s.%s", call.RecvType, call.Name)
-}
-
-// href returns a URL embedded in the entry if any.
-// If no suitable URL is found, it returns a default entry in
-// pkg.go.dev/vuln.
-func href(vuln *osv.Entry) string {
- for _, affected := range vuln.Affected {
- if url := affected.DatabaseSpecific.URL; url != "" {
- return url
- }
- }
- for _, r := range vuln.References {
- if r.Type == "WEB" {
- return r.URL
- }
- }
- return fmt.Sprintf("https://pkg.go.dev/vuln/%s", vuln.ID)
-}
-
-func filenameToURI(pos *token.Position) protocol.DocumentURI {
- if pos == nil || pos.Filename == "" {
- return ""
- }
- return protocol.URIFromPath(pos.Filename)
-}
-
-func posToPosition(pos *token.Position) (p protocol.Position) {
- // token.Position.Line starts from 1, and
- // LSP protocol's position line is 0-based.
- if pos != nil {
- p.Line = uint32(pos.Line - 1)
- // TODO(hyangah): LSP uses UTF16 column.
- // We need utility like span.ToUTF16Column,
- // but somthing that does not require file contents.
- }
- return p
-}
diff --git a/gopls/internal/vulncheck/vulncheck.go b/gopls/internal/vulncheck/vulncheck.go
index 2c4d0d297..3c361bd01 100644
--- a/gopls/internal/vulncheck/vulncheck.go
+++ b/gopls/internal/vulncheck/vulncheck.go
@@ -10,14 +10,16 @@ package vulncheck
import (
"context"
- "errors"
"golang.org/x/tools/go/packages"
- "golang.org/x/tools/internal/lsp/command"
+ "golang.org/x/tools/gopls/internal/govulncheck"
+ "golang.org/x/tools/gopls/internal/lsp/source"
)
-// Govulncheck runs the in-process govulncheck implementation.
// With go1.18+, this is swapped with the real implementation.
-var Govulncheck = func(ctx context.Context, cfg *packages.Config, args command.VulncheckArgs) (res command.VulncheckResult, _ error) {
- return res, errors.New("not implemented")
-}
+var Main func(cfg packages.Config, patterns ...string) error = nil
+
+// VulnerablePackages queries the vulndb and reports which vulnerabilities
+// apply to this snapshot. The result contains a set of packages,
+// grouped by vuln ID and by module.
+var VulnerablePackages func(ctx context.Context, snapshot source.Snapshot, modfile source.FileHandle) (*govulncheck.Result, error) = nil
diff --git a/gopls/internal/vulncheck/vulntest/db.go b/gopls/internal/vulncheck/vulntest/db.go
new file mode 100644
index 000000000..511a47e1b
--- /dev/null
+++ b/gopls/internal/vulncheck/vulntest/db.go
@@ -0,0 +1,303 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18
+// +build go1.18
+
+// Package vulntest provides helpers for vulncheck functionality testing.
+package vulntest
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+ "time"
+
+ "golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/txtar"
+ "golang.org/x/vuln/client"
+ "golang.org/x/vuln/osv"
+)
+
+// NewDatabase returns a read-only DB containing the provided
+// txtar-format collection of vulnerability reports.
+// Each vulnerability report is a YAML file whose format
+// is defined in golang.org/x/vulndb/doc/format.md.
+// A report file name must have the id as its base name,
+// and have .yaml as its extension.
+//
+// db, err := NewDatabase(ctx, reports)
+// ...
+// defer db.Clean()
+// client, err := NewClient(db)
+// ...
+//
+// The returned DB's Clean method must be called to clean up the
+// generated database.
+func NewDatabase(ctx context.Context, txtarReports []byte) (*DB, error) {
+ disk, err := ioutil.TempDir("", "vulndb-test")
+ if err != nil {
+ return nil, err
+ }
+ if err := generateDB(ctx, txtarReports, disk, false); err != nil {
+ os.RemoveAll(disk)
+ return nil, err
+ }
+
+ return &DB{disk: disk}, nil
+}
+
+// DB is a read-only vulnerability database on disk.
+// Users can use this database with golang.org/x/vuln APIs
+// by setting the `VULNDB“ environment variable.
+type DB struct {
+ disk string
+}
+
+// URI returns the file URI that can be used for VULNDB environment
+// variable.
+func (db *DB) URI() string {
+ u := span.URIFromPath(db.disk)
+ return string(u)
+}
+
+// Clean deletes the database.
+func (db *DB) Clean() error {
+ return os.RemoveAll(db.disk)
+}
+
+// NewClient returns a vuln DB client that works with the given DB.
+func NewClient(db *DB) (client.Client, error) {
+ return client.NewClient([]string{db.URI()}, client.Options{})
+}
+
+//
+// The following was selectively copied from golang.org/x/vulndb/internal/database
+//
+
+const (
+ dbURL = "https://pkg.go.dev/vuln/"
+
+ // idDirectory is the name of the directory that contains entries
+ // listed by their IDs.
+ idDirectory = "ID"
+
+ // stdFileName is the name of the .json file in the vulndb repo
+ // that will contain info on standard library vulnerabilities.
+ stdFileName = "stdlib"
+
+ // toolchainFileName is the name of the .json file in the vulndb repo
+ // that will contain info on toolchain (cmd/...) vulnerabilities.
+ toolchainFileName = "toolchain"
+
+ // cmdModule is the name of the module containing Go toolchain
+ // binaries.
+ cmdModule = "cmd"
+
+ // stdModule is the name of the module containing Go std packages.
+ stdModule = "std"
+)
+
+// generateDB generates the file-based vuln DB in the directory jsonDir.
+func generateDB(ctx context.Context, txtarData []byte, jsonDir string, indent bool) error {
+ archive := txtar.Parse(txtarData)
+
+ jsonVulns, entries, err := generateEntries(ctx, archive)
+ if err != nil {
+ return err
+ }
+
+ index := make(client.DBIndex, len(jsonVulns))
+ for modulePath, vulns := range jsonVulns {
+ epath, err := client.EscapeModulePath(modulePath)
+ if err != nil {
+ return err
+ }
+ if err := writeVulns(filepath.Join(jsonDir, epath), vulns, indent); err != nil {
+ return err
+ }
+ for _, v := range vulns {
+ if v.Modified.After(index[modulePath]) {
+ index[modulePath] = v.Modified
+ }
+ }
+ }
+ if err := writeJSON(filepath.Join(jsonDir, "index.json"), index, indent); err != nil {
+ return err
+ }
+ if err := writeAliasIndex(jsonDir, entries, indent); err != nil {
+ return err
+ }
+ return writeEntriesByID(filepath.Join(jsonDir, idDirectory), entries, indent)
+}
+
+func generateEntries(_ context.Context, archive *txtar.Archive) (map[string][]osv.Entry, []osv.Entry, error) {
+ now := time.Now()
+ jsonVulns := map[string][]osv.Entry{}
+ var entries []osv.Entry
+ for _, f := range archive.Files {
+ if !strings.HasSuffix(f.Name, ".yaml") {
+ continue
+ }
+ r, err := readReport(bytes.NewReader(f.Data))
+ if err != nil {
+ return nil, nil, err
+ }
+ name := strings.TrimSuffix(filepath.Base(f.Name), filepath.Ext(f.Name))
+ linkName := fmt.Sprintf("%s%s", dbURL, name)
+ entry, modulePaths := generateOSVEntry(name, linkName, now, *r)
+ for _, modulePath := range modulePaths {
+ jsonVulns[modulePath] = append(jsonVulns[modulePath], entry)
+ }
+ entries = append(entries, entry)
+ }
+ return jsonVulns, entries, nil
+}
+
+func writeVulns(outPath string, vulns []osv.Entry, indent bool) error {
+ if err := os.MkdirAll(filepath.Dir(outPath), 0755); err != nil {
+ return fmt.Errorf("failed to create directory %q: %s", filepath.Dir(outPath), err)
+ }
+ return writeJSON(outPath+".json", vulns, indent)
+}
+
+func writeEntriesByID(idDir string, entries []osv.Entry, indent bool) error {
+ // Write a directory containing entries by ID.
+ if err := os.MkdirAll(idDir, 0755); err != nil {
+ return fmt.Errorf("failed to create directory %q: %v", idDir, err)
+ }
+ var idIndex []string
+ for _, e := range entries {
+ outPath := filepath.Join(idDir, e.ID+".json")
+ if err := writeJSON(outPath, e, indent); err != nil {
+ return err
+ }
+ idIndex = append(idIndex, e.ID)
+ }
+ // Write an index.json in the ID directory with a list of all the IDs.
+ return writeJSON(filepath.Join(idDir, "index.json"), idIndex, indent)
+}
+
+// Write a JSON file containing a map from alias to GO IDs.
+func writeAliasIndex(dir string, entries []osv.Entry, indent bool) error {
+ aliasToGoIDs := map[string][]string{}
+ for _, e := range entries {
+ for _, a := range e.Aliases {
+ aliasToGoIDs[a] = append(aliasToGoIDs[a], e.ID)
+ }
+ }
+ return writeJSON(filepath.Join(dir, "aliases.json"), aliasToGoIDs, indent)
+}
+
+func writeJSON(filename string, value any, indent bool) (err error) {
+ j, err := jsonMarshal(value, indent)
+ if err != nil {
+ return err
+ }
+ return os.WriteFile(filename, j, 0644)
+}
+
+func jsonMarshal(v any, indent bool) ([]byte, error) {
+ if indent {
+ return json.MarshalIndent(v, "", " ")
+ }
+ return json.Marshal(v)
+}
+
+// generateOSVEntry create an osv.Entry for a report. In addition to the report, it
+// takes the ID for the vuln and a URL that will point to the entry in the vuln DB.
+// It returns the osv.Entry and a list of module paths that the vuln affects.
+func generateOSVEntry(id, url string, lastModified time.Time, r Report) (osv.Entry, []string) {
+ entry := osv.Entry{
+ ID: id,
+ Published: r.Published,
+ Modified: lastModified,
+ Withdrawn: r.Withdrawn,
+ Details: r.Description,
+ }
+
+ moduleMap := make(map[string]bool)
+ for _, m := range r.Modules {
+ switch m.Module {
+ case stdModule:
+ moduleMap[stdFileName] = true
+ case cmdModule:
+ moduleMap[toolchainFileName] = true
+ default:
+ moduleMap[m.Module] = true
+ }
+ entry.Affected = append(entry.Affected, generateAffected(m, url))
+ }
+ for _, ref := range r.References {
+ entry.References = append(entry.References, osv.Reference{
+ Type: string(ref.Type),
+ URL: ref.URL,
+ })
+ }
+
+ var modulePaths []string
+ for module := range moduleMap {
+ modulePaths = append(modulePaths, module)
+ }
+ // TODO: handle missing fields - Aliases
+
+ return entry, modulePaths
+}
+
+func generateAffectedRanges(versions []VersionRange) osv.Affects {
+ a := osv.AffectsRange{Type: osv.TypeSemver}
+ if len(versions) == 0 || versions[0].Introduced == "" {
+ a.Events = append(a.Events, osv.RangeEvent{Introduced: "0"})
+ }
+ for _, v := range versions {
+ if v.Introduced != "" {
+ a.Events = append(a.Events, osv.RangeEvent{Introduced: v.Introduced.Canonical()})
+ }
+ if v.Fixed != "" {
+ a.Events = append(a.Events, osv.RangeEvent{Fixed: v.Fixed.Canonical()})
+ }
+ }
+ return osv.Affects{a}
+}
+
+func generateImports(m *Module) (imps []osv.EcosystemSpecificImport) {
+ for _, p := range m.Packages {
+ syms := append([]string{}, p.Symbols...)
+ syms = append(syms, p.DerivedSymbols...)
+ sort.Strings(syms)
+ imps = append(imps, osv.EcosystemSpecificImport{
+ Path: p.Package,
+ GOOS: p.GOOS,
+ GOARCH: p.GOARCH,
+ Symbols: syms,
+ })
+ }
+ return imps
+}
+func generateAffected(m *Module, url string) osv.Affected {
+ name := m.Module
+ switch name {
+ case stdModule:
+ name = "stdlib"
+ case cmdModule:
+ name = "toolchain"
+ }
+ return osv.Affected{
+ Package: osv.Package{
+ Name: name,
+ Ecosystem: osv.GoEcosystem,
+ },
+ Ranges: generateAffectedRanges(m.Versions),
+ DatabaseSpecific: osv.DatabaseSpecific{URL: url},
+ EcosystemSpecific: osv.EcosystemSpecific{
+ Imports: generateImports(m),
+ },
+ }
+}
diff --git a/gopls/internal/vulncheck/vulntest/db_test.go b/gopls/internal/vulncheck/vulntest/db_test.go
new file mode 100644
index 000000000..7d939421c
--- /dev/null
+++ b/gopls/internal/vulncheck/vulntest/db_test.go
@@ -0,0 +1,61 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18
+// +build go1.18
+
+package vulntest
+
+import (
+ "context"
+ "encoding/json"
+ "testing"
+)
+
+func TestNewDatabase(t *testing.T) {
+ ctx := context.Background()
+ in := []byte(`
+-- GO-2020-0001.yaml --
+modules:
+ - module: github.com/gin-gonic/gin
+ versions:
+ - fixed: 1.6.0
+ packages:
+ - package: github.com/gin-gonic/gin
+ symbols:
+ - defaultLogFormatter
+description: |
+ Something.
+published: 2021-04-14T20:04:52Z
+references:
+ - fix: https://github.com/gin-gonic/gin/pull/2237
+`)
+
+ db, err := NewDatabase(ctx, in)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer db.Clean()
+
+ cli, err := NewClient(db)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := cli.GetByID(ctx, "GO-2020-0001")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got.ID != "GO-2020-0001" {
+ m, _ := json.Marshal(got)
+ t.Errorf("got %s\nwant GO-2020-0001 entry", m)
+ }
+ gotAll, err := cli.GetByModule(ctx, "github.com/gin-gonic/gin")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(gotAll) != 1 || gotAll[0].ID != "GO-2020-0001" {
+ m, _ := json.Marshal(got)
+ t.Errorf("got %s\nwant GO-2020-0001 entry", m)
+ }
+}
diff --git a/gopls/internal/vulncheck/vulntest/report.go b/gopls/internal/vulncheck/vulntest/report.go
new file mode 100644
index 000000000..e5595e8ba
--- /dev/null
+++ b/gopls/internal/vulncheck/vulntest/report.go
@@ -0,0 +1,176 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18
+// +build go1.18
+
+package vulntest
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "time"
+
+ "golang.org/x/mod/semver"
+ "gopkg.in/yaml.v3"
+)
+
+//
+// The following was selectively copied from golang.org/x/vulndb/internal/report
+//
+
+// readReport reads a Report in YAML format.
+func readReport(in io.Reader) (*Report, error) {
+ d := yaml.NewDecoder(in)
+ // Require that all fields in the file are in the struct.
+ // This corresponds to v2's UnmarshalStrict.
+ d.KnownFields(true)
+ var r Report
+ if err := d.Decode(&r); err != nil {
+ return nil, fmt.Errorf("yaml.Decode: %v", err)
+ }
+ return &r, nil
+}
+
+// Report represents a vulnerability report in the vulndb.
+// Remember to update doc/format.md when this structure changes.
+type Report struct {
+ Modules []*Module `yaml:",omitempty"`
+
+ // Description is the CVE description from an existing CVE. If we are
+ // assigning a CVE ID ourselves, use CVEMetadata.Description instead.
+ Description string `yaml:",omitempty"`
+ Published time.Time `yaml:",omitempty"`
+ Withdrawn *time.Time `yaml:",omitempty"`
+
+ References []*Reference `yaml:",omitempty"`
+}
+
+// Write writes r to filename in YAML format.
+func (r *Report) Write(filename string) (err error) {
+ f, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ err = r.encode(f)
+ err2 := f.Close()
+ if err == nil {
+ err = err2
+ }
+ return err
+}
+
+// ToString encodes r to a YAML string.
+func (r *Report) ToString() (string, error) {
+ var b strings.Builder
+ if err := r.encode(&b); err != nil {
+ return "", err
+ }
+ return b.String(), nil
+}
+
+func (r *Report) encode(w io.Writer) error {
+ e := yaml.NewEncoder(w)
+ defer e.Close()
+ e.SetIndent(4)
+ return e.Encode(r)
+}
+
+type VersionRange struct {
+ Introduced Version `yaml:"introduced,omitempty"`
+ Fixed Version `yaml:"fixed,omitempty"`
+}
+
+type Module struct {
+ Module string `yaml:",omitempty"`
+ Versions []VersionRange `yaml:",omitempty"`
+ Packages []*Package `yaml:",omitempty"`
+}
+
+type Package struct {
+ Package string `yaml:",omitempty"`
+ GOOS []string `yaml:"goos,omitempty"`
+ GOARCH []string `yaml:"goarch,omitempty"`
+ // Symbols originally identified as vulnerable.
+ Symbols []string `yaml:",omitempty"`
+ // Additional vulnerable symbols, computed from Symbols via static analysis
+ // or other technique.
+ DerivedSymbols []string `yaml:"derived_symbols,omitempty"`
+}
+
+// Version is an SemVer 2.0.0 semantic version with no leading "v" prefix,
+// as used by OSV.
+type Version string
+
+// V returns the version with a "v" prefix.
+func (v Version) V() string {
+ return "v" + string(v)
+}
+
+// IsValid reports whether v is a valid semantic version string.
+func (v Version) IsValid() bool {
+ return semver.IsValid(v.V())
+}
+
+// Before reports whether v < v2.
+func (v Version) Before(v2 Version) bool {
+ return semver.Compare(v.V(), v2.V()) < 0
+}
+
+// Canonical returns the canonical formatting of the version.
+func (v Version) Canonical() string {
+ return strings.TrimPrefix(semver.Canonical(v.V()), "v")
+}
+
+// Reference type is a reference (link) type.
+type ReferenceType string
+
+const (
+ ReferenceTypeAdvisory = ReferenceType("ADVISORY")
+ ReferenceTypeArticle = ReferenceType("ARTICLE")
+ ReferenceTypeReport = ReferenceType("REPORT")
+ ReferenceTypeFix = ReferenceType("FIX")
+ ReferenceTypePackage = ReferenceType("PACKAGE")
+ ReferenceTypeEvidence = ReferenceType("EVIDENCE")
+ ReferenceTypeWeb = ReferenceType("WEB")
+)
+
+// ReferenceTypes is the set of reference types defined in OSV.
+var ReferenceTypes = []ReferenceType{
+ ReferenceTypeAdvisory,
+ ReferenceTypeArticle,
+ ReferenceTypeReport,
+ ReferenceTypeFix,
+ ReferenceTypePackage,
+ ReferenceTypeEvidence,
+ ReferenceTypeWeb,
+}
+
+// A Reference is a link to some external resource.
+//
+// For ease of typing, References are represented in the YAML as a
+// single-element mapping of type to URL.
+type Reference struct {
+ Type ReferenceType `json:"type,omitempty"`
+ URL string `json:"url,omitempty"`
+}
+
+func (r *Reference) MarshalYAML() (interface{}, error) {
+ return map[string]string{
+ strings.ToLower(string(r.Type)): r.URL,
+ }, nil
+}
+
+func (r *Reference) UnmarshalYAML(n *yaml.Node) (err error) {
+ if n.Kind != yaml.MappingNode || len(n.Content) != 2 || n.Content[0].Kind != yaml.ScalarNode || n.Content[1].Kind != yaml.ScalarNode {
+ return &yaml.TypeError{Errors: []string{
+ fmt.Sprintf("line %d: report.Reference must contain a mapping with one value", n.Line),
+ }}
+ }
+ r.Type = ReferenceType(strings.ToUpper(n.Content[0].Value))
+ r.URL = n.Content[1].Value
+ return nil
+}
diff --git a/gopls/internal/vulncheck/vulntest/report_test.go b/gopls/internal/vulncheck/vulntest/report_test.go
new file mode 100644
index 000000000..c42dae805
--- /dev/null
+++ b/gopls/internal/vulncheck/vulntest/report_test.go
@@ -0,0 +1,52 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18
+// +build go1.18
+
+package vulntest
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+)
+
+func readAll(t *testing.T, filename string) io.Reader {
+ d, err := ioutil.ReadFile(filename)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return bytes.NewReader(d)
+}
+
+func TestRoundTrip(t *testing.T) {
+ // A report shouldn't change after being read and then written.
+ in := filepath.Join("testdata", "report.yaml")
+ r, err := readReport(readAll(t, in))
+ if err != nil {
+ t.Fatal(err)
+ }
+ out := filepath.Join(t.TempDir(), "report.yaml")
+ if err := r.Write(out); err != nil {
+ t.Fatal(err)
+ }
+
+ want, err := os.ReadFile(in)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := os.ReadFile(out)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if diff := cmp.Diff(want, got); diff != "" {
+ t.Errorf("mismatch (-want, +got):\n%s", diff)
+ }
+}
diff --git a/gopls/internal/vulncheck/vulntest/stdlib.go b/gopls/internal/vulncheck/vulntest/stdlib.go
new file mode 100644
index 000000000..9bf4d4ef0
--- /dev/null
+++ b/gopls/internal/vulncheck/vulntest/stdlib.go
@@ -0,0 +1,26 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18
+// +build go1.18
+
+package vulntest
+
+import (
+ "strings"
+
+ "golang.org/x/mod/module"
+)
+
+// maybeStdlib reports whether the given import path could be part of the Go
+// standard library, by reporting whether the first component lacks a '.'.
+func maybeStdlib(path string) bool {
+ if err := module.CheckImportPath(path); err != nil {
+ return false
+ }
+ if i := strings.IndexByte(path, '/'); i != -1 {
+ path = path[:i]
+ }
+ return !strings.Contains(path, ".")
+}
diff --git a/gopls/internal/vulncheck/vulntest/stdlib_test.go b/gopls/internal/vulncheck/vulntest/stdlib_test.go
new file mode 100644
index 000000000..8f893f3ec
--- /dev/null
+++ b/gopls/internal/vulncheck/vulntest/stdlib_test.go
@@ -0,0 +1,27 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18
+// +build go1.18
+
+package vulntest
+
+import "testing"
+
+func TestMaybeStdlib(t *testing.T) {
+ for _, test := range []struct {
+ in string
+ want bool
+ }{
+ {"", false},
+ {"math/crypto", true},
+ {"github.com/pkg/errors", false},
+ {"Path is unknown", false},
+ } {
+ got := maybeStdlib(test.in)
+ if got != test.want {
+ t.Errorf("%q: got %t, want %t", test.in, got, test.want)
+ }
+ }
+}
diff --git a/gopls/internal/vulncheck/vulntest/testdata/report.yaml b/gopls/internal/vulncheck/vulntest/testdata/report.yaml
new file mode 100644
index 000000000..48384b543
--- /dev/null
+++ b/gopls/internal/vulncheck/vulntest/testdata/report.yaml
@@ -0,0 +1,15 @@
+modules:
+ - module: github.com/gin-gonic/gin
+ versions:
+ - fixed: 1.6.0
+ packages:
+ - package: github.com/gin-gonic/gin
+ symbols:
+ - defaultLogFormatter
+description: |
+ The default Formatter for the Logger middleware (LoggerConfig.Formatter),
+ which is included in the Default engine, allows attackers to inject arbitrary
+ log entries by manipulating the request path.
+references:
+ - fix: https://github.com/gin-gonic/gin/pull/1234
+ - fix: https://github.com/gin-gonic/gin/commit/abcdefg
diff --git a/gopls/main.go b/gopls/main.go
index f73eabf57..bdbe36154 100644
--- a/gopls/main.go
+++ b/gopls/main.go
@@ -11,13 +11,15 @@
// for the most up-to-date documentation.
package main // import "golang.org/x/tools/gopls"
+//go:generate go run doc/generate.go
+
import (
"context"
"golang.org/x/tools/internal/analysisinternal"
"os"
"golang.org/x/tools/gopls/internal/hooks"
- "golang.org/x/tools/internal/lsp/cmd"
+ "golang.org/x/tools/gopls/internal/lsp/cmd"
"golang.org/x/tools/internal/tool"
)
diff --git a/gopls/release/release.go b/gopls/release/release.go
index 173909122..dab95822e 100644
--- a/gopls/release/release.go
+++ b/gopls/release/release.go
@@ -15,25 +15,21 @@ import (
"flag"
"fmt"
"go/types"
- exec "golang.org/x/sys/execabs"
"io/ioutil"
"log"
"os"
- "os/user"
"path/filepath"
"strconv"
"strings"
+ exec "golang.org/x/sys/execabs"
+
"golang.org/x/mod/modfile"
"golang.org/x/mod/semver"
"golang.org/x/tools/go/packages"
)
-var (
- versionFlag = flag.String("version", "", "version to tag")
- remoteFlag = flag.String("remote", "", "remote to which to push the tag")
- releaseFlag = flag.Bool("release", false, "release is true if you intend to tag and push a release")
-)
+var versionFlag = flag.String("version", "", "version to tag")
func main() {
flag.Parse()
@@ -50,13 +46,6 @@ func main() {
if semver.Build(*versionFlag) != "" {
log.Fatalf("unexpected build suffix: %s", *versionFlag)
}
- if *releaseFlag && *remoteFlag == "" {
- log.Fatalf("must provide -remote flag if releasing")
- }
- user, err := user.Current()
- if err != nil {
- log.Fatal(err)
- }
// Validate that the user is running the program from the gopls module.
wd, err := os.Getwd()
if err != nil {
@@ -65,77 +54,28 @@ func main() {
if filepath.Base(wd) != "gopls" {
log.Fatalf("must run from the gopls module")
}
- // Confirm that they are running on a branch with a name following the
- // format of "gopls-release-branch.<major>.<minor>".
- if err := validateBranchName(*versionFlag); err != nil {
- log.Fatal(err)
- }
// Confirm that they have updated the hardcoded version.
- if err := validateHardcodedVersion(wd, *versionFlag); err != nil {
+ if err := validateHardcodedVersion(*versionFlag); err != nil {
log.Fatal(err)
}
// Confirm that the versions in the go.mod file are correct.
if err := validateGoModFile(wd); err != nil {
log.Fatal(err)
}
- earlyExitMsg := "Validated that the release is ready. Exiting without tagging and publishing."
- if !*releaseFlag {
- fmt.Println(earlyExitMsg)
- os.Exit(0)
- }
- fmt.Println(`Proceeding to tagging and publishing the release...
-Please enter Y if you wish to proceed or anything else if you wish to exit.`)
- // Accept and process user input.
- var input string
- fmt.Scanln(&input)
- switch input {
- case "Y":
- fmt.Println("Proceeding to tagging and publishing the release.")
- default:
- fmt.Println(earlyExitMsg)
- os.Exit(0)
- }
- // To tag the release:
- // $ git -c user.email=username@google.com tag -a -m “<message>” gopls/v<major>.<minor>.<patch>-<pre-release>
- goplsVersion := fmt.Sprintf("gopls/%s", *versionFlag)
- cmd := exec.Command("git", "-c", fmt.Sprintf("user.email=%s@google.com", user.Username), "tag", "-a", "-m", fmt.Sprintf("%q", goplsVersion), goplsVersion)
- if err := cmd.Run(); err != nil {
- log.Fatal(err)
- }
- // Push the tag to the remote:
- // $ git push <remote> gopls/v<major>.<minor>.<patch>-pre.1
- cmd = exec.Command("git", "push", *remoteFlag, goplsVersion)
- if err := cmd.Run(); err != nil {
- log.Fatal(err)
- }
-}
-
-// validateBranchName reports whether the user's current branch name is of the
-// form "gopls-release-branch.<major>.<minor>". It reports an error if not.
-func validateBranchName(version string) error {
- cmd := exec.Command("git", "branch", "--show-current")
- stdout, err := cmd.Output()
- if err != nil {
- return err
- }
- branch := strings.TrimSpace(string(stdout))
- expectedBranch := fmt.Sprintf("gopls-release-branch.%s", strings.TrimPrefix(semver.MajorMinor(version), "v"))
- if branch != expectedBranch {
- return fmt.Errorf("expected release branch %s, got %s", expectedBranch, branch)
- }
- return nil
+ fmt.Println("Validated that the release is ready.")
+ os.Exit(0)
}
// validateHardcodedVersion reports whether the version hardcoded in the gopls
// binary is equivalent to the version being published. It reports an error if
// not.
-func validateHardcodedVersion(wd string, version string) error {
+func validateHardcodedVersion(version string) error {
+ const debugPkg = "golang.org/x/tools/gopls/internal/lsp/debug"
pkgs, err := packages.Load(&packages.Config{
- Dir: filepath.Dir(wd),
Mode: packages.NeedName | packages.NeedFiles |
packages.NeedCompiledGoFiles | packages.NeedImports |
packages.NeedTypes | packages.NeedTypesSizes,
- }, "golang.org/x/tools/internal/lsp/debug")
+ }, debugPkg)
if err != nil {
return err
}
@@ -143,6 +83,9 @@ func validateHardcodedVersion(wd string, version string) error {
return fmt.Errorf("expected 1 package, got %v", len(pkgs))
}
pkg := pkgs[0]
+ if len(pkg.Errors) > 0 {
+ return fmt.Errorf("failed to load %q: first error: %w", debugPkg, pkg.Errors[0])
+ }
obj := pkg.Types.Scope().Lookup("Version")
c, ok := obj.(*types.Const)
if !ok {
@@ -164,8 +107,8 @@ func validateHardcodedVersion(wd string, version string) error {
return nil
}
-func validateGoModFile(wd string) error {
- filename := filepath.Join(wd, "go.mod")
+func validateGoModFile(goplsDir string) error {
+ filename := filepath.Join(goplsDir, "go.mod")
data, err := ioutil.ReadFile(filename)
if err != nil {
return err
diff --git a/gopls/test/debug/debug_test.go b/gopls/test/debug/debug_test.go
index 4d680eebb..72e5d6513 100644
--- a/gopls/test/debug/debug_test.go
+++ b/gopls/test/debug/debug_test.go
@@ -6,14 +6,13 @@ package debug_test
// Provide 'static type checking' of the templates. This guards against changes is various
// gopls datastructures causing template execution to fail. The checking is done by
-// the github.com/jba/templatecheck pacakge. Before that is run, the test checks that
+// the github.com/jba/templatecheck package. Before that is run, the test checks that
// its list of templates and their arguments corresponds to the arguments in
// calls to render(). The test assumes that all uses of templates are done through render().
import (
"go/ast"
"html/template"
- "log"
"runtime"
"sort"
"strings"
@@ -21,18 +20,14 @@ import (
"github.com/jba/templatecheck"
"golang.org/x/tools/go/packages"
- "golang.org/x/tools/internal/lsp/cache"
- "golang.org/x/tools/internal/lsp/debug"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
+ "golang.org/x/tools/gopls/internal/lsp/cache"
+ "golang.org/x/tools/gopls/internal/lsp/debug"
)
-type tdata struct {
+var templates = map[string]struct {
tmpl *template.Template
data interface{} // a value of the needed type
-}
-
-var templates = map[string]tdata{
+}{
"MainTmpl": {debug.MainTmpl, &debug.Instance{}},
"DebugTmpl": {debug.DebugTmpl, nil},
"RPCTmpl": {debug.RPCTmpl, &debug.Rpcs{}},
@@ -42,45 +37,9 @@ var templates = map[string]tdata{
"ViewTmpl": {debug.ViewTmpl, &cache.View{}},
"ClientTmpl": {debug.ClientTmpl, &debug.Client{}},
"ServerTmpl": {debug.ServerTmpl, &debug.Server{}},
- //"FileTmpl": {FileTmpl, source.Overlay{}}, // need to construct a source.Overlay in init
- "InfoTmpl": {debug.InfoTmpl, "something"},
- "MemoryTmpl": {debug.MemoryTmpl, runtime.MemStats{}},
-}
-
-// construct a source.Overlay for fileTmpl
-type fakeOverlay struct{}
-
-func (fakeOverlay) Version() int32 {
- return 0
-}
-func (fakeOverlay) Session() string {
- return ""
-}
-func (fakeOverlay) VersionedFileIdentity() source.VersionedFileIdentity {
- return source.VersionedFileIdentity{}
-}
-func (fakeOverlay) FileIdentity() source.FileIdentity {
- return source.FileIdentity{}
-}
-func (fakeOverlay) Kind() source.FileKind {
- return 0
-}
-func (fakeOverlay) Read() ([]byte, error) {
- return nil, nil
-}
-func (fakeOverlay) Saved() bool {
- return true
-}
-func (fakeOverlay) URI() span.URI {
- return ""
-}
-
-var _ source.Overlay = fakeOverlay{}
-
-func init() {
- log.SetFlags(log.Lshortfile)
- var v fakeOverlay
- templates["FileTmpl"] = tdata{debug.FileTmpl, v}
+ "FileTmpl": {debug.FileTmpl, &cache.Overlay{}},
+ "InfoTmpl": {debug.InfoTmpl, "something"},
+ "MemoryTmpl": {debug.MemoryTmpl, runtime.MemStats{}},
}
func TestTemplates(t *testing.T) {
@@ -90,7 +49,7 @@ func TestTemplates(t *testing.T) {
cfg := &packages.Config{
Mode: packages.NeedTypesInfo | packages.LoadAllSyntax, // figure out what's necessary PJW
}
- pkgs, err := packages.Load(cfg, "golang.org/x/tools/internal/lsp/debug")
+ pkgs, err := packages.Load(cfg, "golang.org/x/tools/gopls/internal/lsp/debug")
if err != nil {
t.Fatal(err)
}
@@ -169,6 +128,7 @@ func callsOf(p *packages.Package, tree *ast.File, name string) []*ast.CallExpr {
ast.Inspect(tree, f)
return ans
}
+
func treeOf(p *packages.Package, fname string) *ast.File {
for _, tree := range p.Syntax {
loc := tree.Package
diff --git a/gopls/test/gopls_test.go b/gopls/test/gopls_test.go
deleted file mode 100644
index fde262292..000000000
--- a/gopls/test/gopls_test.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gopls_test
-
-import (
- "os"
- "testing"
-
- "golang.org/x/tools/gopls/internal/hooks"
- cmdtest "golang.org/x/tools/internal/lsp/cmd/test"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/lsp/tests"
- "golang.org/x/tools/internal/testenv"
-)
-
-func TestMain(m *testing.M) {
- testenv.ExitIfSmallMachine()
- os.Exit(m.Run())
-}
-
-func TestCommandLine(t *testing.T) {
- cmdtest.TestCommandLine(t, "../../internal/lsp/testdata", commandLineOptions)
-}
-
-func commandLineOptions(options *source.Options) {
- options.Staticcheck = true
- options.GoDiff = false
- tests.DefaultOptions(options)
- hooks.Options(options)
-}
diff --git a/gopls/test/json_test.go b/gopls/test/json_test.go
index 5ea5b3434..7a91a953a 100644
--- a/gopls/test/json_test.go
+++ b/gopls/test/json_test.go
@@ -12,7 +12,7 @@ import (
"testing"
"github.com/google/go-cmp/cmp"
- "golang.org/x/tools/internal/lsp/protocol"
+ "golang.org/x/tools/gopls/internal/lsp/protocol"
)
// verify that type errors in Initialize lsp messages don't cause
@@ -28,8 +28,9 @@ import (
// bools are changed to numbers or strings
// numbers are changed to strings or bools
-// a recent Initialize message taken from a log
-const input = `{"processId":38349,"clientInfo":{"name":"vscode","version":"1.56.0-insider"},"rootPath":"/Users/pjw/latest/tools","rootUri":"file:///Users/pjw/latest/tools","capabilities":{"workspace":{"applyEdit":true,"workspaceEdit":{"documentChanges":true,"resourceOperations":["create","rename","delete"],"failureHandling":"textOnlyTransactional"},"didChangeConfiguration":{"dynamicRegistration":true},"didChangeWatchedFiles":{"dynamicRegistration":true},"symbol":{"dynamicRegistration":true,"symbolKind":{"valueSet":[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26]},"tagSupport":{"valueSet":[1]}},"executeCommand":{"dynamicRegistration":true},"configuration":true,"workspaceFolders":true,"semanticTokens":{"refreshSupport":true}},"textDocument":{"publishDiagnostics":{"relatedInformation":true,"versionSupport":false,"tagSupport":{"valueSet":[1,2]},"codeDescriptionSupport":true,"dataSupport":true},"synchronization":{"dynamicRegistration":true,"willSave":true,"willSaveWaitUntil":true,"didSave":true},"completion":{"dynamicRegistration":true,"contextSupport":true,"completionItem":{"snippetSupport":true,"commitCharactersSupport":true,"documentationFormat":["markdown","plaintext"],"deprecatedSupport":true,"preselectSupport":true,"tagSupport":{"valueSet":[1]},"insertReplaceSupport":true,"resolveSupport":{"properties":["documentation","detail","additionalTextEdits"]}},"completionItemKind":{"valueSet":[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25]}},"hover":{"dynamicRegistration":true,"contentFormat":["markdown","plaintext"]},"signatureHelp":{"dynamicRegistration":true,"signatureInformation":{"documentationFormat":["markdown","plaintext"],"parameterInformation":{"labelOffsetSupport":true},"activeParameterSupport":true},"contextSupport":true},"definition":{"dynamicRegistration":true,"linkSupport":true},"references":{"dynamicRegistration":true},"documentHighlight":{"dynamicRegistration":true},"documentSymbol":{"dynamicRegistration":true,"symbolKind":{"valueSet":[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26]},"hierarchicalDocumentSymbolSupport":true,"tagSupport":{"valueSet":[1]},"labelSupport":true},"codeAction":{"dynamicRegistration":true,"isPreferredSupport":true,"disabledSupport":true,"dataSupport":true,"resolveSupport":{"properties":["edit"]},"codeActionLiteralSupport":{"codeActionKind":{"valueSet":["","quickfix","refactor","refactor.extract","refactor.inline","refactor.rewrite","source","source.organizeImports"]}}},"codeLens":{"dynamicRegistration":true},"formatting":{"dynamicRegistration":true},"rangeFormatting":{"dynamicRegistration":true},"onTypeFormatting":{"dynamicRegistration":true},"rename":{"dynamicRegistration":true,"prepareSupport":true,"prepareSupportDefaultBehavior":true},"documentLink":{"dynamicRegistration":true,"tooltipSupport":true},"typeDefinition":{"dynamicRegistration":true,"linkSupport":true},"implementation":{"dynamicRegistration":true,"linkSupport":true},"colorProvider":{"dynamicRegistration":true},"foldingRange":{"dynamicRegistration":true,"rangeLimit":5000,"lineFoldingOnly":true},"declaration":{"dynamicRegistration":true,"linkSupport":true},"selectionRange":{"dynamicRegistration":true},"callHierarchy":{"dynamicRegistration":true},"semanticTokens":{"dynamicRegistration":true,"tokenTypes":["namespace","type","class","enum","interface","struct","typeParameter","parameter","variable","property","enumMember","event","function","member","macro","keyword","modifier","comment","string","number","regexp","operator"],"tokenModifiers":["declaration","definition","readonly","static","deprecated","abstract","async","modification","documentation","defaultLibrary"],"formats":["relative"],"requests":{"range":true,"full":{"delta":true}}}},"window":{"workDoneProgress":true}},"initializationOptions":{"usePlaceholders":true,"completionDocumentation":true,"verboseOutput":false,"codelenses":{"gc_details":true},"analyses":{"fillstruct":true,"staticcheck":true},"experimentalWorkspaceModule":true,"semanticTokens":true},"trace":"off","workspaceFolders":[{"uri":"file:///Users/pjw/latest/tools","name":"tools"}]}`
+// a recent Initialize message taken from a log (at some point
+// some field incompatibly changed from bool to int32)
+const input = `{"processId":46408,"clientInfo":{"name":"Visual Studio Code - Insiders","version":"1.76.0-insider"},"locale":"en-us","rootPath":"/Users/pjw/hakim","rootUri":"file:///Users/pjw/hakim","capabilities":{"workspace":{"applyEdit":true,"workspaceEdit":{"documentChanges":true,"resourceOperations":["create","rename","delete"],"failureHandling":"textOnlyTransactional","normalizesLineEndings":true,"changeAnnotationSupport":{"groupsOnLabel":true}},"configuration":true,"didChangeWatchedFiles":{"dynamicRegistration":true,"relativePatternSupport":true},"symbol":{"dynamicRegistration":true,"symbolKind":{"valueSet":[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26]},"tagSupport":{"valueSet":[1]},"resolveSupport":{"properties":["location.range"]}},"codeLens":{"refreshSupport":true},"executeCommand":{"dynamicRegistration":true},"didChangeConfiguration":{"dynamicRegistration":true},"workspaceFolders":true,"semanticTokens":{"refreshSupport":true},"fileOperations":{"dynamicRegistration":true,"didCreate":true,"didRename":true,"didDelete":true,"willCreate":true,"willRename":true,"willDelete":true},"inlineValue":{"refreshSupport":true},"inlayHint":{"refreshSupport":true},"diagnostics":{"refreshSupport":true}},"textDocument":{"publishDiagnostics":{"relatedInformation":true,"versionSupport":false,"tagSupport":{"valueSet":[1,2]},"codeDescriptionSupport":true,"dataSupport":true},"synchronization":{"dynamicRegistration":true,"willSave":true,"willSaveWaitUntil":true,"didSave":true},"completion":{"dynamicRegistration":true,"contextSupport":true,"completionItem":{"snippetSupport":true,"commitCharactersSupport":true,"documentationFormat":["markdown","plaintext"],"deprecatedSupport":true,"preselectSupport":true,"tagSupport":{"valueSet":[1]},"insertReplaceSupport":true,"resolveSupport":{"properties":["documentation","detail","additionalTextEdits"]},"insertTextModeSupport":{"valueSet":[1,2]},"labelDetailsSupport":true},"insertTextMode":2,"completionItemKind":{"valueSet":[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25]},"completionList":{"itemDefaults":["commitCharacters","editRange","insertTextFormat","insertTextMode"]}},"hover":{"dynamicRegistration":true,"contentFormat":["markdown","plaintext"]},"signatureHelp":{"dynamicRegistration":true,"signatureInformation":{"documentationFormat":["markdown","plaintext"],"parameterInformation":{"labelOffsetSupport":true},"activeParameterSupport":true},"contextSupport":true},"definition":{"dynamicRegistration":true,"linkSupport":true},"references":{"dynamicRegistration":true},"documentHighlight":{"dynamicRegistration":true},"documentSymbol":{"dynamicRegistration":true,"symbolKind":{"valueSet":[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26]},"hierarchicalDocumentSymbolSupport":true,"tagSupport":{"valueSet":[1]},"labelSupport":true},"codeAction":{"dynamicRegistration":true,"isPreferredSupport":true,"disabledSupport":true,"dataSupport":true,"resolveSupport":{"properties":["edit"]},"codeActionLiteralSupport":{"codeActionKind":{"valueSet":["","quickfix","refactor","refactor.extract","refactor.inline","refactor.rewrite","source","source.organizeImports"]}},"honorsChangeAnnotations":false},"codeLens":{"dynamicRegistration":true},"formatting":{"dynamicRegistration":true},"rangeFormatting":{"dynamicRegistration":true},"onTypeFormatting":{"dynamicRegistration":true},"rename":{"dynamicRegistration":true,"prepareSupport":true,"prepareSupportDefaultBehavior":1,"honorsChangeAnnotations":true},"documentLink":{"dynamicRegistration":true,"tooltipSupport":true},"typeDefinition":{"dynamicRegistration":true,"linkSupport":true},"implementation":{"dynamicRegistration":true,"linkSupport":true},"colorProvider":{"dynamicRegistration":true},"foldingRange":{"dynamicRegistration":true,"rangeLimit":5000,"lineFoldingOnly":true,"foldingRangeKind":{"valueSet":["comment","imports","region"]},"foldingRange":{"collapsedText":false}},"declaration":{"dynamicRegistration":true,"linkSupport":true},"selectionRange":{"dynamicRegistration":true},"callHierarchy":{"dynamicRegistration":true},"semanticTokens":{"dynamicRegistration":true,"tokenTypes":["namespace","type","class","enum","interface","struct","typeParameter","parameter","variable","property","enumMember","event","function","method","macro","keyword","modifier","comment","string","number","regexp","operator","decorator"],"tokenModifiers":["declaration","definition","readonly","static","deprecated","abstract","async","modification","documentation","defaultLibrary"],"formats":["relative"],"requests":{"range":true,"full":{"delta":true}},"multilineTokenSupport":false,"overlappingTokenSupport":false,"serverCancelSupport":true,"augmentsSyntaxTokens":true},"linkedEditingRange":{"dynamicRegistration":true},"typeHierarchy":{"dynamicRegistration":true},"inlineValue":{"dynamicRegistration":true},"inlayHint":{"dynamicRegistration":true,"resolveSupport":{"properties":["tooltip","textEdits","label.tooltip","label.location","label.command"]}},"diagnostic":{"dynamicRegistration":true,"relatedDocumentSupport":false}},"window":{"showMessage":{"messageActionItem":{"additionalPropertiesSupport":true}},"showDocument":{"support":true},"workDoneProgress":true},"general":{"staleRequestSupport":{"cancel":true,"retryOnContentModified":["textDocument/semanticTokens/full","textDocument/semanticTokens/range","textDocument/semanticTokens/full/delta"]},"regularExpressions":{"engine":"ECMAScript","version":"ES2020"},"markdown":{"parser":"marked","version":"1.1.0"},"positionEncodings":["utf-16"]},"notebookDocument":{"synchronization":{"dynamicRegistration":true,"executionSummarySupport":true}}},"initializationOptions":{"usePlaceholders":true,"completionDocumentation":true,"verboseOutput":false,"build.directoryFilters":["-foof","-internal/lsp/protocol/typescript"],"codelenses":{"reference":true,"gc_details":true},"analyses":{"fillstruct":true,"staticcheck":true,"unusedparams":false,"composites":false},"semanticTokens":true,"noSemanticString":true,"noSemanticNumber":true,"templateExtensions":["tmpl","gotmpl"],"ui.completion.matcher":"Fuzzy","ui.inlayhint.hints":{"assignVariableTypes":false,"compositeLiteralFields":false,"compositeLiteralTypes":false,"constantValues":false,"functionTypeParameters":false,"parameterNames":false,"rangeVariableTypes":false},"ui.vulncheck":"Off","allExperiments":true},"trace":"off","workspaceFolders":[{"uri":"file:///Users/pjw/hakim","name":"hakim"}]}`
type DiffReporter struct {
path cmp.Path
@@ -99,7 +100,7 @@ func allDeltas(t *testing.T, v [][]int, repls ...string) {
}
func tryChange(start, end int, repl string) error {
- var p, q protocol.InitializeParams
+ var p, q protocol.ParamInitialize
mod := input[:start] + repl + input[end:]
excerpt := func() (string, string) {
a := start - 5
@@ -115,13 +116,18 @@ func tryChange(start, end int, repl string) error {
mb := mod[a:b]
return ma, mb
}
+
if err := json.Unmarshal([]byte(input), &p); err != nil {
return fmt.Errorf("%s %v", repl, err)
}
- if err := json.Unmarshal([]byte(mod), &q); err == nil {
- return nil // no errors is ok
- } else if _, ok := err.(*json.UnmarshalTypeError); !ok {
- return fmt.Errorf("%T, not *json.UnmarshalTypeError", err)
+ switch err := json.Unmarshal([]byte(mod), &q).(type) {
+ case nil: //ok
+ case *json.UnmarshalTypeError:
+ break
+ case *protocol.UnmarshalError:
+ return nil // cmp.Diff produces several diffs for custom unmrshalers
+ default:
+ return fmt.Errorf("%T unexpected unmarshal error", err)
}
var r DiffReporter
diff --git a/imports/forward.go b/imports/forward.go
index 8be18a66b..d2547c743 100644
--- a/imports/forward.go
+++ b/imports/forward.go
@@ -40,7 +40,7 @@ var LocalPrefix string
//
// Note that filename's directory influences which imports can be chosen,
// so it is important that filename be accurate.
-// To process data ``as if'' it were in filename, pass the data as a non-nil src.
+// To process data “as if” it were in filename, pass the data as a non-nil src.
func Process(filename string, src []byte, opt *Options) ([]byte, error) {
var err error
if src == nil {
diff --git a/internal/analysisinternal/analysis.go b/internal/analysisinternal/analysis.go
index 78ee2c06b..d15f0eb7a 100644
--- a/internal/analysisinternal/analysis.go
+++ b/internal/analysisinternal/analysis.go
@@ -2,7 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package analysisinternal exposes internal-only fields from go/analysis.
+// Package analysisinternal provides gopls' internal analyses with a
+// number of helper functions that operate on typed syntax trees.
package analysisinternal
import (
@@ -11,20 +12,13 @@ import (
"go/ast"
"go/token"
"go/types"
- "strings"
-
- "golang.org/x/tools/go/ast/astutil"
- "golang.org/x/tools/internal/lsp/fuzzy"
+ "strconv"
)
-// Flag to gate diagnostics for fuzz tests in 1.18.
+// DiagnoseFuzzTests controls whether the 'tests' analyzer diagnoses fuzz tests
+// in Go 1.18+.
var DiagnoseFuzzTests bool = false
-var (
- GetTypeErrors func(p interface{}) []types.Error
- SetTypeErrors func(p interface{}, errors []types.Error)
-)
-
func TypeErrorEndPos(fset *token.FileSet, src []byte, start token.Pos) token.Pos {
// Get the end position for the type error.
offset, end := fset.PositionFor(start, false).Offset, start
@@ -37,7 +31,7 @@ func TypeErrorEndPos(fset *token.FileSet, src []byte, start token.Pos) token.Pos
return end
}
-func ZeroValue(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
+func ZeroValue(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
under := typ
if n, ok := typ.(*types.Named); ok {
under = n.Underlying()
@@ -57,7 +51,7 @@ func ZeroValue(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.T
case *types.Chan, *types.Interface, *types.Map, *types.Pointer, *types.Signature, *types.Slice, *types.Array:
return ast.NewIdent("nil")
case *types.Struct:
- texpr := TypeExpr(fset, f, pkg, typ) // typ because we want the name here.
+ texpr := TypeExpr(f, pkg, typ) // typ because we want the name here.
if texpr == nil {
return nil
}
@@ -81,7 +75,10 @@ func IsZeroValue(expr ast.Expr) bool {
}
}
-func TypeExpr(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
+// TypeExpr returns syntax for the specified type. References to
+// named types from packages other than pkg are qualified by an appropriate
+// package name, as defined by the import environment of file.
+func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
switch t := typ.(type) {
case *types.Basic:
switch t.Kind() {
@@ -91,7 +88,7 @@ func TypeExpr(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.Ty
return ast.NewIdent(t.Name())
}
case *types.Pointer:
- x := TypeExpr(fset, f, pkg, t.Elem())
+ x := TypeExpr(f, pkg, t.Elem())
if x == nil {
return nil
}
@@ -100,7 +97,7 @@ func TypeExpr(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.Ty
X: x,
}
case *types.Array:
- elt := TypeExpr(fset, f, pkg, t.Elem())
+ elt := TypeExpr(f, pkg, t.Elem())
if elt == nil {
return nil
}
@@ -112,7 +109,7 @@ func TypeExpr(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.Ty
Elt: elt,
}
case *types.Slice:
- elt := TypeExpr(fset, f, pkg, t.Elem())
+ elt := TypeExpr(f, pkg, t.Elem())
if elt == nil {
return nil
}
@@ -120,8 +117,8 @@ func TypeExpr(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.Ty
Elt: elt,
}
case *types.Map:
- key := TypeExpr(fset, f, pkg, t.Key())
- value := TypeExpr(fset, f, pkg, t.Elem())
+ key := TypeExpr(f, pkg, t.Key())
+ value := TypeExpr(f, pkg, t.Elem())
if key == nil || value == nil {
return nil
}
@@ -134,7 +131,7 @@ func TypeExpr(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.Ty
if t.Dir() == types.SendRecv {
dir = ast.SEND | ast.RECV
}
- value := TypeExpr(fset, f, pkg, t.Elem())
+ value := TypeExpr(f, pkg, t.Elem())
if value == nil {
return nil
}
@@ -145,7 +142,7 @@ func TypeExpr(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.Ty
case *types.Signature:
var params []*ast.Field
for i := 0; i < t.Params().Len(); i++ {
- p := TypeExpr(fset, f, pkg, t.Params().At(i).Type())
+ p := TypeExpr(f, pkg, t.Params().At(i).Type())
if p == nil {
return nil
}
@@ -160,7 +157,7 @@ func TypeExpr(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.Ty
}
var returns []*ast.Field
for i := 0; i < t.Results().Len(); i++ {
- r := TypeExpr(fset, f, pkg, t.Results().At(i).Type())
+ r := TypeExpr(f, pkg, t.Results().At(i).Type())
if r == nil {
return nil
}
@@ -184,13 +181,12 @@ func TypeExpr(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.Ty
return ast.NewIdent(t.Obj().Name())
}
pkgName := t.Obj().Pkg().Name()
+
// If the file already imports the package under another name, use that.
- for _, group := range astutil.Imports(fset, f) {
- for _, cand := range group {
- if strings.Trim(cand.Path.Value, `"`) == t.Obj().Pkg().Path() {
- if cand.Name != nil && cand.Name.Name != "" {
- pkgName = cand.Name.Name
- }
+ for _, cand := range f.Imports {
+ if path, _ := strconv.Unquote(cand.Path.Value); path == t.Obj().Pkg().Path() {
+ if cand.Name != nil && cand.Name.Name != "" {
+ pkgName = cand.Name.Name
}
}
}
@@ -210,14 +206,6 @@ func TypeExpr(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.Ty
}
}
-type TypeErrorPass string
-
-const (
- NoNewVars TypeErrorPass = "nonewvars"
- NoResultValues TypeErrorPass = "noresultvalues"
- UndeclaredName TypeErrorPass = "undeclaredname"
-)
-
// StmtToInsertVarBefore returns the ast.Stmt before which we can safely insert a new variable.
// Some examples:
//
@@ -311,19 +299,21 @@ func WalkASTWithParent(n ast.Node, f func(n ast.Node, parent ast.Node) bool) {
})
}
-// FindMatchingIdents finds all identifiers in 'node' that match any of the given types.
+// MatchingIdents finds the names of all identifiers in 'node' that match any of the given types.
// 'pos' represents the position at which the identifiers may be inserted. 'pos' must be within
// the scope of each of identifier we select. Otherwise, we will insert a variable at 'pos' that
// is unrecognized.
-func FindMatchingIdents(typs []types.Type, node ast.Node, pos token.Pos, info *types.Info, pkg *types.Package) map[types.Type][]*ast.Ident {
- matches := map[types.Type][]*ast.Ident{}
+func MatchingIdents(typs []types.Type, node ast.Node, pos token.Pos, info *types.Info, pkg *types.Package) map[types.Type][]string {
+
// Initialize matches to contain the variable types we are searching for.
+ matches := make(map[types.Type][]string)
for _, typ := range typs {
if typ == nil {
- continue
+ continue // TODO(adonovan): is this reachable?
}
- matches[typ] = []*ast.Ident{}
+ matches[typ] = nil // create entry
}
+
seen := map[types.Object]struct{}{}
ast.Inspect(node, func(n ast.Node) bool {
if n == nil {
@@ -335,8 +325,7 @@ func FindMatchingIdents(typs []types.Type, node ast.Node, pos token.Pos, info *t
//
// x := fakeStruct{f0: x}
//
- assignment, ok := n.(*ast.AssignStmt)
- if ok && pos > assignment.Pos() && pos <= assignment.End() {
+ if assign, ok := n.(*ast.AssignStmt); ok && pos > assign.Pos() && pos <= assign.End() {
return false
}
if n.End() > pos {
@@ -369,17 +358,17 @@ func FindMatchingIdents(typs []types.Type, node ast.Node, pos token.Pos, info *t
return true
}
// The object must match one of the types that we are searching for.
- if idents, ok := matches[obj.Type()]; ok {
- matches[obj.Type()] = append(idents, ast.NewIdent(ident.Name))
- }
- // If the object type does not exactly match any of the target types, greedily
- // find the first target type that the object type can satisfy.
- for typ := range matches {
- if obj.Type() == typ {
- continue
- }
- if equivalentTypes(obj.Type(), typ) {
- matches[typ] = append(matches[typ], ast.NewIdent(ident.Name))
+ // TODO(adonovan): opt: use typeutil.Map?
+ if names, ok := matches[obj.Type()]; ok {
+ matches[obj.Type()] = append(names, ident.Name)
+ } else {
+ // If the object type does not exactly match
+ // any of the target types, greedily find the first
+ // target type that the object type can satisfy.
+ for typ := range matches {
+ if equivalentTypes(obj.Type(), typ) {
+ matches[typ] = append(matches[typ], ident.Name)
+ }
}
}
return true
@@ -388,7 +377,7 @@ func FindMatchingIdents(typs []types.Type, node ast.Node, pos token.Pos, info *t
}
func equivalentTypes(want, got types.Type) bool {
- if want == got || types.Identical(want, got) {
+ if types.Identical(want, got) {
return true
}
// Code segment to help check for untyped equality from (golang/go#32146).
@@ -399,30 +388,3 @@ func equivalentTypes(want, got types.Type) bool {
}
return types.AssignableTo(want, got)
}
-
-// FindBestMatch employs fuzzy matching to evaluate the similarity of each given identifier to the
-// given pattern. We return the identifier whose name is most similar to the pattern.
-func FindBestMatch(pattern string, idents []*ast.Ident) ast.Expr {
- fuzz := fuzzy.NewMatcher(pattern)
- var bestFuzz ast.Expr
- highScore := float32(0) // minimum score is 0 (no match)
- for _, ident := range idents {
- // TODO: Improve scoring algorithm.
- score := fuzz.Score(ident.Name)
- if score > highScore {
- highScore = score
- bestFuzz = ident
- } else if score == 0 {
- // Order matters in the fuzzy matching algorithm. If we find no match
- // when matching the target to the identifier, try matching the identifier
- // to the target.
- revFuzz := fuzzy.NewMatcher(ident.Name)
- revScore := revFuzz.Score(pattern)
- if revScore > highScore {
- highScore = revScore
- bestFuzz = ident
- }
- }
- }
- return bestFuzz
-}
diff --git a/internal/apidiff/compatibility.go b/internal/apidiff/compatibility.go
index 6b5ba7582..2e327485b 100644
--- a/internal/apidiff/compatibility.go
+++ b/internal/apidiff/compatibility.go
@@ -138,13 +138,13 @@ func unexportedMethod(t *types.Interface) *types.Func {
}
// We need to check three things for structs:
-// 1. The set of exported fields must be compatible. This ensures that keyed struct
-// literals continue to compile. (There is no compatibility guarantee for unkeyed
-// struct literals.)
-// 2. The set of exported *selectable* fields must be compatible. This includes the exported
-// fields of all embedded structs. This ensures that selections continue to compile.
-// 3. If the old struct is comparable, so must the new one be. This ensures that equality
-// expressions and uses of struct values as map keys continue to compile.
+// 1. The set of exported fields must be compatible. This ensures that keyed struct
+// literals continue to compile. (There is no compatibility guarantee for unkeyed
+// struct literals.)
+// 2. The set of exported *selectable* fields must be compatible. This includes the exported
+// fields of all embedded structs. This ensures that selections continue to compile.
+// 3. If the old struct is comparable, so must the new one be. This ensures that equality
+// expressions and uses of struct values as map keys continue to compile.
//
// An unexported embedded struct can't appear in a struct literal outside the
// package, so it doesn't have to be present, or have the same name, in the new
diff --git a/internal/bug/bug.go b/internal/bug/bug.go
new file mode 100644
index 000000000..c18d35a6a
--- /dev/null
+++ b/internal/bug/bug.go
@@ -0,0 +1,132 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package bug provides utilities for reporting internal bugs, and being
+// notified when they occur.
+//
+// Philosophically, because gopls runs as a sidecar process that the user does
+// not directly control, sometimes it keeps going on broken invariants rather
+// than panicking. In those cases, bug reports provide a mechanism to alert
+// developers and capture relevant metadata.
+package bug
+
+import (
+ "fmt"
+ "runtime"
+ "runtime/debug"
+ "sort"
+ "sync"
+)
+
+// PanicOnBugs controls whether to panic when bugs are reported.
+//
+// It may be set to true during testing.
+var PanicOnBugs = false
+
+var (
+ mu sync.Mutex
+ exemplars map[string]Bug
+ waiters []chan<- Bug
+)
+
+// A Bug represents an unexpected event or broken invariant. They are used for
+// capturing metadata that helps us understand the event.
+type Bug struct {
+ File string // file containing the call to bug.Report
+ Line int // line containing the call to bug.Report
+ Description string // description of the bug
+ Data Data // additional metadata
+ Key string // key identifying the bug (file:line if available)
+ Stack string // call stack
+}
+
+// Data is additional metadata to record for a bug.
+type Data map[string]interface{}
+
+// Reportf reports a formatted bug message.
+func Reportf(format string, args ...interface{}) {
+ report(fmt.Sprintf(format, args...), nil)
+}
+
+// Errorf calls fmt.Errorf for the given arguments, and reports the resulting
+// error message as a bug.
+func Errorf(format string, args ...interface{}) error {
+ err := fmt.Errorf(format, args...)
+ report(err.Error(), nil)
+ return err
+}
+
+// Report records a new bug encountered on the server.
+// It uses reflection to report the position of the immediate caller.
+func Report(description string, data Data) {
+ report(description, data)
+}
+
+func report(description string, data Data) {
+ _, file, line, ok := runtime.Caller(2) // all exported reporting functions call report directly
+
+ key := "<missing callsite>"
+ if ok {
+ key = fmt.Sprintf("%s:%d", file, line)
+ }
+
+ if PanicOnBugs {
+ panic(fmt.Sprintf("%s: %s", key, description))
+ }
+
+ bug := Bug{
+ File: file,
+ Line: line,
+ Description: description,
+ Data: data,
+ Key: key,
+ Stack: string(debug.Stack()),
+ }
+
+ mu.Lock()
+ defer mu.Unlock()
+
+ if exemplars == nil {
+ exemplars = make(map[string]Bug)
+ }
+
+ if _, ok := exemplars[key]; !ok {
+ exemplars[key] = bug // capture one exemplar per key
+ }
+
+ for _, waiter := range waiters {
+ waiter <- bug
+ }
+ waiters = nil
+}
+
+// Notify returns a channel that will be sent the next bug to occur on the
+// server. This channel only ever receives one bug.
+func Notify() <-chan Bug {
+ mu.Lock()
+ defer mu.Unlock()
+
+ ch := make(chan Bug, 1) // 1-buffered so that bug reporting is non-blocking
+ waiters = append(waiters, ch)
+ return ch
+}
+
+// List returns a slice of bug exemplars -- the first bugs to occur at each
+// callsite.
+func List() []Bug {
+ mu.Lock()
+ defer mu.Unlock()
+
+ var bugs []Bug
+
+ for _, bug := range exemplars {
+ bugs = append(bugs, bug)
+ }
+
+ sort.Slice(bugs, func(i, j int) bool {
+ return bugs[i].Key < bugs[j].Key
+ })
+
+ return bugs
+}
diff --git a/internal/bug/bug_test.go b/internal/bug/bug_test.go
new file mode 100644
index 000000000..edfc10386
--- /dev/null
+++ b/internal/bug/bug_test.go
@@ -0,0 +1,65 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bug
+
+import (
+ "fmt"
+ "testing"
+)
+
+func resetForTesting() {
+ exemplars = nil
+ waiters = nil
+}
+
+func TestListBugs(t *testing.T) {
+ defer resetForTesting()
+
+ Report("bad", nil)
+
+ wantBugs(t, "bad")
+
+ for i := 0; i < 3; i++ {
+ Report(fmt.Sprintf("index:%d", i), nil)
+ }
+
+ wantBugs(t, "bad", "index:0")
+}
+
+func wantBugs(t *testing.T, want ...string) {
+ t.Helper()
+
+ bugs := List()
+ if got, want := len(bugs), len(want); got != want {
+ t.Errorf("List(): got %d bugs, want %d", got, want)
+ return
+ }
+
+ for i, b := range bugs {
+ if got, want := b.Description, want[i]; got != want {
+ t.Errorf("bug.List()[%d] = %q, want %q", i, got, want)
+ }
+ }
+}
+
+func TestBugNotification(t *testing.T) {
+ defer resetForTesting()
+
+ Report("unseen", nil)
+
+ notify1 := Notify()
+ notify2 := Notify()
+
+ Report("seen", Data{"answer": 42})
+
+ for _, got := range []Bug{<-notify1, <-notify2} {
+ if got, want := got.Description, "seen"; got != want {
+ t.Errorf("Saw bug %q, want %q", got, want)
+ }
+ if got, want := got.Data["answer"], 42; got != want {
+ t.Errorf(`bug.Data["answer"] = %v, want %v`, got, want)
+ }
+ }
+}
diff --git a/internal/diff/diff.go b/internal/diff/diff.go
new file mode 100644
index 000000000..2bc63c2a8
--- /dev/null
+++ b/internal/diff/diff.go
@@ -0,0 +1,169 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package diff computes differences between text files or strings.
+package diff
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// An Edit describes the replacement of a portion of a text file.
+type Edit struct {
+ Start, End int // byte offsets of the region to replace
+ New string // the replacement
+}
+
+func (e Edit) String() string {
+ return fmt.Sprintf("{Start:%d,End:%d,New:%s}", e.Start, e.End, e.New)
+}
+
+// Apply applies a sequence of edits to the src buffer and returns the
+// result. Edits are applied in order of start offset; edits with the
+// same start offset are applied in they order they were provided.
+//
+// Apply returns an error if any edit is out of bounds,
+// or if any pair of edits is overlapping.
+func Apply(src string, edits []Edit) (string, error) {
+ edits, size, err := validate(src, edits)
+ if err != nil {
+ return "", err
+ }
+
+ // Apply edits.
+ out := make([]byte, 0, size)
+ lastEnd := 0
+ for _, edit := range edits {
+ if lastEnd < edit.Start {
+ out = append(out, src[lastEnd:edit.Start]...)
+ }
+ out = append(out, edit.New...)
+ lastEnd = edit.End
+ }
+ out = append(out, src[lastEnd:]...)
+
+ if len(out) != size {
+ panic("wrong size")
+ }
+
+ return string(out), nil
+}
+
+// ApplyBytes is like Apply, but it accepts a byte slice.
+// The result is always a new array.
+func ApplyBytes(src []byte, edits []Edit) ([]byte, error) {
+ res, err := Apply(string(src), edits)
+ return []byte(res), err
+}
+
+// validate checks that edits are consistent with src,
+// and returns the size of the patched output.
+// It may return a different slice.
+func validate(src string, edits []Edit) ([]Edit, int, error) {
+ if !sort.IsSorted(editsSort(edits)) {
+ edits = append([]Edit(nil), edits...)
+ SortEdits(edits)
+ }
+
+ // Check validity of edits and compute final size.
+ size := len(src)
+ lastEnd := 0
+ for _, edit := range edits {
+ if !(0 <= edit.Start && edit.Start <= edit.End && edit.End <= len(src)) {
+ return nil, 0, fmt.Errorf("diff has out-of-bounds edits")
+ }
+ if edit.Start < lastEnd {
+ return nil, 0, fmt.Errorf("diff has overlapping edits")
+ }
+ size += len(edit.New) + edit.Start - edit.End
+ lastEnd = edit.End
+ }
+
+ return edits, size, nil
+}
+
+// SortEdits orders a slice of Edits by (start, end) offset.
+// This ordering puts insertions (end = start) before deletions
+// (end > start) at the same point, but uses a stable sort to preserve
+// the order of multiple insertions at the same point.
+// (Apply detects multiple deletions at the same point as an error.)
+func SortEdits(edits []Edit) {
+ sort.Stable(editsSort(edits))
+}
+
+type editsSort []Edit
+
+func (a editsSort) Len() int { return len(a) }
+func (a editsSort) Less(i, j int) bool {
+ if cmp := a[i].Start - a[j].Start; cmp != 0 {
+ return cmp < 0
+ }
+ return a[i].End < a[j].End
+}
+func (a editsSort) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+// lineEdits expands and merges a sequence of edits so that each
+// resulting edit replaces one or more complete lines.
+// See ApplyEdits for preconditions.
+func lineEdits(src string, edits []Edit) ([]Edit, error) {
+ edits, _, err := validate(src, edits)
+ if err != nil {
+ return nil, err
+ }
+
+ // Do all edits begin and end at the start of a line?
+ // TODO(adonovan): opt: is this fast path necessary?
+ // (Also, it complicates the result ownership.)
+ for _, edit := range edits {
+ if edit.Start >= len(src) || // insertion at EOF
+ edit.Start > 0 && src[edit.Start-1] != '\n' || // not at line start
+ edit.End > 0 && src[edit.End-1] != '\n' { // not at line start
+ goto expand
+ }
+ }
+ return edits, nil // aligned
+
+expand:
+ expanded := make([]Edit, 0, len(edits)) // a guess
+ prev := edits[0]
+ // TODO(adonovan): opt: start from the first misaligned edit.
+ // TODO(adonovan): opt: avoid quadratic cost of string += string.
+ for _, edit := range edits[1:] {
+ between := src[prev.End:edit.Start]
+ if !strings.Contains(between, "\n") {
+ // overlapping lines: combine with previous edit.
+ prev.New += between + edit.New
+ prev.End = edit.End
+ } else {
+ // non-overlapping lines: flush previous edit.
+ expanded = append(expanded, expandEdit(prev, src))
+ prev = edit
+ }
+ }
+ return append(expanded, expandEdit(prev, src)), nil // flush final edit
+}
+
+// expandEdit returns edit expanded to complete whole lines.
+func expandEdit(edit Edit, src string) Edit {
+ // Expand start left to start of line.
+ // (delta is the zero-based column number of of start.)
+ start := edit.Start
+ if delta := start - 1 - strings.LastIndex(src[:start], "\n"); delta > 0 {
+ edit.Start -= delta
+ edit.New = src[start-delta:start] + edit.New
+ }
+
+ // Expand end right to end of line.
+ end := edit.End
+ if nl := strings.IndexByte(src[end:], '\n'); nl < 0 {
+ edit.End = len(src) // extend to EOF
+ } else {
+ edit.End = end + nl + 1 // extend beyond \n
+ }
+ edit.New += src[end:edit.End]
+
+ return edit
+}
diff --git a/internal/diff/diff_test.go b/internal/diff/diff_test.go
new file mode 100644
index 000000000..b6881c1f2
--- /dev/null
+++ b/internal/diff/diff_test.go
@@ -0,0 +1,199 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package diff_test
+
+import (
+ "bytes"
+ "math/rand"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "reflect"
+ "strings"
+ "testing"
+ "unicode/utf8"
+
+ "golang.org/x/tools/internal/diff"
+ "golang.org/x/tools/internal/diff/difftest"
+ "golang.org/x/tools/internal/testenv"
+)
+
+func TestApply(t *testing.T) {
+ for _, tc := range difftest.TestCases {
+ t.Run(tc.Name, func(t *testing.T) {
+ got, err := diff.Apply(tc.In, tc.Edits)
+ if err != nil {
+ t.Fatalf("Apply(Edits) failed: %v", err)
+ }
+ if got != tc.Out {
+ t.Errorf("Apply(Edits): got %q, want %q", got, tc.Out)
+ }
+ if tc.LineEdits != nil {
+ got, err := diff.Apply(tc.In, tc.LineEdits)
+ if err != nil {
+ t.Fatalf("Apply(LineEdits) failed: %v", err)
+ }
+ if got != tc.Out {
+ t.Errorf("Apply(LineEdits): got %q, want %q", got, tc.Out)
+ }
+ }
+ })
+ }
+}
+
+func TestNEdits(t *testing.T) {
+ for _, tc := range difftest.TestCases {
+ edits := diff.Strings(tc.In, tc.Out)
+ got, err := diff.Apply(tc.In, edits)
+ if err != nil {
+ t.Fatalf("Apply failed: %v", err)
+ }
+ if got != tc.Out {
+ t.Fatalf("%s: got %q wanted %q", tc.Name, got, tc.Out)
+ }
+ if len(edits) < len(tc.Edits) { // should find subline edits
+ t.Errorf("got %v, expected %v for %#v", edits, tc.Edits, tc)
+ }
+ }
+}
+
+func TestNRandom(t *testing.T) {
+ rand.Seed(1)
+ for i := 0; i < 1000; i++ {
+ a := randstr("abω", 16)
+ b := randstr("abωc", 16)
+ edits := diff.Strings(a, b)
+ got, err := diff.Apply(a, edits)
+ if err != nil {
+ t.Fatalf("Apply failed: %v", err)
+ }
+ if got != b {
+ t.Fatalf("%d: got %q, wanted %q, starting with %q", i, got, b, a)
+ }
+ }
+}
+
+// $ go test -fuzz=FuzzRoundTrip ./internal/diff
+func FuzzRoundTrip(f *testing.F) {
+ f.Fuzz(func(t *testing.T, a, b string) {
+ if !utf8.ValidString(a) || !utf8.ValidString(b) {
+ return // inputs must be text
+ }
+ edits := diff.Strings(a, b)
+ got, err := diff.Apply(a, edits)
+ if err != nil {
+ t.Fatalf("Apply failed: %v", err)
+ }
+ if got != b {
+ t.Fatalf("applying diff(%q, %q) gives %q; edits=%v", a, b, got, edits)
+ }
+ })
+}
+
+func TestLineEdits(t *testing.T) {
+ for _, tc := range difftest.TestCases {
+ t.Run(tc.Name, func(t *testing.T) {
+ // if line edits not specified, it is the same as edits
+ edits := tc.LineEdits
+ if edits == nil {
+ edits = tc.Edits
+ }
+ got, err := diff.LineEdits(tc.In, tc.Edits)
+ if err != nil {
+ t.Fatalf("LineEdits: %v", err)
+ }
+ if !reflect.DeepEqual(got, edits) {
+ t.Errorf("LineEdits got\n%q, want\n%q\n%#v", got, edits, tc)
+ }
+ })
+ }
+}
+
+func TestToUnified(t *testing.T) {
+ testenv.NeedsTool(t, "patch")
+ for _, tc := range difftest.TestCases {
+ t.Run(tc.Name, func(t *testing.T) {
+ unified, err := diff.ToUnified(difftest.FileA, difftest.FileB, tc.In, tc.Edits)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if unified == "" {
+ return
+ }
+ orig := filepath.Join(t.TempDir(), "original")
+ err = os.WriteFile(orig, []byte(tc.In), 0644)
+ if err != nil {
+ t.Fatal(err)
+ }
+ temp := filepath.Join(t.TempDir(), "patched")
+ err = os.WriteFile(temp, []byte(tc.In), 0644)
+ if err != nil {
+ t.Fatal(err)
+ }
+ cmd := exec.Command("patch", "-p0", "-u", "-s", "-o", temp, orig)
+ cmd.Stdin = strings.NewReader(unified)
+ cmd.Stdout = new(bytes.Buffer)
+ cmd.Stderr = new(bytes.Buffer)
+ if err = cmd.Run(); err != nil {
+ t.Fatalf("%v: %q (%q) (%q)", err, cmd.String(),
+ cmd.Stderr, cmd.Stdout)
+ }
+ got, err := os.ReadFile(temp)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if string(got) != tc.Out {
+ t.Errorf("applying unified failed: got\n%q, wanted\n%q unified\n%q",
+ got, tc.Out, unified)
+ }
+
+ })
+ }
+}
+
+func TestRegressionOld001(t *testing.T) {
+ a := "// Copyright 2019 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage diff_test\n\nimport (\n\t\"fmt\"\n\t\"math/rand\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"golang.org/x/tools/gopls/internal/lsp/diff\"\n\t\"golang.org/x/tools/internal/diff/difftest\"\n\t\"golang.org/x/tools/gopls/internal/span\"\n)\n"
+
+ b := "// Copyright 2019 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage diff_test\n\nimport (\n\t\"fmt\"\n\t\"math/rand\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/google/safehtml/template\"\n\t\"golang.org/x/tools/gopls/internal/lsp/diff\"\n\t\"golang.org/x/tools/internal/diff/difftest\"\n\t\"golang.org/x/tools/gopls/internal/span\"\n)\n"
+ diffs := diff.Strings(a, b)
+ got, err := diff.Apply(a, diffs)
+ if err != nil {
+ t.Fatalf("Apply failed: %v", err)
+ }
+ if got != b {
+ i := 0
+ for ; i < len(a) && i < len(b) && got[i] == b[i]; i++ {
+ }
+ t.Errorf("oops %vd\n%q\n%q", diffs, got, b)
+ t.Errorf("\n%q\n%q", got[i:], b[i:])
+ }
+}
+
+func TestRegressionOld002(t *testing.T) {
+ a := "n\"\n)\n"
+ b := "n\"\n\t\"golang.org/x//nnal/stack\"\n)\n"
+ diffs := diff.Strings(a, b)
+ got, err := diff.Apply(a, diffs)
+ if err != nil {
+ t.Fatalf("Apply failed: %v", err)
+ }
+ if got != b {
+ i := 0
+ for ; i < len(a) && i < len(b) && got[i] == b[i]; i++ {
+ }
+ t.Errorf("oops %vd\n%q\n%q", diffs, got, b)
+ t.Errorf("\n%q\n%q", got[i:], b[i:])
+ }
+}
+
+// return a random string of length n made of characters from s
+func randstr(s string, n int) string {
+ src := []rune(s)
+ x := make([]rune, n)
+ for i := 0; i < n; i++ {
+ x[i] = src[rand.Intn(len(src))]
+ }
+ return string(x)
+}
diff --git a/internal/diff/difftest/difftest.go b/internal/diff/difftest/difftest.go
new file mode 100644
index 000000000..4a251111b
--- /dev/null
+++ b/internal/diff/difftest/difftest.go
@@ -0,0 +1,289 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package difftest supplies a set of tests that will operate on any
+// implementation of a diff algorithm as exposed by
+// "golang.org/x/tools/internal/diff"
+package difftest
+
+// There are two kinds of tests, semantic tests, and 'golden data' tests.
+// The semantic tests check that the computed diffs transform the input to
+// the output, and that 'patch' accepts the computed unified diffs.
+// The other tests just check that Edits and LineEdits haven't changed
+// unexpectedly. These fields may need to be changed when the diff algorithm
+// changes.
+
+import (
+ "testing"
+
+ "golang.org/x/tools/internal/diff"
+)
+
+const (
+ FileA = "from"
+ FileB = "to"
+ UnifiedPrefix = "--- " + FileA + "\n+++ " + FileB + "\n"
+)
+
+var TestCases = []struct {
+ Name, In, Out, Unified string
+ Edits, LineEdits []diff.Edit
+ NoDiff bool
+}{{
+ Name: "empty",
+ In: "",
+ Out: "",
+}, {
+ Name: "no_diff",
+ In: "gargantuan\n",
+ Out: "gargantuan\n",
+}, {
+ Name: "replace_all",
+ In: "fruit\n",
+ Out: "cheese\n",
+ Unified: UnifiedPrefix + `
+@@ -1 +1 @@
+-fruit
++cheese
+`[1:],
+ Edits: []diff.Edit{{Start: 0, End: 5, New: "cheese"}},
+ LineEdits: []diff.Edit{{Start: 0, End: 6, New: "cheese\n"}},
+}, {
+ Name: "insert_rune",
+ In: "gord\n",
+ Out: "gourd\n",
+ Unified: UnifiedPrefix + `
+@@ -1 +1 @@
+-gord
++gourd
+`[1:],
+ Edits: []diff.Edit{{Start: 2, End: 2, New: "u"}},
+ LineEdits: []diff.Edit{{Start: 0, End: 5, New: "gourd\n"}},
+}, {
+ Name: "delete_rune",
+ In: "groat\n",
+ Out: "goat\n",
+ Unified: UnifiedPrefix + `
+@@ -1 +1 @@
+-groat
++goat
+`[1:],
+ Edits: []diff.Edit{{Start: 1, End: 2, New: ""}},
+ LineEdits: []diff.Edit{{Start: 0, End: 6, New: "goat\n"}},
+}, {
+ Name: "replace_rune",
+ In: "loud\n",
+ Out: "lord\n",
+ Unified: UnifiedPrefix + `
+@@ -1 +1 @@
+-loud
++lord
+`[1:],
+ Edits: []diff.Edit{{Start: 2, End: 3, New: "r"}},
+ LineEdits: []diff.Edit{{Start: 0, End: 5, New: "lord\n"}},
+}, {
+ Name: "replace_partials",
+ In: "blanket\n",
+ Out: "bunker\n",
+ Unified: UnifiedPrefix + `
+@@ -1 +1 @@
+-blanket
++bunker
+`[1:],
+ Edits: []diff.Edit{
+ {Start: 1, End: 3, New: "u"},
+ {Start: 6, End: 7, New: "r"},
+ },
+ LineEdits: []diff.Edit{{Start: 0, End: 8, New: "bunker\n"}},
+}, {
+ Name: "insert_line",
+ In: "1: one\n3: three\n",
+ Out: "1: one\n2: two\n3: three\n",
+ Unified: UnifiedPrefix + `
+@@ -1,2 +1,3 @@
+ 1: one
++2: two
+ 3: three
+`[1:],
+ Edits: []diff.Edit{{Start: 7, End: 7, New: "2: two\n"}},
+}, {
+ Name: "replace_no_newline",
+ In: "A",
+ Out: "B",
+ Unified: UnifiedPrefix + `
+@@ -1 +1 @@
+-A
+\ No newline at end of file
++B
+\ No newline at end of file
+`[1:],
+ Edits: []diff.Edit{{Start: 0, End: 1, New: "B"}},
+}, {
+ Name: "append_empty",
+ In: "", // GNU diff -u special case: -0,0
+ Out: "AB\nC",
+ Unified: UnifiedPrefix + `
+@@ -0,0 +1,2 @@
++AB
++C
+\ No newline at end of file
+`[1:],
+ Edits: []diff.Edit{{Start: 0, End: 0, New: "AB\nC"}},
+ LineEdits: []diff.Edit{{Start: 0, End: 0, New: "AB\nC"}},
+},
+ // TODO(adonovan): fix this test: GNU diff -u prints "+1,2", Unifies prints "+1,3".
+ // {
+ // Name: "add_start",
+ // In: "A",
+ // Out: "B\nCA",
+ // Unified: UnifiedPrefix + `
+ // @@ -1 +1,2 @@
+ // -A
+ // \ No newline at end of file
+ // +B
+ // +CA
+ // \ No newline at end of file
+ // `[1:],
+ // Edits: []diff.TextEdit{{Span: newSpan(0, 0), NewText: "B\nC"}},
+ // LineEdits: []diff.TextEdit{{Span: newSpan(0, 0), NewText: "B\nC"}},
+ // },
+ {
+ Name: "add_end",
+ In: "A",
+ Out: "AB",
+ Unified: UnifiedPrefix + `
+@@ -1 +1 @@
+-A
+\ No newline at end of file
++AB
+\ No newline at end of file
+`[1:],
+ Edits: []diff.Edit{{Start: 1, End: 1, New: "B"}},
+ LineEdits: []diff.Edit{{Start: 0, End: 1, New: "AB"}},
+ }, {
+ Name: "add_empty",
+ In: "",
+ Out: "AB\nC",
+ Unified: UnifiedPrefix + `
+@@ -0,0 +1,2 @@
++AB
++C
+\ No newline at end of file
+`[1:],
+ Edits: []diff.Edit{{Start: 0, End: 0, New: "AB\nC"}},
+ LineEdits: []diff.Edit{{Start: 0, End: 0, New: "AB\nC"}},
+ }, {
+ Name: "add_newline",
+ In: "A",
+ Out: "A\n",
+ Unified: UnifiedPrefix + `
+@@ -1 +1 @@
+-A
+\ No newline at end of file
++A
+`[1:],
+ Edits: []diff.Edit{{Start: 1, End: 1, New: "\n"}},
+ LineEdits: []diff.Edit{{Start: 0, End: 1, New: "A\n"}},
+ }, {
+ Name: "delete_front",
+ In: "A\nB\nC\nA\nB\nB\nA\n",
+ Out: "C\nB\nA\nB\nA\nC\n",
+ Unified: UnifiedPrefix + `
+@@ -1,7 +1,6 @@
+-A
+-B
+ C
++B
+ A
+ B
+-B
+ A
++C
+`[1:],
+ NoDiff: true, // unified diff is different but valid
+ Edits: []diff.Edit{
+ {Start: 0, End: 4, New: ""},
+ {Start: 6, End: 6, New: "B\n"},
+ {Start: 10, End: 12, New: ""},
+ {Start: 14, End: 14, New: "C\n"},
+ },
+ LineEdits: []diff.Edit{
+ {Start: 0, End: 6, New: "C\n"},
+ {Start: 6, End: 8, New: "B\nA\n"},
+ {Start: 10, End: 14, New: "A\n"},
+ {Start: 14, End: 14, New: "C\n"},
+ },
+ }, {
+ Name: "replace_last_line",
+ In: "A\nB\n",
+ Out: "A\nC\n\n",
+ Unified: UnifiedPrefix + `
+@@ -1,2 +1,3 @@
+ A
+-B
++C
++
+`[1:],
+ Edits: []diff.Edit{{Start: 2, End: 3, New: "C\n"}},
+ LineEdits: []diff.Edit{{Start: 2, End: 4, New: "C\n\n"}},
+ },
+ {
+ Name: "multiple_replace",
+ In: "A\nB\nC\nD\nE\nF\nG\n",
+ Out: "A\nH\nI\nJ\nE\nF\nK\n",
+ Unified: UnifiedPrefix + `
+@@ -1,7 +1,7 @@
+ A
+-B
+-C
+-D
++H
++I
++J
+ E
+ F
+-G
++K
+`[1:],
+ Edits: []diff.Edit{
+ {Start: 2, End: 8, New: "H\nI\nJ\n"},
+ {Start: 12, End: 14, New: "K\n"},
+ },
+ NoDiff: true, // diff algorithm produces different delete/insert pattern
+ },
+ {
+ Name: "extra_newline",
+ In: "\nA\n",
+ Out: "A\n",
+ Edits: []diff.Edit{{Start: 0, End: 1, New: ""}},
+ Unified: UnifiedPrefix + `@@ -1,2 +1 @@
+-
+ A
+`,
+ },
+}
+
+func DiffTest(t *testing.T, compute func(before, after string) []diff.Edit) {
+ for _, test := range TestCases {
+ t.Run(test.Name, func(t *testing.T) {
+ edits := compute(test.In, test.Out)
+ got, err := diff.Apply(test.In, edits)
+ if err != nil {
+ t.Fatalf("Apply failed: %v", err)
+ }
+ unified, err := diff.ToUnified(FileA, FileB, test.In, edits)
+ if err != nil {
+ t.Fatalf("ToUnified: %v", err)
+ }
+ if got != test.Out {
+ t.Errorf("Apply: got patched:\n%v\nfrom diff:\n%v\nexpected:\n%v",
+ got, unified, test.Out)
+ }
+ if !test.NoDiff && unified != test.Unified {
+ t.Errorf("Unified: got diff:\n%q\nexpected:\n%q diffs:%v",
+ unified, test.Unified, edits)
+ }
+ })
+ }
+}
diff --git a/internal/diff/difftest/difftest_test.go b/internal/diff/difftest/difftest_test.go
new file mode 100644
index 000000000..a990e5224
--- /dev/null
+++ b/internal/diff/difftest/difftest_test.go
@@ -0,0 +1,83 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package difftest supplies a set of tests that will operate on any
+// implementation of a diff algorithm as exposed by
+// "golang.org/x/tools/internal/diff"
+package difftest_test
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "strings"
+ "testing"
+
+ "golang.org/x/tools/internal/diff/difftest"
+ "golang.org/x/tools/internal/testenv"
+)
+
+func TestVerifyUnified(t *testing.T) {
+ testenv.NeedsTool(t, "diff")
+ for _, test := range difftest.TestCases {
+ t.Run(test.Name, func(t *testing.T) {
+ if test.NoDiff {
+ t.Skip("diff tool produces expected different results")
+ }
+ diff, err := getDiffOutput(test.In, test.Out)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(diff) > 0 {
+ diff = difftest.UnifiedPrefix + diff
+ }
+ if diff != test.Unified {
+ t.Errorf("unified:\n%s\ndiff -u:\n%s", test.Unified, diff)
+ }
+ })
+ }
+}
+
+func getDiffOutput(a, b string) (string, error) {
+ fileA, err := ioutil.TempFile("", "myers.in")
+ if err != nil {
+ return "", err
+ }
+ defer os.Remove(fileA.Name())
+ if _, err := fileA.Write([]byte(a)); err != nil {
+ return "", err
+ }
+ if err := fileA.Close(); err != nil {
+ return "", err
+ }
+ fileB, err := ioutil.TempFile("", "myers.in")
+ if err != nil {
+ return "", err
+ }
+ defer os.Remove(fileB.Name())
+ if _, err := fileB.Write([]byte(b)); err != nil {
+ return "", err
+ }
+ if err := fileB.Close(); err != nil {
+ return "", err
+ }
+ cmd := exec.Command("diff", "-u", fileA.Name(), fileB.Name())
+ cmd.Env = append(cmd.Env, "LANG=en_US.UTF-8")
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ if _, ok := err.(*exec.ExitError); !ok {
+ return "", fmt.Errorf("failed to run diff -u %v %v: %v\n%v", fileA.Name(), fileB.Name(), err, string(out))
+ }
+ }
+ diff := string(out)
+ if len(diff) <= 0 {
+ return diff, nil
+ }
+ bits := strings.SplitN(diff, "\n", 3)
+ if len(bits) != 3 {
+ return "", fmt.Errorf("diff output did not have file prefix:\n%s", diff)
+ }
+ return bits[2], nil
+}
diff --git a/internal/diff/export_test.go b/internal/diff/export_test.go
new file mode 100644
index 000000000..eedf0dd77
--- /dev/null
+++ b/internal/diff/export_test.go
@@ -0,0 +1,9 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package diff
+
+// This file exports some private declarations to tests.
+
+var LineEdits = lineEdits
diff --git a/internal/diff/lcs/common.go b/internal/diff/lcs/common.go
new file mode 100644
index 000000000..c3e82dd26
--- /dev/null
+++ b/internal/diff/lcs/common.go
@@ -0,0 +1,179 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lcs
+
+import (
+ "log"
+ "sort"
+)
+
+// lcs is a longest common sequence
+type lcs []diag
+
+// A diag is a piece of the edit graph where A[X+i] == B[Y+i], for 0<=i<Len.
+// All computed diagonals are parts of a longest common subsequence.
+type diag struct {
+ X, Y int
+ Len int
+}
+
+// sort sorts in place, by lowest X, and if tied, inversely by Len
+func (l lcs) sort() lcs {
+ sort.Slice(l, func(i, j int) bool {
+ if l[i].X != l[j].X {
+ return l[i].X < l[j].X
+ }
+ return l[i].Len > l[j].Len
+ })
+ return l
+}
+
+// validate that the elements of the lcs do not overlap
+// (can only happen when the two-sided algorithm ends early)
+// expects the lcs to be sorted
+func (l lcs) valid() bool {
+ for i := 1; i < len(l); i++ {
+ if l[i-1].X+l[i-1].Len > l[i].X {
+ return false
+ }
+ if l[i-1].Y+l[i-1].Len > l[i].Y {
+ return false
+ }
+ }
+ return true
+}
+
+// repair overlapping lcs
+// only called if two-sided stops early
+func (l lcs) fix() lcs {
+ // from the set of diagonals in l, find a maximal non-conflicting set
+ // this problem may be NP-complete, but we use a greedy heuristic,
+ // which is quadratic, but with a better data structure, could be D log D.
+ // indepedent is not enough: {0,3,1} and {3,0,2} can't both occur in an lcs
+ // which has to have monotone x and y
+ if len(l) == 0 {
+ return nil
+ }
+ sort.Slice(l, func(i, j int) bool { return l[i].Len > l[j].Len })
+ tmp := make(lcs, 0, len(l))
+ tmp = append(tmp, l[0])
+ for i := 1; i < len(l); i++ {
+ var dir direction
+ nxt := l[i]
+ for _, in := range tmp {
+ if dir, nxt = overlap(in, nxt); dir == empty || dir == bad {
+ break
+ }
+ }
+ if nxt.Len > 0 && dir != bad {
+ tmp = append(tmp, nxt)
+ }
+ }
+ tmp.sort()
+ if false && !tmp.valid() { // debug checking
+ log.Fatalf("here %d", len(tmp))
+ }
+ return tmp
+}
+
+type direction int
+
+const (
+ empty direction = iota // diag is empty (so not in lcs)
+ leftdown // proposed acceptably to the left and below
+ rightup // proposed diag is acceptably to the right and above
+ bad // proposed diag is inconsistent with the lcs so far
+)
+
+// overlap trims the proposed diag prop so it doesn't overlap with
+// the existing diag that has already been added to the lcs.
+func overlap(exist, prop diag) (direction, diag) {
+ if prop.X <= exist.X && exist.X < prop.X+prop.Len {
+ // remove the end of prop where it overlaps with the X end of exist
+ delta := prop.X + prop.Len - exist.X
+ prop.Len -= delta
+ if prop.Len <= 0 {
+ return empty, prop
+ }
+ }
+ if exist.X <= prop.X && prop.X < exist.X+exist.Len {
+ // remove the beginning of prop where overlaps with exist
+ delta := exist.X + exist.Len - prop.X
+ prop.Len -= delta
+ if prop.Len <= 0 {
+ return empty, prop
+ }
+ prop.X += delta
+ prop.Y += delta
+ }
+ if prop.Y <= exist.Y && exist.Y < prop.Y+prop.Len {
+ // remove the end of prop that overlaps (in Y) with exist
+ delta := prop.Y + prop.Len - exist.Y
+ prop.Len -= delta
+ if prop.Len <= 0 {
+ return empty, prop
+ }
+ }
+ if exist.Y <= prop.Y && prop.Y < exist.Y+exist.Len {
+ // remove the beginning of peop that overlaps with exist
+ delta := exist.Y + exist.Len - prop.Y
+ prop.Len -= delta
+ if prop.Len <= 0 {
+ return empty, prop
+ }
+ prop.X += delta // no test reaches this code
+ prop.Y += delta
+ }
+ if prop.X+prop.Len <= exist.X && prop.Y+prop.Len <= exist.Y {
+ return leftdown, prop
+ }
+ if exist.X+exist.Len <= prop.X && exist.Y+exist.Len <= prop.Y {
+ return rightup, prop
+ }
+ // prop can't be in an lcs that contains exist
+ return bad, prop
+}
+
+// manipulating Diag and lcs
+
+// prepend a diagonal (x,y)-(x+1,y+1) segment either to an empty lcs
+// or to its first Diag. prepend is only called to extend diagonals
+// the backward direction.
+func (lcs lcs) prepend(x, y int) lcs {
+ if len(lcs) > 0 {
+ d := &lcs[0]
+ if int(d.X) == x+1 && int(d.Y) == y+1 {
+ // extend the diagonal down and to the left
+ d.X, d.Y = int(x), int(y)
+ d.Len++
+ return lcs
+ }
+ }
+
+ r := diag{X: int(x), Y: int(y), Len: 1}
+ lcs = append([]diag{r}, lcs...)
+ return lcs
+}
+
+// append appends a diagonal, or extends the existing one.
+// by adding the edge (x,y)-(x+1.y+1). append is only called
+// to extend diagonals in the forward direction.
+func (lcs lcs) append(x, y int) lcs {
+ if len(lcs) > 0 {
+ last := &lcs[len(lcs)-1]
+ // Expand last element if adjoining.
+ if last.X+last.Len == x && last.Y+last.Len == y {
+ last.Len++
+ return lcs
+ }
+ }
+
+ return append(lcs, diag{X: x, Y: y, Len: 1})
+}
+
+// enforce constraint on d, k
+func ok(d, k int) bool {
+ return d >= 0 && -d <= k && k <= d
+}
diff --git a/internal/diff/lcs/common_test.go b/internal/diff/lcs/common_test.go
new file mode 100644
index 000000000..f19245e40
--- /dev/null
+++ b/internal/diff/lcs/common_test.go
@@ -0,0 +1,140 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lcs
+
+import (
+ "log"
+ "math/rand"
+ "strings"
+ "testing"
+)
+
+type Btest struct {
+ a, b string
+ lcs []string
+}
+
+var Btests = []Btest{
+ {"aaabab", "abaab", []string{"abab", "aaab"}},
+ {"aabbba", "baaba", []string{"aaba"}},
+ {"cabbx", "cbabx", []string{"cabx", "cbbx"}},
+ {"c", "cb", []string{"c"}},
+ {"aaba", "bbb", []string{"b"}},
+ {"bbaabb", "b", []string{"b"}},
+ {"baaabb", "bbaba", []string{"bbb", "baa", "bab"}},
+ {"baaabb", "abbab", []string{"abb", "bab", "aab"}},
+ {"baaba", "aaabba", []string{"aaba"}},
+ {"ca", "cba", []string{"ca"}},
+ {"ccbcbc", "abba", []string{"bb"}},
+ {"ccbcbc", "aabba", []string{"bb"}},
+ {"ccb", "cba", []string{"cb"}},
+ {"caef", "axe", []string{"ae"}},
+ {"bbaabb", "baabb", []string{"baabb"}},
+ // Example from Myers:
+ {"abcabba", "cbabac", []string{"caba", "baba", "cbba"}},
+ {"3456aaa", "aaa", []string{"aaa"}},
+ {"aaa", "aaa123", []string{"aaa"}},
+ {"aabaa", "aacaa", []string{"aaaa"}},
+ {"1a", "a", []string{"a"}},
+ {"abab", "bb", []string{"bb"}},
+ {"123", "ab", []string{""}},
+ {"a", "b", []string{""}},
+ {"abc", "123", []string{""}},
+ {"aa", "aa", []string{"aa"}},
+ {"abcde", "12345", []string{""}},
+ {"aaa3456", "aaa", []string{"aaa"}},
+ {"abcde", "12345a", []string{"a"}},
+ {"ab", "123", []string{""}},
+ {"1a2", "a", []string{"a"}},
+ // for two-sided
+ {"babaab", "cccaba", []string{"aba"}},
+ {"aabbab", "cbcabc", []string{"bab"}},
+ {"abaabb", "bcacab", []string{"baab"}},
+ {"abaabb", "abaaaa", []string{"abaa"}},
+ {"bababb", "baaabb", []string{"baabb"}},
+ {"abbbaa", "cabacc", []string{"aba"}},
+ {"aabbaa", "aacaba", []string{"aaaa", "aaba"}},
+}
+
+func init() {
+ log.SetFlags(log.Lshortfile)
+}
+
+func check(t *testing.T, str string, lcs lcs, want []string) {
+ t.Helper()
+ if !lcs.valid() {
+ t.Errorf("bad lcs %v", lcs)
+ }
+ var got strings.Builder
+ for _, dd := range lcs {
+ got.WriteString(str[dd.X : dd.X+dd.Len])
+ }
+ ans := got.String()
+ for _, w := range want {
+ if ans == w {
+ return
+ }
+ }
+ t.Fatalf("str=%q lcs=%v want=%q got=%q", str, lcs, want, ans)
+}
+
+func checkDiffs(t *testing.T, before string, diffs []Diff, after string) {
+ t.Helper()
+ var ans strings.Builder
+ sofar := 0 // index of position in before
+ for _, d := range diffs {
+ if sofar < d.Start {
+ ans.WriteString(before[sofar:d.Start])
+ }
+ ans.WriteString(after[d.ReplStart:d.ReplEnd])
+ sofar = d.End
+ }
+ ans.WriteString(before[sofar:])
+ if ans.String() != after {
+ t.Fatalf("diff %v took %q to %q, not to %q", diffs, before, ans.String(), after)
+ }
+}
+
+func lcslen(l lcs) int {
+ ans := 0
+ for _, d := range l {
+ ans += int(d.Len)
+ }
+ return ans
+}
+
+// return a random string of length n made of characters from s
+func randstr(s string, n int) string {
+ src := []rune(s)
+ x := make([]rune, n)
+ for i := 0; i < n; i++ {
+ x[i] = src[rand.Intn(len(src))]
+ }
+ return string(x)
+}
+
+func TestLcsFix(t *testing.T) {
+ tests := []struct{ before, after lcs }{
+ {lcs{diag{0, 0, 3}, diag{2, 2, 5}, diag{3, 4, 5}, diag{8, 9, 4}}, lcs{diag{0, 0, 2}, diag{2, 2, 1}, diag{3, 4, 5}, diag{8, 9, 4}}},
+ {lcs{diag{1, 1, 6}, diag{6, 12, 3}}, lcs{diag{1, 1, 5}, diag{6, 12, 3}}},
+ {lcs{diag{0, 0, 4}, diag{3, 5, 4}}, lcs{diag{0, 0, 3}, diag{3, 5, 4}}},
+ {lcs{diag{0, 20, 1}, diag{0, 0, 3}, diag{1, 20, 4}}, lcs{diag{0, 0, 3}, diag{3, 22, 2}}},
+ {lcs{diag{0, 0, 4}, diag{1, 1, 2}}, lcs{diag{0, 0, 4}}},
+ {lcs{diag{0, 0, 4}}, lcs{diag{0, 0, 4}}},
+ {lcs{}, lcs{}},
+ {lcs{diag{0, 0, 4}, diag{1, 1, 6}, diag{3, 3, 2}}, lcs{diag{0, 0, 1}, diag{1, 1, 6}}},
+ }
+ for n, x := range tests {
+ got := x.before.fix()
+ if len(got) != len(x.after) {
+ t.Errorf("got %v, expected %v, for %v", got, x.after, x.before)
+ }
+ olen := lcslen(x.after)
+ glen := lcslen(got)
+ if olen != glen {
+ t.Errorf("%d: lens(%d,%d) differ, %v, %v, %v", n, glen, olen, got, x.after, x.before)
+ }
+ }
+}
diff --git a/internal/diff/lcs/doc.go b/internal/diff/lcs/doc.go
new file mode 100644
index 000000000..dc779f38a
--- /dev/null
+++ b/internal/diff/lcs/doc.go
@@ -0,0 +1,156 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// package lcs contains code to find longest-common-subsequences
+// (and diffs)
+package lcs
+
+/*
+Compute longest-common-subsequences of two slices A, B using
+algorithms from Myers' paper. A longest-common-subsequence
+(LCS from now on) of A and B is a maximal set of lexically increasing
+pairs of subscripts (x,y) with A[x]==B[y]. There may be many LCS, but
+they all have the same length. An LCS determines a sequence of edits
+that changes A into B.
+
+The key concept is the edit graph of A and B.
+If A has length N and B has length M, then the edit graph has
+vertices v[i][j] for 0 <= i <= N, 0 <= j <= M. There is a
+horizontal edge from v[i][j] to v[i+1][j] whenever both are in
+the graph, and a vertical edge from v[i][j] to f[i][j+1] similarly.
+When A[i] == B[j] there is a diagonal edge from v[i][j] to v[i+1][j+1].
+
+A path between in the graph between (0,0) and (N,M) determines a sequence
+of edits converting A into B: each horizontal edge corresponds to removing
+an element of A, and each vertical edge corresponds to inserting an
+element of B.
+
+A vertex (x,y) is on (forward) diagonal k if x-y=k. A path in the graph
+is of length D if it has D non-diagonal edges. The algorithms generate
+forward paths (in which at least one of x,y increases at each edge),
+or backward paths (in which at least one of x,y decreases at each edge),
+or a combination. (Note that the orientation is the traditional mathematical one,
+with the origin in the lower-left corner.)
+
+Here is the edit graph for A:"aabbaa", B:"aacaba". (I know the diagonals look weird.)
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ b | | | ___/‾‾‾ | ___/‾‾‾ | | |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ c | | | | | | |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ a a b b a a
+
+
+The algorithm labels a vertex (x,y) with D,k if it is on diagonal k and at
+the end of a maximal path of length D. (Because x-y=k it suffices to remember
+only the x coordinate of the vertex.)
+
+The forward algorithm: Find the longest diagonal starting at (0,0) and
+label its end with D=0,k=0. From that vertex take a vertical step and
+then follow the longest diagonal (up and to the right), and label that vertex
+with D=1,k=-1. From the D=0,k=0 point take a horizontal step and the follow
+the longest diagonal (up and to the right) and label that vertex
+D=1,k=1. In the same way, having labelled all the D vertices,
+from a vertex labelled D,k find two vertices
+tentatively labelled D+1,k-1 and D+1,k+1. There may be two on the same
+diagonal, in which case take the one with the larger x.
+
+Eventually the path gets to (N,M), and the diagonals on it are the LCS.
+
+Here is the edit graph with the ends of D-paths labelled. (So, for instance,
+0/2,2 indicates that x=2,y=2 is labelled with 0, as it should be, since the first
+step is to go up the longest diagonal from (0,0).)
+A:"aabbaa", B:"aacaba"
+ ⊙ ------- ⊙ ------- ⊙ -------(3/3,6)------- ⊙ -------(3/5,6)-------(4/6,6)
+ a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ |
+ ⊙ ------- ⊙ ------- ⊙ -------(2/3,5)------- ⊙ ------- ⊙ ------- ⊙
+ b | | | ___/‾‾‾ | ___/‾‾‾ | | |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ -------(3/5,4)------- ⊙
+ a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ |
+ ⊙ ------- ⊙ -------(1/2,3)-------(2/3,3)------- ⊙ ------- ⊙ ------- ⊙
+ c | | | | | | |
+ ⊙ ------- ⊙ -------(0/2,2)-------(1/3,2)-------(2/4,2)-------(3/5,2)-------(4/6,2)
+ a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ a a b b a a
+
+The 4-path is reconstructed starting at (4/6,6), horizontal to (3/5,6), diagonal to (3,4), vertical
+to (2/3,3), horizontal to (1/2,3), vertical to (0/2,2), and diagonal to (0,0). As expected,
+there are 4 non-diagonal steps, and the diagonals form an LCS.
+
+There is a symmetric backward algorithm, which gives (backwards labels are prefixed with a colon):
+A:"aabbaa", B:"aacaba"
+ ⊙ -------- ⊙ -------- ⊙ -------- ⊙ -------- ⊙ -------- ⊙ -------- ⊙
+ a | ____/‾‾‾ | ____/‾‾‾ | | | ____/‾‾‾ | ____/‾‾‾ |
+ ⊙ -------- ⊙ -------- ⊙ -------- ⊙ -------- ⊙ --------(:0/5,5)-------- ⊙
+ b | | | ____/‾‾‾ | ____/‾‾‾ | | |
+ ⊙ -------- ⊙ -------- ⊙ --------(:1/3,4)-------- ⊙ -------- ⊙ -------- ⊙
+ a | ____/‾‾‾ | ____/‾‾‾ | | | ____/‾‾‾ | ____/‾‾‾ |
+ (:3/0,3)--------(:2/1,3)-------- ⊙ --------(:2/3,3)--------(:1/4,3)-------- ⊙ -------- ⊙
+ c | | | | | | |
+ ⊙ -------- ⊙ -------- ⊙ --------(:3/3,2)--------(:2/4,2)-------- ⊙ -------- ⊙
+ a | ____/‾‾‾ | ____/‾‾‾ | | | ____/‾‾‾ | ____/‾‾‾ |
+ (:3/0,1)-------- ⊙ -------- ⊙ -------- ⊙ --------(:3/4,1)-------- ⊙ -------- ⊙
+ a | ____/‾‾‾ | ____/‾‾‾ | | | ____/‾‾‾ | ____/‾‾‾ |
+ (:4/0,0)-------- ⊙ -------- ⊙ -------- ⊙ --------(:4/4,0)-------- ⊙ -------- ⊙
+ a a b b a a
+
+Neither of these is ideal for use in an editor, where it is undesirable to send very long diffs to the
+front end. It's tricky to decide exactly what 'very long diffs' means, as "replace A by B" is very short.
+We want to control how big D can be, by stopping when it gets too large. The forward algorithm then
+privileges common prefixes, and the backward algorithm privileges common suffixes. Either is an undesirable
+asymmetry.
+
+Fortunately there is a two-sided algorithm, implied by results in Myers' paper. Here's what the labels in
+the edit graph look like.
+A:"aabbaa", B:"aacaba"
+ ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙
+ a | ____/‾‾‾‾ | ____/‾‾‾‾ | | | ____/‾‾‾‾ | ____/‾‾‾‾ |
+ ⊙ --------- ⊙ --------- ⊙ --------- (2/3,5) --------- ⊙ --------- (:0/5,5)--------- ⊙
+ b | | | ____/‾‾‾‾ | ____/‾‾‾‾ | | |
+ ⊙ --------- ⊙ --------- ⊙ --------- (:1/3,4)--------- ⊙ --------- ⊙ --------- ⊙
+ a | ____/‾‾‾‾ | ____/‾‾‾‾ | | | ____/‾‾‾‾ | ____/‾‾‾‾ |
+ ⊙ --------- (:2/1,3)--------- (1/2,3) ---------(2:2/3,3)--------- (:1/4,3)--------- ⊙ --------- ⊙
+ c | | | | | | |
+ ⊙ --------- ⊙ --------- (0/2,2) --------- (1/3,2) ---------(2:2/4,2)--------- ⊙ --------- ⊙
+ a | ____/‾‾‾‾ | ____/‾‾‾‾ | | | ____/‾‾‾‾ | ____/‾‾‾‾ |
+ ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙
+ a | ____/‾‾‾‾ | ____/‾‾‾‾ | | | ____/‾‾‾‾ | ____/‾‾‾‾ |
+ ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙
+ a a b b a a
+
+The algorithm stopped when it saw the backwards 2-path ending at (1,3) and the forwards 2-path ending at (3,5). The criterion
+is a backwards path ending at (u,v) and a forward path ending at (x,y), where u <= x and the two points are on the same
+diagonal. (Here the edgegraph has a diagonal, but the criterion is x-y=u-v.) Myers proves there is a forward
+2-path from (0,0) to (1,3), and that together with the backwards 2-path ending at (1,3) gives the expected 4-path.
+Unfortunately the forward path has to be constructed by another run of the forward algorithm; it can't be found from the
+computed labels. That is the worst case. Had the code noticed (x,y)=(u,v)=(3,3) the whole path could be reconstructed
+from the edgegraph. The implementation looks for a number of special cases to try to avoid computing an extra forward path.
+
+If the two-sided algorithm has stop early (because D has become too large) it will have found a forward LCS and a
+backwards LCS. Ideally these go with disjoint prefixes and suffixes of A and B, but disjointness may fail and the two
+computed LCS may conflict. (An easy example is where A is a suffix of B, and shares a short prefix. The backwards LCS
+is all of A, and the forward LCS is a prefix of A.) The algorithm combines the two
+to form a best-effort LCS. In the worst case the forward partial LCS may have to
+be recomputed.
+*/
+
+/* Eugene Myers paper is titled
+"An O(ND) Difference Algorithm and Its Variations"
+and can be found at
+http://www.xmailserver.org/diff2.pdf
+
+(There is a generic implementation of the algorithm the the repository with git hash
+b9ad7e4ade3a686d608e44475390ad428e60e7fc)
+*/
diff --git a/internal/diff/lcs/git.sh b/internal/diff/lcs/git.sh
new file mode 100644
index 000000000..6856f8439
--- /dev/null
+++ b/internal/diff/lcs/git.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+#
+# Copyright 2022 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+#
+# Creates a zip file containing all numbered versions
+# of the commit history of a large source file, for use
+# as input data for the tests of the diff algorithm.
+#
+# Run script from root of the x/tools repo.
+
+set -eu
+
+# WARNING: This script will install the latest version of $file
+# The largest real source file in the x/tools repo.
+# file=internal/lsp/source/completion/completion.go
+# file=internal/lsp/source/diagnostics.go
+file=internal/lsp/protocol/tsprotocol.go
+
+tmp=$(mktemp -d)
+git log $file |
+ awk '/^commit / {print $2}' |
+ nl -ba -nrz |
+ while read n hash; do
+ git checkout --quiet $hash $file
+ cp -f $file $tmp/$n
+ done
+(cd $tmp && zip -q - *) > testdata.zip
+rm -fr $tmp
+git restore --staged $file
+git restore $file
+echo "Created testdata.zip"
diff --git a/internal/diff/lcs/labels.go b/internal/diff/lcs/labels.go
new file mode 100644
index 000000000..0689f1ed7
--- /dev/null
+++ b/internal/diff/lcs/labels.go
@@ -0,0 +1,55 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lcs
+
+import (
+ "fmt"
+)
+
+// For each D, vec[D] has length D+1,
+// and the label for (D, k) is stored in vec[D][(D+k)/2].
+type label struct {
+ vec [][]int
+}
+
+// Temporary checking DO NOT COMMIT true TO PRODUCTION CODE
+const debug = false
+
+// debugging. check that the (d,k) pair is valid
+// (that is, -d<=k<=d and d+k even)
+func checkDK(D, k int) {
+ if k >= -D && k <= D && (D+k)%2 == 0 {
+ return
+ }
+ panic(fmt.Sprintf("out of range, d=%d,k=%d", D, k))
+}
+
+func (t *label) set(D, k, x int) {
+ if debug {
+ checkDK(D, k)
+ }
+ for len(t.vec) <= D {
+ t.vec = append(t.vec, nil)
+ }
+ if t.vec[D] == nil {
+ t.vec[D] = make([]int, D+1)
+ }
+ t.vec[D][(D+k)/2] = x // known that D+k is even
+}
+
+func (t *label) get(d, k int) int {
+ if debug {
+ checkDK(d, k)
+ }
+ return int(t.vec[d][(d+k)/2])
+}
+
+func newtriang(limit int) label {
+ if limit < 100 {
+ // Preallocate if limit is not large.
+ return label{vec: make([][]int, limit)}
+ }
+ return label{}
+}
diff --git a/internal/diff/lcs/old.go b/internal/diff/lcs/old.go
new file mode 100644
index 000000000..7af11fc89
--- /dev/null
+++ b/internal/diff/lcs/old.go
@@ -0,0 +1,480 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lcs
+
+// TODO(adonovan): remove unclear references to "old" in this package.
+
+import (
+ "fmt"
+)
+
+// A Diff is a replacement of a portion of A by a portion of B.
+type Diff struct {
+ Start, End int // offsets of portion to delete in A
+ ReplStart, ReplEnd int // offset of replacement text in B
+}
+
+// DiffStrings returns the differences between two strings.
+// It does not respect rune boundaries.
+func DiffStrings(a, b string) []Diff { return diff(stringSeqs{a, b}) }
+
+// DiffBytes returns the differences between two byte sequences.
+// It does not respect rune boundaries.
+func DiffBytes(a, b []byte) []Diff { return diff(bytesSeqs{a, b}) }
+
+// DiffRunes returns the differences between two rune sequences.
+func DiffRunes(a, b []rune) []Diff { return diff(runesSeqs{a, b}) }
+
+func diff(seqs sequences) []Diff {
+ // A limit on how deeply the LCS algorithm should search. The value is just a guess.
+ const maxDiffs = 30
+ diff, _ := compute(seqs, twosided, maxDiffs/2)
+ return diff
+}
+
+// compute computes the list of differences between two sequences,
+// along with the LCS. It is exercised directly by tests.
+// The algorithm is one of {forward, backward, twosided}.
+func compute(seqs sequences, algo func(*editGraph) lcs, limit int) ([]Diff, lcs) {
+ if limit <= 0 {
+ limit = 1 << 25 // effectively infinity
+ }
+ alen, blen := seqs.lengths()
+ g := &editGraph{
+ seqs: seqs,
+ vf: newtriang(limit),
+ vb: newtriang(limit),
+ limit: limit,
+ ux: alen,
+ uy: blen,
+ delta: alen - blen,
+ }
+ lcs := algo(g)
+ diffs := lcs.toDiffs(alen, blen)
+ return diffs, lcs
+}
+
+// editGraph carries the information for computing the lcs of two sequences.
+type editGraph struct {
+ seqs sequences
+ vf, vb label // forward and backward labels
+
+ limit int // maximal value of D
+ // the bounding rectangle of the current edit graph
+ lx, ly, ux, uy int
+ delta int // common subexpression: (ux-lx)-(uy-ly)
+}
+
+// toDiffs converts an LCS to a list of edits.
+func (lcs lcs) toDiffs(alen, blen int) []Diff {
+ var diffs []Diff
+ var pa, pb int // offsets in a, b
+ for _, l := range lcs {
+ if pa < l.X || pb < l.Y {
+ diffs = append(diffs, Diff{pa, l.X, pb, l.Y})
+ }
+ pa = l.X + l.Len
+ pb = l.Y + l.Len
+ }
+ if pa < alen || pb < blen {
+ diffs = append(diffs, Diff{pa, alen, pb, blen})
+ }
+ return diffs
+}
+
+// --- FORWARD ---
+
+// fdone decides if the forwward path has reached the upper right
+// corner of the rectangle. If so, it also returns the computed lcs.
+func (e *editGraph) fdone(D, k int) (bool, lcs) {
+ // x, y, k are relative to the rectangle
+ x := e.vf.get(D, k)
+ y := x - k
+ if x == e.ux && y == e.uy {
+ return true, e.forwardlcs(D, k)
+ }
+ return false, nil
+}
+
+// run the forward algorithm, until success or up to the limit on D.
+func forward(e *editGraph) lcs {
+ e.setForward(0, 0, e.lx)
+ if ok, ans := e.fdone(0, 0); ok {
+ return ans
+ }
+ // from D to D+1
+ for D := 0; D < e.limit; D++ {
+ e.setForward(D+1, -(D + 1), e.getForward(D, -D))
+ if ok, ans := e.fdone(D+1, -(D + 1)); ok {
+ return ans
+ }
+ e.setForward(D+1, D+1, e.getForward(D, D)+1)
+ if ok, ans := e.fdone(D+1, D+1); ok {
+ return ans
+ }
+ for k := -D + 1; k <= D-1; k += 2 {
+ // these are tricky and easy to get backwards
+ lookv := e.lookForward(k, e.getForward(D, k-1)+1)
+ lookh := e.lookForward(k, e.getForward(D, k+1))
+ if lookv > lookh {
+ e.setForward(D+1, k, lookv)
+ } else {
+ e.setForward(D+1, k, lookh)
+ }
+ if ok, ans := e.fdone(D+1, k); ok {
+ return ans
+ }
+ }
+ }
+ // D is too large
+ // find the D path with maximal x+y inside the rectangle and
+ // use that to compute the found part of the lcs
+ kmax := -e.limit - 1
+ diagmax := -1
+ for k := -e.limit; k <= e.limit; k += 2 {
+ x := e.getForward(e.limit, k)
+ y := x - k
+ if x+y > diagmax && x <= e.ux && y <= e.uy {
+ diagmax, kmax = x+y, k
+ }
+ }
+ return e.forwardlcs(e.limit, kmax)
+}
+
+// recover the lcs by backtracking from the farthest point reached
+func (e *editGraph) forwardlcs(D, k int) lcs {
+ var ans lcs
+ for x := e.getForward(D, k); x != 0 || x-k != 0; {
+ if ok(D-1, k-1) && x-1 == e.getForward(D-1, k-1) {
+ // if (x-1,y) is labelled D-1, x--,D--,k--,continue
+ D, k, x = D-1, k-1, x-1
+ continue
+ } else if ok(D-1, k+1) && x == e.getForward(D-1, k+1) {
+ // if (x,y-1) is labelled D-1, x, D--,k++, continue
+ D, k = D-1, k+1
+ continue
+ }
+ // if (x-1,y-1)--(x,y) is a diagonal, prepend,x--,y--, continue
+ y := x - k
+ ans = ans.prepend(x+e.lx-1, y+e.ly-1)
+ x--
+ }
+ return ans
+}
+
+// start at (x,y), go up the diagonal as far as possible,
+// and label the result with d
+func (e *editGraph) lookForward(k, relx int) int {
+ rely := relx - k
+ x, y := relx+e.lx, rely+e.ly
+ if x < e.ux && y < e.uy {
+ x += e.seqs.commonPrefixLen(x, e.ux, y, e.uy)
+ }
+ return x
+}
+
+func (e *editGraph) setForward(d, k, relx int) {
+ x := e.lookForward(k, relx)
+ e.vf.set(d, k, x-e.lx)
+}
+
+func (e *editGraph) getForward(d, k int) int {
+ x := e.vf.get(d, k)
+ return x
+}
+
+// --- BACKWARD ---
+
+// bdone decides if the backward path has reached the lower left corner
+func (e *editGraph) bdone(D, k int) (bool, lcs) {
+ // x, y, k are relative to the rectangle
+ x := e.vb.get(D, k)
+ y := x - (k + e.delta)
+ if x == 0 && y == 0 {
+ return true, e.backwardlcs(D, k)
+ }
+ return false, nil
+}
+
+// run the backward algorithm, until success or up to the limit on D.
+func backward(e *editGraph) lcs {
+ e.setBackward(0, 0, e.ux)
+ if ok, ans := e.bdone(0, 0); ok {
+ return ans
+ }
+ // from D to D+1
+ for D := 0; D < e.limit; D++ {
+ e.setBackward(D+1, -(D + 1), e.getBackward(D, -D)-1)
+ if ok, ans := e.bdone(D+1, -(D + 1)); ok {
+ return ans
+ }
+ e.setBackward(D+1, D+1, e.getBackward(D, D))
+ if ok, ans := e.bdone(D+1, D+1); ok {
+ return ans
+ }
+ for k := -D + 1; k <= D-1; k += 2 {
+ // these are tricky and easy to get wrong
+ lookv := e.lookBackward(k, e.getBackward(D, k-1))
+ lookh := e.lookBackward(k, e.getBackward(D, k+1)-1)
+ if lookv < lookh {
+ e.setBackward(D+1, k, lookv)
+ } else {
+ e.setBackward(D+1, k, lookh)
+ }
+ if ok, ans := e.bdone(D+1, k); ok {
+ return ans
+ }
+ }
+ }
+
+ // D is too large
+ // find the D path with minimal x+y inside the rectangle and
+ // use that to compute the part of the lcs found
+ kmax := -e.limit - 1
+ diagmin := 1 << 25
+ for k := -e.limit; k <= e.limit; k += 2 {
+ x := e.getBackward(e.limit, k)
+ y := x - (k + e.delta)
+ if x+y < diagmin && x >= 0 && y >= 0 {
+ diagmin, kmax = x+y, k
+ }
+ }
+ if kmax < -e.limit {
+ panic(fmt.Sprintf("no paths when limit=%d?", e.limit))
+ }
+ return e.backwardlcs(e.limit, kmax)
+}
+
+// recover the lcs by backtracking
+func (e *editGraph) backwardlcs(D, k int) lcs {
+ var ans lcs
+ for x := e.getBackward(D, k); x != e.ux || x-(k+e.delta) != e.uy; {
+ if ok(D-1, k-1) && x == e.getBackward(D-1, k-1) {
+ // D--, k--, x unchanged
+ D, k = D-1, k-1
+ continue
+ } else if ok(D-1, k+1) && x+1 == e.getBackward(D-1, k+1) {
+ // D--, k++, x++
+ D, k, x = D-1, k+1, x+1
+ continue
+ }
+ y := x - (k + e.delta)
+ ans = ans.append(x+e.lx, y+e.ly)
+ x++
+ }
+ return ans
+}
+
+// start at (x,y), go down the diagonal as far as possible,
+func (e *editGraph) lookBackward(k, relx int) int {
+ rely := relx - (k + e.delta) // forward k = k + e.delta
+ x, y := relx+e.lx, rely+e.ly
+ if x > 0 && y > 0 {
+ x -= e.seqs.commonSuffixLen(0, x, 0, y)
+ }
+ return x
+}
+
+// convert to rectangle, and label the result with d
+func (e *editGraph) setBackward(d, k, relx int) {
+ x := e.lookBackward(k, relx)
+ e.vb.set(d, k, x-e.lx)
+}
+
+func (e *editGraph) getBackward(d, k int) int {
+ x := e.vb.get(d, k)
+ return x
+}
+
+// -- TWOSIDED ---
+
+func twosided(e *editGraph) lcs {
+ // The termination condition could be improved, as either the forward
+ // or backward pass could succeed before Myers' Lemma applies.
+ // Aside from questions of efficiency (is the extra testing cost-effective)
+ // this is more likely to matter when e.limit is reached.
+ e.setForward(0, 0, e.lx)
+ e.setBackward(0, 0, e.ux)
+
+ // from D to D+1
+ for D := 0; D < e.limit; D++ {
+ // just finished a backwards pass, so check
+ if got, ok := e.twoDone(D, D); ok {
+ return e.twolcs(D, D, got)
+ }
+ // do a forwards pass (D to D+1)
+ e.setForward(D+1, -(D + 1), e.getForward(D, -D))
+ e.setForward(D+1, D+1, e.getForward(D, D)+1)
+ for k := -D + 1; k <= D-1; k += 2 {
+ // these are tricky and easy to get backwards
+ lookv := e.lookForward(k, e.getForward(D, k-1)+1)
+ lookh := e.lookForward(k, e.getForward(D, k+1))
+ if lookv > lookh {
+ e.setForward(D+1, k, lookv)
+ } else {
+ e.setForward(D+1, k, lookh)
+ }
+ }
+ // just did a forward pass, so check
+ if got, ok := e.twoDone(D+1, D); ok {
+ return e.twolcs(D+1, D, got)
+ }
+ // do a backward pass, D to D+1
+ e.setBackward(D+1, -(D + 1), e.getBackward(D, -D)-1)
+ e.setBackward(D+1, D+1, e.getBackward(D, D))
+ for k := -D + 1; k <= D-1; k += 2 {
+ // these are tricky and easy to get wrong
+ lookv := e.lookBackward(k, e.getBackward(D, k-1))
+ lookh := e.lookBackward(k, e.getBackward(D, k+1)-1)
+ if lookv < lookh {
+ e.setBackward(D+1, k, lookv)
+ } else {
+ e.setBackward(D+1, k, lookh)
+ }
+ }
+ }
+
+ // D too large. combine a forward and backward partial lcs
+ // first, a forward one
+ kmax := -e.limit - 1
+ diagmax := -1
+ for k := -e.limit; k <= e.limit; k += 2 {
+ x := e.getForward(e.limit, k)
+ y := x - k
+ if x+y > diagmax && x <= e.ux && y <= e.uy {
+ diagmax, kmax = x+y, k
+ }
+ }
+ if kmax < -e.limit {
+ panic(fmt.Sprintf("no forward paths when limit=%d?", e.limit))
+ }
+ lcs := e.forwardlcs(e.limit, kmax)
+ // now a backward one
+ // find the D path with minimal x+y inside the rectangle and
+ // use that to compute the lcs
+ diagmin := 1 << 25 // infinity
+ for k := -e.limit; k <= e.limit; k += 2 {
+ x := e.getBackward(e.limit, k)
+ y := x - (k + e.delta)
+ if x+y < diagmin && x >= 0 && y >= 0 {
+ diagmin, kmax = x+y, k
+ }
+ }
+ if kmax < -e.limit {
+ panic(fmt.Sprintf("no backward paths when limit=%d?", e.limit))
+ }
+ lcs = append(lcs, e.backwardlcs(e.limit, kmax)...)
+ // These may overlap (e.forwardlcs and e.backwardlcs return sorted lcs)
+ ans := lcs.fix()
+ return ans
+}
+
+// Does Myers' Lemma apply?
+func (e *editGraph) twoDone(df, db int) (int, bool) {
+ if (df+db+e.delta)%2 != 0 {
+ return 0, false // diagonals cannot overlap
+ }
+ kmin := -db + e.delta
+ if -df > kmin {
+ kmin = -df
+ }
+ kmax := db + e.delta
+ if df < kmax {
+ kmax = df
+ }
+ for k := kmin; k <= kmax; k += 2 {
+ x := e.vf.get(df, k)
+ u := e.vb.get(db, k-e.delta)
+ if u <= x {
+ // is it worth looking at all the other k?
+ for l := k; l <= kmax; l += 2 {
+ x := e.vf.get(df, l)
+ y := x - l
+ u := e.vb.get(db, l-e.delta)
+ v := u - l
+ if x == u || u == 0 || v == 0 || y == e.uy || x == e.ux {
+ return l, true
+ }
+ }
+ return k, true
+ }
+ }
+ return 0, false
+}
+
+func (e *editGraph) twolcs(df, db, kf int) lcs {
+ // db==df || db+1==df
+ x := e.vf.get(df, kf)
+ y := x - kf
+ kb := kf - e.delta
+ u := e.vb.get(db, kb)
+ v := u - kf
+
+ // Myers proved there is a df-path from (0,0) to (u,v)
+ // and a db-path from (x,y) to (N,M).
+ // In the first case the overall path is the forward path
+ // to (u,v) followed by the backward path to (N,M).
+ // In the second case the path is the backward path to (x,y)
+ // followed by the forward path to (x,y) from (0,0).
+
+ // Look for some special cases to avoid computing either of these paths.
+ if x == u {
+ // "babaab" "cccaba"
+ // already patched together
+ lcs := e.forwardlcs(df, kf)
+ lcs = append(lcs, e.backwardlcs(db, kb)...)
+ return lcs.sort()
+ }
+
+ // is (u-1,v) or (u,v-1) labelled df-1?
+ // if so, that forward df-1-path plus a horizontal or vertical edge
+ // is the df-path to (u,v), then plus the db-path to (N,M)
+ if u > 0 && ok(df-1, u-1-v) && e.vf.get(df-1, u-1-v) == u-1 {
+ // "aabbab" "cbcabc"
+ lcs := e.forwardlcs(df-1, u-1-v)
+ lcs = append(lcs, e.backwardlcs(db, kb)...)
+ return lcs.sort()
+ }
+ if v > 0 && ok(df-1, (u-(v-1))) && e.vf.get(df-1, u-(v-1)) == u {
+ // "abaabb" "bcacab"
+ lcs := e.forwardlcs(df-1, u-(v-1))
+ lcs = append(lcs, e.backwardlcs(db, kb)...)
+ return lcs.sort()
+ }
+
+ // The path can't possibly contribute to the lcs because it
+ // is all horizontal or vertical edges
+ if u == 0 || v == 0 || x == e.ux || y == e.uy {
+ // "abaabb" "abaaaa"
+ if u == 0 || v == 0 {
+ return e.backwardlcs(db, kb)
+ }
+ return e.forwardlcs(df, kf)
+ }
+
+ // is (x+1,y) or (x,y+1) labelled db-1?
+ if x+1 <= e.ux && ok(db-1, x+1-y-e.delta) && e.vb.get(db-1, x+1-y-e.delta) == x+1 {
+ // "bababb" "baaabb"
+ lcs := e.backwardlcs(db-1, kb+1)
+ lcs = append(lcs, e.forwardlcs(df, kf)...)
+ return lcs.sort()
+ }
+ if y+1 <= e.uy && ok(db-1, x-(y+1)-e.delta) && e.vb.get(db-1, x-(y+1)-e.delta) == x {
+ // "abbbaa" "cabacc"
+ lcs := e.backwardlcs(db-1, kb-1)
+ lcs = append(lcs, e.forwardlcs(df, kf)...)
+ return lcs.sort()
+ }
+
+ // need to compute another path
+ // "aabbaa" "aacaba"
+ lcs := e.backwardlcs(db, kb)
+ oldx, oldy := e.ux, e.uy
+ e.ux = u
+ e.uy = v
+ lcs = append(lcs, forward(e)...)
+ e.ux, e.uy = oldx, oldy
+ return lcs.sort()
+}
diff --git a/internal/diff/lcs/old_test.go b/internal/diff/lcs/old_test.go
new file mode 100644
index 000000000..0c894316f
--- /dev/null
+++ b/internal/diff/lcs/old_test.go
@@ -0,0 +1,251 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lcs
+
+import (
+ "fmt"
+ "io/ioutil"
+ "log"
+ "math/rand"
+ "strings"
+ "testing"
+)
+
+func TestAlgosOld(t *testing.T) {
+ for i, algo := range []func(*editGraph) lcs{forward, backward, twosided} {
+ t.Run(strings.Fields("forward backward twosided")[i], func(t *testing.T) {
+ for _, tx := range Btests {
+ lim := len(tx.a) + len(tx.b)
+
+ diffs, lcs := compute(stringSeqs{tx.a, tx.b}, algo, lim)
+ check(t, tx.a, lcs, tx.lcs)
+ checkDiffs(t, tx.a, diffs, tx.b)
+
+ diffs, lcs = compute(stringSeqs{tx.b, tx.a}, algo, lim)
+ check(t, tx.b, lcs, tx.lcs)
+ checkDiffs(t, tx.b, diffs, tx.a)
+ }
+ })
+ }
+}
+
+func TestIntOld(t *testing.T) {
+ // need to avoid any characters in btests
+ lfill, rfill := "AAAAAAAAAAAA", "BBBBBBBBBBBB"
+ for _, tx := range Btests {
+ if len(tx.a) < 2 || len(tx.b) < 2 {
+ continue
+ }
+ left := tx.a + lfill
+ right := tx.b + rfill
+ lim := len(tx.a) + len(tx.b)
+ diffs, lcs := compute(stringSeqs{left, right}, twosided, lim)
+ check(t, left, lcs, tx.lcs)
+ checkDiffs(t, left, diffs, right)
+ diffs, lcs = compute(stringSeqs{right, left}, twosided, lim)
+ check(t, right, lcs, tx.lcs)
+ checkDiffs(t, right, diffs, left)
+
+ left = lfill + tx.a
+ right = rfill + tx.b
+ diffs, lcs = compute(stringSeqs{left, right}, twosided, lim)
+ check(t, left, lcs, tx.lcs)
+ checkDiffs(t, left, diffs, right)
+ diffs, lcs = compute(stringSeqs{right, left}, twosided, lim)
+ check(t, right, lcs, tx.lcs)
+ checkDiffs(t, right, diffs, left)
+ }
+}
+
+func TestSpecialOld(t *testing.T) { // exercises lcs.fix
+ a := "golang.org/x/tools/intern"
+ b := "github.com/google/safehtml/template\"\n\t\"golang.org/x/tools/intern"
+ diffs, lcs := compute(stringSeqs{a, b}, twosided, 4)
+ if !lcs.valid() {
+ t.Errorf("%d,%v", len(diffs), lcs)
+ }
+}
+
+func TestRegressionOld001(t *testing.T) {
+ a := "// Copyright 2019 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage diff_test\n\nimport (\n\t\"fmt\"\n\t\"math/rand\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"golang.org/x/tools/gopls/internal/lsp/diff\"\n\t\"golang.org/x/tools/internal/diff/difftest\"\n\t\"golang.org/x/tools/gopls/internal/span\"\n)\n"
+
+ b := "// Copyright 2019 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage diff_test\n\nimport (\n\t\"fmt\"\n\t\"math/rand\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/google/safehtml/template\"\n\t\"golang.org/x/tools/gopls/internal/lsp/diff\"\n\t\"golang.org/x/tools/internal/diff/difftest\"\n\t\"golang.org/x/tools/gopls/internal/span\"\n)\n"
+ for i := 1; i < len(b); i++ {
+ diffs, lcs := compute(stringSeqs{a, b}, twosided, i) // 14 from gopls
+ if !lcs.valid() {
+ t.Errorf("%d,%v", len(diffs), lcs)
+ }
+ checkDiffs(t, a, diffs, b)
+ }
+}
+
+func TestRegressionOld002(t *testing.T) {
+ a := "n\"\n)\n"
+ b := "n\"\n\t\"golang.org/x//nnal/stack\"\n)\n"
+ for i := 1; i <= len(b); i++ {
+ diffs, lcs := compute(stringSeqs{a, b}, twosided, i)
+ if !lcs.valid() {
+ t.Errorf("%d,%v", len(diffs), lcs)
+ }
+ checkDiffs(t, a, diffs, b)
+ }
+}
+
+func TestRegressionOld003(t *testing.T) {
+ a := "golang.org/x/hello v1.0.0\nrequire golang.org/x/unused v1"
+ b := "golang.org/x/hello v1"
+ for i := 1; i <= len(a); i++ {
+ diffs, lcs := compute(stringSeqs{a, b}, twosided, i)
+ if !lcs.valid() {
+ t.Errorf("%d,%v", len(diffs), lcs)
+ }
+ checkDiffs(t, a, diffs, b)
+ }
+}
+
+func TestRandOld(t *testing.T) {
+ rand.Seed(1)
+ for i := 0; i < 1000; i++ {
+ // TODO(adonovan): use ASCII and bytesSeqs here? The use of
+ // non-ASCII isn't relevant to the property exercised by the test.
+ a := []rune(randstr("abω", 16))
+ b := []rune(randstr("abωc", 16))
+ seq := runesSeqs{a, b}
+
+ const lim = 24 // large enough to get true lcs
+ _, forw := compute(seq, forward, lim)
+ _, back := compute(seq, backward, lim)
+ _, two := compute(seq, twosided, lim)
+ if lcslen(two) != lcslen(forw) || lcslen(forw) != lcslen(back) {
+ t.Logf("\n%v\n%v\n%v", forw, back, two)
+ t.Fatalf("%d forw:%d back:%d two:%d", i, lcslen(forw), lcslen(back), lcslen(two))
+ }
+ if !two.valid() || !forw.valid() || !back.valid() {
+ t.Errorf("check failure")
+ }
+ }
+}
+
+// TestDiffAPI tests the public API functions (Diff{Bytes,Strings,Runes})
+// to ensure at least miminal parity of the three representations.
+func TestDiffAPI(t *testing.T) {
+ for _, test := range []struct {
+ a, b string
+ wantStrings, wantBytes, wantRunes string
+ }{
+ {"abcXdef", "abcxdef", "[{3 4 3 4}]", "[{3 4 3 4}]", "[{3 4 3 4}]"}, // ASCII
+ {"abcωdef", "abcΩdef", "[{3 5 3 5}]", "[{3 5 3 5}]", "[{3 4 3 4}]"}, // non-ASCII
+ } {
+
+ gotStrings := fmt.Sprint(DiffStrings(test.a, test.b))
+ if gotStrings != test.wantStrings {
+ t.Errorf("DiffStrings(%q, %q) = %v, want %v",
+ test.a, test.b, gotStrings, test.wantStrings)
+ }
+ gotBytes := fmt.Sprint(DiffBytes([]byte(test.a), []byte(test.b)))
+ if gotBytes != test.wantBytes {
+ t.Errorf("DiffBytes(%q, %q) = %v, want %v",
+ test.a, test.b, gotBytes, test.wantBytes)
+ }
+ gotRunes := fmt.Sprint(DiffRunes([]rune(test.a), []rune(test.b)))
+ if gotRunes != test.wantRunes {
+ t.Errorf("DiffRunes(%q, %q) = %v, want %v",
+ test.a, test.b, gotRunes, test.wantRunes)
+ }
+ }
+}
+
+func BenchmarkTwoOld(b *testing.B) {
+ tests := genBench("abc", 96)
+ for i := 0; i < b.N; i++ {
+ for _, tt := range tests {
+ _, two := compute(stringSeqs{tt.before, tt.after}, twosided, 100)
+ if !two.valid() {
+ b.Error("check failed")
+ }
+ }
+ }
+}
+
+func BenchmarkForwOld(b *testing.B) {
+ tests := genBench("abc", 96)
+ for i := 0; i < b.N; i++ {
+ for _, tt := range tests {
+ _, two := compute(stringSeqs{tt.before, tt.after}, forward, 100)
+ if !two.valid() {
+ b.Error("check failed")
+ }
+ }
+ }
+}
+
+func genBench(set string, n int) []struct{ before, after string } {
+ // before and after for benchmarks. 24 strings of length n with
+ // before and after differing at least once, and about 5%
+ rand.Seed(3)
+ var ans []struct{ before, after string }
+ for i := 0; i < 24; i++ {
+ // maybe b should have an approximately known number of diffs
+ a := randstr(set, n)
+ cnt := 0
+ bb := make([]rune, 0, n)
+ for _, r := range a {
+ if rand.Float64() < .05 {
+ cnt++
+ r = 'N'
+ }
+ bb = append(bb, r)
+ }
+ if cnt == 0 {
+ // avoid == shortcut
+ bb[n/2] = 'N'
+ }
+ ans = append(ans, struct{ before, after string }{a, string(bb)})
+ }
+ return ans
+}
+
+// This benchmark represents a common case for a diff command:
+// large file with a single relatively small diff in the middle.
+// (It's not clear whether this is representative of gopls workloads
+// or whether it is important to gopls diff performance.)
+//
+// TODO(adonovan) opt: it could be much faster. For example,
+// comparing a file against itself is about 10x faster than with the
+// small deletion in the middle. Strangely, comparing a file against
+// itself minus the last byte is faster still; I don't know why.
+// There is much low-hanging fruit here for further improvement.
+func BenchmarkLargeFileSmallDiff(b *testing.B) {
+ data, err := ioutil.ReadFile("old.go") // large file
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ n := len(data)
+
+ src := string(data)
+ dst := src[:n*49/100] + src[n*51/100:] // remove 2% from the middle
+ b.Run("string", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ compute(stringSeqs{src, dst}, twosided, len(src)+len(dst))
+ }
+ })
+
+ srcBytes := []byte(src)
+ dstBytes := []byte(dst)
+ b.Run("bytes", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ compute(bytesSeqs{srcBytes, dstBytes}, twosided, len(srcBytes)+len(dstBytes))
+ }
+ })
+
+ srcRunes := []rune(src)
+ dstRunes := []rune(dst)
+ b.Run("runes", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ compute(runesSeqs{srcRunes, dstRunes}, twosided, len(srcRunes)+len(dstRunes))
+ }
+ })
+}
diff --git a/internal/diff/lcs/sequence.go b/internal/diff/lcs/sequence.go
new file mode 100644
index 000000000..2d72d2630
--- /dev/null
+++ b/internal/diff/lcs/sequence.go
@@ -0,0 +1,113 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lcs
+
+// This file defines the abstract sequence over which the LCS algorithm operates.
+
+// sequences abstracts a pair of sequences, A and B.
+type sequences interface {
+ lengths() (int, int) // len(A), len(B)
+ commonPrefixLen(ai, aj, bi, bj int) int // len(commonPrefix(A[ai:aj], B[bi:bj]))
+ commonSuffixLen(ai, aj, bi, bj int) int // len(commonSuffix(A[ai:aj], B[bi:bj]))
+}
+
+type stringSeqs struct{ a, b string }
+
+func (s stringSeqs) lengths() (int, int) { return len(s.a), len(s.b) }
+func (s stringSeqs) commonPrefixLen(ai, aj, bi, bj int) int {
+ return commonPrefixLenString(s.a[ai:aj], s.b[bi:bj])
+}
+func (s stringSeqs) commonSuffixLen(ai, aj, bi, bj int) int {
+ return commonSuffixLenString(s.a[ai:aj], s.b[bi:bj])
+}
+
+// The explicit capacity in s[i:j:j] leads to more efficient code.
+
+type bytesSeqs struct{ a, b []byte }
+
+func (s bytesSeqs) lengths() (int, int) { return len(s.a), len(s.b) }
+func (s bytesSeqs) commonPrefixLen(ai, aj, bi, bj int) int {
+ return commonPrefixLenBytes(s.a[ai:aj:aj], s.b[bi:bj:bj])
+}
+func (s bytesSeqs) commonSuffixLen(ai, aj, bi, bj int) int {
+ return commonSuffixLenBytes(s.a[ai:aj:aj], s.b[bi:bj:bj])
+}
+
+type runesSeqs struct{ a, b []rune }
+
+func (s runesSeqs) lengths() (int, int) { return len(s.a), len(s.b) }
+func (s runesSeqs) commonPrefixLen(ai, aj, bi, bj int) int {
+ return commonPrefixLenRunes(s.a[ai:aj:aj], s.b[bi:bj:bj])
+}
+func (s runesSeqs) commonSuffixLen(ai, aj, bi, bj int) int {
+ return commonSuffixLenRunes(s.a[ai:aj:aj], s.b[bi:bj:bj])
+}
+
+// TODO(adonovan): optimize these functions using ideas from:
+// - https://go.dev/cl/408116 common.go
+// - https://go.dev/cl/421435 xor_generic.go
+
+// TODO(adonovan): factor using generics when available,
+// but measure performance impact.
+
+// commonPrefixLen* returns the length of the common prefix of a[ai:aj] and b[bi:bj].
+func commonPrefixLenBytes(a, b []byte) int {
+ n := min(len(a), len(b))
+ i := 0
+ for i < n && a[i] == b[i] {
+ i++
+ }
+ return i
+}
+func commonPrefixLenRunes(a, b []rune) int {
+ n := min(len(a), len(b))
+ i := 0
+ for i < n && a[i] == b[i] {
+ i++
+ }
+ return i
+}
+func commonPrefixLenString(a, b string) int {
+ n := min(len(a), len(b))
+ i := 0
+ for i < n && a[i] == b[i] {
+ i++
+ }
+ return i
+}
+
+// commonSuffixLen* returns the length of the common suffix of a[ai:aj] and b[bi:bj].
+func commonSuffixLenBytes(a, b []byte) int {
+ n := min(len(a), len(b))
+ i := 0
+ for i < n && a[len(a)-1-i] == b[len(b)-1-i] {
+ i++
+ }
+ return i
+}
+func commonSuffixLenRunes(a, b []rune) int {
+ n := min(len(a), len(b))
+ i := 0
+ for i < n && a[len(a)-1-i] == b[len(b)-1-i] {
+ i++
+ }
+ return i
+}
+func commonSuffixLenString(a, b string) int {
+ n := min(len(a), len(b))
+ i := 0
+ for i < n && a[len(a)-1-i] == b[len(b)-1-i] {
+ i++
+ }
+ return i
+}
+
+func min(x, y int) int {
+ if x < y {
+ return x
+ } else {
+ return y
+ }
+}
diff --git a/internal/diff/myers/diff.go b/internal/diff/myers/diff.go
new file mode 100644
index 000000000..7c2d4356b
--- /dev/null
+++ b/internal/diff/myers/diff.go
@@ -0,0 +1,215 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package myers implements the Myers diff algorithm.
+package myers
+
+import (
+ "strings"
+
+ "golang.org/x/tools/internal/diff"
+)
+
+// Sources:
+// https://blog.jcoglan.com/2017/02/17/the-myers-diff-algorithm-part-3/
+// https://www.codeproject.com/Articles/42279/%2FArticles%2F42279%2FInvestigating-Myers-diff-algorithm-Part-1-of-2
+
+func ComputeEdits(before, after string) []diff.Edit {
+ beforeLines := splitLines(before)
+ ops := operations(beforeLines, splitLines(after))
+
+ // Build a table mapping line number to offset.
+ lineOffsets := make([]int, 0, len(beforeLines)+1)
+ total := 0
+ for i := range beforeLines {
+ lineOffsets = append(lineOffsets, total)
+ total += len(beforeLines[i])
+ }
+ lineOffsets = append(lineOffsets, total) // EOF
+
+ edits := make([]diff.Edit, 0, len(ops))
+ for _, op := range ops {
+ start, end := lineOffsets[op.I1], lineOffsets[op.I2]
+ switch op.Kind {
+ case diff.Delete:
+ // Delete: before[I1:I2] is deleted.
+ edits = append(edits, diff.Edit{Start: start, End: end})
+ case diff.Insert:
+ // Insert: after[J1:J2] is inserted at before[I1:I1].
+ if content := strings.Join(op.Content, ""); content != "" {
+ edits = append(edits, diff.Edit{Start: start, End: end, New: content})
+ }
+ }
+ }
+ return edits
+}
+
+type operation struct {
+ Kind diff.OpKind
+ Content []string // content from b
+ I1, I2 int // indices of the line in a
+ J1 int // indices of the line in b, J2 implied by len(Content)
+}
+
+// operations returns the list of operations to convert a into b, consolidating
+// operations for multiple lines and not including equal lines.
+func operations(a, b []string) []*operation {
+ if len(a) == 0 && len(b) == 0 {
+ return nil
+ }
+
+ trace, offset := shortestEditSequence(a, b)
+ snakes := backtrack(trace, len(a), len(b), offset)
+
+ M, N := len(a), len(b)
+
+ var i int
+ solution := make([]*operation, len(a)+len(b))
+
+ add := func(op *operation, i2, j2 int) {
+ if op == nil {
+ return
+ }
+ op.I2 = i2
+ if op.Kind == diff.Insert {
+ op.Content = b[op.J1:j2]
+ }
+ solution[i] = op
+ i++
+ }
+ x, y := 0, 0
+ for _, snake := range snakes {
+ if len(snake) < 2 {
+ continue
+ }
+ var op *operation
+ // delete (horizontal)
+ for snake[0]-snake[1] > x-y {
+ if op == nil {
+ op = &operation{
+ Kind: diff.Delete,
+ I1: x,
+ J1: y,
+ }
+ }
+ x++
+ if x == M {
+ break
+ }
+ }
+ add(op, x, y)
+ op = nil
+ // insert (vertical)
+ for snake[0]-snake[1] < x-y {
+ if op == nil {
+ op = &operation{
+ Kind: diff.Insert,
+ I1: x,
+ J1: y,
+ }
+ }
+ y++
+ }
+ add(op, x, y)
+ op = nil
+ // equal (diagonal)
+ for x < snake[0] {
+ x++
+ y++
+ }
+ if x >= M && y >= N {
+ break
+ }
+ }
+ return solution[:i]
+}
+
+// backtrack uses the trace for the edit sequence computation and returns the
+// "snakes" that make up the solution. A "snake" is a single deletion or
+// insertion followed by zero or diagonals.
+func backtrack(trace [][]int, x, y, offset int) [][]int {
+ snakes := make([][]int, len(trace))
+ d := len(trace) - 1
+ for ; x > 0 && y > 0 && d > 0; d-- {
+ V := trace[d]
+ if len(V) == 0 {
+ continue
+ }
+ snakes[d] = []int{x, y}
+
+ k := x - y
+
+ var kPrev int
+ if k == -d || (k != d && V[k-1+offset] < V[k+1+offset]) {
+ kPrev = k + 1
+ } else {
+ kPrev = k - 1
+ }
+
+ x = V[kPrev+offset]
+ y = x - kPrev
+ }
+ if x < 0 || y < 0 {
+ return snakes
+ }
+ snakes[d] = []int{x, y}
+ return snakes
+}
+
+// shortestEditSequence returns the shortest edit sequence that converts a into b.
+func shortestEditSequence(a, b []string) ([][]int, int) {
+ M, N := len(a), len(b)
+ V := make([]int, 2*(N+M)+1)
+ offset := N + M
+ trace := make([][]int, N+M+1)
+
+ // Iterate through the maximum possible length of the SES (N+M).
+ for d := 0; d <= N+M; d++ {
+ copyV := make([]int, len(V))
+ // k lines are represented by the equation y = x - k. We move in
+ // increments of 2 because end points for even d are on even k lines.
+ for k := -d; k <= d; k += 2 {
+ // At each point, we either go down or to the right. We go down if
+ // k == -d, and we go to the right if k == d. We also prioritize
+ // the maximum x value, because we prefer deletions to insertions.
+ var x int
+ if k == -d || (k != d && V[k-1+offset] < V[k+1+offset]) {
+ x = V[k+1+offset] // down
+ } else {
+ x = V[k-1+offset] + 1 // right
+ }
+
+ y := x - k
+
+ // Diagonal moves while we have equal contents.
+ for x < M && y < N && a[x] == b[y] {
+ x++
+ y++
+ }
+
+ V[k+offset] = x
+
+ // Return if we've exceeded the maximum values.
+ if x == M && y == N {
+ // Makes sure to save the state of the array before returning.
+ copy(copyV, V)
+ trace[d] = copyV
+ return trace, offset
+ }
+ }
+
+ // Save the state of the array.
+ copy(copyV, V)
+ trace[d] = copyV
+ }
+ return nil, 0
+}
+
+func splitLines(text string) []string {
+ lines := strings.SplitAfter(text, "\n")
+ if lines[len(lines)-1] == "" {
+ lines = lines[:len(lines)-1]
+ }
+ return lines
+}
diff --git a/internal/diff/myers/diff_test.go b/internal/diff/myers/diff_test.go
new file mode 100644
index 000000000..f24445558
--- /dev/null
+++ b/internal/diff/myers/diff_test.go
@@ -0,0 +1,16 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package myers_test
+
+import (
+ "testing"
+
+ "golang.org/x/tools/internal/diff/difftest"
+ "golang.org/x/tools/internal/diff/myers"
+)
+
+func TestDiff(t *testing.T) {
+ difftest.DiffTest(t, myers.ComputeEdits)
+}
diff --git a/internal/diff/ndiff.go b/internal/diff/ndiff.go
new file mode 100644
index 000000000..050b08ded
--- /dev/null
+++ b/internal/diff/ndiff.go
@@ -0,0 +1,109 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package diff
+
+import (
+ "bytes"
+ "unicode/utf8"
+
+ "golang.org/x/tools/internal/diff/lcs"
+)
+
+// Strings computes the differences between two strings.
+// The resulting edits respect rune boundaries.
+func Strings(before, after string) []Edit {
+ if before == after {
+ return nil // common case
+ }
+
+ if stringIsASCII(before) && stringIsASCII(after) {
+ // TODO(adonovan): opt: specialize diffASCII for strings.
+ return diffASCII([]byte(before), []byte(after))
+ }
+ return diffRunes([]rune(before), []rune(after))
+}
+
+// Bytes computes the differences between two byte slices.
+// The resulting edits respect rune boundaries.
+func Bytes(before, after []byte) []Edit {
+ if bytes.Equal(before, after) {
+ return nil // common case
+ }
+
+ if bytesIsASCII(before) && bytesIsASCII(after) {
+ return diffASCII(before, after)
+ }
+ return diffRunes(runes(before), runes(after))
+}
+
+func diffASCII(before, after []byte) []Edit {
+ diffs := lcs.DiffBytes(before, after)
+
+ // Convert from LCS diffs.
+ res := make([]Edit, len(diffs))
+ for i, d := range diffs {
+ res[i] = Edit{d.Start, d.End, string(after[d.ReplStart:d.ReplEnd])}
+ }
+ return res
+}
+
+func diffRunes(before, after []rune) []Edit {
+ diffs := lcs.DiffRunes(before, after)
+
+ // The diffs returned by the lcs package use indexes
+ // into whatever slice was passed in.
+ // Convert rune offsets to byte offsets.
+ res := make([]Edit, len(diffs))
+ lastEnd := 0
+ utf8Len := 0
+ for i, d := range diffs {
+ utf8Len += runesLen(before[lastEnd:d.Start]) // text between edits
+ start := utf8Len
+ utf8Len += runesLen(before[d.Start:d.End]) // text deleted by this edit
+ res[i] = Edit{start, utf8Len, string(after[d.ReplStart:d.ReplEnd])}
+ lastEnd = d.End
+ }
+ return res
+}
+
+// runes is like []rune(string(bytes)) without the duplicate allocation.
+func runes(bytes []byte) []rune {
+ n := utf8.RuneCount(bytes)
+ runes := make([]rune, n)
+ for i := 0; i < n; i++ {
+ r, sz := utf8.DecodeRune(bytes)
+ bytes = bytes[sz:]
+ runes[i] = r
+ }
+ return runes
+}
+
+// runesLen returns the length in bytes of the UTF-8 encoding of runes.
+func runesLen(runes []rune) (len int) {
+ for _, r := range runes {
+ len += utf8.RuneLen(r)
+ }
+ return len
+}
+
+// stringIsASCII reports whether s contains only ASCII.
+// TODO(adonovan): combine when x/tools allows generics.
+func stringIsASCII(s string) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] >= utf8.RuneSelf {
+ return false
+ }
+ }
+ return true
+}
+
+func bytesIsASCII(s []byte) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] >= utf8.RuneSelf {
+ return false
+ }
+ }
+ return true
+}
diff --git a/internal/diff/unified.go b/internal/diff/unified.go
new file mode 100644
index 000000000..fa376f178
--- /dev/null
+++ b/internal/diff/unified.go
@@ -0,0 +1,248 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package diff
+
+import (
+ "fmt"
+ "log"
+ "strings"
+)
+
+// Unified returns a unified diff of the old and new strings.
+// The old and new labels are the names of the old and new files.
+// If the strings are equal, it returns the empty string.
+func Unified(oldLabel, newLabel, old, new string) string {
+ edits := Strings(old, new)
+ unified, err := ToUnified(oldLabel, newLabel, old, edits)
+ if err != nil {
+ // Can't happen: edits are consistent.
+ log.Fatalf("internal error in diff.Unified: %v", err)
+ }
+ return unified
+}
+
+// ToUnified applies the edits to content and returns a unified diff.
+// The old and new labels are the names of the content and result files.
+// It returns an error if the edits are inconsistent; see ApplyEdits.
+func ToUnified(oldLabel, newLabel, content string, edits []Edit) (string, error) {
+ u, err := toUnified(oldLabel, newLabel, content, edits)
+ if err != nil {
+ return "", err
+ }
+ return u.String(), nil
+}
+
+// unified represents a set of edits as a unified diff.
+type unified struct {
+ // From is the name of the original file.
+ From string
+ // To is the name of the modified file.
+ To string
+ // Hunks is the set of edit hunks needed to transform the file content.
+ Hunks []*hunk
+}
+
+// Hunk represents a contiguous set of line edits to apply.
+type hunk struct {
+ // The line in the original source where the hunk starts.
+ FromLine int
+ // The line in the original source where the hunk finishes.
+ ToLine int
+ // The set of line based edits to apply.
+ Lines []line
+}
+
+// Line represents a single line operation to apply as part of a Hunk.
+type line struct {
+ // Kind is the type of line this represents, deletion, insertion or copy.
+ Kind OpKind
+ // Content is the content of this line.
+ // For deletion it is the line being removed, for all others it is the line
+ // to put in the output.
+ Content string
+}
+
+// OpKind is used to denote the type of operation a line represents.
+// TODO(adonovan): hide this once the myers package no longer references it.
+type OpKind int
+
+const (
+ // Delete is the operation kind for a line that is present in the input
+ // but not in the output.
+ Delete OpKind = iota
+ // Insert is the operation kind for a line that is new in the output.
+ Insert
+ // Equal is the operation kind for a line that is the same in the input and
+ // output, often used to provide context around edited lines.
+ Equal
+)
+
+// String returns a human readable representation of an OpKind. It is not
+// intended for machine processing.
+func (k OpKind) String() string {
+ switch k {
+ case Delete:
+ return "delete"
+ case Insert:
+ return "insert"
+ case Equal:
+ return "equal"
+ default:
+ panic("unknown operation kind")
+ }
+}
+
+const (
+ edge = 3
+ gap = edge * 2
+)
+
+// toUnified takes a file contents and a sequence of edits, and calculates
+// a unified diff that represents those edits.
+func toUnified(fromName, toName string, content string, edits []Edit) (unified, error) {
+ u := unified{
+ From: fromName,
+ To: toName,
+ }
+ if len(edits) == 0 {
+ return u, nil
+ }
+ var err error
+ edits, err = lineEdits(content, edits) // expand to whole lines
+ if err != nil {
+ return u, err
+ }
+ lines := splitLines(content)
+ var h *hunk
+ last := 0
+ toLine := 0
+ for _, edit := range edits {
+ // Compute the zero-based line numbers of the edit start and end.
+ // TODO(adonovan): opt: compute incrementally, avoid O(n^2).
+ start := strings.Count(content[:edit.Start], "\n")
+ end := strings.Count(content[:edit.End], "\n")
+ if edit.End == len(content) && len(content) > 0 && content[len(content)-1] != '\n' {
+ end++ // EOF counts as an implicit newline
+ }
+
+ switch {
+ case h != nil && start == last:
+ //direct extension
+ case h != nil && start <= last+gap:
+ //within range of previous lines, add the joiners
+ addEqualLines(h, lines, last, start)
+ default:
+ //need to start a new hunk
+ if h != nil {
+ // add the edge to the previous hunk
+ addEqualLines(h, lines, last, last+edge)
+ u.Hunks = append(u.Hunks, h)
+ }
+ toLine += start - last
+ h = &hunk{
+ FromLine: start + 1,
+ ToLine: toLine + 1,
+ }
+ // add the edge to the new hunk
+ delta := addEqualLines(h, lines, start-edge, start)
+ h.FromLine -= delta
+ h.ToLine -= delta
+ }
+ last = start
+ for i := start; i < end; i++ {
+ h.Lines = append(h.Lines, line{Kind: Delete, Content: lines[i]})
+ last++
+ }
+ if edit.New != "" {
+ for _, content := range splitLines(edit.New) {
+ h.Lines = append(h.Lines, line{Kind: Insert, Content: content})
+ toLine++
+ }
+ }
+ }
+ if h != nil {
+ // add the edge to the final hunk
+ addEqualLines(h, lines, last, last+edge)
+ u.Hunks = append(u.Hunks, h)
+ }
+ return u, nil
+}
+
+func splitLines(text string) []string {
+ lines := strings.SplitAfter(text, "\n")
+ if lines[len(lines)-1] == "" {
+ lines = lines[:len(lines)-1]
+ }
+ return lines
+}
+
+func addEqualLines(h *hunk, lines []string, start, end int) int {
+ delta := 0
+ for i := start; i < end; i++ {
+ if i < 0 {
+ continue
+ }
+ if i >= len(lines) {
+ return delta
+ }
+ h.Lines = append(h.Lines, line{Kind: Equal, Content: lines[i]})
+ delta++
+ }
+ return delta
+}
+
+// String converts a unified diff to the standard textual form for that diff.
+// The output of this function can be passed to tools like patch.
+func (u unified) String() string {
+ if len(u.Hunks) == 0 {
+ return ""
+ }
+ b := new(strings.Builder)
+ fmt.Fprintf(b, "--- %s\n", u.From)
+ fmt.Fprintf(b, "+++ %s\n", u.To)
+ for _, hunk := range u.Hunks {
+ fromCount, toCount := 0, 0
+ for _, l := range hunk.Lines {
+ switch l.Kind {
+ case Delete:
+ fromCount++
+ case Insert:
+ toCount++
+ default:
+ fromCount++
+ toCount++
+ }
+ }
+ fmt.Fprint(b, "@@")
+ if fromCount > 1 {
+ fmt.Fprintf(b, " -%d,%d", hunk.FromLine, fromCount)
+ } else if hunk.FromLine == 1 && fromCount == 0 {
+ // Match odd GNU diff -u behavior adding to empty file.
+ fmt.Fprintf(b, " -0,0")
+ } else {
+ fmt.Fprintf(b, " -%d", hunk.FromLine)
+ }
+ if toCount > 1 {
+ fmt.Fprintf(b, " +%d,%d", hunk.ToLine, toCount)
+ } else {
+ fmt.Fprintf(b, " +%d", hunk.ToLine)
+ }
+ fmt.Fprint(b, " @@\n")
+ for _, l := range hunk.Lines {
+ switch l.Kind {
+ case Delete:
+ fmt.Fprintf(b, "-%s", l.Content)
+ case Insert:
+ fmt.Fprintf(b, "+%s", l.Content)
+ default:
+ fmt.Fprintf(b, " %s", l.Content)
+ }
+ if !strings.HasSuffix(l.Content, "\n") {
+ fmt.Fprintf(b, "\n\\ No newline at end of file\n")
+ }
+ }
+ }
+ return b.String()
+}
diff --git a/internal/event/export/ocagent/wire/metrics.go b/internal/event/export/ocagent/wire/metrics.go
index 4cfdb88bf..6cb58943c 100644
--- a/internal/event/export/ocagent/wire/metrics.go
+++ b/internal/event/export/ocagent/wire/metrics.go
@@ -71,9 +71,12 @@ type PointInt64Value struct {
// OpenCensus service can correctly determine the underlying value type.
// This custom MarshalJSON exists because,
// by default *Point is JSON marshalled as:
-// {"value": {"int64Value": 1}}
+//
+// {"value": {"int64Value": 1}}
+//
// but it should be marshalled as:
-// {"int64Value": 1}
+//
+// {"int64Value": 1}
func (p *Point) MarshalJSON() ([]byte, error) {
if p == nil {
return []byte("null"), nil
@@ -158,9 +161,12 @@ type bucketOptionsExplicitAlias BucketOptionsExplicit
// OpenCensus service can correctly determine the underlying value type.
// This custom MarshalJSON exists because,
// by default BucketOptionsExplicit is JSON marshalled as:
-// {"bounds":[1,2,3]}
+//
+// {"bounds":[1,2,3]}
+//
// but it should be marshalled as:
-// {"explicit":{"bounds":[1,2,3]}}
+//
+// {"explicit":{"bounds":[1,2,3]}}
func (be *BucketOptionsExplicit) MarshalJSON() ([]byte, error) {
return json.Marshal(&struct {
Explicit *bucketOptionsExplicitAlias `json:"explicit,omitempty"`
diff --git a/internal/event/export/trace.go b/internal/event/export/trace.go
index 1a99482f1..79aebbaca 100644
--- a/internal/event/export/trace.go
+++ b/internal/event/export/trace.go
@@ -90,7 +90,7 @@ func (s *SpanContext) Format(f fmt.State, r rune) {
}
func (s *Span) Start() core.Event {
- // start never changes after construction, so we dont need to hold the mutex
+ // start never changes after construction, so we don't need to hold the mutex
return s.start
}
diff --git a/internal/event/tag/tag.go b/internal/event/tag/tag.go
new file mode 100644
index 000000000..ff2f2ecd3
--- /dev/null
+++ b/internal/event/tag/tag.go
@@ -0,0 +1,59 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package tag provides the labels used for telemetry throughout gopls.
+package tag
+
+import (
+ "golang.org/x/tools/internal/event/keys"
+)
+
+var (
+ // create the label keys we use
+ Method = keys.NewString("method", "")
+ StatusCode = keys.NewString("status.code", "")
+ StatusMessage = keys.NewString("status.message", "")
+ RPCID = keys.NewString("id", "")
+ RPCDirection = keys.NewString("direction", "")
+ File = keys.NewString("file", "")
+ Directory = keys.New("directory", "")
+ URI = keys.New("URI", "")
+ Package = keys.NewString("package", "") // Package ID
+ PackagePath = keys.NewString("package_path", "")
+ Query = keys.New("query", "")
+ Snapshot = keys.NewUInt64("snapshot", "")
+ Operation = keys.NewString("operation", "")
+
+ Position = keys.New("position", "")
+ Category = keys.NewString("category", "")
+ PackageCount = keys.NewInt("packages", "")
+ Files = keys.New("files", "")
+ Port = keys.NewInt("port", "")
+ Type = keys.New("type", "")
+ HoverKind = keys.NewString("hoverkind", "")
+
+ NewServer = keys.NewString("new_server", "A new server was added")
+ EndServer = keys.NewString("end_server", "A server was shut down")
+
+ ServerID = keys.NewString("server", "The server ID an event is related to")
+ Logfile = keys.NewString("logfile", "")
+ DebugAddress = keys.NewString("debug_address", "")
+ GoplsPath = keys.NewString("gopls_path", "")
+ ClientID = keys.NewString("client_id", "")
+
+ Level = keys.NewInt("level", "The logging level")
+)
+
+var (
+ // create the stats we measure
+ Started = keys.NewInt64("started", "Count of started RPCs.")
+ ReceivedBytes = keys.NewInt64("received_bytes", "Bytes received.") //, unit.Bytes)
+ SentBytes = keys.NewInt64("sent_bytes", "Bytes sent.") //, unit.Bytes)
+ Latency = keys.NewFloat64("latency_ms", "Elapsed time in milliseconds") //, unit.Milliseconds)
+)
+
+const (
+ Inbound = "in"
+ Outbound = "out"
+)
diff --git a/internal/facts/facts.go b/internal/facts/facts.go
new file mode 100644
index 000000000..81df45161
--- /dev/null
+++ b/internal/facts/facts.go
@@ -0,0 +1,335 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package facts defines a serializable set of analysis.Fact.
+//
+// It provides a partial implementation of the Fact-related parts of the
+// analysis.Pass interface for use in analysis drivers such as "go vet"
+// and other build systems.
+//
+// The serial format is unspecified and may change, so the same version
+// of this package must be used for reading and writing serialized facts.
+//
+// The handling of facts in the analysis system parallels the handling
+// of type information in the compiler: during compilation of package P,
+// the compiler emits an export data file that describes the type of
+// every object (named thing) defined in package P, plus every object
+// indirectly reachable from one of those objects. Thus the downstream
+// compiler of package Q need only load one export data file per direct
+// import of Q, and it will learn everything about the API of package P
+// and everything it needs to know about the API of P's dependencies.
+//
+// Similarly, analysis of package P emits a fact set containing facts
+// about all objects exported from P, plus additional facts about only
+// those objects of P's dependencies that are reachable from the API of
+// package P; the downstream analysis of Q need only load one fact set
+// per direct import of Q.
+//
+// The notion of "exportedness" that matters here is that of the
+// compiler. According to the language spec, a method pkg.T.f is
+// unexported simply because its name starts with lowercase. But the
+// compiler must nonetheless export f so that downstream compilations can
+// accurately ascertain whether pkg.T implements an interface pkg.I
+// defined as interface{f()}. Exported thus means "described in export
+// data".
+package facts
+
+import (
+ "bytes"
+ "encoding/gob"
+ "fmt"
+ "go/types"
+ "io/ioutil"
+ "log"
+ "reflect"
+ "sort"
+ "sync"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/types/objectpath"
+)
+
+const debug = false
+
+// A Set is a set of analysis.Facts.
+//
+// Decode creates a Set of facts by reading from the imports of a given
+// package, and Encode writes out the set. Between these operation,
+// the Import and Export methods will query and update the set.
+//
+// All of Set's methods except String are safe to call concurrently.
+type Set struct {
+ pkg *types.Package
+ mu sync.Mutex
+ m map[key]analysis.Fact
+}
+
+type key struct {
+ pkg *types.Package
+ obj types.Object // (object facts only)
+ t reflect.Type
+}
+
+// ImportObjectFact implements analysis.Pass.ImportObjectFact.
+func (s *Set) ImportObjectFact(obj types.Object, ptr analysis.Fact) bool {
+ if obj == nil {
+ panic("nil object")
+ }
+ key := key{pkg: obj.Pkg(), obj: obj, t: reflect.TypeOf(ptr)}
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if v, ok := s.m[key]; ok {
+ reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
+ return true
+ }
+ return false
+}
+
+// ExportObjectFact implements analysis.Pass.ExportObjectFact.
+func (s *Set) ExportObjectFact(obj types.Object, fact analysis.Fact) {
+ if obj.Pkg() != s.pkg {
+ log.Panicf("in package %s: ExportObjectFact(%s, %T): can't set fact on object belonging another package",
+ s.pkg, obj, fact)
+ }
+ key := key{pkg: obj.Pkg(), obj: obj, t: reflect.TypeOf(fact)}
+ s.mu.Lock()
+ s.m[key] = fact // clobber any existing entry
+ s.mu.Unlock()
+}
+
+func (s *Set) AllObjectFacts(filter map[reflect.Type]bool) []analysis.ObjectFact {
+ var facts []analysis.ObjectFact
+ s.mu.Lock()
+ for k, v := range s.m {
+ if k.obj != nil && filter[k.t] {
+ facts = append(facts, analysis.ObjectFact{Object: k.obj, Fact: v})
+ }
+ }
+ s.mu.Unlock()
+ return facts
+}
+
+// ImportPackageFact implements analysis.Pass.ImportPackageFact.
+func (s *Set) ImportPackageFact(pkg *types.Package, ptr analysis.Fact) bool {
+ if pkg == nil {
+ panic("nil package")
+ }
+ key := key{pkg: pkg, t: reflect.TypeOf(ptr)}
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if v, ok := s.m[key]; ok {
+ reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
+ return true
+ }
+ return false
+}
+
+// ExportPackageFact implements analysis.Pass.ExportPackageFact.
+func (s *Set) ExportPackageFact(fact analysis.Fact) {
+ key := key{pkg: s.pkg, t: reflect.TypeOf(fact)}
+ s.mu.Lock()
+ s.m[key] = fact // clobber any existing entry
+ s.mu.Unlock()
+}
+
+func (s *Set) AllPackageFacts(filter map[reflect.Type]bool) []analysis.PackageFact {
+ var facts []analysis.PackageFact
+ s.mu.Lock()
+ for k, v := range s.m {
+ if k.obj == nil && filter[k.t] {
+ facts = append(facts, analysis.PackageFact{Package: k.pkg, Fact: v})
+ }
+ }
+ s.mu.Unlock()
+ return facts
+}
+
+// gobFact is the Gob declaration of a serialized fact.
+type gobFact struct {
+ PkgPath string // path of package
+ Object objectpath.Path // optional path of object relative to package itself
+ Fact analysis.Fact // type and value of user-defined Fact
+}
+
+// A Decoder decodes the facts from the direct imports of the package
+// provided to NewEncoder. A single decoder may be used to decode
+// multiple fact sets (e.g. each for a different set of fact types)
+// for the same package. Each call to Decode returns an independent
+// fact set.
+type Decoder struct {
+ pkg *types.Package
+ packages map[string]*types.Package
+}
+
+// NewDecoder returns a fact decoder for the specified package.
+func NewDecoder(pkg *types.Package) *Decoder {
+ // Compute the import map for this package.
+ // See the package doc comment.
+ return &Decoder{pkg, importMap(pkg.Imports())}
+}
+
+// Decode decodes all the facts relevant to the analysis of package pkg.
+// The read function reads serialized fact data from an external source
+// for one of of pkg's direct imports. The empty file is a valid
+// encoding of an empty fact set.
+//
+// It is the caller's responsibility to call gob.Register on all
+// necessary fact types.
+func (d *Decoder) Decode(read func(*types.Package) ([]byte, error)) (*Set, error) {
+ // Read facts from imported packages.
+ // Facts may describe indirectly imported packages, or their objects.
+ m := make(map[key]analysis.Fact) // one big bucket
+ for _, imp := range d.pkg.Imports() {
+ logf := func(format string, args ...interface{}) {
+ if debug {
+ prefix := fmt.Sprintf("in %s, importing %s: ",
+ d.pkg.Path(), imp.Path())
+ log.Print(prefix, fmt.Sprintf(format, args...))
+ }
+ }
+
+ // Read the gob-encoded facts.
+ data, err := read(imp)
+ if err != nil {
+ return nil, fmt.Errorf("in %s, can't import facts for package %q: %v",
+ d.pkg.Path(), imp.Path(), err)
+ }
+ if len(data) == 0 {
+ continue // no facts
+ }
+ var gobFacts []gobFact
+ if err := gob.NewDecoder(bytes.NewReader(data)).Decode(&gobFacts); err != nil {
+ return nil, fmt.Errorf("decoding facts for %q: %v", imp.Path(), err)
+ }
+ if debug {
+ logf("decoded %d facts: %v", len(gobFacts), gobFacts)
+ }
+
+ // Parse each one into a key and a Fact.
+ for _, f := range gobFacts {
+ factPkg := d.packages[f.PkgPath]
+ if factPkg == nil {
+ // Fact relates to a dependency that was
+ // unused in this translation unit. Skip.
+ logf("no package %q; discarding %v", f.PkgPath, f.Fact)
+ continue
+ }
+ key := key{pkg: factPkg, t: reflect.TypeOf(f.Fact)}
+ if f.Object != "" {
+ // object fact
+ obj, err := objectpath.Object(factPkg, f.Object)
+ if err != nil {
+ // (most likely due to unexported object)
+ // TODO(adonovan): audit for other possibilities.
+ logf("no object for path: %v; discarding %s", err, f.Fact)
+ continue
+ }
+ key.obj = obj
+ logf("read %T fact %s for %v", f.Fact, f.Fact, key.obj)
+ } else {
+ // package fact
+ logf("read %T fact %s for %v", f.Fact, f.Fact, factPkg)
+ }
+ m[key] = f.Fact
+ }
+ }
+
+ return &Set{pkg: d.pkg, m: m}, nil
+}
+
+// Encode encodes a set of facts to a memory buffer.
+//
+// It may fail if one of the Facts could not be gob-encoded, but this is
+// a sign of a bug in an Analyzer.
+func (s *Set) Encode() []byte {
+
+ // TODO(adonovan): opt: use a more efficient encoding
+ // that avoids repeating PkgPath for each fact.
+
+ // Gather all facts, including those from imported packages.
+ var gobFacts []gobFact
+
+ s.mu.Lock()
+ for k, fact := range s.m {
+ if debug {
+ log.Printf("%v => %s\n", k, fact)
+ }
+ var object objectpath.Path
+ if k.obj != nil {
+ path, err := objectpath.For(k.obj)
+ if err != nil {
+ if debug {
+ log.Printf("discarding fact %s about %s\n", fact, k.obj)
+ }
+ continue // object not accessible from package API; discard fact
+ }
+ object = path
+ }
+ gobFacts = append(gobFacts, gobFact{
+ PkgPath: k.pkg.Path(),
+ Object: object,
+ Fact: fact,
+ })
+ }
+ s.mu.Unlock()
+
+ // Sort facts by (package, object, type) for determinism.
+ sort.Slice(gobFacts, func(i, j int) bool {
+ x, y := gobFacts[i], gobFacts[j]
+ if x.PkgPath != y.PkgPath {
+ return x.PkgPath < y.PkgPath
+ }
+ if x.Object != y.Object {
+ return x.Object < y.Object
+ }
+ tx := reflect.TypeOf(x.Fact)
+ ty := reflect.TypeOf(y.Fact)
+ if tx != ty {
+ return tx.String() < ty.String()
+ }
+ return false // equal
+ })
+
+ var buf bytes.Buffer
+ if len(gobFacts) > 0 {
+ if err := gob.NewEncoder(&buf).Encode(gobFacts); err != nil {
+ // Fact encoding should never fail. Identify the culprit.
+ for _, gf := range gobFacts {
+ if err := gob.NewEncoder(ioutil.Discard).Encode(gf); err != nil {
+ fact := gf.Fact
+ pkgpath := reflect.TypeOf(fact).Elem().PkgPath()
+ log.Panicf("internal error: gob encoding of analysis fact %s failed: %v; please report a bug against fact %T in package %q",
+ fact, err, fact, pkgpath)
+ }
+ }
+ }
+ }
+
+ if debug {
+ log.Printf("package %q: encode %d facts, %d bytes\n",
+ s.pkg.Path(), len(gobFacts), buf.Len())
+ }
+
+ return buf.Bytes()
+}
+
+// String is provided only for debugging, and must not be called
+// concurrent with any Import/Export method.
+func (s *Set) String() string {
+ var buf bytes.Buffer
+ buf.WriteString("{")
+ for k, f := range s.m {
+ if buf.Len() > 1 {
+ buf.WriteString(", ")
+ }
+ if k.obj != nil {
+ buf.WriteString(k.obj.String())
+ } else {
+ buf.WriteString(k.pkg.Path())
+ }
+ fmt.Fprintf(&buf, ": %v", f)
+ }
+ buf.WriteString("}")
+ return buf.String()
+}
diff --git a/internal/facts/facts_test.go b/internal/facts/facts_test.go
new file mode 100644
index 000000000..ad8751539
--- /dev/null
+++ b/internal/facts/facts_test.go
@@ -0,0 +1,564 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package facts_test
+
+import (
+ "encoding/gob"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "os"
+ "reflect"
+ "strings"
+ "testing"
+
+ "golang.org/x/tools/go/analysis/analysistest"
+ "golang.org/x/tools/go/packages"
+ "golang.org/x/tools/internal/facts"
+ "golang.org/x/tools/internal/testenv"
+ "golang.org/x/tools/internal/typeparams"
+)
+
+type myFact struct {
+ S string
+}
+
+func (f *myFact) String() string { return fmt.Sprintf("myFact(%s)", f.S) }
+func (f *myFact) AFact() {}
+
+func init() {
+ gob.Register(new(myFact))
+}
+
+func TestEncodeDecode(t *testing.T) {
+ tests := []struct {
+ name string
+ typeparams bool // requires typeparams to be enabled
+ files map[string]string
+ plookups []pkgLookups // see testEncodeDecode for details
+ }{
+ {
+ name: "loading-order",
+ // c -> b -> a, a2
+ // c does not directly depend on a, but it indirectly uses a.T.
+ //
+ // Package a2 is never loaded directly so it is incomplete.
+ //
+ // We use only types in this example because we rely on
+ // types.Eval to resolve the lookup expressions, and it only
+ // works for types. This is a definite gap in the typechecker API.
+ files: map[string]string{
+ "a/a.go": `package a; type A int; type T int`,
+ "a2/a.go": `package a2; type A2 int; type Unneeded int`,
+ "b/b.go": `package b; import ("a"; "a2"); type B chan a2.A2; type F func() a.T`,
+ "c/c.go": `package c; import "b"; type C []b.B`,
+ },
+ // In the following table, we analyze packages (a, b, c) in order,
+ // look up various objects accessible within each package,
+ // and see if they have a fact. The "analysis" exports a fact
+ // for every object at package level.
+ //
+ // Note: Loop iterations are not independent test cases;
+ // order matters, as we populate factmap.
+ plookups: []pkgLookups{
+ {"a", []lookup{
+ {"A", "myFact(a.A)"},
+ }},
+ {"b", []lookup{
+ {"a.A", "myFact(a.A)"},
+ {"a.T", "myFact(a.T)"},
+ {"B", "myFact(b.B)"},
+ {"F", "myFact(b.F)"},
+ {"F(nil)()", "myFact(a.T)"}, // (result type of b.F)
+ }},
+ {"c", []lookup{
+ {"b.B", "myFact(b.B)"},
+ {"b.F", "myFact(b.F)"},
+ {"b.F(nil)()", "myFact(a.T)"},
+ {"C", "myFact(c.C)"},
+ {"C{}[0]", "myFact(b.B)"},
+ {"<-(C{}[0])", "no fact"}, // object but no fact (we never "analyze" a2)
+ }},
+ },
+ },
+ {
+ name: "underlying",
+ // c->b->a
+ // c does not import a directly or use any of its types, but it does use
+ // the types within a indirectly. c.q has the type a.a so package a should
+ // be included by importMap.
+ files: map[string]string{
+ "a/a.go": `package a; type a int; type T *a`,
+ "b/b.go": `package b; import "a"; type B a.T`,
+ "c/c.go": `package c; import "b"; type C b.B; var q = *C(nil)`,
+ },
+ plookups: []pkgLookups{
+ {"a", []lookup{
+ {"a", "myFact(a.a)"},
+ {"T", "myFact(a.T)"},
+ }},
+ {"b", []lookup{
+ {"B", "myFact(b.B)"},
+ {"B(nil)", "myFact(b.B)"},
+ {"*(B(nil))", "myFact(a.a)"},
+ }},
+ {"c", []lookup{
+ {"C", "myFact(c.C)"},
+ {"C(nil)", "myFact(c.C)"},
+ {"*C(nil)", "myFact(a.a)"},
+ {"q", "myFact(a.a)"},
+ }},
+ },
+ },
+ {
+ name: "methods",
+ // c->b->a
+ // c does not import a directly or use any of its types, but it does use
+ // the types within a indirectly via a method.
+ files: map[string]string{
+ "a/a.go": `package a; type T int`,
+ "b/b.go": `package b; import "a"; type B struct{}; func (_ B) M() a.T { return 0 }`,
+ "c/c.go": `package c; import "b"; var C b.B`,
+ },
+ plookups: []pkgLookups{
+ {"a", []lookup{
+ {"T", "myFact(a.T)"},
+ }},
+ {"b", []lookup{
+ {"B{}", "myFact(b.B)"},
+ {"B{}.M()", "myFact(a.T)"},
+ }},
+ {"c", []lookup{
+ {"C", "myFact(b.B)"},
+ {"C.M()", "myFact(a.T)"},
+ }},
+ },
+ },
+ {
+ name: "globals",
+ files: map[string]string{
+ "a/a.go": `package a;
+ type T1 int
+ type T2 int
+ type T3 int
+ type T4 int
+ type T5 int
+ type K int; type V string
+ `,
+ "b/b.go": `package b
+ import "a"
+ var (
+ G1 []a.T1
+ G2 [7]a.T2
+ G3 chan a.T3
+ G4 *a.T4
+ G5 struct{ F a.T5 }
+ G6 map[a.K]a.V
+ )
+ `,
+ "c/c.go": `package c; import "b";
+ var (
+ v1 = b.G1
+ v2 = b.G2
+ v3 = b.G3
+ v4 = b.G4
+ v5 = b.G5
+ v6 = b.G6
+ )
+ `,
+ },
+ plookups: []pkgLookups{
+ {"a", []lookup{}},
+ {"b", []lookup{}},
+ {"c", []lookup{
+ {"v1[0]", "myFact(a.T1)"},
+ {"v2[0]", "myFact(a.T2)"},
+ {"<-v3", "myFact(a.T3)"},
+ {"*v4", "myFact(a.T4)"},
+ {"v5.F", "myFact(a.T5)"},
+ {"v6[0]", "myFact(a.V)"},
+ }},
+ },
+ },
+ {
+ name: "typeparams",
+ typeparams: true,
+ files: map[string]string{
+ "a/a.go": `package a
+ type T1 int
+ type T2 int
+ type T3 interface{Foo()}
+ type T4 int
+ type T5 int
+ type T6 interface{Foo()}
+ `,
+ "b/b.go": `package b
+ import "a"
+ type N1[T a.T1|int8] func() T
+ type N2[T any] struct{ F T }
+ type N3[T a.T3] func() T
+ type N4[T a.T4|int8] func() T
+ type N5[T interface{Bar() a.T5} ] func() T
+
+ type t5 struct{}; func (t5) Bar() a.T5 { return 0 }
+
+ var G1 N1[a.T1]
+ var G2 func() N2[a.T2]
+ var G3 N3[a.T3]
+ var G4 N4[a.T4]
+ var G5 N5[t5]
+
+ func F6[T a.T6]() T { var x T; return x }
+ `,
+ "c/c.go": `package c; import "b";
+ var (
+ v1 = b.G1
+ v2 = b.G2
+ v3 = b.G3
+ v4 = b.G4
+ v5 = b.G5
+ v6 = b.F6[t6]
+ )
+
+ type t6 struct{}; func (t6) Foo() {}
+ `,
+ },
+ plookups: []pkgLookups{
+ {"a", []lookup{}},
+ {"b", []lookup{}},
+ {"c", []lookup{
+ {"v1", "myFact(b.N1)"},
+ {"v1()", "myFact(a.T1)"},
+ {"v2()", "myFact(b.N2)"},
+ {"v2().F", "myFact(a.T2)"},
+ {"v3", "myFact(b.N3)"},
+ {"v4", "myFact(b.N4)"},
+ {"v4()", "myFact(a.T4)"},
+ {"v5", "myFact(b.N5)"},
+ {"v5()", "myFact(b.t5)"},
+ {"v6()", "myFact(c.t6)"},
+ }},
+ },
+ },
+ }
+
+ for i := range tests {
+ test := tests[i]
+ t.Run(test.name, func(t *testing.T) {
+ t.Parallel()
+ if test.typeparams && !typeparams.Enabled {
+ t.Skip("type parameters are not enabled")
+ }
+ testEncodeDecode(t, test.files, test.plookups)
+ })
+ }
+}
+
+type lookup struct {
+ objexpr string
+ want string
+}
+
+type pkgLookups struct {
+ path string
+ lookups []lookup
+}
+
+// testEncodeDecode tests fact encoding and decoding and simulates how package facts
+// are passed during analysis. It operates on a group of Go file contents. Then
+// for each <package, []lookup> in tests it does the following:
+// 1. loads and type checks the package,
+// 2. calls (*facts.Decoder).Decode to load the facts exported by its imports,
+// 3. exports a myFact Fact for all of package level objects,
+// 4. For each lookup for the current package:
+// 4.a) lookup the types.Object for an Go source expression in the curent package
+// (or confirms one is not expected want=="no object"),
+// 4.b) finds a Fact for the object (or confirms one is not expected want=="no fact"),
+// 4.c) compares the content of the Fact to want.
+// 5. encodes the Facts of the package.
+//
+// Note: tests are not independent test cases; order matters (as does a package being
+// skipped). It changes what Facts can be imported.
+//
+// Failures are reported on t.
+func testEncodeDecode(t *testing.T, files map[string]string, tests []pkgLookups) {
+ dir, cleanup, err := analysistest.WriteFiles(files)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer cleanup()
+
+ // factmap represents the passing of encoded facts from one
+ // package to another. In practice one would use the file system.
+ factmap := make(map[string][]byte)
+ read := func(imp *types.Package) ([]byte, error) { return factmap[imp.Path()], nil }
+
+ // Analyze packages in order, look up various objects accessible within
+ // each package, and see if they have a fact. The "analysis" exports a
+ // fact for every object at package level.
+ //
+ // Note: Loop iterations are not independent test cases;
+ // order matters, as we populate factmap.
+ for _, test := range tests {
+ // load package
+ pkg, err := load(t, dir, test.path)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // decode
+ facts, err := facts.NewDecoder(pkg).Decode(read)
+ if err != nil {
+ t.Fatalf("Decode failed: %v", err)
+ }
+ t.Logf("decode %s facts = %v", pkg.Path(), facts) // show all facts
+
+ // export
+ // (one fact for each package-level object)
+ for _, name := range pkg.Scope().Names() {
+ obj := pkg.Scope().Lookup(name)
+ fact := &myFact{obj.Pkg().Name() + "." + obj.Name()}
+ facts.ExportObjectFact(obj, fact)
+ }
+ t.Logf("exported %s facts = %v", pkg.Path(), facts) // show all facts
+
+ // import
+ // (after export, because an analyzer may import its own facts)
+ for _, lookup := range test.lookups {
+ fact := new(myFact)
+ var got string
+ if obj := find(pkg, lookup.objexpr); obj == nil {
+ got = "no object"
+ } else if facts.ImportObjectFact(obj, fact) {
+ got = fact.String()
+ } else {
+ got = "no fact"
+ }
+ if got != lookup.want {
+ t.Errorf("in %s, ImportObjectFact(%s, %T) = %s, want %s",
+ pkg.Path(), lookup.objexpr, fact, got, lookup.want)
+ }
+ }
+
+ // encode
+ factmap[pkg.Path()] = facts.Encode()
+ }
+}
+
+func find(p *types.Package, expr string) types.Object {
+ // types.Eval only allows us to compute a TypeName object for an expression.
+ // TODO(adonovan): support other expressions that denote an object:
+ // - an identifier (or qualified ident) for a func, const, or var
+ // - new(T).f for a field or method
+ // I've added CheckExpr in https://go-review.googlesource.com/c/go/+/144677.
+ // If that becomes available, use it.
+
+ // Choose an arbitrary position within the (single-file) package
+ // so that we are within the scope of its import declarations.
+ somepos := p.Scope().Lookup(p.Scope().Names()[0]).Pos()
+ tv, err := types.Eval(token.NewFileSet(), p, somepos, expr)
+ if err != nil {
+ return nil
+ }
+ if n, ok := tv.Type.(*types.Named); ok {
+ return n.Obj()
+ }
+ return nil
+}
+
+func load(t *testing.T, dir string, path string) (*types.Package, error) {
+ cfg := &packages.Config{
+ Mode: packages.LoadSyntax,
+ Dir: dir,
+ Env: append(os.Environ(), "GOPATH="+dir, "GO111MODULE=off", "GOPROXY=off"),
+ }
+ testenv.NeedsGoPackagesEnv(t, cfg.Env)
+ pkgs, err := packages.Load(cfg, path)
+ if err != nil {
+ return nil, err
+ }
+ if packages.PrintErrors(pkgs) > 0 {
+ return nil, fmt.Errorf("packages had errors")
+ }
+ if len(pkgs) == 0 {
+ return nil, fmt.Errorf("no package matched %s", path)
+ }
+ return pkgs[0].Types, nil
+}
+
+type otherFact struct {
+ S string
+}
+
+func (f *otherFact) String() string { return fmt.Sprintf("otherFact(%s)", f.S) }
+func (f *otherFact) AFact() {}
+
+func TestFactFilter(t *testing.T) {
+ files := map[string]string{
+ "a/a.go": `package a; type A int`,
+ }
+ dir, cleanup, err := analysistest.WriteFiles(files)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer cleanup()
+
+ pkg, err := load(t, dir, "a")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ obj := pkg.Scope().Lookup("A")
+ s, err := facts.NewDecoder(pkg).Decode(func(*types.Package) ([]byte, error) { return nil, nil })
+ if err != nil {
+ t.Fatal(err)
+ }
+ s.ExportObjectFact(obj, &myFact{"good object fact"})
+ s.ExportPackageFact(&myFact{"good package fact"})
+ s.ExportObjectFact(obj, &otherFact{"bad object fact"})
+ s.ExportPackageFact(&otherFact{"bad package fact"})
+
+ filter := map[reflect.Type]bool{
+ reflect.TypeOf(&myFact{}): true,
+ }
+
+ pkgFacts := s.AllPackageFacts(filter)
+ wantPkgFacts := `[{package a ("a") myFact(good package fact)}]`
+ if got := fmt.Sprintf("%v", pkgFacts); got != wantPkgFacts {
+ t.Errorf("AllPackageFacts: got %v, want %v", got, wantPkgFacts)
+ }
+
+ objFacts := s.AllObjectFacts(filter)
+ wantObjFacts := "[{type a.A int myFact(good object fact)}]"
+ if got := fmt.Sprintf("%v", objFacts); got != wantObjFacts {
+ t.Errorf("AllObjectFacts: got %v, want %v", got, wantObjFacts)
+ }
+}
+
+// TestMalformed checks that facts can be encoded and decoded *despite*
+// types.Config.Check returning an error. Importing facts is expected to
+// happen when Analyzers have RunDespiteErrors set to true. So this
+// needs to robust, e.g. no infinite loops.
+func TestMalformed(t *testing.T) {
+ if !typeparams.Enabled {
+ t.Skip("type parameters are not enabled")
+ }
+ var findPkg func(*types.Package, string) *types.Package
+ findPkg = func(p *types.Package, name string) *types.Package {
+ if p.Name() == name {
+ return p
+ }
+ for _, o := range p.Imports() {
+ if f := findPkg(o, name); f != nil {
+ return f
+ }
+ }
+ return nil
+ }
+
+ type pkgTest struct {
+ content string
+ err string // if non-empty, expected substring of err.Error() from conf.Check().
+ wants map[string]string // package path to expected name
+ }
+ tests := []struct {
+ name string
+ pkgs []pkgTest
+ }{
+ {
+ name: "initialization-cycle",
+ pkgs: []pkgTest{
+ {
+ content: `package a; type N[T any] struct { F *N[N[T]] }`,
+ err: "instantiation cycle:",
+ wants: map[string]string{"a": "myFact(a.[N])", "b": "no package", "c": "no package"},
+ },
+ {
+ content: `package b; import "a"; type B a.N[int]`,
+ wants: map[string]string{"a": "myFact(a.[N])", "b": "myFact(b.[B])", "c": "no package"},
+ },
+ {
+ content: `package c; import "b"; var C b.B`,
+ wants: map[string]string{"a": "myFact(a.[N])", "b": "myFact(b.[B])", "c": "myFact(c.[C])"},
+ },
+ },
+ },
+ }
+
+ for i := range tests {
+ test := tests[i]
+ t.Run(test.name, func(t *testing.T) {
+ t.Parallel()
+
+ // setup for test wide variables.
+ packages := make(map[string]*types.Package)
+ conf := types.Config{
+ Importer: closure(packages),
+ Error: func(err error) {}, // do not stop on first type checking error
+ }
+ fset := token.NewFileSet()
+ factmap := make(map[string][]byte)
+ read := func(imp *types.Package) ([]byte, error) { return factmap[imp.Path()], nil }
+
+ // Processes the pkgs in order. For package, export a package fact,
+ // and use this fact to verify which package facts are reachable via Decode.
+ // We allow for packages to have type checking errors.
+ for i, pkgTest := range test.pkgs {
+ // parse
+ f, err := parser.ParseFile(fset, fmt.Sprintf("%d.go", i), pkgTest.content, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // typecheck
+ pkg, err := conf.Check(f.Name.Name, fset, []*ast.File{f}, nil)
+ var got string
+ if err != nil {
+ got = err.Error()
+ }
+ if !strings.Contains(got, pkgTest.err) {
+ t.Fatalf("%s: type checking error %q did not match pattern %q", pkg.Path(), err.Error(), pkgTest.err)
+ }
+ packages[pkg.Path()] = pkg
+
+ // decode facts
+ facts, err := facts.NewDecoder(pkg).Decode(read)
+ if err != nil {
+ t.Fatalf("Decode failed: %v", err)
+ }
+
+ // export facts
+ fact := &myFact{fmt.Sprintf("%s.%s", pkg.Name(), pkg.Scope().Names())}
+ facts.ExportPackageFact(fact)
+
+ // import facts
+ for other, want := range pkgTest.wants {
+ fact := new(myFact)
+ var got string
+ if found := findPkg(pkg, other); found == nil {
+ got = "no package"
+ } else if facts.ImportPackageFact(found, fact) {
+ got = fact.String()
+ } else {
+ got = "no fact"
+ }
+ if got != want {
+ t.Errorf("in %s, ImportPackageFact(%s, %T) = %s, want %s",
+ pkg.Path(), other, fact, got, want)
+ }
+ }
+
+ // encode facts
+ factmap[pkg.Path()] = facts.Encode()
+ }
+ })
+ }
+}
+
+type closure map[string]*types.Package
+
+func (c closure) Import(path string) (*types.Package, error) { return c[path], nil }
diff --git a/internal/facts/imports.go b/internal/facts/imports.go
new file mode 100644
index 000000000..7b2166866
--- /dev/null
+++ b/internal/facts/imports.go
@@ -0,0 +1,130 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package facts
+
+import (
+ "go/types"
+
+ "golang.org/x/tools/internal/typeparams"
+)
+
+// importMap computes the import map for a package by traversing the
+// entire exported API each of its imports.
+//
+// This is a workaround for the fact that we cannot access the map used
+// internally by the types.Importer returned by go/importer. The entries
+// in this map are the packages and objects that may be relevant to the
+// current analysis unit.
+//
+// Packages in the map that are only indirectly imported may be
+// incomplete (!pkg.Complete()).
+//
+// TODO(adonovan): opt: compute this information more efficiently
+// by obtaining it from the internals of the gcexportdata decoder.
+func importMap(imports []*types.Package) map[string]*types.Package {
+ objects := make(map[types.Object]bool)
+ typs := make(map[types.Type]bool) // Named and TypeParam
+ packages := make(map[string]*types.Package)
+
+ var addObj func(obj types.Object)
+ var addType func(T types.Type)
+
+ addObj = func(obj types.Object) {
+ if !objects[obj] {
+ objects[obj] = true
+ addType(obj.Type())
+ if pkg := obj.Pkg(); pkg != nil {
+ packages[pkg.Path()] = pkg
+ }
+ }
+ }
+
+ addType = func(T types.Type) {
+ switch T := T.(type) {
+ case *types.Basic:
+ // nop
+ case *types.Named:
+ // Remove infinite expansions of *types.Named by always looking at the origin.
+ // Some named types with type parameters [that will not type check] have
+ // infinite expansions:
+ // type N[T any] struct { F *N[N[T]] }
+ // importMap() is called on such types when Analyzer.RunDespiteErrors is true.
+ T = typeparams.NamedTypeOrigin(T).(*types.Named)
+ if !typs[T] {
+ typs[T] = true
+ addObj(T.Obj())
+ addType(T.Underlying())
+ for i := 0; i < T.NumMethods(); i++ {
+ addObj(T.Method(i))
+ }
+ if tparams := typeparams.ForNamed(T); tparams != nil {
+ for i := 0; i < tparams.Len(); i++ {
+ addType(tparams.At(i))
+ }
+ }
+ if targs := typeparams.NamedTypeArgs(T); targs != nil {
+ for i := 0; i < targs.Len(); i++ {
+ addType(targs.At(i))
+ }
+ }
+ }
+ case *types.Pointer:
+ addType(T.Elem())
+ case *types.Slice:
+ addType(T.Elem())
+ case *types.Array:
+ addType(T.Elem())
+ case *types.Chan:
+ addType(T.Elem())
+ case *types.Map:
+ addType(T.Key())
+ addType(T.Elem())
+ case *types.Signature:
+ addType(T.Params())
+ addType(T.Results())
+ if tparams := typeparams.ForSignature(T); tparams != nil {
+ for i := 0; i < tparams.Len(); i++ {
+ addType(tparams.At(i))
+ }
+ }
+ case *types.Struct:
+ for i := 0; i < T.NumFields(); i++ {
+ addObj(T.Field(i))
+ }
+ case *types.Tuple:
+ for i := 0; i < T.Len(); i++ {
+ addObj(T.At(i))
+ }
+ case *types.Interface:
+ for i := 0; i < T.NumMethods(); i++ {
+ addObj(T.Method(i))
+ }
+ for i := 0; i < T.NumEmbeddeds(); i++ {
+ addType(T.EmbeddedType(i)) // walk Embedded for implicits
+ }
+ case *typeparams.Union:
+ for i := 0; i < T.Len(); i++ {
+ addType(T.Term(i).Type())
+ }
+ case *typeparams.TypeParam:
+ if !typs[T] {
+ typs[T] = true
+ addObj(T.Obj())
+ addType(T.Constraint())
+ }
+ }
+ }
+
+ for _, imp := range imports {
+ packages[imp.Path()] = imp
+
+ scope := imp.Scope()
+ for _, name := range scope.Names() {
+ addObj(scope.Lookup(name))
+ }
+ }
+
+ return packages
+}
diff --git a/internal/fastwalk/fastwalk.go b/internal/fastwalk/fastwalk.go
index 9887f7e7a..798fe599b 100644
--- a/internal/fastwalk/fastwalk.go
+++ b/internal/fastwalk/fastwalk.go
@@ -40,12 +40,12 @@ var ErrSkipFiles = errors.New("fastwalk: skip remaining files in directory")
// If fastWalk returns filepath.SkipDir, the directory is skipped.
//
// Unlike filepath.Walk:
-// * file stat calls must be done by the user.
+// - file stat calls must be done by the user.
// The only provided metadata is the file type, which does not include
// any permission bits.
-// * multiple goroutines stat the filesystem concurrently. The provided
+// - multiple goroutines stat the filesystem concurrently. The provided
// walkFn must be safe for concurrent use.
-// * fastWalk can follow symlinks if walkFn returns the TraverseLink
+// - fastWalk can follow symlinks if walkFn returns the TraverseLink
// sentinel error. It is the walkFn's responsibility to prevent
// fastWalk from going into symlink cycles.
func Walk(root string, walkFn func(path string, typ os.FileMode) error) error {
diff --git a/internal/fastwalk/fastwalk_darwin.go b/internal/fastwalk/fastwalk_darwin.go
new file mode 100644
index 000000000..0ca55e0d5
--- /dev/null
+++ b/internal/fastwalk/fastwalk_darwin.go
@@ -0,0 +1,119 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build darwin && cgo
+// +build darwin,cgo
+
+package fastwalk
+
+/*
+#include <dirent.h>
+
+// fastwalk_readdir_r wraps readdir_r so that we don't have to pass a dirent**
+// result pointer which triggers CGO's "Go pointer to Go pointer" check unless
+// we allocat the result dirent* with malloc.
+//
+// fastwalk_readdir_r returns 0 on success, -1 upon reaching the end of the
+// directory, or a positive error number to indicate failure.
+static int fastwalk_readdir_r(DIR *fd, struct dirent *entry) {
+ struct dirent *result;
+ int ret = readdir_r(fd, entry, &result);
+ if (ret == 0 && result == NULL) {
+ ret = -1; // EOF
+ }
+ return ret;
+}
+*/
+import "C"
+
+import (
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error {
+ fd, err := openDir(dirName)
+ if err != nil {
+ return &os.PathError{Op: "opendir", Path: dirName, Err: err}
+ }
+ defer C.closedir(fd)
+
+ skipFiles := false
+ var dirent syscall.Dirent
+ for {
+ ret := int(C.fastwalk_readdir_r(fd, (*C.struct_dirent)(unsafe.Pointer(&dirent))))
+ if ret != 0 {
+ if ret == -1 {
+ break // EOF
+ }
+ if ret == int(syscall.EINTR) {
+ continue
+ }
+ return &os.PathError{Op: "readdir", Path: dirName, Err: syscall.Errno(ret)}
+ }
+ if dirent.Ino == 0 {
+ continue
+ }
+ typ := dtToType(dirent.Type)
+ if skipFiles && typ.IsRegular() {
+ continue
+ }
+ name := (*[len(syscall.Dirent{}.Name)]byte)(unsafe.Pointer(&dirent.Name))[:]
+ name = name[:dirent.Namlen]
+ for i, c := range name {
+ if c == 0 {
+ name = name[:i]
+ break
+ }
+ }
+ // Check for useless names before allocating a string.
+ if string(name) == "." || string(name) == ".." {
+ continue
+ }
+ if err := fn(dirName, string(name), typ); err != nil {
+ if err != ErrSkipFiles {
+ return err
+ }
+ skipFiles = true
+ }
+ }
+
+ return nil
+}
+
+func dtToType(typ uint8) os.FileMode {
+ switch typ {
+ case syscall.DT_BLK:
+ return os.ModeDevice
+ case syscall.DT_CHR:
+ return os.ModeDevice | os.ModeCharDevice
+ case syscall.DT_DIR:
+ return os.ModeDir
+ case syscall.DT_FIFO:
+ return os.ModeNamedPipe
+ case syscall.DT_LNK:
+ return os.ModeSymlink
+ case syscall.DT_REG:
+ return 0
+ case syscall.DT_SOCK:
+ return os.ModeSocket
+ }
+ return ^os.FileMode(0)
+}
+
+// openDir wraps opendir(3) and handles any EINTR errors. The returned *DIR
+// needs to be closed with closedir(3).
+func openDir(path string) (*C.DIR, error) {
+ name, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return nil, err
+ }
+ for {
+ fd, err := C.opendir((*C.char)(unsafe.Pointer(name)))
+ if err != syscall.EINTR {
+ return fd, err
+ }
+ }
+}
diff --git a/internal/fastwalk/fastwalk_dirent_ino.go b/internal/fastwalk/fastwalk_dirent_ino.go
index ea02b9ebf..d3922890b 100644
--- a/internal/fastwalk/fastwalk_dirent_ino.go
+++ b/internal/fastwalk/fastwalk_dirent_ino.go
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build (linux || darwin) && !appengine
-// +build linux darwin
+//go:build (linux || (darwin && !cgo)) && !appengine
+// +build linux darwin,!cgo
// +build !appengine
package fastwalk
@@ -11,5 +11,5 @@ package fastwalk
import "syscall"
func direntInode(dirent *syscall.Dirent) uint64 {
- return uint64(dirent.Ino)
+ return dirent.Ino
}
diff --git a/internal/fastwalk/fastwalk_dirent_namlen_bsd.go b/internal/fastwalk/fastwalk_dirent_namlen_bsd.go
index d5c9c321e..38a4db6af 100644
--- a/internal/fastwalk/fastwalk_dirent_namlen_bsd.go
+++ b/internal/fastwalk/fastwalk_dirent_namlen_bsd.go
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build darwin || freebsd || openbsd || netbsd
-// +build darwin freebsd openbsd netbsd
+//go:build (darwin && !cgo) || freebsd || openbsd || netbsd
+// +build darwin,!cgo freebsd openbsd netbsd
package fastwalk
diff --git a/internal/fastwalk/fastwalk_unix.go b/internal/fastwalk/fastwalk_unix.go
index 58bd87841..f12f1a734 100644
--- a/internal/fastwalk/fastwalk_unix.go
+++ b/internal/fastwalk/fastwalk_unix.go
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build (linux || darwin || freebsd || openbsd || netbsd) && !appengine
-// +build linux darwin freebsd openbsd netbsd
+//go:build (linux || freebsd || openbsd || netbsd || (darwin && !cgo)) && !appengine
+// +build linux freebsd openbsd netbsd darwin,!cgo
// +build !appengine
package fastwalk
diff --git a/internal/lsp/fuzzy/input.go b/internal/fuzzy/input.go
index c1038163f..c1038163f 100644
--- a/internal/lsp/fuzzy/input.go
+++ b/internal/fuzzy/input.go
diff --git a/internal/fuzzy/input_test.go b/internal/fuzzy/input_test.go
new file mode 100644
index 000000000..64f66e363
--- /dev/null
+++ b/internal/fuzzy/input_test.go
@@ -0,0 +1,141 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fuzzy_test
+
+import (
+ "bytes"
+ "sort"
+ "testing"
+
+ "golang.org/x/tools/internal/fuzzy"
+)
+
+var rolesTests = []struct {
+ str string
+ want string
+}{
+ {str: "abc::def::goo", want: "Ccc//Ccc//Ccc"},
+ {str: "proto::Message", want: "Ccccc//Ccccccc"},
+ {str: "AbstractSWTFactory", want: "CcccccccCuuCcccccc"},
+ {str: "Abs012", want: "Cccccc"},
+ {str: "/", want: " "},
+ {str: "fOO", want: "CCu"},
+ {str: "fo_oo.o_oo", want: "Cc Cc/C Cc"},
+}
+
+func rolesString(roles []fuzzy.RuneRole) string {
+ var buf bytes.Buffer
+ for _, r := range roles {
+ buf.WriteByte(" /cuC"[int(r)])
+ }
+ return buf.String()
+}
+
+func TestRoles(t *testing.T) {
+ for _, tc := range rolesTests {
+ gotRoles := make([]fuzzy.RuneRole, len(tc.str))
+ fuzzy.RuneRoles([]byte(tc.str), gotRoles)
+ got := rolesString(gotRoles)
+ if got != tc.want {
+ t.Errorf("roles(%s) = %v; want %v", tc.str, got, tc.want)
+ }
+ }
+}
+
+var wordSplitTests = []struct {
+ input string
+ want []string
+}{
+ {
+ input: "foo bar baz",
+ want: []string{"foo", "bar", "baz"},
+ },
+ {
+ input: "fooBarBaz",
+ want: []string{"foo", "Bar", "Baz"},
+ },
+ {
+ input: "FOOBarBAZ",
+ want: []string{"FOO", "Bar", "BAZ"},
+ },
+ {
+ input: "foo123_bar2Baz3",
+ want: []string{"foo123", "bar2", "Baz3"},
+ },
+}
+
+func TestWordSplit(t *testing.T) {
+ for _, tc := range wordSplitTests {
+ roles := fuzzy.RuneRoles([]byte(tc.input), nil)
+
+ var got []string
+ consumer := func(i, j int) {
+ got = append(got, tc.input[i:j])
+ }
+ fuzzy.Words(roles, consumer)
+
+ if eq := diffStringLists(tc.want, got); !eq {
+ t.Errorf("input %v: (want %v -> got %v)", tc.input, tc.want, got)
+ }
+ }
+}
+
+func diffStringLists(a, b []string) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ sort.Strings(a)
+ sort.Strings(b)
+ for i := range a {
+ if a[i] != b[i] {
+ return false
+ }
+ }
+ return true
+}
+
+var lastSegmentSplitTests = []struct {
+ str string
+ want string
+}{
+ {
+ str: "identifier",
+ want: "identifier",
+ },
+ {
+ str: "two_words",
+ want: "two_words",
+ },
+ {
+ str: "first::second",
+ want: "second",
+ },
+ {
+ str: "foo.bar.FOOBar_buz123_test",
+ want: "FOOBar_buz123_test",
+ },
+}
+
+func TestLastSegment(t *testing.T) {
+ for _, tc := range lastSegmentSplitTests {
+ roles := fuzzy.RuneRoles([]byte(tc.str), nil)
+
+ got := fuzzy.LastSegment(tc.str, roles)
+
+ if got != tc.want {
+ t.Errorf("str %v: want %v; got %v", tc.str, tc.want, got)
+ }
+ }
+}
+
+func BenchmarkRoles(b *testing.B) {
+ str := "AbstractSWTFactory"
+ out := make([]fuzzy.RuneRole, len(str))
+
+ for i := 0; i < b.N; i++ {
+ fuzzy.RuneRoles([]byte(str), out)
+ }
+ b.SetBytes(int64(len(str)))
+}
diff --git a/internal/fuzzy/matcher.go b/internal/fuzzy/matcher.go
new file mode 100644
index 000000000..c0efd30dd
--- /dev/null
+++ b/internal/fuzzy/matcher.go
@@ -0,0 +1,434 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package fuzzy implements a fuzzy matching algorithm.
+package fuzzy
+
+import (
+ "bytes"
+ "fmt"
+)
+
+const (
+ // MaxInputSize is the maximum size of the input scored against the fuzzy matcher. Longer inputs
+ // will be truncated to this size.
+ MaxInputSize = 127
+ // MaxPatternSize is the maximum size of the pattern used to construct the fuzzy matcher. Longer
+ // inputs are truncated to this size.
+ MaxPatternSize = 63
+)
+
+type scoreVal int
+
+func (s scoreVal) val() int {
+ return int(s) >> 1
+}
+
+func (s scoreVal) prevK() int {
+ return int(s) & 1
+}
+
+func score(val int, prevK int /*0 or 1*/) scoreVal {
+ return scoreVal(val<<1 + prevK)
+}
+
+// Matcher implements a fuzzy matching algorithm for scoring candidates against a pattern.
+// The matcher does not support parallel usage.
+type Matcher struct {
+ pattern string
+ patternLower []byte // lower-case version of the pattern
+ patternShort []byte // first characters of the pattern
+ caseSensitive bool // set if the pattern is mix-cased
+
+ patternRoles []RuneRole // the role of each character in the pattern
+ roles []RuneRole // the role of each character in the tested string
+
+ scores [MaxInputSize + 1][MaxPatternSize + 1][2]scoreVal
+
+ scoreScale float32
+
+ lastCandidateLen int // in bytes
+ lastCandidateMatched bool
+
+ // Reusable buffers to avoid allocating for every candidate.
+ // - inputBuf stores the concatenated input chunks
+ // - lowerBuf stores the last candidate in lower-case
+ // - rolesBuf stores the calculated roles for each rune in the last
+ // candidate.
+ inputBuf [MaxInputSize]byte
+ lowerBuf [MaxInputSize]byte
+ rolesBuf [MaxInputSize]RuneRole
+}
+
+func (m *Matcher) bestK(i, j int) int {
+ if m.scores[i][j][0].val() < m.scores[i][j][1].val() {
+ return 1
+ }
+ return 0
+}
+
+// NewMatcher returns a new fuzzy matcher for scoring candidates against the provided pattern.
+func NewMatcher(pattern string) *Matcher {
+ if len(pattern) > MaxPatternSize {
+ pattern = pattern[:MaxPatternSize]
+ }
+
+ m := &Matcher{
+ pattern: pattern,
+ patternLower: toLower([]byte(pattern), nil),
+ }
+
+ for i, c := range m.patternLower {
+ if pattern[i] != c {
+ m.caseSensitive = true
+ break
+ }
+ }
+
+ if len(pattern) > 3 {
+ m.patternShort = m.patternLower[:3]
+ } else {
+ m.patternShort = m.patternLower
+ }
+
+ m.patternRoles = RuneRoles([]byte(pattern), nil)
+
+ if len(pattern) > 0 {
+ maxCharScore := 4
+ m.scoreScale = 1 / float32(maxCharScore*len(pattern))
+ }
+
+ return m
+}
+
+// Score returns the score returned by matching the candidate to the pattern.
+// This is not designed for parallel use. Multiple candidates must be scored sequentially.
+// Returns a score between 0 and 1 (0 - no match, 1 - perfect match).
+func (m *Matcher) Score(candidate string) float32 {
+ return m.ScoreChunks([]string{candidate})
+}
+
+func (m *Matcher) ScoreChunks(chunks []string) float32 {
+ candidate := fromChunks(chunks, m.inputBuf[:])
+ if len(candidate) > MaxInputSize {
+ candidate = candidate[:MaxInputSize]
+ }
+ lower := toLower(candidate, m.lowerBuf[:])
+ m.lastCandidateLen = len(candidate)
+
+ if len(m.pattern) == 0 {
+ // Empty patterns perfectly match candidates.
+ return 1
+ }
+
+ if m.match(candidate, lower) {
+ sc := m.computeScore(candidate, lower)
+ if sc > minScore/2 && !m.poorMatch() {
+ m.lastCandidateMatched = true
+ if len(m.pattern) == len(candidate) {
+ // Perfect match.
+ return 1
+ }
+
+ if sc < 0 {
+ sc = 0
+ }
+ normalizedScore := float32(sc) * m.scoreScale
+ if normalizedScore > 1 {
+ normalizedScore = 1
+ }
+
+ return normalizedScore
+ }
+ }
+
+ m.lastCandidateMatched = false
+ return 0
+}
+
+const minScore = -10000
+
+// MatchedRanges returns matches ranges for the last scored string as a flattened array of
+// [begin, end) byte offset pairs.
+func (m *Matcher) MatchedRanges() []int {
+ if len(m.pattern) == 0 || !m.lastCandidateMatched {
+ return nil
+ }
+ i, j := m.lastCandidateLen, len(m.pattern)
+ if m.scores[i][j][0].val() < minScore/2 && m.scores[i][j][1].val() < minScore/2 {
+ return nil
+ }
+
+ var ret []int
+ k := m.bestK(i, j)
+ for i > 0 {
+ take := (k == 1)
+ k = m.scores[i][j][k].prevK()
+ if take {
+ if len(ret) == 0 || ret[len(ret)-1] != i {
+ ret = append(ret, i)
+ ret = append(ret, i-1)
+ } else {
+ ret[len(ret)-1] = i - 1
+ }
+ j--
+ }
+ i--
+ }
+ // Reverse slice.
+ for i := 0; i < len(ret)/2; i++ {
+ ret[i], ret[len(ret)-1-i] = ret[len(ret)-1-i], ret[i]
+ }
+ return ret
+}
+
+func (m *Matcher) match(candidate []byte, candidateLower []byte) bool {
+ i, j := 0, 0
+ for ; i < len(candidateLower) && j < len(m.patternLower); i++ {
+ if candidateLower[i] == m.patternLower[j] {
+ j++
+ }
+ }
+ if j != len(m.patternLower) {
+ return false
+ }
+
+ // The input passes the simple test against pattern, so it is time to classify its characters.
+ // Character roles are used below to find the last segment.
+ m.roles = RuneRoles(candidate, m.rolesBuf[:])
+
+ return true
+}
+
+func (m *Matcher) computeScore(candidate []byte, candidateLower []byte) int {
+ pattLen, candLen := len(m.pattern), len(candidate)
+
+ for j := 0; j <= len(m.pattern); j++ {
+ m.scores[0][j][0] = minScore << 1
+ m.scores[0][j][1] = minScore << 1
+ }
+ m.scores[0][0][0] = score(0, 0) // Start with 0.
+
+ segmentsLeft, lastSegStart := 1, 0
+ for i := 0; i < candLen; i++ {
+ if m.roles[i] == RSep {
+ segmentsLeft++
+ lastSegStart = i + 1
+ }
+ }
+
+ // A per-character bonus for a consecutive match.
+ consecutiveBonus := 2
+ wordIdx := 0 // Word count within segment.
+ for i := 1; i <= candLen; i++ {
+
+ role := m.roles[i-1]
+ isHead := role == RHead
+
+ if isHead {
+ wordIdx++
+ } else if role == RSep && segmentsLeft > 1 {
+ wordIdx = 0
+ segmentsLeft--
+ }
+
+ var skipPenalty int
+ if i == 1 || (i-1) == lastSegStart {
+ // Skipping the start of first or last segment.
+ skipPenalty++
+ }
+
+ for j := 0; j <= pattLen; j++ {
+ // By default, we don't have a match. Fill in the skip data.
+ m.scores[i][j][1] = minScore << 1
+
+ // Compute the skip score.
+ k := 0
+ if m.scores[i-1][j][0].val() < m.scores[i-1][j][1].val() {
+ k = 1
+ }
+
+ skipScore := m.scores[i-1][j][k].val()
+ // Do not penalize missing characters after the last matched segment.
+ if j != pattLen {
+ skipScore -= skipPenalty
+ }
+ m.scores[i][j][0] = score(skipScore, k)
+
+ if j == 0 || candidateLower[i-1] != m.patternLower[j-1] {
+ // Not a match.
+ continue
+ }
+ pRole := m.patternRoles[j-1]
+
+ if role == RTail && pRole == RHead {
+ if j > 1 {
+ // Not a match: a head in the pattern matches a tail character in the candidate.
+ continue
+ }
+ // Special treatment for the first character of the pattern. We allow
+ // matches in the middle of a word if they are long enough, at least
+ // min(3, pattern.length) characters.
+ if !bytes.HasPrefix(candidateLower[i-1:], m.patternShort) {
+ continue
+ }
+ }
+
+ // Compute the char score.
+ var charScore int
+ // Bonus 1: the char is in the candidate's last segment.
+ if segmentsLeft <= 1 {
+ charScore++
+ }
+ // Bonus 2: Case match or a Head in the pattern aligns with one in the word.
+ // Single-case patterns lack segmentation signals and we assume any character
+ // can be a head of a segment.
+ if candidate[i-1] == m.pattern[j-1] || role == RHead && (!m.caseSensitive || pRole == RHead) {
+ charScore++
+ }
+
+ // Penalty 1: pattern char is Head, candidate char is Tail.
+ if role == RTail && pRole == RHead {
+ charScore--
+ }
+ // Penalty 2: first pattern character matched in the middle of a word.
+ if j == 1 && role == RTail {
+ charScore -= 4
+ }
+
+ // Third dimension encodes whether there is a gap between the previous match and the current
+ // one.
+ for k := 0; k < 2; k++ {
+ sc := m.scores[i-1][j-1][k].val() + charScore
+
+ isConsecutive := k == 1 || i-1 == 0 || i-1 == lastSegStart
+ if isConsecutive {
+ // Bonus 3: a consecutive match. First character match also gets a bonus to
+ // ensure prefix final match score normalizes to 1.0.
+ // Logically, this is a part of charScore, but we have to compute it here because it
+ // only applies for consecutive matches (k == 1).
+ sc += consecutiveBonus
+ }
+ if k == 0 {
+ // Penalty 3: Matching inside a segment (and previous char wasn't matched). Penalize for the lack
+ // of alignment.
+ if role == RTail || role == RUCTail {
+ sc -= 3
+ }
+ }
+
+ if sc > m.scores[i][j][1].val() {
+ m.scores[i][j][1] = score(sc, k)
+ }
+ }
+ }
+ }
+
+ result := m.scores[len(candidate)][len(m.pattern)][m.bestK(len(candidate), len(m.pattern))].val()
+
+ return result
+}
+
+// ScoreTable returns the score table computed for the provided candidate. Used only for debugging.
+func (m *Matcher) ScoreTable(candidate string) string {
+ var buf bytes.Buffer
+
+ var line1, line2, separator bytes.Buffer
+ line1.WriteString("\t")
+ line2.WriteString("\t")
+ for j := 0; j < len(m.pattern); j++ {
+ line1.WriteString(fmt.Sprintf("%c\t\t", m.pattern[j]))
+ separator.WriteString("----------------")
+ }
+
+ buf.WriteString(line1.String())
+ buf.WriteString("\n")
+ buf.WriteString(separator.String())
+ buf.WriteString("\n")
+
+ for i := 1; i <= len(candidate); i++ {
+ line1.Reset()
+ line2.Reset()
+
+ line1.WriteString(fmt.Sprintf("%c\t", candidate[i-1]))
+ line2.WriteString("\t")
+
+ for j := 1; j <= len(m.pattern); j++ {
+ line1.WriteString(fmt.Sprintf("M%6d(%c)\t", m.scores[i][j][0].val(), dir(m.scores[i][j][0].prevK())))
+ line2.WriteString(fmt.Sprintf("H%6d(%c)\t", m.scores[i][j][1].val(), dir(m.scores[i][j][1].prevK())))
+ }
+ buf.WriteString(line1.String())
+ buf.WriteString("\n")
+ buf.WriteString(line2.String())
+ buf.WriteString("\n")
+ buf.WriteString(separator.String())
+ buf.WriteString("\n")
+ }
+
+ return buf.String()
+}
+
+func dir(prevK int) rune {
+ if prevK == 0 {
+ return 'M'
+ }
+ return 'H'
+}
+
+func (m *Matcher) poorMatch() bool {
+ if len(m.pattern) < 2 {
+ return false
+ }
+
+ i, j := m.lastCandidateLen, len(m.pattern)
+ k := m.bestK(i, j)
+
+ var counter, len int
+ for i > 0 {
+ take := (k == 1)
+ k = m.scores[i][j][k].prevK()
+ if take {
+ len++
+ if k == 0 && len < 3 && m.roles[i-1] == RTail {
+ // Short match in the middle of a word
+ counter++
+ if counter > 1 {
+ return true
+ }
+ }
+ j--
+ } else {
+ len = 0
+ }
+ i--
+ }
+ return false
+}
+
+// BestMatch returns the name most similar to the
+// pattern, using fuzzy matching, or the empty string.
+func BestMatch(pattern string, names []string) string {
+ fuzz := NewMatcher(pattern)
+ best := ""
+ highScore := float32(0) // minimum score is 0 (no match)
+ for _, name := range names {
+ // TODO: Improve scoring algorithm.
+ score := fuzz.Score(name)
+ if score > highScore {
+ highScore = score
+ best = name
+ } else if score == 0 {
+ // Order matters in the fuzzy matching algorithm. If we find no match
+ // when matching the target to the identifier, try matching the identifier
+ // to the target.
+ revFuzz := NewMatcher(name)
+ revScore := revFuzz.Score(pattern)
+ if revScore > highScore {
+ highScore = revScore
+ best = name
+ }
+ }
+ }
+ return best
+}
diff --git a/internal/fuzzy/matcher_test.go b/internal/fuzzy/matcher_test.go
new file mode 100644
index 000000000..528224bd9
--- /dev/null
+++ b/internal/fuzzy/matcher_test.go
@@ -0,0 +1,294 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Benchmark results:
+//
+// BenchmarkMatcher-12 1000000 1615 ns/op 30.95 MB/s 0 B/op 0 allocs/op
+package fuzzy_test
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+ "testing"
+
+ "golang.org/x/tools/internal/fuzzy"
+)
+
+type comparator struct {
+ f func(val, ref float32) bool
+ descr string
+}
+
+var (
+ eq = comparator{
+ f: func(val, ref float32) bool {
+ return val == ref
+ },
+ descr: "==",
+ }
+ ge = comparator{
+ f: func(val, ref float32) bool {
+ return val >= ref
+ },
+ descr: ">=",
+ }
+ gt = comparator{
+ f: func(val, ref float32) bool {
+ return val > ref
+ },
+ descr: ">",
+ }
+)
+
+func (c comparator) eval(val, ref float32) bool {
+ return c.f(val, ref)
+}
+
+func (c comparator) String() string {
+ return c.descr
+}
+
+type scoreTest struct {
+ candidate string
+ comparator
+ ref float32
+}
+
+var matcherTests = []struct {
+ pattern string
+ tests []scoreTest
+}{
+ {
+ pattern: "",
+ tests: []scoreTest{
+ {"def", eq, 1},
+ {"Ab stuff c", eq, 1},
+ },
+ },
+ {
+ pattern: "abc",
+ tests: []scoreTest{
+ {"def", eq, 0},
+ {"abd", eq, 0},
+ {"abc", ge, 0},
+ {"Abc", ge, 0},
+ {"Ab stuff c", ge, 0},
+ },
+ },
+ {
+ pattern: "Abc",
+ tests: []scoreTest{
+ {"def", eq, 0},
+ {"abd", eq, 0},
+ {"abc", ge, 0},
+ {"Abc", ge, 0},
+ {"Ab stuff c", ge, 0},
+ },
+ },
+ {
+ pattern: "U",
+ tests: []scoreTest{
+ {"ErrUnexpectedEOF", gt, 0},
+ {"ErrUnexpectedEOF.Error", eq, 0},
+ },
+ },
+}
+
+func TestScore(t *testing.T) {
+ for _, tc := range matcherTests {
+ m := fuzzy.NewMatcher(tc.pattern)
+ for _, sct := range tc.tests {
+ score := m.Score(sct.candidate)
+ if !sct.comparator.eval(score, sct.ref) {
+ t.Errorf("m.Score(%q) = %.2g, want %s %v", sct.candidate, score, sct.comparator, sct.ref)
+ }
+ }
+ }
+}
+
+var compareCandidatesTestCases = []struct {
+ pattern string
+ orderedCandidates []string
+}{
+ {
+ pattern: "Foo",
+ orderedCandidates: []string{
+ "Barfoo",
+ "Faoo",
+ "F_o_o",
+ "FaoFooa",
+ "BarFoo",
+ "F__oo",
+ "F_oo",
+ "FooA",
+ "FooBar",
+ "Foo",
+ },
+ },
+ {
+ pattern: "U",
+ orderedCandidates: []string{
+ "ErrUnexpectedEOF.Error",
+ "ErrUnexpectedEOF",
+ },
+ },
+}
+
+func TestCompareCandidateScores(t *testing.T) {
+ for _, tc := range compareCandidatesTestCases {
+ m := fuzzy.NewMatcher(tc.pattern)
+
+ var prevScore float32
+ prevCand := "MIN_SCORE"
+ for _, cand := range tc.orderedCandidates {
+ score := m.Score(cand)
+ if prevScore > score {
+ t.Errorf("%s[=%v] is scored lower than %s[=%v]", cand, score, prevCand, prevScore)
+ }
+ if score < -1 || score > 1 {
+ t.Errorf("%s score is %v; want value between [-1, 1]", cand, score)
+ }
+ prevScore = score
+ prevCand = cand
+ }
+ }
+}
+
+var fuzzyMatcherTestCases = []struct {
+ p string
+ str string
+ want string
+}{
+ {p: "foo", str: "abc::foo", want: "abc::[foo]"},
+ {p: "foo", str: "foo.foo", want: "foo.[foo]"},
+ {p: "foo", str: "fo_oo.o_oo", want: "[fo]_oo.[o]_oo"},
+ {p: "foo", str: "fo_oo.fo_oo", want: "fo_oo.[fo]_[o]o"},
+ {p: "fo_o", str: "fo_oo.o_oo", want: "[f]o_oo.[o_o]o"},
+ {p: "fOO", str: "fo_oo.o_oo", want: "[f]o_oo.[o]_[o]o"},
+ {p: "tedit", str: "foo.TextEdit", want: "foo.[T]ext[Edit]"},
+ {p: "TEdit", str: "foo.TextEdit", want: "foo.[T]ext[Edit]"},
+ {p: "Tedit", str: "foo.TextEdit", want: "foo.[T]ext[Edit]"},
+ {p: "Tedit", str: "foo.Textedit", want: "foo.[Te]xte[dit]"},
+ {p: "TEdit", str: "foo.Textedit", want: ""},
+ {p: "te", str: "foo.Textedit", want: "foo.[Te]xtedit"},
+ {p: "ee", str: "foo.Textedit", want: ""}, // short middle of the word match
+ {p: "ex", str: "foo.Textedit", want: "foo.T[ex]tedit"},
+ {p: "exdi", str: "foo.Textedit", want: ""}, // short middle of the word match
+ {p: "exdit", str: "foo.Textedit", want: ""}, // short middle of the word match
+ {p: "extdit", str: "foo.Textedit", want: "foo.T[ext]e[dit]"},
+ {p: "e", str: "foo.Textedit", want: "foo.T[e]xtedit"},
+ {p: "E", str: "foo.Textedit", want: "foo.T[e]xtedit"},
+ {p: "ed", str: "foo.Textedit", want: "foo.Text[ed]it"},
+ {p: "edt", str: "foo.Textedit", want: ""}, // short middle of the word match
+ {p: "edit", str: "foo.Textedit", want: "foo.Text[edit]"},
+ {p: "edin", str: "foo.TexteditNum", want: "foo.Text[edi]t[N]um"},
+ {p: "n", str: "node.GoNodeMax", want: "[n]ode.GoNodeMax"},
+ {p: "N", str: "node.GoNodeMax", want: "[n]ode.GoNodeMax"},
+ {p: "completio", str: "completion", want: "[completio]n"},
+ {p: "completio", str: "completion.None", want: "[completio]n.None"},
+}
+
+func TestFuzzyMatcherRanges(t *testing.T) {
+ for _, tc := range fuzzyMatcherTestCases {
+ matcher := fuzzy.NewMatcher(tc.p)
+ score := matcher.Score(tc.str)
+ if tc.want == "" {
+ if score > 0 {
+ t.Errorf("Score(%s, %s) = %v; want: <= 0", tc.p, tc.str, score)
+ }
+ continue
+ }
+ if score < 0 {
+ t.Errorf("Score(%s, %s) = %v, want: > 0", tc.p, tc.str, score)
+ continue
+ }
+ got := highlightMatches(tc.str, matcher)
+ if tc.want != got {
+ t.Errorf("highlightMatches(%s, %s) = %v, want: %v", tc.p, tc.str, got, tc.want)
+ }
+ }
+}
+
+var scoreTestCases = []struct {
+ p string
+ str string
+ want float64
+}{
+ // Score precision up to five digits. Modify if changing the score, but make sure the new values
+ // are reasonable.
+ {p: "abc", str: "abc", want: 1},
+ {p: "abc", str: "Abc", want: 1},
+ {p: "abc", str: "Abcdef", want: 1},
+ {p: "strc", str: "StrCat", want: 1},
+ {p: "abc_def", str: "abc_def_xyz", want: 1},
+ {p: "abcdef", str: "abc_def_xyz", want: 0.91667},
+ {p: "abcxyz", str: "abc_def_xyz", want: 0.91667},
+ {p: "sc", str: "StrCat", want: 0.75},
+ {p: "abc", str: "AbstrBasicCtor", want: 0.83333},
+ {p: "foo", str: "abc::foo", want: 0.91667},
+ {p: "afoo", str: "abc::foo", want: 0.9375},
+ {p: "abr", str: "abc::bar", want: 0.5},
+ {p: "br", str: "abc::bar", want: 0.25},
+ {p: "aar", str: "abc::bar", want: 0.41667},
+ {p: "edin", str: "foo.TexteditNum", want: 0.125},
+ {p: "ediu", str: "foo.TexteditNum", want: 0},
+ // We want the next two items to have roughly similar scores.
+ {p: "up", str: "unique_ptr", want: 0.75},
+ {p: "up", str: "upper_bound", want: 1},
+}
+
+func TestScores(t *testing.T) {
+ for _, tc := range scoreTestCases {
+ matcher := fuzzy.NewMatcher(tc.p)
+ got := math.Round(float64(matcher.Score(tc.str))*1e5) / 1e5
+ if got != tc.want {
+ t.Errorf("Score(%s, %s) = %v, want: %v", tc.p, tc.str, got, tc.want)
+ }
+ }
+}
+
+func highlightMatches(str string, matcher *fuzzy.Matcher) string {
+ matches := matcher.MatchedRanges()
+
+ var buf bytes.Buffer
+ index := 0
+ for i := 0; i < len(matches)-1; i += 2 {
+ s, e := matches[i], matches[i+1]
+ fmt.Fprintf(&buf, "%s[%s]", str[index:s], str[s:e])
+ index = e
+ }
+ buf.WriteString(str[index:])
+ return buf.String()
+}
+
+func BenchmarkMatcher(b *testing.B) {
+ pattern := "Foo"
+ candidates := []string{
+ "F_o_o",
+ "Barfoo",
+ "Faoo",
+ "F__oo",
+ "F_oo",
+ "FaoFooa",
+ "BarFoo",
+ "FooA",
+ "FooBar",
+ "Foo",
+ }
+
+ matcher := fuzzy.NewMatcher(pattern)
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ for _, c := range candidates {
+ matcher.Score(c)
+ }
+ }
+ var numBytes int
+ for _, c := range candidates {
+ numBytes += len(c)
+ }
+ b.SetBytes(int64(numBytes))
+}
diff --git a/internal/fuzzy/symbol.go b/internal/fuzzy/symbol.go
new file mode 100644
index 000000000..073a4cd10
--- /dev/null
+++ b/internal/fuzzy/symbol.go
@@ -0,0 +1,237 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fuzzy
+
+import (
+ "unicode"
+)
+
+// SymbolMatcher implements a fuzzy matching algorithm optimized for Go symbols
+// of the form:
+//
+// example.com/path/to/package.object.field
+//
+// Knowing that we are matching symbols like this allows us to make the
+// following optimizations:
+// - We can incorporate right-to-left relevance directly into the score
+// calculation.
+// - We can match from right to left, discarding leading bytes if the input is
+// too long.
+// - We just take the right-most match without losing too much precision. This
+// allows us to use an O(n) algorithm.
+// - We can operate directly on chunked strings; in many cases we will
+// be storing the package path and/or package name separately from the
+// symbol or identifiers, so doing this avoids allocating strings.
+// - We can return the index of the right-most match, allowing us to trim
+// irrelevant qualification.
+//
+// This implementation is experimental, serving as a reference fast algorithm
+// to compare to the fuzzy algorithm implemented by Matcher.
+type SymbolMatcher struct {
+ // Using buffers of length 256 is both a reasonable size for most qualified
+ // symbols, and makes it easy to avoid bounds checks by using uint8 indexes.
+ pattern [256]rune
+ patternLen uint8
+ inputBuffer [256]rune // avoid allocating when considering chunks
+ roles [256]uint32 // which roles does a rune play (word start, etc.)
+ segments [256]uint8 // how many segments from the right is each rune
+}
+
+const (
+ segmentStart uint32 = 1 << iota
+ wordStart
+ separator
+)
+
+// NewSymbolMatcher creates a SymbolMatcher that may be used to match the given
+// search pattern.
+//
+// Currently this matcher only accepts case-insensitive fuzzy patterns.
+//
+// An empty pattern matches no input.
+func NewSymbolMatcher(pattern string) *SymbolMatcher {
+ m := &SymbolMatcher{}
+ for _, p := range pattern {
+ m.pattern[m.patternLen] = unicode.ToLower(p)
+ m.patternLen++
+ if m.patternLen == 255 || int(m.patternLen) == len(pattern) {
+ // break at 255 so that we can represent patternLen with a uint8.
+ break
+ }
+ }
+ return m
+}
+
+// Match looks for the right-most match of the search pattern within the symbol
+// represented by concatenating the given chunks, returning its offset and
+// score.
+//
+// If a match is found, the first return value will hold the absolute byte
+// offset within all chunks for the start of the symbol. In other words, the
+// index of the match within strings.Join(chunks, ""). If no match is found,
+// the first return value will be -1.
+//
+// The second return value will be the score of the match, which is always
+// between 0 and 1, inclusive. A score of 0 indicates no match.
+func (m *SymbolMatcher) Match(chunks []string) (int, float64) {
+ // Explicit behavior for an empty pattern.
+ //
+ // As a minor optimization, this also avoids nilness checks later on, since
+ // the compiler can prove that m != nil.
+ if m.patternLen == 0 {
+ return -1, 0
+ }
+
+ // First phase: populate the input buffer with lower-cased runes.
+ //
+ // We could also check for a forward match here, but since we'd have to write
+ // the entire input anyway this has negligible impact on performance.
+
+ var (
+ inputLen = uint8(0)
+ modifiers = wordStart | segmentStart
+ )
+
+input:
+ for _, chunk := range chunks {
+ for _, r := range chunk {
+ if r == '.' || r == '/' {
+ modifiers |= separator
+ }
+ // optimization: avoid calls to unicode.ToLower, which can't be inlined.
+ l := r
+ if r <= unicode.MaxASCII {
+ if 'A' <= r && r <= 'Z' {
+ l = r + 'a' - 'A'
+ }
+ } else {
+ l = unicode.ToLower(r)
+ }
+ if l != r {
+ modifiers |= wordStart
+ }
+ m.inputBuffer[inputLen] = l
+ m.roles[inputLen] = modifiers
+ inputLen++
+ if m.roles[inputLen-1]&separator != 0 {
+ modifiers = wordStart | segmentStart
+ } else {
+ modifiers = 0
+ }
+ // TODO: we should prefer the right-most input if it overflows, rather
+ // than the left-most as we're doing here.
+ if inputLen == 255 {
+ break input
+ }
+ }
+ }
+
+ // Second phase: find the right-most match, and count segments from the
+ // right.
+
+ var (
+ pi = uint8(m.patternLen - 1) // pattern index
+ p = m.pattern[pi] // pattern rune
+ start = -1 // start offset of match
+ rseg = uint8(0)
+ )
+ const maxSeg = 3 // maximum number of segments from the right to count, for scoring purposes.
+
+ for ii := inputLen - 1; ; ii-- {
+ r := m.inputBuffer[ii]
+ if rseg < maxSeg && m.roles[ii]&separator != 0 {
+ rseg++
+ }
+ m.segments[ii] = rseg
+ if p == r {
+ if pi == 0 {
+ start = int(ii)
+ break
+ }
+ pi--
+ p = m.pattern[pi]
+ }
+ // Don't check ii >= 0 in the loop condition: ii is a uint8.
+ if ii == 0 {
+ break
+ }
+ }
+
+ if start < 0 {
+ // no match: skip scoring
+ return -1, 0
+ }
+
+ // Third phase: find the shortest match, and compute the score.
+
+ // Score is the average score for each character.
+ //
+ // A character score is the multiple of:
+ // 1. 1.0 if the character starts a segment, .8 if the character start a
+ // mid-segment word, otherwise 0.6. This carries over to immediately
+ // following characters.
+ // 2. For the final character match, the multiplier from (1) is reduced to
+ // .8 if the next character in the input is a mid-segment word, or 0.6 if
+ // the next character in the input is not a word or segment start. This
+ // ensures that we favor whole-word or whole-segment matches over prefix
+ // matches.
+ // 3. 1.0 if the character is part of the last segment, otherwise
+ // 1.0-.2*<segments from the right>, with a max segment count of 3.
+ //
+ // This is a very naive algorithm, but it is fast. There's lots of prior art
+ // here, and we should leverage it. For example, we could explicitly consider
+ // character distance, and exact matches of words or segments.
+ //
+ // Also note that this might not actually find the highest scoring match, as
+ // doing so could require a non-linear algorithm, depending on how the score
+ // is calculated.
+
+ pi = 0
+ p = m.pattern[pi]
+
+ const (
+ segStreak = 1.0
+ wordStreak = 0.8
+ noStreak = 0.6
+ perSegment = 0.2 // we count at most 3 segments above
+ )
+
+ streakBonus := noStreak
+ totScore := 0.0
+ for ii := uint8(start); ii < inputLen; ii++ {
+ r := m.inputBuffer[ii]
+ if r == p {
+ pi++
+ p = m.pattern[pi]
+ // Note: this could be optimized with some bit operations.
+ switch {
+ case m.roles[ii]&segmentStart != 0 && segStreak > streakBonus:
+ streakBonus = segStreak
+ case m.roles[ii]&wordStart != 0 && wordStreak > streakBonus:
+ streakBonus = wordStreak
+ }
+ finalChar := pi >= m.patternLen
+ // finalCost := 1.0
+ if finalChar && streakBonus > noStreak {
+ switch {
+ case ii == inputLen-1 || m.roles[ii+1]&segmentStart != 0:
+ // Full segment: no reduction
+ case m.roles[ii+1]&wordStart != 0:
+ streakBonus = wordStreak
+ default:
+ streakBonus = noStreak
+ }
+ }
+ totScore += streakBonus * (1.0 - float64(m.segments[ii])*perSegment)
+ if finalChar {
+ break
+ }
+ } else {
+ streakBonus = noStreak
+ }
+ }
+
+ return start, totScore / float64(m.patternLen)
+}
diff --git a/internal/fuzzy/symbol_test.go b/internal/fuzzy/symbol_test.go
new file mode 100644
index 000000000..df74bbe0d
--- /dev/null
+++ b/internal/fuzzy/symbol_test.go
@@ -0,0 +1,79 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fuzzy_test
+
+import (
+ "testing"
+
+ . "golang.org/x/tools/internal/fuzzy"
+)
+
+func TestSymbolMatchIndex(t *testing.T) {
+ tests := []struct {
+ pattern, input string
+ want int
+ }{
+ {"test", "foo.TestFoo", 4},
+ {"test", "test", 0},
+ {"test", "Test", 0},
+ {"test", "est", -1},
+ {"t", "shortest", 7},
+ {"", "foo", -1},
+ {"", string([]rune{0}), -1}, // verify that we don't default to an empty pattern.
+ {"anything", "", -1},
+ }
+
+ for _, test := range tests {
+ matcher := NewSymbolMatcher(test.pattern)
+ if got, _ := matcher.Match([]string{test.input}); got != test.want {
+ t.Errorf("NewSymbolMatcher(%q).Match(%q) = %v, _, want %v, _", test.pattern, test.input, got, test.want)
+ }
+ }
+}
+
+func TestSymbolRanking(t *testing.T) {
+ matcher := NewSymbolMatcher("test")
+
+ // symbols to match, in ascending order of ranking.
+ symbols := []string{
+ "this.is.better.than.most",
+ "test.foo.bar",
+ "atest",
+ "thebest",
+ "test.foo",
+ "test.foo",
+ "tTest",
+ "testage",
+ "foo.test",
+ "test",
+ }
+ prev := 0.0
+ for _, sym := range symbols {
+ _, score := matcher.Match([]string{sym})
+ t.Logf("Match(%q) = %v", sym, score)
+ if score < prev {
+ t.Errorf("Match(%q) = _, %v, want > %v", sym, score, prev)
+ }
+ prev = score
+ }
+}
+
+func TestChunkedMatch(t *testing.T) {
+ matcher := NewSymbolMatcher("test")
+
+ chunked := [][]string{
+ {"test"},
+ {"", "test"},
+ {"test", ""},
+ {"te", "st"},
+ }
+
+ for _, chunks := range chunked {
+ offset, score := matcher.Match(chunks)
+ if offset != 0 || score != 1.0 {
+ t.Errorf("Match(%v) = %v, %v, want 0, 1.0", chunks, offset, score)
+ }
+ }
+}
diff --git a/internal/gcimporter/bexport.go b/internal/gcimporter/bexport.go
new file mode 100644
index 000000000..30582ed6d
--- /dev/null
+++ b/internal/gcimporter/bexport.go
@@ -0,0 +1,852 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Binary package export.
+// This file was derived from $GOROOT/src/cmd/compile/internal/gc/bexport.go;
+// see that file for specification of the format.
+
+package gcimporter
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "go/constant"
+ "go/token"
+ "go/types"
+ "math"
+ "math/big"
+ "sort"
+ "strings"
+)
+
+// If debugFormat is set, each integer and string value is preceded by a marker
+// and position information in the encoding. This mechanism permits an importer
+// to recognize immediately when it is out of sync. The importer recognizes this
+// mode automatically (i.e., it can import export data produced with debugging
+// support even if debugFormat is not set at the time of import). This mode will
+// lead to massively larger export data (by a factor of 2 to 3) and should only
+// be enabled during development and debugging.
+//
+// NOTE: This flag is the first flag to enable if importing dies because of
+// (suspected) format errors, and whenever a change is made to the format.
+const debugFormat = false // default: false
+
+// Current export format version. Increase with each format change.
+//
+// Note: The latest binary (non-indexed) export format is at version 6.
+// This exporter is still at level 4, but it doesn't matter since
+// the binary importer can handle older versions just fine.
+//
+// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE
+// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMENTED HERE
+// 4: type name objects support type aliases, uses aliasTag
+// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used)
+// 2: removed unused bool in ODCL export (compiler only)
+// 1: header format change (more regular), export package for _ struct fields
+// 0: Go1.7 encoding
+const exportVersion = 4
+
+// trackAllTypes enables cycle tracking for all types, not just named
+// types. The existing compiler invariants assume that unnamed types
+// that are not completely set up are not used, or else there are spurious
+// errors.
+// If disabled, only named types are tracked, possibly leading to slightly
+// less efficient encoding in rare cases. It also prevents the export of
+// some corner-case type declarations (but those are not handled correctly
+// with with the textual export format either).
+// TODO(gri) enable and remove once issues caused by it are fixed
+const trackAllTypes = false
+
+type exporter struct {
+ fset *token.FileSet
+ out bytes.Buffer
+
+ // object -> index maps, indexed in order of serialization
+ strIndex map[string]int
+ pkgIndex map[*types.Package]int
+ typIndex map[types.Type]int
+
+ // position encoding
+ posInfoFormat bool
+ prevFile string
+ prevLine int
+
+ // debugging support
+ written int // bytes written
+ indent int // for trace
+}
+
+// internalError represents an error generated inside this package.
+type internalError string
+
+func (e internalError) Error() string { return "gcimporter: " + string(e) }
+
+func internalErrorf(format string, args ...interface{}) error {
+ return internalError(fmt.Sprintf(format, args...))
+}
+
+// BExportData returns binary export data for pkg.
+// If no file set is provided, position info will be missing.
+func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) {
+ if !debug {
+ defer func() {
+ if e := recover(); e != nil {
+ if ierr, ok := e.(internalError); ok {
+ err = ierr
+ return
+ }
+ // Not an internal error; panic again.
+ panic(e)
+ }
+ }()
+ }
+
+ p := exporter{
+ fset: fset,
+ strIndex: map[string]int{"": 0}, // empty string is mapped to 0
+ pkgIndex: make(map[*types.Package]int),
+ typIndex: make(map[types.Type]int),
+ posInfoFormat: true, // TODO(gri) might become a flag, eventually
+ }
+
+ // write version info
+ // The version string must start with "version %d" where %d is the version
+ // number. Additional debugging information may follow after a blank; that
+ // text is ignored by the importer.
+ p.rawStringln(fmt.Sprintf("version %d", exportVersion))
+ var debug string
+ if debugFormat {
+ debug = "debug"
+ }
+ p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly
+ p.bool(trackAllTypes)
+ p.bool(p.posInfoFormat)
+
+ // --- generic export data ---
+
+ // populate type map with predeclared "known" types
+ for index, typ := range predeclared() {
+ p.typIndex[typ] = index
+ }
+ if len(p.typIndex) != len(predeclared()) {
+ return nil, internalError("duplicate entries in type map?")
+ }
+
+ // write package data
+ p.pkg(pkg, true)
+ if trace {
+ p.tracef("\n")
+ }
+
+ // write objects
+ objcount := 0
+ scope := pkg.Scope()
+ for _, name := range scope.Names() {
+ if !token.IsExported(name) {
+ continue
+ }
+ if trace {
+ p.tracef("\n")
+ }
+ p.obj(scope.Lookup(name))
+ objcount++
+ }
+
+ // indicate end of list
+ if trace {
+ p.tracef("\n")
+ }
+ p.tag(endTag)
+
+ // for self-verification only (redundant)
+ p.int(objcount)
+
+ if trace {
+ p.tracef("\n")
+ }
+
+ // --- end of export data ---
+
+ return p.out.Bytes(), nil
+}
+
+func (p *exporter) pkg(pkg *types.Package, emptypath bool) {
+ if pkg == nil {
+ panic(internalError("unexpected nil pkg"))
+ }
+
+ // if we saw the package before, write its index (>= 0)
+ if i, ok := p.pkgIndex[pkg]; ok {
+ p.index('P', i)
+ return
+ }
+
+ // otherwise, remember the package, write the package tag (< 0) and package data
+ if trace {
+ p.tracef("P%d = { ", len(p.pkgIndex))
+ defer p.tracef("} ")
+ }
+ p.pkgIndex[pkg] = len(p.pkgIndex)
+
+ p.tag(packageTag)
+ p.string(pkg.Name())
+ if emptypath {
+ p.string("")
+ } else {
+ p.string(pkg.Path())
+ }
+}
+
+func (p *exporter) obj(obj types.Object) {
+ switch obj := obj.(type) {
+ case *types.Const:
+ p.tag(constTag)
+ p.pos(obj)
+ p.qualifiedName(obj)
+ p.typ(obj.Type())
+ p.value(obj.Val())
+
+ case *types.TypeName:
+ if obj.IsAlias() {
+ p.tag(aliasTag)
+ p.pos(obj)
+ p.qualifiedName(obj)
+ } else {
+ p.tag(typeTag)
+ }
+ p.typ(obj.Type())
+
+ case *types.Var:
+ p.tag(varTag)
+ p.pos(obj)
+ p.qualifiedName(obj)
+ p.typ(obj.Type())
+
+ case *types.Func:
+ p.tag(funcTag)
+ p.pos(obj)
+ p.qualifiedName(obj)
+ sig := obj.Type().(*types.Signature)
+ p.paramList(sig.Params(), sig.Variadic())
+ p.paramList(sig.Results(), false)
+
+ default:
+ panic(internalErrorf("unexpected object %v (%T)", obj, obj))
+ }
+}
+
+func (p *exporter) pos(obj types.Object) {
+ if !p.posInfoFormat {
+ return
+ }
+
+ file, line := p.fileLine(obj)
+ if file == p.prevFile {
+ // common case: write line delta
+ // delta == 0 means different file or no line change
+ delta := line - p.prevLine
+ p.int(delta)
+ if delta == 0 {
+ p.int(-1) // -1 means no file change
+ }
+ } else {
+ // different file
+ p.int(0)
+ // Encode filename as length of common prefix with previous
+ // filename, followed by (possibly empty) suffix. Filenames
+ // frequently share path prefixes, so this can save a lot
+ // of space and make export data size less dependent on file
+ // path length. The suffix is unlikely to be empty because
+ // file names tend to end in ".go".
+ n := commonPrefixLen(p.prevFile, file)
+ p.int(n) // n >= 0
+ p.string(file[n:]) // write suffix only
+ p.prevFile = file
+ p.int(line)
+ }
+ p.prevLine = line
+}
+
+func (p *exporter) fileLine(obj types.Object) (file string, line int) {
+ if p.fset != nil {
+ pos := p.fset.Position(obj.Pos())
+ file = pos.Filename
+ line = pos.Line
+ }
+ return
+}
+
+func commonPrefixLen(a, b string) int {
+ if len(a) > len(b) {
+ a, b = b, a
+ }
+ // len(a) <= len(b)
+ i := 0
+ for i < len(a) && a[i] == b[i] {
+ i++
+ }
+ return i
+}
+
+func (p *exporter) qualifiedName(obj types.Object) {
+ p.string(obj.Name())
+ p.pkg(obj.Pkg(), false)
+}
+
+func (p *exporter) typ(t types.Type) {
+ if t == nil {
+ panic(internalError("nil type"))
+ }
+
+ // Possible optimization: Anonymous pointer types *T where
+ // T is a named type are common. We could canonicalize all
+ // such types *T to a single type PT = *T. This would lead
+ // to at most one *T entry in typIndex, and all future *T's
+ // would be encoded as the respective index directly. Would
+ // save 1 byte (pointerTag) per *T and reduce the typIndex
+ // size (at the cost of a canonicalization map). We can do
+ // this later, without encoding format change.
+
+ // if we saw the type before, write its index (>= 0)
+ if i, ok := p.typIndex[t]; ok {
+ p.index('T', i)
+ return
+ }
+
+ // otherwise, remember the type, write the type tag (< 0) and type data
+ if trackAllTypes {
+ if trace {
+ p.tracef("T%d = {>\n", len(p.typIndex))
+ defer p.tracef("<\n} ")
+ }
+ p.typIndex[t] = len(p.typIndex)
+ }
+
+ switch t := t.(type) {
+ case *types.Named:
+ if !trackAllTypes {
+ // if we don't track all types, track named types now
+ p.typIndex[t] = len(p.typIndex)
+ }
+
+ p.tag(namedTag)
+ p.pos(t.Obj())
+ p.qualifiedName(t.Obj())
+ p.typ(t.Underlying())
+ if !types.IsInterface(t) {
+ p.assocMethods(t)
+ }
+
+ case *types.Array:
+ p.tag(arrayTag)
+ p.int64(t.Len())
+ p.typ(t.Elem())
+
+ case *types.Slice:
+ p.tag(sliceTag)
+ p.typ(t.Elem())
+
+ case *dddSlice:
+ p.tag(dddTag)
+ p.typ(t.elem)
+
+ case *types.Struct:
+ p.tag(structTag)
+ p.fieldList(t)
+
+ case *types.Pointer:
+ p.tag(pointerTag)
+ p.typ(t.Elem())
+
+ case *types.Signature:
+ p.tag(signatureTag)
+ p.paramList(t.Params(), t.Variadic())
+ p.paramList(t.Results(), false)
+
+ case *types.Interface:
+ p.tag(interfaceTag)
+ p.iface(t)
+
+ case *types.Map:
+ p.tag(mapTag)
+ p.typ(t.Key())
+ p.typ(t.Elem())
+
+ case *types.Chan:
+ p.tag(chanTag)
+ p.int(int(3 - t.Dir())) // hack
+ p.typ(t.Elem())
+
+ default:
+ panic(internalErrorf("unexpected type %T: %s", t, t))
+ }
+}
+
+func (p *exporter) assocMethods(named *types.Named) {
+ // Sort methods (for determinism).
+ var methods []*types.Func
+ for i := 0; i < named.NumMethods(); i++ {
+ methods = append(methods, named.Method(i))
+ }
+ sort.Sort(methodsByName(methods))
+
+ p.int(len(methods))
+
+ if trace && methods != nil {
+ p.tracef("associated methods {>\n")
+ }
+
+ for i, m := range methods {
+ if trace && i > 0 {
+ p.tracef("\n")
+ }
+
+ p.pos(m)
+ name := m.Name()
+ p.string(name)
+ if !exported(name) {
+ p.pkg(m.Pkg(), false)
+ }
+
+ sig := m.Type().(*types.Signature)
+ p.paramList(types.NewTuple(sig.Recv()), false)
+ p.paramList(sig.Params(), sig.Variadic())
+ p.paramList(sig.Results(), false)
+ p.int(0) // dummy value for go:nointerface pragma - ignored by importer
+ }
+
+ if trace && methods != nil {
+ p.tracef("<\n} ")
+ }
+}
+
+type methodsByName []*types.Func
+
+func (x methodsByName) Len() int { return len(x) }
+func (x methodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x methodsByName) Less(i, j int) bool { return x[i].Name() < x[j].Name() }
+
+func (p *exporter) fieldList(t *types.Struct) {
+ if trace && t.NumFields() > 0 {
+ p.tracef("fields {>\n")
+ defer p.tracef("<\n} ")
+ }
+
+ p.int(t.NumFields())
+ for i := 0; i < t.NumFields(); i++ {
+ if trace && i > 0 {
+ p.tracef("\n")
+ }
+ p.field(t.Field(i))
+ p.string(t.Tag(i))
+ }
+}
+
+func (p *exporter) field(f *types.Var) {
+ if !f.IsField() {
+ panic(internalError("field expected"))
+ }
+
+ p.pos(f)
+ p.fieldName(f)
+ p.typ(f.Type())
+}
+
+func (p *exporter) iface(t *types.Interface) {
+ // TODO(gri): enable importer to load embedded interfaces,
+ // then emit Embeddeds and ExplicitMethods separately here.
+ p.int(0)
+
+ n := t.NumMethods()
+ if trace && n > 0 {
+ p.tracef("methods {>\n")
+ defer p.tracef("<\n} ")
+ }
+ p.int(n)
+ for i := 0; i < n; i++ {
+ if trace && i > 0 {
+ p.tracef("\n")
+ }
+ p.method(t.Method(i))
+ }
+}
+
+func (p *exporter) method(m *types.Func) {
+ sig := m.Type().(*types.Signature)
+ if sig.Recv() == nil {
+ panic(internalError("method expected"))
+ }
+
+ p.pos(m)
+ p.string(m.Name())
+ if m.Name() != "_" && !token.IsExported(m.Name()) {
+ p.pkg(m.Pkg(), false)
+ }
+
+ // interface method; no need to encode receiver.
+ p.paramList(sig.Params(), sig.Variadic())
+ p.paramList(sig.Results(), false)
+}
+
+func (p *exporter) fieldName(f *types.Var) {
+ name := f.Name()
+
+ if f.Anonymous() {
+ // anonymous field - we distinguish between 3 cases:
+ // 1) field name matches base type name and is exported
+ // 2) field name matches base type name and is not exported
+ // 3) field name doesn't match base type name (alias name)
+ bname := basetypeName(f.Type())
+ if name == bname {
+ if token.IsExported(name) {
+ name = "" // 1) we don't need to know the field name or package
+ } else {
+ name = "?" // 2) use unexported name "?" to force package export
+ }
+ } else {
+ // 3) indicate alias and export name as is
+ // (this requires an extra "@" but this is a rare case)
+ p.string("@")
+ }
+ }
+
+ p.string(name)
+ if name != "" && !token.IsExported(name) {
+ p.pkg(f.Pkg(), false)
+ }
+}
+
+func basetypeName(typ types.Type) string {
+ switch typ := deref(typ).(type) {
+ case *types.Basic:
+ return typ.Name()
+ case *types.Named:
+ return typ.Obj().Name()
+ default:
+ return "" // unnamed type
+ }
+}
+
+func (p *exporter) paramList(params *types.Tuple, variadic bool) {
+ // use negative length to indicate unnamed parameters
+ // (look at the first parameter only since either all
+ // names are present or all are absent)
+ n := params.Len()
+ if n > 0 && params.At(0).Name() == "" {
+ n = -n
+ }
+ p.int(n)
+ for i := 0; i < params.Len(); i++ {
+ q := params.At(i)
+ t := q.Type()
+ if variadic && i == params.Len()-1 {
+ t = &dddSlice{t.(*types.Slice).Elem()}
+ }
+ p.typ(t)
+ if n > 0 {
+ name := q.Name()
+ p.string(name)
+ if name != "_" {
+ p.pkg(q.Pkg(), false)
+ }
+ }
+ p.string("") // no compiler-specific info
+ }
+}
+
+func (p *exporter) value(x constant.Value) {
+ if trace {
+ p.tracef("= ")
+ }
+
+ switch x.Kind() {
+ case constant.Bool:
+ tag := falseTag
+ if constant.BoolVal(x) {
+ tag = trueTag
+ }
+ p.tag(tag)
+
+ case constant.Int:
+ if v, exact := constant.Int64Val(x); exact {
+ // common case: x fits into an int64 - use compact encoding
+ p.tag(int64Tag)
+ p.int64(v)
+ return
+ }
+ // uncommon case: large x - use float encoding
+ // (powers of 2 will be encoded efficiently with exponent)
+ p.tag(floatTag)
+ p.float(constant.ToFloat(x))
+
+ case constant.Float:
+ p.tag(floatTag)
+ p.float(x)
+
+ case constant.Complex:
+ p.tag(complexTag)
+ p.float(constant.Real(x))
+ p.float(constant.Imag(x))
+
+ case constant.String:
+ p.tag(stringTag)
+ p.string(constant.StringVal(x))
+
+ case constant.Unknown:
+ // package contains type errors
+ p.tag(unknownTag)
+
+ default:
+ panic(internalErrorf("unexpected value %v (%T)", x, x))
+ }
+}
+
+func (p *exporter) float(x constant.Value) {
+ if x.Kind() != constant.Float {
+ panic(internalErrorf("unexpected constant %v, want float", x))
+ }
+ // extract sign (there is no -0)
+ sign := constant.Sign(x)
+ if sign == 0 {
+ // x == 0
+ p.int(0)
+ return
+ }
+ // x != 0
+
+ var f big.Float
+ if v, exact := constant.Float64Val(x); exact {
+ // float64
+ f.SetFloat64(v)
+ } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int {
+ // TODO(gri): add big.Rat accessor to constant.Value.
+ r := valueToRat(num)
+ f.SetRat(r.Quo(r, valueToRat(denom)))
+ } else {
+ // Value too large to represent as a fraction => inaccessible.
+ // TODO(gri): add big.Float accessor to constant.Value.
+ f.SetFloat64(math.MaxFloat64) // FIXME
+ }
+
+ // extract exponent such that 0.5 <= m < 1.0
+ var m big.Float
+ exp := f.MantExp(&m)
+
+ // extract mantissa as *big.Int
+ // - set exponent large enough so mant satisfies mant.IsInt()
+ // - get *big.Int from mant
+ m.SetMantExp(&m, int(m.MinPrec()))
+ mant, acc := m.Int(nil)
+ if acc != big.Exact {
+ panic(internalError("internal error"))
+ }
+
+ p.int(sign)
+ p.int(exp)
+ p.string(string(mant.Bytes()))
+}
+
+func valueToRat(x constant.Value) *big.Rat {
+ // Convert little-endian to big-endian.
+ // I can't believe this is necessary.
+ bytes := constant.Bytes(x)
+ for i := 0; i < len(bytes)/2; i++ {
+ bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i]
+ }
+ return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes))
+}
+
+func (p *exporter) bool(b bool) bool {
+ if trace {
+ p.tracef("[")
+ defer p.tracef("= %v] ", b)
+ }
+
+ x := 0
+ if b {
+ x = 1
+ }
+ p.int(x)
+ return b
+}
+
+// ----------------------------------------------------------------------------
+// Low-level encoders
+
+func (p *exporter) index(marker byte, index int) {
+ if index < 0 {
+ panic(internalError("invalid index < 0"))
+ }
+ if debugFormat {
+ p.marker('t')
+ }
+ if trace {
+ p.tracef("%c%d ", marker, index)
+ }
+ p.rawInt64(int64(index))
+}
+
+func (p *exporter) tag(tag int) {
+ if tag >= 0 {
+ panic(internalError("invalid tag >= 0"))
+ }
+ if debugFormat {
+ p.marker('t')
+ }
+ if trace {
+ p.tracef("%s ", tagString[-tag])
+ }
+ p.rawInt64(int64(tag))
+}
+
+func (p *exporter) int(x int) {
+ p.int64(int64(x))
+}
+
+func (p *exporter) int64(x int64) {
+ if debugFormat {
+ p.marker('i')
+ }
+ if trace {
+ p.tracef("%d ", x)
+ }
+ p.rawInt64(x)
+}
+
+func (p *exporter) string(s string) {
+ if debugFormat {
+ p.marker('s')
+ }
+ if trace {
+ p.tracef("%q ", s)
+ }
+ // if we saw the string before, write its index (>= 0)
+ // (the empty string is mapped to 0)
+ if i, ok := p.strIndex[s]; ok {
+ p.rawInt64(int64(i))
+ return
+ }
+ // otherwise, remember string and write its negative length and bytes
+ p.strIndex[s] = len(p.strIndex)
+ p.rawInt64(-int64(len(s)))
+ for i := 0; i < len(s); i++ {
+ p.rawByte(s[i])
+ }
+}
+
+// marker emits a marker byte and position information which makes
+// it easy for a reader to detect if it is "out of sync". Used for
+// debugFormat format only.
+func (p *exporter) marker(m byte) {
+ p.rawByte(m)
+ // Enable this for help tracking down the location
+ // of an incorrect marker when running in debugFormat.
+ if false && trace {
+ p.tracef("#%d ", p.written)
+ }
+ p.rawInt64(int64(p.written))
+}
+
+// rawInt64 should only be used by low-level encoders.
+func (p *exporter) rawInt64(x int64) {
+ var tmp [binary.MaxVarintLen64]byte
+ n := binary.PutVarint(tmp[:], x)
+ for i := 0; i < n; i++ {
+ p.rawByte(tmp[i])
+ }
+}
+
+// rawStringln should only be used to emit the initial version string.
+func (p *exporter) rawStringln(s string) {
+ for i := 0; i < len(s); i++ {
+ p.rawByte(s[i])
+ }
+ p.rawByte('\n')
+}
+
+// rawByte is the bottleneck interface to write to p.out.
+// rawByte escapes b as follows (any encoding does that
+// hides '$'):
+//
+// '$' => '|' 'S'
+// '|' => '|' '|'
+//
+// Necessary so other tools can find the end of the
+// export data by searching for "$$".
+// rawByte should only be used by low-level encoders.
+func (p *exporter) rawByte(b byte) {
+ switch b {
+ case '$':
+ // write '$' as '|' 'S'
+ b = 'S'
+ fallthrough
+ case '|':
+ // write '|' as '|' '|'
+ p.out.WriteByte('|')
+ p.written++
+ }
+ p.out.WriteByte(b)
+ p.written++
+}
+
+// tracef is like fmt.Printf but it rewrites the format string
+// to take care of indentation.
+func (p *exporter) tracef(format string, args ...interface{}) {
+ if strings.ContainsAny(format, "<>\n") {
+ var buf bytes.Buffer
+ for i := 0; i < len(format); i++ {
+ // no need to deal with runes
+ ch := format[i]
+ switch ch {
+ case '>':
+ p.indent++
+ continue
+ case '<':
+ p.indent--
+ continue
+ }
+ buf.WriteByte(ch)
+ if ch == '\n' {
+ for j := p.indent; j > 0; j-- {
+ buf.WriteString(". ")
+ }
+ }
+ }
+ format = buf.String()
+ }
+ fmt.Printf(format, args...)
+}
+
+// Debugging support.
+// (tagString is only used when tracing is enabled)
+var tagString = [...]string{
+ // Packages
+ -packageTag: "package",
+
+ // Types
+ -namedTag: "named type",
+ -arrayTag: "array",
+ -sliceTag: "slice",
+ -dddTag: "ddd",
+ -structTag: "struct",
+ -pointerTag: "pointer",
+ -signatureTag: "signature",
+ -interfaceTag: "interface",
+ -mapTag: "map",
+ -chanTag: "chan",
+
+ // Values
+ -falseTag: "false",
+ -trueTag: "true",
+ -int64Tag: "int64",
+ -floatTag: "float",
+ -fractionTag: "fraction",
+ -complexTag: "complex",
+ -stringTag: "string",
+ -unknownTag: "unknown",
+
+ // Type aliases
+ -aliasTag: "alias",
+}
diff --git a/internal/gcimporter/bexport_test.go b/internal/gcimporter/bexport_test.go
new file mode 100644
index 000000000..b5e9ce100
--- /dev/null
+++ b/internal/gcimporter/bexport_test.go
@@ -0,0 +1,551 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gcimporter_test
+
+import (
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/constant"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "sort"
+ "strings"
+ "testing"
+
+ "golang.org/x/tools/go/ast/inspector"
+ "golang.org/x/tools/go/buildutil"
+ "golang.org/x/tools/go/loader"
+ "golang.org/x/tools/internal/gcimporter"
+ "golang.org/x/tools/internal/typeparams"
+ "golang.org/x/tools/internal/typeparams/genericfeatures"
+)
+
+var isRace = false
+
+func TestBExportData_stdlib(t *testing.T) {
+ if runtime.Compiler == "gccgo" {
+ t.Skip("gccgo standard library is inaccessible")
+ }
+ if runtime.GOOS == "android" {
+ t.Skipf("incomplete std lib on %s", runtime.GOOS)
+ }
+ if isRace {
+ t.Skipf("stdlib tests take too long in race mode and flake on builders")
+ }
+ if testing.Short() {
+ t.Skip("skipping RAM hungry test in -short mode")
+ }
+
+ // Load, parse and type-check the program.
+ ctxt := build.Default // copy
+ ctxt.GOPATH = "" // disable GOPATH
+ conf := loader.Config{
+ Build: &ctxt,
+ AllowErrors: true,
+ TypeChecker: types.Config{
+ Error: func(err error) { t.Log(err) },
+ },
+ }
+ for _, path := range buildutil.AllPackages(conf.Build) {
+ conf.Import(path)
+ }
+
+ // Create a package containing type and value errors to ensure
+ // they are properly encoded/decoded.
+ f, err := conf.ParseFile("haserrors/haserrors.go", `package haserrors
+const UnknownValue = "" + 0
+type UnknownType undefined
+`)
+ if err != nil {
+ t.Fatal(err)
+ }
+ conf.CreateFromFiles("haserrors", f)
+
+ prog, err := conf.Load()
+ if err != nil {
+ t.Fatalf("Load failed: %v", err)
+ }
+
+ numPkgs := len(prog.AllPackages)
+ if want := minStdlibPackages; numPkgs < want {
+ t.Errorf("Loaded only %d packages, want at least %d", numPkgs, want)
+ }
+
+ checked := 0
+ for pkg, info := range prog.AllPackages {
+ if info.Files == nil {
+ continue // empty directory
+ }
+ // Binary export does not support generic code.
+ inspect := inspector.New(info.Files)
+ if genericfeatures.ForPackage(inspect, &info.Info) != 0 {
+ t.Logf("skipping package %q which uses generics", pkg.Path())
+ continue
+ }
+ checked++
+ exportdata, err := gcimporter.BExportData(conf.Fset, pkg)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ imports := make(map[string]*types.Package)
+ fset2 := token.NewFileSet()
+ n, pkg2, err := gcimporter.BImportData(fset2, imports, exportdata, pkg.Path())
+ if err != nil {
+ t.Errorf("BImportData(%s): %v", pkg.Path(), err)
+ continue
+ }
+ if n != len(exportdata) {
+ t.Errorf("BImportData(%s) decoded %d bytes, want %d",
+ pkg.Path(), n, len(exportdata))
+ }
+
+ // Compare the packages' corresponding members.
+ for _, name := range pkg.Scope().Names() {
+ if !token.IsExported(name) {
+ continue
+ }
+ obj1 := pkg.Scope().Lookup(name)
+ obj2 := pkg2.Scope().Lookup(name)
+ if obj2 == nil {
+ t.Errorf("%s.%s not found, want %s", pkg.Path(), name, obj1)
+ continue
+ }
+
+ fl1 := fileLine(conf.Fset, obj1)
+ fl2 := fileLine(fset2, obj2)
+ if fl1 != fl2 {
+ t.Errorf("%s.%s: got posn %s, want %s",
+ pkg.Path(), name, fl2, fl1)
+ }
+
+ if err := equalObj(obj1, obj2); err != nil {
+ t.Errorf("%s.%s: %s\ngot: %s\nwant: %s",
+ pkg.Path(), name, err, obj2, obj1)
+ }
+ }
+ }
+ if want := minStdlibPackages; checked < want {
+ t.Errorf("Checked only %d packages, want at least %d", checked, want)
+ }
+}
+
+func fileLine(fset *token.FileSet, obj types.Object) string {
+ posn := fset.Position(obj.Pos())
+ filename := filepath.Clean(strings.ReplaceAll(posn.Filename, "$GOROOT", runtime.GOROOT()))
+ return fmt.Sprintf("%s:%d", filename, posn.Line)
+}
+
+// equalObj reports how x and y differ. They are assumed to belong to
+// different universes so cannot be compared directly.
+func equalObj(x, y types.Object) error {
+ if reflect.TypeOf(x) != reflect.TypeOf(y) {
+ return fmt.Errorf("%T vs %T", x, y)
+ }
+ xt := x.Type()
+ yt := y.Type()
+ switch x.(type) {
+ case *types.Var, *types.Func:
+ // ok
+ case *types.Const:
+ xval := x.(*types.Const).Val()
+ yval := y.(*types.Const).Val()
+ // Use string comparison for floating-point values since rounding is permitted.
+ if constant.Compare(xval, token.NEQ, yval) &&
+ !(xval.Kind() == constant.Float && xval.String() == yval.String()) {
+ return fmt.Errorf("unequal constants %s vs %s", xval, yval)
+ }
+ case *types.TypeName:
+ xt = xt.Underlying()
+ yt = yt.Underlying()
+ default:
+ return fmt.Errorf("unexpected %T", x)
+ }
+ return equalType(xt, yt)
+}
+
+func equalType(x, y types.Type) error {
+ if reflect.TypeOf(x) != reflect.TypeOf(y) {
+ return fmt.Errorf("unequal kinds: %T vs %T", x, y)
+ }
+ switch x := x.(type) {
+ case *types.Interface:
+ y := y.(*types.Interface)
+ // TODO(gri): enable separate emission of Embedded interfaces
+ // and ExplicitMethods then use this logic.
+ // if x.NumEmbeddeds() != y.NumEmbeddeds() {
+ // return fmt.Errorf("unequal number of embedded interfaces: %d vs %d",
+ // x.NumEmbeddeds(), y.NumEmbeddeds())
+ // }
+ // for i := 0; i < x.NumEmbeddeds(); i++ {
+ // xi := x.Embedded(i)
+ // yi := y.Embedded(i)
+ // if xi.String() != yi.String() {
+ // return fmt.Errorf("mismatched %th embedded interface: %s vs %s",
+ // i, xi, yi)
+ // }
+ // }
+ // if x.NumExplicitMethods() != y.NumExplicitMethods() {
+ // return fmt.Errorf("unequal methods: %d vs %d",
+ // x.NumExplicitMethods(), y.NumExplicitMethods())
+ // }
+ // for i := 0; i < x.NumExplicitMethods(); i++ {
+ // xm := x.ExplicitMethod(i)
+ // ym := y.ExplicitMethod(i)
+ // if xm.Name() != ym.Name() {
+ // return fmt.Errorf("mismatched %th method: %s vs %s", i, xm, ym)
+ // }
+ // if err := equalType(xm.Type(), ym.Type()); err != nil {
+ // return fmt.Errorf("mismatched %s method: %s", xm.Name(), err)
+ // }
+ // }
+ if x.NumMethods() != y.NumMethods() {
+ return fmt.Errorf("unequal methods: %d vs %d",
+ x.NumMethods(), y.NumMethods())
+ }
+ for i := 0; i < x.NumMethods(); i++ {
+ xm := x.Method(i)
+ ym := y.Method(i)
+ if xm.Name() != ym.Name() {
+ return fmt.Errorf("mismatched %dth method: %s vs %s", i, xm, ym)
+ }
+ if err := equalType(xm.Type(), ym.Type()); err != nil {
+ return fmt.Errorf("mismatched %s method: %s", xm.Name(), err)
+ }
+ }
+ // Constraints are handled explicitly in the *TypeParam case below, so we
+ // don't yet need to consider embeddeds here.
+ // TODO(rfindley): consider the type set here.
+ case *types.Array:
+ y := y.(*types.Array)
+ if x.Len() != y.Len() {
+ return fmt.Errorf("unequal array lengths: %d vs %d", x.Len(), y.Len())
+ }
+ if err := equalType(x.Elem(), y.Elem()); err != nil {
+ return fmt.Errorf("array elements: %s", err)
+ }
+ case *types.Basic:
+ y := y.(*types.Basic)
+ if x.Kind() != y.Kind() {
+ return fmt.Errorf("unequal basic types: %s vs %s", x, y)
+ }
+ case *types.Chan:
+ y := y.(*types.Chan)
+ if x.Dir() != y.Dir() {
+ return fmt.Errorf("unequal channel directions: %d vs %d", x.Dir(), y.Dir())
+ }
+ if err := equalType(x.Elem(), y.Elem()); err != nil {
+ return fmt.Errorf("channel elements: %s", err)
+ }
+ case *types.Map:
+ y := y.(*types.Map)
+ if err := equalType(x.Key(), y.Key()); err != nil {
+ return fmt.Errorf("map keys: %s", err)
+ }
+ if err := equalType(x.Elem(), y.Elem()); err != nil {
+ return fmt.Errorf("map values: %s", err)
+ }
+ case *types.Named:
+ y := y.(*types.Named)
+ return cmpNamed(x, y)
+ case *types.Pointer:
+ y := y.(*types.Pointer)
+ if err := equalType(x.Elem(), y.Elem()); err != nil {
+ return fmt.Errorf("pointer elements: %s", err)
+ }
+ case *types.Signature:
+ y := y.(*types.Signature)
+ if err := equalType(x.Params(), y.Params()); err != nil {
+ return fmt.Errorf("parameters: %s", err)
+ }
+ if err := equalType(x.Results(), y.Results()); err != nil {
+ return fmt.Errorf("results: %s", err)
+ }
+ if x.Variadic() != y.Variadic() {
+ return fmt.Errorf("unequal variadicity: %t vs %t",
+ x.Variadic(), y.Variadic())
+ }
+ if (x.Recv() != nil) != (y.Recv() != nil) {
+ return fmt.Errorf("unequal receivers: %s vs %s", x.Recv(), y.Recv())
+ }
+ if x.Recv() != nil {
+ // TODO(adonovan): fix: this assertion fires for interface methods.
+ // The type of the receiver of an interface method is a named type
+ // if the Package was loaded from export data, or an unnamed (interface)
+ // type if the Package was produced by type-checking ASTs.
+ // if err := equalType(x.Recv().Type(), y.Recv().Type()); err != nil {
+ // return fmt.Errorf("receiver: %s", err)
+ // }
+ }
+ if err := equalTypeParams(typeparams.ForSignature(x), typeparams.ForSignature(y)); err != nil {
+ return fmt.Errorf("type params: %s", err)
+ }
+ if err := equalTypeParams(typeparams.RecvTypeParams(x), typeparams.RecvTypeParams(y)); err != nil {
+ return fmt.Errorf("recv type params: %s", err)
+ }
+ case *types.Slice:
+ y := y.(*types.Slice)
+ if err := equalType(x.Elem(), y.Elem()); err != nil {
+ return fmt.Errorf("slice elements: %s", err)
+ }
+ case *types.Struct:
+ y := y.(*types.Struct)
+ if x.NumFields() != y.NumFields() {
+ return fmt.Errorf("unequal struct fields: %d vs %d",
+ x.NumFields(), y.NumFields())
+ }
+ for i := 0; i < x.NumFields(); i++ {
+ xf := x.Field(i)
+ yf := y.Field(i)
+ if xf.Name() != yf.Name() {
+ return fmt.Errorf("mismatched fields: %s vs %s", xf, yf)
+ }
+ if err := equalType(xf.Type(), yf.Type()); err != nil {
+ return fmt.Errorf("struct field %s: %s", xf.Name(), err)
+ }
+ if x.Tag(i) != y.Tag(i) {
+ return fmt.Errorf("struct field %s has unequal tags: %q vs %q",
+ xf.Name(), x.Tag(i), y.Tag(i))
+ }
+ }
+ case *types.Tuple:
+ y := y.(*types.Tuple)
+ if x.Len() != y.Len() {
+ return fmt.Errorf("unequal tuple lengths: %d vs %d", x.Len(), y.Len())
+ }
+ for i := 0; i < x.Len(); i++ {
+ if err := equalType(x.At(i).Type(), y.At(i).Type()); err != nil {
+ return fmt.Errorf("tuple element %d: %s", i, err)
+ }
+ }
+ case *typeparams.TypeParam:
+ y := y.(*typeparams.TypeParam)
+ if x.String() != y.String() {
+ return fmt.Errorf("unequal named types: %s vs %s", x, y)
+ }
+ // For now, just compare constraints by type string to short-circuit
+ // cycles. We have to make interfaces explicit as export data currently
+ // doesn't support marking interfaces as implicit.
+ // TODO(rfindley): remove makeExplicit once export data contains an
+ // implicit bit.
+ xc := makeExplicit(x.Constraint()).String()
+ yc := makeExplicit(y.Constraint()).String()
+ if xc != yc {
+ return fmt.Errorf("unequal constraints: %s vs %s", xc, yc)
+ }
+
+ default:
+ panic(fmt.Sprintf("unexpected %T type", x))
+ }
+ return nil
+}
+
+// cmpNamed compares two named types x and y, returning an error for any
+// discrepancies. It does not compare their underlying types.
+func cmpNamed(x, y *types.Named) error {
+ xOrig := typeparams.NamedTypeOrigin(x)
+ yOrig := typeparams.NamedTypeOrigin(y)
+ if xOrig.String() != yOrig.String() {
+ return fmt.Errorf("unequal named types: %s vs %s", x, y)
+ }
+ if err := equalTypeParams(typeparams.ForNamed(x), typeparams.ForNamed(y)); err != nil {
+ return fmt.Errorf("type parameters: %s", err)
+ }
+ if err := equalTypeArgs(typeparams.NamedTypeArgs(x), typeparams.NamedTypeArgs(y)); err != nil {
+ return fmt.Errorf("type arguments: %s", err)
+ }
+ if x.NumMethods() != y.NumMethods() {
+ return fmt.Errorf("unequal methods: %d vs %d",
+ x.NumMethods(), y.NumMethods())
+ }
+ // Unfortunately method sorting is not canonical, so sort before comparing.
+ var xms, yms []*types.Func
+ for i := 0; i < x.NumMethods(); i++ {
+ xms = append(xms, x.Method(i))
+ yms = append(yms, y.Method(i))
+ }
+ for _, ms := range [][]*types.Func{xms, yms} {
+ sort.Slice(ms, func(i, j int) bool {
+ return ms[i].Name() < ms[j].Name()
+ })
+ }
+ for i, xm := range xms {
+ ym := yms[i]
+ if xm.Name() != ym.Name() {
+ return fmt.Errorf("mismatched %dth method: %s vs %s", i, xm, ym)
+ }
+ // Calling equalType here leads to infinite recursion, so just compare
+ // strings.
+ if xm.String() != ym.String() {
+ return fmt.Errorf("unequal methods: %s vs %s", x, y)
+ }
+ }
+ return nil
+}
+
+// makeExplicit returns an explicit version of typ, if typ is an implicit
+// interface. Otherwise it returns typ unmodified.
+func makeExplicit(typ types.Type) types.Type {
+ if iface, _ := typ.(*types.Interface); iface != nil && typeparams.IsImplicit(iface) {
+ var methods []*types.Func
+ for i := 0; i < iface.NumExplicitMethods(); i++ {
+ methods = append(methods, iface.Method(i))
+ }
+ var embeddeds []types.Type
+ for i := 0; i < iface.NumEmbeddeds(); i++ {
+ embeddeds = append(embeddeds, iface.EmbeddedType(i))
+ }
+ return types.NewInterfaceType(methods, embeddeds)
+ }
+ return typ
+}
+
+func equalTypeArgs(x, y *typeparams.TypeList) error {
+ if x.Len() != y.Len() {
+ return fmt.Errorf("unequal lengths: %d vs %d", x.Len(), y.Len())
+ }
+ for i := 0; i < x.Len(); i++ {
+ if err := equalType(x.At(i), y.At(i)); err != nil {
+ return fmt.Errorf("type %d: %s", i, err)
+ }
+ }
+ return nil
+}
+
+func equalTypeParams(x, y *typeparams.TypeParamList) error {
+ if x.Len() != y.Len() {
+ return fmt.Errorf("unequal lengths: %d vs %d", x.Len(), y.Len())
+ }
+ for i := 0; i < x.Len(); i++ {
+ if err := equalType(x.At(i), y.At(i)); err != nil {
+ return fmt.Errorf("type parameter %d: %s", i, err)
+ }
+ }
+ return nil
+}
+
+// TestVeryLongFile tests the position of an import object declared in
+// a very long input file. Line numbers greater than maxlines are
+// reported as line 1, not garbage or token.NoPos.
+func TestVeryLongFile(t *testing.T) {
+ // parse and typecheck
+ longFile := "package foo" + strings.Repeat("\n", 123456) + "var X int"
+ fset1 := token.NewFileSet()
+ f, err := parser.ParseFile(fset1, "foo.go", longFile, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var conf types.Config
+ pkg, err := conf.Check("foo", fset1, []*ast.File{f}, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // export
+ exportdata, err := gcimporter.BExportData(fset1, pkg)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // import
+ imports := make(map[string]*types.Package)
+ fset2 := token.NewFileSet()
+ _, pkg2, err := gcimporter.BImportData(fset2, imports, exportdata, pkg.Path())
+ if err != nil {
+ t.Fatalf("BImportData(%s): %v", pkg.Path(), err)
+ }
+
+ // compare
+ posn1 := fset1.Position(pkg.Scope().Lookup("X").Pos())
+ posn2 := fset2.Position(pkg2.Scope().Lookup("X").Pos())
+ if want := "foo.go:1:1"; posn2.String() != want {
+ t.Errorf("X position = %s, want %s (orig was %s)",
+ posn2, want, posn1)
+ }
+}
+
+const src = `
+package p
+
+type (
+ T0 = int32
+ T1 = struct{}
+ T2 = struct{ T1 }
+ Invalid = foo // foo is undeclared
+)
+`
+
+func checkPkg(t *testing.T, pkg *types.Package, label string) {
+ T1 := types.NewStruct(nil, nil)
+ T2 := types.NewStruct([]*types.Var{types.NewField(0, pkg, "T1", T1, true)}, nil)
+
+ for _, test := range []struct {
+ name string
+ typ types.Type
+ }{
+ {"T0", types.Typ[types.Int32]},
+ {"T1", T1},
+ {"T2", T2},
+ {"Invalid", types.Typ[types.Invalid]},
+ } {
+ obj := pkg.Scope().Lookup(test.name)
+ if obj == nil {
+ t.Errorf("%s: %s not found", label, test.name)
+ continue
+ }
+ tname, _ := obj.(*types.TypeName)
+ if tname == nil {
+ t.Errorf("%s: %v not a type name", label, obj)
+ continue
+ }
+ if !tname.IsAlias() {
+ t.Errorf("%s: %v: not marked as alias", label, tname)
+ continue
+ }
+ if got := tname.Type(); !types.Identical(got, test.typ) {
+ t.Errorf("%s: %v: got %v; want %v", label, tname, got, test.typ)
+ }
+ }
+}
+
+func TestTypeAliases(t *testing.T) {
+ // parse and typecheck
+ fset1 := token.NewFileSet()
+ f, err := parser.ParseFile(fset1, "p.go", src, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var conf types.Config
+ pkg1, err := conf.Check("p", fset1, []*ast.File{f}, nil)
+ if err == nil {
+ // foo in undeclared in src; we should see an error
+ t.Fatal("invalid source type-checked without error")
+ }
+ if pkg1 == nil {
+ // despite incorrect src we should see a (partially) type-checked package
+ t.Fatal("nil package returned")
+ }
+ checkPkg(t, pkg1, "export")
+
+ // export
+ exportdata, err := gcimporter.BExportData(fset1, pkg1)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // import
+ imports := make(map[string]*types.Package)
+ fset2 := token.NewFileSet()
+ _, pkg2, err := gcimporter.BImportData(fset2, imports, exportdata, pkg1.Path())
+ if err != nil {
+ t.Fatalf("BImportData(%s): %v", pkg1.Path(), err)
+ }
+ checkPkg(t, pkg2, "import")
+}
diff --git a/go/internal/gcimporter/bimport.go b/internal/gcimporter/bimport.go
index b85de0147..b85de0147 100644
--- a/go/internal/gcimporter/bimport.go
+++ b/internal/gcimporter/bimport.go
diff --git a/go/internal/gcimporter/exportdata.go b/internal/gcimporter/exportdata.go
index f6437feb1..f6437feb1 100644
--- a/go/internal/gcimporter/exportdata.go
+++ b/internal/gcimporter/exportdata.go
diff --git a/internal/gcimporter/gcimporter.go b/internal/gcimporter/gcimporter.go
new file mode 100644
index 000000000..a973dece9
--- /dev/null
+++ b/internal/gcimporter/gcimporter.go
@@ -0,0 +1,277 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is a reduced copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go.
+
+// Package gcimporter provides various functions for reading
+// gc-generated object files that can be used to implement the
+// Importer interface defined by the Go 1.5 standard library package.
+//
+// The encoding is deterministic: if the encoder is applied twice to
+// the same types.Package data structure, both encodings are equal.
+// This property may be important to avoid spurious changes in
+// applications such as build systems.
+//
+// However, the encoder is not necessarily idempotent. Importing an
+// exported package may yield a types.Package that, while it
+// represents the same set of Go types as the original, may differ in
+// the details of its internal representation. Because of these
+// differences, re-encoding the imported package may yield a
+// different, but equally valid, encoding of the package.
+package gcimporter // import "golang.org/x/tools/internal/gcimporter"
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "go/build"
+ "go/token"
+ "go/types"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "sync"
+)
+
+const (
+ // Enable debug during development: it adds some additional checks, and
+ // prevents errors from being recovered.
+ debug = false
+
+ // If trace is set, debugging output is printed to std out.
+ trace = false
+)
+
+var exportMap sync.Map // package dir → func() (string, bool)
+
+// lookupGorootExport returns the location of the export data
+// (normally found in the build cache, but located in GOROOT/pkg
+// in prior Go releases) for the package located in pkgDir.
+//
+// (We use the package's directory instead of its import path
+// mainly to simplify handling of the packages in src/vendor
+// and cmd/vendor.)
+func lookupGorootExport(pkgDir string) (string, bool) {
+ f, ok := exportMap.Load(pkgDir)
+ if !ok {
+ var (
+ listOnce sync.Once
+ exportPath string
+ )
+ f, _ = exportMap.LoadOrStore(pkgDir, func() (string, bool) {
+ listOnce.Do(func() {
+ cmd := exec.Command("go", "list", "-export", "-f", "{{.Export}}", pkgDir)
+ cmd.Dir = build.Default.GOROOT
+ var output []byte
+ output, err := cmd.Output()
+ if err != nil {
+ return
+ }
+
+ exports := strings.Split(string(bytes.TrimSpace(output)), "\n")
+ if len(exports) != 1 {
+ return
+ }
+
+ exportPath = exports[0]
+ })
+
+ return exportPath, exportPath != ""
+ })
+ }
+
+ return f.(func() (string, bool))()
+}
+
+var pkgExts = [...]string{".a", ".o"}
+
+// FindPkg returns the filename and unique package id for an import
+// path based on package information provided by build.Import (using
+// the build.Default build.Context). A relative srcDir is interpreted
+// relative to the current working directory.
+// If no file was found, an empty filename is returned.
+func FindPkg(path, srcDir string) (filename, id string) {
+ if path == "" {
+ return
+ }
+
+ var noext string
+ switch {
+ default:
+ // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x"
+ // Don't require the source files to be present.
+ if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282
+ srcDir = abs
+ }
+ bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
+ if bp.PkgObj == "" {
+ var ok bool
+ if bp.Goroot && bp.Dir != "" {
+ filename, ok = lookupGorootExport(bp.Dir)
+ }
+ if !ok {
+ id = path // make sure we have an id to print in error message
+ return
+ }
+ } else {
+ noext = strings.TrimSuffix(bp.PkgObj, ".a")
+ id = bp.ImportPath
+ }
+
+ case build.IsLocalImport(path):
+ // "./x" -> "/this/directory/x.ext", "/this/directory/x"
+ noext = filepath.Join(srcDir, path)
+ id = noext
+
+ case filepath.IsAbs(path):
+ // for completeness only - go/build.Import
+ // does not support absolute imports
+ // "/x" -> "/x.ext", "/x"
+ noext = path
+ id = path
+ }
+
+ if false { // for debugging
+ if path != id {
+ fmt.Printf("%s -> %s\n", path, id)
+ }
+ }
+
+ if filename != "" {
+ if f, err := os.Stat(filename); err == nil && !f.IsDir() {
+ return
+ }
+ }
+
+ // try extensions
+ for _, ext := range pkgExts {
+ filename = noext + ext
+ if f, err := os.Stat(filename); err == nil && !f.IsDir() {
+ return
+ }
+ }
+
+ filename = "" // not found
+ return
+}
+
+// Import imports a gc-generated package given its import path and srcDir, adds
+// the corresponding package object to the packages map, and returns the object.
+// The packages map must contain all packages already imported.
+func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) {
+ var rc io.ReadCloser
+ var filename, id string
+ if lookup != nil {
+ // With custom lookup specified, assume that caller has
+ // converted path to a canonical import path for use in the map.
+ if path == "unsafe" {
+ return types.Unsafe, nil
+ }
+ id = path
+
+ // No need to re-import if the package was imported completely before.
+ if pkg = packages[id]; pkg != nil && pkg.Complete() {
+ return
+ }
+ f, err := lookup(path)
+ if err != nil {
+ return nil, err
+ }
+ rc = f
+ } else {
+ filename, id = FindPkg(path, srcDir)
+ if filename == "" {
+ if path == "unsafe" {
+ return types.Unsafe, nil
+ }
+ return nil, fmt.Errorf("can't find import: %q", id)
+ }
+
+ // no need to re-import if the package was imported completely before
+ if pkg = packages[id]; pkg != nil && pkg.Complete() {
+ return
+ }
+
+ // open file
+ f, err := os.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ if err != nil {
+ // add file name to error
+ err = fmt.Errorf("%s: %v", filename, err)
+ }
+ }()
+ rc = f
+ }
+ defer rc.Close()
+
+ var hdr string
+ var size int64
+ buf := bufio.NewReader(rc)
+ if hdr, size, err = FindExportData(buf); err != nil {
+ return
+ }
+
+ switch hdr {
+ case "$$B\n":
+ var data []byte
+ data, err = ioutil.ReadAll(buf)
+ if err != nil {
+ break
+ }
+
+ // TODO(gri): allow clients of go/importer to provide a FileSet.
+ // Or, define a new standard go/types/gcexportdata package.
+ fset := token.NewFileSet()
+
+ // The indexed export format starts with an 'i'; the older
+ // binary export format starts with a 'c', 'd', or 'v'
+ // (from "version"). Select appropriate importer.
+ if len(data) > 0 {
+ switch data[0] {
+ case 'i':
+ _, pkg, err := IImportData(fset, packages, data[1:], id)
+ return pkg, err
+
+ case 'v', 'c', 'd':
+ _, pkg, err := BImportData(fset, packages, data, id)
+ return pkg, err
+
+ case 'u':
+ _, pkg, err := UImportData(fset, packages, data[1:size], id)
+ return pkg, err
+
+ default:
+ l := len(data)
+ if l > 10 {
+ l = 10
+ }
+ return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id)
+ }
+ }
+
+ default:
+ err = fmt.Errorf("unknown export data header: %q", hdr)
+ }
+
+ return
+}
+
+func deref(typ types.Type) types.Type {
+ if p, _ := typ.(*types.Pointer); p != nil {
+ return p.Elem()
+ }
+ return typ
+}
+
+type byPath []*types.Package
+
+func (a byPath) Len() int { return len(a) }
+func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() }
diff --git a/internal/gcimporter/gcimporter_test.go b/internal/gcimporter/gcimporter_test.go
new file mode 100644
index 000000000..0a247f71d
--- /dev/null
+++ b/internal/gcimporter/gcimporter_test.go
@@ -0,0 +1,935 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is a copy of $GOROOT/src/go/internal/gcimporter/gcimporter_test.go,
+// adjusted to make it build with code from (std lib) internal/testenv copied.
+
+package gcimporter
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/constant"
+ goimporter "go/importer"
+ goparser "go/parser"
+ "go/token"
+ "go/types"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "runtime"
+ "sort"
+ "strings"
+ "testing"
+ "time"
+
+ "golang.org/x/tools/internal/goroot"
+ "golang.org/x/tools/internal/testenv"
+)
+
+func TestMain(m *testing.M) {
+ testenv.ExitIfSmallMachine()
+ os.Exit(m.Run())
+}
+
+// ----------------------------------------------------------------------------
+
+func needsCompiler(t *testing.T, compiler string) {
+ if runtime.Compiler == compiler {
+ return
+ }
+ switch compiler {
+ case "gc":
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+}
+
+// compile runs the compiler on filename, with dirname as the working directory,
+// and writes the output file to outdirname.
+// compile gives the resulting package a packagepath of p.
+func compile(t *testing.T, dirname, filename, outdirname string, packagefiles map[string]string) string {
+ return compilePkg(t, dirname, filename, outdirname, packagefiles, "p")
+}
+
+func compilePkg(t *testing.T, dirname, filename, outdirname string, packagefiles map[string]string, pkg string) string {
+ testenv.NeedsGoBuild(t)
+
+ // filename must end with ".go"
+ basename := strings.TrimSuffix(filepath.Base(filename), ".go")
+ ok := filename != basename
+ if !ok {
+ t.Fatalf("filename doesn't end in .go: %s", filename)
+ }
+ objname := basename + ".o"
+ outname := filepath.Join(outdirname, objname)
+
+ importcfgfile := os.DevNull
+ if len(packagefiles) > 0 {
+ importcfgfile = filepath.Join(outdirname, basename) + ".importcfg"
+ importcfg := new(bytes.Buffer)
+ fmt.Fprintf(importcfg, "# import config")
+ for k, v := range packagefiles {
+ fmt.Fprintf(importcfg, "\npackagefile %s=%s\n", k, v)
+ }
+ if err := os.WriteFile(importcfgfile, importcfg.Bytes(), 0655); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ importreldir := strings.ReplaceAll(outdirname, string(os.PathSeparator), "/")
+ cmd := exec.Command("go", "tool", "compile", "-p", pkg, "-D", importreldir, "-importcfg", importcfgfile, "-o", outname, filename)
+ cmd.Dir = dirname
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Logf("%s", out)
+ t.Fatalf("go tool compile %s failed: %s", filename, err)
+ }
+ return outname
+}
+
+func testPath(t *testing.T, path, srcDir string) *types.Package {
+ t0 := time.Now()
+ pkg, err := Import(make(map[string]*types.Package), path, srcDir, nil)
+ if err != nil {
+ t.Errorf("testPath(%s): %s", path, err)
+ return nil
+ }
+ t.Logf("testPath(%s): %v", path, time.Since(t0))
+ return pkg
+}
+
+func mktmpdir(t *testing.T) string {
+ tmpdir, err := ioutil.TempDir("", "gcimporter_test")
+ if err != nil {
+ t.Fatal("mktmpdir:", err)
+ }
+ if err := os.Mkdir(filepath.Join(tmpdir, "testdata"), 0700); err != nil {
+ os.RemoveAll(tmpdir)
+ t.Fatal("mktmpdir:", err)
+ }
+ return tmpdir
+}
+
+const testfile = "exports.go"
+
+func TestImportTestdata(t *testing.T) {
+ needsCompiler(t, "gc")
+ testenv.NeedsGoBuild(t) // to find stdlib export data in the build cache
+
+ tmpdir := mktmpdir(t)
+ defer os.RemoveAll(tmpdir)
+
+ packageFiles := map[string]string{}
+ for _, pkg := range []string{"go/ast", "go/token"} {
+ export, _ := FindPkg(pkg, "testdata")
+ if export == "" {
+ t.Fatalf("no export data found for %s", pkg)
+ }
+ packageFiles[pkg] = export
+ }
+
+ compile(t, "testdata", testfile, filepath.Join(tmpdir, "testdata"), packageFiles)
+
+ // filename should end with ".go"
+ filename := testfile[:len(testfile)-3]
+ if pkg := testPath(t, "./testdata/"+filename, tmpdir); pkg != nil {
+ // The package's Imports list must include all packages
+ // explicitly imported by testfile, plus all packages
+ // referenced indirectly via exported objects in testfile.
+ // With the textual export format (when run against Go1.6),
+ // the list may also include additional packages that are
+ // not strictly required for import processing alone (they
+ // are exported to err "on the safe side").
+ // For now, we just test the presence of a few packages
+ // that we know are there for sure.
+ got := fmt.Sprint(pkg.Imports())
+ wants := []string{"go/ast", "go/token"}
+ if unifiedIR {
+ wants = []string{"go/ast"}
+ }
+ for _, want := range wants {
+ if !strings.Contains(got, want) {
+ t.Errorf(`Package("exports").Imports() = %s, does not contain %s`, got, want)
+ }
+ }
+ }
+}
+
+func TestImportTypeparamTests(t *testing.T) {
+ if testing.Short() {
+ t.Skipf("in short mode, skipping test that requires export data for all of std")
+ }
+
+ testenv.NeedsGo1Point(t, 18) // requires generics
+ testenv.NeedsGoBuild(t) // to find stdlib export data in the build cache
+
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ tmpdir := mktmpdir(t)
+ defer os.RemoveAll(tmpdir)
+
+ // Check go files in test/typeparam, except those that fail for a known
+ // reason.
+ rootDir := filepath.Join(runtime.GOROOT(), "test", "typeparam")
+ list, err := os.ReadDir(rootDir)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var skip map[string]string
+ if !unifiedIR {
+ // The Go 1.18 frontend still fails several cases.
+ skip = map[string]string{
+ "equal.go": "inconsistent embedded sorting", // TODO(rfindley): investigate this.
+ "nested.go": "fails to compile", // TODO(rfindley): investigate this.
+ "issue47631.go": "can not handle local type declarations",
+ "issue55101.go": "fails to compile",
+ }
+ }
+
+ for _, entry := range list {
+ if entry.IsDir() || !strings.HasSuffix(entry.Name(), ".go") {
+ // For now, only consider standalone go files.
+ continue
+ }
+
+ t.Run(entry.Name(), func(t *testing.T) {
+ if reason, ok := skip[entry.Name()]; ok {
+ t.Skip(reason)
+ }
+
+ filename := filepath.Join(rootDir, entry.Name())
+ src, err := os.ReadFile(filename)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.HasPrefix(src, []byte("// run")) && !bytes.HasPrefix(src, []byte("// compile")) {
+ // We're bypassing the logic of run.go here, so be conservative about
+ // the files we consider in an attempt to make this test more robust to
+ // changes in test/typeparams.
+ t.Skipf("not detected as a run test")
+ }
+
+ // Compile and import, and compare the resulting package with the package
+ // that was type-checked directly.
+ pkgFiles, err := goroot.PkgfileMap()
+ if err != nil {
+ t.Fatal(err)
+ }
+ compile(t, rootDir, entry.Name(), filepath.Join(tmpdir, "testdata"), pkgFiles)
+ pkgName := strings.TrimSuffix(entry.Name(), ".go")
+ imported := importPkg(t, "./testdata/"+pkgName, tmpdir)
+ checked := checkFile(t, filename, src)
+
+ seen := make(map[string]bool)
+ for _, name := range imported.Scope().Names() {
+ if !token.IsExported(name) {
+ continue // ignore synthetic names like .inittask and .dict.*
+ }
+ seen[name] = true
+
+ importedObj := imported.Scope().Lookup(name)
+ got := types.ObjectString(importedObj, types.RelativeTo(imported))
+
+ checkedObj := checked.Scope().Lookup(name)
+ if checkedObj == nil {
+ t.Fatalf("imported object %q was not type-checked", name)
+ }
+ want := types.ObjectString(checkedObj, types.RelativeTo(checked))
+
+ if got != want {
+ t.Errorf("imported %q as %q, want %q", name, got, want)
+ }
+ }
+
+ for _, name := range checked.Scope().Names() {
+ if !token.IsExported(name) || seen[name] {
+ continue
+ }
+ t.Errorf("did not import object %q", name)
+ }
+ })
+ }
+}
+
+func checkFile(t *testing.T, filename string, src []byte) *types.Package {
+ fset := token.NewFileSet()
+ f, err := goparser.ParseFile(fset, filename, src, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ config := types.Config{
+ Importer: goimporter.Default(),
+ }
+ pkg, err := config.Check("", fset, []*ast.File{f}, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return pkg
+}
+
+func TestVersionHandling(t *testing.T) {
+ if debug {
+ t.Skip("TestVersionHandling panics in debug mode")
+ }
+
+ // This package only handles gc export data.
+ needsCompiler(t, "gc")
+
+ const dir = "./testdata/versions"
+ list, err := ioutil.ReadDir(dir)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ tmpdir := mktmpdir(t)
+ defer os.RemoveAll(tmpdir)
+ corruptdir := filepath.Join(tmpdir, "testdata", "versions")
+ if err := os.Mkdir(corruptdir, 0700); err != nil {
+ t.Fatal(err)
+ }
+
+ for _, f := range list {
+ name := f.Name()
+ if !strings.HasSuffix(name, ".a") {
+ continue // not a package file
+ }
+ if strings.Contains(name, "corrupted") {
+ continue // don't process a leftover corrupted file
+ }
+ pkgpath := "./" + name[:len(name)-2]
+
+ if testing.Verbose() {
+ t.Logf("importing %s", name)
+ }
+
+ // test that export data can be imported
+ _, err := Import(make(map[string]*types.Package), pkgpath, dir, nil)
+ if err != nil {
+ // ok to fail if it fails with a newer version error for select files
+ if strings.Contains(err.Error(), "newer version") {
+ switch name {
+ case "test_go1.11_999b.a", "test_go1.11_999i.a":
+ continue
+ }
+ // fall through
+ }
+ t.Errorf("import %q failed: %v", pkgpath, err)
+ continue
+ }
+
+ // create file with corrupted export data
+ // 1) read file
+ data, err := ioutil.ReadFile(filepath.Join(dir, name))
+ if err != nil {
+ t.Fatal(err)
+ }
+ // 2) find export data
+ i := bytes.Index(data, []byte("\n$$B\n")) + 5
+ j := bytes.Index(data[i:], []byte("\n$$\n")) + i
+ if i < 0 || j < 0 || i > j {
+ t.Fatalf("export data section not found (i = %d, j = %d)", i, j)
+ }
+ // 3) corrupt the data (increment every 7th byte)
+ for k := j - 13; k >= i; k -= 7 {
+ data[k]++
+ }
+ // 4) write the file
+ pkgpath += "_corrupted"
+ filename := filepath.Join(corruptdir, pkgpath) + ".a"
+ ioutil.WriteFile(filename, data, 0666)
+
+ // test that importing the corrupted file results in an error
+ _, err = Import(make(map[string]*types.Package), pkgpath, corruptdir, nil)
+ if err == nil {
+ t.Errorf("import corrupted %q succeeded", pkgpath)
+ } else if msg := err.Error(); !strings.Contains(msg, "version skew") {
+ t.Errorf("import %q error incorrect (%s)", pkgpath, msg)
+ }
+ }
+}
+
+func TestImportStdLib(t *testing.T) {
+ if testing.Short() {
+ t.Skip("the imports can be expensive, and this test is especially slow when the build cache is empty")
+ }
+ // This package only handles gc export data.
+ needsCompiler(t, "gc")
+ testenv.NeedsGoBuild(t) // to find stdlib export data in the build cache
+
+ // Get list of packages in stdlib. Filter out test-only packages with {{if .GoFiles}} check.
+ var stderr bytes.Buffer
+ cmd := exec.Command("go", "list", "-f", "{{if .GoFiles}}{{.ImportPath}}{{end}}", "std")
+ cmd.Stderr = &stderr
+ out, err := cmd.Output()
+ if err != nil {
+ t.Fatalf("failed to run go list to determine stdlib packages: %v\nstderr:\n%v", err, stderr.String())
+ }
+ pkgs := strings.Fields(string(out))
+
+ var nimports int
+ for _, pkg := range pkgs {
+ t.Run(pkg, func(t *testing.T) {
+ if testPath(t, pkg, filepath.Join(testenv.GOROOT(t), "src", path.Dir(pkg))) != nil {
+ nimports++
+ }
+ })
+ }
+ const minPkgs = 225 // 'GOOS=plan9 go1.18 list std | wc -l' reports 228; most other platforms have more.
+ if len(pkgs) < minPkgs {
+ t.Fatalf("too few packages (%d) were imported", nimports)
+ }
+
+ t.Logf("tested %d imports", nimports)
+}
+
+var importedObjectTests = []struct {
+ name string
+ want string
+}{
+ // non-interfaces
+ {"crypto.Hash", "type Hash uint"},
+ {"go/ast.ObjKind", "type ObjKind int"},
+ {"go/types.Qualifier", "type Qualifier func(*Package) string"},
+ {"go/types.Comparable", "func Comparable(T Type) bool"},
+ {"math.Pi", "const Pi untyped float"},
+ {"math.Sin", "func Sin(x float64) float64"},
+ {"go/ast.NotNilFilter", "func NotNilFilter(_ string, v reflect.Value) bool"},
+ {"go/internal/gcimporter.FindPkg", "func FindPkg(path string, srcDir string) (filename string, id string)"},
+
+ // interfaces
+ {"context.Context", "type Context interface{Deadline() (deadline time.Time, ok bool); Done() <-chan struct{}; Err() error; Value(key any) any}"},
+ {"crypto.Decrypter", "type Decrypter interface{Decrypt(rand io.Reader, msg []byte, opts DecrypterOpts) (plaintext []byte, err error); Public() PublicKey}"},
+ {"encoding.BinaryMarshaler", "type BinaryMarshaler interface{MarshalBinary() (data []byte, err error)}"},
+ {"io.Reader", "type Reader interface{Read(p []byte) (n int, err error)}"},
+ {"io.ReadWriter", "type ReadWriter interface{Reader; Writer}"},
+ {"go/ast.Node", "type Node interface{End() go/token.Pos; Pos() go/token.Pos}"},
+ {"go/types.Type", "type Type interface{String() string; Underlying() Type}"},
+}
+
+// TODO(rsc): Delete this init func after x/tools no longer needs to test successfully with Go 1.17.
+func init() {
+ if build.Default.ReleaseTags[len(build.Default.ReleaseTags)-1] <= "go1.17" {
+ for i := range importedObjectTests {
+ if importedObjectTests[i].name == "context.Context" {
+ // Expand any to interface{}.
+ importedObjectTests[i].want = "type Context interface{Deadline() (deadline time.Time, ok bool); Done() <-chan struct{}; Err() error; Value(key interface{}) interface{}}"
+ }
+ }
+ }
+}
+
+func TestImportedTypes(t *testing.T) {
+ // This package only handles gc export data.
+ needsCompiler(t, "gc")
+ testenv.NeedsGoBuild(t) // to find stdlib export data in the build cache
+
+ for _, test := range importedObjectTests {
+ obj := importObject(t, test.name)
+ if obj == nil {
+ continue // error reported elsewhere
+ }
+ got := types.ObjectString(obj, types.RelativeTo(obj.Pkg()))
+
+ // TODO(rsc): Delete this block once go.dev/cl/368254 lands.
+ if got != test.want && test.want == strings.ReplaceAll(got, "interface{}", "any") {
+ got = test.want
+ }
+
+ if got != test.want {
+ t.Errorf("%s: got %q; want %q", test.name, got, test.want)
+ }
+
+ if named, _ := obj.Type().(*types.Named); named != nil {
+ verifyInterfaceMethodRecvs(t, named, 0)
+ }
+ }
+}
+
+func TestImportedConsts(t *testing.T) {
+ testenv.NeedsGoBuild(t) // to find stdlib export data in the build cache
+
+ tests := []struct {
+ name string
+ want constant.Kind
+ }{
+ {"math.Pi", constant.Float},
+ {"math.MaxFloat64", constant.Float},
+ {"math.MaxInt64", constant.Int},
+ }
+
+ for _, test := range tests {
+ obj := importObject(t, test.name)
+ if got := obj.(*types.Const).Val().Kind(); got != test.want {
+ t.Errorf("%s: imported as constant.Kind(%v), want constant.Kind(%v)", test.name, got, test.want)
+ }
+ }
+}
+
+// importObject imports the object specified by a name of the form
+// <import path>.<object name>, e.g. go/types.Type.
+//
+// If any errors occur they are reported via t and the resulting object will
+// be nil.
+func importObject(t *testing.T, name string) types.Object {
+ s := strings.Split(name, ".")
+ if len(s) != 2 {
+ t.Fatal("inconsistent test data")
+ }
+ importPath := s[0]
+ objName := s[1]
+
+ pkg, err := Import(make(map[string]*types.Package), importPath, ".", nil)
+ if err != nil {
+ t.Error(err)
+ return nil
+ }
+
+ obj := pkg.Scope().Lookup(objName)
+ if obj == nil {
+ t.Errorf("%s: object not found", name)
+ return nil
+ }
+ return obj
+}
+
+// verifyInterfaceMethodRecvs verifies that method receiver types
+// are named if the methods belong to a named interface type.
+func verifyInterfaceMethodRecvs(t *testing.T, named *types.Named, level int) {
+ // avoid endless recursion in case of an embedding bug that lead to a cycle
+ if level > 10 {
+ t.Errorf("%s: embeds itself", named)
+ return
+ }
+
+ iface, _ := named.Underlying().(*types.Interface)
+ if iface == nil {
+ return // not an interface
+ }
+
+ // check explicitly declared methods
+ for i := 0; i < iface.NumExplicitMethods(); i++ {
+ m := iface.ExplicitMethod(i)
+ recv := m.Type().(*types.Signature).Recv()
+ if recv == nil {
+ t.Errorf("%s: missing receiver type", m)
+ continue
+ }
+ if recv.Type() != named {
+ t.Errorf("%s: got recv type %s; want %s", m, recv.Type(), named)
+ }
+ }
+
+ // check embedded interfaces (if they are named, too)
+ for i := 0; i < iface.NumEmbeddeds(); i++ {
+ // embedding of interfaces cannot have cycles; recursion will terminate
+ if etype, _ := iface.EmbeddedType(i).(*types.Named); etype != nil {
+ verifyInterfaceMethodRecvs(t, etype, level+1)
+ }
+ }
+}
+
+func TestIssue5815(t *testing.T) {
+ // This package only handles gc export data.
+ needsCompiler(t, "gc")
+ testenv.NeedsGoBuild(t) // to find stdlib export data in the build cache
+
+ pkg := importPkg(t, "strings", ".")
+
+ scope := pkg.Scope()
+ for _, name := range scope.Names() {
+ obj := scope.Lookup(name)
+ if obj.Pkg() == nil {
+ t.Errorf("no pkg for %s", obj)
+ }
+ if tname, _ := obj.(*types.TypeName); tname != nil {
+ named := tname.Type().(*types.Named)
+ for i := 0; i < named.NumMethods(); i++ {
+ m := named.Method(i)
+ if m.Pkg() == nil {
+ t.Errorf("no pkg for %s", m)
+ }
+ }
+ }
+ }
+}
+
+// Smoke test to ensure that imported methods get the correct package.
+func TestCorrectMethodPackage(t *testing.T) {
+ // This package only handles gc export data.
+ needsCompiler(t, "gc")
+ testenv.NeedsGoBuild(t) // to find stdlib export data in the build cache
+
+ imports := make(map[string]*types.Package)
+ _, err := Import(imports, "net/http", ".", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ mutex := imports["sync"].Scope().Lookup("Mutex").(*types.TypeName).Type()
+ mset := types.NewMethodSet(types.NewPointer(mutex)) // methods of *sync.Mutex
+ sel := mset.Lookup(nil, "Lock")
+ lock := sel.Obj().(*types.Func)
+ if got, want := lock.Pkg().Path(), "sync"; got != want {
+ t.Errorf("got package path %q; want %q", got, want)
+ }
+}
+
+func TestIssue13566(t *testing.T) {
+ // This package only handles gc export data.
+ needsCompiler(t, "gc")
+ testenv.NeedsGoBuild(t) // to find stdlib export data in the build cache
+
+ // On windows, we have to set the -D option for the compiler to avoid having a drive
+ // letter and an illegal ':' in the import path - just skip it (see also issue #3483).
+ if runtime.GOOS == "windows" {
+ t.Skip("avoid dealing with relative paths/drive letters on windows")
+ }
+
+ tmpdir := mktmpdir(t)
+ defer os.RemoveAll(tmpdir)
+ testoutdir := filepath.Join(tmpdir, "testdata")
+
+ // b.go needs to be compiled from the output directory so that the compiler can
+ // find the compiled package a. We pass the full path to compile() so that we
+ // don't have to copy the file to that directory.
+ bpath, err := filepath.Abs(filepath.Join("testdata", "b.go"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ jsonExport, _ := FindPkg("encoding/json", "testdata")
+ if jsonExport == "" {
+ t.Fatalf("no export data found for encoding/json")
+ }
+
+ compilePkg(t, "testdata", "a.go", testoutdir, map[string]string{"encoding/json": jsonExport}, apkg(testoutdir))
+ compile(t, testoutdir, bpath, testoutdir, map[string]string{apkg(testoutdir): filepath.Join(testoutdir, "a.o")})
+
+ // import must succeed (test for issue at hand)
+ pkg := importPkg(t, "./testdata/b", tmpdir)
+
+ // make sure all indirectly imported packages have names
+ for _, imp := range pkg.Imports() {
+ if imp.Name() == "" {
+ t.Errorf("no name for %s package", imp.Path())
+ }
+ }
+}
+
+func TestIssue13898(t *testing.T) {
+ // This package only handles gc export data.
+ needsCompiler(t, "gc")
+ testenv.NeedsGoBuild(t) // to find stdlib export data in the build cache
+
+ // import go/internal/gcimporter which imports go/types partially
+ imports := make(map[string]*types.Package)
+ _, err := Import(imports, "go/internal/gcimporter", ".", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // look for go/types package
+ var goTypesPkg *types.Package
+ for path, pkg := range imports {
+ if path == "go/types" {
+ goTypesPkg = pkg
+ break
+ }
+ }
+ if goTypesPkg == nil {
+ t.Fatal("go/types not found")
+ }
+
+ // look for go/types.Object type
+ obj := lookupObj(t, goTypesPkg.Scope(), "Object")
+ typ, ok := obj.Type().(*types.Named)
+ if !ok {
+ t.Fatalf("go/types.Object type is %v; wanted named type", typ)
+ }
+
+ // lookup go/types.Object.Pkg method
+ m, index, indirect := types.LookupFieldOrMethod(typ, false, nil, "Pkg")
+ if m == nil {
+ t.Fatalf("go/types.Object.Pkg not found (index = %v, indirect = %v)", index, indirect)
+ }
+
+ // the method must belong to go/types
+ if m.Pkg().Path() != "go/types" {
+ t.Fatalf("found %v; want go/types", m.Pkg())
+ }
+}
+
+func TestIssue15517(t *testing.T) {
+ // This package only handles gc export data.
+ needsCompiler(t, "gc")
+
+ // On windows, we have to set the -D option for the compiler to avoid having a drive
+ // letter and an illegal ':' in the import path - just skip it (see also issue #3483).
+ if runtime.GOOS == "windows" {
+ t.Skip("avoid dealing with relative paths/drive letters on windows")
+ }
+
+ tmpdir := mktmpdir(t)
+ defer os.RemoveAll(tmpdir)
+
+ compile(t, "testdata", "p.go", filepath.Join(tmpdir, "testdata"), nil)
+
+ // Multiple imports of p must succeed without redeclaration errors.
+ // We use an import path that's not cleaned up so that the eventual
+ // file path for the package is different from the package path; this
+ // will expose the error if it is present.
+ //
+ // (Issue: Both the textual and the binary importer used the file path
+ // of the package to be imported as key into the shared packages map.
+ // However, the binary importer then used the package path to identify
+ // the imported package to mark it as complete; effectively marking the
+ // wrong package as complete. By using an "unclean" package path, the
+ // file and package path are different, exposing the problem if present.
+ // The same issue occurs with vendoring.)
+ imports := make(map[string]*types.Package)
+ for i := 0; i < 3; i++ {
+ if _, err := Import(imports, "./././testdata/p", tmpdir, nil); err != nil {
+ t.Fatal(err)
+ }
+ }
+}
+
+func TestIssue15920(t *testing.T) {
+ // This package only handles gc export data.
+ needsCompiler(t, "gc")
+
+ // On windows, we have to set the -D option for the compiler to avoid having a drive
+ // letter and an illegal ':' in the import path - just skip it (see also issue #3483).
+ if runtime.GOOS == "windows" {
+ t.Skip("avoid dealing with relative paths/drive letters on windows")
+ }
+
+ compileAndImportPkg(t, "issue15920")
+}
+
+func TestIssue20046(t *testing.T) {
+ // This package only handles gc export data.
+ needsCompiler(t, "gc")
+
+ // On windows, we have to set the -D option for the compiler to avoid having a drive
+ // letter and an illegal ':' in the import path - just skip it (see also issue #3483).
+ if runtime.GOOS == "windows" {
+ t.Skip("avoid dealing with relative paths/drive letters on windows")
+ }
+
+ // "./issue20046".V.M must exist
+ pkg := compileAndImportPkg(t, "issue20046")
+ obj := lookupObj(t, pkg.Scope(), "V")
+ if m, index, indirect := types.LookupFieldOrMethod(obj.Type(), false, nil, "M"); m == nil {
+ t.Fatalf("V.M not found (index = %v, indirect = %v)", index, indirect)
+ }
+}
+
+func TestIssue25301(t *testing.T) {
+ // This package only handles gc export data.
+ needsCompiler(t, "gc")
+
+ // On windows, we have to set the -D option for the compiler to avoid having a drive
+ // letter and an illegal ':' in the import path - just skip it (see also issue #3483).
+ if runtime.GOOS == "windows" {
+ t.Skip("avoid dealing with relative paths/drive letters on windows")
+ }
+
+ compileAndImportPkg(t, "issue25301")
+}
+
+func TestIssue51836(t *testing.T) {
+ testenv.NeedsGo1Point(t, 18) // requires generics
+
+ // This package only handles gc export data.
+ needsCompiler(t, "gc")
+
+ // On windows, we have to set the -D option for the compiler to avoid having a drive
+ // letter and an illegal ':' in the import path - just skip it (see also issue #3483).
+ if runtime.GOOS == "windows" {
+ t.Skip("avoid dealing with relative paths/drive letters on windows")
+ }
+
+ tmpdir := mktmpdir(t)
+ defer os.RemoveAll(tmpdir)
+ testoutdir := filepath.Join(tmpdir, "testdata")
+
+ dir := filepath.Join("testdata", "issue51836")
+ // Following the pattern of TestIssue13898, aa.go needs to be compiled from
+ // the output directory. We pass the full path to compile() so that we don't
+ // have to copy the file to that directory.
+ bpath, err := filepath.Abs(filepath.Join(dir, "aa.go"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ compilePkg(t, dir, "a.go", testoutdir, nil, apkg(testoutdir))
+ compile(t, testoutdir, bpath, testoutdir, map[string]string{apkg(testoutdir): filepath.Join(testoutdir, "a.o")})
+
+ // import must succeed (test for issue at hand)
+ _ = importPkg(t, "./testdata/aa", tmpdir)
+}
+
+func TestIssue57015(t *testing.T) {
+ testenv.NeedsGo1Point(t, 18) // requires generics
+
+ // This package only handles gc export data.
+ needsCompiler(t, "gc")
+
+ // On windows, we have to set the -D option for the compiler to avoid having a drive
+ // letter and an illegal ':' in the import path - just skip it (see also issue #3483).
+ if runtime.GOOS == "windows" {
+ t.Skip("avoid dealing with relative paths/drive letters on windows")
+ }
+
+ compileAndImportPkg(t, "issue57015")
+}
+
+// This is a regression test for a failure to export a package
+// containing a specific type error.
+//
+// Though the issue and test are specific, they may be representatives
+// of class of exporter bugs on ill-typed code that we have yet to
+// flush out.
+//
+// TODO(adonovan): systematize our search for similar problems using
+// fuzz testing, and drive this test from a table of test cases
+// discovered by fuzzing.
+func TestIssue57729(t *testing.T) {
+ // The lack of a receiver causes Recv.Type=Invalid.
+ // (The type checker then treats Foo as a package-level
+ // function, inserting it into the package scope.)
+ // The exporter needs to apply the same treatment.
+ const src = `package p; func () Foo() {}`
+
+ // Parse the ill-typed input.
+ fset := token.NewFileSet()
+ f, err := goparser.ParseFile(fset, "p.go", src, 0)
+ if err != nil {
+ t.Fatalf("parse: %v", err)
+ }
+
+ // Type check it, expecting errors.
+ config := &types.Config{
+ Error: func(err error) { t.Log(err) }, // don't abort at first error
+ }
+ pkg1, _ := config.Check("p", fset, []*ast.File{f}, nil)
+
+ // Export it.
+ // (Shallowness isn't important here.)
+ data, err := IExportShallow(fset, pkg1)
+ if err != nil {
+ t.Fatalf("export: %v", err) // any failure to export is a bug
+ }
+
+ // Re-import it.
+ imports := make(map[string]*types.Package)
+ insert := func(pkg1 *types.Package, name string) { panic("unexpected insert") }
+ pkg2, err := IImportShallow(fset, imports, data, "p", insert)
+ if err != nil {
+ t.Fatalf("import: %v", err) // any failure of IExport+IImport is a bug.
+ }
+
+ // Check that Lookup("Foo") still returns something.
+ // We can't assert the type hasn't change: it has,
+ // from a method of Invalid to a standalone function.
+ hasObj1 := pkg1.Scope().Lookup("Foo") != nil
+ hasObj2 := pkg2.Scope().Lookup("Foo") != nil
+ if hasObj1 != hasObj2 {
+ t.Errorf("export+import changed Lookup('Foo')!=nil: was %t, became %t", hasObj1, hasObj2)
+ }
+}
+
+func TestIssue58296(t *testing.T) {
+ // Compiles packages c, b, and a where c imports b and b imports a,
+ // then imports c with stub *types.Packages for b and a, and checks that
+ // both a and b are in the Imports() of c.
+ //
+ // This is how go/packages can read the exportdata when NeedDeps is off.
+
+ // This package only handles gc export data.
+ needsCompiler(t, "gc")
+ testenv.NeedsGoBuild(t) // to find stdlib export data in the build cache
+
+ // On windows, we have to set the -D option for the compiler to avoid having a drive
+ // letter and an illegal ':' in the import path - just skip it (see also issue #3483).
+ if runtime.GOOS == "windows" {
+ t.Skip("avoid dealing with relative paths/drive letters on windows")
+ }
+
+ tmpdir := mktmpdir(t)
+ defer os.RemoveAll(tmpdir)
+ testoutdir := filepath.Join(tmpdir, "testdata")
+
+ apkg := filepath.Join(testoutdir, "a")
+ bpkg := filepath.Join(testoutdir, "b")
+ cpkg := filepath.Join(testoutdir, "c")
+
+ srcdir := filepath.Join("testdata", "issue58296")
+ compilePkg(t, filepath.Join(srcdir, "a"), "a.go", testoutdir, nil, apkg)
+ compilePkg(t, filepath.Join(srcdir, "b"), "b.go", testoutdir, map[string]string{apkg: filepath.Join(testoutdir, "a.o")}, bpkg)
+ compilePkg(t, filepath.Join(srcdir, "c"), "c.go", testoutdir, map[string]string{bpkg: filepath.Join(testoutdir, "b.o")}, cpkg)
+
+ // The export data reader for c cannot rely on Package.Imports
+ // being populated for a or b. (For the imports {a,b} it is unset.)
+ imports := map[string]*types.Package{
+ apkg: types.NewPackage(apkg, "a"),
+ bpkg: types.NewPackage(bpkg, "b"),
+ }
+
+ // make sure a and b are both imported by c.
+ pkg, err := Import(imports, "./c", testoutdir, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var names []string
+ for _, imp := range pkg.Imports() {
+ names = append(names, imp.Name())
+ }
+ sort.Strings(names)
+
+ if got, want := strings.Join(names, ","), "a,b"; got != want {
+ t.Errorf("got imports %v for package c. wanted %v", names, want)
+ }
+}
+
+// apkg returns the package "a" prefixed by (as a package) testoutdir
+func apkg(testoutdir string) string {
+ apkg := testoutdir + "/a"
+ if os.PathSeparator != '/' {
+ apkg = strings.ReplaceAll(apkg, string(os.PathSeparator), "/")
+ }
+ return apkg
+}
+
+func importPkg(t *testing.T, path, srcDir string) *types.Package {
+ pkg, err := Import(make(map[string]*types.Package), path, srcDir, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return pkg
+}
+
+func compileAndImportPkg(t *testing.T, name string) *types.Package {
+ tmpdir := mktmpdir(t)
+ defer os.RemoveAll(tmpdir)
+ compile(t, "testdata", name+".go", filepath.Join(tmpdir, "testdata"), nil)
+ return importPkg(t, "./testdata/"+name, tmpdir)
+}
+
+func lookupObj(t *testing.T, scope *types.Scope, name string) types.Object {
+ if obj := scope.Lookup(name); obj != nil {
+ return obj
+ }
+ t.Fatalf("%s not found", name)
+ return nil
+}
diff --git a/internal/gcimporter/iexport.go b/internal/gcimporter/iexport.go
new file mode 100644
index 000000000..ba53cdcdd
--- /dev/null
+++ b/internal/gcimporter/iexport.go
@@ -0,0 +1,1180 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Indexed binary package export.
+// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go;
+// see that file for specification of the format.
+
+package gcimporter
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "go/constant"
+ "go/token"
+ "go/types"
+ "io"
+ "math/big"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+
+ "golang.org/x/tools/internal/tokeninternal"
+ "golang.org/x/tools/internal/typeparams"
+)
+
+// IExportShallow encodes "shallow" export data for the specified package.
+//
+// No promises are made about the encoding other than that it can be
+// decoded by the same version of IIExportShallow. If you plan to save
+// export data in the file system, be sure to include a cryptographic
+// digest of the executable in the key to avoid version skew.
+func IExportShallow(fset *token.FileSet, pkg *types.Package) ([]byte, error) {
+ // In principle this operation can only fail if out.Write fails,
+ // but that's impossible for bytes.Buffer---and as a matter of
+ // fact iexportCommon doesn't even check for I/O errors.
+ // TODO(adonovan): handle I/O errors properly.
+ // TODO(adonovan): use byte slices throughout, avoiding copying.
+ const bundle, shallow = false, true
+ var out bytes.Buffer
+ err := iexportCommon(&out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg})
+ return out.Bytes(), err
+}
+
+// IImportShallow decodes "shallow" types.Package data encoded by IExportShallow
+// in the same executable. This function cannot import data from
+// cmd/compile or gcexportdata.Write.
+func IImportShallow(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string, insert InsertType) (*types.Package, error) {
+ const bundle = false
+ pkgs, err := iimportCommon(fset, imports, data, bundle, path, insert)
+ if err != nil {
+ return nil, err
+ }
+ return pkgs[0], nil
+}
+
+// InsertType is the type of a function that creates a types.TypeName
+// object for a named type and inserts it into the scope of the
+// specified Package.
+type InsertType = func(pkg *types.Package, name string)
+
+// Current bundled export format version. Increase with each format change.
+// 0: initial implementation
+const bundleVersion = 0
+
+// IExportData writes indexed export data for pkg to out.
+//
+// If no file set is provided, position info will be missing.
+// The package path of the top-level package will not be recorded,
+// so that calls to IImportData can override with a provided package path.
+func IExportData(out io.Writer, fset *token.FileSet, pkg *types.Package) error {
+ const bundle, shallow = false, false
+ return iexportCommon(out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg})
+}
+
+// IExportBundle writes an indexed export bundle for pkgs to out.
+func IExportBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error {
+ const bundle, shallow = true, false
+ return iexportCommon(out, fset, bundle, shallow, iexportVersion, pkgs)
+}
+
+func iexportCommon(out io.Writer, fset *token.FileSet, bundle, shallow bool, version int, pkgs []*types.Package) (err error) {
+ if !debug {
+ defer func() {
+ if e := recover(); e != nil {
+ if ierr, ok := e.(internalError); ok {
+ err = ierr
+ return
+ }
+ // Not an internal error; panic again.
+ panic(e)
+ }
+ }()
+ }
+
+ p := iexporter{
+ fset: fset,
+ version: version,
+ shallow: shallow,
+ allPkgs: map[*types.Package]bool{},
+ stringIndex: map[string]uint64{},
+ declIndex: map[types.Object]uint64{},
+ tparamNames: map[types.Object]string{},
+ typIndex: map[types.Type]uint64{},
+ }
+ if !bundle {
+ p.localpkg = pkgs[0]
+ }
+
+ for i, pt := range predeclared() {
+ p.typIndex[pt] = uint64(i)
+ }
+ if len(p.typIndex) > predeclReserved {
+ panic(internalErrorf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved))
+ }
+
+ // Initialize work queue with exported declarations.
+ for _, pkg := range pkgs {
+ scope := pkg.Scope()
+ for _, name := range scope.Names() {
+ if token.IsExported(name) {
+ p.pushDecl(scope.Lookup(name))
+ }
+ }
+
+ if bundle {
+ // Ensure pkg and its imports are included in the index.
+ p.allPkgs[pkg] = true
+ for _, imp := range pkg.Imports() {
+ p.allPkgs[imp] = true
+ }
+ }
+ }
+
+ // Loop until no more work.
+ for !p.declTodo.empty() {
+ p.doDecl(p.declTodo.popHead())
+ }
+
+ // Produce index of offset of each file record in files.
+ var files intWriter
+ var fileOffset []uint64 // fileOffset[i] is offset in files of file encoded as i
+ if p.shallow {
+ fileOffset = make([]uint64, len(p.fileInfos))
+ for i, info := range p.fileInfos {
+ fileOffset[i] = uint64(files.Len())
+ p.encodeFile(&files, info.file, info.needed)
+ }
+ }
+
+ // Append indices to data0 section.
+ dataLen := uint64(p.data0.Len())
+ w := p.newWriter()
+ w.writeIndex(p.declIndex)
+
+ if bundle {
+ w.uint64(uint64(len(pkgs)))
+ for _, pkg := range pkgs {
+ w.pkg(pkg)
+ imps := pkg.Imports()
+ w.uint64(uint64(len(imps)))
+ for _, imp := range imps {
+ w.pkg(imp)
+ }
+ }
+ }
+ w.flush()
+
+ // Assemble header.
+ var hdr intWriter
+ if bundle {
+ hdr.uint64(bundleVersion)
+ }
+ hdr.uint64(uint64(p.version))
+ hdr.uint64(uint64(p.strings.Len()))
+ if p.shallow {
+ hdr.uint64(uint64(files.Len()))
+ hdr.uint64(uint64(len(fileOffset)))
+ for _, offset := range fileOffset {
+ hdr.uint64(offset)
+ }
+ }
+ hdr.uint64(dataLen)
+
+ // Flush output.
+ io.Copy(out, &hdr)
+ io.Copy(out, &p.strings)
+ if p.shallow {
+ io.Copy(out, &files)
+ }
+ io.Copy(out, &p.data0)
+
+ return nil
+}
+
+// encodeFile writes to w a representation of the file sufficient to
+// faithfully restore position information about all needed offsets.
+// Mutates the needed array.
+func (p *iexporter) encodeFile(w *intWriter, file *token.File, needed []uint64) {
+ _ = needed[0] // precondition: needed is non-empty
+
+ w.uint64(p.stringOff(file.Name()))
+
+ size := uint64(file.Size())
+ w.uint64(size)
+
+ // Sort the set of needed offsets. Duplicates are harmless.
+ sort.Slice(needed, func(i, j int) bool { return needed[i] < needed[j] })
+
+ lines := tokeninternal.GetLines(file) // byte offset of each line start
+ w.uint64(uint64(len(lines)))
+
+ // Rather than record the entire array of line start offsets,
+ // we save only a sparse list of (index, offset) pairs for
+ // the start of each line that contains a needed position.
+ var sparse [][2]int // (index, offset) pairs
+outer:
+ for i, lineStart := range lines {
+ lineEnd := size
+ if i < len(lines)-1 {
+ lineEnd = uint64(lines[i+1])
+ }
+ // Does this line contains a needed offset?
+ if needed[0] < lineEnd {
+ sparse = append(sparse, [2]int{i, lineStart})
+ for needed[0] < lineEnd {
+ needed = needed[1:]
+ if len(needed) == 0 {
+ break outer
+ }
+ }
+ }
+ }
+
+ // Delta-encode the columns.
+ w.uint64(uint64(len(sparse)))
+ var prev [2]int
+ for _, pair := range sparse {
+ w.uint64(uint64(pair[0] - prev[0]))
+ w.uint64(uint64(pair[1] - prev[1]))
+ prev = pair
+ }
+}
+
+// writeIndex writes out an object index. mainIndex indicates whether
+// we're writing out the main index, which is also read by
+// non-compiler tools and includes a complete package description
+// (i.e., name and height).
+func (w *exportWriter) writeIndex(index map[types.Object]uint64) {
+ type pkgObj struct {
+ obj types.Object
+ name string // qualified name; differs from obj.Name for type params
+ }
+ // Build a map from packages to objects from that package.
+ pkgObjs := map[*types.Package][]pkgObj{}
+
+ // For the main index, make sure to include every package that
+ // we reference, even if we're not exporting (or reexporting)
+ // any symbols from it.
+ if w.p.localpkg != nil {
+ pkgObjs[w.p.localpkg] = nil
+ }
+ for pkg := range w.p.allPkgs {
+ pkgObjs[pkg] = nil
+ }
+
+ for obj := range index {
+ name := w.p.exportName(obj)
+ pkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], pkgObj{obj, name})
+ }
+
+ var pkgs []*types.Package
+ for pkg, objs := range pkgObjs {
+ pkgs = append(pkgs, pkg)
+
+ sort.Slice(objs, func(i, j int) bool {
+ return objs[i].name < objs[j].name
+ })
+ }
+
+ sort.Slice(pkgs, func(i, j int) bool {
+ return w.exportPath(pkgs[i]) < w.exportPath(pkgs[j])
+ })
+
+ w.uint64(uint64(len(pkgs)))
+ for _, pkg := range pkgs {
+ w.string(w.exportPath(pkg))
+ w.string(pkg.Name())
+ w.uint64(uint64(0)) // package height is not needed for go/types
+
+ objs := pkgObjs[pkg]
+ w.uint64(uint64(len(objs)))
+ for _, obj := range objs {
+ w.string(obj.name)
+ w.uint64(index[obj.obj])
+ }
+ }
+}
+
+// exportName returns the 'exported' name of an object. It differs from
+// obj.Name() only for type parameters (see tparamExportName for details).
+func (p *iexporter) exportName(obj types.Object) (res string) {
+ if name := p.tparamNames[obj]; name != "" {
+ return name
+ }
+ return obj.Name()
+}
+
+type iexporter struct {
+ fset *token.FileSet
+ out *bytes.Buffer
+ version int
+
+ shallow bool // don't put types from other packages in the index
+ localpkg *types.Package // (nil in bundle mode)
+
+ // allPkgs tracks all packages that have been referenced by
+ // the export data, so we can ensure to include them in the
+ // main index.
+ allPkgs map[*types.Package]bool
+
+ declTodo objQueue
+
+ strings intWriter
+ stringIndex map[string]uint64
+
+ // In shallow mode, object positions are encoded as (file, offset).
+ // Each file is recorded as a line-number table.
+ // Only the lines of needed positions are saved faithfully.
+ fileInfo map[*token.File]uint64 // value is index in fileInfos
+ fileInfos []*filePositions
+
+ data0 intWriter
+ declIndex map[types.Object]uint64
+ tparamNames map[types.Object]string // typeparam->exported name
+ typIndex map[types.Type]uint64
+
+ indent int // for tracing support
+}
+
+type filePositions struct {
+ file *token.File
+ needed []uint64 // unordered list of needed file offsets
+}
+
+func (p *iexporter) trace(format string, args ...interface{}) {
+ if !trace {
+ // Call sites should also be guarded, but having this check here allows
+ // easily enabling/disabling debug trace statements.
+ return
+ }
+ fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...)
+}
+
+// stringOff returns the offset of s within the string section.
+// If not already present, it's added to the end.
+func (p *iexporter) stringOff(s string) uint64 {
+ off, ok := p.stringIndex[s]
+ if !ok {
+ off = uint64(p.strings.Len())
+ p.stringIndex[s] = off
+
+ p.strings.uint64(uint64(len(s)))
+ p.strings.WriteString(s)
+ }
+ return off
+}
+
+// fileIndexAndOffset returns the index of the token.File and the byte offset of pos within it.
+func (p *iexporter) fileIndexAndOffset(file *token.File, pos token.Pos) (uint64, uint64) {
+ index, ok := p.fileInfo[file]
+ if !ok {
+ index = uint64(len(p.fileInfo))
+ p.fileInfos = append(p.fileInfos, &filePositions{file: file})
+ if p.fileInfo == nil {
+ p.fileInfo = make(map[*token.File]uint64)
+ }
+ p.fileInfo[file] = index
+ }
+ // Record each needed offset.
+ info := p.fileInfos[index]
+ offset := uint64(file.Offset(pos))
+ info.needed = append(info.needed, offset)
+
+ return index, offset
+}
+
+// pushDecl adds n to the declaration work queue, if not already present.
+func (p *iexporter) pushDecl(obj types.Object) {
+ // Package unsafe is known to the compiler and predeclared.
+ // Caller should not ask us to do export it.
+ if obj.Pkg() == types.Unsafe {
+ panic("cannot export package unsafe")
+ }
+
+ // Shallow export data: don't index decls from other packages.
+ if p.shallow && obj.Pkg() != p.localpkg {
+ return
+ }
+
+ if _, ok := p.declIndex[obj]; ok {
+ return
+ }
+
+ p.declIndex[obj] = ^uint64(0) // mark obj present in work queue
+ p.declTodo.pushTail(obj)
+}
+
+// exportWriter handles writing out individual data section chunks.
+type exportWriter struct {
+ p *iexporter
+
+ data intWriter
+ currPkg *types.Package
+ prevFile string
+ prevLine int64
+ prevColumn int64
+}
+
+func (w *exportWriter) exportPath(pkg *types.Package) string {
+ if pkg == w.p.localpkg {
+ return ""
+ }
+ return pkg.Path()
+}
+
+func (p *iexporter) doDecl(obj types.Object) {
+ if trace {
+ p.trace("exporting decl %v (%T)", obj, obj)
+ p.indent++
+ defer func() {
+ p.indent--
+ p.trace("=> %s", obj)
+ }()
+ }
+ w := p.newWriter()
+ w.setPkg(obj.Pkg(), false)
+
+ switch obj := obj.(type) {
+ case *types.Var:
+ w.tag('V')
+ w.pos(obj.Pos())
+ w.typ(obj.Type(), obj.Pkg())
+
+ case *types.Func:
+ sig, _ := obj.Type().(*types.Signature)
+ if sig.Recv() != nil {
+ // We shouldn't see methods in the package scope,
+ // but the type checker may repair "func () F() {}"
+ // to "func (Invalid) F()" and then treat it like "func F()",
+ // so allow that. See golang/go#57729.
+ if sig.Recv().Type() != types.Typ[types.Invalid] {
+ panic(internalErrorf("unexpected method: %v", sig))
+ }
+ }
+
+ // Function.
+ if typeparams.ForSignature(sig).Len() == 0 {
+ w.tag('F')
+ } else {
+ w.tag('G')
+ }
+ w.pos(obj.Pos())
+ // The tparam list of the function type is the declaration of the type
+ // params. So, write out the type params right now. Then those type params
+ // will be referenced via their type offset (via typOff) in all other
+ // places in the signature and function where they are used.
+ //
+ // While importing the type parameters, tparamList computes and records
+ // their export name, so that it can be later used when writing the index.
+ if tparams := typeparams.ForSignature(sig); tparams.Len() > 0 {
+ w.tparamList(obj.Name(), tparams, obj.Pkg())
+ }
+ w.signature(sig)
+
+ case *types.Const:
+ w.tag('C')
+ w.pos(obj.Pos())
+ w.value(obj.Type(), obj.Val())
+
+ case *types.TypeName:
+ t := obj.Type()
+
+ if tparam, ok := t.(*typeparams.TypeParam); ok {
+ w.tag('P')
+ w.pos(obj.Pos())
+ constraint := tparam.Constraint()
+ if p.version >= iexportVersionGo1_18 {
+ implicit := false
+ if iface, _ := constraint.(*types.Interface); iface != nil {
+ implicit = typeparams.IsImplicit(iface)
+ }
+ w.bool(implicit)
+ }
+ w.typ(constraint, obj.Pkg())
+ break
+ }
+
+ if obj.IsAlias() {
+ w.tag('A')
+ w.pos(obj.Pos())
+ w.typ(t, obj.Pkg())
+ break
+ }
+
+ // Defined type.
+ named, ok := t.(*types.Named)
+ if !ok {
+ panic(internalErrorf("%s is not a defined type", t))
+ }
+
+ if typeparams.ForNamed(named).Len() == 0 {
+ w.tag('T')
+ } else {
+ w.tag('U')
+ }
+ w.pos(obj.Pos())
+
+ if typeparams.ForNamed(named).Len() > 0 {
+ // While importing the type parameters, tparamList computes and records
+ // their export name, so that it can be later used when writing the index.
+ w.tparamList(obj.Name(), typeparams.ForNamed(named), obj.Pkg())
+ }
+
+ underlying := obj.Type().Underlying()
+ w.typ(underlying, obj.Pkg())
+
+ if types.IsInterface(t) {
+ break
+ }
+
+ n := named.NumMethods()
+ w.uint64(uint64(n))
+ for i := 0; i < n; i++ {
+ m := named.Method(i)
+ w.pos(m.Pos())
+ w.string(m.Name())
+ sig, _ := m.Type().(*types.Signature)
+
+ // Receiver type parameters are type arguments of the receiver type, so
+ // their name must be qualified before exporting recv.
+ if rparams := typeparams.RecvTypeParams(sig); rparams.Len() > 0 {
+ prefix := obj.Name() + "." + m.Name()
+ for i := 0; i < rparams.Len(); i++ {
+ rparam := rparams.At(i)
+ name := tparamExportName(prefix, rparam)
+ w.p.tparamNames[rparam.Obj()] = name
+ }
+ }
+ w.param(sig.Recv())
+ w.signature(sig)
+ }
+
+ default:
+ panic(internalErrorf("unexpected object: %v", obj))
+ }
+
+ p.declIndex[obj] = w.flush()
+}
+
+func (w *exportWriter) tag(tag byte) {
+ w.data.WriteByte(tag)
+}
+
+func (w *exportWriter) pos(pos token.Pos) {
+ if w.p.shallow {
+ w.posV2(pos)
+ } else if w.p.version >= iexportVersionPosCol {
+ w.posV1(pos)
+ } else {
+ w.posV0(pos)
+ }
+}
+
+// posV2 encoding (used only in shallow mode) records positions as
+// (file, offset), where file is the index in the token.File table
+// (which records the file name and newline offsets) and offset is a
+// byte offset. It effectively ignores //line directives.
+func (w *exportWriter) posV2(pos token.Pos) {
+ if pos == token.NoPos {
+ w.uint64(0)
+ return
+ }
+ file := w.p.fset.File(pos) // fset must be non-nil
+ index, offset := w.p.fileIndexAndOffset(file, pos)
+ w.uint64(1 + index)
+ w.uint64(offset)
+}
+
+func (w *exportWriter) posV1(pos token.Pos) {
+ if w.p.fset == nil {
+ w.int64(0)
+ return
+ }
+
+ p := w.p.fset.Position(pos)
+ file := p.Filename
+ line := int64(p.Line)
+ column := int64(p.Column)
+
+ deltaColumn := (column - w.prevColumn) << 1
+ deltaLine := (line - w.prevLine) << 1
+
+ if file != w.prevFile {
+ deltaLine |= 1
+ }
+ if deltaLine != 0 {
+ deltaColumn |= 1
+ }
+
+ w.int64(deltaColumn)
+ if deltaColumn&1 != 0 {
+ w.int64(deltaLine)
+ if deltaLine&1 != 0 {
+ w.string(file)
+ }
+ }
+
+ w.prevFile = file
+ w.prevLine = line
+ w.prevColumn = column
+}
+
+func (w *exportWriter) posV0(pos token.Pos) {
+ if w.p.fset == nil {
+ w.int64(0)
+ return
+ }
+
+ p := w.p.fset.Position(pos)
+ file := p.Filename
+ line := int64(p.Line)
+
+ // When file is the same as the last position (common case),
+ // we can save a few bytes by delta encoding just the line
+ // number.
+ //
+ // Note: Because data objects may be read out of order (or not
+ // at all), we can only apply delta encoding within a single
+ // object. This is handled implicitly by tracking prevFile and
+ // prevLine as fields of exportWriter.
+
+ if file == w.prevFile {
+ delta := line - w.prevLine
+ w.int64(delta)
+ if delta == deltaNewFile {
+ w.int64(-1)
+ }
+ } else {
+ w.int64(deltaNewFile)
+ w.int64(line) // line >= 0
+ w.string(file)
+ w.prevFile = file
+ }
+ w.prevLine = line
+}
+
+func (w *exportWriter) pkg(pkg *types.Package) {
+ // Ensure any referenced packages are declared in the main index.
+ w.p.allPkgs[pkg] = true
+
+ w.string(w.exportPath(pkg))
+}
+
+func (w *exportWriter) qualifiedType(obj *types.TypeName) {
+ name := w.p.exportName(obj)
+
+ // Ensure any referenced declarations are written out too.
+ w.p.pushDecl(obj)
+ w.string(name)
+ w.pkg(obj.Pkg())
+}
+
+func (w *exportWriter) typ(t types.Type, pkg *types.Package) {
+ w.data.uint64(w.p.typOff(t, pkg))
+}
+
+func (p *iexporter) newWriter() *exportWriter {
+ return &exportWriter{p: p}
+}
+
+func (w *exportWriter) flush() uint64 {
+ off := uint64(w.p.data0.Len())
+ io.Copy(&w.p.data0, &w.data)
+ return off
+}
+
+func (p *iexporter) typOff(t types.Type, pkg *types.Package) uint64 {
+ off, ok := p.typIndex[t]
+ if !ok {
+ w := p.newWriter()
+ w.doTyp(t, pkg)
+ off = predeclReserved + w.flush()
+ p.typIndex[t] = off
+ }
+ return off
+}
+
+func (w *exportWriter) startType(k itag) {
+ w.data.uint64(uint64(k))
+}
+
+func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {
+ if trace {
+ w.p.trace("exporting type %s (%T)", t, t)
+ w.p.indent++
+ defer func() {
+ w.p.indent--
+ w.p.trace("=> %s", t)
+ }()
+ }
+ switch t := t.(type) {
+ case *types.Named:
+ if targs := typeparams.NamedTypeArgs(t); targs.Len() > 0 {
+ w.startType(instanceType)
+ // TODO(rfindley): investigate if this position is correct, and if it
+ // matters.
+ w.pos(t.Obj().Pos())
+ w.typeList(targs, pkg)
+ w.typ(typeparams.NamedTypeOrigin(t), pkg)
+ return
+ }
+ w.startType(definedType)
+ w.qualifiedType(t.Obj())
+
+ case *typeparams.TypeParam:
+ w.startType(typeParamType)
+ w.qualifiedType(t.Obj())
+
+ case *types.Pointer:
+ w.startType(pointerType)
+ w.typ(t.Elem(), pkg)
+
+ case *types.Slice:
+ w.startType(sliceType)
+ w.typ(t.Elem(), pkg)
+
+ case *types.Array:
+ w.startType(arrayType)
+ w.uint64(uint64(t.Len()))
+ w.typ(t.Elem(), pkg)
+
+ case *types.Chan:
+ w.startType(chanType)
+ // 1 RecvOnly; 2 SendOnly; 3 SendRecv
+ var dir uint64
+ switch t.Dir() {
+ case types.RecvOnly:
+ dir = 1
+ case types.SendOnly:
+ dir = 2
+ case types.SendRecv:
+ dir = 3
+ }
+ w.uint64(dir)
+ w.typ(t.Elem(), pkg)
+
+ case *types.Map:
+ w.startType(mapType)
+ w.typ(t.Key(), pkg)
+ w.typ(t.Elem(), pkg)
+
+ case *types.Signature:
+ w.startType(signatureType)
+ w.setPkg(pkg, true)
+ w.signature(t)
+
+ case *types.Struct:
+ w.startType(structType)
+ n := t.NumFields()
+ if n > 0 {
+ w.setPkg(t.Field(0).Pkg(), true) // qualifying package for field objects
+ } else {
+ w.setPkg(pkg, true)
+ }
+ w.uint64(uint64(n))
+ for i := 0; i < n; i++ {
+ f := t.Field(i)
+ w.pos(f.Pos())
+ w.string(f.Name()) // unexported fields implicitly qualified by prior setPkg
+ w.typ(f.Type(), pkg)
+ w.bool(f.Anonymous())
+ w.string(t.Tag(i)) // note (or tag)
+ }
+
+ case *types.Interface:
+ w.startType(interfaceType)
+ w.setPkg(pkg, true)
+
+ n := t.NumEmbeddeds()
+ w.uint64(uint64(n))
+ for i := 0; i < n; i++ {
+ ft := t.EmbeddedType(i)
+ tPkg := pkg
+ if named, _ := ft.(*types.Named); named != nil {
+ w.pos(named.Obj().Pos())
+ } else {
+ w.pos(token.NoPos)
+ }
+ w.typ(ft, tPkg)
+ }
+
+ n = t.NumExplicitMethods()
+ w.uint64(uint64(n))
+ for i := 0; i < n; i++ {
+ m := t.ExplicitMethod(i)
+ w.pos(m.Pos())
+ w.string(m.Name())
+ sig, _ := m.Type().(*types.Signature)
+ w.signature(sig)
+ }
+
+ case *typeparams.Union:
+ w.startType(unionType)
+ nt := t.Len()
+ w.uint64(uint64(nt))
+ for i := 0; i < nt; i++ {
+ term := t.Term(i)
+ w.bool(term.Tilde())
+ w.typ(term.Type(), pkg)
+ }
+
+ default:
+ panic(internalErrorf("unexpected type: %v, %v", t, reflect.TypeOf(t)))
+ }
+}
+
+func (w *exportWriter) setPkg(pkg *types.Package, write bool) {
+ if write {
+ w.pkg(pkg)
+ }
+
+ w.currPkg = pkg
+}
+
+func (w *exportWriter) signature(sig *types.Signature) {
+ w.paramList(sig.Params())
+ w.paramList(sig.Results())
+ if sig.Params().Len() > 0 {
+ w.bool(sig.Variadic())
+ }
+}
+
+func (w *exportWriter) typeList(ts *typeparams.TypeList, pkg *types.Package) {
+ w.uint64(uint64(ts.Len()))
+ for i := 0; i < ts.Len(); i++ {
+ w.typ(ts.At(i), pkg)
+ }
+}
+
+func (w *exportWriter) tparamList(prefix string, list *typeparams.TypeParamList, pkg *types.Package) {
+ ll := uint64(list.Len())
+ w.uint64(ll)
+ for i := 0; i < list.Len(); i++ {
+ tparam := list.At(i)
+ // Set the type parameter exportName before exporting its type.
+ exportName := tparamExportName(prefix, tparam)
+ w.p.tparamNames[tparam.Obj()] = exportName
+ w.typ(list.At(i), pkg)
+ }
+}
+
+const blankMarker = "$"
+
+// tparamExportName returns the 'exported' name of a type parameter, which
+// differs from its actual object name: it is prefixed with a qualifier, and
+// blank type parameter names are disambiguated by their index in the type
+// parameter list.
+func tparamExportName(prefix string, tparam *typeparams.TypeParam) string {
+ assert(prefix != "")
+ name := tparam.Obj().Name()
+ if name == "_" {
+ name = blankMarker + strconv.Itoa(tparam.Index())
+ }
+ return prefix + "." + name
+}
+
+// tparamName returns the real name of a type parameter, after stripping its
+// qualifying prefix and reverting blank-name encoding. See tparamExportName
+// for details.
+func tparamName(exportName string) string {
+ // Remove the "path" from the type param name that makes it unique.
+ ix := strings.LastIndex(exportName, ".")
+ if ix < 0 {
+ errorf("malformed type parameter export name %s: missing prefix", exportName)
+ }
+ name := exportName[ix+1:]
+ if strings.HasPrefix(name, blankMarker) {
+ return "_"
+ }
+ return name
+}
+
+func (w *exportWriter) paramList(tup *types.Tuple) {
+ n := tup.Len()
+ w.uint64(uint64(n))
+ for i := 0; i < n; i++ {
+ w.param(tup.At(i))
+ }
+}
+
+func (w *exportWriter) param(obj types.Object) {
+ w.pos(obj.Pos())
+ w.localIdent(obj)
+ w.typ(obj.Type(), obj.Pkg())
+}
+
+func (w *exportWriter) value(typ types.Type, v constant.Value) {
+ w.typ(typ, nil)
+ if w.p.version >= iexportVersionGo1_18 {
+ w.int64(int64(v.Kind()))
+ }
+
+ switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType {
+ case types.IsBoolean:
+ w.bool(constant.BoolVal(v))
+ case types.IsInteger:
+ var i big.Int
+ if i64, exact := constant.Int64Val(v); exact {
+ i.SetInt64(i64)
+ } else if ui64, exact := constant.Uint64Val(v); exact {
+ i.SetUint64(ui64)
+ } else {
+ i.SetString(v.ExactString(), 10)
+ }
+ w.mpint(&i, typ)
+ case types.IsFloat:
+ f := constantToFloat(v)
+ w.mpfloat(f, typ)
+ case types.IsComplex:
+ w.mpfloat(constantToFloat(constant.Real(v)), typ)
+ w.mpfloat(constantToFloat(constant.Imag(v)), typ)
+ case types.IsString:
+ w.string(constant.StringVal(v))
+ default:
+ if b.Kind() == types.Invalid {
+ // package contains type errors
+ break
+ }
+ panic(internalErrorf("unexpected type %v (%v)", typ, typ.Underlying()))
+ }
+}
+
+// constantToFloat converts a constant.Value with kind constant.Float to a
+// big.Float.
+func constantToFloat(x constant.Value) *big.Float {
+ x = constant.ToFloat(x)
+ // Use the same floating-point precision (512) as cmd/compile
+ // (see Mpprec in cmd/compile/internal/gc/mpfloat.go).
+ const mpprec = 512
+ var f big.Float
+ f.SetPrec(mpprec)
+ if v, exact := constant.Float64Val(x); exact {
+ // float64
+ f.SetFloat64(v)
+ } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int {
+ // TODO(gri): add big.Rat accessor to constant.Value.
+ n := valueToRat(num)
+ d := valueToRat(denom)
+ f.SetRat(n.Quo(n, d))
+ } else {
+ // Value too large to represent as a fraction => inaccessible.
+ // TODO(gri): add big.Float accessor to constant.Value.
+ _, ok := f.SetString(x.ExactString())
+ assert(ok)
+ }
+ return &f
+}
+
+// mpint exports a multi-precision integer.
+//
+// For unsigned types, small values are written out as a single
+// byte. Larger values are written out as a length-prefixed big-endian
+// byte string, where the length prefix is encoded as its complement.
+// For example, bytes 0, 1, and 2 directly represent the integer
+// values 0, 1, and 2; while bytes 255, 254, and 253 indicate a 1-,
+// 2-, and 3-byte big-endian string follow.
+//
+// Encoding for signed types use the same general approach as for
+// unsigned types, except small values use zig-zag encoding and the
+// bottom bit of length prefix byte for large values is reserved as a
+// sign bit.
+//
+// The exact boundary between small and large encodings varies
+// according to the maximum number of bytes needed to encode a value
+// of type typ. As a special case, 8-bit types are always encoded as a
+// single byte.
+//
+// TODO(mdempsky): Is this level of complexity really worthwhile?
+func (w *exportWriter) mpint(x *big.Int, typ types.Type) {
+ basic, ok := typ.Underlying().(*types.Basic)
+ if !ok {
+ panic(internalErrorf("unexpected type %v (%T)", typ.Underlying(), typ.Underlying()))
+ }
+
+ signed, maxBytes := intSize(basic)
+
+ negative := x.Sign() < 0
+ if !signed && negative {
+ panic(internalErrorf("negative unsigned integer; type %v, value %v", typ, x))
+ }
+
+ b := x.Bytes()
+ if len(b) > 0 && b[0] == 0 {
+ panic(internalErrorf("leading zeros"))
+ }
+ if uint(len(b)) > maxBytes {
+ panic(internalErrorf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x))
+ }
+
+ maxSmall := 256 - maxBytes
+ if signed {
+ maxSmall = 256 - 2*maxBytes
+ }
+ if maxBytes == 1 {
+ maxSmall = 256
+ }
+
+ // Check if x can use small value encoding.
+ if len(b) <= 1 {
+ var ux uint
+ if len(b) == 1 {
+ ux = uint(b[0])
+ }
+ if signed {
+ ux <<= 1
+ if negative {
+ ux--
+ }
+ }
+ if ux < maxSmall {
+ w.data.WriteByte(byte(ux))
+ return
+ }
+ }
+
+ n := 256 - uint(len(b))
+ if signed {
+ n = 256 - 2*uint(len(b))
+ if negative {
+ n |= 1
+ }
+ }
+ if n < maxSmall || n >= 256 {
+ panic(internalErrorf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n))
+ }
+
+ w.data.WriteByte(byte(n))
+ w.data.Write(b)
+}
+
+// mpfloat exports a multi-precision floating point number.
+//
+// The number's value is decomposed into mantissa × 2**exponent, where
+// mantissa is an integer. The value is written out as mantissa (as a
+// multi-precision integer) and then the exponent, except exponent is
+// omitted if mantissa is zero.
+func (w *exportWriter) mpfloat(f *big.Float, typ types.Type) {
+ if f.IsInf() {
+ panic("infinite constant")
+ }
+
+ // Break into f = mant × 2**exp, with 0.5 <= mant < 1.
+ var mant big.Float
+ exp := int64(f.MantExp(&mant))
+
+ // Scale so that mant is an integer.
+ prec := mant.MinPrec()
+ mant.SetMantExp(&mant, int(prec))
+ exp -= int64(prec)
+
+ manti, acc := mant.Int(nil)
+ if acc != big.Exact {
+ panic(internalErrorf("mantissa scaling failed for %f (%s)", f, acc))
+ }
+ w.mpint(manti, typ)
+ if manti.Sign() != 0 {
+ w.int64(exp)
+ }
+}
+
+func (w *exportWriter) bool(b bool) bool {
+ var x uint64
+ if b {
+ x = 1
+ }
+ w.uint64(x)
+ return b
+}
+
+func (w *exportWriter) int64(x int64) { w.data.int64(x) }
+func (w *exportWriter) uint64(x uint64) { w.data.uint64(x) }
+func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) }
+
+func (w *exportWriter) localIdent(obj types.Object) {
+ // Anonymous parameters.
+ if obj == nil {
+ w.string("")
+ return
+ }
+
+ name := obj.Name()
+ if name == "_" {
+ w.string("_")
+ return
+ }
+
+ w.string(name)
+}
+
+type intWriter struct {
+ bytes.Buffer
+}
+
+func (w *intWriter) int64(x int64) {
+ var buf [binary.MaxVarintLen64]byte
+ n := binary.PutVarint(buf[:], x)
+ w.Write(buf[:n])
+}
+
+func (w *intWriter) uint64(x uint64) {
+ var buf [binary.MaxVarintLen64]byte
+ n := binary.PutUvarint(buf[:], x)
+ w.Write(buf[:n])
+}
+
+func assert(cond bool) {
+ if !cond {
+ panic("internal error: assertion failed")
+ }
+}
+
+// The below is copied from go/src/cmd/compile/internal/gc/syntax.go.
+
+// objQueue is a FIFO queue of types.Object. The zero value of objQueue is
+// a ready-to-use empty queue.
+type objQueue struct {
+ ring []types.Object
+ head, tail int
+}
+
+// empty returns true if q contains no Nodes.
+func (q *objQueue) empty() bool {
+ return q.head == q.tail
+}
+
+// pushTail appends n to the tail of the queue.
+func (q *objQueue) pushTail(obj types.Object) {
+ if len(q.ring) == 0 {
+ q.ring = make([]types.Object, 16)
+ } else if q.head+len(q.ring) == q.tail {
+ // Grow the ring.
+ nring := make([]types.Object, len(q.ring)*2)
+ // Copy the old elements.
+ part := q.ring[q.head%len(q.ring):]
+ if q.tail-q.head <= len(part) {
+ part = part[:q.tail-q.head]
+ copy(nring, part)
+ } else {
+ pos := copy(nring, part)
+ copy(nring[pos:], q.ring[:q.tail%len(q.ring)])
+ }
+ q.ring, q.head, q.tail = nring, 0, q.tail-q.head
+ }
+
+ q.ring[q.tail%len(q.ring)] = obj
+ q.tail++
+}
+
+// popHead pops a node from the head of the queue. It panics if q is empty.
+func (q *objQueue) popHead() types.Object {
+ if q.empty() {
+ panic("dequeue empty")
+ }
+ obj := q.ring[q.head%len(q.ring)]
+ q.head++
+ return obj
+}
diff --git a/go/internal/gcimporter/iexport_common_test.go b/internal/gcimporter/iexport_common_test.go
index abc6aa64b..abc6aa64b 100644
--- a/go/internal/gcimporter/iexport_common_test.go
+++ b/internal/gcimporter/iexport_common_test.go
diff --git a/internal/gcimporter/iexport_go118_test.go b/internal/gcimporter/iexport_go118_test.go
new file mode 100644
index 000000000..c60a9b5ee
--- /dev/null
+++ b/internal/gcimporter/iexport_go118_test.go
@@ -0,0 +1,257 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18
+// +build go1.18
+
+package gcimporter_test
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/importer"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "testing"
+
+ "golang.org/x/tools/internal/gcimporter"
+ "golang.org/x/tools/internal/testenv"
+)
+
+// TODO(rfindley): migrate this to testdata, as has been done in the standard library.
+func TestGenericExport(t *testing.T) {
+ const src = `
+package generic
+
+type Any any
+
+type T[A, B any] struct { Left A; Right B }
+
+func (T[P, Q]) m() {}
+
+var X T[int, string] = T[int, string]{1, "hi"}
+
+func ToInt[P interface{ ~int }](p P) int { return int(p) }
+
+var IntID = ToInt[int]
+
+type G[C comparable] int
+
+func ImplicitFunc[T ~int]() {}
+
+type ImplicitType[T ~int] int
+
+// Exercise constant import/export
+const C1 = 42
+const C2 int = 42
+const C3 float64 = 42
+
+type Constraint[T any] interface {
+ m(T)
+}
+
+// TODO(rfindley): revert to multiple blanks once the restriction on multiple
+// blanks is removed from the type checker.
+// type Blanks[_ any, _ Constraint[int]] int
+// func (Blanks[_, _]) m() {}
+type Blanks[_ any] int
+func (Blanks[_]) m() {}
+`
+ testExportSrc(t, []byte(src))
+}
+
+func testExportSrc(t *testing.T, src []byte) {
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, "g.go", src, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ conf := types.Config{
+ Importer: importer.Default(),
+ }
+ pkg, err := conf.Check("", fset, []*ast.File{f}, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // export
+ version := gcimporter.IExportVersion
+ data, err := iexport(fset, version, pkg)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ testPkgData(t, fset, version, pkg, data)
+}
+
+func TestImportTypeparamTests(t *testing.T) {
+ testenv.NeedsGoBuild(t) // to find stdlib export data in the build cache
+
+ // Check go files in test/typeparam.
+ rootDir := filepath.Join(runtime.GOROOT(), "test", "typeparam")
+ list, err := os.ReadDir(rootDir)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if isUnifiedBuilder() {
+ t.Skip("unified export data format is currently unsupported")
+ }
+
+ for _, entry := range list {
+ if entry.IsDir() || !strings.HasSuffix(entry.Name(), ".go") {
+ // For now, only consider standalone go files.
+ continue
+ }
+
+ t.Run(entry.Name(), func(t *testing.T) {
+ filename := filepath.Join(rootDir, entry.Name())
+ src, err := os.ReadFile(filename)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !bytes.HasPrefix(src, []byte("// run")) && !bytes.HasPrefix(src, []byte("// compile")) {
+ // We're bypassing the logic of run.go here, so be conservative about
+ // the files we consider in an attempt to make this test more robust to
+ // changes in test/typeparams.
+ t.Skipf("not detected as a run test")
+ }
+
+ testExportSrc(t, src)
+ })
+ }
+}
+
+func TestRecursiveExport_Issue51219(t *testing.T) {
+ const srca = `
+package a
+
+type Interaction[DataT InteractionDataConstraint] struct {
+}
+
+type InteractionDataConstraint interface {
+ []byte |
+ UserCommandInteractionData
+}
+
+type UserCommandInteractionData struct {
+ resolvedInteractionWithOptions
+}
+
+type resolvedInteractionWithOptions struct {
+ Resolved Resolved
+}
+
+type Resolved struct {
+ Users ResolvedData[User]
+}
+
+type ResolvedData[T ResolvedDataConstraint] map[uint64]T
+
+type ResolvedDataConstraint interface {
+ User | Message
+}
+
+type User struct{}
+
+type Message struct {
+ Interaction *Interaction[[]byte]
+}
+`
+
+ const srcb = `
+package b
+
+import (
+ "a"
+)
+
+// InteractionRequest is an incoming request Interaction
+type InteractionRequest[T a.InteractionDataConstraint] struct {
+ a.Interaction[T]
+}
+`
+
+ const srcp = `
+package p
+
+import (
+ "b"
+)
+
+// ResponseWriterMock mocks corde's ResponseWriter interface
+type ResponseWriterMock struct {
+ x b.InteractionRequest[[]byte]
+}
+`
+
+ importer := &testImporter{
+ src: map[string][]byte{
+ "a": []byte(srca),
+ "b": []byte(srcb),
+ "p": []byte(srcp),
+ },
+ pkgs: make(map[string]*types.Package),
+ }
+ _, err := importer.Import("p")
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+// testImporter is a helper to test chains of imports using export data.
+type testImporter struct {
+ src map[string][]byte // original source
+ pkgs map[string]*types.Package // memoized imported packages
+}
+
+func (t *testImporter) Import(path string) (*types.Package, error) {
+ if pkg, ok := t.pkgs[path]; ok {
+ return pkg, nil
+ }
+ src, ok := t.src[path]
+ if !ok {
+ return nil, fmt.Errorf("unknown path %v", path)
+ }
+
+ // Type-check, but don't return this package directly.
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, path+".go", src, 0)
+ if err != nil {
+ return nil, err
+ }
+ conf := types.Config{
+ Importer: t,
+ }
+ pkg, err := conf.Check(path, fset, []*ast.File{f}, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Export and import to get the package imported from export data.
+ exportdata, err := iexport(fset, gcimporter.IExportVersion, pkg)
+ if err != nil {
+ return nil, err
+ }
+ imports := make(map[string]*types.Package)
+ fset2 := token.NewFileSet()
+ _, pkg2, err := gcimporter.IImportData(fset2, imports, exportdata, pkg.Path())
+ if err != nil {
+ return nil, err
+ }
+ t.pkgs[path] = pkg2
+ return pkg2, nil
+}
diff --git a/internal/gcimporter/iexport_test.go b/internal/gcimporter/iexport_test.go
new file mode 100644
index 000000000..93183f9dc
--- /dev/null
+++ b/internal/gcimporter/iexport_test.go
@@ -0,0 +1,454 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This is a copy of bexport_test.go for iexport.go.
+
+//go:build go1.11
+// +build go1.11
+
+package gcimporter_test
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/constant"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "io/ioutil"
+ "math/big"
+ "os"
+ "reflect"
+ "runtime"
+ "sort"
+ "strings"
+ "testing"
+
+ "golang.org/x/tools/go/ast/inspector"
+ "golang.org/x/tools/go/buildutil"
+ "golang.org/x/tools/go/gcexportdata"
+ "golang.org/x/tools/go/loader"
+ "golang.org/x/tools/internal/gcimporter"
+ "golang.org/x/tools/internal/typeparams/genericfeatures"
+)
+
+func readExportFile(filename string) ([]byte, error) {
+ f, err := os.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ buf := bufio.NewReader(f)
+ if _, _, err := gcimporter.FindExportData(buf); err != nil {
+ return nil, err
+ }
+
+ if ch, err := buf.ReadByte(); err != nil {
+ return nil, err
+ } else if ch != 'i' {
+ return nil, fmt.Errorf("unexpected byte: %v", ch)
+ }
+
+ return ioutil.ReadAll(buf)
+}
+
+func iexport(fset *token.FileSet, version int, pkg *types.Package) ([]byte, error) {
+ var buf bytes.Buffer
+ const bundle, shallow = false, false
+ if err := gcimporter.IExportCommon(&buf, fset, bundle, shallow, version, []*types.Package{pkg}); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// isUnifiedBuilder reports whether we are executing on a go builder that uses
+// unified export data.
+func isUnifiedBuilder() bool {
+ return os.Getenv("GO_BUILDER_NAME") == "linux-amd64-unified"
+}
+
+const minStdlibPackages = 248
+
+func TestIExportData_stdlib(t *testing.T) {
+ if runtime.Compiler == "gccgo" {
+ t.Skip("gccgo standard library is inaccessible")
+ }
+ if runtime.GOOS == "android" {
+ t.Skipf("incomplete std lib on %s", runtime.GOOS)
+ }
+ if isRace {
+ t.Skipf("stdlib tests take too long in race mode and flake on builders")
+ }
+ if testing.Short() {
+ t.Skip("skipping RAM hungry test in -short mode")
+ }
+
+ // Load, parse and type-check the program.
+ ctxt := build.Default // copy
+ ctxt.GOPATH = "" // disable GOPATH
+ conf := loader.Config{
+ Build: &ctxt,
+ AllowErrors: true,
+ TypeChecker: types.Config{
+ Sizes: types.SizesFor(ctxt.Compiler, ctxt.GOARCH),
+ Error: func(err error) { t.Log(err) },
+ },
+ }
+ for _, path := range buildutil.AllPackages(conf.Build) {
+ conf.Import(path)
+ }
+
+ // Create a package containing type and value errors to ensure
+ // they are properly encoded/decoded.
+ f, err := conf.ParseFile("haserrors/haserrors.go", `package haserrors
+const UnknownValue = "" + 0
+type UnknownType undefined
+`)
+ if err != nil {
+ t.Fatal(err)
+ }
+ conf.CreateFromFiles("haserrors", f)
+
+ prog, err := conf.Load()
+ if err != nil {
+ t.Fatalf("Load failed: %v", err)
+ }
+
+ var sorted []*types.Package
+ isUnified := isUnifiedBuilder()
+ for pkg, info := range prog.AllPackages {
+ // Temporarily skip packages that use generics on the unified builder, to
+ // fix TryBots.
+ //
+ // TODO(#48595): fix this test with GOEXPERIMENT=unified.
+ inspect := inspector.New(info.Files)
+ features := genericfeatures.ForPackage(inspect, &info.Info)
+ if isUnified && features != 0 {
+ t.Logf("skipping package %q which uses generics", pkg.Path())
+ continue
+ }
+ if info.Files != nil { // non-empty directory
+ sorted = append(sorted, pkg)
+ }
+ }
+ sort.Slice(sorted, func(i, j int) bool {
+ return sorted[i].Path() < sorted[j].Path()
+ })
+
+ version := gcimporter.IExportVersion
+ numPkgs := len(sorted)
+ if want := minStdlibPackages; numPkgs < want {
+ t.Errorf("Loaded only %d packages, want at least %d", numPkgs, want)
+ }
+
+ for _, pkg := range sorted {
+ if exportdata, err := iexport(conf.Fset, version, pkg); err != nil {
+ t.Error(err)
+ } else {
+ testPkgData(t, conf.Fset, version, pkg, exportdata)
+ }
+
+ if pkg.Name() == "main" || pkg.Name() == "haserrors" {
+ // skip; no export data
+ } else if bp, err := ctxt.Import(pkg.Path(), "", build.FindOnly); err != nil {
+ t.Log("warning:", err)
+ } else if exportdata, err := readExportFile(bp.PkgObj); err != nil {
+ t.Log("warning:", err)
+ } else {
+ testPkgData(t, conf.Fset, version, pkg, exportdata)
+ }
+ }
+
+ var bundle bytes.Buffer
+ if err := gcimporter.IExportBundle(&bundle, conf.Fset, sorted); err != nil {
+ t.Fatal(err)
+ }
+ fset2 := token.NewFileSet()
+ imports := make(map[string]*types.Package)
+ pkgs2, err := gcimporter.IImportBundle(fset2, imports, bundle.Bytes())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for i, pkg := range sorted {
+ testPkg(t, conf.Fset, version, pkg, fset2, pkgs2[i])
+ }
+}
+
+func testPkgData(t *testing.T, fset *token.FileSet, version int, pkg *types.Package, exportdata []byte) {
+ imports := make(map[string]*types.Package)
+ fset2 := token.NewFileSet()
+ _, pkg2, err := gcimporter.IImportData(fset2, imports, exportdata, pkg.Path())
+ if err != nil {
+ t.Errorf("IImportData(%s): %v", pkg.Path(), err)
+ }
+
+ testPkg(t, fset, version, pkg, fset2, pkg2)
+}
+
+func testPkg(t *testing.T, fset *token.FileSet, version int, pkg *types.Package, fset2 *token.FileSet, pkg2 *types.Package) {
+ if _, err := iexport(fset2, version, pkg2); err != nil {
+ t.Errorf("reexport %q: %v", pkg.Path(), err)
+ }
+
+ // Compare the packages' corresponding members.
+ for _, name := range pkg.Scope().Names() {
+ if !token.IsExported(name) {
+ continue
+ }
+ obj1 := pkg.Scope().Lookup(name)
+ obj2 := pkg2.Scope().Lookup(name)
+ if obj2 == nil {
+ t.Errorf("%s.%s not found, want %s", pkg.Path(), name, obj1)
+ continue
+ }
+
+ fl1 := fileLine(fset, obj1)
+ fl2 := fileLine(fset2, obj2)
+ if fl1 != fl2 {
+ t.Errorf("%s.%s: got posn %s, want %s",
+ pkg.Path(), name, fl2, fl1)
+ }
+
+ if err := cmpObj(obj1, obj2); err != nil {
+ t.Errorf("%s.%s: %s\ngot: %s\nwant: %s",
+ pkg.Path(), name, err, obj2, obj1)
+ }
+ }
+}
+
+// TestVeryLongFile tests the position of an import object declared in
+// a very long input file. Line numbers greater than maxlines are
+// reported as line 1, not garbage or token.NoPos.
+func TestIExportData_long(t *testing.T) {
+ // parse and typecheck
+ longFile := "package foo" + strings.Repeat("\n", 123456) + "var X int"
+ fset1 := token.NewFileSet()
+ f, err := parser.ParseFile(fset1, "foo.go", longFile, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var conf types.Config
+ pkg, err := conf.Check("foo", fset1, []*ast.File{f}, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // export
+ exportdata, err := iexport(fset1, gcimporter.IExportVersion, pkg)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // import
+ imports := make(map[string]*types.Package)
+ fset2 := token.NewFileSet()
+ _, pkg2, err := gcimporter.IImportData(fset2, imports, exportdata, pkg.Path())
+ if err != nil {
+ t.Fatalf("IImportData(%s): %v", pkg.Path(), err)
+ }
+
+ // compare
+ posn1 := fset1.Position(pkg.Scope().Lookup("X").Pos())
+ posn2 := fset2.Position(pkg2.Scope().Lookup("X").Pos())
+ if want := "foo.go:1:1"; posn2.String() != want {
+ t.Errorf("X position = %s, want %s (orig was %s)",
+ posn2, want, posn1)
+ }
+}
+
+func TestIExportData_typealiases(t *testing.T) {
+ // parse and typecheck
+ fset1 := token.NewFileSet()
+ f, err := parser.ParseFile(fset1, "p.go", src, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var conf types.Config
+ pkg1, err := conf.Check("p", fset1, []*ast.File{f}, nil)
+ if err == nil {
+ // foo in undeclared in src; we should see an error
+ t.Fatal("invalid source type-checked without error")
+ }
+ if pkg1 == nil {
+ // despite incorrect src we should see a (partially) type-checked package
+ t.Fatal("nil package returned")
+ }
+ checkPkg(t, pkg1, "export")
+
+ // export
+ // use a nil fileset here to confirm that it doesn't panic
+ exportdata, err := iexport(nil, gcimporter.IExportVersion, pkg1)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // import
+ imports := make(map[string]*types.Package)
+ fset2 := token.NewFileSet()
+ _, pkg2, err := gcimporter.IImportData(fset2, imports, exportdata, pkg1.Path())
+ if err != nil {
+ t.Fatalf("IImportData(%s): %v", pkg1.Path(), err)
+ }
+ checkPkg(t, pkg2, "import")
+}
+
+// cmpObj reports how x and y differ. They are assumed to belong to different
+// universes so cannot be compared directly. It is an adapted version of
+// equalObj in bexport_test.go.
+func cmpObj(x, y types.Object) error {
+ if reflect.TypeOf(x) != reflect.TypeOf(y) {
+ return fmt.Errorf("%T vs %T", x, y)
+ }
+ xt := x.Type()
+ yt := y.Type()
+ switch x := x.(type) {
+ case *types.Var, *types.Func:
+ // ok
+ case *types.Const:
+ xval := x.Val()
+ yval := y.(*types.Const).Val()
+ equal := constant.Compare(xval, token.EQL, yval)
+ if !equal {
+ // try approx. comparison
+ xkind := xval.Kind()
+ ykind := yval.Kind()
+ if xkind == constant.Complex || ykind == constant.Complex {
+ equal = same(constant.Real(xval), constant.Real(yval)) &&
+ same(constant.Imag(xval), constant.Imag(yval))
+ } else if xkind == constant.Float || ykind == constant.Float {
+ equal = same(xval, yval)
+ } else if xkind == constant.Unknown && ykind == constant.Unknown {
+ equal = true
+ }
+ }
+ if !equal {
+ return fmt.Errorf("unequal constants %s vs %s", xval, yval)
+ }
+ case *types.TypeName:
+ if xalias, yalias := x.IsAlias(), y.(*types.TypeName).IsAlias(); xalias != yalias {
+ return fmt.Errorf("mismatching IsAlias(): %s vs %s", x, y)
+ }
+ // equalType does not recurse into the underlying types of named types, so
+ // we must pass the underlying type explicitly here. However, in doing this
+ // we may skip checking the features of the named types themselves, in
+ // situations where the type name is not referenced by the underlying or
+ // any other top-level declarations. Therefore, we must explicitly compare
+ // named types here, before passing their underlying types into equalType.
+ xn, _ := xt.(*types.Named)
+ yn, _ := yt.(*types.Named)
+ if (xn == nil) != (yn == nil) {
+ return fmt.Errorf("mismatching types: %T vs %T", xt, yt)
+ }
+ if xn != nil {
+ if err := cmpNamed(xn, yn); err != nil {
+ return err
+ }
+ }
+ xt = xt.Underlying()
+ yt = yt.Underlying()
+ default:
+ return fmt.Errorf("unexpected %T", x)
+ }
+ return equalType(xt, yt)
+}
+
+// Use the same floating-point precision (512) as cmd/compile
+// (see Mpprec in cmd/compile/internal/gc/mpfloat.go).
+const mpprec = 512
+
+// same compares non-complex numeric values and reports if they are approximately equal.
+func same(x, y constant.Value) bool {
+ xf := constantToFloat(x)
+ yf := constantToFloat(y)
+ d := new(big.Float).Sub(xf, yf)
+ d.Abs(d)
+ eps := big.NewFloat(1.0 / (1 << (mpprec - 1))) // allow for 1 bit of error
+ return d.Cmp(eps) < 0
+}
+
+// copy of the function with the same name in iexport.go.
+func constantToFloat(x constant.Value) *big.Float {
+ var f big.Float
+ f.SetPrec(mpprec)
+ if v, exact := constant.Float64Val(x); exact {
+ // float64
+ f.SetFloat64(v)
+ } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int {
+ // TODO(gri): add big.Rat accessor to constant.Value.
+ n := valueToRat(num)
+ d := valueToRat(denom)
+ f.SetRat(n.Quo(n, d))
+ } else {
+ // Value too large to represent as a fraction => inaccessible.
+ // TODO(gri): add big.Float accessor to constant.Value.
+ _, ok := f.SetString(x.ExactString())
+ if !ok {
+ panic("should not reach here")
+ }
+ }
+ return &f
+}
+
+// copy of the function with the same name in iexport.go.
+func valueToRat(x constant.Value) *big.Rat {
+ // Convert little-endian to big-endian.
+ // I can't believe this is necessary.
+ bytes := constant.Bytes(x)
+ for i := 0; i < len(bytes)/2; i++ {
+ bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i]
+ }
+ return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes))
+}
+
+// This is a regression test for a bug in iexport of types.Struct:
+// unexported fields were losing their implicit package qualifier.
+func TestUnexportedStructFields(t *testing.T) {
+ fset := token.NewFileSet()
+ export := make(map[string][]byte)
+
+ // process parses and type-checks a single-file
+ // package and saves its export data.
+ process := func(path, content string) {
+ syntax, err := parser.ParseFile(fset, path+"/x.go", content, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ packages := make(map[string]*types.Package) // keys are package paths
+ cfg := &types.Config{
+ Importer: importerFunc(func(path string) (*types.Package, error) {
+ data, ok := export[path]
+ if !ok {
+ return nil, fmt.Errorf("missing export data for %s", path)
+ }
+ return gcexportdata.Read(bytes.NewReader(data), fset, packages, path)
+ }),
+ }
+ pkg := types.NewPackage(path, syntax.Name.Name)
+ check := types.NewChecker(cfg, fset, pkg, nil)
+ if err := check.Files([]*ast.File{syntax}); err != nil {
+ t.Fatal(err)
+ }
+ var out bytes.Buffer
+ if err := gcexportdata.Write(&out, fset, pkg); err != nil {
+ t.Fatal(err)
+ }
+ export[path] = out.Bytes()
+ }
+
+ // Historically this led to a spurious error:
+ // "cannot convert a.M (variable of type a.MyTime) to type time.Time"
+ // because the private fields of Time and MyTime were not identical.
+ process("time", `package time; type Time struct { x, y int }`)
+ process("a", `package a; import "time"; type MyTime time.Time; var M MyTime`)
+ process("b", `package b; import ("a"; "time"); var _ = time.Time(a.M)`)
+}
+
+type importerFunc func(path string) (*types.Package, error)
+
+func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) }
diff --git a/internal/gcimporter/iimport.go b/internal/gcimporter/iimport.go
new file mode 100644
index 000000000..448f903e8
--- /dev/null
+++ b/internal/gcimporter/iimport.go
@@ -0,0 +1,976 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Indexed package import.
+// See cmd/compile/internal/gc/iexport.go for the export data format.
+
+// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go.
+
+package gcimporter
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "go/constant"
+ "go/token"
+ "go/types"
+ "io"
+ "math/big"
+ "sort"
+ "strings"
+
+ "golang.org/x/tools/internal/typeparams"
+)
+
+type intReader struct {
+ *bytes.Reader
+ path string
+}
+
+func (r *intReader) int64() int64 {
+ i, err := binary.ReadVarint(r.Reader)
+ if err != nil {
+ errorf("import %q: read varint error: %v", r.path, err)
+ }
+ return i
+}
+
+func (r *intReader) uint64() uint64 {
+ i, err := binary.ReadUvarint(r.Reader)
+ if err != nil {
+ errorf("import %q: read varint error: %v", r.path, err)
+ }
+ return i
+}
+
+// Keep this in sync with constants in iexport.go.
+const (
+ iexportVersionGo1_11 = 0
+ iexportVersionPosCol = 1
+ iexportVersionGo1_18 = 2
+ iexportVersionGenerics = 2
+
+ iexportVersionCurrent = 2
+)
+
+type ident struct {
+ pkg *types.Package
+ name string
+}
+
+const predeclReserved = 32
+
+type itag uint64
+
+const (
+ // Types
+ definedType itag = iota
+ pointerType
+ sliceType
+ arrayType
+ chanType
+ mapType
+ signatureType
+ structType
+ interfaceType
+ typeParamType
+ instanceType
+ unionType
+)
+
+// IImportData imports a package from the serialized package data
+// and returns 0 and a reference to the package.
+// If the export data version is not recognized or the format is otherwise
+// compromised, an error is returned.
+func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (int, *types.Package, error) {
+ pkgs, err := iimportCommon(fset, imports, data, false, path, nil)
+ if err != nil {
+ return 0, nil, err
+ }
+ return 0, pkgs[0], nil
+}
+
+// IImportBundle imports a set of packages from the serialized package bundle.
+func IImportBundle(fset *token.FileSet, imports map[string]*types.Package, data []byte) ([]*types.Package, error) {
+ return iimportCommon(fset, imports, data, true, "", nil)
+}
+
+func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data []byte, bundle bool, path string, insert InsertType) (pkgs []*types.Package, err error) {
+ const currentVersion = iexportVersionCurrent
+ version := int64(-1)
+ if !debug {
+ defer func() {
+ if e := recover(); e != nil {
+ if bundle {
+ err = fmt.Errorf("%v", e)
+ } else if version > currentVersion {
+ err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e)
+ } else {
+ err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e)
+ }
+ }
+ }()
+ }
+
+ r := &intReader{bytes.NewReader(data), path}
+
+ if bundle {
+ bundleVersion := r.uint64()
+ switch bundleVersion {
+ case bundleVersion:
+ default:
+ errorf("unknown bundle format version %d", bundleVersion)
+ }
+ }
+
+ version = int64(r.uint64())
+ switch version {
+ case iexportVersionGo1_18, iexportVersionPosCol, iexportVersionGo1_11:
+ default:
+ if version > iexportVersionGo1_18 {
+ errorf("unstable iexport format version %d, just rebuild compiler and std library", version)
+ } else {
+ errorf("unknown iexport format version %d", version)
+ }
+ }
+
+ sLen := int64(r.uint64())
+ var fLen int64
+ var fileOffset []uint64
+ if insert != nil {
+ // Shallow mode uses a different position encoding.
+ fLen = int64(r.uint64())
+ fileOffset = make([]uint64, r.uint64())
+ for i := range fileOffset {
+ fileOffset[i] = r.uint64()
+ }
+ }
+ dLen := int64(r.uint64())
+
+ whence, _ := r.Seek(0, io.SeekCurrent)
+ stringData := data[whence : whence+sLen]
+ fileData := data[whence+sLen : whence+sLen+fLen]
+ declData := data[whence+sLen+fLen : whence+sLen+fLen+dLen]
+ r.Seek(sLen+fLen+dLen, io.SeekCurrent)
+
+ p := iimporter{
+ version: int(version),
+ ipath: path,
+ insert: insert,
+
+ stringData: stringData,
+ stringCache: make(map[uint64]string),
+ fileOffset: fileOffset,
+ fileData: fileData,
+ fileCache: make([]*token.File, len(fileOffset)),
+ pkgCache: make(map[uint64]*types.Package),
+
+ declData: declData,
+ pkgIndex: make(map[*types.Package]map[string]uint64),
+ typCache: make(map[uint64]types.Type),
+ // Separate map for typeparams, keyed by their package and unique
+ // name.
+ tparamIndex: make(map[ident]types.Type),
+
+ fake: fakeFileSet{
+ fset: fset,
+ files: make(map[string]*fileInfo),
+ },
+ }
+ defer p.fake.setLines() // set lines for files in fset
+
+ for i, pt := range predeclared() {
+ p.typCache[uint64(i)] = pt
+ }
+
+ pkgList := make([]*types.Package, r.uint64())
+ for i := range pkgList {
+ pkgPathOff := r.uint64()
+ pkgPath := p.stringAt(pkgPathOff)
+ pkgName := p.stringAt(r.uint64())
+ _ = r.uint64() // package height; unused by go/types
+
+ if pkgPath == "" {
+ pkgPath = path
+ }
+ pkg := imports[pkgPath]
+ if pkg == nil {
+ pkg = types.NewPackage(pkgPath, pkgName)
+ imports[pkgPath] = pkg
+ } else if pkg.Name() != pkgName {
+ errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path)
+ }
+ if i == 0 && !bundle {
+ p.localpkg = pkg
+ }
+
+ p.pkgCache[pkgPathOff] = pkg
+
+ // Read index for package.
+ nameIndex := make(map[string]uint64)
+ nSyms := r.uint64()
+ // In shallow mode we don't expect an index for other packages.
+ assert(nSyms == 0 || p.localpkg == pkg || p.insert == nil)
+ for ; nSyms > 0; nSyms-- {
+ name := p.stringAt(r.uint64())
+ nameIndex[name] = r.uint64()
+ }
+
+ p.pkgIndex[pkg] = nameIndex
+ pkgList[i] = pkg
+ }
+
+ if bundle {
+ pkgs = make([]*types.Package, r.uint64())
+ for i := range pkgs {
+ pkg := p.pkgAt(r.uint64())
+ imps := make([]*types.Package, r.uint64())
+ for j := range imps {
+ imps[j] = p.pkgAt(r.uint64())
+ }
+ pkg.SetImports(imps)
+ pkgs[i] = pkg
+ }
+ } else {
+ if len(pkgList) == 0 {
+ errorf("no packages found for %s", path)
+ panic("unreachable")
+ }
+ pkgs = pkgList[:1]
+
+ // record all referenced packages as imports
+ list := append(([]*types.Package)(nil), pkgList[1:]...)
+ sort.Sort(byPath(list))
+ pkgs[0].SetImports(list)
+ }
+
+ for _, pkg := range pkgs {
+ if pkg.Complete() {
+ continue
+ }
+
+ names := make([]string, 0, len(p.pkgIndex[pkg]))
+ for name := range p.pkgIndex[pkg] {
+ names = append(names, name)
+ }
+ sort.Strings(names)
+ for _, name := range names {
+ p.doDecl(pkg, name)
+ }
+
+ // package was imported completely and without errors
+ pkg.MarkComplete()
+ }
+
+ // SetConstraint can't be called if the constraint type is not yet complete.
+ // When type params are created in the 'P' case of (*importReader).obj(),
+ // the associated constraint type may not be complete due to recursion.
+ // Therefore, we defer calling SetConstraint there, and call it here instead
+ // after all types are complete.
+ for _, d := range p.later {
+ typeparams.SetTypeParamConstraint(d.t, d.constraint)
+ }
+
+ for _, typ := range p.interfaceList {
+ typ.Complete()
+ }
+
+ return pkgs, nil
+}
+
+type setConstraintArgs struct {
+ t *typeparams.TypeParam
+ constraint types.Type
+}
+
+type iimporter struct {
+ version int
+ ipath string
+
+ localpkg *types.Package
+ insert func(pkg *types.Package, name string) // "shallow" mode only
+
+ stringData []byte
+ stringCache map[uint64]string
+ fileOffset []uint64 // fileOffset[i] is offset in fileData for info about file encoded as i
+ fileData []byte
+ fileCache []*token.File // memoized decoding of file encoded as i
+ pkgCache map[uint64]*types.Package
+
+ declData []byte
+ pkgIndex map[*types.Package]map[string]uint64
+ typCache map[uint64]types.Type
+ tparamIndex map[ident]types.Type
+
+ fake fakeFileSet
+ interfaceList []*types.Interface
+
+ // Arguments for calls to SetConstraint that are deferred due to recursive types
+ later []setConstraintArgs
+
+ indent int // for tracing support
+}
+
+func (p *iimporter) trace(format string, args ...interface{}) {
+ if !trace {
+ // Call sites should also be guarded, but having this check here allows
+ // easily enabling/disabling debug trace statements.
+ return
+ }
+ fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...)
+}
+
+func (p *iimporter) doDecl(pkg *types.Package, name string) {
+ if debug {
+ p.trace("import decl %s", name)
+ p.indent++
+ defer func() {
+ p.indent--
+ p.trace("=> %s", name)
+ }()
+ }
+ // See if we've already imported this declaration.
+ if obj := pkg.Scope().Lookup(name); obj != nil {
+ return
+ }
+
+ off, ok := p.pkgIndex[pkg][name]
+ if !ok {
+ // In "shallow" mode, call back to the application to
+ // find the object and insert it into the package scope.
+ if p.insert != nil {
+ assert(pkg != p.localpkg)
+ p.insert(pkg, name) // "can't fail"
+ return
+ }
+ errorf("%v.%v not in index", pkg, name)
+ }
+
+ r := &importReader{p: p, currPkg: pkg}
+ r.declReader.Reset(p.declData[off:])
+
+ r.obj(name)
+}
+
+func (p *iimporter) stringAt(off uint64) string {
+ if s, ok := p.stringCache[off]; ok {
+ return s
+ }
+
+ slen, n := binary.Uvarint(p.stringData[off:])
+ if n <= 0 {
+ errorf("varint failed")
+ }
+ spos := off + uint64(n)
+ s := string(p.stringData[spos : spos+slen])
+ p.stringCache[off] = s
+ return s
+}
+
+func (p *iimporter) fileAt(index uint64) *token.File {
+ file := p.fileCache[index]
+ if file == nil {
+ off := p.fileOffset[index]
+ file = p.decodeFile(intReader{bytes.NewReader(p.fileData[off:]), p.ipath})
+ p.fileCache[index] = file
+ }
+ return file
+}
+
+func (p *iimporter) decodeFile(rd intReader) *token.File {
+ filename := p.stringAt(rd.uint64())
+ size := int(rd.uint64())
+ file := p.fake.fset.AddFile(filename, -1, size)
+
+ // SetLines requires a nondecreasing sequence.
+ // Because it is common for clients to derive the interval
+ // [start, start+len(name)] from a start position, and we
+ // want to ensure that the end offset is on the same line,
+ // we fill in the gaps of the sparse encoding with values
+ // that strictly increase by the largest possible amount.
+ // This allows us to avoid having to record the actual end
+ // offset of each needed line.
+
+ lines := make([]int, int(rd.uint64()))
+ var index, offset int
+ for i, n := 0, int(rd.uint64()); i < n; i++ {
+ index += int(rd.uint64())
+ offset += int(rd.uint64())
+ lines[index] = offset
+
+ // Ensure monotonicity between points.
+ for j := index - 1; j > 0 && lines[j] == 0; j-- {
+ lines[j] = lines[j+1] - 1
+ }
+ }
+
+ // Ensure monotonicity after last point.
+ for j := len(lines) - 1; j > 0 && lines[j] == 0; j-- {
+ size--
+ lines[j] = size
+ }
+
+ if !file.SetLines(lines) {
+ errorf("SetLines failed: %d", lines) // can't happen
+ }
+ return file
+}
+
+func (p *iimporter) pkgAt(off uint64) *types.Package {
+ if pkg, ok := p.pkgCache[off]; ok {
+ return pkg
+ }
+ path := p.stringAt(off)
+ errorf("missing package %q in %q", path, p.ipath)
+ return nil
+}
+
+func (p *iimporter) typAt(off uint64, base *types.Named) types.Type {
+ if t, ok := p.typCache[off]; ok && canReuse(base, t) {
+ return t
+ }
+
+ if off < predeclReserved {
+ errorf("predeclared type missing from cache: %v", off)
+ }
+
+ r := &importReader{p: p}
+ r.declReader.Reset(p.declData[off-predeclReserved:])
+ t := r.doType(base)
+
+ if canReuse(base, t) {
+ p.typCache[off] = t
+ }
+ return t
+}
+
+// canReuse reports whether the type rhs on the RHS of the declaration for def
+// may be re-used.
+//
+// Specifically, if def is non-nil and rhs is an interface type with methods, it
+// may not be re-used because we have a convention of setting the receiver type
+// for interface methods to def.
+func canReuse(def *types.Named, rhs types.Type) bool {
+ if def == nil {
+ return true
+ }
+ iface, _ := rhs.(*types.Interface)
+ if iface == nil {
+ return true
+ }
+ // Don't use iface.Empty() here as iface may not be complete.
+ return iface.NumEmbeddeds() == 0 && iface.NumExplicitMethods() == 0
+}
+
+type importReader struct {
+ p *iimporter
+ declReader bytes.Reader
+ currPkg *types.Package
+ prevFile string
+ prevLine int64
+ prevColumn int64
+}
+
+func (r *importReader) obj(name string) {
+ tag := r.byte()
+ pos := r.pos()
+
+ switch tag {
+ case 'A':
+ typ := r.typ()
+
+ r.declare(types.NewTypeName(pos, r.currPkg, name, typ))
+
+ case 'C':
+ typ, val := r.value()
+
+ r.declare(types.NewConst(pos, r.currPkg, name, typ, val))
+
+ case 'F', 'G':
+ var tparams []*typeparams.TypeParam
+ if tag == 'G' {
+ tparams = r.tparamList()
+ }
+ sig := r.signature(nil, nil, tparams)
+ r.declare(types.NewFunc(pos, r.currPkg, name, sig))
+
+ case 'T', 'U':
+ // Types can be recursive. We need to setup a stub
+ // declaration before recursing.
+ obj := types.NewTypeName(pos, r.currPkg, name, nil)
+ named := types.NewNamed(obj, nil, nil)
+ // Declare obj before calling r.tparamList, so the new type name is recognized
+ // if used in the constraint of one of its own typeparams (see #48280).
+ r.declare(obj)
+ if tag == 'U' {
+ tparams := r.tparamList()
+ typeparams.SetForNamed(named, tparams)
+ }
+
+ underlying := r.p.typAt(r.uint64(), named).Underlying()
+ named.SetUnderlying(underlying)
+
+ if !isInterface(underlying) {
+ for n := r.uint64(); n > 0; n-- {
+ mpos := r.pos()
+ mname := r.ident()
+ recv := r.param()
+
+ // If the receiver has any targs, set those as the
+ // rparams of the method (since those are the
+ // typeparams being used in the method sig/body).
+ base := baseType(recv.Type())
+ assert(base != nil)
+ targs := typeparams.NamedTypeArgs(base)
+ var rparams []*typeparams.TypeParam
+ if targs.Len() > 0 {
+ rparams = make([]*typeparams.TypeParam, targs.Len())
+ for i := range rparams {
+ rparams[i] = targs.At(i).(*typeparams.TypeParam)
+ }
+ }
+ msig := r.signature(recv, rparams, nil)
+
+ named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig))
+ }
+ }
+
+ case 'P':
+ // We need to "declare" a typeparam in order to have a name that
+ // can be referenced recursively (if needed) in the type param's
+ // bound.
+ if r.p.version < iexportVersionGenerics {
+ errorf("unexpected type param type")
+ }
+ name0 := tparamName(name)
+ tn := types.NewTypeName(pos, r.currPkg, name0, nil)
+ t := typeparams.NewTypeParam(tn, nil)
+
+ // To handle recursive references to the typeparam within its
+ // bound, save the partial type in tparamIndex before reading the bounds.
+ id := ident{r.currPkg, name}
+ r.p.tparamIndex[id] = t
+ var implicit bool
+ if r.p.version >= iexportVersionGo1_18 {
+ implicit = r.bool()
+ }
+ constraint := r.typ()
+ if implicit {
+ iface, _ := constraint.(*types.Interface)
+ if iface == nil {
+ errorf("non-interface constraint marked implicit")
+ }
+ typeparams.MarkImplicit(iface)
+ }
+ // The constraint type may not be complete, if we
+ // are in the middle of a type recursion involving type
+ // constraints. So, we defer SetConstraint until we have
+ // completely set up all types in ImportData.
+ r.p.later = append(r.p.later, setConstraintArgs{t: t, constraint: constraint})
+
+ case 'V':
+ typ := r.typ()
+
+ r.declare(types.NewVar(pos, r.currPkg, name, typ))
+
+ default:
+ errorf("unexpected tag: %v", tag)
+ }
+}
+
+func (r *importReader) declare(obj types.Object) {
+ obj.Pkg().Scope().Insert(obj)
+}
+
+func (r *importReader) value() (typ types.Type, val constant.Value) {
+ typ = r.typ()
+ if r.p.version >= iexportVersionGo1_18 {
+ // TODO: add support for using the kind.
+ _ = constant.Kind(r.int64())
+ }
+
+ switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType {
+ case types.IsBoolean:
+ val = constant.MakeBool(r.bool())
+
+ case types.IsString:
+ val = constant.MakeString(r.string())
+
+ case types.IsInteger:
+ var x big.Int
+ r.mpint(&x, b)
+ val = constant.Make(&x)
+
+ case types.IsFloat:
+ val = r.mpfloat(b)
+
+ case types.IsComplex:
+ re := r.mpfloat(b)
+ im := r.mpfloat(b)
+ val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
+
+ default:
+ if b.Kind() == types.Invalid {
+ val = constant.MakeUnknown()
+ return
+ }
+ errorf("unexpected type %v", typ) // panics
+ panic("unreachable")
+ }
+
+ return
+}
+
+func intSize(b *types.Basic) (signed bool, maxBytes uint) {
+ if (b.Info() & types.IsUntyped) != 0 {
+ return true, 64
+ }
+
+ switch b.Kind() {
+ case types.Float32, types.Complex64:
+ return true, 3
+ case types.Float64, types.Complex128:
+ return true, 7
+ }
+
+ signed = (b.Info() & types.IsUnsigned) == 0
+ switch b.Kind() {
+ case types.Int8, types.Uint8:
+ maxBytes = 1
+ case types.Int16, types.Uint16:
+ maxBytes = 2
+ case types.Int32, types.Uint32:
+ maxBytes = 4
+ default:
+ maxBytes = 8
+ }
+
+ return
+}
+
+func (r *importReader) mpint(x *big.Int, typ *types.Basic) {
+ signed, maxBytes := intSize(typ)
+
+ maxSmall := 256 - maxBytes
+ if signed {
+ maxSmall = 256 - 2*maxBytes
+ }
+ if maxBytes == 1 {
+ maxSmall = 256
+ }
+
+ n, _ := r.declReader.ReadByte()
+ if uint(n) < maxSmall {
+ v := int64(n)
+ if signed {
+ v >>= 1
+ if n&1 != 0 {
+ v = ^v
+ }
+ }
+ x.SetInt64(v)
+ return
+ }
+
+ v := -n
+ if signed {
+ v = -(n &^ 1) >> 1
+ }
+ if v < 1 || uint(v) > maxBytes {
+ errorf("weird decoding: %v, %v => %v", n, signed, v)
+ }
+ b := make([]byte, v)
+ io.ReadFull(&r.declReader, b)
+ x.SetBytes(b)
+ if signed && n&1 != 0 {
+ x.Neg(x)
+ }
+}
+
+func (r *importReader) mpfloat(typ *types.Basic) constant.Value {
+ var mant big.Int
+ r.mpint(&mant, typ)
+ var f big.Float
+ f.SetInt(&mant)
+ if f.Sign() != 0 {
+ f.SetMantExp(&f, int(r.int64()))
+ }
+ return constant.Make(&f)
+}
+
+func (r *importReader) ident() string {
+ return r.string()
+}
+
+func (r *importReader) qualifiedIdent() (*types.Package, string) {
+ name := r.string()
+ pkg := r.pkg()
+ return pkg, name
+}
+
+func (r *importReader) pos() token.Pos {
+ if r.p.insert != nil { // shallow mode
+ return r.posv2()
+ }
+ if r.p.version >= iexportVersionPosCol {
+ r.posv1()
+ } else {
+ r.posv0()
+ }
+
+ if r.prevFile == "" && r.prevLine == 0 && r.prevColumn == 0 {
+ return token.NoPos
+ }
+ return r.p.fake.pos(r.prevFile, int(r.prevLine), int(r.prevColumn))
+}
+
+func (r *importReader) posv0() {
+ delta := r.int64()
+ if delta != deltaNewFile {
+ r.prevLine += delta
+ } else if l := r.int64(); l == -1 {
+ r.prevLine += deltaNewFile
+ } else {
+ r.prevFile = r.string()
+ r.prevLine = l
+ }
+}
+
+func (r *importReader) posv1() {
+ delta := r.int64()
+ r.prevColumn += delta >> 1
+ if delta&1 != 0 {
+ delta = r.int64()
+ r.prevLine += delta >> 1
+ if delta&1 != 0 {
+ r.prevFile = r.string()
+ }
+ }
+}
+
+func (r *importReader) posv2() token.Pos {
+ file := r.uint64()
+ if file == 0 {
+ return token.NoPos
+ }
+ tf := r.p.fileAt(file - 1)
+ return tf.Pos(int(r.uint64()))
+}
+
+func (r *importReader) typ() types.Type {
+ return r.p.typAt(r.uint64(), nil)
+}
+
+func isInterface(t types.Type) bool {
+ _, ok := t.(*types.Interface)
+ return ok
+}
+
+func (r *importReader) pkg() *types.Package { return r.p.pkgAt(r.uint64()) }
+func (r *importReader) string() string { return r.p.stringAt(r.uint64()) }
+
+func (r *importReader) doType(base *types.Named) (res types.Type) {
+ k := r.kind()
+ if debug {
+ r.p.trace("importing type %d (base: %s)", k, base)
+ r.p.indent++
+ defer func() {
+ r.p.indent--
+ r.p.trace("=> %s", res)
+ }()
+ }
+ switch k {
+ default:
+ errorf("unexpected kind tag in %q: %v", r.p.ipath, k)
+ return nil
+
+ case definedType:
+ pkg, name := r.qualifiedIdent()
+ r.p.doDecl(pkg, name)
+ return pkg.Scope().Lookup(name).(*types.TypeName).Type()
+ case pointerType:
+ return types.NewPointer(r.typ())
+ case sliceType:
+ return types.NewSlice(r.typ())
+ case arrayType:
+ n := r.uint64()
+ return types.NewArray(r.typ(), int64(n))
+ case chanType:
+ dir := chanDir(int(r.uint64()))
+ return types.NewChan(dir, r.typ())
+ case mapType:
+ return types.NewMap(r.typ(), r.typ())
+ case signatureType:
+ r.currPkg = r.pkg()
+ return r.signature(nil, nil, nil)
+
+ case structType:
+ r.currPkg = r.pkg()
+
+ fields := make([]*types.Var, r.uint64())
+ tags := make([]string, len(fields))
+ for i := range fields {
+ fpos := r.pos()
+ fname := r.ident()
+ ftyp := r.typ()
+ emb := r.bool()
+ tag := r.string()
+
+ fields[i] = types.NewField(fpos, r.currPkg, fname, ftyp, emb)
+ tags[i] = tag
+ }
+ return types.NewStruct(fields, tags)
+
+ case interfaceType:
+ r.currPkg = r.pkg()
+
+ embeddeds := make([]types.Type, r.uint64())
+ for i := range embeddeds {
+ _ = r.pos()
+ embeddeds[i] = r.typ()
+ }
+
+ methods := make([]*types.Func, r.uint64())
+ for i := range methods {
+ mpos := r.pos()
+ mname := r.ident()
+
+ // TODO(mdempsky): Matches bimport.go, but I
+ // don't agree with this.
+ var recv *types.Var
+ if base != nil {
+ recv = types.NewVar(token.NoPos, r.currPkg, "", base)
+ }
+
+ msig := r.signature(recv, nil, nil)
+ methods[i] = types.NewFunc(mpos, r.currPkg, mname, msig)
+ }
+
+ typ := newInterface(methods, embeddeds)
+ r.p.interfaceList = append(r.p.interfaceList, typ)
+ return typ
+
+ case typeParamType:
+ if r.p.version < iexportVersionGenerics {
+ errorf("unexpected type param type")
+ }
+ pkg, name := r.qualifiedIdent()
+ id := ident{pkg, name}
+ if t, ok := r.p.tparamIndex[id]; ok {
+ // We're already in the process of importing this typeparam.
+ return t
+ }
+ // Otherwise, import the definition of the typeparam now.
+ r.p.doDecl(pkg, name)
+ return r.p.tparamIndex[id]
+
+ case instanceType:
+ if r.p.version < iexportVersionGenerics {
+ errorf("unexpected instantiation type")
+ }
+ // pos does not matter for instances: they are positioned on the original
+ // type.
+ _ = r.pos()
+ len := r.uint64()
+ targs := make([]types.Type, len)
+ for i := range targs {
+ targs[i] = r.typ()
+ }
+ baseType := r.typ()
+ // The imported instantiated type doesn't include any methods, so
+ // we must always use the methods of the base (orig) type.
+ // TODO provide a non-nil *Environment
+ t, _ := typeparams.Instantiate(nil, baseType, targs, false)
+ return t
+
+ case unionType:
+ if r.p.version < iexportVersionGenerics {
+ errorf("unexpected instantiation type")
+ }
+ terms := make([]*typeparams.Term, r.uint64())
+ for i := range terms {
+ terms[i] = typeparams.NewTerm(r.bool(), r.typ())
+ }
+ return typeparams.NewUnion(terms)
+ }
+}
+
+func (r *importReader) kind() itag {
+ return itag(r.uint64())
+}
+
+func (r *importReader) signature(recv *types.Var, rparams []*typeparams.TypeParam, tparams []*typeparams.TypeParam) *types.Signature {
+ params := r.paramList()
+ results := r.paramList()
+ variadic := params.Len() > 0 && r.bool()
+ return typeparams.NewSignatureType(recv, rparams, tparams, params, results, variadic)
+}
+
+func (r *importReader) tparamList() []*typeparams.TypeParam {
+ n := r.uint64()
+ if n == 0 {
+ return nil
+ }
+ xs := make([]*typeparams.TypeParam, n)
+ for i := range xs {
+ // Note: the standard library importer is tolerant of nil types here,
+ // though would panic in SetTypeParams.
+ xs[i] = r.typ().(*typeparams.TypeParam)
+ }
+ return xs
+}
+
+func (r *importReader) paramList() *types.Tuple {
+ xs := make([]*types.Var, r.uint64())
+ for i := range xs {
+ xs[i] = r.param()
+ }
+ return types.NewTuple(xs...)
+}
+
+func (r *importReader) param() *types.Var {
+ pos := r.pos()
+ name := r.ident()
+ typ := r.typ()
+ return types.NewParam(pos, r.currPkg, name, typ)
+}
+
+func (r *importReader) bool() bool {
+ return r.uint64() != 0
+}
+
+func (r *importReader) int64() int64 {
+ n, err := binary.ReadVarint(&r.declReader)
+ if err != nil {
+ errorf("readVarint: %v", err)
+ }
+ return n
+}
+
+func (r *importReader) uint64() uint64 {
+ n, err := binary.ReadUvarint(&r.declReader)
+ if err != nil {
+ errorf("readUvarint: %v", err)
+ }
+ return n
+}
+
+func (r *importReader) byte() byte {
+ x, err := r.declReader.ReadByte()
+ if err != nil {
+ errorf("declReader.ReadByte: %v", err)
+ }
+ return x
+}
+
+func baseType(typ types.Type) *types.Named {
+ // pointer receivers are never types.Named types
+ if p, _ := typ.(*types.Pointer); p != nil {
+ typ = p.Elem()
+ }
+ // receiver base types are always (possibly generic) types.Named types
+ n, _ := typ.(*types.Named)
+ return n
+}
diff --git a/go/internal/gcimporter/israce_test.go b/internal/gcimporter/israce_test.go
index 885ba1c01..885ba1c01 100644
--- a/go/internal/gcimporter/israce_test.go
+++ b/internal/gcimporter/israce_test.go
diff --git a/go/internal/gcimporter/newInterface10.go b/internal/gcimporter/newInterface10.go
index 8b163e3d0..8b163e3d0 100644
--- a/go/internal/gcimporter/newInterface10.go
+++ b/internal/gcimporter/newInterface10.go
diff --git a/go/internal/gcimporter/newInterface11.go b/internal/gcimporter/newInterface11.go
index 49984f40f..49984f40f 100644
--- a/go/internal/gcimporter/newInterface11.go
+++ b/internal/gcimporter/newInterface11.go
diff --git a/internal/gcimporter/shallow_test.go b/internal/gcimporter/shallow_test.go
new file mode 100644
index 000000000..429c34b3d
--- /dev/null
+++ b/internal/gcimporter/shallow_test.go
@@ -0,0 +1,226 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gcimporter_test
+
+import (
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "os"
+ "strings"
+ "testing"
+
+ "golang.org/x/sync/errgroup"
+ "golang.org/x/tools/go/packages"
+ "golang.org/x/tools/internal/gcimporter"
+ "golang.org/x/tools/internal/testenv"
+)
+
+// TestStd type-checks the standard library using shallow export data.
+func TestShallowStd(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping in short mode; too slow (https://golang.org/issue/14113)")
+ }
+ testenv.NeedsTool(t, "go")
+
+ // Load import graph of the standard library.
+ // (No parsing or type-checking.)
+ cfg := &packages.Config{
+ Mode: packages.NeedImports |
+ packages.NeedName |
+ packages.NeedFiles | // see https://github.com/golang/go/issues/56632
+ packages.NeedCompiledGoFiles,
+ Tests: false,
+ }
+ pkgs, err := packages.Load(cfg, "std")
+ if err != nil {
+ t.Fatalf("load: %v", err)
+ }
+ if len(pkgs) < 200 {
+ t.Fatalf("too few packages: %d", len(pkgs))
+ }
+
+ // Type check the packages in parallel postorder.
+ done := make(map[*packages.Package]chan struct{})
+ packages.Visit(pkgs, nil, func(p *packages.Package) {
+ done[p] = make(chan struct{})
+ })
+ packages.Visit(pkgs, nil,
+ func(pkg *packages.Package) {
+ go func() {
+ // Wait for all deps to be done.
+ for _, imp := range pkg.Imports {
+ <-done[imp]
+ }
+ typecheck(t, pkg)
+ close(done[pkg])
+ }()
+ })
+ for _, root := range pkgs {
+ <-done[root]
+ }
+}
+
+// typecheck reads, parses, and type-checks a package.
+// It squirrels the export data in the the ppkg.ExportFile field.
+func typecheck(t *testing.T, ppkg *packages.Package) {
+ if ppkg.PkgPath == "unsafe" {
+ return // unsafe is special
+ }
+
+ // Create a local FileSet just for this package.
+ fset := token.NewFileSet()
+
+ // Parse files in parallel.
+ syntax := make([]*ast.File, len(ppkg.CompiledGoFiles))
+ var group errgroup.Group
+ for i, filename := range ppkg.CompiledGoFiles {
+ i, filename := i, filename
+ group.Go(func() error {
+ f, err := parser.ParseFile(fset, filename, nil, parser.SkipObjectResolution)
+ if err != nil {
+ return err // e.g. missing file
+ }
+ syntax[i] = f
+ return nil
+ })
+ }
+ if err := group.Wait(); err != nil {
+ t.Fatal(err)
+ }
+ // Inv: all files were successfully parsed.
+
+ // Build map of dependencies by package path.
+ // (We don't compute this mapping for the entire
+ // packages graph because it is not globally consistent.)
+ depsByPkgPath := make(map[string]*packages.Package)
+ {
+ var visit func(*packages.Package)
+ visit = func(pkg *packages.Package) {
+ if depsByPkgPath[pkg.PkgPath] == nil {
+ depsByPkgPath[pkg.PkgPath] = pkg
+ for path := range pkg.Imports {
+ visit(pkg.Imports[path])
+ }
+ }
+ }
+ visit(ppkg)
+ }
+
+ // importer state
+ var (
+ insert func(p *types.Package, name string)
+ importMap = make(map[string]*types.Package) // keys are PackagePaths
+ )
+ loadFromExportData := func(imp *packages.Package) (*types.Package, error) {
+ data := []byte(imp.ExportFile)
+ return gcimporter.IImportShallow(fset, importMap, data, imp.PkgPath, insert)
+ }
+ insert = func(p *types.Package, name string) {
+ imp, ok := depsByPkgPath[p.Path()]
+ if !ok {
+ t.Fatalf("can't find dependency: %q", p.Path())
+ }
+ imported, err := loadFromExportData(imp)
+ if err != nil {
+ t.Fatalf("unmarshal: %v", err)
+ }
+ if imported != p {
+ t.Fatalf("internal error: inconsistent packages")
+ }
+ if obj := imported.Scope().Lookup(name); obj == nil {
+ t.Fatalf("lookup %q.%s failed", imported.Path(), name)
+ }
+ }
+
+ cfg := &types.Config{
+ Error: func(e error) {
+ t.Error(e)
+ },
+ Importer: importerFunc(func(importPath string) (*types.Package, error) {
+ if importPath == "unsafe" {
+ return types.Unsafe, nil // unsafe has no exportdata
+ }
+ imp, ok := ppkg.Imports[importPath]
+ if !ok {
+ return nil, fmt.Errorf("missing import %q", importPath)
+ }
+ return loadFromExportData(imp)
+ }),
+ }
+
+ // Type-check the syntax trees.
+ tpkg, _ := cfg.Check(ppkg.PkgPath, fset, syntax, nil)
+ postTypeCheck(t, fset, tpkg)
+
+ // Save the export data.
+ data, err := gcimporter.IExportShallow(fset, tpkg)
+ if err != nil {
+ t.Fatalf("internal error marshalling export data: %v", err)
+ }
+ ppkg.ExportFile = string(data)
+}
+
+// postTypeCheck is called after a package is type checked.
+// We use it to assert additional correctness properties,
+// for example, that the apparent location of "fmt.Println"
+// corresponds to its source location: in other words,
+// export+import preserves high-fidelity positions.
+func postTypeCheck(t *testing.T, fset *token.FileSet, pkg *types.Package) {
+ // We hard-code a few interesting test-case objects.
+ var obj types.Object
+ switch pkg.Path() {
+ case "fmt":
+ // func fmt.Println
+ obj = pkg.Scope().Lookup("Println")
+ case "net/http":
+ // method (*http.Request).ParseForm
+ req := pkg.Scope().Lookup("Request")
+ obj, _, _ = types.LookupFieldOrMethod(req.Type(), true, pkg, "ParseForm")
+ default:
+ return
+ }
+ if obj == nil {
+ t.Errorf("object not found in package %s", pkg.Path())
+ return
+ }
+
+ // Now check the source fidelity of the object's position.
+ posn := fset.Position(obj.Pos())
+ data, err := os.ReadFile(posn.Filename)
+ if err != nil {
+ t.Errorf("can't read source file declaring %v: %v", obj, err)
+ return
+ }
+
+ // Check line and column denote a source interval containing the object's identifier.
+ line := strings.Split(string(data), "\n")[posn.Line-1]
+
+ if id := line[posn.Column-1 : posn.Column-1+len(obj.Name())]; id != obj.Name() {
+ t.Errorf("%+v: expected declaration of %v at this line, column; got %q", posn, obj, line)
+ }
+
+ // Check offset.
+ if id := string(data[posn.Offset : posn.Offset+len(obj.Name())]); id != obj.Name() {
+ t.Errorf("%+v: expected declaration of %v at this offset; got %q", posn, obj, id)
+ }
+
+ // Check commutativity of Position() and start+len(name) operations:
+ // Position(startPos+len(name)) == Position(startPos) + len(name).
+ // This important property is a consequence of the way in which the
+ // decoder fills the gaps in the sparse line-start offset table.
+ endPosn := fset.Position(obj.Pos() + token.Pos(len(obj.Name())))
+ wantEndPosn := token.Position{
+ Filename: posn.Filename,
+ Offset: posn.Offset + len(obj.Name()),
+ Line: posn.Line,
+ Column: posn.Column + len(obj.Name()),
+ }
+ if endPosn != wantEndPosn {
+ t.Errorf("%+v: expected end Position of %v here; was at %+v", wantEndPosn, obj, endPosn)
+ }
+}
diff --git a/internal/gcimporter/stdlib_test.go b/internal/gcimporter/stdlib_test.go
new file mode 100644
index 000000000..33ff79581
--- /dev/null
+++ b/internal/gcimporter/stdlib_test.go
@@ -0,0 +1,94 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gcimporter_test
+
+import (
+ "bytes"
+ "fmt"
+ "go/token"
+ "go/types"
+ "runtime"
+ "testing"
+ "unsafe"
+
+ "golang.org/x/tools/go/gcexportdata"
+ "golang.org/x/tools/go/packages"
+ "golang.org/x/tools/internal/testenv"
+)
+
+// TestStdlib ensures that all packages in std and x/tools can be
+// type-checked using export data. Takes around 3s.
+func TestStdlib(t *testing.T) {
+ testenv.NeedsGoPackages(t)
+
+ // gcexportdata.Read rapidly consumes FileSet address space,
+ // so disable the test on 32-bit machines.
+ // (We could use a fresh FileSet per type-check, but that
+ // would require us to re-parse the source using it.)
+ if unsafe.Sizeof(token.NoPos) < 8 {
+ t.Skip("skipping test on 32-bit machine")
+ }
+
+ // Load, parse and type-check the standard library.
+ // If we have the full source code for x/tools, also load and type-check that.
+ cfg := &packages.Config{Mode: packages.LoadAllSyntax}
+ patterns := []string{"std"}
+ minPkgs := 225 // 'GOOS=plan9 go1.18 list std | wc -l' reports 228; most other platforms have more.
+ switch runtime.GOOS {
+ case "android", "ios":
+ // The go_.*_exec script for mobile builders only copies over the source tree
+ // for the package under test.
+ default:
+ patterns = append(patterns, "golang.org/x/tools/...")
+ minPkgs += 160 // At the time of writing, 'GOOS=plan9 go list ./... | wc -l' reports 188.
+ }
+ pkgs, err := packages.Load(cfg, patterns...)
+ if err != nil {
+ t.Fatalf("failed to load/parse/type-check: %v", err)
+ }
+ if packages.PrintErrors(pkgs) > 0 {
+ t.Fatal("there were errors during loading")
+ }
+ if len(pkgs) < minPkgs {
+ t.Errorf("too few packages (%d) were loaded", len(pkgs))
+ }
+
+ export := make(map[string][]byte) // keys are package IDs
+
+ // Re-type check them all in post-order, using export data.
+ packages.Visit(pkgs, nil, func(pkg *packages.Package) {
+ packages := make(map[string]*types.Package) // keys are package paths
+ cfg := &types.Config{
+ Error: func(e error) {
+ t.Errorf("type error: %v", e)
+ },
+ Importer: importerFunc(func(importPath string) (*types.Package, error) {
+ // Resolve import path to (vendored?) package path.
+ imported := pkg.Imports[importPath]
+
+ if imported.PkgPath == "unsafe" {
+ return types.Unsafe, nil // unsafe has no exportdata
+ }
+
+ data, ok := export[imported.ID]
+ if !ok {
+ return nil, fmt.Errorf("missing export data for %s", importPath)
+ }
+ return gcexportdata.Read(bytes.NewReader(data), pkg.Fset, packages, imported.PkgPath)
+ }),
+ }
+
+ // Re-typecheck the syntax and save the export data in the map.
+ newPkg := types.NewPackage(pkg.PkgPath, pkg.Name)
+ check := types.NewChecker(cfg, pkg.Fset, newPkg, nil)
+ check.Files(pkg.Syntax)
+
+ var out bytes.Buffer
+ if err := gcexportdata.Write(&out, pkg.Fset, newPkg); err != nil {
+ t.Fatalf("internal error writing export data: %v", err)
+ }
+ export[pkg.ID] = out.Bytes()
+ })
+}
diff --git a/go/internal/gcimporter/support_go117.go b/internal/gcimporter/support_go117.go
index d892273ef..d892273ef 100644
--- a/go/internal/gcimporter/support_go117.go
+++ b/internal/gcimporter/support_go117.go
diff --git a/internal/gcimporter/support_go118.go b/internal/gcimporter/support_go118.go
new file mode 100644
index 000000000..edbe6ea70
--- /dev/null
+++ b/internal/gcimporter/support_go118.go
@@ -0,0 +1,37 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18
+// +build go1.18
+
+package gcimporter
+
+import "go/types"
+
+const iexportVersion = iexportVersionGenerics
+
+// additionalPredeclared returns additional predeclared types in go.1.18.
+func additionalPredeclared() []types.Type {
+ return []types.Type{
+ // comparable
+ types.Universe.Lookup("comparable").Type(),
+
+ // any
+ types.Universe.Lookup("any").Type(),
+ }
+}
+
+// See cmd/compile/internal/types.SplitVargenSuffix.
+func splitVargenSuffix(name string) (base, suffix string) {
+ i := len(name)
+ for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' {
+ i--
+ }
+ const dot = "·"
+ if i >= len(dot) && name[i-len(dot):i] == dot {
+ i -= len(dot)
+ return name[:i], name[i:]
+ }
+ return name, ""
+}
diff --git a/go/internal/gcimporter/testdata/a.go b/internal/gcimporter/testdata/a.go
index 56e4292cd..56e4292cd 100644
--- a/go/internal/gcimporter/testdata/a.go
+++ b/internal/gcimporter/testdata/a.go
diff --git a/internal/gcimporter/testdata/a/a.go b/internal/gcimporter/testdata/a/a.go
new file mode 100644
index 000000000..56e4292cd
--- /dev/null
+++ b/internal/gcimporter/testdata/a/a.go
@@ -0,0 +1,14 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Input for TestIssue13566
+
+package a
+
+import "encoding/json"
+
+type A struct {
+ a *A
+ json json.RawMessage
+}
diff --git a/go/internal/gcimporter/testdata/b.go b/internal/gcimporter/testdata/b.go
index 419667820..419667820 100644
--- a/go/internal/gcimporter/testdata/b.go
+++ b/internal/gcimporter/testdata/b.go
diff --git a/go/internal/gcimporter/testdata/exports.go b/internal/gcimporter/testdata/exports.go
index 8ee28b094..8ee28b094 100644
--- a/go/internal/gcimporter/testdata/exports.go
+++ b/internal/gcimporter/testdata/exports.go
diff --git a/go/internal/gcimporter/testdata/issue15920.go b/internal/gcimporter/testdata/issue15920.go
index c70f7d826..c70f7d826 100644
--- a/go/internal/gcimporter/testdata/issue15920.go
+++ b/internal/gcimporter/testdata/issue15920.go
diff --git a/go/internal/gcimporter/testdata/issue20046.go b/internal/gcimporter/testdata/issue20046.go
index c63ee821c..c63ee821c 100644
--- a/go/internal/gcimporter/testdata/issue20046.go
+++ b/internal/gcimporter/testdata/issue20046.go
diff --git a/go/internal/gcimporter/testdata/issue25301.go b/internal/gcimporter/testdata/issue25301.go
index e3dc98b4e..e3dc98b4e 100644
--- a/go/internal/gcimporter/testdata/issue25301.go
+++ b/internal/gcimporter/testdata/issue25301.go
diff --git a/internal/gcimporter/testdata/issue51836/a.go b/internal/gcimporter/testdata/issue51836/a.go
new file mode 100644
index 000000000..e9223c9aa
--- /dev/null
+++ b/internal/gcimporter/testdata/issue51836/a.go
@@ -0,0 +1,8 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type T[K any] struct {
+}
diff --git a/internal/gcimporter/testdata/issue51836/a/a.go b/internal/gcimporter/testdata/issue51836/a/a.go
new file mode 100644
index 000000000..e9223c9aa
--- /dev/null
+++ b/internal/gcimporter/testdata/issue51836/a/a.go
@@ -0,0 +1,8 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type T[K any] struct {
+}
diff --git a/internal/gcimporter/testdata/issue51836/aa.go b/internal/gcimporter/testdata/issue51836/aa.go
new file mode 100644
index 000000000..d774be282
--- /dev/null
+++ b/internal/gcimporter/testdata/issue51836/aa.go
@@ -0,0 +1,13 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+import (
+ "./a"
+)
+
+type T[K any] struct {
+ t a.T[K]
+}
diff --git a/internal/gcimporter/testdata/issue57015.go b/internal/gcimporter/testdata/issue57015.go
new file mode 100644
index 000000000..b6be81191
--- /dev/null
+++ b/internal/gcimporter/testdata/issue57015.go
@@ -0,0 +1,16 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue57015
+
+type E error
+
+type X[T any] struct {}
+
+func F() X[interface {
+ E
+}] {
+ panic(0)
+}
+
diff --git a/internal/gcimporter/testdata/issue58296/a/a.go b/internal/gcimporter/testdata/issue58296/a/a.go
new file mode 100644
index 000000000..236978a5c
--- /dev/null
+++ b/internal/gcimporter/testdata/issue58296/a/a.go
@@ -0,0 +1,9 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type A int
+
+func (A) f() {}
diff --git a/internal/gcimporter/testdata/issue58296/b/b.go b/internal/gcimporter/testdata/issue58296/b/b.go
new file mode 100644
index 000000000..8886ca571
--- /dev/null
+++ b/internal/gcimporter/testdata/issue58296/b/b.go
@@ -0,0 +1,11 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package b
+
+import "./a"
+
+type B struct {
+ a a.A
+}
diff --git a/internal/gcimporter/testdata/issue58296/c/c.go b/internal/gcimporter/testdata/issue58296/c/c.go
new file mode 100644
index 000000000..bad8be81d
--- /dev/null
+++ b/internal/gcimporter/testdata/issue58296/c/c.go
@@ -0,0 +1,11 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package c
+
+import "./b"
+
+type C struct {
+ b b.B
+}
diff --git a/go/internal/gcimporter/testdata/p.go b/internal/gcimporter/testdata/p.go
index 9e2e70576..9e2e70576 100644
--- a/go/internal/gcimporter/testdata/p.go
+++ b/internal/gcimporter/testdata/p.go
diff --git a/go/internal/gcimporter/testdata/versions/test.go b/internal/gcimporter/testdata/versions/test.go
index 6362adc21..6362adc21 100644
--- a/go/internal/gcimporter/testdata/versions/test.go
+++ b/internal/gcimporter/testdata/versions/test.go
diff --git a/go/internal/gcimporter/testdata/versions/test_go1.11_0i.a b/internal/gcimporter/testdata/versions/test_go1.11_0i.a
index b00fefed0..b00fefed0 100644
--- a/go/internal/gcimporter/testdata/versions/test_go1.11_0i.a
+++ b/internal/gcimporter/testdata/versions/test_go1.11_0i.a
Binary files differ
diff --git a/go/internal/gcimporter/testdata/versions/test_go1.11_6b.a b/internal/gcimporter/testdata/versions/test_go1.11_6b.a
index c0a211e91..c0a211e91 100644
--- a/go/internal/gcimporter/testdata/versions/test_go1.11_6b.a
+++ b/internal/gcimporter/testdata/versions/test_go1.11_6b.a
Binary files differ
diff --git a/go/internal/gcimporter/testdata/versions/test_go1.11_999b.a b/internal/gcimporter/testdata/versions/test_go1.11_999b.a
index c35d22dce..c35d22dce 100644
--- a/go/internal/gcimporter/testdata/versions/test_go1.11_999b.a
+++ b/internal/gcimporter/testdata/versions/test_go1.11_999b.a
Binary files differ
diff --git a/go/internal/gcimporter/testdata/versions/test_go1.11_999i.a b/internal/gcimporter/testdata/versions/test_go1.11_999i.a
index 99401d7c3..99401d7c3 100644
--- a/go/internal/gcimporter/testdata/versions/test_go1.11_999i.a
+++ b/internal/gcimporter/testdata/versions/test_go1.11_999i.a
Binary files differ
diff --git a/go/internal/gcimporter/testdata/versions/test_go1.7_0.a b/internal/gcimporter/testdata/versions/test_go1.7_0.a
index edb6c3f25..edb6c3f25 100644
--- a/go/internal/gcimporter/testdata/versions/test_go1.7_0.a
+++ b/internal/gcimporter/testdata/versions/test_go1.7_0.a
Binary files differ
diff --git a/go/internal/gcimporter/testdata/versions/test_go1.7_1.a b/internal/gcimporter/testdata/versions/test_go1.7_1.a
index 554d04a72..554d04a72 100644
--- a/go/internal/gcimporter/testdata/versions/test_go1.7_1.a
+++ b/internal/gcimporter/testdata/versions/test_go1.7_1.a
Binary files differ
diff --git a/go/internal/gcimporter/testdata/versions/test_go1.8_4.a b/internal/gcimporter/testdata/versions/test_go1.8_4.a
index 26b853165..26b853165 100644
--- a/go/internal/gcimporter/testdata/versions/test_go1.8_4.a
+++ b/internal/gcimporter/testdata/versions/test_go1.8_4.a
Binary files differ
diff --git a/go/internal/gcimporter/testdata/versions/test_go1.8_5.a b/internal/gcimporter/testdata/versions/test_go1.8_5.a
index 60e52efea..60e52efea 100644
--- a/go/internal/gcimporter/testdata/versions/test_go1.8_5.a
+++ b/internal/gcimporter/testdata/versions/test_go1.8_5.a
Binary files differ
diff --git a/internal/gcimporter/unified_no.go b/internal/gcimporter/unified_no.go
new file mode 100644
index 000000000..286bf4454
--- /dev/null
+++ b/internal/gcimporter/unified_no.go
@@ -0,0 +1,10 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !(go1.18 && goexperiment.unified)
+// +build !go1.18 !goexperiment.unified
+
+package gcimporter
+
+const unifiedIR = false
diff --git a/internal/gcimporter/unified_yes.go b/internal/gcimporter/unified_yes.go
new file mode 100644
index 000000000..b5d69ffbe
--- /dev/null
+++ b/internal/gcimporter/unified_yes.go
@@ -0,0 +1,10 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18 && goexperiment.unified
+// +build go1.18,goexperiment.unified
+
+package gcimporter
+
+const unifiedIR = true
diff --git a/internal/gcimporter/ureader_no.go b/internal/gcimporter/ureader_no.go
new file mode 100644
index 000000000..8eb20729c
--- /dev/null
+++ b/internal/gcimporter/ureader_no.go
@@ -0,0 +1,19 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.18
+// +build !go1.18
+
+package gcimporter
+
+import (
+ "fmt"
+ "go/token"
+ "go/types"
+)
+
+func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
+ err = fmt.Errorf("go/tools compiled with a Go version earlier than 1.18 cannot read unified IR export data")
+ return
+}
diff --git a/internal/gcimporter/ureader_yes.go b/internal/gcimporter/ureader_yes.go
new file mode 100644
index 000000000..34fc783f8
--- /dev/null
+++ b/internal/gcimporter/ureader_yes.go
@@ -0,0 +1,719 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Derived from go/internal/gcimporter/ureader.go
+
+//go:build go1.18
+// +build go1.18
+
+package gcimporter
+
+import (
+ "go/token"
+ "go/types"
+ "sort"
+ "strings"
+
+ "golang.org/x/tools/internal/pkgbits"
+)
+
+// A pkgReader holds the shared state for reading a unified IR package
+// description.
+type pkgReader struct {
+ pkgbits.PkgDecoder
+
+ fake fakeFileSet
+
+ ctxt *types.Context
+ imports map[string]*types.Package // previously imported packages, indexed by path
+
+ // lazily initialized arrays corresponding to the unified IR
+ // PosBase, Pkg, and Type sections, respectively.
+ posBases []string // position bases (i.e., file names)
+ pkgs []*types.Package
+ typs []types.Type
+
+ // laterFns holds functions that need to be invoked at the end of
+ // import reading.
+ laterFns []func()
+ // laterFors is used in case of 'type A B' to ensure that B is processed before A.
+ laterFors map[types.Type]int
+
+ // ifaces holds a list of constructed Interfaces, which need to have
+ // Complete called after importing is done.
+ ifaces []*types.Interface
+}
+
+// later adds a function to be invoked at the end of import reading.
+func (pr *pkgReader) later(fn func()) {
+ pr.laterFns = append(pr.laterFns, fn)
+}
+
+// See cmd/compile/internal/noder.derivedInfo.
+type derivedInfo struct {
+ idx pkgbits.Index
+ needed bool
+}
+
+// See cmd/compile/internal/noder.typeInfo.
+type typeInfo struct {
+ idx pkgbits.Index
+ derived bool
+}
+
+func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
+ s := string(data)
+ s = s[:strings.LastIndex(s, "\n$$\n")]
+ input := pkgbits.NewPkgDecoder(path, s)
+ pkg = readUnifiedPackage(fset, nil, imports, input)
+ return
+}
+
+// laterFor adds a function to be invoked at the end of import reading, and records the type that function is finishing.
+func (pr *pkgReader) laterFor(t types.Type, fn func()) {
+ if pr.laterFors == nil {
+ pr.laterFors = make(map[types.Type]int)
+ }
+ pr.laterFors[t] = len(pr.laterFns)
+ pr.laterFns = append(pr.laterFns, fn)
+}
+
+// readUnifiedPackage reads a package description from the given
+// unified IR export data decoder.
+func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[string]*types.Package, input pkgbits.PkgDecoder) *types.Package {
+ pr := pkgReader{
+ PkgDecoder: input,
+
+ fake: fakeFileSet{
+ fset: fset,
+ files: make(map[string]*fileInfo),
+ },
+
+ ctxt: ctxt,
+ imports: imports,
+
+ posBases: make([]string, input.NumElems(pkgbits.RelocPosBase)),
+ pkgs: make([]*types.Package, input.NumElems(pkgbits.RelocPkg)),
+ typs: make([]types.Type, input.NumElems(pkgbits.RelocType)),
+ }
+ defer pr.fake.setLines()
+
+ r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic)
+ pkg := r.pkg()
+ r.Bool() // has init
+
+ for i, n := 0, r.Len(); i < n; i++ {
+ // As if r.obj(), but avoiding the Scope.Lookup call,
+ // to avoid eager loading of imports.
+ r.Sync(pkgbits.SyncObject)
+ assert(!r.Bool())
+ r.p.objIdx(r.Reloc(pkgbits.RelocObj))
+ assert(r.Len() == 0)
+ }
+
+ r.Sync(pkgbits.SyncEOF)
+
+ for _, fn := range pr.laterFns {
+ fn()
+ }
+
+ for _, iface := range pr.ifaces {
+ iface.Complete()
+ }
+
+ // Imports() of pkg are all of the transitive packages that were loaded.
+ var imps []*types.Package
+ for _, imp := range pr.pkgs {
+ if imp != nil && imp != pkg {
+ imps = append(imps, imp)
+ }
+ }
+ sort.Sort(byPath(imps))
+ pkg.SetImports(imps)
+
+ pkg.MarkComplete()
+ return pkg
+}
+
+// A reader holds the state for reading a single unified IR element
+// within a package.
+type reader struct {
+ pkgbits.Decoder
+
+ p *pkgReader
+
+ dict *readerDict
+}
+
+// A readerDict holds the state for type parameters that parameterize
+// the current unified IR element.
+type readerDict struct {
+ // bounds is a slice of typeInfos corresponding to the underlying
+ // bounds of the element's type parameters.
+ bounds []typeInfo
+
+ // tparams is a slice of the constructed TypeParams for the element.
+ tparams []*types.TypeParam
+
+ // devived is a slice of types derived from tparams, which may be
+ // instantiated while reading the current element.
+ derived []derivedInfo
+ derivedTypes []types.Type // lazily instantiated from derived
+}
+
+func (pr *pkgReader) newReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader {
+ return &reader{
+ Decoder: pr.NewDecoder(k, idx, marker),
+ p: pr,
+ }
+}
+
+func (pr *pkgReader) tempReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader {
+ return &reader{
+ Decoder: pr.TempDecoder(k, idx, marker),
+ p: pr,
+ }
+}
+
+func (pr *pkgReader) retireReader(r *reader) {
+ pr.RetireDecoder(&r.Decoder)
+}
+
+// @@@ Positions
+
+func (r *reader) pos() token.Pos {
+ r.Sync(pkgbits.SyncPos)
+ if !r.Bool() {
+ return token.NoPos
+ }
+
+ // TODO(mdempsky): Delta encoding.
+ posBase := r.posBase()
+ line := r.Uint()
+ col := r.Uint()
+ return r.p.fake.pos(posBase, int(line), int(col))
+}
+
+func (r *reader) posBase() string {
+ return r.p.posBaseIdx(r.Reloc(pkgbits.RelocPosBase))
+}
+
+func (pr *pkgReader) posBaseIdx(idx pkgbits.Index) string {
+ if b := pr.posBases[idx]; b != "" {
+ return b
+ }
+
+ var filename string
+ {
+ r := pr.tempReader(pkgbits.RelocPosBase, idx, pkgbits.SyncPosBase)
+
+ // Within types2, position bases have a lot more details (e.g.,
+ // keeping track of where //line directives appeared exactly).
+ //
+ // For go/types, we just track the file name.
+
+ filename = r.String()
+
+ if r.Bool() { // file base
+ // Was: "b = token.NewTrimmedFileBase(filename, true)"
+ } else { // line base
+ pos := r.pos()
+ line := r.Uint()
+ col := r.Uint()
+
+ // Was: "b = token.NewLineBase(pos, filename, true, line, col)"
+ _, _, _ = pos, line, col
+ }
+ pr.retireReader(r)
+ }
+ b := filename
+ pr.posBases[idx] = b
+ return b
+}
+
+// @@@ Packages
+
+func (r *reader) pkg() *types.Package {
+ r.Sync(pkgbits.SyncPkg)
+ return r.p.pkgIdx(r.Reloc(pkgbits.RelocPkg))
+}
+
+func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Package {
+ // TODO(mdempsky): Consider using some non-nil pointer to indicate
+ // the universe scope, so we don't need to keep re-reading it.
+ if pkg := pr.pkgs[idx]; pkg != nil {
+ return pkg
+ }
+
+ pkg := pr.newReader(pkgbits.RelocPkg, idx, pkgbits.SyncPkgDef).doPkg()
+ pr.pkgs[idx] = pkg
+ return pkg
+}
+
+func (r *reader) doPkg() *types.Package {
+ path := r.String()
+ switch path {
+ case "":
+ path = r.p.PkgPath()
+ case "builtin":
+ return nil // universe
+ case "unsafe":
+ return types.Unsafe
+ }
+
+ if pkg := r.p.imports[path]; pkg != nil {
+ return pkg
+ }
+
+ name := r.String()
+
+ pkg := types.NewPackage(path, name)
+ r.p.imports[path] = pkg
+
+ return pkg
+}
+
+// @@@ Types
+
+func (r *reader) typ() types.Type {
+ return r.p.typIdx(r.typInfo(), r.dict)
+}
+
+func (r *reader) typInfo() typeInfo {
+ r.Sync(pkgbits.SyncType)
+ if r.Bool() {
+ return typeInfo{idx: pkgbits.Index(r.Len()), derived: true}
+ }
+ return typeInfo{idx: r.Reloc(pkgbits.RelocType), derived: false}
+}
+
+func (pr *pkgReader) typIdx(info typeInfo, dict *readerDict) types.Type {
+ idx := info.idx
+ var where *types.Type
+ if info.derived {
+ where = &dict.derivedTypes[idx]
+ idx = dict.derived[idx].idx
+ } else {
+ where = &pr.typs[idx]
+ }
+
+ if typ := *where; typ != nil {
+ return typ
+ }
+
+ var typ types.Type
+ {
+ r := pr.tempReader(pkgbits.RelocType, idx, pkgbits.SyncTypeIdx)
+ r.dict = dict
+
+ typ = r.doTyp()
+ assert(typ != nil)
+ pr.retireReader(r)
+ }
+ // See comment in pkgReader.typIdx explaining how this happens.
+ if prev := *where; prev != nil {
+ return prev
+ }
+
+ *where = typ
+ return typ
+}
+
+func (r *reader) doTyp() (res types.Type) {
+ switch tag := pkgbits.CodeType(r.Code(pkgbits.SyncType)); tag {
+ default:
+ errorf("unhandled type tag: %v", tag)
+ panic("unreachable")
+
+ case pkgbits.TypeBasic:
+ return types.Typ[r.Len()]
+
+ case pkgbits.TypeNamed:
+ obj, targs := r.obj()
+ name := obj.(*types.TypeName)
+ if len(targs) != 0 {
+ t, _ := types.Instantiate(r.p.ctxt, name.Type(), targs, false)
+ return t
+ }
+ return name.Type()
+
+ case pkgbits.TypeTypeParam:
+ return r.dict.tparams[r.Len()]
+
+ case pkgbits.TypeArray:
+ len := int64(r.Uint64())
+ return types.NewArray(r.typ(), len)
+ case pkgbits.TypeChan:
+ dir := types.ChanDir(r.Len())
+ return types.NewChan(dir, r.typ())
+ case pkgbits.TypeMap:
+ return types.NewMap(r.typ(), r.typ())
+ case pkgbits.TypePointer:
+ return types.NewPointer(r.typ())
+ case pkgbits.TypeSignature:
+ return r.signature(nil, nil, nil)
+ case pkgbits.TypeSlice:
+ return types.NewSlice(r.typ())
+ case pkgbits.TypeStruct:
+ return r.structType()
+ case pkgbits.TypeInterface:
+ return r.interfaceType()
+ case pkgbits.TypeUnion:
+ return r.unionType()
+ }
+}
+
+func (r *reader) structType() *types.Struct {
+ fields := make([]*types.Var, r.Len())
+ var tags []string
+ for i := range fields {
+ pos := r.pos()
+ pkg, name := r.selector()
+ ftyp := r.typ()
+ tag := r.String()
+ embedded := r.Bool()
+
+ fields[i] = types.NewField(pos, pkg, name, ftyp, embedded)
+ if tag != "" {
+ for len(tags) < i {
+ tags = append(tags, "")
+ }
+ tags = append(tags, tag)
+ }
+ }
+ return types.NewStruct(fields, tags)
+}
+
+func (r *reader) unionType() *types.Union {
+ terms := make([]*types.Term, r.Len())
+ for i := range terms {
+ terms[i] = types.NewTerm(r.Bool(), r.typ())
+ }
+ return types.NewUnion(terms)
+}
+
+func (r *reader) interfaceType() *types.Interface {
+ methods := make([]*types.Func, r.Len())
+ embeddeds := make([]types.Type, r.Len())
+ implicit := len(methods) == 0 && len(embeddeds) == 1 && r.Bool()
+
+ for i := range methods {
+ pos := r.pos()
+ pkg, name := r.selector()
+ mtyp := r.signature(nil, nil, nil)
+ methods[i] = types.NewFunc(pos, pkg, name, mtyp)
+ }
+
+ for i := range embeddeds {
+ embeddeds[i] = r.typ()
+ }
+
+ iface := types.NewInterfaceType(methods, embeddeds)
+ if implicit {
+ iface.MarkImplicit()
+ }
+
+ // We need to call iface.Complete(), but if there are any embedded
+ // defined types, then we may not have set their underlying
+ // interface type yet. So we need to defer calling Complete until
+ // after we've called SetUnderlying everywhere.
+ //
+ // TODO(mdempsky): After CL 424876 lands, it should be safe to call
+ // iface.Complete() immediately.
+ r.p.ifaces = append(r.p.ifaces, iface)
+
+ return iface
+}
+
+func (r *reader) signature(recv *types.Var, rtparams, tparams []*types.TypeParam) *types.Signature {
+ r.Sync(pkgbits.SyncSignature)
+
+ params := r.params()
+ results := r.params()
+ variadic := r.Bool()
+
+ return types.NewSignatureType(recv, rtparams, tparams, params, results, variadic)
+}
+
+func (r *reader) params() *types.Tuple {
+ r.Sync(pkgbits.SyncParams)
+
+ params := make([]*types.Var, r.Len())
+ for i := range params {
+ params[i] = r.param()
+ }
+
+ return types.NewTuple(params...)
+}
+
+func (r *reader) param() *types.Var {
+ r.Sync(pkgbits.SyncParam)
+
+ pos := r.pos()
+ pkg, name := r.localIdent()
+ typ := r.typ()
+
+ return types.NewParam(pos, pkg, name, typ)
+}
+
+// @@@ Objects
+
+func (r *reader) obj() (types.Object, []types.Type) {
+ r.Sync(pkgbits.SyncObject)
+
+ assert(!r.Bool())
+
+ pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj))
+ obj := pkgScope(pkg).Lookup(name)
+
+ targs := make([]types.Type, r.Len())
+ for i := range targs {
+ targs[i] = r.typ()
+ }
+
+ return obj, targs
+}
+
+func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {
+
+ var objPkg *types.Package
+ var objName string
+ var tag pkgbits.CodeObj
+ {
+ rname := pr.tempReader(pkgbits.RelocName, idx, pkgbits.SyncObject1)
+
+ objPkg, objName = rname.qualifiedIdent()
+ assert(objName != "")
+
+ tag = pkgbits.CodeObj(rname.Code(pkgbits.SyncCodeObj))
+ pr.retireReader(rname)
+ }
+
+ if tag == pkgbits.ObjStub {
+ assert(objPkg == nil || objPkg == types.Unsafe)
+ return objPkg, objName
+ }
+
+ // Ignore local types promoted to global scope (#55110).
+ if _, suffix := splitVargenSuffix(objName); suffix != "" {
+ return objPkg, objName
+ }
+
+ if objPkg.Scope().Lookup(objName) == nil {
+ dict := pr.objDictIdx(idx)
+
+ r := pr.newReader(pkgbits.RelocObj, idx, pkgbits.SyncObject1)
+ r.dict = dict
+
+ declare := func(obj types.Object) {
+ objPkg.Scope().Insert(obj)
+ }
+
+ switch tag {
+ default:
+ panic("weird")
+
+ case pkgbits.ObjAlias:
+ pos := r.pos()
+ typ := r.typ()
+ declare(types.NewTypeName(pos, objPkg, objName, typ))
+
+ case pkgbits.ObjConst:
+ pos := r.pos()
+ typ := r.typ()
+ val := r.Value()
+ declare(types.NewConst(pos, objPkg, objName, typ, val))
+
+ case pkgbits.ObjFunc:
+ pos := r.pos()
+ tparams := r.typeParamNames()
+ sig := r.signature(nil, nil, tparams)
+ declare(types.NewFunc(pos, objPkg, objName, sig))
+
+ case pkgbits.ObjType:
+ pos := r.pos()
+
+ obj := types.NewTypeName(pos, objPkg, objName, nil)
+ named := types.NewNamed(obj, nil, nil)
+ declare(obj)
+
+ named.SetTypeParams(r.typeParamNames())
+
+ setUnderlying := func(underlying types.Type) {
+ // If the underlying type is an interface, we need to
+ // duplicate its methods so we can replace the receiver
+ // parameter's type (#49906).
+ if iface, ok := underlying.(*types.Interface); ok && iface.NumExplicitMethods() != 0 {
+ methods := make([]*types.Func, iface.NumExplicitMethods())
+ for i := range methods {
+ fn := iface.ExplicitMethod(i)
+ sig := fn.Type().(*types.Signature)
+
+ recv := types.NewVar(fn.Pos(), fn.Pkg(), "", named)
+ methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignature(recv, sig.Params(), sig.Results(), sig.Variadic()))
+ }
+
+ embeds := make([]types.Type, iface.NumEmbeddeds())
+ for i := range embeds {
+ embeds[i] = iface.EmbeddedType(i)
+ }
+
+ newIface := types.NewInterfaceType(methods, embeds)
+ r.p.ifaces = append(r.p.ifaces, newIface)
+ underlying = newIface
+ }
+
+ named.SetUnderlying(underlying)
+ }
+
+ // Since go.dev/cl/455279, we can assume rhs.Underlying() will
+ // always be non-nil. However, to temporarily support users of
+ // older snapshot releases, we continue to fallback to the old
+ // behavior for now.
+ //
+ // TODO(mdempsky): Remove fallback code and simplify after
+ // allowing time for snapshot users to upgrade.
+ rhs := r.typ()
+ if underlying := rhs.Underlying(); underlying != nil {
+ setUnderlying(underlying)
+ } else {
+ pk := r.p
+ pk.laterFor(named, func() {
+ // First be sure that the rhs is initialized, if it needs to be initialized.
+ delete(pk.laterFors, named) // prevent cycles
+ if i, ok := pk.laterFors[rhs]; ok {
+ f := pk.laterFns[i]
+ pk.laterFns[i] = func() {} // function is running now, so replace it with a no-op
+ f() // initialize RHS
+ }
+ setUnderlying(rhs.Underlying())
+ })
+ }
+
+ for i, n := 0, r.Len(); i < n; i++ {
+ named.AddMethod(r.method())
+ }
+
+ case pkgbits.ObjVar:
+ pos := r.pos()
+ typ := r.typ()
+ declare(types.NewVar(pos, objPkg, objName, typ))
+ }
+ }
+
+ return objPkg, objName
+}
+
+func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict {
+
+ var dict readerDict
+
+ {
+ r := pr.tempReader(pkgbits.RelocObjDict, idx, pkgbits.SyncObject1)
+ if implicits := r.Len(); implicits != 0 {
+ errorf("unexpected object with %v implicit type parameter(s)", implicits)
+ }
+
+ dict.bounds = make([]typeInfo, r.Len())
+ for i := range dict.bounds {
+ dict.bounds[i] = r.typInfo()
+ }
+
+ dict.derived = make([]derivedInfo, r.Len())
+ dict.derivedTypes = make([]types.Type, len(dict.derived))
+ for i := range dict.derived {
+ dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()}
+ }
+
+ pr.retireReader(r)
+ }
+ // function references follow, but reader doesn't need those
+
+ return &dict
+}
+
+func (r *reader) typeParamNames() []*types.TypeParam {
+ r.Sync(pkgbits.SyncTypeParamNames)
+
+ // Note: This code assumes it only processes objects without
+ // implement type parameters. This is currently fine, because
+ // reader is only used to read in exported declarations, which are
+ // always package scoped.
+
+ if len(r.dict.bounds) == 0 {
+ return nil
+ }
+
+ // Careful: Type parameter lists may have cycles. To allow for this,
+ // we construct the type parameter list in two passes: first we
+ // create all the TypeNames and TypeParams, then we construct and
+ // set the bound type.
+
+ r.dict.tparams = make([]*types.TypeParam, len(r.dict.bounds))
+ for i := range r.dict.bounds {
+ pos := r.pos()
+ pkg, name := r.localIdent()
+
+ tname := types.NewTypeName(pos, pkg, name, nil)
+ r.dict.tparams[i] = types.NewTypeParam(tname, nil)
+ }
+
+ typs := make([]types.Type, len(r.dict.bounds))
+ for i, bound := range r.dict.bounds {
+ typs[i] = r.p.typIdx(bound, r.dict)
+ }
+
+ // TODO(mdempsky): This is subtle, elaborate further.
+ //
+ // We have to save tparams outside of the closure, because
+ // typeParamNames() can be called multiple times with the same
+ // dictionary instance.
+ //
+ // Also, this needs to happen later to make sure SetUnderlying has
+ // been called.
+ //
+ // TODO(mdempsky): Is it safe to have a single "later" slice or do
+ // we need to have multiple passes? See comments on CL 386002 and
+ // go.dev/issue/52104.
+ tparams := r.dict.tparams
+ r.p.later(func() {
+ for i, typ := range typs {
+ tparams[i].SetConstraint(typ)
+ }
+ })
+
+ return r.dict.tparams
+}
+
+func (r *reader) method() *types.Func {
+ r.Sync(pkgbits.SyncMethod)
+ pos := r.pos()
+ pkg, name := r.selector()
+
+ rparams := r.typeParamNames()
+ sig := r.signature(r.param(), rparams, nil)
+
+ _ = r.pos() // TODO(mdempsky): Remove; this is a hacker for linker.go.
+ return types.NewFunc(pos, pkg, name, sig)
+}
+
+func (r *reader) qualifiedIdent() (*types.Package, string) { return r.ident(pkgbits.SyncSym) }
+func (r *reader) localIdent() (*types.Package, string) { return r.ident(pkgbits.SyncLocalIdent) }
+func (r *reader) selector() (*types.Package, string) { return r.ident(pkgbits.SyncSelector) }
+
+func (r *reader) ident(marker pkgbits.SyncMarker) (*types.Package, string) {
+ r.Sync(marker)
+ return r.pkg(), r.String()
+}
+
+// pkgScope returns pkg.Scope().
+// If pkg is nil, it returns types.Universe instead.
+//
+// TODO(mdempsky): Remove after x/tools can depend on Go 1.19.
+func pkgScope(pkg *types.Package) *types.Scope {
+ if pkg != nil {
+ return pkg.Scope()
+ }
+ return types.Universe
+}
diff --git a/internal/gocommand/invoke.go b/internal/gocommand/invoke.go
index f75336834..d50551693 100644
--- a/internal/gocommand/invoke.go
+++ b/internal/gocommand/invoke.go
@@ -10,8 +10,10 @@ import (
"context"
"fmt"
"io"
+ "log"
"os"
"regexp"
+ "runtime"
"strconv"
"strings"
"sync"
@@ -232,6 +234,12 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
return runCmdContext(ctx, cmd)
}
+// DebugHangingGoCommands may be set by tests to enable additional
+// instrumentation (including panics) for debugging hanging Go commands.
+//
+// See golang/go#54461 for details.
+var DebugHangingGoCommands = false
+
// runCmdContext is like exec.CommandContext except it sends os.Interrupt
// before os.Kill.
func runCmdContext(ctx context.Context, cmd *exec.Cmd) error {
@@ -243,11 +251,24 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) error {
resChan <- cmd.Wait()
}()
- select {
- case err := <-resChan:
- return err
- case <-ctx.Done():
+ // If we're interested in debugging hanging Go commands, stop waiting after a
+ // minute and panic with interesting information.
+ if DebugHangingGoCommands {
+ select {
+ case err := <-resChan:
+ return err
+ case <-time.After(1 * time.Minute):
+ HandleHangingGoCommand(cmd.Process)
+ case <-ctx.Done():
+ }
+ } else {
+ select {
+ case err := <-resChan:
+ return err
+ case <-ctx.Done():
+ }
}
+
// Cancelled. Interrupt and see if it ends voluntarily.
cmd.Process.Signal(os.Interrupt)
select {
@@ -255,17 +276,71 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) error {
return err
case <-time.After(time.Second):
}
+
// Didn't shut down in response to interrupt. Kill it hard.
- cmd.Process.Kill()
+ // TODO(rfindley): per advice from bcmills@, it may be better to send SIGQUIT
+ // on certain platforms, such as unix.
+ if err := cmd.Process.Kill(); err != nil && DebugHangingGoCommands {
+ // Don't panic here as this reliably fails on windows with EINVAL.
+ log.Printf("error killing the Go command: %v", err)
+ }
+
+ // See above: don't wait indefinitely if we're debugging hanging Go commands.
+ if DebugHangingGoCommands {
+ select {
+ case err := <-resChan:
+ return err
+ case <-time.After(10 * time.Second): // a shorter wait as resChan should return quickly following Kill
+ HandleHangingGoCommand(cmd.Process)
+ }
+ }
return <-resChan
}
+func HandleHangingGoCommand(proc *os.Process) {
+ switch runtime.GOOS {
+ case "linux", "darwin", "freebsd", "netbsd":
+ fmt.Fprintln(os.Stderr, `DETECTED A HANGING GO COMMAND
+
+The gopls test runner has detected a hanging go command. In order to debug
+this, the output of ps and lsof/fstat is printed below.
+
+See golang/go#54461 for more details.`)
+
+ fmt.Fprintln(os.Stderr, "\nps axo ppid,pid,command:")
+ fmt.Fprintln(os.Stderr, "-------------------------")
+ psCmd := exec.Command("ps", "axo", "ppid,pid,command")
+ psCmd.Stdout = os.Stderr
+ psCmd.Stderr = os.Stderr
+ if err := psCmd.Run(); err != nil {
+ panic(fmt.Sprintf("running ps: %v", err))
+ }
+
+ listFiles := "lsof"
+ if runtime.GOOS == "freebsd" || runtime.GOOS == "netbsd" {
+ listFiles = "fstat"
+ }
+
+ fmt.Fprintln(os.Stderr, "\n"+listFiles+":")
+ fmt.Fprintln(os.Stderr, "-----")
+ listFilesCmd := exec.Command(listFiles)
+ listFilesCmd.Stdout = os.Stderr
+ listFilesCmd.Stderr = os.Stderr
+ if err := listFilesCmd.Run(); err != nil {
+ panic(fmt.Sprintf("running %s: %v", listFiles, err))
+ }
+ }
+ panic(fmt.Sprintf("detected hanging go command (pid %d): see golang/go#54461 for more details", proc.Pid))
+}
+
func cmdDebugStr(cmd *exec.Cmd) string {
env := make(map[string]string)
for _, kv := range cmd.Env {
split := strings.SplitN(kv, "=", 2)
- k, v := split[0], split[1]
- env[k] = v
+ if len(split) == 2 {
+ k, v := split[0], split[1]
+ env[k] = v
+ }
}
var args []string
diff --git a/internal/gocommand/version.go b/internal/gocommand/version.go
index 713043680..307a76d47 100644
--- a/internal/gocommand/version.go
+++ b/internal/gocommand/version.go
@@ -7,11 +7,19 @@ package gocommand
import (
"context"
"fmt"
+ "regexp"
"strings"
)
-// GoVersion checks the go version by running "go list" with modules off.
-// It returns the X in Go 1.X.
+// GoVersion reports the minor version number of the highest release
+// tag built into the go command on the PATH.
+//
+// Note that this may be higher than the version of the go tool used
+// to build this application, and thus the versions of the standard
+// go/{scanner,parser,ast,types} packages that are linked into it.
+// In that case, callers should either downgrade to the version of
+// go used to build the application, or report an error that the
+// application is too old to use the go command on the PATH.
func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) {
inv.Verb = "list"
inv.Args = []string{"-e", "-f", `{{context.ReleaseTags}}`, `--`, `unsafe`}
@@ -38,7 +46,7 @@ func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) {
if len(stdout) < 3 {
return 0, fmt.Errorf("bad ReleaseTags output: %q", stdout)
}
- // Split up "[go1.1 go1.15]"
+ // Split up "[go1.1 go1.15]" and return highest go1.X value.
tags := strings.Fields(stdout[1 : len(stdout)-2])
for i := len(tags) - 1; i >= 0; i-- {
var version int
@@ -49,3 +57,25 @@ func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) {
}
return 0, fmt.Errorf("no parseable ReleaseTags in %v", tags)
}
+
+// GoVersionOutput returns the complete output of the go version command.
+func GoVersionOutput(ctx context.Context, inv Invocation, r *Runner) (string, error) {
+ inv.Verb = "version"
+ goVersion, err := r.Run(ctx, inv)
+ if err != nil {
+ return "", err
+ }
+ return goVersion.String(), nil
+}
+
+// ParseGoVersionOutput extracts the Go version string
+// from the output of the "go version" command.
+// Given an unrecognized form, it returns an empty string.
+func ParseGoVersionOutput(data string) string {
+ re := regexp.MustCompile(`^go version (go\S+|devel \S+)`)
+ m := re.FindStringSubmatch(data)
+ if len(m) != 2 {
+ return "" // unrecognized version
+ }
+ return m[1]
+}
diff --git a/internal/gocommand/version_test.go b/internal/gocommand/version_test.go
new file mode 100644
index 000000000..27016e4c0
--- /dev/null
+++ b/internal/gocommand/version_test.go
@@ -0,0 +1,31 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gocommand
+
+import (
+ "strconv"
+ "testing"
+)
+
+func TestParseGoVersionOutput(t *testing.T) {
+ tests := []struct {
+ args string
+ want string
+ }{
+ {"go version go1.12 linux/amd64", "go1.12"},
+ {"go version go1.18.1 darwin/amd64", "go1.18.1"},
+ {"go version go1.19.rc1 windows/arm64", "go1.19.rc1"},
+ {"go version devel d5de62df152baf4de6e9fe81933319b86fd95ae4 linux/386", "devel d5de62df152baf4de6e9fe81933319b86fd95ae4"},
+ {"go version devel go1.20-1f068f0dc7 Tue Oct 18 20:58:37 2022 +0000 darwin/amd64", "devel go1.20-1f068f0dc7"},
+ {"v1.19.1 foo/bar", ""},
+ }
+ for i, tt := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ if got := ParseGoVersionOutput(tt.args); got != tt.want {
+ t.Errorf("parseGoVersionOutput() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/internal/gopathwalk/walk.go b/internal/gopathwalk/walk.go
index 925ff5356..168405322 100644
--- a/internal/gopathwalk/walk.go
+++ b/internal/gopathwalk/walk.go
@@ -175,8 +175,8 @@ func (w *walker) shouldSkipDir(fi os.FileInfo, dir string) bool {
// walk walks through the given path.
func (w *walker) walk(path string, typ os.FileMode) error {
- dir := filepath.Dir(path)
if typ.IsRegular() {
+ dir := filepath.Dir(path)
if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) {
// Doesn't make sense to have regular files
// directly in your $GOPATH/src or $GOROOT/src.
@@ -209,12 +209,7 @@ func (w *walker) walk(path string, typ os.FileMode) error {
// Emacs noise.
return nil
}
- fi, err := os.Lstat(path)
- if err != nil {
- // Just ignore it.
- return nil
- }
- if w.shouldTraverse(dir, fi) {
+ if w.shouldTraverse(path) {
return fastwalk.ErrTraverseLink
}
}
@@ -224,13 +219,8 @@ func (w *walker) walk(path string, typ os.FileMode) error {
// shouldTraverse reports whether the symlink fi, found in dir,
// should be followed. It makes sure symlinks were never visited
// before to avoid symlink loops.
-func (w *walker) shouldTraverse(dir string, fi os.FileInfo) bool {
- path := filepath.Join(dir, fi.Name())
- target, err := filepath.EvalSymlinks(path)
- if err != nil {
- return false
- }
- ts, err := os.Stat(target)
+func (w *walker) shouldTraverse(path string) bool {
+ ts, err := os.Stat(path)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return false
@@ -238,7 +228,7 @@ func (w *walker) shouldTraverse(dir string, fi os.FileInfo) bool {
if !ts.IsDir() {
return false
}
- if w.shouldSkipDir(ts, dir) {
+ if w.shouldSkipDir(ts, filepath.Dir(path)) {
return false
}
// Check for symlink loops by statting each directory component
diff --git a/internal/gopathwalk/walk_test.go b/internal/gopathwalk/walk_test.go
index 2d887a655..fa4ebdc32 100644
--- a/internal/gopathwalk/walk_test.go
+++ b/internal/gopathwalk/walk_test.go
@@ -81,7 +81,7 @@ func TestShouldTraverse(t *testing.T) {
continue
}
var w walker
- got := w.shouldTraverse(filepath.Join(dir, tt.dir), fi)
+ got := w.shouldTraverse(filepath.Join(dir, tt.dir, fi.Name()))
if got != tt.want {
t.Errorf("%d. shouldTraverse(%q, %q) = %v; want %v", i, tt.dir, tt.file, got, tt.want)
}
diff --git a/internal/goroot/importcfg.go b/internal/goroot/importcfg.go
new file mode 100644
index 000000000..f1cd28e2e
--- /dev/null
+++ b/internal/goroot/importcfg.go
@@ -0,0 +1,71 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package goroot is a copy of package internal/goroot
+// in the main GO repot. It provides a utility to produce
+// an importcfg and import path to package file map mapping
+// standard library packages to the locations of their export
+// data files.
+package goroot
+
+import (
+ "bytes"
+ "fmt"
+ "os/exec"
+ "strings"
+ "sync"
+)
+
+// Importcfg returns an importcfg file to be passed to the
+// Go compiler that contains the cached paths for the .a files for the
+// standard library.
+func Importcfg() (string, error) {
+ var icfg bytes.Buffer
+
+ m, err := PkgfileMap()
+ if err != nil {
+ return "", err
+ }
+ fmt.Fprintf(&icfg, "# import config")
+ for importPath, export := range m {
+ fmt.Fprintf(&icfg, "\npackagefile %s=%s", importPath, export)
+ }
+ s := icfg.String()
+ return s, nil
+}
+
+var (
+ stdlibPkgfileMap map[string]string
+ stdlibPkgfileErr error
+ once sync.Once
+)
+
+// PkgfileMap returns a map of package paths to the location on disk
+// of the .a file for the package.
+// The caller must not modify the map.
+func PkgfileMap() (map[string]string, error) {
+ once.Do(func() {
+ m := make(map[string]string)
+ output, err := exec.Command("go", "list", "-export", "-e", "-f", "{{.ImportPath}} {{.Export}}", "std", "cmd").Output()
+ if err != nil {
+ stdlibPkgfileErr = err
+ }
+ for _, line := range strings.Split(string(output), "\n") {
+ if line == "" {
+ continue
+ }
+ sp := strings.SplitN(line, " ", 2)
+ if len(sp) != 2 {
+ err = fmt.Errorf("determining pkgfile map: invalid line in go list output: %q", line)
+ return
+ }
+ importPath, export := sp[0], sp[1]
+ if export != "" {
+ m[importPath] = export
+ }
+ }
+ stdlibPkgfileMap = m
+ })
+ return stdlibPkgfileMap, stdlibPkgfileErr
+}
diff --git a/internal/imports/fix.go b/internal/imports/fix.go
index d859617b7..642a5ac2d 100644
--- a/internal/imports/fix.go
+++ b/internal/imports/fix.go
@@ -697,6 +697,9 @@ func candidateImportName(pkg *pkg) string {
// GetAllCandidates calls wrapped for each package whose name starts with
// searchPrefix, and can be imported from filename with the package name filePkg.
+//
+// Beware that the wrapped function may be called multiple times concurrently.
+// TODO(adonovan): encapsulate the concurrency.
func GetAllCandidates(ctx context.Context, wrapped func(ImportFix), searchPrefix, filename, filePkg string, env *ProcessEnv) error {
callback := &scanCallback{
rootFound: func(gopathwalk.Root) bool {
@@ -796,7 +799,7 @@ func GetPackageExports(ctx context.Context, wrapped func(PackageExport), searchP
return getCandidatePkgs(ctx, callback, filename, filePkg, env)
}
-var RequiredGoEnvVars = []string{"GO111MODULE", "GOFLAGS", "GOINSECURE", "GOMOD", "GOMODCACHE", "GONOPROXY", "GONOSUMDB", "GOPATH", "GOPROXY", "GOROOT", "GOSUMDB"}
+var requiredGoEnvVars = []string{"GO111MODULE", "GOFLAGS", "GOINSECURE", "GOMOD", "GOMODCACHE", "GONOPROXY", "GONOSUMDB", "GOPATH", "GOPROXY", "GOROOT", "GOSUMDB", "GOWORK"}
// ProcessEnv contains environment variables and settings that affect the use of
// the go command, the go/build package, etc.
@@ -807,6 +810,11 @@ type ProcessEnv struct {
ModFlag string
ModFile string
+ // SkipPathInScan returns true if the path should be skipped from scans of
+ // the RootCurrentModule root type. The function argument is a clean,
+ // absolute path.
+ SkipPathInScan func(string) bool
+
// Env overrides the OS environment, and can be used to specify
// GOPROXY, GO111MODULE, etc. PATH cannot be set here, because
// exec.Command will not honor it.
@@ -861,7 +869,7 @@ func (e *ProcessEnv) init() error {
}
foundAllRequired := true
- for _, k := range RequiredGoEnvVars {
+ for _, k := range requiredGoEnvVars {
if _, ok := e.Env[k]; !ok {
foundAllRequired = false
break
@@ -877,7 +885,7 @@ func (e *ProcessEnv) init() error {
}
goEnv := map[string]string{}
- stdout, err := e.invokeGo(context.TODO(), "env", append([]string{"-json"}, RequiredGoEnvVars...)...)
+ stdout, err := e.invokeGo(context.TODO(), "env", append([]string{"-json"}, requiredGoEnvVars...)...)
if err != nil {
return err
}
@@ -906,7 +914,7 @@ func (e *ProcessEnv) GetResolver() (Resolver, error) {
if err := e.init(); err != nil {
return nil, err
}
- if len(e.Env["GOMOD"]) == 0 {
+ if len(e.Env["GOMOD"]) == 0 && len(e.Env["GOWORK"]) == 0 {
e.resolver = newGopathResolver(e)
return e.resolver, nil
}
@@ -1367,9 +1375,9 @@ func (r *gopathResolver) scan(ctx context.Context, callback *scanCallback) error
return err
}
var roots []gopathwalk.Root
- roots = append(roots, gopathwalk.Root{filepath.Join(goenv["GOROOT"], "src"), gopathwalk.RootGOROOT})
+ roots = append(roots, gopathwalk.Root{Path: filepath.Join(goenv["GOROOT"], "src"), Type: gopathwalk.RootGOROOT})
for _, p := range filepath.SplitList(goenv["GOPATH"]) {
- roots = append(roots, gopathwalk.Root{filepath.Join(p, "src"), gopathwalk.RootGOPATH})
+ roots = append(roots, gopathwalk.Root{Path: filepath.Join(p, "src"), Type: gopathwalk.RootGOPATH})
}
// The callback is not necessarily safe to use in the goroutine below. Process roots eagerly.
roots = filterRoots(roots, callback.rootFound)
diff --git a/internal/imports/fix_test.go b/internal/imports/fix_test.go
index ef0f8ae61..ba81affdb 100644
--- a/internal/imports/fix_test.go
+++ b/internal/imports/fix_test.go
@@ -1719,7 +1719,7 @@ func (t *goimportTest) assertProcessEquals(module, file string, contents []byte,
t.Fatalf("Process() = %v", err)
}
if string(buf) != want {
- t.Errorf("Got:\n%s\nWant:\n%s", buf, want)
+ t.Errorf("Got:\n'%s'\nWant:\n'%s'", buf, want) // 's show empty lines
}
}
@@ -1746,8 +1746,100 @@ const Y = bar.X
}.processTest(t, "foo.com", "test/t.go", nil, nil, want)
}
+func TestPanicAstutils(t *testing.T) {
+ t.Skip("panic in ast/astutil/imports.go, should be PostionFor(,false) at lines 273, 274, at least")
+ const input = `package main
+//line mah.go:600
+
+import (
+"foo.com/a.thing"
+"foo.com/surprise"
+"foo.com/v1"
+"foo.com/other/v2"
+"foo.com/other/v3"
+)
+`
+
+ const want = `package main
+
+//line mah.go:600
+
+import (
+ "foo.com/a.thing"
+ "foo.com/go-thing"
+ gow "foo.com/go-wrong"
+ v2 "foo.com/other/v2"
+ "foo.com/other/v3"
+ bar "foo.com/surprise"
+ v1 "foo.com/v1"
+)
+
+`
+
+ testConfig{
+ module: packagestest.Module{
+ Name: "foo.com",
+ Files: fm{
+ "test/t.go": input,
+ },
+ },
+ }.processTest(t, "foo.com", "test/t.go", nil, nil, want)
+}
+
+// without PositionFor in sortImports this test panics
+func TestPanic51916(t *testing.T) {
+ const input = `package main
+//line mah.go:600
+
+import (
+"foo.com/a.thing"
+"foo.com/surprise"
+"foo.com/v1"
+"foo.com/other/v2"
+"foo.com/other/v3"
+"foo.com/go-thing"
+"foo.com/go-wrong"
+)
+
+var _ = []interface{}{bar.X, v1.Y, a.A, v2.V2, other.V3, thing.Thing, gow.Wrong}`
+
+ const want = `package main
+
+//line mah.go:600
+
+import (
+ "foo.com/a.thing"
+ "foo.com/go-thing"
+ gow "foo.com/go-wrong"
+ v2 "foo.com/other/v2"
+ "foo.com/other/v3"
+ bar "foo.com/surprise"
+ v1 "foo.com/v1"
+)
+
+var _ = []interface{}{bar.X, v1.Y, a.A, v2.V2, other.V3, thing.Thing, gow.Wrong}
+`
+
+ testConfig{
+ module: packagestest.Module{
+ Name: "foo.com",
+ Files: fm{
+ "a.thing/a.go": "package a \n const A = 1",
+ "surprise/x.go": "package bar \n const X = 1",
+ "v1/x.go": "package v1 \n const Y = 1",
+ "other/v2/y.go": "package v2 \n const V2 = 1",
+ "other/v3/z.go": "package other \n const V3 = 1",
+ "go-thing/b.go": "package thing \n const Thing = 1",
+ "go-wrong/b.go": "package gow \n const Wrong = 1",
+ "test/t.go": input,
+ },
+ },
+ }.processTest(t, "foo.com", "test/t.go", nil, nil, want)
+}
+
// Tests that an existing import with badly mismatched path/name has its name
// correctly added. See #28645 and #29041.
+// and check that //line directives are ignored (#51916)
func TestAddNameToMismatchedImport(t *testing.T) {
const input = `package main
diff --git a/internal/imports/imports.go b/internal/imports/imports.go
index 25973989e..95a88383a 100644
--- a/internal/imports/imports.go
+++ b/internal/imports/imports.go
@@ -103,12 +103,17 @@ func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, e
return formatFile(fileSet, file, src, nil, opt)
}
-func formatFile(fileSet *token.FileSet, file *ast.File, src []byte, adjust func(orig []byte, src []byte) []byte, opt *Options) ([]byte, error) {
- mergeImports(fileSet, file)
- sortImports(opt.LocalPrefix, fileSet, file)
- imps := astutil.Imports(fileSet, file)
+// formatFile formats the file syntax tree.
+// It may mutate the token.FileSet.
+//
+// If an adjust function is provided, it is called after formatting
+// with the original source (formatFile's src parameter) and the
+// formatted file, and returns the postpocessed result.
+func formatFile(fset *token.FileSet, file *ast.File, src []byte, adjust func(orig []byte, src []byte) []byte, opt *Options) ([]byte, error) {
+ mergeImports(file)
+ sortImports(opt.LocalPrefix, fset.File(file.Pos()), file)
var spacesBefore []string // import paths we need spaces before
- for _, impSection := range imps {
+ for _, impSection := range astutil.Imports(fset, file) {
// Within each block of contiguous imports, see if any
// import lines are in different group numbers. If so,
// we'll need to put a space between them so it's
@@ -132,7 +137,7 @@ func formatFile(fileSet *token.FileSet, file *ast.File, src []byte, adjust func(
printConfig := &printer.Config{Mode: printerMode, Tabwidth: opt.TabWidth}
var buf bytes.Buffer
- err := printConfig.Fprint(&buf, fileSet, file)
+ err := printConfig.Fprint(&buf, fset, file)
if err != nil {
return nil, err
}
@@ -276,11 +281,11 @@ func cutSpace(b []byte) (before, middle, after []byte) {
}
// matchSpace reformats src to use the same space context as orig.
-// 1) If orig begins with blank lines, matchSpace inserts them at the beginning of src.
-// 2) matchSpace copies the indentation of the first non-blank line in orig
-// to every non-blank line in src.
-// 3) matchSpace copies the trailing space from orig and uses it in place
-// of src's trailing space.
+// 1. If orig begins with blank lines, matchSpace inserts them at the beginning of src.
+// 2. matchSpace copies the indentation of the first non-blank line in orig
+// to every non-blank line in src.
+// 3. matchSpace copies the trailing space from orig and uses it in place
+// of src's trailing space.
func matchSpace(orig []byte, src []byte) []byte {
before, _, after := cutSpace(orig)
i := bytes.LastIndex(before, []byte{'\n'})
diff --git a/internal/imports/mkstdlib.go b/internal/imports/mkstdlib.go
index 47714bf07..470b93f1d 100644
--- a/internal/imports/mkstdlib.go
+++ b/internal/imports/mkstdlib.go
@@ -15,6 +15,7 @@ import (
"bytes"
"fmt"
"go/format"
+ "go/token"
"io"
"io/ioutil"
"log"
@@ -23,8 +24,9 @@ import (
"regexp"
"runtime"
"sort"
+ "strings"
- exec "golang.org/x/sys/execabs"
+ "golang.org/x/tools/go/packages"
)
func mustOpen(name string) io.Reader {
@@ -41,47 +43,29 @@ func api(base string) string {
var sym = regexp.MustCompile(`^pkg (\S+).*?, (?:var|func|type|const) ([A-Z]\w*)`)
-var unsafeSyms = map[string]bool{"Alignof": true, "ArbitraryType": true, "Offsetof": true, "Pointer": true, "Sizeof": true}
-
func main() {
var buf bytes.Buffer
outf := func(format string, args ...interface{}) {
fmt.Fprintf(&buf, format, args...)
}
+ outf(`// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+`)
outf("// Code generated by mkstdlib.go. DO NOT EDIT.\n\n")
outf("package imports\n")
outf("var stdlib = map[string][]string{\n")
- f := io.MultiReader(
- mustOpen(api("go1.txt")),
- mustOpen(api("go1.1.txt")),
- mustOpen(api("go1.2.txt")),
- mustOpen(api("go1.3.txt")),
- mustOpen(api("go1.4.txt")),
- mustOpen(api("go1.5.txt")),
- mustOpen(api("go1.6.txt")),
- mustOpen(api("go1.7.txt")),
- mustOpen(api("go1.8.txt")),
- mustOpen(api("go1.9.txt")),
- mustOpen(api("go1.10.txt")),
- mustOpen(api("go1.11.txt")),
- mustOpen(api("go1.12.txt")),
- mustOpen(api("go1.13.txt")),
- mustOpen(api("go1.14.txt")),
- mustOpen(api("go1.15.txt")),
- mustOpen(api("go1.16.txt")),
- mustOpen(api("go1.17.txt")),
- mustOpen(api("go1.18.txt")),
-
- // The API of the syscall/js package needs to be computed explicitly,
- // because it's not included in the GOROOT/api/go1.*.txt files at this time.
- syscallJSAPI(),
- )
+ f := readAPI()
sc := bufio.NewScanner(f)
+ // The APIs of the syscall/js and unsafe packages need to be computed explicitly,
+ // because they're not included in the GOROOT/api/go1.*.txt files at this time.
pkgs := map[string]map[string]bool{
- "unsafe": unsafeSyms,
+ "syscall/js": syms("syscall/js", "GOOS=js", "GOARCH=wasm"),
+ "unsafe": syms("unsafe"),
}
- paths := []string{"unsafe"}
+ paths := []string{"syscall/js", "unsafe"}
for sc.Scan() {
l := sc.Text()
@@ -100,7 +84,7 @@ func main() {
}
sort.Strings(paths)
for _, path := range paths {
- outf("\t%q: []string{\n", path)
+ outf("\t%q: {\n", path)
pkg := pkgs[path]
var syms []string
for sym := range pkg {
@@ -123,17 +107,39 @@ func main() {
}
}
-// syscallJSAPI returns the API of the syscall/js package.
-// It's computed from the contents of $(go env GOROOT)/src/syscall/js.
-func syscallJSAPI() io.Reader {
- var exeSuffix string
- if runtime.GOOS == "windows" {
- exeSuffix = ".exe"
+// readAPI opens an io.Reader that reads all stdlib API content.
+func readAPI() io.Reader {
+ entries, err := os.ReadDir(filepath.Join(runtime.GOROOT(), "api"))
+ if err != nil {
+ log.Fatal(err)
+ }
+ var readers []io.Reader
+ for _, entry := range entries {
+ name := entry.Name()
+ if strings.HasPrefix(name, "go") && strings.HasSuffix(name, ".txt") {
+ readers = append(readers, mustOpen(api(name)))
+ }
+ }
+ return io.MultiReader(readers...)
+}
+
+// syms computes the exported symbols in the specified package.
+func syms(pkg string, extraEnv ...string) map[string]bool {
+ var env []string
+ if len(extraEnv) != 0 {
+ env = append(os.Environ(), extraEnv...)
}
- cmd := exec.Command("go"+exeSuffix, "run", "cmd/api", "-contexts", "js-wasm", "syscall/js")
- out, err := cmd.Output()
+ pkgs, err := packages.Load(&packages.Config{Mode: packages.NeedTypes, Env: env}, pkg)
if err != nil {
log.Fatalln(err)
+ } else if len(pkgs) != 1 {
+ log.Fatalf("got %d packages, want one package %q", len(pkgs), pkg)
+ }
+ syms := make(map[string]bool)
+ for _, name := range pkgs[0].Types.Scope().Names() {
+ if token.IsExported(name) {
+ syms[name] = true
+ }
}
- return bytes.NewReader(out)
+ return syms
}
diff --git a/internal/imports/mod.go b/internal/imports/mod.go
index 2bcf41f5f..7d99d04ca 100644
--- a/internal/imports/mod.go
+++ b/internal/imports/mod.go
@@ -70,9 +70,17 @@ func (r *ModuleResolver) init() error {
Logf: r.env.Logf,
WorkingDir: r.env.WorkingDir,
}
- vendorEnabled, mainModVendor, err := gocommand.VendorEnabled(context.TODO(), inv, r.env.GocmdRunner)
- if err != nil {
- return err
+
+ vendorEnabled := false
+ var mainModVendor *gocommand.ModuleJSON
+
+ // Module vendor directories are ignored in workspace mode:
+ // https://go.googlesource.com/proposal/+/master/design/45713-workspace.md
+ if len(r.env.Env["GOWORK"]) == 0 {
+ vendorEnabled, mainModVendor, err = gocommand.VendorEnabled(context.TODO(), inv, r.env.GocmdRunner)
+ if err != nil {
+ return err
+ }
}
if mainModVendor != nil && vendorEnabled {
@@ -121,22 +129,22 @@ func (r *ModuleResolver) init() error {
})
r.roots = []gopathwalk.Root{
- {filepath.Join(goenv["GOROOT"], "/src"), gopathwalk.RootGOROOT},
+ {Path: filepath.Join(goenv["GOROOT"], "/src"), Type: gopathwalk.RootGOROOT},
}
r.mainByDir = make(map[string]*gocommand.ModuleJSON)
for _, main := range r.mains {
- r.roots = append(r.roots, gopathwalk.Root{main.Dir, gopathwalk.RootCurrentModule})
+ r.roots = append(r.roots, gopathwalk.Root{Path: main.Dir, Type: gopathwalk.RootCurrentModule})
r.mainByDir[main.Dir] = main
}
if vendorEnabled {
- r.roots = append(r.roots, gopathwalk.Root{r.dummyVendorMod.Dir, gopathwalk.RootOther})
+ r.roots = append(r.roots, gopathwalk.Root{Path: r.dummyVendorMod.Dir, Type: gopathwalk.RootOther})
} else {
addDep := func(mod *gocommand.ModuleJSON) {
if mod.Replace == nil {
// This is redundant with the cache, but we'll skip it cheaply enough.
- r.roots = append(r.roots, gopathwalk.Root{mod.Dir, gopathwalk.RootModuleCache})
+ r.roots = append(r.roots, gopathwalk.Root{Path: mod.Dir, Type: gopathwalk.RootModuleCache})
} else {
- r.roots = append(r.roots, gopathwalk.Root{mod.Dir, gopathwalk.RootOther})
+ r.roots = append(r.roots, gopathwalk.Root{Path: mod.Dir, Type: gopathwalk.RootOther})
}
}
// Walk dependent modules before scanning the full mod cache, direct deps first.
@@ -150,7 +158,7 @@ func (r *ModuleResolver) init() error {
addDep(mod)
}
}
- r.roots = append(r.roots, gopathwalk.Root{r.moduleCacheDir, gopathwalk.RootModuleCache})
+ r.roots = append(r.roots, gopathwalk.Root{Path: r.moduleCacheDir, Type: gopathwalk.RootModuleCache})
}
r.scannedRoots = map[gopathwalk.Root]bool{}
@@ -458,6 +466,16 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error
// We assume cached directories are fully cached, including all their
// children, and have not changed. We can skip them.
skip := func(root gopathwalk.Root, dir string) bool {
+ if r.env.SkipPathInScan != nil && root.Type == gopathwalk.RootCurrentModule {
+ if root.Path == dir {
+ return false
+ }
+
+ if r.env.SkipPathInScan(filepath.Clean(dir)) {
+ return true
+ }
+ }
+
info, ok := r.cacheLoad(dir)
if !ok {
return false
diff --git a/internal/imports/mod_test.go b/internal/imports/mod_test.go
index 5f71805fa..cb7fd4499 100644
--- a/internal/imports/mod_test.go
+++ b/internal/imports/mod_test.go
@@ -29,7 +29,7 @@ import (
// Tests that we can find packages in the stdlib.
func TestScanStdlib(t *testing.T) {
- mt := setup(t, `
+ mt := setup(t, nil, `
-- go.mod --
module x
`, "")
@@ -42,7 +42,7 @@ module x
// where the module is in scope -- here we have to figure out the import path
// without any help from go list.
func TestScanOutOfScopeNestedModule(t *testing.T) {
- mt := setup(t, `
+ mt := setup(t, nil, `
-- go.mod --
module x
@@ -68,7 +68,7 @@ package x`, "")
// Tests that we don't find a nested module contained in a local replace target.
// The code for this case is too annoying to write, so it's just ignored.
func TestScanNestedModuleInLocalReplace(t *testing.T) {
- mt := setup(t, `
+ mt := setup(t, nil, `
-- go.mod --
module x
@@ -107,7 +107,7 @@ package z
// Tests that path encoding is handled correctly. Adapted from mod_case.txt.
func TestModCase(t *testing.T) {
- mt := setup(t, `
+ mt := setup(t, nil, `
-- go.mod --
module x
@@ -124,7 +124,7 @@ import _ "rsc.io/QUOTE/QUOTE"
// Not obviously relevant to goimports. Adapted from mod_domain_root.txt anyway.
func TestModDomainRoot(t *testing.T) {
- mt := setup(t, `
+ mt := setup(t, nil, `
-- go.mod --
module x
@@ -140,7 +140,7 @@ import _ "example.com"
// Tests that scanning the module cache > 1 time is able to find the same module.
func TestModMultipleScans(t *testing.T) {
- mt := setup(t, `
+ mt := setup(t, nil, `
-- go.mod --
module x
@@ -159,7 +159,7 @@ import _ "example.com"
// Tests that scanning the module cache > 1 time is able to find the same module
// in the module cache.
func TestModMultipleScansWithSubdirs(t *testing.T) {
- mt := setup(t, `
+ mt := setup(t, nil, `
-- go.mod --
module x
@@ -178,7 +178,7 @@ import _ "rsc.io/quote"
// Tests that scanning the module cache > 1 after changing a package in module cache to make it unimportable
// is able to find the same module.
func TestModCacheEditModFile(t *testing.T) {
- mt := setup(t, `
+ mt := setup(t, nil, `
-- go.mod --
module x
@@ -219,7 +219,7 @@ import _ "rsc.io/quote"
// Tests that -mod=vendor works. Adapted from mod_vendor_build.txt.
func TestModVendorBuild(t *testing.T) {
- mt := setup(t, `
+ mt := setup(t, nil, `
-- go.mod --
module m
go 1.12
@@ -250,7 +250,7 @@ import _ "rsc.io/sampler"
// Tests that -mod=vendor is auto-enabled only for go1.14 and higher.
// Vaguely inspired by mod_vendor_auto.txt.
func TestModVendorAuto(t *testing.T) {
- mt := setup(t, `
+ mt := setup(t, nil, `
-- go.mod --
module m
go 1.14
@@ -276,7 +276,7 @@ import _ "rsc.io/sampler"
// Tests that a module replace works. Adapted from mod_list.txt. We start with
// go.mod2; the first part of the test is irrelevant.
func TestModList(t *testing.T) {
- mt := setup(t, `
+ mt := setup(t, nil, `
-- go.mod --
module x
require rsc.io/quote v1.5.1
@@ -293,7 +293,7 @@ import _ "rsc.io/quote"
// Tests that a local replace works. Adapted from mod_local_replace.txt.
func TestModLocalReplace(t *testing.T) {
- mt := setup(t, `
+ mt := setup(t, nil, `
-- x/y/go.mod --
module x/y
require zz v1.0.0
@@ -317,7 +317,7 @@ package z
// Tests that the package at the root of the main module can be found.
// Adapted from the first part of mod_multirepo.txt.
func TestModMultirepo1(t *testing.T) {
- mt := setup(t, `
+ mt := setup(t, nil, `
-- go.mod --
module rsc.io/quote
@@ -333,7 +333,7 @@ package quote
// of mod_multirepo.txt (We skip the case where it doesn't have a go.mod
// entry -- we just don't work in that case.)
func TestModMultirepo3(t *testing.T) {
- mt := setup(t, `
+ mt := setup(t, nil, `
-- go.mod --
module rsc.io/quote
@@ -352,7 +352,7 @@ import _ "rsc.io/quote/v2"
// Tests that a nested module is found in the module cache, even though
// it's checked out. Adapted from the fourth part of mod_multirepo.txt.
func TestModMultirepo4(t *testing.T) {
- mt := setup(t, `
+ mt := setup(t, nil, `
-- go.mod --
module rsc.io/quote
require rsc.io/quote/v2 v2.0.1
@@ -376,7 +376,7 @@ import _ "rsc.io/quote/v2"
// Tests a simple module dependency. Adapted from the first part of mod_replace.txt.
func TestModReplace1(t *testing.T) {
- mt := setup(t, `
+ mt := setup(t, nil, `
-- go.mod --
module quoter
@@ -392,7 +392,7 @@ package main
// Tests a local replace. Adapted from the second part of mod_replace.txt.
func TestModReplace2(t *testing.T) {
- mt := setup(t, `
+ mt := setup(t, nil, `
-- go.mod --
module quoter
@@ -418,7 +418,7 @@ import "rsc.io/sampler"
// Tests that a module can be replaced by a different module path. Adapted
// from the third part of mod_replace.txt.
func TestModReplace3(t *testing.T) {
- mt := setup(t, `
+ mt := setup(t, nil, `
-- go.mod --
module quoter
@@ -451,7 +451,7 @@ package quote
// mod_replace_import.txt, with example.com/v changed to /vv because Go 1.11
// thinks /v is an invalid major version.
func TestModReplaceImport(t *testing.T) {
- mt := setup(t, `
+ mt := setup(t, nil, `
-- go.mod --
module example.com/m
@@ -556,7 +556,7 @@ package v
func TestModWorkspace(t *testing.T) {
testenv.NeedsGo1Point(t, 18)
- mt := setup(t, `
+ mt := setup(t, nil, `
-- go.work --
go 1.18
@@ -592,7 +592,7 @@ package b
func TestModWorkspaceReplace(t *testing.T) {
testenv.NeedsGo1Point(t, 18)
- mt := setup(t, `
+ mt := setup(t, nil, `
-- go.work --
use m
@@ -651,7 +651,7 @@ func G() {
func TestModWorkspaceReplaceOverride(t *testing.T) {
testenv.NeedsGo1Point(t, 18)
- mt := setup(t, `-- go.work --
+ mt := setup(t, nil, `-- go.work --
use m
use n
replace example.com/dep => ./dep3
@@ -716,7 +716,7 @@ func G() {
func TestModWorkspacePrune(t *testing.T) {
testenv.NeedsGo1Point(t, 18)
- mt := setup(t, `
+ mt := setup(t, nil, `
-- go.work --
go 1.18
@@ -884,8 +884,7 @@ package z
// Tests that we handle GO111MODULE=on with no go.mod file. See #30855.
func TestNoMainModule(t *testing.T) {
- testenv.NeedsGo1Point(t, 12)
- mt := setup(t, `
+ mt := setup(t, map[string]string{"GO111MODULE": "on"}, `
-- x.go --
package x
`, "")
@@ -993,9 +992,10 @@ type modTest struct {
// setup builds a test environment from a txtar and supporting modules
// in testdata/mod, along the lines of TestScript in cmd/go.
-func setup(t *testing.T, main, wd string) *modTest {
+//
+// extraEnv is applied on top of the default test env.
+func setup(t *testing.T, extraEnv map[string]string, main, wd string) *modTest {
t.Helper()
- testenv.NeedsGo1Point(t, 11)
testenv.NeedsTool(t, "go")
proxyOnce.Do(func() {
@@ -1023,13 +1023,16 @@ func setup(t *testing.T, main, wd string) *modTest {
Env: map[string]string{
"GOPATH": filepath.Join(dir, "gopath"),
"GOMODCACHE": "",
- "GO111MODULE": "on",
+ "GO111MODULE": "auto",
"GOSUMDB": "off",
"GOPROXY": proxydir.ToURL(proxyDir),
},
WorkingDir: filepath.Join(mainDir, wd),
GocmdRunner: &gocommand.Runner{},
}
+ for k, v := range extraEnv {
+ env.Env[k] = v
+ }
if *testDebug {
env.Logf = log.Printf
}
@@ -1168,7 +1171,7 @@ func removeDir(dir string) {
// Tests that findModFile can find the mod files from a path in the module cache.
func TestFindModFileModCache(t *testing.T) {
- mt := setup(t, `
+ mt := setup(t, nil, `
-- go.mod --
module x
@@ -1189,7 +1192,6 @@ import _ "rsc.io/quote"
// Tests that crud in the module cache is ignored.
func TestInvalidModCache(t *testing.T) {
- testenv.NeedsGo1Point(t, 11)
dir, err := ioutil.TempDir("", t.Name())
if err != nil {
t.Fatal(err)
@@ -1220,7 +1222,7 @@ func TestInvalidModCache(t *testing.T) {
}
func TestGetCandidatesRanking(t *testing.T) {
- mt := setup(t, `
+ mt := setup(t, nil, `
-- go.mod --
module example.com
@@ -1286,7 +1288,6 @@ import (
}
func BenchmarkScanModCache(b *testing.B) {
- testenv.NeedsGo1Point(b, 11)
env := &ProcessEnv{
GocmdRunner: &gocommand.Runner{},
Logf: log.Printf,
diff --git a/internal/imports/sortimports.go b/internal/imports/sortimports.go
index dc52372e4..1a0a7ebd9 100644
--- a/internal/imports/sortimports.go
+++ b/internal/imports/sortimports.go
@@ -3,6 +3,7 @@
// license that can be found in the LICENSE file.
// Hacked up copy of go/ast/import.go
+// Modified to use a single token.File in preference to a FileSet.
package imports
@@ -16,7 +17,9 @@ import (
// sortImports sorts runs of consecutive import lines in import blocks in f.
// It also removes duplicate imports when it is possible to do so without data loss.
-func sortImports(localPrefix string, fset *token.FileSet, f *ast.File) {
+//
+// It may mutate the token.File.
+func sortImports(localPrefix string, tokFile *token.File, f *ast.File) {
for i, d := range f.Decls {
d, ok := d.(*ast.GenDecl)
if !ok || d.Tok != token.IMPORT {
@@ -39,21 +42,22 @@ func sortImports(localPrefix string, fset *token.FileSet, f *ast.File) {
i := 0
specs := d.Specs[:0]
for j, s := range d.Specs {
- if j > i && fset.Position(s.Pos()).Line > 1+fset.Position(d.Specs[j-1].End()).Line {
+ if j > i && tokFile.Line(s.Pos()) > 1+tokFile.Line(d.Specs[j-1].End()) {
// j begins a new run. End this one.
- specs = append(specs, sortSpecs(localPrefix, fset, f, d.Specs[i:j])...)
+ specs = append(specs, sortSpecs(localPrefix, tokFile, f, d.Specs[i:j])...)
i = j
}
}
- specs = append(specs, sortSpecs(localPrefix, fset, f, d.Specs[i:])...)
+ specs = append(specs, sortSpecs(localPrefix, tokFile, f, d.Specs[i:])...)
d.Specs = specs
// Deduping can leave a blank line before the rparen; clean that up.
+ // Ignore line directives.
if len(d.Specs) > 0 {
lastSpec := d.Specs[len(d.Specs)-1]
- lastLine := fset.Position(lastSpec.Pos()).Line
- if rParenLine := fset.Position(d.Rparen).Line; rParenLine > lastLine+1 {
- fset.File(d.Rparen).MergeLine(rParenLine - 1)
+ lastLine := tokFile.PositionFor(lastSpec.Pos(), false).Line
+ if rParenLine := tokFile.PositionFor(d.Rparen, false).Line; rParenLine > lastLine+1 {
+ tokFile.MergeLine(rParenLine - 1) // has side effects!
}
}
}
@@ -62,7 +66,7 @@ func sortImports(localPrefix string, fset *token.FileSet, f *ast.File) {
// mergeImports merges all the import declarations into the first one.
// Taken from golang.org/x/tools/ast/astutil.
// This does not adjust line numbers properly
-func mergeImports(fset *token.FileSet, f *ast.File) {
+func mergeImports(f *ast.File) {
if len(f.Decls) <= 1 {
return
}
@@ -144,7 +148,9 @@ type posSpan struct {
End token.Pos
}
-func sortSpecs(localPrefix string, fset *token.FileSet, f *ast.File, specs []ast.Spec) []ast.Spec {
+// sortSpecs sorts the import specs within each import decl.
+// It may mutate the token.File.
+func sortSpecs(localPrefix string, tokFile *token.File, f *ast.File, specs []ast.Spec) []ast.Spec {
// Can't short-circuit here even if specs are already sorted,
// since they might yet need deduplication.
// A lone import, however, may be safely ignored.
@@ -160,7 +166,7 @@ func sortSpecs(localPrefix string, fset *token.FileSet, f *ast.File, specs []ast
// Identify comments in this range.
// Any comment from pos[0].Start to the final line counts.
- lastLine := fset.Position(pos[len(pos)-1].End).Line
+ lastLine := tokFile.Line(pos[len(pos)-1].End)
cstart := len(f.Comments)
cend := len(f.Comments)
for i, g := range f.Comments {
@@ -170,7 +176,7 @@ func sortSpecs(localPrefix string, fset *token.FileSet, f *ast.File, specs []ast
if i < cstart {
cstart = i
}
- if fset.Position(g.End()).Line > lastLine {
+ if tokFile.Line(g.End()) > lastLine {
cend = i
break
}
@@ -203,7 +209,7 @@ func sortSpecs(localPrefix string, fset *token.FileSet, f *ast.File, specs []ast
deduped = append(deduped, s)
} else {
p := s.Pos()
- fset.File(p).MergeLine(fset.Position(p).Line)
+ tokFile.MergeLine(tokFile.Line(p)) // has side effects!
}
}
specs = deduped
@@ -234,21 +240,21 @@ func sortSpecs(localPrefix string, fset *token.FileSet, f *ast.File, specs []ast
// Fixup comments can insert blank lines, because import specs are on different lines.
// We remove those blank lines here by merging import spec to the first import spec line.
- firstSpecLine := fset.Position(specs[0].Pos()).Line
+ firstSpecLine := tokFile.Line(specs[0].Pos())
for _, s := range specs[1:] {
p := s.Pos()
- line := fset.File(p).Line(p)
+ line := tokFile.Line(p)
for previousLine := line - 1; previousLine >= firstSpecLine; {
// MergeLine can panic. Avoid the panic at the cost of not removing the blank line
// golang/go#50329
- if previousLine > 0 && previousLine < fset.File(p).LineCount() {
- fset.File(p).MergeLine(previousLine)
+ if previousLine > 0 && previousLine < tokFile.LineCount() {
+ tokFile.MergeLine(previousLine) // has side effects!
previousLine--
} else {
// try to gather some data to diagnose how this could happen
req := "Please report what the imports section of your go file looked like."
log.Printf("panic avoided: first:%d line:%d previous:%d max:%d. %s",
- firstSpecLine, line, previousLine, fset.File(p).LineCount(), req)
+ firstSpecLine, line, previousLine, tokFile.LineCount(), req)
}
}
}
diff --git a/internal/imports/zstdlib.go b/internal/imports/zstdlib.go
index 437fbb78d..31a75949c 100644
--- a/internal/imports/zstdlib.go
+++ b/internal/imports/zstdlib.go
@@ -1,11 +1,16 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
// Code generated by mkstdlib.go. DO NOT EDIT.
package imports
var stdlib = map[string][]string{
- "archive/tar": []string{
+ "archive/tar": {
"ErrFieldTooLong",
"ErrHeader",
+ "ErrInsecurePath",
"ErrWriteAfterClose",
"ErrWriteTooLong",
"FileInfoHeader",
@@ -34,13 +39,14 @@ var stdlib = map[string][]string{
"TypeXHeader",
"Writer",
},
- "archive/zip": []string{
+ "archive/zip": {
"Compressor",
"Decompressor",
"Deflate",
"ErrAlgorithm",
"ErrChecksum",
"ErrFormat",
+ "ErrInsecurePath",
"File",
"FileHeader",
"FileInfoHeader",
@@ -54,7 +60,7 @@ var stdlib = map[string][]string{
"Store",
"Writer",
},
- "bufio": []string{
+ "bufio": {
"ErrAdvanceTooFar",
"ErrBadReadCount",
"ErrBufferFull",
@@ -81,14 +87,17 @@ var stdlib = map[string][]string{
"SplitFunc",
"Writer",
},
- "bytes": []string{
+ "bytes": {
"Buffer",
+ "Clone",
"Compare",
"Contains",
"ContainsAny",
"ContainsRune",
"Count",
"Cut",
+ "CutPrefix",
+ "CutSuffix",
"Equal",
"EqualFold",
"ErrTooLarge",
@@ -138,11 +147,11 @@ var stdlib = map[string][]string{
"TrimSpace",
"TrimSuffix",
},
- "compress/bzip2": []string{
+ "compress/bzip2": {
"NewReader",
"StructuralError",
},
- "compress/flate": []string{
+ "compress/flate": {
"BestCompression",
"BestSpeed",
"CorruptInputError",
@@ -160,7 +169,7 @@ var stdlib = map[string][]string{
"WriteError",
"Writer",
},
- "compress/gzip": []string{
+ "compress/gzip": {
"BestCompression",
"BestSpeed",
"DefaultCompression",
@@ -175,7 +184,7 @@ var stdlib = map[string][]string{
"Reader",
"Writer",
},
- "compress/lzw": []string{
+ "compress/lzw": {
"LSB",
"MSB",
"NewReader",
@@ -184,7 +193,7 @@ var stdlib = map[string][]string{
"Reader",
"Writer",
},
- "compress/zlib": []string{
+ "compress/zlib": {
"BestCompression",
"BestSpeed",
"DefaultCompression",
@@ -201,7 +210,7 @@ var stdlib = map[string][]string{
"Resetter",
"Writer",
},
- "container/heap": []string{
+ "container/heap": {
"Fix",
"Init",
"Interface",
@@ -209,28 +218,31 @@ var stdlib = map[string][]string{
"Push",
"Remove",
},
- "container/list": []string{
+ "container/list": {
"Element",
"List",
"New",
},
- "container/ring": []string{
+ "container/ring": {
"New",
"Ring",
},
- "context": []string{
+ "context": {
"Background",
+ "CancelCauseFunc",
"CancelFunc",
"Canceled",
+ "Cause",
"Context",
"DeadlineExceeded",
"TODO",
"WithCancel",
+ "WithCancelCause",
"WithDeadline",
"WithTimeout",
"WithValue",
},
- "crypto": []string{
+ "crypto": {
"BLAKE2b_256",
"BLAKE2b_384",
"BLAKE2b_512",
@@ -259,12 +271,12 @@ var stdlib = map[string][]string{
"Signer",
"SignerOpts",
},
- "crypto/aes": []string{
+ "crypto/aes": {
"BlockSize",
"KeySizeError",
"NewCipher",
},
- "crypto/cipher": []string{
+ "crypto/cipher": {
"AEAD",
"Block",
"BlockMode",
@@ -281,13 +293,13 @@ var stdlib = map[string][]string{
"StreamReader",
"StreamWriter",
},
- "crypto/des": []string{
+ "crypto/des": {
"BlockSize",
"KeySizeError",
"NewCipher",
"NewTripleDESCipher",
},
- "crypto/dsa": []string{
+ "crypto/dsa": {
"ErrInvalidPublicKey",
"GenerateKey",
"GenerateParameters",
@@ -302,7 +314,16 @@ var stdlib = map[string][]string{
"Sign",
"Verify",
},
- "crypto/ecdsa": []string{
+ "crypto/ecdh": {
+ "Curve",
+ "P256",
+ "P384",
+ "P521",
+ "PrivateKey",
+ "PublicKey",
+ "X25519",
+ },
+ "crypto/ecdsa": {
"GenerateKey",
"PrivateKey",
"PublicKey",
@@ -311,9 +332,10 @@ var stdlib = map[string][]string{
"Verify",
"VerifyASN1",
},
- "crypto/ed25519": []string{
+ "crypto/ed25519": {
"GenerateKey",
"NewKeyFromSeed",
+ "Options",
"PrivateKey",
"PrivateKeySize",
"PublicKey",
@@ -322,8 +344,9 @@ var stdlib = map[string][]string{
"Sign",
"SignatureSize",
"Verify",
+ "VerifyWithOptions",
},
- "crypto/elliptic": []string{
+ "crypto/elliptic": {
"Curve",
"CurveParams",
"GenerateKey",
@@ -336,28 +359,28 @@ var stdlib = map[string][]string{
"Unmarshal",
"UnmarshalCompressed",
},
- "crypto/hmac": []string{
+ "crypto/hmac": {
"Equal",
"New",
},
- "crypto/md5": []string{
+ "crypto/md5": {
"BlockSize",
"New",
"Size",
"Sum",
},
- "crypto/rand": []string{
+ "crypto/rand": {
"Int",
"Prime",
"Read",
"Reader",
},
- "crypto/rc4": []string{
+ "crypto/rc4": {
"Cipher",
"KeySizeError",
"NewCipher",
},
- "crypto/rsa": []string{
+ "crypto/rsa": {
"CRTValue",
"DecryptOAEP",
"DecryptPKCS1v15",
@@ -382,13 +405,13 @@ var stdlib = map[string][]string{
"VerifyPKCS1v15",
"VerifyPSS",
},
- "crypto/sha1": []string{
+ "crypto/sha1": {
"BlockSize",
"New",
"Size",
"Sum",
},
- "crypto/sha256": []string{
+ "crypto/sha256": {
"BlockSize",
"New",
"New224",
@@ -397,7 +420,7 @@ var stdlib = map[string][]string{
"Sum224",
"Sum256",
},
- "crypto/sha512": []string{
+ "crypto/sha512": {
"BlockSize",
"New",
"New384",
@@ -412,17 +435,19 @@ var stdlib = map[string][]string{
"Sum512_224",
"Sum512_256",
},
- "crypto/subtle": []string{
+ "crypto/subtle": {
"ConstantTimeByteEq",
"ConstantTimeCompare",
"ConstantTimeCopy",
"ConstantTimeEq",
"ConstantTimeLessOrEq",
"ConstantTimeSelect",
+ "XORBytes",
},
- "crypto/tls": []string{
+ "crypto/tls": {
"Certificate",
"CertificateRequestInfo",
+ "CertificateVerificationError",
"CipherSuite",
"CipherSuiteName",
"CipherSuites",
@@ -506,7 +531,7 @@ var stdlib = map[string][]string{
"X25519",
"X509KeyPair",
},
- "crypto/x509": []string{
+ "crypto/x509": {
"CANotAuthorizedForExtKeyUsage",
"CANotAuthorizedForThisName",
"CertPool",
@@ -588,6 +613,7 @@ var stdlib = map[string][]string{
"ParsePKCS1PublicKey",
"ParsePKCS8PrivateKey",
"ParsePKIXPublicKey",
+ "ParseRevocationList",
"PublicKeyAlgorithm",
"PureEd25519",
"RSA",
@@ -599,6 +625,7 @@ var stdlib = map[string][]string{
"SHA384WithRSAPSS",
"SHA512WithRSA",
"SHA512WithRSAPSS",
+ "SetFallbackRoots",
"SignatureAlgorithm",
"SystemCertPool",
"SystemRootsError",
@@ -611,7 +638,7 @@ var stdlib = map[string][]string{
"UnknownSignatureAlgorithm",
"VerifyOptions",
},
- "crypto/x509/pkix": []string{
+ "crypto/x509/pkix": {
"AlgorithmIdentifier",
"AttributeTypeAndValue",
"AttributeTypeAndValueSET",
@@ -623,7 +650,7 @@ var stdlib = map[string][]string{
"RevokedCertificate",
"TBSCertificateList",
},
- "database/sql": []string{
+ "database/sql": {
"ColumnType",
"Conn",
"DB",
@@ -664,7 +691,7 @@ var stdlib = map[string][]string{
"Tx",
"TxOptions",
},
- "database/sql/driver": []string{
+ "database/sql/driver": {
"Bool",
"ColumnConverter",
"Conn",
@@ -712,12 +739,12 @@ var stdlib = map[string][]string{
"ValueConverter",
"Valuer",
},
- "debug/buildinfo": []string{
+ "debug/buildinfo": {
"BuildInfo",
"Read",
"ReadFile",
},
- "debug/dwarf": []string{
+ "debug/dwarf": {
"AddrType",
"ArrayType",
"Attr",
@@ -968,7 +995,7 @@ var stdlib = map[string][]string{
"UnsupportedType",
"VoidType",
},
- "debug/elf": []string{
+ "debug/elf": {
"ARM_MAGIC_TRAMP_NUMBER",
"COMPRESS_HIOS",
"COMPRESS_HIPROC",
@@ -1238,6 +1265,7 @@ var stdlib = map[string][]string{
"EM_L10M",
"EM_LANAI",
"EM_LATTICEMICO32",
+ "EM_LOONGARCH",
"EM_M16C",
"EM_M32",
"EM_M32C",
@@ -1820,6 +1848,96 @@ var stdlib = map[string][]string{
"R_ARM_XPC25",
"R_INFO",
"R_INFO32",
+ "R_LARCH",
+ "R_LARCH_32",
+ "R_LARCH_32_PCREL",
+ "R_LARCH_64",
+ "R_LARCH_ABS64_HI12",
+ "R_LARCH_ABS64_LO20",
+ "R_LARCH_ABS_HI20",
+ "R_LARCH_ABS_LO12",
+ "R_LARCH_ADD16",
+ "R_LARCH_ADD24",
+ "R_LARCH_ADD32",
+ "R_LARCH_ADD64",
+ "R_LARCH_ADD8",
+ "R_LARCH_B16",
+ "R_LARCH_B21",
+ "R_LARCH_B26",
+ "R_LARCH_COPY",
+ "R_LARCH_GNU_VTENTRY",
+ "R_LARCH_GNU_VTINHERIT",
+ "R_LARCH_GOT64_HI12",
+ "R_LARCH_GOT64_LO20",
+ "R_LARCH_GOT64_PC_HI12",
+ "R_LARCH_GOT64_PC_LO20",
+ "R_LARCH_GOT_HI20",
+ "R_LARCH_GOT_LO12",
+ "R_LARCH_GOT_PC_HI20",
+ "R_LARCH_GOT_PC_LO12",
+ "R_LARCH_IRELATIVE",
+ "R_LARCH_JUMP_SLOT",
+ "R_LARCH_MARK_LA",
+ "R_LARCH_MARK_PCREL",
+ "R_LARCH_NONE",
+ "R_LARCH_PCALA64_HI12",
+ "R_LARCH_PCALA64_LO20",
+ "R_LARCH_PCALA_HI20",
+ "R_LARCH_PCALA_LO12",
+ "R_LARCH_RELATIVE",
+ "R_LARCH_RELAX",
+ "R_LARCH_SOP_ADD",
+ "R_LARCH_SOP_AND",
+ "R_LARCH_SOP_ASSERT",
+ "R_LARCH_SOP_IF_ELSE",
+ "R_LARCH_SOP_NOT",
+ "R_LARCH_SOP_POP_32_S_0_10_10_16_S2",
+ "R_LARCH_SOP_POP_32_S_0_5_10_16_S2",
+ "R_LARCH_SOP_POP_32_S_10_12",
+ "R_LARCH_SOP_POP_32_S_10_16",
+ "R_LARCH_SOP_POP_32_S_10_16_S2",
+ "R_LARCH_SOP_POP_32_S_10_5",
+ "R_LARCH_SOP_POP_32_S_5_20",
+ "R_LARCH_SOP_POP_32_U",
+ "R_LARCH_SOP_POP_32_U_10_12",
+ "R_LARCH_SOP_PUSH_ABSOLUTE",
+ "R_LARCH_SOP_PUSH_DUP",
+ "R_LARCH_SOP_PUSH_GPREL",
+ "R_LARCH_SOP_PUSH_PCREL",
+ "R_LARCH_SOP_PUSH_PLT_PCREL",
+ "R_LARCH_SOP_PUSH_TLS_GD",
+ "R_LARCH_SOP_PUSH_TLS_GOT",
+ "R_LARCH_SOP_PUSH_TLS_TPREL",
+ "R_LARCH_SOP_SL",
+ "R_LARCH_SOP_SR",
+ "R_LARCH_SOP_SUB",
+ "R_LARCH_SUB16",
+ "R_LARCH_SUB24",
+ "R_LARCH_SUB32",
+ "R_LARCH_SUB64",
+ "R_LARCH_SUB8",
+ "R_LARCH_TLS_DTPMOD32",
+ "R_LARCH_TLS_DTPMOD64",
+ "R_LARCH_TLS_DTPREL32",
+ "R_LARCH_TLS_DTPREL64",
+ "R_LARCH_TLS_GD_HI20",
+ "R_LARCH_TLS_GD_PC_HI20",
+ "R_LARCH_TLS_IE64_HI12",
+ "R_LARCH_TLS_IE64_LO20",
+ "R_LARCH_TLS_IE64_PC_HI12",
+ "R_LARCH_TLS_IE64_PC_LO20",
+ "R_LARCH_TLS_IE_HI20",
+ "R_LARCH_TLS_IE_LO12",
+ "R_LARCH_TLS_IE_PC_HI20",
+ "R_LARCH_TLS_IE_PC_LO12",
+ "R_LARCH_TLS_LD_HI20",
+ "R_LARCH_TLS_LD_PC_HI20",
+ "R_LARCH_TLS_LE64_HI12",
+ "R_LARCH_TLS_LE64_LO20",
+ "R_LARCH_TLS_LE_HI20",
+ "R_LARCH_TLS_LE_LO12",
+ "R_LARCH_TLS_TPREL32",
+ "R_LARCH_TLS_TPREL64",
"R_MIPS",
"R_MIPS_16",
"R_MIPS_26",
@@ -1881,15 +1999,25 @@ var stdlib = map[string][]string{
"R_PPC64_ADDR16_HIGH",
"R_PPC64_ADDR16_HIGHA",
"R_PPC64_ADDR16_HIGHER",
+ "R_PPC64_ADDR16_HIGHER34",
"R_PPC64_ADDR16_HIGHERA",
+ "R_PPC64_ADDR16_HIGHERA34",
"R_PPC64_ADDR16_HIGHEST",
+ "R_PPC64_ADDR16_HIGHEST34",
"R_PPC64_ADDR16_HIGHESTA",
+ "R_PPC64_ADDR16_HIGHESTA34",
"R_PPC64_ADDR16_LO",
"R_PPC64_ADDR16_LO_DS",
"R_PPC64_ADDR24",
"R_PPC64_ADDR32",
"R_PPC64_ADDR64",
"R_PPC64_ADDR64_LOCAL",
+ "R_PPC64_COPY",
+ "R_PPC64_D28",
+ "R_PPC64_D34",
+ "R_PPC64_D34_HA30",
+ "R_PPC64_D34_HI30",
+ "R_PPC64_D34_LO",
"R_PPC64_DTPMOD64",
"R_PPC64_DTPREL16",
"R_PPC64_DTPREL16_DS",
@@ -1903,8 +2031,12 @@ var stdlib = map[string][]string{
"R_PPC64_DTPREL16_HIGHESTA",
"R_PPC64_DTPREL16_LO",
"R_PPC64_DTPREL16_LO_DS",
+ "R_PPC64_DTPREL34",
"R_PPC64_DTPREL64",
"R_PPC64_ENTRY",
+ "R_PPC64_GLOB_DAT",
+ "R_PPC64_GNU_VTENTRY",
+ "R_PPC64_GNU_VTINHERIT",
"R_PPC64_GOT16",
"R_PPC64_GOT16_DS",
"R_PPC64_GOT16_HA",
@@ -1915,29 +2047,50 @@ var stdlib = map[string][]string{
"R_PPC64_GOT_DTPREL16_HA",
"R_PPC64_GOT_DTPREL16_HI",
"R_PPC64_GOT_DTPREL16_LO_DS",
+ "R_PPC64_GOT_DTPREL_PCREL34",
+ "R_PPC64_GOT_PCREL34",
"R_PPC64_GOT_TLSGD16",
"R_PPC64_GOT_TLSGD16_HA",
"R_PPC64_GOT_TLSGD16_HI",
"R_PPC64_GOT_TLSGD16_LO",
+ "R_PPC64_GOT_TLSGD_PCREL34",
"R_PPC64_GOT_TLSLD16",
"R_PPC64_GOT_TLSLD16_HA",
"R_PPC64_GOT_TLSLD16_HI",
"R_PPC64_GOT_TLSLD16_LO",
+ "R_PPC64_GOT_TLSLD_PCREL34",
"R_PPC64_GOT_TPREL16_DS",
"R_PPC64_GOT_TPREL16_HA",
"R_PPC64_GOT_TPREL16_HI",
"R_PPC64_GOT_TPREL16_LO_DS",
+ "R_PPC64_GOT_TPREL_PCREL34",
"R_PPC64_IRELATIVE",
"R_PPC64_JMP_IREL",
"R_PPC64_JMP_SLOT",
"R_PPC64_NONE",
+ "R_PPC64_PCREL28",
+ "R_PPC64_PCREL34",
+ "R_PPC64_PCREL_OPT",
+ "R_PPC64_PLT16_HA",
+ "R_PPC64_PLT16_HI",
+ "R_PPC64_PLT16_LO",
"R_PPC64_PLT16_LO_DS",
+ "R_PPC64_PLT32",
+ "R_PPC64_PLT64",
+ "R_PPC64_PLTCALL",
+ "R_PPC64_PLTCALL_NOTOC",
"R_PPC64_PLTGOT16",
"R_PPC64_PLTGOT16_DS",
"R_PPC64_PLTGOT16_HA",
"R_PPC64_PLTGOT16_HI",
"R_PPC64_PLTGOT16_LO",
"R_PPC64_PLTGOT_LO_DS",
+ "R_PPC64_PLTREL32",
+ "R_PPC64_PLTREL64",
+ "R_PPC64_PLTSEQ",
+ "R_PPC64_PLTSEQ_NOTOC",
+ "R_PPC64_PLT_PCREL34",
+ "R_PPC64_PLT_PCREL34_NOTOC",
"R_PPC64_REL14",
"R_PPC64_REL14_BRNTAKEN",
"R_PPC64_REL14_BRTAKEN",
@@ -1945,13 +2098,28 @@ var stdlib = map[string][]string{
"R_PPC64_REL16DX_HA",
"R_PPC64_REL16_HA",
"R_PPC64_REL16_HI",
+ "R_PPC64_REL16_HIGH",
+ "R_PPC64_REL16_HIGHA",
+ "R_PPC64_REL16_HIGHER",
+ "R_PPC64_REL16_HIGHER34",
+ "R_PPC64_REL16_HIGHERA",
+ "R_PPC64_REL16_HIGHERA34",
+ "R_PPC64_REL16_HIGHEST",
+ "R_PPC64_REL16_HIGHEST34",
+ "R_PPC64_REL16_HIGHESTA",
+ "R_PPC64_REL16_HIGHESTA34",
"R_PPC64_REL16_LO",
"R_PPC64_REL24",
"R_PPC64_REL24_NOTOC",
+ "R_PPC64_REL30",
"R_PPC64_REL32",
"R_PPC64_REL64",
"R_PPC64_RELATIVE",
+ "R_PPC64_SECTOFF",
"R_PPC64_SECTOFF_DS",
+ "R_PPC64_SECTOFF_HA",
+ "R_PPC64_SECTOFF_HI",
+ "R_PPC64_SECTOFF_LO",
"R_PPC64_SECTOFF_LO_DS",
"R_PPC64_TLS",
"R_PPC64_TLSGD",
@@ -1976,7 +2144,11 @@ var stdlib = map[string][]string{
"R_PPC64_TPREL16_HIGHESTA",
"R_PPC64_TPREL16_LO",
"R_PPC64_TPREL16_LO_DS",
+ "R_PPC64_TPREL34",
"R_PPC64_TPREL64",
+ "R_PPC64_UADDR16",
+ "R_PPC64_UADDR32",
+ "R_PPC64_UADDR64",
"R_PPC_ADDR14",
"R_PPC_ADDR14_BRNTAKEN",
"R_PPC_ADDR14_BRTAKEN",
@@ -2315,7 +2487,7 @@ var stdlib = map[string][]string{
"Type",
"Version",
},
- "debug/gosym": []string{
+ "debug/gosym": {
"DecodingError",
"Func",
"LineTable",
@@ -2327,7 +2499,7 @@ var stdlib = map[string][]string{
"UnknownFileError",
"UnknownLineError",
},
- "debug/macho": []string{
+ "debug/macho": {
"ARM64_RELOC_ADDEND",
"ARM64_RELOC_BRANCH26",
"ARM64_RELOC_GOT_LOAD_PAGE21",
@@ -2457,13 +2629,20 @@ var stdlib = map[string][]string{
"X86_64_RELOC_TLV",
"X86_64_RELOC_UNSIGNED",
},
- "debug/pe": []string{
+ "debug/pe": {
"COFFSymbol",
+ "COFFSymbolAuxFormat5",
"COFFSymbolSize",
"DataDirectory",
"File",
"FileHeader",
"FormatError",
+ "IMAGE_COMDAT_SELECT_ANY",
+ "IMAGE_COMDAT_SELECT_ASSOCIATIVE",
+ "IMAGE_COMDAT_SELECT_EXACT_MATCH",
+ "IMAGE_COMDAT_SELECT_LARGEST",
+ "IMAGE_COMDAT_SELECT_NODUPLICATES",
+ "IMAGE_COMDAT_SELECT_SAME_SIZE",
"IMAGE_DIRECTORY_ENTRY_ARCHITECTURE",
"IMAGE_DIRECTORY_ENTRY_BASERELOC",
"IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT",
@@ -2508,6 +2687,8 @@ var stdlib = map[string][]string{
"IMAGE_FILE_MACHINE_EBC",
"IMAGE_FILE_MACHINE_I386",
"IMAGE_FILE_MACHINE_IA64",
+ "IMAGE_FILE_MACHINE_LOONGARCH32",
+ "IMAGE_FILE_MACHINE_LOONGARCH64",
"IMAGE_FILE_MACHINE_M32R",
"IMAGE_FILE_MACHINE_MIPS16",
"IMAGE_FILE_MACHINE_MIPSFPU",
@@ -2515,6 +2696,9 @@ var stdlib = map[string][]string{
"IMAGE_FILE_MACHINE_POWERPC",
"IMAGE_FILE_MACHINE_POWERPCFP",
"IMAGE_FILE_MACHINE_R4000",
+ "IMAGE_FILE_MACHINE_RISCV128",
+ "IMAGE_FILE_MACHINE_RISCV32",
+ "IMAGE_FILE_MACHINE_RISCV64",
"IMAGE_FILE_MACHINE_SH3",
"IMAGE_FILE_MACHINE_SH3DSP",
"IMAGE_FILE_MACHINE_SH4",
@@ -2527,6 +2711,14 @@ var stdlib = map[string][]string{
"IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP",
"IMAGE_FILE_SYSTEM",
"IMAGE_FILE_UP_SYSTEM_ONLY",
+ "IMAGE_SCN_CNT_CODE",
+ "IMAGE_SCN_CNT_INITIALIZED_DATA",
+ "IMAGE_SCN_CNT_UNINITIALIZED_DATA",
+ "IMAGE_SCN_LNK_COMDAT",
+ "IMAGE_SCN_MEM_DISCARDABLE",
+ "IMAGE_SCN_MEM_EXECUTE",
+ "IMAGE_SCN_MEM_READ",
+ "IMAGE_SCN_MEM_WRITE",
"IMAGE_SUBSYSTEM_EFI_APPLICATION",
"IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER",
"IMAGE_SUBSYSTEM_EFI_ROM",
@@ -2553,7 +2745,7 @@ var stdlib = map[string][]string{
"StringTable",
"Symbol",
},
- "debug/plan9obj": []string{
+ "debug/plan9obj": {
"ErrNoSymbols",
"File",
"FileHeader",
@@ -2567,16 +2759,16 @@ var stdlib = map[string][]string{
"SectionHeader",
"Sym",
},
- "embed": []string{
+ "embed": {
"FS",
},
- "encoding": []string{
+ "encoding": {
"BinaryMarshaler",
"BinaryUnmarshaler",
"TextMarshaler",
"TextUnmarshaler",
},
- "encoding/ascii85": []string{
+ "encoding/ascii85": {
"CorruptInputError",
"Decode",
"Encode",
@@ -2584,7 +2776,7 @@ var stdlib = map[string][]string{
"NewDecoder",
"NewEncoder",
},
- "encoding/asn1": []string{
+ "encoding/asn1": {
"BitString",
"ClassApplication",
"ClassContextSpecific",
@@ -2622,7 +2814,7 @@ var stdlib = map[string][]string{
"Unmarshal",
"UnmarshalWithParams",
},
- "encoding/base32": []string{
+ "encoding/base32": {
"CorruptInputError",
"Encoding",
"HexEncoding",
@@ -2633,7 +2825,7 @@ var stdlib = map[string][]string{
"StdEncoding",
"StdPadding",
},
- "encoding/base64": []string{
+ "encoding/base64": {
"CorruptInputError",
"Encoding",
"NewDecoder",
@@ -2646,7 +2838,10 @@ var stdlib = map[string][]string{
"StdPadding",
"URLEncoding",
},
- "encoding/binary": []string{
+ "encoding/binary": {
+ "AppendByteOrder",
+ "AppendUvarint",
+ "AppendVarint",
"BigEndian",
"ByteOrder",
"LittleEndian",
@@ -2663,7 +2858,7 @@ var stdlib = map[string][]string{
"Varint",
"Write",
},
- "encoding/csv": []string{
+ "encoding/csv": {
"ErrBareQuote",
"ErrFieldCount",
"ErrQuote",
@@ -2674,7 +2869,7 @@ var stdlib = map[string][]string{
"Reader",
"Writer",
},
- "encoding/gob": []string{
+ "encoding/gob": {
"CommonType",
"Decoder",
"Encoder",
@@ -2685,7 +2880,7 @@ var stdlib = map[string][]string{
"Register",
"RegisterName",
},
- "encoding/hex": []string{
+ "encoding/hex": {
"Decode",
"DecodeString",
"DecodedLen",
@@ -2699,7 +2894,7 @@ var stdlib = map[string][]string{
"NewDecoder",
"NewEncoder",
},
- "encoding/json": []string{
+ "encoding/json": {
"Compact",
"Decoder",
"Delim",
@@ -2726,13 +2921,13 @@ var stdlib = map[string][]string{
"UnsupportedValueError",
"Valid",
},
- "encoding/pem": []string{
+ "encoding/pem": {
"Block",
"Decode",
"Encode",
"EncodeToMemory",
},
- "encoding/xml": []string{
+ "encoding/xml": {
"Attr",
"CharData",
"Comment",
@@ -2766,13 +2961,14 @@ var stdlib = map[string][]string{
"UnmarshalerAttr",
"UnsupportedTypeError",
},
- "errors": []string{
+ "errors": {
"As",
"Is",
+ "Join",
"New",
"Unwrap",
},
- "expvar": []string{
+ "expvar": {
"Do",
"Float",
"Func",
@@ -2789,7 +2985,7 @@ var stdlib = map[string][]string{
"String",
"Var",
},
- "flag": []string{
+ "flag": {
"Arg",
"Args",
"Bool",
@@ -2822,6 +3018,7 @@ var stdlib = map[string][]string{
"Set",
"String",
"StringVar",
+ "TextVar",
"Uint",
"Uint64",
"Uint64Var",
@@ -2833,8 +3030,12 @@ var stdlib = map[string][]string{
"Visit",
"VisitAll",
},
- "fmt": []string{
+ "fmt": {
+ "Append",
+ "Appendf",
+ "Appendln",
"Errorf",
+ "FormatString",
"Formatter",
"Fprint",
"Fprintf",
@@ -2860,7 +3061,7 @@ var stdlib = map[string][]string{
"State",
"Stringer",
},
- "go/ast": []string{
+ "go/ast": {
"ArrayType",
"AssignStmt",
"Bad",
@@ -2963,7 +3164,7 @@ var stdlib = map[string][]string{
"Visitor",
"Walk",
},
- "go/build": []string{
+ "go/build": {
"AllowBinary",
"ArchChar",
"Context",
@@ -2980,7 +3181,7 @@ var stdlib = map[string][]string{
"Package",
"ToolDir",
},
- "go/build/constraint": []string{
+ "go/build/constraint": {
"AndExpr",
"Expr",
"IsGoBuild",
@@ -2992,7 +3193,7 @@ var stdlib = map[string][]string{
"SyntaxError",
"TagExpr",
},
- "go/constant": []string{
+ "go/constant": {
"BinaryOp",
"BitLen",
"Bool",
@@ -3033,7 +3234,7 @@ var stdlib = map[string][]string{
"Val",
"Value",
},
- "go/doc": []string{
+ "go/doc": {
"AllDecls",
"AllMethods",
"Example",
@@ -3054,17 +3255,35 @@ var stdlib = map[string][]string{
"Type",
"Value",
},
- "go/format": []string{
+ "go/doc/comment": {
+ "Block",
+ "Code",
+ "DefaultLookupPackage",
+ "Doc",
+ "DocLink",
+ "Heading",
+ "Italic",
+ "Link",
+ "LinkDef",
+ "List",
+ "ListItem",
+ "Paragraph",
+ "Parser",
+ "Plain",
+ "Printer",
+ "Text",
+ },
+ "go/format": {
"Node",
"Source",
},
- "go/importer": []string{
+ "go/importer": {
"Default",
"For",
"ForCompiler",
"Lookup",
},
- "go/parser": []string{
+ "go/parser": {
"AllErrors",
"DeclarationErrors",
"ImportsOnly",
@@ -3079,7 +3298,7 @@ var stdlib = map[string][]string{
"SpuriousErrors",
"Trace",
},
- "go/printer": []string{
+ "go/printer": {
"CommentedNode",
"Config",
"Fprint",
@@ -3089,7 +3308,7 @@ var stdlib = map[string][]string{
"TabIndent",
"UseSpaces",
},
- "go/scanner": []string{
+ "go/scanner": {
"Error",
"ErrorHandler",
"ErrorList",
@@ -3098,7 +3317,7 @@ var stdlib = map[string][]string{
"ScanComments",
"Scanner",
},
- "go/token": []string{
+ "go/token": {
"ADD",
"ADD_ASSIGN",
"AND",
@@ -3196,7 +3415,7 @@ var stdlib = map[string][]string{
"XOR",
"XOR_ASSIGN",
},
- "go/types": []string{
+ "go/types": {
"ArgumentError",
"Array",
"AssertableTo",
@@ -3302,6 +3521,7 @@ var stdlib = map[string][]string{
"RecvOnly",
"RelativeTo",
"Rune",
+ "Satisfies",
"Scope",
"Selection",
"SelectionKind",
@@ -3347,17 +3567,17 @@ var stdlib = map[string][]string{
"WriteSignature",
"WriteType",
},
- "hash": []string{
+ "hash": {
"Hash",
"Hash32",
"Hash64",
},
- "hash/adler32": []string{
+ "hash/adler32": {
"Checksum",
"New",
"Size",
},
- "hash/crc32": []string{
+ "hash/crc32": {
"Castagnoli",
"Checksum",
"ChecksumIEEE",
@@ -3371,7 +3591,7 @@ var stdlib = map[string][]string{
"Table",
"Update",
},
- "hash/crc64": []string{
+ "hash/crc64": {
"Checksum",
"ECMA",
"ISO",
@@ -3381,7 +3601,7 @@ var stdlib = map[string][]string{
"Table",
"Update",
},
- "hash/fnv": []string{
+ "hash/fnv": {
"New128",
"New128a",
"New32",
@@ -3389,16 +3609,18 @@ var stdlib = map[string][]string{
"New64",
"New64a",
},
- "hash/maphash": []string{
+ "hash/maphash": {
+ "Bytes",
"Hash",
"MakeSeed",
"Seed",
+ "String",
},
- "html": []string{
+ "html": {
"EscapeString",
"UnescapeString",
},
- "html/template": []string{
+ "html/template": {
"CSS",
"ErrAmbigContext",
"ErrBadHTML",
@@ -3436,7 +3658,7 @@ var stdlib = map[string][]string{
"URL",
"URLQueryEscaper",
},
- "image": []string{
+ "image": {
"Alpha",
"Alpha16",
"Black",
@@ -3489,7 +3711,7 @@ var stdlib = map[string][]string{
"ZP",
"ZR",
},
- "image/color": []string{
+ "image/color": {
"Alpha",
"Alpha16",
"Alpha16Model",
@@ -3525,11 +3747,11 @@ var stdlib = map[string][]string{
"YCbCrModel",
"YCbCrToRGB",
},
- "image/color/palette": []string{
+ "image/color/palette": {
"Plan9",
"WebSafe",
},
- "image/draw": []string{
+ "image/draw": {
"Draw",
"DrawMask",
"Drawer",
@@ -3541,7 +3763,7 @@ var stdlib = map[string][]string{
"RGBA64Image",
"Src",
},
- "image/gif": []string{
+ "image/gif": {
"Decode",
"DecodeAll",
"DecodeConfig",
@@ -3553,7 +3775,7 @@ var stdlib = map[string][]string{
"GIF",
"Options",
},
- "image/jpeg": []string{
+ "image/jpeg": {
"Decode",
"DecodeConfig",
"DefaultQuality",
@@ -3563,7 +3785,7 @@ var stdlib = map[string][]string{
"Reader",
"UnsupportedError",
},
- "image/png": []string{
+ "image/png": {
"BestCompression",
"BestSpeed",
"CompressionLevel",
@@ -3578,11 +3800,11 @@ var stdlib = map[string][]string{
"NoCompression",
"UnsupportedError",
},
- "index/suffixarray": []string{
+ "index/suffixarray": {
"Index",
"New",
},
- "io": []string{
+ "io": {
"ByteReader",
"ByteScanner",
"ByteWriter",
@@ -3601,8 +3823,10 @@ var stdlib = map[string][]string{
"LimitedReader",
"MultiReader",
"MultiWriter",
+ "NewOffsetWriter",
"NewSectionReader",
"NopCloser",
+ "OffsetWriter",
"Pipe",
"PipeReader",
"PipeWriter",
@@ -3634,7 +3858,7 @@ var stdlib = map[string][]string{
"WriterAt",
"WriterTo",
},
- "io/fs": []string{
+ "io/fs": {
"DirEntry",
"ErrClosed",
"ErrExist",
@@ -3669,6 +3893,7 @@ var stdlib = map[string][]string{
"ReadDirFile",
"ReadFile",
"ReadFileFS",
+ "SkipAll",
"SkipDir",
"Stat",
"StatFS",
@@ -3678,7 +3903,7 @@ var stdlib = map[string][]string{
"WalkDir",
"WalkDirFunc",
},
- "io/ioutil": []string{
+ "io/ioutil": {
"Discard",
"NopCloser",
"ReadAll",
@@ -3688,7 +3913,7 @@ var stdlib = map[string][]string{
"TempFile",
"WriteFile",
},
- "log": []string{
+ "log": {
"Default",
"Fatal",
"Fatalf",
@@ -3717,7 +3942,7 @@ var stdlib = map[string][]string{
"SetPrefix",
"Writer",
},
- "log/syslog": []string{
+ "log/syslog": {
"Dial",
"LOG_ALERT",
"LOG_AUTH",
@@ -3752,7 +3977,7 @@ var stdlib = map[string][]string{
"Priority",
"Writer",
},
- "math": []string{
+ "math": {
"Abs",
"Acos",
"Acosh",
@@ -3851,7 +4076,7 @@ var stdlib = map[string][]string{
"Y1",
"Yn",
},
- "math/big": []string{
+ "math/big": {
"Above",
"Accuracy",
"AwayFromZero",
@@ -3878,7 +4103,7 @@ var stdlib = map[string][]string{
"ToZero",
"Word",
},
- "math/bits": []string{
+ "math/bits": {
"Add",
"Add32",
"Add64",
@@ -3930,7 +4155,7 @@ var stdlib = map[string][]string{
"TrailingZeros8",
"UintSize",
},
- "math/cmplx": []string{
+ "math/cmplx": {
"Abs",
"Acos",
"Acosh",
@@ -3959,7 +4184,7 @@ var stdlib = map[string][]string{
"Tan",
"Tanh",
},
- "math/rand": []string{
+ "math/rand": {
"ExpFloat64",
"Float32",
"Float64",
@@ -3984,7 +4209,7 @@ var stdlib = map[string][]string{
"Uint64",
"Zipf",
},
- "mime": []string{
+ "mime": {
"AddExtensionType",
"BEncoding",
"ErrInvalidMediaParameter",
@@ -3996,7 +4221,7 @@ var stdlib = map[string][]string{
"WordDecoder",
"WordEncoder",
},
- "mime/multipart": []string{
+ "mime/multipart": {
"ErrMessageTooLarge",
"File",
"FileHeader",
@@ -4007,13 +4232,13 @@ var stdlib = map[string][]string{
"Reader",
"Writer",
},
- "mime/quotedprintable": []string{
+ "mime/quotedprintable": {
"NewReader",
"NewWriter",
"Reader",
"Writer",
},
- "net": []string{
+ "net": {
"Addr",
"AddrError",
"Buffers",
@@ -4039,6 +4264,7 @@ var stdlib = map[string][]string{
"FlagLoopback",
"FlagMulticast",
"FlagPointToPoint",
+ "FlagRunning",
"FlagUp",
"Flags",
"HardwareAddr",
@@ -4115,7 +4341,7 @@ var stdlib = map[string][]string{
"UnixListener",
"UnknownNetworkError",
},
- "net/http": []string{
+ "net/http": {
"AllowQuerySemicolons",
"CanonicalHeaderKey",
"Client",
@@ -4168,6 +4394,7 @@ var stdlib = map[string][]string{
"ListenAndServe",
"ListenAndServeTLS",
"LocalAddrContextKey",
+ "MaxBytesError",
"MaxBytesHandler",
"MaxBytesReader",
"MethodConnect",
@@ -4182,6 +4409,7 @@ var stdlib = map[string][]string{
"NewFileTransport",
"NewRequest",
"NewRequestWithContext",
+ "NewResponseController",
"NewServeMux",
"NoBody",
"NotFound",
@@ -4201,6 +4429,7 @@ var stdlib = map[string][]string{
"RedirectHandler",
"Request",
"Response",
+ "ResponseController",
"ResponseWriter",
"RoundTripper",
"SameSite",
@@ -4290,25 +4519,25 @@ var stdlib = map[string][]string{
"TrailerPrefix",
"Transport",
},
- "net/http/cgi": []string{
+ "net/http/cgi": {
"Handler",
"Request",
"RequestFromMap",
"Serve",
},
- "net/http/cookiejar": []string{
+ "net/http/cookiejar": {
"Jar",
"New",
"Options",
"PublicSuffixList",
},
- "net/http/fcgi": []string{
+ "net/http/fcgi": {
"ErrConnClosed",
"ErrRequestAborted",
"ProcessEnv",
"Serve",
},
- "net/http/httptest": []string{
+ "net/http/httptest": {
"DefaultRemoteAddr",
"NewRecorder",
"NewRequest",
@@ -4318,7 +4547,7 @@ var stdlib = map[string][]string{
"ResponseRecorder",
"Server",
},
- "net/http/httptrace": []string{
+ "net/http/httptrace": {
"ClientTrace",
"ContextClientTrace",
"DNSDoneInfo",
@@ -4327,7 +4556,7 @@ var stdlib = map[string][]string{
"WithClientTrace",
"WroteRequestInfo",
},
- "net/http/httputil": []string{
+ "net/http/httputil": {
"BufferPool",
"ClientConn",
"DumpRequest",
@@ -4343,10 +4572,11 @@ var stdlib = map[string][]string{
"NewProxyClientConn",
"NewServerConn",
"NewSingleHostReverseProxy",
+ "ProxyRequest",
"ReverseProxy",
"ServerConn",
},
- "net/http/pprof": []string{
+ "net/http/pprof": {
"Cmdline",
"Handler",
"Index",
@@ -4354,7 +4584,7 @@ var stdlib = map[string][]string{
"Symbol",
"Trace",
},
- "net/mail": []string{
+ "net/mail": {
"Address",
"AddressParser",
"ErrHeaderNotPresent",
@@ -4365,7 +4595,7 @@ var stdlib = map[string][]string{
"ParseDate",
"ReadMessage",
},
- "net/netip": []string{
+ "net/netip": {
"Addr",
"AddrFrom16",
"AddrFrom4",
@@ -4374,6 +4604,8 @@ var stdlib = map[string][]string{
"AddrPortFrom",
"IPv4Unspecified",
"IPv6LinkLocalAllNodes",
+ "IPv6LinkLocalAllRouters",
+ "IPv6Loopback",
"IPv6Unspecified",
"MustParseAddr",
"MustParseAddrPort",
@@ -4384,7 +4616,7 @@ var stdlib = map[string][]string{
"Prefix",
"PrefixFrom",
},
- "net/rpc": []string{
+ "net/rpc": {
"Accept",
"Call",
"Client",
@@ -4411,14 +4643,14 @@ var stdlib = map[string][]string{
"ServerCodec",
"ServerError",
},
- "net/rpc/jsonrpc": []string{
+ "net/rpc/jsonrpc": {
"Dial",
"NewClient",
"NewClientCodec",
"NewServerCodec",
"ServeConn",
},
- "net/smtp": []string{
+ "net/smtp": {
"Auth",
"CRAMMD5Auth",
"Client",
@@ -4428,7 +4660,7 @@ var stdlib = map[string][]string{
"SendMail",
"ServerInfo",
},
- "net/textproto": []string{
+ "net/textproto": {
"CanonicalMIMEHeaderKey",
"Conn",
"Dial",
@@ -4444,10 +4676,11 @@ var stdlib = map[string][]string{
"TrimString",
"Writer",
},
- "net/url": []string{
+ "net/url": {
"Error",
"EscapeError",
"InvalidHostError",
+ "JoinPath",
"Parse",
"ParseQuery",
"ParseRequestURI",
@@ -4461,7 +4694,7 @@ var stdlib = map[string][]string{
"Userinfo",
"Values",
},
- "os": []string{
+ "os": {
"Args",
"Chdir",
"Chmod",
@@ -4577,16 +4810,18 @@ var stdlib = map[string][]string{
"UserHomeDir",
"WriteFile",
},
- "os/exec": []string{
+ "os/exec": {
"Cmd",
"Command",
"CommandContext",
+ "ErrDot",
"ErrNotFound",
+ "ErrWaitDelay",
"Error",
"ExitError",
"LookPath",
},
- "os/signal": []string{
+ "os/signal": {
"Ignore",
"Ignored",
"Notify",
@@ -4594,7 +4829,7 @@ var stdlib = map[string][]string{
"Reset",
"Stop",
},
- "os/user": []string{
+ "os/user": {
"Current",
"Group",
"Lookup",
@@ -4607,7 +4842,7 @@ var stdlib = map[string][]string{
"UnknownUserIdError",
"User",
},
- "path": []string{
+ "path": {
"Base",
"Clean",
"Dir",
@@ -4618,7 +4853,7 @@ var stdlib = map[string][]string{
"Match",
"Split",
},
- "path/filepath": []string{
+ "path/filepath": {
"Abs",
"Base",
"Clean",
@@ -4630,11 +4865,13 @@ var stdlib = map[string][]string{
"Glob",
"HasPrefix",
"IsAbs",
+ "IsLocal",
"Join",
"ListSeparator",
"Match",
"Rel",
"Separator",
+ "SkipAll",
"SkipDir",
"Split",
"SplitList",
@@ -4644,12 +4881,12 @@ var stdlib = map[string][]string{
"WalkDir",
"WalkFunc",
},
- "plugin": []string{
+ "plugin": {
"Open",
"Plugin",
"Symbol",
},
- "reflect": []string{
+ "reflect": {
"Append",
"AppendSlice",
"Array",
@@ -4724,7 +4961,7 @@ var stdlib = map[string][]string{
"VisibleFields",
"Zero",
},
- "regexp": []string{
+ "regexp": {
"Compile",
"CompilePOSIX",
"Match",
@@ -4735,7 +4972,7 @@ var stdlib = map[string][]string{
"QuoteMeta",
"Regexp",
},
- "regexp/syntax": []string{
+ "regexp/syntax": {
"ClassNL",
"Compile",
"DotNL",
@@ -4756,9 +4993,11 @@ var stdlib = map[string][]string{
"ErrInvalidRepeatOp",
"ErrInvalidRepeatSize",
"ErrInvalidUTF8",
+ "ErrLarge",
"ErrMissingBracket",
"ErrMissingParen",
"ErrMissingRepeatArgument",
+ "ErrNestingDepth",
"ErrTrailingBackslash",
"ErrUnexpectedParen",
"Error",
@@ -4813,7 +5052,7 @@ var stdlib = map[string][]string{
"UnicodeGroups",
"WasDollar",
},
- "runtime": []string{
+ "runtime": {
"BlockProfile",
"BlockProfileRecord",
"Breakpoint",
@@ -4861,11 +5100,19 @@ var stdlib = map[string][]string{
"UnlockOSThread",
"Version",
},
- "runtime/cgo": []string{
+ "runtime/cgo": {
"Handle",
+ "Incomplete",
"NewHandle",
},
- "runtime/debug": []string{
+ "runtime/coverage": {
+ "ClearCounters",
+ "WriteCounters",
+ "WriteCountersDir",
+ "WriteMeta",
+ "WriteMetaDir",
+ },
+ "runtime/debug": {
"BuildInfo",
"BuildSetting",
"FreeOSMemory",
@@ -4878,12 +5125,13 @@ var stdlib = map[string][]string{
"SetGCPercent",
"SetMaxStack",
"SetMaxThreads",
+ "SetMemoryLimit",
"SetPanicOnFault",
"SetTraceback",
"Stack",
"WriteHeapDump",
},
- "runtime/metrics": []string{
+ "runtime/metrics": {
"All",
"Description",
"Float64Histogram",
@@ -4896,7 +5144,7 @@ var stdlib = map[string][]string{
"Value",
"ValueKind",
},
- "runtime/pprof": []string{
+ "runtime/pprof": {
"Do",
"ForLabels",
"Label",
@@ -4912,7 +5160,7 @@ var stdlib = map[string][]string{
"WithLabels",
"WriteHeapProfile",
},
- "runtime/trace": []string{
+ "runtime/trace": {
"IsEnabled",
"Log",
"Logf",
@@ -4924,7 +5172,8 @@ var stdlib = map[string][]string{
"Task",
"WithRegion",
},
- "sort": []string{
+ "sort": {
+ "Find",
"Float64Slice",
"Float64s",
"Float64sAreSorted",
@@ -4947,7 +5196,7 @@ var stdlib = map[string][]string{
"Strings",
"StringsAreSorted",
},
- "strconv": []string{
+ "strconv": {
"AppendBool",
"AppendFloat",
"AppendInt",
@@ -4987,7 +5236,7 @@ var stdlib = map[string][]string{
"Unquote",
"UnquoteChar",
},
- "strings": []string{
+ "strings": {
"Builder",
"Clone",
"Compare",
@@ -4996,6 +5245,8 @@ var stdlib = map[string][]string{
"ContainsRune",
"Count",
"Cut",
+ "CutPrefix",
+ "CutSuffix",
"EqualFold",
"Fields",
"FieldsFunc",
@@ -5041,7 +5292,7 @@ var stdlib = map[string][]string{
"TrimSpace",
"TrimSuffix",
},
- "sync": []string{
+ "sync": {
"Cond",
"Locker",
"Map",
@@ -5052,24 +5303,28 @@ var stdlib = map[string][]string{
"RWMutex",
"WaitGroup",
},
- "sync/atomic": []string{
+ "sync/atomic": {
"AddInt32",
"AddInt64",
"AddUint32",
"AddUint64",
"AddUintptr",
+ "Bool",
"CompareAndSwapInt32",
"CompareAndSwapInt64",
"CompareAndSwapPointer",
"CompareAndSwapUint32",
"CompareAndSwapUint64",
"CompareAndSwapUintptr",
+ "Int32",
+ "Int64",
"LoadInt32",
"LoadInt64",
"LoadPointer",
"LoadUint32",
"LoadUint64",
"LoadUintptr",
+ "Pointer",
"StoreInt32",
"StoreInt64",
"StorePointer",
@@ -5082,9 +5337,12 @@ var stdlib = map[string][]string{
"SwapUint32",
"SwapUint64",
"SwapUintptr",
+ "Uint32",
+ "Uint64",
+ "Uintptr",
"Value",
},
- "syscall": []string{
+ "syscall": {
"AF_ALG",
"AF_APPLETALK",
"AF_ARP",
@@ -5158,6 +5416,7 @@ var stdlib = map[string][]string{
"AF_TIPC",
"AF_UNIX",
"AF_UNSPEC",
+ "AF_UTUN",
"AF_VENDOR00",
"AF_VENDOR01",
"AF_VENDOR02",
@@ -5496,20 +5755,25 @@ var stdlib = map[string][]string{
"CLOCAL",
"CLONE_CHILD_CLEARTID",
"CLONE_CHILD_SETTID",
+ "CLONE_CLEAR_SIGHAND",
"CLONE_CSIGNAL",
"CLONE_DETACHED",
"CLONE_FILES",
"CLONE_FS",
+ "CLONE_INTO_CGROUP",
"CLONE_IO",
+ "CLONE_NEWCGROUP",
"CLONE_NEWIPC",
"CLONE_NEWNET",
"CLONE_NEWNS",
"CLONE_NEWPID",
+ "CLONE_NEWTIME",
"CLONE_NEWUSER",
"CLONE_NEWUTS",
"CLONE_PARENT",
"CLONE_PARENT_SETTID",
"CLONE_PID",
+ "CLONE_PIDFD",
"CLONE_PTRACE",
"CLONE_SETTLS",
"CLONE_SIGHAND",
@@ -6052,6 +6316,7 @@ var stdlib = map[string][]string{
"EPROTONOSUPPORT",
"EPROTOTYPE",
"EPWROFF",
+ "EQFULL",
"ERANGE",
"EREMCHG",
"EREMOTE",
@@ -6478,6 +6743,7 @@ var stdlib = map[string][]string{
"F_DUPFD",
"F_DUPFD_CLOEXEC",
"F_EXLCK",
+ "F_FINDSIGS",
"F_FLUSH_DATA",
"F_FREEZE_FS",
"F_FSCTL",
@@ -6488,6 +6754,7 @@ var stdlib = map[string][]string{
"F_FSPRIV",
"F_FSVOID",
"F_FULLFSYNC",
+ "F_GETCODEDIR",
"F_GETFD",
"F_GETFL",
"F_GETLEASE",
@@ -6501,6 +6768,7 @@ var stdlib = map[string][]string{
"F_GETPATH_MTMINFO",
"F_GETPIPE_SZ",
"F_GETPROTECTIONCLASS",
+ "F_GETPROTECTIONLEVEL",
"F_GETSIG",
"F_GLOBAL_NOCACHE",
"F_LOCK",
@@ -6533,6 +6801,7 @@ var stdlib = map[string][]string{
"F_SETLK64",
"F_SETLKW",
"F_SETLKW64",
+ "F_SETLKWTIMEOUT",
"F_SETLK_REMOTE",
"F_SETNOSIGPIPE",
"F_SETOWN",
@@ -6542,9 +6811,11 @@ var stdlib = map[string][]string{
"F_SETSIG",
"F_SETSIZE",
"F_SHLCK",
+ "F_SINGLE_WRITER",
"F_TEST",
"F_THAW_FS",
"F_TLOCK",
+ "F_TRANSCODEKEY",
"F_ULOCK",
"F_UNLCK",
"F_UNLCKSYS",
@@ -7740,12 +8011,20 @@ var stdlib = map[string][]string{
"NOFLSH",
"NOTE_ABSOLUTE",
"NOTE_ATTRIB",
+ "NOTE_BACKGROUND",
"NOTE_CHILD",
+ "NOTE_CRITICAL",
"NOTE_DELETE",
"NOTE_EOF",
"NOTE_EXEC",
"NOTE_EXIT",
"NOTE_EXITSTATUS",
+ "NOTE_EXIT_CSERROR",
+ "NOTE_EXIT_DECRYPTFAIL",
+ "NOTE_EXIT_DETAIL",
+ "NOTE_EXIT_DETAIL_MASK",
+ "NOTE_EXIT_MEMORY",
+ "NOTE_EXIT_REPARENTED",
"NOTE_EXTEND",
"NOTE_FFAND",
"NOTE_FFCOPY",
@@ -7754,6 +8033,7 @@ var stdlib = map[string][]string{
"NOTE_FFNOP",
"NOTE_FFOR",
"NOTE_FORK",
+ "NOTE_LEEWAY",
"NOTE_LINK",
"NOTE_LOWAT",
"NOTE_NONE",
@@ -7832,6 +8112,7 @@ var stdlib = map[string][]string{
"O_CREAT",
"O_DIRECT",
"O_DIRECTORY",
+ "O_DP_GETRAWENCRYPTED",
"O_DSYNC",
"O_EVTONLY",
"O_EXCL",
@@ -8121,6 +8402,7 @@ var stdlib = map[string][]string{
"RLIMIT_AS",
"RLIMIT_CORE",
"RLIMIT_CPU",
+ "RLIMIT_CPU_USAGE_MONITOR",
"RLIMIT_DATA",
"RLIMIT_FSIZE",
"RLIMIT_NOFILE",
@@ -8233,9 +8515,11 @@ var stdlib = map[string][]string{
"RTF_PROTO1",
"RTF_PROTO2",
"RTF_PROTO3",
+ "RTF_PROXY",
"RTF_REINSTATE",
"RTF_REJECT",
"RTF_RNH_LOCKED",
+ "RTF_ROUTER",
"RTF_SOURCE",
"RTF_SRC",
"RTF_STATIC",
@@ -8754,6 +9038,7 @@ var stdlib = map[string][]string{
"SO_NO_OFFLOAD",
"SO_NP_EXTENSIONS",
"SO_NREAD",
+ "SO_NUMRCVPKT",
"SO_NWRITE",
"SO_OOBINLINE",
"SO_OVERFLOWED",
@@ -8923,6 +9208,7 @@ var stdlib = map[string][]string{
"SYS_CREAT",
"SYS_CREATE_MODULE",
"SYS_CSOPS",
+ "SYS_CSOPS_AUDITTOKEN",
"SYS_DELETE",
"SYS_DELETE_MODULE",
"SYS_DUP",
@@ -9109,6 +9395,7 @@ var stdlib = map[string][]string{
"SYS_JAIL_GET",
"SYS_JAIL_REMOVE",
"SYS_JAIL_SET",
+ "SYS_KAS_INFO",
"SYS_KDEBUG_TRACE",
"SYS_KENV",
"SYS_KEVENT",
@@ -9136,6 +9423,7 @@ var stdlib = map[string][]string{
"SYS_LCHMOD",
"SYS_LCHOWN",
"SYS_LCHOWN32",
+ "SYS_LEDGER",
"SYS_LGETFH",
"SYS_LGETXATTR",
"SYS_LINK",
@@ -9232,6 +9520,7 @@ var stdlib = map[string][]string{
"SYS_OPENAT",
"SYS_OPENBSD_POLL",
"SYS_OPEN_BY_HANDLE_AT",
+ "SYS_OPEN_DPROTECTED_NP",
"SYS_OPEN_EXTENDED",
"SYS_OPEN_NOCANCEL",
"SYS_OVADVISE",
@@ -9864,6 +10153,7 @@ var stdlib = map[string][]string{
"TCP_CONNECTIONTIMEOUT",
"TCP_CORK",
"TCP_DEFER_ACCEPT",
+ "TCP_ENABLE_ECN",
"TCP_INFO",
"TCP_KEEPALIVE",
"TCP_KEEPCNT",
@@ -9886,11 +10176,13 @@ var stdlib = map[string][]string{
"TCP_NODELAY",
"TCP_NOOPT",
"TCP_NOPUSH",
+ "TCP_NOTSENT_LOWAT",
"TCP_NSTATES",
"TCP_QUICKACK",
"TCP_RXT_CONNDROPTIME",
"TCP_RXT_FINDROP",
"TCP_SACK_ENABLE",
+ "TCP_SENDMOREACKS",
"TCP_SYNCNT",
"TCP_VENDOR",
"TCP_WINDOW_CLAMP",
@@ -10234,7 +10526,7 @@ var stdlib = map[string][]string{
"XP1_UNI_RECV",
"XP1_UNI_SEND",
},
- "syscall/js": []string{
+ "syscall/js": {
"CopyBytesToGo",
"CopyBytesToJS",
"Error",
@@ -10256,7 +10548,7 @@ var stdlib = map[string][]string{
"ValueError",
"ValueOf",
},
- "testing": []string{
+ "testing": {
"AllocsPerRun",
"B",
"Benchmark",
@@ -10284,12 +10576,12 @@ var stdlib = map[string][]string{
"TB",
"Verbose",
},
- "testing/fstest": []string{
+ "testing/fstest": {
"MapFS",
"MapFile",
"TestFS",
},
- "testing/iotest": []string{
+ "testing/iotest": {
"DataErrReader",
"ErrReader",
"ErrTimeout",
@@ -10301,7 +10593,7 @@ var stdlib = map[string][]string{
"TimeoutReader",
"TruncateWriter",
},
- "testing/quick": []string{
+ "testing/quick": {
"Check",
"CheckEqual",
"CheckEqualError",
@@ -10311,7 +10603,7 @@ var stdlib = map[string][]string{
"SetupError",
"Value",
},
- "text/scanner": []string{
+ "text/scanner": {
"Char",
"Comment",
"EOF",
@@ -10334,7 +10626,7 @@ var stdlib = map[string][]string{
"String",
"TokenString",
},
- "text/tabwriter": []string{
+ "text/tabwriter": {
"AlignRight",
"Debug",
"DiscardEmptyColumns",
@@ -10345,7 +10637,7 @@ var stdlib = map[string][]string{
"TabIndent",
"Writer",
},
- "text/template": []string{
+ "text/template": {
"ExecError",
"FuncMap",
"HTMLEscape",
@@ -10363,7 +10655,7 @@ var stdlib = map[string][]string{
"Template",
"URLQueryEscaper",
},
- "text/template/parse": []string{
+ "text/template/parse": {
"ActionNode",
"BoolNode",
"BranchNode",
@@ -10419,13 +10711,15 @@ var stdlib = map[string][]string{
"VariableNode",
"WithNode",
},
- "time": []string{
+ "time": {
"ANSIC",
"After",
"AfterFunc",
"April",
"August",
"Date",
+ "DateOnly",
+ "DateTime",
"December",
"Duration",
"February",
@@ -10480,6 +10774,7 @@ var stdlib = map[string][]string{
"Tick",
"Ticker",
"Time",
+ "TimeOnly",
"Timer",
"Tuesday",
"UTC",
@@ -10491,7 +10786,7 @@ var stdlib = map[string][]string{
"Wednesday",
"Weekday",
},
- "unicode": []string{
+ "unicode": {
"ASCII_Hex_Digit",
"Adlam",
"Ahom",
@@ -10777,14 +11072,15 @@ var stdlib = map[string][]string{
"Zp",
"Zs",
},
- "unicode/utf16": []string{
+ "unicode/utf16": {
+ "AppendRune",
"Decode",
"DecodeRune",
"Encode",
"EncodeRune",
"IsSurrogate",
},
- "unicode/utf8": []string{
+ "unicode/utf8": {
"AppendRune",
"DecodeLastRune",
"DecodeLastRuneInString",
@@ -10805,11 +11101,15 @@ var stdlib = map[string][]string{
"ValidRune",
"ValidString",
},
- "unsafe": []string{
+ "unsafe": {
+ "Add",
"Alignof",
- "ArbitraryType",
"Offsetof",
"Pointer",
"Sizeof",
+ "Slice",
+ "SliceData",
+ "String",
+ "StringData",
},
}
diff --git a/internal/jsonrpc2/conn.go b/internal/jsonrpc2/conn.go
index ca7752d66..529cfa5de 100644
--- a/internal/jsonrpc2/conn.go
+++ b/internal/jsonrpc2/conn.go
@@ -13,7 +13,7 @@ import (
"golang.org/x/tools/internal/event"
"golang.org/x/tools/internal/event/label"
- "golang.org/x/tools/internal/lsp/debug/tag"
+ "golang.org/x/tools/internal/event/tag"
)
// Conn is the common interface to jsonrpc clients and servers.
diff --git a/internal/jsonrpc2/messages.go b/internal/jsonrpc2/messages.go
index c29a0e851..58d285d99 100644
--- a/internal/jsonrpc2/messages.go
+++ b/internal/jsonrpc2/messages.go
@@ -6,9 +6,8 @@ package jsonrpc2
import (
"encoding/json"
+ "errors"
"fmt"
-
- errors "golang.org/x/xerrors"
)
// Message is the interface to all jsonrpc2 message types.
diff --git a/internal/jsonrpc2/serve.go b/internal/jsonrpc2/serve.go
index d58797152..cfbcbcb02 100644
--- a/internal/jsonrpc2/serve.go
+++ b/internal/jsonrpc2/serve.go
@@ -6,13 +6,14 @@ package jsonrpc2
import (
"context"
+ "errors"
"io"
+ "math"
"net"
"os"
"time"
"golang.org/x/tools/internal/event"
- errors "golang.org/x/xerrors"
)
// NOTE: This file provides an experimental API for serving multiple remote
@@ -100,7 +101,7 @@ func Serve(ctx context.Context, ln net.Listener, server StreamServer, idleTimeou
}()
// Max duration: ~290 years; surely that's long enough.
- const forever = 1<<63 - 1
+ const forever = math.MaxInt64
if idleTimeout <= 0 {
idleTimeout = forever
}
diff --git a/internal/jsonrpc2/servertest/servertest.go b/internal/jsonrpc2/servertest/servertest.go
index 392e084a9..37f8475be 100644
--- a/internal/jsonrpc2/servertest/servertest.go
+++ b/internal/jsonrpc2/servertest/servertest.go
@@ -50,7 +50,7 @@ func NewTCPServer(ctx context.Context, server jsonrpc2.StreamServer, framer json
// Connect dials the test server and returns a jsonrpc2 Connection that is
// ready for use.
-func (s *TCPServer) Connect(ctx context.Context) jsonrpc2.Conn {
+func (s *TCPServer) Connect(_ context.Context) jsonrpc2.Conn {
netConn, err := net.Dial("tcp", s.Addr)
if err != nil {
panic(fmt.Sprintf("servertest: failed to connect to test instance: %v", err))
@@ -68,7 +68,7 @@ type PipeServer struct {
}
// NewPipeServer returns a test server that can be connected to via io.Pipes.
-func NewPipeServer(ctx context.Context, server jsonrpc2.StreamServer, framer jsonrpc2.Framer) *PipeServer {
+func NewPipeServer(server jsonrpc2.StreamServer, framer jsonrpc2.Framer) *PipeServer {
if framer == nil {
framer = jsonrpc2.NewRawStream
}
diff --git a/internal/jsonrpc2/servertest/servertest_test.go b/internal/jsonrpc2/servertest/servertest_test.go
index 38fa21a24..1780d4f91 100644
--- a/internal/jsonrpc2/servertest/servertest_test.go
+++ b/internal/jsonrpc2/servertest/servertest_test.go
@@ -26,7 +26,7 @@ func TestTestServer(t *testing.T) {
server := jsonrpc2.HandlerServer(fakeHandler)
tcpTS := NewTCPServer(ctx, server, nil)
defer tcpTS.Close()
- pipeTS := NewPipeServer(ctx, server, nil)
+ pipeTS := NewPipeServer(server, nil)
defer pipeTS.Close()
tests := []struct {
diff --git a/internal/jsonrpc2/wire.go b/internal/jsonrpc2/wire.go
index d805f5793..ac39f1601 100644
--- a/internal/jsonrpc2/wire.go
+++ b/internal/jsonrpc2/wire.go
@@ -33,7 +33,7 @@ var (
ErrServerOverloaded = NewError(-32000, "JSON RPC overloaded")
)
-// wireRequest is sent to a server to represent a Call or Notify operaton.
+// wireRequest is sent to a server to represent a Call or Notify operation.
type wireRequest struct {
// VersionTag is always encoded as the string "2.0"
VersionTag wireVersionTag `json:"jsonrpc"`
diff --git a/internal/jsonrpc2_v2/conn.go b/internal/jsonrpc2_v2/conn.go
index 018175e88..04d1445cc 100644
--- a/internal/jsonrpc2_v2/conn.go
+++ b/internal/jsonrpc2_v2/conn.go
@@ -7,14 +7,17 @@ package jsonrpc2
import (
"context"
"encoding/json"
+ "errors"
"fmt"
"io"
+ "sync"
"sync/atomic"
+ "time"
"golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/event/keys"
"golang.org/x/tools/internal/event/label"
- "golang.org/x/tools/internal/lsp/debug/tag"
- errors "golang.org/x/xerrors"
+ "golang.org/x/tools/internal/event/tag"
)
// Binder builds a connection configuration.
@@ -24,10 +27,21 @@ import (
type Binder interface {
// Bind returns the ConnectionOptions to use when establishing the passed-in
// Connection.
- // The connection is not ready to use when Bind is called.
- Bind(context.Context, *Connection) (ConnectionOptions, error)
+ //
+ // The connection is not ready to use when Bind is called,
+ // but Bind may close it without reading or writing to it.
+ Bind(context.Context, *Connection) ConnectionOptions
}
+// A BinderFunc implements the Binder interface for a standalone Bind function.
+type BinderFunc func(context.Context, *Connection) ConnectionOptions
+
+func (f BinderFunc) Bind(ctx context.Context, c *Connection) ConnectionOptions {
+ return f(ctx, c)
+}
+
+var _ Binder = BinderFunc(nil)
+
// ConnectionOptions holds the options for new connections.
type ConnectionOptions struct {
// Framer allows control over the message framing and encoding.
@@ -39,6 +53,10 @@ type ConnectionOptions struct {
// Handler is used as the queued message handler for inbound messages.
// If nil, all responses will be ErrNotHandled.
Handler Handler
+ // OnInternalError, if non-nil, is called with any internal errors that occur
+ // while serving the connection, such as protocol errors or invariant
+ // violations. (If nil, internal errors result in panics.)
+ OnInternalError func(error)
}
// Connection manages the jsonrpc2 protocol, connecting responses back to their
@@ -46,102 +64,244 @@ type ConnectionOptions struct {
// Connection is bidirectional; it does not have a designated server or client
// end.
type Connection struct {
- seq int64 // must only be accessed using atomic operations
- closer io.Closer
- writerBox chan Writer
- outgoingBox chan map[ID]chan<- *Response
- incomingBox chan map[ID]*incoming
- async *async
+ seq int64 // must only be accessed using atomic operations
+
+ stateMu sync.Mutex
+ state inFlightState // accessed only in updateInFlight
+ done chan struct{} // closed (under stateMu) when state.closed is true and all goroutines have completed
+
+ writer chan Writer // 1-buffered; stores the writer when not in use
+
+ handler Handler
+
+ onInternalError func(error)
+ onDone func()
}
-type AsyncCall struct {
- id ID
- response chan *Response // the channel a response will be delivered on
- resultBox chan asyncResult
- endSpan func() // close the tracing span when all processing for the message is complete
+// inFlightState records the state of the incoming and outgoing calls on a
+// Connection.
+type inFlightState struct {
+ connClosing bool // true when the Connection's Close method has been called
+ reading bool // true while the readIncoming goroutine is running
+ readErr error // non-nil when the readIncoming goroutine exits (typically io.EOF)
+ writeErr error // non-nil if a call to the Writer has failed with a non-canceled Context
+
+ // closer shuts down and cleans up the Reader and Writer state, ideally
+ // interrupting any Read or Write call that is currently blocked. It is closed
+ // when the state is idle and one of: connClosing is true, readErr is non-nil,
+ // or writeErr is non-nil.
+ //
+ // After the closer has been invoked, the closer field is set to nil
+ // and the closeErr field is simultaneously set to its result.
+ closer io.Closer
+ closeErr error // error returned from closer.Close
+
+ outgoingCalls map[ID]*AsyncCall // calls only
+ outgoingNotifications int // # of notifications awaiting "write"
+
+ // incoming stores the total number of incoming calls and notifications
+ // that have not yet written or processed a result.
+ incoming int
+
+ incomingByID map[ID]*incomingRequest // calls only
+
+ // handlerQueue stores the backlog of calls and notifications that were not
+ // already handled by a preempter.
+ // The queue does not include the request currently being handled (if any).
+ handlerQueue []*incomingRequest
+ handlerRunning bool
+}
+
+// updateInFlight locks the state of the connection's in-flight requests, allows
+// f to mutate that state, and closes the connection if it is idle and either
+// is closing or has a read or write error.
+func (c *Connection) updateInFlight(f func(*inFlightState)) {
+ c.stateMu.Lock()
+ defer c.stateMu.Unlock()
+
+ s := &c.state
+
+ f(s)
+
+ select {
+ case <-c.done:
+ // The connection was already completely done at the start of this call to
+ // updateInFlight, so it must remain so. (The call to f should have noticed
+ // that and avoided making any updates that would cause the state to be
+ // non-idle.)
+ if !s.idle() {
+ panic("jsonrpc2_v2: updateInFlight transitioned to non-idle when already done")
+ }
+ return
+ default:
+ }
+
+ if s.idle() && s.shuttingDown(ErrUnknown) != nil {
+ if s.closer != nil {
+ s.closeErr = s.closer.Close()
+ s.closer = nil // prevent duplicate Close calls
+ }
+ if s.reading {
+ // The readIncoming goroutine is still running. Our call to Close should
+ // cause it to exit soon, at which point it will make another call to
+ // updateInFlight, set s.reading to false, and mark the Connection done.
+ } else {
+ // The readIncoming goroutine has exited, or never started to begin with.
+ // Since everything else is idle, we're completely done.
+ if c.onDone != nil {
+ c.onDone()
+ }
+ close(c.done)
+ }
+ }
}
-type asyncResult struct {
- result []byte
- err error
+// idle reports whether the connection is in a state with no pending calls or
+// notifications.
+//
+// If idle returns true, the readIncoming goroutine may still be running,
+// but no other goroutines are doing work on behalf of the connection.
+func (s *inFlightState) idle() bool {
+ return len(s.outgoingCalls) == 0 && s.outgoingNotifications == 0 && s.incoming == 0 && !s.handlerRunning
}
-// incoming is used to track an incoming request as it is being handled
-type incoming struct {
- request *Request // the request being processed
- baseCtx context.Context // a base context for the message processing
- done func() // a function called when all processing for the message is complete
- handleCtx context.Context // the context for handling the message, child of baseCtx
- cancel func() // a function that cancels the handling context
+// shuttingDown reports whether the connection is in a state that should
+// disallow new (incoming and outgoing) calls. It returns either nil or
+// an error that is or wraps the provided errClosing.
+func (s *inFlightState) shuttingDown(errClosing error) error {
+ if s.connClosing {
+ // If Close has been called explicitly, it doesn't matter what state the
+ // Reader and Writer are in: we shouldn't be starting new work because the
+ // caller told us not to start new work.
+ return errClosing
+ }
+ if s.readErr != nil {
+ // If the read side of the connection is broken, we cannot read new call
+ // requests, and cannot read responses to our outgoing calls.
+ return fmt.Errorf("%w: %v", errClosing, s.readErr)
+ }
+ if s.writeErr != nil {
+ // If the write side of the connection is broken, we cannot write responses
+ // for incoming calls, and cannot write requests for outgoing calls.
+ return fmt.Errorf("%w: %v", errClosing, s.writeErr)
+ }
+ return nil
+}
+
+// incomingRequest is used to track an incoming request as it is being handled
+type incomingRequest struct {
+ *Request // the request being processed
+ ctx context.Context
+ cancel context.CancelFunc
+ endSpan func() // called (and set to nil) when the response is sent
}
// Bind returns the options unmodified.
-func (o ConnectionOptions) Bind(context.Context, *Connection) (ConnectionOptions, error) {
- return o, nil
+func (o ConnectionOptions) Bind(context.Context, *Connection) ConnectionOptions {
+ return o
}
// newConnection creates a new connection and runs it.
+//
// This is used by the Dial and Serve functions to build the actual connection.
-func newConnection(ctx context.Context, rwc io.ReadWriteCloser, binder Binder) (*Connection, error) {
+//
+// The connection is closed automatically (and its resources cleaned up) when
+// the last request has completed after the underlying ReadWriteCloser breaks,
+// but it may be stopped earlier by calling Close (for a clean shutdown).
+func newConnection(bindCtx context.Context, rwc io.ReadWriteCloser, binder Binder, onDone func()) *Connection {
+ // TODO: Should we create a new event span here?
+ // This will propagate cancellation from ctx; should it?
+ ctx := notDone{bindCtx}
+
c := &Connection{
- closer: rwc,
- writerBox: make(chan Writer, 1),
- outgoingBox: make(chan map[ID]chan<- *Response, 1),
- incomingBox: make(chan map[ID]*incoming, 1),
- async: newAsync(),
+ state: inFlightState{closer: rwc},
+ done: make(chan struct{}),
+ writer: make(chan Writer, 1),
+ onDone: onDone,
}
+ // It's tempting to set a finalizer on c to verify that the state has gone
+ // idle when the connection becomes unreachable. Unfortunately, the Binder
+ // interface makes that unsafe: it allows the Handler to close over the
+ // Connection, which could create a reference cycle that would cause the
+ // Connection to become uncollectable.
- options, err := binder.Bind(ctx, c)
- if err != nil {
- return nil, err
- }
- if options.Framer == nil {
- options.Framer = HeaderFramer()
- }
- if options.Preempter == nil {
- options.Preempter = defaultHandler{}
- }
- if options.Handler == nil {
- options.Handler = defaultHandler{}
- }
- c.outgoingBox <- make(map[ID]chan<- *Response)
- c.incomingBox <- make(map[ID]*incoming)
- // the goroutines started here will continue until the underlying stream is closed
- reader := options.Framer.Reader(rwc)
- readToQueue := make(chan *incoming)
- queueToDeliver := make(chan *incoming)
- go c.readIncoming(ctx, reader, readToQueue)
- go c.manageQueue(ctx, options.Preempter, readToQueue, queueToDeliver)
- go c.deliverMessages(ctx, options.Handler, queueToDeliver)
-
- // releaseing the writer must be the last thing we do in case any requests
- // are blocked waiting for the connection to be ready
- c.writerBox <- options.Framer.Writer(rwc)
- return c, nil
+ options := binder.Bind(bindCtx, c)
+ framer := options.Framer
+ if framer == nil {
+ framer = HeaderFramer()
+ }
+ c.handler = options.Handler
+ if c.handler == nil {
+ c.handler = defaultHandler{}
+ }
+ c.onInternalError = options.OnInternalError
+
+ c.writer <- framer.Writer(rwc)
+ reader := framer.Reader(rwc)
+
+ c.updateInFlight(func(s *inFlightState) {
+ select {
+ case <-c.done:
+ // Bind already closed the connection; don't start a goroutine to read it.
+ return
+ default:
+ }
+
+ // The goroutine started here will continue until the underlying stream is closed.
+ //
+ // (If the Binder closed the Connection already, this should error out and
+ // return almost immediately.)
+ s.reading = true
+ go c.readIncoming(ctx, reader, options.Preempter)
+ })
+ return c
}
// Notify invokes the target method but does not wait for a response.
// The params will be marshaled to JSON before sending over the wire, and will
// be handed to the method invoked.
-func (c *Connection) Notify(ctx context.Context, method string, params interface{}) error {
- notify, err := NewNotification(method, params)
- if err != nil {
- return errors.Errorf("marshaling notify parameters: %v", err)
- }
+func (c *Connection) Notify(ctx context.Context, method string, params interface{}) (err error) {
ctx, done := event.Start(ctx, method,
tag.Method.Of(method),
tag.RPCDirection.Of(tag.Outbound),
)
- event.Metric(ctx, tag.Started.Of(1))
- err = c.write(ctx, notify)
- switch {
- case err != nil:
- event.Label(ctx, tag.StatusCode.Of("ERROR"))
- default:
- event.Label(ctx, tag.StatusCode.Of("OK"))
+ attempted := false
+
+ defer func() {
+ labelStatus(ctx, err)
+ done()
+ if attempted {
+ c.updateInFlight(func(s *inFlightState) {
+ s.outgoingNotifications--
+ })
+ }
+ }()
+
+ c.updateInFlight(func(s *inFlightState) {
+ // If the connection is shutting down, allow outgoing notifications only if
+ // there is at least one call still in flight. The number of calls in flight
+ // cannot increase once shutdown begins, and allowing outgoing notifications
+ // may permit notifications that will cancel in-flight calls.
+ if len(s.outgoingCalls) == 0 && len(s.incomingByID) == 0 {
+ err = s.shuttingDown(ErrClientClosing)
+ if err != nil {
+ return
+ }
+ }
+ s.outgoingNotifications++
+ attempted = true
+ })
+ if err != nil {
+ return err
}
- done()
- return err
+
+ notify, err := NewNotification(method, params)
+ if err != nil {
+ return fmt.Errorf("marshaling notify parameters: %v", err)
+ }
+
+ event.Metric(ctx, tag.Started.Of(1))
+ return c.write(ctx, notify)
}
// Call invokes the target method and returns an object that can be used to await the response.
@@ -150,339 +310,503 @@ func (c *Connection) Notify(ctx context.Context, method string, params interface
// You do not have to wait for the response, it can just be ignored if not needed.
// If sending the call failed, the response will be ready and have the error in it.
func (c *Connection) Call(ctx context.Context, method string, params interface{}) *AsyncCall {
- result := &AsyncCall{
- id: Int64ID(atomic.AddInt64(&c.seq, 1)),
- resultBox: make(chan asyncResult, 1),
- }
- // generate a new request identifier
- call, err := NewCall(result.id, method, params)
- if err != nil {
- //set the result to failed
- result.resultBox <- asyncResult{err: errors.Errorf("marshaling call parameters: %w", err)}
- return result
- }
+ // Generate a new request identifier.
+ id := Int64ID(atomic.AddInt64(&c.seq, 1))
ctx, endSpan := event.Start(ctx, method,
tag.Method.Of(method),
tag.RPCDirection.Of(tag.Outbound),
- tag.RPCID.Of(fmt.Sprintf("%q", result.id)),
+ tag.RPCID.Of(fmt.Sprintf("%q", id)),
)
- result.endSpan = endSpan
+
+ ac := &AsyncCall{
+ id: id,
+ ready: make(chan struct{}),
+ ctx: ctx,
+ endSpan: endSpan,
+ }
+ // When this method returns, either ac is retired, or the request has been
+ // written successfully and the call is awaiting a response (to be provided by
+ // the readIncoming goroutine).
+
+ call, err := NewCall(ac.id, method, params)
+ if err != nil {
+ ac.retire(&Response{ID: id, Error: fmt.Errorf("marshaling call parameters: %w", err)})
+ return ac
+ }
+
+ c.updateInFlight(func(s *inFlightState) {
+ err = s.shuttingDown(ErrClientClosing)
+ if err != nil {
+ return
+ }
+ if s.outgoingCalls == nil {
+ s.outgoingCalls = make(map[ID]*AsyncCall)
+ }
+ s.outgoingCalls[ac.id] = ac
+ })
+ if err != nil {
+ ac.retire(&Response{ID: id, Error: err})
+ return ac
+ }
+
event.Metric(ctx, tag.Started.Of(1))
- // We have to add ourselves to the pending map before we send, otherwise we
- // are racing the response.
- // rchan is buffered in case the response arrives without a listener.
- result.response = make(chan *Response, 1)
- pending := <-c.outgoingBox
- pending[result.id] = result.response
- c.outgoingBox <- pending
- // now we are ready to send
if err := c.write(ctx, call); err != nil {
- // sending failed, we will never get a response, so deliver a fake one
- r, _ := NewResponse(result.id, nil, err)
- c.incomingResponse(r)
+ // Sending failed. We will never get a response, so deliver a fake one if it
+ // wasn't already retired by the connection breaking.
+ c.updateInFlight(func(s *inFlightState) {
+ if s.outgoingCalls[ac.id] == ac {
+ delete(s.outgoingCalls, ac.id)
+ ac.retire(&Response{ID: id, Error: err})
+ } else {
+ // ac was already retired by the readIncoming goroutine:
+ // perhaps our write raced with the Read side of the connection breaking.
+ }
+ })
}
- return result
+ return ac
+}
+
+type AsyncCall struct {
+ id ID
+ ready chan struct{} // closed after response has been set and span has been ended
+ response *Response
+ ctx context.Context // for event logging only
+ endSpan func() // close the tracing span when all processing for the message is complete
}
// ID used for this call.
// This can be used to cancel the call if needed.
-func (a *AsyncCall) ID() ID { return a.id }
+func (ac *AsyncCall) ID() ID { return ac.id }
// IsReady can be used to check if the result is already prepared.
// This is guaranteed to return true on a result for which Await has already
// returned, or a call that failed to send in the first place.
-func (a *AsyncCall) IsReady() bool {
+func (ac *AsyncCall) IsReady() bool {
select {
- case r := <-a.resultBox:
- a.resultBox <- r
+ case <-ac.ready:
return true
default:
return false
}
}
-// Await the results of a Call.
+// retire processes the response to the call.
+func (ac *AsyncCall) retire(response *Response) {
+ select {
+ case <-ac.ready:
+ panic(fmt.Sprintf("jsonrpc2: retire called twice for ID %v", ac.id))
+ default:
+ }
+
+ ac.response = response
+ labelStatus(ac.ctx, response.Error)
+ ac.endSpan()
+ // Allow the trace context, which may retain a lot of reachable values,
+ // to be garbage-collected.
+ ac.ctx, ac.endSpan = nil, nil
+
+ close(ac.ready)
+}
+
+// Await waits for (and decodes) the results of a Call.
// The response will be unmarshaled from JSON into the result.
-func (a *AsyncCall) Await(ctx context.Context, result interface{}) error {
- defer a.endSpan()
- var r asyncResult
+func (ac *AsyncCall) Await(ctx context.Context, result interface{}) error {
select {
- case response := <-a.response:
- // response just arrived, prepare the result
- switch {
- case response.Error != nil:
- r.err = response.Error
- event.Label(ctx, tag.StatusCode.Of("ERROR"))
- default:
- r.result = response.Result
- event.Label(ctx, tag.StatusCode.Of("OK"))
- }
- case r = <-a.resultBox:
- // result already available
case <-ctx.Done():
- event.Label(ctx, tag.StatusCode.Of("CANCELLED"))
return ctx.Err()
+ case <-ac.ready:
}
- // refill the box for the next caller
- a.resultBox <- r
- // and unpack the result
- if r.err != nil {
- return r.err
+ if ac.response.Error != nil {
+ return ac.response.Error
}
- if result == nil || len(r.result) == 0 {
+ if result == nil {
return nil
}
- return json.Unmarshal(r.result, result)
+ return json.Unmarshal(ac.response.Result, result)
}
// Respond delivers a response to an incoming Call.
//
// Respond must be called exactly once for any message for which a handler
// returns ErrAsyncResponse. It must not be called for any other message.
-func (c *Connection) Respond(id ID, result interface{}, rerr error) error {
- pending := <-c.incomingBox
- defer func() { c.incomingBox <- pending }()
- entry, found := pending[id]
- if !found {
- return nil
+func (c *Connection) Respond(id ID, result interface{}, err error) error {
+ var req *incomingRequest
+ c.updateInFlight(func(s *inFlightState) {
+ req = s.incomingByID[id]
+ })
+ if req == nil {
+ return c.internalErrorf("Request not found for ID %v", id)
+ }
+
+ if err == ErrAsyncResponse {
+ // Respond is supposed to supply the asynchronous response, so it would be
+ // confusing to call Respond with an error that promises to call Respond
+ // again.
+ err = c.internalErrorf("Respond called with ErrAsyncResponse for %q", req.Method)
}
- delete(pending, id)
- return c.respond(entry, result, rerr)
+ return c.processResult("Respond", req, result, err)
}
-// Cancel is used to cancel an inbound message by ID, it does not cancel
-// outgoing messages.
-// This is only used inside a message handler that is layering a
-// cancellation protocol on top of JSON RPC 2.
-// It will not complain if the ID is not a currently active message, and it will
-// not cause any messages that have not arrived yet with that ID to be
+// Cancel cancels the Context passed to the Handle call for the inbound message
+// with the given ID.
+//
+// Cancel will not complain if the ID is not a currently active message, and it
+// will not cause any messages that have not arrived yet with that ID to be
// cancelled.
func (c *Connection) Cancel(id ID) {
- pending := <-c.incomingBox
- defer func() { c.incomingBox <- pending }()
- if entry, found := pending[id]; found && entry.cancel != nil {
- entry.cancel()
- entry.cancel = nil
+ var req *incomingRequest
+ c.updateInFlight(func(s *inFlightState) {
+ req = s.incomingByID[id]
+ })
+ if req != nil {
+ req.cancel()
}
}
// Wait blocks until the connection is fully closed, but does not close it.
func (c *Connection) Wait() error {
- return c.async.wait()
+ var err error
+ <-c.done
+ c.updateInFlight(func(s *inFlightState) {
+ err = s.closeErr
+ })
+ return err
}
-// Close can be used to close the underlying stream, and then wait for the connection to
-// fully shut down.
-// This does not cancel in flight requests, but waits for them to gracefully complete.
+// Close stops accepting new requests, waits for in-flight requests and enqueued
+// Handle calls to complete, and then closes the underlying stream.
+//
+// After the start of a Close, notification requests (that lack IDs and do not
+// receive responses) will continue to be passed to the Preempter, but calls
+// with IDs will receive immediate responses with ErrServerClosing, and no new
+// requests (not even notifications!) will be enqueued to the Handler.
func (c *Connection) Close() error {
- // close the underlying stream
- if err := c.closer.Close(); err != nil && !isClosingError(err) {
- return err
- }
- // and then wait for it to cause the connection to close
- if err := c.Wait(); err != nil && !isClosingError(err) {
- return err
- }
- return nil
+ // Stop handling new requests, and interrupt the reader (by closing the
+ // connection) as soon as the active requests finish.
+ c.updateInFlight(func(s *inFlightState) { s.connClosing = true })
+
+ return c.Wait()
}
// readIncoming collects inbound messages from the reader and delivers them, either responding
// to outgoing calls or feeding requests to the queue.
-func (c *Connection) readIncoming(ctx context.Context, reader Reader, toQueue chan<- *incoming) {
- defer close(toQueue)
+func (c *Connection) readIncoming(ctx context.Context, reader Reader, preempter Preempter) {
+ var err error
for {
- // get the next message
- // no lock is needed, this is the only reader
- msg, n, err := reader.Read(ctx)
+ var (
+ msg Message
+ n int64
+ )
+ msg, n, err = reader.Read(ctx)
if err != nil {
- // The stream failed, we cannot continue
- c.async.setError(err)
- return
+ break
}
+
switch msg := msg.(type) {
case *Request:
- entry := &incoming{
- request: msg,
- }
- // add a span to the context for this request
- labels := append(make([]label.Label, 0, 3), // make space for the id if present
- tag.Method.Of(msg.Method),
- tag.RPCDirection.Of(tag.Inbound),
- )
- if msg.IsCall() {
- labels = append(labels, tag.RPCID.Of(fmt.Sprintf("%q", msg.ID)))
- }
- entry.baseCtx, entry.done = event.Start(ctx, msg.Method, labels...)
- event.Metric(entry.baseCtx,
- tag.Started.Of(1),
- tag.ReceivedBytes.Of(n))
- // in theory notifications cannot be cancelled, but we build them a cancel context anyway
- entry.handleCtx, entry.cancel = context.WithCancel(entry.baseCtx)
- // if the request is a call, add it to the incoming map so it can be
- // cancelled by id
- if msg.IsCall() {
- pending := <-c.incomingBox
- pending[msg.ID] = entry
- c.incomingBox <- pending
- }
- // send the message to the incoming queue
- toQueue <- entry
+ c.acceptRequest(ctx, msg, n, preempter)
+
case *Response:
- // If method is not set, this should be a response, in which case we must
- // have an id to send the response back to the caller.
- c.incomingResponse(msg)
+ c.updateInFlight(func(s *inFlightState) {
+ if ac, ok := s.outgoingCalls[msg.ID]; ok {
+ delete(s.outgoingCalls, msg.ID)
+ ac.retire(msg)
+ } else {
+ // TODO: How should we report unexpected responses?
+ }
+ })
+
+ default:
+ c.internalErrorf("Read returned an unexpected message of type %T", msg)
}
}
+
+ c.updateInFlight(func(s *inFlightState) {
+ s.reading = false
+ s.readErr = err
+
+ // Retire any outgoing requests that were still in flight: with the Reader no
+ // longer being processed, they necessarily cannot receive a response.
+ for id, ac := range s.outgoingCalls {
+ ac.retire(&Response{ID: id, Error: err})
+ }
+ s.outgoingCalls = nil
+ })
}
-func (c *Connection) incomingResponse(msg *Response) {
- pending := <-c.outgoingBox
- response, ok := pending[msg.ID]
- if ok {
- delete(pending, msg.ID)
+// acceptRequest either handles msg synchronously or enqueues it to be handled
+// asynchronously.
+func (c *Connection) acceptRequest(ctx context.Context, msg *Request, msgBytes int64, preempter Preempter) {
+ // Add a span to the context for this request.
+ labels := append(make([]label.Label, 0, 3), // Make space for the ID if present.
+ tag.Method.Of(msg.Method),
+ tag.RPCDirection.Of(tag.Inbound),
+ )
+ if msg.IsCall() {
+ labels = append(labels, tag.RPCID.Of(fmt.Sprintf("%q", msg.ID)))
}
- c.outgoingBox <- pending
- if response != nil {
- response <- msg
+ ctx, endSpan := event.Start(ctx, msg.Method, labels...)
+ event.Metric(ctx,
+ tag.Started.Of(1),
+ tag.ReceivedBytes.Of(msgBytes))
+
+ // In theory notifications cannot be cancelled, but we build them a cancel
+ // context anyway.
+ ctx, cancel := context.WithCancel(ctx)
+ req := &incomingRequest{
+ Request: msg,
+ ctx: ctx,
+ cancel: cancel,
+ endSpan: endSpan,
}
-}
-// manageQueue reads incoming requests, attempts to process them with the preempter, or queue them
-// up for normal handling.
-func (c *Connection) manageQueue(ctx context.Context, preempter Preempter, fromRead <-chan *incoming, toDeliver chan<- *incoming) {
- defer close(toDeliver)
- q := []*incoming{}
- ok := true
- for {
- var nextReq *incoming
- if len(q) == 0 {
- // no messages in the queue
- // if we were closing, then we are done
- if !ok {
+ // If the request is a call, add it to the incoming map so it can be
+ // cancelled (or responded) by ID.
+ var err error
+ c.updateInFlight(func(s *inFlightState) {
+ s.incoming++
+
+ if req.IsCall() {
+ if s.incomingByID[req.ID] != nil {
+ err = fmt.Errorf("%w: request ID %v already in use", ErrInvalidRequest, req.ID)
+ req.ID = ID{} // Don't misattribute this error to the existing request.
return
}
- // not closing, but nothing in the queue, so just block waiting for a read
- nextReq, ok = <-fromRead
- } else {
- // we have a non empty queue, so pick whichever of reading or delivering
- // that we can make progress on
- select {
- case nextReq, ok = <-fromRead:
- case toDeliver <- q[0]:
- //TODO: this causes a lot of shuffling, should we use a growing ring buffer? compaction?
- q = q[1:]
+
+ if s.incomingByID == nil {
+ s.incomingByID = make(map[ID]*incomingRequest)
}
+ s.incomingByID[req.ID] = req
+
+ // When shutting down, reject all new Call requests, even if they could
+ // theoretically be handled by the preempter. The preempter could return
+ // ErrAsyncResponse, which would increase the amount of work in flight
+ // when we're trying to ensure that it strictly decreases.
+ err = s.shuttingDown(ErrServerClosing)
}
- if nextReq != nil {
- // TODO: should we allow to limit the queue size?
- var result interface{}
- rerr := nextReq.handleCtx.Err()
- if rerr == nil {
- // only preempt if not already cancelled
- result, rerr = preempter.Preempt(nextReq.handleCtx, nextReq.request)
- }
- switch {
- case rerr == ErrNotHandled:
- // message not handled, add it to the queue for the main handler
- q = append(q, nextReq)
- case rerr == ErrAsyncResponse:
- // message handled but the response will come later
- default:
- // anything else means the message is fully handled
- c.reply(nextReq, result, rerr)
- }
+ })
+ if err != nil {
+ c.processResult("acceptRequest", req, nil, err)
+ return
+ }
+
+ if preempter != nil {
+ result, err := preempter.Preempt(req.ctx, req.Request)
+
+ if req.IsCall() && errors.Is(err, ErrAsyncResponse) {
+ // This request will remain in flight until Respond is called for it.
+ return
+ }
+
+ if !errors.Is(err, ErrNotHandled) {
+ c.processResult("Preempt", req, result, err)
+ return
+ }
+ }
+
+ c.updateInFlight(func(s *inFlightState) {
+ // If the connection is shutting down, don't enqueue anything to the
+ // handler — not even notifications. That ensures that if the handler
+ // continues to make progress, it will eventually become idle and
+ // close the connection.
+ err = s.shuttingDown(ErrServerClosing)
+ if err != nil {
+ return
}
+
+ // We enqueue requests that have not been preempted to an unbounded slice.
+ // Unfortunately, we cannot in general limit the size of the handler
+ // queue: we have to read every response that comes in on the wire
+ // (because it may be responding to a request issued by, say, an
+ // asynchronous handler), and in order to get to that response we have
+ // to read all of the requests that came in ahead of it.
+ s.handlerQueue = append(s.handlerQueue, req)
+ if !s.handlerRunning {
+ // We start the handleAsync goroutine when it has work to do, and let it
+ // exit when the queue empties.
+ //
+ // Otherwise, in order to synchronize the handler we would need some other
+ // goroutine (probably readIncoming?) to explicitly wait for handleAsync
+ // to finish, and that would complicate error reporting: either the error
+ // report from the goroutine would be blocked on the handler emptying its
+ // queue (which was tried, and introduced a deadlock detected by
+ // TestCloseCallRace), or the error would need to be reported separately
+ // from synchronizing completion. Allowing the handler goroutine to exit
+ // when idle seems simpler than trying to implement either of those
+ // alternatives correctly.
+ s.handlerRunning = true
+ go c.handleAsync()
+ }
+ })
+ if err != nil {
+ c.processResult("acceptRequest", req, nil, err)
}
}
-func (c *Connection) deliverMessages(ctx context.Context, handler Handler, fromQueue <-chan *incoming) {
- defer c.async.done()
- for entry := range fromQueue {
- // cancel any messages in the queue that we have a pending cancel for
- var result interface{}
- rerr := entry.handleCtx.Err()
- if rerr == nil {
- // only deliver if not already cancelled
- result, rerr = handler.Handle(entry.handleCtx, entry.request)
+// handleAsync invokes the handler on the requests in the handler queue
+// sequentially until the queue is empty.
+func (c *Connection) handleAsync() {
+ for {
+ var req *incomingRequest
+ c.updateInFlight(func(s *inFlightState) {
+ if len(s.handlerQueue) > 0 {
+ req, s.handlerQueue = s.handlerQueue[0], s.handlerQueue[1:]
+ } else {
+ s.handlerRunning = false
+ }
+ })
+ if req == nil {
+ return
}
- switch {
- case rerr == ErrNotHandled:
- // message not handled, report it back to the caller as an error
- c.reply(entry, nil, errors.Errorf("%w: %q", ErrMethodNotFound, entry.request.Method))
- case rerr == ErrAsyncResponse:
- // message handled but the response will come later
- default:
- c.reply(entry, result, rerr)
+
+ // Only deliver to the Handler if not already canceled.
+ if err := req.ctx.Err(); err != nil {
+ c.updateInFlight(func(s *inFlightState) {
+ if s.writeErr != nil {
+ // Assume that req.ctx was canceled due to s.writeErr.
+ // TODO(#51365): use a Context API to plumb this through req.ctx.
+ err = fmt.Errorf("%w: %v", ErrServerClosing, s.writeErr)
+ }
+ })
+ c.processResult("handleAsync", req, nil, err)
+ continue
}
+
+ result, err := c.handler.Handle(req.ctx, req.Request)
+ c.processResult(c.handler, req, result, err)
}
}
-// reply is used to reply to an incoming request that has just been handled
-func (c *Connection) reply(entry *incoming, result interface{}, rerr error) {
- if entry.request.IsCall() {
- // we have a call finishing, remove it from the incoming map
- pending := <-c.incomingBox
- defer func() { c.incomingBox <- pending }()
- delete(pending, entry.request.ID)
+// processResult processes the result of a request and, if appropriate, sends a response.
+func (c *Connection) processResult(from interface{}, req *incomingRequest, result interface{}, err error) error {
+ switch err {
+ case ErrAsyncResponse:
+ if !req.IsCall() {
+ return c.internalErrorf("%#v returned ErrAsyncResponse for a %q Request without an ID", from, req.Method)
+ }
+ return nil // This request is still in flight, so don't record the result yet.
+ case ErrNotHandled, ErrMethodNotFound:
+ // Add detail describing the unhandled method.
+ err = fmt.Errorf("%w: %q", ErrMethodNotFound, req.Method)
}
- if err := c.respond(entry, result, rerr); err != nil {
- // no way to propagate this error
- //TODO: should we do more than just log it?
- event.Error(entry.baseCtx, "jsonrpc2 message delivery failed", err)
+
+ if req.endSpan == nil {
+ return c.internalErrorf("%#v produced a duplicate %q Response", from, req.Method)
}
-}
-// respond sends a response.
-// This is the code shared between reply and SendResponse.
-func (c *Connection) respond(entry *incoming, result interface{}, rerr error) error {
- var err error
- if entry.request.IsCall() {
- // send the response
- if result == nil && rerr == nil {
- // call with no response, send an error anyway
- rerr = errors.Errorf("%w: %q produced no response", ErrInternal, entry.request.Method)
+ if result != nil && err != nil {
+ c.internalErrorf("%#v returned a non-nil result with a non-nil error for %s:\n%v\n%#v", from, req.Method, err, result)
+ result = nil // Discard the spurious result and respond with err.
+ }
+
+ if req.IsCall() {
+ if result == nil && err == nil {
+ err = c.internalErrorf("%#v returned a nil result and nil error for a %q Request that requires a Response", from, req.Method)
}
- var response *Response
- response, err = NewResponse(entry.request.ID, result, rerr)
- if err == nil {
- // we write the response with the base context, in case the message was cancelled
- err = c.write(entry.baseCtx, response)
+
+ response, respErr := NewResponse(req.ID, result, err)
+
+ // The caller could theoretically reuse the request's ID as soon as we've
+ // sent the response, so ensure that it is removed from the incoming map
+ // before sending.
+ c.updateInFlight(func(s *inFlightState) {
+ delete(s.incomingByID, req.ID)
+ })
+ if respErr == nil {
+ writeErr := c.write(notDone{req.ctx}, response)
+ if err == nil {
+ err = writeErr
+ }
+ } else {
+ err = c.internalErrorf("%#v returned a malformed result for %q: %w", from, req.Method, respErr)
}
- } else {
- switch {
- case rerr != nil:
- // notification failed
- err = errors.Errorf("%w: %q notification failed: %v", ErrInternal, entry.request.Method, rerr)
- rerr = nil
- case result != nil:
- //notification produced a response, which is an error
- err = errors.Errorf("%w: %q produced unwanted response", ErrInternal, entry.request.Method)
- default:
- // normal notification finish
+ } else { // req is a notification
+ if result != nil {
+ err = c.internalErrorf("%#v returned a non-nil result for a %q Request without an ID", from, req.Method)
+ } else if err != nil {
+ err = fmt.Errorf("%w: %q notification failed: %v", ErrInternal, req.Method, err)
+ }
+ if err != nil {
+ // TODO: can/should we do anything with this error beyond writing it to the event log?
+ // (Is this the right label to attach to the log?)
+ event.Label(req.ctx, keys.Err.Of(err))
}
}
- switch {
- case rerr != nil || err != nil:
- event.Label(entry.baseCtx, tag.StatusCode.Of("ERROR"))
- default:
- event.Label(entry.baseCtx, tag.StatusCode.Of("OK"))
- }
- // and just to be clean, invoke and clear the cancel if needed
- if entry.cancel != nil {
- entry.cancel()
- entry.cancel = nil
- }
- // mark the entire request processing as done
- entry.done()
- return err
+
+ labelStatus(req.ctx, err)
+
+ // Cancel the request and finalize the event span to free any associated resources.
+ req.cancel()
+ req.endSpan()
+ req.endSpan = nil
+ c.updateInFlight(func(s *inFlightState) {
+ if s.incoming == 0 {
+ panic("jsonrpc2_v2: processResult called when incoming count is already zero")
+ }
+ s.incoming--
+ })
+ return nil
}
// write is used by all things that write outgoing messages, including replies.
// it makes sure that writes are atomic
func (c *Connection) write(ctx context.Context, msg Message) error {
- writer := <-c.writerBox
- defer func() { c.writerBox <- writer }()
+ writer := <-c.writer
+ defer func() { c.writer <- writer }()
n, err := writer.Write(ctx, msg)
event.Metric(ctx, tag.SentBytes.Of(n))
+
+ if err != nil && ctx.Err() == nil {
+ // The call to Write failed, and since ctx.Err() is nil we can't attribute
+ // the failure (even indirectly) to Context cancellation. The writer appears
+ // to be broken, and future writes are likely to also fail.
+ //
+ // If the read side of the connection is also broken, we might not even be
+ // able to receive cancellation notifications. Since we can't reliably write
+ // the results of incoming calls and can't receive explicit cancellations,
+ // cancel the calls now.
+ c.updateInFlight(func(s *inFlightState) {
+ if s.writeErr == nil {
+ s.writeErr = err
+ for _, r := range s.incomingByID {
+ r.cancel()
+ }
+ }
+ })
+ }
+
return err
}
+
+// internalErrorf reports an internal error. By default it panics, but if
+// c.onInternalError is non-nil it instead calls that and returns an error
+// wrapping ErrInternal.
+func (c *Connection) internalErrorf(format string, args ...interface{}) error {
+ err := fmt.Errorf(format, args...)
+ if c.onInternalError == nil {
+ panic("jsonrpc2: " + err.Error())
+ }
+ c.onInternalError(err)
+
+ return fmt.Errorf("%w: %v", ErrInternal, err)
+}
+
+// labelStatus labels the status of the event in ctx based on whether err is nil.
+func labelStatus(ctx context.Context, err error) {
+ if err == nil {
+ event.Label(ctx, tag.StatusCode.Of("OK"))
+ } else {
+ event.Label(ctx, tag.StatusCode.Of("ERROR"))
+ }
+}
+
+// notDone is a context.Context wrapper that returns a nil Done channel.
+type notDone struct{ ctx context.Context }
+
+func (ic notDone) Value(key interface{}) interface{} {
+ return ic.ctx.Value(key)
+}
+
+func (notDone) Done() <-chan struct{} { return nil }
+func (notDone) Err() error { return nil }
+func (notDone) Deadline() (time.Time, bool) { return time.Time{}, false }
diff --git a/internal/jsonrpc2_v2/frame.go b/internal/jsonrpc2_v2/frame.go
index 634717c73..e42483281 100644
--- a/internal/jsonrpc2_v2/frame.go
+++ b/internal/jsonrpc2_v2/frame.go
@@ -12,8 +12,6 @@ import (
"io"
"strconv"
"strings"
-
- errors "golang.org/x/xerrors"
)
// Reader abstracts the transport mechanics from the JSON RPC protocol.
@@ -87,7 +85,7 @@ func (w *rawWriter) Write(ctx context.Context, msg Message) (int64, error) {
}
data, err := EncodeMessage(msg)
if err != nil {
- return 0, errors.Errorf("marshaling message: %v", err)
+ return 0, fmt.Errorf("marshaling message: %v", err)
}
n, err := w.out.Write(data)
return int64(n), err
@@ -122,7 +120,13 @@ func (r *headerReader) Read(ctx context.Context) (Message, int64, error) {
line, err := r.in.ReadString('\n')
total += int64(len(line))
if err != nil {
- return nil, total, errors.Errorf("failed reading header line: %w", err)
+ if err == io.EOF {
+ if total == 0 {
+ return nil, 0, io.EOF
+ }
+ err = io.ErrUnexpectedEOF
+ }
+ return nil, total, fmt.Errorf("failed reading header line: %w", err)
}
line = strings.TrimSpace(line)
// check we have a header line
@@ -131,23 +135,23 @@ func (r *headerReader) Read(ctx context.Context) (Message, int64, error) {
}
colon := strings.IndexRune(line, ':')
if colon < 0 {
- return nil, total, errors.Errorf("invalid header line %q", line)
+ return nil, total, fmt.Errorf("invalid header line %q", line)
}
name, value := line[:colon], strings.TrimSpace(line[colon+1:])
switch name {
case "Content-Length":
if length, err = strconv.ParseInt(value, 10, 32); err != nil {
- return nil, total, errors.Errorf("failed parsing Content-Length: %v", value)
+ return nil, total, fmt.Errorf("failed parsing Content-Length: %v", value)
}
if length <= 0 {
- return nil, total, errors.Errorf("invalid Content-Length: %v", length)
+ return nil, total, fmt.Errorf("invalid Content-Length: %v", length)
}
default:
// ignoring unknown headers
}
}
if length == 0 {
- return nil, total, errors.Errorf("missing Content-Length header")
+ return nil, total, fmt.Errorf("missing Content-Length header")
}
data := make([]byte, length)
n, err := io.ReadFull(r.in, data)
@@ -167,7 +171,7 @@ func (w *headerWriter) Write(ctx context.Context, msg Message) (int64, error) {
}
data, err := EncodeMessage(msg)
if err != nil {
- return 0, errors.Errorf("marshaling message: %v", err)
+ return 0, fmt.Errorf("marshaling message: %v", err)
}
n, err := fmt.Fprintf(w.out, "Content-Length: %v\r\n\r\n", len(data))
total := int64(n)
diff --git a/internal/jsonrpc2_v2/jsonrpc2.go b/internal/jsonrpc2_v2/jsonrpc2.go
index e68558442..e9164b0bc 100644
--- a/internal/jsonrpc2_v2/jsonrpc2.go
+++ b/internal/jsonrpc2_v2/jsonrpc2.go
@@ -47,6 +47,15 @@ type Preempter interface {
Preempt(ctx context.Context, req *Request) (result interface{}, err error)
}
+// A PreempterFunc implements the Preempter interface for a standalone Preempt function.
+type PreempterFunc func(ctx context.Context, req *Request) (interface{}, error)
+
+func (f PreempterFunc) Preempt(ctx context.Context, req *Request) (interface{}, error) {
+ return f(ctx, req)
+}
+
+var _ Preempter = PreempterFunc(nil)
+
// Handler handles messages on a connection.
type Handler interface {
// Handle is invoked sequentially for each incoming request that has not
@@ -75,12 +84,15 @@ func (defaultHandler) Handle(context.Context, *Request) (interface{}, error) {
return nil, ErrNotHandled
}
+// A HandlerFunc implements the Handler interface for a standalone Handle function.
type HandlerFunc func(ctx context.Context, req *Request) (interface{}, error)
func (f HandlerFunc) Handle(ctx context.Context, req *Request) (interface{}, error) {
return f(ctx, req)
}
+var _ Handler = HandlerFunc(nil)
+
// async is a small helper for operations with an asynchronous result that you
// can wait for.
type async struct {
diff --git a/internal/jsonrpc2_v2/jsonrpc2_test.go b/internal/jsonrpc2_v2/jsonrpc2_test.go
index 4f4b7d9b9..dd8d09c88 100644
--- a/internal/jsonrpc2_v2/jsonrpc2_test.go
+++ b/internal/jsonrpc2_v2/jsonrpc2_test.go
@@ -11,12 +11,10 @@ import (
"path"
"reflect"
"testing"
- "time"
"golang.org/x/tools/internal/event/export/eventtest"
jsonrpc2 "golang.org/x/tools/internal/jsonrpc2_v2"
"golang.org/x/tools/internal/stack/stacktest"
- errors "golang.org/x/xerrors"
)
var callTests = []invoker{
@@ -78,7 +76,7 @@ type binder struct {
type handler struct {
conn *jsonrpc2.Connection
accumulator int
- waitersBox chan map[string]chan struct{}
+ waiters chan map[string]chan struct{}
calls map[string]*jsonrpc2.AsyncCall
}
@@ -138,10 +136,7 @@ func testConnection(t *testing.T, framer jsonrpc2.Framer) {
if err != nil {
t.Fatal(err)
}
- server, err := jsonrpc2.Serve(ctx, listener, binder{framer, nil})
- if err != nil {
- t.Fatal(err)
- }
+ server := jsonrpc2.NewServer(ctx, listener, binder{framer, nil})
defer func() {
listener.Close()
server.Wait()
@@ -255,13 +250,13 @@ func verifyResults(t *testing.T, method string, results interface{}, expect inte
}
}
-func (b binder) Bind(ctx context.Context, conn *jsonrpc2.Connection) (jsonrpc2.ConnectionOptions, error) {
+func (b binder) Bind(ctx context.Context, conn *jsonrpc2.Connection) jsonrpc2.ConnectionOptions {
h := &handler{
- conn: conn,
- waitersBox: make(chan map[string]chan struct{}, 1),
- calls: make(map[string]*jsonrpc2.AsyncCall),
+ conn: conn,
+ waiters: make(chan map[string]chan struct{}, 1),
+ calls: make(map[string]*jsonrpc2.AsyncCall),
}
- h.waitersBox <- make(map[string]chan struct{})
+ h.waiters <- make(map[string]chan struct{})
if b.runTest != nil {
go b.runTest(h)
}
@@ -269,12 +264,12 @@ func (b binder) Bind(ctx context.Context, conn *jsonrpc2.Connection) (jsonrpc2.C
Framer: b.framer,
Preempter: h,
Handler: h,
- }, nil
+ }
}
func (h *handler) waiter(name string) chan struct{} {
- waiters := <-h.waitersBox
- defer func() { h.waitersBox <- waiters }()
+ waiters := <-h.waiters
+ defer func() { h.waiters <- waiters }()
waiter, found := waiters[name]
if !found {
waiter = make(chan struct{})
@@ -288,19 +283,19 @@ func (h *handler) Preempt(ctx context.Context, req *jsonrpc2.Request) (interface
case "unblock":
var name string
if err := json.Unmarshal(req.Params, &name); err != nil {
- return nil, errors.Errorf("%w: %s", jsonrpc2.ErrParse, err)
+ return nil, fmt.Errorf("%w: %s", jsonrpc2.ErrParse, err)
}
close(h.waiter(name))
return nil, nil
case "peek":
if len(req.Params) > 0 {
- return nil, errors.Errorf("%w: expected no params", jsonrpc2.ErrInvalidParams)
+ return nil, fmt.Errorf("%w: expected no params", jsonrpc2.ErrInvalidParams)
}
return h.accumulator, nil
case "cancel":
var params cancelParams
if err := json.Unmarshal(req.Params, &params); err != nil {
- return nil, errors.Errorf("%w: %s", jsonrpc2.ErrParse, err)
+ return nil, fmt.Errorf("%w: %s", jsonrpc2.ErrParse, err)
}
h.conn.Cancel(jsonrpc2.Int64ID(params.ID))
return nil, nil
@@ -313,50 +308,50 @@ func (h *handler) Handle(ctx context.Context, req *jsonrpc2.Request) (interface{
switch req.Method {
case "no_args":
if len(req.Params) > 0 {
- return nil, errors.Errorf("%w: expected no params", jsonrpc2.ErrInvalidParams)
+ return nil, fmt.Errorf("%w: expected no params", jsonrpc2.ErrInvalidParams)
}
return true, nil
case "one_string":
var v string
if err := json.Unmarshal(req.Params, &v); err != nil {
- return nil, errors.Errorf("%w: %s", jsonrpc2.ErrParse, err)
+ return nil, fmt.Errorf("%w: %s", jsonrpc2.ErrParse, err)
}
return "got:" + v, nil
case "one_number":
var v int
if err := json.Unmarshal(req.Params, &v); err != nil {
- return nil, errors.Errorf("%w: %s", jsonrpc2.ErrParse, err)
+ return nil, fmt.Errorf("%w: %s", jsonrpc2.ErrParse, err)
}
return fmt.Sprintf("got:%d", v), nil
case "set":
var v int
if err := json.Unmarshal(req.Params, &v); err != nil {
- return nil, errors.Errorf("%w: %s", jsonrpc2.ErrParse, err)
+ return nil, fmt.Errorf("%w: %s", jsonrpc2.ErrParse, err)
}
h.accumulator = v
return nil, nil
case "add":
var v int
if err := json.Unmarshal(req.Params, &v); err != nil {
- return nil, errors.Errorf("%w: %s", jsonrpc2.ErrParse, err)
+ return nil, fmt.Errorf("%w: %s", jsonrpc2.ErrParse, err)
}
h.accumulator += v
return nil, nil
case "get":
if len(req.Params) > 0 {
- return nil, errors.Errorf("%w: expected no params", jsonrpc2.ErrInvalidParams)
+ return nil, fmt.Errorf("%w: expected no params", jsonrpc2.ErrInvalidParams)
}
return h.accumulator, nil
case "join":
var v []string
if err := json.Unmarshal(req.Params, &v); err != nil {
- return nil, errors.Errorf("%w: %s", jsonrpc2.ErrParse, err)
+ return nil, fmt.Errorf("%w: %s", jsonrpc2.ErrParse, err)
}
return path.Join(v...), nil
case "echo":
var v []interface{}
if err := json.Unmarshal(req.Params, &v); err != nil {
- return nil, errors.Errorf("%w: %s", jsonrpc2.ErrParse, err)
+ return nil, fmt.Errorf("%w: %s", jsonrpc2.ErrParse, err)
}
var result interface{}
err := h.conn.Call(ctx, v[0].(string), v[1]).Await(ctx, &result)
@@ -364,20 +359,18 @@ func (h *handler) Handle(ctx context.Context, req *jsonrpc2.Request) (interface{
case "wait":
var name string
if err := json.Unmarshal(req.Params, &name); err != nil {
- return nil, errors.Errorf("%w: %s", jsonrpc2.ErrParse, err)
+ return nil, fmt.Errorf("%w: %s", jsonrpc2.ErrParse, err)
}
select {
case <-h.waiter(name):
return true, nil
case <-ctx.Done():
return nil, ctx.Err()
- case <-time.After(time.Second):
- return nil, errors.Errorf("wait for %q timed out", name)
}
case "fork":
var name string
if err := json.Unmarshal(req.Params, &name); err != nil {
- return nil, errors.Errorf("%w: %s", jsonrpc2.ErrParse, err)
+ return nil, fmt.Errorf("%w: %s", jsonrpc2.ErrParse, err)
}
waitFor := h.waiter(name)
go func() {
@@ -386,8 +379,6 @@ func (h *handler) Handle(ctx context.Context, req *jsonrpc2.Request) (interface{
h.conn.Respond(req.ID, true, nil)
case <-ctx.Done():
h.conn.Respond(req.ID, nil, ctx.Err())
- case <-time.After(time.Second):
- h.conn.Respond(req.ID, nil, errors.Errorf("wait for %q timed out", name))
}
}()
return nil, jsonrpc2.ErrAsyncResponse
diff --git a/internal/jsonrpc2_v2/messages.go b/internal/jsonrpc2_v2/messages.go
index 652ac817a..af145641d 100644
--- a/internal/jsonrpc2_v2/messages.go
+++ b/internal/jsonrpc2_v2/messages.go
@@ -6,8 +6,8 @@ package jsonrpc2
import (
"encoding/json"
-
- errors "golang.org/x/xerrors"
+ "errors"
+ "fmt"
)
// ID is a Request identifier.
@@ -120,7 +120,7 @@ func EncodeMessage(msg Message) ([]byte, error) {
msg.marshal(&wire)
data, err := json.Marshal(&wire)
if err != nil {
- return data, errors.Errorf("marshaling jsonrpc message: %w", err)
+ return data, fmt.Errorf("marshaling jsonrpc message: %w", err)
}
return data, nil
}
@@ -128,10 +128,10 @@ func EncodeMessage(msg Message) ([]byte, error) {
func DecodeMessage(data []byte) (Message, error) {
msg := wireCombined{}
if err := json.Unmarshal(data, &msg); err != nil {
- return nil, errors.Errorf("unmarshaling jsonrpc message: %w", err)
+ return nil, fmt.Errorf("unmarshaling jsonrpc message: %w", err)
}
if msg.VersionTag != wireVersion {
- return nil, errors.Errorf("invalid message version tag %s expected %s", msg.VersionTag, wireVersion)
+ return nil, fmt.Errorf("invalid message version tag %s expected %s", msg.VersionTag, wireVersion)
}
id := ID{}
switch v := msg.ID.(type) {
@@ -144,7 +144,7 @@ func DecodeMessage(data []byte) (Message, error) {
case string:
id = StringID(v)
default:
- return nil, errors.Errorf("invalid message id type <%T>%v", v, v)
+ return nil, fmt.Errorf("invalid message id type <%T>%v", v, v)
}
if msg.Method != "" {
// has a method, must be a call
diff --git a/internal/jsonrpc2_v2/net.go b/internal/jsonrpc2_v2/net.go
index 4f2082599..15d0aea3a 100644
--- a/internal/jsonrpc2_v2/net.go
+++ b/internal/jsonrpc2_v2/net.go
@@ -9,7 +9,6 @@ import (
"io"
"net"
"os"
- "time"
)
// This file contains implementations of the transport primitives that use the standard network
@@ -36,7 +35,7 @@ type netListener struct {
}
// Accept blocks waiting for an incoming connection to the listener.
-func (l *netListener) Accept(ctx context.Context) (io.ReadWriteCloser, error) {
+func (l *netListener) Accept(context.Context) (io.ReadWriteCloser, error) {
return l.net.Accept()
}
@@ -56,9 +55,7 @@ func (l *netListener) Close() error {
// Dialer returns a dialer that can be used to connect to the listener.
func (l *netListener) Dialer() Dialer {
- return NetDialer(l.net.Addr().Network(), l.net.Addr().String(), net.Dialer{
- Timeout: 5 * time.Second,
- })
+ return NetDialer(l.net.Addr().Network(), l.net.Addr().String(), net.Dialer{})
}
// NetDialer returns a Dialer using the supplied standard network dialer.
@@ -81,7 +78,7 @@ func (n *netDialer) Dial(ctx context.Context) (io.ReadWriteCloser, error) {
}
// NetPipeListener returns a new Listener that listens using net.Pipe.
-// It is only possibly to connect to it using the Dialier returned by the
+// It is only possibly to connect to it using the Dialer returned by the
// Dialer method, each call to that method will generate a new pipe the other
// side of which will be returned from the Accept call.
func NetPipeListener(ctx context.Context) (Listener, error) {
@@ -98,15 +95,19 @@ type netPiper struct {
}
// Accept blocks waiting for an incoming connection to the listener.
-func (l *netPiper) Accept(ctx context.Context) (io.ReadWriteCloser, error) {
- // block until we have a listener, or are closed or cancelled
+func (l *netPiper) Accept(context.Context) (io.ReadWriteCloser, error) {
+ // Block until the pipe is dialed or the listener is closed,
+ // preferring the latter if already closed at the start of Accept.
+ select {
+ case <-l.done:
+ return nil, errClosed
+ default:
+ }
select {
case rwc := <-l.dialed:
return rwc, nil
case <-l.done:
- return nil, io.EOF
- case <-ctx.Done():
- return nil, ctx.Err()
+ return nil, errClosed
}
}
@@ -124,6 +125,14 @@ func (l *netPiper) Dialer() Dialer {
func (l *netPiper) Dial(ctx context.Context) (io.ReadWriteCloser, error) {
client, server := net.Pipe()
- l.dialed <- server
- return client, nil
+
+ select {
+ case l.dialed <- server:
+ return client, nil
+
+ case <-l.done:
+ client.Close()
+ server.Close()
+ return nil, errClosed
+ }
}
diff --git a/internal/jsonrpc2_v2/serve.go b/internal/jsonrpc2_v2/serve.go
index fb3516635..5e0827354 100644
--- a/internal/jsonrpc2_v2/serve.go
+++ b/internal/jsonrpc2_v2/serve.go
@@ -6,14 +6,12 @@ package jsonrpc2
import (
"context"
+ "fmt"
"io"
"runtime"
- "strings"
"sync"
- "syscall"
+ "sync/atomic"
"time"
-
- errors "golang.org/x/xerrors"
)
// Listener is implemented by protocols to accept new inbound connections.
@@ -43,35 +41,43 @@ type Server struct {
listener Listener
binder Binder
async *async
+
+ shutdownOnce sync.Once
+ closing int32 // atomic: set to nonzero when Shutdown is called
}
// Dial uses the dialer to make a new connection, wraps the returned
// reader and writer using the framer to make a stream, and then builds
// a connection on top of that stream using the binder.
+//
+// The returned Connection will operate independently using the Preempter and/or
+// Handler provided by the Binder, and will release its own resources when the
+// connection is broken, but the caller may Close it earlier to stop accepting
+// (or sending) new requests.
func Dial(ctx context.Context, dialer Dialer, binder Binder) (*Connection, error) {
// dial a server
rwc, err := dialer.Dial(ctx)
if err != nil {
return nil, err
}
- return newConnection(ctx, rwc, binder)
+ return newConnection(ctx, rwc, binder, nil), nil
}
-// Serve starts a new server listening for incoming connections and returns
+// NewServer starts a new server listening for incoming connections and returns
// it.
// This returns a fully running and connected server, it does not block on
// the listener.
// You can call Wait to block on the server, or Shutdown to get the sever to
// terminate gracefully.
// To notice incoming connections, use an intercepting Binder.
-func Serve(ctx context.Context, listener Listener, binder Binder) (*Server, error) {
+func NewServer(ctx context.Context, listener Listener, binder Binder) *Server {
server := &Server{
listener: listener,
binder: binder,
async: newAsync(),
}
go server.run(ctx)
- return server, nil
+ return server
}
// Wait returns only when the server has shut down.
@@ -79,173 +85,160 @@ func (s *Server) Wait() error {
return s.async.wait()
}
+// Shutdown informs the server to stop accepting new connections.
+func (s *Server) Shutdown() {
+ s.shutdownOnce.Do(func() {
+ atomic.StoreInt32(&s.closing, 1)
+ s.listener.Close()
+ })
+}
+
// run accepts incoming connections from the listener,
// If IdleTimeout is non-zero, run exits after there are no clients for this
// duration, otherwise it exits only on error.
func (s *Server) run(ctx context.Context) {
defer s.async.done()
- var activeConns []*Connection
+
+ var activeConns sync.WaitGroup
for {
- // we never close the accepted connection, we rely on the other end
- // closing or the socket closing itself naturally
rwc, err := s.listener.Accept(ctx)
if err != nil {
- if !isClosingError(err) {
+ // Only Shutdown closes the listener. If we get an error after Shutdown is
+ // called, assume that that was the cause and don't report the error;
+ // otherwise, report the error in case it is unexpected.
+ if atomic.LoadInt32(&s.closing) == 0 {
s.async.setError(err)
}
- // we are done generating new connections for good
+ // We are done generating new connections for good.
break
}
- // see if any connections were closed while we were waiting
- activeConns = onlyActive(activeConns)
-
- // a new inbound connection,
- conn, err := newConnection(ctx, rwc, s.binder)
- if err != nil {
- if !isClosingError(err) {
- s.async.setError(err)
- }
- continue
- }
- activeConns = append(activeConns, conn)
- }
-
- // wait for all active conns to finish
- for _, c := range activeConns {
- c.Wait()
+ // A new inbound connection.
+ activeConns.Add(1)
+ _ = newConnection(ctx, rwc, s.binder, activeConns.Done) // unregisters itself when done
}
+ activeConns.Wait()
}
-func onlyActive(conns []*Connection) []*Connection {
- i := 0
- for _, c := range conns {
- if !c.async.isDone() {
- conns[i] = c
- i++
- }
+// NewIdleListener wraps a listener with an idle timeout.
+//
+// When there are no active connections for at least the timeout duration,
+// calls to Accept will fail with ErrIdleTimeout.
+//
+// A connection is considered inactive as soon as its Close method is called.
+func NewIdleListener(timeout time.Duration, wrap Listener) Listener {
+ l := &idleListener{
+ wrapped: wrap,
+ timeout: timeout,
+ active: make(chan int, 1),
+ timedOut: make(chan struct{}),
+ idleTimer: make(chan *time.Timer, 1),
}
- // trim the slice down
- return conns[:i]
+ l.idleTimer <- time.AfterFunc(l.timeout, l.timerExpired)
+ return l
}
-// isClosingError reports if the error occurs normally during the process of
-// closing a network connection. It uses imperfect heuristics that err on the
-// side of false negatives, and should not be used for anything critical.
-func isClosingError(err error) bool {
- if err == nil {
- return false
- }
- // Fully unwrap the error, so the following tests work.
- for wrapped := err; wrapped != nil; wrapped = errors.Unwrap(err) {
- err = wrapped
- }
-
- // Was it based on an EOF error?
- if err == io.EOF {
- return true
- }
+type idleListener struct {
+ wrapped Listener
+ timeout time.Duration
- // Was it based on a closed pipe?
- if err == io.ErrClosedPipe {
- return true
- }
+ // Only one of these channels is receivable at any given time.
+ active chan int // count of active connections; closed when Close is called if not timed out
+ timedOut chan struct{} // closed when the idle timer expires
+ idleTimer chan *time.Timer // holds the timer only when idle
+}
- // Per https://github.com/golang/go/issues/4373, this error string should not
- // change. This is not ideal, but since the worst that could happen here is
- // some superfluous logging, it is acceptable.
- if err.Error() == "use of closed network connection" {
- return true
- }
+// Accept accepts an incoming connection.
+//
+// If an incoming connection is accepted concurrent to the listener being closed
+// due to idleness, the new connection is immediately closed.
+func (l *idleListener) Accept(ctx context.Context) (io.ReadWriteCloser, error) {
+ rwc, err := l.wrapped.Accept(ctx)
- if runtime.GOOS == "plan9" {
- // Error reading from a closed connection.
- if err == syscall.EINVAL {
- return true
+ select {
+ case n, ok := <-l.active:
+ if err != nil {
+ if ok {
+ l.active <- n
+ }
+ return nil, err
}
- // Error trying to accept a new connection from a closed listener.
- if strings.HasSuffix(err.Error(), " listen hungup") {
- return true
+ if ok {
+ l.active <- n + 1
+ } else {
+ // l.wrapped.Close Close has been called, but Accept returned a
+ // connection. This race can occur with concurrent Accept and Close calls
+ // with any net.Listener, and it is benign: since the listener was closed
+ // explicitly, it can't have also timed out.
}
- }
- return false
-}
+ return l.newConn(rwc), nil
-// NewIdleListener wraps a listener with an idle timeout.
-// When there are no active connections for at least the timeout duration a
-// call to accept will fail with ErrIdleTimeout.
-func NewIdleListener(timeout time.Duration, wrap Listener) Listener {
- l := &idleListener{
- timeout: timeout,
- wrapped: wrap,
- newConns: make(chan *idleCloser),
- closed: make(chan struct{}),
- wasTimeout: make(chan struct{}),
- }
- go l.run()
- return l
-}
+ case <-l.timedOut:
+ if err == nil {
+ // Keeping the connection open would leave the listener simultaneously
+ // active and closed due to idleness, which would be contradictory and
+ // confusing. Close the connection and pretend that it never happened.
+ rwc.Close()
+ } else {
+ // In theory the timeout could have raced with an unrelated error return
+ // from Accept. However, ErrIdleTimeout is arguably still valid (since we
+ // would have closed due to the timeout independent of the error), and the
+ // harm from returning a spurious ErrIdleTimeout is negligible anyway.
+ }
+ return nil, ErrIdleTimeout
-type idleListener struct {
- wrapped Listener
- timeout time.Duration
- newConns chan *idleCloser
- closed chan struct{}
- wasTimeout chan struct{}
- closeOnce sync.Once
-}
+ case timer := <-l.idleTimer:
+ if err != nil {
+ // The idle timer doesn't run until it receives itself from the idleTimer
+ // channel, so it can't have called l.wrapped.Close yet and thus err can't
+ // be ErrIdleTimeout. Leave the idle timer as it was and return whatever
+ // error we got.
+ l.idleTimer <- timer
+ return nil, err
+ }
-type idleCloser struct {
- wrapped io.ReadWriteCloser
- closed chan struct{}
- closeOnce sync.Once
-}
+ if !timer.Stop() {
+ // Failed to stop the timer — the timer goroutine is in the process of
+ // firing. Send the timer back to the timer goroutine so that it can
+ // safely close the timedOut channel, and then wait for the listener to
+ // actually be closed before we return ErrIdleTimeout.
+ l.idleTimer <- timer
+ rwc.Close()
+ <-l.timedOut
+ return nil, ErrIdleTimeout
+ }
-func (c *idleCloser) Read(p []byte) (int, error) {
- n, err := c.wrapped.Read(p)
- if err != nil && isClosingError(err) {
- c.closeOnce.Do(func() { close(c.closed) })
+ l.active <- 1
+ return l.newConn(rwc), nil
}
- return n, err
}
-func (c *idleCloser) Write(p []byte) (int, error) {
- // we do not close on write failure, we rely on the wrapped writer to do that
- // if it is appropriate, which we will detect in the next read.
- return c.wrapped.Write(p)
-}
+func (l *idleListener) Close() error {
+ select {
+ case _, ok := <-l.active:
+ if ok {
+ close(l.active)
+ }
-func (c *idleCloser) Close() error {
- // we rely on closing the wrapped stream to signal to the next read that we
- // are closed, rather than triggering the closed signal directly
- return c.wrapped.Close()
-}
+ case <-l.timedOut:
+ // Already closed by the timer; take care not to double-close if the caller
+ // only explicitly invokes this Close method once, since the io.Closer
+ // interface explicitly leaves doubled Close calls undefined.
+ return ErrIdleTimeout
-func (l *idleListener) Accept(ctx context.Context) (io.ReadWriteCloser, error) {
- rwc, err := l.wrapped.Accept(ctx)
- if err != nil {
- if isClosingError(err) {
- // underlying listener was closed
- l.closeOnce.Do(func() { close(l.closed) })
- // was it closed because of the idle timeout?
- select {
- case <-l.wasTimeout:
- err = ErrIdleTimeout
- default:
- }
+ case timer := <-l.idleTimer:
+ if !timer.Stop() {
+ // Couldn't stop the timer. It shouldn't take long to run, so just wait
+ // (so that the Listener is guaranteed to be closed before we return)
+ // and pretend that this call happened afterward.
+ // That way we won't leak any timers or goroutines when Close returns.
+ l.idleTimer <- timer
+ <-l.timedOut
+ return ErrIdleTimeout
}
- return nil, err
+ close(l.active)
}
- conn := &idleCloser{
- wrapped: rwc,
- closed: make(chan struct{}),
- }
- l.newConns <- conn
- return conn, err
-}
-func (l *idleListener) Close() error {
- defer l.closeOnce.Do(func() { close(l.closed) })
return l.wrapped.Close()
}
@@ -253,31 +246,83 @@ func (l *idleListener) Dialer() Dialer {
return l.wrapped.Dialer()
}
-func (l *idleListener) run() {
- var conns []*idleCloser
- for {
- var firstClosed chan struct{} // left at nil if there are no active conns
- var timeout <-chan time.Time // left at nil if there are active conns
- if len(conns) > 0 {
- firstClosed = conns[0].closed
+func (l *idleListener) timerExpired() {
+ select {
+ case n, ok := <-l.active:
+ if ok {
+ panic(fmt.Sprintf("jsonrpc2: idleListener idle timer fired with %d connections still active", n))
} else {
- timeout = time.After(l.timeout)
+ panic("jsonrpc2: Close finished with idle timer still running")
}
- select {
- case <-l.closed:
- // the main listener closed, no need to keep going
+
+ case <-l.timedOut:
+ panic("jsonrpc2: idleListener idle timer fired more than once")
+
+ case <-l.idleTimer:
+ // The timer for this very call!
+ }
+
+ // Close the Listener with all channels still blocked to ensure that this call
+ // to l.wrapped.Close doesn't race with the one in l.Close.
+ defer close(l.timedOut)
+ l.wrapped.Close()
+}
+
+func (l *idleListener) connClosed() {
+ select {
+ case n, ok := <-l.active:
+ if !ok {
+ // l is already closed, so it can't close due to idleness,
+ // and we don't need to track the number of active connections any more.
return
- case conn := <-l.newConns:
- // a new conn arrived, add it to the list
- conns = append(conns, conn)
- case <-timeout:
- // we timed out, only happens when there are no active conns
- // close the underlying listener, and allow the normal closing process to happen
- close(l.wasTimeout)
- l.wrapped.Close()
- case <-firstClosed:
- // a conn closed, remove it from the active list
- conns = conns[:copy(conns, conns[1:])]
}
+ n--
+ if n == 0 {
+ l.idleTimer <- time.AfterFunc(l.timeout, l.timerExpired)
+ } else {
+ l.active <- n
+ }
+
+ case <-l.timedOut:
+ panic("jsonrpc2: idleListener idle timer fired before last active connection was closed")
+
+ case <-l.idleTimer:
+ panic("jsonrpc2: idleListener idle timer active before last active connection was closed")
}
}
+
+type idleListenerConn struct {
+ wrapped io.ReadWriteCloser
+ l *idleListener
+ closeOnce sync.Once
+}
+
+func (l *idleListener) newConn(rwc io.ReadWriteCloser) *idleListenerConn {
+ c := &idleListenerConn{
+ wrapped: rwc,
+ l: l,
+ }
+
+ // A caller that forgets to call Close may disrupt the idleListener's
+ // accounting, even though the file descriptor for the underlying connection
+ // may eventually be garbage-collected anyway.
+ //
+ // Set a (best-effort) finalizer to verify that a Close call always occurs.
+ // (We will clear the finalizer explicitly in Close.)
+ runtime.SetFinalizer(c, func(c *idleListenerConn) {
+ panic("jsonrpc2: IdleListener connection became unreachable without a call to Close")
+ })
+
+ return c
+}
+
+func (c *idleListenerConn) Read(p []byte) (int, error) { return c.wrapped.Read(p) }
+func (c *idleListenerConn) Write(p []byte) (int, error) { return c.wrapped.Write(p) }
+
+func (c *idleListenerConn) Close() error {
+ defer c.closeOnce.Do(func() {
+ c.l.connClosed()
+ runtime.SetFinalizer(c, nil)
+ })
+ return c.wrapped.Close()
+}
diff --git a/internal/jsonrpc2_v2/serve_go116.go b/internal/jsonrpc2_v2/serve_go116.go
new file mode 100644
index 000000000..29549f105
--- /dev/null
+++ b/internal/jsonrpc2_v2/serve_go116.go
@@ -0,0 +1,19 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.16
+// +build go1.16
+
+package jsonrpc2
+
+import (
+ "errors"
+ "net"
+)
+
+var errClosed = net.ErrClosed
+
+func isErrClosed(err error) bool {
+ return errors.Is(err, errClosed)
+}
diff --git a/internal/jsonrpc2_v2/serve_pre116.go b/internal/jsonrpc2_v2/serve_pre116.go
new file mode 100644
index 000000000..a1801d8a2
--- /dev/null
+++ b/internal/jsonrpc2_v2/serve_pre116.go
@@ -0,0 +1,30 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.16
+// +build !go1.16
+
+package jsonrpc2
+
+import (
+ "errors"
+ "strings"
+)
+
+// errClosed is an error with the same string as net.ErrClosed,
+// which was added in Go 1.16.
+var errClosed = errors.New("use of closed network connection")
+
+// isErrClosed reports whether err ends in the same string as errClosed.
+func isErrClosed(err error) bool {
+ // As of Go 1.16, this could be 'errors.Is(err, net.ErrClosing)', but
+ // unfortunately gopls still requires compatibility with
+ // (otherwise-unsupported) older Go versions.
+ //
+ // In the meantime, this error string has not changed on any supported Go
+ // version, and is not expected to change in the future.
+ // This is not ideal, but since the worst that could happen here is some
+ // superfluous logging, it is acceptable.
+ return strings.HasSuffix(err.Error(), "use of closed network connection")
+}
diff --git a/internal/jsonrpc2_v2/serve_test.go b/internal/jsonrpc2_v2/serve_test.go
index 26cf6a58c..88ac66b7e 100644
--- a/internal/jsonrpc2_v2/serve_test.go
+++ b/internal/jsonrpc2_v2/serve_test.go
@@ -7,6 +7,8 @@ package jsonrpc2_test
import (
"context"
"errors"
+ "fmt"
+ "runtime/debug"
"testing"
"time"
@@ -16,48 +18,125 @@ import (
func TestIdleTimeout(t *testing.T) {
stacktest.NoLeak(t)
- ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
- defer cancel()
- listener, err := jsonrpc2.NetListener(ctx, "tcp", "localhost:0", jsonrpc2.NetListenOptions{})
- if err != nil {
- t.Fatal(err)
- }
- listener = jsonrpc2.NewIdleListener(100*time.Millisecond, listener)
- defer listener.Close()
- server, err := jsonrpc2.Serve(ctx, listener, jsonrpc2.ConnectionOptions{})
- if err != nil {
- t.Fatal(err)
- }
+ // Use a panicking time.AfterFunc instead of context.WithTimeout so that we
+ // get a goroutine dump on failure. We expect the test to take on the order of
+ // a few tens of milliseconds at most, so 10s should be several orders of
+ // magnitude of headroom.
+ timer := time.AfterFunc(10*time.Second, func() {
+ debug.SetTraceback("all")
+ panic("TestIdleTimeout deadlocked")
+ })
+ defer timer.Stop()
- connect := func() *jsonrpc2.Connection {
- client, err := jsonrpc2.Dial(ctx,
- listener.Dialer(),
- jsonrpc2.ConnectionOptions{})
+ ctx := context.Background()
+
+ try := func(d time.Duration) (longEnough bool) {
+ listener, err := jsonrpc2.NetListener(ctx, "tcp", "localhost:0", jsonrpc2.NetListenOptions{})
if err != nil {
t.Fatal(err)
}
- return client
- }
- // Exercise some connection/disconnection patterns, and then assert that when
- // our timer fires, the server exits.
- conn1 := connect()
- conn2 := connect()
- if err := conn1.Close(); err != nil {
- t.Fatalf("conn1.Close failed with error: %v", err)
- }
- if err := conn2.Close(); err != nil {
- t.Fatalf("conn2.Close failed with error: %v", err)
- }
- conn3 := connect()
- if err := conn3.Close(); err != nil {
- t.Fatalf("conn3.Close failed with error: %v", err)
- }
- serverError := server.Wait()
+ idleStart := time.Now()
+ listener = jsonrpc2.NewIdleListener(d, listener)
+ defer listener.Close()
- if !errors.Is(serverError, jsonrpc2.ErrIdleTimeout) {
- t.Errorf("run() returned error %v, want %v", serverError, jsonrpc2.ErrIdleTimeout)
+ server := jsonrpc2.NewServer(ctx, listener, jsonrpc2.ConnectionOptions{})
+
+ // Exercise some connection/disconnection patterns, and then assert that when
+ // our timer fires, the server exits.
+ conn1, err := jsonrpc2.Dial(ctx, listener.Dialer(), jsonrpc2.ConnectionOptions{})
+ if err != nil {
+ if since := time.Since(idleStart); since < d {
+ t.Fatalf("conn1 failed to connect after %v: %v", since, err)
+ }
+ t.Log("jsonrpc2.Dial:", err)
+ return false // Took to long to dial, so the failure could have been due to the idle timeout.
+ }
+ // On the server side, Accept can race with the connection timing out.
+ // Send a call and wait for the response to ensure that the connection was
+ // actually fully accepted.
+ ac := conn1.Call(ctx, "ping", nil)
+ if err := ac.Await(ctx, nil); !errors.Is(err, jsonrpc2.ErrMethodNotFound) {
+ if since := time.Since(idleStart); since < d {
+ t.Fatalf("conn1 broken after %v: %v", since, err)
+ }
+ t.Log(`conn1.Call(ctx, "ping", nil):`, err)
+ conn1.Close()
+ return false
+ }
+
+ // Since conn1 was successfully accepted and remains open, the server is
+ // definitely non-idle. Dialing another simultaneous connection should
+ // succeed.
+ conn2, err := jsonrpc2.Dial(ctx, listener.Dialer(), jsonrpc2.ConnectionOptions{})
+ if err != nil {
+ conn1.Close()
+ t.Fatalf("conn2 failed to connect while non-idle after %v: %v", time.Since(idleStart), err)
+ return false
+ }
+ // Ensure that conn2 is also accepted on the server side before we close
+ // conn1. Otherwise, the connection can appear idle if the server processes
+ // the closure of conn1 and the idle timeout before it finally notices conn2
+ // in the accept queue.
+ // (That failure mode may explain the failure noted in
+ // https://go.dev/issue/49387#issuecomment-1303979877.)
+ ac = conn2.Call(ctx, "ping", nil)
+ if err := ac.Await(ctx, nil); !errors.Is(err, jsonrpc2.ErrMethodNotFound) {
+ t.Fatalf("conn2 broken while non-idle after %v: %v", time.Since(idleStart), err)
+ }
+
+ if err := conn1.Close(); err != nil {
+ t.Fatalf("conn1.Close failed with error: %v", err)
+ }
+ idleStart = time.Now()
+ if err := conn2.Close(); err != nil {
+ t.Fatalf("conn2.Close failed with error: %v", err)
+ }
+
+ conn3, err := jsonrpc2.Dial(ctx, listener.Dialer(), jsonrpc2.ConnectionOptions{})
+ if err != nil {
+ if since := time.Since(idleStart); since < d {
+ t.Fatalf("conn3 failed to connect after %v: %v", since, err)
+ }
+ t.Log("jsonrpc2.Dial:", err)
+ return false // Took to long to dial, so the failure could have been due to the idle timeout.
+ }
+
+ ac = conn3.Call(ctx, "ping", nil)
+ if err := ac.Await(ctx, nil); !errors.Is(err, jsonrpc2.ErrMethodNotFound) {
+ if since := time.Since(idleStart); since < d {
+ t.Fatalf("conn3 broken after %v: %v", since, err)
+ }
+ t.Log(`conn3.Call(ctx, "ping", nil):`, err)
+ conn3.Close()
+ return false
+ }
+
+ idleStart = time.Now()
+ if err := conn3.Close(); err != nil {
+ t.Fatalf("conn3.Close failed with error: %v", err)
+ }
+
+ serverError := server.Wait()
+
+ if !errors.Is(serverError, jsonrpc2.ErrIdleTimeout) {
+ t.Errorf("run() returned error %v, want %v", serverError, jsonrpc2.ErrIdleTimeout)
+ }
+ if since := time.Since(idleStart); since < d {
+ t.Errorf("server shut down after %v idle; want at least %v", since, d)
+ }
+ return true
+ }
+
+ d := 1 * time.Millisecond
+ for {
+ t.Logf("testing with idle timout %v", d)
+ if !try(d) {
+ d *= 2
+ continue
+ }
+ break
}
}
@@ -78,8 +157,7 @@ func (fakeHandler) Handle(ctx context.Context, req *jsonrpc2.Request) (interface
func TestServe(t *testing.T) {
stacktest.NoLeak(t)
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
- defer cancel()
+ ctx := context.Background()
tests := []struct {
name string
@@ -116,13 +194,9 @@ func TestServe(t *testing.T) {
}
func newFake(t *testing.T, ctx context.Context, l jsonrpc2.Listener) (*jsonrpc2.Connection, func(), error) {
- l = jsonrpc2.NewIdleListener(100*time.Millisecond, l)
- server, err := jsonrpc2.Serve(ctx, l, jsonrpc2.ConnectionOptions{
+ server := jsonrpc2.NewServer(ctx, l, jsonrpc2.ConnectionOptions{
Handler: fakeHandler{},
})
- if err != nil {
- return nil, nil, err
- }
client, err := jsonrpc2.Dial(ctx,
l.Dialer(),
@@ -142,3 +216,129 @@ func newFake(t *testing.T, ctx context.Context, l jsonrpc2.Listener) (*jsonrpc2.
server.Wait()
}, nil
}
+
+// TestIdleListenerAcceptCloseRace checks for the Accept/Close race fixed in CL 388597.
+//
+// (A bug in the idleListener implementation caused a successful Accept to block
+// on sending to a background goroutine that could have already exited.)
+func TestIdleListenerAcceptCloseRace(t *testing.T) {
+ ctx := context.Background()
+
+ n := 10
+
+ // Each iteration of the loop appears to take around a millisecond, so to
+ // avoid spurious failures we'll set the watchdog for three orders of
+ // magnitude longer. When the bug was present, this reproduced the deadlock
+ // reliably on a Linux workstation when run with -count=100, which should be
+ // frequent enough to show up on the Go build dashboard if it regresses.
+ watchdog := time.Duration(n) * 1000 * time.Millisecond
+ timer := time.AfterFunc(watchdog, func() {
+ debug.SetTraceback("all")
+ panic(fmt.Sprintf("%s deadlocked after %v", t.Name(), watchdog))
+ })
+ defer timer.Stop()
+
+ for ; n > 0; n-- {
+ listener, err := jsonrpc2.NetPipeListener(ctx)
+ if err != nil {
+ t.Fatal(err)
+ }
+ listener = jsonrpc2.NewIdleListener(24*time.Hour, listener)
+
+ done := make(chan struct{})
+ go func() {
+ conn, err := jsonrpc2.Dial(ctx, listener.Dialer(), jsonrpc2.ConnectionOptions{})
+ listener.Close()
+ if err == nil {
+ conn.Close()
+ }
+ close(done)
+ }()
+
+ // Accept may return a non-nil error if Close closes the underlying network
+ // connection before the wrapped Accept call unblocks. However, it must not
+ // deadlock!
+ c, err := listener.Accept(ctx)
+ if err == nil {
+ c.Close()
+ }
+ <-done
+ }
+}
+
+// TestCloseCallRace checks for a race resulting in a deadlock when a Call on
+// one side of the connection races with a Close (or otherwise broken
+// connection) initiated from the other side.
+//
+// (The Call method was waiting for a result from the Read goroutine to
+// determine which error value to return, but the Read goroutine was waiting for
+// in-flight calls to complete before reporting that result.)
+func TestCloseCallRace(t *testing.T) {
+ ctx := context.Background()
+ n := 10
+
+ watchdog := time.Duration(n) * 1000 * time.Millisecond
+ timer := time.AfterFunc(watchdog, func() {
+ debug.SetTraceback("all")
+ panic(fmt.Sprintf("%s deadlocked after %v", t.Name(), watchdog))
+ })
+ defer timer.Stop()
+
+ for ; n > 0; n-- {
+ listener, err := jsonrpc2.NetPipeListener(ctx)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ pokec := make(chan *jsonrpc2.AsyncCall, 1)
+
+ s := jsonrpc2.NewServer(ctx, listener, jsonrpc2.BinderFunc(func(_ context.Context, srvConn *jsonrpc2.Connection) jsonrpc2.ConnectionOptions {
+ h := jsonrpc2.HandlerFunc(func(ctx context.Context, _ *jsonrpc2.Request) (interface{}, error) {
+ // Start a concurrent call from the server to the client.
+ // The point of this test is to ensure this doesn't deadlock
+ // if the client shuts down the connection concurrently.
+ //
+ // The racing Call may or may not receive a response: it should get a
+ // response if it is sent before the client closes the connection, and
+ // it should fail with some kind of "connection closed" error otherwise.
+ go func() {
+ pokec <- srvConn.Call(ctx, "poke", nil)
+ }()
+
+ return &msg{"pong"}, nil
+ })
+ return jsonrpc2.ConnectionOptions{Handler: h}
+ }))
+
+ dialConn, err := jsonrpc2.Dial(ctx, listener.Dialer(), jsonrpc2.ConnectionOptions{})
+ if err != nil {
+ listener.Close()
+ s.Wait()
+ t.Fatal(err)
+ }
+
+ // Calling any method on the server should provoke it to asynchronously call
+ // us back. While it is starting that call, we will close the connection.
+ if err := dialConn.Call(ctx, "ping", nil).Await(ctx, nil); err != nil {
+ t.Error(err)
+ }
+ if err := dialConn.Close(); err != nil {
+ t.Error(err)
+ }
+
+ // Ensure that the Call on the server side did not block forever when the
+ // connection closed.
+ pokeCall := <-pokec
+ if err := pokeCall.Await(ctx, nil); err == nil {
+ t.Errorf("unexpected nil error from server-initited call")
+ } else if errors.Is(err, jsonrpc2.ErrMethodNotFound) {
+ // The call completed before the Close reached the handler.
+ } else {
+ // The error was something else.
+ t.Logf("server-initiated call completed with expected error: %v", err)
+ }
+
+ listener.Close()
+ s.Wait()
+ }
+}
diff --git a/internal/jsonrpc2_v2/wire.go b/internal/jsonrpc2_v2/wire.go
index 4da129ae6..c8dc9ebf1 100644
--- a/internal/jsonrpc2_v2/wire.go
+++ b/internal/jsonrpc2_v2/wire.go
@@ -33,6 +33,10 @@ var (
ErrServerOverloaded = NewError(-32000, "JSON RPC overloaded")
// ErrUnknown should be used for all non coded errors.
ErrUnknown = NewError(-32001, "JSON RPC unknown error")
+ // ErrServerClosing is returned for calls that arrive while the server is closing.
+ ErrServerClosing = NewError(-32002, "JSON RPC server is closing")
+ // ErrClientClosing is a dummy error returned for calls initiated while the client is closing.
+ ErrClientClosing = NewError(-32003, "JSON RPC client is closing")
)
const wireVersion = "2.0"
@@ -72,3 +76,11 @@ func NewError(code int64, message string) error {
func (err *wireError) Error() string {
return err.Message
}
+
+func (err *wireError) Is(other error) bool {
+ w, ok := other.(*wireError)
+ if !ok {
+ return false
+ }
+ return err.Code == w.Code
+}
diff --git a/internal/lockedfile/internal/filelock/filelock.go b/internal/lockedfile/internal/filelock/filelock.go
new file mode 100644
index 000000000..05f27c321
--- /dev/null
+++ b/internal/lockedfile/internal/filelock/filelock.go
@@ -0,0 +1,99 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package filelock provides a platform-independent API for advisory file
+// locking. Calls to functions in this package on platforms that do not support
+// advisory locks will return errors for which IsNotSupported returns true.
+package filelock
+
+import (
+ "errors"
+ "io/fs"
+ "os"
+)
+
+// A File provides the minimal set of methods required to lock an open file.
+// File implementations must be usable as map keys.
+// The usual implementation is *os.File.
+type File interface {
+ // Name returns the name of the file.
+ Name() string
+
+ // Fd returns a valid file descriptor.
+ // (If the File is an *os.File, it must not be closed.)
+ Fd() uintptr
+
+ // Stat returns the FileInfo structure describing file.
+ Stat() (fs.FileInfo, error)
+}
+
+// Lock places an advisory write lock on the file, blocking until it can be
+// locked.
+//
+// If Lock returns nil, no other process will be able to place a read or write
+// lock on the file until this process exits, closes f, or calls Unlock on it.
+//
+// If f's descriptor is already read- or write-locked, the behavior of Lock is
+// unspecified.
+//
+// Closing the file may or may not release the lock promptly. Callers should
+// ensure that Unlock is always called when Lock succeeds.
+func Lock(f File) error {
+ return lock(f, writeLock)
+}
+
+// RLock places an advisory read lock on the file, blocking until it can be locked.
+//
+// If RLock returns nil, no other process will be able to place a write lock on
+// the file until this process exits, closes f, or calls Unlock on it.
+//
+// If f is already read- or write-locked, the behavior of RLock is unspecified.
+//
+// Closing the file may or may not release the lock promptly. Callers should
+// ensure that Unlock is always called if RLock succeeds.
+func RLock(f File) error {
+ return lock(f, readLock)
+}
+
+// Unlock removes an advisory lock placed on f by this process.
+//
+// The caller must not attempt to unlock a file that is not locked.
+func Unlock(f File) error {
+ return unlock(f)
+}
+
+// String returns the name of the function corresponding to lt
+// (Lock, RLock, or Unlock).
+func (lt lockType) String() string {
+ switch lt {
+ case readLock:
+ return "RLock"
+ case writeLock:
+ return "Lock"
+ default:
+ return "Unlock"
+ }
+}
+
+// IsNotSupported returns a boolean indicating whether the error is known to
+// report that a function is not supported (possibly for a specific input).
+// It is satisfied by ErrNotSupported as well as some syscall errors.
+func IsNotSupported(err error) bool {
+ return isNotSupported(underlyingError(err))
+}
+
+var ErrNotSupported = errors.New("operation not supported")
+
+// underlyingError returns the underlying error for known os error types.
+func underlyingError(err error) error {
+ switch err := err.(type) {
+ case *fs.PathError:
+ return err.Err
+ case *os.LinkError:
+ return err.Err
+ case *os.SyscallError:
+ return err.Err
+ }
+ return err
+}
diff --git a/internal/lockedfile/internal/filelock/filelock_fcntl.go b/internal/lockedfile/internal/filelock/filelock_fcntl.go
new file mode 100644
index 000000000..309851910
--- /dev/null
+++ b/internal/lockedfile/internal/filelock/filelock_fcntl.go
@@ -0,0 +1,215 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix || (solaris && !illumos)
+// +build aix solaris,!illumos
+
+// This code implements the filelock API using POSIX 'fcntl' locks, which attach
+// to an (inode, process) pair rather than a file descriptor. To avoid unlocking
+// files prematurely when the same file is opened through different descriptors,
+// we allow only one read-lock at a time.
+//
+// Most platforms provide some alternative API, such as an 'flock' system call
+// or an F_OFD_SETLK command for 'fcntl', that allows for better concurrency and
+// does not require per-inode bookkeeping in the application.
+
+package filelock
+
+import (
+ "errors"
+ "io"
+ "io/fs"
+ "math/rand"
+ "sync"
+ "syscall"
+ "time"
+)
+
+type lockType int16
+
+const (
+ readLock lockType = syscall.F_RDLCK
+ writeLock lockType = syscall.F_WRLCK
+)
+
+type inode = uint64 // type of syscall.Stat_t.Ino
+
+type inodeLock struct {
+ owner File
+ queue []<-chan File
+}
+
+var (
+ mu sync.Mutex
+ inodes = map[File]inode{}
+ locks = map[inode]inodeLock{}
+)
+
+func lock(f File, lt lockType) (err error) {
+ // POSIX locks apply per inode and process, and the lock for an inode is
+ // released when *any* descriptor for that inode is closed. So we need to
+ // synchronize access to each inode internally, and must serialize lock and
+ // unlock calls that refer to the same inode through different descriptors.
+ fi, err := f.Stat()
+ if err != nil {
+ return err
+ }
+ ino := fi.Sys().(*syscall.Stat_t).Ino
+
+ mu.Lock()
+ if i, dup := inodes[f]; dup && i != ino {
+ mu.Unlock()
+ return &fs.PathError{
+ Op: lt.String(),
+ Path: f.Name(),
+ Err: errors.New("inode for file changed since last Lock or RLock"),
+ }
+ }
+ inodes[f] = ino
+
+ var wait chan File
+ l := locks[ino]
+ if l.owner == f {
+ // This file already owns the lock, but the call may change its lock type.
+ } else if l.owner == nil {
+ // No owner: it's ours now.
+ l.owner = f
+ } else {
+ // Already owned: add a channel to wait on.
+ wait = make(chan File)
+ l.queue = append(l.queue, wait)
+ }
+ locks[ino] = l
+ mu.Unlock()
+
+ if wait != nil {
+ wait <- f
+ }
+
+ // Spurious EDEADLK errors arise on platforms that compute deadlock graphs at
+ // the process, rather than thread, level. Consider processes P and Q, with
+ // threads P.1, P.2, and Q.3. The following trace is NOT a deadlock, but will be
+ // reported as a deadlock on systems that consider only process granularity:
+ //
+ // P.1 locks file A.
+ // Q.3 locks file B.
+ // Q.3 blocks on file A.
+ // P.2 blocks on file B. (This is erroneously reported as a deadlock.)
+ // P.1 unlocks file A.
+ // Q.3 unblocks and locks file A.
+ // Q.3 unlocks files A and B.
+ // P.2 unblocks and locks file B.
+ // P.2 unlocks file B.
+ //
+ // These spurious errors were observed in practice on AIX and Solaris in
+ // cmd/go: see https://golang.org/issue/32817.
+ //
+ // We work around this bug by treating EDEADLK as always spurious. If there
+ // really is a lock-ordering bug between the interacting processes, it will
+ // become a livelock instead, but that's not appreciably worse than if we had
+ // a proper flock implementation (which generally does not even attempt to
+ // diagnose deadlocks).
+ //
+ // In the above example, that changes the trace to:
+ //
+ // P.1 locks file A.
+ // Q.3 locks file B.
+ // Q.3 blocks on file A.
+ // P.2 spuriously fails to lock file B and goes to sleep.
+ // P.1 unlocks file A.
+ // Q.3 unblocks and locks file A.
+ // Q.3 unlocks files A and B.
+ // P.2 wakes up and locks file B.
+ // P.2 unlocks file B.
+ //
+ // We know that the retry loop will not introduce a *spurious* livelock
+ // because, according to the POSIX specification, EDEADLK is only to be
+ // returned when “the lock is blocked by a lock from another process”.
+ // If that process is blocked on some lock that we are holding, then the
+ // resulting livelock is due to a real deadlock (and would manifest as such
+ // when using, for example, the flock implementation of this package).
+ // If the other process is *not* blocked on some other lock that we are
+ // holding, then it will eventually release the requested lock.
+
+ nextSleep := 1 * time.Millisecond
+ const maxSleep = 500 * time.Millisecond
+ for {
+ err = setlkw(f.Fd(), lt)
+ if err != syscall.EDEADLK {
+ break
+ }
+ time.Sleep(nextSleep)
+
+ nextSleep += nextSleep
+ if nextSleep > maxSleep {
+ nextSleep = maxSleep
+ }
+ // Apply 10% jitter to avoid synchronizing collisions when we finally unblock.
+ nextSleep += time.Duration((0.1*rand.Float64() - 0.05) * float64(nextSleep))
+ }
+
+ if err != nil {
+ unlock(f)
+ return &fs.PathError{
+ Op: lt.String(),
+ Path: f.Name(),
+ Err: err,
+ }
+ }
+
+ return nil
+}
+
+func unlock(f File) error {
+ var owner File
+
+ mu.Lock()
+ ino, ok := inodes[f]
+ if ok {
+ owner = locks[ino].owner
+ }
+ mu.Unlock()
+
+ if owner != f {
+ panic("unlock called on a file that is not locked")
+ }
+
+ err := setlkw(f.Fd(), syscall.F_UNLCK)
+
+ mu.Lock()
+ l := locks[ino]
+ if len(l.queue) == 0 {
+ // No waiters: remove the map entry.
+ delete(locks, ino)
+ } else {
+ // The first waiter is sending us their file now.
+ // Receive it and update the queue.
+ l.owner = <-l.queue[0]
+ l.queue = l.queue[1:]
+ locks[ino] = l
+ }
+ delete(inodes, f)
+ mu.Unlock()
+
+ return err
+}
+
+// setlkw calls FcntlFlock with F_SETLKW for the entire file indicated by fd.
+func setlkw(fd uintptr, lt lockType) error {
+ for {
+ err := syscall.FcntlFlock(fd, syscall.F_SETLKW, &syscall.Flock_t{
+ Type: int16(lt),
+ Whence: io.SeekStart,
+ Start: 0,
+ Len: 0, // All bytes.
+ })
+ if err != syscall.EINTR {
+ return err
+ }
+ }
+}
+
+func isNotSupported(err error) bool {
+ return err == syscall.ENOSYS || err == syscall.ENOTSUP || err == syscall.EOPNOTSUPP || err == ErrNotSupported
+}
diff --git a/internal/lockedfile/internal/filelock/filelock_other.go b/internal/lockedfile/internal/filelock/filelock_other.go
new file mode 100644
index 000000000..cde868f49
--- /dev/null
+++ b/internal/lockedfile/internal/filelock/filelock_other.go
@@ -0,0 +1,37 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !(aix || darwin || dragonfly || freebsd || illumos || linux || netbsd || openbsd || solaris) && !plan9 && !windows
+// +build !aix,!darwin,!dragonfly,!freebsd,!illumos,!linux,!netbsd,!openbsd,!solaris,!plan9,!windows
+
+package filelock
+
+import "io/fs"
+
+type lockType int8
+
+const (
+ readLock = iota + 1
+ writeLock
+)
+
+func lock(f File, lt lockType) error {
+ return &fs.PathError{
+ Op: lt.String(),
+ Path: f.Name(),
+ Err: ErrNotSupported,
+ }
+}
+
+func unlock(f File) error {
+ return &fs.PathError{
+ Op: "Unlock",
+ Path: f.Name(),
+ Err: ErrNotSupported,
+ }
+}
+
+func isNotSupported(err error) bool {
+ return err == ErrNotSupported
+}
diff --git a/internal/lockedfile/internal/filelock/filelock_plan9.go b/internal/lockedfile/internal/filelock/filelock_plan9.go
new file mode 100644
index 000000000..908afb6c8
--- /dev/null
+++ b/internal/lockedfile/internal/filelock/filelock_plan9.go
@@ -0,0 +1,37 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build plan9
+// +build plan9
+
+package filelock
+
+import "io/fs"
+
+type lockType int8
+
+const (
+ readLock = iota + 1
+ writeLock
+)
+
+func lock(f File, lt lockType) error {
+ return &fs.PathError{
+ Op: lt.String(),
+ Path: f.Name(),
+ Err: ErrNotSupported,
+ }
+}
+
+func unlock(f File) error {
+ return &fs.PathError{
+ Op: "Unlock",
+ Path: f.Name(),
+ Err: ErrNotSupported,
+ }
+}
+
+func isNotSupported(err error) bool {
+ return err == ErrNotSupported
+}
diff --git a/internal/lockedfile/internal/filelock/filelock_test.go b/internal/lockedfile/internal/filelock/filelock_test.go
new file mode 100644
index 000000000..224feda93
--- /dev/null
+++ b/internal/lockedfile/internal/filelock/filelock_test.go
@@ -0,0 +1,209 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !js && !plan9
+// +build !js,!plan9
+
+package filelock_test
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "testing"
+ "time"
+
+ "golang.org/x/tools/internal/lockedfile/internal/filelock"
+)
+
+func lock(t *testing.T, f *os.File) {
+ t.Helper()
+ err := filelock.Lock(f)
+ t.Logf("Lock(fd %d) = %v", f.Fd(), err)
+ if err != nil {
+ t.Fail()
+ }
+}
+
+func rLock(t *testing.T, f *os.File) {
+ t.Helper()
+ err := filelock.RLock(f)
+ t.Logf("RLock(fd %d) = %v", f.Fd(), err)
+ if err != nil {
+ t.Fail()
+ }
+}
+
+func unlock(t *testing.T, f *os.File) {
+ t.Helper()
+ err := filelock.Unlock(f)
+ t.Logf("Unlock(fd %d) = %v", f.Fd(), err)
+ if err != nil {
+ t.Fail()
+ }
+}
+
+func mustTempFile(t *testing.T) (f *os.File, remove func()) {
+ t.Helper()
+
+ base := filepath.Base(t.Name())
+ f, err := os.CreateTemp("", base)
+ if err != nil {
+ t.Fatalf(`os.CreateTemp("", %q) = %v`, base, err)
+ }
+ t.Logf("fd %d = %s", f.Fd(), f.Name())
+
+ return f, func() {
+ f.Close()
+ os.Remove(f.Name())
+ }
+}
+
+func mustOpen(t *testing.T, name string) *os.File {
+ t.Helper()
+
+ f, err := os.OpenFile(name, os.O_RDWR, 0)
+ if err != nil {
+ t.Fatalf("os.Open(%q) = %v", name, err)
+ }
+
+ t.Logf("fd %d = os.Open(%q)", f.Fd(), name)
+ return f
+}
+
+const (
+ quiescent = 10 * time.Millisecond
+ probablyStillBlocked = 10 * time.Second
+)
+
+func mustBlock(t *testing.T, op string, f *os.File) (wait func(*testing.T)) {
+ t.Helper()
+
+ desc := fmt.Sprintf("%s(fd %d)", op, f.Fd())
+
+ done := make(chan struct{})
+ go func() {
+ t.Helper()
+ switch op {
+ case "Lock":
+ lock(t, f)
+ case "RLock":
+ rLock(t, f)
+ default:
+ panic("invalid op: " + op)
+ }
+ close(done)
+ }()
+
+ select {
+ case <-done:
+ t.Fatalf("%s unexpectedly did not block", desc)
+ return nil
+
+ case <-time.After(quiescent):
+ t.Logf("%s is blocked (as expected)", desc)
+ return func(t *testing.T) {
+ t.Helper()
+ select {
+ case <-time.After(probablyStillBlocked):
+ t.Fatalf("%s is unexpectedly still blocked", desc)
+ case <-done:
+ }
+ }
+ }
+}
+
+func TestLockExcludesLock(t *testing.T) {
+ t.Parallel()
+
+ f, remove := mustTempFile(t)
+ defer remove()
+
+ other := mustOpen(t, f.Name())
+ defer other.Close()
+
+ lock(t, f)
+ lockOther := mustBlock(t, "Lock", other)
+ unlock(t, f)
+ lockOther(t)
+ unlock(t, other)
+}
+
+func TestLockExcludesRLock(t *testing.T) {
+ t.Parallel()
+
+ f, remove := mustTempFile(t)
+ defer remove()
+
+ other := mustOpen(t, f.Name())
+ defer other.Close()
+
+ lock(t, f)
+ rLockOther := mustBlock(t, "RLock", other)
+ unlock(t, f)
+ rLockOther(t)
+ unlock(t, other)
+}
+
+func TestRLockExcludesOnlyLock(t *testing.T) {
+ t.Parallel()
+
+ f, remove := mustTempFile(t)
+ defer remove()
+ rLock(t, f)
+
+ f2 := mustOpen(t, f.Name())
+ defer f2.Close()
+
+ doUnlockTF := false
+ switch runtime.GOOS {
+ case "aix", "solaris":
+ // When using POSIX locks (as on Solaris), we can't safely read-lock the
+ // same inode through two different descriptors at the same time: when the
+ // first descriptor is closed, the second descriptor would still be open but
+ // silently unlocked. So a second RLock must block instead of proceeding.
+ lockF2 := mustBlock(t, "RLock", f2)
+ unlock(t, f)
+ lockF2(t)
+ default:
+ rLock(t, f2)
+ doUnlockTF = true
+ }
+
+ other := mustOpen(t, f.Name())
+ defer other.Close()
+ lockOther := mustBlock(t, "Lock", other)
+
+ unlock(t, f2)
+ if doUnlockTF {
+ unlock(t, f)
+ }
+ lockOther(t)
+ unlock(t, other)
+}
+
+func TestLockNotDroppedByExecCommand(t *testing.T) {
+ f, remove := mustTempFile(t)
+ defer remove()
+
+ lock(t, f)
+
+ other := mustOpen(t, f.Name())
+ defer other.Close()
+
+ // Some kinds of file locks are dropped when a duplicated or forked file
+ // descriptor is unlocked. Double-check that the approach used by os/exec does
+ // not accidentally drop locks.
+ cmd := exec.Command(os.Args[0], "-test.run=^$")
+ if err := cmd.Run(); err != nil {
+ t.Fatalf("exec failed: %v", err)
+ }
+
+ lockOther := mustBlock(t, "Lock", other)
+ unlock(t, f)
+ lockOther(t)
+ unlock(t, other)
+}
diff --git a/internal/lockedfile/internal/filelock/filelock_unix.go b/internal/lockedfile/internal/filelock/filelock_unix.go
new file mode 100644
index 000000000..878a1e770
--- /dev/null
+++ b/internal/lockedfile/internal/filelock/filelock_unix.go
@@ -0,0 +1,45 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build darwin || dragonfly || freebsd || illumos || linux || netbsd || openbsd
+// +build darwin dragonfly freebsd illumos linux netbsd openbsd
+
+package filelock
+
+import (
+ "io/fs"
+ "syscall"
+)
+
+type lockType int16
+
+const (
+ readLock lockType = syscall.LOCK_SH
+ writeLock lockType = syscall.LOCK_EX
+)
+
+func lock(f File, lt lockType) (err error) {
+ for {
+ err = syscall.Flock(int(f.Fd()), int(lt))
+ if err != syscall.EINTR {
+ break
+ }
+ }
+ if err != nil {
+ return &fs.PathError{
+ Op: lt.String(),
+ Path: f.Name(),
+ Err: err,
+ }
+ }
+ return nil
+}
+
+func unlock(f File) error {
+ return lock(f, syscall.LOCK_UN)
+}
+
+func isNotSupported(err error) bool {
+ return err == syscall.ENOSYS || err == syscall.ENOTSUP || err == syscall.EOPNOTSUPP || err == ErrNotSupported
+}
diff --git a/internal/lockedfile/internal/filelock/filelock_windows.go b/internal/lockedfile/internal/filelock/filelock_windows.go
new file mode 100644
index 000000000..3273a8182
--- /dev/null
+++ b/internal/lockedfile/internal/filelock/filelock_windows.go
@@ -0,0 +1,67 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+// +build windows
+
+package filelock
+
+import (
+ "io/fs"
+
+ "golang.org/x/sys/windows"
+)
+
+type lockType uint32
+
+const (
+ readLock lockType = 0
+ writeLock lockType = windows.LOCKFILE_EXCLUSIVE_LOCK
+)
+
+const (
+ reserved = 0
+ allBytes = ^uint32(0)
+)
+
+func lock(f File, lt lockType) error {
+ // Per https://golang.org/issue/19098, “Programs currently expect the Fd
+ // method to return a handle that uses ordinary synchronous I/O.”
+ // However, LockFileEx still requires an OVERLAPPED structure,
+ // which contains the file offset of the beginning of the lock range.
+ // We want to lock the entire file, so we leave the offset as zero.
+ ol := new(windows.Overlapped)
+
+ err := windows.LockFileEx(windows.Handle(f.Fd()), uint32(lt), reserved, allBytes, allBytes, ol)
+ if err != nil {
+ return &fs.PathError{
+ Op: lt.String(),
+ Path: f.Name(),
+ Err: err,
+ }
+ }
+ return nil
+}
+
+func unlock(f File) error {
+ ol := new(windows.Overlapped)
+ err := windows.UnlockFileEx(windows.Handle(f.Fd()), reserved, allBytes, allBytes, ol)
+ if err != nil {
+ return &fs.PathError{
+ Op: "Unlock",
+ Path: f.Name(),
+ Err: err,
+ }
+ }
+ return nil
+}
+
+func isNotSupported(err error) bool {
+ switch err {
+ case windows.ERROR_NOT_SUPPORTED, windows.ERROR_CALL_NOT_IMPLEMENTED, ErrNotSupported:
+ return true
+ default:
+ return false
+ }
+}
diff --git a/internal/lockedfile/lockedfile.go b/internal/lockedfile/lockedfile.go
new file mode 100644
index 000000000..82e1a8967
--- /dev/null
+++ b/internal/lockedfile/lockedfile.go
@@ -0,0 +1,187 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package lockedfile creates and manipulates files whose contents should only
+// change atomically.
+package lockedfile
+
+import (
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "runtime"
+)
+
+// A File is a locked *os.File.
+//
+// Closing the file releases the lock.
+//
+// If the program exits while a file is locked, the operating system releases
+// the lock but may not do so promptly: callers must ensure that all locked
+// files are closed before exiting.
+type File struct {
+ osFile
+ closed bool
+}
+
+// osFile embeds a *os.File while keeping the pointer itself unexported.
+// (When we close a File, it must be the same file descriptor that we opened!)
+type osFile struct {
+ *os.File
+}
+
+// OpenFile is like os.OpenFile, but returns a locked file.
+// If flag includes os.O_WRONLY or os.O_RDWR, the file is write-locked;
+// otherwise, it is read-locked.
+func OpenFile(name string, flag int, perm fs.FileMode) (*File, error) {
+ var (
+ f = new(File)
+ err error
+ )
+ f.osFile.File, err = openFile(name, flag, perm)
+ if err != nil {
+ return nil, err
+ }
+
+ // Although the operating system will drop locks for open files when the go
+ // command exits, we want to hold locks for as little time as possible, and we
+ // especially don't want to leave a file locked after we're done with it. Our
+ // Close method is what releases the locks, so use a finalizer to report
+ // missing Close calls on a best-effort basis.
+ runtime.SetFinalizer(f, func(f *File) {
+ panic(fmt.Sprintf("lockedfile.File %s became unreachable without a call to Close", f.Name()))
+ })
+
+ return f, nil
+}
+
+// Open is like os.Open, but returns a read-locked file.
+func Open(name string) (*File, error) {
+ return OpenFile(name, os.O_RDONLY, 0)
+}
+
+// Create is like os.Create, but returns a write-locked file.
+func Create(name string) (*File, error) {
+ return OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
+}
+
+// Edit creates the named file with mode 0666 (before umask),
+// but does not truncate existing contents.
+//
+// If Edit succeeds, methods on the returned File can be used for I/O.
+// The associated file descriptor has mode O_RDWR and the file is write-locked.
+func Edit(name string) (*File, error) {
+ return OpenFile(name, os.O_RDWR|os.O_CREATE, 0666)
+}
+
+// Close unlocks and closes the underlying file.
+//
+// Close may be called multiple times; all calls after the first will return a
+// non-nil error.
+func (f *File) Close() error {
+ if f.closed {
+ return &fs.PathError{
+ Op: "close",
+ Path: f.Name(),
+ Err: fs.ErrClosed,
+ }
+ }
+ f.closed = true
+
+ err := closeFile(f.osFile.File)
+ runtime.SetFinalizer(f, nil)
+ return err
+}
+
+// Read opens the named file with a read-lock and returns its contents.
+func Read(name string) ([]byte, error) {
+ f, err := Open(name)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return io.ReadAll(f)
+}
+
+// Write opens the named file (creating it with the given permissions if needed),
+// then write-locks it and overwrites it with the given content.
+func Write(name string, content io.Reader, perm fs.FileMode) (err error) {
+ f, err := OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
+ if err != nil {
+ return err
+ }
+
+ _, err = io.Copy(f, content)
+ if closeErr := f.Close(); err == nil {
+ err = closeErr
+ }
+ return err
+}
+
+// Transform invokes t with the result of reading the named file, with its lock
+// still held.
+//
+// If t returns a nil error, Transform then writes the returned contents back to
+// the file, making a best effort to preserve existing contents on error.
+//
+// t must not modify the slice passed to it.
+func Transform(name string, t func([]byte) ([]byte, error)) (err error) {
+ f, err := Edit(name)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ old, err := io.ReadAll(f)
+ if err != nil {
+ return err
+ }
+
+ new, err := t(old)
+ if err != nil {
+ return err
+ }
+
+ if len(new) > len(old) {
+ // The overall file size is increasing, so write the tail first: if we're
+ // about to run out of space on the disk, we would rather detect that
+ // failure before we have overwritten the original contents.
+ if _, err := f.WriteAt(new[len(old):], int64(len(old))); err != nil {
+ // Make a best effort to remove the incomplete tail.
+ f.Truncate(int64(len(old)))
+ return err
+ }
+ }
+
+ // We're about to overwrite the old contents. In case of failure, make a best
+ // effort to roll back before we close the file.
+ defer func() {
+ if err != nil {
+ if _, err := f.WriteAt(old, 0); err == nil {
+ f.Truncate(int64(len(old)))
+ }
+ }
+ }()
+
+ if len(new) >= len(old) {
+ if _, err := f.WriteAt(new[:len(old)], 0); err != nil {
+ return err
+ }
+ } else {
+ if _, err := f.WriteAt(new, 0); err != nil {
+ return err
+ }
+ // The overall file size is decreasing, so shrink the file to its final size
+ // after writing. We do this after writing (instead of before) so that if
+ // the write fails, enough filesystem space will likely still be reserved
+ // to contain the previous contents.
+ if err := f.Truncate(int64(len(new))); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/internal/lockedfile/lockedfile_filelock.go b/internal/lockedfile/lockedfile_filelock.go
new file mode 100644
index 000000000..7c71672c8
--- /dev/null
+++ b/internal/lockedfile/lockedfile_filelock.go
@@ -0,0 +1,66 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !plan9
+// +build !plan9
+
+package lockedfile
+
+import (
+ "io/fs"
+ "os"
+
+ "golang.org/x/tools/internal/lockedfile/internal/filelock"
+)
+
+func openFile(name string, flag int, perm fs.FileMode) (*os.File, error) {
+ // On BSD systems, we could add the O_SHLOCK or O_EXLOCK flag to the OpenFile
+ // call instead of locking separately, but we have to support separate locking
+ // calls for Linux and Windows anyway, so it's simpler to use that approach
+ // consistently.
+
+ f, err := os.OpenFile(name, flag&^os.O_TRUNC, perm)
+ if err != nil {
+ return nil, err
+ }
+
+ switch flag & (os.O_RDONLY | os.O_WRONLY | os.O_RDWR) {
+ case os.O_WRONLY, os.O_RDWR:
+ err = filelock.Lock(f)
+ default:
+ err = filelock.RLock(f)
+ }
+ if err != nil {
+ f.Close()
+ return nil, err
+ }
+
+ if flag&os.O_TRUNC == os.O_TRUNC {
+ if err := f.Truncate(0); err != nil {
+ // The documentation for os.O_TRUNC says “if possible, truncate file when
+ // opened”, but doesn't define “possible” (golang.org/issue/28699).
+ // We'll treat regular files (and symlinks to regular files) as “possible”
+ // and ignore errors for the rest.
+ if fi, statErr := f.Stat(); statErr != nil || fi.Mode().IsRegular() {
+ filelock.Unlock(f)
+ f.Close()
+ return nil, err
+ }
+ }
+ }
+
+ return f, nil
+}
+
+func closeFile(f *os.File) error {
+ // Since locking syscalls operate on file descriptors, we must unlock the file
+ // while the descriptor is still valid — that is, before the file is closed —
+ // and avoid unlocking files that are already closed.
+ err := filelock.Unlock(f)
+
+ if closeErr := f.Close(); err == nil {
+ err = closeErr
+ }
+ return err
+}
diff --git a/internal/lockedfile/lockedfile_plan9.go b/internal/lockedfile/lockedfile_plan9.go
new file mode 100644
index 000000000..40871e610
--- /dev/null
+++ b/internal/lockedfile/lockedfile_plan9.go
@@ -0,0 +1,95 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build plan9
+// +build plan9
+
+package lockedfile
+
+import (
+ "io/fs"
+ "math/rand"
+ "os"
+ "strings"
+ "time"
+)
+
+// Opening an exclusive-use file returns an error.
+// The expected error strings are:
+//
+// - "open/create -- file is locked" (cwfs, kfs)
+// - "exclusive lock" (fossil)
+// - "exclusive use file already open" (ramfs)
+var lockedErrStrings = [...]string{
+ "file is locked",
+ "exclusive lock",
+ "exclusive use file already open",
+}
+
+// Even though plan9 doesn't support the Lock/RLock/Unlock functions to
+// manipulate already-open files, IsLocked is still meaningful: os.OpenFile
+// itself may return errors that indicate that a file with the ModeExclusive bit
+// set is already open.
+func isLocked(err error) bool {
+ s := err.Error()
+
+ for _, frag := range lockedErrStrings {
+ if strings.Contains(s, frag) {
+ return true
+ }
+ }
+
+ return false
+}
+
+func openFile(name string, flag int, perm fs.FileMode) (*os.File, error) {
+ // Plan 9 uses a mode bit instead of explicit lock/unlock syscalls.
+ //
+ // Per http://man.cat-v.org/plan_9/5/stat: “Exclusive use files may be open
+ // for I/O by only one fid at a time across all clients of the server. If a
+ // second open is attempted, it draws an error.”
+ //
+ // So we can try to open a locked file, but if it fails we're on our own to
+ // figure out when it becomes available. We'll use exponential backoff with
+ // some jitter and an arbitrary limit of 500ms.
+
+ // If the file was unpacked or created by some other program, it might not
+ // have the ModeExclusive bit set. Set it before we call OpenFile, so that we
+ // can be confident that a successful OpenFile implies exclusive use.
+ if fi, err := os.Stat(name); err == nil {
+ if fi.Mode()&fs.ModeExclusive == 0 {
+ if err := os.Chmod(name, fi.Mode()|fs.ModeExclusive); err != nil {
+ return nil, err
+ }
+ }
+ } else if !os.IsNotExist(err) {
+ return nil, err
+ }
+
+ nextSleep := 1 * time.Millisecond
+ const maxSleep = 500 * time.Millisecond
+ for {
+ f, err := os.OpenFile(name, flag, perm|fs.ModeExclusive)
+ if err == nil {
+ return f, nil
+ }
+
+ if !isLocked(err) {
+ return nil, err
+ }
+
+ time.Sleep(nextSleep)
+
+ nextSleep += nextSleep
+ if nextSleep > maxSleep {
+ nextSleep = maxSleep
+ }
+ // Apply 10% jitter to avoid synchronizing collisions.
+ nextSleep += time.Duration((0.1*rand.Float64() - 0.05) * float64(nextSleep))
+ }
+}
+
+func closeFile(f *os.File) error {
+ return f.Close()
+}
diff --git a/internal/lockedfile/lockedfile_test.go b/internal/lockedfile/lockedfile_test.go
new file mode 100644
index 000000000..572178d0d
--- /dev/null
+++ b/internal/lockedfile/lockedfile_test.go
@@ -0,0 +1,270 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// js does not support inter-process file locking.
+//
+//go:build !js
+// +build !js
+
+package lockedfile_test
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "testing"
+ "time"
+
+ "golang.org/x/tools/internal/lockedfile"
+)
+
+func mustTempDir(t *testing.T) (dir string, remove func()) {
+ t.Helper()
+
+ dir, err := os.MkdirTemp("", filepath.Base(t.Name()))
+ if err != nil {
+ t.Fatal(err)
+ }
+ return dir, func() { os.RemoveAll(dir) }
+}
+
+const (
+ quiescent = 10 * time.Millisecond
+ probablyStillBlocked = 10 * time.Second
+)
+
+func mustBlock(t *testing.T, desc string, f func()) (wait func(*testing.T)) {
+ t.Helper()
+
+ done := make(chan struct{})
+ go func() {
+ f()
+ close(done)
+ }()
+
+ select {
+ case <-done:
+ t.Fatalf("%s unexpectedly did not block", desc)
+ return nil
+
+ case <-time.After(quiescent):
+ return func(t *testing.T) {
+ t.Helper()
+ select {
+ case <-time.After(probablyStillBlocked):
+ t.Fatalf("%s is unexpectedly still blocked after %v", desc, probablyStillBlocked)
+ case <-done:
+ }
+ }
+ }
+}
+
+func TestMutexExcludes(t *testing.T) {
+ t.Parallel()
+
+ dir, remove := mustTempDir(t)
+ defer remove()
+
+ path := filepath.Join(dir, "lock")
+
+ mu := lockedfile.MutexAt(path)
+ t.Logf("mu := MutexAt(_)")
+
+ unlock, err := mu.Lock()
+ if err != nil {
+ t.Fatalf("mu.Lock: %v", err)
+ }
+ t.Logf("unlock, _ := mu.Lock()")
+
+ mu2 := lockedfile.MutexAt(mu.Path)
+ t.Logf("mu2 := MutexAt(mu.Path)")
+
+ wait := mustBlock(t, "mu2.Lock()", func() {
+ unlock2, err := mu2.Lock()
+ if err != nil {
+ t.Errorf("mu2.Lock: %v", err)
+ return
+ }
+ t.Logf("unlock2, _ := mu2.Lock()")
+ t.Logf("unlock2()")
+ unlock2()
+ })
+
+ t.Logf("unlock()")
+ unlock()
+ wait(t)
+}
+
+func TestReadWaitsForLock(t *testing.T) {
+ t.Parallel()
+
+ dir, remove := mustTempDir(t)
+ defer remove()
+
+ path := filepath.Join(dir, "timestamp.txt")
+
+ f, err := lockedfile.Create(path)
+ if err != nil {
+ t.Fatalf("Create: %v", err)
+ }
+ defer f.Close()
+
+ const (
+ part1 = "part 1\n"
+ part2 = "part 2\n"
+ )
+ _, err = f.WriteString(part1)
+ if err != nil {
+ t.Fatalf("WriteString: %v", err)
+ }
+ t.Logf("WriteString(%q) = <nil>", part1)
+
+ wait := mustBlock(t, "Read", func() {
+ b, err := lockedfile.Read(path)
+ if err != nil {
+ t.Errorf("Read: %v", err)
+ return
+ }
+
+ const want = part1 + part2
+ got := string(b)
+ if got == want {
+ t.Logf("Read(_) = %q", got)
+ } else {
+ t.Errorf("Read(_) = %q, _; want %q", got, want)
+ }
+ })
+
+ _, err = f.WriteString(part2)
+ if err != nil {
+ t.Errorf("WriteString: %v", err)
+ } else {
+ t.Logf("WriteString(%q) = <nil>", part2)
+ }
+ f.Close()
+
+ wait(t)
+}
+
+func TestCanLockExistingFile(t *testing.T) {
+ t.Parallel()
+
+ dir, remove := mustTempDir(t)
+ defer remove()
+ path := filepath.Join(dir, "existing.txt")
+
+ if err := os.WriteFile(path, []byte("ok"), 0777); err != nil {
+ t.Fatalf("os.WriteFile: %v", err)
+ }
+
+ f, err := lockedfile.Edit(path)
+ if err != nil {
+ t.Fatalf("first Edit: %v", err)
+ }
+
+ wait := mustBlock(t, "Edit", func() {
+ other, err := lockedfile.Edit(path)
+ if err != nil {
+ t.Errorf("second Edit: %v", err)
+ }
+ other.Close()
+ })
+
+ f.Close()
+ wait(t)
+}
+
+// TestSpuriousEDEADLK verifies that the spurious EDEADLK reported in
+// https://golang.org/issue/32817 no longer occurs.
+func TestSpuriousEDEADLK(t *testing.T) {
+ // P.1 locks file A.
+ // Q.3 locks file B.
+ // Q.3 blocks on file A.
+ // P.2 blocks on file B. (Spurious EDEADLK occurs here.)
+ // P.1 unlocks file A.
+ // Q.3 unblocks and locks file A.
+ // Q.3 unlocks files A and B.
+ // P.2 unblocks and locks file B.
+ // P.2 unlocks file B.
+
+ dirVar := t.Name() + "DIR"
+
+ if dir := os.Getenv(dirVar); dir != "" {
+ // Q.3 locks file B.
+ b, err := lockedfile.Edit(filepath.Join(dir, "B"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer b.Close()
+
+ if err := os.WriteFile(filepath.Join(dir, "locked"), []byte("ok"), 0666); err != nil {
+ t.Fatal(err)
+ }
+
+ // Q.3 blocks on file A.
+ a, err := lockedfile.Edit(filepath.Join(dir, "A"))
+ // Q.3 unblocks and locks file A.
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer a.Close()
+
+ // Q.3 unlocks files A and B.
+ return
+ }
+
+ dir, remove := mustTempDir(t)
+ defer remove()
+
+ // P.1 locks file A.
+ a, err := lockedfile.Edit(filepath.Join(dir, "A"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cmd := exec.Command(os.Args[0], "-test.run="+t.Name())
+ cmd.Env = append(os.Environ(), fmt.Sprintf("%s=%s", dirVar, dir))
+
+ qDone := make(chan struct{})
+ waitQ := mustBlock(t, "Edit A and B in subprocess", func() {
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Errorf("%v:\n%s", err, out)
+ }
+ close(qDone)
+ })
+
+ // Wait until process Q has either failed or locked file B.
+ // Otherwise, P.2 might not block on file B as intended.
+locked:
+ for {
+ if _, err := os.Stat(filepath.Join(dir, "locked")); !os.IsNotExist(err) {
+ break locked
+ }
+ select {
+ case <-qDone:
+ break locked
+ case <-time.After(1 * time.Millisecond):
+ }
+ }
+
+ waitP2 := mustBlock(t, "Edit B", func() {
+ // P.2 blocks on file B. (Spurious EDEADLK occurs here.)
+ b, err := lockedfile.Edit(filepath.Join(dir, "B"))
+ // P.2 unblocks and locks file B.
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ // P.2 unlocks file B.
+ b.Close()
+ })
+
+ // P.1 unlocks file A.
+ a.Close()
+
+ waitQ(t)
+ waitP2(t)
+}
diff --git a/internal/lockedfile/mutex.go b/internal/lockedfile/mutex.go
new file mode 100644
index 000000000..180a36c62
--- /dev/null
+++ b/internal/lockedfile/mutex.go
@@ -0,0 +1,67 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lockedfile
+
+import (
+ "fmt"
+ "os"
+ "sync"
+)
+
+// A Mutex provides mutual exclusion within and across processes by locking a
+// well-known file. Such a file generally guards some other part of the
+// filesystem: for example, a Mutex file in a directory might guard access to
+// the entire tree rooted in that directory.
+//
+// Mutex does not implement sync.Locker: unlike a sync.Mutex, a lockedfile.Mutex
+// can fail to lock (e.g. if there is a permission error in the filesystem).
+//
+// Like a sync.Mutex, a Mutex may be included as a field of a larger struct but
+// must not be copied after first use. The Path field must be set before first
+// use and must not be change thereafter.
+type Mutex struct {
+ Path string // The path to the well-known lock file. Must be non-empty.
+ mu sync.Mutex // A redundant mutex. The race detector doesn't know about file locking, so in tests we may need to lock something that it understands.
+}
+
+// MutexAt returns a new Mutex with Path set to the given non-empty path.
+func MutexAt(path string) *Mutex {
+ if path == "" {
+ panic("lockedfile.MutexAt: path must be non-empty")
+ }
+ return &Mutex{Path: path}
+}
+
+func (mu *Mutex) String() string {
+ return fmt.Sprintf("lockedfile.Mutex(%s)", mu.Path)
+}
+
+// Lock attempts to lock the Mutex.
+//
+// If successful, Lock returns a non-nil unlock function: it is provided as a
+// return-value instead of a separate method to remind the caller to check the
+// accompanying error. (See https://golang.org/issue/20803.)
+func (mu *Mutex) Lock() (unlock func(), err error) {
+ if mu.Path == "" {
+ panic("lockedfile.Mutex: missing Path during Lock")
+ }
+
+ // We could use either O_RDWR or O_WRONLY here. If we choose O_RDWR and the
+ // file at mu.Path is write-only, the call to OpenFile will fail with a
+ // permission error. That's actually what we want: if we add an RLock method
+ // in the future, it should call OpenFile with O_RDONLY and will require the
+ // files must be readable, so we should not let the caller make any
+ // assumptions about Mutex working with write-only files.
+ f, err := OpenFile(mu.Path, os.O_RDWR|os.O_CREATE, 0666)
+ if err != nil {
+ return nil, err
+ }
+ mu.mu.Lock()
+
+ return func() {
+ mu.mu.Unlock()
+ f.Close()
+ }, nil
+}
diff --git a/internal/lockedfile/transform_test.go b/internal/lockedfile/transform_test.go
new file mode 100644
index 000000000..cebbf4101
--- /dev/null
+++ b/internal/lockedfile/transform_test.go
@@ -0,0 +1,106 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// js does not support inter-process file locking.
+//
+//go:build !js
+// +build !js
+
+package lockedfile_test
+
+import (
+ "bytes"
+ "encoding/binary"
+ "math/rand"
+ "path/filepath"
+ "testing"
+ "time"
+
+ "golang.org/x/tools/internal/lockedfile"
+)
+
+func isPowerOf2(x int) bool {
+ return x > 0 && x&(x-1) == 0
+}
+
+func roundDownToPowerOf2(x int) int {
+ if x <= 0 {
+ panic("nonpositive x")
+ }
+ bit := 1
+ for x != bit {
+ x = x &^ bit
+ bit <<= 1
+ }
+ return x
+}
+
+func TestTransform(t *testing.T) {
+ dir, remove := mustTempDir(t)
+ defer remove()
+ path := filepath.Join(dir, "blob.bin")
+
+ const maxChunkWords = 8 << 10
+ buf := make([]byte, 2*maxChunkWords*8)
+ for i := uint64(0); i < 2*maxChunkWords; i++ {
+ binary.LittleEndian.PutUint64(buf[i*8:], i)
+ }
+ if err := lockedfile.Write(path, bytes.NewReader(buf[:8]), 0666); err != nil {
+ t.Fatal(err)
+ }
+
+ var attempts int64 = 128
+ if !testing.Short() {
+ attempts *= 16
+ }
+ const parallel = 32
+
+ var sem = make(chan bool, parallel)
+
+ for n := attempts; n > 0; n-- {
+ sem <- true
+ go func() {
+ defer func() { <-sem }()
+
+ time.Sleep(time.Duration(rand.Intn(100)) * time.Microsecond)
+ chunkWords := roundDownToPowerOf2(rand.Intn(maxChunkWords) + 1)
+ offset := rand.Intn(chunkWords)
+
+ err := lockedfile.Transform(path, func(data []byte) (chunk []byte, err error) {
+ chunk = buf[offset*8 : (offset+chunkWords)*8]
+
+ if len(data)&^7 != len(data) {
+ t.Errorf("read %d bytes, but each write is an integer multiple of 8 bytes", len(data))
+ return chunk, nil
+ }
+
+ words := len(data) / 8
+ if !isPowerOf2(words) {
+ t.Errorf("read %d 8-byte words, but each write is a power-of-2 number of words", words)
+ return chunk, nil
+ }
+
+ u := binary.LittleEndian.Uint64(data)
+ for i := 1; i < words; i++ {
+ next := binary.LittleEndian.Uint64(data[i*8:])
+ if next != u+1 {
+ t.Errorf("wrote sequential integers, but read integer out of sequence at offset %d", i)
+ return chunk, nil
+ }
+ u = next
+ }
+
+ return chunk, nil
+ })
+
+ if err != nil {
+ t.Errorf("unexpected error from Transform: %v", err)
+ }
+ }()
+ }
+
+ for n := parallel; n > 0; n-- {
+ sem <- true
+ }
+}
diff --git a/internal/lsp/analysis/fillreturns/fillreturns.go b/internal/lsp/analysis/fillreturns/fillreturns.go
deleted file mode 100644
index 4607f37c0..000000000
--- a/internal/lsp/analysis/fillreturns/fillreturns.go
+++ /dev/null
@@ -1,276 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package fillreturns defines an Analyzer that will attempt to
-// automatically fill in a return statement that has missing
-// values with zero value elements.
-package fillreturns
-
-import (
- "bytes"
- "fmt"
- "go/ast"
- "go/format"
- "go/types"
- "regexp"
- "strings"
-
- "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/ast/astutil"
- "golang.org/x/tools/internal/analysisinternal"
- "golang.org/x/tools/internal/typeparams"
-)
-
-const Doc = `suggest fixes for errors due to an incorrect number of return values
-
-This checker provides suggested fixes for type errors of the
-type "wrong number of return values (want %d, got %d)". For example:
- func m() (int, string, *bool, error) {
- return
- }
-will turn into
- func m() (int, string, *bool, error) {
- return 0, "", nil, nil
- }
-
-This functionality is similar to https://github.com/sqs/goreturns.
-`
-
-var Analyzer = &analysis.Analyzer{
- Name: "fillreturns",
- Doc: Doc,
- Requires: []*analysis.Analyzer{},
- Run: run,
- RunDespiteErrors: true,
-}
-
-func run(pass *analysis.Pass) (interface{}, error) {
- info := pass.TypesInfo
- if info == nil {
- return nil, fmt.Errorf("nil TypeInfo")
- }
-
- errors := analysisinternal.GetTypeErrors(pass)
-outer:
- for _, typeErr := range errors {
- // Filter out the errors that are not relevant to this analyzer.
- if !FixesError(typeErr) {
- continue
- }
- var file *ast.File
- for _, f := range pass.Files {
- if f.Pos() <= typeErr.Pos && typeErr.Pos <= f.End() {
- file = f
- break
- }
- }
- if file == nil {
- continue
- }
-
- // Get the end position of the error.
- var buf bytes.Buffer
- if err := format.Node(&buf, pass.Fset, file); err != nil {
- continue
- }
- typeErrEndPos := analysisinternal.TypeErrorEndPos(pass.Fset, buf.Bytes(), typeErr.Pos)
-
- // TODO(rfindley): much of the error handling code below returns, when it
- // should probably continue.
-
- // Get the path for the relevant range.
- path, _ := astutil.PathEnclosingInterval(file, typeErr.Pos, typeErrEndPos)
- if len(path) == 0 {
- return nil, nil
- }
-
- // Find the enclosing return statement.
- var ret *ast.ReturnStmt
- var retIdx int
- for i, n := range path {
- if r, ok := n.(*ast.ReturnStmt); ok {
- ret = r
- retIdx = i
- break
- }
- }
- if ret == nil {
- return nil, nil
- }
-
- // Get the function type that encloses the ReturnStmt.
- var enclosingFunc *ast.FuncType
- for _, n := range path[retIdx+1:] {
- switch node := n.(type) {
- case *ast.FuncLit:
- enclosingFunc = node.Type
- case *ast.FuncDecl:
- enclosingFunc = node.Type
- }
- if enclosingFunc != nil {
- break
- }
- }
- if enclosingFunc == nil {
- continue
- }
-
- // Skip any generic enclosing functions, since type parameters don't
- // have 0 values.
- // TODO(rfindley): We should be able to handle this if the return
- // values are all concrete types.
- if tparams := typeparams.ForFuncType(enclosingFunc); tparams != nil && tparams.NumFields() > 0 {
- return nil, nil
- }
-
- // Find the function declaration that encloses the ReturnStmt.
- var outer *ast.FuncDecl
- for _, p := range path {
- if p, ok := p.(*ast.FuncDecl); ok {
- outer = p
- break
- }
- }
- if outer == nil {
- return nil, nil
- }
-
- // Skip any return statements that contain function calls with multiple
- // return values.
- for _, expr := range ret.Results {
- e, ok := expr.(*ast.CallExpr)
- if !ok {
- continue
- }
- if tup, ok := info.TypeOf(e).(*types.Tuple); ok && tup.Len() > 1 {
- continue outer
- }
- }
-
- // Duplicate the return values to track which values have been matched.
- remaining := make([]ast.Expr, len(ret.Results))
- copy(remaining, ret.Results)
-
- fixed := make([]ast.Expr, len(enclosingFunc.Results.List))
-
- // For each value in the return function declaration, find the leftmost element
- // in the return statement that has the desired type. If no such element exits,
- // fill in the missing value with the appropriate "zero" value.
- var retTyps []types.Type
- for _, ret := range enclosingFunc.Results.List {
- retTyps = append(retTyps, info.TypeOf(ret.Type))
- }
- matches :=
- analysisinternal.FindMatchingIdents(retTyps, file, ret.Pos(), info, pass.Pkg)
- for i, retTyp := range retTyps {
- var match ast.Expr
- var idx int
- for j, val := range remaining {
- if !matchingTypes(info.TypeOf(val), retTyp) {
- continue
- }
- if !analysisinternal.IsZeroValue(val) {
- match, idx = val, j
- break
- }
- // If the current match is a "zero" value, we keep searching in
- // case we find a non-"zero" value match. If we do not find a
- // non-"zero" value, we will use the "zero" value.
- match, idx = val, j
- }
-
- if match != nil {
- fixed[i] = match
- remaining = append(remaining[:idx], remaining[idx+1:]...)
- } else {
- idents, ok := matches[retTyp]
- if !ok {
- return nil, fmt.Errorf("invalid return type: %v", retTyp)
- }
- // Find the identifer whose name is most similar to the return type.
- // If we do not find any identifer that matches the pattern,
- // generate a zero value.
- value := analysisinternal.FindBestMatch(retTyp.String(), idents)
- if value == nil {
- value = analysisinternal.ZeroValue(
- pass.Fset, file, pass.Pkg, retTyp)
- }
- if value == nil {
- return nil, nil
- }
- fixed[i] = value
- }
- }
-
- // Remove any non-matching "zero values" from the leftover values.
- var nonZeroRemaining []ast.Expr
- for _, expr := range remaining {
- if !analysisinternal.IsZeroValue(expr) {
- nonZeroRemaining = append(nonZeroRemaining, expr)
- }
- }
- // Append leftover return values to end of new return statement.
- fixed = append(fixed, nonZeroRemaining...)
-
- newRet := &ast.ReturnStmt{
- Return: ret.Pos(),
- Results: fixed,
- }
-
- // Convert the new return statement AST to text.
- var newBuf bytes.Buffer
- if err := format.Node(&newBuf, pass.Fset, newRet); err != nil {
- return nil, err
- }
-
- pass.Report(analysis.Diagnostic{
- Pos: typeErr.Pos,
- End: typeErrEndPos,
- Message: typeErr.Msg,
- SuggestedFixes: []analysis.SuggestedFix{{
- Message: "Fill in return values",
- TextEdits: []analysis.TextEdit{{
- Pos: ret.Pos(),
- End: ret.End(),
- NewText: newBuf.Bytes(),
- }},
- }},
- })
- }
- return nil, nil
-}
-
-func matchingTypes(want, got types.Type) bool {
- if want == got || types.Identical(want, got) {
- return true
- }
- // Code segment to help check for untyped equality from (golang/go#32146).
- if rhs, ok := want.(*types.Basic); ok && rhs.Info()&types.IsUntyped > 0 {
- if lhs, ok := got.Underlying().(*types.Basic); ok {
- return rhs.Info()&types.IsConstType == lhs.Info()&types.IsConstType
- }
- }
- return types.AssignableTo(want, got) || types.ConvertibleTo(want, got)
-}
-
-// Error messages have changed across Go versions. These regexps capture recent
-// incarnations.
-//
-// TODO(rfindley): once error codes are exported and exposed via go/packages,
-// use error codes rather than string matching here.
-var wrongReturnNumRegexes = []*regexp.Regexp{
- regexp.MustCompile(`wrong number of return values \(want (\d+), got (\d+)\)`),
- regexp.MustCompile(`too many return values`),
- regexp.MustCompile(`not enough return values`),
-}
-
-func FixesError(err types.Error) bool {
- msg := strings.TrimSpace(err.Msg)
- for _, rx := range wrongReturnNumRegexes {
- if rx.MatchString(msg) {
- return true
- }
- }
- return false
-}
diff --git a/internal/lsp/analysis/fillreturns/fillreturns_test.go b/internal/lsp/analysis/fillreturns/fillreturns_test.go
deleted file mode 100644
index 7ef0d4679..000000000
--- a/internal/lsp/analysis/fillreturns/fillreturns_test.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package fillreturns_test
-
-import (
- "testing"
-
- "golang.org/x/tools/go/analysis/analysistest"
- "golang.org/x/tools/internal/lsp/analysis/fillreturns"
- "golang.org/x/tools/internal/typeparams"
-)
-
-func Test(t *testing.T) {
- testdata := analysistest.TestData()
- tests := []string{"a"}
- if typeparams.Enabled {
- tests = append(tests, "typeparams")
- }
- analysistest.RunWithSuggestedFixes(t, testdata, fillreturns.Analyzer, tests...)
-}
diff --git a/internal/lsp/analysis/fillstruct/fillstruct.go b/internal/lsp/analysis/fillstruct/fillstruct.go
deleted file mode 100644
index a4dd8ccb8..000000000
--- a/internal/lsp/analysis/fillstruct/fillstruct.go
+++ /dev/null
@@ -1,495 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package fillstruct defines an Analyzer that automatically
-// fills in a struct declaration with zero value elements for each field.
-package fillstruct
-
-import (
- "bytes"
- "fmt"
- "go/ast"
- "go/format"
- "go/token"
- "go/types"
- "strings"
- "unicode"
-
- "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/analysis/passes/inspect"
- "golang.org/x/tools/go/ast/astutil"
- "golang.org/x/tools/go/ast/inspector"
- "golang.org/x/tools/internal/analysisinternal"
- "golang.org/x/tools/internal/span"
- "golang.org/x/tools/internal/typeparams"
-)
-
-const Doc = `note incomplete struct initializations
-
-This analyzer provides diagnostics for any struct literals that do not have
-any fields initialized. Because the suggested fix for this analysis is
-expensive to compute, callers should compute it separately, using the
-SuggestedFix function below.
-`
-
-var Analyzer = &analysis.Analyzer{
- Name: "fillstruct",
- Doc: Doc,
- Requires: []*analysis.Analyzer{inspect.Analyzer},
- Run: run,
- RunDespiteErrors: true,
-}
-
-func run(pass *analysis.Pass) (interface{}, error) {
- inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
- nodeFilter := []ast.Node{(*ast.CompositeLit)(nil)}
- inspect.Preorder(nodeFilter, func(n ast.Node) {
- info := pass.TypesInfo
- if info == nil {
- return
- }
- expr := n.(*ast.CompositeLit)
-
- var file *ast.File
- for _, f := range pass.Files {
- if f.Pos() <= expr.Pos() && expr.Pos() <= f.End() {
- file = f
- break
- }
- }
- if file == nil {
- return
- }
-
- typ := info.TypeOf(expr)
- if typ == nil {
- return
- }
-
- // Ignore types that have type parameters for now.
- // TODO: support type params.
- if typ, ok := typ.(*types.Named); ok {
- if tparams := typeparams.ForNamed(typ); tparams != nil && tparams.Len() > 0 {
- return
- }
- }
-
- // Find reference to the type declaration of the struct being initialized.
- for {
- p, ok := typ.Underlying().(*types.Pointer)
- if !ok {
- break
- }
- typ = p.Elem()
- }
- typ = typ.Underlying()
-
- obj, ok := typ.(*types.Struct)
- if !ok {
- return
- }
- fieldCount := obj.NumFields()
-
- // Skip any struct that is already populated or that has no fields.
- if fieldCount == 0 || fieldCount == len(expr.Elts) {
- return
- }
-
- var fillable bool
- var fillableFields []string
- for i := 0; i < fieldCount; i++ {
- field := obj.Field(i)
- // Ignore fields that are not accessible in the current package.
- if field.Pkg() != nil && field.Pkg() != pass.Pkg && !field.Exported() {
- continue
- }
- // Ignore structs containing fields that have type parameters for now.
- // TODO: support type params.
- if typ, ok := field.Type().(*types.Named); ok {
- if tparams := typeparams.ForNamed(typ); tparams != nil && tparams.Len() > 0 {
- return
- }
- }
- if _, ok := field.Type().(*typeparams.TypeParam); ok {
- return
- }
- fillable = true
- fillableFields = append(fillableFields, fmt.Sprintf("%s: %s", field.Name(), field.Type().String()))
- }
- if !fillable {
- return
- }
- var name string
- switch typ := expr.Type.(type) {
- case *ast.Ident:
- name = typ.Name
- case *ast.SelectorExpr:
- name = fmt.Sprintf("%s.%s", typ.X, typ.Sel.Name)
- default:
- totalFields := len(fillableFields)
- maxLen := 20
- // Find the index to cut off printing of fields.
- var i, fieldLen int
- for i = range fillableFields {
- if fieldLen > maxLen {
- break
- }
- fieldLen += len(fillableFields[i])
- }
- fillableFields = fillableFields[:i]
- if i < totalFields {
- fillableFields = append(fillableFields, "...")
- }
- name = fmt.Sprintf("anonymous struct { %s }", strings.Join(fillableFields, ", "))
- }
- pass.Report(analysis.Diagnostic{
- Message: fmt.Sprintf("Fill %s", name),
- Pos: expr.Pos(),
- End: expr.End(),
- })
- })
- return nil, nil
-}
-
-func SuggestedFix(fset *token.FileSet, rng span.Range, content []byte, file *ast.File, pkg *types.Package, info *types.Info) (*analysis.SuggestedFix, error) {
- pos := rng.Start // don't use the end
-
- // TODO(rstambler): Using ast.Inspect would probably be more efficient than
- // calling PathEnclosingInterval. Switch this approach.
- path, _ := astutil.PathEnclosingInterval(file, pos, pos)
- if len(path) == 0 {
- return nil, fmt.Errorf("no enclosing ast.Node")
- }
- var expr *ast.CompositeLit
- for _, n := range path {
- if node, ok := n.(*ast.CompositeLit); ok {
- expr = node
- break
- }
- }
-
- if info == nil {
- return nil, fmt.Errorf("nil types.Info")
- }
- typ := info.TypeOf(expr)
- if typ == nil {
- return nil, fmt.Errorf("no composite literal")
- }
-
- // Find reference to the type declaration of the struct being initialized.
- for {
- p, ok := typ.Underlying().(*types.Pointer)
- if !ok {
- break
- }
- typ = p.Elem()
- }
- typ = typ.Underlying()
-
- obj, ok := typ.(*types.Struct)
- if !ok {
- return nil, fmt.Errorf("unexpected type %v (%T), expected *types.Struct", typ, typ)
- }
- fieldCount := obj.NumFields()
-
- // Check which types have already been filled in. (we only want to fill in
- // the unfilled types, or else we'll blat user-supplied details)
- prefilledTypes := map[string]ast.Expr{}
- for _, e := range expr.Elts {
- if kv, ok := e.(*ast.KeyValueExpr); ok {
- if key, ok := kv.Key.(*ast.Ident); ok {
- prefilledTypes[key.Name] = kv.Value
- }
- }
- }
-
- // Use a new fileset to build up a token.File for the new composite
- // literal. We need one line for foo{, one line for }, and one line for
- // each field we're going to set. format.Node only cares about line
- // numbers, so we don't need to set columns, and each line can be
- // 1 byte long.
- fakeFset := token.NewFileSet()
- tok := fakeFset.AddFile("", -1, fieldCount+2)
-
- line := 2 // account for 1-based lines and the left brace
- var elts []ast.Expr
- var fieldTyps []types.Type
- for i := 0; i < fieldCount; i++ {
- field := obj.Field(i)
- // Ignore fields that are not accessible in the current package.
- if field.Pkg() != nil && field.Pkg() != pkg && !field.Exported() {
- fieldTyps = append(fieldTyps, nil)
- continue
- }
- fieldTyps = append(fieldTyps, field.Type())
- }
- matches := analysisinternal.FindMatchingIdents(fieldTyps, file, rng.Start, info, pkg)
- for i, fieldTyp := range fieldTyps {
- if fieldTyp == nil {
- continue
- }
-
- tok.AddLine(line - 1) // add 1 byte per line
- if line > tok.LineCount() {
- panic(fmt.Sprintf("invalid line number %v (of %v) for fillstruct", line, tok.LineCount()))
- }
- pos := tok.LineStart(line)
-
- kv := &ast.KeyValueExpr{
- Key: &ast.Ident{
- NamePos: pos,
- Name: obj.Field(i).Name(),
- },
- Colon: pos,
- }
- if expr, ok := prefilledTypes[obj.Field(i).Name()]; ok {
- kv.Value = expr
- } else {
- idents, ok := matches[fieldTyp]
- if !ok {
- return nil, fmt.Errorf("invalid struct field type: %v", fieldTyp)
- }
-
- // Find the identifer whose name is most similar to the name of the field's key.
- // If we do not find any identifer that matches the pattern, generate a new value.
- // NOTE: We currently match on the name of the field key rather than the field type.
- value := analysisinternal.FindBestMatch(obj.Field(i).Name(), idents)
- if value == nil {
- value = populateValue(fset, file, pkg, fieldTyp)
- }
- if value == nil {
- return nil, nil
- }
-
- kv.Value = value
- }
- elts = append(elts, kv)
- line++
- }
-
- // If all of the struct's fields are unexported, we have nothing to do.
- if len(elts) == 0 {
- return nil, fmt.Errorf("no elements to fill")
- }
-
- // Add the final line for the right brace. Offset is the number of
- // bytes already added plus 1.
- tok.AddLine(len(elts) + 1)
- line = len(elts) + 2
- if line > tok.LineCount() {
- panic(fmt.Sprintf("invalid line number %v (of %v) for fillstruct", line, tok.LineCount()))
- }
-
- cl := &ast.CompositeLit{
- Type: expr.Type,
- Lbrace: tok.LineStart(1),
- Elts: elts,
- Rbrace: tok.LineStart(line),
- }
-
- // Find the line on which the composite literal is declared.
- split := bytes.Split(content, []byte("\n"))
- lineNumber := fset.Position(expr.Lbrace).Line
- firstLine := split[lineNumber-1] // lines are 1-indexed
-
- // Trim the whitespace from the left of the line, and use the index
- // to get the amount of whitespace on the left.
- trimmed := bytes.TrimLeftFunc(firstLine, unicode.IsSpace)
- index := bytes.Index(firstLine, trimmed)
- whitespace := firstLine[:index]
-
- // First pass through the formatter: turn the expr into a string.
- var formatBuf bytes.Buffer
- if err := format.Node(&formatBuf, fakeFset, cl); err != nil {
- return nil, fmt.Errorf("failed to run first format on:\n%s\ngot err: %v", cl.Type, err)
- }
- sug := indent(formatBuf.Bytes(), whitespace)
-
- if len(prefilledTypes) > 0 {
- // Attempt a second pass through the formatter to line up columns.
- sourced, err := format.Source(sug)
- if err == nil {
- sug = indent(sourced, whitespace)
- }
- }
-
- return &analysis.SuggestedFix{
- TextEdits: []analysis.TextEdit{
- {
- Pos: expr.Pos(),
- End: expr.End(),
- NewText: sug,
- },
- },
- }, nil
-}
-
-// indent works line by line through str, indenting (prefixing) each line with
-// ind.
-func indent(str, ind []byte) []byte {
- split := bytes.Split(str, []byte("\n"))
- newText := bytes.NewBuffer(nil)
- for i, s := range split {
- if len(s) == 0 {
- continue
- }
- // Don't add the extra indentation to the first line.
- if i != 0 {
- newText.Write(ind)
- }
- newText.Write(s)
- if i < len(split)-1 {
- newText.WriteByte('\n')
- }
- }
- return newText.Bytes()
-}
-
-// populateValue constructs an expression to fill the value of a struct field.
-//
-// When the type of a struct field is a basic literal or interface, we return
-// default values. For other types, such as maps, slices, and channels, we create
-// expressions rather than using default values.
-//
-// The reasoning here is that users will call fillstruct with the intention of
-// initializing the struct, in which case setting these fields to nil has no effect.
-func populateValue(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
- under := typ
- if n, ok := typ.(*types.Named); ok {
- under = n.Underlying()
- }
- switch u := under.(type) {
- case *types.Basic:
- switch {
- case u.Info()&types.IsNumeric != 0:
- return &ast.BasicLit{Kind: token.INT, Value: "0"}
- case u.Info()&types.IsBoolean != 0:
- return &ast.Ident{Name: "false"}
- case u.Info()&types.IsString != 0:
- return &ast.BasicLit{Kind: token.STRING, Value: `""`}
- default:
- panic("unknown basic type")
- }
- case *types.Map:
- k := analysisinternal.TypeExpr(fset, f, pkg, u.Key())
- v := analysisinternal.TypeExpr(fset, f, pkg, u.Elem())
- if k == nil || v == nil {
- return nil
- }
- return &ast.CompositeLit{
- Type: &ast.MapType{
- Key: k,
- Value: v,
- },
- }
- case *types.Slice:
- s := analysisinternal.TypeExpr(fset, f, pkg, u.Elem())
- if s == nil {
- return nil
- }
- return &ast.CompositeLit{
- Type: &ast.ArrayType{
- Elt: s,
- },
- }
- case *types.Array:
- a := analysisinternal.TypeExpr(fset, f, pkg, u.Elem())
- if a == nil {
- return nil
- }
- return &ast.CompositeLit{
- Type: &ast.ArrayType{
- Elt: a,
- Len: &ast.BasicLit{
- Kind: token.INT, Value: fmt.Sprintf("%v", u.Len()),
- },
- },
- }
- case *types.Chan:
- v := analysisinternal.TypeExpr(fset, f, pkg, u.Elem())
- if v == nil {
- return nil
- }
- dir := ast.ChanDir(u.Dir())
- if u.Dir() == types.SendRecv {
- dir = ast.SEND | ast.RECV
- }
- return &ast.CallExpr{
- Fun: ast.NewIdent("make"),
- Args: []ast.Expr{
- &ast.ChanType{
- Dir: dir,
- Value: v,
- },
- },
- }
- case *types.Struct:
- s := analysisinternal.TypeExpr(fset, f, pkg, typ)
- if s == nil {
- return nil
- }
- return &ast.CompositeLit{
- Type: s,
- }
- case *types.Signature:
- var params []*ast.Field
- for i := 0; i < u.Params().Len(); i++ {
- p := analysisinternal.TypeExpr(fset, f, pkg, u.Params().At(i).Type())
- if p == nil {
- return nil
- }
- params = append(params, &ast.Field{
- Type: p,
- Names: []*ast.Ident{
- {
- Name: u.Params().At(i).Name(),
- },
- },
- })
- }
- var returns []*ast.Field
- for i := 0; i < u.Results().Len(); i++ {
- r := analysisinternal.TypeExpr(fset, f, pkg, u.Results().At(i).Type())
- if r == nil {
- return nil
- }
- returns = append(returns, &ast.Field{
- Type: r,
- })
- }
- return &ast.FuncLit{
- Type: &ast.FuncType{
- Params: &ast.FieldList{
- List: params,
- },
- Results: &ast.FieldList{
- List: returns,
- },
- },
- Body: &ast.BlockStmt{},
- }
- case *types.Pointer:
- switch u.Elem().(type) {
- case *types.Basic:
- return &ast.CallExpr{
- Fun: &ast.Ident{
- Name: "new",
- },
- Args: []ast.Expr{
- &ast.Ident{
- Name: u.Elem().String(),
- },
- },
- }
- default:
- return &ast.UnaryExpr{
- Op: token.AND,
- X: populateValue(fset, f, pkg, u.Elem()),
- }
- }
- case *types.Interface:
- return ast.NewIdent("nil")
- }
- return nil
-}
diff --git a/internal/lsp/analysis/fillstruct/fillstruct_test.go b/internal/lsp/analysis/fillstruct/fillstruct_test.go
deleted file mode 100644
index 51a516cdf..000000000
--- a/internal/lsp/analysis/fillstruct/fillstruct_test.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package fillstruct_test
-
-import (
- "testing"
-
- "golang.org/x/tools/go/analysis/analysistest"
- "golang.org/x/tools/internal/lsp/analysis/fillstruct"
- "golang.org/x/tools/internal/typeparams"
-)
-
-func Test(t *testing.T) {
- testdata := analysistest.TestData()
- tests := []string{"a"}
- if typeparams.Enabled {
- tests = append(tests, "typeparams")
- }
- analysistest.Run(t, testdata, fillstruct.Analyzer, tests...)
-}
diff --git a/internal/lsp/analysis/fillstruct/testdata/src/a/a.go b/internal/lsp/analysis/fillstruct/testdata/src/a/a.go
deleted file mode 100644
index f69fe8339..000000000
--- a/internal/lsp/analysis/fillstruct/testdata/src/a/a.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package fillstruct
-
-import (
- data "b"
- "go/ast"
- "go/token"
-)
-
-type emptyStruct struct{}
-
-var _ = emptyStruct{}
-
-type basicStruct struct {
- foo int
-}
-
-var _ = basicStruct{} // want ""
-
-type twoArgStruct struct {
- foo int
- bar string
-}
-
-var _ = twoArgStruct{} // want ""
-
-var _ = twoArgStruct{ // want ""
- bar: "bar",
-}
-
-type nestedStruct struct {
- bar string
- basic basicStruct
-}
-
-var _ = nestedStruct{} // want ""
-
-var _ = data.B{} // want ""
-
-type typedStruct struct {
- m map[string]int
- s []int
- c chan int
- c1 <-chan int
- a [2]string
-}
-
-var _ = typedStruct{} // want ""
-
-type funStruct struct {
- fn func(i int) int
-}
-
-var _ = funStruct{} // want ""
-
-type funStructCompex struct {
- fn func(i int, s string) (string, int)
-}
-
-var _ = funStructCompex{} // want ""
-
-type funStructEmpty struct {
- fn func()
-}
-
-var _ = funStructEmpty{} // want ""
-
-type Foo struct {
- A int
-}
-
-type Bar struct {
- X *Foo
- Y *Foo
-}
-
-var _ = Bar{} // want ""
-
-type importedStruct struct {
- m map[*ast.CompositeLit]ast.Field
- s []ast.BadExpr
- a [3]token.Token
- c chan ast.EmptyStmt
- fn func(ast_decl ast.DeclStmt) ast.Ellipsis
- st ast.CompositeLit
-}
-
-var _ = importedStruct{} // want ""
-
-type pointerBuiltinStruct struct {
- b *bool
- s *string
- i *int
-}
-
-var _ = pointerBuiltinStruct{} // want ""
-
-var _ = []ast.BasicLit{
- {}, // want ""
-}
-
-var _ = []ast.BasicLit{{}, // want ""
-}
diff --git a/internal/lsp/analysis/fillstruct/testdata/src/typeparams/typeparams.go b/internal/lsp/analysis/fillstruct/testdata/src/typeparams/typeparams.go
deleted file mode 100644
index 90290613d..000000000
--- a/internal/lsp/analysis/fillstruct/testdata/src/typeparams/typeparams.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package fillstruct
-
-type emptyStruct[A any] struct{}
-
-var _ = emptyStruct[int]{}
-
-type basicStruct[T any] struct {
- foo T
-}
-
-var _ = basicStruct[int]{}
-
-type fooType[T any] T
-
-type twoArgStruct[F, B any] struct {
- foo fooType[F]
- bar fooType[B]
-}
-
-var _ = twoArgStruct[string, int]{}
-
-var _ = twoArgStruct[int, string]{
- bar: "bar",
-}
-
-type nestedStruct struct {
- bar string
- basic basicStruct[int]
-}
-
-var _ = nestedStruct{}
-
-func _[T any]() {
- type S struct{ t T }
- x := S{}
- _ = x
-}
diff --git a/internal/lsp/analysis/infertypeargs/infertypeargs_test.go b/internal/lsp/analysis/infertypeargs/infertypeargs_test.go
deleted file mode 100644
index 2957f46e3..000000000
--- a/internal/lsp/analysis/infertypeargs/infertypeargs_test.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package infertypeargs_test
-
-import (
- "testing"
-
- "golang.org/x/tools/go/analysis/analysistest"
- "golang.org/x/tools/internal/lsp/analysis/infertypeargs"
- "golang.org/x/tools/internal/testenv"
- "golang.org/x/tools/internal/typeparams"
-)
-
-func Test(t *testing.T) {
- testenv.NeedsGo1Point(t, 13)
- if !typeparams.Enabled {
- t.Skip("type params are not enabled")
- }
- testdata := analysistest.TestData()
- analysistest.RunWithSuggestedFixes(t, testdata, infertypeargs.Analyzer, "a")
-}
diff --git a/internal/lsp/analysis/nonewvars/nonewvars.go b/internal/lsp/analysis/nonewvars/nonewvars.go
deleted file mode 100644
index e7fa430cc..000000000
--- a/internal/lsp/analysis/nonewvars/nonewvars.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package nonewvars defines an Analyzer that applies suggested fixes
-// to errors of the type "no new variables on left side of :=".
-package nonewvars
-
-import (
- "bytes"
- "go/ast"
- "go/format"
- "go/token"
-
- "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/analysis/passes/inspect"
- "golang.org/x/tools/go/ast/inspector"
- "golang.org/x/tools/internal/analysisinternal"
-)
-
-const Doc = `suggested fixes for "no new vars on left side of :="
-
-This checker provides suggested fixes for type errors of the
-type "no new vars on left side of :=". For example:
- z := 1
- z := 2
-will turn into
- z := 1
- z = 2
-`
-
-var Analyzer = &analysis.Analyzer{
- Name: string(analysisinternal.NoNewVars),
- Doc: Doc,
- Requires: []*analysis.Analyzer{inspect.Analyzer},
- Run: run,
- RunDespiteErrors: true,
-}
-
-func run(pass *analysis.Pass) (interface{}, error) {
- inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
- errors := analysisinternal.GetTypeErrors(pass)
-
- nodeFilter := []ast.Node{(*ast.AssignStmt)(nil)}
- inspect.Preorder(nodeFilter, func(n ast.Node) {
- assignStmt, _ := n.(*ast.AssignStmt)
- // We only care about ":=".
- if assignStmt.Tok != token.DEFINE {
- return
- }
-
- var file *ast.File
- for _, f := range pass.Files {
- if f.Pos() <= assignStmt.Pos() && assignStmt.Pos() < f.End() {
- file = f
- break
- }
- }
- if file == nil {
- return
- }
-
- for _, err := range errors {
- if !FixesError(err.Msg) {
- continue
- }
- if assignStmt.Pos() > err.Pos || err.Pos >= assignStmt.End() {
- continue
- }
- var buf bytes.Buffer
- if err := format.Node(&buf, pass.Fset, file); err != nil {
- continue
- }
- pass.Report(analysis.Diagnostic{
- Pos: err.Pos,
- End: analysisinternal.TypeErrorEndPos(pass.Fset, buf.Bytes(), err.Pos),
- Message: err.Msg,
- SuggestedFixes: []analysis.SuggestedFix{{
- Message: "Change ':=' to '='",
- TextEdits: []analysis.TextEdit{{
- Pos: err.Pos,
- End: err.Pos + 1,
- }},
- }},
- })
- }
- })
- return nil, nil
-}
-
-func FixesError(msg string) bool {
- return msg == "no new variables on left side of :="
-}
diff --git a/internal/lsp/analysis/nonewvars/nonewvars_test.go b/internal/lsp/analysis/nonewvars/nonewvars_test.go
deleted file mode 100644
index dc58ab0ff..000000000
--- a/internal/lsp/analysis/nonewvars/nonewvars_test.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package nonewvars_test
-
-import (
- "testing"
-
- "golang.org/x/tools/go/analysis/analysistest"
- "golang.org/x/tools/internal/lsp/analysis/nonewvars"
- "golang.org/x/tools/internal/typeparams"
-)
-
-func Test(t *testing.T) {
- testdata := analysistest.TestData()
- tests := []string{"a"}
- if typeparams.Enabled {
- tests = append(tests, "typeparams")
- }
- analysistest.RunWithSuggestedFixes(t, testdata, nonewvars.Analyzer, tests...)
-}
diff --git a/internal/lsp/analysis/noresultvalues/noresultvalues.go b/internal/lsp/analysis/noresultvalues/noresultvalues.go
deleted file mode 100644
index b9f21f313..000000000
--- a/internal/lsp/analysis/noresultvalues/noresultvalues.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package noresultvalues defines an Analyzer that applies suggested fixes
-// to errors of the type "no result values expected".
-package noresultvalues
-
-import (
- "bytes"
- "go/ast"
- "go/format"
- "strings"
-
- "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/analysis/passes/inspect"
- "golang.org/x/tools/go/ast/inspector"
- "golang.org/x/tools/internal/analysisinternal"
-)
-
-const Doc = `suggested fixes for unexpected return values
-
-This checker provides suggested fixes for type errors of the
-type "no result values expected" or "too many return values".
-For example:
- func z() { return nil }
-will turn into
- func z() { return }
-`
-
-var Analyzer = &analysis.Analyzer{
- Name: string(analysisinternal.NoResultValues),
- Doc: Doc,
- Requires: []*analysis.Analyzer{inspect.Analyzer},
- Run: run,
- RunDespiteErrors: true,
-}
-
-func run(pass *analysis.Pass) (interface{}, error) {
- inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
- errors := analysisinternal.GetTypeErrors(pass)
-
- nodeFilter := []ast.Node{(*ast.ReturnStmt)(nil)}
- inspect.Preorder(nodeFilter, func(n ast.Node) {
- retStmt, _ := n.(*ast.ReturnStmt)
-
- var file *ast.File
- for _, f := range pass.Files {
- if f.Pos() <= retStmt.Pos() && retStmt.Pos() < f.End() {
- file = f
- break
- }
- }
- if file == nil {
- return
- }
-
- for _, err := range errors {
- if !FixesError(err.Msg) {
- continue
- }
- if retStmt.Pos() >= err.Pos || err.Pos >= retStmt.End() {
- continue
- }
- var buf bytes.Buffer
- if err := format.Node(&buf, pass.Fset, file); err != nil {
- continue
- }
- pass.Report(analysis.Diagnostic{
- Pos: err.Pos,
- End: analysisinternal.TypeErrorEndPos(pass.Fset, buf.Bytes(), err.Pos),
- Message: err.Msg,
- SuggestedFixes: []analysis.SuggestedFix{{
- Message: "Delete return values",
- TextEdits: []analysis.TextEdit{{
- Pos: retStmt.Pos(),
- End: retStmt.End(),
- NewText: []byte("return"),
- }},
- }},
- })
- }
- })
- return nil, nil
-}
-
-func FixesError(msg string) bool {
- return msg == "no result values expected" ||
- strings.HasPrefix(msg, "too many return values") && strings.Contains(msg, "want ()")
-}
diff --git a/internal/lsp/analysis/noresultvalues/noresultvalues_test.go b/internal/lsp/analysis/noresultvalues/noresultvalues_test.go
deleted file mode 100644
index 12198a1c1..000000000
--- a/internal/lsp/analysis/noresultvalues/noresultvalues_test.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package noresultvalues_test
-
-import (
- "testing"
-
- "golang.org/x/tools/go/analysis/analysistest"
- "golang.org/x/tools/internal/lsp/analysis/noresultvalues"
- "golang.org/x/tools/internal/typeparams"
-)
-
-func Test(t *testing.T) {
- testdata := analysistest.TestData()
- tests := []string{"a"}
- if typeparams.Enabled {
- tests = append(tests, "typeparams")
- }
- analysistest.RunWithSuggestedFixes(t, testdata, noresultvalues.Analyzer, tests...)
-}
diff --git a/internal/lsp/analysis/simplifycompositelit/simplifycompositelit_test.go b/internal/lsp/analysis/simplifycompositelit/simplifycompositelit_test.go
deleted file mode 100644
index e60f7d6b0..000000000
--- a/internal/lsp/analysis/simplifycompositelit/simplifycompositelit_test.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package simplifycompositelit_test
-
-import (
- "testing"
-
- "golang.org/x/tools/go/analysis/analysistest"
- "golang.org/x/tools/internal/lsp/analysis/simplifycompositelit"
-)
-
-func Test(t *testing.T) {
- testdata := analysistest.TestData()
- analysistest.RunWithSuggestedFixes(t, testdata, simplifycompositelit.Analyzer, "a")
-}
diff --git a/internal/lsp/analysis/simplifyrange/simplifyrange_test.go b/internal/lsp/analysis/simplifyrange/simplifyrange_test.go
deleted file mode 100644
index ecc7a9692..000000000
--- a/internal/lsp/analysis/simplifyrange/simplifyrange_test.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package simplifyrange_test
-
-import (
- "testing"
-
- "golang.org/x/tools/go/analysis/analysistest"
- "golang.org/x/tools/internal/lsp/analysis/simplifyrange"
-)
-
-func Test(t *testing.T) {
- testdata := analysistest.TestData()
- analysistest.RunWithSuggestedFixes(t, testdata, simplifyrange.Analyzer, "a")
-}
diff --git a/internal/lsp/analysis/simplifyslice/simplifyslice_test.go b/internal/lsp/analysis/simplifyslice/simplifyslice_test.go
deleted file mode 100644
index cff6267c6..000000000
--- a/internal/lsp/analysis/simplifyslice/simplifyslice_test.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package simplifyslice_test
-
-import (
- "testing"
-
- "golang.org/x/tools/go/analysis/analysistest"
- "golang.org/x/tools/internal/lsp/analysis/simplifyslice"
- "golang.org/x/tools/internal/typeparams"
-)
-
-func Test(t *testing.T) {
- testdata := analysistest.TestData()
- tests := []string{"a"}
- if typeparams.Enabled {
- tests = append(tests, "typeparams")
- }
- analysistest.RunWithSuggestedFixes(t, testdata, simplifyslice.Analyzer, tests...)
-}
diff --git a/internal/lsp/analysis/stubmethods/stubmethods.go b/internal/lsp/analysis/stubmethods/stubmethods.go
deleted file mode 100644
index c2a4138fa..000000000
--- a/internal/lsp/analysis/stubmethods/stubmethods.go
+++ /dev/null
@@ -1,351 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package stubmethods
-
-import (
- "bytes"
- "fmt"
- "go/ast"
- "go/format"
- "go/token"
- "go/types"
- "strconv"
- "strings"
-
- "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/analysis/passes/inspect"
- "golang.org/x/tools/go/ast/astutil"
- "golang.org/x/tools/internal/analysisinternal"
- "golang.org/x/tools/internal/typesinternal"
-)
-
-const Doc = `stub methods analyzer
-
-This analyzer generates method stubs for concrete types
-in order to implement a target interface`
-
-var Analyzer = &analysis.Analyzer{
- Name: "stubmethods",
- Doc: Doc,
- Requires: []*analysis.Analyzer{inspect.Analyzer},
- Run: run,
- RunDespiteErrors: true,
-}
-
-func run(pass *analysis.Pass) (interface{}, error) {
- for _, err := range analysisinternal.GetTypeErrors(pass) {
- ifaceErr := strings.Contains(err.Msg, "missing method") || strings.HasPrefix(err.Msg, "cannot convert")
- if !ifaceErr {
- continue
- }
- var file *ast.File
- for _, f := range pass.Files {
- if f.Pos() <= err.Pos && err.Pos < f.End() {
- file = f
- break
- }
- }
- if file == nil {
- continue
- }
- // Get the end position of the error.
- _, _, endPos, ok := typesinternal.ReadGo116ErrorData(err)
- if !ok {
- var buf bytes.Buffer
- if err := format.Node(&buf, pass.Fset, file); err != nil {
- continue
- }
- endPos = analysisinternal.TypeErrorEndPos(pass.Fset, buf.Bytes(), err.Pos)
- }
- path, _ := astutil.PathEnclosingInterval(file, err.Pos, endPos)
- si := GetStubInfo(pass.TypesInfo, path, err.Pos)
- if si == nil {
- continue
- }
- qf := RelativeToFiles(si.Concrete.Obj().Pkg(), file, nil, nil)
- pass.Report(analysis.Diagnostic{
- Pos: err.Pos,
- End: endPos,
- Message: fmt.Sprintf("Implement %s", types.TypeString(si.Interface.Type(), qf)),
- })
- }
- return nil, nil
-}
-
-// StubInfo represents a concrete type
-// that wants to stub out an interface type
-type StubInfo struct {
- // Interface is the interface that the client wants to implement.
- // When the interface is defined, the underlying object will be a TypeName.
- // Note that we keep track of types.Object instead of types.Type in order
- // to keep a reference to the declaring object's package and the ast file
- // in the case where the concrete type file requires a new import that happens to be renamed
- // in the interface file.
- // TODO(marwan-at-work): implement interface literals.
- Interface types.Object
- Concrete *types.Named
- Pointer bool
-}
-
-// GetStubInfo determines whether the "missing method error"
-// can be used to deduced what the concrete and interface types are.
-func GetStubInfo(ti *types.Info, path []ast.Node, pos token.Pos) *StubInfo {
- for _, n := range path {
- switch n := n.(type) {
- case *ast.ValueSpec:
- return fromValueSpec(ti, n, pos)
- case *ast.ReturnStmt:
- // An error here may not indicate a real error the user should know about, but it may.
- // Therefore, it would be best to log it out for debugging/reporting purposes instead of ignoring
- // it. However, event.Log takes a context which is not passed via the analysis package.
- // TODO(marwan-at-work): properly log this error.
- si, _ := fromReturnStmt(ti, pos, path, n)
- return si
- case *ast.AssignStmt:
- return fromAssignStmt(ti, n, pos)
- }
- }
- return nil
-}
-
-// fromReturnStmt analyzes a "return" statement to extract
-// a concrete type that is trying to be returned as an interface type.
-//
-// For example, func() io.Writer { return myType{} }
-// would return StubInfo with the interface being io.Writer and the concrete type being myType{}.
-func fromReturnStmt(ti *types.Info, pos token.Pos, path []ast.Node, rs *ast.ReturnStmt) (*StubInfo, error) {
- returnIdx := -1
- for i, r := range rs.Results {
- if pos >= r.Pos() && pos <= r.End() {
- returnIdx = i
- }
- }
- if returnIdx == -1 {
- return nil, fmt.Errorf("pos %d not within return statement bounds: [%d-%d]", pos, rs.Pos(), rs.End())
- }
- concObj, pointer := concreteType(rs.Results[returnIdx], ti)
- if concObj == nil || concObj.Obj().Pkg() == nil {
- return nil, nil
- }
- ef := enclosingFunction(path, ti)
- if ef == nil {
- return nil, fmt.Errorf("could not find the enclosing function of the return statement")
- }
- iface := ifaceType(ef.Results.List[returnIdx].Type, ti)
- if iface == nil {
- return nil, nil
- }
- return &StubInfo{
- Concrete: concObj,
- Pointer: pointer,
- Interface: iface,
- }, nil
-}
-
-// fromValueSpec returns *StubInfo from a variable declaration such as
-// var x io.Writer = &T{}
-func fromValueSpec(ti *types.Info, vs *ast.ValueSpec, pos token.Pos) *StubInfo {
- var idx int
- for i, vs := range vs.Values {
- if pos >= vs.Pos() && pos <= vs.End() {
- idx = i
- break
- }
- }
-
- valueNode := vs.Values[idx]
- ifaceNode := vs.Type
- callExp, ok := valueNode.(*ast.CallExpr)
- // if the ValueSpec is `var _ = myInterface(...)`
- // as opposed to `var _ myInterface = ...`
- if ifaceNode == nil && ok && len(callExp.Args) == 1 {
- ifaceNode = callExp.Fun
- valueNode = callExp.Args[0]
- }
- concObj, pointer := concreteType(valueNode, ti)
- if concObj == nil || concObj.Obj().Pkg() == nil {
- return nil
- }
- ifaceObj := ifaceType(ifaceNode, ti)
- if ifaceObj == nil {
- return nil
- }
- return &StubInfo{
- Concrete: concObj,
- Interface: ifaceObj,
- Pointer: pointer,
- }
-}
-
-// fromAssignStmt returns *StubInfo from a variable re-assignment such as
-// var x io.Writer
-// x = &T{}
-func fromAssignStmt(ti *types.Info, as *ast.AssignStmt, pos token.Pos) *StubInfo {
- idx := -1
- var lhs, rhs ast.Expr
- // Given a re-assignment interface conversion error,
- // the compiler error shows up on the right hand side of the expression.
- // For example, x = &T{} where x is io.Writer highlights the error
- // under "&T{}" and not "x".
- for i, hs := range as.Rhs {
- if pos >= hs.Pos() && pos <= hs.End() {
- idx = i
- break
- }
- }
- if idx == -1 {
- return nil
- }
- // Technically, this should never happen as
- // we would get a "cannot assign N values to M variables"
- // before we get an interface conversion error. Nonetheless,
- // guard against out of range index errors.
- if idx >= len(as.Lhs) {
- return nil
- }
- lhs, rhs = as.Lhs[idx], as.Rhs[idx]
- ifaceObj := ifaceType(lhs, ti)
- if ifaceObj == nil {
- return nil
- }
- concType, pointer := concreteType(rhs, ti)
- if concType == nil || concType.Obj().Pkg() == nil {
- return nil
- }
- return &StubInfo{
- Concrete: concType,
- Interface: ifaceObj,
- Pointer: pointer,
- }
-}
-
-// RelativeToFiles returns a types.Qualifier that formats package names
-// according to the files where the concrete and interface types are defined.
-//
-// This is similar to types.RelativeTo except if a file imports the package with a different name,
-// then it will use it. And if the file does import the package but it is ignored,
-// then it will return the original name. It also prefers package names in ifaceFile in case
-// an import is missing from concFile but is present in ifaceFile.
-//
-// Additionally, if missingImport is not nil, the function will be called whenever the concFile
-// is presented with a package that is not imported. This is useful so that as types.TypeString is
-// formatting a function signature, it is identifying packages that will need to be imported when
-// stubbing an interface.
-func RelativeToFiles(concPkg *types.Package, concFile, ifaceFile *ast.File, missingImport func(name, path string)) types.Qualifier {
- return func(other *types.Package) string {
- if other == concPkg {
- return ""
- }
-
- // Check if the concrete file already has the given import,
- // if so return the default package name or the renamed import statement.
- for _, imp := range concFile.Imports {
- impPath, _ := strconv.Unquote(imp.Path.Value)
- isIgnored := imp.Name != nil && (imp.Name.Name == "." || imp.Name.Name == "_")
- if impPath == other.Path() && !isIgnored {
- importName := other.Name()
- if imp.Name != nil {
- importName = imp.Name.Name
- }
- return importName
- }
- }
-
- // If the concrete file does not have the import, check if the package
- // is renamed in the interface file and prefer that.
- var importName string
- if ifaceFile != nil {
- for _, imp := range ifaceFile.Imports {
- impPath, _ := strconv.Unquote(imp.Path.Value)
- isIgnored := imp.Name != nil && (imp.Name.Name == "." || imp.Name.Name == "_")
- if impPath == other.Path() && !isIgnored {
- if imp.Name != nil && imp.Name.Name != concPkg.Name() {
- importName = imp.Name.Name
- }
- break
- }
- }
- }
-
- if missingImport != nil {
- missingImport(importName, other.Path())
- }
-
- // Up until this point, importName must stay empty when calling missingImport,
- // otherwise we'd end up with `import time "time"` which doesn't look idiomatic.
- if importName == "" {
- importName = other.Name()
- }
- return importName
- }
-}
-
-// ifaceType will try to extract the types.Object that defines
-// the interface given the ast.Expr where the "missing method"
-// or "conversion" errors happen.
-func ifaceType(n ast.Expr, ti *types.Info) types.Object {
- tv, ok := ti.Types[n]
- if !ok {
- return nil
- }
- typ := tv.Type
- named, ok := typ.(*types.Named)
- if !ok {
- return nil
- }
- _, ok = named.Underlying().(*types.Interface)
- if !ok {
- return nil
- }
- // Interfaces defined in the "builtin" package return nil a Pkg().
- // But they are still real interfaces that we need to make a special case for.
- // Therefore, protect gopls from panicking if a new interface type was added in the future.
- if named.Obj().Pkg() == nil && named.Obj().Name() != "error" {
- return nil
- }
- return named.Obj()
-}
-
-// concreteType tries to extract the *types.Named that defines
-// the concrete type given the ast.Expr where the "missing method"
-// or "conversion" errors happened. If the concrete type is something
-// that cannot have methods defined on it (such as basic types), this
-// method will return a nil *types.Named. The second return parameter
-// is a boolean that indicates whether the concreteType was defined as a
-// pointer or value.
-func concreteType(n ast.Expr, ti *types.Info) (*types.Named, bool) {
- tv, ok := ti.Types[n]
- if !ok {
- return nil, false
- }
- typ := tv.Type
- ptr, isPtr := typ.(*types.Pointer)
- if isPtr {
- typ = ptr.Elem()
- }
- named, ok := typ.(*types.Named)
- if !ok {
- return nil, false
- }
- return named, isPtr
-}
-
-// enclosingFunction returns the signature and type of the function
-// enclosing the given position.
-func enclosingFunction(path []ast.Node, info *types.Info) *ast.FuncType {
- for _, node := range path {
- switch t := node.(type) {
- case *ast.FuncDecl:
- if _, ok := info.Defs[t.Name]; ok {
- return t.Type
- }
- case *ast.FuncLit:
- if _, ok := info.Types[t]; ok {
- return t.Type
- }
- }
- }
- return nil
-}
diff --git a/internal/lsp/analysis/undeclaredname/testdata/src/a/a.go b/internal/lsp/analysis/undeclaredname/testdata/src/a/a.go
deleted file mode 100644
index 81c732001..000000000
--- a/internal/lsp/analysis/undeclaredname/testdata/src/a/a.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package undeclared
-
-func x() int {
- var z int
- z = y // want "undeclared name: y"
-
- if z == m { // want "undeclared name: m"
- z = 1
- }
-
- if z == 1 {
- z = 1
- } else if z == n+1 { // want "undeclared name: n"
- z = 1
- }
-
- switch z {
- case 10:
- z = 1
- case a: // want "undeclared name: a"
- z = 1
- }
- return z
-}
diff --git a/internal/lsp/analysis/undeclaredname/testdata/src/a/channels.go b/internal/lsp/analysis/undeclaredname/testdata/src/a/channels.go
deleted file mode 100644
index ecf00ecfc..000000000
--- a/internal/lsp/analysis/undeclaredname/testdata/src/a/channels.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package undeclared
-
-func channels(s string) {
- undefinedChannels(c()) // want "undeclared name: undefinedChannels"
-}
-
-func c() (<-chan string, chan string) {
- return make(<-chan string), make(chan string)
-}
diff --git a/internal/lsp/analysis/undeclaredname/testdata/src/a/consecutive_params.go b/internal/lsp/analysis/undeclaredname/testdata/src/a/consecutive_params.go
deleted file mode 100644
index ab7b2ba5c..000000000
--- a/internal/lsp/analysis/undeclaredname/testdata/src/a/consecutive_params.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package undeclared
-
-func consecutiveParams() {
- var s string
- undefinedConsecutiveParams(s, s) // want "undeclared name: undefinedConsecutiveParams"
-}
diff --git a/internal/lsp/analysis/undeclaredname/testdata/src/a/error_param.go b/internal/lsp/analysis/undeclaredname/testdata/src/a/error_param.go
deleted file mode 100644
index 341a9d2a4..000000000
--- a/internal/lsp/analysis/undeclaredname/testdata/src/a/error_param.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package undeclared
-
-func errorParam() {
- var err error
- undefinedErrorParam(err) // want "undeclared name: undefinedErrorParam"
-}
diff --git a/internal/lsp/analysis/undeclaredname/testdata/src/a/literals.go b/internal/lsp/analysis/undeclaredname/testdata/src/a/literals.go
deleted file mode 100644
index ab82463d0..000000000
--- a/internal/lsp/analysis/undeclaredname/testdata/src/a/literals.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package undeclared
-
-type T struct{}
-
-func literals() {
- undefinedLiterals("hey compiler", T{}, &T{}) // want "undeclared name: undefinedLiterals"
-}
diff --git a/internal/lsp/analysis/undeclaredname/testdata/src/a/operation.go b/internal/lsp/analysis/undeclaredname/testdata/src/a/operation.go
deleted file mode 100644
index 9a543821e..000000000
--- a/internal/lsp/analysis/undeclaredname/testdata/src/a/operation.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package undeclared
-
-import "time"
-
-func operation() {
- undefinedOperation(10 * time.Second) // want "undeclared name: undefinedOperation"
-}
diff --git a/internal/lsp/analysis/undeclaredname/testdata/src/a/selector.go b/internal/lsp/analysis/undeclaredname/testdata/src/a/selector.go
deleted file mode 100644
index 9ed09a27f..000000000
--- a/internal/lsp/analysis/undeclaredname/testdata/src/a/selector.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package undeclared
-
-func selector() {
- m := map[int]bool{}
- undefinedSelector(m[1]) // want "undeclared name: undefinedSelector"
-}
diff --git a/internal/lsp/analysis/undeclaredname/testdata/src/a/slice.go b/internal/lsp/analysis/undeclaredname/testdata/src/a/slice.go
deleted file mode 100644
index d741c68f6..000000000
--- a/internal/lsp/analysis/undeclaredname/testdata/src/a/slice.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package undeclared
-
-func slice() {
- undefinedSlice([]int{1, 2}) // want "undeclared name: undefinedSlice"
-}
diff --git a/internal/lsp/analysis/undeclaredname/testdata/src/a/tuple.go b/internal/lsp/analysis/undeclaredname/testdata/src/a/tuple.go
deleted file mode 100644
index 3148e8f4d..000000000
--- a/internal/lsp/analysis/undeclaredname/testdata/src/a/tuple.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package undeclared
-
-func tuple() {
- undefinedTuple(b()) // want "undeclared name: undefinedTuple"
-}
-
-func b() (string, error) {
- return "", nil
-}
diff --git a/internal/lsp/analysis/undeclaredname/testdata/src/a/unique_params.go b/internal/lsp/analysis/undeclaredname/testdata/src/a/unique_params.go
deleted file mode 100644
index 98f77a43c..000000000
--- a/internal/lsp/analysis/undeclaredname/testdata/src/a/unique_params.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package undeclared
-
-func uniqueArguments() {
- var s string
- var i int
- undefinedUniqueArguments(s, i, s) // want "undeclared name: undefinedUniqueArguments"
-}
diff --git a/internal/lsp/analysis/undeclaredname/undeclared.go b/internal/lsp/analysis/undeclaredname/undeclared.go
deleted file mode 100644
index 22b552c37..000000000
--- a/internal/lsp/analysis/undeclaredname/undeclared.go
+++ /dev/null
@@ -1,340 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package undeclaredname defines an Analyzer that applies suggested fixes
-// to errors of the type "undeclared name: %s".
-package undeclaredname
-
-import (
- "bytes"
- "fmt"
- "go/ast"
- "go/format"
- "go/token"
- "go/types"
- "strings"
- "unicode"
-
- "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/ast/astutil"
- "golang.org/x/tools/internal/analysisinternal"
- "golang.org/x/tools/internal/span"
-)
-
-const Doc = `suggested fixes for "undeclared name: <>"
-
-This checker provides suggested fixes for type errors of the
-type "undeclared name: <>". It will either insert a new statement,
-such as:
-
-"<> := "
-
-or a new function declaration, such as:
-
-func <>(inferred parameters) {
- panic("implement me!")
-}
-`
-
-var Analyzer = &analysis.Analyzer{
- Name: string(analysisinternal.UndeclaredName),
- Doc: Doc,
- Requires: []*analysis.Analyzer{},
- Run: run,
- RunDespiteErrors: true,
-}
-
-const undeclaredNamePrefix = "undeclared name: "
-
-func run(pass *analysis.Pass) (interface{}, error) {
- for _, err := range analysisinternal.GetTypeErrors(pass) {
- runForError(pass, err)
- }
- return nil, nil
-}
-
-func runForError(pass *analysis.Pass, err types.Error) {
- if !strings.HasPrefix(err.Msg, undeclaredNamePrefix) {
- return
- }
- name := strings.TrimPrefix(err.Msg, undeclaredNamePrefix)
- var file *ast.File
- for _, f := range pass.Files {
- if f.Pos() <= err.Pos && err.Pos < f.End() {
- file = f
- break
- }
- }
- if file == nil {
- return
- }
-
- // Get the path for the relevant range.
- path, _ := astutil.PathEnclosingInterval(file, err.Pos, err.Pos)
- if len(path) < 2 {
- return
- }
- ident, ok := path[0].(*ast.Ident)
- if !ok || ident.Name != name {
- return
- }
-
- // Undeclared quick fixes only work in function bodies.
- inFunc := false
- for i := range path {
- if _, inFunc = path[i].(*ast.FuncDecl); inFunc {
- if i == 0 {
- return
- }
- if _, isBody := path[i-1].(*ast.BlockStmt); !isBody {
- return
- }
- break
- }
- }
- if !inFunc {
- return
- }
- // Skip selector expressions because it might be too complex
- // to try and provide a suggested fix for fields and methods.
- if _, ok := path[1].(*ast.SelectorExpr); ok {
- return
- }
- tok := pass.Fset.File(file.Pos())
- if tok == nil {
- return
- }
- offset := pass.Fset.Position(err.Pos).Offset
- end := tok.Pos(offset + len(name))
- pass.Report(analysis.Diagnostic{
- Pos: err.Pos,
- End: end,
- Message: err.Msg,
- })
-}
-
-func SuggestedFix(fset *token.FileSet, rng span.Range, content []byte, file *ast.File, pkg *types.Package, info *types.Info) (*analysis.SuggestedFix, error) {
- pos := rng.Start // don't use the end
- path, _ := astutil.PathEnclosingInterval(file, pos, pos)
- if len(path) < 2 {
- return nil, fmt.Errorf("no expression found")
- }
- ident, ok := path[0].(*ast.Ident)
- if !ok {
- return nil, fmt.Errorf("no identifier found")
- }
-
- // Check for a possible call expression, in which case we should add a
- // new function declaration.
- if len(path) > 1 {
- if _, ok := path[1].(*ast.CallExpr); ok {
- return newFunctionDeclaration(path, file, pkg, info, fset)
- }
- }
-
- // Get the place to insert the new statement.
- insertBeforeStmt := analysisinternal.StmtToInsertVarBefore(path)
- if insertBeforeStmt == nil {
- return nil, fmt.Errorf("could not locate insertion point")
- }
-
- insertBefore := fset.Position(insertBeforeStmt.Pos()).Offset
-
- // Get the indent to add on the line after the new statement.
- // Since this will have a parse error, we can not use format.Source().
- contentBeforeStmt, indent := content[:insertBefore], "\n"
- if nl := bytes.LastIndex(contentBeforeStmt, []byte("\n")); nl != -1 {
- indent = string(contentBeforeStmt[nl:])
- }
-
- // Create the new local variable statement.
- newStmt := fmt.Sprintf("%s := %s", ident.Name, indent)
- return &analysis.SuggestedFix{
- Message: fmt.Sprintf("Create variable \"%s\"", ident.Name),
- TextEdits: []analysis.TextEdit{{
- Pos: insertBeforeStmt.Pos(),
- End: insertBeforeStmt.Pos(),
- NewText: []byte(newStmt),
- }},
- }, nil
-}
-
-func newFunctionDeclaration(path []ast.Node, file *ast.File, pkg *types.Package, info *types.Info, fset *token.FileSet) (*analysis.SuggestedFix, error) {
- if len(path) < 3 {
- return nil, fmt.Errorf("unexpected set of enclosing nodes: %v", path)
- }
- ident, ok := path[0].(*ast.Ident)
- if !ok {
- return nil, fmt.Errorf("no name for function declaration %v (%T)", path[0], path[0])
- }
- call, ok := path[1].(*ast.CallExpr)
- if !ok {
- return nil, fmt.Errorf("no call expression found %v (%T)", path[1], path[1])
- }
-
- // Find the enclosing function, so that we can add the new declaration
- // below.
- var enclosing *ast.FuncDecl
- for _, n := range path {
- if n, ok := n.(*ast.FuncDecl); ok {
- enclosing = n
- break
- }
- }
- // TODO(rstambler): Support the situation when there is no enclosing
- // function.
- if enclosing == nil {
- return nil, fmt.Errorf("no enclosing function found: %v", path)
- }
-
- pos := enclosing.End()
-
- var paramNames []string
- var paramTypes []types.Type
- // keep track of all param names to later ensure uniqueness
- nameCounts := map[string]int{}
- for _, arg := range call.Args {
- typ := info.TypeOf(arg)
- if typ == nil {
- return nil, fmt.Errorf("unable to determine type for %s", arg)
- }
-
- switch t := typ.(type) {
- // this is the case where another function call returning multiple
- // results is used as an argument
- case *types.Tuple:
- n := t.Len()
- for i := 0; i < n; i++ {
- name := typeToArgName(t.At(i).Type())
- nameCounts[name]++
-
- paramNames = append(paramNames, name)
- paramTypes = append(paramTypes, types.Default(t.At(i).Type()))
- }
-
- default:
- // does the argument have a name we can reuse?
- // only happens in case of a *ast.Ident
- var name string
- if ident, ok := arg.(*ast.Ident); ok {
- name = ident.Name
- }
-
- if name == "" {
- name = typeToArgName(typ)
- }
-
- nameCounts[name]++
-
- paramNames = append(paramNames, name)
- paramTypes = append(paramTypes, types.Default(typ))
- }
- }
-
- for n, c := range nameCounts {
- // Any names we saw more than once will need a unique suffix added
- // on. Reset the count to 1 to act as the suffix for the first
- // occurrence of that name.
- if c >= 2 {
- nameCounts[n] = 1
- } else {
- delete(nameCounts, n)
- }
- }
-
- params := &ast.FieldList{}
-
- for i, name := range paramNames {
- if suffix, repeats := nameCounts[name]; repeats {
- nameCounts[name]++
- name = fmt.Sprintf("%s%d", name, suffix)
- }
-
- // only worth checking after previous param in the list
- if i > 0 {
- // if type of parameter at hand is the same as the previous one,
- // add it to the previous param list of identifiers so to have:
- // (s1, s2 string)
- // and not
- // (s1 string, s2 string)
- if paramTypes[i] == paramTypes[i-1] {
- params.List[len(params.List)-1].Names = append(params.List[len(params.List)-1].Names, ast.NewIdent(name))
- continue
- }
- }
-
- params.List = append(params.List, &ast.Field{
- Names: []*ast.Ident{
- ast.NewIdent(name),
- },
- Type: analysisinternal.TypeExpr(fset, file, pkg, paramTypes[i]),
- })
- }
-
- decl := &ast.FuncDecl{
- Name: ast.NewIdent(ident.Name),
- Type: &ast.FuncType{
- Params: params,
- // TODO(rstambler): Also handle result parameters here.
- },
- Body: &ast.BlockStmt{
- List: []ast.Stmt{
- &ast.ExprStmt{
- X: &ast.CallExpr{
- Fun: ast.NewIdent("panic"),
- Args: []ast.Expr{
- &ast.BasicLit{
- Value: `"unimplemented"`,
- },
- },
- },
- },
- },
- },
- }
-
- b := bytes.NewBufferString("\n\n")
- if err := format.Node(b, fset, decl); err != nil {
- return nil, err
- }
- return &analysis.SuggestedFix{
- Message: fmt.Sprintf("Create function \"%s\"", ident.Name),
- TextEdits: []analysis.TextEdit{{
- Pos: pos,
- End: pos,
- NewText: b.Bytes(),
- }},
- }, nil
-}
-func typeToArgName(ty types.Type) string {
- s := types.Default(ty).String()
-
- switch t := ty.(type) {
- case *types.Basic:
- // use first letter in type name for basic types
- return s[0:1]
- case *types.Slice:
- // use element type to decide var name for slices
- return typeToArgName(t.Elem())
- case *types.Array:
- // use element type to decide var name for arrays
- return typeToArgName(t.Elem())
- case *types.Chan:
- return "ch"
- }
-
- s = strings.TrimFunc(s, func(r rune) bool {
- return !unicode.IsLetter(r)
- })
-
- if s == "error" {
- return "err"
- }
-
- // remove package (if present)
- // and make first letter lowercase
- a := []rune(s[strings.LastIndexByte(s, '.')+1:])
- a[0] = unicode.ToLower(a[0])
- return string(a)
-}
diff --git a/internal/lsp/analysis/undeclaredname/undeclared_test.go b/internal/lsp/analysis/undeclaredname/undeclared_test.go
deleted file mode 100644
index b71543937..000000000
--- a/internal/lsp/analysis/undeclaredname/undeclared_test.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package undeclaredname_test
-
-import (
- "testing"
-
- "golang.org/x/tools/go/analysis/analysistest"
- "golang.org/x/tools/internal/lsp/analysis/undeclaredname"
-)
-
-func Test(t *testing.T) {
- testdata := analysistest.TestData()
- analysistest.Run(t, testdata, undeclaredname.Analyzer, "a")
-}
diff --git a/internal/lsp/analysis/unusedparams/unusedparams_test.go b/internal/lsp/analysis/unusedparams/unusedparams_test.go
deleted file mode 100644
index dff17c95e..000000000
--- a/internal/lsp/analysis/unusedparams/unusedparams_test.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package unusedparams_test
-
-import (
- "testing"
-
- "golang.org/x/tools/go/analysis/analysistest"
- "golang.org/x/tools/internal/lsp/analysis/unusedparams"
- "golang.org/x/tools/internal/typeparams"
-)
-
-func Test(t *testing.T) {
- testdata := analysistest.TestData()
- tests := []string{"a"}
- if typeparams.Enabled {
- tests = append(tests, "typeparams")
- }
- analysistest.RunWithSuggestedFixes(t, testdata, unusedparams.Analyzer, tests...)
-}
diff --git a/internal/lsp/analysis/useany/useany_test.go b/internal/lsp/analysis/useany/useany_test.go
deleted file mode 100644
index 535d91526..000000000
--- a/internal/lsp/analysis/useany/useany_test.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package useany_test
-
-import (
- "testing"
-
- "golang.org/x/tools/go/analysis/analysistest"
- "golang.org/x/tools/internal/lsp/analysis/useany"
- "golang.org/x/tools/internal/typeparams"
-)
-
-func Test(t *testing.T) {
- if !typeparams.Enabled {
- t.Skip("type params are not enabled")
- }
- testdata := analysistest.TestData()
- analysistest.RunWithSuggestedFixes(t, testdata, useany.Analyzer, "a")
-}
diff --git a/internal/lsp/cache/analysis.go b/internal/lsp/cache/analysis.go
deleted file mode 100644
index d66a3ed37..000000000
--- a/internal/lsp/cache/analysis.go
+++ /dev/null
@@ -1,433 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cache
-
-import (
- "context"
- "fmt"
- "go/ast"
- "go/types"
- "reflect"
- "sort"
- "sync"
-
- "golang.org/x/sync/errgroup"
- "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/internal/analysisinternal"
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/lsp/debug/tag"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/memoize"
- "golang.org/x/tools/internal/span"
- errors "golang.org/x/xerrors"
-)
-
-func (s *snapshot) Analyze(ctx context.Context, id string, analyzers []*source.Analyzer) ([]*source.Diagnostic, error) {
- var roots []*actionHandle
- for _, a := range analyzers {
- if !a.IsEnabled(s.view) {
- continue
- }
- ah, err := s.actionHandle(ctx, PackageID(id), a.Analyzer)
- if err != nil {
- return nil, err
- }
- roots = append(roots, ah)
- }
-
- // Check if the context has been canceled before running the analyses.
- if ctx.Err() != nil {
- return nil, ctx.Err()
- }
-
- var results []*source.Diagnostic
- for _, ah := range roots {
- diagnostics, _, err := ah.analyze(ctx, s)
- if err != nil {
- // Keep going if a single analyzer failed.
- event.Error(ctx, fmt.Sprintf("analyzer %q failed", ah.analyzer.Name), err)
- continue
- }
- results = append(results, diagnostics...)
- }
- return results, nil
-}
-
-type actionHandleKey string
-
-// An action represents one unit of analysis work: the application of
-// one analysis to one package. Actions form a DAG, both within a
-// package (as different analyzers are applied, either in sequence or
-// parallel), and across packages (as dependencies are analyzed).
-type actionHandle struct {
- handle *memoize.Handle
-
- analyzer *analysis.Analyzer
- pkg *pkg
-}
-
-type actionData struct {
- diagnostics []*source.Diagnostic
- result interface{}
- objectFacts map[objectFactKey]analysis.Fact
- packageFacts map[packageFactKey]analysis.Fact
- err error
-}
-
-type objectFactKey struct {
- obj types.Object
- typ reflect.Type
-}
-
-type packageFactKey struct {
- pkg *types.Package
- typ reflect.Type
-}
-
-func (s *snapshot) actionHandle(ctx context.Context, id PackageID, a *analysis.Analyzer) (*actionHandle, error) {
- ph, err := s.buildPackageHandle(ctx, id, source.ParseFull)
- if err != nil {
- return nil, err
- }
- act := s.getActionHandle(id, ph.mode, a)
- if act != nil {
- return act, nil
- }
- if len(ph.key) == 0 {
- return nil, errors.Errorf("actionHandle: no key for package %s", id)
- }
- pkg, err := ph.check(ctx, s)
- if err != nil {
- return nil, err
- }
- act = &actionHandle{
- analyzer: a,
- pkg: pkg,
- }
- var deps []*actionHandle
- // Add a dependency on each required analyzers.
- for _, req := range a.Requires {
- reqActionHandle, err := s.actionHandle(ctx, id, req)
- if err != nil {
- return nil, err
- }
- deps = append(deps, reqActionHandle)
- }
-
- // TODO(golang/go#35089): Re-enable this when we doesn't use ParseExported
- // mode for dependencies. In the meantime, disable analysis for dependencies,
- // since we don't get anything useful out of it.
- if false {
- // An analysis that consumes/produces facts
- // must run on the package's dependencies too.
- if len(a.FactTypes) > 0 {
- importIDs := make([]string, 0, len(ph.m.Deps))
- for _, importID := range ph.m.Deps {
- importIDs = append(importIDs, string(importID))
- }
- sort.Strings(importIDs) // for determinism
- for _, importID := range importIDs {
- depActionHandle, err := s.actionHandle(ctx, PackageID(importID), a)
- if err != nil {
- return nil, err
- }
- deps = append(deps, depActionHandle)
- }
- }
- }
-
- h := s.generation.Bind(buildActionKey(a, ph), func(ctx context.Context, arg memoize.Arg) interface{} {
- snapshot := arg.(*snapshot)
- // Analyze dependencies first.
- results, err := execAll(ctx, snapshot, deps)
- if err != nil {
- return &actionData{
- err: err,
- }
- }
- return runAnalysis(ctx, snapshot, a, pkg, results)
- }, nil)
- act.handle = h
-
- act = s.addActionHandle(act)
- return act, nil
-}
-
-func (act *actionHandle) analyze(ctx context.Context, snapshot *snapshot) ([]*source.Diagnostic, interface{}, error) {
- d, err := act.handle.Get(ctx, snapshot.generation, snapshot)
- if err != nil {
- return nil, nil, err
- }
- data, ok := d.(*actionData)
- if !ok {
- return nil, nil, errors.Errorf("unexpected type for %s:%s", act.pkg.ID(), act.analyzer.Name)
- }
- if data == nil {
- return nil, nil, errors.Errorf("unexpected nil analysis for %s:%s", act.pkg.ID(), act.analyzer.Name)
- }
- return data.diagnostics, data.result, data.err
-}
-
-func buildActionKey(a *analysis.Analyzer, ph *packageHandle) actionHandleKey {
- return actionHandleKey(hashContents([]byte(fmt.Sprintf("%p %s", a, string(ph.key)))))
-}
-
-func (act *actionHandle) String() string {
- return fmt.Sprintf("%s@%s", act.analyzer, act.pkg.PkgPath())
-}
-
-func execAll(ctx context.Context, snapshot *snapshot, actions []*actionHandle) (map[*actionHandle]*actionData, error) {
- var mu sync.Mutex
- results := make(map[*actionHandle]*actionData)
-
- g, ctx := errgroup.WithContext(ctx)
- for _, act := range actions {
- act := act
- g.Go(func() error {
- v, err := act.handle.Get(ctx, snapshot.generation, snapshot)
- if err != nil {
- return err
- }
- data, ok := v.(*actionData)
- if !ok {
- return errors.Errorf("unexpected type for %s: %T", act, v)
- }
-
- mu.Lock()
- defer mu.Unlock()
- results[act] = data
-
- return nil
- })
- }
- return results, g.Wait()
-}
-
-func runAnalysis(ctx context.Context, snapshot *snapshot, analyzer *analysis.Analyzer, pkg *pkg, deps map[*actionHandle]*actionData) (data *actionData) {
- data = &actionData{
- objectFacts: make(map[objectFactKey]analysis.Fact),
- packageFacts: make(map[packageFactKey]analysis.Fact),
- }
- defer func() {
- if r := recover(); r != nil {
- data.err = errors.Errorf("analysis %s for package %s panicked: %v", analyzer.Name, pkg.PkgPath(), r)
- }
- }()
-
- // Plumb the output values of the dependencies
- // into the inputs of this action. Also facts.
- inputs := make(map[*analysis.Analyzer]interface{})
-
- for depHandle, depData := range deps {
- if depHandle.pkg == pkg {
- // Same package, different analysis (horizontal edge):
- // in-memory outputs of prerequisite analyzers
- // become inputs to this analysis pass.
- inputs[depHandle.analyzer] = depData.result
- } else if depHandle.analyzer == analyzer { // (always true)
- // Same analysis, different package (vertical edge):
- // serialized facts produced by prerequisite analysis
- // become available to this analysis pass.
- for key, fact := range depData.objectFacts {
- // Filter out facts related to objects
- // that are irrelevant downstream
- // (equivalently: not in the compiler export data).
- if !exportedFrom(key.obj, depHandle.pkg.types) {
- continue
- }
- data.objectFacts[key] = fact
- }
- for key, fact := range depData.packageFacts {
- // TODO: filter out facts that belong to
- // packages not mentioned in the export data
- // to prevent side channels.
-
- data.packageFacts[key] = fact
- }
- }
- }
-
- var syntax []*ast.File
- for _, cgf := range pkg.compiledGoFiles {
- syntax = append(syntax, cgf.File)
- }
-
- var diagnostics []*analysis.Diagnostic
-
- // Run the analysis.
- pass := &analysis.Pass{
- Analyzer: analyzer,
- Fset: snapshot.FileSet(),
- Files: syntax,
- Pkg: pkg.GetTypes(),
- TypesInfo: pkg.GetTypesInfo(),
- TypesSizes: pkg.GetTypesSizes(),
- ResultOf: inputs,
- Report: func(d analysis.Diagnostic) {
- // Prefix the diagnostic category with the analyzer's name.
- if d.Category == "" {
- d.Category = analyzer.Name
- } else {
- d.Category = analyzer.Name + "." + d.Category
- }
- diagnostics = append(diagnostics, &d)
- },
- ImportObjectFact: func(obj types.Object, ptr analysis.Fact) bool {
- if obj == nil {
- panic("nil object")
- }
- key := objectFactKey{obj, factType(ptr)}
-
- if v, ok := data.objectFacts[key]; ok {
- reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
- return true
- }
- return false
- },
- ExportObjectFact: func(obj types.Object, fact analysis.Fact) {
- if obj.Pkg() != pkg.types {
- panic(fmt.Sprintf("internal error: in analysis %s of package %s: Fact.Set(%s, %T): can't set facts on objects belonging another package",
- analyzer, pkg.ID(), obj, fact))
- }
- key := objectFactKey{obj, factType(fact)}
- data.objectFacts[key] = fact // clobber any existing entry
- },
- ImportPackageFact: func(pkg *types.Package, ptr analysis.Fact) bool {
- if pkg == nil {
- panic("nil package")
- }
- key := packageFactKey{pkg, factType(ptr)}
- if v, ok := data.packageFacts[key]; ok {
- reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
- return true
- }
- return false
- },
- ExportPackageFact: func(fact analysis.Fact) {
- key := packageFactKey{pkg.types, factType(fact)}
- data.packageFacts[key] = fact // clobber any existing entry
- },
- AllObjectFacts: func() []analysis.ObjectFact {
- facts := make([]analysis.ObjectFact, 0, len(data.objectFacts))
- for k := range data.objectFacts {
- facts = append(facts, analysis.ObjectFact{Object: k.obj, Fact: data.objectFacts[k]})
- }
- return facts
- },
- AllPackageFacts: func() []analysis.PackageFact {
- facts := make([]analysis.PackageFact, 0, len(data.packageFacts))
- for k := range data.packageFacts {
- facts = append(facts, analysis.PackageFact{Package: k.pkg, Fact: data.packageFacts[k]})
- }
- return facts
- },
- }
- analysisinternal.SetTypeErrors(pass, pkg.typeErrors)
-
- if pkg.IsIllTyped() {
- data.err = errors.Errorf("analysis skipped due to errors in package")
- return data
- }
- data.result, data.err = pass.Analyzer.Run(pass)
- if data.err != nil {
- return data
- }
-
- if got, want := reflect.TypeOf(data.result), pass.Analyzer.ResultType; got != want {
- data.err = errors.Errorf(
- "internal error: on package %s, analyzer %s returned a result of type %v, but declared ResultType %v",
- pass.Pkg.Path(), pass.Analyzer, got, want)
- return data
- }
-
- // disallow calls after Run
- pass.ExportObjectFact = func(obj types.Object, fact analysis.Fact) {
- panic(fmt.Sprintf("%s:%s: Pass.ExportObjectFact(%s, %T) called after Run", analyzer.Name, pkg.PkgPath(), obj, fact))
- }
- pass.ExportPackageFact = func(fact analysis.Fact) {
- panic(fmt.Sprintf("%s:%s: Pass.ExportPackageFact(%T) called after Run", analyzer.Name, pkg.PkgPath(), fact))
- }
-
- for _, diag := range diagnostics {
- srcDiags, err := analysisDiagnosticDiagnostics(snapshot, pkg, analyzer, diag)
- if err != nil {
- event.Error(ctx, "unable to compute analysis error position", err, tag.Category.Of(diag.Category), tag.Package.Of(pkg.ID()))
- continue
- }
- if ctx.Err() != nil {
- data.err = ctx.Err()
- return data
- }
- data.diagnostics = append(data.diagnostics, srcDiags...)
- }
- return data
-}
-
-// exportedFrom reports whether obj may be visible to a package that imports pkg.
-// This includes not just the exported members of pkg, but also unexported
-// constants, types, fields, and methods, perhaps belonging to oether packages,
-// that find there way into the API.
-// This is an overapproximation of the more accurate approach used by
-// gc export data, which walks the type graph, but it's much simpler.
-//
-// TODO(adonovan): do more accurate filtering by walking the type graph.
-func exportedFrom(obj types.Object, pkg *types.Package) bool {
- switch obj := obj.(type) {
- case *types.Func:
- return obj.Exported() && obj.Pkg() == pkg ||
- obj.Type().(*types.Signature).Recv() != nil
- case *types.Var:
- return obj.Exported() && obj.Pkg() == pkg ||
- obj.IsField()
- case *types.TypeName, *types.Const:
- return true
- }
- return false // Nil, Builtin, Label, or PkgName
-}
-
-func factType(fact analysis.Fact) reflect.Type {
- t := reflect.TypeOf(fact)
- if t.Kind() != reflect.Ptr {
- panic(fmt.Sprintf("invalid Fact type: got %T, want pointer", fact))
- }
- return t
-}
-
-func (s *snapshot) DiagnosePackage(ctx context.Context, spkg source.Package) (map[span.URI][]*source.Diagnostic, error) {
- pkg := spkg.(*pkg)
- // Apply type error analyzers. They augment type error diagnostics with their own fixes.
- var analyzers []*source.Analyzer
- for _, a := range s.View().Options().TypeErrorAnalyzers {
- analyzers = append(analyzers, a)
- }
- var errorAnalyzerDiag []*source.Diagnostic
- if pkg.HasTypeErrors() {
- var err error
- errorAnalyzerDiag, err = s.Analyze(ctx, pkg.ID(), analyzers)
- if err != nil {
- // Keep going: analysis failures should not block diagnostics.
- event.Error(ctx, "type error analysis failed", err, tag.Package.Of(pkg.ID()))
- }
- }
- diags := map[span.URI][]*source.Diagnostic{}
- for _, diag := range pkg.diagnostics {
- for _, eaDiag := range errorAnalyzerDiag {
- if eaDiag.URI == diag.URI && eaDiag.Range == diag.Range && eaDiag.Message == diag.Message {
- // Type error analyzers just add fixes and tags. Make a copy,
- // since we don't own either, and overwrite.
- // The analyzer itself can't do this merge because
- // analysis.Diagnostic doesn't have all the fields, and Analyze
- // can't because it doesn't have the type error, notably its code.
- clone := *diag
- clone.SuggestedFixes = eaDiag.SuggestedFixes
- clone.Tags = eaDiag.Tags
- clone.Analyzer = eaDiag.Analyzer
- diag = &clone
- }
- }
- diags[diag.URI] = append(diags[diag.URI], diag)
- }
- return diags, nil
-}
diff --git a/internal/lsp/cache/cache.go b/internal/lsp/cache/cache.go
deleted file mode 100644
index ac670b573..000000000
--- a/internal/lsp/cache/cache.go
+++ /dev/null
@@ -1,293 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cache
-
-import (
- "context"
- "crypto/sha256"
- "fmt"
- "go/ast"
- "go/token"
- "go/types"
- "html/template"
- "io/ioutil"
- "os"
- "reflect"
- "sort"
- "strconv"
- "sync"
- "sync/atomic"
- "time"
-
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/gocommand"
- "golang.org/x/tools/internal/lsp/debug/tag"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/memoize"
- "golang.org/x/tools/internal/span"
-)
-
-func New(options func(*source.Options)) *Cache {
- index := atomic.AddInt64(&cacheIndex, 1)
- c := &Cache{
- id: strconv.FormatInt(index, 10),
- fset: token.NewFileSet(),
- options: options,
- fileContent: map[span.URI]*fileHandle{},
- }
- return c
-}
-
-type Cache struct {
- id string
- fset *token.FileSet
- options func(*source.Options)
-
- store memoize.Store
-
- fileMu sync.Mutex
- fileContent map[span.URI]*fileHandle
-}
-
-type fileHandle struct {
- modTime time.Time
- uri span.URI
- bytes []byte
- hash string
- err error
-
- // size is the file length as reported by Stat, for the purpose of
- // invalidation. Probably we could just use len(bytes), but this is done
- // defensively in case the definition of file size in the file system
- // differs.
- size int64
-}
-
-func (h *fileHandle) Saved() bool {
- return true
-}
-
-func (c *Cache) GetFile(ctx context.Context, uri span.URI) (source.FileHandle, error) {
- return c.getFile(ctx, uri)
-}
-
-func (c *Cache) getFile(ctx context.Context, uri span.URI) (*fileHandle, error) {
- fi, statErr := os.Stat(uri.Filename())
- if statErr != nil {
- return &fileHandle{
- err: statErr,
- uri: uri,
- }, nil
- }
-
- c.fileMu.Lock()
- fh, ok := c.fileContent[uri]
- c.fileMu.Unlock()
-
- // Check mtime and file size to infer whether the file has changed. This is
- // an imperfect heuristic. Notably on some real systems (such as WSL) the
- // filesystem clock resolution can be large -- 1/64s was observed. Therefore
- // it's quite possible for multiple file modifications to occur within a
- // single logical 'tick'. This can leave the cache in an incorrect state, but
- // unfortunately we can't afford to pay the price of reading the actual file
- // content here. Or to be more precise, reading would be a risky change and
- // we don't know if we can afford it.
- //
- // We check file size in an attempt to reduce the probability of false cache
- // hits.
- if ok && fh.modTime.Equal(fi.ModTime()) && fh.size == fi.Size() {
- return fh, nil
- }
-
- fh, err := readFile(ctx, uri, fi)
- if err != nil {
- return nil, err
- }
- c.fileMu.Lock()
- c.fileContent[uri] = fh
- c.fileMu.Unlock()
- return fh, nil
-}
-
-// ioLimit limits the number of parallel file reads per process.
-var ioLimit = make(chan struct{}, 128)
-
-func readFile(ctx context.Context, uri span.URI, fi os.FileInfo) (*fileHandle, error) {
- select {
- case ioLimit <- struct{}{}:
- case <-ctx.Done():
- return nil, ctx.Err()
- }
- defer func() { <-ioLimit }()
-
- ctx, done := event.Start(ctx, "cache.readFile", tag.File.Of(uri.Filename()))
- _ = ctx
- defer done()
-
- data, err := ioutil.ReadFile(uri.Filename())
- if err != nil {
- return &fileHandle{
- modTime: fi.ModTime(),
- size: fi.Size(),
- err: err,
- }, nil
- }
- return &fileHandle{
- modTime: fi.ModTime(),
- size: fi.Size(),
- uri: uri,
- bytes: data,
- hash: hashContents(data),
- }, nil
-}
-
-func (c *Cache) NewSession(ctx context.Context) *Session {
- index := atomic.AddInt64(&sessionIndex, 1)
- options := source.DefaultOptions().Clone()
- if c.options != nil {
- c.options(options)
- }
- s := &Session{
- cache: c,
- id: strconv.FormatInt(index, 10),
- options: options,
- overlays: make(map[span.URI]*overlay),
- gocmdRunner: &gocommand.Runner{},
- }
- event.Log(ctx, "New session", KeyCreateSession.Of(s))
- return s
-}
-
-func (c *Cache) FileSet() *token.FileSet {
- return c.fset
-}
-
-func (h *fileHandle) URI() span.URI {
- return h.uri
-}
-
-func (h *fileHandle) Hash() string {
- return h.hash
-}
-
-func (h *fileHandle) FileIdentity() source.FileIdentity {
- return source.FileIdentity{
- URI: h.uri,
- Hash: h.hash,
- }
-}
-
-func (h *fileHandle) Read() ([]byte, error) {
- return h.bytes, h.err
-}
-
-func hashContents(contents []byte) string {
- return fmt.Sprintf("%x", sha256.Sum256(contents))
-}
-
-var cacheIndex, sessionIndex, viewIndex int64
-
-func (c *Cache) ID() string { return c.id }
-func (c *Cache) MemStats() map[reflect.Type]int { return c.store.Stats() }
-
-type packageStat struct {
- id PackageID
- mode source.ParseMode
- file int64
- ast int64
- types int64
- typesInfo int64
- total int64
-}
-
-func (c *Cache) PackageStats(withNames bool) template.HTML {
- var packageStats []packageStat
- c.store.DebugOnlyIterate(func(k, v interface{}) {
- switch k.(type) {
- case packageHandleKey:
- v := v.(*packageData)
- if v.pkg == nil {
- break
- }
- var typsCost, typInfoCost int64
- if v.pkg.types != nil {
- typsCost = typesCost(v.pkg.types.Scope())
- }
- if v.pkg.typesInfo != nil {
- typInfoCost = typesInfoCost(v.pkg.typesInfo)
- }
- stat := packageStat{
- id: v.pkg.m.ID,
- mode: v.pkg.mode,
- types: typsCost,
- typesInfo: typInfoCost,
- }
- for _, f := range v.pkg.compiledGoFiles {
- stat.file += int64(len(f.Src))
- stat.ast += astCost(f.File)
- }
- stat.total = stat.file + stat.ast + stat.types + stat.typesInfo
- packageStats = append(packageStats, stat)
- }
- })
- var totalCost int64
- for _, stat := range packageStats {
- totalCost += stat.total
- }
- sort.Slice(packageStats, func(i, j int) bool {
- return packageStats[i].total > packageStats[j].total
- })
- html := "<table><thead><td>Name</td><td>total = file + ast + types + types info</td></thead>\n"
- human := func(n int64) string {
- return fmt.Sprintf("%.2f", float64(n)/(1024*1024))
- }
- var printedCost int64
- for _, stat := range packageStats {
- name := stat.id
- if !withNames {
- name = "-"
- }
- html += fmt.Sprintf("<tr><td>%v (%v)</td><td>%v = %v + %v + %v + %v</td></tr>\n", name, stat.mode,
- human(stat.total), human(stat.file), human(stat.ast), human(stat.types), human(stat.typesInfo))
- printedCost += stat.total
- if float64(printedCost) > float64(totalCost)*.9 {
- break
- }
- }
- html += "</table>\n"
- return template.HTML(html)
-}
-
-func astCost(f *ast.File) int64 {
- if f == nil {
- return 0
- }
- var count int64
- ast.Inspect(f, func(_ ast.Node) bool {
- count += 32 // nodes are pretty small.
- return true
- })
- return count
-}
-
-func typesCost(scope *types.Scope) int64 {
- cost := 64 + int64(scope.Len())*128 // types.object looks pretty big
- for i := 0; i < scope.NumChildren(); i++ {
- cost += typesCost(scope.Child(i))
- }
- return cost
-}
-
-func typesInfoCost(info *types.Info) int64 {
- // Most of these refer to existing objects, with the exception of InitOrder, Selections, and Types.
- cost := 24*len(info.Defs) +
- 32*len(info.Implicits) +
- 256*len(info.InitOrder) + // these are big, but there aren't many of them.
- 32*len(info.Scopes) +
- 128*len(info.Selections) + // wild guess
- 128*len(info.Types) + // wild guess
- 32*len(info.Uses)
- return int64(cost)
-}
diff --git a/internal/lsp/cache/check.go b/internal/lsp/cache/check.go
deleted file mode 100644
index f16686354..000000000
--- a/internal/lsp/cache/check.go
+++ /dev/null
@@ -1,863 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cache
-
-import (
- "bytes"
- "context"
- "fmt"
- "go/ast"
- "go/types"
- "path"
- "path/filepath"
- "regexp"
- "sort"
- "strings"
- "sync"
-
- "golang.org/x/mod/module"
- "golang.org/x/tools/go/ast/astutil"
- "golang.org/x/tools/go/packages"
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/lsp/debug/tag"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/memoize"
- "golang.org/x/tools/internal/packagesinternal"
- "golang.org/x/tools/internal/span"
- "golang.org/x/tools/internal/typeparams"
- "golang.org/x/tools/internal/typesinternal"
- errors "golang.org/x/xerrors"
-)
-
-type packageHandleKey string
-
-type packageHandle struct {
- handle *memoize.Handle
-
- goFiles, compiledGoFiles []*parseGoHandle
-
- // mode is the mode the files were parsed in.
- mode source.ParseMode
-
- // m is the metadata associated with the package.
- m *KnownMetadata
-
- // key is the hashed key for the package.
- key packageHandleKey
-}
-
-func (ph *packageHandle) packageKey() packageKey {
- return packageKey{
- id: ph.m.ID,
- mode: ph.mode,
- }
-}
-
-func (ph *packageHandle) imports(ctx context.Context, s source.Snapshot) (result []string) {
- for _, pgh := range ph.goFiles {
- f, err := s.ParseGo(ctx, pgh.file, source.ParseHeader)
- if err != nil {
- continue
- }
- seen := map[string]struct{}{}
- for _, impSpec := range f.File.Imports {
- imp := strings.Trim(impSpec.Path.Value, `"`)
- if _, ok := seen[imp]; !ok {
- seen[imp] = struct{}{}
- result = append(result, imp)
- }
- }
- }
-
- sort.Strings(result)
- return result
-}
-
-// packageData contains the data produced by type-checking a package.
-type packageData struct {
- pkg *pkg
- err error
-}
-
-// buildPackageHandle returns a packageHandle for a given package and mode.
-// It assumes that the given ID already has metadata available, so it does not
-// attempt to reload missing or invalid metadata. The caller must reload
-// metadata if needed.
-func (s *snapshot) buildPackageHandle(ctx context.Context, id PackageID, mode source.ParseMode) (*packageHandle, error) {
- if ph := s.getPackage(id, mode); ph != nil {
- return ph, nil
- }
-
- // Build the packageHandle for this ID and its dependencies.
- ph, deps, err := s.buildKey(ctx, id, mode)
- if err != nil {
- return nil, err
- }
-
- // Do not close over the packageHandle or the snapshot in the Bind function.
- // This creates a cycle, which causes the finalizers to never run on the handles.
- // The possible cycles are:
- //
- // packageHandle.h.function -> packageHandle
- // packageHandle.h.function -> snapshot -> packageHandle
- //
-
- m := ph.m
- key := ph.key
-
- h := s.generation.Bind(key, func(ctx context.Context, arg memoize.Arg) interface{} {
- snapshot := arg.(*snapshot)
-
- // Begin loading the direct dependencies, in parallel.
- var wg sync.WaitGroup
- for _, dep := range deps {
- wg.Add(1)
- go func(dep *packageHandle) {
- dep.check(ctx, snapshot)
- wg.Done()
- }(dep)
- }
-
- data := &packageData{}
- data.pkg, data.err = typeCheck(ctx, snapshot, m.Metadata, mode, deps)
- // Make sure that the workers above have finished before we return,
- // especially in case of cancellation.
- wg.Wait()
-
- return data
- }, nil)
- ph.handle = h
-
- // Cache the handle in the snapshot. If a package handle has already
- // been cached, addPackage will return the cached value. This is fine,
- // since the original package handle above will have no references and be
- // garbage collected.
- ph = s.addPackageHandle(ph)
-
- return ph, nil
-}
-
-// buildKey computes the key for a given packageHandle.
-func (s *snapshot) buildKey(ctx context.Context, id PackageID, mode source.ParseMode) (*packageHandle, map[PackagePath]*packageHandle, error) {
- m := s.getMetadata(id)
- if m == nil {
- return nil, nil, errors.Errorf("no metadata for %s", id)
- }
- goFiles, err := s.parseGoHandles(ctx, m.GoFiles, mode)
- if err != nil {
- return nil, nil, err
- }
- compiledGoFiles, err := s.parseGoHandles(ctx, m.CompiledGoFiles, mode)
- if err != nil {
- return nil, nil, err
- }
- ph := &packageHandle{
- m: m,
- goFiles: goFiles,
- compiledGoFiles: compiledGoFiles,
- mode: mode,
- }
- // Make sure all of the depList are sorted.
- depList := append([]PackageID{}, m.Deps...)
- sort.Slice(depList, func(i, j int) bool {
- return depList[i] < depList[j]
- })
-
- deps := make(map[PackagePath]*packageHandle)
-
- // Begin computing the key by getting the depKeys for all dependencies.
- var depKeys []packageHandleKey
- for _, depID := range depList {
- depHandle, err := s.buildPackageHandle(ctx, depID, s.workspaceParseMode(depID))
- // Don't use invalid metadata for dependencies if the top-level
- // metadata is valid. We only load top-level packages, so if the
- // top-level is valid, all of its dependencies should be as well.
- if err != nil || m.Valid && !depHandle.m.Valid {
- if err != nil {
- event.Error(ctx, fmt.Sprintf("%s: no dep handle for %s", id, depID), err, tag.Snapshot.Of(s.id))
- } else {
- event.Log(ctx, fmt.Sprintf("%s: invalid dep handle for %s", id, depID), tag.Snapshot.Of(s.id))
- }
-
- if ctx.Err() != nil {
- return nil, nil, ctx.Err()
- }
- // One bad dependency should not prevent us from checking the entire package.
- // Add a special key to mark a bad dependency.
- depKeys = append(depKeys, packageHandleKey(fmt.Sprintf("%s import not found", depID)))
- continue
- }
- deps[depHandle.m.PkgPath] = depHandle
- depKeys = append(depKeys, depHandle.key)
- }
- experimentalKey := s.View().Options().ExperimentalPackageCacheKey
- ph.key = checkPackageKey(ph.m.ID, compiledGoFiles, m, depKeys, mode, experimentalKey)
- return ph, deps, nil
-}
-
-func (s *snapshot) workspaceParseMode(id PackageID) source.ParseMode {
- s.mu.Lock()
- defer s.mu.Unlock()
- _, ws := s.workspacePackages[id]
- if !ws {
- return source.ParseExported
- }
- if s.view.Options().MemoryMode == source.ModeNormal {
- return source.ParseFull
- }
- if s.isActiveLocked(id, nil) {
- return source.ParseFull
- }
- return source.ParseExported
-}
-
-func checkPackageKey(id PackageID, pghs []*parseGoHandle, m *KnownMetadata, deps []packageHandleKey, mode source.ParseMode, experimentalKey bool) packageHandleKey {
- b := bytes.NewBuffer(nil)
- b.WriteString(string(id))
- if m.Module != nil {
- b.WriteString(m.Module.GoVersion) // go version affects type check errors.
- }
- if !experimentalKey {
- // cfg was used to produce the other hashed inputs (package ID, parsed Go
- // files, and deps). It should not otherwise affect the inputs to the type
- // checker, so this experiment omits it. This should increase cache hits on
- // the daemon as cfg contains the environment and working directory.
- b.WriteString(hashConfig(m.Config))
- }
- b.WriteByte(byte(mode))
- for _, dep := range deps {
- b.WriteString(string(dep))
- }
- for _, cgf := range pghs {
- b.WriteString(cgf.file.FileIdentity().String())
- }
- return packageHandleKey(hashContents(b.Bytes()))
-}
-
-// hashEnv returns a hash of the snapshot's configuration.
-func hashEnv(s *snapshot) string {
- s.view.optionsMu.Lock()
- env := s.view.options.EnvSlice()
- s.view.optionsMu.Unlock()
-
- b := &bytes.Buffer{}
- for _, e := range env {
- b.WriteString(e)
- }
- return hashContents(b.Bytes())
-}
-
-// hashConfig returns the hash for the *packages.Config.
-func hashConfig(config *packages.Config) string {
- b := bytes.NewBuffer(nil)
-
- // Dir, Mode, Env, BuildFlags are the parts of the config that can change.
- b.WriteString(config.Dir)
- b.WriteString(string(rune(config.Mode)))
-
- for _, e := range config.Env {
- b.WriteString(e)
- }
- for _, f := range config.BuildFlags {
- b.WriteString(f)
- }
- return hashContents(b.Bytes())
-}
-
-func (ph *packageHandle) Check(ctx context.Context, s source.Snapshot) (source.Package, error) {
- return ph.check(ctx, s.(*snapshot))
-}
-
-func (ph *packageHandle) check(ctx context.Context, s *snapshot) (*pkg, error) {
- v, err := ph.handle.Get(ctx, s.generation, s)
- if err != nil {
- return nil, err
- }
- data := v.(*packageData)
- return data.pkg, data.err
-}
-
-func (ph *packageHandle) CompiledGoFiles() []span.URI {
- return ph.m.CompiledGoFiles
-}
-
-func (ph *packageHandle) ID() string {
- return string(ph.m.ID)
-}
-
-func (ph *packageHandle) cached(g *memoize.Generation) (*pkg, error) {
- v := ph.handle.Cached(g)
- if v == nil {
- return nil, errors.Errorf("no cached type information for %s", ph.m.PkgPath)
- }
- data := v.(*packageData)
- return data.pkg, data.err
-}
-
-func (s *snapshot) parseGoHandles(ctx context.Context, files []span.URI, mode source.ParseMode) ([]*parseGoHandle, error) {
- pghs := make([]*parseGoHandle, 0, len(files))
- for _, uri := range files {
- fh, err := s.GetFile(ctx, uri)
- if err != nil {
- return nil, err
- }
- pghs = append(pghs, s.parseGoHandle(ctx, fh, mode))
- }
- return pghs, nil
-}
-
-func typeCheck(ctx context.Context, snapshot *snapshot, m *Metadata, mode source.ParseMode, deps map[PackagePath]*packageHandle) (*pkg, error) {
- var filter *unexportedFilter
- if mode == source.ParseExported {
- filter = &unexportedFilter{uses: map[string]bool{}}
- }
- pkg, err := doTypeCheck(ctx, snapshot, m, mode, deps, filter)
- if err != nil {
- return nil, err
- }
-
- if mode == source.ParseExported {
- // The AST filtering is a little buggy and may remove things it
- // shouldn't. If we only got undeclared name errors, try one more
- // time keeping those names.
- missing, unexpected := filter.ProcessErrors(pkg.typeErrors)
- if len(unexpected) == 0 && len(missing) != 0 {
- event.Log(ctx, fmt.Sprintf("discovered missing identifiers: %v", missing), tag.Package.Of(string(m.ID)))
- pkg, err = doTypeCheck(ctx, snapshot, m, mode, deps, filter)
- if err != nil {
- return nil, err
- }
- missing, unexpected = filter.ProcessErrors(pkg.typeErrors)
- }
- if len(unexpected) != 0 || len(missing) != 0 {
- event.Log(ctx, fmt.Sprintf("falling back to safe trimming due to type errors: %v or still-missing identifiers: %v", unexpected, missing), tag.Package.Of(string(m.ID)))
- pkg, err = doTypeCheck(ctx, snapshot, m, mode, deps, nil)
- if err != nil {
- return nil, err
- }
- }
- }
- // If this is a replaced module in the workspace, the version is
- // meaningless, and we don't want clients to access it.
- if m.Module != nil {
- version := m.Module.Version
- if source.IsWorkspaceModuleVersion(version) {
- version = ""
- }
- pkg.version = &module.Version{
- Path: m.Module.Path,
- Version: version,
- }
- }
-
- // We don't care about a package's errors unless we have parsed it in full.
- if mode != source.ParseFull {
- return pkg, nil
- }
-
- for _, e := range m.Errors {
- diags, err := goPackagesErrorDiagnostics(snapshot, pkg, e)
- if err != nil {
- event.Error(ctx, "unable to compute positions for list errors", err, tag.Package.Of(pkg.ID()))
- continue
- }
- pkg.diagnostics = append(pkg.diagnostics, diags...)
- }
-
- // Our heuristic for whether to show type checking errors is:
- // + If any file was 'fixed', don't show type checking errors as we
- // can't guarantee that they reference accurate locations in the source.
- // + If there is a parse error _in the current file_, suppress type
- // errors in that file.
- // + Otherwise, show type errors even in the presence of parse errors in
- // other package files. go/types attempts to suppress follow-on errors
- // due to bad syntax, so on balance type checking errors still provide
- // a decent signal/noise ratio as long as the file in question parses.
-
- // Track URIs with parse errors so that we can suppress type errors for these
- // files.
- unparseable := map[span.URI]bool{}
- for _, e := range pkg.parseErrors {
- diags, err := parseErrorDiagnostics(snapshot, pkg, e)
- if err != nil {
- event.Error(ctx, "unable to compute positions for parse errors", err, tag.Package.Of(pkg.ID()))
- continue
- }
- for _, diag := range diags {
- unparseable[diag.URI] = true
- pkg.diagnostics = append(pkg.diagnostics, diag)
- }
- }
-
- if pkg.hasFixedFiles {
- return pkg, nil
- }
-
- unexpanded := pkg.typeErrors
- pkg.typeErrors = nil
- for _, e := range expandErrors(unexpanded, snapshot.View().Options().RelatedInformationSupported) {
- diags, err := typeErrorDiagnostics(snapshot, pkg, e)
- if err != nil {
- event.Error(ctx, "unable to compute positions for type errors", err, tag.Package.Of(pkg.ID()))
- continue
- }
- pkg.typeErrors = append(pkg.typeErrors, e.primary)
- for _, diag := range diags {
- // If the file didn't parse cleanly, it is highly likely that type
- // checking errors will be confusing or redundant. But otherwise, type
- // checking usually provides a good enough signal to include.
- if !unparseable[diag.URI] {
- pkg.diagnostics = append(pkg.diagnostics, diag)
- }
- }
- }
-
- depsErrors, err := snapshot.depsErrors(ctx, pkg)
- if err != nil {
- return nil, err
- }
- pkg.diagnostics = append(pkg.diagnostics, depsErrors...)
-
- return pkg, nil
-}
-
-var goVersionRx = regexp.MustCompile(`^go([1-9][0-9]*)\.(0|[1-9][0-9]*)$`)
-
-func doTypeCheck(ctx context.Context, snapshot *snapshot, m *Metadata, mode source.ParseMode, deps map[PackagePath]*packageHandle, astFilter *unexportedFilter) (*pkg, error) {
- ctx, done := event.Start(ctx, "cache.typeCheck", tag.Package.Of(string(m.ID)))
- defer done()
-
- pkg := &pkg{
- m: m,
- mode: mode,
- imports: make(map[PackagePath]*pkg),
- types: types.NewPackage(string(m.PkgPath), string(m.Name)),
- typesInfo: &types.Info{
- Types: make(map[ast.Expr]types.TypeAndValue),
- Defs: make(map[*ast.Ident]types.Object),
- Uses: make(map[*ast.Ident]types.Object),
- Implicits: make(map[ast.Node]types.Object),
- Selections: make(map[*ast.SelectorExpr]*types.Selection),
- Scopes: make(map[ast.Node]*types.Scope),
- },
- typesSizes: m.TypesSizes,
- }
- typeparams.InitInstanceInfo(pkg.typesInfo)
-
- for _, gf := range pkg.m.GoFiles {
- // In the presence of line directives, we may need to report errors in
- // non-compiled Go files, so we need to register them on the package.
- // However, we only need to really parse them in ParseFull mode, when
- // the user might actually be looking at the file.
- fh, err := snapshot.GetFile(ctx, gf)
- if err != nil {
- return nil, err
- }
- goMode := source.ParseFull
- if mode != source.ParseFull {
- goMode = source.ParseHeader
- }
- pgf, err := snapshot.ParseGo(ctx, fh, goMode)
- if err != nil {
- return nil, err
- }
- pkg.goFiles = append(pkg.goFiles, pgf)
- }
-
- if err := parseCompiledGoFiles(ctx, snapshot, mode, pkg, astFilter); err != nil {
- return nil, err
- }
-
- // Use the default type information for the unsafe package.
- if m.PkgPath == "unsafe" {
- // Don't type check Unsafe: it's unnecessary, and doing so exposes a data
- // race to Unsafe.completed.
- pkg.types = types.Unsafe
- return pkg, nil
- }
-
- if len(m.CompiledGoFiles) == 0 {
- // No files most likely means go/packages failed. Try to attach error
- // messages to the file as much as possible.
- var found bool
- for _, e := range m.Errors {
- srcDiags, err := goPackagesErrorDiagnostics(snapshot, pkg, e)
- if err != nil {
- continue
- }
- found = true
- pkg.diagnostics = append(pkg.diagnostics, srcDiags...)
- }
- if found {
- return pkg, nil
- }
- return nil, errors.Errorf("no parsed files for package %s, expected: %v, errors: %v", pkg.m.PkgPath, pkg.compiledGoFiles, m.Errors)
- }
-
- cfg := &types.Config{
- Error: func(e error) {
- pkg.typeErrors = append(pkg.typeErrors, e.(types.Error))
- },
- Importer: importerFunc(func(pkgPath string) (*types.Package, error) {
- // If the context was cancelled, we should abort.
- if ctx.Err() != nil {
- return nil, ctx.Err()
- }
- dep := resolveImportPath(pkgPath, pkg, deps)
- if dep == nil {
- return nil, snapshot.missingPkgError(ctx, pkgPath)
- }
- if !source.IsValidImport(string(m.PkgPath), string(dep.m.PkgPath)) {
- return nil, errors.Errorf("invalid use of internal package %s", pkgPath)
- }
- depPkg, err := dep.check(ctx, snapshot)
- if err != nil {
- return nil, err
- }
- pkg.imports[depPkg.m.PkgPath] = depPkg
- return depPkg.types, nil
- }),
- }
- if pkg.m.Module != nil && pkg.m.Module.GoVersion != "" {
- goVersion := "go" + pkg.m.Module.GoVersion
- // types.NewChecker panics if GoVersion is invalid. An unparsable mod
- // file should probably stop us before we get here, but double check
- // just in case.
- if goVersionRx.MatchString(goVersion) {
- typesinternal.SetGoVersion(cfg, goVersion)
- }
- }
-
- if mode != source.ParseFull {
- cfg.DisableUnusedImportCheck = true
- cfg.IgnoreFuncBodies = true
- }
-
- // We want to type check cgo code if go/types supports it.
- // We passed typecheckCgo to go/packages when we Loaded.
- typesinternal.SetUsesCgo(cfg)
-
- check := types.NewChecker(cfg, snapshot.FileSet(), pkg.types, pkg.typesInfo)
-
- var files []*ast.File
- for _, cgf := range pkg.compiledGoFiles {
- files = append(files, cgf.File)
- }
-
- // Type checking errors are handled via the config, so ignore them here.
- _ = check.Files(files)
-
- // If the context was cancelled, we may have returned a ton of transient
- // errors to the type checker. Swallow them.
- if ctx.Err() != nil {
- return nil, ctx.Err()
- }
- return pkg, nil
-}
-
-func parseCompiledGoFiles(ctx context.Context, snapshot *snapshot, mode source.ParseMode, pkg *pkg, astFilter *unexportedFilter) error {
- for _, cgf := range pkg.m.CompiledGoFiles {
- fh, err := snapshot.GetFile(ctx, cgf)
- if err != nil {
- return err
- }
-
- var pgf *source.ParsedGoFile
- var fixed bool
- // Only parse Full through the cache -- we need to own Exported ASTs
- // to prune them.
- if mode == source.ParseFull {
- pgh := snapshot.parseGoHandle(ctx, fh, mode)
- pgf, fixed, err = snapshot.parseGo(ctx, pgh)
- } else {
- d := parseGo(ctx, snapshot.FileSet(), fh, mode)
- pgf, fixed, err = d.parsed, d.fixed, d.err
- }
- if err != nil {
- return err
- }
- pkg.compiledGoFiles = append(pkg.compiledGoFiles, pgf)
- if pgf.ParseErr != nil {
- pkg.parseErrors = append(pkg.parseErrors, pgf.ParseErr)
- }
- // If we have fixed parse errors in any of the files, we should hide type
- // errors, as they may be completely nonsensical.
- pkg.hasFixedFiles = pkg.hasFixedFiles || fixed
- }
- if mode != source.ParseExported {
- return nil
- }
- if astFilter != nil {
- var files []*ast.File
- for _, cgf := range pkg.compiledGoFiles {
- files = append(files, cgf.File)
- }
- astFilter.Filter(files)
- } else {
- for _, cgf := range pkg.compiledGoFiles {
- trimAST(cgf.File)
- }
- }
- return nil
-}
-
-func (s *snapshot) depsErrors(ctx context.Context, pkg *pkg) ([]*source.Diagnostic, error) {
- // Select packages that can't be found, and were imported in non-workspace packages.
- // Workspace packages already show their own errors.
- var relevantErrors []*packagesinternal.PackageError
- for _, depsError := range pkg.m.depsErrors {
- // Up to Go 1.15, the missing package was included in the stack, which
- // was presumably a bug. We want the next one up.
- directImporterIdx := len(depsError.ImportStack) - 1
- if s.view.goversion < 15 {
- directImporterIdx = len(depsError.ImportStack) - 2
- }
- if directImporterIdx < 0 {
- continue
- }
-
- directImporter := depsError.ImportStack[directImporterIdx]
- if s.isWorkspacePackage(PackageID(directImporter)) {
- continue
- }
- relevantErrors = append(relevantErrors, depsError)
- }
-
- // Don't build the import index for nothing.
- if len(relevantErrors) == 0 {
- return nil, nil
- }
-
- // Build an index of all imports in the package.
- type fileImport struct {
- cgf *source.ParsedGoFile
- imp *ast.ImportSpec
- }
- allImports := map[string][]fileImport{}
- for _, cgf := range pkg.compiledGoFiles {
- for _, group := range astutil.Imports(s.FileSet(), cgf.File) {
- for _, imp := range group {
- if imp.Path == nil {
- continue
- }
- path := strings.Trim(imp.Path.Value, `"`)
- allImports[path] = append(allImports[path], fileImport{cgf, imp})
- }
- }
- }
-
- // Apply a diagnostic to any import involved in the error, stopping once
- // we reach the workspace.
- var errors []*source.Diagnostic
- for _, depErr := range relevantErrors {
- for i := len(depErr.ImportStack) - 1; i >= 0; i-- {
- item := depErr.ImportStack[i]
- if s.isWorkspacePackage(PackageID(item)) {
- break
- }
-
- for _, imp := range allImports[item] {
- rng, err := source.NewMappedRange(s.FileSet(), imp.cgf.Mapper, imp.imp.Pos(), imp.imp.End()).Range()
- if err != nil {
- return nil, err
- }
- fixes, err := goGetQuickFixes(s, imp.cgf.URI, item)
- if err != nil {
- return nil, err
- }
- errors = append(errors, &source.Diagnostic{
- URI: imp.cgf.URI,
- Range: rng,
- Severity: protocol.SeverityError,
- Source: source.TypeError,
- Message: fmt.Sprintf("error while importing %v: %v", item, depErr.Err),
- SuggestedFixes: fixes,
- })
- }
- }
- }
-
- if len(pkg.compiledGoFiles) == 0 {
- return errors, nil
- }
- mod := s.GoModForFile(pkg.compiledGoFiles[0].URI)
- if mod == "" {
- return errors, nil
- }
- fh, err := s.GetFile(ctx, mod)
- if err != nil {
- return nil, err
- }
- pm, err := s.ParseMod(ctx, fh)
- if err != nil {
- return nil, err
- }
-
- // Add a diagnostic to the module that contained the lowest-level import of
- // the missing package.
- for _, depErr := range relevantErrors {
- for i := len(depErr.ImportStack) - 1; i >= 0; i-- {
- item := depErr.ImportStack[i]
- m := s.getMetadata(PackageID(item))
- if m == nil || m.Module == nil {
- continue
- }
- modVer := module.Version{Path: m.Module.Path, Version: m.Module.Version}
- reference := findModuleReference(pm.File, modVer)
- if reference == nil {
- continue
- }
- rng, err := rangeFromPositions(pm.Mapper, reference.Start, reference.End)
- if err != nil {
- return nil, err
- }
- fixes, err := goGetQuickFixes(s, pm.URI, item)
- if err != nil {
- return nil, err
- }
- errors = append(errors, &source.Diagnostic{
- URI: pm.URI,
- Range: rng,
- Severity: protocol.SeverityError,
- Source: source.TypeError,
- Message: fmt.Sprintf("error while importing %v: %v", item, depErr.Err),
- SuggestedFixes: fixes,
- })
- break
- }
- }
- return errors, nil
-}
-
-// missingPkgError returns an error message for a missing package that varies
-// based on the user's workspace mode.
-func (s *snapshot) missingPkgError(ctx context.Context, pkgPath string) error {
- var b strings.Builder
- if s.workspaceMode()&moduleMode == 0 {
- gorootSrcPkg := filepath.FromSlash(filepath.Join(s.view.goroot, "src", pkgPath))
-
- b.WriteString(fmt.Sprintf("cannot find package %q in any of \n\t%s (from $GOROOT)", pkgPath, gorootSrcPkg))
-
- for _, gopath := range filepath.SplitList(s.view.gopath) {
- gopathSrcPkg := filepath.FromSlash(filepath.Join(gopath, "src", pkgPath))
- b.WriteString(fmt.Sprintf("\n\t%s (from $GOPATH)", gopathSrcPkg))
- }
- } else {
- b.WriteString(fmt.Sprintf("no required module provides package %q", pkgPath))
- if err := s.getInitializationError(ctx); err != nil {
- b.WriteString(fmt.Sprintf("(workspace configuration error: %s)", err.MainError))
- }
- }
- return errors.New(b.String())
-}
-
-type extendedError struct {
- primary types.Error
- secondaries []types.Error
-}
-
-func (e extendedError) Error() string {
- return e.primary.Error()
-}
-
-// expandErrors duplicates "secondary" errors by mapping them to their main
-// error. Some errors returned by the type checker are followed by secondary
-// errors which give more information about the error. These are errors in
-// their own right, and they are marked by starting with \t. For instance, when
-// there is a multiply-defined function, the secondary error points back to the
-// definition first noticed.
-//
-// This function associates the secondary error with its primary error, which can
-// then be used as RelatedInformation when the error becomes a diagnostic.
-//
-// If supportsRelatedInformation is false, the secondary is instead embedded as
-// additional context in the primary error.
-func expandErrors(errs []types.Error, supportsRelatedInformation bool) []extendedError {
- var result []extendedError
- for i := 0; i < len(errs); {
- original := extendedError{
- primary: errs[i],
- }
- for i++; i < len(errs); i++ {
- spl := errs[i]
- if len(spl.Msg) == 0 || spl.Msg[0] != '\t' {
- break
- }
- spl.Msg = spl.Msg[1:]
- original.secondaries = append(original.secondaries, spl)
- }
-
- // Clone the error to all its related locations -- VS Code, at least,
- // doesn't do it for us.
- result = append(result, original)
- for i, mainSecondary := range original.secondaries {
- // Create the new primary error, with a tweaked message, in the
- // secondary's location. We need to start from the secondary to
- // capture its unexported location fields.
- relocatedSecondary := mainSecondary
- if supportsRelatedInformation {
- relocatedSecondary.Msg = fmt.Sprintf("%v (see details)", original.primary.Msg)
- } else {
- relocatedSecondary.Msg = fmt.Sprintf("%v (this error: %v)", original.primary.Msg, mainSecondary.Msg)
- }
- relocatedSecondary.Soft = original.primary.Soft
-
- // Copy over the secondary errors, noting the location of the
- // current error we're cloning.
- clonedError := extendedError{primary: relocatedSecondary, secondaries: []types.Error{original.primary}}
- for j, secondary := range original.secondaries {
- if i == j {
- secondary.Msg += " (this error)"
- }
- clonedError.secondaries = append(clonedError.secondaries, secondary)
- }
- result = append(result, clonedError)
- }
-
- }
- return result
-}
-
-// resolveImportPath resolves an import path in pkg to a package from deps.
-// It should produce the same results as resolveImportPath:
-// https://cs.opensource.google/go/go/+/master:src/cmd/go/internal/load/pkg.go;drc=641918ee09cb44d282a30ee8b66f99a0b63eaef9;l=990.
-func resolveImportPath(importPath string, pkg *pkg, deps map[PackagePath]*packageHandle) *packageHandle {
- if dep := deps[PackagePath(importPath)]; dep != nil {
- return dep
- }
- // We may be in GOPATH mode, in which case we need to check vendor dirs.
- searchDir := path.Dir(pkg.PkgPath())
- for {
- vdir := PackagePath(path.Join(searchDir, "vendor", importPath))
- if vdep := deps[vdir]; vdep != nil {
- return vdep
- }
-
- // Search until Dir doesn't take us anywhere new, e.g. "." or "/".
- next := path.Dir(searchDir)
- if searchDir == next {
- break
- }
- searchDir = next
- }
-
- // Vendor didn't work. Let's try minimal module compatibility mode.
- // In MMC, the packagePath is the canonical (.../vN/...) path, which
- // is hard to calculate. But the go command has already resolved the ID
- // to the non-versioned path, and we can take advantage of that.
- for _, dep := range deps {
- if dep.ID() == importPath {
- return dep
- }
- }
- return nil
-}
-
-// An importFunc is an implementation of the single-method
-// types.Importer interface based on a function value.
-type importerFunc func(path string) (*types.Package, error)
-
-func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) }
diff --git a/internal/lsp/cache/errors.go b/internal/lsp/cache/errors.go
deleted file mode 100644
index e9a86de35..000000000
--- a/internal/lsp/cache/errors.go
+++ /dev/null
@@ -1,411 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cache
-
-import (
- "fmt"
- "go/scanner"
- "go/token"
- "go/types"
- "regexp"
- "strconv"
- "strings"
-
- "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/packages"
- "golang.org/x/tools/internal/analysisinternal"
- "golang.org/x/tools/internal/lsp/command"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
- "golang.org/x/tools/internal/typesinternal"
- errors "golang.org/x/xerrors"
-)
-
-func goPackagesErrorDiagnostics(snapshot *snapshot, pkg *pkg, e packages.Error) ([]*source.Diagnostic, error) {
- if msg, spn, ok := parseGoListImportCycleError(snapshot, e, pkg); ok {
- rng, err := spanToRange(pkg, spn)
- if err != nil {
- return nil, err
- }
- return []*source.Diagnostic{{
- URI: spn.URI(),
- Range: rng,
- Severity: protocol.SeverityError,
- Source: source.ListError,
- Message: msg,
- }}, nil
- }
-
- var spn span.Span
- if e.Pos == "" {
- spn = parseGoListError(e.Msg, pkg.m.Config.Dir)
- // We may not have been able to parse a valid span. Apply the errors to all files.
- if _, err := spanToRange(pkg, spn); err != nil {
- var diags []*source.Diagnostic
- for _, cgf := range pkg.compiledGoFiles {
- diags = append(diags, &source.Diagnostic{
- URI: cgf.URI,
- Severity: protocol.SeverityError,
- Source: source.ListError,
- Message: e.Msg,
- })
- }
- return diags, nil
- }
- } else {
- spn = span.ParseInDir(e.Pos, pkg.m.Config.Dir)
- }
-
- rng, err := spanToRange(pkg, spn)
- if err != nil {
- return nil, err
- }
- return []*source.Diagnostic{{
- URI: spn.URI(),
- Range: rng,
- Severity: protocol.SeverityError,
- Source: source.ListError,
- Message: e.Msg,
- }}, nil
-}
-
-func parseErrorDiagnostics(snapshot *snapshot, pkg *pkg, errList scanner.ErrorList) ([]*source.Diagnostic, error) {
- // The first parser error is likely the root cause of the problem.
- if errList.Len() <= 0 {
- return nil, errors.Errorf("no errors in %v", errList)
- }
- e := errList[0]
- pgf, err := pkg.File(span.URIFromPath(e.Pos.Filename))
- if err != nil {
- return nil, err
- }
- pos := pgf.Tok.Pos(e.Pos.Offset)
- spn, err := span.NewRange(snapshot.FileSet(), pos, pos).Span()
- if err != nil {
- return nil, err
- }
- rng, err := spanToRange(pkg, spn)
- if err != nil {
- return nil, err
- }
- return []*source.Diagnostic{{
- URI: spn.URI(),
- Range: rng,
- Severity: protocol.SeverityError,
- Source: source.ParseError,
- Message: e.Msg,
- }}, nil
-}
-
-var importErrorRe = regexp.MustCompile(`could not import ([^\s]+)`)
-var unsupportedFeatureRe = regexp.MustCompile(`.*require.* go(\d+\.\d+) or later`)
-
-func typeErrorDiagnostics(snapshot *snapshot, pkg *pkg, e extendedError) ([]*source.Diagnostic, error) {
- code, spn, err := typeErrorData(snapshot.FileSet(), pkg, e.primary)
- if err != nil {
- return nil, err
- }
- rng, err := spanToRange(pkg, spn)
- if err != nil {
- return nil, err
- }
- diag := &source.Diagnostic{
- URI: spn.URI(),
- Range: rng,
- Severity: protocol.SeverityError,
- Source: source.TypeError,
- Message: e.primary.Msg,
- }
- if code != 0 {
- diag.Code = code.String()
- diag.CodeHref = typesCodeHref(snapshot, code)
- }
-
- for _, secondary := range e.secondaries {
- _, secondarySpan, err := typeErrorData(snapshot.FileSet(), pkg, secondary)
- if err != nil {
- return nil, err
- }
- rng, err := spanToRange(pkg, secondarySpan)
- if err != nil {
- return nil, err
- }
- diag.Related = append(diag.Related, source.RelatedInformation{
- URI: secondarySpan.URI(),
- Range: rng,
- Message: secondary.Msg,
- })
- }
-
- if match := importErrorRe.FindStringSubmatch(e.primary.Msg); match != nil {
- diag.SuggestedFixes, err = goGetQuickFixes(snapshot, spn.URI(), match[1])
- if err != nil {
- return nil, err
- }
- }
- if match := unsupportedFeatureRe.FindStringSubmatch(e.primary.Msg); match != nil {
- diag.SuggestedFixes, err = editGoDirectiveQuickFix(snapshot, spn.URI(), match[1])
- if err != nil {
- return nil, err
- }
- }
- return []*source.Diagnostic{diag}, nil
-}
-
-func goGetQuickFixes(snapshot *snapshot, uri span.URI, pkg string) ([]source.SuggestedFix, error) {
- // Go get only supports module mode for now.
- if snapshot.workspaceMode()&moduleMode == 0 {
- return nil, nil
- }
- title := fmt.Sprintf("go get package %v", pkg)
- cmd, err := command.NewGoGetPackageCommand(title, command.GoGetPackageArgs{
- URI: protocol.URIFromSpanURI(uri),
- AddRequire: true,
- Pkg: pkg,
- })
- if err != nil {
- return nil, err
- }
- return []source.SuggestedFix{source.SuggestedFixFromCommand(cmd, protocol.QuickFix)}, nil
-}
-
-func editGoDirectiveQuickFix(snapshot *snapshot, uri span.URI, version string) ([]source.SuggestedFix, error) {
- // Go mod edit only supports module mode.
- if snapshot.workspaceMode()&moduleMode == 0 {
- return nil, nil
- }
- title := fmt.Sprintf("go mod edit -go=%s", version)
- cmd, err := command.NewEditGoDirectiveCommand(title, command.EditGoDirectiveArgs{
- URI: protocol.URIFromSpanURI(uri),
- Version: version,
- })
- if err != nil {
- return nil, err
- }
- return []source.SuggestedFix{source.SuggestedFixFromCommand(cmd, protocol.QuickFix)}, nil
-}
-
-func analysisDiagnosticDiagnostics(snapshot *snapshot, pkg *pkg, a *analysis.Analyzer, e *analysis.Diagnostic) ([]*source.Diagnostic, error) {
- var srcAnalyzer *source.Analyzer
- // Find the analyzer that generated this diagnostic.
- for _, sa := range source.EnabledAnalyzers(snapshot) {
- if a == sa.Analyzer {
- srcAnalyzer = sa
- break
- }
- }
-
- spn, err := span.NewRange(snapshot.FileSet(), e.Pos, e.End).Span()
- if err != nil {
- return nil, err
- }
- rng, err := spanToRange(pkg, spn)
- if err != nil {
- return nil, err
- }
- kinds := srcAnalyzer.ActionKind
- if len(srcAnalyzer.ActionKind) == 0 {
- kinds = append(kinds, protocol.QuickFix)
- }
- fixes, err := suggestedAnalysisFixes(snapshot, pkg, e, kinds)
- if err != nil {
- return nil, err
- }
- if srcAnalyzer.Fix != "" {
- cmd, err := command.NewApplyFixCommand(e.Message, command.ApplyFixArgs{
- URI: protocol.URIFromSpanURI(spn.URI()),
- Range: rng,
- Fix: srcAnalyzer.Fix,
- })
- if err != nil {
- return nil, err
- }
- for _, kind := range kinds {
- fixes = append(fixes, source.SuggestedFixFromCommand(cmd, kind))
- }
- }
- related, err := relatedInformation(pkg, snapshot.FileSet(), e)
- if err != nil {
- return nil, err
- }
-
- severity := srcAnalyzer.Severity
- if severity == 0 {
- severity = protocol.SeverityWarning
- }
- diag := &source.Diagnostic{
- URI: spn.URI(),
- Range: rng,
- Severity: severity,
- Source: source.AnalyzerErrorKind(e.Category),
- Message: e.Message,
- Related: related,
- SuggestedFixes: fixes,
- Analyzer: srcAnalyzer,
- }
- // If the fixes only delete code, assume that the diagnostic is reporting dead code.
- if onlyDeletions(fixes) {
- diag.Tags = []protocol.DiagnosticTag{protocol.Unnecessary}
- }
- return []*source.Diagnostic{diag}, nil
-}
-
-// onlyDeletions returns true if all of the suggested fixes are deletions.
-func onlyDeletions(fixes []source.SuggestedFix) bool {
- for _, fix := range fixes {
- if fix.Command != nil {
- return false
- }
- for _, edits := range fix.Edits {
- for _, edit := range edits {
- if edit.NewText != "" {
- return false
- }
- if protocol.ComparePosition(edit.Range.Start, edit.Range.End) == 0 {
- return false
- }
- }
- }
- }
- return len(fixes) > 0
-}
-
-func typesCodeHref(snapshot *snapshot, code typesinternal.ErrorCode) string {
- target := snapshot.View().Options().LinkTarget
- return source.BuildLink(target, "golang.org/x/tools/internal/typesinternal", code.String())
-}
-
-func suggestedAnalysisFixes(snapshot *snapshot, pkg *pkg, diag *analysis.Diagnostic, kinds []protocol.CodeActionKind) ([]source.SuggestedFix, error) {
- var fixes []source.SuggestedFix
- for _, fix := range diag.SuggestedFixes {
- edits := make(map[span.URI][]protocol.TextEdit)
- for _, e := range fix.TextEdits {
- spn, err := span.NewRange(snapshot.FileSet(), e.Pos, e.End).Span()
- if err != nil {
- return nil, err
- }
- rng, err := spanToRange(pkg, spn)
- if err != nil {
- return nil, err
- }
- edits[spn.URI()] = append(edits[spn.URI()], protocol.TextEdit{
- Range: rng,
- NewText: string(e.NewText),
- })
- }
- for _, kind := range kinds {
- fixes = append(fixes, source.SuggestedFix{
- Title: fix.Message,
- Edits: edits,
- ActionKind: kind,
- })
- }
-
- }
- return fixes, nil
-}
-
-func relatedInformation(pkg *pkg, fset *token.FileSet, diag *analysis.Diagnostic) ([]source.RelatedInformation, error) {
- var out []source.RelatedInformation
- for _, related := range diag.Related {
- spn, err := span.NewRange(fset, related.Pos, related.End).Span()
- if err != nil {
- return nil, err
- }
- rng, err := spanToRange(pkg, spn)
- if err != nil {
- return nil, err
- }
- out = append(out, source.RelatedInformation{
- URI: spn.URI(),
- Range: rng,
- Message: related.Message,
- })
- }
- return out, nil
-}
-
-func typeErrorData(fset *token.FileSet, pkg *pkg, terr types.Error) (typesinternal.ErrorCode, span.Span, error) {
- ecode, start, end, ok := typesinternal.ReadGo116ErrorData(terr)
- if !ok {
- start, end = terr.Pos, terr.Pos
- ecode = 0
- }
- posn := fset.Position(start)
- pgf, err := pkg.File(span.URIFromPath(posn.Filename))
- if err != nil {
- return 0, span.Span{}, err
- }
- if !end.IsValid() || end == start {
- end = analysisinternal.TypeErrorEndPos(fset, pgf.Src, start)
- }
- spn, err := parsedGoSpan(pgf, start, end)
- if err != nil {
- return 0, span.Span{}, err
- }
- return ecode, spn, nil
-}
-
-func parsedGoSpan(pgf *source.ParsedGoFile, start, end token.Pos) (span.Span, error) {
- return span.FileSpan(pgf.Tok, pgf.Mapper.Converter, start, end)
-}
-
-// spanToRange converts a span.Span to a protocol.Range,
-// assuming that the span belongs to the package whose diagnostics are being computed.
-func spanToRange(pkg *pkg, spn span.Span) (protocol.Range, error) {
- pgf, err := pkg.File(spn.URI())
- if err != nil {
- return protocol.Range{}, err
- }
- return pgf.Mapper.Range(spn)
-}
-
-// parseGoListError attempts to parse a standard `go list` error message
-// by stripping off the trailing error message.
-//
-// It works only on errors whose message is prefixed by colon,
-// followed by a space (": "). For example:
-//
-// attributes.go:13:1: expected 'package', found 'type'
-//
-func parseGoListError(input, wd string) span.Span {
- input = strings.TrimSpace(input)
- msgIndex := strings.Index(input, ": ")
- if msgIndex < 0 {
- return span.Parse(input)
- }
- return span.ParseInDir(input[:msgIndex], wd)
-}
-
-func parseGoListImportCycleError(snapshot *snapshot, e packages.Error, pkg *pkg) (string, span.Span, bool) {
- re := regexp.MustCompile(`(.*): import stack: \[(.+)\]`)
- matches := re.FindStringSubmatch(strings.TrimSpace(e.Msg))
- if len(matches) < 3 {
- return e.Msg, span.Span{}, false
- }
- msg := matches[1]
- importList := strings.Split(matches[2], " ")
- // Since the error is relative to the current package. The import that is causing
- // the import cycle error is the second one in the list.
- if len(importList) < 2 {
- return msg, span.Span{}, false
- }
- // Imports have quotation marks around them.
- circImp := strconv.Quote(importList[1])
- for _, cgf := range pkg.compiledGoFiles {
- // Search file imports for the import that is causing the import cycle.
- for _, imp := range cgf.File.Imports {
- if imp.Path.Value == circImp {
- spn, err := span.NewRange(snapshot.FileSet(), imp.Pos(), imp.End()).Span()
- if err != nil {
- return msg, span.Span{}, false
- }
- return msg, spn, true
- }
- }
- }
- return msg, span.Span{}, false
-}
diff --git a/internal/lsp/cache/imports.go b/internal/lsp/cache/imports.go
deleted file mode 100644
index 01a2468ef..000000000
--- a/internal/lsp/cache/imports.go
+++ /dev/null
@@ -1,201 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cache
-
-import (
- "context"
- "fmt"
- "reflect"
- "strings"
- "sync"
- "time"
-
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/event/keys"
- "golang.org/x/tools/internal/gocommand"
- "golang.org/x/tools/internal/imports"
- "golang.org/x/tools/internal/lsp/source"
-)
-
-type importsState struct {
- ctx context.Context
-
- mu sync.Mutex
- processEnv *imports.ProcessEnv
- cleanupProcessEnv func()
- cacheRefreshDuration time.Duration
- cacheRefreshTimer *time.Timer
- cachedModFileHash string
- cachedBuildFlags []string
-}
-
-func (s *importsState) runProcessEnvFunc(ctx context.Context, snapshot *snapshot, fn func(*imports.Options) error) error {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- // Find the hash of the active mod file, if any. Using the unsaved content
- // is slightly wasteful, since we'll drop caches a little too often, but
- // the mod file shouldn't be changing while people are autocompleting.
- var modFileHash string
- // If we are using 'legacyWorkspace' mode, we can just read the modfile from
- // the snapshot. Otherwise, we need to get the synthetic workspace mod file.
- //
- // TODO(rfindley): we should be able to just always use the synthetic
- // workspace module, or alternatively use the go.work file.
- if snapshot.workspace.moduleSource == legacyWorkspace {
- for m := range snapshot.workspace.getActiveModFiles() { // range to access the only element
- modFH, err := snapshot.GetFile(ctx, m)
- if err != nil {
- return err
- }
- modFileHash = modFH.FileIdentity().Hash
- }
- } else {
- modFile, err := snapshot.workspace.modFile(ctx, snapshot)
- if err != nil {
- return err
- }
- modBytes, err := modFile.Format()
- if err != nil {
- return err
- }
- modFileHash = hashContents(modBytes)
- }
-
- // view.goEnv is immutable -- changes make a new view. Options can change.
- // We can't compare build flags directly because we may add -modfile.
- snapshot.view.optionsMu.Lock()
- localPrefix := snapshot.view.options.Local
- currentBuildFlags := snapshot.view.options.BuildFlags
- changed := !reflect.DeepEqual(currentBuildFlags, s.cachedBuildFlags) ||
- snapshot.view.options.VerboseOutput != (s.processEnv.Logf != nil) ||
- modFileHash != s.cachedModFileHash
- snapshot.view.optionsMu.Unlock()
-
- // If anything relevant to imports has changed, clear caches and
- // update the processEnv. Clearing caches blocks on any background
- // scans.
- if changed {
- // As a special case, skip cleanup the first time -- we haven't fully
- // initialized the environment yet and calling GetResolver will do
- // unnecessary work and potentially mess up the go.mod file.
- if s.cleanupProcessEnv != nil {
- if resolver, err := s.processEnv.GetResolver(); err == nil {
- if modResolver, ok := resolver.(*imports.ModuleResolver); ok {
- modResolver.ClearForNewMod()
- }
- }
- s.cleanupProcessEnv()
- }
- s.cachedModFileHash = modFileHash
- s.cachedBuildFlags = currentBuildFlags
- var err error
- s.cleanupProcessEnv, err = s.populateProcessEnv(ctx, snapshot)
- if err != nil {
- return err
- }
- }
-
- // Run the user function.
- opts := &imports.Options{
- // Defaults.
- AllErrors: true,
- Comments: true,
- Fragment: true,
- FormatOnly: false,
- TabIndent: true,
- TabWidth: 8,
- Env: s.processEnv,
- LocalPrefix: localPrefix,
- }
-
- if err := fn(opts); err != nil {
- return err
- }
-
- if s.cacheRefreshTimer == nil {
- // Don't refresh more than twice per minute.
- delay := 30 * time.Second
- // Don't spend more than a couple percent of the time refreshing.
- if adaptive := 50 * s.cacheRefreshDuration; adaptive > delay {
- delay = adaptive
- }
- s.cacheRefreshTimer = time.AfterFunc(delay, s.refreshProcessEnv)
- }
-
- return nil
-}
-
-// populateProcessEnv sets the dynamically configurable fields for the view's
-// process environment. Assumes that the caller is holding the s.view.importsMu.
-func (s *importsState) populateProcessEnv(ctx context.Context, snapshot *snapshot) (cleanup func(), err error) {
- pe := s.processEnv
-
- if snapshot.view.Options().VerboseOutput {
- pe.Logf = func(format string, args ...interface{}) {
- event.Log(ctx, fmt.Sprintf(format, args...))
- }
- } else {
- pe.Logf = nil
- }
-
- // Take an extra reference to the snapshot so that its workspace directory
- // (if any) isn't destroyed while we're using it.
- release := snapshot.generation.Acquire()
- _, inv, cleanupInvocation, err := snapshot.goCommandInvocation(ctx, source.LoadWorkspace, &gocommand.Invocation{
- WorkingDir: snapshot.view.rootURI.Filename(),
- })
- if err != nil {
- return nil, err
- }
- pe.WorkingDir = inv.WorkingDir
- pe.BuildFlags = inv.BuildFlags
- pe.WorkingDir = inv.WorkingDir
- pe.ModFile = inv.ModFile
- pe.ModFlag = inv.ModFlag
- pe.Env = map[string]string{}
- for _, kv := range inv.Env {
- split := strings.SplitN(kv, "=", 2)
- if len(split) != 2 {
- continue
- }
- pe.Env[split[0]] = split[1]
- }
-
- return func() {
- cleanupInvocation()
- release()
- }, nil
-}
-
-func (s *importsState) refreshProcessEnv() {
- start := time.Now()
-
- s.mu.Lock()
- env := s.processEnv
- if resolver, err := s.processEnv.GetResolver(); err == nil {
- resolver.ClearForNewScan()
- }
- s.mu.Unlock()
-
- event.Log(s.ctx, "background imports cache refresh starting")
- if err := imports.PrimeCache(context.Background(), env); err == nil {
- event.Log(s.ctx, fmt.Sprintf("background refresh finished after %v", time.Since(start)))
- } else {
- event.Log(s.ctx, fmt.Sprintf("background refresh finished after %v", time.Since(start)), keys.Err.Of(err))
- }
- s.mu.Lock()
- s.cacheRefreshDuration = time.Since(start)
- s.cacheRefreshTimer = nil
- s.mu.Unlock()
-}
-
-func (s *importsState) destroy() {
- s.mu.Lock()
- if s.cleanupProcessEnv != nil {
- s.cleanupProcessEnv()
- }
- s.mu.Unlock()
-}
diff --git a/internal/lsp/cache/load.go b/internal/lsp/cache/load.go
deleted file mode 100644
index 17b7acae0..000000000
--- a/internal/lsp/cache/load.go
+++ /dev/null
@@ -1,507 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cache
-
-import (
- "context"
- "crypto/sha256"
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
- "sort"
- "strings"
- "time"
-
- "golang.org/x/tools/go/packages"
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/gocommand"
- "golang.org/x/tools/internal/lsp/debug/tag"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/memoize"
- "golang.org/x/tools/internal/packagesinternal"
- "golang.org/x/tools/internal/span"
- errors "golang.org/x/xerrors"
-)
-
-// load calls packages.Load for the given scopes, updating package metadata,
-// import graph, and mapped files with the result.
-func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...interface{}) (err error) {
- var query []string
- var containsDir bool // for logging
- for _, scope := range scopes {
- if !s.shouldLoad(scope) {
- continue
- }
- // Unless the context was canceled, set "shouldLoad" to false for all
- // of the metadata we attempted to load.
- defer func() {
- if errors.Is(err, context.Canceled) {
- return
- }
- s.clearShouldLoad(scope)
- }()
- switch scope := scope.(type) {
- case PackagePath:
- if source.IsCommandLineArguments(string(scope)) {
- panic("attempted to load command-line-arguments")
- }
- // The only time we pass package paths is when we're doing a
- // partial workspace load. In those cases, the paths came back from
- // go list and should already be GOPATH-vendorized when appropriate.
- query = append(query, string(scope))
- case fileURI:
- uri := span.URI(scope)
- // Don't try to load a file that doesn't exist.
- fh := s.FindFile(uri)
- if fh == nil || s.View().FileKind(fh) != source.Go {
- continue
- }
- query = append(query, fmt.Sprintf("file=%s", uri.Filename()))
- case moduleLoadScope:
- switch scope {
- case "std", "cmd":
- query = append(query, string(scope))
- default:
- query = append(query, fmt.Sprintf("%s/...", scope))
- }
- case viewLoadScope:
- // If we are outside of GOPATH, a module, or some other known
- // build system, don't load subdirectories.
- if !s.ValidBuildConfiguration() {
- query = append(query, "./")
- } else {
- query = append(query, "./...")
- }
- default:
- panic(fmt.Sprintf("unknown scope type %T", scope))
- }
- switch scope.(type) {
- case viewLoadScope, moduleLoadScope:
- containsDir = true
- }
- }
- if len(query) == 0 {
- return nil
- }
- sort.Strings(query) // for determinism
-
- if s.view.Options().VerboseWorkDoneProgress {
- work := s.view.session.progress.Start(ctx, "Load", fmt.Sprintf("Loading query=%s", query), nil, nil)
- defer func() {
- work.End("Done.")
- }()
- }
-
- ctx, done := event.Start(ctx, "cache.view.load", tag.Query.Of(query))
- defer done()
-
- flags := source.LoadWorkspace
- if allowNetwork {
- flags |= source.AllowNetwork
- }
- _, inv, cleanup, err := s.goCommandInvocation(ctx, flags, &gocommand.Invocation{
- WorkingDir: s.view.rootURI.Filename(),
- })
- if err != nil {
- return err
- }
-
- // Set a last resort deadline on packages.Load since it calls the go
- // command, which may hang indefinitely if it has a bug. golang/go#42132
- // and golang/go#42255 have more context.
- ctx, cancel := context.WithTimeout(ctx, 10*time.Minute)
- defer cancel()
-
- cfg := s.config(ctx, inv)
- pkgs, err := packages.Load(cfg, query...)
- cleanup()
-
- // If the context was canceled, return early. Otherwise, we might be
- // type-checking an incomplete result. Check the context directly,
- // because go/packages adds extra information to the error.
- if ctx.Err() != nil {
- return ctx.Err()
- }
- if err != nil {
- event.Error(ctx, "go/packages.Load", err, tag.Snapshot.Of(s.ID()), tag.Directory.Of(cfg.Dir), tag.Query.Of(query), tag.PackageCount.Of(len(pkgs)))
- } else {
- event.Log(ctx, "go/packages.Load", tag.Snapshot.Of(s.ID()), tag.Directory.Of(cfg.Dir), tag.Query.Of(query), tag.PackageCount.Of(len(pkgs)))
- }
- if len(pkgs) == 0 {
- if err == nil {
- err = fmt.Errorf("no packages returned")
- }
- return errors.Errorf("%v: %w", err, source.PackagesLoadError)
- }
- for _, pkg := range pkgs {
- if !containsDir || s.view.Options().VerboseOutput {
- event.Log(ctx, "go/packages.Load",
- tag.Snapshot.Of(s.ID()),
- tag.Package.Of(pkg.ID),
- tag.Files.Of(pkg.CompiledGoFiles))
- }
- // Ignore packages with no sources, since we will never be able to
- // correctly invalidate that metadata.
- if len(pkg.GoFiles) == 0 && len(pkg.CompiledGoFiles) == 0 {
- continue
- }
- // Special case for the builtin package, as it has no dependencies.
- if pkg.PkgPath == "builtin" {
- if len(pkg.GoFiles) != 1 {
- return errors.Errorf("only expected 1 file for builtin, got %v", len(pkg.GoFiles))
- }
- s.setBuiltin(pkg.GoFiles[0])
- continue
- }
- // Skip test main packages.
- if isTestMain(pkg, s.view.gocache) {
- continue
- }
- // Skip filtered packages. They may be added anyway if they're
- // dependencies of non-filtered packages.
- if s.view.allFilesExcluded(pkg) {
- continue
- }
- // Set the metadata for this package.
- s.mu.Lock()
- m, err := s.setMetadataLocked(ctx, PackagePath(pkg.PkgPath), pkg, cfg, query, map[PackageID]struct{}{})
- s.mu.Unlock()
- if err != nil {
- return err
- }
- if _, err := s.buildPackageHandle(ctx, m.ID, s.workspaceParseMode(m.ID)); err != nil {
- return err
- }
- }
- // Rebuild the import graph when the metadata is updated.
- s.clearAndRebuildImportGraph()
-
- return nil
-}
-
-// workspaceLayoutErrors returns a diagnostic for every open file, as well as
-// an error message if there are no open files.
-func (s *snapshot) workspaceLayoutError(ctx context.Context) *source.CriticalError {
- if len(s.workspace.getKnownModFiles()) == 0 {
- return nil
- }
- if s.view.userGo111Module == off {
- return nil
- }
- if s.workspace.moduleSource != legacyWorkspace {
- return nil
- }
- // If the user has one module per view, there is nothing to warn about.
- if s.ValidBuildConfiguration() && len(s.workspace.getKnownModFiles()) == 1 {
- return nil
- }
-
- // Apply diagnostics about the workspace configuration to relevant open
- // files.
- openFiles := s.openFiles()
-
- // If the snapshot does not have a valid build configuration, it may be
- // that the user has opened a directory that contains multiple modules.
- // Check for that an warn about it.
- if !s.ValidBuildConfiguration() {
- msg := `gopls requires a module at the root of your workspace.
-You can work with multiple modules by opening each one as a workspace folder.
-Improvements to this workflow will be coming soon, and you can learn more here:
-https://github.com/golang/tools/blob/master/gopls/doc/workspace.md.`
- return &source.CriticalError{
- MainError: errors.Errorf(msg),
- DiagList: s.applyCriticalErrorToFiles(ctx, msg, openFiles),
- }
- }
-
- // If the user has one active go.mod file, they may still be editing files
- // in nested modules. Check the module of each open file and add warnings
- // that the nested module must be opened as a workspace folder.
- if len(s.workspace.getActiveModFiles()) == 1 {
- // Get the active root go.mod file to compare against.
- var rootModURI span.URI
- for uri := range s.workspace.getActiveModFiles() {
- rootModURI = uri
- }
- nestedModules := map[string][]source.VersionedFileHandle{}
- for _, fh := range openFiles {
- modURI := moduleForURI(s.workspace.knownModFiles, fh.URI())
- if modURI != rootModURI {
- modDir := filepath.Dir(modURI.Filename())
- nestedModules[modDir] = append(nestedModules[modDir], fh)
- }
- }
- // Add a diagnostic to each file in a nested module to mark it as
- // "orphaned". Don't show a general diagnostic in the progress bar,
- // because the user may still want to edit a file in a nested module.
- var srcDiags []*source.Diagnostic
- for modDir, uris := range nestedModules {
- msg := fmt.Sprintf(`This file is in %s, which is a nested module in the %s module.
-gopls currently requires one module per workspace folder.
-Please open %s as a separate workspace folder.
-You can learn more here: https://github.com/golang/tools/blob/master/gopls/doc/workspace.md.
-`, modDir, filepath.Dir(rootModURI.Filename()), modDir)
- srcDiags = append(srcDiags, s.applyCriticalErrorToFiles(ctx, msg, uris)...)
- }
- if len(srcDiags) != 0 {
- return &source.CriticalError{
- MainError: errors.Errorf(`You are working in a nested module.
-Please open it as a separate workspace folder. Learn more:
-https://github.com/golang/tools/blob/master/gopls/doc/workspace.md.`),
- DiagList: srcDiags,
- }
- }
- }
- return nil
-}
-
-func (s *snapshot) applyCriticalErrorToFiles(ctx context.Context, msg string, files []source.VersionedFileHandle) []*source.Diagnostic {
- var srcDiags []*source.Diagnostic
- for _, fh := range files {
- // Place the diagnostics on the package or module declarations.
- var rng protocol.Range
- switch s.view.FileKind(fh) {
- case source.Go:
- if pgf, err := s.ParseGo(ctx, fh, source.ParseHeader); err == nil {
- pkgDecl := span.NewRange(s.FileSet(), pgf.File.Package, pgf.File.Name.End())
- if spn, err := pkgDecl.Span(); err == nil {
- rng, _ = pgf.Mapper.Range(spn)
- }
- }
- case source.Mod:
- if pmf, err := s.ParseMod(ctx, fh); err == nil {
- if pmf.File.Module != nil && pmf.File.Module.Syntax != nil {
- rng, _ = rangeFromPositions(pmf.Mapper, pmf.File.Module.Syntax.Start, pmf.File.Module.Syntax.End)
- }
- }
- }
- srcDiags = append(srcDiags, &source.Diagnostic{
- URI: fh.URI(),
- Range: rng,
- Severity: protocol.SeverityError,
- Source: source.ListError,
- Message: msg,
- })
- }
- return srcDiags
-}
-
-type workspaceDirKey string
-
-type workspaceDirData struct {
- dir string
- err error
-}
-
-// getWorkspaceDir gets the URI for the workspace directory associated with
-// this snapshot. The workspace directory is a temp directory containing the
-// go.mod file computed from all active modules.
-func (s *snapshot) getWorkspaceDir(ctx context.Context) (span.URI, error) {
- s.mu.Lock()
- h := s.workspaceDirHandle
- s.mu.Unlock()
- if h != nil {
- return getWorkspaceDir(ctx, h, s.generation)
- }
- file, err := s.workspace.modFile(ctx, s)
- if err != nil {
- return "", err
- }
- hash := sha256.New()
- modContent, err := file.Format()
- if err != nil {
- return "", err
- }
- sumContent, err := s.workspace.sumFile(ctx, s)
- if err != nil {
- return "", err
- }
- hash.Write(modContent)
- hash.Write(sumContent)
- key := workspaceDirKey(hash.Sum(nil))
- s.mu.Lock()
- h = s.generation.Bind(key, func(context.Context, memoize.Arg) interface{} {
- tmpdir, err := ioutil.TempDir("", "gopls-workspace-mod")
- if err != nil {
- return &workspaceDirData{err: err}
- }
-
- for name, content := range map[string][]byte{
- "go.mod": modContent,
- "go.sum": sumContent,
- } {
- filename := filepath.Join(tmpdir, name)
- if err := ioutil.WriteFile(filename, content, 0644); err != nil {
- os.RemoveAll(tmpdir)
- return &workspaceDirData{err: err}
- }
- }
-
- return &workspaceDirData{dir: tmpdir}
- }, func(v interface{}) {
- d := v.(*workspaceDirData)
- if d.dir != "" {
- if err := os.RemoveAll(d.dir); err != nil {
- event.Error(context.Background(), "cleaning workspace dir", err)
- }
- }
- })
- s.workspaceDirHandle = h
- s.mu.Unlock()
- return getWorkspaceDir(ctx, h, s.generation)
-}
-
-func getWorkspaceDir(ctx context.Context, h *memoize.Handle, g *memoize.Generation) (span.URI, error) {
- v, err := h.Get(ctx, g, nil)
- if err != nil {
- return "", err
- }
- return span.URIFromPath(v.(*workspaceDirData).dir), nil
-}
-
-// setMetadataLocked extracts metadata from pkg and records it in s. It
-// recurses through pkg.Imports to ensure that metadata exists for all
-// dependencies.
-func (s *snapshot) setMetadataLocked(ctx context.Context, pkgPath PackagePath, pkg *packages.Package, cfg *packages.Config, query []string, seen map[PackageID]struct{}) (*Metadata, error) {
- id := PackageID(pkg.ID)
- if source.IsCommandLineArguments(pkg.ID) {
- suffix := ":" + strings.Join(query, ",")
- id = PackageID(string(id) + suffix)
- pkgPath = PackagePath(string(pkgPath) + suffix)
- }
- if _, ok := seen[id]; ok {
- return nil, errors.Errorf("import cycle detected: %q", id)
- }
- // Recreate the metadata rather than reusing it to avoid locking.
- m := &Metadata{
- ID: id,
- PkgPath: pkgPath,
- Name: PackageName(pkg.Name),
- ForTest: PackagePath(packagesinternal.GetForTest(pkg)),
- TypesSizes: pkg.TypesSizes,
- Config: cfg,
- Module: pkg.Module,
- depsErrors: packagesinternal.GetDepsErrors(pkg),
- }
-
- for _, err := range pkg.Errors {
- // Filter out parse errors from go list. We'll get them when we
- // actually parse, and buggy overlay support may generate spurious
- // errors. (See TestNewModule_Issue38207.)
- if strings.Contains(err.Msg, "expected '") {
- continue
- }
- m.Errors = append(m.Errors, err)
- }
-
- uris := map[span.URI]struct{}{}
- for _, filename := range pkg.CompiledGoFiles {
- uri := span.URIFromPath(filename)
- m.CompiledGoFiles = append(m.CompiledGoFiles, uri)
- uris[uri] = struct{}{}
- }
- for _, filename := range pkg.GoFiles {
- uri := span.URIFromPath(filename)
- m.GoFiles = append(m.GoFiles, uri)
- uris[uri] = struct{}{}
- }
- s.updateIDForURIsLocked(id, uris)
-
- // TODO(rstambler): is this still necessary?
- copied := map[PackageID]struct{}{
- id: {},
- }
- for k, v := range seen {
- copied[k] = v
- }
- for importPath, importPkg := range pkg.Imports {
- importPkgPath := PackagePath(importPath)
- importID := PackageID(importPkg.ID)
-
- m.Deps = append(m.Deps, importID)
-
- // Don't remember any imports with significant errors.
- if importPkgPath != "unsafe" && len(importPkg.CompiledGoFiles) == 0 {
- if m.MissingDeps == nil {
- m.MissingDeps = make(map[PackagePath]struct{})
- }
- m.MissingDeps[importPkgPath] = struct{}{}
- continue
- }
- if s.noValidMetadataForIDLocked(importID) {
- if _, err := s.setMetadataLocked(ctx, importPkgPath, importPkg, cfg, query, copied); err != nil {
- event.Error(ctx, "error in dependency", err)
- }
- }
- }
-
- // Add the metadata to the cache.
-
- // If we've already set the metadata for this snapshot, reuse it.
- if original, ok := s.metadata[m.ID]; ok && original.Valid {
- // Since we've just reloaded, clear out shouldLoad.
- original.ShouldLoad = false
- m = original.Metadata
- } else {
- s.metadata[m.ID] = &KnownMetadata{
- Metadata: m,
- Valid: true,
- }
- // Invalidate any packages we may have associated with this metadata.
- for _, mode := range []source.ParseMode{source.ParseHeader, source.ParseExported, source.ParseFull} {
- key := packageKey{mode, m.ID}
- delete(s.packages, key)
- }
- }
-
- // Set the workspace packages. If any of the package's files belong to the
- // view, then the package may be a workspace package.
- for _, uri := range append(m.CompiledGoFiles, m.GoFiles...) {
- if !s.view.contains(uri) {
- continue
- }
-
- // The package's files are in this view. It may be a workspace package.
- if strings.Contains(string(uri), "/vendor/") {
- // Vendored packages are not likely to be interesting to the user.
- continue
- }
-
- switch {
- case m.ForTest == "":
- // A normal package.
- s.workspacePackages[m.ID] = pkgPath
- case m.ForTest == m.PkgPath, m.ForTest+"_test" == m.PkgPath:
- // The test variant of some workspace package or its x_test.
- // To load it, we need to load the non-test variant with -test.
- s.workspacePackages[m.ID] = m.ForTest
- default:
- // A test variant of some intermediate package. We don't care about it.
- m.IsIntermediateTestVariant = true
- }
- }
- return m, nil
-}
-
-func isTestMain(pkg *packages.Package, gocache string) bool {
- // Test mains must have an import path that ends with ".test".
- if !strings.HasSuffix(pkg.PkgPath, ".test") {
- return false
- }
- // Test main packages are always named "main".
- if pkg.Name != "main" {
- return false
- }
- // Test mains always have exactly one GoFile that is in the build cache.
- if len(pkg.GoFiles) > 1 {
- return false
- }
- if !source.InDir(gocache, pkg.GoFiles[0]) {
- return false
- }
- return true
-}
diff --git a/internal/lsp/cache/metadata.go b/internal/lsp/cache/metadata.go
deleted file mode 100644
index 618578dd8..000000000
--- a/internal/lsp/cache/metadata.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cache
-
-import (
- "go/types"
-
- "golang.org/x/tools/go/packages"
- "golang.org/x/tools/internal/packagesinternal"
- "golang.org/x/tools/internal/span"
-)
-
-// Declare explicit types for package paths, names, and IDs to ensure that we
-// never use an ID where a path belongs, and vice versa. If we confused these,
-// it would result in confusing errors because package IDs often look like
-// package paths.
-type (
- PackageID string
- PackagePath string
- PackageName string
-)
-
-// Metadata holds package Metadata extracted from a call to packages.Load.
-type Metadata struct {
- ID PackageID
- PkgPath PackagePath
- Name PackageName
- GoFiles []span.URI
- CompiledGoFiles []span.URI
- ForTest PackagePath
- TypesSizes types.Sizes
- Errors []packages.Error
- Deps []PackageID
- MissingDeps map[PackagePath]struct{}
- Module *packages.Module
- depsErrors []*packagesinternal.PackageError
-
- // Config is the *packages.Config associated with the loaded package.
- Config *packages.Config
-
- // IsIntermediateTestVariant reports whether the given package is an
- // intermediate test variant, e.g.
- // "golang.org/x/tools/internal/lsp/cache [golang.org/x/tools/internal/lsp/source.test]".
- IsIntermediateTestVariant bool
-}
-
-// Name implements the source.Metadata interface.
-func (m *Metadata) PackageName() string {
- return string(m.Name)
-}
-
-// PkgPath implements the source.Metadata interface.
-func (m *Metadata) PackagePath() string {
- return string(m.PkgPath)
-}
-
-// ModuleInfo implements the source.Metadata interface.
-func (m *Metadata) ModuleInfo() *packages.Module {
- return m.Module
-}
-
-// KnownMetadata is a wrapper around metadata that tracks its validity.
-type KnownMetadata struct {
- *Metadata
-
- // Valid is true if the given metadata is Valid.
- // Invalid metadata can still be used if a metadata reload fails.
- Valid bool
-
- // ShouldLoad is true if the given metadata should be reloaded.
- ShouldLoad bool
-}
diff --git a/internal/lsp/cache/mod.go b/internal/lsp/cache/mod.go
deleted file mode 100644
index 8a2d42abc..000000000
--- a/internal/lsp/cache/mod.go
+++ /dev/null
@@ -1,516 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cache
-
-import (
- "context"
- "fmt"
- "path/filepath"
- "regexp"
- "strings"
-
- "golang.org/x/mod/modfile"
- "golang.org/x/mod/module"
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/gocommand"
- "golang.org/x/tools/internal/lsp/command"
- "golang.org/x/tools/internal/lsp/debug/tag"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/memoize"
- "golang.org/x/tools/internal/span"
-)
-
-type parseModHandle struct {
- handle *memoize.Handle
-}
-
-type parseModData struct {
- parsed *source.ParsedModule
-
- // err is any error encountered while parsing the file.
- err error
-}
-
-func (mh *parseModHandle) parse(ctx context.Context, snapshot *snapshot) (*source.ParsedModule, error) {
- v, err := mh.handle.Get(ctx, snapshot.generation, snapshot)
- if err != nil {
- return nil, err
- }
- data := v.(*parseModData)
- return data.parsed, data.err
-}
-
-func (s *snapshot) ParseMod(ctx context.Context, modFH source.FileHandle) (*source.ParsedModule, error) {
- if handle := s.getParseModHandle(modFH.URI()); handle != nil {
- return handle.parse(ctx, s)
- }
- h := s.generation.Bind(modFH.FileIdentity(), func(ctx context.Context, _ memoize.Arg) interface{} {
- _, done := event.Start(ctx, "cache.ParseModHandle", tag.URI.Of(modFH.URI()))
- defer done()
-
- contents, err := modFH.Read()
- if err != nil {
- return &parseModData{err: err}
- }
- m := &protocol.ColumnMapper{
- URI: modFH.URI(),
- Converter: span.NewContentConverter(modFH.URI().Filename(), contents),
- Content: contents,
- }
- file, parseErr := modfile.Parse(modFH.URI().Filename(), contents, nil)
- // Attempt to convert the error to a standardized parse error.
- var parseErrors []*source.Diagnostic
- if parseErr != nil {
- mfErrList, ok := parseErr.(modfile.ErrorList)
- if !ok {
- return &parseModData{err: fmt.Errorf("unexpected parse error type %v", parseErr)}
- }
- for _, mfErr := range mfErrList {
- rng, err := rangeFromPositions(m, mfErr.Pos, mfErr.Pos)
- if err != nil {
- return &parseModData{err: err}
- }
- parseErrors = append(parseErrors, &source.Diagnostic{
- URI: modFH.URI(),
- Range: rng,
- Severity: protocol.SeverityError,
- Source: source.ParseError,
- Message: mfErr.Err.Error(),
- })
- }
- }
- return &parseModData{
- parsed: &source.ParsedModule{
- URI: modFH.URI(),
- Mapper: m,
- File: file,
- ParseErrors: parseErrors,
- },
- err: parseErr,
- }
- }, nil)
-
- pmh := &parseModHandle{handle: h}
- s.mu.Lock()
- s.parseModHandles[modFH.URI()] = pmh
- s.mu.Unlock()
-
- return pmh.parse(ctx, s)
-}
-
-type parseWorkHandle struct {
- handle *memoize.Handle
-}
-
-type parseWorkData struct {
- parsed *source.ParsedWorkFile
-
- // err is any error encountered while parsing the file.
- err error
-}
-
-func (mh *parseWorkHandle) parse(ctx context.Context, snapshot *snapshot) (*source.ParsedWorkFile, error) {
- v, err := mh.handle.Get(ctx, snapshot.generation, snapshot)
- if err != nil {
- return nil, err
- }
- data := v.(*parseWorkData)
- return data.parsed, data.err
-}
-
-func (s *snapshot) ParseWork(ctx context.Context, modFH source.FileHandle) (*source.ParsedWorkFile, error) {
- if handle := s.getParseWorkHandle(modFH.URI()); handle != nil {
- return handle.parse(ctx, s)
- }
- h := s.generation.Bind(modFH.FileIdentity(), func(ctx context.Context, _ memoize.Arg) interface{} {
- _, done := event.Start(ctx, "cache.ParseModHandle", tag.URI.Of(modFH.URI()))
- defer done()
-
- contents, err := modFH.Read()
- if err != nil {
- return &parseWorkData{err: err}
- }
- m := &protocol.ColumnMapper{
- URI: modFH.URI(),
- Converter: span.NewContentConverter(modFH.URI().Filename(), contents),
- Content: contents,
- }
- file, parseErr := modfile.ParseWork(modFH.URI().Filename(), contents, nil)
- // Attempt to convert the error to a standardized parse error.
- var parseErrors []*source.Diagnostic
- if parseErr != nil {
- mfErrList, ok := parseErr.(modfile.ErrorList)
- if !ok {
- return &parseWorkData{err: fmt.Errorf("unexpected parse error type %v", parseErr)}
- }
- for _, mfErr := range mfErrList {
- rng, err := rangeFromPositions(m, mfErr.Pos, mfErr.Pos)
- if err != nil {
- return &parseWorkData{err: err}
- }
- parseErrors = append(parseErrors, &source.Diagnostic{
- URI: modFH.URI(),
- Range: rng,
- Severity: protocol.SeverityError,
- Source: source.ParseError,
- Message: mfErr.Err.Error(),
- })
- }
- }
- return &parseWorkData{
- parsed: &source.ParsedWorkFile{
- URI: modFH.URI(),
- Mapper: m,
- File: file,
- ParseErrors: parseErrors,
- },
- err: parseErr,
- }
- }, nil)
-
- pwh := &parseWorkHandle{handle: h}
- s.mu.Lock()
- s.parseWorkHandles[modFH.URI()] = pwh
- s.mu.Unlock()
-
- return pwh.parse(ctx, s)
-}
-
-// goSum reads the go.sum file for the go.mod file at modURI, if it exists. If
-// it doesn't exist, it returns nil.
-func (s *snapshot) goSum(ctx context.Context, modURI span.URI) []byte {
- // Get the go.sum file, either from the snapshot or directly from the
- // cache. Avoid (*snapshot).GetFile here, as we don't want to add
- // nonexistent file handles to the snapshot if the file does not exist.
- sumURI := span.URIFromPath(sumFilename(modURI))
- var sumFH source.FileHandle = s.FindFile(sumURI)
- if sumFH == nil {
- var err error
- sumFH, err = s.view.session.cache.getFile(ctx, sumURI)
- if err != nil {
- return nil
- }
- }
- content, err := sumFH.Read()
- if err != nil {
- return nil
- }
- return content
-}
-
-func sumFilename(modURI span.URI) string {
- return strings.TrimSuffix(modURI.Filename(), ".mod") + ".sum"
-}
-
-// modKey is uniquely identifies cached data for `go mod why` or dependencies
-// to upgrade.
-type modKey struct {
- sessionID, env, view string
- mod source.FileIdentity
- verb modAction
-}
-
-type modAction int
-
-const (
- why modAction = iota
- upgrade
-)
-
-type modWhyHandle struct {
- handle *memoize.Handle
-}
-
-type modWhyData struct {
- // why keeps track of the `go mod why` results for each require statement
- // in the go.mod file.
- why map[string]string
-
- err error
-}
-
-func (mwh *modWhyHandle) why(ctx context.Context, snapshot *snapshot) (map[string]string, error) {
- v, err := mwh.handle.Get(ctx, snapshot.generation, snapshot)
- if err != nil {
- return nil, err
- }
- data := v.(*modWhyData)
- return data.why, data.err
-}
-
-func (s *snapshot) ModWhy(ctx context.Context, fh source.FileHandle) (map[string]string, error) {
- if s.View().FileKind(fh) != source.Mod {
- return nil, fmt.Errorf("%s is not a go.mod file", fh.URI())
- }
- if handle := s.getModWhyHandle(fh.URI()); handle != nil {
- return handle.why(ctx, s)
- }
- key := modKey{
- sessionID: s.view.session.id,
- env: hashEnv(s),
- mod: fh.FileIdentity(),
- view: s.view.rootURI.Filename(),
- verb: why,
- }
- h := s.generation.Bind(key, func(ctx context.Context, arg memoize.Arg) interface{} {
- ctx, done := event.Start(ctx, "cache.ModWhyHandle", tag.URI.Of(fh.URI()))
- defer done()
-
- snapshot := arg.(*snapshot)
-
- pm, err := snapshot.ParseMod(ctx, fh)
- if err != nil {
- return &modWhyData{err: err}
- }
- // No requires to explain.
- if len(pm.File.Require) == 0 {
- return &modWhyData{}
- }
- // Run `go mod why` on all the dependencies.
- inv := &gocommand.Invocation{
- Verb: "mod",
- Args: []string{"why", "-m"},
- WorkingDir: filepath.Dir(fh.URI().Filename()),
- }
- for _, req := range pm.File.Require {
- inv.Args = append(inv.Args, req.Mod.Path)
- }
- stdout, err := snapshot.RunGoCommandDirect(ctx, source.Normal, inv)
- if err != nil {
- return &modWhyData{err: err}
- }
- whyList := strings.Split(stdout.String(), "\n\n")
- if len(whyList) != len(pm.File.Require) {
- return &modWhyData{
- err: fmt.Errorf("mismatched number of results: got %v, want %v", len(whyList), len(pm.File.Require)),
- }
- }
- why := make(map[string]string, len(pm.File.Require))
- for i, req := range pm.File.Require {
- why[req.Mod.Path] = whyList[i]
- }
- return &modWhyData{why: why}
- }, nil)
-
- mwh := &modWhyHandle{handle: h}
- s.mu.Lock()
- s.modWhyHandles[fh.URI()] = mwh
- s.mu.Unlock()
-
- return mwh.why(ctx, s)
-}
-
-// extractGoCommandError tries to parse errors that come from the go command
-// and shape them into go.mod diagnostics.
-func (s *snapshot) extractGoCommandErrors(ctx context.Context, goCmdError string) ([]*source.Diagnostic, error) {
- diagLocations := map[*source.ParsedModule]span.Span{}
- backupDiagLocations := map[*source.ParsedModule]span.Span{}
-
- // The go command emits parse errors for completely invalid go.mod files.
- // Those are reported by our own diagnostics and can be ignored here.
- // As of writing, we are not aware of any other errors that include
- // file/position information, so don't even try to find it.
- if strings.Contains(goCmdError, "errors parsing go.mod") {
- return nil, nil
- }
-
- // Match the error against all the mod files in the workspace.
- for _, uri := range s.ModFiles() {
- fh, err := s.GetFile(ctx, uri)
- if err != nil {
- return nil, err
- }
- pm, err := s.ParseMod(ctx, fh)
- if err != nil {
- return nil, err
- }
- spn, found, err := s.matchErrorToModule(ctx, pm, goCmdError)
- if err != nil {
- return nil, err
- }
- if found {
- diagLocations[pm] = spn
- } else {
- backupDiagLocations[pm] = spn
- }
- }
-
- // If we didn't find any good matches, assign diagnostics to all go.mod files.
- if len(diagLocations) == 0 {
- diagLocations = backupDiagLocations
- }
-
- var srcErrs []*source.Diagnostic
- for pm, spn := range diagLocations {
- diag, err := s.goCommandDiagnostic(pm, spn, goCmdError)
- if err != nil {
- return nil, err
- }
- srcErrs = append(srcErrs, diag)
- }
- return srcErrs, nil
-}
-
-var moduleVersionInErrorRe = regexp.MustCompile(`[:\s]([+-._~0-9A-Za-z]+)@([+-._~0-9A-Za-z]+)[:\s]`)
-
-// matchErrorToModule matches a go command error message to a go.mod file.
-// Some examples:
-//
-// example.com@v1.2.2: reading example.com/@v/v1.2.2.mod: no such file or directory
-// go: github.com/cockroachdb/apd/v2@v2.0.72: reading github.com/cockroachdb/apd/go.mod at revision v2.0.72: unknown revision v2.0.72
-// go: example.com@v1.2.3 requires\n\trandom.org@v1.2.3: parsing go.mod:\n\tmodule declares its path as: bob.org\n\tbut was required as: random.org
-//
-// It returns the location of a reference to the one of the modules and true
-// if one exists. If none is found it returns a fallback location and false.
-func (s *snapshot) matchErrorToModule(ctx context.Context, pm *source.ParsedModule, goCmdError string) (span.Span, bool, error) {
- var reference *modfile.Line
- matches := moduleVersionInErrorRe.FindAllStringSubmatch(goCmdError, -1)
-
- for i := len(matches) - 1; i >= 0; i-- {
- ver := module.Version{Path: matches[i][1], Version: matches[i][2]}
- // Any module versions that come from the workspace module should not
- // be shown to the user.
- if source.IsWorkspaceModuleVersion(ver.Version) {
- continue
- }
- if err := module.Check(ver.Path, ver.Version); err != nil {
- continue
- }
- reference = findModuleReference(pm.File, ver)
- if reference != nil {
- break
- }
- }
-
- if reference == nil {
- // No match for the module path was found in the go.mod file.
- // Show the error on the module declaration, if one exists, or
- // just the first line of the file.
- if pm.File.Module == nil {
- return span.New(pm.URI, span.NewPoint(1, 1, 0), span.Point{}), false, nil
- }
- spn, err := spanFromPositions(pm.Mapper, pm.File.Module.Syntax.Start, pm.File.Module.Syntax.End)
- return spn, false, err
- }
-
- spn, err := spanFromPositions(pm.Mapper, reference.Start, reference.End)
- return spn, true, err
-}
-
-// goCommandDiagnostic creates a diagnostic for a given go command error.
-func (s *snapshot) goCommandDiagnostic(pm *source.ParsedModule, spn span.Span, goCmdError string) (*source.Diagnostic, error) {
- rng, err := pm.Mapper.Range(spn)
- if err != nil {
- return nil, err
- }
-
- matches := moduleVersionInErrorRe.FindAllStringSubmatch(goCmdError, -1)
- var innermost *module.Version
- for i := len(matches) - 1; i >= 0; i-- {
- ver := module.Version{Path: matches[i][1], Version: matches[i][2]}
- // Any module versions that come from the workspace module should not
- // be shown to the user.
- if source.IsWorkspaceModuleVersion(ver.Version) {
- continue
- }
- if err := module.Check(ver.Path, ver.Version); err != nil {
- continue
- }
- innermost = &ver
- break
- }
-
- switch {
- case strings.Contains(goCmdError, "inconsistent vendoring"):
- cmd, err := command.NewVendorCommand("Run go mod vendor", command.URIArg{URI: protocol.URIFromSpanURI(pm.URI)})
- if err != nil {
- return nil, err
- }
- return &source.Diagnostic{
- URI: pm.URI,
- Range: rng,
- Severity: protocol.SeverityError,
- Source: source.ListError,
- Message: `Inconsistent vendoring detected. Please re-run "go mod vendor".
-See https://github.com/golang/go/issues/39164 for more detail on this issue.`,
- SuggestedFixes: []source.SuggestedFix{source.SuggestedFixFromCommand(cmd, protocol.QuickFix)},
- }, nil
-
- case strings.Contains(goCmdError, "updates to go.sum needed"), strings.Contains(goCmdError, "missing go.sum entry"):
- var args []protocol.DocumentURI
- for _, uri := range s.ModFiles() {
- args = append(args, protocol.URIFromSpanURI(uri))
- }
- tidyCmd, err := command.NewTidyCommand("Run go mod tidy", command.URIArgs{URIs: args})
- if err != nil {
- return nil, err
- }
- updateCmd, err := command.NewUpdateGoSumCommand("Update go.sum", command.URIArgs{URIs: args})
- if err != nil {
- return nil, err
- }
- msg := "go.sum is out of sync with go.mod. Please update it by applying the quick fix."
- if innermost != nil {
- msg = fmt.Sprintf("go.sum is out of sync with go.mod: entry for %v is missing. Please updating it by applying the quick fix.", innermost)
- }
- return &source.Diagnostic{
- URI: pm.URI,
- Range: rng,
- Severity: protocol.SeverityError,
- Source: source.ListError,
- Message: msg,
- SuggestedFixes: []source.SuggestedFix{
- source.SuggestedFixFromCommand(tidyCmd, protocol.QuickFix),
- source.SuggestedFixFromCommand(updateCmd, protocol.QuickFix),
- },
- }, nil
- case strings.Contains(goCmdError, "disabled by GOPROXY=off") && innermost != nil:
- title := fmt.Sprintf("Download %v@%v", innermost.Path, innermost.Version)
- cmd, err := command.NewAddDependencyCommand(title, command.DependencyArgs{
- URI: protocol.URIFromSpanURI(pm.URI),
- AddRequire: false,
- GoCmdArgs: []string{fmt.Sprintf("%v@%v", innermost.Path, innermost.Version)},
- })
- if err != nil {
- return nil, err
- }
- return &source.Diagnostic{
- URI: pm.URI,
- Range: rng,
- Severity: protocol.SeverityError,
- Message: fmt.Sprintf("%v@%v has not been downloaded", innermost.Path, innermost.Version),
- Source: source.ListError,
- SuggestedFixes: []source.SuggestedFix{source.SuggestedFixFromCommand(cmd, protocol.QuickFix)},
- }, nil
- default:
- return &source.Diagnostic{
- URI: pm.URI,
- Range: rng,
- Severity: protocol.SeverityError,
- Source: source.ListError,
- Message: goCmdError,
- }, nil
- }
-}
-
-func findModuleReference(mf *modfile.File, ver module.Version) *modfile.Line {
- for _, req := range mf.Require {
- if req.Mod == ver {
- return req.Syntax
- }
- }
- for _, ex := range mf.Exclude {
- if ex.Mod == ver {
- return ex.Syntax
- }
- }
- for _, rep := range mf.Replace {
- if rep.New == ver || rep.Old == ver {
- return rep.Syntax
- }
- }
- return nil
-}
diff --git a/internal/lsp/cache/mod_tidy.go b/internal/lsp/cache/mod_tidy.go
deleted file mode 100644
index e85f6510b..000000000
--- a/internal/lsp/cache/mod_tidy.go
+++ /dev/null
@@ -1,500 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cache
-
-import (
- "context"
- "fmt"
- "go/ast"
- "io/ioutil"
- "os"
- "path/filepath"
- "sort"
- "strconv"
- "strings"
-
- "golang.org/x/mod/modfile"
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/gocommand"
- "golang.org/x/tools/internal/lsp/command"
- "golang.org/x/tools/internal/lsp/debug/tag"
- "golang.org/x/tools/internal/lsp/diff"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/memoize"
- "golang.org/x/tools/internal/span"
-)
-
-type modTidyKey struct {
- sessionID string
- env string
- gomod source.FileIdentity
- imports string
- unsavedOverlays string
- view string
-}
-
-type modTidyHandle struct {
- handle *memoize.Handle
-}
-
-type modTidyData struct {
- tidied *source.TidiedModule
- err error
-}
-
-func (mth *modTidyHandle) tidy(ctx context.Context, snapshot *snapshot) (*source.TidiedModule, error) {
- v, err := mth.handle.Get(ctx, snapshot.generation, snapshot)
- if err != nil {
- return nil, err
- }
- data := v.(*modTidyData)
- return data.tidied, data.err
-}
-
-func (s *snapshot) ModTidy(ctx context.Context, pm *source.ParsedModule) (*source.TidiedModule, error) {
- if pm.File == nil {
- return nil, fmt.Errorf("cannot tidy unparseable go.mod file: %v", pm.URI)
- }
- if handle := s.getModTidyHandle(pm.URI); handle != nil {
- return handle.tidy(ctx, s)
- }
- fh, err := s.GetFile(ctx, pm.URI)
- if err != nil {
- return nil, err
- }
- // If the file handle is an overlay, it may not be written to disk.
- // The go.mod file has to be on disk for `go mod tidy` to work.
- if _, ok := fh.(*overlay); ok {
- if info, _ := os.Stat(fh.URI().Filename()); info == nil {
- return nil, source.ErrNoModOnDisk
- }
- }
- if criticalErr := s.GetCriticalError(ctx); criticalErr != nil {
- return &source.TidiedModule{
- Diagnostics: criticalErr.DiagList,
- }, nil
- }
- workspacePkgs, err := s.workspacePackageHandles(ctx)
- if err != nil {
- return nil, err
- }
- importHash, err := s.hashImports(ctx, workspacePkgs)
- if err != nil {
- return nil, err
- }
-
- s.mu.Lock()
- overlayHash := hashUnsavedOverlays(s.files)
- s.mu.Unlock()
-
- key := modTidyKey{
- sessionID: s.view.session.id,
- view: s.view.folder.Filename(),
- imports: importHash,
- unsavedOverlays: overlayHash,
- gomod: fh.FileIdentity(),
- env: hashEnv(s),
- }
- h := s.generation.Bind(key, func(ctx context.Context, arg memoize.Arg) interface{} {
- ctx, done := event.Start(ctx, "cache.ModTidyHandle", tag.URI.Of(fh.URI()))
- defer done()
-
- snapshot := arg.(*snapshot)
- inv := &gocommand.Invocation{
- Verb: "mod",
- Args: []string{"tidy"},
- WorkingDir: filepath.Dir(fh.URI().Filename()),
- }
- tmpURI, inv, cleanup, err := snapshot.goCommandInvocation(ctx, source.WriteTemporaryModFile, inv)
- if err != nil {
- return &modTidyData{err: err}
- }
- // Keep the temporary go.mod file around long enough to parse it.
- defer cleanup()
-
- if _, err := s.view.session.gocmdRunner.Run(ctx, *inv); err != nil {
- return &modTidyData{err: err}
- }
- // Go directly to disk to get the temporary mod file, since it is
- // always on disk.
- tempContents, err := ioutil.ReadFile(tmpURI.Filename())
- if err != nil {
- return &modTidyData{err: err}
- }
- ideal, err := modfile.Parse(tmpURI.Filename(), tempContents, nil)
- if err != nil {
- // We do not need to worry about the temporary file's parse errors
- // since it has been "tidied".
- return &modTidyData{err: err}
- }
- // Compare the original and tidied go.mod files to compute errors and
- // suggested fixes.
- diagnostics, err := modTidyDiagnostics(ctx, snapshot, pm, ideal, workspacePkgs)
- if err != nil {
- return &modTidyData{err: err}
- }
- return &modTidyData{
- tidied: &source.TidiedModule{
- Diagnostics: diagnostics,
- TidiedContent: tempContents,
- },
- }
- }, nil)
-
- mth := &modTidyHandle{handle: h}
- s.mu.Lock()
- s.modTidyHandles[fh.URI()] = mth
- s.mu.Unlock()
-
- return mth.tidy(ctx, s)
-}
-
-func (s *snapshot) hashImports(ctx context.Context, wsPackages []*packageHandle) (string, error) {
- seen := map[string]struct{}{}
- var imports []string
- for _, ph := range wsPackages {
- for _, imp := range ph.imports(ctx, s) {
- if _, ok := seen[imp]; !ok {
- imports = append(imports, imp)
- seen[imp] = struct{}{}
- }
- }
- }
- sort.Strings(imports)
- hashed := strings.Join(imports, ",")
- return hashContents([]byte(hashed)), nil
-}
-
-// modTidyDiagnostics computes the differences between the original and tidied
-// go.mod files to produce diagnostic and suggested fixes. Some diagnostics
-// may appear on the Go files that import packages from missing modules.
-func modTidyDiagnostics(ctx context.Context, snapshot source.Snapshot, pm *source.ParsedModule, ideal *modfile.File, workspacePkgs []*packageHandle) (diagnostics []*source.Diagnostic, err error) {
- // First, determine which modules are unused and which are missing from the
- // original go.mod file.
- var (
- unused = make(map[string]*modfile.Require, len(pm.File.Require))
- missing = make(map[string]*modfile.Require, len(ideal.Require))
- wrongDirectness = make(map[string]*modfile.Require, len(pm.File.Require))
- )
- for _, req := range pm.File.Require {
- unused[req.Mod.Path] = req
- }
- for _, req := range ideal.Require {
- origReq := unused[req.Mod.Path]
- if origReq == nil {
- missing[req.Mod.Path] = req
- continue
- } else if origReq.Indirect != req.Indirect {
- wrongDirectness[req.Mod.Path] = origReq
- }
- delete(unused, req.Mod.Path)
- }
- for _, req := range wrongDirectness {
- // Handle dependencies that are incorrectly labeled indirect and
- // vice versa.
- srcDiag, err := directnessDiagnostic(pm.Mapper, req, snapshot.View().Options().ComputeEdits)
- if err != nil {
- // We're probably in a bad state if we can't compute a
- // directnessDiagnostic, but try to keep going so as to not suppress
- // other, valid diagnostics.
- event.Error(ctx, "computing directness diagnostic", err)
- continue
- }
- diagnostics = append(diagnostics, srcDiag)
- }
- // Next, compute any diagnostics for modules that are missing from the
- // go.mod file. The fixes will be for the go.mod file, but the
- // diagnostics should also appear in both the go.mod file and the import
- // statements in the Go files in which the dependencies are used.
- missingModuleFixes := map[*modfile.Require][]source.SuggestedFix{}
- for _, req := range missing {
- srcDiag, err := missingModuleDiagnostic(pm, req)
- if err != nil {
- return nil, err
- }
- missingModuleFixes[req] = srcDiag.SuggestedFixes
- diagnostics = append(diagnostics, srcDiag)
- }
- // Add diagnostics for missing modules anywhere they are imported in the
- // workspace.
- for _, ph := range workspacePkgs {
- missingImports := map[string]*modfile.Require{}
-
- // If -mod=readonly is not set we may have successfully imported
- // packages from missing modules. Otherwise they'll be in
- // MissingDependencies. Combine both.
- importedPkgs := ph.imports(ctx, snapshot)
-
- for _, imp := range importedPkgs {
- if req, ok := missing[imp]; ok {
- missingImports[imp] = req
- break
- }
- // If the import is a package of the dependency, then add the
- // package to the map, this will eliminate the need to do this
- // prefix package search on each import for each file.
- // Example:
- //
- // import (
- // "golang.org/x/tools/go/expect"
- // "golang.org/x/tools/go/packages"
- // )
- // They both are related to the same module: "golang.org/x/tools".
- var match string
- for _, req := range ideal.Require {
- if strings.HasPrefix(imp, req.Mod.Path) && len(req.Mod.Path) > len(match) {
- match = req.Mod.Path
- }
- }
- if req, ok := missing[match]; ok {
- missingImports[imp] = req
- }
- }
- // None of this package's imports are from missing modules.
- if len(missingImports) == 0 {
- continue
- }
- for _, pgh := range ph.compiledGoFiles {
- pgf, err := snapshot.ParseGo(ctx, pgh.file, source.ParseHeader)
- if err != nil {
- continue
- }
- file, m := pgf.File, pgf.Mapper
- if file == nil || m == nil {
- continue
- }
- imports := make(map[string]*ast.ImportSpec)
- for _, imp := range file.Imports {
- if imp.Path == nil {
- continue
- }
- if target, err := strconv.Unquote(imp.Path.Value); err == nil {
- imports[target] = imp
- }
- }
- if len(imports) == 0 {
- continue
- }
- for importPath, req := range missingImports {
- imp, ok := imports[importPath]
- if !ok {
- continue
- }
- fixes, ok := missingModuleFixes[req]
- if !ok {
- return nil, fmt.Errorf("no missing module fix for %q (%q)", importPath, req.Mod.Path)
- }
- srcErr, err := missingModuleForImport(snapshot, m, imp, req, fixes)
- if err != nil {
- return nil, err
- }
- diagnostics = append(diagnostics, srcErr)
- }
- }
- }
- // Finally, add errors for any unused dependencies.
- onlyDiagnostic := len(diagnostics) == 0 && len(unused) == 1
- for _, req := range unused {
- srcErr, err := unusedDiagnostic(pm.Mapper, req, onlyDiagnostic)
- if err != nil {
- return nil, err
- }
- diagnostics = append(diagnostics, srcErr)
- }
- return diagnostics, nil
-}
-
-// unusedDiagnostic returns a source.Diagnostic for an unused require.
-func unusedDiagnostic(m *protocol.ColumnMapper, req *modfile.Require, onlyDiagnostic bool) (*source.Diagnostic, error) {
- rng, err := rangeFromPositions(m, req.Syntax.Start, req.Syntax.End)
- if err != nil {
- return nil, err
- }
- title := fmt.Sprintf("Remove dependency: %s", req.Mod.Path)
- cmd, err := command.NewRemoveDependencyCommand(title, command.RemoveDependencyArgs{
- URI: protocol.URIFromSpanURI(m.URI),
- OnlyDiagnostic: onlyDiagnostic,
- ModulePath: req.Mod.Path,
- })
- if err != nil {
- return nil, err
- }
- return &source.Diagnostic{
- URI: m.URI,
- Range: rng,
- Severity: protocol.SeverityWarning,
- Source: source.ModTidyError,
- Message: fmt.Sprintf("%s is not used in this module", req.Mod.Path),
- SuggestedFixes: []source.SuggestedFix{source.SuggestedFixFromCommand(cmd, protocol.QuickFix)},
- }, nil
-}
-
-// directnessDiagnostic extracts errors when a dependency is labeled indirect when
-// it should be direct and vice versa.
-func directnessDiagnostic(m *protocol.ColumnMapper, req *modfile.Require, computeEdits diff.ComputeEdits) (*source.Diagnostic, error) {
- rng, err := rangeFromPositions(m, req.Syntax.Start, req.Syntax.End)
- if err != nil {
- return nil, err
- }
- direction := "indirect"
- if req.Indirect {
- direction = "direct"
-
- // If the dependency should be direct, just highlight the // indirect.
- if comments := req.Syntax.Comment(); comments != nil && len(comments.Suffix) > 0 {
- end := comments.Suffix[0].Start
- end.LineRune += len(comments.Suffix[0].Token)
- end.Byte += len([]byte(comments.Suffix[0].Token))
- rng, err = rangeFromPositions(m, comments.Suffix[0].Start, end)
- if err != nil {
- return nil, err
- }
- }
- }
- // If the dependency should be indirect, add the // indirect.
- edits, err := switchDirectness(req, m, computeEdits)
- if err != nil {
- return nil, err
- }
- return &source.Diagnostic{
- URI: m.URI,
- Range: rng,
- Severity: protocol.SeverityWarning,
- Source: source.ModTidyError,
- Message: fmt.Sprintf("%s should be %s", req.Mod.Path, direction),
- SuggestedFixes: []source.SuggestedFix{{
- Title: fmt.Sprintf("Change %s to %s", req.Mod.Path, direction),
- Edits: map[span.URI][]protocol.TextEdit{
- m.URI: edits,
- },
- ActionKind: protocol.QuickFix,
- }},
- }, nil
-}
-
-func missingModuleDiagnostic(pm *source.ParsedModule, req *modfile.Require) (*source.Diagnostic, error) {
- var rng protocol.Range
- // Default to the start of the file if there is no module declaration.
- if pm.File != nil && pm.File.Module != nil && pm.File.Module.Syntax != nil {
- start, end := pm.File.Module.Syntax.Span()
- var err error
- rng, err = rangeFromPositions(pm.Mapper, start, end)
- if err != nil {
- return nil, err
- }
- }
- title := fmt.Sprintf("Add %s to your go.mod file", req.Mod.Path)
- cmd, err := command.NewAddDependencyCommand(title, command.DependencyArgs{
- URI: protocol.URIFromSpanURI(pm.Mapper.URI),
- AddRequire: !req.Indirect,
- GoCmdArgs: []string{req.Mod.Path + "@" + req.Mod.Version},
- })
- if err != nil {
- return nil, err
- }
- return &source.Diagnostic{
- URI: pm.Mapper.URI,
- Range: rng,
- Severity: protocol.SeverityError,
- Source: source.ModTidyError,
- Message: fmt.Sprintf("%s is not in your go.mod file", req.Mod.Path),
- SuggestedFixes: []source.SuggestedFix{source.SuggestedFixFromCommand(cmd, protocol.QuickFix)},
- }, nil
-}
-
-// switchDirectness gets the edits needed to change an indirect dependency to
-// direct and vice versa.
-func switchDirectness(req *modfile.Require, m *protocol.ColumnMapper, computeEdits diff.ComputeEdits) ([]protocol.TextEdit, error) {
- // We need a private copy of the parsed go.mod file, since we're going to
- // modify it.
- copied, err := modfile.Parse("", m.Content, nil)
- if err != nil {
- return nil, err
- }
- // Change the directness in the matching require statement. To avoid
- // reordering the require statements, rewrite all of them.
- var requires []*modfile.Require
- seenVersions := make(map[string]string)
- for _, r := range copied.Require {
- if seen := seenVersions[r.Mod.Path]; seen != "" && seen != r.Mod.Version {
- // Avoid a panic in SetRequire below, which panics on conflicting
- // versions.
- return nil, fmt.Errorf("%q has conflicting versions: %q and %q", r.Mod.Path, seen, r.Mod.Version)
- }
- seenVersions[r.Mod.Path] = r.Mod.Version
- if r.Mod.Path == req.Mod.Path {
- requires = append(requires, &modfile.Require{
- Mod: r.Mod,
- Syntax: r.Syntax,
- Indirect: !r.Indirect,
- })
- continue
- }
- requires = append(requires, r)
- }
- copied.SetRequire(requires)
- newContent, err := copied.Format()
- if err != nil {
- return nil, err
- }
- // Calculate the edits to be made due to the change.
- diff, err := computeEdits(m.URI, string(m.Content), string(newContent))
- if err != nil {
- return nil, err
- }
- return source.ToProtocolEdits(m, diff)
-}
-
-// missingModuleForImport creates an error for a given import path that comes
-// from a missing module.
-func missingModuleForImport(snapshot source.Snapshot, m *protocol.ColumnMapper, imp *ast.ImportSpec, req *modfile.Require, fixes []source.SuggestedFix) (*source.Diagnostic, error) {
- if req.Syntax == nil {
- return nil, fmt.Errorf("no syntax for %v", req)
- }
- spn, err := span.NewRange(snapshot.FileSet(), imp.Path.Pos(), imp.Path.End()).Span()
- if err != nil {
- return nil, err
- }
- rng, err := m.Range(spn)
- if err != nil {
- return nil, err
- }
- return &source.Diagnostic{
- URI: m.URI,
- Range: rng,
- Severity: protocol.SeverityError,
- Source: source.ModTidyError,
- Message: fmt.Sprintf("%s is not in your go.mod file", req.Mod.Path),
- SuggestedFixes: fixes,
- }, nil
-}
-
-func rangeFromPositions(m *protocol.ColumnMapper, s, e modfile.Position) (protocol.Range, error) {
- spn, err := spanFromPositions(m, s, e)
- if err != nil {
- return protocol.Range{}, err
- }
- return m.Range(spn)
-}
-
-func spanFromPositions(m *protocol.ColumnMapper, s, e modfile.Position) (span.Span, error) {
- toPoint := func(offset int) (span.Point, error) {
- l, c, err := m.Converter.ToPosition(offset)
- if err != nil {
- return span.Point{}, err
- }
- return span.NewPoint(l, c, offset), nil
- }
- start, err := toPoint(s.Byte)
- if err != nil {
- return span.Span{}, err
- }
- end, err := toPoint(e.Byte)
- if err != nil {
- return span.Span{}, err
- }
- return span.New(m.URI, start, end), nil
-}
diff --git a/internal/lsp/cache/os_windows.go b/internal/lsp/cache/os_windows.go
deleted file mode 100644
index 7ff1cce74..000000000
--- a/internal/lsp/cache/os_windows.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-package cache
-
-import (
- "fmt"
- "path/filepath"
- "syscall"
-)
-
-func init() {
- checkPathCase = windowsCheckPathCase
-}
-
-func windowsCheckPathCase(path string) error {
- // Back in the day, Windows used to have short and long filenames, and
- // it still supports those APIs. GetLongPathName gets the real case for a
- // path, so we can use it here. Inspired by
- // http://stackoverflow.com/q/2113822.
-
- // Short paths can be longer than long paths, and unicode, so be generous.
- buflen := 4 * len(path)
- namep, err := syscall.UTF16PtrFromString(path)
- if err != nil {
- return err
- }
- short := make([]uint16, buflen)
- n, err := syscall.GetShortPathName(namep, &short[0], uint32(len(short)*2)) // buflen is in bytes.
- if err != nil {
- return err
- }
- if int(n) > len(short)*2 {
- return fmt.Errorf("short buffer too short: %v vs %v*2", n, len(short))
- }
- long := make([]uint16, buflen)
- n, err = syscall.GetLongPathName(&short[0], &long[0], uint32(len(long)*2))
- if err != nil {
- return err
- }
- if int(n) > len(long)*2 {
- return fmt.Errorf("long buffer too short: %v vs %v*2", n, len(long))
- }
- longstr := syscall.UTF16ToString(long)
-
- isRoot := func(p string) bool {
- return p[len(p)-1] == filepath.Separator
- }
- for got, want := path, longstr; !isRoot(got) && !isRoot(want); got, want = filepath.Dir(got), filepath.Dir(want) {
- if g, w := filepath.Base(got), filepath.Base(want); g != w {
- return fmt.Errorf("case mismatch in path %q: component %q is listed by Windows as %q", path, g, w)
- }
- }
- return nil
-}
diff --git a/internal/lsp/cache/parse.go b/internal/lsp/cache/parse.go
deleted file mode 100644
index e761373fa..000000000
--- a/internal/lsp/cache/parse.go
+++ /dev/null
@@ -1,1467 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cache
-
-import (
- "bytes"
- "context"
- "fmt"
- "go/ast"
- "go/parser"
- "go/scanner"
- "go/token"
- "go/types"
- "path/filepath"
- "reflect"
- "strconv"
- "strings"
-
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/lsp/debug/tag"
- "golang.org/x/tools/internal/lsp/diff"
- "golang.org/x/tools/internal/lsp/diff/myers"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/memoize"
- "golang.org/x/tools/internal/span"
- errors "golang.org/x/xerrors"
-)
-
-// parseKey uniquely identifies a parsed Go file.
-type parseKey struct {
- file source.FileIdentity
- mode source.ParseMode
-}
-
-type parseGoHandle struct {
- handle *memoize.Handle
- file source.FileHandle
- mode source.ParseMode
-}
-
-type parseGoData struct {
- parsed *source.ParsedGoFile
-
- // If true, we adjusted the AST to make it type check better, and
- // it may not match the source code.
- fixed bool
- err error // any other errors
-}
-
-func (s *snapshot) parseGoHandle(ctx context.Context, fh source.FileHandle, mode source.ParseMode) *parseGoHandle {
- key := parseKey{
- file: fh.FileIdentity(),
- mode: mode,
- }
- if pgh := s.getGoFile(key); pgh != nil {
- return pgh
- }
- parseHandle := s.generation.Bind(key, func(ctx context.Context, arg memoize.Arg) interface{} {
- snapshot := arg.(*snapshot)
- return parseGo(ctx, snapshot.FileSet(), fh, mode)
- }, nil)
-
- pgh := &parseGoHandle{
- handle: parseHandle,
- file: fh,
- mode: mode,
- }
- return s.addGoFile(key, pgh)
-}
-
-func (pgh *parseGoHandle) String() string {
- return pgh.File().URI().Filename()
-}
-
-func (pgh *parseGoHandle) File() source.FileHandle {
- return pgh.file
-}
-
-func (pgh *parseGoHandle) Mode() source.ParseMode {
- return pgh.mode
-}
-
-func (s *snapshot) ParseGo(ctx context.Context, fh source.FileHandle, mode source.ParseMode) (*source.ParsedGoFile, error) {
- pgh := s.parseGoHandle(ctx, fh, mode)
- pgf, _, err := s.parseGo(ctx, pgh)
- return pgf, err
-}
-
-func (s *snapshot) parseGo(ctx context.Context, pgh *parseGoHandle) (*source.ParsedGoFile, bool, error) {
- if pgh.mode == source.ParseExported {
- panic("only type checking should use Exported")
- }
- d, err := pgh.handle.Get(ctx, s.generation, s)
- if err != nil {
- return nil, false, err
- }
- data := d.(*parseGoData)
- return data.parsed, data.fixed, data.err
-}
-
-type astCacheKey struct {
- pkg packageHandleKey
- uri span.URI
-}
-
-func (s *snapshot) astCacheData(ctx context.Context, spkg source.Package, pos token.Pos) (*astCacheData, error) {
- pkg := spkg.(*pkg)
- pkgHandle := s.getPackage(pkg.m.ID, pkg.mode)
- if pkgHandle == nil {
- return nil, fmt.Errorf("could not reconstruct package handle for %v", pkg.m.ID)
- }
- tok := s.FileSet().File(pos)
- if tok == nil {
- return nil, fmt.Errorf("no file for pos %v", pos)
- }
- pgf, err := pkg.File(span.URIFromPath(tok.Name()))
- if err != nil {
- return nil, err
- }
- astHandle := s.generation.Bind(astCacheKey{pkgHandle.key, pgf.URI}, func(ctx context.Context, arg memoize.Arg) interface{} {
- return buildASTCache(pgf)
- }, nil)
-
- d, err := astHandle.Get(ctx, s.generation, s)
- if err != nil {
- return nil, err
- }
- data := d.(*astCacheData)
- if data.err != nil {
- return nil, data.err
- }
- return data, nil
-}
-
-func (s *snapshot) PosToDecl(ctx context.Context, spkg source.Package, pos token.Pos) (ast.Decl, error) {
- data, err := s.astCacheData(ctx, spkg, pos)
- if err != nil {
- return nil, err
- }
- return data.posToDecl[pos], nil
-}
-
-func (s *snapshot) PosToField(ctx context.Context, spkg source.Package, pos token.Pos) (*ast.Field, error) {
- data, err := s.astCacheData(ctx, spkg, pos)
- if err != nil {
- return nil, err
- }
- return data.posToField[pos], nil
-}
-
-type astCacheData struct {
- err error
-
- posToDecl map[token.Pos]ast.Decl
- posToField map[token.Pos]*ast.Field
-}
-
-// buildASTCache builds caches to aid in quickly going from the typed
-// world to the syntactic world.
-func buildASTCache(pgf *source.ParsedGoFile) *astCacheData {
- var (
- // path contains all ancestors, including n.
- path []ast.Node
- // decls contains all ancestors that are decls.
- decls []ast.Decl
- )
-
- data := &astCacheData{
- posToDecl: make(map[token.Pos]ast.Decl),
- posToField: make(map[token.Pos]*ast.Field),
- }
-
- ast.Inspect(pgf.File, func(n ast.Node) bool {
- if n == nil {
- lastP := path[len(path)-1]
- path = path[:len(path)-1]
- if len(decls) > 0 && decls[len(decls)-1] == lastP {
- decls = decls[:len(decls)-1]
- }
- return false
- }
-
- path = append(path, n)
-
- switch n := n.(type) {
- case *ast.Field:
- addField := func(f ast.Node) {
- if f.Pos().IsValid() {
- data.posToField[f.Pos()] = n
- if len(decls) > 0 {
- data.posToDecl[f.Pos()] = decls[len(decls)-1]
- }
- }
- }
-
- // Add mapping for *ast.Field itself. This handles embedded
- // fields which have no associated *ast.Ident name.
- addField(n)
-
- // Add mapping for each field name since you can have
- // multiple names for the same type expression.
- for _, name := range n.Names {
- addField(name)
- }
-
- // Also map "X" in "...X" to the containing *ast.Field. This
- // makes it easy to format variadic signature params
- // properly.
- if elips, ok := n.Type.(*ast.Ellipsis); ok && elips.Elt != nil {
- addField(elips.Elt)
- }
- case *ast.FuncDecl:
- decls = append(decls, n)
-
- if n.Name != nil && n.Name.Pos().IsValid() {
- data.posToDecl[n.Name.Pos()] = n
- }
- case *ast.GenDecl:
- decls = append(decls, n)
-
- for _, spec := range n.Specs {
- switch spec := spec.(type) {
- case *ast.TypeSpec:
- if spec.Name != nil && spec.Name.Pos().IsValid() {
- data.posToDecl[spec.Name.Pos()] = n
- }
- case *ast.ValueSpec:
- for _, id := range spec.Names {
- if id != nil && id.Pos().IsValid() {
- data.posToDecl[id.Pos()] = n
- }
- }
- }
- }
- }
-
- return true
- })
-
- return data
-}
-
-func parseGo(ctx context.Context, fset *token.FileSet, fh source.FileHandle, mode source.ParseMode) *parseGoData {
- ctx, done := event.Start(ctx, "cache.parseGo", tag.File.Of(fh.URI().Filename()))
- defer done()
-
- ext := filepath.Ext(fh.URI().Filename())
- if ext != ".go" && ext != "" { // files generated by cgo have no extension
- return &parseGoData{err: errors.Errorf("cannot parse non-Go file %s", fh.URI())}
- }
- src, err := fh.Read()
- if err != nil {
- return &parseGoData{err: err}
- }
-
- parserMode := parser.AllErrors | parser.ParseComments
- if mode == source.ParseHeader {
- parserMode = parser.ImportsOnly | parser.ParseComments
- }
-
- file, err := parser.ParseFile(fset, fh.URI().Filename(), src, parserMode)
- var parseErr scanner.ErrorList
- if err != nil {
- // We passed a byte slice, so the only possible error is a parse error.
- parseErr = err.(scanner.ErrorList)
- }
-
- tok := fset.File(file.Pos())
- if tok == nil {
- // file.Pos is the location of the package declaration. If there was
- // none, we can't find the token.File that ParseFile created, and we
- // have no choice but to recreate it.
- tok = fset.AddFile(fh.URI().Filename(), -1, len(src))
- tok.SetLinesForContent(src)
- }
-
- fixed := false
- // If there were parse errors, attempt to fix them up.
- if parseErr != nil {
- // Fix any badly parsed parts of the AST.
- fixed = fixAST(ctx, file, tok, src)
-
- for i := 0; i < 10; i++ {
- // Fix certain syntax errors that render the file unparseable.
- newSrc := fixSrc(file, tok, src)
- if newSrc == nil {
- break
- }
-
- // If we thought there was something to fix 10 times in a row,
- // it is likely we got stuck in a loop somehow. Log out a diff
- // of the last changes we made to aid in debugging.
- if i == 9 {
- edits, err := myers.ComputeEdits(fh.URI(), string(src), string(newSrc))
- if err != nil {
- event.Error(ctx, "error generating fixSrc diff", err, tag.File.Of(tok.Name()))
- } else {
- unified := diff.ToUnified("before", "after", string(src), edits)
- event.Log(ctx, fmt.Sprintf("fixSrc loop - last diff:\n%v", unified), tag.File.Of(tok.Name()))
- }
- }
-
- newFile, _ := parser.ParseFile(fset, fh.URI().Filename(), newSrc, parserMode)
- if newFile != nil {
- // Maintain the original parseError so we don't try formatting the doctored file.
- file = newFile
- src = newSrc
- tok = fset.File(file.Pos())
-
- fixed = fixAST(ctx, file, tok, src)
- }
- }
- }
-
- return &parseGoData{
- parsed: &source.ParsedGoFile{
- URI: fh.URI(),
- Mode: mode,
- Src: src,
- File: file,
- Tok: tok,
- Mapper: &protocol.ColumnMapper{
- URI: fh.URI(),
- Converter: span.NewTokenConverter(fset, tok),
- Content: src,
- },
- ParseErr: parseErr,
- },
- fixed: fixed,
- }
-}
-
-// An unexportedFilter removes as much unexported AST from a set of Files as possible.
-type unexportedFilter struct {
- uses map[string]bool
-}
-
-// Filter records uses of unexported identifiers and filters out all other
-// unexported declarations.
-func (f *unexportedFilter) Filter(files []*ast.File) {
- // Iterate to fixed point -- unexported types can include other unexported types.
- oldLen := len(f.uses)
- for {
- for _, file := range files {
- f.recordUses(file)
- }
- if len(f.uses) == oldLen {
- break
- }
- oldLen = len(f.uses)
- }
-
- for _, file := range files {
- var newDecls []ast.Decl
- for _, decl := range file.Decls {
- if f.filterDecl(decl) {
- newDecls = append(newDecls, decl)
- }
- }
- file.Decls = newDecls
- file.Scope = nil
- file.Unresolved = nil
- file.Comments = nil
- trimAST(file)
- }
-}
-
-func (f *unexportedFilter) keep(ident *ast.Ident) bool {
- return ast.IsExported(ident.Name) || f.uses[ident.Name]
-}
-
-func (f *unexportedFilter) filterDecl(decl ast.Decl) bool {
- switch decl := decl.(type) {
- case *ast.FuncDecl:
- if ident := recvIdent(decl); ident != nil && !f.keep(ident) {
- return false
- }
- return f.keep(decl.Name)
- case *ast.GenDecl:
- if decl.Tok == token.CONST {
- // Constants can involve iota, and iota is hard to deal with.
- return true
- }
- var newSpecs []ast.Spec
- for _, spec := range decl.Specs {
- if f.filterSpec(spec) {
- newSpecs = append(newSpecs, spec)
- }
- }
- decl.Specs = newSpecs
- return len(newSpecs) != 0
- case *ast.BadDecl:
- return false
- }
- panic(fmt.Sprintf("unknown ast.Decl %T", decl))
-}
-
-func (f *unexportedFilter) filterSpec(spec ast.Spec) bool {
- switch spec := spec.(type) {
- case *ast.ImportSpec:
- return true
- case *ast.ValueSpec:
- var newNames []*ast.Ident
- for _, name := range spec.Names {
- if f.keep(name) {
- newNames = append(newNames, name)
- }
- }
- spec.Names = newNames
- return len(spec.Names) != 0
- case *ast.TypeSpec:
- if !f.keep(spec.Name) {
- return false
- }
- switch typ := spec.Type.(type) {
- case *ast.StructType:
- f.filterFieldList(typ.Fields)
- case *ast.InterfaceType:
- f.filterFieldList(typ.Methods)
- }
- return true
- }
- panic(fmt.Sprintf("unknown ast.Spec %T", spec))
-}
-
-func (f *unexportedFilter) filterFieldList(fields *ast.FieldList) {
- var newFields []*ast.Field
- for _, field := range fields.List {
- if len(field.Names) == 0 {
- // Keep embedded fields: they can export methods and fields.
- newFields = append(newFields, field)
- }
- for _, name := range field.Names {
- if f.keep(name) {
- newFields = append(newFields, field)
- break
- }
- }
- }
- fields.List = newFields
-}
-
-func (f *unexportedFilter) recordUses(file *ast.File) {
- for _, decl := range file.Decls {
- switch decl := decl.(type) {
- case *ast.FuncDecl:
- // Ignore methods on dropped types.
- if ident := recvIdent(decl); ident != nil && !f.keep(ident) {
- break
- }
- // Ignore functions with dropped names.
- if !f.keep(decl.Name) {
- break
- }
- f.recordFuncType(decl.Type)
- case *ast.GenDecl:
- for _, spec := range decl.Specs {
- switch spec := spec.(type) {
- case *ast.ValueSpec:
- for i, name := range spec.Names {
- // Don't mess with constants -- iota is hard.
- if f.keep(name) || decl.Tok == token.CONST {
- f.recordIdents(spec.Type)
- if len(spec.Values) > i {
- f.recordIdents(spec.Values[i])
- }
- }
- }
- case *ast.TypeSpec:
- switch typ := spec.Type.(type) {
- case *ast.StructType:
- f.recordFieldUses(false, typ.Fields)
- case *ast.InterfaceType:
- f.recordFieldUses(false, typ.Methods)
- }
- }
- }
- }
- }
-}
-
-// recvIdent returns the identifier of a method receiver, e.g. *int.
-func recvIdent(decl *ast.FuncDecl) *ast.Ident {
- if decl.Recv == nil || len(decl.Recv.List) == 0 {
- return nil
- }
- x := decl.Recv.List[0].Type
- if star, ok := x.(*ast.StarExpr); ok {
- x = star.X
- }
- if ident, ok := x.(*ast.Ident); ok {
- return ident
- }
- return nil
-}
-
-// recordIdents records unexported identifiers in an Expr in uses.
-// These may be types, e.g. in map[key]value, function names, e.g. in foo(),
-// or simple variable references. References that will be discarded, such
-// as those in function literal bodies, are ignored.
-func (f *unexportedFilter) recordIdents(x ast.Expr) {
- ast.Inspect(x, func(n ast.Node) bool {
- if n == nil {
- return false
- }
- if complit, ok := n.(*ast.CompositeLit); ok {
- // We clear out composite literal contents; just record their type.
- f.recordIdents(complit.Type)
- return false
- }
- if flit, ok := n.(*ast.FuncLit); ok {
- f.recordFuncType(flit.Type)
- return false
- }
- if ident, ok := n.(*ast.Ident); ok && !ast.IsExported(ident.Name) {
- f.uses[ident.Name] = true
- }
- return true
- })
-}
-
-// recordFuncType records the types mentioned by a function type.
-func (f *unexportedFilter) recordFuncType(x *ast.FuncType) {
- f.recordFieldUses(true, x.Params)
- f.recordFieldUses(true, x.Results)
-}
-
-// recordFieldUses records unexported identifiers used in fields, which may be
-// struct members, interface members, or function parameter/results.
-func (f *unexportedFilter) recordFieldUses(isParams bool, fields *ast.FieldList) {
- if fields == nil {
- return
- }
- for _, field := range fields.List {
- if isParams {
- // Parameter types of retained functions need to be retained.
- f.recordIdents(field.Type)
- continue
- }
- if ft, ok := field.Type.(*ast.FuncType); ok {
- // Function declarations in interfaces need all their types retained.
- f.recordFuncType(ft)
- continue
- }
- if len(field.Names) == 0 {
- // Embedded fields might contribute exported names.
- f.recordIdents(field.Type)
- }
- for _, name := range field.Names {
- // We only need normal fields if they're exported.
- if ast.IsExported(name.Name) {
- f.recordIdents(field.Type)
- break
- }
- }
- }
-}
-
-// ProcessErrors records additional uses from errors, returning the new uses
-// and any unexpected errors.
-func (f *unexportedFilter) ProcessErrors(errors []types.Error) (map[string]bool, []types.Error) {
- var unexpected []types.Error
- missing := map[string]bool{}
- for _, err := range errors {
- if strings.Contains(err.Msg, "missing return") {
- continue
- }
- const undeclared = "undeclared name: "
- if strings.HasPrefix(err.Msg, undeclared) {
- missing[strings.TrimPrefix(err.Msg, undeclared)] = true
- f.uses[strings.TrimPrefix(err.Msg, undeclared)] = true
- continue
- }
- unexpected = append(unexpected, err)
- }
- return missing, unexpected
-}
-
-// trimAST clears any part of the AST not relevant to type checking
-// expressions at pos.
-func trimAST(file *ast.File) {
- ast.Inspect(file, func(n ast.Node) bool {
- if n == nil {
- return false
- }
- switch n := n.(type) {
- case *ast.FuncDecl:
- n.Body = nil
- case *ast.BlockStmt:
- n.List = nil
- case *ast.CaseClause:
- n.Body = nil
- case *ast.CommClause:
- n.Body = nil
- case *ast.CompositeLit:
- // types.Info.Types for long slice/array literals are particularly
- // expensive. Try to clear them out.
- at, ok := n.Type.(*ast.ArrayType)
- if !ok {
- // Composite literal. No harm removing all its fields.
- n.Elts = nil
- break
- }
- // Removing the elements from an ellipsis array changes its type.
- // Try to set the length explicitly so we can continue.
- if _, ok := at.Len.(*ast.Ellipsis); ok {
- length, ok := arrayLength(n)
- if !ok {
- break
- }
- at.Len = &ast.BasicLit{
- Kind: token.INT,
- Value: fmt.Sprint(length),
- ValuePos: at.Len.Pos(),
- }
- }
- n.Elts = nil
- }
- return true
- })
-}
-
-// arrayLength returns the length of some simple forms of ellipsis array literal.
-// Notably, it handles the tables in golang.org/x/text.
-func arrayLength(array *ast.CompositeLit) (int, bool) {
- litVal := func(expr ast.Expr) (int, bool) {
- lit, ok := expr.(*ast.BasicLit)
- if !ok {
- return 0, false
- }
- val, err := strconv.ParseInt(lit.Value, 10, 64)
- if err != nil {
- return 0, false
- }
- return int(val), true
- }
- largestKey := -1
- for _, elt := range array.Elts {
- kve, ok := elt.(*ast.KeyValueExpr)
- if !ok {
- continue
- }
- switch key := kve.Key.(type) {
- case *ast.BasicLit:
- if val, ok := litVal(key); ok && largestKey < val {
- largestKey = val
- }
- case *ast.BinaryExpr:
- // golang.org/x/text uses subtraction (and only subtraction) in its indices.
- if key.Op != token.SUB {
- break
- }
- x, ok := litVal(key.X)
- if !ok {
- break
- }
- y, ok := litVal(key.Y)
- if !ok {
- break
- }
- if val := x - y; largestKey < val {
- largestKey = val
- }
- }
- }
- if largestKey != -1 {
- return largestKey + 1, true
- }
- return len(array.Elts), true
-}
-
-// fixAST inspects the AST and potentially modifies any *ast.BadStmts so that it can be
-// type-checked more effectively.
-//
-// If fixAST returns true, the resulting AST is considered "fixed", meaning
-// positions have been mangled, and type checker errors may not make sense.
-func fixAST(ctx context.Context, n ast.Node, tok *token.File, src []byte) (fixed bool) {
- var err error
- walkASTWithParent(n, func(n, parent ast.Node) bool {
- switch n := n.(type) {
- case *ast.BadStmt:
- if fixed = fixDeferOrGoStmt(n, parent, tok, src); fixed {
- // Recursively fix in our fixed node.
- _ = fixAST(ctx, parent, tok, src)
- } else {
- err = errors.Errorf("unable to parse defer or go from *ast.BadStmt: %v", err)
- }
- return false
- case *ast.BadExpr:
- if fixed = fixArrayType(n, parent, tok, src); fixed {
- // Recursively fix in our fixed node.
- _ = fixAST(ctx, parent, tok, src)
- return false
- }
-
- // Fix cases where parser interprets if/for/switch "init"
- // statement as "cond" expression, e.g.:
- //
- // // "i := foo" is init statement, not condition.
- // for i := foo
- //
- fixInitStmt(n, parent, tok, src)
-
- return false
- case *ast.SelectorExpr:
- // Fix cases where a keyword prefix results in a phantom "_" selector, e.g.:
- //
- // foo.var<> // want to complete to "foo.variance"
- //
- fixPhantomSelector(n, tok, src)
- return true
-
- case *ast.BlockStmt:
- switch parent.(type) {
- case *ast.SwitchStmt, *ast.TypeSwitchStmt, *ast.SelectStmt:
- // Adjust closing curly brace of empty switch/select
- // statements so we can complete inside them.
- fixEmptySwitch(n, tok, src)
- }
-
- return true
- default:
- return true
- }
- })
- return fixed
-}
-
-// walkASTWithParent walks the AST rooted at n. The semantics are
-// similar to ast.Inspect except it does not call f(nil).
-func walkASTWithParent(n ast.Node, f func(n ast.Node, parent ast.Node) bool) {
- var ancestors []ast.Node
- ast.Inspect(n, func(n ast.Node) (recurse bool) {
- defer func() {
- if recurse {
- ancestors = append(ancestors, n)
- }
- }()
-
- if n == nil {
- ancestors = ancestors[:len(ancestors)-1]
- return false
- }
-
- var parent ast.Node
- if len(ancestors) > 0 {
- parent = ancestors[len(ancestors)-1]
- }
-
- return f(n, parent)
- })
-}
-
-// fixSrc attempts to modify the file's source code to fix certain
-// syntax errors that leave the rest of the file unparsed.
-func fixSrc(f *ast.File, tok *token.File, src []byte) (newSrc []byte) {
- walkASTWithParent(f, func(n, parent ast.Node) bool {
- if newSrc != nil {
- return false
- }
-
- switch n := n.(type) {
- case *ast.BlockStmt:
- newSrc = fixMissingCurlies(f, n, parent, tok, src)
- case *ast.SelectorExpr:
- newSrc = fixDanglingSelector(n, tok, src)
- }
-
- return newSrc == nil
- })
-
- return newSrc
-}
-
-// fixMissingCurlies adds in curly braces for block statements that
-// are missing curly braces. For example:
-//
-// if foo
-//
-// becomes
-//
-// if foo {}
-func fixMissingCurlies(f *ast.File, b *ast.BlockStmt, parent ast.Node, tok *token.File, src []byte) []byte {
- // If the "{" is already in the source code, there isn't anything to
- // fix since we aren't missing curlies.
- if b.Lbrace.IsValid() {
- braceOffset, err := source.Offset(tok, b.Lbrace)
- if err != nil {
- return nil
- }
- if braceOffset < len(src) && src[braceOffset] == '{' {
- return nil
- }
- }
-
- parentLine := tok.Line(parent.Pos())
-
- if parentLine >= tok.LineCount() {
- // If we are the last line in the file, no need to fix anything.
- return nil
- }
-
- // Insert curlies at the end of parent's starting line. The parent
- // is the statement that contains the block, e.g. *ast.IfStmt. The
- // block's Pos()/End() can't be relied upon because they are based
- // on the (missing) curly braces. We assume the statement is a
- // single line for now and try sticking the curly braces at the end.
- insertPos := tok.LineStart(parentLine+1) - 1
-
- // Scootch position backwards until it's not in a comment. For example:
- //
- // if foo<> // some amazing comment |
- // someOtherCode()
- //
- // insertPos will be located at "|", so we back it out of the comment.
- didSomething := true
- for didSomething {
- didSomething = false
- for _, c := range f.Comments {
- if c.Pos() < insertPos && insertPos <= c.End() {
- insertPos = c.Pos()
- didSomething = true
- }
- }
- }
-
- // Bail out if line doesn't end in an ident or ".". This is to avoid
- // cases like below where we end up making things worse by adding
- // curlies:
- //
- // if foo &&
- // bar<>
- switch precedingToken(insertPos, tok, src) {
- case token.IDENT, token.PERIOD:
- // ok
- default:
- return nil
- }
-
- var buf bytes.Buffer
- buf.Grow(len(src) + 3)
- offset, err := source.Offset(tok, insertPos)
- if err != nil {
- return nil
- }
- buf.Write(src[:offset])
-
- // Detect if we need to insert a semicolon to fix "for" loop situations like:
- //
- // for i := foo(); foo<>
- //
- // Just adding curlies is not sufficient to make things parse well.
- if fs, ok := parent.(*ast.ForStmt); ok {
- if _, ok := fs.Cond.(*ast.BadExpr); !ok {
- if xs, ok := fs.Post.(*ast.ExprStmt); ok {
- if _, ok := xs.X.(*ast.BadExpr); ok {
- buf.WriteByte(';')
- }
- }
- }
- }
-
- // Insert "{}" at insertPos.
- buf.WriteByte('{')
- buf.WriteByte('}')
- buf.Write(src[offset:])
- return buf.Bytes()
-}
-
-// fixEmptySwitch moves empty switch/select statements' closing curly
-// brace down one line. This allows us to properly detect incomplete
-// "case" and "default" keywords as inside the switch statement. For
-// example:
-//
-// switch {
-// def<>
-// }
-//
-// gets parsed like:
-//
-// switch {
-// }
-//
-// Later we manually pull out the "def" token, but we need to detect
-// that our "<>" position is inside the switch block. To do that we
-// move the curly brace so it looks like:
-//
-// switch {
-//
-// }
-//
-func fixEmptySwitch(body *ast.BlockStmt, tok *token.File, src []byte) {
- // We only care about empty switch statements.
- if len(body.List) > 0 || !body.Rbrace.IsValid() {
- return
- }
-
- // If the right brace is actually in the source code at the
- // specified position, don't mess with it.
- braceOffset, err := source.Offset(tok, body.Rbrace)
- if err != nil {
- return
- }
- if braceOffset < len(src) && src[braceOffset] == '}' {
- return
- }
-
- braceLine := tok.Line(body.Rbrace)
- if braceLine >= tok.LineCount() {
- // If we are the last line in the file, no need to fix anything.
- return
- }
-
- // Move the right brace down one line.
- body.Rbrace = tok.LineStart(braceLine + 1)
-}
-
-// fixDanglingSelector inserts real "_" selector expressions in place
-// of phantom "_" selectors. For example:
-//
-// func _() {
-// x.<>
-// }
-// var x struct { i int }
-//
-// To fix completion at "<>", we insert a real "_" after the "." so the
-// following declaration of "x" can be parsed and type checked
-// normally.
-func fixDanglingSelector(s *ast.SelectorExpr, tok *token.File, src []byte) []byte {
- if !isPhantomUnderscore(s.Sel, tok, src) {
- return nil
- }
-
- if !s.X.End().IsValid() {
- return nil
- }
-
- insertOffset, err := source.Offset(tok, s.X.End())
- if err != nil {
- return nil
- }
- // Insert directly after the selector's ".".
- insertOffset++
- if src[insertOffset-1] != '.' {
- return nil
- }
-
- var buf bytes.Buffer
- buf.Grow(len(src) + 1)
- buf.Write(src[:insertOffset])
- buf.WriteByte('_')
- buf.Write(src[insertOffset:])
- return buf.Bytes()
-}
-
-// fixPhantomSelector tries to fix selector expressions with phantom
-// "_" selectors. In particular, we check if the selector is a
-// keyword, and if so we swap in an *ast.Ident with the keyword text. For example:
-//
-// foo.var
-//
-// yields a "_" selector instead of "var" since "var" is a keyword.
-//
-// TODO(rfindley): should this constitute an ast 'fix'?
-func fixPhantomSelector(sel *ast.SelectorExpr, tok *token.File, src []byte) {
- if !isPhantomUnderscore(sel.Sel, tok, src) {
- return
- }
-
- // Only consider selectors directly abutting the selector ".". This
- // avoids false positives in cases like:
- //
- // foo. // don't think "var" is our selector
- // var bar = 123
- //
- if sel.Sel.Pos() != sel.X.End()+1 {
- return
- }
-
- maybeKeyword := readKeyword(sel.Sel.Pos(), tok, src)
- if maybeKeyword == "" {
- return
- }
-
- replaceNode(sel, sel.Sel, &ast.Ident{
- Name: maybeKeyword,
- NamePos: sel.Sel.Pos(),
- })
-}
-
-// isPhantomUnderscore reports whether the given ident is a phantom
-// underscore. The parser sometimes inserts phantom underscores when
-// it encounters otherwise unparseable situations.
-func isPhantomUnderscore(id *ast.Ident, tok *token.File, src []byte) bool {
- if id == nil || id.Name != "_" {
- return false
- }
-
- // Phantom underscore means the underscore is not actually in the
- // program text.
- offset, err := source.Offset(tok, id.Pos())
- if err != nil {
- return false
- }
- return len(src) <= offset || src[offset] != '_'
-}
-
-// fixInitStmt fixes cases where the parser misinterprets an
-// if/for/switch "init" statement as the "cond" conditional. In cases
-// like "if i := 0" the user hasn't typed the semicolon yet so the
-// parser is looking for the conditional expression. However, "i := 0"
-// are not valid expressions, so we get a BadExpr.
-//
-// fixInitStmt returns valid AST for the original source.
-func fixInitStmt(bad *ast.BadExpr, parent ast.Node, tok *token.File, src []byte) {
- if !bad.Pos().IsValid() || !bad.End().IsValid() {
- return
- }
-
- // Try to extract a statement from the BadExpr.
- start, err := source.Offset(tok, bad.Pos())
- if err != nil {
- return
- }
- end, err := source.Offset(tok, bad.End()-1)
- if err != nil {
- return
- }
- stmtBytes := src[start : end+1]
- stmt, err := parseStmt(bad.Pos(), stmtBytes)
- if err != nil {
- return
- }
-
- // If the parent statement doesn't already have an "init" statement,
- // move the extracted statement into the "init" field and insert a
- // dummy expression into the required "cond" field.
- switch p := parent.(type) {
- case *ast.IfStmt:
- if p.Init != nil {
- return
- }
- p.Init = stmt
- p.Cond = &ast.Ident{
- Name: "_",
- NamePos: stmt.End(),
- }
- case *ast.ForStmt:
- if p.Init != nil {
- return
- }
- p.Init = stmt
- p.Cond = &ast.Ident{
- Name: "_",
- NamePos: stmt.End(),
- }
- case *ast.SwitchStmt:
- if p.Init != nil {
- return
- }
- p.Init = stmt
- p.Tag = nil
- }
-}
-
-// readKeyword reads the keyword starting at pos, if any.
-func readKeyword(pos token.Pos, tok *token.File, src []byte) string {
- var kwBytes []byte
- offset, err := source.Offset(tok, pos)
- if err != nil {
- return ""
- }
- for i := offset; i < len(src); i++ {
- // Use a simplified identifier check since keywords are always lowercase ASCII.
- if src[i] < 'a' || src[i] > 'z' {
- break
- }
- kwBytes = append(kwBytes, src[i])
-
- // Stop search at arbitrarily chosen too-long-for-a-keyword length.
- if len(kwBytes) > 15 {
- return ""
- }
- }
-
- if kw := string(kwBytes); token.Lookup(kw).IsKeyword() {
- return kw
- }
-
- return ""
-}
-
-// fixArrayType tries to parse an *ast.BadExpr into an *ast.ArrayType.
-// go/parser often turns lone array types like "[]int" into BadExprs
-// if it isn't expecting a type.
-func fixArrayType(bad *ast.BadExpr, parent ast.Node, tok *token.File, src []byte) bool {
- // Our expected input is a bad expression that looks like "[]someExpr".
-
- from := bad.Pos()
- to := bad.End()
-
- if !from.IsValid() || !to.IsValid() {
- return false
- }
-
- exprBytes := make([]byte, 0, int(to-from)+3)
- // Avoid doing tok.Offset(to) since that panics if badExpr ends at EOF.
- // It also panics if the position is not in the range of the file, and
- // badExprs may not necessarily have good positions, so check first.
- fromOffset, err := source.Offset(tok, from)
- if err != nil {
- return false
- }
- toOffset, err := source.Offset(tok, to-1)
- if err != nil {
- return false
- }
- exprBytes = append(exprBytes, src[fromOffset:toOffset+1]...)
- exprBytes = bytes.TrimSpace(exprBytes)
-
- // If our expression ends in "]" (e.g. "[]"), add a phantom selector
- // so we can complete directly after the "[]".
- if len(exprBytes) > 0 && exprBytes[len(exprBytes)-1] == ']' {
- exprBytes = append(exprBytes, '_')
- }
-
- // Add "{}" to turn our ArrayType into a CompositeLit. This is to
- // handle the case of "[...]int" where we must make it a composite
- // literal to be parseable.
- exprBytes = append(exprBytes, '{', '}')
-
- expr, err := parseExpr(from, exprBytes)
- if err != nil {
- return false
- }
-
- cl, _ := expr.(*ast.CompositeLit)
- if cl == nil {
- return false
- }
-
- at, _ := cl.Type.(*ast.ArrayType)
- if at == nil {
- return false
- }
-
- return replaceNode(parent, bad, at)
-}
-
-// precedingToken scans src to find the token preceding pos.
-func precedingToken(pos token.Pos, tok *token.File, src []byte) token.Token {
- s := &scanner.Scanner{}
- s.Init(tok, src, nil, 0)
-
- var lastTok token.Token
- for {
- p, t, _ := s.Scan()
- if t == token.EOF || p >= pos {
- break
- }
-
- lastTok = t
- }
- return lastTok
-}
-
-// fixDeferOrGoStmt tries to parse an *ast.BadStmt into a defer or a go statement.
-//
-// go/parser packages a statement of the form "defer x." as an *ast.BadStmt because
-// it does not include a call expression. This means that go/types skips type-checking
-// this statement entirely, and we can't use the type information when completing.
-// Here, we try to generate a fake *ast.DeferStmt or *ast.GoStmt to put into the AST,
-// instead of the *ast.BadStmt.
-func fixDeferOrGoStmt(bad *ast.BadStmt, parent ast.Node, tok *token.File, src []byte) bool {
- // Check if we have a bad statement containing either a "go" or "defer".
- s := &scanner.Scanner{}
- s.Init(tok, src, nil, 0)
-
- var (
- pos token.Pos
- tkn token.Token
- )
- for {
- if tkn == token.EOF {
- return false
- }
- if pos >= bad.From {
- break
- }
- pos, tkn, _ = s.Scan()
- }
-
- var stmt ast.Stmt
- switch tkn {
- case token.DEFER:
- stmt = &ast.DeferStmt{
- Defer: pos,
- }
- case token.GO:
- stmt = &ast.GoStmt{
- Go: pos,
- }
- default:
- return false
- }
-
- var (
- from, to, last token.Pos
- lastToken token.Token
- braceDepth int
- phantomSelectors []token.Pos
- )
-FindTo:
- for {
- to, tkn, _ = s.Scan()
-
- if from == token.NoPos {
- from = to
- }
-
- switch tkn {
- case token.EOF:
- break FindTo
- case token.SEMICOLON:
- // If we aren't in nested braces, end of statement means
- // end of expression.
- if braceDepth == 0 {
- break FindTo
- }
- case token.LBRACE:
- braceDepth++
- }
-
- // This handles the common dangling selector case. For example in
- //
- // defer fmt.
- // y := 1
- //
- // we notice the dangling period and end our expression.
- //
- // If the previous token was a "." and we are looking at a "}",
- // the period is likely a dangling selector and needs a phantom
- // "_". Likewise if the current token is on a different line than
- // the period, the period is likely a dangling selector.
- if lastToken == token.PERIOD && (tkn == token.RBRACE || tok.Line(to) > tok.Line(last)) {
- // Insert phantom "_" selector after the dangling ".".
- phantomSelectors = append(phantomSelectors, last+1)
- // If we aren't in a block then end the expression after the ".".
- if braceDepth == 0 {
- to = last + 1
- break
- }
- }
-
- lastToken = tkn
- last = to
-
- switch tkn {
- case token.RBRACE:
- braceDepth--
- if braceDepth <= 0 {
- if braceDepth == 0 {
- // +1 to include the "}" itself.
- to += 1
- }
- break FindTo
- }
- }
- }
-
- fromOffset, err := source.Offset(tok, from)
- if err != nil {
- return false
- }
- if !from.IsValid() || fromOffset >= len(src) {
- return false
- }
-
- toOffset, err := source.Offset(tok, to)
- if err != nil {
- return false
- }
- if !to.IsValid() || toOffset >= len(src) {
- return false
- }
-
- // Insert any phantom selectors needed to prevent dangling "." from messing
- // up the AST.
- exprBytes := make([]byte, 0, int(to-from)+len(phantomSelectors))
- for i, b := range src[fromOffset:toOffset] {
- if len(phantomSelectors) > 0 && from+token.Pos(i) == phantomSelectors[0] {
- exprBytes = append(exprBytes, '_')
- phantomSelectors = phantomSelectors[1:]
- }
- exprBytes = append(exprBytes, b)
- }
-
- if len(phantomSelectors) > 0 {
- exprBytes = append(exprBytes, '_')
- }
-
- expr, err := parseExpr(from, exprBytes)
- if err != nil {
- return false
- }
-
- // Package the expression into a fake *ast.CallExpr and re-insert
- // into the function.
- call := &ast.CallExpr{
- Fun: expr,
- Lparen: to,
- Rparen: to,
- }
-
- switch stmt := stmt.(type) {
- case *ast.DeferStmt:
- stmt.Call = call
- case *ast.GoStmt:
- stmt.Call = call
- }
-
- return replaceNode(parent, bad, stmt)
-}
-
-// parseStmt parses the statement in src and updates its position to
-// start at pos.
-func parseStmt(pos token.Pos, src []byte) (ast.Stmt, error) {
- // Wrap our expression to make it a valid Go file we can pass to ParseFile.
- fileSrc := bytes.Join([][]byte{
- []byte("package fake;func _(){"),
- src,
- []byte("}"),
- }, nil)
-
- // Use ParseFile instead of ParseExpr because ParseFile has
- // best-effort behavior, whereas ParseExpr fails hard on any error.
- fakeFile, err := parser.ParseFile(token.NewFileSet(), "", fileSrc, 0)
- if fakeFile == nil {
- return nil, errors.Errorf("error reading fake file source: %v", err)
- }
-
- // Extract our expression node from inside the fake file.
- if len(fakeFile.Decls) == 0 {
- return nil, errors.Errorf("error parsing fake file: %v", err)
- }
-
- fakeDecl, _ := fakeFile.Decls[0].(*ast.FuncDecl)
- if fakeDecl == nil || len(fakeDecl.Body.List) == 0 {
- return nil, errors.Errorf("no statement in %s: %v", src, err)
- }
-
- stmt := fakeDecl.Body.List[0]
-
- // parser.ParseFile returns undefined positions.
- // Adjust them for the current file.
- offsetPositions(stmt, pos-1-(stmt.Pos()-1))
-
- return stmt, nil
-}
-
-// parseExpr parses the expression in src and updates its position to
-// start at pos.
-func parseExpr(pos token.Pos, src []byte) (ast.Expr, error) {
- stmt, err := parseStmt(pos, src)
- if err != nil {
- return nil, err
- }
-
- exprStmt, ok := stmt.(*ast.ExprStmt)
- if !ok {
- return nil, errors.Errorf("no expr in %s: %v", src, err)
- }
-
- return exprStmt.X, nil
-}
-
-var tokenPosType = reflect.TypeOf(token.NoPos)
-
-// offsetPositions applies an offset to the positions in an ast.Node.
-func offsetPositions(n ast.Node, offset token.Pos) {
- ast.Inspect(n, func(n ast.Node) bool {
- if n == nil {
- return false
- }
-
- v := reflect.ValueOf(n).Elem()
-
- switch v.Kind() {
- case reflect.Struct:
- for i := 0; i < v.NumField(); i++ {
- f := v.Field(i)
- if f.Type() != tokenPosType {
- continue
- }
-
- if !f.CanSet() {
- continue
- }
-
- // Don't offset invalid positions: they should stay invalid.
- if !token.Pos(f.Int()).IsValid() {
- continue
- }
-
- f.SetInt(f.Int() + int64(offset))
- }
- }
-
- return true
- })
-}
-
-// replaceNode updates parent's child oldChild to be newChild. It
-// returns whether it replaced successfully.
-func replaceNode(parent, oldChild, newChild ast.Node) bool {
- if parent == nil || oldChild == nil || newChild == nil {
- return false
- }
-
- parentVal := reflect.ValueOf(parent).Elem()
- if parentVal.Kind() != reflect.Struct {
- return false
- }
-
- newChildVal := reflect.ValueOf(newChild)
-
- tryReplace := func(v reflect.Value) bool {
- if !v.CanSet() || !v.CanInterface() {
- return false
- }
-
- // If the existing value is oldChild, we found our child. Make
- // sure our newChild is assignable and then make the swap.
- if v.Interface() == oldChild && newChildVal.Type().AssignableTo(v.Type()) {
- v.Set(newChildVal)
- return true
- }
-
- return false
- }
-
- // Loop over parent's struct fields.
- for i := 0; i < parentVal.NumField(); i++ {
- f := parentVal.Field(i)
-
- switch f.Kind() {
- // Check interface and pointer fields.
- case reflect.Interface, reflect.Ptr:
- if tryReplace(f) {
- return true
- }
-
- // Search through any slice fields.
- case reflect.Slice:
- for i := 0; i < f.Len(); i++ {
- if tryReplace(f.Index(i)) {
- return true
- }
- }
- }
- }
-
- return false
-}
diff --git a/internal/lsp/cache/parse_test.go b/internal/lsp/cache/parse_test.go
deleted file mode 100644
index cb620f274..000000000
--- a/internal/lsp/cache/parse_test.go
+++ /dev/null
@@ -1,217 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cache
-
-import (
- "bytes"
- "go/ast"
- "go/format"
- "go/parser"
- "go/token"
- "go/types"
- "reflect"
- "sort"
- "testing"
-
- "golang.org/x/tools/go/packages"
-)
-
-func TestArrayLength(t *testing.T) {
- tests := []struct {
- expr string
- length int
- }{
- {`[...]int{0,1,2,3,4,5,6,7,8,9}`, 10},
- {`[...]int{9:0}`, 10},
- {`[...]int{19-10:0}`, 10},
- {`[...]int{19-10:0, 17-10:0, 18-10:0}`, 10},
- }
-
- for _, tt := range tests {
- expr, err := parser.ParseExpr(tt.expr)
- if err != nil {
- t.Fatal(err)
- }
- l, ok := arrayLength(expr.(*ast.CompositeLit))
- if !ok {
- t.Errorf("arrayLength did not recognize expression %#v", expr)
- }
- if l != tt.length {
- t.Errorf("arrayLength(%#v) = %v, want %v", expr, l, tt.length)
- }
- }
-}
-
-func TestTrim(t *testing.T) {
- tests := []struct {
- name string
- file string
- kept []string
- }{
- {
- name: "delete_unused",
- file: `
-type x struct{}
-func y()
-var z int
-`,
- kept: []string{},
- },
- {
- // From the common type in testing.
- name: "unexported_embedded",
- file: `
-type x struct {}
-type Exported struct { x }
-`,
- kept: []string{"Exported", "x"},
- },
- {
- // From the d type in unicode.
- name: "exported_field_unexported_type",
- file: `
-type x struct {}
-type Exported struct {
- X x
-}
-`,
- kept: []string{"Exported", "x"},
- },
- {
- // From errNotExist in io/fs.
- name: "exported_var_function_call",
- file: `
-func x() int { return 0 }
-var Exported = x()
-`,
- kept: []string{"Exported", "x"},
- },
- {
- // From DefaultServeMux in net/http.
- name: "exported_pointer_to_unexported_var",
- file: `
-var Exported = &x
-var x int
-`,
- kept: []string{"Exported", "x"},
- },
- {
- // From DefaultWriter in goldmark/renderer/html.
- name: "exported_pointer_to_composite_lit",
- file: `
-var Exported = &x{}
-type x struct{}
-`,
- kept: []string{"Exported", "x"},
- },
- {
- // From SelectDir in reflect.
- name: "leave_constants",
- file: `
-type Enum int
-const (
- _ Enum = iota
- EnumOne
-)
-`,
- kept: []string{"Enum", "EnumOne"},
- },
- {
- name: "constant_conversion",
- file: `
-type x int
-const (
- foo x = 0
-)
-`,
- kept: []string{"x", "foo"},
- },
- {
- name: "unexported_return",
- file: `
-type x int
-func Exported() x {}
-type y int
-type Interface interface {
- Exported() y
-}
-`,
- kept: []string{"Exported", "Interface", "x", "y"},
- },
- {
- name: "drop_composite_literals",
- file: `
-type x int
-type Exported struct {
- foo x
-}
-var Var = Exported{foo:1}
-`,
- kept: []string{"Exported", "Var"},
- },
- {
- name: "drop_function_literals",
- file: `
-type x int
-var Exported = func() { return x(0) }
-`,
- kept: []string{"Exported"},
- },
- {
- name: "missing_receiver_panic",
- file: `
- func() foo() {}
-`,
- kept: []string{},
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- fset := token.NewFileSet()
- file, err := parser.ParseFile(fset, "main.go", "package main\n\n"+tt.file, parser.AllErrors)
- if err != nil {
- t.Fatal(err)
- }
- filter := &unexportedFilter{uses: map[string]bool{}}
- filter.Filter([]*ast.File{file})
- pkg := types.NewPackage("main", "main")
- checker := types.NewChecker(&types.Config{
- DisableUnusedImportCheck: true,
- }, fset, pkg, nil)
- if err := checker.Files([]*ast.File{file}); err != nil {
- t.Error(err)
- }
- names := pkg.Scope().Names()
- sort.Strings(names)
- sort.Strings(tt.kept)
- if !reflect.DeepEqual(names, tt.kept) {
- t.Errorf("package contains names %v, wanted %v", names, tt.kept)
- }
- })
- }
-}
-
-func TestPkg(t *testing.T) {
- t.Skip("for manual debugging")
- fset := token.NewFileSet()
- pkgs, err := packages.Load(&packages.Config{
- Mode: packages.NeedSyntax | packages.NeedFiles,
- Fset: fset,
- }, "io")
- if err != nil {
- t.Fatal(err)
- }
- if len(pkgs[0].Errors) != 0 {
- t.Fatal(pkgs[0].Errors)
- }
- filter := &unexportedFilter{uses: map[string]bool{}}
- filter.Filter(pkgs[0].Syntax)
- for _, file := range pkgs[0].Syntax {
- buf := &bytes.Buffer{}
- format.Node(buf, fset, file)
- t.Log(buf.String())
- }
-}
diff --git a/internal/lsp/cache/pkg.go b/internal/lsp/cache/pkg.go
deleted file mode 100644
index 0c7bf74d3..000000000
--- a/internal/lsp/cache/pkg.go
+++ /dev/null
@@ -1,149 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cache
-
-import (
- "go/ast"
- "go/scanner"
- "go/types"
-
- "golang.org/x/mod/module"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
- errors "golang.org/x/xerrors"
-)
-
-// pkg contains the type information needed by the source package.
-type pkg struct {
- m *Metadata
- mode source.ParseMode
- goFiles []*source.ParsedGoFile
- compiledGoFiles []*source.ParsedGoFile
- diagnostics []*source.Diagnostic
- imports map[PackagePath]*pkg
- version *module.Version
- parseErrors []scanner.ErrorList
- typeErrors []types.Error
- types *types.Package
- typesInfo *types.Info
- typesSizes types.Sizes
- hasFixedFiles bool // if true, AST was sufficiently mangled that we should hide type errors
-}
-
-// Declare explicit types for files and directories to distinguish between the two.
-type (
- fileURI span.URI
- moduleLoadScope string
- viewLoadScope span.URI
-)
-
-func (p *pkg) ID() string {
- return string(p.m.ID)
-}
-
-func (p *pkg) Name() string {
- return string(p.m.Name)
-}
-
-func (p *pkg) PkgPath() string {
- return string(p.m.PkgPath)
-}
-
-func (p *pkg) ParseMode() source.ParseMode {
- return p.mode
-}
-
-func (p *pkg) CompiledGoFiles() []*source.ParsedGoFile {
- return p.compiledGoFiles
-}
-
-func (p *pkg) File(uri span.URI) (*source.ParsedGoFile, error) {
- for _, cgf := range p.compiledGoFiles {
- if cgf.URI == uri {
- return cgf, nil
- }
- }
- for _, gf := range p.goFiles {
- if gf.URI == uri {
- return gf, nil
- }
- }
- return nil, errors.Errorf("no parsed file for %s in %v", uri, p.m.ID)
-}
-
-func (p *pkg) GetSyntax() []*ast.File {
- var syntax []*ast.File
- for _, pgf := range p.compiledGoFiles {
- syntax = append(syntax, pgf.File)
- }
- return syntax
-}
-
-func (p *pkg) GetTypes() *types.Package {
- return p.types
-}
-
-func (p *pkg) GetTypesInfo() *types.Info {
- return p.typesInfo
-}
-
-func (p *pkg) GetTypesSizes() types.Sizes {
- return p.typesSizes
-}
-
-func (p *pkg) IsIllTyped() bool {
- return p.types == nil || p.typesInfo == nil || p.typesSizes == nil
-}
-
-func (p *pkg) ForTest() string {
- return string(p.m.ForTest)
-}
-
-func (p *pkg) GetImport(pkgPath string) (source.Package, error) {
- if imp := p.imports[PackagePath(pkgPath)]; imp != nil {
- return imp, nil
- }
- // Don't return a nil pointer because that still satisfies the interface.
- return nil, errors.Errorf("no imported package for %s", pkgPath)
-}
-
-func (p *pkg) MissingDependencies() []string {
- // We don't invalidate metadata for import deletions, so check the package
- // imports via the *types.Package. Only use metadata if p.types is nil.
- if p.types == nil {
- var md []string
- for i := range p.m.MissingDeps {
- md = append(md, string(i))
- }
- return md
- }
- var md []string
- for _, pkg := range p.types.Imports() {
- if _, ok := p.m.MissingDeps[PackagePath(pkg.Path())]; ok {
- md = append(md, pkg.Path())
- }
- }
- return md
-}
-
-func (p *pkg) Imports() []source.Package {
- var result []source.Package
- for _, imp := range p.imports {
- result = append(result, imp)
- }
- return result
-}
-
-func (p *pkg) Version() *module.Version {
- return p.version
-}
-
-func (p *pkg) HasListOrParseErrors() bool {
- return len(p.m.Errors) != 0 || len(p.parseErrors) != 0
-}
-
-func (p *pkg) HasTypeErrors() bool {
- return len(p.typeErrors) != 0
-}
diff --git a/internal/lsp/cache/session.go b/internal/lsp/cache/session.go
deleted file mode 100644
index e86ed25cb..000000000
--- a/internal/lsp/cache/session.go
+++ /dev/null
@@ -1,741 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cache
-
-import (
- "context"
- "fmt"
- "strconv"
- "sync"
- "sync/atomic"
-
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/gocommand"
- "golang.org/x/tools/internal/imports"
- "golang.org/x/tools/internal/lsp/progress"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
- "golang.org/x/tools/internal/xcontext"
- errors "golang.org/x/xerrors"
-)
-
-type Session struct {
- cache *Cache
- id string
-
- optionsMu sync.Mutex
- options *source.Options
-
- viewMu sync.RWMutex
- views []*View
- viewMap map[span.URI]*View // map of URI->best view
-
- overlayMu sync.Mutex
- overlays map[span.URI]*overlay
-
- // gocmdRunner guards go command calls from concurrency errors.
- gocmdRunner *gocommand.Runner
-
- progress *progress.Tracker
-}
-
-type overlay struct {
- session *Session
- uri span.URI
- text []byte
- hash string
- version int32
- kind source.FileKind
-
- // saved is true if a file matches the state on disk,
- // and therefore does not need to be part of the overlay sent to go/packages.
- saved bool
-}
-
-func (o *overlay) Read() ([]byte, error) {
- return o.text, nil
-}
-
-func (o *overlay) FileIdentity() source.FileIdentity {
- return source.FileIdentity{
- URI: o.uri,
- Hash: o.hash,
- }
-}
-
-func (o *overlay) VersionedFileIdentity() source.VersionedFileIdentity {
- return source.VersionedFileIdentity{
- URI: o.uri,
- SessionID: o.session.id,
- Version: o.version,
- }
-}
-
-func (o *overlay) Kind() source.FileKind {
- return o.kind
-}
-
-func (o *overlay) URI() span.URI {
- return o.uri
-}
-
-func (o *overlay) Version() int32 {
- return o.version
-}
-
-func (o *overlay) Session() string {
- return o.session.id
-}
-
-func (o *overlay) Saved() bool {
- return o.saved
-}
-
-// closedFile implements LSPFile for a file that the editor hasn't told us about.
-type closedFile struct {
- source.FileHandle
-}
-
-func (c *closedFile) VersionedFileIdentity() source.VersionedFileIdentity {
- return source.VersionedFileIdentity{
- URI: c.FileHandle.URI(),
- SessionID: "",
- Version: 0,
- }
-}
-
-func (c *closedFile) Saved() bool {
- return true
-}
-
-func (c *closedFile) Session() string {
- return ""
-}
-
-func (c *closedFile) Version() int32 {
- return 0
-}
-
-func (s *Session) ID() string { return s.id }
-func (s *Session) String() string { return s.id }
-
-func (s *Session) Options() *source.Options {
- s.optionsMu.Lock()
- defer s.optionsMu.Unlock()
- return s.options
-}
-
-func (s *Session) SetOptions(options *source.Options) {
- s.optionsMu.Lock()
- defer s.optionsMu.Unlock()
- s.options = options
-}
-
-func (s *Session) SetProgressTracker(tracker *progress.Tracker) {
- // The progress tracker should be set before any view is initialized.
- s.progress = tracker
-}
-
-func (s *Session) Shutdown(ctx context.Context) {
- var views []*View
- s.viewMu.Lock()
- views = append(views, s.views...)
- s.views = nil
- s.viewMap = nil
- s.viewMu.Unlock()
- for _, view := range views {
- view.shutdown(ctx)
- }
- event.Log(ctx, "Shutdown session", KeyShutdownSession.Of(s))
-}
-
-func (s *Session) Cache() interface{} {
- return s.cache
-}
-
-func (s *Session) NewView(ctx context.Context, name string, folder span.URI, options *source.Options) (source.View, source.Snapshot, func(), error) {
- s.viewMu.Lock()
- defer s.viewMu.Unlock()
- for _, view := range s.views {
- if span.CompareURI(view.folder, folder) == 0 {
- return nil, nil, nil, source.ErrViewExists
- }
- }
- view, snapshot, release, err := s.createView(ctx, name, folder, options, 0)
- if err != nil {
- return nil, nil, func() {}, err
- }
- s.views = append(s.views, view)
- // we always need to drop the view map
- s.viewMap = make(map[span.URI]*View)
- return view, snapshot, release, nil
-}
-
-func (s *Session) createView(ctx context.Context, name string, folder span.URI, options *source.Options, snapshotID uint64) (*View, *snapshot, func(), error) {
- index := atomic.AddInt64(&viewIndex, 1)
-
- if s.cache.options != nil {
- s.cache.options(options)
- }
-
- // Set the module-specific information.
- ws, err := s.getWorkspaceInformation(ctx, folder, options)
- if err != nil {
- return nil, nil, func() {}, err
- }
- root := folder
- if options.ExpandWorkspaceToModule {
- root, err = findWorkspaceRoot(ctx, root, s, pathExcludedByFilterFunc(root.Filename(), ws.gomodcache, options), options.ExperimentalWorkspaceModule)
- if err != nil {
- return nil, nil, func() {}, err
- }
- }
-
- // Build the gopls workspace, collecting active modules in the view.
- workspace, err := newWorkspace(ctx, root, s, pathExcludedByFilterFunc(root.Filename(), ws.gomodcache, options), ws.userGo111Module == off, options.ExperimentalWorkspaceModule)
- if err != nil {
- return nil, nil, func() {}, err
- }
-
- // We want a true background context and not a detached context here
- // the spans need to be unrelated and no tag values should pollute it.
- baseCtx := event.Detach(xcontext.Detach(ctx))
- backgroundCtx, cancel := context.WithCancel(baseCtx)
-
- v := &View{
- session: s,
- initialWorkspaceLoad: make(chan struct{}),
- initializationSema: make(chan struct{}, 1),
- id: strconv.FormatInt(index, 10),
- options: options,
- baseCtx: baseCtx,
- name: name,
- folder: folder,
- moduleUpgrades: map[string]string{},
- filesByURI: map[span.URI]*fileBase{},
- filesByBase: map[string][]*fileBase{},
- rootURI: root,
- workspaceInformation: *ws,
- }
- v.importsState = &importsState{
- ctx: backgroundCtx,
- processEnv: &imports.ProcessEnv{
- GocmdRunner: s.gocmdRunner,
- },
- }
- v.snapshot = &snapshot{
- id: snapshotID,
- view: v,
- backgroundCtx: backgroundCtx,
- cancel: cancel,
- initializeOnce: &sync.Once{},
- generation: s.cache.store.Generation(generationName(v, 0)),
- packages: make(map[packageKey]*packageHandle),
- ids: make(map[span.URI][]PackageID),
- metadata: make(map[PackageID]*KnownMetadata),
- files: make(map[span.URI]source.VersionedFileHandle),
- goFiles: make(map[parseKey]*parseGoHandle),
- symbols: make(map[span.URI]*symbolHandle),
- importedBy: make(map[PackageID][]PackageID),
- actions: make(map[actionKey]*actionHandle),
- workspacePackages: make(map[PackageID]PackagePath),
- unloadableFiles: make(map[span.URI]struct{}),
- parseModHandles: make(map[span.URI]*parseModHandle),
- parseWorkHandles: make(map[span.URI]*parseWorkHandle),
- modTidyHandles: make(map[span.URI]*modTidyHandle),
- modWhyHandles: make(map[span.URI]*modWhyHandle),
- workspace: workspace,
- }
-
- // Initialize the view without blocking.
- initCtx, initCancel := context.WithCancel(xcontext.Detach(ctx))
- v.initCancelFirstAttempt = initCancel
- snapshot := v.snapshot
- release := snapshot.generation.Acquire()
- go func() {
- defer release()
- snapshot.initialize(initCtx, true)
- }()
- return v, snapshot, snapshot.generation.Acquire(), nil
-}
-
-// View returns the view by name.
-func (s *Session) View(name string) source.View {
- s.viewMu.RLock()
- defer s.viewMu.RUnlock()
- for _, view := range s.views {
- if view.Name() == name {
- return view
- }
- }
- return nil
-}
-
-// ViewOf returns a view corresponding to the given URI.
-// If the file is not already associated with a view, pick one using some heuristics.
-func (s *Session) ViewOf(uri span.URI) (source.View, error) {
- return s.viewOf(uri)
-}
-
-func (s *Session) viewOf(uri span.URI) (*View, error) {
- s.viewMu.RLock()
- defer s.viewMu.RUnlock()
- // Check if we already know this file.
- if v, found := s.viewMap[uri]; found {
- return v, nil
- }
- // Pick the best view for this file and memoize the result.
- if len(s.views) == 0 {
- return nil, fmt.Errorf("no views in session")
- }
- s.viewMap[uri] = bestViewForURI(uri, s.views)
- return s.viewMap[uri], nil
-}
-
-func (s *Session) viewsOf(uri span.URI) []*View {
- s.viewMu.RLock()
- defer s.viewMu.RUnlock()
-
- var views []*View
- for _, view := range s.views {
- if source.InDir(view.folder.Filename(), uri.Filename()) {
- views = append(views, view)
- }
- }
- return views
-}
-
-func (s *Session) Views() []source.View {
- s.viewMu.RLock()
- defer s.viewMu.RUnlock()
- result := make([]source.View, len(s.views))
- for i, v := range s.views {
- result[i] = v
- }
- return result
-}
-
-// bestViewForURI returns the most closely matching view for the given URI
-// out of the given set of views.
-func bestViewForURI(uri span.URI, views []*View) *View {
- // we need to find the best view for this file
- var longest *View
- for _, view := range views {
- if longest != nil && len(longest.Folder()) > len(view.Folder()) {
- continue
- }
- if view.contains(uri) {
- longest = view
- }
- }
- if longest != nil {
- return longest
- }
- // Try our best to return a view that knows the file.
- for _, view := range views {
- if view.knownFile(uri) {
- return view
- }
- }
- // TODO: are there any more heuristics we can use?
- return views[0]
-}
-
-func (s *Session) removeView(ctx context.Context, view *View) error {
- s.viewMu.Lock()
- defer s.viewMu.Unlock()
- i, err := s.dropView(ctx, view)
- if err != nil {
- return err
- }
- // delete this view... we don't care about order but we do want to make
- // sure we can garbage collect the view
- s.views[i] = s.views[len(s.views)-1]
- s.views[len(s.views)-1] = nil
- s.views = s.views[:len(s.views)-1]
- return nil
-}
-
-func (s *Session) updateView(ctx context.Context, view *View, options *source.Options) (*View, error) {
- s.viewMu.Lock()
- defer s.viewMu.Unlock()
-
- // Preserve the snapshot ID if we are recreating the view.
- view.snapshotMu.Lock()
- if view.snapshot == nil {
- view.snapshotMu.Unlock()
- panic("updateView called after View was already shut down")
- }
- snapshotID := view.snapshot.id
- view.snapshotMu.Unlock()
-
- i, err := s.dropView(ctx, view)
- if err != nil {
- return nil, err
- }
-
- v, _, release, err := s.createView(ctx, view.name, view.folder, options, snapshotID)
- release()
-
- if err != nil {
- // we have dropped the old view, but could not create the new one
- // this should not happen and is very bad, but we still need to clean
- // up the view array if it happens
- s.views[i] = s.views[len(s.views)-1]
- s.views[len(s.views)-1] = nil
- s.views = s.views[:len(s.views)-1]
- return nil, err
- }
- // substitute the new view into the array where the old view was
- s.views[i] = v
- return v, nil
-}
-
-func (s *Session) dropView(ctx context.Context, v *View) (int, error) {
- // we always need to drop the view map
- s.viewMap = make(map[span.URI]*View)
- for i := range s.views {
- if v == s.views[i] {
- // we found the view, drop it and return the index it was found at
- s.views[i] = nil
- v.shutdown(ctx)
- return i, nil
- }
- }
- return -1, errors.Errorf("view %s for %v not found", v.Name(), v.Folder())
-}
-
-func (s *Session) ModifyFiles(ctx context.Context, changes []source.FileModification) error {
- _, releases, err := s.DidModifyFiles(ctx, changes)
- for _, release := range releases {
- release()
- }
- return err
-}
-
-type fileChange struct {
- content []byte
- exists bool
- fileHandle source.VersionedFileHandle
-
- // isUnchanged indicates whether the file action is one that does not
- // change the actual contents of the file. Opens and closes should not
- // be treated like other changes, since the file content doesn't change.
- isUnchanged bool
-}
-
-func (s *Session) DidModifyFiles(ctx context.Context, changes []source.FileModification) (map[source.Snapshot][]span.URI, []func(), error) {
- s.viewMu.RLock()
- defer s.viewMu.RUnlock()
- views := make(map[*View]map[span.URI]*fileChange)
- affectedViews := map[span.URI][]*View{}
-
- overlays, err := s.updateOverlays(ctx, changes)
- if err != nil {
- return nil, nil, err
- }
- var forceReloadMetadata bool
- for _, c := range changes {
- if c.Action == source.InvalidateMetadata {
- forceReloadMetadata = true
- }
-
- // Build the list of affected views.
- var changedViews []*View
- for _, view := range s.views {
- // Don't propagate changes that are outside of the view's scope
- // or knowledge.
- if !view.relevantChange(c) {
- continue
- }
- changedViews = append(changedViews, view)
- }
- // If the change is not relevant to any view, but the change is
- // happening in the editor, assign it the most closely matching view.
- if len(changedViews) == 0 {
- if c.OnDisk {
- continue
- }
- bestView, err := s.viewOf(c.URI)
- if err != nil {
- return nil, nil, err
- }
- changedViews = append(changedViews, bestView)
- }
- affectedViews[c.URI] = changedViews
-
- isUnchanged := c.Action == source.Open || c.Action == source.Close
-
- // Apply the changes to all affected views.
- for _, view := range changedViews {
- // Make sure that the file is added to the view.
- _ = view.getFile(c.URI)
- if _, ok := views[view]; !ok {
- views[view] = make(map[span.URI]*fileChange)
- }
- if fh, ok := overlays[c.URI]; ok {
- views[view][c.URI] = &fileChange{
- content: fh.text,
- exists: true,
- fileHandle: fh,
- isUnchanged: isUnchanged,
- }
- } else {
- fsFile, err := s.cache.getFile(ctx, c.URI)
- if err != nil {
- return nil, nil, err
- }
- content, err := fsFile.Read()
- fh := &closedFile{fsFile}
- views[view][c.URI] = &fileChange{
- content: content,
- exists: err == nil,
- fileHandle: fh,
- isUnchanged: isUnchanged,
- }
- }
- }
- }
-
- var releases []func()
- viewToSnapshot := map[*View]*snapshot{}
- for view, changed := range views {
- snapshot, release := view.invalidateContent(ctx, changed, forceReloadMetadata)
- releases = append(releases, release)
- viewToSnapshot[view] = snapshot
- }
-
- // We only want to diagnose each changed file once, in the view to which
- // it "most" belongs. We do this by picking the best view for each URI,
- // and then aggregating the set of snapshots and their URIs (to avoid
- // diagnosing the same snapshot multiple times).
- snapshotURIs := map[source.Snapshot][]span.URI{}
- for _, mod := range changes {
- viewSlice, ok := affectedViews[mod.URI]
- if !ok || len(viewSlice) == 0 {
- continue
- }
- view := bestViewForURI(mod.URI, viewSlice)
- snapshot, ok := viewToSnapshot[view]
- if !ok {
- panic(fmt.Sprintf("no snapshot for view %s", view.Folder()))
- }
- snapshotURIs[snapshot] = append(snapshotURIs[snapshot], mod.URI)
- }
- return snapshotURIs, releases, nil
-}
-
-func (s *Session) ExpandModificationsToDirectories(ctx context.Context, changes []source.FileModification) []source.FileModification {
- s.viewMu.RLock()
- defer s.viewMu.RUnlock()
- var snapshots []*snapshot
- for _, v := range s.views {
- snapshot, release := v.getSnapshot()
- defer release()
- snapshots = append(snapshots, snapshot)
- }
- knownDirs := knownDirectories(ctx, snapshots)
- var result []source.FileModification
- for _, c := range changes {
- if _, ok := knownDirs[c.URI]; !ok {
- result = append(result, c)
- continue
- }
- affectedFiles := knownFilesInDir(ctx, snapshots, c.URI)
- var fileChanges []source.FileModification
- for uri := range affectedFiles {
- fileChanges = append(fileChanges, source.FileModification{
- URI: uri,
- Action: c.Action,
- LanguageID: "",
- OnDisk: c.OnDisk,
- // changes to directories cannot include text or versions
- })
- }
- result = append(result, fileChanges...)
- }
- return result
-}
-
-// knownDirectories returns all of the directories known to the given
-// snapshots, including workspace directories and their subdirectories.
-func knownDirectories(ctx context.Context, snapshots []*snapshot) map[span.URI]struct{} {
- result := map[span.URI]struct{}{}
- for _, snapshot := range snapshots {
- dirs := snapshot.workspace.dirs(ctx, snapshot)
- for _, dir := range dirs {
- result[dir] = struct{}{}
- }
- for _, dir := range snapshot.getKnownSubdirs(dirs) {
- result[dir] = struct{}{}
- }
- }
- return result
-}
-
-// knownFilesInDir returns the files known to the snapshots in the session.
-// It does not respect symlinks.
-func knownFilesInDir(ctx context.Context, snapshots []*snapshot, dir span.URI) map[span.URI]struct{} {
- files := map[span.URI]struct{}{}
-
- for _, snapshot := range snapshots {
- for _, uri := range snapshot.knownFilesInDir(ctx, dir) {
- files[uri] = struct{}{}
- }
- }
- return files
-}
-
-func (s *Session) updateOverlays(ctx context.Context, changes []source.FileModification) (map[span.URI]*overlay, error) {
- s.overlayMu.Lock()
- defer s.overlayMu.Unlock()
-
- for _, c := range changes {
- // Don't update overlays for metadata invalidations.
- if c.Action == source.InvalidateMetadata {
- continue
- }
-
- o, ok := s.overlays[c.URI]
-
- // If the file is not opened in an overlay and the change is on disk,
- // there's no need to update an overlay. If there is an overlay, we
- // may need to update the overlay's saved value.
- if !ok && c.OnDisk {
- continue
- }
-
- // Determine the file kind on open, otherwise, assume it has been cached.
- var kind source.FileKind
- switch c.Action {
- case source.Open:
- kind = source.FileKindForLang(c.LanguageID)
- default:
- if !ok {
- return nil, errors.Errorf("updateOverlays: modifying unopened overlay %v", c.URI)
- }
- kind = o.kind
- }
-
- // Closing a file just deletes its overlay.
- if c.Action == source.Close {
- delete(s.overlays, c.URI)
- continue
- }
-
- // If the file is on disk, check if its content is the same as in the
- // overlay. Saves and on-disk file changes don't come with the file's
- // content.
- text := c.Text
- if text == nil && (c.Action == source.Save || c.OnDisk) {
- if !ok {
- return nil, fmt.Errorf("no known content for overlay for %s", c.Action)
- }
- text = o.text
- }
- // On-disk changes don't come with versions.
- version := c.Version
- if c.OnDisk || c.Action == source.Save {
- version = o.version
- }
- hash := hashContents(text)
- var sameContentOnDisk bool
- switch c.Action {
- case source.Delete:
- // Do nothing. sameContentOnDisk should be false.
- case source.Save:
- // Make sure the version and content (if present) is the same.
- if false && o.version != version { // Client no longer sends the version
- return nil, errors.Errorf("updateOverlays: saving %s at version %v, currently at %v", c.URI, c.Version, o.version)
- }
- if c.Text != nil && o.hash != hash {
- return nil, errors.Errorf("updateOverlays: overlay %s changed on save", c.URI)
- }
- sameContentOnDisk = true
- default:
- fh, err := s.cache.getFile(ctx, c.URI)
- if err != nil {
- return nil, err
- }
- _, readErr := fh.Read()
- sameContentOnDisk = (readErr == nil && fh.FileIdentity().Hash == hash)
- }
- o = &overlay{
- session: s,
- uri: c.URI,
- version: version,
- text: text,
- kind: kind,
- hash: hash,
- saved: sameContentOnDisk,
- }
-
- // When opening files, ensure that we actually have a well-defined view and file kind.
- if c.Action == source.Open {
- view, err := s.ViewOf(o.uri)
- if err != nil {
- return nil, errors.Errorf("updateOverlays: finding view for %s: %v", o.uri, err)
- }
- if kind := view.FileKind(o); kind == source.UnknownKind {
- return nil, errors.Errorf("updateOverlays: unknown file kind for %s", o.uri)
- }
- }
-
- s.overlays[c.URI] = o
- }
-
- // Get the overlays for each change while the session's overlay map is
- // locked.
- overlays := make(map[span.URI]*overlay)
- for _, c := range changes {
- if o, ok := s.overlays[c.URI]; ok {
- overlays[c.URI] = o
- }
- }
- return overlays, nil
-}
-
-func (s *Session) GetFile(ctx context.Context, uri span.URI) (source.FileHandle, error) {
- if overlay := s.readOverlay(uri); overlay != nil {
- return overlay, nil
- }
- // Fall back to the cache-level file system.
- return s.cache.getFile(ctx, uri)
-}
-
-func (s *Session) readOverlay(uri span.URI) *overlay {
- s.overlayMu.Lock()
- defer s.overlayMu.Unlock()
-
- if overlay, ok := s.overlays[uri]; ok {
- return overlay
- }
- return nil
-}
-
-func (s *Session) Overlays() []source.Overlay {
- s.overlayMu.Lock()
- defer s.overlayMu.Unlock()
-
- overlays := make([]source.Overlay, 0, len(s.overlays))
- for _, overlay := range s.overlays {
- overlays = append(overlays, overlay)
- }
- return overlays
-}
-
-func (s *Session) FileWatchingGlobPatterns(ctx context.Context) map[string]struct{} {
- s.viewMu.RLock()
- defer s.viewMu.RUnlock()
- patterns := map[string]struct{}{}
- for _, view := range s.views {
- snapshot, release := view.getSnapshot()
- for k, v := range snapshot.fileWatchingGlobPatterns(ctx) {
- patterns[k] = v
- }
- release()
- }
- return patterns
-}
diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go
deleted file mode 100644
index 900f13f28..000000000
--- a/internal/lsp/cache/snapshot.go
+++ /dev/null
@@ -1,2479 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cache
-
-import (
- "bytes"
- "context"
- "fmt"
- "go/ast"
- "go/token"
- "go/types"
- "io"
- "io/ioutil"
- "os"
- "path/filepath"
- "regexp"
- "sort"
- "strconv"
- "strings"
- "sync"
-
- "golang.org/x/mod/modfile"
- "golang.org/x/mod/module"
- "golang.org/x/mod/semver"
- "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/packages"
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/gocommand"
- "golang.org/x/tools/internal/lsp/debug/log"
- "golang.org/x/tools/internal/lsp/debug/tag"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/memoize"
- "golang.org/x/tools/internal/packagesinternal"
- "golang.org/x/tools/internal/span"
- "golang.org/x/tools/internal/typesinternal"
- errors "golang.org/x/xerrors"
-)
-
-type snapshot struct {
- memoize.Arg // allow as a memoize.Function arg
-
- id uint64
- view *View
-
- cancel func()
- backgroundCtx context.Context
-
- // the cache generation that contains the data for this snapshot.
- generation *memoize.Generation
-
- // The snapshot's initialization state is controlled by the fields below.
- //
- // initializeOnce guards snapshot initialization. Each snapshot is
- // initialized at most once: reinitialization is triggered on later snapshots
- // by invalidating this field.
- initializeOnce *sync.Once
- // initializedErr holds the last error resulting from initialization. If
- // initialization fails, we only retry when the the workspace modules change,
- // to avoid too many go/packages calls.
- initializedErr *source.CriticalError
-
- // mu guards all of the maps in the snapshot, as well as the builtin URI.
- mu sync.Mutex
-
- // builtin pins the AST and package for builtin.go in memory.
- builtin span.URI
-
- // ids maps file URIs to package IDs.
- // It may be invalidated on calls to go/packages.
- ids map[span.URI][]PackageID
-
- // metadata maps file IDs to their associated metadata.
- // It may invalidated on calls to go/packages.
- metadata map[PackageID]*KnownMetadata
-
- // importedBy maps package IDs to the list of packages that import them.
- importedBy map[PackageID][]PackageID
-
- // files maps file URIs to their corresponding FileHandles.
- // It may invalidated when a file's content changes.
- files map[span.URI]source.VersionedFileHandle
-
- // goFiles maps a parseKey to its parseGoHandle.
- goFiles map[parseKey]*parseGoHandle
-
- // TODO(rfindley): consider merging this with files to reduce burden on clone.
- symbols map[span.URI]*symbolHandle
-
- // packages maps a packageKey to a set of packageHandles to which that file belongs.
- // It may be invalidated when a file's content changes.
- packages map[packageKey]*packageHandle
-
- // actions maps an actionkey to its actionHandle.
- actions map[actionKey]*actionHandle
-
- // workspacePackages contains the workspace's packages, which are loaded
- // when the view is created.
- workspacePackages map[PackageID]PackagePath
-
- // unloadableFiles keeps track of files that we've failed to load.
- unloadableFiles map[span.URI]struct{}
-
- // parseModHandles keeps track of any parseModHandles for the snapshot.
- // The handles need not refer to only the view's go.mod file.
- parseModHandles map[span.URI]*parseModHandle
-
- // parseWorkHandles keeps track of any parseWorkHandles for the snapshot.
- // The handles need not refer to only the view's go.work file.
- parseWorkHandles map[span.URI]*parseWorkHandle
-
- // Preserve go.mod-related handles to avoid garbage-collecting the results
- // of various calls to the go command. The handles need not refer to only
- // the view's go.mod file.
- modTidyHandles map[span.URI]*modTidyHandle
- modWhyHandles map[span.URI]*modWhyHandle
-
- workspace *workspace
- workspaceDirHandle *memoize.Handle
-
- // knownSubdirs is the set of subdirectories in the workspace, used to
- // create glob patterns for file watching.
- knownSubdirs map[span.URI]struct{}
- // unprocessedSubdirChanges are any changes that might affect the set of
- // subdirectories in the workspace. They are not reflected to knownSubdirs
- // during the snapshot cloning step as it can slow down cloning.
- unprocessedSubdirChanges []*fileChange
-}
-
-type packageKey struct {
- mode source.ParseMode
- id PackageID
-}
-
-type actionKey struct {
- pkg packageKey
- analyzer *analysis.Analyzer
-}
-
-func (s *snapshot) ID() uint64 {
- return s.id
-}
-
-func (s *snapshot) View() source.View {
- return s.view
-}
-
-func (s *snapshot) BackgroundContext() context.Context {
- return s.backgroundCtx
-}
-
-func (s *snapshot) FileSet() *token.FileSet {
- return s.view.session.cache.fset
-}
-
-func (s *snapshot) ModFiles() []span.URI {
- var uris []span.URI
- for modURI := range s.workspace.getActiveModFiles() {
- uris = append(uris, modURI)
- }
- return uris
-}
-
-func (s *snapshot) WorkFile() span.URI {
- return s.workspace.workFile
-}
-
-func (s *snapshot) Templates() map[span.URI]source.VersionedFileHandle {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- tmpls := map[span.URI]source.VersionedFileHandle{}
- for k, fh := range s.files {
- if s.view.FileKind(fh) == source.Tmpl {
- tmpls[k] = fh
- }
- }
- return tmpls
-}
-
-func (s *snapshot) ValidBuildConfiguration() bool {
- return validBuildConfiguration(s.view.rootURI, &s.view.workspaceInformation, s.workspace.getActiveModFiles())
-}
-
-// workspaceMode describes the way in which the snapshot's workspace should
-// be loaded.
-func (s *snapshot) workspaceMode() workspaceMode {
- var mode workspaceMode
-
- // If the view has an invalid configuration, don't build the workspace
- // module.
- validBuildConfiguration := s.ValidBuildConfiguration()
- if !validBuildConfiguration {
- return mode
- }
- // If the view is not in a module and contains no modules, but still has a
- // valid workspace configuration, do not create the workspace module.
- // It could be using GOPATH or a different build system entirely.
- if len(s.workspace.getActiveModFiles()) == 0 && validBuildConfiguration {
- return mode
- }
- mode |= moduleMode
- options := s.view.Options()
- // The -modfile flag is available for Go versions >= 1.14.
- if options.TempModfile && s.view.workspaceInformation.goversion >= 14 {
- mode |= tempModfile
- }
- return mode
-}
-
-// config returns the configuration used for the snapshot's interaction with
-// the go/packages API. It uses the given working directory.
-//
-// TODO(rstambler): go/packages requires that we do not provide overlays for
-// multiple modules in on config, so buildOverlay needs to filter overlays by
-// module.
-func (s *snapshot) config(ctx context.Context, inv *gocommand.Invocation) *packages.Config {
- s.view.optionsMu.Lock()
- verboseOutput := s.view.options.VerboseOutput
- s.view.optionsMu.Unlock()
-
- cfg := &packages.Config{
- Context: ctx,
- Dir: inv.WorkingDir,
- Env: inv.Env,
- BuildFlags: inv.BuildFlags,
- Mode: packages.NeedName |
- packages.NeedFiles |
- packages.NeedCompiledGoFiles |
- packages.NeedImports |
- packages.NeedDeps |
- packages.NeedTypesSizes |
- packages.NeedModule,
- Fset: s.FileSet(),
- Overlay: s.buildOverlay(),
- ParseFile: func(*token.FileSet, string, []byte) (*ast.File, error) {
- panic("go/packages must not be used to parse files")
- },
- Logf: func(format string, args ...interface{}) {
- if verboseOutput {
- event.Log(ctx, fmt.Sprintf(format, args...))
- }
- },
- Tests: true,
- }
- packagesinternal.SetModFile(cfg, inv.ModFile)
- packagesinternal.SetModFlag(cfg, inv.ModFlag)
- // We want to type check cgo code if go/types supports it.
- if typesinternal.SetUsesCgo(&types.Config{}) {
- cfg.Mode |= packages.LoadMode(packagesinternal.TypecheckCgo)
- }
- packagesinternal.SetGoCmdRunner(cfg, s.view.session.gocmdRunner)
- return cfg
-}
-
-func (s *snapshot) RunGoCommandDirect(ctx context.Context, mode source.InvocationFlags, inv *gocommand.Invocation) (*bytes.Buffer, error) {
- _, inv, cleanup, err := s.goCommandInvocation(ctx, mode, inv)
- if err != nil {
- return nil, err
- }
- defer cleanup()
-
- return s.view.session.gocmdRunner.Run(ctx, *inv)
-}
-
-func (s *snapshot) RunGoCommandPiped(ctx context.Context, mode source.InvocationFlags, inv *gocommand.Invocation, stdout, stderr io.Writer) error {
- _, inv, cleanup, err := s.goCommandInvocation(ctx, mode, inv)
- if err != nil {
- return err
- }
- defer cleanup()
- return s.view.session.gocmdRunner.RunPiped(ctx, *inv, stdout, stderr)
-}
-
-func (s *snapshot) RunGoCommands(ctx context.Context, allowNetwork bool, wd string, run func(invoke func(...string) (*bytes.Buffer, error)) error) (bool, []byte, []byte, error) {
- var flags source.InvocationFlags
- if s.workspaceMode()&tempModfile != 0 {
- flags = source.WriteTemporaryModFile
- } else {
- flags = source.Normal
- }
- if allowNetwork {
- flags |= source.AllowNetwork
- }
- tmpURI, inv, cleanup, err := s.goCommandInvocation(ctx, flags, &gocommand.Invocation{WorkingDir: wd})
- if err != nil {
- return false, nil, nil, err
- }
- defer cleanup()
- invoke := func(args ...string) (*bytes.Buffer, error) {
- inv.Verb = args[0]
- inv.Args = args[1:]
- return s.view.session.gocmdRunner.Run(ctx, *inv)
- }
- if err := run(invoke); err != nil {
- return false, nil, nil, err
- }
- if flags.Mode() != source.WriteTemporaryModFile {
- return false, nil, nil, nil
- }
- var modBytes, sumBytes []byte
- modBytes, err = ioutil.ReadFile(tmpURI.Filename())
- if err != nil && !os.IsNotExist(err) {
- return false, nil, nil, err
- }
- sumBytes, err = ioutil.ReadFile(strings.TrimSuffix(tmpURI.Filename(), ".mod") + ".sum")
- if err != nil && !os.IsNotExist(err) {
- return false, nil, nil, err
- }
- return true, modBytes, sumBytes, nil
-}
-
-func (s *snapshot) goCommandInvocation(ctx context.Context, flags source.InvocationFlags, inv *gocommand.Invocation) (tmpURI span.URI, updatedInv *gocommand.Invocation, cleanup func(), err error) {
- s.view.optionsMu.Lock()
- allowModfileModificationOption := s.view.options.AllowModfileModifications
- allowNetworkOption := s.view.options.AllowImplicitNetworkAccess
- inv.Env = append(append(append(os.Environ(), s.view.options.EnvSlice()...), inv.Env...), "GO111MODULE="+s.view.effectiveGo111Module)
- inv.BuildFlags = append([]string{}, s.view.options.BuildFlags...)
- s.view.optionsMu.Unlock()
- cleanup = func() {} // fallback
-
- // All logic below is for module mode.
- if s.workspaceMode()&moduleMode == 0 {
- return "", inv, cleanup, nil
- }
-
- mode, allowNetwork := flags.Mode(), flags.AllowNetwork()
- if !allowNetwork && !allowNetworkOption {
- inv.Env = append(inv.Env, "GOPROXY=off")
- }
-
- // What follows is rather complicated logic for how to actually run the go
- // command. A word of warning: this is the result of various incremental
- // features added to gopls, and varying behavior of the Go command across Go
- // versions. It can surely be cleaned up significantly, but tread carefully.
- //
- // Roughly speaking we need to resolve four things:
- // - the working directory.
- // - the -mod flag
- // - the -modfile flag
- // - the -workfile flag
- //
- // These are dependent on a number of factors: whether we need to run in a
- // synthetic workspace, whether flags are supported at the current go
- // version, and what we're actually trying to achieve (the
- // source.InvocationFlags).
-
- var modURI span.URI
- // Select the module context to use.
- // If we're type checking, we need to use the workspace context, meaning
- // the main (workspace) module. Otherwise, we should use the module for
- // the passed-in working dir.
- if mode == source.LoadWorkspace {
- switch s.workspace.moduleSource {
- case legacyWorkspace:
- for m := range s.workspace.getActiveModFiles() { // range to access the only element
- modURI = m
- }
- case goWorkWorkspace:
- if s.view.goversion >= 18 {
- break
- }
- // Before go 1.18, the Go command did not natively support go.work files,
- // so we 'fake' them with a workspace module.
- fallthrough
- case fileSystemWorkspace, goplsModWorkspace:
- var tmpDir span.URI
- var err error
- tmpDir, err = s.getWorkspaceDir(ctx)
- if err != nil {
- return "", nil, cleanup, err
- }
- inv.WorkingDir = tmpDir.Filename()
- modURI = span.URIFromPath(filepath.Join(tmpDir.Filename(), "go.mod"))
- }
- } else {
- modURI = s.GoModForFile(span.URIFromPath(inv.WorkingDir))
- }
-
- var modContent []byte
- if modURI != "" {
- modFH, err := s.GetFile(ctx, modURI)
- if err != nil {
- return "", nil, cleanup, err
- }
- modContent, err = modFH.Read()
- if err != nil {
- return "", nil, cleanup, err
- }
- }
-
- vendorEnabled, err := s.vendorEnabled(ctx, modURI, modContent)
- if err != nil {
- return "", nil, cleanup, err
- }
-
- mutableModFlag := ""
- // If the mod flag isn't set, populate it based on the mode and workspace.
- if inv.ModFlag == "" {
- if s.view.goversion >= 16 {
- mutableModFlag = "mod"
- }
-
- switch mode {
- case source.LoadWorkspace, source.Normal:
- if vendorEnabled {
- inv.ModFlag = "vendor"
- } else if !allowModfileModificationOption {
- inv.ModFlag = "readonly"
- } else {
- inv.ModFlag = mutableModFlag
- }
- case source.WriteTemporaryModFile:
- inv.ModFlag = mutableModFlag
- // -mod must be readonly when using go.work files - see issue #48941
- inv.Env = append(inv.Env, "GOWORK=off")
- }
- }
-
- // Only use a temp mod file if the modfile can actually be mutated.
- needTempMod := inv.ModFlag == mutableModFlag
- useTempMod := s.workspaceMode()&tempModfile != 0
- if needTempMod && !useTempMod {
- return "", nil, cleanup, source.ErrTmpModfileUnsupported
- }
-
- // We should use -workfile if:
- // 1. We're not actively trying to mutate a modfile.
- // 2. We have an active go.work file.
- // 3. We're using at least Go 1.18.
- useWorkFile := !needTempMod && s.workspace.moduleSource == goWorkWorkspace && s.view.goversion >= 18
- if useWorkFile {
- // TODO(#51215): build a temp workfile and set GOWORK in the environment.
- } else if useTempMod {
- if modURI == "" {
- return "", nil, cleanup, fmt.Errorf("no go.mod file found in %s", inv.WorkingDir)
- }
- modFH, err := s.GetFile(ctx, modURI)
- if err != nil {
- return "", nil, cleanup, err
- }
- // Use the go.sum if it happens to be available.
- gosum := s.goSum(ctx, modURI)
- tmpURI, cleanup, err = tempModFile(modFH, gosum)
- if err != nil {
- return "", nil, cleanup, err
- }
- inv.ModFile = tmpURI.Filename()
- }
-
- return tmpURI, inv, cleanup, nil
-}
-
-func (s *snapshot) buildOverlay() map[string][]byte {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- overlays := make(map[string][]byte)
- for uri, fh := range s.files {
- overlay, ok := fh.(*overlay)
- if !ok {
- continue
- }
- if overlay.saved {
- continue
- }
- // TODO(rstambler): Make sure not to send overlays outside of the current view.
- overlays[uri.Filename()] = overlay.text
- }
- return overlays
-}
-
-func hashUnsavedOverlays(files map[span.URI]source.VersionedFileHandle) string {
- var unsaved []string
- for uri, fh := range files {
- if overlay, ok := fh.(*overlay); ok && !overlay.saved {
- unsaved = append(unsaved, uri.Filename())
- }
- }
- sort.Strings(unsaved)
- return hashContents([]byte(strings.Join(unsaved, "")))
-}
-
-func (s *snapshot) PackagesForFile(ctx context.Context, uri span.URI, mode source.TypecheckMode, includeTestVariants bool) ([]source.Package, error) {
- ctx = event.Label(ctx, tag.URI.Of(uri))
-
- phs, err := s.packageHandlesForFile(ctx, uri, mode, includeTestVariants)
- if err != nil {
- return nil, err
- }
- var pkgs []source.Package
- for _, ph := range phs {
- pkg, err := ph.check(ctx, s)
- if err != nil {
- return nil, err
- }
- pkgs = append(pkgs, pkg)
- }
- return pkgs, nil
-}
-
-func (s *snapshot) PackageForFile(ctx context.Context, uri span.URI, mode source.TypecheckMode, pkgPolicy source.PackageFilter) (source.Package, error) {
- ctx = event.Label(ctx, tag.URI.Of(uri))
-
- phs, err := s.packageHandlesForFile(ctx, uri, mode, false)
- if err != nil {
- return nil, err
- }
-
- if len(phs) < 1 {
- return nil, errors.Errorf("no packages")
- }
-
- ph := phs[0]
- for _, handle := range phs[1:] {
- switch pkgPolicy {
- case source.WidestPackage:
- if ph == nil || len(handle.CompiledGoFiles()) > len(ph.CompiledGoFiles()) {
- ph = handle
- }
- case source.NarrowestPackage:
- if ph == nil || len(handle.CompiledGoFiles()) < len(ph.CompiledGoFiles()) {
- ph = handle
- }
- }
- }
- if ph == nil {
- return nil, errors.Errorf("no packages in input")
- }
-
- return ph.check(ctx, s)
-}
-
-func (s *snapshot) packageHandlesForFile(ctx context.Context, uri span.URI, mode source.TypecheckMode, includeTestVariants bool) ([]*packageHandle, error) {
- // Check if we should reload metadata for the file. We don't invalidate IDs
- // (though we should), so the IDs will be a better source of truth than the
- // metadata. If there are no IDs for the file, then we should also reload.
- fh, err := s.GetFile(ctx, uri)
- if err != nil {
- return nil, err
- }
- if kind := s.view.FileKind(fh); kind != source.Go {
- return nil, fmt.Errorf("no packages for non-Go file %s (%v)", uri, kind)
- }
- knownIDs, err := s.getOrLoadIDsForURI(ctx, uri)
- if err != nil {
- return nil, err
- }
-
- var phs []*packageHandle
- for _, id := range knownIDs {
- // Filter out any intermediate test variants. We typically aren't
- // interested in these packages for file= style queries.
- if m := s.getMetadata(id); m != nil && m.IsIntermediateTestVariant && !includeTestVariants {
- continue
- }
- var parseModes []source.ParseMode
- switch mode {
- case source.TypecheckAll:
- if s.workspaceParseMode(id) == source.ParseFull {
- parseModes = []source.ParseMode{source.ParseFull}
- } else {
- parseModes = []source.ParseMode{source.ParseExported, source.ParseFull}
- }
- case source.TypecheckFull:
- parseModes = []source.ParseMode{source.ParseFull}
- case source.TypecheckWorkspace:
- parseModes = []source.ParseMode{s.workspaceParseMode(id)}
- }
-
- for _, parseMode := range parseModes {
- ph, err := s.buildPackageHandle(ctx, id, parseMode)
- if err != nil {
- return nil, err
- }
- phs = append(phs, ph)
- }
- }
- return phs, nil
-}
-
-func (s *snapshot) getOrLoadIDsForURI(ctx context.Context, uri span.URI) ([]PackageID, error) {
- knownIDs := s.getIDsForURI(uri)
- reload := len(knownIDs) == 0
- for _, id := range knownIDs {
- // Reload package metadata if any of the metadata has missing
- // dependencies, in case something has changed since the last time we
- // reloaded it.
- if s.noValidMetadataForID(id) {
- reload = true
- break
- }
- // TODO(golang/go#36918): Previously, we would reload any package with
- // missing dependencies. This is expensive and results in too many
- // calls to packages.Load. Determine what we should do instead.
- }
- if reload {
- err := s.load(ctx, false, fileURI(uri))
-
- if !s.useInvalidMetadata() && err != nil {
- return nil, err
- }
- // We've tried to reload and there are still no known IDs for the URI.
- // Return the load error, if there was one.
- knownIDs = s.getIDsForURI(uri)
- if len(knownIDs) == 0 {
- return nil, err
- }
- }
- return knownIDs, nil
-}
-
-// Only use invalid metadata for Go versions >= 1.13. Go 1.12 and below has
-// issues with overlays that will cause confusing error messages if we reuse
-// old metadata.
-func (s *snapshot) useInvalidMetadata() bool {
- return s.view.goversion >= 13 && s.view.Options().ExperimentalUseInvalidMetadata
-}
-
-func (s *snapshot) GetReverseDependencies(ctx context.Context, id string) ([]source.Package, error) {
- if err := s.awaitLoaded(ctx); err != nil {
- return nil, err
- }
- ids := make(map[PackageID]struct{})
- s.transitiveReverseDependencies(PackageID(id), ids)
-
- // Make sure to delete the original package ID from the map.
- delete(ids, PackageID(id))
-
- var pkgs []source.Package
- for id := range ids {
- pkg, err := s.checkedPackage(ctx, id, s.workspaceParseMode(id))
- if err != nil {
- return nil, err
- }
- pkgs = append(pkgs, pkg)
- }
- return pkgs, nil
-}
-
-func (s *snapshot) checkedPackage(ctx context.Context, id PackageID, mode source.ParseMode) (*pkg, error) {
- ph, err := s.buildPackageHandle(ctx, id, mode)
- if err != nil {
- return nil, err
- }
- return ph.check(ctx, s)
-}
-
-// transitiveReverseDependencies populates the ids map with package IDs
-// belonging to the provided package and its transitive reverse dependencies.
-func (s *snapshot) transitiveReverseDependencies(id PackageID, ids map[PackageID]struct{}) {
- if _, ok := ids[id]; ok {
- return
- }
- m := s.getMetadata(id)
- // Only use invalid metadata if we support it.
- if m == nil || !(m.Valid || s.useInvalidMetadata()) {
- return
- }
- ids[id] = struct{}{}
- importedBy := s.getImportedBy(id)
- for _, parentID := range importedBy {
- s.transitiveReverseDependencies(parentID, ids)
- }
-}
-
-func (s *snapshot) getGoFile(key parseKey) *parseGoHandle {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.goFiles[key]
-}
-
-func (s *snapshot) addGoFile(key parseKey, pgh *parseGoHandle) *parseGoHandle {
- s.mu.Lock()
- defer s.mu.Unlock()
- if existing, ok := s.goFiles[key]; ok {
- return existing
- }
- s.goFiles[key] = pgh
- return pgh
-}
-
-func (s *snapshot) getParseModHandle(uri span.URI) *parseModHandle {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.parseModHandles[uri]
-}
-
-func (s *snapshot) getParseWorkHandle(uri span.URI) *parseWorkHandle {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.parseWorkHandles[uri]
-}
-
-func (s *snapshot) getModWhyHandle(uri span.URI) *modWhyHandle {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.modWhyHandles[uri]
-}
-
-func (s *snapshot) getModTidyHandle(uri span.URI) *modTidyHandle {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.modTidyHandles[uri]
-}
-
-func (s *snapshot) getImportedBy(id PackageID) []PackageID {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.getImportedByLocked(id)
-}
-
-func (s *snapshot) getImportedByLocked(id PackageID) []PackageID {
- // If we haven't rebuilt the import graph since creating the snapshot.
- if len(s.importedBy) == 0 {
- s.rebuildImportGraph()
- }
- return s.importedBy[id]
-}
-
-func (s *snapshot) clearAndRebuildImportGraph() {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- // Completely invalidate the original map.
- s.importedBy = make(map[PackageID][]PackageID)
- s.rebuildImportGraph()
-}
-
-func (s *snapshot) rebuildImportGraph() {
- for id, m := range s.metadata {
- for _, importID := range m.Deps {
- s.importedBy[importID] = append(s.importedBy[importID], id)
- }
- }
-}
-
-func (s *snapshot) addPackageHandle(ph *packageHandle) *packageHandle {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- // If the package handle has already been cached,
- // return the cached handle instead of overriding it.
- if ph, ok := s.packages[ph.packageKey()]; ok {
- return ph
- }
- s.packages[ph.packageKey()] = ph
- return ph
-}
-
-func (s *snapshot) workspacePackageIDs() (ids []PackageID) {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- for id := range s.workspacePackages {
- ids = append(ids, id)
- }
- return ids
-}
-
-func (s *snapshot) activePackageIDs() (ids []PackageID) {
- if s.view.Options().MemoryMode == source.ModeNormal {
- return s.workspacePackageIDs()
- }
-
- s.mu.Lock()
- defer s.mu.Unlock()
-
- seen := make(map[PackageID]bool)
- for id := range s.workspacePackages {
- if s.isActiveLocked(id, seen) {
- ids = append(ids, id)
- }
- }
- return ids
-}
-
-func (s *snapshot) isActiveLocked(id PackageID, seen map[PackageID]bool) (active bool) {
- if seen == nil {
- seen = make(map[PackageID]bool)
- }
- if seen, ok := seen[id]; ok {
- return seen
- }
- defer func() {
- seen[id] = active
- }()
- m, ok := s.metadata[id]
- if !ok {
- return false
- }
- for _, cgf := range m.CompiledGoFiles {
- if s.isOpenLocked(cgf) {
- return true
- }
- }
- for _, dep := range m.Deps {
- if s.isActiveLocked(dep, seen) {
- return true
- }
- }
- return false
-}
-
-const fileExtensions = "go,mod,sum,work"
-
-func (s *snapshot) fileWatchingGlobPatterns(ctx context.Context) map[string]struct{} {
- extensions := fileExtensions
- for _, ext := range s.View().Options().TemplateExtensions {
- extensions += "," + ext
- }
- // Work-around microsoft/vscode#100870 by making sure that we are,
- // at least, watching the user's entire workspace. This will still be
- // applied to every folder in the workspace.
- patterns := map[string]struct{}{
- fmt.Sprintf("**/*.{%s}", extensions): {},
- }
- dirs := s.workspace.dirs(ctx, s)
- for _, dir := range dirs {
- dirName := dir.Filename()
-
- // If the directory is within the view's folder, we're already watching
- // it with the pattern above.
- if source.InDir(s.view.folder.Filename(), dirName) {
- continue
- }
- // TODO(rstambler): If microsoft/vscode#3025 is resolved before
- // microsoft/vscode#101042, we will need a work-around for Windows
- // drive letter casing.
- patterns[fmt.Sprintf("%s/**/*.{%s}", dirName, extensions)] = struct{}{}
- }
-
- // Some clients do not send notifications for changes to directories that
- // contain Go code (golang/go#42348). To handle this, explicitly watch all
- // of the directories in the workspace. We find them by adding the
- // directories of every file in the snapshot's workspace directories.
- var dirNames []string
- for _, uri := range s.getKnownSubdirs(dirs) {
- dirNames = append(dirNames, uri.Filename())
- }
- sort.Strings(dirNames)
- if len(dirNames) > 0 {
- patterns[fmt.Sprintf("{%s}", strings.Join(dirNames, ","))] = struct{}{}
- }
- return patterns
-}
-
-// collectAllKnownSubdirs collects all of the subdirectories within the
-// snapshot's workspace directories. None of the workspace directories are
-// included.
-func (s *snapshot) collectAllKnownSubdirs(ctx context.Context) {
- dirs := s.workspace.dirs(ctx, s)
-
- s.mu.Lock()
- defer s.mu.Unlock()
-
- s.knownSubdirs = map[span.URI]struct{}{}
- for uri := range s.files {
- s.addKnownSubdirLocked(uri, dirs)
- }
-}
-
-func (s *snapshot) getKnownSubdirs(wsDirs []span.URI) []span.URI {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- // First, process any pending changes and update the set of known
- // subdirectories.
- for _, c := range s.unprocessedSubdirChanges {
- if c.isUnchanged {
- continue
- }
- if !c.exists {
- s.removeKnownSubdirLocked(c.fileHandle.URI())
- } else {
- s.addKnownSubdirLocked(c.fileHandle.URI(), wsDirs)
- }
- }
- s.unprocessedSubdirChanges = nil
-
- var result []span.URI
- for uri := range s.knownSubdirs {
- result = append(result, uri)
- }
- return result
-}
-
-func (s *snapshot) addKnownSubdirLocked(uri span.URI, dirs []span.URI) {
- dir := filepath.Dir(uri.Filename())
- // First check if the directory is already known, because then we can
- // return early.
- if _, ok := s.knownSubdirs[span.URIFromPath(dir)]; ok {
- return
- }
- var matched span.URI
- for _, wsDir := range dirs {
- if source.InDir(wsDir.Filename(), dir) {
- matched = wsDir
- break
- }
- }
- // Don't watch any directory outside of the workspace directories.
- if matched == "" {
- return
- }
- for {
- if dir == "" || dir == matched.Filename() {
- break
- }
- uri := span.URIFromPath(dir)
- if _, ok := s.knownSubdirs[uri]; ok {
- break
- }
- s.knownSubdirs[uri] = struct{}{}
- dir = filepath.Dir(dir)
- }
-}
-
-func (s *snapshot) removeKnownSubdirLocked(uri span.URI) {
- dir := filepath.Dir(uri.Filename())
- for dir != "" {
- uri := span.URIFromPath(dir)
- if _, ok := s.knownSubdirs[uri]; !ok {
- break
- }
- if info, _ := os.Stat(dir); info == nil {
- delete(s.knownSubdirs, uri)
- }
- dir = filepath.Dir(dir)
- }
-}
-
-// knownFilesInDir returns the files known to the given snapshot that are in
-// the given directory. It does not respect symlinks.
-func (s *snapshot) knownFilesInDir(ctx context.Context, dir span.URI) []span.URI {
- var files []span.URI
- s.mu.Lock()
- defer s.mu.Unlock()
-
- for uri := range s.files {
- if source.InDir(dir.Filename(), uri.Filename()) {
- files = append(files, uri)
- }
- }
- return files
-}
-
-func (s *snapshot) workspacePackageHandles(ctx context.Context) ([]*packageHandle, error) {
- if err := s.awaitLoaded(ctx); err != nil {
- return nil, err
- }
- var phs []*packageHandle
- for _, pkgID := range s.workspacePackageIDs() {
- ph, err := s.buildPackageHandle(ctx, pkgID, s.workspaceParseMode(pkgID))
- if err != nil {
- return nil, err
- }
- phs = append(phs, ph)
- }
- return phs, nil
-}
-
-func (s *snapshot) ActivePackages(ctx context.Context) ([]source.Package, error) {
- phs, err := s.activePackageHandles(ctx)
- if err != nil {
- return nil, err
- }
- var pkgs []source.Package
- for _, ph := range phs {
- pkg, err := ph.check(ctx, s)
- if err != nil {
- return nil, err
- }
- pkgs = append(pkgs, pkg)
- }
- return pkgs, nil
-}
-
-func (s *snapshot) activePackageHandles(ctx context.Context) ([]*packageHandle, error) {
- if err := s.awaitLoaded(ctx); err != nil {
- return nil, err
- }
- var phs []*packageHandle
- for _, pkgID := range s.activePackageIDs() {
- ph, err := s.buildPackageHandle(ctx, pkgID, s.workspaceParseMode(pkgID))
- if err != nil {
- return nil, err
- }
- phs = append(phs, ph)
- }
- return phs, nil
-}
-
-func (s *snapshot) Symbols(ctx context.Context) (map[span.URI][]source.Symbol, error) {
- result := make(map[span.URI][]source.Symbol)
-
- // Keep going on errors, but log the first failure. Partial symbol results
- // are better than no symbol results.
- var firstErr error
- for uri, f := range s.files {
- sh := s.buildSymbolHandle(ctx, f)
- v, err := sh.handle.Get(ctx, s.generation, s)
- if err != nil {
- if firstErr == nil {
- firstErr = err
- }
- continue
- }
- data := v.(*symbolData)
- result[uri] = data.symbols
- }
- if firstErr != nil {
- event.Error(ctx, "getting snapshot symbols", firstErr)
- }
- return result, nil
-}
-
-func (s *snapshot) MetadataForFile(ctx context.Context, uri span.URI) ([]source.Metadata, error) {
- knownIDs, err := s.getOrLoadIDsForURI(ctx, uri)
- if err != nil {
- return nil, err
- }
- var mds []source.Metadata
- for _, id := range knownIDs {
- md := s.getMetadata(id)
- // TODO(rfindley): knownIDs and metadata should be in sync, but existing
- // code is defensive of nil metadata.
- if md != nil {
- mds = append(mds, md)
- }
- }
- return mds, nil
-}
-
-func (s *snapshot) KnownPackages(ctx context.Context) ([]source.Package, error) {
- if err := s.awaitLoaded(ctx); err != nil {
- return nil, err
- }
-
- // The WorkspaceSymbols implementation relies on this function returning
- // workspace packages first.
- ids := s.workspacePackageIDs()
- s.mu.Lock()
- for id := range s.metadata {
- if _, ok := s.workspacePackages[id]; ok {
- continue
- }
- ids = append(ids, id)
- }
- s.mu.Unlock()
-
- var pkgs []source.Package
- for _, id := range ids {
- pkg, err := s.checkedPackage(ctx, id, s.workspaceParseMode(id))
- if err != nil {
- return nil, err
- }
- pkgs = append(pkgs, pkg)
- }
- return pkgs, nil
-}
-
-func (s *snapshot) CachedImportPaths(ctx context.Context) (map[string]source.Package, error) {
- // Don't reload workspace package metadata.
- // This function is meant to only return currently cached information.
- s.AwaitInitialized(ctx)
-
- s.mu.Lock()
- defer s.mu.Unlock()
-
- results := map[string]source.Package{}
- for _, ph := range s.packages {
- cachedPkg, err := ph.cached(s.generation)
- if err != nil {
- continue
- }
- for importPath, newPkg := range cachedPkg.imports {
- if oldPkg, ok := results[string(importPath)]; ok {
- // Using the same trick as NarrowestPackage, prefer non-variants.
- if len(newPkg.compiledGoFiles) < len(oldPkg.(*pkg).compiledGoFiles) {
- results[string(importPath)] = newPkg
- }
- } else {
- results[string(importPath)] = newPkg
- }
- }
- }
- return results, nil
-}
-
-func (s *snapshot) GoModForFile(uri span.URI) span.URI {
- return moduleForURI(s.workspace.activeModFiles, uri)
-}
-
-func moduleForURI(modFiles map[span.URI]struct{}, uri span.URI) span.URI {
- var match span.URI
- for modURI := range modFiles {
- if !source.InDir(dirURI(modURI).Filename(), uri.Filename()) {
- continue
- }
- if len(modURI) > len(match) {
- match = modURI
- }
- }
- return match
-}
-
-func (s *snapshot) getPackage(id PackageID, mode source.ParseMode) *packageHandle {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- key := packageKey{
- id: id,
- mode: mode,
- }
- return s.packages[key]
-}
-
-func (s *snapshot) getSymbolHandle(uri span.URI) *symbolHandle {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- return s.symbols[uri]
-}
-
-func (s *snapshot) addSymbolHandle(sh *symbolHandle) *symbolHandle {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- uri := sh.fh.URI()
- // If the package handle has already been cached,
- // return the cached handle instead of overriding it.
- if sh, ok := s.symbols[uri]; ok {
- return sh
- }
- s.symbols[uri] = sh
- return sh
-}
-
-func (s *snapshot) getActionHandle(id PackageID, m source.ParseMode, a *analysis.Analyzer) *actionHandle {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- key := actionKey{
- pkg: packageKey{
- id: id,
- mode: m,
- },
- analyzer: a,
- }
- return s.actions[key]
-}
-
-func (s *snapshot) addActionHandle(ah *actionHandle) *actionHandle {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- key := actionKey{
- analyzer: ah.analyzer,
- pkg: packageKey{
- id: ah.pkg.m.ID,
- mode: ah.pkg.mode,
- },
- }
- if ah, ok := s.actions[key]; ok {
- return ah
- }
- s.actions[key] = ah
- return ah
-}
-
-func (s *snapshot) getIDsForURI(uri span.URI) []PackageID {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- return s.ids[uri]
-}
-
-func (s *snapshot) getMetadata(id PackageID) *KnownMetadata {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- return s.metadata[id]
-}
-
-func (s *snapshot) shouldLoad(scope interface{}) bool {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- switch scope := scope.(type) {
- case PackagePath:
- var meta *KnownMetadata
- for _, m := range s.metadata {
- if m.PkgPath != scope {
- continue
- }
- meta = m
- }
- if meta == nil || meta.ShouldLoad {
- return true
- }
- return false
- case fileURI:
- uri := span.URI(scope)
- ids := s.ids[uri]
- if len(ids) == 0 {
- return true
- }
- for _, id := range ids {
- m, ok := s.metadata[id]
- if !ok || m.ShouldLoad {
- return true
- }
- }
- return false
- default:
- return true
- }
-}
-
-func (s *snapshot) clearShouldLoad(scope interface{}) {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- switch scope := scope.(type) {
- case PackagePath:
- var meta *KnownMetadata
- for _, m := range s.metadata {
- if m.PkgPath == scope {
- meta = m
- }
- }
- if meta == nil {
- return
- }
- meta.ShouldLoad = false
- case fileURI:
- uri := span.URI(scope)
- ids := s.ids[uri]
- if len(ids) == 0 {
- return
- }
- for _, id := range ids {
- if m, ok := s.metadata[id]; ok {
- m.ShouldLoad = false
- }
- }
- }
-}
-
-// noValidMetadataForURILocked reports whether there is any valid metadata for
-// the given URI.
-func (s *snapshot) noValidMetadataForURILocked(uri span.URI) bool {
- ids, ok := s.ids[uri]
- if !ok {
- return true
- }
- for _, id := range ids {
- if m, ok := s.metadata[id]; ok && m.Valid {
- return false
- }
- }
- return true
-}
-
-// noValidMetadataForID reports whether there is no valid metadata for the
-// given ID.
-func (s *snapshot) noValidMetadataForID(id PackageID) bool {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.noValidMetadataForIDLocked(id)
-}
-
-func (s *snapshot) noValidMetadataForIDLocked(id PackageID) bool {
- m := s.metadata[id]
- return m == nil || !m.Valid
-}
-
-// updateIDForURIsLocked adds the given ID to the set of known IDs for the given URI.
-// Any existing invalid IDs are removed from the set of known IDs. IDs that are
-// not "command-line-arguments" are preferred, so if a new ID comes in for a
-// URI that previously only had "command-line-arguments", the new ID will
-// replace the "command-line-arguments" ID.
-func (s *snapshot) updateIDForURIsLocked(id PackageID, uris map[span.URI]struct{}) {
- for uri := range uris {
- // Collect the new set of IDs, preserving any valid existing IDs.
- newIDs := []PackageID{id}
- for _, existingID := range s.ids[uri] {
- // Don't set duplicates of the same ID.
- if existingID == id {
- continue
- }
- // If the package previously only had a command-line-arguments ID,
- // delete the command-line-arguments workspace package.
- if source.IsCommandLineArguments(string(existingID)) {
- delete(s.workspacePackages, existingID)
- continue
- }
- // If the metadata for an existing ID is invalid, and we are
- // setting metadata for a new, valid ID--don't preserve the old ID.
- if m, ok := s.metadata[existingID]; !ok || !m.Valid {
- continue
- }
- newIDs = append(newIDs, existingID)
- }
- sort.Slice(newIDs, func(i, j int) bool {
- return newIDs[i] < newIDs[j]
- })
- s.ids[uri] = newIDs
- }
-}
-
-func (s *snapshot) isWorkspacePackage(id PackageID) bool {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- _, ok := s.workspacePackages[id]
- return ok
-}
-
-func (s *snapshot) FindFile(uri span.URI) source.VersionedFileHandle {
- f := s.view.getFile(uri)
-
- s.mu.Lock()
- defer s.mu.Unlock()
-
- return s.files[f.URI()]
-}
-
-// GetVersionedFile returns a File for the given URI. If the file is unknown it
-// is added to the managed set.
-//
-// GetVersionedFile succeeds even if the file does not exist. A non-nil error return
-// indicates some type of internal error, for example if ctx is cancelled.
-func (s *snapshot) GetVersionedFile(ctx context.Context, uri span.URI) (source.VersionedFileHandle, error) {
- f := s.view.getFile(uri)
-
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.getFileLocked(ctx, f)
-}
-
-// GetFile implements the fileSource interface by wrapping GetVersionedFile.
-func (s *snapshot) GetFile(ctx context.Context, uri span.URI) (source.FileHandle, error) {
- return s.GetVersionedFile(ctx, uri)
-}
-
-func (s *snapshot) getFileLocked(ctx context.Context, f *fileBase) (source.VersionedFileHandle, error) {
- if fh, ok := s.files[f.URI()]; ok {
- return fh, nil
- }
-
- fh, err := s.view.session.cache.getFile(ctx, f.URI())
- if err != nil {
- return nil, err
- }
- closed := &closedFile{fh}
- s.files[f.URI()] = closed
- return closed, nil
-}
-
-func (s *snapshot) IsOpen(uri span.URI) bool {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.isOpenLocked(uri)
-
-}
-
-func (s *snapshot) openFiles() []source.VersionedFileHandle {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- var open []source.VersionedFileHandle
- for _, fh := range s.files {
- if s.isOpenLocked(fh.URI()) {
- open = append(open, fh)
- }
- }
- return open
-}
-
-func (s *snapshot) isOpenLocked(uri span.URI) bool {
- _, open := s.files[uri].(*overlay)
- return open
-}
-
-func (s *snapshot) awaitLoaded(ctx context.Context) error {
- loadErr := s.awaitLoadedAllErrors(ctx)
-
- s.mu.Lock()
- defer s.mu.Unlock()
-
- // If we still have absolutely no metadata, check if the view failed to
- // initialize and return any errors.
- if s.useInvalidMetadata() && len(s.metadata) > 0 {
- return nil
- }
- for _, m := range s.metadata {
- if m.Valid {
- return nil
- }
- }
- if loadErr != nil {
- return loadErr.MainError
- }
- return nil
-}
-
-func (s *snapshot) GetCriticalError(ctx context.Context) *source.CriticalError {
- loadErr := s.awaitLoadedAllErrors(ctx)
- if loadErr != nil && errors.Is(loadErr.MainError, context.Canceled) {
- return nil
- }
-
- // Even if packages didn't fail to load, we still may want to show
- // additional warnings.
- if loadErr == nil {
- wsPkgs, _ := s.ActivePackages(ctx)
- if msg := shouldShowAdHocPackagesWarning(s, wsPkgs); msg != "" {
- return &source.CriticalError{
- MainError: errors.New(msg),
- }
- }
- // Even if workspace packages were returned, there still may be an error
- // with the user's workspace layout. Workspace packages that only have the
- // ID "command-line-arguments" are usually a symptom of a bad workspace
- // configuration.
- if containsCommandLineArguments(wsPkgs) {
- return s.workspaceLayoutError(ctx)
- }
- return nil
- }
-
- if errMsg := loadErr.MainError.Error(); strings.Contains(errMsg, "cannot find main module") || strings.Contains(errMsg, "go.mod file not found") {
- return s.workspaceLayoutError(ctx)
- }
- return loadErr
-}
-
-const adHocPackagesWarning = `You are outside of a module and outside of $GOPATH/src.
-If you are using modules, please open your editor to a directory in your module.
-If you believe this warning is incorrect, please file an issue: https://github.com/golang/go/issues/new.`
-
-func shouldShowAdHocPackagesWarning(snapshot source.Snapshot, pkgs []source.Package) string {
- if snapshot.ValidBuildConfiguration() {
- return ""
- }
- for _, pkg := range pkgs {
- if len(pkg.MissingDependencies()) > 0 {
- return adHocPackagesWarning
- }
- }
- return ""
-}
-
-func containsCommandLineArguments(pkgs []source.Package) bool {
- for _, pkg := range pkgs {
- if source.IsCommandLineArguments(pkg.ID()) {
- return true
- }
- }
- return false
-}
-
-func (s *snapshot) awaitLoadedAllErrors(ctx context.Context) *source.CriticalError {
- // Do not return results until the snapshot's view has been initialized.
- s.AwaitInitialized(ctx)
-
- // TODO(rstambler): Should we be more careful about returning the
- // initialization error? Is it possible for the initialization error to be
- // corrected without a successful reinitialization?
- s.mu.Lock()
- initializedErr := s.initializedErr
- s.mu.Unlock()
- if initializedErr != nil {
- return initializedErr
- }
-
- if ctx.Err() != nil {
- return &source.CriticalError{MainError: ctx.Err()}
- }
-
- if err := s.reloadWorkspace(ctx); err != nil {
- diags, _ := s.extractGoCommandErrors(ctx, err.Error())
- return &source.CriticalError{
- MainError: err,
- DiagList: diags,
- }
- }
- if err := s.reloadOrphanedFiles(ctx); err != nil {
- diags, _ := s.extractGoCommandErrors(ctx, err.Error())
- return &source.CriticalError{
- MainError: err,
- DiagList: diags,
- }
- }
- return nil
-}
-
-func (s *snapshot) getInitializationError(ctx context.Context) *source.CriticalError {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- return s.initializedErr
-}
-
-func (s *snapshot) AwaitInitialized(ctx context.Context) {
- select {
- case <-ctx.Done():
- return
- case <-s.view.initialWorkspaceLoad:
- }
- // We typically prefer to run something as intensive as the IWL without
- // blocking. I'm not sure if there is a way to do that here.
- s.initialize(ctx, false)
-}
-
-// reloadWorkspace reloads the metadata for all invalidated workspace packages.
-func (s *snapshot) reloadWorkspace(ctx context.Context) error {
- // See which of the workspace packages are missing metadata.
- s.mu.Lock()
- missingMetadata := len(s.workspacePackages) == 0 || len(s.metadata) == 0
- pkgPathSet := map[PackagePath]struct{}{}
- for id, pkgPath := range s.workspacePackages {
- if m, ok := s.metadata[id]; ok && m.Valid {
- continue
- }
- missingMetadata = true
-
- // Don't try to reload "command-line-arguments" directly.
- if source.IsCommandLineArguments(string(pkgPath)) {
- continue
- }
- pkgPathSet[pkgPath] = struct{}{}
- }
- s.mu.Unlock()
-
- // If the view's build configuration is invalid, we cannot reload by
- // package path. Just reload the directory instead.
- if missingMetadata && !s.ValidBuildConfiguration() {
- return s.load(ctx, false, viewLoadScope("LOAD_INVALID_VIEW"))
- }
-
- if len(pkgPathSet) == 0 {
- return nil
- }
-
- var pkgPaths []interface{}
- for pkgPath := range pkgPathSet {
- pkgPaths = append(pkgPaths, pkgPath)
- }
- return s.load(ctx, false, pkgPaths...)
-}
-
-func (s *snapshot) reloadOrphanedFiles(ctx context.Context) error {
- // When we load ./... or a package path directly, we may not get packages
- // that exist only in overlays. As a workaround, we search all of the files
- // available in the snapshot and reload their metadata individually using a
- // file= query if the metadata is unavailable.
- files := s.orphanedFiles()
-
- // Files without a valid package declaration can't be loaded. Don't try.
- var scopes []interface{}
- for _, file := range files {
- pgf, err := s.ParseGo(ctx, file, source.ParseHeader)
- if err != nil {
- continue
- }
- if !pgf.File.Package.IsValid() {
- continue
- }
- scopes = append(scopes, fileURI(file.URI()))
- }
-
- if len(scopes) == 0 {
- return nil
- }
-
- // The regtests match this exact log message, keep them in sync.
- event.Log(ctx, "reloadOrphanedFiles reloading", tag.Query.Of(scopes))
- err := s.load(ctx, false, scopes...)
-
- // If we failed to load some files, i.e. they have no metadata,
- // mark the failures so we don't bother retrying until the file's
- // content changes.
- //
- // TODO(rstambler): This may be an overestimate if the load stopped
- // early for an unrelated errors. Add a fallback?
- //
- // Check for context cancellation so that we don't incorrectly mark files
- // as unloadable, but don't return before setting all workspace packages.
- if ctx.Err() == nil && err != nil {
- event.Error(ctx, "reloadOrphanedFiles: failed to load", err, tag.Query.Of(scopes))
- s.mu.Lock()
- for _, scope := range scopes {
- uri := span.URI(scope.(fileURI))
- if s.noValidMetadataForURILocked(uri) {
- s.unloadableFiles[uri] = struct{}{}
- }
- }
- s.mu.Unlock()
- }
- return nil
-}
-
-func (s *snapshot) orphanedFiles() []source.VersionedFileHandle {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- var files []source.VersionedFileHandle
- for uri, fh := range s.files {
- // Don't try to reload metadata for go.mod files.
- if s.view.FileKind(fh) != source.Go {
- continue
- }
- // If the URI doesn't belong to this view, then it's not in a workspace
- // package and should not be reloaded directly.
- if !contains(s.view.session.viewsOf(uri), s.view) {
- continue
- }
- // If the file is not open and is in a vendor directory, don't treat it
- // like a workspace package.
- if _, ok := fh.(*overlay); !ok && inVendor(uri) {
- continue
- }
- // Don't reload metadata for files we've already deemed unloadable.
- if _, ok := s.unloadableFiles[uri]; ok {
- continue
- }
- if s.noValidMetadataForURILocked(uri) {
- files = append(files, fh)
- }
- }
- return files
-}
-
-func contains(views []*View, view *View) bool {
- for _, v := range views {
- if v == view {
- return true
- }
- }
- return false
-}
-
-func inVendor(uri span.URI) bool {
- if !strings.Contains(string(uri), "/vendor/") {
- return false
- }
- // Only packages in _subdirectories_ of /vendor/ are considered vendored
- // (/vendor/a/foo.go is vendored, /vendor/foo.go is not).
- split := strings.Split(string(uri), "/vendor/")
- if len(split) < 2 {
- return false
- }
- return strings.Contains(split[1], "/")
-}
-
-func generationName(v *View, snapshotID uint64) string {
- return fmt.Sprintf("v%v/%v", v.id, snapshotID)
-}
-
-// checkSnapshotLocked verifies that some invariants are preserved on the
-// snapshot.
-func checkSnapshotLocked(ctx context.Context, s *snapshot) {
- // Check that every go file for a workspace package is identified as
- // belonging to that workspace package.
- for wsID := range s.workspacePackages {
- if m, ok := s.metadata[wsID]; ok {
- for _, uri := range m.GoFiles {
- found := false
- for _, id := range s.ids[uri] {
- if id == wsID {
- found = true
- break
- }
- }
- if !found {
- log.Error.Logf(ctx, "workspace package %v not associated with %v", wsID, uri)
- }
- }
- }
- }
-}
-
-// unappliedChanges is a file source that handles an uncloned snapshot.
-type unappliedChanges struct {
- originalSnapshot *snapshot
- changes map[span.URI]*fileChange
-}
-
-func (ac *unappliedChanges) GetFile(ctx context.Context, uri span.URI) (source.FileHandle, error) {
- if c, ok := ac.changes[uri]; ok {
- return c.fileHandle, nil
- }
- return ac.originalSnapshot.GetFile(ctx, uri)
-}
-
-func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileChange, forceReloadMetadata bool) *snapshot {
- var vendorChanged bool
- newWorkspace, workspaceChanged, workspaceReload := s.workspace.invalidate(ctx, changes, &unappliedChanges{
- originalSnapshot: s,
- changes: changes,
- })
-
- s.mu.Lock()
- defer s.mu.Unlock()
-
- checkSnapshotLocked(ctx, s)
-
- newGen := s.view.session.cache.store.Generation(generationName(s.view, s.id+1))
- bgCtx, cancel := context.WithCancel(bgCtx)
- result := &snapshot{
- id: s.id + 1,
- generation: newGen,
- view: s.view,
- backgroundCtx: bgCtx,
- cancel: cancel,
- builtin: s.builtin,
- initializeOnce: s.initializeOnce,
- initializedErr: s.initializedErr,
- ids: make(map[span.URI][]PackageID, len(s.ids)),
- importedBy: make(map[PackageID][]PackageID, len(s.importedBy)),
- metadata: make(map[PackageID]*KnownMetadata, len(s.metadata)),
- packages: make(map[packageKey]*packageHandle, len(s.packages)),
- actions: make(map[actionKey]*actionHandle, len(s.actions)),
- files: make(map[span.URI]source.VersionedFileHandle, len(s.files)),
- goFiles: make(map[parseKey]*parseGoHandle, len(s.goFiles)),
- symbols: make(map[span.URI]*symbolHandle, len(s.symbols)),
- workspacePackages: make(map[PackageID]PackagePath, len(s.workspacePackages)),
- unloadableFiles: make(map[span.URI]struct{}, len(s.unloadableFiles)),
- parseModHandles: make(map[span.URI]*parseModHandle, len(s.parseModHandles)),
- parseWorkHandles: make(map[span.URI]*parseWorkHandle, len(s.parseWorkHandles)),
- modTidyHandles: make(map[span.URI]*modTidyHandle, len(s.modTidyHandles)),
- modWhyHandles: make(map[span.URI]*modWhyHandle, len(s.modWhyHandles)),
- knownSubdirs: make(map[span.URI]struct{}, len(s.knownSubdirs)),
- workspace: newWorkspace,
- }
-
- if !workspaceChanged && s.workspaceDirHandle != nil {
- result.workspaceDirHandle = s.workspaceDirHandle
- newGen.Inherit(s.workspaceDirHandle)
- }
-
- // Copy all of the FileHandles.
- for k, v := range s.files {
- result.files[k] = v
- }
- for k, v := range s.symbols {
- if change, ok := changes[k]; ok {
- if change.exists {
- result.symbols[k] = result.buildSymbolHandle(ctx, change.fileHandle)
- }
- continue
- }
- newGen.Inherit(v.handle)
- result.symbols[k] = v
- }
-
- // Copy the set of unloadable files.
- for k, v := range s.unloadableFiles {
- result.unloadableFiles[k] = v
- }
- // Copy all of the modHandles.
- for k, v := range s.parseModHandles {
- result.parseModHandles[k] = v
- }
- // Copy all of the parseWorkHandles.
- for k, v := range s.parseWorkHandles {
- result.parseWorkHandles[k] = v
- }
-
- for k, v := range s.goFiles {
- if _, ok := changes[k.file.URI]; ok {
- continue
- }
- newGen.Inherit(v.handle)
- result.goFiles[k] = v
- }
-
- // Copy all of the go.mod-related handles. They may be invalidated later,
- // so we inherit them at the end of the function.
- for k, v := range s.modTidyHandles {
- if _, ok := changes[k]; ok {
- continue
- }
- result.modTidyHandles[k] = v
- }
- for k, v := range s.modWhyHandles {
- if _, ok := changes[k]; ok {
- continue
- }
- result.modWhyHandles[k] = v
- }
-
- // Add all of the known subdirectories, but don't update them for the
- // changed files. We need to rebuild the workspace module to know the
- // true set of known subdirectories, but we don't want to do that in clone.
- for k, v := range s.knownSubdirs {
- result.knownSubdirs[k] = v
- }
- for _, c := range changes {
- result.unprocessedSubdirChanges = append(result.unprocessedSubdirChanges, c)
- }
-
- // directIDs keeps track of package IDs that have directly changed.
- // It maps id->invalidateMetadata.
- directIDs := map[PackageID]bool{}
-
- // Invalidate all package metadata if the workspace module has changed.
- if workspaceReload {
- for k := range s.metadata {
- directIDs[k] = true
- }
- }
-
- changedPkgNames := map[PackageID]struct{}{}
- anyImportDeleted := false
- for uri, change := range changes {
- // Maybe reinitialize the view if we see a change in the vendor
- // directory.
- if inVendor(uri) {
- vendorChanged = true
- }
-
- // The original FileHandle for this URI is cached on the snapshot.
- originalFH := s.files[uri]
-
- // Check if the file's package name or imports have changed,
- // and if so, invalidate this file's packages' metadata.
- var shouldInvalidateMetadata, pkgNameChanged, importDeleted bool
- if !isGoMod(uri) {
- shouldInvalidateMetadata, pkgNameChanged, importDeleted = s.shouldInvalidateMetadata(ctx, result, originalFH, change.fileHandle)
- }
- invalidateMetadata := forceReloadMetadata || workspaceReload || shouldInvalidateMetadata
- anyImportDeleted = anyImportDeleted || importDeleted
-
- // Mark all of the package IDs containing the given file.
- // TODO: if the file has moved into a new package, we should invalidate that too.
- filePackageIDs := guessPackageIDsForURI(uri, s.ids)
- if pkgNameChanged {
- for _, id := range filePackageIDs {
- changedPkgNames[id] = struct{}{}
- }
- }
- for _, id := range filePackageIDs {
- directIDs[id] = directIDs[id] || invalidateMetadata
- }
-
- // Invalidate the previous modTidyHandle if any of the files have been
- // saved or if any of the metadata has been invalidated.
- if invalidateMetadata || fileWasSaved(originalFH, change.fileHandle) {
- // TODO(rstambler): Only delete mod handles for which the
- // withoutURI is relevant.
- for k := range s.modTidyHandles {
- delete(result.modTidyHandles, k)
- }
- for k := range s.modWhyHandles {
- delete(result.modWhyHandles, k)
- }
- }
- delete(result.parseModHandles, uri)
- delete(result.parseWorkHandles, uri)
- // Handle the invalidated file; it may have new contents or not exist.
- if !change.exists {
- delete(result.files, uri)
- } else {
- result.files[uri] = change.fileHandle
- }
-
- // Make sure to remove the changed file from the unloadable set.
- delete(result.unloadableFiles, uri)
- }
-
- // Deleting an import can cause list errors due to import cycles to be
- // resolved. The best we can do without parsing the list error message is to
- // hope that list errors may have been resolved by a deleted import.
- //
- // We could do better by parsing the list error message. We already do this
- // to assign a better range to the list error, but for such critical
- // functionality as metadata, it's better to be conservative until it proves
- // impractical.
- //
- // We could also do better by looking at which imports were deleted and
- // trying to find cycles they are involved in. This fails when the file goes
- // from an unparseable state to a parseable state, as we don't have a
- // starting point to compare with.
- if anyImportDeleted {
- for id, metadata := range s.metadata {
- if len(metadata.Errors) > 0 {
- directIDs[id] = true
- }
- }
- }
-
- // Invalidate reverse dependencies too.
- // TODO(heschi): figure out the locking model and use transitiveReverseDeps?
- // idsToInvalidate keeps track of transitive reverse dependencies.
- // If an ID is present in the map, invalidate its types.
- // If an ID's value is true, invalidate its metadata too.
- idsToInvalidate := map[PackageID]bool{}
- var addRevDeps func(PackageID, bool)
- addRevDeps = func(id PackageID, invalidateMetadata bool) {
- current, seen := idsToInvalidate[id]
- newInvalidateMetadata := current || invalidateMetadata
-
- // If we've already seen this ID, and the value of invalidate
- // metadata has not changed, we can return early.
- if seen && current == newInvalidateMetadata {
- return
- }
- idsToInvalidate[id] = newInvalidateMetadata
- for _, rid := range s.getImportedByLocked(id) {
- addRevDeps(rid, invalidateMetadata)
- }
- }
- for id, invalidateMetadata := range directIDs {
- addRevDeps(id, invalidateMetadata)
- }
-
- // Copy the package type information.
- for k, v := range s.packages {
- if _, ok := idsToInvalidate[k.id]; ok {
- continue
- }
- newGen.Inherit(v.handle)
- result.packages[k] = v
- }
- // Copy the package analysis information.
- for k, v := range s.actions {
- if _, ok := idsToInvalidate[k.pkg.id]; ok {
- continue
- }
- newGen.Inherit(v.handle)
- result.actions[k] = v
- }
-
- // If the workspace mode has changed, we must delete all metadata, as it
- // is unusable and may produce confusing or incorrect diagnostics.
- // If a file has been deleted, we must delete metadata all packages
- // containing that file.
- workspaceModeChanged := s.workspaceMode() != result.workspaceMode()
- skipID := map[PackageID]bool{}
- for _, c := range changes {
- if c.exists {
- continue
- }
- // The file has been deleted.
- if ids, ok := s.ids[c.fileHandle.URI()]; ok {
- for _, id := range ids {
- skipID[id] = true
- }
- }
- }
-
- // Collect all of the IDs that are reachable from the workspace packages.
- // Any unreachable IDs will have their metadata deleted outright.
- reachableID := map[PackageID]bool{}
- var addForwardDeps func(PackageID)
- addForwardDeps = func(id PackageID) {
- if reachableID[id] {
- return
- }
- reachableID[id] = true
- m, ok := s.metadata[id]
- if !ok {
- return
- }
- for _, depID := range m.Deps {
- addForwardDeps(depID)
- }
- }
- for id := range s.workspacePackages {
- addForwardDeps(id)
- }
-
- // Copy the URI to package ID mappings, skipping only those URIs whose
- // metadata will be reloaded in future calls to load.
- deleteInvalidMetadata := forceReloadMetadata || workspaceModeChanged
- idsInSnapshot := map[PackageID]bool{} // track all known IDs
- for uri, ids := range s.ids {
- for _, id := range ids {
- invalidateMetadata := idsToInvalidate[id]
- if skipID[id] || (invalidateMetadata && deleteInvalidMetadata) {
- continue
- }
- // The ID is not reachable from any workspace package, so it should
- // be deleted.
- if !reachableID[id] {
- continue
- }
- idsInSnapshot[id] = true
- result.ids[uri] = append(result.ids[uri], id)
- }
- }
-
- // Copy the package metadata. We only need to invalidate packages directly
- // containing the affected file, and only if it changed in a relevant way.
- for k, v := range s.metadata {
- if !idsInSnapshot[k] {
- // Delete metadata for IDs that are no longer reachable from files
- // in the snapshot.
- continue
- }
- invalidateMetadata := idsToInvalidate[k]
- // Mark invalidated metadata rather than deleting it outright.
- result.metadata[k] = &KnownMetadata{
- Metadata: v.Metadata,
- Valid: v.Valid && !invalidateMetadata,
- ShouldLoad: v.ShouldLoad || invalidateMetadata,
- }
- }
-
- // Copy the set of initially loaded packages.
- for id, pkgPath := range s.workspacePackages {
- // Packages with the id "command-line-arguments" are generated by the
- // go command when the user is outside of GOPATH and outside of a
- // module. Do not cache them as workspace packages for longer than
- // necessary.
- if source.IsCommandLineArguments(string(id)) {
- if invalidateMetadata, ok := idsToInvalidate[id]; invalidateMetadata && ok {
- continue
- }
- }
-
- // If all the files we know about in a package have been deleted,
- // the package is gone and we should no longer try to load it.
- if m := s.metadata[id]; m != nil {
- hasFiles := false
- for _, uri := range s.metadata[id].GoFiles {
- // For internal tests, we need _test files, not just the normal
- // ones. External tests only have _test files, but we can check
- // them anyway.
- if m.ForTest != "" && !strings.HasSuffix(string(uri), "_test.go") {
- continue
- }
- if _, ok := result.files[uri]; ok {
- hasFiles = true
- break
- }
- }
- if !hasFiles {
- continue
- }
- }
-
- // If the package name of a file in the package has changed, it's
- // possible that the package ID may no longer exist. Delete it from
- // the set of workspace packages, on the assumption that we will add it
- // back when the relevant files are reloaded.
- if _, ok := changedPkgNames[id]; ok {
- continue
- }
-
- result.workspacePackages[id] = pkgPath
- }
-
- // Inherit all of the go.mod-related handles.
- for _, v := range result.modTidyHandles {
- newGen.Inherit(v.handle)
- }
- for _, v := range result.modWhyHandles {
- newGen.Inherit(v.handle)
- }
- for _, v := range result.parseModHandles {
- newGen.Inherit(v.handle)
- }
- for _, v := range result.parseWorkHandles {
- newGen.Inherit(v.handle)
- }
- // Don't bother copying the importedBy graph,
- // as it changes each time we update metadata.
-
- // If the snapshot's workspace mode has changed, the packages loaded using
- // the previous mode are no longer relevant, so clear them out.
- if workspaceModeChanged {
- result.workspacePackages = map[PackageID]PackagePath{}
- }
-
- // The snapshot may need to be reinitialized.
- if workspaceReload || vendorChanged {
- if workspaceChanged || result.initializedErr != nil {
- result.initializeOnce = &sync.Once{}
- }
- }
- return result
-}
-
-// guessPackageIDsForURI returns all packages related to uri. If we haven't
-// seen this URI before, we guess based on files in the same directory. This
-// is of course incorrect in build systems where packages are not organized by
-// directory.
-func guessPackageIDsForURI(uri span.URI, known map[span.URI][]PackageID) []PackageID {
- packages := known[uri]
- if len(packages) > 0 {
- // We've seen this file before.
- return packages
- }
- // This is a file we don't yet know about. Guess relevant packages by
- // considering files in the same directory.
-
- // Cache of FileInfo to avoid unnecessary stats for multiple files in the
- // same directory.
- stats := make(map[string]struct {
- os.FileInfo
- error
- })
- getInfo := func(dir string) (os.FileInfo, error) {
- if res, ok := stats[dir]; ok {
- return res.FileInfo, res.error
- }
- fi, err := os.Stat(dir)
- stats[dir] = struct {
- os.FileInfo
- error
- }{fi, err}
- return fi, err
- }
- dir := filepath.Dir(uri.Filename())
- fi, err := getInfo(dir)
- if err != nil {
- return nil
- }
-
- // Aggregate all possibly relevant package IDs.
- var found []PackageID
- for knownURI, ids := range known {
- knownDir := filepath.Dir(knownURI.Filename())
- knownFI, err := getInfo(knownDir)
- if err != nil {
- continue
- }
- if os.SameFile(fi, knownFI) {
- found = append(found, ids...)
- }
- }
- return found
-}
-
-// fileWasSaved reports whether the FileHandle passed in has been saved. It
-// accomplishes this by checking to see if the original and current FileHandles
-// are both overlays, and if the current FileHandle is saved while the original
-// FileHandle was not saved.
-func fileWasSaved(originalFH, currentFH source.FileHandle) bool {
- c, ok := currentFH.(*overlay)
- if !ok || c == nil {
- return true
- }
- o, ok := originalFH.(*overlay)
- if !ok || o == nil {
- return c.saved
- }
- return !o.saved && c.saved
-}
-
-// shouldInvalidateMetadata reparses the full file's AST to determine
-// if the file requires a metadata reload.
-func (s *snapshot) shouldInvalidateMetadata(ctx context.Context, newSnapshot *snapshot, originalFH, currentFH source.FileHandle) (invalidate, pkgNameChanged, importDeleted bool) {
- if originalFH == nil {
- return true, false, false
- }
- // If the file hasn't changed, there's no need to reload.
- if originalFH.FileIdentity() == currentFH.FileIdentity() {
- return false, false, false
- }
- // Get the original and current parsed files in order to check package name
- // and imports. Use the new snapshot to parse to avoid modifying the
- // current snapshot.
- original, originalErr := newSnapshot.ParseGo(ctx, originalFH, source.ParseFull)
- current, currentErr := newSnapshot.ParseGo(ctx, currentFH, source.ParseFull)
- if originalErr != nil || currentErr != nil {
- return (originalErr == nil) != (currentErr == nil), false, (currentErr != nil) // we don't know if an import was deleted
- }
- // Check if the package's metadata has changed. The cases handled are:
- // 1. A package's name has changed
- // 2. A file's imports have changed
- if original.File.Name.Name != current.File.Name.Name {
- invalidate = true
- pkgNameChanged = true
- }
- origImportSet := make(map[string]struct{})
- for _, importSpec := range original.File.Imports {
- origImportSet[importSpec.Path.Value] = struct{}{}
- }
- curImportSet := make(map[string]struct{})
- for _, importSpec := range current.File.Imports {
- curImportSet[importSpec.Path.Value] = struct{}{}
- }
- // If any of the current imports were not in the original imports.
- for path := range curImportSet {
- if _, ok := origImportSet[path]; ok {
- delete(origImportSet, path)
- continue
- }
- // If the import path is obviously not valid, we can skip reloading
- // metadata. For now, valid means properly quoted and without a
- // terminal slash.
- if isBadImportPath(path) {
- continue
- }
- invalidate = true
- }
-
- for path := range origImportSet {
- if !isBadImportPath(path) {
- invalidate = true
- importDeleted = true
- }
- }
-
- if !invalidate {
- invalidate = magicCommentsChanged(original.File, current.File)
- }
- return invalidate, pkgNameChanged, importDeleted
-}
-
-func magicCommentsChanged(original *ast.File, current *ast.File) bool {
- oldComments := extractMagicComments(original)
- newComments := extractMagicComments(current)
- if len(oldComments) != len(newComments) {
- return true
- }
- for i := range oldComments {
- if oldComments[i] != newComments[i] {
- return true
- }
- }
- return false
-}
-
-func isBadImportPath(path string) bool {
- path, err := strconv.Unquote(path)
- if err != nil {
- return true
- }
- if path == "" {
- return true
- }
- if path[len(path)-1] == '/' {
- return true
- }
- return false
-}
-
-var buildConstraintOrEmbedRe = regexp.MustCompile(`^//(go:embed|go:build|\s*\+build).*`)
-
-// extractMagicComments finds magic comments that affect metadata in f.
-func extractMagicComments(f *ast.File) []string {
- var results []string
- for _, cg := range f.Comments {
- for _, c := range cg.List {
- if buildConstraintOrEmbedRe.MatchString(c.Text) {
- results = append(results, c.Text)
- }
- }
- }
- return results
-}
-
-func (s *snapshot) BuiltinFile(ctx context.Context) (*source.ParsedGoFile, error) {
- s.AwaitInitialized(ctx)
-
- s.mu.Lock()
- builtin := s.builtin
- s.mu.Unlock()
-
- if builtin == "" {
- return nil, errors.Errorf("no builtin package for view %s", s.view.name)
- }
-
- fh, err := s.GetFile(ctx, builtin)
- if err != nil {
- return nil, err
- }
- return s.ParseGo(ctx, fh, source.ParseFull)
-}
-
-func (s *snapshot) IsBuiltin(ctx context.Context, uri span.URI) bool {
- s.mu.Lock()
- defer s.mu.Unlock()
- // We should always get the builtin URI in a canonical form, so use simple
- // string comparison here. span.CompareURI is too expensive.
- return uri == s.builtin
-}
-
-func (s *snapshot) setBuiltin(path string) {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- s.builtin = span.URIFromPath(path)
-}
-
-// BuildGoplsMod generates a go.mod file for all modules in the workspace. It
-// bypasses any existing gopls.mod.
-func (s *snapshot) BuildGoplsMod(ctx context.Context) (*modfile.File, error) {
- allModules, err := findModules(s.view.folder, pathExcludedByFilterFunc(s.view.rootURI.Filename(), s.view.gomodcache, s.View().Options()), 0)
- if err != nil {
- return nil, err
- }
- return buildWorkspaceModFile(ctx, allModules, s)
-}
-
-// TODO(rfindley): move this to workspacemodule.go
-func buildWorkspaceModFile(ctx context.Context, modFiles map[span.URI]struct{}, fs source.FileSource) (*modfile.File, error) {
- file := &modfile.File{}
- file.AddModuleStmt("gopls-workspace")
- // Track the highest Go version, to be set on the workspace module.
- // Fall back to 1.12 -- old versions insist on having some version.
- goVersion := "1.12"
-
- paths := map[string]span.URI{}
- excludes := map[string][]string{}
- var sortedModURIs []span.URI
- for uri := range modFiles {
- sortedModURIs = append(sortedModURIs, uri)
- }
- sort.Slice(sortedModURIs, func(i, j int) bool {
- return sortedModURIs[i] < sortedModURIs[j]
- })
- for _, modURI := range sortedModURIs {
- fh, err := fs.GetFile(ctx, modURI)
- if err != nil {
- return nil, err
- }
- content, err := fh.Read()
- if err != nil {
- return nil, err
- }
- parsed, err := modfile.Parse(fh.URI().Filename(), content, nil)
- if err != nil {
- return nil, err
- }
- if file == nil || parsed.Module == nil {
- return nil, fmt.Errorf("no module declaration for %s", modURI)
- }
- // Prepend "v" to go versions to make them valid semver.
- if parsed.Go != nil && semver.Compare("v"+goVersion, "v"+parsed.Go.Version) < 0 {
- goVersion = parsed.Go.Version
- }
- path := parsed.Module.Mod.Path
- if _, ok := paths[path]; ok {
- return nil, fmt.Errorf("found module %q twice in the workspace", path)
- }
- paths[path] = modURI
- // If the module's path includes a major version, we expect it to have
- // a matching major version.
- _, majorVersion, _ := module.SplitPathVersion(path)
- if majorVersion == "" {
- majorVersion = "/v0"
- }
- majorVersion = strings.TrimLeft(majorVersion, "/.") // handle gopkg.in versions
- file.AddNewRequire(path, source.WorkspaceModuleVersion(majorVersion), false)
- if err := file.AddReplace(path, "", dirURI(modURI).Filename(), ""); err != nil {
- return nil, err
- }
- for _, exclude := range parsed.Exclude {
- excludes[exclude.Mod.Path] = append(excludes[exclude.Mod.Path], exclude.Mod.Version)
- }
- }
- if goVersion != "" {
- file.AddGoStmt(goVersion)
- }
- // Go back through all of the modules to handle any of their replace
- // statements.
- for _, modURI := range sortedModURIs {
- fh, err := fs.GetFile(ctx, modURI)
- if err != nil {
- return nil, err
- }
- content, err := fh.Read()
- if err != nil {
- return nil, err
- }
- parsed, err := modfile.Parse(fh.URI().Filename(), content, nil)
- if err != nil {
- return nil, err
- }
- // If any of the workspace modules have replace directives, they need
- // to be reflected in the workspace module.
- for _, rep := range parsed.Replace {
- // Don't replace any modules that are in our workspace--we should
- // always use the version in the workspace.
- if _, ok := paths[rep.Old.Path]; ok {
- continue
- }
- newPath := rep.New.Path
- newVersion := rep.New.Version
- // If a replace points to a module in the workspace, make sure we
- // direct it to version of the module in the workspace.
- if m, ok := paths[rep.New.Path]; ok {
- newPath = dirURI(m).Filename()
- newVersion = ""
- } else if rep.New.Version == "" && !filepath.IsAbs(rep.New.Path) {
- // Make any relative paths absolute.
- newPath = filepath.Join(dirURI(modURI).Filename(), rep.New.Path)
- }
- if err := file.AddReplace(rep.Old.Path, rep.Old.Version, newPath, newVersion); err != nil {
- return nil, err
- }
- }
- }
- for path, versions := range excludes {
- for _, version := range versions {
- file.AddExclude(path, version)
- }
- }
- file.SortBlocks()
- return file, nil
-}
-
-func buildWorkspaceSumFile(ctx context.Context, modFiles map[span.URI]struct{}, fs source.FileSource) ([]byte, error) {
- allSums := map[module.Version][]string{}
- for modURI := range modFiles {
- // TODO(rfindley): factor out this pattern into a uripath package.
- sumURI := span.URIFromPath(filepath.Join(filepath.Dir(modURI.Filename()), "go.sum"))
- fh, err := fs.GetFile(ctx, sumURI)
- if err != nil {
- continue
- }
- data, err := fh.Read()
- if os.IsNotExist(err) {
- continue
- }
- if err != nil {
- return nil, errors.Errorf("reading go sum: %w", err)
- }
- if err := readGoSum(allSums, sumURI.Filename(), data); err != nil {
- return nil, err
- }
- }
- // This logic to write go.sum is copied (with minor modifications) from
- // https://cs.opensource.google/go/go/+/master:src/cmd/go/internal/modfetch/fetch.go;l=631;drc=762eda346a9f4062feaa8a9fc0d17d72b11586f0
- var mods []module.Version
- for m := range allSums {
- mods = append(mods, m)
- }
- module.Sort(mods)
-
- var buf bytes.Buffer
- for _, m := range mods {
- list := allSums[m]
- sort.Strings(list)
- // Note (rfindley): here we add all sum lines without verification, because
- // the assumption is that if they come from a go.sum file, they are
- // trusted.
- for _, h := range list {
- fmt.Fprintf(&buf, "%s %s %s\n", m.Path, m.Version, h)
- }
- }
- return buf.Bytes(), nil
-}
-
-// readGoSum is copied (with minor modifications) from
-// https://cs.opensource.google/go/go/+/master:src/cmd/go/internal/modfetch/fetch.go;l=398;drc=762eda346a9f4062feaa8a9fc0d17d72b11586f0
-func readGoSum(dst map[module.Version][]string, file string, data []byte) error {
- lineno := 0
- for len(data) > 0 {
- var line []byte
- lineno++
- i := bytes.IndexByte(data, '\n')
- if i < 0 {
- line, data = data, nil
- } else {
- line, data = data[:i], data[i+1:]
- }
- f := strings.Fields(string(line))
- if len(f) == 0 {
- // blank line; skip it
- continue
- }
- if len(f) != 3 {
- return fmt.Errorf("malformed go.sum:\n%s:%d: wrong number of fields %v", file, lineno, len(f))
- }
- mod := module.Version{Path: f[0], Version: f[1]}
- dst[mod] = append(dst[mod], f[2])
- }
- return nil
-}
diff --git a/internal/lsp/cache/symbols.go b/internal/lsp/cache/symbols.go
deleted file mode 100644
index 831017246..000000000
--- a/internal/lsp/cache/symbols.go
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cache
-
-import (
- "context"
- "go/ast"
- "go/token"
- "go/types"
- "strings"
-
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/memoize"
- "golang.org/x/tools/internal/span"
-)
-
-type symbolHandle struct {
- handle *memoize.Handle
-
- fh source.FileHandle
-
- // key is the hashed key for the package.
- key symbolHandleKey
-}
-
-// symbolData contains the data produced by extracting symbols from a file.
-type symbolData struct {
- symbols []source.Symbol
- err error
-}
-
-type symbolHandleKey string
-
-func (s *snapshot) buildSymbolHandle(ctx context.Context, fh source.FileHandle) *symbolHandle {
- if h := s.getSymbolHandle(fh.URI()); h != nil {
- return h
- }
- key := symbolHandleKey(fh.FileIdentity().Hash)
- h := s.generation.Bind(key, func(ctx context.Context, arg memoize.Arg) interface{} {
- snapshot := arg.(*snapshot)
- data := &symbolData{}
- data.symbols, data.err = symbolize(ctx, snapshot, fh)
- return data
- }, nil)
-
- sh := &symbolHandle{
- handle: h,
- fh: fh,
- key: key,
- }
- return s.addSymbolHandle(sh)
-}
-
-// symbolize extracts symbols from a file. It does not parse the file through the cache.
-func symbolize(ctx context.Context, snapshot *snapshot, fh source.FileHandle) ([]source.Symbol, error) {
- var w symbolWalker
- fset := token.NewFileSet() // don't use snapshot.FileSet, as that would needlessly leak memory.
- data := parseGo(ctx, fset, fh, source.ParseFull)
- if data.parsed != nil && data.parsed.File != nil {
- w.curFile = data.parsed
- w.curURI = protocol.URIFromSpanURI(data.parsed.URI)
- w.fileDecls(data.parsed.File.Decls)
- }
- return w.symbols, w.firstError
-}
-
-type symbolWalker struct {
- curFile *source.ParsedGoFile
- curURI protocol.DocumentURI
- symbols []source.Symbol
- firstError error
-}
-
-func (w *symbolWalker) atNode(node ast.Node, name string, kind protocol.SymbolKind, path ...*ast.Ident) {
- var b strings.Builder
- for _, ident := range path {
- if ident != nil {
- b.WriteString(ident.Name)
- b.WriteString(".")
- }
- }
- b.WriteString(name)
-
- rng, err := fileRange(w.curFile, node.Pos(), node.End())
- if err != nil {
- w.error(err)
- return
- }
- sym := source.Symbol{
- Name: b.String(),
- Kind: kind,
- Range: rng,
- }
- w.symbols = append(w.symbols, sym)
-}
-
-func (w *symbolWalker) error(err error) {
- if err != nil && w.firstError == nil {
- w.firstError = err
- }
-}
-
-func fileRange(pgf *source.ParsedGoFile, start, end token.Pos) (protocol.Range, error) {
- s, err := span.FileSpan(pgf.Tok, pgf.Mapper.Converter, start, end)
- if err != nil {
- return protocol.Range{}, nil
- }
- return pgf.Mapper.Range(s)
-}
-
-func (w *symbolWalker) fileDecls(decls []ast.Decl) {
- for _, decl := range decls {
- switch decl := decl.(type) {
- case *ast.FuncDecl:
- kind := protocol.Function
- var recv *ast.Ident
- if decl.Recv.NumFields() > 0 {
- kind = protocol.Method
- recv = unpackRecv(decl.Recv.List[0].Type)
- }
- w.atNode(decl.Name, decl.Name.Name, kind, recv)
- case *ast.GenDecl:
- for _, spec := range decl.Specs {
- switch spec := spec.(type) {
- case *ast.TypeSpec:
- kind := guessKind(spec)
- w.atNode(spec.Name, spec.Name.Name, kind)
- w.walkType(spec.Type, spec.Name)
- case *ast.ValueSpec:
- for _, name := range spec.Names {
- kind := protocol.Variable
- if decl.Tok == token.CONST {
- kind = protocol.Constant
- }
- w.atNode(name, name.Name, kind)
- }
- }
- }
- }
- }
-}
-
-func guessKind(spec *ast.TypeSpec) protocol.SymbolKind {
- switch spec.Type.(type) {
- case *ast.InterfaceType:
- return protocol.Interface
- case *ast.StructType:
- return protocol.Struct
- case *ast.FuncType:
- return protocol.Function
- }
- return protocol.Class
-}
-
-func unpackRecv(rtyp ast.Expr) *ast.Ident {
- // Extract the receiver identifier. Lifted from go/types/resolver.go
-L:
- for {
- switch t := rtyp.(type) {
- case *ast.ParenExpr:
- rtyp = t.X
- case *ast.StarExpr:
- rtyp = t.X
- default:
- break L
- }
- }
- if name, _ := rtyp.(*ast.Ident); name != nil {
- return name
- }
- return nil
-}
-
-// walkType processes symbols related to a type expression. path is path of
-// nested type identifiers to the type expression.
-func (w *symbolWalker) walkType(typ ast.Expr, path ...*ast.Ident) {
- switch st := typ.(type) {
- case *ast.StructType:
- for _, field := range st.Fields.List {
- w.walkField(field, protocol.Field, protocol.Field, path...)
- }
- case *ast.InterfaceType:
- for _, field := range st.Methods.List {
- w.walkField(field, protocol.Interface, protocol.Method, path...)
- }
- }
-}
-
-// walkField processes symbols related to the struct field or interface method.
-//
-// unnamedKind and namedKind are the symbol kinds if the field is resp. unnamed
-// or named. path is the path of nested identifiers containing the field.
-func (w *symbolWalker) walkField(field *ast.Field, unnamedKind, namedKind protocol.SymbolKind, path ...*ast.Ident) {
- if len(field.Names) == 0 {
- switch typ := field.Type.(type) {
- case *ast.SelectorExpr:
- // embedded qualified type
- w.atNode(field, typ.Sel.Name, unnamedKind, path...)
- default:
- w.atNode(field, types.ExprString(field.Type), unnamedKind, path...)
- }
- }
- for _, name := range field.Names {
- w.atNode(name, name.Name, namedKind, path...)
- w.walkType(field.Type, append(path, name)...)
- }
-}
diff --git a/internal/lsp/cache/view.go b/internal/lsp/cache/view.go
deleted file mode 100644
index b34807c6e..000000000
--- a/internal/lsp/cache/view.go
+++ /dev/null
@@ -1,1076 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package cache implements the caching layer for gopls.
-package cache
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "path"
- "path/filepath"
- "reflect"
- "regexp"
- "sort"
- "strings"
- "sync"
-
- "golang.org/x/mod/modfile"
- "golang.org/x/mod/semver"
- exec "golang.org/x/sys/execabs"
- "golang.org/x/tools/go/packages"
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/gocommand"
- "golang.org/x/tools/internal/imports"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
- "golang.org/x/tools/internal/xcontext"
- errors "golang.org/x/xerrors"
-)
-
-type View struct {
- session *Session
- id string
-
- optionsMu sync.Mutex
- options *source.Options
-
- // mu protects most mutable state of the view.
- mu sync.Mutex
-
- // baseCtx is the context handed to NewView. This is the parent of all
- // background contexts created for this view.
- baseCtx context.Context
-
- // cancel is called when all action being performed by the current view
- // should be stopped.
- cancel context.CancelFunc
-
- // name is the user visible name of this view.
- name string
-
- // folder is the folder with which this view was constructed.
- folder span.URI
-
- importsState *importsState
-
- // moduleUpgrades tracks known upgrades for module paths.
- moduleUpgrades map[string]string
-
- // keep track of files by uri and by basename, a single file may be mapped
- // to multiple uris, and the same basename may map to multiple files
- filesByURI map[span.URI]*fileBase
- filesByBase map[string][]*fileBase
-
- // initCancelFirstAttempt can be used to terminate the view's first
- // attempt at initialization.
- initCancelFirstAttempt context.CancelFunc
-
- snapshotMu sync.Mutex
- snapshot *snapshot // nil after shutdown has been called
-
- // initialWorkspaceLoad is closed when the first workspace initialization has
- // completed. If we failed to load, we only retry if the go.mod file changes,
- // to avoid too many go/packages calls.
- initialWorkspaceLoad chan struct{}
-
- // initializationSema is used limit concurrent initialization of snapshots in
- // the view. We use a channel instead of a mutex to avoid blocking when a
- // context is canceled.
- initializationSema chan struct{}
-
- // rootURI is the rootURI directory of this view. If we are in GOPATH mode, this
- // is just the folder. If we are in module mode, this is the module rootURI.
- rootURI span.URI
-
- // workspaceInformation tracks various details about this view's
- // environment variables, go version, and use of modules.
- workspaceInformation
-}
-
-type workspaceInformation struct {
- // The Go version in use: X in Go 1.X.
- goversion int
-
- // hasGopackagesDriver is true if the user has a value set for the
- // GOPACKAGESDRIVER environment variable or a gopackagesdriver binary on
- // their machine.
- hasGopackagesDriver bool
-
- // `go env` variables that need to be tracked by gopls.
- environmentVariables
-
- // userGo111Module is the user's value of GO111MODULE.
- userGo111Module go111module
-
- // The value of GO111MODULE we want to run with.
- effectiveGo111Module string
-
- // goEnv is the `go env` output collected when a view is created.
- // It includes the values of the environment variables above.
- goEnv map[string]string
-}
-
-type go111module int
-
-const (
- off = go111module(iota)
- auto
- on
-)
-
-type environmentVariables struct {
- gocache, gopath, goroot, goprivate, gomodcache, go111module string
-}
-
-type workspaceMode int
-
-const (
- moduleMode workspaceMode = 1 << iota
-
- // tempModfile indicates whether or not the -modfile flag should be used.
- tempModfile
-)
-
-// fileBase holds the common functionality for all files.
-// It is intended to be embedded in the file implementations
-type fileBase struct {
- uris []span.URI
- fname string
-
- view *View
-}
-
-func (f *fileBase) URI() span.URI {
- return f.uris[0]
-}
-
-func (f *fileBase) filename() string {
- return f.fname
-}
-
-func (f *fileBase) addURI(uri span.URI) int {
- f.uris = append(f.uris, uri)
- return len(f.uris)
-}
-
-func (v *View) ID() string { return v.id }
-
-// tempModFile creates a temporary go.mod file based on the contents of the
-// given go.mod file. It is the caller's responsibility to clean up the files
-// when they are done using them.
-func tempModFile(modFh source.FileHandle, gosum []byte) (tmpURI span.URI, cleanup func(), err error) {
- filenameHash := hashContents([]byte(modFh.URI().Filename()))
- tmpMod, err := ioutil.TempFile("", fmt.Sprintf("go.%s.*.mod", filenameHash))
- if err != nil {
- return "", nil, err
- }
- defer tmpMod.Close()
-
- tmpURI = span.URIFromPath(tmpMod.Name())
- tmpSumName := sumFilename(tmpURI)
-
- content, err := modFh.Read()
- if err != nil {
- return "", nil, err
- }
-
- if _, err := tmpMod.Write(content); err != nil {
- return "", nil, err
- }
-
- cleanup = func() {
- _ = os.Remove(tmpSumName)
- _ = os.Remove(tmpURI.Filename())
- }
-
- // Be careful to clean up if we return an error from this function.
- defer func() {
- if err != nil {
- cleanup()
- cleanup = nil
- }
- }()
-
- // Create an analogous go.sum, if one exists.
- if gosum != nil {
- if err := ioutil.WriteFile(tmpSumName, gosum, 0655); err != nil {
- return "", cleanup, err
- }
- }
-
- return tmpURI, cleanup, nil
-}
-
-// Name returns the user visible name of this view.
-func (v *View) Name() string {
- return v.name
-}
-
-// Folder returns the folder at the base of this view.
-func (v *View) Folder() span.URI {
- return v.folder
-}
-
-func (v *View) Options() *source.Options {
- v.optionsMu.Lock()
- defer v.optionsMu.Unlock()
- return v.options
-}
-
-func (v *View) FileKind(fh source.FileHandle) source.FileKind {
- if o, ok := fh.(source.Overlay); ok {
- if o.Kind() != source.UnknownKind {
- return o.Kind()
- }
- }
- fext := filepath.Ext(fh.URI().Filename())
- switch fext {
- case ".go":
- return source.Go
- case ".mod":
- return source.Mod
- case ".sum":
- return source.Sum
- case ".work":
- return source.Work
- }
- exts := v.Options().TemplateExtensions
- for _, ext := range exts {
- if fext == ext || fext == "."+ext {
- return source.Tmpl
- }
- }
- // and now what? This should never happen, but it does for cgo before go1.15
- return source.Go
-}
-
-func minorOptionsChange(a, b *source.Options) bool {
- // Check if any of the settings that modify our understanding of files have been changed
- if !reflect.DeepEqual(a.Env, b.Env) {
- return false
- }
- if !reflect.DeepEqual(a.DirectoryFilters, b.DirectoryFilters) {
- return false
- }
- if a.MemoryMode != b.MemoryMode {
- return false
- }
- aBuildFlags := make([]string, len(a.BuildFlags))
- bBuildFlags := make([]string, len(b.BuildFlags))
- copy(aBuildFlags, a.BuildFlags)
- copy(bBuildFlags, b.BuildFlags)
- sort.Strings(aBuildFlags)
- sort.Strings(bBuildFlags)
- // the rest of the options are benign
- return reflect.DeepEqual(aBuildFlags, bBuildFlags)
-}
-
-func (v *View) SetOptions(ctx context.Context, options *source.Options) (source.View, error) {
- // no need to rebuild the view if the options were not materially changed
- v.optionsMu.Lock()
- if minorOptionsChange(v.options, options) {
- v.options = options
- v.optionsMu.Unlock()
- return v, nil
- }
- v.optionsMu.Unlock()
- newView, err := v.session.updateView(ctx, v, options)
- return newView, err
-}
-
-func (v *View) Rebuild(ctx context.Context) (source.Snapshot, func(), error) {
- newView, err := v.session.updateView(ctx, v, v.Options())
- if err != nil {
- return nil, func() {}, err
- }
- snapshot, release := newView.Snapshot(ctx)
- return snapshot, release, nil
-}
-
-func (s *snapshot) WriteEnv(ctx context.Context, w io.Writer) error {
- s.view.optionsMu.Lock()
- env := s.view.options.EnvSlice()
- buildFlags := append([]string{}, s.view.options.BuildFlags...)
- s.view.optionsMu.Unlock()
-
- fullEnv := make(map[string]string)
- for k, v := range s.view.goEnv {
- fullEnv[k] = v
- }
- for _, v := range env {
- s := strings.SplitN(v, "=", 2)
- if len(s) != 2 {
- continue
- }
- if _, ok := fullEnv[s[0]]; ok {
- fullEnv[s[0]] = s[1]
- }
- }
- goVersion, err := s.view.session.gocmdRunner.Run(ctx, gocommand.Invocation{
- Verb: "version",
- Env: env,
- WorkingDir: s.view.rootURI.Filename(),
- })
- if err != nil {
- return err
- }
- fmt.Fprintf(w, `go env for %v
-(root %s)
-(go version %s)
-(valid build configuration = %v)
-(build flags: %v)
-`,
- s.view.folder.Filename(),
- s.view.rootURI.Filename(),
- strings.TrimRight(goVersion.String(), "\n"),
- s.ValidBuildConfiguration(),
- buildFlags)
- for k, v := range fullEnv {
- fmt.Fprintf(w, "%s=%s\n", k, v)
- }
- return nil
-}
-
-func (s *snapshot) RunProcessEnvFunc(ctx context.Context, fn func(*imports.Options) error) error {
- return s.view.importsState.runProcessEnvFunc(ctx, s, fn)
-}
-
-// separated out from its sole use in locateTemplateFiles for testability
-func fileHasExtension(path string, suffixes []string) bool {
- ext := filepath.Ext(path)
- if ext != "" && ext[0] == '.' {
- ext = ext[1:]
- }
- for _, s := range suffixes {
- if s != "" && ext == s {
- return true
- }
- }
- return false
-}
-
-func (s *snapshot) locateTemplateFiles(ctx context.Context) {
- if len(s.view.Options().TemplateExtensions) == 0 {
- return
- }
- suffixes := s.view.Options().TemplateExtensions
-
- // The workspace root may have been expanded to a module, but we should apply
- // directory filters based on the configured workspace folder.
- //
- // TODO(rfindley): we should be more principled about paths outside of the
- // workspace folder: do we even consider them? Do we support absolute
- // exclusions? Relative exclusions starting with ..?
- dir := s.workspace.root.Filename()
- relativeTo := s.view.folder.Filename()
-
- searched := 0
- // Change to WalkDir when we move up to 1.16
- err := filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error {
- if err != nil {
- return err
- }
- relpath := strings.TrimPrefix(path, relativeTo)
- excluded := pathExcludedByFilter(relpath, dir, s.view.gomodcache, s.view.options)
- if fileHasExtension(path, suffixes) && !excluded && !fi.IsDir() {
- k := span.URIFromPath(path)
- _, err := s.GetVersionedFile(ctx, k)
- if err != nil {
- return nil
- }
- }
- searched++
- if fileLimit > 0 && searched > fileLimit {
- return errExhausted
- }
- return nil
- })
- if err != nil {
- event.Error(ctx, "searching for template files failed", err)
- }
-}
-
-func (v *View) contains(uri span.URI) bool {
- inRoot := source.InDir(v.rootURI.Filename(), uri.Filename())
- inFolder := source.InDir(v.folder.Filename(), uri.Filename())
- if !inRoot && !inFolder {
- return false
- }
- // Filters are applied relative to the workspace folder.
- if inFolder {
- return !pathExcludedByFilter(strings.TrimPrefix(uri.Filename(), v.folder.Filename()), v.rootURI.Filename(), v.gomodcache, v.Options())
- }
- return true
-}
-
-func (v *View) mapFile(uri span.URI, f *fileBase) {
- v.filesByURI[uri] = f
- if f.addURI(uri) == 1 {
- basename := basename(f.filename())
- v.filesByBase[basename] = append(v.filesByBase[basename], f)
- }
-}
-
-func basename(filename string) string {
- return strings.ToLower(filepath.Base(filename))
-}
-
-func (v *View) relevantChange(c source.FileModification) bool {
- // If the file is known to the view, the change is relevant.
- if v.knownFile(c.URI) {
- return true
- }
- // The go.work/gopls.mod may not be "known" because we first access it
- // through the session. As a result, treat changes to the view's go.work or
- // gopls.mod file as always relevant, even if they are only on-disk
- // changes.
- // TODO(rstambler): Make sure the go.work/gopls.mod files are always known
- // to the view.
- for _, src := range []workspaceSource{goWorkWorkspace, goplsModWorkspace} {
- if c.URI == uriForSource(v.rootURI, src) {
- return true
- }
- }
- // If the file is not known to the view, and the change is only on-disk,
- // we should not invalidate the snapshot. This is necessary because Emacs
- // sends didChangeWatchedFiles events for temp files.
- if c.OnDisk && (c.Action == source.Change || c.Action == source.Delete) {
- return false
- }
- return v.contains(c.URI)
-}
-
-func (v *View) knownFile(uri span.URI) bool {
- v.mu.Lock()
- defer v.mu.Unlock()
-
- f, err := v.findFile(uri)
- return f != nil && err == nil
-}
-
-// getFile returns a file for the given URI.
-func (v *View) getFile(uri span.URI) *fileBase {
- v.mu.Lock()
- defer v.mu.Unlock()
-
- f, _ := v.findFile(uri)
- if f != nil {
- return f
- }
- f = &fileBase{
- view: v,
- fname: uri.Filename(),
- }
- v.mapFile(uri, f)
- return f
-}
-
-// findFile checks the cache for any file matching the given uri.
-//
-// An error is only returned for an irreparable failure, for example, if the
-// filename in question does not exist.
-func (v *View) findFile(uri span.URI) (*fileBase, error) {
- if f := v.filesByURI[uri]; f != nil {
- // a perfect match
- return f, nil
- }
- // no exact match stored, time to do some real work
- // check for any files with the same basename
- fname := uri.Filename()
- basename := basename(fname)
- if candidates := v.filesByBase[basename]; candidates != nil {
- pathStat, err := os.Stat(fname)
- if os.IsNotExist(err) {
- return nil, err
- }
- if err != nil {
- return nil, nil // the file may exist, return without an error
- }
- for _, c := range candidates {
- if cStat, err := os.Stat(c.filename()); err == nil {
- if os.SameFile(pathStat, cStat) {
- // same file, map it
- v.mapFile(uri, c)
- return c, nil
- }
- }
- }
- }
- // no file with a matching name was found, it wasn't in our cache
- return nil, nil
-}
-
-func (v *View) Shutdown(ctx context.Context) {
- v.session.removeView(ctx, v)
-}
-
-// TODO(rFindley): probably some of this should also be one in View.Shutdown
-// above?
-func (v *View) shutdown(ctx context.Context) {
- // Cancel the initial workspace load if it is still running.
- v.initCancelFirstAttempt()
-
- v.mu.Lock()
- if v.cancel != nil {
- v.cancel()
- v.cancel = nil
- }
- v.mu.Unlock()
- v.snapshotMu.Lock()
- if v.snapshot != nil {
- go v.snapshot.generation.Destroy("View.shutdown")
- v.snapshot = nil
- }
- v.snapshotMu.Unlock()
- v.importsState.destroy()
-}
-
-func (v *View) Session() *Session {
- return v.session
-}
-
-func (s *snapshot) IgnoredFile(uri span.URI) bool {
- filename := uri.Filename()
- var prefixes []string
- if len(s.workspace.getActiveModFiles()) == 0 {
- for _, entry := range filepath.SplitList(s.view.gopath) {
- prefixes = append(prefixes, filepath.Join(entry, "src"))
- }
- } else {
- prefixes = append(prefixes, s.view.gomodcache)
- for m := range s.workspace.getActiveModFiles() {
- prefixes = append(prefixes, dirURI(m).Filename())
- }
- }
- for _, prefix := range prefixes {
- if strings.HasPrefix(filename, prefix) {
- return checkIgnored(filename[len(prefix):])
- }
- }
- return false
-}
-
-// checkIgnored implements go list's exclusion rules. go help list:
-// Directory and file names that begin with "." or "_" are ignored
-// by the go tool, as are directories named "testdata".
-func checkIgnored(suffix string) bool {
- for _, component := range strings.Split(suffix, string(filepath.Separator)) {
- if len(component) == 0 {
- continue
- }
- if component[0] == '.' || component[0] == '_' || component == "testdata" {
- return true
- }
- }
- return false
-}
-
-func (v *View) Snapshot(ctx context.Context) (source.Snapshot, func()) {
- return v.getSnapshot()
-}
-
-func (v *View) getSnapshot() (*snapshot, func()) {
- v.snapshotMu.Lock()
- defer v.snapshotMu.Unlock()
- if v.snapshot == nil {
- panic("getSnapshot called after shutdown")
- }
- return v.snapshot, v.snapshot.generation.Acquire()
-}
-
-func (s *snapshot) initialize(ctx context.Context, firstAttempt bool) {
- select {
- case <-ctx.Done():
- return
- case s.view.initializationSema <- struct{}{}:
- }
-
- defer func() {
- <-s.view.initializationSema
- }()
-
- if s.initializeOnce == nil {
- return
- }
- s.initializeOnce.Do(func() {
- s.loadWorkspace(ctx, firstAttempt)
- s.collectAllKnownSubdirs(ctx)
- })
-}
-
-func (s *snapshot) loadWorkspace(ctx context.Context, firstAttempt bool) {
- defer func() {
- s.initializeOnce = nil
- if firstAttempt {
- close(s.view.initialWorkspaceLoad)
- }
- }()
-
- // If we have multiple modules, we need to load them by paths.
- var scopes []interface{}
- var modDiagnostics []*source.Diagnostic
- addError := func(uri span.URI, err error) {
- modDiagnostics = append(modDiagnostics, &source.Diagnostic{
- URI: uri,
- Severity: protocol.SeverityError,
- Source: source.ListError,
- Message: err.Error(),
- })
- }
- s.locateTemplateFiles(ctx)
- if len(s.workspace.getActiveModFiles()) > 0 {
- for modURI := range s.workspace.getActiveModFiles() {
- fh, err := s.GetFile(ctx, modURI)
- if err != nil {
- addError(modURI, err)
- continue
- }
- parsed, err := s.ParseMod(ctx, fh)
- if err != nil {
- addError(modURI, err)
- continue
- }
- if parsed.File == nil || parsed.File.Module == nil {
- addError(modURI, fmt.Errorf("no module path for %s", modURI))
- continue
- }
- path := parsed.File.Module.Mod.Path
- scopes = append(scopes, moduleLoadScope(path))
- }
- } else {
- scopes = append(scopes, viewLoadScope("LOAD_VIEW"))
- }
-
- // If we're loading anything, ensure we also load builtin.
- // TODO(rstambler): explain the rationale for this.
- if len(scopes) > 0 {
- scopes = append(scopes, PackagePath("builtin"))
- }
- err := s.load(ctx, firstAttempt, scopes...)
-
- // If the context is canceled on the first attempt, loading has failed
- // because the go command has timed out--that should be a critical error.
- if err != nil && !firstAttempt && ctx.Err() != nil {
- return
- }
-
- var criticalErr *source.CriticalError
- switch {
- case err != nil && ctx.Err() != nil:
- event.Error(ctx, fmt.Sprintf("initial workspace load: %v", err), err)
- criticalErr = &source.CriticalError{
- MainError: err,
- }
- case err != nil:
- event.Error(ctx, "initial workspace load failed", err)
- extractedDiags, _ := s.extractGoCommandErrors(ctx, err.Error())
- criticalErr = &source.CriticalError{
- MainError: err,
- DiagList: append(modDiagnostics, extractedDiags...),
- }
- case len(modDiagnostics) == 1:
- criticalErr = &source.CriticalError{
- MainError: fmt.Errorf(modDiagnostics[0].Message),
- DiagList: modDiagnostics,
- }
- case len(modDiagnostics) > 1:
- criticalErr = &source.CriticalError{
- MainError: fmt.Errorf("error loading module names"),
- DiagList: modDiagnostics,
- }
- }
-
- // Lock the snapshot when setting the initialized error.
- s.mu.Lock()
- defer s.mu.Unlock()
- s.initializedErr = criticalErr
-}
-
-// invalidateContent invalidates the content of a Go file,
-// including any position and type information that depends on it.
-//
-// invalidateContent returns a non-nil snapshot for the new content, along with
-// a callback which the caller must invoke to release that snapshot.
-func (v *View) invalidateContent(ctx context.Context, changes map[span.URI]*fileChange, forceReloadMetadata bool) (*snapshot, func()) {
- // Detach the context so that content invalidation cannot be canceled.
- ctx = xcontext.Detach(ctx)
-
- // This should be the only time we hold the view's snapshot lock for any period of time.
- v.snapshotMu.Lock()
- defer v.snapshotMu.Unlock()
-
- if v.snapshot == nil {
- panic("invalidateContent called after shutdown")
- }
-
- // Cancel all still-running previous requests, since they would be
- // operating on stale data.
- v.snapshot.cancel()
-
- // Do not clone a snapshot until its view has finished initializing.
- v.snapshot.AwaitInitialized(ctx)
-
- oldSnapshot := v.snapshot
-
- v.snapshot = oldSnapshot.clone(ctx, v.baseCtx, changes, forceReloadMetadata)
- go oldSnapshot.generation.Destroy("View.invalidateContent")
-
- return v.snapshot, v.snapshot.generation.Acquire()
-}
-
-func (s *Session) getWorkspaceInformation(ctx context.Context, folder span.URI, options *source.Options) (*workspaceInformation, error) {
- if err := checkPathCase(folder.Filename()); err != nil {
- return nil, errors.Errorf("invalid workspace folder path: %w; check that the casing of the configured workspace folder path agrees with the casing reported by the operating system", err)
- }
- var err error
- inv := gocommand.Invocation{
- WorkingDir: folder.Filename(),
- Env: options.EnvSlice(),
- }
- goversion, err := gocommand.GoVersion(ctx, inv, s.gocmdRunner)
- if err != nil {
- return nil, err
- }
-
- go111module := os.Getenv("GO111MODULE")
- if v, ok := options.Env["GO111MODULE"]; ok {
- go111module = v
- }
- // Make sure to get the `go env` before continuing with initialization.
- envVars, env, err := s.getGoEnv(ctx, folder.Filename(), goversion, go111module, options.EnvSlice())
- if err != nil {
- return nil, err
- }
- // If using 1.16, change the default back to auto. The primary effect of
- // GO111MODULE=on is to break GOPATH, which we aren't too interested in.
- if goversion >= 16 && go111module == "" {
- go111module = "auto"
- }
- // The value of GOPACKAGESDRIVER is not returned through the go command.
- gopackagesdriver := os.Getenv("GOPACKAGESDRIVER")
- for _, s := range env {
- split := strings.SplitN(s, "=", 2)
- if split[0] == "GOPACKAGESDRIVER" {
- gopackagesdriver = split[1]
- }
- }
-
- // A user may also have a gopackagesdriver binary on their machine, which
- // works the same way as setting GOPACKAGESDRIVER.
- tool, _ := exec.LookPath("gopackagesdriver")
- hasGopackagesDriver := gopackagesdriver != "off" && (gopackagesdriver != "" || tool != "")
-
- return &workspaceInformation{
- hasGopackagesDriver: hasGopackagesDriver,
- effectiveGo111Module: go111module,
- userGo111Module: go111moduleForVersion(go111module, goversion),
- goversion: goversion,
- environmentVariables: envVars,
- goEnv: env,
- }, nil
-}
-
-func go111moduleForVersion(go111module string, goversion int) go111module {
- // Off by default until Go 1.12.
- if go111module == "off" || (goversion < 12 && go111module == "") {
- return off
- }
- // On by default as of Go 1.16.
- if go111module == "on" || (goversion >= 16 && go111module == "") {
- return on
- }
- return auto
-}
-
-// findWorkspaceRoot searches for the best workspace root according to the
-// following heuristics:
-// - First, look for a parent directory containing a gopls.mod file
-// (experimental only).
-// - Then, a parent directory containing a go.mod file.
-// - Then, a child directory containing a go.mod file, if there is exactly
-// one (non-experimental only).
-// Otherwise, it returns folder.
-// TODO (rFindley): move this to workspace.go
-// TODO (rFindley): simplify this once workspace modules are enabled by default.
-func findWorkspaceRoot(ctx context.Context, folder span.URI, fs source.FileSource, excludePath func(string) bool, experimental bool) (span.URI, error) {
- patterns := []string{"go.work", "go.mod"}
- if experimental {
- patterns = []string{"go.work", "gopls.mod", "go.mod"}
- }
- for _, basename := range patterns {
- dir, err := findRootPattern(ctx, folder, basename, fs)
- if err != nil {
- return "", errors.Errorf("finding %s: %w", basename, err)
- }
- if dir != "" {
- return dir, nil
- }
- }
-
- // The experimental workspace can handle nested modules at this point...
- if experimental {
- return folder, nil
- }
-
- // ...else we should check if there's exactly one nested module.
- all, err := findModules(folder, excludePath, 2)
- if err == errExhausted {
- // Fall-back behavior: if we don't find any modules after searching 10000
- // files, assume there are none.
- event.Log(ctx, fmt.Sprintf("stopped searching for modules after %d files", fileLimit))
- return folder, nil
- }
- if err != nil {
- return "", err
- }
- if len(all) == 1 {
- // range to access first element.
- for uri := range all {
- return dirURI(uri), nil
- }
- }
- return folder, nil
-}
-
-func findRootPattern(ctx context.Context, folder span.URI, basename string, fs source.FileSource) (span.URI, error) {
- dir := folder.Filename()
- for dir != "" {
- target := filepath.Join(dir, basename)
- exists, err := fileExists(ctx, span.URIFromPath(target), fs)
- if err != nil {
- return "", err
- }
- if exists {
- return span.URIFromPath(dir), nil
- }
- // Trailing separators must be trimmed, otherwise filepath.Split is a noop.
- next, _ := filepath.Split(strings.TrimRight(dir, string(filepath.Separator)))
- if next == dir {
- break
- }
- dir = next
- }
- return "", nil
-}
-
-// OS-specific path case check, for case-insensitive filesystems.
-var checkPathCase = defaultCheckPathCase
-
-func defaultCheckPathCase(path string) error {
- return nil
-}
-
-func validBuildConfiguration(folder span.URI, ws *workspaceInformation, modFiles map[span.URI]struct{}) bool {
- // Since we only really understand the `go` command, if the user has a
- // different GOPACKAGESDRIVER, assume that their configuration is valid.
- if ws.hasGopackagesDriver {
- return true
- }
- // Check if the user is working within a module or if we have found
- // multiple modules in the workspace.
- if len(modFiles) > 0 {
- return true
- }
- // The user may have a multiple directories in their GOPATH.
- // Check if the workspace is within any of them.
- for _, gp := range filepath.SplitList(ws.gopath) {
- if source.InDir(filepath.Join(gp, "src"), folder.Filename()) {
- return true
- }
- }
- return false
-}
-
-// getGoEnv gets the view's various GO* values.
-func (s *Session) getGoEnv(ctx context.Context, folder string, goversion int, go111module string, configEnv []string) (environmentVariables, map[string]string, error) {
- envVars := environmentVariables{}
- vars := map[string]*string{
- "GOCACHE": &envVars.gocache,
- "GOPATH": &envVars.gopath,
- "GOROOT": &envVars.goroot,
- "GOPRIVATE": &envVars.goprivate,
- "GOMODCACHE": &envVars.gomodcache,
- "GO111MODULE": &envVars.go111module,
- }
-
- // We can save ~200 ms by requesting only the variables we care about.
- args := append([]string{"-json"}, imports.RequiredGoEnvVars...)
- for k := range vars {
- args = append(args, k)
- }
- args = append(args, "GOWORK")
-
- inv := gocommand.Invocation{
- Verb: "env",
- Args: args,
- Env: configEnv,
- WorkingDir: folder,
- }
- // Don't go through runGoCommand, as we don't need a temporary -modfile to
- // run `go env`.
- stdout, err := s.gocmdRunner.Run(ctx, inv)
- if err != nil {
- return environmentVariables{}, nil, err
- }
- env := make(map[string]string)
- if err := json.Unmarshal(stdout.Bytes(), &env); err != nil {
- return environmentVariables{}, nil, err
- }
-
- for key, ptr := range vars {
- *ptr = env[key]
- }
-
- // Old versions of Go don't have GOMODCACHE, so emulate it.
- if envVars.gomodcache == "" && envVars.gopath != "" {
- envVars.gomodcache = filepath.Join(filepath.SplitList(envVars.gopath)[0], "pkg/mod")
- }
- // GO111MODULE does not appear in `go env` output until Go 1.13.
- if goversion < 13 {
- envVars.go111module = go111module
- }
- return envVars, env, err
-}
-
-func (v *View) IsGoPrivatePath(target string) bool {
- return globsMatchPath(v.goprivate, target)
-}
-
-func (v *View) ModuleUpgrades() map[string]string {
- v.mu.Lock()
- defer v.mu.Unlock()
-
- upgrades := map[string]string{}
- for mod, ver := range v.moduleUpgrades {
- upgrades[mod] = ver
- }
- return upgrades
-}
-
-func (v *View) RegisterModuleUpgrades(upgrades map[string]string) {
- v.mu.Lock()
- defer v.mu.Unlock()
-
- for mod, ver := range upgrades {
- v.moduleUpgrades[mod] = ver
- }
-}
-
-// Copied from
-// https://cs.opensource.google/go/go/+/master:src/cmd/go/internal/str/path.go;l=58;drc=2910c5b4a01a573ebc97744890a07c1a3122c67a
-func globsMatchPath(globs, target string) bool {
- for globs != "" {
- // Extract next non-empty glob in comma-separated list.
- var glob string
- if i := strings.Index(globs, ","); i >= 0 {
- glob, globs = globs[:i], globs[i+1:]
- } else {
- glob, globs = globs, ""
- }
- if glob == "" {
- continue
- }
-
- // A glob with N+1 path elements (N slashes) needs to be matched
- // against the first N+1 path elements of target,
- // which end just before the N+1'th slash.
- n := strings.Count(glob, "/")
- prefix := target
- // Walk target, counting slashes, truncating at the N+1'th slash.
- for i := 0; i < len(target); i++ {
- if target[i] == '/' {
- if n == 0 {
- prefix = target[:i]
- break
- }
- n--
- }
- }
- if n > 0 {
- // Not enough prefix elements.
- continue
- }
- matched, _ := path.Match(glob, prefix)
- if matched {
- return true
- }
- }
- return false
-}
-
-var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`)
-
-// TODO(rstambler): Consolidate modURI and modContent back into a FileHandle
-// after we have a version of the workspace go.mod file on disk. Getting a
-// FileHandle from the cache for temporary files is problematic, since we
-// cannot delete it.
-func (s *snapshot) vendorEnabled(ctx context.Context, modURI span.URI, modContent []byte) (bool, error) {
- if s.workspaceMode()&moduleMode == 0 {
- return false, nil
- }
- matches := modFlagRegexp.FindStringSubmatch(s.view.goEnv["GOFLAGS"])
- var modFlag string
- if len(matches) != 0 {
- modFlag = matches[1]
- }
- if modFlag != "" {
- // Don't override an explicit '-mod=vendor' argument.
- // We do want to override '-mod=readonly': it would break various module code lenses,
- // and on 1.16 we know -modfile is available, so we won't mess with go.mod anyway.
- return modFlag == "vendor", nil
- }
-
- modFile, err := modfile.Parse(modURI.Filename(), modContent, nil)
- if err != nil {
- return false, err
- }
- if fi, err := os.Stat(filepath.Join(s.view.rootURI.Filename(), "vendor")); err != nil || !fi.IsDir() {
- return false, nil
- }
- vendorEnabled := modFile.Go != nil && modFile.Go.Version != "" && semver.Compare("v"+modFile.Go.Version, "v1.14") >= 0
- return vendorEnabled, nil
-}
-
-func (v *View) allFilesExcluded(pkg *packages.Package) bool {
- opts := v.Options()
- folder := filepath.ToSlash(v.folder.Filename())
- for _, f := range pkg.GoFiles {
- f = filepath.ToSlash(f)
- if !strings.HasPrefix(f, folder) {
- return false
- }
- if !pathExcludedByFilter(strings.TrimPrefix(f, folder), v.rootURI.Filename(), v.gomodcache, opts) {
- return false
- }
- }
- return true
-}
-
-func pathExcludedByFilterFunc(root, gomodcache string, opts *source.Options) func(string) bool {
- return func(path string) bool {
- return pathExcludedByFilter(path, root, gomodcache, opts)
- }
-}
-
-// pathExcludedByFilter reports whether the path (relative to the workspace
-// folder) should be excluded by the configured directory filters.
-//
-// TODO(rfindley): passing root and gomodcache here makes it confusing whether
-// path should be absolute or relative, and has already caused at least one
-// bug.
-func pathExcludedByFilter(path, root, gomodcache string, opts *source.Options) bool {
- path = strings.TrimPrefix(filepath.ToSlash(path), "/")
- gomodcache = strings.TrimPrefix(filepath.ToSlash(strings.TrimPrefix(gomodcache, root)), "/")
- filters := opts.DirectoryFilters
- if gomodcache != "" {
- filters = append(filters, "-"+gomodcache)
- }
- return source.FiltersDisallow(path, filters)
-}
diff --git a/internal/lsp/cache/view_test.go b/internal/lsp/cache/view_test.go
deleted file mode 100644
index d76dcda8e..000000000
--- a/internal/lsp/cache/view_test.go
+++ /dev/null
@@ -1,218 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-package cache
-
-import (
- "context"
- "io/ioutil"
- "os"
- "path/filepath"
- "testing"
-
- "golang.org/x/tools/internal/lsp/fake"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
-)
-
-func TestCaseInsensitiveFilesystem(t *testing.T) {
- base, err := ioutil.TempDir("", t.Name())
- if err != nil {
- t.Fatal(err)
- }
-
- inner := filepath.Join(base, "a/B/c/DEFgh")
- if err := os.MkdirAll(inner, 0777); err != nil {
- t.Fatal(err)
- }
- file := filepath.Join(inner, "f.go")
- if err := ioutil.WriteFile(file, []byte("hi"), 0777); err != nil {
- t.Fatal(err)
- }
- if _, err := os.Stat(filepath.Join(inner, "F.go")); err != nil {
- t.Skip("filesystem is case-sensitive")
- }
-
- tests := []struct {
- path string
- err bool
- }{
- {file, false},
- {filepath.Join(inner, "F.go"), true},
- {filepath.Join(base, "a/b/c/defgh/f.go"), true},
- }
- for _, tt := range tests {
- err := checkPathCase(tt.path)
- if err != nil != tt.err {
- t.Errorf("checkPathCase(%q) = %v, wanted error: %v", tt.path, err, tt.err)
- }
- }
-}
-
-func TestFindWorkspaceRoot(t *testing.T) {
- workspace := `
--- a/go.mod --
-module a
--- a/x/x.go
-package x
--- a/x/y/y.go
-package x
--- b/go.mod --
-module b
--- b/c/go.mod --
-module bc
--- d/gopls.mod --
-module d-goplsworkspace
--- d/e/go.mod --
-module de
--- f/g/go.mod --
-module fg
-`
- dir, err := fake.Tempdir(fake.UnpackTxt(workspace))
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(dir)
-
- tests := []struct {
- folder, want string
- experimental bool
- }{
- {"", "", false}, // no module at root, and more than one nested module
- {"a", "a", false},
- {"a/x", "a", false},
- {"a/x/y", "a", false},
- {"b/c", "b/c", false},
- {"d", "d/e", false},
- {"d", "d", true},
- {"d/e", "d/e", false},
- {"d/e", "d", true},
- {"f", "f/g", false},
- {"f", "f", true},
- }
-
- for _, test := range tests {
- ctx := context.Background()
- rel := fake.RelativeTo(dir)
- folderURI := span.URIFromPath(rel.AbsPath(test.folder))
- excludeNothing := func(string) bool { return false }
- got, err := findWorkspaceRoot(ctx, folderURI, &osFileSource{}, excludeNothing, test.experimental)
- if err != nil {
- t.Fatal(err)
- }
- if gotf, wantf := filepath.Clean(got.Filename()), rel.AbsPath(test.want); gotf != wantf {
- t.Errorf("findWorkspaceRoot(%q, %t) = %q, want %q", test.folder, test.experimental, gotf, wantf)
- }
- }
-}
-
-func TestInVendor(t *testing.T) {
- for _, tt := range []struct {
- path string
- inVendor bool
- }{
- {
- path: "foo/vendor/x.go",
- inVendor: false,
- },
- {
- path: "foo/vendor/x/x.go",
- inVendor: true,
- },
- {
- path: "foo/x.go",
- inVendor: false,
- },
- } {
- if got := inVendor(span.URIFromPath(tt.path)); got != tt.inVendor {
- t.Errorf("expected %s inVendor %v, got %v", tt.path, tt.inVendor, got)
- }
- }
-}
-
-func TestFilters(t *testing.T) {
- tests := []struct {
- filters []string
- included []string
- excluded []string
- }{
- {
- included: []string{"x"},
- },
- {
- filters: []string{"-"},
- excluded: []string{"x", "x/a"},
- },
- {
- filters: []string{"-x", "+y"},
- included: []string{"y", "y/a", "z"},
- excluded: []string{"x", "x/a"},
- },
- {
- filters: []string{"-x", "+x/y", "-x/y/z"},
- included: []string{"x/y", "x/y/a", "a"},
- excluded: []string{"x", "x/a", "x/y/z/a"},
- },
- {
- filters: []string{"+foobar", "-foo"},
- included: []string{"foobar", "foobar/a"},
- excluded: []string{"foo", "foo/a"},
- },
- }
-
- for _, tt := range tests {
- opts := &source.Options{}
- opts.DirectoryFilters = tt.filters
- for _, inc := range tt.included {
- if pathExcludedByFilter(inc, "root", "root/gopath/pkg/mod", opts) {
- t.Errorf("filters %q excluded %v, wanted included", tt.filters, inc)
- }
- }
- for _, exc := range tt.excluded {
- if !pathExcludedByFilter(exc, "root", "root/gopath/pkg/mod", opts) {
- t.Errorf("filters %q included %v, wanted excluded", tt.filters, exc)
- }
- }
- }
-}
-
-func TestSuffixes(t *testing.T) {
- type file struct {
- path string
- want bool
- }
- type cases struct {
- option []string
- files []file
- }
- tests := []cases{
- {[]string{"tmpl", "gotmpl"}, []file{ // default
- {"foo", false},
- {"foo.tmpl", true},
- {"foo.gotmpl", true},
- {"tmpl", false},
- {"tmpl.go", false}},
- },
- {[]string{"tmpl", "gotmpl", "html", "gohtml"}, []file{
- {"foo.gotmpl", true},
- {"foo.html", true},
- {"foo.gohtml", true},
- {"html", false}},
- },
- {[]string{"tmpl", "gotmpl", ""}, []file{ // possible user mistake
- {"foo.gotmpl", true},
- {"foo.go", false},
- {"foo", false}},
- },
- }
- for _, a := range tests {
- suffixes := a.option
- for _, b := range a.files {
- got := fileHasExtension(b.path, suffixes)
- if got != b.want {
- t.Errorf("got %v, want %v, option %q, file %q (%+v)",
- got, b.want, a.option, b.path, b)
- }
- }
- }
-}
diff --git a/internal/lsp/cache/workspace.go b/internal/lsp/cache/workspace.go
deleted file mode 100644
index 5d62d6691..000000000
--- a/internal/lsp/cache/workspace.go
+++ /dev/null
@@ -1,599 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cache
-
-import (
- "context"
- "fmt"
- "os"
- "path/filepath"
- "sort"
- "strings"
- "sync"
-
- "golang.org/x/mod/modfile"
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
- "golang.org/x/tools/internal/xcontext"
- errors "golang.org/x/xerrors"
-)
-
-// workspaceSource reports how the set of active modules has been derived.
-type workspaceSource int
-
-const (
- legacyWorkspace = iota // non-module or single module mode
- goplsModWorkspace // modules provided by a gopls.mod file
- goWorkWorkspace // modules provided by a go.work file
- fileSystemWorkspace // modules scanned from the filesystem
-)
-
-func (s workspaceSource) String() string {
- switch s {
- case legacyWorkspace:
- return "legacy"
- case goplsModWorkspace:
- return "gopls.mod"
- case goWorkWorkspace:
- return "go.work"
- case fileSystemWorkspace:
- return "file system"
- default:
- return "!(unknown module source)"
- }
-}
-
-// workspace tracks go.mod files in the workspace, along with the
-// gopls.mod file, to provide support for multi-module workspaces.
-//
-// Specifically, it provides:
-// - the set of modules contained within in the workspace root considered to
-// be 'active'
-// - the workspace modfile, to be used for the go command `-modfile` flag
-// - the set of workspace directories
-//
-// This type is immutable (or rather, idempotent), so that it may be shared
-// across multiple snapshots.
-type workspace struct {
- root span.URI
- excludePath func(string) bool
- moduleSource workspaceSource
-
- // activeModFiles holds the active go.mod files.
- activeModFiles map[span.URI]struct{}
-
- // knownModFiles holds the set of all go.mod files in the workspace.
- // In all modes except for legacy, this is equivalent to modFiles.
- knownModFiles map[span.URI]struct{}
-
- // workFile, if nonEmpty, is the go.work file for the workspace.
- workFile span.URI
-
- // The workspace module is lazily re-built once after being invalidated.
- // buildMu+built guards this reconstruction.
- //
- // file and wsDirs may be non-nil even if built == false, if they were copied
- // from the previous workspace module version. In this case, they will be
- // preserved if building fails.
- buildMu sync.Mutex
- built bool
- buildErr error
- mod *modfile.File
- sum []byte
- wsDirs map[span.URI]struct{}
-}
-
-// newWorkspace creates a new workspace at the given root directory,
-// determining its module source based on the presence of a gopls.mod or
-// go.work file, and the go111moduleOff and useWsModule settings.
-//
-// If useWsModule is set, the workspace may use a synthetic mod file replacing
-// all modules in the root.
-//
-// If there is no active workspace file (a gopls.mod or go.work), newWorkspace
-// scans the filesystem to find modules.
-func newWorkspace(ctx context.Context, root span.URI, fs source.FileSource, excludePath func(string) bool, go111moduleOff bool, useWsModule bool) (*workspace, error) {
- ws := &workspace{
- root: root,
- excludePath: excludePath,
- }
-
- // The user may have a gopls.mod or go.work file that defines their
- // workspace.
- if err := loadExplicitWorkspaceFile(ctx, ws, fs); err == nil {
- return ws, nil
- }
-
- // Otherwise, in all other modes, search for all of the go.mod files in the
- // workspace.
- knownModFiles, err := findModules(root, excludePath, 0)
- if err != nil {
- return nil, err
- }
- ws.knownModFiles = knownModFiles
-
- switch {
- case go111moduleOff:
- ws.moduleSource = legacyWorkspace
- case useWsModule:
- ws.activeModFiles = knownModFiles
- ws.moduleSource = fileSystemWorkspace
- default:
- ws.moduleSource = legacyWorkspace
- activeModFiles, err := getLegacyModules(ctx, root, fs)
- if err != nil {
- return nil, err
- }
- ws.activeModFiles = activeModFiles
- }
- return ws, nil
-}
-
-// loadExplicitWorkspaceFile loads workspace information from go.work or
-// gopls.mod files, setting the active modules, mod file, and module source
-// accordingly.
-func loadExplicitWorkspaceFile(ctx context.Context, ws *workspace, fs source.FileSource) error {
- for _, src := range []workspaceSource{goWorkWorkspace, goplsModWorkspace} {
- fh, err := fs.GetFile(ctx, uriForSource(ws.root, src))
- if err != nil {
- return err
- }
- contents, err := fh.Read()
- if err != nil {
- continue
- }
- var file *modfile.File
- var activeModFiles map[span.URI]struct{}
- switch src {
- case goWorkWorkspace:
- file, activeModFiles, err = parseGoWork(ctx, ws.root, fh.URI(), contents, fs)
- ws.workFile = fh.URI()
- case goplsModWorkspace:
- file, activeModFiles, err = parseGoplsMod(ws.root, fh.URI(), contents)
- }
- if err != nil {
- ws.buildMu.Lock()
- ws.built = true
- ws.buildErr = err
- ws.buildMu.Unlock()
- }
- ws.mod = file
- ws.activeModFiles = activeModFiles
- ws.moduleSource = src
- return nil
- }
- return noHardcodedWorkspace
-}
-
-var noHardcodedWorkspace = errors.New("no hardcoded workspace")
-
-func (w *workspace) getKnownModFiles() map[span.URI]struct{} {
- return w.knownModFiles
-}
-
-func (w *workspace) getActiveModFiles() map[span.URI]struct{} {
- return w.activeModFiles
-}
-
-// modFile gets the workspace modfile associated with this workspace,
-// computing it if it doesn't exist.
-//
-// A fileSource must be passed in to solve a chicken-egg problem: it is not
-// correct to pass in the snapshot file source to newWorkspace when
-// invalidating, because at the time these are called the snapshot is locked.
-// So we must pass it in later on when actually using the modFile.
-func (w *workspace) modFile(ctx context.Context, fs source.FileSource) (*modfile.File, error) {
- w.build(ctx, fs)
- return w.mod, w.buildErr
-}
-
-func (w *workspace) sumFile(ctx context.Context, fs source.FileSource) ([]byte, error) {
- w.build(ctx, fs)
- return w.sum, w.buildErr
-}
-
-func (w *workspace) build(ctx context.Context, fs source.FileSource) {
- w.buildMu.Lock()
- defer w.buildMu.Unlock()
-
- if w.built {
- return
- }
- // Building should never be cancelled. Since the workspace module is shared
- // across multiple snapshots, doing so would put us in a bad state, and it
- // would not be obvious to the user how to recover.
- ctx = xcontext.Detach(ctx)
-
- // If our module source is not gopls.mod, try to build the workspace module
- // from modules. Fall back on the pre-existing mod file if parsing fails.
- if w.moduleSource != goplsModWorkspace {
- file, err := buildWorkspaceModFile(ctx, w.activeModFiles, fs)
- switch {
- case err == nil:
- w.mod = file
- case w.mod != nil:
- // Parsing failed, but we have a previous file version.
- event.Error(ctx, "building workspace mod file", err)
- default:
- // No file to fall back on.
- w.buildErr = err
- }
- }
- if w.mod != nil {
- w.wsDirs = map[span.URI]struct{}{
- w.root: {},
- }
- for _, r := range w.mod.Replace {
- // We may be replacing a module with a different version, not a path
- // on disk.
- if r.New.Version != "" {
- continue
- }
- w.wsDirs[span.URIFromPath(r.New.Path)] = struct{}{}
- }
- }
- // Ensure that there is always at least the root dir.
- if len(w.wsDirs) == 0 {
- w.wsDirs = map[span.URI]struct{}{
- w.root: {},
- }
- }
- sum, err := buildWorkspaceSumFile(ctx, w.activeModFiles, fs)
- if err == nil {
- w.sum = sum
- } else {
- event.Error(ctx, "building workspace sum file", err)
- }
- w.built = true
-}
-
-// dirs returns the workspace directories for the loaded modules.
-func (w *workspace) dirs(ctx context.Context, fs source.FileSource) []span.URI {
- w.build(ctx, fs)
- var dirs []span.URI
- for d := range w.wsDirs {
- dirs = append(dirs, d)
- }
- sort.Slice(dirs, func(i, j int) bool {
- return source.CompareURI(dirs[i], dirs[j]) < 0
- })
- return dirs
-}
-
-// invalidate returns a (possibly) new workspace after invalidating the changed
-// files. If w is still valid in the presence of changedURIs, it returns itself
-// unmodified.
-//
-// The returned changed and reload flags control the level of invalidation.
-// Some workspace changes may affect workspace contents without requiring a
-// reload of metadata (for example, unsaved changes to a go.mod or go.sum
-// file).
-func (w *workspace) invalidate(ctx context.Context, changes map[span.URI]*fileChange, fs source.FileSource) (_ *workspace, changed, reload bool) {
- // Prevent races to w.modFile or w.wsDirs below, if wmhas not yet been built.
- w.buildMu.Lock()
- defer w.buildMu.Unlock()
-
- // Clone the workspace. This may be discarded if nothing changed.
- result := &workspace{
- root: w.root,
- moduleSource: w.moduleSource,
- knownModFiles: make(map[span.URI]struct{}),
- activeModFiles: make(map[span.URI]struct{}),
- workFile: w.workFile,
- mod: w.mod,
- sum: w.sum,
- wsDirs: w.wsDirs,
- excludePath: w.excludePath,
- }
- for k, v := range w.knownModFiles {
- result.knownModFiles[k] = v
- }
- for k, v := range w.activeModFiles {
- result.activeModFiles[k] = v
- }
-
- // First handle changes to the go.work or gopls.mod file. This must be
- // considered before any changes to go.mod or go.sum files, as these files
- // determine which modules we care about. If go.work/gopls.mod has changed
- // we need to either re-read it if it exists or walk the filesystem if it
- // has been deleted. go.work should override the gopls.mod if both exist.
- changed, reload = handleWorkspaceFileChanges(ctx, result, changes, fs)
- // Next, handle go.mod changes that could affect our workspace.
- for uri, change := range changes {
- // Otherwise, we only care about go.mod files in the workspace directory.
- if change.isUnchanged || !isGoMod(uri) || !source.InDir(result.root.Filename(), uri.Filename()) {
- continue
- }
- changed = true
- active := result.moduleSource != legacyWorkspace || source.CompareURI(modURI(w.root), uri) == 0
- reload = reload || (active && change.fileHandle.Saved())
- // Don't mess with the list of mod files if using go.work or gopls.mod.
- if result.moduleSource == goplsModWorkspace || result.moduleSource == goWorkWorkspace {
- continue
- }
- if change.exists {
- result.knownModFiles[uri] = struct{}{}
- if active {
- result.activeModFiles[uri] = struct{}{}
- }
- } else {
- delete(result.knownModFiles, uri)
- delete(result.activeModFiles, uri)
- }
- }
-
- // Finally, process go.sum changes for any modules that are now active.
- for uri, change := range changes {
- if !isGoSum(uri) {
- continue
- }
- // TODO(rFindley) factor out this URI mangling.
- dir := filepath.Dir(uri.Filename())
- modURI := span.URIFromPath(filepath.Join(dir, "go.mod"))
- if _, active := result.activeModFiles[modURI]; !active {
- continue
- }
- // Only changes to active go.sum files actually cause the workspace to
- // change.
- changed = true
- reload = reload || change.fileHandle.Saved()
- }
-
- if !changed {
- return w, false, false
- }
-
- return result, changed, reload
-}
-
-// handleWorkspaceFileChanges handles changes related to a go.work or gopls.mod
-// file, updating ws accordingly. ws.root must be set.
-func handleWorkspaceFileChanges(ctx context.Context, ws *workspace, changes map[span.URI]*fileChange, fs source.FileSource) (changed, reload bool) {
- // If go.work/gopls.mod has changed we need to either re-read it if it
- // exists or walk the filesystem if it has been deleted.
- // go.work should override the gopls.mod if both exist.
- for _, src := range []workspaceSource{goWorkWorkspace, goplsModWorkspace} {
- uri := uriForSource(ws.root, src)
- // File opens/closes are just no-ops.
- change, ok := changes[uri]
- if !ok {
- continue
- }
- if change.isUnchanged {
- break
- }
- if change.exists {
- // Only invalidate if the file if it actually parses.
- // Otherwise, stick with the current file.
- var parsedFile *modfile.File
- var parsedModules map[span.URI]struct{}
- var err error
- switch src {
- case goWorkWorkspace:
- parsedFile, parsedModules, err = parseGoWork(ctx, ws.root, uri, change.content, fs)
- case goplsModWorkspace:
- parsedFile, parsedModules, err = parseGoplsMod(ws.root, uri, change.content)
- }
- if err != nil {
- // An unparseable file should not invalidate the workspace:
- // nothing good could come from changing the workspace in
- // this case.
- event.Error(ctx, fmt.Sprintf("parsing %s", filepath.Base(uri.Filename())), err)
- } else {
- // only update the modfile if it parsed.
- changed = true
- reload = change.fileHandle.Saved()
- ws.mod = parsedFile
- ws.moduleSource = src
- ws.knownModFiles = parsedModules
- ws.activeModFiles = make(map[span.URI]struct{})
- for k, v := range parsedModules {
- ws.activeModFiles[k] = v
- }
- }
- break // We've found an explicit workspace file, so can stop looking.
- } else {
- // go.work/gopls.mod is deleted. search for modules again.
- changed = true
- reload = true
- ws.moduleSource = fileSystemWorkspace
- // The parsed file is no longer valid.
- ws.mod = nil
- knownModFiles, err := findModules(ws.root, ws.excludePath, 0)
- if err != nil {
- ws.knownModFiles = nil
- ws.activeModFiles = nil
- event.Error(ctx, "finding file system modules", err)
- } else {
- ws.knownModFiles = knownModFiles
- ws.activeModFiles = make(map[span.URI]struct{})
- for k, v := range ws.knownModFiles {
- ws.activeModFiles[k] = v
- }
- }
- }
- }
- return changed, reload
-}
-
-// goplsModURI returns the URI for the gopls.mod file contained in root.
-func uriForSource(root span.URI, src workspaceSource) span.URI {
- var basename string
- switch src {
- case goplsModWorkspace:
- basename = "gopls.mod"
- case goWorkWorkspace:
- basename = "go.work"
- default:
- return ""
- }
- return span.URIFromPath(filepath.Join(root.Filename(), basename))
-}
-
-// modURI returns the URI for the go.mod file contained in root.
-func modURI(root span.URI) span.URI {
- return span.URIFromPath(filepath.Join(root.Filename(), "go.mod"))
-}
-
-// isGoMod reports if uri is a go.mod file.
-func isGoMod(uri span.URI) bool {
- return filepath.Base(uri.Filename()) == "go.mod"
-}
-
-func isGoSum(uri span.URI) bool {
- return filepath.Base(uri.Filename()) == "go.sum" || filepath.Base(uri.Filename()) == "go.work.sum"
-}
-
-// fileExists reports if the file uri exists within source.
-func fileExists(ctx context.Context, uri span.URI, source source.FileSource) (bool, error) {
- fh, err := source.GetFile(ctx, uri)
- if err != nil {
- return false, err
- }
- return fileHandleExists(fh)
-}
-
-// fileHandleExists reports if the file underlying fh actually exits.
-func fileHandleExists(fh source.FileHandle) (bool, error) {
- _, err := fh.Read()
- if err == nil {
- return true, nil
- }
- if os.IsNotExist(err) {
- return false, nil
- }
- return false, err
-}
-
-// TODO(rFindley): replace this (and similar) with a uripath package analogous
-// to filepath.
-func dirURI(uri span.URI) span.URI {
- return span.URIFromPath(filepath.Dir(uri.Filename()))
-}
-
-// getLegacyModules returns a module set containing at most the root module.
-func getLegacyModules(ctx context.Context, root span.URI, fs source.FileSource) (map[span.URI]struct{}, error) {
- uri := span.URIFromPath(filepath.Join(root.Filename(), "go.mod"))
- modules := make(map[span.URI]struct{})
- exists, err := fileExists(ctx, uri, fs)
- if err != nil {
- return nil, err
- }
- if exists {
- modules[uri] = struct{}{}
- }
- return modules, nil
-}
-
-func parseGoWork(ctx context.Context, root, uri span.URI, contents []byte, fs source.FileSource) (*modfile.File, map[span.URI]struct{}, error) {
- workFile, err := modfile.ParseWork(uri.Filename(), contents, nil)
- if err != nil {
- return nil, nil, errors.Errorf("parsing go.work: %w", err)
- }
- modFiles := make(map[span.URI]struct{})
- for _, dir := range workFile.Use {
- // The resulting modfile must use absolute paths, so that it can be
- // written to a temp directory.
- dir.Path = absolutePath(root, dir.Path)
- modURI := span.URIFromPath(filepath.Join(dir.Path, "go.mod"))
- modFiles[modURI] = struct{}{}
- }
- modFile, err := buildWorkspaceModFile(ctx, modFiles, fs)
- if err != nil {
- return nil, nil, err
- }
-
- // Require a go directive, per the spec.
- if workFile.Go == nil || workFile.Go.Version == "" {
- return nil, nil, fmt.Errorf("go.work has missing or incomplete go directive")
- }
- if err := modFile.AddGoStmt(workFile.Go.Version); err != nil {
- return nil, nil, err
- }
-
- return modFile, modFiles, nil
-}
-
-func parseGoplsMod(root, uri span.URI, contents []byte) (*modfile.File, map[span.URI]struct{}, error) {
- modFile, err := modfile.Parse(uri.Filename(), contents, nil)
- if err != nil {
- return nil, nil, errors.Errorf("parsing gopls.mod: %w", err)
- }
- modFiles := make(map[span.URI]struct{})
- for _, replace := range modFile.Replace {
- if replace.New.Version != "" {
- return nil, nil, errors.Errorf("gopls.mod: replaced module %q@%q must not have version", replace.New.Path, replace.New.Version)
- }
- // The resulting modfile must use absolute paths, so that it can be
- // written to a temp directory.
- replace.New.Path = absolutePath(root, replace.New.Path)
- modURI := span.URIFromPath(filepath.Join(replace.New.Path, "go.mod"))
- modFiles[modURI] = struct{}{}
- }
- return modFile, modFiles, nil
-}
-
-func absolutePath(root span.URI, path string) string {
- dirFP := filepath.FromSlash(path)
- if !filepath.IsAbs(dirFP) {
- dirFP = filepath.Join(root.Filename(), dirFP)
- }
- return dirFP
-}
-
-// errExhausted is returned by findModules if the file scan limit is reached.
-var errExhausted = errors.New("exhausted")
-
-// Limit go.mod search to 1 million files. As a point of reference,
-// Kubernetes has 22K files (as of 2020-11-24).
-const fileLimit = 1000000
-
-// findModules recursively walks the root directory looking for go.mod files,
-// returning the set of modules it discovers. If modLimit is non-zero,
-// searching stops once modLimit modules have been found.
-//
-// TODO(rfindley): consider overlays.
-func findModules(root span.URI, excludePath func(string) bool, modLimit int) (map[span.URI]struct{}, error) {
- // Walk the view's folder to find all modules in the view.
- modFiles := make(map[span.URI]struct{})
- searched := 0
- errDone := errors.New("done")
- err := filepath.Walk(root.Filename(), func(path string, info os.FileInfo, err error) error {
- if err != nil {
- // Probably a permission error. Keep looking.
- return filepath.SkipDir
- }
- // For any path that is not the workspace folder, check if the path
- // would be ignored by the go command. Vendor directories also do not
- // contain workspace modules.
- if info.IsDir() && path != root.Filename() {
- suffix := strings.TrimPrefix(path, root.Filename())
- switch {
- case checkIgnored(suffix),
- strings.Contains(filepath.ToSlash(suffix), "/vendor/"),
- excludePath(suffix):
- return filepath.SkipDir
- }
- }
- // We're only interested in go.mod files.
- uri := span.URIFromPath(path)
- if isGoMod(uri) {
- modFiles[uri] = struct{}{}
- }
- if modLimit > 0 && len(modFiles) >= modLimit {
- return errDone
- }
- searched++
- if fileLimit > 0 && searched >= fileLimit {
- return errExhausted
- }
- return nil
- })
- if err == errDone {
- return modFiles, nil
- }
- return modFiles, err
-}
diff --git a/internal/lsp/cache/workspace_test.go b/internal/lsp/cache/workspace_test.go
deleted file mode 100644
index b809ad196..000000000
--- a/internal/lsp/cache/workspace_test.go
+++ /dev/null
@@ -1,425 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cache
-
-import (
- "context"
- "errors"
- "os"
- "strings"
- "testing"
-
- "golang.org/x/mod/modfile"
- "golang.org/x/tools/internal/lsp/fake"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
-)
-
-// osFileSource is a fileSource that just reads from the operating system.
-type osFileSource struct {
- overlays map[span.URI]fakeOverlay
-}
-
-type fakeOverlay struct {
- source.VersionedFileHandle
- uri span.URI
- content string
- err error
- saved bool
-}
-
-func (o fakeOverlay) Saved() bool { return o.saved }
-
-func (o fakeOverlay) Read() ([]byte, error) {
- if o.err != nil {
- return nil, o.err
- }
- return []byte(o.content), nil
-}
-
-func (o fakeOverlay) URI() span.URI {
- return o.uri
-}
-
-// change updates the file source with the given file content. For convenience,
-// empty content signals a deletion. If saved is true, these changes are
-// persisted to disk.
-func (s *osFileSource) change(ctx context.Context, uri span.URI, content string, saved bool) (*fileChange, error) {
- if content == "" {
- delete(s.overlays, uri)
- if saved {
- if err := os.Remove(uri.Filename()); err != nil {
- return nil, err
- }
- }
- fh, err := s.GetFile(ctx, uri)
- if err != nil {
- return nil, err
- }
- data, err := fh.Read()
- return &fileChange{exists: err == nil, content: data, fileHandle: &closedFile{fh}}, nil
- }
- if s.overlays == nil {
- s.overlays = map[span.URI]fakeOverlay{}
- }
- s.overlays[uri] = fakeOverlay{uri: uri, content: content, saved: saved}
- return &fileChange{
- exists: content != "",
- content: []byte(content),
- fileHandle: s.overlays[uri],
- }, nil
-}
-
-func (s *osFileSource) GetFile(ctx context.Context, uri span.URI) (source.FileHandle, error) {
- if overlay, ok := s.overlays[uri]; ok {
- return overlay, nil
- }
- fi, statErr := os.Stat(uri.Filename())
- if statErr != nil {
- return &fileHandle{
- err: statErr,
- uri: uri,
- }, nil
- }
- fh, err := readFile(ctx, uri, fi)
- if err != nil {
- return nil, err
- }
- return fh, nil
-}
-
-type wsState struct {
- source workspaceSource
- modules []string
- dirs []string
- sum string
-}
-
-type wsChange struct {
- content string
- saved bool
-}
-
-func TestWorkspaceModule(t *testing.T) {
- tests := []struct {
- desc string
- initial string // txtar-encoded
- legacyMode bool
- initialState wsState
- updates map[string]wsChange
- wantChanged bool
- wantReload bool
- finalState wsState
- }{
- {
- desc: "legacy mode",
- initial: `
--- go.mod --
-module mod.com
--- go.sum --
-golang.org/x/mod v0.3.0 h1:deadbeef
--- a/go.mod --
-module moda.com`,
- legacyMode: true,
- initialState: wsState{
- modules: []string{"./go.mod"},
- source: legacyWorkspace,
- dirs: []string{"."},
- sum: "golang.org/x/mod v0.3.0 h1:deadbeef\n",
- },
- },
- {
- desc: "nested module",
- initial: `
--- go.mod --
-module mod.com
--- a/go.mod --
-module moda.com`,
- initialState: wsState{
- modules: []string{"./go.mod", "a/go.mod"},
- source: fileSystemWorkspace,
- dirs: []string{".", "a"},
- },
- },
- {
- desc: "removing module",
- initial: `
--- a/go.mod --
-module moda.com
--- a/go.sum --
-golang.org/x/mod v0.3.0 h1:deadbeef
--- b/go.mod --
-module modb.com
--- b/go.sum --
-golang.org/x/mod v0.3.0 h1:beefdead`,
- initialState: wsState{
- modules: []string{"a/go.mod", "b/go.mod"},
- source: fileSystemWorkspace,
- dirs: []string{".", "a", "b"},
- sum: "golang.org/x/mod v0.3.0 h1:beefdead\ngolang.org/x/mod v0.3.0 h1:deadbeef\n",
- },
- updates: map[string]wsChange{
- "gopls.mod": {`module gopls-workspace
-
-require moda.com v0.0.0-goplsworkspace
-replace moda.com => $SANDBOX_WORKDIR/a`, true},
- },
- wantChanged: true,
- wantReload: true,
- finalState: wsState{
- modules: []string{"a/go.mod"},
- source: goplsModWorkspace,
- dirs: []string{".", "a"},
- sum: "golang.org/x/mod v0.3.0 h1:deadbeef\n",
- },
- },
- {
- desc: "adding module",
- initial: `
--- gopls.mod --
-require moda.com v0.0.0-goplsworkspace
-replace moda.com => $SANDBOX_WORKDIR/a
--- a/go.mod --
-module moda.com
--- b/go.mod --
-module modb.com`,
- initialState: wsState{
- modules: []string{"a/go.mod"},
- source: goplsModWorkspace,
- dirs: []string{".", "a"},
- },
- updates: map[string]wsChange{
- "gopls.mod": {`module gopls-workspace
-
-require moda.com v0.0.0-goplsworkspace
-require modb.com v0.0.0-goplsworkspace
-
-replace moda.com => $SANDBOX_WORKDIR/a
-replace modb.com => $SANDBOX_WORKDIR/b`, true},
- },
- wantChanged: true,
- wantReload: true,
- finalState: wsState{
- modules: []string{"a/go.mod", "b/go.mod"},
- source: goplsModWorkspace,
- dirs: []string{".", "a", "b"},
- },
- },
- {
- desc: "deleting gopls.mod",
- initial: `
--- gopls.mod --
-module gopls-workspace
-
-require moda.com v0.0.0-goplsworkspace
-replace moda.com => $SANDBOX_WORKDIR/a
--- a/go.mod --
-module moda.com
--- b/go.mod --
-module modb.com`,
- initialState: wsState{
- modules: []string{"a/go.mod"},
- source: goplsModWorkspace,
- dirs: []string{".", "a"},
- },
- updates: map[string]wsChange{
- "gopls.mod": {"", true},
- },
- wantChanged: true,
- wantReload: true,
- finalState: wsState{
- modules: []string{"a/go.mod", "b/go.mod"},
- source: fileSystemWorkspace,
- dirs: []string{".", "a", "b"},
- },
- },
- {
- desc: "broken module parsing",
- initial: `
--- a/go.mod --
-module moda.com
-
-require gopls.test v0.0.0-goplsworkspace
-replace gopls.test => ../../gopls.test // (this path shouldn't matter)
--- b/go.mod --
-module modb.com`,
- initialState: wsState{
- modules: []string{"a/go.mod", "b/go.mod"},
- source: fileSystemWorkspace,
- dirs: []string{".", "a", "b", "../gopls.test"},
- },
- updates: map[string]wsChange{
- "a/go.mod": {`modul moda.com
-
-require gopls.test v0.0.0-goplsworkspace
-replace gopls.test => ../../gopls.test2`, false},
- },
- wantChanged: true,
- wantReload: false,
- finalState: wsState{
- modules: []string{"a/go.mod", "b/go.mod"},
- source: fileSystemWorkspace,
- // finalDirs should be unchanged: we should preserve dirs in the presence
- // of a broken modfile.
- dirs: []string{".", "a", "b", "../gopls.test"},
- },
- },
- }
-
- for _, test := range tests {
- t.Run(test.desc, func(t *testing.T) {
- ctx := context.Background()
- dir, err := fake.Tempdir(fake.UnpackTxt(test.initial))
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(dir)
- root := span.URIFromPath(dir)
-
- fs := &osFileSource{}
- excludeNothing := func(string) bool { return false }
- w, err := newWorkspace(ctx, root, fs, excludeNothing, false, !test.legacyMode)
- if err != nil {
- t.Fatal(err)
- }
- rel := fake.RelativeTo(dir)
- checkState(ctx, t, fs, rel, w, test.initialState)
-
- // Apply updates.
- if test.updates != nil {
- changes := make(map[span.URI]*fileChange)
- for k, v := range test.updates {
- content := strings.ReplaceAll(v.content, "$SANDBOX_WORKDIR", string(rel))
- uri := span.URIFromPath(rel.AbsPath(k))
- changes[uri], err = fs.change(ctx, uri, content, v.saved)
- if err != nil {
- t.Fatal(err)
- }
- }
- got, gotChanged, gotReload := w.invalidate(ctx, changes, fs)
- if gotChanged != test.wantChanged {
- t.Errorf("w.invalidate(): got changed %t, want %t", gotChanged, test.wantChanged)
- }
- if gotReload != test.wantReload {
- t.Errorf("w.invalidate(): got reload %t, want %t", gotReload, test.wantReload)
- }
- checkState(ctx, t, fs, rel, got, test.finalState)
- }
- })
- }
-}
-
-func workspaceFromTxtar(t *testing.T, files string) (*workspace, func(), error) {
- ctx := context.Background()
- dir, err := fake.Tempdir(fake.UnpackTxt(files))
- if err != nil {
- return nil, func() {}, err
- }
- cleanup := func() {
- os.RemoveAll(dir)
- }
- root := span.URIFromPath(dir)
-
- fs := &osFileSource{}
- excludeNothing := func(string) bool { return false }
- workspace, err := newWorkspace(ctx, root, fs, excludeNothing, false, false)
- return workspace, cleanup, err
-}
-
-func TestWorkspaceParseError(t *testing.T) {
- w, cleanup, err := workspaceFromTxtar(t, `
--- go.work --
-go 1.18
-
-usa ./typo
--- typo/go.mod --
-module foo
-`)
- defer cleanup()
- if err != nil {
- t.Fatalf("error creating workspace: %v; want no error", err)
- }
- w.buildMu.Lock()
- built, buildErr := w.built, w.buildErr
- w.buildMu.Unlock()
- if !built || buildErr == nil {
- t.Fatalf("built, buildErr: got %v, %v; want true, non-nil", built, buildErr)
- }
- var errList modfile.ErrorList
- if !errors.As(buildErr, &errList) {
- t.Fatalf("expected error to be an errorlist; got %v", buildErr)
- }
- if len(errList) != 1 {
- t.Fatalf("expected errorList to have one element; got %v elements", len(errList))
- }
- parseErr := errList[0]
- if parseErr.Pos.Line != 3 {
- t.Fatalf("expected error to be on line 3; got %v", parseErr.Pos.Line)
- }
-}
-
-func TestWorkspaceMissingModFile(t *testing.T) {
- w, cleanup, err := workspaceFromTxtar(t, `
--- go.work --
-go 1.18
-
-use ./missing
-`)
- defer cleanup()
- if err != nil {
- t.Fatalf("error creating workspace: %v; want no error", err)
- }
- w.buildMu.Lock()
- built, buildErr := w.built, w.buildErr
- w.buildMu.Unlock()
- if !built || buildErr == nil {
- t.Fatalf("built, buildErr: got %v, %v; want true, non-nil", built, buildErr)
- }
-}
-
-func checkState(ctx context.Context, t *testing.T, fs source.FileSource, rel fake.RelativeTo, got *workspace, want wsState) {
- t.Helper()
- if got.moduleSource != want.source {
- t.Errorf("module source = %v, want %v", got.moduleSource, want.source)
- }
- modules := make(map[span.URI]struct{})
- for k := range got.getActiveModFiles() {
- modules[k] = struct{}{}
- }
- for _, modPath := range want.modules {
- path := rel.AbsPath(modPath)
- uri := span.URIFromPath(path)
- if _, ok := modules[uri]; !ok {
- t.Errorf("missing module %q", uri)
- }
- delete(modules, uri)
- }
- for remaining := range modules {
- t.Errorf("unexpected module %q", remaining)
- }
- gotDirs := got.dirs(ctx, fs)
- gotM := make(map[span.URI]bool)
- for _, dir := range gotDirs {
- gotM[dir] = true
- }
- for _, dir := range want.dirs {
- path := rel.AbsPath(dir)
- uri := span.URIFromPath(path)
- if !gotM[uri] {
- t.Errorf("missing dir %q", uri)
- }
- delete(gotM, uri)
- }
- for remaining := range gotM {
- t.Errorf("unexpected dir %q", remaining)
- }
- gotSumBytes, err := got.sumFile(ctx, fs)
- if err != nil {
- t.Fatal(err)
- }
- if gotSum := string(gotSumBytes); gotSum != want.sum {
- t.Errorf("got final sum %q, want %q", gotSum, want.sum)
- }
-}
diff --git a/internal/lsp/call_hierarchy.go b/internal/lsp/call_hierarchy.go
deleted file mode 100644
index 43c4ea8d5..000000000
--- a/internal/lsp/call_hierarchy.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lsp
-
-import (
- "context"
-
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
-)
-
-func (s *Server) prepareCallHierarchy(ctx context.Context, params *protocol.CallHierarchyPrepareParams) ([]protocol.CallHierarchyItem, error) {
- snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.Go)
- defer release()
- if !ok {
- return nil, err
- }
-
- return source.PrepareCallHierarchy(ctx, snapshot, fh, params.Position)
-}
-
-func (s *Server) incomingCalls(ctx context.Context, params *protocol.CallHierarchyIncomingCallsParams) ([]protocol.CallHierarchyIncomingCall, error) {
- snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.Item.URI, source.Go)
- defer release()
- if !ok {
- return nil, err
- }
-
- return source.IncomingCalls(ctx, snapshot, fh, params.Item.Range.Start)
-}
-
-func (s *Server) outgoingCalls(ctx context.Context, params *protocol.CallHierarchyOutgoingCallsParams) ([]protocol.CallHierarchyOutgoingCall, error) {
- snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.Item.URI, source.Go)
- defer release()
- if !ok {
- return nil, err
- }
-
- return source.OutgoingCalls(ctx, snapshot, fh, params.Item.Range.Start)
-}
diff --git a/internal/lsp/cmd/call_hierarchy.go b/internal/lsp/cmd/call_hierarchy.go
deleted file mode 100644
index c9f9e73e0..000000000
--- a/internal/lsp/cmd/call_hierarchy.go
+++ /dev/null
@@ -1,146 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmd
-
-import (
- "context"
- "flag"
- "fmt"
- "strings"
-
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/span"
- "golang.org/x/tools/internal/tool"
-)
-
-// callHierarchy implements the callHierarchy verb for gopls.
-type callHierarchy struct {
- app *Application
-}
-
-func (c *callHierarchy) Name() string { return "call_hierarchy" }
-func (c *callHierarchy) Parent() string { return c.app.Name() }
-func (c *callHierarchy) Usage() string { return "<position>" }
-func (c *callHierarchy) ShortHelp() string { return "display selected identifier's call hierarchy" }
-func (c *callHierarchy) DetailedHelp(f *flag.FlagSet) {
- fmt.Fprint(f.Output(), `
-Example:
-
- $ # 1-indexed location (:line:column or :#offset) of the target identifier
- $ gopls call_hierarchy helper/helper.go:8:6
- $ gopls call_hierarchy helper/helper.go:#53
-`)
- printFlagDefaults(f)
-}
-
-func (c *callHierarchy) Run(ctx context.Context, args ...string) error {
- if len(args) != 1 {
- return tool.CommandLineErrorf("call_hierarchy expects 1 argument (position)")
- }
-
- conn, err := c.app.connect(ctx)
- if err != nil {
- return err
- }
- defer conn.terminate(ctx)
-
- from := span.Parse(args[0])
- file := conn.AddFile(ctx, from.URI())
- if file.err != nil {
- return file.err
- }
-
- loc, err := file.mapper.Location(from)
- if err != nil {
- return err
- }
-
- p := protocol.CallHierarchyPrepareParams{
- TextDocumentPositionParams: protocol.TextDocumentPositionParams{
- TextDocument: protocol.TextDocumentIdentifier{URI: loc.URI},
- Position: loc.Range.Start,
- },
- }
-
- callItems, err := conn.PrepareCallHierarchy(ctx, &p)
- if err != nil {
- return err
- }
- if len(callItems) == 0 {
- return fmt.Errorf("function declaration identifier not found at %v", args[0])
- }
-
- for _, item := range callItems {
- incomingCalls, err := conn.IncomingCalls(ctx, &protocol.CallHierarchyIncomingCallsParams{Item: item})
- if err != nil {
- return err
- }
- for i, call := range incomingCalls {
- // From the spec: CallHierarchyIncomingCall.FromRanges is relative to
- // the caller denoted by CallHierarchyIncomingCall.from.
- printString, err := callItemPrintString(ctx, conn, call.From, call.From.URI, call.FromRanges)
- if err != nil {
- return err
- }
- fmt.Printf("caller[%d]: %s\n", i, printString)
- }
-
- printString, err := callItemPrintString(ctx, conn, item, "", nil)
- if err != nil {
- return err
- }
- fmt.Printf("identifier: %s\n", printString)
-
- outgoingCalls, err := conn.OutgoingCalls(ctx, &protocol.CallHierarchyOutgoingCallsParams{Item: item})
- if err != nil {
- return err
- }
- for i, call := range outgoingCalls {
- // From the spec: CallHierarchyOutgoingCall.FromRanges is the range
- // relative to the caller, e.g the item passed to
- printString, err := callItemPrintString(ctx, conn, call.To, item.URI, call.FromRanges)
- if err != nil {
- return err
- }
- fmt.Printf("callee[%d]: %s\n", i, printString)
- }
- }
-
- return nil
-}
-
-// callItemPrintString returns a protocol.CallHierarchyItem object represented as a string.
-// item and call ranges (protocol.Range) are converted to user friendly spans (1-indexed).
-func callItemPrintString(ctx context.Context, conn *connection, item protocol.CallHierarchyItem, callsURI protocol.DocumentURI, calls []protocol.Range) (string, error) {
- itemFile := conn.AddFile(ctx, item.URI.SpanURI())
- if itemFile.err != nil {
- return "", itemFile.err
- }
- itemSpan, err := itemFile.mapper.Span(protocol.Location{URI: item.URI, Range: item.Range})
- if err != nil {
- return "", err
- }
-
- callsFile := conn.AddFile(ctx, callsURI.SpanURI())
- if callsURI != "" && callsFile.err != nil {
- return "", callsFile.err
- }
- var callRanges []string
- for _, rng := range calls {
- callSpan, err := callsFile.mapper.Span(protocol.Location{URI: item.URI, Range: rng})
- if err != nil {
- return "", err
- }
-
- spn := fmt.Sprint(callSpan)
- callRanges = append(callRanges, fmt.Sprint(spn[strings.Index(spn, ":")+1:]))
- }
-
- printString := fmt.Sprintf("function %s in %v", item.Name, itemSpan)
- if len(calls) > 0 {
- printString = fmt.Sprintf("ranges %s in %s from/to %s", strings.Join(callRanges, ", "), callsURI.SpanURI().Filename(), printString)
- }
- return printString, nil
-}
diff --git a/internal/lsp/cmd/capabilities_test.go b/internal/lsp/cmd/capabilities_test.go
deleted file mode 100644
index 70db8d7d3..000000000
--- a/internal/lsp/cmd/capabilities_test.go
+++ /dev/null
@@ -1,166 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmd
-
-import (
- "context"
- "io/ioutil"
- "os"
- "path/filepath"
- "testing"
-
- "golang.org/x/tools/internal/lsp"
- "golang.org/x/tools/internal/lsp/cache"
- "golang.org/x/tools/internal/lsp/protocol"
- errors "golang.org/x/xerrors"
-)
-
-// TestCapabilities does some minimal validation of the server's adherence to the LSP.
-// The checks in the test are added as changes are made and errors noticed.
-func TestCapabilities(t *testing.T) {
- tmpDir, err := ioutil.TempDir("", "fake")
- if err != nil {
- t.Fatal(err)
- }
- tmpFile := filepath.Join(tmpDir, "fake.go")
- if err := ioutil.WriteFile(tmpFile, []byte(""), 0775); err != nil {
- t.Fatal(err)
- }
- if err := ioutil.WriteFile(filepath.Join(tmpDir, "go.mod"), []byte("module fake\n\ngo 1.12\n"), 0775); err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(tmpDir)
-
- app := New("gopls-test", tmpDir, os.Environ(), nil)
- c := newConnection(app)
- ctx := context.Background()
- defer c.terminate(ctx)
-
- params := &protocol.ParamInitialize{}
- params.RootURI = protocol.URIFromPath(c.Client.app.wd)
- params.Capabilities.Workspace.Configuration = true
-
- // Send an initialize request to the server.
- c.Server = lsp.NewServer(cache.New(app.options).NewSession(ctx), c.Client)
- result, err := c.Server.Initialize(ctx, params)
- if err != nil {
- t.Fatal(err)
- }
- // Validate initialization result.
- if err := validateCapabilities(result); err != nil {
- t.Error(err)
- }
- // Complete initialization of server.
- if err := c.Server.Initialized(ctx, &protocol.InitializedParams{}); err != nil {
- t.Fatal(err)
- }
-
- // Open the file on the server side.
- uri := protocol.URIFromPath(tmpFile)
- if err := c.Server.DidOpen(ctx, &protocol.DidOpenTextDocumentParams{
- TextDocument: protocol.TextDocumentItem{
- URI: uri,
- LanguageID: "go",
- Version: 1,
- Text: `package main; func main() {};`,
- },
- }); err != nil {
- t.Fatal(err)
- }
-
- // If we are sending a full text change, the change.Range must be nil.
- // It is not enough for the Change to be empty, as that is ambiguous.
- if err := c.Server.DidChange(ctx, &protocol.DidChangeTextDocumentParams{
- TextDocument: protocol.VersionedTextDocumentIdentifier{
- TextDocumentIdentifier: protocol.TextDocumentIdentifier{
- URI: uri,
- },
- Version: 2,
- },
- ContentChanges: []protocol.TextDocumentContentChangeEvent{
- {
- Range: nil,
- Text: `package main; func main() { fmt.Println("") }`,
- },
- },
- }); err != nil {
- t.Fatal(err)
- }
-
- // Send a code action request to validate expected types.
- actions, err := c.Server.CodeAction(ctx, &protocol.CodeActionParams{
- TextDocument: protocol.TextDocumentIdentifier{
- URI: uri,
- },
- })
- if err != nil {
- t.Fatal(err)
- }
- for _, action := range actions {
- // Validate that an empty command is sent along with import organization responses.
- if action.Kind == protocol.SourceOrganizeImports && action.Command != nil {
- t.Errorf("unexpected command for import organization")
- }
- }
-
- if err := c.Server.DidSave(ctx, &protocol.DidSaveTextDocumentParams{
- TextDocument: protocol.TextDocumentIdentifier{
- URI: uri,
- },
- // LSP specifies that a file can be saved with optional text, so this field must be nil.
- Text: nil,
- }); err != nil {
- t.Fatal(err)
- }
-
- // Send a completion request to validate expected types.
- list, err := c.Server.Completion(ctx, &protocol.CompletionParams{
- TextDocumentPositionParams: protocol.TextDocumentPositionParams{
- TextDocument: protocol.TextDocumentIdentifier{
- URI: uri,
- },
- Position: protocol.Position{
- Line: 0,
- Character: 28,
- },
- },
- })
- if err != nil {
- t.Fatal(err)
- }
- for _, item := range list.Items {
- // All other completion items should have nil commands.
- // An empty command will be treated as a command with the name '' by VS Code.
- // This causes VS Code to report errors to users about invalid commands.
- if item.Command != nil {
- t.Errorf("unexpected command for completion item")
- }
- // The item's TextEdit must be a pointer, as VS Code considers TextEdits
- // that don't contain the cursor position to be invalid.
- var textEdit interface{} = item.TextEdit
- if _, ok := textEdit.(*protocol.TextEdit); !ok {
- t.Errorf("textEdit is not a *protocol.TextEdit, instead it is %T", textEdit)
- }
- }
- if err := c.Server.Shutdown(ctx); err != nil {
- t.Fatal(err)
- }
- if err := c.Server.Exit(ctx); err != nil {
- t.Fatal(err)
- }
-}
-
-func validateCapabilities(result *protocol.InitializeResult) error {
- // If the client sends "false" for RenameProvider.PrepareSupport,
- // the server must respond with a boolean.
- if v, ok := result.Capabilities.RenameProvider.(bool); !ok {
- return errors.Errorf("RenameProvider must be a boolean if PrepareSupport is false (got %T)", v)
- }
- // The same goes for CodeActionKind.ValueSet.
- if v, ok := result.Capabilities.CodeActionProvider.(bool); !ok {
- return errors.Errorf("CodeActionSupport must be a boolean if CodeActionKind.ValueSet has length 0 (got %T)", v)
- }
- return nil
-}
diff --git a/internal/lsp/cmd/check.go b/internal/lsp/cmd/check.go
deleted file mode 100644
index 566924aa6..000000000
--- a/internal/lsp/cmd/check.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmd
-
-import (
- "context"
- "flag"
- "fmt"
-
- "golang.org/x/tools/internal/span"
- errors "golang.org/x/xerrors"
-)
-
-// check implements the check verb for gopls.
-type check struct {
- app *Application
-}
-
-func (c *check) Name() string { return "check" }
-func (c *check) Parent() string { return c.app.Name() }
-func (c *check) Usage() string { return "<filename>" }
-func (c *check) ShortHelp() string { return "show diagnostic results for the specified file" }
-func (c *check) DetailedHelp(f *flag.FlagSet) {
- fmt.Fprint(f.Output(), `
-Example: show the diagnostic results of this file:
-
- $ gopls check internal/lsp/cmd/check.go
-`)
- printFlagDefaults(f)
-}
-
-// Run performs the check on the files specified by args and prints the
-// results to stdout.
-func (c *check) Run(ctx context.Context, args ...string) error {
- if len(args) == 0 {
- // no files, so no results
- return nil
- }
- checking := map[span.URI]*cmdFile{}
- var uris []span.URI
- // now we ready to kick things off
- conn, err := c.app.connect(ctx)
- if err != nil {
- return err
- }
- defer conn.terminate(ctx)
- for _, arg := range args {
- uri := span.URIFromPath(arg)
- uris = append(uris, uri)
- file := conn.AddFile(ctx, uri)
- if file.err != nil {
- return file.err
- }
- checking[uri] = file
- }
- if err := conn.diagnoseFiles(ctx, uris); err != nil {
- return err
- }
- conn.Client.filesMu.Lock()
- defer conn.Client.filesMu.Unlock()
-
- for _, file := range checking {
- for _, d := range file.diagnostics {
- spn, err := file.mapper.RangeSpan(d.Range)
- if err != nil {
- return errors.Errorf("Could not convert position %v for %q", d.Range, d.Message)
- }
- fmt.Printf("%v: %v\n", spn, d.Message)
- }
- }
- return nil
-}
diff --git a/internal/lsp/cmd/cmd.go b/internal/lsp/cmd/cmd.go
deleted file mode 100644
index d48398d0d..000000000
--- a/internal/lsp/cmd/cmd.go
+++ /dev/null
@@ -1,630 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package cmd handles the gopls command line.
-// It contains a handler for each of the modes, along with all the flag handling
-// and the command line output format.
-package cmd
-
-import (
- "context"
- "flag"
- "fmt"
- "go/token"
- "io/ioutil"
- "log"
- "os"
- "reflect"
- "sort"
- "strings"
- "sync"
- "text/tabwriter"
- "time"
-
- "golang.org/x/tools/internal/jsonrpc2"
- "golang.org/x/tools/internal/lsp"
- "golang.org/x/tools/internal/lsp/cache"
- "golang.org/x/tools/internal/lsp/debug"
- "golang.org/x/tools/internal/lsp/lsprpc"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
- "golang.org/x/tools/internal/tool"
- "golang.org/x/tools/internal/xcontext"
- errors "golang.org/x/xerrors"
-)
-
-// Application is the main application as passed to tool.Main
-// It handles the main command line parsing and dispatch to the sub commands.
-type Application struct {
- // Core application flags
-
- // Embed the basic profiling flags supported by the tool package
- tool.Profile
-
- // We include the server configuration directly for now, so the flags work
- // even without the verb.
- // TODO: Remove this when we stop allowing the serve verb by default.
- Serve Serve
-
- // the options configuring function to invoke when building a server
- options func(*source.Options)
-
- // The name of the binary, used in help and telemetry.
- name string
-
- // The working directory to run commands in.
- wd string
-
- // The environment variables to use.
- env []string
-
- // Support for remote LSP server.
- Remote string `flag:"remote" help:"forward all commands to a remote lsp specified by this flag. With no special prefix, this is assumed to be a TCP address. If prefixed by 'unix;', the subsequent address is assumed to be a unix domain socket. If 'auto', or prefixed by 'auto;', the remote address is automatically resolved based on the executing environment."`
-
- // Verbose enables verbose logging.
- Verbose bool `flag:"v,verbose" help:"verbose output"`
-
- // VeryVerbose enables a higher level of verbosity in logging output.
- VeryVerbose bool `flag:"vv,veryverbose" help:"very verbose output"`
-
- // Control ocagent export of telemetry
- OCAgent string `flag:"ocagent" help:"the address of the ocagent (e.g. http://localhost:55678), or off"`
-
- // PrepareOptions is called to update the options when a new view is built.
- // It is primarily to allow the behavior of gopls to be modified by hooks.
- PrepareOptions func(*source.Options)
-}
-
-func (app *Application) verbose() bool {
- return app.Verbose || app.VeryVerbose
-}
-
-// New returns a new Application ready to run.
-func New(name, wd string, env []string, options func(*source.Options)) *Application {
- if wd == "" {
- wd, _ = os.Getwd()
- }
- app := &Application{
- options: options,
- name: name,
- wd: wd,
- env: env,
- OCAgent: "off", //TODO: Remove this line to default the exporter to on
-
- Serve: Serve{
- RemoteListenTimeout: 1 * time.Minute,
- },
- }
- app.Serve.app = app
- return app
-}
-
-// Name implements tool.Application returning the binary name.
-func (app *Application) Name() string { return app.name }
-
-// Usage implements tool.Application returning empty extra argument usage.
-func (app *Application) Usage() string { return "" }
-
-// ShortHelp implements tool.Application returning the main binary help.
-func (app *Application) ShortHelp() string {
- return ""
-}
-
-// DetailedHelp implements tool.Application returning the main binary help.
-// This includes the short help for all the sub commands.
-func (app *Application) DetailedHelp(f *flag.FlagSet) {
- w := tabwriter.NewWriter(f.Output(), 0, 0, 2, ' ', 0)
- defer w.Flush()
-
- fmt.Fprint(w, `
-gopls is a Go language server.
-
-It is typically used with an editor to provide language features. When no
-command is specified, gopls will default to the 'serve' command. The language
-features can also be accessed via the gopls command-line interface.
-
-Usage:
- gopls help [<subject>]
-
-Command:
-`)
- fmt.Fprint(w, "\nMain\t\n")
- for _, c := range app.mainCommands() {
- fmt.Fprintf(w, " %s\t%s\n", c.Name(), c.ShortHelp())
- }
- fmt.Fprint(w, "\t\nFeatures\t\n")
- for _, c := range app.featureCommands() {
- fmt.Fprintf(w, " %s\t%s\n", c.Name(), c.ShortHelp())
- }
- fmt.Fprint(w, "\nflags:\n")
- printFlagDefaults(f)
-}
-
-// this is a slightly modified version of flag.PrintDefaults to give us control
-func printFlagDefaults(s *flag.FlagSet) {
- var flags [][]*flag.Flag
- seen := map[flag.Value]int{}
- s.VisitAll(func(f *flag.Flag) {
- if i, ok := seen[f.Value]; !ok {
- seen[f.Value] = len(flags)
- flags = append(flags, []*flag.Flag{f})
- } else {
- flags[i] = append(flags[i], f)
- }
- })
- for _, entry := range flags {
- sort.SliceStable(entry, func(i, j int) bool {
- return len(entry[i].Name) < len(entry[j].Name)
- })
- var b strings.Builder
- for i, f := range entry {
- switch i {
- case 0:
- b.WriteString(" -")
- default:
- b.WriteString(",-")
- }
- b.WriteString(f.Name)
- }
-
- f := entry[0]
- name, usage := flag.UnquoteUsage(f)
- if len(name) > 0 {
- b.WriteString("=")
- b.WriteString(name)
- }
- // Boolean flags of one ASCII letter are so common we
- // treat them specially, putting their usage on the same line.
- if b.Len() <= 4 { // space, space, '-', 'x'.
- b.WriteString("\t")
- } else {
- // Four spaces before the tab triggers good alignment
- // for both 4- and 8-space tab stops.
- b.WriteString("\n \t")
- }
- b.WriteString(strings.ReplaceAll(usage, "\n", "\n \t"))
- if !isZeroValue(f, f.DefValue) {
- if reflect.TypeOf(f.Value).Elem().Name() == "stringValue" {
- fmt.Fprintf(&b, " (default %q)", f.DefValue)
- } else {
- fmt.Fprintf(&b, " (default %v)", f.DefValue)
- }
- }
- fmt.Fprint(s.Output(), b.String(), "\n")
- }
-}
-
-// isZeroValue is copied from the flags package
-func isZeroValue(f *flag.Flag, value string) bool {
- // Build a zero value of the flag's Value type, and see if the
- // result of calling its String method equals the value passed in.
- // This works unless the Value type is itself an interface type.
- typ := reflect.TypeOf(f.Value)
- var z reflect.Value
- if typ.Kind() == reflect.Ptr {
- z = reflect.New(typ.Elem())
- } else {
- z = reflect.Zero(typ)
- }
- return value == z.Interface().(flag.Value).String()
-}
-
-// Run takes the args after top level flag processing, and invokes the correct
-// sub command as specified by the first argument.
-// If no arguments are passed it will invoke the server sub command, as a
-// temporary measure for compatibility.
-func (app *Application) Run(ctx context.Context, args ...string) error {
- ctx = debug.WithInstance(ctx, app.wd, app.OCAgent)
- if len(args) == 0 {
- s := flag.NewFlagSet(app.Name(), flag.ExitOnError)
- return tool.Run(ctx, s, &app.Serve, args)
- }
- command, args := args[0], args[1:]
- for _, c := range app.Commands() {
- if c.Name() == command {
- s := flag.NewFlagSet(app.Name(), flag.ExitOnError)
- return tool.Run(ctx, s, c, args)
- }
- }
- return tool.CommandLineErrorf("Unknown command %v", command)
-}
-
-// commands returns the set of commands supported by the gopls tool on the
-// command line.
-// The command is specified by the first non flag argument.
-func (app *Application) Commands() []tool.Application {
- var commands []tool.Application
- commands = append(commands, app.mainCommands()...)
- commands = append(commands, app.featureCommands()...)
- return commands
-}
-
-func (app *Application) mainCommands() []tool.Application {
- return []tool.Application{
- &app.Serve,
- &version{app: app},
- &bug{app: app},
- &apiJSON{app: app},
- &licenses{app: app},
- }
-}
-
-func (app *Application) featureCommands() []tool.Application {
- return []tool.Application{
- &callHierarchy{app: app},
- &check{app: app},
- &definition{app: app},
- &foldingRanges{app: app},
- &format{app: app},
- &highlight{app: app},
- &implementation{app: app},
- &imports{app: app},
- newRemote(app, ""),
- newRemote(app, "inspect"),
- &links{app: app},
- &prepareRename{app: app},
- &references{app: app},
- &rename{app: app},
- &semtok{app: app},
- &signature{app: app},
- &suggestedFix{app: app},
- &symbols{app: app},
- newWorkspace(app),
- &workspaceSymbol{app: app},
- &vulncheck{app: app},
- }
-}
-
-var (
- internalMu sync.Mutex
- internalConnections = make(map[string]*connection)
-)
-
-func (app *Application) connect(ctx context.Context) (*connection, error) {
- switch {
- case app.Remote == "":
- connection := newConnection(app)
- connection.Server = lsp.NewServer(cache.New(app.options).NewSession(ctx), connection.Client)
- ctx = protocol.WithClient(ctx, connection.Client)
- return connection, connection.initialize(ctx, app.options)
- case strings.HasPrefix(app.Remote, "internal@"):
- internalMu.Lock()
- defer internalMu.Unlock()
- opts := source.DefaultOptions().Clone()
- if app.options != nil {
- app.options(opts)
- }
- key := fmt.Sprintf("%s %v %v %v", app.wd, opts.PreferredContentFormat, opts.HierarchicalDocumentSymbolSupport, opts.SymbolMatcher)
- if c := internalConnections[key]; c != nil {
- return c, nil
- }
- remote := app.Remote[len("internal@"):]
- ctx := xcontext.Detach(ctx) //TODO:a way of shutting down the internal server
- connection, err := app.connectRemote(ctx, remote)
- if err != nil {
- return nil, err
- }
- internalConnections[key] = connection
- return connection, nil
- default:
- return app.connectRemote(ctx, app.Remote)
- }
-}
-
-// CloseTestConnections terminates shared connections used in command tests. It
-// should only be called from tests.
-func CloseTestConnections(ctx context.Context) {
- for _, c := range internalConnections {
- c.Shutdown(ctx)
- c.Exit(ctx)
- }
-}
-
-func (app *Application) connectRemote(ctx context.Context, remote string) (*connection, error) {
- connection := newConnection(app)
- conn, err := lsprpc.ConnectToRemote(ctx, remote)
- if err != nil {
- return nil, err
- }
- stream := jsonrpc2.NewHeaderStream(conn)
- cc := jsonrpc2.NewConn(stream)
- connection.Server = protocol.ServerDispatcher(cc)
- ctx = protocol.WithClient(ctx, connection.Client)
- cc.Go(ctx,
- protocol.Handlers(
- protocol.ClientHandler(connection.Client,
- jsonrpc2.MethodNotFound)))
- return connection, connection.initialize(ctx, app.options)
-}
-
-var matcherString = map[source.SymbolMatcher]string{
- source.SymbolFuzzy: "fuzzy",
- source.SymbolCaseSensitive: "caseSensitive",
- source.SymbolCaseInsensitive: "caseInsensitive",
-}
-
-func (c *connection) initialize(ctx context.Context, options func(*source.Options)) error {
- params := &protocol.ParamInitialize{}
- params.RootURI = protocol.URIFromPath(c.Client.app.wd)
- params.Capabilities.Workspace.Configuration = true
-
- // Make sure to respect configured options when sending initialize request.
- opts := source.DefaultOptions().Clone()
- if options != nil {
- options(opts)
- }
- // If you add an additional option here, you must update the map key in connect.
- params.Capabilities.TextDocument.Hover = protocol.HoverClientCapabilities{
- ContentFormat: []protocol.MarkupKind{opts.PreferredContentFormat},
- }
- params.Capabilities.TextDocument.DocumentSymbol.HierarchicalDocumentSymbolSupport = opts.HierarchicalDocumentSymbolSupport
- params.Capabilities.TextDocument.SemanticTokens = protocol.SemanticTokensClientCapabilities{}
- params.Capabilities.TextDocument.SemanticTokens.Formats = []string{"relative"}
- params.Capabilities.TextDocument.SemanticTokens.Requests.Range = true
- params.Capabilities.TextDocument.SemanticTokens.Requests.Full = true
- params.Capabilities.TextDocument.SemanticTokens.TokenTypes = lsp.SemanticTypes()
- params.Capabilities.TextDocument.SemanticTokens.TokenModifiers = lsp.SemanticModifiers()
- params.InitializationOptions = map[string]interface{}{
- "symbolMatcher": matcherString[opts.SymbolMatcher],
- }
- if _, err := c.Server.Initialize(ctx, params); err != nil {
- return err
- }
- if err := c.Server.Initialized(ctx, &protocol.InitializedParams{}); err != nil {
- return err
- }
- return nil
-}
-
-type connection struct {
- protocol.Server
- Client *cmdClient
-}
-
-type cmdClient struct {
- protocol.Server
- app *Application
- fset *token.FileSet
-
- diagnosticsMu sync.Mutex
- diagnosticsDone chan struct{}
-
- filesMu sync.Mutex
- files map[span.URI]*cmdFile
-}
-
-type cmdFile struct {
- uri span.URI
- mapper *protocol.ColumnMapper
- err error
- added bool
- diagnostics []protocol.Diagnostic
-}
-
-func newConnection(app *Application) *connection {
- return &connection{
- Client: &cmdClient{
- app: app,
- fset: token.NewFileSet(),
- files: make(map[span.URI]*cmdFile),
- },
- }
-}
-
-// fileURI converts a DocumentURI to a file:// span.URI, panicking if it's not a file.
-func fileURI(uri protocol.DocumentURI) span.URI {
- sURI := uri.SpanURI()
- if !sURI.IsFile() {
- panic(fmt.Sprintf("%q is not a file URI", uri))
- }
- return sURI
-}
-
-func (c *cmdClient) ShowMessage(ctx context.Context, p *protocol.ShowMessageParams) error { return nil }
-
-func (c *cmdClient) ShowMessageRequest(ctx context.Context, p *protocol.ShowMessageRequestParams) (*protocol.MessageActionItem, error) {
- return nil, nil
-}
-
-func (c *cmdClient) LogMessage(ctx context.Context, p *protocol.LogMessageParams) error {
- switch p.Type {
- case protocol.Error:
- log.Print("Error:", p.Message)
- case protocol.Warning:
- log.Print("Warning:", p.Message)
- case protocol.Info:
- if c.app.verbose() {
- log.Print("Info:", p.Message)
- }
- case protocol.Log:
- if c.app.verbose() {
- log.Print("Log:", p.Message)
- }
- default:
- if c.app.verbose() {
- log.Print(p.Message)
- }
- }
- return nil
-}
-
-func (c *cmdClient) Event(ctx context.Context, t *interface{}) error { return nil }
-
-func (c *cmdClient) RegisterCapability(ctx context.Context, p *protocol.RegistrationParams) error {
- return nil
-}
-
-func (c *cmdClient) UnregisterCapability(ctx context.Context, p *protocol.UnregistrationParams) error {
- return nil
-}
-
-func (c *cmdClient) WorkspaceFolders(ctx context.Context) ([]protocol.WorkspaceFolder, error) {
- return nil, nil
-}
-
-func (c *cmdClient) Configuration(ctx context.Context, p *protocol.ParamConfiguration) ([]interface{}, error) {
- results := make([]interface{}, len(p.Items))
- for i, item := range p.Items {
- if item.Section != "gopls" {
- continue
- }
- env := map[string]interface{}{}
- for _, value := range c.app.env {
- l := strings.SplitN(value, "=", 2)
- if len(l) != 2 {
- continue
- }
- env[l[0]] = l[1]
- }
- m := map[string]interface{}{
- "env": env,
- "analyses": map[string]bool{
- "fillreturns": true,
- "nonewvars": true,
- "noresultvalues": true,
- "undeclaredname": true,
- },
- }
- if c.app.VeryVerbose {
- m["verboseOutput"] = true
- }
- results[i] = m
- }
- return results, nil
-}
-
-func (c *cmdClient) ApplyEdit(ctx context.Context, p *protocol.ApplyWorkspaceEditParams) (*protocol.ApplyWorkspaceEditResult, error) {
- return &protocol.ApplyWorkspaceEditResult{Applied: false, FailureReason: "not implemented"}, nil
-}
-
-func (c *cmdClient) PublishDiagnostics(ctx context.Context, p *protocol.PublishDiagnosticsParams) error {
- if p.URI == "gopls://diagnostics-done" {
- close(c.diagnosticsDone)
- }
- // Don't worry about diagnostics without versions.
- if p.Version == 0 {
- return nil
- }
-
- c.filesMu.Lock()
- defer c.filesMu.Unlock()
-
- file := c.getFile(ctx, fileURI(p.URI))
- file.diagnostics = p.Diagnostics
- return nil
-}
-
-func (c *cmdClient) Progress(context.Context, *protocol.ProgressParams) error {
- return nil
-}
-
-func (c *cmdClient) ShowDocument(context.Context, *protocol.ShowDocumentParams) (*protocol.ShowDocumentResult, error) {
- return nil, nil
-}
-
-func (c *cmdClient) WorkDoneProgressCreate(context.Context, *protocol.WorkDoneProgressCreateParams) error {
- return nil
-}
-
-func (c *cmdClient) getFile(ctx context.Context, uri span.URI) *cmdFile {
- file, found := c.files[uri]
- if !found || file.err != nil {
- file = &cmdFile{
- uri: uri,
- }
- c.files[uri] = file
- }
- if file.mapper == nil {
- fname := uri.Filename()
- content, err := ioutil.ReadFile(fname)
- if err != nil {
- file.err = errors.Errorf("getFile: %v: %v", uri, err)
- return file
- }
- f := c.fset.AddFile(fname, -1, len(content))
- f.SetLinesForContent(content)
- converter := span.NewContentConverter(fname, content)
- file.mapper = &protocol.ColumnMapper{
- URI: uri,
- Converter: converter,
- Content: content,
- }
- }
- return file
-}
-
-func (c *connection) AddFile(ctx context.Context, uri span.URI) *cmdFile {
- c.Client.filesMu.Lock()
- defer c.Client.filesMu.Unlock()
-
- file := c.Client.getFile(ctx, uri)
- // This should never happen.
- if file == nil {
- return &cmdFile{
- uri: uri,
- err: fmt.Errorf("no file found for %s", uri),
- }
- }
- if file.err != nil || file.added {
- return file
- }
- file.added = true
- p := &protocol.DidOpenTextDocumentParams{
- TextDocument: protocol.TextDocumentItem{
- URI: protocol.URIFromSpanURI(uri),
- LanguageID: "go",
- Version: 1,
- Text: string(file.mapper.Content),
- },
- }
- if err := c.Server.DidOpen(ctx, p); err != nil {
- file.err = errors.Errorf("%v: %v", uri, err)
- }
- return file
-}
-
-func (c *connection) semanticTokens(ctx context.Context, p *protocol.SemanticTokensRangeParams) (*protocol.SemanticTokens, error) {
- // use range to avoid limits on full
- resp, err := c.Server.SemanticTokensRange(ctx, p)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *connection) diagnoseFiles(ctx context.Context, files []span.URI) error {
- var untypedFiles []interface{}
- for _, file := range files {
- untypedFiles = append(untypedFiles, string(file))
- }
- c.Client.diagnosticsMu.Lock()
- defer c.Client.diagnosticsMu.Unlock()
-
- c.Client.diagnosticsDone = make(chan struct{})
- _, err := c.Server.NonstandardRequest(ctx, "gopls/diagnoseFiles", map[string]interface{}{"files": untypedFiles})
- if err != nil {
- close(c.Client.diagnosticsDone)
- return err
- }
-
- <-c.Client.diagnosticsDone
- return nil
-}
-
-func (c *connection) terminate(ctx context.Context) {
- if strings.HasPrefix(c.Client.app.Remote, "internal@") {
- // internal connections need to be left alive for the next test
- return
- }
- //TODO: do we need to handle errors on these calls?
- c.Shutdown(ctx)
- //TODO: right now calling exit terminates the process, we should rethink that
- //server.Exit(ctx)
-}
-
-// Implement io.Closer.
-func (c *cmdClient) Close() error {
- return nil
-}
diff --git a/internal/lsp/cmd/cmd_test.go b/internal/lsp/cmd/cmd_test.go
deleted file mode 100644
index 29816c83e..000000000
--- a/internal/lsp/cmd/cmd_test.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmd_test
-
-import (
- "os"
- "testing"
-
- cmdtest "golang.org/x/tools/internal/lsp/cmd/test"
- "golang.org/x/tools/internal/lsp/tests"
- "golang.org/x/tools/internal/testenv"
-)
-
-func TestMain(m *testing.M) {
- testenv.ExitIfSmallMachine()
- os.Exit(m.Run())
-}
-
-func TestCommandLine(t *testing.T) {
- cmdtest.TestCommandLine(t, "../testdata", tests.DefaultOptions)
-}
diff --git a/internal/lsp/cmd/definition.go b/internal/lsp/cmd/definition.go
deleted file mode 100644
index f3c71b671..000000000
--- a/internal/lsp/cmd/definition.go
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmd
-
-import (
- "context"
- "encoding/json"
- "flag"
- "fmt"
- "os"
- "strings"
-
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
- "golang.org/x/tools/internal/tool"
- errors "golang.org/x/xerrors"
-)
-
-// A Definition is the result of a 'definition' query.
-type Definition struct {
- Span span.Span `json:"span"` // span of the definition
- Description string `json:"description"` // description of the denoted object
-}
-
-// These constant is printed in the help, and then used in a test to verify the
-// help is still valid.
-// They refer to "Set" in "flag.FlagSet" from the DetailedHelp method below.
-const (
- exampleLine = 44
- exampleColumn = 47
- exampleOffset = 1270
-)
-
-// definition implements the definition verb for gopls.
-type definition struct {
- app *Application
-
- JSON bool `flag:"json" help:"emit output in JSON format"`
- MarkdownSupported bool `flag:"markdown" help:"support markdown in responses"`
-}
-
-func (d *definition) Name() string { return "definition" }
-func (d *definition) Parent() string { return d.app.Name() }
-func (d *definition) Usage() string { return "[definition-flags] <position>" }
-func (d *definition) ShortHelp() string { return "show declaration of selected identifier" }
-func (d *definition) DetailedHelp(f *flag.FlagSet) {
- fmt.Fprintf(f.Output(), `
-Example: show the definition of the identifier at syntax at offset %[1]v in this file (flag.FlagSet):
-
- $ gopls definition internal/lsp/cmd/definition.go:%[1]v:%[2]v
- $ gopls definition internal/lsp/cmd/definition.go:#%[3]v
-
-definition-flags:
-`, exampleLine, exampleColumn, exampleOffset)
- printFlagDefaults(f)
-}
-
-// Run performs the definition query as specified by args and prints the
-// results to stdout.
-func (d *definition) Run(ctx context.Context, args ...string) error {
- if len(args) != 1 {
- return tool.CommandLineErrorf("definition expects 1 argument")
- }
- // Plaintext makes more sense for the command line.
- opts := d.app.options
- d.app.options = func(o *source.Options) {
- if opts != nil {
- opts(o)
- }
- o.PreferredContentFormat = protocol.PlainText
- if d.MarkdownSupported {
- o.PreferredContentFormat = protocol.Markdown
- }
- }
- conn, err := d.app.connect(ctx)
- if err != nil {
- return err
- }
- defer conn.terminate(ctx)
- from := span.Parse(args[0])
- file := conn.AddFile(ctx, from.URI())
- if file.err != nil {
- return file.err
- }
- loc, err := file.mapper.Location(from)
- if err != nil {
- return err
- }
- tdpp := protocol.TextDocumentPositionParams{
- TextDocument: protocol.TextDocumentIdentifier{URI: loc.URI},
- Position: loc.Range.Start,
- }
- p := protocol.DefinitionParams{
- TextDocumentPositionParams: tdpp,
- }
- locs, err := conn.Definition(ctx, &p)
- if err != nil {
- return errors.Errorf("%v: %v", from, err)
- }
-
- if len(locs) == 0 {
- return errors.Errorf("%v: not an identifier", from)
- }
- q := protocol.HoverParams{
- TextDocumentPositionParams: tdpp,
- }
- hover, err := conn.Hover(ctx, &q)
- if err != nil {
- return errors.Errorf("%v: %v", from, err)
- }
- if hover == nil {
- return errors.Errorf("%v: not an identifier", from)
- }
- file = conn.AddFile(ctx, fileURI(locs[0].URI))
- if file.err != nil {
- return errors.Errorf("%v: %v", from, file.err)
- }
- definition, err := file.mapper.Span(locs[0])
- if err != nil {
- return errors.Errorf("%v: %v", from, err)
- }
- description := strings.TrimSpace(hover.Contents.Value)
- result := &Definition{
- Span: definition,
- Description: description,
- }
- if d.JSON {
- enc := json.NewEncoder(os.Stdout)
- enc.SetIndent("", "\t")
- return enc.Encode(result)
- }
- fmt.Printf("%v: defined here as %s", result.Span, result.Description)
- return nil
-}
diff --git a/internal/lsp/cmd/export_test.go b/internal/lsp/cmd/export_test.go
deleted file mode 100644
index 05b3cd312..000000000
--- a/internal/lsp/cmd/export_test.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmd
-
-const (
- ExampleLine = exampleLine
- ExampleColumn = exampleColumn
- ExampleOffset = exampleOffset
-)
diff --git a/internal/lsp/cmd/folding_range.go b/internal/lsp/cmd/folding_range.go
deleted file mode 100644
index 513c9bdd2..000000000
--- a/internal/lsp/cmd/folding_range.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmd
-
-import (
- "context"
- "flag"
- "fmt"
-
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/span"
- "golang.org/x/tools/internal/tool"
-)
-
-// foldingRanges implements the folding_ranges verb for gopls
-type foldingRanges struct {
- app *Application
-}
-
-func (r *foldingRanges) Name() string { return "folding_ranges" }
-func (r *foldingRanges) Parent() string { return r.app.Name() }
-func (r *foldingRanges) Usage() string { return "<file>" }
-func (r *foldingRanges) ShortHelp() string { return "display selected file's folding ranges" }
-func (r *foldingRanges) DetailedHelp(f *flag.FlagSet) {
- fmt.Fprint(f.Output(), `
-Example:
-
- $ gopls folding_ranges helper/helper.go
-`)
- printFlagDefaults(f)
-}
-
-func (r *foldingRanges) Run(ctx context.Context, args ...string) error {
- if len(args) != 1 {
- return tool.CommandLineErrorf("folding_ranges expects 1 argument (file)")
- }
-
- conn, err := r.app.connect(ctx)
- if err != nil {
- return err
- }
- defer conn.terminate(ctx)
-
- from := span.Parse(args[0])
- file := conn.AddFile(ctx, from.URI())
- if file.err != nil {
- return file.err
- }
-
- p := protocol.FoldingRangeParams{
- TextDocument: protocol.TextDocumentIdentifier{
- URI: protocol.URIFromSpanURI(from.URI()),
- },
- }
-
- ranges, err := conn.FoldingRange(ctx, &p)
- if err != nil {
- return err
- }
-
- for _, r := range ranges {
- fmt.Printf("%v:%v-%v:%v\n",
- r.StartLine+1,
- r.StartCharacter+1,
- r.EndLine+1,
- r.EndCharacter,
- )
- }
-
- return nil
-}
diff --git a/internal/lsp/cmd/format.go b/internal/lsp/cmd/format.go
deleted file mode 100644
index 2d0f3f7c3..000000000
--- a/internal/lsp/cmd/format.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmd
-
-import (
- "context"
- "flag"
- "fmt"
- "io/ioutil"
-
- "golang.org/x/tools/internal/lsp/diff"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
- errors "golang.org/x/xerrors"
-)
-
-// format implements the format verb for gopls.
-type format struct {
- Diff bool `flag:"d,diff" help:"display diffs instead of rewriting files"`
- Write bool `flag:"w,write" help:"write result to (source) file instead of stdout"`
- List bool `flag:"l,list" help:"list files whose formatting differs from gofmt's"`
-
- app *Application
-}
-
-func (c *format) Name() string { return "format" }
-func (c *format) Parent() string { return c.app.Name() }
-func (c *format) Usage() string { return "[format-flags] <filerange>" }
-func (c *format) ShortHelp() string { return "format the code according to the go standard" }
-func (c *format) DetailedHelp(f *flag.FlagSet) {
- fmt.Fprint(f.Output(), `
-The arguments supplied may be simple file names, or ranges within files.
-
-Example: reformat this file:
-
- $ gopls format -w internal/lsp/cmd/check.go
-
-format-flags:
-`)
- printFlagDefaults(f)
-}
-
-// Run performs the check on the files specified by args and prints the
-// results to stdout.
-func (c *format) Run(ctx context.Context, args ...string) error {
- if len(args) == 0 {
- // no files, so no results
- return nil
- }
- // now we ready to kick things off
- conn, err := c.app.connect(ctx)
- if err != nil {
- return err
- }
- defer conn.terminate(ctx)
- for _, arg := range args {
- spn := span.Parse(arg)
- file := conn.AddFile(ctx, spn.URI())
- if file.err != nil {
- return file.err
- }
- filename := spn.URI().Filename()
- loc, err := file.mapper.Location(spn)
- if err != nil {
- return err
- }
- if loc.Range.Start != loc.Range.End {
- return errors.Errorf("only full file formatting supported")
- }
- p := protocol.DocumentFormattingParams{
- TextDocument: protocol.TextDocumentIdentifier{URI: loc.URI},
- }
- edits, err := conn.Formatting(ctx, &p)
- if err != nil {
- return errors.Errorf("%v: %v", spn, err)
- }
- sedits, err := source.FromProtocolEdits(file.mapper, edits)
- if err != nil {
- return errors.Errorf("%v: %v", spn, err)
- }
- formatted := diff.ApplyEdits(string(file.mapper.Content), sedits)
- printIt := true
- if c.List {
- printIt = false
- if len(edits) > 0 {
- fmt.Println(filename)
- }
- }
- if c.Write {
- printIt = false
- if len(edits) > 0 {
- ioutil.WriteFile(filename, []byte(formatted), 0644)
- }
- }
- if c.Diff {
- printIt = false
- u := diff.ToUnified(filename+".orig", filename, string(file.mapper.Content), sedits)
- fmt.Print(u)
- }
- if printIt {
- fmt.Print(formatted)
- }
- }
- return nil
-}
diff --git a/internal/lsp/cmd/help_test.go b/internal/lsp/cmd/help_test.go
deleted file mode 100644
index 536d19dc2..000000000
--- a/internal/lsp/cmd/help_test.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmd_test
-
-import (
- "bytes"
- "context"
- "flag"
- "io/ioutil"
- "path/filepath"
- "testing"
-
- "golang.org/x/tools/internal/lsp/cmd"
- "golang.org/x/tools/internal/testenv"
- "golang.org/x/tools/internal/tool"
-)
-
-//go:generate go test -run Help -update-help-files
-
-var updateHelpFiles = flag.Bool("update-help-files", false, "Write out the help files instead of checking them")
-
-const appName = "gopls"
-
-func TestHelpFiles(t *testing.T) {
- testenv.NeedsGoBuild(t) // This is a lie. We actually need the source code.
- app := cmd.New(appName, "", nil, nil)
- ctx := context.Background()
- for _, page := range append(app.Commands(), app) {
- t.Run(page.Name(), func(t *testing.T) {
- var buf bytes.Buffer
- s := flag.NewFlagSet(page.Name(), flag.ContinueOnError)
- s.SetOutput(&buf)
- tool.Run(ctx, s, page, []string{"-h"})
- name := page.Name()
- if name == appName {
- name = "usage"
- }
- helpFile := filepath.Join("usage", name+".hlp")
- got := buf.Bytes()
- if *updateHelpFiles {
- if err := ioutil.WriteFile(helpFile, got, 0666); err != nil {
- t.Errorf("Failed writing %v: %v", helpFile, err)
- }
- return
- }
- expect, err := ioutil.ReadFile(helpFile)
- switch {
- case err != nil:
- t.Errorf("Missing help file %q", helpFile)
- case !bytes.Equal(expect, got):
- t.Errorf("Help file %q did not match, got:\n%q\nwant:\n%q", helpFile, string(got), string(expect))
- }
- })
- }
-}
diff --git a/internal/lsp/cmd/highlight.go b/internal/lsp/cmd/highlight.go
deleted file mode 100644
index a325a2d53..000000000
--- a/internal/lsp/cmd/highlight.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmd
-
-import (
- "context"
- "flag"
- "fmt"
- "sort"
-
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/span"
- "golang.org/x/tools/internal/tool"
-)
-
-// highlight implements the highlight verb for gopls.
-type highlight struct {
- app *Application
-}
-
-func (r *highlight) Name() string { return "highlight" }
-func (r *highlight) Parent() string { return r.app.Name() }
-func (r *highlight) Usage() string { return "<position>" }
-func (r *highlight) ShortHelp() string { return "display selected identifier's highlights" }
-func (r *highlight) DetailedHelp(f *flag.FlagSet) {
- fmt.Fprint(f.Output(), `
-Example:
-
- $ # 1-indexed location (:line:column or :#offset) of the target identifier
- $ gopls highlight helper/helper.go:8:6
- $ gopls highlight helper/helper.go:#53
-`)
- printFlagDefaults(f)
-}
-
-func (r *highlight) Run(ctx context.Context, args ...string) error {
- if len(args) != 1 {
- return tool.CommandLineErrorf("highlight expects 1 argument (position)")
- }
-
- conn, err := r.app.connect(ctx)
- if err != nil {
- return err
- }
- defer conn.terminate(ctx)
-
- from := span.Parse(args[0])
- file := conn.AddFile(ctx, from.URI())
- if file.err != nil {
- return file.err
- }
-
- loc, err := file.mapper.Location(from)
- if err != nil {
- return err
- }
-
- p := protocol.DocumentHighlightParams{
- TextDocumentPositionParams: protocol.TextDocumentPositionParams{
- TextDocument: protocol.TextDocumentIdentifier{URI: loc.URI},
- Position: loc.Range.Start,
- },
- }
- highlights, err := conn.DocumentHighlight(ctx, &p)
- if err != nil {
- return err
- }
-
- var results []span.Span
- for _, h := range highlights {
- l := protocol.Location{Range: h.Range}
- s, err := file.mapper.Span(l)
- if err != nil {
- return err
- }
- results = append(results, s)
- }
- // Sort results to make tests deterministic since DocumentHighlight uses a map.
- sort.SliceStable(results, func(i, j int) bool {
- return span.Compare(results[i], results[j]) == -1
- })
-
- for _, s := range results {
- fmt.Println(s)
- }
- return nil
-}
diff --git a/internal/lsp/cmd/implementation.go b/internal/lsp/cmd/implementation.go
deleted file mode 100644
index 7b42d9943..000000000
--- a/internal/lsp/cmd/implementation.go
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmd
-
-import (
- "context"
- "flag"
- "fmt"
- "sort"
-
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/span"
- "golang.org/x/tools/internal/tool"
-)
-
-// implementation implements the implementation verb for gopls
-type implementation struct {
- app *Application
-}
-
-func (i *implementation) Name() string { return "implementation" }
-func (i *implementation) Parent() string { return i.app.Name() }
-func (i *implementation) Usage() string { return "<position>" }
-func (i *implementation) ShortHelp() string { return "display selected identifier's implementation" }
-func (i *implementation) DetailedHelp(f *flag.FlagSet) {
- fmt.Fprint(f.Output(), `
-Example:
-
- $ # 1-indexed location (:line:column or :#offset) of the target identifier
- $ gopls implementation helper/helper.go:8:6
- $ gopls implementation helper/helper.go:#53
-`)
- printFlagDefaults(f)
-}
-
-func (i *implementation) Run(ctx context.Context, args ...string) error {
- if len(args) != 1 {
- return tool.CommandLineErrorf("implementation expects 1 argument (position)")
- }
-
- conn, err := i.app.connect(ctx)
- if err != nil {
- return err
- }
- defer conn.terminate(ctx)
-
- from := span.Parse(args[0])
- file := conn.AddFile(ctx, from.URI())
- if file.err != nil {
- return file.err
- }
-
- loc, err := file.mapper.Location(from)
- if err != nil {
- return err
- }
-
- p := protocol.ImplementationParams{
- TextDocumentPositionParams: protocol.TextDocumentPositionParams{
- TextDocument: protocol.TextDocumentIdentifier{URI: loc.URI},
- Position: loc.Range.Start,
- },
- }
-
- implementations, err := conn.Implementation(ctx, &p)
- if err != nil {
- return err
- }
-
- var spans []string
- for _, impl := range implementations {
- f := conn.AddFile(ctx, fileURI(impl.URI))
- span, err := f.mapper.Span(impl)
- if err != nil {
- return err
- }
- spans = append(spans, fmt.Sprint(span))
- }
- sort.Strings(spans)
-
- for _, s := range spans {
- fmt.Println(s)
- }
-
- return nil
-}
diff --git a/internal/lsp/cmd/imports.go b/internal/lsp/cmd/imports.go
deleted file mode 100644
index 215c57f11..000000000
--- a/internal/lsp/cmd/imports.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmd
-
-import (
- "context"
- "flag"
- "fmt"
- "io/ioutil"
-
- "golang.org/x/tools/internal/lsp/diff"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
- "golang.org/x/tools/internal/tool"
- errors "golang.org/x/xerrors"
-)
-
-// imports implements the import verb for gopls.
-type imports struct {
- Diff bool `flag:"d,diff" help:"display diffs instead of rewriting files"`
- Write bool `flag:"w,write" help:"write result to (source) file instead of stdout"`
-
- app *Application
-}
-
-func (t *imports) Name() string { return "imports" }
-func (t *imports) Parent() string { return t.app.Name() }
-func (t *imports) Usage() string { return "[imports-flags] <filename>" }
-func (t *imports) ShortHelp() string { return "updates import statements" }
-func (t *imports) DetailedHelp(f *flag.FlagSet) {
- fmt.Fprintf(f.Output(), `
-Example: update imports statements in a file:
-
- $ gopls imports -w internal/lsp/cmd/check.go
-
-imports-flags:
-`)
- printFlagDefaults(f)
-}
-
-// Run performs diagnostic checks on the file specified and either;
-// - if -w is specified, updates the file in place;
-// - if -d is specified, prints out unified diffs of the changes; or
-// - otherwise, prints the new versions to stdout.
-func (t *imports) Run(ctx context.Context, args ...string) error {
- if len(args) != 1 {
- return tool.CommandLineErrorf("imports expects 1 argument")
- }
- conn, err := t.app.connect(ctx)
- if err != nil {
- return err
- }
- defer conn.terminate(ctx)
-
- from := span.Parse(args[0])
- uri := from.URI()
- file := conn.AddFile(ctx, uri)
- if file.err != nil {
- return file.err
- }
- actions, err := conn.CodeAction(ctx, &protocol.CodeActionParams{
- TextDocument: protocol.TextDocumentIdentifier{
- URI: protocol.URIFromSpanURI(uri),
- },
- })
- if err != nil {
- return errors.Errorf("%v: %v", from, err)
- }
- var edits []protocol.TextEdit
- for _, a := range actions {
- if a.Title != "Organize Imports" {
- continue
- }
- for _, c := range a.Edit.DocumentChanges {
- if fileURI(c.TextDocument.URI) == uri {
- edits = append(edits, c.Edits...)
- }
- }
- }
- sedits, err := source.FromProtocolEdits(file.mapper, edits)
- if err != nil {
- return errors.Errorf("%v: %v", edits, err)
- }
- newContent := diff.ApplyEdits(string(file.mapper.Content), sedits)
-
- filename := file.uri.Filename()
- switch {
- case t.Write:
- if len(edits) > 0 {
- ioutil.WriteFile(filename, []byte(newContent), 0644)
- }
- case t.Diff:
- diffs := diff.ToUnified(filename+".orig", filename, string(file.mapper.Content), sedits)
- fmt.Print(diffs)
- default:
- fmt.Print(string(newContent))
- }
- return nil
-}
diff --git a/internal/lsp/cmd/info.go b/internal/lsp/cmd/info.go
deleted file mode 100644
index 09f453e1a..000000000
--- a/internal/lsp/cmd/info.go
+++ /dev/null
@@ -1,197 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmd
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "flag"
- "fmt"
- "net/url"
- "os"
- "strings"
-
- "golang.org/x/tools/internal/lsp/browser"
- "golang.org/x/tools/internal/lsp/debug"
- "golang.org/x/tools/internal/lsp/source"
-)
-
-// version implements the version command.
-type version struct {
- JSON bool `flag:"json" help:"outputs in json format."`
-
- app *Application
-}
-
-func (v *version) Name() string { return "version" }
-func (v *version) Parent() string { return v.app.Name() }
-func (v *version) Usage() string { return "" }
-func (v *version) ShortHelp() string { return "print the gopls version information" }
-func (v *version) DetailedHelp(f *flag.FlagSet) {
- fmt.Fprint(f.Output(), ``)
- printFlagDefaults(f)
-}
-
-// Run prints version information to stdout.
-func (v *version) Run(ctx context.Context, args ...string) error {
- var mode = debug.PlainText
- if v.JSON {
- mode = debug.JSON
- }
-
- return debug.PrintVersionInfo(ctx, os.Stdout, v.app.verbose(), mode)
-}
-
-// bug implements the bug command.
-type bug struct {
- app *Application
-}
-
-func (b *bug) Name() string { return "bug" }
-func (b *bug) Parent() string { return b.app.Name() }
-func (b *bug) Usage() string { return "" }
-func (b *bug) ShortHelp() string { return "report a bug in gopls" }
-func (b *bug) DetailedHelp(f *flag.FlagSet) {
- fmt.Fprint(f.Output(), ``)
- printFlagDefaults(f)
-}
-
-const goplsBugPrefix = "x/tools/gopls: <DESCRIBE THE PROBLEM>"
-const goplsBugHeader = `ATTENTION: Please answer these questions BEFORE submitting your issue. Thanks!
-
-#### What did you do?
-If possible, provide a recipe for reproducing the error.
-A complete runnable program is good.
-A link on play.golang.org is better.
-A failing unit test is the best.
-
-#### What did you expect to see?
-
-
-#### What did you see instead?
-
-
-`
-
-// Run collects some basic information and then prepares an issue ready to
-// be reported.
-func (b *bug) Run(ctx context.Context, args ...string) error {
- buf := &bytes.Buffer{}
- fmt.Fprint(buf, goplsBugHeader)
- debug.PrintVersionInfo(ctx, buf, true, debug.Markdown)
- body := buf.String()
- title := strings.Join(args, " ")
- if !strings.HasPrefix(title, goplsBugPrefix) {
- title = goplsBugPrefix + title
- }
- if !browser.Open("https://github.com/golang/go/issues/new?title=" + url.QueryEscape(title) + "&body=" + url.QueryEscape(body)) {
- fmt.Print("Please file a new issue at golang.org/issue/new using this template:\n\n")
- fmt.Print(body)
- }
- return nil
-}
-
-type apiJSON struct {
- app *Application
-}
-
-func (j *apiJSON) Name() string { return "api-json" }
-func (j *apiJSON) Parent() string { return j.app.Name() }
-func (j *apiJSON) Usage() string { return "" }
-func (j *apiJSON) ShortHelp() string { return "print json describing gopls API" }
-func (j *apiJSON) DetailedHelp(f *flag.FlagSet) {
- fmt.Fprint(f.Output(), ``)
- printFlagDefaults(f)
-}
-
-func (j *apiJSON) Run(ctx context.Context, args ...string) error {
- js, err := json.MarshalIndent(source.GeneratedAPIJSON, "", "\t")
- if err != nil {
- return err
- }
- fmt.Fprint(os.Stdout, string(js))
- return nil
-}
-
-type licenses struct {
- app *Application
-}
-
-func (l *licenses) Name() string { return "licenses" }
-func (l *licenses) Parent() string { return l.app.Name() }
-func (l *licenses) Usage() string { return "" }
-func (l *licenses) ShortHelp() string { return "print licenses of included software" }
-func (l *licenses) DetailedHelp(f *flag.FlagSet) {
- fmt.Fprint(f.Output(), ``)
- printFlagDefaults(f)
-}
-
-const licensePreamble = `
-gopls is made available under the following BSD-style license:
-
-Copyright (c) 2009 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-gopls implements the LSP specification, which is made available under the following license:
-
-Copyright (c) Microsoft Corporation
-
-All rights reserved.
-
-MIT License
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation
-files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy,
-modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software
-is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
-OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-gopls also includes software made available under these licenses:
-`
-
-func (l *licenses) Run(ctx context.Context, args ...string) error {
- opts := source.DefaultOptions()
- l.app.options(opts)
- txt := licensePreamble
- if opts.LicensesText == "" {
- txt += "(development gopls, license information not available)"
- } else {
- txt += opts.LicensesText
- }
- fmt.Fprint(os.Stdout, txt)
- return nil
-}
diff --git a/internal/lsp/cmd/links.go b/internal/lsp/cmd/links.go
deleted file mode 100644
index d49aabb6f..000000000
--- a/internal/lsp/cmd/links.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmd
-
-import (
- "context"
- "encoding/json"
- "flag"
- "fmt"
- "os"
-
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/span"
- "golang.org/x/tools/internal/tool"
- errors "golang.org/x/xerrors"
-)
-
-// links implements the links verb for gopls.
-type links struct {
- JSON bool `flag:"json" help:"emit document links in JSON format"`
-
- app *Application
-}
-
-func (l *links) Name() string { return "links" }
-func (l *links) Parent() string { return l.app.Name() }
-func (l *links) Usage() string { return "[links-flags] <filename>" }
-func (l *links) ShortHelp() string { return "list links in a file" }
-func (l *links) DetailedHelp(f *flag.FlagSet) {
- fmt.Fprintf(f.Output(), `
-Example: list links contained within a file:
-
- $ gopls links internal/lsp/cmd/check.go
-
-links-flags:
-`)
- printFlagDefaults(f)
-}
-
-// Run finds all the links within a document
-// - if -json is specified, outputs location range and uri
-// - otherwise, prints the a list of unique links
-func (l *links) Run(ctx context.Context, args ...string) error {
- if len(args) != 1 {
- return tool.CommandLineErrorf("links expects 1 argument")
- }
- conn, err := l.app.connect(ctx)
- if err != nil {
- return err
- }
- defer conn.terminate(ctx)
-
- from := span.Parse(args[0])
- uri := from.URI()
- file := conn.AddFile(ctx, uri)
- if file.err != nil {
- return file.err
- }
- results, err := conn.DocumentLink(ctx, &protocol.DocumentLinkParams{
- TextDocument: protocol.TextDocumentIdentifier{
- URI: protocol.URIFromSpanURI(uri),
- },
- })
- if err != nil {
- return errors.Errorf("%v: %v", from, err)
- }
- if l.JSON {
- enc := json.NewEncoder(os.Stdout)
- enc.SetIndent("", "\t")
- return enc.Encode(results)
- }
- for _, v := range results {
- fmt.Println(v.Target)
- }
- return nil
-}
diff --git a/internal/lsp/cmd/prepare_rename.go b/internal/lsp/cmd/prepare_rename.go
deleted file mode 100644
index aef0477e8..000000000
--- a/internal/lsp/cmd/prepare_rename.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmd
-
-import (
- "context"
- "flag"
- "fmt"
-
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/span"
- "golang.org/x/tools/internal/tool"
- errors "golang.org/x/xerrors"
-)
-
-// prepareRename implements the prepare_rename verb for gopls.
-type prepareRename struct {
- app *Application
-}
-
-func (r *prepareRename) Name() string { return "prepare_rename" }
-func (r *prepareRename) Parent() string { return r.app.Name() }
-func (r *prepareRename) Usage() string { return "<position>" }
-func (r *prepareRename) ShortHelp() string { return "test validity of a rename operation at location" }
-func (r *prepareRename) DetailedHelp(f *flag.FlagSet) {
- fmt.Fprint(f.Output(), `
-Example:
-
- $ # 1-indexed location (:line:column or :#offset) of the target identifier
- $ gopls prepare_rename helper/helper.go:8:6
- $ gopls prepare_rename helper/helper.go:#53
-`)
- printFlagDefaults(f)
-}
-
-// ErrInvalidRenamePosition is returned when prepareRename is run at a position that
-// is not a candidate for renaming.
-var ErrInvalidRenamePosition = errors.New("request is not valid at the given position")
-
-func (r *prepareRename) Run(ctx context.Context, args ...string) error {
- if len(args) != 1 {
- return tool.CommandLineErrorf("prepare_rename expects 1 argument (file)")
- }
-
- conn, err := r.app.connect(ctx)
- if err != nil {
- return err
- }
- defer conn.terminate(ctx)
-
- from := span.Parse(args[0])
- file := conn.AddFile(ctx, from.URI())
- if file.err != nil {
- return file.err
- }
- loc, err := file.mapper.Location(from)
- if err != nil {
- return err
- }
- p := protocol.PrepareRenameParams{
- TextDocumentPositionParams: protocol.TextDocumentPositionParams{
- TextDocument: protocol.TextDocumentIdentifier{URI: loc.URI},
- Position: loc.Range.Start,
- },
- }
- result, err := conn.PrepareRename(ctx, &p)
- if err != nil {
- return errors.Errorf("prepare_rename failed: %w", err)
- }
- if result == nil {
- return ErrInvalidRenamePosition
- }
-
- l := protocol.Location{Range: result.Range}
- s, err := file.mapper.Span(l)
- if err != nil {
- return err
- }
-
- fmt.Println(s)
- return nil
-}
diff --git a/internal/lsp/cmd/references.go b/internal/lsp/cmd/references.go
deleted file mode 100644
index 0697d2e11..000000000
--- a/internal/lsp/cmd/references.go
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmd
-
-import (
- "context"
- "flag"
- "fmt"
- "sort"
-
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/span"
- "golang.org/x/tools/internal/tool"
-)
-
-// references implements the references verb for gopls
-type references struct {
- IncludeDeclaration bool `flag:"d,declaration" help:"include the declaration of the specified identifier in the results"`
-
- app *Application
-}
-
-func (r *references) Name() string { return "references" }
-func (r *references) Parent() string { return r.app.Name() }
-func (r *references) Usage() string { return "[references-flags] <position>" }
-func (r *references) ShortHelp() string { return "display selected identifier's references" }
-func (r *references) DetailedHelp(f *flag.FlagSet) {
- fmt.Fprint(f.Output(), `
-Example:
-
- $ # 1-indexed location (:line:column or :#offset) of the target identifier
- $ gopls references helper/helper.go:8:6
- $ gopls references helper/helper.go:#53
-
-references-flags:
-`)
- printFlagDefaults(f)
-}
-
-func (r *references) Run(ctx context.Context, args ...string) error {
- if len(args) != 1 {
- return tool.CommandLineErrorf("references expects 1 argument (position)")
- }
-
- conn, err := r.app.connect(ctx)
- if err != nil {
- return err
- }
- defer conn.terminate(ctx)
-
- from := span.Parse(args[0])
- file := conn.AddFile(ctx, from.URI())
- if file.err != nil {
- return file.err
- }
- loc, err := file.mapper.Location(from)
- if err != nil {
- return err
- }
- p := protocol.ReferenceParams{
- Context: protocol.ReferenceContext{
- IncludeDeclaration: r.IncludeDeclaration,
- },
- TextDocumentPositionParams: protocol.TextDocumentPositionParams{
- TextDocument: protocol.TextDocumentIdentifier{URI: loc.URI},
- Position: loc.Range.Start,
- },
- }
- locations, err := conn.References(ctx, &p)
- if err != nil {
- return err
- }
- var spans []string
- for _, l := range locations {
- f := conn.AddFile(ctx, fileURI(l.URI))
- // convert location to span for user-friendly 1-indexed line
- // and column numbers
- span, err := f.mapper.Span(l)
- if err != nil {
- return err
- }
- spans = append(spans, fmt.Sprint(span))
- }
-
- sort.Strings(spans)
- for _, s := range spans {
- fmt.Println(s)
- }
- return nil
-}
diff --git a/internal/lsp/cmd/remote.go b/internal/lsp/cmd/remote.go
deleted file mode 100644
index f71113576..000000000
--- a/internal/lsp/cmd/remote.go
+++ /dev/null
@@ -1,164 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmd
-
-import (
- "context"
- "encoding/json"
- "flag"
- "fmt"
- "log"
- "os"
-
- "golang.org/x/tools/internal/lsp/command"
- "golang.org/x/tools/internal/lsp/lsprpc"
- errors "golang.org/x/xerrors"
-)
-
-type remote struct {
- app *Application
- subcommands
-
- // For backward compatibility, allow aliasing this command (it was previously
- // called 'inspect').
- //
- // TODO(rFindley): delete this after allowing some transition time in case
- // there were any users of 'inspect' (I suspect not).
- alias string
-}
-
-func newRemote(app *Application, alias string) *remote {
- return &remote{
- app: app,
- subcommands: subcommands{
- &listSessions{app: app},
- &startDebugging{app: app},
- },
- alias: alias,
- }
-}
-
-func (r *remote) Name() string {
- if r.alias != "" {
- return r.alias
- }
- return "remote"
-}
-
-func (r *remote) Parent() string { return r.app.Name() }
-
-func (r *remote) ShortHelp() string {
- short := "interact with the gopls daemon"
- if r.alias != "" {
- short += " (deprecated: use 'remote')"
- }
- return short
-}
-
-// listSessions is an inspect subcommand to list current sessions.
-type listSessions struct {
- app *Application
-}
-
-func (c *listSessions) Name() string { return "sessions" }
-func (c *listSessions) Parent() string { return c.app.Name() }
-func (c *listSessions) Usage() string { return "" }
-func (c *listSessions) ShortHelp() string {
- return "print information about current gopls sessions"
-}
-
-const listSessionsExamples = `
-Examples:
-
-1) list sessions for the default daemon:
-
-$ gopls -remote=auto remote sessions
-or just
-$ gopls remote sessions
-
-2) list sessions for a specific daemon:
-
-$ gopls -remote=localhost:8082 remote sessions
-`
-
-func (c *listSessions) DetailedHelp(f *flag.FlagSet) {
- fmt.Fprint(f.Output(), listSessionsExamples)
- printFlagDefaults(f)
-}
-
-func (c *listSessions) Run(ctx context.Context, args ...string) error {
- remote := c.app.Remote
- if remote == "" {
- remote = "auto"
- }
- state, err := lsprpc.QueryServerState(ctx, remote)
- if err != nil {
- return err
- }
- v, err := json.MarshalIndent(state, "", "\t")
- if err != nil {
- log.Fatal(err)
- }
- os.Stdout.Write(v)
- return nil
-}
-
-type startDebugging struct {
- app *Application
-}
-
-func (c *startDebugging) Name() string { return "debug" }
-func (c *startDebugging) Usage() string { return "[host:port]" }
-func (c *startDebugging) ShortHelp() string {
- return "start the debug server"
-}
-
-const startDebuggingExamples = `
-Examples:
-
-1) start a debug server for the default daemon, on an arbitrary port:
-
-$ gopls -remote=auto remote debug
-or just
-$ gopls remote debug
-
-2) start for a specific daemon, on a specific port:
-
-$ gopls -remote=localhost:8082 remote debug localhost:8083
-`
-
-func (c *startDebugging) DetailedHelp(f *flag.FlagSet) {
- fmt.Fprint(f.Output(), startDebuggingExamples)
- printFlagDefaults(f)
-}
-
-func (c *startDebugging) Run(ctx context.Context, args ...string) error {
- if len(args) > 1 {
- fmt.Fprintln(os.Stderr, c.Usage())
- return errors.New("invalid usage")
- }
- remote := c.app.Remote
- if remote == "" {
- remote = "auto"
- }
- debugAddr := ""
- if len(args) > 0 {
- debugAddr = args[0]
- }
- debugArgs := command.DebuggingArgs{
- Addr: debugAddr,
- }
- var result command.DebuggingResult
- if err := lsprpc.ExecuteCommand(ctx, remote, command.StartDebugging.ID(), debugArgs, &result); err != nil {
- return err
- }
- if len(result.URLs) == 0 {
- return errors.New("no debugging URLs")
- }
- for _, url := range result.URLs {
- fmt.Printf("debugging on %s\n", url)
- }
- return nil
-}
diff --git a/internal/lsp/cmd/rename.go b/internal/lsp/cmd/rename.go
deleted file mode 100644
index b0a22a1b4..000000000
--- a/internal/lsp/cmd/rename.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmd
-
-import (
- "context"
- "flag"
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
- "sort"
-
- "golang.org/x/tools/internal/lsp/diff"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
- "golang.org/x/tools/internal/tool"
- errors "golang.org/x/xerrors"
-)
-
-// rename implements the rename verb for gopls.
-type rename struct {
- Diff bool `flag:"d,diff" help:"display diffs instead of rewriting files"`
- Write bool `flag:"w,write" help:"write result to (source) file instead of stdout"`
- Preserve bool `flag:"preserve" help:"preserve original files"`
-
- app *Application
-}
-
-func (r *rename) Name() string { return "rename" }
-func (r *rename) Parent() string { return r.app.Name() }
-func (r *rename) Usage() string { return "[rename-flags] <position> <name>" }
-func (r *rename) ShortHelp() string { return "rename selected identifier" }
-func (r *rename) DetailedHelp(f *flag.FlagSet) {
- fmt.Fprint(f.Output(), `
-Example:
-
- $ # 1-based location (:line:column or :#position) of the thing to change
- $ gopls rename helper/helper.go:8:6 Foo
- $ gopls rename helper/helper.go:#53 Foo
-
-rename-flags:
-`)
- printFlagDefaults(f)
-}
-
-// Run renames the specified identifier and either;
-// - if -w is specified, updates the file(s) in place;
-// - if -d is specified, prints out unified diffs of the changes; or
-// - otherwise, prints the new versions to stdout.
-func (r *rename) Run(ctx context.Context, args ...string) error {
- if len(args) != 2 {
- return tool.CommandLineErrorf("definition expects 2 arguments (position, new name)")
- }
- conn, err := r.app.connect(ctx)
- if err != nil {
- return err
- }
- defer conn.terminate(ctx)
-
- from := span.Parse(args[0])
- file := conn.AddFile(ctx, from.URI())
- if file.err != nil {
- return file.err
- }
- loc, err := file.mapper.Location(from)
- if err != nil {
- return err
- }
- p := protocol.RenameParams{
- TextDocument: protocol.TextDocumentIdentifier{URI: loc.URI},
- Position: loc.Range.Start,
- NewName: args[1],
- }
- edit, err := conn.Rename(ctx, &p)
- if err != nil {
- return err
- }
- var orderedURIs []string
- edits := map[span.URI][]protocol.TextEdit{}
- for _, c := range edit.DocumentChanges {
- uri := fileURI(c.TextDocument.URI)
- edits[uri] = append(edits[uri], c.Edits...)
- orderedURIs = append(orderedURIs, string(uri))
- }
- sort.Strings(orderedURIs)
- changeCount := len(orderedURIs)
-
- for _, u := range orderedURIs {
- uri := span.URIFromURI(u)
- cmdFile := conn.AddFile(ctx, uri)
- filename := cmdFile.uri.Filename()
-
- // convert LSP-style edits to []diff.TextEdit cuz Spans are handy
- renameEdits, err := source.FromProtocolEdits(cmdFile.mapper, edits[uri])
- if err != nil {
- return errors.Errorf("%v: %v", edits, err)
- }
- newContent := diff.ApplyEdits(string(cmdFile.mapper.Content), renameEdits)
-
- switch {
- case r.Write:
- fmt.Fprintln(os.Stderr, filename)
- if r.Preserve {
- if err := os.Rename(filename, filename+".orig"); err != nil {
- return errors.Errorf("%v: %v", edits, err)
- }
- }
- ioutil.WriteFile(filename, []byte(newContent), 0644)
- case r.Diff:
- diffs := diff.ToUnified(filename+".orig", filename, string(cmdFile.mapper.Content), renameEdits)
- fmt.Print(diffs)
- default:
- if len(orderedURIs) > 1 {
- fmt.Printf("%s:\n", filepath.Base(filename))
- }
- fmt.Print(string(newContent))
- if changeCount > 1 { // if this wasn't last change, print newline
- fmt.Println()
- }
- changeCount -= 1
- }
- }
- return nil
-}
diff --git a/internal/lsp/cmd/semantictokens.go b/internal/lsp/cmd/semantictokens.go
deleted file mode 100644
index 120f91d36..000000000
--- a/internal/lsp/cmd/semantictokens.go
+++ /dev/null
@@ -1,230 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmd
-
-import (
- "bytes"
- "context"
- "flag"
- "fmt"
- "go/parser"
- "go/token"
- "io/ioutil"
- "log"
- "os"
- "unicode/utf8"
-
- "golang.org/x/tools/internal/lsp"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
-)
-
-// generate semantic tokens and interpolate them in the file
-
-// The output is the input file decorated with comments showing the
-// syntactic tokens. The comments are stylized:
-// /*<arrow><length>,<token type>,[<modifiers]*/
-// For most occurrences, the comment comes just before the token it
-// describes, and arrow is a right arrow. If the token is inside a string
-// the comment comes just after the string, and the arrow is a left arrow.
-// <length> is the length of the token in runes, <token type> is one
-// of the supported semantic token types, and <modifiers. is a
-// (possibly empty) list of token type modifiers.
-
-// There are 3 coordinate systems for lines and character offsets in lines
-// LSP (what's returned from semanticTokens()):
-// 0-based: the first line is line 0, the first character of a line
-// is character 0, and characters are counted as UTF-16 code points
-// gopls (and Go error messages):
-// 1-based: the first line is line1, the first chararcter of a line
-// is character 0, and characters are counted as bytes
-// internal (as used in marks, and lines:=bytes.Split(buf, '\n'))
-// 0-based: lines and character positions are 1 less than in
-// the gopls coordinate system
-
-type semtok struct {
- app *Application
-}
-
-var colmap *protocol.ColumnMapper
-
-func (c *semtok) Name() string { return "semtok" }
-func (c *semtok) Parent() string { return c.app.Name() }
-func (c *semtok) Usage() string { return "<filename>" }
-func (c *semtok) ShortHelp() string { return "show semantic tokens for the specified file" }
-func (c *semtok) DetailedHelp(f *flag.FlagSet) {
- fmt.Fprint(f.Output(), `
-Example: show the semantic tokens for this file:
-
- $ gopls semtok internal/lsp/cmd/semtok.go
-`)
- printFlagDefaults(f)
-}
-
-// Run performs the semtok on the files specified by args and prints the
-// results to stdout in the format described above.
-func (c *semtok) Run(ctx context.Context, args ...string) error {
- if len(args) != 1 {
- return fmt.Errorf("expected one file name, got %d", len(args))
- }
- // perhaps simpler if app had just had a FlagSet member
- origOptions := c.app.options
- c.app.options = func(opts *source.Options) {
- origOptions(opts)
- opts.SemanticTokens = true
- }
- conn, err := c.app.connect(ctx)
- if err != nil {
- return err
- }
- defer conn.terminate(ctx)
- uri := span.URIFromPath(args[0])
- file := conn.AddFile(ctx, uri)
- if file.err != nil {
- return file.err
- }
-
- buf, err := ioutil.ReadFile(args[0])
- if err != nil {
- return err
- }
- lines := bytes.Split(buf, []byte{'\n'})
- p := &protocol.SemanticTokensRangeParams{
- TextDocument: protocol.TextDocumentIdentifier{
- URI: protocol.URIFromSpanURI(uri),
- },
- Range: protocol.Range{Start: protocol.Position{Line: 0, Character: 0},
- End: protocol.Position{
- Line: uint32(len(lines) - 1),
- Character: uint32(len(lines[len(lines)-1]))},
- },
- }
- resp, err := conn.semanticTokens(ctx, p)
- if err != nil {
- return err
- }
- fset := token.NewFileSet()
- f, err := parser.ParseFile(fset, args[0], buf, 0)
- if err != nil {
- log.Printf("parsing %s failed %v", args[0], err)
- return err
- }
- tok := fset.File(f.Pos())
- if tok == nil {
- // can't happen; just parsed this file
- return fmt.Errorf("can't find %s in fset", args[0])
- }
- tc := span.NewContentConverter(args[0], buf)
- colmap = &protocol.ColumnMapper{
- URI: span.URI(args[0]),
- Content: buf,
- Converter: tc,
- }
- err = decorate(file.uri.Filename(), resp.Data)
- if err != nil {
- return err
- }
- return nil
-}
-
-type mark struct {
- line, offset int // 1-based, from RangeSpan
- len int // bytes, not runes
- typ string
- mods []string
-}
-
-// prefixes for semantic token comments
-const (
- SemanticLeft = "/*⇐"
- SemanticRight = "/*⇒"
-)
-
-func markLine(m mark, lines [][]byte) {
- l := lines[m.line-1] // mx is 1-based
- length := utf8.RuneCount(l[m.offset-1 : m.offset-1+m.len])
- splitAt := m.offset - 1
- insert := ""
- if m.typ == "namespace" && m.offset-1+m.len < len(l) && l[m.offset-1+m.len] == '"' {
- // it is the last component of an import spec
- // cannot put a comment inside a string
- insert = fmt.Sprintf("%s%d,namespace,[]*/", SemanticLeft, length)
- splitAt = m.offset + m.len
- } else {
- // be careful not to generate //*
- spacer := ""
- if splitAt-1 >= 0 && l[splitAt-1] == '/' {
- spacer = " "
- }
- insert = fmt.Sprintf("%s%s%d,%s,%v*/", spacer, SemanticRight, length, m.typ, m.mods)
- }
- x := append([]byte(insert), l[splitAt:]...)
- l = append(l[:splitAt], x...)
- lines[m.line-1] = l
-}
-
-func decorate(file string, result []uint32) error {
- buf, err := ioutil.ReadFile(file)
- if err != nil {
- return err
- }
- marks := newMarks(result)
- if len(marks) == 0 {
- return nil
- }
- lines := bytes.Split(buf, []byte{'\n'})
- for i := len(marks) - 1; i >= 0; i-- {
- mx := marks[i]
- markLine(mx, lines)
- }
- os.Stdout.Write(bytes.Join(lines, []byte{'\n'}))
- return nil
-}
-
-func newMarks(d []uint32) []mark {
- ans := []mark{}
- // the following two loops could be merged, at the cost
- // of making the logic slightly more complicated to understand
- // first, convert from deltas to absolute, in LSP coordinates
- lspLine := make([]uint32, len(d)/5)
- lspChar := make([]uint32, len(d)/5)
- var line, char uint32
- for i := 0; 5*i < len(d); i++ {
- lspLine[i] = line + d[5*i+0]
- if d[5*i+0] > 0 {
- char = 0
- }
- lspChar[i] = char + d[5*i+1]
- char = lspChar[i]
- line = lspLine[i]
- }
- // second, convert to gopls coordinates
- for i := 0; 5*i < len(d); i++ {
- pr := protocol.Range{
- Start: protocol.Position{
- Line: lspLine[i],
- Character: lspChar[i],
- },
- End: protocol.Position{
- Line: lspLine[i],
- Character: lspChar[i] + d[5*i+2],
- },
- }
- spn, err := colmap.RangeSpan(pr)
- if err != nil {
- log.Fatal(err)
- }
- m := mark{
- line: spn.Start().Line(),
- offset: spn.Start().Column(),
- len: spn.End().Column() - spn.Start().Column(),
- typ: lsp.SemType(int(d[5*i+3])),
- mods: lsp.SemMods(int(d[5*i+4])),
- }
- ans = append(ans, m)
- }
- return ans
-}
diff --git a/internal/lsp/cmd/serve.go b/internal/lsp/cmd/serve.go
deleted file mode 100644
index f6e268397..000000000
--- a/internal/lsp/cmd/serve.go
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmd
-
-import (
- "context"
- "flag"
- "fmt"
- "io"
- "log"
- "os"
- "time"
-
- "golang.org/x/tools/internal/fakenet"
- "golang.org/x/tools/internal/jsonrpc2"
- "golang.org/x/tools/internal/lsp/cache"
- "golang.org/x/tools/internal/lsp/debug"
- "golang.org/x/tools/internal/lsp/lsprpc"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/tool"
- errors "golang.org/x/xerrors"
-)
-
-// Serve is a struct that exposes the configurable parts of the LSP server as
-// flags, in the right form for tool.Main to consume.
-type Serve struct {
- Logfile string `flag:"logfile" help:"filename to log to. if value is \"auto\", then logging to a default output file is enabled"`
- Mode string `flag:"mode" help:"no effect"`
- Port int `flag:"port" help:"port on which to run gopls for debugging purposes"`
- Address string `flag:"listen" help:"address on which to listen for remote connections. If prefixed by 'unix;', the subsequent address is assumed to be a unix domain socket. Otherwise, TCP is used."`
- IdleTimeout time.Duration `flag:"listen.timeout" help:"when used with -listen, shut down the server when there are no connected clients for this duration"`
- Trace bool `flag:"rpc.trace" help:"print the full rpc trace in lsp inspector format"`
- Debug string `flag:"debug" help:"serve debug information on the supplied address"`
-
- RemoteListenTimeout time.Duration `flag:"remote.listen.timeout" help:"when used with -remote=auto, the -listen.timeout value used to start the daemon"`
- RemoteDebug string `flag:"remote.debug" help:"when used with -remote=auto, the -debug value used to start the daemon"`
- RemoteLogfile string `flag:"remote.logfile" help:"when used with -remote=auto, the -logfile value used to start the daemon"`
-
- app *Application
-}
-
-func (s *Serve) Name() string { return "serve" }
-func (s *Serve) Parent() string { return s.app.Name() }
-func (s *Serve) Usage() string { return "[server-flags]" }
-func (s *Serve) ShortHelp() string {
- return "run a server for Go code using the Language Server Protocol"
-}
-func (s *Serve) DetailedHelp(f *flag.FlagSet) {
- fmt.Fprint(f.Output(), ` gopls [flags] [server-flags]
-
-The server communicates using JSONRPC2 on stdin and stdout, and is intended to be run directly as
-a child of an editor process.
-
-server-flags:
-`)
- printFlagDefaults(f)
-}
-
-func (s *Serve) remoteArgs(network, address string) []string {
- args := []string{"serve",
- "-listen", fmt.Sprintf(`%s;%s`, network, address),
- }
- if s.RemoteDebug != "" {
- args = append(args, "-debug", s.RemoteDebug)
- }
- if s.RemoteListenTimeout != 0 {
- args = append(args, "-listen.timeout", s.RemoteListenTimeout.String())
- }
- if s.RemoteLogfile != "" {
- args = append(args, "-logfile", s.RemoteLogfile)
- }
- return args
-}
-
-// Run configures a server based on the flags, and then runs it.
-// It blocks until the server shuts down.
-func (s *Serve) Run(ctx context.Context, args ...string) error {
- if len(args) > 0 {
- return tool.CommandLineErrorf("server does not take arguments, got %v", args)
- }
-
- di := debug.GetInstance(ctx)
- isDaemon := s.Address != "" || s.Port != 0
- if di != nil {
- closeLog, err := di.SetLogFile(s.Logfile, isDaemon)
- if err != nil {
- return err
- }
- defer closeLog()
- di.ServerAddress = s.Address
- di.MonitorMemory(ctx)
- di.Serve(ctx, s.Debug)
- }
- var ss jsonrpc2.StreamServer
- if s.app.Remote != "" {
- var err error
- ss, err = lsprpc.NewForwarder(s.app.Remote, s.remoteArgs)
- if err != nil {
- return errors.Errorf("creating forwarder: %w", err)
- }
- } else {
- ss = lsprpc.NewStreamServer(cache.New(s.app.options), isDaemon)
- }
-
- var network, addr string
- if s.Address != "" {
- network, addr = lsprpc.ParseAddr(s.Address)
- }
- if s.Port != 0 {
- network = "tcp"
- addr = fmt.Sprintf(":%v", s.Port)
- }
- if addr != "" {
- log.Printf("Gopls daemon: listening on %s network, address %s...", network, addr)
- defer log.Printf("Gopls daemon: exiting")
- return jsonrpc2.ListenAndServe(ctx, network, addr, ss, s.IdleTimeout)
- }
- stream := jsonrpc2.NewHeaderStream(fakenet.NewConn("stdio", os.Stdin, os.Stdout))
- if s.Trace && di != nil {
- stream = protocol.LoggingStream(stream, di.LogWriter)
- }
- conn := jsonrpc2.NewConn(stream)
- err := ss.ServeStream(ctx, conn)
- if errors.Is(err, io.EOF) {
- return nil
- }
- return err
-}
diff --git a/internal/lsp/cmd/signature.go b/internal/lsp/cmd/signature.go
deleted file mode 100644
index db9484301..000000000
--- a/internal/lsp/cmd/signature.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmd
-
-import (
- "context"
- "flag"
- "fmt"
-
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/span"
- "golang.org/x/tools/internal/tool"
-)
-
-// signature implements the signature verb for gopls
-type signature struct {
- app *Application
-}
-
-func (r *signature) Name() string { return "signature" }
-func (r *signature) Parent() string { return r.app.Name() }
-func (r *signature) Usage() string { return "<position>" }
-func (r *signature) ShortHelp() string { return "display selected identifier's signature" }
-func (r *signature) DetailedHelp(f *flag.FlagSet) {
- fmt.Fprint(f.Output(), `
-Example:
-
- $ # 1-indexed location (:line:column or :#offset) of the target identifier
- $ gopls signature helper/helper.go:8:6
- $ gopls signature helper/helper.go:#53
-`)
- printFlagDefaults(f)
-}
-
-func (r *signature) Run(ctx context.Context, args ...string) error {
- if len(args) != 1 {
- return tool.CommandLineErrorf("signature expects 1 argument (position)")
- }
-
- conn, err := r.app.connect(ctx)
- if err != nil {
- return err
- }
- defer conn.terminate(ctx)
-
- from := span.Parse(args[0])
- file := conn.AddFile(ctx, from.URI())
- if file.err != nil {
- return file.err
- }
-
- loc, err := file.mapper.Location(from)
- if err != nil {
- return err
- }
-
- tdpp := protocol.TextDocumentPositionParams{
- TextDocument: protocol.TextDocumentIdentifier{
- URI: protocol.URIFromSpanURI(from.URI()),
- },
- Position: loc.Range.Start,
- }
- p := protocol.SignatureHelpParams{
- TextDocumentPositionParams: tdpp,
- }
-
- s, err := conn.SignatureHelp(ctx, &p)
- if err != nil {
- return err
- }
-
- if s == nil || len(s.Signatures) == 0 {
- return tool.CommandLineErrorf("%v: not a function", from)
- }
-
- // there is only ever one possible signature,
- // see toProtocolSignatureHelp in lsp/signature_help.go
- signature := s.Signatures[0]
- fmt.Printf("%s\n", signature.Label)
- if signature.Documentation != "" {
- fmt.Printf("\n%s\n", signature.Documentation)
- }
-
- return nil
-}
diff --git a/internal/lsp/cmd/subcommands.go b/internal/lsp/cmd/subcommands.go
deleted file mode 100644
index deac5c822..000000000
--- a/internal/lsp/cmd/subcommands.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmd
-
-import (
- "context"
- "flag"
- "fmt"
- "text/tabwriter"
-
- "golang.org/x/tools/internal/tool"
-)
-
-// subcommands is a helper that may be embedded for commands that delegate to
-// subcommands.
-type subcommands []tool.Application
-
-func (s subcommands) DetailedHelp(f *flag.FlagSet) {
- w := tabwriter.NewWriter(f.Output(), 0, 0, 2, ' ', 0)
- defer w.Flush()
- fmt.Fprint(w, "\nSubcommand:\n")
- for _, c := range s {
- fmt.Fprintf(w, " %s\t%s\n", c.Name(), c.ShortHelp())
- }
- printFlagDefaults(f)
-}
-
-func (s subcommands) Usage() string { return "<subcommand> [arg]..." }
-
-func (s subcommands) Run(ctx context.Context, args ...string) error {
- if len(args) == 0 {
- return tool.CommandLineErrorf("must provide subcommand")
- }
- command, args := args[0], args[1:]
- for _, c := range s {
- if c.Name() == command {
- s := flag.NewFlagSet(c.Name(), flag.ExitOnError)
- return tool.Run(ctx, s, c, args)
- }
- }
- return tool.CommandLineErrorf("unknown subcommand %v", command)
-}
diff --git a/internal/lsp/cmd/suggested_fix.go b/internal/lsp/cmd/suggested_fix.go
deleted file mode 100644
index df14631da..000000000
--- a/internal/lsp/cmd/suggested_fix.go
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmd
-
-import (
- "context"
- "flag"
- "fmt"
- "io/ioutil"
-
- "golang.org/x/tools/internal/lsp/diff"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
- "golang.org/x/tools/internal/tool"
- errors "golang.org/x/xerrors"
-)
-
-// suggestedFix implements the fix verb for gopls.
-type suggestedFix struct {
- Diff bool `flag:"d,diff" help:"display diffs instead of rewriting files"`
- Write bool `flag:"w,write" help:"write result to (source) file instead of stdout"`
- All bool `flag:"a,all" help:"apply all fixes, not just preferred fixes"`
-
- app *Application
-}
-
-func (s *suggestedFix) Name() string { return "fix" }
-func (s *suggestedFix) Parent() string { return s.app.Name() }
-func (s *suggestedFix) Usage() string { return "[fix-flags] <filename>" }
-func (s *suggestedFix) ShortHelp() string { return "apply suggested fixes" }
-func (s *suggestedFix) DetailedHelp(f *flag.FlagSet) {
- fmt.Fprintf(f.Output(), `
-Example: apply suggested fixes for this file
- $ gopls fix -w internal/lsp/cmd/check.go
-
-fix-flags:
-`)
- printFlagDefaults(f)
-}
-
-// Run performs diagnostic checks on the file specified and either;
-// - if -w is specified, updates the file in place;
-// - if -d is specified, prints out unified diffs of the changes; or
-// - otherwise, prints the new versions to stdout.
-func (s *suggestedFix) Run(ctx context.Context, args ...string) error {
- if len(args) < 1 {
- return tool.CommandLineErrorf("fix expects at least 1 argument")
- }
- conn, err := s.app.connect(ctx)
- if err != nil {
- return err
- }
- defer conn.terminate(ctx)
-
- from := span.Parse(args[0])
- uri := from.URI()
- file := conn.AddFile(ctx, uri)
- if file.err != nil {
- return file.err
- }
-
- if err := conn.diagnoseFiles(ctx, []span.URI{uri}); err != nil {
- return err
- }
- conn.Client.filesMu.Lock()
- defer conn.Client.filesMu.Unlock()
-
- codeActionKinds := []protocol.CodeActionKind{protocol.QuickFix}
- if len(args) > 1 {
- codeActionKinds = []protocol.CodeActionKind{}
- for _, k := range args[1:] {
- codeActionKinds = append(codeActionKinds, protocol.CodeActionKind(k))
- }
- }
-
- rng, err := file.mapper.Range(from)
- if err != nil {
- return err
- }
- p := protocol.CodeActionParams{
- TextDocument: protocol.TextDocumentIdentifier{
- URI: protocol.URIFromSpanURI(uri),
- },
- Context: protocol.CodeActionContext{
- Only: codeActionKinds,
- Diagnostics: file.diagnostics,
- },
- Range: rng,
- }
- actions, err := conn.CodeAction(ctx, &p)
- if err != nil {
- return errors.Errorf("%v: %v", from, err)
- }
- var edits []protocol.TextEdit
- for _, a := range actions {
- if a.Command != nil {
- return fmt.Errorf("ExecuteCommand is not yet supported on the command line")
- }
- if !a.IsPreferred && !s.All {
- continue
- }
- if !from.HasPosition() {
- for _, c := range a.Edit.DocumentChanges {
- if fileURI(c.TextDocument.URI) == uri {
- edits = append(edits, c.Edits...)
- }
- }
- continue
- }
- // If the span passed in has a position, then we need to find
- // the codeaction that has the same range as the passed in span.
- for _, diag := range a.Diagnostics {
- spn, err := file.mapper.RangeSpan(diag.Range)
- if err != nil {
- continue
- }
- if span.ComparePoint(from.Start(), spn.Start()) == 0 {
- for _, c := range a.Edit.DocumentChanges {
- if fileURI(c.TextDocument.URI) == uri {
- edits = append(edits, c.Edits...)
- }
- }
- break
- }
- }
-
- // If suggested fix is not a diagnostic, still must collect edits.
- if len(a.Diagnostics) == 0 {
- for _, c := range a.Edit.DocumentChanges {
- if fileURI(c.TextDocument.URI) == uri {
- edits = append(edits, c.Edits...)
- }
- }
- }
- }
-
- sedits, err := source.FromProtocolEdits(file.mapper, edits)
- if err != nil {
- return errors.Errorf("%v: %v", edits, err)
- }
- newContent := diff.ApplyEdits(string(file.mapper.Content), sedits)
-
- filename := file.uri.Filename()
- switch {
- case s.Write:
- if len(edits) > 0 {
- ioutil.WriteFile(filename, []byte(newContent), 0644)
- }
- case s.Diff:
- diffs := diff.ToUnified(filename+".orig", filename, string(file.mapper.Content), sedits)
- fmt.Print(diffs)
- default:
- fmt.Print(string(newContent))
- }
- return nil
-}
diff --git a/internal/lsp/cmd/symbols.go b/internal/lsp/cmd/symbols.go
deleted file mode 100644
index b43a6dcd1..000000000
--- a/internal/lsp/cmd/symbols.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmd
-
-import (
- "context"
- "encoding/json"
- "flag"
- "fmt"
- "sort"
-
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/span"
- "golang.org/x/tools/internal/tool"
-)
-
-// symbols implements the symbols verb for gopls
-type symbols struct {
- app *Application
-}
-
-func (r *symbols) Name() string { return "symbols" }
-func (r *symbols) Parent() string { return r.app.Name() }
-func (r *symbols) Usage() string { return "<file>" }
-func (r *symbols) ShortHelp() string { return "display selected file's symbols" }
-func (r *symbols) DetailedHelp(f *flag.FlagSet) {
- fmt.Fprint(f.Output(), `
-Example:
- $ gopls symbols helper/helper.go
-`)
- printFlagDefaults(f)
-}
-func (r *symbols) Run(ctx context.Context, args ...string) error {
- if len(args) != 1 {
- return tool.CommandLineErrorf("symbols expects 1 argument (position)")
- }
-
- conn, err := r.app.connect(ctx)
- if err != nil {
- return err
- }
- defer conn.terminate(ctx)
-
- from := span.Parse(args[0])
- p := protocol.DocumentSymbolParams{
- TextDocument: protocol.TextDocumentIdentifier{
- URI: protocol.URIFromSpanURI(from.URI()),
- },
- }
- symbols, err := conn.DocumentSymbol(ctx, &p)
- if err != nil {
- return err
- }
- for _, s := range symbols {
- if m, ok := s.(map[string]interface{}); ok {
- s, err = mapToSymbol(m)
- if err != nil {
- return err
- }
- }
- switch t := s.(type) {
- case protocol.DocumentSymbol:
- printDocumentSymbol(t)
- case protocol.SymbolInformation:
- printSymbolInformation(t)
- }
- }
- return nil
-}
-
-func mapToSymbol(m map[string]interface{}) (interface{}, error) {
- b, err := json.Marshal(m)
- if err != nil {
- return nil, err
- }
-
- if _, ok := m["selectionRange"]; ok {
- var s protocol.DocumentSymbol
- if err := json.Unmarshal(b, &s); err != nil {
- return nil, err
- }
- return s, nil
- }
-
- var s protocol.SymbolInformation
- if err := json.Unmarshal(b, &s); err != nil {
- return nil, err
- }
- return s, nil
-}
-
-func printDocumentSymbol(s protocol.DocumentSymbol) {
- fmt.Printf("%s %s %s\n", s.Name, s.Kind, positionToString(s.SelectionRange))
- // Sort children for consistency
- sort.Slice(s.Children, func(i, j int) bool {
- return s.Children[i].Name < s.Children[j].Name
- })
- for _, c := range s.Children {
- fmt.Printf("\t%s %s %s\n", c.Name, c.Kind, positionToString(c.SelectionRange))
- }
-}
-
-func printSymbolInformation(s protocol.SymbolInformation) {
- fmt.Printf("%s %s %s\n", s.Name, s.Kind, positionToString(s.Location.Range))
-}
-
-func positionToString(r protocol.Range) string {
- return fmt.Sprintf("%v:%v-%v:%v",
- r.Start.Line+1,
- r.Start.Character+1,
- r.End.Line+1,
- r.End.Character+1,
- )
-}
diff --git a/internal/lsp/cmd/test/call_hierarchy.go b/internal/lsp/cmd/test/call_hierarchy.go
deleted file mode 100644
index 38f8ed707..000000000
--- a/internal/lsp/cmd/test/call_hierarchy.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmdtest
-
-import (
- "fmt"
- "sort"
- "strings"
- "testing"
-
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/tests"
- "golang.org/x/tools/internal/span"
-)
-
-func (r *runner) CallHierarchy(t *testing.T, spn span.Span, expectedCalls *tests.CallHierarchyResult) {
- collectCallSpansString := func(callItems []protocol.CallHierarchyItem) string {
- var callSpans []string
- for _, call := range callItems {
- mapper, err := r.data.Mapper(call.URI.SpanURI())
- if err != nil {
- t.Fatal(err)
- }
- callSpan, err := mapper.Span(protocol.Location{URI: call.URI, Range: call.Range})
- if err != nil {
- t.Fatal(err)
- }
- callSpans = append(callSpans, fmt.Sprint(callSpan))
- }
- // to make tests deterministic
- sort.Strings(callSpans)
- return r.Normalize(strings.Join(callSpans, "\n"))
- }
-
- expectIn, expectOut := collectCallSpansString(expectedCalls.IncomingCalls), collectCallSpansString(expectedCalls.OutgoingCalls)
- expectIdent := r.Normalize(fmt.Sprint(spn))
-
- uri := spn.URI()
- filename := uri.Filename()
- target := filename + fmt.Sprintf(":%v:%v", spn.Start().Line(), spn.Start().Column())
-
- got, stderr := r.NormalizeGoplsCmd(t, "call_hierarchy", target)
- if stderr != "" {
- t.Fatalf("call_hierarchy failed for %s: %s", target, stderr)
- }
-
- gotIn, gotIdent, gotOut := cleanCallHierarchyCmdResult(got)
- if expectIn != gotIn {
- t.Errorf("incoming calls call_hierarchy failed for %s expected:\n%s\ngot:\n%s", target, expectIn, gotIn)
- }
- if expectIdent != gotIdent {
- t.Errorf("call_hierarchy failed for %s expected:\n%s\ngot:\n%s", target, expectIdent, gotIdent)
- }
- if expectOut != gotOut {
- t.Errorf("outgoing calls call_hierarchy failed for %s expected:\n%s\ngot:\n%s", target, expectOut, gotOut)
- }
-
-}
-
-// parses function URI and Range from call hierarchy cmd output to
-// incoming, identifier and outgoing calls (returned in that order)
-// ex: "identifier: function d at .../callhierarchy/callhierarchy.go:19:6-7" -> ".../callhierarchy/callhierarchy.go:19:6-7"
-func cleanCallHierarchyCmdResult(output string) (incoming, ident, outgoing string) {
- var incomingCalls, outgoingCalls []string
- for _, out := range strings.Split(output, "\n") {
- if out == "" {
- continue
- }
-
- callLocation := out[strings.LastIndex(out, " ")+1:]
- if strings.HasPrefix(out, "caller") {
- incomingCalls = append(incomingCalls, callLocation)
- } else if strings.HasPrefix(out, "callee") {
- outgoingCalls = append(outgoingCalls, callLocation)
- } else {
- ident = callLocation
- }
- }
- sort.Strings(incomingCalls)
- sort.Strings(outgoingCalls)
- incoming, outgoing = strings.Join(incomingCalls, "\n"), strings.Join(outgoingCalls, "\n")
- return
-}
diff --git a/internal/lsp/cmd/test/check.go b/internal/lsp/cmd/test/check.go
deleted file mode 100644
index f0e6d8fef..000000000
--- a/internal/lsp/cmd/test/check.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmdtest
-
-import (
- "fmt"
- "io/ioutil"
- "strings"
- "testing"
-
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
-)
-
-func (r *runner) Diagnostics(t *testing.T, uri span.URI, want []*source.Diagnostic) {
- if len(want) == 1 && want[0].Message == "" {
- return
- }
- fname := uri.Filename()
- out, _ := r.runGoplsCmd(t, "check", fname)
- // parse got into a collection of reports
- got := map[string]struct{}{}
- for _, l := range strings.Split(out, "\n") {
- if len(l) == 0 {
- continue
- }
- // parse and reprint to normalize the span
- bits := strings.SplitN(l, ": ", 2)
- if len(bits) == 2 {
- spn := span.Parse(strings.TrimSpace(bits[0]))
- spn = span.New(spn.URI(), spn.Start(), span.Point{})
- data, err := ioutil.ReadFile(fname)
- if err != nil {
- t.Fatal(err)
- }
- converter := span.NewContentConverter(fname, data)
- s, err := spn.WithPosition(converter)
- if err != nil {
- t.Fatal(err)
- }
- l = fmt.Sprintf("%s: %s", s, strings.TrimSpace(bits[1]))
- }
- got[r.NormalizePrefix(l)] = struct{}{}
- }
- for _, diag := range want {
- expect := fmt.Sprintf("%v:%v:%v: %v", uri.Filename(), diag.Range.Start.Line+1, diag.Range.Start.Character+1, diag.Message)
- if diag.Range.Start.Character == 0 {
- expect = fmt.Sprintf("%v:%v: %v", uri.Filename(), diag.Range.Start.Line+1, diag.Message)
- }
- expect = r.NormalizePrefix(expect)
- _, found := got[expect]
- if !found {
- t.Errorf("missing diagnostic %q, %v", expect, got)
- } else {
- delete(got, expect)
- }
- }
- for extra := range got {
- t.Errorf("extra diagnostic %q", extra)
- }
-}
diff --git a/internal/lsp/cmd/test/cmdtest.go b/internal/lsp/cmd/test/cmdtest.go
deleted file mode 100644
index 312f7b8b4..000000000
--- a/internal/lsp/cmd/test/cmdtest.go
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package cmdtest contains the test suite for the command line behavior of gopls.
-package cmdtest
-
-import (
- "bytes"
- "context"
- "flag"
- "fmt"
- "io"
- "os"
- "sync"
- "testing"
-
- "golang.org/x/tools/internal/jsonrpc2/servertest"
- "golang.org/x/tools/internal/lsp/cache"
- "golang.org/x/tools/internal/lsp/cmd"
- "golang.org/x/tools/internal/lsp/debug"
- "golang.org/x/tools/internal/lsp/lsprpc"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/lsp/tests"
- "golang.org/x/tools/internal/span"
- "golang.org/x/tools/internal/tool"
-)
-
-type runner struct {
- data *tests.Data
- ctx context.Context
- options func(*source.Options)
- normalizers []tests.Normalizer
- remote string
-}
-
-func TestCommandLine(t *testing.T, testdata string, options func(*source.Options)) {
- // On Android, the testdata directory is not copied to the runner.
- if stat, err := os.Stat(testdata); err != nil || !stat.IsDir() {
- t.Skip("testdata directory not present")
- }
- tests.RunTests(t, testdata, false, func(t *testing.T, datum *tests.Data) {
- ctx := tests.Context(t)
- ts := NewTestServer(ctx, options)
- tests.Run(t, NewRunner(datum, ctx, ts.Addr, options), datum)
- cmd.CloseTestConnections(ctx)
- })
-}
-
-func NewTestServer(ctx context.Context, options func(*source.Options)) *servertest.TCPServer {
- ctx = debug.WithInstance(ctx, "", "")
- cache := cache.New(options)
- ss := lsprpc.NewStreamServer(cache, false)
- return servertest.NewTCPServer(ctx, ss, nil)
-}
-
-func NewRunner(data *tests.Data, ctx context.Context, remote string, options func(*source.Options)) *runner {
- return &runner{
- data: data,
- ctx: ctx,
- options: options,
- normalizers: tests.CollectNormalizers(data.Exported),
- remote: remote,
- }
-}
-
-func (r *runner) CodeLens(t *testing.T, uri span.URI, want []protocol.CodeLens) {
- //TODO: add command line completions tests when it works
-}
-
-func (r *runner) Completion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) {
- //TODO: add command line completions tests when it works
-}
-
-func (r *runner) CompletionSnippet(t *testing.T, src span.Span, expected tests.CompletionSnippet, placeholders bool, items tests.CompletionItems) {
- //TODO: add command line completions tests when it works
-}
-
-func (r *runner) UnimportedCompletion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) {
- //TODO: add command line completions tests when it works
-}
-
-func (r *runner) DeepCompletion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) {
- //TODO: add command line completions tests when it works
-}
-
-func (r *runner) FuzzyCompletion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) {
- //TODO: add command line completions tests when it works
-}
-
-func (r *runner) CaseSensitiveCompletion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) {
- //TODO: add command line completions tests when it works
-}
-
-func (r *runner) RankCompletion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) {
- //TODO: add command line completions tests when it works
-}
-
-func (r *runner) FunctionExtraction(t *testing.T, start span.Span, end span.Span) {
- //TODO: function extraction not supported on command line
-}
-
-func (r *runner) MethodExtraction(t *testing.T, start span.Span, end span.Span) {
- //TODO: function extraction not supported on command line
-}
-
-func (r *runner) AddImport(t *testing.T, uri span.URI, expectedImport string) {
- //TODO: import addition not supported on command line
-}
-
-func (r *runner) Hover(t *testing.T, spn span.Span, info string) {
- //TODO: hovering not supported on command line
-}
-
-func (r *runner) runGoplsCmd(t testing.TB, args ...string) (string, string) {
- rStdout, wStdout, err := os.Pipe()
- if err != nil {
- t.Fatal(err)
- }
- oldStdout := os.Stdout
- rStderr, wStderr, err := os.Pipe()
- if err != nil {
- t.Fatal(err)
- }
- oldStderr := os.Stderr
- stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{}
- var wg sync.WaitGroup
- wg.Add(2)
- go func() {
- io.Copy(stdout, rStdout)
- wg.Done()
- }()
- go func() {
- io.Copy(stderr, rStderr)
- wg.Done()
- }()
- os.Stdout, os.Stderr = wStdout, wStderr
- app := cmd.New("gopls-test", r.data.Config.Dir, r.data.Exported.Config.Env, r.options)
- remote := r.remote
- s := flag.NewFlagSet(app.Name(), flag.ExitOnError)
- err = tool.Run(tests.Context(t), s,
- app,
- append([]string{fmt.Sprintf("-remote=internal@%s", remote)}, args...))
- if err != nil {
- fmt.Fprint(os.Stderr, err)
- }
- wStdout.Close()
- wStderr.Close()
- wg.Wait()
- os.Stdout, os.Stderr = oldStdout, oldStderr
- rStdout.Close()
- rStderr.Close()
- return stdout.String(), stderr.String()
-}
-
-// NormalizeGoplsCmd runs the gopls command and normalizes its output.
-func (r *runner) NormalizeGoplsCmd(t testing.TB, args ...string) (string, string) {
- stdout, stderr := r.runGoplsCmd(t, args...)
- return r.Normalize(stdout), r.Normalize(stderr)
-}
-
-func (r *runner) Normalize(s string) string {
- return tests.Normalize(s, r.normalizers)
-}
-
-func (r *runner) NormalizePrefix(s string) string {
- return tests.NormalizePrefix(s, r.normalizers)
-}
diff --git a/internal/lsp/cmd/test/definition.go b/internal/lsp/cmd/test/definition.go
deleted file mode 100644
index c82d9a6c1..000000000
--- a/internal/lsp/cmd/test/definition.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmdtest
-
-import (
- "fmt"
- "runtime"
- "strings"
- "testing"
-
- "golang.org/x/tools/internal/lsp/diff"
- "golang.org/x/tools/internal/lsp/diff/myers"
- "golang.org/x/tools/internal/lsp/tests"
- "golang.org/x/tools/internal/span"
-)
-
-type godefMode int
-
-const (
- plainGodef = godefMode(1 << iota)
- jsonGoDef
-)
-
-var godefModes = []godefMode{
- plainGodef,
- jsonGoDef,
-}
-
-func (r *runner) Definition(t *testing.T, spn span.Span, d tests.Definition) {
- if d.IsType || d.OnlyHover {
- // TODO: support type definition, hover queries
- return
- }
- d.Src = span.New(d.Src.URI(), span.NewPoint(0, 0, d.Src.Start().Offset()), span.Point{})
- for _, mode := range godefModes {
- args := []string{"definition", "-markdown"}
- tag := d.Name + "-definition"
- if mode&jsonGoDef != 0 {
- tag += "-json"
- args = append(args, "-json")
- }
- uri := d.Src.URI()
- args = append(args, fmt.Sprint(d.Src))
- got, _ := r.NormalizeGoplsCmd(t, args...)
- if mode&jsonGoDef != 0 && runtime.GOOS == "windows" {
- got = strings.Replace(got, "file:///", "file://", -1)
- }
- expect := strings.TrimSpace(string(r.data.Golden(tag, uri.Filename(), func() ([]byte, error) {
- return []byte(got), nil
- })))
- if expect != "" && !strings.HasPrefix(got, expect) {
- d, err := myers.ComputeEdits("", expect, got)
- if err != nil {
- t.Fatal(err)
- }
- t.Errorf("definition %v failed with %#v\n%s", tag, args, diff.ToUnified("expect", "got", expect, d))
- }
- }
-}
diff --git a/internal/lsp/cmd/test/folding_range.go b/internal/lsp/cmd/test/folding_range.go
deleted file mode 100644
index 4478687b5..000000000
--- a/internal/lsp/cmd/test/folding_range.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmdtest
-
-import (
- "testing"
-
- "golang.org/x/tools/internal/span"
-)
-
-func (r *runner) FoldingRanges(t *testing.T, spn span.Span) {
- goldenTag := "foldingRange-cmd"
- uri := spn.URI()
- filename := uri.Filename()
- got, _ := r.NormalizeGoplsCmd(t, "folding_ranges", filename)
- expect := string(r.data.Golden(goldenTag, filename, func() ([]byte, error) {
- return []byte(got), nil
- }))
-
- if expect != got {
- t.Errorf("folding_ranges failed failed for %s expected:\n%s\ngot:\n%s", filename, expect, got)
- }
-}
diff --git a/internal/lsp/cmd/test/format.go b/internal/lsp/cmd/test/format.go
deleted file mode 100644
index 77eedd440..000000000
--- a/internal/lsp/cmd/test/format.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmdtest
-
-import (
- "bytes"
- exec "golang.org/x/sys/execabs"
- "io/ioutil"
- "os"
- "regexp"
- "strings"
- "testing"
-
- "golang.org/x/tools/internal/span"
- "golang.org/x/tools/internal/testenv"
-)
-
-func (r *runner) Format(t *testing.T, spn span.Span) {
- tag := "gofmt"
- uri := spn.URI()
- filename := uri.Filename()
- expect := string(r.data.Golden(tag, filename, func() ([]byte, error) {
- cmd := exec.Command("gofmt", filename)
- contents, _ := cmd.Output() // ignore error, sometimes we have intentionally ungofmt-able files
- contents = []byte(r.Normalize(fixFileHeader(string(contents))))
- return contents, nil
- }))
- if expect == "" {
- //TODO: our error handling differs, for now just skip unformattable files
- t.Skip("Unformattable file")
- }
- got, _ := r.NormalizeGoplsCmd(t, "format", filename)
- if expect != got {
- t.Errorf("format failed for %s expected:\n%s\ngot:\n%s", filename, expect, got)
- }
- // now check we can build a valid unified diff
- unified, _ := r.NormalizeGoplsCmd(t, "format", "-d", filename)
- checkUnified(t, filename, expect, unified)
-}
-
-var unifiedHeader = regexp.MustCompile(`^diff -u.*\n(---\s+\S+\.go\.orig)\s+[\d-:. ]+(\n\+\+\+\s+\S+\.go)\s+[\d-:. ]+(\n@@)`)
-
-func fixFileHeader(s string) string {
- match := unifiedHeader.FindStringSubmatch(s)
- if match == nil {
- return s
- }
- return strings.Join(append(match[1:], s[len(match[0]):]), "")
-}
-
-func checkUnified(t *testing.T, filename string, expect string, patch string) {
- testenv.NeedsTool(t, "patch")
- if strings.Count(patch, "\n+++ ") > 1 {
- // TODO(golang/go/#34580)
- t.Skip("multi-file patch tests not supported yet")
- }
- applied := ""
- if patch == "" {
- applied = expect
- } else {
- temp, err := ioutil.TempFile("", "applied")
- if err != nil {
- t.Fatal(err)
- }
- temp.Close()
- defer os.Remove(temp.Name())
- cmd := exec.Command("patch", "-u", "-p0", "-o", temp.Name(), filename)
- cmd.Stdin = bytes.NewBuffer([]byte(patch))
- msg, err := cmd.CombinedOutput()
- if err != nil {
- t.Errorf("failed applying patch to %s: %v\ngot:\n%s\npatch:\n%s", filename, err, msg, patch)
- return
- }
- out, err := ioutil.ReadFile(temp.Name())
- if err != nil {
- t.Errorf("failed reading patched output for %s: %v\n", filename, err)
- return
- }
- applied = string(out)
- }
- if expect != applied {
- t.Errorf("apply unified gave wrong result for %s expected:\n%s\ngot:\n%s\npatch:\n%s", filename, expect, applied, patch)
- }
-}
diff --git a/internal/lsp/cmd/test/highlight.go b/internal/lsp/cmd/test/highlight.go
deleted file mode 100644
index 99e8b2c3f..000000000
--- a/internal/lsp/cmd/test/highlight.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmdtest
-
-import (
- "testing"
-
- "fmt"
-
- "golang.org/x/tools/internal/span"
-)
-
-func (r *runner) Highlight(t *testing.T, spn span.Span, spans []span.Span) {
- var expect string
- for _, l := range spans {
- expect += fmt.Sprintln(l)
- }
- expect = r.Normalize(expect)
-
- uri := spn.URI()
- filename := uri.Filename()
- target := filename + ":" + fmt.Sprint(spn.Start().Line()) + ":" + fmt.Sprint(spn.Start().Column())
- got, _ := r.NormalizeGoplsCmd(t, "highlight", target)
- if expect != got {
- t.Errorf("highlight failed for %s expected:\n%s\ngot:\n%s", target, expect, got)
- }
-}
diff --git a/internal/lsp/cmd/test/implementation.go b/internal/lsp/cmd/test/implementation.go
deleted file mode 100644
index 189452466..000000000
--- a/internal/lsp/cmd/test/implementation.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmdtest
-
-import (
- "fmt"
- "sort"
- "testing"
-
- "golang.org/x/tools/internal/span"
-)
-
-func (r *runner) Implementation(t *testing.T, spn span.Span, imps []span.Span) {
- var itemStrings []string
- for _, i := range imps {
- itemStrings = append(itemStrings, fmt.Sprint(i))
- }
- sort.Strings(itemStrings)
- var expect string
- for _, i := range itemStrings {
- expect += i + "\n"
- }
- expect = r.Normalize(expect)
-
- uri := spn.URI()
- filename := uri.Filename()
- target := filename + fmt.Sprintf(":%v:%v", spn.Start().Line(), spn.Start().Column())
-
- got, stderr := r.NormalizeGoplsCmd(t, "implementation", target)
- if stderr != "" {
- t.Errorf("implementation failed for %s: %s", target, stderr)
- } else if expect != got {
- t.Errorf("implementation failed for %s expected:\n%s\ngot:\n%s", target, expect, got)
- }
-}
diff --git a/internal/lsp/cmd/test/imports.go b/internal/lsp/cmd/test/imports.go
deleted file mode 100644
index ce8aee55d..000000000
--- a/internal/lsp/cmd/test/imports.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmdtest
-
-import (
- "testing"
-
- "golang.org/x/tools/internal/lsp/diff"
- "golang.org/x/tools/internal/lsp/diff/myers"
- "golang.org/x/tools/internal/span"
-)
-
-func (r *runner) Import(t *testing.T, spn span.Span) {
- uri := spn.URI()
- filename := uri.Filename()
- got, _ := r.NormalizeGoplsCmd(t, "imports", filename)
- want := string(r.data.Golden("goimports", filename, func() ([]byte, error) {
- return []byte(got), nil
- }))
- if want != got {
- d, err := myers.ComputeEdits(uri, want, got)
- if err != nil {
- t.Fatal(err)
- }
- t.Errorf("imports failed for %s, expected:\n%s", filename, diff.ToUnified("want", "got", want, d))
- }
-}
diff --git a/internal/lsp/cmd/test/links.go b/internal/lsp/cmd/test/links.go
deleted file mode 100644
index 88df76832..000000000
--- a/internal/lsp/cmd/test/links.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmdtest
-
-import (
- "encoding/json"
- "testing"
-
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/tests"
- "golang.org/x/tools/internal/span"
-)
-
-func (r *runner) Link(t *testing.T, uri span.URI, wantLinks []tests.Link) {
- m, err := r.data.Mapper(uri)
- if err != nil {
- t.Fatal(err)
- }
- out, _ := r.NormalizeGoplsCmd(t, "links", "-json", uri.Filename())
- var got []protocol.DocumentLink
- err = json.Unmarshal([]byte(out), &got)
- if err != nil {
- t.Fatal(err)
- }
- if diff := tests.DiffLinks(m, wantLinks, got); diff != "" {
- t.Error(diff)
- }
-}
diff --git a/internal/lsp/cmd/test/prepare_rename.go b/internal/lsp/cmd/test/prepare_rename.go
deleted file mode 100644
index b5359e57b..000000000
--- a/internal/lsp/cmd/test/prepare_rename.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmdtest
-
-import (
- "fmt"
- "testing"
-
- "golang.org/x/tools/internal/lsp/cmd"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
-)
-
-func (r *runner) PrepareRename(t *testing.T, src span.Span, want *source.PrepareItem) {
- m, err := r.data.Mapper(src.URI())
- if err != nil {
- t.Errorf("prepare_rename failed: %v", err)
- }
-
- var (
- target = fmt.Sprintf("%v", src)
- args = []string{"prepare_rename", target}
- stdOut, stdErr = r.NormalizeGoplsCmd(t, args...)
- expect string
- )
-
- if want.Text == "" {
- if stdErr != "" && stdErr != cmd.ErrInvalidRenamePosition.Error() {
- t.Errorf("prepare_rename failed for %s,\nexpected:\n`%v`\ngot:\n`%v`", target, expect, stdErr)
- }
- return
- }
-
- ws, err := m.Span(protocol.Location{Range: want.Range})
- if err != nil {
- t.Errorf("prepare_rename failed: %v", err)
- }
-
- expect = r.Normalize(fmt.Sprintln(ws))
- if expect != stdOut {
- t.Errorf("prepare_rename failed for %s expected:\n`%s`\ngot:\n`%s`\n", target, expect, stdOut)
- }
-}
diff --git a/internal/lsp/cmd/test/references.go b/internal/lsp/cmd/test/references.go
deleted file mode 100644
index 66d0d0662..000000000
--- a/internal/lsp/cmd/test/references.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmdtest
-
-import (
- "fmt"
- "sort"
- "testing"
-
- "golang.org/x/tools/internal/span"
-)
-
-func (r *runner) References(t *testing.T, spn span.Span, itemList []span.Span) {
- for _, includeDeclaration := range []bool{true, false} {
- t.Run(fmt.Sprintf("refs-declaration-%v", includeDeclaration), func(t *testing.T) {
- var itemStrings []string
- for i, s := range itemList {
- // We don't want the first result if we aren't including the declaration.
- if i == 0 && !includeDeclaration {
- continue
- }
- itemStrings = append(itemStrings, fmt.Sprint(s))
- }
- sort.Strings(itemStrings)
- var expect string
- for _, s := range itemStrings {
- expect += s + "\n"
- }
- expect = r.Normalize(expect)
-
- uri := spn.URI()
- filename := uri.Filename()
- target := filename + fmt.Sprintf(":%v:%v", spn.Start().Line(), spn.Start().Column())
- args := []string{"references"}
- if includeDeclaration {
- args = append(args, "-d")
- }
- args = append(args, target)
- got, stderr := r.NormalizeGoplsCmd(t, args...)
- if stderr != "" {
- t.Errorf("references failed for %s: %s", target, stderr)
- } else if expect != got {
- t.Errorf("references failed for %s expected:\n%s\ngot:\n%s", target, expect, got)
- }
- })
- }
-}
diff --git a/internal/lsp/cmd/test/rename.go b/internal/lsp/cmd/test/rename.go
deleted file mode 100644
index 0fe2d1e18..000000000
--- a/internal/lsp/cmd/test/rename.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmdtest
-
-import (
- "fmt"
- "testing"
-
- "golang.org/x/tools/internal/span"
-)
-
-func (r *runner) Rename(t *testing.T, spn span.Span, newText string) {
- filename := spn.URI().Filename()
- goldenTag := newText + "-rename"
- loc := fmt.Sprintf("%v", spn)
- got, err := r.NormalizeGoplsCmd(t, "rename", loc, newText)
- got += err
- expect := string(r.data.Golden(goldenTag, filename, func() ([]byte, error) {
- return []byte(got), nil
- }))
- if expect != got {
- t.Errorf("rename failed with %v %v\nexpected:\n%s\ngot:\n%s", loc, newText, expect, got)
- }
- // now check we can build a valid unified diff
- unified, _ := r.NormalizeGoplsCmd(t, "rename", "-d", loc, newText)
- checkUnified(t, filename, expect, unified)
-}
diff --git a/internal/lsp/cmd/test/semanticdriver.go b/internal/lsp/cmd/test/semanticdriver.go
deleted file mode 100644
index 80dc61e3d..000000000
--- a/internal/lsp/cmd/test/semanticdriver.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmdtest
-
-import (
- "strings"
- "testing"
-
- "golang.org/x/tools/internal/span"
-)
-
-func (r *runner) SemanticTokens(t *testing.T, spn span.Span) {
- uri := spn.URI()
- filename := uri.Filename()
- got, stderr := r.NormalizeGoplsCmd(t, "semtok", filename)
- if stderr != "" {
- t.Fatalf("%s: %q", filename, stderr)
- }
- want := string(r.data.Golden("semantic", filename, func() ([]byte, error) {
- return []byte(got), nil
- }))
- if want != got {
- lwant := strings.Split(want, "\n")
- lgot := strings.Split(got, "\n")
- t.Errorf("want(%d-%d) != got(%d-%d) for %s", len(want), len(lwant), len(got), len(lgot), r.Normalize(filename))
- for i := 0; i < len(lwant) && i < len(lgot); i++ {
- if lwant[i] != lgot[i] {
- t.Errorf("line %d:\nwant%q\ngot %q\n", i, lwant[i], lgot[i])
- }
- }
- }
-}
diff --git a/internal/lsp/cmd/test/signature.go b/internal/lsp/cmd/test/signature.go
deleted file mode 100644
index f6bdaebf3..000000000
--- a/internal/lsp/cmd/test/signature.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmdtest
-
-import (
- "fmt"
- "testing"
-
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/tests"
- "golang.org/x/tools/internal/span"
-)
-
-func (r *runner) SignatureHelp(t *testing.T, spn span.Span, want *protocol.SignatureHelp) {
- uri := spn.URI()
- filename := uri.Filename()
- target := filename + fmt.Sprintf(":%v:%v", spn.Start().Line(), spn.Start().Column())
- got, _ := r.NormalizeGoplsCmd(t, "signature", target)
- if want == nil {
- if got != "" {
- t.Fatalf("want nil, but got %s", got)
- }
- return
- }
- goldenTag := want.Signatures[0].Label + "-signature"
- expect := string(r.data.Golden(goldenTag, filename, func() ([]byte, error) {
- return []byte(got), nil
- }))
- if tests.NormalizeAny(expect) != tests.NormalizeAny(got) {
- t.Errorf("signature failed for %s expected:\n%q\ngot:\n%q'", filename, expect, got)
- }
-}
diff --git a/internal/lsp/cmd/test/suggested_fix.go b/internal/lsp/cmd/test/suggested_fix.go
deleted file mode 100644
index c819e0517..000000000
--- a/internal/lsp/cmd/test/suggested_fix.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmdtest
-
-import (
- "fmt"
- "testing"
-
- "golang.org/x/tools/internal/lsp/tests"
- "golang.org/x/tools/internal/span"
-)
-
-func (r *runner) SuggestedFix(t *testing.T, spn span.Span, actionKinds []string, expectedActions int) {
- uri := spn.URI()
- filename := uri.Filename()
- args := []string{"fix", "-a", fmt.Sprintf("%s", spn)}
- for _, kind := range actionKinds {
- if kind == "refactor.rewrite" {
- t.Skip("refactor.rewrite is not yet supported on the command line")
- }
- }
- args = append(args, actionKinds...)
- got, stderr := r.NormalizeGoplsCmd(t, args...)
- if stderr == "ExecuteCommand is not yet supported on the command line" {
- return // don't skip to keep the summary counts correct
- }
- want := string(r.data.Golden("suggestedfix_"+tests.SpanName(spn), filename, func() ([]byte, error) {
- return []byte(got), nil
- }))
- if want != got {
- t.Errorf("suggested fixes failed for %s:\n%s", filename, tests.Diff(t, want, got))
- }
-}
diff --git a/internal/lsp/cmd/test/symbols.go b/internal/lsp/cmd/test/symbols.go
deleted file mode 100644
index 055be0308..000000000
--- a/internal/lsp/cmd/test/symbols.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmdtest
-
-import (
- "testing"
-
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/span"
-)
-
-func (r *runner) Symbols(t *testing.T, uri span.URI, expectedSymbols []protocol.DocumentSymbol) {
- filename := uri.Filename()
- got, _ := r.NormalizeGoplsCmd(t, "symbols", filename)
- expect := string(r.data.Golden("symbols", filename, func() ([]byte, error) {
- return []byte(got), nil
- }))
- if expect != got {
- t.Errorf("symbols failed for %s expected:\n%s\ngot:\n%s", filename, expect, got)
- }
-}
diff --git a/internal/lsp/cmd/test/workspace_symbol.go b/internal/lsp/cmd/test/workspace_symbol.go
deleted file mode 100644
index ce965f03a..000000000
--- a/internal/lsp/cmd/test/workspace_symbol.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmdtest
-
-import (
- "fmt"
- "path/filepath"
- "sort"
- "strings"
- "testing"
-
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/lsp/tests"
- "golang.org/x/tools/internal/span"
-)
-
-func (r *runner) WorkspaceSymbols(t *testing.T, uri span.URI, query string, typ tests.WorkspaceSymbolsTestType) {
- var matcher string
- switch typ {
- case tests.WorkspaceSymbolsFuzzy:
- matcher = "fuzzy"
- case tests.WorkspaceSymbolsCaseSensitive:
- matcher = "caseSensitive"
- case tests.WorkspaceSymbolsDefault:
- matcher = "caseInsensitive"
- }
- r.runWorkspaceSymbols(t, uri, matcher, query)
-}
-
-func (r *runner) runWorkspaceSymbols(t *testing.T, uri span.URI, matcher, query string) {
- t.Helper()
-
- out, _ := r.runGoplsCmd(t, "workspace_symbol", "-matcher", matcher, query)
- var filtered []string
- dir := filepath.Dir(uri.Filename())
- for _, line := range strings.Split(out, "\n") {
- if source.InDir(dir, line) {
- filtered = append(filtered, filepath.ToSlash(line))
- }
- }
- sort.Strings(filtered)
- got := r.Normalize(strings.Join(filtered, "\n") + "\n")
-
- expect := string(r.data.Golden(fmt.Sprintf("workspace_symbol-%s-%s", strings.ToLower(string(matcher)), query), uri.Filename(), func() ([]byte, error) {
- return []byte(got), nil
- }))
-
- if expect != got {
- t.Errorf("workspace_symbol failed for %s:\n%s", query, tests.Diff(t, expect, got))
- }
-}
diff --git a/internal/lsp/cmd/usage/usage.hlp b/internal/lsp/cmd/usage/usage.hlp
deleted file mode 100644
index 1d0fb8d4c..000000000
--- a/internal/lsp/cmd/usage/usage.hlp
+++ /dev/null
@@ -1,77 +0,0 @@
-
-gopls is a Go language server.
-
-It is typically used with an editor to provide language features. When no
-command is specified, gopls will default to the 'serve' command. The language
-features can also be accessed via the gopls command-line interface.
-
-Usage:
- gopls help [<subject>]
-
-Command:
-
-Main
- serve run a server for Go code using the Language Server Protocol
- version print the gopls version information
- bug report a bug in gopls
- api-json print json describing gopls API
- licenses print licenses of included software
-
-Features
- call_hierarchy display selected identifier's call hierarchy
- check show diagnostic results for the specified file
- definition show declaration of selected identifier
- folding_ranges display selected file's folding ranges
- format format the code according to the go standard
- highlight display selected identifier's highlights
- implementation display selected identifier's implementation
- imports updates import statements
- remote interact with the gopls daemon
- inspect interact with the gopls daemon (deprecated: use 'remote')
- links list links in a file
- prepare_rename test validity of a rename operation at location
- references display selected identifier's references
- rename rename selected identifier
- semtok show semantic tokens for the specified file
- signature display selected identifier's signature
- fix apply suggested fixes
- symbols display selected file's symbols
- workspace manage the gopls workspace (experimental: under development)
- workspace_symbol search symbols in workspace
- vulncheck run experimental vulncheck analysis (experimental: under development)
-
-flags:
- -debug=string
- serve debug information on the supplied address
- -listen=string
- address on which to listen for remote connections. If prefixed by 'unix;', the subsequent address is assumed to be a unix domain socket. Otherwise, TCP is used.
- -listen.timeout=duration
- when used with -listen, shut down the server when there are no connected clients for this duration
- -logfile=string
- filename to log to. if value is "auto", then logging to a default output file is enabled
- -mode=string
- no effect
- -ocagent=string
- the address of the ocagent (e.g. http://localhost:55678), or off (default "off")
- -port=int
- port on which to run gopls for debugging purposes
- -profile.cpu=string
- write CPU profile to this file
- -profile.mem=string
- write memory profile to this file
- -profile.trace=string
- write trace log to this file
- -remote=string
- forward all commands to a remote lsp specified by this flag. With no special prefix, this is assumed to be a TCP address. If prefixed by 'unix;', the subsequent address is assumed to be a unix domain socket. If 'auto', or prefixed by 'auto;', the remote address is automatically resolved based on the executing environment.
- -remote.debug=string
- when used with -remote=auto, the -debug value used to start the daemon
- -remote.listen.timeout=duration
- when used with -remote=auto, the -listen.timeout value used to start the daemon (default 1m0s)
- -remote.logfile=string
- when used with -remote=auto, the -logfile value used to start the daemon
- -rpc.trace
- print the full rpc trace in lsp inspector format
- -v,-verbose
- verbose output
- -vv,-veryverbose
- very verbose output
diff --git a/internal/lsp/cmd/usage/vulncheck.hlp b/internal/lsp/cmd/usage/vulncheck.hlp
deleted file mode 100644
index 4bfdc4b47..000000000
--- a/internal/lsp/cmd/usage/vulncheck.hlp
+++ /dev/null
@@ -1,9 +0,0 @@
-run experimental vulncheck analysis (experimental: under development)
-
-Usage:
- gopls [flags] vulncheck
-
- WARNING: this command is experimental.
-
- Example:
- $ gopls vulncheck <packages>
diff --git a/internal/lsp/cmd/usage/workspace.hlp b/internal/lsp/cmd/usage/workspace.hlp
deleted file mode 100644
index 912cf2946..000000000
--- a/internal/lsp/cmd/usage/workspace.hlp
+++ /dev/null
@@ -1,7 +0,0 @@
-manage the gopls workspace (experimental: under development)
-
-Usage:
- gopls [flags] workspace <subcommand> [arg]...
-
-Subcommand:
- generate generate a gopls.mod file for a workspace
diff --git a/internal/lsp/cmd/vulncheck.go b/internal/lsp/cmd/vulncheck.go
deleted file mode 100644
index adf59cecb..000000000
--- a/internal/lsp/cmd/vulncheck.go
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmd
-
-import (
- "context"
- "encoding/json"
- "flag"
- "fmt"
- "os"
-
- "golang.org/x/tools/internal/lsp/command"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/tool"
-)
-
-// vulncheck implements the vulncheck command.
-type vulncheck struct {
- app *Application
-}
-
-func (v *vulncheck) Name() string { return "vulncheck" }
-func (v *vulncheck) Parent() string { return v.app.Name() }
-func (v *vulncheck) Usage() string { return "" }
-func (v *vulncheck) ShortHelp() string {
- return "run experimental vulncheck analysis (experimental: under development)"
-}
-func (v *vulncheck) DetailedHelp(f *flag.FlagSet) {
- fmt.Fprint(f.Output(), `
- WARNING: this command is experimental.
-
- Example:
- $ gopls vulncheck <packages>
-`)
- printFlagDefaults(f)
-}
-
-func (v *vulncheck) Run(ctx context.Context, args ...string) error {
- if len(args) > 1 {
- return tool.CommandLineErrorf("vulncheck accepts at most one package pattern")
- }
- pattern := "."
- if len(args) == 1 {
- pattern = args[0]
- }
-
- conn, err := v.app.connect(ctx)
- if err != nil {
- return err
- }
- defer conn.terminate(ctx)
-
- cwd, err := os.Getwd()
- if err != nil {
- return err
- }
-
- cmd, err := command.NewRunVulncheckExpCommand("", command.VulncheckArgs{
- Dir: protocol.URIFromPath(cwd),
- Pattern: pattern,
- })
- if err != nil {
- return err
- }
-
- params := &protocol.ExecuteCommandParams{Command: cmd.Command, Arguments: cmd.Arguments}
- res, err := conn.ExecuteCommand(ctx, params)
- if err != nil {
- return fmt.Errorf("executing server command: %v", err)
- }
- data, err := json.MarshalIndent(res, " ", " ")
- if err != nil {
- return fmt.Errorf("failed to decode results: %v", err)
- }
- fmt.Printf("%s\n", data)
- return nil
-}
diff --git a/internal/lsp/cmd/workspace.go b/internal/lsp/cmd/workspace.go
deleted file mode 100644
index c0ddd9eb4..000000000
--- a/internal/lsp/cmd/workspace.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmd
-
-import (
- "context"
- "flag"
- "fmt"
-
- "golang.org/x/tools/internal/lsp/command"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
-)
-
-// workspace is a top-level command for working with the gopls workspace. This
-// is experimental and subject to change. The idea is that subcommands could be
-// used for manipulating the workspace mod file, rather than editing it
-// manually.
-type workspace struct {
- app *Application
- subcommands
-}
-
-func newWorkspace(app *Application) *workspace {
- return &workspace{
- app: app,
- subcommands: subcommands{
- &generateWorkspaceMod{app: app},
- },
- }
-}
-
-func (w *workspace) Name() string { return "workspace" }
-func (w *workspace) Parent() string { return w.app.Name() }
-func (w *workspace) ShortHelp() string {
- return "manage the gopls workspace (experimental: under development)"
-}
-
-// generateWorkspaceMod (re)generates the gopls.mod file for the current
-// workspace.
-type generateWorkspaceMod struct {
- app *Application
-}
-
-func (c *generateWorkspaceMod) Name() string { return "generate" }
-func (c *generateWorkspaceMod) Usage() string { return "" }
-func (c *generateWorkspaceMod) ShortHelp() string {
- return "generate a gopls.mod file for a workspace"
-}
-
-func (c *generateWorkspaceMod) DetailedHelp(f *flag.FlagSet) {
- printFlagDefaults(f)
-}
-
-func (c *generateWorkspaceMod) Run(ctx context.Context, args ...string) error {
- origOptions := c.app.options
- c.app.options = func(opts *source.Options) {
- origOptions(opts)
- opts.ExperimentalWorkspaceModule = true
- }
- conn, err := c.app.connect(ctx)
- if err != nil {
- return err
- }
- defer conn.terminate(ctx)
- cmd, err := command.NewGenerateGoplsModCommand("", command.URIArg{})
- if err != nil {
- return err
- }
- params := &protocol.ExecuteCommandParams{Command: cmd.Command, Arguments: cmd.Arguments}
- if _, err := conn.ExecuteCommand(ctx, params); err != nil {
- return fmt.Errorf("executing server command: %v", err)
- }
- return nil
-}
diff --git a/internal/lsp/cmd/workspace_symbol.go b/internal/lsp/cmd/workspace_symbol.go
deleted file mode 100644
index 38fe5decf..000000000
--- a/internal/lsp/cmd/workspace_symbol.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmd
-
-import (
- "context"
- "flag"
- "fmt"
-
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/tool"
-)
-
-// workspaceSymbol implements the workspace_symbol verb for gopls.
-type workspaceSymbol struct {
- Matcher string `flag:"matcher" help:"specifies the type of matcher: fuzzy, caseSensitive, or caseInsensitive.\nThe default is caseInsensitive."`
-
- app *Application
-}
-
-func (r *workspaceSymbol) Name() string { return "workspace_symbol" }
-func (r *workspaceSymbol) Parent() string { return r.app.Name() }
-func (r *workspaceSymbol) Usage() string { return "[workspace_symbol-flags] <query>" }
-func (r *workspaceSymbol) ShortHelp() string { return "search symbols in workspace" }
-func (r *workspaceSymbol) DetailedHelp(f *flag.FlagSet) {
- fmt.Fprint(f.Output(), `
-Example:
-
- $ gopls workspace_symbol -matcher fuzzy 'wsymbols'
-
-workspace_symbol-flags:
-`)
- printFlagDefaults(f)
-}
-
-func (r *workspaceSymbol) Run(ctx context.Context, args ...string) error {
- if len(args) != 1 {
- return tool.CommandLineErrorf("workspace_symbol expects 1 argument")
- }
-
- opts := r.app.options
- r.app.options = func(o *source.Options) {
- if opts != nil {
- opts(o)
- }
- switch r.Matcher {
- case "fuzzy":
- o.SymbolMatcher = source.SymbolFuzzy
- case "caseSensitive":
- o.SymbolMatcher = source.SymbolCaseSensitive
- case "fastfuzzy":
- o.SymbolMatcher = source.SymbolFastFuzzy
- default:
- o.SymbolMatcher = source.SymbolCaseInsensitive
- }
- }
-
- conn, err := r.app.connect(ctx)
- if err != nil {
- return err
- }
- defer conn.terminate(ctx)
-
- p := protocol.WorkspaceSymbolParams{
- Query: args[0],
- }
-
- symbols, err := conn.Symbol(ctx, &p)
- if err != nil {
- return err
- }
- for _, s := range symbols {
- f := conn.AddFile(ctx, fileURI(s.Location.URI))
- span, err := f.mapper.Span(s.Location)
- if err != nil {
- return err
- }
- fmt.Printf("%s %s %s\n", span, s.Name, s.Kind)
- }
-
- return nil
-}
diff --git a/internal/lsp/code_action.go b/internal/lsp/code_action.go
deleted file mode 100644
index 7ddf81296..000000000
--- a/internal/lsp/code_action.go
+++ /dev/null
@@ -1,455 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lsp
-
-import (
- "context"
- "fmt"
- "sort"
- "strings"
-
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/imports"
- "golang.org/x/tools/internal/lsp/command"
- "golang.org/x/tools/internal/lsp/debug/tag"
- "golang.org/x/tools/internal/lsp/mod"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
- errors "golang.org/x/xerrors"
-)
-
-func (s *Server) codeAction(ctx context.Context, params *protocol.CodeActionParams) ([]protocol.CodeAction, error) {
- snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.UnknownKind)
- defer release()
- if !ok {
- return nil, err
- }
- uri := fh.URI()
-
- // Determine the supported actions for this file kind.
- kind := snapshot.View().FileKind(fh)
- supportedCodeActions, ok := snapshot.View().Options().SupportedCodeActions[kind]
- if !ok {
- return nil, fmt.Errorf("no supported code actions for %v file kind", kind)
- }
-
- // The Only field of the context specifies which code actions the client wants.
- // If Only is empty, assume that the client wants all of the non-explicit code actions.
- var wanted map[protocol.CodeActionKind]bool
-
- // Explicit Code Actions are opt-in and shouldn't be returned to the client unless
- // requested using Only.
- // TODO: Add other CodeLenses such as GoGenerate, RegenerateCgo, etc..
- explicit := map[protocol.CodeActionKind]bool{
- protocol.GoTest: true,
- }
-
- if len(params.Context.Only) == 0 {
- wanted = supportedCodeActions
- } else {
- wanted = make(map[protocol.CodeActionKind]bool)
- for _, only := range params.Context.Only {
- for k, v := range supportedCodeActions {
- if only == k || strings.HasPrefix(string(k), string(only)+".") {
- wanted[k] = wanted[k] || v
- }
- }
- wanted[only] = wanted[only] || explicit[only]
- }
- }
- if len(supportedCodeActions) == 0 {
- return nil, nil // not an error if there are none supported
- }
- if len(wanted) == 0 {
- return nil, fmt.Errorf("no supported code action to execute for %s, wanted %v", uri, params.Context.Only)
- }
-
- var codeActions []protocol.CodeAction
- switch kind {
- case source.Mod:
- if diagnostics := params.Context.Diagnostics; len(diagnostics) > 0 {
- diags, err := mod.DiagnosticsForMod(ctx, snapshot, fh)
- if source.IsNonFatalGoModError(err) {
- return nil, nil
- }
- if err != nil {
- return nil, err
- }
- quickFixes, err := codeActionsMatchingDiagnostics(ctx, snapshot, diagnostics, diags)
- if err != nil {
- return nil, err
- }
- codeActions = append(codeActions, quickFixes...)
- }
- case source.Go:
- // Don't suggest fixes for generated files, since they are generally
- // not useful and some editors may apply them automatically on save.
- if source.IsGenerated(ctx, snapshot, uri) {
- return nil, nil
- }
- diagnostics := params.Context.Diagnostics
-
- // First, process any missing imports and pair them with the
- // diagnostics they fix.
- if wantQuickFixes := wanted[protocol.QuickFix] && len(diagnostics) > 0; wantQuickFixes || wanted[protocol.SourceOrganizeImports] {
- importEdits, importEditsPerFix, err := source.AllImportsFixes(ctx, snapshot, fh)
- if err != nil {
- event.Error(ctx, "imports fixes", err, tag.File.Of(fh.URI().Filename()))
- }
- // Separate this into a set of codeActions per diagnostic, where
- // each action is the addition, removal, or renaming of one import.
- if wantQuickFixes {
- for _, importFix := range importEditsPerFix {
- fixes := importDiagnostics(importFix.Fix, diagnostics)
- if len(fixes) == 0 {
- continue
- }
- codeActions = append(codeActions, protocol.CodeAction{
- Title: importFixTitle(importFix.Fix),
- Kind: protocol.QuickFix,
- Edit: protocol.WorkspaceEdit{
- DocumentChanges: documentChanges(fh, importFix.Edits),
- },
- Diagnostics: fixes,
- })
- }
- }
-
- // Send all of the import edits as one code action if the file is
- // being organized.
- if wanted[protocol.SourceOrganizeImports] && len(importEdits) > 0 {
- codeActions = append(codeActions, protocol.CodeAction{
- Title: "Organize Imports",
- Kind: protocol.SourceOrganizeImports,
- Edit: protocol.WorkspaceEdit{
- DocumentChanges: documentChanges(fh, importEdits),
- },
- })
- }
- }
- if ctx.Err() != nil {
- return nil, ctx.Err()
- }
- pkg, err := snapshot.PackageForFile(ctx, fh.URI(), source.TypecheckFull, source.WidestPackage)
- if err != nil {
- return nil, err
- }
-
- pkgDiagnostics, err := snapshot.DiagnosePackage(ctx, pkg)
- if err != nil {
- return nil, err
- }
- analysisDiags, err := source.Analyze(ctx, snapshot, pkg, true)
- if err != nil {
- return nil, err
- }
- fileDiags := append(pkgDiagnostics[uri], analysisDiags[uri]...)
-
- // Split diagnostics into fixes, which must match incoming diagnostics,
- // and non-fixes, which must match the requested range. Build actions
- // for all of them.
- var fixDiags, nonFixDiags []*source.Diagnostic
- for _, d := range fileDiags {
- if len(d.SuggestedFixes) == 0 {
- continue
- }
- var isFix bool
- for _, fix := range d.SuggestedFixes {
- if fix.ActionKind == protocol.QuickFix || fix.ActionKind == protocol.SourceFixAll {
- isFix = true
- break
- }
- }
- if isFix {
- fixDiags = append(fixDiags, d)
- } else {
- nonFixDiags = append(nonFixDiags, d)
- }
- }
-
- fixActions, err := codeActionsMatchingDiagnostics(ctx, snapshot, diagnostics, fixDiags)
- if err != nil {
- return nil, err
- }
- codeActions = append(codeActions, fixActions...)
-
- for _, nonfix := range nonFixDiags {
- // For now, only show diagnostics for matching lines. Maybe we should
- // alter this behavior in the future, depending on the user experience.
- if !protocol.Intersect(nonfix.Range, params.Range) {
- continue
- }
- actions, err := codeActionsForDiagnostic(ctx, snapshot, nonfix, nil)
- if err != nil {
- return nil, err
- }
- codeActions = append(codeActions, actions...)
- }
-
- if wanted[protocol.RefactorExtract] {
- fixes, err := extractionFixes(ctx, snapshot, pkg, uri, params.Range)
- if err != nil {
- return nil, err
- }
- codeActions = append(codeActions, fixes...)
- }
-
- if wanted[protocol.GoTest] {
- fixes, err := goTest(ctx, snapshot, uri, params.Range)
- if err != nil {
- return nil, err
- }
- codeActions = append(codeActions, fixes...)
- }
-
- default:
- // Unsupported file kind for a code action.
- return nil, nil
- }
-
- var filtered []protocol.CodeAction
- for _, action := range codeActions {
- if wanted[action.Kind] {
- filtered = append(filtered, action)
- }
- }
- return filtered, nil
-}
-
-func (s *Server) getSupportedCodeActions() []protocol.CodeActionKind {
- allCodeActionKinds := make(map[protocol.CodeActionKind]struct{})
- for _, kinds := range s.session.Options().SupportedCodeActions {
- for kind := range kinds {
- allCodeActionKinds[kind] = struct{}{}
- }
- }
- var result []protocol.CodeActionKind
- for kind := range allCodeActionKinds {
- result = append(result, kind)
- }
- sort.Slice(result, func(i, j int) bool {
- return result[i] < result[j]
- })
- return result
-}
-
-func importFixTitle(fix *imports.ImportFix) string {
- var str string
- switch fix.FixType {
- case imports.AddImport:
- str = fmt.Sprintf("Add import: %s %q", fix.StmtInfo.Name, fix.StmtInfo.ImportPath)
- case imports.DeleteImport:
- str = fmt.Sprintf("Delete import: %s %q", fix.StmtInfo.Name, fix.StmtInfo.ImportPath)
- case imports.SetImportName:
- str = fmt.Sprintf("Rename import: %s %q", fix.StmtInfo.Name, fix.StmtInfo.ImportPath)
- }
- return str
-}
-
-func importDiagnostics(fix *imports.ImportFix, diagnostics []protocol.Diagnostic) (results []protocol.Diagnostic) {
- for _, diagnostic := range diagnostics {
- switch {
- // "undeclared name: X" may be an unresolved import.
- case strings.HasPrefix(diagnostic.Message, "undeclared name: "):
- ident := strings.TrimPrefix(diagnostic.Message, "undeclared name: ")
- if ident == fix.IdentName {
- results = append(results, diagnostic)
- }
- // "could not import: X" may be an invalid import.
- case strings.HasPrefix(diagnostic.Message, "could not import: "):
- ident := strings.TrimPrefix(diagnostic.Message, "could not import: ")
- if ident == fix.IdentName {
- results = append(results, diagnostic)
- }
- // "X imported but not used" is an unused import.
- // "X imported but not used as Y" is an unused import.
- case strings.Contains(diagnostic.Message, " imported but not used"):
- idx := strings.Index(diagnostic.Message, " imported but not used")
- importPath := diagnostic.Message[:idx]
- if importPath == fmt.Sprintf("%q", fix.StmtInfo.ImportPath) {
- results = append(results, diagnostic)
- }
- }
- }
- return results
-}
-
-func extractionFixes(ctx context.Context, snapshot source.Snapshot, pkg source.Package, uri span.URI, rng protocol.Range) ([]protocol.CodeAction, error) {
- if rng.Start == rng.End {
- return nil, nil
- }
- fh, err := snapshot.GetFile(ctx, uri)
- if err != nil {
- return nil, err
- }
- _, pgf, err := source.GetParsedFile(ctx, snapshot, fh, source.NarrowestPackage)
- if err != nil {
- return nil, errors.Errorf("getting file for Identifier: %w", err)
- }
- srng, err := pgf.Mapper.RangeToSpanRange(rng)
- if err != nil {
- return nil, err
- }
- puri := protocol.URIFromSpanURI(uri)
- var commands []protocol.Command
- if _, ok, methodOk, _ := source.CanExtractFunction(snapshot.FileSet(), srng, pgf.Src, pgf.File); ok {
- cmd, err := command.NewApplyFixCommand("Extract function", command.ApplyFixArgs{
- URI: puri,
- Fix: source.ExtractFunction,
- Range: rng,
- })
- if err != nil {
- return nil, err
- }
- commands = append(commands, cmd)
- if methodOk {
- cmd, err := command.NewApplyFixCommand("Extract method", command.ApplyFixArgs{
- URI: puri,
- Fix: source.ExtractMethod,
- Range: rng,
- })
- if err != nil {
- return nil, err
- }
- commands = append(commands, cmd)
- }
- }
- if _, _, ok, _ := source.CanExtractVariable(srng, pgf.File); ok {
- cmd, err := command.NewApplyFixCommand("Extract variable", command.ApplyFixArgs{
- URI: puri,
- Fix: source.ExtractVariable,
- Range: rng,
- })
- if err != nil {
- return nil, err
- }
- commands = append(commands, cmd)
- }
- var actions []protocol.CodeAction
- for i := range commands {
- actions = append(actions, protocol.CodeAction{
- Title: commands[i].Title,
- Kind: protocol.RefactorExtract,
- Command: &commands[i],
- })
- }
- return actions, nil
-}
-
-func documentChanges(fh source.VersionedFileHandle, edits []protocol.TextEdit) []protocol.TextDocumentEdit {
- return []protocol.TextDocumentEdit{
- {
- TextDocument: protocol.OptionalVersionedTextDocumentIdentifier{
- Version: fh.Version(),
- TextDocumentIdentifier: protocol.TextDocumentIdentifier{
- URI: protocol.URIFromSpanURI(fh.URI()),
- },
- },
- Edits: edits,
- },
- }
-}
-
-func codeActionsMatchingDiagnostics(ctx context.Context, snapshot source.Snapshot, pdiags []protocol.Diagnostic, sdiags []*source.Diagnostic) ([]protocol.CodeAction, error) {
- var actions []protocol.CodeAction
- for _, sd := range sdiags {
- var diag *protocol.Diagnostic
- for _, pd := range pdiags {
- if sameDiagnostic(pd, sd) {
- diag = &pd
- break
- }
- }
- if diag == nil {
- continue
- }
- diagActions, err := codeActionsForDiagnostic(ctx, snapshot, sd, diag)
- if err != nil {
- return nil, err
- }
- actions = append(actions, diagActions...)
-
- }
- return actions, nil
-}
-
-func codeActionsForDiagnostic(ctx context.Context, snapshot source.Snapshot, sd *source.Diagnostic, pd *protocol.Diagnostic) ([]protocol.CodeAction, error) {
- var actions []protocol.CodeAction
- for _, fix := range sd.SuggestedFixes {
- var changes []protocol.TextDocumentEdit
- for uri, edits := range fix.Edits {
- fh, err := snapshot.GetVersionedFile(ctx, uri)
- if err != nil {
- return nil, err
- }
- changes = append(changes, protocol.TextDocumentEdit{
- TextDocument: protocol.OptionalVersionedTextDocumentIdentifier{
- Version: fh.Version(),
- TextDocumentIdentifier: protocol.TextDocumentIdentifier{
- URI: protocol.URIFromSpanURI(uri),
- },
- },
- Edits: edits,
- })
- }
- action := protocol.CodeAction{
- Title: fix.Title,
- Kind: fix.ActionKind,
- Edit: protocol.WorkspaceEdit{
- DocumentChanges: changes,
- },
- Command: fix.Command,
- }
- if pd != nil {
- action.Diagnostics = []protocol.Diagnostic{*pd}
- }
- actions = append(actions, action)
- }
- return actions, nil
-}
-
-func sameDiagnostic(pd protocol.Diagnostic, sd *source.Diagnostic) bool {
- return pd.Message == sd.Message && protocol.CompareRange(pd.Range, sd.Range) == 0 && pd.Source == string(sd.Source)
-}
-
-func goTest(ctx context.Context, snapshot source.Snapshot, uri span.URI, rng protocol.Range) ([]protocol.CodeAction, error) {
- fh, err := snapshot.GetFile(ctx, uri)
- if err != nil {
- return nil, err
- }
- fns, err := source.TestsAndBenchmarks(ctx, snapshot, fh)
- if err != nil {
- return nil, err
- }
-
- var tests, benchmarks []string
- for _, fn := range fns.Tests {
- if !protocol.Intersect(fn.Rng, rng) {
- continue
- }
- tests = append(tests, fn.Name)
- }
- for _, fn := range fns.Benchmarks {
- if !protocol.Intersect(fn.Rng, rng) {
- continue
- }
- benchmarks = append(benchmarks, fn.Name)
- }
-
- if len(tests) == 0 && len(benchmarks) == 0 {
- return nil, nil
- }
-
- cmd, err := command.NewTestCommand("Run tests and benchmarks", protocol.URIFromSpanURI(uri), tests, benchmarks)
- if err != nil {
- return nil, err
- }
- return []protocol.CodeAction{{
- Title: cmd.Title,
- Kind: protocol.GoTest,
- Command: &cmd,
- }}, nil
-}
diff --git a/internal/lsp/code_lens.go b/internal/lsp/code_lens.go
deleted file mode 100644
index e19445838..000000000
--- a/internal/lsp/code_lens.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lsp
-
-import (
- "context"
- "fmt"
- "sort"
-
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/lsp/command"
- "golang.org/x/tools/internal/lsp/mod"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
-)
-
-func (s *Server) codeLens(ctx context.Context, params *protocol.CodeLensParams) ([]protocol.CodeLens, error) {
- snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.UnknownKind)
- defer release()
- if !ok {
- return nil, err
- }
- var lenses map[command.Command]source.LensFunc
- switch snapshot.View().FileKind(fh) {
- case source.Mod:
- lenses = mod.LensFuncs()
- case source.Go:
- lenses = source.LensFuncs()
- default:
- // Unsupported file kind for a code lens.
- return nil, nil
- }
- var result []protocol.CodeLens
- for cmd, lf := range lenses {
- if !snapshot.View().Options().Codelenses[string(cmd)] {
- continue
- }
- added, err := lf(ctx, snapshot, fh)
- // Code lens is called on every keystroke, so we should just operate in
- // a best-effort mode, ignoring errors.
- if err != nil {
- event.Error(ctx, fmt.Sprintf("code lens %s failed", cmd), err)
- continue
- }
- result = append(result, added...)
- }
- sort.Slice(result, func(i, j int) bool {
- a, b := result[i], result[j]
- if protocol.CompareRange(a.Range, b.Range) == 0 {
- return a.Command.Command < b.Command.Command
- }
- return protocol.CompareRange(a.Range, b.Range) < 0
- })
- return result, nil
-}
diff --git a/internal/lsp/command.go b/internal/lsp/command.go
deleted file mode 100644
index 088fa57d5..000000000
--- a/internal/lsp/command.go
+++ /dev/null
@@ -1,819 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lsp
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "path/filepath"
- "sort"
- "strings"
-
- "golang.org/x/mod/modfile"
- "golang.org/x/tools/go/ast/astutil"
- "golang.org/x/tools/go/packages"
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/gocommand"
- "golang.org/x/tools/internal/lsp/command"
- "golang.org/x/tools/internal/lsp/debug"
- "golang.org/x/tools/internal/lsp/progress"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
- "golang.org/x/tools/internal/xcontext"
- errors "golang.org/x/xerrors"
-)
-
-func (s *Server) executeCommand(ctx context.Context, params *protocol.ExecuteCommandParams) (interface{}, error) {
- var found bool
- for _, name := range s.session.Options().SupportedCommands {
- if name == params.Command {
- found = true
- break
- }
- }
- if !found {
- return nil, fmt.Errorf("%s is not a supported command", params.Command)
- }
-
- handler := &commandHandler{
- s: s,
- params: params,
- }
- return command.Dispatch(ctx, params, handler)
-}
-
-type commandHandler struct {
- s *Server
- params *protocol.ExecuteCommandParams
-}
-
-// commandConfig configures common command set-up and execution.
-type commandConfig struct {
- async bool // whether to run the command asynchronously. Async commands can only return errors.
- requireSave bool // whether all files must be saved for the command to work
- progress string // title to use for progress reporting. If empty, no progress will be reported.
- forURI protocol.DocumentURI // URI to resolve to a snapshot. If unset, snapshot will be nil.
-}
-
-// commandDeps is evaluated from a commandConfig. Note that not all fields may
-// be populated, depending on which configuration is set. See comments in-line
-// for details.
-type commandDeps struct {
- snapshot source.Snapshot // present if cfg.forURI was set
- fh source.VersionedFileHandle // present if cfg.forURI was set
- work *progress.WorkDone // present cfg.progress was set
-}
-
-type commandFunc func(context.Context, commandDeps) error
-
-func (c *commandHandler) run(ctx context.Context, cfg commandConfig, run commandFunc) (err error) {
- if cfg.requireSave {
- var unsaved []string
- for _, overlay := range c.s.session.Overlays() {
- if !overlay.Saved() {
- unsaved = append(unsaved, overlay.URI().Filename())
- }
- }
- if len(unsaved) > 0 {
- return errors.Errorf("All files must be saved first (unsaved: %v).", unsaved)
- }
- }
- var deps commandDeps
- if cfg.forURI != "" {
- var ok bool
- var release func()
- deps.snapshot, deps.fh, ok, release, err = c.s.beginFileRequest(ctx, cfg.forURI, source.UnknownKind)
- defer release()
- if !ok {
- if err != nil {
- return err
- }
- return fmt.Errorf("invalid file URL: %v", cfg.forURI)
- }
- }
- ctx, cancel := context.WithCancel(xcontext.Detach(ctx))
- if cfg.progress != "" {
- deps.work = c.s.progress.Start(ctx, cfg.progress, "Running...", c.params.WorkDoneToken, cancel)
- }
- runcmd := func() error {
- defer cancel()
- err := run(ctx, deps)
- if deps.work != nil {
- switch {
- case errors.Is(err, context.Canceled):
- deps.work.End("canceled")
- case err != nil:
- event.Error(ctx, "command error", err)
- deps.work.End("failed")
- default:
- deps.work.End("completed")
- }
- }
- return err
- }
- if cfg.async {
- go func() {
- if err := runcmd(); err != nil {
- if showMessageErr := c.s.client.ShowMessage(ctx, &protocol.ShowMessageParams{
- Type: protocol.Error,
- Message: err.Error(),
- }); showMessageErr != nil {
- event.Error(ctx, fmt.Sprintf("failed to show message: %q", err.Error()), showMessageErr)
- }
- }
- }()
- return nil
- }
- return runcmd()
-}
-
-func (c *commandHandler) ApplyFix(ctx context.Context, args command.ApplyFixArgs) error {
- return c.run(ctx, commandConfig{
- // Note: no progress here. Applying fixes should be quick.
- forURI: args.URI,
- }, func(ctx context.Context, deps commandDeps) error {
- edits, err := source.ApplyFix(ctx, args.Fix, deps.snapshot, deps.fh, args.Range)
- if err != nil {
- return err
- }
- r, err := c.s.client.ApplyEdit(ctx, &protocol.ApplyWorkspaceEditParams{
- Edit: protocol.WorkspaceEdit{
- DocumentChanges: edits,
- },
- })
- if err != nil {
- return err
- }
- if !r.Applied {
- return errors.New(r.FailureReason)
- }
- return nil
- })
-}
-
-func (c *commandHandler) RegenerateCgo(ctx context.Context, args command.URIArg) error {
- return c.run(ctx, commandConfig{
- progress: "Regenerating Cgo",
- }, func(ctx context.Context, deps commandDeps) error {
- mod := source.FileModification{
- URI: args.URI.SpanURI(),
- Action: source.InvalidateMetadata,
- }
- return c.s.didModifyFiles(ctx, []source.FileModification{mod}, FromRegenerateCgo)
- })
-}
-
-func (c *commandHandler) CheckUpgrades(ctx context.Context, args command.CheckUpgradesArgs) error {
- return c.run(ctx, commandConfig{
- forURI: args.URI,
- progress: "Checking for upgrades",
- }, func(ctx context.Context, deps commandDeps) error {
- upgrades, err := c.s.getUpgrades(ctx, deps.snapshot, args.URI.SpanURI(), args.Modules)
- if err != nil {
- return err
- }
- deps.snapshot.View().RegisterModuleUpgrades(upgrades)
- // Re-diagnose the snapshot to publish the new module diagnostics.
- c.s.diagnoseSnapshot(deps.snapshot, nil, false)
- return nil
- })
-}
-
-func (c *commandHandler) AddDependency(ctx context.Context, args command.DependencyArgs) error {
- return c.GoGetModule(ctx, args)
-}
-
-func (c *commandHandler) UpgradeDependency(ctx context.Context, args command.DependencyArgs) error {
- return c.GoGetModule(ctx, args)
-}
-
-func (c *commandHandler) GoGetModule(ctx context.Context, args command.DependencyArgs) error {
- return c.run(ctx, commandConfig{
- progress: "Running go get",
- forURI: args.URI,
- }, func(ctx context.Context, deps commandDeps) error {
- return c.s.runGoModUpdateCommands(ctx, deps.snapshot, args.URI.SpanURI(), func(invoke func(...string) (*bytes.Buffer, error)) error {
- return runGoGetModule(invoke, args.AddRequire, args.GoCmdArgs)
- })
- })
-}
-
-// TODO(rFindley): UpdateGoSum, Tidy, and Vendor could probably all be one command.
-func (c *commandHandler) UpdateGoSum(ctx context.Context, args command.URIArgs) error {
- return c.run(ctx, commandConfig{
- progress: "Updating go.sum",
- }, func(ctx context.Context, deps commandDeps) error {
- for _, uri := range args.URIs {
- snapshot, fh, ok, release, err := c.s.beginFileRequest(ctx, uri, source.UnknownKind)
- defer release()
- if !ok {
- return err
- }
- if err := c.s.runGoModUpdateCommands(ctx, snapshot, fh.URI(), func(invoke func(...string) (*bytes.Buffer, error)) error {
- _, err := invoke("list", "all")
- return err
- }); err != nil {
- return err
- }
- }
- return nil
- })
-}
-
-func (c *commandHandler) Tidy(ctx context.Context, args command.URIArgs) error {
- return c.run(ctx, commandConfig{
- requireSave: true,
- progress: "Running go mod tidy",
- }, func(ctx context.Context, deps commandDeps) error {
- for _, uri := range args.URIs {
- snapshot, fh, ok, release, err := c.s.beginFileRequest(ctx, uri, source.UnknownKind)
- defer release()
- if !ok {
- return err
- }
- if err := c.s.runGoModUpdateCommands(ctx, snapshot, fh.URI(), func(invoke func(...string) (*bytes.Buffer, error)) error {
- _, err := invoke("mod", "tidy")
- return err
- }); err != nil {
- return err
- }
- }
- return nil
- })
-}
-
-func (c *commandHandler) Vendor(ctx context.Context, args command.URIArg) error {
- return c.run(ctx, commandConfig{
- requireSave: true,
- progress: "Running go mod vendor",
- forURI: args.URI,
- }, func(ctx context.Context, deps commandDeps) error {
- _, err := deps.snapshot.RunGoCommandDirect(ctx, source.Normal|source.AllowNetwork, &gocommand.Invocation{
- Verb: "mod",
- Args: []string{"vendor"},
- WorkingDir: filepath.Dir(args.URI.SpanURI().Filename()),
- })
- return err
- })
-}
-
-func (c *commandHandler) EditGoDirective(ctx context.Context, args command.EditGoDirectiveArgs) error {
- return c.run(ctx, commandConfig{
- requireSave: true, // if go.mod isn't saved it could cause a problem
- forURI: args.URI,
- }, func(ctx context.Context, deps commandDeps) error {
- snapshot, fh, ok, release, err := c.s.beginFileRequest(ctx, args.URI, source.UnknownKind)
- defer release()
- if !ok {
- return err
- }
- if err := c.s.runGoModUpdateCommands(ctx, snapshot, fh.URI(), func(invoke func(...string) (*bytes.Buffer, error)) error {
- _, err := invoke("mod", "edit", "-go", args.Version)
- return err
- }); err != nil {
- return err
- }
- return nil
- })
-}
-
-func (c *commandHandler) RemoveDependency(ctx context.Context, args command.RemoveDependencyArgs) error {
- return c.run(ctx, commandConfig{
- progress: "Removing dependency",
- forURI: args.URI,
- }, func(ctx context.Context, deps commandDeps) error {
- // If the module is tidied apart from the one unused diagnostic, we can
- // run `go get module@none`, and then run `go mod tidy`. Otherwise, we
- // must make textual edits.
- // TODO(rstambler): In Go 1.17+, we will be able to use the go command
- // without checking if the module is tidy.
- if args.OnlyDiagnostic {
- return c.s.runGoModUpdateCommands(ctx, deps.snapshot, args.URI.SpanURI(), func(invoke func(...string) (*bytes.Buffer, error)) error {
- if err := runGoGetModule(invoke, false, []string{args.ModulePath + "@none"}); err != nil {
- return err
- }
- _, err := invoke("mod", "tidy")
- return err
- })
- }
- pm, err := deps.snapshot.ParseMod(ctx, deps.fh)
- if err != nil {
- return err
- }
- edits, err := dropDependency(deps.snapshot, pm, args.ModulePath)
- if err != nil {
- return err
- }
- response, err := c.s.client.ApplyEdit(ctx, &protocol.ApplyWorkspaceEditParams{
- Edit: protocol.WorkspaceEdit{
- DocumentChanges: []protocol.TextDocumentEdit{{
- TextDocument: protocol.OptionalVersionedTextDocumentIdentifier{
- Version: deps.fh.Version(),
- TextDocumentIdentifier: protocol.TextDocumentIdentifier{
- URI: protocol.URIFromSpanURI(deps.fh.URI()),
- },
- },
- Edits: edits,
- }},
- },
- })
- if err != nil {
- return err
- }
- if !response.Applied {
- return fmt.Errorf("edits not applied because of %s", response.FailureReason)
- }
- return nil
- })
-}
-
-// dropDependency returns the edits to remove the given require from the go.mod
-// file.
-func dropDependency(snapshot source.Snapshot, pm *source.ParsedModule, modulePath string) ([]protocol.TextEdit, error) {
- // We need a private copy of the parsed go.mod file, since we're going to
- // modify it.
- copied, err := modfile.Parse("", pm.Mapper.Content, nil)
- if err != nil {
- return nil, err
- }
- if err := copied.DropRequire(modulePath); err != nil {
- return nil, err
- }
- copied.Cleanup()
- newContent, err := copied.Format()
- if err != nil {
- return nil, err
- }
- // Calculate the edits to be made due to the change.
- diff, err := snapshot.View().Options().ComputeEdits(pm.URI, string(pm.Mapper.Content), string(newContent))
- if err != nil {
- return nil, err
- }
- return source.ToProtocolEdits(pm.Mapper, diff)
-}
-
-func (c *commandHandler) Test(ctx context.Context, uri protocol.DocumentURI, tests, benchmarks []string) error {
- return c.RunTests(ctx, command.RunTestsArgs{
- URI: uri,
- Tests: tests,
- Benchmarks: benchmarks,
- })
-}
-
-func (c *commandHandler) RunTests(ctx context.Context, args command.RunTestsArgs) error {
- return c.run(ctx, commandConfig{
- async: true,
- progress: "Running go test",
- requireSave: true,
- forURI: args.URI,
- }, func(ctx context.Context, deps commandDeps) error {
- if err := c.runTests(ctx, deps.snapshot, deps.work, args.URI, args.Tests, args.Benchmarks); err != nil {
- return errors.Errorf("running tests failed: %w", err)
- }
- return nil
- })
-}
-
-func (c *commandHandler) runTests(ctx context.Context, snapshot source.Snapshot, work *progress.WorkDone, uri protocol.DocumentURI, tests, benchmarks []string) error {
- // TODO: fix the error reporting when this runs async.
- pkgs, err := snapshot.PackagesForFile(ctx, uri.SpanURI(), source.TypecheckWorkspace, false)
- if err != nil {
- return err
- }
- if len(pkgs) == 0 {
- return fmt.Errorf("package could not be found for file: %s", uri.SpanURI().Filename())
- }
- pkgPath := pkgs[0].ForTest()
-
- // create output
- buf := &bytes.Buffer{}
- ew := progress.NewEventWriter(ctx, "test")
- out := io.MultiWriter(ew, progress.NewWorkDoneWriter(work), buf)
-
- // Run `go test -run Func` on each test.
- var failedTests int
- for _, funcName := range tests {
- inv := &gocommand.Invocation{
- Verb: "test",
- Args: []string{pkgPath, "-v", "-count=1", "-run", fmt.Sprintf("^%s$", funcName)},
- WorkingDir: filepath.Dir(uri.SpanURI().Filename()),
- }
- if err := snapshot.RunGoCommandPiped(ctx, source.Normal, inv, out, out); err != nil {
- if errors.Is(err, context.Canceled) {
- return err
- }
- failedTests++
- }
- }
-
- // Run `go test -run=^$ -bench Func` on each test.
- var failedBenchmarks int
- for _, funcName := range benchmarks {
- inv := &gocommand.Invocation{
- Verb: "test",
- Args: []string{pkgPath, "-v", "-run=^$", "-bench", fmt.Sprintf("^%s$", funcName)},
- WorkingDir: filepath.Dir(uri.SpanURI().Filename()),
- }
- if err := snapshot.RunGoCommandPiped(ctx, source.Normal, inv, out, out); err != nil {
- if errors.Is(err, context.Canceled) {
- return err
- }
- failedBenchmarks++
- }
- }
-
- var title string
- if len(tests) > 0 && len(benchmarks) > 0 {
- title = "tests and benchmarks"
- } else if len(tests) > 0 {
- title = "tests"
- } else if len(benchmarks) > 0 {
- title = "benchmarks"
- } else {
- return errors.New("No functions were provided")
- }
- message := fmt.Sprintf("all %s passed", title)
- if failedTests > 0 && failedBenchmarks > 0 {
- message = fmt.Sprintf("%d / %d tests failed and %d / %d benchmarks failed", failedTests, len(tests), failedBenchmarks, len(benchmarks))
- } else if failedTests > 0 {
- message = fmt.Sprintf("%d / %d tests failed", failedTests, len(tests))
- } else if failedBenchmarks > 0 {
- message = fmt.Sprintf("%d / %d benchmarks failed", failedBenchmarks, len(benchmarks))
- }
- if failedTests > 0 || failedBenchmarks > 0 {
- message += "\n" + buf.String()
- }
-
- return c.s.client.ShowMessage(ctx, &protocol.ShowMessageParams{
- Type: protocol.Info,
- Message: message,
- })
-}
-
-func (c *commandHandler) Generate(ctx context.Context, args command.GenerateArgs) error {
- title := "Running go generate ."
- if args.Recursive {
- title = "Running go generate ./..."
- }
- return c.run(ctx, commandConfig{
- requireSave: true,
- progress: title,
- forURI: args.Dir,
- }, func(ctx context.Context, deps commandDeps) error {
- er := progress.NewEventWriter(ctx, "generate")
-
- pattern := "."
- if args.Recursive {
- pattern = "./..."
- }
- inv := &gocommand.Invocation{
- Verb: "generate",
- Args: []string{"-x", pattern},
- WorkingDir: args.Dir.SpanURI().Filename(),
- }
- stderr := io.MultiWriter(er, progress.NewWorkDoneWriter(deps.work))
- if err := deps.snapshot.RunGoCommandPiped(ctx, source.Normal, inv, er, stderr); err != nil {
- return err
- }
- return nil
- })
-}
-
-func (c *commandHandler) GoGetPackage(ctx context.Context, args command.GoGetPackageArgs) error {
- return c.run(ctx, commandConfig{
- forURI: args.URI,
- progress: "Running go get",
- }, func(ctx context.Context, deps commandDeps) error {
- // Run on a throwaway go.mod, otherwise it'll write to the real one.
- stdout, err := deps.snapshot.RunGoCommandDirect(ctx, source.WriteTemporaryModFile|source.AllowNetwork, &gocommand.Invocation{
- Verb: "list",
- Args: []string{"-f", "{{.Module.Path}}@{{.Module.Version}}", args.Pkg},
- WorkingDir: filepath.Dir(args.URI.SpanURI().Filename()),
- })
- if err != nil {
- return err
- }
- ver := strings.TrimSpace(stdout.String())
- return c.s.runGoModUpdateCommands(ctx, deps.snapshot, args.URI.SpanURI(), func(invoke func(...string) (*bytes.Buffer, error)) error {
- if args.AddRequire {
- if err := addModuleRequire(invoke, []string{ver}); err != nil {
- return err
- }
- }
- _, err := invoke(append([]string{"get", "-d"}, args.Pkg)...)
- return err
- })
- })
-}
-
-func (s *Server) runGoModUpdateCommands(ctx context.Context, snapshot source.Snapshot, uri span.URI, run func(invoke func(...string) (*bytes.Buffer, error)) error) error {
- tmpModfile, newModBytes, newSumBytes, err := snapshot.RunGoCommands(ctx, true, filepath.Dir(uri.Filename()), run)
- if err != nil {
- return err
- }
- if !tmpModfile {
- return nil
- }
- modURI := snapshot.GoModForFile(uri)
- sumURI := span.URIFromPath(strings.TrimSuffix(modURI.Filename(), ".mod") + ".sum")
- modEdits, err := applyFileEdits(ctx, snapshot, modURI, newModBytes)
- if err != nil {
- return err
- }
- sumEdits, err := applyFileEdits(ctx, snapshot, sumURI, newSumBytes)
- if err != nil {
- return err
- }
- changes := append(sumEdits, modEdits...)
- if len(changes) == 0 {
- return nil
- }
- response, err := s.client.ApplyEdit(ctx, &protocol.ApplyWorkspaceEditParams{
- Edit: protocol.WorkspaceEdit{
- DocumentChanges: changes,
- },
- })
- if err != nil {
- return err
- }
- if !response.Applied {
- return fmt.Errorf("edits not applied because of %s", response.FailureReason)
- }
- return nil
-}
-
-func applyFileEdits(ctx context.Context, snapshot source.Snapshot, uri span.URI, newContent []byte) ([]protocol.TextDocumentEdit, error) {
- fh, err := snapshot.GetVersionedFile(ctx, uri)
- if err != nil {
- return nil, err
- }
- oldContent, err := fh.Read()
- if err != nil && !os.IsNotExist(err) {
- return nil, err
- }
- if bytes.Equal(oldContent, newContent) {
- return nil, nil
- }
-
- // Sending a workspace edit to a closed file causes VS Code to open the
- // file and leave it unsaved. We would rather apply the changes directly,
- // especially to go.sum, which should be mostly invisible to the user.
- if !snapshot.IsOpen(uri) {
- err := ioutil.WriteFile(uri.Filename(), newContent, 0666)
- return nil, err
- }
-
- m := &protocol.ColumnMapper{
- URI: fh.URI(),
- Converter: span.NewContentConverter(fh.URI().Filename(), oldContent),
- Content: oldContent,
- }
- diff, err := snapshot.View().Options().ComputeEdits(uri, string(oldContent), string(newContent))
- if err != nil {
- return nil, err
- }
- edits, err := source.ToProtocolEdits(m, diff)
- if err != nil {
- return nil, err
- }
- return []protocol.TextDocumentEdit{{
- TextDocument: protocol.OptionalVersionedTextDocumentIdentifier{
- Version: fh.Version(),
- TextDocumentIdentifier: protocol.TextDocumentIdentifier{
- URI: protocol.URIFromSpanURI(uri),
- },
- },
- Edits: edits,
- }}, nil
-}
-
-func runGoGetModule(invoke func(...string) (*bytes.Buffer, error), addRequire bool, args []string) error {
- if addRequire {
- if err := addModuleRequire(invoke, args); err != nil {
- return err
- }
- }
- _, err := invoke(append([]string{"get", "-d"}, args...)...)
- return err
-}
-
-func addModuleRequire(invoke func(...string) (*bytes.Buffer, error), args []string) error {
- // Using go get to create a new dependency results in an
- // `// indirect` comment we may not want. The only way to avoid it
- // is to add the require as direct first. Then we can use go get to
- // update go.sum and tidy up.
- _, err := invoke(append([]string{"mod", "edit", "-require"}, args...)...)
- return err
-}
-
-func (s *Server) getUpgrades(ctx context.Context, snapshot source.Snapshot, uri span.URI, modules []string) (map[string]string, error) {
- stdout, err := snapshot.RunGoCommandDirect(ctx, source.Normal|source.AllowNetwork, &gocommand.Invocation{
- Verb: "list",
- Args: append([]string{"-m", "-u", "-json"}, modules...),
- WorkingDir: filepath.Dir(uri.Filename()),
- ModFlag: "readonly",
- })
- if err != nil {
- return nil, err
- }
-
- upgrades := map[string]string{}
- for dec := json.NewDecoder(stdout); dec.More(); {
- mod := &gocommand.ModuleJSON{}
- if err := dec.Decode(mod); err != nil {
- return nil, err
- }
- if mod.Update == nil {
- continue
- }
- upgrades[mod.Path] = mod.Update.Version
- }
- return upgrades, nil
-}
-
-func (c *commandHandler) GCDetails(ctx context.Context, uri protocol.DocumentURI) error {
- return c.ToggleGCDetails(ctx, command.URIArg{URI: uri})
-}
-
-func (c *commandHandler) ToggleGCDetails(ctx context.Context, args command.URIArg) error {
- return c.run(ctx, commandConfig{
- requireSave: true,
- progress: "Toggling GC Details",
- forURI: args.URI,
- }, func(ctx context.Context, deps commandDeps) error {
- pkg, err := deps.snapshot.PackageForFile(ctx, deps.fh.URI(), source.TypecheckWorkspace, source.NarrowestPackage)
- if err != nil {
- return err
- }
- c.s.gcOptimizationDetailsMu.Lock()
- if _, ok := c.s.gcOptimizationDetails[pkg.ID()]; ok {
- delete(c.s.gcOptimizationDetails, pkg.ID())
- c.s.clearDiagnosticSource(gcDetailsSource)
- } else {
- c.s.gcOptimizationDetails[pkg.ID()] = struct{}{}
- }
- c.s.gcOptimizationDetailsMu.Unlock()
- c.s.diagnoseSnapshot(deps.snapshot, nil, false)
- return nil
- })
-}
-
-func (c *commandHandler) GenerateGoplsMod(ctx context.Context, args command.URIArg) error {
- // TODO: go back to using URI
- return c.run(ctx, commandConfig{
- requireSave: true,
- progress: "Generating gopls.mod",
- }, func(ctx context.Context, deps commandDeps) error {
- views := c.s.session.Views()
- if len(views) != 1 {
- return fmt.Errorf("cannot resolve view: have %d views", len(views))
- }
- v := views[0]
- snapshot, release := v.Snapshot(ctx)
- defer release()
- modFile, err := snapshot.BuildGoplsMod(ctx)
- if err != nil {
- return errors.Errorf("getting workspace mod file: %w", err)
- }
- content, err := modFile.Format()
- if err != nil {
- return errors.Errorf("formatting mod file: %w", err)
- }
- filename := filepath.Join(snapshot.View().Folder().Filename(), "gopls.mod")
- if err := ioutil.WriteFile(filename, content, 0644); err != nil {
- return errors.Errorf("writing mod file: %w", err)
- }
- return nil
- })
-}
-
-func (c *commandHandler) ListKnownPackages(ctx context.Context, args command.URIArg) (command.ListKnownPackagesResult, error) {
- var result command.ListKnownPackagesResult
- err := c.run(ctx, commandConfig{
- progress: "Listing packages",
- forURI: args.URI,
- }, func(ctx context.Context, deps commandDeps) error {
- var err error
- result.Packages, err = source.KnownPackages(ctx, deps.snapshot, deps.fh)
- return err
- })
- return result, err
-}
-
-func (c *commandHandler) ListImports(ctx context.Context, args command.URIArg) (command.ListImportsResult, error) {
- var result command.ListImportsResult
- err := c.run(ctx, commandConfig{
- forURI: args.URI,
- }, func(ctx context.Context, deps commandDeps) error {
- pkg, err := deps.snapshot.PackageForFile(ctx, args.URI.SpanURI(), source.TypecheckWorkspace, source.NarrowestPackage)
- if err != nil {
- return err
- }
- pgf, err := pkg.File(args.URI.SpanURI())
- if err != nil {
- return err
- }
- for _, group := range astutil.Imports(deps.snapshot.FileSet(), pgf.File) {
- for _, imp := range group {
- if imp.Path == nil {
- continue
- }
- var name string
- if imp.Name != nil {
- name = imp.Name.Name
- }
- result.Imports = append(result.Imports, command.FileImport{
- Path: source.ImportPath(imp),
- Name: name,
- })
- }
- }
- for _, imp := range pkg.Imports() {
- result.PackageImports = append(result.PackageImports, command.PackageImport{
- Path: imp.PkgPath(), // This might be the vendored path under GOPATH vendoring, in which case it's a bug.
- })
- }
- sort.Slice(result.PackageImports, func(i, j int) bool {
- return result.PackageImports[i].Path < result.PackageImports[j].Path
- })
- return nil
- })
- return result, err
-}
-
-func (c *commandHandler) AddImport(ctx context.Context, args command.AddImportArgs) error {
- return c.run(ctx, commandConfig{
- progress: "Adding import",
- forURI: args.URI,
- }, func(ctx context.Context, deps commandDeps) error {
- edits, err := source.AddImport(ctx, deps.snapshot, deps.fh, args.ImportPath)
- if err != nil {
- return fmt.Errorf("could not add import: %v", err)
- }
- if _, err := c.s.client.ApplyEdit(ctx, &protocol.ApplyWorkspaceEditParams{
- Edit: protocol.WorkspaceEdit{
- DocumentChanges: documentChanges(deps.fh, edits),
- },
- }); err != nil {
- return fmt.Errorf("could not apply import edits: %v", err)
- }
- return nil
- })
-}
-
-func (c *commandHandler) StartDebugging(ctx context.Context, args command.DebuggingArgs) (result command.DebuggingResult, _ error) {
- addr := args.Addr
- if addr == "" {
- addr = "localhost:0"
- }
- di := debug.GetInstance(ctx)
- if di == nil {
- return result, errors.New("internal error: server has no debugging instance")
- }
- listenedAddr, err := di.Serve(ctx, addr)
- if err != nil {
- return result, errors.Errorf("starting debug server: %w", err)
- }
- result.URLs = []string{"http://" + listenedAddr}
- return result, nil
-}
-
-func (c *commandHandler) RunVulncheckExp(ctx context.Context, args command.VulncheckArgs) (result command.VulncheckResult, _ error) {
- err := c.run(ctx, commandConfig{
- progress: "Running vulncheck",
- requireSave: true,
- forURI: args.Dir, // Will dir work?
- }, func(ctx context.Context, deps commandDeps) error {
- view := deps.snapshot.View()
- opts := view.Options()
- if opts == nil || opts.Hooks.Govulncheck == nil {
- return errors.New("vulncheck feature is not available")
- }
-
- buildFlags := opts.BuildFlags // XXX: is session.Options equivalent to view.Options?
- var viewEnv []string
- if e := opts.EnvSlice(); e != nil {
- viewEnv = append(os.Environ(), e...)
- }
- cfg := &packages.Config{
- Context: ctx,
- Tests: true, // TODO(hyangah): add a field in args.
- BuildFlags: buildFlags,
- Env: viewEnv,
- Dir: view.Folder().Filename(),
- // TODO(hyangah): configure overlay
- }
- var err error
- result, err = opts.Hooks.Govulncheck(ctx, cfg, args)
- return err
- })
- return result, err
-}
diff --git a/internal/lsp/command/command_gen.go b/internal/lsp/command/command_gen.go
deleted file mode 100644
index 22cfeff5b..000000000
--- a/internal/lsp/command/command_gen.go
+++ /dev/null
@@ -1,473 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Don't include this file during code generation, or it will break the build
-// if existing interface methods have been modified.
-//go:build !generate
-// +build !generate
-
-package command
-
-// Code generated by generate.go. DO NOT EDIT.
-
-import (
- "context"
- "fmt"
-
- "golang.org/x/tools/internal/lsp/protocol"
-)
-
-const (
- AddDependency Command = "add_dependency"
- AddImport Command = "add_import"
- ApplyFix Command = "apply_fix"
- CheckUpgrades Command = "check_upgrades"
- EditGoDirective Command = "edit_go_directive"
- GCDetails Command = "gc_details"
- Generate Command = "generate"
- GenerateGoplsMod Command = "generate_gopls_mod"
- GoGetPackage Command = "go_get_package"
- ListImports Command = "list_imports"
- ListKnownPackages Command = "list_known_packages"
- RegenerateCgo Command = "regenerate_cgo"
- RemoveDependency Command = "remove_dependency"
- RunTests Command = "run_tests"
- RunVulncheckExp Command = "run_vulncheck_exp"
- StartDebugging Command = "start_debugging"
- Test Command = "test"
- Tidy Command = "tidy"
- ToggleGCDetails Command = "toggle_gc_details"
- UpdateGoSum Command = "update_go_sum"
- UpgradeDependency Command = "upgrade_dependency"
- Vendor Command = "vendor"
-)
-
-var Commands = []Command{
- AddDependency,
- AddImport,
- ApplyFix,
- CheckUpgrades,
- EditGoDirective,
- GCDetails,
- Generate,
- GenerateGoplsMod,
- GoGetPackage,
- ListImports,
- ListKnownPackages,
- RegenerateCgo,
- RemoveDependency,
- RunTests,
- RunVulncheckExp,
- StartDebugging,
- Test,
- Tidy,
- ToggleGCDetails,
- UpdateGoSum,
- UpgradeDependency,
- Vendor,
-}
-
-func Dispatch(ctx context.Context, params *protocol.ExecuteCommandParams, s Interface) (interface{}, error) {
- switch params.Command {
- case "gopls.add_dependency":
- var a0 DependencyArgs
- if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
- return nil, err
- }
- return nil, s.AddDependency(ctx, a0)
- case "gopls.add_import":
- var a0 AddImportArgs
- if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
- return nil, err
- }
- return nil, s.AddImport(ctx, a0)
- case "gopls.apply_fix":
- var a0 ApplyFixArgs
- if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
- return nil, err
- }
- return nil, s.ApplyFix(ctx, a0)
- case "gopls.check_upgrades":
- var a0 CheckUpgradesArgs
- if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
- return nil, err
- }
- return nil, s.CheckUpgrades(ctx, a0)
- case "gopls.edit_go_directive":
- var a0 EditGoDirectiveArgs
- if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
- return nil, err
- }
- return nil, s.EditGoDirective(ctx, a0)
- case "gopls.gc_details":
- var a0 protocol.DocumentURI
- if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
- return nil, err
- }
- return nil, s.GCDetails(ctx, a0)
- case "gopls.generate":
- var a0 GenerateArgs
- if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
- return nil, err
- }
- return nil, s.Generate(ctx, a0)
- case "gopls.generate_gopls_mod":
- var a0 URIArg
- if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
- return nil, err
- }
- return nil, s.GenerateGoplsMod(ctx, a0)
- case "gopls.go_get_package":
- var a0 GoGetPackageArgs
- if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
- return nil, err
- }
- return nil, s.GoGetPackage(ctx, a0)
- case "gopls.list_imports":
- var a0 URIArg
- if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
- return nil, err
- }
- return s.ListImports(ctx, a0)
- case "gopls.list_known_packages":
- var a0 URIArg
- if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
- return nil, err
- }
- return s.ListKnownPackages(ctx, a0)
- case "gopls.regenerate_cgo":
- var a0 URIArg
- if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
- return nil, err
- }
- return nil, s.RegenerateCgo(ctx, a0)
- case "gopls.remove_dependency":
- var a0 RemoveDependencyArgs
- if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
- return nil, err
- }
- return nil, s.RemoveDependency(ctx, a0)
- case "gopls.run_tests":
- var a0 RunTestsArgs
- if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
- return nil, err
- }
- return nil, s.RunTests(ctx, a0)
- case "gopls.run_vulncheck_exp":
- var a0 VulncheckArgs
- if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
- return nil, err
- }
- return s.RunVulncheckExp(ctx, a0)
- case "gopls.start_debugging":
- var a0 DebuggingArgs
- if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
- return nil, err
- }
- return s.StartDebugging(ctx, a0)
- case "gopls.test":
- var a0 protocol.DocumentURI
- var a1 []string
- var a2 []string
- if err := UnmarshalArgs(params.Arguments, &a0, &a1, &a2); err != nil {
- return nil, err
- }
- return nil, s.Test(ctx, a0, a1, a2)
- case "gopls.tidy":
- var a0 URIArgs
- if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
- return nil, err
- }
- return nil, s.Tidy(ctx, a0)
- case "gopls.toggle_gc_details":
- var a0 URIArg
- if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
- return nil, err
- }
- return nil, s.ToggleGCDetails(ctx, a0)
- case "gopls.update_go_sum":
- var a0 URIArgs
- if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
- return nil, err
- }
- return nil, s.UpdateGoSum(ctx, a0)
- case "gopls.upgrade_dependency":
- var a0 DependencyArgs
- if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
- return nil, err
- }
- return nil, s.UpgradeDependency(ctx, a0)
- case "gopls.vendor":
- var a0 URIArg
- if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
- return nil, err
- }
- return nil, s.Vendor(ctx, a0)
- }
- return nil, fmt.Errorf("unsupported command %q", params.Command)
-}
-
-func NewAddDependencyCommand(title string, a0 DependencyArgs) (protocol.Command, error) {
- args, err := MarshalArgs(a0)
- if err != nil {
- return protocol.Command{}, err
- }
- return protocol.Command{
- Title: title,
- Command: "gopls.add_dependency",
- Arguments: args,
- }, nil
-}
-
-func NewAddImportCommand(title string, a0 AddImportArgs) (protocol.Command, error) {
- args, err := MarshalArgs(a0)
- if err != nil {
- return protocol.Command{}, err
- }
- return protocol.Command{
- Title: title,
- Command: "gopls.add_import",
- Arguments: args,
- }, nil
-}
-
-func NewApplyFixCommand(title string, a0 ApplyFixArgs) (protocol.Command, error) {
- args, err := MarshalArgs(a0)
- if err != nil {
- return protocol.Command{}, err
- }
- return protocol.Command{
- Title: title,
- Command: "gopls.apply_fix",
- Arguments: args,
- }, nil
-}
-
-func NewCheckUpgradesCommand(title string, a0 CheckUpgradesArgs) (protocol.Command, error) {
- args, err := MarshalArgs(a0)
- if err != nil {
- return protocol.Command{}, err
- }
- return protocol.Command{
- Title: title,
- Command: "gopls.check_upgrades",
- Arguments: args,
- }, nil
-}
-
-func NewEditGoDirectiveCommand(title string, a0 EditGoDirectiveArgs) (protocol.Command, error) {
- args, err := MarshalArgs(a0)
- if err != nil {
- return protocol.Command{}, err
- }
- return protocol.Command{
- Title: title,
- Command: "gopls.edit_go_directive",
- Arguments: args,
- }, nil
-}
-
-func NewGCDetailsCommand(title string, a0 protocol.DocumentURI) (protocol.Command, error) {
- args, err := MarshalArgs(a0)
- if err != nil {
- return protocol.Command{}, err
- }
- return protocol.Command{
- Title: title,
- Command: "gopls.gc_details",
- Arguments: args,
- }, nil
-}
-
-func NewGenerateCommand(title string, a0 GenerateArgs) (protocol.Command, error) {
- args, err := MarshalArgs(a0)
- if err != nil {
- return protocol.Command{}, err
- }
- return protocol.Command{
- Title: title,
- Command: "gopls.generate",
- Arguments: args,
- }, nil
-}
-
-func NewGenerateGoplsModCommand(title string, a0 URIArg) (protocol.Command, error) {
- args, err := MarshalArgs(a0)
- if err != nil {
- return protocol.Command{}, err
- }
- return protocol.Command{
- Title: title,
- Command: "gopls.generate_gopls_mod",
- Arguments: args,
- }, nil
-}
-
-func NewGoGetPackageCommand(title string, a0 GoGetPackageArgs) (protocol.Command, error) {
- args, err := MarshalArgs(a0)
- if err != nil {
- return protocol.Command{}, err
- }
- return protocol.Command{
- Title: title,
- Command: "gopls.go_get_package",
- Arguments: args,
- }, nil
-}
-
-func NewListImportsCommand(title string, a0 URIArg) (protocol.Command, error) {
- args, err := MarshalArgs(a0)
- if err != nil {
- return protocol.Command{}, err
- }
- return protocol.Command{
- Title: title,
- Command: "gopls.list_imports",
- Arguments: args,
- }, nil
-}
-
-func NewListKnownPackagesCommand(title string, a0 URIArg) (protocol.Command, error) {
- args, err := MarshalArgs(a0)
- if err != nil {
- return protocol.Command{}, err
- }
- return protocol.Command{
- Title: title,
- Command: "gopls.list_known_packages",
- Arguments: args,
- }, nil
-}
-
-func NewRegenerateCgoCommand(title string, a0 URIArg) (protocol.Command, error) {
- args, err := MarshalArgs(a0)
- if err != nil {
- return protocol.Command{}, err
- }
- return protocol.Command{
- Title: title,
- Command: "gopls.regenerate_cgo",
- Arguments: args,
- }, nil
-}
-
-func NewRemoveDependencyCommand(title string, a0 RemoveDependencyArgs) (protocol.Command, error) {
- args, err := MarshalArgs(a0)
- if err != nil {
- return protocol.Command{}, err
- }
- return protocol.Command{
- Title: title,
- Command: "gopls.remove_dependency",
- Arguments: args,
- }, nil
-}
-
-func NewRunTestsCommand(title string, a0 RunTestsArgs) (protocol.Command, error) {
- args, err := MarshalArgs(a0)
- if err != nil {
- return protocol.Command{}, err
- }
- return protocol.Command{
- Title: title,
- Command: "gopls.run_tests",
- Arguments: args,
- }, nil
-}
-
-func NewRunVulncheckExpCommand(title string, a0 VulncheckArgs) (protocol.Command, error) {
- args, err := MarshalArgs(a0)
- if err != nil {
- return protocol.Command{}, err
- }
- return protocol.Command{
- Title: title,
- Command: "gopls.run_vulncheck_exp",
- Arguments: args,
- }, nil
-}
-
-func NewStartDebuggingCommand(title string, a0 DebuggingArgs) (protocol.Command, error) {
- args, err := MarshalArgs(a0)
- if err != nil {
- return protocol.Command{}, err
- }
- return protocol.Command{
- Title: title,
- Command: "gopls.start_debugging",
- Arguments: args,
- }, nil
-}
-
-func NewTestCommand(title string, a0 protocol.DocumentURI, a1 []string, a2 []string) (protocol.Command, error) {
- args, err := MarshalArgs(a0, a1, a2)
- if err != nil {
- return protocol.Command{}, err
- }
- return protocol.Command{
- Title: title,
- Command: "gopls.test",
- Arguments: args,
- }, nil
-}
-
-func NewTidyCommand(title string, a0 URIArgs) (protocol.Command, error) {
- args, err := MarshalArgs(a0)
- if err != nil {
- return protocol.Command{}, err
- }
- return protocol.Command{
- Title: title,
- Command: "gopls.tidy",
- Arguments: args,
- }, nil
-}
-
-func NewToggleGCDetailsCommand(title string, a0 URIArg) (protocol.Command, error) {
- args, err := MarshalArgs(a0)
- if err != nil {
- return protocol.Command{}, err
- }
- return protocol.Command{
- Title: title,
- Command: "gopls.toggle_gc_details",
- Arguments: args,
- }, nil
-}
-
-func NewUpdateGoSumCommand(title string, a0 URIArgs) (protocol.Command, error) {
- args, err := MarshalArgs(a0)
- if err != nil {
- return protocol.Command{}, err
- }
- return protocol.Command{
- Title: title,
- Command: "gopls.update_go_sum",
- Arguments: args,
- }, nil
-}
-
-func NewUpgradeDependencyCommand(title string, a0 DependencyArgs) (protocol.Command, error) {
- args, err := MarshalArgs(a0)
- if err != nil {
- return protocol.Command{}, err
- }
- return protocol.Command{
- Title: title,
- Command: "gopls.upgrade_dependency",
- Arguments: args,
- }, nil
-}
-
-func NewVendorCommand(title string, a0 URIArg) (protocol.Command, error) {
- args, err := MarshalArgs(a0)
- if err != nil {
- return protocol.Command{}, err
- }
- return protocol.Command{
- Title: title,
- Command: "gopls.vendor",
- Arguments: args,
- }, nil
-}
diff --git a/internal/lsp/command/commandmeta/meta.go b/internal/lsp/command/commandmeta/meta.go
deleted file mode 100644
index 102b89839..000000000
--- a/internal/lsp/command/commandmeta/meta.go
+++ /dev/null
@@ -1,258 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package commandmeta provides metadata about LSP commands, by analyzing the
-// command.Interface type.
-package commandmeta
-
-import (
- "fmt"
- "go/ast"
- "go/token"
- "go/types"
- "reflect"
- "strings"
- "unicode"
-
- "golang.org/x/tools/go/ast/astutil"
- "golang.org/x/tools/go/packages"
- "golang.org/x/tools/internal/lsp/command"
-)
-
-type Command struct {
- MethodName string
- Name string
- // TODO(rFindley): I think Title can actually be eliminated. In all cases
- // where we use it, there is probably a more appropriate contextual title.
- Title string
- Doc string
- Args []*Field
- Result *Field
-}
-
-func (c *Command) ID() string {
- return command.ID(c.Name)
-}
-
-type Field struct {
- Name string
- Doc string
- JSONTag string
- Type types.Type
- FieldMod string
- // In some circumstances, we may want to recursively load additional field
- // descriptors for fields of struct types, documenting their internals.
- Fields []*Field
-}
-
-func Load() (*packages.Package, []*Command, error) {
- pkgs, err := packages.Load(
- &packages.Config{
- Mode: packages.NeedTypes | packages.NeedTypesInfo | packages.NeedSyntax | packages.NeedImports | packages.NeedDeps,
- BuildFlags: []string{"-tags=generate"},
- },
- "golang.org/x/tools/internal/lsp/command",
- )
- if err != nil {
- return nil, nil, fmt.Errorf("packages.Load: %v", err)
- }
- pkg := pkgs[0]
- if len(pkg.Errors) > 0 {
- return pkg, nil, pkg.Errors[0]
- }
-
- // For a bit of type safety, use reflection to get the interface name within
- // the package scope.
- it := reflect.TypeOf((*command.Interface)(nil)).Elem()
- obj := pkg.Types.Scope().Lookup(it.Name()).Type().Underlying().(*types.Interface)
-
- // Load command metadata corresponding to each interface method.
- var commands []*Command
- loader := fieldLoader{make(map[types.Object]*Field)}
- for i := 0; i < obj.NumMethods(); i++ {
- m := obj.Method(i)
- c, err := loader.loadMethod(pkg, m)
- if err != nil {
- return nil, nil, fmt.Errorf("loading %s: %v", m.Name(), err)
- }
- commands = append(commands, c)
- }
- return pkg, commands, nil
-}
-
-// fieldLoader loads field information, memoizing results to prevent infinite
-// recursion.
-type fieldLoader struct {
- loaded map[types.Object]*Field
-}
-
-var universeError = types.Universe.Lookup("error").Type()
-
-func (l *fieldLoader) loadMethod(pkg *packages.Package, m *types.Func) (*Command, error) {
- node, err := findField(pkg, m.Pos())
- if err != nil {
- return nil, err
- }
- title, doc := splitDoc(node.Doc.Text())
- c := &Command{
- MethodName: m.Name(),
- Name: lspName(m.Name()),
- Doc: doc,
- Title: title,
- }
- sig := m.Type().Underlying().(*types.Signature)
- rlen := sig.Results().Len()
- if rlen > 2 || rlen == 0 {
- return nil, fmt.Errorf("must have 1 or 2 returns, got %d", rlen)
- }
- finalResult := sig.Results().At(rlen - 1)
- if !types.Identical(finalResult.Type(), universeError) {
- return nil, fmt.Errorf("final return must be error")
- }
- if rlen == 2 {
- obj := sig.Results().At(0)
- c.Result, err = l.loadField(pkg, obj, "", "")
- if err != nil {
- return nil, err
- }
- }
- for i := 0; i < sig.Params().Len(); i++ {
- obj := sig.Params().At(i)
- fld, err := l.loadField(pkg, obj, "", "")
- if err != nil {
- return nil, err
- }
- if i == 0 {
- // Lazy check that the first argument is a context. We could relax this,
- // but then the generated code gets more complicated.
- if named, ok := fld.Type.(*types.Named); !ok || named.Obj().Name() != "Context" || named.Obj().Pkg().Path() != "context" {
- return nil, fmt.Errorf("first method parameter must be context.Context")
- }
- // Skip the context argument, as it is implied.
- continue
- }
- c.Args = append(c.Args, fld)
- }
- return c, nil
-}
-
-func (l *fieldLoader) loadField(pkg *packages.Package, obj *types.Var, doc, tag string) (*Field, error) {
- if existing, ok := l.loaded[obj]; ok {
- return existing, nil
- }
- fld := &Field{
- Name: obj.Name(),
- Doc: strings.TrimSpace(doc),
- Type: obj.Type(),
- JSONTag: reflect.StructTag(tag).Get("json"),
- }
- under := fld.Type.Underlying()
- // Quick-and-dirty handling for various underlying types.
- switch p := under.(type) {
- case *types.Pointer:
- under = p.Elem().Underlying()
- case *types.Array:
- under = p.Elem().Underlying()
- fld.FieldMod = fmt.Sprintf("[%d]", p.Len())
- case *types.Slice:
- under = p.Elem().Underlying()
- fld.FieldMod = "[]"
- }
-
- if s, ok := under.(*types.Struct); ok {
- for i := 0; i < s.NumFields(); i++ {
- obj2 := s.Field(i)
- pkg2 := pkg
- if obj2.Pkg() != pkg2.Types {
- pkg2, ok = pkg.Imports[obj2.Pkg().Path()]
- if !ok {
- return nil, fmt.Errorf("missing import for %q: %q", pkg.ID, obj2.Pkg().Path())
- }
- }
- node, err := findField(pkg2, obj2.Pos())
- if err != nil {
- return nil, err
- }
- tag := s.Tag(i)
- structField, err := l.loadField(pkg2, obj2, node.Doc.Text(), tag)
- if err != nil {
- return nil, err
- }
- fld.Fields = append(fld.Fields, structField)
- }
- }
- return fld, nil
-}
-
-// splitDoc parses a command doc string to separate the title from normal
-// documentation.
-//
-// The doc comment should be of the form: "MethodName: Title\nDocumentation"
-func splitDoc(text string) (title, doc string) {
- docParts := strings.SplitN(text, "\n", 2)
- titleParts := strings.SplitN(docParts[0], ":", 2)
- if len(titleParts) > 1 {
- title = strings.TrimSpace(titleParts[1])
- }
- if len(docParts) > 1 {
- doc = strings.TrimSpace(docParts[1])
- }
- return title, doc
-}
-
-// lspName returns the normalized command name to use in the LSP.
-func lspName(methodName string) string {
- words := splitCamel(methodName)
- for i := range words {
- words[i] = strings.ToLower(words[i])
- }
- return strings.Join(words, "_")
-}
-
-// splitCamel splits s into words, according to camel-case word boundaries.
-// Initialisms are grouped as a single word.
-//
-// For example:
-// "RunTests" -> []string{"Run", "Tests"}
-// "GCDetails" -> []string{"GC", "Details"}
-func splitCamel(s string) []string {
- var words []string
- for len(s) > 0 {
- last := strings.LastIndexFunc(s, unicode.IsUpper)
- if last < 0 {
- last = 0
- }
- if last == len(s)-1 {
- // Group initialisms as a single word.
- last = 1 + strings.LastIndexFunc(s[:last], func(r rune) bool { return !unicode.IsUpper(r) })
- }
- words = append(words, s[last:])
- s = s[:last]
- }
- for i := 0; i < len(words)/2; i++ {
- j := len(words) - i - 1
- words[i], words[j] = words[j], words[i]
- }
- return words
-}
-
-// findField finds the struct field or interface method positioned at pos,
-// within the AST.
-func findField(pkg *packages.Package, pos token.Pos) (*ast.Field, error) {
- fset := pkg.Fset
- var file *ast.File
- for _, f := range pkg.Syntax {
- if fset.Position(f.Pos()).Filename == fset.Position(pos).Filename {
- file = f
- break
- }
- }
- if file == nil {
- return nil, fmt.Errorf("no file for pos %v", pos)
- }
- path, _ := astutil.PathEnclosingInterval(file, pos, pos)
- // This is fragile, but in the cases we care about, the field will be in
- // path[1].
- return path[1].(*ast.Field), nil
-}
diff --git a/internal/lsp/command/gen/gen.go b/internal/lsp/command/gen/gen.go
deleted file mode 100644
index 8f7a2d503..000000000
--- a/internal/lsp/command/gen/gen.go
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package gen is used to generate command bindings from the gopls command
-// interface.
-package gen
-
-import (
- "bytes"
- "fmt"
- "go/types"
- "text/template"
-
- "golang.org/x/tools/internal/imports"
- "golang.org/x/tools/internal/lsp/command/commandmeta"
-)
-
-const src = `// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Don't include this file during code generation, or it will break the build
-// if existing interface methods have been modified.
-//go:build !generate
-// +build !generate
-
-package command
-
-// Code generated by generate.go. DO NOT EDIT.
-
-import (
- {{range $k, $v := .Imports -}}
- "{{$k}}"
- {{end}}
-)
-
-const (
-{{- range .Commands}}
- {{.MethodName}} Command = "{{.Name}}"
-{{- end}}
-)
-
-var Commands = []Command {
-{{- range .Commands}}
- {{.MethodName}},
-{{- end}}
-}
-
-func Dispatch(ctx context.Context, params *protocol.ExecuteCommandParams, s Interface) (interface{}, error) {
- switch params.Command {
- {{- range .Commands}}
- case "{{.ID}}":
- {{- if .Args -}}
- {{- range $i, $v := .Args}}
- var a{{$i}} {{typeString $v.Type}}
- {{- end}}
- if err := UnmarshalArgs(params.Arguments{{range $i, $v := .Args}}, &a{{$i}}{{end}}); err != nil {
- return nil, err
- }
- {{end -}}
- return {{if not .Result}}nil, {{end}}s.{{.MethodName}}(ctx{{range $i, $v := .Args}}, a{{$i}}{{end}})
- {{- end}}
- }
- return nil, fmt.Errorf("unsupported command %q", params.Command)
-}
-{{- range .Commands}}
-
-func New{{.MethodName}}Command(title string, {{range $i, $v := .Args}}{{if $i}}, {{end}}a{{$i}} {{typeString $v.Type}}{{end}}) (protocol.Command, error) {
- args, err := MarshalArgs({{range $i, $v := .Args}}{{if $i}}, {{end}}a{{$i}}{{end}})
- if err != nil {
- return protocol.Command{}, err
- }
- return protocol.Command{
- Title: title,
- Command: "{{.ID}}",
- Arguments: args,
- }, nil
-}
-{{end}}
-`
-
-type data struct {
- Imports map[string]bool
- Commands []*commandmeta.Command
-}
-
-func Generate() ([]byte, error) {
- pkg, cmds, err := commandmeta.Load()
- if err != nil {
- return nil, fmt.Errorf("loading command data: %v", err)
- }
- qf := func(p *types.Package) string {
- if p == pkg.Types {
- return ""
- }
- return p.Name()
- }
- tmpl, err := template.New("").Funcs(template.FuncMap{
- "typeString": func(t types.Type) string {
- return types.TypeString(t, qf)
- },
- }).Parse(src)
- if err != nil {
- return nil, err
- }
- d := data{
- Commands: cmds,
- Imports: map[string]bool{
- "context": true,
- "fmt": true,
- "golang.org/x/tools/internal/lsp/protocol": true,
- },
- }
- const thispkg = "golang.org/x/tools/internal/lsp/command"
- for _, c := range d.Commands {
- for _, arg := range c.Args {
- pth := pkgPath(arg.Type)
- if pth != "" && pth != thispkg {
- d.Imports[pth] = true
- }
- }
- if c.Result != nil {
- pth := pkgPath(c.Result.Type)
- if pth != "" && pth != thispkg {
- d.Imports[pth] = true
- }
- }
- }
-
- var buf bytes.Buffer
- if err := tmpl.Execute(&buf, d); err != nil {
- return nil, fmt.Errorf("executing: %v", err)
- }
-
- opts := &imports.Options{
- AllErrors: true,
- FormatOnly: true,
- Comments: true,
- }
- content, err := imports.Process("", buf.Bytes(), opts)
- if err != nil {
- return nil, fmt.Errorf("goimports: %v", err)
- }
- return content, nil
-}
-
-func pkgPath(t types.Type) string {
- if n, ok := t.(*types.Named); ok {
- if pkg := n.Obj().Pkg(); pkg != nil {
- return pkg.Path()
- }
- }
- return ""
-}
diff --git a/internal/lsp/command/generate.go b/internal/lsp/command/generate.go
deleted file mode 100644
index 14628c733..000000000
--- a/internal/lsp/command/generate.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build ignore
-// +build ignore
-
-package main
-
-import (
- "fmt"
- "io/ioutil"
- "os"
-
- "golang.org/x/tools/internal/lsp/command/gen"
-)
-
-func main() {
- content, err := gen.Generate()
- if err != nil {
- fmt.Fprintf(os.Stderr, "%v\n", err)
- os.Exit(1)
- }
- ioutil.WriteFile("command_gen.go", content, 0644)
-}
diff --git a/internal/lsp/command/interface.go b/internal/lsp/command/interface.go
deleted file mode 100644
index 9aecfbe78..000000000
--- a/internal/lsp/command/interface.go
+++ /dev/null
@@ -1,384 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package command defines the interface provided by gopls for the
-// workspace/executeCommand LSP request.
-//
-// This interface is fully specified by the Interface type, provided it
-// conforms to the restrictions outlined in its doc string.
-//
-// Bindings for server-side command dispatch and client-side serialization are
-// also provided by this package, via code generation.
-package command
-
-//go:generate go run -tags=generate generate.go
-
-import (
- "context"
-
- "golang.org/x/tools/internal/lsp/protocol"
-)
-
-// Interface defines the interface gopls exposes for the
-// workspace/executeCommand request.
-//
-// This interface is used to generate marshaling/unmarshaling code, dispatch,
-// and documentation, and so has some additional restrictions:
-// 1. All method arguments must be JSON serializable.
-// 2. Methods must return either error or (T, error), where T is a
-// JSON serializable type.
-// 3. The first line of the doc string is special. Everything after the colon
-// is considered the command 'Title'.
-// TODO(rFindley): reconsider this -- Title may be unnecessary.
-type Interface interface {
- // ApplyFix: Apply a fix
- //
- // Applies a fix to a region of source code.
- ApplyFix(context.Context, ApplyFixArgs) error
- // Test: Run test(s) (legacy)
- //
- // Runs `go test` for a specific set of test or benchmark functions.
- Test(context.Context, protocol.DocumentURI, []string, []string) error
-
- // TODO: deprecate Test in favor of RunTests below.
-
- // Test: Run test(s)
- //
- // Runs `go test` for a specific set of test or benchmark functions.
- RunTests(context.Context, RunTestsArgs) error
-
- // Generate: Run go generate
- //
- // Runs `go generate` for a given directory.
- Generate(context.Context, GenerateArgs) error
-
- // RegenerateCgo: Regenerate cgo
- //
- // Regenerates cgo definitions.
- RegenerateCgo(context.Context, URIArg) error
-
- // Tidy: Run go mod tidy
- //
- // Runs `go mod tidy` for a module.
- Tidy(context.Context, URIArgs) error
-
- // Vendor: Run go mod vendor
- //
- // Runs `go mod vendor` for a module.
- Vendor(context.Context, URIArg) error
-
- // EditGoDirective: Run go mod edit -go=version
- //
- // Runs `go mod edit -go=version` for a module.
- EditGoDirective(context.Context, EditGoDirectiveArgs) error
-
- // UpdateGoSum: Update go.sum
- //
- // Updates the go.sum file for a module.
- UpdateGoSum(context.Context, URIArgs) error
-
- // CheckUpgrades: Check for upgrades
- //
- // Checks for module upgrades.
- CheckUpgrades(context.Context, CheckUpgradesArgs) error
-
- // AddDependency: Add a dependency
- //
- // Adds a dependency to the go.mod file for a module.
- AddDependency(context.Context, DependencyArgs) error
-
- // UpgradeDependency: Upgrade a dependency
- //
- // Upgrades a dependency in the go.mod file for a module.
- UpgradeDependency(context.Context, DependencyArgs) error
-
- // RemoveDependency: Remove a dependency
- //
- // Removes a dependency from the go.mod file of a module.
- RemoveDependency(context.Context, RemoveDependencyArgs) error
-
- // GoGetPackage: go get a package
- //
- // Runs `go get` to fetch a package.
- GoGetPackage(context.Context, GoGetPackageArgs) error
-
- // GCDetails: Toggle gc_details
- //
- // Toggle the calculation of gc annotations.
- GCDetails(context.Context, protocol.DocumentURI) error
-
- // TODO: deprecate GCDetails in favor of ToggleGCDetails below.
-
- // ToggleGCDetails: Toggle gc_details
- //
- // Toggle the calculation of gc annotations.
- ToggleGCDetails(context.Context, URIArg) error
-
- // GenerateGoplsMod: Generate gopls.mod
- //
- // (Re)generate the gopls.mod file for a workspace.
- GenerateGoplsMod(context.Context, URIArg) error
-
- // ListKnownPackages: List known packages
- //
- // Retrieve a list of packages that are importable from the given URI.
- ListKnownPackages(context.Context, URIArg) (ListKnownPackagesResult, error)
-
- // ListImports: List imports of a file and its package
- //
- // Retrieve a list of imports in the given Go file, and the package it
- // belongs to.
- ListImports(context.Context, URIArg) (ListImportsResult, error)
-
- // AddImport: Add an import
- //
- // Ask the server to add an import path to a given Go file. The method will
- // call applyEdit on the client so that clients don't have to apply the edit
- // themselves.
- AddImport(context.Context, AddImportArgs) error
-
- // StartDebugging: Start the gopls debug server
- //
- // Start the gopls debug server if it isn't running, and return the debug
- // address.
- StartDebugging(context.Context, DebuggingArgs) (DebuggingResult, error)
-
- // RunVulncheckExp: Run vulncheck (experimental)
- //
- // Run vulnerability check (`govulncheck`).
- RunVulncheckExp(context.Context, VulncheckArgs) (VulncheckResult, error)
-}
-
-type RunTestsArgs struct {
- // The test file containing the tests to run.
- URI protocol.DocumentURI
-
- // Specific test names to run, e.g. TestFoo.
- Tests []string
-
- // Specific benchmarks to run, e.g. BenchmarkFoo.
- Benchmarks []string
-}
-
-type GenerateArgs struct {
- // URI for the directory to generate.
- Dir protocol.DocumentURI
-
- // Whether to generate recursively (go generate ./...)
- Recursive bool
-}
-
-// TODO(rFindley): document the rest of these once the docgen is fleshed out.
-
-type ApplyFixArgs struct {
- // The fix to apply.
- Fix string
- // The file URI for the document to fix.
- URI protocol.DocumentURI
- // The document range to scan for fixes.
- Range protocol.Range
-}
-
-type URIArg struct {
- // The file URI.
- URI protocol.DocumentURI
-}
-
-type URIArgs struct {
- // The file URIs.
- URIs []protocol.DocumentURI
-}
-
-type CheckUpgradesArgs struct {
- // The go.mod file URI.
- URI protocol.DocumentURI
- // The modules to check.
- Modules []string
-}
-
-type DependencyArgs struct {
- // The go.mod file URI.
- URI protocol.DocumentURI
- // Additional args to pass to the go command.
- GoCmdArgs []string
- // Whether to add a require directive.
- AddRequire bool
-}
-
-type RemoveDependencyArgs struct {
- // The go.mod file URI.
- URI protocol.DocumentURI
- // The module path to remove.
- ModulePath string
- OnlyDiagnostic bool
-}
-
-type EditGoDirectiveArgs struct {
- // Any document URI within the relevant module.
- URI protocol.DocumentURI
- // The version to pass to `go mod edit -go`.
- Version string
-}
-
-type GoGetPackageArgs struct {
- // Any document URI within the relevant module.
- URI protocol.DocumentURI
- // The package to go get.
- Pkg string
- AddRequire bool
-}
-
-type AddImportArgs struct {
- // ImportPath is the target import path that should
- // be added to the URI file
- ImportPath string
- // URI is the file that the ImportPath should be
- // added to
- URI protocol.DocumentURI
-}
-
-type ListKnownPackagesResult struct {
- // Packages is a list of packages relative
- // to the URIArg passed by the command request.
- // In other words, it omits paths that are already
- // imported or cannot be imported due to compiler
- // restrictions.
- Packages []string
-}
-
-type ListImportsResult struct {
- // Imports is a list of imports in the requested file.
- Imports []FileImport
-
- // PackageImports is a list of all imports in the requested file's package.
- PackageImports []PackageImport
-}
-
-type FileImport struct {
- // Path is the import path of the import.
- Path string
- // Name is the name of the import, e.g. `foo` in `import foo "strings"`.
- Name string
-}
-
-type PackageImport struct {
- // Path is the import path of the import.
- Path string
-}
-
-type WorkspaceMetadataArgs struct {
-}
-
-type WorkspaceMetadataResult struct {
- // All workspaces for this session.
- Workspaces []Workspace
-}
-
-type Workspace struct {
- // The workspace name.
- Name string
- // The workspace module directory.
- ModuleDir string
-}
-
-type DebuggingArgs struct {
- // Optional: the address (including port) for the debug server to listen on.
- // If not provided, the debug server will bind to "localhost:0", and the
- // full debug URL will be contained in the result.
- //
- // If there is more than one gopls instance along the serving path (i.e. you
- // are using a daemon), each gopls instance will attempt to start debugging.
- // If Addr specifies a port, only the daemon will be able to bind to that
- // port, and each intermediate gopls instance will fail to start debugging.
- // For this reason it is recommended not to specify a port (or equivalently,
- // to specify ":0").
- //
- // If the server was already debugging this field has no effect, and the
- // result will contain the previously configured debug URL(s).
- Addr string
-}
-
-type DebuggingResult struct {
- // The URLs to use to access the debug servers, for all gopls instances in
- // the serving path. For the common case of a single gopls instance (i.e. no
- // daemon), this will be exactly one address.
- //
- // In the case of one or more gopls instances forwarding the LSP to a daemon,
- // URLs will contain debug addresses for each server in the serving path, in
- // serving order. The daemon debug address will be the last entry in the
- // slice. If any intermediate gopls instance fails to start debugging, no
- // error will be returned but the debug URL for that server in the URLs slice
- // will be empty.
- URLs []string
-}
-
-type VulncheckArgs struct {
- // Dir is the directory from which vulncheck will run from.
- Dir protocol.DocumentURI
-
- // Package pattern. E.g. "", ".", "./...".
- Pattern string
-
- // TODO: Flag []string (flags accepted by govulncheck, e.g., -tests)
- // TODO: Format string (json, text)
-}
-
-type VulncheckResult struct {
- Vuln []Vuln
-
- // TODO: Text string format output?
-}
-
-// CallStack models a trace of function calls starting
-// with a client function or method and ending with a
-// call to a vulnerable symbol.
-type CallStack []StackEntry
-
-// StackEntry models an element of a call stack.
-type StackEntry struct {
- // See golang.org/x/exp/vulncheck.StackEntry.
-
- // User-friendly representation of function/method names.
- // e.g. package.funcName, package.(recvType).methodName, ...
- Name string
- URI protocol.DocumentURI
- Pos protocol.Position // Start position. (0-based. Column is always 0)
-}
-
-// Vuln models an osv.Entry and representative call stacks.
-type Vuln struct {
- // ID is the vulnerability ID (osv.Entry.ID).
- // https://ossf.github.io/osv-schema/#id-modified-fields
- ID string
- // Details is the description of the vulnerability (osv.Entry.Details).
- // https://ossf.github.io/osv-schema/#summary-details-fields
- Details string `json:",omitempty"`
- // Aliases are alternative IDs of the vulnerability.
- // https://ossf.github.io/osv-schema/#aliases-field
- Aliases []string `json:",omitempty"`
-
- // Symbol is the name of the detected vulnerable function or method.
- Symbol string `json:",omitempty"`
- // PkgPath is the package path of the detected Symbol.
- PkgPath string `json:",omitempty"`
- // ModPath is the module path corresponding to PkgPath.
- // TODO: how do we specify standard library's vulnerability?
- ModPath string `json:",omitempty"`
-
- // URL is the URL for more info about the information.
- // Either the database specific URL or the one of the URLs
- // included in osv.Entry.References.
- URL string `json:",omitempty"`
-
- // Current is the current module version.
- CurrentVersion string `json:",omitempty"`
-
- // Fixed is the minimum module version that contains the fix.
- FixedVersion string `json:",omitempty"`
-
- // Example call stacks.
- CallStacks []CallStack `json:",omitempty"`
-
- // TODO: import graph & module graph.
-}
diff --git a/internal/lsp/command/interface_test.go b/internal/lsp/command/interface_test.go
deleted file mode 100644
index 9ea30b446..000000000
--- a/internal/lsp/command/interface_test.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package command_test
-
-import (
- "bytes"
- "io/ioutil"
- "testing"
-
- "golang.org/x/tools/internal/lsp/command/gen"
- "golang.org/x/tools/internal/testenv"
-)
-
-func TestGenerated(t *testing.T) {
- testenv.NeedsGoBuild(t) // This is a lie. We actually need the source code.
-
- onDisk, err := ioutil.ReadFile("command_gen.go")
- if err != nil {
- t.Fatal(err)
- }
-
- generated, err := gen.Generate()
- if err != nil {
- t.Fatal(err)
- }
- if !bytes.Equal(onDisk, generated) {
- t.Error("command_gen.go is stale -- regenerate")
- }
-}
diff --git a/internal/lsp/command/util.go b/internal/lsp/command/util.go
deleted file mode 100644
index dc9f22fad..000000000
--- a/internal/lsp/command/util.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package command
-
-import (
- "encoding/json"
- "fmt"
-)
-
-// ID returns the command name for use in the LSP.
-func ID(name string) string {
- return "gopls." + name
-}
-
-type Command string
-
-func (c Command) ID() string {
- return ID(string(c))
-}
-
-// MarshalArgs encodes the given arguments to json.RawMessages. This function
-// is used to construct arguments to a protocol.Command.
-//
-// Example usage:
-//
-// jsonArgs, err := MarshalArgs(1, "hello", true, StructuredArg{42, 12.6})
-//
-func MarshalArgs(args ...interface{}) ([]json.RawMessage, error) {
- var out []json.RawMessage
- for _, arg := range args {
- argJSON, err := json.Marshal(arg)
- if err != nil {
- return nil, err
- }
- out = append(out, argJSON)
- }
- return out, nil
-}
-
-// UnmarshalArgs decodes the given json.RawMessages to the variables provided
-// by args. Each element of args should be a pointer.
-//
-// Example usage:
-//
-// var (
-// num int
-// str string
-// bul bool
-// structured StructuredArg
-// )
-// err := UnmarshalArgs(args, &num, &str, &bul, &structured)
-//
-func UnmarshalArgs(jsonArgs []json.RawMessage, args ...interface{}) error {
- if len(args) != len(jsonArgs) {
- return fmt.Errorf("DecodeArgs: expected %d input arguments, got %d JSON arguments", len(args), len(jsonArgs))
- }
- for i, arg := range args {
- if err := json.Unmarshal(jsonArgs[i], arg); err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/internal/lsp/completion.go b/internal/lsp/completion.go
deleted file mode 100644
index 5c88ed0e4..000000000
--- a/internal/lsp/completion.go
+++ /dev/null
@@ -1,178 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lsp
-
-import (
- "bytes"
- "context"
- "fmt"
- "strings"
-
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/lsp/debug/tag"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/lsp/source/completion"
- "golang.org/x/tools/internal/lsp/template"
- "golang.org/x/tools/internal/lsp/work"
- "golang.org/x/tools/internal/span"
-)
-
-func (s *Server) completion(ctx context.Context, params *protocol.CompletionParams) (*protocol.CompletionList, error) {
- snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.UnknownKind)
- defer release()
- if !ok {
- return nil, err
- }
- var candidates []completion.CompletionItem
- var surrounding *completion.Selection
- switch snapshot.View().FileKind(fh) {
- case source.Go:
- candidates, surrounding, err = completion.Completion(ctx, snapshot, fh, params.Position, params.Context)
- case source.Mod:
- candidates, surrounding = nil, nil
- case source.Work:
- cl, err := work.Completion(ctx, snapshot, fh, params.Position)
- if err != nil {
- break
- }
- return cl, nil
- case source.Tmpl:
- var cl *protocol.CompletionList
- cl, err = template.Completion(ctx, snapshot, fh, params.Position, params.Context)
- if err != nil {
- break // use common error handling, candidates==nil
- }
- return cl, nil
- }
- if err != nil {
- event.Error(ctx, "no completions found", err, tag.Position.Of(params.Position))
- }
- if candidates == nil {
- return &protocol.CompletionList{
- IsIncomplete: true,
- Items: []protocol.CompletionItem{},
- }, nil
- }
- // We might need to adjust the position to account for the prefix.
- rng, err := surrounding.Range()
- if err != nil {
- return nil, err
- }
-
- // internal/span treats end of file as the beginning of the next line, even
- // when it's not newline-terminated. We correct for that behaviour here if
- // end of file is not newline-terminated. See golang/go#41029.
- src, err := fh.Read()
- if err != nil {
- return nil, err
- }
- numLines := len(bytes.Split(src, []byte("\n")))
- tok := snapshot.FileSet().File(surrounding.Start())
- eof := tok.Pos(tok.Size())
-
- // For newline-terminated files, the line count reported by go/token should
- // be lower than the actual number of lines we see when splitting by \n. If
- // they're the same, the file isn't newline-terminated.
- if tok.Size() > 0 && tok.LineCount() == numLines {
- // Get the span for the last character in the file-1. This is
- // technically incorrect, but will get span to point to the previous
- // line.
- spn, err := span.NewRange(snapshot.FileSet(), eof-1, eof-1).Span()
- if err != nil {
- return nil, err
- }
- m := &protocol.ColumnMapper{
- URI: fh.URI(),
- Converter: span.NewContentConverter(fh.URI().Filename(), src),
- Content: src,
- }
- eofRng, err := m.Range(spn)
- if err != nil {
- return nil, err
- }
- // Instead of using the computed range, correct for our earlier
- // position adjustment by adding 1 to the column, not the line number.
- pos := protocol.Position{
- Line: eofRng.Start.Line,
- Character: eofRng.Start.Character + 1,
- }
- if surrounding.Start() >= eof {
- rng.Start = pos
- }
- if surrounding.End() >= eof {
- rng.End = pos
- }
- }
-
- // When using deep completions/fuzzy matching, report results as incomplete so
- // client fetches updated completions after every key stroke.
- options := snapshot.View().Options()
- incompleteResults := options.DeepCompletion || options.Matcher == source.Fuzzy
-
- items := toProtocolCompletionItems(candidates, rng, options)
-
- return &protocol.CompletionList{
- IsIncomplete: incompleteResults,
- Items: items,
- }, nil
-}
-
-func toProtocolCompletionItems(candidates []completion.CompletionItem, rng protocol.Range, options *source.Options) []protocol.CompletionItem {
- var (
- items = make([]protocol.CompletionItem, 0, len(candidates))
- numDeepCompletionsSeen int
- )
- for i, candidate := range candidates {
- // Limit the number of deep completions to not overwhelm the user in cases
- // with dozens of deep completion matches.
- if candidate.Depth > 0 {
- if !options.DeepCompletion {
- continue
- }
- if numDeepCompletionsSeen >= completion.MaxDeepCompletions {
- continue
- }
- numDeepCompletionsSeen++
- }
- insertText := candidate.InsertText
- if options.InsertTextFormat == protocol.SnippetTextFormat {
- insertText = candidate.Snippet()
- }
-
- // This can happen if the client has snippets disabled but the
- // candidate only supports snippet insertion.
- if insertText == "" {
- continue
- }
-
- item := protocol.CompletionItem{
- Label: candidate.Label,
- Detail: candidate.Detail,
- Kind: candidate.Kind,
- TextEdit: &protocol.TextEdit{
- NewText: insertText,
- Range: rng,
- },
- InsertTextFormat: options.InsertTextFormat,
- AdditionalTextEdits: candidate.AdditionalTextEdits,
- // This is a hack so that the client sorts completion results in the order
- // according to their score. This can be removed upon the resolution of
- // https://github.com/Microsoft/language-server-protocol/issues/348.
- SortText: fmt.Sprintf("%05d", i),
-
- // Trim operators (VSCode doesn't like weird characters in
- // filterText).
- FilterText: strings.TrimLeft(candidate.InsertText, "&*"),
-
- Preselect: i == 0,
- Documentation: candidate.Documentation,
- Tags: candidate.Tags,
- Deprecated: candidate.Deprecated,
- }
- items = append(items, item)
- }
- return items
-}
diff --git a/internal/lsp/completion_test.go b/internal/lsp/completion_test.go
deleted file mode 100644
index d496a40a5..000000000
--- a/internal/lsp/completion_test.go
+++ /dev/null
@@ -1,154 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lsp
-
-import (
- "strings"
- "testing"
-
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/lsp/tests"
- "golang.org/x/tools/internal/span"
-)
-
-func (r *runner) Completion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) {
- got := r.callCompletion(t, src, func(opts *source.Options) {
- opts.DeepCompletion = false
- opts.Matcher = source.CaseInsensitive
- opts.CompleteUnimported = false
- opts.InsertTextFormat = protocol.SnippetTextFormat
- opts.LiteralCompletions = strings.Contains(string(src.URI()), "literal")
- opts.ExperimentalPostfixCompletions = strings.Contains(string(src.URI()), "postfix")
- })
- got = tests.FilterBuiltins(src, got)
- want := expected(t, test, items)
- if diff := tests.DiffCompletionItems(want, got); diff != "" {
- t.Errorf("%s", diff)
- }
-}
-
-func (r *runner) CompletionSnippet(t *testing.T, src span.Span, expected tests.CompletionSnippet, placeholders bool, items tests.CompletionItems) {
- list := r.callCompletion(t, src, func(opts *source.Options) {
- opts.UsePlaceholders = placeholders
- opts.DeepCompletion = true
- opts.Matcher = source.Fuzzy
- opts.CompleteUnimported = false
- })
- got := tests.FindItem(list, *items[expected.CompletionItem])
- want := expected.PlainSnippet
- if placeholders {
- want = expected.PlaceholderSnippet
- }
- if diff := tests.DiffSnippets(want, got); diff != "" {
- t.Errorf("%s", diff)
- }
-}
-
-func (r *runner) UnimportedCompletion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) {
- got := r.callCompletion(t, src, func(opts *source.Options) {})
- got = tests.FilterBuiltins(src, got)
- want := expected(t, test, items)
- if diff := tests.CheckCompletionOrder(want, got, false); diff != "" {
- t.Errorf("%s", diff)
- }
-}
-
-func (r *runner) DeepCompletion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) {
- got := r.callCompletion(t, src, func(opts *source.Options) {
- opts.DeepCompletion = true
- opts.Matcher = source.CaseInsensitive
- opts.CompleteUnimported = false
- })
- got = tests.FilterBuiltins(src, got)
- want := expected(t, test, items)
- if msg := tests.DiffCompletionItems(want, got); msg != "" {
- t.Errorf("%s", msg)
- }
-}
-
-func (r *runner) FuzzyCompletion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) {
- got := r.callCompletion(t, src, func(opts *source.Options) {
- opts.DeepCompletion = true
- opts.Matcher = source.Fuzzy
- opts.CompleteUnimported = false
- })
- got = tests.FilterBuiltins(src, got)
- want := expected(t, test, items)
- if msg := tests.DiffCompletionItems(want, got); msg != "" {
- t.Errorf("%s", msg)
- }
-}
-
-func (r *runner) CaseSensitiveCompletion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) {
- got := r.callCompletion(t, src, func(opts *source.Options) {
- opts.Matcher = source.CaseSensitive
- opts.CompleteUnimported = false
- })
- got = tests.FilterBuiltins(src, got)
- want := expected(t, test, items)
- if msg := tests.DiffCompletionItems(want, got); msg != "" {
- t.Errorf("%s", msg)
- }
-}
-
-func (r *runner) RankCompletion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) {
- got := r.callCompletion(t, src, func(opts *source.Options) {
- opts.DeepCompletion = true
- opts.Matcher = source.Fuzzy
- opts.CompleteUnimported = false
- opts.LiteralCompletions = true
- opts.ExperimentalPostfixCompletions = true
- })
- want := expected(t, test, items)
- if msg := tests.CheckCompletionOrder(want, got, true); msg != "" {
- t.Errorf("%s", msg)
- }
-}
-
-func expected(t *testing.T, test tests.Completion, items tests.CompletionItems) []protocol.CompletionItem {
- t.Helper()
-
- var want []protocol.CompletionItem
- for _, pos := range test.CompletionItems {
- item := items[pos]
- want = append(want, tests.ToProtocolCompletionItem(*item))
- }
- return want
-}
-
-func (r *runner) callCompletion(t *testing.T, src span.Span, options func(*source.Options)) []protocol.CompletionItem {
- t.Helper()
-
- view, err := r.server.session.ViewOf(src.URI())
- if err != nil {
- t.Fatal(err)
- }
- original := view.Options()
- modified := view.Options().Clone()
- options(modified)
- view, err = view.SetOptions(r.ctx, modified)
- if err != nil {
- t.Error(err)
- return nil
- }
- defer view.SetOptions(r.ctx, original)
-
- list, err := r.server.Completion(r.ctx, &protocol.CompletionParams{
- TextDocumentPositionParams: protocol.TextDocumentPositionParams{
- TextDocument: protocol.TextDocumentIdentifier{
- URI: protocol.URIFromSpanURI(src.URI()),
- },
- Position: protocol.Position{
- Line: uint32(src.Start().Line() - 1),
- Character: uint32(src.Start().Column() - 1),
- },
- },
- })
- if err != nil {
- t.Fatal(err)
- }
- return list.Items
-}
diff --git a/internal/lsp/debug/info.go b/internal/lsp/debug/info.go
deleted file mode 100644
index bcc2f4f06..000000000
--- a/internal/lsp/debug/info.go
+++ /dev/null
@@ -1,265 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package debug exports debug information for gopls.
-package debug
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "io"
- "reflect"
- "runtime"
- "runtime/debug"
- "sort"
- "strings"
-
- "golang.org/x/tools/internal/lsp/source"
-)
-
-type PrintMode int
-
-const (
- PlainText = PrintMode(iota)
- Markdown
- HTML
- JSON
-)
-
-// Version is a manually-updated mechanism for tracking versions.
-const Version = "master"
-
-// ServerVersion is the format used by gopls to report its version to the
-// client. This format is structured so that the client can parse it easily.
-type ServerVersion struct {
- *BuildInfo
- Version string
-}
-
-type Module struct {
- ModuleVersion
- Replace *ModuleVersion `json:"replace,omitempty"`
-}
-
-type ModuleVersion struct {
- Path string `json:"path,omitempty"`
- Version string `json:"version,omitempty"`
- Sum string `json:"sum,omitempty"`
-}
-
-// VersionInfo returns the build info for the gopls process. If it was not
-// built in module mode, we return a GOPATH-specific message with the
-// hardcoded version.
-func VersionInfo() *ServerVersion {
- if info, ok := readBuildInfo(); ok {
- return getVersion(info)
- }
- buildInfo := &BuildInfo{}
- // go1.17 or earlier, part of s.BuildInfo are embedded fields.
- buildInfo.Path = "gopls, built in GOPATH mode"
- buildInfo.GoVersion = runtime.Version()
- return &ServerVersion{
- Version: Version,
- BuildInfo: buildInfo,
- }
-}
-
-func getVersion(info *BuildInfo) *ServerVersion {
- return &ServerVersion{
- Version: Version,
- BuildInfo: info,
- }
-}
-
-// PrintServerInfo writes HTML debug info to w for the Instance.
-func (i *Instance) PrintServerInfo(ctx context.Context, w io.Writer) {
- section(w, HTML, "Server Instance", func() {
- fmt.Fprintf(w, "Start time: %v\n", i.StartTime)
- fmt.Fprintf(w, "LogFile: %s\n", i.Logfile)
- fmt.Fprintf(w, "Working directory: %s\n", i.Workdir)
- fmt.Fprintf(w, "Address: %s\n", i.ServerAddress)
- fmt.Fprintf(w, "Debug address: %s\n", i.DebugAddress())
- })
- PrintVersionInfo(ctx, w, true, HTML)
- section(w, HTML, "Command Line", func() {
- fmt.Fprintf(w, "<a href=/debug/pprof/cmdline>cmdline</a>")
- })
-}
-
-// PrintVersionInfo writes version information to w, using the output format
-// specified by mode. verbose controls whether additional information is
-// written, including section headers.
-func PrintVersionInfo(_ context.Context, w io.Writer, verbose bool, mode PrintMode) error {
- info := VersionInfo()
- if mode == JSON {
- return printVersionInfoJSON(w, info)
- }
-
- if !verbose {
- printBuildInfo(w, info, false, mode)
- return nil
- }
- section(w, mode, "Build info", func() {
- printBuildInfo(w, info, true, mode)
- })
- return nil
-}
-
-func printVersionInfoJSON(w io.Writer, info *ServerVersion) error {
- js, err := json.MarshalIndent(info, "", "\t")
- if err != nil {
- return err
- }
- _, err = fmt.Fprint(w, string(js))
- return err
-}
-
-func section(w io.Writer, mode PrintMode, title string, body func()) {
- switch mode {
- case PlainText:
- fmt.Fprintln(w, title)
- fmt.Fprintln(w, strings.Repeat("-", len(title)))
- body()
- case Markdown:
- fmt.Fprintf(w, "#### %s\n\n```\n", title)
- body()
- fmt.Fprintf(w, "```\n")
- case HTML:
- fmt.Fprintf(w, "<h3>%s</h3>\n<pre>\n", title)
- body()
- fmt.Fprint(w, "</pre>\n")
- }
-}
-
-func printBuildInfo(w io.Writer, info *ServerVersion, verbose bool, mode PrintMode) {
- fmt.Fprintf(w, "%v %v\n", info.Path, Version)
- printModuleInfo(w, info.Main, mode)
- if !verbose {
- return
- }
- for _, dep := range info.Deps {
- printModuleInfo(w, *dep, mode)
- }
- fmt.Fprintf(w, "go: %v\n", info.GoVersion)
-}
-
-func printModuleInfo(w io.Writer, m debug.Module, _ PrintMode) {
- fmt.Fprintf(w, " %s@%s", m.Path, m.Version)
- if m.Sum != "" {
- fmt.Fprintf(w, " %s", m.Sum)
- }
- if m.Replace != nil {
- fmt.Fprintf(w, " => %v", m.Replace.Path)
- }
- fmt.Fprintf(w, "\n")
-}
-
-type field struct {
- index []int
-}
-
-var fields []field
-
-// find all the options. The presumption is that the Options are nested structs
-// and that pointers don't need to be dereferenced
-func swalk(t reflect.Type, ix []int, indent string) {
- switch t.Kind() {
- case reflect.Struct:
- for i := 0; i < t.NumField(); i++ {
- fld := t.Field(i)
- ixx := append(append([]int{}, ix...), i)
- swalk(fld.Type, ixx, indent+". ")
- }
- default:
- // everything is either a struct or a field (that's an assumption about Options)
- fields = append(fields, field{ix})
- }
-}
-
-type sessionOption struct {
- Name string
- Type string
- Current string
- Default string
-}
-
-func showOptions(o *source.Options) []sessionOption {
- var out []sessionOption
- t := reflect.TypeOf(*o)
- swalk(t, []int{}, "")
- v := reflect.ValueOf(*o)
- do := reflect.ValueOf(*source.DefaultOptions())
- for _, f := range fields {
- val := v.FieldByIndex(f.index)
- def := do.FieldByIndex(f.index)
- tx := t.FieldByIndex(f.index)
- is := strVal(val)
- was := strVal(def)
- out = append(out, sessionOption{
- Name: tx.Name,
- Type: tx.Type.String(),
- Current: is,
- Default: was,
- })
- }
- sort.Slice(out, func(i, j int) bool {
- rd := out[i].Current == out[i].Default
- ld := out[j].Current == out[j].Default
- if rd != ld {
- return ld
- }
- return out[i].Name < out[j].Name
- })
- return out
-}
-
-func strVal(val reflect.Value) string {
- switch val.Kind() {
- case reflect.Bool:
- return fmt.Sprintf("%v", val.Interface())
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return fmt.Sprintf("%v", val.Interface())
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- return fmt.Sprintf("%v", val.Interface())
- case reflect.Uintptr, reflect.UnsafePointer:
- return fmt.Sprintf("0x%x", val.Pointer())
- case reflect.Complex64, reflect.Complex128:
- return fmt.Sprintf("%v", val.Complex())
- case reflect.Array, reflect.Slice:
- ans := []string{}
- for i := 0; i < val.Len(); i++ {
- ans = append(ans, strVal(val.Index(i)))
- }
- sort.Strings(ans)
- return fmt.Sprintf("%v", ans)
- case reflect.Chan, reflect.Func, reflect.Ptr:
- return val.Kind().String()
- case reflect.Struct:
- var x source.Analyzer
- if val.Type() != reflect.TypeOf(x) {
- return val.Kind().String()
- }
- // this is sort of ugly, but usable
- str := val.FieldByName("Analyzer").Elem().FieldByName("Doc").String()
- ix := strings.Index(str, "\n")
- if ix == -1 {
- ix = len(str)
- }
- return str[:ix]
- case reflect.String:
- return fmt.Sprintf("%q", val.Interface())
- case reflect.Map:
- ans := []string{}
- iter := val.MapRange()
- for iter.Next() {
- k := iter.Key()
- v := iter.Value()
- ans = append(ans, fmt.Sprintf("%s:%s, ", strVal(k), strVal(v)))
- }
- sort.Strings(ans)
- return fmt.Sprintf("%v", ans)
- }
- return fmt.Sprintf("??%s??", val.Type())
-}
diff --git a/internal/lsp/debug/log/log.go b/internal/lsp/debug/log/log.go
deleted file mode 100644
index 44638f8a5..000000000
--- a/internal/lsp/debug/log/log.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package log provides helper methods for exporting log events to the
-// internal/event package.
-package log
-
-import (
- "context"
- "fmt"
-
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/event/label"
- "golang.org/x/tools/internal/lsp/debug/tag"
-)
-
-// Level parameterizes log severity.
-type Level int
-
-const (
- _ Level = iota
- Error
- Warning
- Info
- Debug
- Trace
-)
-
-// Log exports a log event labeled with level l.
-func (l Level) Log(ctx context.Context, msg string) {
- event.Log(ctx, msg, tag.Level.Of(int(l)))
-}
-
-// Logf formats and exports a log event labeled with level l.
-func (l Level) Logf(ctx context.Context, format string, args ...interface{}) {
- l.Log(ctx, fmt.Sprintf(format, args...))
-}
-
-// LabeledLevel extracts the labeled log l
-func LabeledLevel(lm label.Map) Level {
- return Level(tag.Level.Get(lm))
-}
diff --git a/internal/lsp/debug/metrics.go b/internal/lsp/debug/metrics.go
deleted file mode 100644
index 8efc1d495..000000000
--- a/internal/lsp/debug/metrics.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package debug
-
-import (
- "golang.org/x/tools/internal/event/export/metric"
- "golang.org/x/tools/internal/event/label"
- "golang.org/x/tools/internal/lsp/debug/tag"
-)
-
-var (
- // the distributions we use for histograms
- bytesDistribution = []int64{1 << 10, 1 << 11, 1 << 12, 1 << 14, 1 << 16, 1 << 20}
- millisecondsDistribution = []float64{0.1, 0.5, 1, 2, 5, 10, 50, 100, 500, 1000, 5000, 10000, 50000, 100000}
-
- receivedBytes = metric.HistogramInt64{
- Name: "received_bytes",
- Description: "Distribution of received bytes, by method.",
- Keys: []label.Key{tag.RPCDirection, tag.Method},
- Buckets: bytesDistribution,
- }
-
- sentBytes = metric.HistogramInt64{
- Name: "sent_bytes",
- Description: "Distribution of sent bytes, by method.",
- Keys: []label.Key{tag.RPCDirection, tag.Method},
- Buckets: bytesDistribution,
- }
-
- latency = metric.HistogramFloat64{
- Name: "latency",
- Description: "Distribution of latency in milliseconds, by method.",
- Keys: []label.Key{tag.RPCDirection, tag.Method},
- Buckets: millisecondsDistribution,
- }
-
- started = metric.Scalar{
- Name: "started",
- Description: "Count of RPCs started by method.",
- Keys: []label.Key{tag.RPCDirection, tag.Method},
- }
-
- completed = metric.Scalar{
- Name: "completed",
- Description: "Count of RPCs completed by method and status.",
- Keys: []label.Key{tag.RPCDirection, tag.Method, tag.StatusCode},
- }
-)
-
-func registerMetrics(m *metric.Config) {
- receivedBytes.Record(m, tag.ReceivedBytes)
- sentBytes.Record(m, tag.SentBytes)
- latency.Record(m, tag.Latency)
- started.Count(m, tag.Started)
- completed.Count(m, tag.Latency)
-}
diff --git a/internal/lsp/debug/rpc.go b/internal/lsp/debug/rpc.go
deleted file mode 100644
index 033ee3797..000000000
--- a/internal/lsp/debug/rpc.go
+++ /dev/null
@@ -1,239 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package debug
-
-import (
- "context"
- "fmt"
- "html/template"
- "net/http"
- "sort"
- "sync"
- "time"
-
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/event/core"
- "golang.org/x/tools/internal/event/export"
- "golang.org/x/tools/internal/event/label"
- "golang.org/x/tools/internal/lsp/debug/tag"
-)
-
-var RPCTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(`
-{{define "title"}}RPC Information{{end}}
-{{define "body"}}
- <H2>Inbound</H2>
- {{template "rpcSection" .Inbound}}
- <H2>Outbound</H2>
- {{template "rpcSection" .Outbound}}
-{{end}}
-{{define "rpcSection"}}
- {{range .}}<P>
- <b>{{.Method}}</b> {{.Started}} <a href="/trace/{{.Method}}">traces</a> ({{.InProgress}} in progress)
- <br>
- <i>Latency</i> {{with .Latency}}{{.Mean}} ({{.Min}}<{{.Max}}){{end}}
- <i>By bucket</i> 0s {{range .Latency.Values}}{{if gt .Count 0}}<b>{{.Count}}</b> {{.Limit}} {{end}}{{end}}
- <br>
- <i>Received</i> {{.Received}} (avg. {{.ReceivedMean}})
- <i>Sent</i> {{.Sent}} (avg. {{.SentMean}})
- <br>
- <i>Result codes</i> {{range .Codes}}{{.Key}}={{.Count}} {{end}}
- </P>
- {{end}}
-{{end}}
-`))
-
-type Rpcs struct { // exported for testing
- mu sync.Mutex
- Inbound []*rpcStats // stats for incoming lsp rpcs sorted by method name
- Outbound []*rpcStats // stats for outgoing lsp rpcs sorted by method name
-}
-
-type rpcStats struct {
- Method string
- Started int64
- Completed int64
-
- Latency rpcTimeHistogram
- Received byteUnits
- Sent byteUnits
- Codes []*rpcCodeBucket
-}
-
-type rpcTimeHistogram struct {
- Sum timeUnits
- Count int64
- Min timeUnits
- Max timeUnits
- Values []rpcTimeBucket
-}
-
-type rpcTimeBucket struct {
- Limit timeUnits
- Count int64
-}
-
-type rpcCodeBucket struct {
- Key string
- Count int64
-}
-
-func (r *Rpcs) ProcessEvent(ctx context.Context, ev core.Event, lm label.Map) context.Context {
- r.mu.Lock()
- defer r.mu.Unlock()
- switch {
- case event.IsStart(ev):
- if _, stats := r.getRPCSpan(ctx, ev); stats != nil {
- stats.Started++
- }
- case event.IsEnd(ev):
- span, stats := r.getRPCSpan(ctx, ev)
- if stats != nil {
- endRPC(ctx, ev, span, stats)
- }
- case event.IsMetric(ev):
- sent := byteUnits(tag.SentBytes.Get(lm))
- rec := byteUnits(tag.ReceivedBytes.Get(lm))
- if sent != 0 || rec != 0 {
- if _, stats := r.getRPCSpan(ctx, ev); stats != nil {
- stats.Sent += sent
- stats.Received += rec
- }
- }
- }
- return ctx
-}
-
-func endRPC(ctx context.Context, ev core.Event, span *export.Span, stats *rpcStats) {
- // update the basic counts
- stats.Completed++
-
- // get and record the status code
- if status := getStatusCode(span); status != "" {
- var b *rpcCodeBucket
- for c, entry := range stats.Codes {
- if entry.Key == status {
- b = stats.Codes[c]
- break
- }
- }
- if b == nil {
- b = &rpcCodeBucket{Key: status}
- stats.Codes = append(stats.Codes, b)
- sort.Slice(stats.Codes, func(i int, j int) bool {
- return stats.Codes[i].Key < stats.Codes[j].Key
- })
- }
- b.Count++
- }
-
- // calculate latency if this was an rpc span
- elapsedTime := span.Finish().At().Sub(span.Start().At())
- latencyMillis := timeUnits(elapsedTime) / timeUnits(time.Millisecond)
- if stats.Latency.Count == 0 {
- stats.Latency.Min = latencyMillis
- stats.Latency.Max = latencyMillis
- } else {
- if stats.Latency.Min > latencyMillis {
- stats.Latency.Min = latencyMillis
- }
- if stats.Latency.Max < latencyMillis {
- stats.Latency.Max = latencyMillis
- }
- }
- stats.Latency.Count++
- stats.Latency.Sum += latencyMillis
- for i := range stats.Latency.Values {
- if stats.Latency.Values[i].Limit > latencyMillis {
- stats.Latency.Values[i].Count++
- break
- }
- }
-}
-
-func (r *Rpcs) getRPCSpan(ctx context.Context, ev core.Event) (*export.Span, *rpcStats) {
- // get the span
- span := export.GetSpan(ctx)
- if span == nil {
- return nil, nil
- }
- // use the span start event look up the correct stats block
- // we do this because it prevents us matching a sub span
- return span, r.getRPCStats(span.Start())
-}
-
-func (r *Rpcs) getRPCStats(lm label.Map) *rpcStats {
- method := tag.Method.Get(lm)
- if method == "" {
- return nil
- }
- set := &r.Inbound
- if tag.RPCDirection.Get(lm) != tag.Inbound {
- set = &r.Outbound
- }
- // get the record for this method
- index := sort.Search(len(*set), func(i int) bool {
- return (*set)[i].Method >= method
- })
-
- if index < len(*set) && (*set)[index].Method == method {
- return (*set)[index]
- }
-
- old := *set
- *set = make([]*rpcStats, len(old)+1)
- copy(*set, old[:index])
- copy((*set)[index+1:], old[index:])
- stats := &rpcStats{Method: method}
- stats.Latency.Values = make([]rpcTimeBucket, len(millisecondsDistribution))
- for i, m := range millisecondsDistribution {
- stats.Latency.Values[i].Limit = timeUnits(m)
- }
- (*set)[index] = stats
- return stats
-}
-
-func (s *rpcStats) InProgress() int64 { return s.Started - s.Completed }
-func (s *rpcStats) SentMean() byteUnits { return s.Sent / byteUnits(s.Started) }
-func (s *rpcStats) ReceivedMean() byteUnits { return s.Received / byteUnits(s.Started) }
-
-func (h *rpcTimeHistogram) Mean() timeUnits { return h.Sum / timeUnits(h.Count) }
-
-func getStatusCode(span *export.Span) string {
- for _, ev := range span.Events() {
- if status := tag.StatusCode.Get(ev); status != "" {
- return status
- }
- }
- return ""
-}
-
-func (r *Rpcs) getData(req *http.Request) interface{} {
- return r
-}
-
-func units(v float64, suffixes []string) string {
- s := ""
- for _, s = range suffixes {
- n := v / 1000
- if n < 1 {
- break
- }
- v = n
- }
- return fmt.Sprintf("%.2f%s", v, s)
-}
-
-type timeUnits float64
-
-func (v timeUnits) String() string {
- v = v * 1000 * 1000
- return units(float64(v), []string{"ns", "μs", "ms", "s"})
-}
-
-type byteUnits float64
-
-func (v byteUnits) String() string {
- return units(float64(v), []string{"B", "KB", "MB", "GB", "TB"})
-}
diff --git a/internal/lsp/debug/serve.go b/internal/lsp/debug/serve.go
deleted file mode 100644
index b6dba60ab..000000000
--- a/internal/lsp/debug/serve.go
+++ /dev/null
@@ -1,954 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package debug
-
-import (
- "archive/zip"
- "bytes"
- "context"
- "fmt"
- "html/template"
- "io"
- stdlog "log"
- "net"
- "net/http"
- "net/http/pprof"
- "os"
- "path"
- "path/filepath"
- "runtime"
- rpprof "runtime/pprof"
- "sort"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/event/core"
- "golang.org/x/tools/internal/event/export"
- "golang.org/x/tools/internal/event/export/metric"
- "golang.org/x/tools/internal/event/export/ocagent"
- "golang.org/x/tools/internal/event/export/prometheus"
- "golang.org/x/tools/internal/event/keys"
- "golang.org/x/tools/internal/event/label"
- "golang.org/x/tools/internal/lsp/cache"
- "golang.org/x/tools/internal/lsp/debug/log"
- "golang.org/x/tools/internal/lsp/debug/tag"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- errors "golang.org/x/xerrors"
-)
-
-type contextKeyType int
-
-const (
- instanceKey contextKeyType = iota
- traceKey
-)
-
-// An Instance holds all debug information associated with a gopls instance.
-type Instance struct {
- Logfile string
- StartTime time.Time
- ServerAddress string
- Workdir string
- OCAgentConfig string
-
- LogWriter io.Writer
-
- exporter event.Exporter
-
- ocagent *ocagent.Exporter
- prometheus *prometheus.Exporter
- rpcs *Rpcs
- traces *traces
- State *State
-
- serveMu sync.Mutex
- debugAddress string
- listenedDebugAddress string
-}
-
-// State holds debugging information related to the server state.
-type State struct {
- mu sync.Mutex
- clients []*Client
- servers []*Server
-
- // bugs maps bug description -> formatted event
- bugs map[string]string
-}
-
-func Bug(ctx context.Context, desc, format string, args ...interface{}) {
- labels := []label.Label{tag.Bug.Of(desc)}
- _, file, line, ok := runtime.Caller(1)
- if ok {
- labels = append(labels, tag.Callsite.Of(fmt.Sprintf("%s:%d", file, line)))
- }
- msg := fmt.Sprintf(format, args...)
- event.Log(ctx, msg, labels...)
-}
-
-type bug struct {
- Description, Event string
-}
-
-func (st *State) Bugs() []bug {
- st.mu.Lock()
- defer st.mu.Unlock()
- var bugs []bug
- for k, v := range st.bugs {
- bugs = append(bugs, bug{k, v})
- }
- sort.Slice(bugs, func(i, j int) bool {
- return bugs[i].Description < bugs[j].Description
- })
- return bugs
-}
-
-func (st *State) recordBug(description, event string) {
- st.mu.Lock()
- defer st.mu.Unlock()
- if st.bugs == nil {
- st.bugs = make(map[string]string)
- }
- st.bugs[description] = event
-}
-
-// Caches returns the set of Cache objects currently being served.
-func (st *State) Caches() []*cache.Cache {
- var caches []*cache.Cache
- seen := make(map[string]struct{})
- for _, client := range st.Clients() {
- cache, ok := client.Session.Cache().(*cache.Cache)
- if !ok {
- continue
- }
- if _, found := seen[cache.ID()]; found {
- continue
- }
- seen[cache.ID()] = struct{}{}
- caches = append(caches, cache)
- }
- return caches
-}
-
-// Cache returns the Cache that matches the supplied id.
-func (st *State) Cache(id string) *cache.Cache {
- for _, c := range st.Caches() {
- if c.ID() == id {
- return c
- }
- }
- return nil
-}
-
-// Sessions returns the set of Session objects currently being served.
-func (st *State) Sessions() []*cache.Session {
- var sessions []*cache.Session
- for _, client := range st.Clients() {
- sessions = append(sessions, client.Session)
- }
- return sessions
-}
-
-// Session returns the Session that matches the supplied id.
-func (st *State) Session(id string) *cache.Session {
- for _, s := range st.Sessions() {
- if s.ID() == id {
- return s
- }
- }
- return nil
-}
-
-// Views returns the set of View objects currently being served.
-func (st *State) Views() []*cache.View {
- var views []*cache.View
- for _, s := range st.Sessions() {
- for _, v := range s.Views() {
- if cv, ok := v.(*cache.View); ok {
- views = append(views, cv)
- }
- }
- }
- return views
-}
-
-// View returns the View that matches the supplied id.
-func (st *State) View(id string) *cache.View {
- for _, v := range st.Views() {
- if v.ID() == id {
- return v
- }
- }
- return nil
-}
-
-// Clients returns the set of Clients currently being served.
-func (st *State) Clients() []*Client {
- st.mu.Lock()
- defer st.mu.Unlock()
- clients := make([]*Client, len(st.clients))
- copy(clients, st.clients)
- return clients
-}
-
-// Client returns the Client matching the supplied id.
-func (st *State) Client(id string) *Client {
- for _, c := range st.Clients() {
- if c.Session.ID() == id {
- return c
- }
- }
- return nil
-}
-
-// Servers returns the set of Servers the instance is currently connected to.
-func (st *State) Servers() []*Server {
- st.mu.Lock()
- defer st.mu.Unlock()
- servers := make([]*Server, len(st.servers))
- copy(servers, st.servers)
- return servers
-}
-
-// A Client is an incoming connection from a remote client.
-type Client struct {
- Session *cache.Session
- DebugAddress string
- Logfile string
- GoplsPath string
- ServerID string
- Service protocol.Server
-}
-
-// A Server is an outgoing connection to a remote LSP server.
-type Server struct {
- ID string
- DebugAddress string
- Logfile string
- GoplsPath string
- ClientID string
-}
-
-// AddClient adds a client to the set being served.
-func (st *State) addClient(session *cache.Session) {
- st.mu.Lock()
- defer st.mu.Unlock()
- st.clients = append(st.clients, &Client{Session: session})
-}
-
-// DropClient removes a client from the set being served.
-func (st *State) dropClient(session source.Session) {
- st.mu.Lock()
- defer st.mu.Unlock()
- for i, c := range st.clients {
- if c.Session == session {
- copy(st.clients[i:], st.clients[i+1:])
- st.clients[len(st.clients)-1] = nil
- st.clients = st.clients[:len(st.clients)-1]
- return
- }
- }
-}
-
-// AddServer adds a server to the set being queried. In practice, there should
-// be at most one remote server.
-func (st *State) updateServer(server *Server) {
- st.mu.Lock()
- defer st.mu.Unlock()
- for i, existing := range st.servers {
- if existing.ID == server.ID {
- // Replace, rather than mutate, to avoid a race.
- newServers := make([]*Server, len(st.servers))
- copy(newServers, st.servers[:i])
- newServers[i] = server
- copy(newServers[i+1:], st.servers[i+1:])
- st.servers = newServers
- return
- }
- }
- st.servers = append(st.servers, server)
-}
-
-// DropServer drops a server from the set being queried.
-func (st *State) dropServer(id string) {
- st.mu.Lock()
- defer st.mu.Unlock()
- for i, s := range st.servers {
- if s.ID == id {
- copy(st.servers[i:], st.servers[i+1:])
- st.servers[len(st.servers)-1] = nil
- st.servers = st.servers[:len(st.servers)-1]
- return
- }
- }
-}
-
-// an http.ResponseWriter that filters writes
-type filterResponse struct {
- w http.ResponseWriter
- edit func([]byte) []byte
-}
-
-func (c filterResponse) Header() http.Header {
- return c.w.Header()
-}
-
-func (c filterResponse) Write(buf []byte) (int, error) {
- ans := c.edit(buf)
- return c.w.Write(ans)
-}
-
-func (c filterResponse) WriteHeader(n int) {
- c.w.WriteHeader(n)
-}
-
-// replace annoying nuls by spaces
-func cmdline(w http.ResponseWriter, r *http.Request) {
- fake := filterResponse{
- w: w,
- edit: func(buf []byte) []byte {
- return bytes.ReplaceAll(buf, []byte{0}, []byte{' '})
- },
- }
- pprof.Cmdline(fake, r)
-}
-
-func (i *Instance) getCache(r *http.Request) interface{} {
- return i.State.Cache(path.Base(r.URL.Path))
-}
-
-func (i *Instance) getSession(r *http.Request) interface{} {
- return i.State.Session(path.Base(r.URL.Path))
-}
-
-func (i *Instance) getClient(r *http.Request) interface{} {
- return i.State.Client(path.Base(r.URL.Path))
-}
-
-func (i *Instance) getServer(r *http.Request) interface{} {
- i.State.mu.Lock()
- defer i.State.mu.Unlock()
- id := path.Base(r.URL.Path)
- for _, s := range i.State.servers {
- if s.ID == id {
- return s
- }
- }
- return nil
-}
-
-func (i *Instance) getView(r *http.Request) interface{} {
- return i.State.View(path.Base(r.URL.Path))
-}
-
-func (i *Instance) getFile(r *http.Request) interface{} {
- identifier := path.Base(r.URL.Path)
- sid := path.Base(path.Dir(r.URL.Path))
- s := i.State.Session(sid)
- if s == nil {
- return nil
- }
- for _, o := range s.Overlays() {
- if o.FileIdentity().Hash == identifier {
- return o
- }
- }
- return nil
-}
-
-func (i *Instance) getInfo(r *http.Request) interface{} {
- buf := &bytes.Buffer{}
- i.PrintServerInfo(r.Context(), buf)
- return template.HTML(buf.String())
-}
-
-func (i *Instance) AddService(s protocol.Server, session *cache.Session) {
- for _, c := range i.State.clients {
- if c.Session == session {
- c.Service = s
- return
- }
- }
- stdlog.Printf("unable to find a Client to add the protocol.Server to")
-}
-
-func getMemory(_ *http.Request) interface{} {
- var m runtime.MemStats
- runtime.ReadMemStats(&m)
- return m
-}
-
-func init() {
- event.SetExporter(makeGlobalExporter(os.Stderr))
-}
-
-func GetInstance(ctx context.Context) *Instance {
- if ctx == nil {
- return nil
- }
- v := ctx.Value(instanceKey)
- if v == nil {
- return nil
- }
- return v.(*Instance)
-}
-
-// WithInstance creates debug instance ready for use using the supplied
-// configuration and stores it in the returned context.
-func WithInstance(ctx context.Context, workdir, agent string) context.Context {
- i := &Instance{
- StartTime: time.Now(),
- Workdir: workdir,
- OCAgentConfig: agent,
- }
- i.LogWriter = os.Stderr
- ocConfig := ocagent.Discover()
- //TODO: we should not need to adjust the discovered configuration
- ocConfig.Address = i.OCAgentConfig
- i.ocagent = ocagent.Connect(ocConfig)
- i.prometheus = prometheus.New()
- i.rpcs = &Rpcs{}
- i.traces = &traces{}
- i.State = &State{}
- i.exporter = makeInstanceExporter(i)
- return context.WithValue(ctx, instanceKey, i)
-}
-
-// SetLogFile sets the logfile for use with this instance.
-func (i *Instance) SetLogFile(logfile string, isDaemon bool) (func(), error) {
- // TODO: probably a better solution for deferring closure to the caller would
- // be for the debug instance to itself be closed, but this fixes the
- // immediate bug of logs not being captured.
- closeLog := func() {}
- if logfile != "" {
- if logfile == "auto" {
- if isDaemon {
- logfile = filepath.Join(os.TempDir(), fmt.Sprintf("gopls-daemon-%d.log", os.Getpid()))
- } else {
- logfile = filepath.Join(os.TempDir(), fmt.Sprintf("gopls-%d.log", os.Getpid()))
- }
- }
- f, err := os.Create(logfile)
- if err != nil {
- return nil, errors.Errorf("unable to create log file: %w", err)
- }
- closeLog = func() {
- defer f.Close()
- }
- stdlog.SetOutput(io.MultiWriter(os.Stderr, f))
- i.LogWriter = f
- }
- i.Logfile = logfile
- return closeLog, nil
-}
-
-// Serve starts and runs a debug server in the background on the given addr.
-// It also logs the port the server starts on, to allow for :0 auto assigned
-// ports.
-func (i *Instance) Serve(ctx context.Context, addr string) (string, error) {
- stdlog.SetFlags(stdlog.Lshortfile)
- if addr == "" {
- return "", nil
- }
- i.serveMu.Lock()
- defer i.serveMu.Unlock()
-
- if i.listenedDebugAddress != "" {
- // Already serving. Return the bound address.
- return i.listenedDebugAddress, nil
- }
-
- i.debugAddress = addr
- listener, err := net.Listen("tcp", i.debugAddress)
- if err != nil {
- return "", err
- }
- i.listenedDebugAddress = listener.Addr().String()
-
- port := listener.Addr().(*net.TCPAddr).Port
- if strings.HasSuffix(i.debugAddress, ":0") {
- stdlog.Printf("debug server listening at http://localhost:%d", port)
- }
- event.Log(ctx, "Debug serving", tag.Port.Of(port))
- go func() {
- mux := http.NewServeMux()
- mux.HandleFunc("/", render(MainTmpl, func(*http.Request) interface{} { return i }))
- mux.HandleFunc("/debug/", render(DebugTmpl, nil))
- mux.HandleFunc("/debug/pprof/", pprof.Index)
- mux.HandleFunc("/debug/pprof/cmdline", cmdline)
- mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
- mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
- mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
- if i.prometheus != nil {
- mux.HandleFunc("/metrics/", i.prometheus.Serve)
- }
- if i.rpcs != nil {
- mux.HandleFunc("/rpc/", render(RPCTmpl, i.rpcs.getData))
- }
- if i.traces != nil {
- mux.HandleFunc("/trace/", render(TraceTmpl, i.traces.getData))
- }
- mux.HandleFunc("/cache/", render(CacheTmpl, i.getCache))
- mux.HandleFunc("/session/", render(SessionTmpl, i.getSession))
- mux.HandleFunc("/view/", render(ViewTmpl, i.getView))
- mux.HandleFunc("/client/", render(ClientTmpl, i.getClient))
- mux.HandleFunc("/server/", render(ServerTmpl, i.getServer))
- mux.HandleFunc("/file/", render(FileTmpl, i.getFile))
- mux.HandleFunc("/info", render(InfoTmpl, i.getInfo))
- mux.HandleFunc("/memory", render(MemoryTmpl, getMemory))
- if err := http.Serve(listener, mux); err != nil {
- event.Error(ctx, "Debug server failed", err)
- return
- }
- event.Log(ctx, "Debug server finished")
- }()
- return i.listenedDebugAddress, nil
-}
-
-func (i *Instance) DebugAddress() string {
- i.serveMu.Lock()
- defer i.serveMu.Unlock()
- return i.debugAddress
-}
-
-func (i *Instance) ListenedDebugAddress() string {
- i.serveMu.Lock()
- defer i.serveMu.Unlock()
- return i.listenedDebugAddress
-}
-
-// MonitorMemory starts recording memory statistics each second.
-func (i *Instance) MonitorMemory(ctx context.Context) {
- tick := time.NewTicker(time.Second)
- nextThresholdGiB := uint64(1)
- go func() {
- for {
- <-tick.C
- var mem runtime.MemStats
- runtime.ReadMemStats(&mem)
- if mem.HeapAlloc < nextThresholdGiB*1<<30 {
- continue
- }
- if err := i.writeMemoryDebug(nextThresholdGiB, true); err != nil {
- event.Error(ctx, "writing memory debug info", err)
- }
- if err := i.writeMemoryDebug(nextThresholdGiB, false); err != nil {
- event.Error(ctx, "writing memory debug info", err)
- }
- event.Log(ctx, fmt.Sprintf("Wrote memory usage debug info to %v", os.TempDir()))
- nextThresholdGiB++
- }
- }()
-}
-
-func (i *Instance) writeMemoryDebug(threshold uint64, withNames bool) error {
- suffix := "withnames"
- if !withNames {
- suffix = "nonames"
- }
-
- filename := fmt.Sprintf("gopls.%d-%dGiB-%s.zip", os.Getpid(), threshold, suffix)
- zipf, err := os.OpenFile(filepath.Join(os.TempDir(), filename), os.O_CREATE|os.O_RDWR, 0644)
- if err != nil {
- return err
- }
- zipw := zip.NewWriter(zipf)
-
- f, err := zipw.Create("heap.pb.gz")
- if err != nil {
- return err
- }
- if err := rpprof.Lookup("heap").WriteTo(f, 0); err != nil {
- return err
- }
-
- f, err = zipw.Create("goroutines.txt")
- if err != nil {
- return err
- }
- if err := rpprof.Lookup("goroutine").WriteTo(f, 1); err != nil {
- return err
- }
-
- for _, cache := range i.State.Caches() {
- cf, err := zipw.Create(fmt.Sprintf("cache-%v.html", cache.ID()))
- if err != nil {
- return err
- }
- if _, err := cf.Write([]byte(cache.PackageStats(withNames))); err != nil {
- return err
- }
- }
-
- if err := zipw.Close(); err != nil {
- return err
- }
- return zipf.Close()
-}
-
-func makeGlobalExporter(stderr io.Writer) event.Exporter {
- p := export.Printer{}
- var pMu sync.Mutex
- return func(ctx context.Context, ev core.Event, lm label.Map) context.Context {
- i := GetInstance(ctx)
-
- if event.IsLog(ev) {
- // Don't log context cancellation errors.
- if err := keys.Err.Get(ev); errors.Is(err, context.Canceled) {
- return ctx
- }
- // Make sure any log messages without an instance go to stderr.
- if i == nil {
- pMu.Lock()
- p.WriteEvent(stderr, ev, lm)
- pMu.Unlock()
- }
- level := log.LabeledLevel(lm)
- // Exclude trace logs from LSP logs.
- if level < log.Trace {
- ctx = protocol.LogEvent(ctx, ev, lm, messageType(level))
- }
- }
- if i == nil {
- return ctx
- }
- return i.exporter(ctx, ev, lm)
- }
-}
-
-func messageType(l log.Level) protocol.MessageType {
- switch l {
- case log.Error:
- return protocol.Error
- case log.Warning:
- return protocol.Warning
- case log.Debug:
- return protocol.Log
- }
- return protocol.Info
-}
-
-func makeInstanceExporter(i *Instance) event.Exporter {
- exporter := func(ctx context.Context, ev core.Event, lm label.Map) context.Context {
- if i.ocagent != nil {
- ctx = i.ocagent.ProcessEvent(ctx, ev, lm)
- }
- if i.prometheus != nil {
- ctx = i.prometheus.ProcessEvent(ctx, ev, lm)
- }
- if i.rpcs != nil {
- ctx = i.rpcs.ProcessEvent(ctx, ev, lm)
- }
- if i.traces != nil {
- ctx = i.traces.ProcessEvent(ctx, ev, lm)
- }
- if event.IsLog(ev) {
- if s := cache.KeyCreateSession.Get(ev); s != nil {
- i.State.addClient(s)
- }
- if sid := tag.NewServer.Get(ev); sid != "" {
- i.State.updateServer(&Server{
- ID: sid,
- Logfile: tag.Logfile.Get(ev),
- DebugAddress: tag.DebugAddress.Get(ev),
- GoplsPath: tag.GoplsPath.Get(ev),
- ClientID: tag.ClientID.Get(ev),
- })
- }
- if s := cache.KeyShutdownSession.Get(ev); s != nil {
- i.State.dropClient(s)
- }
- if sid := tag.EndServer.Get(ev); sid != "" {
- i.State.dropServer(sid)
- }
- if s := cache.KeyUpdateSession.Get(ev); s != nil {
- if c := i.State.Client(s.ID()); c != nil {
- c.DebugAddress = tag.DebugAddress.Get(ev)
- c.Logfile = tag.Logfile.Get(ev)
- c.ServerID = tag.ServerID.Get(ev)
- c.GoplsPath = tag.GoplsPath.Get(ev)
- }
- }
- }
- if b := tag.Bug.Get(ev); b != "" {
- i.State.recordBug(b, fmt.Sprintf("%v", ev))
- }
- return ctx
- }
- // StdTrace must be above export.Spans below (by convention, export
- // middleware applies its wrapped exporter last).
- exporter = StdTrace(exporter)
- metrics := metric.Config{}
- registerMetrics(&metrics)
- exporter = metrics.Exporter(exporter)
- exporter = export.Spans(exporter)
- exporter = export.Labels(exporter)
- return exporter
-}
-
-type dataFunc func(*http.Request) interface{}
-
-func render(tmpl *template.Template, fun dataFunc) func(http.ResponseWriter, *http.Request) {
- return func(w http.ResponseWriter, r *http.Request) {
- var data interface{}
- if fun != nil {
- data = fun(r)
- }
- if err := tmpl.Execute(w, data); err != nil {
- event.Error(context.Background(), "", err)
- http.Error(w, err.Error(), http.StatusInternalServerError)
- }
- }
-}
-
-func commas(s string) string {
- for i := len(s); i > 3; {
- i -= 3
- s = s[:i] + "," + s[i:]
- }
- return s
-}
-
-func fuint64(v uint64) string {
- return commas(strconv.FormatUint(v, 10))
-}
-
-func fuint32(v uint32) string {
- return commas(strconv.FormatUint(uint64(v), 10))
-}
-
-func fcontent(v []byte) string {
- return string(v)
-}
-
-var BaseTemplate = template.Must(template.New("").Parse(`
-<html>
-<head>
-<title>{{template "title" .}}</title>
-<style>
-.profile-name{
- display:inline-block;
- width:6rem;
-}
-td.value {
- text-align: right;
-}
-ul.events {
- list-style-type: none;
-}
-
-</style>
-{{block "head" .}}{{end}}
-</head>
-<body>
-<a href="/">Main</a>
-<a href="/info">Info</a>
-<a href="/memory">Memory</a>
-<a href="/metrics">Metrics</a>
-<a href="/rpc">RPC</a>
-<a href="/trace">Trace</a>
-<hr>
-<h1>{{template "title" .}}</h1>
-{{block "body" .}}
-Unknown page
-{{end}}
-</body>
-</html>
-
-{{define "cachelink"}}<a href="/cache/{{.}}">Cache {{.}}</a>{{end}}
-{{define "clientlink"}}<a href="/client/{{.}}">Client {{.}}</a>{{end}}
-{{define "serverlink"}}<a href="/server/{{.}}">Server {{.}}</a>{{end}}
-{{define "sessionlink"}}<a href="/session/{{.}}">Session {{.}}</a>{{end}}
-{{define "viewlink"}}<a href="/view/{{.}}">View {{.}}</a>{{end}}
-{{define "filelink"}}<a href="/file/{{.Session}}/{{.FileIdentity.Hash}}">{{.FileIdentity.URI}}</a>{{end}}
-`)).Funcs(template.FuncMap{
- "fuint64": fuint64,
- "fuint32": fuint32,
- "fcontent": fcontent,
- "localAddress": func(s string) string {
- // Try to translate loopback addresses to localhost, both for cosmetics and
- // because unspecified ipv6 addresses can break links on Windows.
- //
- // TODO(rfindley): In the future, it would be better not to assume the
- // server is running on localhost, and instead construct this address using
- // the remote host.
- host, port, err := net.SplitHostPort(s)
- if err != nil {
- return s
- }
- ip := net.ParseIP(host)
- if ip == nil {
- return s
- }
- if ip.IsLoopback() || ip.IsUnspecified() {
- return "localhost:" + port
- }
- return s
- },
- "options": func(s *cache.Session) []sessionOption {
- return showOptions(s.Options())
- },
-})
-
-var MainTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(`
-{{define "title"}}GoPls server information{{end}}
-{{define "body"}}
-<h2>Caches</h2>
-<ul>{{range .State.Caches}}<li>{{template "cachelink" .ID}}</li>{{end}}</ul>
-<h2>Sessions</h2>
-<ul>{{range .State.Sessions}}<li>{{template "sessionlink" .ID}} from {{template "cachelink" .Cache.ID}}</li>{{end}}</ul>
-<h2>Views</h2>
-<ul>{{range .State.Views}}<li>{{.Name}} is {{template "viewlink" .ID}} from {{template "sessionlink" .Session.ID}} in {{.Folder}}</li>{{end}}</ul>
-<h2>Clients</h2>
-<ul>{{range .State.Clients}}<li>{{template "clientlink" .Session.ID}}</li>{{end}}</ul>
-<h2>Servers</h2>
-<ul>{{range .State.Servers}}<li>{{template "serverlink" .ID}}</li>{{end}}</ul>
-<h2>Known bugs encountered</h2>
-<dl>{{range .State.Bugs}}<dt>{{.Description}}</dt><dd>{{.Event}}</dd>{{end}}</dl>
-{{end}}
-`))
-
-var InfoTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(`
-{{define "title"}}GoPls version information{{end}}
-{{define "body"}}
-{{.}}
-{{end}}
-`))
-
-var MemoryTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(`
-{{define "title"}}GoPls memory usage{{end}}
-{{define "head"}}<meta http-equiv="refresh" content="5">{{end}}
-{{define "body"}}
-<h2>Stats</h2>
-<table>
-<tr><td class="label">Allocated bytes</td><td class="value">{{fuint64 .HeapAlloc}}</td></tr>
-<tr><td class="label">Total allocated bytes</td><td class="value">{{fuint64 .TotalAlloc}}</td></tr>
-<tr><td class="label">System bytes</td><td class="value">{{fuint64 .Sys}}</td></tr>
-<tr><td class="label">Heap system bytes</td><td class="value">{{fuint64 .HeapSys}}</td></tr>
-<tr><td class="label">Malloc calls</td><td class="value">{{fuint64 .Mallocs}}</td></tr>
-<tr><td class="label">Frees</td><td class="value">{{fuint64 .Frees}}</td></tr>
-<tr><td class="label">Idle heap bytes</td><td class="value">{{fuint64 .HeapIdle}}</td></tr>
-<tr><td class="label">In use bytes</td><td class="value">{{fuint64 .HeapInuse}}</td></tr>
-<tr><td class="label">Released to system bytes</td><td class="value">{{fuint64 .HeapReleased}}</td></tr>
-<tr><td class="label">Heap object count</td><td class="value">{{fuint64 .HeapObjects}}</td></tr>
-<tr><td class="label">Stack in use bytes</td><td class="value">{{fuint64 .StackInuse}}</td></tr>
-<tr><td class="label">Stack from system bytes</td><td class="value">{{fuint64 .StackSys}}</td></tr>
-<tr><td class="label">Bucket hash bytes</td><td class="value">{{fuint64 .BuckHashSys}}</td></tr>
-<tr><td class="label">GC metadata bytes</td><td class="value">{{fuint64 .GCSys}}</td></tr>
-<tr><td class="label">Off heap bytes</td><td class="value">{{fuint64 .OtherSys}}</td></tr>
-</table>
-<h2>By size</h2>
-<table>
-<tr><th>Size</th><th>Mallocs</th><th>Frees</th></tr>
-{{range .BySize}}<tr><td class="value">{{fuint32 .Size}}</td><td class="value">{{fuint64 .Mallocs}}</td><td class="value">{{fuint64 .Frees}}</td></tr>{{end}}
-</table>
-{{end}}
-`))
-
-var DebugTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(`
-{{define "title"}}GoPls Debug pages{{end}}
-{{define "body"}}
-<a href="/debug/pprof">Profiling</a>
-{{end}}
-`))
-
-var CacheTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(`
-{{define "title"}}Cache {{.ID}}{{end}}
-{{define "body"}}
-<h2>memoize.Store entries</h2>
-<ul>{{range $k,$v := .MemStats}}<li>{{$k}} - {{$v}}</li>{{end}}</ul>
-<h2>Per-package usage - not accurate, for guidance only</h2>
-{{.PackageStats true}}
-{{end}}
-`))
-
-var ClientTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(`
-{{define "title"}}Client {{.Session.ID}}{{end}}
-{{define "body"}}
-Using session: <b>{{template "sessionlink" .Session.ID}}</b><br>
-{{if .DebugAddress}}Debug this client at: <a href="http://{{localAddress .DebugAddress}}">{{localAddress .DebugAddress}}</a><br>{{end}}
-Logfile: {{.Logfile}}<br>
-Gopls Path: {{.GoplsPath}}<br>
-<h2>Diagnostics</h2>
-{{/*Service: []protocol.Server; each server has map[uri]fileReports;
- each fileReport: map[diagnosticSoure]diagnosticReport
- diagnosticSource is one of 5 source
- diagnosticReport: snapshotID and map[hash]*source.Diagnostic
- sourceDiagnostic: struct {
- Range protocol.Range
- Message string
- Source string
- Code string
- CodeHref string
- Severity protocol.DiagnosticSeverity
- Tags []protocol.DiagnosticTag
-
- Related []RelatedInformation
- }
- RelatedInformation: struct {
- URI span.URI
- Range protocol.Range
- Message string
- }
- */}}
-<ul>{{range $k, $v := .Service.Diagnostics}}<li>{{$k}}:<ol>{{range $v}}<li>{{.}}</li>{{end}}</ol></li>{{end}}</ul>
-{{end}}
-`))
-
-var ServerTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(`
-{{define "title"}}Server {{.ID}}{{end}}
-{{define "body"}}
-{{if .DebugAddress}}Debug this server at: <a href="http://{{localAddress .DebugAddress}}">{{localAddress .DebugAddress}}</a><br>{{end}}
-Logfile: {{.Logfile}}<br>
-Gopls Path: {{.GoplsPath}}<br>
-{{end}}
-`))
-
-var SessionTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(`
-{{define "title"}}Session {{.ID}}{{end}}
-{{define "body"}}
-From: <b>{{template "cachelink" .Cache.ID}}</b><br>
-<h2>Views</h2>
-<ul>{{range .Views}}<li>{{.Name}} is {{template "viewlink" .ID}} in {{.Folder}}</li>{{end}}</ul>
-<h2>Overlays</h2>
-<ul>{{range .Overlays}}<li>{{template "filelink" .}}</li>{{end}}</ul>
-<h2>Options</h2>
-{{range options .}}
-<p><b>{{.Name}}</b> {{.Type}}</p>
-<p><i>default:</i> {{.Default}}</p>
-{{if ne .Default .Current}}<p><i>current:</i> {{.Current}}</p>{{end}}
-{{end}}
-{{end}}
-`))
-
-var ViewTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(`
-{{define "title"}}View {{.ID}}{{end}}
-{{define "body"}}
-Name: <b>{{.Name}}</b><br>
-Folder: <b>{{.Folder}}</b><br>
-From: <b>{{template "sessionlink" .Session.ID}}</b><br>
-<h2>Environment</h2>
-<ul>{{range .Options.Env}}<li>{{.}}</li>{{end}}</ul>
-{{end}}
-`))
-
-var FileTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(`
-{{define "title"}}Overlay {{.FileIdentity.Hash}}{{end}}
-{{define "body"}}
-{{with .}}
- From: <b>{{template "sessionlink" .Session}}</b><br>
- URI: <b>{{.URI}}</b><br>
- Identifier: <b>{{.FileIdentity.Hash}}</b><br>
- Version: <b>{{.Version}}</b><br>
- Kind: <b>{{.Kind}}</b><br>
-{{end}}
-<h3>Contents</h3>
-<pre>{{fcontent .Read}}</pre>
-{{end}}
-`))
diff --git a/internal/lsp/debug/tag/tag.go b/internal/lsp/debug/tag/tag.go
deleted file mode 100644
index 1d00038f0..000000000
--- a/internal/lsp/debug/tag/tag.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package tag provides the labels used for telemetry throughout gopls.
-package tag
-
-import (
- "golang.org/x/tools/internal/event/keys"
-)
-
-var (
- // create the label keys we use
- Method = keys.NewString("method", "")
- StatusCode = keys.NewString("status.code", "")
- StatusMessage = keys.NewString("status.message", "")
- RPCID = keys.NewString("id", "")
- RPCDirection = keys.NewString("direction", "")
- File = keys.NewString("file", "")
- Directory = keys.New("directory", "")
- URI = keys.New("URI", "")
- Package = keys.NewString("package", "") // Package ID
- PackagePath = keys.NewString("package_path", "")
- Query = keys.New("query", "")
- Snapshot = keys.NewUInt64("snapshot", "")
- Operation = keys.NewString("operation", "")
-
- Position = keys.New("position", "")
- Category = keys.NewString("category", "")
- PackageCount = keys.NewInt("packages", "")
- Files = keys.New("files", "")
- Port = keys.NewInt("port", "")
- Type = keys.New("type", "")
- HoverKind = keys.NewString("hoverkind", "")
-
- NewServer = keys.NewString("new_server", "A new server was added")
- EndServer = keys.NewString("end_server", "A server was shut down")
-
- ServerID = keys.NewString("server", "The server ID an event is related to")
- Logfile = keys.NewString("logfile", "")
- DebugAddress = keys.NewString("debug_address", "")
- GoplsPath = keys.NewString("gopls_path", "")
- ClientID = keys.NewString("client_id", "")
-
- Level = keys.NewInt("level", "The logging level")
-
- // Bug tracks occurrences of known bugs in the server.
- Bug = keys.NewString("bug", "A bug has occurred")
- Callsite = keys.NewString("callsite", "gopls function call site")
-)
-
-var (
- // create the stats we measure
- Started = keys.NewInt64("started", "Count of started RPCs.")
- ReceivedBytes = keys.NewInt64("received_bytes", "Bytes received.") //, unit.Bytes)
- SentBytes = keys.NewInt64("sent_bytes", "Bytes sent.") //, unit.Bytes)
- Latency = keys.NewFloat64("latency_ms", "Elapsed time in milliseconds") //, unit.Milliseconds)
-)
-
-const (
- Inbound = "in"
- Outbound = "out"
-)
diff --git a/internal/lsp/debug/trace.go b/internal/lsp/debug/trace.go
deleted file mode 100644
index ca612867a..000000000
--- a/internal/lsp/debug/trace.go
+++ /dev/null
@@ -1,226 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package debug
-
-import (
- "bytes"
- "context"
- "fmt"
- "html/template"
- "net/http"
- "runtime/trace"
- "sort"
- "strings"
- "sync"
- "time"
-
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/event/core"
- "golang.org/x/tools/internal/event/export"
- "golang.org/x/tools/internal/event/label"
-)
-
-var TraceTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(`
-{{define "title"}}Trace Information{{end}}
-{{define "body"}}
- {{range .Traces}}<a href="/trace/{{.Name}}">{{.Name}}</a> last: {{.Last.Duration}}, longest: {{.Longest.Duration}}<br>{{end}}
- {{if .Selected}}
- <H2>{{.Selected.Name}}</H2>
- {{if .Selected.Last}}<H3>Last</H3><ul>{{template "details" .Selected.Last}}</ul>{{end}}
- {{if .Selected.Longest}}<H3>Longest</H3><ul>{{template "details" .Selected.Longest}}</ul>{{end}}
- {{end}}
-{{end}}
-{{define "details"}}
- <li>{{.Offset}} {{.Name}} {{.Duration}} {{.Tags}}</li>
- {{if .Events}}<ul class=events>{{range .Events}}<li>{{.Offset}} {{.Tags}}</li>{{end}}</ul>{{end}}
- {{if .Children}}<ul>{{range .Children}}{{template "details" .}}{{end}}</ul>{{end}}
-{{end}}
-`))
-
-type traces struct {
- mu sync.Mutex
- sets map[string]*traceSet
- unfinished map[export.SpanContext]*traceData
-}
-
-type TraceResults struct { // exported for testing
- Traces []*traceSet
- Selected *traceSet
-}
-
-type traceSet struct {
- Name string
- Last *traceData
- Longest *traceData
-}
-
-type traceData struct {
- TraceID export.TraceID
- SpanID export.SpanID
- ParentID export.SpanID
- Name string
- Start time.Time
- Finish time.Time
- Offset time.Duration
- Duration time.Duration
- Tags string
- Events []traceEvent
- Children []*traceData
-}
-
-type traceEvent struct {
- Time time.Time
- Offset time.Duration
- Tags string
-}
-
-func StdTrace(exporter event.Exporter) event.Exporter {
- return func(ctx context.Context, ev core.Event, lm label.Map) context.Context {
- span := export.GetSpan(ctx)
- if span == nil {
- return exporter(ctx, ev, lm)
- }
- switch {
- case event.IsStart(ev):
- if span.ParentID.IsValid() {
- region := trace.StartRegion(ctx, span.Name)
- ctx = context.WithValue(ctx, traceKey, region)
- } else {
- var task *trace.Task
- ctx, task = trace.NewTask(ctx, span.Name)
- ctx = context.WithValue(ctx, traceKey, task)
- }
- // Log the start event as it may contain useful labels.
- msg := formatEvent(ctx, ev, lm)
- trace.Log(ctx, "start", msg)
- case event.IsLog(ev):
- category := ""
- if event.IsError(ev) {
- category = "error"
- }
- msg := formatEvent(ctx, ev, lm)
- trace.Log(ctx, category, msg)
- case event.IsEnd(ev):
- if v := ctx.Value(traceKey); v != nil {
- v.(interface{ End() }).End()
- }
- }
- return exporter(ctx, ev, lm)
- }
-}
-
-func formatEvent(ctx context.Context, ev core.Event, lm label.Map) string {
- buf := &bytes.Buffer{}
- p := export.Printer{}
- p.WriteEvent(buf, ev, lm)
- return buf.String()
-}
-
-func (t *traces) ProcessEvent(ctx context.Context, ev core.Event, lm label.Map) context.Context {
- t.mu.Lock()
- defer t.mu.Unlock()
- span := export.GetSpan(ctx)
- if span == nil {
- return ctx
- }
-
- switch {
- case event.IsStart(ev):
- if t.sets == nil {
- t.sets = make(map[string]*traceSet)
- t.unfinished = make(map[export.SpanContext]*traceData)
- }
- // just starting, add it to the unfinished map
- td := &traceData{
- TraceID: span.ID.TraceID,
- SpanID: span.ID.SpanID,
- ParentID: span.ParentID,
- Name: span.Name,
- Start: span.Start().At(),
- Tags: renderLabels(span.Start()),
- }
- t.unfinished[span.ID] = td
- // and wire up parents if we have them
- if !span.ParentID.IsValid() {
- return ctx
- }
- parentID := export.SpanContext{TraceID: span.ID.TraceID, SpanID: span.ParentID}
- parent, found := t.unfinished[parentID]
- if !found {
- // trace had an invalid parent, so it cannot itself be valid
- return ctx
- }
- parent.Children = append(parent.Children, td)
-
- case event.IsEnd(ev):
- // finishing, must be already in the map
- td, found := t.unfinished[span.ID]
- if !found {
- return ctx // if this happens we are in a bad place
- }
- delete(t.unfinished, span.ID)
-
- td.Finish = span.Finish().At()
- td.Duration = span.Finish().At().Sub(span.Start().At())
- events := span.Events()
- td.Events = make([]traceEvent, len(events))
- for i, event := range events {
- td.Events[i] = traceEvent{
- Time: event.At(),
- Tags: renderLabels(event),
- }
- }
-
- set, ok := t.sets[span.Name]
- if !ok {
- set = &traceSet{Name: span.Name}
- t.sets[span.Name] = set
- }
- set.Last = td
- if set.Longest == nil || set.Last.Duration > set.Longest.Duration {
- set.Longest = set.Last
- }
- if !td.ParentID.IsValid() {
- fillOffsets(td, td.Start)
- }
- }
- return ctx
-}
-
-func (t *traces) getData(req *http.Request) interface{} {
- if len(t.sets) == 0 {
- return nil
- }
- data := TraceResults{}
- data.Traces = make([]*traceSet, 0, len(t.sets))
- for _, set := range t.sets {
- data.Traces = append(data.Traces, set)
- }
- sort.Slice(data.Traces, func(i, j int) bool { return data.Traces[i].Name < data.Traces[j].Name })
- if bits := strings.SplitN(req.URL.Path, "/trace/", 2); len(bits) > 1 {
- data.Selected = t.sets[bits[1]]
- }
- return data
-}
-
-func fillOffsets(td *traceData, start time.Time) {
- td.Offset = td.Start.Sub(start)
- for i := range td.Events {
- td.Events[i].Offset = td.Events[i].Time.Sub(start)
- }
- for _, child := range td.Children {
- fillOffsets(child, start)
- }
-}
-
-func renderLabels(labels label.List) string {
- buf := &bytes.Buffer{}
- for index := 0; labels.Valid(index); index++ {
- if l := labels.Label(index); l.Valid() {
- fmt.Fprintf(buf, "%v ", l)
- }
- }
- return buf.String()
-}
diff --git a/internal/lsp/definition.go b/internal/lsp/definition.go
deleted file mode 100644
index 599228a89..000000000
--- a/internal/lsp/definition.go
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lsp
-
-import (
- "context"
-
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/lsp/template"
-)
-
-func (s *Server) definition(ctx context.Context, params *protocol.DefinitionParams) ([]protocol.Location, error) {
- snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.UnknownKind)
- defer release()
- if !ok {
- return nil, err
- }
- if snapshot.View().FileKind(fh) == source.Tmpl {
- return template.Definition(snapshot, fh, params.Position)
- }
- ident, err := source.Identifier(ctx, snapshot, fh, params.Position)
- if err != nil {
- return nil, err
- }
- if ident.IsImport() && !snapshot.View().Options().ImportShortcut.ShowDefinition() {
- return nil, nil
- }
- var locations []protocol.Location
- for _, ref := range ident.Declaration.MappedRange {
- decRange, err := ref.Range()
- if err != nil {
- return nil, err
- }
-
- locations = append(locations, protocol.Location{
- URI: protocol.URIFromSpanURI(ref.URI()),
- Range: decRange,
- })
- }
-
- return locations, nil
-}
-
-func (s *Server) typeDefinition(ctx context.Context, params *protocol.TypeDefinitionParams) ([]protocol.Location, error) {
- snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.Go)
- defer release()
- if !ok {
- return nil, err
- }
- ident, err := source.Identifier(ctx, snapshot, fh, params.Position)
- if err != nil {
- return nil, err
- }
- identRange, err := ident.Type.Range()
- if err != nil {
- return nil, err
- }
- return []protocol.Location{
- {
- URI: protocol.URIFromSpanURI(ident.Type.URI()),
- Range: identRange,
- },
- }, nil
-}
diff --git a/internal/lsp/diagnostics.go b/internal/lsp/diagnostics.go
deleted file mode 100644
index 3bf81226c..000000000
--- a/internal/lsp/diagnostics.go
+++ /dev/null
@@ -1,649 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lsp
-
-import (
- "context"
- "crypto/sha256"
- "fmt"
- "os"
- "path/filepath"
- "strings"
- "sync"
- "time"
-
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/lsp/debug/log"
- "golang.org/x/tools/internal/lsp/debug/tag"
- "golang.org/x/tools/internal/lsp/mod"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/lsp/template"
- "golang.org/x/tools/internal/lsp/work"
- "golang.org/x/tools/internal/span"
- "golang.org/x/tools/internal/xcontext"
- errors "golang.org/x/xerrors"
-)
-
-// diagnosticSource differentiates different sources of diagnostics.
-type diagnosticSource int
-
-const (
- modSource diagnosticSource = iota
- gcDetailsSource
- analysisSource
- typeCheckSource
- orphanedSource
- workSource
-)
-
-// A diagnosticReport holds results for a single diagnostic source.
-type diagnosticReport struct {
- snapshotID uint64
- publishedHash string
- diags map[string]*source.Diagnostic
-}
-
-// fileReports holds a collection of diagnostic reports for a single file, as
-// well as the hash of the last published set of diagnostics.
-type fileReports struct {
- snapshotID uint64
- publishedHash string
- reports map[diagnosticSource]diagnosticReport
-}
-
-func (d diagnosticSource) String() string {
- switch d {
- case modSource:
- return "FromSource"
- case gcDetailsSource:
- return "FromGCDetails"
- case analysisSource:
- return "FromAnalysis"
- case typeCheckSource:
- return "FromTypeChecking"
- case orphanedSource:
- return "FromOrphans"
- default:
- return fmt.Sprintf("From?%d?", d)
- }
-}
-
-// hashDiagnostics computes a hash to identify diags.
-func hashDiagnostics(diags ...*source.Diagnostic) string {
- source.SortDiagnostics(diags)
- h := sha256.New()
- for _, d := range diags {
- for _, t := range d.Tags {
- fmt.Fprintf(h, "%s", t)
- }
- for _, r := range d.Related {
- fmt.Fprintf(h, "%s%s%s", r.URI, r.Message, r.Range)
- }
- fmt.Fprintf(h, "%s%s%s%s", d.Message, d.Range, d.Severity, d.Source)
- }
- return fmt.Sprintf("%x", h.Sum(nil))
-}
-
-func (s *Server) diagnoseDetached(snapshot source.Snapshot) {
- ctx := snapshot.BackgroundContext()
- ctx = xcontext.Detach(ctx)
- s.diagnose(ctx, snapshot, false)
- s.publishDiagnostics(ctx, true, snapshot)
-}
-
-func (s *Server) diagnoseSnapshots(snapshots map[source.Snapshot][]span.URI, onDisk bool) {
- var diagnosticWG sync.WaitGroup
- for snapshot, uris := range snapshots {
- diagnosticWG.Add(1)
- go func(snapshot source.Snapshot, uris []span.URI) {
- defer diagnosticWG.Done()
- s.diagnoseSnapshot(snapshot, uris, onDisk)
- }(snapshot, uris)
- }
- diagnosticWG.Wait()
-}
-
-func (s *Server) diagnoseSnapshot(snapshot source.Snapshot, changedURIs []span.URI, onDisk bool) {
- ctx := snapshot.BackgroundContext()
- ctx, done := event.Start(ctx, "Server.diagnoseSnapshot", tag.Snapshot.Of(snapshot.ID()))
- defer done()
-
- delay := snapshot.View().Options().DiagnosticsDelay
- if delay > 0 {
- // 2-phase diagnostics.
- //
- // The first phase just parses and checks packages that have been
- // affected by file modifications (no analysis).
- //
- // The second phase does everything, and is debounced by the configured
- // delay.
- s.diagnoseChangedFiles(ctx, snapshot, changedURIs, onDisk)
- s.publishDiagnostics(ctx, false, snapshot)
- if ok := <-s.diagDebouncer.debounce(snapshot.View().Name(), snapshot.ID(), time.After(delay)); ok {
- s.diagnose(ctx, snapshot, false)
- s.publishDiagnostics(ctx, true, snapshot)
- }
- return
- }
-
- // Ignore possible workspace configuration warnings in the normal flow.
- s.diagnose(ctx, snapshot, false)
- s.publishDiagnostics(ctx, true, snapshot)
-}
-
-func (s *Server) diagnoseChangedFiles(ctx context.Context, snapshot source.Snapshot, uris []span.URI, onDisk bool) {
- ctx, done := event.Start(ctx, "Server.diagnoseChangedFiles", tag.Snapshot.Of(snapshot.ID()))
- defer done()
-
- packages := make(map[source.Package]struct{})
- for _, uri := range uris {
- // If the change is only on-disk and the file is not open, don't
- // directly request its package. It may not be a workspace package.
- if onDisk && !snapshot.IsOpen(uri) {
- continue
- }
- // If the file is not known to the snapshot (e.g., if it was deleted),
- // don't diagnose it.
- if snapshot.FindFile(uri) == nil {
- continue
- }
- // Don't call PackagesForFile for builtin.go, as it results in a
- // command-line-arguments load.
- if snapshot.IsBuiltin(ctx, uri) {
- continue
- }
- pkgs, err := snapshot.PackagesForFile(ctx, uri, source.TypecheckFull, false)
- if err != nil {
- // TODO (findleyr): we should probably do something with the error here,
- // but as of now this can fail repeatedly if load fails, so can be too
- // noisy to log (and we'll handle things later in the slow pass).
- continue
- }
- for _, pkg := range pkgs {
- packages[pkg] = struct{}{}
- }
- }
- var wg sync.WaitGroup
- for pkg := range packages {
- wg.Add(1)
-
- go func(pkg source.Package) {
- defer wg.Done()
-
- s.diagnosePkg(ctx, snapshot, pkg, false)
- }(pkg)
- }
- wg.Wait()
-}
-
-// diagnose is a helper function for running diagnostics with a given context.
-// Do not call it directly. forceAnalysis is only true for testing purposes.
-func (s *Server) diagnose(ctx context.Context, snapshot source.Snapshot, forceAnalysis bool) {
- ctx, done := event.Start(ctx, "Server.diagnose", tag.Snapshot.Of(snapshot.ID()))
- defer done()
-
- // Wait for a free diagnostics slot.
- select {
- case <-ctx.Done():
- return
- case s.diagnosticsSema <- struct{}{}:
- }
- defer func() {
- <-s.diagnosticsSema
- }()
-
- // First, diagnose the go.mod file.
- modReports, modErr := mod.Diagnostics(ctx, snapshot)
- if ctx.Err() != nil {
- log.Trace.Log(ctx, "diagnose cancelled")
- return
- }
- if modErr != nil {
- event.Error(ctx, "warning: diagnose go.mod", modErr, tag.Directory.Of(snapshot.View().Folder().Filename()), tag.Snapshot.Of(snapshot.ID()))
- }
- for id, diags := range modReports {
- if id.URI == "" {
- event.Error(ctx, "missing URI for module diagnostics", fmt.Errorf("empty URI"), tag.Directory.Of(snapshot.View().Folder().Filename()))
- continue
- }
- s.storeDiagnostics(snapshot, id.URI, modSource, diags)
- }
-
- // Diagnose the go.work file, if it exists.
- workReports, workErr := work.Diagnostics(ctx, snapshot)
- if ctx.Err() != nil {
- log.Trace.Log(ctx, "diagnose cancelled")
- return
- }
- if workErr != nil {
- event.Error(ctx, "warning: diagnose go.work", workErr, tag.Directory.Of(snapshot.View().Folder().Filename()), tag.Snapshot.Of(snapshot.ID()))
- }
- for id, diags := range workReports {
- if id.URI == "" {
- event.Error(ctx, "missing URI for work file diagnostics", fmt.Errorf("empty URI"), tag.Directory.Of(snapshot.View().Folder().Filename()))
- continue
- }
- s.storeDiagnostics(snapshot, id.URI, workSource, diags)
- }
-
- // Diagnose all of the packages in the workspace.
- wsPkgs, err := snapshot.ActivePackages(ctx)
- if s.shouldIgnoreError(ctx, snapshot, err) {
- return
- }
- criticalErr := snapshot.GetCriticalError(ctx)
-
- // Show the error as a progress error report so that it appears in the
- // status bar. If a client doesn't support progress reports, the error
- // will still be shown as a ShowMessage. If there is no error, any running
- // error progress reports will be closed.
- s.showCriticalErrorStatus(ctx, snapshot, criticalErr)
-
- // There may be .tmpl files.
- for _, f := range snapshot.Templates() {
- diags := template.Diagnose(f)
- s.storeDiagnostics(snapshot, f.URI(), typeCheckSource, diags)
- }
-
- // If there are no workspace packages, there is nothing to diagnose and
- // there are no orphaned files.
- if len(wsPkgs) == 0 {
- return
- }
-
- var (
- wg sync.WaitGroup
- seen = map[span.URI]struct{}{}
- )
- for _, pkg := range wsPkgs {
- wg.Add(1)
-
- for _, pgf := range pkg.CompiledGoFiles() {
- seen[pgf.URI] = struct{}{}
- }
-
- go func(pkg source.Package) {
- defer wg.Done()
-
- s.diagnosePkg(ctx, snapshot, pkg, forceAnalysis)
- }(pkg)
- }
- wg.Wait()
-
- // Confirm that every opened file belongs to a package (if any exist in
- // the workspace). Otherwise, add a diagnostic to the file.
- for _, o := range s.session.Overlays() {
- if _, ok := seen[o.URI()]; ok {
- continue
- }
- diagnostic := s.checkForOrphanedFile(ctx, snapshot, o)
- if diagnostic == nil {
- continue
- }
- s.storeDiagnostics(snapshot, o.URI(), orphanedSource, []*source.Diagnostic{diagnostic})
- }
-}
-
-func (s *Server) diagnosePkg(ctx context.Context, snapshot source.Snapshot, pkg source.Package, alwaysAnalyze bool) {
- ctx, done := event.Start(ctx, "Server.diagnosePkg", tag.Snapshot.Of(snapshot.ID()), tag.Package.Of(pkg.ID()))
- defer done()
- enableDiagnostics := false
- includeAnalysis := alwaysAnalyze // only run analyses for packages with open files
- for _, pgf := range pkg.CompiledGoFiles() {
- enableDiagnostics = enableDiagnostics || !snapshot.IgnoredFile(pgf.URI)
- includeAnalysis = includeAnalysis || snapshot.IsOpen(pgf.URI)
- }
- // Don't show any diagnostics on ignored files.
- if !enableDiagnostics {
- return
- }
-
- pkgDiagnostics, err := snapshot.DiagnosePackage(ctx, pkg)
- if err != nil {
- event.Error(ctx, "warning: diagnosing package", err, tag.Snapshot.Of(snapshot.ID()), tag.Package.Of(pkg.ID()))
- return
- }
- for _, cgf := range pkg.CompiledGoFiles() {
- // builtin.go exists only for documentation purposes, and is not valid Go code.
- // Don't report distracting errors
- if !snapshot.IsBuiltin(ctx, cgf.URI) {
- s.storeDiagnostics(snapshot, cgf.URI, typeCheckSource, pkgDiagnostics[cgf.URI])
- }
- }
- if includeAnalysis && !pkg.HasListOrParseErrors() {
- reports, err := source.Analyze(ctx, snapshot, pkg, false)
- if err != nil {
- event.Error(ctx, "warning: analyzing package", err, tag.Snapshot.Of(snapshot.ID()), tag.Package.Of(pkg.ID()))
- return
- }
- for _, cgf := range pkg.CompiledGoFiles() {
- s.storeDiagnostics(snapshot, cgf.URI, analysisSource, reports[cgf.URI])
- }
- }
-
- // If gc optimization details are requested, add them to the
- // diagnostic reports.
- s.gcOptimizationDetailsMu.Lock()
- _, enableGCDetails := s.gcOptimizationDetails[pkg.ID()]
- s.gcOptimizationDetailsMu.Unlock()
- if enableGCDetails {
- gcReports, err := source.GCOptimizationDetails(ctx, snapshot, pkg)
- if err != nil {
- event.Error(ctx, "warning: gc details", err, tag.Snapshot.Of(snapshot.ID()), tag.Package.Of(pkg.ID()))
- }
- s.gcOptimizationDetailsMu.Lock()
- _, enableGCDetails := s.gcOptimizationDetails[pkg.ID()]
-
- // NOTE(golang/go#44826): hold the gcOptimizationDetails lock, and re-check
- // whether gc optimization details are enabled, while storing gc_details
- // results. This ensures that the toggling of GC details and clearing of
- // diagnostics does not race with storing the results here.
- if enableGCDetails {
- for id, diags := range gcReports {
- fh := snapshot.FindFile(id.URI)
- // Don't publish gc details for unsaved buffers, since the underlying
- // logic operates on the file on disk.
- if fh == nil || !fh.Saved() {
- continue
- }
- s.storeDiagnostics(snapshot, id.URI, gcDetailsSource, diags)
- }
- }
- s.gcOptimizationDetailsMu.Unlock()
- }
-}
-
-// storeDiagnostics stores results from a single diagnostic source. If merge is
-// true, it merges results into any existing results for this snapshot.
-func (s *Server) storeDiagnostics(snapshot source.Snapshot, uri span.URI, dsource diagnosticSource, diags []*source.Diagnostic) {
- // Safeguard: ensure that the file actually exists in the snapshot
- // (see golang.org/issues/38602).
- fh := snapshot.FindFile(uri)
- if fh == nil {
- return
- }
- s.diagnosticsMu.Lock()
- defer s.diagnosticsMu.Unlock()
- if s.diagnostics[uri] == nil {
- s.diagnostics[uri] = &fileReports{
- publishedHash: hashDiagnostics(), // Hash for 0 diagnostics.
- reports: map[diagnosticSource]diagnosticReport{},
- }
- }
- report := s.diagnostics[uri].reports[dsource]
- // Don't set obsolete diagnostics.
- if report.snapshotID > snapshot.ID() {
- return
- }
- if report.diags == nil || report.snapshotID != snapshot.ID() {
- report.diags = map[string]*source.Diagnostic{}
- }
- report.snapshotID = snapshot.ID()
- for _, d := range diags {
- report.diags[hashDiagnostics(d)] = d
- }
- s.diagnostics[uri].reports[dsource] = report
-}
-
-// clearDiagnosticSource clears all diagnostics for a given source type. It is
-// necessary for cases where diagnostics have been invalidated by something
-// other than a snapshot change, for example when gc_details is toggled.
-func (s *Server) clearDiagnosticSource(dsource diagnosticSource) {
- s.diagnosticsMu.Lock()
- defer s.diagnosticsMu.Unlock()
- for _, reports := range s.diagnostics {
- delete(reports.reports, dsource)
- }
-}
-
-const WorkspaceLoadFailure = "Error loading workspace"
-
-// showCriticalErrorStatus shows the error as a progress report.
-// If the error is nil, it clears any existing error progress report.
-func (s *Server) showCriticalErrorStatus(ctx context.Context, snapshot source.Snapshot, err *source.CriticalError) {
- s.criticalErrorStatusMu.Lock()
- defer s.criticalErrorStatusMu.Unlock()
-
- // Remove all newlines so that the error message can be formatted in a
- // status bar.
- var errMsg string
- if err != nil {
- event.Error(ctx, "errors loading workspace", err.MainError, tag.Snapshot.Of(snapshot.ID()), tag.Directory.Of(snapshot.View().Folder()))
- for _, d := range err.DiagList {
- s.storeDiagnostics(snapshot, d.URI, modSource, []*source.Diagnostic{d})
- }
- errMsg = strings.ReplaceAll(err.MainError.Error(), "\n", " ")
- }
-
- if s.criticalErrorStatus == nil {
- if errMsg != "" {
- s.criticalErrorStatus = s.progress.Start(ctx, WorkspaceLoadFailure, errMsg, nil, nil)
- }
- return
- }
-
- // If an error is already shown to the user, update it or mark it as
- // resolved.
- if errMsg == "" {
- s.criticalErrorStatus.End("Done.")
- s.criticalErrorStatus = nil
- } else {
- s.criticalErrorStatus.Report(errMsg, 0)
- }
-}
-
-// checkForOrphanedFile checks that the given URIs can be mapped to packages.
-// If they cannot and the workspace is not otherwise unloaded, it also surfaces
-// a warning, suggesting that the user check the file for build tags.
-func (s *Server) checkForOrphanedFile(ctx context.Context, snapshot source.Snapshot, fh source.VersionedFileHandle) *source.Diagnostic {
- if snapshot.View().FileKind(fh) != source.Go {
- return nil
- }
- // builtin files won't have a package, but they are never orphaned.
- if snapshot.IsBuiltin(ctx, fh.URI()) {
- return nil
- }
- pkgs, err := snapshot.PackagesForFile(ctx, fh.URI(), source.TypecheckWorkspace, false)
- if len(pkgs) > 0 || err == nil {
- return nil
- }
- pgf, err := snapshot.ParseGo(ctx, fh, source.ParseHeader)
- if err != nil {
- return nil
- }
- spn, err := span.NewRange(snapshot.FileSet(), pgf.File.Name.Pos(), pgf.File.Name.End()).Span()
- if err != nil {
- return nil
- }
- rng, err := pgf.Mapper.Range(spn)
- if err != nil {
- return nil
- }
- // If the file no longer has a name ending in .go, this diagnostic is wrong
- if filepath.Ext(fh.URI().Filename()) != ".go" {
- return nil
- }
- // TODO(rstambler): We should be able to parse the build tags in the
- // file and show a more specific error message. For now, put the diagnostic
- // on the package declaration.
- return &source.Diagnostic{
- URI: fh.URI(),
- Range: rng,
- Severity: protocol.SeverityWarning,
- Source: source.ListError,
- Message: fmt.Sprintf(`No packages found for open file %s: %v.
-If this file contains build tags, try adding "-tags=<build tag>" to your gopls "buildFlags" configuration (see (https://github.com/golang/tools/blob/master/gopls/doc/settings.md#buildflags-string).
-Otherwise, see the troubleshooting guidelines for help investigating (https://github.com/golang/tools/blob/master/gopls/doc/troubleshooting.md).
-`, fh.URI().Filename(), err),
- }
-}
-
-// publishDiagnostics collects and publishes any unpublished diagnostic reports.
-func (s *Server) publishDiagnostics(ctx context.Context, final bool, snapshot source.Snapshot) {
- ctx, done := event.Start(ctx, "Server.publishDiagnostics", tag.Snapshot.Of(snapshot.ID()))
- defer done()
- s.diagnosticsMu.Lock()
- defer s.diagnosticsMu.Unlock()
-
- published := 0
- defer func() {
- log.Trace.Logf(ctx, "published %d diagnostics", published)
- }()
-
- for uri, r := range s.diagnostics {
- // Snapshot IDs are always increasing, so we use them instead of file
- // versions to create the correct order for diagnostics.
-
- // If we've already delivered diagnostics for a future snapshot for this
- // file, do not deliver them.
- if r.snapshotID > snapshot.ID() {
- continue
- }
- anyReportsChanged := false
- reportHashes := map[diagnosticSource]string{}
- var diags []*source.Diagnostic
- for dsource, report := range r.reports {
- if report.snapshotID != snapshot.ID() {
- continue
- }
- var reportDiags []*source.Diagnostic
- for _, d := range report.diags {
- diags = append(diags, d)
- reportDiags = append(reportDiags, d)
- }
- hash := hashDiagnostics(reportDiags...)
- if hash != report.publishedHash {
- anyReportsChanged = true
- }
- reportHashes[dsource] = hash
- }
-
- if !final && !anyReportsChanged {
- // Don't invalidate existing reports on the client if we haven't got any
- // new information.
- continue
- }
- source.SortDiagnostics(diags)
- hash := hashDiagnostics(diags...)
- if hash == r.publishedHash {
- // Update snapshotID to be the latest snapshot for which this diagnostic
- // hash is valid.
- r.snapshotID = snapshot.ID()
- continue
- }
- var version int32
- if fh := snapshot.FindFile(uri); fh != nil { // file may have been deleted
- version = fh.Version()
- }
- if err := s.client.PublishDiagnostics(ctx, &protocol.PublishDiagnosticsParams{
- Diagnostics: toProtocolDiagnostics(diags),
- URI: protocol.URIFromSpanURI(uri),
- Version: version,
- }); err == nil {
- published++
- r.publishedHash = hash
- r.snapshotID = snapshot.ID()
- for dsource, hash := range reportHashes {
- report := r.reports[dsource]
- report.publishedHash = hash
- r.reports[dsource] = report
- }
- } else {
- if ctx.Err() != nil {
- // Publish may have failed due to a cancelled context.
- log.Trace.Log(ctx, "publish cancelled")
- return
- }
- event.Error(ctx, "publishReports: failed to deliver diagnostic", err, tag.URI.Of(uri))
- }
- }
-}
-
-func toProtocolDiagnostics(diagnostics []*source.Diagnostic) []protocol.Diagnostic {
- reports := []protocol.Diagnostic{}
- for _, diag := range diagnostics {
- related := make([]protocol.DiagnosticRelatedInformation, 0, len(diag.Related))
- for _, rel := range diag.Related {
- related = append(related, protocol.DiagnosticRelatedInformation{
- Location: protocol.Location{
- URI: protocol.URIFromSpanURI(rel.URI),
- Range: rel.Range,
- },
- Message: rel.Message,
- })
- }
- pdiag := protocol.Diagnostic{
- // diag.Message might start with \n or \t
- Message: strings.TrimSpace(diag.Message),
- Range: diag.Range,
- Severity: diag.Severity,
- Source: string(diag.Source),
- Tags: diag.Tags,
- RelatedInformation: related,
- }
- if diag.Code != "" {
- pdiag.Code = diag.Code
- }
- if diag.CodeHref != "" {
- pdiag.CodeDescription = &protocol.CodeDescription{Href: diag.CodeHref}
- }
- reports = append(reports, pdiag)
- }
- return reports
-}
-
-func (s *Server) shouldIgnoreError(ctx context.Context, snapshot source.Snapshot, err error) bool {
- if err == nil { // if there is no error at all
- return false
- }
- if errors.Is(err, context.Canceled) {
- return true
- }
- // If the folder has no Go code in it, we shouldn't spam the user with a warning.
- var hasGo bool
- _ = filepath.Walk(snapshot.View().Folder().Filename(), func(path string, info os.FileInfo, err error) error {
- if err != nil {
- return err
- }
- if !strings.HasSuffix(info.Name(), ".go") {
- return nil
- }
- hasGo = true
- return errors.New("done")
- })
- return !hasGo
-}
-
-// Diagnostics formattedfor the debug server
-// (all the relevant fields of Server are private)
-// (The alternative is to export them)
-func (s *Server) Diagnostics() map[string][]string {
- ans := make(map[string][]string)
- s.diagnosticsMu.Lock()
- defer s.diagnosticsMu.Unlock()
- for k, v := range s.diagnostics {
- fn := k.Filename()
- for typ, d := range v.reports {
- if len(d.diags) == 0 {
- continue
- }
- for _, dx := range d.diags {
- ans[fn] = append(ans[fn], auxStr(dx, d, typ))
- }
- }
- }
- return ans
-}
-
-func auxStr(v *source.Diagnostic, d diagnosticReport, typ diagnosticSource) string {
- // Tags? RelatedInformation?
- msg := fmt.Sprintf("(%s)%q(source:%q,code:%q,severity:%s,snapshot:%d,type:%s)",
- v.Range, v.Message, v.Source, v.Code, v.Severity, d.snapshotID, typ)
- for _, r := range v.Related {
- msg += fmt.Sprintf(" [%s:%s,%q]", r.URI.Filename(), r.Range, r.Message)
- }
- return msg
-}
diff --git a/internal/lsp/diff/diff.go b/internal/lsp/diff/diff.go
deleted file mode 100644
index 5d8c69ca5..000000000
--- a/internal/lsp/diff/diff.go
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package diff supports a pluggable diff algorithm.
-package diff
-
-import (
- "sort"
- "strings"
-
- "golang.org/x/tools/internal/span"
-)
-
-// TextEdit represents a change to a section of a document.
-// The text within the specified span should be replaced by the supplied new text.
-type TextEdit struct {
- Span span.Span
- NewText string
-}
-
-// ComputeEdits is the type for a function that produces a set of edits that
-// convert from the before content to the after content.
-type ComputeEdits func(uri span.URI, before, after string) ([]TextEdit, error)
-
-// SortTextEdits attempts to order all edits by their starting points.
-// The sort is stable so that edits with the same starting point will not
-// be reordered.
-func SortTextEdits(d []TextEdit) {
- // Use a stable sort to maintain the order of edits inserted at the same position.
- sort.SliceStable(d, func(i int, j int) bool {
- return span.Compare(d[i].Span, d[j].Span) < 0
- })
-}
-
-// ApplyEdits applies the set of edits to the before and returns the resulting
-// content.
-// It may panic or produce garbage if the edits are not valid for the provided
-// before content.
-func ApplyEdits(before string, edits []TextEdit) string {
- // Preconditions:
- // - all of the edits apply to before
- // - and all the spans for each TextEdit have the same URI
- if len(edits) == 0 {
- return before
- }
- _, edits, _ = prepareEdits(before, edits)
- after := strings.Builder{}
- last := 0
- for _, edit := range edits {
- start := edit.Span.Start().Offset()
- if start > last {
- after.WriteString(before[last:start])
- last = start
- }
- after.WriteString(edit.NewText)
- last = edit.Span.End().Offset()
- }
- if last < len(before) {
- after.WriteString(before[last:])
- }
- return after.String()
-}
-
-// LineEdits takes a set of edits and expands and merges them as necessary
-// to ensure that there are only full line edits left when it is done.
-func LineEdits(before string, edits []TextEdit) []TextEdit {
- if len(edits) == 0 {
- return nil
- }
- c, edits, partial := prepareEdits(before, edits)
- if partial {
- edits = lineEdits(before, c, edits)
- }
- return edits
-}
-
-// prepareEdits returns a sorted copy of the edits
-func prepareEdits(before string, edits []TextEdit) (*span.TokenConverter, []TextEdit, bool) {
- partial := false
- c := span.NewContentConverter("", []byte(before))
- copied := make([]TextEdit, len(edits))
- for i, edit := range edits {
- edit.Span, _ = edit.Span.WithAll(c)
- copied[i] = edit
- partial = partial ||
- edit.Span.Start().Offset() >= len(before) ||
- edit.Span.Start().Column() > 1 || edit.Span.End().Column() > 1
- }
- SortTextEdits(copied)
- return c, copied, partial
-}
-
-// lineEdits rewrites the edits to always be full line edits
-func lineEdits(before string, c *span.TokenConverter, edits []TextEdit) []TextEdit {
- adjusted := make([]TextEdit, 0, len(edits))
- current := TextEdit{Span: span.Invalid}
- for _, edit := range edits {
- if current.Span.IsValid() && edit.Span.Start().Line() <= current.Span.End().Line() {
- // overlaps with the current edit, need to combine
- // first get the gap from the previous edit
- gap := before[current.Span.End().Offset():edit.Span.Start().Offset()]
- // now add the text of this edit
- current.NewText += gap + edit.NewText
- // and then adjust the end position
- current.Span = span.New(current.Span.URI(), current.Span.Start(), edit.Span.End())
- } else {
- // does not overlap, add previous run (if there is one)
- adjusted = addEdit(before, adjusted, current)
- // and then remember this edit as the start of the next run
- current = edit
- }
- }
- // add the current pending run if there is one
- return addEdit(before, adjusted, current)
-}
-
-func addEdit(before string, edits []TextEdit, edit TextEdit) []TextEdit {
- if !edit.Span.IsValid() {
- return edits
- }
- // if edit is partial, expand it to full line now
- start := edit.Span.Start()
- end := edit.Span.End()
- if start.Column() > 1 {
- // prepend the text and adjust to start of line
- delta := start.Column() - 1
- start = span.NewPoint(start.Line(), 1, start.Offset()-delta)
- edit.Span = span.New(edit.Span.URI(), start, end)
- edit.NewText = before[start.Offset():start.Offset()+delta] + edit.NewText
- }
- if start.Offset() >= len(before) && start.Line() > 1 && before[len(before)-1] != '\n' {
- // after end of file that does not end in eol, so join to last line of file
- // to do this we need to know where the start of the last line was
- eol := strings.LastIndex(before, "\n")
- if eol < 0 {
- // file is one non terminated line
- eol = 0
- }
- delta := len(before) - eol
- start = span.NewPoint(start.Line()-1, 1, start.Offset()-delta)
- edit.Span = span.New(edit.Span.URI(), start, end)
- edit.NewText = before[start.Offset():start.Offset()+delta] + edit.NewText
- }
- if end.Column() > 1 {
- remains := before[end.Offset():]
- eol := strings.IndexRune(remains, '\n')
- if eol < 0 {
- eol = len(remains)
- } else {
- eol++
- }
- end = span.NewPoint(end.Line()+1, 1, end.Offset()+eol)
- edit.Span = span.New(edit.Span.URI(), start, end)
- edit.NewText = edit.NewText + remains[:eol]
- }
- edits = append(edits, edit)
- return edits
-}
diff --git a/internal/lsp/diff/diff_test.go b/internal/lsp/diff/diff_test.go
deleted file mode 100644
index dd9414e5d..000000000
--- a/internal/lsp/diff/diff_test.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package diff_test
-
-import (
- "fmt"
- "testing"
-
- "golang.org/x/tools/internal/lsp/diff"
- "golang.org/x/tools/internal/lsp/diff/difftest"
- "golang.org/x/tools/internal/span"
-)
-
-func TestApplyEdits(t *testing.T) {
- for _, tc := range difftest.TestCases {
- t.Run(tc.Name, func(t *testing.T) {
- t.Helper()
- if got := diff.ApplyEdits(tc.In, tc.Edits); got != tc.Out {
- t.Errorf("ApplyEdits edits got %q, want %q", got, tc.Out)
- }
- if tc.LineEdits != nil {
- if got := diff.ApplyEdits(tc.In, tc.LineEdits); got != tc.Out {
- t.Errorf("ApplyEdits lineEdits got %q, want %q", got, tc.Out)
- }
- }
- })
- }
-}
-
-func TestLineEdits(t *testing.T) {
- for _, tc := range difftest.TestCases {
- t.Run(tc.Name, func(t *testing.T) {
- t.Helper()
- // if line edits not specified, it is the same as edits
- edits := tc.LineEdits
- if edits == nil {
- edits = tc.Edits
- }
- if got := diff.LineEdits(tc.In, tc.Edits); diffEdits(got, edits) {
- t.Errorf("LineEdits got %q, want %q", got, edits)
- }
- })
- }
-}
-
-func TestUnified(t *testing.T) {
- for _, tc := range difftest.TestCases {
- t.Run(tc.Name, func(t *testing.T) {
- t.Helper()
- unified := fmt.Sprint(diff.ToUnified(difftest.FileA, difftest.FileB, tc.In, tc.Edits))
- if unified != tc.Unified {
- t.Errorf("edits got diff:\n%v\nexpected:\n%v", unified, tc.Unified)
- }
- if tc.LineEdits != nil {
- unified := fmt.Sprint(diff.ToUnified(difftest.FileA, difftest.FileB, tc.In, tc.LineEdits))
- if unified != tc.Unified {
- t.Errorf("lineEdits got diff:\n%v\nexpected:\n%v", unified, tc.Unified)
- }
- }
- })
- }
-}
-
-func diffEdits(got, want []diff.TextEdit) bool {
- if len(got) != len(want) {
- return true
- }
- for i, w := range want {
- g := got[i]
- if span.Compare(w.Span, g.Span) != 0 {
- return true
- }
- if w.NewText != g.NewText {
- return true
- }
- }
- return false
-}
diff --git a/internal/lsp/diff/difftest/difftest.go b/internal/lsp/diff/difftest/difftest.go
deleted file mode 100644
index 0e014bc30..000000000
--- a/internal/lsp/diff/difftest/difftest.go
+++ /dev/null
@@ -1,243 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package difftest supplies a set of tests that will operate on any
-// implementation of a diff algorithm as exposed by
-// "golang.org/x/tools/internal/lsp/diff"
-package difftest
-
-import (
- "fmt"
- "testing"
-
- "golang.org/x/tools/internal/lsp/diff"
- "golang.org/x/tools/internal/span"
-)
-
-const (
- FileA = "from"
- FileB = "to"
- UnifiedPrefix = "--- " + FileA + "\n+++ " + FileB + "\n"
-)
-
-var TestCases = []struct {
- Name, In, Out, Unified string
- Edits, LineEdits []diff.TextEdit
- NoDiff bool
-}{{
- Name: "empty",
- In: "",
- Out: "",
-}, {
- Name: "no_diff",
- In: "gargantuan\n",
- Out: "gargantuan\n",
-}, {
- Name: "replace_all",
- In: "fruit\n",
- Out: "cheese\n",
- Unified: UnifiedPrefix + `
-@@ -1 +1 @@
--fruit
-+cheese
-`[1:],
- Edits: []diff.TextEdit{{Span: newSpan(0, 5), NewText: "cheese"}},
- LineEdits: []diff.TextEdit{{Span: newSpan(0, 6), NewText: "cheese\n"}},
-}, {
- Name: "insert_rune",
- In: "gord\n",
- Out: "gourd\n",
- Unified: UnifiedPrefix + `
-@@ -1 +1 @@
--gord
-+gourd
-`[1:],
- Edits: []diff.TextEdit{{Span: newSpan(2, 2), NewText: "u"}},
- LineEdits: []diff.TextEdit{{Span: newSpan(0, 5), NewText: "gourd\n"}},
-}, {
- Name: "delete_rune",
- In: "groat\n",
- Out: "goat\n",
- Unified: UnifiedPrefix + `
-@@ -1 +1 @@
--groat
-+goat
-`[1:],
- Edits: []diff.TextEdit{{Span: newSpan(1, 2), NewText: ""}},
- LineEdits: []diff.TextEdit{{Span: newSpan(0, 6), NewText: "goat\n"}},
-}, {
- Name: "replace_rune",
- In: "loud\n",
- Out: "lord\n",
- Unified: UnifiedPrefix + `
-@@ -1 +1 @@
--loud
-+lord
-`[1:],
- Edits: []diff.TextEdit{{Span: newSpan(2, 3), NewText: "r"}},
- LineEdits: []diff.TextEdit{{Span: newSpan(0, 5), NewText: "lord\n"}},
-}, {
- Name: "replace_partials",
- In: "blanket\n",
- Out: "bunker\n",
- Unified: UnifiedPrefix + `
-@@ -1 +1 @@
--blanket
-+bunker
-`[1:],
- Edits: []diff.TextEdit{
- {Span: newSpan(1, 3), NewText: "u"},
- {Span: newSpan(6, 7), NewText: "r"},
- },
- LineEdits: []diff.TextEdit{{Span: newSpan(0, 8), NewText: "bunker\n"}},
-}, {
- Name: "insert_line",
- In: "1: one\n3: three\n",
- Out: "1: one\n2: two\n3: three\n",
- Unified: UnifiedPrefix + `
-@@ -1,2 +1,3 @@
- 1: one
-+2: two
- 3: three
-`[1:],
- Edits: []diff.TextEdit{{Span: newSpan(7, 7), NewText: "2: two\n"}},
-}, {
- Name: "replace_no_newline",
- In: "A",
- Out: "B",
- Unified: UnifiedPrefix + `
-@@ -1 +1 @@
--A
-\ No newline at end of file
-+B
-\ No newline at end of file
-`[1:],
- Edits: []diff.TextEdit{{Span: newSpan(0, 1), NewText: "B"}},
-}, {
- Name: "add_end",
- In: "A",
- Out: "AB",
- Unified: UnifiedPrefix + `
-@@ -1 +1 @@
--A
-\ No newline at end of file
-+AB
-\ No newline at end of file
-`[1:],
- Edits: []diff.TextEdit{{Span: newSpan(1, 1), NewText: "B"}},
- LineEdits: []diff.TextEdit{{Span: newSpan(0, 1), NewText: "AB"}},
-}, {
- Name: "add_newline",
- In: "A",
- Out: "A\n",
- Unified: UnifiedPrefix + `
-@@ -1 +1 @@
--A
-\ No newline at end of file
-+A
-`[1:],
- Edits: []diff.TextEdit{{Span: newSpan(1, 1), NewText: "\n"}},
- LineEdits: []diff.TextEdit{{Span: newSpan(0, 1), NewText: "A\n"}},
-}, {
- Name: "delete_front",
- In: "A\nB\nC\nA\nB\nB\nA\n",
- Out: "C\nB\nA\nB\nA\nC\n",
- Unified: UnifiedPrefix + `
-@@ -1,7 +1,6 @@
--A
--B
- C
-+B
- A
- B
--B
- A
-+C
-`[1:],
- Edits: []diff.TextEdit{
- {Span: newSpan(0, 4), NewText: ""},
- {Span: newSpan(6, 6), NewText: "B\n"},
- {Span: newSpan(10, 12), NewText: ""},
- {Span: newSpan(14, 14), NewText: "C\n"},
- },
- NoDiff: true, // diff algorithm produces different delete/insert pattern
-},
- {
- Name: "replace_last_line",
- In: "A\nB\n",
- Out: "A\nC\n\n",
- Unified: UnifiedPrefix + `
-@@ -1,2 +1,3 @@
- A
--B
-+C
-+
-`[1:],
- Edits: []diff.TextEdit{{Span: newSpan(2, 3), NewText: "C\n"}},
- LineEdits: []diff.TextEdit{{Span: newSpan(2, 4), NewText: "C\n\n"}},
- },
- {
- Name: "multiple_replace",
- In: "A\nB\nC\nD\nE\nF\nG\n",
- Out: "A\nH\nI\nJ\nE\nF\nK\n",
- Unified: UnifiedPrefix + `
-@@ -1,7 +1,7 @@
- A
--B
--C
--D
-+H
-+I
-+J
- E
- F
--G
-+K
-`[1:],
- Edits: []diff.TextEdit{
- {Span: newSpan(2, 8), NewText: "H\nI\nJ\n"},
- {Span: newSpan(12, 14), NewText: "K\n"},
- },
- NoDiff: true, // diff algorithm produces different delete/insert pattern
- },
-}
-
-func init() {
- // expand all the spans to full versions
- // we need them all to have their line number and column
- for _, tc := range TestCases {
- c := span.NewContentConverter("", []byte(tc.In))
- for i := range tc.Edits {
- tc.Edits[i].Span, _ = tc.Edits[i].Span.WithAll(c)
- }
- for i := range tc.LineEdits {
- tc.LineEdits[i].Span, _ = tc.LineEdits[i].Span.WithAll(c)
- }
- }
-}
-
-func DiffTest(t *testing.T, compute diff.ComputeEdits) {
- t.Helper()
- for _, test := range TestCases {
- t.Run(test.Name, func(t *testing.T) {
- t.Helper()
- edits, err := compute(span.URIFromPath("/"+test.Name), test.In, test.Out)
- if err != nil {
- t.Fatal(err)
- }
- got := diff.ApplyEdits(test.In, edits)
- unified := fmt.Sprint(diff.ToUnified(FileA, FileB, test.In, edits))
- if got != test.Out {
- t.Errorf("got patched:\n%v\nfrom diff:\n%v\nexpected:\n%v", got, unified, test.Out)
- }
- if !test.NoDiff && unified != test.Unified {
- t.Errorf("got diff:\n%v\nexpected:\n%v", unified, test.Unified)
- }
- })
- }
-}
-
-func newSpan(start, end int) span.Span {
- return span.New("", span.NewPoint(0, 0, start), span.NewPoint(0, 0, end))
-}
diff --git a/internal/lsp/diff/difftest/difftest_test.go b/internal/lsp/diff/difftest/difftest_test.go
deleted file mode 100644
index fd7ecf959..000000000
--- a/internal/lsp/diff/difftest/difftest_test.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package difftest supplies a set of tests that will operate on any
-// implementation of a diff algorithm as exposed by
-// "golang.org/x/tools/internal/lsp/diff"
-package difftest_test
-
-import (
- "fmt"
- "io/ioutil"
- "os"
- "os/exec"
- "strings"
- "testing"
-
- "golang.org/x/tools/internal/lsp/diff/difftest"
- "golang.org/x/tools/internal/testenv"
-)
-
-func TestVerifyUnified(t *testing.T) {
- testenv.NeedsTool(t, "diff")
- for _, test := range difftest.TestCases {
- t.Run(test.Name, func(t *testing.T) {
- t.Helper()
- if test.NoDiff {
- t.Skip("diff tool produces expected different results")
- }
- diff, err := getDiffOutput(test.In, test.Out)
- if err != nil {
- t.Fatal(err)
- }
- if len(diff) > 0 {
- diff = difftest.UnifiedPrefix + diff
- }
- if diff != test.Unified {
- t.Errorf("unified:\n%q\ndiff -u:\n%q", test.Unified, diff)
- }
- })
- }
-}
-
-func getDiffOutput(a, b string) (string, error) {
- fileA, err := ioutil.TempFile("", "myers.in")
- if err != nil {
- return "", err
- }
- defer os.Remove(fileA.Name())
- if _, err := fileA.Write([]byte(a)); err != nil {
- return "", err
- }
- if err := fileA.Close(); err != nil {
- return "", err
- }
- fileB, err := ioutil.TempFile("", "myers.in")
- if err != nil {
- return "", err
- }
- defer os.Remove(fileB.Name())
- if _, err := fileB.Write([]byte(b)); err != nil {
- return "", err
- }
- if err := fileB.Close(); err != nil {
- return "", err
- }
- cmd := exec.Command("diff", "-u", fileA.Name(), fileB.Name())
- cmd.Env = append(cmd.Env, "LANG=en_US.UTF-8")
- out, err := cmd.CombinedOutput()
- if err != nil {
- if _, ok := err.(*exec.ExitError); !ok {
- return "", fmt.Errorf("failed to run diff -u %v %v: %v\n%v", fileA.Name(), fileB.Name(), err, string(out))
- }
- }
- diff := string(out)
- if len(diff) <= 0 {
- return diff, nil
- }
- bits := strings.SplitN(diff, "\n", 3)
- if len(bits) != 3 {
- return "", fmt.Errorf("diff output did not have file prefix:\n%s", diff)
- }
- return bits[2], nil
-}
diff --git a/internal/lsp/diff/myers/diff.go b/internal/lsp/diff/myers/diff.go
deleted file mode 100644
index a59475058..000000000
--- a/internal/lsp/diff/myers/diff.go
+++ /dev/null
@@ -1,205 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package myers implements the Myers diff algorithm.
-package myers
-
-import (
- "strings"
-
- "golang.org/x/tools/internal/lsp/diff"
- "golang.org/x/tools/internal/span"
-)
-
-// Sources:
-// https://blog.jcoglan.com/2017/02/17/the-myers-diff-algorithm-part-3/
-// https://www.codeproject.com/Articles/42279/%2FArticles%2F42279%2FInvestigating-Myers-diff-algorithm-Part-1-of-2
-
-func ComputeEdits(uri span.URI, before, after string) ([]diff.TextEdit, error) {
- ops := operations(splitLines(before), splitLines(after))
- edits := make([]diff.TextEdit, 0, len(ops))
- for _, op := range ops {
- s := span.New(uri, span.NewPoint(op.I1+1, 1, 0), span.NewPoint(op.I2+1, 1, 0))
- switch op.Kind {
- case diff.Delete:
- // Delete: unformatted[i1:i2] is deleted.
- edits = append(edits, diff.TextEdit{Span: s})
- case diff.Insert:
- // Insert: formatted[j1:j2] is inserted at unformatted[i1:i1].
- if content := strings.Join(op.Content, ""); content != "" {
- edits = append(edits, diff.TextEdit{Span: s, NewText: content})
- }
- }
- }
- return edits, nil
-}
-
-type operation struct {
- Kind diff.OpKind
- Content []string // content from b
- I1, I2 int // indices of the line in a
- J1 int // indices of the line in b, J2 implied by len(Content)
-}
-
-// operations returns the list of operations to convert a into b, consolidating
-// operations for multiple lines and not including equal lines.
-func operations(a, b []string) []*operation {
- if len(a) == 0 && len(b) == 0 {
- return nil
- }
-
- trace, offset := shortestEditSequence(a, b)
- snakes := backtrack(trace, len(a), len(b), offset)
-
- M, N := len(a), len(b)
-
- var i int
- solution := make([]*operation, len(a)+len(b))
-
- add := func(op *operation, i2, j2 int) {
- if op == nil {
- return
- }
- op.I2 = i2
- if op.Kind == diff.Insert {
- op.Content = b[op.J1:j2]
- }
- solution[i] = op
- i++
- }
- x, y := 0, 0
- for _, snake := range snakes {
- if len(snake) < 2 {
- continue
- }
- var op *operation
- // delete (horizontal)
- for snake[0]-snake[1] > x-y {
- if op == nil {
- op = &operation{
- Kind: diff.Delete,
- I1: x,
- J1: y,
- }
- }
- x++
- if x == M {
- break
- }
- }
- add(op, x, y)
- op = nil
- // insert (vertical)
- for snake[0]-snake[1] < x-y {
- if op == nil {
- op = &operation{
- Kind: diff.Insert,
- I1: x,
- J1: y,
- }
- }
- y++
- }
- add(op, x, y)
- op = nil
- // equal (diagonal)
- for x < snake[0] {
- x++
- y++
- }
- if x >= M && y >= N {
- break
- }
- }
- return solution[:i]
-}
-
-// backtrack uses the trace for the edit sequence computation and returns the
-// "snakes" that make up the solution. A "snake" is a single deletion or
-// insertion followed by zero or diagonals.
-func backtrack(trace [][]int, x, y, offset int) [][]int {
- snakes := make([][]int, len(trace))
- d := len(trace) - 1
- for ; x > 0 && y > 0 && d > 0; d-- {
- V := trace[d]
- if len(V) == 0 {
- continue
- }
- snakes[d] = []int{x, y}
-
- k := x - y
-
- var kPrev int
- if k == -d || (k != d && V[k-1+offset] < V[k+1+offset]) {
- kPrev = k + 1
- } else {
- kPrev = k - 1
- }
-
- x = V[kPrev+offset]
- y = x - kPrev
- }
- if x < 0 || y < 0 {
- return snakes
- }
- snakes[d] = []int{x, y}
- return snakes
-}
-
-// shortestEditSequence returns the shortest edit sequence that converts a into b.
-func shortestEditSequence(a, b []string) ([][]int, int) {
- M, N := len(a), len(b)
- V := make([]int, 2*(N+M)+1)
- offset := N + M
- trace := make([][]int, N+M+1)
-
- // Iterate through the maximum possible length of the SES (N+M).
- for d := 0; d <= N+M; d++ {
- copyV := make([]int, len(V))
- // k lines are represented by the equation y = x - k. We move in
- // increments of 2 because end points for even d are on even k lines.
- for k := -d; k <= d; k += 2 {
- // At each point, we either go down or to the right. We go down if
- // k == -d, and we go to the right if k == d. We also prioritize
- // the maximum x value, because we prefer deletions to insertions.
- var x int
- if k == -d || (k != d && V[k-1+offset] < V[k+1+offset]) {
- x = V[k+1+offset] // down
- } else {
- x = V[k-1+offset] + 1 // right
- }
-
- y := x - k
-
- // Diagonal moves while we have equal contents.
- for x < M && y < N && a[x] == b[y] {
- x++
- y++
- }
-
- V[k+offset] = x
-
- // Return if we've exceeded the maximum values.
- if x == M && y == N {
- // Makes sure to save the state of the array before returning.
- copy(copyV, V)
- trace[d] = copyV
- return trace, offset
- }
- }
-
- // Save the state of the array.
- copy(copyV, V)
- trace[d] = copyV
- }
- return nil, 0
-}
-
-func splitLines(text string) []string {
- lines := strings.SplitAfter(text, "\n")
- if lines[len(lines)-1] == "" {
- lines = lines[:len(lines)-1]
- }
- return lines
-}
diff --git a/internal/lsp/diff/myers/diff_test.go b/internal/lsp/diff/myers/diff_test.go
deleted file mode 100644
index bce0399c5..000000000
--- a/internal/lsp/diff/myers/diff_test.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package myers_test
-
-import (
- "testing"
-
- "golang.org/x/tools/internal/lsp/diff/difftest"
- "golang.org/x/tools/internal/lsp/diff/myers"
-)
-
-func TestDiff(t *testing.T) {
- difftest.DiffTest(t, myers.ComputeEdits)
-}
diff --git a/internal/lsp/diff/unified.go b/internal/lsp/diff/unified.go
deleted file mode 100644
index b2e630eff..000000000
--- a/internal/lsp/diff/unified.go
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package diff
-
-import (
- "fmt"
- "strings"
-)
-
-// Unified represents a set of edits as a unified diff.
-type Unified struct {
- // From is the name of the original file.
- From string
- // To is the name of the modified file.
- To string
- // Hunks is the set of edit hunks needed to transform the file content.
- Hunks []*Hunk
-}
-
-// Hunk represents a contiguous set of line edits to apply.
-type Hunk struct {
- // The line in the original source where the hunk starts.
- FromLine int
- // The line in the original source where the hunk finishes.
- ToLine int
- // The set of line based edits to apply.
- Lines []Line
-}
-
-// Line represents a single line operation to apply as part of a Hunk.
-type Line struct {
- // Kind is the type of line this represents, deletion, insertion or copy.
- Kind OpKind
- // Content is the content of this line.
- // For deletion it is the line being removed, for all others it is the line
- // to put in the output.
- Content string
-}
-
-// OpKind is used to denote the type of operation a line represents.
-type OpKind int
-
-const (
- // Delete is the operation kind for a line that is present in the input
- // but not in the output.
- Delete OpKind = iota
- // Insert is the operation kind for a line that is new in the output.
- Insert
- // Equal is the operation kind for a line that is the same in the input and
- // output, often used to provide context around edited lines.
- Equal
-)
-
-// String returns a human readable representation of an OpKind. It is not
-// intended for machine processing.
-func (k OpKind) String() string {
- switch k {
- case Delete:
- return "delete"
- case Insert:
- return "insert"
- case Equal:
- return "equal"
- default:
- panic("unknown operation kind")
- }
-}
-
-const (
- edge = 3
- gap = edge * 2
-)
-
-// ToUnified takes a file contents and a sequence of edits, and calculates
-// a unified diff that represents those edits.
-func ToUnified(from, to string, content string, edits []TextEdit) Unified {
- u := Unified{
- From: from,
- To: to,
- }
- if len(edits) == 0 {
- return u
- }
- c, edits, partial := prepareEdits(content, edits)
- if partial {
- edits = lineEdits(content, c, edits)
- }
- lines := splitLines(content)
- var h *Hunk
- last := 0
- toLine := 0
- for _, edit := range edits {
- start := edit.Span.Start().Line() - 1
- end := edit.Span.End().Line() - 1
- switch {
- case h != nil && start == last:
- //direct extension
- case h != nil && start <= last+gap:
- //within range of previous lines, add the joiners
- addEqualLines(h, lines, last, start)
- default:
- //need to start a new hunk
- if h != nil {
- // add the edge to the previous hunk
- addEqualLines(h, lines, last, last+edge)
- u.Hunks = append(u.Hunks, h)
- }
- toLine += start - last
- h = &Hunk{
- FromLine: start + 1,
- ToLine: toLine + 1,
- }
- // add the edge to the new hunk
- delta := addEqualLines(h, lines, start-edge, start)
- h.FromLine -= delta
- h.ToLine -= delta
- }
- last = start
- for i := start; i < end; i++ {
- h.Lines = append(h.Lines, Line{Kind: Delete, Content: lines[i]})
- last++
- }
- if edit.NewText != "" {
- for _, line := range splitLines(edit.NewText) {
- h.Lines = append(h.Lines, Line{Kind: Insert, Content: line})
- toLine++
- }
- }
- }
- if h != nil {
- // add the edge to the final hunk
- addEqualLines(h, lines, last, last+edge)
- u.Hunks = append(u.Hunks, h)
- }
- return u
-}
-
-func splitLines(text string) []string {
- lines := strings.SplitAfter(text, "\n")
- if lines[len(lines)-1] == "" {
- lines = lines[:len(lines)-1]
- }
- return lines
-}
-
-func addEqualLines(h *Hunk, lines []string, start, end int) int {
- delta := 0
- for i := start; i < end; i++ {
- if i < 0 {
- continue
- }
- if i >= len(lines) {
- return delta
- }
- h.Lines = append(h.Lines, Line{Kind: Equal, Content: lines[i]})
- delta++
- }
- return delta
-}
-
-// Format converts a unified diff to the standard textual form for that diff.
-// The output of this function can be passed to tools like patch.
-func (u Unified) Format(f fmt.State, r rune) {
- if len(u.Hunks) == 0 {
- return
- }
- fmt.Fprintf(f, "--- %s\n", u.From)
- fmt.Fprintf(f, "+++ %s\n", u.To)
- for _, hunk := range u.Hunks {
- fromCount, toCount := 0, 0
- for _, l := range hunk.Lines {
- switch l.Kind {
- case Delete:
- fromCount++
- case Insert:
- toCount++
- default:
- fromCount++
- toCount++
- }
- }
- fmt.Fprint(f, "@@")
- if fromCount > 1 {
- fmt.Fprintf(f, " -%d,%d", hunk.FromLine, fromCount)
- } else {
- fmt.Fprintf(f, " -%d", hunk.FromLine)
- }
- if toCount > 1 {
- fmt.Fprintf(f, " +%d,%d", hunk.ToLine, toCount)
- } else {
- fmt.Fprintf(f, " +%d", hunk.ToLine)
- }
- fmt.Fprint(f, " @@\n")
- for _, l := range hunk.Lines {
- switch l.Kind {
- case Delete:
- fmt.Fprintf(f, "-%s", l.Content)
- case Insert:
- fmt.Fprintf(f, "+%s", l.Content)
- default:
- fmt.Fprintf(f, " %s", l.Content)
- }
- if !strings.HasSuffix(l.Content, "\n") {
- fmt.Fprintf(f, "\n\\ No newline at end of file\n")
- }
- }
- }
-}
diff --git a/internal/lsp/fake/client.go b/internal/lsp/fake/client.go
deleted file mode 100644
index fdc67a6cc..000000000
--- a/internal/lsp/fake/client.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package fake
-
-import (
- "context"
- "fmt"
-
- "golang.org/x/tools/internal/lsp/protocol"
-)
-
-// ClientHooks are called to handle the corresponding client LSP method.
-type ClientHooks struct {
- OnLogMessage func(context.Context, *protocol.LogMessageParams) error
- OnDiagnostics func(context.Context, *protocol.PublishDiagnosticsParams) error
- OnWorkDoneProgressCreate func(context.Context, *protocol.WorkDoneProgressCreateParams) error
- OnProgress func(context.Context, *protocol.ProgressParams) error
- OnShowMessage func(context.Context, *protocol.ShowMessageParams) error
- OnShowMessageRequest func(context.Context, *protocol.ShowMessageRequestParams) error
- OnRegistration func(context.Context, *protocol.RegistrationParams) error
- OnUnregistration func(context.Context, *protocol.UnregistrationParams) error
-}
-
-// Client is an adapter that converts an *Editor into an LSP Client. It mosly
-// delegates functionality to hooks that can be configured by tests.
-type Client struct {
- editor *Editor
- hooks ClientHooks
-}
-
-func (c *Client) ShowMessage(ctx context.Context, params *protocol.ShowMessageParams) error {
- if c.hooks.OnShowMessage != nil {
- return c.hooks.OnShowMessage(ctx, params)
- }
- return nil
-}
-
-func (c *Client) ShowMessageRequest(ctx context.Context, params *protocol.ShowMessageRequestParams) (*protocol.MessageActionItem, error) {
- if c.hooks.OnShowMessageRequest != nil {
- if err := c.hooks.OnShowMessageRequest(ctx, params); err != nil {
- return nil, err
- }
- }
- if len(params.Actions) == 0 || len(params.Actions) > 1 {
- return nil, fmt.Errorf("fake editor cannot handle multiple action items")
- }
- return &params.Actions[0], nil
-}
-
-func (c *Client) LogMessage(ctx context.Context, params *protocol.LogMessageParams) error {
- if c.hooks.OnLogMessage != nil {
- return c.hooks.OnLogMessage(ctx, params)
- }
- return nil
-}
-
-func (c *Client) Event(ctx context.Context, event *interface{}) error {
- return nil
-}
-
-func (c *Client) PublishDiagnostics(ctx context.Context, params *protocol.PublishDiagnosticsParams) error {
- if c.hooks.OnDiagnostics != nil {
- return c.hooks.OnDiagnostics(ctx, params)
- }
- return nil
-}
-
-func (c *Client) WorkspaceFolders(context.Context) ([]protocol.WorkspaceFolder, error) {
- return []protocol.WorkspaceFolder{}, nil
-}
-
-func (c *Client) Configuration(_ context.Context, p *protocol.ParamConfiguration) ([]interface{}, error) {
- results := make([]interface{}, len(p.Items))
- for i, item := range p.Items {
- if item.Section != "gopls" {
- continue
- }
- results[i] = c.editor.configuration()
- }
- return results, nil
-}
-
-func (c *Client) RegisterCapability(ctx context.Context, params *protocol.RegistrationParams) error {
- if c.hooks.OnRegistration != nil {
- return c.hooks.OnRegistration(ctx, params)
- }
- return nil
-}
-
-func (c *Client) UnregisterCapability(ctx context.Context, params *protocol.UnregistrationParams) error {
- if c.hooks.OnUnregistration != nil {
- return c.hooks.OnUnregistration(ctx, params)
- }
- return nil
-}
-
-func (c *Client) Progress(ctx context.Context, params *protocol.ProgressParams) error {
- if c.hooks.OnProgress != nil {
- return c.hooks.OnProgress(ctx, params)
- }
- return nil
-}
-
-func (c *Client) WorkDoneProgressCreate(ctx context.Context, params *protocol.WorkDoneProgressCreateParams) error {
- if c.hooks.OnWorkDoneProgressCreate != nil {
- return c.hooks.OnWorkDoneProgressCreate(ctx, params)
- }
- return nil
-}
-
-func (c *Client) ShowDocument(context.Context, *protocol.ShowDocumentParams) (*protocol.ShowDocumentResult, error) {
- return nil, nil
-}
-
-// ApplyEdit applies edits sent from the server.
-func (c *Client) ApplyEdit(ctx context.Context, params *protocol.ApplyWorkspaceEditParams) (*protocol.ApplyWorkspaceEditResult, error) {
- if len(params.Edit.Changes) != 0 {
- return &protocol.ApplyWorkspaceEditResult{FailureReason: "Edit.Changes is unsupported"}, nil
- }
- for _, change := range params.Edit.DocumentChanges {
- if err := c.editor.applyProtocolEdit(ctx, change); err != nil {
- return nil, err
- }
- }
- return &protocol.ApplyWorkspaceEditResult{Applied: true}, nil
-}
diff --git a/internal/lsp/fake/edit.go b/internal/lsp/fake/edit.go
deleted file mode 100644
index 8b04c390f..000000000
--- a/internal/lsp/fake/edit.go
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package fake
-
-import (
- "fmt"
- "sort"
- "strings"
-
- "golang.org/x/tools/internal/lsp/protocol"
-)
-
-// Pos represents a position in a text buffer. Both Line and Column are
-// 0-indexed.
-type Pos struct {
- Line, Column int
-}
-
-func (p Pos) String() string {
- return fmt.Sprintf("%v:%v", p.Line, p.Column)
-}
-
-// Range corresponds to protocol.Range, but uses the editor friend Pos
-// instead of UTF-16 oriented protocol.Position
-type Range struct {
- Start Pos
- End Pos
-}
-
-func (p Pos) ToProtocolPosition() protocol.Position {
- return protocol.Position{
- Line: uint32(p.Line),
- Character: uint32(p.Column),
- }
-}
-
-func fromProtocolPosition(pos protocol.Position) Pos {
- return Pos{
- Line: int(pos.Line),
- Column: int(pos.Character),
- }
-}
-
-// Edit represents a single (contiguous) buffer edit.
-type Edit struct {
- Start, End Pos
- Text string
-}
-
-// Location is the editor friendly equivalent of protocol.Location
-type Location struct {
- Path string
- Range Range
-}
-
-// SymbolInformation is an editor friendly version of
-// protocol.SymbolInformation, with location information transformed to byte
-// offsets. Field names correspond to the protocol type.
-type SymbolInformation struct {
- Name string
- Kind protocol.SymbolKind
- Location Location
-}
-
-// NewEdit creates an edit replacing all content between
-// (startLine, startColumn) and (endLine, endColumn) with text.
-func NewEdit(startLine, startColumn, endLine, endColumn int, text string) Edit {
- return Edit{
- Start: Pos{Line: startLine, Column: startColumn},
- End: Pos{Line: endLine, Column: endColumn},
- Text: text,
- }
-}
-
-func (e Edit) toProtocolChangeEvent() protocol.TextDocumentContentChangeEvent {
- return protocol.TextDocumentContentChangeEvent{
- Range: &protocol.Range{
- Start: e.Start.ToProtocolPosition(),
- End: e.End.ToProtocolPosition(),
- },
- Text: e.Text,
- }
-}
-
-func fromProtocolTextEdit(textEdit protocol.TextEdit) Edit {
- return Edit{
- Start: fromProtocolPosition(textEdit.Range.Start),
- End: fromProtocolPosition(textEdit.Range.End),
- Text: textEdit.NewText,
- }
-}
-
-// inText reports whether p is a valid position in the text buffer.
-func inText(p Pos, content []string) bool {
- if p.Line < 0 || p.Line >= len(content) {
- return false
- }
- // Note the strict right bound: the column indexes character _separators_,
- // not characters.
- if p.Column < 0 || p.Column > len([]rune(content[p.Line])) {
- return false
- }
- return true
-}
-
-// editContent implements a simplistic, inefficient algorithm for applying text
-// edits to our buffer representation. It returns an error if the edit is
-// invalid for the current content.
-func editContent(content []string, edits []Edit) ([]string, error) {
- newEdits := make([]Edit, len(edits))
- copy(newEdits, edits)
- sort.Slice(newEdits, func(i, j int) bool {
- if newEdits[i].Start.Line < newEdits[j].Start.Line {
- return true
- }
- if newEdits[i].Start.Line > newEdits[j].Start.Line {
- return false
- }
- return newEdits[i].Start.Column < newEdits[j].Start.Column
- })
-
- // Validate edits.
- for _, edit := range newEdits {
- if edit.End.Line < edit.Start.Line || (edit.End.Line == edit.Start.Line && edit.End.Column < edit.Start.Column) {
- return nil, fmt.Errorf("invalid edit: end %v before start %v", edit.End, edit.Start)
- }
- if !inText(edit.Start, content) {
- return nil, fmt.Errorf("start position %v is out of bounds", edit.Start)
- }
- if !inText(edit.End, content) {
- return nil, fmt.Errorf("end position %v is out of bounds", edit.End)
- }
- }
-
- var (
- b strings.Builder
- line, column int
- )
- advance := func(toLine, toColumn int) {
- for ; line < toLine; line++ {
- b.WriteString(string([]rune(content[line])[column:]) + "\n")
- column = 0
- }
- b.WriteString(string([]rune(content[line])[column:toColumn]))
- column = toColumn
- }
- for _, edit := range newEdits {
- advance(edit.Start.Line, edit.Start.Column)
- b.WriteString(edit.Text)
- line = edit.End.Line
- column = edit.End.Column
- }
- advance(len(content)-1, len([]rune(content[len(content)-1])))
- return strings.Split(b.String(), "\n"), nil
-}
diff --git a/internal/lsp/fake/edit_test.go b/internal/lsp/fake/edit_test.go
deleted file mode 100644
index 4fa23bdb7..000000000
--- a/internal/lsp/fake/edit_test.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package fake
-
-import (
- "strings"
- "testing"
-)
-
-func TestApplyEdit(t *testing.T) {
- tests := []struct {
- label string
- content string
- edits []Edit
- want string
- wantErr bool
- }{
- {
- label: "empty content",
- },
- {
- label: "empty edit",
- content: "hello",
- edits: []Edit{},
- want: "hello",
- },
- {
- label: "unicode edit",
- content: "hello, 日本語",
- edits: []Edit{{
- Start: Pos{Line: 0, Column: 7},
- End: Pos{Line: 0, Column: 10},
- Text: "world",
- }},
- want: "hello, world",
- },
- {
- label: "range edit",
- content: "ABC\nDEF\nGHI\nJKL",
- edits: []Edit{{
- Start: Pos{Line: 1, Column: 1},
- End: Pos{Line: 2, Column: 3},
- Text: "12\n345",
- }},
- want: "ABC\nD12\n345\nJKL",
- },
- {
- label: "end before start",
- content: "ABC\nDEF\nGHI\nJKL",
- edits: []Edit{{
- End: Pos{Line: 1, Column: 1},
- Start: Pos{Line: 2, Column: 3},
- Text: "12\n345",
- }},
- wantErr: true,
- },
- {
- label: "out of bounds line",
- content: "ABC\nDEF\nGHI\nJKL",
- edits: []Edit{{
- Start: Pos{Line: 1, Column: 1},
- End: Pos{Line: 4, Column: 3},
- Text: "12\n345",
- }},
- wantErr: true,
- },
- {
- label: "out of bounds column",
- content: "ABC\nDEF\nGHI\nJKL",
- edits: []Edit{{
- Start: Pos{Line: 1, Column: 4},
- End: Pos{Line: 2, Column: 3},
- Text: "12\n345",
- }},
- wantErr: true,
- },
- }
-
- for _, test := range tests {
- test := test
- t.Run(test.label, func(t *testing.T) {
- lines := strings.Split(test.content, "\n")
- newLines, err := editContent(lines, test.edits)
- if (err != nil) != test.wantErr {
- t.Errorf("got err %v, want error: %t", err, test.wantErr)
- }
- if err != nil {
- return
- }
- if got := strings.Join(newLines, "\n"); got != test.want {
- t.Errorf("got %q, want %q", got, test.want)
- }
- })
- }
-}
diff --git a/internal/lsp/fake/editor.go b/internal/lsp/fake/editor.go
deleted file mode 100644
index 5bce5609f..000000000
--- a/internal/lsp/fake/editor.go
+++ /dev/null
@@ -1,1258 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package fake
-
-import (
- "bufio"
- "context"
- "fmt"
- "os"
- "path"
- "path/filepath"
- "regexp"
- "strings"
- "sync"
-
- "golang.org/x/tools/internal/jsonrpc2"
- "golang.org/x/tools/internal/lsp/command"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/span"
- errors "golang.org/x/xerrors"
-)
-
-// Editor is a fake editor client. It keeps track of client state and can be
-// used for writing LSP tests.
-type Editor struct {
- Config EditorConfig
-
- // Server, client, and sandbox are concurrency safe and written only
- // at construction time, so do not require synchronization.
- Server protocol.Server
- serverConn jsonrpc2.Conn
- client *Client
- sandbox *Sandbox
- defaultEnv map[string]string
-
- // Since this editor is intended just for testing, we use very coarse
- // locking.
- mu sync.Mutex
- // Editor state.
- buffers map[string]buffer
- // Capabilities / Options
- serverCapabilities protocol.ServerCapabilities
-
- // Call metrics for the purpose of expectations. This is done in an ad-hoc
- // manner for now. Perhaps in the future we should do something more
- // systematic. Guarded with a separate mutex as calls may need to be accessed
- // asynchronously via callbacks into the Editor.
- callsMu sync.Mutex
- calls CallCounts
-}
-
-type CallCounts struct {
- DidOpen, DidChange, DidSave, DidChangeWatchedFiles, DidClose uint64
-}
-
-type buffer struct {
- windowsLineEndings bool
- version int
- path string
- lines []string
- dirty bool
-}
-
-func (b buffer) text() string {
- eol := "\n"
- if b.windowsLineEndings {
- eol = "\r\n"
- }
- return strings.Join(b.lines, eol)
-}
-
-// EditorConfig configures the editor's LSP session. This is similar to
-// source.UserOptions, but we use a separate type here so that we expose only
-// that configuration which we support.
-//
-// The zero value for EditorConfig should correspond to its defaults.
-type EditorConfig struct {
- Env map[string]string
- BuildFlags []string
-
- // CodeLenses is a map defining whether codelens are enabled, keyed by the
- // codeLens command. CodeLenses which are not present in this map are left in
- // their default state.
- CodeLenses map[string]bool
-
- // SymbolMatcher is the config associated with the "symbolMatcher" gopls
- // config option.
- SymbolMatcher, SymbolStyle *string
-
- // LimitWorkspaceScope is true if the user does not want to expand their
- // workspace scope to the entire module.
- LimitWorkspaceScope bool
-
- // WorkspaceFolders is the workspace folders to configure on the LSP server,
- // relative to the sandbox workdir.
- //
- // As a special case, if WorkspaceFolders is nil the editor defaults to
- // configuring a single workspace folder corresponding to the workdir root.
- // To explicitly send no workspace folders, use an empty (non-nil) slice.
- WorkspaceFolders []string
-
- // EnableStaticcheck enables staticcheck analyzers.
- EnableStaticcheck bool
-
- // AllExperiments sets the "allExperiments" configuration, which enables
- // all of gopls's opt-in settings.
- AllExperiments bool
-
- // Whether to send the current process ID, for testing data that is joined to
- // the PID. This can only be set by one test.
- SendPID bool
-
- // Whether to edit files with windows line endings.
- WindowsLineEndings bool
-
- // Map of language ID -> regexp to match, used to set the file type of new
- // buffers. Applied as an overlay on top of the following defaults:
- // "go" -> ".*\.go"
- // "go.mod" -> "go\.mod"
- // "go.sum" -> "go\.sum"
- // "gotmpl" -> ".*tmpl"
- FileAssociations map[string]string
-
- // Settings holds arbitrary additional settings to apply to the gopls config.
- // TODO(rfindley): replace existing EditorConfig fields with Settings.
- Settings map[string]interface{}
-
- ImportShortcut string
- DirectoryFilters []string
- VerboseOutput bool
- ExperimentalUseInvalidMetadata bool
-}
-
-// NewEditor Creates a new Editor.
-func NewEditor(sandbox *Sandbox, config EditorConfig) *Editor {
- return &Editor{
- buffers: make(map[string]buffer),
- sandbox: sandbox,
- defaultEnv: sandbox.GoEnv(),
- Config: config,
- }
-}
-
-// Connect configures the editor to communicate with an LSP server on conn. It
-// is not concurrency safe, and should be called at most once, before using the
-// editor.
-//
-// It returns the editor, so that it may be called as follows:
-// editor, err := NewEditor(s).Connect(ctx, conn)
-func (e *Editor) Connect(ctx context.Context, conn jsonrpc2.Conn, hooks ClientHooks) (*Editor, error) {
- e.serverConn = conn
- e.Server = protocol.ServerDispatcher(conn)
- e.client = &Client{editor: e, hooks: hooks}
- conn.Go(ctx,
- protocol.Handlers(
- protocol.ClientHandler(e.client,
- jsonrpc2.MethodNotFound)))
- if err := e.initialize(ctx, e.Config.WorkspaceFolders); err != nil {
- return nil, err
- }
- e.sandbox.Workdir.AddWatcher(e.onFileChanges)
- return e, nil
-}
-
-func (e *Editor) Stats() CallCounts {
- e.callsMu.Lock()
- defer e.callsMu.Unlock()
- return e.calls
-}
-
-// Shutdown issues the 'shutdown' LSP notification.
-func (e *Editor) Shutdown(ctx context.Context) error {
- if e.Server != nil {
- if err := e.Server.Shutdown(ctx); err != nil {
- return errors.Errorf("Shutdown: %w", err)
- }
- }
- return nil
-}
-
-// Exit issues the 'exit' LSP notification.
-func (e *Editor) Exit(ctx context.Context) error {
- if e.Server != nil {
- // Not all LSP clients issue the exit RPC, but we do so here to ensure that
- // we gracefully handle it on multi-session servers.
- if err := e.Server.Exit(ctx); err != nil {
- return errors.Errorf("Exit: %w", err)
- }
- }
- return nil
-}
-
-// Close issues the shutdown and exit sequence an editor should.
-func (e *Editor) Close(ctx context.Context) error {
- if err := e.Shutdown(ctx); err != nil {
- return err
- }
- if err := e.Exit(ctx); err != nil {
- return err
- }
- // called close on the editor should result in the connection closing
- select {
- case <-e.serverConn.Done():
- // connection closed itself
- return nil
- case <-ctx.Done():
- return errors.Errorf("connection not closed: %w", ctx.Err())
- }
-}
-
-// Client returns the LSP client for this editor.
-func (e *Editor) Client() *Client {
- return e.client
-}
-
-func (e *Editor) overlayEnv() map[string]string {
- env := make(map[string]string)
- for k, v := range e.defaultEnv {
- v = strings.ReplaceAll(v, "$SANDBOX_WORKDIR", e.sandbox.Workdir.RootURI().SpanURI().Filename())
- env[k] = v
- }
- for k, v := range e.Config.Env {
- v = strings.ReplaceAll(v, "$SANDBOX_WORKDIR", e.sandbox.Workdir.RootURI().SpanURI().Filename())
- env[k] = v
- }
- return env
-}
-
-func (e *Editor) configuration() map[string]interface{} {
- config := map[string]interface{}{
- "verboseWorkDoneProgress": true,
- "env": e.overlayEnv(),
- "expandWorkspaceToModule": !e.Config.LimitWorkspaceScope,
- "completionBudget": "10s",
- }
-
- for k, v := range e.Config.Settings {
- config[k] = v
- }
-
- if e.Config.BuildFlags != nil {
- config["buildFlags"] = e.Config.BuildFlags
- }
- if e.Config.DirectoryFilters != nil {
- config["directoryFilters"] = e.Config.DirectoryFilters
- }
- if e.Config.ExperimentalUseInvalidMetadata {
- config["experimentalUseInvalidMetadata"] = true
- }
- if e.Config.CodeLenses != nil {
- config["codelenses"] = e.Config.CodeLenses
- }
- if e.Config.SymbolMatcher != nil {
- config["symbolMatcher"] = *e.Config.SymbolMatcher
- }
- if e.Config.SymbolStyle != nil {
- config["symbolStyle"] = *e.Config.SymbolStyle
- }
- if e.Config.EnableStaticcheck {
- config["staticcheck"] = true
- }
- if e.Config.AllExperiments {
- config["allExperiments"] = true
- }
-
- if e.Config.VerboseOutput {
- config["verboseOutput"] = true
- }
-
- if e.Config.ImportShortcut != "" {
- config["importShortcut"] = e.Config.ImportShortcut
- }
-
- config["diagnosticsDelay"] = "10ms"
-
- // ExperimentalWorkspaceModule is only set as a mode, not a configuration.
- return config
-}
-
-func (e *Editor) initialize(ctx context.Context, workspaceFolders []string) error {
- params := &protocol.ParamInitialize{}
- params.ClientInfo.Name = "fakeclient"
- params.ClientInfo.Version = "v1.0.0"
-
- if workspaceFolders == nil {
- workspaceFolders = []string{string(e.sandbox.Workdir.RelativeTo)}
- }
- for _, folder := range workspaceFolders {
- params.WorkspaceFolders = append(params.WorkspaceFolders, protocol.WorkspaceFolder{
- URI: string(e.sandbox.Workdir.URI(folder)),
- Name: filepath.Base(folder),
- })
- }
-
- params.Capabilities.Workspace.Configuration = true
- params.Capabilities.Window.WorkDoneProgress = true
- // TODO: set client capabilities
- params.Capabilities.TextDocument.Completion.CompletionItem.TagSupport.ValueSet = []protocol.CompletionItemTag{protocol.ComplDeprecated}
- params.InitializationOptions = e.configuration()
- if e.Config.SendPID {
- params.ProcessID = int32(os.Getpid())
- }
-
- params.Capabilities.TextDocument.Completion.CompletionItem.SnippetSupport = true
- params.Capabilities.TextDocument.SemanticTokens.Requests.Full = true
- // copied from lsp/semantic.go to avoid import cycle in tests
- params.Capabilities.TextDocument.SemanticTokens.TokenTypes = []string{
- "namespace", "type", "class", "enum", "interface",
- "struct", "typeParameter", "parameter", "variable", "property", "enumMember",
- "event", "function", "method", "macro", "keyword", "modifier", "comment",
- "string", "number", "regexp", "operator",
- }
-
- // This is a bit of a hack, since the fake editor doesn't actually support
- // watching changed files that match a specific glob pattern. However, the
- // editor does send didChangeWatchedFiles notifications, so set this to
- // true.
- params.Capabilities.Workspace.DidChangeWatchedFiles.DynamicRegistration = true
-
- params.Trace = "messages"
- // TODO: support workspace folders.
- if e.Server != nil {
- resp, err := e.Server.Initialize(ctx, params)
- if err != nil {
- return errors.Errorf("initialize: %w", err)
- }
- e.mu.Lock()
- e.serverCapabilities = resp.Capabilities
- e.mu.Unlock()
-
- if err := e.Server.Initialized(ctx, &protocol.InitializedParams{}); err != nil {
- return errors.Errorf("initialized: %w", err)
- }
- }
- // TODO: await initial configuration here, or expect gopls to manage that?
- return nil
-}
-
-// onFileChanges is registered to be called by the Workdir on any writes that
-// go through the Workdir API. It is called synchronously by the Workdir.
-func (e *Editor) onFileChanges(ctx context.Context, evts []FileEvent) {
- if e.Server == nil {
- return
- }
-
- // e may be locked when onFileChanges is called, but it is important that we
- // synchronously increment this counter so that we can subsequently assert on
- // the number of expected DidChangeWatchedFiles calls.
- e.callsMu.Lock()
- e.calls.DidChangeWatchedFiles++
- e.callsMu.Unlock()
-
- // Since e may be locked, we must run this mutation asynchronously.
- go func() {
- e.mu.Lock()
- defer e.mu.Unlock()
- var lspevts []protocol.FileEvent
- for _, evt := range evts {
- // Always send an on-disk change, even for events that seem useless
- // because they're shadowed by an open buffer.
- lspevts = append(lspevts, evt.ProtocolEvent)
-
- if buf, ok := e.buffers[evt.Path]; ok {
- // Following VS Code, don't honor deletions or changes to dirty buffers.
- if buf.dirty || evt.ProtocolEvent.Type == protocol.Deleted {
- continue
- }
-
- content, err := e.sandbox.Workdir.ReadFile(evt.Path)
- if err != nil {
- continue // A race with some other operation.
- }
- // No need to update if the buffer content hasn't changed.
- if content == buf.text() {
- continue
- }
- // During shutdown, this call will fail. Ignore the error.
- _ = e.setBufferContentLocked(ctx, evt.Path, false, lines(content), nil)
- }
- }
- e.Server.DidChangeWatchedFiles(ctx, &protocol.DidChangeWatchedFilesParams{
- Changes: lspevts,
- })
- }()
-}
-
-// OpenFile creates a buffer for the given workdir-relative file.
-func (e *Editor) OpenFile(ctx context.Context, path string) error {
- content, err := e.sandbox.Workdir.ReadFile(path)
- if err != nil {
- return err
- }
- return e.createBuffer(ctx, path, false, content)
-}
-
-// CreateBuffer creates a new unsaved buffer corresponding to the workdir path,
-// containing the given textual content.
-func (e *Editor) CreateBuffer(ctx context.Context, path, content string) error {
- return e.createBuffer(ctx, path, true, content)
-}
-
-func (e *Editor) createBuffer(ctx context.Context, path string, dirty bool, content string) error {
- buf := buffer{
- windowsLineEndings: e.Config.WindowsLineEndings,
- version: 1,
- path: path,
- lines: lines(content),
- dirty: dirty,
- }
- e.mu.Lock()
- defer e.mu.Unlock()
- e.buffers[path] = buf
-
- item := protocol.TextDocumentItem{
- URI: e.sandbox.Workdir.URI(buf.path),
- LanguageID: e.languageID(buf.path),
- Version: int32(buf.version),
- Text: buf.text(),
- }
-
- if e.Server != nil {
- if err := e.Server.DidOpen(ctx, &protocol.DidOpenTextDocumentParams{
- TextDocument: item,
- }); err != nil {
- return errors.Errorf("DidOpen: %w", err)
- }
- e.callsMu.Lock()
- e.calls.DidOpen++
- e.callsMu.Unlock()
- }
- return nil
-}
-
-var defaultFileAssociations = map[string]*regexp.Regexp{
- "go": regexp.MustCompile(`^.*\.go$`), // '$' is important: don't match .gotmpl!
- "go.mod": regexp.MustCompile(`^go\.mod$`),
- "go.sum": regexp.MustCompile(`^go(\.work)?\.sum$`),
- "go.work": regexp.MustCompile(`^go\.work$`),
- "gotmpl": regexp.MustCompile(`^.*tmpl$`),
-}
-
-func (e *Editor) languageID(p string) string {
- base := path.Base(p)
- for lang, re := range e.Config.FileAssociations {
- re := regexp.MustCompile(re)
- if re.MatchString(base) {
- return lang
- }
- }
- for lang, re := range defaultFileAssociations {
- if re.MatchString(base) {
- return lang
- }
- }
- return ""
-}
-
-// lines returns line-ending agnostic line representation of content.
-func lines(content string) []string {
- lines := strings.Split(content, "\n")
- for i, l := range lines {
- lines[i] = strings.TrimSuffix(l, "\r")
- }
- return lines
-}
-
-// CloseBuffer removes the current buffer (regardless of whether it is saved).
-func (e *Editor) CloseBuffer(ctx context.Context, path string) error {
- e.mu.Lock()
- _, ok := e.buffers[path]
- if !ok {
- e.mu.Unlock()
- return ErrUnknownBuffer
- }
- delete(e.buffers, path)
- e.mu.Unlock()
-
- if e.Server != nil {
- if err := e.Server.DidClose(ctx, &protocol.DidCloseTextDocumentParams{
- TextDocument: e.textDocumentIdentifier(path),
- }); err != nil {
- return errors.Errorf("DidClose: %w", err)
- }
- e.callsMu.Lock()
- e.calls.DidClose++
- e.callsMu.Unlock()
- }
- return nil
-}
-
-func (e *Editor) textDocumentIdentifier(path string) protocol.TextDocumentIdentifier {
- return protocol.TextDocumentIdentifier{
- URI: e.sandbox.Workdir.URI(path),
- }
-}
-
-// SaveBuffer writes the content of the buffer specified by the given path to
-// the filesystem.
-func (e *Editor) SaveBuffer(ctx context.Context, path string) error {
- if err := e.OrganizeImports(ctx, path); err != nil {
- return errors.Errorf("organizing imports before save: %w", err)
- }
- if err := e.FormatBuffer(ctx, path); err != nil {
- return errors.Errorf("formatting before save: %w", err)
- }
- return e.SaveBufferWithoutActions(ctx, path)
-}
-
-func (e *Editor) SaveBufferWithoutActions(ctx context.Context, path string) error {
- e.mu.Lock()
- defer e.mu.Unlock()
- buf, ok := e.buffers[path]
- if !ok {
- return fmt.Errorf(fmt.Sprintf("unknown buffer: %q", path))
- }
- content := buf.text()
- includeText := false
- syncOptions, ok := e.serverCapabilities.TextDocumentSync.(protocol.TextDocumentSyncOptions)
- if ok {
- includeText = syncOptions.Save.IncludeText
- }
-
- docID := e.textDocumentIdentifier(buf.path)
- if e.Server != nil {
- if err := e.Server.WillSave(ctx, &protocol.WillSaveTextDocumentParams{
- TextDocument: docID,
- Reason: protocol.Manual,
- }); err != nil {
- return errors.Errorf("WillSave: %w", err)
- }
- }
- if err := e.sandbox.Workdir.WriteFile(ctx, path, content); err != nil {
- return errors.Errorf("writing %q: %w", path, err)
- }
-
- buf.dirty = false
- e.buffers[path] = buf
-
- if e.Server != nil {
- params := &protocol.DidSaveTextDocumentParams{
- TextDocument: docID,
- }
- if includeText {
- params.Text = &content
- }
- if err := e.Server.DidSave(ctx, params); err != nil {
- return errors.Errorf("DidSave: %w", err)
- }
- e.callsMu.Lock()
- e.calls.DidSave++
- e.callsMu.Unlock()
- }
- return nil
-}
-
-// contentPosition returns the (Line, Column) position corresponding to offset
-// in the buffer referenced by path.
-func contentPosition(content string, offset int) (Pos, error) {
- scanner := bufio.NewScanner(strings.NewReader(content))
- start := 0
- line := 0
- for scanner.Scan() {
- end := start + len([]rune(scanner.Text())) + 1
- if offset < end {
- return Pos{Line: line, Column: offset - start}, nil
- }
- start = end
- line++
- }
- if err := scanner.Err(); err != nil {
- return Pos{}, errors.Errorf("scanning content: %w", err)
- }
- // Scan() will drop the last line if it is empty. Correct for this.
- if (strings.HasSuffix(content, "\n") || content == "") && offset == start {
- return Pos{Line: line, Column: 0}, nil
- }
- return Pos{}, fmt.Errorf("position %d out of bounds in %q (line = %d, start = %d)", offset, content, line, start)
-}
-
-// ErrNoMatch is returned if a regexp search fails.
-var (
- ErrNoMatch = errors.New("no match")
- ErrUnknownBuffer = errors.New("unknown buffer")
-)
-
-// regexpRange returns the start and end of the first occurrence of either re
-// or its singular subgroup. It returns ErrNoMatch if the regexp doesn't match.
-func regexpRange(content, re string) (Pos, Pos, error) {
- content = normalizeEOL(content)
- var start, end int
- rec, err := regexp.Compile(re)
- if err != nil {
- return Pos{}, Pos{}, err
- }
- indexes := rec.FindStringSubmatchIndex(content)
- if indexes == nil {
- return Pos{}, Pos{}, ErrNoMatch
- }
- switch len(indexes) {
- case 2:
- // no subgroups: return the range of the regexp expression
- start, end = indexes[0], indexes[1]
- case 4:
- // one subgroup: return its range
- start, end = indexes[2], indexes[3]
- default:
- return Pos{}, Pos{}, fmt.Errorf("invalid search regexp %q: expect either 0 or 1 subgroups, got %d", re, len(indexes)/2-1)
- }
- startPos, err := contentPosition(content, start)
- if err != nil {
- return Pos{}, Pos{}, err
- }
- endPos, err := contentPosition(content, end)
- if err != nil {
- return Pos{}, Pos{}, err
- }
- return startPos, endPos, nil
-}
-
-func normalizeEOL(content string) string {
- return strings.Join(lines(content), "\n")
-}
-
-// RegexpRange returns the first range in the buffer bufName matching re. See
-// RegexpSearch for more information on matching.
-func (e *Editor) RegexpRange(bufName, re string) (Pos, Pos, error) {
- e.mu.Lock()
- defer e.mu.Unlock()
- buf, ok := e.buffers[bufName]
- if !ok {
- return Pos{}, Pos{}, ErrUnknownBuffer
- }
- return regexpRange(buf.text(), re)
-}
-
-// RegexpSearch returns the position of the first match for re in the buffer
-// bufName. For convenience, RegexpSearch supports the following two modes:
-// 1. If re has no subgroups, return the position of the match for re itself.
-// 2. If re has one subgroup, return the position of the first subgroup.
-// It returns an error re is invalid, has more than one subgroup, or doesn't
-// match the buffer.
-func (e *Editor) RegexpSearch(bufName, re string) (Pos, error) {
- start, _, err := e.RegexpRange(bufName, re)
- return start, err
-}
-
-// RegexpReplace edits the buffer corresponding to path by replacing the first
-// instance of re, or its first subgroup, with the replace text. See
-// RegexpSearch for more explanation of these two modes.
-// It returns an error if re is invalid, has more than one subgroup, or doesn't
-// match the buffer.
-func (e *Editor) RegexpReplace(ctx context.Context, path, re, replace string) error {
- e.mu.Lock()
- defer e.mu.Unlock()
- buf, ok := e.buffers[path]
- if !ok {
- return ErrUnknownBuffer
- }
- content := buf.text()
- start, end, err := regexpRange(content, re)
- if err != nil {
- return err
- }
- return e.editBufferLocked(ctx, path, []Edit{{
- Start: start,
- End: end,
- Text: replace,
- }})
-}
-
-// EditBuffer applies the given test edits to the buffer identified by path.
-func (e *Editor) EditBuffer(ctx context.Context, path string, edits []Edit) error {
- e.mu.Lock()
- defer e.mu.Unlock()
- return e.editBufferLocked(ctx, path, edits)
-}
-
-func (e *Editor) SetBufferContent(ctx context.Context, path, content string) error {
- e.mu.Lock()
- defer e.mu.Unlock()
- lines := lines(content)
- return e.setBufferContentLocked(ctx, path, true, lines, nil)
-}
-
-// HasBuffer reports whether the file name is open in the editor.
-func (e *Editor) HasBuffer(name string) bool {
- e.mu.Lock()
- defer e.mu.Unlock()
- _, ok := e.buffers[name]
- return ok
-}
-
-// BufferText returns the content of the buffer with the given name.
-func (e *Editor) BufferText(name string) string {
- e.mu.Lock()
- defer e.mu.Unlock()
- return e.buffers[name].text()
-}
-
-// BufferVersion returns the current version of the buffer corresponding to
-// name (or 0 if it is not being edited).
-func (e *Editor) BufferVersion(name string) int {
- e.mu.Lock()
- defer e.mu.Unlock()
- return e.buffers[name].version
-}
-
-func (e *Editor) editBufferLocked(ctx context.Context, path string, edits []Edit) error {
- buf, ok := e.buffers[path]
- if !ok {
- return fmt.Errorf("unknown buffer %q", path)
- }
- content := make([]string, len(buf.lines))
- copy(content, buf.lines)
- content, err := editContent(content, edits)
- if err != nil {
- return err
- }
- return e.setBufferContentLocked(ctx, path, true, content, edits)
-}
-
-func (e *Editor) setBufferContentLocked(ctx context.Context, path string, dirty bool, content []string, fromEdits []Edit) error {
- buf, ok := e.buffers[path]
- if !ok {
- return fmt.Errorf("unknown buffer %q", path)
- }
- buf.lines = content
- buf.version++
- buf.dirty = dirty
- e.buffers[path] = buf
- // A simple heuristic: if there is only one edit, send it incrementally.
- // Otherwise, send the entire content.
- var evts []protocol.TextDocumentContentChangeEvent
- if len(fromEdits) == 1 {
- evts = append(evts, fromEdits[0].toProtocolChangeEvent())
- } else {
- evts = append(evts, protocol.TextDocumentContentChangeEvent{
- Text: buf.text(),
- })
- }
- params := &protocol.DidChangeTextDocumentParams{
- TextDocument: protocol.VersionedTextDocumentIdentifier{
- Version: int32(buf.version),
- TextDocumentIdentifier: e.textDocumentIdentifier(buf.path),
- },
- ContentChanges: evts,
- }
- if e.Server != nil {
- if err := e.Server.DidChange(ctx, params); err != nil {
- return errors.Errorf("DidChange: %w", err)
- }
- e.callsMu.Lock()
- e.calls.DidChange++
- e.callsMu.Unlock()
- }
- return nil
-}
-
-// GoToDefinition jumps to the definition of the symbol at the given position
-// in an open buffer. It returns the path and position of the resulting jump.
-func (e *Editor) GoToDefinition(ctx context.Context, path string, pos Pos) (string, Pos, error) {
- if err := e.checkBufferPosition(path, pos); err != nil {
- return "", Pos{}, err
- }
- params := &protocol.DefinitionParams{}
- params.TextDocument.URI = e.sandbox.Workdir.URI(path)
- params.Position = pos.ToProtocolPosition()
-
- resp, err := e.Server.Definition(ctx, params)
- if err != nil {
- return "", Pos{}, errors.Errorf("definition: %w", err)
- }
- return e.extractFirstPathAndPos(ctx, resp)
-}
-
-// GoToTypeDefinition jumps to the type definition of the symbol at the given position
-// in an open buffer.
-func (e *Editor) GoToTypeDefinition(ctx context.Context, path string, pos Pos) (string, Pos, error) {
- if err := e.checkBufferPosition(path, pos); err != nil {
- return "", Pos{}, err
- }
- params := &protocol.TypeDefinitionParams{}
- params.TextDocument.URI = e.sandbox.Workdir.URI(path)
- params.Position = pos.ToProtocolPosition()
-
- resp, err := e.Server.TypeDefinition(ctx, params)
- if err != nil {
- return "", Pos{}, errors.Errorf("type definition: %w", err)
- }
- return e.extractFirstPathAndPos(ctx, resp)
-}
-
-// extractFirstPathAndPos returns the path and the position of the first location.
-// It opens the file if needed.
-func (e *Editor) extractFirstPathAndPos(ctx context.Context, locs []protocol.Location) (string, Pos, error) {
- if len(locs) == 0 {
- return "", Pos{}, nil
- }
-
- newPath := e.sandbox.Workdir.URIToPath(locs[0].URI)
- newPos := fromProtocolPosition(locs[0].Range.Start)
- if !e.HasBuffer(newPath) {
- if err := e.OpenFile(ctx, newPath); err != nil {
- return "", Pos{}, errors.Errorf("OpenFile: %w", err)
- }
- }
- return newPath, newPos, nil
-}
-
-// Symbol performs a workspace symbol search using query
-func (e *Editor) Symbol(ctx context.Context, query string) ([]SymbolInformation, error) {
- params := &protocol.WorkspaceSymbolParams{}
- params.Query = query
-
- resp, err := e.Server.Symbol(ctx, params)
- if err != nil {
- return nil, errors.Errorf("symbol: %w", err)
- }
- var res []SymbolInformation
- for _, si := range resp {
- ploc := si.Location
- path := e.sandbox.Workdir.URIToPath(ploc.URI)
- start := fromProtocolPosition(ploc.Range.Start)
- end := fromProtocolPosition(ploc.Range.End)
- rnge := Range{
- Start: start,
- End: end,
- }
- loc := Location{
- Path: path,
- Range: rnge,
- }
- res = append(res, SymbolInformation{
- Name: si.Name,
- Kind: si.Kind,
- Location: loc,
- })
- }
- return res, nil
-}
-
-// OrganizeImports requests and performs the source.organizeImports codeAction.
-func (e *Editor) OrganizeImports(ctx context.Context, path string) error {
- _, err := e.applyCodeActions(ctx, path, nil, nil, protocol.SourceOrganizeImports)
- return err
-}
-
-// RefactorRewrite requests and performs the source.refactorRewrite codeAction.
-func (e *Editor) RefactorRewrite(ctx context.Context, path string, rng *protocol.Range) error {
- applied, err := e.applyCodeActions(ctx, path, rng, nil, protocol.RefactorRewrite)
- if applied == 0 {
- return errors.Errorf("no refactorings were applied")
- }
- return err
-}
-
-// ApplyQuickFixes requests and performs the quickfix codeAction.
-func (e *Editor) ApplyQuickFixes(ctx context.Context, path string, rng *protocol.Range, diagnostics []protocol.Diagnostic) error {
- applied, err := e.applyCodeActions(ctx, path, rng, diagnostics, protocol.SourceFixAll, protocol.QuickFix)
- if applied == 0 {
- return errors.Errorf("no quick fixes were applied")
- }
- return err
-}
-
-// ApplyCodeAction applies the given code action.
-func (e *Editor) ApplyCodeAction(ctx context.Context, action protocol.CodeAction) error {
- for _, change := range action.Edit.DocumentChanges {
- path := e.sandbox.Workdir.URIToPath(change.TextDocument.URI)
- if int32(e.buffers[path].version) != change.TextDocument.Version {
- // Skip edits for old versions.
- continue
- }
- edits := convertEdits(change.Edits)
- if err := e.EditBuffer(ctx, path, edits); err != nil {
- return errors.Errorf("editing buffer %q: %w", path, err)
- }
- }
- // Execute any commands. The specification says that commands are
- // executed after edits are applied.
- if action.Command != nil {
- if _, err := e.ExecuteCommand(ctx, &protocol.ExecuteCommandParams{
- Command: action.Command.Command,
- Arguments: action.Command.Arguments,
- }); err != nil {
- return err
- }
- }
- // Some commands may edit files on disk.
- return e.sandbox.Workdir.CheckForFileChanges(ctx)
-}
-
-// GetQuickFixes returns the available quick fix code actions.
-func (e *Editor) GetQuickFixes(ctx context.Context, path string, rng *protocol.Range, diagnostics []protocol.Diagnostic) ([]protocol.CodeAction, error) {
- return e.getCodeActions(ctx, path, rng, diagnostics, protocol.QuickFix, protocol.SourceFixAll)
-}
-
-func (e *Editor) applyCodeActions(ctx context.Context, path string, rng *protocol.Range, diagnostics []protocol.Diagnostic, only ...protocol.CodeActionKind) (int, error) {
- actions, err := e.getCodeActions(ctx, path, rng, diagnostics, only...)
- if err != nil {
- return 0, err
- }
- applied := 0
- for _, action := range actions {
- if action.Title == "" {
- return 0, errors.Errorf("empty title for code action")
- }
- var match bool
- for _, o := range only {
- if action.Kind == o {
- match = true
- break
- }
- }
- if !match {
- continue
- }
- applied++
- if err := e.ApplyCodeAction(ctx, action); err != nil {
- return 0, err
- }
- }
- return applied, nil
-}
-
-func (e *Editor) getCodeActions(ctx context.Context, path string, rng *protocol.Range, diagnostics []protocol.Diagnostic, only ...protocol.CodeActionKind) ([]protocol.CodeAction, error) {
- if e.Server == nil {
- return nil, nil
- }
- params := &protocol.CodeActionParams{}
- params.TextDocument.URI = e.sandbox.Workdir.URI(path)
- params.Context.Only = only
- if diagnostics != nil {
- params.Context.Diagnostics = diagnostics
- }
- if rng != nil {
- params.Range = *rng
- }
- return e.Server.CodeAction(ctx, params)
-}
-
-func (e *Editor) ExecuteCommand(ctx context.Context, params *protocol.ExecuteCommandParams) (interface{}, error) {
- if e.Server == nil {
- return nil, nil
- }
- var match bool
- // Ensure that this command was actually listed as a supported command.
- for _, command := range e.serverCapabilities.ExecuteCommandProvider.Commands {
- if command == params.Command {
- match = true
- break
- }
- }
- if !match {
- return nil, fmt.Errorf("unsupported command %q", params.Command)
- }
- result, err := e.Server.ExecuteCommand(ctx, params)
- if err != nil {
- return nil, err
- }
- // Some commands use the go command, which writes directly to disk.
- // For convenience, check for those changes.
- if err := e.sandbox.Workdir.CheckForFileChanges(ctx); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func convertEdits(protocolEdits []protocol.TextEdit) []Edit {
- var edits []Edit
- for _, lspEdit := range protocolEdits {
- edits = append(edits, fromProtocolTextEdit(lspEdit))
- }
- return edits
-}
-
-// FormatBuffer gofmts a Go file.
-func (e *Editor) FormatBuffer(ctx context.Context, path string) error {
- if e.Server == nil {
- return nil
- }
- e.mu.Lock()
- version := e.buffers[path].version
- e.mu.Unlock()
- params := &protocol.DocumentFormattingParams{}
- params.TextDocument.URI = e.sandbox.Workdir.URI(path)
- resp, err := e.Server.Formatting(ctx, params)
- if err != nil {
- return errors.Errorf("textDocument/formatting: %w", err)
- }
- e.mu.Lock()
- defer e.mu.Unlock()
- if versionAfter := e.buffers[path].version; versionAfter != version {
- return fmt.Errorf("before receipt of formatting edits, buffer version changed from %d to %d", version, versionAfter)
- }
- edits := convertEdits(resp)
- if len(edits) == 0 {
- return nil
- }
- return e.editBufferLocked(ctx, path, edits)
-}
-
-func (e *Editor) checkBufferPosition(path string, pos Pos) error {
- e.mu.Lock()
- defer e.mu.Unlock()
- buf, ok := e.buffers[path]
- if !ok {
- return fmt.Errorf("buffer %q is not open", path)
- }
- if !inText(pos, buf.lines) {
- return fmt.Errorf("position %v is invalid in buffer %q", pos, path)
- }
- return nil
-}
-
-// RunGenerate runs `go generate` non-recursively in the workdir-relative dir
-// path. It does not report any resulting file changes as a watched file
-// change, so must be followed by a call to Workdir.CheckForFileChanges once
-// the generate command has completed.
-// TODO(rFindley): this shouldn't be necessary anymore. Delete it.
-func (e *Editor) RunGenerate(ctx context.Context, dir string) error {
- if e.Server == nil {
- return nil
- }
- absDir := e.sandbox.Workdir.AbsPath(dir)
- cmd, err := command.NewGenerateCommand("", command.GenerateArgs{
- Dir: protocol.URIFromSpanURI(span.URIFromPath(absDir)),
- Recursive: false,
- })
- if err != nil {
- return err
- }
- params := &protocol.ExecuteCommandParams{
- Command: cmd.Command,
- Arguments: cmd.Arguments,
- }
- if _, err := e.ExecuteCommand(ctx, params); err != nil {
- return fmt.Errorf("running generate: %v", err)
- }
- // Unfortunately we can't simply poll the workdir for file changes here,
- // because server-side command may not have completed. In regtests, we can
- // Await this state change, but here we must delegate that responsibility to
- // the caller.
- return nil
-}
-
-// CodeLens executes a codelens request on the server.
-func (e *Editor) CodeLens(ctx context.Context, path string) ([]protocol.CodeLens, error) {
- if e.Server == nil {
- return nil, nil
- }
- e.mu.Lock()
- _, ok := e.buffers[path]
- e.mu.Unlock()
- if !ok {
- return nil, fmt.Errorf("buffer %q is not open", path)
- }
- params := &protocol.CodeLensParams{
- TextDocument: e.textDocumentIdentifier(path),
- }
- lens, err := e.Server.CodeLens(ctx, params)
- if err != nil {
- return nil, err
- }
- return lens, nil
-}
-
-// Completion executes a completion request on the server.
-func (e *Editor) Completion(ctx context.Context, path string, pos Pos) (*protocol.CompletionList, error) {
- if e.Server == nil {
- return nil, nil
- }
- e.mu.Lock()
- _, ok := e.buffers[path]
- e.mu.Unlock()
- if !ok {
- return nil, fmt.Errorf("buffer %q is not open", path)
- }
- params := &protocol.CompletionParams{
- TextDocumentPositionParams: protocol.TextDocumentPositionParams{
- TextDocument: e.textDocumentIdentifier(path),
- Position: pos.ToProtocolPosition(),
- },
- }
- completions, err := e.Server.Completion(ctx, params)
- if err != nil {
- return nil, err
- }
- return completions, nil
-}
-
-// AcceptCompletion accepts a completion for the given item at the given
-// position.
-func (e *Editor) AcceptCompletion(ctx context.Context, path string, pos Pos, item protocol.CompletionItem) error {
- if e.Server == nil {
- return nil
- }
- e.mu.Lock()
- defer e.mu.Unlock()
- _, ok := e.buffers[path]
- if !ok {
- return fmt.Errorf("buffer %q is not open", path)
- }
- return e.editBufferLocked(ctx, path, convertEdits(append([]protocol.TextEdit{
- *item.TextEdit,
- }, item.AdditionalTextEdits...)))
-}
-
-// Symbols executes a workspace/symbols request on the server.
-func (e *Editor) Symbols(ctx context.Context, sym string) ([]protocol.SymbolInformation, error) {
- if e.Server == nil {
- return nil, nil
- }
- params := &protocol.WorkspaceSymbolParams{Query: sym}
- ans, err := e.Server.Symbol(ctx, params)
- return ans, err
-}
-
-// References executes a reference request on the server.
-func (e *Editor) References(ctx context.Context, path string, pos Pos) ([]protocol.Location, error) {
- if e.Server == nil {
- return nil, nil
- }
- e.mu.Lock()
- _, ok := e.buffers[path]
- e.mu.Unlock()
- if !ok {
- return nil, fmt.Errorf("buffer %q is not open", path)
- }
- params := &protocol.ReferenceParams{
- TextDocumentPositionParams: protocol.TextDocumentPositionParams{
- TextDocument: e.textDocumentIdentifier(path),
- Position: pos.ToProtocolPosition(),
- },
- Context: protocol.ReferenceContext{
- IncludeDeclaration: true,
- },
- }
- locations, err := e.Server.References(ctx, params)
- if err != nil {
- return nil, err
- }
- return locations, nil
-}
-
-func (e *Editor) Rename(ctx context.Context, path string, pos Pos, newName string) error {
- if e.Server == nil {
- return nil
- }
- params := &protocol.RenameParams{
- TextDocument: e.textDocumentIdentifier(path),
- Position: pos.ToProtocolPosition(),
- NewName: newName,
- }
- wsEdits, err := e.Server.Rename(ctx, params)
- if err != nil {
- return err
- }
- for _, change := range wsEdits.DocumentChanges {
- if err := e.applyProtocolEdit(ctx, change); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (e *Editor) applyProtocolEdit(ctx context.Context, change protocol.TextDocumentEdit) error {
- path := e.sandbox.Workdir.URIToPath(change.TextDocument.URI)
- if ver := int32(e.BufferVersion(path)); ver != change.TextDocument.Version {
- return fmt.Errorf("buffer versions for %q do not match: have %d, editing %d", path, ver, change.TextDocument.Version)
- }
- if !e.HasBuffer(path) {
- err := e.OpenFile(ctx, path)
- if os.IsNotExist(err) {
- // TODO: it's unclear if this is correct. Here we create the buffer (with
- // version 1), then apply edits. Perhaps we should apply the edits before
- // sending the didOpen notification.
- e.CreateBuffer(ctx, path, "")
- err = nil
- }
- if err != nil {
- return err
- }
- }
- fakeEdits := convertEdits(change.Edits)
- return e.EditBuffer(ctx, path, fakeEdits)
-}
-
-// CodeAction executes a codeAction request on the server.
-func (e *Editor) CodeAction(ctx context.Context, path string, rng *protocol.Range, diagnostics []protocol.Diagnostic) ([]protocol.CodeAction, error) {
- if e.Server == nil {
- return nil, nil
- }
- e.mu.Lock()
- _, ok := e.buffers[path]
- e.mu.Unlock()
- if !ok {
- return nil, fmt.Errorf("buffer %q is not open", path)
- }
- params := &protocol.CodeActionParams{
- TextDocument: e.textDocumentIdentifier(path),
- Context: protocol.CodeActionContext{
- Diagnostics: diagnostics,
- },
- }
- if rng != nil {
- params.Range = *rng
- }
- lens, err := e.Server.CodeAction(ctx, params)
- if err != nil {
- return nil, err
- }
- return lens, nil
-}
-
-// Hover triggers a hover at the given position in an open buffer.
-func (e *Editor) Hover(ctx context.Context, path string, pos Pos) (*protocol.MarkupContent, Pos, error) {
- if err := e.checkBufferPosition(path, pos); err != nil {
- return nil, Pos{}, err
- }
- params := &protocol.HoverParams{}
- params.TextDocument.URI = e.sandbox.Workdir.URI(path)
- params.Position = pos.ToProtocolPosition()
-
- resp, err := e.Server.Hover(ctx, params)
- if err != nil {
- return nil, Pos{}, errors.Errorf("hover: %w", err)
- }
- if resp == nil {
- return nil, Pos{}, nil
- }
- return &resp.Contents, fromProtocolPosition(resp.Range.Start), nil
-}
-
-func (e *Editor) DocumentLink(ctx context.Context, path string) ([]protocol.DocumentLink, error) {
- if e.Server == nil {
- return nil, nil
- }
- params := &protocol.DocumentLinkParams{}
- params.TextDocument.URI = e.sandbox.Workdir.URI(path)
- return e.Server.DocumentLink(ctx, params)
-}
-
-func (e *Editor) DocumentHighlight(ctx context.Context, path string, pos Pos) ([]protocol.DocumentHighlight, error) {
- if e.Server == nil {
- return nil, nil
- }
- if err := e.checkBufferPosition(path, pos); err != nil {
- return nil, err
- }
- params := &protocol.DocumentHighlightParams{}
- params.TextDocument.URI = e.sandbox.Workdir.URI(path)
- params.Position = pos.ToProtocolPosition()
-
- return e.Server.DocumentHighlight(ctx, params)
-}
diff --git a/internal/lsp/fake/editor_test.go b/internal/lsp/fake/editor_test.go
deleted file mode 100644
index 3ce5df6e0..000000000
--- a/internal/lsp/fake/editor_test.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package fake
-
-import (
- "context"
- "testing"
-)
-
-func TestContentPosition(t *testing.T) {
- content := "foo\n😀\nbar"
- tests := []struct {
- offset, wantLine, wantColumn int
- }{
- {0, 0, 0},
- {3, 0, 3},
- {4, 1, 0},
- {5, 1, 1},
- {6, 2, 0},
- }
- for _, test := range tests {
- pos, err := contentPosition(content, test.offset)
- if err != nil {
- t.Fatal(err)
- }
- if pos.Line != test.wantLine {
- t.Errorf("contentPosition(%q, %d): Line = %d, want %d", content, test.offset, pos.Line, test.wantLine)
- }
- if pos.Column != test.wantColumn {
- t.Errorf("contentPosition(%q, %d): Column = %d, want %d", content, test.offset, pos.Column, test.wantColumn)
- }
- }
-}
-
-const exampleProgram = `
--- go.mod --
-go 1.12
--- main.go --
-package main
-
-import "fmt"
-
-func main() {
- fmt.Println("Hello World.")
-}
-`
-
-func TestClientEditing(t *testing.T) {
- ws, err := NewSandbox(&SandboxConfig{Files: UnpackTxt(exampleProgram)})
- if err != nil {
- t.Fatal(err)
- }
- defer ws.Close()
- ctx := context.Background()
- editor := NewEditor(ws, EditorConfig{})
- if err := editor.OpenFile(ctx, "main.go"); err != nil {
- t.Fatal(err)
- }
- if err := editor.EditBuffer(ctx, "main.go", []Edit{
- {
- Start: Pos{5, 14},
- End: Pos{5, 26},
- Text: "Hola, mundo.",
- },
- }); err != nil {
- t.Fatal(err)
- }
- got := editor.buffers["main.go"].text()
- want := `package main
-
-import "fmt"
-
-func main() {
- fmt.Println("Hola, mundo.")
-}
-`
- if got != want {
- t.Errorf("got text %q, want %q", got, want)
- }
-}
diff --git a/internal/lsp/fake/sandbox.go b/internal/lsp/fake/sandbox.go
deleted file mode 100644
index f628f2d54..000000000
--- a/internal/lsp/fake/sandbox.go
+++ /dev/null
@@ -1,273 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package fake
-
-import (
- "context"
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
- "strings"
-
- "golang.org/x/tools/internal/gocommand"
- "golang.org/x/tools/internal/testenv"
- "golang.org/x/tools/txtar"
- errors "golang.org/x/xerrors"
-)
-
-// Sandbox holds a collection of temporary resources to use for working with Go
-// code in tests.
-type Sandbox struct {
- gopath string
- rootdir string
- goproxy string
- Workdir *Workdir
-}
-
-// SandboxConfig controls the behavior of a test sandbox. The zero value
-// defines a reasonable default.
-type SandboxConfig struct {
- // RootDir sets the base directory to use when creating temporary
- // directories. If not specified, defaults to a new temporary directory.
- RootDir string
- // Files holds a txtar-encoded archive of files to populate the initial state
- // of the working directory.
- //
- // For convenience, the special substring "$SANDBOX_WORKDIR" is replaced with
- // the sandbox's resolved working directory before writing files.
- Files map[string][]byte
- // InGoPath specifies that the working directory should be within the
- // temporary GOPATH.
- InGoPath bool
- // Workdir configures the working directory of the Sandbox. It behaves as
- // follows:
- // - if set to an absolute path, use that path as the working directory.
- // - if set to a relative path, create and use that path relative to the
- // sandbox.
- // - if unset, default to a the 'work' subdirectory of the sandbox.
- //
- // This option is incompatible with InGoPath or Files.
- Workdir string
- // ProxyFiles holds a txtar-encoded archive of files to populate a file-based
- // Go proxy.
- ProxyFiles map[string][]byte
- // GOPROXY is the explicit GOPROXY value that should be used for the sandbox.
- //
- // This option is incompatible with ProxyFiles.
- GOPROXY string
-}
-
-// NewSandbox creates a collection of named temporary resources, with a
-// working directory populated by the txtar-encoded content in srctxt, and a
-// file-based module proxy populated with the txtar-encoded content in
-// proxytxt.
-//
-// If rootDir is non-empty, it will be used as the root of temporary
-// directories created for the sandbox. Otherwise, a new temporary directory
-// will be used as root.
-func NewSandbox(config *SandboxConfig) (_ *Sandbox, err error) {
- if config == nil {
- config = new(SandboxConfig)
- }
- if err := validateConfig(*config); err != nil {
- return nil, fmt.Errorf("invalid SandboxConfig: %v", err)
- }
-
- sb := &Sandbox{}
- defer func() {
- // Clean up if we fail at any point in this constructor.
- if err != nil {
- sb.Close()
- }
- }()
-
- rootDir := config.RootDir
- if rootDir == "" {
- rootDir, err = ioutil.TempDir(config.RootDir, "gopls-sandbox-")
- if err != nil {
- return nil, fmt.Errorf("creating temporary workdir: %v", err)
- }
- }
- sb.rootdir = rootDir
- sb.gopath = filepath.Join(sb.rootdir, "gopath")
- if err := os.Mkdir(sb.gopath, 0755); err != nil {
- return nil, err
- }
- if config.GOPROXY != "" {
- sb.goproxy = config.GOPROXY
- } else {
- proxydir := filepath.Join(sb.rootdir, "proxy")
- if err := os.Mkdir(proxydir, 0755); err != nil {
- return nil, err
- }
- sb.goproxy, err = WriteProxy(proxydir, config.ProxyFiles)
- if err != nil {
- return nil, err
- }
- }
- // Short-circuit writing the workdir if we're given an absolute path, since
- // this is used for running in an existing directory.
- // TODO(findleyr): refactor this to be less of a workaround.
- if filepath.IsAbs(config.Workdir) {
- sb.Workdir = NewWorkdir(config.Workdir)
- return sb, nil
- }
- var workdir string
- if config.Workdir == "" {
- if config.InGoPath {
- // Set the working directory as $GOPATH/src.
- workdir = filepath.Join(sb.gopath, "src")
- } else if workdir == "" {
- workdir = filepath.Join(sb.rootdir, "work")
- }
- } else {
- // relative path
- workdir = filepath.Join(sb.rootdir, config.Workdir)
- }
- if err := os.MkdirAll(workdir, 0755); err != nil {
- return nil, err
- }
- sb.Workdir = NewWorkdir(workdir)
- if err := sb.Workdir.writeInitialFiles(config.Files); err != nil {
- return nil, err
- }
- return sb, nil
-}
-
-// Tempdir creates a new temp directory with the given txtar-encoded files. It
-// is the responsibility of the caller to call os.RemoveAll on the returned
-// file path when it is no longer needed.
-func Tempdir(files map[string][]byte) (string, error) {
- dir, err := ioutil.TempDir("", "gopls-tempdir-")
- if err != nil {
- return "", err
- }
- for name, data := range files {
- if err := WriteFileData(name, data, RelativeTo(dir)); err != nil {
- return "", errors.Errorf("writing to tempdir: %w", err)
- }
- }
- return dir, nil
-}
-
-func UnpackTxt(txt string) map[string][]byte {
- dataMap := make(map[string][]byte)
- archive := txtar.Parse([]byte(txt))
- for _, f := range archive.Files {
- dataMap[f.Name] = f.Data
- }
- return dataMap
-}
-
-func validateConfig(config SandboxConfig) error {
- if filepath.IsAbs(config.Workdir) && (len(config.Files) > 0 || config.InGoPath) {
- return errors.New("absolute Workdir cannot be set in conjunction with Files or InGoPath")
- }
- if config.Workdir != "" && config.InGoPath {
- return errors.New("Workdir cannot be set in conjunction with InGoPath")
- }
- if config.GOPROXY != "" && config.ProxyFiles != nil {
- return errors.New("GOPROXY cannot be set in conjunction with ProxyFiles")
- }
- return nil
-}
-
-// splitModuleVersionPath extracts module information from files stored in the
-// directory structure modulePath@version/suffix.
-// For example:
-// splitModuleVersionPath("mod.com@v1.2.3/package") = ("mod.com", "v1.2.3", "package")
-func splitModuleVersionPath(path string) (modulePath, version, suffix string) {
- parts := strings.Split(path, "/")
- var modulePathParts []string
- for i, p := range parts {
- if strings.Contains(p, "@") {
- mv := strings.SplitN(p, "@", 2)
- modulePathParts = append(modulePathParts, mv[0])
- return strings.Join(modulePathParts, "/"), mv[1], strings.Join(parts[i+1:], "/")
- }
- modulePathParts = append(modulePathParts, p)
- }
- // Default behavior: this is just a module path.
- return path, "", ""
-}
-
-func (sb *Sandbox) RootDir() string {
- return sb.rootdir
-}
-
-// GOPATH returns the value of the Sandbox GOPATH.
-func (sb *Sandbox) GOPATH() string {
- return sb.gopath
-}
-
-// GoEnv returns the default environment variables that can be used for
-// invoking Go commands in the sandbox.
-func (sb *Sandbox) GoEnv() map[string]string {
- vars := map[string]string{
- "GOPATH": sb.GOPATH(),
- "GOPROXY": sb.goproxy,
- "GO111MODULE": "",
- "GOSUMDB": "off",
- "GOPACKAGESDRIVER": "off",
- }
- if testenv.Go1Point() >= 5 {
- vars["GOMODCACHE"] = ""
- }
- return vars
-}
-
-// RunGoCommand executes a go command in the sandbox. If checkForFileChanges is
-// true, the sandbox scans the working directory and emits file change events
-// for any file changes it finds.
-func (sb *Sandbox) RunGoCommand(ctx context.Context, dir, verb string, args []string, checkForFileChanges bool) error {
- var vars []string
- for k, v := range sb.GoEnv() {
- vars = append(vars, fmt.Sprintf("%s=%s", k, v))
- }
- inv := gocommand.Invocation{
- Verb: verb,
- Args: args,
- Env: vars,
- }
- // Use the provided directory for the working directory, if available.
- // sb.Workdir may be nil if we exited the constructor with errors (we call
- // Close to clean up any partial state from the constructor, which calls
- // RunGoCommand).
- if dir != "" {
- inv.WorkingDir = sb.Workdir.AbsPath(dir)
- } else if sb.Workdir != nil {
- inv.WorkingDir = string(sb.Workdir.RelativeTo)
- }
- gocmdRunner := &gocommand.Runner{}
- stdout, stderr, _, err := gocmdRunner.RunRaw(ctx, inv)
- if err != nil {
- return errors.Errorf("go command failed (stdout: %s) (stderr: %s): %v", stdout.String(), stderr.String(), err)
- }
- // Since running a go command may result in changes to workspace files,
- // check if we need to send any any "watched" file events.
- //
- // TODO(rFindley): this side-effect can impact the usability of the sandbox
- // for benchmarks. Consider refactoring.
- if sb.Workdir != nil && checkForFileChanges {
- if err := sb.Workdir.CheckForFileChanges(ctx); err != nil {
- return errors.Errorf("checking for file changes: %w", err)
- }
- }
- return nil
-}
-
-// Close removes all state associated with the sandbox.
-func (sb *Sandbox) Close() error {
- var goCleanErr error
- if sb.gopath != "" {
- goCleanErr = sb.RunGoCommand(context.Background(), "", "clean", []string{"-modcache"}, false)
- }
- err := os.RemoveAll(sb.rootdir)
- if err != nil || goCleanErr != nil {
- return fmt.Errorf("error(s) cleaning sandbox: cleaning modcache: %v; removing files: %v", goCleanErr, err)
- }
- return nil
-}
diff --git a/internal/lsp/fake/workdir.go b/internal/lsp/fake/workdir.go
deleted file mode 100644
index 0be1d8fdf..000000000
--- a/internal/lsp/fake/workdir.go
+++ /dev/null
@@ -1,365 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package fake
-
-import (
- "bytes"
- "context"
- "crypto/sha256"
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
- "runtime"
- "strings"
- "sync"
- "time"
-
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/span"
- errors "golang.org/x/xerrors"
-)
-
-// FileEvent wraps the protocol.FileEvent so that it can be associated with a
-// workdir-relative path.
-type FileEvent struct {
- Path, Content string
- ProtocolEvent protocol.FileEvent
-}
-
-// RelativeTo is a helper for operations relative to a given directory.
-type RelativeTo string
-
-// AbsPath returns an absolute filesystem path for the workdir-relative path.
-func (r RelativeTo) AbsPath(path string) string {
- fp := filepath.FromSlash(path)
- if filepath.IsAbs(fp) {
- return fp
- }
- return filepath.Join(string(r), filepath.FromSlash(path))
-}
-
-// RelPath returns a '/'-encoded path relative to the working directory (or an
-// absolute path if the file is outside of workdir)
-func (r RelativeTo) RelPath(fp string) string {
- root := string(r)
- if rel, err := filepath.Rel(root, fp); err == nil && !strings.HasPrefix(rel, "..") {
- return filepath.ToSlash(rel)
- }
- return filepath.ToSlash(fp)
-}
-
-// WriteFileData writes content to the relative path, replacing the special
-// token $SANDBOX_WORKDIR with the relative root given by rel.
-func WriteFileData(path string, content []byte, rel RelativeTo) error {
- content = bytes.ReplaceAll(content, []byte("$SANDBOX_WORKDIR"), []byte(rel))
- fp := rel.AbsPath(path)
- if err := os.MkdirAll(filepath.Dir(fp), 0755); err != nil {
- return errors.Errorf("creating nested directory: %w", err)
- }
- backoff := 1 * time.Millisecond
- for {
- err := ioutil.WriteFile(fp, []byte(content), 0644)
- if err != nil {
- if isWindowsErrLockViolation(err) {
- time.Sleep(backoff)
- backoff *= 2
- continue
- }
- return errors.Errorf("writing %q: %w", path, err)
- }
- return nil
- }
-}
-
-// isWindowsErrLockViolation reports whether err is ERROR_LOCK_VIOLATION
-// on Windows.
-var isWindowsErrLockViolation = func(err error) bool { return false }
-
-// Workdir is a temporary working directory for tests. It exposes file
-// operations in terms of relative paths, and fakes file watching by triggering
-// events on file operations.
-type Workdir struct {
- RelativeTo
-
- watcherMu sync.Mutex
- watchers []func(context.Context, []FileEvent)
-
- fileMu sync.Mutex
- files map[string]string
-}
-
-// NewWorkdir writes the txtar-encoded file data in txt to dir, and returns a
-// Workir for operating on these files using
-func NewWorkdir(dir string) *Workdir {
- return &Workdir{RelativeTo: RelativeTo(dir)}
-}
-
-func hashFile(data []byte) string {
- return fmt.Sprintf("%x", sha256.Sum256(data))
-}
-
-func (w *Workdir) writeInitialFiles(files map[string][]byte) error {
- w.files = map[string]string{}
- for name, data := range files {
- w.files[name] = hashFile(data)
- if err := WriteFileData(name, data, w.RelativeTo); err != nil {
- return errors.Errorf("writing to workdir: %w", err)
- }
- }
- return nil
-}
-
-// RootURI returns the root URI for this working directory of this scratch
-// environment.
-func (w *Workdir) RootURI() protocol.DocumentURI {
- return toURI(string(w.RelativeTo))
-}
-
-// AddWatcher registers the given func to be called on any file change.
-func (w *Workdir) AddWatcher(watcher func(context.Context, []FileEvent)) {
- w.watcherMu.Lock()
- w.watchers = append(w.watchers, watcher)
- w.watcherMu.Unlock()
-}
-
-// URI returns the URI to a the workdir-relative path.
-func (w *Workdir) URI(path string) protocol.DocumentURI {
- return toURI(w.AbsPath(path))
-}
-
-// URIToPath converts a uri to a workdir-relative path (or an absolute path,
-// if the uri is outside of the workdir).
-func (w *Workdir) URIToPath(uri protocol.DocumentURI) string {
- fp := uri.SpanURI().Filename()
- return w.RelPath(fp)
-}
-
-func toURI(fp string) protocol.DocumentURI {
- return protocol.DocumentURI(span.URIFromPath(fp))
-}
-
-// ReadFile reads a text file specified by a workdir-relative path.
-func (w *Workdir) ReadFile(path string) (string, error) {
- backoff := 1 * time.Millisecond
- for {
- b, err := ioutil.ReadFile(w.AbsPath(path))
- if err != nil {
- if runtime.GOOS == "plan9" && strings.HasSuffix(err.Error(), " exclusive use file already open") {
- // Plan 9 enforces exclusive access to locked files.
- // Give the owner time to unlock it and retry.
- time.Sleep(backoff)
- backoff *= 2
- continue
- }
- return "", err
- }
- return string(b), nil
- }
-}
-
-func (w *Workdir) RegexpRange(path, re string) (Pos, Pos, error) {
- content, err := w.ReadFile(path)
- if err != nil {
- return Pos{}, Pos{}, err
- }
- return regexpRange(content, re)
-}
-
-// RegexpSearch searches the file corresponding to path for the first position
-// matching re.
-func (w *Workdir) RegexpSearch(path string, re string) (Pos, error) {
- content, err := w.ReadFile(path)
- if err != nil {
- return Pos{}, err
- }
- start, _, err := regexpRange(content, re)
- return start, err
-}
-
-// ChangeFilesOnDisk executes the given on-disk file changes in a batch,
-// simulating the action of changing branches outside of an editor.
-func (w *Workdir) ChangeFilesOnDisk(ctx context.Context, events []FileEvent) error {
- for _, e := range events {
- switch e.ProtocolEvent.Type {
- case protocol.Deleted:
- fp := w.AbsPath(e.Path)
- if err := os.Remove(fp); err != nil {
- return errors.Errorf("removing %q: %w", e.Path, err)
- }
- case protocol.Changed, protocol.Created:
- if _, err := w.writeFile(ctx, e.Path, e.Content); err != nil {
- return err
- }
- }
- }
- w.sendEvents(ctx, events)
- return nil
-}
-
-// RemoveFile removes a workdir-relative file path.
-func (w *Workdir) RemoveFile(ctx context.Context, path string) error {
- fp := w.AbsPath(path)
- if err := os.RemoveAll(fp); err != nil {
- return errors.Errorf("removing %q: %w", path, err)
- }
- w.fileMu.Lock()
- defer w.fileMu.Unlock()
-
- evts := []FileEvent{{
- Path: path,
- ProtocolEvent: protocol.FileEvent{
- URI: w.URI(path),
- Type: protocol.Deleted,
- },
- }}
- w.sendEvents(ctx, evts)
- delete(w.files, path)
- return nil
-}
-
-func (w *Workdir) sendEvents(ctx context.Context, evts []FileEvent) {
- if len(evts) == 0 {
- return
- }
- w.watcherMu.Lock()
- watchers := make([]func(context.Context, []FileEvent), len(w.watchers))
- copy(watchers, w.watchers)
- w.watcherMu.Unlock()
- for _, w := range watchers {
- w(ctx, evts)
- }
-}
-
-// WriteFiles writes the text file content to workdir-relative paths.
-// It batches notifications rather than sending them consecutively.
-func (w *Workdir) WriteFiles(ctx context.Context, files map[string]string) error {
- var evts []FileEvent
- for filename, content := range files {
- evt, err := w.writeFile(ctx, filename, content)
- if err != nil {
- return err
- }
- evts = append(evts, evt)
- }
- w.sendEvents(ctx, evts)
- return nil
-}
-
-// WriteFile writes text file content to a workdir-relative path.
-func (w *Workdir) WriteFile(ctx context.Context, path, content string) error {
- evt, err := w.writeFile(ctx, path, content)
- if err != nil {
- return err
- }
- w.sendEvents(ctx, []FileEvent{evt})
- return nil
-}
-
-func (w *Workdir) writeFile(ctx context.Context, path, content string) (FileEvent, error) {
- fp := w.AbsPath(path)
- _, err := os.Stat(fp)
- if err != nil && !os.IsNotExist(err) {
- return FileEvent{}, errors.Errorf("checking if %q exists: %w", path, err)
- }
- var changeType protocol.FileChangeType
- if os.IsNotExist(err) {
- changeType = protocol.Created
- } else {
- changeType = protocol.Changed
- }
- if err := WriteFileData(path, []byte(content), w.RelativeTo); err != nil {
- return FileEvent{}, err
- }
- return FileEvent{
- Path: path,
- ProtocolEvent: protocol.FileEvent{
- URI: w.URI(path),
- Type: changeType,
- },
- }, nil
-}
-
-// listFiles lists files in the given directory, returning a map of relative
-// path to modification time.
-func (w *Workdir) listFiles(dir string) (map[string]string, error) {
- files := make(map[string]string)
- absDir := w.AbsPath(dir)
- if err := filepath.Walk(absDir, func(fp string, info os.FileInfo, err error) error {
- if err != nil {
- return err
- }
- if info.IsDir() {
- return nil
- }
- path := w.RelPath(fp)
- data, err := ioutil.ReadFile(fp)
- if err != nil {
- return err
- }
- files[path] = hashFile(data)
- return nil
- }); err != nil {
- return nil, err
- }
- return files, nil
-}
-
-// CheckForFileChanges walks the working directory and checks for any files
-// that have changed since the last poll.
-func (w *Workdir) CheckForFileChanges(ctx context.Context) error {
- evts, err := w.pollFiles()
- if err != nil {
- return err
- }
- w.sendEvents(ctx, evts)
- return nil
-}
-
-// pollFiles updates w.files and calculates FileEvents corresponding to file
-// state changes since the last poll. It does not call sendEvents.
-func (w *Workdir) pollFiles() ([]FileEvent, error) {
- w.fileMu.Lock()
- defer w.fileMu.Unlock()
-
- files, err := w.listFiles(".")
- if err != nil {
- return nil, err
- }
- var evts []FileEvent
- // Check which files have been added or modified.
- for path, hash := range files {
- oldhash, ok := w.files[path]
- delete(w.files, path)
- var typ protocol.FileChangeType
- switch {
- case !ok:
- typ = protocol.Created
- case oldhash != hash:
- typ = protocol.Changed
- default:
- continue
- }
- evts = append(evts, FileEvent{
- Path: path,
- ProtocolEvent: protocol.FileEvent{
- URI: w.URI(path),
- Type: typ,
- },
- })
- }
- // Any remaining files must have been deleted.
- for path := range w.files {
- evts = append(evts, FileEvent{
- Path: path,
- ProtocolEvent: protocol.FileEvent{
- URI: w.URI(path),
- Type: protocol.Deleted,
- },
- })
- }
- w.files = files
- return evts, nil
-}
diff --git a/internal/lsp/fake/workdir_test.go b/internal/lsp/fake/workdir_test.go
deleted file mode 100644
index 33fbb9fa1..000000000
--- a/internal/lsp/fake/workdir_test.go
+++ /dev/null
@@ -1,192 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package fake
-
-import (
- "context"
- "io/ioutil"
- "os"
- "sort"
- "testing"
- "time"
-
- "golang.org/x/tools/internal/lsp/protocol"
-)
-
-const data = `
--- go.mod --
-go 1.12
--- nested/README.md --
-Hello World!
-`
-
-func newWorkdir(t *testing.T) (*Workdir, <-chan []FileEvent, func()) {
- t.Helper()
-
- tmpdir, err := ioutil.TempDir("", "goplstest-workdir-")
- if err != nil {
- t.Fatal(err)
- }
- wd := NewWorkdir(tmpdir)
- if err := wd.writeInitialFiles(UnpackTxt(data)); err != nil {
- t.Fatal(err)
- }
- cleanup := func() {
- if err := os.RemoveAll(tmpdir); err != nil {
- t.Error(err)
- }
- }
-
- fileEvents := make(chan []FileEvent)
- watch := func(_ context.Context, events []FileEvent) {
- go func() {
- fileEvents <- events
- }()
- }
- wd.AddWatcher(watch)
- return wd, fileEvents, cleanup
-}
-
-func TestWorkdir_ReadFile(t *testing.T) {
- wd, _, cleanup := newWorkdir(t)
- defer cleanup()
-
- got, err := wd.ReadFile("nested/README.md")
- if err != nil {
- t.Fatal(err)
- }
- want := "Hello World!\n"
- if got != want {
- t.Errorf("reading workdir file, got %q, want %q", got, want)
- }
-}
-
-func TestWorkdir_WriteFile(t *testing.T) {
- wd, events, cleanup := newWorkdir(t)
- defer cleanup()
- ctx := context.Background()
-
- tests := []struct {
- path string
- wantType protocol.FileChangeType
- }{
- {"data.txt", protocol.Created},
- {"nested/README.md", protocol.Changed},
- }
-
- for _, test := range tests {
- if err := wd.WriteFile(ctx, test.path, "42"); err != nil {
- t.Fatal(err)
- }
- es := <-events
- if got := len(es); got != 1 {
- t.Fatalf("len(events) = %d, want 1", got)
- }
- if es[0].Path != test.path {
- t.Errorf("event.Path = %q, want %q", es[0].Path, test.path)
- }
- if es[0].ProtocolEvent.Type != test.wantType {
- t.Errorf("event type = %v, want %v", es[0].ProtocolEvent.Type, test.wantType)
- }
- got, err := wd.ReadFile(test.path)
- if err != nil {
- t.Fatal(err)
- }
- want := "42"
- if got != want {
- t.Errorf("ws.ReadFile(%q) = %q, want %q", test.path, got, want)
- }
- }
-}
-
-func TestWorkdir_ListFiles(t *testing.T) {
- wd, _, cleanup := newWorkdir(t)
- defer cleanup()
-
- checkFiles := func(dir string, want []string) {
- files, err := wd.listFiles(dir)
- if err != nil {
- t.Fatal(err)
- }
- sort.Strings(want)
- var got []string
- for p := range files {
- got = append(got, p)
- }
- sort.Strings(got)
- if len(got) != len(want) {
- t.Fatalf("ListFiles(): len = %d, want %d; got=%v; want=%v", len(got), len(want), got, want)
- }
- for i, f := range got {
- if f != want[i] {
- t.Errorf("ListFiles()[%d] = %s, want %s", i, f, want[i])
- }
- }
- }
-
- checkFiles(".", []string{"go.mod", "nested/README.md"})
- checkFiles("nested", []string{"nested/README.md"})
-}
-
-func TestWorkdir_CheckForFileChanges(t *testing.T) {
- t.Skip("broken on darwin-amd64-10_12")
- wd, events, cleanup := newWorkdir(t)
- defer cleanup()
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
- defer cancel()
-
- checkChange := func(path string, typ protocol.FileChangeType) {
- if err := wd.CheckForFileChanges(ctx); err != nil {
- t.Fatal(err)
- }
- var gotEvt FileEvent
- select {
- case <-ctx.Done():
- t.Fatal(ctx.Err())
- case ev := <-events:
- gotEvt = ev[0]
- }
- // Only check relative path and Type
- if gotEvt.Path != path || gotEvt.ProtocolEvent.Type != typ {
- t.Errorf("file events: got %v, want {Path: %s, Type: %v}", gotEvt, path, typ)
- }
- }
- // Sleep some positive amount of time to ensure a distinct mtime.
- time.Sleep(100 * time.Millisecond)
- if err := WriteFileData("go.mod", []byte("module foo.test\n"), wd.RelativeTo); err != nil {
- t.Fatal(err)
- }
- checkChange("go.mod", protocol.Changed)
- if err := WriteFileData("newFile", []byte("something"), wd.RelativeTo); err != nil {
- t.Fatal(err)
- }
- checkChange("newFile", protocol.Created)
- fp := wd.AbsPath("newFile")
- if err := os.Remove(fp); err != nil {
- t.Fatal(err)
- }
- checkChange("newFile", protocol.Deleted)
-}
-
-func TestSplitModuleVersionPath(t *testing.T) {
- tests := []struct {
- path string
- wantModule, wantVersion, wantSuffix string
- }{
- {"foo.com@v1.2.3/bar", "foo.com", "v1.2.3", "bar"},
- {"foo.com/module@v1.2.3/bar", "foo.com/module", "v1.2.3", "bar"},
- {"foo.com@v1.2.3", "foo.com", "v1.2.3", ""},
- {"std@v1.14.0", "std", "v1.14.0", ""},
- {"another/module/path", "another/module/path", "", ""},
- }
-
- for _, test := range tests {
- module, version, suffix := splitModuleVersionPath(test.path)
- if module != test.wantModule || version != test.wantVersion || suffix != test.wantSuffix {
- t.Errorf("splitModuleVersionPath(%q) =\n\t(%q, %q, %q)\nwant\n\t(%q, %q, %q)",
- test.path, module, version, suffix, test.wantModule, test.wantVersion, test.wantSuffix)
- }
- }
-}
diff --git a/internal/lsp/fake/workdir_windows.go b/internal/lsp/fake/workdir_windows.go
deleted file mode 100644
index ed2b4bb36..000000000
--- a/internal/lsp/fake/workdir_windows.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package fake
-
-import (
- "syscall"
-
- errors "golang.org/x/xerrors"
-)
-
-func init() {
- // from https://docs.microsoft.com/en-us/windows/win32/debug/system-error-codes--0-499-
- const ERROR_LOCK_VIOLATION syscall.Errno = 33
-
- isWindowsErrLockViolation = func(err error) bool {
- return errors.Is(err, ERROR_LOCK_VIOLATION)
- }
-}
diff --git a/internal/lsp/folding_range.go b/internal/lsp/folding_range.go
deleted file mode 100644
index 75f48a449..000000000
--- a/internal/lsp/folding_range.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lsp
-
-import (
- "context"
-
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
-)
-
-func (s *Server) foldingRange(ctx context.Context, params *protocol.FoldingRangeParams) ([]protocol.FoldingRange, error) {
- snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.Go)
- defer release()
- if !ok {
- return nil, err
- }
-
- ranges, err := source.FoldingRange(ctx, snapshot, fh, snapshot.View().Options().LineFoldingOnly)
- if err != nil {
- return nil, err
- }
- return toProtocolFoldingRanges(ranges)
-}
-
-func toProtocolFoldingRanges(ranges []*source.FoldingRangeInfo) ([]protocol.FoldingRange, error) {
- result := make([]protocol.FoldingRange, 0, len(ranges))
- for _, info := range ranges {
- rng, err := info.Range()
- if err != nil {
- return nil, err
- }
- result = append(result, protocol.FoldingRange{
- StartLine: rng.Start.Line,
- StartCharacter: rng.Start.Character,
- EndLine: rng.End.Line,
- EndCharacter: rng.End.Character,
- Kind: string(info.Kind),
- })
- }
- return result, nil
-}
diff --git a/internal/lsp/format.go b/internal/lsp/format.go
deleted file mode 100644
index 19736af38..000000000
--- a/internal/lsp/format.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lsp
-
-import (
- "context"
-
- "golang.org/x/tools/internal/lsp/mod"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/lsp/work"
-)
-
-func (s *Server) formatting(ctx context.Context, params *protocol.DocumentFormattingParams) ([]protocol.TextEdit, error) {
- snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.UnknownKind)
- defer release()
- if !ok {
- return nil, err
- }
- switch snapshot.View().FileKind(fh) {
- case source.Mod:
- return mod.Format(ctx, snapshot, fh)
- case source.Go:
- return source.Format(ctx, snapshot, fh)
- case source.Work:
- return work.Format(ctx, snapshot, fh)
- }
- return nil, nil
-}
diff --git a/internal/lsp/fuzzy/input_test.go b/internal/lsp/fuzzy/input_test.go
deleted file mode 100644
index 0228347e4..000000000
--- a/internal/lsp/fuzzy/input_test.go
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package fuzzy_test
-
-import (
- "bytes"
- "sort"
- "testing"
-
- "golang.org/x/tools/internal/lsp/fuzzy"
-)
-
-var rolesTests = []struct {
- str string
- want string
-}{
- {str: "abc::def::goo", want: "Ccc//Ccc//Ccc"},
- {str: "proto::Message", want: "Ccccc//Ccccccc"},
- {str: "AbstractSWTFactory", want: "CcccccccCuuCcccccc"},
- {str: "Abs012", want: "Cccccc"},
- {str: "/", want: " "},
- {str: "fOO", want: "CCu"},
- {str: "fo_oo.o_oo", want: "Cc Cc/C Cc"},
-}
-
-func rolesString(roles []fuzzy.RuneRole) string {
- var buf bytes.Buffer
- for _, r := range roles {
- buf.WriteByte(" /cuC"[int(r)])
- }
- return buf.String()
-}
-
-func TestRoles(t *testing.T) {
- for _, tc := range rolesTests {
- gotRoles := make([]fuzzy.RuneRole, len(tc.str))
- fuzzy.RuneRoles([]byte(tc.str), gotRoles)
- got := rolesString(gotRoles)
- if got != tc.want {
- t.Errorf("roles(%s) = %v; want %v", tc.str, got, tc.want)
- }
- }
-}
-
-var wordSplitTests = []struct {
- input string
- want []string
-}{
- {
- input: "foo bar baz",
- want: []string{"foo", "bar", "baz"},
- },
- {
- input: "fooBarBaz",
- want: []string{"foo", "Bar", "Baz"},
- },
- {
- input: "FOOBarBAZ",
- want: []string{"FOO", "Bar", "BAZ"},
- },
- {
- input: "foo123_bar2Baz3",
- want: []string{"foo123", "bar2", "Baz3"},
- },
-}
-
-func TestWordSplit(t *testing.T) {
- for _, tc := range wordSplitTests {
- roles := fuzzy.RuneRoles([]byte(tc.input), nil)
-
- var got []string
- consumer := func(i, j int) {
- got = append(got, tc.input[i:j])
- }
- fuzzy.Words(roles, consumer)
-
- if eq := diffStringLists(tc.want, got); !eq {
- t.Errorf("input %v: (want %v -> got %v)", tc.input, tc.want, got)
- }
- }
-}
-
-func diffStringLists(a, b []string) bool {
- if len(a) != len(b) {
- return false
- }
- sort.Strings(a)
- sort.Strings(b)
- for i := range a {
- if a[i] != b[i] {
- return false
- }
- }
- return true
-}
-
-var lastSegmentSplitTests = []struct {
- str string
- want string
-}{
- {
- str: "identifier",
- want: "identifier",
- },
- {
- str: "two_words",
- want: "two_words",
- },
- {
- str: "first::second",
- want: "second",
- },
- {
- str: "foo.bar.FOOBar_buz123_test",
- want: "FOOBar_buz123_test",
- },
-}
-
-func TestLastSegment(t *testing.T) {
- for _, tc := range lastSegmentSplitTests {
- roles := fuzzy.RuneRoles([]byte(tc.str), nil)
-
- got := fuzzy.LastSegment(tc.str, roles)
-
- if got != tc.want {
- t.Errorf("str %v: want %v; got %v", tc.str, tc.want, got)
- }
- }
-}
-
-func BenchmarkRoles(b *testing.B) {
- str := "AbstractSWTFactory"
- out := make([]fuzzy.RuneRole, len(str))
-
- for i := 0; i < b.N; i++ {
- fuzzy.RuneRoles([]byte(str), out)
- }
- b.SetBytes(int64(len(str)))
-}
diff --git a/internal/lsp/fuzzy/matcher.go b/internal/lsp/fuzzy/matcher.go
deleted file mode 100644
index 265cdcf16..000000000
--- a/internal/lsp/fuzzy/matcher.go
+++ /dev/null
@@ -1,407 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package fuzzy implements a fuzzy matching algorithm.
-package fuzzy
-
-import (
- "bytes"
- "fmt"
-)
-
-const (
- // MaxInputSize is the maximum size of the input scored against the fuzzy matcher. Longer inputs
- // will be truncated to this size.
- MaxInputSize = 127
- // MaxPatternSize is the maximum size of the pattern used to construct the fuzzy matcher. Longer
- // inputs are truncated to this size.
- MaxPatternSize = 63
-)
-
-type scoreVal int
-
-func (s scoreVal) val() int {
- return int(s) >> 1
-}
-
-func (s scoreVal) prevK() int {
- return int(s) & 1
-}
-
-func score(val int, prevK int /*0 or 1*/) scoreVal {
- return scoreVal(val<<1 + prevK)
-}
-
-// Matcher implements a fuzzy matching algorithm for scoring candidates against a pattern.
-// The matcher does not support parallel usage.
-type Matcher struct {
- pattern string
- patternLower []byte // lower-case version of the pattern
- patternShort []byte // first characters of the pattern
- caseSensitive bool // set if the pattern is mix-cased
-
- patternRoles []RuneRole // the role of each character in the pattern
- roles []RuneRole // the role of each character in the tested string
-
- scores [MaxInputSize + 1][MaxPatternSize + 1][2]scoreVal
-
- scoreScale float32
-
- lastCandidateLen int // in bytes
- lastCandidateMatched bool
-
- // Reusable buffers to avoid allocating for every candidate.
- // - inputBuf stores the concatenated input chunks
- // - lowerBuf stores the last candidate in lower-case
- // - rolesBuf stores the calculated roles for each rune in the last
- // candidate.
- inputBuf [MaxInputSize]byte
- lowerBuf [MaxInputSize]byte
- rolesBuf [MaxInputSize]RuneRole
-}
-
-func (m *Matcher) bestK(i, j int) int {
- if m.scores[i][j][0].val() < m.scores[i][j][1].val() {
- return 1
- }
- return 0
-}
-
-// NewMatcher returns a new fuzzy matcher for scoring candidates against the provided pattern.
-func NewMatcher(pattern string) *Matcher {
- if len(pattern) > MaxPatternSize {
- pattern = pattern[:MaxPatternSize]
- }
-
- m := &Matcher{
- pattern: pattern,
- patternLower: toLower([]byte(pattern), nil),
- }
-
- for i, c := range m.patternLower {
- if pattern[i] != c {
- m.caseSensitive = true
- break
- }
- }
-
- if len(pattern) > 3 {
- m.patternShort = m.patternLower[:3]
- } else {
- m.patternShort = m.patternLower
- }
-
- m.patternRoles = RuneRoles([]byte(pattern), nil)
-
- if len(pattern) > 0 {
- maxCharScore := 4
- m.scoreScale = 1 / float32(maxCharScore*len(pattern))
- }
-
- return m
-}
-
-// Score returns the score returned by matching the candidate to the pattern.
-// This is not designed for parallel use. Multiple candidates must be scored sequentially.
-// Returns a score between 0 and 1 (0 - no match, 1 - perfect match).
-func (m *Matcher) Score(candidate string) float32 {
- return m.ScoreChunks([]string{candidate})
-}
-
-func (m *Matcher) ScoreChunks(chunks []string) float32 {
- candidate := fromChunks(chunks, m.inputBuf[:])
- if len(candidate) > MaxInputSize {
- candidate = candidate[:MaxInputSize]
- }
- lower := toLower(candidate, m.lowerBuf[:])
- m.lastCandidateLen = len(candidate)
-
- if len(m.pattern) == 0 {
- // Empty patterns perfectly match candidates.
- return 1
- }
-
- if m.match(candidate, lower) {
- sc := m.computeScore(candidate, lower)
- if sc > minScore/2 && !m.poorMatch() {
- m.lastCandidateMatched = true
- if len(m.pattern) == len(candidate) {
- // Perfect match.
- return 1
- }
-
- if sc < 0 {
- sc = 0
- }
- normalizedScore := float32(sc) * m.scoreScale
- if normalizedScore > 1 {
- normalizedScore = 1
- }
-
- return normalizedScore
- }
- }
-
- m.lastCandidateMatched = false
- return 0
-}
-
-const minScore = -10000
-
-// MatchedRanges returns matches ranges for the last scored string as a flattened array of
-// [begin, end) byte offset pairs.
-func (m *Matcher) MatchedRanges() []int {
- if len(m.pattern) == 0 || !m.lastCandidateMatched {
- return nil
- }
- i, j := m.lastCandidateLen, len(m.pattern)
- if m.scores[i][j][0].val() < minScore/2 && m.scores[i][j][1].val() < minScore/2 {
- return nil
- }
-
- var ret []int
- k := m.bestK(i, j)
- for i > 0 {
- take := (k == 1)
- k = m.scores[i][j][k].prevK()
- if take {
- if len(ret) == 0 || ret[len(ret)-1] != i {
- ret = append(ret, i)
- ret = append(ret, i-1)
- } else {
- ret[len(ret)-1] = i - 1
- }
- j--
- }
- i--
- }
- // Reverse slice.
- for i := 0; i < len(ret)/2; i++ {
- ret[i], ret[len(ret)-1-i] = ret[len(ret)-1-i], ret[i]
- }
- return ret
-}
-
-func (m *Matcher) match(candidate []byte, candidateLower []byte) bool {
- i, j := 0, 0
- for ; i < len(candidateLower) && j < len(m.patternLower); i++ {
- if candidateLower[i] == m.patternLower[j] {
- j++
- }
- }
- if j != len(m.patternLower) {
- return false
- }
-
- // The input passes the simple test against pattern, so it is time to classify its characters.
- // Character roles are used below to find the last segment.
- m.roles = RuneRoles(candidate, m.rolesBuf[:])
-
- return true
-}
-
-func (m *Matcher) computeScore(candidate []byte, candidateLower []byte) int {
- pattLen, candLen := len(m.pattern), len(candidate)
-
- for j := 0; j <= len(m.pattern); j++ {
- m.scores[0][j][0] = minScore << 1
- m.scores[0][j][1] = minScore << 1
- }
- m.scores[0][0][0] = score(0, 0) // Start with 0.
-
- segmentsLeft, lastSegStart := 1, 0
- for i := 0; i < candLen; i++ {
- if m.roles[i] == RSep {
- segmentsLeft++
- lastSegStart = i + 1
- }
- }
-
- // A per-character bonus for a consecutive match.
- consecutiveBonus := 2
- wordIdx := 0 // Word count within segment.
- for i := 1; i <= candLen; i++ {
-
- role := m.roles[i-1]
- isHead := role == RHead
-
- if isHead {
- wordIdx++
- } else if role == RSep && segmentsLeft > 1 {
- wordIdx = 0
- segmentsLeft--
- }
-
- var skipPenalty int
- if i == 1 || (i-1) == lastSegStart {
- // Skipping the start of first or last segment.
- skipPenalty++
- }
-
- for j := 0; j <= pattLen; j++ {
- // By default, we don't have a match. Fill in the skip data.
- m.scores[i][j][1] = minScore << 1
-
- // Compute the skip score.
- k := 0
- if m.scores[i-1][j][0].val() < m.scores[i-1][j][1].val() {
- k = 1
- }
-
- skipScore := m.scores[i-1][j][k].val()
- // Do not penalize missing characters after the last matched segment.
- if j != pattLen {
- skipScore -= skipPenalty
- }
- m.scores[i][j][0] = score(skipScore, k)
-
- if j == 0 || candidateLower[i-1] != m.patternLower[j-1] {
- // Not a match.
- continue
- }
- pRole := m.patternRoles[j-1]
-
- if role == RTail && pRole == RHead {
- if j > 1 {
- // Not a match: a head in the pattern matches a tail character in the candidate.
- continue
- }
- // Special treatment for the first character of the pattern. We allow
- // matches in the middle of a word if they are long enough, at least
- // min(3, pattern.length) characters.
- if !bytes.HasPrefix(candidateLower[i-1:], m.patternShort) {
- continue
- }
- }
-
- // Compute the char score.
- var charScore int
- // Bonus 1: the char is in the candidate's last segment.
- if segmentsLeft <= 1 {
- charScore++
- }
- // Bonus 2: Case match or a Head in the pattern aligns with one in the word.
- // Single-case patterns lack segmentation signals and we assume any character
- // can be a head of a segment.
- if candidate[i-1] == m.pattern[j-1] || role == RHead && (!m.caseSensitive || pRole == RHead) {
- charScore++
- }
-
- // Penalty 1: pattern char is Head, candidate char is Tail.
- if role == RTail && pRole == RHead {
- charScore--
- }
- // Penalty 2: first pattern character matched in the middle of a word.
- if j == 1 && role == RTail {
- charScore -= 4
- }
-
- // Third dimension encodes whether there is a gap between the previous match and the current
- // one.
- for k := 0; k < 2; k++ {
- sc := m.scores[i-1][j-1][k].val() + charScore
-
- isConsecutive := k == 1 || i-1 == 0 || i-1 == lastSegStart
- if isConsecutive {
- // Bonus 3: a consecutive match. First character match also gets a bonus to
- // ensure prefix final match score normalizes to 1.0.
- // Logically, this is a part of charScore, but we have to compute it here because it
- // only applies for consecutive matches (k == 1).
- sc += consecutiveBonus
- }
- if k == 0 {
- // Penalty 3: Matching inside a segment (and previous char wasn't matched). Penalize for the lack
- // of alignment.
- if role == RTail || role == RUCTail {
- sc -= 3
- }
- }
-
- if sc > m.scores[i][j][1].val() {
- m.scores[i][j][1] = score(sc, k)
- }
- }
- }
- }
-
- result := m.scores[len(candidate)][len(m.pattern)][m.bestK(len(candidate), len(m.pattern))].val()
-
- return result
-}
-
-// ScoreTable returns the score table computed for the provided candidate. Used only for debugging.
-func (m *Matcher) ScoreTable(candidate string) string {
- var buf bytes.Buffer
-
- var line1, line2, separator bytes.Buffer
- line1.WriteString("\t")
- line2.WriteString("\t")
- for j := 0; j < len(m.pattern); j++ {
- line1.WriteString(fmt.Sprintf("%c\t\t", m.pattern[j]))
- separator.WriteString("----------------")
- }
-
- buf.WriteString(line1.String())
- buf.WriteString("\n")
- buf.WriteString(separator.String())
- buf.WriteString("\n")
-
- for i := 1; i <= len(candidate); i++ {
- line1.Reset()
- line2.Reset()
-
- line1.WriteString(fmt.Sprintf("%c\t", candidate[i-1]))
- line2.WriteString("\t")
-
- for j := 1; j <= len(m.pattern); j++ {
- line1.WriteString(fmt.Sprintf("M%6d(%c)\t", m.scores[i][j][0].val(), dir(m.scores[i][j][0].prevK())))
- line2.WriteString(fmt.Sprintf("H%6d(%c)\t", m.scores[i][j][1].val(), dir(m.scores[i][j][1].prevK())))
- }
- buf.WriteString(line1.String())
- buf.WriteString("\n")
- buf.WriteString(line2.String())
- buf.WriteString("\n")
- buf.WriteString(separator.String())
- buf.WriteString("\n")
- }
-
- return buf.String()
-}
-
-func dir(prevK int) rune {
- if prevK == 0 {
- return 'M'
- }
- return 'H'
-}
-
-func (m *Matcher) poorMatch() bool {
- if len(m.pattern) < 2 {
- return false
- }
-
- i, j := m.lastCandidateLen, len(m.pattern)
- k := m.bestK(i, j)
-
- var counter, len int
- for i > 0 {
- take := (k == 1)
- k = m.scores[i][j][k].prevK()
- if take {
- len++
- if k == 0 && len < 3 && m.roles[i-1] == RTail {
- // Short match in the middle of a word
- counter++
- if counter > 1 {
- return true
- }
- }
- j--
- } else {
- len = 0
- }
- i--
- }
- return false
-}
diff --git a/internal/lsp/fuzzy/matcher_test.go b/internal/lsp/fuzzy/matcher_test.go
deleted file mode 100644
index bac81c098..000000000
--- a/internal/lsp/fuzzy/matcher_test.go
+++ /dev/null
@@ -1,295 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Benchmark results:
-//
-// BenchmarkMatcher-12 1000000 1615 ns/op 30.95 MB/s 0 B/op 0 allocs/op
-//
-package fuzzy_test
-
-import (
- "bytes"
- "fmt"
- "math"
- "testing"
-
- "golang.org/x/tools/internal/lsp/fuzzy"
-)
-
-type comparator struct {
- f func(val, ref float32) bool
- descr string
-}
-
-var (
- eq = comparator{
- f: func(val, ref float32) bool {
- return val == ref
- },
- descr: "==",
- }
- ge = comparator{
- f: func(val, ref float32) bool {
- return val >= ref
- },
- descr: ">=",
- }
- gt = comparator{
- f: func(val, ref float32) bool {
- return val > ref
- },
- descr: ">",
- }
-)
-
-func (c comparator) eval(val, ref float32) bool {
- return c.f(val, ref)
-}
-
-func (c comparator) String() string {
- return c.descr
-}
-
-type scoreTest struct {
- candidate string
- comparator
- ref float32
-}
-
-var matcherTests = []struct {
- pattern string
- tests []scoreTest
-}{
- {
- pattern: "",
- tests: []scoreTest{
- {"def", eq, 1},
- {"Ab stuff c", eq, 1},
- },
- },
- {
- pattern: "abc",
- tests: []scoreTest{
- {"def", eq, 0},
- {"abd", eq, 0},
- {"abc", ge, 0},
- {"Abc", ge, 0},
- {"Ab stuff c", ge, 0},
- },
- },
- {
- pattern: "Abc",
- tests: []scoreTest{
- {"def", eq, 0},
- {"abd", eq, 0},
- {"abc", ge, 0},
- {"Abc", ge, 0},
- {"Ab stuff c", ge, 0},
- },
- },
- {
- pattern: "U",
- tests: []scoreTest{
- {"ErrUnexpectedEOF", gt, 0},
- {"ErrUnexpectedEOF.Error", eq, 0},
- },
- },
-}
-
-func TestScore(t *testing.T) {
- for _, tc := range matcherTests {
- m := fuzzy.NewMatcher(tc.pattern)
- for _, sct := range tc.tests {
- score := m.Score(sct.candidate)
- if !sct.comparator.eval(score, sct.ref) {
- t.Errorf("m.Score(%q) = %.2g, want %s %v", sct.candidate, score, sct.comparator, sct.ref)
- }
- }
- }
-}
-
-var compareCandidatesTestCases = []struct {
- pattern string
- orderedCandidates []string
-}{
- {
- pattern: "Foo",
- orderedCandidates: []string{
- "Barfoo",
- "Faoo",
- "F_o_o",
- "FaoFooa",
- "BarFoo",
- "F__oo",
- "F_oo",
- "FooA",
- "FooBar",
- "Foo",
- },
- },
- {
- pattern: "U",
- orderedCandidates: []string{
- "ErrUnexpectedEOF.Error",
- "ErrUnexpectedEOF",
- },
- },
-}
-
-func TestCompareCandidateScores(t *testing.T) {
- for _, tc := range compareCandidatesTestCases {
- m := fuzzy.NewMatcher(tc.pattern)
-
- var prevScore float32
- prevCand := "MIN_SCORE"
- for _, cand := range tc.orderedCandidates {
- score := m.Score(cand)
- if prevScore > score {
- t.Errorf("%s[=%v] is scored lower than %s[=%v]", cand, score, prevCand, prevScore)
- }
- if score < -1 || score > 1 {
- t.Errorf("%s score is %v; want value between [-1, 1]", cand, score)
- }
- prevScore = score
- prevCand = cand
- }
- }
-}
-
-var fuzzyMatcherTestCases = []struct {
- p string
- str string
- want string
-}{
- {p: "foo", str: "abc::foo", want: "abc::[foo]"},
- {p: "foo", str: "foo.foo", want: "foo.[foo]"},
- {p: "foo", str: "fo_oo.o_oo", want: "[fo]_oo.[o]_oo"},
- {p: "foo", str: "fo_oo.fo_oo", want: "fo_oo.[fo]_[o]o"},
- {p: "fo_o", str: "fo_oo.o_oo", want: "[f]o_oo.[o_o]o"},
- {p: "fOO", str: "fo_oo.o_oo", want: "[f]o_oo.[o]_[o]o"},
- {p: "tedit", str: "foo.TextEdit", want: "foo.[T]ext[Edit]"},
- {p: "TEdit", str: "foo.TextEdit", want: "foo.[T]ext[Edit]"},
- {p: "Tedit", str: "foo.TextEdit", want: "foo.[T]ext[Edit]"},
- {p: "Tedit", str: "foo.Textedit", want: "foo.[Te]xte[dit]"},
- {p: "TEdit", str: "foo.Textedit", want: ""},
- {p: "te", str: "foo.Textedit", want: "foo.[Te]xtedit"},
- {p: "ee", str: "foo.Textedit", want: ""}, // short middle of the word match
- {p: "ex", str: "foo.Textedit", want: "foo.T[ex]tedit"},
- {p: "exdi", str: "foo.Textedit", want: ""}, // short middle of the word match
- {p: "exdit", str: "foo.Textedit", want: ""}, // short middle of the word match
- {p: "extdit", str: "foo.Textedit", want: "foo.T[ext]e[dit]"},
- {p: "e", str: "foo.Textedit", want: "foo.T[e]xtedit"},
- {p: "E", str: "foo.Textedit", want: "foo.T[e]xtedit"},
- {p: "ed", str: "foo.Textedit", want: "foo.Text[ed]it"},
- {p: "edt", str: "foo.Textedit", want: ""}, // short middle of the word match
- {p: "edit", str: "foo.Textedit", want: "foo.Text[edit]"},
- {p: "edin", str: "foo.TexteditNum", want: "foo.Text[edi]t[N]um"},
- {p: "n", str: "node.GoNodeMax", want: "[n]ode.GoNodeMax"},
- {p: "N", str: "node.GoNodeMax", want: "[n]ode.GoNodeMax"},
- {p: "completio", str: "completion", want: "[completio]n"},
- {p: "completio", str: "completion.None", want: "[completio]n.None"},
-}
-
-func TestFuzzyMatcherRanges(t *testing.T) {
- for _, tc := range fuzzyMatcherTestCases {
- matcher := fuzzy.NewMatcher(tc.p)
- score := matcher.Score(tc.str)
- if tc.want == "" {
- if score > 0 {
- t.Errorf("Score(%s, %s) = %v; want: <= 0", tc.p, tc.str, score)
- }
- continue
- }
- if score < 0 {
- t.Errorf("Score(%s, %s) = %v, want: > 0", tc.p, tc.str, score)
- continue
- }
- got := highlightMatches(tc.str, matcher)
- if tc.want != got {
- t.Errorf("highlightMatches(%s, %s) = %v, want: %v", tc.p, tc.str, got, tc.want)
- }
- }
-}
-
-var scoreTestCases = []struct {
- p string
- str string
- want float64
-}{
- // Score precision up to five digits. Modify if changing the score, but make sure the new values
- // are reasonable.
- {p: "abc", str: "abc", want: 1},
- {p: "abc", str: "Abc", want: 1},
- {p: "abc", str: "Abcdef", want: 1},
- {p: "strc", str: "StrCat", want: 1},
- {p: "abc_def", str: "abc_def_xyz", want: 1},
- {p: "abcdef", str: "abc_def_xyz", want: 0.91667},
- {p: "abcxyz", str: "abc_def_xyz", want: 0.91667},
- {p: "sc", str: "StrCat", want: 0.75},
- {p: "abc", str: "AbstrBasicCtor", want: 0.83333},
- {p: "foo", str: "abc::foo", want: 0.91667},
- {p: "afoo", str: "abc::foo", want: 0.9375},
- {p: "abr", str: "abc::bar", want: 0.5},
- {p: "br", str: "abc::bar", want: 0.25},
- {p: "aar", str: "abc::bar", want: 0.41667},
- {p: "edin", str: "foo.TexteditNum", want: 0.125},
- {p: "ediu", str: "foo.TexteditNum", want: 0},
- // We want the next two items to have roughly similar scores.
- {p: "up", str: "unique_ptr", want: 0.75},
- {p: "up", str: "upper_bound", want: 1},
-}
-
-func TestScores(t *testing.T) {
- for _, tc := range scoreTestCases {
- matcher := fuzzy.NewMatcher(tc.p)
- got := math.Round(float64(matcher.Score(tc.str))*1e5) / 1e5
- if got != tc.want {
- t.Errorf("Score(%s, %s) = %v, want: %v", tc.p, tc.str, got, tc.want)
- }
- }
-}
-
-func highlightMatches(str string, matcher *fuzzy.Matcher) string {
- matches := matcher.MatchedRanges()
-
- var buf bytes.Buffer
- index := 0
- for i := 0; i < len(matches)-1; i += 2 {
- s, e := matches[i], matches[i+1]
- fmt.Fprintf(&buf, "%s[%s]", str[index:s], str[s:e])
- index = e
- }
- buf.WriteString(str[index:])
- return buf.String()
-}
-
-func BenchmarkMatcher(b *testing.B) {
- pattern := "Foo"
- candidates := []string{
- "F_o_o",
- "Barfoo",
- "Faoo",
- "F__oo",
- "F_oo",
- "FaoFooa",
- "BarFoo",
- "FooA",
- "FooBar",
- "Foo",
- }
-
- matcher := fuzzy.NewMatcher(pattern)
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- for _, c := range candidates {
- matcher.Score(c)
- }
- }
- var numBytes int
- for _, c := range candidates {
- numBytes += len(c)
- }
- b.SetBytes(int64(numBytes))
-}
diff --git a/internal/lsp/fuzzy/symbol.go b/internal/lsp/fuzzy/symbol.go
deleted file mode 100644
index df9fbd514..000000000
--- a/internal/lsp/fuzzy/symbol.go
+++ /dev/null
@@ -1,236 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package fuzzy
-
-import (
- "unicode"
-)
-
-// SymbolMatcher implements a fuzzy matching algorithm optimized for Go symbols
-// of the form:
-// example.com/path/to/package.object.field
-//
-// Knowing that we are matching symbols like this allows us to make the
-// following optimizations:
-// - We can incorporate right-to-left relevance directly into the score
-// calculation.
-// - We can match from right to left, discarding leading bytes if the input is
-// too long.
-// - We just take the right-most match without losing too much precision. This
-// allows us to use an O(n) algorithm.
-// - We can operate directly on chunked strings; in many cases we will
-// be storing the package path and/or package name separately from the
-// symbol or identifiers, so doing this avoids allocating strings.
-// - We can return the index of the right-most match, allowing us to trim
-// irrelevant qualification.
-//
-// This implementation is experimental, serving as a reference fast algorithm
-// to compare to the fuzzy algorithm implemented by Matcher.
-type SymbolMatcher struct {
- // Using buffers of length 256 is both a reasonable size for most qualified
- // symbols, and makes it easy to avoid bounds checks by using uint8 indexes.
- pattern [256]rune
- patternLen uint8
- inputBuffer [256]rune // avoid allocating when considering chunks
- roles [256]uint32 // which roles does a rune play (word start, etc.)
- segments [256]uint8 // how many segments from the right is each rune
-}
-
-const (
- segmentStart uint32 = 1 << iota
- wordStart
- separator
-)
-
-// NewSymbolMatcher creates a SymbolMatcher that may be used to match the given
-// search pattern.
-//
-// Currently this matcher only accepts case-insensitive fuzzy patterns.
-//
-// An empty pattern matches no input.
-func NewSymbolMatcher(pattern string) *SymbolMatcher {
- m := &SymbolMatcher{}
- for _, p := range pattern {
- m.pattern[m.patternLen] = unicode.ToLower(p)
- m.patternLen++
- if m.patternLen == 255 || int(m.patternLen) == len(pattern) {
- // break at 255 so that we can represent patternLen with a uint8.
- break
- }
- }
- return m
-}
-
-// Match looks for the right-most match of the search pattern within the symbol
-// represented by concatenating the given chunks, returning its offset and
-// score.
-//
-// If a match is found, the first return value will hold the absolute byte
-// offset within all chunks for the start of the symbol. In other words, the
-// index of the match within strings.Join(chunks, ""). If no match is found,
-// the first return value will be -1.
-//
-// The second return value will be the score of the match, which is always
-// between 0 and 1, inclusive. A score of 0 indicates no match.
-func (m *SymbolMatcher) Match(chunks []string) (int, float64) {
- // Explicit behavior for an empty pattern.
- //
- // As a minor optimization, this also avoids nilness checks later on, since
- // the compiler can prove that m != nil.
- if m.patternLen == 0 {
- return -1, 0
- }
-
- // First phase: populate the input buffer with lower-cased runes.
- //
- // We could also check for a forward match here, but since we'd have to write
- // the entire input anyway this has negligible impact on performance.
-
- var (
- inputLen = uint8(0)
- modifiers = wordStart | segmentStart
- )
-
-input:
- for _, chunk := range chunks {
- for _, r := range chunk {
- if r == '.' || r == '/' {
- modifiers |= separator
- }
- // optimization: avoid calls to unicode.ToLower, which can't be inlined.
- l := r
- if r <= unicode.MaxASCII {
- if 'A' <= r && r <= 'Z' {
- l = r + 'a' - 'A'
- }
- } else {
- l = unicode.ToLower(r)
- }
- if l != r {
- modifiers |= wordStart
- }
- m.inputBuffer[inputLen] = l
- m.roles[inputLen] = modifiers
- inputLen++
- if m.roles[inputLen-1]&separator != 0 {
- modifiers = wordStart | segmentStart
- } else {
- modifiers = 0
- }
- // TODO: we should prefer the right-most input if it overflows, rather
- // than the left-most as we're doing here.
- if inputLen == 255 {
- break input
- }
- }
- }
-
- // Second phase: find the right-most match, and count segments from the
- // right.
-
- var (
- pi = uint8(m.patternLen - 1) // pattern index
- p = m.pattern[pi] // pattern rune
- start = -1 // start offset of match
- rseg = uint8(0)
- )
- const maxSeg = 3 // maximum number of segments from the right to count, for scoring purposes.
-
- for ii := inputLen - 1; ; ii-- {
- r := m.inputBuffer[ii]
- if rseg < maxSeg && m.roles[ii]&separator != 0 {
- rseg++
- }
- m.segments[ii] = rseg
- if p == r {
- if pi == 0 {
- start = int(ii)
- break
- }
- pi--
- p = m.pattern[pi]
- }
- // Don't check ii >= 0 in the loop condition: ii is a uint8.
- if ii == 0 {
- break
- }
- }
-
- if start < 0 {
- // no match: skip scoring
- return -1, 0
- }
-
- // Third phase: find the shortest match, and compute the score.
-
- // Score is the average score for each character.
- //
- // A character score is the multiple of:
- // 1. 1.0 if the character starts a segment, .8 if the character start a
- // mid-segment word, otherwise 0.6. This carries over to immediately
- // following characters.
- // 2. For the final character match, the multiplier from (1) is reduced to
- // .8 if the next character in the input is a mid-segment word, or 0.6 if
- // the next character in the input is not a word or segment start. This
- // ensures that we favor whole-word or whole-segment matches over prefix
- // matches.
- // 3. 1.0 if the character is part of the last segment, otherwise
- // 1.0-.2*<segments from the right>, with a max segment count of 3.
- //
- // This is a very naive algorithm, but it is fast. There's lots of prior art
- // here, and we should leverage it. For example, we could explicitly consider
- // character distance, and exact matches of words or segments.
- //
- // Also note that this might not actually find the highest scoring match, as
- // doing so could require a non-linear algorithm, depending on how the score
- // is calculated.
-
- pi = 0
- p = m.pattern[pi]
-
- const (
- segStreak = 1.0
- wordStreak = 0.8
- noStreak = 0.6
- perSegment = 0.2 // we count at most 3 segments above
- )
-
- streakBonus := noStreak
- totScore := 0.0
- for ii := uint8(start); ii < inputLen; ii++ {
- r := m.inputBuffer[ii]
- if r == p {
- pi++
- p = m.pattern[pi]
- // Note: this could be optimized with some bit operations.
- switch {
- case m.roles[ii]&segmentStart != 0 && segStreak > streakBonus:
- streakBonus = segStreak
- case m.roles[ii]&wordStart != 0 && wordStreak > streakBonus:
- streakBonus = wordStreak
- }
- finalChar := pi >= m.patternLen
- // finalCost := 1.0
- if finalChar && streakBonus > noStreak {
- switch {
- case ii == inputLen-1 || m.roles[ii+1]&segmentStart != 0:
- // Full segment: no reduction
- case m.roles[ii+1]&wordStart != 0:
- streakBonus = wordStreak
- default:
- streakBonus = noStreak
- }
- }
- totScore += streakBonus * (1.0 - float64(m.segments[ii])*perSegment)
- if finalChar {
- break
- }
- } else {
- streakBonus = noStreak
- }
- }
-
- return start, totScore / float64(m.patternLen)
-}
diff --git a/internal/lsp/fuzzy/symbol_test.go b/internal/lsp/fuzzy/symbol_test.go
deleted file mode 100644
index cb28160de..000000000
--- a/internal/lsp/fuzzy/symbol_test.go
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package fuzzy_test
-
-import (
- "testing"
-
- . "golang.org/x/tools/internal/lsp/fuzzy"
-)
-
-func TestSymbolMatchIndex(t *testing.T) {
- tests := []struct {
- pattern, input string
- want int
- }{
- {"test", "foo.TestFoo", 4},
- {"test", "test", 0},
- {"test", "Test", 0},
- {"test", "est", -1},
- {"t", "shortest", 7},
- {"", "foo", -1},
- {"", string([]rune{0}), -1}, // verify that we don't default to an empty pattern.
- {"anything", "", -1},
- }
-
- for _, test := range tests {
- matcher := NewSymbolMatcher(test.pattern)
- if got, _ := matcher.Match([]string{test.input}); got != test.want {
- t.Errorf("NewSymbolMatcher(%q).Match(%q) = %v, _, want %v, _", test.pattern, test.input, got, test.want)
- }
- }
-}
-
-func TestSymbolRanking(t *testing.T) {
- matcher := NewSymbolMatcher("test")
-
- // symbols to match, in ascending order of ranking.
- symbols := []string{
- "this.is.better.than.most",
- "test.foo.bar",
- "atest",
- "thebest",
- "test.foo",
- "test.foo",
- "tTest",
- "testage",
- "foo.test",
- "test",
- }
- prev := 0.0
- for _, sym := range symbols {
- _, score := matcher.Match([]string{sym})
- t.Logf("Match(%q) = %v", sym, score)
- if score < prev {
- t.Errorf("Match(%q) = _, %v, want > %v", sym, score, prev)
- }
- prev = score
- }
-}
-
-func TestChunkedMatch(t *testing.T) {
- matcher := NewSymbolMatcher("test")
-
- chunked := [][]string{
- {"test"},
- {"", "test"},
- {"test", ""},
- {"te", "st"},
- }
-
- for _, chunks := range chunked {
- offset, score := matcher.Match(chunks)
- if offset != 0 || score != 1.0 {
- t.Errorf("Match(%v) = %v, %v, want 0, 1.0", chunks, offset, score)
- }
- }
-}
diff --git a/internal/lsp/general.go b/internal/lsp/general.go
deleted file mode 100644
index a3662efd0..000000000
--- a/internal/lsp/general.go
+++ /dev/null
@@ -1,510 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lsp
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "fmt"
- "os"
- "path"
- "path/filepath"
- "sync"
-
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/jsonrpc2"
- "golang.org/x/tools/internal/lsp/debug"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
- errors "golang.org/x/xerrors"
-)
-
-func (s *Server) initialize(ctx context.Context, params *protocol.ParamInitialize) (*protocol.InitializeResult, error) {
- s.stateMu.Lock()
- if s.state >= serverInitializing {
- defer s.stateMu.Unlock()
- return nil, errors.Errorf("%w: initialize called while server in %v state", jsonrpc2.ErrInvalidRequest, s.state)
- }
- s.state = serverInitializing
- s.stateMu.Unlock()
-
- // For uniqueness, use the gopls PID rather than params.ProcessID (the client
- // pid). Some clients might start multiple gopls servers, though they
- // probably shouldn't.
- pid := os.Getpid()
- s.tempDir = filepath.Join(os.TempDir(), fmt.Sprintf("gopls-%d.%s", pid, s.session.ID()))
- err := os.Mkdir(s.tempDir, 0700)
- if err != nil {
- // MkdirTemp could fail due to permissions issues. This is a problem with
- // the user's environment, but should not block gopls otherwise behaving.
- // All usage of s.tempDir should be predicated on having a non-empty
- // s.tempDir.
- event.Error(ctx, "creating temp dir", err)
- s.tempDir = ""
- }
- s.progress.SetSupportsWorkDoneProgress(params.Capabilities.Window.WorkDoneProgress)
-
- options := s.session.Options()
- defer func() { s.session.SetOptions(options) }()
-
- if err := s.handleOptionResults(ctx, source.SetOptions(options, params.InitializationOptions)); err != nil {
- return nil, err
- }
- options.ForClientCapabilities(params.Capabilities)
-
- folders := params.WorkspaceFolders
- if len(folders) == 0 {
- if params.RootURI != "" {
- folders = []protocol.WorkspaceFolder{{
- URI: string(params.RootURI),
- Name: path.Base(params.RootURI.SpanURI().Filename()),
- }}
- }
- }
- for _, folder := range folders {
- uri := span.URIFromURI(folder.URI)
- if !uri.IsFile() {
- continue
- }
- s.pendingFolders = append(s.pendingFolders, folder)
- }
- // gopls only supports URIs with a file:// scheme, so if we have no
- // workspace folders with a supported scheme, fail to initialize.
- if len(folders) > 0 && len(s.pendingFolders) == 0 {
- return nil, fmt.Errorf("unsupported URI schemes: %v (gopls only supports file URIs)", folders)
- }
-
- var codeActionProvider interface{} = true
- if ca := params.Capabilities.TextDocument.CodeAction; len(ca.CodeActionLiteralSupport.CodeActionKind.ValueSet) > 0 {
- // If the client has specified CodeActionLiteralSupport,
- // send the code actions we support.
- //
- // Using CodeActionOptions is only valid if codeActionLiteralSupport is set.
- codeActionProvider = &protocol.CodeActionOptions{
- CodeActionKinds: s.getSupportedCodeActions(),
- }
- }
- var renameOpts interface{} = true
- if r := params.Capabilities.TextDocument.Rename; r.PrepareSupport {
- renameOpts = protocol.RenameOptions{
- PrepareProvider: r.PrepareSupport,
- }
- }
-
- versionInfo := debug.VersionInfo()
-
- // golang/go#45732: Warn users who've installed sergi/go-diff@v1.2.0, since
- // it will corrupt the formatting of their files.
- for _, dep := range versionInfo.Deps {
- if dep.Path == "github.com/sergi/go-diff" && dep.Version == "v1.2.0" {
- if err := s.eventuallyShowMessage(ctx, &protocol.ShowMessageParams{
- Message: `It looks like you have a bad gopls installation.
-Please reinstall gopls by running 'GO111MODULE=on go install golang.org/x/tools/gopls@latest'.
-See https://github.com/golang/go/issues/45732 for more information.`,
- Type: protocol.Error,
- }); err != nil {
- return nil, err
- }
- }
- }
-
- goplsVersion, err := json.Marshal(versionInfo)
- if err != nil {
- return nil, err
- }
-
- return &protocol.InitializeResult{
- Capabilities: protocol.ServerCapabilities{
- CallHierarchyProvider: true,
- CodeActionProvider: codeActionProvider,
- CompletionProvider: protocol.CompletionOptions{
- TriggerCharacters: []string{"."},
- },
- DefinitionProvider: true,
- TypeDefinitionProvider: true,
- ImplementationProvider: true,
- DocumentFormattingProvider: true,
- DocumentSymbolProvider: true,
- WorkspaceSymbolProvider: true,
- ExecuteCommandProvider: protocol.ExecuteCommandOptions{
- Commands: options.SupportedCommands,
- },
- FoldingRangeProvider: true,
- HoverProvider: true,
- DocumentHighlightProvider: true,
- DocumentLinkProvider: protocol.DocumentLinkOptions{},
- ReferencesProvider: true,
- RenameProvider: renameOpts,
- SignatureHelpProvider: protocol.SignatureHelpOptions{
- TriggerCharacters: []string{"(", ","},
- },
- TextDocumentSync: &protocol.TextDocumentSyncOptions{
- Change: protocol.Incremental,
- OpenClose: true,
- Save: protocol.SaveOptions{
- IncludeText: false,
- },
- },
- Workspace: protocol.Workspace6Gn{
- WorkspaceFolders: protocol.WorkspaceFolders5Gn{
- Supported: true,
- ChangeNotifications: "workspace/didChangeWorkspaceFolders",
- },
- },
- },
- ServerInfo: struct {
- Name string `json:"name"`
- Version string `json:"version,omitempty"`
- }{
- Name: "gopls",
- Version: string(goplsVersion),
- },
- }, nil
-}
-
-func (s *Server) initialized(ctx context.Context, params *protocol.InitializedParams) error {
- s.stateMu.Lock()
- if s.state >= serverInitialized {
- defer s.stateMu.Unlock()
- return errors.Errorf("%w: initialized called while server in %v state", jsonrpc2.ErrInvalidRequest, s.state)
- }
- s.state = serverInitialized
- s.stateMu.Unlock()
-
- for _, not := range s.notifications {
- s.client.ShowMessage(ctx, not)
- }
- s.notifications = nil
-
- options := s.session.Options()
- defer func() { s.session.SetOptions(options) }()
-
- if err := s.addFolders(ctx, s.pendingFolders); err != nil {
- return err
- }
- s.pendingFolders = nil
-
- var registrations []protocol.Registration
- if options.ConfigurationSupported && options.DynamicConfigurationSupported {
- registrations = append(registrations, protocol.Registration{
- ID: "workspace/didChangeConfiguration",
- Method: "workspace/didChangeConfiguration",
- })
- }
- if options.SemanticTokens && options.DynamicRegistrationSemanticTokensSupported {
- registrations = append(registrations, semanticTokenRegistration(options.SemanticTypes, options.SemanticMods))
- }
- if len(registrations) > 0 {
- if err := s.client.RegisterCapability(ctx, &protocol.RegistrationParams{
- Registrations: registrations,
- }); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (s *Server) addFolders(ctx context.Context, folders []protocol.WorkspaceFolder) error {
- originalViews := len(s.session.Views())
- viewErrors := make(map[span.URI]error)
-
- var wg sync.WaitGroup
- if s.session.Options().VerboseWorkDoneProgress {
- work := s.progress.Start(ctx, DiagnosticWorkTitle(FromInitialWorkspaceLoad), "Calculating diagnostics for initial workspace load...", nil, nil)
- defer func() {
- go func() {
- wg.Wait()
- work.End("Done.")
- }()
- }()
- }
- // Only one view gets to have a workspace.
- var allFoldersWg sync.WaitGroup
- for _, folder := range folders {
- uri := span.URIFromURI(folder.URI)
- // Ignore non-file URIs.
- if !uri.IsFile() {
- continue
- }
- work := s.progress.Start(ctx, "Setting up workspace", "Loading packages...", nil, nil)
- snapshot, release, err := s.addView(ctx, folder.Name, uri)
- if err == source.ErrViewExists {
- continue
- }
- if err != nil {
- viewErrors[uri] = err
- work.End(fmt.Sprintf("Error loading packages: %s", err))
- continue
- }
- var swg sync.WaitGroup
- swg.Add(1)
- allFoldersWg.Add(1)
- go func() {
- defer swg.Done()
- defer allFoldersWg.Done()
- snapshot.AwaitInitialized(ctx)
- work.End("Finished loading packages.")
- }()
-
- // Print each view's environment.
- buf := &bytes.Buffer{}
- if err := snapshot.WriteEnv(ctx, buf); err != nil {
- viewErrors[uri] = err
- continue
- }
- event.Log(ctx, buf.String())
-
- // Diagnose the newly created view.
- wg.Add(1)
- go func() {
- s.diagnoseDetached(snapshot)
- swg.Wait()
- release()
- wg.Done()
- }()
- }
-
- // Register for file watching notifications, if they are supported.
- // Wait for all snapshots to be initialized first, since all files might
- // not yet be known to the snapshots.
- allFoldersWg.Wait()
- if err := s.updateWatchedDirectories(ctx); err != nil {
- event.Error(ctx, "failed to register for file watching notifications", err)
- }
-
- if len(viewErrors) > 0 {
- errMsg := fmt.Sprintf("Error loading workspace folders (expected %v, got %v)\n", len(folders), len(s.session.Views())-originalViews)
- for uri, err := range viewErrors {
- errMsg += fmt.Sprintf("failed to load view for %s: %v\n", uri, err)
- }
- return s.client.ShowMessage(ctx, &protocol.ShowMessageParams{
- Type: protocol.Error,
- Message: errMsg,
- })
- }
- return nil
-}
-
-// updateWatchedDirectories compares the current set of directories to watch
-// with the previously registered set of directories. If the set of directories
-// has changed, we unregister and re-register for file watching notifications.
-// updatedSnapshots is the set of snapshots that have been updated.
-func (s *Server) updateWatchedDirectories(ctx context.Context) error {
- patterns := s.session.FileWatchingGlobPatterns(ctx)
-
- s.watchedGlobPatternsMu.Lock()
- defer s.watchedGlobPatternsMu.Unlock()
-
- // Nothing to do if the set of workspace directories is unchanged.
- if equalURISet(s.watchedGlobPatterns, patterns) {
- return nil
- }
-
- // If the set of directories to watch has changed, register the updates and
- // unregister the previously watched directories. This ordering avoids a
- // period where no files are being watched. Still, if a user makes on-disk
- // changes before these updates are complete, we may miss them for the new
- // directories.
- prevID := s.watchRegistrationCount - 1
- if err := s.registerWatchedDirectoriesLocked(ctx, patterns); err != nil {
- return err
- }
- if prevID >= 0 {
- return s.client.UnregisterCapability(ctx, &protocol.UnregistrationParams{
- Unregisterations: []protocol.Unregistration{{
- ID: watchedFilesCapabilityID(prevID),
- Method: "workspace/didChangeWatchedFiles",
- }},
- })
- }
- return nil
-}
-
-func watchedFilesCapabilityID(id int) string {
- return fmt.Sprintf("workspace/didChangeWatchedFiles-%d", id)
-}
-
-func equalURISet(m1, m2 map[string]struct{}) bool {
- if len(m1) != len(m2) {
- return false
- }
- for k := range m1 {
- _, ok := m2[k]
- if !ok {
- return false
- }
- }
- return true
-}
-
-// registerWatchedDirectoriesLocked sends the workspace/didChangeWatchedFiles
-// registrations to the client and updates s.watchedDirectories.
-func (s *Server) registerWatchedDirectoriesLocked(ctx context.Context, patterns map[string]struct{}) error {
- if !s.session.Options().DynamicWatchedFilesSupported {
- return nil
- }
- for k := range s.watchedGlobPatterns {
- delete(s.watchedGlobPatterns, k)
- }
- var watchers []protocol.FileSystemWatcher
- for pattern := range patterns {
- watchers = append(watchers, protocol.FileSystemWatcher{
- GlobPattern: pattern,
- Kind: uint32(protocol.WatchChange + protocol.WatchDelete + protocol.WatchCreate),
- })
- }
-
- if err := s.client.RegisterCapability(ctx, &protocol.RegistrationParams{
- Registrations: []protocol.Registration{{
- ID: watchedFilesCapabilityID(s.watchRegistrationCount),
- Method: "workspace/didChangeWatchedFiles",
- RegisterOptions: protocol.DidChangeWatchedFilesRegistrationOptions{
- Watchers: watchers,
- },
- }},
- }); err != nil {
- return err
- }
- s.watchRegistrationCount++
-
- for k, v := range patterns {
- s.watchedGlobPatterns[k] = v
- }
- return nil
-}
-
-func (s *Server) fetchConfig(ctx context.Context, name string, folder span.URI, o *source.Options) error {
- if !s.session.Options().ConfigurationSupported {
- return nil
- }
- configs, err := s.client.Configuration(ctx, &protocol.ParamConfiguration{
- ConfigurationParams: protocol.ConfigurationParams{
- Items: []protocol.ConfigurationItem{{
- ScopeURI: string(folder),
- Section: "gopls",
- }},
- },
- })
- if err != nil {
- return fmt.Errorf("failed to get workspace configuration from client (%s): %v", folder, err)
- }
- for _, config := range configs {
- if err := s.handleOptionResults(ctx, source.SetOptions(o, config)); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (s *Server) eventuallyShowMessage(ctx context.Context, msg *protocol.ShowMessageParams) error {
- s.stateMu.Lock()
- defer s.stateMu.Unlock()
- if s.state == serverInitialized {
- return s.client.ShowMessage(ctx, msg)
- }
- s.notifications = append(s.notifications, msg)
- return nil
-}
-
-func (s *Server) handleOptionResults(ctx context.Context, results source.OptionResults) error {
- for _, result := range results {
- if result.Error != nil {
- msg := &protocol.ShowMessageParams{
- Type: protocol.Error,
- Message: result.Error.Error(),
- }
- if err := s.eventuallyShowMessage(ctx, msg); err != nil {
- return err
- }
- }
- switch result.State {
- case source.OptionUnexpected:
- msg := &protocol.ShowMessageParams{
- Type: protocol.Error,
- Message: fmt.Sprintf("unexpected gopls setting %q", result.Name),
- }
- if err := s.eventuallyShowMessage(ctx, msg); err != nil {
- return err
- }
- case source.OptionDeprecated:
- msg := fmt.Sprintf("gopls setting %q is deprecated", result.Name)
- if result.Replacement != "" {
- msg = fmt.Sprintf("%s, use %q instead", msg, result.Replacement)
- }
- if err := s.eventuallyShowMessage(ctx, &protocol.ShowMessageParams{
- Type: protocol.Warning,
- Message: msg,
- }); err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-// beginFileRequest checks preconditions for a file-oriented request and routes
-// it to a snapshot.
-// We don't want to return errors for benign conditions like wrong file type,
-// so callers should do if !ok { return err } rather than if err != nil.
-func (s *Server) beginFileRequest(ctx context.Context, pURI protocol.DocumentURI, expectKind source.FileKind) (source.Snapshot, source.VersionedFileHandle, bool, func(), error) {
- uri := pURI.SpanURI()
- if !uri.IsFile() {
- // Not a file URI. Stop processing the request, but don't return an error.
- return nil, nil, false, func() {}, nil
- }
- view, err := s.session.ViewOf(uri)
- if err != nil {
- return nil, nil, false, func() {}, err
- }
- snapshot, release := view.Snapshot(ctx)
- fh, err := snapshot.GetVersionedFile(ctx, uri)
- if err != nil {
- release()
- return nil, nil, false, func() {}, err
- }
- kind := snapshot.View().FileKind(fh)
- if expectKind != source.UnknownKind && kind != expectKind {
- // Wrong kind of file. Nothing to do.
- release()
- return nil, nil, false, func() {}, nil
- }
- return snapshot, fh, true, release, nil
-}
-
-func (s *Server) shutdown(ctx context.Context) error {
- s.stateMu.Lock()
- defer s.stateMu.Unlock()
- if s.state < serverInitialized {
- event.Log(ctx, "server shutdown without initialization")
- }
- if s.state != serverShutDown {
- // drop all the active views
- s.session.Shutdown(ctx)
- s.state = serverShutDown
- if s.tempDir != "" {
- if err := os.RemoveAll(s.tempDir); err != nil {
- event.Error(ctx, "removing temp dir", err)
- }
- }
- }
- return nil
-}
-
-func (s *Server) exit(ctx context.Context) error {
- s.stateMu.Lock()
- defer s.stateMu.Unlock()
-
- s.client.Close()
-
- if s.state != serverShutDown {
- // TODO: We should be able to do better than this.
- os.Exit(1)
- }
- // we don't terminate the process on a normal exit, we just allow it to
- // close naturally if needed after the connection is closed.
- return nil
-}
diff --git a/internal/lsp/helper/README.md b/internal/lsp/helper/README.md
deleted file mode 100644
index 3c51efe88..000000000
--- a/internal/lsp/helper/README.md
+++ /dev/null
@@ -1,33 +0,0 @@
-# Generate server_gen.go
-
-`helper` generates boilerplate code for server.go by processing the
-generated code in `protocol/tsserver.go`.
-
-First, build `helper` in this directory (`go build .`).
-
-In directory `lsp`, executing `go generate server.go` generates the stylized file
-`server_gen.go` that contains stubs for type `Server`.
-
-It decides what stubs are needed and their signatures
-by looking at the `Server` interface (`-t` flag). These all look somewhat like
-`Resolve(context.Context, *CompletionItem) (*CompletionItem, error)`.
-
-It then parses the `lsp` directory (`-u` flag) to see if there is a corresponding
-implementation function (which in this case would be named `resolve`). If so
-it discovers the parameter names needed, and generates (in `server_gen.go`) code
-like
-
-``` go
-func (s *Server) resolve(ctx context.Context, params *protocol.CompletionItem) (*protocol.CompletionItem, error) {
- return s.resolve(ctx, params)
-}
-```
-
-If `resolve` is not defined (and it is not), then the body of the generated function is
-
-```go
- return nil, notImplemented("resolve")
-```
-
-So to add a capability currently not implemented, just define it somewhere in `lsp`.
-In this case, just define `func (s *Server) resolve(...)` and re-generate `server_gen.go`.
diff --git a/internal/lsp/helper/helper.go b/internal/lsp/helper/helper.go
deleted file mode 100644
index cadda0246..000000000
--- a/internal/lsp/helper/helper.go
+++ /dev/null
@@ -1,258 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Invoke with //go:generate helper/helper -t Server -d protocol/tsserver.go -u lsp -o server_gen.go
-// invoke in internal/lsp
-package main
-
-import (
- "bytes"
- "flag"
- "fmt"
- "go/ast"
- "go/format"
- "go/parser"
- "go/token"
- "log"
- "os"
- "sort"
- "strings"
- "text/template"
-)
-
-var (
- typ = flag.String("t", "Server", "generate code for this type")
- def = flag.String("d", "", "the file the type is defined in") // this relies on punning
- use = flag.String("u", "", "look for uses in this package")
- out = flag.String("o", "", "where to write the generated file")
-)
-
-func main() {
- log.SetFlags(log.Lshortfile)
- flag.Parse()
- if *typ == "" || *def == "" || *use == "" || *out == "" {
- flag.PrintDefaults()
- return
- }
- // read the type definition and see what methods we're looking for
- doTypes()
-
- // parse the package and see which methods are defined
- doUses()
-
- output()
-}
-
-// replace "\\\n" with nothing before using
-var tmpl = `// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lsp
-
-// code generated by helper. DO NOT EDIT.
-
-import (
- "context"
-
- "golang.org/x/tools/internal/lsp/protocol"
-)
-
-{{range $key, $v := .Stuff}}
-func (s *{{$.Type}}) {{$v.Name}}({{.Param}}) {{.Result}} {
- {{if ne .Found ""}} return s.{{.Internal}}({{.Invoke}})\
- {{else}}return {{if lt 1 (len .Results)}}nil, {{end}}notImplemented("{{.Name}}"){{end}}
-}
-{{end}}
-`
-
-func output() {
- // put in empty param names as needed
- for _, t := range types {
- if t.paramnames == nil {
- t.paramnames = make([]string, len(t.paramtypes))
- }
- for i, p := range t.paramtypes {
- cm := ""
- if i > 0 {
- cm = ", "
- }
- t.Param += fmt.Sprintf("%s%s %s", cm, t.paramnames[i], p)
- this := t.paramnames[i]
- if this == "_" {
- this = "nil"
- }
- t.Invoke += fmt.Sprintf("%s%s", cm, this)
- }
- if len(t.Results) > 1 {
- t.Result = "("
- }
- for i, r := range t.Results {
- cm := ""
- if i > 0 {
- cm = ", "
- }
- t.Result += fmt.Sprintf("%s%s", cm, r)
- }
- if len(t.Results) > 1 {
- t.Result += ")"
- }
- }
-
- fd, err := os.Create(*out)
- if err != nil {
- log.Fatal(err)
- }
- t, err := template.New("foo").Parse(tmpl)
- if err != nil {
- log.Fatal(err)
- }
- type par struct {
- Type string
- Stuff []*Function
- }
- p := par{*typ, types}
- if false { // debugging the template
- t.Execute(os.Stderr, &p)
- }
- buf := bytes.NewBuffer(nil)
- err = t.Execute(buf, &p)
- if err != nil {
- log.Fatal(err)
- }
- ans, err := format.Source(bytes.Replace(buf.Bytes(), []byte("\\\n"), []byte{}, -1))
- if err != nil {
- log.Fatal(err)
- }
- fd.Write(ans)
-}
-
-func doUses() {
- fset := token.NewFileSet()
- pkgs, err := parser.ParseDir(fset, *use, nil, 0)
- if err != nil {
- log.Fatalf("%q:%v", *use, err)
- }
- pkg := pkgs["lsp"] // CHECK
- files := pkg.Files
- for fname, f := range files {
- for _, d := range f.Decls {
- fd, ok := d.(*ast.FuncDecl)
- if !ok {
- continue
- }
- nm := fd.Name.String()
- if ast.IsExported(nm) {
- // we're looking for things like didChange
- continue
- }
- if fx, ok := byname[nm]; ok {
- if fx.Found != "" {
- log.Fatalf("found %s in %s and %s", fx.Internal, fx.Found, fname)
- }
- fx.Found = fname
- // and the Paramnames
- ft := fd.Type
- for _, f := range ft.Params.List {
- nm := ""
- if len(f.Names) > 0 {
- nm = f.Names[0].String()
- if nm == "_" {
- nm = "_gen"
- }
- }
- fx.paramnames = append(fx.paramnames, nm)
- }
- }
- }
- }
- if false {
- for i, f := range types {
- log.Printf("%d %s %s", i, f.Internal, f.Found)
- }
- }
-}
-
-type Function struct {
- Name string
- Internal string // first letter lower case
- paramtypes []string
- paramnames []string
- Results []string
- Param string
- Result string // do it in code, easier than in a template
- Invoke string
- Found string // file it was found in
-}
-
-var types []*Function
-var byname = map[string]*Function{} // internal names
-
-func doTypes() {
- fset := token.NewFileSet()
- f, err := parser.ParseFile(fset, *def, nil, 0)
- if err != nil {
- log.Fatal(err)
- }
- fd, err := os.Create("/tmp/ast")
- if err != nil {
- log.Fatal(err)
- }
- ast.Fprint(fd, fset, f, ast.NotNilFilter)
- ast.Inspect(f, inter)
- sort.Slice(types, func(i, j int) bool { return types[i].Name < types[j].Name })
- if false {
- for i, f := range types {
- log.Printf("%d %s(%v) %v", i, f.Name, f.paramtypes, f.Results)
- }
- }
-}
-
-func inter(n ast.Node) bool {
- x, ok := n.(*ast.TypeSpec)
- if !ok || x.Name.Name != *typ {
- return true
- }
- m := x.Type.(*ast.InterfaceType).Methods.List
- for _, fld := range m {
- fn := fld.Type.(*ast.FuncType)
- p := fn.Params.List
- r := fn.Results.List
- fx := &Function{
- Name: fld.Names[0].String(),
- }
- fx.Internal = strings.ToLower(fx.Name[:1]) + fx.Name[1:]
- for _, f := range p {
- fx.paramtypes = append(fx.paramtypes, whatis(f.Type))
- }
- for _, f := range r {
- fx.Results = append(fx.Results, whatis(f.Type))
- }
- types = append(types, fx)
- byname[fx.Internal] = fx
- }
- return false
-}
-
-func whatis(x ast.Expr) string {
- switch n := x.(type) {
- case *ast.SelectorExpr:
- return whatis(n.X) + "." + n.Sel.String()
- case *ast.StarExpr:
- return "*" + whatis(n.X)
- case *ast.Ident:
- if ast.IsExported(n.Name) {
- // these are from package protocol
- return "protocol." + n.Name
- }
- return n.Name
- case *ast.ArrayType:
- return "[]" + whatis(n.Elt)
- case *ast.InterfaceType:
- return "interface{}"
- default:
- log.Fatalf("Fatal %T", x)
- return fmt.Sprintf("%T", x)
- }
-}
diff --git a/internal/lsp/highlight.go b/internal/lsp/highlight.go
deleted file mode 100644
index 5dc636eb5..000000000
--- a/internal/lsp/highlight.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lsp
-
-import (
- "context"
-
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/lsp/debug/tag"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/lsp/template"
-)
-
-func (s *Server) documentHighlight(ctx context.Context, params *protocol.DocumentHighlightParams) ([]protocol.DocumentHighlight, error) {
- snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.Go)
- defer release()
- if !ok {
- return nil, err
- }
-
- if snapshot.View().FileKind(fh) == source.Tmpl {
- return template.Highlight(ctx, snapshot, fh, params.Position)
- }
-
- rngs, err := source.Highlight(ctx, snapshot, fh, params.Position)
- if err != nil {
- event.Error(ctx, "no highlight", err, tag.URI.Of(params.TextDocument.URI))
- }
- return toProtocolHighlight(rngs), nil
-}
-
-func toProtocolHighlight(rngs []protocol.Range) []protocol.DocumentHighlight {
- result := make([]protocol.DocumentHighlight, 0, len(rngs))
- kind := protocol.Text
- for _, rng := range rngs {
- result = append(result, protocol.DocumentHighlight{
- Kind: kind,
- Range: rng,
- })
- }
- return result
-}
diff --git a/internal/lsp/hover.go b/internal/lsp/hover.go
deleted file mode 100644
index d59f5dbdb..000000000
--- a/internal/lsp/hover.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lsp
-
-import (
- "context"
-
- "golang.org/x/tools/internal/lsp/mod"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/lsp/template"
- "golang.org/x/tools/internal/lsp/work"
-)
-
-func (s *Server) hover(ctx context.Context, params *protocol.HoverParams) (*protocol.Hover, error) {
- snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.UnknownKind)
- defer release()
- if !ok {
- return nil, err
- }
- switch snapshot.View().FileKind(fh) {
- case source.Mod:
- return mod.Hover(ctx, snapshot, fh, params.Position)
- case source.Go:
- return source.Hover(ctx, snapshot, fh, params.Position)
- case source.Tmpl:
- return template.Hover(ctx, snapshot, fh, params.Position)
- case source.Work:
- return work.Hover(ctx, snapshot, fh, params.Position)
- }
- return nil, nil
-}
diff --git a/internal/lsp/implementation.go b/internal/lsp/implementation.go
deleted file mode 100644
index 49992b911..000000000
--- a/internal/lsp/implementation.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lsp
-
-import (
- "context"
-
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
-)
-
-func (s *Server) implementation(ctx context.Context, params *protocol.ImplementationParams) ([]protocol.Location, error) {
- snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.Go)
- defer release()
- if !ok {
- return nil, err
- }
- return source.Implementation(ctx, snapshot, fh, params.Position)
-}
diff --git a/internal/lsp/link.go b/internal/lsp/link.go
deleted file mode 100644
index 86c59fc4d..000000000
--- a/internal/lsp/link.go
+++ /dev/null
@@ -1,280 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lsp
-
-import (
- "bytes"
- "context"
- "fmt"
- "go/ast"
- "go/token"
- "net/url"
- "regexp"
- "strconv"
- "strings"
- "sync"
-
- "golang.org/x/mod/modfile"
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/lsp/debug/tag"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
-)
-
-func (s *Server) documentLink(ctx context.Context, params *protocol.DocumentLinkParams) (links []protocol.DocumentLink, err error) {
- snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.UnknownKind)
- defer release()
- if !ok {
- return nil, err
- }
- switch snapshot.View().FileKind(fh) {
- case source.Mod:
- links, err = modLinks(ctx, snapshot, fh)
- case source.Go:
- links, err = goLinks(ctx, snapshot, fh)
- }
- // Don't return errors for document links.
- if err != nil {
- event.Error(ctx, "failed to compute document links", err, tag.URI.Of(fh.URI()))
- return nil, nil
- }
- return links, nil
-}
-
-func modLinks(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) ([]protocol.DocumentLink, error) {
- pm, err := snapshot.ParseMod(ctx, fh)
- if err != nil {
- return nil, err
- }
- var links []protocol.DocumentLink
- for _, req := range pm.File.Require {
- if req.Syntax == nil {
- continue
- }
- // See golang/go#36998: don't link to modules matching GOPRIVATE.
- if snapshot.View().IsGoPrivatePath(req.Mod.Path) {
- continue
- }
- dep := []byte(req.Mod.Path)
- s, e := req.Syntax.Start.Byte, req.Syntax.End.Byte
- i := bytes.Index(pm.Mapper.Content[s:e], dep)
- if i == -1 {
- continue
- }
- // Shift the start position to the location of the
- // dependency within the require statement.
- start, end := token.Pos(s+i), token.Pos(s+i+len(dep))
- target := source.BuildLink(snapshot.View().Options().LinkTarget, "mod/"+req.Mod.String(), "")
- l, err := toProtocolLink(snapshot, pm.Mapper, target, start, end, source.Mod)
- if err != nil {
- return nil, err
- }
- links = append(links, l)
- }
- // TODO(ridersofrohan): handle links for replace and exclude directives.
- if syntax := pm.File.Syntax; syntax == nil {
- return links, nil
- }
- // Get all the links that are contained in the comments of the file.
- for _, expr := range pm.File.Syntax.Stmt {
- comments := expr.Comment()
- if comments == nil {
- continue
- }
- for _, section := range [][]modfile.Comment{comments.Before, comments.Suffix, comments.After} {
- for _, comment := range section {
- l, err := findLinksInString(ctx, snapshot, comment.Token, token.Pos(comment.Start.Byte), pm.Mapper, source.Mod)
- if err != nil {
- return nil, err
- }
- links = append(links, l...)
- }
- }
- }
- return links, nil
-}
-
-func goLinks(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) ([]protocol.DocumentLink, error) {
- view := snapshot.View()
- // We don't actually need type information, so any typecheck mode is fine.
- pkg, err := snapshot.PackageForFile(ctx, fh.URI(), source.TypecheckWorkspace, source.WidestPackage)
- if err != nil {
- return nil, err
- }
- pgf, err := snapshot.ParseGo(ctx, fh, source.ParseFull)
- if err != nil {
- return nil, err
- }
- var imports []*ast.ImportSpec
- var str []*ast.BasicLit
- ast.Inspect(pgf.File, func(node ast.Node) bool {
- switch n := node.(type) {
- case *ast.ImportSpec:
- imports = append(imports, n)
- return false
- case *ast.BasicLit:
- // Look for links in string literals.
- if n.Kind == token.STRING {
- str = append(str, n)
- }
- return false
- }
- return true
- })
- var links []protocol.DocumentLink
- // For import specs, provide a link to a documentation website, like
- // https://pkg.go.dev.
- if view.Options().ImportShortcut.ShowLinks() {
- for _, imp := range imports {
- target, err := strconv.Unquote(imp.Path.Value)
- if err != nil {
- continue
- }
- // See golang/go#36998: don't link to modules matching GOPRIVATE.
- if view.IsGoPrivatePath(target) {
- continue
- }
- if mod, version, ok := moduleAtVersion(ctx, snapshot, target, pkg); ok && strings.ToLower(view.Options().LinkTarget) == "pkg.go.dev" {
- target = strings.Replace(target, mod, mod+"@"+version, 1)
- }
- // Account for the quotation marks in the positions.
- start := imp.Path.Pos() + 1
- end := imp.Path.End() - 1
- target = source.BuildLink(view.Options().LinkTarget, target, "")
- l, err := toProtocolLink(snapshot, pgf.Mapper, target, start, end, source.Go)
- if err != nil {
- return nil, err
- }
- links = append(links, l)
- }
- }
- for _, s := range str {
- l, err := findLinksInString(ctx, snapshot, s.Value, s.Pos(), pgf.Mapper, source.Go)
- if err != nil {
- return nil, err
- }
- links = append(links, l...)
- }
- for _, commentGroup := range pgf.File.Comments {
- for _, comment := range commentGroup.List {
- l, err := findLinksInString(ctx, snapshot, comment.Text, comment.Pos(), pgf.Mapper, source.Go)
- if err != nil {
- return nil, err
- }
- links = append(links, l...)
- }
- }
- return links, nil
-}
-
-func moduleAtVersion(ctx context.Context, snapshot source.Snapshot, target string, pkg source.Package) (string, string, bool) {
- impPkg, err := pkg.GetImport(target)
- if err != nil {
- return "", "", false
- }
- if impPkg.Version() == nil {
- return "", "", false
- }
- version, modpath := impPkg.Version().Version, impPkg.Version().Path
- if modpath == "" || version == "" {
- return "", "", false
- }
- return modpath, version, true
-}
-
-func findLinksInString(ctx context.Context, snapshot source.Snapshot, src string, pos token.Pos, m *protocol.ColumnMapper, fileKind source.FileKind) ([]protocol.DocumentLink, error) {
- var links []protocol.DocumentLink
- for _, index := range snapshot.View().Options().URLRegexp.FindAllIndex([]byte(src), -1) {
- start, end := index[0], index[1]
- startPos := token.Pos(int(pos) + start)
- endPos := token.Pos(int(pos) + end)
- link := src[start:end]
- linkURL, err := url.Parse(link)
- // Fallback: Linkify IP addresses as suggested in golang/go#18824.
- if err != nil {
- linkURL, err = url.Parse("//" + link)
- // Not all potential links will be valid, so don't return this error.
- if err != nil {
- continue
- }
- }
- // If the URL has no scheme, use https.
- if linkURL.Scheme == "" {
- linkURL.Scheme = "https"
- }
- l, err := toProtocolLink(snapshot, m, linkURL.String(), startPos, endPos, fileKind)
- if err != nil {
- return nil, err
- }
- links = append(links, l)
- }
- // Handle golang/go#1234-style links.
- r := getIssueRegexp()
- for _, index := range r.FindAllIndex([]byte(src), -1) {
- start, end := index[0], index[1]
- startPos := token.Pos(int(pos) + start)
- endPos := token.Pos(int(pos) + end)
- matches := r.FindStringSubmatch(src)
- if len(matches) < 4 {
- continue
- }
- org, repo, number := matches[1], matches[2], matches[3]
- target := fmt.Sprintf("https://github.com/%s/%s/issues/%s", org, repo, number)
- l, err := toProtocolLink(snapshot, m, target, startPos, endPos, fileKind)
- if err != nil {
- return nil, err
- }
- links = append(links, l)
- }
- return links, nil
-}
-
-func getIssueRegexp() *regexp.Regexp {
- once.Do(func() {
- issueRegexp = regexp.MustCompile(`(\w+)/([\w-]+)#([0-9]+)`)
- })
- return issueRegexp
-}
-
-var (
- once sync.Once
- issueRegexp *regexp.Regexp
-)
-
-func toProtocolLink(snapshot source.Snapshot, m *protocol.ColumnMapper, target string, start, end token.Pos, fileKind source.FileKind) (protocol.DocumentLink, error) {
- var rng protocol.Range
- switch fileKind {
- case source.Go:
- spn, err := span.NewRange(snapshot.FileSet(), start, end).Span()
- if err != nil {
- return protocol.DocumentLink{}, err
- }
- rng, err = m.Range(spn)
- if err != nil {
- return protocol.DocumentLink{}, err
- }
- case source.Mod:
- s, e := int(start), int(end)
- line, col, err := m.Converter.ToPosition(s)
- if err != nil {
- return protocol.DocumentLink{}, err
- }
- start := span.NewPoint(line, col, s)
- line, col, err = m.Converter.ToPosition(e)
- if err != nil {
- return protocol.DocumentLink{}, err
- }
- end := span.NewPoint(line, col, e)
- rng, err = m.Range(span.New(m.URI, start, end))
- if err != nil {
- return protocol.DocumentLink{}, err
- }
- }
- return protocol.DocumentLink{
- Range: rng,
- Target: target,
- }, nil
-}
diff --git a/internal/lsp/lsp_test.go b/internal/lsp/lsp_test.go
deleted file mode 100644
index ca0985a98..000000000
--- a/internal/lsp/lsp_test.go
+++ /dev/null
@@ -1,1319 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lsp
-
-import (
- "context"
- "fmt"
- "go/token"
- "os"
- "os/exec"
- "path/filepath"
- "sort"
- "strings"
- "testing"
-
- "golang.org/x/tools/internal/lsp/cache"
- "golang.org/x/tools/internal/lsp/command"
- "golang.org/x/tools/internal/lsp/diff"
- "golang.org/x/tools/internal/lsp/diff/myers"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/lsp/tests"
- "golang.org/x/tools/internal/span"
- "golang.org/x/tools/internal/testenv"
-)
-
-func TestMain(m *testing.M) {
- testenv.ExitIfSmallMachine()
- os.Exit(m.Run())
-}
-
-func TestLSP(t *testing.T) {
- tests.RunTests(t, "testdata", true, testLSP)
-}
-
-type runner struct {
- server *Server
- data *tests.Data
- diagnostics map[span.URI][]*source.Diagnostic
- ctx context.Context
- normalizers []tests.Normalizer
- editRecv chan map[span.URI]string
-}
-
-func testLSP(t *testing.T, datum *tests.Data) {
- ctx := tests.Context(t)
-
- cache := cache.New(nil)
- session := cache.NewSession(ctx)
- options := source.DefaultOptions().Clone()
- tests.DefaultOptions(options)
- session.SetOptions(options)
- options.SetEnvSlice(datum.Config.Env)
- view, snapshot, release, err := session.NewView(ctx, datum.Config.Dir, span.URIFromPath(datum.Config.Dir), options)
- if err != nil {
- t.Fatal(err)
- }
-
- defer view.Shutdown(ctx)
-
- // Enable type error analyses for tests.
- // TODO(golang/go#38212): Delete this once they are enabled by default.
- tests.EnableAllAnalyzers(view, options)
- view.SetOptions(ctx, options)
-
- // Only run the -modfile specific tests in module mode with Go 1.14 or above.
- datum.ModfileFlagAvailable = len(snapshot.ModFiles()) > 0 && testenv.Go1Point() >= 14
- release()
-
- var modifications []source.FileModification
- for filename, content := range datum.Config.Overlay {
- if filepath.Ext(filename) != ".go" {
- continue
- }
- modifications = append(modifications, source.FileModification{
- URI: span.URIFromPath(filename),
- Action: source.Open,
- Version: -1,
- Text: content,
- LanguageID: "go",
- })
- }
- if err := session.ModifyFiles(ctx, modifications); err != nil {
- t.Fatal(err)
- }
- r := &runner{
- data: datum,
- ctx: ctx,
- normalizers: tests.CollectNormalizers(datum.Exported),
- editRecv: make(chan map[span.URI]string, 1),
- }
-
- r.server = NewServer(session, testClient{runner: r})
- tests.Run(t, r, datum)
-}
-
-// testClient stubs any client functions that may be called by LSP functions.
-type testClient struct {
- protocol.Client
- runner *runner
-}
-
-func (c testClient) Close() error {
- return nil
-}
-
-// Trivially implement PublishDiagnostics so that we can call
-// server.publishReports below to de-dup sent diagnostics.
-func (c testClient) PublishDiagnostics(context.Context, *protocol.PublishDiagnosticsParams) error {
- return nil
-}
-
-func (c testClient) ShowMessage(context.Context, *protocol.ShowMessageParams) error {
- return nil
-}
-
-func (c testClient) ApplyEdit(ctx context.Context, params *protocol.ApplyWorkspaceEditParams) (*protocol.ApplyWorkspaceEditResult, error) {
- res, err := applyTextDocumentEdits(c.runner, params.Edit.DocumentChanges)
- if err != nil {
- return nil, err
- }
- c.runner.editRecv <- res
- return &protocol.ApplyWorkspaceEditResult{Applied: true}, nil
-}
-
-func (r *runner) CallHierarchy(t *testing.T, spn span.Span, expectedCalls *tests.CallHierarchyResult) {
- mapper, err := r.data.Mapper(spn.URI())
- if err != nil {
- t.Fatal(err)
- }
- loc, err := mapper.Location(spn)
- if err != nil {
- t.Fatalf("failed for %v: %v", spn, err)
- }
-
- params := &protocol.CallHierarchyPrepareParams{
- TextDocumentPositionParams: protocol.TextDocumentPositionParams{
- TextDocument: protocol.TextDocumentIdentifier{URI: loc.URI},
- Position: loc.Range.Start,
- },
- }
-
- items, err := r.server.PrepareCallHierarchy(r.ctx, params)
- if err != nil {
- t.Fatal(err)
- }
- if len(items) == 0 {
- t.Fatalf("expected call hierarchy item to be returned for identifier at %v\n", loc.Range)
- }
-
- callLocation := protocol.Location{
- URI: items[0].URI,
- Range: items[0].Range,
- }
- if callLocation != loc {
- t.Fatalf("expected server.PrepareCallHierarchy to return identifier at %v but got %v\n", loc, callLocation)
- }
-
- incomingCalls, err := r.server.IncomingCalls(r.ctx, &protocol.CallHierarchyIncomingCallsParams{Item: items[0]})
- if err != nil {
- t.Error(err)
- }
- var incomingCallItems []protocol.CallHierarchyItem
- for _, item := range incomingCalls {
- incomingCallItems = append(incomingCallItems, item.From)
- }
- msg := tests.DiffCallHierarchyItems(incomingCallItems, expectedCalls.IncomingCalls)
- if msg != "" {
- t.Error(fmt.Sprintf("incoming calls: %s", msg))
- }
-
- outgoingCalls, err := r.server.OutgoingCalls(r.ctx, &protocol.CallHierarchyOutgoingCallsParams{Item: items[0]})
- if err != nil {
- t.Error(err)
- }
- var outgoingCallItems []protocol.CallHierarchyItem
- for _, item := range outgoingCalls {
- outgoingCallItems = append(outgoingCallItems, item.To)
- }
- msg = tests.DiffCallHierarchyItems(outgoingCallItems, expectedCalls.OutgoingCalls)
- if msg != "" {
- t.Error(fmt.Sprintf("outgoing calls: %s", msg))
- }
-}
-
-func (r *runner) CodeLens(t *testing.T, uri span.URI, want []protocol.CodeLens) {
- if !strings.HasSuffix(uri.Filename(), "go.mod") {
- return
- }
- got, err := r.server.codeLens(r.ctx, &protocol.CodeLensParams{
- TextDocument: protocol.TextDocumentIdentifier{
- URI: protocol.DocumentURI(uri),
- },
- })
- if err != nil {
- t.Fatal(err)
- }
- if diff := tests.DiffCodeLens(uri, want, got); diff != "" {
- t.Errorf("%s: %s", uri, diff)
- }
-}
-
-func (r *runner) Diagnostics(t *testing.T, uri span.URI, want []*source.Diagnostic) {
- // Get the diagnostics for this view if we have not done it before.
- v := r.server.session.View(r.data.Config.Dir)
- r.collectDiagnostics(v)
- d := r.diagnostics[uri]
- got := make([]*source.Diagnostic, len(d))
- copy(got, d)
- // A special case to test that there are no diagnostics for a file.
- if len(want) == 1 && want[0].Source == "no_diagnostics" {
- if len(got) != 0 {
- t.Errorf("expected no diagnostics for %s, got %v", uri, got)
- }
- return
- }
- if diff := tests.DiffDiagnostics(uri, want, got); diff != "" {
- t.Error(diff)
- }
-}
-
-func (r *runner) FoldingRanges(t *testing.T, spn span.Span) {
- uri := spn.URI()
- view, err := r.server.session.ViewOf(uri)
- if err != nil {
- t.Fatal(err)
- }
- original := view.Options()
- modified := original
-
- // Test all folding ranges.
- modified.LineFoldingOnly = false
- view, err = view.SetOptions(r.ctx, modified)
- if err != nil {
- t.Error(err)
- return
- }
- ranges, err := r.server.FoldingRange(r.ctx, &protocol.FoldingRangeParams{
- TextDocument: protocol.TextDocumentIdentifier{
- URI: protocol.URIFromSpanURI(uri),
- },
- })
- if err != nil {
- t.Error(err)
- return
- }
- r.foldingRanges(t, "foldingRange", uri, ranges)
-
- // Test folding ranges with lineFoldingOnly = true.
- modified.LineFoldingOnly = true
- view, err = view.SetOptions(r.ctx, modified)
- if err != nil {
- t.Error(err)
- return
- }
- ranges, err = r.server.FoldingRange(r.ctx, &protocol.FoldingRangeParams{
- TextDocument: protocol.TextDocumentIdentifier{
- URI: protocol.URIFromSpanURI(uri),
- },
- })
- if err != nil {
- t.Error(err)
- return
- }
- r.foldingRanges(t, "foldingRange-lineFolding", uri, ranges)
- view.SetOptions(r.ctx, original)
-}
-
-func (r *runner) foldingRanges(t *testing.T, prefix string, uri span.URI, ranges []protocol.FoldingRange) {
- m, err := r.data.Mapper(uri)
- if err != nil {
- t.Fatal(err)
- }
- // Fold all ranges.
- nonOverlapping := nonOverlappingRanges(ranges)
- for i, rngs := range nonOverlapping {
- got, err := foldRanges(m, string(m.Content), rngs)
- if err != nil {
- t.Error(err)
- continue
- }
- tag := fmt.Sprintf("%s-%d", prefix, i)
- want := string(r.data.Golden(tag, uri.Filename(), func() ([]byte, error) {
- return []byte(got), nil
- }))
-
- if want != got {
- t.Errorf("%s: foldingRanges failed for %s, expected:\n%v\ngot:\n%v", tag, uri.Filename(), want, got)
- }
- }
-
- // Filter by kind.
- kinds := []protocol.FoldingRangeKind{protocol.Imports, protocol.Comment}
- for _, kind := range kinds {
- var kindOnly []protocol.FoldingRange
- for _, fRng := range ranges {
- if fRng.Kind == string(kind) {
- kindOnly = append(kindOnly, fRng)
- }
- }
-
- nonOverlapping := nonOverlappingRanges(kindOnly)
- for i, rngs := range nonOverlapping {
- got, err := foldRanges(m, string(m.Content), rngs)
- if err != nil {
- t.Error(err)
- continue
- }
- tag := fmt.Sprintf("%s-%s-%d", prefix, kind, i)
- want := string(r.data.Golden(tag, uri.Filename(), func() ([]byte, error) {
- return []byte(got), nil
- }))
-
- if want != got {
- t.Errorf("%s: foldingRanges failed for %s, expected:\n%v\ngot:\n%v", tag, uri.Filename(), want, got)
- }
- }
-
- }
-}
-
-func nonOverlappingRanges(ranges []protocol.FoldingRange) (res [][]protocol.FoldingRange) {
- for _, fRng := range ranges {
- setNum := len(res)
- for i := 0; i < len(res); i++ {
- canInsert := true
- for _, rng := range res[i] {
- if conflict(rng, fRng) {
- canInsert = false
- break
- }
- }
- if canInsert {
- setNum = i
- break
- }
- }
- if setNum == len(res) {
- res = append(res, []protocol.FoldingRange{})
- }
- res[setNum] = append(res[setNum], fRng)
- }
- return res
-}
-
-func conflict(a, b protocol.FoldingRange) bool {
- // a start position is <= b start positions
- return (a.StartLine < b.StartLine || (a.StartLine == b.StartLine && a.StartCharacter <= b.StartCharacter)) &&
- (a.EndLine > b.StartLine || (a.EndLine == b.StartLine && a.EndCharacter > b.StartCharacter))
-}
-
-func foldRanges(m *protocol.ColumnMapper, contents string, ranges []protocol.FoldingRange) (string, error) {
- foldedText := "<>"
- res := contents
- // Apply the edits from the end of the file forward
- // to preserve the offsets
- for i := len(ranges) - 1; i >= 0; i-- {
- fRange := ranges[i]
- spn, err := m.RangeSpan(protocol.Range{
- Start: protocol.Position{
- Line: fRange.StartLine,
- Character: fRange.StartCharacter,
- },
- End: protocol.Position{
- Line: fRange.EndLine,
- Character: fRange.EndCharacter,
- },
- })
- if err != nil {
- return "", err
- }
- start := spn.Start().Offset()
- end := spn.End().Offset()
-
- tmp := res[0:start] + foldedText
- res = tmp + res[end:]
- }
- return res, nil
-}
-
-func (r *runner) Format(t *testing.T, spn span.Span) {
- uri := spn.URI()
- filename := uri.Filename()
- gofmted := string(r.data.Golden("gofmt", filename, func() ([]byte, error) {
- cmd := exec.Command("gofmt", filename)
- out, _ := cmd.Output() // ignore error, sometimes we have intentionally ungofmt-able files
- return out, nil
- }))
-
- edits, err := r.server.Formatting(r.ctx, &protocol.DocumentFormattingParams{
- TextDocument: protocol.TextDocumentIdentifier{
- URI: protocol.URIFromSpanURI(uri),
- },
- })
- if err != nil {
- if gofmted != "" {
- t.Error(err)
- }
- return
- }
- m, err := r.data.Mapper(uri)
- if err != nil {
- t.Fatal(err)
- }
- sedits, err := source.FromProtocolEdits(m, edits)
- if err != nil {
- t.Error(err)
- }
- got := diff.ApplyEdits(string(m.Content), sedits)
- if gofmted != got {
- t.Errorf("format failed for %s, expected:\n%v\ngot:\n%v", filename, gofmted, got)
- }
-}
-
-func (r *runner) SemanticTokens(t *testing.T, spn span.Span) {
- uri := spn.URI()
- filename := uri.Filename()
- // this is called solely for coverage in semantic.go
- _, err := r.server.semanticTokensFull(r.ctx, &protocol.SemanticTokensParams{
- TextDocument: protocol.TextDocumentIdentifier{
- URI: protocol.URIFromSpanURI(uri),
- },
- })
- if err != nil {
- t.Errorf("%v for %s", err, filename)
- }
- _, err = r.server.semanticTokensRange(r.ctx, &protocol.SemanticTokensRangeParams{
- TextDocument: protocol.TextDocumentIdentifier{
- URI: protocol.URIFromSpanURI(uri),
- },
- // any legal range. Just to exercise the call.
- Range: protocol.Range{
- Start: protocol.Position{
- Line: 0,
- Character: 0,
- },
- End: protocol.Position{
- Line: 2,
- Character: 0,
- },
- },
- })
- if err != nil {
- t.Errorf("%v for Range %s", err, filename)
- }
-}
-
-func (r *runner) Import(t *testing.T, spn span.Span) {
- uri := spn.URI()
- filename := uri.Filename()
- actions, err := r.server.CodeAction(r.ctx, &protocol.CodeActionParams{
- TextDocument: protocol.TextDocumentIdentifier{
- URI: protocol.URIFromSpanURI(uri),
- },
- })
- if err != nil {
- t.Fatal(err)
- }
- m, err := r.data.Mapper(uri)
- if err != nil {
- t.Fatal(err)
- }
- got := string(m.Content)
- if len(actions) > 0 {
- res, err := applyTextDocumentEdits(r, actions[0].Edit.DocumentChanges)
- if err != nil {
- t.Fatal(err)
- }
- got = res[uri]
- }
- want := string(r.data.Golden("goimports", filename, func() ([]byte, error) {
- return []byte(got), nil
- }))
- if want != got {
- d, err := myers.ComputeEdits(uri, want, got)
- if err != nil {
- t.Fatal(err)
- }
- t.Errorf("import failed for %s: %s", filename, diff.ToUnified("want", "got", want, d))
- }
-}
-
-func (r *runner) SuggestedFix(t *testing.T, spn span.Span, actionKinds []string, expectedActions int) {
- uri := spn.URI()
- view, err := r.server.session.ViewOf(uri)
- if err != nil {
- t.Fatal(err)
- }
-
- m, err := r.data.Mapper(uri)
- if err != nil {
- t.Fatal(err)
- }
- rng, err := m.Range(spn)
- if err != nil {
- t.Fatal(err)
- }
- // Get the diagnostics for this view if we have not done it before.
- r.collectDiagnostics(view)
- var diagnostics []protocol.Diagnostic
- for _, d := range r.diagnostics[uri] {
- // Compare the start positions rather than the entire range because
- // some diagnostics have a range with the same start and end position (8:1-8:1).
- // The current marker functionality prevents us from having a range of 0 length.
- if protocol.ComparePosition(d.Range.Start, rng.Start) == 0 {
- diagnostics = append(diagnostics, toProtocolDiagnostics([]*source.Diagnostic{d})...)
- break
- }
- }
- codeActionKinds := []protocol.CodeActionKind{}
- for _, k := range actionKinds {
- codeActionKinds = append(codeActionKinds, protocol.CodeActionKind(k))
- }
- actions, err := r.server.CodeAction(r.ctx, &protocol.CodeActionParams{
- TextDocument: protocol.TextDocumentIdentifier{
- URI: protocol.URIFromSpanURI(uri),
- },
- Range: rng,
- Context: protocol.CodeActionContext{
- Only: codeActionKinds,
- Diagnostics: diagnostics,
- },
- })
- if err != nil {
- t.Fatalf("CodeAction %s failed: %v", spn, err)
- }
- if len(actions) != expectedActions {
- // Hack: We assume that we only get one code action per range.
- var cmds []string
- for _, a := range actions {
- cmds = append(cmds, fmt.Sprintf("%s (%s)", a.Command, a.Title))
- }
- t.Fatalf("unexpected number of code actions, want %d, got %d: %v", expectedActions, len(actions), cmds)
- }
- action := actions[0]
- var match bool
- for _, k := range codeActionKinds {
- if action.Kind == k {
- match = true
- break
- }
- }
- if !match {
- t.Fatalf("unexpected kind for code action %s, expected one of %v, got %v", action.Title, codeActionKinds, action.Kind)
- }
- var res map[span.URI]string
- if cmd := action.Command; cmd != nil {
- _, err := r.server.ExecuteCommand(r.ctx, &protocol.ExecuteCommandParams{
- Command: action.Command.Command,
- Arguments: action.Command.Arguments,
- })
- if err != nil {
- t.Fatalf("error converting command %q to edits: %v", action.Command.Command, err)
- }
- res = <-r.editRecv
- } else {
- res, err = applyTextDocumentEdits(r, action.Edit.DocumentChanges)
- if err != nil {
- t.Fatal(err)
- }
- }
- for u, got := range res {
- want := string(r.data.Golden("suggestedfix_"+tests.SpanName(spn), u.Filename(), func() ([]byte, error) {
- return []byte(got), nil
- }))
- if want != got {
- t.Errorf("suggested fixes failed for %s:\n%s", u.Filename(), tests.Diff(t, want, got))
- }
- }
-}
-
-func (r *runner) FunctionExtraction(t *testing.T, start span.Span, end span.Span) {
- uri := start.URI()
- m, err := r.data.Mapper(uri)
- if err != nil {
- t.Fatal(err)
- }
- spn := span.New(start.URI(), start.Start(), end.End())
- rng, err := m.Range(spn)
- if err != nil {
- t.Fatal(err)
- }
- actionsRaw, err := r.server.CodeAction(r.ctx, &protocol.CodeActionParams{
- TextDocument: protocol.TextDocumentIdentifier{
- URI: protocol.URIFromSpanURI(uri),
- },
- Range: rng,
- Context: protocol.CodeActionContext{
- Only: []protocol.CodeActionKind{"refactor.extract"},
- },
- })
- if err != nil {
- t.Fatal(err)
- }
- var actions []protocol.CodeAction
- for _, action := range actionsRaw {
- if action.Command.Title == "Extract function" {
- actions = append(actions, action)
- }
- }
- // Hack: We assume that we only get one code action per range.
- // TODO(rstambler): Support multiple code actions per test.
- if len(actions) == 0 || len(actions) > 1 {
- t.Fatalf("unexpected number of code actions, want 1, got %v", len(actions))
- }
- _, err = r.server.ExecuteCommand(r.ctx, &protocol.ExecuteCommandParams{
- Command: actions[0].Command.Command,
- Arguments: actions[0].Command.Arguments,
- })
- if err != nil {
- t.Fatal(err)
- }
- res := <-r.editRecv
- for u, got := range res {
- want := string(r.data.Golden("functionextraction_"+tests.SpanName(spn), u.Filename(), func() ([]byte, error) {
- return []byte(got), nil
- }))
- if want != got {
- t.Errorf("function extraction failed for %s:\n%s", u.Filename(), tests.Diff(t, want, got))
- }
- }
-}
-
-func (r *runner) MethodExtraction(t *testing.T, start span.Span, end span.Span) {
- uri := start.URI()
- m, err := r.data.Mapper(uri)
- if err != nil {
- t.Fatal(err)
- }
- spn := span.New(start.URI(), start.Start(), end.End())
- rng, err := m.Range(spn)
- if err != nil {
- t.Fatal(err)
- }
- actionsRaw, err := r.server.CodeAction(r.ctx, &protocol.CodeActionParams{
- TextDocument: protocol.TextDocumentIdentifier{
- URI: protocol.URIFromSpanURI(uri),
- },
- Range: rng,
- Context: protocol.CodeActionContext{
- Only: []protocol.CodeActionKind{"refactor.extract"},
- },
- })
- if err != nil {
- t.Fatal(err)
- }
- var actions []protocol.CodeAction
- for _, action := range actionsRaw {
- if action.Command.Title == "Extract method" {
- actions = append(actions, action)
- }
- }
- // Hack: We assume that we only get one matching code action per range.
- // TODO(rstambler): Support multiple code actions per test.
- if len(actions) == 0 || len(actions) > 1 {
- t.Fatalf("unexpected number of code actions, want 1, got %v", len(actions))
- }
- _, err = r.server.ExecuteCommand(r.ctx, &protocol.ExecuteCommandParams{
- Command: actions[0].Command.Command,
- Arguments: actions[0].Command.Arguments,
- })
- if err != nil {
- t.Fatal(err)
- }
- res := <-r.editRecv
- for u, got := range res {
- want := string(r.data.Golden("methodextraction_"+tests.SpanName(spn), u.Filename(), func() ([]byte, error) {
- return []byte(got), nil
- }))
- if want != got {
- t.Errorf("method extraction failed for %s:\n%s", u.Filename(), tests.Diff(t, want, got))
- }
- }
-}
-
-func (r *runner) Definition(t *testing.T, spn span.Span, d tests.Definition) {
- sm, err := r.data.Mapper(d.Src.URI())
- if err != nil {
- t.Fatal(err)
- }
- loc, err := sm.Location(d.Src)
- if err != nil {
- t.Fatalf("failed for %v: %v", d.Src, err)
- }
- tdpp := protocol.TextDocumentPositionParams{
- TextDocument: protocol.TextDocumentIdentifier{URI: loc.URI},
- Position: loc.Range.Start,
- }
- var locs []protocol.Location
- var hover *protocol.Hover
- if d.IsType {
- params := &protocol.TypeDefinitionParams{
- TextDocumentPositionParams: tdpp,
- }
- locs, err = r.server.TypeDefinition(r.ctx, params)
- } else {
- params := &protocol.DefinitionParams{
- TextDocumentPositionParams: tdpp,
- }
- locs, err = r.server.Definition(r.ctx, params)
- if err != nil {
- t.Fatalf("failed for %v: %+v", d.Src, err)
- }
- v := &protocol.HoverParams{
- TextDocumentPositionParams: tdpp,
- }
- hover, err = r.server.Hover(r.ctx, v)
- }
- if err != nil {
- t.Fatalf("failed for %v: %v", d.Src, err)
- }
- if len(locs) != 1 {
- t.Errorf("got %d locations for definition, expected 1", len(locs))
- }
- didSomething := false
- if hover != nil {
- didSomething = true
- tag := fmt.Sprintf("%s-hoverdef", d.Name)
- expectHover := string(r.data.Golden(tag, d.Src.URI().Filename(), func() ([]byte, error) {
- return []byte(hover.Contents.Value), nil
- }))
- got := tests.StripSubscripts(hover.Contents.Value)
- expectHover = tests.StripSubscripts(expectHover)
- if got != expectHover {
- t.Errorf("%s:\n%s", d.Src, tests.Diff(t, expectHover, got))
- }
- }
- if !d.OnlyHover {
- didSomething = true
- locURI := locs[0].URI.SpanURI()
- lm, err := r.data.Mapper(locURI)
- if err != nil {
- t.Fatal(err)
- }
- if def, err := lm.Span(locs[0]); err != nil {
- t.Fatalf("failed for %v: %v", locs[0], err)
- } else if def != d.Def {
- t.Errorf("for %v got %v want %v", d.Src, def, d.Def)
- }
- }
- if !didSomething {
- t.Errorf("no tests ran for %s", d.Src.URI())
- }
-}
-
-func (r *runner) Implementation(t *testing.T, spn span.Span, impls []span.Span) {
- sm, err := r.data.Mapper(spn.URI())
- if err != nil {
- t.Fatal(err)
- }
- loc, err := sm.Location(spn)
- if err != nil {
- t.Fatalf("failed for %v: %v", spn, err)
- }
- tdpp := protocol.TextDocumentPositionParams{
- TextDocument: protocol.TextDocumentIdentifier{URI: loc.URI},
- Position: loc.Range.Start,
- }
- var locs []protocol.Location
- params := &protocol.ImplementationParams{
- TextDocumentPositionParams: tdpp,
- }
- locs, err = r.server.Implementation(r.ctx, params)
- if err != nil {
- t.Fatalf("failed for %v: %v", spn, err)
- }
- if len(locs) != len(impls) {
- t.Fatalf("got %d locations for implementation, expected %d", len(locs), len(impls))
- }
-
- var results []span.Span
- for i := range locs {
- locURI := locs[i].URI.SpanURI()
- lm, err := r.data.Mapper(locURI)
- if err != nil {
- t.Fatal(err)
- }
- imp, err := lm.Span(locs[i])
- if err != nil {
- t.Fatalf("failed for %v: %v", locs[i], err)
- }
- results = append(results, imp)
- }
- // Sort results and expected to make tests deterministic.
- sort.SliceStable(results, func(i, j int) bool {
- return span.Compare(results[i], results[j]) == -1
- })
- sort.SliceStable(impls, func(i, j int) bool {
- return span.Compare(impls[i], impls[j]) == -1
- })
- for i := range results {
- if results[i] != impls[i] {
- t.Errorf("for %dth implementation of %v got %v want %v", i, spn, results[i], impls[i])
- }
- }
-}
-
-func (r *runner) Highlight(t *testing.T, src span.Span, locations []span.Span) {
- m, err := r.data.Mapper(src.URI())
- if err != nil {
- t.Fatal(err)
- }
- loc, err := m.Location(src)
- if err != nil {
- t.Fatalf("failed for %v: %v", locations[0], err)
- }
- tdpp := protocol.TextDocumentPositionParams{
- TextDocument: protocol.TextDocumentIdentifier{URI: loc.URI},
- Position: loc.Range.Start,
- }
- params := &protocol.DocumentHighlightParams{
- TextDocumentPositionParams: tdpp,
- }
- highlights, err := r.server.DocumentHighlight(r.ctx, params)
- if err != nil {
- t.Fatal(err)
- }
- if len(highlights) != len(locations) {
- t.Fatalf("got %d highlights for highlight at %v:%v:%v, expected %d", len(highlights), src.URI().Filename(), src.Start().Line(), src.Start().Column(), len(locations))
- }
- // Check to make sure highlights have a valid range.
- var results []span.Span
- for i := range highlights {
- h, err := m.RangeSpan(highlights[i].Range)
- if err != nil {
- t.Fatalf("failed for %v: %v", highlights[i], err)
- }
- results = append(results, h)
- }
- // Sort results to make tests deterministic since DocumentHighlight uses a map.
- sort.SliceStable(results, func(i, j int) bool {
- return span.Compare(results[i], results[j]) == -1
- })
- // Check to make sure all the expected highlights are found.
- for i := range results {
- if results[i] != locations[i] {
- t.Errorf("want %v, got %v\n", locations[i], results[i])
- }
- }
-}
-
-func (r *runner) Hover(t *testing.T, src span.Span, text string) {
- m, err := r.data.Mapper(src.URI())
- if err != nil {
- t.Fatal(err)
- }
- loc, err := m.Location(src)
- if err != nil {
- t.Fatalf("failed for %v", err)
- }
- tdpp := protocol.TextDocumentPositionParams{
- TextDocument: protocol.TextDocumentIdentifier{URI: loc.URI},
- Position: loc.Range.Start,
- }
- params := &protocol.HoverParams{
- TextDocumentPositionParams: tdpp,
- }
- hover, err := r.server.Hover(r.ctx, params)
- if err != nil {
- t.Fatal(err)
- }
- if text == "" {
- if hover != nil {
- t.Errorf("want nil, got %v\n", hover)
- }
- } else {
- if hover == nil {
- t.Fatalf("want hover result to include %s, but got nil", text)
- }
- if got := hover.Contents.Value; got != text {
- t.Errorf("want %v, got %v\n", text, got)
- }
- if want, got := loc.Range, hover.Range; want != got {
- t.Errorf("want range %v, got %v instead", want, got)
- }
- }
-}
-
-func (r *runner) References(t *testing.T, src span.Span, itemList []span.Span) {
- sm, err := r.data.Mapper(src.URI())
- if err != nil {
- t.Fatal(err)
- }
- loc, err := sm.Location(src)
- if err != nil {
- t.Fatalf("failed for %v: %v", src, err)
- }
- for _, includeDeclaration := range []bool{true, false} {
- t.Run(fmt.Sprintf("refs-declaration-%v", includeDeclaration), func(t *testing.T) {
- want := make(map[protocol.Location]bool)
- for i, pos := range itemList {
- // We don't want the first result if we aren't including the declaration.
- if i == 0 && !includeDeclaration {
- continue
- }
- m, err := r.data.Mapper(pos.URI())
- if err != nil {
- t.Fatal(err)
- }
- loc, err := m.Location(pos)
- if err != nil {
- t.Fatalf("failed for %v: %v", src, err)
- }
- want[loc] = true
- }
- params := &protocol.ReferenceParams{
- TextDocumentPositionParams: protocol.TextDocumentPositionParams{
- TextDocument: protocol.TextDocumentIdentifier{URI: loc.URI},
- Position: loc.Range.Start,
- },
- Context: protocol.ReferenceContext{
- IncludeDeclaration: includeDeclaration,
- },
- }
- got, err := r.server.References(r.ctx, params)
- if err != nil {
- t.Fatalf("failed for %v: %v", src, err)
- }
- if len(got) != len(want) {
- t.Errorf("references failed: different lengths got %v want %v", len(got), len(want))
- }
- for _, loc := range got {
- if !want[loc] {
- t.Errorf("references failed: incorrect references got %v want %v", loc, want)
- }
- }
- })
- }
-}
-
-func (r *runner) Rename(t *testing.T, spn span.Span, newText string) {
- tag := fmt.Sprintf("%s-rename", newText)
-
- uri := spn.URI()
- filename := uri.Filename()
- sm, err := r.data.Mapper(uri)
- if err != nil {
- t.Fatal(err)
- }
- loc, err := sm.Location(spn)
- if err != nil {
- t.Fatalf("failed for %v: %v", spn, err)
- }
-
- wedit, err := r.server.Rename(r.ctx, &protocol.RenameParams{
- TextDocument: protocol.TextDocumentIdentifier{
- URI: protocol.URIFromSpanURI(uri),
- },
- Position: loc.Range.Start,
- NewName: newText,
- })
- if err != nil {
- renamed := string(r.data.Golden(tag, filename, func() ([]byte, error) {
- return []byte(err.Error()), nil
- }))
- if err.Error() != renamed {
- t.Errorf("rename failed for %s, expected:\n%v\ngot:\n%v\n", newText, renamed, err)
- }
- return
- }
- res, err := applyTextDocumentEdits(r, wedit.DocumentChanges)
- if err != nil {
- t.Fatal(err)
- }
- var orderedURIs []string
- for uri := range res {
- orderedURIs = append(orderedURIs, string(uri))
- }
- sort.Strings(orderedURIs)
-
- var got string
- for i := 0; i < len(res); i++ {
- if i != 0 {
- got += "\n"
- }
- uri := span.URIFromURI(orderedURIs[i])
- if len(res) > 1 {
- got += filepath.Base(uri.Filename()) + ":\n"
- }
- val := res[uri]
- got += val
- }
- want := string(r.data.Golden(tag, filename, func() ([]byte, error) {
- return []byte(got), nil
- }))
- if want != got {
- t.Errorf("rename failed for %s:\n%s", newText, tests.Diff(t, want, got))
- }
-}
-
-func (r *runner) PrepareRename(t *testing.T, src span.Span, want *source.PrepareItem) {
- m, err := r.data.Mapper(src.URI())
- if err != nil {
- t.Fatal(err)
- }
- loc, err := m.Location(src)
- if err != nil {
- t.Fatalf("failed for %v: %v", src, err)
- }
- tdpp := protocol.TextDocumentPositionParams{
- TextDocument: protocol.TextDocumentIdentifier{URI: loc.URI},
- Position: loc.Range.Start,
- }
- params := &protocol.PrepareRenameParams{
- TextDocumentPositionParams: tdpp,
- }
- got, err := r.server.PrepareRename(context.Background(), params)
- if err != nil {
- t.Errorf("prepare rename failed for %v: got error: %v", src, err)
- return
- }
- // we all love typed nils
- if got == nil {
- if want.Text != "" { // expected an ident.
- t.Errorf("prepare rename failed for %v: got nil", src)
- }
- return
- }
- if got.Range.Start == got.Range.End {
- // Special case for 0-length ranges. Marks can't specify a 0-length range,
- // so just compare the start.
- if got.Range.Start != want.Range.Start {
- t.Errorf("prepare rename failed: incorrect point, got %v want %v", got.Range.Start, want.Range.Start)
- }
- } else {
- if protocol.CompareRange(got.Range, want.Range) != 0 {
- t.Errorf("prepare rename failed: incorrect range got %v want %v", got.Range, want.Range)
- }
- }
- if got.Placeholder != want.Text {
- t.Errorf("prepare rename failed: incorrect text got %v want %v", got.Placeholder, want.Text)
- }
-}
-
-func applyTextDocumentEdits(r *runner, edits []protocol.TextDocumentEdit) (map[span.URI]string, error) {
- res := map[span.URI]string{}
- for _, docEdits := range edits {
- uri := docEdits.TextDocument.URI.SpanURI()
- var m *protocol.ColumnMapper
- // If we have already edited this file, we use the edited version (rather than the
- // file in its original state) so that we preserve our initial changes.
- if content, ok := res[uri]; ok {
- m = &protocol.ColumnMapper{
- URI: uri,
- Converter: span.NewContentConverter(
- uri.Filename(), []byte(content)),
- Content: []byte(content),
- }
- } else {
- var err error
- if m, err = r.data.Mapper(uri); err != nil {
- return nil, err
- }
- }
- res[uri] = string(m.Content)
- sedits, err := source.FromProtocolEdits(m, docEdits.Edits)
- if err != nil {
- return nil, err
- }
- res[uri] = applyEdits(res[uri], sedits)
- }
- return res, nil
-}
-
-func applyEdits(contents string, edits []diff.TextEdit) string {
- res := contents
-
- // Apply the edits from the end of the file forward
- // to preserve the offsets
- for i := len(edits) - 1; i >= 0; i-- {
- edit := edits[i]
- start := edit.Span.Start().Offset()
- end := edit.Span.End().Offset()
- tmp := res[0:start] + edit.NewText
- res = tmp + res[end:]
- }
- return res
-}
-
-func (r *runner) Symbols(t *testing.T, uri span.URI, expectedSymbols []protocol.DocumentSymbol) {
- params := &protocol.DocumentSymbolParams{
- TextDocument: protocol.TextDocumentIdentifier{
- URI: protocol.URIFromSpanURI(uri),
- },
- }
- got, err := r.server.DocumentSymbol(r.ctx, params)
- if err != nil {
- t.Fatal(err)
- }
- if len(got) != len(expectedSymbols) {
- t.Errorf("want %d top-level symbols in %v, got %d", len(expectedSymbols), uri, len(got))
- return
- }
- symbols := make([]protocol.DocumentSymbol, len(got))
- for i, s := range got {
- s, ok := s.(protocol.DocumentSymbol)
- if !ok {
- t.Fatalf("%v: wanted []DocumentSymbols but got %v", uri, got)
- }
- symbols[i] = s
- }
- if diff := tests.DiffSymbols(t, uri, expectedSymbols, symbols); diff != "" {
- t.Error(diff)
- }
-}
-
-func (r *runner) WorkspaceSymbols(t *testing.T, uri span.URI, query string, typ tests.WorkspaceSymbolsTestType) {
- r.callWorkspaceSymbols(t, uri, query, typ)
-}
-
-func (r *runner) callWorkspaceSymbols(t *testing.T, uri span.URI, query string, typ tests.WorkspaceSymbolsTestType) {
- t.Helper()
-
- matcher := tests.WorkspaceSymbolsTestTypeToMatcher(typ)
-
- original := r.server.session.Options()
- modified := original
- modified.SymbolMatcher = matcher
- r.server.session.SetOptions(modified)
- defer r.server.session.SetOptions(original)
-
- params := &protocol.WorkspaceSymbolParams{
- Query: query,
- }
- gotSymbols, err := r.server.Symbol(r.ctx, params)
- if err != nil {
- t.Fatal(err)
- }
- got, err := tests.WorkspaceSymbolsString(r.ctx, r.data, uri, gotSymbols)
- if err != nil {
- t.Fatal(err)
- }
- got = filepath.ToSlash(tests.Normalize(got, r.normalizers))
- want := string(r.data.Golden(fmt.Sprintf("workspace_symbol-%s-%s", strings.ToLower(string(matcher)), query), uri.Filename(), func() ([]byte, error) {
- return []byte(got), nil
- }))
- if diff := tests.Diff(t, want, got); diff != "" {
- t.Error(diff)
- }
-}
-
-func (r *runner) SignatureHelp(t *testing.T, spn span.Span, want *protocol.SignatureHelp) {
- m, err := r.data.Mapper(spn.URI())
- if err != nil {
- t.Fatal(err)
- }
- loc, err := m.Location(spn)
- if err != nil {
- t.Fatalf("failed for %v: %v", loc, err)
- }
- tdpp := protocol.TextDocumentPositionParams{
- TextDocument: protocol.TextDocumentIdentifier{
- URI: protocol.URIFromSpanURI(spn.URI()),
- },
- Position: loc.Range.Start,
- }
- params := &protocol.SignatureHelpParams{
- TextDocumentPositionParams: tdpp,
- }
- got, err := r.server.SignatureHelp(r.ctx, params)
- if err != nil {
- // Only fail if we got an error we did not expect.
- if want != nil {
- t.Fatal(err)
- }
- return
- }
- if want == nil {
- if got != nil {
- t.Errorf("expected no signature, got %v", got)
- }
- return
- }
- if got == nil {
- t.Fatalf("expected %v, got nil", want)
- }
- diff, err := tests.DiffSignatures(spn, want, got)
- if err != nil {
- t.Fatal(err)
- }
- if diff != "" {
- t.Error(diff)
- }
-}
-
-func (r *runner) Link(t *testing.T, uri span.URI, wantLinks []tests.Link) {
- m, err := r.data.Mapper(uri)
- if err != nil {
- t.Fatal(err)
- }
- got, err := r.server.DocumentLink(r.ctx, &protocol.DocumentLinkParams{
- TextDocument: protocol.TextDocumentIdentifier{
- URI: protocol.URIFromSpanURI(uri),
- },
- })
- if err != nil {
- t.Fatal(err)
- }
- if diff := tests.DiffLinks(m, wantLinks, got); diff != "" {
- t.Error(diff)
- }
-}
-
-func (r *runner) AddImport(t *testing.T, uri span.URI, expectedImport string) {
- cmd, err := command.NewListKnownPackagesCommand("List Known Packages", command.URIArg{
- URI: protocol.URIFromSpanURI(uri),
- })
- if err != nil {
- t.Fatal(err)
- }
- resp, err := r.server.executeCommand(r.ctx, &protocol.ExecuteCommandParams{
- Command: cmd.Command,
- Arguments: cmd.Arguments,
- })
- if err != nil {
- t.Fatal(err)
- }
- res := resp.(command.ListKnownPackagesResult)
- var hasPkg bool
- for _, p := range res.Packages {
- if p == expectedImport {
- hasPkg = true
- break
- }
- }
- if !hasPkg {
- t.Fatalf("%s: got %v packages\nwant contains %q", command.ListKnownPackages, res.Packages, expectedImport)
- }
- cmd, err = command.NewAddImportCommand("Add Imports", command.AddImportArgs{
- URI: protocol.URIFromSpanURI(uri),
- ImportPath: expectedImport,
- })
- if err != nil {
- t.Fatal(err)
- }
- _, err = r.server.executeCommand(r.ctx, &protocol.ExecuteCommandParams{
- Command: cmd.Command,
- Arguments: cmd.Arguments,
- })
- if err != nil {
- t.Fatal(err)
- }
- got := (<-r.editRecv)[uri]
- want := r.data.Golden("addimport", uri.Filename(), func() ([]byte, error) {
- return []byte(got), nil
- })
- if want == nil {
- t.Fatalf("golden file %q not found", uri.Filename())
- }
- if diff := tests.Diff(t, got, string(want)); diff != "" {
- t.Errorf("%s mismatch\n%s", command.AddImport, diff)
- }
-}
-
-func TestBytesOffset(t *testing.T) {
- tests := []struct {
- text string
- pos protocol.Position
- want int
- }{
- {text: `a𐐀b`, pos: protocol.Position{Line: 0, Character: 0}, want: 0},
- {text: `a𐐀b`, pos: protocol.Position{Line: 0, Character: 1}, want: 1},
- {text: `a𐐀b`, pos: protocol.Position{Line: 0, Character: 2}, want: 1},
- {text: `a𐐀b`, pos: protocol.Position{Line: 0, Character: 3}, want: 5},
- {text: `a𐐀b`, pos: protocol.Position{Line: 0, Character: 4}, want: 6},
- {text: `a𐐀b`, pos: protocol.Position{Line: 0, Character: 5}, want: -1},
- {text: "aaa\nbbb\n", pos: protocol.Position{Line: 0, Character: 3}, want: 3},
- {text: "aaa\nbbb\n", pos: protocol.Position{Line: 0, Character: 4}, want: 3},
- {text: "aaa\nbbb\n", pos: protocol.Position{Line: 1, Character: 0}, want: 4},
- {text: "aaa\nbbb\n", pos: protocol.Position{Line: 1, Character: 3}, want: 7},
- {text: "aaa\nbbb\n", pos: protocol.Position{Line: 1, Character: 4}, want: 7},
- {text: "aaa\nbbb\n", pos: protocol.Position{Line: 2, Character: 0}, want: 8},
- {text: "aaa\nbbb\n", pos: protocol.Position{Line: 2, Character: 1}, want: -1},
- {text: "aaa\nbbb\n\n", pos: protocol.Position{Line: 2, Character: 0}, want: 8},
- }
-
- for i, test := range tests {
- fname := fmt.Sprintf("test %d", i)
- fset := token.NewFileSet()
- f := fset.AddFile(fname, -1, len(test.text))
- f.SetLinesForContent([]byte(test.text))
- uri := span.URIFromPath(fname)
- converter := span.NewContentConverter(fname, []byte(test.text))
- mapper := &protocol.ColumnMapper{
- URI: uri,
- Converter: converter,
- Content: []byte(test.text),
- }
- got, err := mapper.Point(test.pos)
- if err != nil && test.want != -1 {
- t.Errorf("unexpected error: %v", err)
- }
- if err == nil && got.Offset() != test.want {
- t.Errorf("want %d for %q(Line:%d,Character:%d), but got %d", test.want, test.text, int(test.pos.Line), int(test.pos.Character), got.Offset())
- }
- }
-}
-
-func (r *runner) collectDiagnostics(view source.View) {
- if r.diagnostics != nil {
- return
- }
- r.diagnostics = make(map[span.URI][]*source.Diagnostic)
-
- snapshot, release := view.Snapshot(r.ctx)
- defer release()
-
- // Always run diagnostics with analysis.
- r.server.diagnose(r.ctx, snapshot, true)
- for uri, reports := range r.server.diagnostics {
- for _, report := range reports.reports {
- for _, d := range report.diags {
- r.diagnostics[uri] = append(r.diagnostics[uri], d)
- }
- }
- }
-}
diff --git a/internal/lsp/lsppos/lsppos.go b/internal/lsp/lsppos/lsppos.go
deleted file mode 100644
index f27bde573..000000000
--- a/internal/lsp/lsppos/lsppos.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package lsppos provides utilities for working with LSP positions.
-//
-// See https://microsoft.github.io/language-server-protocol/specification#textDocuments
-// for a description of LSP positions. Notably:
-// - Positions are specified by a 0-based line count and 0-based utf-16
-// character offset.
-// - Positions are line-ending agnostic: there is no way to specify \r|\n or
-// \n|. Instead the former maps to the end of the current line, and the
-// latter to the start of the next line.
-package lsppos
-
-import (
- "sort"
- "unicode/utf8"
-)
-
-type Mapper struct {
- nonASCII bool
- src []byte
-
- // Start-of-line positions. If src is newline-terminated, the final entry will be empty.
- lines []int
-}
-
-func NewMapper(src []byte) *Mapper {
- m := &Mapper{src: src}
- if len(src) == 0 {
- return m
- }
- m.lines = []int{0}
- for offset, b := range src {
- if b == '\n' {
- m.lines = append(m.lines, offset+1)
- }
- if b >= utf8.RuneSelf {
- m.nonASCII = true
- }
- }
- return m
-}
-
-func (m *Mapper) Position(offset int) (line, char int) {
- if offset < 0 || offset > len(m.src) {
- return -1, -1
- }
- nextLine := sort.Search(len(m.lines), func(i int) bool {
- return offset < m.lines[i]
- })
- if nextLine == 0 {
- return -1, -1
- }
- line = nextLine - 1
- start := m.lines[line]
- var charOffset int
- if m.nonASCII {
- charOffset = UTF16len(m.src[start:offset])
- } else {
- charOffset = offset - start
- }
-
- var eol int
- if line == len(m.lines)-1 {
- eol = len(m.src)
- } else {
- eol = m.lines[line+1] - 1
- }
-
- // Adjustment for line-endings: \r|\n is the same as |\r\n.
- if offset == eol && offset > 0 && m.src[offset-1] == '\r' {
- charOffset--
- }
-
- return line, charOffset
-}
-
-func UTF16len(buf []byte) int {
- cnt := 0
- for _, r := range string(buf) {
- cnt++
- if r >= 1<<16 {
- cnt++
- }
- }
- return cnt
-}
diff --git a/internal/lsp/lsprpc/autostart_default.go b/internal/lsp/lsprpc/autostart_default.go
deleted file mode 100644
index b23a1e508..000000000
--- a/internal/lsp/lsprpc/autostart_default.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lsprpc
-
-import (
- exec "golang.org/x/sys/execabs"
-
- errors "golang.org/x/xerrors"
-)
-
-var (
- daemonize = func(*exec.Cmd) {}
- autoNetworkAddress = autoNetworkAddressDefault
- verifyRemoteOwnership = verifyRemoteOwnershipDefault
-)
-
-func runRemote(cmd *exec.Cmd) error {
- daemonize(cmd)
- if err := cmd.Start(); err != nil {
- return errors.Errorf("starting remote gopls: %w", err)
- }
- return nil
-}
-
-// autoNetworkAddress returns the default network and address for the
-// automatically-started gopls remote. See autostart_posix.go for more
-// information.
-func autoNetworkAddressDefault(goplsPath, id string) (network string, address string) {
- if id != "" {
- panic("identified remotes are not supported on windows")
- }
- return "tcp", "localhost:37374"
-}
-
-func verifyRemoteOwnershipDefault(network, address string) (bool, error) {
- return true, nil
-}
diff --git a/internal/lsp/lsprpc/autostart_posix.go b/internal/lsp/lsprpc/autostart_posix.go
deleted file mode 100644
index d5644e2b6..000000000
--- a/internal/lsp/lsprpc/autostart_posix.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris
-
-package lsprpc
-
-import (
- "crypto/sha256"
- "errors"
- "fmt"
- "log"
- "os"
- "os/user"
- "path/filepath"
- "strconv"
- "syscall"
-
- exec "golang.org/x/sys/execabs"
-
- "golang.org/x/xerrors"
-)
-
-func init() {
- daemonize = daemonizePosix
- autoNetworkAddress = autoNetworkAddressPosix
- verifyRemoteOwnership = verifyRemoteOwnershipPosix
-}
-
-func daemonizePosix(cmd *exec.Cmd) {
- cmd.SysProcAttr = &syscall.SysProcAttr{
- Setsid: true,
- }
-}
-
-// autoNetworkAddress resolves an id on the 'auto' pseduo-network to a
-// real network and address. On unix, this uses unix domain sockets.
-func autoNetworkAddressPosix(goplsPath, id string) (network string, address string) {
- // Especially when doing local development or testing, it's important that
- // the remote gopls instance we connect to is running the same binary as our
- // forwarder. So we encode a short hash of the binary path into the daemon
- // socket name. If possible, we also include the buildid in this hash, to
- // account for long-running processes where the binary has been subsequently
- // rebuilt.
- h := sha256.New()
- cmd := exec.Command("go", "tool", "buildid", goplsPath)
- cmd.Stdout = h
- var pathHash []byte
- if err := cmd.Run(); err == nil {
- pathHash = h.Sum(nil)
- } else {
- log.Printf("error getting current buildid: %v", err)
- sum := sha256.Sum256([]byte(goplsPath))
- pathHash = sum[:]
- }
- shortHash := fmt.Sprintf("%x", pathHash)[:6]
- user := os.Getenv("USER")
- if user == "" {
- user = "shared"
- }
- basename := filepath.Base(goplsPath)
- idComponent := ""
- if id != "" {
- idComponent = "-" + id
- }
- runtimeDir := os.TempDir()
- if xdg := os.Getenv("XDG_RUNTIME_DIR"); xdg != "" {
- runtimeDir = xdg
- }
- return "unix", filepath.Join(runtimeDir, fmt.Sprintf("%s-%s-daemon.%s%s", basename, shortHash, user, idComponent))
-}
-
-func verifyRemoteOwnershipPosix(network, address string) (bool, error) {
- if network != "unix" {
- return true, nil
- }
- fi, err := os.Stat(address)
- if err != nil {
- if os.IsNotExist(err) {
- return true, nil
- }
- return false, xerrors.Errorf("checking socket owner: %w", err)
- }
- stat, ok := fi.Sys().(*syscall.Stat_t)
- if !ok {
- return false, errors.New("fi.Sys() is not a Stat_t")
- }
- user, err := user.Current()
- if err != nil {
- return false, xerrors.Errorf("checking current user: %w", err)
- }
- uid, err := strconv.ParseUint(user.Uid, 10, 32)
- if err != nil {
- return false, xerrors.Errorf("parsing current UID: %w", err)
- }
- return stat.Uid == uint32(uid), nil
-}
diff --git a/internal/lsp/lsprpc/binder.go b/internal/lsp/lsprpc/binder.go
deleted file mode 100644
index f3320e17a..000000000
--- a/internal/lsp/lsprpc/binder.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lsprpc
-
-import (
- "context"
- "encoding/json"
- "fmt"
-
- "golang.org/x/tools/internal/event"
- jsonrpc2_v2 "golang.org/x/tools/internal/jsonrpc2_v2"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/xcontext"
- errors "golang.org/x/xerrors"
-)
-
-// The BinderFunc type adapts a bind function to implement the jsonrpc2.Binder
-// interface.
-type BinderFunc func(ctx context.Context, conn *jsonrpc2_v2.Connection) (jsonrpc2_v2.ConnectionOptions, error)
-
-func (f BinderFunc) Bind(ctx context.Context, conn *jsonrpc2_v2.Connection) (jsonrpc2_v2.ConnectionOptions, error) {
- return f(ctx, conn)
-}
-
-// Middleware defines a transformation of jsonrpc2 Binders, that may be
-// composed to build jsonrpc2 servers.
-type Middleware func(jsonrpc2_v2.Binder) jsonrpc2_v2.Binder
-
-// A ServerFunc is used to construct an LSP server for a given client.
-type ServerFunc func(context.Context, protocol.ClientCloser) protocol.Server
-
-// ServerBinder binds incoming connections to a new server.
-type ServerBinder struct {
- newServer ServerFunc
-}
-
-func NewServerBinder(newServer ServerFunc) *ServerBinder {
- return &ServerBinder{newServer: newServer}
-}
-
-func (b *ServerBinder) Bind(ctx context.Context, conn *jsonrpc2_v2.Connection) (jsonrpc2_v2.ConnectionOptions, error) {
- client := protocol.ClientDispatcherV2(conn)
- server := b.newServer(ctx, client)
- serverHandler := protocol.ServerHandlerV2(server)
- // Wrap the server handler to inject the client into each request context, so
- // that log events are reflected back to the client.
- wrapped := jsonrpc2_v2.HandlerFunc(func(ctx context.Context, req *jsonrpc2_v2.Request) (interface{}, error) {
- ctx = protocol.WithClient(ctx, client)
- return serverHandler.Handle(ctx, req)
- })
- preempter := &canceler{
- conn: conn,
- }
- return jsonrpc2_v2.ConnectionOptions{
- Handler: wrapped,
- Preempter: preempter,
- }, nil
-}
-
-type canceler struct {
- conn *jsonrpc2_v2.Connection
-}
-
-func (c *canceler) Preempt(ctx context.Context, req *jsonrpc2_v2.Request) (interface{}, error) {
- if req.Method != "$/cancelRequest" {
- return nil, jsonrpc2_v2.ErrNotHandled
- }
- var params protocol.CancelParams
- if err := json.Unmarshal(req.Params, &params); err != nil {
- return nil, errors.Errorf("%w: %v", jsonrpc2_v2.ErrParse, err)
- }
- var id jsonrpc2_v2.ID
- switch raw := params.ID.(type) {
- case float64:
- id = jsonrpc2_v2.Int64ID(int64(raw))
- case string:
- id = jsonrpc2_v2.StringID(raw)
- default:
- return nil, errors.Errorf("%w: invalid ID type %T", jsonrpc2_v2.ErrParse, params.ID)
- }
- c.conn.Cancel(id)
- return nil, nil
-}
-
-type ForwardBinder struct {
- dialer jsonrpc2_v2.Dialer
- onBind func(*jsonrpc2_v2.Connection)
-}
-
-func NewForwardBinder(dialer jsonrpc2_v2.Dialer) *ForwardBinder {
- return &ForwardBinder{
- dialer: dialer,
- }
-}
-
-func (b *ForwardBinder) Bind(ctx context.Context, conn *jsonrpc2_v2.Connection) (opts jsonrpc2_v2.ConnectionOptions, _ error) {
- client := protocol.ClientDispatcherV2(conn)
- clientBinder := NewClientBinder(func(context.Context, protocol.Server) protocol.Client { return client })
- serverConn, err := jsonrpc2_v2.Dial(context.Background(), b.dialer, clientBinder)
- if err != nil {
- return opts, err
- }
- if b.onBind != nil {
- b.onBind(serverConn)
- }
- server := protocol.ServerDispatcherV2(serverConn)
- preempter := &canceler{
- conn: conn,
- }
- detached := xcontext.Detach(ctx)
- go func() {
- conn.Wait()
- if err := serverConn.Close(); err != nil {
- event.Log(detached, fmt.Sprintf("closing remote connection: %v", err))
- }
- }()
- return jsonrpc2_v2.ConnectionOptions{
- Handler: protocol.ServerHandlerV2(server),
- Preempter: preempter,
- }, nil
-}
-
-// A ClientFunc is used to construct an LSP client for a given server.
-type ClientFunc func(context.Context, protocol.Server) protocol.Client
-
-// ClientBinder binds an LSP client to an incoming connection.
-type ClientBinder struct {
- newClient ClientFunc
-}
-
-func NewClientBinder(newClient ClientFunc) *ClientBinder {
- return &ClientBinder{newClient}
-}
-
-func (b *ClientBinder) Bind(ctx context.Context, conn *jsonrpc2_v2.Connection) (jsonrpc2_v2.ConnectionOptions, error) {
- server := protocol.ServerDispatcherV2(conn)
- client := b.newClient(ctx, server)
- return jsonrpc2_v2.ConnectionOptions{
- Handler: protocol.ClientHandlerV2(client),
- }, nil
-}
diff --git a/internal/lsp/lsprpc/binder_test.go b/internal/lsp/lsprpc/binder_test.go
deleted file mode 100644
index f7dd83033..000000000
--- a/internal/lsp/lsprpc/binder_test.go
+++ /dev/null
@@ -1,154 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lsprpc_test
-
-import (
- "context"
- "regexp"
- "strings"
- "testing"
- "time"
-
- jsonrpc2_v2 "golang.org/x/tools/internal/jsonrpc2_v2"
- "golang.org/x/tools/internal/lsp/protocol"
-
- . "golang.org/x/tools/internal/lsp/lsprpc"
-)
-
-type TestEnv struct {
- Listeners []jsonrpc2_v2.Listener
- Conns []*jsonrpc2_v2.Connection
- Servers []*jsonrpc2_v2.Server
-}
-
-func (e *TestEnv) Shutdown(t *testing.T) {
- for _, l := range e.Listeners {
- if err := l.Close(); err != nil {
- t.Error(err)
- }
- }
- for _, c := range e.Conns {
- if err := c.Close(); err != nil {
- t.Error(err)
- }
- }
- for _, s := range e.Servers {
- if err := s.Wait(); err != nil {
- t.Error(err)
- }
- }
-}
-
-func (e *TestEnv) serve(ctx context.Context, t *testing.T, server jsonrpc2_v2.Binder) (jsonrpc2_v2.Listener, *jsonrpc2_v2.Server) {
- l, err := jsonrpc2_v2.NetPipeListener(ctx)
- if err != nil {
- t.Fatal(err)
- }
- e.Listeners = append(e.Listeners, l)
- s, err := jsonrpc2_v2.Serve(ctx, l, server)
- if err != nil {
- t.Fatal(err)
- }
- e.Servers = append(e.Servers, s)
- return l, s
-}
-
-func (e *TestEnv) dial(ctx context.Context, t *testing.T, dialer jsonrpc2_v2.Dialer, client jsonrpc2_v2.Binder, forwarded bool) *jsonrpc2_v2.Connection {
- if forwarded {
- l, _ := e.serve(ctx, t, NewForwardBinder(dialer))
- dialer = l.Dialer()
- }
- conn, err := jsonrpc2_v2.Dial(ctx, dialer, client)
- if err != nil {
- t.Fatal(err)
- }
- e.Conns = append(e.Conns, conn)
- return conn
-}
-
-func staticClientBinder(client protocol.Client) jsonrpc2_v2.Binder {
- f := func(context.Context, protocol.Server) protocol.Client { return client }
- return NewClientBinder(f)
-}
-
-func staticServerBinder(server protocol.Server) jsonrpc2_v2.Binder {
- f := func(ctx context.Context, client protocol.ClientCloser) protocol.Server {
- return server
- }
- return NewServerBinder(f)
-}
-
-func TestClientLoggingV2(t *testing.T) {
- ctx := context.Background()
-
- for name, forwarded := range map[string]bool{
- "forwarded": true,
- "standalone": false,
- } {
- t.Run(name, func(t *testing.T) {
- client := FakeClient{Logs: make(chan string, 10)}
- env := new(TestEnv)
- defer env.Shutdown(t)
- l, _ := env.serve(ctx, t, staticServerBinder(PingServer{}))
- conn := env.dial(ctx, t, l.Dialer(), staticClientBinder(client), forwarded)
-
- if err := protocol.ServerDispatcherV2(conn).DidOpen(ctx, &protocol.DidOpenTextDocumentParams{}); err != nil {
- t.Errorf("DidOpen: %v", err)
- }
- select {
- case got := <-client.Logs:
- want := "ping"
- matched, err := regexp.MatchString(want, got)
- if err != nil {
- t.Fatal(err)
- }
- if !matched {
- t.Errorf("got log %q, want a log containing %q", got, want)
- }
- case <-time.After(1 * time.Second):
- t.Error("timeout waiting for client log")
- }
- })
- }
-}
-
-func TestRequestCancellationV2(t *testing.T) {
- ctx := context.Background()
-
- for name, forwarded := range map[string]bool{
- "forwarded": true,
- "standalone": false,
- } {
- t.Run(name, func(t *testing.T) {
- server := WaitableServer{
- Started: make(chan struct{}),
- Completed: make(chan error),
- }
- env := new(TestEnv)
- defer env.Shutdown(t)
- l, _ := env.serve(ctx, t, staticServerBinder(server))
- client := FakeClient{Logs: make(chan string, 10)}
- conn := env.dial(ctx, t, l.Dialer(), staticClientBinder(client), forwarded)
-
- sd := protocol.ServerDispatcherV2(conn)
- ctx, cancel := context.WithCancel(ctx)
-
- result := make(chan error)
- go func() {
- _, err := sd.Hover(ctx, &protocol.HoverParams{})
- result <- err
- }()
- // Wait for the Hover request to start.
- <-server.Started
- cancel()
- if err := <-result; err == nil {
- t.Error("nil error for cancelled Hover(), want non-nil")
- }
- if err := <-server.Completed; err == nil || !strings.Contains(err.Error(), "cancelled hover") {
- t.Errorf("Hover(): unexpected server-side error %v", err)
- }
- })
- }
-}
diff --git a/internal/lsp/lsprpc/commandinterceptor.go b/internal/lsp/lsprpc/commandinterceptor.go
deleted file mode 100644
index 5c36af759..000000000
--- a/internal/lsp/lsprpc/commandinterceptor.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lsprpc
-
-import (
- "context"
- "encoding/json"
-
- jsonrpc2_v2 "golang.org/x/tools/internal/jsonrpc2_v2"
- "golang.org/x/tools/internal/lsp/protocol"
-)
-
-// HandlerMiddleware is a middleware that only modifies the jsonrpc2 handler.
-type HandlerMiddleware func(jsonrpc2_v2.Handler) jsonrpc2_v2.Handler
-
-// BindHandler transforms a HandlerMiddleware into a Middleware.
-func BindHandler(hmw HandlerMiddleware) Middleware {
- return Middleware(func(binder jsonrpc2_v2.Binder) jsonrpc2_v2.Binder {
- return BinderFunc(func(ctx context.Context, conn *jsonrpc2_v2.Connection) (jsonrpc2_v2.ConnectionOptions, error) {
- opts, err := binder.Bind(ctx, conn)
- if err != nil {
- return opts, err
- }
- opts.Handler = hmw(opts.Handler)
- return opts, nil
- })
- })
-}
-
-func CommandInterceptor(command string, run func(*protocol.ExecuteCommandParams) (interface{}, error)) Middleware {
- return BindHandler(func(delegate jsonrpc2_v2.Handler) jsonrpc2_v2.Handler {
- return jsonrpc2_v2.HandlerFunc(func(ctx context.Context, req *jsonrpc2_v2.Request) (interface{}, error) {
- if req.Method == "workspace/executeCommand" {
- var params protocol.ExecuteCommandParams
- if err := json.Unmarshal(req.Params, &params); err == nil {
- if params.Command == command {
- return run(&params)
- }
- }
- }
-
- return delegate.Handle(ctx, req)
- })
- })
-}
diff --git a/internal/lsp/lsprpc/commandinterceptor_test.go b/internal/lsp/lsprpc/commandinterceptor_test.go
deleted file mode 100644
index 06550e8fa..000000000
--- a/internal/lsp/lsprpc/commandinterceptor_test.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lsprpc_test
-
-import (
- "context"
- "testing"
-
- "golang.org/x/tools/internal/lsp/protocol"
-
- . "golang.org/x/tools/internal/lsp/lsprpc"
-)
-
-func TestCommandInterceptor(t *testing.T) {
- const command = "foo"
- caught := false
- intercept := func(_ *protocol.ExecuteCommandParams) (interface{}, error) {
- caught = true
- return map[string]interface{}{}, nil
- }
-
- ctx := context.Background()
- env := new(TestEnv)
- defer env.Shutdown(t)
- mw := CommandInterceptor(command, intercept)
- l, _ := env.serve(ctx, t, mw(noopBinder))
- conn := env.dial(ctx, t, l.Dialer(), noopBinder, false)
-
- params := &protocol.ExecuteCommandParams{
- Command: command,
- }
- var res interface{}
- err := conn.Call(ctx, "workspace/executeCommand", params).Await(ctx, &res)
- if err != nil {
- t.Fatal(err)
- }
- if !caught {
- t.Errorf("workspace/executeCommand was not intercepted")
- }
-}
diff --git a/internal/lsp/lsprpc/dialer.go b/internal/lsp/lsprpc/dialer.go
deleted file mode 100644
index 713307ca0..000000000
--- a/internal/lsp/lsprpc/dialer.go
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lsprpc
-
-import (
- "context"
- "fmt"
- "io"
- "net"
- "os"
- "time"
-
- exec "golang.org/x/sys/execabs"
- "golang.org/x/tools/internal/event"
- errors "golang.org/x/xerrors"
-)
-
-// AutoNetwork is the pseudo network type used to signal that gopls should use
-// automatic discovery to resolve a remote address.
-const AutoNetwork = "auto"
-
-// An AutoDialer is a jsonrpc2 dialer that understands the 'auto' network.
-type AutoDialer struct {
- network, addr string // the 'real' network and address
- isAuto bool // whether the server is on the 'auto' network
-
- executable string
- argFunc func(network, addr string) []string
-}
-
-func NewAutoDialer(rawAddr string, argFunc func(network, addr string) []string) (*AutoDialer, error) {
- d := AutoDialer{
- argFunc: argFunc,
- }
- d.network, d.addr = ParseAddr(rawAddr)
- if d.network == AutoNetwork {
- d.isAuto = true
- bin, err := os.Executable()
- if err != nil {
- return nil, errors.Errorf("getting executable: %w", err)
- }
- d.executable = bin
- d.network, d.addr = autoNetworkAddress(bin, d.addr)
- }
- return &d, nil
-}
-
-// Dial implements the jsonrpc2.Dialer interface.
-func (d *AutoDialer) Dial(ctx context.Context) (io.ReadWriteCloser, error) {
- conn, err := d.dialNet(ctx)
- return conn, err
-}
-
-// TODO(rFindley): remove this once we no longer need to integrate with v1 of
-// the jsonrpc2 package.
-func (d *AutoDialer) dialNet(ctx context.Context) (net.Conn, error) {
- // Attempt to verify that we own the remote. This is imperfect, but if we can
- // determine that the remote is owned by a different user, we should fail.
- ok, err := verifyRemoteOwnership(d.network, d.addr)
- if err != nil {
- // If the ownership check itself failed, we fail open but log an error to
- // the user.
- event.Error(ctx, "unable to check daemon socket owner, failing open", err)
- } else if !ok {
- // We successfully checked that the socket is not owned by us, we fail
- // closed.
- return nil, fmt.Errorf("socket %q is owned by a different user", d.addr)
- }
- const dialTimeout = 1 * time.Second
- // Try dialing our remote once, in case it is already running.
- netConn, err := net.DialTimeout(d.network, d.addr, dialTimeout)
- if err == nil {
- return netConn, nil
- }
- if d.isAuto && d.argFunc != nil {
- if d.network == "unix" {
- // Sometimes the socketfile isn't properly cleaned up when the server
- // shuts down. Since we have already tried and failed to dial this
- // address, it should *usually* be safe to remove the socket before
- // binding to the address.
- // TODO(rfindley): there is probably a race here if multiple server
- // instances are simultaneously starting up.
- if _, err := os.Stat(d.addr); err == nil {
- if err := os.Remove(d.addr); err != nil {
- return nil, errors.Errorf("removing remote socket file: %w", err)
- }
- }
- }
- args := d.argFunc(d.network, d.addr)
- cmd := exec.Command(d.executable, args...)
- if err := runRemote(cmd); err != nil {
- return nil, err
- }
- }
-
- const retries = 5
- // It can take some time for the newly started server to bind to our address,
- // so we retry for a bit.
- for retry := 0; retry < retries; retry++ {
- startDial := time.Now()
- netConn, err = net.DialTimeout(d.network, d.addr, dialTimeout)
- if err == nil {
- return netConn, nil
- }
- event.Log(ctx, fmt.Sprintf("failed attempt #%d to connect to remote: %v\n", retry+2, err))
- // In case our failure was a fast-failure, ensure we wait at least
- // f.dialTimeout before trying again.
- if retry != retries-1 {
- time.Sleep(dialTimeout - time.Since(startDial))
- }
- }
- return nil, errors.Errorf("dialing remote: %w", err)
-}
diff --git a/internal/lsp/lsprpc/goenv.go b/internal/lsp/lsprpc/goenv.go
deleted file mode 100644
index 4b16d8d9e..000000000
--- a/internal/lsp/lsprpc/goenv.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lsprpc
-
-import (
- "context"
- "encoding/json"
- "fmt"
-
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/gocommand"
- jsonrpc2_v2 "golang.org/x/tools/internal/jsonrpc2_v2"
- "golang.org/x/tools/internal/lsp/protocol"
-)
-
-func GoEnvMiddleware() (Middleware, error) {
- return BindHandler(func(delegate jsonrpc2_v2.Handler) jsonrpc2_v2.Handler {
- return jsonrpc2_v2.HandlerFunc(func(ctx context.Context, req *jsonrpc2_v2.Request) (interface{}, error) {
- if req.Method == "initialize" {
- if err := addGoEnvToInitializeRequestV2(ctx, req); err != nil {
- event.Error(ctx, "adding go env to initialize", err)
- }
- }
- return delegate.Handle(ctx, req)
- })
- }), nil
-}
-
-func addGoEnvToInitializeRequestV2(ctx context.Context, req *jsonrpc2_v2.Request) error {
- var params protocol.ParamInitialize
- if err := json.Unmarshal(req.Params, &params); err != nil {
- return err
- }
- var opts map[string]interface{}
- switch v := params.InitializationOptions.(type) {
- case nil:
- opts = make(map[string]interface{})
- case map[string]interface{}:
- opts = v
- default:
- return fmt.Errorf("unexpected type for InitializationOptions: %T", v)
- }
- envOpt, ok := opts["env"]
- if !ok {
- envOpt = make(map[string]interface{})
- }
- env, ok := envOpt.(map[string]interface{})
- if !ok {
- return fmt.Errorf("env option is %T, expected a map", envOpt)
- }
- goenv, err := getGoEnv(ctx, env)
- if err != nil {
- return err
- }
- for govar, value := range goenv {
- env[govar] = value
- }
- opts["env"] = env
- params.InitializationOptions = opts
- raw, err := json.Marshal(params)
- if err != nil {
- return fmt.Errorf("marshaling updated options: %v", err)
- }
- req.Params = json.RawMessage(raw)
- return nil
-}
-
-func getGoEnv(ctx context.Context, env map[string]interface{}) (map[string]string, error) {
- var runEnv []string
- for k, v := range env {
- runEnv = append(runEnv, fmt.Sprintf("%s=%s", k, v))
- }
- runner := gocommand.Runner{}
- output, err := runner.Run(ctx, gocommand.Invocation{
- Verb: "env",
- Args: []string{"-json"},
- Env: runEnv,
- })
- if err != nil {
- return nil, err
- }
- envmap := make(map[string]string)
- if err := json.Unmarshal(output.Bytes(), &envmap); err != nil {
- return nil, err
- }
- return envmap, nil
-}
diff --git a/internal/lsp/lsprpc/goenv_test.go b/internal/lsp/lsprpc/goenv_test.go
deleted file mode 100644
index cdfe23c90..000000000
--- a/internal/lsp/lsprpc/goenv_test.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lsprpc_test
-
-import (
- "context"
- "testing"
-
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/testenv"
-
- . "golang.org/x/tools/internal/lsp/lsprpc"
-)
-
-type initServer struct {
- protocol.Server
-
- params *protocol.ParamInitialize
-}
-
-func (s *initServer) Initialize(ctx context.Context, params *protocol.ParamInitialize) (*protocol.InitializeResult, error) {
- s.params = params
- return &protocol.InitializeResult{}, nil
-}
-
-func TestGoEnvMiddleware(t *testing.T) {
- testenv.NeedsGo1Point(t, 13)
-
- ctx := context.Background()
-
- server := &initServer{}
- env := new(TestEnv)
- defer env.Shutdown(t)
- l, _ := env.serve(ctx, t, staticServerBinder(server))
- mw, err := GoEnvMiddleware()
- if err != nil {
- t.Fatal(err)
- }
- binder := mw(NewForwardBinder(l.Dialer()))
- l, _ = env.serve(ctx, t, binder)
- conn := env.dial(ctx, t, l.Dialer(), noopBinder, true)
- dispatch := protocol.ServerDispatcherV2(conn)
- initParams := &protocol.ParamInitialize{}
- initParams.InitializationOptions = map[string]interface{}{
- "env": map[string]interface{}{
- "GONOPROXY": "example.com",
- },
- }
- if _, err := dispatch.Initialize(ctx, initParams); err != nil {
- t.Fatal(err)
- }
-
- if server.params == nil {
- t.Fatalf("initialize params are unset")
- }
- envOpts := server.params.InitializationOptions.(map[string]interface{})["env"].(map[string]interface{})
-
- // Check for an arbitrary Go variable. It should be set.
- if _, ok := envOpts["GOPRIVATE"]; !ok {
- t.Errorf("Go environment variable GOPRIVATE unset in initialization options")
- }
- // Check that the variable present in our user config was not overwritten.
- if got, want := envOpts["GONOPROXY"], "example.com"; got != want {
- t.Errorf("GONOPROXY=%q, want %q", got, want)
- }
-}
diff --git a/internal/lsp/lsprpc/lsprpc.go b/internal/lsp/lsprpc/lsprpc.go
deleted file mode 100644
index ca32f0e17..000000000
--- a/internal/lsp/lsprpc/lsprpc.go
+++ /dev/null
@@ -1,530 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package lsprpc implements a jsonrpc2.StreamServer that may be used to
-// serve the LSP on a jsonrpc2 channel.
-package lsprpc
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "log"
- "net"
- "os"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/jsonrpc2"
- "golang.org/x/tools/internal/lsp"
- "golang.org/x/tools/internal/lsp/cache"
- "golang.org/x/tools/internal/lsp/command"
- "golang.org/x/tools/internal/lsp/debug"
- "golang.org/x/tools/internal/lsp/debug/tag"
- "golang.org/x/tools/internal/lsp/protocol"
- errors "golang.org/x/xerrors"
-)
-
-// Unique identifiers for client/server.
-var serverIndex int64
-
-// The StreamServer type is a jsonrpc2.StreamServer that handles incoming
-// streams as a new LSP session, using a shared cache.
-type StreamServer struct {
- cache *cache.Cache
- // daemon controls whether or not to log new connections.
- daemon bool
-
- // serverForTest may be set to a test fake for testing.
- serverForTest protocol.Server
-}
-
-// NewStreamServer creates a StreamServer using the shared cache. If
-// withTelemetry is true, each session is instrumented with telemetry that
-// records RPC statistics.
-func NewStreamServer(cache *cache.Cache, daemon bool) *StreamServer {
- return &StreamServer{cache: cache, daemon: daemon}
-}
-
-func (s *StreamServer) Binder() *ServerBinder {
- newServer := func(ctx context.Context, client protocol.ClientCloser) protocol.Server {
- session := s.cache.NewSession(ctx)
- server := s.serverForTest
- if server == nil {
- server = lsp.NewServer(session, client)
- debug.GetInstance(ctx).AddService(server, session)
- }
- return server
- }
- return NewServerBinder(newServer)
-}
-
-// ServeStream implements the jsonrpc2.StreamServer interface, by handling
-// incoming streams using a new lsp server.
-func (s *StreamServer) ServeStream(ctx context.Context, conn jsonrpc2.Conn) error {
- client := protocol.ClientDispatcher(conn)
- session := s.cache.NewSession(ctx)
- server := s.serverForTest
- if server == nil {
- server = lsp.NewServer(session, client)
- debug.GetInstance(ctx).AddService(server, session)
- }
- // Clients may or may not send a shutdown message. Make sure the server is
- // shut down.
- // TODO(rFindley): this shutdown should perhaps be on a disconnected context.
- defer func() {
- if err := server.Shutdown(ctx); err != nil {
- event.Error(ctx, "error shutting down", err)
- }
- }()
- executable, err := os.Executable()
- if err != nil {
- log.Printf("error getting gopls path: %v", err)
- executable = ""
- }
- ctx = protocol.WithClient(ctx, client)
- conn.Go(ctx,
- protocol.Handlers(
- handshaker(session, executable, s.daemon,
- protocol.ServerHandler(server,
- jsonrpc2.MethodNotFound))))
- if s.daemon {
- log.Printf("Session %s: connected", session.ID())
- defer log.Printf("Session %s: exited", session.ID())
- }
- <-conn.Done()
- return conn.Err()
-}
-
-// A Forwarder is a jsonrpc2.StreamServer that handles an LSP stream by
-// forwarding it to a remote. This is used when the gopls process started by
-// the editor is in the `-remote` mode, which means it finds and connects to a
-// separate gopls daemon. In these cases, we still want the forwarder gopls to
-// be instrumented with telemetry, and want to be able to in some cases hijack
-// the jsonrpc2 connection with the daemon.
-type Forwarder struct {
- dialer *AutoDialer
-
- mu sync.Mutex
- // Hold on to the server connection so that we can redo the handshake if any
- // information changes.
- serverConn jsonrpc2.Conn
- serverID string
-}
-
-// NewForwarder creates a new Forwarder, ready to forward connections to the
-// remote server specified by rawAddr. If provided and rawAddr indicates an
-// 'automatic' address (starting with 'auto;'), argFunc may be used to start a
-// remote server for the auto-discovered address.
-func NewForwarder(rawAddr string, argFunc func(network, address string) []string) (*Forwarder, error) {
- dialer, err := NewAutoDialer(rawAddr, argFunc)
- if err != nil {
- return nil, err
- }
- fwd := &Forwarder{
- dialer: dialer,
- }
- return fwd, nil
-}
-
-// QueryServerState queries the server state of the current server.
-func QueryServerState(ctx context.Context, addr string) (*ServerState, error) {
- serverConn, err := dialRemote(ctx, addr)
- if err != nil {
- return nil, err
- }
- var state ServerState
- if err := protocol.Call(ctx, serverConn, sessionsMethod, nil, &state); err != nil {
- return nil, errors.Errorf("querying server state: %w", err)
- }
- return &state, nil
-}
-
-// dialRemote is used for making calls into the gopls daemon. addr should be a
-// URL, possibly on the synthetic 'auto' network (e.g. tcp://..., unix://...,
-// or auto://...).
-func dialRemote(ctx context.Context, addr string) (jsonrpc2.Conn, error) {
- network, address := ParseAddr(addr)
- if network == AutoNetwork {
- gp, err := os.Executable()
- if err != nil {
- return nil, errors.Errorf("getting gopls path: %w", err)
- }
- network, address = autoNetworkAddress(gp, address)
- }
- netConn, err := net.DialTimeout(network, address, 5*time.Second)
- if err != nil {
- return nil, errors.Errorf("dialing remote: %w", err)
- }
- serverConn := jsonrpc2.NewConn(jsonrpc2.NewHeaderStream(netConn))
- serverConn.Go(ctx, jsonrpc2.MethodNotFound)
- return serverConn, nil
-}
-
-func ExecuteCommand(ctx context.Context, addr string, id string, request, result interface{}) error {
- serverConn, err := dialRemote(ctx, addr)
- if err != nil {
- return err
- }
- args, err := command.MarshalArgs(request)
- if err != nil {
- return err
- }
- params := protocol.ExecuteCommandParams{
- Command: id,
- Arguments: args,
- }
- return protocol.Call(ctx, serverConn, "workspace/executeCommand", params, result)
-}
-
-// ServeStream dials the forwarder remote and binds the remote to serve the LSP
-// on the incoming stream.
-func (f *Forwarder) ServeStream(ctx context.Context, clientConn jsonrpc2.Conn) error {
- client := protocol.ClientDispatcher(clientConn)
-
- netConn, err := f.dialer.dialNet(ctx)
- if err != nil {
- return errors.Errorf("forwarder: connecting to remote: %w", err)
- }
- serverConn := jsonrpc2.NewConn(jsonrpc2.NewHeaderStream(netConn))
- server := protocol.ServerDispatcher(serverConn)
-
- // Forward between connections.
- serverConn.Go(ctx,
- protocol.Handlers(
- protocol.ClientHandler(client,
- jsonrpc2.MethodNotFound)))
-
- // Don't run the clientConn yet, so that we can complete the handshake before
- // processing any client messages.
-
- // Do a handshake with the server instance to exchange debug information.
- index := atomic.AddInt64(&serverIndex, 1)
- f.mu.Lock()
- f.serverConn = serverConn
- f.serverID = strconv.FormatInt(index, 10)
- f.mu.Unlock()
- f.handshake(ctx)
- clientConn.Go(ctx,
- protocol.Handlers(
- f.handler(
- protocol.ServerHandler(server,
- jsonrpc2.MethodNotFound))))
-
- select {
- case <-serverConn.Done():
- clientConn.Close()
- case <-clientConn.Done():
- serverConn.Close()
- }
-
- err = nil
- if serverConn.Err() != nil {
- err = errors.Errorf("remote disconnected: %v", serverConn.Err())
- } else if clientConn.Err() != nil {
- err = errors.Errorf("client disconnected: %v", clientConn.Err())
- }
- event.Log(ctx, fmt.Sprintf("forwarder: exited with error: %v", err))
- return err
-}
-
-// TODO(rfindley): remove this handshaking in favor of middleware.
-func (f *Forwarder) handshake(ctx context.Context) {
- // This call to os.Execuable is redundant, and will be eliminated by the
- // transition to the V2 API.
- goplsPath, err := os.Executable()
- if err != nil {
- event.Error(ctx, "getting executable for handshake", err)
- goplsPath = ""
- }
- var (
- hreq = handshakeRequest{
- ServerID: f.serverID,
- GoplsPath: goplsPath,
- }
- hresp handshakeResponse
- )
- if di := debug.GetInstance(ctx); di != nil {
- hreq.Logfile = di.Logfile
- hreq.DebugAddr = di.ListenedDebugAddress()
- }
- if err := protocol.Call(ctx, f.serverConn, handshakeMethod, hreq, &hresp); err != nil {
- // TODO(rfindley): at some point in the future we should return an error
- // here. Handshakes have become functional in nature.
- event.Error(ctx, "forwarder: gopls handshake failed", err)
- }
- if hresp.GoplsPath != goplsPath {
- event.Error(ctx, "", fmt.Errorf("forwarder: gopls path mismatch: forwarder is %q, remote is %q", goplsPath, hresp.GoplsPath))
- }
- event.Log(ctx, "New server",
- tag.NewServer.Of(f.serverID),
- tag.Logfile.Of(hresp.Logfile),
- tag.DebugAddress.Of(hresp.DebugAddr),
- tag.GoplsPath.Of(hresp.GoplsPath),
- tag.ClientID.Of(hresp.SessionID),
- )
-}
-
-func ConnectToRemote(ctx context.Context, addr string) (net.Conn, error) {
- dialer, err := NewAutoDialer(addr, nil)
- if err != nil {
- return nil, err
- }
- return dialer.dialNet(ctx)
-}
-
-// handler intercepts messages to the daemon to enrich them with local
-// information.
-func (f *Forwarder) handler(handler jsonrpc2.Handler) jsonrpc2.Handler {
- return func(ctx context.Context, reply jsonrpc2.Replier, r jsonrpc2.Request) error {
- // Intercept certain messages to add special handling.
- switch r.Method() {
- case "initialize":
- if newr, err := addGoEnvToInitializeRequest(ctx, r); err == nil {
- r = newr
- } else {
- log.Printf("unable to add local env to initialize request: %v", err)
- }
- case "workspace/executeCommand":
- var params protocol.ExecuteCommandParams
- if err := json.Unmarshal(r.Params(), &params); err == nil {
- if params.Command == command.StartDebugging.ID() {
- var args command.DebuggingArgs
- if err := command.UnmarshalArgs(params.Arguments, &args); err == nil {
- reply = f.replyWithDebugAddress(ctx, reply, args)
- } else {
- event.Error(ctx, "unmarshaling debugging args", err)
- }
- }
- } else {
- event.Error(ctx, "intercepting executeCommand request", err)
- }
- }
- // The gopls workspace environment defaults to the process environment in
- // which gopls daemon was started. To avoid discrepancies in Go environment
- // between the editor and daemon, inject any unset variables in `go env`
- // into the options sent by initialize.
- //
- // See also golang.org/issue/37830.
- return handler(ctx, reply, r)
- }
-}
-
-// addGoEnvToInitializeRequest builds a new initialize request in which we set
-// any environment variables output by `go env` and not already present in the
-// request.
-//
-// It returns an error if r is not an initialize requst, or is otherwise
-// malformed.
-func addGoEnvToInitializeRequest(ctx context.Context, r jsonrpc2.Request) (jsonrpc2.Request, error) {
- var params protocol.ParamInitialize
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return nil, err
- }
- var opts map[string]interface{}
- switch v := params.InitializationOptions.(type) {
- case nil:
- opts = make(map[string]interface{})
- case map[string]interface{}:
- opts = v
- default:
- return nil, fmt.Errorf("unexpected type for InitializationOptions: %T", v)
- }
- envOpt, ok := opts["env"]
- if !ok {
- envOpt = make(map[string]interface{})
- }
- env, ok := envOpt.(map[string]interface{})
- if !ok {
- return nil, fmt.Errorf(`env option is %T, expected a map`, envOpt)
- }
- goenv, err := getGoEnv(ctx, env)
- if err != nil {
- return nil, err
- }
- for govar, value := range goenv {
- env[govar] = value
- }
- opts["env"] = env
- params.InitializationOptions = opts
- call, ok := r.(*jsonrpc2.Call)
- if !ok {
- return nil, fmt.Errorf("%T is not a *jsonrpc2.Call", r)
- }
- return jsonrpc2.NewCall(call.ID(), "initialize", params)
-}
-
-func (f *Forwarder) replyWithDebugAddress(outerCtx context.Context, r jsonrpc2.Replier, args command.DebuggingArgs) jsonrpc2.Replier {
- di := debug.GetInstance(outerCtx)
- if di == nil {
- event.Log(outerCtx, "no debug instance to start")
- return r
- }
- return func(ctx context.Context, result interface{}, outerErr error) error {
- if outerErr != nil {
- return r(ctx, result, outerErr)
- }
- // Enrich the result with our own debugging information. Since we're an
- // intermediary, the jsonrpc2 package has deserialized the result into
- // maps, by default. Re-do the unmarshalling.
- raw, err := json.Marshal(result)
- if err != nil {
- event.Error(outerCtx, "marshaling intermediate command result", err)
- return r(ctx, result, err)
- }
- var modified command.DebuggingResult
- if err := json.Unmarshal(raw, &modified); err != nil {
- event.Error(outerCtx, "unmarshaling intermediate command result", err)
- return r(ctx, result, err)
- }
- addr := args.Addr
- if addr == "" {
- addr = "localhost:0"
- }
- addr, err = di.Serve(outerCtx, addr)
- if err != nil {
- event.Error(outerCtx, "starting debug server", err)
- return r(ctx, result, outerErr)
- }
- urls := []string{"http://" + addr}
- modified.URLs = append(urls, modified.URLs...)
- go f.handshake(ctx)
- return r(ctx, modified, nil)
- }
-}
-
-// A handshakeRequest identifies a client to the LSP server.
-type handshakeRequest struct {
- // ServerID is the ID of the server on the client. This should usually be 0.
- ServerID string `json:"serverID"`
- // Logfile is the location of the clients log file.
- Logfile string `json:"logfile"`
- // DebugAddr is the client debug address.
- DebugAddr string `json:"debugAddr"`
- // GoplsPath is the path to the Gopls binary running the current client
- // process.
- GoplsPath string `json:"goplsPath"`
-}
-
-// A handshakeResponse is returned by the LSP server to tell the LSP client
-// information about its session.
-type handshakeResponse struct {
- // SessionID is the server session associated with the client.
- SessionID string `json:"sessionID"`
- // Logfile is the location of the server logs.
- Logfile string `json:"logfile"`
- // DebugAddr is the server debug address.
- DebugAddr string `json:"debugAddr"`
- // GoplsPath is the path to the Gopls binary running the current server
- // process.
- GoplsPath string `json:"goplsPath"`
-}
-
-// ClientSession identifies a current client LSP session on the server. Note
-// that it looks similar to handshakeResposne, but in fact 'Logfile' and
-// 'DebugAddr' now refer to the client.
-type ClientSession struct {
- SessionID string `json:"sessionID"`
- Logfile string `json:"logfile"`
- DebugAddr string `json:"debugAddr"`
-}
-
-// ServerState holds information about the gopls daemon process, including its
-// debug information and debug information of all of its current connected
-// clients.
-type ServerState struct {
- Logfile string `json:"logfile"`
- DebugAddr string `json:"debugAddr"`
- GoplsPath string `json:"goplsPath"`
- CurrentClientID string `json:"currentClientID"`
- Clients []ClientSession `json:"clients"`
-}
-
-const (
- handshakeMethod = "gopls/handshake"
- sessionsMethod = "gopls/sessions"
-)
-
-func handshaker(session *cache.Session, goplsPath string, logHandshakes bool, handler jsonrpc2.Handler) jsonrpc2.Handler {
- return func(ctx context.Context, reply jsonrpc2.Replier, r jsonrpc2.Request) error {
- switch r.Method() {
- case handshakeMethod:
- // We log.Printf in this handler, rather than event.Log when we want logs
- // to go to the daemon log rather than being reflected back to the
- // client.
- var req handshakeRequest
- if err := json.Unmarshal(r.Params(), &req); err != nil {
- if logHandshakes {
- log.Printf("Error processing handshake for session %s: %v", session.ID(), err)
- }
- sendError(ctx, reply, err)
- return nil
- }
- if logHandshakes {
- log.Printf("Session %s: got handshake. Logfile: %q, Debug addr: %q", session.ID(), req.Logfile, req.DebugAddr)
- }
- event.Log(ctx, "Handshake session update",
- cache.KeyUpdateSession.Of(session),
- tag.DebugAddress.Of(req.DebugAddr),
- tag.Logfile.Of(req.Logfile),
- tag.ServerID.Of(req.ServerID),
- tag.GoplsPath.Of(req.GoplsPath),
- )
- resp := handshakeResponse{
- SessionID: session.ID(),
- GoplsPath: goplsPath,
- }
- if di := debug.GetInstance(ctx); di != nil {
- resp.Logfile = di.Logfile
- resp.DebugAddr = di.ListenedDebugAddress()
- }
- return reply(ctx, resp, nil)
-
- case sessionsMethod:
- resp := ServerState{
- GoplsPath: goplsPath,
- CurrentClientID: session.ID(),
- }
- if di := debug.GetInstance(ctx); di != nil {
- resp.Logfile = di.Logfile
- resp.DebugAddr = di.ListenedDebugAddress()
- for _, c := range di.State.Clients() {
- resp.Clients = append(resp.Clients, ClientSession{
- SessionID: c.Session.ID(),
- Logfile: c.Logfile,
- DebugAddr: c.DebugAddress,
- })
- }
- }
- return reply(ctx, resp, nil)
- }
- return handler(ctx, reply, r)
- }
-}
-
-func sendError(ctx context.Context, reply jsonrpc2.Replier, err error) {
- err = errors.Errorf("%v: %w", err, jsonrpc2.ErrParse)
- if err := reply(ctx, nil, err); err != nil {
- event.Error(ctx, "", err)
- }
-}
-
-// ParseAddr parses the address of a gopls remote.
-// TODO(rFindley): further document this syntax, and allow URI-style remote
-// addresses such as "auto://...".
-func ParseAddr(listen string) (network string, address string) {
- // Allow passing just -remote=auto, as a shorthand for using automatic remote
- // resolution.
- if listen == AutoNetwork {
- return AutoNetwork, ""
- }
- if parts := strings.SplitN(listen, ";", 2); len(parts) == 2 {
- return parts[0], parts[1]
- }
- return "tcp", listen
-}
diff --git a/internal/lsp/lsprpc/lsprpc_test.go b/internal/lsp/lsprpc/lsprpc_test.go
deleted file mode 100644
index 795c887e4..000000000
--- a/internal/lsp/lsprpc/lsprpc_test.go
+++ /dev/null
@@ -1,349 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lsprpc
-
-import (
- "context"
- "errors"
- "regexp"
- "strings"
- "testing"
- "time"
-
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/jsonrpc2"
- "golang.org/x/tools/internal/jsonrpc2/servertest"
- "golang.org/x/tools/internal/lsp/cache"
- "golang.org/x/tools/internal/lsp/debug"
- "golang.org/x/tools/internal/lsp/fake"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/testenv"
-)
-
-type FakeClient struct {
- protocol.Client
-
- Logs chan string
-}
-
-func (c FakeClient) LogMessage(ctx context.Context, params *protocol.LogMessageParams) error {
- c.Logs <- params.Message
- return nil
-}
-
-// fakeServer is intended to be embedded in the test fakes below, to trivially
-// implement Shutdown.
-type fakeServer struct {
- protocol.Server
-}
-
-func (fakeServer) Shutdown(ctx context.Context) error {
- return nil
-}
-
-type PingServer struct{ fakeServer }
-
-func (s PingServer) DidOpen(ctx context.Context, params *protocol.DidOpenTextDocumentParams) error {
- event.Log(ctx, "ping")
- return nil
-}
-
-func TestClientLogging(t *testing.T) {
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- server := PingServer{}
- client := FakeClient{Logs: make(chan string, 10)}
-
- ctx = debug.WithInstance(ctx, "", "")
- ss := NewStreamServer(cache.New(nil), false)
- ss.serverForTest = server
- ts := servertest.NewPipeServer(ctx, ss, nil)
- defer checkClose(t, ts.Close)
- cc := ts.Connect(ctx)
- cc.Go(ctx, protocol.ClientHandler(client, jsonrpc2.MethodNotFound))
-
- if err := protocol.ServerDispatcher(cc).DidOpen(ctx, &protocol.DidOpenTextDocumentParams{}); err != nil {
- t.Errorf("DidOpen: %v", err)
- }
-
- select {
- case got := <-client.Logs:
- want := "ping"
- matched, err := regexp.MatchString(want, got)
- if err != nil {
- t.Fatal(err)
- }
- if !matched {
- t.Errorf("got log %q, want a log containing %q", got, want)
- }
- case <-time.After(1 * time.Second):
- t.Error("timeout waiting for client log")
- }
-}
-
-// WaitableServer instruments LSP request so that we can control their timing.
-// The requests chosen are arbitrary: we simply needed one that blocks, and
-// another that doesn't.
-type WaitableServer struct {
- fakeServer
-
- Started chan struct{}
- Completed chan error
-}
-
-func (s WaitableServer) Hover(ctx context.Context, _ *protocol.HoverParams) (_ *protocol.Hover, err error) {
- s.Started <- struct{}{}
- defer func() {
- s.Completed <- err
- }()
- select {
- case <-ctx.Done():
- return nil, errors.New("cancelled hover")
- case <-time.After(10 * time.Second):
- }
- return &protocol.Hover{}, nil
-}
-
-func (s WaitableServer) ResolveCompletionItem(_ context.Context, item *protocol.CompletionItem) (*protocol.CompletionItem, error) {
- return item, nil
-}
-
-func checkClose(t *testing.T, closer func() error) {
- t.Helper()
- if err := closer(); err != nil {
- t.Errorf("closing: %v", err)
- }
-}
-
-func setupForwarding(ctx context.Context, t *testing.T, s protocol.Server) (direct, forwarded servertest.Connector, cleanup func()) {
- t.Helper()
- serveCtx := debug.WithInstance(ctx, "", "")
- ss := NewStreamServer(cache.New(nil), false)
- ss.serverForTest = s
- tsDirect := servertest.NewTCPServer(serveCtx, ss, nil)
-
- forwarderCtx := debug.WithInstance(ctx, "", "")
- forwarder, err := NewForwarder("tcp;"+tsDirect.Addr, nil)
- if err != nil {
- t.Fatal(err)
- }
- tsForwarded := servertest.NewPipeServer(forwarderCtx, forwarder, nil)
- return tsDirect, tsForwarded, func() {
- checkClose(t, tsDirect.Close)
- checkClose(t, tsForwarded.Close)
- }
-}
-
-func TestRequestCancellation(t *testing.T) {
- ctx := context.Background()
- server := WaitableServer{
- Started: make(chan struct{}),
- Completed: make(chan error),
- }
- tsDirect, tsForwarded, cleanup := setupForwarding(ctx, t, server)
- defer cleanup()
- tests := []struct {
- serverType string
- ts servertest.Connector
- }{
- {"direct", tsDirect},
- {"forwarder", tsForwarded},
- }
-
- for _, test := range tests {
- t.Run(test.serverType, func(t *testing.T) {
- cc := test.ts.Connect(ctx)
- sd := protocol.ServerDispatcher(cc)
- cc.Go(ctx,
- protocol.Handlers(
- jsonrpc2.MethodNotFound))
-
- ctx := context.Background()
- ctx, cancel := context.WithCancel(ctx)
-
- result := make(chan error)
- go func() {
- _, err := sd.Hover(ctx, &protocol.HoverParams{})
- result <- err
- }()
- // Wait for the Hover request to start.
- <-server.Started
- cancel()
- if err := <-result; err == nil {
- t.Error("nil error for cancelled Hover(), want non-nil")
- }
- if err := <-server.Completed; err == nil || !strings.Contains(err.Error(), "cancelled hover") {
- t.Errorf("Hover(): unexpected server-side error %v", err)
- }
- })
- }
-}
-
-const exampleProgram = `
--- go.mod --
-module mod
-
-go 1.12
--- main.go --
-package main
-
-import "fmt"
-
-func main() {
- fmt.Println("Hello World.")
-}`
-
-func TestDebugInfoLifecycle(t *testing.T) {
- sb, err := fake.NewSandbox(&fake.SandboxConfig{Files: fake.UnpackTxt(exampleProgram)})
- if err != nil {
- t.Fatal(err)
- }
- defer func() {
- if err := sb.Close(); err != nil {
- // TODO(golang/go#38490): we can't currently make this an error because
- // it fails on Windows: the workspace directory is still locked by a
- // separate Go process.
- // Once we have a reliable way to wait for proper shutdown, make this an
- // error.
- t.Logf("closing workspace failed: %v", err)
- }
- }()
-
- baseCtx, cancel := context.WithCancel(context.Background())
- defer cancel()
- clientCtx := debug.WithInstance(baseCtx, "", "")
- serverCtx := debug.WithInstance(baseCtx, "", "")
-
- cache := cache.New(nil)
- ss := NewStreamServer(cache, false)
- tsBackend := servertest.NewTCPServer(serverCtx, ss, nil)
-
- forwarder, err := NewForwarder("tcp;"+tsBackend.Addr, nil)
- if err != nil {
- t.Fatal(err)
- }
- tsForwarder := servertest.NewPipeServer(clientCtx, forwarder, nil)
-
- conn1 := tsForwarder.Connect(clientCtx)
- ed1, err := fake.NewEditor(sb, fake.EditorConfig{}).Connect(clientCtx, conn1, fake.ClientHooks{})
- if err != nil {
- t.Fatal(err)
- }
- defer ed1.Close(clientCtx)
- conn2 := tsBackend.Connect(baseCtx)
- ed2, err := fake.NewEditor(sb, fake.EditorConfig{}).Connect(baseCtx, conn2, fake.ClientHooks{})
- if err != nil {
- t.Fatal(err)
- }
- defer ed2.Close(baseCtx)
-
- serverDebug := debug.GetInstance(serverCtx)
- if got, want := len(serverDebug.State.Clients()), 2; got != want {
- t.Errorf("len(server:Clients) = %d, want %d", got, want)
- }
- if got, want := len(serverDebug.State.Sessions()), 2; got != want {
- t.Errorf("len(server:Sessions) = %d, want %d", got, want)
- }
- clientDebug := debug.GetInstance(clientCtx)
- if got, want := len(clientDebug.State.Servers()), 1; got != want {
- t.Errorf("len(client:Servers) = %d, want %d", got, want)
- }
- // Close one of the connections to verify that the client and session were
- // dropped.
- if err := ed1.Close(clientCtx); err != nil {
- t.Fatal(err)
- }
- /*TODO: at this point we have verified the editor is closed
- However there is no way currently to wait for all associated go routines to
- go away, and we need to wait for those to trigger the client drop
- for now we just give it a little bit of time, but we need to fix this
- in a principled way
- */
- start := time.Now()
- delay := time.Millisecond
- const maxWait = time.Second
- for len(serverDebug.State.Clients()) > 1 {
- if time.Since(start) > maxWait {
- break
- }
- time.Sleep(delay)
- delay *= 2
- }
- if got, want := len(serverDebug.State.Clients()), 1; got != want {
- t.Errorf("len(server:Clients) = %d, want %d", got, want)
- }
- if got, want := len(serverDebug.State.Sessions()), 1; got != want {
- t.Errorf("len(server:Sessions()) = %d, want %d", got, want)
- }
-}
-
-type initServer struct {
- fakeServer
-
- params *protocol.ParamInitialize
-}
-
-func (s *initServer) Initialize(ctx context.Context, params *protocol.ParamInitialize) (*protocol.InitializeResult, error) {
- s.params = params
- return &protocol.InitializeResult{}, nil
-}
-
-func TestEnvForwarding(t *testing.T) {
- testenv.NeedsGo1Point(t, 13)
- server := &initServer{}
- ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
- defer cancel()
- _, tsForwarded, cleanup := setupForwarding(ctx, t, server)
- defer cleanup()
-
- conn := tsForwarded.Connect(ctx)
- conn.Go(ctx, jsonrpc2.MethodNotFound)
- dispatch := protocol.ServerDispatcher(conn)
- initParams := &protocol.ParamInitialize{}
- initParams.InitializationOptions = map[string]interface{}{
- "env": map[string]interface{}{
- "GONOPROXY": "example.com",
- },
- }
- _, err := dispatch.Initialize(ctx, initParams)
- if err != nil {
- t.Fatal(err)
- }
- if server.params == nil {
- t.Fatalf("initialize params are unset")
- }
- env := server.params.InitializationOptions.(map[string]interface{})["env"].(map[string]interface{})
-
- // Check for an arbitrary Go variable. It should be set.
- if _, ok := env["GOPRIVATE"]; !ok {
- t.Errorf("Go environment variable GOPRIVATE unset in initialization options")
- }
- // Check that the variable present in our user config was not overwritten.
- if v := env["GONOPROXY"]; v != "example.com" {
- t.Errorf("GONOPROXY environment variable was overwritten")
- }
-}
-
-func TestListenParsing(t *testing.T) {
- tests := []struct {
- input, wantNetwork, wantAddr string
- }{
- {"127.0.0.1:0", "tcp", "127.0.0.1:0"},
- {"unix;/tmp/sock", "unix", "/tmp/sock"},
- {"auto", "auto", ""},
- {"auto;foo", "auto", "foo"},
- }
-
- for _, test := range tests {
- gotNetwork, gotAddr := ParseAddr(test.input)
- if gotNetwork != test.wantNetwork {
- t.Errorf("network = %q, want %q", gotNetwork, test.wantNetwork)
- }
- if gotAddr != test.wantAddr {
- t.Errorf("addr = %q, want %q", gotAddr, test.wantAddr)
- }
- }
-}
diff --git a/internal/lsp/lsprpc/middleware.go b/internal/lsp/lsprpc/middleware.go
deleted file mode 100644
index 2ee83a203..000000000
--- a/internal/lsp/lsprpc/middleware.go
+++ /dev/null
@@ -1,145 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lsprpc
-
-import (
- "context"
- "encoding/json"
- "sync"
-
- "golang.org/x/tools/internal/event"
- jsonrpc2_v2 "golang.org/x/tools/internal/jsonrpc2_v2"
- "golang.org/x/xerrors"
-)
-
-// Metadata holds arbitrary data transferred between jsonrpc2 peers.
-type Metadata map[string]interface{}
-
-// PeerInfo holds information about a peering between jsonrpc2 servers.
-type PeerInfo struct {
- // RemoteID is the identity of the current server on its peer.
- RemoteID int64
-
- // LocalID is the identity of the peer on the server.
- LocalID int64
-
- // IsClient reports whether the peer is a client. If false, the peer is a
- // server.
- IsClient bool
-
- // Metadata holds arbitrary information provided by the peer.
- Metadata Metadata
-}
-
-// Handshaker handles both server and client handshaking over jsonrpc2. To
-// instrument server-side handshaking, use Handshaker.Middleware. To instrument
-// client-side handshaking, call Handshaker.ClientHandshake for any new
-// client-side connections.
-type Handshaker struct {
- // Metadata will be shared with peers via handshaking.
- Metadata Metadata
-
- mu sync.Mutex
- prevID int64
- peers map[int64]PeerInfo
-}
-
-// Peers returns the peer info this handshaker knows about by way of either the
-// server-side handshake middleware, or client-side handshakes.
-func (h *Handshaker) Peers() []PeerInfo {
- h.mu.Lock()
- defer h.mu.Unlock()
-
- var c []PeerInfo
- for _, v := range h.peers {
- c = append(c, v)
- }
- return c
-}
-
-// Middleware is a jsonrpc2 middleware function to augment connection binding
-// to handle the handshake method, and record disconnections.
-func (h *Handshaker) Middleware(inner jsonrpc2_v2.Binder) jsonrpc2_v2.Binder {
- return BinderFunc(func(ctx context.Context, conn *jsonrpc2_v2.Connection) (jsonrpc2_v2.ConnectionOptions, error) {
- opts, err := inner.Bind(ctx, conn)
- if err != nil {
- return opts, err
- }
-
- localID := h.nextID()
- info := &PeerInfo{
- RemoteID: localID,
- Metadata: h.Metadata,
- }
-
- // Wrap the delegated handler to accept the handshake.
- delegate := opts.Handler
- opts.Handler = jsonrpc2_v2.HandlerFunc(func(ctx context.Context, req *jsonrpc2_v2.Request) (interface{}, error) {
- if req.Method == handshakeMethod {
- var peerInfo PeerInfo
- if err := json.Unmarshal(req.Params, &peerInfo); err != nil {
- return nil, xerrors.Errorf("%w: unmarshaling client info: %v", jsonrpc2_v2.ErrInvalidParams, err)
- }
- peerInfo.LocalID = localID
- peerInfo.IsClient = true
- h.recordPeer(peerInfo)
- return info, nil
- }
- return delegate.Handle(ctx, req)
- })
-
- // Record the dropped client.
- go h.cleanupAtDisconnect(conn, localID)
-
- return opts, nil
- })
-}
-
-// ClientHandshake performs a client-side handshake with the server at the
-// other end of conn, recording the server's peer info and watching for conn's
-// disconnection.
-func (h *Handshaker) ClientHandshake(ctx context.Context, conn *jsonrpc2_v2.Connection) {
- localID := h.nextID()
- info := &PeerInfo{
- RemoteID: localID,
- Metadata: h.Metadata,
- }
-
- call := conn.Call(ctx, handshakeMethod, info)
- var serverInfo PeerInfo
- if err := call.Await(ctx, &serverInfo); err != nil {
- event.Error(ctx, "performing handshake", err)
- return
- }
- serverInfo.LocalID = localID
- h.recordPeer(serverInfo)
-
- go h.cleanupAtDisconnect(conn, localID)
-}
-
-func (h *Handshaker) nextID() int64 {
- h.mu.Lock()
- defer h.mu.Unlock()
-
- h.prevID++
- return h.prevID
-}
-
-func (h *Handshaker) cleanupAtDisconnect(conn *jsonrpc2_v2.Connection, peerID int64) {
- conn.Wait()
-
- h.mu.Lock()
- defer h.mu.Unlock()
- delete(h.peers, peerID)
-}
-
-func (h *Handshaker) recordPeer(info PeerInfo) {
- h.mu.Lock()
- defer h.mu.Unlock()
- if h.peers == nil {
- h.peers = make(map[int64]PeerInfo)
- }
- h.peers[info.LocalID] = info
-}
diff --git a/internal/lsp/lsprpc/middleware_test.go b/internal/lsp/lsprpc/middleware_test.go
deleted file mode 100644
index a385f1003..000000000
--- a/internal/lsp/lsprpc/middleware_test.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lsprpc_test
-
-import (
- "context"
- "errors"
- "fmt"
- "testing"
- "time"
-
- jsonrpc2_v2 "golang.org/x/tools/internal/jsonrpc2_v2"
- . "golang.org/x/tools/internal/lsp/lsprpc"
-)
-
-var noopBinder = BinderFunc(func(context.Context, *jsonrpc2_v2.Connection) (jsonrpc2_v2.ConnectionOptions, error) {
- return jsonrpc2_v2.ConnectionOptions{}, nil
-})
-
-func TestHandshakeMiddleware(t *testing.T) {
- sh := &Handshaker{
- Metadata: Metadata{
- "answer": 42,
- },
- }
- ctx := context.Background()
- env := new(TestEnv)
- defer env.Shutdown(t)
- l, _ := env.serve(ctx, t, sh.Middleware(noopBinder))
- conn := env.dial(ctx, t, l.Dialer(), noopBinder, false)
- ch := &Handshaker{
- Metadata: Metadata{
- "question": 6 * 9,
- },
- }
-
- check := func(connected bool) error {
- clients := sh.Peers()
- servers := ch.Peers()
- want := 0
- if connected {
- want = 1
- }
- if got := len(clients); got != want {
- return fmt.Errorf("got %d clients on the server, want %d", got, want)
- }
- if got := len(servers); got != want {
- return fmt.Errorf("got %d servers on the client, want %d", got, want)
- }
- if !connected {
- return nil
- }
- client := clients[0]
- server := servers[0]
- if _, ok := client.Metadata["question"]; !ok {
- return errors.New("no client metadata")
- }
- if _, ok := server.Metadata["answer"]; !ok {
- return errors.New("no server metadata")
- }
- if client.LocalID != server.RemoteID {
- return fmt.Errorf("client.LocalID == %d, server.PeerID == %d", client.LocalID, server.RemoteID)
- }
- if client.RemoteID != server.LocalID {
- return fmt.Errorf("client.PeerID == %d, server.LocalID == %d", client.RemoteID, server.LocalID)
- }
- return nil
- }
-
- if err := check(false); err != nil {
- t.Fatalf("before handshake: %v", err)
- }
- ch.ClientHandshake(ctx, conn)
- if err := check(true); err != nil {
- t.Fatalf("after handshake: %v", err)
- }
- conn.Close()
- // Wait for up to ~2s for connections to get cleaned up.
- delay := 25 * time.Millisecond
- for retries := 3; retries >= 0; retries-- {
- time.Sleep(delay)
- err := check(false)
- if err == nil {
- return
- }
- if retries == 0 {
- t.Fatalf("after closing connection: %v", err)
- }
- delay *= 4
- }
-}
diff --git a/internal/lsp/mod/code_lens.go b/internal/lsp/mod/code_lens.go
deleted file mode 100644
index b26bae75c..000000000
--- a/internal/lsp/mod/code_lens.go
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package mod
-
-import (
- "context"
- "fmt"
- "os"
- "path/filepath"
-
- "golang.org/x/mod/modfile"
- "golang.org/x/tools/internal/lsp/command"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
-)
-
-// LensFuncs returns the supported lensFuncs for go.mod files.
-func LensFuncs() map[command.Command]source.LensFunc {
- return map[command.Command]source.LensFunc{
- command.UpgradeDependency: upgradeLenses,
- command.Tidy: tidyLens,
- command.Vendor: vendorLens,
- }
-}
-
-func upgradeLenses(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) ([]protocol.CodeLens, error) {
- pm, err := snapshot.ParseMod(ctx, fh)
- if err != nil || pm.File == nil {
- return nil, err
- }
- if len(pm.File.Require) == 0 {
- // Nothing to upgrade.
- return nil, nil
- }
- var requires []string
- for _, req := range pm.File.Require {
- requires = append(requires, req.Mod.Path)
- }
- uri := protocol.URIFromSpanURI(fh.URI())
- checkUpgrade, err := command.NewCheckUpgradesCommand("Check for upgrades", command.CheckUpgradesArgs{
- URI: uri,
- Modules: requires,
- })
- if err != nil {
- return nil, err
- }
- upgradeTransitive, err := command.NewUpgradeDependencyCommand("Upgrade transitive dependencies", command.DependencyArgs{
- URI: uri,
- AddRequire: false,
- GoCmdArgs: []string{"-d", "-u", "-t", "./..."},
- })
- if err != nil {
- return nil, err
- }
- upgradeDirect, err := command.NewUpgradeDependencyCommand("Upgrade direct dependencies", command.DependencyArgs{
- URI: uri,
- AddRequire: false,
- GoCmdArgs: append([]string{"-d"}, requires...),
- })
- if err != nil {
- return nil, err
- }
- // Put the upgrade code lenses above the first require block or statement.
- rng, err := firstRequireRange(fh, pm)
- if err != nil {
- return nil, err
- }
-
- return []protocol.CodeLens{
- {Range: rng, Command: checkUpgrade},
- {Range: rng, Command: upgradeTransitive},
- {Range: rng, Command: upgradeDirect},
- }, nil
-}
-
-func tidyLens(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) ([]protocol.CodeLens, error) {
- pm, err := snapshot.ParseMod(ctx, fh)
- if err != nil || pm.File == nil {
- return nil, err
- }
- uri := protocol.URIFromSpanURI(fh.URI())
- cmd, err := command.NewTidyCommand("Run go mod tidy", command.URIArgs{URIs: []protocol.DocumentURI{uri}})
- if err != nil {
- return nil, err
- }
- rng, err := moduleStmtRange(fh, pm)
- if err != nil {
- return nil, err
- }
- return []protocol.CodeLens{{
- Range: rng,
- Command: cmd,
- }}, nil
-}
-
-func vendorLens(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) ([]protocol.CodeLens, error) {
- pm, err := snapshot.ParseMod(ctx, fh)
- if err != nil || pm.File == nil {
- return nil, err
- }
- if len(pm.File.Require) == 0 {
- // Nothing to vendor.
- return nil, nil
- }
- rng, err := moduleStmtRange(fh, pm)
- if err != nil {
- return nil, err
- }
- title := "Create vendor directory"
- uri := protocol.URIFromSpanURI(fh.URI())
- cmd, err := command.NewVendorCommand(title, command.URIArg{URI: uri})
- if err != nil {
- return nil, err
- }
- // Change the message depending on whether or not the module already has a
- // vendor directory.
- vendorDir := filepath.Join(filepath.Dir(fh.URI().Filename()), "vendor")
- if info, _ := os.Stat(vendorDir); info != nil && info.IsDir() {
- title = "Sync vendor directory"
- }
- return []protocol.CodeLens{{Range: rng, Command: cmd}}, nil
-}
-
-func moduleStmtRange(fh source.FileHandle, pm *source.ParsedModule) (protocol.Range, error) {
- if pm.File == nil || pm.File.Module == nil || pm.File.Module.Syntax == nil {
- return protocol.Range{}, fmt.Errorf("no module statement in %s", fh.URI())
- }
- syntax := pm.File.Module.Syntax
- return source.LineToRange(pm.Mapper, fh.URI(), syntax.Start, syntax.End)
-}
-
-// firstRequireRange returns the range for the first "require" in the given
-// go.mod file. This is either a require block or an individual require line.
-func firstRequireRange(fh source.FileHandle, pm *source.ParsedModule) (protocol.Range, error) {
- if len(pm.File.Require) == 0 {
- return protocol.Range{}, fmt.Errorf("no requires in the file %s", fh.URI())
- }
- var start, end modfile.Position
- for _, stmt := range pm.File.Syntax.Stmt {
- if b, ok := stmt.(*modfile.LineBlock); ok && len(b.Token) == 1 && b.Token[0] == "require" {
- start, end = b.Span()
- break
- }
- }
-
- firstRequire := pm.File.Require[0].Syntax
- if start.Byte == 0 || firstRequire.Start.Byte < start.Byte {
- start, end = firstRequire.Start, firstRequire.End
- }
- return source.LineToRange(pm.Mapper, fh.URI(), start, end)
-}
diff --git a/internal/lsp/mod/diagnostics.go b/internal/lsp/mod/diagnostics.go
deleted file mode 100644
index 9c49d8b36..000000000
--- a/internal/lsp/mod/diagnostics.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package mod provides core features related to go.mod file
-// handling for use by Go editors and tools.
-package mod
-
-import (
- "context"
- "fmt"
-
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/lsp/command"
- "golang.org/x/tools/internal/lsp/debug/tag"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
-)
-
-func Diagnostics(ctx context.Context, snapshot source.Snapshot) (map[source.VersionedFileIdentity][]*source.Diagnostic, error) {
- ctx, done := event.Start(ctx, "mod.Diagnostics", tag.Snapshot.Of(snapshot.ID()))
- defer done()
-
- reports := map[source.VersionedFileIdentity][]*source.Diagnostic{}
- for _, uri := range snapshot.ModFiles() {
- fh, err := snapshot.GetVersionedFile(ctx, uri)
- if err != nil {
- return nil, err
- }
- reports[fh.VersionedFileIdentity()] = []*source.Diagnostic{}
- diagnostics, err := DiagnosticsForMod(ctx, snapshot, fh)
- if err != nil {
- return nil, err
- }
- for _, d := range diagnostics {
- fh, err := snapshot.GetVersionedFile(ctx, d.URI)
- if err != nil {
- return nil, err
- }
- reports[fh.VersionedFileIdentity()] = append(reports[fh.VersionedFileIdentity()], d)
- }
- }
- return reports, nil
-}
-
-func DiagnosticsForMod(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) ([]*source.Diagnostic, error) {
- pm, err := snapshot.ParseMod(ctx, fh)
- if err != nil {
- if pm == nil || len(pm.ParseErrors) == 0 {
- return nil, err
- }
- return pm.ParseErrors, nil
- }
-
- var diagnostics []*source.Diagnostic
-
- // Add upgrade quick fixes for individual modules if we know about them.
- upgrades := snapshot.View().ModuleUpgrades()
- for _, req := range pm.File.Require {
- ver, ok := upgrades[req.Mod.Path]
- if !ok || req.Mod.Version == ver {
- continue
- }
- rng, err := source.LineToRange(pm.Mapper, fh.URI(), req.Syntax.Start, req.Syntax.End)
- if err != nil {
- return nil, err
- }
- // Upgrade to the exact version we offer the user, not the most recent.
- title := fmt.Sprintf("Upgrade to %v", ver)
- cmd, err := command.NewUpgradeDependencyCommand(title, command.DependencyArgs{
- URI: protocol.URIFromSpanURI(fh.URI()),
- AddRequire: false,
- GoCmdArgs: []string{req.Mod.Path + "@" + ver},
- })
- if err != nil {
- return nil, err
- }
- diagnostics = append(diagnostics, &source.Diagnostic{
- URI: fh.URI(),
- Range: rng,
- Severity: protocol.SeverityInformation,
- Source: source.UpgradeNotification,
- Message: fmt.Sprintf("%v can be upgraded", req.Mod.Path),
- SuggestedFixes: []source.SuggestedFix{source.SuggestedFixFromCommand(cmd, protocol.QuickFix)},
- })
- }
-
- // Packages in the workspace can contribute diagnostics to go.mod files.
- wspkgs, err := snapshot.ActivePackages(ctx)
- if err != nil && !source.IsNonFatalGoModError(err) {
- event.Error(ctx, fmt.Sprintf("workspace packages: diagnosing %s", pm.URI), err)
- }
- if err == nil {
- for _, pkg := range wspkgs {
- pkgDiagnostics, err := snapshot.DiagnosePackage(ctx, pkg)
- if err != nil {
- return nil, err
- }
- diagnostics = append(diagnostics, pkgDiagnostics[fh.URI()]...)
- }
- }
-
- tidied, err := snapshot.ModTidy(ctx, pm)
- if err != nil && !source.IsNonFatalGoModError(err) {
- event.Error(ctx, fmt.Sprintf("tidy: diagnosing %s", pm.URI), err)
- }
- if err == nil {
- for _, d := range tidied.Diagnostics {
- if d.URI != fh.URI() {
- continue
- }
- diagnostics = append(diagnostics, d)
- }
- }
- return diagnostics, nil
-}
diff --git a/internal/lsp/mod/format.go b/internal/lsp/mod/format.go
deleted file mode 100644
index c35576632..000000000
--- a/internal/lsp/mod/format.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package mod
-
-import (
- "context"
-
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
-)
-
-func Format(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) ([]protocol.TextEdit, error) {
- ctx, done := event.Start(ctx, "mod.Format")
- defer done()
-
- pm, err := snapshot.ParseMod(ctx, fh)
- if err != nil {
- return nil, err
- }
- formatted, err := pm.File.Format()
- if err != nil {
- return nil, err
- }
- // Calculate the edits to be made due to the change.
- diff, err := snapshot.View().Options().ComputeEdits(fh.URI(), string(pm.Mapper.Content), string(formatted))
- if err != nil {
- return nil, err
- }
- return source.ToProtocolEdits(pm.Mapper, diff)
-}
diff --git a/internal/lsp/mod/hover.go b/internal/lsp/mod/hover.go
deleted file mode 100644
index 0837e2aaa..000000000
--- a/internal/lsp/mod/hover.go
+++ /dev/null
@@ -1,163 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package mod
-
-import (
- "bytes"
- "context"
- "fmt"
- "go/token"
- "strings"
-
- "golang.org/x/mod/modfile"
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- errors "golang.org/x/xerrors"
-)
-
-func Hover(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle, position protocol.Position) (*protocol.Hover, error) {
- var found bool
- for _, uri := range snapshot.ModFiles() {
- if fh.URI() == uri {
- found = true
- break
- }
- }
-
- // We only provide hover information for the view's go.mod files.
- if !found {
- return nil, nil
- }
-
- ctx, done := event.Start(ctx, "mod.Hover")
- defer done()
-
- // Get the position of the cursor.
- pm, err := snapshot.ParseMod(ctx, fh)
- if err != nil {
- return nil, errors.Errorf("getting modfile handle: %w", err)
- }
- spn, err := pm.Mapper.PointSpan(position)
- if err != nil {
- return nil, errors.Errorf("computing cursor position: %w", err)
- }
- hoverRng, err := spn.Range(pm.Mapper.Converter)
- if err != nil {
- return nil, errors.Errorf("computing hover range: %w", err)
- }
-
- // Confirm that the cursor is at the position of a require statement.
- var req *modfile.Require
- var startPos, endPos int
- for _, r := range pm.File.Require {
- dep := []byte(r.Mod.Path)
- s, e := r.Syntax.Start.Byte, r.Syntax.End.Byte
- i := bytes.Index(pm.Mapper.Content[s:e], dep)
- if i == -1 {
- continue
- }
- // Shift the start position to the location of the
- // dependency within the require statement.
- startPos, endPos = s+i, s+i+len(dep)
- if token.Pos(startPos) <= hoverRng.Start && hoverRng.Start <= token.Pos(endPos) {
- req = r
- break
- }
- }
-
- // The cursor position is not on a require statement.
- if req == nil {
- return nil, nil
- }
-
- // Get the `go mod why` results for the given file.
- why, err := snapshot.ModWhy(ctx, fh)
- if err != nil {
- return nil, err
- }
- explanation, ok := why[req.Mod.Path]
- if !ok {
- return nil, nil
- }
-
- // Get the range to highlight for the hover.
- rng, err := source.ByteOffsetsToRange(pm.Mapper, fh.URI(), startPos, endPos)
- if err != nil {
- return nil, err
- }
- if err != nil {
- return nil, err
- }
- options := snapshot.View().Options()
- isPrivate := snapshot.View().IsGoPrivatePath(req.Mod.Path)
- explanation = formatExplanation(explanation, req, options, isPrivate)
- return &protocol.Hover{
- Contents: protocol.MarkupContent{
- Kind: options.PreferredContentFormat,
- Value: explanation,
- },
- Range: rng,
- }, nil
-}
-
-func formatExplanation(text string, req *modfile.Require, options *source.Options, isPrivate bool) string {
- text = strings.TrimSuffix(text, "\n")
- splt := strings.Split(text, "\n")
- length := len(splt)
-
- var b strings.Builder
- // Write the heading as an H3.
- b.WriteString("##" + splt[0])
- if options.PreferredContentFormat == protocol.Markdown {
- b.WriteString("\n\n")
- } else {
- b.WriteRune('\n')
- }
-
- // If the explanation is 2 lines, then it is of the form:
- // # golang.org/x/text/encoding
- // (main module does not need package golang.org/x/text/encoding)
- if length == 2 {
- b.WriteString(splt[1])
- return b.String()
- }
-
- imp := splt[length-1] // import path
- reference := imp
- // See golang/go#36998: don't link to modules matching GOPRIVATE.
- if !isPrivate && options.PreferredContentFormat == protocol.Markdown {
- target := imp
- if strings.ToLower(options.LinkTarget) == "pkg.go.dev" {
- target = strings.Replace(target, req.Mod.Path, req.Mod.String(), 1)
- }
- reference = fmt.Sprintf("[%s](%s)", imp, source.BuildLink(options.LinkTarget, target, ""))
- }
- b.WriteString("This module is necessary because " + reference + " is imported in")
-
- // If the explanation is 3 lines, then it is of the form:
- // # golang.org/x/tools
- // modtest
- // golang.org/x/tools/go/packages
- if length == 3 {
- msg := fmt.Sprintf(" `%s`.", splt[1])
- b.WriteString(msg)
- return b.String()
- }
-
- // If the explanation is more than 3 lines, then it is of the form:
- // # golang.org/x/text/language
- // rsc.io/quote
- // rsc.io/sampler
- // golang.org/x/text/language
- b.WriteString(":\n```text")
- dash := ""
- for _, imp := range splt[1 : length-1] {
- dash += "-"
- b.WriteString("\n" + dash + " " + imp)
- }
- b.WriteString("\n```")
- return b.String()
-}
diff --git a/internal/lsp/mod/mod_test.go b/internal/lsp/mod/mod_test.go
deleted file mode 100644
index b2d257cae..000000000
--- a/internal/lsp/mod/mod_test.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package mod
-
-import (
- "io/ioutil"
- "os"
- "path/filepath"
- "testing"
-
- "golang.org/x/tools/internal/lsp/cache"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/lsp/tests"
- "golang.org/x/tools/internal/span"
- "golang.org/x/tools/internal/testenv"
-)
-
-func TestMain(m *testing.M) {
- testenv.ExitIfSmallMachine()
- os.Exit(m.Run())
-}
-
-func TestModfileRemainsUnchanged(t *testing.T) {
- testenv.NeedsGo1Point(t, 14)
-
- ctx := tests.Context(t)
- cache := cache.New(nil)
- session := cache.NewSession(ctx)
- options := source.DefaultOptions().Clone()
- tests.DefaultOptions(options)
- options.TempModfile = true
- options.Env = map[string]string{"GOPACKAGESDRIVER": "off", "GOROOT": ""}
-
- // Make sure to copy the test directory to a temporary directory so we do not
- // modify the test code or add go.sum files when we run the tests.
- folder, err := tests.CopyFolderToTempDir(filepath.Join("testdata", "unchanged"))
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(folder)
-
- before, err := ioutil.ReadFile(filepath.Join(folder, "go.mod"))
- if err != nil {
- t.Fatal(err)
- }
- _, _, release, err := session.NewView(ctx, "diagnostics_test", span.URIFromPath(folder), options)
- release()
- if err != nil {
- t.Fatal(err)
- }
- after, err := ioutil.ReadFile(filepath.Join(folder, "go.mod"))
- if err != nil {
- t.Fatal(err)
- }
- if string(before) != string(after) {
- t.Errorf("the real go.mod file was changed even when tempModfile=true")
- }
-}
diff --git a/internal/lsp/progress/progress.go b/internal/lsp/progress/progress.go
deleted file mode 100644
index 18e1bd0f1..000000000
--- a/internal/lsp/progress/progress.go
+++ /dev/null
@@ -1,269 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package progress
-
-import (
- "context"
- "math/rand"
- "strconv"
- "strings"
- "sync"
-
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/lsp/debug/tag"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/xcontext"
- errors "golang.org/x/xerrors"
-)
-
-type Tracker struct {
- client protocol.Client
- supportsWorkDoneProgress bool
-
- mu sync.Mutex
- inProgress map[protocol.ProgressToken]*WorkDone
-}
-
-func NewTracker(client protocol.Client) *Tracker {
- return &Tracker{
- client: client,
- inProgress: make(map[protocol.ProgressToken]*WorkDone),
- }
-}
-
-func (tracker *Tracker) SetSupportsWorkDoneProgress(b bool) {
- tracker.supportsWorkDoneProgress = b
-}
-
-// Start notifies the client of work being done on the server. It uses either
-// ShowMessage RPCs or $/progress messages, depending on the capabilities of
-// the client. The returned WorkDone handle may be used to report incremental
-// progress, and to report work completion. In particular, it is an error to
-// call start and not call end(...) on the returned WorkDone handle.
-//
-// If token is empty, a token will be randomly generated.
-//
-// The progress item is considered cancellable if the given cancel func is
-// non-nil. In this case, cancel is called when the work done
-//
-// Example:
-// func Generate(ctx) (err error) {
-// ctx, cancel := context.WithCancel(ctx)
-// defer cancel()
-// work := s.progress.start(ctx, "generate", "running go generate", cancel)
-// defer func() {
-// if err != nil {
-// work.end(ctx, fmt.Sprintf("generate failed: %v", err))
-// } else {
-// work.end(ctx, "done")
-// }
-// }()
-// // Do the work...
-// }
-//
-func (t *Tracker) Start(ctx context.Context, title, message string, token protocol.ProgressToken, cancel func()) *WorkDone {
- wd := &WorkDone{
- ctx: xcontext.Detach(ctx),
- client: t.client,
- token: token,
- cancel: cancel,
- }
- if !t.supportsWorkDoneProgress {
- // Previous iterations of this fallback attempted to retain cancellation
- // support by using ShowMessageCommand with a 'Cancel' button, but this is
- // not ideal as the 'Cancel' dialog stays open even after the command
- // completes.
- //
- // Just show a simple message. Clients can implement workDone progress
- // reporting to get cancellation support.
- if err := wd.client.ShowMessage(wd.ctx, &protocol.ShowMessageParams{
- Type: protocol.Log,
- Message: message,
- }); err != nil {
- event.Error(ctx, "showing start message for "+title, err)
- }
- return wd
- }
- if wd.token == nil {
- token = strconv.FormatInt(rand.Int63(), 10)
- err := wd.client.WorkDoneProgressCreate(ctx, &protocol.WorkDoneProgressCreateParams{
- Token: token,
- })
- if err != nil {
- wd.err = err
- event.Error(ctx, "starting work for "+title, err)
- return wd
- }
- wd.token = token
- }
- // At this point we have a token that the client knows about. Store the token
- // before starting work.
- t.mu.Lock()
- t.inProgress[wd.token] = wd
- t.mu.Unlock()
- wd.cleanup = func() {
- t.mu.Lock()
- delete(t.inProgress, token)
- t.mu.Unlock()
- }
- err := wd.client.Progress(ctx, &protocol.ProgressParams{
- Token: wd.token,
- Value: &protocol.WorkDoneProgressBegin{
- Kind: "begin",
- Cancellable: wd.cancel != nil,
- Message: message,
- Title: title,
- },
- })
- if err != nil {
- event.Error(ctx, "generate progress begin", err)
- }
- return wd
-}
-
-func (t *Tracker) Cancel(ctx context.Context, token protocol.ProgressToken) error {
- t.mu.Lock()
- defer t.mu.Unlock()
- wd, ok := t.inProgress[token]
- if !ok {
- return errors.Errorf("token %q not found in progress", token)
- }
- if wd.cancel == nil {
- return errors.Errorf("work %q is not cancellable", token)
- }
- wd.doCancel()
- return nil
-}
-
-// WorkDone represents a unit of work that is reported to the client via the
-// progress API.
-type WorkDone struct {
- // ctx is detached, for sending $/progress updates.
- ctx context.Context
- client protocol.Client
- // If token is nil, this workDone object uses the ShowMessage API, rather
- // than $/progress.
- token protocol.ProgressToken
- // err is set if progress reporting is broken for some reason (for example,
- // if there was an initial error creating a token).
- err error
-
- cancelMu sync.Mutex
- cancelled bool
- cancel func()
-
- cleanup func()
-}
-
-func (wd *WorkDone) Token() protocol.ProgressToken {
- return wd.token
-}
-
-func (wd *WorkDone) doCancel() {
- wd.cancelMu.Lock()
- defer wd.cancelMu.Unlock()
- if !wd.cancelled {
- wd.cancel()
- }
-}
-
-// report reports an update on WorkDone report back to the client.
-func (wd *WorkDone) Report(message string, percentage float64) {
- if wd == nil {
- return
- }
- wd.cancelMu.Lock()
- cancelled := wd.cancelled
- wd.cancelMu.Unlock()
- if cancelled {
- return
- }
- if wd.err != nil || wd.token == nil {
- // Not using the workDone API, so we do nothing. It would be far too spammy
- // to send incremental messages.
- return
- }
- message = strings.TrimSuffix(message, "\n")
- err := wd.client.Progress(wd.ctx, &protocol.ProgressParams{
- Token: wd.token,
- Value: &protocol.WorkDoneProgressReport{
- Kind: "report",
- // Note that in the LSP spec, the value of Cancellable may be changed to
- // control whether the cancel button in the UI is enabled. Since we don't
- // yet use this feature, the value is kept constant here.
- Cancellable: wd.cancel != nil,
- Message: message,
- Percentage: uint32(percentage),
- },
- })
- if err != nil {
- event.Error(wd.ctx, "reporting progress", err)
- }
-}
-
-// end reports a workdone completion back to the client.
-func (wd *WorkDone) End(message string) {
- if wd == nil {
- return
- }
- var err error
- switch {
- case wd.err != nil:
- // There is a prior error.
- case wd.token == nil:
- // We're falling back to message-based reporting.
- err = wd.client.ShowMessage(wd.ctx, &protocol.ShowMessageParams{
- Type: protocol.Info,
- Message: message,
- })
- default:
- err = wd.client.Progress(wd.ctx, &protocol.ProgressParams{
- Token: wd.token,
- Value: &protocol.WorkDoneProgressEnd{
- Kind: "end",
- Message: message,
- },
- })
- }
- if err != nil {
- event.Error(wd.ctx, "ending work", err)
- }
- if wd.cleanup != nil {
- wd.cleanup()
- }
-}
-
-// EventWriter writes every incoming []byte to
-// event.Print with the operation=generate tag
-// to distinguish its logs from others.
-type EventWriter struct {
- ctx context.Context
- operation string
-}
-
-func NewEventWriter(ctx context.Context, operation string) *EventWriter {
- return &EventWriter{ctx: ctx, operation: operation}
-}
-
-func (ew *EventWriter) Write(p []byte) (n int, err error) {
- event.Log(ew.ctx, string(p), tag.Operation.Of(ew.operation))
- return len(p), nil
-}
-
-// WorkDoneWriter wraps a workDone handle to provide a Writer interface,
-// so that workDone reporting can more easily be hooked into commands.
-type WorkDoneWriter struct {
- wd *WorkDone
-}
-
-func NewWorkDoneWriter(wd *WorkDone) *WorkDoneWriter {
- return &WorkDoneWriter{wd: wd}
-}
-
-func (wdw WorkDoneWriter) Write(p []byte) (n int, err error) {
- wdw.wd.Report(string(p), 0)
- // Don't fail just because of a failure to report progress.
- return len(p), nil
-}
diff --git a/internal/lsp/progress/progress_test.go b/internal/lsp/progress/progress_test.go
deleted file mode 100644
index b3c821938..000000000
--- a/internal/lsp/progress/progress_test.go
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package progress
-
-import (
- "context"
- "fmt"
- "sync"
- "testing"
-
- "golang.org/x/tools/internal/lsp/protocol"
-)
-
-type fakeClient struct {
- protocol.Client
-
- token protocol.ProgressToken
-
- mu sync.Mutex
- created, begun, reported, messages, ended int
-}
-
-func (c *fakeClient) checkToken(token protocol.ProgressToken) {
- if token == nil {
- panic("nil token in progress message")
- }
- if c.token != nil && c.token != token {
- panic(fmt.Errorf("invalid token in progress message: got %v, want %v", token, c.token))
- }
-}
-
-func (c *fakeClient) WorkDoneProgressCreate(ctx context.Context, params *protocol.WorkDoneProgressCreateParams) error {
- c.mu.Lock()
- defer c.mu.Unlock()
- c.checkToken(params.Token)
- c.created++
- return nil
-}
-
-func (c *fakeClient) Progress(ctx context.Context, params *protocol.ProgressParams) error {
- c.mu.Lock()
- defer c.mu.Unlock()
- c.checkToken(params.Token)
- switch params.Value.(type) {
- case *protocol.WorkDoneProgressBegin:
- c.begun++
- case *protocol.WorkDoneProgressReport:
- c.reported++
- case *protocol.WorkDoneProgressEnd:
- c.ended++
- default:
- panic(fmt.Errorf("unknown progress value %T", params.Value))
- }
- return nil
-}
-
-func (c *fakeClient) ShowMessage(context.Context, *protocol.ShowMessageParams) error {
- c.mu.Lock()
- defer c.mu.Unlock()
- c.messages++
- return nil
-}
-
-func setup(token protocol.ProgressToken) (context.Context, *Tracker, *fakeClient) {
- c := &fakeClient{}
- tracker := NewTracker(c)
- tracker.SetSupportsWorkDoneProgress(true)
- return context.Background(), tracker, c
-}
-
-func TestProgressTracker_Reporting(t *testing.T) {
- for _, test := range []struct {
- name string
- supported bool
- token protocol.ProgressToken
- wantReported, wantCreated, wantBegun, wantEnded int
- wantMessages int
- }{
- {
- name: "unsupported",
- wantMessages: 2,
- },
- {
- name: "random token",
- supported: true,
- wantCreated: 1,
- wantBegun: 1,
- wantReported: 1,
- wantEnded: 1,
- },
- {
- name: "string token",
- supported: true,
- token: "token",
- wantBegun: 1,
- wantReported: 1,
- wantEnded: 1,
- },
- {
- name: "numeric token",
- supported: true,
- token: 1,
- wantReported: 1,
- wantBegun: 1,
- wantEnded: 1,
- },
- } {
- test := test
- t.Run(test.name, func(t *testing.T) {
- ctx, tracker, client := setup(test.token)
- ctx, cancel := context.WithCancel(ctx)
- defer cancel()
- tracker.supportsWorkDoneProgress = test.supported
- work := tracker.Start(ctx, "work", "message", test.token, nil)
- client.mu.Lock()
- gotCreated, gotBegun := client.created, client.begun
- client.mu.Unlock()
- if gotCreated != test.wantCreated {
- t.Errorf("got %d created tokens, want %d", gotCreated, test.wantCreated)
- }
- if gotBegun != test.wantBegun {
- t.Errorf("got %d work begun, want %d", gotBegun, test.wantBegun)
- }
- // Ignore errors: this is just testing the reporting behavior.
- work.Report("report", 50)
- client.mu.Lock()
- gotReported := client.reported
- client.mu.Unlock()
- if gotReported != test.wantReported {
- t.Errorf("got %d progress reports, want %d", gotReported, test.wantCreated)
- }
- work.End("done")
- client.mu.Lock()
- gotEnded, gotMessages := client.ended, client.messages
- client.mu.Unlock()
- if gotEnded != test.wantEnded {
- t.Errorf("got %d ended reports, want %d", gotEnded, test.wantEnded)
- }
- if gotMessages != test.wantMessages {
- t.Errorf("got %d messages, want %d", gotMessages, test.wantMessages)
- }
- })
- }
-}
-
-func TestProgressTracker_Cancellation(t *testing.T) {
- for _, token := range []protocol.ProgressToken{nil, 1, "a"} {
- ctx, tracker, _ := setup(token)
- var canceled bool
- cancel := func() { canceled = true }
- work := tracker.Start(ctx, "work", "message", token, cancel)
- if err := tracker.Cancel(ctx, work.Token()); err != nil {
- t.Fatal(err)
- }
- if !canceled {
- t.Errorf("tracker.cancel(...): cancel not called")
- }
- }
-}
diff --git a/internal/lsp/protocol/doc.go b/internal/lsp/protocol/doc.go
deleted file mode 100644
index 2ffdf5128..000000000
--- a/internal/lsp/protocol/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package protocol contains the structs that map directly to the wire format
-// of the "Language Server Protocol".
-//
-// It is a literal transcription, with unmodified comments, and only the changes
-// required to make it go code.
-// Names are uppercased to export them.
-// All fields have JSON tags added to correct the names.
-// Fields marked with a ? are also marked as "omitempty"
-// Fields that are "|| null" are made pointers
-// Fields that are string or number are left as string
-// Fields that are type "number" are made float64
-package protocol
diff --git a/internal/lsp/protocol/enums.go b/internal/lsp/protocol/enums.go
deleted file mode 100644
index 434808eeb..000000000
--- a/internal/lsp/protocol/enums.go
+++ /dev/null
@@ -1,246 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package protocol
-
-import (
- "fmt"
-)
-
-var (
- namesTextDocumentSyncKind [int(Incremental) + 1]string
- namesInitializeError [int(UnknownProtocolVersion) + 1]string
- namesMessageType [int(Log) + 1]string
- namesFileChangeType [int(Deleted) + 1]string
- namesWatchKind [int(WatchDelete) + 1]string
- namesCompletionTriggerKind [int(TriggerForIncompleteCompletions) + 1]string
- namesDiagnosticSeverity [int(SeverityHint) + 1]string
- namesDiagnosticTag [int(Unnecessary) + 1]string
- namesCompletionItemKind [int(TypeParameterCompletion) + 1]string
- namesInsertTextFormat [int(SnippetTextFormat) + 1]string
- namesDocumentHighlightKind [int(Write) + 1]string
- namesSymbolKind [int(TypeParameter) + 1]string
- namesTextDocumentSaveReason [int(FocusOut) + 1]string
-)
-
-func init() {
- namesTextDocumentSyncKind[int(None)] = "None"
- namesTextDocumentSyncKind[int(Full)] = "Full"
- namesTextDocumentSyncKind[int(Incremental)] = "Incremental"
-
- namesInitializeError[int(UnknownProtocolVersion)] = "UnknownProtocolVersion"
-
- namesMessageType[int(Error)] = "Error"
- namesMessageType[int(Warning)] = "Warning"
- namesMessageType[int(Info)] = "Info"
- namesMessageType[int(Log)] = "Log"
-
- namesFileChangeType[int(Created)] = "Created"
- namesFileChangeType[int(Changed)] = "Changed"
- namesFileChangeType[int(Deleted)] = "Deleted"
-
- namesWatchKind[int(WatchCreate)] = "WatchCreate"
- namesWatchKind[int(WatchChange)] = "WatchChange"
- namesWatchKind[int(WatchDelete)] = "WatchDelete"
-
- namesCompletionTriggerKind[int(Invoked)] = "Invoked"
- namesCompletionTriggerKind[int(TriggerCharacter)] = "TriggerCharacter"
- namesCompletionTriggerKind[int(TriggerForIncompleteCompletions)] = "TriggerForIncompleteCompletions"
-
- namesDiagnosticSeverity[int(SeverityError)] = "Error"
- namesDiagnosticSeverity[int(SeverityWarning)] = "Warning"
- namesDiagnosticSeverity[int(SeverityInformation)] = "Information"
- namesDiagnosticSeverity[int(SeverityHint)] = "Hint"
-
- namesDiagnosticTag[int(Unnecessary)] = "Unnecessary"
-
- namesCompletionItemKind[int(TextCompletion)] = "text"
- namesCompletionItemKind[int(MethodCompletion)] = "method"
- namesCompletionItemKind[int(FunctionCompletion)] = "func"
- namesCompletionItemKind[int(ConstructorCompletion)] = "constructor"
- namesCompletionItemKind[int(FieldCompletion)] = "field"
- namesCompletionItemKind[int(VariableCompletion)] = "var"
- namesCompletionItemKind[int(ClassCompletion)] = "type"
- namesCompletionItemKind[int(InterfaceCompletion)] = "interface"
- namesCompletionItemKind[int(ModuleCompletion)] = "package"
- namesCompletionItemKind[int(PropertyCompletion)] = "property"
- namesCompletionItemKind[int(UnitCompletion)] = "unit"
- namesCompletionItemKind[int(ValueCompletion)] = "value"
- namesCompletionItemKind[int(EnumCompletion)] = "enum"
- namesCompletionItemKind[int(KeywordCompletion)] = "keyword"
- namesCompletionItemKind[int(SnippetCompletion)] = "snippet"
- namesCompletionItemKind[int(ColorCompletion)] = "color"
- namesCompletionItemKind[int(FileCompletion)] = "file"
- namesCompletionItemKind[int(ReferenceCompletion)] = "reference"
- namesCompletionItemKind[int(FolderCompletion)] = "folder"
- namesCompletionItemKind[int(EnumMemberCompletion)] = "enumMember"
- namesCompletionItemKind[int(ConstantCompletion)] = "const"
- namesCompletionItemKind[int(StructCompletion)] = "struct"
- namesCompletionItemKind[int(EventCompletion)] = "event"
- namesCompletionItemKind[int(OperatorCompletion)] = "operator"
- namesCompletionItemKind[int(TypeParameterCompletion)] = "typeParam"
-
- namesInsertTextFormat[int(PlainTextTextFormat)] = "PlainText"
- namesInsertTextFormat[int(SnippetTextFormat)] = "Snippet"
-
- namesDocumentHighlightKind[int(Text)] = "Text"
- namesDocumentHighlightKind[int(Read)] = "Read"
- namesDocumentHighlightKind[int(Write)] = "Write"
-
- namesSymbolKind[int(File)] = "File"
- namesSymbolKind[int(Module)] = "Module"
- namesSymbolKind[int(Namespace)] = "Namespace"
- namesSymbolKind[int(Package)] = "Package"
- namesSymbolKind[int(Class)] = "Class"
- namesSymbolKind[int(Method)] = "Method"
- namesSymbolKind[int(Property)] = "Property"
- namesSymbolKind[int(Field)] = "Field"
- namesSymbolKind[int(Constructor)] = "Constructor"
- namesSymbolKind[int(Enum)] = "Enum"
- namesSymbolKind[int(Interface)] = "Interface"
- namesSymbolKind[int(Function)] = "Function"
- namesSymbolKind[int(Variable)] = "Variable"
- namesSymbolKind[int(Constant)] = "Constant"
- namesSymbolKind[int(String)] = "String"
- namesSymbolKind[int(Number)] = "Number"
- namesSymbolKind[int(Boolean)] = "Boolean"
- namesSymbolKind[int(Array)] = "Array"
- namesSymbolKind[int(Object)] = "Object"
- namesSymbolKind[int(Key)] = "Key"
- namesSymbolKind[int(Null)] = "Null"
- namesSymbolKind[int(EnumMember)] = "EnumMember"
- namesSymbolKind[int(Struct)] = "Struct"
- namesSymbolKind[int(Event)] = "Event"
- namesSymbolKind[int(Operator)] = "Operator"
- namesSymbolKind[int(TypeParameter)] = "TypeParameter"
-
- namesTextDocumentSaveReason[int(Manual)] = "Manual"
- namesTextDocumentSaveReason[int(AfterDelay)] = "AfterDelay"
- namesTextDocumentSaveReason[int(FocusOut)] = "FocusOut"
-}
-
-func formatEnum(f fmt.State, c rune, i int, names []string, unknown string) {
- s := ""
- if i >= 0 && i < len(names) {
- s = names[i]
- }
- if s != "" {
- fmt.Fprint(f, s)
- } else {
- fmt.Fprintf(f, "%s(%d)", unknown, i)
- }
-}
-
-func parseEnum(s string, names []string) int {
- for i, name := range names {
- if s == name {
- return i
- }
- }
- return 0
-}
-
-func (e TextDocumentSyncKind) Format(f fmt.State, c rune) {
- formatEnum(f, c, int(e), namesTextDocumentSyncKind[:], "TextDocumentSyncKind")
-}
-
-func ParseTextDocumentSyncKind(s string) TextDocumentSyncKind {
- return TextDocumentSyncKind(parseEnum(s, namesTextDocumentSyncKind[:]))
-}
-
-func (e InitializeError) Format(f fmt.State, c rune) {
- formatEnum(f, c, int(e), namesInitializeError[:], "InitializeError")
-}
-
-func ParseInitializeError(s string) InitializeError {
- return InitializeError(parseEnum(s, namesInitializeError[:]))
-}
-
-func (e MessageType) Format(f fmt.State, c rune) {
- formatEnum(f, c, int(e), namesMessageType[:], "MessageType")
-}
-
-func ParseMessageType(s string) MessageType {
- return MessageType(parseEnum(s, namesMessageType[:]))
-}
-
-func (e FileChangeType) Format(f fmt.State, c rune) {
- formatEnum(f, c, int(e), namesFileChangeType[:], "FileChangeType")
-}
-
-func ParseFileChangeType(s string) FileChangeType {
- return FileChangeType(parseEnum(s, namesFileChangeType[:]))
-}
-
-func (e WatchKind) Format(f fmt.State, c rune) {
- formatEnum(f, c, int(e), namesWatchKind[:], "WatchKind")
-}
-
-func ParseWatchKind(s string) WatchKind {
- return WatchKind(parseEnum(s, namesWatchKind[:]))
-}
-
-func (e CompletionTriggerKind) Format(f fmt.State, c rune) {
- formatEnum(f, c, int(e), namesCompletionTriggerKind[:], "CompletionTriggerKind")
-}
-
-func ParseCompletionTriggerKind(s string) CompletionTriggerKind {
- return CompletionTriggerKind(parseEnum(s, namesCompletionTriggerKind[:]))
-}
-
-func (e DiagnosticSeverity) Format(f fmt.State, c rune) {
- formatEnum(f, c, int(e), namesDiagnosticSeverity[:], "DiagnosticSeverity")
-}
-
-func ParseDiagnosticSeverity(s string) DiagnosticSeverity {
- return DiagnosticSeverity(parseEnum(s, namesDiagnosticSeverity[:]))
-}
-
-func (e DiagnosticTag) Format(f fmt.State, c rune) {
- formatEnum(f, c, int(e), namesDiagnosticTag[:], "DiagnosticTag")
-}
-
-func ParseDiagnosticTag(s string) DiagnosticTag {
- return DiagnosticTag(parseEnum(s, namesDiagnosticTag[:]))
-}
-
-func (e CompletionItemKind) Format(f fmt.State, c rune) {
- formatEnum(f, c, int(e), namesCompletionItemKind[:], "CompletionItemKind")
-}
-
-func ParseCompletionItemKind(s string) CompletionItemKind {
- return CompletionItemKind(parseEnum(s, namesCompletionItemKind[:]))
-}
-
-func (e InsertTextFormat) Format(f fmt.State, c rune) {
- formatEnum(f, c, int(e), namesInsertTextFormat[:], "InsertTextFormat")
-}
-
-func ParseInsertTextFormat(s string) InsertTextFormat {
- return InsertTextFormat(parseEnum(s, namesInsertTextFormat[:]))
-}
-
-func (e DocumentHighlightKind) Format(f fmt.State, c rune) {
- formatEnum(f, c, int(e), namesDocumentHighlightKind[:], "DocumentHighlightKind")
-}
-
-func ParseDocumentHighlightKind(s string) DocumentHighlightKind {
- return DocumentHighlightKind(parseEnum(s, namesDocumentHighlightKind[:]))
-}
-
-func (e SymbolKind) Format(f fmt.State, c rune) {
- formatEnum(f, c, int(e), namesSymbolKind[:], "SymbolKind")
-}
-
-func ParseSymbolKind(s string) SymbolKind {
- return SymbolKind(parseEnum(s, namesSymbolKind[:]))
-}
-
-func (e TextDocumentSaveReason) Format(f fmt.State, c rune) {
- formatEnum(f, c, int(e), namesTextDocumentSaveReason[:], "TextDocumentSaveReason")
-}
-
-func ParseTextDocumentSaveReason(s string) TextDocumentSaveReason {
- return TextDocumentSaveReason(parseEnum(s, namesTextDocumentSaveReason[:]))
-}
diff --git a/internal/lsp/protocol/protocol.go b/internal/lsp/protocol/protocol.go
deleted file mode 100644
index a8b3354b4..000000000
--- a/internal/lsp/protocol/protocol.go
+++ /dev/null
@@ -1,277 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package protocol
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "io"
-
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/jsonrpc2"
- jsonrpc2_v2 "golang.org/x/tools/internal/jsonrpc2_v2"
- "golang.org/x/tools/internal/xcontext"
- errors "golang.org/x/xerrors"
-)
-
-var (
- // RequestCancelledError should be used when a request is cancelled early.
- RequestCancelledError = jsonrpc2.NewError(-32800, "JSON RPC cancelled")
- RequestCancelledErrorV2 = jsonrpc2_v2.NewError(-32800, "JSON RPC cancelled")
-)
-
-type ClientCloser interface {
- Client
- io.Closer
-}
-
-type connSender interface {
- io.Closer
-
- Notify(ctx context.Context, method string, params interface{}) error
- Call(ctx context.Context, method string, params, result interface{}) error
-}
-
-type clientDispatcher struct {
- sender connSender
-}
-
-func (c *clientDispatcher) Close() error {
- return c.sender.Close()
-}
-
-// ClientDispatcher returns a Client that dispatches LSP requests across the
-// given jsonrpc2 connection.
-func ClientDispatcher(conn jsonrpc2.Conn) ClientCloser {
- return &clientDispatcher{sender: clientConn{conn}}
-}
-
-type clientConn struct {
- conn jsonrpc2.Conn
-}
-
-func (c clientConn) Close() error {
- return c.conn.Close()
-}
-
-func (c clientConn) Notify(ctx context.Context, method string, params interface{}) error {
- return c.conn.Notify(ctx, method, params)
-}
-
-func (c clientConn) Call(ctx context.Context, method string, params interface{}, result interface{}) error {
- id, err := c.conn.Call(ctx, method, params, result)
- if ctx.Err() != nil {
- cancelCall(ctx, c, id)
- }
- return err
-}
-
-func ClientDispatcherV2(conn *jsonrpc2_v2.Connection) ClientCloser {
- return &clientDispatcher{clientConnV2{conn}}
-}
-
-type clientConnV2 struct {
- conn *jsonrpc2_v2.Connection
-}
-
-func (c clientConnV2) Close() error {
- return c.conn.Close()
-}
-
-func (c clientConnV2) Notify(ctx context.Context, method string, params interface{}) error {
- return c.conn.Notify(ctx, method, params)
-}
-
-func (c clientConnV2) Call(ctx context.Context, method string, params interface{}, result interface{}) error {
- call := c.conn.Call(ctx, method, params)
- err := call.Await(ctx, result)
- if ctx.Err() != nil {
- detached := xcontext.Detach(ctx)
- c.conn.Notify(detached, "$/cancelRequest", &CancelParams{ID: call.ID().Raw()})
- }
- return err
-}
-
-// ServerDispatcher returns a Server that dispatches LSP requests across the
-// given jsonrpc2 connection.
-func ServerDispatcher(conn jsonrpc2.Conn) Server {
- return &serverDispatcher{sender: clientConn{conn}}
-}
-
-func ServerDispatcherV2(conn *jsonrpc2_v2.Connection) Server {
- return &serverDispatcher{sender: clientConnV2{conn}}
-}
-
-type serverDispatcher struct {
- sender connSender
-}
-
-func ClientHandler(client Client, handler jsonrpc2.Handler) jsonrpc2.Handler {
- return func(ctx context.Context, reply jsonrpc2.Replier, req jsonrpc2.Request) error {
- if ctx.Err() != nil {
- ctx := xcontext.Detach(ctx)
- return reply(ctx, nil, RequestCancelledError)
- }
- handled, err := clientDispatch(ctx, client, reply, req)
- if handled || err != nil {
- return err
- }
- return handler(ctx, reply, req)
- }
-}
-
-func ClientHandlerV2(client Client) jsonrpc2_v2.Handler {
- return jsonrpc2_v2.HandlerFunc(func(ctx context.Context, req *jsonrpc2_v2.Request) (interface{}, error) {
- if ctx.Err() != nil {
- return nil, RequestCancelledErrorV2
- }
- req1 := req2to1(req)
- var (
- result interface{}
- resErr error
- )
- replier := func(_ context.Context, res interface{}, err error) error {
- result, resErr = res, err
- return nil
- }
- _, err := clientDispatch(ctx, client, replier, req1)
- if err != nil {
- return nil, err
- }
- return result, resErr
- })
-}
-
-func ServerHandler(server Server, handler jsonrpc2.Handler) jsonrpc2.Handler {
- return func(ctx context.Context, reply jsonrpc2.Replier, req jsonrpc2.Request) error {
- if ctx.Err() != nil {
- ctx := xcontext.Detach(ctx)
- return reply(ctx, nil, RequestCancelledError)
- }
- handled, err := serverDispatch(ctx, server, reply, req)
- if handled || err != nil {
- return err
- }
- //TODO: This code is wrong, it ignores handler and assumes non standard
- // request handles everything
- // non standard request should just be a layered handler.
- var params interface{}
- if err := json.Unmarshal(req.Params(), &params); err != nil {
- return sendParseError(ctx, reply, err)
- }
- resp, err := server.NonstandardRequest(ctx, req.Method(), params)
- return reply(ctx, resp, err)
-
- }
-}
-
-func ServerHandlerV2(server Server) jsonrpc2_v2.Handler {
- return jsonrpc2_v2.HandlerFunc(func(ctx context.Context, req *jsonrpc2_v2.Request) (interface{}, error) {
- if ctx.Err() != nil {
- return nil, RequestCancelledErrorV2
- }
- req1 := req2to1(req)
- var (
- result interface{}
- resErr error
- )
- replier := func(_ context.Context, res interface{}, err error) error {
- result, resErr = res, err
- return nil
- }
- _, err := serverDispatch(ctx, server, replier, req1)
- if err != nil {
- return nil, err
- }
- return result, resErr
- })
-}
-
-func req2to1(req2 *jsonrpc2_v2.Request) jsonrpc2.Request {
- if req2.ID.IsValid() {
- raw := req2.ID.Raw()
- var idv1 jsonrpc2.ID
- switch v := raw.(type) {
- case int64:
- idv1 = jsonrpc2.NewIntID(v)
- case string:
- idv1 = jsonrpc2.NewStringID(v)
- default:
- panic(fmt.Sprintf("unsupported ID type %T", raw))
- }
- req1, err := jsonrpc2.NewCall(idv1, req2.Method, req2.Params)
- if err != nil {
- panic(err)
- }
- return req1
- }
- req1, err := jsonrpc2.NewNotification(req2.Method, req2.Params)
- if err != nil {
- panic(err)
- }
- return req1
-}
-
-func Handlers(handler jsonrpc2.Handler) jsonrpc2.Handler {
- return CancelHandler(
- jsonrpc2.AsyncHandler(
- jsonrpc2.MustReplyHandler(handler)))
-}
-
-func CancelHandler(handler jsonrpc2.Handler) jsonrpc2.Handler {
- handler, canceller := jsonrpc2.CancelHandler(handler)
- return func(ctx context.Context, reply jsonrpc2.Replier, req jsonrpc2.Request) error {
- if req.Method() != "$/cancelRequest" {
- // TODO(iancottrell): See if we can generate a reply for the request to be cancelled
- // at the point of cancellation rather than waiting for gopls to naturally reply.
- // To do that, we need to keep track of whether a reply has been sent already and
- // be careful about racing between the two paths.
- // TODO(iancottrell): Add a test that watches the stream and verifies the response
- // for the cancelled request flows.
- replyWithDetachedContext := func(ctx context.Context, resp interface{}, err error) error {
- // https://microsoft.github.io/language-server-protocol/specifications/specification-current/#cancelRequest
- if ctx.Err() != nil && err == nil {
- err = RequestCancelledError
- }
- ctx = xcontext.Detach(ctx)
- return reply(ctx, resp, err)
- }
- return handler(ctx, replyWithDetachedContext, req)
- }
- var params CancelParams
- if err := json.Unmarshal(req.Params(), &params); err != nil {
- return sendParseError(ctx, reply, err)
- }
- if n, ok := params.ID.(float64); ok {
- canceller(jsonrpc2.NewIntID(int64(n)))
- } else if s, ok := params.ID.(string); ok {
- canceller(jsonrpc2.NewStringID(s))
- } else {
- return sendParseError(ctx, reply, fmt.Errorf("request ID %v malformed", params.ID))
- }
- return reply(ctx, nil, nil)
- }
-}
-
-func Call(ctx context.Context, conn jsonrpc2.Conn, method string, params interface{}, result interface{}) error {
- id, err := conn.Call(ctx, method, params, result)
- if ctx.Err() != nil {
- cancelCall(ctx, clientConn{conn}, id)
- }
- return err
-}
-
-func cancelCall(ctx context.Context, sender connSender, id jsonrpc2.ID) {
- ctx = xcontext.Detach(ctx)
- ctx, done := event.Start(ctx, "protocol.canceller")
- defer done()
- // Note that only *jsonrpc2.ID implements json.Marshaler.
- sender.Notify(ctx, "$/cancelRequest", &CancelParams{ID: &id})
-}
-
-func sendParseError(ctx context.Context, reply jsonrpc2.Replier, err error) error {
- return reply(ctx, nil, errors.Errorf("%w: %s", jsonrpc2.ErrParse, err))
-}
diff --git a/internal/lsp/protocol/span.go b/internal/lsp/protocol/span.go
deleted file mode 100644
index 381e5f500..000000000
--- a/internal/lsp/protocol/span.go
+++ /dev/null
@@ -1,151 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// this file contains protocol<->span converters
-
-package protocol
-
-import (
- "fmt"
-
- "golang.org/x/tools/internal/span"
- errors "golang.org/x/xerrors"
-)
-
-type ColumnMapper struct {
- URI span.URI
- Converter *span.TokenConverter
- Content []byte
-}
-
-func URIFromSpanURI(uri span.URI) DocumentURI {
- return DocumentURI(uri)
-}
-
-func URIFromPath(path string) DocumentURI {
- return URIFromSpanURI(span.URIFromPath(path))
-}
-
-func (u DocumentURI) SpanURI() span.URI {
- return span.URIFromURI(string(u))
-}
-
-func (m *ColumnMapper) Location(s span.Span) (Location, error) {
- rng, err := m.Range(s)
- if err != nil {
- return Location{}, err
- }
- return Location{URI: URIFromSpanURI(s.URI()), Range: rng}, nil
-}
-
-func (m *ColumnMapper) Range(s span.Span) (Range, error) {
- if span.CompareURI(m.URI, s.URI()) != 0 {
- return Range{}, errors.Errorf("column mapper is for file %q instead of %q", m.URI, s.URI())
- }
- s, err := s.WithAll(m.Converter)
- if err != nil {
- return Range{}, err
- }
- start, err := m.Position(s.Start())
- if err != nil {
- return Range{}, err
- }
- end, err := m.Position(s.End())
- if err != nil {
- return Range{}, err
- }
- return Range{Start: start, End: end}, nil
-}
-
-func (m *ColumnMapper) Position(p span.Point) (Position, error) {
- chr, err := span.ToUTF16Column(p, m.Content)
- if err != nil {
- return Position{}, err
- }
- return Position{
- Line: uint32(p.Line() - 1),
- Character: uint32(chr - 1),
- }, nil
-}
-
-func (m *ColumnMapper) Span(l Location) (span.Span, error) {
- return m.RangeSpan(l.Range)
-}
-
-func (m *ColumnMapper) RangeSpan(r Range) (span.Span, error) {
- start, err := m.Point(r.Start)
- if err != nil {
- return span.Span{}, err
- }
- end, err := m.Point(r.End)
- if err != nil {
- return span.Span{}, err
- }
- return span.New(m.URI, start, end).WithAll(m.Converter)
-}
-
-func (m *ColumnMapper) RangeToSpanRange(r Range) (span.Range, error) {
- spn, err := m.RangeSpan(r)
- if err != nil {
- return span.Range{}, err
- }
- return spn.Range(m.Converter)
-}
-
-func (m *ColumnMapper) PointSpan(p Position) (span.Span, error) {
- start, err := m.Point(p)
- if err != nil {
- return span.Span{}, err
- }
- return span.New(m.URI, start, start).WithAll(m.Converter)
-}
-
-func (m *ColumnMapper) Point(p Position) (span.Point, error) {
- line := int(p.Line) + 1
- offset, err := m.Converter.ToOffset(line, 1)
- if err != nil {
- return span.Point{}, err
- }
- lineStart := span.NewPoint(line, 1, offset)
- return span.FromUTF16Column(lineStart, int(p.Character)+1, m.Content)
-}
-
-func IsPoint(r Range) bool {
- return r.Start.Line == r.End.Line && r.Start.Character == r.End.Character
-}
-
-func CompareRange(a, b Range) int {
- if r := ComparePosition(a.Start, b.Start); r != 0 {
- return r
- }
- return ComparePosition(a.End, b.End)
-}
-
-func ComparePosition(a, b Position) int {
- if a.Line < b.Line {
- return -1
- }
- if a.Line > b.Line {
- return 1
- }
- if a.Character < b.Character {
- return -1
- }
- if a.Character > b.Character {
- return 1
- }
- return 0
-}
-
-func Intersect(a, b Range) bool {
- if a.Start.Line > b.End.Line || a.End.Line < b.Start.Line {
- return false
- }
- return !((a.Start.Line == b.End.Line) && a.Start.Character > b.End.Character ||
- (a.End.Line == b.Start.Line) && a.End.Character < b.Start.Character)
-}
-
-func (r Range) Format(f fmt.State, _ rune) {
- fmt.Fprintf(f, "%v:%v-%v:%v", r.Start.Line, r.Start.Character, r.End.Line, r.End.Character)
-}
diff --git a/internal/lsp/protocol/tsclient.go b/internal/lsp/protocol/tsclient.go
deleted file mode 100644
index 004cad93b..000000000
--- a/internal/lsp/protocol/tsclient.go
+++ /dev/null
@@ -1,205 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Code generated (see typescript/README.md) DO NOT EDIT.
-
-package protocol
-
-// Package protocol contains data types and code for LSP json rpcs
-// generated automatically from vscode-languageserver-node
-// commit: 696f9285bf849b73745682fdb1c1feac73eb8772
-// last fetched Fri Mar 04 2022 14:48:10 GMT-0500 (Eastern Standard Time)
-
-import (
- "context"
- "encoding/json"
-
- "golang.org/x/tools/internal/jsonrpc2"
- errors "golang.org/x/xerrors"
-)
-
-type Client interface {
- ShowMessage(context.Context, *ShowMessageParams) error
- LogMessage(context.Context, *LogMessageParams) error
- Event(context.Context, *interface{}) error
- PublishDiagnostics(context.Context, *PublishDiagnosticsParams) error
- Progress(context.Context, *ProgressParams) error
- WorkspaceFolders(context.Context) ([]WorkspaceFolder /*WorkspaceFolder[] | null*/, error)
- Configuration(context.Context, *ParamConfiguration) ([]LSPAny, error)
- WorkDoneProgressCreate(context.Context, *WorkDoneProgressCreateParams) error
- ShowDocument(context.Context, *ShowDocumentParams) (*ShowDocumentResult, error)
- RegisterCapability(context.Context, *RegistrationParams) error
- UnregisterCapability(context.Context, *UnregistrationParams) error
- ShowMessageRequest(context.Context, *ShowMessageRequestParams) (*MessageActionItem /*MessageActionItem | null*/, error)
- ApplyEdit(context.Context, *ApplyWorkspaceEditParams) (*ApplyWorkspaceEditResult, error)
-}
-
-func clientDispatch(ctx context.Context, client Client, reply jsonrpc2.Replier, r jsonrpc2.Request) (bool, error) {
- switch r.Method() {
- case "window/showMessage": // notif
- var params ShowMessageParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- err := client.ShowMessage(ctx, &params)
- return true, reply(ctx, nil, err)
- case "window/logMessage": // notif
- var params LogMessageParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- err := client.LogMessage(ctx, &params)
- return true, reply(ctx, nil, err)
- case "telemetry/event": // notif
- var params interface{}
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- err := client.Event(ctx, &params)
- return true, reply(ctx, nil, err)
- case "textDocument/publishDiagnostics": // notif
- var params PublishDiagnosticsParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- err := client.PublishDiagnostics(ctx, &params)
- return true, reply(ctx, nil, err)
- case "$/progress": // notif
- var params ProgressParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- err := client.Progress(ctx, &params)
- return true, reply(ctx, nil, err)
- case "workspace/workspaceFolders": // req
- if len(r.Params()) > 0 {
- return true, reply(ctx, nil, errors.Errorf("%w: expected no params", jsonrpc2.ErrInvalidParams))
- }
- resp, err := client.WorkspaceFolders(ctx)
- return true, reply(ctx, resp, err)
- case "workspace/configuration": // req
- var params ParamConfiguration
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := client.Configuration(ctx, &params)
- return true, reply(ctx, resp, err)
- case "window/workDoneProgress/create": // req
- var params WorkDoneProgressCreateParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- err := client.WorkDoneProgressCreate(ctx, &params)
- return true, reply(ctx, nil, err)
- case "window/showDocument": // req
- var params ShowDocumentParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := client.ShowDocument(ctx, &params)
- return true, reply(ctx, resp, err)
- case "client/registerCapability": // req
- var params RegistrationParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- err := client.RegisterCapability(ctx, &params)
- return true, reply(ctx, nil, err)
- case "client/unregisterCapability": // req
- var params UnregistrationParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- err := client.UnregisterCapability(ctx, &params)
- return true, reply(ctx, nil, err)
- case "window/showMessageRequest": // req
- var params ShowMessageRequestParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := client.ShowMessageRequest(ctx, &params)
- return true, reply(ctx, resp, err)
- case "workspace/applyEdit": // req
- var params ApplyWorkspaceEditParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := client.ApplyEdit(ctx, &params)
- return true, reply(ctx, resp, err)
-
- default:
- return false, nil
- }
-}
-
-func (s *clientDispatcher) ShowMessage(ctx context.Context, params *ShowMessageParams) error {
- return s.sender.Notify(ctx, "window/showMessage", params)
-}
-
-func (s *clientDispatcher) LogMessage(ctx context.Context, params *LogMessageParams) error {
- return s.sender.Notify(ctx, "window/logMessage", params)
-}
-
-func (s *clientDispatcher) Event(ctx context.Context, params *interface{}) error {
- return s.sender.Notify(ctx, "telemetry/event", params)
-}
-
-func (s *clientDispatcher) PublishDiagnostics(ctx context.Context, params *PublishDiagnosticsParams) error {
- return s.sender.Notify(ctx, "textDocument/publishDiagnostics", params)
-}
-
-func (s *clientDispatcher) Progress(ctx context.Context, params *ProgressParams) error {
- return s.sender.Notify(ctx, "$/progress", params)
-}
-func (s *clientDispatcher) WorkspaceFolders(ctx context.Context) ([]WorkspaceFolder /*WorkspaceFolder[] | null*/, error) {
- var result []WorkspaceFolder /*WorkspaceFolder[] | null*/
- if err := s.sender.Call(ctx, "workspace/workspaceFolders", nil, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *clientDispatcher) Configuration(ctx context.Context, params *ParamConfiguration) ([]LSPAny, error) {
- var result []LSPAny
- if err := s.sender.Call(ctx, "workspace/configuration", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *clientDispatcher) WorkDoneProgressCreate(ctx context.Context, params *WorkDoneProgressCreateParams) error {
- return s.sender.Call(ctx, "window/workDoneProgress/create", params, nil) // Call, not Notify
-}
-
-func (s *clientDispatcher) ShowDocument(ctx context.Context, params *ShowDocumentParams) (*ShowDocumentResult, error) {
- var result *ShowDocumentResult
- if err := s.sender.Call(ctx, "window/showDocument", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *clientDispatcher) RegisterCapability(ctx context.Context, params *RegistrationParams) error {
- return s.sender.Call(ctx, "client/registerCapability", params, nil) // Call, not Notify
-}
-
-func (s *clientDispatcher) UnregisterCapability(ctx context.Context, params *UnregistrationParams) error {
- return s.sender.Call(ctx, "client/unregisterCapability", params, nil) // Call, not Notify
-}
-
-func (s *clientDispatcher) ShowMessageRequest(ctx context.Context, params *ShowMessageRequestParams) (*MessageActionItem /*MessageActionItem | null*/, error) {
- var result *MessageActionItem /*MessageActionItem | null*/
- if err := s.sender.Call(ctx, "window/showMessageRequest", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *clientDispatcher) ApplyEdit(ctx context.Context, params *ApplyWorkspaceEditParams) (*ApplyWorkspaceEditResult, error) {
- var result *ApplyWorkspaceEditResult
- if err := s.sender.Call(ctx, "workspace/applyEdit", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
diff --git a/internal/lsp/protocol/tsprotocol.go b/internal/lsp/protocol/tsprotocol.go
deleted file mode 100644
index 2438d40c3..000000000
--- a/internal/lsp/protocol/tsprotocol.go
+++ /dev/null
@@ -1,6750 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Code generated (see typescript/README.md) DO NOT EDIT.
-
-// Package protocol contains data types and code for LSP json rpcs
-// generated automatically from vscode-languageserver-node
-// commit: 696f9285bf849b73745682fdb1c1feac73eb8772
-// last fetched Fri Mar 04 2022 14:48:10 GMT-0500 (Eastern Standard Time)
-package protocol
-
-import "encoding/json"
-
-/**
- * A special text edit with an additional change annotation.
- *
- * @since 3.16.0.
- */
-type AnnotatedTextEdit struct {
- /**
- * The actual identifier of the change annotation
- */
- AnnotationID ChangeAnnotationIdentifier `json:"annotationId"`
- TextEdit
-}
-
-/**
- * The parameters passed via a apply workspace edit request.
- */
-type ApplyWorkspaceEditParams struct {
- /**
- * An optional label of the workspace edit. This label is
- * presented in the user interface for example on an undo
- * stack to undo the workspace edit.
- */
- Label string `json:"label,omitempty"`
- /**
- * The edits to apply.
- */
- Edit WorkspaceEdit `json:"edit"`
-}
-
-/**
- * The result returned from the apply workspace edit request.
- *
- * @since 3.17 renamed from ApplyWorkspaceEditResponse
- */
-type ApplyWorkspaceEditResult struct {
- /**
- * Indicates whether the edit was applied or not.
- */
- Applied bool `json:"applied"`
- /**
- * An optional textual description for why the edit was not applied.
- * This may be used by the server for diagnostic logging or to provide
- * a suitable error for a request that triggered the edit.
- */
- FailureReason string `json:"failureReason,omitempty"`
- /**
- * Depending on the client's failure handling strategy `failedChange` might
- * contain the index of the change that failed. This property is only available
- * if the client signals a `failureHandlingStrategy` in its client capabilities.
- */
- FailedChange uint32 `json:"failedChange,omitempty"`
-}
-
-/**
- * @since 3.16.0
- */
-type CallHierarchyClientCapabilities struct {
- /**
- * Whether implementation supports dynamic registration. If this is set to `true`
- * the client supports the new `(TextDocumentRegistrationOptions & StaticRegistrationOptions)`
- * return value for the corresponding server capability as well.
- */
- DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
-}
-
-/**
- * Represents an incoming call, e.g. a caller of a method or constructor.
- *
- * @since 3.16.0
- */
-type CallHierarchyIncomingCall struct {
- /**
- * The item that makes the call.
- */
- From CallHierarchyItem `json:"from"`
- /**
- * The ranges at which the calls appear. This is relative to the caller
- * denoted by [`this.from`](#CallHierarchyIncomingCall.from).
- */
- FromRanges []Range `json:"fromRanges"`
-}
-
-/**
- * The parameter of a `callHierarchy/incomingCalls` request.
- *
- * @since 3.16.0
- */
-type CallHierarchyIncomingCallsParams struct {
- Item CallHierarchyItem `json:"item"`
- WorkDoneProgressParams
- PartialResultParams
-}
-
-/**
- * Represents programming constructs like functions or constructors in the context
- * of call hierarchy.
- *
- * @since 3.16.0
- */
-type CallHierarchyItem struct {
- /**
- * The name of this item.
- */
- Name string `json:"name"`
- /**
- * The kind of this item.
- */
- Kind SymbolKind `json:"kind"`
- /**
- * Tags for this item.
- */
- Tags []SymbolTag `json:"tags,omitempty"`
- /**
- * More detail for this item, e.g. the signature of a function.
- */
- Detail string `json:"detail,omitempty"`
- /**
- * The resource identifier of this item.
- */
- URI DocumentURI `json:"uri"`
- /**
- * The range enclosing this symbol not including leading/trailing whitespace but everything else, e.g. comments and code.
- */
- Range Range `json:"range"`
- /**
- * The range that should be selected and revealed when this symbol is being picked, e.g. the name of a function.
- * Must be contained by the [`range`](#CallHierarchyItem.range).
- */
- SelectionRange Range `json:"selectionRange"`
- /**
- * A data entry field that is preserved between a call hierarchy prepare and
- * incoming calls or outgoing calls requests.
- */
- Data LSPAny `json:"data,omitempty"`
-}
-
-/**
- * Call hierarchy options used during static registration.
- *
- * @since 3.16.0
- */
-type CallHierarchyOptions struct {
- WorkDoneProgressOptions
-}
-
-/**
- * Represents an outgoing call, e.g. calling a getter from a method or a method from a constructor etc.
- *
- * @since 3.16.0
- */
-type CallHierarchyOutgoingCall struct {
- /**
- * The item that is called.
- */
- To CallHierarchyItem `json:"to"`
- /**
- * The range at which this item is called. This is the range relative to the caller, e.g the item
- * passed to [`provideCallHierarchyOutgoingCalls`](#CallHierarchyItemProvider.provideCallHierarchyOutgoingCalls)
- * and not [`this.to`](#CallHierarchyOutgoingCall.to).
- */
- FromRanges []Range `json:"fromRanges"`
-}
-
-/**
- * The parameter of a `callHierarchy/outgoingCalls` request.
- *
- * @since 3.16.0
- */
-type CallHierarchyOutgoingCallsParams struct {
- Item CallHierarchyItem `json:"item"`
- WorkDoneProgressParams
- PartialResultParams
-}
-
-/**
- * The parameter of a `textDocument/prepareCallHierarchy` request.
- *
- * @since 3.16.0
- */
-type CallHierarchyPrepareParams struct {
- TextDocumentPositionParams
- WorkDoneProgressParams
-}
-
-/**
- * Call hierarchy options used during static or dynamic registration.
- *
- * @since 3.16.0
- */
-type CallHierarchyRegistrationOptions struct {
- TextDocumentRegistrationOptions
- CallHierarchyOptions
- StaticRegistrationOptions
-}
-
-type CancelParams struct {
- /**
- * The request id to cancel.
- */
- ID interface{} /*number | string*/ `json:"id"`
-}
-
-/**
- * Additional information that describes document changes.
- *
- * @since 3.16.0
- */
-type ChangeAnnotation struct {
- /**
- * A human-readable string describing the actual change. The string
- * is rendered prominent in the user interface.
- */
- Label string `json:"label"`
- /**
- * A flag which indicates that user confirmation is needed
- * before applying the change.
- */
- NeedsConfirmation bool `json:"needsConfirmation,omitempty"`
- /**
- * A human-readable string which is rendered less prominent in
- * the user interface.
- */
- Description string `json:"description,omitempty"`
-}
-
-/**
- * An identifier to refer to a change annotation stored with a workspace edit.
- */
-type ChangeAnnotationIdentifier = string
-
-type ClientCapabilities struct {
- /**
- * The workspace client capabilities
- */
- Workspace Workspace3Gn `json:"workspace,omitempty"`
- /**
- * Text document specific client capabilities.
- */
- TextDocument TextDocumentClientCapabilities `json:"textDocument,omitempty"`
- /**
- * Window specific client capabilities.
- */
- Window struct {
- /**
- * Whether client supports server initiated progress using the
- * `window/workDoneProgress/create` request.
- *
- * Since 3.15.0
- */
- WorkDoneProgress bool `json:"workDoneProgress,omitempty"`
- /**
- * Capabilities specific to the showMessage request.
- *
- * @since 3.16.0
- */
- ShowMessage ShowMessageRequestClientCapabilities `json:"showMessage,omitempty"`
- /**
- * Capabilities specific to the showDocument request.
- *
- * @since 3.16.0
- */
- ShowDocument ShowDocumentClientCapabilities `json:"showDocument,omitempty"`
- } `json:"window,omitempty"`
- /**
- * General client capabilities.
- *
- * @since 3.16.0
- */
- General GeneralClientCapabilities `json:"general,omitempty"`
- /**
- * Experimental client capabilities.
- */
- Experimental interface{} `json:"experimental,omitempty"`
-}
-
-/**
- * A code action represents a change that can be performed in code, e.g. to fix a problem or
- * to refactor code.
- *
- * A CodeAction must set either `edit` and/or a `command`. If both are supplied, the `edit` is applied first, then the `command` is executed.
- */
-type CodeAction struct {
- /**
- * A short, human-readable, title for this code action.
- */
- Title string `json:"title"`
- /**
- * The kind of the code action.
- *
- * Used to filter code actions.
- */
- Kind CodeActionKind `json:"kind,omitempty"`
- /**
- * The diagnostics that this code action resolves.
- */
- Diagnostics []Diagnostic `json:"diagnostics,omitempty"`
- /**
- * Marks this as a preferred action. Preferred actions are used by the `auto fix` command and can be targeted
- * by keybindings.
- *
- * A quick fix should be marked preferred if it properly addresses the underlying error.
- * A refactoring should be marked preferred if it is the most reasonable choice of actions to take.
- *
- * @since 3.15.0
- */
- IsPreferred bool `json:"isPreferred,omitempty"`
- /**
- * Marks that the code action cannot currently be applied.
- *
- * Clients should follow the following guidelines regarding disabled code actions:
- *
- * - Disabled code actions are not shown in automatic [lightbulb](https://code.visualstudio.com/docs/editor/editingevolved#_code-action)
- * code action menu.
- *
- * - Disabled actions are shown as faded out in the code action menu when the user request a more specific type
- * of code action, such as refactorings.
- *
- * - If the user has a [keybinding](https://code.visualstudio.com/docs/editor/refactoring#_keybindings-for-code-actions)
- * that auto applies a code action and only a disabled code actions are returned, the client should show the user an
- * error message with `reason` in the editor.
- *
- * @since 3.16.0
- */
- Disabled *struct {
- /**
- * Human readable description of why the code action is currently disabled.
- *
- * This is displayed in the code actions UI.
- */
- Reason string `json:"reason"`
- } `json:"disabled,omitempty"`
- /**
- * The workspace edit this code action performs.
- */
- Edit WorkspaceEdit `json:"edit,omitempty"`
- /**
- * A command this code action executes. If a code action
- * provides a edit and a command, first the edit is
- * executed and then the command.
- */
- Command *Command `json:"command,omitempty"`
- /**
- * A data entry field that is preserved on a code action between
- * a `textDocument/codeAction` and a `codeAction/resolve` request.
- *
- * @since 3.16.0
- */
- Data LSPAny `json:"data,omitempty"`
-}
-
-/**
- * The Client Capabilities of a [CodeActionRequest](#CodeActionRequest).
- */
-type CodeActionClientCapabilities struct {
- /**
- * Whether code action supports dynamic registration.
- */
- DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
- /**
- * The client support code action literals of type `CodeAction` as a valid
- * response of the `textDocument/codeAction` request. If the property is not
- * set the request can only return `Command` literals.
- *
- * @since 3.8.0
- */
- CodeActionLiteralSupport struct {
- /**
- * The code action kind is support with the following value
- * set.
- */
- CodeActionKind struct {
- /**
- * The code action kind values the client supports. When this
- * property exists the client also guarantees that it will
- * handle values outside its set gracefully and falls back
- * to a default value when unknown.
- */
- ValueSet []CodeActionKind `json:"valueSet"`
- } `json:"codeActionKind"`
- } `json:"codeActionLiteralSupport,omitempty"`
- /**
- * Whether code action supports the `isPreferred` property.
- *
- * @since 3.15.0
- */
- IsPreferredSupport bool `json:"isPreferredSupport,omitempty"`
- /**
- * Whether code action supports the `disabled` property.
- *
- * @since 3.16.0
- */
- DisabledSupport bool `json:"disabledSupport,omitempty"`
- /**
- * Whether code action supports the `data` property which is
- * preserved between a `textDocument/codeAction` and a
- * `codeAction/resolve` request.
- *
- * @since 3.16.0
- */
- DataSupport bool `json:"dataSupport,omitempty"`
- /**
- * Whether the client support resolving additional code action
- * properties via a separate `codeAction/resolve` request.
- *
- * @since 3.16.0
- */
- ResolveSupport struct {
- /**
- * The properties that a client can resolve lazily.
- */
- Properties []string `json:"properties"`
- } `json:"resolveSupport,omitempty"`
- /**
- * Whether th client honors the change annotations in
- * text edits and resource operations returned via the
- * `CodeAction#edit` property by for example presenting
- * the workspace edit in the user interface and asking
- * for confirmation.
- *
- * @since 3.16.0
- */
- HonorsChangeAnnotations bool `json:"honorsChangeAnnotations,omitempty"`
-}
-
-/**
- * Contains additional diagnostic information about the context in which
- * a [code action](#CodeActionProvider.provideCodeActions) is run.
- */
-type CodeActionContext struct {
- /**
- * An array of diagnostics known on the client side overlapping the range provided to the
- * `textDocument/codeAction` request. They are provided so that the server knows which
- * errors are currently presented to the user for the given range. There is no guarantee
- * that these accurately reflect the error state of the resource. The primary parameter
- * to compute code actions is the provided range.
- */
- Diagnostics []Diagnostic `json:"diagnostics"`
- /**
- * Requested kind of actions to return.
- *
- * Actions not of this kind are filtered out by the client before being shown. So servers
- * can omit computing them.
- */
- Only []CodeActionKind `json:"only,omitempty"`
- /**
- * The reason why code actions were requested.
- *
- * @since 3.17.0
- */
- TriggerKind CodeActionTriggerKind `json:"triggerKind,omitempty"`
-}
-
-/**
- * A set of predefined code action kinds
- */
-type CodeActionKind string
-
-/**
- * Provider options for a [CodeActionRequest](#CodeActionRequest).
- */
-type CodeActionOptions struct {
- /**
- * CodeActionKinds that this server may return.
- *
- * The list of kinds may be generic, such as `CodeActionKind.Refactor`, or the server
- * may list out every specific kind they provide.
- */
- CodeActionKinds []CodeActionKind `json:"codeActionKinds,omitempty"`
- /**
- * The server provides support to resolve additional
- * information for a code action.
- *
- * @since 3.16.0
- */
- ResolveProvider bool `json:"resolveProvider,omitempty"`
- WorkDoneProgressOptions
-}
-
-/**
- * The parameters of a [CodeActionRequest](#CodeActionRequest).
- */
-type CodeActionParams struct {
- /**
- * The document in which the command was invoked.
- */
- TextDocument TextDocumentIdentifier `json:"textDocument"`
- /**
- * The range for which the command was invoked.
- */
- Range Range `json:"range"`
- /**
- * Context carrying additional information.
- */
- Context CodeActionContext `json:"context"`
- WorkDoneProgressParams
- PartialResultParams
-}
-
-/**
- * The reason why code actions were requested.
- *
- * @since 3.17.0 - proposed state
- */
-type CodeActionTriggerKind float64
-
-/**
- * Structure to capture a description for an error code.
- *
- * @since 3.16.0
- */
-type CodeDescription struct {
- /**
- * An URI to open with more information about the diagnostic error.
- */
- Href URI `json:"href"`
-}
-
-/**
- * A code lens represents a [command](#Command) that should be shown along with
- * source text, like the number of references, a way to run tests, etc.
- *
- * A code lens is _unresolved_ when no command is associated to it. For performance
- * reasons the creation of a code lens and resolving should be done to two stages.
- */
-type CodeLens struct {
- /**
- * The range in which this code lens is valid. Should only span a single line.
- */
- Range Range `json:"range"`
- /**
- * The command this code lens represents.
- */
- Command Command `json:"command,omitempty"`
- /**
- * A data entry field that is preserved on a code lens item between
- * a [CodeLensRequest](#CodeLensRequest) and a [CodeLensResolveRequest]
- * (#CodeLensResolveRequest)
- */
- Data LSPAny `json:"data,omitempty"`
-}
-
-/**
- * The client capabilities of a [CodeLensRequest](#CodeLensRequest).
- */
-type CodeLensClientCapabilities struct {
- /**
- * Whether code lens supports dynamic registration.
- */
- DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
-}
-
-/**
- * Code Lens provider options of a [CodeLensRequest](#CodeLensRequest).
- */
-type CodeLensOptions struct {
- /**
- * Code lens has a resolve provider as well.
- */
- ResolveProvider bool `json:"resolveProvider,omitempty"`
- WorkDoneProgressOptions
-}
-
-/**
- * The parameters of a [CodeLensRequest](#CodeLensRequest).
- */
-type CodeLensParams struct {
- /**
- * The document to request code lens for.
- */
- TextDocument TextDocumentIdentifier `json:"textDocument"`
- WorkDoneProgressParams
- PartialResultParams
-}
-
-/**
- * @since 3.16.0
- */
-type CodeLensWorkspaceClientCapabilities struct {
- /**
- * Whether the client implementation supports a refresh request sent from the
- * server to the client.
- *
- * Note that this event is global and will force the client to refresh all
- * code lenses currently shown. It should be used with absolute care and is
- * useful for situation where a server for example detect a project wide
- * change that requires such a calculation.
- */
- RefreshSupport bool `json:"refreshSupport,omitempty"`
-}
-
-/**
- * Represents a color in RGBA space.
- */
-type Color struct {
- /**
- * The red component of this color in the range [0-1].
- */
- Red Decimal `json:"red"`
- /**
- * The green component of this color in the range [0-1].
- */
- Green Decimal `json:"green"`
- /**
- * The blue component of this color in the range [0-1].
- */
- Blue Decimal `json:"blue"`
- /**
- * The alpha component of this color in the range [0-1].
- */
- Alpha Decimal `json:"alpha"`
-}
-
-/**
- * Represents a color range from a document.
- */
-type ColorInformation struct {
- /**
- * The range in the document where this color appears.
- */
- Range Range `json:"range"`
- /**
- * The actual color value for this color range.
- */
- Color Color `json:"color"`
-}
-
-type ColorPresentation struct {
- /**
- * The label of this color presentation. It will be shown on the color
- * picker header. By default this is also the text that is inserted when selecting
- * this color presentation.
- */
- Label string `json:"label"`
- /**
- * An [edit](#TextEdit) which is applied to a document when selecting
- * this presentation for the color. When `falsy` the [label](#ColorPresentation.label)
- * is used.
- */
- TextEdit TextEdit `json:"textEdit,omitempty"`
- /**
- * An optional array of additional [text edits](#TextEdit) that are applied when
- * selecting this color presentation. Edits must not overlap with the main [edit](#ColorPresentation.textEdit) nor with themselves.
- */
- AdditionalTextEdits []TextEdit `json:"additionalTextEdits,omitempty"`
-}
-
-/**
- * Parameters for a [ColorPresentationRequest](#ColorPresentationRequest).
- */
-type ColorPresentationParams struct {
- /**
- * The text document.
- */
- TextDocument TextDocumentIdentifier `json:"textDocument"`
- /**
- * The color to request presentations for.
- */
- Color Color `json:"color"`
- /**
- * The range where the color would be inserted. Serves as a context.
- */
- Range Range `json:"range"`
- WorkDoneProgressParams
- PartialResultParams
-}
-
-/**
- * Represents a reference to a command. Provides a title which
- * will be used to represent a command in the UI and, optionally,
- * an array of arguments which will be passed to the command handler
- * function when invoked.
- */
-type Command struct {
- /**
- * Title of the command, like `save`.
- */
- Title string `json:"title"`
- /**
- * The identifier of the actual command handler.
- */
- Command string `json:"command"`
- /**
- * Arguments that the command handler should be
- * invoked with.
- */
- Arguments []json.RawMessage `json:"arguments,omitempty"`
-}
-
-/**
- * Completion client capabilities
- */
-type CompletionClientCapabilities struct {
- /**
- * Whether completion supports dynamic registration.
- */
- DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
- /**
- * The client supports the following `CompletionItem` specific
- * capabilities.
- */
- CompletionItem struct {
- /**
- * Client supports snippets as insert text.
- *
- * A snippet can define tab stops and placeholders with `$1`, `$2`
- * and `${3:foo}`. `$0` defines the final tab stop, it defaults to
- * the end of the snippet. Placeholders with equal identifiers are linked,
- * that is typing in one will update others too.
- */
- SnippetSupport bool `json:"snippetSupport,omitempty"`
- /**
- * Client supports commit characters on a completion item.
- */
- CommitCharactersSupport bool `json:"commitCharactersSupport,omitempty"`
- /**
- * Client supports the follow content formats for the documentation
- * property. The order describes the preferred format of the client.
- */
- DocumentationFormat []MarkupKind `json:"documentationFormat,omitempty"`
- /**
- * Client supports the deprecated property on a completion item.
- */
- DeprecatedSupport bool `json:"deprecatedSupport,omitempty"`
- /**
- * Client supports the preselect property on a completion item.
- */
- PreselectSupport bool `json:"preselectSupport,omitempty"`
- /**
- * Client supports the tag property on a completion item. Clients supporting
- * tags have to handle unknown tags gracefully. Clients especially need to
- * preserve unknown tags when sending a completion item back to the server in
- * a resolve call.
- *
- * @since 3.15.0
- */
- TagSupport struct {
- /**
- * The tags supported by the client.
- */
- ValueSet []CompletionItemTag `json:"valueSet"`
- } `json:"tagSupport,omitempty"`
- /**
- * Client support insert replace edit to control different behavior if a
- * completion item is inserted in the text or should replace text.
- *
- * @since 3.16.0
- */
- InsertReplaceSupport bool `json:"insertReplaceSupport,omitempty"`
- /**
- * Indicates which properties a client can resolve lazily on a completion
- * item. Before version 3.16.0 only the predefined properties `documentation`
- * and `details` could be resolved lazily.
- *
- * @since 3.16.0
- */
- ResolveSupport struct {
- /**
- * The properties that a client can resolve lazily.
- */
- Properties []string `json:"properties"`
- } `json:"resolveSupport,omitempty"`
- /**
- * The client supports the `insertTextMode` property on
- * a completion item to override the whitespace handling mode
- * as defined by the client (see `insertTextMode`).
- *
- * @since 3.16.0
- */
- InsertTextModeSupport struct {
- ValueSet []InsertTextMode `json:"valueSet"`
- } `json:"insertTextModeSupport,omitempty"`
- /**
- * The client has support for completion item label
- * details (see also `CompletionItemLabelDetails`).
- *
- * @since 3.17.0 - proposed state
- */
- LabelDetailsSupport bool `json:"labelDetailsSupport,omitempty"`
- } `json:"completionItem,omitempty"`
- CompletionItemKind struct {
- /**
- * The completion item kind values the client supports. When this
- * property exists the client also guarantees that it will
- * handle values outside its set gracefully and falls back
- * to a default value when unknown.
- *
- * If this property is not present the client only supports
- * the completion items kinds from `Text` to `Reference` as defined in
- * the initial version of the protocol.
- */
- ValueSet []CompletionItemKind `json:"valueSet,omitempty"`
- } `json:"completionItemKind,omitempty"`
- /**
- * Defines how the client handles whitespace and indentation
- * when accepting a completion item that uses multi line
- * text in either `insertText` or `textEdit`.
- *
- * @since 3.17.0 - proposed state
- */
- InsertTextMode InsertTextMode `json:"insertTextMode,omitempty"`
- /**
- * The client supports to send additional context information for a
- * `textDocument/completion` request.
- */
- ContextSupport bool `json:"contextSupport,omitempty"`
- /**
- * The client supports the following `CompletionList` specific
- * capabilities.
- *
- * @since 3.17.0 - proposed state
- */
- CompletionList struct {
- /**
- * The client supports the the following itemDefaults on
- * a completion list.
- *
- * The value lists the supported property names of the
- * `CompletionList.itemDefaults` object. If omitted
- * no properties are supported.
- *
- * @since 3.17.0 - proposed state
- */
- ItemDefaults []string `json:"itemDefaults,omitempty"`
- } `json:"completionList,omitempty"`
-}
-
-/**
- * Contains additional information about the context in which a completion request is triggered.
- */
-type CompletionContext struct {
- /**
- * How the completion was triggered.
- */
- TriggerKind CompletionTriggerKind `json:"triggerKind"`
- /**
- * The trigger character (a single character) that has trigger code complete.
- * Is undefined if `triggerKind !== CompletionTriggerKind.TriggerCharacter`
- */
- TriggerCharacter string `json:"triggerCharacter,omitempty"`
-}
-
-/**
- * A completion item represents a text snippet that is
- * proposed to complete text that is being typed.
- */
-type CompletionItem struct {
- /**
- * The label of this completion item.
- *
- * The label property is also by default the text that
- * is inserted when selecting this completion.
- *
- * If label details are provided the label itself should
- * be an unqualified name of the completion item.
- */
- Label string `json:"label"`
- /**
- * Additional details for the label
- *
- * @since 3.17.0 - proposed state
- */
- LabelDetails CompletionItemLabelDetails `json:"labelDetails,omitempty"`
- /**
- * The kind of this completion item. Based of the kind
- * an icon is chosen by the editor.
- */
- Kind CompletionItemKind `json:"kind,omitempty"`
- /**
- * Tags for this completion item.
- *
- * @since 3.15.0
- */
- Tags []CompletionItemTag `json:"tags,omitempty"`
- /**
- * A human-readable string with additional information
- * about this item, like type or symbol information.
- */
- Detail string `json:"detail,omitempty"`
- /**
- * A human-readable string that represents a doc-comment.
- */
- Documentation string/*string | MarkupContent*/ `json:"documentation,omitempty"`
- /**
- * Indicates if this item is deprecated.
- * @deprecated Use `tags` instead.
- */
- Deprecated bool `json:"deprecated,omitempty"`
- /**
- * Select this item when showing.
- *
- * *Note* that only one completion item can be selected and that the
- * tool / client decides which item that is. The rule is that the *first*
- * item of those that match best is selected.
- */
- Preselect bool `json:"preselect,omitempty"`
- /**
- * A string that should be used when comparing this item
- * with other items. When `falsy` the [label](#CompletionItem.label)
- * is used.
- */
- SortText string `json:"sortText,omitempty"`
- /**
- * A string that should be used when filtering a set of
- * completion items. When `falsy` the [label](#CompletionItem.label)
- * is used.
- */
- FilterText string `json:"filterText,omitempty"`
- /**
- * A string that should be inserted into a document when selecting
- * this completion. When `falsy` the [label](#CompletionItem.label)
- * is used.
- *
- * The `insertText` is subject to interpretation by the client side.
- * Some tools might not take the string literally. For example
- * VS Code when code complete is requested in this example `con<cursor position>`
- * and a completion item with an `insertText` of `console` is provided it
- * will only insert `sole`. Therefore it is recommended to use `textEdit` instead
- * since it avoids additional client side interpretation.
- */
- InsertText string `json:"insertText,omitempty"`
- /**
- * The format of the insert text. The format applies to both the `insertText` property
- * and the `newText` property of a provided `textEdit`. If omitted defaults to
- * `InsertTextFormat.PlainText`.
- *
- * Please note that the insertTextFormat doesn't apply to `additionalTextEdits`.
- */
- InsertTextFormat InsertTextFormat `json:"insertTextFormat,omitempty"`
- /**
- * How whitespace and indentation is handled during completion
- * item insertion. If ignored the clients default value depends on
- * the `textDocument.completion.insertTextMode` client capability.
- *
- * @since 3.16.0
- */
- InsertTextMode InsertTextMode `json:"insertTextMode,omitempty"`
- /**
- * An [edit](#TextEdit) which is applied to a document when selecting
- * this completion. When an edit is provided the value of
- * [insertText](#CompletionItem.insertText) is ignored.
- *
- * Most editors support two different operation when accepting a completion item. One is to insert a
- * completion text and the other is to replace an existing text with a completion text. Since this can
- * usually not predetermined by a server it can report both ranges. Clients need to signal support for
- * `InsertReplaceEdits` via the `textDocument.completion.insertReplaceSupport` client capability
- * property.
- *
- * *Note 1:* The text edit's range as well as both ranges from a insert replace edit must be a
- * [single line] and they must contain the position at which completion has been requested.
- * *Note 2:* If an `InsertReplaceEdit` is returned the edit's insert range must be a prefix of
- * the edit's replace range, that means it must be contained and starting at the same position.
- *
- * @since 3.16.0 additional type `InsertReplaceEdit`
- */
- TextEdit *TextEdit/*TextEdit | InsertReplaceEdit*/ `json:"textEdit,omitempty"`
- /**
- * An optional array of additional [text edits](#TextEdit) that are applied when
- * selecting this completion. Edits must not overlap (including the same insert position)
- * with the main [edit](#CompletionItem.textEdit) nor with themselves.
- *
- * Additional text edits should be used to change text unrelated to the current cursor position
- * (for example adding an import statement at the top of the file if the completion item will
- * insert an unqualified type).
- */
- AdditionalTextEdits []TextEdit `json:"additionalTextEdits,omitempty"`
- /**
- * An optional set of characters that when pressed while this completion is active will accept it first and
- * then type that character. *Note* that all commit characters should have `length=1` and that superfluous
- * characters will be ignored.
- */
- CommitCharacters []string `json:"commitCharacters,omitempty"`
- /**
- * An optional [command](#Command) that is executed *after* inserting this completion. *Note* that
- * additional modifications to the current document should be described with the
- * [additionalTextEdits](#CompletionItem.additionalTextEdits)-property.
- */
- Command *Command `json:"command,omitempty"`
- /**
- * A data entry field that is preserved on a completion item between a
- * [CompletionRequest](#CompletionRequest) and a [CompletionResolveRequest](#CompletionResolveRequest).
- */
- Data LSPAny `json:"data,omitempty"`
-}
-
-/**
- * The kind of a completion entry.
- */
-type CompletionItemKind float64
-
-/**
- * Additional details for a completion item label.
- *
- * @since 3.17.0 - proposed state
- */
-type CompletionItemLabelDetails struct {
- /**
- * An optional string which is rendered less prominently directly after {@link CompletionItem.label label},
- * without any spacing. Should be used for function signatures or type annotations.
- */
- Detail string `json:"detail,omitempty"`
- /**
- * An optional string which is rendered less prominently after {@link CompletionItem.detail}. Should be used
- * for fully qualified names or file path.
- */
- Description string `json:"description,omitempty"`
-}
-
-/**
- * Completion item tags are extra annotations that tweak the rendering of a completion
- * item.
- *
- * @since 3.15.0
- */
-type CompletionItemTag float64
-
-/**
- * Represents a collection of [completion items](#CompletionItem) to be presented
- * in the editor.
- */
-type CompletionList struct {
- /**
- * This list it not complete. Further typing results in recomputing this list.
- */
- IsIncomplete bool `json:"isIncomplete"`
- /**
- * In many cases the items of an actual completion result share the same
- * value for properties like `commitCharacters` or the range of a text
- * edit. A completion list can therefore define item defaults which will
- * be used if a completion item itself doesn't specify the value.
- *
- * If a completion list specifies a default value and a completion item
- * also specifies a corresponding value the one from the item is used.
- *
- * Servers are only allowed to return default values if the client
- * signals support for this via the `completionList.itemDefaults`
- * capability.
- *
- * @since 3.17.0 - proposed state
- */
- ItemDefaults struct {
- /**
- * A default commit character set.
- *
- * @since 3.17.0 - proposed state
- */
- CommitCharacters []string `json:"commitCharacters,omitempty"`
- /**
- * A default edit range
- *
- * @since 3.17.0 - proposed state
- */
- EditRange Range/*Range | { insert: Range; replace: Range; }*/ `json:"editRange,omitempty"`
- /**
- * A default insert text format
- *
- * @since 3.17.0 - proposed state
- */
- InsertTextFormat InsertTextFormat `json:"insertTextFormat,omitempty"`
- /**
- * A default insert text mode
- *
- * @since 3.17.0 - proposed state
- */
- InsertTextMode InsertTextMode `json:"insertTextMode,omitempty"`
- } `json:"itemDefaults,omitempty"`
- /**
- * The completion items.
- */
- Items []CompletionItem `json:"items"`
-}
-
-/**
- * Completion options.
- */
-type CompletionOptions struct {
- /**
- * Most tools trigger completion request automatically without explicitly requesting
- * it using a keyboard shortcut (e.g. Ctrl+Space). Typically they do so when the user
- * starts to type an identifier. For example if the user types `c` in a JavaScript file
- * code complete will automatically pop up present `console` besides others as a
- * completion item. Characters that make up identifiers don't need to be listed here.
- *
- * If code complete should automatically be trigger on characters not being valid inside
- * an identifier (for example `.` in JavaScript) list them in `triggerCharacters`.
- */
- TriggerCharacters []string `json:"triggerCharacters,omitempty"`
- /**
- * The list of all possible characters that commit a completion. This field can be used
- * if clients don't support individual commit characters per completion item. See
- * `ClientCapabilities.textDocument.completion.completionItem.commitCharactersSupport`
- *
- * If a server provides both `allCommitCharacters` and commit characters on an individual
- * completion item the ones on the completion item win.
- *
- * @since 3.2.0
- */
- AllCommitCharacters []string `json:"allCommitCharacters,omitempty"`
- /**
- * The server provides support to resolve additional
- * information for a completion item.
- */
- ResolveProvider bool `json:"resolveProvider,omitempty"`
- /**
- * The server supports the following `CompletionItem` specific
- * capabilities.
- *
- * @since 3.17.0 - proposed state
- */
- CompletionItem struct {
- /**
- * The server has support for completion item label
- * details (see also `CompletionItemLabelDetails`) when
- * receiving a completion item in a resolve call.
- *
- * @since 3.17.0 - proposed state
- */
- LabelDetailsSupport bool `json:"labelDetailsSupport,omitempty"`
- } `json:"completionItem,omitempty"`
- WorkDoneProgressOptions
-}
-
-/**
- * Completion parameters
- */
-type CompletionParams struct {
- /**
- * The completion context. This is only available it the client specifies
- * to send this using the client capability `textDocument.completion.contextSupport === true`
- */
- Context CompletionContext `json:"context,omitempty"`
- TextDocumentPositionParams
- WorkDoneProgressParams
- PartialResultParams
-}
-
-/**
- * How a completion was triggered
- */
-type CompletionTriggerKind float64
-
-type ConfigurationClientCapabilities struct {
- /**
- * The workspace client capabilities
- */
- Workspace Workspace4Gn `json:"workspace,omitempty"`
-}
-
-type ConfigurationItem struct {
- /**
- * The scope to get the configuration section for.
- */
- ScopeURI string `json:"scopeUri,omitempty"`
- /**
- * The configuration section asked for.
- */
- Section string `json:"section,omitempty"`
-}
-
-/**
- * The parameters of a configuration request.
- */
-type ConfigurationParams struct {
- Items []ConfigurationItem `json:"items"`
-}
-
-/**
- * Create file operation.
- */
-type CreateFile struct {
- /**
- * A create
- */
- Kind string `json:"kind"`
- /**
- * The resource to create.
- */
- URI DocumentURI `json:"uri"`
- /**
- * Additional options
- */
- Options CreateFileOptions `json:"options,omitempty"`
- ResourceOperation
-}
-
-/**
- * Options to create a file.
- */
-type CreateFileOptions struct {
- /**
- * Overwrite existing file. Overwrite wins over `ignoreIfExists`
- */
- Overwrite bool `json:"overwrite,omitempty"`
- /**
- * Ignore if exists.
- */
- IgnoreIfExists bool `json:"ignoreIfExists,omitempty"`
-}
-
-/**
- * The parameters sent in file create requests/notifications.
- *
- * @since 3.16.0
- */
-type CreateFilesParams struct {
- /**
- * An array of all files/folders created in this operation.
- */
- Files []FileCreate `json:"files"`
-}
-
-/**
- * Defines a decimal number. Since decimal numbers are very
- * rare in the language server specification we denote the
- * exact range with every decimal using the mathematics
- * interval notations (e.g. [0, 1] denotes all decimals d with
- * 0 <= d <= 1.
- */
-type Decimal = float64
-
-/**
- * The declaration of a symbol representation as one or many [locations](#Location).
- */
-type Declaration = []Location /*Location | Location[]*/
-
-/**
- * @since 3.14.0
- */
-type DeclarationClientCapabilities struct {
- /**
- * Whether declaration supports dynamic registration. If this is set to `true`
- * the client supports the new `DeclarationRegistrationOptions` return value
- * for the corresponding server capability as well.
- */
- DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
- /**
- * The client supports additional metadata in the form of declaration links.
- */
- LinkSupport bool `json:"linkSupport,omitempty"`
-}
-
-/**
- * Information about where a symbol is declared.
- *
- * Provides additional metadata over normal [location](#Location) declarations, including the range of
- * the declaring symbol.
- *
- * Servers should prefer returning `DeclarationLink` over `Declaration` if supported
- * by the client.
- */
-type DeclarationLink = LocationLink
-
-type DeclarationOptions struct {
- WorkDoneProgressOptions
-}
-
-type DeclarationParams struct {
- TextDocumentPositionParams
- WorkDoneProgressParams
- PartialResultParams
-}
-
-type DeclarationRegistrationOptions struct {
- DeclarationOptions
- TextDocumentRegistrationOptions
- StaticRegistrationOptions
-}
-
-/**
- * The definition of a symbol represented as one or many [locations](#Location).
- * For most programming languages there is only one location at which a symbol is
- * defined.
- *
- * Servers should prefer returning `DefinitionLink` over `Definition` if supported
- * by the client.
- */
-type Definition = []Location /*Location | Location[]*/
-
-/**
- * Client Capabilities for a [DefinitionRequest](#DefinitionRequest).
- */
-type DefinitionClientCapabilities struct {
- /**
- * Whether definition supports dynamic registration.
- */
- DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
- /**
- * The client supports additional metadata in the form of definition links.
- *
- * @since 3.14.0
- */
- LinkSupport bool `json:"linkSupport,omitempty"`
-}
-
-/**
- * Information about where a symbol is defined.
- *
- * Provides additional metadata over normal [location](#Location) definitions, including the range of
- * the defining symbol
- */
-type DefinitionLink = LocationLink
-
-/**
- * Server Capabilities for a [DefinitionRequest](#DefinitionRequest).
- */
-type DefinitionOptions struct {
- WorkDoneProgressOptions
-}
-
-/**
- * Parameters for a [DefinitionRequest](#DefinitionRequest).
- */
-type DefinitionParams struct {
- TextDocumentPositionParams
- WorkDoneProgressParams
- PartialResultParams
-}
-
-/**
- * Delete file operation
- */
-type DeleteFile struct {
- /**
- * A delete
- */
- Kind string `json:"kind"`
- /**
- * The file to delete.
- */
- URI DocumentURI `json:"uri"`
- /**
- * Delete options.
- */
- Options DeleteFileOptions `json:"options,omitempty"`
- ResourceOperation
-}
-
-/**
- * Delete file options
- */
-type DeleteFileOptions struct {
- /**
- * Delete the content recursively if a folder is denoted.
- */
- Recursive bool `json:"recursive,omitempty"`
- /**
- * Ignore the operation if the file doesn't exist.
- */
- IgnoreIfNotExists bool `json:"ignoreIfNotExists,omitempty"`
-}
-
-/**
- * The parameters sent in file delete requests/notifications.
- *
- * @since 3.16.0
- */
-type DeleteFilesParams struct {
- /**
- * An array of all files/folders deleted in this operation.
- */
- Files []FileDelete `json:"files"`
-}
-
-/**
- * Represents a diagnostic, such as a compiler error or warning. Diagnostic objects
- * are only valid in the scope of a resource.
- */
-type Diagnostic struct {
- /**
- * The range at which the message applies
- */
- Range Range `json:"range"`
- /**
- * The diagnostic's severity. Can be omitted. If omitted it is up to the
- * client to interpret diagnostics as error, warning, info or hint.
- */
- Severity DiagnosticSeverity `json:"severity,omitempty"`
- /**
- * The diagnostic's code, which usually appear in the user interface.
- */
- Code interface{}/*integer | string*/ `json:"code,omitempty"`
- /**
- * An optional property to describe the error code.
- * Requires the code field (above) to be present/not null.
- *
- * @since 3.16.0
- */
- CodeDescription *CodeDescription `json:"codeDescription,omitempty"`
- /**
- * A human-readable string describing the source of this
- * diagnostic, e.g. 'typescript' or 'super lint'. It usually
- * appears in the user interface.
- */
- Source string `json:"source,omitempty"`
- /**
- * The diagnostic's message. It usually appears in the user interface
- */
- Message string `json:"message"`
- /**
- * Additional metadata about the diagnostic.
- *
- * @since 3.15.0
- */
- Tags []DiagnosticTag `json:"tags,omitempty"`
- /**
- * An array of related diagnostic information, e.g. when symbol-names within
- * a scope collide all definitions can be marked via this property.
- */
- RelatedInformation []DiagnosticRelatedInformation `json:"relatedInformation,omitempty"`
- /**
- * A data entry field that is preserved between a `textDocument/publishDiagnostics`
- * notification and `textDocument/codeAction` request.
- *
- * @since 3.16.0
- */
- Data LSPAny `json:"data,omitempty"`
-}
-
-/**
- * Represents a related message and source code location for a diagnostic. This should be
- * used to point to code locations that cause or related to a diagnostics, e.g when duplicating
- * a symbol in a scope.
- */
-type DiagnosticRelatedInformation struct {
- /**
- * The location of this related diagnostic information.
- */
- Location Location `json:"location"`
- /**
- * The message of this related diagnostic information.
- */
- Message string `json:"message"`
-}
-
-/**
- * The diagnostic's severity.
- */
-type DiagnosticSeverity float64
-
-/**
- * The diagnostic tags.
- *
- * @since 3.15.0
- */
-type DiagnosticTag float64
-
-type DidChangeConfigurationClientCapabilities struct {
- /**
- * Did change configuration notification supports dynamic registration.
- */
- DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
-}
-
-/**
- * The parameters of a change configuration notification.
- */
-type DidChangeConfigurationParams struct {
- /**
- * The actual changed settings
- */
- Settings LSPAny `json:"settings"`
-}
-
-/**
- * The params sent in a change notebook document notification.
- *
- * @since 3.17.0 - proposed state
- */
-type DidChangeNotebookDocumentParams = struct {
- /**
- * The notebook document that did change. The version number points
- * to the version after all provided changes have been applied. If
- * only the text document content of a cell changes the notebook version
- * doesn't necessarily have to change.
- */
- NotebookDocument VersionedNotebookDocumentIdentifier `json:"notebookDocument"`
- /**
- * The actual changes to the notebook document.
- *
- * The changes describe single state changes to the notebook document.
- * So if there are two changes c1 (at array index 0) and c2 (at array
- * index 1) for a notebook in state S then c1 moves the notebook from
- * S to S' and c2 from S' to S''. So c1 is computed on the state S and
- * c2 is computed on the state S'.
- *
- * To mirror the content of a notebook using change events use the following approach:
- * - start with the same initial content
- * - apply the 'notebookDocument/didChange' notifications in the order you receive them.
- * - apply the `NotebookChangeEvent`s in a single notification in the order
- * you receive them.
- */
- Change NotebookDocumentChangeEvent `json:"change"`
-}
-
-/**
- * The change text document notification's parameters.
- */
-type DidChangeTextDocumentParams struct {
- /**
- * The document that did change. The version number points
- * to the version after all provided content changes have
- * been applied.
- */
- TextDocument VersionedTextDocumentIdentifier `json:"textDocument"`
- /**
- * The actual content changes. The content changes describe single state changes
- * to the document. So if there are two content changes c1 (at array index 0) and
- * c2 (at array index 1) for a document in state S then c1 moves the document from
- * S to S' and c2 from S' to S''. So c1 is computed on the state S and c2 is computed
- * on the state S'.
- *
- * To mirror the content of a document using change events use the following approach:
- * - start with the same initial content
- * - apply the 'textDocument/didChange' notifications in the order you receive them.
- * - apply the `TextDocumentContentChangeEvent`s in a single notification in the order
- * you receive them.
- */
- ContentChanges []TextDocumentContentChangeEvent `json:"contentChanges"`
-}
-
-type DidChangeWatchedFilesClientCapabilities struct {
- /**
- * Did change watched files notification supports dynamic registration. Please note
- * that the current protocol doesn't support static configuration for file changes
- * from the server side.
- */
- DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
-}
-
-/**
- * The watched files change notification's parameters.
- */
-type DidChangeWatchedFilesParams struct {
- /**
- * The actual file events.
- */
- Changes []FileEvent `json:"changes"`
-}
-
-/**
- * Describe options to be used when registered for text document change events.
- */
-type DidChangeWatchedFilesRegistrationOptions struct {
- /**
- * The watchers to register.
- */
- Watchers []FileSystemWatcher `json:"watchers"`
-}
-
-/**
- * The parameters of a `workspace/didChangeWorkspaceFolders` notification.
- */
-type DidChangeWorkspaceFoldersParams struct {
- /**
- * The actual workspace folder change event.
- */
- Event WorkspaceFoldersChangeEvent `json:"event"`
-}
-
-/**
- * The params sent in a close notebook document notification.
- *
- * @since 3.17.0 - proposed state
- */
-type DidCloseNotebookDocumentParams = struct {
- /**
- * The notebook document that got closed.
- */
- NotebookDocument NotebookDocumentIdentifier `json:"notebookDocument"`
- /**
- * The text documents that represent the content
- * of a notebook cell that got closed.
- */
- CellTextDocuments []TextDocumentIdentifier `json:"cellTextDocuments"`
-}
-
-/**
- * The parameters send in a close text document notification
- */
-type DidCloseTextDocumentParams struct {
- /**
- * The document that was closed.
- */
- TextDocument TextDocumentIdentifier `json:"textDocument"`
-}
-
-/**
- * The params sent in a open notebook document notification.
- *
- * @since 3.17.0 - proposed state
- */
-type DidOpenNotebookDocumentParams = struct {
- /**
- * The notebook document that got opened.
- */
- NotebookDocument NotebookDocument `json:"notebookDocument"`
- /**
- * The text documents that represent the content
- * of a notebook cell.
- */
- CellTextDocuments []TextDocumentItem `json:"cellTextDocuments"`
-}
-
-/**
- * The parameters send in a open text document notification
- */
-type DidOpenTextDocumentParams struct {
- /**
- * The document that was opened.
- */
- TextDocument TextDocumentItem `json:"textDocument"`
-}
-
-/**
- * The params sent in a save notebook document notification.
- *
- * @since 3.17.0 - proposed state
- */
-type DidSaveNotebookDocumentParams = struct {
- /**
- * The notebook document that got saved.
- */
- NotebookDocument NotebookDocumentIdentifier `json:"notebookDocument"`
-}
-
-/**
- * The parameters send in a save text document notification
- */
-type DidSaveTextDocumentParams struct {
- /**
- * The document that was closed.
- */
- TextDocument TextDocumentIdentifier `json:"textDocument"`
- /**
- * Optional the content when saved. Depends on the includeText value
- * when the save notification was requested.
- */
- Text *string `json:"text,omitempty"`
-}
-
-type DocumentColorClientCapabilities struct {
- /**
- * Whether implementation supports dynamic registration. If this is set to `true`
- * the client supports the new `DocumentColorRegistrationOptions` return value
- * for the corresponding server capability as well.
- */
- DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
-}
-
-type DocumentColorOptions struct {
- WorkDoneProgressOptions
-}
-
-/**
- * Parameters for a [DocumentColorRequest](#DocumentColorRequest).
- */
-type DocumentColorParams struct {
- /**
- * The text document.
- */
- TextDocument TextDocumentIdentifier `json:"textDocument"`
- WorkDoneProgressParams
- PartialResultParams
-}
-
-type DocumentColorRegistrationOptions struct {
- TextDocumentRegistrationOptions
- StaticRegistrationOptions
- DocumentColorOptions
-}
-
-/**
- * Parameters of the document diagnostic request.
- *
- * @since 3.17.0 - proposed state
- */
-type DocumentDiagnosticParams struct {
- /**
- * An optional token that a server can use to report work done progress.
- */
- WorkDoneToken ProgressToken `json:"workDoneToken,omitempty"`
- /**
- * An optional token that a server can use to report partial results (e.g. streaming) to
- * the client.
- */
- PartialResultToken ProgressToken `json:"partialResultToken,omitempty"`
- /**
- * The text document.
- */
- TextDocument TextDocumentIdentifier `json:"textDocument"`
- /**
- * The additional identifier provided during registration.
- */
- Identifier string `json:"identifier,omitempty"`
- /**
- * The result id of a previous response if provided.
- */
- PreviousResultID string `json:"previousResultId,omitempty"`
-}
-
-/**
- * The result of a document diagnostic pull request. A report can
- * either be a full report containing all diagnostics for the
- * requested document or a unchanged report indicating that nothing
- * has changed in terms of diagnostics in comparison to the last
- * pull request.
- *
- * @since 3.17.0 - proposed state
- */
-type DocumentDiagnosticReport = interface{} /*RelatedFullDocumentDiagnosticReport | RelatedUnchangedDocumentDiagnosticReport*/
-
-/**
- * A document filter describes a top level text document or
- * a notebook cell document.
- *
- * @since 3.17.0 - proposed support for NotebookCellTextDocumentFilter.
- */
-type DocumentFilter = interface{} /*TextDocumentFilter | NotebookCellTextDocumentFilter*/
-
-/**
- * Client capabilities of a [DocumentFormattingRequest](#DocumentFormattingRequest).
- */
-type DocumentFormattingClientCapabilities struct {
- /**
- * Whether formatting supports dynamic registration.
- */
- DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
-}
-
-/**
- * Provider options for a [DocumentFormattingRequest](#DocumentFormattingRequest).
- */
-type DocumentFormattingOptions struct {
- WorkDoneProgressOptions
-}
-
-/**
- * The parameters of a [DocumentFormattingRequest](#DocumentFormattingRequest).
- */
-type DocumentFormattingParams struct {
- /**
- * The document to format.
- */
- TextDocument TextDocumentIdentifier `json:"textDocument"`
- /**
- * The format options
- */
- Options FormattingOptions `json:"options"`
- WorkDoneProgressParams
-}
-
-/**
- * A document highlight is a range inside a text document which deserves
- * special attention. Usually a document highlight is visualized by changing
- * the background color of its range.
- */
-type DocumentHighlight struct {
- /**
- * The range this highlight applies to.
- */
- Range Range `json:"range"`
- /**
- * The highlight kind, default is [text](#DocumentHighlightKind.Text).
- */
- Kind DocumentHighlightKind `json:"kind,omitempty"`
-}
-
-/**
- * Client Capabilities for a [DocumentHighlightRequest](#DocumentHighlightRequest).
- */
-type DocumentHighlightClientCapabilities struct {
- /**
- * Whether document highlight supports dynamic registration.
- */
- DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
-}
-
-/**
- * A document highlight kind.
- */
-type DocumentHighlightKind float64
-
-/**
- * Provider options for a [DocumentHighlightRequest](#DocumentHighlightRequest).
- */
-type DocumentHighlightOptions struct {
- WorkDoneProgressOptions
-}
-
-/**
- * Parameters for a [DocumentHighlightRequest](#DocumentHighlightRequest).
- */
-type DocumentHighlightParams struct {
- TextDocumentPositionParams
- WorkDoneProgressParams
- PartialResultParams
-}
-
-/**
- * A document link is a range in a text document that links to an internal or external resource, like another
- * text document or a web site.
- */
-type DocumentLink struct {
- /**
- * The range this link applies to.
- */
- Range Range `json:"range"`
- /**
- * The uri this link points to.
- */
- Target string `json:"target,omitempty"`
- /**
- * The tooltip text when you hover over this link.
- *
- * If a tooltip is provided, is will be displayed in a string that includes instructions on how to
- * trigger the link, such as `{0} (ctrl + click)`. The specific instructions vary depending on OS,
- * user settings, and localization.
- *
- * @since 3.15.0
- */
- Tooltip string `json:"tooltip,omitempty"`
- /**
- * A data entry field that is preserved on a document link between a
- * DocumentLinkRequest and a DocumentLinkResolveRequest.
- */
- Data LSPAny `json:"data,omitempty"`
-}
-
-/**
- * The client capabilities of a [DocumentLinkRequest](#DocumentLinkRequest).
- */
-type DocumentLinkClientCapabilities struct {
- /**
- * Whether document link supports dynamic registration.
- */
- DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
- /**
- * Whether the client support the `tooltip` property on `DocumentLink`.
- *
- * @since 3.15.0
- */
- TooltipSupport bool `json:"tooltipSupport,omitempty"`
-}
-
-/**
- * Provider options for a [DocumentLinkRequest](#DocumentLinkRequest).
- */
-type DocumentLinkOptions struct {
- /**
- * Document links have a resolve provider as well.
- */
- ResolveProvider bool `json:"resolveProvider,omitempty"`
- WorkDoneProgressOptions
-}
-
-/**
- * The parameters of a [DocumentLinkRequest](#DocumentLinkRequest).
- */
-type DocumentLinkParams struct {
- /**
- * The document to provide document links for.
- */
- TextDocument TextDocumentIdentifier `json:"textDocument"`
- WorkDoneProgressParams
- PartialResultParams
-}
-
-/**
- * Client capabilities of a [DocumentOnTypeFormattingRequest](#DocumentOnTypeFormattingRequest).
- */
-type DocumentOnTypeFormattingClientCapabilities struct {
- /**
- * Whether on type formatting supports dynamic registration.
- */
- DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
-}
-
-/**
- * Provider options for a [DocumentOnTypeFormattingRequest](#DocumentOnTypeFormattingRequest).
- */
-type DocumentOnTypeFormattingOptions struct {
- /**
- * A character on which formatting should be triggered, like `}`.
- */
- FirstTriggerCharacter string `json:"firstTriggerCharacter"`
- /**
- * More trigger characters.
- */
- MoreTriggerCharacter []string `json:"moreTriggerCharacter,omitempty"`
-}
-
-/**
- * The parameters of a [DocumentOnTypeFormattingRequest](#DocumentOnTypeFormattingRequest).
- */
-type DocumentOnTypeFormattingParams struct {
- /**
- * The document to format.
- */
- TextDocument TextDocumentIdentifier `json:"textDocument"`
- /**
- * The position at which this request was send.
- */
- Position Position `json:"position"`
- /**
- * The character that has been typed.
- */
- Ch string `json:"ch"`
- /**
- * The format options.
- */
- Options FormattingOptions `json:"options"`
-}
-
-/**
- * Client capabilities of a [DocumentRangeFormattingRequest](#DocumentRangeFormattingRequest).
- */
-type DocumentRangeFormattingClientCapabilities struct {
- /**
- * Whether range formatting supports dynamic registration.
- */
- DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
-}
-
-/**
- * Provider options for a [DocumentRangeFormattingRequest](#DocumentRangeFormattingRequest).
- */
-type DocumentRangeFormattingOptions struct {
- WorkDoneProgressOptions
-}
-
-/**
- * The parameters of a [DocumentRangeFormattingRequest](#DocumentRangeFormattingRequest).
- */
-type DocumentRangeFormattingParams struct {
- /**
- * The document to format.
- */
- TextDocument TextDocumentIdentifier `json:"textDocument"`
- /**
- * The range to format
- */
- Range Range `json:"range"`
- /**
- * The format options
- */
- Options FormattingOptions `json:"options"`
- WorkDoneProgressParams
-}
-
-/**
- * A document selector is the combination of one or many document filters.
- *
- * @sample `let sel:DocumentSelector = [{ language: 'typescript' }, { language: 'json', pattern: '**∕tsconfig.json' }]`;
- *
- * The use of a string as a document filter is deprecated @since 3.16.0.
- */
-type DocumentSelector = []string /*string | DocumentFilter*/
-
-/**
- * Represents programming constructs like variables, classes, interfaces etc.
- * that appear in a document. Document symbols can be hierarchical and they
- * have two ranges: one that encloses its definition and one that points to
- * its most interesting range, e.g. the range of an identifier.
- */
-type DocumentSymbol struct {
- /**
- * The name of this symbol. Will be displayed in the user interface and therefore must not be
- * an empty string or a string only consisting of white spaces.
- */
- Name string `json:"name"`
- /**
- * More detail for this symbol, e.g the signature of a function.
- */
- Detail string `json:"detail,omitempty"`
- /**
- * The kind of this symbol.
- */
- Kind SymbolKind `json:"kind"`
- /**
- * Tags for this document symbol.
- *
- * @since 3.16.0
- */
- Tags []SymbolTag `json:"tags,omitempty"`
- /**
- * Indicates if this symbol is deprecated.
- *
- * @deprecated Use tags instead
- */
- Deprecated bool `json:"deprecated,omitempty"`
- /**
- * The range enclosing this symbol not including leading/trailing whitespace but everything else
- * like comments. This information is typically used to determine if the the clients cursor is
- * inside the symbol to reveal in the symbol in the UI.
- */
- Range Range `json:"range"`
- /**
- * The range that should be selected and revealed when this symbol is being picked, e.g the name of a function.
- * Must be contained by the the `range`.
- */
- SelectionRange Range `json:"selectionRange"`
- /**
- * Children of this symbol, e.g. properties of a class.
- */
- Children []DocumentSymbol `json:"children,omitempty"`
-}
-
-/**
- * Client Capabilities for a [DocumentSymbolRequest](#DocumentSymbolRequest).
- */
-type DocumentSymbolClientCapabilities struct {
- /**
- * Whether document symbol supports dynamic registration.
- */
- DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
- /**
- * Specific capabilities for the `SymbolKind`.
- */
- SymbolKind struct {
- /**
- * The symbol kind values the client supports. When this
- * property exists the client also guarantees that it will
- * handle values outside its set gracefully and falls back
- * to a default value when unknown.
- *
- * If this property is not present the client only supports
- * the symbol kinds from `File` to `Array` as defined in
- * the initial version of the protocol.
- */
- ValueSet []SymbolKind `json:"valueSet,omitempty"`
- } `json:"symbolKind,omitempty"`
- /**
- * The client support hierarchical document symbols.
- */
- HierarchicalDocumentSymbolSupport bool `json:"hierarchicalDocumentSymbolSupport,omitempty"`
- /**
- * The client supports tags on `SymbolInformation`. Tags are supported on
- * `DocumentSymbol` if `hierarchicalDocumentSymbolSupport` is set to true.
- * Clients supporting tags have to handle unknown tags gracefully.
- *
- * @since 3.16.0
- */
- TagSupport struct {
- /**
- * The tags supported by the client.
- */
- ValueSet []SymbolTag `json:"valueSet"`
- } `json:"tagSupport,omitempty"`
- /**
- * The client supports an additional label presented in the UI when
- * registering a document symbol provider.
- *
- * @since 3.16.0
- */
- LabelSupport bool `json:"labelSupport,omitempty"`
-}
-
-/**
- * Provider options for a [DocumentSymbolRequest](#DocumentSymbolRequest).
- */
-type DocumentSymbolOptions struct {
- /**
- * A human-readable string that is shown when multiple outlines trees
- * are shown for the same document.
- *
- * @since 3.16.0
- */
- Label string `json:"label,omitempty"`
- WorkDoneProgressOptions
-}
-
-/**
- * Parameters for a [DocumentSymbolRequest](#DocumentSymbolRequest).
- */
-type DocumentSymbolParams struct {
- /**
- * The text document.
- */
- TextDocument TextDocumentIdentifier `json:"textDocument"`
- WorkDoneProgressParams
- PartialResultParams
-}
-
-/**
- * A tagging type for string properties that are actually document URIs.
- */
-type DocumentURI string
-
-/**
- * The client capabilities of a [ExecuteCommandRequest](#ExecuteCommandRequest).
- */
-type ExecuteCommandClientCapabilities struct {
- /**
- * Execute command supports dynamic registration.
- */
- DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
-}
-
-/**
- * The server capabilities of a [ExecuteCommandRequest](#ExecuteCommandRequest).
- */
-type ExecuteCommandOptions struct {
- /**
- * The commands to be executed on the server
- */
- Commands []string `json:"commands"`
- WorkDoneProgressOptions
-}
-
-/**
- * The parameters of a [ExecuteCommandRequest](#ExecuteCommandRequest).
- */
-type ExecuteCommandParams struct {
- /**
- * The identifier of the actual command handler.
- */
- Command string `json:"command"`
- /**
- * Arguments that the command should be invoked with.
- */
- Arguments []json.RawMessage `json:"arguments,omitempty"`
- WorkDoneProgressParams
-}
-
-type ExecutionSummary = struct {
- /**
- * A strict monotonically increasing value
- * indicating the execution order of a cell
- * inside a notebook.
- */
- ExecutionOrder uint32 `json:"executionOrder"`
- /**
- * Whether the execution was successful or
- * not if known by the client.
- */
- Success bool `json:"success,omitempty"`
-}
-
-type FailureHandlingKind string
-
-/**
- * The file event type
- */
-type FileChangeType float64
-
-/**
- * Represents information on a file/folder create.
- *
- * @since 3.16.0
- */
-type FileCreate struct {
- /**
- * A file:// URI for the location of the file/folder being created.
- */
- URI string `json:"uri"`
-}
-
-/**
- * Represents information on a file/folder delete.
- *
- * @since 3.16.0
- */
-type FileDelete struct {
- /**
- * A file:// URI for the location of the file/folder being deleted.
- */
- URI string `json:"uri"`
-}
-
-/**
- * An event describing a file change.
- */
-type FileEvent struct {
- /**
- * The file's uri.
- */
- URI DocumentURI `json:"uri"`
- /**
- * The change type.
- */
- Type FileChangeType `json:"type"`
-}
-
-/**
- * Capabilities relating to events from file operations by the user in the client.
- *
- * These events do not come from the file system, they come from user operations
- * like renaming a file in the UI.
- *
- * @since 3.16.0
- */
-type FileOperationClientCapabilities struct {
- /**
- * Whether the client supports dynamic registration for file requests/notifications.
- */
- DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
- /**
- * The client has support for sending didCreateFiles notifications.
- */
- DidCreate bool `json:"didCreate,omitempty"`
- /**
- * The client has support for willCreateFiles requests.
- */
- WillCreate bool `json:"willCreate,omitempty"`
- /**
- * The client has support for sending didRenameFiles notifications.
- */
- DidRename bool `json:"didRename,omitempty"`
- /**
- * The client has support for willRenameFiles requests.
- */
- WillRename bool `json:"willRename,omitempty"`
- /**
- * The client has support for sending didDeleteFiles notifications.
- */
- DidDelete bool `json:"didDelete,omitempty"`
- /**
- * The client has support for willDeleteFiles requests.
- */
- WillDelete bool `json:"willDelete,omitempty"`
-}
-
-/**
- * A filter to describe in which file operation requests or notifications
- * the server is interested in.
- *
- * @since 3.16.0
- */
-type FileOperationFilter struct {
- /**
- * A Uri like `file` or `untitled`.
- */
- Scheme string `json:"scheme,omitempty"`
- /**
- * The actual file operation pattern.
- */
- Pattern FileOperationPattern `json:"pattern"`
-}
-
-/**
- * Options for notifications/requests for user operations on files.
- *
- * @since 3.16.0
- */
-type FileOperationOptions struct {
- /**
- * The server is interested in didCreateFiles notifications.
- */
- DidCreate FileOperationRegistrationOptions `json:"didCreate,omitempty"`
- /**
- * The server is interested in willCreateFiles requests.
- */
- WillCreate FileOperationRegistrationOptions `json:"willCreate,omitempty"`
- /**
- * The server is interested in didRenameFiles notifications.
- */
- DidRename FileOperationRegistrationOptions `json:"didRename,omitempty"`
- /**
- * The server is interested in willRenameFiles requests.
- */
- WillRename FileOperationRegistrationOptions `json:"willRename,omitempty"`
- /**
- * The server is interested in didDeleteFiles file notifications.
- */
- DidDelete FileOperationRegistrationOptions `json:"didDelete,omitempty"`
- /**
- * The server is interested in willDeleteFiles file requests.
- */
- WillDelete FileOperationRegistrationOptions `json:"willDelete,omitempty"`
-}
-
-/**
- * A pattern to describe in which file operation requests or notifications
- * the server is interested in.
- *
- * @since 3.16.0
- */
-type FileOperationPattern struct {
- /**
- * The glob pattern to match. Glob patterns can have the following syntax:
- * - `*` to match one or more characters in a path segment
- * - `?` to match on one character in a path segment
- * - `**` to match any number of path segments, including none
- * - `{}` to group sub patterns into an OR expression. (e.g. `**​/*.{ts,js}` matches all TypeScript and JavaScript files)
- * - `[]` to declare a range of characters to match in a path segment (e.g., `example.[0-9]` to match on `example.0`, `example.1`, …)
- * - `[!...]` to negate a range of characters to match in a path segment (e.g., `example.[!0-9]` to match on `example.a`, `example.b`, but not `example.0`)
- */
- Glob string `json:"glob"`
- /**
- * Whether to match files or folders with this pattern.
- *
- * Matches both if undefined.
- */
- Matches FileOperationPatternKind `json:"matches,omitempty"`
- /**
- * Additional options used during matching.
- */
- Options FileOperationPatternOptions `json:"options,omitempty"`
-}
-
-/**
- * A pattern kind describing if a glob pattern matches a file a folder or
- * both.
- *
- * @since 3.16.0
- */
-type FileOperationPatternKind string
-
-/**
- * Matching options for the file operation pattern.
- *
- * @since 3.16.0
- */
-type FileOperationPatternOptions struct {
- /**
- * The pattern should be matched ignoring casing.
- */
- IgnoreCase bool `json:"ignoreCase,omitempty"`
-}
-
-/**
- * The options to register for file operations.
- *
- * @since 3.16.0
- */
-type FileOperationRegistrationOptions struct {
- /**
- * The actual filters.
- */
- Filters []FileOperationFilter `json:"filters"`
-}
-
-/**
- * Represents information on a file/folder rename.
- *
- * @since 3.16.0
- */
-type FileRename struct {
- /**
- * A file:// URI for the original location of the file/folder being renamed.
- */
- OldURI string `json:"oldUri"`
- /**
- * A file:// URI for the new location of the file/folder being renamed.
- */
- NewURI string `json:"newUri"`
-}
-
-type FileSystemWatcher struct {
- /**
- * The glob pattern to watch. Glob patterns can have the following syntax:
- * - `*` to match one or more characters in a path segment
- * - `?` to match on one character in a path segment
- * - `**` to match any number of path segments, including none
- * - `{}` to group conditions (e.g. `**​/*.{ts,js}` matches all TypeScript and JavaScript files)
- * - `[]` to declare a range of characters to match in a path segment (e.g., `example.[0-9]` to match on `example.0`, `example.1`, …)
- * - `[!...]` to negate a range of characters to match in a path segment (e.g., `example.[!0-9]` to match on `example.a`, `example.b`, but not `example.0`)
- */
- GlobPattern string `json:"globPattern"`
- /**
- * The kind of events of interest. If omitted it defaults
- * to WatchKind.Create | WatchKind.Change | WatchKind.Delete
- * which is 7.
- */
- Kind uint32 `json:"kind,omitempty"`
-}
-
-/**
- * Represents a folding range. To be valid, start and end line must be bigger than zero and smaller
- * than the number of lines in the document. Clients are free to ignore invalid ranges.
- */
-type FoldingRange struct {
- /**
- * The zero-based start line of the range to fold. The folded area starts after the line's last character.
- * To be valid, the end must be zero or larger and smaller than the number of lines in the document.
- */
- StartLine uint32 `json:"startLine"`
- /**
- * The zero-based character offset from where the folded range starts. If not defined, defaults to the length of the start line.
- */
- StartCharacter uint32 `json:"startCharacter,omitempty"`
- /**
- * The zero-based end line of the range to fold. The folded area ends with the line's last character.
- * To be valid, the end must be zero or larger and smaller than the number of lines in the document.
- */
- EndLine uint32 `json:"endLine"`
- /**
- * The zero-based character offset before the folded range ends. If not defined, defaults to the length of the end line.
- */
- EndCharacter uint32 `json:"endCharacter,omitempty"`
- /**
- * Describes the kind of the folding range such as `comment' or 'region'. The kind
- * is used to categorize folding ranges and used by commands like 'Fold all comments'. See
- * [FoldingRangeKind](#FoldingRangeKind) for an enumeration of standardized kinds.
- */
- Kind string `json:"kind,omitempty"`
-}
-
-type FoldingRangeClientCapabilities struct {
- /**
- * Whether implementation supports dynamic registration for folding range providers. If this is set to `true`
- * the client supports the new `FoldingRangeRegistrationOptions` return value for the corresponding server
- * capability as well.
- */
- DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
- /**
- * The maximum number of folding ranges that the client prefers to receive per document. The value serves as a
- * hint, servers are free to follow the limit.
- */
- RangeLimit uint32 `json:"rangeLimit,omitempty"`
- /**
- * If set, the client signals that it only supports folding complete lines. If set, client will
- * ignore specified `startCharacter` and `endCharacter` properties in a FoldingRange.
- */
- LineFoldingOnly bool `json:"lineFoldingOnly,omitempty"`
-}
-
-/**
- * Enum of known range kinds
- */
-type FoldingRangeKind string
-
-type FoldingRangeOptions struct {
- WorkDoneProgressOptions
-}
-
-/**
- * Parameters for a [FoldingRangeRequest](#FoldingRangeRequest).
- */
-type FoldingRangeParams struct {
- /**
- * The text document.
- */
- TextDocument TextDocumentIdentifier `json:"textDocument"`
- WorkDoneProgressParams
- PartialResultParams
-}
-
-type FoldingRangeRegistrationOptions struct {
- TextDocumentRegistrationOptions
- FoldingRangeOptions
- StaticRegistrationOptions
-}
-
-/**
- * Value-object describing what options formatting should use.
- */
-type FormattingOptions struct {
- /**
- * Size of a tab in spaces.
- */
- TabSize uint32 `json:"tabSize"`
- /**
- * Prefer spaces over tabs.
- */
- InsertSpaces bool `json:"insertSpaces"`
- /**
- * Trim trailing whitespaces on a line.
- *
- * @since 3.15.0
- */
- TrimTrailingWhitespace bool `json:"trimTrailingWhitespace,omitempty"`
- /**
- * Insert a newline character at the end of the file if one does not exist.
- *
- * @since 3.15.0
- */
- InsertFinalNewline bool `json:"insertFinalNewline,omitempty"`
- /**
- * Trim all newlines after the final newline at the end of the file.
- *
- * @since 3.15.0
- */
- TrimFinalNewlines bool `json:"trimFinalNewlines,omitempty"`
-}
-
-/**
- * A diagnostic report with a full set of problems.
- *
- * @since 3.17.0 - proposed state
- */
-type FullDocumentDiagnosticReport = struct {
- /**
- * A full document diagnostic report.
- */
- Kind string `json:"kind"`
- /**
- * An optional result id. If provided it will
- * be sent on the next diagnostic request for the
- * same document.
- */
- ResultID string `json:"resultId,omitempty"`
- /**
- * The actual items.
- */
- Items []Diagnostic `json:"items"`
-}
-
-/**
- * General client capabilities.
- *
- * @since 3.16.0
- */
-type GeneralClientCapabilities struct {
- /**
- * Client capability that signals how the client
- * handles stale requests (e.g. a request
- * for which the client will not process the response
- * anymore since the information is outdated).
- *
- * @since 3.17.0
- */
- StaleRequestSupport struct {
- /**
- * The client will actively cancel the request.
- */
- Cancel bool `json:"cancel"`
- /**
- * The list of requests for which the client
- * will retry the request if it receives a
- * response with error code `ContentModified`
- */
- RetryOnContentModified []string `json:"retryOnContentModified"`
- } `json:"staleRequestSupport,omitempty"`
- /**
- * Client capabilities specific to regular expressions.
- *
- * @since 3.16.0
- */
- RegularExpressions RegularExpressionsClientCapabilities `json:"regularExpressions,omitempty"`
- /**
- * Client capabilities specific to the client's markdown parser.
- *
- * @since 3.16.0
- */
- Markdown MarkdownClientCapabilities `json:"markdown,omitempty"`
-}
-
-/**
- * The result of a hover request.
- */
-type Hover struct {
- /**
- * The hover's content
- */
- Contents MarkupContent/*MarkupContent | MarkedString | MarkedString[]*/ `json:"contents"`
- /**
- * An optional range
- */
- Range Range `json:"range,omitempty"`
-}
-
-type HoverClientCapabilities struct {
- /**
- * Whether hover supports dynamic registration.
- */
- DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
- /**
- * Client supports the follow content formats for the content
- * property. The order describes the preferred format of the client.
- */
- ContentFormat []MarkupKind `json:"contentFormat,omitempty"`
-}
-
-/**
- * Hover options.
- */
-type HoverOptions struct {
- WorkDoneProgressOptions
-}
-
-/**
- * Parameters for a [HoverRequest](#HoverRequest).
- */
-type HoverParams struct {
- TextDocumentPositionParams
- WorkDoneProgressParams
-}
-
-/**
- * @since 3.6.0
- */
-type ImplementationClientCapabilities struct {
- /**
- * Whether implementation supports dynamic registration. If this is set to `true`
- * the client supports the new `ImplementationRegistrationOptions` return value
- * for the corresponding server capability as well.
- */
- DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
- /**
- * The client supports additional metadata in the form of definition links.
- *
- * @since 3.14.0
- */
- LinkSupport bool `json:"linkSupport,omitempty"`
-}
-
-type ImplementationOptions struct {
- WorkDoneProgressOptions
-}
-
-type ImplementationParams struct {
- TextDocumentPositionParams
- WorkDoneProgressParams
- PartialResultParams
-}
-
-type ImplementationRegistrationOptions struct {
- TextDocumentRegistrationOptions
- ImplementationOptions
- StaticRegistrationOptions
-}
-
-/**
- * Known error codes for an `InitializeError`;
- */
-type InitializeError float64
-
-type InitializeParams struct {
- /**
- * The process Id of the parent process that started
- * the server.
- */
- ProcessID int32/*integer | null*/ `json:"processId"`
- /**
- * Information about the client
- *
- * @since 3.15.0
- */
- ClientInfo struct {
- /**
- * The name of the client as defined by the client.
- */
- Name string `json:"name"`
- /**
- * The client's version as defined by the client.
- */
- Version string `json:"version,omitempty"`
- } `json:"clientInfo,omitempty"`
- /**
- * The locale the client is currently showing the user interface
- * in. This must not necessarily be the locale of the operating
- * system.
- *
- * Uses IETF language tags as the value's syntax
- * (See https://en.wikipedia.org/wiki/IETF_language_tag)
- *
- * @since 3.16.0
- */
- Locale string `json:"locale,omitempty"`
- /**
- * The rootPath of the workspace. Is null
- * if no folder is open.
- *
- * @deprecated in favour of rootUri.
- */
- RootPath string/*string | null*/ `json:"rootPath,omitempty"`
- /**
- * The rootUri of the workspace. Is null if no
- * folder is open. If both `rootPath` and `rootUri` are set
- * `rootUri` wins.
- *
- * @deprecated in favour of workspaceFolders.
- */
- RootURI DocumentURI/*DocumentUri | null*/ `json:"rootUri"`
- /**
- * The capabilities provided by the client (editor or tool)
- */
- Capabilities ClientCapabilities `json:"capabilities"`
- /**
- * User provided initialization options.
- */
- InitializationOptions LSPAny `json:"initializationOptions,omitempty"`
- /**
- * The initial trace setting. If omitted trace is disabled ('off').
- */
- Trace string/* 'off' | 'messages' | 'compact' | 'verbose' */ `json:"trace,omitempty"`
- /**
- * The actual configured workspace folders.
- */
- WorkspaceFolders []WorkspaceFolder/*WorkspaceFolder[] | null*/ `json:"workspaceFolders"`
-}
-
-/**
- * The result returned from an initialize request.
- */
-type InitializeResult struct {
- /**
- * The capabilities the language server provides.
- */
- Capabilities ServerCapabilities `json:"capabilities"`
- /**
- * Information about the server.
- *
- * @since 3.15.0
- */
- ServerInfo struct {
- /**
- * The name of the server as defined by the server.
- */
- Name string `json:"name"`
- /**
- * The server's version as defined by the server.
- */
- Version string `json:"version,omitempty"`
- } `json:"serverInfo,omitempty"`
-}
-
-type InitializedParams struct {
-}
-
-/**
- * Inlay hint information.
- *
- * @since 3.17.0 - proposed state
- */
-type InlayHint = struct {
- /**
- * The position of this hint.
- */
- Position *Position `json:"position"`
- /**
- * The label of this hint. A human readable string or an array of
- * InlayHintLabelPart label parts.
- *
- * *Note* that neither the string nor the label part can be empty.
- */
- Label []InlayHintLabelPart/*string | InlayHintLabelPart[]*/ `json:"label"`
- /**
- * The kind of this hint. Can be omitted in which case the client
- * should fall back to a reasonable default.
- */
- Kind InlayHintKind `json:"kind,omitempty"`
- /**
- * The tooltip text when you hover over this item.
- */
- Tooltip string/*string | MarkupContent*/ `json:"tooltip,omitempty"`
- /**
- * Render padding before the hint.
- *
- * Note: Padding should use the editor's background color, not the
- * background color of the hint itself. That means padding can be used
- * to visually align/separate an inlay hint.
- */
- PaddingLeft bool `json:"paddingLeft,omitempty"`
- /**
- * Render padding after the hint.
- *
- * Note: Padding should use the editor's background color, not the
- * background color of the hint itself. That means padding can be used
- * to visually align/separate an inlay hint.
- */
- PaddingRight bool `json:"paddingRight,omitempty"`
-}
-
-/**
- * Inlay hint client capabilities
- *
- * @since 3.17.0 - proposed state
- */
-type InlayHintClientCapabilities = struct {
- /**
- * Whether inlay hints support dynamic registration.
- */
- DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
- /**
- * Indicates which properties a client can resolve lazily on a inlay
- * hint.
- */
- ResolveSupport struct {
- /**
- * The properties that a client can resolve lazily.
- */
- Properties []string `json:"properties"`
- } `json:"resolveSupport,omitempty"`
-}
-
-/**
- * Inlay hint kinds.
- *
- * @since 3.17.0 - proposed state
- */
-type InlayHintKind float64
-
-/**
- * An inlay hint label part allows for interactive and composite labels
- * of inlay hints.
- *
- * @since 3.17.0 - proposed state
- */
-type InlayHintLabelPart = struct {
- /**
- * The value of this label part.
- */
- Value string `json:"value"`
- /**
- * The tooltip text when you hover over this label part. Depending on
- * the client capability `inlayHint.resolveSupport` clients might resolve
- * this property late using the resolve request.
- */
- Tooltip string/*string | MarkupContent*/ `json:"tooltip,omitempty"`
- /**
- * An optional source code location that represents this
- * label part.
- *
- * The editor will use this location for the hover and for code navigation
- * features: This part will become a clickable link that resolves to the
- * definition of the symbol at the given location (not necessarily the
- * location itself), it shows the hover that shows at the given location,
- * and it shows a context menu with further code navigation commands.
- *
- * Depending on the client capability `inlayHint.resolveSupport` clients
- * might resolve this property late using the resolve request.
- */
- Location *Location `json:"location,omitempty"`
- /**
- * An optional command for this label part.
- *
- * Depending on the client capability `inlayHint.resolveSupport` clients
- * might resolve this property late using the resolve request.
- */
- Command *Command `json:"command,omitempty"`
-}
-
-/**
- * Inlay hint options used during static registration.
- *
- * @since 3.17.0 - proposed state
- */
-type InlayHintOptions struct {
- WorkDoneProgress bool `json:"workDoneProgress,omitempty"`
- /**
- * The server provides support to resolve additional
- * information for an inlay hint item.
- */
- ResolveProvider bool `json:"resolveProvider,omitempty"`
-}
-
-/**
- * A parameter literal used in inlay hints requests.
- *
- * @since 3.17.0 - proposed state
- */
-type InlayHintParams struct {
- /**
- * An optional token that a server can use to report work done progress.
- */
- WorkDoneToken ProgressToken `json:"workDoneToken,omitempty"`
- /**
- * The text document.
- */
- TextDocument TextDocumentIdentifier `json:"textDocument"`
- /**
- * The visible document range for which inlay hints should be computed.
- */
- ViewPort Range `json:"viewPort"`
-}
-
-/**
- * Inlay hint options used during static or dynamic registration.
- *
- * @since 3.17.0 - proposed state
- */
-type InlayHintRegistrationOptions struct {
- WorkDoneProgress bool `json:"workDoneProgress,omitempty"`
- /**
- * The server provides support to resolve additional
- * information for an inlay hint item.
- */
- ResolveProvider bool `json:"resolveProvider,omitempty"`
- /**
- * A document selector to identify the scope of the registration. If set to null
- * the document selector provided on the client side will be used.
- */
- DocumentSelector DocumentSelector/*DocumentSelector | null*/ `json:"documentSelector"`
- /**
- * The id used to register the request. The id can be used to deregister
- * the request again. See also Registration#id.
- */
- ID string `json:"id,omitempty"`
-}
-
-/**
- * Client workspace capabilities specific to inlay hints.
- *
- * @since 3.17.0 - proposed state
- */
-type InlayHintWorkspaceClientCapabilities = struct {
- /**
- * Whether the client implementation supports a refresh request sent from
- * the server to the client.
- *
- * Note that this event is global and will force the client to refresh all
- * inlay hints currently shown. It should be used with absolute care and
- * is useful for situation where a server for example detects a project wide
- * change that requires such a calculation.
- */
- RefreshSupport bool `json:"refreshSupport,omitempty"`
-}
-
-/**
- * Inline value information can be provided by different means:
- * - directly as a text value (class InlineValueText).
- * - as a name to use for a variable lookup (class InlineValueVariableLookup)
- * - as an evaluatable expression (class InlineValueEvaluatableExpression)
- * The InlineValue types combines all inline value types into one type.
- *
- * @since 3.17.0 - proposed state
- */
-type InlineValue = interface{} /* InlineValueText | InlineValueVariableLookup | InlineValueEvaluatableExpression*/
-
-/**
- * Client capabilities specific to inline values.
- *
- * @since 3.17.0 - proposed state
- */
-type InlineValueClientCapabilities = struct {
- /**
- * Whether implementation supports dynamic registration for inline value providers.
- */
- DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
-}
-
-/**
- * @since 3.17.0 - proposed state
- */
-type InlineValueContext = struct {
- /**
- * The document range where execution has stopped.
- * Typically the end position of the range denotes the line where the inline values are shown.
- */
- StoppedLocation *Range `json:"stoppedLocation"`
-}
-
-/**
- * Provide an inline value through an expression evaluation.
- * If only a range is specified, the expression will be extracted from the underlying document.
- * An optional expression can be used to override the extracted expression.
- *
- * @since 3.17.0 - proposed state
- */
-type InlineValueEvaluatableExpression = struct {
- /**
- * The document range for which the inline value applies.
- * The range is used to extract the evaluatable expression from the underlying document.
- */
- Range *Range `json:"range"`
- /**
- * If specified the expression overrides the extracted expression.
- */
- Expression string `json:"expression,omitempty"`
-}
-
-/**
- * Inline value options used during static registration.
- *
- * @since 3.17.0 - proposed state
- */
-type InlineValueOptions = WorkDoneProgressOptions
-
-/**
- * A parameter literal used in inline value requests.
- *
- * @since 3.17.0 - proposed state
- */
-type InlineValueParams struct {
- /**
- * An optional token that a server can use to report work done progress.
- */
- WorkDoneToken ProgressToken `json:"workDoneToken,omitempty"`
- /**
- * The text document.
- */
- TextDocument TextDocumentIdentifier `json:"textDocument"`
- /**
- * The visible document range for which inline values should be computed.
- */
- ViewPort Range `json:"viewPort"`
- /**
- * Additional information about the context in which inline values were
- * requested.
- */
- Context InlineValueContext `json:"context"`
-}
-
-/**
- * Inline value options used during static or dynamic registration.
- *
- * @since 3.17.0 - proposed state
- */
-type InlineValueRegistrationOptions struct {
- /**
- * A document selector to identify the scope of the registration. If set to null
- * the document selector provided on the client side will be used.
- */
- DocumentSelector DocumentSelector/*DocumentSelector | null*/ `json:"documentSelector"`
- /**
- * The id used to register the request. The id can be used to deregister
- * the request again. See also Registration#id.
- */
- ID string `json:"id,omitempty"`
-}
-
-/**
- * Provide inline value as text.
- *
- * @since 3.17.0 - proposed state
- */
-type InlineValueText = struct {
- /**
- * The document range for which the inline value applies.
- */
- Range *Range `json:"range"`
- /**
- * The text of the inline value.
- */
- Text string `json:"text"`
-}
-
-/**
- * Provide inline value through a variable lookup.
- * If only a range is specified, the variable name will be extracted from the underlying document.
- * An optional variable name can be used to override the extracted name.
- *
- * @since 3.17.0 - proposed state
- */
-type InlineValueVariableLookup = struct {
- /**
- * The document range for which the inline value applies.
- * The range is used to extract the variable name from the underlying document.
- */
- Range *Range `json:"range"`
- /**
- * If specified the name of the variable to look up.
- */
- VariableName string `json:"variableName,omitempty"`
- /**
- * How to perform the lookup.
- */
- CaseSensitiveLookup bool `json:"caseSensitiveLookup"`
-}
-
-/**
- * Client workspace capabilities specific to inline values.
- *
- * @since 3.17.0 - proposed state
- */
-type InlineValueWorkspaceClientCapabilities = struct {
- /**
- * Whether the client implementation supports a refresh request sent from the
- * server to the client.
- *
- * Note that this event is global and will force the client to refresh all
- * inline values currently shown. It should be used with absolute care and is
- * useful for situation where a server for example detects a project wide
- * change that requires such a calculation.
- */
- RefreshSupport bool `json:"refreshSupport,omitempty"`
-}
-
-/**
- * A special text edit to provide an insert and a replace operation.
- *
- * @since 3.16.0
- */
-type InsertReplaceEdit struct {
- /**
- * The string to be inserted.
- */
- NewText string `json:"newText"`
- /**
- * The range if the insert is requested
- */
- Insert Range `json:"insert"`
- /**
- * The range if the replace is requested.
- */
- Replace Range `json:"replace"`
-}
-
-/**
- * Defines whether the insert text in a completion item should be interpreted as
- * plain text or a snippet.
- */
-type InsertTextFormat float64
-
-/**
- * How whitespace and indentation is handled during completion
- * item insertion.
- *
- * @since 3.16.0
- */
-type InsertTextMode float64
-
-/**
- * The LSP any type
- *
- * @since 3.17.0
- */
-type LSPAny = interface{} /* LSPObject | LSPArray | string | int32 | uint32 | Decimal | bool | float64*/
-
-/**
- * LSP arrays.
- *
- * @since 3.17.0
- */
-type LSPArray = []LSPAny
-
-/**
- * LSP object definition.
- *
- * @since 3.17.0
- */
-type LSPObject = map[string]interface{} /*[key: string]: LSPAny*/
-
-/**
- * Client capabilities for the linked editing range request.
- *
- * @since 3.16.0
- */
-type LinkedEditingRangeClientCapabilities struct {
- /**
- * Whether implementation supports dynamic registration. If this is set to `true`
- * the client supports the new `(TextDocumentRegistrationOptions & StaticRegistrationOptions)`
- * return value for the corresponding server capability as well.
- */
- DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
-}
-
-type LinkedEditingRangeOptions struct {
- WorkDoneProgressOptions
-}
-
-type LinkedEditingRangeParams struct {
- TextDocumentPositionParams
- WorkDoneProgressParams
-}
-
-type LinkedEditingRangeRegistrationOptions struct {
- TextDocumentRegistrationOptions
- LinkedEditingRangeOptions
- StaticRegistrationOptions
-}
-
-/**
- * The result of a linked editing range request.
- *
- * @since 3.16.0
- */
-type LinkedEditingRanges struct {
- /**
- * A list of ranges that can be edited together. The ranges must have
- * identical length and contain identical text content. The ranges cannot overlap.
- */
- Ranges []Range `json:"ranges"`
- /**
- * An optional word pattern (regular expression) that describes valid contents for
- * the given ranges. If no pattern is provided, the client configuration's word
- * pattern will be used.
- */
- WordPattern string `json:"wordPattern,omitempty"`
-}
-
-/**
- * Represents a location inside a resource, such as a line
- * inside a text file.
- */
-type Location struct {
- URI DocumentURI `json:"uri"`
- Range Range `json:"range"`
-}
-
-/**
- * Represents the connection of two locations. Provides additional metadata over normal [locations](#Location),
- * including an origin range.
- */
-type LocationLink struct {
- /**
- * Span of the origin of this link.
- *
- * Used as the underlined span for mouse definition hover. Defaults to the word range at
- * the definition position.
- */
- OriginSelectionRange Range `json:"originSelectionRange,omitempty"`
- /**
- * The target resource identifier of this link.
- */
- TargetURI DocumentURI `json:"targetUri"`
- /**
- * The full target range of this link. If the target for example is a symbol then target range is the
- * range enclosing this symbol not including leading/trailing whitespace but everything else
- * like comments. This information is typically used to highlight the range in the editor.
- */
- TargetRange Range `json:"targetRange"`
- /**
- * The range that should be selected and revealed when this link is being followed, e.g the name of a function.
- * Must be contained by the the `targetRange`. See also `DocumentSymbol#range`
- */
- TargetSelectionRange Range `json:"targetSelectionRange"`
-}
-
-/**
- * The log message parameters.
- */
-type LogMessageParams struct {
- /**
- * The message type. See {@link MessageType}
- */
- Type MessageType `json:"type"`
- /**
- * The actual message
- */
- Message string `json:"message"`
-}
-
-type LogTraceParams struct {
- Message string `json:"message"`
- Verbose string `json:"verbose,omitempty"`
-}
-
-/**
- * Client capabilities specific to the used markdown parser.
- *
- * @since 3.16.0
- */
-type MarkdownClientCapabilities struct {
- /**
- * The name of the parser.
- */
- Parser string `json:"parser"`
- /**
- * The version of the parser.
- */
- Version string `json:"version,omitempty"`
- /**
- * A list of HTML tags that the client allows / supports in
- * Markdown.
- *
- * @since 3.17.0
- */
- AllowedTags []string `json:"allowedTags,omitempty"`
-}
-
-/**
- * MarkedString can be used to render human readable text. It is either a markdown string
- * or a code-block that provides a language and a code snippet. The language identifier
- * is semantically equal to the optional language identifier in fenced code blocks in GitHub
- * issues. See https://help.github.com/articles/creating-and-highlighting-code-blocks/#syntax-highlighting
- *
- * The pair of a language and a value is an equivalent to markdown:
- * ```${language}
- * ${value}
- * ```
- *
- * Note that markdown strings will be sanitized - that means html will be escaped.
- * @deprecated use MarkupContent instead.
- */
-type MarkedString = string /*string | { language: string; value: string }*/
-
-/**
- * A `MarkupContent` literal represents a string value which content is interpreted base on its
- * kind flag. Currently the protocol supports `plaintext` and `markdown` as markup kinds.
- *
- * If the kind is `markdown` then the value can contain fenced code blocks like in GitHub issues.
- * See https://help.github.com/articles/creating-and-highlighting-code-blocks/#syntax-highlighting
- *
- * Here is an example how such a string can be constructed using JavaScript / TypeScript:
- * ```ts
- * let markdown: MarkdownContent = {
- * kind: MarkupKind.Markdown,
- * value: [
- * '# Header',
- * 'Some text',
- * '```typescript',
- * 'someCode();',
- * '```'
- * ].join('\n')
- * };
- * ```
- *
- * *Please Note* that clients might sanitize the return markdown. A client could decide to
- * remove HTML from the markdown to avoid script execution.
- */
-type MarkupContent struct {
- /**
- * The type of the Markup
- */
- Kind MarkupKind `json:"kind"`
- /**
- * The content itself
- */
- Value string `json:"value"`
-}
-
-/**
- * Describes the content type that a client supports in various
- * result literals like `Hover`, `ParameterInfo` or `CompletionItem`.
- *
- * Please note that `MarkupKinds` must not start with a `$`. This kinds
- * are reserved for internal usage.
- */
-type MarkupKind string
-
-type MessageActionItem struct {
- /**
- * A short title like 'Retry', 'Open Log' etc.
- */
- Title string `json:"title"`
-}
-
-/**
- * The message type
- */
-type MessageType float64
-
-/**
- * Moniker definition to match LSIF 0.5 moniker definition.
- *
- * @since 3.16.0
- */
-type Moniker struct {
- /**
- * The scheme of the moniker. For example tsc or .Net
- */
- Scheme string `json:"scheme"`
- /**
- * The identifier of the moniker. The value is opaque in LSIF however
- * schema owners are allowed to define the structure if they want.
- */
- Identifier string `json:"identifier"`
- /**
- * The scope in which the moniker is unique
- */
- Unique UniquenessLevel `json:"unique"`
- /**
- * The moniker kind if known.
- */
- Kind MonikerKind `json:"kind,omitempty"`
-}
-
-/**
- * Client capabilities specific to the moniker request.
- *
- * @since 3.16.0
- */
-type MonikerClientCapabilities struct {
- /**
- * Whether moniker supports dynamic registration. If this is set to `true`
- * the client supports the new `MonikerRegistrationOptions` return value
- * for the corresponding server capability as well.
- */
- DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
-}
-
-/**
- * The moniker kind.
- *
- * @since 3.16.0
- */
-type MonikerKind string
-
-type MonikerOptions struct {
- WorkDoneProgressOptions
-}
-
-type MonikerParams struct {
- TextDocumentPositionParams
- WorkDoneProgressParams
- PartialResultParams
-}
-
-type MonikerRegistrationOptions struct {
- TextDocumentRegistrationOptions
- MonikerOptions
-}
-
-/**
- * A notebook cell.
- *
- * A cell's document URI must be unique across ALL notebook
- * cells and can therefore be used to uniquely identify a
- * notebook cell or the cell's text document.
- *
- * @since 3.17.0 - proposed state
- */
-type NotebookCell = struct {
- /**
- * The cell's kind
- */
- Kind NotebookCellKind `json:"kind"`
- /**
- * The URI of the cell's text document
- * content.
- */
- Document DocumentURI `json:"document"`
- /**
- * Additional metadata stored with the cell.
- */
- Metadata LSPObject `json:"metadata,omitempty"`
- /**
- * Additional execution summary information
- * if supported by the client.
- */
- ExecutionSummary ExecutionSummary `json:"executionSummary,omitempty"`
-}
-
-/**
- * A change describing how to move a `NotebookCell`
- * array from state S to S'.
- *
- * @since 3.17.0 - proposed state
- */
-type NotebookCellArrayChange = struct {
- /**
- * The start oftest of the cell that changed.
- */
- Start uint32 `json:"start"`
- /**
- * The deleted cells
- */
- DeleteCount uint32 `json:"deleteCount"`
- /**
- * The new cells, if any
- */
- Cells []NotebookCell `json:"cells,omitempty"`
-}
-
-/**
- * A notebook cell kind.
- *
- * @since 3.17.0 - proposed state
- */
-type NotebookCellKind float64
-
-/**
- * A notebook cell text document filter denotes a cell text
- * document by different properties.
- *
- * @since 3.17.0 - proposed state.
- */
-type NotebookCellTextDocumentFilter = struct {
- /**
- * A filter that matches against the notebook
- * containing the notebook cell.
- */
- NotebookDocument NotebookDocumentFilter `json:"notebookDocument"`
- /**
- * A language id like `python`.
- *
- * Will be matched against the language id of the
- * notebook cell document.
- */
- CellLanguage string `json:"cellLanguage,omitempty"`
-}
-
-/**
- * A notebook document.
- *
- * @since 3.17.0 - proposed state
- */
-type NotebookDocument = struct {
- /**
- * The notebook document's uri.
- */
- URI URI `json:"uri"`
- /**
- * The type of the notebook.
- */
- NotebookType string `json:"notebookType"`
- /**
- * The version number of this document (it will increase after each
- * change, including undo/redo).
- */
- Version int32 `json:"version"`
- /**
- * Additional metadata stored with the notebook
- * document.
- */
- Metadata LSPObject `json:"metadata,omitempty"`
- /**
- * The cells of a notebook.
- */
- Cells []NotebookCell `json:"cells"`
-}
-
-/**
- * A change event for a notebook document.
- *
- * @since 3.17.0 - proposed state
- */
-type NotebookDocumentChangeEvent = struct {
- /**
- * The changed meta data if any.
- */
- Metadata LSPObject `json:"metadata,omitempty"`
- /**
- * Changes to cells
- */
- Cells struct {
- /**
- * Changes to the cell structure to add or
- * remove cells.
- */
- Structure struct {
- /**
- * The change to the cell array.
- */
- Array NotebookCellArrayChange `json:"array"`
- /**
- * Additional opened cell text documents.
- */
- DidOpen []TextDocumentItem `json:"didOpen,omitempty"`
- /**
- * Additional closed cell text documents.
- */
- DidClose []TextDocumentIdentifier `json:"didClose,omitempty"`
- } `json:"structure,omitempty"`
- /**
- * Changes to notebook cells properties like its
- * kind, execution summary or metadata.
- */
- Data []NotebookCell `json:"data,omitempty"`
- /**
- * Changes to the text content of notebook cells.
- */
- TextContent []struct {
- Document VersionedTextDocumentIdentifier `json:"document"`
- Changes []TextDocumentContentChangeEvent `json:"changes"`
- } `json:"textContent,omitempty"`
- } `json:"cells,omitempty"`
-}
-
-/**
- * A notebook document filter denotes a notebook document by
- * different properties.
- *
- * @since 3.17.0 - proposed state.
- */
-type NotebookDocumentFilter = struct {
- /** The type of the enclosing notebook. */
- NotebookType string `json:"notebookType"`
- /** A Uri [scheme](#Uri.scheme), like `file` or `untitled`.
- * Will be matched against the URI of the notebook. */
- Scheme string `json:"scheme,omitempty"`
- /** A glob pattern, like `*.ipynb`.
- * Will be matched against the notebooks` URI path section.*/
- Pattern string `json:"pattern,omitempty"`
-}
-
-/**
- * A literal to identify a notebook document in the client.
- *
- * @since 3.17.0 - proposed state
- */
-type NotebookDocumentIdentifier = struct {
- /**
- * The notebook document's uri.
- */
- URI URI `json:"uri"`
-}
-
-/**
- * A text document identifier to optionally denote a specific version of a text document.
- */
-type OptionalVersionedTextDocumentIdentifier struct {
- /**
- * The version number of this document. If a versioned text document identifier
- * is sent from the server to the client and the file is not open in the editor
- * (the server has not received an open notification before) the server can send
- * `null` to indicate that the version is unknown and the content on disk is the
- * truth (as specified with document content ownership).
- */
- Version int32/*integer | null*/ `json:"version"`
- TextDocumentIdentifier
-}
-
-/**
- * Represents a parameter of a callable-signature. A parameter can
- * have a label and a doc-comment.
- */
-type ParameterInformation struct {
- /**
- * The label of this parameter information.
- *
- * Either a string or an inclusive start and exclusive end offsets within its containing
- * signature label. (see SignatureInformation.label). The offsets are based on a UTF-16
- * string representation as `Position` and `Range` does.
- *
- * *Note*: a label of type string should be a substring of its containing signature label.
- * Its intended use case is to highlight the parameter label part in the `SignatureInformation.label`.
- */
- Label string/*string | [uinteger, uinteger]*/ `json:"label"`
- /**
- * The human-readable doc-comment of this signature. Will be shown
- * in the UI but can be omitted.
- */
- Documentation string/*string | MarkupContent*/ `json:"documentation,omitempty"`
-}
-
-type PartialResultParams struct {
- /**
- * An optional token that a server can use to report partial results (e.g. streaming) to
- * the client.
- */
- PartialResultToken ProgressToken `json:"partialResultToken,omitempty"`
-}
-
-/**
- * Position in a text document expressed as zero-based line and character offset.
- * The offsets are based on a UTF-16 string representation. So a string of the form
- * `a𐐀b` the character offset of the character `a` is 0, the character offset of `𐐀`
- * is 1 and the character offset of b is 3 since `𐐀` is represented using two code
- * units in UTF-16.
- *
- * Positions are line end character agnostic. So you can not specify a position that
- * denotes `\r|\n` or `\n|` where `|` represents the character offset.
- */
-type Position struct {
- /**
- * Line position in a document (zero-based).
- */
- Line uint32 `json:"line"`
- /**
- * Character offset on a line in a document (zero-based). Assuming that the line is
- * represented as a string, the `character` value represents the gap between the
- * `character` and `character + 1`.
- *
- * If the character value is greater than the line length it defaults back to the
- * line length.
- */
- Character uint32 `json:"character"`
-}
-
-type PrepareRenameParams struct {
- TextDocumentPositionParams
- WorkDoneProgressParams
-}
-
-type PrepareSupportDefaultBehavior = interface{}
-
-/**
- * A previous result id in a workspace pull request.
- *
- * @since 3.17.0 - proposed state
- */
-type PreviousResultID = struct {
- /**
- * The URI for which the client knowns a
- * result id.
- */
- URI DocumentURI `json:"uri"`
- /**
- * The value of the previous result id.
- */
- Value string `json:"value"`
-}
-
-type ProgressParams struct {
- /**
- * The progress token provided by the client or server.
- */
- Token ProgressToken `json:"token"`
- /**
- * The progress data.
- */
- Value interface{} `json:"value"`
-}
-
-type ProgressToken = interface{} /*number | string*/
-
-/**
- * The publish diagnostic client capabilities.
- */
-type PublishDiagnosticsClientCapabilities struct {
- /**
- * Whether the clients accepts diagnostics with related information.
- */
- RelatedInformation bool `json:"relatedInformation,omitempty"`
- /**
- * Client supports the tag property to provide meta data about a diagnostic.
- * Clients supporting tags have to handle unknown tags gracefully.
- *
- * @since 3.15.0
- */
- TagSupport struct {
- /**
- * The tags supported by the client.
- */
- ValueSet []DiagnosticTag `json:"valueSet"`
- } `json:"tagSupport,omitempty"`
- /**
- * Whether the client interprets the version property of the
- * `textDocument/publishDiagnostics` notification`s parameter.
- *
- * @since 3.15.0
- */
- VersionSupport bool `json:"versionSupport,omitempty"`
- /**
- * Client supports a codeDescription property
- *
- * @since 3.16.0
- */
- CodeDescriptionSupport bool `json:"codeDescriptionSupport,omitempty"`
- /**
- * Whether code action supports the `data` property which is
- * preserved between a `textDocument/publishDiagnostics` and
- * `textDocument/codeAction` request.
- *
- * @since 3.16.0
- */
- DataSupport bool `json:"dataSupport,omitempty"`
-}
-
-/**
- * The publish diagnostic notification's parameters.
- */
-type PublishDiagnosticsParams struct {
- /**
- * The URI for which diagnostic information is reported.
- */
- URI DocumentURI `json:"uri"`
- /**
- * Optional the version number of the document the diagnostics are published for.
- *
- * @since 3.15.0
- */
- Version int32 `json:"version,omitempty"`
- /**
- * An array of diagnostic information items.
- */
- Diagnostics []Diagnostic `json:"diagnostics"`
-}
-
-/**
- * A range in a text document expressed as (zero-based) start and end positions.
- *
- * If you want to specify a range that contains a line including the line ending
- * character(s) then use an end position denoting the start of the next line.
- * For example:
- * ```ts
- * {
- * start: { line: 5, character: 23 }
- * end : { line 6, character : 0 }
- * }
- * ```
- */
-type Range struct {
- /**
- * The range's start position
- */
- Start Position `json:"start"`
- /**
- * The range's end position.
- */
- End Position `json:"end"`
-}
-
-/**
- * Client Capabilities for a [ReferencesRequest](#ReferencesRequest).
- */
-type ReferenceClientCapabilities struct {
- /**
- * Whether references supports dynamic registration.
- */
- DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
-}
-
-/**
- * Value-object that contains additional information when
- * requesting references.
- */
-type ReferenceContext struct {
- /**
- * Include the declaration of the current symbol.
- */
- IncludeDeclaration bool `json:"includeDeclaration"`
-}
-
-/**
- * Reference options.
- */
-type ReferenceOptions struct {
- WorkDoneProgressOptions
-}
-
-/**
- * Parameters for a [ReferencesRequest](#ReferencesRequest).
- */
-type ReferenceParams struct {
- Context ReferenceContext `json:"context"`
- TextDocumentPositionParams
- WorkDoneProgressParams
- PartialResultParams
-}
-
-/**
- * General parameters to to register for an notification or to register a provider.
- */
-type Registration struct {
- /**
- * The id used to register the request. The id can be used to deregister
- * the request again.
- */
- ID string `json:"id"`
- /**
- * The method to register for.
- */
- Method string `json:"method"`
- /**
- * Options necessary for the registration.
- */
- RegisterOptions LSPAny `json:"registerOptions,omitempty"`
-}
-
-type RegistrationParams struct {
- Registrations []Registration `json:"registrations"`
-}
-
-/**
- * Client capabilities specific to regular expressions.
- *
- * @since 3.16.0
- */
-type RegularExpressionsClientCapabilities struct {
- /**
- * The engine's name.
- */
- Engine string `json:"engine"`
- /**
- * The engine's version.
- */
- Version string `json:"version,omitempty"`
-}
-
-/**
- * A full diagnostic report with a set of related documents.
- *
- * @since 3.17.0 - proposed state
- */
-type RelatedFullDocumentDiagnosticReport struct {
- /**
- * Diagnostics of related documents. This information is useful
- * in programming languages where code in a file A can generate
- * diagnostics in a file B which A depends on. An example of
- * such a language is C/C++ where marco definitions in a file
- * a.cpp and result in errors in a header file b.hpp.
- *
- * @since 3.17.0 - proposed state
- */
- RelatedDocuments map[string]interface{} /*[uri: string ** DocumentUri *]: FullDocumentDiagnosticReport | UnchangedDocumentDiagnosticReport;*/ `json:"relatedDocuments,omitempty"`
-}
-
-/**
- * An unchanged diagnostic report with a set of related documents.
- *
- * @since 3.17.0 - proposed state
- */
-type RelatedUnchangedDocumentDiagnosticReport struct {
- /**
- * Diagnostics of related documents. This information is useful
- * in programming languages where code in a file A can generate
- * diagnostics in a file B which A depends on. An example of
- * such a language is C/C++ where marco definitions in a file
- * a.cpp and result in errors in a header file b.hpp.
- *
- * @since 3.17.0 - proposed state
- */
- RelatedDocuments map[string]interface{} /*[uri: string ** DocumentUri *]: FullDocumentDiagnosticReport | UnchangedDocumentDiagnosticReport;*/ `json:"relatedDocuments,omitempty"`
-}
-
-type RenameClientCapabilities struct {
- /**
- * Whether rename supports dynamic registration.
- */
- DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
- /**
- * Client supports testing for validity of rename operations
- * before execution.
- *
- * @since 3.12.0
- */
- PrepareSupport bool `json:"prepareSupport,omitempty"`
- /**
- * Client supports the default behavior result.
- *
- * The value indicates the default behavior used by the
- * client.
- *
- * @since 3.16.0
- */
- PrepareSupportDefaultBehavior PrepareSupportDefaultBehavior `json:"prepareSupportDefaultBehavior,omitempty"`
- /**
- * Whether th client honors the change annotations in
- * text edits and resource operations returned via the
- * rename request's workspace edit by for example presenting
- * the workspace edit in the user interface and asking
- * for confirmation.
- *
- * @since 3.16.0
- */
- HonorsChangeAnnotations bool `json:"honorsChangeAnnotations,omitempty"`
-}
-
-/**
- * Rename file operation
- */
-type RenameFile struct {
- /**
- * A rename
- */
- Kind string `json:"kind"`
- /**
- * The old (existing) location.
- */
- OldURI DocumentURI `json:"oldUri"`
- /**
- * The new location.
- */
- NewURI DocumentURI `json:"newUri"`
- /**
- * Rename options.
- */
- Options RenameFileOptions `json:"options,omitempty"`
- ResourceOperation
-}
-
-/**
- * Rename file options
- */
-type RenameFileOptions struct {
- /**
- * Overwrite target if existing. Overwrite wins over `ignoreIfExists`
- */
- Overwrite bool `json:"overwrite,omitempty"`
- /**
- * Ignores if target exists.
- */
- IgnoreIfExists bool `json:"ignoreIfExists,omitempty"`
-}
-
-/**
- * The parameters sent in file rename requests/notifications.
- *
- * @since 3.16.0
- */
-type RenameFilesParams struct {
- /**
- * An array of all files/folders renamed in this operation. When a folder is renamed, only
- * the folder will be included, and not its children.
- */
- Files []FileRename `json:"files"`
-}
-
-/**
- * Provider options for a [RenameRequest](#RenameRequest).
- */
-type RenameOptions struct {
- /**
- * Renames should be checked and tested before being executed.
- *
- * @since version 3.12.0
- */
- PrepareProvider bool `json:"prepareProvider,omitempty"`
- WorkDoneProgressOptions
-}
-
-/**
- * The parameters of a [RenameRequest](#RenameRequest).
- */
-type RenameParams struct {
- /**
- * The document to rename.
- */
- TextDocument TextDocumentIdentifier `json:"textDocument"`
- /**
- * The position at which this request was sent.
- */
- Position Position `json:"position"`
- /**
- * The new name of the symbol. If the given name is not valid the
- * request must return a [ResponseError](#ResponseError) with an
- * appropriate message set.
- */
- NewName string `json:"newName"`
- WorkDoneProgressParams
-}
-
-/**
- * A generic resource operation.
- */
-type ResourceOperation struct {
- /**
- * The resource operation kind.
- */
- Kind string `json:"kind"`
- /**
- * An optional annotation identifier describing the operation.
- *
- * @since 3.16.0
- */
- AnnotationID ChangeAnnotationIdentifier `json:"annotationId,omitempty"`
-}
-
-type ResourceOperationKind string
-
-/**
- * Save options.
- */
-type SaveOptions struct {
- /**
- * The client is supposed to include the content on save.
- */
- IncludeText bool `json:"includeText,omitempty"`
-}
-
-/**
- * A selection range represents a part of a selection hierarchy. A selection range
- * may have a parent selection range that contains it.
- */
-type SelectionRange struct {
- /**
- * The [range](#Range) of this selection range.
- */
- Range Range `json:"range"`
- /**
- * The parent selection range containing this range. Therefore `parent.range` must contain `this.range`.
- */
- Parent *SelectionRange `json:"parent,omitempty"`
-}
-
-type SelectionRangeClientCapabilities struct {
- /**
- * Whether implementation supports dynamic registration for selection range providers. If this is set to `true`
- * the client supports the new `SelectionRangeRegistrationOptions` return value for the corresponding server
- * capability as well.
- */
- DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
-}
-
-type SelectionRangeOptions struct {
- WorkDoneProgressOptions
-}
-
-/**
- * A parameter literal used in selection range requests.
- */
-type SelectionRangeParams struct {
- /**
- * The text document.
- */
- TextDocument TextDocumentIdentifier `json:"textDocument"`
- /**
- * The positions inside the text document.
- */
- Positions []Position `json:"positions"`
- WorkDoneProgressParams
- PartialResultParams
-}
-
-type SelectionRangeRegistrationOptions struct {
- SelectionRangeOptions
- TextDocumentRegistrationOptions
- StaticRegistrationOptions
-}
-
-/**
- * @since 3.16.0
- */
-type SemanticTokens struct {
- /**
- * An optional result id. If provided and clients support delta updating
- * the client will include the result id in the next semantic token request.
- * A server can then instead of computing all semantic tokens again simply
- * send a delta.
- */
- ResultID string `json:"resultId,omitempty"`
- /**
- * The actual tokens.
- */
- Data []uint32 `json:"data"`
-}
-
-/**
- * @since 3.16.0
- */
-type SemanticTokensClientCapabilities struct {
- /**
- * Whether implementation supports dynamic registration. If this is set to `true`
- * the client supports the new `(TextDocumentRegistrationOptions & StaticRegistrationOptions)`
- * return value for the corresponding server capability as well.
- */
- DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
- /**
- * Which requests the client supports and might send to the server
- * depending on the server's capability. Please note that clients might not
- * show semantic tokens or degrade some of the user experience if a range
- * or full request is advertised by the client but not provided by the
- * server. If for example the client capability `requests.full` and
- * `request.range` are both set to true but the server only provides a
- * range provider the client might not render a minimap correctly or might
- * even decide to not show any semantic tokens at all.
- */
- Requests struct {
- /**
- * The client will send the `textDocument/semanticTokens/range` request if
- * the server provides a corresponding handler.
- */
- Range bool/*boolean | { }*/ `json:"range,omitempty"`
- /**
- * The client will send the `textDocument/semanticTokens/full` request if
- * the server provides a corresponding handler.
- */
- Full interface{}/*boolean | <elided struct>*/ `json:"full,omitempty"`
- } `json:"requests"`
- /**
- * The token types that the client supports.
- */
- TokenTypes []string `json:"tokenTypes"`
- /**
- * The token modifiers that the client supports.
- */
- TokenModifiers []string `json:"tokenModifiers"`
- /**
- * The token formats the clients supports.
- */
- Formats []TokenFormat `json:"formats"`
- /**
- * Whether the client supports tokens that can overlap each other.
- */
- OverlappingTokenSupport bool `json:"overlappingTokenSupport,omitempty"`
- /**
- * Whether the client supports tokens that can span multiple lines.
- */
- MultilineTokenSupport bool `json:"multilineTokenSupport,omitempty"`
- /**
- * Whether the client allows the server to actively cancel a
- * semantic token request, e.g. supports returning
- * LSPErrorCodes.ServerCancelled. If a server does the client
- * needs to retrigger the request.
- *
- * @since 3.17.0
- */
- ServerCancelSupport bool `json:"serverCancelSupport,omitempty"`
- /**
- * Whether the client uses semantic tokens to augment existing
- * syntax tokens. If set to `true` client side created syntax
- * tokens and semantic tokens are both used for colorization. If
- * set to `false` the client only uses the returned semantic tokens
- * for colorization.
- *
- * If the value is `undefined` then the client behavior is not
- * specified.
- *
- * @since 3.17.0
- */
- AugmentsSyntaxTokens bool `json:"augmentsSyntaxTokens,omitempty"`
-}
-
-/**
- * @since 3.16.0
- */
-type SemanticTokensDelta struct {
- ResultID string `json:"resultId,omitempty"`
- /**
- * The semantic token edits to transform a previous result into a new result.
- */
- Edits []SemanticTokensEdit `json:"edits"`
-}
-
-/**
- * @since 3.16.0
- */
-type SemanticTokensDeltaParams struct {
- /**
- * The text document.
- */
- TextDocument TextDocumentIdentifier `json:"textDocument"`
- /**
- * The result id of a previous response. The result Id can either point to a full response
- * or a delta response depending on what was received last.
- */
- PreviousResultID string `json:"previousResultId"`
- WorkDoneProgressParams
- PartialResultParams
-}
-
-/**
- * @since 3.16.0
- */
-type SemanticTokensEdit struct {
- /**
- * The start offset of the edit.
- */
- Start uint32 `json:"start"`
- /**
- * The count of elements to remove.
- */
- DeleteCount uint32 `json:"deleteCount"`
- /**
- * The elements to insert.
- */
- Data []uint32 `json:"data,omitempty"`
-}
-
-/**
- * @since 3.16.0
- */
-type SemanticTokensLegend struct {
- /**
- * The token types a server uses.
- */
- TokenTypes []string `json:"tokenTypes"`
- /**
- * The token modifiers a server uses.
- */
- TokenModifiers []string `json:"tokenModifiers"`
-}
-
-/**
- * @since 3.16.0
- */
-type SemanticTokensOptions struct {
- /**
- * The legend used by the server
- */
- Legend SemanticTokensLegend `json:"legend"`
- /**
- * Server supports providing semantic tokens for a specific range
- * of a document.
- */
- Range bool/*boolean | { }*/ `json:"range,omitempty"`
- /**
- * Server supports providing semantic tokens for a full document.
- */
- Full interface{}/*boolean | <elided struct>*/ `json:"full,omitempty"`
- WorkDoneProgressOptions
-}
-
-/**
- * @since 3.16.0
- */
-type SemanticTokensParams struct {
- /**
- * The text document.
- */
- TextDocument TextDocumentIdentifier `json:"textDocument"`
- WorkDoneProgressParams
- PartialResultParams
-}
-
-/**
- * @since 3.16.0
- */
-type SemanticTokensRangeParams struct {
- /**
- * The text document.
- */
- TextDocument TextDocumentIdentifier `json:"textDocument"`
- /**
- * The range the semantic tokens are requested for.
- */
- Range Range `json:"range"`
- WorkDoneProgressParams
- PartialResultParams
-}
-
-/**
- * @since 3.16.0
- */
-type SemanticTokensRegistrationOptions struct {
- TextDocumentRegistrationOptions
- SemanticTokensOptions
- StaticRegistrationOptions
-}
-
-/**
- * @since 3.16.0
- */
-type SemanticTokensWorkspaceClientCapabilities struct {
- /**
- * Whether the client implementation supports a refresh request sent from
- * the server to the client.
- *
- * Note that this event is global and will force the client to refresh all
- * semantic tokens currently shown. It should be used with absolute care
- * and is useful for situation where a server for example detects a project
- * wide change that requires such a calculation.
- */
- RefreshSupport bool `json:"refreshSupport,omitempty"`
-}
-
-type ServerCapabilities struct {
- /**
- * Defines how text documents are synced. Is either a detailed structure defining each notification or
- * for backwards compatibility the TextDocumentSyncKind number.
- */
- TextDocumentSync interface{}/*TextDocumentSyncOptions | TextDocumentSyncKind*/ `json:"textDocumentSync,omitempty"`
- /**
- * The server provides completion support.
- */
- CompletionProvider CompletionOptions `json:"completionProvider,omitempty"`
- /**
- * The server provides hover support.
- */
- HoverProvider bool/*boolean | HoverOptions*/ `json:"hoverProvider,omitempty"`
- /**
- * The server provides signature help support.
- */
- SignatureHelpProvider SignatureHelpOptions `json:"signatureHelpProvider,omitempty"`
- /**
- * The server provides Goto Declaration support.
- */
- DeclarationProvider interface{}/* bool | DeclarationOptions | DeclarationRegistrationOptions*/ `json:"declarationProvider,omitempty"`
- /**
- * The server provides goto definition support.
- */
- DefinitionProvider bool/*boolean | DefinitionOptions*/ `json:"definitionProvider,omitempty"`
- /**
- * The server provides Goto Type Definition support.
- */
- TypeDefinitionProvider interface{}/* bool | TypeDefinitionOptions | TypeDefinitionRegistrationOptions*/ `json:"typeDefinitionProvider,omitempty"`
- /**
- * The server provides Goto Implementation support.
- */
- ImplementationProvider interface{}/* bool | ImplementationOptions | ImplementationRegistrationOptions*/ `json:"implementationProvider,omitempty"`
- /**
- * The server provides find references support.
- */
- ReferencesProvider bool/*boolean | ReferenceOptions*/ `json:"referencesProvider,omitempty"`
- /**
- * The server provides document highlight support.
- */
- DocumentHighlightProvider bool/*boolean | DocumentHighlightOptions*/ `json:"documentHighlightProvider,omitempty"`
- /**
- * The server provides document symbol support.
- */
- DocumentSymbolProvider bool/*boolean | DocumentSymbolOptions*/ `json:"documentSymbolProvider,omitempty"`
- /**
- * The server provides code actions. CodeActionOptions may only be
- * specified if the client states that it supports
- * `codeActionLiteralSupport` in its initial `initialize` request.
- */
- CodeActionProvider interface{}/*boolean | CodeActionOptions*/ `json:"codeActionProvider,omitempty"`
- /**
- * The server provides code lens.
- */
- CodeLensProvider CodeLensOptions `json:"codeLensProvider,omitempty"`
- /**
- * The server provides document link support.
- */
- DocumentLinkProvider DocumentLinkOptions `json:"documentLinkProvider,omitempty"`
- /**
- * The server provides color provider support.
- */
- ColorProvider interface{}/* bool | DocumentColorOptions | DocumentColorRegistrationOptions*/ `json:"colorProvider,omitempty"`
- /**
- * The server provides workspace symbol support.
- */
- WorkspaceSymbolProvider bool/*boolean | WorkspaceSymbolOptions*/ `json:"workspaceSymbolProvider,omitempty"`
- /**
- * The server provides document formatting.
- */
- DocumentFormattingProvider bool/*boolean | DocumentFormattingOptions*/ `json:"documentFormattingProvider,omitempty"`
- /**
- * The server provides document range formatting.
- */
- DocumentRangeFormattingProvider bool/*boolean | DocumentRangeFormattingOptions*/ `json:"documentRangeFormattingProvider,omitempty"`
- /**
- * The server provides document formatting on typing.
- */
- DocumentOnTypeFormattingProvider DocumentOnTypeFormattingOptions `json:"documentOnTypeFormattingProvider,omitempty"`
- /**
- * The server provides rename support. RenameOptions may only be
- * specified if the client states that it supports
- * `prepareSupport` in its initial `initialize` request.
- */
- RenameProvider interface{}/*boolean | RenameOptions*/ `json:"renameProvider,omitempty"`
- /**
- * The server provides folding provider support.
- */
- FoldingRangeProvider interface{}/* bool | FoldingRangeOptions | FoldingRangeRegistrationOptions*/ `json:"foldingRangeProvider,omitempty"`
- /**
- * The server provides selection range support.
- */
- SelectionRangeProvider interface{}/* bool | SelectionRangeOptions | SelectionRangeRegistrationOptions*/ `json:"selectionRangeProvider,omitempty"`
- /**
- * The server provides execute command support.
- */
- ExecuteCommandProvider ExecuteCommandOptions `json:"executeCommandProvider,omitempty"`
- /**
- * The server provides call hierarchy support.
- *
- * @since 3.16.0
- */
- CallHierarchyProvider interface{}/* bool | CallHierarchyOptions | CallHierarchyRegistrationOptions*/ `json:"callHierarchyProvider,omitempty"`
- /**
- * The server provides linked editing range support.
- *
- * @since 3.16.0
- */
- LinkedEditingRangeProvider interface{}/* bool | LinkedEditingRangeOptions | LinkedEditingRangeRegistrationOptions*/ `json:"linkedEditingRangeProvider,omitempty"`
- /**
- * The server provides semantic tokens support.
- *
- * @since 3.16.0
- */
- SemanticTokensProvider interface{}/*SemanticTokensOptions | SemanticTokensRegistrationOptions*/ `json:"semanticTokensProvider,omitempty"`
- /**
- * The workspace server capabilities
- */
- Workspace Workspace6Gn `json:"workspace,omitempty"`
- /**
- * The server provides moniker support.
- *
- * @since 3.16.0
- */
- MonikerProvider interface{}/* bool | MonikerOptions | MonikerRegistrationOptions*/ `json:"monikerProvider,omitempty"`
- /**
- * The server provides type hierarchy support.
- *
- * @since 3.17.0 - proposed state
- */
- TypeHierarchyProvider interface{}/* bool | TypeHierarchyOptions | TypeHierarchyRegistrationOptions*/ `json:"typeHierarchyProvider,omitempty"`
- /**
- * The server provides inline values.
- *
- * @since 3.17.0 - proposed state
- */
- InlineValueProvider interface{}/* bool | InlineValueOptions | InlineValueRegistrationOptions*/ `json:"inlineValueProvider,omitempty"`
- /**
- * The server provides inlay hints.
- *
- * @since 3.17.0 - proposed state
- */
- InlayHintProvider interface{}/* bool | InlayHintOptions | InlayHintRegistrationOptions*/ `json:"inlayHintProvider,omitempty"`
- /**
- * Experimental server capabilities.
- */
- Experimental interface{} `json:"experimental,omitempty"`
-}
-
-type SetTraceParams struct {
- Value TraceValues `json:"value"`
-}
-
-/**
- * Client capabilities for the show document request.
- *
- * @since 3.16.0
- */
-type ShowDocumentClientCapabilities struct {
- /**
- * The client has support for the show document
- * request.
- */
- Support bool `json:"support"`
-}
-
-/**
- * Params to show a document.
- *
- * @since 3.16.0
- */
-type ShowDocumentParams struct {
- /**
- * The document uri to show.
- */
- URI URI `json:"uri"`
- /**
- * Indicates to show the resource in an external program.
- * To show for example `https://code.visualstudio.com/`
- * in the default WEB browser set `external` to `true`.
- */
- External bool `json:"external,omitempty"`
- /**
- * An optional property to indicate whether the editor
- * showing the document should take focus or not.
- * Clients might ignore this property if an external
- * program in started.
- */
- TakeFocus bool `json:"takeFocus,omitempty"`
- /**
- * An optional selection range if the document is a text
- * document. Clients might ignore the property if an
- * external program is started or the file is not a text
- * file.
- */
- Selection Range `json:"selection,omitempty"`
-}
-
-/**
- * The result of an show document request.
- *
- * @since 3.16.0
- */
-type ShowDocumentResult struct {
- /**
- * A boolean indicating if the show was successful.
- */
- Success bool `json:"success"`
-}
-
-/**
- * The parameters of a notification message.
- */
-type ShowMessageParams struct {
- /**
- * The message type. See {@link MessageType}
- */
- Type MessageType `json:"type"`
- /**
- * The actual message
- */
- Message string `json:"message"`
-}
-
-/**
- * Show message request client capabilities
- */
-type ShowMessageRequestClientCapabilities struct {
- /**
- * Capabilities specific to the `MessageActionItem` type.
- */
- MessageActionItem struct {
- /**
- * Whether the client supports additional attributes which
- * are preserved and send back to the server in the
- * request's response.
- */
- AdditionalPropertiesSupport bool `json:"additionalPropertiesSupport,omitempty"`
- } `json:"messageActionItem,omitempty"`
-}
-
-type ShowMessageRequestParams struct {
- /**
- * The message type. See {@link MessageType}
- */
- Type MessageType `json:"type"`
- /**
- * The actual message
- */
- Message string `json:"message"`
- /**
- * The message action items to present.
- */
- Actions []MessageActionItem `json:"actions,omitempty"`
-}
-
-/**
- * Signature help represents the signature of something
- * callable. There can be multiple signature but only one
- * active and only one active parameter.
- */
-type SignatureHelp struct {
- /**
- * One or more signatures.
- */
- Signatures []SignatureInformation `json:"signatures"`
- /**
- * The active signature. If omitted or the value lies outside the
- * range of `signatures` the value defaults to zero or is ignored if
- * the `SignatureHelp` has no signatures.
- *
- * Whenever possible implementors should make an active decision about
- * the active signature and shouldn't rely on a default value.
- *
- * In future version of the protocol this property might become
- * mandatory to better express this.
- */
- ActiveSignature uint32 `json:"activeSignature,omitempty"`
- /**
- * The active parameter of the active signature. If omitted or the value
- * lies outside the range of `signatures[activeSignature].parameters`
- * defaults to 0 if the active signature has parameters. If
- * the active signature has no parameters it is ignored.
- * In future version of the protocol this property might become
- * mandatory to better express the active parameter if the
- * active signature does have any.
- */
- ActiveParameter uint32 `json:"activeParameter,omitempty"`
-}
-
-/**
- * Client Capabilities for a [SignatureHelpRequest](#SignatureHelpRequest).
- */
-type SignatureHelpClientCapabilities struct {
- /**
- * Whether signature help supports dynamic registration.
- */
- DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
- /**
- * The client supports the following `SignatureInformation`
- * specific properties.
- */
- SignatureInformation struct {
- /**
- * Client supports the follow content formats for the documentation
- * property. The order describes the preferred format of the client.
- */
- DocumentationFormat []MarkupKind `json:"documentationFormat,omitempty"`
- /**
- * Client capabilities specific to parameter information.
- */
- ParameterInformation struct {
- /**
- * The client supports processing label offsets instead of a
- * simple label string.
- *
- * @since 3.14.0
- */
- LabelOffsetSupport bool `json:"labelOffsetSupport,omitempty"`
- } `json:"parameterInformation,omitempty"`
- /**
- * The client support the `activeParameter` property on `SignatureInformation`
- * literal.
- *
- * @since 3.16.0
- */
- ActiveParameterSupport bool `json:"activeParameterSupport,omitempty"`
- } `json:"signatureInformation,omitempty"`
- /**
- * The client supports to send additional context information for a
- * `textDocument/signatureHelp` request. A client that opts into
- * contextSupport will also support the `retriggerCharacters` on
- * `SignatureHelpOptions`.
- *
- * @since 3.15.0
- */
- ContextSupport bool `json:"contextSupport,omitempty"`
-}
-
-/**
- * Additional information about the context in which a signature help request was triggered.
- *
- * @since 3.15.0
- */
-type SignatureHelpContext struct {
- /**
- * Action that caused signature help to be triggered.
- */
- TriggerKind SignatureHelpTriggerKind `json:"triggerKind"`
- /**
- * Character that caused signature help to be triggered.
- *
- * This is undefined when `triggerKind !== SignatureHelpTriggerKind.TriggerCharacter`
- */
- TriggerCharacter string `json:"triggerCharacter,omitempty"`
- /**
- * `true` if signature help was already showing when it was triggered.
- *
- * Retrigger occurs when the signature help is already active and can be caused by actions such as
- * typing a trigger character, a cursor move, or document content changes.
- */
- IsRetrigger bool `json:"isRetrigger"`
- /**
- * The currently active `SignatureHelp`.
- *
- * The `activeSignatureHelp` has its `SignatureHelp.activeSignature` field updated based on
- * the user navigating through available signatures.
- */
- ActiveSignatureHelp SignatureHelp `json:"activeSignatureHelp,omitempty"`
-}
-
-/**
- * Server Capabilities for a [SignatureHelpRequest](#SignatureHelpRequest).
- */
-type SignatureHelpOptions struct {
- /**
- * List of characters that trigger signature help.
- */
- TriggerCharacters []string `json:"triggerCharacters,omitempty"`
- /**
- * List of characters that re-trigger signature help.
- *
- * These trigger characters are only active when signature help is already showing. All trigger characters
- * are also counted as re-trigger characters.
- *
- * @since 3.15.0
- */
- RetriggerCharacters []string `json:"retriggerCharacters,omitempty"`
- WorkDoneProgressOptions
-}
-
-/**
- * Parameters for a [SignatureHelpRequest](#SignatureHelpRequest).
- */
-type SignatureHelpParams struct {
- /**
- * The signature help context. This is only available if the client specifies
- * to send this using the client capability `textDocument.signatureHelp.contextSupport === true`
- *
- * @since 3.15.0
- */
- Context SignatureHelpContext `json:"context,omitempty"`
- TextDocumentPositionParams
- WorkDoneProgressParams
-}
-
-/**
- * How a signature help was triggered.
- *
- * @since 3.15.0
- */
-type SignatureHelpTriggerKind float64
-
-/**
- * Represents the signature of something callable. A signature
- * can have a label, like a function-name, a doc-comment, and
- * a set of parameters.
- */
-type SignatureInformation struct {
- /**
- * The label of this signature. Will be shown in
- * the UI.
- */
- Label string `json:"label"`
- /**
- * The human-readable doc-comment of this signature. Will be shown
- * in the UI but can be omitted.
- */
- Documentation string/*string | MarkupContent*/ `json:"documentation,omitempty"`
- /**
- * The parameters of this signature.
- */
- Parameters []ParameterInformation `json:"parameters,omitempty"`
- /**
- * The index of the active parameter.
- *
- * If provided, this is used in place of `SignatureHelp.activeParameter`.
- *
- * @since 3.16.0
- */
- ActiveParameter uint32 `json:"activeParameter,omitempty"`
-}
-
-/**
- * Static registration options to be returned in the initialize
- * request.
- */
-type StaticRegistrationOptions struct {
- /**
- * The id used to register the request. The id can be used to deregister
- * the request again. See also Registration#id.
- */
- ID string `json:"id,omitempty"`
-}
-
-/**
- * Represents information about programming constructs like variables, classes,
- * interfaces etc.
- */
-type SymbolInformation struct {
- /**
- * The name of this symbol.
- */
- Name string `json:"name"`
- /**
- * The kind of this symbol.
- */
- Kind SymbolKind `json:"kind"`
- /**
- * Tags for this completion item.
- *
- * @since 3.16.0
- */
- Tags []SymbolTag `json:"tags,omitempty"`
- /**
- * Indicates if this symbol is deprecated.
- *
- * @deprecated Use tags instead
- */
- Deprecated bool `json:"deprecated,omitempty"`
- /**
- * The location of this symbol. The location's range is used by a tool
- * to reveal the location in the editor. If the symbol is selected in the
- * tool the range's start information is used to position the cursor. So
- * the range usually spans more than the actual symbol's name and does
- * normally include thinks like visibility modifiers.
- *
- * The range doesn't have to denote a node range in the sense of a abstract
- * syntax tree. It can therefore not be used to re-construct a hierarchy of
- * the symbols.
- */
- Location Location `json:"location"`
- /**
- * The name of the symbol containing this symbol. This information is for
- * user interface purposes (e.g. to render a qualifier in the user interface
- * if necessary). It can't be used to re-infer a hierarchy for the document
- * symbols.
- */
- ContainerName string `json:"containerName,omitempty"`
-}
-
-/**
- * A symbol kind.
- */
-type SymbolKind float64
-
-/**
- * Symbol tags are extra annotations that tweak the rendering of a symbol.
- * @since 3.16
- */
-type SymbolTag float64
-
-/**
- * Text document specific client capabilities.
- */
-type TextDocumentClientCapabilities struct {
- /**
- * Defines which synchronization capabilities the client supports.
- */
- Synchronization TextDocumentSyncClientCapabilities `json:"synchronization,omitempty"`
- /**
- * Capabilities specific to the `textDocument/completion`
- */
- Completion CompletionClientCapabilities `json:"completion,omitempty"`
- /**
- * Capabilities specific to the `textDocument/hover`
- */
- Hover HoverClientCapabilities `json:"hover,omitempty"`
- /**
- * Capabilities specific to the `textDocument/signatureHelp`
- */
- SignatureHelp SignatureHelpClientCapabilities `json:"signatureHelp,omitempty"`
- /**
- * Capabilities specific to the `textDocument/declaration`
- *
- * @since 3.14.0
- */
- Declaration DeclarationClientCapabilities `json:"declaration,omitempty"`
- /**
- * Capabilities specific to the `textDocument/definition`
- */
- Definition DefinitionClientCapabilities `json:"definition,omitempty"`
- /**
- * Capabilities specific to the `textDocument/typeDefinition`
- *
- * @since 3.6.0
- */
- TypeDefinition TypeDefinitionClientCapabilities `json:"typeDefinition,omitempty"`
- /**
- * Capabilities specific to the `textDocument/implementation`
- *
- * @since 3.6.0
- */
- Implementation ImplementationClientCapabilities `json:"implementation,omitempty"`
- /**
- * Capabilities specific to the `textDocument/references`
- */
- References ReferenceClientCapabilities `json:"references,omitempty"`
- /**
- * Capabilities specific to the `textDocument/documentHighlight`
- */
- DocumentHighlight DocumentHighlightClientCapabilities `json:"documentHighlight,omitempty"`
- /**
- * Capabilities specific to the `textDocument/documentSymbol`
- */
- DocumentSymbol DocumentSymbolClientCapabilities `json:"documentSymbol,omitempty"`
- /**
- * Capabilities specific to the `textDocument/codeAction`
- */
- CodeAction CodeActionClientCapabilities `json:"codeAction,omitempty"`
- /**
- * Capabilities specific to the `textDocument/codeLens`
- */
- CodeLens CodeLensClientCapabilities `json:"codeLens,omitempty"`
- /**
- * Capabilities specific to the `textDocument/documentLink`
- */
- DocumentLink DocumentLinkClientCapabilities `json:"documentLink,omitempty"`
- /**
- * Capabilities specific to the `textDocument/documentColor`
- */
- ColorProvider DocumentColorClientCapabilities `json:"colorProvider,omitempty"`
- /**
- * Capabilities specific to the `textDocument/formatting`
- */
- Formatting DocumentFormattingClientCapabilities `json:"formatting,omitempty"`
- /**
- * Capabilities specific to the `textDocument/rangeFormatting`
- */
- RangeFormatting DocumentRangeFormattingClientCapabilities `json:"rangeFormatting,omitempty"`
- /**
- * Capabilities specific to the `textDocument/onTypeFormatting`
- */
- OnTypeFormatting DocumentOnTypeFormattingClientCapabilities `json:"onTypeFormatting,omitempty"`
- /**
- * Capabilities specific to the `textDocument/rename`
- */
- Rename RenameClientCapabilities `json:"rename,omitempty"`
- /**
- * Capabilities specific to `textDocument/foldingRange` request.
- *
- * @since 3.10.0
- */
- FoldingRange FoldingRangeClientCapabilities `json:"foldingRange,omitempty"`
- /**
- * Capabilities specific to `textDocument/selectionRange` request.
- *
- * @since 3.15.0
- */
- SelectionRange SelectionRangeClientCapabilities `json:"selectionRange,omitempty"`
- /**
- * Capabilities specific to `textDocument/publishDiagnostics` notification.
- */
- PublishDiagnostics PublishDiagnosticsClientCapabilities `json:"publishDiagnostics,omitempty"`
- /**
- * Capabilities specific to the various call hierarchy request.
- *
- * @since 3.16.0
- */
- CallHierarchy CallHierarchyClientCapabilities `json:"callHierarchy,omitempty"`
- /**
- * Capabilities specific to the various semantic token request.
- *
- * @since 3.16.0
- */
- SemanticTokens SemanticTokensClientCapabilities `json:"semanticTokens,omitempty"`
- /**
- * Capabilities specific to the linked editing range request.
- *
- * @since 3.16.0
- */
- LinkedEditingRange LinkedEditingRangeClientCapabilities `json:"linkedEditingRange,omitempty"`
- /**
- * Client capabilities specific to the moniker request.
- *
- * @since 3.16.0
- */
- Moniker MonikerClientCapabilities `json:"moniker,omitempty"`
- /**
- * Capabilities specific to the various type hierarchy requests.
- *
- * @since 3.17.0 - proposed state
- */
- TypeHierarchy TypeHierarchyClientCapabilities `json:"typeHierarchy,omitempty"`
- /**
- * Capabilities specific to the `textDocument/inlineValue` request.
- *
- * @since 3.17.0 - proposed state
- */
- InlineValue InlineValueClientCapabilities `json:"inlineValue,omitempty"`
- /**
- * Capabilities specific to the `textDocument/inlayHint` request.
- *
- * @since 3.17.0 - proposed state
- */
- InlayHint InlayHintClientCapabilities `json:"inlayHint,omitempty"`
-}
-
-/**
- * An event describing a change to a text document. If range and rangeLength are omitted
- * the new text is considered to be the full content of the document.
- */
-type TextDocumentContentChangeEvent = struct {
- /**
- * The range of the document that changed.
- */
- Range *Range `json:"range,omitempty"`
- /**
- * The optional length of the range that got replaced.
- *
- * @deprecated use range instead.
- */
- RangeLength uint32 `json:"rangeLength,omitempty"`
- /**
- * The new text for the provided range.
- */
- Text string `json:"text"`
-}
-
-/**
- * Describes textual changes on a text document. A TextDocumentEdit describes all changes
- * on a document version Si and after they are applied move the document to version Si+1.
- * So the creator of a TextDocumentEdit doesn't need to sort the array of edits or do any
- * kind of ordering. However the edits must be non overlapping.
- */
-type TextDocumentEdit struct {
- /**
- * The text document to change.
- */
- TextDocument OptionalVersionedTextDocumentIdentifier `json:"textDocument"`
- /**
- * The edits to be applied.
- *
- * @since 3.16.0 - support for AnnotatedTextEdit. This is guarded using a
- * client capability.
- */
- Edits []TextEdit/*TextEdit | AnnotatedTextEdit*/ `json:"edits"`
-}
-
-/**
- * A document filter denotes a document by different properties like
- * the [language](#TextDocument.languageId), the [scheme](#Uri.scheme) of
- * its resource, or a glob-pattern that is applied to the [path](#TextDocument.fileName).
- *
- * Glob patterns can have the following syntax:
- * - `*` to match one or more characters in a path segment
- * - `?` to match on one character in a path segment
- * - `**` to match any number of path segments, including none
- * - `{}` to group sub patterns into an OR expression. (e.g. `**​/*.{ts,js}` matches all TypeScript and JavaScript files)
- * - `[]` to declare a range of characters to match in a path segment (e.g., `example.[0-9]` to match on `example.0`, `example.1`, …)
- * - `[!...]` to negate a range of characters to match in a path segment (e.g., `example.[!0-9]` to match on `example.a`, `example.b`, but not `example.0`)
- *
- * @sample A language filter that applies to typescript files on disk: `{ language: 'typescript', scheme: 'file' }`
- * @sample A language filter that applies to all package.json paths: `{ language: 'json', pattern: '**package.json' }`
- *
- * @since 3.17.0 - proposed state.
- */
-type TextDocumentFilter = struct {
- /** A language id, like `typescript`. */
- Language string `json:"language"`
- /** A Uri [scheme](#Uri.scheme), like `file` or `untitled`. */
- Scheme string `json:"scheme,omitempty"`
- /** A glob pattern, like `*.{ts,js}`. */
- Pattern string `json:"pattern,omitempty"`
-}
-
-/**
- * A literal to identify a text document in the client.
- */
-type TextDocumentIdentifier struct {
- /**
- * The text document's uri.
- */
- URI DocumentURI `json:"uri"`
-}
-
-/**
- * An item to transfer a text document from the client to the
- * server.
- */
-type TextDocumentItem struct {
- /**
- * The text document's uri.
- */
- URI DocumentURI `json:"uri"`
- /**
- * The text document's language identifier
- */
- LanguageID string `json:"languageId"`
- /**
- * The version number of this document (it will increase after each
- * change, including undo/redo).
- */
- Version int32 `json:"version"`
- /**
- * The content of the opened text document.
- */
- Text string `json:"text"`
-}
-
-/**
- * A parameter literal used in requests to pass a text document and a position inside that
- * document.
- */
-type TextDocumentPositionParams struct {
- /**
- * The text document.
- */
- TextDocument TextDocumentIdentifier `json:"textDocument"`
- /**
- * The position inside the text document.
- */
- Position Position `json:"position"`
-}
-
-/**
- * General text document registration options.
- */
-type TextDocumentRegistrationOptions struct {
- /**
- * A document selector to identify the scope of the registration. If set to null
- * the document selector provided on the client side will be used.
- */
- DocumentSelector DocumentSelector /*DocumentSelector | null*/ `json:"documentSelector"`
-}
-
-/**
- * Represents reasons why a text document is saved.
- */
-type TextDocumentSaveReason float64
-
-type TextDocumentSyncClientCapabilities struct {
- /**
- * Whether text document synchronization supports dynamic registration.
- */
- DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
- /**
- * The client supports sending will save notifications.
- */
- WillSave bool `json:"willSave,omitempty"`
- /**
- * The client supports sending a will save request and
- * waits for a response providing text edits which will
- * be applied to the document before it is saved.
- */
- WillSaveWaitUntil bool `json:"willSaveWaitUntil,omitempty"`
- /**
- * The client supports did save notifications.
- */
- DidSave bool `json:"didSave,omitempty"`
-}
-
-/**
- * Defines how the host (editor) should sync
- * document changes to the language server.
- */
-type TextDocumentSyncKind float64
-
-type TextDocumentSyncOptions struct {
- /**
- * Open and close notifications are sent to the server. If omitted open close notification should not
- * be sent.
- */
- OpenClose bool `json:"openClose,omitempty"`
- /**
- * Change notifications are sent to the server. See TextDocumentSyncKind.None, TextDocumentSyncKind.Full
- * and TextDocumentSyncKind.Incremental. If omitted it defaults to TextDocumentSyncKind.None.
- */
- Change TextDocumentSyncKind `json:"change,omitempty"`
- /**
- * If present will save notifications are sent to the server. If omitted the notification should not be
- * sent.
- */
- WillSave bool `json:"willSave,omitempty"`
- /**
- * If present will save wait until requests are sent to the server. If omitted the request should not be
- * sent.
- */
- WillSaveWaitUntil bool `json:"willSaveWaitUntil,omitempty"`
- /**
- * If present save notifications are sent to the server. If omitted the notification should not be
- * sent.
- */
- Save SaveOptions/*boolean | SaveOptions*/ `json:"save,omitempty"`
-}
-
-/**
- * A text edit applicable to a text document.
- */
-type TextEdit struct {
- /**
- * The range of the text document to be manipulated. To insert
- * text into a document create a range where start === end.
- */
- Range Range `json:"range"`
- /**
- * The string to be inserted. For delete operations use an
- * empty string.
- */
- NewText string `json:"newText"`
-}
-
-type TokenFormat = string
-
-type TraceValues = string /* 'off' | 'messages' | 'compact' | 'verbose' */
-
-/**
- * Since 3.6.0
- */
-type TypeDefinitionClientCapabilities struct {
- /**
- * Whether implementation supports dynamic registration. If this is set to `true`
- * the client supports the new `TypeDefinitionRegistrationOptions` return value
- * for the corresponding server capability as well.
- */
- DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
- /**
- * The client supports additional metadata in the form of definition links.
- *
- * Since 3.14.0
- */
- LinkSupport bool `json:"linkSupport,omitempty"`
-}
-
-type TypeDefinitionOptions struct {
- WorkDoneProgressOptions
-}
-
-type TypeDefinitionParams struct {
- TextDocumentPositionParams
- WorkDoneProgressParams
- PartialResultParams
-}
-
-type TypeDefinitionRegistrationOptions struct {
- TextDocumentRegistrationOptions
- TypeDefinitionOptions
- StaticRegistrationOptions
-}
-
-/**
- * @since 3.17.0 - proposed state
- */
-type TypeHierarchyClientCapabilities = struct {
- /**
- * Whether implementation supports dynamic registration. If this is set to `true`
- * the client supports the new `(TextDocumentRegistrationOptions & StaticRegistrationOptions)`
- * return value for the corresponding server capability as well.
- */
- DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
-}
-
-/**
- * @since 3.17.0 - proposed state
- */
-type TypeHierarchyItem = struct {
- /**
- * The name of this item.
- */
- Name string `json:"name"`
- /**
- * The kind of this item.
- */
- Kind SymbolKind `json:"kind"`
- /**
- * Tags for this item.
- */
- Tags []SymbolTag `json:"tags,omitempty"`
- /**
- * More detail for this item, e.g. the signature of a function.
- */
- Detail string `json:"detail,omitempty"`
- /**
- * The resource identifier of this item.
- */
- URI DocumentURI `json:"uri"`
- /**
- * The range enclosing this symbol not including leading/trailing whitespace
- * but everything else, e.g. comments and code.
- */
- Range *Range `json:"range"`
- /**
- * The range that should be selected and revealed when this symbol is being
- * picked, e.g. the name of a function. Must be contained by the
- * [`range`](#TypeHierarchyItem.range).
- */
- SelectionRange *Range `json:"selectionRange"`
- /**
- * A data entry field that is preserved between a type hierarchy prepare and
- * supertypes or subtypes requests. It could also be used to identify the
- * type hierarchy in the server, helping improve the performance on
- * resolving supertypes and subtypes.
- */
- Data LSPAny `json:"data,omitempty"`
-}
-
-/**
- * Type hierarchy options used during static registration.
- *
- * @since 3.17.0 - proposed state
- */
-type TypeHierarchyOptions = WorkDoneProgressOptions
-
-/**
- * The parameter of a `textDocument/prepareTypeHierarchy` request.
- *
- * @since 3.17.0 - proposed state
- */
-type TypeHierarchyPrepareParams struct {
- /**
- * The text document.
- */
- TextDocument TextDocumentIdentifier `json:"textDocument"`
- /**
- * The position inside the text document.
- */
- Position Position `json:"position"`
- /**
- * An optional token that a server can use to report work done progress.
- */
- WorkDoneToken ProgressToken `json:"workDoneToken,omitempty"`
-}
-
-/**
- * Type hierarchy options used during static or dynamic registration.
- *
- * @since 3.17.0 - proposed state
- */
-type TypeHierarchyRegistrationOptions struct {
- /**
- * A document selector to identify the scope of the registration. If set to null
- * the document selector provided on the client side will be used.
- */
- DocumentSelector DocumentSelector/*DocumentSelector | null*/ `json:"documentSelector"`
- /**
- * The id used to register the request. The id can be used to deregister
- * the request again. See also Registration#id.
- */
- ID string `json:"id,omitempty"`
-}
-
-/**
- * The parameter of a `typeHierarchy/subtypes` request.
- *
- * @since 3.17.0 - proposed state
- */
-type TypeHierarchySubtypesParams struct {
- /**
- * An optional token that a server can use to report work done progress.
- */
- WorkDoneToken ProgressToken `json:"workDoneToken,omitempty"`
- /**
- * An optional token that a server can use to report partial results (e.g. streaming) to
- * the client.
- */
- PartialResultToken ProgressToken `json:"partialResultToken,omitempty"`
- Item TypeHierarchyItem `json:"item"`
-}
-
-/**
- * The parameter of a `typeHierarchy/supertypes` request.
- *
- * @since 3.17.0 - proposed state
- */
-type TypeHierarchySupertypesParams struct {
- /**
- * An optional token that a server can use to report work done progress.
- */
- WorkDoneToken ProgressToken `json:"workDoneToken,omitempty"`
- /**
- * An optional token that a server can use to report partial results (e.g. streaming) to
- * the client.
- */
- PartialResultToken ProgressToken `json:"partialResultToken,omitempty"`
- Item TypeHierarchyItem `json:"item"`
-}
-
-/**
- * A tagging type for string properties that are actually URIs
- *
- * @since 3.16.0
- */
-type URI = string
-
-/**
- * A diagnostic report indicating that the last returned
- * report is still accurate.
- *
- * @since 3.17.0 - proposed state
- */
-type UnchangedDocumentDiagnosticReport = struct {
- /**
- * A document diagnostic report indicating
- * no changes to the last result. A server can
- * only return `unchanged` if result ids are
- * provided.
- */
- Kind string `json:"kind"`
- /**
- * A result id which will be sent on the next
- * diagnostic request for the same document.
- */
- ResultID string `json:"resultId"`
-}
-
-/**
- * Moniker uniqueness level to define scope of the moniker.
- *
- * @since 3.16.0
- */
-type UniquenessLevel string
-
-/**
- * General parameters to unregister a request or notification.
- */
-type Unregistration struct {
- /**
- * The id used to unregister the request or notification. Usually an id
- * provided during the register request.
- */
- ID string `json:"id"`
- /**
- * The method to unregister for.
- */
- Method string `json:"method"`
-}
-
-type UnregistrationParams struct {
- Unregisterations []Unregistration `json:"unregisterations"`
-}
-
-/**
- * A versioned notebook document identifier.
- *
- * @since 3.17.0 - proposed state
- */
-type VersionedNotebookDocumentIdentifier = struct {
- /**
- * The version number of this notebook document.
- */
- Version int32 `json:"version"`
- /**
- * The notebook document's uri.
- */
- URI URI `json:"uri"`
-}
-
-/**
- * A text document identifier to denote a specific version of a text document.
- */
-type VersionedTextDocumentIdentifier struct {
- /**
- * The version number of this document.
- */
- Version int32 `json:"version"`
- TextDocumentIdentifier
-}
-
-type WatchKind float64
-
-/**
- * The parameters send in a will save text document notification.
- */
-type WillSaveTextDocumentParams struct {
- /**
- * The document that will be saved.
- */
- TextDocument TextDocumentIdentifier `json:"textDocument"`
- /**
- * The 'TextDocumentSaveReason'.
- */
- Reason TextDocumentSaveReason `json:"reason"`
-}
-
-type WorkDoneProgressBegin struct {
- Kind string `json:"kind"`
- /**
- * Mandatory title of the progress operation. Used to briefly inform about
- * the kind of operation being performed.
- *
- * Examples: "Indexing" or "Linking dependencies".
- */
- Title string `json:"title"`
- /**
- * Controls if a cancel button should show to allow the user to cancel the
- * long running operation. Clients that don't support cancellation are allowed
- * to ignore the setting.
- */
- Cancellable bool `json:"cancellable,omitempty"`
- /**
- * Optional, more detailed associated progress message. Contains
- * complementary information to the `title`.
- *
- * Examples: "3/25 files", "project/src/module2", "node_modules/some_dep".
- * If unset, the previous progress message (if any) is still valid.
- */
- Message string `json:"message,omitempty"`
- /**
- * Optional progress percentage to display (value 100 is considered 100%).
- * If not provided infinite progress is assumed and clients are allowed
- * to ignore the `percentage` value in subsequent in report notifications.
- *
- * The value should be steadily rising. Clients are free to ignore values
- * that are not following this rule. The value range is [0, 100].
- */
- Percentage uint32 `json:"percentage,omitempty"`
-}
-
-type WorkDoneProgressCancelParams struct {
- /**
- * The token to be used to report progress.
- */
- Token ProgressToken `json:"token"`
-}
-
-type WorkDoneProgressClientCapabilities struct {
- /**
- * Window specific client capabilities.
- */
- Window struct {
- /**
- * Whether client supports server initiated progress using the
- * `window/workDoneProgress/create` request.
- *
- * Since 3.15.0
- */
- WorkDoneProgress bool `json:"workDoneProgress,omitempty"`
- /**
- * Capabilities specific to the showMessage request.
- *
- * @since 3.16.0
- */
- ShowMessage ShowMessageRequestClientCapabilities `json:"showMessage,omitempty"`
- /**
- * Capabilities specific to the showDocument request.
- *
- * @since 3.16.0
- */
- ShowDocument ShowDocumentClientCapabilities `json:"showDocument,omitempty"`
- } `json:"window,omitempty"`
-}
-
-type WorkDoneProgressCreateParams struct {
- /**
- * The token to be used to report progress.
- */
- Token ProgressToken `json:"token"`
-}
-
-type WorkDoneProgressEnd struct {
- Kind string `json:"kind"`
- /**
- * Optional, a final message indicating to for example indicate the outcome
- * of the operation.
- */
- Message string `json:"message,omitempty"`
-}
-
-type WorkDoneProgressOptions struct {
- WorkDoneProgress bool `json:"workDoneProgress,omitempty"`
-}
-
-type WorkDoneProgressParams struct {
- /**
- * An optional token that a server can use to report work done progress.
- */
- WorkDoneToken ProgressToken `json:"workDoneToken,omitempty"`
-}
-
-type WorkDoneProgressReport struct {
- Kind string `json:"kind"`
- /**
- * Controls enablement state of a cancel button.
- *
- * Clients that don't support cancellation or don't support controlling the button's
- * enablement state are allowed to ignore the property.
- */
- Cancellable bool `json:"cancellable,omitempty"`
- /**
- * Optional, more detailed associated progress message. Contains
- * complementary information to the `title`.
- *
- * Examples: "3/25 files", "project/src/module2", "node_modules/some_dep".
- * If unset, the previous progress message (if any) is still valid.
- */
- Message string `json:"message,omitempty"`
- /**
- * Optional progress percentage to display (value 100 is considered 100%).
- * If not provided infinite progress is assumed and clients are allowed
- * to ignore the `percentage` value in subsequent in report notifications.
- *
- * The value should be steadily rising. Clients are free to ignore values
- * that are not following this rule. The value range is [0, 100]
- */
- Percentage uint32 `json:"percentage,omitempty"`
-}
-
-/**
- * Workspace specific client capabilities.
- */
-type WorkspaceClientCapabilities struct {
- /**
- * The client supports applying batch edits
- * to the workspace by supporting the request
- * 'workspace/applyEdit'
- */
- ApplyEdit bool `json:"applyEdit,omitempty"`
- /**
- * Capabilities specific to `WorkspaceEdit`s
- */
- WorkspaceEdit WorkspaceEditClientCapabilities `json:"workspaceEdit,omitempty"`
- /**
- * Capabilities specific to the `workspace/didChangeConfiguration` notification.
- */
- DidChangeConfiguration DidChangeConfigurationClientCapabilities `json:"didChangeConfiguration,omitempty"`
- /**
- * Capabilities specific to the `workspace/didChangeWatchedFiles` notification.
- */
- DidChangeWatchedFiles DidChangeWatchedFilesClientCapabilities `json:"didChangeWatchedFiles,omitempty"`
- /**
- * Capabilities specific to the `workspace/symbol` request.
- */
- Symbol WorkspaceSymbolClientCapabilities `json:"symbol,omitempty"`
- /**
- * Capabilities specific to the `workspace/executeCommand` request.
- */
- ExecuteCommand ExecuteCommandClientCapabilities `json:"executeCommand,omitempty"`
- /**
- * Capabilities specific to the semantic token requests scoped to the
- * workspace.
- *
- * @since 3.16.0.
- */
- SemanticTokens SemanticTokensWorkspaceClientCapabilities `json:"semanticTokens,omitempty"`
- /**
- * Capabilities specific to the code lens requests scoped to the
- * workspace.
- *
- * @since 3.16.0.
- */
- CodeLens CodeLensWorkspaceClientCapabilities `json:"codeLens,omitempty"`
- /**
- * The client has support for file notifications/requests for user operations on files.
- *
- * Since 3.16.0
- */
- FileOperations FileOperationClientCapabilities `json:"fileOperations,omitempty"`
- /**
- * Capabilities specific to the inline values requests scoped to the
- * workspace.
- *
- * @since 3.17.0.
- */
- InlineValue InlineValueWorkspaceClientCapabilities `json:"inlineValue,omitempty"`
- /**
- * Capabilities specific to the inlay hints requests scoped to the
- * workspace.
- *
- * @since 3.17.0.
- */
- InlayHint InlayHintWorkspaceClientCapabilities `json:"inlayHint,omitempty"`
-}
-
-/**
- * Parameters of the workspace diagnostic request.
- *
- * @since 3.17.0 - proposed state
- */
-type WorkspaceDiagnosticParams struct {
- /**
- * An optional token that a server can use to report work done progress.
- */
- WorkDoneToken ProgressToken `json:"workDoneToken,omitempty"`
- /**
- * An optional token that a server can use to report partial results (e.g. streaming) to
- * the client.
- */
- PartialResultToken ProgressToken `json:"partialResultToken,omitempty"`
- /**
- * The additional identifier provided during registration.
- */
- Identifier string `json:"identifier,omitempty"`
- /**
- * The currently known diagnostic reports with their
- * previous result ids.
- */
- PreviousResultIds []PreviousResultID `json:"previousResultIds"`
-}
-
-/**
- * A workspace diagnostic report.
- *
- * @since 3.17.0 - proposed state
- */
-type WorkspaceDiagnosticReport = struct {
- Items []WorkspaceDocumentDiagnosticReport `json:"items"`
-}
-
-/**
- * A workspace diagnostic document report.
- *
- * @since 3.17.0 - proposed state
- */
-type WorkspaceDocumentDiagnosticReport = interface{} /*WorkspaceFullDocumentDiagnosticReport | WorkspaceUnchangedDocumentDiagnosticReport*/
-
-/**
- * A workspace edit represents changes to many resources managed in the workspace. The edit
- * should either provide `changes` or `documentChanges`. If documentChanges are present
- * they are preferred over `changes` if the client can handle versioned document edits.
- *
- * Since version 3.13.0 a workspace edit can contain resource operations as well. If resource
- * operations are present clients need to execute the operations in the order in which they
- * are provided. So a workspace edit for example can consist of the following two changes:
- * (1) a create file a.txt and (2) a text document edit which insert text into file a.txt.
- *
- * An invalid sequence (e.g. (1) delete file a.txt and (2) insert text into file a.txt) will
- * cause failure of the operation. How the client recovers from the failure is described by
- * the client capability: `workspace.workspaceEdit.failureHandling`
- */
-type WorkspaceEdit struct {
- /**
- * Holds changes to existing resources.
- */
- Changes map[DocumentURI][]TextEdit/*[uri: DocumentUri]: TextEdit[]*/ `json:"changes,omitempty"`
- /**
- * Depending on the client capability `workspace.workspaceEdit.resourceOperations` document changes
- * are either an array of `TextDocumentEdit`s to express changes to n different text documents
- * where each text document edit addresses a specific version of a text document. Or it can contain
- * above `TextDocumentEdit`s mixed with create, rename and delete file / folder operations.
- *
- * Whether a client supports versioned document edits is expressed via
- * `workspace.workspaceEdit.documentChanges` client capability.
- *
- * If a client neither supports `documentChanges` nor `workspace.workspaceEdit.resourceOperations` then
- * only plain `TextEdit`s using the `changes` property are supported.
- */
- DocumentChanges []TextDocumentEdit/*TextDocumentEdit | CreateFile | RenameFile | DeleteFile*/ `json:"documentChanges,omitempty"`
- /**
- * A map of change annotations that can be referenced in `AnnotatedTextEdit`s or create, rename and
- * delete file / folder operations.
- *
- * Whether clients honor this property depends on the client capability `workspace.changeAnnotationSupport`.
- *
- * @since 3.16.0
- */
- ChangeAnnotations map[string]ChangeAnnotationIdentifier/*[id: ChangeAnnotationIdentifier]: ChangeAnnotation;*/ `json:"changeAnnotations,omitempty"`
-}
-
-type WorkspaceEditClientCapabilities struct {
- /**
- * The client supports versioned document changes in `WorkspaceEdit`s
- */
- DocumentChanges bool `json:"documentChanges,omitempty"`
- /**
- * The resource operations the client supports. Clients should at least
- * support 'create', 'rename' and 'delete' files and folders.
- *
- * @since 3.13.0
- */
- ResourceOperations []ResourceOperationKind `json:"resourceOperations,omitempty"`
- /**
- * The failure handling strategy of a client if applying the workspace edit
- * fails.
- *
- * @since 3.13.0
- */
- FailureHandling FailureHandlingKind `json:"failureHandling,omitempty"`
- /**
- * Whether the client normalizes line endings to the client specific
- * setting.
- * If set to `true` the client will normalize line ending characters
- * in a workspace edit containing to the client specific new line
- * character.
- *
- * @since 3.16.0
- */
- NormalizesLineEndings bool `json:"normalizesLineEndings,omitempty"`
- /**
- * Whether the client in general supports change annotations on text edits,
- * create file, rename file and delete file changes.
- *
- * @since 3.16.0
- */
- ChangeAnnotationSupport struct {
- /**
- * Whether the client groups edits with equal labels into tree nodes,
- * for instance all edits labelled with "Changes in Strings" would
- * be a tree node.
- */
- GroupsOnLabel bool `json:"groupsOnLabel,omitempty"`
- } `json:"changeAnnotationSupport,omitempty"`
-}
-
-type WorkspaceFolder struct {
- /**
- * The associated URI for this workspace folder.
- */
- URI string `json:"uri"`
- /**
- * The name of the workspace folder. Used to refer to this
- * workspace folder in the user interface.
- */
- Name string `json:"name"`
-}
-
-/**
- * The workspace folder change event.
- */
-type WorkspaceFoldersChangeEvent struct {
- /**
- * The array of added workspace folders
- */
- Added []WorkspaceFolder `json:"added"`
- /**
- * The array of the removed workspace folders
- */
- Removed []WorkspaceFolder `json:"removed"`
-}
-
-type WorkspaceFoldersClientCapabilities struct {
- /**
- * The workspace client capabilities
- */
- Workspace Workspace7Gn `json:"workspace,omitempty"`
-}
-
-type WorkspaceFoldersInitializeParams struct {
- /**
- * The actual configured workspace folders.
- */
- WorkspaceFolders []WorkspaceFolder /*WorkspaceFolder[] | null*/ `json:"workspaceFolders"`
-}
-
-type WorkspaceFoldersServerCapabilities struct {
- /**
- * The workspace server capabilities
- */
- Workspace Workspace9Gn `json:"workspace,omitempty"`
-}
-
-/**
- * A full document diagnostic report for a workspace diagnostic result.
- *
- * @since 3.17.0 - proposed state
- */
-type WorkspaceFullDocumentDiagnosticReport struct {
- /**
- * The URI for which diagnostic information is reported.
- */
- URI DocumentURI `json:"uri"`
- /**
- * The version number for which the diagnostics are reported.
- * If the document is not marked as open `null` can be provided.
- */
- Version int32/*integer | null*/ `json:"version"`
-}
-
-/**
- * A special workspace symbol that supports locations without a range
- *
- * @since 3.17.0 - proposed state
- */
-type WorkspaceSymbol struct {
- /**
- * The location of the symbol.
- *
- * See SymbolInformation#location for more details.
- */
- Location Location/*Location | { uri: DocumentUri }*/ `json:"location"`
- /**
- * A data entry field that is preserved on a workspace symbol between a
- * workspace symbol request and a workspace symbol resolve request.
- */
- Data LSPAny `json:"data,omitempty"`
-}
-
-/**
- * Client capabilities for a [WorkspaceSymbolRequest](#WorkspaceSymbolRequest).
- */
-type WorkspaceSymbolClientCapabilities struct {
- /**
- * Symbol request supports dynamic registration.
- */
- DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
- /**
- * Specific capabilities for the `SymbolKind` in the `workspace/symbol` request.
- */
- SymbolKind struct {
- /**
- * The symbol kind values the client supports. When this
- * property exists the client also guarantees that it will
- * handle values outside its set gracefully and falls back
- * to a default value when unknown.
- *
- * If this property is not present the client only supports
- * the symbol kinds from `File` to `Array` as defined in
- * the initial version of the protocol.
- */
- ValueSet []SymbolKind `json:"valueSet,omitempty"`
- } `json:"symbolKind,omitempty"`
- /**
- * The client supports tags on `SymbolInformation`.
- * Clients supporting tags have to handle unknown tags gracefully.
- *
- * @since 3.16.0
- */
- TagSupport struct {
- /**
- * The tags supported by the client.
- */
- ValueSet []SymbolTag `json:"valueSet"`
- } `json:"tagSupport,omitempty"`
- /**
- * The client support partial workspace symbols. The client will send the
- * request `workspaceSymbol/resolve` to the server to resolve additional
- * properties.
- *
- * @since 3.17.0 - proposedState
- */
- ResolveSupport struct {
- /**
- * The properties that a client can resolve lazily. Usually
- * `location.range`
- */
- Properties []string `json:"properties"`
- } `json:"resolveSupport,omitempty"`
-}
-
-/**
- * Server capabilities for a [WorkspaceSymbolRequest](#WorkspaceSymbolRequest).
- */
-type WorkspaceSymbolOptions struct {
- /**
- * The server provides support to resolve additional
- * information for a workspace symbol.
- *
- * @since 3.17.0 - proposed state
- */
- ResolveProvider bool `json:"resolveProvider,omitempty"`
- WorkDoneProgressOptions
-}
-
-/**
- * The parameters of a [WorkspaceSymbolRequest](#WorkspaceSymbolRequest).
- */
-type WorkspaceSymbolParams struct {
- /**
- * A query string to filter symbols by. Clients may send an empty
- * string here to request all symbols.
- */
- Query string `json:"query"`
- WorkDoneProgressParams
- PartialResultParams
-}
-
-/**
- * An unchanged document diagnostic report for a workspace diagnostic result.
- *
- * @since 3.17.0 - proposed state
- */
-type WorkspaceUnchangedDocumentDiagnosticReport struct {
- /**
- * The URI for which diagnostic information is reported.
- */
- URI DocumentURI `json:"uri"`
- /**
- * The version number for which the diagnostics are reported.
- * If the document is not marked as open `null` can be provided.
- */
- Version int32/*integer | null*/ `json:"version"`
-}
-
-const (
- /**
- * Empty kind.
- */
-
- Empty CodeActionKind = ""
- /**
- * Base kind for quickfix actions: 'quickfix'
- */
-
- QuickFix CodeActionKind = "quickfix"
- /**
- * Base kind for refactoring actions: 'refactor'
- */
-
- Refactor CodeActionKind = "refactor"
- /**
- * Base kind for refactoring extraction actions: 'refactor.extract'
- *
- * Example extract actions:
- *
- * - Extract method
- * - Extract function
- * - Extract variable
- * - Extract interface from class
- * - ...
- */
-
- RefactorExtract CodeActionKind = "refactor.extract"
- /**
- * Base kind for refactoring inline actions: 'refactor.inline'
- *
- * Example inline actions:
- *
- * - Inline function
- * - Inline variable
- * - Inline constant
- * - ...
- */
-
- RefactorInline CodeActionKind = "refactor.inline"
- /**
- * Base kind for refactoring rewrite actions: 'refactor.rewrite'
- *
- * Example rewrite actions:
- *
- * - Convert JavaScript function to class
- * - Add or remove parameter
- * - Encapsulate field
- * - Make method static
- * - Move method to base class
- * - ...
- */
-
- RefactorRewrite CodeActionKind = "refactor.rewrite"
- /**
- * Base kind for source actions: `source`
- *
- * Source code actions apply to the entire file.
- */
-
- Source CodeActionKind = "source"
- /**
- * Base kind for an organize imports source action: `source.organizeImports`
- */
-
- SourceOrganizeImports CodeActionKind = "source.organizeImports"
- /**
- * Base kind for auto-fix source actions: `source.fixAll`.
- *
- * Fix all actions automatically fix errors that have a clear fix that do not require user input.
- * They should not suppress errors or perform unsafe fixes such as generating new types or classes.
- *
- * @since 3.15.0
- */
-
- SourceFixAll CodeActionKind = "source.fixAll"
- /**
- * Code actions were explicitly requested by the user or by an extension.
- */
-
- CodeActionInvoked CodeActionTriggerKind = 1
- /**
- * Code actions were requested automatically.
- *
- * This typically happens when current selection in a file changes, but can
- * also be triggered when file content changes.
- */
-
- CodeActionAutomatic CodeActionTriggerKind = 2
- TextCompletion CompletionItemKind = 1
- MethodCompletion CompletionItemKind = 2
- FunctionCompletion CompletionItemKind = 3
- ConstructorCompletion CompletionItemKind = 4
- FieldCompletion CompletionItemKind = 5
- VariableCompletion CompletionItemKind = 6
- ClassCompletion CompletionItemKind = 7
- InterfaceCompletion CompletionItemKind = 8
- ModuleCompletion CompletionItemKind = 9
- PropertyCompletion CompletionItemKind = 10
- UnitCompletion CompletionItemKind = 11
- ValueCompletion CompletionItemKind = 12
- EnumCompletion CompletionItemKind = 13
- KeywordCompletion CompletionItemKind = 14
- SnippetCompletion CompletionItemKind = 15
- ColorCompletion CompletionItemKind = 16
- FileCompletion CompletionItemKind = 17
- ReferenceCompletion CompletionItemKind = 18
- FolderCompletion CompletionItemKind = 19
- EnumMemberCompletion CompletionItemKind = 20
- ConstantCompletion CompletionItemKind = 21
- StructCompletion CompletionItemKind = 22
- EventCompletion CompletionItemKind = 23
- OperatorCompletion CompletionItemKind = 24
- TypeParameterCompletion CompletionItemKind = 25
- /**
- * Render a completion as obsolete, usually using a strike-out.
- */
-
- ComplDeprecated CompletionItemTag = 1
- /**
- * Completion was triggered by typing an identifier (24x7 code
- * complete), manual invocation (e.g Ctrl+Space) or via API.
- */
-
- Invoked CompletionTriggerKind = 1
- /**
- * Completion was triggered by a trigger character specified by
- * the `triggerCharacters` properties of the `CompletionRegistrationOptions`.
- */
-
- TriggerCharacter CompletionTriggerKind = 2
- /**
- * Completion was re-triggered as current completion list is incomplete
- */
-
- TriggerForIncompleteCompletions CompletionTriggerKind = 3
- /**
- * Reports an error.
- */
-
- SeverityError DiagnosticSeverity = 1
- /**
- * Reports a warning.
- */
-
- SeverityWarning DiagnosticSeverity = 2
- /**
- * Reports an information.
- */
-
- SeverityInformation DiagnosticSeverity = 3
- /**
- * Reports a hint.
- */
-
- SeverityHint DiagnosticSeverity = 4
- /**
- * Unused or unnecessary code.
- *
- * Clients are allowed to render diagnostics with this tag faded out instead of having
- * an error squiggle.
- */
-
- Unnecessary DiagnosticTag = 1
- /**
- * Deprecated or obsolete code.
- *
- * Clients are allowed to rendered diagnostics with this tag strike through.
- */
-
- Deprecated DiagnosticTag = 2
- /**
- * A textual occurrence.
- */
-
- Text DocumentHighlightKind = 1
- /**
- * Read-access of a symbol, like reading a variable.
- */
-
- Read DocumentHighlightKind = 2
- /**
- * Write-access of a symbol, like writing to a variable.
- */
-
- Write DocumentHighlightKind = 3
- /**
- * Applying the workspace change is simply aborted if one of the changes provided
- * fails. All operations executed before the failing operation stay executed.
- */
-
- Abort FailureHandlingKind = "abort"
- /**
- * All operations are executed transactional. That means they either all
- * succeed or no changes at all are applied to the workspace.
- */
-
- Transactional FailureHandlingKind = "transactional"
- /**
- * If the workspace edit contains only textual file changes they are executed transactional.
- * If resource changes (create, rename or delete file) are part of the change the failure
- * handling strategy is abort.
- */
-
- TextOnlyTransactional FailureHandlingKind = "textOnlyTransactional"
- /**
- * The client tries to undo the operations already executed. But there is no
- * guarantee that this is succeeding.
- */
-
- Undo FailureHandlingKind = "undo"
- /**
- * The file got created.
- */
-
- Created FileChangeType = 1
- /**
- * The file got changed.
- */
-
- Changed FileChangeType = 2
- /**
- * The file got deleted.
- */
-
- Deleted FileChangeType = 3
- /**
- * The pattern matches a file only.
- */
-
- FileOp FileOperationPatternKind = "file"
- /**
- * The pattern matches a folder only.
- */
-
- FolderOp FileOperationPatternKind = "folder"
- /**
- * Folding range for a comment
- */
- Comment FoldingRangeKind = "comment"
- /**
- * Folding range for a imports or includes
- */
- Imports FoldingRangeKind = "imports"
- /**
- * Folding range for a region (e.g. `#region`)
- */
- Region FoldingRangeKind = "region"
- /**
- * If the protocol version provided by the client can't be handled by the server.
- * @deprecated This initialize error got replaced by client capabilities. There is
- * no version handshake in version 3.0x
- */
-
- UnknownProtocolVersion InitializeError = 1
- /**
- * An inlay hint that for a type annotation.
- */
-
- Type InlayHintKind = 1
- /**
- * An inlay hint that is for a parameter.
- */
-
- Parameter InlayHintKind = 2
- /**
- * The primary text to be inserted is treated as a plain string.
- */
-
- PlainTextTextFormat InsertTextFormat = 1
- /**
- * The primary text to be inserted is treated as a snippet.
- *
- * A snippet can define tab stops and placeholders with `$1`, `$2`
- * and `${3:foo}`. `$0` defines the final tab stop, it defaults to
- * the end of the snippet. Placeholders with equal identifiers are linked,
- * that is typing in one will update others too.
- *
- * See also: https://microsoft.github.io/language-server-protocol/specifications/specification-current/#snippet_syntax
- */
-
- SnippetTextFormat InsertTextFormat = 2
- /**
- * The insertion or replace strings is taken as it is. If the
- * value is multi line the lines below the cursor will be
- * inserted using the indentation defined in the string value.
- * The client will not apply any kind of adjustments to the
- * string.
- */
-
- AsIs InsertTextMode = 1
- /**
- * The editor adjusts leading whitespace of new lines so that
- * they match the indentation up to the cursor of the line for
- * which the item is accepted.
- *
- * Consider a line like this: <2tabs><cursor><3tabs>foo. Accepting a
- * multi line completion item is indented using 2 tabs and all
- * following lines inserted will be indented using 2 tabs as well.
- */
-
- AdjustIndentation InsertTextMode = 2
- /**
- * Plain text is supported as a content format
- */
-
- PlainText MarkupKind = "plaintext"
- /**
- * Markdown is supported as a content format
- */
-
- Markdown MarkupKind = "markdown"
- /**
- * An error message.
- */
-
- Error MessageType = 1
- /**
- * A warning message.
- */
-
- Warning MessageType = 2
- /**
- * An information message.
- */
-
- Info MessageType = 3
- /**
- * A log message.
- */
-
- Log MessageType = 4
- /**
- * The moniker represent a symbol that is imported into a project
- */
- Import MonikerKind = "import"
- /**
- * The moniker represents a symbol that is exported from a project
- */
- Export MonikerKind = "export"
- /**
- * The moniker represents a symbol that is local to a project (e.g. a local
- * variable of a function, a class not visible outside the project, ...)
- */
- Local MonikerKind = "local"
- /**
- * A markup-cell is formatted source that is used for display.
- */
-
- Markup NotebookCellKind = 1
- /**
- * A code-cell is source code.
- */
-
- Code NotebookCellKind = 2
- /**
- * Supports creating new files and folders.
- */
-
- Create ResourceOperationKind = "create"
- /**
- * Supports renaming existing files and folders.
- */
-
- Rename ResourceOperationKind = "rename"
- /**
- * Supports deleting existing files and folders.
- */
-
- Delete ResourceOperationKind = "delete"
- /**
- * Signature help was invoked manually by the user or by a command.
- */
-
- SigInvoked SignatureHelpTriggerKind = 1
- /**
- * Signature help was triggered by a trigger character.
- */
-
- SigTriggerCharacter SignatureHelpTriggerKind = 2
- /**
- * Signature help was triggered by the cursor moving or by the document content changing.
- */
-
- SigContentChange SignatureHelpTriggerKind = 3
- File SymbolKind = 1
- Module SymbolKind = 2
- Namespace SymbolKind = 3
- Package SymbolKind = 4
- Class SymbolKind = 5
- Method SymbolKind = 6
- Property SymbolKind = 7
- Field SymbolKind = 8
- Constructor SymbolKind = 9
- Enum SymbolKind = 10
- Interface SymbolKind = 11
- Function SymbolKind = 12
- Variable SymbolKind = 13
- Constant SymbolKind = 14
- String SymbolKind = 15
- Number SymbolKind = 16
- Boolean SymbolKind = 17
- Array SymbolKind = 18
- Object SymbolKind = 19
- Key SymbolKind = 20
- Null SymbolKind = 21
- EnumMember SymbolKind = 22
- Struct SymbolKind = 23
- Event SymbolKind = 24
- Operator SymbolKind = 25
- TypeParameter SymbolKind = 26
- /**
- * Render a symbol as obsolete, usually using a strike-out.
- */
-
- DeprecatedSymbol SymbolTag = 1
- /**
- * Manually triggered, e.g. by the user pressing save, by starting debugging,
- * or by an API call.
- */
-
- Manual TextDocumentSaveReason = 1
- /**
- * Automatic after a delay.
- */
-
- AfterDelay TextDocumentSaveReason = 2
- /**
- * When the editor lost focus.
- */
-
- FocusOut TextDocumentSaveReason = 3
- /**
- * Documents should not be synced at all.
- */
-
- None TextDocumentSyncKind = 0
- /**
- * Documents are synced by always sending the full content
- * of the document.
- */
-
- Full TextDocumentSyncKind = 1
- /**
- * Documents are synced by sending the full content on open.
- * After that only incremental updates to the document are
- * send.
- */
-
- Incremental TextDocumentSyncKind = 2
- /**
- * The moniker is only unique inside a document
- */
- Document UniquenessLevel = "document"
- /**
- * The moniker is unique inside a project for which a dump got created
- */
- Project UniquenessLevel = "project"
- /**
- * The moniker is unique inside the group to which a project belongs
- */
- Group UniquenessLevel = "group"
- /**
- * The moniker is unique inside the moniker scheme.
- */
- Scheme UniquenessLevel = "scheme"
- /**
- * The moniker is globally unique
- */
- Global UniquenessLevel = "global"
- /**
- * Interested in create events.
- */
-
- WatchCreate WatchKind = 1
- /**
- * Interested in change events
- */
-
- WatchChange WatchKind = 2
- /**
- * Interested in delete events
- */
-
- WatchDelete WatchKind = 4
-)
-
-// Types created to name formal parameters and embedded structs
-type ParamConfiguration struct {
- ConfigurationParams
- PartialResultParams
-}
-type ParamInitialize struct {
- InitializeParams
- WorkDoneProgressParams
-}
-type PrepareRename2Gn struct {
- Range Range `json:"range"`
- Placeholder string `json:"placeholder"`
-}
-type Workspace3Gn struct {
- /**
- * The client supports applying batch edits
- * to the workspace by supporting the request
- * 'workspace/applyEdit'
- */
- ApplyEdit bool `json:"applyEdit,omitempty"`
-
- /**
- * Capabilities specific to `WorkspaceEdit`s
- */
- WorkspaceEdit *WorkspaceEditClientCapabilities `json:"workspaceEdit,omitempty"`
-
- /**
- * Capabilities specific to the `workspace/didChangeConfiguration` notification.
- */
- DidChangeConfiguration DidChangeConfigurationClientCapabilities `json:"didChangeConfiguration,omitempty"`
-
- /**
- * Capabilities specific to the `workspace/didChangeWatchedFiles` notification.
- */
- DidChangeWatchedFiles DidChangeWatchedFilesClientCapabilities `json:"didChangeWatchedFiles,omitempty"`
-
- /**
- * Capabilities specific to the `workspace/symbol` request.
- */
- Symbol *WorkspaceSymbolClientCapabilities `json:"symbol,omitempty"`
-
- /**
- * Capabilities specific to the `workspace/executeCommand` request.
- */
- ExecuteCommand ExecuteCommandClientCapabilities `json:"executeCommand,omitempty"`
-
- /**
- * Capabilities specific to the semantic token requests scoped to the
- * workspace.
- *
- * @since 3.16.0.
- */
- SemanticTokens SemanticTokensWorkspaceClientCapabilities `json:"semanticTokens,omitempty"`
-
- /**
- * Capabilities specific to the code lens requests scoped to the
- * workspace.
- *
- * @since 3.16.0.
- */
- CodeLens CodeLensWorkspaceClientCapabilities `json:"codeLens,omitempty"`
-
- /**
- * The client has support for file notifications/requests for user operations on files.
- *
- * Since 3.16.0
- */
- FileOperations *FileOperationClientCapabilities `json:"fileOperations,omitempty"`
-
- /**
- * Capabilities specific to the inline values requests scoped to the
- * workspace.
- *
- * @since 3.17.0.
- */
- InlineValue InlineValueWorkspaceClientCapabilities `json:"inlineValue,omitempty"`
-
- /**
- * Capabilities specific to the inlay hints requests scoped to the
- * workspace.
- *
- * @since 3.17.0.
- */
- InlayHint InlayHintWorkspaceClientCapabilities `json:"inlayHint,omitempty"`
-
- /**
- * The client has support for workspace folders
- *
- * @since 3.6.0
- */
- WorkspaceFolders bool `json:"workspaceFolders,omitempty"`
-
- /**
- * The client supports `workspace/configuration` requests.
- *
- * @since 3.6.0
- */
- Configuration bool `json:"configuration,omitempty"`
-}
-type Workspace4Gn struct {
- /**
- * The client supports applying batch edits
- * to the workspace by supporting the request
- * 'workspace/applyEdit'
- */
- ApplyEdit bool `json:"applyEdit,omitempty"`
-
- /**
- * Capabilities specific to `WorkspaceEdit`s
- */
- WorkspaceEdit *WorkspaceEditClientCapabilities `json:"workspaceEdit,omitempty"`
-
- /**
- * Capabilities specific to the `workspace/didChangeConfiguration` notification.
- */
- DidChangeConfiguration DidChangeConfigurationClientCapabilities `json:"didChangeConfiguration,omitempty"`
-
- /**
- * Capabilities specific to the `workspace/didChangeWatchedFiles` notification.
- */
- DidChangeWatchedFiles DidChangeWatchedFilesClientCapabilities `json:"didChangeWatchedFiles,omitempty"`
-
- /**
- * Capabilities specific to the `workspace/symbol` request.
- */
- Symbol *WorkspaceSymbolClientCapabilities `json:"symbol,omitempty"`
-
- /**
- * Capabilities specific to the `workspace/executeCommand` request.
- */
- ExecuteCommand ExecuteCommandClientCapabilities `json:"executeCommand,omitempty"`
-
- /**
- * Capabilities specific to the semantic token requests scoped to the
- * workspace.
- *
- * @since 3.16.0.
- */
- SemanticTokens SemanticTokensWorkspaceClientCapabilities `json:"semanticTokens,omitempty"`
-
- /**
- * Capabilities specific to the code lens requests scoped to the
- * workspace.
- *
- * @since 3.16.0.
- */
- CodeLens CodeLensWorkspaceClientCapabilities `json:"codeLens,omitempty"`
-
- /**
- * The client has support for file notifications/requests for user operations on files.
- *
- * Since 3.16.0
- */
- FileOperations *FileOperationClientCapabilities `json:"fileOperations,omitempty"`
-
- /**
- * Capabilities specific to the inline values requests scoped to the
- * workspace.
- *
- * @since 3.17.0.
- */
- InlineValue InlineValueWorkspaceClientCapabilities `json:"inlineValue,omitempty"`
-
- /**
- * Capabilities specific to the inlay hints requests scoped to the
- * workspace.
- *
- * @since 3.17.0.
- */
- InlayHint InlayHintWorkspaceClientCapabilities `json:"inlayHint,omitempty"`
-
- /**
- * The client has support for workspace folders
- *
- * @since 3.6.0
- */
- WorkspaceFolders bool `json:"workspaceFolders,omitempty"`
-
- /**
- * The client supports `workspace/configuration` requests.
- *
- * @since 3.6.0
- */
- Configuration bool `json:"configuration,omitempty"`
-}
-type WorkspaceFolders5Gn struct {
- /**
- * The Server has support for workspace folders
- */
- Supported bool `json:"supported,omitempty"`
-
- /**
- * Whether the server wants to receive workspace folder
- * change notifications.
- *
- * If a strings is provided the string is treated as a ID
- * under which the notification is registered on the client
- * side. The ID can be used to unregister for these events
- * using the `client/unregisterCapability` request.
- */
- ChangeNotifications string/*string | boolean*/ `json:"changeNotifications,omitempty"`
-}
-type Workspace6Gn struct {
- /**
- * The server is interested in notifications/requests for operations on files.
- *
- * @since 3.16.0
- */
- FileOperations *FileOperationOptions `json:"fileOperations,omitempty"`
-
- WorkspaceFolders WorkspaceFolders5Gn `json:"workspaceFolders,omitempty"`
-}
-type Workspace7Gn struct {
- /**
- * The client supports applying batch edits
- * to the workspace by supporting the request
- * 'workspace/applyEdit'
- */
- ApplyEdit bool `json:"applyEdit,omitempty"`
-
- /**
- * Capabilities specific to `WorkspaceEdit`s
- */
- WorkspaceEdit *WorkspaceEditClientCapabilities `json:"workspaceEdit,omitempty"`
-
- /**
- * Capabilities specific to the `workspace/didChangeConfiguration` notification.
- */
- DidChangeConfiguration DidChangeConfigurationClientCapabilities `json:"didChangeConfiguration,omitempty"`
-
- /**
- * Capabilities specific to the `workspace/didChangeWatchedFiles` notification.
- */
- DidChangeWatchedFiles DidChangeWatchedFilesClientCapabilities `json:"didChangeWatchedFiles,omitempty"`
-
- /**
- * Capabilities specific to the `workspace/symbol` request.
- */
- Symbol *WorkspaceSymbolClientCapabilities `json:"symbol,omitempty"`
-
- /**
- * Capabilities specific to the `workspace/executeCommand` request.
- */
- ExecuteCommand ExecuteCommandClientCapabilities `json:"executeCommand,omitempty"`
-
- /**
- * Capabilities specific to the semantic token requests scoped to the
- * workspace.
- *
- * @since 3.16.0.
- */
- SemanticTokens SemanticTokensWorkspaceClientCapabilities `json:"semanticTokens,omitempty"`
-
- /**
- * Capabilities specific to the code lens requests scoped to the
- * workspace.
- *
- * @since 3.16.0.
- */
- CodeLens CodeLensWorkspaceClientCapabilities `json:"codeLens,omitempty"`
-
- /**
- * The client has support for file notifications/requests for user operations on files.
- *
- * Since 3.16.0
- */
- FileOperations *FileOperationClientCapabilities `json:"fileOperations,omitempty"`
-
- /**
- * Capabilities specific to the inline values requests scoped to the
- * workspace.
- *
- * @since 3.17.0.
- */
- InlineValue InlineValueWorkspaceClientCapabilities `json:"inlineValue,omitempty"`
-
- /**
- * Capabilities specific to the inlay hints requests scoped to the
- * workspace.
- *
- * @since 3.17.0.
- */
- InlayHint InlayHintWorkspaceClientCapabilities `json:"inlayHint,omitempty"`
-
- /**
- * The client has support for workspace folders
- *
- * @since 3.6.0
- */
- WorkspaceFolders bool `json:"workspaceFolders,omitempty"`
-
- /**
- * The client supports `workspace/configuration` requests.
- *
- * @since 3.6.0
- */
- Configuration bool `json:"configuration,omitempty"`
-}
-type WorkspaceFolders8Gn struct {
- /**
- * The Server has support for workspace folders
- */
- Supported bool `json:"supported,omitempty"`
-
- /**
- * Whether the server wants to receive workspace folder
- * change notifications.
- *
- * If a strings is provided the string is treated as a ID
- * under which the notification is registered on the client
- * side. The ID can be used to unregister for these events
- * using the `client/unregisterCapability` request.
- */
- ChangeNotifications string/*string | boolean*/ `json:"changeNotifications,omitempty"`
-}
-type Workspace9Gn struct {
- /**
- * The server is interested in notifications/requests for operations on files.
- *
- * @since 3.16.0
- */
- FileOperations *FileOperationOptions `json:"fileOperations,omitempty"`
-
- WorkspaceFolders WorkspaceFolders8Gn `json:"workspaceFolders,omitempty"`
-}
diff --git a/internal/lsp/protocol/tsserver.go b/internal/lsp/protocol/tsserver.go
deleted file mode 100644
index db345b3e4..000000000
--- a/internal/lsp/protocol/tsserver.go
+++ /dev/null
@@ -1,1143 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Code generated (see typescript/README.md) DO NOT EDIT.
-
-package protocol
-
-// Package protocol contains data types and code for LSP json rpcs
-// generated automatically from vscode-languageserver-node
-// commit: 696f9285bf849b73745682fdb1c1feac73eb8772
-// last fetched Fri Mar 04 2022 14:48:10 GMT-0500 (Eastern Standard Time)
-
-import (
- "context"
- "encoding/json"
-
- "golang.org/x/tools/internal/jsonrpc2"
- errors "golang.org/x/xerrors"
-)
-
-type Server interface {
- DidChangeWorkspaceFolders(context.Context, *DidChangeWorkspaceFoldersParams) error
- WorkDoneProgressCancel(context.Context, *WorkDoneProgressCancelParams) error
- DidCreateFiles(context.Context, *CreateFilesParams) error
- DidRenameFiles(context.Context, *RenameFilesParams) error
- DidDeleteFiles(context.Context, *DeleteFilesParams) error
- Initialized(context.Context, *InitializedParams) error
- Exit(context.Context) error
- DidChangeConfiguration(context.Context, *DidChangeConfigurationParams) error
- DidOpen(context.Context, *DidOpenTextDocumentParams) error
- DidChange(context.Context, *DidChangeTextDocumentParams) error
- DidClose(context.Context, *DidCloseTextDocumentParams) error
- DidSave(context.Context, *DidSaveTextDocumentParams) error
- WillSave(context.Context, *WillSaveTextDocumentParams) error
- DidChangeWatchedFiles(context.Context, *DidChangeWatchedFilesParams) error
- DidOpenNotebookDocument(context.Context, *DidOpenNotebookDocumentParams) error
- DidChangeNotebookDocument(context.Context, *DidChangeNotebookDocumentParams) error
- DidSaveNotebookDocument(context.Context, *DidSaveNotebookDocumentParams) error
- DidCloseNotebookDocument(context.Context, *DidCloseNotebookDocumentParams) error
- SetTrace(context.Context, *SetTraceParams) error
- LogTrace(context.Context, *LogTraceParams) error
- Implementation(context.Context, *ImplementationParams) (Definition /*Definition | DefinitionLink[] | null*/, error)
- TypeDefinition(context.Context, *TypeDefinitionParams) (Definition /*Definition | DefinitionLink[] | null*/, error)
- DocumentColor(context.Context, *DocumentColorParams) ([]ColorInformation, error)
- ColorPresentation(context.Context, *ColorPresentationParams) ([]ColorPresentation, error)
- FoldingRange(context.Context, *FoldingRangeParams) ([]FoldingRange /*FoldingRange[] | null*/, error)
- Declaration(context.Context, *DeclarationParams) (Declaration /*Declaration | DeclarationLink[] | null*/, error)
- SelectionRange(context.Context, *SelectionRangeParams) ([]SelectionRange /*SelectionRange[] | null*/, error)
- PrepareCallHierarchy(context.Context, *CallHierarchyPrepareParams) ([]CallHierarchyItem /*CallHierarchyItem[] | null*/, error)
- IncomingCalls(context.Context, *CallHierarchyIncomingCallsParams) ([]CallHierarchyIncomingCall /*CallHierarchyIncomingCall[] | null*/, error)
- OutgoingCalls(context.Context, *CallHierarchyOutgoingCallsParams) ([]CallHierarchyOutgoingCall /*CallHierarchyOutgoingCall[] | null*/, error)
- SemanticTokensFull(context.Context, *SemanticTokensParams) (*SemanticTokens /*SemanticTokens | null*/, error)
- SemanticTokensFullDelta(context.Context, *SemanticTokensDeltaParams) (interface{} /* SemanticTokens | SemanticTokensDelta | float64*/, error)
- SemanticTokensRange(context.Context, *SemanticTokensRangeParams) (*SemanticTokens /*SemanticTokens | null*/, error)
- SemanticTokensRefresh(context.Context) error
- LinkedEditingRange(context.Context, *LinkedEditingRangeParams) (*LinkedEditingRanges /*LinkedEditingRanges | null*/, error)
- WillCreateFiles(context.Context, *CreateFilesParams) (*WorkspaceEdit /*WorkspaceEdit | null*/, error)
- WillRenameFiles(context.Context, *RenameFilesParams) (*WorkspaceEdit /*WorkspaceEdit | null*/, error)
- WillDeleteFiles(context.Context, *DeleteFilesParams) (*WorkspaceEdit /*WorkspaceEdit | null*/, error)
- Moniker(context.Context, *MonikerParams) ([]Moniker /*Moniker[] | null*/, error)
- PrepareTypeHierarchy(context.Context, *TypeHierarchyPrepareParams) ([]TypeHierarchyItem /*TypeHierarchyItem[] | null*/, error)
- Supertypes(context.Context, *TypeHierarchySupertypesParams) ([]TypeHierarchyItem /*TypeHierarchyItem[] | null*/, error)
- Subtypes(context.Context, *TypeHierarchySubtypesParams) ([]TypeHierarchyItem /*TypeHierarchyItem[] | null*/, error)
- InlineValue(context.Context, *InlineValueParams) ([]InlineValue /*InlineValue[] | null*/, error)
- InlineValueRefresh(context.Context) error
- InlayHint(context.Context, *InlayHintParams) ([]InlayHint /*InlayHint[] | null*/, error)
- Resolve(context.Context, *InlayHint) (*InlayHint, error)
- InlayHintRefresh(context.Context) error
- Initialize(context.Context, *ParamInitialize) (*InitializeResult, error)
- Shutdown(context.Context) error
- WillSaveWaitUntil(context.Context, *WillSaveTextDocumentParams) ([]TextEdit /*TextEdit[] | null*/, error)
- Completion(context.Context, *CompletionParams) (*CompletionList /*CompletionItem[] | CompletionList | null*/, error)
- ResolveCompletionItem(context.Context, *CompletionItem) (*CompletionItem, error)
- Hover(context.Context, *HoverParams) (*Hover /*Hover | null*/, error)
- SignatureHelp(context.Context, *SignatureHelpParams) (*SignatureHelp /*SignatureHelp | null*/, error)
- Definition(context.Context, *DefinitionParams) (Definition /*Definition | DefinitionLink[] | null*/, error)
- References(context.Context, *ReferenceParams) ([]Location /*Location[] | null*/, error)
- DocumentHighlight(context.Context, *DocumentHighlightParams) ([]DocumentHighlight /*DocumentHighlight[] | null*/, error)
- DocumentSymbol(context.Context, *DocumentSymbolParams) ([]interface{} /*SymbolInformation[] | DocumentSymbol[] | null*/, error)
- CodeAction(context.Context, *CodeActionParams) ([]CodeAction /*(Command | CodeAction)[] | null*/, error)
- ResolveCodeAction(context.Context, *CodeAction) (*CodeAction, error)
- Symbol(context.Context, *WorkspaceSymbolParams) ([]SymbolInformation /*SymbolInformation[] | WorkspaceSymbol[] | null*/, error)
- ResolveWorkspaceSymbol(context.Context, *WorkspaceSymbol) (*WorkspaceSymbol, error)
- CodeLens(context.Context, *CodeLensParams) ([]CodeLens /*CodeLens[] | null*/, error)
- ResolveCodeLens(context.Context, *CodeLens) (*CodeLens, error)
- CodeLensRefresh(context.Context) error
- DocumentLink(context.Context, *DocumentLinkParams) ([]DocumentLink /*DocumentLink[] | null*/, error)
- ResolveDocumentLink(context.Context, *DocumentLink) (*DocumentLink, error)
- Formatting(context.Context, *DocumentFormattingParams) ([]TextEdit /*TextEdit[] | null*/, error)
- RangeFormatting(context.Context, *DocumentRangeFormattingParams) ([]TextEdit /*TextEdit[] | null*/, error)
- OnTypeFormatting(context.Context, *DocumentOnTypeFormattingParams) ([]TextEdit /*TextEdit[] | null*/, error)
- Rename(context.Context, *RenameParams) (*WorkspaceEdit /*WorkspaceEdit | null*/, error)
- PrepareRename(context.Context, *PrepareRenameParams) (*PrepareRename2Gn /*Range | { range: Range; placeholder: string } | { defaultBehavior: boolean } | null*/, error)
- ExecuteCommand(context.Context, *ExecuteCommandParams) (interface{} /* LSPAny | void | float64*/, error)
- Diagnostic(context.Context, *string) (*string, error)
- DiagnosticWorkspace(context.Context, *WorkspaceDiagnosticParams) (*WorkspaceDiagnosticReport, error)
- DiagnosticRefresh(context.Context) error
- NonstandardRequest(ctx context.Context, method string, params interface{}) (interface{}, error)
-}
-
-func serverDispatch(ctx context.Context, server Server, reply jsonrpc2.Replier, r jsonrpc2.Request) (bool, error) {
- switch r.Method() {
- case "workspace/didChangeWorkspaceFolders": // notif
- var params DidChangeWorkspaceFoldersParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- err := server.DidChangeWorkspaceFolders(ctx, &params)
- return true, reply(ctx, nil, err)
- case "window/workDoneProgress/cancel": // notif
- var params WorkDoneProgressCancelParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- err := server.WorkDoneProgressCancel(ctx, &params)
- return true, reply(ctx, nil, err)
- case "workspace/didCreateFiles": // notif
- var params CreateFilesParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- err := server.DidCreateFiles(ctx, &params)
- return true, reply(ctx, nil, err)
- case "workspace/didRenameFiles": // notif
- var params RenameFilesParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- err := server.DidRenameFiles(ctx, &params)
- return true, reply(ctx, nil, err)
- case "workspace/didDeleteFiles": // notif
- var params DeleteFilesParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- err := server.DidDeleteFiles(ctx, &params)
- return true, reply(ctx, nil, err)
- case "initialized": // notif
- var params InitializedParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- err := server.Initialized(ctx, &params)
- return true, reply(ctx, nil, err)
- case "exit": // notif
- err := server.Exit(ctx)
- return true, reply(ctx, nil, err)
- case "workspace/didChangeConfiguration": // notif
- var params DidChangeConfigurationParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- err := server.DidChangeConfiguration(ctx, &params)
- return true, reply(ctx, nil, err)
- case "textDocument/didOpen": // notif
- var params DidOpenTextDocumentParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- err := server.DidOpen(ctx, &params)
- return true, reply(ctx, nil, err)
- case "textDocument/didChange": // notif
- var params DidChangeTextDocumentParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- err := server.DidChange(ctx, &params)
- return true, reply(ctx, nil, err)
- case "textDocument/didClose": // notif
- var params DidCloseTextDocumentParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- err := server.DidClose(ctx, &params)
- return true, reply(ctx, nil, err)
- case "textDocument/didSave": // notif
- var params DidSaveTextDocumentParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- err := server.DidSave(ctx, &params)
- return true, reply(ctx, nil, err)
- case "textDocument/willSave": // notif
- var params WillSaveTextDocumentParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- err := server.WillSave(ctx, &params)
- return true, reply(ctx, nil, err)
- case "workspace/didChangeWatchedFiles": // notif
- var params DidChangeWatchedFilesParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- err := server.DidChangeWatchedFiles(ctx, &params)
- return true, reply(ctx, nil, err)
- case "notebookDocument/didOpen": // notif
- var params DidOpenNotebookDocumentParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- err := server.DidOpenNotebookDocument(ctx, &params)
- return true, reply(ctx, nil, err)
- case "notebookDocument/didChange": // notif
- var params DidChangeNotebookDocumentParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- err := server.DidChangeNotebookDocument(ctx, &params)
- return true, reply(ctx, nil, err)
- case "notebookDocument/didSave": // notif
- var params DidSaveNotebookDocumentParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- err := server.DidSaveNotebookDocument(ctx, &params)
- return true, reply(ctx, nil, err)
- case "notebookDocument/didClose": // notif
- var params DidCloseNotebookDocumentParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- err := server.DidCloseNotebookDocument(ctx, &params)
- return true, reply(ctx, nil, err)
- case "$/setTrace": // notif
- var params SetTraceParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- err := server.SetTrace(ctx, &params)
- return true, reply(ctx, nil, err)
- case "$/logTrace": // notif
- var params LogTraceParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- err := server.LogTrace(ctx, &params)
- return true, reply(ctx, nil, err)
- case "textDocument/implementation": // req
- var params ImplementationParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.Implementation(ctx, &params)
- return true, reply(ctx, resp, err)
- case "textDocument/typeDefinition": // req
- var params TypeDefinitionParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.TypeDefinition(ctx, &params)
- return true, reply(ctx, resp, err)
- case "textDocument/documentColor": // req
- var params DocumentColorParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.DocumentColor(ctx, &params)
- return true, reply(ctx, resp, err)
- case "textDocument/colorPresentation": // req
- var params ColorPresentationParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.ColorPresentation(ctx, &params)
- return true, reply(ctx, resp, err)
- case "textDocument/foldingRange": // req
- var params FoldingRangeParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.FoldingRange(ctx, &params)
- return true, reply(ctx, resp, err)
- case "textDocument/declaration": // req
- var params DeclarationParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.Declaration(ctx, &params)
- return true, reply(ctx, resp, err)
- case "textDocument/selectionRange": // req
- var params SelectionRangeParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.SelectionRange(ctx, &params)
- return true, reply(ctx, resp, err)
- case "textDocument/prepareCallHierarchy": // req
- var params CallHierarchyPrepareParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.PrepareCallHierarchy(ctx, &params)
- return true, reply(ctx, resp, err)
- case "callHierarchy/incomingCalls": // req
- var params CallHierarchyIncomingCallsParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.IncomingCalls(ctx, &params)
- return true, reply(ctx, resp, err)
- case "callHierarchy/outgoingCalls": // req
- var params CallHierarchyOutgoingCallsParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.OutgoingCalls(ctx, &params)
- return true, reply(ctx, resp, err)
- case "textDocument/semanticTokens/full": // req
- var params SemanticTokensParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.SemanticTokensFull(ctx, &params)
- return true, reply(ctx, resp, err)
- case "textDocument/semanticTokens/full/delta": // req
- var params SemanticTokensDeltaParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.SemanticTokensFullDelta(ctx, &params)
- return true, reply(ctx, resp, err)
- case "textDocument/semanticTokens/range": // req
- var params SemanticTokensRangeParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.SemanticTokensRange(ctx, &params)
- return true, reply(ctx, resp, err)
- case "workspace/semanticTokens/refresh": // req
- if len(r.Params()) > 0 {
- return true, reply(ctx, nil, errors.Errorf("%w: expected no params", jsonrpc2.ErrInvalidParams))
- }
- err := server.SemanticTokensRefresh(ctx)
- return true, reply(ctx, nil, err)
- case "textDocument/linkedEditingRange": // req
- var params LinkedEditingRangeParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.LinkedEditingRange(ctx, &params)
- return true, reply(ctx, resp, err)
- case "workspace/willCreateFiles": // req
- var params CreateFilesParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.WillCreateFiles(ctx, &params)
- return true, reply(ctx, resp, err)
- case "workspace/willRenameFiles": // req
- var params RenameFilesParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.WillRenameFiles(ctx, &params)
- return true, reply(ctx, resp, err)
- case "workspace/willDeleteFiles": // req
- var params DeleteFilesParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.WillDeleteFiles(ctx, &params)
- return true, reply(ctx, resp, err)
- case "textDocument/moniker": // req
- var params MonikerParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.Moniker(ctx, &params)
- return true, reply(ctx, resp, err)
- case "textDocument/prepareTypeHierarchy": // req
- var params TypeHierarchyPrepareParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.PrepareTypeHierarchy(ctx, &params)
- return true, reply(ctx, resp, err)
- case "typeHierarchy/supertypes": // req
- var params TypeHierarchySupertypesParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.Supertypes(ctx, &params)
- return true, reply(ctx, resp, err)
- case "typeHierarchy/subtypes": // req
- var params TypeHierarchySubtypesParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.Subtypes(ctx, &params)
- return true, reply(ctx, resp, err)
- case "textDocument/inlineValue": // req
- var params InlineValueParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.InlineValue(ctx, &params)
- return true, reply(ctx, resp, err)
- case "workspace/inlineValue/refresh": // req
- if len(r.Params()) > 0 {
- return true, reply(ctx, nil, errors.Errorf("%w: expected no params", jsonrpc2.ErrInvalidParams))
- }
- err := server.InlineValueRefresh(ctx)
- return true, reply(ctx, nil, err)
- case "textDocument/inlayHint": // req
- var params InlayHintParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.InlayHint(ctx, &params)
- return true, reply(ctx, resp, err)
- case "inlayHint/resolve": // req
- var params InlayHint
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.Resolve(ctx, &params)
- return true, reply(ctx, resp, err)
- case "workspace/inlayHint/refresh": // req
- if len(r.Params()) > 0 {
- return true, reply(ctx, nil, errors.Errorf("%w: expected no params", jsonrpc2.ErrInvalidParams))
- }
- err := server.InlayHintRefresh(ctx)
- return true, reply(ctx, nil, err)
- case "initialize": // req
- var params ParamInitialize
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- if _, ok := err.(*json.UnmarshalTypeError); !ok {
- return true, sendParseError(ctx, reply, err)
- }
- }
- resp, err := server.Initialize(ctx, &params)
- return true, reply(ctx, resp, err)
- case "shutdown": // req
- if len(r.Params()) > 0 {
- return true, reply(ctx, nil, errors.Errorf("%w: expected no params", jsonrpc2.ErrInvalidParams))
- }
- err := server.Shutdown(ctx)
- return true, reply(ctx, nil, err)
- case "textDocument/willSaveWaitUntil": // req
- var params WillSaveTextDocumentParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.WillSaveWaitUntil(ctx, &params)
- return true, reply(ctx, resp, err)
- case "textDocument/completion": // req
- var params CompletionParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.Completion(ctx, &params)
- return true, reply(ctx, resp, err)
- case "completionItem/resolve": // req
- var params CompletionItem
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.ResolveCompletionItem(ctx, &params)
- return true, reply(ctx, resp, err)
- case "textDocument/hover": // req
- var params HoverParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.Hover(ctx, &params)
- return true, reply(ctx, resp, err)
- case "textDocument/signatureHelp": // req
- var params SignatureHelpParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.SignatureHelp(ctx, &params)
- return true, reply(ctx, resp, err)
- case "textDocument/definition": // req
- var params DefinitionParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.Definition(ctx, &params)
- return true, reply(ctx, resp, err)
- case "textDocument/references": // req
- var params ReferenceParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.References(ctx, &params)
- return true, reply(ctx, resp, err)
- case "textDocument/documentHighlight": // req
- var params DocumentHighlightParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.DocumentHighlight(ctx, &params)
- return true, reply(ctx, resp, err)
- case "textDocument/documentSymbol": // req
- var params DocumentSymbolParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.DocumentSymbol(ctx, &params)
- return true, reply(ctx, resp, err)
- case "textDocument/codeAction": // req
- var params CodeActionParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.CodeAction(ctx, &params)
- return true, reply(ctx, resp, err)
- case "codeAction/resolve": // req
- var params CodeAction
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.ResolveCodeAction(ctx, &params)
- return true, reply(ctx, resp, err)
- case "workspace/symbol": // req
- var params WorkspaceSymbolParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.Symbol(ctx, &params)
- return true, reply(ctx, resp, err)
- case "workspaceSymbol/resolve": // req
- var params WorkspaceSymbol
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.ResolveWorkspaceSymbol(ctx, &params)
- return true, reply(ctx, resp, err)
- case "textDocument/codeLens": // req
- var params CodeLensParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.CodeLens(ctx, &params)
- return true, reply(ctx, resp, err)
- case "codeLens/resolve": // req
- var params CodeLens
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.ResolveCodeLens(ctx, &params)
- return true, reply(ctx, resp, err)
- case "workspace/codeLens/refresh": // req
- if len(r.Params()) > 0 {
- return true, reply(ctx, nil, errors.Errorf("%w: expected no params", jsonrpc2.ErrInvalidParams))
- }
- err := server.CodeLensRefresh(ctx)
- return true, reply(ctx, nil, err)
- case "textDocument/documentLink": // req
- var params DocumentLinkParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.DocumentLink(ctx, &params)
- return true, reply(ctx, resp, err)
- case "documentLink/resolve": // req
- var params DocumentLink
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.ResolveDocumentLink(ctx, &params)
- return true, reply(ctx, resp, err)
- case "textDocument/formatting": // req
- var params DocumentFormattingParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.Formatting(ctx, &params)
- return true, reply(ctx, resp, err)
- case "textDocument/rangeFormatting": // req
- var params DocumentRangeFormattingParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.RangeFormatting(ctx, &params)
- return true, reply(ctx, resp, err)
- case "textDocument/onTypeFormatting": // req
- var params DocumentOnTypeFormattingParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.OnTypeFormatting(ctx, &params)
- return true, reply(ctx, resp, err)
- case "textDocument/rename": // req
- var params RenameParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.Rename(ctx, &params)
- return true, reply(ctx, resp, err)
- case "textDocument/prepareRename": // req
- var params PrepareRenameParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.PrepareRename(ctx, &params)
- return true, reply(ctx, resp, err)
- case "workspace/executeCommand": // req
- var params ExecuteCommandParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.ExecuteCommand(ctx, &params)
- return true, reply(ctx, resp, err)
- case "textDocument/diagnostic": // req
- var params string
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.Diagnostic(ctx, &params)
- return true, reply(ctx, resp, err)
- case "workspace/diagnostic": // req
- var params WorkspaceDiagnosticParams
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- resp, err := server.DiagnosticWorkspace(ctx, &params)
- return true, reply(ctx, resp, err)
- case "workspace/diagnostic/refresh": // req
- if len(r.Params()) > 0 {
- return true, reply(ctx, nil, errors.Errorf("%w: expected no params", jsonrpc2.ErrInvalidParams))
- }
- err := server.DiagnosticRefresh(ctx)
- return true, reply(ctx, nil, err)
-
- default:
- return false, nil
- }
-}
-
-func (s *serverDispatcher) DidChangeWorkspaceFolders(ctx context.Context, params *DidChangeWorkspaceFoldersParams) error {
- return s.sender.Notify(ctx, "workspace/didChangeWorkspaceFolders", params)
-}
-
-func (s *serverDispatcher) WorkDoneProgressCancel(ctx context.Context, params *WorkDoneProgressCancelParams) error {
- return s.sender.Notify(ctx, "window/workDoneProgress/cancel", params)
-}
-
-func (s *serverDispatcher) DidCreateFiles(ctx context.Context, params *CreateFilesParams) error {
- return s.sender.Notify(ctx, "workspace/didCreateFiles", params)
-}
-
-func (s *serverDispatcher) DidRenameFiles(ctx context.Context, params *RenameFilesParams) error {
- return s.sender.Notify(ctx, "workspace/didRenameFiles", params)
-}
-
-func (s *serverDispatcher) DidDeleteFiles(ctx context.Context, params *DeleteFilesParams) error {
- return s.sender.Notify(ctx, "workspace/didDeleteFiles", params)
-}
-
-func (s *serverDispatcher) Initialized(ctx context.Context, params *InitializedParams) error {
- return s.sender.Notify(ctx, "initialized", params)
-}
-
-func (s *serverDispatcher) Exit(ctx context.Context) error {
- return s.sender.Notify(ctx, "exit", nil)
-}
-
-func (s *serverDispatcher) DidChangeConfiguration(ctx context.Context, params *DidChangeConfigurationParams) error {
- return s.sender.Notify(ctx, "workspace/didChangeConfiguration", params)
-}
-
-func (s *serverDispatcher) DidOpen(ctx context.Context, params *DidOpenTextDocumentParams) error {
- return s.sender.Notify(ctx, "textDocument/didOpen", params)
-}
-
-func (s *serverDispatcher) DidChange(ctx context.Context, params *DidChangeTextDocumentParams) error {
- return s.sender.Notify(ctx, "textDocument/didChange", params)
-}
-
-func (s *serverDispatcher) DidClose(ctx context.Context, params *DidCloseTextDocumentParams) error {
- return s.sender.Notify(ctx, "textDocument/didClose", params)
-}
-
-func (s *serverDispatcher) DidSave(ctx context.Context, params *DidSaveTextDocumentParams) error {
- return s.sender.Notify(ctx, "textDocument/didSave", params)
-}
-
-func (s *serverDispatcher) WillSave(ctx context.Context, params *WillSaveTextDocumentParams) error {
- return s.sender.Notify(ctx, "textDocument/willSave", params)
-}
-
-func (s *serverDispatcher) DidChangeWatchedFiles(ctx context.Context, params *DidChangeWatchedFilesParams) error {
- return s.sender.Notify(ctx, "workspace/didChangeWatchedFiles", params)
-}
-
-func (s *serverDispatcher) DidOpenNotebookDocument(ctx context.Context, params *DidOpenNotebookDocumentParams) error {
- return s.sender.Notify(ctx, "notebookDocument/didOpen", params)
-}
-
-func (s *serverDispatcher) DidChangeNotebookDocument(ctx context.Context, params *DidChangeNotebookDocumentParams) error {
- return s.sender.Notify(ctx, "notebookDocument/didChange", params)
-}
-
-func (s *serverDispatcher) DidSaveNotebookDocument(ctx context.Context, params *DidSaveNotebookDocumentParams) error {
- return s.sender.Notify(ctx, "notebookDocument/didSave", params)
-}
-
-func (s *serverDispatcher) DidCloseNotebookDocument(ctx context.Context, params *DidCloseNotebookDocumentParams) error {
- return s.sender.Notify(ctx, "notebookDocument/didClose", params)
-}
-
-func (s *serverDispatcher) SetTrace(ctx context.Context, params *SetTraceParams) error {
- return s.sender.Notify(ctx, "$/setTrace", params)
-}
-
-func (s *serverDispatcher) LogTrace(ctx context.Context, params *LogTraceParams) error {
- return s.sender.Notify(ctx, "$/logTrace", params)
-}
-func (s *serverDispatcher) Implementation(ctx context.Context, params *ImplementationParams) (Definition /*Definition | DefinitionLink[] | null*/, error) {
- var result Definition /*Definition | DefinitionLink[] | null*/
- if err := s.sender.Call(ctx, "textDocument/implementation", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) TypeDefinition(ctx context.Context, params *TypeDefinitionParams) (Definition /*Definition | DefinitionLink[] | null*/, error) {
- var result Definition /*Definition | DefinitionLink[] | null*/
- if err := s.sender.Call(ctx, "textDocument/typeDefinition", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) DocumentColor(ctx context.Context, params *DocumentColorParams) ([]ColorInformation, error) {
- var result []ColorInformation
- if err := s.sender.Call(ctx, "textDocument/documentColor", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) ColorPresentation(ctx context.Context, params *ColorPresentationParams) ([]ColorPresentation, error) {
- var result []ColorPresentation
- if err := s.sender.Call(ctx, "textDocument/colorPresentation", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) FoldingRange(ctx context.Context, params *FoldingRangeParams) ([]FoldingRange /*FoldingRange[] | null*/, error) {
- var result []FoldingRange /*FoldingRange[] | null*/
- if err := s.sender.Call(ctx, "textDocument/foldingRange", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) Declaration(ctx context.Context, params *DeclarationParams) (Declaration /*Declaration | DeclarationLink[] | null*/, error) {
- var result Declaration /*Declaration | DeclarationLink[] | null*/
- if err := s.sender.Call(ctx, "textDocument/declaration", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) SelectionRange(ctx context.Context, params *SelectionRangeParams) ([]SelectionRange /*SelectionRange[] | null*/, error) {
- var result []SelectionRange /*SelectionRange[] | null*/
- if err := s.sender.Call(ctx, "textDocument/selectionRange", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) PrepareCallHierarchy(ctx context.Context, params *CallHierarchyPrepareParams) ([]CallHierarchyItem /*CallHierarchyItem[] | null*/, error) {
- var result []CallHierarchyItem /*CallHierarchyItem[] | null*/
- if err := s.sender.Call(ctx, "textDocument/prepareCallHierarchy", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) IncomingCalls(ctx context.Context, params *CallHierarchyIncomingCallsParams) ([]CallHierarchyIncomingCall /*CallHierarchyIncomingCall[] | null*/, error) {
- var result []CallHierarchyIncomingCall /*CallHierarchyIncomingCall[] | null*/
- if err := s.sender.Call(ctx, "callHierarchy/incomingCalls", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) OutgoingCalls(ctx context.Context, params *CallHierarchyOutgoingCallsParams) ([]CallHierarchyOutgoingCall /*CallHierarchyOutgoingCall[] | null*/, error) {
- var result []CallHierarchyOutgoingCall /*CallHierarchyOutgoingCall[] | null*/
- if err := s.sender.Call(ctx, "callHierarchy/outgoingCalls", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) SemanticTokensFull(ctx context.Context, params *SemanticTokensParams) (*SemanticTokens /*SemanticTokens | null*/, error) {
- var result *SemanticTokens /*SemanticTokens | null*/
- if err := s.sender.Call(ctx, "textDocument/semanticTokens/full", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) SemanticTokensFullDelta(ctx context.Context, params *SemanticTokensDeltaParams) (interface{} /* SemanticTokens | SemanticTokensDelta | float64*/, error) {
- var result interface{} /* SemanticTokens | SemanticTokensDelta | float64*/
- if err := s.sender.Call(ctx, "textDocument/semanticTokens/full/delta", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) SemanticTokensRange(ctx context.Context, params *SemanticTokensRangeParams) (*SemanticTokens /*SemanticTokens | null*/, error) {
- var result *SemanticTokens /*SemanticTokens | null*/
- if err := s.sender.Call(ctx, "textDocument/semanticTokens/range", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) SemanticTokensRefresh(ctx context.Context) error {
- return s.sender.Call(ctx, "workspace/semanticTokens/refresh", nil, nil)
-}
-
-func (s *serverDispatcher) LinkedEditingRange(ctx context.Context, params *LinkedEditingRangeParams) (*LinkedEditingRanges /*LinkedEditingRanges | null*/, error) {
- var result *LinkedEditingRanges /*LinkedEditingRanges | null*/
- if err := s.sender.Call(ctx, "textDocument/linkedEditingRange", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) WillCreateFiles(ctx context.Context, params *CreateFilesParams) (*WorkspaceEdit /*WorkspaceEdit | null*/, error) {
- var result *WorkspaceEdit /*WorkspaceEdit | null*/
- if err := s.sender.Call(ctx, "workspace/willCreateFiles", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) WillRenameFiles(ctx context.Context, params *RenameFilesParams) (*WorkspaceEdit /*WorkspaceEdit | null*/, error) {
- var result *WorkspaceEdit /*WorkspaceEdit | null*/
- if err := s.sender.Call(ctx, "workspace/willRenameFiles", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) WillDeleteFiles(ctx context.Context, params *DeleteFilesParams) (*WorkspaceEdit /*WorkspaceEdit | null*/, error) {
- var result *WorkspaceEdit /*WorkspaceEdit | null*/
- if err := s.sender.Call(ctx, "workspace/willDeleteFiles", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) Moniker(ctx context.Context, params *MonikerParams) ([]Moniker /*Moniker[] | null*/, error) {
- var result []Moniker /*Moniker[] | null*/
- if err := s.sender.Call(ctx, "textDocument/moniker", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) PrepareTypeHierarchy(ctx context.Context, params *TypeHierarchyPrepareParams) ([]TypeHierarchyItem /*TypeHierarchyItem[] | null*/, error) {
- var result []TypeHierarchyItem /*TypeHierarchyItem[] | null*/
- if err := s.sender.Call(ctx, "textDocument/prepareTypeHierarchy", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) Supertypes(ctx context.Context, params *TypeHierarchySupertypesParams) ([]TypeHierarchyItem /*TypeHierarchyItem[] | null*/, error) {
- var result []TypeHierarchyItem /*TypeHierarchyItem[] | null*/
- if err := s.sender.Call(ctx, "typeHierarchy/supertypes", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) Subtypes(ctx context.Context, params *TypeHierarchySubtypesParams) ([]TypeHierarchyItem /*TypeHierarchyItem[] | null*/, error) {
- var result []TypeHierarchyItem /*TypeHierarchyItem[] | null*/
- if err := s.sender.Call(ctx, "typeHierarchy/subtypes", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) InlineValue(ctx context.Context, params *InlineValueParams) ([]InlineValue /*InlineValue[] | null*/, error) {
- var result []InlineValue /*InlineValue[] | null*/
- if err := s.sender.Call(ctx, "textDocument/inlineValue", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) InlineValueRefresh(ctx context.Context) error {
- return s.sender.Call(ctx, "workspace/inlineValue/refresh", nil, nil)
-}
-
-func (s *serverDispatcher) InlayHint(ctx context.Context, params *InlayHintParams) ([]InlayHint /*InlayHint[] | null*/, error) {
- var result []InlayHint /*InlayHint[] | null*/
- if err := s.sender.Call(ctx, "textDocument/inlayHint", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) Resolve(ctx context.Context, params *InlayHint) (*InlayHint, error) {
- var result *InlayHint
- if err := s.sender.Call(ctx, "inlayHint/resolve", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) InlayHintRefresh(ctx context.Context) error {
- return s.sender.Call(ctx, "workspace/inlayHint/refresh", nil, nil)
-}
-
-func (s *serverDispatcher) Initialize(ctx context.Context, params *ParamInitialize) (*InitializeResult, error) {
- var result *InitializeResult
- if err := s.sender.Call(ctx, "initialize", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) Shutdown(ctx context.Context) error {
- return s.sender.Call(ctx, "shutdown", nil, nil)
-}
-
-func (s *serverDispatcher) WillSaveWaitUntil(ctx context.Context, params *WillSaveTextDocumentParams) ([]TextEdit /*TextEdit[] | null*/, error) {
- var result []TextEdit /*TextEdit[] | null*/
- if err := s.sender.Call(ctx, "textDocument/willSaveWaitUntil", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) Completion(ctx context.Context, params *CompletionParams) (*CompletionList /*CompletionItem[] | CompletionList | null*/, error) {
- var result *CompletionList /*CompletionItem[] | CompletionList | null*/
- if err := s.sender.Call(ctx, "textDocument/completion", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) ResolveCompletionItem(ctx context.Context, params *CompletionItem) (*CompletionItem, error) {
- var result *CompletionItem
- if err := s.sender.Call(ctx, "completionItem/resolve", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) Hover(ctx context.Context, params *HoverParams) (*Hover /*Hover | null*/, error) {
- var result *Hover /*Hover | null*/
- if err := s.sender.Call(ctx, "textDocument/hover", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) SignatureHelp(ctx context.Context, params *SignatureHelpParams) (*SignatureHelp /*SignatureHelp | null*/, error) {
- var result *SignatureHelp /*SignatureHelp | null*/
- if err := s.sender.Call(ctx, "textDocument/signatureHelp", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) Definition(ctx context.Context, params *DefinitionParams) (Definition /*Definition | DefinitionLink[] | null*/, error) {
- var result Definition /*Definition | DefinitionLink[] | null*/
- if err := s.sender.Call(ctx, "textDocument/definition", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) References(ctx context.Context, params *ReferenceParams) ([]Location /*Location[] | null*/, error) {
- var result []Location /*Location[] | null*/
- if err := s.sender.Call(ctx, "textDocument/references", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) DocumentHighlight(ctx context.Context, params *DocumentHighlightParams) ([]DocumentHighlight /*DocumentHighlight[] | null*/, error) {
- var result []DocumentHighlight /*DocumentHighlight[] | null*/
- if err := s.sender.Call(ctx, "textDocument/documentHighlight", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) DocumentSymbol(ctx context.Context, params *DocumentSymbolParams) ([]interface{} /*SymbolInformation[] | DocumentSymbol[] | null*/, error) {
- var result []interface{} /*SymbolInformation[] | DocumentSymbol[] | null*/
- if err := s.sender.Call(ctx, "textDocument/documentSymbol", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) CodeAction(ctx context.Context, params *CodeActionParams) ([]CodeAction /*(Command | CodeAction)[] | null*/, error) {
- var result []CodeAction /*(Command | CodeAction)[] | null*/
- if err := s.sender.Call(ctx, "textDocument/codeAction", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) ResolveCodeAction(ctx context.Context, params *CodeAction) (*CodeAction, error) {
- var result *CodeAction
- if err := s.sender.Call(ctx, "codeAction/resolve", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) Symbol(ctx context.Context, params *WorkspaceSymbolParams) ([]SymbolInformation /*SymbolInformation[] | WorkspaceSymbol[] | null*/, error) {
- var result []SymbolInformation /*SymbolInformation[] | WorkspaceSymbol[] | null*/
- if err := s.sender.Call(ctx, "workspace/symbol", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) ResolveWorkspaceSymbol(ctx context.Context, params *WorkspaceSymbol) (*WorkspaceSymbol, error) {
- var result *WorkspaceSymbol
- if err := s.sender.Call(ctx, "workspaceSymbol/resolve", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) CodeLens(ctx context.Context, params *CodeLensParams) ([]CodeLens /*CodeLens[] | null*/, error) {
- var result []CodeLens /*CodeLens[] | null*/
- if err := s.sender.Call(ctx, "textDocument/codeLens", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) ResolveCodeLens(ctx context.Context, params *CodeLens) (*CodeLens, error) {
- var result *CodeLens
- if err := s.sender.Call(ctx, "codeLens/resolve", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) CodeLensRefresh(ctx context.Context) error {
- return s.sender.Call(ctx, "workspace/codeLens/refresh", nil, nil)
-}
-
-func (s *serverDispatcher) DocumentLink(ctx context.Context, params *DocumentLinkParams) ([]DocumentLink /*DocumentLink[] | null*/, error) {
- var result []DocumentLink /*DocumentLink[] | null*/
- if err := s.sender.Call(ctx, "textDocument/documentLink", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) ResolveDocumentLink(ctx context.Context, params *DocumentLink) (*DocumentLink, error) {
- var result *DocumentLink
- if err := s.sender.Call(ctx, "documentLink/resolve", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) Formatting(ctx context.Context, params *DocumentFormattingParams) ([]TextEdit /*TextEdit[] | null*/, error) {
- var result []TextEdit /*TextEdit[] | null*/
- if err := s.sender.Call(ctx, "textDocument/formatting", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) RangeFormatting(ctx context.Context, params *DocumentRangeFormattingParams) ([]TextEdit /*TextEdit[] | null*/, error) {
- var result []TextEdit /*TextEdit[] | null*/
- if err := s.sender.Call(ctx, "textDocument/rangeFormatting", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) OnTypeFormatting(ctx context.Context, params *DocumentOnTypeFormattingParams) ([]TextEdit /*TextEdit[] | null*/, error) {
- var result []TextEdit /*TextEdit[] | null*/
- if err := s.sender.Call(ctx, "textDocument/onTypeFormatting", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) Rename(ctx context.Context, params *RenameParams) (*WorkspaceEdit /*WorkspaceEdit | null*/, error) {
- var result *WorkspaceEdit /*WorkspaceEdit | null*/
- if err := s.sender.Call(ctx, "textDocument/rename", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) PrepareRename(ctx context.Context, params *PrepareRenameParams) (*PrepareRename2Gn /*Range | { range: Range; placeholder: string } | { defaultBehavior: boolean } | null*/, error) {
- var result *PrepareRename2Gn /*Range | { range: Range; placeholder: string } | { defaultBehavior: boolean } | null*/
- if err := s.sender.Call(ctx, "textDocument/prepareRename", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) ExecuteCommand(ctx context.Context, params *ExecuteCommandParams) (interface{} /* LSPAny | void | float64*/, error) {
- var result interface{} /* LSPAny | void | float64*/
- if err := s.sender.Call(ctx, "workspace/executeCommand", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) Diagnostic(ctx context.Context, params *string) (*string, error) {
- var result *string
- if err := s.sender.Call(ctx, "textDocument/diagnostic", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) DiagnosticWorkspace(ctx context.Context, params *WorkspaceDiagnosticParams) (*WorkspaceDiagnosticReport, error) {
- var result *WorkspaceDiagnosticReport
- if err := s.sender.Call(ctx, "workspace/diagnostic", params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *serverDispatcher) DiagnosticRefresh(ctx context.Context) error {
- return s.sender.Call(ctx, "workspace/diagnostic/refresh", nil, nil)
-}
-
-func (s *serverDispatcher) NonstandardRequest(ctx context.Context, method string, params interface{}) (interface{}, error) {
- var result interface{}
- if err := s.sender.Call(ctx, method, params, &result); err != nil {
- return nil, err
- }
- return result, nil
-}
diff --git a/internal/lsp/protocol/typescript/README.md b/internal/lsp/protocol/typescript/README.md
deleted file mode 100644
index 74bcd1883..000000000
--- a/internal/lsp/protocol/typescript/README.md
+++ /dev/null
@@ -1,55 +0,0 @@
-# Generate Go types and signatures for the LSP protocol
-
-## Setup
-
-Make sure `node` and `tsc` are installed and in your PATH. There are detailed instructions below.
-(`tsc -v` should be at least `4.2.4`.)
-Get the typescript code for the jsonrpc protocol with
-
-`git clone git@github.com:microsoft vscode-languageserver-node.git` or
-`git clone https://github.com/microsoft/vscode-languageserver-node.git`
-
-`util.ts` expects it to be in your HOME directory
-
-If you want to reproduce the existing files you need to be on a branch with the same git hash that `util.ts` expects, for instance, `git checkout 7b90c29`
-
-## Usage
-
-Code is generated and normalized by
-
-`tsc && node code.js && gofmt -w ts*.go`
-
-(`code.ts` imports `util.ts`.) This generates 3 files in the current directory, `tsprotocol.go`
-containing type definitions, and `tsserver.go`, `tsclient.go` containing API stubs.
-
-## Notes
-
-1. `code.ts` and `util.ts` use the Typescript compiler's API, which is [introduced](https://github.com/Microsoft/TypeScript/wiki/Architectural-Overview) in their wiki.
-2. Because the Typescript and Go type systems are incompatible, `code.ts` and `util.ts` are filled with heuristics and special cases. Therefore they are tied to a specific commit of `vscode-languageserver-node`. The hash code of the commit is included in the header of
-the generated files and stored in the variable `gitHash` in `go.ts`. It is checked (see `git()` in `util.ts`) on every execution.
-3. Generating the `ts*.go` files is only semi-automated. Please file an issue if the released version is too far behind.
-4. For the impatient, first change `gitHash` by hand (`git()` shows how to find the hash).
- 1. Then try to run `code.ts`. This will likely fail because the heuristics don't cover some new case. For instance, some simple type like `string` might have changed to a union type `string | [number,number]`. Another example is that some generated formal parameter may have anonymous structure type, which is essentially unusable.
- 2. Next step is to move the generated code to `internal/lsp/protocol` and try to build `gopls` and its tests. This will likely fail because types have changed. Generally the fixes are fairly easy. Then run all the tests.
- 3. Since there are not adequate integration tests, the next step is to run `gopls`.
-
-## Detailed instructions for installing node and typescript
-
-(The instructions are somewhat different for Linux and MacOS. They install some things locally, so `$PATH` needs to be changed.)
-
-1. For Linux, it is possible to build node from scratch, but if there's a package manager, that's simpler.
- 1. To use the Ubuntu package manager
- 1. `sudo apt update` (if you can't `sudo` then these instructions are not helpful)
- 2. `sudo apt install nodejs` (this may install `/usr/bin/nodejs` rather than `/usr/bin/node`. For me, `/usr/bin/nodejs` pointed to an actual executable `/etc/alternatives/nodejs`, which should be copied to `/usr/bin/node`)
- 3. `sudo apt intall npm`
- 1. To build from scratch
- 1. Go to the [node site](https://nodejs.org), and download the one recommended for most users, and then you're on your own. (It's got binaries in it. Untar the file somewhere and put its `bin` directory in your path, perhaps?)
-2. The Mac is easier. Download the macOS installer from [nodejs](https://nodejs.org), click on it, and let it install.
-3. (There's a good chance that soon you will be asked to upgrade your new npm. `sudo npm install -g npm` is the command.)
-4. For either system, node and nvm should now be available. Running `node -v` and `npm -v` should produce version numbers.
-5. `npm install typescript`
- 1. This may give warning messages that indicate you've failed to set up a project. Ignore them.
- 2. Your home directory will now have new directories `.npm` and `node_modules` (and a `package_lock.json` file)
- 3. The typescript executable `tsc` will be in `node_modules/.bin`, so put that directory in your path.
- 4. `tsc -v` should print "Version 4.2.4" (or later). If not you may (as I did) have an obsolete tsc earlier in your path.
-6. `npm install @types/node` (Without this there will be many incomprehensible typescript error messages.)
diff --git a/internal/lsp/protocol/typescript/code.ts b/internal/lsp/protocol/typescript/code.ts
deleted file mode 100644
index dcb1b67a5..000000000
--- a/internal/lsp/protocol/typescript/code.ts
+++ /dev/null
@@ -1,1448 +0,0 @@
-/* eslint-disable no-useless-return */
-// read files from vscode-languageserver-node, and generate Go rpc stubs
-// and data definitions. (and maybe someday unmarshaling code)
-
-// The output is 3 files, tsprotocol.go contains the type definitions
-// while tsclient.go and tsserver.go contain the LSP API and stub. An LSP server
-// uses both APIs. To read the code, start in this file's main() function.
-
-// The code is rich in heuristics and special cases, some of which are to avoid
-// extensive changes to gopls, and some of which are due to the mismatch between
-// typescript and Go types. In particular, there is no Go equivalent to union
-// types, so each case ought to be considered separately. The Go equivalent of A
-// & B could frequently be struct{A;B;}, or it could be the equivalent type
-// listing all the members of A and B. Typically the code uses the former, but
-// especially if A and B have elements with the same name, it does a version of
-// the latter. ClientCapabilities has to be expanded, and ServerCapabilities is
-// expanded to make the generated code easier to read.
-
-// for us typescript ignorati, having an import makes this file a module
-import * as fs from 'fs';
-import * as ts from 'typescript';
-import * as u from './util';
-import { constName, getComments, goName, loc, strKind } from './util';
-
-var program: ts.Program;
-
-function parse() {
- // this won't complain if some fnames don't exist
- program = ts.createProgram(
- u.fnames,
- { target: ts.ScriptTarget.ES2018, module: ts.ModuleKind.CommonJS });
- program.getTypeChecker(); // finish type checking and assignment
-}
-
-// ----- collecting information for RPCs
-let req = new Map<string, ts.NewExpression>(); // requests
-let not = new Map<string, ts.NewExpression>(); // notifications
-let ptypes = new Map<string, [ts.TypeNode, ts.TypeNode]>(); // req, resp types
-let receives = new Map<string, 'server' | 'client'>(); // who receives it
-let rpcTypes = new Set<string>(); // types seen in the rpcs
-
-function findRPCs(node: ts.Node) {
- if (!ts.isModuleDeclaration(node)) {
- return;
- }
- if (!ts.isIdentifier(node.name)) {
- throw new Error(
- `expected Identifier, got ${strKind(node.name)} at ${loc(node)}`);
- }
- let reqnot = req;
- let v = node.name.getText();
- if (v.endsWith('Notification')) reqnot = not;
- else if (!v.endsWith('Request')) return;
-
- if (!ts.isModuleBlock(node.body)) {
- throw new Error(
- `expected ModuleBlock got ${strKind(node.body)} at ${loc(node)}`);
- }
- let x: ts.ModuleBlock = node.body;
- // The story is to expect const method = 'textDocument/implementation'
- // const type = new ProtocolRequestType<...>(method)
- // but the method may be an explicit string
- let rpc: string = '';
- let newNode: ts.NewExpression;
- for (let i = 0; i < x.statements.length; i++) {
- const uu = x.statements[i];
- if (!ts.isVariableStatement(uu)) continue;
- const dl: ts.VariableDeclarationList = uu.declarationList;
- if (dl.declarations.length != 1)
- throw new Error(`expected a single decl at ${loc(dl)}`);
- const decl: ts.VariableDeclaration = dl.declarations[0];
- const name = decl.name.getText();
- // we want the initializers
- if (name == 'method') { // mostly StringLiteral but NoSubstitutionTemplateLiteral in protocol.semanticTokens.ts
- if (!ts.isStringLiteral(decl.initializer)) {
- if (!ts.isNoSubstitutionTemplateLiteral(decl.initializer)) {
- console.log(`81: ${decl.initializer.getText()}`);
- throw new Error(`expect StringLiteral at ${loc(decl)} got ${strKind(decl.initializer)}`);
- }
- }
- rpc = decl.initializer.getText();
- }
- else if (name == 'type') { // NewExpression
- if (!ts.isNewExpression(decl.initializer))
- throw new Error(`89 expected new at ${loc(decl)}`);
- const nn: ts.NewExpression = decl.initializer;
- newNode = nn;
- const mtd = nn.arguments[0];
- if (ts.isStringLiteral(mtd)) rpc = mtd.getText();
- switch (nn.typeArguments.length) {
- case 1: // exit
- ptypes.set(rpc, [nn.typeArguments[0], null]);
- break;
- case 2: // notifications
- ptypes.set(rpc, [nn.typeArguments[0], null]);
- break;
- case 4: // request with no parameters
- ptypes.set(rpc, [null, nn.typeArguments[0]]);
- break;
- case 5: // request req, resp, partial(?)
- ptypes.set(rpc, [nn.typeArguments[0], nn.typeArguments[1]]);
- break;
- default:
- throw new Error(`${nn.typeArguments?.length} at ${loc(nn)}`);
- }
- }
- }
- if (rpc == '') throw new Error(`112 no name found at ${loc(x)}`);
- // remember the implied types
- const [a, b] = ptypes.get(rpc);
- const add = function (n: ts.Node) {
- rpcTypes.add(goName(n.getText()));
- };
- underlying(a, add);
- underlying(b, add);
- rpc = rpc.substring(1, rpc.length - 1); // 'exit'
- reqnot.set(rpc, newNode);
-}
-
-function setReceives() {
- // mark them all as server, then adjust the client ones.
- // it would be nice to have some independent check on this
- // (this logic fails if the server ever sends $/canceRequest
- // or $/progress)
- req.forEach((_, k) => { receives.set(k, 'server'); });
- not.forEach((_, k) => { receives.set(k, 'server'); });
- receives.set('window/showMessage', 'client');
- receives.set('window/showMessageRequest', 'client');
- receives.set('window/logMessage', 'client');
- receives.set('telemetry/event', 'client');
- receives.set('client/registerCapability', 'client');
- receives.set('client/unregisterCapability', 'client');
- receives.set('workspace/workspaceFolders', 'client');
- receives.set('workspace/configuration', 'client');
- receives.set('workspace/applyEdit', 'client');
- receives.set('textDocument/publishDiagnostics', 'client');
- receives.set('window/workDoneProgress/create', 'client');
- receives.set('window/showDocument', 'client');
- receives.set('$/progress', 'client');
- // a small check
- receives.forEach((_, k) => {
- if (!req.get(k) && !not.get(k)) throw new Error(`145 missing ${k}}`);
- if (req.get(k) && not.get(k)) throw new Error(`146 dup ${k}`);
- });
-}
-
-type DataKind = 'module' | 'interface' | 'alias' | 'enum' | 'class';
-
-interface Data {
- kind: DataKind;
- me: ts.Node; // root node for this type
- name: string; // Go name
- origname: string; // their name
- generics: ts.NodeArray<ts.TypeParameterDeclaration>;
- as: ts.NodeArray<ts.HeritageClause>; // inheritance
- // Interface
- properties: ts.NodeArray<ts.PropertySignature>
- alias: ts.TypeNode; // type alias
- // module
- statements: ts.NodeArray<ts.Statement>;
- enums: ts.NodeArray<ts.EnumMember>;
- // class
- members: ts.NodeArray<ts.PropertyDeclaration>;
-}
-function newData(n: ts.Node, nm: string, k: DataKind, origname: string): Data {
- return {
- kind: k,
- me: n, name: goName(nm), origname: origname,
- generics: ts.factory.createNodeArray<ts.TypeParameterDeclaration>(),
- as: ts.factory.createNodeArray<ts.HeritageClause>(),
- properties: ts.factory.createNodeArray<ts.PropertySignature>(), alias: undefined,
- statements: ts.factory.createNodeArray<ts.Statement>(),
- enums: ts.factory.createNodeArray<ts.EnumMember>(),
- members: ts.factory.createNodeArray<ts.PropertyDeclaration>(),
- };
-}
-
-// for debugging, produce a skeleton description
-function strData(d: Data): string {
- if (!d) { return 'nil'; }
- const f = function (na: ts.NodeArray<any>): number {
- return na.length;
- };
- const nm = d.name == d.origname ? `${d.name}` : `${d.name}/${d.origname}`;
- return `g:${f(d.generics)} a:${f(d.as)} p:${f(d.properties)} s:${f(d.statements)} e:${f(d.enums)} m:${f(d.members)} a:${d.alias !== undefined} D(${nm}) k:${d.kind}`;
-}
-
-let data = new Map<string, Data>(); // parsed data types
-let seenTypes = new Map<string, Data>(); // type names we've seen
-let extraTypes = new Map<string, string[]>(); // to avoid struct params
-
-function setData(nm: string, d: Data) {
- const v = data.get(nm);
- if (!v) {
- data.set(nm, d);
- return;
- }
- // if there are multiple definitions of the same name, decide what to do.
- // For now the choices are only aliases and modules
- // alias is preferred unless the constant values are needed
- if (nm === 'PrepareSupportDefaultBehavior') {
- // want the alias, as we're going to change the type and can't afford a constant
- if (d.kind === 'alias') data.set(nm, d);
- else if (v.kind == 'alias') data.set(nm, v);
- else throw new Error(`208 ${d.kind} ${v.kind}`);
- return;
- }
- if (nm === 'CodeActionKind') {
- // want the module, need the constants
- if (d.kind === 'module') data.set(nm, d);
- else if (v.kind === 'module') data.set(nm, v);
- else throw new Error(`215 ${d.kind} ${v.kind}`);
- }
- if (v.kind === 'alias' && d.kind !== 'alias') return;
- if (d.kind === 'alias' && v.kind !== 'alias') {
- data.set(nm, d);
- return;
- }
- if (v.kind === 'alias' && d.kind === 'alias') return;
- // protocol/src/common/protocol.foldingRange.ts 44: 1 (39: 2) and
- // types/src/main.ts 397: 1 (392: 2)
- // for FoldingRangeKind
- if (d.me.getText() === v.me.getText()) return;
- // error messages for an unexpected case
- console.log(`228 ${strData(v)} ${loc(v.me)} for`);
- console.log(`229 ${v.me.getText().replace(/\n/g, '\\n')}`);
- console.log(`230 ${strData(d)} ${loc(d.me)}`);
- console.log(`231 ${d.me.getText().replace(/\n/g, '\\n')}`);
- throw new Error(`232 setData found ${v.kind} for ${d.kind}`);
-}
-
-// look at top level data definitions
-function genTypes(node: ts.Node) {
- // Ignore top-level items that can't produce output
- if (ts.isExpressionStatement(node) || ts.isFunctionDeclaration(node) ||
- ts.isImportDeclaration(node) || ts.isVariableStatement(node) ||
- ts.isExportDeclaration(node) || ts.isEmptyStatement(node) ||
- ts.isExportAssignment(node) || ts.isImportEqualsDeclaration(node) ||
- ts.isBlock(node) || node.kind == ts.SyntaxKind.EndOfFileToken) {
- return;
- }
- if (ts.isInterfaceDeclaration(node)) {
- const v: ts.InterfaceDeclaration = node;
- // need to check the members, many of which are disruptive
- let mems: ts.PropertySignature[] = [];
- const f = function (t: ts.TypeElement) {
- if (ts.isPropertySignature(t)) {
- mems.push(t);
- } else if (ts.isMethodSignature(t) || ts.isCallSignatureDeclaration(t)) {
- return;
- } else if (ts.isIndexSignatureDeclaration(t)) {
- // probably safe to ignore these
- // [key: string]: boolean | number | string | undefined;
- // and InitializeResult: [custom: string]: any;]
- } else
- throw new Error(`259 unexpected ${strKind(t)}`);
- };
- v.members.forEach(f);
- if (mems.length == 0 && !v.heritageClauses &&
- v.name.getText() != 'InitializedParams') {
- return; // Don't seem to need any of these [Logger, PipTransport, ...]
- }
- // Found one we want
- let x = newData(v, goName(v.name.getText()), 'interface', v.name.getText());
- x.properties = ts.factory.createNodeArray<ts.PropertySignature>(mems);
- if (v.typeParameters) x.generics = v.typeParameters;
- if (v.heritageClauses) x.as = v.heritageClauses;
- if (x.generics.length > 1) { // Unneeded
- // Item interface Item<K, V>...
- return;
- }
- if (data.has(x.name)) { // modifying one we've seen
- x = dataChoose(x, data.get(x.name));
- }
- setData(x.name, x);
- } else if (ts.isTypeAliasDeclaration(node)) {
- const v: ts.TypeAliasDeclaration = node;
- let x = newData(v, v.name.getText(), 'alias', v.name.getText());
- x.alias = v.type;
- // if type is a union of constants, we (mostly) don't want it
- // (at the top level)
- // Unfortunately this is false for TraceValues
- if (ts.isUnionTypeNode(v.type) &&
- v.type.types.every((n: ts.TypeNode) => ts.isLiteralTypeNode(n))) {
- if (x.name != 'TraceValues') return;
- }
- if (v.typeParameters) {
- x.generics = v.typeParameters;
- }
- if (data.has(x.name)) x = dataChoose(x, data.get(x.name));
- if (x.generics.length > 1) {
- return;
- }
- setData(x.name, x);
- } else if (ts.isModuleDeclaration(node)) {
- const v: ts.ModuleDeclaration = node;
- if (!ts.isModuleBlock(v.body)) {
- throw new Error(`${loc(v)} not ModuleBlock, but ${strKind(v.body)}`);
- }
- const b: ts.ModuleBlock = v.body;
- var s: ts.Statement[] = [];
- // we don't want most of these
- const fx = function (x: ts.Statement) {
- if (ts.isFunctionDeclaration(x)) {
- return;
- }
- if (ts.isTypeAliasDeclaration(x) || ts.isModuleDeclaration(x)) {
- return;
- }
- if (!ts.isVariableStatement(x))
- throw new Error(
- `315 expected VariableStatment ${loc(x)} ${strKind(x)} ${x.getText()}`);
- if (hasNewExpression(x)) {
- return;
- }
- s.push(x);
- };
- b.statements.forEach(fx);
- if (s.length == 0) {
- return;
- }
- let m = newData(node, v.name.getText(), 'module', v.name.getText());
- m.statements = ts.factory.createNodeArray<ts.Statement>(s);
- if (data.has(m.name)) m = dataChoose(m, data.get(m.name));
- setData(m.name, m);
- } else if (ts.isEnumDeclaration(node)) {
- const nm = node.name.getText();
- let v = newData(node, nm, 'enum', node.name.getText());
- v.enums = node.members;
- if (data.has(nm)) {
- v = dataChoose(v, data.get(nm));
- }
- setData(nm, v);
- } else if (ts.isClassDeclaration(node)) {
- const v: ts.ClassDeclaration = node;
- var d: ts.PropertyDeclaration[] = [];
- const wanted = function (c: ts.ClassElement): string {
- if (ts.isConstructorDeclaration(c)) {
- return '';
- }
- if (ts.isMethodDeclaration(c)) {
- return '';
- }
- if (ts.isGetAccessor(c)) {
- return '';
- }
- if (ts.isSetAccessor(c)) {
- return '';
- }
- if (ts.isPropertyDeclaration(c)) {
- d.push(c);
- return strKind(c);
- }
- throw new Error(`Class decl ${strKind(c)} `);
- };
- v.members.forEach((c) => wanted(c));
- if (d.length == 0) {
- return;
- } // don't need it
- let c = newData(v, v.name.getText(), 'class', v.name.getText());
- c.members = ts.factory.createNodeArray<ts.PropertyDeclaration>(d);
- if (v.typeParameters) {
- c.generics = v.typeParameters;
- }
- if (c.generics.length > 1) {
- return;
- }
- if (v.heritageClauses) {
- c.as = v.heritageClauses;
- }
- if (data.has(c.name))
- throw new Error(`Class dup ${loc(c.me)} and ${loc(data.get(c.name).me)}`);
- setData(c.name, c);
- } else {
- throw new Error(`378 unexpected ${strKind(node)} ${loc(node)} `);
- }
-}
-
-// Typescript can accumulate, but this chooses one or the other
-function dataChoose(a: Data, b: Data): Data {
- // maybe they are textually identical? (e.g., FoldingRangeKind)
- const [at, bt] = [a.me.getText(), b.me.getText()];
- if (at == bt) {
- return a;
- }
- switch (a.name) {
- case 'InitializeError':
- case 'CompletionItemTag':
- case 'SymbolTag':
- case 'CodeActionKind':
- case 'Integer':
- case 'Uinteger':
- case 'Decimal':
- // want the Module, if anything
- return a.statements.length > 0 ? a : b;
- case 'CancellationToken':
- case 'CancellationStrategy':
- // want the Interface
- return a.properties.length > 0 ? a : b;
- case 'TextDocumentContentChangeEvent': // almost the same
- case 'TokenFormat':
- case 'PrepareSupportDefaultBehavior':
- return a;
- }
- console.log(
- `409 ${strKind(a.me)} ${strKind(b.me)} ${a.name} ${loc(a.me)} ${loc(b.me)}`);
- throw new Error(`410 Fix dataChoose for ${a.name}`);
-}
-
-// is a node an ancestor of a NewExpression
-function hasNewExpression(n: ts.Node): boolean {
- let ans = false;
- n.forEachChild((n: ts.Node) => {
- if (ts.isNewExpression(n)) ans = true;
- });
- return ans;
-}
-
-function checkOnce() {
- // Data for all the rpc types?
- rpcTypes.forEach(s => {
- if (!data.has(s)) throw new Error(`checkOnce, ${s}?`);
- });
-}
-
-// helper function to find underlying types
-// eslint-disable-next-line no-unused-vars
-function underlying(n: ts.Node | undefined, f: (n: ts.Node) => void) {
- if (!n) return;
- const ff = function (n: ts.Node) {
- underlying(n, f);
- };
- if (ts.isIdentifier(n)) {
- f(n);
- } else if (
- n.kind == ts.SyntaxKind.StringKeyword ||
- n.kind == ts.SyntaxKind.NumberKeyword ||
- n.kind == ts.SyntaxKind.AnyKeyword ||
- n.kind == ts.SyntaxKind.UnknownKeyword ||
- n.kind == ts.SyntaxKind.NullKeyword ||
- n.kind == ts.SyntaxKind.BooleanKeyword ||
- n.kind == ts.SyntaxKind.ObjectKeyword ||
- n.kind == ts.SyntaxKind.VoidKeyword) {
- // nothing to do
- } else if (ts.isTypeReferenceNode(n)) {
- f(n.typeName);
- } else if (ts.isArrayTypeNode(n)) {
- underlying(n.elementType, f);
- } else if (ts.isHeritageClause(n)) {
- n.types.forEach(ff);
- } else if (ts.isExpressionWithTypeArguments(n)) {
- underlying(n.expression, f);
- } else if (ts.isPropertySignature(n)) {
- underlying(n.type, f);
- } else if (ts.isTypeLiteralNode(n)) {
- n.members.forEach(ff);
- } else if (ts.isUnionTypeNode(n) || ts.isIntersectionTypeNode(n)) {
- n.types.forEach(ff);
- } else if (ts.isIndexSignatureDeclaration(n)) {
- underlying(n.type, f);
- } else if (ts.isParenthesizedTypeNode(n)) {
- underlying(n.type, f);
- } else if (
- ts.isLiteralTypeNode(n) || ts.isVariableStatement(n) ||
- ts.isTupleTypeNode(n)) {
- // we only see these in moreTypes, but they are handled elsewhere
- } else if (ts.isEnumMember(n)) {
- if (ts.isStringLiteral(n.initializer)) return;
- throw new Error(`472 EnumMember ${strKind(n.initializer)} ${n.name.getText()}`);
- } else {
- throw new Error(`474 saw ${strKind(n)} in underlying. ${n.getText()} at ${loc(n)}`);
- }
-}
-
-// find all the types implied by seenTypes.
-// Simplest way to the transitive closure is to stabilize the size of seenTypes
-// but it is slow
-function moreTypes() {
- const extra = function (s: string) {
- if (!data.has(s)) throw new Error(`moreTypes needs ${s}`);
- seenTypes.set(s, data.get(s));
- };
- rpcTypes.forEach(extra); // all the types needed by the rpcs
- // needed in enums.go (or elsewhere)
- extra('InitializeError');
- extra('WatchKind');
- extra('FoldingRangeKind');
- // not sure why these weren't picked up
- extra('DidChangeWatchedFilesRegistrationOptions');
- extra('WorkDoneProgressBegin');
- extra('WorkDoneProgressReport');
- extra('WorkDoneProgressEnd');
- let old = 0;
- do {
- old = seenTypes.size;
-
- const m = new Map<string, Data>();
- const add = function (n: ts.Node) {
- const nm = goName(n.getText());
- if (seenTypes.has(nm) || m.has(nm)) return;
- if (data.get(nm)) {
- m.set(nm, data.get(nm));
- }
- };
- // expect all the heritage clauses have single Identifiers
- const h = function (n: ts.Node) {
- underlying(n, add);
- };
- const f = function (x: ts.NodeArray<ts.Node>) {
- x.forEach(h);
- };
- seenTypes.forEach((d: Data) => d && f(d.as));
- // find the types in the properties
- seenTypes.forEach((d: Data) => d && f(d.properties));
- // and in the alias and in the statements and in the enums
- seenTypes.forEach((d: Data) => d && underlying(d.alias, add));
- seenTypes.forEach((d: Data) => d && f(d.statements));
- seenTypes.forEach((d: Data) => d && f(d.enums));
- m.forEach((d, k) => seenTypes.set(k, d));
- }
- while (seenTypes.size != old)
- ;
-}
-
-function cleanData() { // middle pass
- // seenTypes contains all the top-level types.
- seenTypes.forEach((d) => {
- if (d.kind == 'alias') mergeAlias(d);
- });
-}
-
-function sameType(a: ts.TypeNode, b: ts.TypeNode): boolean {
- if (a.kind !== b.kind) return false;
- if (a.kind === ts.SyntaxKind.BooleanKeyword) return true;
- if (a.kind === ts.SyntaxKind.StringKeyword) return true;
- if (ts.isTypeReferenceNode(a) && ts.isTypeReferenceNode(b) &&
- a.typeName.getText() === b.typeName.getText()) return true;
- if (ts.isArrayTypeNode(a) && ts.isArrayTypeNode(b)) return sameType(a.elementType, b.elementType);
- if (ts.isTypeLiteralNode(a) && ts.isTypeLiteralNode(b)) {
- if (a.members.length !== b.members.length) return false;
- if (a.members.length === 1) return a.members[0].name.getText() === b.members[0].name.getText();
- if (loc(a) === loc(b)) return true;
- }
- throw new Error(`544 sameType? ${strKind(a)} ${strKind(b)} ${a.getText()}`);
-}
-type CreateMutable<Type> = {
- -readonly [Property in keyof Type]: Type[Property];
-};
-type propMap = Map<string, ts.PropertySignature>;
-function propMapSet(pm: propMap, name: string, v: ts.PropertySignature) {
- if (!pm.get(name)) {
- try { getComments(v); } catch (e) { console.log(`552 ${name} ${e}`); }
- pm.set(name, v);
- return;
- }
- const a = pm.get(name).type;
- const b = v.type;
- if (sameType(a, b)) {
- return;
- }
- if (ts.isTypeReferenceNode(a) && ts.isTypeLiteralNode(b)) {
- const x = mergeTypeRefLit(a, b);
- const fake: CreateMutable<ts.PropertySignature> = v;
- fake['type'] = x;
- check(fake as ts.PropertySignature, '565');
- pm.set(name, fake as ts.PropertySignature);
- return;
- }
- if (ts.isTypeLiteralNode(a) && ts.isTypeLiteralNode(b)) {
- const x = mergeTypeLitLit(a, b);
- const fake: CreateMutable<ts.PropertySignature> = v;
- fake['type'] = x;
- check(fake as ts.PropertySignature, '578');
- pm.set(name, fake as ts.PropertySignature);
- return;
- }
- console.log(`577 ${pm.get(name).getText()}\n${v.getText()}`);
- throw new Error(`578 should merge ${strKind(a)} and ${strKind(b)} for ${name}`);
-}
-function addToProperties(pm: propMap, tn: ts.TypeNode | undefined, prefix = '') {
- if (!tn) return;
- if (ts.isTypeReferenceNode(tn)) {
- const d = seenTypes.get(goName(tn.typeName.getText()));
- if (tn.typeName.getText() === 'T') return;
- if (!d) throw new Error(`584 ${tn.typeName.getText()} not found`);
- if (d.properties.length === 0 && d.alias === undefined) return;
- if (d.alias !== undefined) {
- if (ts.isIntersectionTypeNode(d.alias)) {
- d.alias.types.forEach((tn) => addToProperties(pm, tn, prefix)); // prefix?
- return;
- }
- }
- d.properties.forEach((ps) => {
- const name = `${prefix}.${ps.name.getText()}`;
- propMapSet(pm, name, ps);
- addToProperties(pm, ps.type, name);
- });
- } else if (strKind(tn) === 'TypeLiteral') {
- if (!ts.isTypeLiteralNode(tn)) new Error(`599 ${strKind(tn)}`);
- tn.forEachChild((child: ts.Node) => {
- if (ts.isPropertySignature(child)) {
- const name = `${prefix}.${child.name.getText()}`;
- propMapSet(pm, name, child);
- addToProperties(pm, child.type, name);
- } else if (!ts.isIndexSignatureDeclaration(child)) {
- // ignoring IndexSignatures, seen as relatedDocument in
- // RelatedFullDocumentDiagnosticReport
- throw new Error(`608 ${strKind(child)} ${loc(child)}`);
- }
- });
- }
-}
-function deepProperties(d: Data): propMap | undefined {
- let properties: propMap = new Map<string, ts.PropertySignature>();
- if (!d.alias || !ts.isIntersectionTypeNode(d.alias)) return undefined;
- d.alias.types.forEach((ts) => addToProperties(properties, ts));
- return properties;
-}
-
-function mergeAlias(d: Data) {
- const props = deepProperties(d);
- if (!props) return; // nothing merged
- // now each element of props should have length 1
- // change d to merged, toss its alias field, fill in its properties
- const v: ts.PropertySignature[] = [];
- props.forEach((ps, nm) => {
- const xlen = nm.split('.').length;
- if (xlen !== 2) return; // not top-level
- v.push(ps);
- });
- d.kind = 'interface';
- d.alias = undefined;
- d.properties = ts.factory.createNodeArray(v);
-}
-
-function mergeTypeLitLit(a: ts.TypeLiteralNode, b: ts.TypeLiteralNode): ts.TypeLiteralNode {
- const v = new Map<string, ts.TypeElement>(); // avoid duplicates
- a.members.forEach((te) => v.set(te.name.getText(), te));
- b.members.forEach((te) => v.set(te.name.getText(), te));
- const x: ts.TypeElement[] = [];
- v.forEach((te) => x.push(te));
- const fake: CreateMutable<ts.TypeLiteralNode> = a;
- fake['members'] = ts.factory.createNodeArray(x);
- check(fake as ts.TypeLiteralNode, '643');
- return fake as ts.TypeLiteralNode;
-}
-
-function mergeTypeRefLit(a: ts.TypeReferenceNode, b: ts.TypeLiteralNode): ts.TypeLiteralNode {
- const d = seenTypes.get(goName(a.typeName.getText()));
- if (!d) throw new Error(`644 name ${a.typeName.getText()} not found`);
- const typ = d.me;
- if (!ts.isInterfaceDeclaration(typ)) throw new Error(`646 got ${strKind(typ)} not InterfaceDecl`);
- const v = new Map<string, ts.TypeElement>(); // avoid duplicates
- typ.members.forEach((te) => v.set(te.name.getText(), te));
- b.members.forEach((te) => v.set(te.name.getText(), te));
- const x: ts.TypeElement[] = [];
- v.forEach((te) => x.push(te));
-
- const w = ts.factory.createNodeArray(x);
- const fk: CreateMutable<ts.TypeLiteralNode> = b;
- fk['members'] = w;
- (fk['members'] as { pos: number })['pos'] = b.members.pos;
- (fk['members'] as { end: number })['end'] = b.members.end;
- check(fk as ts.TypeLiteralNode, '662');
- return fk as ts.TypeLiteralNode;
-}
-
-// check that constructed nodes still have associated text
-function check(n: ts.Node, loc: string) {
- try { getComments(n); } catch (e) { console.log(`check at ${loc} ${e}`); }
- try { n.getText(); } catch (e) { console.log(`text check at ${loc}`); }
-}
-
-let typesOut = new Array<string>();
-let constsOut = new Array<string>();
-
-// generate Go types
-function toGo(d: Data, nm: string) {
- if (!d) return; // this is probably a generic T
- if (d.name.startsWith('Inner') || d.name === 'WindowClientCapabilities') return; // removed by alias processing
- if (d.name === 'Integer' || d.name === 'Uinteger') return; // unneeded
- switch (d.kind) {
- case 'alias':
- goTypeAlias(d, nm); break;
- case 'module': goModule(d, nm); break;
- case 'enum': goEnum(d, nm); break;
- case 'interface': goInterface(d, nm); break;
- default:
- throw new Error(
- `672: more cases in toGo ${nm} ${d.kind}`);
- }
-}
-
-// these fields need a * and are not covered by the code
-// that calls isStructType.
-var starred: [string, string][] = [
- ['TextDocumentContentChangeEvent', 'range'], ['CodeAction', 'command'],
- ['CodeAction', 'disabled'],
- ['DidSaveTextDocumentParams', 'text'], ['CompletionItem', 'command'],
- ['Diagnostic', 'codeDescription']
-];
-
-// generate Go code for an interface
-function goInterface(d: Data, nm: string) {
- let ans = `type ${goName(nm)} struct {\n`;
-
- // generate the code for each member
- const g = function (n: ts.PropertySignature) {
- if (!ts.isPropertySignature(n))
- throw new Error(`expected PropertySignature got ${strKind(n)} `);
- ans = ans.concat(getComments(n));
- const json = u.JSON(n);
- let gt = goType(n.type, n.name.getText());
- if (gt == d.name) gt = '*' + gt; // avoid recursive types (SelectionRange)
- // there are several cases where a * is needed
- // (putting * in front of too many things breaks uses of CodeActionKind)
- starred.forEach(([a, b]) => {
- if (d.name == a && n.name.getText() == b) {
- gt = '*' + gt;
- }
- });
- ans = ans.concat(`${goName(n.name.getText())} ${gt}`, json, '\n');
- };
- d.properties.forEach(g);
- // heritage clauses become embedded types
- // check they are all Identifiers
- const f = function (n: ts.ExpressionWithTypeArguments) {
- if (!ts.isIdentifier(n.expression))
- throw new Error(`Interface ${nm} heritage ${strKind(n.expression)} `);
- if (n.expression.getText() === 'Omit') return; // Type modification type
- ans = ans.concat(goName(n.expression.getText()), '\n');
- };
- d.as.forEach((n: ts.HeritageClause) => n.types.forEach(f));
- ans = ans.concat('}\n');
- typesOut.push(getComments(d.me));
- typesOut.push(ans);
-}
-
-// generate Go code for a module (const declarations)
-// Generates type definitions, and named constants
-function goModule(d: Data, nm: string) {
- if (d.generics.length > 0 || d.as.length > 0) {
- throw new Error(`743 goModule: unexpected for ${nm}
- `);
- }
- // all the statements should be export const <id>: value
- // or value = value
- // They are VariableStatements with x.declarationList having a single
- // VariableDeclaration
- let isNumeric = false;
- const f = function (n: ts.Statement, i: number) {
- if (!ts.isVariableStatement(n)) {
- throw new Error(`753 ${nm} ${i} expected VariableStatement,
- got ${strKind(n)}`);
- }
- const c = getComments(n);
- const v = n.declarationList.declarations[0]; // only one
-
- if (!v.initializer)
- throw new Error(`760 no initializer ${nm} ${i} ${v.name.getText()}`);
- isNumeric = strKind(v.initializer) == 'NumericLiteral';
- if (c != '') constsOut.push(c); // no point if there are no comments
- // There are duplicates.
- const cname = constName(goName(v.name.getText()), nm);
- let val = v.initializer.getText();
- val = val.split('\'').join('"'); // useless work for numbers
- constsOut.push(`${cname} ${nm} = ${val}`);
- };
- d.statements.forEach(f);
- typesOut.push(getComments(d.me));
- // Or should they be type aliases?
- typesOut.push(`type ${nm} ${isNumeric ? 'float64' : 'string'}`);
-}
-
-// generate Go code for an enum. Both types and named constants
-function goEnum(d: Data, nm: string) {
- let isNumeric = false;
- const f = function (v: ts.EnumMember, j: number) { // same as goModule
- if (!v.initializer)
- throw new Error(`goEnum no initializer ${nm} ${j} ${v.name.getText()}`);
- isNumeric = strKind(v.initializer) == 'NumericLiteral';
- const c = getComments(v);
- const cname = constName(goName(v.name.getText()), nm);
- let val = v.initializer.getText();
- val = val.split('\'').join('"'); // replace quotes. useless work for numbers
- constsOut.push(`${c}${cname} ${nm} = ${val}`);
- };
- d.enums.forEach(f);
- typesOut.push(getComments(d.me));
- // Or should they be type aliases?
- typesOut.push(`type ${nm} ${isNumeric ? 'float64' : 'string'}`);
-}
-
-// generate code for a type alias
-function goTypeAlias(d: Data, nm: string) {
- if (d.as.length != 0 || d.generics.length != 0) {
- if (nm != 'ServerCapabilities')
- throw new Error(`${nm} has extra fields(${d.as.length},${d.generics.length}) ${d.me.getText()}`);
- }
- typesOut.push(getComments(d.me));
- // d.alias doesn't seem to have comments
- let aliasStr = goName(nm) == 'DocumentURI' ? ' ' : ' = ';
- if (nm == 'PrepareSupportDefaultBehavior') {
- // code-insiders is sending a bool, not a number. PJW: check this after Feb/2021
- // (and gopls never looks at it anyway)
- typesOut.push(`type ${goName(nm)}${aliasStr}interface{}\n`);
- return;
- }
- typesOut.push(`type ${goName(nm)}${aliasStr}${goType(d.alias, nm)}\n`);
-}
-
-// return a go type and maybe an assocated javascript tag
-function goType(n: ts.TypeNode | undefined, nm: string): string {
- if (!n) throw new Error(`goType undefined for ${nm}`);
- if (n.getText() == 'T') return 'interface{}'; // should check it's generic
- if (ts.isTypeReferenceNode(n)) {
- // DocumentDiagnosticReportKind.unChanged (or .new) value is "new" or "unChanged"
- if (n.getText().startsWith('DocumentDiagnostic')) return 'string';
- switch (n.getText()) {
- case 'integer': return 'int32';
- case 'uinteger': return 'uint32';
- default: return goName(n.typeName.getText()); // avoid <T>
- }
- } else if (ts.isUnionTypeNode(n)) {
- return goUnionType(n, nm);
- } else if (ts.isIntersectionTypeNode(n)) {
- return goIntersectionType(n, nm);
- } else if (strKind(n) == 'StringKeyword') {
- return 'string';
- } else if (strKind(n) == 'NumberKeyword') {
- return 'float64';
- } else if (strKind(n) == 'BooleanKeyword') {
- return 'bool';
- } else if (strKind(n) == 'AnyKeyword' || strKind(n) == 'UnknownKeyword') {
- return 'interface{}';
- } else if (strKind(n) == 'NullKeyword') {
- return 'nil';
- } else if (strKind(n) == 'VoidKeyword' || strKind(n) == 'NeverKeyword') {
- return 'void';
- } else if (strKind(n) == 'ObjectKeyword') {
- return 'interface{}';
- } else if (ts.isArrayTypeNode(n)) {
- if (nm === 'arguments') {
- // Command and ExecuteCommandParams
- return '[]json.RawMessage';
- }
- return `[]${goType(n.elementType, nm)}`;
- } else if (ts.isParenthesizedTypeNode(n)) {
- return goType(n.type, nm);
- } else if (ts.isLiteralTypeNode(n)) {
- return strKind(n.literal) == 'StringLiteral' ? 'string' : 'float64';
- } else if (ts.isTypeLiteralNode(n)) {
- // these are anonymous structs
- const v = goTypeLiteral(n, nm);
- return v;
- } else if (ts.isTupleTypeNode(n)) {
- if (n.getText() == '[number, number]') return '[]float64';
- throw new Error(`goType unexpected Tuple ${n.getText()}`);
- }
- throw new Error(`${strKind(n)} goType unexpected ${n.getText()} for ${nm}`);
-}
-
-// The choice is uniform interface{}, or some heuristically assigned choice,
-// or some better sytematic idea I haven't thought of. Using interface{}
-// is, in practice, impossibly complex in the existing code.
-function goUnionType(n: ts.UnionTypeNode, nm: string): string {
- let help = `/*${n.getText()}*/`; // show the original as a comment
- // There are some bad cases with newlines:
- // range?: boolean | {\n };
- // full?: boolean | {\n /**\n * The server supports deltas for full documents.\n */\n delta?: boolean;\n }
- // These are handled specially:
- if (nm == 'range') help = help.replace(/\n/, '');
- if (nm == 'full' && help.indexOf('\n') != -1) {
- help = '/*boolean | <elided struct>*/';
- }
- // handle all the special cases
- switch (n.types.length) {
- case 2: {
- const a = strKind(n.types[0]);
- const b = strKind(n.types[1]);
- if (a == 'NumberKeyword' && b == 'StringKeyword') { // ID
- return `interface{} ${help}`;
- }
- // for null, b is not useful (LiternalType)
- if (n.types[1].getText() === 'null') {
- if (nm == 'textDocument/codeAction') {
- // (Command | CodeAction)[] | null
- return `[]CodeAction ${help}`;
- }
- let v = goType(n.types[0], 'a');
- return `${v} ${help}`;
- }
- if (a == 'BooleanKeyword') { // usually want bool
- if (nm == 'codeActionProvider') return `interface{} ${help}`;
- if (nm == 'renameProvider') return `interface{} ${help}`;
- if (nm == 'full') return `interface{} ${help}`; // there's a struct
- if (nm == 'save') return `${goType(n.types[1], '680')} ${help}`;
- return `${goType(n.types[0], 'b')} ${help}`;
- }
- if (b == 'ArrayType') return `${goType(n.types[1], 'c')} ${help}`;
- if (help.includes('InsertReplaceEdit') && n.types[0].getText() == 'TextEdit') {
- return `*TextEdit ${help}`;
- }
- if (a == 'TypeReference') {
- if (nm == 'edits') return `${goType(n.types[0], '901')} ${help}`;
- if (a == b) return `interface{} ${help}`;
- if (nm == 'code') return `interface{} ${help}`;
- if (nm == 'editRange') return `${goType(n.types[0], '904')} ${help}`;
- if (nm === 'location') return `${goType(n.types[0], '905')} ${help}`;
- }
- if (a == 'StringKeyword') return `string ${help}`;
- if (a == 'TypeLiteral' && nm == 'TextDocumentContentChangeEvent') {
- return `${goType(n.types[0], nm)}`;
- }
- if (a == 'TypeLiteral' && b === 'TypeLiteral') {
- // DocumentDiagnosticReport
- // the first one includes the second one
- return `${goType(n.types[0], '9d')}`;
- }
- throw new Error(`911 ${nm}: a:${a}/${goType(n.types[0], '9a')} b:${b}/${goType(n.types[1], '9b')} ${loc(n)}`);
- }
- case 3: {
- const aa = strKind(n.types[0]);
- const bb = strKind(n.types[1]);
- const cc = strKind(n.types[2]);
- if (nm === 'workspace/symbol') return `${goType(n.types[0], '930')} ${help}`;
- if (nm == 'DocumentFilter' || nm == 'NotebookDocumentFilter' || nm == 'TextDocumentFilter') {
- // not really a union. the first is enough, up to a missing
- // omitempty but avoid repetitious comments
- return `${goType(n.types[0], 'g')}`;
- }
- if (nm == 'textDocument/documentSymbol') {
- return `[]interface{} ${help}`;
- }
- if (aa == 'TypeReference' && bb == 'ArrayType' && (cc == 'NullKeyword' || cc === 'LiteralType')) {
- return `${goType(n.types[0], 'd')} ${help}`;
- }
- if (aa == 'TypeReference' && bb == aa && cc == 'ArrayType') {
- // should check that this is Hover.Contents
- return `${goType(n.types[0], 'e')} ${help}`;
- }
- if (aa == 'ArrayType' && bb == 'TypeReference' && (cc == 'NullKeyword' || cc === 'LiteralType')) {
- // check this is nm == 'textDocument/completion'
- return `${goType(n.types[1], 'f')} ${help}`;
- }
- if (aa == 'LiteralType' && bb == aa && cc == aa) return `string ${help}`;
- // keep this for diagnosing unexpected interface{} results
- // console.log(`931, interface{} for ${aa}/${goType(n.types[0], 'g')},${bb}/${goType(n.types[1], 'h')},${cc}/${goType(n.types[2], 'i')} ${nm}`);
- break;
- }
- case 4:
- if (nm == 'documentChanges') return `TextDocumentEdit ${help} `;
- if (nm == 'textDocument/prepareRename') {
- // these names have to be made unique
- const genName = `${goName("prepareRename")}${extraTypes.size}Gn`;
- extraTypes.set(genName, [`Range Range \`json:"range"\`
- Placeholder string \`json:"placeholder"\``]);
- return `${genName} ${help} `;
- }
- break;
- case 8: // LSPany
- break;
- default:
- throw new Error(`957 goUnionType len=${n.types.length} nm=${nm} ${n.getText()}`);
- }
-
- // Result will be interface{} with a comment
- let isLiteral = true;
- let literal = 'string';
- let res = 'interface{} /* ';
- n.types.forEach((v: ts.TypeNode, i: number) => {
- // might get an interface inside:
- // (Command | CodeAction)[] | null
- let m = goType(v, nm);
- if (m.indexOf('interface') != -1) {
- // avoid nested comments
- m = m.split(' ')[0];
- }
- m = m.split('\n').join('; '); // sloppy: struct{;
- res = res.concat(`${i == 0 ? '' : ' | '}`, m);
- if (!ts.isLiteralTypeNode(v)) isLiteral = false;
- else literal = strKind(v.literal) == 'StringLiteral' ? 'string' : 'number';
- });
- if (!isLiteral) {
- return res + '*/';
- }
- // I don't think we get here
- // trace?: 'off' | 'messages' | 'verbose' should get string
- return `${literal} /* ${n.getText()} */`;
-}
-
-// some of the intersection types A&B are ok as struct{A;B;} and some
-// could be expanded, and ClientCapabilites has to be expanded,
-// at least for workspace. It's possible to check algorithmically,
-// but much simpler just to check explicitly.
-function goIntersectionType(n: ts.IntersectionTypeNode, nm: string): string {
- if (nm == 'ClientCapabilities') return expandIntersection(n);
- //if (nm == 'ServerCapabilities') return expandIntersection(n); // save for later consideration
- let inner = '';
- n.types.forEach(
- (t: ts.TypeNode) => { inner = inner.concat(goType(t, nm), '\n'); });
- return `struct{ \n${inner}} `;
-}
-
-// for each of the intersected types, extract its components (each will
-// have a Data with properties) extract the properties, and keep track
-// of them by name. The names that occur once can be output. The names
-// that occur more than once need to be combined.
-function expandIntersection(n: ts.IntersectionTypeNode): string {
- const bad = function (n: ts.Node, s: string) {
- return new Error(`expandIntersection ${strKind(n)} ${s}`);
- };
- let props = new Map<string, ts.PropertySignature[]>();
- for (const tp of n.types) {
- if (!ts.isTypeReferenceNode(tp)) throw bad(tp, 'A');
- const d = data.get(goName(tp.typeName.getText()));
- for (const p of d.properties) {
- if (!ts.isPropertySignature(p)) throw bad(p, 'B');
- let v = props.get(p.name.getText()) || [];
- v.push(p);
- props.set(p.name.getText(), v);
- }
- }
- let ans = 'struct {\n';
- for (const [k, v] of Array.from(props)) {
- if (v.length == 1) {
- const a = v[0];
- ans = ans.concat(getComments(a));
- ans = ans.concat(`${goName(k)} ${goType(a.type, k)} ${u.JSON(a)}\n`);
- continue;
- }
- ans = ans.concat(`${goName(k)} struct {\n`);
- for (let i = 0; i < v.length; i++) {
- const a = v[i];
- if (ts.isTypeReferenceNode(a.type)) {
- ans = ans.concat(getComments(a));
- ans = ans.concat(goName(a.type.typeName.getText()), '\n');
- } else if (ts.isTypeLiteralNode(a.type)) {
- if (a.type.members.length != 1) throw bad(a.type, 'C');
- const b = a.type.members[0];
- if (!ts.isPropertySignature(b)) throw bad(b, 'D');
- ans = ans.concat(getComments(b));
- ans = ans.concat(
- goName(b.name.getText()), ' ', goType(b.type, 'a'), u.JSON(b), '\n');
- } else {
- throw bad(a.type, `E ${a.getText()} in ${goName(k)} at ${loc(a)}`);
- }
- }
- ans = ans.concat('}\n');
- }
- ans = ans.concat('}\n');
- return ans;
-}
-
-// Does it make sense to use a pointer?
-function isStructType(te: ts.TypeNode): boolean {
- switch (strKind(te)) {
- case 'UnionType': // really need to know which type will be chosen
- case 'BooleanKeyword':
- case 'StringKeyword':
- case 'ArrayType':
- return false;
- case 'TypeLiteral': return false; // true makes for difficult compound constants
- // but think more carefully to understands why starred is needed.
- case 'TypeReference': {
- if (!ts.isTypeReferenceNode(te)) throw new Error(`1047 impossible ${strKind(te)}`);
- const d = seenTypes.get(goName(te.typeName.getText()));
- if (d === undefined || d.properties.length == 0) return false;
- if (d.properties.length > 1) return true;
- // alias or interface with a single property (The alias is Uinteger, which we ignore later)
- if (d.alias) return false;
- const x = d.properties[0].type;
- return isStructType(x);
- }
- default: throw new Error(`1055 indirectable> ${strKind(te)}`);
- }
-}
-
-function goTypeLiteral(n: ts.TypeLiteralNode, nm: string): string {
- let ans: string[] = []; // in case we generate a new extra type
- let res = 'struct{\n'; // the actual answer usually
- const g = function (nx: ts.TypeElement) {
- // add the json, as in goInterface(). Strange inside union types.
- if (ts.isPropertySignature(nx)) {
- let json = u.JSON(nx);
- let typ = goType(nx.type, nx.name.getText());
- // }/*\n*/`json:v` is not legal, the comment is a newline
- if (typ.includes('\n') && typ.indexOf('*/') === typ.length - 2) {
- typ = typ.replace(/\n\t*/g, ' ');
- }
- const v = getComments(nx) || '';
- starred.forEach(([a, b]) => {
- if (a != nm || b != typ.toLowerCase()) return;
- typ = '*' + typ;
- json = json.substring(0, json.length - 2) + ',omitempty"`';
- });
- if (typ[0] !== '*' && isStructType(nx.type)) typ = '*' + typ;
- res = res.concat(`${v} ${goName(nx.name.getText())} ${typ}`, json, '\n');
- ans.push(`${v}${goName(nx.name.getText())} ${typ} ${json}\n`);
- } else if (ts.isIndexSignatureDeclaration(nx)) {
- const comment = nx.getText().replace(/[/]/g, '');
- if (nx.getText() == '[uri: string]: TextEdit[];') {
- res = 'map[string][]TextEdit';
- } else if (nx.getText().startsWith('[id: ChangeAnnotationIdentifier]')) {
- res = 'map[string]ChangeAnnotationIdentifier';
- } else if (nx.getText().startsWith('[uri: string')) {
- res = 'map[string]interface{}';
- } else if (nx.getText().startsWith('[uri: DocumentUri')) {
- res = 'map[DocumentURI][]TextEdit';
- } else if (nx.getText().startsWith('[key: string')) {
- res = 'map[string]interface{}';
- } else {
- throw new Error(`1100 handle ${nx.getText()} ${loc(nx)}`);
- }
- res += ` /*${comment}*/`;
- ans.push(res);
- return;
- } else
- throw new Error(`TypeLiteral had ${strKind(nx)}`);
- };
- n.members.forEach(g);
- // for some the generated type is wanted, for others it's not needed
- if (!nm.startsWith('workspace')) {
- if (res.startsWith('struct')) return res + '}'; // map[] is special
- return res;
- }
- // these names have to be made unique
- const genName = `${goName(nm)}${extraTypes.size}Gn`;
- extraTypes.set(genName, ans);
- return genName;
-}
-
-// print all the types and constants and extra types
-function outputTypes() {
- // generate go types alphabeticaly
- let v = Array.from(seenTypes.keys());
- v.sort();
- v.forEach((x) => toGo(seenTypes.get(x), x));
- u.prgo(u.computeHeader(true));
- u.prgo('import "encoding/json"\n\n');
- typesOut.forEach((s) => {
- u.prgo(s);
- // it's more convenient not to have to think about trailing newlines
- // when generating types, but doc comments can't have an extra \n
- if (s.indexOf('/**') < 0) u.prgo('\n');
- });
- u.prgo('\nconst (\n');
- constsOut.forEach((s) => {
- u.prgo(s);
- u.prgo('\n');
- });
- u.prgo(')\n');
- u.prgo('// Types created to name formal parameters and embedded structs\n');
- extraTypes.forEach((v, k) => {
- u.prgo(` type ${k} struct {\n`);
- v.forEach((s) => {
- u.prgo(s);
- u.prgo('\n');
- });
- u.prgo('}\n');
- });
-}
-
-// client and server ------------------
-
-interface side {
- methods: string[];
- cases: string[];
- calls: string[];
- name: string; // client or server
- goName: string; // Client or Server
- outputFile?: string;
- fd?: number
-}
-let client: side = {
- methods: [],
- cases: [],
- calls: [],
- name: 'client',
- goName: 'Client',
-};
-let server: side = {
- methods: [],
- cases: [],
- calls: [],
- name: 'server',
- goName: 'Server',
-};
-
-// commonly used output
-const notNil = `if len(r.Params()) > 0 {
- return true, reply(ctx, nil, errors.Errorf("%w: expected no params", jsonrpc2.ErrInvalidParams))
-}`;
-
-// Go code for notifications. Side is client or server, m is the request
-// method
-function goNot(side: side, m: string) {
- if (m == '$/cancelRequest') return; // handled specially in protocol.go
- const n = not.get(m);
- const a = goType(n.typeArguments[0], m);
- const nm = methodName(m);
- side.methods.push(sig(nm, a, ''));
- const caseHdr = ` case "${m}": // notif`;
- let case1 = notNil;
- if (a != '' && a != 'void') {
- case1 = `var params ${a}
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }
- err:= ${side.name}.${nm}(ctx, &params)
- return true, reply(ctx, nil, err)`;
- } else {
- case1 = `err := ${side.name}.${nm}(ctx)
- return true, reply(ctx, nil, err)`;
- }
- side.cases.push(`${caseHdr}\n${case1}`);
-
- const arg3 = a == '' || a == 'void' ? 'nil' : 'params';
- side.calls.push(`
- func (s *${side.name}Dispatcher) ${sig(nm, a, '', true)} {
- return s.sender.Notify(ctx, "${m}", ${arg3})
- }`);
-}
-
-// Go code for requests.
-function goReq(side: side, m: string) {
- const n = req.get(m);
- const nm = methodName(m);
- let a = goType(n.typeArguments[0], m);
- let b = goType(n.typeArguments[1], m);
- if (n.getText().includes('Type0')) {
- b = a;
- a = ''; // workspace/workspaceFolders and shutdown
- }
- u.prb(`${side.name} req ${a != ''}, ${b != ''} ${nm} ${m} ${loc(n)} `);
- side.methods.push(sig(nm, a, b));
-
- const caseHdr = `case "${m}": // req`;
- let case1 = notNil;
- if (a != '') {
- if (extraTypes.has('Param' + nm)) a = 'Param' + nm;
- case1 = `var params ${a}
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- return true, sendParseError(ctx, reply, err)
- }`;
- if (a === 'ParamInitialize') {
- case1 = `var params ${a}
- if err := json.Unmarshal(r.Params(), &params); err != nil {
- if _, ok := err.(*json.UnmarshalTypeError); !ok {
- return true, sendParseError(ctx, reply, err)
- }
- }`;
- }
- }
- const arg2 = a == '' ? '' : ', &params';
- // if case2 is not explicitly typed string, typescript makes it a union of strings
- let case2: string = `if err := ${side.name}.${nm}(ctx${arg2}); err != nil {
- event.Error(ctx, "", err)
- }`;
- if (b != '' && b != 'void') {
- case2 = `resp, err := ${side.name}.${nm}(ctx${arg2})
- return true, reply(ctx, resp, err)`;
- } else { // response is nil
- case2 = `err := ${side.name}.${nm}(ctx${arg2})
- return true, reply(ctx, nil, err)`;
- }
-
- side.cases.push(`${caseHdr}\n${case1}\n${case2}`);
-
- const callHdr = `func (s *${side.name}Dispatcher) ${sig(nm, a, b, true)} {`;
- let callBody = `return s.sender.Call(ctx, "${m}", nil, nil)\n}`;
- if (b != '' && b != 'void') {
- const p2 = a == '' ? 'nil' : 'params';
- const returnType = indirect(b) ? `*${b}` : b;
- callBody = `var result ${returnType}
- if err := s.sender.Call(ctx, "${m}", ${p2}, &result); err != nil {
- return nil, err
- }
- return result, nil
- }`;
- } else if (a != '') {
- callBody = `return s.sender.Call(ctx, "${m}", params, nil) // Call, not Notify
- }`;
- }
- side.calls.push(`${callHdr}\n${callBody}\n`);
-}
-
-// make sure method names are unique
-let seenNames = new Set<string>();
-function methodName(m: string): string {
- let i = m.indexOf('/');
- let s = m.substring(i + 1);
- let x = s[0].toUpperCase() + s.substring(1);
- for (let j = x.indexOf('/'); j >= 0; j = x.indexOf('/')) {
- let suffix = x.substring(j + 1);
- suffix = suffix[0].toUpperCase() + suffix.substring(1);
- let prefix = x.substring(0, j);
- x = prefix + suffix;
- }
- if (seenNames.has(x)) {
- // various Resolve and Diagnostic
- x += m[0].toUpperCase() + m.substring(1, i);
- }
- seenNames.add(x);
- return x;
-}
-
-// used in sig and in goReq
-function indirect(s: string): boolean {
- if (s == '' || s == 'void') return false;
- const skip = (x: string) => s.startsWith(x);
- if (skip('[]') || skip('interface') || skip('Declaration') ||
- skip('Definition') || skip('DocumentSelector'))
- return false;
- return true;
-}
-
-// Go signatures for methods.
-function sig(nm: string, a: string, b: string, names?: boolean): string {
- if (a.indexOf('struct') != -1) {
- const v = a.split('\n');
- extraTypes.set(`Param${nm}`, v.slice(1, v.length - 1));
- a = 'Param' + nm;
- }
- if (a == 'void')
- a = '';
- else if (a != '') {
- if (names)
- a = ', params *' + a;
- else
- a = ', *' + a;
- }
- let ret = 'error';
- if (b != '' && b != 'void') {
- // avoid * when it is senseless
- if (indirect(b)) b = '*' + b;
- ret = `(${b}, error)`;
- }
- let start = `${nm}(`;
- if (names) {
- start = start + 'ctx ';
- }
- return `${start}context.Context${a}) ${ret}`;
-}
-
-// write the request/notification code
-function output(side: side) {
- // make sure the output file exists
- if (!side.outputFile) {
- side.outputFile = `ts${side.name}.go`;
- side.fd = fs.openSync(side.outputFile, 'w');
- }
- const f = function (s: string) {
- fs.writeSync(side.fd!, s);
- fs.writeSync(side.fd!, '\n');
- };
- f(u.computeHeader(false));
- f(`
- import (
- "context"
- "encoding/json"
-
- "golang.org/x/tools/internal/jsonrpc2"
- errors "golang.org/x/xerrors"
- )
- `);
- const a = side.name[0].toUpperCase() + side.name.substring(1);
- f(`type ${a} interface {`);
- side.methods.forEach((v) => { f(v); });
- f('}\n');
- f(`func ${side.name}Dispatch(ctx context.Context, ${side.name} ${a}, reply jsonrpc2.Replier, r jsonrpc2.Request) (bool, error) {
- switch r.Method() {`);
- side.cases.forEach((v) => { f(v); });
- f(`
- default:
- return false, nil
- }
- }`);
- side.calls.forEach((v) => { f(v); });
-}
-
-// Handling of non-standard requests, so we can add gopls-specific calls.
-function nonstandardRequests() {
- server.methods.push(
- 'NonstandardRequest(ctx context.Context, method string, params interface{}) (interface{}, error)');
- server.calls.push(
- `func (s *serverDispatcher) NonstandardRequest(ctx context.Context, method string, params interface{}) (interface{}, error) {
- var result interface{}
- if err := s.sender.Call(ctx, method, params, &result); err != nil {
- return nil, err
- }
- return result, nil
- }
- `);
-}
-
-// ----- remember it's a scripting language
-function main() {
- if (u.gitHash != u.git()) {
- throw new Error(
- `git hash mismatch, wanted\n${u.gitHash} but source is at\n${u.git()}`);
- }
- u.createOutputFiles();
- parse();
- u.printAST(program);
- // find the Requests and Nofificatations
- for (const sourceFile of program.getSourceFiles()) {
- if (!sourceFile.isDeclarationFile) {
- ts.forEachChild(sourceFile, findRPCs);
- }
- }
- // separate RPCs into client and server
- setReceives();
- // visit every sourceFile collecting top-level type definitions
- for (const sourceFile of program.getSourceFiles()) {
- if (!sourceFile.isDeclarationFile) {
- ts.forEachChild(sourceFile, genTypes);
- }
- }
- // check that each thing occurs exactly once, and put pointers into
- // seenTypes
- checkOnce();
- // for each of Client and Server there are 3 parts to the output:
- // 1. type X interface {methods}
- // 2. func (h *serverHandler) Deliver(...) { switch r.method }
- // 3. func (x *xDispatcher) Method(ctx, parm)
- not.forEach( // notifications
- (v, k) => {
- receives.get(k) == 'client' ? goNot(client, k) : goNot(server, k);
- });
- req.forEach( // requests
- (v, k) => {
- receives.get(k) == 'client' ? goReq(client, k) : goReq(server, k);
- });
- nonstandardRequests();
- // find all the types implied by seenTypes and rpcs to try to avoid
- // generating types that aren't used
- moreTypes();
- // do merging
- cleanData();
- // and print the Go code
- outputTypes();
- console.log(`seen ${seenTypes.size + extraTypes.size}`);
- output(client);
- output(server);
-}
-
-main();
diff --git a/internal/lsp/protocol/typescript/tsconfig.json b/internal/lsp/protocol/typescript/tsconfig.json
deleted file mode 100644
index 14cfe0c7e..000000000
--- a/internal/lsp/protocol/typescript/tsconfig.json
+++ /dev/null
@@ -1,29 +0,0 @@
-{
- "compilerOptions": {
- "isolatedModules": true,
- "moduleResolution": "node",
- "lib":["ES2020"],
- "sourceMap": true, // sourceMap or inlineSourceMap? and see inlineSources
- "target": "ES5",
-
- "noFallthroughCasesInSwitch": false, // there is one legitimate on
- "noImplicitReturns": true,
- "noPropertyAccessFromIndexSignature": true,
- "noUncheckedIndexedAccess": true,
- "noUnusedLocals": true,
- "noUnusedParameters": false,
- "noEmitOnError": true,
-
- // "extendedDiagnostics": true, // for occasional amusement
-
- // "strict": true, // too many undefineds in types, etc
- "alwaysStrict": true,
- "noImplicitAny": true,
- "noImplicitThis": true,
- "strictBindCallApply": true,
- "strictFunctionTypes": true,
- "strictNullChecks": false, // doesn't like arrray access, among other things.
- //"strictPropertyInitialization": true, // needs strictNullChecks
- },
- "files": ["./code.ts", "./util.ts"]
-}
diff --git a/internal/lsp/protocol/typescript/util.ts b/internal/lsp/protocol/typescript/util.ts
deleted file mode 100644
index 9475b26a1..000000000
--- a/internal/lsp/protocol/typescript/util.ts
+++ /dev/null
@@ -1,254 +0,0 @@
-
-// for us typescript ignorati, having an import makes this file a module
-import * as fs from 'fs';
-import * as process from 'process';
-import * as ts from 'typescript';
-
-// This file contains various utilities having to do with producing strings
-// and managing output
-
-// ------ create files
-let dir = process.env['HOME'];
-const srcDir = '/vscode-languageserver-node';
-export const fnames = [
- `${dir}${srcDir}/protocol/src/common/protocol.ts`,
- `${dir}/${srcDir}/protocol/src/browser/main.ts`, `${dir}${srcDir}/types/src/main.ts`,
- `${dir}${srcDir}/jsonrpc/src/node/main.ts`
-];
-export const gitHash = '696f9285bf849b73745682fdb1c1feac73eb8772';
-let outFname = 'tsprotocol.go';
-let fda: number, fdb: number, fde: number; // file descriptors
-
-export function createOutputFiles() {
- fda = fs.openSync('/tmp/ts-a', 'w'); // dump of AST
- fdb = fs.openSync('/tmp/ts-b', 'w'); // unused, for debugging
- fde = fs.openSync(outFname, 'w'); // generated Go
-}
-export function pra(s: string) {
- return (fs.writeSync(fda, s));
-}
-export function prb(s: string) {
- return (fs.writeSync(fdb, s));
-}
-export function prgo(s: string) {
- return (fs.writeSync(fde, s));
-}
-
-// Get the hash value of the git commit
-export function git(): string {
- let a = fs.readFileSync(`${dir}${srcDir}/.git/HEAD`).toString();
- // ref: refs/heads/foo, or a hash like
- // cc12d1a1c7df935012cdef5d085cdba04a7c8ebe
- if (a.charAt(a.length - 1) == '\n') {
- a = a.substring(0, a.length - 1);
- }
- if (a.length == 40) {
- return a; // a hash
- }
- if (a.substring(0, 5) == 'ref: ') {
- const fname = `${dir}${srcDir}/.git/` + a.substring(5);
- let b = fs.readFileSync(fname).toString();
- if (b.length == 41) {
- return b.substring(0, 40);
- }
- }
- throw new Error('failed to find the git commit hash');
-}
-
-// Produce a header for Go output files
-export function computeHeader(pkgDoc: boolean): string {
- let lastMod = 0;
- let lastDate = new Date();
- for (const f of fnames) {
- const st = fs.statSync(f);
- if (st.mtimeMs > lastMod) {
- lastMod = st.mtimeMs;
- lastDate = st.mtime;
- }
- }
- const cp = `// Copyright 2019 The Go Authors. All rights reserved.
- // Use of this source code is governed by a BSD-style
- // license that can be found in the LICENSE file.
-
- `;
- const a =
- '// Package protocol contains data types and code for LSP json rpcs\n' +
- '// generated automatically from vscode-languageserver-node\n' +
- `// commit: ${gitHash}\n` +
- `// last fetched ${lastDate}\n`;
- const b = 'package protocol\n';
- const c = '\n// Code generated (see typescript/README.md) DO NOT EDIT.\n\n';
- if (pkgDoc) {
- return cp + c + a + b;
- }
- else {
- return cp + c+ b + a;
- }
-}
-
-// Turn a typescript name into an exportable Go name, and appease lint
-export function goName(s: string): string {
- let ans = s;
- if (s.charAt(0) == '_') {
- // in the end, none of these are emitted.
- ans = 'Inner' + s.substring(1);
- }
- else { ans = s.substring(0, 1).toUpperCase() + s.substring(1); }
- ans = ans.replace(/Uri$/, 'URI');
- ans = ans.replace(/Id$/, 'ID');
- return ans;
-}
-
-// Generate JSON tag for a struct field
-export function JSON(n: ts.PropertySignature): string {
- const json = `\`json:"${n.name.getText()}${n.questionToken !== undefined ? ',omitempty' : ''}"\``;
- return json;
-}
-
-// Generate modifying prefixes and suffixes to ensure
-// consts are unique. (Go consts are package-level, but Typescript's are
-// not.) Use suffixes to minimize changes to gopls.
-export function constName(nm: string, type: string): string {
- let pref = new Map<string, string>([
- ['DiagnosticSeverity', 'Severity'], ['WatchKind', 'Watch'],
- ['SignatureHelpTriggerKind', 'Sig'], ['CompletionItemTag', 'Compl'],
- ['Integer', 'INT_'], ['Uinteger', 'UINT_'], ['CodeActionTriggerKind', 'CodeAction']
- ]); // typeName->prefix
- let suff = new Map<string, string>([
- ['CompletionItemKind', 'Completion'], ['InsertTextFormat', 'TextFormat'],
- ['SymbolTag', 'Symbol'], ['FileOperationPatternKind', 'Op'],
- ]);
- let ans = nm;
- if (pref.get(type)) ans = pref.get(type) + ans;
- if (suff.has(type)) ans = ans + suff.get(type);
- return ans;
-}
-
-// Find the comments associated with an AST node
-export function getComments(node: ts.Node): string {
- const sf = node.getSourceFile();
- const start = node.getStart(sf, false);
- const starta = node.getStart(sf, true);
- const x = sf.text.substring(starta, start);
- return x;
-}
-
-
-// --------- printing the AST, for debugging
-
-export function printAST(program: ts.Program) {
- // dump the ast, for debugging
- const f = function (n: ts.Node) {
- describe(n, pra);
- };
- for (const sourceFile of program.getSourceFiles()) {
- if (!sourceFile.isDeclarationFile) {
- // walk the tree to do stuff
- ts.forEachChild(sourceFile, f);
- }
- }
- pra('\n');
- for (const key of Object.keys(seenThings).sort()) {
- pra(`${key}: ${seenThings.get(key)} \n`);
- }
-}
-
-// Used in printing the AST
-let seenThings = new Map<string, number>();
-function seenAdd(x: string) {
- const u = seenThings.get(x);
- seenThings.set(x, u === undefined ? 1 : u + 1);
-}
-
-// eslint-disable-next-line no-unused-vars
-function describe(node: ts.Node, pr: (_: string) => any) {
- if (node === undefined) {
- return;
- }
- let indent = '';
-
- function f(n: ts.Node) {
- seenAdd(kinds(n));
- if (ts.isIdentifier(n)) {
- pr(`${indent} ${loc(n)} ${strKind(n)} ${n.text} \n`);
- }
- else if (ts.isPropertySignature(n) || ts.isEnumMember(n)) {
- pra(`${indent} ${loc(n)} ${strKind(n)} \n`);
- }
- else if (ts.isTypeLiteralNode(n)) {
- let m = n.members;
- pr(`${indent} ${loc(n)} ${strKind(n)} ${m.length} \n`);
- }
- else if (ts.isStringLiteral(n)) {
- pr(`${indent} ${loc(n)} ${strKind(n)} ${n.text} \n`);
- }
- else { pr(`${indent} ${loc(n)} ${strKind(n)} \n`); }
- indent += ' .';
- ts.forEachChild(n, f);
- indent = indent.slice(0, indent.length - 2);
- }
- f(node);
-}
-
-
-// For debugging, say where an AST node is in a file
-export function loc(node: ts.Node | undefined): string {
- if (!node) throw new Error('loc called with undefined (cannot happen!)');
- const sf = node.getSourceFile();
- const start = node.getStart();
- const x = sf.getLineAndCharacterOfPosition(start);
- const full = node.getFullStart();
- const y = sf.getLineAndCharacterOfPosition(full);
- let fn = sf.fileName;
- const n = fn.search(/-node./);
- fn = fn.substring(n + 6);
- return `${fn} ${x.line + 1}: ${x.character + 1} (${y.line + 1}: ${y.character + 1})`;
-}
-
-// --- various string stuff
-
-// return a string of the kinds of the immediate descendants
-// as part of printing the AST tree
-function kinds(n: ts.Node): string {
- let res = 'Seen ' + strKind(n);
- function f(n: ts.Node): void { res += ' ' + strKind(n); }
- ts.forEachChild(n, f);
- return res;
-}
-
-// What kind of AST node is it? This would just be typescript's
-// SyntaxKind[n.kind] except that the default names for some nodes
-// are misleading
-export function strKind(n: ts.Node | undefined): string {
- if (n == null || n == undefined) {
- return 'null';
- }
- return kindToStr(n.kind);
-}
-
-function kindToStr(k: ts.SyntaxKind): string {
- const x = ts.SyntaxKind[k];
- // some of these have two names
- switch (x) {
- default:
- return x;
- case 'FirstAssignment':
- return 'EqualsToken';
- case 'FirstBinaryOperator':
- return 'LessThanToken';
- case 'FirstCompoundAssignment':
- return 'PlusEqualsToken';
- case 'FirstContextualKeyword':
- return 'AbstractKeyword';
- case 'FirstLiteralToken':
- return 'NumericLiteral';
- case 'FirstNode':
- return 'QualifiedName';
- case 'FirstTemplateToken':
- return 'NoSubstitutionTemplateLiteral';
- case 'LastTemplateToken':
- return 'TemplateTail';
- case 'FirstTypeNode':
- return 'TypePredicate';
- }
-}
diff --git a/internal/lsp/references.go b/internal/lsp/references.go
deleted file mode 100644
index f96e5532c..000000000
--- a/internal/lsp/references.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lsp
-
-import (
- "context"
-
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/lsp/template"
-)
-
-func (s *Server) references(ctx context.Context, params *protocol.ReferenceParams) ([]protocol.Location, error) {
- snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.UnknownKind)
- defer release()
- if !ok {
- return nil, err
- }
- if snapshot.View().FileKind(fh) == source.Tmpl {
- return template.References(ctx, snapshot, fh, params)
- }
- references, err := source.References(ctx, snapshot, fh, params.Position, params.Context.IncludeDeclaration)
- if err != nil {
- return nil, err
- }
- var locations []protocol.Location
- for _, ref := range references {
- refRange, err := ref.Range()
- if err != nil {
- return nil, err
- }
- locations = append(locations, protocol.Location{
- URI: protocol.URIFromSpanURI(ref.URI()),
- Range: refRange,
- })
- }
- return locations, nil
-}
diff --git a/internal/lsp/regtest/doc.go b/internal/lsp/regtest/doc.go
deleted file mode 100644
index 3994e54cb..000000000
--- a/internal/lsp/regtest/doc.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package regtest provides a framework for writing gopls regression tests.
-//
-// User reported regressions are often expressed in terms of editor
-// interactions. For example: "When I open my editor in this directory,
-// navigate to this file, and change this line, I get a diagnostic that doesn't
-// make sense". In these cases reproducing, diagnosing, and writing a test to
-// protect against this regression can be difficult.
-//
-// The regtest package provides an API for developers to express these types of
-// user interactions in ordinary Go tests, validate them, and run them in a
-// variety of execution modes (see gopls/doc/daemon.md for more information on
-// execution modes). This is achieved roughly as follows:
-// + the Runner type starts and connects to a gopls instance for each
-// configured execution mode.
-// + the Env type provides a collection of resources to use in writing tests
-// (for example a temporary working directory and fake text editor)
-// + user interactions with these resources are scripted using test wrappers
-// around the API provided by the golang.org/x/tools/internal/lsp/fake
-// package.
-//
-// Regressions are expressed in terms of Expectations, which at a high level
-// are conditions that we expect to be met (or not to be met) at some point
-// after performing the interactions in the test. This is necessary because the
-// LSP is by construction asynchronous: both client and server can send
-// eachother notifications without formal acknowledgement that they have been
-// fully processed.
-//
-// Simple Expectations may be combined to match specific conditions reported by
-// the user. In the example above, a regtest validating that the user-reported
-// bug had been fixed would "expect" that the editor never displays the
-// confusing diagnostic.
-package regtest
diff --git a/internal/lsp/regtest/env.go b/internal/lsp/regtest/env.go
deleted file mode 100644
index b6b163a87..000000000
--- a/internal/lsp/regtest/env.go
+++ /dev/null
@@ -1,318 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package regtest
-
-import (
- "context"
- "fmt"
- "strings"
- "sync"
- "testing"
-
- "golang.org/x/tools/internal/jsonrpc2/servertest"
- "golang.org/x/tools/internal/lsp/fake"
- "golang.org/x/tools/internal/lsp/protocol"
-)
-
-// Env holds an initialized fake Editor, Workspace, and Server, which may be
-// used for writing tests. It also provides adapter methods that call t.Fatal
-// on any error, so that tests for the happy path may be written without
-// checking errors.
-type Env struct {
- T testing.TB
- Ctx context.Context
-
- // Most tests should not need to access the scratch area, editor, server, or
- // connection, but they are available if needed.
- Sandbox *fake.Sandbox
- Editor *fake.Editor
- Server servertest.Connector
-
- // mu guards the fields below, for the purpose of checking conditions on
- // every change to diagnostics.
- mu sync.Mutex
- // For simplicity, each waiter gets a unique ID.
- nextWaiterID int
- state State
- waiters map[int]*condition
-}
-
-// State encapsulates the server state TODO: explain more
-type State struct {
- // diagnostics are a map of relative path->diagnostics params
- diagnostics map[string]*protocol.PublishDiagnosticsParams
- logs []*protocol.LogMessageParams
- showMessage []*protocol.ShowMessageParams
- showMessageRequest []*protocol.ShowMessageRequestParams
-
- registrations []*protocol.RegistrationParams
- unregistrations []*protocol.UnregistrationParams
-
- // outstandingWork is a map of token->work summary. All tokens are assumed to
- // be string, though the spec allows for numeric tokens as well. When work
- // completes, it is deleted from this map.
- outstandingWork map[protocol.ProgressToken]*workProgress
- startedWork map[string]uint64
- completedWork map[string]uint64
-}
-
-type workProgress struct {
- title, msg string
- percent float64
-}
-
-func (s State) String() string {
- var b strings.Builder
- b.WriteString("#### log messages (see RPC logs for full text):\n")
- for _, msg := range s.logs {
- summary := fmt.Sprintf("%v: %q", msg.Type, msg.Message)
- if len(summary) > 60 {
- summary = summary[:57] + "..."
- }
- // Some logs are quite long, and since they should be reproduced in the RPC
- // logs on any failure we include here just a short summary.
- fmt.Fprint(&b, "\t"+summary+"\n")
- }
- b.WriteString("\n")
- b.WriteString("#### diagnostics:\n")
- for name, params := range s.diagnostics {
- fmt.Fprintf(&b, "\t%s (version %d):\n", name, int(params.Version))
- for _, d := range params.Diagnostics {
- fmt.Fprintf(&b, "\t\t(%d, %d): %s\n", int(d.Range.Start.Line), int(d.Range.Start.Character), d.Message)
- }
- }
- b.WriteString("\n")
- b.WriteString("#### outstanding work:\n")
- for token, state := range s.outstandingWork {
- name := state.title
- if name == "" {
- name = fmt.Sprintf("!NO NAME(token: %s)", token)
- }
- fmt.Fprintf(&b, "\t%s: %.2f\n", name, state.percent)
- }
- b.WriteString("#### completed work:\n")
- for name, count := range s.completedWork {
- fmt.Fprintf(&b, "\t%s: %d\n", name, count)
- }
- return b.String()
-}
-
-// A condition is satisfied when all expectations are simultaneously
-// met. At that point, the 'met' channel is closed. On any failure, err is set
-// and the failed channel is closed.
-type condition struct {
- expectations []Expectation
- verdict chan Verdict
-}
-
-// NewEnv creates a new test environment using the given scratch environment
-// and gopls server.
-func NewEnv(ctx context.Context, tb testing.TB, sandbox *fake.Sandbox, ts servertest.Connector, editorConfig fake.EditorConfig, withHooks bool) *Env {
- tb.Helper()
- conn := ts.Connect(ctx)
- env := &Env{
- T: tb,
- Ctx: ctx,
- Sandbox: sandbox,
- Server: ts,
- state: State{
- diagnostics: make(map[string]*protocol.PublishDiagnosticsParams),
- outstandingWork: make(map[protocol.ProgressToken]*workProgress),
- startedWork: make(map[string]uint64),
- completedWork: make(map[string]uint64),
- },
- waiters: make(map[int]*condition),
- }
- var hooks fake.ClientHooks
- if withHooks {
- hooks = fake.ClientHooks{
- OnDiagnostics: env.onDiagnostics,
- OnLogMessage: env.onLogMessage,
- OnWorkDoneProgressCreate: env.onWorkDoneProgressCreate,
- OnProgress: env.onProgress,
- OnShowMessage: env.onShowMessage,
- OnShowMessageRequest: env.onShowMessageRequest,
- OnRegistration: env.onRegistration,
- OnUnregistration: env.onUnregistration,
- }
- }
- editor, err := fake.NewEditor(sandbox, editorConfig).Connect(ctx, conn, hooks)
- if err != nil {
- tb.Fatal(err)
- }
- env.Editor = editor
- return env
-}
-
-func (e *Env) onDiagnostics(_ context.Context, d *protocol.PublishDiagnosticsParams) error {
- e.mu.Lock()
- defer e.mu.Unlock()
-
- pth := e.Sandbox.Workdir.URIToPath(d.URI)
- e.state.diagnostics[pth] = d
- e.checkConditionsLocked()
- return nil
-}
-
-func (e *Env) onShowMessage(_ context.Context, m *protocol.ShowMessageParams) error {
- e.mu.Lock()
- defer e.mu.Unlock()
-
- e.state.showMessage = append(e.state.showMessage, m)
- e.checkConditionsLocked()
- return nil
-}
-
-func (e *Env) onShowMessageRequest(_ context.Context, m *protocol.ShowMessageRequestParams) error {
- e.mu.Lock()
- defer e.mu.Unlock()
-
- e.state.showMessageRequest = append(e.state.showMessageRequest, m)
- e.checkConditionsLocked()
- return nil
-}
-
-func (e *Env) onLogMessage(_ context.Context, m *protocol.LogMessageParams) error {
- e.mu.Lock()
- defer e.mu.Unlock()
-
- e.state.logs = append(e.state.logs, m)
- e.checkConditionsLocked()
- return nil
-}
-
-func (e *Env) onWorkDoneProgressCreate(_ context.Context, m *protocol.WorkDoneProgressCreateParams) error {
- e.mu.Lock()
- defer e.mu.Unlock()
-
- e.state.outstandingWork[m.Token] = &workProgress{}
- return nil
-}
-
-func (e *Env) onProgress(_ context.Context, m *protocol.ProgressParams) error {
- e.mu.Lock()
- defer e.mu.Unlock()
- work, ok := e.state.outstandingWork[m.Token]
- if !ok {
- panic(fmt.Sprintf("got progress report for unknown report %v: %v", m.Token, m))
- }
- v := m.Value.(map[string]interface{})
- switch kind := v["kind"]; kind {
- case "begin":
- work.title = v["title"].(string)
- e.state.startedWork[work.title] = e.state.startedWork[work.title] + 1
- if msg, ok := v["message"]; ok {
- work.msg = msg.(string)
- }
- case "report":
- if pct, ok := v["percentage"]; ok {
- work.percent = pct.(float64)
- }
- if msg, ok := v["message"]; ok {
- work.msg = msg.(string)
- }
- case "end":
- title := e.state.outstandingWork[m.Token].title
- e.state.completedWork[title] = e.state.completedWork[title] + 1
- delete(e.state.outstandingWork, m.Token)
- }
- e.checkConditionsLocked()
- return nil
-}
-
-func (e *Env) onRegistration(_ context.Context, m *protocol.RegistrationParams) error {
- e.mu.Lock()
- defer e.mu.Unlock()
-
- e.state.registrations = append(e.state.registrations, m)
- e.checkConditionsLocked()
- return nil
-}
-
-func (e *Env) onUnregistration(_ context.Context, m *protocol.UnregistrationParams) error {
- e.mu.Lock()
- defer e.mu.Unlock()
-
- e.state.unregistrations = append(e.state.unregistrations, m)
- e.checkConditionsLocked()
- return nil
-}
-
-func (e *Env) checkConditionsLocked() {
- for id, condition := range e.waiters {
- if v, _ := checkExpectations(e.state, condition.expectations); v != Unmet {
- delete(e.waiters, id)
- condition.verdict <- v
- }
- }
-}
-
-// checkExpectations reports whether s meets all expectations.
-func checkExpectations(s State, expectations []Expectation) (Verdict, string) {
- finalVerdict := Met
- var summary strings.Builder
- for _, e := range expectations {
- v := e.Check(s)
- if v > finalVerdict {
- finalVerdict = v
- }
- summary.WriteString(fmt.Sprintf("\t%v: %s\n", v, e.Description()))
- }
- return finalVerdict, summary.String()
-}
-
-// DiagnosticsFor returns the current diagnostics for the file. It is useful
-// after waiting on AnyDiagnosticAtCurrentVersion, when the desired diagnostic
-// is not simply described by DiagnosticAt.
-func (e *Env) DiagnosticsFor(name string) *protocol.PublishDiagnosticsParams {
- e.mu.Lock()
- defer e.mu.Unlock()
- return e.state.diagnostics[name]
-}
-
-// Await waits for all expectations to simultaneously be met. It should only be
-// called from the main test goroutine.
-func (e *Env) Await(expectations ...Expectation) {
- e.T.Helper()
- e.mu.Lock()
- // Before adding the waiter, we check if the condition is currently met or
- // failed to avoid a race where the condition was realized before Await was
- // called.
- switch verdict, summary := checkExpectations(e.state, expectations); verdict {
- case Met:
- e.mu.Unlock()
- return
- case Unmeetable:
- failure := fmt.Sprintf("unmeetable expectations:\n%s\nstate:\n%v", summary, e.state)
- e.mu.Unlock()
- e.T.Fatal(failure)
- }
- cond := &condition{
- expectations: expectations,
- verdict: make(chan Verdict),
- }
- e.waiters[e.nextWaiterID] = cond
- e.nextWaiterID++
- e.mu.Unlock()
-
- var err error
- select {
- case <-e.Ctx.Done():
- err = e.Ctx.Err()
- case v := <-cond.verdict:
- if v != Met {
- err = fmt.Errorf("condition has final verdict %v", v)
- }
- }
- e.mu.Lock()
- defer e.mu.Unlock()
- _, summary := checkExpectations(e.state, expectations)
-
- // Debugging an unmet expectation can be tricky, so we put some effort into
- // nicely formatting the failure.
- if err != nil {
- e.T.Fatalf("waiting on:\n%s\nerr:%v\n\nstate:\n%v", summary, err, e.state)
- }
-}
diff --git a/internal/lsp/regtest/env_test.go b/internal/lsp/regtest/env_test.go
deleted file mode 100644
index fe5864ca7..000000000
--- a/internal/lsp/regtest/env_test.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package regtest
-
-import (
- "context"
- "encoding/json"
- "testing"
-
- "golang.org/x/tools/internal/lsp/protocol"
-)
-
-func TestProgressUpdating(t *testing.T) {
- e := &Env{
- state: State{
- outstandingWork: make(map[protocol.ProgressToken]*workProgress),
- startedWork: make(map[string]uint64),
- completedWork: make(map[string]uint64),
- },
- }
- ctx := context.Background()
- if err := e.onWorkDoneProgressCreate(ctx, &protocol.WorkDoneProgressCreateParams{
- Token: "foo",
- }); err != nil {
- t.Fatal(err)
- }
- if err := e.onWorkDoneProgressCreate(ctx, &protocol.WorkDoneProgressCreateParams{
- Token: "bar",
- }); err != nil {
- t.Fatal(err)
- }
- updates := []struct {
- token string
- value interface{}
- }{
- {"foo", protocol.WorkDoneProgressBegin{Kind: "begin", Title: "foo work"}},
- {"bar", protocol.WorkDoneProgressBegin{Kind: "begin", Title: "bar work"}},
- {"foo", protocol.WorkDoneProgressEnd{Kind: "end"}},
- {"bar", protocol.WorkDoneProgressReport{Kind: "report", Percentage: 42}},
- }
- for _, update := range updates {
- params := &protocol.ProgressParams{
- Token: update.token,
- Value: update.value,
- }
- data, err := json.Marshal(params)
- if err != nil {
- t.Fatal(err)
- }
- var unmarshaled protocol.ProgressParams
- if err := json.Unmarshal(data, &unmarshaled); err != nil {
- t.Fatal(err)
- }
- if err := e.onProgress(ctx, &unmarshaled); err != nil {
- t.Fatal(err)
- }
- }
- if _, ok := e.state.outstandingWork["foo"]; ok {
- t.Error("got work entry for \"foo\", want none")
- }
- got := *e.state.outstandingWork["bar"]
- want := workProgress{title: "bar work", percent: 42}
- if got != want {
- t.Errorf("work progress for \"bar\": %v, want %v", got, want)
- }
-}
diff --git a/internal/lsp/regtest/expectation.go b/internal/lsp/regtest/expectation.go
deleted file mode 100644
index 5cf2b6c15..000000000
--- a/internal/lsp/regtest/expectation.go
+++ /dev/null
@@ -1,668 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package regtest
-
-import (
- "fmt"
- "regexp"
- "strings"
-
- "golang.org/x/tools/internal/lsp"
- "golang.org/x/tools/internal/lsp/fake"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/testenv"
-)
-
-// An Expectation asserts that the state of the editor at a point in time
-// matches an expected condition. This is used for signaling in tests when
-// certain conditions in the editor are met.
-type Expectation interface {
- // Check determines whether the state of the editor satisfies the
- // expectation, returning the results that met the condition.
- Check(State) Verdict
- // Description is a human-readable description of the expectation.
- Description() string
-}
-
-var (
- // InitialWorkspaceLoad is an expectation that the workspace initial load has
- // completed. It is verified via workdone reporting.
- InitialWorkspaceLoad = CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromInitialWorkspaceLoad), 1, false)
-)
-
-// A Verdict is the result of checking an expectation against the current
-// editor state.
-type Verdict int
-
-// Order matters for the following constants: verdicts are sorted in order of
-// decisiveness.
-const (
- // Met indicates that an expectation is satisfied by the current state.
- Met Verdict = iota
- // Unmet indicates that an expectation is not currently met, but could be met
- // in the future.
- Unmet
- // Unmeetable indicates that an expectation cannot be satisfied in the
- // future.
- Unmeetable
-)
-
-func (v Verdict) String() string {
- switch v {
- case Met:
- return "Met"
- case Unmet:
- return "Unmet"
- case Unmeetable:
- return "Unmeetable"
- }
- return fmt.Sprintf("unrecognized verdict %d", v)
-}
-
-// SimpleExpectation holds an arbitrary check func, and implements the Expectation interface.
-type SimpleExpectation struct {
- check func(State) Verdict
- description string
-}
-
-// Check invokes e.check.
-func (e SimpleExpectation) Check(s State) Verdict {
- return e.check(s)
-}
-
-// Description returns e.descriptin.
-func (e SimpleExpectation) Description() string {
- return e.description
-}
-
-// OnceMet returns an Expectation that, once the precondition is met, asserts
-// that mustMeet is met.
-func OnceMet(precondition Expectation, mustMeets ...Expectation) *SimpleExpectation {
- check := func(s State) Verdict {
- switch pre := precondition.Check(s); pre {
- case Unmeetable:
- return Unmeetable
- case Met:
- for _, mustMeet := range mustMeets {
- verdict := mustMeet.Check(s)
- if verdict != Met {
- return Unmeetable
- }
- }
- return Met
- default:
- return Unmet
- }
- }
- var descriptions []string
- for _, mustMeet := range mustMeets {
- descriptions = append(descriptions, mustMeet.Description())
- }
- return &SimpleExpectation{
- check: check,
- description: fmt.Sprintf("once %q is met, must have %q", precondition.Description(), strings.Join(descriptions, "\n")),
- }
-}
-
-// ReadDiagnostics is an 'expectation' that is used to read diagnostics
-// atomically. It is intended to be used with 'OnceMet'.
-func ReadDiagnostics(fileName string, into *protocol.PublishDiagnosticsParams) *SimpleExpectation {
- check := func(s State) Verdict {
- diags, ok := s.diagnostics[fileName]
- if !ok {
- return Unmeetable
- }
- *into = *diags
- return Met
- }
- return &SimpleExpectation{
- check: check,
- description: fmt.Sprintf("read diagnostics for %q", fileName),
- }
-}
-
-// NoOutstandingWork asserts that there is no work initiated using the LSP
-// $/progress API that has not completed.
-func NoOutstandingWork() SimpleExpectation {
- check := func(s State) Verdict {
- if len(s.outstandingWork) == 0 {
- return Met
- }
- return Unmet
- }
- return SimpleExpectation{
- check: check,
- description: "no outstanding work",
- }
-}
-
-// NoShowMessage asserts that the editor has not received a ShowMessage.
-func NoShowMessage() SimpleExpectation {
- check := func(s State) Verdict {
- if len(s.showMessage) == 0 {
- return Met
- }
- return Unmeetable
- }
- return SimpleExpectation{
- check: check,
- description: "no ShowMessage received",
- }
-}
-
-// ShownMessage asserts that the editor has received a ShownMessage with the
-// given title.
-func ShownMessage(title string) SimpleExpectation {
- check := func(s State) Verdict {
- for _, m := range s.showMessage {
- if strings.Contains(m.Message, title) {
- return Met
- }
- }
- return Unmet
- }
- return SimpleExpectation{
- check: check,
- description: "received ShowMessage",
- }
-}
-
-// ShowMessageRequest asserts that the editor has received a ShowMessageRequest
-// with an action item that has the given title.
-func ShowMessageRequest(title string) SimpleExpectation {
- check := func(s State) Verdict {
- if len(s.showMessageRequest) == 0 {
- return Unmet
- }
- // Only check the most recent one.
- m := s.showMessageRequest[len(s.showMessageRequest)-1]
- if len(m.Actions) == 0 || len(m.Actions) > 1 {
- return Unmet
- }
- if m.Actions[0].Title == title {
- return Met
- }
- return Unmet
- }
- return SimpleExpectation{
- check: check,
- description: "received ShowMessageRequest",
- }
-}
-
-// DoneWithOpen expects all didOpen notifications currently sent by the editor
-// to be completely processed.
-func (e *Env) DoneWithOpen() Expectation {
- opens := e.Editor.Stats().DidOpen
- return CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromDidOpen), opens, true)
-}
-
-// StartedChange expects there to have been i work items started for
-// processing didChange notifications.
-func StartedChange(i uint64) Expectation {
- return StartedWork(lsp.DiagnosticWorkTitle(lsp.FromDidChange), i)
-}
-
-// DoneWithChange expects all didChange notifications currently sent by the
-// editor to be completely processed.
-func (e *Env) DoneWithChange() Expectation {
- changes := e.Editor.Stats().DidChange
- return CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromDidChange), changes, true)
-}
-
-// DoneWithSave expects all didSave notifications currently sent by the editor
-// to be completely processed.
-func (e *Env) DoneWithSave() Expectation {
- saves := e.Editor.Stats().DidSave
- return CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromDidSave), saves, true)
-}
-
-// DoneWithChangeWatchedFiles expects all didChangeWatchedFiles notifications
-// currently sent by the editor to be completely processed.
-func (e *Env) DoneWithChangeWatchedFiles() Expectation {
- changes := e.Editor.Stats().DidChangeWatchedFiles
- return CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromDidChangeWatchedFiles), changes, true)
-}
-
-// DoneWithClose expects all didClose notifications currently sent by the
-// editor to be completely processed.
-func (e *Env) DoneWithClose() Expectation {
- changes := e.Editor.Stats().DidClose
- return CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromDidClose), changes, true)
-}
-
-// StartedWork expect a work item to have been started >= atLeast times.
-//
-// See CompletedWork.
-func StartedWork(title string, atLeast uint64) SimpleExpectation {
- check := func(s State) Verdict {
- if s.startedWork[title] >= atLeast {
- return Met
- }
- return Unmet
- }
- return SimpleExpectation{
- check: check,
- description: fmt.Sprintf("started work %q at least %d time(s)", title, atLeast),
- }
-}
-
-// CompletedWork expects a work item to have been completed >= atLeast times.
-//
-// Since the Progress API doesn't include any hidden metadata, we must use the
-// progress notification title to identify the work we expect to be completed.
-func CompletedWork(title string, count uint64, atLeast bool) SimpleExpectation {
- check := func(s State) Verdict {
- if s.completedWork[title] == count || atLeast && s.completedWork[title] > count {
- return Met
- }
- return Unmet
- }
- desc := fmt.Sprintf("completed work %q %v times", title, count)
- if atLeast {
- desc = fmt.Sprintf("completed work %q at least %d time(s)", title, count)
- }
- return SimpleExpectation{
- check: check,
- description: desc,
- }
-}
-
-// OutstandingWork expects a work item to be outstanding. The given title must
-// be an exact match, whereas the given msg must only be contained in the work
-// item's message.
-func OutstandingWork(title, msg string) SimpleExpectation {
- check := func(s State) Verdict {
- for _, work := range s.outstandingWork {
- if work.title == title && strings.Contains(work.msg, msg) {
- return Met
- }
- }
- return Unmet
- }
- return SimpleExpectation{
- check: check,
- description: fmt.Sprintf("outstanding work: %s", title),
- }
-}
-
-// LogExpectation is an expectation on the log messages received by the editor
-// from gopls.
-type LogExpectation struct {
- check func([]*protocol.LogMessageParams) Verdict
- description string
-}
-
-// Check implements the Expectation interface.
-func (e LogExpectation) Check(s State) Verdict {
- return e.check(s.logs)
-}
-
-// Description implements the Expectation interface.
-func (e LogExpectation) Description() string {
- return e.description
-}
-
-// NoErrorLogs asserts that the client has not received any log messages of
-// error severity.
-func NoErrorLogs() LogExpectation {
- return NoLogMatching(protocol.Error, "")
-}
-
-// LogMatching asserts that the client has received a log message
-// of type typ matching the regexp re.
-func LogMatching(typ protocol.MessageType, re string, count int, atLeast bool) LogExpectation {
- rec, err := regexp.Compile(re)
- if err != nil {
- panic(err)
- }
- check := func(msgs []*protocol.LogMessageParams) Verdict {
- var found int
- for _, msg := range msgs {
- if msg.Type == typ && rec.Match([]byte(msg.Message)) {
- found++
- }
- }
- // Check for an exact or "at least" match.
- if found == count || (found >= count && atLeast) {
- return Met
- }
- return Unmet
- }
- desc := fmt.Sprintf("log message matching %q expected %v times", re, count)
- if atLeast {
- desc = fmt.Sprintf("log message matching %q expected at least %v times", re, count)
- }
- return LogExpectation{
- check: check,
- description: desc,
- }
-}
-
-// NoLogMatching asserts that the client has not received a log message
-// of type typ matching the regexp re. If re is an empty string, any log
-// message is considered a match.
-func NoLogMatching(typ protocol.MessageType, re string) LogExpectation {
- var r *regexp.Regexp
- if re != "" {
- var err error
- r, err = regexp.Compile(re)
- if err != nil {
- panic(err)
- }
- }
- check := func(msgs []*protocol.LogMessageParams) Verdict {
- for _, msg := range msgs {
- if msg.Type != typ {
- continue
- }
- if r == nil || r.Match([]byte(msg.Message)) {
- return Unmeetable
- }
- }
- return Met
- }
- return LogExpectation{
- check: check,
- description: fmt.Sprintf("no log message matching %q", re),
- }
-}
-
-// RegistrationExpectation is an expectation on the capability registrations
-// received by the editor from gopls.
-type RegistrationExpectation struct {
- check func([]*protocol.RegistrationParams) Verdict
- description string
-}
-
-// Check implements the Expectation interface.
-func (e RegistrationExpectation) Check(s State) Verdict {
- return e.check(s.registrations)
-}
-
-// Description implements the Expectation interface.
-func (e RegistrationExpectation) Description() string {
- return e.description
-}
-
-// RegistrationMatching asserts that the client has received a capability
-// registration matching the given regexp.
-func RegistrationMatching(re string) RegistrationExpectation {
- rec, err := regexp.Compile(re)
- if err != nil {
- panic(err)
- }
- check := func(params []*protocol.RegistrationParams) Verdict {
- for _, p := range params {
- for _, r := range p.Registrations {
- if rec.Match([]byte(r.Method)) {
- return Met
- }
- }
- }
- return Unmet
- }
- return RegistrationExpectation{
- check: check,
- description: fmt.Sprintf("registration matching %q", re),
- }
-}
-
-// UnregistrationExpectation is an expectation on the capability
-// unregistrations received by the editor from gopls.
-type UnregistrationExpectation struct {
- check func([]*protocol.UnregistrationParams) Verdict
- description string
-}
-
-// Check implements the Expectation interface.
-func (e UnregistrationExpectation) Check(s State) Verdict {
- return e.check(s.unregistrations)
-}
-
-// Description implements the Expectation interface.
-func (e UnregistrationExpectation) Description() string {
- return e.description
-}
-
-// UnregistrationMatching asserts that the client has received an
-// unregistration whose ID matches the given regexp.
-func UnregistrationMatching(re string) UnregistrationExpectation {
- rec, err := regexp.Compile(re)
- if err != nil {
- panic(err)
- }
- check := func(params []*protocol.UnregistrationParams) Verdict {
- for _, p := range params {
- for _, r := range p.Unregisterations {
- if rec.Match([]byte(r.Method)) {
- return Met
- }
- }
- }
- return Unmet
- }
- return UnregistrationExpectation{
- check: check,
- description: fmt.Sprintf("unregistration matching %q", re),
- }
-}
-
-// A DiagnosticExpectation is a condition that must be met by the current set
-// of diagnostics for a file.
-type DiagnosticExpectation struct {
- // optionally, the position of the diagnostic and the regex used to calculate it.
- pos *fake.Pos
- re string
-
- // optionally, the message that the diagnostic should contain.
- message string
-
- // whether the expectation is that the diagnostic is present, or absent.
- present bool
-
- // path is the scratch workdir-relative path to the file being asserted on.
- path string
-}
-
-// Check implements the Expectation interface.
-func (e DiagnosticExpectation) Check(s State) Verdict {
- diags, ok := s.diagnostics[e.path]
- if !ok {
- if !e.present {
- return Met
- }
- return Unmet
- }
-
- found := false
- for _, d := range diags.Diagnostics {
- if e.pos != nil {
- if d.Range.Start.Line != uint32(e.pos.Line) || d.Range.Start.Character != uint32(e.pos.Column) {
- continue
- }
- }
- if e.message != "" {
- if !strings.Contains(d.Message, e.message) {
- continue
- }
- }
- found = true
- break
- }
-
- if found == e.present {
- return Met
- }
- return Unmet
-}
-
-// Description implements the Expectation interface.
-func (e DiagnosticExpectation) Description() string {
- desc := e.path + ":"
- if !e.present {
- desc += " no"
- }
- desc += " diagnostic"
- if e.pos != nil {
- desc += fmt.Sprintf(" at {line:%d, column:%d}", e.pos.Line, e.pos.Column)
- if e.re != "" {
- desc += fmt.Sprintf(" (location of %q)", e.re)
- }
- }
- if e.message != "" {
- desc += fmt.Sprintf(" with message %q", e.message)
- }
- return desc
-}
-
-// NoOutstandingDiagnostics asserts that the workspace has no outstanding
-// diagnostic messages.
-func NoOutstandingDiagnostics() Expectation {
- check := func(s State) Verdict {
- for _, diags := range s.diagnostics {
- if len(diags.Diagnostics) > 0 {
- return Unmet
- }
- }
- return Met
- }
- return SimpleExpectation{
- check: check,
- description: "no outstanding diagnostics",
- }
-}
-
-// EmptyDiagnostics asserts that empty diagnostics are sent for the
-// workspace-relative path name.
-func EmptyDiagnostics(name string) Expectation {
- check := func(s State) Verdict {
- if diags := s.diagnostics[name]; diags != nil && len(diags.Diagnostics) == 0 {
- return Met
- }
- return Unmet
- }
- return SimpleExpectation{
- check: check,
- description: fmt.Sprintf("empty diagnostics for %q", name),
- }
-}
-
-// EmptyOrNoDiagnostics asserts that either no diagnostics are sent for the
-// workspace-relative path name, or empty diagnostics are sent.
-// TODO(rFindley): this subtlety shouldn't be necessary. Gopls should always
-// send at least one diagnostic set for open files.
-func EmptyOrNoDiagnostics(name string) Expectation {
- check := func(s State) Verdict {
- if diags := s.diagnostics[name]; diags == nil || len(diags.Diagnostics) == 0 {
- return Met
- }
- return Unmet
- }
- return SimpleExpectation{
- check: check,
- description: fmt.Sprintf("empty or no diagnostics for %q", name),
- }
-}
-
-// NoDiagnostics asserts that no diagnostics are sent for the
-// workspace-relative path name. It should be used primarily in conjunction
-// with a OnceMet, as it has to check that all outstanding diagnostics have
-// already been delivered.
-func NoDiagnostics(name string) Expectation {
- check := func(s State) Verdict {
- if _, ok := s.diagnostics[name]; !ok {
- return Met
- }
- return Unmet
- }
- return SimpleExpectation{
- check: check,
- description: "no diagnostics",
- }
-}
-
-// AnyDiagnosticAtCurrentVersion asserts that there is a diagnostic report for
-// the current edited version of the buffer corresponding to the given
-// workdir-relative pathname.
-func (e *Env) AnyDiagnosticAtCurrentVersion(name string) Expectation {
- version := e.Editor.BufferVersion(name)
- check := func(s State) Verdict {
- diags, ok := s.diagnostics[name]
- if ok && diags.Version == int32(version) {
- return Met
- }
- return Unmet
- }
- return SimpleExpectation{
- check: check,
- description: fmt.Sprintf("any diagnostics at version %d", version),
- }
-}
-
-// DiagnosticAtRegexp expects that there is a diagnostic entry at the start
-// position matching the regexp search string re in the buffer specified by
-// name. Note that this currently ignores the end position.
-func (e *Env) DiagnosticAtRegexp(name, re string) DiagnosticExpectation {
- e.T.Helper()
- pos := e.RegexpSearch(name, re)
- return DiagnosticExpectation{path: name, pos: &pos, re: re, present: true}
-}
-
-// DiagnosticAtRegexpWithMessage is like DiagnosticAtRegexp, but it also
-// checks for the content of the diagnostic message,
-func (e *Env) DiagnosticAtRegexpWithMessage(name, re, msg string) DiagnosticExpectation {
- e.T.Helper()
- pos := e.RegexpSearch(name, re)
- return DiagnosticExpectation{path: name, pos: &pos, re: re, present: true, message: msg}
-}
-
-// DiagnosticAt asserts that there is a diagnostic entry at the position
-// specified by line and col, for the workdir-relative path name.
-func DiagnosticAt(name string, line, col int) DiagnosticExpectation {
- return DiagnosticExpectation{path: name, pos: &fake.Pos{Line: line, Column: col}, present: true}
-}
-
-// NoDiagnosticAtRegexp expects that there is no diagnostic entry at the start
-// position matching the regexp search string re in the buffer specified by
-// name. Note that this currently ignores the end position.
-// This should only be used in combination with OnceMet for a given condition,
-// otherwise it may always succeed.
-func (e *Env) NoDiagnosticAtRegexp(name, re string) DiagnosticExpectation {
- e.T.Helper()
- pos := e.RegexpSearch(name, re)
- return DiagnosticExpectation{path: name, pos: &pos, re: re, present: false}
-}
-
-// NoDiagnosticAt asserts that there is no diagnostic entry at the position
-// specified by line and col, for the workdir-relative path name.
-// This should only be used in combination with OnceMet for a given condition,
-// otherwise it may always succeed.
-func NoDiagnosticAt(name string, line, col int) DiagnosticExpectation {
- return DiagnosticExpectation{path: name, pos: &fake.Pos{Line: line, Column: col}, present: false}
-}
-
-// NoDiagnosticWithMessage asserts that there is no diagnostic entry with the
-// given message.
-//
-// This should only be used in combination with OnceMet for a given condition,
-// otherwise it may always succeed.
-func NoDiagnosticWithMessage(name, msg string) DiagnosticExpectation {
- return DiagnosticExpectation{path: name, message: msg, present: false}
-}
-
-// GoSumDiagnostic asserts that a "go.sum is out of sync" diagnostic for the
-// given module (as formatted in a go.mod file, e.g. "example.com v1.0.0") is
-// present.
-func (e *Env) GoSumDiagnostic(name, module string) Expectation {
- e.T.Helper()
- // In 1.16, go.sum diagnostics should appear on the relevant module. Earlier
- // errors have no information and appear on the module declaration.
- if testenv.Go1Point() >= 16 {
- return e.DiagnosticAtRegexpWithMessage(name, module, "go.sum is out of sync")
- } else {
- return e.DiagnosticAtRegexpWithMessage(name, `module`, "go.sum is out of sync")
- }
-}
diff --git a/internal/lsp/regtest/regtest.go b/internal/lsp/regtest/regtest.go
deleted file mode 100644
index 31806233c..000000000
--- a/internal/lsp/regtest/regtest.go
+++ /dev/null
@@ -1,148 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package regtest
-
-import (
- "context"
- "flag"
- "fmt"
- "io/ioutil"
- "os"
- "runtime"
- "testing"
- "time"
-
- "golang.org/x/tools/internal/lsp/cmd"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/testenv"
- "golang.org/x/tools/internal/tool"
-)
-
-var (
- runSubprocessTests = flag.Bool("enable_gopls_subprocess_tests", false, "run regtests against a gopls subprocess")
- goplsBinaryPath = flag.String("gopls_test_binary", "", "path to the gopls binary for use as a remote, for use with the -enable_gopls_subprocess_tests flag")
- regtestTimeout = flag.Duration("regtest_timeout", defaultRegtestTimeout(), "if nonzero, default timeout for each regtest; defaults to GOPLS_REGTEST_TIMEOUT")
- skipCleanup = flag.Bool("regtest_skip_cleanup", false, "whether to skip cleaning up temp directories")
- printGoroutinesOnFailure = flag.Bool("regtest_print_goroutines", false, "whether to print goroutines info on failure")
-)
-
-func defaultRegtestTimeout() time.Duration {
- s := os.Getenv("GOPLS_REGTEST_TIMEOUT")
- if s == "" {
- return 0
- }
- d, err := time.ParseDuration(s)
- if err != nil {
- fmt.Fprintf(os.Stderr, "invalid GOPLS_REGTEST_TIMEOUT %q: %v\n", s, err)
- os.Exit(2)
- }
- return d
-}
-
-var runner *Runner
-
-type regtestRunner interface {
- Run(t *testing.T, files string, f TestFunc)
-}
-
-func Run(t *testing.T, files string, f TestFunc) {
- runner.Run(t, files, f)
-}
-
-func WithOptions(opts ...RunOption) configuredRunner {
- return configuredRunner{opts: opts}
-}
-
-type configuredRunner struct {
- opts []RunOption
-}
-
-func (r configuredRunner) Run(t *testing.T, files string, f TestFunc) {
- runner.Run(t, files, f, r.opts...)
-}
-
-type RunMultiple []struct {
- Name string
- Runner regtestRunner
-}
-
-func (r RunMultiple) Run(t *testing.T, files string, f TestFunc) {
- for _, runner := range r {
- t.Run(runner.Name, func(t *testing.T) {
- runner.Runner.Run(t, files, f)
- })
- }
-}
-
-// The regtests run significantly slower on these operating systems, due to (we
-// believe) kernel locking behavior. Only run in singleton mode on these
-// operating system when using -short.
-var slowGOOS = map[string]bool{
- "darwin": true,
- "openbsd": true,
- "plan9": true,
-}
-
-func DefaultModes() Mode {
- normal := Singleton | Experimental
- if slowGOOS[runtime.GOOS] && testing.Short() {
- normal = Singleton
- }
- if *runSubprocessTests {
- return normal | SeparateProcess
- }
- return normal
-}
-
-// Main sets up and tears down the shared regtest state.
-func Main(m *testing.M, hook func(*source.Options)) {
- testenv.ExitIfSmallMachine()
-
- // Disable GOPACKAGESDRIVER, as it can cause spurious test failures.
- os.Setenv("GOPACKAGESDRIVER", "off")
-
- flag.Parse()
- if os.Getenv("_GOPLS_TEST_BINARY_RUN_AS_GOPLS") == "true" {
- tool.Main(context.Background(), cmd.New("gopls", "", nil, nil), os.Args[1:])
- os.Exit(0)
- }
-
- runner = &Runner{
- DefaultModes: DefaultModes(),
- Timeout: *regtestTimeout,
- PrintGoroutinesOnFailure: *printGoroutinesOnFailure,
- SkipCleanup: *skipCleanup,
- OptionsHook: hook,
- }
- if *runSubprocessTests {
- goplsPath := *goplsBinaryPath
- if goplsPath == "" {
- var err error
- goplsPath, err = os.Executable()
- if err != nil {
- panic(fmt.Sprintf("finding test binary path: %v", err))
- }
- }
- runner.GoplsPath = goplsPath
- }
- dir, err := ioutil.TempDir("", "gopls-regtest-")
- if err != nil {
- panic(fmt.Errorf("creating regtest temp directory: %v", err))
- }
- runner.TempDir = dir
-
- code := m.Run()
- if err := runner.Close(); err != nil {
- fmt.Fprintf(os.Stderr, "closing test runner: %v\n", err)
- // Regtest cleanup is broken in go1.12 and earlier, and sometimes flakes on
- // Windows due to file locking, but this is OK for our CI.
- //
- // Fail on go1.13+, except for windows and android which have shutdown problems.
- if testenv.Go1Point() >= 13 && runtime.GOOS != "windows" && runtime.GOOS != "android" {
- os.Exit(1)
- }
- }
- os.Exit(code)
-}
diff --git a/internal/lsp/regtest/runner.go b/internal/lsp/regtest/runner.go
deleted file mode 100644
index 822a5a315..000000000
--- a/internal/lsp/regtest/runner.go
+++ /dev/null
@@ -1,533 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package regtest
-
-import (
- "bytes"
- "context"
- "fmt"
- "io"
- "io/ioutil"
- "net"
- "os"
- "path/filepath"
- "runtime/pprof"
- "strings"
- "sync"
- "testing"
- "time"
-
- exec "golang.org/x/sys/execabs"
-
- "golang.org/x/tools/internal/jsonrpc2"
- "golang.org/x/tools/internal/jsonrpc2/servertest"
- "golang.org/x/tools/internal/lsp/cache"
- "golang.org/x/tools/internal/lsp/debug"
- "golang.org/x/tools/internal/lsp/fake"
- "golang.org/x/tools/internal/lsp/lsprpc"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/testenv"
- "golang.org/x/tools/internal/xcontext"
-)
-
-// Mode is a bitmask that defines for which execution modes a test should run.
-type Mode int
-
-const (
- // Singleton mode uses a separate in-process gopls instance for each test,
- // and communicates over pipes to mimic the gopls sidecar execution mode,
- // which communicates over stdin/stderr.
- Singleton Mode = 1 << iota
- // Forwarded forwards connections to a shared in-process gopls instance.
- Forwarded
- // SeparateProcess forwards connection to a shared separate gopls process.
- SeparateProcess
- // Experimental enables all of the experimental configurations that are
- // being developed.
- Experimental
-)
-
-// A Runner runs tests in gopls execution environments, as specified by its
-// modes. For modes that share state (for example, a shared cache or common
-// remote), any tests that execute on the same Runner will share the same
-// state.
-type Runner struct {
- DefaultModes Mode
- Timeout time.Duration
- GoplsPath string
- PrintGoroutinesOnFailure bool
- TempDir string
- SkipCleanup bool
- OptionsHook func(*source.Options)
-
- mu sync.Mutex
- ts *servertest.TCPServer
- socketDir string
- // closers is a queue of clean-up functions to run at the end of the entire
- // test suite.
- closers []io.Closer
-}
-
-type runConfig struct {
- editor fake.EditorConfig
- sandbox fake.SandboxConfig
- modes Mode
- noDefaultTimeout bool
- debugAddr string
- skipLogs bool
- skipHooks bool
- optionsHook func(*source.Options)
-}
-
-func (r *Runner) defaultConfig() *runConfig {
- return &runConfig{
- modes: r.DefaultModes,
- optionsHook: r.OptionsHook,
- }
-}
-
-// A RunOption augments the behavior of the test runner.
-type RunOption interface {
- set(*runConfig)
-}
-
-type optionSetter func(*runConfig)
-
-func (f optionSetter) set(opts *runConfig) {
- f(opts)
-}
-
-// NoDefaultTimeout removes the timeout set by the -regtest_timeout flag, for
-// individual tests that are expected to run longer than is reasonable for
-// ordinary regression tests.
-func NoDefaultTimeout() RunOption {
- return optionSetter(func(opts *runConfig) {
- opts.noDefaultTimeout = true
- })
-}
-
-// ProxyFiles configures a file proxy using the given txtar-encoded string.
-func ProxyFiles(txt string) RunOption {
- return optionSetter(func(opts *runConfig) {
- opts.sandbox.ProxyFiles = fake.UnpackTxt(txt)
- })
-}
-
-// Modes configures the execution modes that the test should run in.
-func Modes(modes Mode) RunOption {
- return optionSetter(func(opts *runConfig) {
- opts.modes = modes
- })
-}
-
-// Options configures the various server and user options.
-func Options(hook func(*source.Options)) RunOption {
- return optionSetter(func(opts *runConfig) {
- old := opts.optionsHook
- opts.optionsHook = func(o *source.Options) {
- if old != nil {
- old(o)
- }
- hook(o)
- }
- })
-}
-
-func SendPID() RunOption {
- return optionSetter(func(opts *runConfig) {
- opts.editor.SendPID = true
- })
-}
-
-// EditorConfig is a RunOption option that configured the regtest editor.
-type EditorConfig fake.EditorConfig
-
-func (c EditorConfig) set(opts *runConfig) {
- opts.editor = fake.EditorConfig(c)
-}
-
-// WorkspaceFolders configures the workdir-relative workspace folders to send
-// to the LSP server. By default the editor sends a single workspace folder
-// corresponding to the workdir root. To explicitly configure no workspace
-// folders, use WorkspaceFolders with no arguments.
-func WorkspaceFolders(relFolders ...string) RunOption {
- if len(relFolders) == 0 {
- // Use an empty non-nil slice to signal explicitly no folders.
- relFolders = []string{}
- }
- return optionSetter(func(opts *runConfig) {
- opts.editor.WorkspaceFolders = relFolders
- })
-}
-
-// InGOPATH configures the workspace working directory to be GOPATH, rather
-// than a separate working directory for use with modules.
-func InGOPATH() RunOption {
- return optionSetter(func(opts *runConfig) {
- opts.sandbox.InGoPath = true
- })
-}
-
-// DebugAddress configures a debug server bound to addr. This option is
-// currently only supported when executing in Singleton mode. It is intended to
-// be used for long-running stress tests.
-func DebugAddress(addr string) RunOption {
- return optionSetter(func(opts *runConfig) {
- opts.debugAddr = addr
- })
-}
-
-// SkipLogs skips the buffering of logs during test execution. It is intended
-// for long-running stress tests.
-func SkipLogs() RunOption {
- return optionSetter(func(opts *runConfig) {
- opts.skipLogs = true
- })
-}
-
-// InExistingDir runs the test in a pre-existing directory. If set, no initial
-// files may be passed to the runner. It is intended for long-running stress
-// tests.
-func InExistingDir(dir string) RunOption {
- return optionSetter(func(opts *runConfig) {
- opts.sandbox.Workdir = dir
- })
-}
-
-// SkipHooks allows for disabling the test runner's client hooks that are used
-// for instrumenting expectations (tracking diagnostics, logs, work done,
-// etc.). It is intended for performance-sensitive stress tests or benchmarks.
-func SkipHooks(skip bool) RunOption {
- return optionSetter(func(opts *runConfig) {
- opts.skipHooks = skip
- })
-}
-
-// GOPROXY configures the test environment to have an explicit proxy value.
-// This is intended for stress tests -- to ensure their isolation, regtests
-// should instead use WithProxyFiles.
-func GOPROXY(goproxy string) RunOption {
- return optionSetter(func(opts *runConfig) {
- opts.sandbox.GOPROXY = goproxy
- })
-}
-
-// LimitWorkspaceScope sets the LimitWorkspaceScope configuration.
-func LimitWorkspaceScope() RunOption {
- return optionSetter(func(opts *runConfig) {
- opts.editor.LimitWorkspaceScope = true
- })
-}
-
-type TestFunc func(t *testing.T, env *Env)
-
-// Run executes the test function in the default configured gopls execution
-// modes. For each a test run, a new workspace is created containing the
-// un-txtared files specified by filedata.
-func (r *Runner) Run(t *testing.T, files string, test TestFunc, opts ...RunOption) {
- t.Helper()
- checkBuilder(t)
-
- tests := []struct {
- name string
- mode Mode
- getServer func(context.Context, *testing.T, func(*source.Options)) jsonrpc2.StreamServer
- }{
- {"singleton", Singleton, singletonServer},
- {"forwarded", Forwarded, r.forwardedServer},
- {"separate_process", SeparateProcess, r.separateProcessServer},
- {"experimental", Experimental, experimentalServer},
- }
-
- for _, tc := range tests {
- tc := tc
- config := r.defaultConfig()
- for _, opt := range opts {
- opt.set(config)
- }
- if config.modes&tc.mode == 0 {
- continue
- }
- if config.debugAddr != "" && tc.mode != Singleton {
- // Debugging is useful for running stress tests, but since the daemon has
- // likely already been started, it would be too late to debug.
- t.Fatalf("debugging regtest servers only works in Singleton mode, "+
- "got debug addr %q and mode %v", config.debugAddr, tc.mode)
- }
-
- t.Run(tc.name, func(t *testing.T) {
- ctx := context.Background()
- if r.Timeout != 0 && !config.noDefaultTimeout {
- var cancel context.CancelFunc
- ctx, cancel = context.WithTimeout(ctx, r.Timeout)
- defer cancel()
- } else if d, ok := testenv.Deadline(t); ok {
- timeout := time.Until(d) * 19 / 20 // Leave an arbitrary 5% for cleanup.
- var cancel context.CancelFunc
- ctx, cancel = context.WithTimeout(ctx, timeout)
- defer cancel()
- }
-
- ctx = debug.WithInstance(ctx, "", "off")
- if config.debugAddr != "" {
- di := debug.GetInstance(ctx)
- di.Serve(ctx, config.debugAddr)
- di.MonitorMemory(ctx)
- }
-
- rootDir := filepath.Join(r.TempDir, filepath.FromSlash(t.Name()))
- if err := os.MkdirAll(rootDir, 0755); err != nil {
- t.Fatal(err)
- }
- files := fake.UnpackTxt(files)
- if config.editor.WindowsLineEndings {
- for name, data := range files {
- files[name] = bytes.ReplaceAll(data, []byte("\n"), []byte("\r\n"))
- }
- }
- config.sandbox.Files = files
- config.sandbox.RootDir = rootDir
- sandbox, err := fake.NewSandbox(&config.sandbox)
- if err != nil {
- t.Fatal(err)
- }
- // Deferring the closure of ws until the end of the entire test suite
- // has, in testing, given the LSP server time to properly shutdown and
- // release any file locks held in workspace, which is a problem on
- // Windows. This may still be flaky however, and in the future we need a
- // better solution to ensure that all Go processes started by gopls have
- // exited before we clean up.
- r.AddCloser(sandbox)
- ss := tc.getServer(ctx, t, config.optionsHook)
- framer := jsonrpc2.NewRawStream
- ls := &loggingFramer{}
- if !config.skipLogs {
- framer = ls.framer(jsonrpc2.NewRawStream)
- }
- ts := servertest.NewPipeServer(ctx, ss, framer)
- env := NewEnv(ctx, t, sandbox, ts, config.editor, !config.skipHooks)
- defer func() {
- if t.Failed() && r.PrintGoroutinesOnFailure {
- pprof.Lookup("goroutine").WriteTo(os.Stderr, 1)
- }
- if t.Failed() || testing.Verbose() {
- ls.printBuffers(t.Name(), os.Stderr)
- }
- // For tests that failed due to a timeout, don't fail to shutdown
- // because ctx is done.
- closeCtx, cancel := context.WithTimeout(xcontext.Detach(ctx), 5*time.Second)
- defer cancel()
- if err := env.Editor.Close(closeCtx); err != nil {
- t.Errorf("closing editor: %v", err)
- }
- }()
- // Always await the initial workspace load.
- env.Await(InitialWorkspaceLoad)
- test(t, env)
- })
- }
-}
-
-// longBuilders maps builders that are skipped when -short is set to a
-// (possibly empty) justification.
-var longBuilders = map[string]string{
- "openbsd-amd64-64": "golang.org/issues/42789",
- "openbsd-386-64": "golang.org/issues/42789",
- "openbsd-386-68": "golang.org/issues/42789",
- "openbsd-amd64-68": "golang.org/issues/42789",
- "darwin-amd64-10_12": "",
- "freebsd-amd64-race": "",
- "illumos-amd64": "",
- "netbsd-arm-bsiegert": "",
- "solaris-amd64-oraclerel": "",
- "windows-arm-zx2c4": "",
-}
-
-func checkBuilder(t *testing.T) {
- t.Helper()
- builder := os.Getenv("GO_BUILDER_NAME")
- if reason, ok := longBuilders[builder]; ok && testing.Short() {
- if reason != "" {
- t.Skipf("Skipping %s with -short due to %s", builder, reason)
- } else {
- t.Skipf("Skipping %s with -short", builder)
- }
- }
-}
-
-type loggingFramer struct {
- mu sync.Mutex
- buf *safeBuffer
-}
-
-// safeBuffer is a threadsafe buffer for logs.
-type safeBuffer struct {
- mu sync.Mutex
- buf bytes.Buffer
-}
-
-func (b *safeBuffer) Write(p []byte) (int, error) {
- b.mu.Lock()
- defer b.mu.Unlock()
- return b.buf.Write(p)
-}
-
-func (s *loggingFramer) framer(f jsonrpc2.Framer) jsonrpc2.Framer {
- return func(nc net.Conn) jsonrpc2.Stream {
- s.mu.Lock()
- framed := false
- if s.buf == nil {
- s.buf = &safeBuffer{buf: bytes.Buffer{}}
- framed = true
- }
- s.mu.Unlock()
- stream := f(nc)
- if framed {
- return protocol.LoggingStream(stream, s.buf)
- }
- return stream
- }
-}
-
-func (s *loggingFramer) printBuffers(testname string, w io.Writer) {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- if s.buf == nil {
- return
- }
- fmt.Fprintf(os.Stderr, "#### Start Gopls Test Logs for %q\n", testname)
- s.buf.mu.Lock()
- io.Copy(w, &s.buf.buf)
- s.buf.mu.Unlock()
- fmt.Fprintf(os.Stderr, "#### End Gopls Test Logs for %q\n", testname)
-}
-
-func singletonServer(ctx context.Context, t *testing.T, optsHook func(*source.Options)) jsonrpc2.StreamServer {
- return lsprpc.NewStreamServer(cache.New(optsHook), false)
-}
-
-func experimentalServer(_ context.Context, t *testing.T, optsHook func(*source.Options)) jsonrpc2.StreamServer {
- options := func(o *source.Options) {
- optsHook(o)
- o.EnableAllExperiments()
- // ExperimentalWorkspaceModule is not (as of writing) enabled by
- // source.Options.EnableAllExperiments, but we want to test it.
- o.ExperimentalWorkspaceModule = true
- }
- return lsprpc.NewStreamServer(cache.New(options), false)
-}
-
-func (r *Runner) forwardedServer(ctx context.Context, t *testing.T, optsHook func(*source.Options)) jsonrpc2.StreamServer {
- ts := r.getTestServer(optsHook)
- return newForwarder("tcp", ts.Addr)
-}
-
-// getTestServer gets the shared test server instance to connect to, or creates
-// one if it doesn't exist.
-func (r *Runner) getTestServer(optsHook func(*source.Options)) *servertest.TCPServer {
- r.mu.Lock()
- defer r.mu.Unlock()
- if r.ts == nil {
- ctx := context.Background()
- ctx = debug.WithInstance(ctx, "", "off")
- ss := lsprpc.NewStreamServer(cache.New(optsHook), false)
- r.ts = servertest.NewTCPServer(ctx, ss, nil)
- }
- return r.ts
-}
-
-func (r *Runner) separateProcessServer(ctx context.Context, t *testing.T, optsHook func(*source.Options)) jsonrpc2.StreamServer {
- // TODO(rfindley): can we use the autostart behavior here, instead of
- // pre-starting the remote?
- socket := r.getRemoteSocket(t)
- return newForwarder("unix", socket)
-}
-
-func newForwarder(network, address string) *lsprpc.Forwarder {
- server, err := lsprpc.NewForwarder(network+";"+address, nil)
- if err != nil {
- // This should never happen, as we are passing an explicit address.
- panic(fmt.Sprintf("internal error: unable to create forwarder: %v", err))
- }
- return server
-}
-
-// runTestAsGoplsEnvvar triggers TestMain to run gopls instead of running
-// tests. It's a trick to allow tests to find a binary to use to start a gopls
-// subprocess.
-const runTestAsGoplsEnvvar = "_GOPLS_TEST_BINARY_RUN_AS_GOPLS"
-
-func (r *Runner) getRemoteSocket(t *testing.T) string {
- t.Helper()
- r.mu.Lock()
- defer r.mu.Unlock()
- const daemonFile = "gopls-test-daemon"
- if r.socketDir != "" {
- return filepath.Join(r.socketDir, daemonFile)
- }
-
- if r.GoplsPath == "" {
- t.Fatal("cannot run tests with a separate process unless a path to a gopls binary is configured")
- }
- var err error
- r.socketDir, err = ioutil.TempDir(r.TempDir, "gopls-regtest-socket")
- if err != nil {
- t.Fatalf("creating tempdir: %v", err)
- }
- socket := filepath.Join(r.socketDir, daemonFile)
- args := []string{"serve", "-listen", "unix;" + socket, "-listen.timeout", "10s"}
- cmd := exec.Command(r.GoplsPath, args...)
- cmd.Env = append(os.Environ(), runTestAsGoplsEnvvar+"=true")
- var stderr bytes.Buffer
- cmd.Stderr = &stderr
- go func() {
- if err := cmd.Run(); err != nil {
- panic(fmt.Sprintf("error running external gopls: %v\nstderr:\n%s", err, stderr.String()))
- }
- }()
- return socket
-}
-
-// AddCloser schedules a closer to be closed at the end of the test run. This
-// is useful for Windows in particular, as
-func (r *Runner) AddCloser(closer io.Closer) {
- r.mu.Lock()
- defer r.mu.Unlock()
- r.closers = append(r.closers, closer)
-}
-
-// Close cleans up resource that have been allocated to this workspace.
-func (r *Runner) Close() error {
- r.mu.Lock()
- defer r.mu.Unlock()
-
- var errmsgs []string
- if r.ts != nil {
- if err := r.ts.Close(); err != nil {
- errmsgs = append(errmsgs, err.Error())
- }
- }
- if r.socketDir != "" {
- if err := os.RemoveAll(r.socketDir); err != nil {
- errmsgs = append(errmsgs, err.Error())
- }
- }
- if !r.SkipCleanup {
- for _, closer := range r.closers {
- if err := closer.Close(); err != nil {
- errmsgs = append(errmsgs, err.Error())
- }
- }
- if err := os.RemoveAll(r.TempDir); err != nil {
- errmsgs = append(errmsgs, err.Error())
- }
- }
- if len(errmsgs) > 0 {
- return fmt.Errorf("errors closing the test runner:\n\t%s", strings.Join(errmsgs, "\n\t"))
- }
- return nil
-}
diff --git a/internal/lsp/regtest/wrappers.go b/internal/lsp/regtest/wrappers.go
deleted file mode 100644
index 9031e71f1..000000000
--- a/internal/lsp/regtest/wrappers.go
+++ /dev/null
@@ -1,446 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package regtest
-
-import (
- "encoding/json"
- "path"
- "testing"
-
- "golang.org/x/tools/internal/lsp/command"
- "golang.org/x/tools/internal/lsp/fake"
- "golang.org/x/tools/internal/lsp/protocol"
-)
-
-func (e *Env) ChangeFilesOnDisk(events []fake.FileEvent) {
- e.T.Helper()
- if err := e.Sandbox.Workdir.ChangeFilesOnDisk(e.Ctx, events); err != nil {
- e.T.Fatal(err)
- }
-}
-
-// RemoveWorkspaceFile deletes a file on disk but does nothing in the
-// editor. It calls t.Fatal on any error.
-func (e *Env) RemoveWorkspaceFile(name string) {
- e.T.Helper()
- if err := e.Sandbox.Workdir.RemoveFile(e.Ctx, name); err != nil {
- e.T.Fatal(err)
- }
-}
-
-// ReadWorkspaceFile reads a file from the workspace, calling t.Fatal on any
-// error.
-func (e *Env) ReadWorkspaceFile(name string) string {
- e.T.Helper()
- content, err := e.Sandbox.Workdir.ReadFile(name)
- if err != nil {
- e.T.Fatal(err)
- }
- return content
-}
-
-// WriteWorkspaceFile writes a file to disk but does nothing in the editor.
-// It calls t.Fatal on any error.
-func (e *Env) WriteWorkspaceFile(name, content string) {
- e.T.Helper()
- if err := e.Sandbox.Workdir.WriteFile(e.Ctx, name, content); err != nil {
- e.T.Fatal(err)
- }
-}
-
-// WriteWorkspaceFiles deletes a file on disk but does nothing in the
-// editor. It calls t.Fatal on any error.
-func (e *Env) WriteWorkspaceFiles(files map[string]string) {
- e.T.Helper()
- if err := e.Sandbox.Workdir.WriteFiles(e.Ctx, files); err != nil {
- e.T.Fatal(err)
- }
-}
-
-// OpenFile opens a file in the editor, calling t.Fatal on any error.
-func (e *Env) OpenFile(name string) {
- e.T.Helper()
- if err := e.Editor.OpenFile(e.Ctx, name); err != nil {
- e.T.Fatal(err)
- }
-}
-
-// CreateBuffer creates a buffer in the editor, calling t.Fatal on any error.
-func (e *Env) CreateBuffer(name string, content string) {
- e.T.Helper()
- if err := e.Editor.CreateBuffer(e.Ctx, name, content); err != nil {
- e.T.Fatal(err)
- }
-}
-
-// CloseBuffer closes an editor buffer without saving, calling t.Fatal on any
-// error.
-func (e *Env) CloseBuffer(name string) {
- e.T.Helper()
- if err := e.Editor.CloseBuffer(e.Ctx, name); err != nil {
- e.T.Fatal(err)
- }
-}
-
-// EditBuffer applies edits to an editor buffer, calling t.Fatal on any error.
-func (e *Env) EditBuffer(name string, edits ...fake.Edit) {
- e.T.Helper()
- if err := e.Editor.EditBuffer(e.Ctx, name, edits); err != nil {
- e.T.Fatal(err)
- }
-}
-
-func (e *Env) SetBufferContent(name string, content string) {
- e.T.Helper()
- if err := e.Editor.SetBufferContent(e.Ctx, name, content); err != nil {
- e.T.Fatal(err)
- }
-}
-
-// RegexpRange returns the range of the first match for re in the buffer
-// specified by name, calling t.Fatal on any error. It first searches for the
-// position in open buffers, then in workspace files.
-func (e *Env) RegexpRange(name, re string) (fake.Pos, fake.Pos) {
- e.T.Helper()
- start, end, err := e.Editor.RegexpRange(name, re)
- if err == fake.ErrUnknownBuffer {
- start, end, err = e.Sandbox.Workdir.RegexpRange(name, re)
- }
- if err != nil {
- e.T.Fatalf("RegexpRange: %v, %v", name, err)
- }
- return start, end
-}
-
-// RegexpSearch returns the starting position of the first match for re in the
-// buffer specified by name, calling t.Fatal on any error. It first searches
-// for the position in open buffers, then in workspace files.
-func (e *Env) RegexpSearch(name, re string) fake.Pos {
- e.T.Helper()
- pos, err := e.Editor.RegexpSearch(name, re)
- if err == fake.ErrUnknownBuffer {
- pos, err = e.Sandbox.Workdir.RegexpSearch(name, re)
- }
- if err != nil {
- e.T.Fatalf("RegexpSearch: %v, %v", name, err)
- }
- return pos
-}
-
-// RegexpReplace replaces the first group in the first match of regexpStr with
-// the replace text, calling t.Fatal on any error.
-func (e *Env) RegexpReplace(name, regexpStr, replace string) {
- e.T.Helper()
- if err := e.Editor.RegexpReplace(e.Ctx, name, regexpStr, replace); err != nil {
- e.T.Fatalf("RegexpReplace: %v", err)
- }
-}
-
-// SaveBuffer saves an editor buffer, calling t.Fatal on any error.
-func (e *Env) SaveBuffer(name string) {
- e.T.Helper()
- if err := e.Editor.SaveBuffer(e.Ctx, name); err != nil {
- e.T.Fatal(err)
- }
-}
-
-func (e *Env) SaveBufferWithoutActions(name string) {
- e.T.Helper()
- if err := e.Editor.SaveBufferWithoutActions(e.Ctx, name); err != nil {
- e.T.Fatal(err)
- }
-}
-
-// GoToDefinition goes to definition in the editor, calling t.Fatal on any
-// error. It returns the path and position of the resulting jump.
-func (e *Env) GoToDefinition(name string, pos fake.Pos) (string, fake.Pos) {
- e.T.Helper()
- n, p, err := e.Editor.GoToDefinition(e.Ctx, name, pos)
- if err != nil {
- e.T.Fatal(err)
- }
- return n, p
-}
-
-// Symbol returns symbols matching query
-func (e *Env) Symbol(query string) []fake.SymbolInformation {
- e.T.Helper()
- r, err := e.Editor.Symbol(e.Ctx, query)
- if err != nil {
- e.T.Fatal(err)
- }
- return r
-}
-
-// FormatBuffer formats the editor buffer, calling t.Fatal on any error.
-func (e *Env) FormatBuffer(name string) {
- e.T.Helper()
- if err := e.Editor.FormatBuffer(e.Ctx, name); err != nil {
- e.T.Fatal(err)
- }
-}
-
-// OrganizeImports processes the source.organizeImports codeAction, calling
-// t.Fatal on any error.
-func (e *Env) OrganizeImports(name string) {
- e.T.Helper()
- if err := e.Editor.OrganizeImports(e.Ctx, name); err != nil {
- e.T.Fatal(err)
- }
-}
-
-// ApplyQuickFixes processes the quickfix codeAction, calling t.Fatal on any error.
-func (e *Env) ApplyQuickFixes(path string, diagnostics []protocol.Diagnostic) {
- e.T.Helper()
- if err := e.Editor.ApplyQuickFixes(e.Ctx, path, nil, diagnostics); err != nil {
- e.T.Fatal(err)
- }
-}
-
-// ApplyCodeAction applies the given code action.
-func (e *Env) ApplyCodeAction(action protocol.CodeAction) {
- e.T.Helper()
- if err := e.Editor.ApplyCodeAction(e.Ctx, action); err != nil {
- e.T.Fatal(err)
- }
-}
-
-// GetQuickFixes returns the available quick fix code actions.
-func (e *Env) GetQuickFixes(path string, diagnostics []protocol.Diagnostic) []protocol.CodeAction {
- e.T.Helper()
- actions, err := e.Editor.GetQuickFixes(e.Ctx, path, nil, diagnostics)
- if err != nil {
- e.T.Fatal(err)
- }
- return actions
-}
-
-// Hover in the editor, calling t.Fatal on any error.
-func (e *Env) Hover(name string, pos fake.Pos) (*protocol.MarkupContent, fake.Pos) {
- e.T.Helper()
- c, p, err := e.Editor.Hover(e.Ctx, name, pos)
- if err != nil {
- e.T.Fatal(err)
- }
- return c, p
-}
-
-func (e *Env) DocumentLink(name string) []protocol.DocumentLink {
- e.T.Helper()
- links, err := e.Editor.DocumentLink(e.Ctx, name)
- if err != nil {
- e.T.Fatal(err)
- }
- return links
-}
-
-func (e *Env) DocumentHighlight(name string, pos fake.Pos) []protocol.DocumentHighlight {
- e.T.Helper()
- highlights, err := e.Editor.DocumentHighlight(e.Ctx, name, pos)
- if err != nil {
- e.T.Fatal(err)
- }
- return highlights
-}
-
-// RunGenerate runs go:generate on the given dir, calling t.Fatal on any error.
-// It waits for the generate command to complete and checks for file changes
-// before returning.
-func (e *Env) RunGenerate(dir string) {
- e.T.Helper()
- if err := e.Editor.RunGenerate(e.Ctx, dir); err != nil {
- e.T.Fatal(err)
- }
- e.Await(NoOutstandingWork())
- // Ideally the fake.Workspace would handle all synthetic file watching, but
- // we help it out here as we need to wait for the generate command to
- // complete before checking the filesystem.
- e.CheckForFileChanges()
-}
-
-// RunGoCommand runs the given command in the sandbox's default working
-// directory.
-func (e *Env) RunGoCommand(verb string, args ...string) {
- e.T.Helper()
- if err := e.Sandbox.RunGoCommand(e.Ctx, "", verb, args, true); err != nil {
- e.T.Fatal(err)
- }
-}
-
-// RunGoCommandInDir is like RunGoCommand, but executes in the given
-// relative directory of the sandbox.
-func (e *Env) RunGoCommandInDir(dir, verb string, args ...string) {
- e.T.Helper()
- if err := e.Sandbox.RunGoCommand(e.Ctx, dir, verb, args, true); err != nil {
- e.T.Fatal(err)
- }
-}
-
-// DumpGoSum prints the correct go.sum contents for dir in txtar format,
-// for use in creating regtests.
-func (e *Env) DumpGoSum(dir string) {
- e.T.Helper()
-
- if err := e.Sandbox.RunGoCommand(e.Ctx, dir, "list", []string{"-mod=mod", "..."}, true); err != nil {
- e.T.Fatal(err)
- }
- sumFile := path.Join(dir, "/go.sum")
- e.T.Log("\n\n-- " + sumFile + " --\n" + e.ReadWorkspaceFile(sumFile))
- e.T.Fatal("see contents above")
-}
-
-// CheckForFileChanges triggers a manual poll of the workspace for any file
-// changes since creation, or since last polling. It is a workaround for the
-// lack of true file watching support in the fake workspace.
-func (e *Env) CheckForFileChanges() {
- e.T.Helper()
- if err := e.Sandbox.Workdir.CheckForFileChanges(e.Ctx); err != nil {
- e.T.Fatal(err)
- }
-}
-
-// CodeLens calls textDocument/codeLens for the given path, calling t.Fatal on
-// any error.
-func (e *Env) CodeLens(path string) []protocol.CodeLens {
- e.T.Helper()
- lens, err := e.Editor.CodeLens(e.Ctx, path)
- if err != nil {
- e.T.Fatal(err)
- }
- return lens
-}
-
-// ExecuteCodeLensCommand executes the command for the code lens matching the
-// given command name.
-func (e *Env) ExecuteCodeLensCommand(path string, cmd command.Command) {
- e.T.Helper()
- lenses := e.CodeLens(path)
- var lens protocol.CodeLens
- var found bool
- for _, l := range lenses {
- if l.Command.Command == cmd.ID() {
- lens = l
- found = true
- }
- }
- if !found {
- e.T.Fatalf("found no command with the ID %s", cmd.ID())
- }
- e.ExecuteCommand(&protocol.ExecuteCommandParams{
- Command: lens.Command.Command,
- Arguments: lens.Command.Arguments,
- }, nil)
-}
-
-func (e *Env) ExecuteCommand(params *protocol.ExecuteCommandParams, result interface{}) {
- e.T.Helper()
- response, err := e.Editor.ExecuteCommand(e.Ctx, params)
- if err != nil {
- e.T.Fatal(err)
- }
- if result == nil {
- return
- }
- // Hack: The result of an executeCommand request will be unmarshaled into
- // maps. Re-marshal and unmarshal into the type we expect.
- //
- // This could be improved by generating a jsonrpc2 command client from the
- // command.Interface, but that should only be done if we're consolidating
- // this part of the tsprotocol generation.
- data, err := json.Marshal(response)
- if err != nil {
- e.T.Fatal(err)
- }
- if err := json.Unmarshal(data, result); err != nil {
- e.T.Fatal(err)
- }
-}
-
-// WorkspaceSymbol calls workspace/symbol
-func (e *Env) WorkspaceSymbol(sym string) []protocol.SymbolInformation {
- e.T.Helper()
- ans, err := e.Editor.Symbols(e.Ctx, sym)
- if err != nil {
- e.T.Fatal(err)
- }
- return ans
-}
-
-// References calls textDocument/references for the given path at the given
-// position.
-func (e *Env) References(path string, pos fake.Pos) []protocol.Location {
- e.T.Helper()
- locations, err := e.Editor.References(e.Ctx, path, pos)
- if err != nil {
- e.T.Fatal(err)
- }
- return locations
-}
-
-func (e *Env) Rename(path string, pos fake.Pos, newName string) {
- e.T.Helper()
- if err := e.Editor.Rename(e.Ctx, path, pos, newName); err != nil {
- e.T.Fatal(err)
- }
-}
-
-// Completion executes a completion request on the server.
-func (e *Env) Completion(path string, pos fake.Pos) *protocol.CompletionList {
- e.T.Helper()
- completions, err := e.Editor.Completion(e.Ctx, path, pos)
- if err != nil {
- e.T.Fatal(err)
- }
- return completions
-}
-
-// AcceptCompletion accepts a completion for the given item at the given
-// position.
-func (e *Env) AcceptCompletion(path string, pos fake.Pos, item protocol.CompletionItem) {
- e.T.Helper()
- if err := e.Editor.AcceptCompletion(e.Ctx, path, pos, item); err != nil {
- e.T.Fatal(err)
- }
-}
-
-// CodeAction calls testDocument/codeAction for the given path, and calls
-// t.Fatal if there are errors.
-func (e *Env) CodeAction(path string, diagnostics []protocol.Diagnostic) []protocol.CodeAction {
- e.T.Helper()
- actions, err := e.Editor.CodeAction(e.Ctx, path, nil, diagnostics)
- if err != nil {
- e.T.Fatal(err)
- }
- return actions
-}
-
-func (e *Env) ChangeConfiguration(t *testing.T, config *fake.EditorConfig) {
- e.Editor.Config = *config
- if err := e.Editor.Server.DidChangeConfiguration(e.Ctx, &protocol.DidChangeConfigurationParams{
- // gopls currently ignores the Settings field
- }); err != nil {
- t.Fatal(err)
- }
-}
-
-// ChangeEnv modifies the editor environment and reconfigures the LSP client.
-// TODO: extend this to "ChangeConfiguration", once we refactor the way editor
-// configuration is defined.
-func (e *Env) ChangeEnv(overlay map[string]string) {
- e.T.Helper()
- // TODO: to be correct, this should probably be synchronized, but right now
- // configuration is only ever modified synchronously in a regtest, so this
- // correctness can wait for the previously mentioned refactoring.
- if e.Editor.Config.Env == nil {
- e.Editor.Config.Env = make(map[string]string)
- }
- for k, v := range overlay {
- e.Editor.Config.Env[k] = v
- }
- var params protocol.DidChangeConfigurationParams
- if err := e.Editor.Server.DidChangeConfiguration(e.Ctx, &params); err != nil {
- e.T.Fatal(err)
- }
-}
diff --git a/internal/lsp/rename.go b/internal/lsp/rename.go
deleted file mode 100644
index 739ae906b..000000000
--- a/internal/lsp/rename.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lsp
-
-import (
- "context"
-
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
-)
-
-func (s *Server) rename(ctx context.Context, params *protocol.RenameParams) (*protocol.WorkspaceEdit, error) {
- snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.Go)
- defer release()
- if !ok {
- return nil, err
- }
- edits, err := source.Rename(ctx, snapshot, fh, params.Position, params.NewName)
- if err != nil {
- return nil, err
- }
-
- var docChanges []protocol.TextDocumentEdit
- for uri, e := range edits {
- fh, err := snapshot.GetVersionedFile(ctx, uri)
- if err != nil {
- return nil, err
- }
- docChanges = append(docChanges, documentChanges(fh, e)...)
- }
- return &protocol.WorkspaceEdit{
- DocumentChanges: docChanges,
- }, nil
-}
-
-func (s *Server) prepareRename(ctx context.Context, params *protocol.PrepareRenameParams) (*protocol.PrepareRename2Gn, error) {
- snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.Go)
- defer release()
- if !ok {
- return nil, err
- }
- // Do not return errors here, as it adds clutter.
- // Returning a nil result means there is not a valid rename.
- item, usererr, err := source.PrepareRename(ctx, snapshot, fh, params.Position)
- if err != nil {
- // Return usererr here rather than err, to avoid cluttering the UI with
- // internal error details.
- return nil, usererr
- }
- return &protocol.PrepareRename2Gn{
- Range: item.Range,
- Placeholder: item.Text,
- }, nil
-}
diff --git a/internal/lsp/reset_golden.sh b/internal/lsp/reset_golden.sh
deleted file mode 100755
index 2689407ca..000000000
--- a/internal/lsp/reset_golden.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-
-find ./internal/lsp/ -name *.golden -delete
-go test ./internal/lsp/source -golden
-go test ./internal/lsp/ -golden
-go test ./internal/lsp/cmd -golden
diff --git a/internal/lsp/semantic.go b/internal/lsp/semantic.go
deleted file mode 100644
index 7c0419c20..000000000
--- a/internal/lsp/semantic.go
+++ /dev/null
@@ -1,958 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lsp
-
-import (
- "bytes"
- "context"
- "fmt"
- "go/ast"
- "go/token"
- "go/types"
- "log"
- "path/filepath"
- "sort"
- "strings"
- "time"
-
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/lsp/template"
- "golang.org/x/tools/internal/typeparams"
- errors "golang.org/x/xerrors"
-)
-
-// The LSP says that errors for the semantic token requests should only be returned
-// for exceptions (a word not otherwise defined). This code treats a too-large file
-// as an exception. On parse errors, the code does what it can.
-
-// reject full semantic token requests for large files
-const maxFullFileSize int = 100000
-
-// to control comprehensive logging of decisions (gopls semtok foo.go > /dev/null shows log output)
-// semDebug should NEVER be true in checked-in code
-const semDebug = false
-
-func (s *Server) semanticTokensFull(ctx context.Context, p *protocol.SemanticTokensParams) (*protocol.SemanticTokens, error) {
- ret, err := s.computeSemanticTokens(ctx, p.TextDocument, nil)
- return ret, err
-}
-
-func (s *Server) semanticTokensFullDelta(ctx context.Context, p *protocol.SemanticTokensDeltaParams) (interface{}, error) {
- return nil, errors.Errorf("implement SemanticTokensFullDelta")
-}
-
-func (s *Server) semanticTokensRange(ctx context.Context, p *protocol.SemanticTokensRangeParams) (*protocol.SemanticTokens, error) {
- ret, err := s.computeSemanticTokens(ctx, p.TextDocument, &p.Range)
- return ret, err
-}
-
-func (s *Server) semanticTokensRefresh(ctx context.Context) error {
- // in the code, but not in the protocol spec
- return errors.Errorf("implement SemanticTokensRefresh")
-}
-
-func (s *Server) computeSemanticTokens(ctx context.Context, td protocol.TextDocumentIdentifier, rng *protocol.Range) (*protocol.SemanticTokens, error) {
- ans := protocol.SemanticTokens{
- Data: []uint32{},
- }
- snapshot, fh, ok, release, err := s.beginFileRequest(ctx, td.URI, source.UnknownKind)
- defer release()
- if !ok {
- return nil, err
- }
- vv := snapshot.View()
- if !vv.Options().SemanticTokens {
- // return an error, so if the option changes
- // the client won't remember the wrong answer
- return nil, errors.Errorf("semantictokens are disabled")
- }
- kind := snapshot.View().FileKind(fh)
- if kind == source.Tmpl {
- // this is a little cumbersome to avoid both exporting 'encoded' and its methods
- // and to avoid import cycles
- e := &encoded{
- ctx: ctx,
- rng: rng,
- tokTypes: s.session.Options().SemanticTypes,
- tokMods: s.session.Options().SemanticMods,
- }
- add := func(line, start uint32, len uint32) {
- e.add(line, start, len, tokMacro, nil)
- }
- data := func() []uint32 {
- return e.Data()
- }
- return template.SemanticTokens(ctx, snapshot, fh.URI(), add, data)
- }
- if kind != source.Go {
- return nil, nil
- }
- pkg, err := snapshot.PackageForFile(ctx, fh.URI(), source.TypecheckFull, source.WidestPackage)
- if err != nil {
- return nil, err
- }
- pgf, err := pkg.File(fh.URI())
- if err != nil {
- return nil, err
- }
- // ignore pgf.ParseErr. Do what we can.
- if rng == nil && len(pgf.Src) > maxFullFileSize {
- err := fmt.Errorf("semantic tokens: file %s too large for full (%d>%d)",
- fh.URI().Filename(), len(pgf.Src), maxFullFileSize)
- return nil, err
- }
- e := &encoded{
- ctx: ctx,
- pgf: pgf,
- rng: rng,
- ti: pkg.GetTypesInfo(),
- pkg: pkg,
- fset: snapshot.FileSet(),
- tokTypes: s.session.Options().SemanticTypes,
- tokMods: s.session.Options().SemanticMods,
- }
- if err := e.init(); err != nil {
- // e.init should never return an error, unless there's some
- // seemingly impossible race condition
- return nil, err
- }
- e.semantics()
- ans.Data = e.Data()
- // For delta requests, but we've never seen any.
- ans.ResultID = fmt.Sprintf("%v", time.Now())
- return &ans, nil
-}
-
-func (e *encoded) semantics() {
- f := e.pgf.File
- // may not be in range, but harmless
- e.token(f.Package, len("package"), tokKeyword, nil)
- e.token(f.Name.NamePos, len(f.Name.Name), tokNamespace, nil)
- inspect := func(n ast.Node) bool {
- return e.inspector(n)
- }
- for _, d := range f.Decls {
- // only look at the decls that overlap the range
- start, end := d.Pos(), d.End()
- if end <= e.start || start >= e.end {
- continue
- }
- ast.Inspect(d, inspect)
- }
- for _, cg := range f.Comments {
- for _, c := range cg.List {
- if !strings.Contains(c.Text, "\n") {
- e.token(c.Pos(), len(c.Text), tokComment, nil)
- continue
- }
- e.multiline(c.Pos(), c.End(), c.Text, tokComment)
- }
- }
-}
-
-type tokenType string
-
-const (
- tokNamespace tokenType = "namespace"
- tokType tokenType = "type"
- tokInterface tokenType = "interface"
- tokTypeParam tokenType = "typeParameter"
- tokParameter tokenType = "parameter"
- tokVariable tokenType = "variable"
- tokMethod tokenType = "method"
- tokFunction tokenType = "function"
- tokKeyword tokenType = "keyword"
- tokComment tokenType = "comment"
- tokString tokenType = "string"
- tokNumber tokenType = "number"
- tokOperator tokenType = "operator"
-
- tokMacro tokenType = "macro" // for templates
-)
-
-func (e *encoded) token(start token.Pos, leng int, typ tokenType, mods []string) {
-
- if !start.IsValid() {
- // This is not worth reporting
- return
- }
- if start >= e.end || start+token.Pos(leng) <= e.start {
- return
- }
- // want a line and column from start (in LSP coordinates)
- // [//line directives should be ignored]
- rng := source.NewMappedRange(e.fset, e.pgf.Mapper, start, start+token.Pos(leng))
- lspRange, err := rng.Range()
- if err != nil {
- // possibly a //line directive. TODO(pjw): fix this somehow
- // "column mapper is for file...instead of..."
- // "line is beyond end of file..."
- // see line 116 of internal/span/token.go which uses Position not PositionFor
- // (it is too verbose to print the error on every token. some other RPC will fail)
- // event.Error(e.ctx, "failed to convert to range", err)
- return
- }
- if lspRange.End.Line != lspRange.Start.Line {
- // this happens if users are typing at the end of the file, but report nothing
- return
- }
- // token is all on one line
- length := lspRange.End.Character - lspRange.Start.Character
- e.add(lspRange.Start.Line, lspRange.Start.Character, length, typ, mods)
-}
-
-func (e *encoded) add(line, start uint32, len uint32, tok tokenType, mod []string) {
- x := semItem{line, start, len, tok, mod}
- e.items = append(e.items, x)
-}
-
-// semItem represents a token found walking the parse tree
-type semItem struct {
- line, start uint32
- len uint32
- typeStr tokenType
- mods []string
-}
-
-type encoded struct {
- // the generated data
- items []semItem
-
- ctx context.Context
- tokTypes, tokMods []string
- pgf *source.ParsedGoFile
- rng *protocol.Range
- ti *types.Info
- pkg source.Package
- fset *token.FileSet
- // allowed starting and ending token.Pos, set by init
- // used to avoid looking at declarations not in range
- start, end token.Pos
- // path from the root of the parse tree, used for debugging
- stack []ast.Node
-}
-
-// convert the stack to a string, for debugging
-func (e *encoded) strStack() string {
- msg := []string{"["}
- for i := len(e.stack) - 1; i >= 0; i-- {
- s := e.stack[i]
- msg = append(msg, fmt.Sprintf("%T", s)[5:])
- }
- if len(e.stack) > 0 {
- loc := e.stack[len(e.stack)-1].Pos()
- if !source.InRange(e.pgf.Tok, loc) {
- msg = append(msg, fmt.Sprintf("invalid position %v for %s", loc, e.pgf.URI))
- } else if locInRange(e.pgf.Tok, loc) {
- add := e.pgf.Tok.PositionFor(loc, false)
- nm := filepath.Base(add.Filename)
- msg = append(msg, fmt.Sprintf("(%s:%d,col:%d)", nm, add.Line, add.Column))
- } else {
- msg = append(msg, fmt.Sprintf("(loc %d out of range)", loc))
- }
- }
- msg = append(msg, "]")
- return strings.Join(msg, " ")
-}
-
-// avoid panic in token.PostionFor() when typing at the end of the file
-func locInRange(f *token.File, loc token.Pos) bool {
- return f.Base() <= int(loc) && int(loc) < f.Base()+f.Size()
-}
-
-// find the line in the source
-func (e *encoded) srcLine(x ast.Node) string {
- file := e.pgf.Tok
- line := file.Line(x.Pos())
- start, err := source.Offset(file, file.LineStart(line))
- if err != nil {
- return ""
- }
- end := start
- for ; end < len(e.pgf.Src) && e.pgf.Src[end] != '\n'; end++ {
-
- }
- ans := e.pgf.Src[start:end]
- return string(ans)
-}
-
-func (e *encoded) inspector(n ast.Node) bool {
- pop := func() {
- e.stack = e.stack[:len(e.stack)-1]
- }
- if n == nil {
- pop()
- return true
- }
- e.stack = append(e.stack, n)
- switch x := n.(type) {
- case *ast.ArrayType:
- case *ast.AssignStmt:
- e.token(x.TokPos, len(x.Tok.String()), tokOperator, nil)
- case *ast.BasicLit:
- if strings.Contains(x.Value, "\n") {
- // has to be a string
- e.multiline(x.Pos(), x.End(), x.Value, tokString)
- break
- }
- ln := len(x.Value)
- what := tokNumber
- if x.Kind == token.STRING {
- what = tokString
- if _, ok := e.stack[len(e.stack)-2].(*ast.Field); ok {
- // struct tags (this is probably pointless, as the
- // TextMate grammar will treat all the other comments the same)
- what = tokComment
- }
- }
- e.token(x.Pos(), ln, what, nil)
- case *ast.BinaryExpr:
- e.token(x.OpPos, len(x.Op.String()), tokOperator, nil)
- case *ast.BlockStmt:
- case *ast.BranchStmt:
- e.token(x.TokPos, len(x.Tok.String()), tokKeyword, nil)
- // There's no semantic encoding for labels
- case *ast.CallExpr:
- if x.Ellipsis != token.NoPos {
- e.token(x.Ellipsis, len("..."), tokOperator, nil)
- }
- case *ast.CaseClause:
- iam := "case"
- if x.List == nil {
- iam = "default"
- }
- e.token(x.Case, len(iam), tokKeyword, nil)
- case *ast.ChanType:
- // chan | chan <- | <- chan
- if x.Arrow == token.NoPos || x.Arrow != x.Begin {
- e.token(x.Begin, len("chan"), tokKeyword, nil)
- break
- }
- pos := e.findKeyword("chan", x.Begin+2, x.Value.Pos())
- e.token(pos, len("chan"), tokKeyword, nil)
- case *ast.CommClause:
- iam := len("case")
- if x.Comm == nil {
- iam = len("default")
- }
- e.token(x.Case, iam, tokKeyword, nil)
- case *ast.CompositeLit:
- case *ast.DeclStmt:
- case *ast.DeferStmt:
- e.token(x.Defer, len("defer"), tokKeyword, nil)
- case *ast.Ellipsis:
- e.token(x.Ellipsis, len("..."), tokOperator, nil)
- case *ast.EmptyStmt:
- case *ast.ExprStmt:
- case *ast.Field:
- case *ast.FieldList:
- case *ast.ForStmt:
- e.token(x.For, len("for"), tokKeyword, nil)
- case *ast.FuncDecl:
- case *ast.FuncLit:
- case *ast.FuncType:
- if x.Func != token.NoPos {
- e.token(x.Func, len("func"), tokKeyword, nil)
- }
- case *ast.GenDecl:
- e.token(x.TokPos, len(x.Tok.String()), tokKeyword, nil)
- case *ast.GoStmt:
- e.token(x.Go, len("go"), tokKeyword, nil)
- case *ast.Ident:
- e.ident(x)
- case *ast.IfStmt:
- e.token(x.If, len("if"), tokKeyword, nil)
- if x.Else != nil {
- // x.Body.End() or x.Body.End()+1, not that it matters
- pos := e.findKeyword("else", x.Body.End(), x.Else.Pos())
- e.token(pos, len("else"), tokKeyword, nil)
- }
- case *ast.ImportSpec:
- e.importSpec(x)
- pop()
- return false
- case *ast.IncDecStmt:
- e.token(x.TokPos, len(x.Tok.String()), tokOperator, nil)
- case *ast.IndexExpr:
- case *typeparams.IndexListExpr: // accommodate generics
- case *ast.InterfaceType:
- e.token(x.Interface, len("interface"), tokKeyword, nil)
- case *ast.KeyValueExpr:
- case *ast.LabeledStmt:
- case *ast.MapType:
- e.token(x.Map, len("map"), tokKeyword, nil)
- case *ast.ParenExpr:
- case *ast.RangeStmt:
- e.token(x.For, len("for"), tokKeyword, nil)
- // x.TokPos == token.NoPos is legal (for range foo {})
- offset := x.TokPos
- if offset == token.NoPos {
- offset = x.For
- }
- pos := e.findKeyword("range", offset, x.X.Pos())
- e.token(pos, len("range"), tokKeyword, nil)
- case *ast.ReturnStmt:
- e.token(x.Return, len("return"), tokKeyword, nil)
- case *ast.SelectStmt:
- e.token(x.Select, len("select"), tokKeyword, nil)
- case *ast.SelectorExpr:
- case *ast.SendStmt:
- e.token(x.Arrow, len("<-"), tokOperator, nil)
- case *ast.SliceExpr:
- case *ast.StarExpr:
- e.token(x.Star, len("*"), tokOperator, nil)
- case *ast.StructType:
- e.token(x.Struct, len("struct"), tokKeyword, nil)
- case *ast.SwitchStmt:
- e.token(x.Switch, len("switch"), tokKeyword, nil)
- case *ast.TypeAssertExpr:
- if x.Type == nil {
- pos := e.findKeyword("type", x.Lparen, x.Rparen)
- e.token(pos, len("type"), tokKeyword, nil)
- }
- case *ast.TypeSpec:
- case *ast.TypeSwitchStmt:
- e.token(x.Switch, len("switch"), tokKeyword, nil)
- case *ast.UnaryExpr:
- e.token(x.OpPos, len(x.Op.String()), tokOperator, nil)
- case *ast.ValueSpec:
- // things only seen with parsing or type errors, so ignore them
- case *ast.BadDecl, *ast.BadExpr, *ast.BadStmt:
- return true
- // not going to see these
- case *ast.File, *ast.Package:
- e.unexpected(fmt.Sprintf("implement %T %s", x, e.pgf.Tok.PositionFor(x.Pos(), false)))
- // other things we knowingly ignore
- case *ast.Comment, *ast.CommentGroup:
- pop()
- return false
- default:
- e.unexpected(fmt.Sprintf("failed to implement %T", x))
- }
- return true
-}
-
-func (e *encoded) ident(x *ast.Ident) {
- if e.ti == nil {
- what, mods := e.unkIdent(x)
- if what != "" {
- e.token(x.Pos(), len(x.String()), what, mods)
- }
- if semDebug {
- log.Printf(" nil %s/nil/nil %q %v %s", x.String(), what, mods, e.strStack())
- }
- return
- }
- def := e.ti.Defs[x]
- if def != nil {
- what, mods := e.definitionFor(x, def)
- if what != "" {
- e.token(x.Pos(), len(x.String()), what, mods)
- }
- if semDebug {
- log.Printf(" for %s/%T/%T got %s %v (%s)", x.String(), def, def.Type(), what, mods, e.strStack())
- }
- return
- }
- use := e.ti.Uses[x]
- tok := func(pos token.Pos, lng int, tok tokenType, mods []string) {
- e.token(pos, lng, tok, mods)
- q := "nil"
- if use != nil {
- q = fmt.Sprintf("%T", use.Type())
- }
- if semDebug {
- log.Printf(" use %s/%T/%s got %s %v (%s)", x.String(), use, q, tok, mods, e.strStack())
- }
- }
-
- switch y := use.(type) {
- case nil:
- what, mods := e.unkIdent(x)
- if what != "" {
- tok(x.Pos(), len(x.String()), what, mods)
- } else if semDebug {
- // tok() wasn't called, so didn't log
- log.Printf(" nil %s/%T/nil %q %v (%s)", x.String(), use, what, mods, e.strStack())
- }
- return
- case *types.Builtin:
- tok(x.NamePos, len(x.Name), tokFunction, []string{"defaultLibrary"})
- case *types.Const:
- mods := []string{"readonly"}
- tt := y.Type()
- if _, ok := tt.(*types.Basic); ok {
- tok(x.Pos(), len(x.String()), tokVariable, mods)
- break
- }
- if ttx, ok := tt.(*types.Named); ok {
- if x.String() == "iota" {
- e.unexpected(fmt.Sprintf("iota:%T", ttx))
- }
- if _, ok := ttx.Underlying().(*types.Basic); ok {
- tok(x.Pos(), len(x.String()), tokVariable, mods)
- break
- }
- e.unexpected(fmt.Sprintf("%q/%T", x.String(), tt))
- }
- // can this happen? Don't think so
- e.unexpected(fmt.Sprintf("%s %T %#v", x.String(), tt, tt))
- case *types.Func:
- tok(x.Pos(), len(x.Name), tokFunction, nil)
- case *types.Label:
- // nothing to map it to
- case *types.Nil:
- // nil is a predeclared identifier
- tok(x.Pos(), len("nil"), tokVariable, []string{"readonly", "defaultLibrary"})
- case *types.PkgName:
- tok(x.Pos(), len(x.Name), tokNamespace, nil)
- case *types.TypeName: // could be a tokTpeParam
- var mods []string
- if _, ok := y.Type().(*types.Basic); ok {
- mods = []string{"defaultLibrary"}
- } else if _, ok := y.Type().(*typeparams.TypeParam); ok {
- tok(x.Pos(), len(x.String()), tokTypeParam, mods)
- break
- }
- tok(x.Pos(), len(x.String()), tokType, mods)
- case *types.Var:
- if isSignature(y) {
- tok(x.Pos(), len(x.Name), tokFunction, nil)
- } else if _, ok := y.Type().(*typeparams.TypeParam); ok {
- tok(x.Pos(), len(x.Name), tokTypeParam, nil)
- } else {
- tok(x.Pos(), len(x.Name), tokVariable, nil)
- }
- default:
- // can't happen
- if use == nil {
- msg := fmt.Sprintf("%#v/%#v %#v %#v", x, x.Obj, e.ti.Defs[x], e.ti.Uses[x])
- e.unexpected(msg)
- }
- if use.Type() != nil {
- e.unexpected(fmt.Sprintf("%s %T/%T,%#v", x.String(), use, use.Type(), use))
- } else {
- e.unexpected(fmt.Sprintf("%s %T", x.String(), use))
- }
- }
-}
-
-func isSignature(use types.Object) bool {
- if true {
- return false //PJW: fix after generics seem ok
- }
- if _, ok := use.(*types.Var); !ok {
- return false
- }
- v := use.Type()
- if v == nil {
- return false
- }
- if _, ok := v.(*types.Signature); ok {
- return true
- }
- return false
-}
-
-// both e.ti.Defs and e.ti.Uses are nil. use the parse stack.
-// a lot of these only happen when the package doesn't compile
-// but in that case it is all best-effort from the parse tree
-func (e *encoded) unkIdent(x *ast.Ident) (tokenType, []string) {
- def := []string{"definition"}
- n := len(e.stack) - 2 // parent of Ident
- if n < 0 {
- e.unexpected("no stack?")
- return "", nil
- }
- switch nd := e.stack[n].(type) {
- case *ast.BinaryExpr, *ast.UnaryExpr, *ast.ParenExpr, *ast.StarExpr,
- *ast.IncDecStmt, *ast.SliceExpr, *ast.ExprStmt, *ast.IndexExpr,
- *ast.ReturnStmt, *ast.ChanType, *ast.SendStmt,
- *ast.ForStmt, // possibly incomplete
- *ast.IfStmt, /* condition */
- *ast.KeyValueExpr: // either key or value
- return tokVariable, nil
- case *typeparams.IndexListExpr: // generic?
- return tokVariable, nil
- case *ast.Ellipsis:
- return tokType, nil
- case *ast.CaseClause:
- if n-2 >= 0 {
- if _, ok := e.stack[n-2].(*ast.TypeSwitchStmt); ok {
- return tokType, nil
- }
- }
- return tokVariable, nil
- case *ast.ArrayType:
- if x == nd.Len {
- // or maybe a Type Param, but we can't just from the parse tree
- return tokVariable, nil
- } else {
- return tokType, nil
- }
- case *ast.MapType:
- return tokType, nil
- case *ast.CallExpr:
- if x == nd.Fun {
- return tokFunction, nil
- }
- return tokVariable, nil
- case *ast.SwitchStmt:
- return tokVariable, nil
- case *ast.TypeAssertExpr:
- if x == nd.X {
- return tokVariable, nil
- } else if x == nd.Type {
- return tokType, nil
- }
- case *ast.ValueSpec:
- for _, p := range nd.Names {
- if p == x {
- return tokVariable, def
- }
- }
- for _, p := range nd.Values {
- if p == x {
- return tokVariable, nil
- }
- }
- return tokType, nil
- case *ast.SelectorExpr: // e.ti.Selections[nd] is nil, so no help
- if n-1 >= 0 {
- if ce, ok := e.stack[n-1].(*ast.CallExpr); ok {
- // ... CallExpr SelectorExpr Ident (_.x())
- if ce.Fun == nd && nd.Sel == x {
- return tokFunction, nil
- }
- }
- }
- return tokVariable, nil
- case *ast.AssignStmt:
- for _, p := range nd.Lhs {
- // x := ..., or x = ...
- if p == x {
- if nd.Tok != token.DEFINE {
- def = nil
- }
- return tokVariable, def
- }
- }
- // RHS, = x
- return tokVariable, nil
- case *ast.TypeSpec: // it's a type if it is either the Name or the Type
- if x == nd.Type {
- def = nil
- }
- return tokType, def
- case *ast.Field:
- // ident could be type in a field, or a method in an interface type, or a variable
- if x == nd.Type {
- return tokType, nil
- }
- if n-2 >= 0 {
- _, okit := e.stack[n-2].(*ast.InterfaceType)
- _, okfl := e.stack[n-1].(*ast.FieldList)
- if okit && okfl {
- return tokMethod, def
- }
- }
- return tokVariable, nil
- case *ast.LabeledStmt, *ast.BranchStmt:
- // nothing to report
- case *ast.CompositeLit:
- if nd.Type == x {
- return tokType, nil
- }
- return tokVariable, nil
- case *ast.RangeStmt:
- if nd.Tok != token.DEFINE {
- def = nil
- }
- return tokVariable, def
- case *ast.FuncDecl:
- return tokFunction, def
- default:
- msg := fmt.Sprintf("%T undexpected: %s %s%q", nd, x.Name, e.strStack(), e.srcLine(x))
- e.unexpected(msg)
- }
- return "", nil
-}
-
-func isDeprecated(n *ast.CommentGroup) bool {
- if n == nil {
- return false
- }
- for _, c := range n.List {
- if strings.HasPrefix(c.Text, "// Deprecated") {
- return true
- }
- }
- return false
-}
-
-func (e *encoded) definitionFor(x *ast.Ident, def types.Object) (tokenType, []string) {
- // PJW: def == types.Label? probably a nothing
- // PJW: look into replaceing these syntactic tests with types more generally
- mods := []string{"definition"}
- for i := len(e.stack) - 1; i >= 0; i-- {
- s := e.stack[i]
- switch y := s.(type) {
- case *ast.AssignStmt, *ast.RangeStmt:
- if x.Name == "_" {
- return "", nil // not really a variable
- }
- return tokVariable, mods
- case *ast.GenDecl:
- if isDeprecated(y.Doc) {
- mods = append(mods, "deprecated")
- }
- if y.Tok == token.CONST {
- mods = append(mods, "readonly")
- }
- return tokVariable, mods
- case *ast.FuncDecl:
- // If x is immediately under a FuncDecl, it is a function or method
- if i == len(e.stack)-2 {
- if isDeprecated(y.Doc) {
- mods = append(mods, "deprecated")
- }
- if y.Recv != nil {
- return tokMethod, mods
- }
- return tokFunction, mods
- }
- // if x < ... < FieldList < FuncDecl, this is the receiver, a variable
- if _, ok := e.stack[i+1].(*ast.FieldList); ok {
- return tokVariable, nil
- }
- // if x < ... < FieldList < FuncType < FuncDecl, this is a param
- return tokParameter, mods
- case *ast.FuncType:
- return tokParameter, mods
- case *ast.InterfaceType:
- return tokMethod, mods
- case *ast.TypeSpec:
- // GenDecl/Typespec/FuncType/FieldList/Field/Ident
- // (type A func(b uint64)) (err error)
- // b and err should not be tokType, but tokVaraible
- // and in GenDecl/TpeSpec/StructType/FieldList/Field/Ident
- // (type A struct{b uint64}
- // but on type B struct{C}), C is a type, but is not being defined.
- // GenDecl/TypeSpec/FieldList/Field/Ident is a typeParam
- if _, ok := e.stack[i+1].(*ast.FieldList); ok {
- return tokTypeParam, mods
- }
- fldm := e.stack[len(e.stack)-2]
- if fld, ok := fldm.(*ast.Field); ok {
- // if len(fld.names) == 0 this is a tokType, being used
- if len(fld.Names) == 0 {
- return tokType, nil
- }
- return tokVariable, mods
- }
- return tokType, mods
- }
- }
- // can't happen
- msg := fmt.Sprintf("failed to find the decl for %s", e.pgf.Tok.PositionFor(x.Pos(), false))
- e.unexpected(msg)
- return "", []string{""}
-}
-
-func (e *encoded) multiline(start, end token.Pos, val string, tok tokenType) {
- f := e.fset.File(start)
- // the hard part is finding the lengths of lines. include the \n
- leng := func(line int) int {
- n := f.LineStart(line)
- if line >= f.LineCount() {
- return f.Size() - int(n)
- }
- return int(f.LineStart(line+1) - n)
- }
- spos := e.fset.PositionFor(start, false)
- epos := e.fset.PositionFor(end, false)
- sline := spos.Line
- eline := epos.Line
- // first line is from spos.Column to end
- e.token(start, leng(sline)-spos.Column, tok, nil) // leng(sline)-1 - (spos.Column-1)
- for i := sline + 1; i < eline; i++ {
- // intermediate lines are from 1 to end
- e.token(f.LineStart(i), leng(i)-1, tok, nil) // avoid the newline
- }
- // last line is from 1 to epos.Column
- e.token(f.LineStart(eline), epos.Column-1, tok, nil) // columns are 1-based
-}
-
-// findKeyword finds a keyword rather than guessing its location
-func (e *encoded) findKeyword(keyword string, start, end token.Pos) token.Pos {
- offset := int(start) - e.pgf.Tok.Base()
- last := int(end) - e.pgf.Tok.Base()
- buf := e.pgf.Src
- idx := bytes.Index(buf[offset:last], []byte(keyword))
- if idx != -1 {
- return start + token.Pos(idx)
- }
- //(in unparsable programs: type _ <-<-chan int)
- e.unexpected(fmt.Sprintf("not found:%s %v", keyword, e.fset.PositionFor(start, false)))
- return token.NoPos
-}
-
-func (e *encoded) init() error {
- e.start = token.Pos(e.pgf.Tok.Base())
- e.end = e.start + token.Pos(e.pgf.Tok.Size())
- if e.rng == nil {
- return nil
- }
- span, err := e.pgf.Mapper.RangeSpan(*e.rng)
- if err != nil {
- return errors.Errorf("range span (%w) error for %s", err, e.pgf.File.Name)
- }
- e.end = e.start + token.Pos(span.End().Offset())
- e.start += token.Pos(span.Start().Offset())
- return nil
-}
-
-func (e *encoded) Data() []uint32 {
- // binary operators, at least, will be out of order
- sort.Slice(e.items, func(i, j int) bool {
- if e.items[i].line != e.items[j].line {
- return e.items[i].line < e.items[j].line
- }
- return e.items[i].start < e.items[j].start
- })
- typeMap, modMap := e.maps()
- // each semantic token needs five values
- // (see Integer Encoding for Tokens in the LSP spec)
- x := make([]uint32, 5*len(e.items))
- var j int
- for i := 0; i < len(e.items); i++ {
- typ, ok := typeMap[e.items[i].typeStr]
- if !ok {
- continue // client doesn't want typeStr
- }
- if i == 0 {
- x[0] = e.items[0].line
- } else {
- x[j] = e.items[i].line - e.items[i-1].line
- }
- x[j+1] = e.items[i].start
- if i > 0 && e.items[i].line == e.items[i-1].line {
- x[j+1] = e.items[i].start - e.items[i-1].start
- }
- x[j+2] = e.items[i].len
- x[j+3] = uint32(typ)
- mask := 0
- for _, s := range e.items[i].mods {
- // modMap[s] is 0 if the client doesn't want this modifier
- mask |= modMap[s]
- }
- x[j+4] = uint32(mask)
- j += 5
- }
- return x[:j]
-}
-
-func (e *encoded) importSpec(d *ast.ImportSpec) {
- // a local package name or the last component of the Path
- if d.Name != nil {
- nm := d.Name.String()
- if nm != "_" && nm != "." {
- e.token(d.Name.Pos(), len(nm), tokNamespace, nil)
- }
- return // don't mark anything for . or _
- }
- val := d.Path.Value
- if len(val) < 2 || val[0] != '"' || val[len(val)-1] != '"' {
- // avoid panics on imports without a properly quoted string
- return
- }
- nm := val[1 : len(val)-1] // remove surrounding "s
- // Import strings are implementation defined. Try to match with parse information.
- x, err := e.pkg.GetImport(nm)
- if err != nil {
- // unexpected, but impact is that maybe some import is not colored
- return
- }
- // expect that nm is x.PkgPath and that x.Name() is a component of it
- if x.PkgPath() != nm {
- // don't know how or what to color (if this can happen at all)
- return
- }
- // this is not a precise test: imagine "github.com/nasty/v/v2"
- j := strings.LastIndex(nm, x.Name())
- if j == -1 {
- // name doesn't show up, for whatever reason, so nothing to report
- return
- }
- start := d.Path.Pos() + 1 + token.Pos(j) // skip the initial quote
- e.token(start, len(x.Name()), tokNamespace, nil)
-}
-
-// log unexpected state
-func (e *encoded) unexpected(msg string) {
- if semDebug {
- panic(msg)
- }
- event.Error(e.ctx, e.strStack(), errors.New(msg))
-}
-
-// SemType returns a string equivalent of the type, for gopls semtok
-func SemType(n int) string {
- tokTypes := SemanticTypes()
- tokMods := SemanticModifiers()
- if n >= 0 && n < len(tokTypes) {
- return tokTypes[n]
- }
- return fmt.Sprintf("?%d[%d,%d]?", n, len(tokTypes), len(tokMods))
-}
-
-// SemMods returns the []string equivalent of the mods, for gopls semtok.
-func SemMods(n int) []string {
- tokMods := SemanticModifiers()
- mods := []string{}
- for i := 0; i < len(tokMods); i++ {
- if (n & (1 << uint(i))) != 0 {
- mods = append(mods, tokMods[i])
- }
- }
- return mods
-}
-
-func (e *encoded) maps() (map[tokenType]int, map[string]int) {
- tmap := make(map[tokenType]int)
- mmap := make(map[string]int)
- for i, t := range e.tokTypes {
- tmap[tokenType(t)] = i
- }
- for i, m := range e.tokMods {
- mmap[m] = 1 << uint(i) // go 1.12 compatibility
- }
- return tmap, mmap
-}
-
-// SemanticTypes to use in case there is no client, as in the command line, or tests
-func SemanticTypes() []string {
- return semanticTypes[:]
-}
-
-// SemanticModifiers to use in case there is no client.
-func SemanticModifiers() []string {
- return semanticModifiers[:]
-}
-
-var (
- semanticTypes = [...]string{
- "namespace", "type", "class", "enum", "interface",
- "struct", "typeParameter", "parameter", "variable", "property", "enumMember",
- "event", "function", "method", "macro", "keyword", "modifier", "comment",
- "string", "number", "regexp", "operator",
- }
- semanticModifiers = [...]string{
- "declaration", "definition", "readonly", "static",
- "deprecated", "abstract", "async", "modification", "documentation", "defaultLibrary",
- }
-)
diff --git a/internal/lsp/server.go b/internal/lsp/server.go
deleted file mode 100644
index becfc718e..000000000
--- a/internal/lsp/server.go
+++ /dev/null
@@ -1,168 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package lsp implements LSP for gopls.
-package lsp
-
-import (
- "context"
- "fmt"
- "sync"
-
- "golang.org/x/tools/internal/jsonrpc2"
- "golang.org/x/tools/internal/lsp/progress"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
- errors "golang.org/x/xerrors"
-)
-
-const concurrentAnalyses = 1
-
-// NewServer creates an LSP server and binds it to handle incoming client
-// messages on on the supplied stream.
-func NewServer(session source.Session, client protocol.ClientCloser) *Server {
- tracker := progress.NewTracker(client)
- session.SetProgressTracker(tracker)
- return &Server{
- diagnostics: map[span.URI]*fileReports{},
- gcOptimizationDetails: make(map[string]struct{}),
- watchedGlobPatterns: make(map[string]struct{}),
- changedFiles: make(map[span.URI]struct{}),
- session: session,
- client: client,
- diagnosticsSema: make(chan struct{}, concurrentAnalyses),
- progress: tracker,
- diagDebouncer: newDebouncer(),
- watchedFileDebouncer: newDebouncer(),
- }
-}
-
-type serverState int
-
-const (
- serverCreated = serverState(iota)
- serverInitializing // set once the server has received "initialize" request
- serverInitialized // set once the server has received "initialized" request
- serverShutDown
-)
-
-func (s serverState) String() string {
- switch s {
- case serverCreated:
- return "created"
- case serverInitializing:
- return "initializing"
- case serverInitialized:
- return "initialized"
- case serverShutDown:
- return "shutDown"
- }
- return fmt.Sprintf("(unknown state: %d)", int(s))
-}
-
-// Server implements the protocol.Server interface.
-type Server struct {
- client protocol.ClientCloser
-
- stateMu sync.Mutex
- state serverState
- // notifications generated before serverInitialized
- notifications []*protocol.ShowMessageParams
-
- session source.Session
-
- tempDir string
-
- // changedFiles tracks files for which there has been a textDocument/didChange.
- changedFilesMu sync.Mutex
- changedFiles map[span.URI]struct{}
-
- // folders is only valid between initialize and initialized, and holds the
- // set of folders to build views for when we are ready
- pendingFolders []protocol.WorkspaceFolder
-
- // watchedGlobPatterns is the set of glob patterns that we have requested
- // the client watch on disk. It will be updated as the set of directories
- // that the server should watch changes.
- watchedGlobPatternsMu sync.Mutex
- watchedGlobPatterns map[string]struct{}
- watchRegistrationCount int
-
- diagnosticsMu sync.Mutex
- diagnostics map[span.URI]*fileReports
-
- // gcOptimizationDetails describes the packages for which we want
- // optimization details to be included in the diagnostics. The key is the
- // ID of the package.
- gcOptimizationDetailsMu sync.Mutex
- gcOptimizationDetails map[string]struct{}
-
- // diagnosticsSema limits the concurrency of diagnostics runs, which can be
- // expensive.
- diagnosticsSema chan struct{}
-
- progress *progress.Tracker
-
- // diagDebouncer is used for debouncing diagnostics.
- diagDebouncer *debouncer
-
- // watchedFileDebouncer is used for batching didChangeWatchedFiles notifications.
- watchedFileDebouncer *debouncer
- fileChangeMu sync.Mutex
- pendingOnDiskChanges []*pendingModificationSet
-
- // When the workspace fails to load, we show its status through a progress
- // report with an error message.
- criticalErrorStatusMu sync.Mutex
- criticalErrorStatus *progress.WorkDone
-}
-
-type pendingModificationSet struct {
- diagnoseDone chan struct{}
- changes []source.FileModification
-}
-
-func (s *Server) workDoneProgressCancel(ctx context.Context, params *protocol.WorkDoneProgressCancelParams) error {
- return s.progress.Cancel(ctx, params.Token)
-}
-
-func (s *Server) nonstandardRequest(ctx context.Context, method string, params interface{}) (interface{}, error) {
- switch method {
- case "gopls/diagnoseFiles":
- paramMap := params.(map[string]interface{})
- for _, file := range paramMap["files"].([]interface{}) {
- snapshot, fh, ok, release, err := s.beginFileRequest(ctx, protocol.DocumentURI(file.(string)), source.UnknownKind)
- defer release()
- if !ok {
- return nil, err
- }
-
- fileID, diagnostics, err := source.FileDiagnostics(ctx, snapshot, fh.URI())
- if err != nil {
- return nil, err
- }
- if err := s.client.PublishDiagnostics(ctx, &protocol.PublishDiagnosticsParams{
- URI: protocol.URIFromSpanURI(fh.URI()),
- Diagnostics: toProtocolDiagnostics(diagnostics),
- Version: fileID.Version,
- }); err != nil {
- return nil, err
- }
- }
- if err := s.client.PublishDiagnostics(ctx, &protocol.PublishDiagnosticsParams{
- URI: "gopls://diagnostics-done",
- }); err != nil {
- return nil, err
- }
- return struct{}{}, nil
- }
- return nil, notImplemented(method)
-}
-
-func notImplemented(method string) error {
- return errors.Errorf("%w: %q not yet implemented", jsonrpc2.ErrMethodNotFound, method)
-}
-
-//go:generate helper/helper -d protocol/tsserver.go -o server_gen.go -u .
diff --git a/internal/lsp/server_gen.go b/internal/lsp/server_gen.go
deleted file mode 100644
index 2062693db..000000000
--- a/internal/lsp/server_gen.go
+++ /dev/null
@@ -1,321 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lsp
-
-// code generated by helper. DO NOT EDIT.
-
-import (
- "context"
-
- "golang.org/x/tools/internal/lsp/protocol"
-)
-
-func (s *Server) CodeAction(ctx context.Context, params *protocol.CodeActionParams) ([]protocol.CodeAction, error) {
- return s.codeAction(ctx, params)
-}
-
-func (s *Server) CodeLens(ctx context.Context, params *protocol.CodeLensParams) ([]protocol.CodeLens, error) {
- return s.codeLens(ctx, params)
-}
-
-func (s *Server) CodeLensRefresh(context.Context) error {
- return notImplemented("CodeLensRefresh")
-}
-
-func (s *Server) ColorPresentation(context.Context, *protocol.ColorPresentationParams) ([]protocol.ColorPresentation, error) {
- return nil, notImplemented("ColorPresentation")
-}
-
-func (s *Server) Completion(ctx context.Context, params *protocol.CompletionParams) (*protocol.CompletionList, error) {
- return s.completion(ctx, params)
-}
-
-func (s *Server) Declaration(context.Context, *protocol.DeclarationParams) (protocol.Declaration, error) {
- return nil, notImplemented("Declaration")
-}
-
-func (s *Server) Definition(ctx context.Context, params *protocol.DefinitionParams) (protocol.Definition, error) {
- return s.definition(ctx, params)
-}
-
-func (s *Server) Diagnostic(context.Context, *string) (*string, error) {
- return nil, notImplemented("Diagnostic")
-}
-
-func (s *Server) DiagnosticRefresh(context.Context) error {
- return notImplemented("DiagnosticRefresh")
-}
-
-func (s *Server) DiagnosticWorkspace(context.Context, *protocol.WorkspaceDiagnosticParams) (*protocol.WorkspaceDiagnosticReport, error) {
- return nil, notImplemented("DiagnosticWorkspace")
-}
-
-func (s *Server) DidChange(ctx context.Context, params *protocol.DidChangeTextDocumentParams) error {
- return s.didChange(ctx, params)
-}
-
-func (s *Server) DidChangeConfiguration(ctx context.Context, _gen *protocol.DidChangeConfigurationParams) error {
- return s.didChangeConfiguration(ctx, _gen)
-}
-
-func (s *Server) DidChangeNotebookDocument(context.Context, *protocol.DidChangeNotebookDocumentParams) error {
- return notImplemented("DidChangeNotebookDocument")
-}
-
-func (s *Server) DidChangeWatchedFiles(ctx context.Context, params *protocol.DidChangeWatchedFilesParams) error {
- return s.didChangeWatchedFiles(ctx, params)
-}
-
-func (s *Server) DidChangeWorkspaceFolders(ctx context.Context, params *protocol.DidChangeWorkspaceFoldersParams) error {
- return s.didChangeWorkspaceFolders(ctx, params)
-}
-
-func (s *Server) DidClose(ctx context.Context, params *protocol.DidCloseTextDocumentParams) error {
- return s.didClose(ctx, params)
-}
-
-func (s *Server) DidCloseNotebookDocument(context.Context, *protocol.DidCloseNotebookDocumentParams) error {
- return notImplemented("DidCloseNotebookDocument")
-}
-
-func (s *Server) DidCreateFiles(context.Context, *protocol.CreateFilesParams) error {
- return notImplemented("DidCreateFiles")
-}
-
-func (s *Server) DidDeleteFiles(context.Context, *protocol.DeleteFilesParams) error {
- return notImplemented("DidDeleteFiles")
-}
-
-func (s *Server) DidOpen(ctx context.Context, params *protocol.DidOpenTextDocumentParams) error {
- return s.didOpen(ctx, params)
-}
-
-func (s *Server) DidOpenNotebookDocument(context.Context, *protocol.DidOpenNotebookDocumentParams) error {
- return notImplemented("DidOpenNotebookDocument")
-}
-
-func (s *Server) DidRenameFiles(context.Context, *protocol.RenameFilesParams) error {
- return notImplemented("DidRenameFiles")
-}
-
-func (s *Server) DidSave(ctx context.Context, params *protocol.DidSaveTextDocumentParams) error {
- return s.didSave(ctx, params)
-}
-
-func (s *Server) DidSaveNotebookDocument(context.Context, *protocol.DidSaveNotebookDocumentParams) error {
- return notImplemented("DidSaveNotebookDocument")
-}
-
-func (s *Server) DocumentColor(context.Context, *protocol.DocumentColorParams) ([]protocol.ColorInformation, error) {
- return nil, notImplemented("DocumentColor")
-}
-
-func (s *Server) DocumentHighlight(ctx context.Context, params *protocol.DocumentHighlightParams) ([]protocol.DocumentHighlight, error) {
- return s.documentHighlight(ctx, params)
-}
-
-func (s *Server) DocumentLink(ctx context.Context, params *protocol.DocumentLinkParams) ([]protocol.DocumentLink, error) {
- return s.documentLink(ctx, params)
-}
-
-func (s *Server) DocumentSymbol(ctx context.Context, params *protocol.DocumentSymbolParams) ([]interface{}, error) {
- return s.documentSymbol(ctx, params)
-}
-
-func (s *Server) ExecuteCommand(ctx context.Context, params *protocol.ExecuteCommandParams) (interface{}, error) {
- return s.executeCommand(ctx, params)
-}
-
-func (s *Server) Exit(ctx context.Context) error {
- return s.exit(ctx)
-}
-
-func (s *Server) FoldingRange(ctx context.Context, params *protocol.FoldingRangeParams) ([]protocol.FoldingRange, error) {
- return s.foldingRange(ctx, params)
-}
-
-func (s *Server) Formatting(ctx context.Context, params *protocol.DocumentFormattingParams) ([]protocol.TextEdit, error) {
- return s.formatting(ctx, params)
-}
-
-func (s *Server) Hover(ctx context.Context, params *protocol.HoverParams) (*protocol.Hover, error) {
- return s.hover(ctx, params)
-}
-
-func (s *Server) Implementation(ctx context.Context, params *protocol.ImplementationParams) (protocol.Definition, error) {
- return s.implementation(ctx, params)
-}
-
-func (s *Server) IncomingCalls(ctx context.Context, params *protocol.CallHierarchyIncomingCallsParams) ([]protocol.CallHierarchyIncomingCall, error) {
- return s.incomingCalls(ctx, params)
-}
-
-func (s *Server) Initialize(ctx context.Context, params *protocol.ParamInitialize) (*protocol.InitializeResult, error) {
- return s.initialize(ctx, params)
-}
-
-func (s *Server) Initialized(ctx context.Context, params *protocol.InitializedParams) error {
- return s.initialized(ctx, params)
-}
-
-func (s *Server) InlayHint(context.Context, *protocol.InlayHintParams) ([]protocol.InlayHint, error) {
- return nil, notImplemented("InlayHint")
-}
-
-func (s *Server) InlayHintRefresh(context.Context) error {
- return notImplemented("InlayHintRefresh")
-}
-
-func (s *Server) InlineValue(context.Context, *protocol.InlineValueParams) ([]protocol.InlineValue, error) {
- return nil, notImplemented("InlineValue")
-}
-
-func (s *Server) InlineValueRefresh(context.Context) error {
- return notImplemented("InlineValueRefresh")
-}
-
-func (s *Server) LinkedEditingRange(context.Context, *protocol.LinkedEditingRangeParams) (*protocol.LinkedEditingRanges, error) {
- return nil, notImplemented("LinkedEditingRange")
-}
-
-func (s *Server) LogTrace(context.Context, *protocol.LogTraceParams) error {
- return notImplemented("LogTrace")
-}
-
-func (s *Server) Moniker(context.Context, *protocol.MonikerParams) ([]protocol.Moniker, error) {
- return nil, notImplemented("Moniker")
-}
-
-func (s *Server) NonstandardRequest(ctx context.Context, method string, params interface{}) (interface{}, error) {
- return s.nonstandardRequest(ctx, method, params)
-}
-
-func (s *Server) OnTypeFormatting(context.Context, *protocol.DocumentOnTypeFormattingParams) ([]protocol.TextEdit, error) {
- return nil, notImplemented("OnTypeFormatting")
-}
-
-func (s *Server) OutgoingCalls(ctx context.Context, params *protocol.CallHierarchyOutgoingCallsParams) ([]protocol.CallHierarchyOutgoingCall, error) {
- return s.outgoingCalls(ctx, params)
-}
-
-func (s *Server) PrepareCallHierarchy(ctx context.Context, params *protocol.CallHierarchyPrepareParams) ([]protocol.CallHierarchyItem, error) {
- return s.prepareCallHierarchy(ctx, params)
-}
-
-func (s *Server) PrepareRename(ctx context.Context, params *protocol.PrepareRenameParams) (*protocol.PrepareRename2Gn, error) {
- return s.prepareRename(ctx, params)
-}
-
-func (s *Server) PrepareTypeHierarchy(context.Context, *protocol.TypeHierarchyPrepareParams) ([]protocol.TypeHierarchyItem, error) {
- return nil, notImplemented("PrepareTypeHierarchy")
-}
-
-func (s *Server) RangeFormatting(context.Context, *protocol.DocumentRangeFormattingParams) ([]protocol.TextEdit, error) {
- return nil, notImplemented("RangeFormatting")
-}
-
-func (s *Server) References(ctx context.Context, params *protocol.ReferenceParams) ([]protocol.Location, error) {
- return s.references(ctx, params)
-}
-
-func (s *Server) Rename(ctx context.Context, params *protocol.RenameParams) (*protocol.WorkspaceEdit, error) {
- return s.rename(ctx, params)
-}
-
-func (s *Server) Resolve(context.Context, *protocol.InlayHint) (*protocol.InlayHint, error) {
- return nil, notImplemented("Resolve")
-}
-
-func (s *Server) ResolveCodeAction(context.Context, *protocol.CodeAction) (*protocol.CodeAction, error) {
- return nil, notImplemented("ResolveCodeAction")
-}
-
-func (s *Server) ResolveCodeLens(context.Context, *protocol.CodeLens) (*protocol.CodeLens, error) {
- return nil, notImplemented("ResolveCodeLens")
-}
-
-func (s *Server) ResolveCompletionItem(context.Context, *protocol.CompletionItem) (*protocol.CompletionItem, error) {
- return nil, notImplemented("ResolveCompletionItem")
-}
-
-func (s *Server) ResolveDocumentLink(context.Context, *protocol.DocumentLink) (*protocol.DocumentLink, error) {
- return nil, notImplemented("ResolveDocumentLink")
-}
-
-func (s *Server) ResolveWorkspaceSymbol(context.Context, *protocol.WorkspaceSymbol) (*protocol.WorkspaceSymbol, error) {
- return nil, notImplemented("ResolveWorkspaceSymbol")
-}
-
-func (s *Server) SelectionRange(context.Context, *protocol.SelectionRangeParams) ([]protocol.SelectionRange, error) {
- return nil, notImplemented("SelectionRange")
-}
-
-func (s *Server) SemanticTokensFull(ctx context.Context, p *protocol.SemanticTokensParams) (*protocol.SemanticTokens, error) {
- return s.semanticTokensFull(ctx, p)
-}
-
-func (s *Server) SemanticTokensFullDelta(ctx context.Context, p *protocol.SemanticTokensDeltaParams) (interface{}, error) {
- return s.semanticTokensFullDelta(ctx, p)
-}
-
-func (s *Server) SemanticTokensRange(ctx context.Context, p *protocol.SemanticTokensRangeParams) (*protocol.SemanticTokens, error) {
- return s.semanticTokensRange(ctx, p)
-}
-
-func (s *Server) SemanticTokensRefresh(ctx context.Context) error {
- return s.semanticTokensRefresh(ctx)
-}
-
-func (s *Server) SetTrace(context.Context, *protocol.SetTraceParams) error {
- return notImplemented("SetTrace")
-}
-
-func (s *Server) Shutdown(ctx context.Context) error {
- return s.shutdown(ctx)
-}
-
-func (s *Server) SignatureHelp(ctx context.Context, params *protocol.SignatureHelpParams) (*protocol.SignatureHelp, error) {
- return s.signatureHelp(ctx, params)
-}
-
-func (s *Server) Subtypes(context.Context, *protocol.TypeHierarchySubtypesParams) ([]protocol.TypeHierarchyItem, error) {
- return nil, notImplemented("Subtypes")
-}
-
-func (s *Server) Supertypes(context.Context, *protocol.TypeHierarchySupertypesParams) ([]protocol.TypeHierarchyItem, error) {
- return nil, notImplemented("Supertypes")
-}
-
-func (s *Server) Symbol(ctx context.Context, params *protocol.WorkspaceSymbolParams) ([]protocol.SymbolInformation, error) {
- return s.symbol(ctx, params)
-}
-
-func (s *Server) TypeDefinition(ctx context.Context, params *protocol.TypeDefinitionParams) (protocol.Definition, error) {
- return s.typeDefinition(ctx, params)
-}
-
-func (s *Server) WillCreateFiles(context.Context, *protocol.CreateFilesParams) (*protocol.WorkspaceEdit, error) {
- return nil, notImplemented("WillCreateFiles")
-}
-
-func (s *Server) WillDeleteFiles(context.Context, *protocol.DeleteFilesParams) (*protocol.WorkspaceEdit, error) {
- return nil, notImplemented("WillDeleteFiles")
-}
-
-func (s *Server) WillRenameFiles(context.Context, *protocol.RenameFilesParams) (*protocol.WorkspaceEdit, error) {
- return nil, notImplemented("WillRenameFiles")
-}
-
-func (s *Server) WillSave(context.Context, *protocol.WillSaveTextDocumentParams) error {
- return notImplemented("WillSave")
-}
-
-func (s *Server) WillSaveWaitUntil(context.Context, *protocol.WillSaveTextDocumentParams) ([]protocol.TextEdit, error) {
- return nil, notImplemented("WillSaveWaitUntil")
-}
-
-func (s *Server) WorkDoneProgressCancel(ctx context.Context, params *protocol.WorkDoneProgressCancelParams) error {
- return s.workDoneProgressCancel(ctx, params)
-}
diff --git a/internal/lsp/signature_help.go b/internal/lsp/signature_help.go
deleted file mode 100644
index 24dee1b9a..000000000
--- a/internal/lsp/signature_help.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lsp
-
-import (
- "context"
-
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/lsp/debug/tag"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
-)
-
-func (s *Server) signatureHelp(ctx context.Context, params *protocol.SignatureHelpParams) (*protocol.SignatureHelp, error) {
- snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.Go)
- defer release()
- if !ok {
- return nil, err
- }
- info, activeParameter, err := source.SignatureHelp(ctx, snapshot, fh, params.Position)
- if err != nil {
- event.Error(ctx, "no signature help", err, tag.Position.Of(params.Position))
- return nil, nil
- }
- return &protocol.SignatureHelp{
- Signatures: []protocol.SignatureInformation{*info},
- ActiveParameter: uint32(activeParameter),
- }, nil
-}
diff --git a/internal/lsp/snippet/snippet_builder.go b/internal/lsp/snippet/snippet_builder.go
deleted file mode 100644
index f7fc5b445..000000000
--- a/internal/lsp/snippet/snippet_builder.go
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package snippet implements the specification for the LSP snippet format.
-//
-// Snippets are "tab stop" templates returned as an optional attribute of LSP
-// completion candidates. As the user presses tab, they cycle through a series of
-// tab stops defined in the snippet. Each tab stop can optionally have placeholder
-// text, which can be pre-selected by editors. For a full description of syntax
-// and features, see "Snippet Syntax" at
-// https://microsoft.github.io/language-server-protocol/specifications/specification-3-14/#textDocument_completion.
-//
-// A typical snippet looks like "foo(${1:i int}, ${2:s string})".
-package snippet
-
-import (
- "fmt"
- "strings"
-)
-
-// A Builder is used to build an LSP snippet piecemeal.
-// The zero value is ready to use. Do not copy a non-zero Builder.
-type Builder struct {
- // currentTabStop is the index of the previous tab stop. The
- // next tab stop will be currentTabStop+1.
- currentTabStop int
- sb strings.Builder
-}
-
-// Escape characters defined in https://microsoft.github.io/language-server-protocol/specifications/specification-3-14/#textDocument_completion under "Grammar".
-var replacer = strings.NewReplacer(
- `\`, `\\`,
- `}`, `\}`,
- `$`, `\$`,
-)
-
-func (b *Builder) WriteText(s string) {
- replacer.WriteString(&b.sb, s)
-}
-
-func (b *Builder) PrependText(s string) {
- rawSnip := b.String()
- b.sb.Reset()
- b.WriteText(s)
- b.sb.WriteString(rawSnip)
-}
-
-func (b *Builder) Write(data []byte) (int, error) {
- return b.sb.Write(data)
-}
-
-// WritePlaceholder writes a tab stop and placeholder value to the Builder.
-// The callback style allows for creating nested placeholders. To write an
-// empty tab stop, provide a nil callback.
-func (b *Builder) WritePlaceholder(fn func(*Builder)) {
- fmt.Fprintf(&b.sb, "${%d:", b.nextTabStop())
- if fn != nil {
- fn(b)
- }
- b.sb.WriteByte('}')
-}
-
-// WriteFinalTabstop marks where cursor ends up after the user has
-// cycled through all the normal tab stops. It defaults to the
-// character after the snippet.
-func (b *Builder) WriteFinalTabstop() {
- fmt.Fprint(&b.sb, "$0")
-}
-
-// In addition to '\', '}', and '$', snippet choices also use '|' and ',' as
-// meta characters, so they must be escaped within the choices.
-var choiceReplacer = strings.NewReplacer(
- `\`, `\\`,
- `}`, `\}`,
- `$`, `\$`,
- `|`, `\|`,
- `,`, `\,`,
-)
-
-// WriteChoice writes a tab stop and list of text choices to the Builder.
-// The user's editor will prompt the user to choose one of the choices.
-func (b *Builder) WriteChoice(choices []string) {
- fmt.Fprintf(&b.sb, "${%d|", b.nextTabStop())
- for i, c := range choices {
- if i != 0 {
- b.sb.WriteByte(',')
- }
- choiceReplacer.WriteString(&b.sb, c)
- }
- b.sb.WriteString("|}")
-}
-
-// String returns the built snippet string.
-func (b *Builder) String() string {
- return b.sb.String()
-}
-
-// nextTabStop returns the next tab stop index for a new placeholder.
-func (b *Builder) nextTabStop() int {
- // Tab stops start from 1, so increment before returning.
- b.currentTabStop++
- return b.currentTabStop
-}
diff --git a/internal/lsp/source/add_import.go b/internal/lsp/source/add_import.go
deleted file mode 100644
index 816acc2c2..000000000
--- a/internal/lsp/source/add_import.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package source
-
-import (
- "context"
-
- "golang.org/x/tools/internal/imports"
- "golang.org/x/tools/internal/lsp/protocol"
-)
-
-// AddImport adds a single import statement to the given file
-func AddImport(ctx context.Context, snapshot Snapshot, fh VersionedFileHandle, importPath string) ([]protocol.TextEdit, error) {
- _, pgf, err := GetParsedFile(ctx, snapshot, fh, NarrowestPackage)
- if err != nil {
- return nil, err
- }
- return ComputeOneImportFixEdits(snapshot, pgf, &imports.ImportFix{
- StmtInfo: imports.ImportInfo{
- ImportPath: importPath,
- },
- FixType: imports.AddImport,
- })
-}
diff --git a/internal/lsp/source/api_json.go b/internal/lsp/source/api_json.go
deleted file mode 100755
index 14140bb63..000000000
--- a/internal/lsp/source/api_json.go
+++ /dev/null
@@ -1,972 +0,0 @@
-// Code generated by "golang.org/x/tools/gopls/doc/generate"; DO NOT EDIT.
-
-package source
-
-var GeneratedAPIJSON = &APIJSON{
- Options: map[string][]*OptionJSON{
- "User": {
- {
- Name: "buildFlags",
- Type: "[]string",
- Doc: "buildFlags is the set of flags passed on to the build system when invoked.\nIt is applied to queries like `go list`, which is used when discovering files.\nThe most common use is to set `-tags`.\n",
- Default: "[]",
- Hierarchy: "build",
- },
- {
- Name: "env",
- Type: "map[string]string",
- Doc: "env adds environment variables to external commands run by `gopls`, most notably `go list`.\n",
- Default: "{}",
- Hierarchy: "build",
- },
- {
- Name: "directoryFilters",
- Type: "[]string",
- Doc: "directoryFilters can be used to exclude unwanted directories from the\nworkspace. By default, all directories are included. Filters are an\noperator, `+` to include and `-` to exclude, followed by a path prefix\nrelative to the workspace folder. They are evaluated in order, and\nthe last filter that applies to a path controls whether it is included.\nThe path prefix can be empty, so an initial `-` excludes everything.\n\nExamples:\n\nExclude node_modules: `-node_modules`\n\nInclude only project_a: `-` (exclude everything), `+project_a`\n\nInclude only project_a, but not node_modules inside it: `-`, `+project_a`, `-project_a/node_modules`\n",
- Default: "[\"-node_modules\"]",
- Hierarchy: "build",
- },
- {
- Name: "templateExtensions",
- Type: "[]string",
- Doc: "templateExtensions gives the extensions of file names that are treateed\nas template files. (The extension\nis the part of the file name after the final dot.)\n",
- Default: "[]",
- Hierarchy: "build",
- },
- {
- Name: "memoryMode",
- Type: "enum",
- Doc: "memoryMode controls the tradeoff `gopls` makes between memory usage and\ncorrectness.\n\nValues other than `Normal` are untested and may break in surprising ways.\n",
- EnumValues: []EnumValue{
- {
- Value: "\"DegradeClosed\"",
- Doc: "`\"DegradeClosed\"`: In DegradeClosed mode, `gopls` will collect less information about\npackages without open files. As a result, features like Find\nReferences and Rename will miss results in such packages.\n",
- },
- {Value: "\"Normal\""},
- },
- Default: "\"Normal\"",
- Status: "experimental",
- Hierarchy: "build",
- },
- {
- Name: "expandWorkspaceToModule",
- Type: "bool",
- Doc: "expandWorkspaceToModule instructs `gopls` to adjust the scope of the\nworkspace to find the best available module root. `gopls` first looks for\na go.mod file in any parent directory of the workspace folder, expanding\nthe scope to that directory if it exists. If no viable parent directory is\nfound, gopls will check if there is exactly one child directory containing\na go.mod file, narrowing the scope to that directory if it exists.\n",
- Default: "true",
- Status: "experimental",
- Hierarchy: "build",
- },
- {
- Name: "experimentalWorkspaceModule",
- Type: "bool",
- Doc: "experimentalWorkspaceModule opts a user into the experimental support\nfor multi-module workspaces.\n",
- Default: "false",
- Status: "experimental",
- Hierarchy: "build",
- },
- {
- Name: "experimentalPackageCacheKey",
- Type: "bool",
- Doc: "experimentalPackageCacheKey controls whether to use a coarser cache key\nfor package type information to increase cache hits. This setting removes\nthe user's environment, build flags, and working directory from the cache\nkey, which should be a safe change as all relevant inputs into the type\nchecking pass are already hashed into the key. This is temporarily guarded\nby an experiment because caching behavior is subtle and difficult to\ncomprehensively test.\n",
- Default: "true",
- Status: "experimental",
- Hierarchy: "build",
- },
- {
- Name: "allowModfileModifications",
- Type: "bool",
- Doc: "allowModfileModifications disables -mod=readonly, allowing imports from\nout-of-scope modules. This option will eventually be removed.\n",
- Default: "false",
- Status: "experimental",
- Hierarchy: "build",
- },
- {
- Name: "allowImplicitNetworkAccess",
- Type: "bool",
- Doc: "allowImplicitNetworkAccess disables GOPROXY=off, allowing implicit module\ndownloads rather than requiring user action. This option will eventually\nbe removed.\n",
- Default: "false",
- Status: "experimental",
- Hierarchy: "build",
- },
- {
- Name: "experimentalUseInvalidMetadata",
- Type: "bool",
- Doc: "experimentalUseInvalidMetadata enables gopls to fall back on outdated\npackage metadata to provide editor features if the go command fails to\nload packages for some reason (like an invalid go.mod file). This will\neventually be the default behavior, and this setting will be removed.\n",
- Default: "false",
- Status: "experimental",
- Hierarchy: "build",
- },
- {
- Name: "hoverKind",
- Type: "enum",
- Doc: "hoverKind controls the information that appears in the hover text.\nSingleLine and Structured are intended for use only by authors of editor plugins.\n",
- EnumValues: []EnumValue{
- {Value: "\"FullDocumentation\""},
- {Value: "\"NoDocumentation\""},
- {Value: "\"SingleLine\""},
- {
- Value: "\"Structured\"",
- Doc: "`\"Structured\"` is an experimental setting that returns a structured hover format.\nThis format separates the signature from the documentation, so that the client\ncan do more manipulation of these fields.\n\nThis should only be used by clients that support this behavior.\n",
- },
- {Value: "\"SynopsisDocumentation\""},
- },
- Default: "\"FullDocumentation\"",
- Hierarchy: "ui.documentation",
- },
- {
- Name: "linkTarget",
- Type: "string",
- Doc: "linkTarget controls where documentation links go.\nIt might be one of:\n\n* `\"godoc.org\"`\n* `\"pkg.go.dev\"`\n\nIf company chooses to use its own `godoc.org`, its address can be used as well.\n",
- Default: "\"pkg.go.dev\"",
- Hierarchy: "ui.documentation",
- },
- {
- Name: "linksInHover",
- Type: "bool",
- Doc: "linksInHover toggles the presence of links to documentation in hover.\n",
- Default: "true",
- Hierarchy: "ui.documentation",
- },
- {
- Name: "usePlaceholders",
- Type: "bool",
- Doc: "placeholders enables placeholders for function parameters or struct\nfields in completion responses.\n",
- Default: "false",
- Hierarchy: "ui.completion",
- },
- {
- Name: "completionBudget",
- Type: "time.Duration",
- Doc: "completionBudget is the soft latency goal for completion requests. Most\nrequests finish in a couple milliseconds, but in some cases deep\ncompletions can take much longer. As we use up our budget we\ndynamically reduce the search scope to ensure we return timely\nresults. Zero means unlimited.\n",
- Default: "\"100ms\"",
- Status: "debug",
- Hierarchy: "ui.completion",
- },
- {
- Name: "matcher",
- Type: "enum",
- Doc: "matcher sets the algorithm that is used when calculating completion\ncandidates.\n",
- EnumValues: []EnumValue{
- {Value: "\"CaseInsensitive\""},
- {Value: "\"CaseSensitive\""},
- {Value: "\"Fuzzy\""},
- },
- Default: "\"Fuzzy\"",
- Status: "advanced",
- Hierarchy: "ui.completion",
- },
- {
- Name: "experimentalPostfixCompletions",
- Type: "bool",
- Doc: "experimentalPostfixCompletions enables artificial method snippets\nsuch as \"someSlice.sort!\".\n",
- Default: "true",
- Status: "experimental",
- Hierarchy: "ui.completion",
- },
- {
- Name: "importShortcut",
- Type: "enum",
- Doc: "importShortcut specifies whether import statements should link to\ndocumentation or go to definitions.\n",
- EnumValues: []EnumValue{
- {Value: "\"Both\""},
- {Value: "\"Definition\""},
- {Value: "\"Link\""},
- },
- Default: "\"Both\"",
- Hierarchy: "ui.navigation",
- },
- {
- Name: "symbolMatcher",
- Type: "enum",
- Doc: "symbolMatcher sets the algorithm that is used when finding workspace symbols.\n",
- EnumValues: []EnumValue{
- {Value: "\"CaseInsensitive\""},
- {Value: "\"CaseSensitive\""},
- {Value: "\"FastFuzzy\""},
- {Value: "\"Fuzzy\""},
- },
- Default: "\"FastFuzzy\"",
- Status: "advanced",
- Hierarchy: "ui.navigation",
- },
- {
- Name: "symbolStyle",
- Type: "enum",
- Doc: "symbolStyle controls how symbols are qualified in symbol responses.\n\nExample Usage:\n\n```json5\n\"gopls\": {\n...\n \"symbolStyle\": \"Dynamic\",\n...\n}\n```\n",
- EnumValues: []EnumValue{
- {
- Value: "\"Dynamic\"",
- Doc: "`\"Dynamic\"` uses whichever qualifier results in the highest scoring\nmatch for the given symbol query. Here a \"qualifier\" is any \"/\" or \".\"\ndelimited suffix of the fully qualified symbol. i.e. \"to/pkg.Foo.Field\" or\njust \"Foo.Field\".\n",
- },
- {
- Value: "\"Full\"",
- Doc: "`\"Full\"` is fully qualified symbols, i.e.\n\"path/to/pkg.Foo.Field\".\n",
- },
- {
- Value: "\"Package\"",
- Doc: "`\"Package\"` is package qualified symbols i.e.\n\"pkg.Foo.Field\".\n",
- },
- },
- Default: "\"Dynamic\"",
- Status: "advanced",
- Hierarchy: "ui.navigation",
- },
- {
- Name: "analyses",
- Type: "map[string]bool",
- Doc: "analyses specify analyses that the user would like to enable or disable.\nA map of the names of analysis passes that should be enabled/disabled.\nA full list of analyzers that gopls uses can be found\n[here](https://github.com/golang/tools/blob/master/gopls/doc/analyzers.md).\n\nExample Usage:\n\n```json5\n...\n\"analyses\": {\n \"unreachable\": false, // Disable the unreachable analyzer.\n \"unusedparams\": true // Enable the unusedparams analyzer.\n}\n...\n```\n",
- EnumKeys: EnumKeys{
- ValueType: "bool",
- Keys: []EnumKey{
- {
- Name: "\"asmdecl\"",
- Doc: "report mismatches between assembly files and Go declarations",
- Default: "true",
- },
- {
- Name: "\"assign\"",
- Doc: "check for useless assignments\n\nThis checker reports assignments of the form x = x or a[i] = a[i].\nThese are almost always useless, and even when they aren't they are\nusually a mistake.",
- Default: "true",
- },
- {
- Name: "\"atomic\"",
- Doc: "check for common mistakes using the sync/atomic package\n\nThe atomic checker looks for assignment statements of the form:\n\n\tx = atomic.AddUint64(&x, 1)\n\nwhich are not atomic.",
- Default: "true",
- },
- {
- Name: "\"atomicalign\"",
- Doc: "check for non-64-bits-aligned arguments to sync/atomic functions",
- Default: "true",
- },
- {
- Name: "\"bools\"",
- Doc: "check for common mistakes involving boolean operators",
- Default: "true",
- },
- {
- Name: "\"buildtag\"",
- Doc: "check that +build tags are well-formed and correctly located",
- Default: "true",
- },
- {
- Name: "\"cgocall\"",
- Doc: "detect some violations of the cgo pointer passing rules\n\nCheck for invalid cgo pointer passing.\nThis looks for code that uses cgo to call C code passing values\nwhose types are almost always invalid according to the cgo pointer\nsharing rules.\nSpecifically, it warns about attempts to pass a Go chan, map, func,\nor slice to C, either directly, or via a pointer, array, or struct.",
- Default: "true",
- },
- {
- Name: "\"composites\"",
- Doc: "check for unkeyed composite literals\n\nThis analyzer reports a diagnostic for composite literals of struct\ntypes imported from another package that do not use the field-keyed\nsyntax. Such literals are fragile because the addition of a new field\n(even if unexported) to the struct will cause compilation to fail.\n\nAs an example,\n\n\terr = &net.DNSConfigError{err}\n\nshould be replaced by:\n\n\terr = &net.DNSConfigError{Err: err}\n",
- Default: "true",
- },
- {
- Name: "\"copylocks\"",
- Doc: "check for locks erroneously passed by value\n\nInadvertently copying a value containing a lock, such as sync.Mutex or\nsync.WaitGroup, may cause both copies to malfunction. Generally such\nvalues should be referred to through a pointer.",
- Default: "true",
- },
- {
- Name: "\"deepequalerrors\"",
- Doc: "check for calls of reflect.DeepEqual on error values\n\nThe deepequalerrors checker looks for calls of the form:\n\n reflect.DeepEqual(err1, err2)\n\nwhere err1 and err2 are errors. Using reflect.DeepEqual to compare\nerrors is discouraged.",
- Default: "true",
- },
- {
- Name: "\"errorsas\"",
- Doc: "report passing non-pointer or non-error values to errors.As\n\nThe errorsas analysis reports calls to errors.As where the type\nof the second argument is not a pointer to a type implementing error.",
- Default: "true",
- },
- {
- Name: "\"fieldalignment\"",
- Doc: "find structs that would use less memory if their fields were sorted\n\nThis analyzer find structs that can be rearranged to use less memory, and provides\na suggested edit with the optimal order.\n\nNote that there are two different diagnostics reported. One checks struct size,\nand the other reports \"pointer bytes\" used. Pointer bytes is how many bytes of the\nobject that the garbage collector has to potentially scan for pointers, for example:\n\n\tstruct { uint32; string }\n\nhave 16 pointer bytes because the garbage collector has to scan up through the string's\ninner pointer.\n\n\tstruct { string; *uint32 }\n\nhas 24 pointer bytes because it has to scan further through the *uint32.\n\n\tstruct { string; uint32 }\n\nhas 8 because it can stop immediately after the string pointer.\n",
- Default: "false",
- },
- {
- Name: "\"httpresponse\"",
- Doc: "check for mistakes using HTTP responses\n\nA common mistake when using the net/http package is to defer a function\ncall to close the http.Response Body before checking the error that\ndetermines whether the response is valid:\n\n\tresp, err := http.Head(url)\n\tdefer resp.Body.Close()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t// (defer statement belongs here)\n\nThis checker helps uncover latent nil dereference bugs by reporting a\ndiagnostic for such mistakes.",
- Default: "true",
- },
- {
- Name: "\"ifaceassert\"",
- Doc: "detect impossible interface-to-interface type assertions\n\nThis checker flags type assertions v.(T) and corresponding type-switch cases\nin which the static type V of v is an interface that cannot possibly implement\nthe target interface T. This occurs when V and T contain methods with the same\nname but different signatures. Example:\n\n\tvar v interface {\n\t\tRead()\n\t}\n\t_ = v.(io.Reader)\n\nThe Read method in v has a different signature than the Read method in\nio.Reader, so this assertion cannot succeed.\n",
- Default: "true",
- },
- {
- Name: "\"infertypeargs\"",
- Doc: "check for unnecessary type arguments in call expressions\n\nExplicit type arguments may be omitted from call expressions if they can be\ninferred from function arguments, or from other type arguments:\n\n\tfunc f[T any](T) {}\n\t\n\tfunc _() {\n\t\tf[string](\"foo\") // string could be inferred\n\t}\n",
- Default: "true",
- },
- {
- Name: "\"loopclosure\"",
- Doc: "check references to loop variables from within nested functions\n\nThis analyzer checks for references to loop variables from within a\nfunction literal inside the loop body. It checks only instances where\nthe function literal is called in a defer or go statement that is the\nlast statement in the loop body, as otherwise we would need whole\nprogram analysis.\n\nFor example:\n\n\tfor i, v := range s {\n\t\tgo func() {\n\t\t\tprintln(i, v) // not what you might expect\n\t\t}()\n\t}\n\nSee: https://golang.org/doc/go_faq.html#closures_and_goroutines",
- Default: "true",
- },
- {
- Name: "\"lostcancel\"",
- Doc: "check cancel func returned by context.WithCancel is called\n\nThe cancellation function returned by context.WithCancel, WithTimeout,\nand WithDeadline must be called or the new context will remain live\nuntil its parent context is cancelled.\n(The background context is never cancelled.)",
- Default: "true",
- },
- {
- Name: "\"nilfunc\"",
- Doc: "check for useless comparisons between functions and nil\n\nA useless comparison is one like f == nil as opposed to f() == nil.",
- Default: "true",
- },
- {
- Name: "\"nilness\"",
- Doc: "check for redundant or impossible nil comparisons\n\nThe nilness checker inspects the control-flow graph of each function in\na package and reports nil pointer dereferences, degenerate nil\npointers, and panics with nil values. A degenerate comparison is of the form\nx==nil or x!=nil where x is statically known to be nil or non-nil. These are\noften a mistake, especially in control flow related to errors. Panics with nil\nvalues are checked because they are not detectable by\n\n\tif r := recover(); r != nil {\n\nThis check reports conditions such as:\n\n\tif f == nil { // impossible condition (f is a function)\n\t}\n\nand:\n\n\tp := &v\n\t...\n\tif p != nil { // tautological condition\n\t}\n\nand:\n\n\tif p == nil {\n\t\tprint(*p) // nil dereference\n\t}\n\nand:\n\n\tif p == nil {\n\t\tpanic(p)\n\t}\n",
- Default: "false",
- },
- {
- Name: "\"printf\"",
- Doc: "check consistency of Printf format strings and arguments\n\nThe check applies to known functions (for example, those in package fmt)\nas well as any detected wrappers of known functions.\n\nA function that wants to avail itself of printf checking but is not\nfound by this analyzer's heuristics (for example, due to use of\ndynamic calls) can insert a bogus call:\n\n\tif false {\n\t\t_ = fmt.Sprintf(format, args...) // enable printf checking\n\t}\n\nThe -funcs flag specifies a comma-separated list of names of additional\nknown formatting functions or methods. If the name contains a period,\nit must denote a specific function using one of the following forms:\n\n\tdir/pkg.Function\n\tdir/pkg.Type.Method\n\t(*dir/pkg.Type).Method\n\nOtherwise the name is interpreted as a case-insensitive unqualified\nidentifier such as \"errorf\". Either way, if a listed name ends in f, the\nfunction is assumed to be Printf-like, taking a format string before the\nargument list. Otherwise it is assumed to be Print-like, taking a list\nof arguments with no format string.\n",
- Default: "true",
- },
- {
- Name: "\"shadow\"",
- Doc: "check for possible unintended shadowing of variables\n\nThis analyzer check for shadowed variables.\nA shadowed variable is a variable declared in an inner scope\nwith the same name and type as a variable in an outer scope,\nand where the outer variable is mentioned after the inner one\nis declared.\n\n(This definition can be refined; the module generates too many\nfalse positives and is not yet enabled by default.)\n\nFor example:\n\n\tfunc BadRead(f *os.File, buf []byte) error {\n\t\tvar err error\n\t\tfor {\n\t\t\tn, err := f.Read(buf) // shadows the function variable 'err'\n\t\t\tif err != nil {\n\t\t\t\tbreak // causes return of wrong value\n\t\t\t}\n\t\t\tfoo(buf)\n\t\t}\n\t\treturn err\n\t}\n",
- Default: "false",
- },
- {
- Name: "\"shift\"",
- Doc: "check for shifts that equal or exceed the width of the integer",
- Default: "true",
- },
- {
- Name: "\"simplifycompositelit\"",
- Doc: "check for composite literal simplifications\n\nAn array, slice, or map composite literal of the form:\n\t[]T{T{}, T{}}\nwill be simplified to:\n\t[]T{{}, {}}\n\nThis is one of the simplifications that \"gofmt -s\" applies.",
- Default: "true",
- },
- {
- Name: "\"simplifyrange\"",
- Doc: "check for range statement simplifications\n\nA range of the form:\n\tfor x, _ = range v {...}\nwill be simplified to:\n\tfor x = range v {...}\n\nA range of the form:\n\tfor _ = range v {...}\nwill be simplified to:\n\tfor range v {...}\n\nThis is one of the simplifications that \"gofmt -s\" applies.",
- Default: "true",
- },
- {
- Name: "\"simplifyslice\"",
- Doc: "check for slice simplifications\n\nA slice expression of the form:\n\ts[a:len(s)]\nwill be simplified to:\n\ts[a:]\n\nThis is one of the simplifications that \"gofmt -s\" applies.",
- Default: "true",
- },
- {
- Name: "\"sortslice\"",
- Doc: "check the argument type of sort.Slice\n\nsort.Slice requires an argument of a slice type. Check that\nthe interface{} value passed to sort.Slice is actually a slice.",
- Default: "true",
- },
- {
- Name: "\"stdmethods\"",
- Doc: "check signature of methods of well-known interfaces\n\nSometimes a type may be intended to satisfy an interface but may fail to\ndo so because of a mistake in its method signature.\nFor example, the result of this WriteTo method should be (int64, error),\nnot error, to satisfy io.WriterTo:\n\n\ttype myWriterTo struct{...}\n func (myWriterTo) WriteTo(w io.Writer) error { ... }\n\nThis check ensures that each method whose name matches one of several\nwell-known interface methods from the standard library has the correct\nsignature for that interface.\n\nChecked method names include:\n\tFormat GobEncode GobDecode MarshalJSON MarshalXML\n\tPeek ReadByte ReadFrom ReadRune Scan Seek\n\tUnmarshalJSON UnreadByte UnreadRune WriteByte\n\tWriteTo\n",
- Default: "true",
- },
- {
- Name: "\"stringintconv\"",
- Doc: "check for string(int) conversions\n\nThis checker flags conversions of the form string(x) where x is an integer\n(but not byte or rune) type. Such conversions are discouraged because they\nreturn the UTF-8 representation of the Unicode code point x, and not a decimal\nstring representation of x as one might expect. Furthermore, if x denotes an\ninvalid code point, the conversion cannot be statically rejected.\n\nFor conversions that intend on using the code point, consider replacing them\nwith string(rune(x)). Otherwise, strconv.Itoa and its equivalents return the\nstring representation of the value in the desired base.\n",
- Default: "true",
- },
- {
- Name: "\"structtag\"",
- Doc: "check that struct field tags conform to reflect.StructTag.Get\n\nAlso report certain struct tags (json, xml) used with unexported fields.",
- Default: "true",
- },
- {
- Name: "\"testinggoroutine\"",
- Doc: "report calls to (*testing.T).Fatal from goroutines started by a test.\n\nFunctions that abruptly terminate a test, such as the Fatal, Fatalf, FailNow, and\nSkip{,f,Now} methods of *testing.T, must be called from the test goroutine itself.\nThis checker detects calls to these functions that occur within a goroutine\nstarted by the test. For example:\n\nfunc TestFoo(t *testing.T) {\n go func() {\n t.Fatal(\"oops\") // error: (*T).Fatal called from non-test goroutine\n }()\n}\n",
- Default: "true",
- },
- {
- Name: "\"tests\"",
- Doc: "check for common mistaken usages of tests and examples\n\nThe tests checker walks Test, Benchmark and Example functions checking\nmalformed names, wrong signatures and examples documenting non-existent\nidentifiers.\n\nPlease see the documentation for package testing in golang.org/pkg/testing\nfor the conventions that are enforced for Tests, Benchmarks, and Examples.",
- Default: "true",
- },
- {
- Name: "\"unmarshal\"",
- Doc: "report passing non-pointer or non-interface values to unmarshal\n\nThe unmarshal analysis reports calls to functions such as json.Unmarshal\nin which the argument type is not a pointer or an interface.",
- Default: "true",
- },
- {
- Name: "\"unreachable\"",
- Doc: "check for unreachable code\n\nThe unreachable analyzer finds statements that execution can never reach\nbecause they are preceded by an return statement, a call to panic, an\ninfinite loop, or similar constructs.",
- Default: "true",
- },
- {
- Name: "\"unsafeptr\"",
- Doc: "check for invalid conversions of uintptr to unsafe.Pointer\n\nThe unsafeptr analyzer reports likely incorrect uses of unsafe.Pointer\nto convert integers to pointers. A conversion from uintptr to\nunsafe.Pointer is invalid if it implies that there is a uintptr-typed\nword in memory that holds a pointer value, because that word will be\ninvisible to stack copying and to the garbage collector.",
- Default: "true",
- },
- {
- Name: "\"unusedparams\"",
- Doc: "check for unused parameters of functions\n\nThe unusedparams analyzer checks functions to see if there are\nany parameters that are not being used.\n\nTo reduce false positives it ignores:\n- methods\n- parameters that do not have a name or are underscored\n- functions in test files\n- functions with empty bodies or those with just a return stmt",
- Default: "false",
- },
- {
- Name: "\"unusedresult\"",
- Doc: "check for unused results of calls to some functions\n\nSome functions like fmt.Errorf return a result and have no side effects,\nso it is always a mistake to discard the result. This analyzer reports\ncalls to certain functions in which the result of the call is ignored.\n\nThe set of functions may be controlled using flags.",
- Default: "true",
- },
- {
- Name: "\"unusedwrite\"",
- Doc: "checks for unused writes\n\nThe analyzer reports instances of writes to struct fields and\narrays that are never read. Specifically, when a struct object\nor an array is copied, its elements are copied implicitly by\nthe compiler, and any element write to this copy does nothing\nwith the original object.\n\nFor example:\n\n\ttype T struct { x int }\n\tfunc f(input []T) {\n\t\tfor i, v := range input { // v is a copy\n\t\t\tv.x = i // unused write to field x\n\t\t}\n\t}\n\nAnother example is about non-pointer receiver:\n\n\ttype T struct { x int }\n\tfunc (t T) f() { // t is a copy\n\t\tt.x = i // unused write to field x\n\t}\n",
- Default: "false",
- },
- {
- Name: "\"useany\"",
- Doc: "check for constraints that could be simplified to \"any\"",
- Default: "false",
- },
- {
- Name: "\"fillreturns\"",
- Doc: "suggest fixes for errors due to an incorrect number of return values\n\nThis checker provides suggested fixes for type errors of the\ntype \"wrong number of return values (want %d, got %d)\". For example:\n\tfunc m() (int, string, *bool, error) {\n\t\treturn\n\t}\nwill turn into\n\tfunc m() (int, string, *bool, error) {\n\t\treturn 0, \"\", nil, nil\n\t}\n\nThis functionality is similar to https://github.com/sqs/goreturns.\n",
- Default: "true",
- },
- {
- Name: "\"nonewvars\"",
- Doc: "suggested fixes for \"no new vars on left side of :=\"\n\nThis checker provides suggested fixes for type errors of the\ntype \"no new vars on left side of :=\". For example:\n\tz := 1\n\tz := 2\nwill turn into\n\tz := 1\n\tz = 2\n",
- Default: "true",
- },
- {
- Name: "\"noresultvalues\"",
- Doc: "suggested fixes for unexpected return values\n\nThis checker provides suggested fixes for type errors of the\ntype \"no result values expected\" or \"too many return values\".\nFor example:\n\tfunc z() { return nil }\nwill turn into\n\tfunc z() { return }\n",
- Default: "true",
- },
- {
- Name: "\"undeclaredname\"",
- Doc: "suggested fixes for \"undeclared name: <>\"\n\nThis checker provides suggested fixes for type errors of the\ntype \"undeclared name: <>\". It will either insert a new statement,\nsuch as:\n\n\"<> := \"\n\nor a new function declaration, such as:\n\nfunc <>(inferred parameters) {\n\tpanic(\"implement me!\")\n}\n",
- Default: "true",
- },
- {
- Name: "\"fillstruct\"",
- Doc: "note incomplete struct initializations\n\nThis analyzer provides diagnostics for any struct literals that do not have\nany fields initialized. Because the suggested fix for this analysis is\nexpensive to compute, callers should compute it separately, using the\nSuggestedFix function below.\n",
- Default: "true",
- },
- {
- Name: "\"stubmethods\"",
- Doc: "stub methods analyzer\n\nThis analyzer generates method stubs for concrete types\nin order to implement a target interface",
- Default: "true",
- },
- },
- },
- Default: "{}",
- Hierarchy: "ui.diagnostic",
- },
- {
- Name: "staticcheck",
- Type: "bool",
- Doc: "staticcheck enables additional analyses from staticcheck.io.\n",
- Default: "false",
- Status: "experimental",
- Hierarchy: "ui.diagnostic",
- },
- {
- Name: "annotations",
- Type: "map[string]bool",
- Doc: "annotations specifies the various kinds of optimization diagnostics\nthat should be reported by the gc_details command.\n",
- EnumKeys: EnumKeys{
- ValueType: "bool",
- Keys: []EnumKey{
- {
- Name: "\"bounds\"",
- Doc: "`\"bounds\"` controls bounds checking diagnostics.\n",
- Default: "true",
- },
- {
- Name: "\"escape\"",
- Doc: "`\"escape\"` controls diagnostics about escape choices.\n",
- Default: "true",
- },
- {
- Name: "\"inline\"",
- Doc: "`\"inline\"` controls diagnostics about inlining choices.\n",
- Default: "true",
- },
- {
- Name: "\"nil\"",
- Doc: "`\"nil\"` controls nil checks.\n",
- Default: "true",
- },
- },
- },
- Default: "{\"bounds\":true,\"escape\":true,\"inline\":true,\"nil\":true}",
- Status: "experimental",
- Hierarchy: "ui.diagnostic",
- },
- {
- Name: "diagnosticsDelay",
- Type: "time.Duration",
- Doc: "diagnosticsDelay controls the amount of time that gopls waits\nafter the most recent file modification before computing deep diagnostics.\nSimple diagnostics (parsing and type-checking) are always run immediately\non recently modified packages.\n\nThis option must be set to a valid duration string, for example `\"250ms\"`.\n",
- Default: "\"250ms\"",
- Status: "advanced",
- Hierarchy: "ui.diagnostic",
- },
- {
- Name: "experimentalWatchedFileDelay",
- Type: "time.Duration",
- Doc: "experimentalWatchedFileDelay controls the amount of time that gopls waits\nfor additional workspace/didChangeWatchedFiles notifications to arrive,\nbefore processing all such notifications in a single batch. This is\nintended for use by LSP clients that don't support their own batching of\nfile system notifications.\n\nThis option must be set to a valid duration string, for example `\"100ms\"`.\n",
- Default: "\"0s\"",
- Status: "experimental",
- Hierarchy: "ui.diagnostic",
- },
- {
- Name: "codelenses",
- Type: "map[string]bool",
- Doc: "codelenses overrides the enabled/disabled state of code lenses. See the\n\"Code Lenses\" section of the\n[Settings page](https://github.com/golang/tools/blob/master/gopls/doc/settings.md#code-lenses)\nfor the list of supported lenses.\n\nExample Usage:\n\n```json5\n\"gopls\": {\n...\n \"codelenses\": {\n \"generate\": false, // Don't show the `go generate` lens.\n \"gc_details\": true // Show a code lens toggling the display of gc's choices.\n }\n...\n}\n```\n",
- EnumKeys: EnumKeys{
- ValueType: "bool",
- Keys: []EnumKey{
- {
- Name: "\"gc_details\"",
- Doc: "Toggle the calculation of gc annotations.",
- Default: "false",
- },
- {
- Name: "\"generate\"",
- Doc: "Runs `go generate` for a given directory.",
- Default: "true",
- },
- {
- Name: "\"regenerate_cgo\"",
- Doc: "Regenerates cgo definitions.",
- Default: "true",
- },
- {
- Name: "\"test\"",
- Doc: "Runs `go test` for a specific set of test or benchmark functions.",
- Default: "false",
- },
- {
- Name: "\"tidy\"",
- Doc: "Runs `go mod tidy` for a module.",
- Default: "true",
- },
- {
- Name: "\"upgrade_dependency\"",
- Doc: "Upgrades a dependency in the go.mod file for a module.",
- Default: "true",
- },
- {
- Name: "\"vendor\"",
- Doc: "Runs `go mod vendor` for a module.",
- Default: "true",
- },
- },
- },
- Default: "{\"gc_details\":false,\"generate\":true,\"regenerate_cgo\":true,\"tidy\":true,\"upgrade_dependency\":true,\"vendor\":true}",
- Hierarchy: "ui",
- },
- {
- Name: "semanticTokens",
- Type: "bool",
- Doc: "semanticTokens controls whether the LSP server will send\nsemantic tokens to the client.\n",
- Default: "false",
- Status: "experimental",
- Hierarchy: "ui",
- },
- {
- Name: "local",
- Type: "string",
- Doc: "local is the equivalent of the `goimports -local` flag, which puts\nimports beginning with this string after third-party packages. It should\nbe the prefix of the import path whose imports should be grouped\nseparately.\n",
- Default: "\"\"",
- Hierarchy: "formatting",
- },
- {
- Name: "gofumpt",
- Type: "bool",
- Doc: "gofumpt indicates if we should run gofumpt formatting.\n",
- Default: "false",
- Hierarchy: "formatting",
- },
- {
- Name: "verboseOutput",
- Type: "bool",
- Doc: "verboseOutput enables additional debug logging.\n",
- Default: "false",
- Status: "debug",
- },
- },
- },
- Commands: []*CommandJSON{
- {
- Command: "gopls.add_dependency",
- Title: "Add a dependency",
- Doc: "Adds a dependency to the go.mod file for a module.",
- ArgDoc: "{\n\t// The go.mod file URI.\n\t\"URI\": string,\n\t// Additional args to pass to the go command.\n\t\"GoCmdArgs\": []string,\n\t// Whether to add a require directive.\n\t\"AddRequire\": bool,\n}",
- },
- {
- Command: "gopls.add_import",
- Title: "Add an import",
- Doc: "Ask the server to add an import path to a given Go file. The method will\ncall applyEdit on the client so that clients don't have to apply the edit\nthemselves.",
- ArgDoc: "{\n\t// ImportPath is the target import path that should\n\t// be added to the URI file\n\t\"ImportPath\": string,\n\t// URI is the file that the ImportPath should be\n\t// added to\n\t\"URI\": string,\n}",
- },
- {
- Command: "gopls.apply_fix",
- Title: "Apply a fix",
- Doc: "Applies a fix to a region of source code.",
- ArgDoc: "{\n\t// The fix to apply.\n\t\"Fix\": string,\n\t// The file URI for the document to fix.\n\t\"URI\": string,\n\t// The document range to scan for fixes.\n\t\"Range\": {\n\t\t\"start\": {\n\t\t\t\"line\": uint32,\n\t\t\t\"character\": uint32,\n\t\t},\n\t\t\"end\": {\n\t\t\t\"line\": uint32,\n\t\t\t\"character\": uint32,\n\t\t},\n\t},\n}",
- },
- {
- Command: "gopls.check_upgrades",
- Title: "Check for upgrades",
- Doc: "Checks for module upgrades.",
- ArgDoc: "{\n\t// The go.mod file URI.\n\t\"URI\": string,\n\t// The modules to check.\n\t\"Modules\": []string,\n}",
- },
- {
- Command: "gopls.edit_go_directive",
- Title: "Run go mod edit -go=version",
- Doc: "Runs `go mod edit -go=version` for a module.",
- ArgDoc: "{\n\t// Any document URI within the relevant module.\n\t\"URI\": string,\n\t// The version to pass to `go mod edit -go`.\n\t\"Version\": string,\n}",
- },
- {
- Command: "gopls.gc_details",
- Title: "Toggle gc_details",
- Doc: "Toggle the calculation of gc annotations.",
- ArgDoc: "string",
- },
- {
- Command: "gopls.generate",
- Title: "Run go generate",
- Doc: "Runs `go generate` for a given directory.",
- ArgDoc: "{\n\t// URI for the directory to generate.\n\t\"Dir\": string,\n\t// Whether to generate recursively (go generate ./...)\n\t\"Recursive\": bool,\n}",
- },
- {
- Command: "gopls.generate_gopls_mod",
- Title: "Generate gopls.mod",
- Doc: "(Re)generate the gopls.mod file for a workspace.",
- ArgDoc: "{\n\t// The file URI.\n\t\"URI\": string,\n}",
- },
- {
- Command: "gopls.go_get_package",
- Title: "go get a package",
- Doc: "Runs `go get` to fetch a package.",
- ArgDoc: "{\n\t// Any document URI within the relevant module.\n\t\"URI\": string,\n\t// The package to go get.\n\t\"Pkg\": string,\n\t\"AddRequire\": bool,\n}",
- },
- {
- Command: "gopls.list_imports",
- Title: "List imports of a file and its package",
- Doc: "Retrieve a list of imports in the given Go file, and the package it\nbelongs to.",
- ArgDoc: "{\n\t// The file URI.\n\t\"URI\": string,\n}",
- ResultDoc: "{\n\t// Imports is a list of imports in the requested file.\n\t\"Imports\": []{\n\t\t\"Path\": string,\n\t\t\"Name\": string,\n\t},\n\t// PackageImports is a list of all imports in the requested file's package.\n\t\"PackageImports\": []{\n\t\t\"Path\": string,\n\t},\n}",
- },
- {
- Command: "gopls.list_known_packages",
- Title: "List known packages",
- Doc: "Retrieve a list of packages that are importable from the given URI.",
- ArgDoc: "{\n\t// The file URI.\n\t\"URI\": string,\n}",
- ResultDoc: "{\n\t// Packages is a list of packages relative\n\t// to the URIArg passed by the command request.\n\t// In other words, it omits paths that are already\n\t// imported or cannot be imported due to compiler\n\t// restrictions.\n\t\"Packages\": []string,\n}",
- },
- {
- Command: "gopls.regenerate_cgo",
- Title: "Regenerate cgo",
- Doc: "Regenerates cgo definitions.",
- ArgDoc: "{\n\t// The file URI.\n\t\"URI\": string,\n}",
- },
- {
- Command: "gopls.remove_dependency",
- Title: "Remove a dependency",
- Doc: "Removes a dependency from the go.mod file of a module.",
- ArgDoc: "{\n\t// The go.mod file URI.\n\t\"URI\": string,\n\t// The module path to remove.\n\t\"ModulePath\": string,\n\t\"OnlyDiagnostic\": bool,\n}",
- },
- {
- Command: "gopls.run_tests",
- Title: "Run test(s)",
- Doc: "Runs `go test` for a specific set of test or benchmark functions.",
- ArgDoc: "{\n\t// The test file containing the tests to run.\n\t\"URI\": string,\n\t// Specific test names to run, e.g. TestFoo.\n\t\"Tests\": []string,\n\t// Specific benchmarks to run, e.g. BenchmarkFoo.\n\t\"Benchmarks\": []string,\n}",
- },
- {
- Command: "gopls.run_vulncheck_exp",
- Title: "Run vulncheck (experimental)",
- Doc: "Run vulnerability check (`govulncheck`).",
- ArgDoc: "{\n\t// Dir is the directory from which vulncheck will run from.\n\t\"Dir\": string,\n\t// Package pattern. E.g. \"\", \".\", \"./...\".\n\t\"Pattern\": string,\n}",
- ResultDoc: "{\n\t\"Vuln\": []{\n\t\t\"ID\": string,\n\t\t\"Details\": string,\n\t\t\"Aliases\": []string,\n\t\t\"Symbol\": string,\n\t\t\"PkgPath\": string,\n\t\t\"ModPath\": string,\n\t\t\"URL\": string,\n\t\t\"CurrentVersion\": string,\n\t\t\"FixedVersion\": string,\n\t\t\"CallStacks\": [][]golang.org/x/tools/internal/lsp/command.StackEntry,\n\t},\n}",
- },
- {
- Command: "gopls.start_debugging",
- Title: "Start the gopls debug server",
- Doc: "Start the gopls debug server if it isn't running, and return the debug\naddress.",
- ArgDoc: "{\n\t// Optional: the address (including port) for the debug server to listen on.\n\t// If not provided, the debug server will bind to \"localhost:0\", and the\n\t// full debug URL will be contained in the result.\n\t// \n\t// If there is more than one gopls instance along the serving path (i.e. you\n\t// are using a daemon), each gopls instance will attempt to start debugging.\n\t// If Addr specifies a port, only the daemon will be able to bind to that\n\t// port, and each intermediate gopls instance will fail to start debugging.\n\t// For this reason it is recommended not to specify a port (or equivalently,\n\t// to specify \":0\").\n\t// \n\t// If the server was already debugging this field has no effect, and the\n\t// result will contain the previously configured debug URL(s).\n\t\"Addr\": string,\n}",
- ResultDoc: "{\n\t// The URLs to use to access the debug servers, for all gopls instances in\n\t// the serving path. For the common case of a single gopls instance (i.e. no\n\t// daemon), this will be exactly one address.\n\t// \n\t// In the case of one or more gopls instances forwarding the LSP to a daemon,\n\t// URLs will contain debug addresses for each server in the serving path, in\n\t// serving order. The daemon debug address will be the last entry in the\n\t// slice. If any intermediate gopls instance fails to start debugging, no\n\t// error will be returned but the debug URL for that server in the URLs slice\n\t// will be empty.\n\t\"URLs\": []string,\n}",
- },
- {
- Command: "gopls.test",
- Title: "Run test(s) (legacy)",
- Doc: "Runs `go test` for a specific set of test or benchmark functions.",
- ArgDoc: "string,\n[]string,\n[]string",
- },
- {
- Command: "gopls.tidy",
- Title: "Run go mod tidy",
- Doc: "Runs `go mod tidy` for a module.",
- ArgDoc: "{\n\t// The file URIs.\n\t\"URIs\": []string,\n}",
- },
- {
- Command: "gopls.toggle_gc_details",
- Title: "Toggle gc_details",
- Doc: "Toggle the calculation of gc annotations.",
- ArgDoc: "{\n\t// The file URI.\n\t\"URI\": string,\n}",
- },
- {
- Command: "gopls.update_go_sum",
- Title: "Update go.sum",
- Doc: "Updates the go.sum file for a module.",
- ArgDoc: "{\n\t// The file URIs.\n\t\"URIs\": []string,\n}",
- },
- {
- Command: "gopls.upgrade_dependency",
- Title: "Upgrade a dependency",
- Doc: "Upgrades a dependency in the go.mod file for a module.",
- ArgDoc: "{\n\t// The go.mod file URI.\n\t\"URI\": string,\n\t// Additional args to pass to the go command.\n\t\"GoCmdArgs\": []string,\n\t// Whether to add a require directive.\n\t\"AddRequire\": bool,\n}",
- },
- {
- Command: "gopls.vendor",
- Title: "Run go mod vendor",
- Doc: "Runs `go mod vendor` for a module.",
- ArgDoc: "{\n\t// The file URI.\n\t\"URI\": string,\n}",
- },
- },
- Lenses: []*LensJSON{
- {
- Lens: "gc_details",
- Title: "Toggle gc_details",
- Doc: "Toggle the calculation of gc annotations.",
- },
- {
- Lens: "generate",
- Title: "Run go generate",
- Doc: "Runs `go generate` for a given directory.",
- },
- {
- Lens: "regenerate_cgo",
- Title: "Regenerate cgo",
- Doc: "Regenerates cgo definitions.",
- },
- {
- Lens: "test",
- Title: "Run test(s) (legacy)",
- Doc: "Runs `go test` for a specific set of test or benchmark functions.",
- },
- {
- Lens: "tidy",
- Title: "Run go mod tidy",
- Doc: "Runs `go mod tidy` for a module.",
- },
- {
- Lens: "upgrade_dependency",
- Title: "Upgrade a dependency",
- Doc: "Upgrades a dependency in the go.mod file for a module.",
- },
- {
- Lens: "vendor",
- Title: "Run go mod vendor",
- Doc: "Runs `go mod vendor` for a module.",
- },
- },
- Analyzers: []*AnalyzerJSON{
- {
- Name: "asmdecl",
- Doc: "report mismatches between assembly files and Go declarations",
- Default: true,
- },
- {
- Name: "assign",
- Doc: "check for useless assignments\n\nThis checker reports assignments of the form x = x or a[i] = a[i].\nThese are almost always useless, and even when they aren't they are\nusually a mistake.",
- Default: true,
- },
- {
- Name: "atomic",
- Doc: "check for common mistakes using the sync/atomic package\n\nThe atomic checker looks for assignment statements of the form:\n\n\tx = atomic.AddUint64(&x, 1)\n\nwhich are not atomic.",
- Default: true,
- },
- {
- Name: "atomicalign",
- Doc: "check for non-64-bits-aligned arguments to sync/atomic functions",
- Default: true,
- },
- {
- Name: "bools",
- Doc: "check for common mistakes involving boolean operators",
- Default: true,
- },
- {
- Name: "buildtag",
- Doc: "check that +build tags are well-formed and correctly located",
- Default: true,
- },
- {
- Name: "cgocall",
- Doc: "detect some violations of the cgo pointer passing rules\n\nCheck for invalid cgo pointer passing.\nThis looks for code that uses cgo to call C code passing values\nwhose types are almost always invalid according to the cgo pointer\nsharing rules.\nSpecifically, it warns about attempts to pass a Go chan, map, func,\nor slice to C, either directly, or via a pointer, array, or struct.",
- Default: true,
- },
- {
- Name: "composites",
- Doc: "check for unkeyed composite literals\n\nThis analyzer reports a diagnostic for composite literals of struct\ntypes imported from another package that do not use the field-keyed\nsyntax. Such literals are fragile because the addition of a new field\n(even if unexported) to the struct will cause compilation to fail.\n\nAs an example,\n\n\terr = &net.DNSConfigError{err}\n\nshould be replaced by:\n\n\terr = &net.DNSConfigError{Err: err}\n",
- Default: true,
- },
- {
- Name: "copylocks",
- Doc: "check for locks erroneously passed by value\n\nInadvertently copying a value containing a lock, such as sync.Mutex or\nsync.WaitGroup, may cause both copies to malfunction. Generally such\nvalues should be referred to through a pointer.",
- Default: true,
- },
- {
- Name: "deepequalerrors",
- Doc: "check for calls of reflect.DeepEqual on error values\n\nThe deepequalerrors checker looks for calls of the form:\n\n reflect.DeepEqual(err1, err2)\n\nwhere err1 and err2 are errors. Using reflect.DeepEqual to compare\nerrors is discouraged.",
- Default: true,
- },
- {
- Name: "errorsas",
- Doc: "report passing non-pointer or non-error values to errors.As\n\nThe errorsas analysis reports calls to errors.As where the type\nof the second argument is not a pointer to a type implementing error.",
- Default: true,
- },
- {
- Name: "fieldalignment",
- Doc: "find structs that would use less memory if their fields were sorted\n\nThis analyzer find structs that can be rearranged to use less memory, and provides\na suggested edit with the optimal order.\n\nNote that there are two different diagnostics reported. One checks struct size,\nand the other reports \"pointer bytes\" used. Pointer bytes is how many bytes of the\nobject that the garbage collector has to potentially scan for pointers, for example:\n\n\tstruct { uint32; string }\n\nhave 16 pointer bytes because the garbage collector has to scan up through the string's\ninner pointer.\n\n\tstruct { string; *uint32 }\n\nhas 24 pointer bytes because it has to scan further through the *uint32.\n\n\tstruct { string; uint32 }\n\nhas 8 because it can stop immediately after the string pointer.\n",
- },
- {
- Name: "httpresponse",
- Doc: "check for mistakes using HTTP responses\n\nA common mistake when using the net/http package is to defer a function\ncall to close the http.Response Body before checking the error that\ndetermines whether the response is valid:\n\n\tresp, err := http.Head(url)\n\tdefer resp.Body.Close()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t// (defer statement belongs here)\n\nThis checker helps uncover latent nil dereference bugs by reporting a\ndiagnostic for such mistakes.",
- Default: true,
- },
- {
- Name: "ifaceassert",
- Doc: "detect impossible interface-to-interface type assertions\n\nThis checker flags type assertions v.(T) and corresponding type-switch cases\nin which the static type V of v is an interface that cannot possibly implement\nthe target interface T. This occurs when V and T contain methods with the same\nname but different signatures. Example:\n\n\tvar v interface {\n\t\tRead()\n\t}\n\t_ = v.(io.Reader)\n\nThe Read method in v has a different signature than the Read method in\nio.Reader, so this assertion cannot succeed.\n",
- Default: true,
- },
- {
- Name: "infertypeargs",
- Doc: "check for unnecessary type arguments in call expressions\n\nExplicit type arguments may be omitted from call expressions if they can be\ninferred from function arguments, or from other type arguments:\n\n\tfunc f[T any](T) {}\n\t\n\tfunc _() {\n\t\tf[string](\"foo\") // string could be inferred\n\t}\n",
- Default: true,
- },
- {
- Name: "loopclosure",
- Doc: "check references to loop variables from within nested functions\n\nThis analyzer checks for references to loop variables from within a\nfunction literal inside the loop body. It checks only instances where\nthe function literal is called in a defer or go statement that is the\nlast statement in the loop body, as otherwise we would need whole\nprogram analysis.\n\nFor example:\n\n\tfor i, v := range s {\n\t\tgo func() {\n\t\t\tprintln(i, v) // not what you might expect\n\t\t}()\n\t}\n\nSee: https://golang.org/doc/go_faq.html#closures_and_goroutines",
- Default: true,
- },
- {
- Name: "lostcancel",
- Doc: "check cancel func returned by context.WithCancel is called\n\nThe cancellation function returned by context.WithCancel, WithTimeout,\nand WithDeadline must be called or the new context will remain live\nuntil its parent context is cancelled.\n(The background context is never cancelled.)",
- Default: true,
- },
- {
- Name: "nilfunc",
- Doc: "check for useless comparisons between functions and nil\n\nA useless comparison is one like f == nil as opposed to f() == nil.",
- Default: true,
- },
- {
- Name: "nilness",
- Doc: "check for redundant or impossible nil comparisons\n\nThe nilness checker inspects the control-flow graph of each function in\na package and reports nil pointer dereferences, degenerate nil\npointers, and panics with nil values. A degenerate comparison is of the form\nx==nil or x!=nil where x is statically known to be nil or non-nil. These are\noften a mistake, especially in control flow related to errors. Panics with nil\nvalues are checked because they are not detectable by\n\n\tif r := recover(); r != nil {\n\nThis check reports conditions such as:\n\n\tif f == nil { // impossible condition (f is a function)\n\t}\n\nand:\n\n\tp := &v\n\t...\n\tif p != nil { // tautological condition\n\t}\n\nand:\n\n\tif p == nil {\n\t\tprint(*p) // nil dereference\n\t}\n\nand:\n\n\tif p == nil {\n\t\tpanic(p)\n\t}\n",
- },
- {
- Name: "printf",
- Doc: "check consistency of Printf format strings and arguments\n\nThe check applies to known functions (for example, those in package fmt)\nas well as any detected wrappers of known functions.\n\nA function that wants to avail itself of printf checking but is not\nfound by this analyzer's heuristics (for example, due to use of\ndynamic calls) can insert a bogus call:\n\n\tif false {\n\t\t_ = fmt.Sprintf(format, args...) // enable printf checking\n\t}\n\nThe -funcs flag specifies a comma-separated list of names of additional\nknown formatting functions or methods. If the name contains a period,\nit must denote a specific function using one of the following forms:\n\n\tdir/pkg.Function\n\tdir/pkg.Type.Method\n\t(*dir/pkg.Type).Method\n\nOtherwise the name is interpreted as a case-insensitive unqualified\nidentifier such as \"errorf\". Either way, if a listed name ends in f, the\nfunction is assumed to be Printf-like, taking a format string before the\nargument list. Otherwise it is assumed to be Print-like, taking a list\nof arguments with no format string.\n",
- Default: true,
- },
- {
- Name: "shadow",
- Doc: "check for possible unintended shadowing of variables\n\nThis analyzer check for shadowed variables.\nA shadowed variable is a variable declared in an inner scope\nwith the same name and type as a variable in an outer scope,\nand where the outer variable is mentioned after the inner one\nis declared.\n\n(This definition can be refined; the module generates too many\nfalse positives and is not yet enabled by default.)\n\nFor example:\n\n\tfunc BadRead(f *os.File, buf []byte) error {\n\t\tvar err error\n\t\tfor {\n\t\t\tn, err := f.Read(buf) // shadows the function variable 'err'\n\t\t\tif err != nil {\n\t\t\t\tbreak // causes return of wrong value\n\t\t\t}\n\t\t\tfoo(buf)\n\t\t}\n\t\treturn err\n\t}\n",
- },
- {
- Name: "shift",
- Doc: "check for shifts that equal or exceed the width of the integer",
- Default: true,
- },
- {
- Name: "simplifycompositelit",
- Doc: "check for composite literal simplifications\n\nAn array, slice, or map composite literal of the form:\n\t[]T{T{}, T{}}\nwill be simplified to:\n\t[]T{{}, {}}\n\nThis is one of the simplifications that \"gofmt -s\" applies.",
- Default: true,
- },
- {
- Name: "simplifyrange",
- Doc: "check for range statement simplifications\n\nA range of the form:\n\tfor x, _ = range v {...}\nwill be simplified to:\n\tfor x = range v {...}\n\nA range of the form:\n\tfor _ = range v {...}\nwill be simplified to:\n\tfor range v {...}\n\nThis is one of the simplifications that \"gofmt -s\" applies.",
- Default: true,
- },
- {
- Name: "simplifyslice",
- Doc: "check for slice simplifications\n\nA slice expression of the form:\n\ts[a:len(s)]\nwill be simplified to:\n\ts[a:]\n\nThis is one of the simplifications that \"gofmt -s\" applies.",
- Default: true,
- },
- {
- Name: "sortslice",
- Doc: "check the argument type of sort.Slice\n\nsort.Slice requires an argument of a slice type. Check that\nthe interface{} value passed to sort.Slice is actually a slice.",
- Default: true,
- },
- {
- Name: "stdmethods",
- Doc: "check signature of methods of well-known interfaces\n\nSometimes a type may be intended to satisfy an interface but may fail to\ndo so because of a mistake in its method signature.\nFor example, the result of this WriteTo method should be (int64, error),\nnot error, to satisfy io.WriterTo:\n\n\ttype myWriterTo struct{...}\n func (myWriterTo) WriteTo(w io.Writer) error { ... }\n\nThis check ensures that each method whose name matches one of several\nwell-known interface methods from the standard library has the correct\nsignature for that interface.\n\nChecked method names include:\n\tFormat GobEncode GobDecode MarshalJSON MarshalXML\n\tPeek ReadByte ReadFrom ReadRune Scan Seek\n\tUnmarshalJSON UnreadByte UnreadRune WriteByte\n\tWriteTo\n",
- Default: true,
- },
- {
- Name: "stringintconv",
- Doc: "check for string(int) conversions\n\nThis checker flags conversions of the form string(x) where x is an integer\n(but not byte or rune) type. Such conversions are discouraged because they\nreturn the UTF-8 representation of the Unicode code point x, and not a decimal\nstring representation of x as one might expect. Furthermore, if x denotes an\ninvalid code point, the conversion cannot be statically rejected.\n\nFor conversions that intend on using the code point, consider replacing them\nwith string(rune(x)). Otherwise, strconv.Itoa and its equivalents return the\nstring representation of the value in the desired base.\n",
- Default: true,
- },
- {
- Name: "structtag",
- Doc: "check that struct field tags conform to reflect.StructTag.Get\n\nAlso report certain struct tags (json, xml) used with unexported fields.",
- Default: true,
- },
- {
- Name: "testinggoroutine",
- Doc: "report calls to (*testing.T).Fatal from goroutines started by a test.\n\nFunctions that abruptly terminate a test, such as the Fatal, Fatalf, FailNow, and\nSkip{,f,Now} methods of *testing.T, must be called from the test goroutine itself.\nThis checker detects calls to these functions that occur within a goroutine\nstarted by the test. For example:\n\nfunc TestFoo(t *testing.T) {\n go func() {\n t.Fatal(\"oops\") // error: (*T).Fatal called from non-test goroutine\n }()\n}\n",
- Default: true,
- },
- {
- Name: "tests",
- Doc: "check for common mistaken usages of tests and examples\n\nThe tests checker walks Test, Benchmark and Example functions checking\nmalformed names, wrong signatures and examples documenting non-existent\nidentifiers.\n\nPlease see the documentation for package testing in golang.org/pkg/testing\nfor the conventions that are enforced for Tests, Benchmarks, and Examples.",
- Default: true,
- },
- {
- Name: "unmarshal",
- Doc: "report passing non-pointer or non-interface values to unmarshal\n\nThe unmarshal analysis reports calls to functions such as json.Unmarshal\nin which the argument type is not a pointer or an interface.",
- Default: true,
- },
- {
- Name: "unreachable",
- Doc: "check for unreachable code\n\nThe unreachable analyzer finds statements that execution can never reach\nbecause they are preceded by an return statement, a call to panic, an\ninfinite loop, or similar constructs.",
- Default: true,
- },
- {
- Name: "unsafeptr",
- Doc: "check for invalid conversions of uintptr to unsafe.Pointer\n\nThe unsafeptr analyzer reports likely incorrect uses of unsafe.Pointer\nto convert integers to pointers. A conversion from uintptr to\nunsafe.Pointer is invalid if it implies that there is a uintptr-typed\nword in memory that holds a pointer value, because that word will be\ninvisible to stack copying and to the garbage collector.",
- Default: true,
- },
- {
- Name: "unusedparams",
- Doc: "check for unused parameters of functions\n\nThe unusedparams analyzer checks functions to see if there are\nany parameters that are not being used.\n\nTo reduce false positives it ignores:\n- methods\n- parameters that do not have a name or are underscored\n- functions in test files\n- functions with empty bodies or those with just a return stmt",
- },
- {
- Name: "unusedresult",
- Doc: "check for unused results of calls to some functions\n\nSome functions like fmt.Errorf return a result and have no side effects,\nso it is always a mistake to discard the result. This analyzer reports\ncalls to certain functions in which the result of the call is ignored.\n\nThe set of functions may be controlled using flags.",
- Default: true,
- },
- {
- Name: "unusedwrite",
- Doc: "checks for unused writes\n\nThe analyzer reports instances of writes to struct fields and\narrays that are never read. Specifically, when a struct object\nor an array is copied, its elements are copied implicitly by\nthe compiler, and any element write to this copy does nothing\nwith the original object.\n\nFor example:\n\n\ttype T struct { x int }\n\tfunc f(input []T) {\n\t\tfor i, v := range input { // v is a copy\n\t\t\tv.x = i // unused write to field x\n\t\t}\n\t}\n\nAnother example is about non-pointer receiver:\n\n\ttype T struct { x int }\n\tfunc (t T) f() { // t is a copy\n\t\tt.x = i // unused write to field x\n\t}\n",
- },
- {
- Name: "useany",
- Doc: "check for constraints that could be simplified to \"any\"",
- },
- {
- Name: "fillreturns",
- Doc: "suggest fixes for errors due to an incorrect number of return values\n\nThis checker provides suggested fixes for type errors of the\ntype \"wrong number of return values (want %d, got %d)\". For example:\n\tfunc m() (int, string, *bool, error) {\n\t\treturn\n\t}\nwill turn into\n\tfunc m() (int, string, *bool, error) {\n\t\treturn 0, \"\", nil, nil\n\t}\n\nThis functionality is similar to https://github.com/sqs/goreturns.\n",
- Default: true,
- },
- {
- Name: "nonewvars",
- Doc: "suggested fixes for \"no new vars on left side of :=\"\n\nThis checker provides suggested fixes for type errors of the\ntype \"no new vars on left side of :=\". For example:\n\tz := 1\n\tz := 2\nwill turn into\n\tz := 1\n\tz = 2\n",
- Default: true,
- },
- {
- Name: "noresultvalues",
- Doc: "suggested fixes for unexpected return values\n\nThis checker provides suggested fixes for type errors of the\ntype \"no result values expected\" or \"too many return values\".\nFor example:\n\tfunc z() { return nil }\nwill turn into\n\tfunc z() { return }\n",
- Default: true,
- },
- {
- Name: "undeclaredname",
- Doc: "suggested fixes for \"undeclared name: <>\"\n\nThis checker provides suggested fixes for type errors of the\ntype \"undeclared name: <>\". It will either insert a new statement,\nsuch as:\n\n\"<> := \"\n\nor a new function declaration, such as:\n\nfunc <>(inferred parameters) {\n\tpanic(\"implement me!\")\n}\n",
- Default: true,
- },
- {
- Name: "fillstruct",
- Doc: "note incomplete struct initializations\n\nThis analyzer provides diagnostics for any struct literals that do not have\nany fields initialized. Because the suggested fix for this analysis is\nexpensive to compute, callers should compute it separately, using the\nSuggestedFix function below.\n",
- Default: true,
- },
- {
- Name: "stubmethods",
- Doc: "stub methods analyzer\n\nThis analyzer generates method stubs for concrete types\nin order to implement a target interface",
- Default: true,
- },
- },
-}
diff --git a/internal/lsp/source/call_hierarchy.go b/internal/lsp/source/call_hierarchy.go
deleted file mode 100644
index 991c30aeb..000000000
--- a/internal/lsp/source/call_hierarchy.go
+++ /dev/null
@@ -1,310 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package source
-
-import (
- "context"
- "fmt"
- "go/ast"
- "go/token"
- "go/types"
- "path/filepath"
-
- "golang.org/x/tools/go/ast/astutil"
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/lsp/debug/tag"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/span"
- errors "golang.org/x/xerrors"
-)
-
-// PrepareCallHierarchy returns an array of CallHierarchyItem for a file and the position within the file.
-func PrepareCallHierarchy(ctx context.Context, snapshot Snapshot, fh FileHandle, pos protocol.Position) ([]protocol.CallHierarchyItem, error) {
- ctx, done := event.Start(ctx, "source.PrepareCallHierarchy")
- defer done()
-
- identifier, err := Identifier(ctx, snapshot, fh, pos)
- if err != nil {
- if errors.Is(err, ErrNoIdentFound) || errors.Is(err, errNoObjectFound) {
- return nil, nil
- }
- return nil, err
- }
-
- // The identifier can be nil if it is an import spec.
- if identifier == nil || identifier.Declaration.obj == nil {
- return nil, nil
- }
-
- if _, ok := identifier.Declaration.obj.Type().Underlying().(*types.Signature); !ok {
- return nil, nil
- }
-
- if len(identifier.Declaration.MappedRange) == 0 {
- return nil, nil
- }
- declMappedRange := identifier.Declaration.MappedRange[0]
- rng, err := declMappedRange.Range()
- if err != nil {
- return nil, err
- }
-
- callHierarchyItem := protocol.CallHierarchyItem{
- Name: identifier.Name,
- Kind: protocol.Function,
- Tags: []protocol.SymbolTag{},
- Detail: fmt.Sprintf("%s • %s", identifier.Declaration.obj.Pkg().Path(), filepath.Base(declMappedRange.URI().Filename())),
- URI: protocol.DocumentURI(declMappedRange.URI()),
- Range: rng,
- SelectionRange: rng,
- }
- return []protocol.CallHierarchyItem{callHierarchyItem}, nil
-}
-
-// IncomingCalls returns an array of CallHierarchyIncomingCall for a file and the position within the file.
-func IncomingCalls(ctx context.Context, snapshot Snapshot, fh FileHandle, pos protocol.Position) ([]protocol.CallHierarchyIncomingCall, error) {
- ctx, done := event.Start(ctx, "source.IncomingCalls")
- defer done()
-
- refs, err := References(ctx, snapshot, fh, pos, false)
- if err != nil {
- if errors.Is(err, ErrNoIdentFound) || errors.Is(err, errNoObjectFound) {
- return nil, nil
- }
- return nil, err
- }
-
- return toProtocolIncomingCalls(ctx, snapshot, refs)
-}
-
-// toProtocolIncomingCalls returns an array of protocol.CallHierarchyIncomingCall for ReferenceInfo's.
-// References inside same enclosure are assigned to the same enclosing function.
-func toProtocolIncomingCalls(ctx context.Context, snapshot Snapshot, refs []*ReferenceInfo) ([]protocol.CallHierarchyIncomingCall, error) {
- // an enclosing node could have multiple calls to a reference, we only show the enclosure
- // once in the result but highlight all calls using FromRanges (ranges at which the calls occur)
- var incomingCalls = map[protocol.Location]*protocol.CallHierarchyIncomingCall{}
- for _, ref := range refs {
- refRange, err := ref.Range()
- if err != nil {
- return nil, err
- }
-
- callItem, err := enclosingNodeCallItem(snapshot, ref.pkg, ref.URI(), ref.ident.NamePos)
- if err != nil {
- event.Error(ctx, "error getting enclosing node", err, tag.Method.Of(ref.Name))
- continue
- }
- loc := protocol.Location{
- URI: callItem.URI,
- Range: callItem.Range,
- }
-
- if incomingCall, ok := incomingCalls[loc]; ok {
- incomingCall.FromRanges = append(incomingCall.FromRanges, refRange)
- continue
- }
- incomingCalls[loc] = &protocol.CallHierarchyIncomingCall{
- From: callItem,
- FromRanges: []protocol.Range{refRange},
- }
- }
-
- incomingCallItems := make([]protocol.CallHierarchyIncomingCall, 0, len(incomingCalls))
- for _, callItem := range incomingCalls {
- incomingCallItems = append(incomingCallItems, *callItem)
- }
- return incomingCallItems, nil
-}
-
-// enclosingNodeCallItem creates a CallHierarchyItem representing the function call at pos
-func enclosingNodeCallItem(snapshot Snapshot, pkg Package, uri span.URI, pos token.Pos) (protocol.CallHierarchyItem, error) {
- pgf, err := pkg.File(uri)
- if err != nil {
- return protocol.CallHierarchyItem{}, err
- }
-
- var funcDecl *ast.FuncDecl
- var funcLit *ast.FuncLit // innermost function literal
- var litCount int
- // Find the enclosing function, if any, and the number of func literals in between.
- path, _ := astutil.PathEnclosingInterval(pgf.File, pos, pos)
-outer:
- for _, node := range path {
- switch n := node.(type) {
- case *ast.FuncDecl:
- funcDecl = n
- break outer
- case *ast.FuncLit:
- litCount++
- if litCount > 1 {
- continue
- }
- funcLit = n
- }
- }
-
- nameIdent := path[len(path)-1].(*ast.File).Name
- kind := protocol.Package
- if funcDecl != nil {
- nameIdent = funcDecl.Name
- kind = protocol.Function
- }
-
- nameStart, nameEnd := nameIdent.NamePos, nameIdent.NamePos+token.Pos(len(nameIdent.Name))
- if funcLit != nil {
- nameStart, nameEnd = funcLit.Type.Func, funcLit.Type.Params.Pos()
- kind = protocol.Function
- }
- rng, err := NewMappedRange(snapshot.FileSet(), pgf.Mapper, nameStart, nameEnd).Range()
- if err != nil {
- return protocol.CallHierarchyItem{}, err
- }
-
- name := nameIdent.Name
- for i := 0; i < litCount; i++ {
- name += ".func()"
- }
-
- return protocol.CallHierarchyItem{
- Name: name,
- Kind: kind,
- Tags: []protocol.SymbolTag{},
- Detail: fmt.Sprintf("%s • %s", pkg.PkgPath(), filepath.Base(uri.Filename())),
- URI: protocol.DocumentURI(uri),
- Range: rng,
- SelectionRange: rng,
- }, nil
-}
-
-// OutgoingCalls returns an array of CallHierarchyOutgoingCall for a file and the position within the file.
-func OutgoingCalls(ctx context.Context, snapshot Snapshot, fh FileHandle, pos protocol.Position) ([]protocol.CallHierarchyOutgoingCall, error) {
- ctx, done := event.Start(ctx, "source.OutgoingCalls")
- defer done()
-
- identifier, err := Identifier(ctx, snapshot, fh, pos)
- if err != nil {
- if errors.Is(err, ErrNoIdentFound) || errors.Is(err, errNoObjectFound) {
- return nil, nil
- }
- return nil, err
- }
-
- if _, ok := identifier.Declaration.obj.Type().Underlying().(*types.Signature); !ok {
- return nil, nil
- }
- if identifier.Declaration.node == nil {
- return nil, nil
- }
- if len(identifier.Declaration.MappedRange) == 0 {
- return nil, nil
- }
- declMappedRange := identifier.Declaration.MappedRange[0]
- callExprs, err := collectCallExpressions(snapshot.FileSet(), declMappedRange.m, identifier.Declaration.node)
- if err != nil {
- return nil, err
- }
-
- return toProtocolOutgoingCalls(ctx, snapshot, fh, callExprs)
-}
-
-// collectCallExpressions collects call expression ranges inside a function.
-func collectCallExpressions(fset *token.FileSet, mapper *protocol.ColumnMapper, node ast.Node) ([]protocol.Range, error) {
- type callPos struct {
- start, end token.Pos
- }
- callPositions := []callPos{}
-
- ast.Inspect(node, func(n ast.Node) bool {
- if call, ok := n.(*ast.CallExpr); ok {
- var start, end token.Pos
- switch n := call.Fun.(type) {
- case *ast.SelectorExpr:
- start, end = n.Sel.NamePos, call.Lparen
- case *ast.Ident:
- start, end = n.NamePos, call.Lparen
- case *ast.FuncLit:
- // while we don't add the function literal as an 'outgoing' call
- // we still want to traverse into it
- return true
- default:
- // ignore any other kind of call expressions
- // for ex: direct function literal calls since that's not an 'outgoing' call
- return false
- }
- callPositions = append(callPositions, callPos{start: start, end: end})
- }
- return true
- })
-
- callRanges := []protocol.Range{}
- for _, call := range callPositions {
- callRange, err := NewMappedRange(fset, mapper, call.start, call.end).Range()
- if err != nil {
- return nil, err
- }
- callRanges = append(callRanges, callRange)
- }
- return callRanges, nil
-}
-
-// toProtocolOutgoingCalls returns an array of protocol.CallHierarchyOutgoingCall for ast call expressions.
-// Calls to the same function are assigned to the same declaration.
-func toProtocolOutgoingCalls(ctx context.Context, snapshot Snapshot, fh FileHandle, callRanges []protocol.Range) ([]protocol.CallHierarchyOutgoingCall, error) {
- // Multiple calls could be made to the same function, defined by "same declaration
- // AST node & same idenfitier name" to provide a unique identifier key even when
- // the func is declared in a struct or interface.
- type key struct {
- decl ast.Node
- name string
- }
- outgoingCalls := map[key]*protocol.CallHierarchyOutgoingCall{}
- for _, callRange := range callRanges {
- identifier, err := Identifier(ctx, snapshot, fh, callRange.Start)
- if err != nil {
- if errors.Is(err, ErrNoIdentFound) || errors.Is(err, errNoObjectFound) {
- continue
- }
- return nil, err
- }
-
- // ignore calls to builtin functions
- if identifier.Declaration.obj.Pkg() == nil {
- continue
- }
-
- if outgoingCall, ok := outgoingCalls[key{identifier.Declaration.node, identifier.Name}]; ok {
- outgoingCall.FromRanges = append(outgoingCall.FromRanges, callRange)
- continue
- }
-
- if len(identifier.Declaration.MappedRange) == 0 {
- continue
- }
- declMappedRange := identifier.Declaration.MappedRange[0]
- rng, err := declMappedRange.Range()
- if err != nil {
- return nil, err
- }
-
- outgoingCalls[key{identifier.Declaration.node, identifier.Name}] = &protocol.CallHierarchyOutgoingCall{
- To: protocol.CallHierarchyItem{
- Name: identifier.Name,
- Kind: protocol.Function,
- Tags: []protocol.SymbolTag{},
- Detail: fmt.Sprintf("%s • %s", identifier.Declaration.obj.Pkg().Path(), filepath.Base(declMappedRange.URI().Filename())),
- URI: protocol.DocumentURI(declMappedRange.URI()),
- Range: rng,
- SelectionRange: rng,
- },
- FromRanges: []protocol.Range{callRange},
- }
- }
-
- outgoingCallItems := make([]protocol.CallHierarchyOutgoingCall, 0, len(outgoingCalls))
- for _, callItem := range outgoingCalls {
- outgoingCallItems = append(outgoingCallItems, *callItem)
- }
- return outgoingCallItems, nil
-}
diff --git a/internal/lsp/source/code_lens.go b/internal/lsp/source/code_lens.go
deleted file mode 100644
index 0ab857ac6..000000000
--- a/internal/lsp/source/code_lens.go
+++ /dev/null
@@ -1,244 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package source
-
-import (
- "context"
- "go/ast"
- "go/token"
- "go/types"
- "path/filepath"
- "regexp"
- "strings"
-
- "golang.org/x/tools/internal/lsp/command"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/span"
-)
-
-type LensFunc func(context.Context, Snapshot, FileHandle) ([]protocol.CodeLens, error)
-
-// LensFuncs returns the supported lensFuncs for Go files.
-func LensFuncs() map[command.Command]LensFunc {
- return map[command.Command]LensFunc{
- command.Generate: goGenerateCodeLens,
- command.Test: runTestCodeLens,
- command.RegenerateCgo: regenerateCgoLens,
- command.GCDetails: toggleDetailsCodeLens,
- }
-}
-
-var (
- testRe = regexp.MustCompile("^Test[^a-z]")
- benchmarkRe = regexp.MustCompile("^Benchmark[^a-z]")
-)
-
-func runTestCodeLens(ctx context.Context, snapshot Snapshot, fh FileHandle) ([]protocol.CodeLens, error) {
- codeLens := make([]protocol.CodeLens, 0)
-
- fns, err := TestsAndBenchmarks(ctx, snapshot, fh)
- if err != nil {
- return nil, err
- }
- puri := protocol.URIFromSpanURI(fh.URI())
- for _, fn := range fns.Tests {
- cmd, err := command.NewTestCommand("run test", puri, []string{fn.Name}, nil)
- if err != nil {
- return nil, err
- }
- rng := protocol.Range{Start: fn.Rng.Start, End: fn.Rng.Start}
- codeLens = append(codeLens, protocol.CodeLens{Range: rng, Command: cmd})
- }
-
- for _, fn := range fns.Benchmarks {
- cmd, err := command.NewTestCommand("run benchmark", puri, nil, []string{fn.Name})
- if err != nil {
- return nil, err
- }
- rng := protocol.Range{Start: fn.Rng.Start, End: fn.Rng.Start}
- codeLens = append(codeLens, protocol.CodeLens{Range: rng, Command: cmd})
- }
-
- if len(fns.Benchmarks) > 0 {
- _, pgf, err := GetParsedFile(ctx, snapshot, fh, WidestPackage)
- if err != nil {
- return nil, err
- }
- // add a code lens to the top of the file which runs all benchmarks in the file
- rng, err := NewMappedRange(snapshot.FileSet(), pgf.Mapper, pgf.File.Package, pgf.File.Package).Range()
- if err != nil {
- return nil, err
- }
- var benches []string
- for _, fn := range fns.Benchmarks {
- benches = append(benches, fn.Name)
- }
- cmd, err := command.NewTestCommand("run file benchmarks", puri, nil, benches)
- if err != nil {
- return nil, err
- }
- codeLens = append(codeLens, protocol.CodeLens{Range: rng, Command: cmd})
- }
- return codeLens, nil
-}
-
-type testFn struct {
- Name string
- Rng protocol.Range
-}
-
-type testFns struct {
- Tests []testFn
- Benchmarks []testFn
-}
-
-func TestsAndBenchmarks(ctx context.Context, snapshot Snapshot, fh FileHandle) (testFns, error) {
- var out testFns
-
- if !strings.HasSuffix(fh.URI().Filename(), "_test.go") {
- return out, nil
- }
- pkg, pgf, err := GetParsedFile(ctx, snapshot, fh, WidestPackage)
- if err != nil {
- return out, err
- }
-
- for _, d := range pgf.File.Decls {
- fn, ok := d.(*ast.FuncDecl)
- if !ok {
- continue
- }
-
- rng, err := NewMappedRange(snapshot.FileSet(), pgf.Mapper, d.Pos(), fn.End()).Range()
- if err != nil {
- return out, err
- }
-
- if matchTestFunc(fn, pkg, testRe, "T") {
- out.Tests = append(out.Tests, testFn{fn.Name.Name, rng})
- }
-
- if matchTestFunc(fn, pkg, benchmarkRe, "B") {
- out.Benchmarks = append(out.Benchmarks, testFn{fn.Name.Name, rng})
- }
- }
-
- return out, nil
-}
-
-func matchTestFunc(fn *ast.FuncDecl, pkg Package, nameRe *regexp.Regexp, paramID string) bool {
- // Make sure that the function name matches a test function.
- if !nameRe.MatchString(fn.Name.Name) {
- return false
- }
- info := pkg.GetTypesInfo()
- if info == nil {
- return false
- }
- obj := info.ObjectOf(fn.Name)
- if obj == nil {
- return false
- }
- sig, ok := obj.Type().(*types.Signature)
- if !ok {
- return false
- }
- // Test functions should have only one parameter.
- if sig.Params().Len() != 1 {
- return false
- }
-
- // Check the type of the only parameter
- paramTyp, ok := sig.Params().At(0).Type().(*types.Pointer)
- if !ok {
- return false
- }
- named, ok := paramTyp.Elem().(*types.Named)
- if !ok {
- return false
- }
- namedObj := named.Obj()
- if namedObj.Pkg().Path() != "testing" {
- return false
- }
- return namedObj.Id() == paramID
-}
-
-func goGenerateCodeLens(ctx context.Context, snapshot Snapshot, fh FileHandle) ([]protocol.CodeLens, error) {
- pgf, err := snapshot.ParseGo(ctx, fh, ParseFull)
- if err != nil {
- return nil, err
- }
- const ggDirective = "//go:generate"
- for _, c := range pgf.File.Comments {
- for _, l := range c.List {
- if !strings.HasPrefix(l.Text, ggDirective) {
- continue
- }
- rng, err := NewMappedRange(snapshot.FileSet(), pgf.Mapper, l.Pos(), l.Pos()+token.Pos(len(ggDirective))).Range()
- if err != nil {
- return nil, err
- }
- dir := protocol.URIFromSpanURI(span.URIFromPath(filepath.Dir(fh.URI().Filename())))
- nonRecursiveCmd, err := command.NewGenerateCommand("run go generate", command.GenerateArgs{Dir: dir, Recursive: false})
- if err != nil {
- return nil, err
- }
- recursiveCmd, err := command.NewGenerateCommand("run go generate ./...", command.GenerateArgs{Dir: dir, Recursive: true})
- if err != nil {
- return nil, err
- }
- return []protocol.CodeLens{
- {Range: rng, Command: recursiveCmd},
- {Range: rng, Command: nonRecursiveCmd},
- }, nil
-
- }
- }
- return nil, nil
-}
-
-func regenerateCgoLens(ctx context.Context, snapshot Snapshot, fh FileHandle) ([]protocol.CodeLens, error) {
- pgf, err := snapshot.ParseGo(ctx, fh, ParseFull)
- if err != nil {
- return nil, err
- }
- var c *ast.ImportSpec
- for _, imp := range pgf.File.Imports {
- if imp.Path.Value == `"C"` {
- c = imp
- }
- }
- if c == nil {
- return nil, nil
- }
- rng, err := NewMappedRange(snapshot.FileSet(), pgf.Mapper, c.Pos(), c.EndPos).Range()
- if err != nil {
- return nil, err
- }
- puri := protocol.URIFromSpanURI(fh.URI())
- cmd, err := command.NewRegenerateCgoCommand("regenerate cgo definitions", command.URIArg{URI: puri})
- if err != nil {
- return nil, err
- }
- return []protocol.CodeLens{{Range: rng, Command: cmd}}, nil
-}
-
-func toggleDetailsCodeLens(ctx context.Context, snapshot Snapshot, fh FileHandle) ([]protocol.CodeLens, error) {
- _, pgf, err := GetParsedFile(ctx, snapshot, fh, WidestPackage)
- if err != nil {
- return nil, err
- }
- rng, err := NewMappedRange(snapshot.FileSet(), pgf.Mapper, pgf.File.Package, pgf.File.Package).Range()
- if err != nil {
- return nil, err
- }
- puri := protocol.URIFromSpanURI(fh.URI())
- cmd, err := command.NewGCDetailsCommand("Toggle gc annotation details", puri)
- if err != nil {
- return nil, err
- }
- return []protocol.CodeLens{{Range: rng, Command: cmd}}, nil
-}
diff --git a/internal/lsp/source/comment.go b/internal/lsp/source/comment.go
deleted file mode 100644
index d88471e42..000000000
--- a/internal/lsp/source/comment.go
+++ /dev/null
@@ -1,381 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package source
-
-import (
- "bytes"
- "io"
- "regexp"
- "strings"
- "unicode"
- "unicode/utf8"
-)
-
-// CommentToMarkdown converts comment text to formatted markdown.
-// The comment was prepared by DocReader,
-// so it is known not to have leading, trailing blank lines
-// nor to have trailing spaces at the end of lines.
-// The comment markers have already been removed.
-//
-// Each line is converted into a markdown line and empty lines are just converted to
-// newlines. Heading are prefixed with `### ` to make it a markdown heading.
-//
-// A span of indented lines retains a 4 space prefix block, with the common indent
-// prefix removed unless empty, in which case it will be converted to a newline.
-//
-// URLs in the comment text are converted into links.
-func CommentToMarkdown(text string) string {
- buf := &bytes.Buffer{}
- commentToMarkdown(buf, text)
- return buf.String()
-}
-
-var (
- mdNewline = []byte("\n")
- mdHeader = []byte("### ")
- mdIndent = []byte(" ")
- mdLinkStart = []byte("[")
- mdLinkDiv = []byte("](")
- mdLinkEnd = []byte(")")
-)
-
-func commentToMarkdown(w io.Writer, text string) {
- blocks := blocks(text)
- for i, b := range blocks {
- switch b.op {
- case opPara:
- for _, line := range b.lines {
- emphasize(w, line, true)
- }
- case opHead:
- // The header block can consist of only one line.
- // However, check the number of lines, just in case.
- if len(b.lines) == 0 {
- // Skip this block.
- continue
- }
- header := b.lines[0]
-
- w.Write(mdHeader)
- commentEscape(w, header, true)
- // Header doesn't end with \n unlike the lines of other blocks.
- w.Write(mdNewline)
- case opPre:
- for _, line := range b.lines {
- if isBlank(line) {
- w.Write(mdNewline)
- continue
- }
- w.Write(mdIndent)
- w.Write([]byte(line))
- }
- }
-
- if i < len(blocks)-1 {
- w.Write(mdNewline)
- }
- }
-}
-
-const (
- ulquo = "“"
- urquo = "”"
-)
-
-var (
- markdownEscape = regexp.MustCompile(`([\\\x60*{}[\]()#+\-.!_>~|"$%&'\/:;<=?@^])`)
-
- unicodeQuoteReplacer = strings.NewReplacer("``", ulquo, "''", urquo)
-)
-
-// commentEscape escapes comment text for markdown. If nice is set,
-// also turn `` into “; and '' into ”;.
-func commentEscape(w io.Writer, text string, nice bool) {
- if nice {
- text = convertQuotes(text)
- }
- text = escapeRegex(text)
- w.Write([]byte(text))
-}
-
-func convertQuotes(text string) string {
- return unicodeQuoteReplacer.Replace(text)
-}
-
-func escapeRegex(text string) string {
- return markdownEscape.ReplaceAllString(text, `\$1`)
-}
-
-func emphasize(w io.Writer, line string, nice bool) {
- for {
- m := matchRx.FindStringSubmatchIndex(line)
- if m == nil {
- break
- }
- // m >= 6 (two parenthesized sub-regexps in matchRx, 1st one is urlRx)
-
- // write text before match
- commentEscape(w, line[0:m[0]], nice)
-
- // adjust match for URLs
- match := line[m[0]:m[1]]
- if strings.Contains(match, "://") {
- m0, m1 := m[0], m[1]
- for _, s := range []string{"()", "{}", "[]"} {
- open, close := s[:1], s[1:] // E.g., "(" and ")"
- // require opening parentheses before closing parentheses (#22285)
- if i := strings.Index(match, close); i >= 0 && i < strings.Index(match, open) {
- m1 = m0 + i
- match = line[m0:m1]
- }
- // require balanced pairs of parentheses (#5043)
- for i := 0; strings.Count(match, open) != strings.Count(match, close) && i < 10; i++ {
- m1 = strings.LastIndexAny(line[:m1], s)
- match = line[m0:m1]
- }
- }
- if m1 != m[1] {
- // redo matching with shortened line for correct indices
- m = matchRx.FindStringSubmatchIndex(line[:m[0]+len(match)])
- }
- }
-
- // Following code has been modified from go/doc since words is always
- // nil. All html formatting has also been transformed into markdown formatting
-
- // analyze match
- url := ""
- if m[2] >= 0 {
- url = match
- }
-
- // write match
- if len(url) > 0 {
- w.Write(mdLinkStart)
- }
-
- commentEscape(w, match, nice)
-
- if len(url) > 0 {
- w.Write(mdLinkDiv)
- w.Write([]byte(urlReplacer.Replace(url)))
- w.Write(mdLinkEnd)
- }
-
- // advance
- line = line[m[1]:]
- }
- commentEscape(w, line, nice)
-}
-
-// Everything from here on is a copy of go/doc/comment.go
-
-const (
- // Regexp for Go identifiers
- identRx = `[\pL_][\pL_0-9]*`
-
- // Regexp for URLs
- // Match parens, and check later for balance - see #5043, #22285
- // Match .,:;?! within path, but not at end - see #18139, #16565
- // This excludes some rare yet valid urls ending in common punctuation
- // in order to allow sentences ending in URLs.
-
- // protocol (required) e.g. http
- protoPart = `(https?|ftp|file|gopher|mailto|nntp)`
- // host (required) e.g. www.example.com or [::1]:8080
- hostPart = `([a-zA-Z0-9_@\-.\[\]:]+)`
- // path+query+fragment (optional) e.g. /path/index.html?q=foo#bar
- pathPart = `([.,:;?!]*[a-zA-Z0-9$'()*+&#=@~_/\-\[\]%])*`
-
- urlRx = protoPart + `://` + hostPart + pathPart
-)
-
-var (
- matchRx = regexp.MustCompile(`(` + urlRx + `)|(` + identRx + `)`)
- urlReplacer = strings.NewReplacer(`(`, `\(`, `)`, `\)`)
-)
-
-func indentLen(s string) int {
- i := 0
- for i < len(s) && (s[i] == ' ' || s[i] == '\t') {
- i++
- }
- return i
-}
-
-func isBlank(s string) bool {
- return len(s) == 0 || (len(s) == 1 && s[0] == '\n')
-}
-
-func commonPrefix(a, b string) string {
- i := 0
- for i < len(a) && i < len(b) && a[i] == b[i] {
- i++
- }
- return a[0:i]
-}
-
-func unindent(block []string) {
- if len(block) == 0 {
- return
- }
-
- // compute maximum common white prefix
- prefix := block[0][0:indentLen(block[0])]
- for _, line := range block {
- if !isBlank(line) {
- prefix = commonPrefix(prefix, line[0:indentLen(line)])
- }
- }
- n := len(prefix)
-
- // remove
- for i, line := range block {
- if !isBlank(line) {
- block[i] = line[n:]
- }
- }
-}
-
-// heading returns the trimmed line if it passes as a section heading;
-// otherwise it returns the empty string.
-func heading(line string) string {
- line = strings.TrimSpace(line)
- if len(line) == 0 {
- return ""
- }
-
- // a heading must start with an uppercase letter
- r, _ := utf8.DecodeRuneInString(line)
- if !unicode.IsLetter(r) || !unicode.IsUpper(r) {
- return ""
- }
-
- // it must end in a letter or digit:
- r, _ = utf8.DecodeLastRuneInString(line)
- if !unicode.IsLetter(r) && !unicode.IsDigit(r) {
- return ""
- }
-
- // exclude lines with illegal characters. we allow "(),"
- if strings.ContainsAny(line, ";:!?+*/=[]{}_^°&§~%#@<\">\\") {
- return ""
- }
-
- // allow "'" for possessive "'s" only
- for b := line; ; {
- i := strings.IndexRune(b, '\'')
- if i < 0 {
- break
- }
- if i+1 >= len(b) || b[i+1] != 's' || (i+2 < len(b) && b[i+2] != ' ') {
- return "" // not followed by "s "
- }
- b = b[i+2:]
- }
-
- // allow "." when followed by non-space
- for b := line; ; {
- i := strings.IndexRune(b, '.')
- if i < 0 {
- break
- }
- if i+1 >= len(b) || b[i+1] == ' ' {
- return "" // not followed by non-space
- }
- b = b[i+1:]
- }
-
- return line
-}
-
-type op int
-
-const (
- opPara op = iota
- opHead
- opPre
-)
-
-type block struct {
- op op
- lines []string
-}
-
-func blocks(text string) []block {
- var (
- out []block
- para []string
-
- lastWasBlank = false
- lastWasHeading = false
- )
-
- close := func() {
- if para != nil {
- out = append(out, block{opPara, para})
- para = nil
- }
- }
-
- lines := strings.SplitAfter(text, "\n")
- unindent(lines)
- for i := 0; i < len(lines); {
- line := lines[i]
- if isBlank(line) {
- // close paragraph
- close()
- i++
- lastWasBlank = true
- continue
- }
- if indentLen(line) > 0 {
- // close paragraph
- close()
-
- // count indented or blank lines
- j := i + 1
- for j < len(lines) && (isBlank(lines[j]) || indentLen(lines[j]) > 0) {
- j++
- }
- // but not trailing blank lines
- for j > i && isBlank(lines[j-1]) {
- j--
- }
- pre := lines[i:j]
- i = j
-
- unindent(pre)
-
- // put those lines in a pre block
- out = append(out, block{opPre, pre})
- lastWasHeading = false
- continue
- }
-
- if lastWasBlank && !lastWasHeading && i+2 < len(lines) &&
- isBlank(lines[i+1]) && !isBlank(lines[i+2]) && indentLen(lines[i+2]) == 0 {
- // current line is non-blank, surrounded by blank lines
- // and the next non-blank line is not indented: this
- // might be a heading.
- if head := heading(line); head != "" {
- close()
- out = append(out, block{opHead, []string{head}})
- i += 2
- lastWasHeading = true
- continue
- }
- }
-
- // open paragraph
- lastWasBlank = false
- lastWasHeading = false
- para = append(para, lines[i])
- i++
- }
- close()
-
- return out
-}
diff --git a/internal/lsp/source/comment_test.go b/internal/lsp/source/comment_test.go
deleted file mode 100644
index 9efde16ef..000000000
--- a/internal/lsp/source/comment_test.go
+++ /dev/null
@@ -1,368 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package source
-
-import (
- "bytes"
- "reflect"
- "strings"
- "testing"
-)
-
-// This file is a copy of go/doc/comment_test.go with the exception for
-// the test cases for TestEmphasize and TestCommentEscape
-
-var headingTests = []struct {
- line string
- ok bool
-}{
- {"Section", true},
- {"A typical usage", true},
- {"ΔΛΞ is Greek", true},
- {"Foo 42", true},
- {"", false},
- {"section", false},
- {"A typical usage:", false},
- {"This code:", false},
- {"δ is Greek", false},
- {"Foo §", false},
- {"Fermat's Last Sentence", true},
- {"Fermat's", true},
- {"'sX", false},
- {"Ted 'Too' Bar", false},
- {"Use n+m", false},
- {"Scanning:", false},
- {"N:M", false},
-}
-
-func TestIsHeading(t *testing.T) {
- for _, tt := range headingTests {
- if h := heading(tt.line); (len(h) > 0) != tt.ok {
- t.Errorf("isHeading(%q) = %v, want %v", tt.line, h, tt.ok)
- }
- }
-}
-
-var blocksTests = []struct {
- in string
- out []block
- text string
-}{
- {
- in: `Para 1.
-Para 1 line 2.
-
-Para 2.
-
-Section
-
-Para 3.
-
- pre
- pre1
-
-Para 4.
-
- pre
- pre1
-
- pre2
-
-Para 5.
-
-
- pre
-
-
- pre1
- pre2
-
-Para 6.
- pre
- pre2
-`,
- out: []block{
- {opPara, []string{"Para 1.\n", "Para 1 line 2.\n"}},
- {opPara, []string{"Para 2.\n"}},
- {opHead, []string{"Section"}},
- {opPara, []string{"Para 3.\n"}},
- {opPre, []string{"pre\n", "pre1\n"}},
- {opPara, []string{"Para 4.\n"}},
- {opPre, []string{"pre\n", "pre1\n", "\n", "pre2\n"}},
- {opPara, []string{"Para 5.\n"}},
- {opPre, []string{"pre\n", "\n", "\n", "pre1\n", "pre2\n"}},
- {opPara, []string{"Para 6.\n"}},
- {opPre, []string{"pre\n", "pre2\n"}},
- },
- text: `. Para 1. Para 1 line 2.
-
-. Para 2.
-
-
-. Section
-
-. Para 3.
-
-$ pre
-$ pre1
-
-. Para 4.
-
-$ pre
-$ pre1
-
-$ pre2
-
-. Para 5.
-
-$ pre
-
-
-$ pre1
-$ pre2
-
-. Para 6.
-
-$ pre
-$ pre2
-`,
- },
- {
- in: "Para.\n\tshould not be ``escaped''",
- out: []block{
- {opPara, []string{"Para.\n"}},
- {opPre, []string{"should not be ``escaped''"}},
- },
- text: ". Para.\n\n$ should not be ``escaped''",
- },
- {
- in: "// A very long line of 46 char for line wrapping.",
- out: []block{
- {opPara, []string{"// A very long line of 46 char for line wrapping."}},
- },
- text: `. // A very long line of 46 char for line
-. // wrapping.
-`,
- },
- {
- in: `/* A very long line of 46 char for line wrapping.
-A very long line of 46 char for line wrapping. */`,
- out: []block{
- {opPara, []string{"/* A very long line of 46 char for line wrapping.\n", "A very long line of 46 char for line wrapping. */"}},
- },
- text: `. /* A very long line of 46 char for line
-. wrapping. A very long line of 46 char
-. for line wrapping. */
-`,
- },
-}
-
-func TestBlocks(t *testing.T) {
- for i, tt := range blocksTests {
- b := blocks(tt.in)
- if !reflect.DeepEqual(b, tt.out) {
- t.Errorf("#%d: mismatch\nhave: %v\nwant: %v", i, b, tt.out)
- }
- }
-}
-
-// This has been modified from go/doc to use markdown links instead of html ones
-// and use markdown escaping instead oh html
-var emphasizeTests = []struct {
- in, out string
-}{
- {"", ""},
- {"http://[::1]:8080/foo.txt", `[http\:\/\/\[\:\:1\]\:8080\/foo\.txt](http://[::1]:8080/foo.txt)`},
- {"before (https://www.google.com) after", `before \([https\:\/\/www\.google\.com](https://www.google.com)\) after`},
- {"before https://www.google.com:30/x/y/z:b::c. After", `before [https\:\/\/www\.google\.com\:30\/x\/y\/z\:b\:\:c](https://www.google.com:30/x/y/z:b::c)\. After`},
- {"http://www.google.com/path/:;!-/?query=%34b#093124", `[http\:\/\/www\.google\.com\/path\/\:\;\!\-\/\?query\=\%34b\#093124](http://www.google.com/path/:;!-/?query=%34b#093124)`},
- {"http://www.google.com/path/:;!-/?query=%34bar#093124", `[http\:\/\/www\.google\.com\/path\/\:\;\!\-\/\?query\=\%34bar\#093124](http://www.google.com/path/:;!-/?query=%34bar#093124)`},
- {"http://www.google.com/index.html! After", `[http\:\/\/www\.google\.com\/index\.html](http://www.google.com/index.html)\! After`},
- {"http://www.google.com/", `[http\:\/\/www\.google\.com\/](http://www.google.com/)`},
- {"https://www.google.com/", `[https\:\/\/www\.google\.com\/](https://www.google.com/)`},
- {"http://www.google.com/path.", `[http\:\/\/www\.google\.com\/path](http://www.google.com/path)\.`},
- {"http://en.wikipedia.org/wiki/Camellia_(cipher)", `[http\:\/\/en\.wikipedia\.org\/wiki\/Camellia\_\(cipher\)](http://en.wikipedia.org/wiki/Camellia_\(cipher\))`},
- {"(http://www.google.com/)", `\([http\:\/\/www\.google\.com\/](http://www.google.com/)\)`},
- {"http://gmail.com)", `[http\:\/\/gmail\.com](http://gmail.com)\)`},
- {"((http://gmail.com))", `\(\([http\:\/\/gmail\.com](http://gmail.com)\)\)`},
- {"http://gmail.com ((http://gmail.com)) ()", `[http\:\/\/gmail\.com](http://gmail.com) \(\([http\:\/\/gmail\.com](http://gmail.com)\)\) \(\)`},
- {"Foo bar http://example.com/ quux!", `Foo bar [http\:\/\/example\.com\/](http://example.com/) quux\!`},
- {"Hello http://example.com/%2f/ /world.", `Hello [http\:\/\/example\.com\/\%2f\/](http://example.com/%2f/) \/world\.`},
- {"Lorem http: ipsum //host/path", `Lorem http\: ipsum \/\/host\/path`},
- {"javascript://is/not/linked", `javascript\:\/\/is\/not\/linked`},
- {"http://foo", `[http\:\/\/foo](http://foo)`},
- {"art by [[https://www.example.com/person/][Person Name]]", `art by \[\[[https\:\/\/www\.example\.com\/person\/](https://www.example.com/person/)\]\[Person Name\]\]`},
- {"please visit (http://golang.org/)", `please visit \([http\:\/\/golang\.org\/](http://golang.org/)\)`},
- {"please visit http://golang.org/hello())", `please visit [http\:\/\/golang\.org\/hello\(\)](http://golang.org/hello\(\))\)`},
- {"http://git.qemu.org/?p=qemu.git;a=blob;f=qapi-schema.json;hb=HEAD", `[http\:\/\/git\.qemu\.org\/\?p\=qemu\.git\;a\=blob\;f\=qapi\-schema\.json\;hb\=HEAD](http://git.qemu.org/?p=qemu.git;a=blob;f=qapi-schema.json;hb=HEAD)`},
- {"https://foo.bar/bal/x(])", `[https\:\/\/foo\.bar\/bal\/x\(](https://foo.bar/bal/x\()\]\)`},
- {"foo [ http://bar(])", `foo \[ [http\:\/\/bar\(](http://bar\()\]\)`},
-}
-
-func TestEmphasize(t *testing.T) {
- for i, tt := range emphasizeTests {
- var buf bytes.Buffer
- emphasize(&buf, tt.in, true)
- out := buf.String()
- if out != tt.out {
- t.Errorf("#%d: mismatch\nhave: %v\nwant: %v", i, out, tt.out)
- }
- }
-}
-
-func TestCommentEscape(t *testing.T) {
- //ldquo -> ulquo and rdquo -> urquo
- commentTests := []struct {
- in, out string
- }{
- {"typically invoked as ``go tool asm'',", "typically invoked as " + ulquo + "go tool asm" + urquo + ","},
- {"For more detail, run ``go help test'' and ``go help testflag''", "For more detail, run " + ulquo + "go help test" + urquo + " and " + ulquo + "go help testflag" + urquo}}
- for i, tt := range commentTests {
- var buf strings.Builder
- commentEscape(&buf, tt.in, true)
- out := buf.String()
- if out != tt.out {
- t.Errorf("#%d: mismatch\nhave: %q\nwant: %q", i, out, tt.out)
- }
- }
-}
-
-func TestCommentToMarkdown(t *testing.T) {
- tests := []struct {
- in, out string
- }{
- {
- in: "F declaration.\n",
- out: "F declaration\\.\n",
- },
- {
- in: `
-F declaration. Lorem ipsum dolor sit amet.
-Etiam mattis eros at orci mollis molestie.
-`,
- out: `
-F declaration\. Lorem ipsum dolor sit amet\.
-Etiam mattis eros at orci mollis molestie\.
-`,
- },
- {
- in: `
-F declaration.
-
-Lorem ipsum dolor sit amet.
-Sed id dui turpis.
-
-
-
-
-Aenean tempus velit non auctor eleifend.
-Aenean efficitur a sem id ultricies.
-
-
-Phasellus efficitur mauris et viverra bibendum.
-`,
- out: `
-F declaration\.
-
-Lorem ipsum dolor sit amet\.
-Sed id dui turpis\.
-
-Aenean tempus velit non auctor eleifend\.
-Aenean efficitur a sem id ultricies\.
-
-Phasellus efficitur mauris et viverra bibendum\.
-`,
- },
- {
- in: `
-F declaration.
-
-Aenean tempus velit non auctor eleifend.
-
-Section
-
-Lorem ipsum dolor sit amet, consectetur adipiscing elit.
-
- func foo() {}
-
-
- func bar() {}
-
-Fusce lorem lacus.
-
- func foo() {}
-
- func bar() {}
-
-Maecenas in lobortis lectus.
-
- func foo() {}
-
- func bar() {}
-
-Phasellus efficitur mauris et viverra bibendum.
-`,
- out: `
-F declaration\.
-
-Aenean tempus velit non auctor eleifend\.
-
-### Section
-
-Lorem ipsum dolor sit amet, consectetur adipiscing elit\.
-
- func foo() {}
-
-
- func bar() {}
-
-Fusce lorem lacus\.
-
- func foo() {}
-
- func bar() {}
-
-Maecenas in lobortis lectus\.
-
- func foo() {}
-
- func bar() {}
-
-Phasellus efficitur mauris et viverra bibendum\.
-`,
- },
- {
- in: `
-F declaration.
-
- func foo() {
- fmt.Println("foo")
- }
- func bar() {
- fmt.Println("bar")
- }
-`,
- out: `
-F declaration\.
-
- func foo() {
- fmt.Println("foo")
- }
- func bar() {
- fmt.Println("bar")
- }
-`,
- },
- }
- for i, tt := range tests {
- // Comments start with new lines for better readability. So, we should trim them.
- tt.in = strings.TrimPrefix(tt.in, "\n")
- tt.out = strings.TrimPrefix(tt.out, "\n")
-
- if out := CommentToMarkdown(tt.in); out != tt.out {
- t.Errorf("#%d: mismatch\nhave: %q\nwant: %q", i, out, tt.out)
- }
- }
-}
diff --git a/internal/lsp/source/completion/completion.go b/internal/lsp/source/completion/completion.go
deleted file mode 100644
index 60c404dc5..000000000
--- a/internal/lsp/source/completion/completion.go
+++ /dev/null
@@ -1,2967 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package completion provides core functionality for code completion in Go
-// editors and tools.
-package completion
-
-import (
- "context"
- "fmt"
- "go/ast"
- "go/constant"
- "go/scanner"
- "go/token"
- "go/types"
- "math"
- "sort"
- "strconv"
- "strings"
- "sync"
- "time"
- "unicode"
-
- "golang.org/x/tools/go/ast/astutil"
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/imports"
- "golang.org/x/tools/internal/lsp/fuzzy"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/snippet"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/typeparams"
- errors "golang.org/x/xerrors"
-)
-
-type CompletionItem struct {
- // Label is the primary text the user sees for this completion item.
- Label string
-
- // Detail is supplemental information to present to the user.
- // This often contains the type or return type of the completion item.
- Detail string
-
- // InsertText is the text to insert if this item is selected.
- // Any of the prefix that has already been typed is not trimmed.
- // The insert text does not contain snippets.
- InsertText string
-
- Kind protocol.CompletionItemKind
- Tags []protocol.CompletionItemTag
- Deprecated bool // Deprecated, prefer Tags if available
-
- // An optional array of additional TextEdits that are applied when
- // selecting this completion.
- //
- // Additional text edits should be used to change text unrelated to the current cursor position
- // (for example adding an import statement at the top of the file if the completion item will
- // insert an unqualified type).
- AdditionalTextEdits []protocol.TextEdit
-
- // Depth is how many levels were searched to find this completion.
- // For example when completing "foo<>", "fooBar" is depth 0, and
- // "fooBar.Baz" is depth 1.
- Depth int
-
- // Score is the internal relevance score.
- // A higher score indicates that this completion item is more relevant.
- Score float64
-
- // snippet is the LSP snippet for the completion item. The LSP
- // specification contains details about LSP snippets. For example, a
- // snippet for a function with the following signature:
- //
- // func foo(a, b, c int)
- //
- // would be:
- //
- // foo(${1:a int}, ${2: b int}, ${3: c int})
- //
- // If Placeholders is false in the CompletionOptions, the above
- // snippet would instead be:
- //
- // foo(${1:})
- snippet *snippet.Builder
-
- // Documentation is the documentation for the completion item.
- Documentation string
-
- // obj is the object from which this candidate was derived, if any.
- // obj is for internal use only.
- obj types.Object
-}
-
-// completionOptions holds completion specific configuration.
-type completionOptions struct {
- unimported bool
- documentation bool
- fullDocumentation bool
- placeholders bool
- literal bool
- snippets bool
- postfix bool
- matcher source.Matcher
- budget time.Duration
-}
-
-// Snippet is a convenience returns the snippet if available, otherwise
-// the InsertText.
-// used for an item, depending on if the callee wants placeholders or not.
-func (i *CompletionItem) Snippet() string {
- if i.snippet != nil {
- return i.snippet.String()
- }
- return i.InsertText
-}
-
-// Scoring constants are used for weighting the relevance of different candidates.
-const (
- // stdScore is the base score for all completion items.
- stdScore float64 = 1.0
-
- // highScore indicates a very relevant completion item.
- highScore float64 = 10.0
-
- // lowScore indicates an irrelevant or not useful completion item.
- lowScore float64 = 0.01
-)
-
-// matcher matches a candidate's label against the user input. The
-// returned score reflects the quality of the match. A score of zero
-// indicates no match, and a score of one means a perfect match.
-type matcher interface {
- Score(candidateLabel string) (score float32)
-}
-
-// prefixMatcher implements case sensitive prefix matching.
-type prefixMatcher string
-
-func (pm prefixMatcher) Score(candidateLabel string) float32 {
- if strings.HasPrefix(candidateLabel, string(pm)) {
- return 1
- }
- return -1
-}
-
-// insensitivePrefixMatcher implements case insensitive prefix matching.
-type insensitivePrefixMatcher string
-
-func (ipm insensitivePrefixMatcher) Score(candidateLabel string) float32 {
- if strings.HasPrefix(strings.ToLower(candidateLabel), string(ipm)) {
- return 1
- }
- return -1
-}
-
-// completer contains the necessary information for a single completion request.
-type completer struct {
- snapshot source.Snapshot
- pkg source.Package
- qf types.Qualifier
- opts *completionOptions
-
- // completionContext contains information about the trigger for this
- // completion request.
- completionContext completionContext
-
- // fh is a handle to the file associated with this completion request.
- fh source.FileHandle
-
- // filename is the name of the file associated with this completion request.
- filename string
-
- // file is the AST of the file associated with this completion request.
- file *ast.File
-
- // pos is the position at which the request was triggered.
- pos token.Pos
-
- // path is the path of AST nodes enclosing the position.
- path []ast.Node
-
- // seen is the map that ensures we do not return duplicate results.
- seen map[types.Object]bool
-
- // items is the list of completion items returned.
- items []CompletionItem
-
- // completionCallbacks is a list of callbacks to collect completions that
- // require expensive operations. This includes operations where we search
- // through the entire module cache.
- completionCallbacks []func(opts *imports.Options) error
-
- // surrounding describes the identifier surrounding the position.
- surrounding *Selection
-
- // inference contains information we've inferred about ideal
- // candidates such as the candidate's type.
- inference candidateInference
-
- // enclosingFunc contains information about the function enclosing
- // the position.
- enclosingFunc *funcInfo
-
- // enclosingCompositeLiteral contains information about the composite literal
- // enclosing the position.
- enclosingCompositeLiteral *compLitInfo
-
- // deepState contains the current state of our deep completion search.
- deepState deepCompletionState
-
- // matcher matches the candidates against the surrounding prefix.
- matcher matcher
-
- // methodSetCache caches the types.NewMethodSet call, which is relatively
- // expensive and can be called many times for the same type while searching
- // for deep completions.
- methodSetCache map[methodSetKey]*types.MethodSet
-
- // mapper converts the positions in the file from which the completion originated.
- mapper *protocol.ColumnMapper
-
- // startTime is when we started processing this completion request. It does
- // not include any time the request spent in the queue.
- startTime time.Time
-}
-
-// funcInfo holds info about a function object.
-type funcInfo struct {
- // sig is the function declaration enclosing the position.
- sig *types.Signature
-
- // body is the function's body.
- body *ast.BlockStmt
-}
-
-type compLitInfo struct {
- // cl is the *ast.CompositeLit enclosing the position.
- cl *ast.CompositeLit
-
- // clType is the type of cl.
- clType types.Type
-
- // kv is the *ast.KeyValueExpr enclosing the position, if any.
- kv *ast.KeyValueExpr
-
- // inKey is true if we are certain the position is in the key side
- // of a key-value pair.
- inKey bool
-
- // maybeInFieldName is true if inKey is false and it is possible
- // we are completing a struct field name. For example,
- // "SomeStruct{<>}" will be inKey=false, but maybeInFieldName=true
- // because we _could_ be completing a field name.
- maybeInFieldName bool
-}
-
-type importInfo struct {
- importPath string
- name string
- pkg source.Package
-}
-
-type methodSetKey struct {
- typ types.Type
- addressable bool
-}
-
-type completionContext struct {
- // triggerCharacter is the character used to trigger completion at current
- // position, if any.
- triggerCharacter string
-
- // triggerKind is information about how a completion was triggered.
- triggerKind protocol.CompletionTriggerKind
-
- // commentCompletion is true if we are completing a comment.
- commentCompletion bool
-
- // packageCompletion is true if we are completing a package name.
- packageCompletion bool
-}
-
-// A Selection represents the cursor position and surrounding identifier.
-type Selection struct {
- content string
- cursor token.Pos
- source.MappedRange
-}
-
-func (p Selection) Content() string {
- return p.content
-}
-
-func (p Selection) Start() token.Pos {
- return p.MappedRange.SpanRange().Start
-}
-
-func (p Selection) End() token.Pos {
- return p.MappedRange.SpanRange().End
-}
-
-func (p Selection) Prefix() string {
- return p.content[:p.cursor-p.SpanRange().Start]
-}
-
-func (p Selection) Suffix() string {
- return p.content[p.cursor-p.SpanRange().Start:]
-}
-
-func (c *completer) setSurrounding(ident *ast.Ident) {
- if c.surrounding != nil {
- return
- }
- if !(ident.Pos() <= c.pos && c.pos <= ident.End()) {
- return
- }
-
- c.surrounding = &Selection{
- content: ident.Name,
- cursor: c.pos,
- // Overwrite the prefix only.
- MappedRange: source.NewMappedRange(c.snapshot.FileSet(), c.mapper, ident.Pos(), ident.End()),
- }
-
- c.setMatcherFromPrefix(c.surrounding.Prefix())
-}
-
-func (c *completer) setMatcherFromPrefix(prefix string) {
- switch c.opts.matcher {
- case source.Fuzzy:
- c.matcher = fuzzy.NewMatcher(prefix)
- case source.CaseSensitive:
- c.matcher = prefixMatcher(prefix)
- default:
- c.matcher = insensitivePrefixMatcher(strings.ToLower(prefix))
- }
-}
-
-func (c *completer) getSurrounding() *Selection {
- if c.surrounding == nil {
- c.surrounding = &Selection{
- content: "",
- cursor: c.pos,
- MappedRange: source.NewMappedRange(c.snapshot.FileSet(), c.mapper, c.pos, c.pos),
- }
- }
- return c.surrounding
-}
-
-// candidate represents a completion candidate.
-type candidate struct {
- // obj is the types.Object to complete to.
- obj types.Object
-
- // score is used to rank candidates.
- score float64
-
- // name is the deep object name path, e.g. "foo.bar"
- name string
-
- // detail is additional information about this item. If not specified,
- // defaults to type string for the object.
- detail string
-
- // path holds the path from the search root (excluding the candidate
- // itself) for a deep candidate.
- path []types.Object
-
- // pathInvokeMask is a bit mask tracking whether each entry in path
- // should be formatted with "()" (i.e. whether it is a function
- // invocation).
- pathInvokeMask uint16
-
- // mods contains modifications that should be applied to the
- // candidate when inserted. For example, "foo" may be insterted as
- // "*foo" or "foo()".
- mods []typeModKind
-
- // addressable is true if a pointer can be taken to the candidate.
- addressable bool
-
- // convertTo is a type that this candidate should be cast to. For
- // example, if convertTo is float64, "foo" should be formatted as
- // "float64(foo)".
- convertTo types.Type
-
- // imp is the import that needs to be added to this package in order
- // for this candidate to be valid. nil if no import needed.
- imp *importInfo
-}
-
-func (c candidate) hasMod(mod typeModKind) bool {
- for _, m := range c.mods {
- if m == mod {
- return true
- }
- }
- return false
-}
-
-// ErrIsDefinition is an error that informs the user they got no
-// completions because they tried to complete the name of a new object
-// being defined.
-type ErrIsDefinition struct {
- objStr string
-}
-
-func (e ErrIsDefinition) Error() string {
- msg := "this is a definition"
- if e.objStr != "" {
- msg += " of " + e.objStr
- }
- return msg
-}
-
-// Completion returns a list of possible candidates for completion, given a
-// a file and a position.
-//
-// The selection is computed based on the preceding identifier and can be used by
-// the client to score the quality of the completion. For instance, some clients
-// may tolerate imperfect matches as valid completion results, since users may make typos.
-func Completion(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle, protoPos protocol.Position, protoContext protocol.CompletionContext) ([]CompletionItem, *Selection, error) {
- ctx, done := event.Start(ctx, "completion.Completion")
- defer done()
-
- startTime := time.Now()
-
- pkg, pgf, err := source.GetParsedFile(ctx, snapshot, fh, source.NarrowestPackage)
- if err != nil || pgf.File.Package == token.NoPos {
- // If we can't parse this file or find position for the package
- // keyword, it may be missing a package declaration. Try offering
- // suggestions for the package declaration.
- // Note that this would be the case even if the keyword 'package' is
- // present but no package name exists.
- items, surrounding, innerErr := packageClauseCompletions(ctx, snapshot, fh, protoPos)
- if innerErr != nil {
- // return the error for GetParsedFile since it's more relevant in this situation.
- return nil, nil, errors.Errorf("getting file for Completion: %w (package completions: %v)", err, innerErr)
- }
- return items, surrounding, nil
- }
- spn, err := pgf.Mapper.PointSpan(protoPos)
- if err != nil {
- return nil, nil, err
- }
- rng, err := spn.Range(pgf.Mapper.Converter)
- if err != nil {
- return nil, nil, err
- }
- // Completion is based on what precedes the cursor.
- // Find the path to the position before pos.
- path, _ := astutil.PathEnclosingInterval(pgf.File, rng.Start-1, rng.Start-1)
- if path == nil {
- return nil, nil, errors.Errorf("cannot find node enclosing position")
- }
-
- pos := rng.Start
-
- // Check if completion at this position is valid. If not, return early.
- switch n := path[0].(type) {
- case *ast.BasicLit:
- // Skip completion inside literals except for ImportSpec
- if len(path) > 1 {
- if _, ok := path[1].(*ast.ImportSpec); ok {
- break
- }
- }
- return nil, nil, nil
- case *ast.CallExpr:
- if n.Ellipsis.IsValid() && pos > n.Ellipsis && pos <= n.Ellipsis+token.Pos(len("...")) {
- // Don't offer completions inside or directly after "...". For
- // example, don't offer completions at "<>" in "foo(bar...<>").
- return nil, nil, nil
- }
- case *ast.Ident:
- // reject defining identifiers
- if obj, ok := pkg.GetTypesInfo().Defs[n]; ok {
- if v, ok := obj.(*types.Var); ok && v.IsField() && v.Embedded() {
- // An anonymous field is also a reference to a type.
- } else if pgf.File.Name == n {
- // Don't skip completions if Ident is for package name.
- break
- } else {
- objStr := ""
- if obj != nil {
- qual := types.RelativeTo(pkg.GetTypes())
- objStr = types.ObjectString(obj, qual)
- }
- ans, sel := definition(path, obj, snapshot.FileSet(), pgf.Mapper, fh)
- if ans != nil {
- sort.Slice(ans, func(i, j int) bool {
- return ans[i].Score > ans[j].Score
- })
- return ans, sel, nil
- }
- return nil, nil, ErrIsDefinition{objStr: objStr}
- }
- }
- }
-
- opts := snapshot.View().Options()
- c := &completer{
- pkg: pkg,
- snapshot: snapshot,
- qf: source.Qualifier(pgf.File, pkg.GetTypes(), pkg.GetTypesInfo()),
- completionContext: completionContext{
- triggerCharacter: protoContext.TriggerCharacter,
- triggerKind: protoContext.TriggerKind,
- },
- fh: fh,
- filename: fh.URI().Filename(),
- file: pgf.File,
- path: path,
- pos: pos,
- seen: make(map[types.Object]bool),
- enclosingFunc: enclosingFunction(path, pkg.GetTypesInfo()),
- enclosingCompositeLiteral: enclosingCompositeLiteral(path, rng.Start, pkg.GetTypesInfo()),
- deepState: deepCompletionState{
- enabled: opts.DeepCompletion,
- },
- opts: &completionOptions{
- matcher: opts.Matcher,
- unimported: opts.CompleteUnimported,
- documentation: opts.CompletionDocumentation && opts.HoverKind != source.NoDocumentation,
- fullDocumentation: opts.HoverKind == source.FullDocumentation,
- placeholders: opts.UsePlaceholders,
- literal: opts.LiteralCompletions && opts.InsertTextFormat == protocol.SnippetTextFormat,
- budget: opts.CompletionBudget,
- snippets: opts.InsertTextFormat == protocol.SnippetTextFormat,
- postfix: opts.ExperimentalPostfixCompletions,
- },
- // default to a matcher that always matches
- matcher: prefixMatcher(""),
- methodSetCache: make(map[methodSetKey]*types.MethodSet),
- mapper: pgf.Mapper,
- startTime: startTime,
- }
-
- var cancel context.CancelFunc
- if c.opts.budget == 0 {
- ctx, cancel = context.WithCancel(ctx)
- } else {
- // timeoutDuration is the completion budget remaining. If less than
- // 10ms, set to 10ms
- timeoutDuration := time.Until(c.startTime.Add(c.opts.budget))
- if timeoutDuration < 10*time.Millisecond {
- timeoutDuration = 10 * time.Millisecond
- }
- ctx, cancel = context.WithTimeout(ctx, timeoutDuration)
- }
- defer cancel()
-
- if surrounding := c.containingIdent(pgf.Src); surrounding != nil {
- c.setSurrounding(surrounding)
- }
-
- c.inference = expectedCandidate(ctx, c)
-
- err = c.collectCompletions(ctx)
- if err != nil {
- return nil, nil, err
- }
-
- // Deep search collected candidates and their members for more candidates.
- c.deepSearch(ctx)
-
- for _, callback := range c.completionCallbacks {
- if err := c.snapshot.RunProcessEnvFunc(ctx, callback); err != nil {
- return nil, nil, err
- }
- }
-
- // Search candidates populated by expensive operations like
- // unimportedMembers etc. for more completion items.
- c.deepSearch(ctx)
-
- // Statement candidates offer an entire statement in certain contexts, as
- // opposed to a single object. Add statement candidates last because they
- // depend on other candidates having already been collected.
- c.addStatementCandidates()
-
- c.sortItems()
- return c.items, c.getSurrounding(), nil
-}
-
-// collectCompletions adds possible completion candidates to either the deep
-// search queue or completion items directly for different completion contexts.
-func (c *completer) collectCompletions(ctx context.Context) error {
- // Inside import blocks, return completions for unimported packages.
- for _, importSpec := range c.file.Imports {
- if !(importSpec.Path.Pos() <= c.pos && c.pos <= importSpec.Path.End()) {
- continue
- }
- return c.populateImportCompletions(ctx, importSpec)
- }
-
- // Inside comments, offer completions for the name of the relevant symbol.
- for _, comment := range c.file.Comments {
- if comment.Pos() < c.pos && c.pos <= comment.End() {
- c.populateCommentCompletions(ctx, comment)
- return nil
- }
- }
-
- // Struct literals are handled entirely separately.
- if c.wantStructFieldCompletions() {
- // If we are definitely completing a struct field name, deep completions
- // don't make sense.
- if c.enclosingCompositeLiteral.inKey {
- c.deepState.enabled = false
- }
- return c.structLiteralFieldName(ctx)
- }
-
- if lt := c.wantLabelCompletion(); lt != labelNone {
- c.labels(lt)
- return nil
- }
-
- if c.emptySwitchStmt() {
- // Empty switch statements only admit "default" and "case" keywords.
- c.addKeywordItems(map[string]bool{}, highScore, CASE, DEFAULT)
- return nil
- }
-
- switch n := c.path[0].(type) {
- case *ast.Ident:
- if c.file.Name == n {
- return c.packageNameCompletions(ctx, c.fh.URI(), n)
- } else if sel, ok := c.path[1].(*ast.SelectorExpr); ok && sel.Sel == n {
- // Is this the Sel part of a selector?
- return c.selector(ctx, sel)
- }
- return c.lexical(ctx)
- // The function name hasn't been typed yet, but the parens are there:
- // recv.‸(arg)
- case *ast.TypeAssertExpr:
- // Create a fake selector expression.
- return c.selector(ctx, &ast.SelectorExpr{X: n.X})
- case *ast.SelectorExpr:
- return c.selector(ctx, n)
- // At the file scope, only keywords are allowed.
- case *ast.BadDecl, *ast.File:
- c.addKeywordCompletions()
- default:
- // fallback to lexical completions
- return c.lexical(ctx)
- }
-
- return nil
-}
-
-// containingIdent returns the *ast.Ident containing pos, if any. It
-// synthesizes an *ast.Ident to allow completion in the face of
-// certain syntax errors.
-func (c *completer) containingIdent(src []byte) *ast.Ident {
- // In the normal case, our leaf AST node is the identifer being completed.
- if ident, ok := c.path[0].(*ast.Ident); ok {
- return ident
- }
-
- pos, tkn, lit := c.scanToken(src)
- if !pos.IsValid() {
- return nil
- }
-
- fakeIdent := &ast.Ident{Name: lit, NamePos: pos}
-
- if _, isBadDecl := c.path[0].(*ast.BadDecl); isBadDecl {
- // You don't get *ast.Idents at the file level, so look for bad
- // decls and use the manually extracted token.
- return fakeIdent
- } else if c.emptySwitchStmt() {
- // Only keywords are allowed in empty switch statements.
- // *ast.Idents are not parsed, so we must use the manually
- // extracted token.
- return fakeIdent
- } else if tkn.IsKeyword() {
- // Otherwise, manually extract the prefix if our containing token
- // is a keyword. This improves completion after an "accidental
- // keyword", e.g. completing to "variance" in "someFunc(var<>)".
- return fakeIdent
- }
-
- return nil
-}
-
-// scanToken scans pgh's contents for the token containing pos.
-func (c *completer) scanToken(contents []byte) (token.Pos, token.Token, string) {
- tok := c.snapshot.FileSet().File(c.pos)
-
- var s scanner.Scanner
- s.Init(tok, contents, nil, 0)
- for {
- tknPos, tkn, lit := s.Scan()
- if tkn == token.EOF || tknPos >= c.pos {
- return token.NoPos, token.ILLEGAL, ""
- }
-
- if len(lit) > 0 && tknPos <= c.pos && c.pos <= tknPos+token.Pos(len(lit)) {
- return tknPos, tkn, lit
- }
- }
-}
-
-func (c *completer) sortItems() {
- sort.SliceStable(c.items, func(i, j int) bool {
- // Sort by score first.
- if c.items[i].Score != c.items[j].Score {
- return c.items[i].Score > c.items[j].Score
- }
-
- // Then sort by label so order stays consistent. This also has the
- // effect of preferring shorter candidates.
- return c.items[i].Label < c.items[j].Label
- })
-}
-
-// emptySwitchStmt reports whether pos is in an empty switch or select
-// statement.
-func (c *completer) emptySwitchStmt() bool {
- block, ok := c.path[0].(*ast.BlockStmt)
- if !ok || len(block.List) > 0 || len(c.path) == 1 {
- return false
- }
-
- switch c.path[1].(type) {
- case *ast.SwitchStmt, *ast.TypeSwitchStmt, *ast.SelectStmt:
- return true
- default:
- return false
- }
-}
-
-// populateImportCompletions yields completions for an import path around the cursor.
-//
-// Completions are suggested at the directory depth of the given import path so
-// that we don't overwhelm the user with a large list of possibilities. As an
-// example, a completion for the prefix "golang" results in "golang.org/".
-// Completions for "golang.org/" yield its subdirectories
-// (i.e. "golang.org/x/"). The user is meant to accept completion suggestions
-// until they reach a complete import path.
-func (c *completer) populateImportCompletions(ctx context.Context, searchImport *ast.ImportSpec) error {
- if !strings.HasPrefix(searchImport.Path.Value, `"`) {
- return nil
- }
-
- // deepSearch is not valuable for import completions.
- c.deepState.enabled = false
-
- importPath := searchImport.Path.Value
-
- // Extract the text between the quotes (if any) in an import spec.
- // prefix is the part of import path before the cursor.
- prefixEnd := c.pos - searchImport.Path.Pos()
- prefix := strings.Trim(importPath[:prefixEnd], `"`)
-
- // The number of directories in the import path gives us the depth at
- // which to search.
- depth := len(strings.Split(prefix, "/")) - 1
-
- content := importPath
- start, end := searchImport.Path.Pos(), searchImport.Path.End()
- namePrefix, nameSuffix := `"`, `"`
- // If a starting quote is present, adjust surrounding to either after the
- // cursor or after the first slash (/), except if cursor is at the starting
- // quote. Otherwise we provide a completion including the starting quote.
- if strings.HasPrefix(importPath, `"`) && c.pos > searchImport.Path.Pos() {
- content = content[1:]
- start++
- if depth > 0 {
- // Adjust textEdit start to replacement range. For ex: if current
- // path was "golang.or/x/to<>ols/internal/", where <> is the cursor
- // position, start of the replacement range would be after
- // "golang.org/x/".
- path := strings.SplitAfter(prefix, "/")
- numChars := len(strings.Join(path[:len(path)-1], ""))
- content = content[numChars:]
- start += token.Pos(numChars)
- }
- namePrefix = ""
- }
-
- // We won't provide an ending quote if one is already present, except if
- // cursor is after the ending quote but still in import spec. This is
- // because cursor has to be in our textEdit range.
- if strings.HasSuffix(importPath, `"`) && c.pos < searchImport.Path.End() {
- end--
- content = content[:len(content)-1]
- nameSuffix = ""
- }
-
- c.surrounding = &Selection{
- content: content,
- cursor: c.pos,
- MappedRange: source.NewMappedRange(c.snapshot.FileSet(), c.mapper, start, end),
- }
-
- seenImports := make(map[string]struct{})
- for _, importSpec := range c.file.Imports {
- if importSpec.Path.Value == importPath {
- continue
- }
- seenImportPath, err := strconv.Unquote(importSpec.Path.Value)
- if err != nil {
- return err
- }
- seenImports[seenImportPath] = struct{}{}
- }
-
- var mu sync.Mutex // guard c.items locally, since searchImports is called in parallel
- seen := make(map[string]struct{})
- searchImports := func(pkg imports.ImportFix) {
- path := pkg.StmtInfo.ImportPath
- if _, ok := seenImports[path]; ok {
- return
- }
-
- // Any package path containing fewer directories than the search
- // prefix is not a match.
- pkgDirList := strings.Split(path, "/")
- if len(pkgDirList) < depth+1 {
- return
- }
- pkgToConsider := strings.Join(pkgDirList[:depth+1], "/")
-
- name := pkgDirList[depth]
- // if we're adding an opening quote to completion too, set name to full
- // package path since we'll need to overwrite that range.
- if namePrefix == `"` {
- name = pkgToConsider
- }
-
- score := pkg.Relevance
- if len(pkgDirList)-1 == depth {
- score *= highScore
- } else {
- // For incomplete package paths, add a terminal slash to indicate that the
- // user should keep triggering completions.
- name += "/"
- pkgToConsider += "/"
- }
-
- if _, ok := seen[pkgToConsider]; ok {
- return
- }
- seen[pkgToConsider] = struct{}{}
-
- mu.Lock()
- defer mu.Unlock()
-
- name = namePrefix + name + nameSuffix
- obj := types.NewPkgName(0, nil, name, types.NewPackage(pkgToConsider, name))
- c.deepState.enqueue(candidate{
- obj: obj,
- detail: fmt.Sprintf("%q", pkgToConsider),
- score: score,
- })
- }
-
- c.completionCallbacks = append(c.completionCallbacks, func(opts *imports.Options) error {
- return imports.GetImportPaths(ctx, searchImports, prefix, c.filename, c.pkg.GetTypes().Name(), opts.Env)
- })
- return nil
-}
-
-// populateCommentCompletions yields completions for comments preceding or in declarations.
-func (c *completer) populateCommentCompletions(ctx context.Context, comment *ast.CommentGroup) {
- // If the completion was triggered by a period, ignore it. These types of
- // completions will not be useful in comments.
- if c.completionContext.triggerCharacter == "." {
- return
- }
-
- // Using the comment position find the line after
- file := c.snapshot.FileSet().File(comment.End())
- if file == nil {
- return
- }
-
- // Deep completion doesn't work properly in comments since we don't
- // have a type object to complete further.
- c.deepState.enabled = false
- c.completionContext.commentCompletion = true
-
- // Documentation isn't useful in comments, since it might end up being the
- // comment itself.
- c.opts.documentation = false
-
- commentLine := file.Line(comment.End())
-
- // comment is valid, set surrounding as word boundaries around cursor
- c.setSurroundingForComment(comment)
-
- // Using the next line pos, grab and parse the exported symbol on that line
- for _, n := range c.file.Decls {
- declLine := file.Line(n.Pos())
- // if the comment is not in, directly above or on the same line as a declaration
- if declLine != commentLine && declLine != commentLine+1 &&
- !(n.Pos() <= comment.Pos() && comment.End() <= n.End()) {
- continue
- }
- switch node := n.(type) {
- // handle const, vars, and types
- case *ast.GenDecl:
- for _, spec := range node.Specs {
- switch spec := spec.(type) {
- case *ast.ValueSpec:
- for _, name := range spec.Names {
- if name.String() == "_" {
- continue
- }
- obj := c.pkg.GetTypesInfo().ObjectOf(name)
- c.deepState.enqueue(candidate{obj: obj, score: stdScore})
- }
- case *ast.TypeSpec:
- // add TypeSpec fields to completion
- switch typeNode := spec.Type.(type) {
- case *ast.StructType:
- c.addFieldItems(ctx, typeNode.Fields)
- case *ast.FuncType:
- c.addFieldItems(ctx, typeNode.Params)
- c.addFieldItems(ctx, typeNode.Results)
- case *ast.InterfaceType:
- c.addFieldItems(ctx, typeNode.Methods)
- }
-
- if spec.Name.String() == "_" {
- continue
- }
-
- obj := c.pkg.GetTypesInfo().ObjectOf(spec.Name)
- // Type name should get a higher score than fields but not highScore by default
- // since field near a comment cursor gets a highScore
- score := stdScore * 1.1
- // If type declaration is on the line after comment, give it a highScore.
- if declLine == commentLine+1 {
- score = highScore
- }
-
- c.deepState.enqueue(candidate{obj: obj, score: score})
- }
- }
- // handle functions
- case *ast.FuncDecl:
- c.addFieldItems(ctx, node.Recv)
- c.addFieldItems(ctx, node.Type.Params)
- c.addFieldItems(ctx, node.Type.Results)
-
- // collect receiver struct fields
- if node.Recv != nil {
- for _, fields := range node.Recv.List {
- for _, name := range fields.Names {
- obj := c.pkg.GetTypesInfo().ObjectOf(name)
- if obj == nil {
- continue
- }
-
- recvType := obj.Type().Underlying()
- if ptr, ok := recvType.(*types.Pointer); ok {
- recvType = ptr.Elem()
- }
- recvStruct, ok := recvType.Underlying().(*types.Struct)
- if !ok {
- continue
- }
- for i := 0; i < recvStruct.NumFields(); i++ {
- field := recvStruct.Field(i)
- c.deepState.enqueue(candidate{obj: field, score: lowScore})
- }
- }
- }
- }
-
- if node.Name.String() == "_" {
- continue
- }
-
- obj := c.pkg.GetTypesInfo().ObjectOf(node.Name)
- if obj == nil || obj.Pkg() != nil && obj.Pkg() != c.pkg.GetTypes() {
- continue
- }
-
- c.deepState.enqueue(candidate{obj: obj, score: highScore})
- }
- }
-}
-
-// sets word boundaries surrounding a cursor for a comment
-func (c *completer) setSurroundingForComment(comments *ast.CommentGroup) {
- var cursorComment *ast.Comment
- for _, comment := range comments.List {
- if c.pos >= comment.Pos() && c.pos <= comment.End() {
- cursorComment = comment
- break
- }
- }
- // if cursor isn't in the comment
- if cursorComment == nil {
- return
- }
-
- // index of cursor in comment text
- cursorOffset := int(c.pos - cursorComment.Pos())
- start, end := cursorOffset, cursorOffset
- for start > 0 && isValidIdentifierChar(cursorComment.Text[start-1]) {
- start--
- }
- for end < len(cursorComment.Text) && isValidIdentifierChar(cursorComment.Text[end]) {
- end++
- }
-
- c.surrounding = &Selection{
- content: cursorComment.Text[start:end],
- cursor: c.pos,
- MappedRange: source.NewMappedRange(c.snapshot.FileSet(), c.mapper,
- token.Pos(int(cursorComment.Slash)+start), token.Pos(int(cursorComment.Slash)+end)),
- }
- c.setMatcherFromPrefix(c.surrounding.Prefix())
-}
-
-// isValidIdentifierChar returns true if a byte is a valid go identifier
-// character, i.e. unicode letter or digit or underscore.
-func isValidIdentifierChar(char byte) bool {
- charRune := rune(char)
- return unicode.In(charRune, unicode.Letter, unicode.Digit) || char == '_'
-}
-
-// adds struct fields, interface methods, function declaration fields to completion
-func (c *completer) addFieldItems(ctx context.Context, fields *ast.FieldList) {
- if fields == nil {
- return
- }
-
- cursor := c.surrounding.cursor
- for _, field := range fields.List {
- for _, name := range field.Names {
- if name.String() == "_" {
- continue
- }
- obj := c.pkg.GetTypesInfo().ObjectOf(name)
- if obj == nil {
- continue
- }
-
- // if we're in a field comment/doc, score that field as more relevant
- score := stdScore
- if field.Comment != nil && field.Comment.Pos() <= cursor && cursor <= field.Comment.End() {
- score = highScore
- } else if field.Doc != nil && field.Doc.Pos() <= cursor && cursor <= field.Doc.End() {
- score = highScore
- }
-
- c.deepState.enqueue(candidate{obj: obj, score: score})
- }
- }
-}
-
-func (c *completer) wantStructFieldCompletions() bool {
- clInfo := c.enclosingCompositeLiteral
- if clInfo == nil {
- return false
- }
-
- return clInfo.isStruct() && (clInfo.inKey || clInfo.maybeInFieldName)
-}
-
-func (c *completer) wantTypeName() bool {
- return !c.completionContext.commentCompletion && c.inference.typeName.wantTypeName
-}
-
-// See https://golang.org/issue/36001. Unimported completions are expensive.
-const (
- maxUnimportedPackageNames = 5
- unimportedMemberTarget = 100
-)
-
-// selector finds completions for the specified selector expression.
-func (c *completer) selector(ctx context.Context, sel *ast.SelectorExpr) error {
- c.inference.objChain = objChain(c.pkg.GetTypesInfo(), sel.X)
-
- // Is sel a qualified identifier?
- if id, ok := sel.X.(*ast.Ident); ok {
- if pkgName, ok := c.pkg.GetTypesInfo().Uses[id].(*types.PkgName); ok {
- var pkg source.Package
- for _, imp := range c.pkg.Imports() {
- if imp.PkgPath() == pkgName.Imported().Path() {
- pkg = imp
- }
- }
- // If the package is not imported, try searching for unimported
- // completions.
- if pkg == nil && c.opts.unimported {
- if err := c.unimportedMembers(ctx, id); err != nil {
- return err
- }
- }
- c.packageMembers(pkgName.Imported(), stdScore, nil, func(cand candidate) {
- c.deepState.enqueue(cand)
- })
- return nil
- }
- }
-
- // Invariant: sel is a true selector.
- tv, ok := c.pkg.GetTypesInfo().Types[sel.X]
- if ok {
- c.methodsAndFields(tv.Type, tv.Addressable(), nil, func(cand candidate) {
- c.deepState.enqueue(cand)
- })
-
- c.addPostfixSnippetCandidates(ctx, sel)
-
- return nil
- }
-
- // Try unimported packages.
- if id, ok := sel.X.(*ast.Ident); ok && c.opts.unimported {
- if err := c.unimportedMembers(ctx, id); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (c *completer) unimportedMembers(ctx context.Context, id *ast.Ident) error {
- // Try loaded packages first. They're relevant, fast, and fully typed.
- known, err := c.snapshot.CachedImportPaths(ctx)
- if err != nil {
- return err
- }
-
- var paths []string
- for path, pkg := range known {
- if pkg.GetTypes().Name() != id.Name {
- continue
- }
- paths = append(paths, path)
- }
-
- var relevances map[string]float64
- if len(paths) != 0 {
- if err := c.snapshot.RunProcessEnvFunc(ctx, func(opts *imports.Options) error {
- var err error
- relevances, err = imports.ScoreImportPaths(ctx, opts.Env, paths)
- return err
- }); err != nil {
- return err
- }
- }
- sort.Slice(paths, func(i, j int) bool {
- return relevances[paths[i]] > relevances[paths[j]]
- })
-
- for _, path := range paths {
- pkg := known[path]
- if pkg.GetTypes().Name() != id.Name {
- continue
- }
- imp := &importInfo{
- importPath: path,
- pkg: pkg,
- }
- if imports.ImportPathToAssumedName(path) != pkg.GetTypes().Name() {
- imp.name = pkg.GetTypes().Name()
- }
- c.packageMembers(pkg.GetTypes(), unimportedScore(relevances[path]), imp, func(cand candidate) {
- c.deepState.enqueue(cand)
- })
- if len(c.items) >= unimportedMemberTarget {
- return nil
- }
- }
-
- ctx, cancel := context.WithCancel(ctx)
-
- var mu sync.Mutex
- add := func(pkgExport imports.PackageExport) {
- mu.Lock()
- defer mu.Unlock()
- if _, ok := known[pkgExport.Fix.StmtInfo.ImportPath]; ok {
- return // We got this one above.
- }
-
- // Continue with untyped proposals.
- pkg := types.NewPackage(pkgExport.Fix.StmtInfo.ImportPath, pkgExport.Fix.IdentName)
- for _, export := range pkgExport.Exports {
- score := unimportedScore(pkgExport.Fix.Relevance)
- c.deepState.enqueue(candidate{
- obj: types.NewVar(0, pkg, export, nil),
- score: score,
- imp: &importInfo{
- importPath: pkgExport.Fix.StmtInfo.ImportPath,
- name: pkgExport.Fix.StmtInfo.Name,
- },
- })
- }
- if len(c.items) >= unimportedMemberTarget {
- cancel()
- }
- }
-
- c.completionCallbacks = append(c.completionCallbacks, func(opts *imports.Options) error {
- defer cancel()
- return imports.GetPackageExports(ctx, add, id.Name, c.filename, c.pkg.GetTypes().Name(), opts.Env)
- })
- return nil
-}
-
-// unimportedScore returns a score for an unimported package that is generally
-// lower than other candidates.
-func unimportedScore(relevance float64) float64 {
- return (stdScore + .1*relevance) / 2
-}
-
-func (c *completer) packageMembers(pkg *types.Package, score float64, imp *importInfo, cb func(candidate)) {
- scope := pkg.Scope()
- for _, name := range scope.Names() {
- obj := scope.Lookup(name)
- cb(candidate{
- obj: obj,
- score: score,
- imp: imp,
- addressable: isVar(obj),
- })
- }
-}
-
-func (c *completer) methodsAndFields(typ types.Type, addressable bool, imp *importInfo, cb func(candidate)) {
- mset := c.methodSetCache[methodSetKey{typ, addressable}]
- if mset == nil {
- if addressable && !types.IsInterface(typ) && !isPointer(typ) {
- // Add methods of *T, which includes methods with receiver T.
- mset = types.NewMethodSet(types.NewPointer(typ))
- } else {
- // Add methods of T.
- mset = types.NewMethodSet(typ)
- }
- c.methodSetCache[methodSetKey{typ, addressable}] = mset
- }
-
- if typ.String() == "*testing.F" && addressable {
- // is that a sufficient test? (or is more care needed?)
- if c.fuzz(typ, mset, imp, cb, c.snapshot.FileSet()) {
- return
- }
- }
-
- for i := 0; i < mset.Len(); i++ {
- cb(candidate{
- obj: mset.At(i).Obj(),
- score: stdScore,
- imp: imp,
- addressable: addressable || isPointer(typ),
- })
- }
-
- // Add fields of T.
- eachField(typ, func(v *types.Var) {
- cb(candidate{
- obj: v,
- score: stdScore - 0.01,
- imp: imp,
- addressable: addressable || isPointer(typ),
- })
- })
-}
-
-// lexical finds completions in the lexical environment.
-func (c *completer) lexical(ctx context.Context) error {
- scopes := source.CollectScopes(c.pkg.GetTypesInfo(), c.path, c.pos)
- scopes = append(scopes, c.pkg.GetTypes().Scope(), types.Universe)
-
- var (
- builtinIota = types.Universe.Lookup("iota")
- builtinNil = types.Universe.Lookup("nil")
- // comparable is an interface that exists on the dev.typeparams Go branch.
- // Filter it out from completion results to stabilize tests.
- // TODO(rFindley) update (or remove) our handling for comparable once the
- // type parameter API has stabilized.
- builtinAny = types.Universe.Lookup("any")
- builtinComparable = types.Universe.Lookup("comparable")
- )
-
- // Track seen variables to avoid showing completions for shadowed variables.
- // This works since we look at scopes from innermost to outermost.
- seen := make(map[string]struct{})
-
- // Process scopes innermost first.
- for i, scope := range scopes {
- if scope == nil {
- continue
- }
-
- Names:
- for _, name := range scope.Names() {
- declScope, obj := scope.LookupParent(name, c.pos)
- if declScope != scope {
- continue // Name was declared in some enclosing scope, or not at all.
- }
- if obj == builtinComparable || obj == builtinAny {
- continue
- }
-
- // If obj's type is invalid, find the AST node that defines the lexical block
- // containing the declaration of obj. Don't resolve types for packages.
- if !isPkgName(obj) && !typeIsValid(obj.Type()) {
- // Match the scope to its ast.Node. If the scope is the package scope,
- // use the *ast.File as the starting node.
- var node ast.Node
- if i < len(c.path) {
- node = c.path[i]
- } else if i == len(c.path) { // use the *ast.File for package scope
- node = c.path[i-1]
- }
- if node != nil {
- if resolved := resolveInvalid(c.snapshot.FileSet(), obj, node, c.pkg.GetTypesInfo()); resolved != nil {
- obj = resolved
- }
- }
- }
-
- // Don't use LHS of decl in RHS.
- for _, ident := range enclosingDeclLHS(c.path) {
- if obj.Pos() == ident.Pos() {
- continue Names
- }
- }
-
- // Don't suggest "iota" outside of const decls.
- if obj == builtinIota && !c.inConstDecl() {
- continue
- }
-
- // Rank outer scopes lower than inner.
- score := stdScore * math.Pow(.99, float64(i))
-
- // Dowrank "nil" a bit so it is ranked below more interesting candidates.
- if obj == builtinNil {
- score /= 2
- }
-
- // If we haven't already added a candidate for an object with this name.
- if _, ok := seen[obj.Name()]; !ok {
- seen[obj.Name()] = struct{}{}
- c.deepState.enqueue(candidate{
- obj: obj,
- score: score,
- addressable: isVar(obj),
- })
- }
- }
- }
-
- if c.inference.objType != nil {
- if named, _ := source.Deref(c.inference.objType).(*types.Named); named != nil {
- // If we expected a named type, check the type's package for
- // completion items. This is useful when the current file hasn't
- // imported the type's package yet.
-
- if named.Obj() != nil && named.Obj().Pkg() != nil {
- pkg := named.Obj().Pkg()
-
- // Make sure the package name isn't already in use by another
- // object, and that this file doesn't import the package yet.
- if _, ok := seen[pkg.Name()]; !ok && pkg != c.pkg.GetTypes() && !alreadyImports(c.file, pkg.Path()) {
- seen[pkg.Name()] = struct{}{}
- obj := types.NewPkgName(0, nil, pkg.Name(), pkg)
- imp := &importInfo{
- importPath: pkg.Path(),
- }
- if imports.ImportPathToAssumedName(pkg.Path()) != pkg.Name() {
- imp.name = pkg.Name()
- }
- c.deepState.enqueue(candidate{
- obj: obj,
- score: stdScore,
- imp: imp,
- })
- }
- }
- }
- }
-
- if c.opts.unimported {
- if err := c.unimportedPackages(ctx, seen); err != nil {
- return err
- }
- }
-
- if c.inference.typeName.isTypeParam {
- // If we are completing a type param, offer each structural type.
- // This ensures we suggest "[]int" and "[]float64" for a constraint
- // with type union "[]int | []float64".
- if t, _ := c.inference.objType.(*types.Interface); t != nil {
- terms, _ := typeparams.InterfaceTermSet(t)
- for _, term := range terms {
- c.injectType(ctx, term.Type())
- }
- }
- } else {
- c.injectType(ctx, c.inference.objType)
- }
-
- // Add keyword completion items appropriate in the current context.
- c.addKeywordCompletions()
-
- return nil
-}
-
-// injectInferredType manufacters candidates based on the given type.
-// For example, if the type is "[]int", this method makes sure you get
-// candidates "[]int{}" and "[]int" (the latter applies when
-// completing a type name).
-func (c *completer) injectType(ctx context.Context, t types.Type) {
- if t == nil {
- return
- }
-
- t = source.Deref(t)
-
- // If we have an expected type and it is _not_ a named type,
- // handle it specially. Non-named types like "[]int" will never be
- // considered via a lexical search, so we need to directly inject
- // them.
- if _, named := t.(*types.Named); !named {
- // If our expected type is "[]int", this will add a literal
- // candidate of "[]int{}".
- c.literal(ctx, t, nil)
-
- if _, isBasic := t.(*types.Basic); !isBasic {
- // If we expect a non-basic type name (e.g. "[]int"), hack up
- // a named type whose name is literally "[]int". This allows
- // us to reuse our object based completion machinery.
- fakeNamedType := candidate{
- obj: types.NewTypeName(token.NoPos, nil, types.TypeString(t, c.qf), t),
- score: stdScore,
- }
- // Make sure the type name matches before considering
- // candidate. This cuts down on useless candidates.
- if c.matchingTypeName(&fakeNamedType) {
- c.deepState.enqueue(fakeNamedType)
- }
- }
- }
-}
-
-func (c *completer) unimportedPackages(ctx context.Context, seen map[string]struct{}) error {
- var prefix string
- if c.surrounding != nil {
- prefix = c.surrounding.Prefix()
- }
-
- // Don't suggest unimported packages if we have absolutely nothing
- // to go on.
- if prefix == "" {
- return nil
- }
-
- count := 0
-
- known, err := c.snapshot.CachedImportPaths(ctx)
- if err != nil {
- return err
- }
- var paths []string
- for path, pkg := range known {
- if !strings.HasPrefix(pkg.GetTypes().Name(), prefix) {
- continue
- }
- paths = append(paths, path)
- }
-
- var relevances map[string]float64
- if len(paths) != 0 {
- if err := c.snapshot.RunProcessEnvFunc(ctx, func(opts *imports.Options) error {
- var err error
- relevances, err = imports.ScoreImportPaths(ctx, opts.Env, paths)
- return err
- }); err != nil {
- return err
- }
- }
-
- sort.Slice(paths, func(i, j int) bool {
- if relevances[paths[i]] != relevances[paths[j]] {
- return relevances[paths[i]] > relevances[paths[j]]
- }
-
- // Fall back to lexical sort to keep truncated set of candidates
- // in a consistent order.
- return paths[i] < paths[j]
- })
-
- for _, path := range paths {
- pkg := known[path]
- if _, ok := seen[pkg.GetTypes().Name()]; ok {
- continue
- }
- imp := &importInfo{
- importPath: path,
- pkg: pkg,
- }
- if imports.ImportPathToAssumedName(path) != pkg.GetTypes().Name() {
- imp.name = pkg.GetTypes().Name()
- }
- if count >= maxUnimportedPackageNames {
- return nil
- }
- c.deepState.enqueue(candidate{
- // Pass an empty *types.Package to disable deep completions.
- obj: types.NewPkgName(0, nil, pkg.GetTypes().Name(), types.NewPackage(path, pkg.Name())),
- score: unimportedScore(relevances[path]),
- imp: imp,
- })
- count++
- }
-
- ctx, cancel := context.WithCancel(ctx)
-
- var mu sync.Mutex
- add := func(pkg imports.ImportFix) {
- mu.Lock()
- defer mu.Unlock()
- if _, ok := seen[pkg.IdentName]; ok {
- return
- }
- if _, ok := relevances[pkg.StmtInfo.ImportPath]; ok {
- return
- }
-
- if count >= maxUnimportedPackageNames {
- cancel()
- return
- }
-
- // Do not add the unimported packages to seen, since we can have
- // multiple packages of the same name as completion suggestions, since
- // only one will be chosen.
- obj := types.NewPkgName(0, nil, pkg.IdentName, types.NewPackage(pkg.StmtInfo.ImportPath, pkg.IdentName))
- c.deepState.enqueue(candidate{
- obj: obj,
- score: unimportedScore(pkg.Relevance),
- imp: &importInfo{
- importPath: pkg.StmtInfo.ImportPath,
- name: pkg.StmtInfo.Name,
- },
- })
- count++
- }
- c.completionCallbacks = append(c.completionCallbacks, func(opts *imports.Options) error {
- defer cancel()
- return imports.GetAllCandidates(ctx, add, prefix, c.filename, c.pkg.GetTypes().Name(), opts.Env)
- })
- return nil
-}
-
-// alreadyImports reports whether f has an import with the specified path.
-func alreadyImports(f *ast.File, path string) bool {
- for _, s := range f.Imports {
- if source.ImportPath(s) == path {
- return true
- }
- }
- return false
-}
-
-func (c *completer) inConstDecl() bool {
- for _, n := range c.path {
- if decl, ok := n.(*ast.GenDecl); ok && decl.Tok == token.CONST {
- return true
- }
- }
- return false
-}
-
-// structLiteralFieldName finds completions for struct field names inside a struct literal.
-func (c *completer) structLiteralFieldName(ctx context.Context) error {
- clInfo := c.enclosingCompositeLiteral
-
- // Mark fields of the composite literal that have already been set,
- // except for the current field.
- addedFields := make(map[*types.Var]bool)
- for _, el := range clInfo.cl.Elts {
- if kvExpr, ok := el.(*ast.KeyValueExpr); ok {
- if clInfo.kv == kvExpr {
- continue
- }
-
- if key, ok := kvExpr.Key.(*ast.Ident); ok {
- if used, ok := c.pkg.GetTypesInfo().Uses[key]; ok {
- if usedVar, ok := used.(*types.Var); ok {
- addedFields[usedVar] = true
- }
- }
- }
- }
- }
-
- deltaScore := 0.0001
- switch t := clInfo.clType.(type) {
- case *types.Struct:
- for i := 0; i < t.NumFields(); i++ {
- field := t.Field(i)
- if !addedFields[field] {
- c.deepState.enqueue(candidate{
- obj: field,
- score: highScore - float64(i)*deltaScore,
- })
- }
- }
-
- // Add lexical completions if we aren't certain we are in the key part of a
- // key-value pair.
- if clInfo.maybeInFieldName {
- return c.lexical(ctx)
- }
- default:
- return c.lexical(ctx)
- }
-
- return nil
-}
-
-func (cl *compLitInfo) isStruct() bool {
- _, ok := cl.clType.(*types.Struct)
- return ok
-}
-
-// enclosingCompositeLiteral returns information about the composite literal enclosing the
-// position.
-func enclosingCompositeLiteral(path []ast.Node, pos token.Pos, info *types.Info) *compLitInfo {
- for _, n := range path {
- switch n := n.(type) {
- case *ast.CompositeLit:
- // The enclosing node will be a composite literal if the user has just
- // opened the curly brace (e.g. &x{<>) or the completion request is triggered
- // from an already completed composite literal expression (e.g. &x{foo: 1, <>})
- //
- // The position is not part of the composite literal unless it falls within the
- // curly braces (e.g. "foo.Foo<>Struct{}").
- if !(n.Lbrace < pos && pos <= n.Rbrace) {
- // Keep searching since we may yet be inside a composite literal.
- // For example "Foo{B: Ba<>{}}".
- break
- }
-
- tv, ok := info.Types[n]
- if !ok {
- return nil
- }
-
- clInfo := compLitInfo{
- cl: n,
- clType: source.Deref(tv.Type).Underlying(),
- }
-
- var (
- expr ast.Expr
- hasKeys bool
- )
- for _, el := range n.Elts {
- // Remember the expression that the position falls in, if any.
- if el.Pos() <= pos && pos <= el.End() {
- expr = el
- }
-
- if kv, ok := el.(*ast.KeyValueExpr); ok {
- hasKeys = true
- // If expr == el then we know the position falls in this expression,
- // so also record kv as the enclosing *ast.KeyValueExpr.
- if expr == el {
- clInfo.kv = kv
- break
- }
- }
- }
-
- if clInfo.kv != nil {
- // If in a *ast.KeyValueExpr, we know we are in the key if the position
- // is to the left of the colon (e.g. "Foo{F<>: V}".
- clInfo.inKey = pos <= clInfo.kv.Colon
- } else if hasKeys {
- // If we aren't in a *ast.KeyValueExpr but the composite literal has
- // other *ast.KeyValueExprs, we must be on the key side of a new
- // *ast.KeyValueExpr (e.g. "Foo{F: V, <>}").
- clInfo.inKey = true
- } else {
- switch clInfo.clType.(type) {
- case *types.Struct:
- if len(n.Elts) == 0 {
- // If the struct literal is empty, next could be a struct field
- // name or an expression (e.g. "Foo{<>}" could become "Foo{F:}"
- // or "Foo{someVar}").
- clInfo.maybeInFieldName = true
- } else if len(n.Elts) == 1 {
- // If there is one expression and the position is in that expression
- // and the expression is an identifier, we may be writing a field
- // name or an expression (e.g. "Foo{F<>}").
- _, clInfo.maybeInFieldName = expr.(*ast.Ident)
- }
- case *types.Map:
- // If we aren't in a *ast.KeyValueExpr we must be adding a new key
- // to the map.
- clInfo.inKey = true
- }
- }
-
- return &clInfo
- default:
- if breaksExpectedTypeInference(n, pos) {
- return nil
- }
- }
- }
-
- return nil
-}
-
-// enclosingFunction returns the signature and body of the function
-// enclosing the given position.
-func enclosingFunction(path []ast.Node, info *types.Info) *funcInfo {
- for _, node := range path {
- switch t := node.(type) {
- case *ast.FuncDecl:
- if obj, ok := info.Defs[t.Name]; ok {
- return &funcInfo{
- sig: obj.Type().(*types.Signature),
- body: t.Body,
- }
- }
- case *ast.FuncLit:
- if typ, ok := info.Types[t]; ok {
- if sig, _ := typ.Type.(*types.Signature); sig == nil {
- // golang/go#49397: it should not be possible, but we somehow arrived
- // here with a non-signature type, most likely due to AST mangling
- // such that node.Type is not a FuncType.
- return nil
- }
- return &funcInfo{
- sig: typ.Type.(*types.Signature),
- body: t.Body,
- }
- }
- }
- }
- return nil
-}
-
-func (c *completer) expectedCompositeLiteralType() types.Type {
- clInfo := c.enclosingCompositeLiteral
- switch t := clInfo.clType.(type) {
- case *types.Slice:
- if clInfo.inKey {
- return types.Typ[types.UntypedInt]
- }
- return t.Elem()
- case *types.Array:
- if clInfo.inKey {
- return types.Typ[types.UntypedInt]
- }
- return t.Elem()
- case *types.Map:
- if clInfo.inKey {
- return t.Key()
- }
- return t.Elem()
- case *types.Struct:
- // If we are completing a key (i.e. field name), there is no expected type.
- if clInfo.inKey {
- return nil
- }
-
- // If we are in a key-value pair, but not in the key, then we must be on the
- // value side. The expected type of the value will be determined from the key.
- if clInfo.kv != nil {
- if key, ok := clInfo.kv.Key.(*ast.Ident); ok {
- for i := 0; i < t.NumFields(); i++ {
- if field := t.Field(i); field.Name() == key.Name {
- return field.Type()
- }
- }
- }
- } else {
- // If we aren't in a key-value pair and aren't in the key, we must be using
- // implicit field names.
-
- // The order of the literal fields must match the order in the struct definition.
- // Find the element that the position belongs to and suggest that field's type.
- if i := exprAtPos(c.pos, clInfo.cl.Elts); i < t.NumFields() {
- return t.Field(i).Type()
- }
- }
- }
- return nil
-}
-
-// typeMod represents an operator that changes the expected type.
-type typeMod struct {
- mod typeModKind
- arrayLen int64
-}
-
-type typeModKind int
-
-const (
- dereference typeModKind = iota // pointer indirection: "*"
- reference // adds level of pointer: "&" for values, "*" for type names
- chanRead // channel read operator: "<-"
- sliceType // make a slice type: "[]" in "[]int"
- arrayType // make an array type: "[2]" in "[2]int"
- invoke // make a function call: "()" in "foo()"
- takeSlice // take slice of array: "[:]" in "foo[:]"
- takeDotDotDot // turn slice into variadic args: "..." in "foo..."
- index // index into slice/array: "[0]" in "foo[0]"
-)
-
-type objKind int
-
-const (
- kindAny objKind = 0
- kindArray objKind = 1 << iota
- kindSlice
- kindChan
- kindMap
- kindStruct
- kindString
- kindInt
- kindBool
- kindBytes
- kindPtr
- kindFloat
- kindComplex
- kindError
- kindStringer
- kindFunc
-)
-
-// penalizedObj represents an object that should be disfavored as a
-// completion candidate.
-type penalizedObj struct {
- // objChain is the full "chain", e.g. "foo.bar().baz" becomes
- // []types.Object{foo, bar, baz}.
- objChain []types.Object
- // penalty is score penalty in the range (0, 1).
- penalty float64
-}
-
-// candidateInference holds information we have inferred about a type that can be
-// used at the current position.
-type candidateInference struct {
- // objType is the desired type of an object used at the query position.
- objType types.Type
-
- // objKind is a mask of expected kinds of types such as "map", "slice", etc.
- objKind objKind
-
- // variadic is true if we are completing the initial variadic
- // parameter. For example:
- // append([]T{}, <>) // objType=T variadic=true
- // append([]T{}, T{}, <>) // objType=T variadic=false
- variadic bool
-
- // modifiers are prefixes such as "*", "&" or "<-" that influence how
- // a candidate type relates to the expected type.
- modifiers []typeMod
-
- // convertibleTo is a type our candidate type must be convertible to.
- convertibleTo types.Type
-
- // typeName holds information about the expected type name at
- // position, if any.
- typeName typeNameInference
-
- // assignees are the types that would receive a function call's
- // results at the position. For example:
- //
- // foo := 123
- // foo, bar := <>
- //
- // at "<>", the assignees are [int, <invalid>].
- assignees []types.Type
-
- // variadicAssignees is true if we could be completing an inner
- // function call that fills out an outer function call's variadic
- // params. For example:
- //
- // func foo(int, ...string) {}
- //
- // foo(<>) // variadicAssignees=true
- // foo(bar<>) // variadicAssignees=true
- // foo(bar, baz<>) // variadicAssignees=false
- variadicAssignees bool
-
- // penalized holds expressions that should be disfavored as
- // candidates. For example, it tracks expressions already used in a
- // switch statement's other cases. Each expression is tracked using
- // its entire object "chain" allowing differentiation between
- // "a.foo" and "b.foo" when "a" and "b" are the same type.
- penalized []penalizedObj
-
- // objChain contains the chain of objects representing the
- // surrounding *ast.SelectorExpr. For example, if we are completing
- // "foo.bar.ba<>", objChain will contain []types.Object{foo, bar}.
- objChain []types.Object
-}
-
-// typeNameInference holds information about the expected type name at
-// position.
-type typeNameInference struct {
- // wantTypeName is true if we expect the name of a type.
- wantTypeName bool
-
- // modifiers are prefixes such as "*", "&" or "<-" that influence how
- // a candidate type relates to the expected type.
- modifiers []typeMod
-
- // assertableFrom is a type that must be assertable to our candidate type.
- assertableFrom types.Type
-
- // wantComparable is true if we want a comparable type.
- wantComparable bool
-
- // seenTypeSwitchCases tracks types that have already been used by
- // the containing type switch.
- seenTypeSwitchCases []types.Type
-
- // compLitType is true if we are completing a composite literal type
- // name, e.g "foo<>{}".
- compLitType bool
-
- // isTypeParam is true if we are completing a type instantiation parameter
- isTypeParam bool
-}
-
-// expectedCandidate returns information about the expected candidate
-// for an expression at the query position.
-func expectedCandidate(ctx context.Context, c *completer) (inf candidateInference) {
- inf.typeName = expectTypeName(c)
-
- if c.enclosingCompositeLiteral != nil {
- inf.objType = c.expectedCompositeLiteralType()
- }
-
-Nodes:
- for i, node := range c.path {
- switch node := node.(type) {
- case *ast.BinaryExpr:
- // Determine if query position comes from left or right of op.
- e := node.X
- if c.pos < node.OpPos {
- e = node.Y
- }
- if tv, ok := c.pkg.GetTypesInfo().Types[e]; ok {
- switch node.Op {
- case token.LAND, token.LOR:
- // Don't infer "bool" type for "&&" or "||". Often you want
- // to compose a boolean expression from non-boolean
- // candidates.
- default:
- inf.objType = tv.Type
- }
- break Nodes
- }
- case *ast.AssignStmt:
- // Only rank completions if you are on the right side of the token.
- if c.pos > node.TokPos {
- i := exprAtPos(c.pos, node.Rhs)
- if i >= len(node.Lhs) {
- i = len(node.Lhs) - 1
- }
- if tv, ok := c.pkg.GetTypesInfo().Types[node.Lhs[i]]; ok {
- inf.objType = tv.Type
- }
-
- // If we have a single expression on the RHS, record the LHS
- // assignees so we can favor multi-return function calls with
- // matching result values.
- if len(node.Rhs) <= 1 {
- for _, lhs := range node.Lhs {
- inf.assignees = append(inf.assignees, c.pkg.GetTypesInfo().TypeOf(lhs))
- }
- } else {
- // Otherwse, record our single assignee, even if its type is
- // not available. We use this info to downrank functions
- // with the wrong number of result values.
- inf.assignees = append(inf.assignees, c.pkg.GetTypesInfo().TypeOf(node.Lhs[i]))
- }
- }
- return inf
- case *ast.ValueSpec:
- if node.Type != nil && c.pos > node.Type.End() {
- inf.objType = c.pkg.GetTypesInfo().TypeOf(node.Type)
- }
- return inf
- case *ast.CallExpr:
- // Only consider CallExpr args if position falls between parens.
- if node.Lparen < c.pos && c.pos <= node.Rparen {
- // For type conversions like "int64(foo)" we can only infer our
- // desired type is convertible to int64.
- if typ := typeConversion(node, c.pkg.GetTypesInfo()); typ != nil {
- inf.convertibleTo = typ
- break Nodes
- }
-
- if tv, ok := c.pkg.GetTypesInfo().Types[node.Fun]; ok {
- if sig, ok := tv.Type.(*types.Signature); ok {
- numParams := sig.Params().Len()
- if numParams == 0 {
- return inf
- }
-
- exprIdx := exprAtPos(c.pos, node.Args)
-
- // If we have one or zero arg expressions, we may be
- // completing to a function call that returns multiple
- // values, in turn getting passed in to the surrounding
- // call. Record the assignees so we can favor function
- // calls that return matching values.
- if len(node.Args) <= 1 && exprIdx == 0 {
- for i := 0; i < sig.Params().Len(); i++ {
- inf.assignees = append(inf.assignees, sig.Params().At(i).Type())
- }
-
- // Record that we may be completing into variadic parameters.
- inf.variadicAssignees = sig.Variadic()
- }
-
- // Make sure not to run past the end of expected parameters.
- if exprIdx >= numParams {
- inf.objType = sig.Params().At(numParams - 1).Type()
- } else {
- inf.objType = sig.Params().At(exprIdx).Type()
- }
-
- if sig.Variadic() && exprIdx >= (numParams-1) {
- // If we are completing a variadic param, deslice the variadic type.
- inf.objType = deslice(inf.objType)
- // Record whether we are completing the initial variadic param.
- inf.variadic = exprIdx == numParams-1 && len(node.Args) <= numParams
-
- // Check if we can infer object kind from printf verb.
- inf.objKind |= printfArgKind(c.pkg.GetTypesInfo(), node, exprIdx)
- }
- }
- }
-
- if funIdent, ok := node.Fun.(*ast.Ident); ok {
- obj := c.pkg.GetTypesInfo().ObjectOf(funIdent)
-
- if obj != nil && obj.Parent() == types.Universe {
- // Defer call to builtinArgType so we can provide it the
- // inferred type from its parent node.
- defer func() {
- inf = c.builtinArgType(obj, node, inf)
- inf.objKind = c.builtinArgKind(ctx, obj, node)
- }()
-
- // The expected type of builtin arguments like append() is
- // the expected type of the builtin call itself. For
- // example:
- //
- // var foo []int = append(<>)
- //
- // To find the expected type at <> we "skip" the append()
- // node and get the expected type one level up, which is
- // []int.
- continue Nodes
- }
- }
-
- return inf
- }
- case *ast.ReturnStmt:
- if c.enclosingFunc != nil {
- sig := c.enclosingFunc.sig
- // Find signature result that corresponds to our return statement.
- if resultIdx := exprAtPos(c.pos, node.Results); resultIdx < len(node.Results) {
- if resultIdx < sig.Results().Len() {
- inf.objType = sig.Results().At(resultIdx).Type()
- }
- }
- }
- return inf
- case *ast.CaseClause:
- if swtch, ok := findSwitchStmt(c.path[i+1:], c.pos, node).(*ast.SwitchStmt); ok {
- if tv, ok := c.pkg.GetTypesInfo().Types[swtch.Tag]; ok {
- inf.objType = tv.Type
-
- // Record which objects have already been used in the case
- // statements so we don't suggest them again.
- for _, cc := range swtch.Body.List {
- for _, caseExpr := range cc.(*ast.CaseClause).List {
- // Don't record the expression we are currently completing.
- if caseExpr.Pos() < c.pos && c.pos <= caseExpr.End() {
- continue
- }
-
- if objs := objChain(c.pkg.GetTypesInfo(), caseExpr); len(objs) > 0 {
- inf.penalized = append(inf.penalized, penalizedObj{objChain: objs, penalty: 0.1})
- }
- }
- }
- }
- }
- return inf
- case *ast.SliceExpr:
- // Make sure position falls within the brackets (e.g. "foo[a:<>]").
- if node.Lbrack < c.pos && c.pos <= node.Rbrack {
- inf.objType = types.Typ[types.UntypedInt]
- }
- return inf
- case *ast.IndexExpr:
- // Make sure position falls within the brackets (e.g. "foo[<>]").
- if node.Lbrack < c.pos && c.pos <= node.Rbrack {
- if tv, ok := c.pkg.GetTypesInfo().Types[node.X]; ok {
- switch t := tv.Type.Underlying().(type) {
- case *types.Map:
- inf.objType = t.Key()
- case *types.Slice, *types.Array:
- inf.objType = types.Typ[types.UntypedInt]
- }
-
- if ct := expectedConstraint(tv.Type, 0); ct != nil {
- inf.objType = ct
- inf.typeName.wantTypeName = true
- inf.typeName.isTypeParam = true
- }
- }
- }
- return inf
- case *typeparams.IndexListExpr:
- if node.Lbrack < c.pos && c.pos <= node.Rbrack {
- if tv, ok := c.pkg.GetTypesInfo().Types[node.X]; ok {
- if ct := expectedConstraint(tv.Type, exprAtPos(c.pos, node.Indices)); ct != nil {
- inf.objType = ct
- inf.typeName.wantTypeName = true
- inf.typeName.isTypeParam = true
- }
- }
- }
- return inf
- case *ast.SendStmt:
- // Make sure we are on right side of arrow (e.g. "foo <- <>").
- if c.pos > node.Arrow+1 {
- if tv, ok := c.pkg.GetTypesInfo().Types[node.Chan]; ok {
- if ch, ok := tv.Type.Underlying().(*types.Chan); ok {
- inf.objType = ch.Elem()
- }
- }
- }
- return inf
- case *ast.RangeStmt:
- if source.NodeContains(node.X, c.pos) {
- inf.objKind |= kindSlice | kindArray | kindMap | kindString
- if node.Value == nil {
- inf.objKind |= kindChan
- }
- }
- return inf
- case *ast.StarExpr:
- inf.modifiers = append(inf.modifiers, typeMod{mod: dereference})
- case *ast.UnaryExpr:
- switch node.Op {
- case token.AND:
- inf.modifiers = append(inf.modifiers, typeMod{mod: reference})
- case token.ARROW:
- inf.modifiers = append(inf.modifiers, typeMod{mod: chanRead})
- }
- case *ast.DeferStmt, *ast.GoStmt:
- inf.objKind |= kindFunc
- return inf
- default:
- if breaksExpectedTypeInference(node, c.pos) {
- return inf
- }
- }
- }
-
- return inf
-}
-
-func expectedConstraint(t types.Type, idx int) types.Type {
- var tp *typeparams.TypeParamList
- if named, _ := t.(*types.Named); named != nil {
- tp = typeparams.ForNamed(named)
- } else if sig, _ := t.Underlying().(*types.Signature); sig != nil {
- tp = typeparams.ForSignature(sig)
- }
- if tp == nil || idx >= tp.Len() {
- return nil
- }
- return tp.At(idx).Constraint()
-}
-
-// objChain decomposes e into a chain of objects if possible. For
-// example, "foo.bar().baz" will yield []types.Object{foo, bar, baz}.
-// If any part can't be turned into an object, return nil.
-func objChain(info *types.Info, e ast.Expr) []types.Object {
- var objs []types.Object
-
- for e != nil {
- switch n := e.(type) {
- case *ast.Ident:
- obj := info.ObjectOf(n)
- if obj == nil {
- return nil
- }
- objs = append(objs, obj)
- e = nil
- case *ast.SelectorExpr:
- obj := info.ObjectOf(n.Sel)
- if obj == nil {
- return nil
- }
- objs = append(objs, obj)
- e = n.X
- case *ast.CallExpr:
- if len(n.Args) > 0 {
- return nil
- }
- e = n.Fun
- default:
- return nil
- }
- }
-
- // Reverse order so the layout matches the syntactic order.
- for i := 0; i < len(objs)/2; i++ {
- objs[i], objs[len(objs)-1-i] = objs[len(objs)-1-i], objs[i]
- }
-
- return objs
-}
-
-// applyTypeModifiers applies the list of type modifiers to a type.
-// It returns nil if the modifiers could not be applied.
-func (ci candidateInference) applyTypeModifiers(typ types.Type, addressable bool) types.Type {
- for _, mod := range ci.modifiers {
- switch mod.mod {
- case dereference:
- // For every "*" indirection operator, remove a pointer layer
- // from candidate type.
- if ptr, ok := typ.Underlying().(*types.Pointer); ok {
- typ = ptr.Elem()
- } else {
- return nil
- }
- case reference:
- // For every "&" address operator, add another pointer layer to
- // candidate type, if the candidate is addressable.
- if addressable {
- typ = types.NewPointer(typ)
- } else {
- return nil
- }
- case chanRead:
- // For every "<-" operator, remove a layer of channelness.
- if ch, ok := typ.(*types.Chan); ok {
- typ = ch.Elem()
- } else {
- return nil
- }
- }
- }
-
- return typ
-}
-
-// applyTypeNameModifiers applies the list of type modifiers to a type name.
-func (ci candidateInference) applyTypeNameModifiers(typ types.Type) types.Type {
- for _, mod := range ci.typeName.modifiers {
- switch mod.mod {
- case reference:
- typ = types.NewPointer(typ)
- case arrayType:
- typ = types.NewArray(typ, mod.arrayLen)
- case sliceType:
- typ = types.NewSlice(typ)
- }
- }
- return typ
-}
-
-// matchesVariadic returns true if we are completing a variadic
-// parameter and candType is a compatible slice type.
-func (ci candidateInference) matchesVariadic(candType types.Type) bool {
- return ci.variadic && ci.objType != nil && types.AssignableTo(candType, types.NewSlice(ci.objType))
-}
-
-// findSwitchStmt returns an *ast.CaseClause's corresponding *ast.SwitchStmt or
-// *ast.TypeSwitchStmt. path should start from the case clause's first ancestor.
-func findSwitchStmt(path []ast.Node, pos token.Pos, c *ast.CaseClause) ast.Stmt {
- // Make sure position falls within a "case <>:" clause.
- if exprAtPos(pos, c.List) >= len(c.List) {
- return nil
- }
- // A case clause is always nested within a block statement in a switch statement.
- if len(path) < 2 {
- return nil
- }
- if _, ok := path[0].(*ast.BlockStmt); !ok {
- return nil
- }
- switch s := path[1].(type) {
- case *ast.SwitchStmt:
- return s
- case *ast.TypeSwitchStmt:
- return s
- default:
- return nil
- }
-}
-
-// breaksExpectedTypeInference reports if an expression node's type is unrelated
-// to its child expression node types. For example, "Foo{Bar: x.Baz(<>)}" should
-// expect a function argument, not a composite literal value.
-func breaksExpectedTypeInference(n ast.Node, pos token.Pos) bool {
- switch n := n.(type) {
- case *ast.CompositeLit:
- // Doesn't break inference if pos is in type name.
- // For example: "Foo<>{Bar: 123}"
- return !source.NodeContains(n.Type, pos)
- case *ast.CallExpr:
- // Doesn't break inference if pos is in func name.
- // For example: "Foo<>(123)"
- return !source.NodeContains(n.Fun, pos)
- case *ast.FuncLit, *ast.IndexExpr, *ast.SliceExpr:
- return true
- default:
- return false
- }
-}
-
-// expectTypeName returns information about the expected type name at position.
-func expectTypeName(c *completer) typeNameInference {
- var inf typeNameInference
-
-Nodes:
- for i, p := range c.path {
- switch n := p.(type) {
- case *ast.FieldList:
- // Expect a type name if pos is in a FieldList. This applies to
- // FuncType params/results, FuncDecl receiver, StructType, and
- // InterfaceType. We don't need to worry about the field name
- // because completion bails out early if pos is in an *ast.Ident
- // that defines an object.
- inf.wantTypeName = true
- break Nodes
- case *ast.CaseClause:
- // Expect type names in type switch case clauses.
- if swtch, ok := findSwitchStmt(c.path[i+1:], c.pos, n).(*ast.TypeSwitchStmt); ok {
- // The case clause types must be assertable from the type switch parameter.
- ast.Inspect(swtch.Assign, func(n ast.Node) bool {
- if ta, ok := n.(*ast.TypeAssertExpr); ok {
- inf.assertableFrom = c.pkg.GetTypesInfo().TypeOf(ta.X)
- return false
- }
- return true
- })
- inf.wantTypeName = true
-
- // Track the types that have already been used in this
- // switch's case statements so we don't recommend them.
- for _, e := range swtch.Body.List {
- for _, typeExpr := range e.(*ast.CaseClause).List {
- // Skip if type expression contains pos. We don't want to
- // count it as already used if the user is completing it.
- if typeExpr.Pos() < c.pos && c.pos <= typeExpr.End() {
- continue
- }
-
- if t := c.pkg.GetTypesInfo().TypeOf(typeExpr); t != nil {
- inf.seenTypeSwitchCases = append(inf.seenTypeSwitchCases, t)
- }
- }
- }
-
- break Nodes
- }
- return typeNameInference{}
- case *ast.TypeAssertExpr:
- // Expect type names in type assert expressions.
- if n.Lparen < c.pos && c.pos <= n.Rparen {
- // The type in parens must be assertable from the expression type.
- inf.assertableFrom = c.pkg.GetTypesInfo().TypeOf(n.X)
- inf.wantTypeName = true
- break Nodes
- }
- return typeNameInference{}
- case *ast.StarExpr:
- inf.modifiers = append(inf.modifiers, typeMod{mod: reference})
- case *ast.CompositeLit:
- // We want a type name if position is in the "Type" part of a
- // composite literal (e.g. "Foo<>{}").
- if n.Type != nil && n.Type.Pos() <= c.pos && c.pos <= n.Type.End() {
- inf.wantTypeName = true
- inf.compLitType = true
-
- if i < len(c.path)-1 {
- // Track preceding "&" operator. Technically it applies to
- // the composite literal and not the type name, but if
- // affects our type completion nonetheless.
- if u, ok := c.path[i+1].(*ast.UnaryExpr); ok && u.Op == token.AND {
- inf.modifiers = append(inf.modifiers, typeMod{mod: reference})
- }
- }
- }
- break Nodes
- case *ast.ArrayType:
- // If we are inside the "Elt" part of an array type, we want a type name.
- if n.Elt.Pos() <= c.pos && c.pos <= n.Elt.End() {
- inf.wantTypeName = true
- if n.Len == nil {
- // No "Len" expression means a slice type.
- inf.modifiers = append(inf.modifiers, typeMod{mod: sliceType})
- } else {
- // Try to get the array type using the constant value of "Len".
- tv, ok := c.pkg.GetTypesInfo().Types[n.Len]
- if ok && tv.Value != nil && tv.Value.Kind() == constant.Int {
- if arrayLen, ok := constant.Int64Val(tv.Value); ok {
- inf.modifiers = append(inf.modifiers, typeMod{mod: arrayType, arrayLen: arrayLen})
- }
- }
- }
-
- // ArrayTypes can be nested, so keep going if our parent is an
- // ArrayType.
- if i < len(c.path)-1 {
- if _, ok := c.path[i+1].(*ast.ArrayType); ok {
- continue Nodes
- }
- }
-
- break Nodes
- }
- case *ast.MapType:
- inf.wantTypeName = true
- if n.Key != nil {
- inf.wantComparable = source.NodeContains(n.Key, c.pos)
- } else {
- // If the key is empty, assume we are completing the key if
- // pos is directly after the "map[".
- inf.wantComparable = c.pos == n.Pos()+token.Pos(len("map["))
- }
- break Nodes
- case *ast.ValueSpec:
- inf.wantTypeName = source.NodeContains(n.Type, c.pos)
- break Nodes
- case *ast.TypeSpec:
- inf.wantTypeName = source.NodeContains(n.Type, c.pos)
- default:
- if breaksExpectedTypeInference(p, c.pos) {
- return typeNameInference{}
- }
- }
- }
-
- return inf
-}
-
-func (c *completer) fakeObj(T types.Type) *types.Var {
- return types.NewVar(token.NoPos, c.pkg.GetTypes(), "", T)
-}
-
-// derivableTypes iterates types you can derive from t. For example,
-// from "foo" we might derive "&foo", and "foo()".
-func derivableTypes(t types.Type, addressable bool, f func(t types.Type, addressable bool, mod typeModKind) bool) bool {
- switch t := t.Underlying().(type) {
- case *types.Signature:
- // If t is a func type with a single result, offer the result type.
- if t.Results().Len() == 1 && f(t.Results().At(0).Type(), false, invoke) {
- return true
- }
- case *types.Array:
- if f(t.Elem(), true, index) {
- return true
- }
- // Try converting array to slice.
- if f(types.NewSlice(t.Elem()), false, takeSlice) {
- return true
- }
- case *types.Pointer:
- if f(t.Elem(), false, dereference) {
- return true
- }
- case *types.Slice:
- if f(t.Elem(), true, index) {
- return true
- }
- case *types.Map:
- if f(t.Elem(), false, index) {
- return true
- }
- case *types.Chan:
- if f(t.Elem(), false, chanRead) {
- return true
- }
- }
-
- // Check if c is addressable and a pointer to c matches our type inference.
- if addressable && f(types.NewPointer(t), false, reference) {
- return true
- }
-
- return false
-}
-
-// anyCandType reports whether f returns true for any candidate type
-// derivable from c. It searches up to three levels of type
-// modification. For example, given "foo" we could discover "***foo"
-// or "*foo()".
-func (c *candidate) anyCandType(f func(t types.Type, addressable bool) bool) bool {
- if c.obj == nil || c.obj.Type() == nil {
- return false
- }
-
- const maxDepth = 3
-
- var searchTypes func(t types.Type, addressable bool, mods []typeModKind) bool
- searchTypes = func(t types.Type, addressable bool, mods []typeModKind) bool {
- if f(t, addressable) {
- if len(mods) > 0 {
- newMods := make([]typeModKind, len(mods)+len(c.mods))
- copy(newMods, mods)
- copy(newMods[len(mods):], c.mods)
- c.mods = newMods
- }
- return true
- }
-
- if len(mods) == maxDepth {
- return false
- }
-
- return derivableTypes(t, addressable, func(t types.Type, addressable bool, mod typeModKind) bool {
- return searchTypes(t, addressable, append(mods, mod))
- })
- }
-
- return searchTypes(c.obj.Type(), c.addressable, make([]typeModKind, 0, maxDepth))
-}
-
-// matchingCandidate reports whether cand matches our type inferences.
-// It mutates cand's score in certain cases.
-func (c *completer) matchingCandidate(cand *candidate) bool {
- if c.completionContext.commentCompletion {
- return false
- }
-
- // Bail out early if we are completing a field name in a composite literal.
- if v, ok := cand.obj.(*types.Var); ok && v.IsField() && c.wantStructFieldCompletions() {
- return true
- }
-
- if isTypeName(cand.obj) {
- return c.matchingTypeName(cand)
- } else if c.wantTypeName() {
- // If we want a type, a non-type object never matches.
- return false
- }
-
- if c.inference.candTypeMatches(cand) {
- return true
- }
-
- candType := cand.obj.Type()
- if candType == nil {
- return false
- }
-
- if sig, ok := candType.Underlying().(*types.Signature); ok {
- if c.inference.assigneesMatch(cand, sig) {
- // Invoke the candidate if its results are multi-assignable.
- cand.mods = append(cand.mods, invoke)
- return true
- }
- }
-
- // Default to invoking *types.Func candidates. This is so function
- // completions in an empty statement (or other cases with no expected type)
- // are invoked by default.
- if isFunc(cand.obj) {
- cand.mods = append(cand.mods, invoke)
- }
-
- return false
-}
-
-// candTypeMatches reports whether cand makes a good completion
-// candidate given the candidate inference. cand's score may be
-// mutated to downrank the candidate in certain situations.
-func (ci *candidateInference) candTypeMatches(cand *candidate) bool {
- var (
- expTypes = make([]types.Type, 0, 2)
- variadicType types.Type
- )
- if ci.objType != nil {
- expTypes = append(expTypes, ci.objType)
-
- if ci.variadic {
- variadicType = types.NewSlice(ci.objType)
- expTypes = append(expTypes, variadicType)
- }
- }
-
- return cand.anyCandType(func(candType types.Type, addressable bool) bool {
- // Take into account any type modifiers on the expected type.
- candType = ci.applyTypeModifiers(candType, addressable)
- if candType == nil {
- return false
- }
-
- if ci.convertibleTo != nil && types.ConvertibleTo(candType, ci.convertibleTo) {
- return true
- }
-
- for _, expType := range expTypes {
- if isEmptyInterface(expType) {
- continue
- }
-
- matches := ci.typeMatches(expType, candType)
- if !matches {
- // If candType doesn't otherwise match, consider if we can
- // convert candType directly to expType.
- if considerTypeConversion(candType, expType, cand.path) {
- cand.convertTo = expType
- // Give a major score penalty so we always prefer directly
- // assignable candidates, all else equal.
- cand.score *= 0.5
- return true
- }
-
- continue
- }
-
- if expType == variadicType {
- cand.mods = append(cand.mods, takeDotDotDot)
- }
-
- // Lower candidate score for untyped conversions. This avoids
- // ranking untyped constants above candidates with an exact type
- // match. Don't lower score of builtin constants, e.g. "true".
- if isUntyped(candType) && !types.Identical(candType, expType) && cand.obj.Parent() != types.Universe {
- // Bigger penalty for deep completions into other packages to
- // avoid random constants from other packages popping up all
- // the time.
- if len(cand.path) > 0 && isPkgName(cand.path[0]) {
- cand.score *= 0.5
- } else {
- cand.score *= 0.75
- }
- }
-
- return true
- }
-
- // If we don't have a specific expected type, fall back to coarser
- // object kind checks.
- if ci.objType == nil || isEmptyInterface(ci.objType) {
- // If we were able to apply type modifiers to our candidate type,
- // count that as a match. For example:
- //
- // var foo chan int
- // <-fo<>
- //
- // We were able to apply the "<-" type modifier to "foo", so "foo"
- // matches.
- if len(ci.modifiers) > 0 {
- return true
- }
-
- // If we didn't have an exact type match, check if our object kind
- // matches.
- if ci.kindMatches(candType) {
- if ci.objKind == kindFunc {
- cand.mods = append(cand.mods, invoke)
- }
- return true
- }
- }
-
- return false
- })
-}
-
-// considerTypeConversion returns true if we should offer a completion
-// automatically converting "from" to "to".
-func considerTypeConversion(from, to types.Type, path []types.Object) bool {
- // Don't offer to convert deep completions from other packages.
- // Otherwise there are many random package level consts/vars that
- // pop up as candidates all the time.
- if len(path) > 0 && isPkgName(path[0]) {
- return false
- }
-
- if _, ok := from.(*typeparams.TypeParam); ok {
- return false
- }
-
- if !types.ConvertibleTo(from, to) {
- return false
- }
-
- // Don't offer to convert ints to strings since that probably
- // doesn't do what the user wants.
- if isBasicKind(from, types.IsInteger) && isBasicKind(to, types.IsString) {
- return false
- }
-
- return true
-}
-
-// typeMatches reports whether an object of candType makes a good
-// completion candidate given the expected type expType.
-func (ci *candidateInference) typeMatches(expType, candType types.Type) bool {
- // Handle untyped values specially since AssignableTo gives false negatives
- // for them (see https://golang.org/issue/32146).
- if candBasic, ok := candType.Underlying().(*types.Basic); ok {
- if expBasic, ok := expType.Underlying().(*types.Basic); ok {
- // Note that the candidate and/or the expected can be untyped.
- // In "fo<> == 100" the expected type is untyped, and the
- // candidate could also be an untyped constant.
-
- // Sort by is_untyped and then by is_int to simplify below logic.
- a, b := candBasic.Info(), expBasic.Info()
- if a&types.IsUntyped == 0 || (b&types.IsInteger > 0 && b&types.IsUntyped > 0) {
- a, b = b, a
- }
-
- // If at least one is untyped...
- if a&types.IsUntyped > 0 {
- switch {
- // Untyped integers are compatible with floats.
- case a&types.IsInteger > 0 && b&types.IsFloat > 0:
- return true
-
- // Check if their constant kind (bool|int|float|complex|string) matches.
- // This doesn't take into account the constant value, so there will be some
- // false positives due to integer sign and overflow.
- case a&types.IsConstType == b&types.IsConstType:
- return true
- }
- }
- }
- }
-
- // AssignableTo covers the case where the types are equal, but also handles
- // cases like assigning a concrete type to an interface type.
- return types.AssignableTo(candType, expType)
-}
-
-// kindMatches reports whether candType's kind matches our expected
-// kind (e.g. slice, map, etc.).
-func (ci *candidateInference) kindMatches(candType types.Type) bool {
- return ci.objKind > 0 && ci.objKind&candKind(candType) > 0
-}
-
-// assigneesMatch reports whether an invocation of sig matches the
-// number and type of any assignees.
-func (ci *candidateInference) assigneesMatch(cand *candidate, sig *types.Signature) bool {
- if len(ci.assignees) == 0 {
- return false
- }
-
- // Uniresult functions are always usable and are handled by the
- // normal, non-assignees type matching logic.
- if sig.Results().Len() == 1 {
- return false
- }
-
- // Don't prefer completing into func(...interface{}) calls since all
- // functions wouuld match.
- if ci.variadicAssignees && len(ci.assignees) == 1 && isEmptyInterface(deslice(ci.assignees[0])) {
- return false
- }
-
- var numberOfResultsCouldMatch bool
- if ci.variadicAssignees {
- numberOfResultsCouldMatch = sig.Results().Len() >= len(ci.assignees)-1
- } else {
- numberOfResultsCouldMatch = sig.Results().Len() == len(ci.assignees)
- }
-
- // If our signature doesn't return the right number of values, it's
- // not a match, so downrank it. For example:
- //
- // var foo func() (int, int)
- // a, b, c := <> // downrank "foo()" since it only returns two values
- if !numberOfResultsCouldMatch {
- cand.score /= 2
- return false
- }
-
- // If at least one assignee has a valid type, and all valid
- // assignees match the corresponding sig result value, the signature
- // is a match.
- allMatch := false
- for i := 0; i < sig.Results().Len(); i++ {
- var assignee types.Type
-
- // If we are completing into variadic parameters, deslice the
- // expected variadic type.
- if ci.variadicAssignees && i >= len(ci.assignees)-1 {
- assignee = ci.assignees[len(ci.assignees)-1]
- if elem := deslice(assignee); elem != nil {
- assignee = elem
- }
- } else {
- assignee = ci.assignees[i]
- }
-
- if assignee == nil {
- continue
- }
-
- allMatch = ci.typeMatches(assignee, sig.Results().At(i).Type())
- if !allMatch {
- break
- }
- }
- return allMatch
-}
-
-func (c *completer) matchingTypeName(cand *candidate) bool {
- if !c.wantTypeName() {
- return false
- }
-
- typeMatches := func(candType types.Type) bool {
- // Take into account any type name modifier prefixes.
- candType = c.inference.applyTypeNameModifiers(candType)
-
- if from := c.inference.typeName.assertableFrom; from != nil {
- // Don't suggest the starting type in type assertions. For example,
- // if "foo" is an io.Writer, don't suggest "foo.(io.Writer)".
- if types.Identical(from, candType) {
- return false
- }
-
- if intf, ok := from.Underlying().(*types.Interface); ok {
- if !types.AssertableTo(intf, candType) {
- return false
- }
- }
- }
-
- if c.inference.typeName.wantComparable && !types.Comparable(candType) {
- return false
- }
-
- // Skip this type if it has already been used in another type
- // switch case.
- for _, seen := range c.inference.typeName.seenTypeSwitchCases {
- if types.Identical(candType, seen) {
- return false
- }
- }
-
- // We can expect a type name and have an expected type in cases like:
- //
- // var foo []int
- // foo = []i<>
- //
- // Where our expected type is "[]int", and we expect a type name.
- if c.inference.objType != nil {
- return types.AssignableTo(candType, c.inference.objType)
- }
-
- // Default to saying any type name is a match.
- return true
- }
-
- t := cand.obj.Type()
-
- if typeMatches(t) {
- return true
- }
-
- if !source.IsInterface(t) && typeMatches(types.NewPointer(t)) {
- if c.inference.typeName.compLitType {
- // If we are completing a composite literal type as in
- // "foo<>{}", to make a pointer we must prepend "&".
- cand.mods = append(cand.mods, reference)
- } else {
- // If we are completing a normal type name such as "foo<>", to
- // make a pointer we must prepend "*".
- cand.mods = append(cand.mods, dereference)
- }
- return true
- }
-
- return false
-}
-
-var (
- // "interface { Error() string }" (i.e. error)
- errorIntf = types.Universe.Lookup("error").Type().Underlying().(*types.Interface)
-
- // "interface { String() string }" (i.e. fmt.Stringer)
- stringerIntf = types.NewInterfaceType([]*types.Func{
- types.NewFunc(token.NoPos, nil, "String", types.NewSignature(
- nil,
- nil,
- types.NewTuple(types.NewParam(token.NoPos, nil, "", types.Typ[types.String])),
- false,
- )),
- }, nil).Complete()
-
- byteType = types.Universe.Lookup("byte").Type()
-)
-
-// candKind returns the objKind of candType, if any.
-func candKind(candType types.Type) objKind {
- var kind objKind
-
- switch t := candType.Underlying().(type) {
- case *types.Array:
- kind |= kindArray
- if t.Elem() == byteType {
- kind |= kindBytes
- }
- case *types.Slice:
- kind |= kindSlice
- if t.Elem() == byteType {
- kind |= kindBytes
- }
- case *types.Chan:
- kind |= kindChan
- case *types.Map:
- kind |= kindMap
- case *types.Pointer:
- kind |= kindPtr
-
- // Some builtins handle array pointers as arrays, so just report a pointer
- // to an array as an array.
- if _, isArray := t.Elem().Underlying().(*types.Array); isArray {
- kind |= kindArray
- }
- case *types.Basic:
- switch info := t.Info(); {
- case info&types.IsString > 0:
- kind |= kindString
- case info&types.IsInteger > 0:
- kind |= kindInt
- case info&types.IsFloat > 0:
- kind |= kindFloat
- case info&types.IsComplex > 0:
- kind |= kindComplex
- case info&types.IsBoolean > 0:
- kind |= kindBool
- }
- case *types.Signature:
- return kindFunc
- }
-
- if types.Implements(candType, errorIntf) {
- kind |= kindError
- }
-
- if types.Implements(candType, stringerIntf) {
- kind |= kindStringer
- }
-
- return kind
-}
diff --git a/internal/lsp/source/completion/deep_completion.go b/internal/lsp/source/completion/deep_completion.go
deleted file mode 100644
index a13d807d4..000000000
--- a/internal/lsp/source/completion/deep_completion.go
+++ /dev/null
@@ -1,362 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package completion
-
-import (
- "context"
- "go/types"
- "strings"
- "time"
-)
-
-// MaxDeepCompletions limits deep completion results because in most cases
-// there are too many to be useful.
-const MaxDeepCompletions = 3
-
-// deepCompletionState stores our state as we search for deep completions.
-// "deep completion" refers to searching into objects' fields and methods to
-// find more completion candidates.
-type deepCompletionState struct {
- // enabled indicates wether deep completion is permitted.
- enabled bool
-
- // queueClosed is used to disable adding new sub-fields to search queue
- // once we're running out of our time budget.
- queueClosed bool
-
- // thisQueue holds the current breadth first search queue.
- thisQueue []candidate
-
- // nextQueue holds the next breadth first search iteration's queue.
- nextQueue []candidate
-
- // highScores tracks the highest deep candidate scores we have found
- // so far. This is used to avoid work for low scoring deep candidates.
- highScores [MaxDeepCompletions]float64
-
- // candidateCount is the count of unique deep candidates encountered
- // so far.
- candidateCount int
-}
-
-// enqueue adds a candidate to the search queue.
-func (s *deepCompletionState) enqueue(cand candidate) {
- s.nextQueue = append(s.nextQueue, cand)
-}
-
-// dequeue removes and returns the leftmost element from the search queue.
-func (s *deepCompletionState) dequeue() *candidate {
- var cand *candidate
- cand, s.thisQueue = &s.thisQueue[len(s.thisQueue)-1], s.thisQueue[:len(s.thisQueue)-1]
- return cand
-}
-
-// scorePenalty computes a deep candidate score penalty. A candidate is
-// penalized based on depth to favor shallower candidates. We also give a
-// slight bonus to unexported objects and a slight additional penalty to
-// function objects.
-func (s *deepCompletionState) scorePenalty(cand *candidate) float64 {
- var deepPenalty float64
- for _, dc := range cand.path {
- deepPenalty++
-
- if !dc.Exported() {
- deepPenalty -= 0.1
- }
-
- if _, isSig := dc.Type().Underlying().(*types.Signature); isSig {
- deepPenalty += 0.1
- }
- }
-
- // Normalize penalty to a max depth of 10.
- return deepPenalty / 10
-}
-
-// isHighScore returns whether score is among the top MaxDeepCompletions deep
-// candidate scores encountered so far. If so, it adds score to highScores,
-// possibly displacing an existing high score.
-func (s *deepCompletionState) isHighScore(score float64) bool {
- // Invariant: s.highScores is sorted with highest score first. Unclaimed
- // positions are trailing zeros.
-
- // If we beat an existing score then take its spot.
- for i, deepScore := range s.highScores {
- if score <= deepScore {
- continue
- }
-
- if deepScore != 0 && i != len(s.highScores)-1 {
- // If this wasn't an empty slot then we need to scooch everyone
- // down one spot.
- copy(s.highScores[i+1:], s.highScores[i:])
- }
- s.highScores[i] = score
- return true
- }
-
- return false
-}
-
-// newPath returns path from search root for an object following a given
-// candidate.
-func (s *deepCompletionState) newPath(cand candidate, obj types.Object) []types.Object {
- path := make([]types.Object, len(cand.path)+1)
- copy(path, cand.path)
- path[len(path)-1] = obj
-
- return path
-}
-
-// deepSearch searches a candidate and its subordinate objects for completion
-// items if deep completion is enabled and adds the valid candidates to
-// completion items.
-func (c *completer) deepSearch(ctx context.Context) {
- defer func() {
- // We can return early before completing the search, so be sure to
- // clear out our queues to not impact any further invocations.
- c.deepState.thisQueue = c.deepState.thisQueue[:0]
- c.deepState.nextQueue = c.deepState.nextQueue[:0]
- }()
-
- for len(c.deepState.nextQueue) > 0 {
- c.deepState.thisQueue, c.deepState.nextQueue = c.deepState.nextQueue, c.deepState.thisQueue[:0]
-
- outer:
- for _, cand := range c.deepState.thisQueue {
- obj := cand.obj
-
- if obj == nil {
- continue
- }
-
- // At the top level, dedupe by object.
- if len(cand.path) == 0 {
- if c.seen[obj] {
- continue
- }
- c.seen[obj] = true
- }
-
- // If obj is not accessible because it lives in another package and is
- // not exported, don't treat it as a completion candidate unless it's
- // a package completion candidate.
- if !c.completionContext.packageCompletion &&
- obj.Pkg() != nil && obj.Pkg() != c.pkg.GetTypes() && !obj.Exported() {
- continue
- }
-
- // If we want a type name, don't offer non-type name candidates.
- // However, do offer package names since they can contain type names,
- // and do offer any candidate without a type since we aren't sure if it
- // is a type name or not (i.e. unimported candidate).
- if c.wantTypeName() && obj.Type() != nil && !isTypeName(obj) && !isPkgName(obj) {
- continue
- }
-
- // When searching deep, make sure we don't have a cycle in our chain.
- // We don't dedupe by object because we want to allow both "foo.Baz"
- // and "bar.Baz" even though "Baz" is represented the same types.Object
- // in both.
- for _, seenObj := range cand.path {
- if seenObj == obj {
- continue outer
- }
- }
-
- c.addCandidate(ctx, &cand)
-
- c.deepState.candidateCount++
- if c.opts.budget > 0 && c.deepState.candidateCount%100 == 0 {
- spent := float64(time.Since(c.startTime)) / float64(c.opts.budget)
- select {
- case <-ctx.Done():
- return
- default:
- // If we are almost out of budgeted time, no further elements
- // should be added to the queue. This ensures remaining time is
- // used for processing current queue.
- if !c.deepState.queueClosed && spent >= 0.85 {
- c.deepState.queueClosed = true
- }
- }
- }
-
- // if deep search is disabled, don't add any more candidates.
- if !c.deepState.enabled || c.deepState.queueClosed {
- continue
- }
-
- // Searching members for a type name doesn't make sense.
- if isTypeName(obj) {
- continue
- }
- if obj.Type() == nil {
- continue
- }
-
- // Don't search embedded fields because they were already included in their
- // parent's fields.
- if v, ok := obj.(*types.Var); ok && v.Embedded() {
- continue
- }
-
- if sig, ok := obj.Type().Underlying().(*types.Signature); ok {
- // If obj is a function that takes no arguments and returns one
- // value, keep searching across the function call.
- if sig.Params().Len() == 0 && sig.Results().Len() == 1 {
- path := c.deepState.newPath(cand, obj)
- // The result of a function call is not addressable.
- c.methodsAndFields(sig.Results().At(0).Type(), false, cand.imp, func(newCand candidate) {
- newCand.pathInvokeMask = cand.pathInvokeMask | (1 << uint64(len(cand.path)))
- newCand.path = path
- c.deepState.enqueue(newCand)
- })
- }
- }
-
- path := c.deepState.newPath(cand, obj)
- switch obj := obj.(type) {
- case *types.PkgName:
- c.packageMembers(obj.Imported(), stdScore, cand.imp, func(newCand candidate) {
- newCand.pathInvokeMask = cand.pathInvokeMask
- newCand.path = path
- c.deepState.enqueue(newCand)
- })
- default:
- c.methodsAndFields(obj.Type(), cand.addressable, cand.imp, func(newCand candidate) {
- newCand.pathInvokeMask = cand.pathInvokeMask
- newCand.path = path
- c.deepState.enqueue(newCand)
- })
- }
- }
- }
-}
-
-// addCandidate adds a completion candidate to suggestions, without searching
-// its members for more candidates.
-func (c *completer) addCandidate(ctx context.Context, cand *candidate) {
- obj := cand.obj
- if c.matchingCandidate(cand) {
- cand.score *= highScore
-
- if p := c.penalty(cand); p > 0 {
- cand.score *= (1 - p)
- }
- } else if isTypeName(obj) {
- // If obj is a *types.TypeName that didn't otherwise match, check
- // if a literal object of this type makes a good candidate.
-
- // We only care about named types (i.e. don't want builtin types).
- if _, isNamed := obj.Type().(*types.Named); isNamed {
- c.literal(ctx, obj.Type(), cand.imp)
- }
- }
-
- // Lower score of method calls so we prefer fields and vars over calls.
- if cand.hasMod(invoke) {
- if sig, ok := obj.Type().Underlying().(*types.Signature); ok && sig.Recv() != nil {
- cand.score *= 0.9
- }
- }
-
- // Prefer private objects over public ones.
- if !obj.Exported() && obj.Parent() != types.Universe {
- cand.score *= 1.1
- }
-
- // Slight penalty for index modifier (e.g. changing "foo" to
- // "foo[]") to curb false positives.
- if cand.hasMod(index) {
- cand.score *= 0.9
- }
-
- // Favor shallow matches by lowering score according to depth.
- cand.score -= cand.score * c.deepState.scorePenalty(cand)
-
- if cand.score < 0 {
- cand.score = 0
- }
-
- cand.name = deepCandName(cand)
- if item, err := c.item(ctx, *cand); err == nil {
- c.items = append(c.items, item)
- }
-}
-
-// deepCandName produces the full candidate name including any
-// ancestor objects. For example, "foo.bar().baz" for candidate "baz".
-func deepCandName(cand *candidate) string {
- totalLen := len(cand.obj.Name())
- for i, obj := range cand.path {
- totalLen += len(obj.Name()) + 1
- if cand.pathInvokeMask&(1<<uint16(i)) > 0 {
- totalLen += 2
- }
- }
-
- var buf strings.Builder
- buf.Grow(totalLen)
-
- for i, obj := range cand.path {
- buf.WriteString(obj.Name())
- if cand.pathInvokeMask&(1<<uint16(i)) > 0 {
- buf.WriteByte('(')
- buf.WriteByte(')')
- }
- buf.WriteByte('.')
- }
-
- buf.WriteString(cand.obj.Name())
-
- return buf.String()
-}
-
-// penalty reports a score penalty for cand in the range (0, 1).
-// For example, a candidate is penalized if it has already been used
-// in another switch case statement.
-func (c *completer) penalty(cand *candidate) float64 {
- for _, p := range c.inference.penalized {
- if c.objChainMatches(cand, p.objChain) {
- return p.penalty
- }
- }
-
- return 0
-}
-
-// objChainMatches reports whether cand combined with the surrounding
-// object prefix matches chain.
-func (c *completer) objChainMatches(cand *candidate, chain []types.Object) bool {
- // For example, when completing:
- //
- // foo.ba<>
- //
- // If we are considering the deep candidate "bar.baz", cand is baz,
- // objChain is [foo] and deepChain is [bar]. We would match the
- // chain [foo, bar, baz].
- if len(chain) != len(c.inference.objChain)+len(cand.path)+1 {
- return false
- }
-
- if chain[len(chain)-1] != cand.obj {
- return false
- }
-
- for i, o := range c.inference.objChain {
- if chain[i] != o {
- return false
- }
- }
-
- for i, o := range cand.path {
- if chain[i+len(c.inference.objChain)] != o {
- return false
- }
- }
-
- return true
-}
diff --git a/internal/lsp/source/completion/definition.go b/internal/lsp/source/completion/definition.go
deleted file mode 100644
index 17b251cb0..000000000
--- a/internal/lsp/source/completion/definition.go
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package completion
-
-import (
- "go/ast"
- "go/token"
- "go/types"
- "strings"
- "unicode"
- "unicode/utf8"
-
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/snippet"
- "golang.org/x/tools/internal/lsp/source"
-)
-
-// some definitions can be completed
-// So far, TestFoo(t *testing.T), TestMain(m *testing.M)
-// BenchmarkFoo(b *testing.B), FuzzFoo(f *testing.F)
-
-// path[0] is known to be *ast.Ident
-func definition(path []ast.Node, obj types.Object, fset *token.FileSet, mapper *protocol.ColumnMapper, fh source.FileHandle) ([]CompletionItem, *Selection) {
- if _, ok := obj.(*types.Func); !ok {
- return nil, nil // not a function at all
- }
- if !strings.HasSuffix(fh.URI().Filename(), "_test.go") {
- return nil, nil
- }
-
- name := path[0].(*ast.Ident).Name
- if len(name) == 0 {
- // can't happen
- return nil, nil
- }
- pos := path[0].Pos()
- sel := &Selection{
- content: "",
- cursor: pos,
- MappedRange: source.NewMappedRange(fset, mapper, pos, pos),
- }
- var ans []CompletionItem
-
- // Always suggest TestMain, if possible
- if strings.HasPrefix("TestMain", name) {
- ans = []CompletionItem{defItem("TestMain(m *testing.M)", obj)}
- }
-
- // If a snippet is possible, suggest it
- if strings.HasPrefix("Test", name) {
- ans = append(ans, defSnippet("Test", "Xxx", "(t *testing.T)", obj))
- return ans, sel
- } else if strings.HasPrefix("Benchmark", name) {
- ans = append(ans, defSnippet("Benchmark", "Xxx", "(b *testing.B)", obj))
- return ans, sel
- } else if strings.HasPrefix("Fuzz", name) {
- ans = append(ans, defSnippet("Fuzz", "Xxx", "(f *testing.F)", obj))
- return ans, sel
- }
-
- // Fill in the argument for what the user has already typed
- if got := defMatches(name, "Test", path, "(t *testing.T)"); got != "" {
- ans = append(ans, defItem(got, obj))
- } else if got := defMatches(name, "Benchmark", path, "(b *testing.B)"); got != "" {
- ans = append(ans, defItem(got, obj))
- } else if got := defMatches(name, "Fuzz", path, "(f *testing.F)"); got != "" {
- ans = append(ans, defItem(got, obj))
- }
- return ans, sel
-}
-
-func defMatches(name, pat string, path []ast.Node, arg string) string {
- idx := strings.Index(name, pat)
- if idx < 0 {
- return ""
- }
- c, _ := utf8.DecodeRuneInString(name[len(pat):])
- if unicode.IsLower(c) {
- return ""
- }
- fd, ok := path[1].(*ast.FuncDecl)
- if !ok {
- // we don't know what's going on
- return ""
- }
- fp := fd.Type.Params
- if fp != nil && len(fp.List) > 0 {
- // signature already there, minimal suggestion
- return name
- }
- // suggesting signature too
- return name + arg
-}
-
-func defSnippet(prefix, placeholder, suffix string, obj types.Object) CompletionItem {
- var sn snippet.Builder
- sn.WriteText(prefix)
- if placeholder != "" {
- sn.WritePlaceholder(func(b *snippet.Builder) { b.WriteText(placeholder) })
- }
- sn.WriteText(suffix + " {\n")
- sn.WriteFinalTabstop()
- sn.WriteText("\n}")
- return CompletionItem{
- Label: prefix + placeholder + suffix,
- Detail: "tab, type the rest of the name, then tab",
- Kind: protocol.FunctionCompletion,
- Depth: 0,
- Score: 10,
- snippet: &sn,
- Documentation: prefix + " test function",
- obj: obj,
- }
-}
-func defItem(val string, obj types.Object) CompletionItem {
- return CompletionItem{
- Label: val,
- InsertText: val,
- Kind: protocol.FunctionCompletion,
- Depth: 0,
- Score: 9, // prefer the snippets when available
- Documentation: "complete the parameter",
- obj: obj,
- }
-}
diff --git a/internal/lsp/source/completion/format.go b/internal/lsp/source/completion/format.go
deleted file mode 100644
index e67456911..000000000
--- a/internal/lsp/source/completion/format.go
+++ /dev/null
@@ -1,340 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package completion
-
-import (
- "context"
- "fmt"
- "go/ast"
- "go/doc"
- "go/types"
- "strings"
-
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/imports"
- "golang.org/x/tools/internal/lsp/debug/tag"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/snippet"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
- "golang.org/x/tools/internal/typeparams"
- errors "golang.org/x/xerrors"
-)
-
-var (
- errNoMatch = errors.New("not a surrounding match")
- errLowScore = errors.New("not a high scoring candidate")
-)
-
-// item formats a candidate to a CompletionItem.
-func (c *completer) item(ctx context.Context, cand candidate) (CompletionItem, error) {
- obj := cand.obj
-
- // if the object isn't a valid match against the surrounding, return early.
- matchScore := c.matcher.Score(cand.name)
- if matchScore <= 0 {
- return CompletionItem{}, errNoMatch
- }
- cand.score *= float64(matchScore)
-
- // Ignore deep candidates that wont be in the MaxDeepCompletions anyway.
- if len(cand.path) != 0 && !c.deepState.isHighScore(cand.score) {
- return CompletionItem{}, errLowScore
- }
-
- // Handle builtin types separately.
- if obj.Parent() == types.Universe {
- return c.formatBuiltin(ctx, cand)
- }
-
- var (
- label = cand.name
- detail = types.TypeString(obj.Type(), c.qf)
- insert = label
- kind = protocol.TextCompletion
- snip snippet.Builder
- protocolEdits []protocol.TextEdit
- )
- if obj.Type() == nil {
- detail = ""
- }
- if isTypeName(obj) && c.wantTypeParams() {
- x := cand.obj.(*types.TypeName)
- if named, ok := x.Type().(*types.Named); ok {
- tp := typeparams.ForNamed(named)
- label += source.FormatTypeParams(tp)
- insert = label // maintain invariant above (label == insert)
- }
- }
-
- snip.WriteText(insert)
-
- switch obj := obj.(type) {
- case *types.TypeName:
- detail, kind = source.FormatType(obj.Type(), c.qf)
- case *types.Const:
- kind = protocol.ConstantCompletion
- case *types.Var:
- if _, ok := obj.Type().(*types.Struct); ok {
- detail = "struct{...}" // for anonymous structs
- } else if obj.IsField() {
- detail = source.FormatVarType(ctx, c.snapshot, c.pkg, obj, c.qf)
- }
- if obj.IsField() {
- kind = protocol.FieldCompletion
- c.structFieldSnippet(cand, detail, &snip)
- } else {
- kind = protocol.VariableCompletion
- }
- if obj.Type() == nil {
- break
- }
- case *types.Func:
- sig, ok := obj.Type().Underlying().(*types.Signature)
- if !ok {
- break
- }
- kind = protocol.FunctionCompletion
- if sig != nil && sig.Recv() != nil {
- kind = protocol.MethodCompletion
- }
- case *types.PkgName:
- kind = protocol.ModuleCompletion
- detail = fmt.Sprintf("%q", obj.Imported().Path())
- case *types.Label:
- kind = protocol.ConstantCompletion
- detail = "label"
- }
-
- var prefix string
- for _, mod := range cand.mods {
- switch mod {
- case reference:
- prefix = "&" + prefix
- case dereference:
- prefix = "*" + prefix
- case chanRead:
- prefix = "<-" + prefix
- }
- }
-
- var (
- suffix string
- funcType = obj.Type()
- )
-Suffixes:
- for _, mod := range cand.mods {
- switch mod {
- case invoke:
- if sig, ok := funcType.Underlying().(*types.Signature); ok {
- s := source.NewSignature(ctx, c.snapshot, c.pkg, sig, nil, c.qf)
- c.functionCallSnippet("", s.TypeParams(), s.Params(), &snip)
- if sig.Results().Len() == 1 {
- funcType = sig.Results().At(0).Type()
- }
- detail = "func" + s.Format()
- }
-
- if !c.opts.snippets {
- // Without snippets the candidate will not include "()". Don't
- // add further suffixes since they will be invalid. For
- // example, with snippets "foo()..." would become "foo..."
- // without snippets if we added the dotDotDot.
- break Suffixes
- }
- case takeSlice:
- suffix += "[:]"
- case takeDotDotDot:
- suffix += "..."
- case index:
- snip.WriteText("[")
- snip.WritePlaceholder(nil)
- snip.WriteText("]")
- }
- }
-
- // If this candidate needs an additional import statement,
- // add the additional text edits needed.
- if cand.imp != nil {
- addlEdits, err := c.importEdits(cand.imp)
-
- if err != nil {
- return CompletionItem{}, err
- }
-
- protocolEdits = append(protocolEdits, addlEdits...)
- if kind != protocol.ModuleCompletion {
- if detail != "" {
- detail += " "
- }
- detail += fmt.Sprintf("(from %q)", cand.imp.importPath)
- }
- }
-
- if cand.convertTo != nil {
- typeName := types.TypeString(cand.convertTo, c.qf)
-
- switch cand.convertTo.(type) {
- // We need extra parens when casting to these types. For example,
- // we need "(*int)(foo)", not "*int(foo)".
- case *types.Pointer, *types.Signature:
- typeName = "(" + typeName + ")"
- }
-
- prefix = typeName + "(" + prefix
- suffix = ")"
- }
-
- if prefix != "" {
- // If we are in a selector, add an edit to place prefix before selector.
- if sel := enclosingSelector(c.path, c.pos); sel != nil {
- edits, err := c.editText(sel.Pos(), sel.Pos(), prefix)
- if err != nil {
- return CompletionItem{}, err
- }
- protocolEdits = append(protocolEdits, edits...)
- } else {
- // If there is no selector, just stick the prefix at the start.
- insert = prefix + insert
- snip.PrependText(prefix)
- }
- }
-
- if suffix != "" {
- insert += suffix
- snip.WriteText(suffix)
- }
-
- detail = strings.TrimPrefix(detail, "untyped ")
- // override computed detail with provided detail, if something is provided.
- if cand.detail != "" {
- detail = cand.detail
- }
- item := CompletionItem{
- Label: label,
- InsertText: insert,
- AdditionalTextEdits: protocolEdits,
- Detail: detail,
- Kind: kind,
- Score: cand.score,
- Depth: len(cand.path),
- snippet: &snip,
- obj: obj,
- }
- // If the user doesn't want documentation for completion items.
- if !c.opts.documentation {
- return item, nil
- }
- pos := c.snapshot.FileSet().Position(obj.Pos())
-
- // We ignore errors here, because some types, like "unsafe" or "error",
- // may not have valid positions that we can use to get documentation.
- if !pos.IsValid() {
- return item, nil
- }
- uri := span.URIFromPath(pos.Filename)
-
- // Find the source file of the candidate.
- pkg, err := source.FindPackageFromPos(ctx, c.snapshot, obj.Pos())
- if err != nil {
- return item, nil
- }
-
- decl, err := c.snapshot.PosToDecl(ctx, pkg, obj.Pos())
- if err != nil {
- return CompletionItem{}, err
- }
- hover, err := source.FindHoverContext(ctx, c.snapshot, pkg, obj, decl, nil)
- if err != nil {
- event.Error(ctx, "failed to find Hover", err, tag.URI.Of(uri))
- return item, nil
- }
- if c.opts.fullDocumentation {
- item.Documentation = hover.Comment.Text()
- } else {
- item.Documentation = doc.Synopsis(hover.Comment.Text())
- }
- // The desired pattern is `^// Deprecated`, but the prefix has been removed
- if strings.HasPrefix(hover.Comment.Text(), "Deprecated") {
- if c.snapshot.View().Options().CompletionTags {
- item.Tags = []protocol.CompletionItemTag{protocol.ComplDeprecated}
- } else if c.snapshot.View().Options().CompletionDeprecated {
- item.Deprecated = true
- }
- }
-
- return item, nil
-}
-
-// importEdits produces the text edits necessary to add the given import to the current file.
-func (c *completer) importEdits(imp *importInfo) ([]protocol.TextEdit, error) {
- if imp == nil {
- return nil, nil
- }
-
- pgf, err := c.pkg.File(span.URIFromPath(c.filename))
- if err != nil {
- return nil, err
- }
-
- return source.ComputeOneImportFixEdits(c.snapshot, pgf, &imports.ImportFix{
- StmtInfo: imports.ImportInfo{
- ImportPath: imp.importPath,
- Name: imp.name,
- },
- // IdentName is unused on this path and is difficult to get.
- FixType: imports.AddImport,
- })
-}
-
-func (c *completer) formatBuiltin(ctx context.Context, cand candidate) (CompletionItem, error) {
- obj := cand.obj
- item := CompletionItem{
- Label: obj.Name(),
- InsertText: obj.Name(),
- Score: cand.score,
- }
- switch obj.(type) {
- case *types.Const:
- item.Kind = protocol.ConstantCompletion
- case *types.Builtin:
- item.Kind = protocol.FunctionCompletion
- sig, err := source.NewBuiltinSignature(ctx, c.snapshot, obj.Name())
- if err != nil {
- return CompletionItem{}, err
- }
- item.Detail = "func" + sig.Format()
- item.snippet = &snippet.Builder{}
- c.functionCallSnippet(obj.Name(), sig.TypeParams(), sig.Params(), item.snippet)
- case *types.TypeName:
- if types.IsInterface(obj.Type()) {
- item.Kind = protocol.InterfaceCompletion
- } else {
- item.Kind = protocol.ClassCompletion
- }
- case *types.Nil:
- item.Kind = protocol.VariableCompletion
- }
- return item, nil
-}
-
-// decide if the type params (if any) should be part of the completion
-// which only possible for types.Named and types.Signature
-// (so far, only in receivers, e.g.; func (s *GENERIC[K, V])..., which is a types.Named)
-func (c *completer) wantTypeParams() bool {
- // Need to be lexically in a receiver, and a child of an IndexListExpr
- // (but IndexListExpr only exists with go1.18)
- start := c.path[0].Pos()
- for i, nd := range c.path {
- if fd, ok := nd.(*ast.FuncDecl); ok {
- if i > 0 && fd.Recv != nil && start < fd.Recv.End() {
- return true
- } else {
- return false
- }
- }
- }
- return false
-}
diff --git a/internal/lsp/source/completion/fuzz.go b/internal/lsp/source/completion/fuzz.go
deleted file mode 100644
index 92349ab93..000000000
--- a/internal/lsp/source/completion/fuzz.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package completion
-
-import (
- "fmt"
- "go/ast"
- "go/token"
- "go/types"
- "strings"
-
- "golang.org/x/tools/internal/lsp/protocol"
-)
-
-// golang/go#51089
-// *testing.F deserves special treatment as member use is constrained:
-// The arguments to f.Fuzz are determined by the arguments to a previous f.Add
-// Inside f.Fuzz only f.Failed and f.Name are allowed.
-// PJW: are there other packages where we can deduce usage constraints?
-
-// if we find fuzz completions, then return true, as those are the only completions to offer
-func (c *completer) fuzz(typ types.Type, mset *types.MethodSet, imp *importInfo, cb func(candidate), fset *token.FileSet) bool {
- // 1. inside f.Fuzz? (only f.Failed and f.Name)
- // 2. possible completing f.Fuzz?
- // [Ident,SelectorExpr,Callexpr,ExprStmt,BlockiStmt,FuncDecl(Fuzz...)]
- // 3. before f.Fuzz, same (for 2., offer choice when looking at an F)
-
- // does the path contain FuncLit as arg to f.Fuzz CallExpr?
- inside := false
-Loop:
- for i, n := range c.path {
- switch v := n.(type) {
- case *ast.CallExpr:
- if len(v.Args) != 1 {
- continue Loop
- }
- if _, ok := v.Args[0].(*ast.FuncLit); !ok {
- continue
- }
- if s, ok := v.Fun.(*ast.SelectorExpr); !ok || s.Sel.Name != "Fuzz" {
- continue
- }
- if i > 2 { // avoid t.Fuzz itself in tests
- inside = true
- break Loop
- }
- }
- }
- if inside {
- for i := 0; i < mset.Len(); i++ {
- o := mset.At(i).Obj()
- if o.Name() == "Failed" || o.Name() == "Name" {
- cb(candidate{
- obj: o,
- score: stdScore,
- imp: imp,
- addressable: true,
- })
- }
- }
- return true
- }
- // if it could be t.Fuzz, look for the preceding t.Add
- id, ok := c.path[0].(*ast.Ident)
- if ok && strings.HasPrefix("Fuzz", id.Name) {
- var add *ast.CallExpr
- f := func(n ast.Node) bool {
- if n == nil {
- return true
- }
- call, ok := n.(*ast.CallExpr)
- if !ok {
- return true
- }
- s, ok := call.Fun.(*ast.SelectorExpr)
- if !ok {
- return true
- }
- if s.Sel.Name != "Add" {
- return true
- }
- // Sel.X should be of type *testing.F
- got := c.pkg.GetTypesInfo().Types[s.X]
- if got.Type.String() == "*testing.F" {
- add = call
- }
- return false // because we're done...
- }
- // look at the enclosing FuzzFoo functions
- if len(c.path) < 2 {
- return false
- }
- n := c.path[len(c.path)-2]
- if _, ok := n.(*ast.FuncDecl); !ok {
- // the path should start with ast.File, ast.FuncDecl, ...
- // but it didn't, so give up
- return false
- }
- ast.Inspect(n, f)
- if add == nil {
- // looks like f.Fuzz without a preceding f.Add.
- // let the regular completion handle it.
- return false
- }
-
- lbl := "Fuzz(func(t *testing.T"
- for i, a := range add.Args {
- info := c.pkg.GetTypesInfo().TypeOf(a)
- if info == nil {
- return false // How could this happen, but better safe than panic.
- }
- lbl += fmt.Sprintf(", %c %s", 'a'+i, info)
- }
- lbl += ")"
- xx := CompletionItem{
- Label: lbl,
- InsertText: lbl,
- Kind: protocol.FunctionCompletion,
- Depth: 0,
- Score: 10, // pretty confident the user should see this
- Documentation: "argument types from f.Add",
- obj: nil,
- }
- c.items = append(c.items, xx)
- for i := 0; i < mset.Len(); i++ {
- o := mset.At(i).Obj()
- if o.Name() != "Fuzz" {
- cb(candidate{
- obj: o,
- score: stdScore,
- imp: imp,
- addressable: true,
- })
- }
- }
- return true // done
- }
- // let the standard processing take care of it instead
- return false
-}
diff --git a/internal/lsp/source/completion/keywords.go b/internal/lsp/source/completion/keywords.go
deleted file mode 100644
index bbf59b022..000000000
--- a/internal/lsp/source/completion/keywords.go
+++ /dev/null
@@ -1,154 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package completion
-
-import (
- "go/ast"
-
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
-)
-
-const (
- BREAK = "break"
- CASE = "case"
- CHAN = "chan"
- CONST = "const"
- CONTINUE = "continue"
- DEFAULT = "default"
- DEFER = "defer"
- ELSE = "else"
- FALLTHROUGH = "fallthrough"
- FOR = "for"
- FUNC = "func"
- GO = "go"
- GOTO = "goto"
- IF = "if"
- IMPORT = "import"
- INTERFACE = "interface"
- MAP = "map"
- PACKAGE = "package"
- RANGE = "range"
- RETURN = "return"
- SELECT = "select"
- STRUCT = "struct"
- SWITCH = "switch"
- TYPE = "type"
- VAR = "var"
-)
-
-// addKeywordCompletions offers keyword candidates appropriate at the position.
-func (c *completer) addKeywordCompletions() {
- seen := make(map[string]bool)
-
- if c.wantTypeName() && c.inference.objType == nil {
- // If we want a type name but don't have an expected obj type,
- // include "interface", "struct", "func", "chan", and "map".
-
- // "interface" and "struct" are more common declaring named types.
- // Give them a higher score if we are in a type declaration.
- structIntf, funcChanMap := stdScore, highScore
- if len(c.path) > 1 {
- if _, namedDecl := c.path[1].(*ast.TypeSpec); namedDecl {
- structIntf, funcChanMap = highScore, stdScore
- }
- }
-
- c.addKeywordItems(seen, structIntf, STRUCT, INTERFACE)
- c.addKeywordItems(seen, funcChanMap, FUNC, CHAN, MAP)
- }
-
- // If we are at the file scope, only offer decl keywords. We don't
- // get *ast.Idents at the file scope because non-keyword identifiers
- // turn into *ast.BadDecl, not *ast.Ident.
- if len(c.path) == 1 || isASTFile(c.path[1]) {
- c.addKeywordItems(seen, stdScore, TYPE, CONST, VAR, FUNC, IMPORT)
- return
- } else if _, ok := c.path[0].(*ast.Ident); !ok {
- // Otherwise only offer keywords if the client is completing an identifier.
- return
- }
-
- if len(c.path) > 2 {
- // Offer "range" if we are in ast.ForStmt.Init. This is what the
- // AST looks like before "range" is typed, e.g. "for i := r<>".
- if loop, ok := c.path[2].(*ast.ForStmt); ok && source.NodeContains(loop.Init, c.pos) {
- c.addKeywordItems(seen, stdScore, RANGE)
- }
- }
-
- // Only suggest keywords if we are beginning a statement.
- switch n := c.path[1].(type) {
- case *ast.BlockStmt, *ast.ExprStmt:
- // OK - our ident must be at beginning of statement.
- case *ast.CommClause:
- // Make sure we aren't in the Comm statement.
- if !n.Colon.IsValid() || c.pos <= n.Colon {
- return
- }
- case *ast.CaseClause:
- // Make sure we aren't in the case List.
- if !n.Colon.IsValid() || c.pos <= n.Colon {
- return
- }
- default:
- return
- }
-
- // Filter out keywords depending on scope
- // Skip the first one because we want to look at the enclosing scopes
- path := c.path[1:]
- for i, n := range path {
- switch node := n.(type) {
- case *ast.CaseClause:
- // only recommend "fallthrough" and "break" within the bodies of a case clause
- if c.pos > node.Colon {
- c.addKeywordItems(seen, stdScore, BREAK)
- // "fallthrough" is only valid in switch statements.
- // A case clause is always nested within a block statement in a switch statement,
- // that block statement is nested within either a TypeSwitchStmt or a SwitchStmt.
- if i+2 >= len(path) {
- continue
- }
- if _, ok := path[i+2].(*ast.SwitchStmt); ok {
- c.addKeywordItems(seen, stdScore, FALLTHROUGH)
- }
- }
- case *ast.CommClause:
- if c.pos > node.Colon {
- c.addKeywordItems(seen, stdScore, BREAK)
- }
- case *ast.TypeSwitchStmt, *ast.SelectStmt, *ast.SwitchStmt:
- c.addKeywordItems(seen, stdScore, CASE, DEFAULT)
- case *ast.ForStmt, *ast.RangeStmt:
- c.addKeywordItems(seen, stdScore, BREAK, CONTINUE)
- // This is a bit weak, functions allow for many keywords
- case *ast.FuncDecl:
- if node.Body != nil && c.pos > node.Body.Lbrace {
- c.addKeywordItems(seen, stdScore, DEFER, RETURN, FOR, GO, SWITCH, SELECT, IF, ELSE, VAR, CONST, GOTO, TYPE)
- }
- }
- }
-}
-
-// addKeywordItems dedupes and adds completion items for the specified
-// keywords with the specified score.
-func (c *completer) addKeywordItems(seen map[string]bool, score float64, kws ...string) {
- for _, kw := range kws {
- if seen[kw] {
- continue
- }
- seen[kw] = true
-
- if matchScore := c.matcher.Score(kw); matchScore > 0 {
- c.items = append(c.items, CompletionItem{
- Label: kw,
- Kind: protocol.KeywordCompletion,
- InsertText: kw,
- Score: score * float64(matchScore),
- })
- }
- }
-}
diff --git a/internal/lsp/source/completion/literal.go b/internal/lsp/source/completion/literal.go
deleted file mode 100644
index 5025f1f74..000000000
--- a/internal/lsp/source/completion/literal.go
+++ /dev/null
@@ -1,440 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package completion
-
-import (
- "context"
- "fmt"
- "go/types"
- "strings"
- "unicode"
-
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/snippet"
- "golang.org/x/tools/internal/lsp/source"
-)
-
-// literal generates composite literal, function literal, and make()
-// completion items.
-func (c *completer) literal(ctx context.Context, literalType types.Type, imp *importInfo) {
- if !c.opts.literal {
- return
- }
-
- expType := c.inference.objType
-
- if c.inference.matchesVariadic(literalType) {
- // Don't offer literal slice candidates for variadic arguments.
- // For example, don't offer "[]interface{}{}" in "fmt.Print(<>)".
- return
- }
-
- // Avoid literal candidates if the expected type is an empty
- // interface. It isn't very useful to suggest a literal candidate of
- // every possible type.
- if expType != nil && isEmptyInterface(expType) {
- return
- }
-
- // We handle unnamed literal completions explicitly before searching
- // for candidates. Avoid named-type literal completions for
- // unnamed-type expected type since that results in duplicate
- // candidates. For example, in
- //
- // type mySlice []int
- // var []int = <>
- //
- // don't offer "mySlice{}" since we have already added a candidate
- // of "[]int{}".
- if _, named := literalType.(*types.Named); named && expType != nil {
- if _, named := source.Deref(expType).(*types.Named); !named {
- return
- }
- }
-
- // Check if an object of type literalType would match our expected type.
- cand := candidate{
- obj: c.fakeObj(literalType),
- }
-
- switch literalType.Underlying().(type) {
- // These literal types are addressable (e.g. "&[]int{}"), others are
- // not (e.g. can't do "&(func(){})").
- case *types.Struct, *types.Array, *types.Slice, *types.Map:
- cand.addressable = true
- }
-
- if !c.matchingCandidate(&cand) || cand.convertTo != nil {
- return
- }
-
- var (
- qf = c.qf
- sel = enclosingSelector(c.path, c.pos)
- )
-
- // Don't qualify the type name if we are in a selector expression
- // since the package name is already present.
- if sel != nil {
- qf = func(_ *types.Package) string { return "" }
- }
-
- typeName := types.TypeString(literalType, qf)
-
- // A type name of "[]int" doesn't work very will with the matcher
- // since "[" isn't a valid identifier prefix. Here we strip off the
- // slice (and array) prefix yielding just "int".
- matchName := typeName
- switch t := literalType.(type) {
- case *types.Slice:
- matchName = types.TypeString(t.Elem(), qf)
- case *types.Array:
- matchName = types.TypeString(t.Elem(), qf)
- }
-
- addlEdits, err := c.importEdits(imp)
- if err != nil {
- event.Error(ctx, "error adding import for literal candidate", err)
- return
- }
-
- // If prefix matches the type name, client may want a composite literal.
- if score := c.matcher.Score(matchName); score > 0 {
- if cand.hasMod(reference) {
- if sel != nil {
- // If we are in a selector we must place the "&" before the selector.
- // For example, "foo.B<>" must complete to "&foo.Bar{}", not
- // "foo.&Bar{}".
- edits, err := c.editText(sel.Pos(), sel.Pos(), "&")
- if err != nil {
- event.Error(ctx, "error making edit for literal pointer completion", err)
- return
- }
- addlEdits = append(addlEdits, edits...)
- } else {
- // Otherwise we can stick the "&" directly before the type name.
- typeName = "&" + typeName
- }
- }
-
- switch t := literalType.Underlying().(type) {
- case *types.Struct, *types.Array, *types.Slice, *types.Map:
- c.compositeLiteral(t, typeName, float64(score), addlEdits)
- case *types.Signature:
- // Add a literal completion for a signature type that implements
- // an interface. For example, offer "http.HandlerFunc()" when
- // expected type is "http.Handler".
- if source.IsInterface(expType) {
- c.basicLiteral(t, typeName, float64(score), addlEdits)
- }
- case *types.Basic:
- // Add a literal completion for basic types that implement our
- // expected interface (e.g. named string type http.Dir
- // implements http.FileSystem), or are identical to our expected
- // type (i.e. yielding a type conversion such as "float64()").
- if source.IsInterface(expType) || types.Identical(expType, literalType) {
- c.basicLiteral(t, typeName, float64(score), addlEdits)
- }
- }
- }
-
- // If prefix matches "make", client may want a "make()"
- // invocation. We also include the type name to allow for more
- // flexible fuzzy matching.
- if score := c.matcher.Score("make." + matchName); !cand.hasMod(reference) && score > 0 {
- switch literalType.Underlying().(type) {
- case *types.Slice:
- // The second argument to "make()" for slices is required, so default to "0".
- c.makeCall(typeName, "0", float64(score), addlEdits)
- case *types.Map, *types.Chan:
- // Maps and channels don't require the second argument, so omit
- // to keep things simple for now.
- c.makeCall(typeName, "", float64(score), addlEdits)
- }
- }
-
- // If prefix matches "func", client may want a function literal.
- if score := c.matcher.Score("func"); !cand.hasMod(reference) && score > 0 && !source.IsInterface(expType) {
- switch t := literalType.Underlying().(type) {
- case *types.Signature:
- c.functionLiteral(ctx, t, float64(score))
- }
- }
-}
-
-// literalCandidateScore is the base score for literal candidates.
-// Literal candidates match the expected type so they should be high
-// scoring, but we want them ranked below lexical objects of the
-// correct type, so scale down highScore.
-const literalCandidateScore = highScore / 2
-
-// functionLiteral adds a function literal completion item for the
-// given signature.
-func (c *completer) functionLiteral(ctx context.Context, sig *types.Signature, matchScore float64) {
- snip := &snippet.Builder{}
- snip.WriteText("func(")
-
- // First we generate names for each param and keep a seen count so
- // we know if we need to uniquify param names. For example,
- // "func(int)" will become "func(i int)", but "func(int, int64)"
- // will become "func(i1 int, i2 int64)".
- var (
- paramNames = make([]string, sig.Params().Len())
- paramNameCount = make(map[string]int)
- )
- for i := 0; i < sig.Params().Len(); i++ {
- var (
- p = sig.Params().At(i)
- name = p.Name()
- )
- if name == "" {
- // If the param has no name in the signature, guess a name based
- // on the type. Use an empty qualifier to ignore the package.
- // For example, we want to name "http.Request" "r", not "hr".
- name = source.FormatVarType(ctx, c.snapshot, c.pkg, p, func(p *types.Package) string {
- return ""
- })
- name = abbreviateTypeName(name)
- }
- paramNames[i] = name
- if name != "_" {
- paramNameCount[name]++
- }
- }
-
- for n, c := range paramNameCount {
- // Any names we saw more than once will need a unique suffix added
- // on. Reset the count to 1 to act as the suffix for the first
- // name.
- if c >= 2 {
- paramNameCount[n] = 1
- } else {
- delete(paramNameCount, n)
- }
- }
-
- for i := 0; i < sig.Params().Len(); i++ {
- if i > 0 {
- snip.WriteText(", ")
- }
-
- var (
- p = sig.Params().At(i)
- name = paramNames[i]
- )
-
- // Uniquify names by adding on an incrementing numeric suffix.
- if idx, found := paramNameCount[name]; found {
- paramNameCount[name]++
- name = fmt.Sprintf("%s%d", name, idx)
- }
-
- if name != p.Name() && c.opts.placeholders {
- // If we didn't use the signature's param name verbatim then we
- // may have chosen a poor name. Give the user a placeholder so
- // they can easily fix the name.
- snip.WritePlaceholder(func(b *snippet.Builder) {
- b.WriteText(name)
- })
- } else {
- snip.WriteText(name)
- }
-
- // If the following param's type is identical to this one, omit
- // this param's type string. For example, emit "i, j int" instead
- // of "i int, j int".
- if i == sig.Params().Len()-1 || !types.Identical(p.Type(), sig.Params().At(i+1).Type()) {
- snip.WriteText(" ")
- typeStr := source.FormatVarType(ctx, c.snapshot, c.pkg, p, c.qf)
- if sig.Variadic() && i == sig.Params().Len()-1 {
- typeStr = strings.Replace(typeStr, "[]", "...", 1)
- }
- snip.WriteText(typeStr)
- }
- }
- snip.WriteText(")")
-
- results := sig.Results()
- if results.Len() > 0 {
- snip.WriteText(" ")
- }
-
- resultsNeedParens := results.Len() > 1 ||
- results.Len() == 1 && results.At(0).Name() != ""
-
- if resultsNeedParens {
- snip.WriteText("(")
- }
- for i := 0; i < results.Len(); i++ {
- if i > 0 {
- snip.WriteText(", ")
- }
- r := results.At(i)
- if name := r.Name(); name != "" {
- snip.WriteText(name + " ")
- }
- snip.WriteText(source.FormatVarType(ctx, c.snapshot, c.pkg, r, c.qf))
- }
- if resultsNeedParens {
- snip.WriteText(")")
- }
-
- snip.WriteText(" {")
- snip.WriteFinalTabstop()
- snip.WriteText("}")
-
- c.items = append(c.items, CompletionItem{
- Label: "func(...) {}",
- Score: matchScore * literalCandidateScore,
- Kind: protocol.VariableCompletion,
- snippet: snip,
- })
-}
-
-// conventionalAcronyms contains conventional acronyms for type names
-// in lower case. For example, "ctx" for "context" and "err" for "error".
-var conventionalAcronyms = map[string]string{
- "context": "ctx",
- "error": "err",
- "tx": "tx",
- "responsewriter": "w",
-}
-
-// abbreviateTypeName abbreviates type names into acronyms. For
-// example, "fooBar" is abbreviated "fb". Care is taken to ignore
-// non-identifier runes. For example, "[]int" becomes "i", and
-// "struct { i int }" becomes "s".
-func abbreviateTypeName(s string) string {
- var (
- b strings.Builder
- useNextUpper bool
- )
-
- // Trim off leading non-letters. We trim everything between "[" and
- // "]" to handle array types like "[someConst]int".
- var inBracket bool
- s = strings.TrimFunc(s, func(r rune) bool {
- if inBracket {
- inBracket = r != ']'
- return true
- }
-
- if r == '[' {
- inBracket = true
- }
-
- return !unicode.IsLetter(r)
- })
-
- if acr, ok := conventionalAcronyms[strings.ToLower(s)]; ok {
- return acr
- }
-
- for i, r := range s {
- // Stop if we encounter a non-identifier rune.
- if !unicode.IsLetter(r) && !unicode.IsNumber(r) {
- break
- }
-
- if i == 0 {
- b.WriteRune(unicode.ToLower(r))
- }
-
- if unicode.IsUpper(r) {
- if useNextUpper {
- b.WriteRune(unicode.ToLower(r))
- useNextUpper = false
- }
- } else {
- useNextUpper = true
- }
- }
-
- return b.String()
-}
-
-// compositeLiteral adds a composite literal completion item for the given typeName.
-func (c *completer) compositeLiteral(T types.Type, typeName string, matchScore float64, edits []protocol.TextEdit) {
- snip := &snippet.Builder{}
- snip.WriteText(typeName + "{")
- // Don't put the tab stop inside the composite literal curlies "{}"
- // for structs that have no accessible fields.
- if strct, ok := T.(*types.Struct); !ok || fieldsAccessible(strct, c.pkg.GetTypes()) {
- snip.WriteFinalTabstop()
- }
- snip.WriteText("}")
-
- nonSnippet := typeName + "{}"
-
- c.items = append(c.items, CompletionItem{
- Label: nonSnippet,
- InsertText: nonSnippet,
- Score: matchScore * literalCandidateScore,
- Kind: protocol.VariableCompletion,
- AdditionalTextEdits: edits,
- snippet: snip,
- })
-}
-
-// basicLiteral adds a literal completion item for the given basic
-// type name typeName.
-func (c *completer) basicLiteral(T types.Type, typeName string, matchScore float64, edits []protocol.TextEdit) {
- // Never give type conversions like "untyped int()".
- if isUntyped(T) {
- return
- }
-
- snip := &snippet.Builder{}
- snip.WriteText(typeName + "(")
- snip.WriteFinalTabstop()
- snip.WriteText(")")
-
- nonSnippet := typeName + "()"
-
- c.items = append(c.items, CompletionItem{
- Label: nonSnippet,
- InsertText: nonSnippet,
- Detail: T.String(),
- Score: matchScore * literalCandidateScore,
- Kind: protocol.VariableCompletion,
- AdditionalTextEdits: edits,
- snippet: snip,
- })
-}
-
-// makeCall adds a completion item for a "make()" call given a specific type.
-func (c *completer) makeCall(typeName string, secondArg string, matchScore float64, edits []protocol.TextEdit) {
- // Keep it simple and don't add any placeholders for optional "make()" arguments.
-
- snip := &snippet.Builder{}
- snip.WriteText("make(" + typeName)
- if secondArg != "" {
- snip.WriteText(", ")
- snip.WritePlaceholder(func(b *snippet.Builder) {
- if c.opts.placeholders {
- b.WriteText(secondArg)
- }
- })
- }
- snip.WriteText(")")
-
- var nonSnippet strings.Builder
- nonSnippet.WriteString("make(" + typeName)
- if secondArg != "" {
- nonSnippet.WriteString(", ")
- nonSnippet.WriteString(secondArg)
- }
- nonSnippet.WriteByte(')')
-
- c.items = append(c.items, CompletionItem{
- Label: nonSnippet.String(),
- InsertText: nonSnippet.String(),
- Score: matchScore * literalCandidateScore,
- Kind: protocol.FunctionCompletion,
- AdditionalTextEdits: edits,
- snippet: snip,
- })
-}
diff --git a/internal/lsp/source/completion/package.go b/internal/lsp/source/completion/package.go
deleted file mode 100644
index c7e52d718..000000000
--- a/internal/lsp/source/completion/package.go
+++ /dev/null
@@ -1,364 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package completion
-
-import (
- "bytes"
- "context"
- "fmt"
- "go/ast"
- "go/parser"
- "go/scanner"
- "go/token"
- "go/types"
- "path/filepath"
- "strings"
- "unicode"
-
- "golang.org/x/tools/internal/lsp/debug"
- "golang.org/x/tools/internal/lsp/fuzzy"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
- errors "golang.org/x/xerrors"
-)
-
-// packageClauseCompletions offers completions for a package declaration when
-// one is not present in the given file.
-func packageClauseCompletions(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle, pos protocol.Position) ([]CompletionItem, *Selection, error) {
- // We know that the AST for this file will be empty due to the missing
- // package declaration, but parse it anyway to get a mapper.
- pgf, err := snapshot.ParseGo(ctx, fh, source.ParseFull)
- if err != nil {
- return nil, nil, err
- }
-
- cursorSpan, err := pgf.Mapper.PointSpan(pos)
- if err != nil {
- return nil, nil, err
- }
- rng, err := cursorSpan.Range(pgf.Mapper.Converter)
- if err != nil {
- return nil, nil, err
- }
-
- surrounding, err := packageCompletionSurrounding(ctx, snapshot.FileSet(), pgf, rng.Start)
- if err != nil {
- return nil, nil, errors.Errorf("invalid position for package completion: %w", err)
- }
-
- packageSuggestions, err := packageSuggestions(ctx, snapshot, fh.URI(), "")
- if err != nil {
- return nil, nil, err
- }
-
- var items []CompletionItem
- for _, pkg := range packageSuggestions {
- insertText := fmt.Sprintf("package %s", pkg.name)
- items = append(items, CompletionItem{
- Label: insertText,
- Kind: protocol.ModuleCompletion,
- InsertText: insertText,
- Score: pkg.score,
- })
- }
-
- return items, surrounding, nil
-}
-
-// packageCompletionSurrounding returns surrounding for package completion if a
-// package completions can be suggested at a given position. A valid location
-// for package completion is above any declarations or import statements.
-func packageCompletionSurrounding(ctx context.Context, fset *token.FileSet, pgf *source.ParsedGoFile, pos token.Pos) (*Selection, error) {
- // If the file lacks a package declaration, the parser will return an empty
- // AST. As a work-around, try to parse an expression from the file contents.
- filename := pgf.URI.Filename()
- expr, _ := parser.ParseExprFrom(fset, filename, pgf.Src, parser.Mode(0))
- if expr == nil {
- return nil, fmt.Errorf("unparseable file (%s)", pgf.URI)
- }
- tok := fset.File(expr.Pos())
- offset, err := source.Offset(pgf.Tok, pos)
- if err != nil {
- return nil, err
- }
- if offset > tok.Size() {
- debug.Bug(ctx, "out of bounds cursor", "cursor offset (%d) out of bounds for %s (size: %d)", offset, pgf.URI, tok.Size())
- return nil, fmt.Errorf("cursor out of bounds")
- }
- cursor := tok.Pos(offset)
- m := &protocol.ColumnMapper{
- URI: pgf.URI,
- Content: pgf.Src,
- Converter: span.NewContentConverter(filename, pgf.Src),
- }
-
- // If we were able to parse out an identifier as the first expression from
- // the file, it may be the beginning of a package declaration ("pack ").
- // We can offer package completions if the cursor is in the identifier.
- if name, ok := expr.(*ast.Ident); ok {
- if cursor >= name.Pos() && cursor <= name.End() {
- if !strings.HasPrefix(PACKAGE, name.Name) {
- return nil, fmt.Errorf("cursor in non-matching ident")
- }
- return &Selection{
- content: name.Name,
- cursor: cursor,
- MappedRange: source.NewMappedRange(fset, m, name.Pos(), name.End()),
- }, nil
- }
- }
-
- // The file is invalid, but it contains an expression that we were able to
- // parse. We will use this expression to construct the cursor's
- // "surrounding".
-
- // First, consider the possibility that we have a valid "package" keyword
- // with an empty package name ("package "). "package" is parsed as an
- // *ast.BadDecl since it is a keyword. This logic would allow "package" to
- // appear on any line of the file as long as it's the first code expression
- // in the file.
- lines := strings.Split(string(pgf.Src), "\n")
- cursorLine := tok.Line(cursor)
- if cursorLine <= 0 || cursorLine > len(lines) {
- return nil, fmt.Errorf("invalid line number")
- }
- if fset.Position(expr.Pos()).Line == cursorLine {
- words := strings.Fields(lines[cursorLine-1])
- if len(words) > 0 && words[0] == PACKAGE {
- content := PACKAGE
- // Account for spaces if there are any.
- if len(words) > 1 {
- content += " "
- }
-
- start := expr.Pos()
- end := token.Pos(int(expr.Pos()) + len(content) + 1)
- // We have verified that we have a valid 'package' keyword as our
- // first expression. Ensure that cursor is in this keyword or
- // otherwise fallback to the general case.
- if cursor >= start && cursor <= end {
- return &Selection{
- content: content,
- cursor: cursor,
- MappedRange: source.NewMappedRange(fset, m, start, end),
- }, nil
- }
- }
- }
-
- // If the cursor is after the start of the expression, no package
- // declaration will be valid.
- if cursor > expr.Pos() {
- return nil, fmt.Errorf("cursor after expression")
- }
-
- // If the cursor is in a comment, don't offer any completions.
- if cursorInComment(fset, cursor, pgf.Src) {
- return nil, fmt.Errorf("cursor in comment")
- }
-
- // The surrounding range in this case is the cursor except for empty file,
- // in which case it's end of file - 1
- start, end := cursor, cursor
- if tok.Size() == 0 {
- start, end = tok.Pos(0)-1, tok.Pos(0)-1
- }
-
- return &Selection{
- content: "",
- cursor: cursor,
- MappedRange: source.NewMappedRange(fset, m, start, end),
- }, nil
-}
-
-func cursorInComment(fset *token.FileSet, cursor token.Pos, src []byte) bool {
- var s scanner.Scanner
- s.Init(fset.File(cursor), src, func(_ token.Position, _ string) {}, scanner.ScanComments)
- for {
- pos, tok, lit := s.Scan()
- if pos <= cursor && cursor <= token.Pos(int(pos)+len(lit)) {
- return tok == token.COMMENT
- }
- if tok == token.EOF {
- break
- }
- }
- return false
-}
-
-// packageNameCompletions returns name completions for a package clause using
-// the current name as prefix.
-func (c *completer) packageNameCompletions(ctx context.Context, fileURI span.URI, name *ast.Ident) error {
- cursor := int(c.pos - name.NamePos)
- if cursor < 0 || cursor > len(name.Name) {
- return errors.New("cursor is not in package name identifier")
- }
-
- c.completionContext.packageCompletion = true
-
- prefix := name.Name[:cursor]
- packageSuggestions, err := packageSuggestions(ctx, c.snapshot, fileURI, prefix)
- if err != nil {
- return err
- }
-
- for _, pkg := range packageSuggestions {
- c.deepState.enqueue(pkg)
- }
- return nil
-}
-
-// packageSuggestions returns a list of packages from workspace packages that
-// have the given prefix and are used in the same directory as the given
-// file. This also includes test packages for these packages (<pkg>_test) and
-// the directory name itself.
-func packageSuggestions(ctx context.Context, snapshot source.Snapshot, fileURI span.URI, prefix string) (packages []candidate, err error) {
- workspacePackages, err := snapshot.ActivePackages(ctx)
- if err != nil {
- return nil, err
- }
-
- toCandidate := func(name string, score float64) candidate {
- obj := types.NewPkgName(0, nil, name, types.NewPackage("", name))
- return candidate{obj: obj, name: name, detail: name, score: score}
- }
-
- matcher := fuzzy.NewMatcher(prefix)
-
- // Always try to suggest a main package
- defer func() {
- if score := float64(matcher.Score("main")); score > 0 {
- packages = append(packages, toCandidate("main", score*lowScore))
- }
- }()
-
- dirPath := filepath.Dir(fileURI.Filename())
- dirName := filepath.Base(dirPath)
- if !isValidDirName(dirName) {
- return packages, nil
- }
- pkgName := convertDirNameToPkgName(dirName)
-
- seenPkgs := make(map[string]struct{})
-
- // The `go` command by default only allows one package per directory but we
- // support multiple package suggestions since gopls is build system agnostic.
- for _, pkg := range workspacePackages {
- if pkg.Name() == "main" || pkg.Name() == "" {
- continue
- }
- if _, ok := seenPkgs[pkg.Name()]; ok {
- continue
- }
-
- // Only add packages that are previously used in the current directory.
- var relevantPkg bool
- for _, pgf := range pkg.CompiledGoFiles() {
- if filepath.Dir(pgf.URI.Filename()) == dirPath {
- relevantPkg = true
- break
- }
- }
- if !relevantPkg {
- continue
- }
-
- // Add a found package used in current directory as a high relevance
- // suggestion and the test package for it as a medium relevance
- // suggestion.
- if score := float64(matcher.Score(pkg.Name())); score > 0 {
- packages = append(packages, toCandidate(pkg.Name(), score*highScore))
- }
- seenPkgs[pkg.Name()] = struct{}{}
-
- testPkgName := pkg.Name() + "_test"
- if _, ok := seenPkgs[testPkgName]; ok || strings.HasSuffix(pkg.Name(), "_test") {
- continue
- }
- if score := float64(matcher.Score(testPkgName)); score > 0 {
- packages = append(packages, toCandidate(testPkgName, score*stdScore))
- }
- seenPkgs[testPkgName] = struct{}{}
- }
-
- // Add current directory name as a low relevance suggestion.
- if _, ok := seenPkgs[pkgName]; !ok {
- if score := float64(matcher.Score(pkgName)); score > 0 {
- packages = append(packages, toCandidate(pkgName, score*lowScore))
- }
-
- testPkgName := pkgName + "_test"
- if score := float64(matcher.Score(testPkgName)); score > 0 {
- packages = append(packages, toCandidate(testPkgName, score*lowScore))
- }
- }
-
- return packages, nil
-}
-
-// isValidDirName checks whether the passed directory name can be used in
-// a package path. Requirements for a package path can be found here:
-// https://golang.org/ref/mod#go-mod-file-ident.
-func isValidDirName(dirName string) bool {
- if dirName == "" {
- return false
- }
-
- for i, ch := range dirName {
- if isLetter(ch) || isDigit(ch) {
- continue
- }
- if i == 0 {
- // Directory name can start only with '_'. '.' is not allowed in module paths.
- // '-' and '~' are not allowed because elements of package paths must be
- // safe command-line arguments.
- if ch == '_' {
- continue
- }
- } else {
- // Modules path elements can't end with '.'
- if isAllowedPunctuation(ch) && (i != len(dirName)-1 || ch != '.') {
- continue
- }
- }
-
- return false
- }
- return true
-}
-
-// convertDirNameToPkgName converts a valid directory name to a valid package name.
-// It leaves only letters and digits. All letters are mapped to lower case.
-func convertDirNameToPkgName(dirName string) string {
- var buf bytes.Buffer
- for _, ch := range dirName {
- switch {
- case isLetter(ch):
- buf.WriteRune(unicode.ToLower(ch))
-
- case buf.Len() != 0 && isDigit(ch):
- buf.WriteRune(ch)
- }
- }
- return buf.String()
-}
-
-// isLetter and isDigit allow only ASCII characters because
-// "Each path element is a non-empty string made of up ASCII letters,
-// ASCII digits, and limited ASCII punctuation"
-// (see https://golang.org/ref/mod#go-mod-file-ident).
-
-func isLetter(ch rune) bool {
- return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z'
-}
-
-func isDigit(ch rune) bool {
- return '0' <= ch && ch <= '9'
-}
-
-func isAllowedPunctuation(ch rune) bool {
- return ch == '_' || ch == '-' || ch == '~' || ch == '.'
-}
diff --git a/internal/lsp/source/completion/package_test.go b/internal/lsp/source/completion/package_test.go
deleted file mode 100644
index 6436984fd..000000000
--- a/internal/lsp/source/completion/package_test.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package completion
-
-import "testing"
-
-func TestIsValidDirName(t *testing.T) {
- tests := []struct {
- dirName string
- valid bool
- }{
- {dirName: "", valid: false},
- //
- {dirName: "a", valid: true},
- {dirName: "abcdef", valid: true},
- {dirName: "AbCdEf", valid: true},
- //
- {dirName: "1a35", valid: true},
- {dirName: "a16", valid: true},
- //
- {dirName: "_a", valid: true},
- {dirName: "a_", valid: true},
- //
- {dirName: "~a", valid: false},
- {dirName: "a~", valid: true},
- //
- {dirName: "-a", valid: false},
- {dirName: "a-", valid: true},
- //
- {dirName: ".a", valid: false},
- {dirName: "a.", valid: false},
- //
- {dirName: "a~_b--c.-e", valid: true},
- {dirName: "~a~_b--c.-e", valid: false},
- {dirName: "a~_b--c.-e--~", valid: true},
- {dirName: "a~_b--2134dc42.-e6--~", valid: true},
- {dirName: "abc`def", valid: false},
- {dirName: "тест", valid: false},
- {dirName: "你好", valid: false},
- }
- for _, tt := range tests {
- valid := isValidDirName(tt.dirName)
- if tt.valid != valid {
- t.Errorf("%s: expected %v, got %v", tt.dirName, tt.valid, valid)
- }
- }
-}
-
-func TestConvertDirNameToPkgName(t *testing.T) {
- tests := []struct {
- dirName string
- pkgName string
- }{
- {dirName: "a", pkgName: "a"},
- {dirName: "abcdef", pkgName: "abcdef"},
- {dirName: "AbCdEf", pkgName: "abcdef"},
- {dirName: "1a35", pkgName: "a35"},
- {dirName: "14a35", pkgName: "a35"},
- {dirName: "a16", pkgName: "a16"},
- {dirName: "_a", pkgName: "a"},
- {dirName: "a_", pkgName: "a"},
- {dirName: "a~", pkgName: "a"},
- {dirName: "a-", pkgName: "a"},
- {dirName: "a~_b--c.-e", pkgName: "abce"},
- {dirName: "a~_b--c.-e--~", pkgName: "abce"},
- {dirName: "a~_b--2134dc42.-e6--~", pkgName: "ab2134dc42e6"},
- }
- for _, tt := range tests {
- pkgName := convertDirNameToPkgName(tt.dirName)
- if tt.pkgName != pkgName {
- t.Errorf("%s: expected %v, got %v", tt.dirName, tt.pkgName, pkgName)
- continue
- }
- }
-}
diff --git a/internal/lsp/source/completion/postfix_snippets.go b/internal/lsp/source/completion/postfix_snippets.go
deleted file mode 100644
index 7ea962118..000000000
--- a/internal/lsp/source/completion/postfix_snippets.go
+++ /dev/null
@@ -1,461 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package completion
-
-import (
- "context"
- "fmt"
- "go/ast"
- "go/token"
- "go/types"
- "log"
- "reflect"
- "strings"
- "sync"
- "text/template"
-
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/imports"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/snippet"
- "golang.org/x/tools/internal/lsp/source"
- errors "golang.org/x/xerrors"
-)
-
-// Postfix snippets are artificial methods that allow the user to
-// compose common operations in an "argument oriented" fashion. For
-// example, instead of "sort.Slice(someSlice, ...)" a user can expand
-// "someSlice.sort!".
-
-// postfixTmpl represents a postfix snippet completion candidate.
-type postfixTmpl struct {
- // label is the completion candidate's label presented to the user.
- label string
-
- // details is passed along to the client as the candidate's details.
- details string
-
- // body is the template text. See postfixTmplArgs for details on the
- // facilities available to the template.
- body string
-
- tmpl *template.Template
-}
-
-// postfixTmplArgs are the template execution arguments available to
-// the postfix snippet templates.
-type postfixTmplArgs struct {
- // StmtOK is true if it is valid to replace the selector with a
- // statement. For example:
- //
- // func foo() {
- // bar.sort! // statement okay
- //
- // someMethod(bar.sort!) // statement not okay
- // }
- StmtOK bool
-
- // X is the textual SelectorExpr.X. For example, when completing
- // "foo.bar.print!", "X" is "foo.bar".
- X string
-
- // Obj is the types.Object of SelectorExpr.X, if any.
- Obj types.Object
-
- // Type is the type of "foo.bar" in "foo.bar.print!".
- Type types.Type
-
- scope *types.Scope
- snip snippet.Builder
- importIfNeeded func(pkgPath string, scope *types.Scope) (name string, edits []protocol.TextEdit, err error)
- edits []protocol.TextEdit
- qf types.Qualifier
- varNames map[string]bool
-}
-
-var postfixTmpls = []postfixTmpl{{
- label: "sort",
- details: "sort.Slice()",
- body: `{{if and (eq .Kind "slice") .StmtOK -}}
-{{.Import "sort"}}.Slice({{.X}}, func({{.VarName nil "i"}}, {{.VarName nil "j"}} int) bool {
- {{.Cursor}}
-})
-{{- end}}`,
-}, {
- label: "last",
- details: "s[len(s)-1]",
- body: `{{if and (eq .Kind "slice") .Obj -}}
-{{.X}}[len({{.X}})-1]
-{{- end}}`,
-}, {
- label: "reverse",
- details: "reverse slice",
- body: `{{if and (eq .Kind "slice") .StmtOK -}}
-{{$i := .VarName nil "i"}}{{$j := .VarName nil "j" -}}
-for {{$i}}, {{$j}} := 0, len({{.X}})-1; {{$i}} < {{$j}}; {{$i}}, {{$j}} = {{$i}}+1, {{$j}}-1 {
- {{.X}}[{{$i}}], {{.X}}[{{$j}}] = {{.X}}[{{$j}}], {{.X}}[{{$i}}]
-}
-{{end}}`,
-}, {
- label: "range",
- details: "range over slice",
- body: `{{if and (eq .Kind "slice") .StmtOK -}}
-for {{.VarName nil "i"}}, {{.VarName .ElemType "v"}} := range {{.X}} {
- {{.Cursor}}
-}
-{{- end}}`,
-}, {
- label: "append",
- details: "append and re-assign slice",
- body: `{{if and (eq .Kind "slice") .StmtOK .Obj -}}
-{{.X}} = append({{.X}}, {{.Cursor}})
-{{- end}}`,
-}, {
- label: "append",
- details: "append to slice",
- body: `{{if and (eq .Kind "slice") (not .StmtOK) -}}
-append({{.X}}, {{.Cursor}})
-{{- end}}`,
-}, {
- label: "copy",
- details: "duplicate slice",
- body: `{{if and (eq .Kind "slice") .StmtOK .Obj -}}
-{{$v := (.VarName nil (printf "%sCopy" .X))}}{{$v}} := make([]{{.TypeName .ElemType}}, len({{.X}}))
-copy({{$v}}, {{.X}})
-{{end}}`,
-}, {
- label: "range",
- details: "range over map",
- body: `{{if and (eq .Kind "map") .StmtOK -}}
-for {{.VarName .KeyType "k"}}, {{.VarName .ElemType "v"}} := range {{.X}} {
- {{.Cursor}}
-}
-{{- end}}`,
-}, {
- label: "clear",
- details: "clear map contents",
- body: `{{if and (eq .Kind "map") .StmtOK -}}
-{{$k := (.VarName .KeyType "k")}}for {{$k}} := range {{.X}} {
- delete({{.X}}, {{$k}})
-}
-{{end}}`,
-}, {
- label: "keys",
- details: "create slice of keys",
- body: `{{if and (eq .Kind "map") .StmtOK -}}
-{{$keysVar := (.VarName nil "keys")}}{{$keysVar}} := make([]{{.TypeName .KeyType}}, 0, len({{.X}}))
-{{$k := (.VarName .KeyType "k")}}for {{$k}} := range {{.X}} {
- {{$keysVar}} = append({{$keysVar}}, {{$k}})
-}
-{{end}}`,
-}, {
- label: "var",
- details: "assign to variables",
- body: `{{if and (eq .Kind "tuple") .StmtOK -}}
-{{$a := .}}{{range $i, $v := .Tuple}}{{if $i}}, {{end}}{{$a.VarName $v.Type $v.Name}}{{end}} := {{.X}}
-{{- end}}`,
-}, {
- label: "var",
- details: "assign to variable",
- body: `{{if and (ne .Kind "tuple") .StmtOK -}}
-{{.VarName .Type ""}} := {{.X}}
-{{- end}}`,
-}, {
- label: "print",
- details: "print to stdout",
- body: `{{if and (ne .Kind "tuple") .StmtOK -}}
-{{.Import "fmt"}}.Printf("{{.EscapeQuotes .X}}: %v\n", {{.X}})
-{{- end}}`,
-}, {
- label: "print",
- details: "print to stdout",
- body: `{{if and (eq .Kind "tuple") .StmtOK -}}
-{{.Import "fmt"}}.Println({{.X}})
-{{- end}}`,
-}, {
- label: "split",
- details: "split string",
- body: `{{if (eq (.TypeName .Type) "string") -}}
-{{.Import "strings"}}.Split({{.X}}, "{{.Cursor}}")
-{{- end}}`,
-}, {
- label: "join",
- details: "join string slice",
- body: `{{if and (eq .Kind "slice") (eq (.TypeName .ElemType) "string") -}}
-{{.Import "strings"}}.Join({{.X}}, "{{.Cursor}}")
-{{- end}}`,
-}}
-
-// Cursor indicates where the client's cursor should end up after the
-// snippet is done.
-func (a *postfixTmplArgs) Cursor() string {
- a.snip.WriteFinalTabstop()
- return ""
-}
-
-// Import makes sure the package corresponding to path is imported,
-// returning the identifier to use to refer to the package.
-func (a *postfixTmplArgs) Import(path string) (string, error) {
- name, edits, err := a.importIfNeeded(path, a.scope)
- if err != nil {
- return "", errors.Errorf("couldn't import %q: %w", path, err)
- }
- a.edits = append(a.edits, edits...)
- return name, nil
-}
-
-func (a *postfixTmplArgs) EscapeQuotes(v string) string {
- return strings.ReplaceAll(v, `"`, `\\"`)
-}
-
-// ElemType returns the Elem() type of xType, if applicable.
-func (a *postfixTmplArgs) ElemType() types.Type {
- if e, _ := a.Type.(interface{ Elem() types.Type }); e != nil {
- return e.Elem()
- }
- return nil
-}
-
-// Kind returns the underlying kind of type, e.g. "slice", "struct",
-// etc.
-func (a *postfixTmplArgs) Kind() string {
- t := reflect.TypeOf(a.Type.Underlying())
- return strings.ToLower(strings.TrimPrefix(t.String(), "*types."))
-}
-
-// KeyType returns the type of X's key. KeyType panics if X is not a
-// map.
-func (a *postfixTmplArgs) KeyType() types.Type {
- return a.Type.Underlying().(*types.Map).Key()
-}
-
-// Tuple returns the tuple result vars if X is a call expression.
-func (a *postfixTmplArgs) Tuple() []*types.Var {
- tuple, _ := a.Type.(*types.Tuple)
- if tuple == nil {
- return nil
- }
-
- typs := make([]*types.Var, 0, tuple.Len())
- for i := 0; i < tuple.Len(); i++ {
- typs = append(typs, tuple.At(i))
- }
- return typs
-}
-
-// TypeName returns the textual representation of type t.
-func (a *postfixTmplArgs) TypeName(t types.Type) (string, error) {
- if t == nil || t == types.Typ[types.Invalid] {
- return "", fmt.Errorf("invalid type: %v", t)
- }
- return types.TypeString(t, a.qf), nil
-}
-
-// VarName returns a suitable variable name for the type t. If t
-// implements the error interface, "err" is used. If t is not a named
-// type then nonNamedDefault is used. Otherwise a name is made by
-// abbreviating the type name. If the resultant name is already in
-// scope, an integer is appended to make a unique name.
-func (a *postfixTmplArgs) VarName(t types.Type, nonNamedDefault string) string {
- if t == nil {
- t = types.Typ[types.Invalid]
- }
-
- var name string
- if types.Implements(t, errorIntf) {
- name = "err"
- } else if _, isNamed := source.Deref(t).(*types.Named); !isNamed {
- name = nonNamedDefault
- }
-
- if name == "" {
- name = types.TypeString(t, func(p *types.Package) string {
- return ""
- })
- name = abbreviateTypeName(name)
- }
-
- if dot := strings.LastIndex(name, "."); dot > -1 {
- name = name[dot+1:]
- }
-
- uniqueName := name
- for i := 2; ; i++ {
- if s, _ := a.scope.LookupParent(uniqueName, token.NoPos); s == nil && !a.varNames[uniqueName] {
- break
- }
- uniqueName = fmt.Sprintf("%s%d", name, i)
- }
-
- a.varNames[uniqueName] = true
-
- return uniqueName
-}
-
-func (c *completer) addPostfixSnippetCandidates(ctx context.Context, sel *ast.SelectorExpr) {
- if !c.opts.postfix {
- return
- }
-
- initPostfixRules()
-
- if sel == nil || sel.Sel == nil {
- return
- }
-
- selType := c.pkg.GetTypesInfo().TypeOf(sel.X)
- if selType == nil {
- return
- }
-
- // Skip empty tuples since there is no value to operate on.
- if tuple, ok := selType.Underlying().(*types.Tuple); ok && tuple == nil {
- return
- }
-
- tokFile := c.snapshot.FileSet().File(c.pos)
-
- // Only replace sel with a statement if sel is already a statement.
- var stmtOK bool
- for i, n := range c.path {
- if n == sel && i < len(c.path)-1 {
- switch p := c.path[i+1].(type) {
- case *ast.ExprStmt:
- stmtOK = true
- case *ast.AssignStmt:
- // In cases like:
- //
- // foo.<>
- // bar = 123
- //
- // detect that "foo." makes up the entire statement since the
- // apparent selector spans lines.
- stmtOK = tokFile.Line(c.pos) < tokFile.Line(p.TokPos)
- }
- break
- }
- }
-
- scope := c.pkg.GetTypes().Scope().Innermost(c.pos)
- if scope == nil {
- return
- }
-
- // afterDot is the position after selector dot, e.g. "|" in
- // "foo.|print".
- afterDot := sel.Sel.Pos()
-
- // We must detect dangling selectors such as:
- //
- // foo.<>
- // bar
- //
- // and adjust afterDot so that we don't mistakenly delete the
- // newline thinking "bar" is part of our selector.
- if startLine := tokFile.Line(sel.Pos()); startLine != tokFile.Line(afterDot) {
- if tokFile.Line(c.pos) != startLine {
- return
- }
- afterDot = c.pos
- }
-
- for _, rule := range postfixTmpls {
- // When completing foo.print<>, "print" is naturally overwritten,
- // but we need to also remove "foo." so the snippet has a clean
- // slate.
- edits, err := c.editText(sel.Pos(), afterDot, "")
- if err != nil {
- event.Error(ctx, "error calculating postfix edits", err)
- return
- }
-
- tmplArgs := postfixTmplArgs{
- X: source.FormatNode(c.snapshot.FileSet(), sel.X),
- StmtOK: stmtOK,
- Obj: exprObj(c.pkg.GetTypesInfo(), sel.X),
- Type: selType,
- qf: c.qf,
- importIfNeeded: c.importIfNeeded,
- scope: scope,
- varNames: make(map[string]bool),
- }
-
- // Feed the template straight into the snippet builder. This
- // allows templates to build snippets as they are executed.
- err = rule.tmpl.Execute(&tmplArgs.snip, &tmplArgs)
- if err != nil {
- event.Error(ctx, "error executing postfix template", err)
- continue
- }
-
- if strings.TrimSpace(tmplArgs.snip.String()) == "" {
- continue
- }
-
- score := c.matcher.Score(rule.label)
- if score <= 0 {
- continue
- }
-
- c.items = append(c.items, CompletionItem{
- Label: rule.label + "!",
- Detail: rule.details,
- Score: float64(score) * 0.01,
- Kind: protocol.SnippetCompletion,
- snippet: &tmplArgs.snip,
- AdditionalTextEdits: append(edits, tmplArgs.edits...),
- })
- }
-}
-
-var postfixRulesOnce sync.Once
-
-func initPostfixRules() {
- postfixRulesOnce.Do(func() {
- var idx int
- for _, rule := range postfixTmpls {
- var err error
- rule.tmpl, err = template.New("postfix_snippet").Parse(rule.body)
- if err != nil {
- log.Panicf("error parsing postfix snippet template: %v", err)
- }
- postfixTmpls[idx] = rule
- idx++
- }
- postfixTmpls = postfixTmpls[:idx]
- })
-}
-
-// importIfNeeded returns the package identifier and any necessary
-// edits to import package pkgPath.
-func (c *completer) importIfNeeded(pkgPath string, scope *types.Scope) (string, []protocol.TextEdit, error) {
- defaultName := imports.ImportPathToAssumedName(pkgPath)
-
- // Check if file already imports pkgPath.
- for _, s := range c.file.Imports {
- if source.ImportPath(s) == pkgPath {
- if s.Name == nil {
- return defaultName, nil, nil
- }
- if s.Name.Name != "_" {
- return s.Name.Name, nil, nil
- }
- }
- }
-
- // Give up if the package's name is already in use by another object.
- if _, obj := scope.LookupParent(defaultName, token.NoPos); obj != nil {
- return "", nil, fmt.Errorf("import name %q of %q already in use", defaultName, pkgPath)
- }
-
- edits, err := c.importEdits(&importInfo{
- importPath: pkgPath,
- })
- if err != nil {
- return "", nil, err
- }
-
- return defaultName, edits, nil
-}
diff --git a/internal/lsp/source/completion/printf.go b/internal/lsp/source/completion/printf.go
deleted file mode 100644
index ce74af53b..000000000
--- a/internal/lsp/source/completion/printf.go
+++ /dev/null
@@ -1,172 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package completion
-
-import (
- "go/ast"
- "go/constant"
- "go/types"
- "strconv"
- "strings"
- "unicode/utf8"
-)
-
-// printfArgKind returns the expected objKind when completing a
-// printf-like operand. call is the printf-like function call, and
-// argIdx is the index of call.Args being completed.
-func printfArgKind(info *types.Info, call *ast.CallExpr, argIdx int) objKind {
- // Printf-like function name must end in "f".
- fn := exprObj(info, call.Fun)
- if fn == nil || !strings.HasSuffix(fn.Name(), "f") {
- return kindAny
- }
-
- sig, _ := fn.Type().(*types.Signature)
- if sig == nil {
- return kindAny
- }
-
- // Must be variadic and take at least two params.
- numParams := sig.Params().Len()
- if !sig.Variadic() || numParams < 2 || argIdx < numParams-1 {
- return kindAny
- }
-
- // Param preceding variadic args must be a (format) string.
- if !types.Identical(sig.Params().At(numParams-2).Type(), types.Typ[types.String]) {
- return kindAny
- }
-
- // Format string must be a constant.
- strArg := info.Types[call.Args[numParams-2]].Value
- if strArg == nil || strArg.Kind() != constant.String {
- return kindAny
- }
-
- return formatOperandKind(constant.StringVal(strArg), argIdx-(numParams-1)+1)
-}
-
-// formatOperandKind returns the objKind corresponding to format's
-// operandIdx'th operand.
-func formatOperandKind(format string, operandIdx int) objKind {
- var (
- prevOperandIdx int
- kind = kindAny
- )
- for {
- i := strings.Index(format, "%")
- if i == -1 {
- break
- }
-
- var operands []formatOperand
- format, operands = parsePrintfVerb(format[i+1:], prevOperandIdx)
-
- // Check if any this verb's operands correspond to our target
- // operandIdx.
- for _, v := range operands {
- if v.idx == operandIdx {
- if kind == kindAny {
- kind = v.kind
- } else if v.kind != kindAny {
- // If multiple verbs refer to the same operand, take the
- // intersection of their kinds.
- kind &= v.kind
- }
- }
-
- prevOperandIdx = v.idx
- }
- }
- return kind
-}
-
-type formatOperand struct {
- // idx is the one-based printf operand index.
- idx int
- // kind is a mask of expected kinds of objects for this operand.
- kind objKind
-}
-
-// parsePrintfVerb parses the leading printf verb in f. The opening
-// "%" must already be trimmed from f. prevIdx is the previous
-// operand's index, or zero if this is the first verb. The format
-// string is returned with the leading verb removed. Multiple operands
-// can be returned in the case of dynamic widths such as "%*.*f".
-func parsePrintfVerb(f string, prevIdx int) (string, []formatOperand) {
- var verbs []formatOperand
-
- addVerb := func(k objKind) {
- verbs = append(verbs, formatOperand{
- idx: prevIdx + 1,
- kind: k,
- })
- prevIdx++
- }
-
- for len(f) > 0 {
- // Trim first rune off of f so we are guaranteed to make progress.
- r, l := utf8.DecodeRuneInString(f)
- f = f[l:]
-
- // We care about three things:
- // 1. The verb, which maps directly to object kind.
- // 2. Explicit operand indices like "%[2]s".
- // 3. Dynamic widths using "*".
- switch r {
- case '%':
- return f, nil
- case '*':
- addVerb(kindInt)
- continue
- case '[':
- // Parse operand index as in "%[2]s".
- i := strings.Index(f, "]")
- if i == -1 {
- return f, nil
- }
-
- idx, err := strconv.Atoi(f[:i])
- f = f[i+1:]
- if err != nil {
- return f, nil
- }
-
- prevIdx = idx - 1
- continue
- case 'v', 'T':
- addVerb(kindAny)
- case 't':
- addVerb(kindBool)
- case 'c', 'd', 'o', 'O', 'U':
- addVerb(kindInt)
- case 'e', 'E', 'f', 'F', 'g', 'G':
- addVerb(kindFloat | kindComplex)
- case 'b':
- addVerb(kindInt | kindFloat | kindComplex | kindBytes)
- case 'q', 's':
- addVerb(kindString | kindBytes | kindStringer | kindError)
- case 'x', 'X':
- // Omit kindStringer and kindError though technically allowed.
- addVerb(kindString | kindBytes | kindInt | kindFloat | kindComplex)
- case 'p':
- addVerb(kindPtr | kindSlice)
- case 'w':
- addVerb(kindError)
- case '+', '-', '#', ' ', '.', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
- // Flag or numeric width/precicision value.
- continue
- default:
- // Assume unrecognized rune is a custom fmt.Formatter verb.
- addVerb(kindAny)
- }
-
- if len(verbs) > 0 {
- break
- }
- }
-
- return f, verbs
-}
diff --git a/internal/lsp/source/completion/snippet.go b/internal/lsp/source/completion/snippet.go
deleted file mode 100644
index 72c351f94..000000000
--- a/internal/lsp/source/completion/snippet.go
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package completion
-
-import (
- "go/ast"
-
- "golang.org/x/tools/internal/lsp/snippet"
-)
-
-// structFieldSnippets calculates the snippet for struct literal field names.
-func (c *completer) structFieldSnippet(cand candidate, detail string, snip *snippet.Builder) {
- if !c.wantStructFieldCompletions() {
- return
- }
-
- // If we are in a deep completion then we can't be completing a field
- // name (e.g. "Foo{f<>}" completing to "Foo{f.Bar}" should not generate
- // a snippet).
- if len(cand.path) > 0 {
- return
- }
-
- clInfo := c.enclosingCompositeLiteral
-
- // If we are already in a key-value expression, we don't want a snippet.
- if clInfo.kv != nil {
- return
- }
-
- // A plain snippet turns "Foo{Ba<>" into "Foo{Bar: <>".
- snip.WriteText(": ")
- snip.WritePlaceholder(func(b *snippet.Builder) {
- // A placeholder snippet turns "Foo{Ba<>" into "Foo{Bar: <*int*>".
- if c.opts.placeholders {
- b.WriteText(detail)
- }
- })
-
- fset := c.snapshot.FileSet()
-
- // If the cursor position is on a different line from the literal's opening brace,
- // we are in a multiline literal.
- if fset.Position(c.pos).Line != fset.Position(clInfo.cl.Lbrace).Line {
- snip.WriteText(",")
- }
-}
-
-// functionCallSnippets calculates the snippet for function calls.
-func (c *completer) functionCallSnippet(name string, tparams, params []string, snip *snippet.Builder) {
- // If there is no suffix then we need to reuse existing call parens
- // "()" if present. If there is an identifier suffix then we always
- // need to include "()" since we don't overwrite the suffix.
- if c.surrounding != nil && c.surrounding.Suffix() == "" && len(c.path) > 1 {
- // If we are the left side (i.e. "Fun") part of a call expression,
- // we don't want a snippet since there are already parens present.
- switch n := c.path[1].(type) {
- case *ast.CallExpr:
- // The Lparen != Rparen check detects fudged CallExprs we
- // inserted when fixing the AST. In this case, we do still need
- // to insert the calling "()" parens.
- if n.Fun == c.path[0] && n.Lparen != n.Rparen {
- return
- }
- case *ast.SelectorExpr:
- if len(c.path) > 2 {
- if call, ok := c.path[2].(*ast.CallExpr); ok && call.Fun == c.path[1] && call.Lparen != call.Rparen {
- return
- }
- }
- }
- }
-
- snip.WriteText(name)
-
- if len(tparams) > 0 {
- snip.WriteText("[")
- if c.opts.placeholders {
- for i, tp := range tparams {
- if i > 0 {
- snip.WriteText(", ")
- }
- snip.WritePlaceholder(func(b *snippet.Builder) {
- b.WriteText(tp)
- })
- }
- } else {
- snip.WritePlaceholder(nil)
- }
- snip.WriteText("]")
- }
-
- snip.WriteText("(")
-
- if c.opts.placeholders {
- // A placeholder snippet turns "someFun<>" into "someFunc(<*i int*>, *s string*)".
- for i, p := range params {
- if i > 0 {
- snip.WriteText(", ")
- }
- snip.WritePlaceholder(func(b *snippet.Builder) {
- b.WriteText(p)
- })
- }
- } else {
- // A plain snippet turns "someFun<>" into "someFunc(<>)".
- if len(params) > 0 {
- snip.WritePlaceholder(nil)
- }
- }
-
- snip.WriteText(")")
-}
diff --git a/internal/lsp/source/completion/statements.go b/internal/lsp/source/completion/statements.go
deleted file mode 100644
index 3280bb52c..000000000
--- a/internal/lsp/source/completion/statements.go
+++ /dev/null
@@ -1,360 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package completion
-
-import (
- "fmt"
- "go/ast"
- "go/token"
- "go/types"
-
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/snippet"
- "golang.org/x/tools/internal/lsp/source"
-)
-
-// addStatementCandidates adds full statement completion candidates
-// appropriate for the current context.
-func (c *completer) addStatementCandidates() {
- c.addErrCheck()
- c.addAssignAppend()
-}
-
-// addAssignAppend offers a completion candidate of the form:
-//
-// someSlice = append(someSlice, )
-//
-// It will offer the "append" completion in two situations:
-//
-// 1. Position is in RHS of assign, prefix matches "append", and
-// corresponding LHS object is a slice. For example,
-// "foo = ap<>" completes to "foo = append(foo, )".
-//
-// Or
-//
-// 2. Prefix is an ident or selector in an *ast.ExprStmt (i.e.
-// beginning of statement), and our best matching candidate is a
-// slice. For example: "foo.ba" completes to "foo.bar = append(foo.bar, )".
-func (c *completer) addAssignAppend() {
- if len(c.path) < 3 {
- return
- }
-
- ident, _ := c.path[0].(*ast.Ident)
- if ident == nil {
- return
- }
-
- var (
- // sliceText is the full name of our slice object, e.g. "s.abc" in
- // "s.abc = app<>".
- sliceText string
- // needsLHS is true if we need to prepend the LHS slice name and
- // "=" to our candidate.
- needsLHS = false
- fset = c.snapshot.FileSet()
- )
-
- switch n := c.path[1].(type) {
- case *ast.AssignStmt:
- // We are already in an assignment. Make sure our prefix matches "append".
- if c.matcher.Score("append") <= 0 {
- return
- }
-
- exprIdx := exprAtPos(c.pos, n.Rhs)
- if exprIdx == len(n.Rhs) || exprIdx > len(n.Lhs)-1 {
- return
- }
-
- lhsType := c.pkg.GetTypesInfo().TypeOf(n.Lhs[exprIdx])
- if lhsType == nil {
- return
- }
-
- // Make sure our corresponding LHS object is a slice.
- if _, isSlice := lhsType.Underlying().(*types.Slice); !isSlice {
- return
- }
-
- // The name or our slice is whatever's in the LHS expression.
- sliceText = source.FormatNode(fset, n.Lhs[exprIdx])
- case *ast.SelectorExpr:
- // Make sure we are a selector at the beginning of a statement.
- if _, parentIsExprtStmt := c.path[2].(*ast.ExprStmt); !parentIsExprtStmt {
- return
- }
-
- // So far we only know the first part of our slice name. For
- // example in "s.a<>" we only know our slice begins with "s."
- // since the user could still be typing.
- sliceText = source.FormatNode(fset, n.X) + "."
- needsLHS = true
- case *ast.ExprStmt:
- needsLHS = true
- default:
- return
- }
-
- var (
- label string
- snip snippet.Builder
- score = highScore
- )
-
- if needsLHS {
- // Offer the long form assign + append candidate if our best
- // candidate is a slice.
- bestItem := c.topCandidate()
- if bestItem == nil || bestItem.obj == nil || bestItem.obj.Type() == nil {
- return
- }
-
- if _, isSlice := bestItem.obj.Type().Underlying().(*types.Slice); !isSlice {
- return
- }
-
- // Don't rank the full form assign + append candidate above the
- // slice itself.
- score = bestItem.Score - 0.01
-
- // Fill in rest of sliceText now that we have the object name.
- sliceText += bestItem.Label
-
- // Fill in the candidate's LHS bits.
- label = fmt.Sprintf("%s = ", bestItem.Label)
- snip.WriteText(label)
- }
-
- snip.WriteText(fmt.Sprintf("append(%s, ", sliceText))
- snip.WritePlaceholder(nil)
- snip.WriteText(")")
-
- c.items = append(c.items, CompletionItem{
- Label: label + fmt.Sprintf("append(%s, )", sliceText),
- Kind: protocol.FunctionCompletion,
- Score: score,
- snippet: &snip,
- })
-}
-
-// topCandidate returns the strictly highest scoring candidate
-// collected so far. If the top two candidates have the same score,
-// nil is returned.
-func (c *completer) topCandidate() *CompletionItem {
- var bestItem, secondBestItem *CompletionItem
- for i := range c.items {
- if bestItem == nil || c.items[i].Score > bestItem.Score {
- bestItem = &c.items[i]
- } else if secondBestItem == nil || c.items[i].Score > secondBestItem.Score {
- secondBestItem = &c.items[i]
- }
- }
-
- // If secondBestItem has the same score, bestItem isn't
- // the strict best.
- if secondBestItem != nil && secondBestItem.Score == bestItem.Score {
- return nil
- }
-
- return bestItem
-}
-
-// addErrCheck offers a completion candidate of the form:
-//
-// if err != nil {
-// return nil, err
-// }
-//
-// In the case of test functions, it offers a completion candidate of the form:
-//
-// if err != nil {
-// t.Fatal(err)
-// }
-//
-// The position must be in a function that returns an error, and the
-// statement preceding the position must be an assignment where the
-// final LHS object is an error. addErrCheck will synthesize
-// zero values as necessary to make the return statement valid.
-func (c *completer) addErrCheck() {
- if len(c.path) < 2 || c.enclosingFunc == nil || !c.opts.placeholders {
- return
- }
-
- var (
- errorType = types.Universe.Lookup("error").Type()
- result = c.enclosingFunc.sig.Results()
- testVar = getTestVar(c.enclosingFunc, c.pkg)
- isTest = testVar != ""
- doesNotReturnErr = result.Len() == 0 || !types.Identical(result.At(result.Len()-1).Type(), errorType)
- )
- // Make sure our enclosing function is a Test func or returns an error.
- if !isTest && doesNotReturnErr {
- return
- }
-
- prevLine := prevStmt(c.pos, c.path)
- if prevLine == nil {
- return
- }
-
- // Make sure our preceding statement was as assignment.
- assign, _ := prevLine.(*ast.AssignStmt)
- if assign == nil || len(assign.Lhs) == 0 {
- return
- }
-
- lastAssignee := assign.Lhs[len(assign.Lhs)-1]
-
- // Make sure the final assignee is an error.
- if !types.Identical(c.pkg.GetTypesInfo().TypeOf(lastAssignee), errorType) {
- return
- }
-
- var (
- // errVar is e.g. "err" in "foo, err := bar()".
- errVar = source.FormatNode(c.snapshot.FileSet(), lastAssignee)
-
- // Whether we need to include the "if" keyword in our candidate.
- needsIf = true
- )
-
- // If the returned error from the previous statement is "_", it is not a real object.
- // If we don't have an error, and the function signature takes a testing.TB that is either ignored
- // or an "_", then we also can't call t.Fatal(err).
- if errVar == "_" {
- return
- }
-
- // Below we try to detect if the user has already started typing "if
- // err" so we can replace what they've typed with our complete
- // statement.
- switch n := c.path[0].(type) {
- case *ast.Ident:
- switch c.path[1].(type) {
- case *ast.ExprStmt:
- // This handles:
- //
- // f, err := os.Open("foo")
- // i<>
-
- // Make sure they are typing "if".
- if c.matcher.Score("if") <= 0 {
- return
- }
- case *ast.IfStmt:
- // This handles:
- //
- // f, err := os.Open("foo")
- // if er<>
-
- // Make sure they are typing the error's name.
- if c.matcher.Score(errVar) <= 0 {
- return
- }
-
- needsIf = false
- default:
- return
- }
- case *ast.IfStmt:
- // This handles:
- //
- // f, err := os.Open("foo")
- // if <>
-
- // Avoid false positives by ensuring the if's cond is a bad
- // expression. For example, don't offer the completion in cases
- // like "if <> somethingElse".
- if _, bad := n.Cond.(*ast.BadExpr); !bad {
- return
- }
-
- // If "if" is our direct prefix, we need to include it in our
- // candidate since the existing "if" will be overwritten.
- needsIf = c.pos == n.Pos()+token.Pos(len("if"))
- }
-
- // Build up a snippet that looks like:
- //
- // if err != nil {
- // return <zero value>, ..., ${1:err}
- // }
- //
- // We make the error a placeholder so it is easy to alter the error.
- var snip snippet.Builder
- if needsIf {
- snip.WriteText("if ")
- }
- snip.WriteText(fmt.Sprintf("%s != nil {\n\t", errVar))
-
- var label string
- if isTest {
- snip.WriteText(fmt.Sprintf("%s.Fatal(%s)", testVar, errVar))
- label = fmt.Sprintf("%[1]s != nil { %[2]s.Fatal(%[1]s) }", errVar, testVar)
- } else {
- snip.WriteText("return ")
- for i := 0; i < result.Len()-1; i++ {
- snip.WriteText(formatZeroValue(result.At(i).Type(), c.qf))
- snip.WriteText(", ")
- }
- snip.WritePlaceholder(func(b *snippet.Builder) {
- b.WriteText(errVar)
- })
- label = fmt.Sprintf("%[1]s != nil { return %[1]s }", errVar)
- }
-
- snip.WriteText("\n}")
-
- if needsIf {
- label = "if " + label
- }
-
- c.items = append(c.items, CompletionItem{
- Label: label,
- // There doesn't seem to be a more appropriate kind.
- Kind: protocol.KeywordCompletion,
- Score: highScore,
- snippet: &snip,
- })
-}
-
-// getTestVar checks the function signature's input parameters and returns
-// the name of the first parameter that implements "testing.TB". For example,
-// func someFunc(t *testing.T) returns the string "t", func someFunc(b *testing.B)
-// returns "b" etc. An empty string indicates that the function signature
-// does not take a testing.TB parameter or does so but is ignored such
-// as func someFunc(*testing.T).
-func getTestVar(enclosingFunc *funcInfo, pkg source.Package) string {
- if enclosingFunc == nil || enclosingFunc.sig == nil {
- return ""
- }
-
- sig := enclosingFunc.sig
- for i := 0; i < sig.Params().Len(); i++ {
- param := sig.Params().At(i)
- if param.Name() == "_" {
- continue
- }
- testingPkg, err := pkg.GetImport("testing")
- if err != nil {
- continue
- }
- tbObj := testingPkg.GetTypes().Scope().Lookup("TB")
- if tbObj == nil {
- continue
- }
- iface, ok := tbObj.Type().Underlying().(*types.Interface)
- if !ok {
- continue
- }
- if !types.Implements(param.Type(), iface) {
- continue
- }
- return param.Name()
- }
-
- return ""
-}
diff --git a/internal/lsp/source/completion/util.go b/internal/lsp/source/completion/util.go
deleted file mode 100644
index 505c7e256..000000000
--- a/internal/lsp/source/completion/util.go
+++ /dev/null
@@ -1,326 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package completion
-
-import (
- "go/ast"
- "go/token"
- "go/types"
-
- "golang.org/x/tools/internal/lsp/diff"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
-)
-
-// exprAtPos returns the index of the expression containing pos.
-func exprAtPos(pos token.Pos, args []ast.Expr) int {
- for i, expr := range args {
- if expr.Pos() <= pos && pos <= expr.End() {
- return i
- }
- }
- return len(args)
-}
-
-// eachField invokes fn for each field that can be selected from a
-// value of type T.
-func eachField(T types.Type, fn func(*types.Var)) {
- // TODO(adonovan): this algorithm doesn't exclude ambiguous
- // selections that match more than one field/method.
- // types.NewSelectionSet should do that for us.
-
- // for termination on recursive types
- var seen map[*types.Struct]bool
-
- var visit func(T types.Type)
- visit = func(T types.Type) {
- if T, ok := source.Deref(T).Underlying().(*types.Struct); ok {
- if seen[T] {
- return
- }
-
- for i := 0; i < T.NumFields(); i++ {
- f := T.Field(i)
- fn(f)
- if f.Anonymous() {
- if seen == nil {
- // Lazily create "seen" since it is only needed for
- // embedded structs.
- seen = make(map[*types.Struct]bool)
- }
- seen[T] = true
- visit(f.Type())
- }
- }
- }
- }
- visit(T)
-}
-
-// typeIsValid reports whether typ doesn't contain any Invalid types.
-func typeIsValid(typ types.Type) bool {
- // Check named types separately, because we don't want
- // to call Underlying() on them to avoid problems with recursive types.
- if _, ok := typ.(*types.Named); ok {
- return true
- }
-
- switch typ := typ.Underlying().(type) {
- case *types.Basic:
- return typ.Kind() != types.Invalid
- case *types.Array:
- return typeIsValid(typ.Elem())
- case *types.Slice:
- return typeIsValid(typ.Elem())
- case *types.Pointer:
- return typeIsValid(typ.Elem())
- case *types.Map:
- return typeIsValid(typ.Key()) && typeIsValid(typ.Elem())
- case *types.Chan:
- return typeIsValid(typ.Elem())
- case *types.Signature:
- return typeIsValid(typ.Params()) && typeIsValid(typ.Results())
- case *types.Tuple:
- for i := 0; i < typ.Len(); i++ {
- if !typeIsValid(typ.At(i).Type()) {
- return false
- }
- }
- return true
- case *types.Struct, *types.Interface:
- // Don't bother checking structs, interfaces for validity.
- return true
- default:
- return false
- }
-}
-
-// resolveInvalid traverses the node of the AST that defines the scope
-// containing the declaration of obj, and attempts to find a user-friendly
-// name for its invalid type. The resulting Object and its Type are fake.
-func resolveInvalid(fset *token.FileSet, obj types.Object, node ast.Node, info *types.Info) types.Object {
- var resultExpr ast.Expr
- ast.Inspect(node, func(node ast.Node) bool {
- switch n := node.(type) {
- case *ast.ValueSpec:
- for _, name := range n.Names {
- if info.Defs[name] == obj {
- resultExpr = n.Type
- }
- }
- return false
- case *ast.Field: // This case handles parameters and results of a FuncDecl or FuncLit.
- for _, name := range n.Names {
- if info.Defs[name] == obj {
- resultExpr = n.Type
- }
- }
- return false
- default:
- return true
- }
- })
- // Construct a fake type for the object and return a fake object with this type.
- typename := source.FormatNode(fset, resultExpr)
- typ := types.NewNamed(types.NewTypeName(token.NoPos, obj.Pkg(), typename, nil), types.Typ[types.Invalid], nil)
- return types.NewVar(obj.Pos(), obj.Pkg(), obj.Name(), typ)
-}
-
-func isPointer(T types.Type) bool {
- _, ok := T.(*types.Pointer)
- return ok
-}
-
-func isVar(obj types.Object) bool {
- _, ok := obj.(*types.Var)
- return ok
-}
-
-func isTypeName(obj types.Object) bool {
- _, ok := obj.(*types.TypeName)
- return ok
-}
-
-func isFunc(obj types.Object) bool {
- _, ok := obj.(*types.Func)
- return ok
-}
-
-func isEmptyInterface(T types.Type) bool {
- intf, _ := T.(*types.Interface)
- return intf != nil && intf.NumMethods() == 0
-}
-
-func isUntyped(T types.Type) bool {
- if basic, ok := T.(*types.Basic); ok {
- return basic.Info()&types.IsUntyped > 0
- }
- return false
-}
-
-func isPkgName(obj types.Object) bool {
- _, ok := obj.(*types.PkgName)
- return ok
-}
-
-func isASTFile(n ast.Node) bool {
- _, ok := n.(*ast.File)
- return ok
-}
-
-func deslice(T types.Type) types.Type {
- if slice, ok := T.Underlying().(*types.Slice); ok {
- return slice.Elem()
- }
- return nil
-}
-
-// isSelector returns the enclosing *ast.SelectorExpr when pos is in the
-// selector.
-func enclosingSelector(path []ast.Node, pos token.Pos) *ast.SelectorExpr {
- if len(path) == 0 {
- return nil
- }
-
- if sel, ok := path[0].(*ast.SelectorExpr); ok {
- return sel
- }
-
- if _, ok := path[0].(*ast.Ident); ok && len(path) > 1 {
- if sel, ok := path[1].(*ast.SelectorExpr); ok && pos >= sel.Sel.Pos() {
- return sel
- }
- }
-
- return nil
-}
-
-// enclosingDeclLHS returns LHS idents from containing value spec or
-// assign statement.
-func enclosingDeclLHS(path []ast.Node) []*ast.Ident {
- for _, n := range path {
- switch n := n.(type) {
- case *ast.ValueSpec:
- return n.Names
- case *ast.AssignStmt:
- ids := make([]*ast.Ident, 0, len(n.Lhs))
- for _, e := range n.Lhs {
- if id, ok := e.(*ast.Ident); ok {
- ids = append(ids, id)
- }
- }
- return ids
- }
- }
-
- return nil
-}
-
-// exprObj returns the types.Object associated with the *ast.Ident or
-// *ast.SelectorExpr e.
-func exprObj(info *types.Info, e ast.Expr) types.Object {
- var ident *ast.Ident
- switch expr := e.(type) {
- case *ast.Ident:
- ident = expr
- case *ast.SelectorExpr:
- ident = expr.Sel
- default:
- return nil
- }
-
- return info.ObjectOf(ident)
-}
-
-// typeConversion returns the type being converted to if call is a type
-// conversion expression.
-func typeConversion(call *ast.CallExpr, info *types.Info) types.Type {
- // Type conversion (e.g. "float64(foo)").
- if fun, _ := exprObj(info, call.Fun).(*types.TypeName); fun != nil {
- return fun.Type()
- }
-
- return nil
-}
-
-// fieldsAccessible returns whether s has at least one field accessible by p.
-func fieldsAccessible(s *types.Struct, p *types.Package) bool {
- for i := 0; i < s.NumFields(); i++ {
- f := s.Field(i)
- if f.Exported() || f.Pkg() == p {
- return true
- }
- }
- return false
-}
-
-// prevStmt returns the statement that precedes the statement containing pos.
-// For example:
-//
-// foo := 1
-// bar(1 + 2<>)
-//
-// If "<>" is pos, prevStmt returns "foo := 1"
-func prevStmt(pos token.Pos, path []ast.Node) ast.Stmt {
- var blockLines []ast.Stmt
- for i := 0; i < len(path) && blockLines == nil; i++ {
- switch n := path[i].(type) {
- case *ast.BlockStmt:
- blockLines = n.List
- case *ast.CommClause:
- blockLines = n.Body
- case *ast.CaseClause:
- blockLines = n.Body
- }
- }
-
- for i := len(blockLines) - 1; i >= 0; i-- {
- if blockLines[i].End() < pos {
- return blockLines[i]
- }
- }
-
- return nil
-}
-
-// formatZeroValue produces Go code representing the zero value of T. It
-// returns the empty string if T is invalid.
-func formatZeroValue(T types.Type, qf types.Qualifier) string {
- switch u := T.Underlying().(type) {
- case *types.Basic:
- switch {
- case u.Info()&types.IsNumeric > 0:
- return "0"
- case u.Info()&types.IsString > 0:
- return `""`
- case u.Info()&types.IsBoolean > 0:
- return "false"
- default:
- return ""
- }
- case *types.Pointer, *types.Interface, *types.Chan, *types.Map, *types.Slice, *types.Signature:
- return "nil"
- default:
- return types.TypeString(T, qf) + "{}"
- }
-}
-
-// isBasicKind returns whether t is a basic type of kind k.
-func isBasicKind(t types.Type, k types.BasicInfo) bool {
- b, _ := t.Underlying().(*types.Basic)
- return b != nil && b.Info()&k > 0
-}
-
-func (c *completer) editText(from, to token.Pos, newText string) ([]protocol.TextEdit, error) {
- rng := source.NewMappedRange(c.snapshot.FileSet(), c.mapper, from, to)
- spn, err := rng.Span()
- if err != nil {
- return nil, err
- }
- return source.ToProtocolEdits(c.mapper, []diff.TextEdit{{
- Span: spn,
- NewText: newText,
- }})
-}
diff --git a/internal/lsp/source/diagnostics.go b/internal/lsp/source/diagnostics.go
deleted file mode 100644
index e393c2f94..000000000
--- a/internal/lsp/source/diagnostics.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package source
-
-import (
- "context"
-
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/span"
-)
-
-type SuggestedFix struct {
- Title string
- Edits map[span.URI][]protocol.TextEdit
- Command *protocol.Command
- ActionKind protocol.CodeActionKind
-}
-
-type RelatedInformation struct {
- URI span.URI
- Range protocol.Range
- Message string
-}
-
-func Analyze(ctx context.Context, snapshot Snapshot, pkg Package, includeConvenience bool) (map[span.URI][]*Diagnostic, error) {
- // Exit early if the context has been canceled. This also protects us
- // from a race on Options, see golang/go#36699.
- if ctx.Err() != nil {
- return nil, ctx.Err()
- }
-
- categories := []map[string]*Analyzer{}
- if includeConvenience {
- categories = append(categories, snapshot.View().Options().ConvenienceAnalyzers)
- }
- // If we had type errors, don't run any other analyzers.
- if !pkg.HasTypeErrors() {
- categories = append(categories, snapshot.View().Options().DefaultAnalyzers, snapshot.View().Options().StaticcheckAnalyzers)
- }
- var analyzers []*Analyzer
- for _, cat := range categories {
- for _, a := range cat {
- analyzers = append(analyzers, a)
- }
- }
-
- analysisDiagnostics, err := snapshot.Analyze(ctx, pkg.ID(), analyzers)
- if err != nil {
- return nil, err
- }
-
- reports := map[span.URI][]*Diagnostic{}
- // Report diagnostics and errors from root analyzers.
- for _, diag := range analysisDiagnostics {
- reports[diag.URI] = append(reports[diag.URI], diag)
- }
- return reports, nil
-}
-
-func FileDiagnostics(ctx context.Context, snapshot Snapshot, uri span.URI) (VersionedFileIdentity, []*Diagnostic, error) {
- fh, err := snapshot.GetVersionedFile(ctx, uri)
- if err != nil {
- return VersionedFileIdentity{}, nil, err
- }
- pkg, _, err := GetParsedFile(ctx, snapshot, fh, NarrowestPackage)
- if err != nil {
- return VersionedFileIdentity{}, nil, err
- }
- diagnostics, err := snapshot.DiagnosePackage(ctx, pkg)
- if err != nil {
- return VersionedFileIdentity{}, nil, err
- }
- fileDiags := diagnostics[fh.URI()]
- if !pkg.HasListOrParseErrors() {
- analysisDiags, err := Analyze(ctx, snapshot, pkg, false)
- if err != nil {
- return VersionedFileIdentity{}, nil, err
- }
- fileDiags = append(fileDiags, analysisDiags[fh.URI()]...)
- }
- return fh.VersionedFileIdentity(), fileDiags, nil
-}
diff --git a/internal/lsp/source/extract.go b/internal/lsp/source/extract.go
deleted file mode 100644
index 43b414add..000000000
--- a/internal/lsp/source/extract.go
+++ /dev/null
@@ -1,1307 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package source
-
-import (
- "bytes"
- "fmt"
- "go/ast"
- "go/format"
- "go/parser"
- "go/token"
- "go/types"
- "strings"
- "unicode"
-
- "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/ast/astutil"
- "golang.org/x/tools/internal/analysisinternal"
- "golang.org/x/tools/internal/span"
-)
-
-func extractVariable(fset *token.FileSet, rng span.Range, src []byte, file *ast.File, _ *types.Package, info *types.Info) (*analysis.SuggestedFix, error) {
- expr, path, ok, err := CanExtractVariable(rng, file)
- if !ok {
- return nil, fmt.Errorf("extractVariable: cannot extract %s: %v", fset.Position(rng.Start), err)
- }
-
- // Create new AST node for extracted code.
- var lhsNames []string
- switch expr := expr.(type) {
- // TODO: stricter rules for selectorExpr.
- case *ast.BasicLit, *ast.CompositeLit, *ast.IndexExpr, *ast.SliceExpr,
- *ast.UnaryExpr, *ast.BinaryExpr, *ast.SelectorExpr:
- lhsName, _ := generateAvailableIdentifier(expr.Pos(), file, path, info, "x", 0)
- lhsNames = append(lhsNames, lhsName)
- case *ast.CallExpr:
- tup, ok := info.TypeOf(expr).(*types.Tuple)
- if !ok {
- // If the call expression only has one return value, we can treat it the
- // same as our standard extract variable case.
- lhsName, _ := generateAvailableIdentifier(expr.Pos(), file, path, info, "x", 0)
- lhsNames = append(lhsNames, lhsName)
- break
- }
- idx := 0
- for i := 0; i < tup.Len(); i++ {
- // Generate a unique variable for each return value.
- var lhsName string
- lhsName, idx = generateAvailableIdentifier(expr.Pos(), file, path, info, "x", idx)
- lhsNames = append(lhsNames, lhsName)
- }
- default:
- return nil, fmt.Errorf("cannot extract %T", expr)
- }
-
- insertBeforeStmt := analysisinternal.StmtToInsertVarBefore(path)
- if insertBeforeStmt == nil {
- return nil, fmt.Errorf("cannot find location to insert extraction")
- }
- tok := fset.File(expr.Pos())
- if tok == nil {
- return nil, fmt.Errorf("no file for pos %v", fset.Position(file.Pos()))
- }
- indent, err := calculateIndentation(src, tok, insertBeforeStmt)
- if err != nil {
- return nil, err
- }
- newLineIndent := "\n" + indent
-
- lhs := strings.Join(lhsNames, ", ")
- assignStmt := &ast.AssignStmt{
- Lhs: []ast.Expr{ast.NewIdent(lhs)},
- Tok: token.DEFINE,
- Rhs: []ast.Expr{expr},
- }
- var buf bytes.Buffer
- if err := format.Node(&buf, fset, assignStmt); err != nil {
- return nil, err
- }
- assignment := strings.ReplaceAll(buf.String(), "\n", newLineIndent) + newLineIndent
-
- return &analysis.SuggestedFix{
- TextEdits: []analysis.TextEdit{
- {
- Pos: insertBeforeStmt.Pos(),
- End: insertBeforeStmt.Pos(),
- NewText: []byte(assignment),
- },
- {
- Pos: rng.Start,
- End: rng.End,
- NewText: []byte(lhs),
- },
- },
- }, nil
-}
-
-// CanExtractVariable reports whether the code in the given range can be
-// extracted to a variable.
-func CanExtractVariable(rng span.Range, file *ast.File) (ast.Expr, []ast.Node, bool, error) {
- if rng.Start == rng.End {
- return nil, nil, false, fmt.Errorf("start and end are equal")
- }
- path, _ := astutil.PathEnclosingInterval(file, rng.Start, rng.End)
- if len(path) == 0 {
- return nil, nil, false, fmt.Errorf("no path enclosing interval")
- }
- for _, n := range path {
- if _, ok := n.(*ast.ImportSpec); ok {
- return nil, nil, false, fmt.Errorf("cannot extract variable in an import block")
- }
- }
- node := path[0]
- if rng.Start != node.Pos() || rng.End != node.End() {
- return nil, nil, false, fmt.Errorf("range does not map to an AST node")
- }
- expr, ok := node.(ast.Expr)
- if !ok {
- return nil, nil, false, fmt.Errorf("node is not an expression")
- }
- switch expr.(type) {
- case *ast.BasicLit, *ast.CompositeLit, *ast.IndexExpr, *ast.CallExpr,
- *ast.SliceExpr, *ast.UnaryExpr, *ast.BinaryExpr, *ast.SelectorExpr:
- return expr, path, true, nil
- }
- return nil, nil, false, fmt.Errorf("cannot extract an %T to a variable", expr)
-}
-
-// Calculate indentation for insertion.
-// When inserting lines of code, we must ensure that the lines have consistent
-// formatting (i.e. the proper indentation). To do so, we observe the indentation on the
-// line of code on which the insertion occurs.
-func calculateIndentation(content []byte, tok *token.File, insertBeforeStmt ast.Node) (string, error) {
- line := tok.Line(insertBeforeStmt.Pos())
- lineOffset, err := Offset(tok, tok.LineStart(line))
- if err != nil {
- return "", err
- }
- stmtOffset, err := Offset(tok, insertBeforeStmt.Pos())
- if err != nil {
- return "", err
- }
- return string(content[lineOffset:stmtOffset]), nil
-}
-
-// generateAvailableIdentifier adjusts the new function name until there are no collisons in scope.
-// Possible collisions include other function and variable names. Returns the next index to check for prefix.
-func generateAvailableIdentifier(pos token.Pos, file *ast.File, path []ast.Node, info *types.Info, prefix string, idx int) (string, int) {
- scopes := CollectScopes(info, path, pos)
- return generateIdentifier(idx, prefix, func(name string) bool {
- return file.Scope.Lookup(name) != nil || !isValidName(name, scopes)
- })
-}
-
-func generateIdentifier(idx int, prefix string, hasCollision func(string) bool) (string, int) {
- name := prefix
- if idx != 0 {
- name += fmt.Sprintf("%d", idx)
- }
- for hasCollision(name) {
- idx++
- name = fmt.Sprintf("%v%d", prefix, idx)
- }
- return name, idx + 1
-}
-
-// isValidName checks for variable collision in scope.
-func isValidName(name string, scopes []*types.Scope) bool {
- for _, scope := range scopes {
- if scope == nil {
- continue
- }
- if scope.Lookup(name) != nil {
- return false
- }
- }
- return true
-}
-
-// returnVariable keeps track of the information we need to properly introduce a new variable
-// that we will return in the extracted function.
-type returnVariable struct {
- // name is the identifier that is used on the left-hand side of the call to
- // the extracted function.
- name ast.Expr
- // decl is the declaration of the variable. It is used in the type signature of the
- // extracted function and for variable declarations.
- decl *ast.Field
- // zeroVal is the "zero value" of the type of the variable. It is used in a return
- // statement in the extracted function.
- zeroVal ast.Expr
-}
-
-// extractMethod refactors the selected block of code into a new method.
-func extractMethod(fset *token.FileSet, rng span.Range, src []byte, file *ast.File, pkg *types.Package, info *types.Info) (*analysis.SuggestedFix, error) {
- return extractFunctionMethod(fset, rng, src, file, pkg, info, true)
-}
-
-// extractFunction refactors the selected block of code into a new function.
-func extractFunction(fset *token.FileSet, rng span.Range, src []byte, file *ast.File, pkg *types.Package, info *types.Info) (*analysis.SuggestedFix, error) {
- return extractFunctionMethod(fset, rng, src, file, pkg, info, false)
-}
-
-// extractFunctionMethod refactors the selected block of code into a new function/method.
-// It also replaces the selected block of code with a call to the extracted
-// function. First, we manually adjust the selection range. We remove trailing
-// and leading whitespace characters to ensure the range is precisely bounded
-// by AST nodes. Next, we determine the variables that will be the parameters
-// and return values of the extracted function/method. Lastly, we construct the call
-// of the function/method and insert this call as well as the extracted function/method into
-// their proper locations.
-func extractFunctionMethod(fset *token.FileSet, rng span.Range, src []byte, file *ast.File, pkg *types.Package, info *types.Info, isMethod bool) (*analysis.SuggestedFix, error) {
- errorPrefix := "extractFunction"
- if isMethod {
- errorPrefix = "extractMethod"
- }
- p, ok, methodOk, err := CanExtractFunction(fset, rng, src, file)
- if (!ok && !isMethod) || (!methodOk && isMethod) {
- return nil, fmt.Errorf("%s: cannot extract %s: %v", errorPrefix,
- fset.Position(rng.Start), err)
- }
- tok, path, rng, outer, start := p.tok, p.path, p.rng, p.outer, p.start
- fileScope := info.Scopes[file]
- if fileScope == nil {
- return nil, fmt.Errorf("%s: file scope is empty", errorPrefix)
- }
- pkgScope := fileScope.Parent()
- if pkgScope == nil {
- return nil, fmt.Errorf("%s: package scope is empty", errorPrefix)
- }
-
- // A return statement is non-nested if its parent node is equal to the parent node
- // of the first node in the selection. These cases must be handled separately because
- // non-nested return statements are guaranteed to execute.
- var retStmts []*ast.ReturnStmt
- var hasNonNestedReturn bool
- startParent := findParent(outer, start)
- ast.Inspect(outer, func(n ast.Node) bool {
- if n == nil {
- return false
- }
- if n.Pos() < rng.Start || n.End() > rng.End {
- return n.Pos() <= rng.End
- }
- ret, ok := n.(*ast.ReturnStmt)
- if !ok {
- return true
- }
- if findParent(outer, n) == startParent {
- hasNonNestedReturn = true
- }
- retStmts = append(retStmts, ret)
- return false
- })
- containsReturnStatement := len(retStmts) > 0
-
- // Now that we have determined the correct range for the selection block,
- // we must determine the signature of the extracted function. We will then replace
- // the block with an assignment statement that calls the extracted function with
- // the appropriate parameters and return values.
- variables, err := collectFreeVars(info, file, fileScope, pkgScope, rng, path[0])
- if err != nil {
- return nil, err
- }
-
- var (
- receiverUsed bool
- receiver *ast.Field
- receiverName string
- receiverObj types.Object
- )
- if isMethod {
- if outer == nil || outer.Recv == nil || len(outer.Recv.List) == 0 {
- return nil, fmt.Errorf("%s: cannot extract need method receiver", errorPrefix)
- }
- receiver = outer.Recv.List[0]
- if len(receiver.Names) == 0 || receiver.Names[0] == nil {
- return nil, fmt.Errorf("%s: cannot extract need method receiver name", errorPrefix)
- }
- recvName := receiver.Names[0]
- receiverName = recvName.Name
- receiverObj = info.ObjectOf(recvName)
- }
-
- var (
- params, returns []ast.Expr // used when calling the extracted function
- paramTypes, returnTypes []*ast.Field // used in the signature of the extracted function
- uninitialized []types.Object // vars we will need to initialize before the call
- )
-
- // Avoid duplicates while traversing vars and uninitialzed.
- seenVars := make(map[types.Object]ast.Expr)
- seenUninitialized := make(map[types.Object]struct{})
-
- // Some variables on the left-hand side of our assignment statement may be free. If our
- // selection begins in the same scope in which the free variable is defined, we can
- // redefine it in our assignment statement. See the following example, where 'b' and
- // 'err' (both free variables) can be redefined in the second funcCall() while maintaining
- // correctness.
- //
- //
- // Not Redefined:
- //
- // a, err := funcCall()
- // var b int
- // b, err = funcCall()
- //
- // Redefined:
- //
- // a, err := funcCall()
- // b, err := funcCall()
- //
- // We track the number of free variables that can be redefined to maintain our preference
- // of using "x, y, z := fn()" style assignment statements.
- var canRedefineCount int
-
- // Each identifier in the selected block must become (1) a parameter to the
- // extracted function, (2) a return value of the extracted function, or (3) a local
- // variable in the extracted function. Determine the outcome(s) for each variable
- // based on whether it is free, altered within the selected block, and used outside
- // of the selected block.
- for _, v := range variables {
- if _, ok := seenVars[v.obj]; ok {
- continue
- }
- if v.obj.Name() == "_" {
- // The blank identifier is always a local variable
- continue
- }
- typ := analysisinternal.TypeExpr(fset, file, pkg, v.obj.Type())
- if typ == nil {
- return nil, fmt.Errorf("nil AST expression for type: %v", v.obj.Name())
- }
- seenVars[v.obj] = typ
- identifier := ast.NewIdent(v.obj.Name())
- // An identifier must meet three conditions to become a return value of the
- // extracted function. (1) its value must be defined or reassigned within
- // the selection (isAssigned), (2) it must be used at least once after the
- // selection (isUsed), and (3) its first use after the selection
- // cannot be its own reassignment or redefinition (objOverriden).
- if v.obj.Parent() == nil {
- return nil, fmt.Errorf("parent nil")
- }
- isUsed, firstUseAfter := objUsed(info, span.NewRange(fset, rng.End, v.obj.Parent().End()), v.obj)
- if v.assigned && isUsed && !varOverridden(info, firstUseAfter, v.obj, v.free, outer) {
- returnTypes = append(returnTypes, &ast.Field{Type: typ})
- returns = append(returns, identifier)
- if !v.free {
- uninitialized = append(uninitialized, v.obj)
- } else if v.obj.Parent().Pos() == startParent.Pos() {
- canRedefineCount++
- }
- }
- // An identifier must meet two conditions to become a parameter of the
- // extracted function. (1) it must be free (isFree), and (2) its first
- // use within the selection cannot be its own definition (isDefined).
- if v.free && !v.defined {
- // Skip the selector for a method.
- if isMethod && v.obj == receiverObj {
- receiverUsed = true
- continue
- }
- params = append(params, identifier)
- paramTypes = append(paramTypes, &ast.Field{
- Names: []*ast.Ident{identifier},
- Type: typ,
- })
- }
- }
-
- // Find the function literal that encloses the selection. The enclosing function literal
- // may not be the enclosing function declaration (i.e. 'outer'). For example, in the
- // following block:
- //
- // func main() {
- // ast.Inspect(node, func(n ast.Node) bool {
- // v := 1 // this line extracted
- // return true
- // })
- // }
- //
- // 'outer' is main(). However, the extracted selection most directly belongs to
- // the anonymous function literal, the second argument of ast.Inspect(). We use the
- // enclosing function literal to determine the proper return types for return statements
- // within the selection. We still need the enclosing function declaration because this is
- // the top-level declaration. We inspect the top-level declaration to look for variables
- // as well as for code replacement.
- enclosing := outer.Type
- for _, p := range path {
- if p == enclosing {
- break
- }
- if fl, ok := p.(*ast.FuncLit); ok {
- enclosing = fl.Type
- break
- }
- }
-
- // We put the selection in a constructed file. We can then traverse and edit
- // the extracted selection without modifying the original AST.
- startOffset, err := Offset(tok, rng.Start)
- if err != nil {
- return nil, err
- }
- endOffset, err := Offset(tok, rng.End)
- if err != nil {
- return nil, err
- }
- selection := src[startOffset:endOffset]
- extractedBlock, err := parseBlockStmt(fset, selection)
- if err != nil {
- return nil, err
- }
-
- // We need to account for return statements in the selected block, as they will complicate
- // the logical flow of the extracted function. See the following example, where ** denotes
- // the range to be extracted.
- //
- // Before:
- //
- // func _() int {
- // a := 1
- // b := 2
- // **if a == b {
- // return a
- // }**
- // ...
- // }
- //
- // After:
- //
- // func _() int {
- // a := 1
- // b := 2
- // cond0, ret0 := x0(a, b)
- // if cond0 {
- // return ret0
- // }
- // ...
- // }
- //
- // func x0(a int, b int) (bool, int) {
- // if a == b {
- // return true, a
- // }
- // return false, 0
- // }
- //
- // We handle returns by adding an additional boolean return value to the extracted function.
- // This bool reports whether the original function would have returned. Because the
- // extracted selection contains a return statement, we must also add the types in the
- // return signature of the enclosing function to the return signature of the
- // extracted function. We then add an extra if statement checking this boolean value
- // in the original function. If the condition is met, the original function should
- // return a value, mimicking the functionality of the original return statement(s)
- // in the selection.
- //
- // If there is a return that is guaranteed to execute (hasNonNestedReturns=true), then
- // we don't need to include this additional condition check and can simply return.
- //
- // Before:
- //
- // func _() int {
- // a := 1
- // b := 2
- // **if a == b {
- // return a
- // }
- // return b**
- // }
- //
- // After:
- //
- // func _() int {
- // a := 1
- // b := 2
- // return x0(a, b)
- // }
- //
- // func x0(a int, b int) int {
- // if a == b {
- // return a
- // }
- // return b
- // }
-
- var retVars []*returnVariable
- var ifReturn *ast.IfStmt
- if containsReturnStatement {
- if !hasNonNestedReturn {
- // The selected block contained return statements, so we have to modify the
- // signature of the extracted function as described above. Adjust all of
- // the return statements in the extracted function to reflect this change in
- // signature.
- if err := adjustReturnStatements(returnTypes, seenVars, fset, file,
- pkg, extractedBlock); err != nil {
- return nil, err
- }
- }
- // Collect the additional return values and types needed to accommodate return
- // statements in the selection. Update the type signature of the extracted
- // function and construct the if statement that will be inserted in the enclosing
- // function.
- retVars, ifReturn, err = generateReturnInfo(enclosing, pkg, path, file, info, fset, rng.Start, hasNonNestedReturn)
- if err != nil {
- return nil, err
- }
- }
-
- // Add a return statement to the end of the new function. This return statement must include
- // the values for the types of the original extracted function signature and (if a return
- // statement is present in the selection) enclosing function signature.
- // This only needs to be done if the selections does not have a non-nested return, otherwise
- // it already terminates with a return statement.
- hasReturnValues := len(returns)+len(retVars) > 0
- if hasReturnValues && !hasNonNestedReturn {
- extractedBlock.List = append(extractedBlock.List, &ast.ReturnStmt{
- Results: append(returns, getZeroVals(retVars)...),
- })
- }
-
- // Construct the appropriate call to the extracted function.
- // We must meet two conditions to use ":=" instead of '='. (1) there must be at least
- // one variable on the lhs that is uninitailized (non-free) prior to the assignment.
- // (2) all of the initialized (free) variables on the lhs must be able to be redefined.
- sym := token.ASSIGN
- canDefineCount := len(uninitialized) + canRedefineCount
- canDefine := len(uninitialized)+len(retVars) > 0 && canDefineCount == len(returns)
- if canDefine {
- sym = token.DEFINE
- }
- var name, funName string
- if isMethod {
- name = "newMethod"
- // TODO(suzmue): generate a name that does not conflict for "newMethod".
- funName = name
- } else {
- name = "newFunction"
- funName, _ = generateAvailableIdentifier(rng.Start, file, path, info, name, 0)
- }
- extractedFunCall := generateFuncCall(hasNonNestedReturn, hasReturnValues, params,
- append(returns, getNames(retVars)...), funName, sym, receiverName)
-
- // Build the extracted function.
- newFunc := &ast.FuncDecl{
- Name: ast.NewIdent(funName),
- Type: &ast.FuncType{
- Params: &ast.FieldList{List: paramTypes},
- Results: &ast.FieldList{List: append(returnTypes, getDecls(retVars)...)},
- },
- Body: extractedBlock,
- }
- if isMethod {
- var names []*ast.Ident
- if receiverUsed {
- names = append(names, ast.NewIdent(receiverName))
- }
- newFunc.Recv = &ast.FieldList{
- List: []*ast.Field{{
- Names: names,
- Type: receiver.Type,
- }},
- }
- }
-
- // Create variable declarations for any identifiers that need to be initialized prior to
- // calling the extracted function. We do not manually initialize variables if every return
- // value is unitialized. We can use := to initialize the variables in this situation.
- var declarations []ast.Stmt
- if canDefineCount != len(returns) {
- declarations = initializeVars(uninitialized, retVars, seenUninitialized, seenVars)
- }
-
- var declBuf, replaceBuf, newFuncBuf, ifBuf, commentBuf bytes.Buffer
- if err := format.Node(&declBuf, fset, declarations); err != nil {
- return nil, err
- }
- if err := format.Node(&replaceBuf, fset, extractedFunCall); err != nil {
- return nil, err
- }
- if ifReturn != nil {
- if err := format.Node(&ifBuf, fset, ifReturn); err != nil {
- return nil, err
- }
- }
- if err := format.Node(&newFuncBuf, fset, newFunc); err != nil {
- return nil, err
- }
- // Find all the comments within the range and print them to be put somewhere.
- // TODO(suzmue): print these in the extracted function at the correct place.
- for _, cg := range file.Comments {
- if cg.Pos().IsValid() && cg.Pos() < rng.End && cg.Pos() >= rng.Start {
- for _, c := range cg.List {
- fmt.Fprintln(&commentBuf, c.Text)
- }
- }
- }
-
- // We're going to replace the whole enclosing function,
- // so preserve the text before and after the selected block.
- outerStart, err := Offset(tok, outer.Pos())
- if err != nil {
- return nil, err
- }
- outerEnd, err := Offset(tok, outer.End())
- if err != nil {
- return nil, err
- }
- before := src[outerStart:startOffset]
- after := src[endOffset:outerEnd]
- indent, err := calculateIndentation(src, tok, start)
- if err != nil {
- return nil, err
- }
- newLineIndent := "\n" + indent
-
- var fullReplacement strings.Builder
- fullReplacement.Write(before)
- if commentBuf.Len() > 0 {
- comments := strings.ReplaceAll(commentBuf.String(), "\n", newLineIndent)
- fullReplacement.WriteString(comments)
- }
- if declBuf.Len() > 0 { // add any initializations, if needed
- initializations := strings.ReplaceAll(declBuf.String(), "\n", newLineIndent) +
- newLineIndent
- fullReplacement.WriteString(initializations)
- }
- fullReplacement.Write(replaceBuf.Bytes()) // call the extracted function
- if ifBuf.Len() > 0 { // add the if statement below the function call, if needed
- ifstatement := newLineIndent +
- strings.ReplaceAll(ifBuf.String(), "\n", newLineIndent)
- fullReplacement.WriteString(ifstatement)
- }
- fullReplacement.Write(after)
- fullReplacement.WriteString("\n\n") // add newlines after the enclosing function
- fullReplacement.Write(newFuncBuf.Bytes()) // insert the extracted function
-
- return &analysis.SuggestedFix{
- TextEdits: []analysis.TextEdit{{
- Pos: outer.Pos(),
- End: outer.End(),
- NewText: []byte(fullReplacement.String()),
- }},
- }, nil
-}
-
-// adjustRangeForWhitespace adjusts the given range to exclude unnecessary leading or
-// trailing whitespace characters from selection. In the following example, each line
-// of the if statement is indented once. There are also two extra spaces after the
-// closing bracket before the line break.
-//
-// \tif (true) {
-// \t _ = 1
-// \t} \n
-//
-// By default, a valid range begins at 'if' and ends at the first whitespace character
-// after the '}'. But, users are likely to highlight full lines rather than adjusting
-// their cursors for whitespace. To support this use case, we must manually adjust the
-// ranges to match the correct AST node. In this particular example, we would adjust
-// rng.Start forward by one byte, and rng.End backwards by two bytes.
-func adjustRangeForWhitespace(rng span.Range, tok *token.File, content []byte) (span.Range, error) {
- offset, err := Offset(tok, rng.Start)
- if err != nil {
- return span.Range{}, err
- }
- for offset < len(content) {
- if !unicode.IsSpace(rune(content[offset])) {
- break
- }
- // Move forwards one byte to find a non-whitespace character.
- offset += 1
- }
- rng.Start = tok.Pos(offset)
-
- // Move backwards to find a non-whitespace character.
- offset, err = Offset(tok, rng.End)
- if err != nil {
- return span.Range{}, err
- }
- for o := offset - 1; 0 <= o && o < len(content); o-- {
- if !unicode.IsSpace(rune(content[o])) {
- break
- }
- offset = o
- }
- rng.End = tok.Pos(offset)
- return rng, nil
-}
-
-// findParent finds the parent AST node of the given target node, if the target is a
-// descendant of the starting node.
-func findParent(start ast.Node, target ast.Node) ast.Node {
- var parent ast.Node
- analysisinternal.WalkASTWithParent(start, func(n, p ast.Node) bool {
- if n == target {
- parent = p
- return false
- }
- return true
- })
- return parent
-}
-
-// variable describes the status of a variable within a selection.
-type variable struct {
- obj types.Object
-
- // free reports whether the variable is a free variable, meaning it should
- // be a parameter to the extracted function.
- free bool
-
- // assigned reports whether the variable is assigned to in the selection.
- assigned bool
-
- // defined reports whether the variable is defined in the selection.
- defined bool
-}
-
-// collectFreeVars maps each identifier in the given range to whether it is "free."
-// Given a range, a variable in that range is defined as "free" if it is declared
-// outside of the range and neither at the file scope nor package scope. These free
-// variables will be used as arguments in the extracted function. It also returns a
-// list of identifiers that may need to be returned by the extracted function.
-// Some of the code in this function has been adapted from tools/cmd/guru/freevars.go.
-func collectFreeVars(info *types.Info, file *ast.File, fileScope, pkgScope *types.Scope, rng span.Range, node ast.Node) ([]*variable, error) {
- // id returns non-nil if n denotes an object that is referenced by the span
- // and defined either within the span or in the lexical environment. The bool
- // return value acts as an indicator for where it was defined.
- id := func(n *ast.Ident) (types.Object, bool) {
- obj := info.Uses[n]
- if obj == nil {
- return info.Defs[n], false
- }
- if obj.Name() == "_" {
- return nil, false // exclude objects denoting '_'
- }
- if _, ok := obj.(*types.PkgName); ok {
- return nil, false // imported package
- }
- if !(file.Pos() <= obj.Pos() && obj.Pos() <= file.End()) {
- return nil, false // not defined in this file
- }
- scope := obj.Parent()
- if scope == nil {
- return nil, false // e.g. interface method, struct field
- }
- if scope == fileScope || scope == pkgScope {
- return nil, false // defined at file or package scope
- }
- if rng.Start <= obj.Pos() && obj.Pos() <= rng.End {
- return obj, false // defined within selection => not free
- }
- return obj, true
- }
- // sel returns non-nil if n denotes a selection o.x.y that is referenced by the
- // span and defined either within the span or in the lexical environment. The bool
- // return value acts as an indicator for where it was defined.
- var sel func(n *ast.SelectorExpr) (types.Object, bool)
- sel = func(n *ast.SelectorExpr) (types.Object, bool) {
- switch x := astutil.Unparen(n.X).(type) {
- case *ast.SelectorExpr:
- return sel(x)
- case *ast.Ident:
- return id(x)
- }
- return nil, false
- }
- seen := make(map[types.Object]*variable)
- firstUseIn := make(map[types.Object]token.Pos)
- var vars []types.Object
- ast.Inspect(node, func(n ast.Node) bool {
- if n == nil {
- return false
- }
- if rng.Start <= n.Pos() && n.End() <= rng.End {
- var obj types.Object
- var isFree, prune bool
- switch n := n.(type) {
- case *ast.Ident:
- obj, isFree = id(n)
- case *ast.SelectorExpr:
- obj, isFree = sel(n)
- prune = true
- }
- if obj != nil {
- seen[obj] = &variable{
- obj: obj,
- free: isFree,
- }
- vars = append(vars, obj)
- // Find the first time that the object is used in the selection.
- first, ok := firstUseIn[obj]
- if !ok || n.Pos() < first {
- firstUseIn[obj] = n.Pos()
- }
- if prune {
- return false
- }
- }
- }
- return n.Pos() <= rng.End
- })
-
- // Find identifiers that are initialized or whose values are altered at some
- // point in the selected block. For example, in a selected block from lines 2-4,
- // variables x, y, and z are included in assigned. However, in a selected block
- // from lines 3-4, only variables y and z are included in assigned.
- //
- // 1: var a int
- // 2: var x int
- // 3: y := 3
- // 4: z := x + a
- //
- ast.Inspect(node, func(n ast.Node) bool {
- if n == nil {
- return false
- }
- if n.Pos() < rng.Start || n.End() > rng.End {
- return n.Pos() <= rng.End
- }
- switch n := n.(type) {
- case *ast.AssignStmt:
- for _, assignment := range n.Lhs {
- lhs, ok := assignment.(*ast.Ident)
- if !ok {
- continue
- }
- obj, _ := id(lhs)
- if obj == nil {
- continue
- }
- if _, ok := seen[obj]; !ok {
- continue
- }
- seen[obj].assigned = true
- if n.Tok != token.DEFINE {
- continue
- }
- // Find identifiers that are defined prior to being used
- // elsewhere in the selection.
- // TODO: Include identifiers that are assigned prior to being
- // used elsewhere in the selection. Then, change the assignment
- // to a definition in the extracted function.
- if firstUseIn[obj] != lhs.Pos() {
- continue
- }
- // Ensure that the object is not used in its own re-definition.
- // For example:
- // var f float64
- // f, e := math.Frexp(f)
- for _, expr := range n.Rhs {
- if referencesObj(info, expr, obj) {
- continue
- }
- if _, ok := seen[obj]; !ok {
- continue
- }
- seen[obj].defined = true
- break
- }
- }
- return false
- case *ast.DeclStmt:
- gen, ok := n.Decl.(*ast.GenDecl)
- if !ok {
- return false
- }
- for _, spec := range gen.Specs {
- vSpecs, ok := spec.(*ast.ValueSpec)
- if !ok {
- continue
- }
- for _, vSpec := range vSpecs.Names {
- obj, _ := id(vSpec)
- if obj == nil {
- continue
- }
- if _, ok := seen[obj]; !ok {
- continue
- }
- seen[obj].assigned = true
- }
- }
- return false
- case *ast.IncDecStmt:
- if ident, ok := n.X.(*ast.Ident); !ok {
- return false
- } else if obj, _ := id(ident); obj == nil {
- return false
- } else {
- if _, ok := seen[obj]; !ok {
- return false
- }
- seen[obj].assigned = true
- }
- }
- return true
- })
- var variables []*variable
- for _, obj := range vars {
- v, ok := seen[obj]
- if !ok {
- return nil, fmt.Errorf("no seen types.Object for %v", obj)
- }
- variables = append(variables, v)
- }
- return variables, nil
-}
-
-// referencesObj checks whether the given object appears in the given expression.
-func referencesObj(info *types.Info, expr ast.Expr, obj types.Object) bool {
- var hasObj bool
- ast.Inspect(expr, func(n ast.Node) bool {
- if n == nil {
- return false
- }
- ident, ok := n.(*ast.Ident)
- if !ok {
- return true
- }
- objUse := info.Uses[ident]
- if obj == objUse {
- hasObj = true
- return false
- }
- return false
- })
- return hasObj
-}
-
-type fnExtractParams struct {
- tok *token.File
- path []ast.Node
- rng span.Range
- outer *ast.FuncDecl
- start ast.Node
-}
-
-// CanExtractFunction reports whether the code in the given range can be
-// extracted to a function.
-func CanExtractFunction(fset *token.FileSet, rng span.Range, src []byte, file *ast.File) (*fnExtractParams, bool, bool, error) {
- if rng.Start == rng.End {
- return nil, false, false, fmt.Errorf("start and end are equal")
- }
- tok := fset.File(file.Pos())
- if tok == nil {
- return nil, false, false, fmt.Errorf("no file for pos %v", fset.Position(file.Pos()))
- }
- var err error
- rng, err = adjustRangeForWhitespace(rng, tok, src)
- if err != nil {
- return nil, false, false, err
- }
- path, _ := astutil.PathEnclosingInterval(file, rng.Start, rng.End)
- if len(path) == 0 {
- return nil, false, false, fmt.Errorf("no path enclosing interval")
- }
- // Node that encloses the selection must be a statement.
- // TODO: Support function extraction for an expression.
- _, ok := path[0].(ast.Stmt)
- if !ok {
- return nil, false, false, fmt.Errorf("node is not a statement")
- }
-
- // Find the function declaration that encloses the selection.
- var outer *ast.FuncDecl
- for _, p := range path {
- if p, ok := p.(*ast.FuncDecl); ok {
- outer = p
- break
- }
- }
- if outer == nil {
- return nil, false, false, fmt.Errorf("no enclosing function")
- }
-
- // Find the nodes at the start and end of the selection.
- var start, end ast.Node
- ast.Inspect(outer, func(n ast.Node) bool {
- if n == nil {
- return false
- }
- // Do not override 'start' with a node that begins at the same location
- // but is nested further from 'outer'.
- if start == nil && n.Pos() == rng.Start && n.End() <= rng.End {
- start = n
- }
- if end == nil && n.End() == rng.End && n.Pos() >= rng.Start {
- end = n
- }
- return n.Pos() <= rng.End
- })
- if start == nil || end == nil {
- return nil, false, false, fmt.Errorf("range does not map to AST nodes")
- }
- // If the region is a blockStmt, use the first and last nodes in the block
- // statement.
- // <rng.start>{ ... }<rng.end> => { <rng.start>...<rng.end> }
- if blockStmt, ok := start.(*ast.BlockStmt); ok {
- if len(blockStmt.List) == 0 {
- return nil, false, false, fmt.Errorf("range maps to empty block statement")
- }
- start, end = blockStmt.List[0], blockStmt.List[len(blockStmt.List)-1]
- rng.Start, rng.End = start.Pos(), end.End()
- }
- return &fnExtractParams{
- tok: tok,
- path: path,
- rng: rng,
- outer: outer,
- start: start,
- }, true, outer.Recv != nil, nil
-}
-
-// objUsed checks if the object is used within the range. It returns the first
-// occurrence of the object in the range, if it exists.
-func objUsed(info *types.Info, rng span.Range, obj types.Object) (bool, *ast.Ident) {
- var firstUse *ast.Ident
- for id, objUse := range info.Uses {
- if obj != objUse {
- continue
- }
- if id.Pos() < rng.Start || id.End() > rng.End {
- continue
- }
- if firstUse == nil || id.Pos() < firstUse.Pos() {
- firstUse = id
- }
- }
- return firstUse != nil, firstUse
-}
-
-// varOverridden traverses the given AST node until we find the given identifier. Then, we
-// examine the occurrence of the given identifier and check for (1) whether the identifier
-// is being redefined. If the identifier is free, we also check for (2) whether the identifier
-// is being reassigned. We will not include an identifier in the return statement of the
-// extracted function if it meets one of the above conditions.
-func varOverridden(info *types.Info, firstUse *ast.Ident, obj types.Object, isFree bool, node ast.Node) bool {
- var isOverriden bool
- ast.Inspect(node, func(n ast.Node) bool {
- if n == nil {
- return false
- }
- assignment, ok := n.(*ast.AssignStmt)
- if !ok {
- return true
- }
- // A free variable is initialized prior to the selection. We can always reassign
- // this variable after the selection because it has already been defined.
- // Conversely, a non-free variable is initialized within the selection. Thus, we
- // cannot reassign this variable after the selection unless it is initialized and
- // returned by the extracted function.
- if !isFree && assignment.Tok == token.ASSIGN {
- return false
- }
- for _, assigned := range assignment.Lhs {
- ident, ok := assigned.(*ast.Ident)
- // Check if we found the first use of the identifier.
- if !ok || ident != firstUse {
- continue
- }
- objUse := info.Uses[ident]
- if objUse == nil || objUse != obj {
- continue
- }
- // Ensure that the object is not used in its own definition.
- // For example:
- // var f float64
- // f, e := math.Frexp(f)
- for _, expr := range assignment.Rhs {
- if referencesObj(info, expr, obj) {
- return false
- }
- }
- isOverriden = true
- return false
- }
- return false
- })
- return isOverriden
-}
-
-// parseExtraction generates an AST file from the given text. We then return the portion of the
-// file that represents the text.
-func parseBlockStmt(fset *token.FileSet, src []byte) (*ast.BlockStmt, error) {
- text := "package main\nfunc _() { " + string(src) + " }"
- extract, err := parser.ParseFile(fset, "", text, 0)
- if err != nil {
- return nil, err
- }
- if len(extract.Decls) == 0 {
- return nil, fmt.Errorf("parsed file does not contain any declarations")
- }
- decl, ok := extract.Decls[0].(*ast.FuncDecl)
- if !ok {
- return nil, fmt.Errorf("parsed file does not contain expected function declaration")
- }
- if decl.Body == nil {
- return nil, fmt.Errorf("extracted function has no body")
- }
- return decl.Body, nil
-}
-
-// generateReturnInfo generates the information we need to adjust the return statements and
-// signature of the extracted function. We prepare names, signatures, and "zero values" that
-// represent the new variables. We also use this information to construct the if statement that
-// is inserted below the call to the extracted function.
-func generateReturnInfo(enclosing *ast.FuncType, pkg *types.Package, path []ast.Node, file *ast.File, info *types.Info, fset *token.FileSet, pos token.Pos, hasNonNestedReturns bool) ([]*returnVariable, *ast.IfStmt, error) {
- var retVars []*returnVariable
- var cond *ast.Ident
- if !hasNonNestedReturns {
- // Generate information for the added bool value.
- name, _ := generateAvailableIdentifier(pos, file, path, info, "shouldReturn", 0)
- cond = &ast.Ident{Name: name}
- retVars = append(retVars, &returnVariable{
- name: cond,
- decl: &ast.Field{Type: ast.NewIdent("bool")},
- zeroVal: ast.NewIdent("false"),
- })
- }
- // Generate information for the values in the return signature of the enclosing function.
- if enclosing.Results != nil {
- idx := 0
- for _, field := range enclosing.Results.List {
- typ := info.TypeOf(field.Type)
- if typ == nil {
- return nil, nil, fmt.Errorf(
- "failed type conversion, AST expression: %T", field.Type)
- }
- expr := analysisinternal.TypeExpr(fset, file, pkg, typ)
- if expr == nil {
- return nil, nil, fmt.Errorf("nil AST expression")
- }
- var name string
- name, idx = generateAvailableIdentifier(pos, file,
- path, info, "returnValue", idx)
- retVars = append(retVars, &returnVariable{
- name: ast.NewIdent(name),
- decl: &ast.Field{Type: expr},
- zeroVal: analysisinternal.ZeroValue(
- fset, file, pkg, typ),
- })
- }
- }
- var ifReturn *ast.IfStmt
- if !hasNonNestedReturns {
- // Create the return statement for the enclosing function. We must exclude the variable
- // for the condition of the if statement (cond) from the return statement.
- ifReturn = &ast.IfStmt{
- Cond: cond,
- Body: &ast.BlockStmt{
- List: []ast.Stmt{&ast.ReturnStmt{Results: getNames(retVars)[1:]}},
- },
- }
- }
- return retVars, ifReturn, nil
-}
-
-// adjustReturnStatements adds "zero values" of the given types to each return statement
-// in the given AST node.
-func adjustReturnStatements(returnTypes []*ast.Field, seenVars map[types.Object]ast.Expr, fset *token.FileSet, file *ast.File, pkg *types.Package, extractedBlock *ast.BlockStmt) error {
- var zeroVals []ast.Expr
- // Create "zero values" for each type.
- for _, returnType := range returnTypes {
- var val ast.Expr
- for obj, typ := range seenVars {
- if typ != returnType.Type {
- continue
- }
- val = analysisinternal.ZeroValue(fset, file, pkg, obj.Type())
- break
- }
- if val == nil {
- return fmt.Errorf(
- "could not find matching AST expression for %T", returnType.Type)
- }
- zeroVals = append(zeroVals, val)
- }
- // Add "zero values" to each return statement.
- // The bool reports whether the enclosing function should return after calling the
- // extracted function. We set the bool to 'true' because, if these return statements
- // execute, the extracted function terminates early, and the enclosing function must
- // return as well.
- zeroVals = append(zeroVals, ast.NewIdent("true"))
- ast.Inspect(extractedBlock, func(n ast.Node) bool {
- if n == nil {
- return false
- }
- if n, ok := n.(*ast.ReturnStmt); ok {
- n.Results = append(zeroVals, n.Results...)
- return false
- }
- return true
- })
- return nil
-}
-
-// generateFuncCall constructs a call expression for the extracted function, described by the
-// given parameters and return variables.
-func generateFuncCall(hasNonNestedReturn, hasReturnVals bool, params, returns []ast.Expr, name string, token token.Token, selector string) ast.Node {
- var replace ast.Node
- callExpr := &ast.CallExpr{
- Fun: ast.NewIdent(name),
- Args: params,
- }
- if selector != "" {
- callExpr = &ast.CallExpr{
- Fun: &ast.SelectorExpr{
- X: ast.NewIdent(selector),
- Sel: ast.NewIdent(name),
- },
- Args: params,
- }
- }
- if hasReturnVals {
- if hasNonNestedReturn {
- // Create a return statement that returns the result of the function call.
- replace = &ast.ReturnStmt{
- Return: 0,
- Results: []ast.Expr{callExpr},
- }
- } else {
- // Assign the result of the function call.
- replace = &ast.AssignStmt{
- Lhs: returns,
- Tok: token,
- Rhs: []ast.Expr{callExpr},
- }
- }
- } else {
- replace = callExpr
- }
- return replace
-}
-
-// initializeVars creates variable declarations, if needed.
-// Our preference is to replace the selected block with an "x, y, z := fn()" style
-// assignment statement. We can use this style when all of the variables in the
-// extracted function's return statement are either not defined prior to the extracted block
-// or can be safely redefined. However, for example, if z is already defined
-// in a different scope, we replace the selected block with:
-//
-// var x int
-// var y string
-// x, y, z = fn()
-func initializeVars(uninitialized []types.Object, retVars []*returnVariable, seenUninitialized map[types.Object]struct{}, seenVars map[types.Object]ast.Expr) []ast.Stmt {
- var declarations []ast.Stmt
- for _, obj := range uninitialized {
- if _, ok := seenUninitialized[obj]; ok {
- continue
- }
- seenUninitialized[obj] = struct{}{}
- valSpec := &ast.ValueSpec{
- Names: []*ast.Ident{ast.NewIdent(obj.Name())},
- Type: seenVars[obj],
- }
- genDecl := &ast.GenDecl{
- Tok: token.VAR,
- Specs: []ast.Spec{valSpec},
- }
- declarations = append(declarations, &ast.DeclStmt{Decl: genDecl})
- }
- // Each variable added from a return statement in the selection
- // must be initialized.
- for i, retVar := range retVars {
- n := retVar.name.(*ast.Ident)
- valSpec := &ast.ValueSpec{
- Names: []*ast.Ident{n},
- Type: retVars[i].decl.Type,
- }
- genDecl := &ast.GenDecl{
- Tok: token.VAR,
- Specs: []ast.Spec{valSpec},
- }
- declarations = append(declarations, &ast.DeclStmt{Decl: genDecl})
- }
- return declarations
-}
-
-// getNames returns the names from the given list of returnVariable.
-func getNames(retVars []*returnVariable) []ast.Expr {
- var names []ast.Expr
- for _, retVar := range retVars {
- names = append(names, retVar.name)
- }
- return names
-}
-
-// getZeroVals returns the "zero values" from the given list of returnVariable.
-func getZeroVals(retVars []*returnVariable) []ast.Expr {
- var zvs []ast.Expr
- for _, retVar := range retVars {
- zvs = append(zvs, retVar.zeroVal)
- }
- return zvs
-}
-
-// getDecls returns the declarations from the given list of returnVariable.
-func getDecls(retVars []*returnVariable) []*ast.Field {
- var decls []*ast.Field
- for _, retVar := range retVars {
- decls = append(decls, retVar.decl)
- }
- return decls
-}
diff --git a/internal/lsp/source/fix.go b/internal/lsp/source/fix.go
deleted file mode 100644
index 2f921ad0c..000000000
--- a/internal/lsp/source/fix.go
+++ /dev/null
@@ -1,140 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package source
-
-import (
- "context"
- "fmt"
- "go/ast"
- "go/token"
- "go/types"
-
- "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/internal/lsp/analysis/fillstruct"
- "golang.org/x/tools/internal/lsp/analysis/undeclaredname"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/span"
- errors "golang.org/x/xerrors"
-)
-
-type (
- // SuggestedFixFunc is a function used to get the suggested fixes for a given
- // gopls command, some of which are provided by go/analysis.Analyzers. Some of
- // the analyzers in internal/lsp/analysis are not efficient enough to include
- // suggested fixes with their diagnostics, so we have to compute them
- // separately. Such analyzers should provide a function with a signature of
- // SuggestedFixFunc.
- SuggestedFixFunc func(ctx context.Context, snapshot Snapshot, fh VersionedFileHandle, pRng protocol.Range) (*analysis.SuggestedFix, error)
- singleFileFixFunc func(fset *token.FileSet, rng span.Range, src []byte, file *ast.File, pkg *types.Package, info *types.Info) (*analysis.SuggestedFix, error)
-)
-
-const (
- FillStruct = "fill_struct"
- StubMethods = "stub_methods"
- UndeclaredName = "undeclared_name"
- ExtractVariable = "extract_variable"
- ExtractFunction = "extract_function"
- ExtractMethod = "extract_method"
-)
-
-// suggestedFixes maps a suggested fix command id to its handler.
-var suggestedFixes = map[string]SuggestedFixFunc{
- FillStruct: singleFile(fillstruct.SuggestedFix),
- UndeclaredName: singleFile(undeclaredname.SuggestedFix),
- ExtractVariable: singleFile(extractVariable),
- ExtractFunction: singleFile(extractFunction),
- ExtractMethod: singleFile(extractMethod),
- StubMethods: stubSuggestedFixFunc,
-}
-
-// singleFile calls analyzers that expect inputs for a single file
-func singleFile(sf singleFileFixFunc) SuggestedFixFunc {
- return func(ctx context.Context, snapshot Snapshot, fh VersionedFileHandle, pRng protocol.Range) (*analysis.SuggestedFix, error) {
- fset, rng, src, file, pkg, info, err := getAllSuggestedFixInputs(ctx, snapshot, fh, pRng)
- if err != nil {
- return nil, err
- }
- return sf(fset, rng, src, file, pkg, info)
- }
-}
-
-func SuggestedFixFromCommand(cmd protocol.Command, kind protocol.CodeActionKind) SuggestedFix {
- return SuggestedFix{
- Title: cmd.Title,
- Command: &cmd,
- ActionKind: kind,
- }
-}
-
-// ApplyFix applies the command's suggested fix to the given file and
-// range, returning the resulting edits.
-func ApplyFix(ctx context.Context, fix string, snapshot Snapshot, fh VersionedFileHandle, pRng protocol.Range) ([]protocol.TextDocumentEdit, error) {
- handler, ok := suggestedFixes[fix]
- if !ok {
- return nil, fmt.Errorf("no suggested fix function for %s", fix)
- }
- suggestion, err := handler(ctx, snapshot, fh, pRng)
- if err != nil {
- return nil, err
- }
- if suggestion == nil {
- return nil, nil
- }
- fset := snapshot.FileSet()
- editsPerFile := map[span.URI]*protocol.TextDocumentEdit{}
- for _, edit := range suggestion.TextEdits {
- spn, err := span.NewRange(fset, edit.Pos, edit.End).Span()
- if err != nil {
- return nil, err
- }
- fh, err := snapshot.GetVersionedFile(ctx, spn.URI())
- if err != nil {
- return nil, err
- }
- te, ok := editsPerFile[spn.URI()]
- if !ok {
- te = &protocol.TextDocumentEdit{
- TextDocument: protocol.OptionalVersionedTextDocumentIdentifier{
- Version: fh.Version(),
- TextDocumentIdentifier: protocol.TextDocumentIdentifier{
- URI: protocol.URIFromSpanURI(fh.URI()),
- },
- },
- }
- editsPerFile[spn.URI()] = te
- }
- _, pgf, err := GetParsedFile(ctx, snapshot, fh, NarrowestPackage)
- if err != nil {
- return nil, err
- }
- rng, err := pgf.Mapper.Range(spn)
- if err != nil {
- return nil, err
- }
- te.Edits = append(te.Edits, protocol.TextEdit{
- Range: rng,
- NewText: string(edit.NewText),
- })
- }
- var edits []protocol.TextDocumentEdit
- for _, edit := range editsPerFile {
- edits = append(edits, *edit)
- }
- return edits, nil
-}
-
-// getAllSuggestedFixInputs is a helper function to collect all possible needed
-// inputs for an AppliesFunc or SuggestedFixFunc.
-func getAllSuggestedFixInputs(ctx context.Context, snapshot Snapshot, fh FileHandle, pRng protocol.Range) (*token.FileSet, span.Range, []byte, *ast.File, *types.Package, *types.Info, error) {
- pkg, pgf, err := GetParsedFile(ctx, snapshot, fh, NarrowestPackage)
- if err != nil {
- return nil, span.Range{}, nil, nil, nil, nil, errors.Errorf("getting file for Identifier: %w", err)
- }
- rng, err := pgf.Mapper.RangeToSpanRange(pRng)
- if err != nil {
- return nil, span.Range{}, nil, nil, nil, nil, err
- }
- return snapshot.FileSet(), rng, pgf.Src, pgf.File, pkg.GetTypes(), pkg.GetTypesInfo(), nil
-}
diff --git a/internal/lsp/source/folding_range.go b/internal/lsp/source/folding_range.go
deleted file mode 100644
index 576308f99..000000000
--- a/internal/lsp/source/folding_range.go
+++ /dev/null
@@ -1,185 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package source
-
-import (
- "context"
- "go/ast"
- "go/token"
- "sort"
- "strings"
-
- "golang.org/x/tools/internal/lsp/protocol"
-)
-
-// FoldingRangeInfo holds range and kind info of folding for an ast.Node
-type FoldingRangeInfo struct {
- MappedRange
- Kind protocol.FoldingRangeKind
-}
-
-// FoldingRange gets all of the folding range for f.
-func FoldingRange(ctx context.Context, snapshot Snapshot, fh FileHandle, lineFoldingOnly bool) (ranges []*FoldingRangeInfo, err error) {
- // TODO(suzmue): consider limiting the number of folding ranges returned, and
- // implement a way to prioritize folding ranges in that case.
- pgf, err := snapshot.ParseGo(ctx, fh, ParseFull)
- if err != nil {
- return nil, err
- }
-
- // With parse errors, we wouldn't be able to produce accurate folding info.
- // LSP protocol (3.16) currently does not have a way to handle this case
- // (https://github.com/microsoft/language-server-protocol/issues/1200).
- // We cannot return an error either because we are afraid some editors
- // may not handle errors nicely. As a workaround, we now return an empty
- // result and let the client handle this case by double check the file
- // contents (i.e. if the file is not empty and the folding range result
- // is empty, raise an internal error).
- if pgf.ParseErr != nil {
- return nil, nil
- }
-
- fset := snapshot.FileSet()
-
- // Get folding ranges for comments separately as they are not walked by ast.Inspect.
- ranges = append(ranges, commentsFoldingRange(fset, pgf.Mapper, pgf.File)...)
-
- visit := func(n ast.Node) bool {
- rng := foldingRangeFunc(fset, pgf.Mapper, n, lineFoldingOnly)
- if rng != nil {
- ranges = append(ranges, rng)
- }
- return true
- }
- // Walk the ast and collect folding ranges.
- ast.Inspect(pgf.File, visit)
-
- sort.Slice(ranges, func(i, j int) bool {
- irng, _ := ranges[i].Range()
- jrng, _ := ranges[j].Range()
- return protocol.CompareRange(irng, jrng) < 0
- })
-
- return ranges, nil
-}
-
-// foldingRangeFunc calculates the line folding range for ast.Node n
-func foldingRangeFunc(fset *token.FileSet, m *protocol.ColumnMapper, n ast.Node, lineFoldingOnly bool) *FoldingRangeInfo {
- // TODO(suzmue): include trailing empty lines before the closing
- // parenthesis/brace.
- var kind protocol.FoldingRangeKind
- var start, end token.Pos
- switch n := n.(type) {
- case *ast.BlockStmt:
- // Fold between positions of or lines between "{" and "}".
- var startList, endList token.Pos
- if num := len(n.List); num != 0 {
- startList, endList = n.List[0].Pos(), n.List[num-1].End()
- }
- start, end = validLineFoldingRange(fset, n.Lbrace, n.Rbrace, startList, endList, lineFoldingOnly)
- case *ast.CaseClause:
- // Fold from position of ":" to end.
- start, end = n.Colon+1, n.End()
- case *ast.CommClause:
- // Fold from position of ":" to end.
- start, end = n.Colon+1, n.End()
- case *ast.CallExpr:
- // Fold from position of "(" to position of ")".
- start, end = n.Lparen+1, n.Rparen
- case *ast.FieldList:
- // Fold between positions of or lines between opening parenthesis/brace and closing parenthesis/brace.
- var startList, endList token.Pos
- if num := len(n.List); num != 0 {
- startList, endList = n.List[0].Pos(), n.List[num-1].End()
- }
- start, end = validLineFoldingRange(fset, n.Opening, n.Closing, startList, endList, lineFoldingOnly)
- case *ast.GenDecl:
- // If this is an import declaration, set the kind to be protocol.Imports.
- if n.Tok == token.IMPORT {
- kind = protocol.Imports
- }
- // Fold between positions of or lines between "(" and ")".
- var startSpecs, endSpecs token.Pos
- if num := len(n.Specs); num != 0 {
- startSpecs, endSpecs = n.Specs[0].Pos(), n.Specs[num-1].End()
- }
- start, end = validLineFoldingRange(fset, n.Lparen, n.Rparen, startSpecs, endSpecs, lineFoldingOnly)
- case *ast.BasicLit:
- // Fold raw string literals from position of "`" to position of "`".
- if n.Kind == token.STRING && len(n.Value) >= 2 && n.Value[0] == '`' && n.Value[len(n.Value)-1] == '`' {
- start, end = n.Pos(), n.End()
- }
- case *ast.CompositeLit:
- // Fold between positions of or lines between "{" and "}".
- var startElts, endElts token.Pos
- if num := len(n.Elts); num != 0 {
- startElts, endElts = n.Elts[0].Pos(), n.Elts[num-1].End()
- }
- start, end = validLineFoldingRange(fset, n.Lbrace, n.Rbrace, startElts, endElts, lineFoldingOnly)
- }
-
- // Check that folding positions are valid.
- if !start.IsValid() || !end.IsValid() {
- return nil
- }
- // in line folding mode, do not fold if the start and end lines are the same.
- if lineFoldingOnly && fset.Position(start).Line == fset.Position(end).Line {
- return nil
- }
- return &FoldingRangeInfo{
- MappedRange: NewMappedRange(fset, m, start, end),
- Kind: kind,
- }
-}
-
-// validLineFoldingRange returns start and end token.Pos for folding range if the range is valid.
-// returns token.NoPos otherwise, which fails token.IsValid check
-func validLineFoldingRange(fset *token.FileSet, open, close, start, end token.Pos, lineFoldingOnly bool) (token.Pos, token.Pos) {
- if lineFoldingOnly {
- if !open.IsValid() || !close.IsValid() {
- return token.NoPos, token.NoPos
- }
-
- // Don't want to fold if the start/end is on the same line as the open/close
- // as an example, the example below should *not* fold:
- // var x = [2]string{"d",
- // "e" }
- if fset.Position(open).Line == fset.Position(start).Line ||
- fset.Position(close).Line == fset.Position(end).Line {
- return token.NoPos, token.NoPos
- }
-
- return open + 1, end
- }
- return open + 1, close
-}
-
-// commentsFoldingRange returns the folding ranges for all comment blocks in file.
-// The folding range starts at the end of the first line of the comment block, and ends at the end of the
-// comment block and has kind protocol.Comment.
-func commentsFoldingRange(fset *token.FileSet, m *protocol.ColumnMapper, file *ast.File) (comments []*FoldingRangeInfo) {
- for _, commentGrp := range file.Comments {
- startGrp, endGrp := fset.Position(commentGrp.Pos()), fset.Position(commentGrp.End())
- if startGrp.Line == endGrp.Line {
- // Don't fold single line comments.
- continue
- }
-
- firstComment := commentGrp.List[0]
- startPos, endLinePos := firstComment.Pos(), firstComment.End()
- startCmmnt, endCmmnt := fset.Position(startPos), fset.Position(endLinePos)
- if startCmmnt.Line != endCmmnt.Line {
- // If the first comment spans multiple lines, then we want to have the
- // folding range start at the end of the first line.
- endLinePos = token.Pos(int(startPos) + len(strings.Split(firstComment.Text, "\n")[0]))
- }
- comments = append(comments, &FoldingRangeInfo{
- // Fold from the end of the first line comment to the end of the comment block.
- MappedRange: NewMappedRange(fset, m, endLinePos, commentGrp.End()),
- Kind: protocol.Comment,
- })
- }
- return comments
-}
diff --git a/internal/lsp/source/format.go b/internal/lsp/source/format.go
deleted file mode 100644
index 79da0b3ad..000000000
--- a/internal/lsp/source/format.go
+++ /dev/null
@@ -1,387 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package source provides core features for use by Go editors and tools.
-package source
-
-import (
- "bytes"
- "context"
- "fmt"
- "go/ast"
- "go/format"
- "go/parser"
- "go/token"
- "strings"
- "text/scanner"
-
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/imports"
- "golang.org/x/tools/internal/lsp/diff"
- "golang.org/x/tools/internal/lsp/lsppos"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/span"
-)
-
-// Format formats a file with a given range.
-func Format(ctx context.Context, snapshot Snapshot, fh FileHandle) ([]protocol.TextEdit, error) {
- ctx, done := event.Start(ctx, "source.Format")
- defer done()
-
- // Generated files shouldn't be edited. So, don't format them
- if IsGenerated(ctx, snapshot, fh.URI()) {
- return nil, fmt.Errorf("can't format %q: file is generated", fh.URI().Filename())
- }
-
- pgf, err := snapshot.ParseGo(ctx, fh, ParseFull)
- if err != nil {
- return nil, err
- }
- // Even if this file has parse errors, it might still be possible to format it.
- // Using format.Node on an AST with errors may result in code being modified.
- // Attempt to format the source of this file instead.
- if pgf.ParseErr != nil {
- formatted, err := formatSource(ctx, fh)
- if err != nil {
- return nil, err
- }
- return computeTextEdits(ctx, snapshot, pgf, string(formatted))
- }
-
- fset := snapshot.FileSet()
-
- // format.Node changes slightly from one release to another, so the version
- // of Go used to build the LSP server will determine how it formats code.
- // This should be acceptable for all users, who likely be prompted to rebuild
- // the LSP server on each Go release.
- buf := &bytes.Buffer{}
- if err := format.Node(buf, fset, pgf.File); err != nil {
- return nil, err
- }
- formatted := buf.String()
-
- // Apply additional formatting, if any is supported. Currently, the only
- // supported additional formatter is gofumpt.
- if format := snapshot.View().Options().GofumptFormat; snapshot.View().Options().Gofumpt && format != nil {
- // gofumpt can customize formatting based on language version and module
- // path, if available.
- //
- // Try to derive this information, but fall-back on the default behavior.
- //
- // TODO: under which circumstances can we fail to find module information?
- // Can this, for example, result in inconsistent formatting across saves,
- // due to pending calls to packages.Load?
- var langVersion, modulePath string
- mds, err := snapshot.MetadataForFile(ctx, fh.URI())
- if err == nil && len(mds) > 0 {
- if mi := mds[0].ModuleInfo(); mi != nil {
- langVersion = mi.GoVersion
- modulePath = mi.Path
- }
- }
- b, err := format(ctx, langVersion, modulePath, buf.Bytes())
- if err != nil {
- return nil, err
- }
- formatted = string(b)
- }
- return computeTextEdits(ctx, snapshot, pgf, formatted)
-}
-
-func formatSource(ctx context.Context, fh FileHandle) ([]byte, error) {
- _, done := event.Start(ctx, "source.formatSource")
- defer done()
-
- data, err := fh.Read()
- if err != nil {
- return nil, err
- }
- return format.Source(data)
-}
-
-type ImportFix struct {
- Fix *imports.ImportFix
- Edits []protocol.TextEdit
-}
-
-// AllImportsFixes formats f for each possible fix to the imports.
-// In addition to returning the result of applying all edits,
-// it returns a list of fixes that could be applied to the file, with the
-// corresponding TextEdits that would be needed to apply that fix.
-func AllImportsFixes(ctx context.Context, snapshot Snapshot, fh FileHandle) (allFixEdits []protocol.TextEdit, editsPerFix []*ImportFix, err error) {
- ctx, done := event.Start(ctx, "source.AllImportsFixes")
- defer done()
-
- pgf, err := snapshot.ParseGo(ctx, fh, ParseFull)
- if err != nil {
- return nil, nil, err
- }
- if err := snapshot.RunProcessEnvFunc(ctx, func(opts *imports.Options) error {
- allFixEdits, editsPerFix, err = computeImportEdits(snapshot, pgf, opts)
- return err
- }); err != nil {
- return nil, nil, fmt.Errorf("AllImportsFixes: %v", err)
- }
- return allFixEdits, editsPerFix, nil
-}
-
-// computeImportEdits computes a set of edits that perform one or all of the
-// necessary import fixes.
-func computeImportEdits(snapshot Snapshot, pgf *ParsedGoFile, options *imports.Options) (allFixEdits []protocol.TextEdit, editsPerFix []*ImportFix, err error) {
- filename := pgf.URI.Filename()
-
- // Build up basic information about the original file.
- allFixes, err := imports.FixImports(filename, pgf.Src, options)
- if err != nil {
- return nil, nil, err
- }
-
- allFixEdits, err = computeFixEdits(snapshot, pgf, options, allFixes)
- if err != nil {
- return nil, nil, err
- }
-
- // Apply all of the import fixes to the file.
- // Add the edits for each fix to the result.
- for _, fix := range allFixes {
- edits, err := computeFixEdits(snapshot, pgf, options, []*imports.ImportFix{fix})
- if err != nil {
- return nil, nil, err
- }
- editsPerFix = append(editsPerFix, &ImportFix{
- Fix: fix,
- Edits: edits,
- })
- }
- return allFixEdits, editsPerFix, nil
-}
-
-// ComputeOneImportFixEdits returns text edits for a single import fix.
-func ComputeOneImportFixEdits(snapshot Snapshot, pgf *ParsedGoFile, fix *imports.ImportFix) ([]protocol.TextEdit, error) {
- options := &imports.Options{
- LocalPrefix: snapshot.View().Options().Local,
- // Defaults.
- AllErrors: true,
- Comments: true,
- Fragment: true,
- FormatOnly: false,
- TabIndent: true,
- TabWidth: 8,
- }
- return computeFixEdits(snapshot, pgf, options, []*imports.ImportFix{fix})
-}
-
-func computeFixEdits(snapshot Snapshot, pgf *ParsedGoFile, options *imports.Options, fixes []*imports.ImportFix) ([]protocol.TextEdit, error) {
- // trim the original data to match fixedData
- left, err := importPrefix(pgf.Src)
- if err != nil {
- return nil, err
- }
- extra := !strings.Contains(left, "\n") // one line may have more than imports
- if extra {
- left = string(pgf.Src)
- }
- if len(left) > 0 && left[len(left)-1] != '\n' {
- left += "\n"
- }
- // Apply the fixes and re-parse the file so that we can locate the
- // new imports.
- flags := parser.ImportsOnly
- if extra {
- // used all of origData above, use all of it here too
- flags = 0
- }
- fixedData, err := imports.ApplyFixes(fixes, "", pgf.Src, options, flags)
- if err != nil {
- return nil, err
- }
- if fixedData == nil || fixedData[len(fixedData)-1] != '\n' {
- fixedData = append(fixedData, '\n') // ApplyFixes may miss the newline, go figure.
- }
- edits, err := snapshot.View().Options().ComputeEdits(pgf.URI, left, string(fixedData))
- if err != nil {
- return nil, err
- }
- return ProtocolEditsFromSource([]byte(left), edits, pgf.Mapper.Converter)
-}
-
-// importPrefix returns the prefix of the given file content through the final
-// import statement. If there are no imports, the prefix is the package
-// statement and any comment groups below it.
-func importPrefix(src []byte) (string, error) {
- fset := token.NewFileSet()
- // do as little parsing as possible
- f, err := parser.ParseFile(fset, "", src, parser.ImportsOnly|parser.ParseComments)
- if err != nil { // This can happen if 'package' is misspelled
- return "", fmt.Errorf("importPrefix: failed to parse: %s", err)
- }
- tok := fset.File(f.Pos())
- var importEnd int
- for _, d := range f.Decls {
- if x, ok := d.(*ast.GenDecl); ok && x.Tok == token.IMPORT {
- if e, err := Offset(tok, d.End()); err != nil {
- return "", fmt.Errorf("importPrefix: %s", err)
- } else if e > importEnd {
- importEnd = e
- }
- }
- }
-
- maybeAdjustToLineEnd := func(pos token.Pos, isCommentNode bool) int {
- offset, err := Offset(tok, pos)
- if err != nil {
- return -1
- }
-
- // Don't go past the end of the file.
- if offset > len(src) {
- offset = len(src)
- }
- // The go/ast package does not account for different line endings, and
- // specifically, in the text of a comment, it will strip out \r\n line
- // endings in favor of \n. To account for these differences, we try to
- // return a position on the next line whenever possible.
- switch line := tok.Line(tok.Pos(offset)); {
- case line < tok.LineCount():
- nextLineOffset, err := Offset(tok, tok.LineStart(line+1))
- if err != nil {
- return -1
- }
- // If we found a position that is at the end of a line, move the
- // offset to the start of the next line.
- if offset+1 == nextLineOffset {
- offset = nextLineOffset
- }
- case isCommentNode, offset+1 == tok.Size():
- // If the last line of the file is a comment, or we are at the end
- // of the file, the prefix is the entire file.
- offset = len(src)
- }
- return offset
- }
- if importEnd == 0 {
- pkgEnd := f.Name.End()
- importEnd = maybeAdjustToLineEnd(pkgEnd, false)
- }
- for _, cgroup := range f.Comments {
- for _, c := range cgroup.List {
- if end, err := Offset(tok, c.End()); err != nil {
- return "", err
- } else if end > importEnd {
- startLine := tok.Position(c.Pos()).Line
- endLine := tok.Position(c.End()).Line
-
- // Work around golang/go#41197 by checking if the comment might
- // contain "\r", and if so, find the actual end position of the
- // comment by scanning the content of the file.
- startOffset, err := Offset(tok, c.Pos())
- if err != nil {
- return "", err
- }
- if startLine != endLine && bytes.Contains(src[startOffset:], []byte("\r")) {
- if commentEnd := scanForCommentEnd(src[startOffset:]); commentEnd > 0 {
- end = startOffset + commentEnd
- }
- }
- importEnd = maybeAdjustToLineEnd(tok.Pos(end), true)
- }
- }
- }
- if importEnd > len(src) {
- importEnd = len(src)
- }
- return string(src[:importEnd]), nil
-}
-
-// scanForCommentEnd returns the offset of the end of the multi-line comment
-// at the start of the given byte slice.
-func scanForCommentEnd(src []byte) int {
- var s scanner.Scanner
- s.Init(bytes.NewReader(src))
- s.Mode ^= scanner.SkipComments
-
- t := s.Scan()
- if t == scanner.Comment {
- return s.Pos().Offset
- }
- return 0
-}
-
-func computeTextEdits(ctx context.Context, snapshot Snapshot, pgf *ParsedGoFile, formatted string) ([]protocol.TextEdit, error) {
- _, done := event.Start(ctx, "source.computeTextEdits")
- defer done()
-
- edits, err := snapshot.View().Options().ComputeEdits(pgf.URI, string(pgf.Src), formatted)
- if err != nil {
- return nil, err
- }
- return ToProtocolEdits(pgf.Mapper, edits)
-}
-
-// ProtocolEditsFromSource converts text edits to LSP edits using the original
-// source.
-func ProtocolEditsFromSource(src []byte, edits []diff.TextEdit, converter span.Converter) ([]protocol.TextEdit, error) {
- m := lsppos.NewMapper(src)
- var result []protocol.TextEdit
- for _, edit := range edits {
- spn, err := edit.Span.WithOffset(converter)
- if err != nil {
- return nil, fmt.Errorf("computing offsets: %v", err)
- }
- startLine, startChar := m.Position(spn.Start().Offset())
- endLine, endChar := m.Position(spn.End().Offset())
- if startLine < 0 || endLine < 0 {
- return nil, fmt.Errorf("out of bound span: %v", spn)
- }
-
- pstart := protocol.Position{Line: uint32(startLine), Character: uint32(startChar)}
- pend := protocol.Position{Line: uint32(endLine), Character: uint32(endChar)}
- if pstart == pend && edit.NewText == "" {
- // Degenerate case, which may result from a diff tool wanting to delete
- // '\r' in line endings. Filter it out.
- continue
- }
- result = append(result, protocol.TextEdit{
- Range: protocol.Range{Start: pstart, End: pend},
- NewText: edit.NewText,
- })
- }
- return result, nil
-}
-
-func ToProtocolEdits(m *protocol.ColumnMapper, edits []diff.TextEdit) ([]protocol.TextEdit, error) {
- if edits == nil {
- return nil, nil
- }
- result := make([]protocol.TextEdit, len(edits))
- for i, edit := range edits {
- rng, err := m.Range(edit.Span)
- if err != nil {
- return nil, err
- }
- result[i] = protocol.TextEdit{
- Range: rng,
- NewText: edit.NewText,
- }
- }
- return result, nil
-}
-
-func FromProtocolEdits(m *protocol.ColumnMapper, edits []protocol.TextEdit) ([]diff.TextEdit, error) {
- if edits == nil {
- return nil, nil
- }
- result := make([]diff.TextEdit, len(edits))
- for i, edit := range edits {
- spn, err := m.RangeSpan(edit.Range)
- if err != nil {
- return nil, err
- }
- result[i] = diff.TextEdit{
- Span: spn,
- NewText: edit.NewText,
- }
- }
- return result, nil
-}
diff --git a/internal/lsp/source/format_test.go b/internal/lsp/source/format_test.go
deleted file mode 100644
index eac78d979..000000000
--- a/internal/lsp/source/format_test.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package source
-
-import (
- "fmt"
- "strings"
- "testing"
-
- "golang.org/x/tools/internal/lsp/diff"
- "golang.org/x/tools/internal/lsp/diff/myers"
-)
-
-func TestImportPrefix(t *testing.T) {
- for i, tt := range []struct {
- input, want string
- }{
- {"package foo", "package foo"},
- {"package foo\n", "package foo\n"},
- {"package foo\n\nfunc f(){}\n", "package foo\n"},
- {"package foo\n\nimport \"fmt\"\n", "package foo\n\nimport \"fmt\""},
- {"package foo\nimport (\n\"fmt\"\n)\n", "package foo\nimport (\n\"fmt\"\n)"},
- {"\n\n\npackage foo\n", "\n\n\npackage foo\n"},
- {"// hi \n\npackage foo //xx\nfunc _(){}\n", "// hi \n\npackage foo //xx\n"},
- {"package foo //hi\n", "package foo //hi\n"},
- {"//hi\npackage foo\n//a\n\n//b\n", "//hi\npackage foo\n//a\n\n//b\n"},
- {
- "package a\n\nimport (\n \"fmt\"\n)\n//hi\n",
- "package a\n\nimport (\n \"fmt\"\n)\n//hi\n",
- },
- {`package a /*hi*/`, `package a /*hi*/`},
- {"package main\r\n\r\nimport \"go/types\"\r\n\r\n/*\r\n\r\n */\r\n", "package main\r\n\r\nimport \"go/types\"\r\n\r\n/*\r\n\r\n */\r\n"},
- {"package x; import \"os\"; func f() {}\n\n", "package x; import \"os\""},
- {"package x; func f() {fmt.Println()}\n\n", "package x"},
- } {
- got, err := importPrefix([]byte(tt.input))
- if err != nil {
- t.Fatal(err)
- }
- if got != tt.want {
- t.Errorf("%d: failed for %q:\n%s", i, tt.input, diffStr(t, tt.want, got))
- }
- }
-}
-
-func TestCRLFFile(t *testing.T) {
- for i, tt := range []struct {
- input, want string
- }{
- {
- input: `package main
-
-/*
-Hi description
-*/
-func Hi() {
-}
-`,
- want: `package main
-
-/*
-Hi description
-*/`,
- },
- } {
- got, err := importPrefix([]byte(strings.ReplaceAll(tt.input, "\n", "\r\n")))
- if err != nil {
- t.Fatal(err)
- }
- want := strings.ReplaceAll(tt.want, "\n", "\r\n")
- if got != want {
- t.Errorf("%d: failed for %q:\n%s", i, tt.input, diffStr(t, want, got))
- }
- }
-}
-
-func diffStr(t *testing.T, want, got string) string {
- if want == got {
- return ""
- }
- // Add newlines to avoid newline messages in diff.
- want += "\n"
- got += "\n"
- d, err := myers.ComputeEdits("", want, got)
- if err != nil {
- t.Fatal(err)
- }
- return fmt.Sprintf("%q", diff.ToUnified("want", "got", want, d))
-}
diff --git a/internal/lsp/source/gc_annotations.go b/internal/lsp/source/gc_annotations.go
deleted file mode 100644
index 3616bbfb1..000000000
--- a/internal/lsp/source/gc_annotations.go
+++ /dev/null
@@ -1,214 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package source
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
- "strings"
-
- "golang.org/x/tools/internal/gocommand"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/span"
-)
-
-type Annotation string
-
-const (
- // Nil controls nil checks.
- Nil Annotation = "nil"
-
- // Escape controls diagnostics about escape choices.
- Escape Annotation = "escape"
-
- // Inline controls diagnostics about inlining choices.
- Inline Annotation = "inline"
-
- // Bounds controls bounds checking diagnostics.
- Bounds Annotation = "bounds"
-)
-
-func GCOptimizationDetails(ctx context.Context, snapshot Snapshot, pkg Package) (map[VersionedFileIdentity][]*Diagnostic, error) {
- if len(pkg.CompiledGoFiles()) == 0 {
- return nil, nil
- }
- pkgDir := filepath.Dir(pkg.CompiledGoFiles()[0].URI.Filename())
- outDir := filepath.Join(os.TempDir(), fmt.Sprintf("gopls-%d.details", os.Getpid()))
-
- if err := os.MkdirAll(outDir, 0700); err != nil {
- return nil, err
- }
- tmpFile, err := ioutil.TempFile(os.TempDir(), "gopls-x")
- if err != nil {
- return nil, err
- }
- defer os.Remove(tmpFile.Name())
-
- outDirURI := span.URIFromPath(outDir)
- // GC details doesn't handle Windows URIs in the form of "file:///C:/...",
- // so rewrite them to "file://C:/...". See golang/go#41614.
- if !strings.HasPrefix(outDir, "/") {
- outDirURI = span.URI(strings.Replace(string(outDirURI), "file:///", "file://", 1))
- }
- inv := &gocommand.Invocation{
- Verb: "build",
- Args: []string{
- fmt.Sprintf("-gcflags=-json=0,%s", outDirURI),
- fmt.Sprintf("-o=%s", tmpFile.Name()),
- ".",
- },
- WorkingDir: pkgDir,
- }
- _, err = snapshot.RunGoCommandDirect(ctx, Normal, inv)
- if err != nil {
- return nil, err
- }
- files, err := findJSONFiles(outDir)
- if err != nil {
- return nil, err
- }
- reports := make(map[VersionedFileIdentity][]*Diagnostic)
- opts := snapshot.View().Options()
- var parseError error
- for _, fn := range files {
- uri, diagnostics, err := parseDetailsFile(fn, opts)
- if err != nil {
- // expect errors for all the files, save 1
- parseError = err
- }
- fh := snapshot.FindFile(uri)
- if fh == nil {
- continue
- }
- if pkgDir != filepath.Dir(fh.URI().Filename()) {
- // https://github.com/golang/go/issues/42198
- // sometimes the detail diagnostics generated for files
- // outside the package can never be taken back.
- continue
- }
- reports[fh.VersionedFileIdentity()] = diagnostics
- }
- return reports, parseError
-}
-
-func parseDetailsFile(filename string, options *Options) (span.URI, []*Diagnostic, error) {
- buf, err := ioutil.ReadFile(filename)
- if err != nil {
- return "", nil, err
- }
- var (
- uri span.URI
- i int
- diagnostics []*Diagnostic
- )
- type metadata struct {
- File string `json:"file,omitempty"`
- }
- for dec := json.NewDecoder(bytes.NewReader(buf)); dec.More(); {
- // The first element always contains metadata.
- if i == 0 {
- i++
- m := new(metadata)
- if err := dec.Decode(m); err != nil {
- return "", nil, err
- }
- if !strings.HasSuffix(m.File, ".go") {
- continue // <autogenerated>
- }
- uri = span.URIFromPath(m.File)
- continue
- }
- d := new(protocol.Diagnostic)
- if err := dec.Decode(d); err != nil {
- return "", nil, err
- }
- msg := d.Code.(string)
- if msg != "" {
- msg = fmt.Sprintf("%s(%s)", msg, d.Message)
- }
- if !showDiagnostic(msg, d.Source, options) {
- continue
- }
- var related []RelatedInformation
- for _, ri := range d.RelatedInformation {
- related = append(related, RelatedInformation{
- URI: ri.Location.URI.SpanURI(),
- Range: zeroIndexedRange(ri.Location.Range),
- Message: ri.Message,
- })
- }
- diagnostic := &Diagnostic{
- URI: uri,
- Range: zeroIndexedRange(d.Range),
- Message: msg,
- Severity: d.Severity,
- Source: OptimizationDetailsError, // d.Source is always "go compiler" as of 1.16, use our own
- Tags: d.Tags,
- Related: related,
- }
- diagnostics = append(diagnostics, diagnostic)
- i++
- }
- return uri, diagnostics, nil
-}
-
-// showDiagnostic reports whether a given diagnostic should be shown to the end
-// user, given the current options.
-func showDiagnostic(msg, source string, o *Options) bool {
- if source != "go compiler" {
- return false
- }
- if o.Annotations == nil {
- return true
- }
- switch {
- case strings.HasPrefix(msg, "canInline") ||
- strings.HasPrefix(msg, "cannotInline") ||
- strings.HasPrefix(msg, "inlineCall"):
- return o.Annotations[Inline]
- case strings.HasPrefix(msg, "escape") || msg == "leak":
- return o.Annotations[Escape]
- case strings.HasPrefix(msg, "nilcheck"):
- return o.Annotations[Nil]
- case strings.HasPrefix(msg, "isInBounds") ||
- strings.HasPrefix(msg, "isSliceInBounds"):
- return o.Annotations[Bounds]
- }
- return false
-}
-
-// The range produced by the compiler is 1-indexed, so subtract range by 1.
-func zeroIndexedRange(rng protocol.Range) protocol.Range {
- return protocol.Range{
- Start: protocol.Position{
- Line: rng.Start.Line - 1,
- Character: rng.Start.Character - 1,
- },
- End: protocol.Position{
- Line: rng.End.Line - 1,
- Character: rng.End.Character - 1,
- },
- }
-}
-
-func findJSONFiles(dir string) ([]string, error) {
- ans := []string{}
- f := func(path string, fi os.FileInfo, _ error) error {
- if fi.IsDir() {
- return nil
- }
- if strings.HasSuffix(path, ".json") {
- ans = append(ans, path)
- }
- return nil
- }
- err := filepath.Walk(dir, f)
- return ans, err
-}
diff --git a/internal/lsp/source/highlight.go b/internal/lsp/source/highlight.go
deleted file mode 100644
index 7cdb484a8..000000000
--- a/internal/lsp/source/highlight.go
+++ /dev/null
@@ -1,509 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package source
-
-import (
- "context"
- "fmt"
- "go/ast"
- "go/token"
- "go/types"
- "strings"
-
- "golang.org/x/tools/go/ast/astutil"
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/lsp/protocol"
- errors "golang.org/x/xerrors"
-)
-
-func Highlight(ctx context.Context, snapshot Snapshot, fh FileHandle, pos protocol.Position) ([]protocol.Range, error) {
- ctx, done := event.Start(ctx, "source.Highlight")
- defer done()
-
- // Don't use GetParsedFile because it uses TypecheckWorkspace, and we
- // always want fully parsed files for highlight, regardless of whether
- // the file belongs to a workspace package.
- pkg, err := snapshot.PackageForFile(ctx, fh.URI(), TypecheckFull, WidestPackage)
- if err != nil {
- return nil, errors.Errorf("getting package for Highlight: %w", err)
- }
- pgf, err := pkg.File(fh.URI())
- if err != nil {
- return nil, errors.Errorf("getting file for Highlight: %w", err)
- }
-
- spn, err := pgf.Mapper.PointSpan(pos)
- if err != nil {
- return nil, err
- }
- rng, err := spn.Range(pgf.Mapper.Converter)
- if err != nil {
- return nil, err
- }
- path, _ := astutil.PathEnclosingInterval(pgf.File, rng.Start, rng.Start)
- if len(path) == 0 {
- return nil, fmt.Errorf("no enclosing position found for %v:%v", int(pos.Line), int(pos.Character))
- }
- // If start == end for astutil.PathEnclosingInterval, the 1-char interval
- // following start is used instead. As a result, we might not get an exact
- // match so we should check the 1-char interval to the left of the passed
- // in position to see if that is an exact match.
- if _, ok := path[0].(*ast.Ident); !ok {
- if p, _ := astutil.PathEnclosingInterval(pgf.File, rng.Start-1, rng.Start-1); p != nil {
- switch p[0].(type) {
- case *ast.Ident, *ast.SelectorExpr:
- path = p // use preceding ident/selector
- }
- }
- }
- result, err := highlightPath(pkg, path)
- if err != nil {
- return nil, err
- }
- var ranges []protocol.Range
- for rng := range result {
- mRng, err := posToMappedRange(snapshot, pkg, rng.start, rng.end)
- if err != nil {
- return nil, err
- }
- pRng, err := mRng.Range()
- if err != nil {
- return nil, err
- }
- ranges = append(ranges, pRng)
- }
- return ranges, nil
-}
-
-func highlightPath(pkg Package, path []ast.Node) (map[posRange]struct{}, error) {
- result := make(map[posRange]struct{})
- switch node := path[0].(type) {
- case *ast.BasicLit:
- if len(path) > 1 {
- if _, ok := path[1].(*ast.ImportSpec); ok {
- err := highlightImportUses(pkg, path, result)
- return result, err
- }
- }
- highlightFuncControlFlow(path, result)
- case *ast.ReturnStmt, *ast.FuncDecl, *ast.FuncType:
- highlightFuncControlFlow(path, result)
- case *ast.Ident:
- highlightIdentifiers(pkg, path, result)
- case *ast.ForStmt, *ast.RangeStmt:
- highlightLoopControlFlow(path, result)
- case *ast.SwitchStmt:
- highlightSwitchFlow(path, result)
- case *ast.BranchStmt:
- // BREAK can exit a loop, switch or select, while CONTINUE exit a loop so
- // these need to be handled separately. They can also be embedded in any
- // other loop/switch/select if they have a label. TODO: add support for
- // GOTO and FALLTHROUGH as well.
- if node.Label != nil {
- highlightLabeledFlow(node, result)
- } else {
- switch node.Tok {
- case token.BREAK:
- highlightUnlabeledBreakFlow(path, result)
- case token.CONTINUE:
- highlightLoopControlFlow(path, result)
- }
- }
- default:
- // If the cursor is in an unidentified area, return empty results.
- return nil, nil
- }
- return result, nil
-}
-
-type posRange struct {
- start, end token.Pos
-}
-
-func highlightFuncControlFlow(path []ast.Node, result map[posRange]struct{}) {
- var enclosingFunc ast.Node
- var returnStmt *ast.ReturnStmt
- var resultsList *ast.FieldList
- inReturnList := false
-
-Outer:
- // Reverse walk the path till we get to the func block.
- for i, n := range path {
- switch node := n.(type) {
- case *ast.KeyValueExpr:
- // If cursor is in a key: value expr, we don't want control flow highlighting
- return
- case *ast.CallExpr:
- // If cusor is an arg in a callExpr, we don't want control flow highlighting.
- if i > 0 {
- for _, arg := range node.Args {
- if arg == path[i-1] {
- return
- }
- }
- }
- case *ast.Field:
- inReturnList = true
- case *ast.FuncLit:
- enclosingFunc = n
- resultsList = node.Type.Results
- break Outer
- case *ast.FuncDecl:
- enclosingFunc = n
- resultsList = node.Type.Results
- break Outer
- case *ast.ReturnStmt:
- returnStmt = node
- // If the cursor is not directly in a *ast.ReturnStmt, then
- // we need to know if it is within one of the values that is being returned.
- inReturnList = inReturnList || path[0] != returnStmt
- }
- }
- // Cursor is not in a function.
- if enclosingFunc == nil {
- return
- }
- // If the cursor is on a "return" or "func" keyword, we should highlight all of the exit
- // points of the function, including the "return" and "func" keywords.
- highlightAllReturnsAndFunc := path[0] == returnStmt || path[0] == enclosingFunc
- switch path[0].(type) {
- case *ast.Ident, *ast.BasicLit:
- // Cursor is in an identifier and not in a return statement or in the results list.
- if returnStmt == nil && !inReturnList {
- return
- }
- case *ast.FuncType:
- highlightAllReturnsAndFunc = true
- }
- // The user's cursor may be within the return statement of a function,
- // or within the result section of a function's signature.
- // index := -1
- var nodes []ast.Node
- if returnStmt != nil {
- for _, n := range returnStmt.Results {
- nodes = append(nodes, n)
- }
- } else if resultsList != nil {
- for _, n := range resultsList.List {
- nodes = append(nodes, n)
- }
- }
- _, index := nodeAtPos(nodes, path[0].Pos())
-
- // Highlight the correct argument in the function declaration return types.
- if resultsList != nil && -1 < index && index < len(resultsList.List) {
- rng := posRange{
- start: resultsList.List[index].Pos(),
- end: resultsList.List[index].End(),
- }
- result[rng] = struct{}{}
- }
- // Add the "func" part of the func declaration.
- if highlightAllReturnsAndFunc {
- r := posRange{
- start: enclosingFunc.Pos(),
- end: enclosingFunc.Pos() + token.Pos(len("func")),
- }
- result[r] = struct{}{}
- }
- ast.Inspect(enclosingFunc, func(n ast.Node) bool {
- // Don't traverse any other functions.
- switch n.(type) {
- case *ast.FuncDecl, *ast.FuncLit:
- return enclosingFunc == n
- }
- ret, ok := n.(*ast.ReturnStmt)
- if !ok {
- return true
- }
- var toAdd ast.Node
- // Add the entire return statement, applies when highlight the word "return" or "func".
- if highlightAllReturnsAndFunc {
- toAdd = n
- }
- // Add the relevant field within the entire return statement.
- if -1 < index && index < len(ret.Results) {
- toAdd = ret.Results[index]
- }
- if toAdd != nil {
- result[posRange{start: toAdd.Pos(), end: toAdd.End()}] = struct{}{}
- }
- return false
- })
-}
-
-func highlightUnlabeledBreakFlow(path []ast.Node, result map[posRange]struct{}) {
- // Reverse walk the path until we find closest loop, select, or switch.
- for _, n := range path {
- switch n.(type) {
- case *ast.ForStmt, *ast.RangeStmt:
- highlightLoopControlFlow(path, result)
- return // only highlight the innermost statement
- case *ast.SwitchStmt:
- highlightSwitchFlow(path, result)
- return
- case *ast.SelectStmt:
- // TODO: add highlight when breaking a select.
- return
- }
- }
-}
-
-func highlightLabeledFlow(node *ast.BranchStmt, result map[posRange]struct{}) {
- obj := node.Label.Obj
- if obj == nil || obj.Decl == nil {
- return
- }
- label, ok := obj.Decl.(*ast.LabeledStmt)
- if !ok {
- return
- }
- switch label.Stmt.(type) {
- case *ast.ForStmt, *ast.RangeStmt:
- highlightLoopControlFlow([]ast.Node{label.Stmt, label}, result)
- case *ast.SwitchStmt:
- highlightSwitchFlow([]ast.Node{label.Stmt, label}, result)
- }
-}
-
-func labelFor(path []ast.Node) *ast.Ident {
- if len(path) > 1 {
- if n, ok := path[1].(*ast.LabeledStmt); ok {
- return n.Label
- }
- }
- return nil
-}
-
-func highlightLoopControlFlow(path []ast.Node, result map[posRange]struct{}) {
- var loop ast.Node
- var loopLabel *ast.Ident
- stmtLabel := labelFor(path)
-Outer:
- // Reverse walk the path till we get to the for loop.
- for i := range path {
- switch n := path[i].(type) {
- case *ast.ForStmt, *ast.RangeStmt:
- loopLabel = labelFor(path[i:])
-
- if stmtLabel == nil || loopLabel == stmtLabel {
- loop = n
- break Outer
- }
- }
- }
- if loop == nil {
- return
- }
-
- // Add the for statement.
- rng := posRange{
- start: loop.Pos(),
- end: loop.Pos() + token.Pos(len("for")),
- }
- result[rng] = struct{}{}
-
- // Traverse AST to find branch statements within the same for-loop.
- ast.Inspect(loop, func(n ast.Node) bool {
- switch n.(type) {
- case *ast.ForStmt, *ast.RangeStmt:
- return loop == n
- case *ast.SwitchStmt, *ast.SelectStmt:
- return false
- }
- b, ok := n.(*ast.BranchStmt)
- if !ok {
- return true
- }
- if b.Label == nil || labelDecl(b.Label) == loopLabel {
- result[posRange{start: b.Pos(), end: b.End()}] = struct{}{}
- }
- return true
- })
-
- // Find continue statements in the same loop or switches/selects.
- ast.Inspect(loop, func(n ast.Node) bool {
- switch n.(type) {
- case *ast.ForStmt, *ast.RangeStmt:
- return loop == n
- }
-
- if n, ok := n.(*ast.BranchStmt); ok && n.Tok == token.CONTINUE {
- result[posRange{start: n.Pos(), end: n.End()}] = struct{}{}
- }
- return true
- })
-
- // We don't need to check other for loops if we aren't looking for labeled statements.
- if loopLabel == nil {
- return
- }
-
- // Find labeled branch statements in any loop.
- ast.Inspect(loop, func(n ast.Node) bool {
- b, ok := n.(*ast.BranchStmt)
- if !ok {
- return true
- }
- // statement with labels that matches the loop
- if b.Label != nil && labelDecl(b.Label) == loopLabel {
- result[posRange{start: b.Pos(), end: b.End()}] = struct{}{}
- }
- return true
- })
-}
-
-func highlightSwitchFlow(path []ast.Node, result map[posRange]struct{}) {
- var switchNode ast.Node
- var switchNodeLabel *ast.Ident
- stmtLabel := labelFor(path)
-Outer:
- // Reverse walk the path till we get to the switch statement.
- for i := range path {
- switch n := path[i].(type) {
- case *ast.SwitchStmt:
- switchNodeLabel = labelFor(path[i:])
- if stmtLabel == nil || switchNodeLabel == stmtLabel {
- switchNode = n
- break Outer
- }
- }
- }
- // Cursor is not in a switch statement
- if switchNode == nil {
- return
- }
-
- // Add the switch statement.
- rng := posRange{
- start: switchNode.Pos(),
- end: switchNode.Pos() + token.Pos(len("switch")),
- }
- result[rng] = struct{}{}
-
- // Traverse AST to find break statements within the same switch.
- ast.Inspect(switchNode, func(n ast.Node) bool {
- switch n.(type) {
- case *ast.SwitchStmt:
- return switchNode == n
- case *ast.ForStmt, *ast.RangeStmt, *ast.SelectStmt:
- return false
- }
-
- b, ok := n.(*ast.BranchStmt)
- if !ok || b.Tok != token.BREAK {
- return true
- }
-
- if b.Label == nil || labelDecl(b.Label) == switchNodeLabel {
- result[posRange{start: b.Pos(), end: b.End()}] = struct{}{}
- }
- return true
- })
-
- // We don't need to check other switches if we aren't looking for labeled statements.
- if switchNodeLabel == nil {
- return
- }
-
- // Find labeled break statements in any switch
- ast.Inspect(switchNode, func(n ast.Node) bool {
- b, ok := n.(*ast.BranchStmt)
- if !ok || b.Tok != token.BREAK {
- return true
- }
-
- if b.Label != nil && labelDecl(b.Label) == switchNodeLabel {
- result[posRange{start: b.Pos(), end: b.End()}] = struct{}{}
- }
-
- return true
- })
-}
-
-func labelDecl(n *ast.Ident) *ast.Ident {
- if n == nil {
- return nil
- }
- if n.Obj == nil {
- return nil
- }
- if n.Obj.Decl == nil {
- return nil
- }
- stmt, ok := n.Obj.Decl.(*ast.LabeledStmt)
- if !ok {
- return nil
- }
- return stmt.Label
-}
-
-func highlightImportUses(pkg Package, path []ast.Node, result map[posRange]struct{}) error {
- basicLit, ok := path[0].(*ast.BasicLit)
- if !ok {
- return errors.Errorf("highlightImportUses called with an ast.Node of type %T", basicLit)
- }
- ast.Inspect(path[len(path)-1], func(node ast.Node) bool {
- if imp, ok := node.(*ast.ImportSpec); ok && imp.Path == basicLit {
- result[posRange{start: node.Pos(), end: node.End()}] = struct{}{}
- return false
- }
- n, ok := node.(*ast.Ident)
- if !ok {
- return true
- }
- obj, ok := pkg.GetTypesInfo().ObjectOf(n).(*types.PkgName)
- if !ok {
- return true
- }
- if !strings.Contains(basicLit.Value, obj.Name()) {
- return true
- }
- result[posRange{start: n.Pos(), end: n.End()}] = struct{}{}
- return false
- })
- return nil
-}
-
-func highlightIdentifiers(pkg Package, path []ast.Node, result map[posRange]struct{}) error {
- id, ok := path[0].(*ast.Ident)
- if !ok {
- return errors.Errorf("highlightIdentifiers called with an ast.Node of type %T", id)
- }
- // Check if ident is inside return or func decl.
- highlightFuncControlFlow(path, result)
-
- // TODO: maybe check if ident is a reserved word, if true then don't continue and return results.
-
- idObj := pkg.GetTypesInfo().ObjectOf(id)
- pkgObj, isImported := idObj.(*types.PkgName)
- ast.Inspect(path[len(path)-1], func(node ast.Node) bool {
- if imp, ok := node.(*ast.ImportSpec); ok && isImported {
- highlightImport(pkgObj, imp, result)
- }
- n, ok := node.(*ast.Ident)
- if !ok {
- return true
- }
- if n.Name != id.Name {
- return false
- }
- if nObj := pkg.GetTypesInfo().ObjectOf(n); nObj == idObj {
- result[posRange{start: n.Pos(), end: n.End()}] = struct{}{}
- }
- return false
- })
- return nil
-}
-
-func highlightImport(obj *types.PkgName, imp *ast.ImportSpec, result map[posRange]struct{}) {
- if imp.Name != nil || imp.Path == nil {
- return
- }
- if !strings.Contains(imp.Path.Value, obj.Name()) {
- return
- }
- result[posRange{start: imp.Path.Pos(), end: imp.Path.End()}] = struct{}{}
-}
diff --git a/internal/lsp/source/hover.go b/internal/lsp/source/hover.go
deleted file mode 100644
index b6fd9acf9..000000000
--- a/internal/lsp/source/hover.go
+++ /dev/null
@@ -1,870 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package source
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "go/ast"
- "go/constant"
- "go/doc"
- "go/format"
- "go/token"
- "go/types"
- "strconv"
- "strings"
- "time"
- "unicode/utf8"
-
- "golang.org/x/text/unicode/runenames"
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/typeparams"
- errors "golang.org/x/xerrors"
-)
-
-// HoverContext contains context extracted from the syntax and type information
-// of a given node, for use in various summaries (hover, autocomplete,
-// signature help).
-type HoverContext struct {
- // signatureSource is the object or node use to derive the hover signature.
- //
- // It may also hold a precomputed string.
- // TODO(rfindley): pre-compute all signatures to avoid this indirection.
- signatureSource interface{}
-
- // comment is the most relevant comment group associated with the hovered object.
- Comment *ast.CommentGroup
-}
-
-// HoverJSON contains information used by hover. It is also the JSON returned
-// for the "structured" hover format
-type HoverJSON struct {
- // Synopsis is a single sentence synopsis of the symbol's documentation.
- Synopsis string `json:"synopsis"`
-
- // FullDocumentation is the symbol's full documentation.
- FullDocumentation string `json:"fullDocumentation"`
-
- // Signature is the symbol's signature.
- Signature string `json:"signature"`
-
- // SingleLine is a single line describing the symbol.
- // This is recommended only for use in clients that show a single line for hover.
- SingleLine string `json:"singleLine"`
-
- // SymbolName is the types.Object.Name for the given symbol.
- SymbolName string `json:"symbolName"`
-
- // LinkPath is the pkg.go.dev link for the given symbol.
- // For example, the "go/ast" part of "pkg.go.dev/go/ast#Node".
- LinkPath string `json:"linkPath"`
-
- // LinkAnchor is the pkg.go.dev link anchor for the given symbol.
- // For example, the "Node" part of "pkg.go.dev/go/ast#Node".
- LinkAnchor string `json:"linkAnchor"`
-}
-
-func Hover(ctx context.Context, snapshot Snapshot, fh FileHandle, position protocol.Position) (*protocol.Hover, error) {
- ident, err := Identifier(ctx, snapshot, fh, position)
- if err != nil {
- if hover, innerErr := hoverRune(ctx, snapshot, fh, position); innerErr == nil {
- return hover, nil
- }
- return nil, nil
- }
- h, err := HoverIdentifier(ctx, ident)
- if err != nil {
- return nil, err
- }
- rng, err := ident.Range()
- if err != nil {
- return nil, err
- }
- hover, err := FormatHover(h, snapshot.View().Options())
- if err != nil {
- return nil, err
- }
- return &protocol.Hover{
- Contents: protocol.MarkupContent{
- Kind: snapshot.View().Options().PreferredContentFormat,
- Value: hover,
- },
- Range: rng,
- }, nil
-}
-
-func hoverRune(ctx context.Context, snapshot Snapshot, fh FileHandle, position protocol.Position) (*protocol.Hover, error) {
- ctx, done := event.Start(ctx, "source.hoverRune")
- defer done()
-
- r, mrng, err := findRune(ctx, snapshot, fh, position)
- if err != nil {
- return nil, err
- }
- rng, err := mrng.Range()
- if err != nil {
- return nil, err
- }
-
- var desc string
- runeName := runenames.Name(r)
- if len(runeName) > 0 && runeName[0] == '<' {
- // Check if the rune looks like an HTML tag. If so, trim the surrounding <>
- // characters to work around https://github.com/microsoft/vscode/issues/124042.
- runeName = strings.TrimRight(runeName[1:], ">")
- }
- if strconv.IsPrint(r) {
- desc = fmt.Sprintf("'%s', U+%04X, %s", string(r), uint32(r), runeName)
- } else {
- desc = fmt.Sprintf("U+%04X, %s", uint32(r), runeName)
- }
- return &protocol.Hover{
- Contents: protocol.MarkupContent{
- Kind: snapshot.View().Options().PreferredContentFormat,
- Value: desc,
- },
- Range: rng,
- }, nil
-}
-
-// ErrNoRuneFound is the error returned when no rune is found at a particular position.
-var ErrNoRuneFound = errors.New("no rune found")
-
-// findRune returns rune information for a position in a file.
-func findRune(ctx context.Context, snapshot Snapshot, fh FileHandle, position protocol.Position) (rune, MappedRange, error) {
- pkg, pgf, err := GetParsedFile(ctx, snapshot, fh, NarrowestPackage)
- if err != nil {
- return 0, MappedRange{}, err
- }
- spn, err := pgf.Mapper.PointSpan(position)
- if err != nil {
- return 0, MappedRange{}, err
- }
- rng, err := spn.Range(pgf.Mapper.Converter)
- if err != nil {
- return 0, MappedRange{}, err
- }
- pos := rng.Start
-
- // Find the basic literal enclosing the given position, if there is one.
- var lit *ast.BasicLit
- var found bool
- ast.Inspect(pgf.File, func(n ast.Node) bool {
- if found {
- return false
- }
- if n, ok := n.(*ast.BasicLit); ok && pos >= n.Pos() && pos <= n.End() {
- lit = n
- found = true
- }
- return !found
- })
- if !found {
- return 0, MappedRange{}, ErrNoRuneFound
- }
-
- var r rune
- var start, end token.Pos
- switch lit.Kind {
- case token.CHAR:
- s, err := strconv.Unquote(lit.Value)
- if err != nil {
- // If the conversion fails, it's because of an invalid syntax, therefore
- // there is no rune to be found.
- return 0, MappedRange{}, ErrNoRuneFound
- }
- r, _ = utf8.DecodeRuneInString(s)
- if r == utf8.RuneError {
- return 0, MappedRange{}, fmt.Errorf("rune error")
- }
- start, end = lit.Pos(), lit.End()
- case token.INT:
- // It's an integer, scan only if it is a hex litteral whose bitsize in
- // ranging from 8 to 32.
- if !(strings.HasPrefix(lit.Value, "0x") && len(lit.Value[2:]) >= 2 && len(lit.Value[2:]) <= 8) {
- return 0, MappedRange{}, ErrNoRuneFound
- }
- v, err := strconv.ParseUint(lit.Value[2:], 16, 32)
- if err != nil {
- return 0, MappedRange{}, err
- }
- r = rune(v)
- if r == utf8.RuneError {
- return 0, MappedRange{}, fmt.Errorf("rune error")
- }
- start, end = lit.Pos(), lit.End()
- case token.STRING:
- // It's a string, scan only if it contains a unicode escape sequence under or before the
- // current cursor position.
- var found bool
- litOffset, err := Offset(pgf.Tok, lit.Pos())
- if err != nil {
- return 0, MappedRange{}, err
- }
- offset, err := Offset(pgf.Tok, pos)
- if err != nil {
- return 0, MappedRange{}, err
- }
- for i := offset - litOffset; i > 0; i-- {
- // Start at the cursor position and search backward for the beginning of a rune escape sequence.
- rr, _ := utf8.DecodeRuneInString(lit.Value[i:])
- if rr == utf8.RuneError {
- return 0, MappedRange{}, fmt.Errorf("rune error")
- }
- if rr == '\\' {
- // Got the beginning, decode it.
- var tail string
- r, _, tail, err = strconv.UnquoteChar(lit.Value[i:], '"')
- if err != nil {
- // If the conversion fails, it's because of an invalid syntax, therefore is no rune to be found.
- return 0, MappedRange{}, ErrNoRuneFound
- }
- // Only the rune escape sequence part of the string has to be highlighted, recompute the range.
- runeLen := len(lit.Value) - (int(i) + len(tail))
- start = token.Pos(int(lit.Pos()) + int(i))
- end = token.Pos(int(start) + runeLen)
- found = true
- break
- }
- }
- if !found {
- // No escape sequence found
- return 0, MappedRange{}, ErrNoRuneFound
- }
- default:
- return 0, MappedRange{}, ErrNoRuneFound
- }
-
- mappedRange, err := posToMappedRange(snapshot, pkg, start, end)
- if err != nil {
- return 0, MappedRange{}, err
- }
- return r, mappedRange, nil
-}
-
-func HoverIdentifier(ctx context.Context, i *IdentifierInfo) (*HoverJSON, error) {
- ctx, done := event.Start(ctx, "source.Hover")
- defer done()
-
- hoverCtx, err := FindHoverContext(ctx, i.Snapshot, i.pkg, i.Declaration.obj, i.Declaration.node, i.Declaration.fullDecl)
- if err != nil {
- return nil, err
- }
-
- h := &HoverJSON{
- FullDocumentation: hoverCtx.Comment.Text(),
- Synopsis: doc.Synopsis(hoverCtx.Comment.Text()),
- }
-
- fset := i.Snapshot.FileSet()
- // Determine the symbol's signature.
- switch x := hoverCtx.signatureSource.(type) {
- case string:
- h.Signature = x // a pre-computed signature
-
- case *ast.TypeSpec:
- x2 := *x
- // Don't duplicate comments when formatting type specs.
- x2.Doc = nil
- x2.Comment = nil
- var b strings.Builder
- b.WriteString("type ")
- if err := format.Node(&b, fset, &x2); err != nil {
- return nil, err
- }
- h.Signature = b.String()
-
- case ast.Node:
- var b strings.Builder
- if err := format.Node(&b, fset, x); err != nil {
- return nil, err
- }
- h.Signature = b.String()
-
- // Check if the variable is an integer whose value we can present in a more
- // user-friendly way, i.e. `var hex = 0xe34e` becomes `var hex = 58190`
- if spec, ok := x.(*ast.ValueSpec); ok && len(spec.Values) > 0 {
- if lit, ok := spec.Values[0].(*ast.BasicLit); ok && len(spec.Names) > 0 {
- val := constant.MakeFromLiteral(types.ExprString(lit), lit.Kind, 0)
- h.Signature = fmt.Sprintf("var %s = %s", spec.Names[0], val)
- }
- }
-
- case types.Object:
- // If the variable is implicitly declared in a type switch, we need to
- // manually generate its object string.
- if typ := i.Declaration.typeSwitchImplicit; typ != nil {
- if v, ok := x.(*types.Var); ok {
- h.Signature = fmt.Sprintf("var %s %s", v.Name(), types.TypeString(typ, i.qf))
- break
- }
- }
- h.Signature = objectString(x, i.qf, i.Inferred)
- }
- if obj := i.Declaration.obj; obj != nil {
- h.SingleLine = objectString(obj, i.qf, nil)
- }
- obj := i.Declaration.obj
- if obj == nil {
- return h, nil
- }
-
- // Check if the identifier is test-only (and is therefore not part of a
- // package's API). This is true if the request originated in a test package,
- // and if the declaration is also found in the same test package.
- if i.pkg != nil && obj.Pkg() != nil && i.pkg.ForTest() != "" {
- if _, err := i.pkg.File(i.Declaration.MappedRange[0].URI()); err == nil {
- return h, nil
- }
- }
-
- h.SymbolName, h.LinkPath, h.LinkAnchor = linkData(obj, i.enclosing)
-
- // See golang/go#36998: don't link to modules matching GOPRIVATE.
- //
- // The path returned by linkData is an import path.
- if i.Snapshot.View().IsGoPrivatePath(h.LinkPath) {
- h.LinkPath = ""
- } else if mod, version, ok := moduleAtVersion(h.LinkPath, i); ok {
- h.LinkPath = strings.Replace(h.LinkPath, mod, mod+"@"+version, 1)
- }
-
- return h, nil
-}
-
-// linkData returns the name, import path, and anchor to use in building links
-// to obj.
-//
-// If obj is not visible in documentation, the returned name will be empty.
-func linkData(obj types.Object, enclosing *types.TypeName) (name, importPath, anchor string) {
- // Package names simply link to the package.
- if obj, ok := obj.(*types.PkgName); ok {
- return obj.Name(), obj.Imported().Path(), ""
- }
-
- // Builtins link to the special builtin package.
- if obj.Parent() == types.Universe {
- return obj.Name(), "builtin", obj.Name()
- }
-
- // In all other cases, the object must be exported.
- if !obj.Exported() {
- return "", "", ""
- }
-
- var recv types.Object // If non-nil, the field or method receiver base.
-
- switch obj := obj.(type) {
- case *types.Var:
- // If the object is a field, and we have an associated selector
- // composite literal, or struct, we can determine the link.
- if obj.IsField() && enclosing != nil {
- recv = enclosing
- }
- case *types.Func:
- typ, ok := obj.Type().(*types.Signature)
- if !ok {
- // Note: this should never happen. go/types guarantees that the type of
- // *Funcs are Signatures.
- //
- // TODO(rfindley): given a 'debug' mode, we should panic here.
- return "", "", ""
- }
- if r := typ.Recv(); r != nil {
- if rtyp, _ := Deref(r.Type()).(*types.Named); rtyp != nil {
- // If we have an unexported type, see if the enclosing type is
- // exported (we may have an interface or struct we can link
- // to). If not, don't show any link.
- if !rtyp.Obj().Exported() {
- if enclosing != nil {
- recv = enclosing
- } else {
- return "", "", ""
- }
- } else {
- recv = rtyp.Obj()
- }
- }
- }
- }
-
- if recv != nil && !recv.Exported() {
- return "", "", ""
- }
-
- // Either the object or its receiver must be in the package scope.
- scopeObj := obj
- if recv != nil {
- scopeObj = recv
- }
- if scopeObj.Pkg() == nil || scopeObj.Pkg().Scope().Lookup(scopeObj.Name()) != scopeObj {
- return "", "", ""
- }
-
- importPath = obj.Pkg().Path()
- if recv != nil {
- anchor = fmt.Sprintf("%s.%s", recv.Name(), obj.Name())
- name = fmt.Sprintf("(%s.%s).%s", obj.Pkg().Name(), recv.Name(), obj.Name())
- } else {
- // For most cases, the link is "package/path#symbol".
- anchor = obj.Name()
- name = fmt.Sprintf("%s.%s", obj.Pkg().Name(), obj.Name())
- }
- return name, importPath, anchor
-}
-
-func moduleAtVersion(path string, i *IdentifierInfo) (string, string, bool) {
- // TODO(rfindley): moduleAtVersion should not be responsible for deciding
- // whether or not the link target supports module version links.
- if strings.ToLower(i.Snapshot.View().Options().LinkTarget) != "pkg.go.dev" {
- return "", "", false
- }
- impPkg, err := i.pkg.GetImport(path)
- if err != nil {
- return "", "", false
- }
- if impPkg.Version() == nil {
- return "", "", false
- }
- version, modpath := impPkg.Version().Version, impPkg.Version().Path
- if modpath == "" || version == "" {
- return "", "", false
- }
- return modpath, version, true
-}
-
-// objectString is a wrapper around the types.ObjectString function.
-// It handles adding more information to the object string.
-func objectString(obj types.Object, qf types.Qualifier, inferred *types.Signature) string {
- // If the signature type was inferred, prefer the preferred signature with a
- // comment showing the generic signature.
- if sig, _ := obj.Type().(*types.Signature); sig != nil && typeparams.ForSignature(sig).Len() > 0 && inferred != nil {
- obj2 := types.NewFunc(obj.Pos(), obj.Pkg(), obj.Name(), inferred)
- str := types.ObjectString(obj2, qf)
- // Try to avoid overly long lines.
- if len(str) > 60 {
- str += "\n"
- } else {
- str += " "
- }
- str += "// " + types.TypeString(sig, qf)
- return str
- }
- str := types.ObjectString(obj, qf)
- switch obj := obj.(type) {
- case *types.Const:
- str = fmt.Sprintf("%s = %s", str, obj.Val())
-
- // Try to add a formatted duration as an inline comment
- typ, ok := obj.Type().(*types.Named)
- if !ok {
- break
- }
- pkg := typ.Obj().Pkg()
- if pkg.Path() == "time" && typ.Obj().Name() == "Duration" {
- if d, ok := constant.Int64Val(obj.Val()); ok {
- str += " // " + time.Duration(d).String()
- }
- }
- }
- return str
-}
-
-// FindHoverContext returns a HoverContext struct for an AST node and its
-// declaration object. node should be the actual node used in type checking,
-// while fullNode could be a separate node with more complete syntactic
-// information.
-func FindHoverContext(ctx context.Context, s Snapshot, pkg Package, obj types.Object, pkgNode ast.Node, fullDecl ast.Decl) (*HoverContext, error) {
- var info *HoverContext
-
- // Type parameters get their signature from their declaration object.
- if _, isTypeName := obj.(*types.TypeName); isTypeName {
- if _, isTypeParam := obj.Type().(*typeparams.TypeParam); isTypeParam {
- return &HoverContext{signatureSource: obj}, nil
- }
- }
-
- // This is problematic for a number of reasons. We really need to have a more
- // general mechanism to validate the coherency of AST with type information,
- // but absent that we must do our best to ensure that we don't use fullNode
- // when we actually need the node that was type checked.
- //
- // pkgNode may be nil, if it was eliminated from the type-checked syntax. In
- // that case, use fullDecl if available.
- node := pkgNode
- if node == nil && fullDecl != nil {
- node = fullDecl
- }
-
- switch node := node.(type) {
- case *ast.Ident:
- // The package declaration.
- for _, f := range pkg.GetSyntax() {
- if f.Name == pkgNode {
- info = &HoverContext{Comment: f.Doc}
- }
- }
- case *ast.ImportSpec:
- // Try to find the package documentation for an imported package.
- if pkgName, ok := obj.(*types.PkgName); ok {
- imp, err := pkg.GetImport(pkgName.Imported().Path())
- if err != nil {
- return nil, err
- }
- // Assume that only one file will contain package documentation,
- // so pick the first file that has a doc comment.
- for _, file := range imp.GetSyntax() {
- if file.Doc != nil {
- info = &HoverContext{signatureSource: obj, Comment: file.Doc}
- break
- }
- }
- }
- info = &HoverContext{signatureSource: node}
- case *ast.GenDecl:
- switch obj := obj.(type) {
- case *types.TypeName, *types.Var, *types.Const, *types.Func:
- // Always use the full declaration here if we have it, because the
- // dependent code doesn't rely on pointer identity. This is fragile.
- if d, _ := fullDecl.(*ast.GenDecl); d != nil {
- node = d
- }
- // obj may not have been produced by type checking the AST containing
- // node, so we need to be careful about using token.Pos.
- tok := s.FileSet().File(obj.Pos())
- offset, err := Offset(tok, obj.Pos())
- if err != nil {
- return nil, err
- }
-
- // fullTok and fullPos are the *token.File and object position in for the
- // full AST.
- fullTok := s.FileSet().File(node.Pos())
- fullPos, err := Pos(fullTok, offset)
- if err != nil {
- return nil, err
- }
-
- var spec ast.Spec
- for _, s := range node.Specs {
- // Avoid panics by guarding the calls to token.Offset (golang/go#48249).
- start, err := Offset(fullTok, s.Pos())
- if err != nil {
- return nil, err
- }
- end, err := Offset(fullTok, s.End())
- if err != nil {
- return nil, err
- }
- if start <= offset && offset <= end {
- spec = s
- break
- }
- }
-
- info, err = hoverGenDecl(node, spec, fullPos, obj)
- if err != nil {
- return nil, err
- }
- }
- case *ast.TypeSpec:
- if obj.Parent() == types.Universe {
- if genDecl, ok := fullDecl.(*ast.GenDecl); ok {
- info = hoverTypeSpec(node, genDecl)
- }
- }
- case *ast.FuncDecl:
- switch obj.(type) {
- case *types.Func:
- info = &HoverContext{signatureSource: obj, Comment: node.Doc}
- case *types.Builtin:
- info = &HoverContext{Comment: node.Doc}
- if sig, err := NewBuiltinSignature(ctx, s, obj.Name()); err == nil {
- info.signatureSource = "func " + sig.name + sig.Format()
- } else {
- // Fall back on the object as a signature source.
-
- // TODO(rfindley): refactor so that we can report bugs from the source
- // package.
-
- // debug.Bug(ctx, "invalid builtin hover", "did not find builtin signature: %v", err)
- info.signatureSource = obj
- }
- case *types.Var:
- // Object is a function param or the field of an anonymous struct
- // declared with ':='. Skip the first one because only fields
- // can have docs.
- if isFunctionParam(obj, node) {
- break
- }
-
- field, err := s.PosToField(ctx, pkg, obj.Pos())
- if err != nil {
- return nil, err
- }
-
- if field != nil {
- comment := field.Doc
- if comment.Text() == "" {
- comment = field.Comment
- }
- info = &HoverContext{signatureSource: obj, Comment: comment}
- }
- }
- }
-
- if info == nil {
- info = &HoverContext{signatureSource: obj}
- }
-
- return info, nil
-}
-
-// isFunctionParam returns true if the passed object is either an incoming
-// or an outgoing function param
-func isFunctionParam(obj types.Object, node *ast.FuncDecl) bool {
- for _, f := range node.Type.Params.List {
- if f.Pos() == obj.Pos() {
- return true
- }
- }
- if node.Type.Results != nil {
- for _, f := range node.Type.Results.List {
- if f.Pos() == obj.Pos() {
- return true
- }
- }
- }
- return false
-}
-
-// hoverGenDecl returns hover information an object declared via spec inside
-// of the GenDecl node. obj is the type-checked object corresponding to the
-// declaration, but may have been type-checked using a different AST than the
-// given nodes; fullPos is the position of obj in node's AST.
-func hoverGenDecl(node *ast.GenDecl, spec ast.Spec, fullPos token.Pos, obj types.Object) (*HoverContext, error) {
- if spec == nil {
- return nil, errors.Errorf("no spec for node %v at position %v", node, fullPos)
- }
-
- // If we have a field or method.
- switch obj.(type) {
- case *types.Var, *types.Const, *types.Func:
- return hoverVar(spec, fullPos, obj, node), nil
- }
- // Handle types.
- switch spec := spec.(type) {
- case *ast.TypeSpec:
- return hoverTypeSpec(spec, node), nil
- case *ast.ValueSpec:
- return &HoverContext{signatureSource: spec, Comment: spec.Doc}, nil
- case *ast.ImportSpec:
- return &HoverContext{signatureSource: spec, Comment: spec.Doc}, nil
- }
- return nil, errors.Errorf("unable to format spec %v (%T)", spec, spec)
-}
-
-// TODO(rfindley): rename this function.
-func hoverTypeSpec(spec *ast.TypeSpec, decl *ast.GenDecl) *HoverContext {
- comment := spec.Doc
- if comment == nil && decl != nil {
- comment = decl.Doc
- }
- if comment == nil {
- comment = spec.Comment
- }
- return &HoverContext{
- signatureSource: spec,
- Comment: comment,
- }
-}
-
-func hoverVar(node ast.Spec, fullPos token.Pos, obj types.Object, decl *ast.GenDecl) *HoverContext {
- var fieldList *ast.FieldList
- switch spec := node.(type) {
- case *ast.TypeSpec:
- switch t := spec.Type.(type) {
- case *ast.StructType:
- fieldList = t.Fields
- case *ast.InterfaceType:
- fieldList = t.Methods
- }
- case *ast.ValueSpec:
- // Try to extract the field list of an anonymous struct
- if fieldList = extractFieldList(spec.Type); fieldList != nil {
- break
- }
-
- comment := spec.Doc
- if comment == nil {
- comment = decl.Doc
- }
- if comment == nil {
- comment = spec.Comment
- }
-
- // We need the AST nodes for variable declarations of basic literals with
- // associated values so that we can augment their hover with more information.
- if _, ok := obj.(*types.Var); ok && spec.Type == nil && len(spec.Values) > 0 {
- if _, ok := spec.Values[0].(*ast.BasicLit); ok {
- return &HoverContext{signatureSource: spec, Comment: comment}
- }
- }
-
- return &HoverContext{signatureSource: obj, Comment: comment}
- }
-
- if fieldList != nil {
- comment := findFieldComment(fullPos, fieldList)
- return &HoverContext{signatureSource: obj, Comment: comment}
- }
- return &HoverContext{signatureSource: obj, Comment: decl.Doc}
-}
-
-// extractFieldList recursively tries to extract a field list.
-// If it is not found, nil is returned.
-func extractFieldList(specType ast.Expr) *ast.FieldList {
- switch t := specType.(type) {
- case *ast.StructType:
- return t.Fields
- case *ast.InterfaceType:
- return t.Methods
- case *ast.ArrayType:
- return extractFieldList(t.Elt)
- case *ast.MapType:
- // Map value has a greater chance to be a struct
- if fields := extractFieldList(t.Value); fields != nil {
- return fields
- }
- return extractFieldList(t.Key)
- case *ast.ChanType:
- return extractFieldList(t.Value)
- }
- return nil
-}
-
-// findFieldComment visits all fields in depth-first order and returns
-// the comment of a field with passed position. If no comment is found,
-// nil is returned.
-func findFieldComment(pos token.Pos, fieldList *ast.FieldList) *ast.CommentGroup {
- for _, field := range fieldList.List {
- if field.Pos() == pos {
- if field.Doc.Text() != "" {
- return field.Doc
- }
- return field.Comment
- }
-
- if nestedFieldList := extractFieldList(field.Type); nestedFieldList != nil {
- if c := findFieldComment(pos, nestedFieldList); c != nil {
- return c
- }
- }
- }
- return nil
-}
-
-func FormatHover(h *HoverJSON, options *Options) (string, error) {
- signature := formatSignature(h, options)
-
- switch options.HoverKind {
- case SingleLine:
- return h.SingleLine, nil
- case NoDocumentation:
- return signature, nil
- case Structured:
- b, err := json.Marshal(h)
- if err != nil {
- return "", err
- }
- return string(b), nil
- }
-
- link := formatLink(h, options)
- doc := formatDoc(h, options)
-
- var b strings.Builder
- parts := []string{signature, doc, link}
- for i, el := range parts {
- if el != "" {
- b.WriteString(el)
-
- // Don't write out final newline.
- if i == len(parts) {
- continue
- }
- // If any elements of the remainder of the list are non-empty,
- // write a newline.
- if anyNonEmpty(parts[i+1:]) {
- if options.PreferredContentFormat == protocol.Markdown {
- b.WriteString("\n\n")
- } else {
- b.WriteRune('\n')
- }
- }
- }
- }
- return b.String(), nil
-}
-
-func formatSignature(h *HoverJSON, options *Options) string {
- signature := h.Signature
- if signature != "" && options.PreferredContentFormat == protocol.Markdown {
- signature = fmt.Sprintf("```go\n%s\n```", signature)
- }
- return signature
-}
-
-func formatLink(h *HoverJSON, options *Options) string {
- if !options.LinksInHover || options.LinkTarget == "" || h.LinkPath == "" {
- return ""
- }
- plainLink := BuildLink(options.LinkTarget, h.LinkPath, h.LinkAnchor)
- switch options.PreferredContentFormat {
- case protocol.Markdown:
- return fmt.Sprintf("[`%s` on %s](%s)", h.SymbolName, options.LinkTarget, plainLink)
- case protocol.PlainText:
- return ""
- default:
- return plainLink
- }
-}
-
-// BuildLink constructs a link with the given target, path, and anchor.
-func BuildLink(target, path, anchor string) string {
- link := fmt.Sprintf("https://%s/%s", target, path)
- if target == "pkg.go.dev" {
- link += "?utm_source=gopls"
- }
- if anchor == "" {
- return link
- }
- return link + "#" + anchor
-}
-
-func formatDoc(h *HoverJSON, options *Options) string {
- var doc string
- switch options.HoverKind {
- case SynopsisDocumentation:
- doc = h.Synopsis
- case FullDocumentation:
- doc = h.FullDocumentation
- }
- if options.PreferredContentFormat == protocol.Markdown {
- return CommentToMarkdown(doc)
- }
- return doc
-}
-
-func anyNonEmpty(x []string) bool {
- for _, el := range x {
- if el != "" {
- return true
- }
- }
- return false
-}
diff --git a/internal/lsp/source/identifier.go b/internal/lsp/source/identifier.go
deleted file mode 100644
index bf4941f18..000000000
--- a/internal/lsp/source/identifier.go
+++ /dev/null
@@ -1,576 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package source
-
-import (
- "context"
- "fmt"
- "go/ast"
- "go/parser"
- "go/token"
- "go/types"
- "sort"
- "strconv"
-
- "golang.org/x/tools/go/ast/astutil"
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/span"
- "golang.org/x/tools/internal/typeparams"
- errors "golang.org/x/xerrors"
-)
-
-// IdentifierInfo holds information about an identifier in Go source.
-type IdentifierInfo struct {
- Name string
- Snapshot Snapshot
- MappedRange
-
- Type struct {
- MappedRange
- Object types.Object
- }
-
- Inferred *types.Signature
-
- Declaration Declaration
-
- ident *ast.Ident
-
- // For struct fields or embedded interfaces, enclosing is the object
- // corresponding to the outer type declaration, if it is exported, for use in
- // documentation links.
- enclosing *types.TypeName
-
- pkg Package
- qf types.Qualifier
-}
-
-func (i *IdentifierInfo) IsImport() bool {
- _, ok := i.Declaration.node.(*ast.ImportSpec)
- return ok
-}
-
-type Declaration struct {
- MappedRange []MappedRange
-
- // The typechecked node.
- node ast.Node
-
- // Optional: the fully parsed node, to be used for formatting in cases where
- // node has missing information. This could be the case when node was parsed
- // in ParseExported mode.
- fullDecl ast.Decl
-
- // The typechecked object.
- obj types.Object
-
- // typeSwitchImplicit indicates that the declaration is in an implicit
- // type switch. Its type is the type of the variable on the right-hand
- // side of the type switch.
- typeSwitchImplicit types.Type
-}
-
-// Identifier returns identifier information for a position
-// in a file, accounting for a potentially incomplete selector.
-func Identifier(ctx context.Context, snapshot Snapshot, fh FileHandle, pos protocol.Position) (*IdentifierInfo, error) {
- ctx, done := event.Start(ctx, "source.Identifier")
- defer done()
-
- pkgs, err := snapshot.PackagesForFile(ctx, fh.URI(), TypecheckAll, false)
- if err != nil {
- return nil, err
- }
- if len(pkgs) == 0 {
- return nil, fmt.Errorf("no packages for file %v", fh.URI())
- }
- sort.Slice(pkgs, func(i, j int) bool {
- // Prefer packages with a more complete parse mode.
- if pkgs[i].ParseMode() != pkgs[j].ParseMode() {
- return pkgs[i].ParseMode() > pkgs[j].ParseMode()
- }
- return len(pkgs[i].CompiledGoFiles()) < len(pkgs[j].CompiledGoFiles())
- })
- var findErr error
- for _, pkg := range pkgs {
- pgf, err := pkg.File(fh.URI())
- if err != nil {
- return nil, err
- }
- spn, err := pgf.Mapper.PointSpan(pos)
- if err != nil {
- return nil, err
- }
- rng, err := spn.Range(pgf.Mapper.Converter)
- if err != nil {
- return nil, err
- }
- var ident *IdentifierInfo
- ident, findErr = findIdentifier(ctx, snapshot, pkg, pgf, rng.Start)
- if findErr == nil {
- return ident, nil
- }
- }
- return nil, findErr
-}
-
-// ErrNoIdentFound is error returned when no identifer is found at a particular position
-var ErrNoIdentFound = errors.New("no identifier found")
-
-func findIdentifier(ctx context.Context, snapshot Snapshot, pkg Package, pgf *ParsedGoFile, pos token.Pos) (*IdentifierInfo, error) {
- file := pgf.File
- // Handle import specs separately, as there is no formal position for a
- // package declaration.
- if result, err := importSpec(snapshot, pkg, file, pos); result != nil || err != nil {
- return result, err
- }
- path := pathEnclosingObjNode(file, pos)
- if path == nil {
- return nil, ErrNoIdentFound
- }
-
- qf := Qualifier(file, pkg.GetTypes(), pkg.GetTypesInfo())
-
- ident, _ := path[0].(*ast.Ident)
- if ident == nil {
- return nil, ErrNoIdentFound
- }
- // Special case for package declarations, since they have no
- // corresponding types.Object.
- if ident == file.Name {
- rng, err := posToMappedRange(snapshot, pkg, file.Name.Pos(), file.Name.End())
- if err != nil {
- return nil, err
- }
- var declAST *ast.File
- for _, pgf := range pkg.CompiledGoFiles() {
- if pgf.File.Doc != nil {
- declAST = pgf.File
- }
- }
- // If there's no package documentation, just use current file.
- if declAST == nil {
- declAST = file
- }
- declRng, err := posToMappedRange(snapshot, pkg, declAST.Name.Pos(), declAST.Name.End())
- if err != nil {
- return nil, err
- }
- return &IdentifierInfo{
- Name: file.Name.Name,
- ident: file.Name,
- MappedRange: rng,
- pkg: pkg,
- qf: qf,
- Snapshot: snapshot,
- Declaration: Declaration{
- node: declAST.Name,
- MappedRange: []MappedRange{declRng},
- },
- }, nil
- }
-
- result := &IdentifierInfo{
- Snapshot: snapshot,
- qf: qf,
- pkg: pkg,
- ident: ident,
- enclosing: searchForEnclosing(pkg.GetTypesInfo(), path),
- }
-
- result.Name = result.ident.Name
- var err error
- if result.MappedRange, err = posToMappedRange(snapshot, pkg, result.ident.Pos(), result.ident.End()); err != nil {
- return nil, err
- }
-
- result.Declaration.obj = pkg.GetTypesInfo().ObjectOf(result.ident)
- if result.Declaration.obj == nil {
- // If there was no types.Object for the declaration, there might be an
- // implicit local variable declaration in a type switch.
- if objs, typ := typeSwitchImplicits(pkg, path); len(objs) > 0 {
- // There is no types.Object for the declaration of an implicit local variable,
- // but all of the types.Objects associated with the usages of this variable can be
- // used to connect it back to the declaration.
- // Preserve the first of these objects and treat it as if it were the declaring object.
- result.Declaration.obj = objs[0]
- result.Declaration.typeSwitchImplicit = typ
- } else {
- // Probably a type error.
- return nil, errors.Errorf("%w for ident %v", errNoObjectFound, result.Name)
- }
- }
-
- // Handle builtins separately.
- if result.Declaration.obj.Parent() == types.Universe {
- builtin, err := snapshot.BuiltinFile(ctx)
- if err != nil {
- return nil, err
- }
- builtinObj := builtin.File.Scope.Lookup(result.Name)
- if builtinObj == nil {
- return nil, fmt.Errorf("no builtin object for %s", result.Name)
- }
- decl, ok := builtinObj.Decl.(ast.Node)
- if !ok {
- return nil, errors.Errorf("no declaration for %s", result.Name)
- }
- result.Declaration.node = decl
- if typeSpec, ok := decl.(*ast.TypeSpec); ok {
- // Find the GenDecl (which has the doc comments) for the TypeSpec.
- result.Declaration.fullDecl = findGenDecl(builtin.File, typeSpec)
- }
-
- // The builtin package isn't in the dependency graph, so the usual
- // utilities won't work here.
- rng := NewMappedRange(snapshot.FileSet(), builtin.Mapper, decl.Pos(), decl.Pos()+token.Pos(len(result.Name)))
- result.Declaration.MappedRange = append(result.Declaration.MappedRange, rng)
- return result, nil
- }
-
- // (error).Error is a special case of builtin. Lots of checks to confirm
- // that this is the builtin Error.
- if obj := result.Declaration.obj; obj.Parent() == nil && obj.Pkg() == nil && obj.Name() == "Error" {
- if _, ok := obj.Type().(*types.Signature); ok {
- builtin, err := snapshot.BuiltinFile(ctx)
- if err != nil {
- return nil, err
- }
- // Look up "error" and then navigate to its only method.
- // The Error method does not appear in the builtin package's scope.log.Pri
- const errorName = "error"
- builtinObj := builtin.File.Scope.Lookup(errorName)
- if builtinObj == nil {
- return nil, fmt.Errorf("no builtin object for %s", errorName)
- }
- decl, ok := builtinObj.Decl.(ast.Node)
- if !ok {
- return nil, errors.Errorf("no declaration for %s", errorName)
- }
- spec, ok := decl.(*ast.TypeSpec)
- if !ok {
- return nil, fmt.Errorf("no type spec for %s", errorName)
- }
- iface, ok := spec.Type.(*ast.InterfaceType)
- if !ok {
- return nil, fmt.Errorf("%s is not an interface", errorName)
- }
- if iface.Methods.NumFields() != 1 {
- return nil, fmt.Errorf("expected 1 method for %s, got %v", errorName, iface.Methods.NumFields())
- }
- method := iface.Methods.List[0]
- if len(method.Names) != 1 {
- return nil, fmt.Errorf("expected 1 name for %v, got %v", method, len(method.Names))
- }
- name := method.Names[0].Name
- result.Declaration.node = method
- rng := NewMappedRange(snapshot.FileSet(), builtin.Mapper, method.Pos(), method.Pos()+token.Pos(len(name)))
- result.Declaration.MappedRange = append(result.Declaration.MappedRange, rng)
- return result, nil
- }
- }
-
- // If the original position was an embedded field, we want to jump
- // to the field's type definition, not the field's definition.
- if v, ok := result.Declaration.obj.(*types.Var); ok && v.Embedded() {
- // types.Info.Uses contains the embedded field's *types.TypeName.
- if typeName := pkg.GetTypesInfo().Uses[ident]; typeName != nil {
- result.Declaration.obj = typeName
- }
- }
-
- rng, err := objToMappedRange(snapshot, pkg, result.Declaration.obj)
- if err != nil {
- return nil, err
- }
- result.Declaration.MappedRange = append(result.Declaration.MappedRange, rng)
-
- declPkg, err := FindPackageFromPos(ctx, snapshot, result.Declaration.obj.Pos())
- if err != nil {
- return nil, err
- }
- if result.Declaration.node, err = snapshot.PosToDecl(ctx, declPkg, result.Declaration.obj.Pos()); err != nil {
- return nil, err
- }
- // Ensure that we have the full declaration, in case the declaration was
- // parsed in ParseExported and therefore could be missing information.
- if result.Declaration.fullDecl, err = fullNode(snapshot, result.Declaration.obj, declPkg); err != nil {
- return nil, err
- }
- typ := pkg.GetTypesInfo().TypeOf(result.ident)
- if typ == nil {
- return result, nil
- }
-
- result.Inferred = inferredSignature(pkg.GetTypesInfo(), ident)
-
- result.Type.Object = typeToObject(typ)
- if result.Type.Object != nil {
- // Identifiers with the type "error" are a special case with no position.
- if hasErrorType(result.Type.Object) {
- return result, nil
- }
- if result.Type.MappedRange, err = objToMappedRange(snapshot, pkg, result.Type.Object); err != nil {
- return nil, err
- }
- }
- return result, nil
-}
-
-// findGenDecl determines the parent ast.GenDecl for a given ast.Spec.
-func findGenDecl(f *ast.File, spec ast.Spec) *ast.GenDecl {
- for _, decl := range f.Decls {
- if genDecl, ok := decl.(*ast.GenDecl); ok {
- if genDecl.Pos() <= spec.Pos() && genDecl.End() >= spec.End() {
- return genDecl
- }
- }
- }
- return nil
-}
-
-// fullNode tries to extract the full spec corresponding to obj's declaration.
-// If the package was not parsed in full, the declaration file will be
-// re-parsed to ensure it has complete syntax.
-func fullNode(snapshot Snapshot, obj types.Object, pkg Package) (ast.Decl, error) {
- // declaration in a different package... make sure we have full AST information.
- tok := snapshot.FileSet().File(obj.Pos())
- uri := span.URIFromPath(tok.Name())
- pgf, err := pkg.File(uri)
- if err != nil {
- return nil, err
- }
- file := pgf.File
- pos := obj.Pos()
- if pgf.Mode != ParseFull {
- fset := snapshot.FileSet()
- file2, _ := parser.ParseFile(fset, tok.Name(), pgf.Src, parser.AllErrors|parser.ParseComments)
- if file2 != nil {
- offset, err := Offset(tok, obj.Pos())
- if err != nil {
- return nil, err
- }
- file = file2
- tok2 := fset.File(file2.Pos())
- pos = tok2.Pos(offset)
- }
- }
- path, _ := astutil.PathEnclosingInterval(file, pos, pos)
- for _, n := range path {
- if decl, ok := n.(ast.Decl); ok {
- return decl, nil
- }
- }
- return nil, nil
-}
-
-// inferredSignature determines the resolved non-generic signature for an
-// identifier in an instantiation expression.
-//
-// If no such signature exists, it returns nil.
-func inferredSignature(info *types.Info, id *ast.Ident) *types.Signature {
- inst := typeparams.GetInstances(info)[id]
- sig, _ := inst.Type.(*types.Signature)
- return sig
-}
-
-func searchForEnclosing(info *types.Info, path []ast.Node) *types.TypeName {
- for _, n := range path {
- switch n := n.(type) {
- case *ast.SelectorExpr:
- if sel, ok := info.Selections[n]; ok {
- recv := Deref(sel.Recv())
-
- // Keep track of the last exported type seen.
- var exported *types.TypeName
- if named, ok := recv.(*types.Named); ok && named.Obj().Exported() {
- exported = named.Obj()
- }
- // We don't want the last element, as that's the field or
- // method itself.
- for _, index := range sel.Index()[:len(sel.Index())-1] {
- if r, ok := recv.Underlying().(*types.Struct); ok {
- recv = Deref(r.Field(index).Type())
- if named, ok := recv.(*types.Named); ok && named.Obj().Exported() {
- exported = named.Obj()
- }
- }
- }
- return exported
- }
- case *ast.CompositeLit:
- if t, ok := info.Types[n]; ok {
- if named, _ := t.Type.(*types.Named); named != nil {
- return named.Obj()
- }
- }
- case *ast.TypeSpec:
- if _, ok := n.Type.(*ast.StructType); ok {
- if t, ok := info.Defs[n.Name]; ok {
- if tname, _ := t.(*types.TypeName); tname != nil {
- return tname
- }
- }
- }
- }
- }
- return nil
-}
-
-func typeToObject(typ types.Type) types.Object {
- switch typ := typ.(type) {
- case *types.Named:
- return typ.Obj()
- case *types.Pointer:
- return typeToObject(typ.Elem())
- case *types.Array:
- return typeToObject(typ.Elem())
- case *types.Slice:
- return typeToObject(typ.Elem())
- case *types.Chan:
- return typeToObject(typ.Elem())
- case *types.Signature:
- // Try to find a return value of a named type. If there's only one
- // such value, jump to its type definition.
- var res types.Object
-
- results := typ.Results()
- for i := 0; i < results.Len(); i++ {
- obj := typeToObject(results.At(i).Type())
- if obj == nil || hasErrorType(obj) {
- // Skip builtins.
- continue
- }
- if res != nil {
- // The function/method must have only one return value of a named type.
- return nil
- }
-
- res = obj
- }
- return res
- default:
- return nil
- }
-}
-
-func hasErrorType(obj types.Object) bool {
- return types.IsInterface(obj.Type()) && obj.Pkg() == nil && obj.Name() == "error"
-}
-
-// importSpec handles positions inside of an *ast.ImportSpec.
-func importSpec(snapshot Snapshot, pkg Package, file *ast.File, pos token.Pos) (*IdentifierInfo, error) {
- var imp *ast.ImportSpec
- for _, spec := range file.Imports {
- if spec.Path.Pos() <= pos && pos < spec.Path.End() {
- imp = spec
- }
- }
- if imp == nil {
- return nil, nil
- }
- importPath, err := strconv.Unquote(imp.Path.Value)
- if err != nil {
- return nil, errors.Errorf("import path not quoted: %s (%v)", imp.Path.Value, err)
- }
- result := &IdentifierInfo{
- Snapshot: snapshot,
- Name: importPath,
- pkg: pkg,
- }
- if result.MappedRange, err = posToMappedRange(snapshot, pkg, imp.Path.Pos(), imp.Path.End()); err != nil {
- return nil, err
- }
- // Consider the "declaration" of an import spec to be the imported package.
- importedPkg, err := pkg.GetImport(importPath)
- if err != nil {
- return nil, err
- }
- // Return all of the files in the package as the definition of the import spec.
- for _, dst := range importedPkg.GetSyntax() {
- rng, err := posToMappedRange(snapshot, pkg, dst.Pos(), dst.End())
- if err != nil {
- return nil, err
- }
- result.Declaration.MappedRange = append(result.Declaration.MappedRange, rng)
- }
-
- result.Declaration.node = imp
- return result, nil
-}
-
-// typeSwitchImplicits returns all the implicit type switch objects that
-// correspond to the leaf *ast.Ident. It also returns the original type
-// associated with the identifier (outside of a case clause).
-func typeSwitchImplicits(pkg Package, path []ast.Node) ([]types.Object, types.Type) {
- ident, _ := path[0].(*ast.Ident)
- if ident == nil {
- return nil, nil
- }
-
- var (
- ts *ast.TypeSwitchStmt
- assign *ast.AssignStmt
- cc *ast.CaseClause
- obj = pkg.GetTypesInfo().ObjectOf(ident)
- )
-
- // Walk our ancestors to determine if our leaf ident refers to a
- // type switch variable, e.g. the "a" from "switch a := b.(type)".
-Outer:
- for i := 1; i < len(path); i++ {
- switch n := path[i].(type) {
- case *ast.AssignStmt:
- // Check if ident is the "a" in "a := foo.(type)". The "a" in
- // this case has no types.Object, so check for ident equality.
- if len(n.Lhs) == 1 && n.Lhs[0] == ident {
- assign = n
- }
- case *ast.CaseClause:
- // Check if ident is a use of "a" within a case clause. Each
- // case clause implicitly maps "a" to a different types.Object,
- // so check if ident's object is the case clause's implicit
- // object.
- if obj != nil && pkg.GetTypesInfo().Implicits[n] == obj {
- cc = n
- }
- case *ast.TypeSwitchStmt:
- // Look for the type switch that owns our previously found
- // *ast.AssignStmt or *ast.CaseClause.
- if n.Assign == assign {
- ts = n
- break Outer
- }
-
- for _, stmt := range n.Body.List {
- if stmt == cc {
- ts = n
- break Outer
- }
- }
- }
- }
- if ts == nil {
- return nil, nil
- }
- // Our leaf ident refers to a type switch variable. Fan out to the
- // type switch's implicit case clause objects.
- var objs []types.Object
- for _, cc := range ts.Body.List {
- if ccObj := pkg.GetTypesInfo().Implicits[cc]; ccObj != nil {
- objs = append(objs, ccObj)
- }
- }
- // The right-hand side of a type switch should only have one
- // element, and we need to track its type in order to generate
- // hover information for implicit type switch variables.
- var typ types.Type
- if assign, ok := ts.Assign.(*ast.AssignStmt); ok && len(assign.Rhs) == 1 {
- if rhs := assign.Rhs[0].(*ast.TypeAssertExpr); ok {
- typ = pkg.GetTypesInfo().TypeOf(rhs.X)
- }
- }
- return objs, typ
-}
diff --git a/internal/lsp/source/identifier_test.go b/internal/lsp/source/identifier_test.go
deleted file mode 100644
index 9bbdf58de..000000000
--- a/internal/lsp/source/identifier_test.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package source
-
-import (
- "bytes"
- "go/ast"
- "go/parser"
- "go/token"
- "go/types"
- "testing"
-)
-
-func TestSearchForEnclosing(t *testing.T) {
- tests := []struct {
- desc string
- // For convenience, consider the first occurrence of the identifier "X" in
- // src.
- src string
- // By convention, "" means no type found.
- wantTypeName string
- }{
- {
- desc: "self enclosing",
- src: `package a; type X struct {}`,
- wantTypeName: "X",
- },
- {
- // TODO(rFindley): is this correct, or do we want to resolve I2 here?
- desc: "embedded interface in interface",
- src: `package a; var y = i1.X; type i1 interface {I2}; type I2 interface{X()}`,
- wantTypeName: "",
- },
- {
- desc: "embedded interface in struct",
- src: `package a; var y = t.X; type t struct {I}; type I interface{X()}`,
- wantTypeName: "I",
- },
- {
- desc: "double embedding",
- src: `package a; var y = t1.X; type t1 struct {t2}; type t2 struct {I}; type I interface{X()}`,
- wantTypeName: "I",
- },
- {
- desc: "struct field",
- src: `package a; type T struct { X int }`,
- wantTypeName: "T",
- },
- {
- desc: "nested struct field",
- src: `package a; type T struct { E struct { X int } }`,
- wantTypeName: "T",
- },
- {
- desc: "slice entry",
- src: `package a; type T []int; var S = T{X}; var X int = 2`,
- wantTypeName: "T",
- },
- {
- desc: "struct pointer literal",
- src: `package a; type T struct {i int}; var L = &T{X}; const X = 2`,
- wantTypeName: "T",
- },
- }
-
- for _, test := range tests {
- test := test
- t.Run(test.desc, func(t *testing.T) {
- fset := token.NewFileSet()
- file, err := parser.ParseFile(fset, "a.go", test.src, parser.AllErrors)
- if err != nil {
- t.Fatal(err)
- }
- column := 1 + bytes.IndexRune([]byte(test.src), 'X')
- pos := posAt(1, column, fset, "a.go")
- path := pathEnclosingObjNode(file, pos)
- if path == nil {
- t.Fatalf("no ident found at (1, %d)", column)
- }
- info := newInfo()
- if _, err = (*types.Config)(nil).Check("p", fset, []*ast.File{file}, info); err != nil {
- t.Fatal(err)
- }
- obj := searchForEnclosing(info, path)
- if obj == nil {
- if test.wantTypeName != "" {
- t.Errorf("searchForEnclosing(...) = <nil>, want %q", test.wantTypeName)
- }
- return
- }
- if got := obj.Name(); got != test.wantTypeName {
- t.Errorf("searchForEnclosing(...) = %q, want %q", got, test.wantTypeName)
- }
- })
- }
-}
-
-// posAt returns the token.Pos corresponding to the 1-based (line, column)
-// coordinates in the file fname of fset.
-func posAt(line, column int, fset *token.FileSet, fname string) token.Pos {
- var tok *token.File
- fset.Iterate(func(f *token.File) bool {
- if f.Name() == fname {
- tok = f
- return false
- }
- return true
- })
- if tok == nil {
- return token.NoPos
- }
- start := tok.LineStart(line)
- return start + token.Pos(column-1)
-}
-
-// newInfo returns a types.Info with all maps populated.
-func newInfo() *types.Info {
- return &types.Info{
- Types: make(map[ast.Expr]types.TypeAndValue),
- Defs: make(map[*ast.Ident]types.Object),
- Uses: make(map[*ast.Ident]types.Object),
- Implicits: make(map[ast.Node]types.Object),
- Selections: make(map[*ast.SelectorExpr]*types.Selection),
- Scopes: make(map[ast.Node]*types.Scope),
- }
-}
diff --git a/internal/lsp/source/implementation.go b/internal/lsp/source/implementation.go
deleted file mode 100644
index b53d7c994..000000000
--- a/internal/lsp/source/implementation.go
+++ /dev/null
@@ -1,446 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package source
-
-import (
- "context"
- "errors"
- "fmt"
- "go/ast"
- "go/token"
- "go/types"
- "sort"
-
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/span"
- "golang.org/x/xerrors"
-)
-
-func Implementation(ctx context.Context, snapshot Snapshot, f FileHandle, pp protocol.Position) ([]protocol.Location, error) {
- ctx, done := event.Start(ctx, "source.Implementation")
- defer done()
-
- impls, err := implementations(ctx, snapshot, f, pp)
- if err != nil {
- return nil, err
- }
- var locations []protocol.Location
- for _, impl := range impls {
- if impl.pkg == nil || len(impl.pkg.CompiledGoFiles()) == 0 {
- continue
- }
- rng, err := objToMappedRange(snapshot, impl.pkg, impl.obj)
- if err != nil {
- return nil, err
- }
- pr, err := rng.Range()
- if err != nil {
- return nil, err
- }
- locations = append(locations, protocol.Location{
- URI: protocol.URIFromSpanURI(rng.URI()),
- Range: pr,
- })
- }
- sort.Slice(locations, func(i, j int) bool {
- li, lj := locations[i], locations[j]
- if li.URI == lj.URI {
- return protocol.CompareRange(li.Range, lj.Range) < 0
- }
- return li.URI < lj.URI
- })
- return locations, nil
-}
-
-var ErrNotAType = errors.New("not a type name or method")
-
-// implementations returns the concrete implementations of the specified
-// interface, or the interfaces implemented by the specified concrete type.
-func implementations(ctx context.Context, s Snapshot, f FileHandle, pp protocol.Position) ([]qualifiedObject, error) {
- var (
- impls []qualifiedObject
- seen = make(map[token.Position]bool)
- fset = s.FileSet()
- )
-
- qos, err := qualifiedObjsAtProtocolPos(ctx, s, f.URI(), pp)
- if err != nil {
- return nil, err
- }
- for _, qo := range qos {
- var (
- queryType types.Type
- queryMethod *types.Func
- )
-
- switch obj := qo.obj.(type) {
- case *types.Func:
- queryMethod = obj
- if recv := obj.Type().(*types.Signature).Recv(); recv != nil {
- queryType = ensurePointer(recv.Type())
- }
- case *types.TypeName:
- queryType = ensurePointer(obj.Type())
- }
-
- if queryType == nil {
- return nil, ErrNotAType
- }
-
- if types.NewMethodSet(queryType).Len() == 0 {
- return nil, nil
- }
-
- // Find all named types, even local types (which can have methods
- // due to promotion).
- var (
- allNamed []*types.Named
- pkgs = make(map[*types.Package]Package)
- )
- knownPkgs, err := s.KnownPackages(ctx)
- if err != nil {
- return nil, err
- }
- for _, pkg := range knownPkgs {
- pkgs[pkg.GetTypes()] = pkg
- info := pkg.GetTypesInfo()
- for _, obj := range info.Defs {
- obj, ok := obj.(*types.TypeName)
- // We ignore aliases 'type M = N' to avoid duplicate reporting
- // of the Named type N.
- if !ok || obj.IsAlias() {
- continue
- }
- if named, ok := obj.Type().(*types.Named); ok {
- allNamed = append(allNamed, named)
- }
- }
- }
-
- // Find all the named types that match our query.
- for _, named := range allNamed {
- var (
- candObj types.Object = named.Obj()
- candType = ensurePointer(named)
- )
-
- if !concreteImplementsIntf(candType, queryType) {
- continue
- }
-
- ms := types.NewMethodSet(candType)
- if ms.Len() == 0 {
- // Skip empty interfaces.
- continue
- }
-
- // If client queried a method, look up corresponding candType method.
- if queryMethod != nil {
- sel := ms.Lookup(queryMethod.Pkg(), queryMethod.Name())
- if sel == nil {
- continue
- }
- candObj = sel.Obj()
- }
-
- pos := fset.Position(candObj.Pos())
- if candObj == queryMethod || seen[pos] {
- continue
- }
-
- seen[pos] = true
-
- impls = append(impls, qualifiedObject{
- obj: candObj,
- pkg: pkgs[candObj.Pkg()],
- })
- }
- }
-
- return impls, nil
-}
-
-// concreteImplementsIntf returns true if a is an interface type implemented by
-// concrete type b, or vice versa.
-func concreteImplementsIntf(a, b types.Type) bool {
- aIsIntf, bIsIntf := IsInterface(a), IsInterface(b)
-
- // Make sure exactly one is an interface type.
- if aIsIntf == bIsIntf {
- return false
- }
-
- // Rearrange if needed so "a" is the concrete type.
- if aIsIntf {
- a, b = b, a
- }
-
- return types.AssignableTo(a, b)
-}
-
-// ensurePointer wraps T in a *types.Pointer if T is a named, non-interface
-// type. This is useful to make sure you consider a named type's full method
-// set.
-func ensurePointer(T types.Type) types.Type {
- if _, ok := T.(*types.Named); ok && !IsInterface(T) {
- return types.NewPointer(T)
- }
-
- return T
-}
-
-type qualifiedObject struct {
- obj types.Object
-
- // pkg is the Package that contains obj's definition.
- pkg Package
-
- // node is the *ast.Ident or *ast.ImportSpec we followed to find obj, if any.
- node ast.Node
-
- // sourcePkg is the Package that contains node, if any.
- sourcePkg Package
-}
-
-var (
- errBuiltin = errors.New("builtin object")
- errNoObjectFound = errors.New("no object found")
-)
-
-// qualifiedObjsAtProtocolPos returns info for all the type.Objects
-// referenced at the given position. An object will be returned for
-// every package that the file belongs to, in every typechecking mode
-// applicable.
-func qualifiedObjsAtProtocolPos(ctx context.Context, s Snapshot, uri span.URI, pp protocol.Position) ([]qualifiedObject, error) {
- pkgs, err := s.PackagesForFile(ctx, uri, TypecheckAll, false)
- if err != nil {
- return nil, err
- }
- if len(pkgs) == 0 {
- return nil, errNoObjectFound
- }
- pkg := pkgs[0]
- pgf, err := pkg.File(uri)
- if err != nil {
- return nil, err
- }
- spn, err := pgf.Mapper.PointSpan(pp)
- if err != nil {
- return nil, err
- }
- rng, err := spn.Range(pgf.Mapper.Converter)
- if err != nil {
- return nil, err
- }
- offset, err := Offset(pgf.Tok, rng.Start)
- if err != nil {
- return nil, err
- }
- return qualifiedObjsAtLocation(ctx, s, objSearchKey{uri, offset}, map[objSearchKey]bool{})
-}
-
-type objSearchKey struct {
- uri span.URI
- offset int
-}
-
-// qualifiedObjsAtLocation finds all objects referenced at offset in uri, across
-// all packages in the snapshot.
-func qualifiedObjsAtLocation(ctx context.Context, s Snapshot, key objSearchKey, seen map[objSearchKey]bool) ([]qualifiedObject, error) {
- if seen[key] {
- return nil, nil
- }
- seen[key] = true
-
- // We search for referenced objects starting with all packages containing the
- // current location, and then repeating the search for every distinct object
- // location discovered.
- //
- // In the common case, there should be at most one additional location to
- // consider: the definition of the object referenced by the location. But we
- // try to be comprehensive in case we ever support variations on build
- // constraints.
-
- pkgs, err := s.PackagesForFile(ctx, key.uri, TypecheckAll, false)
- if err != nil {
- return nil, err
- }
-
- // report objects in the order we encounter them. This ensures that the first
- // result is at the cursor...
- var qualifiedObjs []qualifiedObject
- // ...but avoid duplicates.
- seenObjs := map[types.Object]bool{}
-
- for _, searchpkg := range pkgs {
- pgf, err := searchpkg.File(key.uri)
- if err != nil {
- return nil, err
- }
- pos := pgf.Tok.Pos(key.offset)
- path := pathEnclosingObjNode(pgf.File, pos)
- if path == nil {
- continue
- }
- var objs []types.Object
- switch leaf := path[0].(type) {
- case *ast.Ident:
- // If leaf represents an implicit type switch object or the type
- // switch "assign" variable, expand to all of the type switch's
- // implicit objects.
- if implicits, _ := typeSwitchImplicits(searchpkg, path); len(implicits) > 0 {
- objs = append(objs, implicits...)
- } else {
- obj := searchpkg.GetTypesInfo().ObjectOf(leaf)
- if obj == nil {
- return nil, xerrors.Errorf("%w for %q", errNoObjectFound, leaf.Name)
- }
- objs = append(objs, obj)
- }
- case *ast.ImportSpec:
- // Look up the implicit *types.PkgName.
- obj := searchpkg.GetTypesInfo().Implicits[leaf]
- if obj == nil {
- return nil, xerrors.Errorf("%w for import %q", errNoObjectFound, ImportPath(leaf))
- }
- objs = append(objs, obj)
- }
- // Get all of the transitive dependencies of the search package.
- pkgs := make(map[*types.Package]Package)
- var addPkg func(pkg Package)
- addPkg = func(pkg Package) {
- pkgs[pkg.GetTypes()] = pkg
- for _, imp := range pkg.Imports() {
- if _, ok := pkgs[imp.GetTypes()]; !ok {
- addPkg(imp)
- }
- }
- }
- addPkg(searchpkg)
- for _, obj := range objs {
- if obj.Parent() == types.Universe {
- return nil, xerrors.Errorf("%q: %w", obj.Name(), errBuiltin)
- }
- pkg, ok := pkgs[obj.Pkg()]
- if !ok {
- event.Error(ctx, fmt.Sprintf("no package for obj %s: %v", obj, obj.Pkg()), err)
- continue
- }
- qualifiedObjs = append(qualifiedObjs, qualifiedObject{
- obj: obj,
- pkg: pkg,
- sourcePkg: searchpkg,
- node: path[0],
- })
- seenObjs[obj] = true
-
- // If the qualified object is in another file (or more likely, another
- // package), it's possible that there is another copy of it in a package
- // that we haven't searched, e.g. a test variant. See golang/go#47564.
- //
- // In order to be sure we've considered all packages, call
- // qualifiedObjsAtLocation recursively for all locations we encounter. We
- // could probably be more precise here, only continuing the search if obj
- // is in another package, but this should be good enough to find all
- // uses.
-
- pos := obj.Pos()
- var uri span.URI
- offset := -1
- for _, pgf := range pkg.CompiledGoFiles() {
- if pgf.Tok.Base() <= int(pos) && int(pos) <= pgf.Tok.Base()+pgf.Tok.Size() {
- var err error
- offset, err = Offset(pgf.Tok, pos)
- if err != nil {
- return nil, err
- }
- uri = pgf.URI
- }
- }
- if offset >= 0 {
- otherObjs, err := qualifiedObjsAtLocation(ctx, s, objSearchKey{uri, offset}, seen)
- if err != nil {
- return nil, err
- }
- for _, other := range otherObjs {
- if !seenObjs[other.obj] {
- qualifiedObjs = append(qualifiedObjs, other)
- seenObjs[other.obj] = true
- }
- }
- } else {
- return nil, fmt.Errorf("missing file for position of %q in %q", obj.Name(), obj.Pkg().Name())
- }
- }
- }
- // Return an error if no objects were found since callers will assume that
- // the slice has at least 1 element.
- if len(qualifiedObjs) == 0 {
- return nil, errNoObjectFound
- }
- return qualifiedObjs, nil
-}
-
-// pathEnclosingObjNode returns the AST path to the object-defining
-// node associated with pos. "Object-defining" means either an
-// *ast.Ident mapped directly to a types.Object or an ast.Node mapped
-// implicitly to a types.Object.
-func pathEnclosingObjNode(f *ast.File, pos token.Pos) []ast.Node {
- var (
- path []ast.Node
- found bool
- )
-
- ast.Inspect(f, func(n ast.Node) bool {
- if found {
- return false
- }
-
- if n == nil {
- path = path[:len(path)-1]
- return false
- }
-
- path = append(path, n)
-
- switch n := n.(type) {
- case *ast.Ident:
- // Include the position directly after identifier. This handles
- // the common case where the cursor is right after the
- // identifier the user is currently typing. Previously we
- // handled this by calling astutil.PathEnclosingInterval twice,
- // once for "pos" and once for "pos-1".
- found = n.Pos() <= pos && pos <= n.End()
- case *ast.ImportSpec:
- if n.Path.Pos() <= pos && pos < n.Path.End() {
- found = true
- // If import spec has a name, add name to path even though
- // position isn't in the name.
- if n.Name != nil {
- path = append(path, n.Name)
- }
- }
- case *ast.StarExpr:
- // Follow star expressions to the inner identifier.
- if pos == n.Star {
- pos = n.X.Pos()
- }
- }
-
- return !found
- })
-
- if len(path) == 0 {
- return nil
- }
-
- // Reverse path so leaf is first element.
- for i := 0; i < len(path)/2; i++ {
- path[i], path[len(path)-1-i] = path[len(path)-1-i], path[i]
- }
-
- return path
-}
diff --git a/internal/lsp/source/known_packages.go b/internal/lsp/source/known_packages.go
deleted file mode 100644
index 49ede162b..000000000
--- a/internal/lsp/source/known_packages.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package source
-
-import (
- "context"
- "sort"
- "strings"
- "sync"
- "time"
-
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/imports"
- errors "golang.org/x/xerrors"
-)
-
-// KnownPackages returns a list of all known packages
-// in the package graph that could potentially be imported
-// by the given file.
-func KnownPackages(ctx context.Context, snapshot Snapshot, fh VersionedFileHandle) ([]string, error) {
- pkg, pgf, err := GetParsedFile(ctx, snapshot, fh, NarrowestPackage)
- if err != nil {
- return nil, errors.Errorf("GetParsedFile: %w", err)
- }
- alreadyImported := map[string]struct{}{}
- for _, imp := range pgf.File.Imports {
- alreadyImported[imp.Path.Value] = struct{}{}
- }
- pkgs, err := snapshot.CachedImportPaths(ctx)
- if err != nil {
- return nil, err
- }
- var (
- seen = make(map[string]struct{})
- paths []string
- )
- for path, knownPkg := range pkgs {
- gofiles := knownPkg.CompiledGoFiles()
- if len(gofiles) == 0 || gofiles[0].File.Name == nil {
- continue
- }
- pkgName := gofiles[0].File.Name.Name
- // package main cannot be imported
- if pkgName == "main" {
- continue
- }
- // test packages cannot be imported
- if knownPkg.ForTest() != "" {
- continue
- }
- // no need to import what the file already imports
- if _, ok := alreadyImported[path]; ok {
- continue
- }
- // snapshot.KnownPackages could have multiple versions of a pkg
- if _, ok := seen[path]; ok {
- continue
- }
- seen[path] = struct{}{}
- // make sure internal packages are importable by the file
- if !IsValidImport(pkg.PkgPath(), path) {
- continue
- }
- // naive check on cyclical imports
- if isDirectlyCyclical(pkg, knownPkg) {
- continue
- }
- paths = append(paths, path)
- seen[path] = struct{}{}
- }
- err = snapshot.RunProcessEnvFunc(ctx, func(o *imports.Options) error {
- var mu sync.Mutex
- ctx, cancel := context.WithTimeout(ctx, time.Millisecond*80)
- defer cancel()
- return imports.GetAllCandidates(ctx, func(ifix imports.ImportFix) {
- mu.Lock()
- defer mu.Unlock()
- if _, ok := seen[ifix.StmtInfo.ImportPath]; ok {
- return
- }
- paths = append(paths, ifix.StmtInfo.ImportPath)
- }, "", pgf.URI.Filename(), pkg.GetTypes().Name(), o.Env)
- })
- if err != nil {
- // if an error occurred, we stil have a decent list we can
- // show to the user through snapshot.CachedImportPaths
- event.Error(ctx, "imports.GetAllCandidates", err)
- }
- sort.Slice(paths, func(i, j int) bool {
- importI, importJ := paths[i], paths[j]
- iHasDot := strings.Contains(importI, ".")
- jHasDot := strings.Contains(importJ, ".")
- if iHasDot && !jHasDot {
- return false
- }
- if jHasDot && !iHasDot {
- return true
- }
- return importI < importJ
- })
- return paths, nil
-}
-
-// isDirectlyCyclical checks if imported directly imports pkg.
-// It does not (yet) offer a full cyclical check because showing a user
-// a list of importable packages already generates a very large list
-// and having a few false positives in there could be worth the
-// performance snappiness.
-func isDirectlyCyclical(pkg, imported Package) bool {
- for _, imp := range imported.Imports() {
- if imp.PkgPath() == pkg.PkgPath() {
- return true
- }
- }
- return false
-}
diff --git a/internal/lsp/source/offset_test.go b/internal/lsp/source/offset_test.go
deleted file mode 100644
index 10076773a..000000000
--- a/internal/lsp/source/offset_test.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package source_test
-
-import (
- "go/token"
- "go/types"
- "testing"
-
- "golang.org/x/tools/go/packages"
-)
-
-// This test reports any unexpected uses of (*go/token.File).Offset within
-// the gopls codebase to ensure that we don't check in more code that is prone
-// to panicking. All calls to (*go/token.File).Offset should be replaced with
-// calls to source.Offset.
-func TestTokenOffset(t *testing.T) {
- fset := token.NewFileSet()
- pkgs, err := packages.Load(&packages.Config{
- Fset: fset,
- Mode: packages.NeedName | packages.NeedModule | packages.NeedCompiledGoFiles | packages.NeedTypes | packages.NeedTypesInfo | packages.NeedSyntax | packages.NeedImports | packages.NeedDeps,
- }, "go/token", "golang.org/x/tools/internal/lsp/...", "golang.org/x/tools/gopls/...")
- if err != nil {
- t.Fatal(err)
- }
- var tokPkg *packages.Package
- for _, pkg := range pkgs {
- if pkg.PkgPath == "go/token" {
- tokPkg = pkg
- break
- }
- }
- typname, ok := tokPkg.Types.Scope().Lookup("File").(*types.TypeName)
- if !ok {
- t.Fatal("expected go/token.File typename, got none")
- }
- named, ok := typname.Type().(*types.Named)
- if !ok {
- t.Fatalf("expected named type, got %T", typname.Type)
- }
- var offset *types.Func
- for i := 0; i < named.NumMethods(); i++ {
- meth := named.Method(i)
- if meth.Name() == "Offset" {
- offset = meth
- break
- }
- }
- for _, pkg := range pkgs {
- for ident, obj := range pkg.TypesInfo.Uses {
- if ident.Name != "Offset" {
- continue
- }
- if pkg.PkgPath == "go/token" {
- continue
- }
- if !types.Identical(offset.Type(), obj.Type()) {
- continue
- }
- // The only permitted use is in golang.org/x/tools/internal/lsp/source.Offset,
- // so check the enclosing function.
- sourceOffset := pkg.Types.Scope().Lookup("Offset").(*types.Func)
- if sourceOffset.Pos() <= ident.Pos() && ident.Pos() <= sourceOffset.Scope().End() {
- continue // accepted usage
- }
- t.Errorf(`%s: Unexpected use of (*go/token.File).Offset. Please use golang.org/x/tools/internal/lsp/source.Offset instead.`, fset.Position(ident.Pos()))
- }
- }
-}
diff --git a/internal/lsp/source/options.go b/internal/lsp/source/options.go
deleted file mode 100644
index 8e262c63b..000000000
--- a/internal/lsp/source/options.go
+++ /dev/null
@@ -1,1449 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package source
-
-import (
- "context"
- "fmt"
- "io"
- "path/filepath"
- "regexp"
- "strings"
- "sync"
- "time"
-
- "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/analysis/passes/asmdecl"
- "golang.org/x/tools/go/analysis/passes/assign"
- "golang.org/x/tools/go/analysis/passes/atomic"
- "golang.org/x/tools/go/analysis/passes/atomicalign"
- "golang.org/x/tools/go/analysis/passes/bools"
- "golang.org/x/tools/go/analysis/passes/buildtag"
- "golang.org/x/tools/go/analysis/passes/cgocall"
- "golang.org/x/tools/go/analysis/passes/composite"
- "golang.org/x/tools/go/analysis/passes/copylock"
- "golang.org/x/tools/go/analysis/passes/deepequalerrors"
- "golang.org/x/tools/go/analysis/passes/errorsas"
- "golang.org/x/tools/go/analysis/passes/fieldalignment"
- "golang.org/x/tools/go/analysis/passes/httpresponse"
- "golang.org/x/tools/go/analysis/passes/ifaceassert"
- "golang.org/x/tools/go/analysis/passes/loopclosure"
- "golang.org/x/tools/go/analysis/passes/lostcancel"
- "golang.org/x/tools/go/analysis/passes/nilfunc"
- "golang.org/x/tools/go/analysis/passes/nilness"
- "golang.org/x/tools/go/analysis/passes/printf"
- "golang.org/x/tools/go/analysis/passes/shadow"
- "golang.org/x/tools/go/analysis/passes/shift"
- "golang.org/x/tools/go/analysis/passes/sortslice"
- "golang.org/x/tools/go/analysis/passes/stdmethods"
- "golang.org/x/tools/go/analysis/passes/stringintconv"
- "golang.org/x/tools/go/analysis/passes/structtag"
- "golang.org/x/tools/go/analysis/passes/testinggoroutine"
- "golang.org/x/tools/go/analysis/passes/tests"
- "golang.org/x/tools/go/analysis/passes/unmarshal"
- "golang.org/x/tools/go/analysis/passes/unreachable"
- "golang.org/x/tools/go/analysis/passes/unsafeptr"
- "golang.org/x/tools/go/analysis/passes/unusedresult"
- "golang.org/x/tools/go/analysis/passes/unusedwrite"
- "golang.org/x/tools/go/packages"
- "golang.org/x/tools/internal/lsp/analysis/fillreturns"
- "golang.org/x/tools/internal/lsp/analysis/fillstruct"
- "golang.org/x/tools/internal/lsp/analysis/infertypeargs"
- "golang.org/x/tools/internal/lsp/analysis/nonewvars"
- "golang.org/x/tools/internal/lsp/analysis/noresultvalues"
- "golang.org/x/tools/internal/lsp/analysis/simplifycompositelit"
- "golang.org/x/tools/internal/lsp/analysis/simplifyrange"
- "golang.org/x/tools/internal/lsp/analysis/simplifyslice"
- "golang.org/x/tools/internal/lsp/analysis/stubmethods"
- "golang.org/x/tools/internal/lsp/analysis/undeclaredname"
- "golang.org/x/tools/internal/lsp/analysis/unusedparams"
- "golang.org/x/tools/internal/lsp/analysis/useany"
- "golang.org/x/tools/internal/lsp/command"
- "golang.org/x/tools/internal/lsp/diff"
- "golang.org/x/tools/internal/lsp/diff/myers"
- "golang.org/x/tools/internal/lsp/protocol"
- errors "golang.org/x/xerrors"
-)
-
-var (
- optionsOnce sync.Once
- defaultOptions *Options
-)
-
-// DefaultOptions is the options that are used for Gopls execution independent
-// of any externally provided configuration (LSP initialization, command
-// invocation, etc.).
-func DefaultOptions() *Options {
- optionsOnce.Do(func() {
- var commands []string
- for _, c := range command.Commands {
- commands = append(commands, c.ID())
- }
- defaultOptions = &Options{
- ClientOptions: ClientOptions{
- InsertTextFormat: protocol.PlainTextTextFormat,
- PreferredContentFormat: protocol.Markdown,
- ConfigurationSupported: true,
- DynamicConfigurationSupported: true,
- DynamicRegistrationSemanticTokensSupported: true,
- DynamicWatchedFilesSupported: true,
- LineFoldingOnly: false,
- HierarchicalDocumentSymbolSupport: true,
- },
- ServerOptions: ServerOptions{
- SupportedCodeActions: map[FileKind]map[protocol.CodeActionKind]bool{
- Go: {
- protocol.SourceFixAll: true,
- protocol.SourceOrganizeImports: true,
- protocol.QuickFix: true,
- protocol.RefactorRewrite: true,
- protocol.RefactorExtract: true,
- },
- Mod: {
- protocol.SourceOrganizeImports: true,
- protocol.QuickFix: true,
- },
- Work: {},
- Sum: {},
- Tmpl: {},
- },
- SupportedCommands: commands,
- },
- UserOptions: UserOptions{
- BuildOptions: BuildOptions{
- ExpandWorkspaceToModule: true,
- ExperimentalPackageCacheKey: true,
- MemoryMode: ModeNormal,
- DirectoryFilters: []string{"-node_modules"},
- TemplateExtensions: []string{},
- },
- UIOptions: UIOptions{
- DiagnosticOptions: DiagnosticOptions{
- DiagnosticsDelay: 250 * time.Millisecond,
- Annotations: map[Annotation]bool{
- Bounds: true,
- Escape: true,
- Inline: true,
- Nil: true,
- },
- },
- DocumentationOptions: DocumentationOptions{
- HoverKind: FullDocumentation,
- LinkTarget: "pkg.go.dev",
- LinksInHover: true,
- },
- NavigationOptions: NavigationOptions{
- ImportShortcut: Both,
- SymbolMatcher: SymbolFastFuzzy,
- SymbolStyle: DynamicSymbols,
- },
- CompletionOptions: CompletionOptions{
- Matcher: Fuzzy,
- CompletionBudget: 100 * time.Millisecond,
- ExperimentalPostfixCompletions: true,
- },
- Codelenses: map[string]bool{
- string(command.Generate): true,
- string(command.RegenerateCgo): true,
- string(command.Tidy): true,
- string(command.GCDetails): false,
- string(command.UpgradeDependency): true,
- string(command.Vendor): true,
- },
- },
- },
- InternalOptions: InternalOptions{
- LiteralCompletions: true,
- TempModfile: true,
- CompleteUnimported: true,
- CompletionDocumentation: true,
- DeepCompletion: true,
- },
- Hooks: Hooks{
- ComputeEdits: myers.ComputeEdits,
- URLRegexp: urlRegexp(),
- DefaultAnalyzers: defaultAnalyzers(),
- TypeErrorAnalyzers: typeErrorAnalyzers(),
- ConvenienceAnalyzers: convenienceAnalyzers(),
- StaticcheckAnalyzers: map[string]*Analyzer{},
- GoDiff: true,
- },
- }
- })
- return defaultOptions
-}
-
-// Options holds various configuration that affects Gopls execution, organized
-// by the nature or origin of the settings.
-type Options struct {
- ClientOptions
- ServerOptions
- UserOptions
- InternalOptions
- Hooks
-}
-
-// ClientOptions holds LSP-specific configuration that is provided by the
-// client.
-type ClientOptions struct {
- InsertTextFormat protocol.InsertTextFormat
- ConfigurationSupported bool
- DynamicConfigurationSupported bool
- DynamicRegistrationSemanticTokensSupported bool
- DynamicWatchedFilesSupported bool
- PreferredContentFormat protocol.MarkupKind
- LineFoldingOnly bool
- HierarchicalDocumentSymbolSupport bool
- SemanticTypes []string
- SemanticMods []string
- RelatedInformationSupported bool
- CompletionTags bool
- CompletionDeprecated bool
-}
-
-// ServerOptions holds LSP-specific configuration that is provided by the
-// server.
-type ServerOptions struct {
- SupportedCodeActions map[FileKind]map[protocol.CodeActionKind]bool
- SupportedCommands []string
-}
-
-type BuildOptions struct {
- // BuildFlags is the set of flags passed on to the build system when invoked.
- // It is applied to queries like `go list`, which is used when discovering files.
- // The most common use is to set `-tags`.
- BuildFlags []string
-
- // Env adds environment variables to external commands run by `gopls`, most notably `go list`.
- Env map[string]string
-
- // DirectoryFilters can be used to exclude unwanted directories from the
- // workspace. By default, all directories are included. Filters are an
- // operator, `+` to include and `-` to exclude, followed by a path prefix
- // relative to the workspace folder. They are evaluated in order, and
- // the last filter that applies to a path controls whether it is included.
- // The path prefix can be empty, so an initial `-` excludes everything.
- //
- // Examples:
- //
- // Exclude node_modules: `-node_modules`
- //
- // Include only project_a: `-` (exclude everything), `+project_a`
- //
- // Include only project_a, but not node_modules inside it: `-`, `+project_a`, `-project_a/node_modules`
- DirectoryFilters []string
-
- // TemplateExtensions gives the extensions of file names that are treateed
- // as template files. (The extension
- // is the part of the file name after the final dot.)
- TemplateExtensions []string
-
- // MemoryMode controls the tradeoff `gopls` makes between memory usage and
- // correctness.
- //
- // Values other than `Normal` are untested and may break in surprising ways.
- MemoryMode MemoryMode `status:"experimental"`
-
- // ExpandWorkspaceToModule instructs `gopls` to adjust the scope of the
- // workspace to find the best available module root. `gopls` first looks for
- // a go.mod file in any parent directory of the workspace folder, expanding
- // the scope to that directory if it exists. If no viable parent directory is
- // found, gopls will check if there is exactly one child directory containing
- // a go.mod file, narrowing the scope to that directory if it exists.
- ExpandWorkspaceToModule bool `status:"experimental"`
-
- // ExperimentalWorkspaceModule opts a user into the experimental support
- // for multi-module workspaces.
- ExperimentalWorkspaceModule bool `status:"experimental"`
-
- // ExperimentalPackageCacheKey controls whether to use a coarser cache key
- // for package type information to increase cache hits. This setting removes
- // the user's environment, build flags, and working directory from the cache
- // key, which should be a safe change as all relevant inputs into the type
- // checking pass are already hashed into the key. This is temporarily guarded
- // by an experiment because caching behavior is subtle and difficult to
- // comprehensively test.
- ExperimentalPackageCacheKey bool `status:"experimental"`
-
- // AllowModfileModifications disables -mod=readonly, allowing imports from
- // out-of-scope modules. This option will eventually be removed.
- AllowModfileModifications bool `status:"experimental"`
-
- // AllowImplicitNetworkAccess disables GOPROXY=off, allowing implicit module
- // downloads rather than requiring user action. This option will eventually
- // be removed.
- AllowImplicitNetworkAccess bool `status:"experimental"`
-
- // ExperimentalUseInvalidMetadata enables gopls to fall back on outdated
- // package metadata to provide editor features if the go command fails to
- // load packages for some reason (like an invalid go.mod file). This will
- // eventually be the default behavior, and this setting will be removed.
- ExperimentalUseInvalidMetadata bool `status:"experimental"`
-}
-
-type UIOptions struct {
- DocumentationOptions
- CompletionOptions
- NavigationOptions
- DiagnosticOptions
-
- // Codelenses overrides the enabled/disabled state of code lenses. See the
- // "Code Lenses" section of the
- // [Settings page](https://github.com/golang/tools/blob/master/gopls/doc/settings.md#code-lenses)
- // for the list of supported lenses.
- //
- // Example Usage:
- //
- // ```json5
- // "gopls": {
- // ...
- // "codelenses": {
- // "generate": false, // Don't show the `go generate` lens.
- // "gc_details": true // Show a code lens toggling the display of gc's choices.
- // }
- // ...
- // }
- // ```
- Codelenses map[string]bool
-
- // SemanticTokens controls whether the LSP server will send
- // semantic tokens to the client.
- SemanticTokens bool `status:"experimental"`
-}
-
-type CompletionOptions struct {
- // Placeholders enables placeholders for function parameters or struct
- // fields in completion responses.
- UsePlaceholders bool
-
- // CompletionBudget is the soft latency goal for completion requests. Most
- // requests finish in a couple milliseconds, but in some cases deep
- // completions can take much longer. As we use up our budget we
- // dynamically reduce the search scope to ensure we return timely
- // results. Zero means unlimited.
- CompletionBudget time.Duration `status:"debug"`
-
- // Matcher sets the algorithm that is used when calculating completion
- // candidates.
- Matcher Matcher `status:"advanced"`
-
- // ExperimentalPostfixCompletions enables artificial method snippets
- // such as "someSlice.sort!".
- ExperimentalPostfixCompletions bool `status:"experimental"`
-}
-
-type DocumentationOptions struct {
- // HoverKind controls the information that appears in the hover text.
- // SingleLine and Structured are intended for use only by authors of editor plugins.
- HoverKind HoverKind
-
- // LinkTarget controls where documentation links go.
- // It might be one of:
- //
- // * `"godoc.org"`
- // * `"pkg.go.dev"`
- //
- // If company chooses to use its own `godoc.org`, its address can be used as well.
- LinkTarget string
-
- // LinksInHover toggles the presence of links to documentation in hover.
- LinksInHover bool
-}
-
-type FormattingOptions struct {
- // Local is the equivalent of the `goimports -local` flag, which puts
- // imports beginning with this string after third-party packages. It should
- // be the prefix of the import path whose imports should be grouped
- // separately.
- Local string
-
- // Gofumpt indicates if we should run gofumpt formatting.
- Gofumpt bool
-}
-
-type DiagnosticOptions struct {
- // Analyses specify analyses that the user would like to enable or disable.
- // A map of the names of analysis passes that should be enabled/disabled.
- // A full list of analyzers that gopls uses can be found
- // [here](https://github.com/golang/tools/blob/master/gopls/doc/analyzers.md).
- //
- // Example Usage:
- //
- // ```json5
- // ...
- // "analyses": {
- // "unreachable": false, // Disable the unreachable analyzer.
- // "unusedparams": true // Enable the unusedparams analyzer.
- // }
- // ...
- // ```
- Analyses map[string]bool
-
- // Staticcheck enables additional analyses from staticcheck.io.
- Staticcheck bool `status:"experimental"`
-
- // Annotations specifies the various kinds of optimization diagnostics
- // that should be reported by the gc_details command.
- Annotations map[Annotation]bool `status:"experimental"`
-
- // DiagnosticsDelay controls the amount of time that gopls waits
- // after the most recent file modification before computing deep diagnostics.
- // Simple diagnostics (parsing and type-checking) are always run immediately
- // on recently modified packages.
- //
- // This option must be set to a valid duration string, for example `"250ms"`.
- DiagnosticsDelay time.Duration `status:"advanced"`
-
- // ExperimentalWatchedFileDelay controls the amount of time that gopls waits
- // for additional workspace/didChangeWatchedFiles notifications to arrive,
- // before processing all such notifications in a single batch. This is
- // intended for use by LSP clients that don't support their own batching of
- // file system notifications.
- //
- // This option must be set to a valid duration string, for example `"100ms"`.
- ExperimentalWatchedFileDelay time.Duration `status:"experimental"`
-}
-
-type NavigationOptions struct {
- // ImportShortcut specifies whether import statements should link to
- // documentation or go to definitions.
- ImportShortcut ImportShortcut
-
- // SymbolMatcher sets the algorithm that is used when finding workspace symbols.
- SymbolMatcher SymbolMatcher `status:"advanced"`
-
- // SymbolStyle controls how symbols are qualified in symbol responses.
- //
- // Example Usage:
- //
- // ```json5
- // "gopls": {
- // ...
- // "symbolStyle": "Dynamic",
- // ...
- // }
- // ```
- SymbolStyle SymbolStyle `status:"advanced"`
-}
-
-// UserOptions holds custom Gopls configuration (not part of the LSP) that is
-// modified by the client.
-type UserOptions struct {
- BuildOptions
- UIOptions
- FormattingOptions
-
- // VerboseOutput enables additional debug logging.
- VerboseOutput bool `status:"debug"`
-}
-
-// EnvSlice returns Env as a slice of k=v strings.
-func (u *UserOptions) EnvSlice() []string {
- var result []string
- for k, v := range u.Env {
- result = append(result, fmt.Sprintf("%v=%v", k, v))
- }
- return result
-}
-
-// SetEnvSlice sets Env from a slice of k=v strings.
-func (u *UserOptions) SetEnvSlice(env []string) {
- u.Env = map[string]string{}
- for _, kv := range env {
- split := strings.SplitN(kv, "=", 2)
- if len(split) != 2 {
- continue
- }
- u.Env[split[0]] = split[1]
- }
-}
-
-// Hooks contains configuration that is provided to the Gopls command by the
-// main package.
-type Hooks struct {
- LicensesText string
- GoDiff bool
- ComputeEdits diff.ComputeEdits
- URLRegexp *regexp.Regexp
-
- // GofumptFormat allows the gopls module to wire-in a call to
- // gofumpt/format.Source. langVersion and modulePath are used for some
- // Gofumpt formatting rules -- see the Gofumpt documentation for details.
- GofumptFormat func(ctx context.Context, langVersion, modulePath string, src []byte) ([]byte, error)
-
- DefaultAnalyzers map[string]*Analyzer
- TypeErrorAnalyzers map[string]*Analyzer
- ConvenienceAnalyzers map[string]*Analyzer
- StaticcheckAnalyzers map[string]*Analyzer
-
- // Govulncheck is the implementation of the Govulncheck gopls command.
- Govulncheck func(context.Context, *packages.Config, command.VulncheckArgs) (command.VulncheckResult, error)
-}
-
-// InternalOptions contains settings that are not intended for use by the
-// average user. These may be settings used by tests or outdated settings that
-// will soon be deprecated. Some of these settings may not even be configurable
-// by the user.
-type InternalOptions struct {
- // LiteralCompletions controls whether literal candidates such as
- // "&someStruct{}" are offered. Tests disable this flag to simplify
- // their expected values.
- LiteralCompletions bool
-
- // VerboseWorkDoneProgress controls whether the LSP server should send
- // progress reports for all work done outside the scope of an RPC.
- // Used by the regression tests.
- VerboseWorkDoneProgress bool
-
- // The following options were previously available to users, but they
- // really shouldn't be configured by anyone other than "power users".
-
- // CompletionDocumentation enables documentation with completion results.
- CompletionDocumentation bool
-
- // CompleteUnimported enables completion for packages that you do not
- // currently import.
- CompleteUnimported bool
-
- // DeepCompletion enables the ability to return completions from deep
- // inside relevant entities, rather than just the locally accessible ones.
- //
- // Consider this example:
- //
- // ```go
- // package main
- //
- // import "fmt"
- //
- // type wrapString struct {
- // str string
- // }
- //
- // func main() {
- // x := wrapString{"hello world"}
- // fmt.Printf(<>)
- // }
- // ```
- //
- // At the location of the `<>` in this program, deep completion would suggest the result `x.str`.
- DeepCompletion bool
-
- // TempModfile controls the use of the -modfile flag in Go 1.14.
- TempModfile bool
-}
-
-type ImportShortcut string
-
-const (
- Both ImportShortcut = "Both"
- Link ImportShortcut = "Link"
- Definition ImportShortcut = "Definition"
-)
-
-func (s ImportShortcut) ShowLinks() bool {
- return s == Both || s == Link
-}
-
-func (s ImportShortcut) ShowDefinition() bool {
- return s == Both || s == Definition
-}
-
-type Matcher string
-
-const (
- Fuzzy Matcher = "Fuzzy"
- CaseInsensitive Matcher = "CaseInsensitive"
- CaseSensitive Matcher = "CaseSensitive"
-)
-
-type SymbolMatcher string
-
-const (
- SymbolFuzzy SymbolMatcher = "Fuzzy"
- SymbolFastFuzzy SymbolMatcher = "FastFuzzy"
- SymbolCaseInsensitive SymbolMatcher = "CaseInsensitive"
- SymbolCaseSensitive SymbolMatcher = "CaseSensitive"
-)
-
-type SymbolStyle string
-
-const (
- // PackageQualifiedSymbols is package qualified symbols i.e.
- // "pkg.Foo.Field".
- PackageQualifiedSymbols SymbolStyle = "Package"
- // FullyQualifiedSymbols is fully qualified symbols, i.e.
- // "path/to/pkg.Foo.Field".
- FullyQualifiedSymbols SymbolStyle = "Full"
- // DynamicSymbols uses whichever qualifier results in the highest scoring
- // match for the given symbol query. Here a "qualifier" is any "/" or "."
- // delimited suffix of the fully qualified symbol. i.e. "to/pkg.Foo.Field" or
- // just "Foo.Field".
- DynamicSymbols SymbolStyle = "Dynamic"
-)
-
-type HoverKind string
-
-const (
- SingleLine HoverKind = "SingleLine"
- NoDocumentation HoverKind = "NoDocumentation"
- SynopsisDocumentation HoverKind = "SynopsisDocumentation"
- FullDocumentation HoverKind = "FullDocumentation"
-
- // Structured is an experimental setting that returns a structured hover format.
- // This format separates the signature from the documentation, so that the client
- // can do more manipulation of these fields.
- //
- // This should only be used by clients that support this behavior.
- Structured HoverKind = "Structured"
-)
-
-type MemoryMode string
-
-const (
- ModeNormal MemoryMode = "Normal"
- // In DegradeClosed mode, `gopls` will collect less information about
- // packages without open files. As a result, features like Find
- // References and Rename will miss results in such packages.
- ModeDegradeClosed MemoryMode = "DegradeClosed"
-)
-
-type OptionResults []OptionResult
-
-type OptionResult struct {
- Name string
- Value interface{}
- Error error
-
- State OptionState
- Replacement string
-}
-
-type OptionState int
-
-const (
- OptionHandled = OptionState(iota)
- OptionDeprecated
- OptionUnexpected
-)
-
-type LinkTarget string
-
-func SetOptions(options *Options, opts interface{}) OptionResults {
- var results OptionResults
- switch opts := opts.(type) {
- case nil:
- case map[string]interface{}:
- // If the user's settings contains "allExperiments", set that first,
- // and then let them override individual settings independently.
- var enableExperiments bool
- for name, value := range opts {
- if b, ok := value.(bool); name == "allExperiments" && ok && b {
- enableExperiments = true
- options.EnableAllExperiments()
- }
- }
- seen := map[string]struct{}{}
- for name, value := range opts {
- results = append(results, options.set(name, value, seen))
- }
- // Finally, enable any experimental features that are specified in
- // maps, which allows users to individually toggle them on or off.
- if enableExperiments {
- options.enableAllExperimentMaps()
- }
- default:
- results = append(results, OptionResult{
- Value: opts,
- Error: errors.Errorf("Invalid options type %T", opts),
- })
- }
- return results
-}
-
-func (o *Options) ForClientCapabilities(caps protocol.ClientCapabilities) {
- // Check if the client supports snippets in completion items.
- if c := caps.TextDocument.Completion; c.CompletionItem.SnippetSupport {
- o.InsertTextFormat = protocol.SnippetTextFormat
- }
- // Check if the client supports configuration messages.
- o.ConfigurationSupported = caps.Workspace.Configuration
- o.DynamicConfigurationSupported = caps.Workspace.DidChangeConfiguration.DynamicRegistration
- o.DynamicRegistrationSemanticTokensSupported = caps.TextDocument.SemanticTokens.DynamicRegistration
- o.DynamicWatchedFilesSupported = caps.Workspace.DidChangeWatchedFiles.DynamicRegistration
-
- // Check which types of content format are supported by this client.
- if hover := caps.TextDocument.Hover; len(hover.ContentFormat) > 0 {
- o.PreferredContentFormat = hover.ContentFormat[0]
- }
- // Check if the client supports only line folding.
- fr := caps.TextDocument.FoldingRange
- o.LineFoldingOnly = fr.LineFoldingOnly
- // Check if the client supports hierarchical document symbols.
- o.HierarchicalDocumentSymbolSupport = caps.TextDocument.DocumentSymbol.HierarchicalDocumentSymbolSupport
- // Check if the client supports semantic tokens
- o.SemanticTypes = caps.TextDocument.SemanticTokens.TokenTypes
- o.SemanticMods = caps.TextDocument.SemanticTokens.TokenModifiers
- // we don't need Requests, as we support full functionality
- // we don't need Formats, as there is only one, for now
-
- // Check if the client supports diagnostic related information.
- o.RelatedInformationSupported = caps.TextDocument.PublishDiagnostics.RelatedInformation
- // Check if the client completion support incliudes tags (preferred) or deprecation
- if caps.TextDocument.Completion.CompletionItem.TagSupport.ValueSet != nil {
- o.CompletionTags = true
- } else if caps.TextDocument.Completion.CompletionItem.DeprecatedSupport {
- o.CompletionDeprecated = true
- }
-}
-
-func (o *Options) Clone() *Options {
- result := &Options{
- ClientOptions: o.ClientOptions,
- InternalOptions: o.InternalOptions,
- Hooks: Hooks{
- GoDiff: o.GoDiff,
- ComputeEdits: o.ComputeEdits,
- GofumptFormat: o.GofumptFormat,
- URLRegexp: o.URLRegexp,
- Govulncheck: o.Govulncheck,
- },
- ServerOptions: o.ServerOptions,
- UserOptions: o.UserOptions,
- }
- // Fully clone any slice or map fields. Only Hooks, ExperimentalOptions,
- // and UserOptions can be modified.
- copyStringMap := func(src map[string]bool) map[string]bool {
- dst := make(map[string]bool)
- for k, v := range src {
- dst[k] = v
- }
- return dst
- }
- result.Analyses = copyStringMap(o.Analyses)
- result.Codelenses = copyStringMap(o.Codelenses)
-
- copySlice := func(src []string) []string {
- dst := make([]string, len(src))
- copy(dst, src)
- return dst
- }
- result.SetEnvSlice(o.EnvSlice())
- result.BuildFlags = copySlice(o.BuildFlags)
- result.DirectoryFilters = copySlice(o.DirectoryFilters)
-
- copyAnalyzerMap := func(src map[string]*Analyzer) map[string]*Analyzer {
- dst := make(map[string]*Analyzer)
- for k, v := range src {
- dst[k] = v
- }
- return dst
- }
- result.DefaultAnalyzers = copyAnalyzerMap(o.DefaultAnalyzers)
- result.TypeErrorAnalyzers = copyAnalyzerMap(o.TypeErrorAnalyzers)
- result.ConvenienceAnalyzers = copyAnalyzerMap(o.ConvenienceAnalyzers)
- result.StaticcheckAnalyzers = copyAnalyzerMap(o.StaticcheckAnalyzers)
- return result
-}
-
-func (o *Options) AddStaticcheckAnalyzer(a *analysis.Analyzer, enabled bool, severity protocol.DiagnosticSeverity) {
- o.StaticcheckAnalyzers[a.Name] = &Analyzer{
- Analyzer: a,
- Enabled: enabled,
- Severity: severity,
- }
-}
-
-// EnableAllExperiments turns on all of the experimental "off-by-default"
-// features offered by gopls. Any experimental features specified in maps
-// should be enabled in enableAllExperimentMaps.
-func (o *Options) EnableAllExperiments() {
- o.SemanticTokens = true
- o.ExperimentalPostfixCompletions = true
- o.ExperimentalUseInvalidMetadata = true
- o.ExperimentalWatchedFileDelay = 50 * time.Millisecond
- o.SymbolMatcher = SymbolFastFuzzy
-}
-
-func (o *Options) enableAllExperimentMaps() {
- if _, ok := o.Codelenses[string(command.GCDetails)]; !ok {
- o.Codelenses[string(command.GCDetails)] = true
- }
- if _, ok := o.Analyses[unusedparams.Analyzer.Name]; !ok {
- o.Analyses[unusedparams.Analyzer.Name] = true
- }
-}
-
-func (o *Options) set(name string, value interface{}, seen map[string]struct{}) OptionResult {
- // Flatten the name in case we get options with a hierarchy.
- split := strings.Split(name, ".")
- name = split[len(split)-1]
-
- result := OptionResult{Name: name, Value: value}
- if _, ok := seen[name]; ok {
- result.errorf("duplicate configuration for %s", name)
- }
- seen[name] = struct{}{}
-
- switch name {
- case "env":
- menv, ok := value.(map[string]interface{})
- if !ok {
- result.errorf("invalid type %T, expect map", value)
- break
- }
- if o.Env == nil {
- o.Env = make(map[string]string)
- }
- for k, v := range menv {
- o.Env[k] = fmt.Sprint(v)
- }
-
- case "buildFlags":
- iflags, ok := value.([]interface{})
- if !ok {
- result.errorf("invalid type %T, expect list", value)
- break
- }
- flags := make([]string, 0, len(iflags))
- for _, flag := range iflags {
- flags = append(flags, fmt.Sprintf("%s", flag))
- }
- o.BuildFlags = flags
- case "directoryFilters":
- ifilters, ok := value.([]interface{})
- if !ok {
- result.errorf("invalid type %T, expect list", value)
- break
- }
- var filters []string
- for _, ifilter := range ifilters {
- filter := fmt.Sprint(ifilter)
- if filter == "" || (filter[0] != '+' && filter[0] != '-') {
- result.errorf("invalid filter %q, must start with + or -", filter)
- return result
- }
- filters = append(filters, strings.TrimRight(filepath.FromSlash(filter), "/"))
- }
- o.DirectoryFilters = filters
- case "memoryMode":
- if s, ok := result.asOneOf(
- string(ModeNormal),
- string(ModeDegradeClosed),
- ); ok {
- o.MemoryMode = MemoryMode(s)
- }
- case "completionDocumentation":
- result.setBool(&o.CompletionDocumentation)
- case "usePlaceholders":
- result.setBool(&o.UsePlaceholders)
- case "deepCompletion":
- result.setBool(&o.DeepCompletion)
- case "completeUnimported":
- result.setBool(&o.CompleteUnimported)
- case "completionBudget":
- result.setDuration(&o.CompletionBudget)
- case "matcher":
- if s, ok := result.asOneOf(
- string(Fuzzy),
- string(CaseSensitive),
- string(CaseInsensitive),
- ); ok {
- o.Matcher = Matcher(s)
- }
-
- case "symbolMatcher":
- if s, ok := result.asOneOf(
- string(SymbolFuzzy),
- string(SymbolFastFuzzy),
- string(SymbolCaseInsensitive),
- string(SymbolCaseSensitive),
- ); ok {
- o.SymbolMatcher = SymbolMatcher(s)
- }
-
- case "symbolStyle":
- if s, ok := result.asOneOf(
- string(FullyQualifiedSymbols),
- string(PackageQualifiedSymbols),
- string(DynamicSymbols),
- ); ok {
- o.SymbolStyle = SymbolStyle(s)
- }
-
- case "hoverKind":
- if s, ok := result.asOneOf(
- string(NoDocumentation),
- string(SingleLine),
- string(SynopsisDocumentation),
- string(FullDocumentation),
- string(Structured),
- ); ok {
- o.HoverKind = HoverKind(s)
- }
-
- case "linkTarget":
- result.setString(&o.LinkTarget)
-
- case "linksInHover":
- result.setBool(&o.LinksInHover)
-
- case "importShortcut":
- if s, ok := result.asOneOf(string(Both), string(Link), string(Definition)); ok {
- o.ImportShortcut = ImportShortcut(s)
- }
-
- case "analyses":
- result.setBoolMap(&o.Analyses)
-
- case "annotations":
- result.setAnnotationMap(&o.Annotations)
-
- case "codelenses", "codelens":
- var lensOverrides map[string]bool
- result.setBoolMap(&lensOverrides)
- if result.Error == nil {
- if o.Codelenses == nil {
- o.Codelenses = make(map[string]bool)
- }
- for lens, enabled := range lensOverrides {
- o.Codelenses[lens] = enabled
- }
- }
-
- // codelens is deprecated, but still works for now.
- // TODO(rstambler): Remove this for the gopls/v0.7.0 release.
- if name == "codelens" {
- result.State = OptionDeprecated
- result.Replacement = "codelenses"
- }
-
- case "staticcheck":
- result.setBool(&o.Staticcheck)
-
- case "local":
- result.setString(&o.Local)
-
- case "verboseOutput":
- result.setBool(&o.VerboseOutput)
-
- case "verboseWorkDoneProgress":
- result.setBool(&o.VerboseWorkDoneProgress)
-
- case "tempModfile":
- result.setBool(&o.TempModfile)
-
- case "gofumpt":
- result.setBool(&o.Gofumpt)
-
- case "semanticTokens":
- result.setBool(&o.SemanticTokens)
-
- case "expandWorkspaceToModule":
- result.setBool(&o.ExpandWorkspaceToModule)
-
- case "experimentalPostfixCompletions":
- result.setBool(&o.ExperimentalPostfixCompletions)
-
- case "experimentalWorkspaceModule":
- result.setBool(&o.ExperimentalWorkspaceModule)
-
- case "experimentalTemplateSupport": // remove after June 2022
- result.State = OptionDeprecated
-
- case "templateExtensions":
- if iexts, ok := value.([]interface{}); ok {
- ans := []string{}
- for _, x := range iexts {
- ans = append(ans, fmt.Sprint(x))
- }
- o.TemplateExtensions = ans
- break
- }
- if value == nil {
- o.TemplateExtensions = nil
- break
- }
- result.errorf(fmt.Sprintf("unexpected type %T not []string", value))
- case "experimentalDiagnosticsDelay", "diagnosticsDelay":
- if name == "experimentalDiagnosticsDelay" {
- result.State = OptionDeprecated
- result.Replacement = "diagnosticsDelay"
- }
- result.setDuration(&o.DiagnosticsDelay)
-
- case "experimentalWatchedFileDelay":
- result.setDuration(&o.ExperimentalWatchedFileDelay)
-
- case "experimentalPackageCacheKey":
- result.setBool(&o.ExperimentalPackageCacheKey)
-
- case "allowModfileModifications":
- result.setBool(&o.AllowModfileModifications)
-
- case "allowImplicitNetworkAccess":
- result.setBool(&o.AllowImplicitNetworkAccess)
-
- case "experimentalUseInvalidMetadata":
- result.setBool(&o.ExperimentalUseInvalidMetadata)
-
- case "allExperiments":
- // This setting should be handled before all of the other options are
- // processed, so do nothing here.
-
- // Replaced settings.
- case "experimentalDisabledAnalyses":
- result.State = OptionDeprecated
- result.Replacement = "analyses"
-
- case "disableDeepCompletion":
- result.State = OptionDeprecated
- result.Replacement = "deepCompletion"
-
- case "disableFuzzyMatching":
- result.State = OptionDeprecated
- result.Replacement = "fuzzyMatching"
-
- case "wantCompletionDocumentation":
- result.State = OptionDeprecated
- result.Replacement = "completionDocumentation"
-
- case "wantUnimportedCompletions":
- result.State = OptionDeprecated
- result.Replacement = "completeUnimported"
-
- case "fuzzyMatching":
- result.State = OptionDeprecated
- result.Replacement = "matcher"
-
- case "caseSensitiveCompletion":
- result.State = OptionDeprecated
- result.Replacement = "matcher"
-
- // Deprecated settings.
- case "wantSuggestedFixes":
- result.State = OptionDeprecated
-
- case "noIncrementalSync":
- result.State = OptionDeprecated
-
- case "watchFileChanges":
- result.State = OptionDeprecated
-
- case "go-diff":
- result.State = OptionDeprecated
-
- default:
- result.State = OptionUnexpected
- }
- return result
-}
-
-func (r *OptionResult) errorf(msg string, values ...interface{}) {
- prefix := fmt.Sprintf("parsing setting %q: ", r.Name)
- r.Error = errors.Errorf(prefix+msg, values...)
-}
-
-func (r *OptionResult) asBool() (bool, bool) {
- b, ok := r.Value.(bool)
- if !ok {
- r.errorf("invalid type %T, expect bool", r.Value)
- return false, false
- }
- return b, true
-}
-
-func (r *OptionResult) setBool(b *bool) {
- if v, ok := r.asBool(); ok {
- *b = v
- }
-}
-
-func (r *OptionResult) setDuration(d *time.Duration) {
- if v, ok := r.asString(); ok {
- parsed, err := time.ParseDuration(v)
- if err != nil {
- r.errorf("failed to parse duration %q: %v", v, err)
- return
- }
- *d = parsed
- }
-}
-
-func (r *OptionResult) setBoolMap(bm *map[string]bool) {
- m := r.asBoolMap()
- *bm = m
-}
-
-func (r *OptionResult) setAnnotationMap(bm *map[Annotation]bool) {
- all := r.asBoolMap()
- if all == nil {
- return
- }
- // Default to everything enabled by default.
- m := make(map[Annotation]bool)
- for k, enabled := range all {
- a, err := asOneOf(
- k,
- string(Nil),
- string(Escape),
- string(Inline),
- string(Bounds),
- )
- if err != nil {
- // In case of an error, process any legacy values.
- switch k {
- case "noEscape":
- m[Escape] = false
- r.errorf(`"noEscape" is deprecated, set "Escape: false" instead`)
- case "noNilcheck":
- m[Nil] = false
- r.errorf(`"noNilcheck" is deprecated, set "Nil: false" instead`)
- case "noInline":
- m[Inline] = false
- r.errorf(`"noInline" is deprecated, set "Inline: false" instead`)
- case "noBounds":
- m[Bounds] = false
- r.errorf(`"noBounds" is deprecated, set "Bounds: false" instead`)
- default:
- r.errorf(err.Error())
- }
- continue
- }
- m[Annotation(a)] = enabled
- }
- *bm = m
-}
-
-func (r *OptionResult) asBoolMap() map[string]bool {
- all, ok := r.Value.(map[string]interface{})
- if !ok {
- r.errorf("invalid type %T for map[string]bool option", r.Value)
- return nil
- }
- m := make(map[string]bool)
- for a, enabled := range all {
- if enabled, ok := enabled.(bool); ok {
- m[a] = enabled
- } else {
- r.errorf("invalid type %T for map key %q", enabled, a)
- return m
- }
- }
- return m
-}
-
-func (r *OptionResult) asString() (string, bool) {
- b, ok := r.Value.(string)
- if !ok {
- r.errorf("invalid type %T, expect string", r.Value)
- return "", false
- }
- return b, true
-}
-
-func (r *OptionResult) asOneOf(options ...string) (string, bool) {
- s, ok := r.asString()
- if !ok {
- return "", false
- }
- s, err := asOneOf(s, options...)
- if err != nil {
- r.errorf(err.Error())
- }
- return s, err == nil
-}
-
-func asOneOf(str string, options ...string) (string, error) {
- lower := strings.ToLower(str)
- for _, opt := range options {
- if strings.ToLower(opt) == lower {
- return opt, nil
- }
- }
- return "", fmt.Errorf("invalid option %q for enum", str)
-}
-
-func (r *OptionResult) setString(s *string) {
- if v, ok := r.asString(); ok {
- *s = v
- }
-}
-
-// EnabledAnalyzers returns all of the analyzers enabled for the given
-// snapshot.
-func EnabledAnalyzers(snapshot Snapshot) (analyzers []*Analyzer) {
- for _, a := range snapshot.View().Options().DefaultAnalyzers {
- if a.IsEnabled(snapshot.View()) {
- analyzers = append(analyzers, a)
- }
- }
- for _, a := range snapshot.View().Options().TypeErrorAnalyzers {
- if a.IsEnabled(snapshot.View()) {
- analyzers = append(analyzers, a)
- }
- }
- for _, a := range snapshot.View().Options().ConvenienceAnalyzers {
- if a.IsEnabled(snapshot.View()) {
- analyzers = append(analyzers, a)
- }
- }
- for _, a := range snapshot.View().Options().StaticcheckAnalyzers {
- if a.IsEnabled(snapshot.View()) {
- analyzers = append(analyzers, a)
- }
- }
- return analyzers
-}
-
-func typeErrorAnalyzers() map[string]*Analyzer {
- return map[string]*Analyzer{
- fillreturns.Analyzer.Name: {
- Analyzer: fillreturns.Analyzer,
- ActionKind: []protocol.CodeActionKind{protocol.SourceFixAll, protocol.QuickFix},
- Enabled: true,
- },
- nonewvars.Analyzer.Name: {
- Analyzer: nonewvars.Analyzer,
- Enabled: true,
- },
- noresultvalues.Analyzer.Name: {
- Analyzer: noresultvalues.Analyzer,
- Enabled: true,
- },
- undeclaredname.Analyzer.Name: {
- Analyzer: undeclaredname.Analyzer,
- Fix: UndeclaredName,
- Enabled: true,
- },
- }
-}
-
-func convenienceAnalyzers() map[string]*Analyzer {
- return map[string]*Analyzer{
- fillstruct.Analyzer.Name: {
- Analyzer: fillstruct.Analyzer,
- Fix: FillStruct,
- Enabled: true,
- ActionKind: []protocol.CodeActionKind{protocol.RefactorRewrite},
- },
- stubmethods.Analyzer.Name: {
- Analyzer: stubmethods.Analyzer,
- ActionKind: []protocol.CodeActionKind{protocol.RefactorRewrite},
- Fix: StubMethods,
- Enabled: true,
- },
- }
-}
-
-func defaultAnalyzers() map[string]*Analyzer {
- return map[string]*Analyzer{
- // The traditional vet suite:
- asmdecl.Analyzer.Name: {Analyzer: asmdecl.Analyzer, Enabled: true},
- assign.Analyzer.Name: {Analyzer: assign.Analyzer, Enabled: true},
- atomic.Analyzer.Name: {Analyzer: atomic.Analyzer, Enabled: true},
- bools.Analyzer.Name: {Analyzer: bools.Analyzer, Enabled: true},
- buildtag.Analyzer.Name: {Analyzer: buildtag.Analyzer, Enabled: true},
- cgocall.Analyzer.Name: {Analyzer: cgocall.Analyzer, Enabled: true},
- composite.Analyzer.Name: {Analyzer: composite.Analyzer, Enabled: true},
- copylock.Analyzer.Name: {Analyzer: copylock.Analyzer, Enabled: true},
- errorsas.Analyzer.Name: {Analyzer: errorsas.Analyzer, Enabled: true},
- httpresponse.Analyzer.Name: {Analyzer: httpresponse.Analyzer, Enabled: true},
- ifaceassert.Analyzer.Name: {Analyzer: ifaceassert.Analyzer, Enabled: true},
- loopclosure.Analyzer.Name: {Analyzer: loopclosure.Analyzer, Enabled: true},
- lostcancel.Analyzer.Name: {Analyzer: lostcancel.Analyzer, Enabled: true},
- nilfunc.Analyzer.Name: {Analyzer: nilfunc.Analyzer, Enabled: true},
- printf.Analyzer.Name: {Analyzer: printf.Analyzer, Enabled: true},
- shift.Analyzer.Name: {Analyzer: shift.Analyzer, Enabled: true},
- stdmethods.Analyzer.Name: {Analyzer: stdmethods.Analyzer, Enabled: true},
- stringintconv.Analyzer.Name: {Analyzer: stringintconv.Analyzer, Enabled: true},
- structtag.Analyzer.Name: {Analyzer: structtag.Analyzer, Enabled: true},
- tests.Analyzer.Name: {Analyzer: tests.Analyzer, Enabled: true},
- unmarshal.Analyzer.Name: {Analyzer: unmarshal.Analyzer, Enabled: true},
- unreachable.Analyzer.Name: {Analyzer: unreachable.Analyzer, Enabled: true},
- unsafeptr.Analyzer.Name: {Analyzer: unsafeptr.Analyzer, Enabled: true},
- unusedresult.Analyzer.Name: {Analyzer: unusedresult.Analyzer, Enabled: true},
-
- // Non-vet analyzers:
- atomicalign.Analyzer.Name: {Analyzer: atomicalign.Analyzer, Enabled: true},
- deepequalerrors.Analyzer.Name: {Analyzer: deepequalerrors.Analyzer, Enabled: true},
- fieldalignment.Analyzer.Name: {Analyzer: fieldalignment.Analyzer, Enabled: false},
- nilness.Analyzer.Name: {Analyzer: nilness.Analyzer, Enabled: false},
- shadow.Analyzer.Name: {Analyzer: shadow.Analyzer, Enabled: false},
- sortslice.Analyzer.Name: {Analyzer: sortslice.Analyzer, Enabled: true},
- testinggoroutine.Analyzer.Name: {Analyzer: testinggoroutine.Analyzer, Enabled: true},
- unusedparams.Analyzer.Name: {Analyzer: unusedparams.Analyzer, Enabled: false},
- unusedwrite.Analyzer.Name: {Analyzer: unusedwrite.Analyzer, Enabled: false},
- useany.Analyzer.Name: {Analyzer: useany.Analyzer, Enabled: false},
- infertypeargs.Analyzer.Name: {Analyzer: infertypeargs.Analyzer, Enabled: true},
-
- // gofmt -s suite:
- simplifycompositelit.Analyzer.Name: {
- Analyzer: simplifycompositelit.Analyzer,
- Enabled: true,
- ActionKind: []protocol.CodeActionKind{protocol.SourceFixAll, protocol.QuickFix},
- },
- simplifyrange.Analyzer.Name: {
- Analyzer: simplifyrange.Analyzer,
- Enabled: true,
- ActionKind: []protocol.CodeActionKind{protocol.SourceFixAll, protocol.QuickFix},
- },
- simplifyslice.Analyzer.Name: {
- Analyzer: simplifyslice.Analyzer,
- Enabled: true,
- ActionKind: []protocol.CodeActionKind{protocol.SourceFixAll, protocol.QuickFix},
- },
- }
-}
-
-func urlRegexp() *regexp.Regexp {
- // Ensure links are matched as full words, not anywhere.
- re := regexp.MustCompile(`\b(http|ftp|https)://([\w_-]+(?:(?:\.[\w_-]+)+))([\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])?\b`)
- re.Longest()
- return re
-}
-
-type APIJSON struct {
- Options map[string][]*OptionJSON
- Commands []*CommandJSON
- Lenses []*LensJSON
- Analyzers []*AnalyzerJSON
-}
-
-type OptionJSON struct {
- Name string
- Type string
- Doc string
- EnumKeys EnumKeys
- EnumValues []EnumValue
- Default string
- Status string
- Hierarchy string
-}
-
-func (o *OptionJSON) String() string {
- return o.Name
-}
-
-func (o *OptionJSON) Write(w io.Writer) {
- fmt.Fprintf(w, "**%v** *%v*\n\n", o.Name, o.Type)
- writeStatus(w, o.Status)
- enumValues := collectEnums(o)
- fmt.Fprintf(w, "%v%v\nDefault: `%v`.\n\n", o.Doc, enumValues, o.Default)
-}
-
-func writeStatus(section io.Writer, status string) {
- switch status {
- case "":
- case "advanced":
- fmt.Fprint(section, "**This is an advanced setting and should not be configured by most `gopls` users.**\n\n")
- case "debug":
- fmt.Fprint(section, "**This setting is for debugging purposes only.**\n\n")
- case "experimental":
- fmt.Fprint(section, "**This setting is experimental and may be deleted.**\n\n")
- default:
- fmt.Fprintf(section, "**Status: %s.**\n\n", status)
- }
-}
-
-var parBreakRE = regexp.MustCompile("\n{2,}")
-
-func collectEnums(opt *OptionJSON) string {
- var b strings.Builder
- write := func(name, doc string, index, len int) {
- if doc != "" {
- unbroken := parBreakRE.ReplaceAllString(doc, "\\\n")
- fmt.Fprintf(&b, "* %s\n", strings.TrimSpace(unbroken))
- } else {
- fmt.Fprintf(&b, "* `%s`\n", name)
- }
- }
- if len(opt.EnumValues) > 0 && opt.Type == "enum" {
- b.WriteString("\nMust be one of:\n\n")
- for i, val := range opt.EnumValues {
- write(val.Value, val.Doc, i, len(opt.EnumValues))
- }
- } else if len(opt.EnumKeys.Keys) > 0 && shouldShowEnumKeysInSettings(opt.Name) {
- b.WriteString("\nCan contain any of:\n\n")
- for i, val := range opt.EnumKeys.Keys {
- write(val.Name, val.Doc, i, len(opt.EnumKeys.Keys))
- }
- }
- return b.String()
-}
-
-func shouldShowEnumKeysInSettings(name string) bool {
- // Both of these fields have too many possible options to print.
- return !hardcodedEnumKeys(name)
-}
-
-func hardcodedEnumKeys(name string) bool {
- return name == "analyses" || name == "codelenses"
-}
-
-type EnumKeys struct {
- ValueType string
- Keys []EnumKey
-}
-
-type EnumKey struct {
- Name string
- Doc string
- Default string
-}
-
-type EnumValue struct {
- Value string
- Doc string
-}
-
-type CommandJSON struct {
- Command string
- Title string
- Doc string
- ArgDoc string
- ResultDoc string
-}
-
-func (c *CommandJSON) String() string {
- return c.Command
-}
-
-func (c *CommandJSON) Write(w io.Writer) {
- fmt.Fprintf(w, "### **%v**\nIdentifier: `%v`\n\n%v\n\n", c.Title, c.Command, c.Doc)
- if c.ArgDoc != "" {
- fmt.Fprintf(w, "Args:\n\n```\n%s\n```\n\n", c.ArgDoc)
- }
- if c.ResultDoc != "" {
- fmt.Fprintf(w, "Result:\n\n```\n%s\n```\n\n", c.ResultDoc)
- }
-}
-
-type LensJSON struct {
- Lens string
- Title string
- Doc string
-}
-
-func (l *LensJSON) String() string {
- return l.Title
-}
-
-func (l *LensJSON) Write(w io.Writer) {
- fmt.Fprintf(w, "%s (%s): %s", l.Title, l.Lens, l.Doc)
-}
-
-type AnalyzerJSON struct {
- Name string
- Doc string
- Default bool
-}
-
-func (a *AnalyzerJSON) String() string {
- return a.Name
-}
-
-func (a *AnalyzerJSON) Write(w io.Writer) {
- fmt.Fprintf(w, "%s (%s): %v", a.Name, a.Doc, a.Default)
-}
diff --git a/internal/lsp/source/options_test.go b/internal/lsp/source/options_test.go
deleted file mode 100644
index f8260c1dd..000000000
--- a/internal/lsp/source/options_test.go
+++ /dev/null
@@ -1,183 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package source
-
-import (
- "testing"
- "time"
-)
-
-func TestSetOption(t *testing.T) {
- tests := []struct {
- name string
- value interface{}
- wantError bool
- check func(Options) bool
- }{
- {
- name: "symbolStyle",
- value: "Dynamic",
- check: func(o Options) bool { return o.SymbolStyle == DynamicSymbols },
- },
- {
- name: "symbolStyle",
- value: "",
- wantError: true,
- check: func(o Options) bool { return o.SymbolStyle == "" },
- },
- {
- name: "symbolStyle",
- value: false,
- wantError: true,
- check: func(o Options) bool { return o.SymbolStyle == "" },
- },
- {
- name: "symbolMatcher",
- value: "caseInsensitive",
- check: func(o Options) bool { return o.SymbolMatcher == SymbolCaseInsensitive },
- },
- {
- name: "completionBudget",
- value: "2s",
- check: func(o Options) bool { return o.CompletionBudget == 2*time.Second },
- },
- {
- name: "staticcheck",
- value: true,
- check: func(o Options) bool { return o.Staticcheck == true },
- },
- {
- name: "codelenses",
- value: map[string]interface{}{"generate": true},
- check: func(o Options) bool { return o.Codelenses["generate"] },
- },
- {
- name: "allExperiments",
- value: true,
- check: func(o Options) bool {
- return true // just confirm that we handle this setting
- },
- },
- {
- name: "hoverKind",
- value: "FullDocumentation",
- check: func(o Options) bool {
- return o.HoverKind == FullDocumentation
- },
- },
- {
- name: "hoverKind",
- value: "NoDocumentation",
- check: func(o Options) bool {
- return o.HoverKind == NoDocumentation
- },
- },
- {
- name: "hoverKind",
- value: "SingleLine",
- check: func(o Options) bool {
- return o.HoverKind == SingleLine
- },
- },
- {
- name: "hoverKind",
- value: "Structured",
- check: func(o Options) bool {
- return o.HoverKind == Structured
- },
- },
- {
- name: "ui.documentation.hoverKind",
- value: "Structured",
- check: func(o Options) bool {
- return o.HoverKind == Structured
- },
- },
- {
- name: "matcher",
- value: "Fuzzy",
- check: func(o Options) bool {
- return o.Matcher == Fuzzy
- },
- },
- {
- name: "matcher",
- value: "CaseSensitive",
- check: func(o Options) bool {
- return o.Matcher == CaseSensitive
- },
- },
- {
- name: "matcher",
- value: "CaseInsensitive",
- check: func(o Options) bool {
- return o.Matcher == CaseInsensitive
- },
- },
- {
- name: "env",
- value: map[string]interface{}{"testing": "true"},
- check: func(o Options) bool {
- v, found := o.Env["testing"]
- return found && v == "true"
- },
- },
- {
- name: "env",
- value: []string{"invalid", "input"},
- wantError: true,
- check: func(o Options) bool {
- return o.Env == nil
- },
- },
- {
- name: "directoryFilters",
- value: []interface{}{"-node_modules", "+project_a"},
- check: func(o Options) bool {
- return len(o.DirectoryFilters) == 2
- },
- },
- {
- name: "directoryFilters",
- value: []interface{}{"invalid"},
- wantError: true,
- check: func(o Options) bool {
- return len(o.DirectoryFilters) == 0
- },
- },
- {
- name: "directoryFilters",
- value: []string{"-invalid", "+type"},
- wantError: true,
- check: func(o Options) bool {
- return len(o.DirectoryFilters) == 0
- },
- },
- {
- name: "annotations",
- value: map[string]interface{}{
- "Nil": false,
- "noBounds": true,
- },
- wantError: true,
- check: func(o Options) bool {
- return !o.Annotations[Nil] && !o.Annotations[Bounds]
- },
- },
- }
-
- for _, test := range tests {
- var opts Options
- result := opts.set(test.name, test.value, map[string]struct{}{})
- if (result.Error != nil) != test.wantError {
- t.Fatalf("Options.set(%q, %v): result.Error = %v, want error: %t", test.name, test.value, result.Error, test.wantError)
- }
- // TODO: this could be made much better using cmp.Diff, if that becomes
- // available in this module.
- if !test.check(opts) {
- t.Errorf("Options.set(%q, %v): unexpected result %+v", test.name, test.value, opts)
- }
- }
-}
diff --git a/internal/lsp/source/references.go b/internal/lsp/source/references.go
deleted file mode 100644
index 5d3eac337..000000000
--- a/internal/lsp/source/references.go
+++ /dev/null
@@ -1,200 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package source
-
-import (
- "context"
- "fmt"
- "go/ast"
- "go/token"
- "go/types"
- "sort"
-
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/span"
- errors "golang.org/x/xerrors"
-)
-
-// ReferenceInfo holds information about reference to an identifier in Go source.
-type ReferenceInfo struct {
- Name string
- MappedRange
- ident *ast.Ident
- obj types.Object
- pkg Package
- isDeclaration bool
-}
-
-// References returns a list of references for a given identifier within the packages
-// containing i.File. Declarations appear first in the result.
-func References(ctx context.Context, s Snapshot, f FileHandle, pp protocol.Position, includeDeclaration bool) ([]*ReferenceInfo, error) {
- ctx, done := event.Start(ctx, "source.References")
- defer done()
-
- qualifiedObjs, err := qualifiedObjsAtProtocolPos(ctx, s, f.URI(), pp)
- // Don't return references for builtin types.
- if errors.Is(err, errBuiltin) {
- return nil, nil
- }
- if err != nil {
- return nil, err
- }
-
- refs, err := references(ctx, s, qualifiedObjs, includeDeclaration, true, false)
- if err != nil {
- return nil, err
- }
-
- toSort := refs
- if includeDeclaration {
- toSort = refs[1:]
- }
- sort.Slice(toSort, func(i, j int) bool {
- x := CompareURI(toSort[i].URI(), toSort[j].URI())
- if x == 0 {
- return toSort[i].ident.Pos() < toSort[j].ident.Pos()
- }
- return x < 0
- })
- return refs, nil
-}
-
-// references is a helper function to avoid recomputing qualifiedObjsAtProtocolPos.
-func references(ctx context.Context, snapshot Snapshot, qos []qualifiedObject, includeDeclaration, includeInterfaceRefs, includeEmbeddedRefs bool) ([]*ReferenceInfo, error) {
- var (
- references []*ReferenceInfo
- seen = make(map[token.Pos]bool)
- )
-
- pos := qos[0].obj.Pos()
- if pos == token.NoPos {
- return nil, fmt.Errorf("no position for %s", qos[0].obj)
- }
- filename := snapshot.FileSet().Position(pos).Filename
- pgf, err := qos[0].pkg.File(span.URIFromPath(filename))
- if err != nil {
- return nil, err
- }
- declIdent, err := findIdentifier(ctx, snapshot, qos[0].pkg, pgf, qos[0].obj.Pos())
- if err != nil {
- return nil, err
- }
- // Make sure declaration is the first item in the response.
- if includeDeclaration {
- references = append(references, &ReferenceInfo{
- MappedRange: declIdent.MappedRange,
- Name: qos[0].obj.Name(),
- ident: declIdent.ident,
- obj: qos[0].obj,
- pkg: declIdent.pkg,
- isDeclaration: true,
- })
- }
-
- for _, qo := range qos {
- var searchPkgs []Package
-
- // Only search dependents if the object is exported.
- if qo.obj.Exported() {
- reverseDeps, err := snapshot.GetReverseDependencies(ctx, qo.pkg.ID())
- if err != nil {
- return nil, err
- }
- searchPkgs = append(searchPkgs, reverseDeps...)
- }
- // Add the package in which the identifier is declared.
- searchPkgs = append(searchPkgs, qo.pkg)
- for _, pkg := range searchPkgs {
- for ident, obj := range pkg.GetTypesInfo().Uses {
- // For instantiated objects (as in methods or fields on instantiated
- // types), we may not have pointer-identical objects but still want to
- // consider them references.
- if !equalOrigin(obj, qo.obj) {
- // If ident is not a use of qo.obj, skip it, with one exception:
- // uses of an embedded field can be considered references of the
- // embedded type name
- if !includeEmbeddedRefs {
- continue
- }
- v, ok := obj.(*types.Var)
- if !ok || !v.Embedded() {
- continue
- }
- named, ok := v.Type().(*types.Named)
- if !ok || named.Obj() != qo.obj {
- continue
- }
- }
- if seen[ident.Pos()] {
- continue
- }
- seen[ident.Pos()] = true
- rng, err := posToMappedRange(snapshot, pkg, ident.Pos(), ident.End())
- if err != nil {
- return nil, err
- }
- references = append(references, &ReferenceInfo{
- Name: ident.Name,
- ident: ident,
- pkg: pkg,
- obj: obj,
- MappedRange: rng,
- })
- }
- }
- }
-
- // When searching on type name, don't include interface references -- they
- // would be things like all references to Stringer for any type that
- // happened to have a String method.
- _, isType := declIdent.Declaration.obj.(*types.TypeName)
- if includeInterfaceRefs && !isType {
- declRange, err := declIdent.Range()
- if err != nil {
- return nil, err
- }
- fh, err := snapshot.GetFile(ctx, declIdent.URI())
- if err != nil {
- return nil, err
- }
- interfaceRefs, err := interfaceReferences(ctx, snapshot, fh, declRange.Start)
- if err != nil {
- return nil, err
- }
- references = append(references, interfaceRefs...)
- }
-
- return references, nil
-}
-
-// equalOrigin reports whether obj1 and obj2 have equivalent origin object.
-// This may be the case even if obj1 != obj2, if one or both of them is
-// instantiated.
-func equalOrigin(obj1, obj2 types.Object) bool {
- return obj1.Pkg() == obj2.Pkg() && obj1.Pos() == obj2.Pos() && obj1.Name() == obj2.Name()
-}
-
-// interfaceReferences returns the references to the interfaces implemented by
-// the type or method at the given position.
-func interfaceReferences(ctx context.Context, s Snapshot, f FileHandle, pp protocol.Position) ([]*ReferenceInfo, error) {
- implementations, err := implementations(ctx, s, f, pp)
- if err != nil {
- if errors.Is(err, ErrNotAType) {
- return nil, nil
- }
- return nil, err
- }
-
- var refs []*ReferenceInfo
- for _, impl := range implementations {
- implRefs, err := references(ctx, s, []qualifiedObject{impl}, false, false, false)
- if err != nil {
- return nil, err
- }
- refs = append(refs, implRefs...)
- }
- return refs, nil
-}
diff --git a/internal/lsp/source/rename.go b/internal/lsp/source/rename.go
deleted file mode 100644
index 2ad5d265f..000000000
--- a/internal/lsp/source/rename.go
+++ /dev/null
@@ -1,371 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package source
-
-import (
- "bytes"
- "context"
- "go/ast"
- "go/format"
- "go/token"
- "go/types"
- "regexp"
- "strings"
-
- "golang.org/x/tools/go/types/typeutil"
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/lsp/diff"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/span"
- "golang.org/x/tools/refactor/satisfy"
- errors "golang.org/x/xerrors"
-)
-
-type renamer struct {
- ctx context.Context
- fset *token.FileSet
- refs []*ReferenceInfo
- objsToUpdate map[types.Object]bool
- hadConflicts bool
- errors string
- from, to string
- satisfyConstraints map[satisfy.Constraint]bool
- packages map[*types.Package]Package // may include additional packages that are a rdep of pkg
- msets typeutil.MethodSetCache
- changeMethods bool
-}
-
-type PrepareItem struct {
- Range protocol.Range
- Text string
-}
-
-// PrepareRename searches for a valid renaming at position pp.
-//
-// The returned usererr is intended to be displayed to the user to explain why
-// the prepare fails. Probably we could eliminate the redundancy in returning
-// two errors, but for now this is done defensively.
-func PrepareRename(ctx context.Context, snapshot Snapshot, f FileHandle, pp protocol.Position) (_ *PrepareItem, usererr, err error) {
- ctx, done := event.Start(ctx, "source.PrepareRename")
- defer done()
-
- qos, err := qualifiedObjsAtProtocolPos(ctx, snapshot, f.URI(), pp)
- if err != nil {
- return nil, nil, err
- }
- node, obj, pkg := qos[0].node, qos[0].obj, qos[0].sourcePkg
- if err := checkRenamable(obj); err != nil {
- return nil, err, err
- }
- mr, err := posToMappedRange(snapshot, pkg, node.Pos(), node.End())
- if err != nil {
- return nil, nil, err
- }
- rng, err := mr.Range()
- if err != nil {
- return nil, nil, err
- }
- if _, isImport := node.(*ast.ImportSpec); isImport {
- // We're not really renaming the import path.
- rng.End = rng.Start
- }
- return &PrepareItem{
- Range: rng,
- Text: obj.Name(),
- }, nil, nil
-}
-
-// checkRenamable verifies if an obj may be renamed.
-func checkRenamable(obj types.Object) error {
- if v, ok := obj.(*types.Var); ok && v.Embedded() {
- return errors.New("can't rename embedded fields: rename the type directly or name the field")
- }
- if obj.Name() == "_" {
- return errors.New("can't rename \"_\"")
- }
- return nil
-}
-
-// Rename returns a map of TextEdits for each file modified when renaming a
-// given identifier within a package.
-func Rename(ctx context.Context, s Snapshot, f FileHandle, pp protocol.Position, newName string) (map[span.URI][]protocol.TextEdit, error) {
- ctx, done := event.Start(ctx, "source.Rename")
- defer done()
-
- qos, err := qualifiedObjsAtProtocolPos(ctx, s, f.URI(), pp)
- if err != nil {
- return nil, err
- }
-
- obj, pkg := qos[0].obj, qos[0].pkg
-
- if err := checkRenamable(obj); err != nil {
- return nil, err
- }
- if obj.Name() == newName {
- return nil, errors.Errorf("old and new names are the same: %s", newName)
- }
- if !isValidIdentifier(newName) {
- return nil, errors.Errorf("invalid identifier to rename: %q", newName)
- }
- if pkg == nil || pkg.IsIllTyped() {
- return nil, errors.Errorf("package for %s is ill typed", f.URI())
- }
- refs, err := references(ctx, s, qos, true, false, true)
- if err != nil {
- return nil, err
- }
- r := renamer{
- ctx: ctx,
- fset: s.FileSet(),
- refs: refs,
- objsToUpdate: make(map[types.Object]bool),
- from: obj.Name(),
- to: newName,
- packages: make(map[*types.Package]Package),
- }
-
- // A renaming initiated at an interface method indicates the
- // intention to rename abstract and concrete methods as needed
- // to preserve assignability.
- for _, ref := range refs {
- if obj, ok := ref.obj.(*types.Func); ok {
- recv := obj.Type().(*types.Signature).Recv()
- if recv != nil && IsInterface(recv.Type().Underlying()) {
- r.changeMethods = true
- break
- }
- }
- }
- for _, from := range refs {
- r.packages[from.pkg.GetTypes()] = from.pkg
- }
-
- // Check that the renaming of the identifier is ok.
- for _, ref := range refs {
- r.check(ref.obj)
- if r.hadConflicts { // one error is enough.
- break
- }
- }
- if r.hadConflicts {
- return nil, errors.Errorf(r.errors)
- }
-
- changes, err := r.update()
- if err != nil {
- return nil, err
- }
- result := make(map[span.URI][]protocol.TextEdit)
- for uri, edits := range changes {
- // These edits should really be associated with FileHandles for maximal correctness.
- // For now, this is good enough.
- fh, err := s.GetFile(ctx, uri)
- if err != nil {
- return nil, err
- }
- data, err := fh.Read()
- if err != nil {
- return nil, err
- }
- converter := span.NewContentConverter(uri.Filename(), data)
- m := &protocol.ColumnMapper{
- URI: uri,
- Converter: converter,
- Content: data,
- }
- // Sort the edits first.
- diff.SortTextEdits(edits)
- protocolEdits, err := ToProtocolEdits(m, edits)
- if err != nil {
- return nil, err
- }
- result[uri] = protocolEdits
- }
- return result, nil
-}
-
-// Rename all references to the identifier.
-func (r *renamer) update() (map[span.URI][]diff.TextEdit, error) {
- result := make(map[span.URI][]diff.TextEdit)
- seen := make(map[span.Span]bool)
-
- docRegexp, err := regexp.Compile(`\b` + r.from + `\b`)
- if err != nil {
- return nil, err
- }
- for _, ref := range r.refs {
- refSpan, err := ref.spanRange.Span()
- if err != nil {
- return nil, err
- }
- if seen[refSpan] {
- continue
- }
- seen[refSpan] = true
-
- // Renaming a types.PkgName may result in the addition or removal of an identifier,
- // so we deal with this separately.
- if pkgName, ok := ref.obj.(*types.PkgName); ok && ref.isDeclaration {
- edit, err := r.updatePkgName(pkgName)
- if err != nil {
- return nil, err
- }
- result[refSpan.URI()] = append(result[refSpan.URI()], *edit)
- continue
- }
-
- // Replace the identifier with r.to.
- edit := diff.TextEdit{
- Span: refSpan,
- NewText: r.to,
- }
-
- result[refSpan.URI()] = append(result[refSpan.URI()], edit)
-
- if !ref.isDeclaration || ref.ident == nil { // uses do not have doc comments to update.
- continue
- }
-
- doc := r.docComment(ref.pkg, ref.ident)
- if doc == nil {
- continue
- }
-
- // Perform the rename in doc comments declared in the original package.
- // go/parser strips out \r\n returns from the comment text, so go
- // line-by-line through the comment text to get the correct positions.
- for _, comment := range doc.List {
- if isDirective(comment.Text) {
- continue
- }
- lines := strings.Split(comment.Text, "\n")
- tok := r.fset.File(comment.Pos())
- commentLine := tok.Position(comment.Pos()).Line
- for i, line := range lines {
- lineStart := comment.Pos()
- if i > 0 {
- lineStart = tok.LineStart(commentLine + i)
- }
- for _, locs := range docRegexp.FindAllIndex([]byte(line), -1) {
- rng := span.NewRange(r.fset, lineStart+token.Pos(locs[0]), lineStart+token.Pos(locs[1]))
- spn, err := rng.Span()
- if err != nil {
- return nil, err
- }
- result[spn.URI()] = append(result[spn.URI()], diff.TextEdit{
- Span: spn,
- NewText: r.to,
- })
- }
- }
- }
- }
-
- return result, nil
-}
-
-// docComment returns the doc for an identifier.
-func (r *renamer) docComment(pkg Package, id *ast.Ident) *ast.CommentGroup {
- _, nodes, _ := pathEnclosingInterval(r.fset, pkg, id.Pos(), id.End())
- for _, node := range nodes {
- switch decl := node.(type) {
- case *ast.FuncDecl:
- return decl.Doc
- case *ast.Field:
- return decl.Doc
- case *ast.GenDecl:
- return decl.Doc
- // For {Type,Value}Spec, if the doc on the spec is absent,
- // search for the enclosing GenDecl
- case *ast.TypeSpec:
- if decl.Doc != nil {
- return decl.Doc
- }
- case *ast.ValueSpec:
- if decl.Doc != nil {
- return decl.Doc
- }
- case *ast.Ident:
- case *ast.AssignStmt:
- // *ast.AssignStmt doesn't have an associated comment group.
- // So, we try to find a comment just before the identifier.
-
- // Try to find a comment group only for short variable declarations (:=).
- if decl.Tok != token.DEFINE {
- return nil
- }
-
- var file *ast.File
- for _, f := range pkg.GetSyntax() {
- if f.Pos() <= id.Pos() && id.Pos() <= f.End() {
- file = f
- break
- }
- }
- if file == nil {
- return nil
- }
-
- identLine := r.fset.Position(id.Pos()).Line
- for _, comment := range file.Comments {
- if comment.Pos() > id.Pos() {
- // Comment is after the identifier.
- continue
- }
-
- lastCommentLine := r.fset.Position(comment.End()).Line
- if lastCommentLine+1 == identLine {
- return comment
- }
- }
- default:
- return nil
- }
- }
- return nil
-}
-
-// updatePkgName returns the updates to rename a pkgName in the import spec
-func (r *renamer) updatePkgName(pkgName *types.PkgName) (*diff.TextEdit, error) {
- // Modify ImportSpec syntax to add or remove the Name as needed.
- pkg := r.packages[pkgName.Pkg()]
- _, path, _ := pathEnclosingInterval(r.fset, pkg, pkgName.Pos(), pkgName.Pos())
- if len(path) < 2 {
- return nil, errors.Errorf("no path enclosing interval for %s", pkgName.Name())
- }
- spec, ok := path[1].(*ast.ImportSpec)
- if !ok {
- return nil, errors.Errorf("failed to update PkgName for %s", pkgName.Name())
- }
-
- var astIdent *ast.Ident // will be nil if ident is removed
- if pkgName.Imported().Name() != r.to {
- // ImportSpec.Name needed
- astIdent = &ast.Ident{NamePos: spec.Path.Pos(), Name: r.to}
- }
-
- // Make a copy of the ident that just has the name and path.
- updated := &ast.ImportSpec{
- Name: astIdent,
- Path: spec.Path,
- EndPos: spec.EndPos,
- }
-
- rng := span.NewRange(r.fset, spec.Pos(), spec.End())
- spn, err := rng.Span()
- if err != nil {
- return nil, err
- }
-
- var buf bytes.Buffer
- format.Node(&buf, r.fset, updated)
- newText := buf.String()
-
- return &diff.TextEdit{
- Span: spn,
- NewText: newText,
- }, nil
-}
diff --git a/internal/lsp/source/rename_check.go b/internal/lsp/source/rename_check.go
deleted file mode 100644
index 3aafc391e..000000000
--- a/internal/lsp/source/rename_check.go
+++ /dev/null
@@ -1,936 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-//
-// Taken from golang.org/x/tools/refactor/rename.
-
-package source
-
-import (
- "fmt"
- "go/ast"
- "go/token"
- "go/types"
- "reflect"
- "strconv"
- "strings"
- "unicode"
-
- "golang.org/x/tools/go/ast/astutil"
- "golang.org/x/tools/refactor/satisfy"
-)
-
-// errorf reports an error (e.g. conflict) and prevents file modification.
-func (r *renamer) errorf(pos token.Pos, format string, args ...interface{}) {
- r.hadConflicts = true
- r.errors += fmt.Sprintf(format, args...)
-}
-
-// check performs safety checks of the renaming of the 'from' object to r.to.
-func (r *renamer) check(from types.Object) {
- if r.objsToUpdate[from] {
- return
- }
- r.objsToUpdate[from] = true
-
- // NB: order of conditions is important.
- if from_, ok := from.(*types.PkgName); ok {
- r.checkInFileBlock(from_)
- } else if from_, ok := from.(*types.Label); ok {
- r.checkLabel(from_)
- } else if isPackageLevel(from) {
- r.checkInPackageBlock(from)
- } else if v, ok := from.(*types.Var); ok && v.IsField() {
- r.checkStructField(v)
- } else if f, ok := from.(*types.Func); ok && recv(f) != nil {
- r.checkMethod(f)
- } else if isLocal(from) {
- r.checkInLocalScope(from)
- } else {
- r.errorf(from.Pos(), "unexpected %s object %q (please report a bug)\n",
- objectKind(from), from)
- }
-}
-
-// checkInFileBlock performs safety checks for renames of objects in the file block,
-// i.e. imported package names.
-func (r *renamer) checkInFileBlock(from *types.PkgName) {
- // Check import name is not "init".
- if r.to == "init" {
- r.errorf(from.Pos(), "%q is not a valid imported package name", r.to)
- }
-
- // Check for conflicts between file and package block.
- if prev := from.Pkg().Scope().Lookup(r.to); prev != nil {
- r.errorf(from.Pos(), "renaming this %s %q to %q would conflict",
- objectKind(from), from.Name(), r.to)
- r.errorf(prev.Pos(), "\twith this package member %s",
- objectKind(prev))
- return // since checkInPackageBlock would report redundant errors
- }
-
- // Check for conflicts in lexical scope.
- r.checkInLexicalScope(from, r.packages[from.Pkg()])
-}
-
-// checkInPackageBlock performs safety checks for renames of
-// func/var/const/type objects in the package block.
-func (r *renamer) checkInPackageBlock(from types.Object) {
- // Check that there are no references to the name from another
- // package if the renaming would make it unexported.
- if ast.IsExported(from.Name()) && !ast.IsExported(r.to) {
- for typ, pkg := range r.packages {
- if typ == from.Pkg() {
- continue
- }
- if id := someUse(pkg.GetTypesInfo(), from); id != nil &&
- !r.checkExport(id, typ, from) {
- break
- }
- }
- }
-
- pkg := r.packages[from.Pkg()]
- if pkg == nil {
- return
- }
-
- // Check that in the package block, "init" is a function, and never referenced.
- if r.to == "init" {
- kind := objectKind(from)
- if kind == "func" {
- // Reject if intra-package references to it exist.
- for id, obj := range pkg.GetTypesInfo().Uses {
- if obj == from {
- r.errorf(from.Pos(),
- "renaming this func %q to %q would make it a package initializer",
- from.Name(), r.to)
- r.errorf(id.Pos(), "\tbut references to it exist")
- break
- }
- }
- } else {
- r.errorf(from.Pos(), "you cannot have a %s at package level named %q",
- kind, r.to)
- }
- }
-
- // Check for conflicts between package block and all file blocks.
- for _, f := range pkg.GetSyntax() {
- fileScope := pkg.GetTypesInfo().Scopes[f]
- b, prev := fileScope.LookupParent(r.to, token.NoPos)
- if b == fileScope {
- r.errorf(from.Pos(), "renaming this %s %q to %q would conflict", objectKind(from), from.Name(), r.to)
- var prevPos token.Pos
- if prev != nil {
- prevPos = prev.Pos()
- }
- r.errorf(prevPos, "\twith this %s", objectKind(prev))
- return // since checkInPackageBlock would report redundant errors
- }
- }
-
- // Check for conflicts in lexical scope.
- if from.Exported() {
- for _, pkg := range r.packages {
- r.checkInLexicalScope(from, pkg)
- }
- } else {
- r.checkInLexicalScope(from, pkg)
- }
-}
-
-func (r *renamer) checkInLocalScope(from types.Object) {
- pkg := r.packages[from.Pkg()]
- r.checkInLexicalScope(from, pkg)
-}
-
-// checkInLexicalScope performs safety checks that a renaming does not
-// change the lexical reference structure of the specified package.
-//
-// For objects in lexical scope, there are three kinds of conflicts:
-// same-, sub-, and super-block conflicts. We will illustrate all three
-// using this example:
-//
-// var x int
-// var z int
-//
-// func f(y int) {
-// print(x)
-// print(y)
-// }
-//
-// Renaming x to z encounters a SAME-BLOCK CONFLICT, because an object
-// with the new name already exists, defined in the same lexical block
-// as the old object.
-//
-// Renaming x to y encounters a SUB-BLOCK CONFLICT, because there exists
-// a reference to x from within (what would become) a hole in its scope.
-// The definition of y in an (inner) sub-block would cast a shadow in
-// the scope of the renamed variable.
-//
-// Renaming y to x encounters a SUPER-BLOCK CONFLICT. This is the
-// converse situation: there is an existing definition of the new name
-// (x) in an (enclosing) super-block, and the renaming would create a
-// hole in its scope, within which there exist references to it. The
-// new name casts a shadow in scope of the existing definition of x in
-// the super-block.
-//
-// Removing the old name (and all references to it) is always safe, and
-// requires no checks.
-//
-func (r *renamer) checkInLexicalScope(from types.Object, pkg Package) {
- b := from.Parent() // the block defining the 'from' object
- if b != nil {
- toBlock, to := b.LookupParent(r.to, from.Parent().End())
- if toBlock == b {
- // same-block conflict
- r.errorf(from.Pos(), "renaming this %s %q to %q",
- objectKind(from), from.Name(), r.to)
- r.errorf(to.Pos(), "\tconflicts with %s in same block",
- objectKind(to))
- return
- } else if toBlock != nil {
- // Check for super-block conflict.
- // The name r.to is defined in a superblock.
- // Is that name referenced from within this block?
- forEachLexicalRef(pkg, to, func(id *ast.Ident, block *types.Scope) bool {
- _, obj := block.LookupParent(from.Name(), id.Pos())
- if obj == from {
- // super-block conflict
- r.errorf(from.Pos(), "renaming this %s %q to %q",
- objectKind(from), from.Name(), r.to)
- r.errorf(id.Pos(), "\twould shadow this reference")
- r.errorf(to.Pos(), "\tto the %s declared here",
- objectKind(to))
- return false // stop
- }
- return true
- })
- }
- }
- // Check for sub-block conflict.
- // Is there an intervening definition of r.to between
- // the block defining 'from' and some reference to it?
- forEachLexicalRef(pkg, from, func(id *ast.Ident, block *types.Scope) bool {
- // Find the block that defines the found reference.
- // It may be an ancestor.
- fromBlock, _ := block.LookupParent(from.Name(), id.Pos())
- // See what r.to would resolve to in the same scope.
- toBlock, to := block.LookupParent(r.to, id.Pos())
- if to != nil {
- // sub-block conflict
- if deeper(toBlock, fromBlock) {
- r.errorf(from.Pos(), "renaming this %s %q to %q",
- objectKind(from), from.Name(), r.to)
- r.errorf(id.Pos(), "\twould cause this reference to become shadowed")
- r.errorf(to.Pos(), "\tby this intervening %s definition",
- objectKind(to))
- return false // stop
- }
- }
- return true
- })
-
- // Renaming a type that is used as an embedded field
- // requires renaming the field too. e.g.
- // type T int // if we rename this to U..
- // var s struct {T}
- // print(s.T) // ...this must change too
- if _, ok := from.(*types.TypeName); ok {
- for id, obj := range pkg.GetTypesInfo().Uses {
- if obj == from {
- if field := pkg.GetTypesInfo().Defs[id]; field != nil {
- r.check(field)
- }
- }
- }
- }
-}
-
-// deeper reports whether block x is lexically deeper than y.
-func deeper(x, y *types.Scope) bool {
- if x == y || x == nil {
- return false
- } else if y == nil {
- return true
- } else {
- return deeper(x.Parent(), y.Parent())
- }
-}
-
-// forEachLexicalRef calls fn(id, block) for each identifier id in package
-// pkg that is a reference to obj in lexical scope. block is the
-// lexical block enclosing the reference. If fn returns false the
-// iteration is terminated and findLexicalRefs returns false.
-func forEachLexicalRef(pkg Package, obj types.Object, fn func(id *ast.Ident, block *types.Scope) bool) bool {
- ok := true
- var stack []ast.Node
-
- var visit func(n ast.Node) bool
- visit = func(n ast.Node) bool {
- if n == nil {
- stack = stack[:len(stack)-1] // pop
- return false
- }
- if !ok {
- return false // bail out
- }
-
- stack = append(stack, n) // push
- switch n := n.(type) {
- case *ast.Ident:
- if pkg.GetTypesInfo().Uses[n] == obj {
- block := enclosingBlock(pkg.GetTypesInfo(), stack)
- if !fn(n, block) {
- ok = false
- }
- }
- return visit(nil) // pop stack
-
- case *ast.SelectorExpr:
- // don't visit n.Sel
- ast.Inspect(n.X, visit)
- return visit(nil) // pop stack, don't descend
-
- case *ast.CompositeLit:
- // Handle recursion ourselves for struct literals
- // so we don't visit field identifiers.
- tv, ok := pkg.GetTypesInfo().Types[n]
- if !ok {
- return visit(nil) // pop stack, don't descend
- }
- if _, ok := Deref(tv.Type).Underlying().(*types.Struct); ok {
- if n.Type != nil {
- ast.Inspect(n.Type, visit)
- }
- for _, elt := range n.Elts {
- if kv, ok := elt.(*ast.KeyValueExpr); ok {
- ast.Inspect(kv.Value, visit)
- } else {
- ast.Inspect(elt, visit)
- }
- }
- return visit(nil) // pop stack, don't descend
- }
- }
- return true
- }
-
- for _, f := range pkg.GetSyntax() {
- ast.Inspect(f, visit)
- if len(stack) != 0 {
- panic(stack)
- }
- if !ok {
- break
- }
- }
- return ok
-}
-
-// enclosingBlock returns the innermost block enclosing the specified
-// AST node, specified in the form of a path from the root of the file,
-// [file...n].
-func enclosingBlock(info *types.Info, stack []ast.Node) *types.Scope {
- for i := range stack {
- n := stack[len(stack)-1-i]
- // For some reason, go/types always associates a
- // function's scope with its FuncType.
- // TODO(adonovan): feature or a bug?
- switch f := n.(type) {
- case *ast.FuncDecl:
- n = f.Type
- case *ast.FuncLit:
- n = f.Type
- }
- if b := info.Scopes[n]; b != nil {
- return b
- }
- }
- panic("no Scope for *ast.File")
-}
-
-func (r *renamer) checkLabel(label *types.Label) {
- // Check there are no identical labels in the function's label block.
- // (Label blocks don't nest, so this is easy.)
- if prev := label.Parent().Lookup(r.to); prev != nil {
- r.errorf(label.Pos(), "renaming this label %q to %q", label.Name(), prev.Name())
- r.errorf(prev.Pos(), "\twould conflict with this one")
- }
-}
-
-// checkStructField checks that the field renaming will not cause
-// conflicts at its declaration, or ambiguity or changes to any selection.
-func (r *renamer) checkStructField(from *types.Var) {
- // Check that the struct declaration is free of field conflicts,
- // and field/method conflicts.
-
- // go/types offers no easy way to get from a field (or interface
- // method) to its declaring struct (or interface), so we must
- // ascend the AST.
- fromPkg, ok := r.packages[from.Pkg()]
- if !ok {
- return
- }
- pkg, path, _ := pathEnclosingInterval(r.fset, fromPkg, from.Pos(), from.Pos())
- if pkg == nil || path == nil {
- return
- }
- // path matches this pattern:
- // [Ident SelectorExpr? StarExpr? Field FieldList StructType ParenExpr* ... File]
-
- // Ascend to FieldList.
- var i int
- for {
- if _, ok := path[i].(*ast.FieldList); ok {
- break
- }
- i++
- }
- i++
- tStruct := path[i].(*ast.StructType)
- i++
- // Ascend past parens (unlikely).
- for {
- _, ok := path[i].(*ast.ParenExpr)
- if !ok {
- break
- }
- i++
- }
- if spec, ok := path[i].(*ast.TypeSpec); ok {
- // This struct is also a named type.
- // We must check for direct (non-promoted) field/field
- // and method/field conflicts.
- named := pkg.GetTypesInfo().Defs[spec.Name].Type()
- prev, indices, _ := types.LookupFieldOrMethod(named, true, pkg.GetTypes(), r.to)
- if len(indices) == 1 {
- r.errorf(from.Pos(), "renaming this field %q to %q",
- from.Name(), r.to)
- r.errorf(prev.Pos(), "\twould conflict with this %s",
- objectKind(prev))
- return // skip checkSelections to avoid redundant errors
- }
- } else {
- // This struct is not a named type.
- // We need only check for direct (non-promoted) field/field conflicts.
- T := pkg.GetTypesInfo().Types[tStruct].Type.Underlying().(*types.Struct)
- for i := 0; i < T.NumFields(); i++ {
- if prev := T.Field(i); prev.Name() == r.to {
- r.errorf(from.Pos(), "renaming this field %q to %q",
- from.Name(), r.to)
- r.errorf(prev.Pos(), "\twould conflict with this field")
- return // skip checkSelections to avoid redundant errors
- }
- }
- }
-
- // Renaming an anonymous field requires renaming the type too. e.g.
- // print(s.T) // if we rename T to U,
- // type T int // this and
- // var s struct {T} // this must change too.
- if from.Anonymous() {
- if named, ok := from.Type().(*types.Named); ok {
- r.check(named.Obj())
- } else if named, ok := Deref(from.Type()).(*types.Named); ok {
- r.check(named.Obj())
- }
- }
-
- // Check integrity of existing (field and method) selections.
- r.checkSelections(from)
-}
-
-// checkSelection checks that all uses and selections that resolve to
-// the specified object would continue to do so after the renaming.
-func (r *renamer) checkSelections(from types.Object) {
- for typ, pkg := range r.packages {
- if id := someUse(pkg.GetTypesInfo(), from); id != nil {
- if !r.checkExport(id, typ, from) {
- return
- }
- }
-
- for syntax, sel := range pkg.GetTypesInfo().Selections {
- // There may be extant selections of only the old
- // name or only the new name, so we must check both.
- // (If neither, the renaming is sound.)
- //
- // In both cases, we wish to compare the lengths
- // of the implicit field path (Selection.Index)
- // to see if the renaming would change it.
- //
- // If a selection that resolves to 'from', when renamed,
- // would yield a path of the same or shorter length,
- // this indicates ambiguity or a changed referent,
- // analogous to same- or sub-block lexical conflict.
- //
- // If a selection using the name 'to' would
- // yield a path of the same or shorter length,
- // this indicates ambiguity or shadowing,
- // analogous to same- or super-block lexical conflict.
-
- // TODO(adonovan): fix: derive from Types[syntax.X].Mode
- // TODO(adonovan): test with pointer, value, addressable value.
- isAddressable := true
-
- if sel.Obj() == from {
- if obj, indices, _ := types.LookupFieldOrMethod(sel.Recv(), isAddressable, from.Pkg(), r.to); obj != nil {
- // Renaming this existing selection of
- // 'from' may block access to an existing
- // type member named 'to'.
- delta := len(indices) - len(sel.Index())
- if delta > 0 {
- continue // no ambiguity
- }
- r.selectionConflict(from, delta, syntax, obj)
- return
- }
- } else if sel.Obj().Name() == r.to {
- if obj, indices, _ := types.LookupFieldOrMethod(sel.Recv(), isAddressable, from.Pkg(), from.Name()); obj == from {
- // Renaming 'from' may cause this existing
- // selection of the name 'to' to change
- // its meaning.
- delta := len(indices) - len(sel.Index())
- if delta > 0 {
- continue // no ambiguity
- }
- r.selectionConflict(from, -delta, syntax, sel.Obj())
- return
- }
- }
- }
- }
-}
-
-func (r *renamer) selectionConflict(from types.Object, delta int, syntax *ast.SelectorExpr, obj types.Object) {
- r.errorf(from.Pos(), "renaming this %s %q to %q",
- objectKind(from), from.Name(), r.to)
-
- switch {
- case delta < 0:
- // analogous to sub-block conflict
- r.errorf(syntax.Sel.Pos(),
- "\twould change the referent of this selection")
- r.errorf(obj.Pos(), "\tof this %s", objectKind(obj))
- case delta == 0:
- // analogous to same-block conflict
- r.errorf(syntax.Sel.Pos(),
- "\twould make this reference ambiguous")
- r.errorf(obj.Pos(), "\twith this %s", objectKind(obj))
- case delta > 0:
- // analogous to super-block conflict
- r.errorf(syntax.Sel.Pos(),
- "\twould shadow this selection")
- r.errorf(obj.Pos(), "\tof the %s declared here",
- objectKind(obj))
- }
-}
-
-// checkMethod performs safety checks for renaming a method.
-// There are three hazards:
-// - declaration conflicts
-// - selection ambiguity/changes
-// - entailed renamings of assignable concrete/interface types.
-// We reject renamings initiated at concrete methods if it would
-// change the assignability relation. For renamings of abstract
-// methods, we rename all methods transitively coupled to it via
-// assignability.
-func (r *renamer) checkMethod(from *types.Func) {
- // e.g. error.Error
- if from.Pkg() == nil {
- r.errorf(from.Pos(), "you cannot rename built-in method %s", from)
- return
- }
-
- // ASSIGNABILITY: We reject renamings of concrete methods that
- // would break a 'satisfy' constraint; but renamings of abstract
- // methods are allowed to proceed, and we rename affected
- // concrete and abstract methods as necessary. It is the
- // initial method that determines the policy.
-
- // Check for conflict at point of declaration.
- // Check to ensure preservation of assignability requirements.
- R := recv(from).Type()
- if IsInterface(R) {
- // Abstract method
-
- // declaration
- prev, _, _ := types.LookupFieldOrMethod(R, false, from.Pkg(), r.to)
- if prev != nil {
- r.errorf(from.Pos(), "renaming this interface method %q to %q",
- from.Name(), r.to)
- r.errorf(prev.Pos(), "\twould conflict with this method")
- return
- }
-
- // Check all interfaces that embed this one for
- // declaration conflicts too.
- for _, pkg := range r.packages {
- // Start with named interface types (better errors)
- for _, obj := range pkg.GetTypesInfo().Defs {
- if obj, ok := obj.(*types.TypeName); ok && IsInterface(obj.Type()) {
- f, _, _ := types.LookupFieldOrMethod(
- obj.Type(), false, from.Pkg(), from.Name())
- if f == nil {
- continue
- }
- t, _, _ := types.LookupFieldOrMethod(
- obj.Type(), false, from.Pkg(), r.to)
- if t == nil {
- continue
- }
- r.errorf(from.Pos(), "renaming this interface method %q to %q",
- from.Name(), r.to)
- r.errorf(t.Pos(), "\twould conflict with this method")
- r.errorf(obj.Pos(), "\tin named interface type %q", obj.Name())
- }
- }
-
- // Now look at all literal interface types (includes named ones again).
- for e, tv := range pkg.GetTypesInfo().Types {
- if e, ok := e.(*ast.InterfaceType); ok {
- _ = e
- _ = tv.Type.(*types.Interface)
- // TODO(adonovan): implement same check as above.
- }
- }
- }
-
- // assignability
- //
- // Find the set of concrete or abstract methods directly
- // coupled to abstract method 'from' by some
- // satisfy.Constraint, and rename them too.
- for key := range r.satisfy() {
- // key = (lhs, rhs) where lhs is always an interface.
-
- lsel := r.msets.MethodSet(key.LHS).Lookup(from.Pkg(), from.Name())
- if lsel == nil {
- continue
- }
- rmethods := r.msets.MethodSet(key.RHS)
- rsel := rmethods.Lookup(from.Pkg(), from.Name())
- if rsel == nil {
- continue
- }
-
- // If both sides have a method of this name,
- // and one of them is m, the other must be coupled.
- var coupled *types.Func
- switch from {
- case lsel.Obj():
- coupled = rsel.Obj().(*types.Func)
- case rsel.Obj():
- coupled = lsel.Obj().(*types.Func)
- default:
- continue
- }
-
- // We must treat concrete-to-interface
- // constraints like an implicit selection C.f of
- // each interface method I.f, and check that the
- // renaming leaves the selection unchanged and
- // unambiguous.
- //
- // Fun fact: the implicit selection of C.f
- // type I interface{f()}
- // type C struct{I}
- // func (C) g()
- // var _ I = C{} // here
- // yields abstract method I.f. This can make error
- // messages less than obvious.
- //
- if !IsInterface(key.RHS) {
- // The logic below was derived from checkSelections.
-
- rtosel := rmethods.Lookup(from.Pkg(), r.to)
- if rtosel != nil {
- rto := rtosel.Obj().(*types.Func)
- delta := len(rsel.Index()) - len(rtosel.Index())
- if delta < 0 {
- continue // no ambiguity
- }
-
- // TODO(adonovan): record the constraint's position.
- keyPos := token.NoPos
-
- r.errorf(from.Pos(), "renaming this method %q to %q",
- from.Name(), r.to)
- if delta == 0 {
- // analogous to same-block conflict
- r.errorf(keyPos, "\twould make the %s method of %s invoked via interface %s ambiguous",
- r.to, key.RHS, key.LHS)
- r.errorf(rto.Pos(), "\twith (%s).%s",
- recv(rto).Type(), r.to)
- } else {
- // analogous to super-block conflict
- r.errorf(keyPos, "\twould change the %s method of %s invoked via interface %s",
- r.to, key.RHS, key.LHS)
- r.errorf(coupled.Pos(), "\tfrom (%s).%s",
- recv(coupled).Type(), r.to)
- r.errorf(rto.Pos(), "\tto (%s).%s",
- recv(rto).Type(), r.to)
- }
- return // one error is enough
- }
- }
-
- if !r.changeMethods {
- // This should be unreachable.
- r.errorf(from.Pos(), "internal error: during renaming of abstract method %s", from)
- r.errorf(coupled.Pos(), "\tchangedMethods=false, coupled method=%s", coupled)
- r.errorf(from.Pos(), "\tPlease file a bug report")
- return
- }
-
- // Rename the coupled method to preserve assignability.
- r.check(coupled)
- }
- } else {
- // Concrete method
-
- // declaration
- prev, indices, _ := types.LookupFieldOrMethod(R, true, from.Pkg(), r.to)
- if prev != nil && len(indices) == 1 {
- r.errorf(from.Pos(), "renaming this method %q to %q",
- from.Name(), r.to)
- r.errorf(prev.Pos(), "\twould conflict with this %s",
- objectKind(prev))
- return
- }
-
- // assignability
- //
- // Find the set of abstract methods coupled to concrete
- // method 'from' by some satisfy.Constraint, and rename
- // them too.
- //
- // Coupling may be indirect, e.g. I.f <-> C.f via type D.
- //
- // type I interface {f()}
- // type C int
- // type (C) f()
- // type D struct{C}
- // var _ I = D{}
- //
- for key := range r.satisfy() {
- // key = (lhs, rhs) where lhs is always an interface.
- if IsInterface(key.RHS) {
- continue
- }
- rsel := r.msets.MethodSet(key.RHS).Lookup(from.Pkg(), from.Name())
- if rsel == nil || rsel.Obj() != from {
- continue // rhs does not have the method
- }
- lsel := r.msets.MethodSet(key.LHS).Lookup(from.Pkg(), from.Name())
- if lsel == nil {
- continue
- }
- imeth := lsel.Obj().(*types.Func)
-
- // imeth is the abstract method (e.g. I.f)
- // and key.RHS is the concrete coupling type (e.g. D).
- if !r.changeMethods {
- r.errorf(from.Pos(), "renaming this method %q to %q",
- from.Name(), r.to)
- var pos token.Pos
- var iface string
-
- I := recv(imeth).Type()
- if named, ok := I.(*types.Named); ok {
- pos = named.Obj().Pos()
- iface = "interface " + named.Obj().Name()
- } else {
- pos = from.Pos()
- iface = I.String()
- }
- r.errorf(pos, "\twould make %s no longer assignable to %s",
- key.RHS, iface)
- r.errorf(imeth.Pos(), "\t(rename %s.%s if you intend to change both types)",
- I, from.Name())
- return // one error is enough
- }
-
- // Rename the coupled interface method to preserve assignability.
- r.check(imeth)
- }
- }
-
- // Check integrity of existing (field and method) selections.
- // We skip this if there were errors above, to avoid redundant errors.
- r.checkSelections(from)
-}
-
-func (r *renamer) checkExport(id *ast.Ident, pkg *types.Package, from types.Object) bool {
- // Reject cross-package references if r.to is unexported.
- // (Such references may be qualified identifiers or field/method
- // selections.)
- if !ast.IsExported(r.to) && pkg != from.Pkg() {
- r.errorf(from.Pos(),
- "renaming %q to %q would make it unexported",
- from.Name(), r.to)
- r.errorf(id.Pos(), "\tbreaking references from packages such as %q",
- pkg.Path())
- return false
- }
- return true
-}
-
-// satisfy returns the set of interface satisfaction constraints.
-func (r *renamer) satisfy() map[satisfy.Constraint]bool {
- if r.satisfyConstraints == nil {
- // Compute on demand: it's expensive.
- var f satisfy.Finder
- for _, pkg := range r.packages {
- // From satisfy.Finder documentation:
- //
- // The package must be free of type errors, and
- // info.{Defs,Uses,Selections,Types} must have been populated by the
- // type-checker.
- //
- // Only proceed if all packages have no errors.
- if pkg.HasListOrParseErrors() || pkg.HasTypeErrors() {
- r.errorf(token.NoPos, // we don't have a position for this error.
- "renaming %q to %q not possible because %q has errors",
- r.from, r.to, pkg.PkgPath())
- return nil
- }
- f.Find(pkg.GetTypesInfo(), pkg.GetSyntax())
- }
- r.satisfyConstraints = f.Result
- }
- return r.satisfyConstraints
-}
-
-// -- helpers ----------------------------------------------------------
-
-// recv returns the method's receiver.
-func recv(meth *types.Func) *types.Var {
- return meth.Type().(*types.Signature).Recv()
-}
-
-// someUse returns an arbitrary use of obj within info.
-func someUse(info *types.Info, obj types.Object) *ast.Ident {
- for id, o := range info.Uses {
- if o == obj {
- return id
- }
- }
- return nil
-}
-
-// pathEnclosingInterval returns the Package and ast.Node that
-// contain source interval [start, end), and all the node's ancestors
-// up to the AST root. It searches all ast.Files of all packages.
-// exact is defined as for astutil.PathEnclosingInterval.
-//
-// The zero value is returned if not found.
-//
-func pathEnclosingInterval(fset *token.FileSet, pkg Package, start, end token.Pos) (resPkg Package, path []ast.Node, exact bool) {
- pkgs := []Package{pkg}
- for _, f := range pkg.GetSyntax() {
- for _, imp := range f.Imports {
- if imp == nil {
- continue
- }
- importPath, err := strconv.Unquote(imp.Path.Value)
- if err != nil {
- continue
- }
- importPkg, err := pkg.GetImport(importPath)
- if err != nil {
- return nil, nil, false
- }
- pkgs = append(pkgs, importPkg)
- }
- }
- for _, p := range pkgs {
- for _, f := range p.GetSyntax() {
- if f.Pos() == token.NoPos {
- // This can happen if the parser saw
- // too many errors and bailed out.
- // (Use parser.AllErrors to prevent that.)
- continue
- }
- if !tokenFileContainsPos(fset.File(f.Pos()), start) {
- continue
- }
- if path, exact := astutil.PathEnclosingInterval(f, start, end); path != nil {
- return pkg, path, exact
- }
- }
- }
- return nil, nil, false
-}
-
-// TODO(adonovan): make this a method: func (*token.File) Contains(token.Pos)
-func tokenFileContainsPos(f *token.File, pos token.Pos) bool {
- p := int(pos)
- base := f.Base()
- return base <= p && p < base+f.Size()
-}
-
-func objectKind(obj types.Object) string {
- if obj == nil {
- return "nil object"
- }
- switch obj := obj.(type) {
- case *types.PkgName:
- return "imported package name"
- case *types.TypeName:
- return "type"
- case *types.Var:
- if obj.IsField() {
- return "field"
- }
- case *types.Func:
- if obj.Type().(*types.Signature).Recv() != nil {
- return "method"
- }
- }
- // label, func, var, const
- return strings.ToLower(strings.TrimPrefix(reflect.TypeOf(obj).String(), "*types."))
-}
-
-// NB: for renamings, blank is not considered valid.
-func isValidIdentifier(id string) bool {
- if id == "" || id == "_" {
- return false
- }
- for i, r := range id {
- if !isLetter(r) && (i == 0 || !isDigit(r)) {
- return false
- }
- }
- return token.Lookup(id) == token.IDENT
-}
-
-// isLocal reports whether obj is local to some function.
-// Precondition: not a struct field or interface method.
-func isLocal(obj types.Object) bool {
- // [... 5=stmt 4=func 3=file 2=pkg 1=universe]
- var depth int
- for scope := obj.Parent(); scope != nil; scope = scope.Parent() {
- depth++
- }
- return depth >= 4
-}
-
-func isPackageLevel(obj types.Object) bool {
- if obj == nil {
- return false
- }
- return obj.Pkg().Scope().Lookup(obj.Name()) == obj
-}
-
-// -- Plundered from go/scanner: ---------------------------------------
-
-func isLetter(ch rune) bool {
- return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
-}
-
-func isDigit(ch rune) bool {
- return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
-}
diff --git a/internal/lsp/source/signature_help.go b/internal/lsp/source/signature_help.go
deleted file mode 100644
index e7ed9cc8b..000000000
--- a/internal/lsp/source/signature_help.go
+++ /dev/null
@@ -1,181 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package source
-
-import (
- "context"
- "go/ast"
- "go/token"
- "go/types"
-
- "golang.org/x/tools/go/ast/astutil"
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/lsp/protocol"
- errors "golang.org/x/xerrors"
-)
-
-func SignatureHelp(ctx context.Context, snapshot Snapshot, fh FileHandle, pos protocol.Position) (*protocol.SignatureInformation, int, error) {
- ctx, done := event.Start(ctx, "source.SignatureHelp")
- defer done()
-
- pkg, pgf, err := GetParsedFile(ctx, snapshot, fh, NarrowestPackage)
- if err != nil {
- return nil, 0, errors.Errorf("getting file for SignatureHelp: %w", err)
- }
- spn, err := pgf.Mapper.PointSpan(pos)
- if err != nil {
- return nil, 0, err
- }
- rng, err := spn.Range(pgf.Mapper.Converter)
- if err != nil {
- return nil, 0, err
- }
- // Find a call expression surrounding the query position.
- var callExpr *ast.CallExpr
- path, _ := astutil.PathEnclosingInterval(pgf.File, rng.Start, rng.Start)
- if path == nil {
- return nil, 0, errors.Errorf("cannot find node enclosing position")
- }
-FindCall:
- for _, node := range path {
- switch node := node.(type) {
- case *ast.CallExpr:
- if rng.Start >= node.Lparen && rng.Start <= node.Rparen {
- callExpr = node
- break FindCall
- }
- case *ast.FuncLit, *ast.FuncType:
- // The user is within an anonymous function,
- // which may be the parameter to the *ast.CallExpr.
- // Don't show signature help in this case.
- return nil, 0, errors.Errorf("no signature help within a function declaration")
- case *ast.BasicLit:
- if node.Kind == token.STRING {
- return nil, 0, errors.Errorf("no signature help within a string literal")
- }
- }
-
- }
- if callExpr == nil || callExpr.Fun == nil {
- return nil, 0, errors.Errorf("cannot find an enclosing function")
- }
-
- qf := Qualifier(pgf.File, pkg.GetTypes(), pkg.GetTypesInfo())
-
- // Get the object representing the function, if available.
- // There is no object in certain cases such as calling a function returned by
- // a function (e.g. "foo()()").
- var obj types.Object
- switch t := callExpr.Fun.(type) {
- case *ast.Ident:
- obj = pkg.GetTypesInfo().ObjectOf(t)
- case *ast.SelectorExpr:
- obj = pkg.GetTypesInfo().ObjectOf(t.Sel)
- }
-
- // Handle builtin functions separately.
- if obj, ok := obj.(*types.Builtin); ok {
- return builtinSignature(ctx, snapshot, callExpr, obj.Name(), rng.Start)
- }
-
- // Get the type information for the function being called.
- sigType := pkg.GetTypesInfo().TypeOf(callExpr.Fun)
- if sigType == nil {
- return nil, 0, errors.Errorf("cannot get type for Fun %[1]T (%[1]v)", callExpr.Fun)
- }
-
- sig, _ := sigType.Underlying().(*types.Signature)
- if sig == nil {
- return nil, 0, errors.Errorf("cannot find signature for Fun %[1]T (%[1]v)", callExpr.Fun)
- }
-
- activeParam := activeParameter(callExpr, sig.Params().Len(), sig.Variadic(), rng.Start)
-
- var (
- name string
- comment *ast.CommentGroup
- )
- if obj != nil {
- declPkg, err := FindPackageFromPos(ctx, snapshot, obj.Pos())
- if err != nil {
- return nil, 0, err
- }
- node, err := snapshot.PosToDecl(ctx, declPkg, obj.Pos())
- if err != nil {
- return nil, 0, err
- }
- rng, err := objToMappedRange(snapshot, pkg, obj)
- if err != nil {
- return nil, 0, err
- }
- decl := Declaration{
- obj: obj,
- node: node,
- }
- decl.MappedRange = append(decl.MappedRange, rng)
- d, err := FindHoverContext(ctx, snapshot, pkg, decl.obj, decl.node, nil)
- if err != nil {
- return nil, 0, err
- }
- name = obj.Name()
- comment = d.Comment
- } else {
- name = "func"
- }
- s := NewSignature(ctx, snapshot, pkg, sig, comment, qf)
- paramInfo := make([]protocol.ParameterInformation, 0, len(s.params))
- for _, p := range s.params {
- paramInfo = append(paramInfo, protocol.ParameterInformation{Label: p})
- }
- return &protocol.SignatureInformation{
- Label: name + s.Format(),
- Documentation: s.doc,
- Parameters: paramInfo,
- }, activeParam, nil
-}
-
-func builtinSignature(ctx context.Context, snapshot Snapshot, callExpr *ast.CallExpr, name string, pos token.Pos) (*protocol.SignatureInformation, int, error) {
- sig, err := NewBuiltinSignature(ctx, snapshot, name)
- if err != nil {
- return nil, 0, err
- }
- paramInfo := make([]protocol.ParameterInformation, 0, len(sig.params))
- for _, p := range sig.params {
- paramInfo = append(paramInfo, protocol.ParameterInformation{Label: p})
- }
- activeParam := activeParameter(callExpr, len(sig.params), sig.variadic, pos)
- return &protocol.SignatureInformation{
- Label: sig.name + sig.Format(),
- Documentation: sig.doc,
- Parameters: paramInfo,
- }, activeParam, nil
-
-}
-
-func activeParameter(callExpr *ast.CallExpr, numParams int, variadic bool, pos token.Pos) (activeParam int) {
- if len(callExpr.Args) == 0 {
- return 0
- }
- // First, check if the position is even in the range of the arguments.
- start, end := callExpr.Lparen, callExpr.Rparen
- if !(start <= pos && pos <= end) {
- return 0
- }
- for _, expr := range callExpr.Args {
- if start == token.NoPos {
- start = expr.Pos()
- }
- end = expr.End()
- if start <= pos && pos <= end {
- break
- }
- // Don't advance the active parameter for the last parameter of a variadic function.
- if !variadic || activeParam < numParams-1 {
- activeParam++
- }
- start = expr.Pos() + 1 // to account for commas
- }
- return activeParam
-}
diff --git a/internal/lsp/source/source_test.go b/internal/lsp/source/source_test.go
deleted file mode 100644
index dc5fe53b5..000000000
--- a/internal/lsp/source/source_test.go
+++ /dev/null
@@ -1,984 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package source_test
-
-import (
- "context"
- "fmt"
- "os"
- "os/exec"
- "path/filepath"
- "sort"
- "strings"
- "testing"
-
- "golang.org/x/tools/internal/lsp/cache"
- "golang.org/x/tools/internal/lsp/diff"
- "golang.org/x/tools/internal/lsp/diff/myers"
- "golang.org/x/tools/internal/lsp/fuzzy"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/lsp/source/completion"
- "golang.org/x/tools/internal/lsp/tests"
- "golang.org/x/tools/internal/span"
- "golang.org/x/tools/internal/testenv"
- errors "golang.org/x/xerrors"
-)
-
-func TestMain(m *testing.M) {
- testenv.ExitIfSmallMachine()
- os.Exit(m.Run())
-}
-
-func TestSource(t *testing.T) {
- tests.RunTests(t, "../testdata", true, testSource)
-}
-
-type runner struct {
- snapshot source.Snapshot
- view source.View
- data *tests.Data
- ctx context.Context
- normalizers []tests.Normalizer
-}
-
-func testSource(t *testing.T, datum *tests.Data) {
- ctx := tests.Context(t)
-
- cache := cache.New(nil)
- session := cache.NewSession(ctx)
- options := source.DefaultOptions().Clone()
- tests.DefaultOptions(options)
- options.SetEnvSlice(datum.Config.Env)
- view, _, release, err := session.NewView(ctx, "source_test", span.URIFromPath(datum.Config.Dir), options)
- release()
- if err != nil {
- t.Fatal(err)
- }
- defer view.Shutdown(ctx)
-
- // Enable type error analyses for tests.
- // TODO(golang/go#38212): Delete this once they are enabled by default.
- tests.EnableAllAnalyzers(view, options)
- view.SetOptions(ctx, options)
-
- var modifications []source.FileModification
- for filename, content := range datum.Config.Overlay {
- if filepath.Ext(filename) != ".go" {
- continue
- }
- modifications = append(modifications, source.FileModification{
- URI: span.URIFromPath(filename),
- Action: source.Open,
- Version: -1,
- Text: content,
- LanguageID: "go",
- })
- }
- if err := session.ModifyFiles(ctx, modifications); err != nil {
- t.Fatal(err)
- }
- snapshot, release := view.Snapshot(ctx)
- defer release()
- r := &runner{
- view: view,
- snapshot: snapshot,
- data: datum,
- ctx: ctx,
- normalizers: tests.CollectNormalizers(datum.Exported),
- }
- tests.Run(t, r, datum)
-}
-
-func (r *runner) CallHierarchy(t *testing.T, spn span.Span, expectedCalls *tests.CallHierarchyResult) {
- mapper, err := r.data.Mapper(spn.URI())
- if err != nil {
- t.Fatal(err)
- }
- loc, err := mapper.Location(spn)
- if err != nil {
- t.Fatalf("failed for %v: %v", spn, err)
- }
- fh, err := r.snapshot.GetFile(r.ctx, spn.URI())
- if err != nil {
- t.Fatal(err)
- }
-
- items, err := source.PrepareCallHierarchy(r.ctx, r.snapshot, fh, loc.Range.Start)
- if err != nil {
- t.Fatal(err)
- }
- if len(items) == 0 {
- t.Fatalf("expected call hierarchy item to be returned for identifier at %v\n", loc.Range)
- }
-
- callLocation := protocol.Location{
- URI: items[0].URI,
- Range: items[0].Range,
- }
- if callLocation != loc {
- t.Fatalf("expected source.PrepareCallHierarchy to return identifier at %v but got %v\n", loc, callLocation)
- }
-
- incomingCalls, err := source.IncomingCalls(r.ctx, r.snapshot, fh, loc.Range.Start)
- if err != nil {
- t.Error(err)
- }
- var incomingCallItems []protocol.CallHierarchyItem
- for _, item := range incomingCalls {
- incomingCallItems = append(incomingCallItems, item.From)
- }
- msg := tests.DiffCallHierarchyItems(incomingCallItems, expectedCalls.IncomingCalls)
- if msg != "" {
- t.Error(fmt.Sprintf("incoming calls differ: %s", msg))
- }
-
- outgoingCalls, err := source.OutgoingCalls(r.ctx, r.snapshot, fh, loc.Range.Start)
- if err != nil {
- t.Error(err)
- }
- var outgoingCallItems []protocol.CallHierarchyItem
- for _, item := range outgoingCalls {
- outgoingCallItems = append(outgoingCallItems, item.To)
- }
- msg = tests.DiffCallHierarchyItems(outgoingCallItems, expectedCalls.OutgoingCalls)
- if msg != "" {
- t.Error(fmt.Sprintf("outgoing calls differ: %s", msg))
- }
-}
-
-func (r *runner) Diagnostics(t *testing.T, uri span.URI, want []*source.Diagnostic) {
- fileID, got, err := source.FileDiagnostics(r.ctx, r.snapshot, uri)
- if err != nil {
- t.Fatal(err)
- }
- // A special case to test that there are no diagnostics for a file.
- if len(want) == 1 && want[0].Source == "no_diagnostics" {
- if len(got) != 0 {
- t.Errorf("expected no diagnostics for %s, got %v", uri, got)
- }
- return
- }
- if diff := tests.DiffDiagnostics(fileID.URI, want, got); diff != "" {
- t.Error(diff)
- }
-}
-
-func (r *runner) Completion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) {
- var want []protocol.CompletionItem
- for _, pos := range test.CompletionItems {
- want = append(want, tests.ToProtocolCompletionItem(*items[pos]))
- }
- _, got := r.callCompletion(t, src, func(opts *source.Options) {
- opts.Matcher = source.CaseInsensitive
- opts.DeepCompletion = false
- opts.CompleteUnimported = false
- opts.InsertTextFormat = protocol.SnippetTextFormat
- opts.LiteralCompletions = strings.Contains(string(src.URI()), "literal")
- opts.ExperimentalPostfixCompletions = strings.Contains(string(src.URI()), "postfix")
- })
- got = tests.FilterBuiltins(src, got)
- if diff := tests.DiffCompletionItems(want, got); diff != "" {
- t.Errorf("%s: %s", src, diff)
- }
-}
-
-func (r *runner) CompletionSnippet(t *testing.T, src span.Span, expected tests.CompletionSnippet, placeholders bool, items tests.CompletionItems) {
- _, list := r.callCompletion(t, src, func(opts *source.Options) {
- opts.UsePlaceholders = placeholders
- opts.DeepCompletion = true
- opts.CompleteUnimported = false
- })
- got := tests.FindItem(list, *items[expected.CompletionItem])
- want := expected.PlainSnippet
- if placeholders {
- want = expected.PlaceholderSnippet
- }
- if diff := tests.DiffSnippets(want, got); diff != "" {
- t.Errorf("%s: %s", src, diff)
- }
-}
-
-func (r *runner) UnimportedCompletion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) {
- var want []protocol.CompletionItem
- for _, pos := range test.CompletionItems {
- want = append(want, tests.ToProtocolCompletionItem(*items[pos]))
- }
- _, got := r.callCompletion(t, src, func(opts *source.Options) {})
- got = tests.FilterBuiltins(src, got)
- if diff := tests.CheckCompletionOrder(want, got, false); diff != "" {
- t.Errorf("%s: %s", src, diff)
- }
-}
-
-func (r *runner) DeepCompletion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) {
- var want []protocol.CompletionItem
- for _, pos := range test.CompletionItems {
- want = append(want, tests.ToProtocolCompletionItem(*items[pos]))
- }
- prefix, list := r.callCompletion(t, src, func(opts *source.Options) {
- opts.DeepCompletion = true
- opts.Matcher = source.CaseInsensitive
- opts.CompleteUnimported = false
- })
- list = tests.FilterBuiltins(src, list)
- fuzzyMatcher := fuzzy.NewMatcher(prefix)
- var got []protocol.CompletionItem
- for _, item := range list {
- if fuzzyMatcher.Score(item.Label) <= 0 {
- continue
- }
- got = append(got, item)
- }
- if msg := tests.DiffCompletionItems(want, got); msg != "" {
- t.Errorf("%s: %s", src, msg)
- }
-}
-
-func (r *runner) FuzzyCompletion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) {
- var want []protocol.CompletionItem
- for _, pos := range test.CompletionItems {
- want = append(want, tests.ToProtocolCompletionItem(*items[pos]))
- }
- _, got := r.callCompletion(t, src, func(opts *source.Options) {
- opts.DeepCompletion = true
- opts.Matcher = source.Fuzzy
- opts.CompleteUnimported = false
- })
- got = tests.FilterBuiltins(src, got)
- if msg := tests.DiffCompletionItems(want, got); msg != "" {
- t.Errorf("%s: %s", src, msg)
- }
-}
-
-func (r *runner) CaseSensitiveCompletion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) {
- var want []protocol.CompletionItem
- for _, pos := range test.CompletionItems {
- want = append(want, tests.ToProtocolCompletionItem(*items[pos]))
- }
- _, list := r.callCompletion(t, src, func(opts *source.Options) {
- opts.Matcher = source.CaseSensitive
- opts.CompleteUnimported = false
- })
- list = tests.FilterBuiltins(src, list)
- if diff := tests.DiffCompletionItems(want, list); diff != "" {
- t.Errorf("%s: %s", src, diff)
- }
-}
-
-func (r *runner) RankCompletion(t *testing.T, src span.Span, test tests.Completion, items tests.CompletionItems) {
- var want []protocol.CompletionItem
- for _, pos := range test.CompletionItems {
- want = append(want, tests.ToProtocolCompletionItem(*items[pos]))
- }
- _, got := r.callCompletion(t, src, func(opts *source.Options) {
- opts.DeepCompletion = true
- opts.Matcher = source.Fuzzy
- opts.ExperimentalPostfixCompletions = true
- })
- if msg := tests.CheckCompletionOrder(want, got, true); msg != "" {
- t.Errorf("%s: %s", src, msg)
- }
-}
-
-func (r *runner) callCompletion(t *testing.T, src span.Span, options func(*source.Options)) (string, []protocol.CompletionItem) {
- fh, err := r.snapshot.GetFile(r.ctx, src.URI())
- if err != nil {
- t.Fatal(err)
- }
- original := r.view.Options()
- modified := original.Clone()
- options(modified)
- newView, err := r.view.SetOptions(r.ctx, modified)
- if newView != r.view {
- t.Fatalf("options change unexpectedly created new view")
- }
- if err != nil {
- t.Fatal(err)
- }
- defer r.view.SetOptions(r.ctx, original)
-
- list, surrounding, err := completion.Completion(r.ctx, r.snapshot, fh, protocol.Position{
- Line: uint32(src.Start().Line() - 1),
- Character: uint32(src.Start().Column() - 1),
- }, protocol.CompletionContext{})
- if err != nil && !errors.As(err, &completion.ErrIsDefinition{}) {
- t.Fatalf("failed for %v: %v", src, err)
- }
- var prefix string
- if surrounding != nil {
- prefix = strings.ToLower(surrounding.Prefix())
- }
-
- var numDeepCompletionsSeen int
- var items []completion.CompletionItem
- // Apply deep completion filtering.
- for _, item := range list {
- if item.Depth > 0 {
- if !modified.DeepCompletion {
- continue
- }
- if numDeepCompletionsSeen >= completion.MaxDeepCompletions {
- continue
- }
- numDeepCompletionsSeen++
- }
- items = append(items, item)
- }
- return prefix, tests.ToProtocolCompletionItems(items)
-}
-
-func (r *runner) FoldingRanges(t *testing.T, spn span.Span) {
- uri := spn.URI()
-
- fh, err := r.snapshot.GetFile(r.ctx, spn.URI())
- if err != nil {
- t.Fatal(err)
- }
- data, err := fh.Read()
- if err != nil {
- t.Error(err)
- return
- }
-
- // Test all folding ranges.
- ranges, err := source.FoldingRange(r.ctx, r.snapshot, fh, false)
- if err != nil {
- t.Error(err)
- return
- }
- r.foldingRanges(t, "foldingRange", uri, string(data), ranges)
-
- // Test folding ranges with lineFoldingOnly
- ranges, err = source.FoldingRange(r.ctx, r.snapshot, fh, true)
- if err != nil {
- t.Error(err)
- return
- }
- r.foldingRanges(t, "foldingRange-lineFolding", uri, string(data), ranges)
-}
-
-func (r *runner) foldingRanges(t *testing.T, prefix string, uri span.URI, data string, ranges []*source.FoldingRangeInfo) {
- t.Helper()
- // Fold all ranges.
- nonOverlapping := nonOverlappingRanges(t, ranges)
- for i, rngs := range nonOverlapping {
- got, err := foldRanges(string(data), rngs)
- if err != nil {
- t.Error(err)
- continue
- }
- tag := fmt.Sprintf("%s-%d", prefix, i)
- want := string(r.data.Golden(tag, uri.Filename(), func() ([]byte, error) {
- return []byte(got), nil
- }))
-
- if diff := tests.Diff(t, want, got); diff != "" {
- t.Errorf("%s: foldingRanges failed for %s, diff:\n%v", tag, uri.Filename(), diff)
- }
- }
-
- // Filter by kind.
- kinds := []protocol.FoldingRangeKind{protocol.Imports, protocol.Comment}
- for _, kind := range kinds {
- var kindOnly []*source.FoldingRangeInfo
- for _, fRng := range ranges {
- if fRng.Kind == kind {
- kindOnly = append(kindOnly, fRng)
- }
- }
-
- nonOverlapping := nonOverlappingRanges(t, kindOnly)
- for i, rngs := range nonOverlapping {
- got, err := foldRanges(string(data), rngs)
- if err != nil {
- t.Error(err)
- continue
- }
- tag := fmt.Sprintf("%s-%s-%d", prefix, kind, i)
- want := string(r.data.Golden(tag, uri.Filename(), func() ([]byte, error) {
- return []byte(got), nil
- }))
-
- if diff := tests.Diff(t, want, got); diff != "" {
- t.Errorf("%s: failed for %s, diff:\n%v", tag, uri.Filename(), diff)
- }
- }
-
- }
-}
-
-func nonOverlappingRanges(t *testing.T, ranges []*source.FoldingRangeInfo) (res [][]*source.FoldingRangeInfo) {
- for _, fRng := range ranges {
- setNum := len(res)
- for i := 0; i < len(res); i++ {
- canInsert := true
- for _, rng := range res[i] {
- if conflict(t, rng, fRng) {
- canInsert = false
- break
- }
- }
- if canInsert {
- setNum = i
- break
- }
- }
- if setNum == len(res) {
- res = append(res, []*source.FoldingRangeInfo{})
- }
- res[setNum] = append(res[setNum], fRng)
- }
- return res
-}
-
-func conflict(t *testing.T, a, b *source.FoldingRangeInfo) bool {
- arng, err := a.Range()
- if err != nil {
- t.Fatal(err)
- }
- brng, err := b.Range()
- if err != nil {
- t.Fatal(err)
- }
- // a start position is <= b start positions
- return protocol.ComparePosition(arng.Start, brng.Start) <= 0 && protocol.ComparePosition(arng.End, brng.Start) > 0
-}
-
-func foldRanges(contents string, ranges []*source.FoldingRangeInfo) (string, error) {
- foldedText := "<>"
- res := contents
- // Apply the folds from the end of the file forward
- // to preserve the offsets.
- for i := len(ranges) - 1; i >= 0; i-- {
- fRange := ranges[i]
- spn, err := fRange.Span()
- if err != nil {
- return "", err
- }
- start := spn.Start().Offset()
- end := spn.End().Offset()
-
- tmp := res[0:start] + foldedText
- res = tmp + res[end:]
- }
- return res, nil
-}
-
-func (r *runner) Format(t *testing.T, spn span.Span) {
- gofmted := string(r.data.Golden("gofmt", spn.URI().Filename(), func() ([]byte, error) {
- cmd := exec.Command("gofmt", spn.URI().Filename())
- out, _ := cmd.Output() // ignore error, sometimes we have intentionally ungofmt-able files
- return out, nil
- }))
- fh, err := r.snapshot.GetFile(r.ctx, spn.URI())
- if err != nil {
- t.Fatal(err)
- }
- edits, err := source.Format(r.ctx, r.snapshot, fh)
- if err != nil {
- if gofmted != "" {
- t.Error(err)
- }
- return
- }
- data, err := fh.Read()
- if err != nil {
- t.Fatal(err)
- }
- m, err := r.data.Mapper(spn.URI())
- if err != nil {
- t.Fatal(err)
- }
- diffEdits, err := source.FromProtocolEdits(m, edits)
- if err != nil {
- t.Error(err)
- }
- got := diff.ApplyEdits(string(data), diffEdits)
- if gofmted != got {
- t.Errorf("format failed for %s, expected:\n%v\ngot:\n%v", spn.URI().Filename(), gofmted, got)
- }
-}
-
-func (r *runner) SemanticTokens(t *testing.T, spn span.Span) {
- t.Skip("nothing to test in source")
-}
-
-func (r *runner) Import(t *testing.T, spn span.Span) {
- fh, err := r.snapshot.GetFile(r.ctx, spn.URI())
- if err != nil {
- t.Fatal(err)
- }
- edits, _, err := source.AllImportsFixes(r.ctx, r.snapshot, fh)
- if err != nil {
- t.Error(err)
- }
- data, err := fh.Read()
- if err != nil {
- t.Fatal(err)
- }
- m, err := r.data.Mapper(fh.URI())
- if err != nil {
- t.Fatal(err)
- }
- diffEdits, err := source.FromProtocolEdits(m, edits)
- if err != nil {
- t.Error(err)
- }
- got := diff.ApplyEdits(string(data), diffEdits)
- want := string(r.data.Golden("goimports", spn.URI().Filename(), func() ([]byte, error) {
- return []byte(got), nil
- }))
- if want != got {
- d, err := myers.ComputeEdits(spn.URI(), want, got)
- if err != nil {
- t.Fatal(err)
- }
- t.Errorf("import failed for %s: %s", spn.URI().Filename(), diff.ToUnified("want", "got", want, d))
- }
-}
-
-func (r *runner) Definition(t *testing.T, spn span.Span, d tests.Definition) {
- _, srcRng, err := spanToRange(r.data, d.Src)
- if err != nil {
- t.Fatal(err)
- }
- fh, err := r.snapshot.GetFile(r.ctx, spn.URI())
- if err != nil {
- t.Fatal(err)
- }
- ident, err := source.Identifier(r.ctx, r.snapshot, fh, srcRng.Start)
- if err != nil {
- t.Fatalf("failed for %v: %v", d.Src, err)
- }
- h, err := source.HoverIdentifier(r.ctx, ident)
- if err != nil {
- t.Fatalf("failed for %v: %v", d.Src, err)
- }
- hover, err := source.FormatHover(h, r.view.Options())
- if err != nil {
- t.Fatal(err)
- }
- rng, err := ident.Declaration.MappedRange[0].Range()
- if err != nil {
- t.Fatal(err)
- }
- if d.IsType {
- rng, err = ident.Type.Range()
- if err != nil {
- t.Fatal(err)
- }
- hover = ""
- }
- didSomething := false
- if hover != "" {
- didSomething = true
- tag := fmt.Sprintf("%s-hoverdef", d.Name)
- expectHover := string(r.data.Golden(tag, d.Src.URI().Filename(), func() ([]byte, error) {
- return []byte(hover), nil
- }))
- hover = tests.StripSubscripts(hover)
- expectHover = tests.StripSubscripts(expectHover)
- if hover != expectHover {
- t.Errorf("hoverdef for %s failed:\n%s", d.Src, tests.Diff(t, expectHover, hover))
- }
- }
- if !d.OnlyHover {
- didSomething = true
- if _, defRng, err := spanToRange(r.data, d.Def); err != nil {
- t.Fatal(err)
- } else if rng != defRng {
- t.Errorf("for %v got %v want %v", d.Src, rng, defRng)
- }
- }
- if !didSomething {
- t.Errorf("no tests ran for %s", d.Src.URI())
- }
-}
-
-func (r *runner) Implementation(t *testing.T, spn span.Span, impls []span.Span) {
- sm, err := r.data.Mapper(spn.URI())
- if err != nil {
- t.Fatal(err)
- }
- loc, err := sm.Location(spn)
- if err != nil {
- t.Fatalf("failed for %v: %v", spn, err)
- }
- fh, err := r.snapshot.GetFile(r.ctx, spn.URI())
- if err != nil {
- t.Fatal(err)
- }
- locs, err := source.Implementation(r.ctx, r.snapshot, fh, loc.Range.Start)
- if err != nil {
- t.Fatalf("failed for %v: %v", spn, err)
- }
- if len(locs) != len(impls) {
- t.Fatalf("got %d locations for implementation, expected %d", len(locs), len(impls))
- }
- var results []span.Span
- for i := range locs {
- locURI := locs[i].URI.SpanURI()
- lm, err := r.data.Mapper(locURI)
- if err != nil {
- t.Fatal(err)
- }
- imp, err := lm.Span(locs[i])
- if err != nil {
- t.Fatalf("failed for %v: %v", locs[i], err)
- }
- results = append(results, imp)
- }
- // Sort results and expected to make tests deterministic.
- sort.SliceStable(results, func(i, j int) bool {
- return span.Compare(results[i], results[j]) == -1
- })
- sort.SliceStable(impls, func(i, j int) bool {
- return span.Compare(impls[i], impls[j]) == -1
- })
- for i := range results {
- if results[i] != impls[i] {
- t.Errorf("for %dth implementation of %v got %v want %v", i, spn, results[i], impls[i])
- }
- }
-}
-
-func (r *runner) Highlight(t *testing.T, src span.Span, locations []span.Span) {
- ctx := r.ctx
- m, srcRng, err := spanToRange(r.data, src)
- if err != nil {
- t.Fatal(err)
- }
- fh, err := r.snapshot.GetFile(r.ctx, src.URI())
- if err != nil {
- t.Fatal(err)
- }
- highlights, err := source.Highlight(ctx, r.snapshot, fh, srcRng.Start)
- if err != nil {
- t.Errorf("highlight failed for %s: %v", src.URI(), err)
- }
- if len(highlights) != len(locations) {
- t.Fatalf("got %d highlights for highlight at %v:%v:%v, expected %d", len(highlights), src.URI().Filename(), src.Start().Line(), src.Start().Column(), len(locations))
- }
- // Check to make sure highlights have a valid range.
- var results []span.Span
- for i := range highlights {
- h, err := m.RangeSpan(highlights[i])
- if err != nil {
- t.Fatalf("failed for %v: %v", highlights[i], err)
- }
- results = append(results, h)
- }
- // Sort results to make tests deterministic since DocumentHighlight uses a map.
- sort.SliceStable(results, func(i, j int) bool {
- return span.Compare(results[i], results[j]) == -1
- })
- // Check to make sure all the expected highlights are found.
- for i := range results {
- if results[i] != locations[i] {
- t.Errorf("want %v, got %v\n", locations[i], results[i])
- }
- }
-}
-
-func (r *runner) Hover(t *testing.T, src span.Span, text string) {
- ctx := r.ctx
- _, srcRng, err := spanToRange(r.data, src)
- if err != nil {
- t.Fatal(err)
- }
- fh, err := r.snapshot.GetFile(r.ctx, src.URI())
- if err != nil {
- t.Fatal(err)
- }
- hover, err := source.Hover(ctx, r.snapshot, fh, srcRng.Start)
- if err != nil {
- t.Errorf("hover failed for %s: %v", src.URI(), err)
- }
- if text == "" {
- if hover != nil {
- t.Errorf("want nil, got %v\n", hover)
- }
- } else {
- if hover == nil {
- t.Fatalf("want hover result to not be nil")
- }
- if got := hover.Contents.Value; got != text {
- t.Errorf("want %v, got %v\n", got, text)
- }
- if want, got := srcRng, hover.Range; want != got {
- t.Errorf("want range %v, got %v instead", want, got)
- }
- }
-}
-
-func (r *runner) References(t *testing.T, src span.Span, itemList []span.Span) {
- ctx := r.ctx
- _, srcRng, err := spanToRange(r.data, src)
- if err != nil {
- t.Fatal(err)
- }
- snapshot := r.snapshot
- fh, err := snapshot.GetFile(r.ctx, src.URI())
- if err != nil {
- t.Fatal(err)
- }
- for _, includeDeclaration := range []bool{true, false} {
- t.Run(fmt.Sprintf("refs-declaration-%v", includeDeclaration), func(t *testing.T) {
- want := make(map[span.Span]bool)
- for i, pos := range itemList {
- // We don't want the first result if we aren't including the declaration.
- if i == 0 && !includeDeclaration {
- continue
- }
- want[pos] = true
- }
- refs, err := source.References(ctx, snapshot, fh, srcRng.Start, includeDeclaration)
- if err != nil {
- t.Fatalf("failed for %s: %v", src, err)
- }
- got := make(map[span.Span]bool)
- for _, refInfo := range refs {
- refSpan, err := refInfo.Span()
- if err != nil {
- t.Fatal(err)
- }
- got[refSpan] = true
- }
- if len(got) != len(want) {
- t.Errorf("references failed: different lengths got %v want %v", len(got), len(want))
- }
- for spn := range got {
- if !want[spn] {
- t.Errorf("references failed: incorrect references got %v want locations %v", got, want)
- }
- }
- })
- }
-}
-
-func (r *runner) Rename(t *testing.T, spn span.Span, newText string) {
- tag := fmt.Sprintf("%s-rename", newText)
-
- _, srcRng, err := spanToRange(r.data, spn)
- if err != nil {
- t.Fatal(err)
- }
- fh, err := r.snapshot.GetFile(r.ctx, spn.URI())
- if err != nil {
- t.Fatal(err)
- }
- changes, err := source.Rename(r.ctx, r.snapshot, fh, srcRng.Start, newText)
- if err != nil {
- renamed := string(r.data.Golden(tag, spn.URI().Filename(), func() ([]byte, error) {
- return []byte(err.Error()), nil
- }))
- if err.Error() != renamed {
- t.Errorf("rename failed for %s, expected:\n%v\ngot:\n%v\n", newText, renamed, err)
- }
- return
- }
-
- var res []string
- for editURI, edits := range changes {
- fh, err := r.snapshot.GetFile(r.ctx, editURI)
- if err != nil {
- t.Fatal(err)
- }
- data, err := fh.Read()
- if err != nil {
- t.Fatal(err)
- }
- m, err := r.data.Mapper(fh.URI())
- if err != nil {
- t.Fatal(err)
- }
- diffEdits, err := source.FromProtocolEdits(m, edits)
- if err != nil {
- t.Fatal(err)
- }
- contents := applyEdits(string(data), diffEdits)
- if len(changes) > 1 {
- filename := filepath.Base(editURI.Filename())
- contents = fmt.Sprintf("%s:\n%s", filename, contents)
- }
- res = append(res, contents)
- }
-
- // Sort on filename
- sort.Strings(res)
-
- var got string
- for i, val := range res {
- if i != 0 {
- got += "\n"
- }
- got += val
- }
-
- renamed := string(r.data.Golden(tag, spn.URI().Filename(), func() ([]byte, error) {
- return []byte(got), nil
- }))
-
- if renamed != got {
- t.Errorf("rename failed for %s, expected:\n%v\ngot:\n%v", newText, renamed, got)
- }
-}
-
-func applyEdits(contents string, edits []diff.TextEdit) string {
- res := contents
-
- // Apply the edits from the end of the file forward
- // to preserve the offsets
- for i := len(edits) - 1; i >= 0; i-- {
- edit := edits[i]
- start := edit.Span.Start().Offset()
- end := edit.Span.End().Offset()
- tmp := res[0:start] + edit.NewText
- res = tmp + res[end:]
- }
- return res
-}
-
-func (r *runner) PrepareRename(t *testing.T, src span.Span, want *source.PrepareItem) {
- _, srcRng, err := spanToRange(r.data, src)
- if err != nil {
- t.Fatal(err)
- }
- // Find the identifier at the position.
- fh, err := r.snapshot.GetFile(r.ctx, src.URI())
- if err != nil {
- t.Fatal(err)
- }
- item, _, err := source.PrepareRename(r.ctx, r.snapshot, fh, srcRng.Start)
- if err != nil {
- if want.Text != "" { // expected an ident.
- t.Errorf("prepare rename failed for %v: got error: %v", src, err)
- }
- return
- }
- if item == nil {
- if want.Text != "" {
- t.Errorf("prepare rename failed for %v: got nil", src)
- }
- return
- }
- if want.Text == "" {
- t.Errorf("prepare rename failed for %v: expected nil, got %v", src, item)
- return
- }
- if item.Range.Start == item.Range.End {
- // Special case for 0-length ranges. Marks can't specify a 0-length range,
- // so just compare the start.
- if item.Range.Start != want.Range.Start {
- t.Errorf("prepare rename failed: incorrect point, got %v want %v", item.Range.Start, want.Range.Start)
- }
- } else {
- if protocol.CompareRange(item.Range, want.Range) != 0 {
- t.Errorf("prepare rename failed: incorrect range got %v want %v", item.Range, want.Range)
- }
- }
-}
-
-func (r *runner) Symbols(t *testing.T, uri span.URI, expectedSymbols []protocol.DocumentSymbol) {
- fh, err := r.snapshot.GetFile(r.ctx, uri)
- if err != nil {
- t.Fatal(err)
- }
- symbols, err := source.DocumentSymbols(r.ctx, r.snapshot, fh)
- if err != nil {
- t.Errorf("symbols failed for %s: %v", uri, err)
- }
- if len(symbols) != len(expectedSymbols) {
- t.Errorf("want %d top-level symbols in %v, got %d", len(expectedSymbols), uri, len(symbols))
- return
- }
- if diff := tests.DiffSymbols(t, uri, expectedSymbols, symbols); diff != "" {
- t.Error(diff)
- }
-}
-
-func (r *runner) WorkspaceSymbols(t *testing.T, uri span.URI, query string, typ tests.WorkspaceSymbolsTestType) {
- r.callWorkspaceSymbols(t, uri, query, typ)
-}
-
-func (r *runner) callWorkspaceSymbols(t *testing.T, uri span.URI, query string, typ tests.WorkspaceSymbolsTestType) {
- t.Helper()
-
- matcher := tests.WorkspaceSymbolsTestTypeToMatcher(typ)
- gotSymbols, err := source.WorkspaceSymbols(r.ctx, matcher, r.view.Options().SymbolStyle, []source.View{r.view}, query)
- if err != nil {
- t.Fatal(err)
- }
- got, err := tests.WorkspaceSymbolsString(r.ctx, r.data, uri, gotSymbols)
- if err != nil {
- t.Fatal(err)
- }
- got = filepath.ToSlash(tests.Normalize(got, r.normalizers))
- want := string(r.data.Golden(fmt.Sprintf("workspace_symbol-%s-%s", strings.ToLower(string(matcher)), query), uri.Filename(), func() ([]byte, error) {
- return []byte(got), nil
- }))
- if diff := tests.Diff(t, want, got); diff != "" {
- t.Error(diff)
- }
-}
-
-func (r *runner) SignatureHelp(t *testing.T, spn span.Span, want *protocol.SignatureHelp) {
- _, rng, err := spanToRange(r.data, spn)
- if err != nil {
- t.Fatal(err)
- }
- fh, err := r.snapshot.GetFile(r.ctx, spn.URI())
- if err != nil {
- t.Fatal(err)
- }
- gotSignature, gotActiveParameter, err := source.SignatureHelp(r.ctx, r.snapshot, fh, rng.Start)
- if err != nil {
- // Only fail if we got an error we did not expect.
- if want != nil {
- t.Fatalf("failed for %v: %v", spn, err)
- }
- return
- }
- if gotSignature == nil {
- if want != nil {
- t.Fatalf("got nil signature, but expected %v", want)
- }
- return
- }
- got := &protocol.SignatureHelp{
- Signatures: []protocol.SignatureInformation{*gotSignature},
- ActiveParameter: uint32(gotActiveParameter),
- }
- diff, err := tests.DiffSignatures(spn, want, got)
- if err != nil {
- t.Fatal(err)
- }
- if diff != "" {
- t.Error(diff)
- }
-}
-
-// These are pure LSP features, no source level functionality to be tested.
-func (r *runner) Link(t *testing.T, uri span.URI, wantLinks []tests.Link) {}
-
-func (r *runner) SuggestedFix(t *testing.T, spn span.Span, actionKinds []string, expectedActions int) {
-}
-func (r *runner) FunctionExtraction(t *testing.T, start span.Span, end span.Span) {}
-func (r *runner) MethodExtraction(t *testing.T, start span.Span, end span.Span) {}
-func (r *runner) CodeLens(t *testing.T, uri span.URI, want []protocol.CodeLens) {}
-func (r *runner) AddImport(t *testing.T, uri span.URI, expectedImport string) {}
-
-func spanToRange(data *tests.Data, spn span.Span) (*protocol.ColumnMapper, protocol.Range, error) {
- m, err := data.Mapper(spn.URI())
- if err != nil {
- return nil, protocol.Range{}, err
- }
- srcRng, err := m.Range(spn)
- if err != nil {
- return nil, protocol.Range{}, err
- }
- return m, srcRng, nil
-}
diff --git a/internal/lsp/source/stub.go b/internal/lsp/source/stub.go
deleted file mode 100644
index 6810f1d20..000000000
--- a/internal/lsp/source/stub.go
+++ /dev/null
@@ -1,330 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package source
-
-import (
- "bytes"
- "context"
- "fmt"
- "go/ast"
- "go/format"
- "go/parser"
- "go/token"
- "go/types"
- "strings"
-
- "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/ast/astutil"
- "golang.org/x/tools/internal/lsp/analysis/stubmethods"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/span"
- "golang.org/x/tools/internal/typeparams"
-)
-
-func stubSuggestedFixFunc(ctx context.Context, snapshot Snapshot, fh VersionedFileHandle, rng protocol.Range) (*analysis.SuggestedFix, error) {
- pkg, pgf, err := GetParsedFile(ctx, snapshot, fh, NarrowestPackage)
- if err != nil {
- return nil, fmt.Errorf("GetParsedFile: %w", err)
- }
- nodes, pos, err := getStubNodes(pgf, rng)
- if err != nil {
- return nil, fmt.Errorf("getNodes: %w", err)
- }
- si := stubmethods.GetStubInfo(pkg.GetTypesInfo(), nodes, pos)
- if si == nil {
- return nil, fmt.Errorf("nil interface request")
- }
- parsedConcreteFile, concreteFH, err := getStubFile(ctx, si.Concrete.Obj(), snapshot)
- if err != nil {
- return nil, fmt.Errorf("getFile(concrete): %w", err)
- }
- var (
- methodsSrc []byte
- stubImports []*stubImport // additional imports needed for method stubs
- )
- if si.Interface.Pkg() == nil && si.Interface.Name() == "error" && si.Interface.Parent() == types.Universe {
- methodsSrc = stubErr(ctx, parsedConcreteFile.File, si, snapshot)
- } else {
- methodsSrc, stubImports, err = stubMethods(ctx, parsedConcreteFile.File, si, snapshot)
- }
- if err != nil {
- return nil, fmt.Errorf("stubMethods: %w", err)
- }
- nodes, _ = astutil.PathEnclosingInterval(parsedConcreteFile.File, si.Concrete.Obj().Pos(), si.Concrete.Obj().Pos())
- concreteSrc, err := concreteFH.Read()
- if err != nil {
- return nil, fmt.Errorf("error reading concrete file source: %w", err)
- }
- insertPos := snapshot.FileSet().Position(nodes[1].End()).Offset
- if insertPos >= len(concreteSrc) {
- return nil, fmt.Errorf("insertion position is past the end of the file")
- }
- var buf bytes.Buffer
- buf.Write(concreteSrc[:insertPos])
- buf.WriteByte('\n')
- buf.Write(methodsSrc)
- buf.Write(concreteSrc[insertPos:])
- fset := token.NewFileSet()
- newF, err := parser.ParseFile(fset, parsedConcreteFile.File.Name.Name, buf.Bytes(), parser.ParseComments)
- if err != nil {
- return nil, fmt.Errorf("could not reparse file: %w", err)
- }
- for _, imp := range stubImports {
- astutil.AddNamedImport(fset, newF, imp.Name, imp.Path)
- }
- var source bytes.Buffer
- err = format.Node(&source, fset, newF)
- if err != nil {
- return nil, fmt.Errorf("format.Node: %w", err)
- }
- diffEdits, err := snapshot.View().Options().ComputeEdits(parsedConcreteFile.URI, string(parsedConcreteFile.Src), source.String())
- if err != nil {
- return nil, err
- }
- var edits []analysis.TextEdit
- for _, edit := range diffEdits {
- rng, err := edit.Span.Range(parsedConcreteFile.Mapper.Converter)
- if err != nil {
- return nil, err
- }
- edits = append(edits, analysis.TextEdit{
- Pos: rng.Start,
- End: rng.End,
- NewText: []byte(edit.NewText),
- })
- }
- return &analysis.SuggestedFix{
- TextEdits: edits,
- }, nil
-}
-
-// stubMethods returns the Go code of all methods
-// that implement the given interface
-func stubMethods(ctx context.Context, concreteFile *ast.File, si *stubmethods.StubInfo, snapshot Snapshot) ([]byte, []*stubImport, error) {
- ifacePkg, err := deducePkgFromTypes(ctx, snapshot, si.Interface)
- if err != nil {
- return nil, nil, err
- }
- si.Concrete.Obj().Type()
- concMS := types.NewMethodSet(types.NewPointer(si.Concrete.Obj().Type()))
- missing, err := missingMethods(ctx, snapshot, concMS, si.Concrete.Obj().Pkg(), si.Interface, ifacePkg, map[string]struct{}{})
- if err != nil {
- return nil, nil, fmt.Errorf("missingMethods: %w", err)
- }
- if len(missing) == 0 {
- return nil, nil, fmt.Errorf("no missing methods found")
- }
- var (
- stubImports []*stubImport
- methodsBuffer bytes.Buffer
- )
- for _, mi := range missing {
- for _, m := range mi.missing {
- // TODO(marwan-at-work): this should share the same logic with source.FormatVarType
- // as it also accounts for type aliases.
- sig := types.TypeString(m.Type(), stubmethods.RelativeToFiles(si.Concrete.Obj().Pkg(), concreteFile, mi.file, func(name, path string) {
- for _, imp := range stubImports {
- if imp.Name == name && imp.Path == path {
- return
- }
- }
- stubImports = append(stubImports, &stubImport{name, path})
- }))
- _, err = methodsBuffer.Write(printStubMethod(methodData{
- Method: m.Name(),
- Concrete: getStubReceiver(si),
- Interface: deduceIfaceName(si.Concrete.Obj().Pkg(), si.Interface.Pkg(), si.Interface),
- Signature: strings.TrimPrefix(sig, "func"),
- }))
- if err != nil {
- return nil, nil, fmt.Errorf("error printing method: %w", err)
- }
- methodsBuffer.WriteRune('\n')
- }
- }
- return methodsBuffer.Bytes(), stubImports, nil
-}
-
-// stubErr reurns the Go code implementation
-// of an error interface relevant to the
-// concrete type
-func stubErr(ctx context.Context, concreteFile *ast.File, si *stubmethods.StubInfo, snapshot Snapshot) []byte {
- return printStubMethod(methodData{
- Method: "Error",
- Interface: "error",
- Concrete: getStubReceiver(si),
- Signature: "() string",
- })
-}
-
-// getStubReceiver returns the concrete type's name as a method receiver.
-// It accounts for type parameters if they exist.
-func getStubReceiver(si *stubmethods.StubInfo) string {
- var concrete string
- if si.Pointer {
- concrete += "*"
- }
- concrete += si.Concrete.Obj().Name()
- concrete += FormatTypeParams(typeparams.ForNamed(si.Concrete))
- return concrete
-}
-
-type methodData struct {
- Method string
- Interface string
- Concrete string
- Signature string
-}
-
-// printStubMethod takes methodData and returns Go code that represents the given method such as:
-// // {{ .Method }} implements {{ .Interface }}
-// func ({{ .Concrete }}) {{ .Method }}{{ .Signature }} {
-// panic("unimplemented")
-// }
-func printStubMethod(md methodData) []byte {
- var b bytes.Buffer
- fmt.Fprintf(&b, "// %s implements %s\n", md.Method, md.Interface)
- fmt.Fprintf(&b, "func (%s) %s%s {\n\t", md.Concrete, md.Method, md.Signature)
- fmt.Fprintln(&b, `panic("unimplemented")`)
- fmt.Fprintln(&b, "}")
- return b.Bytes()
-}
-
-func deducePkgFromTypes(ctx context.Context, snapshot Snapshot, ifaceObj types.Object) (Package, error) {
- pkgs, err := snapshot.KnownPackages(ctx)
- if err != nil {
- return nil, err
- }
- for _, p := range pkgs {
- if p.PkgPath() == ifaceObj.Pkg().Path() {
- return p, nil
- }
- }
- return nil, fmt.Errorf("pkg %q not found", ifaceObj.Pkg().Path())
-}
-
-func deduceIfaceName(concretePkg, ifacePkg *types.Package, ifaceObj types.Object) string {
- if concretePkg.Path() == ifacePkg.Path() {
- return ifaceObj.Name()
- }
- return fmt.Sprintf("%s.%s", ifacePkg.Name(), ifaceObj.Name())
-}
-
-func getStubNodes(pgf *ParsedGoFile, pRng protocol.Range) ([]ast.Node, token.Pos, error) {
- spn, err := pgf.Mapper.RangeSpan(pRng)
- if err != nil {
- return nil, 0, err
- }
- rng, err := spn.Range(pgf.Mapper.Converter)
- if err != nil {
- return nil, 0, err
- }
- nodes, _ := astutil.PathEnclosingInterval(pgf.File, rng.Start, rng.End)
- return nodes, rng.Start, nil
-}
-
-/*
-missingMethods takes a concrete type and returns any missing methods for the given interface as well as
-any missing interface that might have been embedded to its parent. For example:
-
-type I interface {
- io.Writer
- Hello()
-}
-returns []*missingInterface{
- {
- iface: *types.Interface (io.Writer),
- file: *ast.File: io.go,
- missing []*types.Func{Write},
- },
- {
- iface: *types.Interface (I),
- file: *ast.File: myfile.go,
- missing: []*types.Func{Hello}
- },
-}
-*/
-func missingMethods(ctx context.Context, snapshot Snapshot, concMS *types.MethodSet, concPkg *types.Package, ifaceObj types.Object, ifacePkg Package, visited map[string]struct{}) ([]*missingInterface, error) {
- iface, ok := ifaceObj.Type().Underlying().(*types.Interface)
- if !ok {
- return nil, fmt.Errorf("expected %v to be an interface but got %T", iface, ifaceObj.Type().Underlying())
- }
- missing := []*missingInterface{}
- for i := 0; i < iface.NumEmbeddeds(); i++ {
- eiface := iface.Embedded(i).Obj()
- depPkg := ifacePkg
- if eiface.Pkg().Path() != ifacePkg.PkgPath() {
- var err error
- depPkg, err = ifacePkg.GetImport(eiface.Pkg().Path())
- if err != nil {
- return nil, err
- }
- }
- em, err := missingMethods(ctx, snapshot, concMS, concPkg, eiface, depPkg, visited)
- if err != nil {
- return nil, err
- }
- missing = append(missing, em...)
- }
- parsedFile, _, err := getStubFile(ctx, ifaceObj, snapshot)
- if err != nil {
- return nil, fmt.Errorf("error getting iface file: %w", err)
- }
- mi := &missingInterface{
- pkg: ifacePkg,
- iface: iface,
- file: parsedFile.File,
- }
- if mi.file == nil {
- return nil, fmt.Errorf("could not find ast.File for %v", ifaceObj.Name())
- }
- for i := 0; i < iface.NumExplicitMethods(); i++ {
- method := iface.ExplicitMethod(i)
- // if the concrete type does not have the interface method
- if concMS.Lookup(concPkg, method.Name()) == nil {
- if _, ok := visited[method.Name()]; !ok {
- mi.missing = append(mi.missing, method)
- visited[method.Name()] = struct{}{}
- }
- }
- if sel := concMS.Lookup(concPkg, method.Name()); sel != nil {
- implSig := sel.Type().(*types.Signature)
- ifaceSig := method.Type().(*types.Signature)
- if !types.Identical(ifaceSig, implSig) {
- return nil, fmt.Errorf("mimsatched %q function signatures:\nhave: %s\nwant: %s", method.Name(), implSig, ifaceSig)
- }
- }
- }
- if len(mi.missing) > 0 {
- missing = append(missing, mi)
- }
- return missing, nil
-}
-
-func getStubFile(ctx context.Context, obj types.Object, snapshot Snapshot) (*ParsedGoFile, VersionedFileHandle, error) {
- objPos := snapshot.FileSet().Position(obj.Pos())
- objFile := span.URIFromPath(objPos.Filename)
- objectFH := snapshot.FindFile(objFile)
- _, goFile, err := GetParsedFile(ctx, snapshot, objectFH, WidestPackage)
- if err != nil {
- return nil, nil, fmt.Errorf("GetParsedFile: %w", err)
- }
- return goFile, objectFH, nil
-}
-
-// missingInterface represents an interface
-// that has all or some of its methods missing
-// from the destination concrete type
-type missingInterface struct {
- iface *types.Interface
- file *ast.File
- pkg Package
- missing []*types.Func
-}
-
-// stubImport represents a newly added import
-// statement to the concrete type. If name is not
-// empty, then that import is required to have that name.
-type stubImport struct{ Name, Path string }
diff --git a/internal/lsp/source/symbols.go b/internal/lsp/source/symbols.go
deleted file mode 100644
index 16fb2223d..000000000
--- a/internal/lsp/source/symbols.go
+++ /dev/null
@@ -1,266 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package source
-
-import (
- "context"
- "fmt"
- "go/ast"
- "go/types"
-
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/lsp/protocol"
- errors "golang.org/x/xerrors"
-)
-
-func DocumentSymbols(ctx context.Context, snapshot Snapshot, fh FileHandle) ([]protocol.DocumentSymbol, error) {
- ctx, done := event.Start(ctx, "source.DocumentSymbols")
- defer done()
-
- pkg, pgf, err := GetParsedFile(ctx, snapshot, fh, NarrowestPackage)
- if err != nil {
- return nil, errors.Errorf("getting file for DocumentSymbols: %w", err)
- }
-
- info := pkg.GetTypesInfo()
- q := Qualifier(pgf.File, pkg.GetTypes(), info)
-
- symbolsToReceiver := make(map[types.Type]int)
- var symbols []protocol.DocumentSymbol
- for _, decl := range pgf.File.Decls {
- switch decl := decl.(type) {
- case *ast.FuncDecl:
- if decl.Name.Name == "_" {
- continue
- }
- if obj := info.ObjectOf(decl.Name); obj != nil {
- fs, err := funcSymbol(snapshot, pkg, decl, obj, q)
- if err != nil {
- return nil, err
- }
- // If function is a method, prepend the type of the method.
- if fs.Kind == protocol.Method {
- rtype := obj.Type().(*types.Signature).Recv().Type()
- fs.Name = fmt.Sprintf("(%s).%s", types.TypeString(rtype, q), fs.Name)
- }
- symbols = append(symbols, fs)
- }
- case *ast.GenDecl:
- for _, spec := range decl.Specs {
- switch spec := spec.(type) {
- case *ast.TypeSpec:
- if spec.Name.Name == "_" {
- continue
- }
- if obj := info.ObjectOf(spec.Name); obj != nil {
- ts, err := typeSymbol(snapshot, pkg, info, spec, obj, q)
- if err != nil {
- return nil, err
- }
- symbols = append(symbols, ts)
- symbolsToReceiver[obj.Type()] = len(symbols) - 1
- }
- case *ast.ValueSpec:
- for _, name := range spec.Names {
- if name.Name == "_" {
- continue
- }
- if obj := info.ObjectOf(name); obj != nil {
- vs, err := varSymbol(snapshot, pkg, decl, name, obj, q)
- if err != nil {
- return nil, err
- }
- symbols = append(symbols, vs)
- }
- }
- }
- }
- }
- }
- return symbols, nil
-}
-
-func funcSymbol(snapshot Snapshot, pkg Package, decl *ast.FuncDecl, obj types.Object, q types.Qualifier) (protocol.DocumentSymbol, error) {
- s := protocol.DocumentSymbol{
- Name: obj.Name(),
- Kind: protocol.Function,
- }
- var err error
- s.Range, err = nodeToProtocolRange(snapshot, pkg, decl)
- if err != nil {
- return protocol.DocumentSymbol{}, err
- }
- s.SelectionRange, err = nodeToProtocolRange(snapshot, pkg, decl.Name)
- if err != nil {
- return protocol.DocumentSymbol{}, err
- }
- sig, _ := obj.Type().(*types.Signature)
- if sig != nil {
- if sig.Recv() != nil {
- s.Kind = protocol.Method
- }
- s.Detail += "("
- for i := 0; i < sig.Params().Len(); i++ {
- if i > 0 {
- s.Detail += ", "
- }
- param := sig.Params().At(i)
- label := types.TypeString(param.Type(), q)
- if param.Name() != "" {
- label = fmt.Sprintf("%s %s", param.Name(), label)
- }
- s.Detail += label
- }
- s.Detail += ")"
- }
- return s, nil
-}
-
-func typeSymbol(snapshot Snapshot, pkg Package, info *types.Info, spec *ast.TypeSpec, obj types.Object, qf types.Qualifier) (protocol.DocumentSymbol, error) {
- s := protocol.DocumentSymbol{
- Name: obj.Name(),
- }
- s.Detail, _ = FormatType(obj.Type(), qf)
- s.Kind = typeToKind(obj.Type())
-
- var err error
- s.Range, err = nodeToProtocolRange(snapshot, pkg, spec)
- if err != nil {
- return protocol.DocumentSymbol{}, err
- }
- s.SelectionRange, err = nodeToProtocolRange(snapshot, pkg, spec.Name)
- if err != nil {
- return protocol.DocumentSymbol{}, err
- }
- t, objIsStruct := obj.Type().Underlying().(*types.Struct)
- st, specIsStruct := spec.Type.(*ast.StructType)
- if objIsStruct && specIsStruct {
- for i := 0; i < t.NumFields(); i++ {
- f := t.Field(i)
- child := protocol.DocumentSymbol{
- Name: f.Name(),
- Kind: protocol.Field,
- }
- child.Detail, _ = FormatType(f.Type(), qf)
-
- spanNode, selectionNode := nodesForStructField(i, st)
- if span, err := nodeToProtocolRange(snapshot, pkg, spanNode); err == nil {
- child.Range = span
- }
- if span, err := nodeToProtocolRange(snapshot, pkg, selectionNode); err == nil {
- child.SelectionRange = span
- }
- s.Children = append(s.Children, child)
- }
- }
-
- ti, objIsInterface := obj.Type().Underlying().(*types.Interface)
- ai, specIsInterface := spec.Type.(*ast.InterfaceType)
- if objIsInterface && specIsInterface {
- for i := 0; i < ti.NumExplicitMethods(); i++ {
- method := ti.ExplicitMethod(i)
- child := protocol.DocumentSymbol{
- Name: method.Name(),
- Kind: protocol.Method,
- }
-
- var spanNode, selectionNode ast.Node
- Methods:
- for _, f := range ai.Methods.List {
- for _, id := range f.Names {
- if id.Name == method.Name() {
- spanNode, selectionNode = f, id
- break Methods
- }
- }
- }
- child.Range, err = nodeToProtocolRange(snapshot, pkg, spanNode)
- if err != nil {
- return protocol.DocumentSymbol{}, err
- }
- child.SelectionRange, err = nodeToProtocolRange(snapshot, pkg, selectionNode)
- if err != nil {
- return protocol.DocumentSymbol{}, err
- }
- s.Children = append(s.Children, child)
- }
-
- for i := 0; i < ti.NumEmbeddeds(); i++ {
- embedded := ti.EmbeddedType(i)
- nt, isNamed := embedded.(*types.Named)
- if !isNamed {
- continue
- }
-
- child := protocol.DocumentSymbol{
- Name: types.TypeString(embedded, qf),
- }
- child.Kind = typeToKind(embedded)
- var spanNode, selectionNode ast.Node
- Embeddeds:
- for _, f := range ai.Methods.List {
- if len(f.Names) > 0 {
- continue
- }
-
- if t := info.TypeOf(f.Type); types.Identical(nt, t) {
- spanNode, selectionNode = f, f.Type
- break Embeddeds
- }
- }
- child.Range, err = nodeToProtocolRange(snapshot, pkg, spanNode)
- if err != nil {
- return protocol.DocumentSymbol{}, err
- }
- child.SelectionRange, err = nodeToProtocolRange(snapshot, pkg, selectionNode)
- if err != nil {
- return protocol.DocumentSymbol{}, err
- }
- s.Children = append(s.Children, child)
- }
- }
- return s, nil
-}
-
-func nodesForStructField(i int, st *ast.StructType) (span, selection ast.Node) {
- j := 0
- for _, field := range st.Fields.List {
- if len(field.Names) == 0 {
- if i == j {
- return field, field.Type
- }
- j++
- continue
- }
- for _, name := range field.Names {
- if i == j {
- return field, name
- }
- j++
- }
- }
- return nil, nil
-}
-
-func varSymbol(snapshot Snapshot, pkg Package, decl ast.Node, name *ast.Ident, obj types.Object, q types.Qualifier) (protocol.DocumentSymbol, error) {
- s := protocol.DocumentSymbol{
- Name: obj.Name(),
- Kind: protocol.Variable,
- }
- if _, ok := obj.(*types.Const); ok {
- s.Kind = protocol.Constant
- }
- var err error
- s.Range, err = nodeToProtocolRange(snapshot, pkg, decl)
- if err != nil {
- return protocol.DocumentSymbol{}, err
- }
- s.SelectionRange, err = nodeToProtocolRange(snapshot, pkg, name)
- if err != nil {
- return protocol.DocumentSymbol{}, err
- }
- s.Detail = types.TypeString(obj.Type(), q)
- return s, nil
-}
diff --git a/internal/lsp/source/types_format.go b/internal/lsp/source/types_format.go
deleted file mode 100644
index fcbf228ec..000000000
--- a/internal/lsp/source/types_format.go
+++ /dev/null
@@ -1,459 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package source
-
-import (
- "bytes"
- "context"
- "fmt"
- "go/ast"
- "go/doc"
- "go/printer"
- "go/token"
- "go/types"
- "strings"
-
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/lsp/debug/tag"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/typeparams"
-)
-
-// FormatType returns the detail and kind for a types.Type.
-func FormatType(typ types.Type, qf types.Qualifier) (detail string, kind protocol.CompletionItemKind) {
- if types.IsInterface(typ) {
- detail = "interface{...}"
- kind = protocol.InterfaceCompletion
- } else if _, ok := typ.(*types.Struct); ok {
- detail = "struct{...}"
- kind = protocol.StructCompletion
- } else if typ != typ.Underlying() {
- detail, kind = FormatType(typ.Underlying(), qf)
- } else {
- detail = types.TypeString(typ, qf)
- kind = protocol.ClassCompletion
- }
- return detail, kind
-}
-
-type signature struct {
- name, doc string
- typeParams, params, results []string
- variadic bool
- needResultParens bool
-}
-
-func (s *signature) Format() string {
- var b strings.Builder
- b.WriteByte('(')
- for i, p := range s.params {
- if i > 0 {
- b.WriteString(", ")
- }
- b.WriteString(p)
- }
- b.WriteByte(')')
-
- // Add space between parameters and results.
- if len(s.results) > 0 {
- b.WriteByte(' ')
- }
- if s.needResultParens {
- b.WriteByte('(')
- }
- for i, r := range s.results {
- if i > 0 {
- b.WriteString(", ")
- }
- b.WriteString(r)
- }
- if s.needResultParens {
- b.WriteByte(')')
- }
- return b.String()
-}
-
-func (s *signature) TypeParams() []string {
- return s.typeParams
-}
-
-func (s *signature) Params() []string {
- return s.params
-}
-
-// NewBuiltinSignature returns signature for the builtin object with a given
-// name, if a builtin object with the name exists.
-func NewBuiltinSignature(ctx context.Context, s Snapshot, name string) (*signature, error) {
- builtin, err := s.BuiltinFile(ctx)
- if err != nil {
- return nil, err
- }
- obj := builtin.File.Scope.Lookup(name)
- if obj == nil {
- return nil, fmt.Errorf("no builtin object for %s", name)
- }
- decl, ok := obj.Decl.(*ast.FuncDecl)
- if !ok {
- return nil, fmt.Errorf("no function declaration for builtin: %s", name)
- }
- if decl.Type == nil {
- return nil, fmt.Errorf("no type for builtin decl %s", decl.Name)
- }
- var variadic bool
- if decl.Type.Params.List != nil {
- numParams := len(decl.Type.Params.List)
- lastParam := decl.Type.Params.List[numParams-1]
- if _, ok := lastParam.Type.(*ast.Ellipsis); ok {
- variadic = true
- }
- }
- params, _ := formatFieldList(ctx, s, decl.Type.Params, variadic)
- results, needResultParens := formatFieldList(ctx, s, decl.Type.Results, false)
- d := decl.Doc.Text()
- switch s.View().Options().HoverKind {
- case SynopsisDocumentation:
- d = doc.Synopsis(d)
- case NoDocumentation:
- d = ""
- }
- return &signature{
- doc: d,
- name: name,
- needResultParens: needResultParens,
- params: params,
- results: results,
- variadic: variadic,
- }, nil
-}
-
-var replacer = strings.NewReplacer(
- `ComplexType`, `complex128`,
- `FloatType`, `float64`,
- `IntegerType`, `int`,
-)
-
-func formatFieldList(ctx context.Context, snapshot Snapshot, list *ast.FieldList, variadic bool) ([]string, bool) {
- if list == nil {
- return nil, false
- }
- var writeResultParens bool
- var result []string
- for i := 0; i < len(list.List); i++ {
- if i >= 1 {
- writeResultParens = true
- }
- p := list.List[i]
- cfg := printer.Config{Mode: printer.UseSpaces | printer.TabIndent, Tabwidth: 4}
- b := &bytes.Buffer{}
- if err := cfg.Fprint(b, snapshot.FileSet(), p.Type); err != nil {
- event.Error(ctx, "unable to print type", nil, tag.Type.Of(p.Type))
- continue
- }
- typ := replacer.Replace(b.String())
- if len(p.Names) == 0 {
- result = append(result, typ)
- }
- for _, name := range p.Names {
- if name.Name != "" {
- if i == 0 {
- writeResultParens = true
- }
- result = append(result, fmt.Sprintf("%s %s", name.Name, typ))
- } else {
- result = append(result, typ)
- }
- }
- }
- if variadic {
- result[len(result)-1] = strings.Replace(result[len(result)-1], "[]", "...", 1)
- }
- return result, writeResultParens
-}
-
-// FormatTypeParams turns TypeParamList into its Go representation, such as:
-// [T, Y]. Note that it does not print constraints as this is mainly used for
-// formatting type params in method receivers.
-func FormatTypeParams(tparams *typeparams.TypeParamList) string {
- if tparams == nil || tparams.Len() == 0 {
- return ""
- }
- var buf bytes.Buffer
- buf.WriteByte('[')
- for i := 0; i < tparams.Len(); i++ {
- if i > 0 {
- buf.WriteString(", ")
- }
- buf.WriteString(tparams.At(i).Obj().Name())
- }
- buf.WriteByte(']')
- return buf.String()
-}
-
-// NewSignature returns formatted signature for a types.Signature struct.
-func NewSignature(ctx context.Context, s Snapshot, pkg Package, sig *types.Signature, comment *ast.CommentGroup, qf types.Qualifier) *signature {
- var tparams []string
- tpList := typeparams.ForSignature(sig)
- for i := 0; i < tpList.Len(); i++ {
- tparam := tpList.At(i)
- // TODO: is it possible to reuse the logic from FormatVarType here?
- s := tparam.Obj().Name() + " " + tparam.Constraint().String()
- tparams = append(tparams, s)
- }
-
- params := make([]string, 0, sig.Params().Len())
- for i := 0; i < sig.Params().Len(); i++ {
- el := sig.Params().At(i)
- typ := FormatVarType(ctx, s, pkg, el, qf)
- p := typ
- if el.Name() != "" {
- p = el.Name() + " " + typ
- }
- params = append(params, p)
- }
-
- var needResultParens bool
- results := make([]string, 0, sig.Results().Len())
- for i := 0; i < sig.Results().Len(); i++ {
- if i >= 1 {
- needResultParens = true
- }
- el := sig.Results().At(i)
- typ := FormatVarType(ctx, s, pkg, el, qf)
- if el.Name() == "" {
- results = append(results, typ)
- } else {
- if i == 0 {
- needResultParens = true
- }
- results = append(results, el.Name()+" "+typ)
- }
- }
- var d string
- if comment != nil {
- d = comment.Text()
- }
- switch s.View().Options().HoverKind {
- case SynopsisDocumentation:
- d = doc.Synopsis(d)
- case NoDocumentation:
- d = ""
- }
- return &signature{
- doc: d,
- typeParams: tparams,
- params: params,
- results: results,
- variadic: sig.Variadic(),
- needResultParens: needResultParens,
- }
-}
-
-// FormatVarType formats a *types.Var, accounting for type aliases.
-// To do this, it looks in the AST of the file in which the object is declared.
-// On any errors, it always falls back to types.TypeString.
-func FormatVarType(ctx context.Context, snapshot Snapshot, srcpkg Package, obj *types.Var, qf types.Qualifier) string {
- pkg, err := FindPackageFromPos(ctx, snapshot, obj.Pos())
- if err != nil {
- return types.TypeString(obj.Type(), qf)
- }
-
- expr, err := varType(ctx, snapshot, pkg, obj)
- if err != nil {
- return types.TypeString(obj.Type(), qf)
- }
-
- // If the given expr refers to a type parameter, then use the
- // object's Type instead of the type parameter declaration. This helps
- // format the instantiated type as opposed to the original undeclared
- // generic type.
- if typeparams.IsTypeParam(pkg.GetTypesInfo().Types[expr].Type) {
- return types.TypeString(obj.Type(), qf)
- }
-
- // The type names in the AST may not be correctly qualified.
- // Determine the package name to use based on the package that originated
- // the query and the package in which the type is declared.
- // We then qualify the value by cloning the AST node and editing it.
- clonedInfo := make(map[token.Pos]*types.PkgName)
- qualified := cloneExpr(expr, pkg.GetTypesInfo(), clonedInfo)
-
- // If the request came from a different package than the one in which the
- // types are defined, we may need to modify the qualifiers.
- qualified = qualifyExpr(qualified, srcpkg, pkg, clonedInfo, qf)
- fmted := FormatNode(snapshot.FileSet(), qualified)
- return fmted
-}
-
-// varType returns the type expression for a *types.Var.
-func varType(ctx context.Context, snapshot Snapshot, pkg Package, obj *types.Var) (ast.Expr, error) {
- field, err := snapshot.PosToField(ctx, pkg, obj.Pos())
- if err != nil {
- return nil, err
- }
- if field == nil {
- return nil, fmt.Errorf("no declaration for object %s", obj.Name())
- }
- return field.Type, nil
-}
-
-// qualifyExpr applies the "pkgName." prefix to any *ast.Ident in the expr.
-func qualifyExpr(expr ast.Expr, srcpkg, pkg Package, clonedInfo map[token.Pos]*types.PkgName, qf types.Qualifier) ast.Expr {
- ast.Inspect(expr, func(n ast.Node) bool {
- switch n := n.(type) {
- case *ast.ArrayType, *ast.ChanType, *ast.Ellipsis,
- *ast.FuncType, *ast.MapType, *ast.ParenExpr,
- *ast.StarExpr, *ast.StructType, *ast.FieldList, *ast.Field:
- // These are the only types that are cloned by cloneExpr below,
- // so these are the only types that we can traverse and potentially
- // modify. This is not an ideal approach, but it works for now.
-
- // TODO(rFindley): can we eliminate this filtering entirely? This caused
- // bugs in the past (golang/go#50539)
- return true
- case *ast.SelectorExpr:
- // We may need to change any selectors in which the X is a package
- // name and the Sel is exported.
- x, ok := n.X.(*ast.Ident)
- if !ok {
- return false
- }
- obj, ok := clonedInfo[x.Pos()]
- if !ok {
- return false
- }
- x.Name = qf(obj.Imported())
- return false
- case *ast.Ident:
- if srcpkg == pkg {
- return false
- }
- // Only add the qualifier if the identifier is exported.
- if ast.IsExported(n.Name) {
- pkgName := qf(pkg.GetTypes())
- n.Name = pkgName + "." + n.Name
- }
- }
- return false
- })
- return expr
-}
-
-// cloneExpr only clones expressions that appear in the parameters or return
-// values of a function declaration. The original expression may be returned
-// to the caller in 2 cases:
-// (1) The expression has no pointer fields.
-// (2) The expression cannot appear in an *ast.FuncType, making it
-// unnecessary to clone.
-// This function also keeps track of selector expressions in which the X is a
-// package name and marks them in a map along with their type information, so
-// that this information can be used when rewriting the expression.
-//
-// NOTE: This function is tailored to the use case of qualifyExpr, and should
-// be used with caution.
-func cloneExpr(expr ast.Expr, info *types.Info, clonedInfo map[token.Pos]*types.PkgName) ast.Expr {
- switch expr := expr.(type) {
- case *ast.ArrayType:
- return &ast.ArrayType{
- Lbrack: expr.Lbrack,
- Elt: cloneExpr(expr.Elt, info, clonedInfo),
- Len: expr.Len,
- }
- case *ast.ChanType:
- return &ast.ChanType{
- Arrow: expr.Arrow,
- Begin: expr.Begin,
- Dir: expr.Dir,
- Value: cloneExpr(expr.Value, info, clonedInfo),
- }
- case *ast.Ellipsis:
- return &ast.Ellipsis{
- Ellipsis: expr.Ellipsis,
- Elt: cloneExpr(expr.Elt, info, clonedInfo),
- }
- case *ast.FuncType:
- return &ast.FuncType{
- Func: expr.Func,
- Params: cloneFieldList(expr.Params, info, clonedInfo),
- Results: cloneFieldList(expr.Results, info, clonedInfo),
- }
- case *ast.Ident:
- return cloneIdent(expr)
- case *ast.MapType:
- return &ast.MapType{
- Map: expr.Map,
- Key: cloneExpr(expr.Key, info, clonedInfo),
- Value: cloneExpr(expr.Value, info, clonedInfo),
- }
- case *ast.ParenExpr:
- return &ast.ParenExpr{
- Lparen: expr.Lparen,
- Rparen: expr.Rparen,
- X: cloneExpr(expr.X, info, clonedInfo),
- }
- case *ast.SelectorExpr:
- s := &ast.SelectorExpr{
- Sel: cloneIdent(expr.Sel),
- X: cloneExpr(expr.X, info, clonedInfo),
- }
- if x, ok := expr.X.(*ast.Ident); ok && ast.IsExported(expr.Sel.Name) {
- if obj, ok := info.ObjectOf(x).(*types.PkgName); ok {
- clonedInfo[s.X.Pos()] = obj
- }
- }
- return s
- case *ast.StarExpr:
- return &ast.StarExpr{
- Star: expr.Star,
- X: cloneExpr(expr.X, info, clonedInfo),
- }
- case *ast.StructType:
- return &ast.StructType{
- Struct: expr.Struct,
- Fields: cloneFieldList(expr.Fields, info, clonedInfo),
- Incomplete: expr.Incomplete,
- }
- default:
- return expr
- }
-}
-
-func cloneFieldList(fl *ast.FieldList, info *types.Info, clonedInfo map[token.Pos]*types.PkgName) *ast.FieldList {
- if fl == nil {
- return nil
- }
- if fl.List == nil {
- return &ast.FieldList{
- Closing: fl.Closing,
- Opening: fl.Opening,
- }
- }
- list := make([]*ast.Field, 0, len(fl.List))
- for _, f := range fl.List {
- var names []*ast.Ident
- for _, n := range f.Names {
- names = append(names, cloneIdent(n))
- }
- list = append(list, &ast.Field{
- Comment: f.Comment,
- Doc: f.Doc,
- Names: names,
- Tag: f.Tag,
- Type: cloneExpr(f.Type, info, clonedInfo),
- })
- }
- return &ast.FieldList{
- Closing: fl.Closing,
- Opening: fl.Opening,
- List: list,
- }
-}
-
-func cloneIdent(ident *ast.Ident) *ast.Ident {
- return &ast.Ident{
- NamePos: ident.NamePos,
- Name: ident.Name,
- Obj: ident.Obj,
- }
-}
diff --git a/internal/lsp/source/util.go b/internal/lsp/source/util.go
deleted file mode 100644
index 71892eaa1..000000000
--- a/internal/lsp/source/util.go
+++ /dev/null
@@ -1,586 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package source
-
-import (
- "context"
- "fmt"
- "go/ast"
- "go/printer"
- "go/token"
- "go/types"
- "path/filepath"
- "regexp"
- "sort"
- "strconv"
- "strings"
-
- "golang.org/x/mod/modfile"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/span"
- errors "golang.org/x/xerrors"
-)
-
-// MappedRange provides mapped protocol.Range for a span.Range, accounting for
-// UTF-16 code points.
-type MappedRange struct {
- spanRange span.Range
- m *protocol.ColumnMapper
-
- // protocolRange is the result of converting the spanRange using the mapper.
- // It is computed on-demand.
- protocolRange *protocol.Range
-}
-
-// NewMappedRange returns a MappedRange for the given start and end token.Pos.
-func NewMappedRange(fset *token.FileSet, m *protocol.ColumnMapper, start, end token.Pos) MappedRange {
- return MappedRange{
- spanRange: span.Range{
- FileSet: fset,
- Start: start,
- End: end,
- Converter: m.Converter,
- },
- m: m,
- }
-}
-
-func (s MappedRange) Range() (protocol.Range, error) {
- if s.protocolRange == nil {
- spn, err := s.spanRange.Span()
- if err != nil {
- return protocol.Range{}, err
- }
- prng, err := s.m.Range(spn)
- if err != nil {
- return protocol.Range{}, err
- }
- s.protocolRange = &prng
- }
- return *s.protocolRange, nil
-}
-
-func (s MappedRange) Span() (span.Span, error) {
- return s.spanRange.Span()
-}
-
-func (s MappedRange) SpanRange() span.Range {
- return s.spanRange
-}
-
-func (s MappedRange) URI() span.URI {
- return s.m.URI
-}
-
-// GetParsedFile is a convenience function that extracts the Package and
-// ParsedGoFile for a file in a Snapshot. pkgPolicy is one of NarrowestPackage/
-// WidestPackage.
-func GetParsedFile(ctx context.Context, snapshot Snapshot, fh FileHandle, pkgPolicy PackageFilter) (Package, *ParsedGoFile, error) {
- pkg, err := snapshot.PackageForFile(ctx, fh.URI(), TypecheckWorkspace, pkgPolicy)
- if err != nil {
- return nil, nil, err
- }
- pgh, err := pkg.File(fh.URI())
- return pkg, pgh, err
-}
-
-func IsGenerated(ctx context.Context, snapshot Snapshot, uri span.URI) bool {
- fh, err := snapshot.GetFile(ctx, uri)
- if err != nil {
- return false
- }
- pgf, err := snapshot.ParseGo(ctx, fh, ParseHeader)
- if err != nil {
- return false
- }
- tok := snapshot.FileSet().File(pgf.File.Pos())
- if tok == nil {
- return false
- }
- for _, commentGroup := range pgf.File.Comments {
- for _, comment := range commentGroup.List {
- if matched := generatedRx.MatchString(comment.Text); matched {
- // Check if comment is at the beginning of the line in source.
- if pos := tok.Position(comment.Slash); pos.Column == 1 {
- return true
- }
- }
- }
- }
- return false
-}
-
-func nodeToProtocolRange(snapshot Snapshot, pkg Package, n ast.Node) (protocol.Range, error) {
- mrng, err := posToMappedRange(snapshot, pkg, n.Pos(), n.End())
- if err != nil {
- return protocol.Range{}, err
- }
- return mrng.Range()
-}
-
-func objToMappedRange(snapshot Snapshot, pkg Package, obj types.Object) (MappedRange, error) {
- if pkgName, ok := obj.(*types.PkgName); ok {
- // An imported Go package has a package-local, unqualified name.
- // When the name matches the imported package name, there is no
- // identifier in the import spec with the local package name.
- //
- // For example:
- // import "go/ast" // name "ast" matches package name
- // import a "go/ast" // name "a" does not match package name
- //
- // When the identifier does not appear in the source, have the range
- // of the object be the import path, including quotes.
- if pkgName.Imported().Name() == pkgName.Name() {
- return posToMappedRange(snapshot, pkg, obj.Pos(), obj.Pos()+token.Pos(len(pkgName.Imported().Path())+2))
- }
- }
- return nameToMappedRange(snapshot, pkg, obj.Pos(), obj.Name())
-}
-
-func nameToMappedRange(snapshot Snapshot, pkg Package, pos token.Pos, name string) (MappedRange, error) {
- return posToMappedRange(snapshot, pkg, pos, pos+token.Pos(len(name)))
-}
-
-func posToMappedRange(snapshot Snapshot, pkg Package, pos, end token.Pos) (MappedRange, error) {
- logicalFilename := snapshot.FileSet().File(pos).Position(pos).Filename
- pgf, _, err := findFileInDeps(pkg, span.URIFromPath(logicalFilename))
- if err != nil {
- return MappedRange{}, err
- }
- if !pos.IsValid() {
- return MappedRange{}, errors.Errorf("invalid position for %v", pos)
- }
- if !end.IsValid() {
- return MappedRange{}, errors.Errorf("invalid position for %v", end)
- }
- return NewMappedRange(snapshot.FileSet(), pgf.Mapper, pos, end), nil
-}
-
-// Matches cgo generated comment as well as the proposed standard:
-// https://golang.org/s/generatedcode
-var generatedRx = regexp.MustCompile(`// .*DO NOT EDIT\.?`)
-
-// FileKindForLang returns the file kind associated with the given language ID,
-// or UnknownKind if the language ID is not recognized.
-func FileKindForLang(langID string) FileKind {
- switch langID {
- case "go":
- return Go
- case "go.mod":
- return Mod
- case "go.sum":
- return Sum
- case "tmpl", "gotmpl":
- return Tmpl
- case "go.work":
- return Work
- default:
- return UnknownKind
- }
-}
-
-func (k FileKind) String() string {
- switch k {
- case Go:
- return "go"
- case Mod:
- return "go.mod"
- case Sum:
- return "go.sum"
- case Tmpl:
- return "tmpl"
- case Work:
- return "go.work"
- default:
- return fmt.Sprintf("unk%d", k)
- }
-}
-
-// nodeAtPos returns the index and the node whose position is contained inside
-// the node list.
-func nodeAtPos(nodes []ast.Node, pos token.Pos) (ast.Node, int) {
- if nodes == nil {
- return nil, -1
- }
- for i, node := range nodes {
- if node.Pos() <= pos && pos <= node.End() {
- return node, i
- }
- }
- return nil, -1
-}
-
-// IsInterface returns if a types.Type is an interface
-func IsInterface(T types.Type) bool {
- return T != nil && types.IsInterface(T)
-}
-
-// FormatNode returns the "pretty-print" output for an ast node.
-func FormatNode(fset *token.FileSet, n ast.Node) string {
- var buf strings.Builder
- if err := printer.Fprint(&buf, fset, n); err != nil {
- return ""
- }
- return buf.String()
-}
-
-// Deref returns a pointer's element type, traversing as many levels as needed.
-// Otherwise it returns typ.
-//
-// It can return a pointer type for cyclic types (see golang/go#45510).
-func Deref(typ types.Type) types.Type {
- var seen map[types.Type]struct{}
- for {
- p, ok := typ.Underlying().(*types.Pointer)
- if !ok {
- return typ
- }
- if _, ok := seen[p.Elem()]; ok {
- return typ
- }
-
- typ = p.Elem()
-
- if seen == nil {
- seen = make(map[types.Type]struct{})
- }
- seen[typ] = struct{}{}
- }
-}
-
-func SortDiagnostics(d []*Diagnostic) {
- sort.Slice(d, func(i int, j int) bool {
- return CompareDiagnostic(d[i], d[j]) < 0
- })
-}
-
-func CompareDiagnostic(a, b *Diagnostic) int {
- if r := protocol.CompareRange(a.Range, b.Range); r != 0 {
- return r
- }
- if a.Source < b.Source {
- return -1
- }
- if a.Message < b.Message {
- return -1
- }
- if a.Message == b.Message {
- return 0
- }
- return 1
-}
-
-// FindPackageFromPos finds the first package containing pos in its
-// type-checked AST.
-func FindPackageFromPos(ctx context.Context, snapshot Snapshot, pos token.Pos) (Package, error) {
- tok := snapshot.FileSet().File(pos)
- if tok == nil {
- return nil, errors.Errorf("no file for pos %v", pos)
- }
- uri := span.URIFromPath(tok.Name())
- pkgs, err := snapshot.PackagesForFile(ctx, uri, TypecheckAll, true)
- if err != nil {
- return nil, err
- }
- // Only return the package if it actually type-checked the given position.
- for _, pkg := range pkgs {
- parsed, err := pkg.File(uri)
- if err != nil {
- return nil, err
- }
- if parsed == nil {
- continue
- }
- if parsed.Tok.Base() != tok.Base() {
- continue
- }
- return pkg, nil
- }
- return nil, errors.Errorf("no package for given file position")
-}
-
-// findFileInDeps finds uri in pkg or its dependencies.
-func findFileInDeps(pkg Package, uri span.URI) (*ParsedGoFile, Package, error) {
- queue := []Package{pkg}
- seen := make(map[string]bool)
-
- for len(queue) > 0 {
- pkg := queue[0]
- queue = queue[1:]
- seen[pkg.ID()] = true
-
- if pgf, err := pkg.File(uri); err == nil {
- return pgf, pkg, nil
- }
- for _, dep := range pkg.Imports() {
- if !seen[dep.ID()] {
- queue = append(queue, dep)
- }
- }
- }
- return nil, nil, errors.Errorf("no file for %s in package %s", uri, pkg.ID())
-}
-
-// ImportPath returns the unquoted import path of s,
-// or "" if the path is not properly quoted.
-func ImportPath(s *ast.ImportSpec) string {
- t, err := strconv.Unquote(s.Path.Value)
- if err != nil {
- return ""
- }
- return t
-}
-
-// NodeContains returns true if a node encloses a given position pos.
-func NodeContains(n ast.Node, pos token.Pos) bool {
- return n != nil && n.Pos() <= pos && pos <= n.End()
-}
-
-// CollectScopes returns all scopes in an ast path, ordered as innermost scope
-// first.
-func CollectScopes(info *types.Info, path []ast.Node, pos token.Pos) []*types.Scope {
- // scopes[i], where i<len(path), is the possibly nil Scope of path[i].
- var scopes []*types.Scope
- for _, n := range path {
- // Include *FuncType scope if pos is inside the function body.
- switch node := n.(type) {
- case *ast.FuncDecl:
- if node.Body != nil && NodeContains(node.Body, pos) {
- n = node.Type
- }
- case *ast.FuncLit:
- if node.Body != nil && NodeContains(node.Body, pos) {
- n = node.Type
- }
- }
- scopes = append(scopes, info.Scopes[n])
- }
- return scopes
-}
-
-// Qualifier returns a function that appropriately formats a types.PkgName
-// appearing in a *ast.File.
-func Qualifier(f *ast.File, pkg *types.Package, info *types.Info) types.Qualifier {
- // Construct mapping of import paths to their defined or implicit names.
- imports := make(map[*types.Package]string)
- for _, imp := range f.Imports {
- var obj types.Object
- if imp.Name != nil {
- obj = info.Defs[imp.Name]
- } else {
- obj = info.Implicits[imp]
- }
- if pkgname, ok := obj.(*types.PkgName); ok {
- imports[pkgname.Imported()] = pkgname.Name()
- }
- }
- // Define qualifier to replace full package paths with names of the imports.
- return func(p *types.Package) string {
- if p == pkg {
- return ""
- }
- if name, ok := imports[p]; ok {
- if name == "." {
- return ""
- }
- return name
- }
- return p.Name()
- }
-}
-
-// isDirective reports whether c is a comment directive.
-//
-// Copied and adapted from go/src/go/ast/ast.go.
-func isDirective(c string) bool {
- if len(c) < 3 {
- return false
- }
- if c[1] != '/' {
- return false
- }
- //-style comment (no newline at the end)
- c = c[2:]
- if len(c) == 0 {
- // empty line
- return false
- }
- // "//line " is a line directive.
- // (The // has been removed.)
- if strings.HasPrefix(c, "line ") {
- return true
- }
-
- // "//[a-z0-9]+:[a-z0-9]"
- // (The // has been removed.)
- colon := strings.Index(c, ":")
- if colon <= 0 || colon+1 >= len(c) {
- return false
- }
- for i := 0; i <= colon+1; i++ {
- if i == colon {
- continue
- }
- b := c[i]
- if !('a' <= b && b <= 'z' || '0' <= b && b <= '9') {
- return false
- }
- }
- return true
-}
-
-// honorSymlinks toggles whether or not we consider symlinks when comparing
-// file or directory URIs.
-const honorSymlinks = false
-
-func CompareURI(left, right span.URI) int {
- if honorSymlinks {
- return span.CompareURI(left, right)
- }
- if left == right {
- return 0
- }
- if left < right {
- return -1
- }
- return 1
-}
-
-// InDir checks whether path is in the file tree rooted at dir.
-// InDir makes some effort to succeed even in the presence of symbolic links.
-//
-// Copied and slightly adjusted from go/src/cmd/go/internal/search/search.go.
-func InDir(dir, path string) bool {
- if inDirLex(dir, path) {
- return true
- }
- if !honorSymlinks {
- return false
- }
- xpath, err := filepath.EvalSymlinks(path)
- if err != nil || xpath == path {
- xpath = ""
- } else {
- if inDirLex(dir, xpath) {
- return true
- }
- }
-
- xdir, err := filepath.EvalSymlinks(dir)
- if err == nil && xdir != dir {
- if inDirLex(xdir, path) {
- return true
- }
- if xpath != "" {
- if inDirLex(xdir, xpath) {
- return true
- }
- }
- }
- return false
-}
-
-// inDirLex is like inDir but only checks the lexical form of the file names.
-// It does not consider symbolic links.
-//
-// Copied from go/src/cmd/go/internal/search/search.go.
-func inDirLex(dir, path string) bool {
- pv := strings.ToUpper(filepath.VolumeName(path))
- dv := strings.ToUpper(filepath.VolumeName(dir))
- path = path[len(pv):]
- dir = dir[len(dv):]
- switch {
- default:
- return false
- case pv != dv:
- return false
- case len(path) == len(dir):
- if path == dir {
- return true
- }
- return false
- case dir == "":
- return path != ""
- case len(path) > len(dir):
- if dir[len(dir)-1] == filepath.Separator {
- if path[:len(dir)] == dir {
- return path[len(dir):] != ""
- }
- return false
- }
- if path[len(dir)] == filepath.Separator && path[:len(dir)] == dir {
- if len(path) == len(dir)+1 {
- return true
- }
- return path[len(dir)+1:] != ""
- }
- return false
- }
-}
-
-// IsValidImport returns whether importPkgPath is importable
-// by pkgPath
-func IsValidImport(pkgPath, importPkgPath string) bool {
- i := strings.LastIndex(string(importPkgPath), "/internal/")
- if i == -1 {
- return true
- }
- if IsCommandLineArguments(string(pkgPath)) {
- return true
- }
- return strings.HasPrefix(string(pkgPath), string(importPkgPath[:i]))
-}
-
-// IsCommandLineArguments reports whether a given value denotes
-// "command-line-arguments" package, which is a package with an unknown ID
-// created by the go command. It can have a test variant, which is why callers
-// should not check that a value equals "command-line-arguments" directly.
-func IsCommandLineArguments(s string) bool {
- return strings.Contains(s, "command-line-arguments")
-}
-
-// Offset returns tok.Offset(pos), but first checks that the pos is in range
-// for the given file.
-func Offset(tok *token.File, pos token.Pos) (int, error) {
- if !InRange(tok, pos) {
- return -1, fmt.Errorf("pos %v is not in range for file [%v:%v)", pos, tok.Base(), tok.Base()+tok.Size())
- }
- return tok.Offset(pos), nil
-}
-
-// Pos returns tok.Pos(offset), but first checks that the offset is valid for
-// the given file.
-func Pos(tok *token.File, offset int) (token.Pos, error) {
- if offset < 0 || offset > tok.Size() {
- return token.NoPos, fmt.Errorf("offset %v is not in range for file of size %v", offset, tok.Size())
- }
- return tok.Pos(offset), nil
-}
-
-// InRange reports whether the given position is in the given token.File.
-func InRange(tok *token.File, pos token.Pos) bool {
- size := tok.Pos(tok.Size())
- return int(pos) >= tok.Base() && pos <= size
-}
-
-// LineToRange creates a Range spanning start and end.
-func LineToRange(m *protocol.ColumnMapper, uri span.URI, start, end modfile.Position) (protocol.Range, error) {
- return ByteOffsetsToRange(m, uri, start.Byte, end.Byte)
-}
-
-// ByteOffsetsToRange creates a range spanning start and end.
-func ByteOffsetsToRange(m *protocol.ColumnMapper, uri span.URI, start, end int) (protocol.Range, error) {
- line, col, err := m.Converter.ToPosition(start)
- if err != nil {
- return protocol.Range{}, err
- }
- s := span.NewPoint(line, col, start)
- line, col, err = m.Converter.ToPosition(end)
- if err != nil {
- return protocol.Range{}, err
- }
- e := span.NewPoint(line, col, end)
- return m.Range(span.New(uri, s, e))
-}
diff --git a/internal/lsp/source/view.go b/internal/lsp/source/view.go
deleted file mode 100644
index 4d7d411e0..000000000
--- a/internal/lsp/source/view.go
+++ /dev/null
@@ -1,696 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package source
-
-import (
- "bytes"
- "context"
- "fmt"
- "go/ast"
- "go/scanner"
- "go/token"
- "go/types"
- "io"
- "strings"
-
- "golang.org/x/mod/modfile"
- "golang.org/x/mod/module"
- "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/packages"
- "golang.org/x/tools/internal/gocommand"
- "golang.org/x/tools/internal/imports"
- "golang.org/x/tools/internal/lsp/progress"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/span"
- errors "golang.org/x/xerrors"
-)
-
-// Snapshot represents the current state for the given view.
-type Snapshot interface {
- ID() uint64
-
- // View returns the View associated with this snapshot.
- View() View
-
- // BackgroundContext returns a context used for all background processing
- // on behalf of this snapshot.
- BackgroundContext() context.Context
-
- // Fileset returns the Fileset used to parse all the Go files in this snapshot.
- FileSet() *token.FileSet
-
- // ValidBuildConfiguration returns true if there is some error in the
- // user's workspace. In particular, if they are both outside of a module
- // and their GOPATH.
- ValidBuildConfiguration() bool
-
- // WriteEnv writes the view-specific environment to the io.Writer.
- WriteEnv(ctx context.Context, w io.Writer) error
-
- // FindFile returns the FileHandle for the given URI, if it is already
- // in the given snapshot.
- FindFile(uri span.URI) VersionedFileHandle
-
- // GetVersionedFile returns the VersionedFileHandle for a given URI,
- // initializing it if it is not already part of the snapshot.
- GetVersionedFile(ctx context.Context, uri span.URI) (VersionedFileHandle, error)
-
- // GetFile returns the FileHandle for a given URI, initializing it if it is
- // not already part of the snapshot.
- GetFile(ctx context.Context, uri span.URI) (FileHandle, error)
-
- // AwaitInitialized waits until the snapshot's view is initialized.
- AwaitInitialized(ctx context.Context)
-
- // IsOpen returns whether the editor currently has a file open.
- IsOpen(uri span.URI) bool
-
- // IgnoredFile reports if a file would be ignored by a `go list` of the whole
- // workspace.
- IgnoredFile(uri span.URI) bool
-
- // Templates returns the .tmpl files
- Templates() map[span.URI]VersionedFileHandle
-
- // ParseGo returns the parsed AST for the file.
- // If the file is not available, returns nil and an error.
- ParseGo(ctx context.Context, fh FileHandle, mode ParseMode) (*ParsedGoFile, error)
-
- // PosToField is a cache of *ast.Fields by token.Pos. This allows us
- // to quickly find corresponding *ast.Field node given a *types.Var.
- // We must refer to the AST to render type aliases properly when
- // formatting signatures and other types.
- PosToField(ctx context.Context, pkg Package, pos token.Pos) (*ast.Field, error)
-
- // PosToDecl maps certain objects' positions to their surrounding
- // ast.Decl. This mapping is used when building the documentation
- // string for the objects.
- PosToDecl(ctx context.Context, pkg Package, pos token.Pos) (ast.Decl, error)
-
- // DiagnosePackage returns basic diagnostics, including list, parse, and type errors
- // for pkg, grouped by file.
- DiagnosePackage(ctx context.Context, pkg Package) (map[span.URI][]*Diagnostic, error)
-
- // Analyze runs the analyses for the given package at this snapshot.
- Analyze(ctx context.Context, pkgID string, analyzers []*Analyzer) ([]*Diagnostic, error)
-
- // RunGoCommandPiped runs the given `go` command, writing its output
- // to stdout and stderr. Verb, Args, and WorkingDir must be specified.
- RunGoCommandPiped(ctx context.Context, mode InvocationFlags, inv *gocommand.Invocation, stdout, stderr io.Writer) error
-
- // RunGoCommandDirect runs the given `go` command. Verb, Args, and
- // WorkingDir must be specified.
- RunGoCommandDirect(ctx context.Context, mode InvocationFlags, inv *gocommand.Invocation) (*bytes.Buffer, error)
-
- // RunGoCommands runs a series of `go` commands that updates the go.mod
- // and go.sum file for wd, and returns their updated contents.
- RunGoCommands(ctx context.Context, allowNetwork bool, wd string, run func(invoke func(...string) (*bytes.Buffer, error)) error) (bool, []byte, []byte, error)
-
- // RunProcessEnvFunc runs fn with the process env for this snapshot's view.
- // Note: the process env contains cached module and filesystem state.
- RunProcessEnvFunc(ctx context.Context, fn func(*imports.Options) error) error
-
- // ModFiles are the go.mod files enclosed in the snapshot's view and known
- // to the snapshot.
- ModFiles() []span.URI
-
- // ParseMod is used to parse go.mod files.
- ParseMod(ctx context.Context, fh FileHandle) (*ParsedModule, error)
-
- // ModWhy returns the results of `go mod why` for the module specified by
- // the given go.mod file.
- ModWhy(ctx context.Context, fh FileHandle) (map[string]string, error)
-
- // ModTidy returns the results of `go mod tidy` for the module specified by
- // the given go.mod file.
- ModTidy(ctx context.Context, pm *ParsedModule) (*TidiedModule, error)
-
- // GoModForFile returns the URI of the go.mod file for the given URI.
- GoModForFile(uri span.URI) span.URI
-
- // WorkFile, if non-empty, is the go.work file for the workspace.
- WorkFile() span.URI
-
- // ParseWork is used to parse go.work files.
- ParseWork(ctx context.Context, fh FileHandle) (*ParsedWorkFile, error)
-
- // BuiltinFile returns information about the special builtin package.
- BuiltinFile(ctx context.Context) (*ParsedGoFile, error)
-
- // IsBuiltin reports whether uri is part of the builtin package.
- IsBuiltin(ctx context.Context, uri span.URI) bool
-
- // PackagesForFile returns the packages that this file belongs to, checked
- // in mode.
- PackagesForFile(ctx context.Context, uri span.URI, mode TypecheckMode, includeTestVariants bool) ([]Package, error)
-
- // PackageForFile returns a single package that this file belongs to,
- // checked in mode and filtered by the package policy.
- PackageForFile(ctx context.Context, uri span.URI, mode TypecheckMode, selectPackage PackageFilter) (Package, error)
-
- // GetActiveReverseDeps returns the active files belonging to the reverse
- // dependencies of this file's package, checked in TypecheckWorkspace mode.
- GetReverseDependencies(ctx context.Context, id string) ([]Package, error)
-
- // CachedImportPaths returns all the imported packages loaded in this
- // snapshot, indexed by their import path and checked in TypecheckWorkspace
- // mode.
- CachedImportPaths(ctx context.Context) (map[string]Package, error)
-
- // KnownPackages returns all the packages loaded in this snapshot, checked
- // in TypecheckWorkspace mode.
- KnownPackages(ctx context.Context) ([]Package, error)
-
- // ActivePackages returns the packages considered 'active' in the workspace.
- //
- // In normal memory mode, this is all workspace packages. In degraded memory
- // mode, this is just the reverse transitive closure of open packages.
- ActivePackages(ctx context.Context) ([]Package, error)
-
- // Symbols returns all symbols in the snapshot.
- Symbols(ctx context.Context) (map[span.URI][]Symbol, error)
-
- // Metadata returns package metadata associated with the given file URI.
- MetadataForFile(ctx context.Context, uri span.URI) ([]Metadata, error)
-
- // GetCriticalError returns any critical errors in the workspace.
- GetCriticalError(ctx context.Context) *CriticalError
-
- // BuildGoplsMod generates a go.mod file for all modules in the workspace.
- // It bypasses any existing gopls.mod.
- BuildGoplsMod(ctx context.Context) (*modfile.File, error)
-}
-
-// PackageFilter sets how a package is filtered out from a set of packages
-// containing a given file.
-type PackageFilter int
-
-const (
- // NarrowestPackage picks the "narrowest" package for a given file.
- // By "narrowest" package, we mean the package with the fewest number of
- // files that includes the given file. This solves the problem of test
- // variants, as the test will have more files than the non-test package.
- NarrowestPackage PackageFilter = iota
-
- // WidestPackage returns the Package containing the most files.
- // This is useful for something like diagnostics, where we'd prefer to
- // offer diagnostics for as many files as possible.
- WidestPackage
-)
-
-// InvocationFlags represents the settings of a particular go command invocation.
-// It is a mode, plus a set of flag bits.
-type InvocationFlags int
-
-const (
- // Normal is appropriate for commands that might be run by a user and don't
- // deliberately modify go.mod files, e.g. `go test`.
- Normal InvocationFlags = iota
- // WriteTemporaryModFile is for commands that need information from a
- // modified version of the user's go.mod file, e.g. `go mod tidy` used to
- // generate diagnostics.
- WriteTemporaryModFile
- // LoadWorkspace is for packages.Load, and other operations that should
- // consider the whole workspace at once.
- LoadWorkspace
-
- // AllowNetwork is a flag bit that indicates the invocation should be
- // allowed to access the network.
- AllowNetwork InvocationFlags = 1 << 10
-)
-
-func (m InvocationFlags) Mode() InvocationFlags {
- return m & (AllowNetwork - 1)
-}
-
-func (m InvocationFlags) AllowNetwork() bool {
- return m&AllowNetwork != 0
-}
-
-// View represents a single workspace.
-// This is the level at which we maintain configuration like working directory
-// and build tags.
-type View interface {
- // Name returns the name this view was constructed with.
- Name() string
-
- // Folder returns the folder with which this view was created.
- Folder() span.URI
-
- // Shutdown closes this view, and detaches it from its session.
- Shutdown(ctx context.Context)
-
- // Options returns a copy of the Options for this view.
- Options() *Options
-
- // SetOptions sets the options of this view to new values.
- // Calling this may cause the view to be invalidated and a replacement view
- // added to the session. If so the new view will be returned, otherwise the
- // original one will be.
- SetOptions(context.Context, *Options) (View, error)
-
- // Snapshot returns the current snapshot for the view.
- Snapshot(ctx context.Context) (Snapshot, func())
-
- // Rebuild rebuilds the current view, replacing the original view in its session.
- Rebuild(ctx context.Context) (Snapshot, func(), error)
-
- // IsGoPrivatePath reports whether target is a private import path, as identified
- // by the GOPRIVATE environment variable.
- IsGoPrivatePath(path string) bool
-
- // ModuleUpgrades returns known module upgrades.
- ModuleUpgrades() map[string]string
-
- // RegisterModuleUpgrades registers that upgrades exist for the given modules.
- RegisterModuleUpgrades(upgrades map[string]string)
-
- // FileKind returns the type of a file
- FileKind(FileHandle) FileKind
-}
-
-// A FileSource maps uris to FileHandles. This abstraction exists both for
-// testability, and so that algorithms can be run equally on session and
-// snapshot files.
-type FileSource interface {
- // GetFile returns the FileHandle for a given URI.
- GetFile(ctx context.Context, uri span.URI) (FileHandle, error)
-}
-
-// A ParsedGoFile contains the results of parsing a Go file.
-type ParsedGoFile struct {
- URI span.URI
- Mode ParseMode
- File *ast.File
- Tok *token.File
- // Source code used to build the AST. It may be different from the
- // actual content of the file if we have fixed the AST.
- Src []byte
- Mapper *protocol.ColumnMapper
- ParseErr scanner.ErrorList
-}
-
-// A ParsedModule contains the results of parsing a go.mod file.
-type ParsedModule struct {
- URI span.URI
- File *modfile.File
- Mapper *protocol.ColumnMapper
- ParseErrors []*Diagnostic
-}
-
-// A ParsedWorkFile contains the results of parsing a go.work file.
-type ParsedWorkFile struct {
- URI span.URI
- File *modfile.WorkFile
- Mapper *protocol.ColumnMapper
- ParseErrors []*Diagnostic
-}
-
-// A TidiedModule contains the results of running `go mod tidy` on a module.
-type TidiedModule struct {
- // Diagnostics representing changes made by `go mod tidy`.
- Diagnostics []*Diagnostic
- // The bytes of the go.mod file after it was tidied.
- TidiedContent []byte
-}
-
-// Metadata represents package metadata retrieved from go/packages.
-type Metadata interface {
- // PackageName is the package name.
- PackageName() string
-
- // PackagePath is the package path.
- PackagePath() string
-
- // ModuleInfo returns the go/packages module information for the given package.
- ModuleInfo() *packages.Module
-}
-
-// Session represents a single connection from a client.
-// This is the level at which things like open files are maintained on behalf
-// of the client.
-// A session may have many active views at any given time.
-type Session interface {
- // ID returns the unique identifier for this session on this server.
- ID() string
- // NewView creates a new View, returning it and its first snapshot. If a
- // non-empty tempWorkspace directory is provided, the View will record a copy
- // of its gopls workspace module in that directory, so that client tooling
- // can execute in the same main module.
- NewView(ctx context.Context, name string, folder span.URI, options *Options) (View, Snapshot, func(), error)
-
- // Cache returns the cache that created this session, for debugging only.
- Cache() interface{}
-
- // View returns a view with a matching name, if the session has one.
- View(name string) View
-
- // ViewOf returns a view corresponding to the given URI.
- ViewOf(uri span.URI) (View, error)
-
- // Views returns the set of active views built by this session.
- Views() []View
-
- // Shutdown the session and all views it has created.
- Shutdown(ctx context.Context)
-
- // GetFile returns a handle for the specified file.
- GetFile(ctx context.Context, uri span.URI) (FileHandle, error)
-
- // DidModifyFile reports a file modification to the session. It returns
- // the new snapshots after the modifications have been applied, paired with
- // the affected file URIs for those snapshots.
- DidModifyFiles(ctx context.Context, changes []FileModification) (map[Snapshot][]span.URI, []func(), error)
-
- // ExpandModificationsToDirectories returns the set of changes with the
- // directory changes removed and expanded to include all of the files in
- // the directory.
- ExpandModificationsToDirectories(ctx context.Context, changes []FileModification) []FileModification
-
- // Overlays returns a slice of file overlays for the session.
- Overlays() []Overlay
-
- // Options returns a copy of the SessionOptions for this session.
- Options() *Options
-
- // SetOptions sets the options of this session to new values.
- SetOptions(*Options)
-
- // FileWatchingGlobPatterns returns glob patterns to watch every directory
- // known by the view. For views within a module, this is the module root,
- // any directory in the module root, and any replace targets.
- FileWatchingGlobPatterns(ctx context.Context) map[string]struct{}
-
- // SetProgressTracker sets the progress tracker for the session.
- SetProgressTracker(tracker *progress.Tracker)
-}
-
-var ErrViewExists = errors.New("view already exists for session")
-
-// Overlay is the type for a file held in memory on a session.
-type Overlay interface {
- Kind() FileKind
- VersionedFileHandle
-}
-
-// FileModification represents a modification to a file.
-type FileModification struct {
- URI span.URI
- Action FileAction
-
- // OnDisk is true if a watched file is changed on disk.
- // If true, Version will be -1 and Text will be nil.
- OnDisk bool
-
- // Version will be -1 and Text will be nil when they are not supplied,
- // specifically on textDocument/didClose and for on-disk changes.
- Version int32
- Text []byte
-
- // LanguageID is only sent from the language client on textDocument/didOpen.
- LanguageID string
-}
-
-type FileAction int
-
-const (
- UnknownFileAction = FileAction(iota)
- Open
- Change
- Close
- Save
- Create
- Delete
- InvalidateMetadata
-)
-
-func (a FileAction) String() string {
- switch a {
- case Open:
- return "Open"
- case Change:
- return "Change"
- case Close:
- return "Close"
- case Save:
- return "Save"
- case Create:
- return "Create"
- case Delete:
- return "Delete"
- case InvalidateMetadata:
- return "InvalidateMetadata"
- default:
- return "Unknown"
- }
-}
-
-var ErrTmpModfileUnsupported = errors.New("-modfile is unsupported for this Go version")
-var ErrNoModOnDisk = errors.New("go.mod file is not on disk")
-
-func IsNonFatalGoModError(err error) bool {
- return err == ErrTmpModfileUnsupported || err == ErrNoModOnDisk
-}
-
-// ParseMode controls the content of the AST produced when parsing a source file.
-type ParseMode int
-
-const (
- // ParseHeader specifies that the main package declaration and imports are needed.
- // This is the mode used when attempting to examine the package graph structure.
- ParseHeader ParseMode = iota
-
- // ParseExported specifies that the package is used only as a dependency,
- // and only its exported declarations are needed. More may be included if
- // necessary to avoid type errors.
- ParseExported
-
- // ParseFull specifies the full AST is needed.
- // This is used for files of direct interest where the entire contents must
- // be considered.
- ParseFull
-)
-
-// TypecheckMode controls what kind of parsing should be done (see ParseMode)
-// while type checking a package.
-type TypecheckMode int
-
-const (
- // Invalid default value.
- TypecheckUnknown TypecheckMode = iota
- // TypecheckFull means to use ParseFull.
- TypecheckFull
- // TypecheckWorkspace means to use ParseFull for workspace packages, and
- // ParseExported for others.
- TypecheckWorkspace
- // TypecheckAll means ParseFull for workspace packages, and both Full and
- // Exported for others. Only valid for some functions.
- TypecheckAll
-)
-
-type VersionedFileHandle interface {
- FileHandle
- Version() int32
- Session() string
-
- // LSPIdentity returns the version identity of a file.
- VersionedFileIdentity() VersionedFileIdentity
-}
-
-type VersionedFileIdentity struct {
- URI span.URI
-
- // SessionID is the ID of the LSP session.
- SessionID string
-
- // Version is the version of the file, as specified by the client. It should
- // only be set in combination with SessionID.
- Version int32
-}
-
-// FileHandle represents a handle to a specific version of a single file.
-type FileHandle interface {
- URI() span.URI
-
- // FileIdentity returns a FileIdentity for the file, even if there was an
- // error reading it.
- FileIdentity() FileIdentity
- // Read reads the contents of a file.
- // If the file is not available, returns a nil slice and an error.
- Read() ([]byte, error)
- // Saved reports whether the file has the same content on disk.
- Saved() bool
-}
-
-// FileIdentity uniquely identifies a file at a version from a FileSystem.
-type FileIdentity struct {
- URI span.URI
-
- // Identifier represents a unique identifier for the file's content.
- Hash string
-}
-
-func (id FileIdentity) String() string {
- return fmt.Sprintf("%s%s", id.URI, id.Hash)
-}
-
-// FileKind describes the kind of the file in question.
-// It can be one of Go,mod, Sum, or Tmpl.
-type FileKind int
-
-const (
- // UnknownKind is a file type we don't know about.
- UnknownKind = FileKind(iota)
-
- // Go is a normal go source file.
- Go
- // Mod is a go.mod file.
- Mod
- // Sum is a go.sum file.
- Sum
- // Tmpl is a template file.
- Tmpl
- // Work is a go.work file.
- Work
-)
-
-// Analyzer represents a go/analysis analyzer with some boolean properties
-// that let the user know how to use the analyzer.
-type Analyzer struct {
- Analyzer *analysis.Analyzer
-
- // Enabled reports whether the analyzer is enabled. This value can be
- // configured per-analysis in user settings. For staticcheck analyzers,
- // the value of the Staticcheck setting overrides this field.
- Enabled bool
-
- // Fix is the name of the suggested fix name used to invoke the suggested
- // fixes for the analyzer. It is non-empty if we expect this analyzer to
- // provide its fix separately from its diagnostics. That is, we should apply
- // the analyzer's suggested fixes through a Command, not a TextEdit.
- Fix string
-
- // ActionKind is the kind of code action this analyzer produces. If
- // unspecified the type defaults to quickfix.
- ActionKind []protocol.CodeActionKind
-
- // Severity is the severity set for diagnostics reported by this
- // analyzer. If left unset it defaults to Warning.
- Severity protocol.DiagnosticSeverity
-}
-
-func (a Analyzer) IsEnabled(view View) bool {
- // Staticcheck analyzers can only be enabled when staticcheck is on.
- if _, ok := view.Options().StaticcheckAnalyzers[a.Analyzer.Name]; ok {
- if !view.Options().Staticcheck {
- return false
- }
- }
- if enabled, ok := view.Options().Analyses[a.Analyzer.Name]; ok {
- return enabled
- }
- return a.Enabled
-}
-
-// Package represents a Go package that has been type-checked. It maintains
-// only the relevant fields of a *go/packages.Package.
-type Package interface {
- ID() string
- Name() string
- PkgPath() string
- CompiledGoFiles() []*ParsedGoFile
- File(uri span.URI) (*ParsedGoFile, error)
- GetSyntax() []*ast.File
- GetTypes() *types.Package
- GetTypesInfo() *types.Info
- GetTypesSizes() types.Sizes
- IsIllTyped() bool
- ForTest() string
- GetImport(pkgPath string) (Package, error)
- MissingDependencies() []string
- Imports() []Package
- Version() *module.Version
- HasListOrParseErrors() bool
- HasTypeErrors() bool
- ParseMode() ParseMode
-}
-
-type CriticalError struct {
- // MainError is the primary error. Must be non-nil.
- MainError error
- // DiagList contains any supplemental (structured) diagnostics.
- DiagList []*Diagnostic
-}
-
-// An Diagnostic corresponds to an LSP Diagnostic.
-// https://microsoft.github.io/language-server-protocol/specification#diagnostic
-type Diagnostic struct {
- URI span.URI
- Range protocol.Range
- Severity protocol.DiagnosticSeverity
- Code string
- CodeHref string
-
- // Source is a human-readable description of the source of the error.
- // Diagnostics generated by an analysis.Analyzer set it to Analyzer.Name.
- Source DiagnosticSource
-
- Message string
-
- Tags []protocol.DiagnosticTag
- Related []RelatedInformation
-
- // Fields below are used internally to generate quick fixes. They aren't
- // part of the LSP spec and don't leave the server.
- SuggestedFixes []SuggestedFix
- Analyzer *Analyzer
-}
-
-type DiagnosticSource string
-
-const (
- UnknownError DiagnosticSource = "<Unknown source>"
- ListError DiagnosticSource = "go list"
- ParseError DiagnosticSource = "syntax"
- TypeError DiagnosticSource = "compiler"
- ModTidyError DiagnosticSource = "go mod tidy"
- OptimizationDetailsError DiagnosticSource = "optimizer details"
- UpgradeNotification DiagnosticSource = "upgrade available"
- TemplateError DiagnosticSource = "template"
- WorkFileError DiagnosticSource = "go.work file"
-)
-
-func AnalyzerErrorKind(name string) DiagnosticSource {
- return DiagnosticSource(name)
-}
-
-var (
- PackagesLoadError = errors.New("packages.Load error")
-)
-
-// WorkspaceModuleVersion is the nonexistent pseudoversion suffix used in the
-// construction of the workspace module. It is exported so that we can make
-// sure not to show this version to end users in error messages, to avoid
-// confusion.
-// The major version is not included, as that depends on the module path.
-//
-// If workspace module A is dependent on workspace module B, we need our
-// nonexistant version to be greater than the version A mentions.
-// Otherwise, the go command will try to update to that version. Use a very
-// high minor version to make that more likely.
-const workspaceModuleVersion = ".9999999.0-goplsworkspace"
-
-func IsWorkspaceModuleVersion(version string) bool {
- return strings.HasSuffix(version, workspaceModuleVersion)
-}
-
-func WorkspaceModuleVersion(majorVersion string) string {
- // Use the highest compatible major version to avoid unwanted upgrades.
- // See the comment on workspaceModuleVersion.
- if majorVersion == "v0" {
- majorVersion = "v1"
- }
- return majorVersion + workspaceModuleVersion
-}
diff --git a/internal/lsp/source/workspace_symbol.go b/internal/lsp/source/workspace_symbol.go
deleted file mode 100644
index d9257c983..000000000
--- a/internal/lsp/source/workspace_symbol.go
+++ /dev/null
@@ -1,593 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package source
-
-import (
- "context"
- "fmt"
- "go/types"
- "path/filepath"
- "runtime"
- "sort"
- "strings"
- "unicode"
-
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/lsp/fuzzy"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/span"
-)
-
-// Symbol holds a precomputed symbol value. Note: we avoid using the
-// protocol.SymbolInformation struct here in order to reduce the size of each
-// symbol.
-type Symbol struct {
- Name string
- Kind protocol.SymbolKind
- Range protocol.Range
-}
-
-// maxSymbols defines the maximum number of symbol results that should ever be
-// sent in response to a client.
-const maxSymbols = 100
-
-// WorkspaceSymbols matches symbols across all views using the given query,
-// according to the match semantics parameterized by matcherType and style.
-//
-// The workspace symbol method is defined in the spec as follows:
-//
-// The workspace symbol request is sent from the client to the server to
-// list project-wide symbols matching the query string.
-//
-// It is unclear what "project-wide" means here, but given the parameters of
-// workspace/symbol do not include any workspace identifier, then it has to be
-// assumed that "project-wide" means "across all workspaces". Hence why
-// WorkspaceSymbols receives the views []View.
-//
-// However, it then becomes unclear what it would mean to call WorkspaceSymbols
-// with a different configured SymbolMatcher per View. Therefore we assume that
-// Session level configuration will define the SymbolMatcher to be used for the
-// WorkspaceSymbols method.
-func WorkspaceSymbols(ctx context.Context, matcherType SymbolMatcher, style SymbolStyle, views []View, query string) ([]protocol.SymbolInformation, error) {
- ctx, done := event.Start(ctx, "source.WorkspaceSymbols")
- defer done()
- if query == "" {
- return nil, nil
- }
- sc := newSymbolCollector(matcherType, style, query)
- return sc.walk(ctx, views)
-}
-
-// A matcherFunc returns the index and score of a symbol match.
-//
-// See the comment for symbolCollector for more information.
-type matcherFunc func(chunks []string) (int, float64)
-
-// A symbolizer returns the best symbol match for a name with pkg, according to
-// some heuristic. The symbol name is passed as the slice nameParts of logical
-// name pieces. For example, for myType.field the caller can pass either
-// []string{"myType.field"} or []string{"myType.", "field"}.
-//
-// See the comment for symbolCollector for more information.
-type symbolizer func(name string, pkg Metadata, m matcherFunc) ([]string, float64)
-
-func fullyQualifiedSymbolMatch(name string, pkg Metadata, matcher matcherFunc) ([]string, float64) {
- _, score := dynamicSymbolMatch(name, pkg, matcher)
- if score > 0 {
- return []string{pkg.PackagePath(), ".", name}, score
- }
- return nil, 0
-}
-
-func dynamicSymbolMatch(name string, pkg Metadata, matcher matcherFunc) ([]string, float64) {
- var score float64
-
- endsInPkgName := strings.HasSuffix(pkg.PackagePath(), pkg.PackageName())
-
- // If the package path does not end in the package name, we need to check the
- // package-qualified symbol as an extra pass first.
- if !endsInPkgName {
- pkgQualified := []string{pkg.PackageName(), ".", name}
- idx, score := matcher(pkgQualified)
- nameStart := len(pkg.PackageName()) + 1
- if score > 0 {
- // If our match is contained entirely within the unqualified portion,
- // just return that.
- if idx >= nameStart {
- return []string{name}, score
- }
- // Lower the score for matches that include the package name.
- return pkgQualified, score * 0.8
- }
- }
-
- // Now try matching the fully qualified symbol.
- fullyQualified := []string{pkg.PackagePath(), ".", name}
- idx, score := matcher(fullyQualified)
-
- // As above, check if we matched just the unqualified symbol name.
- nameStart := len(pkg.PackagePath()) + 1
- if idx >= nameStart {
- return []string{name}, score
- }
-
- // If our package path ends in the package name, we'll have skipped the
- // initial pass above, so check if we matched just the package-qualified
- // name.
- if endsInPkgName && idx >= 0 {
- pkgStart := len(pkg.PackagePath()) - len(pkg.PackageName())
- if idx >= pkgStart {
- return []string{pkg.PackageName(), ".", name}, score
- }
- }
-
- // Our match was not contained within the unqualified or package qualified
- // symbol. Return the fully qualified symbol but discount the score.
- return fullyQualified, score * 0.6
-}
-
-func packageSymbolMatch(name string, pkg Metadata, matcher matcherFunc) ([]string, float64) {
- qualified := []string{pkg.PackageName(), ".", name}
- if _, s := matcher(qualified); s > 0 {
- return qualified, s
- }
- return nil, 0
-}
-
-// symbolCollector holds context as we walk Packages, gathering symbols that
-// match a given query.
-//
-// How we match symbols is parameterized by two interfaces:
-// * A matcherFunc determines how well a string symbol matches a query. It
-// returns a non-negative score indicating the quality of the match. A score
-// of zero indicates no match.
-// * A symbolizer determines how we extract the symbol for an object. This
-// enables the 'symbolStyle' configuration option.
-type symbolCollector struct {
- // These types parameterize the symbol-matching pass.
- matchers []matcherFunc
- symbolizer symbolizer
-
- symbolStore
-}
-
-func newSymbolCollector(matcher SymbolMatcher, style SymbolStyle, query string) *symbolCollector {
- var s symbolizer
- switch style {
- case DynamicSymbols:
- s = dynamicSymbolMatch
- case FullyQualifiedSymbols:
- s = fullyQualifiedSymbolMatch
- case PackageQualifiedSymbols:
- s = packageSymbolMatch
- default:
- panic(fmt.Errorf("unknown symbol style: %v", style))
- }
- sc := &symbolCollector{symbolizer: s}
- sc.matchers = make([]matcherFunc, runtime.GOMAXPROCS(-1))
- for i := range sc.matchers {
- sc.matchers[i] = buildMatcher(matcher, query)
- }
- return sc
-}
-
-func buildMatcher(matcher SymbolMatcher, query string) matcherFunc {
- switch matcher {
- case SymbolFuzzy:
- return parseQuery(query, newFuzzyMatcher)
- case SymbolFastFuzzy:
- return parseQuery(query, func(query string) matcherFunc {
- return fuzzy.NewSymbolMatcher(query).Match
- })
- case SymbolCaseSensitive:
- return matchExact(query)
- case SymbolCaseInsensitive:
- q := strings.ToLower(query)
- exact := matchExact(q)
- wrapper := []string{""}
- return func(chunks []string) (int, float64) {
- s := strings.Join(chunks, "")
- wrapper[0] = strings.ToLower(s)
- return exact(wrapper)
- }
- }
- panic(fmt.Errorf("unknown symbol matcher: %v", matcher))
-}
-
-func newFuzzyMatcher(query string) matcherFunc {
- fm := fuzzy.NewMatcher(query)
- return func(chunks []string) (int, float64) {
- score := float64(fm.ScoreChunks(chunks))
- ranges := fm.MatchedRanges()
- if len(ranges) > 0 {
- return ranges[0], score
- }
- return -1, score
- }
-}
-
-// parseQuery parses a field-separated symbol query, extracting the special
-// characters listed below, and returns a matcherFunc corresponding to the AND
-// of all field queries.
-//
-// Special characters:
-// ^ match exact prefix
-// $ match exact suffix
-// ' match exact
-//
-// In all three of these special queries, matches are 'smart-cased', meaning
-// they are case sensitive if the symbol query contains any upper-case
-// characters, and case insensitive otherwise.
-func parseQuery(q string, newMatcher func(string) matcherFunc) matcherFunc {
- fields := strings.Fields(q)
- if len(fields) == 0 {
- return func([]string) (int, float64) { return -1, 0 }
- }
- var funcs []matcherFunc
- for _, field := range fields {
- var f matcherFunc
- switch {
- case strings.HasPrefix(field, "^"):
- prefix := field[1:]
- f = smartCase(prefix, func(chunks []string) (int, float64) {
- s := strings.Join(chunks, "")
- if strings.HasPrefix(s, prefix) {
- return 0, 1
- }
- return -1, 0
- })
- case strings.HasPrefix(field, "'"):
- exact := field[1:]
- f = smartCase(exact, matchExact(exact))
- case strings.HasSuffix(field, "$"):
- suffix := field[0 : len(field)-1]
- f = smartCase(suffix, func(chunks []string) (int, float64) {
- s := strings.Join(chunks, "")
- if strings.HasSuffix(s, suffix) {
- return len(s) - len(suffix), 1
- }
- return -1, 0
- })
- default:
- f = newMatcher(field)
- }
- funcs = append(funcs, f)
- }
- if len(funcs) == 1 {
- return funcs[0]
- }
- return comboMatcher(funcs).match
-}
-
-func matchExact(exact string) matcherFunc {
- return func(chunks []string) (int, float64) {
- s := strings.Join(chunks, "")
- if idx := strings.LastIndex(s, exact); idx >= 0 {
- return idx, 1
- }
- return -1, 0
- }
-}
-
-// smartCase returns a matcherFunc that is case-sensitive if q contains any
-// upper-case characters, and case-insensitive otherwise.
-func smartCase(q string, m matcherFunc) matcherFunc {
- insensitive := strings.ToLower(q) == q
- wrapper := []string{""}
- return func(chunks []string) (int, float64) {
- s := strings.Join(chunks, "")
- if insensitive {
- s = strings.ToLower(s)
- }
- wrapper[0] = s
- return m(wrapper)
- }
-}
-
-type comboMatcher []matcherFunc
-
-func (c comboMatcher) match(chunks []string) (int, float64) {
- score := 1.0
- first := 0
- for _, f := range c {
- idx, s := f(chunks)
- if idx < first {
- first = idx
- }
- score *= s
- }
- return first, score
-}
-
-func (sc *symbolCollector) walk(ctx context.Context, views []View) ([]protocol.SymbolInformation, error) {
- // Use the root view URIs for determining (lexically) whether a uri is in any
- // open workspace.
- var roots []string
- for _, v := range views {
- roots = append(roots, strings.TrimRight(string(v.Folder()), "/"))
- }
-
- results := make(chan *symbolStore)
- matcherlen := len(sc.matchers)
- files := make(map[span.URI]symbolFile)
-
- for _, v := range views {
- snapshot, release := v.Snapshot(ctx)
- defer release()
- psyms, err := snapshot.Symbols(ctx)
- if err != nil {
- return nil, err
- }
-
- filters := v.Options().DirectoryFilters
- folder := filepath.ToSlash(v.Folder().Filename())
- for uri, syms := range psyms {
- norm := filepath.ToSlash(uri.Filename())
- nm := strings.TrimPrefix(norm, folder)
- if FiltersDisallow(nm, filters) {
- continue
- }
- // Only scan each file once.
- if _, ok := files[uri]; ok {
- continue
- }
- mds, err := snapshot.MetadataForFile(ctx, uri)
- if err != nil {
- event.Error(ctx, fmt.Sprintf("missing metadata for %q", uri), err)
- continue
- }
- if len(mds) == 0 {
- // TODO: should use the bug reporting API
- continue
- }
- files[uri] = symbolFile{uri, mds[0], syms}
- }
- }
-
- var work []symbolFile
- for _, f := range files {
- work = append(work, f)
- }
-
- // Compute matches concurrently. Each symbolWorker has its own symbolStore,
- // which we merge at the end.
- for i, matcher := range sc.matchers {
- go func(i int, matcher matcherFunc) {
- w := &symbolWorker{
- symbolizer: sc.symbolizer,
- matcher: matcher,
- ss: &symbolStore{},
- roots: roots,
- }
- for j := i; j < len(work); j += matcherlen {
- w.matchFile(work[j])
- }
- results <- w.ss
- }(i, matcher)
- }
-
- for i := 0; i < matcherlen; i++ {
- ss := <-results
- for _, si := range ss.res {
- sc.store(si)
- }
- }
- return sc.results(), nil
-}
-
-// FilterDisallow is code from the body of cache.pathExcludedByFilter in cache/view.go
-// Exporting and using that function would cause an import cycle.
-// Moving it here and exporting it would leave behind view_test.go.
-// (This code is exported and used in the body of cache.pathExcludedByFilter)
-func FiltersDisallow(path string, filters []string) bool {
- path = strings.TrimPrefix(path, "/")
- var excluded bool
- for _, filter := range filters {
- op, prefix := filter[0], filter[1:]
- // Non-empty prefixes have to be precise directory matches.
- if prefix != "" {
- prefix = prefix + "/"
- path = path + "/"
- }
- if !strings.HasPrefix(path, prefix) {
- continue
- }
- excluded = op == '-'
- }
- return excluded
-}
-
-// symbolFile holds symbol information for a single file.
-type symbolFile struct {
- uri span.URI
- md Metadata
- syms []Symbol
-}
-
-// symbolWorker matches symbols and captures the highest scoring results.
-type symbolWorker struct {
- symbolizer symbolizer
- matcher matcherFunc
- ss *symbolStore
- roots []string
-}
-
-func (w *symbolWorker) matchFile(i symbolFile) {
- for _, sym := range i.syms {
- symbolParts, score := w.symbolizer(sym.Name, i.md, w.matcher)
-
- // Check if the score is too low before applying any downranking.
- if w.ss.tooLow(score) {
- continue
- }
-
- // Factors to apply to the match score for the purpose of downranking
- // results.
- //
- // These numbers were crudely calibrated based on trial-and-error using a
- // small number of sample queries. Adjust as necessary.
- //
- // All factors are multiplicative, meaning if more than one applies they are
- // multiplied together.
- const (
- // nonWorkspaceFactor is applied to symbols outside of any active
- // workspace. Developers are less likely to want to jump to code that they
- // are not actively working on.
- nonWorkspaceFactor = 0.5
- // nonWorkspaceUnexportedFactor is applied to unexported symbols outside of
- // any active workspace. Since one wouldn't usually jump to unexported
- // symbols to understand a package API, they are particularly irrelevant.
- nonWorkspaceUnexportedFactor = 0.5
- // every field or method nesting level to access the field decreases
- // the score by a factor of 1.0 - depth*depthFactor, up to a depth of
- // 3.
- depthFactor = 0.2
- )
-
- startWord := true
- exported := true
- depth := 0.0
- for _, r := range sym.Name {
- if startWord && !unicode.IsUpper(r) {
- exported = false
- }
- if r == '.' {
- startWord = true
- depth++
- } else {
- startWord = false
- }
- }
-
- inWorkspace := false
- for _, root := range w.roots {
- if strings.HasPrefix(string(i.uri), root) {
- inWorkspace = true
- break
- }
- }
-
- // Apply downranking based on workspace position.
- if !inWorkspace {
- score *= nonWorkspaceFactor
- if !exported {
- score *= nonWorkspaceUnexportedFactor
- }
- }
-
- // Apply downranking based on symbol depth.
- if depth > 3 {
- depth = 3
- }
- score *= 1.0 - depth*depthFactor
-
- if w.ss.tooLow(score) {
- continue
- }
-
- si := symbolInformation{
- score: score,
- symbol: strings.Join(symbolParts, ""),
- kind: sym.Kind,
- uri: i.uri,
- rng: sym.Range,
- container: i.md.PackagePath(),
- }
- w.ss.store(si)
- }
-}
-
-type symbolStore struct {
- res [maxSymbols]symbolInformation
-}
-
-// store inserts si into the sorted results, if si has a high enough score.
-func (sc *symbolStore) store(si symbolInformation) {
- if sc.tooLow(si.score) {
- return
- }
- insertAt := sort.Search(len(sc.res), func(i int) bool {
- // Sort by score, then symbol length, and finally lexically.
- if sc.res[i].score != si.score {
- return sc.res[i].score < si.score
- }
- if len(sc.res[i].symbol) != len(si.symbol) {
- return len(sc.res[i].symbol) > len(si.symbol)
- }
- return sc.res[i].symbol > si.symbol
- })
- if insertAt < len(sc.res)-1 {
- copy(sc.res[insertAt+1:], sc.res[insertAt:len(sc.res)-1])
- }
- sc.res[insertAt] = si
-}
-
-func (sc *symbolStore) tooLow(score float64) bool {
- return score <= sc.res[len(sc.res)-1].score
-}
-
-func (sc *symbolStore) results() []protocol.SymbolInformation {
- var res []protocol.SymbolInformation
- for _, si := range sc.res {
- if si.score <= 0 {
- return res
- }
- res = append(res, si.asProtocolSymbolInformation())
- }
- return res
-}
-
-func typeToKind(typ types.Type) protocol.SymbolKind {
- switch typ := typ.Underlying().(type) {
- case *types.Interface:
- return protocol.Interface
- case *types.Struct:
- return protocol.Struct
- case *types.Signature:
- if typ.Recv() != nil {
- return protocol.Method
- }
- return protocol.Function
- case *types.Named:
- return typeToKind(typ.Underlying())
- case *types.Basic:
- i := typ.Info()
- switch {
- case i&types.IsNumeric != 0:
- return protocol.Number
- case i&types.IsBoolean != 0:
- return protocol.Boolean
- case i&types.IsString != 0:
- return protocol.String
- }
- }
- return protocol.Variable
-}
-
-// symbolInformation is a cut-down version of protocol.SymbolInformation that
-// allows struct values of this type to be used as map keys.
-type symbolInformation struct {
- score float64
- symbol string
- container string
- kind protocol.SymbolKind
- uri span.URI
- rng protocol.Range
-}
-
-// asProtocolSymbolInformation converts s to a protocol.SymbolInformation value.
-//
-// TODO: work out how to handle tags if/when they are needed.
-func (s symbolInformation) asProtocolSymbolInformation() protocol.SymbolInformation {
- return protocol.SymbolInformation{
- Name: s.symbol,
- Kind: s.kind,
- Location: protocol.Location{
- URI: protocol.URIFromSpanURI(s.uri),
- Range: s.rng,
- },
- ContainerName: s.container,
- }
-}
diff --git a/internal/lsp/source/workspace_symbol_test.go b/internal/lsp/source/workspace_symbol_test.go
deleted file mode 100644
index 314ef785d..000000000
--- a/internal/lsp/source/workspace_symbol_test.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package source
-
-import (
- "testing"
-)
-
-func TestParseQuery(t *testing.T) {
- tests := []struct {
- query, s string
- wantMatch bool
- }{
- {"", "anything", false},
- {"any", "anything", true},
- {"any$", "anything", false},
- {"ing$", "anything", true},
- {"ing$", "anythinG", true},
- {"inG$", "anything", false},
- {"^any", "anything", true},
- {"^any", "Anything", true},
- {"^Any", "anything", false},
- {"at", "anything", true},
- // TODO: this appears to be a bug in the fuzzy matching algorithm. 'At'
- // should cause a case-sensitive match.
- // {"At", "anything", false},
- {"At", "Anything", true},
- {"'yth", "Anything", true},
- {"'yti", "Anything", false},
- {"'any 'thing", "Anything", true},
- {"anythn nythg", "Anything", true},
- {"ntx", "Anything", false},
- {"anythn", "anything", true},
- {"ing", "anything", true},
- {"anythn nythgx", "anything", false},
- }
-
- for _, test := range tests {
- matcher := parseQuery(test.query, newFuzzyMatcher)
- if _, score := matcher([]string{test.s}); score > 0 != test.wantMatch {
- t.Errorf("parseQuery(%q) match for %q: %.2g, want match: %t", test.query, test.s, score, test.wantMatch)
- }
- }
-}
diff --git a/internal/lsp/symbols.go b/internal/lsp/symbols.go
deleted file mode 100644
index f04e4572d..000000000
--- a/internal/lsp/symbols.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lsp
-
-import (
- "context"
-
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/lsp/debug/tag"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/lsp/template"
-)
-
-func (s *Server) documentSymbol(ctx context.Context, params *protocol.DocumentSymbolParams) ([]interface{}, error) {
- ctx, done := event.Start(ctx, "lsp.Server.documentSymbol")
- defer done()
-
- snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.UnknownKind)
- defer release()
- if !ok {
- return []interface{}{}, err
- }
- var docSymbols []protocol.DocumentSymbol
- if snapshot.View().FileKind(fh) == source.Tmpl {
- docSymbols, err = template.DocumentSymbols(snapshot, fh)
- } else {
- docSymbols, err = source.DocumentSymbols(ctx, snapshot, fh)
- }
- if err != nil {
- event.Error(ctx, "DocumentSymbols failed", err, tag.URI.Of(fh.URI()))
- return []interface{}{}, nil
- }
- // Convert the symbols to an interface array.
- // TODO: Remove this once the lsp deprecates SymbolInformation.
- symbols := make([]interface{}, len(docSymbols))
- for i, s := range docSymbols {
- if snapshot.View().Options().HierarchicalDocumentSymbolSupport {
- symbols[i] = s
- continue
- }
- // If the client does not support hierarchical document symbols, then
- // we need to be backwards compatible for now and return SymbolInformation.
- symbols[i] = protocol.SymbolInformation{
- Name: s.Name,
- Kind: s.Kind,
- Deprecated: s.Deprecated,
- Location: protocol.Location{
- URI: params.TextDocument.URI,
- Range: s.Range,
- },
- }
- }
- return symbols, nil
-}
diff --git a/internal/lsp/template/completion.go b/internal/lsp/template/completion.go
deleted file mode 100644
index 13dbdf1e5..000000000
--- a/internal/lsp/template/completion.go
+++ /dev/null
@@ -1,301 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
- "bytes"
- "context"
- "fmt"
- "go/scanner"
- "go/token"
- "strings"
-
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
-)
-
-// information needed for completion
-type completer struct {
- p *Parsed
- pos protocol.Position
- offset int // offset of the start of the Token
- ctx protocol.CompletionContext
- syms map[string]symbol
-}
-
-func Completion(ctx context.Context, snapshot source.Snapshot, fh source.VersionedFileHandle, pos protocol.Position, context protocol.CompletionContext) (*protocol.CompletionList, error) {
- all := New(snapshot.Templates())
- var start int // the beginning of the Token (completed or not)
- syms := make(map[string]symbol)
- var p *Parsed
- for fn, fc := range all.files {
- // collect symbols from all template files
- filterSyms(syms, fc.symbols)
- if fn.Filename() != fh.URI().Filename() {
- continue
- }
- if start = inTemplate(fc, pos); start == -1 {
- return nil, nil
- }
- p = fc
- }
- if p == nil {
- // this cannot happen unless the search missed a template file
- return nil, fmt.Errorf("%s not found", fh.FileIdentity().URI.Filename())
- }
- c := completer{
- p: p,
- pos: pos,
- offset: start + len(Left),
- ctx: context,
- syms: syms,
- }
- return c.complete()
-}
-
-func filterSyms(syms map[string]symbol, ns []symbol) {
- for _, xsym := range ns {
- switch xsym.kind {
- case protocol.Method, protocol.Package, protocol.Boolean, protocol.Namespace,
- protocol.Function:
- syms[xsym.name] = xsym // we don't care which symbol we get
- case protocol.Variable:
- if xsym.name != "dot" {
- syms[xsym.name] = xsym
- }
- case protocol.Constant:
- if xsym.name == "nil" {
- syms[xsym.name] = xsym
- }
- }
- }
-}
-
-// return the starting position of the enclosing token, or -1 if none
-func inTemplate(fc *Parsed, pos protocol.Position) int {
- // pos is the pos-th character. if the cursor is at the beginning
- // of the file, pos is 0. That is, we've only seen characters before pos
- // 1. pos might be in a Token, return tk.Start
- // 2. pos might be after an elided but before a Token, return elided
- // 3. return -1 for false
- offset := fc.FromPosition(pos)
- // this could be a binary search, as the tokens are ordered
- for _, tk := range fc.tokens {
- if tk.Start < offset && offset <= tk.End {
- return tk.Start
- }
- }
- for _, x := range fc.elided {
- if x > offset {
- // fc.elided is sorted
- break
- }
- // If the interval [x,offset] does not contain Left or Right
- // then provide completions. (do we need the test for Right?)
- if !bytes.Contains(fc.buf[x:offset], []byte(Left)) && !bytes.Contains(fc.buf[x:offset], []byte(Right)) {
- return x
- }
- }
- return -1
-}
-
-var (
- keywords = []string{"if", "with", "else", "block", "range", "template", "end}}", "end"}
- globals = []string{"and", "call", "html", "index", "slice", "js", "len", "not", "or",
- "urlquery", "printf", "println", "print", "eq", "ne", "le", "lt", "ge", "gt"}
-)
-
-// find the completions. start is the offset of either the Token enclosing pos, or where
-// the incomplete token starts.
-// The error return is always nil.
-func (c *completer) complete() (*protocol.CompletionList, error) {
- ans := &protocol.CompletionList{IsIncomplete: true, Items: []protocol.CompletionItem{}}
- start := c.p.FromPosition(c.pos)
- sofar := c.p.buf[c.offset:start]
- if len(sofar) == 0 || sofar[len(sofar)-1] == ' ' || sofar[len(sofar)-1] == '\t' {
- return ans, nil
- }
- // sofar could be parsed by either c.analyzer() or scan(). The latter is precise
- // and slower, but fast enough
- words := scan(sofar)
- // 1. if pattern starts $, show variables
- // 2. if pattern starts ., show methods (and . by itself?)
- // 3. if len(words) == 1, show firstWords (but if it were a |, show functions and globals)
- // 4. ...? (parenthetical expressions, arguments, ...) (packages, namespaces, nil?)
- if len(words) == 0 {
- return nil, nil // if this happens, why were we called?
- }
- pattern := string(words[len(words)-1])
- if pattern[0] == '$' {
- // should we also return a raw "$"?
- for _, s := range c.syms {
- if s.kind == protocol.Variable && weakMatch(s.name, pattern) > 0 {
- ans.Items = append(ans.Items, protocol.CompletionItem{
- Label: s.name,
- Kind: protocol.VariableCompletion,
- Detail: "Variable",
- })
- }
- }
- return ans, nil
- }
- if pattern[0] == '.' {
- for _, s := range c.syms {
- if s.kind == protocol.Method && weakMatch("."+s.name, pattern) > 0 {
- ans.Items = append(ans.Items, protocol.CompletionItem{
- Label: s.name,
- Kind: protocol.MethodCompletion,
- Detail: "Method/member",
- })
- }
- }
- return ans, nil
- }
- // could we get completion attempts in strings or numbers, and if so, do we care?
- // globals
- for _, kw := range globals {
- if weakMatch(kw, string(pattern)) != 0 {
- ans.Items = append(ans.Items, protocol.CompletionItem{
- Label: kw,
- Kind: protocol.KeywordCompletion,
- Detail: "Function",
- })
- }
- }
- // and functions
- for _, s := range c.syms {
- if s.kind == protocol.Function && weakMatch(s.name, pattern) != 0 {
- ans.Items = append(ans.Items, protocol.CompletionItem{
- Label: s.name,
- Kind: protocol.FunctionCompletion,
- Detail: "Function",
- })
- }
- }
- // keywords if we're at the beginning
- if len(words) <= 1 || len(words[len(words)-2]) == 1 && words[len(words)-2][0] == '|' {
- for _, kw := range keywords {
- if weakMatch(kw, string(pattern)) != 0 {
- ans.Items = append(ans.Items, protocol.CompletionItem{
- Label: kw,
- Kind: protocol.KeywordCompletion,
- Detail: "keyword",
- })
- }
- }
- }
- return ans, nil
-}
-
-// someday think about comments, strings, backslashes, etc
-// this would repeat some of the template parsing, but because the user is typing
-// there may be no parse tree here.
-// (go/scanner will report 2 tokens for $a, as $ is not a legal go identifier character)
-// (go/scanner is about 2.7 times more expensive)
-func (c *completer) analyze(buf []byte) [][]byte {
- // we want to split on whitespace and before dots
- var working []byte
- var ans [][]byte
- for _, ch := range buf {
- if ch == '.' && len(working) > 0 {
- ans = append(ans, working)
- working = []byte{'.'}
- continue
- }
- if ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' {
- if len(working) > 0 {
- ans = append(ans, working)
- working = []byte{}
- continue
- }
- }
- working = append(working, ch)
- }
- if len(working) > 0 {
- ans = append(ans, working)
- }
- ch := buf[len(buf)-1]
- if ch == ' ' || ch == '\t' {
- // avoid completing on whitespace
- ans = append(ans, []byte{ch})
- }
- return ans
-}
-
-// version of c.analyze that uses go/scanner.
-func scan(buf []byte) []string {
- fset := token.NewFileSet()
- fp := fset.AddFile("", -1, len(buf))
- var sc scanner.Scanner
- sc.Init(fp, buf, func(pos token.Position, msg string) {}, scanner.ScanComments)
- ans := make([]string, 0, 10) // preallocating gives a measurable savings
- for {
- _, tok, lit := sc.Scan() // tok is an int
- if tok == token.EOF {
- break // done
- } else if tok == token.SEMICOLON && lit == "\n" {
- continue // don't care, but probably can't happen
- } else if tok == token.PERIOD {
- ans = append(ans, ".") // lit is empty
- } else if tok == token.IDENT && len(ans) > 0 && ans[len(ans)-1] == "." {
- ans[len(ans)-1] = "." + lit
- } else if tok == token.IDENT && len(ans) > 0 && ans[len(ans)-1] == "$" {
- ans[len(ans)-1] = "$" + lit
- } else if lit != "" {
- ans = append(ans, lit)
- }
- }
- return ans
-}
-
-// pattern is what the user has typed
-func weakMatch(choice, pattern string) float64 {
- lower := strings.ToLower(choice)
- // for now, use only lower-case everywhere
- pattern = strings.ToLower(pattern)
- // The first char has to match
- if pattern[0] != lower[0] {
- return 0
- }
- // If they start with ., then the second char has to match
- from := 1
- if pattern[0] == '.' {
- if len(pattern) < 2 {
- return 1 // pattern just a ., so it matches
- }
- if pattern[1] != lower[1] {
- return 0
- }
- from = 2
- }
- // check that all the characters of pattern occur as a subsequence of choice
- i, j := from, from
- for ; i < len(lower) && j < len(pattern); j++ {
- if pattern[j] == lower[i] {
- i++
- if i >= len(lower) {
- return 0
- }
- }
- }
- if j < len(pattern) {
- return 0
- }
- return 1
-}
-
-// for debug printing
-func strContext(c protocol.CompletionContext) string {
- switch c.TriggerKind {
- case protocol.Invoked:
- return "invoked"
- case protocol.TriggerCharacter:
- return fmt.Sprintf("triggered(%s)", c.TriggerCharacter)
- case protocol.TriggerForIncompleteCompletions:
- // gopls doesn't seem to handle these explicitly anywhere
- return "incomplete"
- }
- return fmt.Sprintf("?%v", c)
-}
diff --git a/internal/lsp/template/completion_test.go b/internal/lsp/template/completion_test.go
deleted file mode 100644
index bfcdb5372..000000000
--- a/internal/lsp/template/completion_test.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
- "log"
- "sort"
- "strings"
- "testing"
-
- "golang.org/x/tools/internal/lsp/protocol"
-)
-
-func init() {
- log.SetFlags(log.Lshortfile)
-}
-
-type tparse struct {
- marked string // ^ shows where to ask for completions. (The user just typed the following character.)
- wanted []string // expected completions
-}
-
-// Test completions in templates that parse enough (if completion needs symbols)
-// Seen characters up to the ^
-func TestParsed(t *testing.T) {
- var tests = []tparse{
- {"{{x}}{{12. xx^", nil}, // https://github.com/golang/go/issues/50430
- {`<table class="chroma" data-new-comment-url="{{if $.PageIsPullFiles}}{{$.Issue.HTMLURL}}/files/reviews/new_comment{{else}}{{$.CommitHTML}}/new_comment^{{end}}">`, nil},
- {"{{i^f}}", []string{"index", "if"}},
- {"{{if .}}{{e^ {{end}}", []string{"eq", "end}}", "else", "end"}},
- {"{{foo}}{{f^", []string{"foo"}},
- {"{{$^}}", []string{"$"}},
- {"{{$x:=4}}{{$^", []string{"$x"}},
- {"{{$x:=4}}{{$ ^ ", []string{}},
- {"{{len .Modified}}{{.^Mo", []string{"Modified"}},
- {"{{len .Modified}}{{.mf^", []string{"Modified"}},
- {"{{$^ }}", []string{"$"}},
- {"{{$a =3}}{{$^", []string{"$a"}},
- // .two is not good here: fix someday
- {`{{.Modified}}{{.^{{if $.one.two}}xxx{{end}}`, []string{"Modified", "one", "two"}},
- {`{{.Modified}}{{.o^{{if $.one.two}}xxx{{end}}`, []string{"one"}},
- {"{{.Modiifed}}{{.one.t^{{if $.one.two}}xxx{{end}}", []string{"two"}},
- {`{{block "foo" .}}{{i^`, []string{"index", "if"}},
- {"{{in^{{Internal}}", []string{"index", "Internal", "if"}},
- // simple number has no completions
- {"{{4^e", []string{}},
- // simple string has no completions
- {"{{`e^", []string{}},
- {"{{`No i^", []string{}}, // example of why go/scanner is used
- {"{{xavier}}{{12. x^", []string{"xavier"}},
- }
- for _, tx := range tests {
- c := testCompleter(t, tx)
- var v []string
- if c != nil {
- ans, _ := c.complete()
- for _, a := range ans.Items {
- v = append(v, a.Label)
- }
- }
- if len(v) != len(tx.wanted) {
- t.Errorf("%q: got %q, wanted %q %d,%d", tx.marked, v, tx.wanted, len(v), len(tx.wanted))
- continue
- }
- sort.Strings(tx.wanted)
- sort.Strings(v)
- for i := 0; i < len(v); i++ {
- if tx.wanted[i] != v[i] {
- t.Errorf("%q at %d: got %v, wanted %v", tx.marked, i, v, tx.wanted)
- break
- }
- }
- }
-}
-
-func testCompleter(t *testing.T, tx tparse) *completer {
- t.Helper()
- // seen chars up to ^
- col := strings.Index(tx.marked, "^")
- buf := strings.Replace(tx.marked, "^", "", 1)
- p := parseBuffer([]byte(buf))
- pos := protocol.Position{Line: 0, Character: uint32(col)}
- if p.ParseErr != nil {
- log.Printf("%q: %v", tx.marked, p.ParseErr)
- }
- offset := inTemplate(p, pos)
- if offset == -1 {
- return nil
- }
- syms := make(map[string]symbol)
- filterSyms(syms, p.symbols)
- c := &completer{
- p: p,
- pos: protocol.Position{Line: 0, Character: uint32(col)},
- offset: offset + len(Left),
- ctx: protocol.CompletionContext{TriggerKind: protocol.Invoked},
- syms: syms,
- }
- return c
-}
diff --git a/internal/lsp/template/highlight.go b/internal/lsp/template/highlight.go
deleted file mode 100644
index a45abaf50..000000000
--- a/internal/lsp/template/highlight.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
- "context"
- "fmt"
- "regexp"
-
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
-)
-
-func Highlight(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle, loc protocol.Position) ([]protocol.DocumentHighlight, error) {
- buf, err := fh.Read()
- if err != nil {
- return nil, err
- }
- p := parseBuffer(buf)
- pos := p.FromPosition(loc)
- var ans []protocol.DocumentHighlight
- if p.ParseErr == nil {
- for _, s := range p.symbols {
- if s.start <= pos && pos < s.start+s.length {
- return markSymbols(p, s)
- }
- }
- }
- // these tokens exist whether or not there was a parse error
- // (symbols require a successful parse)
- for _, tok := range p.tokens {
- if tok.Start <= pos && pos < tok.End {
- wordAt := findWordAt(p, pos)
- if len(wordAt) > 0 {
- return markWordInToken(p, wordAt)
- }
- }
- }
- // find the 'word' at pos, etc: someday
- // until then we get the default action, which doesn't respect word boundaries
- return ans, nil
-}
-
-func markSymbols(p *Parsed, sym symbol) ([]protocol.DocumentHighlight, error) {
- var ans []protocol.DocumentHighlight
- for _, s := range p.symbols {
- if s.name == sym.name {
- kind := protocol.Read
- if s.vardef {
- kind = protocol.Write
- }
- ans = append(ans, protocol.DocumentHighlight{
- Range: p.Range(s.start, s.length),
- Kind: kind,
- })
- }
- }
- return ans, nil
-}
-
-// A token is {{...}}, and this marks words in the token that equal the give word
-func markWordInToken(p *Parsed, wordAt string) ([]protocol.DocumentHighlight, error) {
- var ans []protocol.DocumentHighlight
- pat, err := regexp.Compile(fmt.Sprintf(`\b%s\b`, wordAt))
- if err != nil {
- return nil, fmt.Errorf("%q: unmatchable word (%v)", wordAt, err)
- }
- for _, tok := range p.tokens {
- got := pat.FindAllIndex(p.buf[tok.Start:tok.End], -1)
- for i := 0; i < len(got); i++ {
- ans = append(ans, protocol.DocumentHighlight{
- Range: p.Range(got[i][0], got[i][1]-got[i][0]),
- Kind: protocol.Text,
- })
- }
- }
- return ans, nil
-}
-
-var wordRe = regexp.MustCompile(`[$]?\w+$`)
-var moreRe = regexp.MustCompile(`^[$]?\w+`)
-
-// findWordAt finds the word the cursor is in (meaning in or just before)
-func findWordAt(p *Parsed, pos int) string {
- if pos >= len(p.buf) {
- return "" // can't happen, as we are called with pos < tok.End
- }
- after := moreRe.Find(p.buf[pos:])
- if len(after) == 0 {
- return "" // end of the word
- }
- got := wordRe.Find(p.buf[:pos+len(after)])
- return string(got)
-}
diff --git a/internal/lsp/template/implementations.go b/internal/lsp/template/implementations.go
deleted file mode 100644
index 1de988871..000000000
--- a/internal/lsp/template/implementations.go
+++ /dev/null
@@ -1,189 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
- "context"
- "fmt"
- "regexp"
- "strconv"
- "time"
-
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
-)
-
-// line number (1-based) and message
-var errRe = regexp.MustCompile(`template.*:(\d+): (.*)`)
-
-// Diagnose returns parse errors. There is only one.
-// The errors are not always helpful. For instance { {end}}
-// will likely point to the end of the file.
-func Diagnose(f source.VersionedFileHandle) []*source.Diagnostic {
- // no need for skipTemplate check, as Diagnose is called on the
- // snapshot's template files
- buf, err := f.Read()
- if err != nil {
- // Is a Diagnostic with no Range useful? event.Error also?
- msg := fmt.Sprintf("failed to read %s (%v)", f.URI().Filename(), err)
- d := source.Diagnostic{Message: msg, Severity: protocol.SeverityError, URI: f.URI(),
- Source: source.TemplateError}
- return []*source.Diagnostic{&d}
- }
- p := parseBuffer(buf)
- if p.ParseErr == nil {
- return nil
- }
- unknownError := func(msg string) []*source.Diagnostic {
- s := fmt.Sprintf("malformed template error %q: %s", p.ParseErr.Error(), msg)
- d := source.Diagnostic{
- Message: s, Severity: protocol.SeverityError, Range: p.Range(p.nls[0], 1),
- URI: f.URI(), Source: source.TemplateError}
- return []*source.Diagnostic{&d}
- }
- // errors look like `template: :40: unexpected "}" in operand`
- // so the string needs to be parsed
- matches := errRe.FindStringSubmatch(p.ParseErr.Error())
- if len(matches) != 3 {
- msg := fmt.Sprintf("expected 3 matches, got %d (%v)", len(matches), matches)
- return unknownError(msg)
- }
- lineno, err := strconv.Atoi(matches[1])
- if err != nil {
- msg := fmt.Sprintf("couldn't convert %q to int, %v", matches[1], err)
- return unknownError(msg)
- }
- msg := matches[2]
- d := source.Diagnostic{Message: msg, Severity: protocol.SeverityError,
- Source: source.TemplateError}
- start := p.nls[lineno-1]
- if lineno < len(p.nls) {
- size := p.nls[lineno] - start
- d.Range = p.Range(start, size)
- } else {
- d.Range = p.Range(start, 1)
- }
- return []*source.Diagnostic{&d}
-}
-
-// Definition finds the definitions of the symbol at loc. It
-// does not understand scoping (if any) in templates. This code is
-// for defintions, type definitions, and implementations.
-// Results only for variables and templates.
-func Definition(snapshot source.Snapshot, fh source.VersionedFileHandle, loc protocol.Position) ([]protocol.Location, error) {
- x, _, err := symAtPosition(fh, loc)
- if err != nil {
- return nil, err
- }
- sym := x.name
- ans := []protocol.Location{}
- // PJW: this is probably a pattern to abstract
- a := New(snapshot.Templates())
- for k, p := range a.files {
- for _, s := range p.symbols {
- if !s.vardef || s.name != sym {
- continue
- }
- ans = append(ans, protocol.Location{URI: protocol.DocumentURI(k), Range: p.Range(s.start, s.length)})
- }
- }
- return ans, nil
-}
-
-func Hover(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle, position protocol.Position) (*protocol.Hover, error) {
- sym, p, err := symAtPosition(fh, position)
- if sym == nil || err != nil {
- return nil, err
- }
- ans := protocol.Hover{Range: p.Range(sym.start, sym.length), Contents: protocol.MarkupContent{Kind: protocol.Markdown}}
- switch sym.kind {
- case protocol.Function:
- ans.Contents.Value = fmt.Sprintf("function: %s", sym.name)
- case protocol.Variable:
- ans.Contents.Value = fmt.Sprintf("variable: %s", sym.name)
- case protocol.Constant:
- ans.Contents.Value = fmt.Sprintf("constant %s", sym.name)
- case protocol.Method: // field or method
- ans.Contents.Value = fmt.Sprintf("%s: field or method", sym.name)
- case protocol.Package: // template use, template def (PJW: do we want two?)
- ans.Contents.Value = fmt.Sprintf("template %s\n(add definition)", sym.name)
- case protocol.Namespace:
- ans.Contents.Value = fmt.Sprintf("template %s defined", sym.name)
- case protocol.Number:
- ans.Contents.Value = "number"
- case protocol.String:
- ans.Contents.Value = "string"
- case protocol.Boolean:
- ans.Contents.Value = "boolean"
- default:
- ans.Contents.Value = fmt.Sprintf("oops, sym=%#v", sym)
- }
- return &ans, nil
-}
-
-func References(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle, params *protocol.ReferenceParams) ([]protocol.Location, error) {
- sym, _, err := symAtPosition(fh, params.Position)
- if sym == nil || err != nil || sym.name == "" {
- return nil, err
- }
- ans := []protocol.Location{}
-
- a := New(snapshot.Templates())
- for k, p := range a.files {
- for _, s := range p.symbols {
- if s.name != sym.name {
- continue
- }
- if s.vardef && !params.Context.IncludeDeclaration {
- continue
- }
- ans = append(ans, protocol.Location{URI: protocol.DocumentURI(k), Range: p.Range(s.start, s.length)})
- }
- }
- // do these need to be sorted? (a.files is a map)
- return ans, nil
-}
-
-func SemanticTokens(ctx context.Context, snapshot source.Snapshot, spn span.URI, add func(line, start, len uint32), d func() []uint32) (*protocol.SemanticTokens, error) {
- fh, err := snapshot.GetFile(ctx, spn)
- if err != nil {
- return nil, err
- }
- buf, err := fh.Read()
- if err != nil {
- return nil, err
- }
- p := parseBuffer(buf)
-
- for _, t := range p.Tokens() {
- if t.Multiline {
- la, ca := p.LineCol(t.Start)
- lb, cb := p.LineCol(t.End)
- add(la, ca, p.RuneCount(la, ca, 0))
- for l := la + 1; l < lb; l++ {
- add(l, 0, p.RuneCount(l, 0, 0))
- }
- add(lb, 0, p.RuneCount(lb, 0, cb))
- continue
- }
- sz, err := p.TokenSize(t)
- if err != nil {
- return nil, err
- }
- line, col := p.LineCol(t.Start)
- add(line, col, uint32(sz))
- }
- data := d()
- ans := &protocol.SemanticTokens{
- Data: data,
- // for small cache, some day. for now, the LSP client ignores this
- // (that is, when the LSP client starts returning these, we can cache)
- ResultID: fmt.Sprintf("%v", time.Now()),
- }
- return ans, nil
-}
-
-// still need to do rename, etc
diff --git a/internal/lsp/template/parse.go b/internal/lsp/template/parse.go
deleted file mode 100644
index 194eeb3f5..000000000
--- a/internal/lsp/template/parse.go
+++ /dev/null
@@ -1,520 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package template contains code for dealing with templates
-package template
-
-// template files are small enough that the code reprocesses them each time
-// this may be a bad choice for projects with lots of template files.
-
-// This file contains the parsing code, some debugging printing, and
-// implementations for Diagnose, Definition, HJover, References
-
-import (
- "bytes"
- "context"
- "fmt"
- "io"
- "log"
- "regexp"
- "runtime"
- "sort"
- "text/template"
- "text/template/parse"
- "unicode/utf8"
-
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
-)
-
-var (
- Left = []byte("{{")
- Right = []byte("}}")
-)
-
-type Parsed struct {
- buf []byte //contents
- lines [][]byte // needed?, other than for debugging?
- elided []int // offsets where Left was replaced by blanks
-
- // tokens are matched Left-Right pairs, computed before trying to parse
- tokens []Token
-
- // result of parsing
- named []*template.Template // the template and embedded templates
- ParseErr error
- symbols []symbol
- stack []parse.Node // used while computing symbols
-
- // for mapping from offsets in buf to LSP coordinates
- // See FromPosition() and LineCol()
- nls []int // offset of newlines before each line (nls[0]==-1)
- lastnl int // last line seen
- check int // used to decide whether to use lastnl or search through nls
- nonASCII bool // are there any non-ascii runes in buf?
-}
-
-// Token is a single {{...}}. More precisely, Left...Right
-type Token struct {
- Start, End int // offset from start of template
- Multiline bool
-}
-
-// All contains the Parse of all the template files
-type All struct {
- files map[span.URI]*Parsed
-}
-
-// New returns the Parses of the snapshot's tmpl files
-// (maybe cache these, but then avoiding import cycles needs code rearrangements)
-func New(tmpls map[span.URI]source.VersionedFileHandle) *All {
- all := make(map[span.URI]*Parsed)
- for k, v := range tmpls {
- buf, err := v.Read()
- if err != nil { // PJW: decide what to do with these errors
- log.Printf("failed to read %s (%v)", v.URI().Filename(), err)
- continue
- }
- all[k] = parseBuffer(buf)
- }
- return &All{files: all}
-}
-
-func parseBuffer(buf []byte) *Parsed {
- ans := &Parsed{
- buf: buf,
- check: -1,
- nls: []int{-1},
- }
- if len(buf) == 0 {
- return ans
- }
- // how to compute allAscii...
- for _, b := range buf {
- if b >= utf8.RuneSelf {
- ans.nonASCII = true
- break
- }
- }
- if buf[len(buf)-1] != '\n' {
- ans.buf = append(buf, '\n')
- }
- for i, p := range ans.buf {
- if p == '\n' {
- ans.nls = append(ans.nls, i)
- }
- }
- ans.setTokens() // ans.buf may be a new []byte
- ans.lines = bytes.Split(ans.buf, []byte{'\n'})
- t, err := template.New("").Parse(string(ans.buf))
- if err != nil {
- funcs := make(template.FuncMap)
- for t == nil && ans.ParseErr == nil {
- // in 1.17 it may be possible to avoid getting this error
- // template: :2: function "foo" not defined
- matches := parseErrR.FindStringSubmatch(err.Error())
- if len(matches) == 2 {
- // suppress the error by giving it a function with the right name
- funcs[matches[1]] = func() interface{} { return nil }
- t, err = template.New("").Funcs(funcs).Parse(string(ans.buf))
- continue
- }
- ans.ParseErr = err // unfixed error
- return ans
- }
- }
- ans.named = t.Templates()
- // set the symbols
- for _, t := range ans.named {
- ans.stack = append(ans.stack, t.Root)
- ans.findSymbols()
- if t.Name() != "" {
- // defining a template. The pos is just after {{define...}} (or {{block...}}?)
- at, sz := ans.FindLiteralBefore(int(t.Root.Pos))
- s := symbol{start: at, length: sz, name: t.Name(), kind: protocol.Namespace, vardef: true}
- ans.symbols = append(ans.symbols, s)
- }
- }
-
- sort.Slice(ans.symbols, func(i, j int) bool {
- left, right := ans.symbols[i], ans.symbols[j]
- if left.start != right.start {
- return left.start < right.start
- }
- if left.vardef != right.vardef {
- return left.vardef
- }
- return left.kind < right.kind
- })
- return ans
-}
-
-// FindLiteralBefore locates the first preceding string literal
-// returning its position and length in buf
-// or returns -1 if there is none. Assume "", rather than ``, for now
-func (p *Parsed) FindLiteralBefore(pos int) (int, int) {
- left, right := -1, -1
- for i := pos - 1; i >= 0; i-- {
- if p.buf[i] != '"' {
- continue
- }
- if right == -1 {
- right = i
- continue
- }
- left = i
- break
- }
- if left == -1 {
- return -1, 0
- }
- return left + 1, right - left - 1
-}
-
-var (
- parseErrR = regexp.MustCompile(`template:.*function "([^"]+)" not defined`)
-)
-
-func (p *Parsed) setTokens() {
- const (
- // InRaw and InString only occur inside an action (SeenLeft)
- Start = iota
- InRaw
- InString
- SeenLeft
- )
- state := Start
- var left, oldState int
- for n := 0; n < len(p.buf); n++ {
- c := p.buf[n]
- switch state {
- case InRaw:
- if c == '`' {
- state = oldState
- }
- case InString:
- if c == '"' && !isEscaped(p.buf[:n]) {
- state = oldState
- }
- case SeenLeft:
- if c == '`' {
- oldState = state // it's SeenLeft, but a little clearer this way
- state = InRaw
- continue
- }
- if c == '"' {
- oldState = state
- state = InString
- continue
- }
- if bytes.HasPrefix(p.buf[n:], Right) {
- right := n + len(Right)
- tok := Token{Start: left,
- End: right,
- Multiline: bytes.Contains(p.buf[left:right], []byte{'\n'}),
- }
- p.tokens = append(p.tokens, tok)
- state = Start
- }
- // If we see (unquoted) Left then the original left is probably the user
- // typing. Suppress the original left
- if bytes.HasPrefix(p.buf[n:], Left) {
- p.elideAt(left)
- left = n
- n += len(Left) - 1 // skip the rest
- }
- case Start:
- if bytes.HasPrefix(p.buf[n:], Left) {
- left = n
- state = SeenLeft
- n += len(Left) - 1 // skip the rest (avoids {{{ bug)
- }
- }
- }
- // this error occurs after typing {{ at the end of the file
- if state != Start {
- // Unclosed Left. remove the Left at left
- p.elideAt(left)
- }
-}
-
-func (p *Parsed) elideAt(left int) {
- if p.elided == nil {
- // p.buf is the same buffer that v.Read() returns, so copy it.
- // (otherwise the next time it's parsed, elided information is lost)
- b := make([]byte, len(p.buf))
- copy(b, p.buf)
- p.buf = b
- }
- for i := 0; i < len(Left); i++ {
- p.buf[left+i] = ' '
- }
- p.elided = append(p.elided, left)
-}
-
-// isEscaped reports whether the byte after buf is escaped
-func isEscaped(buf []byte) bool {
- backSlashes := 0
- for j := len(buf) - 1; j >= 0 && buf[j] == '\\'; j-- {
- backSlashes++
- }
- return backSlashes%2 == 1
-}
-
-func (p *Parsed) Tokens() []Token {
- return p.tokens
-}
-
-func (p *Parsed) utf16len(buf []byte) int {
- cnt := 0
- if !p.nonASCII {
- return len(buf)
- }
- // we need a utf16len(rune), but we don't have it
- for _, r := range string(buf) {
- cnt++
- if r >= 1<<16 {
- cnt++
- }
- }
- return cnt
-}
-
-func (p *Parsed) TokenSize(t Token) (int, error) {
- if t.Multiline {
- return -1, fmt.Errorf("TokenSize called with Multiline token %#v", t)
- }
- ans := p.utf16len(p.buf[t.Start:t.End])
- return ans, nil
-}
-
-// RuneCount counts runes in line l, from col s to e
-// (e==0 for end of line. called only for multiline tokens)
-func (p *Parsed) RuneCount(l, s, e uint32) uint32 {
- start := p.nls[l] + 1 + int(s)
- end := p.nls[l] + 1 + int(e)
- if e == 0 || end > p.nls[l+1] {
- end = p.nls[l+1]
- }
- return uint32(utf8.RuneCount(p.buf[start:end]))
-}
-
-// LineCol converts from a 0-based byte offset to 0-based line, col. col in runes
-func (p *Parsed) LineCol(x int) (uint32, uint32) {
- if x < p.check {
- p.lastnl = 0
- }
- p.check = x
- for i := p.lastnl; i < len(p.nls); i++ {
- if p.nls[i] <= x {
- continue
- }
- p.lastnl = i
- var count int
- if i > 0 && x == p.nls[i-1] { // \n
- count = 0
- } else {
- count = p.utf16len(p.buf[p.nls[i-1]+1 : x])
- }
- return uint32(i - 1), uint32(count)
- }
- if x == len(p.buf)-1 { // trailing \n
- return uint32(len(p.nls) - 1), 0
- }
- // shouldn't happen
- for i := 1; i < 4; i++ {
- _, f, l, ok := runtime.Caller(i)
- if !ok {
- break
- }
- log.Printf("%d: %s:%d", i, f, l)
- }
-
- msg := fmt.Errorf("LineCol off the end, %d of %d, nls=%v, %q", x, len(p.buf), p.nls, p.buf[x:])
- event.Error(context.Background(), "internal error", msg)
- return 0, 0
-}
-
-// Position produces a protocol.Position from an offset in the template
-func (p *Parsed) Position(pos int) protocol.Position {
- line, col := p.LineCol(pos)
- return protocol.Position{Line: line, Character: col}
-}
-
-func (p *Parsed) Range(x, length int) protocol.Range {
- line, col := p.LineCol(x)
- ans := protocol.Range{
- Start: protocol.Position{Line: line, Character: col},
- End: protocol.Position{Line: line, Character: col + uint32(length)},
- }
- return ans
-}
-
-// FromPosition translates a protocol.Position into an offset into the template
-func (p *Parsed) FromPosition(x protocol.Position) int {
- l, c := int(x.Line), int(x.Character)
- if l >= len(p.nls) || p.nls[l]+1 >= len(p.buf) {
- // paranoia to avoid panic. return the largest offset
- return len(p.buf)
- }
- line := p.buf[p.nls[l]+1:]
- cnt := 0
- for w := range string(line) {
- if cnt >= c {
- return w + p.nls[l] + 1
- }
- cnt++
- }
- // do we get here? NO
- pos := int(x.Character) + p.nls[int(x.Line)] + 1
- event.Error(context.Background(), "internal error", fmt.Errorf("surprise %#v", x))
- return pos
-}
-
-func symAtPosition(fh source.FileHandle, loc protocol.Position) (*symbol, *Parsed, error) {
- buf, err := fh.Read()
- if err != nil {
- return nil, nil, err
- }
- p := parseBuffer(buf)
- pos := p.FromPosition(loc)
- syms := p.SymsAtPos(pos)
- if len(syms) == 0 {
- return nil, p, fmt.Errorf("no symbol found")
- }
- if len(syms) > 1 {
- log.Printf("Hover: %d syms, not 1 %v", len(syms), syms)
- }
- sym := syms[0]
- return &sym, p, nil
-}
-
-func (p *Parsed) SymsAtPos(pos int) []symbol {
- ans := []symbol{}
- for _, s := range p.symbols {
- if s.start <= pos && pos < s.start+s.length {
- ans = append(ans, s)
- }
- }
- return ans
-}
-
-type wrNode struct {
- p *Parsed
- w io.Writer
-}
-
-// WriteNode is for debugging
-func (p *Parsed) WriteNode(w io.Writer, n parse.Node) {
- wr := wrNode{p: p, w: w}
- wr.writeNode(n, "")
-}
-
-func (wr wrNode) writeNode(n parse.Node, indent string) {
- if n == nil {
- return
- }
- at := func(pos parse.Pos) string {
- line, col := wr.p.LineCol(int(pos))
- return fmt.Sprintf("(%d)%v:%v", pos, line, col)
- }
- switch x := n.(type) {
- case *parse.ActionNode:
- fmt.Fprintf(wr.w, "%sActionNode at %s\n", indent, at(x.Pos))
- wr.writeNode(x.Pipe, indent+". ")
- case *parse.BoolNode:
- fmt.Fprintf(wr.w, "%sBoolNode at %s, %v\n", indent, at(x.Pos), x.True)
- case *parse.BranchNode:
- fmt.Fprintf(wr.w, "%sBranchNode at %s\n", indent, at(x.Pos))
- wr.writeNode(x.Pipe, indent+"Pipe. ")
- wr.writeNode(x.List, indent+"List. ")
- wr.writeNode(x.ElseList, indent+"Else. ")
- case *parse.ChainNode:
- fmt.Fprintf(wr.w, "%sChainNode at %s, %v\n", indent, at(x.Pos), x.Field)
- case *parse.CommandNode:
- fmt.Fprintf(wr.w, "%sCommandNode at %s, %d children\n", indent, at(x.Pos), len(x.Args))
- for _, a := range x.Args {
- wr.writeNode(a, indent+". ")
- }
- //case *parse.CommentNode: // 1.16
- case *parse.DotNode:
- fmt.Fprintf(wr.w, "%sDotNode at %s\n", indent, at(x.Pos))
- case *parse.FieldNode:
- fmt.Fprintf(wr.w, "%sFieldNode at %s, %v\n", indent, at(x.Pos), x.Ident)
- case *parse.IdentifierNode:
- fmt.Fprintf(wr.w, "%sIdentifierNode at %s, %v\n", indent, at(x.Pos), x.Ident)
- case *parse.IfNode:
- fmt.Fprintf(wr.w, "%sIfNode at %s\n", indent, at(x.Pos))
- wr.writeNode(&x.BranchNode, indent+". ")
- case *parse.ListNode:
- if x == nil {
- return // nil BranchNode.ElseList
- }
- fmt.Fprintf(wr.w, "%sListNode at %s, %d children\n", indent, at(x.Pos), len(x.Nodes))
- for _, n := range x.Nodes {
- wr.writeNode(n, indent+". ")
- }
- case *parse.NilNode:
- fmt.Fprintf(wr.w, "%sNilNode at %s\n", indent, at(x.Pos))
- case *parse.NumberNode:
- fmt.Fprintf(wr.w, "%sNumberNode at %s, %s\n", indent, at(x.Pos), x.Text)
- case *parse.PipeNode:
- if x == nil {
- return // {{template "xxx"}}
- }
- fmt.Fprintf(wr.w, "%sPipeNode at %s, %d vars, %d cmds, IsAssign:%v\n",
- indent, at(x.Pos), len(x.Decl), len(x.Cmds), x.IsAssign)
- for _, d := range x.Decl {
- wr.writeNode(d, indent+"Decl. ")
- }
- for _, c := range x.Cmds {
- wr.writeNode(c, indent+"Cmd. ")
- }
- case *parse.RangeNode:
- fmt.Fprintf(wr.w, "%sRangeNode at %s\n", indent, at(x.Pos))
- wr.writeNode(&x.BranchNode, indent+". ")
- case *parse.StringNode:
- fmt.Fprintf(wr.w, "%sStringNode at %s, %s\n", indent, at(x.Pos), x.Quoted)
- case *parse.TemplateNode:
- fmt.Fprintf(wr.w, "%sTemplateNode at %s, %s\n", indent, at(x.Pos), x.Name)
- wr.writeNode(x.Pipe, indent+". ")
- case *parse.TextNode:
- fmt.Fprintf(wr.w, "%sTextNode at %s, len %d\n", indent, at(x.Pos), len(x.Text))
- case *parse.VariableNode:
- fmt.Fprintf(wr.w, "%sVariableNode at %s, %v\n", indent, at(x.Pos), x.Ident)
- case *parse.WithNode:
- fmt.Fprintf(wr.w, "%sWithNode at %s\n", indent, at(x.Pos))
- wr.writeNode(&x.BranchNode, indent+". ")
- }
-}
-
-// short prints at most 40 bytes of node.String(), for debugging
-func short(n parse.Node) (ret string) {
- defer func() {
- if x := recover(); x != nil {
- // all because of typed nils
- ret = "NIL"
- }
- }()
- s := n.String()
- if len(s) > 40 {
- return s[:40] + "..."
- }
- return s
-}
-
-var kindNames = []string{"", "File", "Module", "Namespace", "Package", "Class", "Method", "Property",
- "Field", "Constructor", "Enum", "Interface", "Function", "Variable", "Constant", "String",
- "Number", "Boolean", "Array", "Object", "Key", "Null", "EnumMember", "Struct", "Event",
- "Operator", "TypeParameter"}
-
-func kindStr(k protocol.SymbolKind) string {
- n := int(k)
- if n < 1 || n >= len(kindNames) {
- return fmt.Sprintf("?SymbolKind %d?", n)
- }
- return kindNames[n]
-}
diff --git a/internal/lsp/template/symbols.go b/internal/lsp/template/symbols.go
deleted file mode 100644
index ce5a1e799..000000000
--- a/internal/lsp/template/symbols.go
+++ /dev/null
@@ -1,230 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
- "bytes"
- "context"
- "fmt"
- "text/template/parse"
- "unicode/utf8"
-
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
-)
-
-// in local coordinates, to be translated to protocol.DocumentSymbol
-type symbol struct {
- start int // for sorting
- length int // in runes (unicode code points)
- name string
- kind protocol.SymbolKind
- vardef bool // is this a variable definition?
- // do we care about selection range, or children?
- // no children yet, and selection range is the same as range
-}
-
-func (s symbol) String() string {
- return fmt.Sprintf("{%d,%d,%s,%s,%v}", s.start, s.length, s.name, s.kind, s.vardef)
-}
-
-// for FieldNode or VariableNode (or ChainNode?)
-func (p *Parsed) fields(flds []string, x parse.Node) []symbol {
- ans := []symbol{}
- // guessing that there are no embedded blanks allowed. The doc is unclear
- lookfor := ""
- switch x.(type) {
- case *parse.FieldNode:
- for _, f := range flds {
- lookfor += "." + f // quadratic, but probably ok
- }
- case *parse.VariableNode:
- lookfor = flds[0]
- for i := 1; i < len(flds); i++ {
- lookfor += "." + flds[i]
- }
- case *parse.ChainNode: // PJW, what are these?
- for _, f := range flds {
- lookfor += "." + f // quadratic, but probably ok
- }
- default:
- // If these happen they will happen even if gopls is restarted
- // and the users does the same thing, so it is better not to panic.
- // context.Background() is used because we don't have access
- // to any other context. [we could, but it would be complicated]
- event.Log(context.Background(), fmt.Sprintf("%T unexpected in fields()", x))
- return nil
- }
- if len(lookfor) == 0 {
- event.Log(context.Background(), fmt.Sprintf("no strings in fields() %#v", x))
- return nil
- }
- startsAt := int(x.Position())
- ix := bytes.Index(p.buf[startsAt:], []byte(lookfor)) // HasPrefix? PJW?
- if ix < 0 || ix > len(lookfor) { // lookfor expected to be at start (or so)
- // probably golang.go/#43388, so back up
- startsAt -= len(flds[0]) + 1
- ix = bytes.Index(p.buf[startsAt:], []byte(lookfor)) // ix might be 1? PJW
- if ix < 0 {
- return ans
- }
- }
- at := ix + startsAt
- for _, f := range flds {
- at += 1 // .
- kind := protocol.Method
- if f[0] == '$' {
- kind = protocol.Variable
- }
- sym := symbol{name: f, kind: kind, start: at, length: utf8.RuneCount([]byte(f))}
- if kind == protocol.Variable && len(p.stack) > 1 {
- if pipe, ok := p.stack[len(p.stack)-2].(*parse.PipeNode); ok {
- for _, y := range pipe.Decl {
- if x == y {
- sym.vardef = true
- }
- }
- }
- }
- ans = append(ans, sym)
- at += len(f)
- }
- return ans
-}
-
-func (p *Parsed) findSymbols() {
- if len(p.stack) == 0 {
- return
- }
- n := p.stack[len(p.stack)-1]
- pop := func() {
- p.stack = p.stack[:len(p.stack)-1]
- }
- if n == nil { // allowing nil simplifies the code
- pop()
- return
- }
- nxt := func(nd parse.Node) {
- p.stack = append(p.stack, nd)
- p.findSymbols()
- }
- switch x := n.(type) {
- case *parse.ActionNode:
- nxt(x.Pipe)
- case *parse.BoolNode:
- // need to compute the length from the value
- msg := fmt.Sprintf("%v", x.True)
- p.symbols = append(p.symbols, symbol{start: int(x.Pos), length: len(msg), kind: protocol.Boolean})
- case *parse.BranchNode:
- nxt(x.Pipe)
- nxt(x.List)
- nxt(x.ElseList)
- case *parse.ChainNode:
- p.symbols = append(p.symbols, p.fields(x.Field, x)...)
- nxt(x.Node)
- case *parse.CommandNode:
- for _, a := range x.Args {
- nxt(a)
- }
- //case *parse.CommentNode: // go 1.16
- // log.Printf("implement %d", x.Type())
- case *parse.DotNode:
- sym := symbol{name: "dot", kind: protocol.Variable, start: int(x.Pos), length: 1}
- p.symbols = append(p.symbols, sym)
- case *parse.FieldNode:
- p.symbols = append(p.symbols, p.fields(x.Ident, x)...)
- case *parse.IdentifierNode:
- sym := symbol{name: x.Ident, kind: protocol.Function, start: int(x.Pos),
- length: utf8.RuneCount([]byte(x.Ident))}
- p.symbols = append(p.symbols, sym)
- case *parse.IfNode:
- nxt(&x.BranchNode)
- case *parse.ListNode:
- if x != nil { // wretched typed nils. Node should have an IfNil
- for _, nd := range x.Nodes {
- nxt(nd)
- }
- }
- case *parse.NilNode:
- sym := symbol{name: "nil", kind: protocol.Constant, start: int(x.Pos), length: 3}
- p.symbols = append(p.symbols, sym)
- case *parse.NumberNode:
- // no name; ascii
- p.symbols = append(p.symbols, symbol{start: int(x.Pos), length: len(x.Text), kind: protocol.Number})
- case *parse.PipeNode:
- if x == nil { // {{template "foo"}}
- return
- }
- for _, d := range x.Decl {
- nxt(d)
- }
- for _, c := range x.Cmds {
- nxt(c)
- }
- case *parse.RangeNode:
- nxt(&x.BranchNode)
- case *parse.StringNode:
- // no name
- sz := utf8.RuneCount([]byte(x.Text))
- p.symbols = append(p.symbols, symbol{start: int(x.Pos), length: sz, kind: protocol.String})
- case *parse.TemplateNode: // invoking a template
- // x.Pos points to the quote before the name
- p.symbols = append(p.symbols, symbol{name: x.Name, kind: protocol.Package, start: int(x.Pos) + 1,
- length: utf8.RuneCount([]byte(x.Name))})
- nxt(x.Pipe)
- case *parse.TextNode:
- if len(x.Text) == 1 && x.Text[0] == '\n' {
- break
- }
- // nothing to report, but build one for hover
- sz := utf8.RuneCount([]byte(x.Text))
- p.symbols = append(p.symbols, symbol{start: int(x.Pos), length: sz, kind: protocol.Constant})
- case *parse.VariableNode:
- p.symbols = append(p.symbols, p.fields(x.Ident, x)...)
- case *parse.WithNode:
- nxt(&x.BranchNode)
-
- }
- pop()
-}
-
-// DocumentSymbols returns a hierarchy of the symbols defined in a template file.
-// (The hierarchy is flat. SymbolInformation might be better.)
-func DocumentSymbols(snapshot source.Snapshot, fh source.FileHandle) ([]protocol.DocumentSymbol, error) {
- buf, err := fh.Read()
- if err != nil {
- return nil, err
- }
- p := parseBuffer(buf)
- if p.ParseErr != nil {
- return nil, p.ParseErr
- }
- var ans []protocol.DocumentSymbol
- for _, s := range p.symbols {
- if s.kind == protocol.Constant {
- continue
- }
- d := kindStr(s.kind)
- if d == "Namespace" {
- d = "Template"
- }
- if s.vardef {
- d += "(def)"
- } else {
- d += "(use)"
- }
- r := p.Range(s.start, s.length)
- y := protocol.DocumentSymbol{
- Name: s.name,
- Detail: d,
- Kind: s.kind,
- Range: r,
- SelectionRange: r, // or should this be the entire {{...}}?
- }
- ans = append(ans, y)
- }
- return ans, nil
-}
diff --git a/internal/lsp/testdata/analyzer/bad_test.go b/internal/lsp/testdata/analyzer/bad_test.go
deleted file mode 100644
index c819cbc01..000000000
--- a/internal/lsp/testdata/analyzer/bad_test.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package analyzer
-
-import (
- "fmt"
- "sync"
- "testing"
-)
-
-func Testbad(t *testing.T) { //@diag("", "tests", "Testbad has malformed name: first letter after 'Test' must not be lowercase", "warning")
- var x sync.Mutex
- _ = x //@diag("x", "copylocks", "assignment copies lock value to _: sync.Mutex", "warning")
-
- printfWrapper("%s") //@diag(re`printfWrapper\(.*\)`, "printf", "golang.org/x/tools/internal/lsp/analyzer.printfWrapper format %s reads arg #1, but call has 0 args", "warning")
-}
-
-func printfWrapper(format string, args ...interface{}) {
- fmt.Printf(format, args...)
-}
diff --git a/internal/lsp/testdata/arraytype/array_type.go.in b/internal/lsp/testdata/arraytype/array_type.go.in
deleted file mode 100644
index 7e9a96f7b..000000000
--- a/internal/lsp/testdata/arraytype/array_type.go.in
+++ /dev/null
@@ -1,48 +0,0 @@
-package arraytype
-
-import (
- "golang.org/x/tools/internal/lsp/foo"
-)
-
-func _() {
- var (
- val string //@item(atVal, "val", "string", "var")
- )
-
- [] //@complete(" //", PackageFoo)
-
- []val //@complete(" //")
-
- []foo.StructFoo //@complete(" //", StructFoo)
-
- []foo.StructFoo(nil) //@complete("(", StructFoo)
-
- []*foo.StructFoo //@complete(" //", StructFoo)
-
- [...]foo.StructFoo //@complete(" //", StructFoo)
-
- [2][][4]foo.StructFoo //@complete(" //", StructFoo)
-
- []struct { f []foo.StructFoo } //@complete(" }", StructFoo)
-}
-
-func _() {
- type myInt int //@item(atMyInt, "myInt", "int", "type")
-
- var mark []myInt //@item(atMark, "mark", "[]myInt", "var")
-
- var s []myInt //@item(atS, "s", "[]myInt", "var")
- s = []m //@complete(" //", atMyInt)
- s = [] //@complete(" //", atMyInt, PackageFoo)
-
- var a [1]myInt
- a = [1]m //@complete(" //", atMyInt)
-
- var ds [][]myInt
- ds = [][]m //@complete(" //", atMyInt)
-}
-
-func _() {
- var b [0]byte //@item(atByte, "b", "[0]byte", "var")
- var _ []byte = b //@snippet(" //", atByte, "b[:]", "b[:]")
-}
diff --git a/internal/lsp/testdata/assign/assign.go.in b/internal/lsp/testdata/assign/assign.go.in
deleted file mode 100644
index 8c00ae9e0..000000000
--- a/internal/lsp/testdata/assign/assign.go.in
+++ /dev/null
@@ -1,26 +0,0 @@
-package assign
-
-import "golang.org/x/tools/internal/lsp/assign/internal/secret"
-
-func _() {
- secret.Hello()
- var (
- myInt int //@item(assignInt, "myInt", "int", "var")
- myStr string //@item(assignStr, "myStr", "string", "var")
- )
-
- var _ string = my //@rank(" //", assignStr, assignInt)
- var _ string = //@rank(" //", assignStr, assignInt)
-}
-
-func _() {
- var a string = a //@complete(" //")
-}
-
-func _() {
- fooBar := fooBa //@complete(" //"),item(assignFooBar, "fooBar", "", "var")
- abc, fooBar := 123, fooBa //@complete(" //", assignFooBar)
- {
- fooBar := fooBa //@complete(" //", assignFooBar)
- }
-}
diff --git a/internal/lsp/testdata/bad/bad0.go b/internal/lsp/testdata/bad/bad0.go
deleted file mode 100644
index 36a4e6b95..000000000
--- a/internal/lsp/testdata/bad/bad0.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// +build go1.11
-
-package bad
-
-import _ "golang.org/x/tools/internal/lsp/assign/internal/secret" //@diag("\"golang.org/x/tools/internal/lsp/assign/internal/secret\"", "compiler", "could not import golang.org/x/tools/internal/lsp/assign/internal/secret (invalid use of internal package golang.org/x/tools/internal/lsp/assign/internal/secret)", "error")
-
-func stuff() { //@item(stuff, "stuff", "func()", "func")
- x := "heeeeyyyy"
- random2(x) //@diag("x", "compiler", "cannot use x (variable of type string) as int value in argument to random2", "error")
- random2(1) //@complete("dom", random, random2, random3)
- y := 3 //@diag("y", "compiler", "y declared but not used", "error")
-}
-
-type bob struct { //@item(bob, "bob", "struct{...}", "struct")
- x int
-}
-
-func _() {
- var q int
- _ = &bob{
- f: q, //@diag("f: q", "compiler", "unknown field f in struct literal", "error")
- }
-}
diff --git a/internal/lsp/testdata/bad/bad1.go b/internal/lsp/testdata/bad/bad1.go
deleted file mode 100644
index 512f2d986..000000000
--- a/internal/lsp/testdata/bad/bad1.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// +build go1.11
-
-package bad
-
-// See #36637
-type stateFunc func() stateFunc //@item(stateFunc, "stateFunc", "func() stateFunc", "type")
-
-var a unknown //@item(global_a, "a", "unknown", "var"),diag("unknown", "compiler", "undeclared name: unknown", "error")
-
-func random() int { //@item(random, "random", "func() int", "func")
- //@complete("", global_a, bob, random, random2, random3, stateFunc, stuff)
- return 0
-}
-
-func random2(y int) int { //@item(random2, "random2", "func(y int) int", "func"),item(bad_y_param, "y", "int", "var")
- x := 6 //@item(x, "x", "int", "var"),diag("x", "compiler", "x declared but not used", "error")
- var q blah //@item(q, "q", "blah", "var"),diag("q", "compiler", "q declared but not used", "error"),diag("blah", "compiler", "undeclared name: blah", "error")
- var t **blob //@item(t, "t", "**blob", "var"),diag("t", "compiler", "t declared but not used", "error"),diag("blob", "compiler", "undeclared name: blob", "error")
- //@complete("", q, t, x, bad_y_param, global_a, bob, random, random2, random3, stateFunc, stuff)
-
- return y
-}
-
-func random3(y ...int) { //@item(random3, "random3", "func(y ...int)", "func"),item(y_variadic_param, "y", "[]int", "var")
- //@complete("", y_variadic_param, global_a, bob, random, random2, random3, stateFunc, stuff)
-
- var ch chan (favType1) //@item(ch, "ch", "chan (favType1)", "var"),diag("ch", "compiler", "ch declared but not used", "error"),diag("favType1", "compiler", "undeclared name: favType1", "error")
- var m map[keyType]int //@item(m, "m", "map[keyType]int", "var"),diag("m", "compiler", "m declared but not used", "error"),diag("keyType", "compiler", "undeclared name: keyType", "error")
- var arr []favType2 //@item(arr, "arr", "[]favType2", "var"),diag("arr", "compiler", "arr declared but not used", "error"),diag("favType2", "compiler", "undeclared name: favType2", "error")
- var fn1 func() badResult //@item(fn1, "fn1", "func() badResult", "var"),diag("fn1", "compiler", "fn1 declared but not used", "error"),diag("badResult", "compiler", "undeclared name: badResult", "error")
- var fn2 func(badParam) //@item(fn2, "fn2", "func(badParam)", "var"),diag("fn2", "compiler", "fn2 declared but not used", "error"),diag("badParam", "compiler", "undeclared name: badParam", "error")
- //@complete("", arr, ch, fn1, fn2, m, y_variadic_param, global_a, bob, random, random2, random3, stateFunc, stuff)
-}
diff --git a/internal/lsp/testdata/badstmt/badstmt.go.in b/internal/lsp/testdata/badstmt/badstmt.go.in
deleted file mode 100644
index 5a5607910..000000000
--- a/internal/lsp/testdata/badstmt/badstmt.go.in
+++ /dev/null
@@ -1,26 +0,0 @@
-package badstmt
-
-import (
- "golang.org/x/tools/internal/lsp/foo"
-)
-
-func _() {
- defer foo.F //@complete(" //", Foo),diag(" //", "syntax", "function must be invoked in defer statement", "error")
- y := 1
- defer foo.F //@complete(" //", Foo)
-}
-
-func _() {
- switch true {
- case true:
- go foo.F //@complete(" //", Foo)
- }
-}
-
-func _() {
- defer func() {
- foo.F //@complete(" //", Foo),snippet(" //", Foo, "Foo()", "Foo()")
-
- foo. //@rank(" //", Foo)
- }
-}
diff --git a/internal/lsp/testdata/badstmt/badstmt_2.go.in b/internal/lsp/testdata/badstmt/badstmt_2.go.in
deleted file mode 100644
index f754b46aa..000000000
--- a/internal/lsp/testdata/badstmt/badstmt_2.go.in
+++ /dev/null
@@ -1,9 +0,0 @@
-package badstmt
-
-import (
- "golang.org/x/tools/internal/lsp/foo"
-)
-
-func _() {
- defer func() { foo. } //@rank(" }", Foo)
-}
diff --git a/internal/lsp/testdata/badstmt/badstmt_3.go.in b/internal/lsp/testdata/badstmt/badstmt_3.go.in
deleted file mode 100644
index be774e84b..000000000
--- a/internal/lsp/testdata/badstmt/badstmt_3.go.in
+++ /dev/null
@@ -1,9 +0,0 @@
-package badstmt
-
-import (
- "golang.org/x/tools/internal/lsp/foo"
-)
-
-func _() {
- go foo. //@rank(" //", Foo, IntFoo),snippet(" //", Foo, "Foo()", "Foo()")
-}
diff --git a/internal/lsp/testdata/badstmt/badstmt_4.go.in b/internal/lsp/testdata/badstmt/badstmt_4.go.in
deleted file mode 100644
index a9b46fb02..000000000
--- a/internal/lsp/testdata/badstmt/badstmt_4.go.in
+++ /dev/null
@@ -1,11 +0,0 @@
-package badstmt
-
-import (
- "golang.org/x/tools/internal/lsp/foo"
-)
-
-func _() {
- go func() {
- defer foo. //@rank(" //", Foo, IntFoo)
- }
-}
diff --git a/internal/lsp/testdata/bar/bar.go.in b/internal/lsp/testdata/bar/bar.go.in
deleted file mode 100644
index c0f4b4c45..000000000
--- a/internal/lsp/testdata/bar/bar.go.in
+++ /dev/null
@@ -1,47 +0,0 @@
-// +build go1.11
-
-package bar
-
-import (
- "golang.org/x/tools/internal/lsp/foo" //@item(foo, "foo", "\"golang.org/x/tools/internal/lsp/foo\"", "package")
-)
-
-func helper(i foo.IntFoo) {} //@item(helper, "helper", "func(i foo.IntFoo)", "func")
-
-func _() {
- help //@complete("l", helper)
- _ = foo.StructFoo{} //@complete("S", IntFoo, StructFoo)
-}
-
-// Bar is a function.
-func Bar() { //@item(Bar, "Bar", "func()", "func", "Bar is a function.")
- foo.Foo() //@complete("F", Foo, IntFoo, StructFoo)
- var _ foo.IntFoo //@complete("I", IntFoo, StructFoo)
- foo.() //@complete("(", Foo, IntFoo, StructFoo)
-}
-
-func _() {
- var Valentine int //@item(Valentine, "Valentine", "int", "var")
-
- _ = foo.StructFoo{
- Valu //@complete(" //", Value)
- }
- _ = foo.StructFoo{
- Va //@complete("a", Value, Valentine)
- }
- _ = foo.StructFoo{
- Value: 5, //@complete("a", Value)
- }
- _ = foo.StructFoo{
- //@complete("", Value, Valentine, foo, helper, Bar)
- }
- _ = foo.StructFoo{
- Value: Valen //@complete("le", Valentine)
- }
- _ = foo.StructFoo{
- Value: //@complete(" //", Valentine, foo, helper, Bar)
- }
- _ = foo.StructFoo{
- Value: //@complete(" ", Valentine, foo, helper, Bar)
- }
-}
diff --git a/internal/lsp/testdata/basiclit/basiclit.go b/internal/lsp/testdata/basiclit/basiclit.go
deleted file mode 100644
index 9829003d3..000000000
--- a/internal/lsp/testdata/basiclit/basiclit.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package basiclit
-
-func _() {
- var a int // something for lexical completions
-
- _ = "hello." //@complete(".")
-
- _ = 1 //@complete(" //")
-
- _ = 1. //@complete(".")
-
- _ = 'a' //@complete("' ")
-
- _ = 'a' //@hover("'a'", "'a', U+0061, LATIN SMALL LETTER A")
- _ = 0x61 //@hover("0x61", "'a', U+0061, LATIN SMALL LETTER A")
-
- _ = '\u2211' //@hover("'\\u2211'", "'∑', U+2211, N-ARY SUMMATION")
- _ = 0x2211 //@hover("0x2211", "'∑', U+2211, N-ARY SUMMATION")
- _ = "foo \u2211 bar" //@hover("\\u2211", "'∑', U+2211, N-ARY SUMMATION")
-
- _ = '\a' //@hover("'\\a'", "U+0007, control")
- _ = "foo \a bar" //@hover("\\a", "U+0007, control")
-
- _ = '\U0001F30A' //@hover("'\\U0001F30A'", "'🌊', U+1F30A, WATER WAVE")
- _ = 0x0001F30A //@hover("0x0001F30A", "'🌊', U+1F30A, WATER WAVE")
- _ = "foo \U0001F30A bar" //@hover("\\U0001F30A", "'🌊', U+1F30A, WATER WAVE")
-
- _ = '\x7E' //@hover("'\\x7E'", "'~', U+007E, TILDE")
- _ = "foo \x7E bar" //@hover("\\x7E", "'~', U+007E, TILDE")
- _ = "foo \a bar" //@hover("\\a", "U+0007, control")
-
- _ = '\173' //@hover("'\\173'", "'{', U+007B, LEFT CURLY BRACKET")
- _ = "foo \173 bar" //@hover("\\173", "'{', U+007B, LEFT CURLY BRACKET")
- _ = "foo \173 bar \u2211 baz" //@hover("\\173", "'{', U+007B, LEFT CURLY BRACKET")
- _ = "foo \173 bar \u2211 baz" //@hover("\\u2211", "'∑', U+2211, N-ARY SUMMATION")
- _ = "foo\173bar\u2211baz" //@hover("\\173", "'{', U+007B, LEFT CURLY BRACKET")
- _ = "foo\173bar\u2211baz" //@hover("\\u2211", "'∑', U+2211, N-ARY SUMMATION")
-
- // search for runes in string only if there is an escaped sequence
- _ = "hello" //@hover("\"hello\"", "")
-
- // incorrect escaped rune sequences
- _ = '\0' //@hover("'\\0'", "")
- _ = '\u22111' //@hover("'\\u22111'", "")
- _ = '\U00110000' //@hover("'\\U00110000'", "")
- _ = '\u12e45'//@hover("'\\u12e45'", "")
- _ = '\xa' //@hover("'\\xa'", "")
- _ = 'aa' //@hover("'aa'", "")
-
- // other basic lits
- _ = 1 //@hover("1", "")
- _ = 1.2 //@hover("1.2", "")
- _ = 1.2i //@hover("1.2i", "")
- _ = 0123 //@hover("0123", "")
- _ = 0x1234567890 //@hover("0x1234567890", "")
-}
diff --git a/internal/lsp/testdata/baz/baz.go.in b/internal/lsp/testdata/baz/baz.go.in
deleted file mode 100644
index 3b74ee580..000000000
--- a/internal/lsp/testdata/baz/baz.go.in
+++ /dev/null
@@ -1,33 +0,0 @@
-// +build go1.11
-
-package baz
-
-import (
- "golang.org/x/tools/internal/lsp/bar"
-
- f "golang.org/x/tools/internal/lsp/foo"
-)
-
-var FooStruct f.StructFoo
-
-func Baz() {
- defer bar.Bar() //@complete("B", Bar)
- // TODO(rstambler): Test completion here.
- defer bar.B
- var x f.IntFoo //@complete("n", IntFoo),typdef("x", IntFoo)
- bar.Bar() //@complete("B", Bar)
-}
-
-func _() {
- bob := f.StructFoo{Value: 5}
- if x := bob. //@complete(" //", Value)
- switch true == false {
- case true:
- if x := bob. //@complete(" //", Value)
- case false:
- }
- if x := bob.Va //@complete("a", Value)
- switch true == true {
- default:
- }
-}
diff --git a/internal/lsp/testdata/builtins/builtins.go b/internal/lsp/testdata/builtins/builtins.go
deleted file mode 100644
index 25c29f21e..000000000
--- a/internal/lsp/testdata/builtins/builtins.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package builtins
-
-func _() {
- //@complete("", append, bool, byte, cap, close, complex, complex128, complex64, copy, delete, error, _false, float32, float64, imag, int, int16, int32, int64, int8, len, make, new, panic, print, println, real, recover, rune, string, _true, uint, uint16, uint32, uint64, uint8, uintptr, _nil)
-}
-
-/* Create markers for builtin types. Only for use by this test.
-/* append(slice []Type, elems ...Type) []Type */ //@item(append, "append", "func(slice []Type, elems ...Type) []Type", "func")
-/* bool */ //@item(bool, "bool", "", "type")
-/* byte */ //@item(byte, "byte", "", "type")
-/* cap(v Type) int */ //@item(cap, "cap", "func(v Type) int", "func")
-/* close(c chan<- Type) */ //@item(close, "close", "func(c chan<- Type)", "func")
-/* complex(r float64, i float64) */ //@item(complex, "complex", "func(r float64, i float64) complex128", "func")
-/* complex128 */ //@item(complex128, "complex128", "", "type")
-/* complex64 */ //@item(complex64, "complex64", "", "type")
-/* copy(dst []Type, src []Type) int */ //@item(copy, "copy", "func(dst []Type, src []Type) int", "func")
-/* delete(m map[Type]Type1, key Type) */ //@item(delete, "delete", "func(m map[Type]Type1, key Type)", "func")
-/* error */ //@item(error, "error", "", "interface")
-/* false */ //@item(_false, "false", "", "const")
-/* float32 */ //@item(float32, "float32", "", "type")
-/* float64 */ //@item(float64, "float64", "", "type")
-/* imag(c complex128) float64 */ //@item(imag, "imag", "func(c complex128) float64", "func")
-/* int */ //@item(int, "int", "", "type")
-/* int16 */ //@item(int16, "int16", "", "type")
-/* int32 */ //@item(int32, "int32", "", "type")
-/* int64 */ //@item(int64, "int64", "", "type")
-/* int8 */ //@item(int8, "int8", "", "type")
-/* iota */ //@item(iota, "iota", "", "const")
-/* len(v Type) int */ //@item(len, "len", "func(v Type) int", "func")
-/* make(t Type, size ...int) Type */ //@item(make, "make", "func(t Type, size ...int) Type", "func")
-/* new(Type) *Type */ //@item(new, "new", "func(Type) *Type", "func")
-/* nil */ //@item(_nil, "nil", "", "var")
-/* panic(v interface{}) */ //@item(panic, "panic", "func(v interface{})", "func")
-/* print(args ...Type) */ //@item(print, "print", "func(args ...Type)", "func")
-/* println(args ...Type) */ //@item(println, "println", "func(args ...Type)", "func")
-/* real(c complex128) float64 */ //@item(real, "real", "func(c complex128) float64", "func")
-/* recover() interface{} */ //@item(recover, "recover", "func() interface{}", "func")
-/* rune */ //@item(rune, "rune", "", "type")
-/* string */ //@item(string, "string", "", "type")
-/* true */ //@item(_true, "true", "", "const")
-/* uint */ //@item(uint, "uint", "", "type")
-/* uint16 */ //@item(uint16, "uint16", "", "type")
-/* uint32 */ //@item(uint32, "uint32", "", "type")
-/* uint64 */ //@item(uint64, "uint64", "", "type")
-/* uint8 */ //@item(uint8, "uint8", "", "type")
-/* uintptr */ //@item(uintptr, "uintptr", "", "type")
diff --git a/internal/lsp/testdata/callhierarchy/callhierarchy.go b/internal/lsp/testdata/callhierarchy/callhierarchy.go
deleted file mode 100644
index 58c23bdd6..000000000
--- a/internal/lsp/testdata/callhierarchy/callhierarchy.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package callhierarchy
-
-import "golang.org/x/tools/internal/lsp/callhierarchy/outgoing"
-
-func a() { //@mark(hierarchyA, "a")
- D()
-}
-
-func b() { //@mark(hierarchyB, "b")
- D()
-}
-
-// C is an exported function
-func C() { //@mark(hierarchyC, "C")
- D()
- D()
-}
-
-// To test hierarchy across function literals
-var x = func() { //@mark(hierarchyLiteral, "func"),mark(hierarchyLiteralOut, "x")
- D()
-}
-
-// D is exported to test incoming/outgoing calls across packages
-func D() { //@mark(hierarchyD, "D"),incomingcalls(hierarchyD, hierarchyA, hierarchyB, hierarchyC, hierarchyLiteral, incomingA),outgoingcalls(hierarchyD, hierarchyE, hierarchyF, hierarchyG, hierarchyLiteralOut, outgoingB, hierarchyFoo, hierarchyH, hierarchyI, hierarchyJ, hierarchyK)
- e()
- x()
- F()
- outgoing.B()
- foo := func() {} //@mark(hierarchyFoo, "foo"),incomingcalls(hierarchyFoo, hierarchyD),outgoingcalls(hierarchyFoo)
- foo()
-
- func() {
- g()
- }()
-
- var i Interface = impl{}
- i.H()
- i.I()
-
- s := Struct{}
- s.J()
- s.K()
-}
-
-func e() {} //@mark(hierarchyE, "e")
-
-// F is an exported function
-func F() {} //@mark(hierarchyF, "F")
-
-func g() {} //@mark(hierarchyG, "g")
-
-type Interface interface {
- H() //@mark(hierarchyH, "H")
- I() //@mark(hierarchyI, "I")
-}
-
-type impl struct{}
-
-func (i impl) H() {}
-func (i impl) I() {}
-
-type Struct struct {
- J func() //@mark(hierarchyJ, "J")
- K func() //@mark(hierarchyK, "K")
-}
diff --git a/internal/lsp/testdata/callhierarchy/incoming/incoming.go b/internal/lsp/testdata/callhierarchy/incoming/incoming.go
deleted file mode 100644
index 3bfb4ad99..000000000
--- a/internal/lsp/testdata/callhierarchy/incoming/incoming.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package incoming
-
-import "golang.org/x/tools/internal/lsp/callhierarchy"
-
-// A is exported to test incoming calls across packages
-func A() { //@mark(incomingA, "A")
- callhierarchy.D()
-}
diff --git a/internal/lsp/testdata/cgo/declarecgo.go.golden b/internal/lsp/testdata/cgo/declarecgo.go.golden
deleted file mode 100644
index b6d94d0c6..000000000
--- a/internal/lsp/testdata/cgo/declarecgo.go.golden
+++ /dev/null
@@ -1,30 +0,0 @@
--- funccgoexample-definition --
-cgo/declarecgo.go:18:6-13: defined here as ```go
-func Example()
-```
-
-[`cgo.Example` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/cgo?utm_source=gopls#Example)
--- funccgoexample-definition-json --
-{
- "span": {
- "uri": "file://cgo/declarecgo.go",
- "start": {
- "line": 18,
- "column": 6,
- "offset": 151
- },
- "end": {
- "line": 18,
- "column": 13,
- "offset": 158
- }
- },
- "description": "```go\nfunc Example()\n```\n\n[`cgo.Example` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/cgo?utm_source=gopls#Example)"
-}
-
--- funccgoexample-hoverdef --
-```go
-func Example()
-```
-
-[`cgo.Example` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/cgo?utm_source=gopls#Example)
diff --git a/internal/lsp/testdata/cgoimport/usecgo.go.golden b/internal/lsp/testdata/cgoimport/usecgo.go.golden
deleted file mode 100644
index f33f94f84..000000000
--- a/internal/lsp/testdata/cgoimport/usecgo.go.golden
+++ /dev/null
@@ -1,30 +0,0 @@
--- funccgoexample-definition --
-cgo/declarecgo.go:18:6-13: defined here as ```go
-func cgo.Example()
-```
-
-[`cgo.Example` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/cgo?utm_source=gopls#Example)
--- funccgoexample-definition-json --
-{
- "span": {
- "uri": "file://cgo/declarecgo.go",
- "start": {
- "line": 18,
- "column": 6,
- "offset": 151
- },
- "end": {
- "line": 18,
- "column": 13,
- "offset": 158
- }
- },
- "description": "```go\nfunc cgo.Example()\n```\n\n[`cgo.Example` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/cgo?utm_source=gopls#Example)"
-}
-
--- funccgoexample-hoverdef --
-```go
-func cgo.Example()
-```
-
-[`cgo.Example` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/cgo?utm_source=gopls#Example)
diff --git a/internal/lsp/testdata/cgoimport/usecgo.go.in b/internal/lsp/testdata/cgoimport/usecgo.go.in
deleted file mode 100644
index f258682ea..000000000
--- a/internal/lsp/testdata/cgoimport/usecgo.go.in
+++ /dev/null
@@ -1,9 +0,0 @@
-package cgoimport
-
-import (
- "golang.org/x/tools/internal/lsp/cgo"
-)
-
-func _() {
- cgo.Example() //@godef("ample", funccgoexample),complete("ample", funccgoexample)
-}
diff --git a/internal/lsp/testdata/danglingstmt/dangling_selector_2.go b/internal/lsp/testdata/danglingstmt/dangling_selector_2.go
deleted file mode 100644
index a9e75e82a..000000000
--- a/internal/lsp/testdata/danglingstmt/dangling_selector_2.go
+++ /dev/null
@@ -1,8 +0,0 @@
-package danglingstmt
-
-import "golang.org/x/tools/internal/lsp/foo"
-
-func _() {
- foo. //@rank(" //", Foo)
- var _ = []string{foo.} //@rank("}", Foo)
-}
diff --git a/internal/lsp/testdata/deep/deep.go b/internal/lsp/testdata/deep/deep.go
deleted file mode 100644
index 6ed5ff839..000000000
--- a/internal/lsp/testdata/deep/deep.go
+++ /dev/null
@@ -1,135 +0,0 @@
-package deep
-
-import "context"
-
-type deepA struct {
- b deepB //@item(deepBField, "b", "deepB", "field")
-}
-
-type deepB struct {
-}
-
-func wantsDeepB(deepB) {}
-
-func _() {
- var a deepA //@item(deepAVar, "a", "deepA", "var")
- a.b //@item(deepABField, "a.b", "deepB", "field")
- wantsDeepB(a) //@deep(")", deepABField, deepAVar)
-
- deepA{a} //@snippet("}", deepABField, "a.b", "a.b")
-}
-
-func wantsContext(context.Context) {}
-
-func _() {
- context.Background() //@item(ctxBackground, "context.Background", "func() context.Context", "func", "Background returns a non-nil, empty Context.")
- context.TODO() //@item(ctxTODO, "context.TODO", "func() context.Context", "func", "TODO returns a non-nil, empty Context.")
-
- wantsContext(c) //@rank(")", ctxBackground),rank(")", ctxTODO)
-}
-
-func _() {
- // deepCircle is circular.
- type deepCircle struct {
- *deepCircle
- }
- var circle deepCircle //@item(deepCircle, "circle", "deepCircle", "var")
- circle.deepCircle //@item(deepCircleField, "circle.deepCircle", "*deepCircle", "field")
- var _ deepCircle = circ //@deep(" //", deepCircle, deepCircleField),snippet(" //", deepCircleField, "*circle.deepCircle", "*circle.deepCircle")
-}
-
-func _() {
- type deepEmbedC struct {
- }
- type deepEmbedB struct {
- deepEmbedC
- }
- type deepEmbedA struct {
- deepEmbedB
- }
-
- wantsC := func(deepEmbedC) {}
-
- var a deepEmbedA //@item(deepEmbedA, "a", "deepEmbedA", "var")
- a.deepEmbedB //@item(deepEmbedB, "a.deepEmbedB", "deepEmbedB", "field")
- a.deepEmbedC //@item(deepEmbedC, "a.deepEmbedC", "deepEmbedC", "field")
- wantsC(a) //@deep(")", deepEmbedC, deepEmbedA, deepEmbedB)
-}
-
-func _() {
- type nested struct {
- a int
- n *nested //@item(deepNestedField, "n", "*nested", "field")
- }
-
- nested{
- a: 123, //@deep(" //", deepNestedField)
- }
-}
-
-func _() {
- var a struct {
- b struct {
- c int
- }
- d int
- }
-
- a.d //@item(deepAD, "a.d", "int", "field")
- a.b.c //@item(deepABC, "a.b.c", "int", "field")
- a.b //@item(deepAB, "a.b", "struct{...}", "field")
- a //@item(deepA, "a", "struct{...}", "var")
-
- // "a.d" should be ranked above the deeper "a.b.c"
- var i int
- i = a //@deep(" //", deepAD, deepABC, deepA, deepAB)
-}
-
-type foo struct {
- b bar
-}
-
-func (f foo) bar() bar {
- return f.b
-}
-
-func (f foo) barPtr() *bar {
- return &f.b
-}
-
-type bar struct{}
-
-func (b bar) valueReceiver() int {
- return 0
-}
-
-func (b *bar) ptrReceiver() int {
- return 0
-}
-
-func _() {
- var (
- i int
- f foo
- )
-
- f.bar().valueReceiver //@item(deepBarValue, "f.bar().valueReceiver", "func() int", "method")
- f.barPtr().ptrReceiver //@item(deepBarPtrPtr, "f.barPtr().ptrReceiver", "func() int", "method")
- f.barPtr().valueReceiver //@item(deepBarPtrValue, "f.barPtr().valueReceiver", "func() int", "method")
-
- i = fbar //@fuzzy(" //", deepBarValue, deepBarPtrPtr, deepBarPtrValue)
-}
-
-func (b baz) Thing() struct{ val int } {
- return b.thing
-}
-
-type baz struct {
- thing struct{ val int }
-}
-
-func (b baz) _() {
- b.Thing().val //@item(deepBazMethVal, "b.Thing().val", "int", "field")
- b.thing.val //@item(deepBazFieldVal, "b.thing.val", "int", "field")
- var _ int = bval //@rank(" //", deepBazFieldVal, deepBazMethVal)
-}
diff --git a/internal/lsp/testdata/errors/errors.go b/internal/lsp/testdata/errors/errors.go
deleted file mode 100644
index 42105629e..000000000
--- a/internal/lsp/testdata/errors/errors.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package errors
-
-import (
- "golang.org/x/tools/internal/lsp/types"
-)
-
-func _() {
- bob.Bob() //@complete(".")
- types.b //@complete(" //", Bob_interface)
-}
diff --git a/internal/lsp/testdata/extract/extract_function/extract_basic_comment.go b/internal/lsp/testdata/extract/extract_function/extract_basic_comment.go
deleted file mode 100644
index 4e2b12fbc..000000000
--- a/internal/lsp/testdata/extract/extract_function/extract_basic_comment.go
+++ /dev/null
@@ -1,8 +0,0 @@
-package extract
-
-func _() {
- a := /* comment in the middle of a line */ 1 //@mark(exSt18, "a")
- // Comment on its own line
- _ = 3 + 4 //@mark(exEn18, "4")
- //@extractfunc(exSt18, exEn18)
-}
diff --git a/internal/lsp/testdata/extract/extract_function/extract_basic_comment.go.golden b/internal/lsp/testdata/extract/extract_function/extract_basic_comment.go.golden
deleted file mode 100644
index a43822a90..000000000
--- a/internal/lsp/testdata/extract/extract_function/extract_basic_comment.go.golden
+++ /dev/null
@@ -1,17 +0,0 @@
--- functionextraction_extract_basic_comment_4_2 --
-package extract
-
-func _() {
- /* comment in the middle of a line */
- //@mark(exSt18, "a")
- // Comment on its own line
- newFunction() //@mark(exEn18, "4")
- //@extractfunc(exSt18, exEn18)
-}
-
-func newFunction() {
- a := 1
-
- _ = 3 + 4
-}
-
diff --git a/internal/lsp/testdata/extract/extract_method/extract_basic.go.golden b/internal/lsp/testdata/extract/extract_method/extract_basic.go.golden
deleted file mode 100644
index eab22a673..000000000
--- a/internal/lsp/testdata/extract/extract_method/extract_basic.go.golden
+++ /dev/null
@@ -1,728 +0,0 @@
--- functionextraction_extract_basic_13_2 --
-package extract
-
-type A struct {
- x int
- y int
-}
-
-func (a *A) XLessThanYP() bool {
- return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a *A) AddP() int {
- sum := newFunction(a) //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
-func newFunction(a *A) int {
- sum := a.x + a.y
- return sum
-}
-
-func (a A) XLessThanY() bool {
- return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a A) Add() int {
- sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
--- functionextraction_extract_basic_14_2 --
-package extract
-
-type A struct {
- x int
- y int
-}
-
-func (a *A) XLessThanYP() bool {
- return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a *A) AddP() int {
- sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return newFunction(sum) //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
-func newFunction(sum int) int {
- return sum
-}
-
-func (a A) XLessThanY() bool {
- return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a A) Add() int {
- sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
--- functionextraction_extract_basic_18_2 --
-package extract
-
-type A struct {
- x int
- y int
-}
-
-func (a *A) XLessThanYP() bool {
- return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a *A) AddP() int {
- sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
-func (a A) XLessThanY() bool {
- return newFunction(a) //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func newFunction(a A) bool {
- return a.x < a.y
-}
-
-func (a A) Add() int {
- sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
--- functionextraction_extract_basic_22_2 --
-package extract
-
-type A struct {
- x int
- y int
-}
-
-func (a *A) XLessThanYP() bool {
- return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a *A) AddP() int {
- sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
-func (a A) XLessThanY() bool {
- return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a A) Add() int {
- sum := newFunction(a) //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
-func newFunction(a A) int {
- sum := a.x + a.y
- return sum
-}
-
--- functionextraction_extract_basic_23_2 --
-package extract
-
-type A struct {
- x int
- y int
-}
-
-func (a *A) XLessThanYP() bool {
- return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a *A) AddP() int {
- sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
-func (a A) XLessThanY() bool {
- return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a A) Add() int {
- sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return newFunction(sum) //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
-func newFunction(sum int) int {
- return sum
-}
-
--- functionextraction_extract_basic_9_2 --
-package extract
-
-type A struct {
- x int
- y int
-}
-
-func (a *A) XLessThanYP() bool {
- return newFunction(a) //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func newFunction(a *A) bool {
- return a.x < a.y
-}
-
-func (a *A) AddP() int {
- sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
-func (a A) XLessThanY() bool {
- return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a A) Add() int {
- sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
--- functionextraction_extract_method_13_2 --
-package extract
-
-type A struct {
- x int
- y int
-}
-
-func (a *A) XLessThanYP() bool {
- return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a *A) AddP() int {
- sum := newFunction(a) //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
-func newFunction(a *A) int {
- sum := a.x + a.y
- return sum
-}
-
-func (a A) XLessThanY() bool {
- return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a A) Add() int {
- sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
--- functionextraction_extract_method_14_2 --
-package extract
-
-type A struct {
- x int
- y int
-}
-
-func (a *A) XLessThanYP() bool {
- return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a *A) AddP() int {
- sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return newFunction(sum) //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
-func newFunction(sum int) int {
- return sum
-}
-
-func (a A) XLessThanY() bool {
- return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a A) Add() int {
- sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
--- functionextraction_extract_method_18_2 --
-package extract
-
-type A struct {
- x int
- y int
-}
-
-func (a *A) XLessThanYP() bool {
- return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a *A) AddP() int {
- sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
-func (a A) XLessThanY() bool {
- return newFunction(a) //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func newFunction(a A) bool {
- return a.x < a.y
-}
-
-func (a A) Add() int {
- sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
--- functionextraction_extract_method_22_2 --
-package extract
-
-type A struct {
- x int
- y int
-}
-
-func (a *A) XLessThanYP() bool {
- return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a *A) AddP() int {
- sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
-func (a A) XLessThanY() bool {
- return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a A) Add() int {
- sum := newFunction(a) //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
-func newFunction(a A) int {
- sum := a.x + a.y
- return sum
-}
-
--- functionextraction_extract_method_23_2 --
-package extract
-
-type A struct {
- x int
- y int
-}
-
-func (a *A) XLessThanYP() bool {
- return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a *A) AddP() int {
- sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
-func (a A) XLessThanY() bool {
- return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a A) Add() int {
- sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return newFunction(sum) //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
-func newFunction(sum int) int {
- return sum
-}
-
--- functionextraction_extract_method_9_2 --
-package extract
-
-type A struct {
- x int
- y int
-}
-
-func (a *A) XLessThanYP() bool {
- return newFunction(a) //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func newFunction(a *A) bool {
- return a.x < a.y
-}
-
-func (a *A) AddP() int {
- sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
-func (a A) XLessThanY() bool {
- return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a A) Add() int {
- sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
--- methodextraction_extract_basic_13_2 --
-package extract
-
-type A struct {
- x int
- y int
-}
-
-func (a *A) XLessThanYP() bool {
- return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a *A) AddP() int {
- sum := a.newMethod() //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
-func (a *A) newMethod() int {
- sum := a.x + a.y
- return sum
-}
-
-func (a A) XLessThanY() bool {
- return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a A) Add() int {
- sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
--- methodextraction_extract_basic_14_2 --
-package extract
-
-type A struct {
- x int
- y int
-}
-
-func (a *A) XLessThanYP() bool {
- return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a *A) AddP() int {
- sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return a.newMethod(sum) //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
-func (*A) newMethod(sum int) int {
- return sum
-}
-
-func (a A) XLessThanY() bool {
- return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a A) Add() int {
- sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
--- methodextraction_extract_basic_18_2 --
-package extract
-
-type A struct {
- x int
- y int
-}
-
-func (a *A) XLessThanYP() bool {
- return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a *A) AddP() int {
- sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
-func (a A) XLessThanY() bool {
- return a.newMethod() //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a A) newMethod() bool {
- return a.x < a.y
-}
-
-func (a A) Add() int {
- sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
--- methodextraction_extract_basic_22_2 --
-package extract
-
-type A struct {
- x int
- y int
-}
-
-func (a *A) XLessThanYP() bool {
- return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a *A) AddP() int {
- sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
-func (a A) XLessThanY() bool {
- return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a A) Add() int {
- sum := a.newMethod() //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
-func (a A) newMethod() int {
- sum := a.x + a.y
- return sum
-}
-
--- methodextraction_extract_basic_23_2 --
-package extract
-
-type A struct {
- x int
- y int
-}
-
-func (a *A) XLessThanYP() bool {
- return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a *A) AddP() int {
- sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
-func (a A) XLessThanY() bool {
- return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a A) Add() int {
- sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return a.newMethod(sum) //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
-func (A) newMethod(sum int) int {
- return sum
-}
-
--- methodextraction_extract_basic_9_2 --
-package extract
-
-type A struct {
- x int
- y int
-}
-
-func (a *A) XLessThanYP() bool {
- return a.newMethod() //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a *A) newMethod() bool {
- return a.x < a.y
-}
-
-func (a *A) AddP() int {
- sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
-func (a A) XLessThanY() bool {
- return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a A) Add() int {
- sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
--- methodextraction_extract_method_13_2 --
-package extract
-
-type A struct {
- x int
- y int
-}
-
-func (a *A) XLessThanYP() bool {
- return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a *A) AddP() int {
- sum := a.newMethod() //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
-func (a *A) newMethod() int {
- sum := a.x + a.y
- return sum
-}
-
-func (a A) XLessThanY() bool {
- return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a A) Add() int {
- sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
--- methodextraction_extract_method_14_2 --
-package extract
-
-type A struct {
- x int
- y int
-}
-
-func (a *A) XLessThanYP() bool {
- return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a *A) AddP() int {
- sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return a.newMethod(sum) //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
-func (*A) newMethod(sum int) int {
- return sum
-}
-
-func (a A) XLessThanY() bool {
- return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a A) Add() int {
- sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
--- methodextraction_extract_method_18_2 --
-package extract
-
-type A struct {
- x int
- y int
-}
-
-func (a *A) XLessThanYP() bool {
- return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a *A) AddP() int {
- sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
-func (a A) XLessThanY() bool {
- return a.newMethod() //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a A) newMethod() bool {
- return a.x < a.y
-}
-
-func (a A) Add() int {
- sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
--- methodextraction_extract_method_22_2 --
-package extract
-
-type A struct {
- x int
- y int
-}
-
-func (a *A) XLessThanYP() bool {
- return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a *A) AddP() int {
- sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
-func (a A) XLessThanY() bool {
- return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a A) Add() int {
- sum := a.newMethod() //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
-func (a A) newMethod() int {
- sum := a.x + a.y
- return sum
-}
-
--- methodextraction_extract_method_23_2 --
-package extract
-
-type A struct {
- x int
- y int
-}
-
-func (a *A) XLessThanYP() bool {
- return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a *A) AddP() int {
- sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
-func (a A) XLessThanY() bool {
- return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a A) Add() int {
- sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return a.newMethod(sum) //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
-func (A) newMethod(sum int) int {
- return sum
-}
-
--- methodextraction_extract_method_9_2 --
-package extract
-
-type A struct {
- x int
- y int
-}
-
-func (a *A) XLessThanYP() bool {
- return a.newMethod() //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a *A) newMethod() bool {
- return a.x < a.y
-}
-
-func (a *A) AddP() int {
- sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
-func (a A) XLessThanY() bool {
- return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
-}
-
-func (a A) Add() int {
- sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
- return sum //@extractmethod("return", "sum"),extractfunc("return", "sum")
-}
-
diff --git a/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go b/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go
deleted file mode 100644
index c49e5d6a0..000000000
--- a/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go
+++ /dev/null
@@ -1,6 +0,0 @@
-package extract
-
-func _() {
- var _ = 1 + 2 //@suggestedfix("1", "refactor.extract")
- var _ = 3 + 4 //@suggestedfix("3 + 4", "refactor.extract")
-}
diff --git a/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go.golden b/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go.golden
deleted file mode 100644
index 00ee7b4f9..000000000
--- a/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go.golden
+++ /dev/null
@@ -1,18 +0,0 @@
--- suggestedfix_extract_basic_lit_4_10 --
-package extract
-
-func _() {
- x := 1
- var _ = x + 2 //@suggestedfix("1", "refactor.extract")
- var _ = 3 + 4 //@suggestedfix("3 + 4", "refactor.extract")
-}
-
--- suggestedfix_extract_basic_lit_5_10 --
-package extract
-
-func _() {
- var _ = 1 + 2 //@suggestedfix("1", "refactor.extract")
- x := 3 + 4
- var _ = x //@suggestedfix("3 + 4", "refactor.extract")
-}
-
diff --git a/internal/lsp/testdata/extract/extract_variable/extract_func_call.go b/internal/lsp/testdata/extract/extract_variable/extract_func_call.go
deleted file mode 100644
index badc010dc..000000000
--- a/internal/lsp/testdata/extract/extract_variable/extract_func_call.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package extract
-
-import "strconv"
-
-func _() {
- x0 := append([]int{}, 1) //@suggestedfix("append([]int{}, 1)", "refactor.extract")
- str := "1"
- b, err := strconv.Atoi(str) //@suggestedfix("strconv.Atoi(str)", "refactor.extract")
-}
diff --git a/internal/lsp/testdata/extract/extract_variable/extract_func_call.go.golden b/internal/lsp/testdata/extract/extract_variable/extract_func_call.go.golden
deleted file mode 100644
index 74df67ee6..000000000
--- a/internal/lsp/testdata/extract/extract_variable/extract_func_call.go.golden
+++ /dev/null
@@ -1,36 +0,0 @@
--- suggestedfix_extract_func_call_6_7 --
-package extract
-
-import "strconv"
-
-func _() {
- x0 := append([]int{}, 1)
- a := x0 //@suggestedfix("append([]int{}, 1)", "refactor.extract")
- str := "1"
- b, err := strconv.Atoi(str) //@suggestedfix("strconv.Atoi(str)", "refactor.extract")
-}
-
--- suggestedfix_extract_func_call_6_8 --
-package extract
-
-import "strconv"
-
-func _() {
- x := append([]int{}, 1)
- x0 := x //@suggestedfix("append([]int{}, 1)", "refactor.extract")
- str := "1"
- b, err := strconv.Atoi(str) //@suggestedfix("strconv.Atoi(str)", "refactor.extract")
-}
-
--- suggestedfix_extract_func_call_8_12 --
-package extract
-
-import "strconv"
-
-func _() {
- x0 := append([]int{}, 1) //@suggestedfix("append([]int{}, 1)", "refactor.extract")
- str := "1"
- x, x1 := strconv.Atoi(str)
- b, err := x, x1 //@suggestedfix("strconv.Atoi(str)", "refactor.extract")
-}
-
diff --git a/internal/lsp/testdata/extract/extract_variable/extract_scope.go b/internal/lsp/testdata/extract/extract_variable/extract_scope.go
deleted file mode 100644
index 5dfcc3620..000000000
--- a/internal/lsp/testdata/extract/extract_variable/extract_scope.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package extract
-
-import "go/ast"
-
-func _() {
- x0 := 0
- if true {
- y := ast.CompositeLit{} //@suggestedfix("ast.CompositeLit{}", "refactor.extract")
- }
- if true {
- x1 := !false //@suggestedfix("!false", "refactor.extract")
- }
-}
diff --git a/internal/lsp/testdata/extract/extract_variable/extract_scope.go.golden b/internal/lsp/testdata/extract/extract_variable/extract_scope.go.golden
deleted file mode 100644
index e0e6464b5..000000000
--- a/internal/lsp/testdata/extract/extract_variable/extract_scope.go.golden
+++ /dev/null
@@ -1,32 +0,0 @@
--- suggestedfix_extract_scope_11_9 --
-package extract
-
-import "go/ast"
-
-func _() {
- x0 := 0
- if true {
- y := ast.CompositeLit{} //@suggestedfix("ast.CompositeLit{}", "refactor.extract")
- }
- if true {
- x := !false
- x1 := x //@suggestedfix("!false", "refactor.extract")
- }
-}
-
--- suggestedfix_extract_scope_8_8 --
-package extract
-
-import "go/ast"
-
-func _() {
- x0 := 0
- if true {
- x := ast.CompositeLit{}
- y := x //@suggestedfix("ast.CompositeLit{}", "refactor.extract")
- }
- if true {
- x1 := !false //@suggestedfix("!false", "refactor.extract")
- }
-}
-
diff --git a/internal/lsp/testdata/fillstruct/a.go b/internal/lsp/testdata/fillstruct/a.go
deleted file mode 100644
index 5c6df6c4a..000000000
--- a/internal/lsp/testdata/fillstruct/a.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package fillstruct
-
-import (
- "golang.org/x/tools/internal/lsp/fillstruct/data"
-)
-
-type basicStruct struct {
- foo int
-}
-
-var _ = basicStruct{} //@suggestedfix("}", "refactor.rewrite")
-
-type twoArgStruct struct {
- foo int
- bar string
-}
-
-var _ = twoArgStruct{} //@suggestedfix("}", "refactor.rewrite")
-
-type nestedStruct struct {
- bar string
- basic basicStruct
-}
-
-var _ = nestedStruct{} //@suggestedfix("}", "refactor.rewrite")
-
-var _ = data.B{} //@suggestedfix("}", "refactor.rewrite")
diff --git a/internal/lsp/testdata/fillstruct/a.go.golden b/internal/lsp/testdata/fillstruct/a.go.golden
deleted file mode 100644
index 5d6dbceb2..000000000
--- a/internal/lsp/testdata/fillstruct/a.go.golden
+++ /dev/null
@@ -1,126 +0,0 @@
--- suggestedfix_a_11_21 --
-package fillstruct
-
-import (
- "golang.org/x/tools/internal/lsp/fillstruct/data"
-)
-
-type basicStruct struct {
- foo int
-}
-
-var _ = basicStruct{
- foo: 0,
-} //@suggestedfix("}", "refactor.rewrite")
-
-type twoArgStruct struct {
- foo int
- bar string
-}
-
-var _ = twoArgStruct{} //@suggestedfix("}", "refactor.rewrite")
-
-type nestedStruct struct {
- bar string
- basic basicStruct
-}
-
-var _ = nestedStruct{} //@suggestedfix("}", "refactor.rewrite")
-
-var _ = data.B{} //@suggestedfix("}", "refactor.rewrite")
-
--- suggestedfix_a_18_22 --
-package fillstruct
-
-import (
- "golang.org/x/tools/internal/lsp/fillstruct/data"
-)
-
-type basicStruct struct {
- foo int
-}
-
-var _ = basicStruct{} //@suggestedfix("}", "refactor.rewrite")
-
-type twoArgStruct struct {
- foo int
- bar string
-}
-
-var _ = twoArgStruct{
- foo: 0,
- bar: "",
-} //@suggestedfix("}", "refactor.rewrite")
-
-type nestedStruct struct {
- bar string
- basic basicStruct
-}
-
-var _ = nestedStruct{} //@suggestedfix("}", "refactor.rewrite")
-
-var _ = data.B{} //@suggestedfix("}", "refactor.rewrite")
-
--- suggestedfix_a_25_22 --
-package fillstruct
-
-import (
- "golang.org/x/tools/internal/lsp/fillstruct/data"
-)
-
-type basicStruct struct {
- foo int
-}
-
-var _ = basicStruct{} //@suggestedfix("}", "refactor.rewrite")
-
-type twoArgStruct struct {
- foo int
- bar string
-}
-
-var _ = twoArgStruct{} //@suggestedfix("}", "refactor.rewrite")
-
-type nestedStruct struct {
- bar string
- basic basicStruct
-}
-
-var _ = nestedStruct{
- bar: "",
- basic: basicStruct{},
-} //@suggestedfix("}", "refactor.rewrite")
-
-var _ = data.B{} //@suggestedfix("}", "refactor.rewrite")
-
--- suggestedfix_a_27_16 --
-package fillstruct
-
-import (
- "golang.org/x/tools/internal/lsp/fillstruct/data"
-)
-
-type basicStruct struct {
- foo int
-}
-
-var _ = basicStruct{} //@suggestedfix("}", "refactor.rewrite")
-
-type twoArgStruct struct {
- foo int
- bar string
-}
-
-var _ = twoArgStruct{} //@suggestedfix("}", "refactor.rewrite")
-
-type nestedStruct struct {
- bar string
- basic basicStruct
-}
-
-var _ = nestedStruct{} //@suggestedfix("}", "refactor.rewrite")
-
-var _ = data.B{
- ExportedInt: 0,
-} //@suggestedfix("}", "refactor.rewrite")
-
diff --git a/internal/lsp/testdata/fillstruct/a2.go b/internal/lsp/testdata/fillstruct/a2.go
deleted file mode 100644
index 8e12a6b54..000000000
--- a/internal/lsp/testdata/fillstruct/a2.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package fillstruct
-
-type typedStruct struct {
- m map[string]int
- s []int
- c chan int
- c1 <-chan int
- a [2]string
-}
-
-var _ = typedStruct{} //@suggestedfix("}", "refactor.rewrite")
-
-type funStruct struct {
- fn func(i int) int
-}
-
-var _ = funStruct{} //@suggestedfix("}", "refactor.rewrite")
-
-type funStructCompex struct {
- fn func(i int, s string) (string, int)
-}
-
-var _ = funStructCompex{} //@suggestedfix("}", "refactor.rewrite")
-
-type funStructEmpty struct {
- fn func()
-}
-
-var _ = funStructEmpty{} //@suggestedfix("}", "refactor.rewrite")
diff --git a/internal/lsp/testdata/fillstruct/a2.go.golden b/internal/lsp/testdata/fillstruct/a2.go.golden
deleted file mode 100644
index 78a6ee2b6..000000000
--- a/internal/lsp/testdata/fillstruct/a2.go.golden
+++ /dev/null
@@ -1,139 +0,0 @@
--- suggestedfix_a2_11_21 --
-package fillstruct
-
-type typedStruct struct {
- m map[string]int
- s []int
- c chan int
- c1 <-chan int
- a [2]string
-}
-
-var _ = typedStruct{
- m: map[string]int{},
- s: []int{},
- c: make(chan int),
- c1: make(<-chan int),
- a: [2]string{},
-} //@suggestedfix("}", "refactor.rewrite")
-
-type funStruct struct {
- fn func(i int) int
-}
-
-var _ = funStruct{} //@suggestedfix("}", "refactor.rewrite")
-
-type funStructCompex struct {
- fn func(i int, s string) (string, int)
-}
-
-var _ = funStructCompex{} //@suggestedfix("}", "refactor.rewrite")
-
-type funStructEmpty struct {
- fn func()
-}
-
-var _ = funStructEmpty{} //@suggestedfix("}", "refactor.rewrite")
-
--- suggestedfix_a2_17_19 --
-package fillstruct
-
-type typedStruct struct {
- m map[string]int
- s []int
- c chan int
- c1 <-chan int
- a [2]string
-}
-
-var _ = typedStruct{} //@suggestedfix("}", "refactor.rewrite")
-
-type funStruct struct {
- fn func(i int) int
-}
-
-var _ = funStruct{
- fn: func(i int) int {
- },
-} //@suggestedfix("}", "refactor.rewrite")
-
-type funStructCompex struct {
- fn func(i int, s string) (string, int)
-}
-
-var _ = funStructCompex{} //@suggestedfix("}", "refactor.rewrite")
-
-type funStructEmpty struct {
- fn func()
-}
-
-var _ = funStructEmpty{} //@suggestedfix("}", "refactor.rewrite")
-
--- suggestedfix_a2_23_25 --
-package fillstruct
-
-type typedStruct struct {
- m map[string]int
- s []int
- c chan int
- c1 <-chan int
- a [2]string
-}
-
-var _ = typedStruct{} //@suggestedfix("}", "refactor.rewrite")
-
-type funStruct struct {
- fn func(i int) int
-}
-
-var _ = funStruct{} //@suggestedfix("}", "refactor.rewrite")
-
-type funStructCompex struct {
- fn func(i int, s string) (string, int)
-}
-
-var _ = funStructCompex{
- fn: func(i int, s string) (string, int) {
- },
-} //@suggestedfix("}", "refactor.rewrite")
-
-type funStructEmpty struct {
- fn func()
-}
-
-var _ = funStructEmpty{} //@suggestedfix("}", "refactor.rewrite")
-
--- suggestedfix_a2_29_24 --
-package fillstruct
-
-type typedStruct struct {
- m map[string]int
- s []int
- c chan int
- c1 <-chan int
- a [2]string
-}
-
-var _ = typedStruct{} //@suggestedfix("}", "refactor.rewrite")
-
-type funStruct struct {
- fn func(i int) int
-}
-
-var _ = funStruct{} //@suggestedfix("}", "refactor.rewrite")
-
-type funStructCompex struct {
- fn func(i int, s string) (string, int)
-}
-
-var _ = funStructCompex{} //@suggestedfix("}", "refactor.rewrite")
-
-type funStructEmpty struct {
- fn func()
-}
-
-var _ = funStructEmpty{
- fn: func() {
- },
-} //@suggestedfix("}", "refactor.rewrite")
-
diff --git a/internal/lsp/testdata/fillstruct/a3.go b/internal/lsp/testdata/fillstruct/a3.go
deleted file mode 100644
index 730db3054..000000000
--- a/internal/lsp/testdata/fillstruct/a3.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package fillstruct
-
-import (
- "go/ast"
- "go/token"
-)
-
-type Foo struct {
- A int
-}
-
-type Bar struct {
- X *Foo
- Y *Foo
-}
-
-var _ = Bar{} //@suggestedfix("}", "refactor.rewrite")
-
-type importedStruct struct {
- m map[*ast.CompositeLit]ast.Field
- s []ast.BadExpr
- a [3]token.Token
- c chan ast.EmptyStmt
- fn func(ast_decl ast.DeclStmt) ast.Ellipsis
- st ast.CompositeLit
-}
-
-var _ = importedStruct{} //@suggestedfix("}", "refactor.rewrite")
-
-type pointerBuiltinStruct struct {
- b *bool
- s *string
- i *int
-}
-
-var _ = pointerBuiltinStruct{} //@suggestedfix("}", "refactor.rewrite")
-
-var _ = []ast.BasicLit{
- {}, //@suggestedfix("}", "refactor.rewrite")
-}
-
-var _ = []ast.BasicLit{{}} //@suggestedfix("}", "refactor.rewrite")
diff --git a/internal/lsp/testdata/fillstruct/a3.go.golden b/internal/lsp/testdata/fillstruct/a3.go.golden
deleted file mode 100644
index 1d8672927..000000000
--- a/internal/lsp/testdata/fillstruct/a3.go.golden
+++ /dev/null
@@ -1,243 +0,0 @@
--- suggestedfix_a3_17_13 --
-package fillstruct
-
-import (
- "go/ast"
- "go/token"
-)
-
-type Foo struct {
- A int
-}
-
-type Bar struct {
- X *Foo
- Y *Foo
-}
-
-var _ = Bar{
- X: &Foo{},
- Y: &Foo{},
-} //@suggestedfix("}", "refactor.rewrite")
-
-type importedStruct struct {
- m map[*ast.CompositeLit]ast.Field
- s []ast.BadExpr
- a [3]token.Token
- c chan ast.EmptyStmt
- fn func(ast_decl ast.DeclStmt) ast.Ellipsis
- st ast.CompositeLit
-}
-
-var _ = importedStruct{} //@suggestedfix("}", "refactor.rewrite")
-
-type pointerBuiltinStruct struct {
- b *bool
- s *string
- i *int
-}
-
-var _ = pointerBuiltinStruct{} //@suggestedfix("}", "refactor.rewrite")
-
-var _ = []ast.BasicLit{
- {}, //@suggestedfix("}", "refactor.rewrite")
-}
-
-var _ = []ast.BasicLit{{}} //@suggestedfix("}", "refactor.rewrite")
-
--- suggestedfix_a3_28_24 --
-package fillstruct
-
-import (
- "go/ast"
- "go/token"
-)
-
-type Foo struct {
- A int
-}
-
-type Bar struct {
- X *Foo
- Y *Foo
-}
-
-var _ = Bar{} //@suggestedfix("}", "refactor.rewrite")
-
-type importedStruct struct {
- m map[*ast.CompositeLit]ast.Field
- s []ast.BadExpr
- a [3]token.Token
- c chan ast.EmptyStmt
- fn func(ast_decl ast.DeclStmt) ast.Ellipsis
- st ast.CompositeLit
-}
-
-var _ = importedStruct{
- m: map[*ast.CompositeLit]ast.Field{},
- s: []ast.BadExpr{},
- a: [3]token.Token{},
- c: make(chan ast.EmptyStmt),
- fn: func(ast_decl ast.DeclStmt) ast.Ellipsis {
- },
- st: ast.CompositeLit{},
-} //@suggestedfix("}", "refactor.rewrite")
-
-type pointerBuiltinStruct struct {
- b *bool
- s *string
- i *int
-}
-
-var _ = pointerBuiltinStruct{} //@suggestedfix("}", "refactor.rewrite")
-
-var _ = []ast.BasicLit{
- {}, //@suggestedfix("}", "refactor.rewrite")
-}
-
-var _ = []ast.BasicLit{{}} //@suggestedfix("}", "refactor.rewrite")
-
--- suggestedfix_a3_36_30 --
-package fillstruct
-
-import (
- "go/ast"
- "go/token"
-)
-
-type Foo struct {
- A int
-}
-
-type Bar struct {
- X *Foo
- Y *Foo
-}
-
-var _ = Bar{} //@suggestedfix("}", "refactor.rewrite")
-
-type importedStruct struct {
- m map[*ast.CompositeLit]ast.Field
- s []ast.BadExpr
- a [3]token.Token
- c chan ast.EmptyStmt
- fn func(ast_decl ast.DeclStmt) ast.Ellipsis
- st ast.CompositeLit
-}
-
-var _ = importedStruct{} //@suggestedfix("}", "refactor.rewrite")
-
-type pointerBuiltinStruct struct {
- b *bool
- s *string
- i *int
-}
-
-var _ = pointerBuiltinStruct{
- b: new(bool),
- s: new(string),
- i: new(int),
-} //@suggestedfix("}", "refactor.rewrite")
-
-var _ = []ast.BasicLit{
- {}, //@suggestedfix("}", "refactor.rewrite")
-}
-
-var _ = []ast.BasicLit{{}} //@suggestedfix("}", "refactor.rewrite")
-
--- suggestedfix_a3_39_3 --
-package fillstruct
-
-import (
- "go/ast"
- "go/token"
-)
-
-type Foo struct {
- A int
-}
-
-type Bar struct {
- X *Foo
- Y *Foo
-}
-
-var _ = Bar{} //@suggestedfix("}", "refactor.rewrite")
-
-type importedStruct struct {
- m map[*ast.CompositeLit]ast.Field
- s []ast.BadExpr
- a [3]token.Token
- c chan ast.EmptyStmt
- fn func(ast_decl ast.DeclStmt) ast.Ellipsis
- st ast.CompositeLit
-}
-
-var _ = importedStruct{} //@suggestedfix("}", "refactor.rewrite")
-
-type pointerBuiltinStruct struct {
- b *bool
- s *string
- i *int
-}
-
-var _ = pointerBuiltinStruct{} //@suggestedfix("}", "refactor.rewrite")
-
-var _ = []ast.BasicLit{
- {
- ValuePos: 0,
- Kind: 0,
- Value: "",
- }, //@suggestedfix("}", "refactor.rewrite")
-}
-
-var _ = []ast.BasicLit{{}} //@suggestedfix("}", "refactor.rewrite")
-
--- suggestedfix_a3_42_25 --
-package fillstruct
-
-import (
- "go/ast"
- "go/token"
-)
-
-type Foo struct {
- A int
-}
-
-type Bar struct {
- X *Foo
- Y *Foo
-}
-
-var _ = Bar{} //@suggestedfix("}", "refactor.rewrite")
-
-type importedStruct struct {
- m map[*ast.CompositeLit]ast.Field
- s []ast.BadExpr
- a [3]token.Token
- c chan ast.EmptyStmt
- fn func(ast_decl ast.DeclStmt) ast.Ellipsis
- st ast.CompositeLit
-}
-
-var _ = importedStruct{} //@suggestedfix("}", "refactor.rewrite")
-
-type pointerBuiltinStruct struct {
- b *bool
- s *string
- i *int
-}
-
-var _ = pointerBuiltinStruct{} //@suggestedfix("}", "refactor.rewrite")
-
-var _ = []ast.BasicLit{
- {}, //@suggestedfix("}", "refactor.rewrite")
-}
-
-var _ = []ast.BasicLit{{
- ValuePos: 0,
- Kind: 0,
- Value: "",
-}} //@suggestedfix("}", "refactor.rewrite")
-
diff --git a/internal/lsp/testdata/fillstruct/a4.go b/internal/lsp/testdata/fillstruct/a4.go
deleted file mode 100644
index 7833d338c..000000000
--- a/internal/lsp/testdata/fillstruct/a4.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package fillstruct
-
-import "go/ast"
-
-type iStruct struct {
- X int
-}
-
-type sStruct struct {
- str string
-}
-
-type multiFill struct {
- num int
- strin string
- arr []int
-}
-
-type assignStruct struct {
- n ast.Node
-}
-
-func fill() {
- var x int
- var _ = iStruct{} //@suggestedfix("}", "refactor.rewrite")
-
- var s string
- var _ = sStruct{} //@suggestedfix("}", "refactor.rewrite")
-
- var n int
- _ = []int{}
- if true {
- arr := []int{1, 2}
- }
- var _ = multiFill{} //@suggestedfix("}", "refactor.rewrite")
-
- var node *ast.CompositeLit
- var _ = assignStruct{} //@suggestedfix("}", "refactor.rewrite")
-}
diff --git a/internal/lsp/testdata/fillstruct/a4.go.golden b/internal/lsp/testdata/fillstruct/a4.go.golden
deleted file mode 100644
index 109c6b5ea..000000000
--- a/internal/lsp/testdata/fillstruct/a4.go.golden
+++ /dev/null
@@ -1,174 +0,0 @@
--- suggestedfix_a4_25_18 --
-package fillstruct
-
-import "go/ast"
-
-type iStruct struct {
- X int
-}
-
-type sStruct struct {
- str string
-}
-
-type multiFill struct {
- num int
- strin string
- arr []int
-}
-
-type assignStruct struct {
- n ast.Node
-}
-
-func fill() {
- var x int
- var _ = iStruct{
- X: x,
- } //@suggestedfix("}", "refactor.rewrite")
-
- var s string
- var _ = sStruct{} //@suggestedfix("}", "refactor.rewrite")
-
- var n int
- _ = []int{}
- if true {
- arr := []int{1, 2}
- }
- var _ = multiFill{} //@suggestedfix("}", "refactor.rewrite")
-
- var node *ast.CompositeLit
- var _ = assignStruct{} //@suggestedfix("}", "refactor.rewrite")
-}
-
--- suggestedfix_a4_28_18 --
-package fillstruct
-
-import "go/ast"
-
-type iStruct struct {
- X int
-}
-
-type sStruct struct {
- str string
-}
-
-type multiFill struct {
- num int
- strin string
- arr []int
-}
-
-type assignStruct struct {
- n ast.Node
-}
-
-func fill() {
- var x int
- var _ = iStruct{} //@suggestedfix("}", "refactor.rewrite")
-
- var s string
- var _ = sStruct{
- str: s,
- } //@suggestedfix("}", "refactor.rewrite")
-
- var n int
- _ = []int{}
- if true {
- arr := []int{1, 2}
- }
- var _ = multiFill{} //@suggestedfix("}", "refactor.rewrite")
-
- var node *ast.CompositeLit
- var _ = assignStruct{} //@suggestedfix("}", "refactor.rewrite")
-}
-
--- suggestedfix_a4_35_20 --
-package fillstruct
-
-import "go/ast"
-
-type iStruct struct {
- X int
-}
-
-type sStruct struct {
- str string
-}
-
-type multiFill struct {
- num int
- strin string
- arr []int
-}
-
-type assignStruct struct {
- n ast.Node
-}
-
-func fill() {
- var x int
- var _ = iStruct{} //@suggestedfix("}", "refactor.rewrite")
-
- var s string
- var _ = sStruct{} //@suggestedfix("}", "refactor.rewrite")
-
- var n int
- _ = []int{}
- if true {
- arr := []int{1, 2}
- }
- var _ = multiFill{
- num: n,
- strin: s,
- arr: []int{},
- } //@suggestedfix("}", "refactor.rewrite")
-
- var node *ast.CompositeLit
- var _ = assignStruct{} //@suggestedfix("}", "refactor.rewrite")
-}
-
--- suggestedfix_a4_38_23 --
-package fillstruct
-
-import "go/ast"
-
-type iStruct struct {
- X int
-}
-
-type sStruct struct {
- str string
-}
-
-type multiFill struct {
- num int
- strin string
- arr []int
-}
-
-type assignStruct struct {
- n ast.Node
-}
-
-func fill() {
- var x int
- var _ = iStruct{} //@suggestedfix("}", "refactor.rewrite")
-
- var s string
- var _ = sStruct{} //@suggestedfix("}", "refactor.rewrite")
-
- var n int
- _ = []int{}
- if true {
- arr := []int{1, 2}
- }
- var _ = multiFill{} //@suggestedfix("}", "refactor.rewrite")
-
- var node *ast.CompositeLit
- var _ = assignStruct{
- n: node,
- } //@suggestedfix("}", "refactor.rewrite")
-}
-
diff --git a/internal/lsp/testdata/fillstruct/fill_struct.go b/internal/lsp/testdata/fillstruct/fill_struct.go
deleted file mode 100644
index fccec1353..000000000
--- a/internal/lsp/testdata/fillstruct/fill_struct.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package fillstruct
-
-type StructA struct {
- unexportedIntField int
- ExportedIntField int
- MapA map[int]string
- Array []int
- StructB
-}
-
-type StructA2 struct {
- B *StructB
-}
-
-type StructA3 struct {
- B StructB
-}
-
-func fill() {
- a := StructA{} //@suggestedfix("}", "refactor.rewrite")
- b := StructA2{} //@suggestedfix("}", "refactor.rewrite")
- c := StructA3{} //@suggestedfix("}", "refactor.rewrite")
- if true {
- _ = StructA3{} //@suggestedfix("}", "refactor.rewrite")
- }
-}
diff --git a/internal/lsp/testdata/fillstruct/fill_struct.go.golden b/internal/lsp/testdata/fillstruct/fill_struct.go.golden
deleted file mode 100644
index 8d9970315..000000000
--- a/internal/lsp/testdata/fillstruct/fill_struct.go.golden
+++ /dev/null
@@ -1,124 +0,0 @@
--- suggestedfix_fill_struct_20_15 --
-package fillstruct
-
-type StructA struct {
- unexportedIntField int
- ExportedIntField int
- MapA map[int]string
- Array []int
- StructB
-}
-
-type StructA2 struct {
- B *StructB
-}
-
-type StructA3 struct {
- B StructB
-}
-
-func fill() {
- a := StructA{
- unexportedIntField: 0,
- ExportedIntField: 0,
- MapA: map[int]string{},
- Array: []int{},
- StructB: StructB{},
- } //@suggestedfix("}", "refactor.rewrite")
- b := StructA2{} //@suggestedfix("}", "refactor.rewrite")
- c := StructA3{} //@suggestedfix("}", "refactor.rewrite")
- if true {
- _ = StructA3{} //@suggestedfix("}", "refactor.rewrite")
- }
-}
-
--- suggestedfix_fill_struct_21_16 --
-package fillstruct
-
-type StructA struct {
- unexportedIntField int
- ExportedIntField int
- MapA map[int]string
- Array []int
- StructB
-}
-
-type StructA2 struct {
- B *StructB
-}
-
-type StructA3 struct {
- B StructB
-}
-
-func fill() {
- a := StructA{} //@suggestedfix("}", "refactor.rewrite")
- b := StructA2{
- B: &StructB{},
- } //@suggestedfix("}", "refactor.rewrite")
- c := StructA3{} //@suggestedfix("}", "refactor.rewrite")
- if true {
- _ = StructA3{} //@suggestedfix("}", "refactor.rewrite")
- }
-}
-
--- suggestedfix_fill_struct_22_16 --
-package fillstruct
-
-type StructA struct {
- unexportedIntField int
- ExportedIntField int
- MapA map[int]string
- Array []int
- StructB
-}
-
-type StructA2 struct {
- B *StructB
-}
-
-type StructA3 struct {
- B StructB
-}
-
-func fill() {
- a := StructA{} //@suggestedfix("}", "refactor.rewrite")
- b := StructA2{} //@suggestedfix("}", "refactor.rewrite")
- c := StructA3{
- B: StructB{},
- } //@suggestedfix("}", "refactor.rewrite")
- if true {
- _ = StructA3{} //@suggestedfix("}", "refactor.rewrite")
- }
-}
-
--- suggestedfix_fill_struct_24_16 --
-package fillstruct
-
-type StructA struct {
- unexportedIntField int
- ExportedIntField int
- MapA map[int]string
- Array []int
- StructB
-}
-
-type StructA2 struct {
- B *StructB
-}
-
-type StructA3 struct {
- B StructB
-}
-
-func fill() {
- a := StructA{} //@suggestedfix("}", "refactor.rewrite")
- b := StructA2{} //@suggestedfix("}", "refactor.rewrite")
- c := StructA3{} //@suggestedfix("}", "refactor.rewrite")
- if true {
- _ = StructA3{
- B: StructB{},
- } //@suggestedfix("}", "refactor.rewrite")
- }
-}
-
diff --git a/internal/lsp/testdata/fillstruct/fill_struct_anon.go b/internal/lsp/testdata/fillstruct/fill_struct_anon.go
deleted file mode 100644
index b5d2337fd..000000000
--- a/internal/lsp/testdata/fillstruct/fill_struct_anon.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package fillstruct
-
-type StructAnon struct {
- a struct{}
- b map[string]interface{}
- c map[string]struct {
- d int
- e bool
- }
-}
-
-func fill() {
- _ := StructAnon{} //@suggestedfix("}", "refactor.rewrite")
-}
diff --git a/internal/lsp/testdata/fillstruct/fill_struct_anon.go.golden b/internal/lsp/testdata/fillstruct/fill_struct_anon.go.golden
deleted file mode 100644
index eb6ffd661..000000000
--- a/internal/lsp/testdata/fillstruct/fill_struct_anon.go.golden
+++ /dev/null
@@ -1,20 +0,0 @@
--- suggestedfix_fill_struct_anon_13_18 --
-package fillstruct
-
-type StructAnon struct {
- a struct{}
- b map[string]interface{}
- c map[string]struct {
- d int
- e bool
- }
-}
-
-func fill() {
- _ := StructAnon{
- a: struct{}{},
- b: map[string]interface{}{},
- c: map[string]struct{d int; e bool}{},
- } //@suggestedfix("}", "refactor.rewrite")
-}
-
diff --git a/internal/lsp/testdata/fillstruct/fill_struct_nested.go b/internal/lsp/testdata/fillstruct/fill_struct_nested.go
deleted file mode 100644
index 79eb84b74..000000000
--- a/internal/lsp/testdata/fillstruct/fill_struct_nested.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package fillstruct
-
-type StructB struct {
- StructC
-}
-
-type StructC struct {
- unexportedInt int
-}
-
-func nested() {
- c := StructB{
- StructC: StructC{}, //@suggestedfix("}", "refactor.rewrite")
- }
-}
diff --git a/internal/lsp/testdata/fillstruct/fill_struct_nested.go.golden b/internal/lsp/testdata/fillstruct/fill_struct_nested.go.golden
deleted file mode 100644
index 30061a5d7..000000000
--- a/internal/lsp/testdata/fillstruct/fill_struct_nested.go.golden
+++ /dev/null
@@ -1,19 +0,0 @@
--- suggestedfix_fill_struct_nested_13_20 --
-package fillstruct
-
-type StructB struct {
- StructC
-}
-
-type StructC struct {
- unexportedInt int
-}
-
-func nested() {
- c := StructB{
- StructC: StructC{
- unexportedInt: 0,
- }, //@suggestedfix("}", "refactor.rewrite")
- }
-}
-
diff --git a/internal/lsp/testdata/fillstruct/fill_struct_package.go b/internal/lsp/testdata/fillstruct/fill_struct_package.go
deleted file mode 100644
index 71f124858..000000000
--- a/internal/lsp/testdata/fillstruct/fill_struct_package.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package fillstruct
-
-import (
- h2 "net/http"
-
- "golang.org/x/tools/internal/lsp/fillstruct/data"
-)
-
-func unexported() {
- a := data.B{} //@suggestedfix("}", "refactor.rewrite")
- _ = h2.Client{} //@suggestedfix("}", "refactor.rewrite")
-}
diff --git a/internal/lsp/testdata/fillstruct/fill_struct_package.go.golden b/internal/lsp/testdata/fillstruct/fill_struct_package.go.golden
deleted file mode 100644
index 13c857025..000000000
--- a/internal/lsp/testdata/fillstruct/fill_struct_package.go.golden
+++ /dev/null
@@ -1,36 +0,0 @@
--- suggestedfix_fill_struct_package_10_14 --
-package fillstruct
-
-import (
- h2 "net/http"
-
- "golang.org/x/tools/internal/lsp/fillstruct/data"
-)
-
-func unexported() {
- a := data.B{
- ExportedInt: 0,
- } //@suggestedfix("}", "refactor.rewrite")
- _ = h2.Client{} //@suggestedfix("}", "refactor.rewrite")
-}
-
--- suggestedfix_fill_struct_package_11_16 --
-package fillstruct
-
-import (
- h2 "net/http"
-
- "golang.org/x/tools/internal/lsp/fillstruct/data"
-)
-
-func unexported() {
- a := data.B{} //@suggestedfix("}", "refactor.rewrite")
- _ = h2.Client{
- Transport: nil,
- CheckRedirect: func(req *h2.Request, via []*h2.Request) error {
- },
- Jar: nil,
- Timeout: 0,
- } //@suggestedfix("}", "refactor.rewrite")
-}
-
diff --git a/internal/lsp/testdata/fillstruct/fill_struct_partial.go b/internal/lsp/testdata/fillstruct/fill_struct_partial.go
deleted file mode 100644
index 97b517dcd..000000000
--- a/internal/lsp/testdata/fillstruct/fill_struct_partial.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package fillstruct
-
-type StructPartialA struct {
- PrefilledInt int
- UnfilledInt int
- StructPartialB
-}
-
-type StructPartialB struct {
- PrefilledInt int
- UnfilledInt int
-}
-
-func fill() {
- a := StructPartialA{
- PrefilledInt: 5,
- } //@suggestedfix("}", "refactor.rewrite")
- b := StructPartialB{
- /* this comment should disappear */
- PrefilledInt: 7, // This comment should be blown away.
- /* As should
- this one */
- } //@suggestedfix("}", "refactor.rewrite")
-}
diff --git a/internal/lsp/testdata/fillstruct/fill_struct_partial.go.golden b/internal/lsp/testdata/fillstruct/fill_struct_partial.go.golden
deleted file mode 100644
index 2d063c14d..000000000
--- a/internal/lsp/testdata/fillstruct/fill_struct_partial.go.golden
+++ /dev/null
@@ -1,52 +0,0 @@
--- suggestedfix_fill_struct_partial_17_2 --
-package fillstruct
-
-type StructPartialA struct {
- PrefilledInt int
- UnfilledInt int
- StructPartialB
-}
-
-type StructPartialB struct {
- PrefilledInt int
- UnfilledInt int
-}
-
-func fill() {
- a := StructPartialA{
- PrefilledInt: 5,
- UnfilledInt: 0,
- StructPartialB: StructPartialB{},
- } //@suggestedfix("}", "refactor.rewrite")
- b := StructPartialB{
- /* this comment should disappear */
- PrefilledInt: 7, // This comment should be blown away.
- /* As should
- this one */
- } //@suggestedfix("}", "refactor.rewrite")
-}
-
--- suggestedfix_fill_struct_partial_23_2 --
-package fillstruct
-
-type StructPartialA struct {
- PrefilledInt int
- UnfilledInt int
- StructPartialB
-}
-
-type StructPartialB struct {
- PrefilledInt int
- UnfilledInt int
-}
-
-func fill() {
- a := StructPartialA{
- PrefilledInt: 5,
- } //@suggestedfix("}", "refactor.rewrite")
- b := StructPartialB{
- PrefilledInt: 7,
- UnfilledInt: 0,
- } //@suggestedfix("}", "refactor.rewrite")
-}
-
diff --git a/internal/lsp/testdata/fillstruct/fill_struct_spaces.go b/internal/lsp/testdata/fillstruct/fill_struct_spaces.go
deleted file mode 100644
index d5d1bbba5..000000000
--- a/internal/lsp/testdata/fillstruct/fill_struct_spaces.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package fillstruct
-
-type StructD struct {
- ExportedIntField int
-}
-
-func spaces() {
- d := StructD{} //@suggestedfix("}", "refactor.rewrite")
-}
diff --git a/internal/lsp/testdata/fillstruct/fill_struct_spaces.go.golden b/internal/lsp/testdata/fillstruct/fill_struct_spaces.go.golden
deleted file mode 100644
index 0d755334c..000000000
--- a/internal/lsp/testdata/fillstruct/fill_struct_spaces.go.golden
+++ /dev/null
@@ -1,13 +0,0 @@
--- suggestedfix_fill_struct_spaces_8_15 --
-package fillstruct
-
-type StructD struct {
- ExportedIntField int
-}
-
-func spaces() {
- d := StructD{
- ExportedIntField: 0,
- } //@suggestedfix("}", "refactor.rewrite")
-}
-
diff --git a/internal/lsp/testdata/folding/a.go.golden b/internal/lsp/testdata/folding/a.go.golden
deleted file mode 100644
index ce6910233..000000000
--- a/internal/lsp/testdata/folding/a.go.golden
+++ /dev/null
@@ -1,759 +0,0 @@
--- foldingRange-0 --
-package folding //@fold("package")
-
-import (<>)
-
-import _ "os"
-
-// bar is a function.<>
-func bar(<>) string {<>}
-
--- foldingRange-1 --
-package folding //@fold("package")
-
-import (
- "fmt"
- _ "log"
-)
-
-import _ "os"
-
-// bar is a function.
-// With a multiline doc comment.
-func bar() string {
- /* This is a single line comment */
- switch {<>}
- /* This is a multiline<>
-
- /* This is a multiline<>
- _ = []int{<>}
- _ = [2]string{<>}
- _ = map[string]int{<>}
- type T struct {<>}
- _ = T{<>}
- x, y := make(<>), make(<>)
- select {<>}
- // This is a multiline comment<>
- return <>
-}
-
--- foldingRange-2 --
-package folding //@fold("package")
-
-import (
- "fmt"
- _ "log"
-)
-
-import _ "os"
-
-// bar is a function.
-// With a multiline doc comment.
-func bar() string {
- /* This is a single line comment */
- switch {
- case true:<>
- case false:<>
- default:<>
- }
- /* This is a multiline
- block
- comment */
-
- /* This is a multiline
- block
- comment */
- // Followed by another comment.
- _ = []int{
- 1,
- 2,
- 3,
- }
- _ = [2]string{"d",
- "e",
- }
- _ = map[string]int{
- "a": 1,
- "b": 2,
- "c": 3,
- }
- type T struct {
- f string
- g int
- h string
- }
- _ = T{
- f: "j",
- g: 4,
- h: "i",
- }
- x, y := make(chan bool), make(chan bool)
- select {
- case val := <-x:<>
- case <-y:<>
- default:<>
- }
- // This is a multiline comment
- // that is not a doc comment.
- return `
-this string
-is not indented`
-}
-
--- foldingRange-3 --
-package folding //@fold("package")
-
-import (
- "fmt"
- _ "log"
-)
-
-import _ "os"
-
-// bar is a function.
-// With a multiline doc comment.
-func bar() string {
- /* This is a single line comment */
- switch {
- case true:
- if true {<>} else {<>}
- case false:
- fmt.Println(<>)
- default:
- fmt.Println(<>)
- }
- /* This is a multiline
- block
- comment */
-
- /* This is a multiline
- block
- comment */
- // Followed by another comment.
- _ = []int{
- 1,
- 2,
- 3,
- }
- _ = [2]string{"d",
- "e",
- }
- _ = map[string]int{
- "a": 1,
- "b": 2,
- "c": 3,
- }
- type T struct {
- f string
- g int
- h string
- }
- _ = T{
- f: "j",
- g: 4,
- h: "i",
- }
- x, y := make(chan bool), make(chan bool)
- select {
- case val := <-x:
- if val {<>} else {<>}
- case <-y:
- fmt.Println(<>)
- default:
- fmt.Println(<>)
- }
- // This is a multiline comment
- // that is not a doc comment.
- return `
-this string
-is not indented`
-}
-
--- foldingRange-4 --
-package folding //@fold("package")
-
-import (
- "fmt"
- _ "log"
-)
-
-import _ "os"
-
-// bar is a function.
-// With a multiline doc comment.
-func bar() string {
- /* This is a single line comment */
- switch {
- case true:
- if true {
- fmt.Println(<>)
- } else {
- fmt.Println(<>)
- }
- case false:
- fmt.Println("false")
- default:
- fmt.Println("default")
- }
- /* This is a multiline
- block
- comment */
-
- /* This is a multiline
- block
- comment */
- // Followed by another comment.
- _ = []int{
- 1,
- 2,
- 3,
- }
- _ = [2]string{"d",
- "e",
- }
- _ = map[string]int{
- "a": 1,
- "b": 2,
- "c": 3,
- }
- type T struct {
- f string
- g int
- h string
- }
- _ = T{
- f: "j",
- g: 4,
- h: "i",
- }
- x, y := make(chan bool), make(chan bool)
- select {
- case val := <-x:
- if val {
- fmt.Println(<>)
- } else {
- fmt.Println(<>)
- }
- case <-y:
- fmt.Println("y")
- default:
- fmt.Println("default")
- }
- // This is a multiline comment
- // that is not a doc comment.
- return `
-this string
-is not indented`
-}
-
--- foldingRange-cmd --
-3:9-6:0
-10:22-11:32
-12:10-12:9
-12:20-75:0
-14:10-25:1
-15:12-20:3
-16:12-18:2
-17:16-17:21
-18:11-20:2
-19:16-19:22
-21:13-22:22
-22:15-22:21
-23:10-24:24
-24:15-24:23
-26:24-28:11
-30:24-33:32
-34:12-38:1
-39:16-41:1
-42:21-46:1
-47:17-51:1
-52:8-56:1
-57:15-57:23
-57:32-57:40
-58:10-69:1
-59:18-64:3
-60:11-62:2
-61:16-61:28
-62:11-64:2
-63:16-63:29
-65:11-66:18
-66:15-66:17
-67:10-68:24
-68:15-68:23
-70:32-71:30
-72:9-74:16
-
--- foldingRange-comment-0 --
-package folding //@fold("package")
-
-import (
- "fmt"
- _ "log"
-)
-
-import _ "os"
-
-// bar is a function.<>
-func bar() string {
- /* This is a single line comment */
- switch {
- case true:
- if true {
- fmt.Println("true")
- } else {
- fmt.Println("false")
- }
- case false:
- fmt.Println("false")
- default:
- fmt.Println("default")
- }
- /* This is a multiline<>
-
- /* This is a multiline<>
- _ = []int{
- 1,
- 2,
- 3,
- }
- _ = [2]string{"d",
- "e",
- }
- _ = map[string]int{
- "a": 1,
- "b": 2,
- "c": 3,
- }
- type T struct {
- f string
- g int
- h string
- }
- _ = T{
- f: "j",
- g: 4,
- h: "i",
- }
- x, y := make(chan bool), make(chan bool)
- select {
- case val := <-x:
- if val {
- fmt.Println("true from x")
- } else {
- fmt.Println("false from x")
- }
- case <-y:
- fmt.Println("y")
- default:
- fmt.Println("default")
- }
- // This is a multiline comment<>
- return `
-this string
-is not indented`
-}
-
--- foldingRange-imports-0 --
-package folding //@fold("package")
-
-import (<>)
-
-import _ "os"
-
-// bar is a function.
-// With a multiline doc comment.
-func bar() string {
- /* This is a single line comment */
- switch {
- case true:
- if true {
- fmt.Println("true")
- } else {
- fmt.Println("false")
- }
- case false:
- fmt.Println("false")
- default:
- fmt.Println("default")
- }
- /* This is a multiline
- block
- comment */
-
- /* This is a multiline
- block
- comment */
- // Followed by another comment.
- _ = []int{
- 1,
- 2,
- 3,
- }
- _ = [2]string{"d",
- "e",
- }
- _ = map[string]int{
- "a": 1,
- "b": 2,
- "c": 3,
- }
- type T struct {
- f string
- g int
- h string
- }
- _ = T{
- f: "j",
- g: 4,
- h: "i",
- }
- x, y := make(chan bool), make(chan bool)
- select {
- case val := <-x:
- if val {
- fmt.Println("true from x")
- } else {
- fmt.Println("false from x")
- }
- case <-y:
- fmt.Println("y")
- default:
- fmt.Println("default")
- }
- // This is a multiline comment
- // that is not a doc comment.
- return `
-this string
-is not indented`
-}
-
--- foldingRange-lineFolding-0 --
-package folding //@fold("package")
-
-import (<>
-)
-
-import _ "os"
-
-// bar is a function.<>
-func bar() string {<>
-}
-
--- foldingRange-lineFolding-1 --
-package folding //@fold("package")
-
-import (
- "fmt"
- _ "log"
-)
-
-import _ "os"
-
-// bar is a function.
-// With a multiline doc comment.
-func bar() string {
- /* This is a single line comment */
- switch {<>
- }
- /* This is a multiline<>
-
- /* This is a multiline<>
- _ = []int{<>,
- }
- _ = [2]string{"d",
- "e",
- }
- _ = map[string]int{<>,
- }
- type T struct {<>
- }
- _ = T{<>,
- }
- x, y := make(chan bool), make(chan bool)
- select {<>
- }
- // This is a multiline comment<>
- return <>
-}
-
--- foldingRange-lineFolding-2 --
-package folding //@fold("package")
-
-import (
- "fmt"
- _ "log"
-)
-
-import _ "os"
-
-// bar is a function.
-// With a multiline doc comment.
-func bar() string {
- /* This is a single line comment */
- switch {
- case true:<>
- case false:<>
- default:<>
- }
- /* This is a multiline
- block
- comment */
-
- /* This is a multiline
- block
- comment */
- // Followed by another comment.
- _ = []int{
- 1,
- 2,
- 3,
- }
- _ = [2]string{"d",
- "e",
- }
- _ = map[string]int{
- "a": 1,
- "b": 2,
- "c": 3,
- }
- type T struct {
- f string
- g int
- h string
- }
- _ = T{
- f: "j",
- g: 4,
- h: "i",
- }
- x, y := make(chan bool), make(chan bool)
- select {
- case val := <-x:<>
- case <-y:<>
- default:<>
- }
- // This is a multiline comment
- // that is not a doc comment.
- return `
-this string
-is not indented`
-}
-
--- foldingRange-lineFolding-3 --
-package folding //@fold("package")
-
-import (
- "fmt"
- _ "log"
-)
-
-import _ "os"
-
-// bar is a function.
-// With a multiline doc comment.
-func bar() string {
- /* This is a single line comment */
- switch {
- case true:
- if true {<>
- } else {<>
- }
- case false:
- fmt.Println("false")
- default:
- fmt.Println("default")
- }
- /* This is a multiline
- block
- comment */
-
- /* This is a multiline
- block
- comment */
- // Followed by another comment.
- _ = []int{
- 1,
- 2,
- 3,
- }
- _ = [2]string{"d",
- "e",
- }
- _ = map[string]int{
- "a": 1,
- "b": 2,
- "c": 3,
- }
- type T struct {
- f string
- g int
- h string
- }
- _ = T{
- f: "j",
- g: 4,
- h: "i",
- }
- x, y := make(chan bool), make(chan bool)
- select {
- case val := <-x:
- if val {<>
- } else {<>
- }
- case <-y:
- fmt.Println("y")
- default:
- fmt.Println("default")
- }
- // This is a multiline comment
- // that is not a doc comment.
- return `
-this string
-is not indented`
-}
-
--- foldingRange-lineFolding-comment-0 --
-package folding //@fold("package")
-
-import (
- "fmt"
- _ "log"
-)
-
-import _ "os"
-
-// bar is a function.<>
-func bar() string {
- /* This is a single line comment */
- switch {
- case true:
- if true {
- fmt.Println("true")
- } else {
- fmt.Println("false")
- }
- case false:
- fmt.Println("false")
- default:
- fmt.Println("default")
- }
- /* This is a multiline<>
-
- /* This is a multiline<>
- _ = []int{
- 1,
- 2,
- 3,
- }
- _ = [2]string{"d",
- "e",
- }
- _ = map[string]int{
- "a": 1,
- "b": 2,
- "c": 3,
- }
- type T struct {
- f string
- g int
- h string
- }
- _ = T{
- f: "j",
- g: 4,
- h: "i",
- }
- x, y := make(chan bool), make(chan bool)
- select {
- case val := <-x:
- if val {
- fmt.Println("true from x")
- } else {
- fmt.Println("false from x")
- }
- case <-y:
- fmt.Println("y")
- default:
- fmt.Println("default")
- }
- // This is a multiline comment<>
- return `
-this string
-is not indented`
-}
-
--- foldingRange-lineFolding-imports-0 --
-package folding //@fold("package")
-
-import (<>
-)
-
-import _ "os"
-
-// bar is a function.
-// With a multiline doc comment.
-func bar() string {
- /* This is a single line comment */
- switch {
- case true:
- if true {
- fmt.Println("true")
- } else {
- fmt.Println("false")
- }
- case false:
- fmt.Println("false")
- default:
- fmt.Println("default")
- }
- /* This is a multiline
- block
- comment */
-
- /* This is a multiline
- block
- comment */
- // Followed by another comment.
- _ = []int{
- 1,
- 2,
- 3,
- }
- _ = [2]string{"d",
- "e",
- }
- _ = map[string]int{
- "a": 1,
- "b": 2,
- "c": 3,
- }
- type T struct {
- f string
- g int
- h string
- }
- _ = T{
- f: "j",
- g: 4,
- h: "i",
- }
- x, y := make(chan bool), make(chan bool)
- select {
- case val := <-x:
- if val {
- fmt.Println("true from x")
- } else {
- fmt.Println("false from x")
- }
- case <-y:
- fmt.Println("y")
- default:
- fmt.Println("default")
- }
- // This is a multiline comment
- // that is not a doc comment.
- return `
-this string
-is not indented`
-}
-
diff --git a/internal/lsp/testdata/folding/bad.go.golden b/internal/lsp/testdata/folding/bad.go.golden
deleted file mode 100644
index d1bdfec60..000000000
--- a/internal/lsp/testdata/folding/bad.go.golden
+++ /dev/null
@@ -1,91 +0,0 @@
--- foldingRange-0 --
-package folding //@fold("package")
-
-import (<>)
-
-import (<>)
-
-// badBar is a function.
-func badBar(<>) string {<>}
-
--- foldingRange-1 --
-package folding //@fold("package")
-
-import ( "fmt"
- _ "log"
-)
-
-import (
- _ "os" )
-
-// badBar is a function.
-func badBar() string { x := true
- if x {<>} else {<>}
- return
-}
-
--- foldingRange-2 --
-package folding //@fold("package")
-
-import ( "fmt"
- _ "log"
-)
-
-import (
- _ "os" )
-
-// badBar is a function.
-func badBar() string { x := true
- if x {
- // This is the only foldable thing in this file when lineFoldingOnly
- fmt.Println(<>)
- } else {
- fmt.Println(<>) }
- return
-}
-
--- foldingRange-cmd --
-3:9-5:0
-7:9-8:8
-11:13-11:12
-11:23-18:0
-12:8-15:1
-14:15-14:20
-15:10-16:23
-16:15-16:21
-
--- foldingRange-imports-0 --
-package folding //@fold("package")
-
-import (<>)
-
-import (<>)
-
-// badBar is a function.
-func badBar() string { x := true
- if x {
- // This is the only foldable thing in this file when lineFoldingOnly
- fmt.Println("true")
- } else {
- fmt.Println("false") }
- return
-}
-
--- foldingRange-lineFolding-0 --
-package folding //@fold("package")
-
-import ( "fmt"
- _ "log"
-)
-
-import (
- _ "os" )
-
-// badBar is a function.
-func badBar() string { x := true
- if x {<>
- } else {
- fmt.Println("false") }
- return
-}
-
diff --git a/internal/lsp/testdata/foo/foo.go b/internal/lsp/testdata/foo/foo.go
deleted file mode 100644
index 20ea183e5..000000000
--- a/internal/lsp/testdata/foo/foo.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package foo //@mark(PackageFoo, "foo"),item(PackageFoo, "foo", "\"golang.org/x/tools/internal/lsp/foo\"", "package")
-
-type StructFoo struct { //@item(StructFoo, "StructFoo", "struct{...}", "struct")
- Value int //@item(Value, "Value", "int", "field")
-}
-
-// Pre-set this marker, as we don't have a "source" for it in this package.
-/* Error() */ //@item(Error, "Error", "func() string", "method")
-
-func Foo() { //@item(Foo, "Foo", "func()", "func")
- var err error
- err.Error() //@complete("E", Error)
-}
-
-func _() {
- var sFoo StructFoo //@mark(sFoo1, "sFoo"),complete("t", StructFoo)
- if x := sFoo; x.Value == 1 { //@mark(sFoo2, "sFoo"),complete("V", Value),typdef("sFoo", StructFoo),refs("sFo", sFoo1, sFoo2)
- return
- }
-}
-
-func _() {
- shadowed := 123
- {
- shadowed := "hi" //@item(shadowed, "shadowed", "string", "var"),refs("shadowed", shadowed)
- sha //@complete("a", shadowed)
- }
-}
-
-type IntFoo int //@item(IntFoo, "IntFoo", "int", "type")
diff --git a/internal/lsp/testdata/format/bad_format.go.golden b/internal/lsp/testdata/format/bad_format.go.golden
deleted file mode 100644
index c2ac5a1a1..000000000
--- a/internal/lsp/testdata/format/bad_format.go.golden
+++ /dev/null
@@ -1,21 +0,0 @@
--- gofmt --
-package format //@format("package")
-
-import (
- "fmt"
- "log"
- "runtime"
-)
-
-func hello() {
-
- var x int //@diag("x", "compiler", "x declared but not used", "error")
-}
-
-func hi() {
- runtime.GOROOT()
- fmt.Printf("")
-
- log.Printf("")
-}
-
diff --git a/internal/lsp/testdata/format/bad_format.go.in b/internal/lsp/testdata/format/bad_format.go.in
deleted file mode 100644
index 06187238e..000000000
--- a/internal/lsp/testdata/format/bad_format.go.in
+++ /dev/null
@@ -1,22 +0,0 @@
-package format //@format("package")
-
-import (
- "runtime"
- "fmt"
- "log"
-)
-
-func hello() {
-
-
-
-
- var x int //@diag("x", "compiler", "x declared but not used", "error")
-}
-
-func hi() {
- runtime.GOROOT()
- fmt.Printf("")
-
- log.Printf("")
-}
diff --git a/internal/lsp/testdata/generated/generated.go b/internal/lsp/testdata/generated/generated.go
deleted file mode 100644
index c92bd9eb8..000000000
--- a/internal/lsp/testdata/generated/generated.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package generated
-
-// Code generated by generator.go. DO NOT EDIT.
-
-func _() {
- var y int //@diag("y", "compiler", "y declared but not used", "error")
-}
diff --git a/internal/lsp/testdata/generated/generator.go b/internal/lsp/testdata/generated/generator.go
deleted file mode 100644
index f26e33c80..000000000
--- a/internal/lsp/testdata/generated/generator.go
+++ /dev/null
@@ -1,5 +0,0 @@
-package generated
-
-func _() {
- var x int //@diag("x", "compiler", "x declared but not used", "error")
-}
diff --git a/internal/lsp/testdata/godef/a/a.go b/internal/lsp/testdata/godef/a/a.go
deleted file mode 100644
index 5cc85527a..000000000
--- a/internal/lsp/testdata/godef/a/a.go
+++ /dev/null
@@ -1,105 +0,0 @@
-// Package a is a package for testing go to definition.
-package a //@mark(aPackage, "a "),hoverdef("a ", aPackage)
-
-import (
- "fmt"
- "go/types"
- "sync"
-)
-
-var (
- // x is a variable.
- x string //@x,hoverdef("x", x)
-)
-
-// Constant block. When I hover on h, I should see this comment.
-const (
- // When I hover on g, I should see this comment.
- g = 1 //@g,hoverdef("g", g)
-
- h = 2 //@h,hoverdef("h", h)
-)
-
-// z is a variable too.
-var z string //@z,hoverdef("z", z)
-
-type A string //@mark(AString, "A")
-
-func AStuff() { //@AStuff
- x := 5
- Random2(x) //@godef("dom2", Random2)
- Random() //@godef("()", Random)
-
- var err error //@err
- fmt.Printf("%v", err) //@godef("err", err)
-
- var y string //@string,hoverdef("string", string)
- _ = make([]int, 0) //@make,hoverdef("make", make)
-
- var mu sync.Mutex
- mu.Lock() //@Lock,hoverdef("Lock", Lock)
-
- var typ *types.Named //@mark(typesImport, "types"),hoverdef("types", typesImport)
- typ.Obj().Name() //@Name,hoverdef("Name", Name)
-}
-
-type A struct {
-}
-
-func (_ A) Hi() {} //@mark(AHi, "Hi")
-
-type S struct {
- Field int //@mark(AField, "Field")
- R // embed a struct
- H // embed an interface
-}
-
-type R struct {
- Field2 int //@mark(AField2, "Field2")
-}
-
-func (_ R) Hey() {} //@mark(AHey, "Hey")
-
-type H interface {
- Goodbye() //@mark(AGoodbye, "Goodbye")
-}
-
-type I interface {
- B() //@mark(AB, "B")
- J
-}
-
-type J interface {
- Hello() //@mark(AHello, "Hello")
-}
-
-func _() {
- // 1st type declaration block
- type (
- a struct { //@mark(declBlockA, "a"),hoverdef("a", declBlockA)
- x string
- }
- )
-
- // 2nd type declaration block
- type (
- // b has a comment
- b struct{} //@mark(declBlockB, "b"),hoverdef("b", declBlockB)
- )
-
- // 3rd type declaration block
- type (
- // c is a struct
- c struct { //@mark(declBlockC, "c"),hoverdef("c", declBlockC)
- f string
- }
-
- d string //@mark(declBlockD, "d"),hoverdef("d", declBlockD)
- )
-
- type (
- e struct { //@mark(declBlockE, "e"),hoverdef("e", declBlockE)
- f float64
- } // e has a comment
- )
-}
diff --git a/internal/lsp/testdata/godef/a/a.go.golden b/internal/lsp/testdata/godef/a/a.go.golden
deleted file mode 100644
index 9f67a147d..000000000
--- a/internal/lsp/testdata/godef/a/a.go.golden
+++ /dev/null
@@ -1,190 +0,0 @@
--- Lock-hoverdef --
-```go
-func (*sync.Mutex).Lock()
-```
-
-Lock locks m\.
-
-[`(sync.Mutex).Lock` on pkg.go.dev](https://pkg.go.dev/sync?utm_source=gopls#Mutex.Lock)
--- Name-hoverdef --
-```go
-func (*types.object).Name() string
-```
-
-Name returns the object\'s \(package\-local, unqualified\) name\.
-
-[`(types.TypeName).Name` on pkg.go.dev](https://pkg.go.dev/go/types?utm_source=gopls#TypeName.Name)
--- Random-definition --
-godef/a/random.go:3:6-12: defined here as ```go
-func Random() int
-```
-
-[`a.Random` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Random)
--- Random-definition-json --
-{
- "span": {
- "uri": "file://godef/a/random.go",
- "start": {
- "line": 3,
- "column": 6,
- "offset": 16
- },
- "end": {
- "line": 3,
- "column": 12,
- "offset": 22
- }
- },
- "description": "```go\nfunc Random() int\n```\n\n[`a.Random` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Random)"
-}
-
--- Random-hoverdef --
-```go
-func Random() int
-```
-
-[`a.Random` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Random)
--- Random2-definition --
-godef/a/random.go:8:6-13: defined here as ```go
-func Random2(y int) int
-```
-
-[`a.Random2` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Random2)
--- Random2-definition-json --
-{
- "span": {
- "uri": "file://godef/a/random.go",
- "start": {
- "line": 8,
- "column": 6,
- "offset": 71
- },
- "end": {
- "line": 8,
- "column": 13,
- "offset": 78
- }
- },
- "description": "```go\nfunc Random2(y int) int\n```\n\n[`a.Random2` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Random2)"
-}
-
--- Random2-hoverdef --
-```go
-func Random2(y int) int
-```
-
-[`a.Random2` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Random2)
--- aPackage-hoverdef --
-Package a is a package for testing go to definition\.
--- declBlockA-hoverdef --
-```go
-type a struct {
- x string
-}
-```
-
-1st type declaration block
--- declBlockB-hoverdef --
-```go
-type b struct{}
-```
-
-b has a comment
--- declBlockC-hoverdef --
-```go
-type c struct {
- f string
-}
-```
-
-c is a struct
--- declBlockD-hoverdef --
-```go
-type d string
-```
-
-3rd type declaration block
--- declBlockE-hoverdef --
-```go
-type e struct {
- f float64
-}
-```
-
-e has a comment
--- err-definition --
-godef/a/a.go:33:6-9: defined here as ```go
-var err error
-```
-
-\@err
--- err-definition-json --
-{
- "span": {
- "uri": "file://godef/a/a.go",
- "start": {
- "line": 33,
- "column": 6,
- "offset": 612
- },
- "end": {
- "line": 33,
- "column": 9,
- "offset": 615
- }
- },
- "description": "```go\nvar err error\n```\n\n\\@err"
-}
-
--- err-hoverdef --
-```go
-var err error
-```
-
-\@err
--- g-hoverdef --
-```go
-const g untyped int = 1
-```
-
-When I hover on g, I should see this comment\.
--- h-hoverdef --
-```go
-const h untyped int = 2
-```
-
-Constant block\.
--- make-hoverdef --
-```go
-func make(t Type, size ...int) Type
-```
-
-The make built\-in function allocates and initializes an object of type slice, map, or chan \(only\)\.
-
-[`make` on pkg.go.dev](https://pkg.go.dev/builtin?utm_source=gopls#make)
--- string-hoverdef --
-```go
-type string string
-```
-
-string is the set of all strings of 8\-bit bytes, conventionally but not necessarily representing UTF\-8\-encoded text\.
-
-[`string` on pkg.go.dev](https://pkg.go.dev/builtin?utm_source=gopls#string)
--- typesImport-hoverdef --
-```go
-package types ("go/types")
-```
-
-[`types` on pkg.go.dev](https://pkg.go.dev/go/types?utm_source=gopls)
--- x-hoverdef --
-```go
-var x string
-```
-
-x is a variable\.
--- z-hoverdef --
-```go
-var z string
-```
-
-z is a variable too\.
diff --git a/internal/lsp/testdata/godef/a/a_test.go b/internal/lsp/testdata/godef/a/a_test.go
deleted file mode 100644
index 77bd633b6..000000000
--- a/internal/lsp/testdata/godef/a/a_test.go
+++ /dev/null
@@ -1,8 +0,0 @@
-package a
-
-import (
- "testing"
-)
-
-func TestA(t *testing.T) { //@TestA,godef(TestA, TestA)
-}
diff --git a/internal/lsp/testdata/godef/a/a_test.go.golden b/internal/lsp/testdata/godef/a/a_test.go.golden
deleted file mode 100644
index e5cb3d799..000000000
--- a/internal/lsp/testdata/godef/a/a_test.go.golden
+++ /dev/null
@@ -1,26 +0,0 @@
--- TestA-definition --
-godef/a/a_test.go:7:6-11: defined here as ```go
-func TestA(t *testing.T)
-```
--- TestA-definition-json --
-{
- "span": {
- "uri": "file://godef/a/a_test.go",
- "start": {
- "line": 7,
- "column": 6,
- "offset": 39
- },
- "end": {
- "line": 7,
- "column": 11,
- "offset": 44
- }
- },
- "description": "```go\nfunc TestA(t *testing.T)\n```"
-}
-
--- TestA-hoverdef --
-```go
-func TestA(t *testing.T)
-```
diff --git a/internal/lsp/testdata/godef/a/a_x_test.go b/internal/lsp/testdata/godef/a/a_x_test.go
deleted file mode 100644
index 4631eba2c..000000000
--- a/internal/lsp/testdata/godef/a/a_x_test.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package a_test
-
-import (
- "testing"
-)
-
-func TestA2(t *testing.T) { //@TestA2,godef(TestA2, TestA2)
- Nonexistant() //@diag("Nonexistant", "compiler", "undeclared name: Nonexistant", "error")
-}
diff --git a/internal/lsp/testdata/godef/a/d.go b/internal/lsp/testdata/godef/a/d.go
deleted file mode 100644
index 2da8d058e..000000000
--- a/internal/lsp/testdata/godef/a/d.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package a //@mark(a, "a "),hoverdef("a ", a)
-
-import "fmt"
-
-type Thing struct { //@Thing
- Member string //@Member
-}
-
-var Other Thing //@Other
-
-func Things(val []string) []Thing { //@Things
- return nil
-}
-
-func (t Thing) Method(i int) string { //@Method
- return t.Member
-}
-
-func useThings() {
- t := Thing{ //@mark(aStructType, "ing")
- Member: "string", //@mark(fMember, "ember")
- }
- fmt.Print(t.Member) //@mark(aMember, "ember")
- fmt.Print(Other) //@mark(aVar, "ther")
- Things() //@mark(aFunc, "ings")
- t.Method() //@mark(aMethod, "eth")
-}
-
-/*@
-godef(aStructType, Thing)
-godef(aMember, Member)
-godef(aVar, Other)
-godef(aFunc, Things)
-godef(aMethod, Method)
-godef(fMember, Member)
-godef(Member, Member)
-
-//param
-//package name
-//const
-//anon field
-
-*/
diff --git a/internal/lsp/testdata/godef/a/d.go.golden b/internal/lsp/testdata/godef/a/d.go.golden
deleted file mode 100644
index 47723b045..000000000
--- a/internal/lsp/testdata/godef/a/d.go.golden
+++ /dev/null
@@ -1,164 +0,0 @@
--- Member-definition --
-godef/a/d.go:6:2-8: defined here as ```go
-field Member string
-```
-
-\@Member
-
-[`(a.Thing).Member` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Thing.Member)
--- Member-definition-json --
-{
- "span": {
- "uri": "file://godef/a/d.go",
- "start": {
- "line": 6,
- "column": 2,
- "offset": 90
- },
- "end": {
- "line": 6,
- "column": 8,
- "offset": 96
- }
- },
- "description": "```go\nfield Member string\n```\n\n\\@Member\n\n[`(a.Thing).Member` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Thing.Member)"
-}
-
--- Member-hoverdef --
-```go
-field Member string
-```
-
-\@Member
-
-[`(a.Thing).Member` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Thing.Member)
--- Method-definition --
-godef/a/d.go:15:16-22: defined here as ```go
-func (Thing).Method(i int) string
-```
-
-[`(a.Thing).Method` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Thing.Method)
--- Method-definition-json --
-{
- "span": {
- "uri": "file://godef/a/d.go",
- "start": {
- "line": 15,
- "column": 16,
- "offset": 219
- },
- "end": {
- "line": 15,
- "column": 22,
- "offset": 225
- }
- },
- "description": "```go\nfunc (Thing).Method(i int) string\n```\n\n[`(a.Thing).Method` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Thing.Method)"
-}
-
--- Method-hoverdef --
-```go
-func (Thing).Method(i int) string
-```
-
-[`(a.Thing).Method` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Thing.Method)
--- Other-definition --
-godef/a/d.go:9:5-10: defined here as ```go
-var Other Thing
-```
-
-\@Other
-
-[`a.Other` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Other)
--- Other-definition-json --
-{
- "span": {
- "uri": "file://godef/a/d.go",
- "start": {
- "line": 9,
- "column": 5,
- "offset": 121
- },
- "end": {
- "line": 9,
- "column": 10,
- "offset": 126
- }
- },
- "description": "```go\nvar Other Thing\n```\n\n\\@Other\n\n[`a.Other` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Other)"
-}
-
--- Other-hoverdef --
-```go
-var Other Thing
-```
-
-\@Other
-
-[`a.Other` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Other)
--- Thing-definition --
-godef/a/d.go:5:6-11: defined here as ```go
-type Thing struct {
- Member string //@Member
-}
-```
-
-[`a.Thing` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Thing)
--- Thing-definition-json --
-{
- "span": {
- "uri": "file://godef/a/d.go",
- "start": {
- "line": 5,
- "column": 6,
- "offset": 65
- },
- "end": {
- "line": 5,
- "column": 11,
- "offset": 70
- }
- },
- "description": "```go\ntype Thing struct {\n\tMember string //@Member\n}\n```\n\n[`a.Thing` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Thing)"
-}
-
--- Thing-hoverdef --
-```go
-type Thing struct {
- Member string //@Member
-}
-```
-
-[`a.Thing` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Thing)
--- Things-definition --
-godef/a/d.go:11:6-12: defined here as ```go
-func Things(val []string) []Thing
-```
-
-[`a.Things` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Things)
--- Things-definition-json --
-{
- "span": {
- "uri": "file://godef/a/d.go",
- "start": {
- "line": 11,
- "column": 6,
- "offset": 148
- },
- "end": {
- "line": 11,
- "column": 12,
- "offset": 154
- }
- },
- "description": "```go\nfunc Things(val []string) []Thing\n```\n\n[`a.Things` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Things)"
-}
-
--- Things-hoverdef --
-```go
-func Things(val []string) []Thing
-```
-
-[`a.Things` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Things)
--- a-hoverdef --
-Package a is a package for testing go to definition\.
diff --git a/internal/lsp/testdata/godef/a/f.go b/internal/lsp/testdata/godef/a/f.go
deleted file mode 100644
index 589c45fc1..000000000
--- a/internal/lsp/testdata/godef/a/f.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package a
-
-import "fmt"
-
-func TypeStuff() { //@Stuff
- var x string
-
- switch y := interface{}(x).(type) { //@mark(switchY, "y"),godef("y", switchY)
- case int: //@mark(intY, "int")
- fmt.Printf("%v", y) //@hoverdef("y", intY)
- case string: //@mark(stringY, "string")
- fmt.Printf("%v", y) //@hoverdef("y", stringY)
- }
-
-}
diff --git a/internal/lsp/testdata/godef/a/g.go.golden b/internal/lsp/testdata/godef/a/g.go.golden
deleted file mode 100644
index b7ed73928..000000000
--- a/internal/lsp/testdata/godef/a/g.go.golden
+++ /dev/null
@@ -1,6 +0,0 @@
--- dur-hoverdef --
-```go
-const dur time.Duration = 910350000000 // 15m10.35s
-```
-
-dur is a constant of type time\.Duration\.
diff --git a/internal/lsp/testdata/godef/a/h.go.golden b/internal/lsp/testdata/godef/a/h.go.golden
deleted file mode 100644
index 4b27211e9..000000000
--- a/internal/lsp/testdata/godef/a/h.go.golden
+++ /dev/null
@@ -1,136 +0,0 @@
--- arrD-hoverdef --
-```go
-field d int
-```
-
-d field
--- arrE-hoverdef --
-```go
-field e struct{f int}
-```
-
-e nested struct
--- arrF-hoverdef --
-```go
-field f int
-```
-
-f field of nested struct
--- complexH-hoverdef --
-```go
-field h int
-```
-
-h field
--- complexI-hoverdef --
-```go
-field i struct{j int}
-```
-
-i nested struct
--- complexJ-hoverdef --
-```go
-field j int
-```
-
-j field of nested struct
--- mapStructKeyX-hoverdef --
-```go
-field x []string
-```
-
-X key field
--- mapStructKeyY-hoverdef --
-```go
-field y string
-```
--- mapStructValueX-hoverdef --
-```go
-field x string
-```
-
-X value field
--- nestedMap-hoverdef --
-```go
-field m map[string]float64
-```
-
-nested map
--- nestedNumber-hoverdef --
-```go
-field number int64
-```
-
-nested number
--- nestedString-hoverdef --
-```go
-field str string
-```
-
-nested string
--- openMethod-hoverdef --
-```go
-func (interface).open() error
-```
-
-open method comment
--- returnX-hoverdef --
-```go
-field x int
-```
-
-X coord
--- returnY-hoverdef --
-```go
-field y int
-```
-
-Y coord
--- structA-hoverdef --
-```go
-field a int
-```
-
-a field
--- structB-hoverdef --
-```go
-field b struct{c int}
-```
-
-b nested struct
--- structC-hoverdef --
-```go
-field c int
-```
-
-c field of nested struct
--- testDescription-hoverdef --
-```go
-field desc string
-```
-
-test description
--- testInput-hoverdef --
-```go
-field in map[string][]struct{key string; value interface{}}
-```
-
-test input
--- testInputKey-hoverdef --
-```go
-field key string
-```
-
-test key
--- testInputValue-hoverdef --
-```go
-field value interface{}
-```
-
-test value
--- testResultValue-hoverdef --
-```go
-field value int
-```
-
-expected test value
diff --git a/internal/lsp/testdata/godef/a/random.go b/internal/lsp/testdata/godef/a/random.go
deleted file mode 100644
index 62055c1fc..000000000
--- a/internal/lsp/testdata/godef/a/random.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package a
-
-func Random() int { //@Random
- y := 6 + 7
- return y
-}
-
-func Random2(y int) int { //@Random2,mark(RandomParamY, "y")
- return y //@godef("y", RandomParamY)
-}
-
-type Pos struct {
- x, y int //@mark(PosX, "x"),mark(PosY, "y")
-}
-
-// Typ has a comment. Its fields do not.
-type Typ struct{ field string } //@mark(TypField, "field")
-
-func _() {
- x := &Typ{}
- x.field //@godef("field", TypField)
-}
-
-func (p *Pos) Sum() int { //@mark(PosSum, "Sum")
- return p.x + p.y //@godef("x", PosX)
-}
-
-func _() {
- var p Pos
- _ = p.Sum() //@godef("()", PosSum)
-}
diff --git a/internal/lsp/testdata/godef/a/random.go.golden b/internal/lsp/testdata/godef/a/random.go.golden
deleted file mode 100644
index 381a11ace..000000000
--- a/internal/lsp/testdata/godef/a/random.go.golden
+++ /dev/null
@@ -1,112 +0,0 @@
--- PosSum-definition --
-godef/a/random.go:24:15-18: defined here as ```go
-func (*Pos).Sum() int
-```
-
-[`(a.Pos).Sum` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Pos.Sum)
--- PosSum-definition-json --
-{
- "span": {
- "uri": "file://godef/a/random.go",
- "start": {
- "line": 24,
- "column": 15,
- "offset": 413
- },
- "end": {
- "line": 24,
- "column": 18,
- "offset": 416
- }
- },
- "description": "```go\nfunc (*Pos).Sum() int\n```\n\n[`(a.Pos).Sum` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Pos.Sum)"
-}
-
--- PosSum-hoverdef --
-```go
-func (*Pos).Sum() int
-```
-
-[`(a.Pos).Sum` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Pos.Sum)
--- PosX-definition --
-godef/a/random.go:13:2-3: defined here as ```go
-field x int
-```
-
-\@mark\(PosX, \"x\"\),mark\(PosY, \"y\"\)
--- PosX-definition-json --
-{
- "span": {
- "uri": "file://godef/a/random.go",
- "start": {
- "line": 13,
- "column": 2,
- "offset": 187
- },
- "end": {
- "line": 13,
- "column": 3,
- "offset": 188
- }
- },
- "description": "```go\nfield x int\n```\n\n\\@mark\\(PosX, \\\"x\\\"\\),mark\\(PosY, \\\"y\\\"\\)"
-}
-
--- PosX-hoverdef --
-```go
-field x int
-```
-
-\@mark\(PosX, \"x\"\),mark\(PosY, \"y\"\)
--- RandomParamY-definition --
-godef/a/random.go:8:14-15: defined here as ```go
-var y int
-```
--- RandomParamY-definition-json --
-{
- "span": {
- "uri": "file://godef/a/random.go",
- "start": {
- "line": 8,
- "column": 14,
- "offset": 79
- },
- "end": {
- "line": 8,
- "column": 15,
- "offset": 80
- }
- },
- "description": "```go\nvar y int\n```"
-}
-
--- RandomParamY-hoverdef --
-```go
-var y int
-```
--- TypField-definition --
-godef/a/random.go:17:18-23: defined here as ```go
-field field string
-```
--- TypField-definition-json --
-{
- "span": {
- "uri": "file://godef/a/random.go",
- "start": {
- "line": 17,
- "column": 18,
- "offset": 292
- },
- "end": {
- "line": 17,
- "column": 23,
- "offset": 297
- }
- },
- "description": "```go\nfield field string\n```"
-}
-
--- TypField-hoverdef --
-```go
-field field string
-```
diff --git a/internal/lsp/testdata/godef/b/b.go b/internal/lsp/testdata/godef/b/b.go
deleted file mode 100644
index f9c1d6402..000000000
--- a/internal/lsp/testdata/godef/b/b.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package b
-
-import (
- myFoo "golang.org/x/tools/internal/lsp/foo" //@mark(myFoo, "myFoo"),godef("myFoo", myFoo)
- "golang.org/x/tools/internal/lsp/godef/a" //@mark(AImport, re"\".*\"")
-)
-
-type Embed struct {
- *a.A
- a.I
- a.S
-}
-
-func _() {
- e := Embed{}
- e.Hi() //@hoverdef("Hi", AHi)
- e.B() //@hoverdef("B", AB)
- e.Field //@hoverdef("Field", AField)
- e.Field2 //@hoverdef("Field2", AField2)
- e.Hello() //@hoverdef("Hello", AHello)
- e.Hey() //@hoverdef("Hey", AHey)
- e.Goodbye() //@hoverdef("Goodbye", AGoodbye)
-}
-
-type aAlias = a.A //@mark(aAlias, "aAlias")
-
-type S1 struct { //@S1
- F1 int //@mark(S1F1, "F1")
- S2 //@godef("S2", S2),mark(S1S2, "S2")
- a.A //@godef("A", AString)
- aAlias //@godef("a", aAlias)
-}
-
-type S2 struct { //@S2
- F1 string //@mark(S2F1, "F1")
- F2 int //@mark(S2F2, "F2")
- *a.A //@godef("A", AString),godef("a",AImport)
-}
-
-type S3 struct {
- F1 struct {
- a.A //@godef("A", AString)
- }
-}
-
-func Bar() {
- a.AStuff() //@godef("AStuff", AStuff)
- var x S1 //@godef("S1", S1)
- _ = x.S2 //@godef("S2", S1S2)
- _ = x.F1 //@godef("F1", S1F1)
- _ = x.F2 //@godef("F2", S2F2)
- _ = x.S2.F1 //@godef("F1", S2F1)
-
- var _ *myFoo.StructFoo //@godef("myFoo", myFoo)
-}
-
-const X = 0 //@mark(bX, "X"),godef("X", bX)
diff --git a/internal/lsp/testdata/godef/b/b.go.golden b/internal/lsp/testdata/godef/b/b.go.golden
deleted file mode 100644
index 5f7669b77..000000000
--- a/internal/lsp/testdata/godef/b/b.go.golden
+++ /dev/null
@@ -1,454 +0,0 @@
--- AB-hoverdef --
-```go
-func (a.I).B()
-```
-
-\@mark\(AB, \"B\"\)
-
-[`(a.I).B` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#I.B)
--- AField-hoverdef --
-```go
-field Field int
-```
-
-\@mark\(AField, \"Field\"\)
-
-[`(a.S).Field` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#S.Field)
--- AField2-hoverdef --
-```go
-field Field2 int
-```
-
-\@mark\(AField2, \"Field2\"\)
-
-[`(a.R).Field2` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#R.Field2)
--- AGoodbye-hoverdef --
-```go
-func (a.H).Goodbye()
-```
-
-\@mark\(AGoodbye, \"Goodbye\"\)
-
-[`(a.H).Goodbye` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#H.Goodbye)
--- AHello-hoverdef --
-```go
-func (a.J).Hello()
-```
-
-\@mark\(AHello, \"Hello\"\)
-
-[`(a.J).Hello` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#J.Hello)
--- AHey-hoverdef --
-```go
-func (a.R).Hey()
-```
-
-[`(a.R).Hey` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#R.Hey)
--- AHi-hoverdef --
-```go
-func (a.A).Hi()
-```
-
-[`(a.A).Hi` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#A.Hi)
--- AImport-definition --
-godef/b/b.go:5:2-43: defined here as ```go
-package a ("golang.org/x/tools/internal/lsp/godef/a")
-```
-
-[`a` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls)
--- AImport-definition-json --
-{
- "span": {
- "uri": "file://godef/b/b.go",
- "start": {
- "line": 5,
- "column": 2,
- "offset": 112
- },
- "end": {
- "line": 5,
- "column": 43,
- "offset": 153
- }
- },
- "description": "```go\npackage a (\"golang.org/x/tools/internal/lsp/godef/a\")\n```\n\n[`a` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls)"
-}
-
--- AImport-hoverdef --
-```go
-package a ("golang.org/x/tools/internal/lsp/godef/a")
-```
-
-[`a` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls)
--- AString-definition --
-godef/a/a.go:26:6-7: defined here as ```go
-type A string
-```
-
-\@mark\(AString, \"A\"\)
-
-[`a.A` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#A)
--- AString-definition-json --
-{
- "span": {
- "uri": "file://godef/a/a.go",
- "start": {
- "line": 26,
- "column": 6,
- "offset": 467
- },
- "end": {
- "line": 26,
- "column": 7,
- "offset": 468
- }
- },
- "description": "```go\ntype A string\n```\n\n\\@mark\\(AString, \\\"A\\\"\\)\n\n[`a.A` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#A)"
-}
-
--- AString-hoverdef --
-```go
-type A string
-```
-
-\@mark\(AString, \"A\"\)
-
-[`a.A` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#A)
--- AStuff-definition --
-godef/a/a.go:28:6-12: defined here as ```go
-func a.AStuff()
-```
-
-[`a.AStuff` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#AStuff)
--- AStuff-definition-json --
-{
- "span": {
- "uri": "file://godef/a/a.go",
- "start": {
- "line": 28,
- "column": 6,
- "offset": 504
- },
- "end": {
- "line": 28,
- "column": 12,
- "offset": 510
- }
- },
- "description": "```go\nfunc a.AStuff()\n```\n\n[`a.AStuff` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#AStuff)"
-}
-
--- AStuff-hoverdef --
-```go
-func a.AStuff()
-```
-
-[`a.AStuff` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#AStuff)
--- S1-definition --
-godef/b/b.go:27:6-8: defined here as ```go
-type S1 struct {
- F1 int //@mark(S1F1, "F1")
- S2 //@godef("S2", S2),mark(S1S2, "S2")
- a.A //@godef("A", AString)
- aAlias //@godef("a", aAlias)
-}
-```
-
-[`b.S1` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S1)
--- S1-definition-json --
-{
- "span": {
- "uri": "file://godef/b/b.go",
- "start": {
- "line": 27,
- "column": 6,
- "offset": 587
- },
- "end": {
- "line": 27,
- "column": 8,
- "offset": 589
- }
- },
- "description": "```go\ntype S1 struct {\n\tF1 int //@mark(S1F1, \"F1\")\n\tS2 //@godef(\"S2\", S2),mark(S1S2, \"S2\")\n\ta.A //@godef(\"A\", AString)\n\taAlias //@godef(\"a\", aAlias)\n}\n```\n\n[`b.S1` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S1)"
-}
-
--- S1-hoverdef --
-```go
-type S1 struct {
- F1 int //@mark(S1F1, "F1")
- S2 //@godef("S2", S2),mark(S1S2, "S2")
- a.A //@godef("A", AString)
- aAlias //@godef("a", aAlias)
-}
-```
-
-[`b.S1` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S1)
--- S1F1-definition --
-godef/b/b.go:28:2-4: defined here as ```go
-field F1 int
-```
-
-\@mark\(S1F1, \"F1\"\)
-
-[`(b.S1).F1` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S1.F1)
--- S1F1-definition-json --
-{
- "span": {
- "uri": "file://godef/b/b.go",
- "start": {
- "line": 28,
- "column": 2,
- "offset": 606
- },
- "end": {
- "line": 28,
- "column": 4,
- "offset": 608
- }
- },
- "description": "```go\nfield F1 int\n```\n\n\\@mark\\(S1F1, \\\"F1\\\"\\)\n\n[`(b.S1).F1` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S1.F1)"
-}
-
--- S1F1-hoverdef --
-```go
-field F1 int
-```
-
-\@mark\(S1F1, \"F1\"\)
-
-[`(b.S1).F1` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S1.F1)
--- S1S2-definition --
-godef/b/b.go:29:2-4: defined here as ```go
-field S2 S2
-```
-
-\@godef\(\"S2\", S2\),mark\(S1S2, \"S2\"\)
-
-[`(b.S1).S2` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S1.S2)
--- S1S2-definition-json --
-{
- "span": {
- "uri": "file://godef/b/b.go",
- "start": {
- "line": 29,
- "column": 2,
- "offset": 638
- },
- "end": {
- "line": 29,
- "column": 4,
- "offset": 640
- }
- },
- "description": "```go\nfield S2 S2\n```\n\n\\@godef\\(\\\"S2\\\", S2\\),mark\\(S1S2, \\\"S2\\\"\\)\n\n[`(b.S1).S2` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S1.S2)"
-}
-
--- S1S2-hoverdef --
-```go
-field S2 S2
-```
-
-\@godef\(\"S2\", S2\),mark\(S1S2, \"S2\"\)
-
-[`(b.S1).S2` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S1.S2)
--- S2-definition --
-godef/b/b.go:34:6-8: defined here as ```go
-type S2 struct {
- F1 string //@mark(S2F1, "F1")
- F2 int //@mark(S2F2, "F2")
- *a.A //@godef("A", AString),godef("a",AImport)
-}
-```
-
-[`b.S2` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S2)
--- S2-definition-json --
-{
- "span": {
- "uri": "file://godef/b/b.go",
- "start": {
- "line": 34,
- "column": 6,
- "offset": 762
- },
- "end": {
- "line": 34,
- "column": 8,
- "offset": 764
- }
- },
- "description": "```go\ntype S2 struct {\n\tF1 string //@mark(S2F1, \"F1\")\n\tF2 int //@mark(S2F2, \"F2\")\n\t*a.A //@godef(\"A\", AString),godef(\"a\",AImport)\n}\n```\n\n[`b.S2` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S2)"
-}
-
--- S2-hoverdef --
-```go
-type S2 struct {
- F1 string //@mark(S2F1, "F1")
- F2 int //@mark(S2F2, "F2")
- *a.A //@godef("A", AString),godef("a",AImport)
-}
-```
-
-[`b.S2` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S2)
--- S2F1-definition --
-godef/b/b.go:35:2-4: defined here as ```go
-field F1 string
-```
-
-\@mark\(S2F1, \"F1\"\)
-
-[`(b.S2).F1` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S2.F1)
--- S2F1-definition-json --
-{
- "span": {
- "uri": "file://godef/b/b.go",
- "start": {
- "line": 35,
- "column": 2,
- "offset": 781
- },
- "end": {
- "line": 35,
- "column": 4,
- "offset": 783
- }
- },
- "description": "```go\nfield F1 string\n```\n\n\\@mark\\(S2F1, \\\"F1\\\"\\)\n\n[`(b.S2).F1` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S2.F1)"
-}
-
--- S2F1-hoverdef --
-```go
-field F1 string
-```
-
-\@mark\(S2F1, \"F1\"\)
-
-[`(b.S2).F1` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S2.F1)
--- S2F2-definition --
-godef/b/b.go:36:2-4: defined here as ```go
-field F2 int
-```
-
-\@mark\(S2F2, \"F2\"\)
-
-[`(b.S2).F2` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S2.F2)
--- S2F2-definition-json --
-{
- "span": {
- "uri": "file://godef/b/b.go",
- "start": {
- "line": 36,
- "column": 2,
- "offset": 814
- },
- "end": {
- "line": 36,
- "column": 4,
- "offset": 816
- }
- },
- "description": "```go\nfield F2 int\n```\n\n\\@mark\\(S2F2, \\\"F2\\\"\\)\n\n[`(b.S2).F2` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S2.F2)"
-}
-
--- S2F2-hoverdef --
-```go
-field F2 int
-```
-
-\@mark\(S2F2, \"F2\"\)
-
-[`(b.S2).F2` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S2.F2)
--- aAlias-definition --
-godef/b/b.go:25:6-12: defined here as ```go
-type aAlias = a.A
-```
-
-\@mark\(aAlias, \"aAlias\"\)
--- aAlias-definition-json --
-{
- "span": {
- "uri": "file://godef/b/b.go",
- "start": {
- "line": 25,
- "column": 6,
- "offset": 542
- },
- "end": {
- "line": 25,
- "column": 12,
- "offset": 548
- }
- },
- "description": "```go\ntype aAlias = a.A\n```\n\n\\@mark\\(aAlias, \\\"aAlias\\\"\\)"
-}
-
--- aAlias-hoverdef --
-```go
-type aAlias = a.A
-```
-
-\@mark\(aAlias, \"aAlias\"\)
--- bX-definition --
-godef/b/b.go:57:7-8: defined here as ```go
-const X untyped int = 0
-```
-
-\@mark\(bX, \"X\"\),godef\(\"X\", bX\)
-
-[`b.X` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#X)
--- bX-definition-json --
-{
- "span": {
- "uri": "file://godef/b/b.go",
- "start": {
- "line": 57,
- "column": 7,
- "offset": 1249
- },
- "end": {
- "line": 57,
- "column": 8,
- "offset": 1250
- }
- },
- "description": "```go\nconst X untyped int = 0\n```\n\n\\@mark\\(bX, \\\"X\\\"\\),godef\\(\\\"X\\\", bX\\)\n\n[`b.X` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#X)"
-}
-
--- bX-hoverdef --
-```go
-const X untyped int = 0
-```
-
-\@mark\(bX, \"X\"\),godef\(\"X\", bX\)
-
-[`b.X` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#X)
--- myFoo-definition --
-godef/b/b.go:4:2-7: defined here as ```go
-package myFoo ("golang.org/x/tools/internal/lsp/foo")
-```
-
-[`myFoo` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/foo?utm_source=gopls)
--- myFoo-definition-json --
-{
- "span": {
- "uri": "file://godef/b/b.go",
- "start": {
- "line": 4,
- "column": 2,
- "offset": 21
- },
- "end": {
- "line": 4,
- "column": 7,
- "offset": 26
- }
- },
- "description": "```go\npackage myFoo (\"golang.org/x/tools/internal/lsp/foo\")\n```\n\n[`myFoo` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/foo?utm_source=gopls)"
-}
-
--- myFoo-hoverdef --
-```go
-package myFoo ("golang.org/x/tools/internal/lsp/foo")
-```
-
-[`myFoo` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/foo?utm_source=gopls)
diff --git a/internal/lsp/testdata/godef/b/c.go b/internal/lsp/testdata/godef/b/c.go
deleted file mode 100644
index c8daf6242..000000000
--- a/internal/lsp/testdata/godef/b/c.go
+++ /dev/null
@@ -1,8 +0,0 @@
-package b
-
-// This is the in-editor version of the file.
-// The on-disk version is in c.go.saved.
-
-var _ = S1{ //@godef("S1", S1)
- F1: 99, //@godef("F1", S1F1)
-}
diff --git a/internal/lsp/testdata/godef/b/c.go.golden b/internal/lsp/testdata/godef/b/c.go.golden
deleted file mode 100644
index e6205b726..000000000
--- a/internal/lsp/testdata/godef/b/c.go.golden
+++ /dev/null
@@ -1,74 +0,0 @@
--- S1-definition --
-godef/b/b.go:27:6-8: defined here as ```go
-type S1 struct {
- F1 int //@mark(S1F1, "F1")
- S2 //@godef("S2", S2),mark(S1S2, "S2")
- a.A //@godef("A", AString)
- aAlias //@godef("a", aAlias)
-}
-```
-
-[`b.S1` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S1)
--- S1-definition-json --
-{
- "span": {
- "uri": "file://godef/b/b.go",
- "start": {
- "line": 27,
- "column": 6,
- "offset": 587
- },
- "end": {
- "line": 27,
- "column": 8,
- "offset": 589
- }
- },
- "description": "```go\ntype S1 struct {\n\tF1 int //@mark(S1F1, \"F1\")\n\tS2 //@godef(\"S2\", S2),mark(S1S2, \"S2\")\n\ta.A //@godef(\"A\", AString)\n\taAlias //@godef(\"a\", aAlias)\n}\n```\n\n[`b.S1` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S1)"
-}
-
--- S1-hoverdef --
-```go
-type S1 struct {
- F1 int //@mark(S1F1, "F1")
- S2 //@godef("S2", S2),mark(S1S2, "S2")
- a.A //@godef("A", AString)
- aAlias //@godef("a", aAlias)
-}
-```
-
-[`b.S1` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S1)
--- S1F1-definition --
-godef/b/b.go:28:2-4: defined here as ```go
-field F1 int
-```
-
-\@mark\(S1F1, \"F1\"\)
-
-[`(b.S1).F1` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S1.F1)
--- S1F1-definition-json --
-{
- "span": {
- "uri": "file://godef/b/b.go",
- "start": {
- "line": 28,
- "column": 2,
- "offset": 606
- },
- "end": {
- "line": 28,
- "column": 4,
- "offset": 608
- }
- },
- "description": "```go\nfield F1 int\n```\n\n\\@mark\\(S1F1, \\\"F1\\\"\\)\n\n[`(b.S1).F1` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S1.F1)"
-}
-
--- S1F1-hoverdef --
-```go
-field F1 int
-```
-
-\@mark\(S1F1, \"F1\"\)
-
-[`(b.S1).F1` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/b?utm_source=gopls#S1.F1)
diff --git a/internal/lsp/testdata/godef/b/c.go.saved b/internal/lsp/testdata/godef/b/c.go.saved
deleted file mode 100644
index ff1a8794b..000000000
--- a/internal/lsp/testdata/godef/b/c.go.saved
+++ /dev/null
@@ -1,7 +0,0 @@
-package b
-
-// This is the on-disk version of c.go, which represents
-// the in-editor version of the file.
-
-}
-
diff --git a/internal/lsp/testdata/godef/b/e.go b/internal/lsp/testdata/godef/b/e.go
deleted file mode 100644
index 7b96cd7e8..000000000
--- a/internal/lsp/testdata/godef/b/e.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package b
-
-import (
- "fmt"
-
- "golang.org/x/tools/internal/lsp/godef/a"
-)
-
-func useThings() {
- t := a.Thing{} //@mark(bStructType, "ing")
- fmt.Print(t.Member) //@mark(bMember, "ember")
- fmt.Print(a.Other) //@mark(bVar, "ther")
- a.Things() //@mark(bFunc, "ings")
-}
-
-/*@
-godef(bStructType, Thing)
-godef(bMember, Member)
-godef(bVar, Other)
-godef(bFunc, Things)
-*/
-
-func _() {
- var x interface{} //@mark(eInterface, "interface{}")
- switch x := x.(type) { //@hoverdef("x", eInterface)
- case string: //@mark(eString, "string")
- fmt.Println(x) //@hoverdef("x", eString)
- case int: //@mark(eInt, "int")
- fmt.Println(x) //@hoverdef("x", eInt)
- }
-}
diff --git a/internal/lsp/testdata/godef/b/e.go.golden b/internal/lsp/testdata/godef/b/e.go.golden
deleted file mode 100644
index f9af7b743..000000000
--- a/internal/lsp/testdata/godef/b/e.go.golden
+++ /dev/null
@@ -1,144 +0,0 @@
--- Member-definition --
-godef/a/d.go:6:2-8: defined here as ```go
-field Member string
-```
-
-\@Member
-
-[`(a.Thing).Member` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Thing.Member)
--- Member-definition-json --
-{
- "span": {
- "uri": "file://godef/a/d.go",
- "start": {
- "line": 6,
- "column": 2,
- "offset": 90
- },
- "end": {
- "line": 6,
- "column": 8,
- "offset": 96
- }
- },
- "description": "```go\nfield Member string\n```\n\n\\@Member\n\n[`(a.Thing).Member` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Thing.Member)"
-}
-
--- Member-hoverdef --
-```go
-field Member string
-```
-
-\@Member
-
-[`(a.Thing).Member` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Thing.Member)
--- Other-definition --
-godef/a/d.go:9:5-10: defined here as ```go
-var a.Other a.Thing
-```
-
-\@Other
-
-[`a.Other` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Other)
--- Other-definition-json --
-{
- "span": {
- "uri": "file://godef/a/d.go",
- "start": {
- "line": 9,
- "column": 5,
- "offset": 121
- },
- "end": {
- "line": 9,
- "column": 10,
- "offset": 126
- }
- },
- "description": "```go\nvar a.Other a.Thing\n```\n\n\\@Other\n\n[`a.Other` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Other)"
-}
-
--- Other-hoverdef --
-```go
-var a.Other a.Thing
-```
-
-\@Other
-
-[`a.Other` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Other)
--- Thing-definition --
-godef/a/d.go:5:6-11: defined here as ```go
-type Thing struct {
- Member string //@Member
-}
-```
-
-[`a.Thing` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Thing)
--- Thing-definition-json --
-{
- "span": {
- "uri": "file://godef/a/d.go",
- "start": {
- "line": 5,
- "column": 6,
- "offset": 65
- },
- "end": {
- "line": 5,
- "column": 11,
- "offset": 70
- }
- },
- "description": "```go\ntype Thing struct {\n\tMember string //@Member\n}\n```\n\n[`a.Thing` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Thing)"
-}
-
--- Thing-hoverdef --
-```go
-type Thing struct {
- Member string //@Member
-}
-```
-
-[`a.Thing` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Thing)
--- Things-definition --
-godef/a/d.go:11:6-12: defined here as ```go
-func a.Things(val []string) []a.Thing
-```
-
-[`a.Things` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Things)
--- Things-definition-json --
-{
- "span": {
- "uri": "file://godef/a/d.go",
- "start": {
- "line": 11,
- "column": 6,
- "offset": 148
- },
- "end": {
- "line": 11,
- "column": 12,
- "offset": 154
- }
- },
- "description": "```go\nfunc a.Things(val []string) []a.Thing\n```\n\n[`a.Things` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Things)"
-}
-
--- Things-hoverdef --
-```go
-func a.Things(val []string) []a.Thing
-```
-
-[`a.Things` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Things)
--- eInt-hoverdef --
-```go
-var x int
-```
--- eInterface-hoverdef --
-```go
-var x interface{}
-```
--- eString-hoverdef --
-```go
-var x string
-```
diff --git a/internal/lsp/testdata/godef/b/h.go b/internal/lsp/testdata/godef/b/h.go
deleted file mode 100644
index c8cbe850f..000000000
--- a/internal/lsp/testdata/godef/b/h.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package b
-
-import . "golang.org/x/tools/internal/lsp/godef/a"
-
-func _() {
- // variable of type a.A
- var _ A //@mark(AVariable, "_"),hoverdef("_", AVariable)
-
- AStuff() //@hoverdef("AStuff", AStuff)
-}
diff --git a/internal/lsp/testdata/godef/b/h.go.golden b/internal/lsp/testdata/godef/b/h.go.golden
deleted file mode 100644
index f32f0264f..000000000
--- a/internal/lsp/testdata/godef/b/h.go.golden
+++ /dev/null
@@ -1,12 +0,0 @@
--- AStuff-hoverdef --
-```go
-func AStuff()
-```
-
-[`a.AStuff` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#AStuff)
--- AVariable-hoverdef --
-```go
-var _ A
-```
-
-variable of type a\.A
diff --git a/internal/lsp/testdata/godef/broken/unclosedIf.go.golden b/internal/lsp/testdata/godef/broken/unclosedIf.go.golden
deleted file mode 100644
index 5c3329d8b..000000000
--- a/internal/lsp/testdata/godef/broken/unclosedIf.go.golden
+++ /dev/null
@@ -1,30 +0,0 @@
--- myUnclosedIf-definition --
-godef/broken/unclosedIf.go:7:7-19: defined here as ```go
-var myUnclosedIf string
-```
-
-\@myUnclosedIf
--- myUnclosedIf-definition-json --
-{
- "span": {
- "uri": "file://godef/broken/unclosedIf.go",
- "start": {
- "line": 7,
- "column": 7,
- "offset": 68
- },
- "end": {
- "line": 7,
- "column": 19,
- "offset": 80
- }
- },
- "description": "```go\nvar myUnclosedIf string\n```\n\n\\@myUnclosedIf"
-}
-
--- myUnclosedIf-hoverdef --
-```go
-var myUnclosedIf string
-```
-
-\@myUnclosedIf
diff --git a/internal/lsp/testdata/godef/hover_generics/hover.go b/internal/lsp/testdata/godef/hover_generics/hover.go
deleted file mode 100644
index 7400e1acd..000000000
--- a/internal/lsp/testdata/godef/hover_generics/hover.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package hover
-
-type value[T any] struct { //@mark(value, "value"),hoverdef("value", value),mark(valueTdecl, "T"),hoverdef("T",valueTdecl)
- val T //@mark(valueTparam, "T"),hoverdef("T", valueTparam)
- Q int //@mark(valueQfield, "Q"),hoverdef("Q", valueQfield)
-}
-
-type Value[T any] struct { //@mark(ValueTdecl, "T"),hoverdef("T",ValueTdecl)
- val T //@mark(ValueTparam, "T"),hoverdef("T", ValueTparam)
- Q int //@mark(ValueQfield, "Q"),hoverdef("Q", ValueQfield)
-}
-
-func F[P interface{ ~int | string }]() { //@mark(Pparam, "P"),hoverdef("P",Pparam)
- var _ P //@mark(Pvar, "P"),hoverdef("P",Pvar)
-}
diff --git a/internal/lsp/testdata/godef/hover_generics/hover.go.golden b/internal/lsp/testdata/godef/hover_generics/hover.go.golden
deleted file mode 100644
index cfebcc472..000000000
--- a/internal/lsp/testdata/godef/hover_generics/hover.go.golden
+++ /dev/null
@@ -1,45 +0,0 @@
--- Pparam-hoverdef --
-```go
-type parameter P interface{~int|string}
-```
--- Pvar-hoverdef --
-```go
-type parameter P interface{~int|string}
-```
--- ValueQfield-hoverdef --
-```go
-field Q int
-```
-
-\@mark\(ValueQfield, \"Q\"\),hoverdef\(\"Q\", ValueQfield\)
-
-[`(hover.Value).Q` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/hover_generics?utm_source=gopls#Value.Q)
--- ValueTdecl-hoverdef --
-```go
-type parameter T any
-```
--- ValueTparam-hoverdef --
-```go
-type parameter T any
-```
--- value-hoverdef --
-```go
-type value[T any] struct {
- val T //@mark(valueTparam, "T"),hoverdef("T", valueTparam)
- Q int //@mark(valueQfield, "Q"),hoverdef("Q", valueQfield)
-}
-```
--- valueQfield-hoverdef --
-```go
-field Q int
-```
-
-\@mark\(valueQfield, \"Q\"\),hoverdef\(\"Q\", valueQfield\)
--- valueTdecl-hoverdef --
-```go
-type parameter T any
-```
--- valueTparam-hoverdef --
-```go
-type parameter T any
-```
diff --git a/internal/lsp/testdata/godef/infer_generics/inferred.go b/internal/lsp/testdata/godef/infer_generics/inferred.go
deleted file mode 100644
index 2d92a9590..000000000
--- a/internal/lsp/testdata/godef/infer_generics/inferred.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package inferred
-
-func app[S interface{ ~[]E }, E interface{}](s S, e E) S {
- return append(s, e)
-}
-
-func _() {
- _ = app[[]int] //@mark(constrInfer, "app"),hoverdef("app", constrInfer)
- _ = app[[]int, int] //@mark(instance, "app"),hoverdef("app", instance)
- _ = app[[]int]([]int{}, 0) //@mark(partialInfer, "app"),hoverdef("app", partialInfer)
- _ = app([]int{}, 0) //@mark(argInfer, "app"),hoverdef("app", argInfer)
-}
diff --git a/internal/lsp/testdata/godef/infer_generics/inferred.go.golden b/internal/lsp/testdata/godef/infer_generics/inferred.go.golden
deleted file mode 100644
index 4a36ff460..000000000
--- a/internal/lsp/testdata/godef/infer_generics/inferred.go.golden
+++ /dev/null
@@ -1,20 +0,0 @@
--- argInfer-hoverdef --
-```go
-func app(s []int, e int) []int // func[S interface{~[]E}, E interface{}](s S, e E) S
-```
--- constrInf-hoverdef --
-```go
-func app(s []int, e int) []int // func[S₁ interface{~[]E₂}, E₂ interface{}](s S₁, e E₂) S₁
-```
--- constrInfer-hoverdef --
-```go
-func app(s []int, e int) []int // func[S interface{~[]E}, E interface{}](s S, e E) S
-```
--- instance-hoverdef --
-```go
-func app(s []int, e int) []int // func[S interface{~[]E}, E interface{}](s S, e E) S
-```
--- partialInfer-hoverdef --
-```go
-func app(s []int, e int) []int // func[S interface{~[]E}, E interface{}](s S, e E) S
-```
diff --git a/internal/lsp/testdata/good/good1.go b/internal/lsp/testdata/good/good1.go
deleted file mode 100644
index c4664a7e5..000000000
--- a/internal/lsp/testdata/good/good1.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package good //@diag("package", "no_diagnostics", "", "error")
-
-import (
- "golang.org/x/tools/internal/lsp/types" //@item(types_import, "types", "\"golang.org/x/tools/internal/lsp/types\"", "package")
-)
-
-func random() int { //@item(good_random, "random", "func() int", "func")
- _ = "random() int" //@prepare("random", "", "")
- y := 6 + 7 //@prepare("7", "", "")
- return y //@prepare("return", "","")
-}
-
-func random2(y int) int { //@item(good_random2, "random2", "func(y int) int", "func"),item(good_y_param, "y", "int", "var")
- //@complete("", good_y_param, types_import, good_random, good_random2, good_stuff)
- var b types.Bob = &types.X{} //@prepare("ypes","types", "types")
- if _, ok := b.(*types.X); ok { //@complete("X", X_struct, Y_struct, Bob_interface, CoolAlias)
- }
-
- return y
-}
diff --git a/internal/lsp/testdata/implementation/implementation.go b/internal/lsp/testdata/implementation/implementation.go
deleted file mode 100644
index c3229121a..000000000
--- a/internal/lsp/testdata/implementation/implementation.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package implementation
-
-import "golang.org/x/tools/internal/lsp/implementation/other"
-
-type ImpP struct{} //@ImpP,implementations("ImpP", Laugher, OtherLaugher)
-
-func (*ImpP) Laugh() { //@mark(LaughP, "Laugh"),implementations("Laugh", Laugh, OtherLaugh)
-}
-
-type ImpS struct{} //@ImpS,implementations("ImpS", Laugher, OtherLaugher)
-
-func (ImpS) Laugh() { //@mark(LaughS, "Laugh"),implementations("Laugh", Laugh, OtherLaugh)
-}
-
-type Laugher interface { //@Laugher,implementations("Laugher", ImpP, OtherImpP, ImpS, OtherImpS)
- Laugh() //@Laugh,implementations("Laugh", LaughP, OtherLaughP, LaughS, OtherLaughS)
-}
-
-type Foo struct { //@implementations("Foo", Joker)
- other.Foo
-}
-
-type Joker interface { //@Joker
- Joke() //@Joke,implementations("Joke", ImpJoker)
-}
-
-type cryer int //@implementations("cryer", Cryer)
-
-func (cryer) Cry(other.CryType) {} //@mark(CryImpl, "Cry"),implementations("Cry", Cry)
-
-type Empty interface{} //@implementations("Empty")
diff --git a/internal/lsp/testdata/importedcomplit/imported_complit.go.in b/internal/lsp/testdata/importedcomplit/imported_complit.go.in
deleted file mode 100644
index 80d85245c..000000000
--- a/internal/lsp/testdata/importedcomplit/imported_complit.go.in
+++ /dev/null
@@ -1,42 +0,0 @@
-package importedcomplit
-
-import (
- "golang.org/x/tools/internal/lsp/foo"
-
- // import completions
- "fm" //@complete("\" //", fmtImport)
- "go/pars" //@complete("\" //", parserImport)
- "golang.org/x/tools/internal/lsp/signa" //@complete("na\" //", signatureImport)
- "golang.org/x/too" //@complete("\" //", toolsImport)
- "crypto/elli" //@complete("\" //", cryptoImport)
- "golang.org/x/tools/internal/lsp/sign" //@complete("\" //", signatureImport)
- "golang.org/x/tools/internal/lsp/sign" //@complete("ols", toolsImport)
- namedParser "go/pars" //@complete("\" //", parserImport)
-)
-
-func _() {
- var V int //@item(icVVar, "V", "int", "var")
- _ = foo.StructFoo{V} //@complete("}", Value, icVVar)
-}
-
-func _() {
- var (
- aa string //@item(icAAVar, "aa", "string", "var")
- ab int //@item(icABVar, "ab", "int", "var")
- )
-
- _ = foo.StructFoo{a} //@complete("}", abVar, aaVar)
-
- var s struct {
- AA string //@item(icFieldAA, "AA", "string", "field")
- AB int //@item(icFieldAB, "AB", "int", "field")
- }
-
- _ = foo.StructFoo{s.} //@complete("}", icFieldAB, icFieldAA)
-}
-
-/* "fmt" */ //@item(fmtImport, "fmt", "\"fmt\"", "package")
-/* "go/parser" */ //@item(parserImport, "parser", "\"go/parser\"", "package")
-/* "golang.org/x/tools/internal/lsp/signature" */ //@item(signatureImport, "signature", "\"golang.org/x/tools/internal/lsp/signature\"", "package")
-/* "golang.org/x/tools/" */ //@item(toolsImport, "tools/", "\"golang.org/x/tools/\"", "package")
-/* "crypto/elliptic" */ //@item(cryptoImport, "elliptic", "\"crypto/elliptic\"", "package")
diff --git a/internal/lsp/testdata/keywords/keywords.go b/internal/lsp/testdata/keywords/keywords.go
deleted file mode 100644
index 1fa2c12ba..000000000
--- a/internal/lsp/testdata/keywords/keywords.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package keywords
-
-//@rank("", type),rank("", func),rank("", var),rank("", const),rank("", import)
-
-func _() {
- var test int //@rank(" //", int, interface)
- var tChan chan int
- var _ m //@complete(" //", map)
- var _ f //@complete(" //", func)
- var _ c //@complete(" //", chan)
-
- var _ str //@rank(" //", string, struct)
-
- type _ int //@rank(" //", interface, int)
-
- type _ str //@rank(" //", struct, string)
-
- switch test {
- case 1: // TODO: trying to complete case here will break because the parser wont return *ast.Ident
- b //@complete(" //", break)
- case 2:
- f //@complete(" //", fallthrough, for)
- r //@complete(" //", return)
- d //@complete(" //", default, defer)
- c //@complete(" //", case, const)
- }
-
- switch test.(type) {
- case fo: //@complete(":")
- case int:
- b //@complete(" //", break)
- case int32:
- f //@complete(" //", for)
- d //@complete(" //", default, defer)
- r //@complete(" //", return)
- c //@complete(" //", case, const)
- }
-
- select {
- case <-tChan:
- b //@complete(" //", break)
- c //@complete(" //", case, const)
- }
-
- for index := 0; index < test; index++ {
- c //@complete(" //", const, continue)
- b //@complete(" //", break)
- }
-
- for range []int{} {
- c //@complete(" //", const, continue)
- b //@complete(" //", break)
- }
-
- // Test function level keywords
-
- //Using 2 characters to test because map output order is random
- sw //@complete(" //", switch)
- se //@complete(" //", select)
-
- f //@complete(" //", for)
- d //@complete(" //", defer)
- g //@rank(" //", go),rank(" //", goto)
- r //@complete(" //", return)
- i //@complete(" //", if)
- e //@complete(" //", else)
- v //@complete(" //", var)
- c //@complete(" //", const)
-
- for i := r //@complete(" //", range)
-}
-
-/* package */ //@item(package, "package", "", "keyword")
-/* import */ //@item(import, "import", "", "keyword")
-/* func */ //@item(func, "func", "", "keyword")
-/* type */ //@item(type, "type", "", "keyword")
-/* var */ //@item(var, "var", "", "keyword")
-/* const */ //@item(const, "const", "", "keyword")
-/* break */ //@item(break, "break", "", "keyword")
-/* default */ //@item(default, "default", "", "keyword")
-/* case */ //@item(case, "case", "", "keyword")
-/* defer */ //@item(defer, "defer", "", "keyword")
-/* go */ //@item(go, "go", "", "keyword")
-/* for */ //@item(for, "for", "", "keyword")
-/* if */ //@item(if, "if", "", "keyword")
-/* else */ //@item(else, "else", "", "keyword")
-/* switch */ //@item(switch, "switch", "", "keyword")
-/* select */ //@item(select, "select", "", "keyword")
-/* fallthrough */ //@item(fallthrough, "fallthrough", "", "keyword")
-/* continue */ //@item(continue, "continue", "", "keyword")
-/* return */ //@item(return, "return", "", "keyword")
-/* var */ //@item(var, "var", "", "keyword")
-/* const */ //@item(const, "const", "", "keyword")
-/* goto */ //@item(goto, "goto", "", "keyword")
-/* struct */ //@item(struct, "struct", "", "keyword")
-/* interface */ //@item(interface, "interface", "", "keyword")
-/* map */ //@item(map, "map", "", "keyword")
-/* func */ //@item(func, "func", "", "keyword")
-/* chan */ //@item(chan, "chan", "", "keyword")
-/* range */ //@item(range, "range", "", "keyword")
diff --git a/internal/lsp/testdata/links/links.go b/internal/lsp/testdata/links/links.go
deleted file mode 100644
index 89492bafe..000000000
--- a/internal/lsp/testdata/links/links.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package links
-
-import (
- "fmt" //@link(`fmt`,"https://pkg.go.dev/fmt?utm_source=gopls")
-
- "golang.org/x/tools/internal/lsp/foo" //@link(`golang.org/x/tools/internal/lsp/foo`,`https://pkg.go.dev/golang.org/x/tools/internal/lsp/foo?utm_source=gopls`)
-
- _ "database/sql" //@link(`database/sql`, `https://pkg.go.dev/database/sql?utm_source=gopls`)
-)
-
-var (
- _ fmt.Formatter
- _ foo.StructFoo
- _ errors.Formatter
-)
-
-// Foo function
-func Foo() string {
- /*https://example.com/comment */ //@link("https://example.com/comment","https://example.com/comment")
-
- url := "https://example.com/string_literal" //@link("https://example.com/string_literal","https://example.com/string_literal")
- return url
-
- // TODO(golang/go#1234): Link the relevant issue. //@link("golang/go#1234", "https://github.com/golang/go/issues/1234")
- // TODO(microsoft/vscode-go#12): Another issue. //@link("microsoft/vscode-go#12", "https://github.com/microsoft/vscode-go/issues/12")
-}
diff --git a/internal/lsp/testdata/missingfunction/channels.go b/internal/lsp/testdata/missingfunction/channels.go
deleted file mode 100644
index 436491c19..000000000
--- a/internal/lsp/testdata/missingfunction/channels.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package missingfunction
-
-func channels(s string) {
- undefinedChannels(c()) //@suggestedfix("undefinedChannels", "quickfix")
-}
-
-func c() (<-chan string, chan string) {
- return make(<-chan string), make(chan string)
-}
diff --git a/internal/lsp/testdata/missingfunction/channels.go.golden b/internal/lsp/testdata/missingfunction/channels.go.golden
deleted file mode 100644
index f5078fed1..000000000
--- a/internal/lsp/testdata/missingfunction/channels.go.golden
+++ /dev/null
@@ -1,15 +0,0 @@
--- suggestedfix_channels_4_2 --
-package missingfunction
-
-func channels(s string) {
- undefinedChannels(c()) //@suggestedfix("undefinedChannels", "quickfix")
-}
-
-func undefinedChannels(ch1 <-chan string, ch2 chan string) {
- panic("unimplemented")
-}
-
-func c() (<-chan string, chan string) {
- return make(<-chan string), make(chan string)
-}
-
diff --git a/internal/lsp/testdata/missingfunction/consecutive_params.go b/internal/lsp/testdata/missingfunction/consecutive_params.go
deleted file mode 100644
index d2ec3be32..000000000
--- a/internal/lsp/testdata/missingfunction/consecutive_params.go
+++ /dev/null
@@ -1,6 +0,0 @@
-package missingfunction
-
-func consecutiveParams() {
- var s string
- undefinedConsecutiveParams(s, s) //@suggestedfix("undefinedConsecutiveParams", "quickfix")
-}
diff --git a/internal/lsp/testdata/missingfunction/consecutive_params.go.golden b/internal/lsp/testdata/missingfunction/consecutive_params.go.golden
deleted file mode 100644
index 14a766496..000000000
--- a/internal/lsp/testdata/missingfunction/consecutive_params.go.golden
+++ /dev/null
@@ -1,12 +0,0 @@
--- suggestedfix_consecutive_params_5_2 --
-package missingfunction
-
-func consecutiveParams() {
- var s string
- undefinedConsecutiveParams(s, s) //@suggestedfix("undefinedConsecutiveParams", "quickfix")
-}
-
-func undefinedConsecutiveParams(s1, s2 string) {
- panic("unimplemented")
-}
-
diff --git a/internal/lsp/testdata/missingfunction/error_param.go b/internal/lsp/testdata/missingfunction/error_param.go
deleted file mode 100644
index 9fd943ffb..000000000
--- a/internal/lsp/testdata/missingfunction/error_param.go
+++ /dev/null
@@ -1,6 +0,0 @@
-package missingfunction
-
-func errorParam() {
- var err error
- undefinedErrorParam(err) //@suggestedfix("undefinedErrorParam", "quickfix")
-}
diff --git a/internal/lsp/testdata/missingfunction/error_param.go.golden b/internal/lsp/testdata/missingfunction/error_param.go.golden
deleted file mode 100644
index 2e1271181..000000000
--- a/internal/lsp/testdata/missingfunction/error_param.go.golden
+++ /dev/null
@@ -1,12 +0,0 @@
--- suggestedfix_error_param_5_2 --
-package missingfunction
-
-func errorParam() {
- var err error
- undefinedErrorParam(err) //@suggestedfix("undefinedErrorParam", "quickfix")
-}
-
-func undefinedErrorParam(err error) {
- panic("unimplemented")
-}
-
diff --git a/internal/lsp/testdata/missingfunction/literals.go b/internal/lsp/testdata/missingfunction/literals.go
deleted file mode 100644
index e276eae79..000000000
--- a/internal/lsp/testdata/missingfunction/literals.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package missingfunction
-
-type T struct{}
-
-func literals() {
- undefinedLiterals("hey compiler", T{}, &T{}) //@suggestedfix("undefinedLiterals", "quickfix")
-}
diff --git a/internal/lsp/testdata/missingfunction/literals.go.golden b/internal/lsp/testdata/missingfunction/literals.go.golden
deleted file mode 100644
index 04782b9bf..000000000
--- a/internal/lsp/testdata/missingfunction/literals.go.golden
+++ /dev/null
@@ -1,29 +0,0 @@
--- suggestedfix_literals_10_2 --
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package missingfunction
-
-type T struct{}
-
-func literals() {
- undefinedLiterals("hey compiler", T{}, &T{}) //@suggestedfix("undefinedLiterals", "quickfix")
-}
-
-func undefinedLiterals(s string, t1 T, t2 *T) {
- panic("implement me!")
-}
--- suggestedfix_literals_6_2 --
-package missingfunction
-
-type T struct{}
-
-func literals() {
- undefinedLiterals("hey compiler", T{}, &T{}) //@suggestedfix("undefinedLiterals", "quickfix")
-}
-
-func undefinedLiterals(s string, t1 T, t2 *T) {
- panic("unimplemented")
-}
-
diff --git a/internal/lsp/testdata/missingfunction/operation.go b/internal/lsp/testdata/missingfunction/operation.go
deleted file mode 100644
index 0408219fe..000000000
--- a/internal/lsp/testdata/missingfunction/operation.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package missingfunction
-
-import "time"
-
-func operation() {
- undefinedOperation(10 * time.Second) //@suggestedfix("undefinedOperation", "quickfix")
-}
diff --git a/internal/lsp/testdata/missingfunction/operation.go.golden b/internal/lsp/testdata/missingfunction/operation.go.golden
deleted file mode 100644
index 5e35f3005..000000000
--- a/internal/lsp/testdata/missingfunction/operation.go.golden
+++ /dev/null
@@ -1,29 +0,0 @@
--- suggestedfix_operation_10_2 --
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package missingfunction
-
-import "time"
-
-func operation() {
- undefinedOperation(10 * time.Second) //@suggestedfix("undefinedOperation", "quickfix")
-}
-
-func undefinedOperation(duration time.Duration) {
- panic("implement me!")
-}
--- suggestedfix_operation_6_2 --
-package missingfunction
-
-import "time"
-
-func operation() {
- undefinedOperation(10 * time.Second) //@suggestedfix("undefinedOperation", "quickfix")
-}
-
-func undefinedOperation(duration time.Duration) {
- panic("unimplemented")
-}
-
diff --git a/internal/lsp/testdata/missingfunction/selector.go b/internal/lsp/testdata/missingfunction/selector.go
deleted file mode 100644
index afd1ab61f..000000000
--- a/internal/lsp/testdata/missingfunction/selector.go
+++ /dev/null
@@ -1,6 +0,0 @@
-package missingfunction
-
-func selector() {
- m := map[int]bool{}
- undefinedSelector(m[1]) //@suggestedfix("undefinedSelector", "quickfix")
-}
diff --git a/internal/lsp/testdata/missingfunction/selector.go.golden b/internal/lsp/testdata/missingfunction/selector.go.golden
deleted file mode 100644
index c48691c4e..000000000
--- a/internal/lsp/testdata/missingfunction/selector.go.golden
+++ /dev/null
@@ -1,12 +0,0 @@
--- suggestedfix_selector_5_2 --
-package missingfunction
-
-func selector() {
- m := map[int]bool{}
- undefinedSelector(m[1]) //@suggestedfix("undefinedSelector", "quickfix")
-}
-
-func undefinedSelector(b bool) {
- panic("unimplemented")
-}
-
diff --git a/internal/lsp/testdata/missingfunction/slice.go b/internal/lsp/testdata/missingfunction/slice.go
deleted file mode 100644
index 4a562a2e7..000000000
--- a/internal/lsp/testdata/missingfunction/slice.go
+++ /dev/null
@@ -1,5 +0,0 @@
-package missingfunction
-
-func slice() {
- undefinedSlice([]int{1, 2}) //@suggestedfix("undefinedSlice", "quickfix")
-}
diff --git a/internal/lsp/testdata/missingfunction/slice.go.golden b/internal/lsp/testdata/missingfunction/slice.go.golden
deleted file mode 100644
index 0ccb8611b..000000000
--- a/internal/lsp/testdata/missingfunction/slice.go.golden
+++ /dev/null
@@ -1,11 +0,0 @@
--- suggestedfix_slice_4_2 --
-package missingfunction
-
-func slice() {
- undefinedSlice([]int{1, 2}) //@suggestedfix("undefinedSlice", "quickfix")
-}
-
-func undefinedSlice(i []int) {
- panic("unimplemented")
-}
-
diff --git a/internal/lsp/testdata/missingfunction/tuple.go b/internal/lsp/testdata/missingfunction/tuple.go
deleted file mode 100644
index 1c4782c15..000000000
--- a/internal/lsp/testdata/missingfunction/tuple.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package missingfunction
-
-func tuple() {
- undefinedTuple(b()) //@suggestedfix("undefinedTuple", "quickfix")
-}
-
-func b() (string, error) {
- return "", nil
-}
diff --git a/internal/lsp/testdata/missingfunction/tuple.go.golden b/internal/lsp/testdata/missingfunction/tuple.go.golden
deleted file mode 100644
index 1e12bb708..000000000
--- a/internal/lsp/testdata/missingfunction/tuple.go.golden
+++ /dev/null
@@ -1,15 +0,0 @@
--- suggestedfix_tuple_4_2 --
-package missingfunction
-
-func tuple() {
- undefinedTuple(b()) //@suggestedfix("undefinedTuple", "quickfix")
-}
-
-func undefinedTuple(s string, err error) {
- panic("unimplemented")
-}
-
-func b() (string, error) {
- return "", nil
-}
-
diff --git a/internal/lsp/testdata/missingfunction/unique_params.go b/internal/lsp/testdata/missingfunction/unique_params.go
deleted file mode 100644
index ffaba3f9c..000000000
--- a/internal/lsp/testdata/missingfunction/unique_params.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package missingfunction
-
-func uniqueArguments() {
- var s string
- var i int
- undefinedUniqueArguments(s, i, s) //@suggestedfix("undefinedUniqueArguments", "quickfix")
-}
diff --git a/internal/lsp/testdata/missingfunction/unique_params.go.golden b/internal/lsp/testdata/missingfunction/unique_params.go.golden
deleted file mode 100644
index 74fb91a8e..000000000
--- a/internal/lsp/testdata/missingfunction/unique_params.go.golden
+++ /dev/null
@@ -1,30 +0,0 @@
--- suggestedfix_unique_params_10_2 --
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package missingfunction
-
-func uniqueArguments() {
- var s string
- var i int
- undefinedUniqueArguments(s, i, s) //@suggestedfix("undefinedUniqueArguments", "quickfix")
-}
-
-func undefinedUniqueArguments(s1 string, i int, s2 string) {
- panic("implement me!")
-}
-
--- suggestedfix_unique_params_6_2 --
-package missingfunction
-
-func uniqueArguments() {
- var s string
- var i int
- undefinedUniqueArguments(s, i, s) //@suggestedfix("undefinedUniqueArguments", "quickfix")
-}
-
-func undefinedUniqueArguments(s1 string, i int, s2 string) {
- panic("unimplemented")
-}
-
diff --git a/internal/lsp/testdata/nested_complit/nested_complit.go.in b/internal/lsp/testdata/nested_complit/nested_complit.go.in
deleted file mode 100644
index 1dddd5b1b..000000000
--- a/internal/lsp/testdata/nested_complit/nested_complit.go.in
+++ /dev/null
@@ -1,14 +0,0 @@
-package nested_complit
-
-type ncFoo struct {} //@item(structNCFoo, "ncFoo", "struct{...}", "struct")
-
-type ncBar struct { //@item(structNCBar, "ncBar", "struct{...}", "struct")
- baz []ncFoo
-}
-
-func _() {
- []ncFoo{} //@item(litNCFoo, "[]ncFoo{}", "", "var")
- _ := ncBar{
- baz: [] //@complete(" //", structNCFoo, structNCBar)
- }
-}
diff --git a/internal/lsp/testdata/nodisk/nodisk.overlay.go b/internal/lsp/testdata/nodisk/nodisk.overlay.go
deleted file mode 100644
index f9194be56..000000000
--- a/internal/lsp/testdata/nodisk/nodisk.overlay.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package nodisk
-
-import (
- "golang.org/x/tools/internal/lsp/foo"
-)
-
-func _() {
- foo.Foo() //@complete("F", Foo, IntFoo, StructFoo)
-}
diff --git a/internal/lsp/testdata/noparse/noparse.go.in b/internal/lsp/testdata/noparse/noparse.go.in
deleted file mode 100644
index 7dc23e025..000000000
--- a/internal/lsp/testdata/noparse/noparse.go.in
+++ /dev/null
@@ -1,11 +0,0 @@
-package noparse
-
-func bye(x int) {
- hi()
-}
-
-func stuff() {
- x := 5
-}
-
-func .() {} //@diag(".", "syntax", "expected 'IDENT', found '.'", "error")
diff --git a/internal/lsp/testdata/noparse_format/noparse_format.go.in b/internal/lsp/testdata/noparse_format/noparse_format.go.in
deleted file mode 100644
index 4fc3824d9..000000000
--- a/internal/lsp/testdata/noparse_format/noparse_format.go.in
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build go1.11
-
-package noparse_format //@format("package")
-
-func what() {
- var b int
- if { hi() //@diag("{", "syntax", "missing condition in if statement", "error")
- }
-} \ No newline at end of file
diff --git a/internal/lsp/testdata/references/another/another.go b/internal/lsp/testdata/references/another/another.go
deleted file mode 100644
index 47bda1e4a..000000000
--- a/internal/lsp/testdata/references/another/another.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Package another has another type.
-package another
-
-import (
- other "golang.org/x/tools/internal/lsp/references/other"
-)
-
-func _() {
- xes := other.GetXes()
- for _, x := range xes { //@mark(defX, "x")
- _ = x.Y //@mark(useX, "x"),mark(anotherXY, "Y"),refs("Y", typeXY, anotherXY, GetXesY),refs(".", defX, useX),refs("x", defX, useX)
- }
-}
diff --git a/internal/lsp/testdata/references/other/other.go b/internal/lsp/testdata/references/other/other.go
deleted file mode 100644
index de35cc81a..000000000
--- a/internal/lsp/testdata/references/other/other.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package other
-
-import (
- references "golang.org/x/tools/internal/lsp/references"
-)
-
-func GetXes() []references.X {
- return []references.X{
- {
- Y: 1, //@mark(GetXesY, "Y"),refs("Y", typeXY, GetXesY, anotherXY)
- },
- }
-}
-
-func _() {
- references.Q = "hello" //@mark(assignExpQ, "Q")
- bob := func(_ string) {}
- bob(references.Q) //@mark(bobExpQ, "Q")
-}
diff --git a/internal/lsp/testdata/references/refs.go b/internal/lsp/testdata/references/refs.go
deleted file mode 100644
index 933a36f54..000000000
--- a/internal/lsp/testdata/references/refs.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Package refs is a package used to test find references.
-package refs
-
-type i int //@mark(typeI, "i"),refs("i", typeI, argI, returnI, embeddedI)
-
-type X struct {
- Y int //@mark(typeXY, "Y")
-}
-
-func _(_ i) []bool { //@mark(argI, "i")
- return nil
-}
-
-func _(_ []byte) i { //@mark(returnI, "i")
- return 0
-}
-
-var q string //@mark(declQ, "q"),refs("q", declQ, assignQ, bobQ)
-
-var Q string //@mark(declExpQ, "Q"),refs("Q", declExpQ, assignExpQ, bobExpQ)
-
-func _() {
- q = "hello" //@mark(assignQ, "q")
- bob := func(_ string) {}
- bob(q) //@mark(bobQ, "q")
-}
-
-type e struct {
- i //@mark(embeddedI, "i"),refs("i", embeddedI, embeddedIUse)
-}
-
-func _() {
- _ = e{}.i //@mark(embeddedIUse, "i")
-}
-
-const (
- foo = iota //@refs("iota")
-)
diff --git a/internal/lsp/testdata/rename/b/b.go.golden b/internal/lsp/testdata/rename/b/b.go.golden
deleted file mode 100644
index 9cdc5677f..000000000
--- a/internal/lsp/testdata/rename/b/b.go.golden
+++ /dev/null
@@ -1,78 +0,0 @@
--- Bob-rename --
-package b
-
-var c int //@rename("int", "uint")
-
-func _() {
- a := 1 //@rename("a", "error")
- a = 2
- _ = a
-}
-
-var (
- // Hello there.
- // Bob does the thing.
- Bob int //@rename("Foo", "Bob")
-)
-
-/*
-Hello description
-*/
-func Hello() {} //@rename("Hello", "Goodbye")
-
--- Goodbye-rename --
-b.go:
-package b
-
-var c int //@rename("int", "uint")
-
-func _() {
- a := 1 //@rename("a", "error")
- a = 2
- _ = a
-}
-
-var (
- // Hello there.
- // Foo does the thing.
- Foo int //@rename("Foo", "Bob")
-)
-
-/*
-Goodbye description
-*/
-func Goodbye() {} //@rename("Hello", "Goodbye")
-
-c.go:
-package c
-
-import "golang.org/x/tools/internal/lsp/rename/b"
-
-func _() {
- b.Goodbye() //@rename("Hello", "Goodbye")
-}
-
--- error-rename --
-package b
-
-var c int //@rename("int", "uint")
-
-func _() {
- error := 1 //@rename("a", "error")
- error = 2
- _ = error
-}
-
-var (
- // Hello there.
- // Foo does the thing.
- Foo int //@rename("Foo", "Bob")
-)
-
-/*
-Hello description
-*/
-func Hello() {} //@rename("Hello", "Goodbye")
-
--- uint-rename --
-"int": builtin object
diff --git a/internal/lsp/testdata/rename/bad/bad.go.golden b/internal/lsp/testdata/rename/bad/bad.go.golden
deleted file mode 100644
index 7f4581392..000000000
--- a/internal/lsp/testdata/rename/bad/bad.go.golden
+++ /dev/null
@@ -1,2 +0,0 @@
--- rFunc-rename --
-renaming "sFunc" to "rFunc" not possible because "golang.org/x/tools/internal/lsp/rename/bad" has errors
diff --git a/internal/lsp/testdata/rename/c/c.go b/internal/lsp/testdata/rename/c/c.go
deleted file mode 100644
index 519d2f6fc..000000000
--- a/internal/lsp/testdata/rename/c/c.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package c
-
-import "golang.org/x/tools/internal/lsp/rename/b"
-
-func _() {
- b.Hello() //@rename("Hello", "Goodbye")
-}
diff --git a/internal/lsp/testdata/rename/c/c.go.golden b/internal/lsp/testdata/rename/c/c.go.golden
deleted file mode 100644
index 56937420c..000000000
--- a/internal/lsp/testdata/rename/c/c.go.golden
+++ /dev/null
@@ -1,32 +0,0 @@
--- Goodbye-rename --
-b.go:
-package b
-
-var c int //@rename("int", "uint")
-
-func _() {
- a := 1 //@rename("a", "error")
- a = 2
- _ = a
-}
-
-var (
- // Hello there.
- // Foo does the thing.
- Foo int //@rename("Foo", "Bob")
-)
-
-/*
-Goodbye description
-*/
-func Goodbye() {} //@rename("Hello", "Goodbye")
-
-c.go:
-package c
-
-import "golang.org/x/tools/internal/lsp/rename/b"
-
-func _() {
- b.Goodbye() //@rename("Hello", "Goodbye")
-}
-
diff --git a/internal/lsp/testdata/rename/crosspkg/crosspkg.go.golden b/internal/lsp/testdata/rename/crosspkg/crosspkg.go.golden
deleted file mode 100644
index 810926de6..000000000
--- a/internal/lsp/testdata/rename/crosspkg/crosspkg.go.golden
+++ /dev/null
@@ -1,40 +0,0 @@
--- Dolphin-rename --
-crosspkg.go:
-package crosspkg
-
-func Dolphin() { //@rename("Foo", "Dolphin")
-
-}
-
-var Bar int //@rename("Bar", "Tomato")
-
-other.go:
-package other
-
-import "golang.org/x/tools/internal/lsp/rename/crosspkg"
-
-func Other() {
- crosspkg.Bar
- crosspkg.Dolphin() //@rename("Foo", "Flamingo")
-}
-
--- Tomato-rename --
-crosspkg.go:
-package crosspkg
-
-func Foo() { //@rename("Foo", "Dolphin")
-
-}
-
-var Tomato int //@rename("Bar", "Tomato")
-
-other.go:
-package other
-
-import "golang.org/x/tools/internal/lsp/rename/crosspkg"
-
-func Other() {
- crosspkg.Tomato
- crosspkg.Foo() //@rename("Foo", "Flamingo")
-}
-
diff --git a/internal/lsp/testdata/rename/crosspkg/other/other.go b/internal/lsp/testdata/rename/crosspkg/other/other.go
deleted file mode 100644
index 10d17cd34..000000000
--- a/internal/lsp/testdata/rename/crosspkg/other/other.go
+++ /dev/null
@@ -1,8 +0,0 @@
-package other
-
-import "golang.org/x/tools/internal/lsp/rename/crosspkg"
-
-func Other() {
- crosspkg.Bar
- crosspkg.Foo() //@rename("Foo", "Flamingo")
-}
diff --git a/internal/lsp/testdata/rename/crosspkg/other/other.go.golden b/internal/lsp/testdata/rename/crosspkg/other/other.go.golden
deleted file mode 100644
index 2722ad96e..000000000
--- a/internal/lsp/testdata/rename/crosspkg/other/other.go.golden
+++ /dev/null
@@ -1,20 +0,0 @@
--- Flamingo-rename --
-crosspkg.go:
-package crosspkg
-
-func Flamingo() { //@rename("Foo", "Dolphin")
-
-}
-
-var Bar int //@rename("Bar", "Tomato")
-
-other.go:
-package other
-
-import "golang.org/x/tools/internal/lsp/rename/crosspkg"
-
-func Other() {
- crosspkg.Bar
- crosspkg.Flamingo() //@rename("Foo", "Flamingo")
-}
-
diff --git a/internal/lsp/testdata/rename/shadow/shadow.go.golden b/internal/lsp/testdata/rename/shadow/shadow.go.golden
deleted file mode 100644
index 6281bcdd9..000000000
--- a/internal/lsp/testdata/rename/shadow/shadow.go.golden
+++ /dev/null
@@ -1,48 +0,0 @@
--- a-rename --
-renaming this func "A" to "a" would cause this reference to become shadowed by this intervening var definition
--- b-rename --
-package shadow
-
-func _() {
- a := true
- b, c, _ := A(), b(), D() //@rename("A", "a"),rename("B", "b"),rename("b", "c"),rename("D", "d")
- d := false
- _, _, _, _ = a, b, c, d
-}
-
-func A() int {
- return 0
-}
-
-func b() int {
- return 0
-}
-
-func D() int {
- return 0
-}
-
--- c-rename --
-renaming this var "b" to "c" conflicts with var in same block
--- d-rename --
-package shadow
-
-func _() {
- a := true
- b, c, _ := A(), B(), d() //@rename("A", "a"),rename("B", "b"),rename("b", "c"),rename("D", "d")
- d := false
- _, _, _, _ = a, b, c, d
-}
-
-func A() int {
- return 0
-}
-
-func B() int {
- return 0
-}
-
-func d() int {
- return 0
-}
-
diff --git a/internal/lsp/testdata/selector/selector.go.in b/internal/lsp/testdata/selector/selector.go.in
deleted file mode 100644
index 277f98bde..000000000
--- a/internal/lsp/testdata/selector/selector.go.in
+++ /dev/null
@@ -1,66 +0,0 @@
-// +build go1.11
-
-package selector
-
-import (
- "golang.org/x/tools/internal/lsp/bar"
-)
-
-type S struct {
- B, A, C int //@item(Bf, "B", "int", "field"),item(Af, "A", "int", "field"),item(Cf, "C", "int", "field")
-}
-
-func _() {
- _ = S{}.; //@complete(";", Af, Bf, Cf)
-}
-
-type bob struct { a int } //@item(a, "a", "int", "field")
-type george struct { b int }
-type jack struct { c int } //@item(c, "c", "int", "field")
-type jill struct { d int }
-
-func (b *bob) george() *george {} //@item(george, "george", "func() *george", "method")
-func (g *george) jack() *jack {}
-func (j *jack) jill() *jill {} //@item(jill, "jill", "func() *jill", "method")
-
-func _() {
- b := &bob{}
- y := b.george().
- jack();
- y.; //@complete(";", c, jill)
-}
-
-func _() {
- bar. //@complete(" /", Bar)
- x := 5
-
- var b *bob
- b. //@complete(" /", a, george)
- y, z := 5, 6
-
- b. //@complete(" /", a, george)
- y, z, a, b, c := 5, 6
-}
-
-func _() {
- bar. //@complete(" /", Bar)
- bar.Bar()
-
- bar. //@complete(" /", Bar)
- go f()
-}
-
-func _() {
- var b *bob
- if y != b. //@complete(" /", a, george)
- z := 5
-
- if z + y + 1 + b. //@complete(" /", a, george)
- r, s, t := 4, 5
-
- if y != b. //@complete(" /", a, george)
- z = 5
-
- if z + y + 1 + b. //@complete(" /", a, george)
- r = 4
-}
diff --git a/internal/lsp/testdata/semantic/a.go.golden b/internal/lsp/testdata/semantic/a.go.golden
deleted file mode 100644
index 4622ae4d7..000000000
--- a/internal/lsp/testdata/semantic/a.go.golden
+++ /dev/null
@@ -1,83 +0,0 @@
--- semantic --
-/*⇒7,keyword,[]*/package /*⇒14,namespace,[]*/semantictokens /*⇒16,comment,[]*///@ semantic("")
-
-/*⇒6,keyword,[]*/import (
- _ "encoding/utf8"
- /*⇒3,namespace,[]*/utf "encoding/utf8"
- "fmt"/*⇐3,namespace,[]*/ /*⇒19,comment,[]*///@ semantic("fmt")
- . "fmt"
- "unicode/utf8"/*⇐4,namespace,[]*/
-)
-
-/*⇒3,keyword,[]*/var (
- /*⇒1,variable,[definition]*/a = /*⇒3,namespace,[]*/fmt./*⇒5,function,[]*/Print
- /*⇒1,variable,[definition]*/b []/*⇒6,type,[defaultLibrary]*/string = []/*⇒6,type,[defaultLibrary]*/string{/*⇒5,string,[]*/"foo"}
- /*⇒2,variable,[definition]*/c1 /*⇒4,keyword,[]*/chan /*⇒3,type,[defaultLibrary]*/int
- /*⇒2,variable,[definition]*/c2 <-/*⇒4,keyword,[]*/chan /*⇒3,type,[defaultLibrary]*/int
- /*⇒2,variable,[definition]*/c3 = /*⇒4,function,[defaultLibrary]*/make([]/*⇒4,keyword,[]*/chan<- /*⇒3,type,[defaultLibrary]*/int)
- /*⇒1,variable,[definition]*/b = /*⇒1,type,[]*/A{/*⇒1,variable,[]*/X: /*⇒2,number,[]*/23}
- /*⇒1,variable,[definition]*/m /*⇒3,keyword,[]*/map[/*⇒4,type,[defaultLibrary]*/bool][/*⇒1,number,[]*/3]/*⇒1,operator,[]*/*/*⇒7,type,[defaultLibrary]*/float64
-)
-
-/*⇒5,keyword,[]*/const (
- /*⇒2,variable,[definition readonly]*/xx /*⇒1,type,[]*/F = /*⇒4,variable,[readonly]*/iota
- /*⇒2,variable,[definition readonly]*/yy = /*⇒2,variable,[readonly]*/xx /*⇒1,operator,[]*/+ /*⇒1,number,[]*/3
- /*⇒2,variable,[definition readonly]*/zz = /*⇒2,string,[]*/""
- /*⇒2,variable,[definition readonly]*/ww = /*⇒6,string,[]*/"not " /*⇒1,operator,[]*/+ /*⇒2,variable,[readonly]*/zz
-)
-
-/*⇒4,keyword,[]*/type /*⇒1,type,[definition]*/A /*⇒6,keyword,[]*/struct {
- /*⇒1,variable,[definition]*/X /*⇒3,type,[defaultLibrary]*/int /*⇒6,comment,[]*/`foof`
-}
-/*⇒4,keyword,[]*/type /*⇒1,type,[definition]*/B /*⇒9,keyword,[]*/interface {
- /*⇒1,type,[]*/A
- /*⇒3,method,[definition]*/sad(/*⇒3,type,[defaultLibrary]*/int) /*⇒4,type,[defaultLibrary]*/bool
-}
-
-/*⇒4,keyword,[]*/type /*⇒1,type,[definition]*/F /*⇒3,type,[defaultLibrary]*/int
-
-/*⇒4,keyword,[]*/func (/*⇒1,variable,[]*/a /*⇒1,operator,[]*/*/*⇒1,type,[]*/A) /*⇒1,method,[definition]*/f() /*⇒4,type,[defaultLibrary]*/bool {
- /*⇒3,keyword,[]*/var /*⇒1,variable,[definition]*/z /*⇒6,type,[defaultLibrary]*/string
- /*⇒1,variable,[definition]*/x /*⇒2,operator,[]*/:= /*⇒5,string,[]*/"foo"
- /*⇒1,variable,[]*/a(/*⇒1,variable,[]*/x)
- /*⇒1,variable,[definition]*/y /*⇒2,operator,[]*/:= /*⇒5,string,[]*/"bar" /*⇒1,operator,[]*/+ /*⇒1,variable,[]*/x
- /*⇒6,keyword,[]*/switch /*⇒1,variable,[]*/z {
- /*⇒4,keyword,[]*/case /*⇒4,string,[]*/"xx":
- /*⇒7,keyword,[]*/default:
- }
- /*⇒6,keyword,[]*/select {
- /*⇒4,keyword,[]*/case /*⇒1,variable,[definition]*/z /*⇒2,operator,[]*/:= /*⇒2,operator,[]*/<-/*⇒2,variable,[]*/c3[/*⇒1,number,[]*/0]:
- /*⇒7,keyword,[]*/default:
- }
- /*⇒3,keyword,[]*/for /*⇒1,variable,[definition]*/k, /*⇒1,variable,[definition]*/v := /*⇒5,keyword,[]*/range /*⇒1,variable,[]*/m {
- /*⇒6,keyword,[]*/return (/*⇒1,operator,[]*/!/*⇒1,variable,[]*/k) /*⇒2,operator,[]*/&& /*⇒1,variable,[]*/v[/*⇒1,number,[]*/0] /*⇒2,operator,[]*/== /*⇒3,variable,[readonly defaultLibrary]*/nil
- }
- /*⇒2,variable,[]*/c2 /*⇒2,operator,[]*/<- /*⇒1,type,[]*/A./*⇒1,variable,[]*/X
- /*⇒1,variable,[definition]*/w /*⇒2,operator,[]*/:= /*⇒1,variable,[]*/b[/*⇒1,number,[]*/4:]
- /*⇒1,variable,[definition]*/j /*⇒2,operator,[]*/:= /*⇒3,function,[defaultLibrary]*/len(/*⇒1,variable,[]*/x)
- /*⇒1,variable,[]*/j/*⇒2,operator,[]*/--
- /*⇒1,variable,[definition]*/q /*⇒2,operator,[]*/:= []/*⇒9,keyword,[]*/interface{}{/*⇒1,variable,[]*/j, /*⇒3,number,[]*/23i, /*⇒1,operator,[]*/&/*⇒1,variable,[]*/y}
- /*⇒1,function,[]*/g(/*⇒1,variable,[]*/q/*⇒3,operator,[]*/...)
- /*⇒6,keyword,[]*/return /*⇒4,variable,[readonly]*/true
-}
-
-/*⇒4,keyword,[]*/func /*⇒1,function,[definition]*/g(/*⇒2,parameter,[definition]*/vv /*⇒3,operator,[]*/.../*⇒9,keyword,[]*/interface{}) {
- /*⇒2,variable,[definition]*/ff /*⇒2,operator,[]*/:= /*⇒4,keyword,[]*/func() {}
- /*⇒5,keyword,[]*/defer /*⇒2,variable,[]*/ff()
- /*⇒2,keyword,[]*/go /*⇒3,namespace,[]*/utf./*⇒9,function,[]*/RuneCount(/*⇒2,string,[]*/"")
- /*⇒2,keyword,[]*/go /*⇒4,namespace,[]*/utf8./*⇒9,function,[]*/RuneCount(/*⇒2,variable,[]*/vv.(/*⇒6,type,[]*/string))
- /*⇒2,keyword,[]*/if /*⇒4,variable,[readonly]*/true {
- } /*⇒4,keyword,[]*/else {
- }
-/*⇒5,parameter,[definition]*/Never:
- /*⇒3,keyword,[]*/for /*⇒1,variable,[definition]*/i /*⇒2,operator,[]*/:= /*⇒1,number,[]*/0; /*⇒1,variable,[]*/i /*⇒1,operator,[]*/< /*⇒2,number,[]*/10; {
- /*⇒5,keyword,[]*/break Never
- }
- _, /*⇒2,variable,[definition]*/ok /*⇒2,operator,[]*/:= /*⇒2,variable,[]*/vv[/*⇒1,number,[]*/0].(/*⇒1,type,[]*/A)
- /*⇒2,keyword,[]*/if /*⇒1,operator,[]*/!/*⇒2,variable,[]*/ok {
- /*⇒6,keyword,[]*/switch /*⇒1,variable,[definition]*/x /*⇒2,operator,[]*/:= /*⇒2,variable,[]*/vv[/*⇒1,number,[]*/0].(/*⇒4,keyword,[]*/type) {
- }
- /*⇒4,keyword,[]*/goto Never
- }
-}
-
diff --git a/internal/lsp/testdata/semantic/b.go b/internal/lsp/testdata/semantic/b.go
deleted file mode 100644
index a536f97bf..000000000
--- a/internal/lsp/testdata/semantic/b.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package semantictokens //@ semantic("")
-
-func f(x ...interface{}) {
-}
-
-func weirⰀd() { /*😀*/ // comment
- const (
- snil = nil
- nil = true
- true = false
- false = snil
- cmd = `foof`
- double = iota
- iota = copy
- four = (len(cmd)/2 < 5)
- five = four
- )
- f(cmd, nil, double, iota)
-}
-
-/*
-
-multiline */ /*
-multiline
-*/
-type AA int
-type BB struct {
- AA
-}
-type CC struct {
- AA int
-}
-type D func(aa AA) (BB error)
-type E func(AA) BB
diff --git a/internal/lsp/testdata/semantic/b.go.golden b/internal/lsp/testdata/semantic/b.go.golden
deleted file mode 100644
index 203f6b189..000000000
--- a/internal/lsp/testdata/semantic/b.go.golden
+++ /dev/null
@@ -1,36 +0,0 @@
--- semantic --
-/*⇒7,keyword,[]*/package /*⇒14,namespace,[]*/semantictokens /*⇒16,comment,[]*///@ semantic("")
-
-/*⇒4,keyword,[]*/func /*⇒1,function,[definition]*/f(/*⇒1,parameter,[definition]*/x /*⇒3,operator,[]*/.../*⇒9,keyword,[]*/interface{}) {
-}
-
-/*⇒4,keyword,[]*/func /*⇒6,function,[definition]*/weirⰀd() { /*⇒5,comment,[]*//*😀*/ /*⇒10,comment,[]*/// comment
- /*⇒5,keyword,[]*/const (
- /*⇒4,variable,[definition readonly]*/snil = /*⇒3,variable,[readonly defaultLibrary]*/nil
- /*⇒3,variable,[definition readonly]*/nil = /*⇒4,variable,[readonly]*/true
- /*⇒4,variable,[definition readonly]*/true = /*⇒5,variable,[readonly]*/false
- /*⇒5,variable,[definition readonly]*/false = /*⇒4,variable,[readonly]*/snil
- /*⇒3,variable,[definition readonly]*/cmd = /*⇒6,string,[]*/`foof`
- /*⇒6,variable,[definition readonly]*/double = /*⇒4,variable,[readonly]*/iota
- /*⇒4,variable,[definition readonly]*/iota = /*⇒4,function,[defaultLibrary]*/copy
- /*⇒4,variable,[definition readonly]*/four = (/*⇒3,function,[defaultLibrary]*/len(/*⇒3,variable,[readonly]*/cmd)/*⇒1,operator,[]*// /*⇒1,number,[]*/2 /*⇒1,operator,[]*/< /*⇒1,number,[]*/5)
- /*⇒4,variable,[definition readonly]*/five = /*⇒4,variable,[readonly]*/four
- )
- /*⇒1,function,[]*/f(/*⇒3,variable,[readonly]*/cmd, /*⇒3,variable,[readonly]*/nil, /*⇒6,variable,[readonly]*/double, /*⇒4,variable,[readonly]*/iota)
-}
-
-/*⇒2,comment,[]*//*
-/*⇒0,comment,[]*/
-/*⇒12,comment,[]*/multiline */ /*⇒2,comment,[]*//*
-/*⇒9,comment,[]*/multiline
-/*⇒2,comment,[]*/*/
-/*⇒4,keyword,[]*/type /*⇒2,type,[definition]*/AA /*⇒3,type,[defaultLibrary]*/int
-/*⇒4,keyword,[]*/type /*⇒2,type,[definition]*/BB /*⇒6,keyword,[]*/struct {
- /*⇒2,type,[]*/AA
-}
-/*⇒4,keyword,[]*/type /*⇒2,type,[definition]*/CC /*⇒6,keyword,[]*/struct {
- /*⇒2,variable,[definition]*/AA /*⇒3,type,[defaultLibrary]*/int
-}
-/*⇒4,keyword,[]*/type /*⇒1,type,[definition]*/D /*⇒4,keyword,[]*/func(/*⇒2,parameter,[definition]*/aa /*⇒2,type,[]*/AA) (/*⇒2,parameter,[definition]*/BB /*⇒5,type,[]*/error)
-/*⇒4,keyword,[]*/type /*⇒1,type,[definition]*/E /*⇒4,keyword,[]*/func(/*⇒2,type,[]*/AA) /*⇒2,type,[]*/BB
-
diff --git a/internal/lsp/testdata/signature/signature.go.golden b/internal/lsp/testdata/signature/signature.go.golden
deleted file mode 100644
index d7a65b3b8..000000000
--- a/internal/lsp/testdata/signature/signature.go.golden
+++ /dev/null
@@ -1,65 +0,0 @@
--- AliasMap(a map[*Alias]StringAlias) (b map[*Alias]StringAlias, c map[*Alias]StringAlias)-signature --
-AliasMap(a map[*Alias]StringAlias) (b map[*Alias]StringAlias, c map[*Alias]StringAlias)
-
--- AliasSlice(a []*Alias) (b Alias)-signature --
-AliasSlice(a []*Alias) (b Alias)
-
--- Bar(float64, ...byte)-signature --
-Bar(float64, ...byte)
-
--- Foo(a string, b int) (c bool)-signature --
-Foo(a string, b int) (c bool)
-
--- GetAlias() Alias-signature --
-GetAlias() Alias
-
--- GetAliasPtr() *Alias-signature --
-GetAliasPtr() *Alias
-
--- Next(n int) []byte-signature --
-Next(n int) []byte
-
-Next returns a slice containing the next n bytes from the buffer, advancing the buffer as if the bytes had been returned by Read.
-
--- OtherAliasMap(a map[Alias]OtherAlias, b map[Alias]OtherAlias) map[Alias]OtherAlias-signature --
-OtherAliasMap(a map[Alias]OtherAlias, b map[Alias]OtherAlias) map[Alias]OtherAlias
-
--- SetAliasSlice(a []*Alias)-signature --
-SetAliasSlice(a []*Alias)
-
--- SetOtherAliasMap(a map[*Alias]OtherAlias)-signature --
-SetOtherAliasMap(a map[*Alias]OtherAlias)
-
--- fn(hi string, there string) func(i int) rune-signature --
-fn(hi string, there string) func(i int) rune
-
--- foo(e *json.Decoder) (*big.Int, error)-signature --
-foo(e *json.Decoder) (*big.Int, error)
-
--- func(hi string, there string) func(i int) rune-signature --
-func(hi string, there string) func(i int) rune
-
--- func(i int) rune-signature --
-func(i int) rune
-
--- func(string, int) bool-signature --
-func(string, int) bool
-
--- make(t Type, size ...int) Type-signature --
-make(t Type, size ...int) Type
-
-The make built-in function allocates and initializes an object of type slice, map, or chan (only).
-
--- myFunc(foo int) string-signature --
-myFunc(foo int) string
-
--- panic(v interface{})-signature --
-panic(v any)
-
-The panic built-in function stops normal execution of the current goroutine.
-
--- println(args ...Type)-signature --
-println(args ...Type)
-
-The println built-in function formats its arguments in an implementation-specific way and writes the result to standard error.
-
diff --git a/internal/lsp/testdata/signature/signature_test.go b/internal/lsp/testdata/signature/signature_test.go
deleted file mode 100644
index 62e54a238..000000000
--- a/internal/lsp/testdata/signature/signature_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package signature_test
-
-import (
- "testing"
-
- sig "golang.org/x/tools/internal/lsp/signature"
-)
-
-func TestSignature(t *testing.T) {
- sig.AliasSlice() //@signature(")", "AliasSlice(a []*sig.Alias) (b sig.Alias)", 0)
- sig.AliasMap() //@signature(")", "AliasMap(a map[*sig.Alias]sig.StringAlias) (b map[*sig.Alias]sig.StringAlias, c map[*sig.Alias]sig.StringAlias)", 0)
- sig.OtherAliasMap() //@signature(")", "OtherAliasMap(a map[sig.Alias]sig.OtherAlias, b map[sig.Alias]sig.OtherAlias) map[sig.Alias]sig.OtherAlias", 0)
-}
diff --git a/internal/lsp/testdata/signature/signature_test.go.golden b/internal/lsp/testdata/signature/signature_test.go.golden
deleted file mode 100644
index 3853dffc9..000000000
--- a/internal/lsp/testdata/signature/signature_test.go.golden
+++ /dev/null
@@ -1,30 +0,0 @@
--- AliasMap(a map[*sig.Alias]sig.StringAlias) (b map[*sig.Alias]sig.StringAlias, c map[*sig.Alias]sig.StringAlias)-signature --
-AliasMap(a map[*sig.Alias]sig.StringAlias) (b map[*sig.Alias]sig.StringAlias, c map[*sig.Alias]sig.StringAlias)
-
--- AliasMap(a map[*signature.Alias]signature.StringAlias) (b map[*signature.Alias]signature.StringAlias, c map[*signature.Alias]signature.StringAlias)-signature --
-AliasMap(a map[*signature.Alias]signature.StringAlias) (b map[*signature.Alias]signature.StringAlias, c map[*signature.Alias]signature.StringAlias)
-
--- AliasSlice(a []*sig.Alias) (b sig.Alias)-signature --
-AliasSlice(a []*sig.Alias) (b sig.Alias)
-
--- AliasSlice(a []*signature.Alias) (b signature.Alias)-signature --
-AliasSlice(a []*signature.Alias) (b signature.Alias)
-
--- GetAlias() signature.Alias-signature --
-GetAlias() signature.Alias
-
--- GetAliasPtr() *signature.Alias-signature --
-GetAliasPtr() *signature.Alias
-
--- OtherAliasMap(a map[sig.Alias]sig.OtherAlias, b map[sig.Alias]sig.OtherAlias) map[sig.Alias]sig.OtherAlias-signature --
-OtherAliasMap(a map[sig.Alias]sig.OtherAlias, b map[sig.Alias]sig.OtherAlias) map[sig.Alias]sig.OtherAlias
-
--- OtherAliasMap(a map[signature.Alias]signature.OtherAlias, b map[signature.Alias]signature.OtherAlias) map[signature.Alias]signature.OtherAlias-signature --
-OtherAliasMap(a map[signature.Alias]signature.OtherAlias, b map[signature.Alias]signature.OtherAlias) map[signature.Alias]signature.OtherAlias
-
--- SetAliasSlice(a []*signature.Alias)-signature --
-SetAliasSlice(a []*signature.Alias)
-
--- SetOtherAliasMap(a map[*signature.Alias]signature.OtherAlias)-signature --
-SetOtherAliasMap(a map[*signature.Alias]signature.OtherAlias)
-
diff --git a/internal/lsp/testdata/snippets/literal.go b/internal/lsp/testdata/snippets/literal.go
deleted file mode 100644
index 43931d18e..000000000
--- a/internal/lsp/testdata/snippets/literal.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package snippets
-
-import (
- "golang.org/x/tools/internal/lsp/signature"
- t "golang.org/x/tools/internal/lsp/types"
-)
-
-type structy struct {
- x signature.MyType
-}
-
-func X(_ map[signature.Alias]t.CoolAlias) (map[signature.Alias]t.CoolAlias) {
- return nil
-}
-
-func _() {
- X() //@signature(")", "X(_ map[signature.Alias]t.CoolAlias) map[signature.Alias]t.CoolAlias", 0)
- _ = signature.MyType{} //@item(literalMyType, "signature.MyType{}", "", "var")
- s := structy{
- x: //@snippet(" //", literalMyType, "signature.MyType{\\}", "signature.MyType{\\}")
- }
-} \ No newline at end of file
diff --git a/internal/lsp/testdata/snippets/literal.go.golden b/internal/lsp/testdata/snippets/literal.go.golden
deleted file mode 100644
index f9725f733..000000000
--- a/internal/lsp/testdata/snippets/literal.go.golden
+++ /dev/null
@@ -1,6 +0,0 @@
--- X(_ map[signature.Alias]t.CoolAlias) map[signature.Alias]t.CoolAlias-signature --
-X(_ map[signature.Alias]t.CoolAlias) map[signature.Alias]t.CoolAlias
-
--- X(_ map[signatures.Alias]types.CoolAlias) map[signatures.Alias]types.CoolAlias-signature --
-X(_ map[signatures.Alias]types.CoolAlias) map[signatures.Alias]types.CoolAlias
-
diff --git a/internal/lsp/testdata/snippets/literal_snippets.go.in b/internal/lsp/testdata/snippets/literal_snippets.go.in
deleted file mode 100644
index 4a2a01dfa..000000000
--- a/internal/lsp/testdata/snippets/literal_snippets.go.in
+++ /dev/null
@@ -1,233 +0,0 @@
-package snippets
-
-import (
- "bytes"
- "context"
- "go/ast"
- "net/http"
- "sort"
-
- "golang.org/x/tools/internal/lsp/foo"
-)
-
-func _() {
- []int{} //@item(litIntSlice, "[]int{}", "", "var")
- &[]int{} //@item(litIntSliceAddr, "&[]int{}", "", "var")
- make([]int, 0) //@item(makeIntSlice, "make([]int, 0)", "", "func")
-
- var _ *[]int = in //@snippet(" //", litIntSliceAddr, "&[]int{$0\\}", "&[]int{$0\\}")
- var _ **[]int = in //@complete(" //")
-
- var slice []int
- slice = i //@snippet(" //", litIntSlice, "[]int{$0\\}", "[]int{$0\\}")
- slice = m //@snippet(" //", makeIntSlice, "make([]int, ${1:})", "make([]int, ${1:0})")
-}
-
-func _() {
- type namedInt []int
-
- namedInt{} //@item(litNamedSlice, "namedInt{}", "", "var")
- make(namedInt, 0) //@item(makeNamedSlice, "make(namedInt, 0)", "", "func")
-
- var namedSlice namedInt
- namedSlice = n //@snippet(" //", litNamedSlice, "namedInt{$0\\}", "namedInt{$0\\}")
- namedSlice = m //@snippet(" //", makeNamedSlice, "make(namedInt, ${1:})", "make(namedInt, ${1:0})")
-}
-
-func _() {
- make(chan int) //@item(makeChan, "make(chan int)", "", "func")
-
- var ch chan int
- ch = m //@snippet(" //", makeChan, "make(chan int)", "make(chan int)")
-}
-
-func _() {
- map[string]struct{}{} //@item(litMap, "map[string]struct{}{}", "", "var")
- make(map[string]struct{}) //@item(makeMap, "make(map[string]struct{})", "", "func")
-
- var m map[string]struct{}
- m = m //@snippet(" //", litMap, "map[string]struct{\\}{$0\\}", "map[string]struct{\\}{$0\\}")
- m = m //@snippet(" //", makeMap, "make(map[string]struct{\\})", "make(map[string]struct{\\})")
-
- struct{}{} //@item(litEmptyStruct, "struct{}{}", "", "var")
-
- m["hi"] = s //@snippet(" //", litEmptyStruct, "struct{\\}{\\}", "struct{\\}{\\}")
-}
-
-func _() {
- type myStruct struct{ i int } //@item(myStructType, "myStruct", "struct{...}", "struct")
-
- myStruct{} //@item(litStruct, "myStruct{}", "", "var")
- &myStruct{} //@item(litStructPtr, "&myStruct{}", "", "var")
-
- var ms myStruct
- ms = m //@snippet(" //", litStruct, "myStruct{$0\\}", "myStruct{$0\\}")
-
- var msPtr *myStruct
- msPtr = m //@snippet(" //", litStructPtr, "&myStruct{$0\\}", "&myStruct{$0\\}")
-
- msPtr = &m //@snippet(" //", litStruct, "myStruct{$0\\}", "myStruct{$0\\}")
-
- type myStructCopy struct { i int } //@item(myStructCopyType, "myStructCopy", "struct{...}", "struct")
-
- // Don't offer literal completion for convertible structs.
- ms = myStruct //@complete(" //", litStruct, myStructType, myStructCopyType)
-}
-
-type myImpl struct{}
-
-func (myImpl) foo() {}
-
-func (*myImpl) bar() {}
-
-type myBasicImpl string
-
-func (myBasicImpl) foo() {}
-
-func _() {
- type myIntf interface {
- foo()
- }
-
- myImpl{} //@item(litImpl, "myImpl{}", "", "var")
-
- var mi myIntf
- mi = m //@snippet(" //", litImpl, "myImpl{\\}", "myImpl{\\}")
-
- myBasicImpl() //@item(litBasicImpl, "myBasicImpl()", "string", "var")
-
- mi = m //@snippet(" //", litBasicImpl, "myBasicImpl($0)", "myBasicImpl($0)")
-
- // only satisfied by pointer to myImpl
- type myPtrIntf interface {
- bar()
- }
-
- &myImpl{} //@item(litImplPtr, "&myImpl{}", "", "var")
-
- var mpi myPtrIntf
- mpi = m //@snippet(" //", litImplPtr, "&myImpl{\\}", "&myImpl{\\}")
-}
-
-func _() {
- var s struct{ i []int } //@item(litSliceField, "i", "[]int", "field")
- var foo []int
- // no literal completions after selector
- foo = s.i //@complete(" //", litSliceField)
-}
-
-func _() {
- type myStruct struct{ i int } //@item(litMyStructType, "myStruct", "struct{...}", "struct")
- myStruct{} //@item(litMyStruct, "myStruct{}", "", "var")
-
- foo := func(s string, args ...myStruct) {}
- // Don't give literal slice candidate for variadic arg.
- // Do give literal candidates for variadic element.
- foo("", myStruct) //@complete(")", litMyStruct, litMyStructType)
-}
-
-func _() {
- Buffer{} //@item(litBuffer, "Buffer{}", "", "var")
-
- var b *bytes.Buffer
- b = bytes.Bu //@snippet(" //", litBuffer, "Buffer{\\}", "Buffer{\\}")
-}
-
-func _() {
- _ = "func(...) {}" //@item(litFunc, "func(...) {}", "", "var")
-
- sort.Slice(nil, fun) //@complete(")", litFunc),snippet(")", litFunc, "func(i, j int) bool {$0\\}", "func(i, j int) bool {$0\\}")
-
- http.HandleFunc("", f) //@snippet(")", litFunc, "func(w http.ResponseWriter, r *http.Request) {$0\\}", "func(${1:w} http.ResponseWriter, ${2:r} *http.Request) {$0\\}")
-
- // no literal "func" completions
- http.Handle("", fun) //@complete(")")
-
- http.HandlerFunc() //@item(handlerFunc, "http.HandlerFunc()", "", "var")
- http.Handle("", h) //@snippet(")", handlerFunc, "http.HandlerFunc($0)", "http.HandlerFunc($0)")
- http.Handle("", http.HandlerFunc()) //@snippet("))", litFunc, "func(w http.ResponseWriter, r *http.Request) {$0\\}", "func(${1:w} http.ResponseWriter, ${2:r} *http.Request) {$0\\}")
-
- var namedReturn func(s string) (b bool)
- namedReturn = f //@snippet(" //", litFunc, "func(s string) (b bool) {$0\\}", "func(s string) (b bool) {$0\\}")
-
- var multiReturn func() (bool, int)
- multiReturn = f //@snippet(" //", litFunc, "func() (bool, int) {$0\\}", "func() (bool, int) {$0\\}")
-
- var multiNamedReturn func() (b bool, i int)
- multiNamedReturn = f //@snippet(" //", litFunc, "func() (b bool, i int) {$0\\}", "func() (b bool, i int) {$0\\}")
-
- var duplicateParams func(myImpl, int, myImpl)
- duplicateParams = f //@snippet(" //", litFunc, "func(mi1 myImpl, i int, mi2 myImpl) {$0\\}", "func(${1:mi1} myImpl, ${2:i} int, ${3:mi2} myImpl) {$0\\}")
-
- type aliasImpl = myImpl
- var aliasParams func(aliasImpl) aliasImpl
- aliasParams = f //@snippet(" //", litFunc, "func(ai aliasImpl) aliasImpl {$0\\}", "func(${1:ai} aliasImpl) aliasImpl {$0\\}")
-
- const two = 2
- var builtinTypes func([]int, [two]bool, map[string]string, struct{ i int }, interface{ foo() }, <-chan int)
- builtinTypes = f //@snippet(" //", litFunc, "func(i1 []int, b [two]bool, m map[string]string, s struct{ i int \\}, i2 interface{ foo() \\}, c <-chan int) {$0\\}", "func(${1:i1} []int, ${2:b} [two]bool, ${3:m} map[string]string, ${4:s} struct{ i int \\}, ${5:i2} interface{ foo() \\}, ${6:c} <-chan int) {$0\\}")
-
- var _ func(ast.Node) = f //@snippet(" //", litFunc, "func(n ast.Node) {$0\\}", "func(${1:n} ast.Node) {$0\\}")
- var _ func(error) = f //@snippet(" //", litFunc, "func(err error) {$0\\}", "func(${1:err} error) {$0\\}")
- var _ func(context.Context) = f //@snippet(" //", litFunc, "func(ctx context.Context) {$0\\}", "func(${1:ctx} context.Context) {$0\\}")
-
- type context struct {}
- var _ func(context) = f //@snippet(" //", litFunc, "func(ctx context) {$0\\}", "func(${1:ctx} context) {$0\\}")
-}
-
-func _() {
- StructFoo{} //@item(litStructFoo, "StructFoo{}", "struct{...}", "struct")
-
- var sfp *foo.StructFoo
- // Don't insert the "&" before "StructFoo{}".
- sfp = foo.Str //@snippet(" //", litStructFoo, "StructFoo{$0\\}", "StructFoo{$0\\}")
-
- var sf foo.StructFoo
- sf = foo.Str //@snippet(" //", litStructFoo, "StructFoo{$0\\}", "StructFoo{$0\\}")
- sf = foo. //@snippet(" //", litStructFoo, "StructFoo{$0\\}", "StructFoo{$0\\}")
-}
-
-func _() {
- float64() //@item(litFloat64, "float64()", "float64", "var")
-
- // don't complete to "&float64()"
- var _ *float64 = float64 //@complete(" //")
-
- var f float64
- f = fl //@complete(" //", litFloat64),snippet(" //", litFloat64, "float64($0)", "float64($0)")
-
- type myInt int
- myInt() //@item(litMyInt, "myInt()", "", "var")
-
- var mi myInt
- mi = my //@snippet(" //", litMyInt, "myInt($0)", "myInt($0)")
-}
-
-func _() {
- type ptrStruct struct {
- p *ptrStruct
- }
-
- ptrStruct{} //@item(litPtrStruct, "ptrStruct{}", "", "var")
-
- ptrStruct{
- p: &ptrSt, //@rank(",", litPtrStruct)
- }
-
- &ptrStruct{} //@item(litPtrStructPtr, "&ptrStruct{}", "", "var")
-
- &ptrStruct{
- p: ptrSt, //@rank(",", litPtrStructPtr)
- }
-}
-
-func _() {
- f := func(...[]int) {}
- f() //@snippet(")", litIntSlice, "[]int{$0\\}", "[]int{$0\\}")
-}
-
-
-func _() {
- // don't complete to "untyped int()"
- []int{}[untyped] //@complete("] //")
-}
diff --git a/internal/lsp/testdata/stub/stub_add_selector.go b/internal/lsp/testdata/stub/stub_add_selector.go
deleted file mode 100644
index a15afd7c2..000000000
--- a/internal/lsp/testdata/stub/stub_add_selector.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package stub
-
-import "io"
-
-// This file tests that if an interface
-// method references a type from its own package
-// then our implementation must add the import/package selector
-// in the concrete method if the concrete type is outside of the interface
-// package
-var _ io.ReaderFrom = &readerFrom{} //@suggestedfix("&readerFrom", "refactor.rewrite")
-
-type readerFrom struct{}
diff --git a/internal/lsp/testdata/stub/stub_add_selector.go.golden b/internal/lsp/testdata/stub/stub_add_selector.go.golden
deleted file mode 100644
index e885483ea..000000000
--- a/internal/lsp/testdata/stub/stub_add_selector.go.golden
+++ /dev/null
@@ -1,19 +0,0 @@
--- suggestedfix_stub_add_selector_10_23 --
-package stub
-
-import "io"
-
-// This file tests that if an interface
-// method references a type from its own package
-// then our implementation must add the import/package selector
-// in the concrete method if the concrete type is outside of the interface
-// package
-var _ io.ReaderFrom = &readerFrom{} //@suggestedfix("&readerFrom", "refactor.rewrite")
-
-type readerFrom struct{}
-
-// ReadFrom implements io.ReaderFrom
-func (*readerFrom) ReadFrom(r io.Reader) (n int64, err error) {
- panic("unimplemented")
-}
-
diff --git a/internal/lsp/testdata/stub/stub_assign.go b/internal/lsp/testdata/stub/stub_assign.go
deleted file mode 100644
index 9336361d0..000000000
--- a/internal/lsp/testdata/stub/stub_assign.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package stub
-
-import "io"
-
-func main() {
- var br io.ByteWriter
- br = &byteWriter{} //@suggestedfix("&", "refactor.rewrite")
-}
-
-type byteWriter struct{}
diff --git a/internal/lsp/testdata/stub/stub_assign.go.golden b/internal/lsp/testdata/stub/stub_assign.go.golden
deleted file mode 100644
index a52a82367..000000000
--- a/internal/lsp/testdata/stub/stub_assign.go.golden
+++ /dev/null
@@ -1,17 +0,0 @@
--- suggestedfix_stub_assign_7_7 --
-package stub
-
-import "io"
-
-func main() {
- var br io.ByteWriter
- br = &byteWriter{} //@suggestedfix("&", "refactor.rewrite")
-}
-
-type byteWriter struct{}
-
-// WriteByte implements io.ByteWriter
-func (*byteWriter) WriteByte(c byte) error {
- panic("unimplemented")
-}
-
diff --git a/internal/lsp/testdata/stub/stub_assign_multivars.go b/internal/lsp/testdata/stub/stub_assign_multivars.go
deleted file mode 100644
index 01b330fda..000000000
--- a/internal/lsp/testdata/stub/stub_assign_multivars.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package stub
-
-import "io"
-
-func main() {
- var br io.ByteWriter
- var i int
- i, br = 1, &multiByteWriter{} //@suggestedfix("&", "refactor.rewrite")
-}
-
-type multiByteWriter struct{}
diff --git a/internal/lsp/testdata/stub/stub_assign_multivars.go.golden b/internal/lsp/testdata/stub/stub_assign_multivars.go.golden
deleted file mode 100644
index e1e71adbd..000000000
--- a/internal/lsp/testdata/stub/stub_assign_multivars.go.golden
+++ /dev/null
@@ -1,18 +0,0 @@
--- suggestedfix_stub_assign_multivars_8_13 --
-package stub
-
-import "io"
-
-func main() {
- var br io.ByteWriter
- var i int
- i, br = 1, &multiByteWriter{} //@suggestedfix("&", "refactor.rewrite")
-}
-
-type multiByteWriter struct{}
-
-// WriteByte implements io.ByteWriter
-func (*multiByteWriter) WriteByte(c byte) error {
- panic("unimplemented")
-}
-
diff --git a/internal/lsp/testdata/stub/stub_embedded.go b/internal/lsp/testdata/stub/stub_embedded.go
deleted file mode 100644
index 6d6a986bf..000000000
--- a/internal/lsp/testdata/stub/stub_embedded.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package stub
-
-import (
- "io"
- "sort"
-)
-
-var _ embeddedInterface = (*embeddedConcrete)(nil) //@suggestedfix("(", "refactor.rewrite")
-
-type embeddedConcrete struct{}
-
-type embeddedInterface interface {
- sort.Interface
- io.Reader
-}
diff --git a/internal/lsp/testdata/stub/stub_embedded.go.golden b/internal/lsp/testdata/stub/stub_embedded.go.golden
deleted file mode 100644
index c258ebaf4..000000000
--- a/internal/lsp/testdata/stub/stub_embedded.go.golden
+++ /dev/null
@@ -1,37 +0,0 @@
--- suggestedfix_stub_embedded_8_27 --
-package stub
-
-import (
- "io"
- "sort"
-)
-
-var _ embeddedInterface = (*embeddedConcrete)(nil) //@suggestedfix("(", "refactor.rewrite")
-
-type embeddedConcrete struct{}
-
-// Len implements embeddedInterface
-func (*embeddedConcrete) Len() int {
- panic("unimplemented")
-}
-
-// Less implements embeddedInterface
-func (*embeddedConcrete) Less(i int, j int) bool {
- panic("unimplemented")
-}
-
-// Swap implements embeddedInterface
-func (*embeddedConcrete) Swap(i int, j int) {
- panic("unimplemented")
-}
-
-// Read implements embeddedInterface
-func (*embeddedConcrete) Read(p []byte) (n int, err error) {
- panic("unimplemented")
-}
-
-type embeddedInterface interface {
- sort.Interface
- io.Reader
-}
-
diff --git a/internal/lsp/testdata/stub/stub_err.go b/internal/lsp/testdata/stub/stub_err.go
deleted file mode 100644
index 908c7d315..000000000
--- a/internal/lsp/testdata/stub/stub_err.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package stub
-
-func main() {
- var br error = &customErr{} //@suggestedfix("&", "refactor.rewrite")
-}
-
-type customErr struct{}
diff --git a/internal/lsp/testdata/stub/stub_err.go.golden b/internal/lsp/testdata/stub/stub_err.go.golden
deleted file mode 100644
index 717aed862..000000000
--- a/internal/lsp/testdata/stub/stub_err.go.golden
+++ /dev/null
@@ -1,14 +0,0 @@
--- suggestedfix_stub_err_4_17 --
-package stub
-
-func main() {
- var br error = &customErr{} //@suggestedfix("&", "refactor.rewrite")
-}
-
-type customErr struct{}
-
-// Error implements error
-func (*customErr) Error() string {
- panic("unimplemented")
-}
-
diff --git a/internal/lsp/testdata/stub/stub_function_return.go b/internal/lsp/testdata/stub/stub_function_return.go
deleted file mode 100644
index bbf05885a..000000000
--- a/internal/lsp/testdata/stub/stub_function_return.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package stub
-
-import (
- "io"
-)
-
-func newCloser() io.Closer {
- return closer{} //@suggestedfix("c", "refactor.rewrite")
-}
-
-type closer struct{}
diff --git a/internal/lsp/testdata/stub/stub_function_return.go.golden b/internal/lsp/testdata/stub/stub_function_return.go.golden
deleted file mode 100644
index f80874d2b..000000000
--- a/internal/lsp/testdata/stub/stub_function_return.go.golden
+++ /dev/null
@@ -1,18 +0,0 @@
--- suggestedfix_stub_function_return_8_9 --
-package stub
-
-import (
- "io"
-)
-
-func newCloser() io.Closer {
- return closer{} //@suggestedfix("c", "refactor.rewrite")
-}
-
-type closer struct{}
-
-// Close implements io.Closer
-func (closer) Close() error {
- panic("unimplemented")
-}
-
diff --git a/internal/lsp/testdata/stub/stub_generic_receiver.go b/internal/lsp/testdata/stub/stub_generic_receiver.go
deleted file mode 100644
index 64e90fcf6..000000000
--- a/internal/lsp/testdata/stub/stub_generic_receiver.go
+++ /dev/null
@@ -1,15 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-package stub
-
-import "io"
-
-// This file tests that that the stub method generator accounts for concrete
-// types that have type parameters defined.
-var _ io.ReaderFrom = &genReader[string, int]{} //@suggestedfix("&genReader", "refactor.rewrite")
-
-type genReader[T, Y any] struct {
- T T
- Y Y
-}
diff --git a/internal/lsp/testdata/stub/stub_generic_receiver.go.golden b/internal/lsp/testdata/stub/stub_generic_receiver.go.golden
deleted file mode 100644
index 1fc7157b4..000000000
--- a/internal/lsp/testdata/stub/stub_generic_receiver.go.golden
+++ /dev/null
@@ -1,22 +0,0 @@
--- suggestedfix_stub_generic_receiver_10_23 --
-//go:build go1.18
-// +build go1.18
-
-package stub
-
-import "io"
-
-// This file tests that that the stub method generator accounts for concrete
-// types that have type parameters defined.
-var _ io.ReaderFrom = &genReader[string, int]{} //@suggestedfix("&genReader", "refactor.rewrite")
-
-type genReader[T, Y any] struct {
- T T
- Y Y
-}
-
-// ReadFrom implements io.ReaderFrom
-func (*genReader[T, Y]) ReadFrom(r io.Reader) (n int64, err error) {
- panic("unimplemented")
-}
-
diff --git a/internal/lsp/testdata/stub/stub_ignored_imports.go b/internal/lsp/testdata/stub/stub_ignored_imports.go
deleted file mode 100644
index 8f6ec73de..000000000
--- a/internal/lsp/testdata/stub/stub_ignored_imports.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package stub
-
-import (
- "compress/zlib"
- . "io"
- _ "io"
-)
-
-// This file tests that dot-imports and underscore imports
-// are properly ignored and that a new import is added to
-// reference method types
-
-var (
- _ Reader
- _ zlib.Resetter = (*ignoredResetter)(nil) //@suggestedfix("(", "refactor.rewrite")
-)
-
-type ignoredResetter struct{}
diff --git a/internal/lsp/testdata/stub/stub_ignored_imports.go.golden b/internal/lsp/testdata/stub/stub_ignored_imports.go.golden
deleted file mode 100644
index a0ddc1793..000000000
--- a/internal/lsp/testdata/stub/stub_ignored_imports.go.golden
+++ /dev/null
@@ -1,26 +0,0 @@
--- suggestedfix_stub_ignored_imports_15_20 --
-package stub
-
-import (
- "compress/zlib"
- "io"
- . "io"
- _ "io"
-)
-
-// This file tests that dot-imports and underscore imports
-// are properly ignored and that a new import is added to
-// reference method types
-
-var (
- _ Reader
- _ zlib.Resetter = (*ignoredResetter)(nil) //@suggestedfix("(", "refactor.rewrite")
-)
-
-type ignoredResetter struct{}
-
-// Reset implements zlib.Resetter
-func (*ignoredResetter) Reset(r io.Reader, dict []byte) error {
- panic("unimplemented")
-}
-
diff --git a/internal/lsp/testdata/stub/stub_multi_var.go b/internal/lsp/testdata/stub/stub_multi_var.go
deleted file mode 100644
index 4276b7994..000000000
--- a/internal/lsp/testdata/stub/stub_multi_var.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package stub
-
-import "io"
-
-// This test ensures that a variable declaration that
-// has multiple values on the same line can still be
-// analyzed correctly to target the interface implementation
-// diagnostic.
-var one, two, three io.Reader = nil, &multiVar{}, nil //@suggestedfix("&", "refactor.rewrite")
-
-type multiVar struct{}
diff --git a/internal/lsp/testdata/stub/stub_multi_var.go.golden b/internal/lsp/testdata/stub/stub_multi_var.go.golden
deleted file mode 100644
index b9ac42367..000000000
--- a/internal/lsp/testdata/stub/stub_multi_var.go.golden
+++ /dev/null
@@ -1,18 +0,0 @@
--- suggestedfix_stub_multi_var_9_38 --
-package stub
-
-import "io"
-
-// This test ensures that a variable declaration that
-// has multiple values on the same line can still be
-// analyzed correctly to target the interface implementation
-// diagnostic.
-var one, two, three io.Reader = nil, &multiVar{}, nil //@suggestedfix("&", "refactor.rewrite")
-
-type multiVar struct{}
-
-// Read implements io.Reader
-func (*multiVar) Read(p []byte) (n int, err error) {
- panic("unimplemented")
-}
-
diff --git a/internal/lsp/testdata/stub/stub_pointer.go b/internal/lsp/testdata/stub/stub_pointer.go
deleted file mode 100644
index 2b3681b83..000000000
--- a/internal/lsp/testdata/stub/stub_pointer.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package stub
-
-import "io"
-
-func getReaderFrom() io.ReaderFrom {
- return &pointerImpl{} //@suggestedfix("&", "refactor.rewrite")
-}
-
-type pointerImpl struct{}
diff --git a/internal/lsp/testdata/stub/stub_pointer.go.golden b/internal/lsp/testdata/stub/stub_pointer.go.golden
deleted file mode 100644
index c4133d7a4..000000000
--- a/internal/lsp/testdata/stub/stub_pointer.go.golden
+++ /dev/null
@@ -1,16 +0,0 @@
--- suggestedfix_stub_pointer_6_9 --
-package stub
-
-import "io"
-
-func getReaderFrom() io.ReaderFrom {
- return &pointerImpl{} //@suggestedfix("&", "refactor.rewrite")
-}
-
-type pointerImpl struct{}
-
-// ReadFrom implements io.ReaderFrom
-func (*pointerImpl) ReadFrom(r io.Reader) (n int64, err error) {
- panic("unimplemented")
-}
-
diff --git a/internal/lsp/testdata/stub/stub_renamed_import.go b/internal/lsp/testdata/stub/stub_renamed_import.go
deleted file mode 100644
index eaebe2510..000000000
--- a/internal/lsp/testdata/stub/stub_renamed_import.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package stub
-
-import (
- "compress/zlib"
- myio "io"
-)
-
-var _ zlib.Resetter = &myIO{} //@suggestedfix("&", "refactor.rewrite")
-var _ myio.Reader
-
-type myIO struct{}
diff --git a/internal/lsp/testdata/stub/stub_renamed_import.go.golden b/internal/lsp/testdata/stub/stub_renamed_import.go.golden
deleted file mode 100644
index 48ff4f153..000000000
--- a/internal/lsp/testdata/stub/stub_renamed_import.go.golden
+++ /dev/null
@@ -1,18 +0,0 @@
--- suggestedfix_stub_renamed_import_8_23 --
-package stub
-
-import (
- "compress/zlib"
- myio "io"
-)
-
-var _ zlib.Resetter = &myIO{} //@suggestedfix("&", "refactor.rewrite")
-var _ myio.Reader
-
-type myIO struct{}
-
-// Reset implements zlib.Resetter
-func (*myIO) Reset(r myio.Reader, dict []byte) error {
- panic("unimplemented")
-}
-
diff --git a/internal/lsp/testdata/stub/stub_renamed_import_iface.go b/internal/lsp/testdata/stub/stub_renamed_import_iface.go
deleted file mode 100644
index 96caf540d..000000000
--- a/internal/lsp/testdata/stub/stub_renamed_import_iface.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package stub
-
-import (
- "golang.org/x/tools/internal/lsp/stub/other"
-)
-
-// This file tests that if an interface
-// method references an import from its own package
-// that the concrete type does not yet import, and that import happens
-// to be renamed, then we prefer the renaming of the interface.
-var _ other.Interface = &otherInterfaceImpl{} //@suggestedfix("&otherInterfaceImpl", "refactor.rewrite")
-
-type otherInterfaceImpl struct{}
diff --git a/internal/lsp/testdata/stub/stub_renamed_import_iface.go.golden b/internal/lsp/testdata/stub/stub_renamed_import_iface.go.golden
deleted file mode 100644
index 9ba2cb440..000000000
--- a/internal/lsp/testdata/stub/stub_renamed_import_iface.go.golden
+++ /dev/null
@@ -1,22 +0,0 @@
--- suggestedfix_stub_renamed_import_iface_11_25 --
-package stub
-
-import (
- "bytes"
- renamed_context "context"
- "golang.org/x/tools/internal/lsp/stub/other"
-)
-
-// This file tests that if an interface
-// method references an import from its own package
-// that the concrete type does not yet import, and that import happens
-// to be renamed, then we prefer the renaming of the interface.
-var _ other.Interface = &otherInterfaceImpl{} //@suggestedfix("&otherInterfaceImpl", "refactor.rewrite")
-
-type otherInterfaceImpl struct{}
-
-// Get implements other.Interface
-func (*otherInterfaceImpl) Get(renamed_context.Context) *bytes.Buffer {
- panic("unimplemented")
-}
-
diff --git a/internal/lsp/testdata/stub/stub_stdlib.go b/internal/lsp/testdata/stub/stub_stdlib.go
deleted file mode 100644
index 0d54a6daa..000000000
--- a/internal/lsp/testdata/stub/stub_stdlib.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package stub
-
-import (
- "io"
-)
-
-var _ io.Writer = writer{} //@suggestedfix("w", "refactor.rewrite")
-
-type writer struct{}
diff --git a/internal/lsp/testdata/stub/stub_stdlib.go.golden b/internal/lsp/testdata/stub/stub_stdlib.go.golden
deleted file mode 100644
index 8636cead4..000000000
--- a/internal/lsp/testdata/stub/stub_stdlib.go.golden
+++ /dev/null
@@ -1,16 +0,0 @@
--- suggestedfix_stub_stdlib_7_19 --
-package stub
-
-import (
- "io"
-)
-
-var _ io.Writer = writer{} //@suggestedfix("w", "refactor.rewrite")
-
-type writer struct{}
-
-// Write implements io.Writer
-func (writer) Write(p []byte) (n int, err error) {
- panic("unimplemented")
-}
-
diff --git a/internal/lsp/testdata/suggestedfix/has_suggested_fix.go b/internal/lsp/testdata/suggestedfix/has_suggested_fix.go
deleted file mode 100644
index e06dce0a8..000000000
--- a/internal/lsp/testdata/suggestedfix/has_suggested_fix.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package suggestedfix
-
-import (
- "log"
-)
-
-func goodbye() {
- s := "hiiiiiii"
- s = s //@suggestedfix("s = s", "quickfix")
- log.Print(s)
-}
diff --git a/internal/lsp/testdata/suggestedfix/has_suggested_fix.go.golden b/internal/lsp/testdata/suggestedfix/has_suggested_fix.go.golden
deleted file mode 100644
index 9ccaa1994..000000000
--- a/internal/lsp/testdata/suggestedfix/has_suggested_fix.go.golden
+++ /dev/null
@@ -1,13 +0,0 @@
--- suggestedfix_has_suggested_fix_9_2 --
-package suggestedfix
-
-import (
- "log"
-)
-
-func goodbye() {
- s := "hiiiiiii"
- //@suggestedfix("s = s", "quickfix")
- log.Print(s)
-}
-
diff --git a/internal/lsp/testdata/summary.txt.golden b/internal/lsp/testdata/summary.txt.golden
deleted file mode 100644
index 29493920f..000000000
--- a/internal/lsp/testdata/summary.txt.golden
+++ /dev/null
@@ -1,30 +0,0 @@
--- summary --
-CallHierarchyCount = 2
-CodeLensCount = 5
-CompletionsCount = 265
-CompletionSnippetCount = 106
-UnimportedCompletionsCount = 5
-DeepCompletionsCount = 5
-FuzzyCompletionsCount = 8
-RankedCompletionsCount = 163
-CaseSensitiveCompletionsCount = 4
-DiagnosticsCount = 37
-FoldingRangesCount = 2
-FormatCount = 6
-ImportCount = 8
-SemanticTokenCount = 3
-SuggestedFixCount = 61
-FunctionExtractionCount = 25
-MethodExtractionCount = 6
-DefinitionsCount = 95
-TypeDefinitionsCount = 18
-HighlightsCount = 69
-ReferencesCount = 27
-RenamesCount = 41
-PrepareRenamesCount = 7
-SymbolsCount = 5
-WorkspaceSymbolsCount = 20
-SignaturesCount = 33
-LinksCount = 7
-ImplementationsCount = 14
-
diff --git a/internal/lsp/testdata/summary_go1.18.txt.golden b/internal/lsp/testdata/summary_go1.18.txt.golden
deleted file mode 100644
index 48639899e..000000000
--- a/internal/lsp/testdata/summary_go1.18.txt.golden
+++ /dev/null
@@ -1,30 +0,0 @@
--- summary --
-CallHierarchyCount = 2
-CodeLensCount = 5
-CompletionsCount = 266
-CompletionSnippetCount = 110
-UnimportedCompletionsCount = 5
-DeepCompletionsCount = 5
-FuzzyCompletionsCount = 8
-RankedCompletionsCount = 169
-CaseSensitiveCompletionsCount = 4
-DiagnosticsCount = 37
-FoldingRangesCount = 2
-FormatCount = 6
-ImportCount = 8
-SemanticTokenCount = 3
-SuggestedFixCount = 62
-FunctionExtractionCount = 25
-MethodExtractionCount = 6
-DefinitionsCount = 108
-TypeDefinitionsCount = 18
-HighlightsCount = 69
-ReferencesCount = 27
-RenamesCount = 48
-PrepareRenamesCount = 7
-SymbolsCount = 5
-WorkspaceSymbolsCount = 20
-SignaturesCount = 33
-LinksCount = 7
-ImplementationsCount = 14
-
diff --git a/internal/lsp/testdata/symbols/main.go b/internal/lsp/testdata/symbols/main.go
deleted file mode 100644
index 8111250f3..000000000
--- a/internal/lsp/testdata/symbols/main.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package main
-
-import (
- "io"
-)
-
-var _ = 1
-
-var x = 42 //@mark(symbolsx, "x"), symbol("x", "x", "Variable", "", "main.x")
-
-const y = 43 //@symbol("y", "y", "Constant", "", "main.y")
-
-type Number int //@symbol("Number", "Number", "Number", "", "main.Number")
-
-type Alias = string //@symbol("Alias", "Alias", "String", "", "main.Alias")
-
-type NumberAlias = Number //@symbol("NumberAlias", "NumberAlias", "Number", "", "main.NumberAlias")
-
-type (
- Boolean bool //@symbol("Boolean", "Boolean", "Boolean", "", "main.Boolean")
- BoolAlias = bool //@symbol("BoolAlias", "BoolAlias", "Boolean", "", "main.BoolAlias")
-)
-
-type Foo struct { //@mark(symbolsFoo, "Foo"), symbol("Foo", "Foo", "Struct", "", "main.Foo")
- Quux //@mark(fQuux, "Quux"), symbol("Quux", "Quux", "Field", "Foo", "main.Foo.Quux")
- W io.Writer //@symbol("W" , "W", "Field", "Foo", "main.Foo.W")
- Bar int //@mark(fBar, "Bar"), symbol("Bar", "Bar", "Field", "Foo", "main.Foo.Bar")
- baz string //@symbol("baz", "baz", "Field", "Foo", "main.Foo.baz")
-}
-
-type Quux struct { //@symbol("Quux", "Quux", "Struct", "", "main.Quux")
- X, Y float64 //@mark(qX, "X"), symbol("X", "X", "Field", "Quux", "main.X"), symbol("Y", "Y", "Field", "Quux", "main.Y")
-}
-
-func (f Foo) Baz() string { //@symbol("(Foo).Baz", "Baz", "Method", "", "main.Foo.Baz")
- return f.baz
-}
-
-func _() {}
-
-func (q *Quux) Do() {} //@mark(qDo, "Do"), symbol("(*Quux).Do", "Do", "Method", "", "main.Quux.Do")
-
-func main() { //@symbol("main", "main", "Function", "", "main.main")
-
-}
-
-type Stringer interface { //@symbol("Stringer", "Stringer", "Interface", "", "main.Stringer")
- String() string //@symbol("String", "String", "Method", "Stringer", "main.Stringer.String")
-}
-
-type ABer interface { //@mark(ABerInterface, "ABer"), symbol("ABer", "ABer", "Interface", "", "main.ABer")
- B() //@symbol("B", "B", "Method", "ABer", "main.ABer.B")
- A() string //@mark(ABerA, "A"), symbol("A", "A", "Method", "ABer", "main.ABer.A")
-}
-
-type WithEmbeddeds interface { //@symbol("WithEmbeddeds", "WithEmbeddeds", "Interface", "", "main.WithEmbeddeds")
- Do() //@symbol("Do", "Do", "Method", "WithEmbeddeds", "main.WithEmbeddeds.Do")
- ABer //@symbol("ABer", "ABer", "Interface", "WithEmbeddeds", "main.WithEmbeddeds.ABer")
- io.Writer //@mark(ioWriter, "io.Writer"), symbol("io.Writer", "io.Writer", "Interface", "WithEmbeddeds", "main.WithEmbeddeds.Writer")
-}
-
-func Dunk() int { return 0 } //@symbol("Dunk", "Dunk", "Function", "", "main.Dunk")
-
-func dunk() {} //@symbol("dunk", "dunk", "Function", "", "main.dunk")
diff --git a/internal/lsp/testdata/symbols/main.go.golden b/internal/lsp/testdata/symbols/main.go.golden
deleted file mode 100644
index ebb6a8a5d..000000000
--- a/internal/lsp/testdata/symbols/main.go.golden
+++ /dev/null
@@ -1,31 +0,0 @@
--- symbols --
-x Variable 9:5-9:6
-y Constant 11:7-11:8
-Number Number 13:6-13:12
-Alias String 15:6-15:11
-NumberAlias Number 17:6-17:17
-Boolean Boolean 20:2-20:9
-BoolAlias Boolean 21:2-21:11
-Foo Struct 24:6-24:9
- Bar Field 27:2-27:5
- Quux Field 25:2-25:6
- W Field 26:2-26:3
- baz Field 28:2-28:5
-Quux Struct 31:6-31:10
- X Field 32:2-32:3
- Y Field 32:5-32:6
-(Foo).Baz Method 35:14-35:17
-(*Quux).Do Method 41:16-41:18
-main Function 43:6-43:10
-Stringer Interface 47:6-47:14
- String Method 48:2-48:8
-ABer Interface 51:6-51:10
- A Method 53:2-53:3
- B Method 52:2-52:3
-WithEmbeddeds Interface 56:6-56:19
- ABer Interface 58:2-58:6
- Do Method 57:2-57:4
- io.Writer Interface 59:2-59:11
-Dunk Function 62:6-62:10
-dunk Function 64:6-64:10
-
diff --git a/internal/lsp/testdata/testy/testy_test.go b/internal/lsp/testdata/testy/testy_test.go
deleted file mode 100644
index 4939f86b5..000000000
--- a/internal/lsp/testdata/testy/testy_test.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package testy
-
-import (
- "testing"
-
- sig "golang.org/x/tools/internal/lsp/signature"
- "golang.org/x/tools/internal/lsp/snippets"
-)
-
-func TestSomething(t *testing.T) { //@item(TestSomething, "TestSomething(t *testing.T)", "", "func")
- var x int //@mark(testyX, "x"),diag("x", "compiler", "x declared but not used", "error"),refs("x", testyX)
- a() //@mark(testyA, "a")
-}
-
-func _() {
- _ = snippets.X(nil) //@signature("nil", "X(_ map[sig.Alias]types.CoolAlias) map[sig.Alias]types.CoolAlias", 0)
- var _ sig.Alias
-}
diff --git a/internal/lsp/testdata/typeerrors/noresultvalues.go b/internal/lsp/testdata/typeerrors/noresultvalues.go
deleted file mode 100644
index 84234c4b9..000000000
--- a/internal/lsp/testdata/typeerrors/noresultvalues.go
+++ /dev/null
@@ -1,5 +0,0 @@
-package typeerrors
-
-func x() { return nil } //@suggestedfix("nil", "quickfix")
-
-func y() { return nil, "hello" } //@suggestedfix("nil", "quickfix")
diff --git a/internal/lsp/testdata/typeerrors/noresultvalues.go.golden b/internal/lsp/testdata/typeerrors/noresultvalues.go.golden
deleted file mode 100644
index 07c54d445..000000000
--- a/internal/lsp/testdata/typeerrors/noresultvalues.go.golden
+++ /dev/null
@@ -1,14 +0,0 @@
--- suggestedfix_noresultvalues_3_19 --
-package typeerrors
-
-func x() { return } //@suggestedfix("nil", "quickfix")
-
-func y() { return nil, "hello" } //@suggestedfix("nil", "quickfix")
-
--- suggestedfix_noresultvalues_5_19 --
-package typeerrors
-
-func x() { return nil } //@suggestedfix("nil", "quickfix")
-
-func y() { return } //@suggestedfix("nil", "quickfix")
-
diff --git a/internal/lsp/testdata/typeparams/type_params.go b/internal/lsp/testdata/typeparams/type_params.go
deleted file mode 100644
index 1dfb1034a..000000000
--- a/internal/lsp/testdata/typeparams/type_params.go
+++ /dev/null
@@ -1,33 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-package typeparams
-
-func one[a int | string]() {}
-func two[a int | string, b float64 | int]() {}
-
-func _() {
- one[]() //@rank("]", string, float64)
- two[]() //@rank("]", int, float64)
- two[int, f]() //@rank("]", float64, float32)
-}
-
-func slices[a []int | []float64]() {} //@item(tpInts, "[]int", "[]int", "type"),item(tpFloats, "[]float64", "[]float64", "type")
-
-func _() {
- slices[]() //@rank("]", tpInts),rank("]", tpFloats)
-}
-
-type s[a int | string] struct{}
-
-func _() {
- s[]{} //@rank("]", int, float64)
-}
-
-func returnTP[A int | float64](a A) A { //@item(returnTP, "returnTP", "something", "func")
- return a
-}
-
-func _() {
- var _ int = returnTP //@snippet(" //", returnTP, "returnTP[${1:}](${2:})", "returnTP[${1:A int|float64}](${2:a A})")
-}
diff --git a/internal/lsp/testdata/undeclared/var.go b/internal/lsp/testdata/undeclared/var.go
deleted file mode 100644
index b5f9287d4..000000000
--- a/internal/lsp/testdata/undeclared/var.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package undeclared
-
-func m() int {
- z, _ := 1+y, 11 //@diag("y", "compiler", "undeclared name: y", "error"),suggestedfix("y", "quickfix")
- if 100 < 90 {
- z = 1
- } else if 100 > n+2 { //@diag("n", "compiler", "undeclared name: n", "error"),suggestedfix("n", "quickfix")
- z = 4
- }
- for i < 200 { //@diag("i", "compiler", "undeclared name: i", "error"),suggestedfix("i", "quickfix")
- }
- r() //@diag("r", "compiler", "undeclared name: r", "error")
- return z
-}
diff --git a/internal/lsp/testdata/undeclared/var.go.golden b/internal/lsp/testdata/undeclared/var.go.golden
deleted file mode 100644
index 74adbe8ff..000000000
--- a/internal/lsp/testdata/undeclared/var.go.golden
+++ /dev/null
@@ -1,51 +0,0 @@
--- suggestedfix_var_10_6 --
-package undeclared
-
-func m() int {
- z, _ := 1+y, 11 //@diag("y", "compiler", "undeclared name: y", "error"),suggestedfix("y", "quickfix")
- if 100 < 90 {
- z = 1
- } else if 100 > n+2 { //@diag("n", "compiler", "undeclared name: n", "error"),suggestedfix("n", "quickfix")
- z = 4
- }
- i :=
- for i < 200 { //@diag("i", "compiler", "undeclared name: i", "error"),suggestedfix("i", "quickfix")
- }
- r() //@diag("r", "compiler", "undeclared name: r", "error")
- return z
-}
-
--- suggestedfix_var_4_12 --
-package undeclared
-
-func m() int {
- y :=
- z, _ := 1+y, 11 //@diag("y", "compiler", "undeclared name: y", "error"),suggestedfix("y", "quickfix")
- if 100 < 90 {
- z = 1
- } else if 100 > n+2 { //@diag("n", "compiler", "undeclared name: n", "error"),suggestedfix("n", "quickfix")
- z = 4
- }
- for i < 200 { //@diag("i", "compiler", "undeclared name: i", "error"),suggestedfix("i", "quickfix")
- }
- r() //@diag("r", "compiler", "undeclared name: r", "error")
- return z
-}
-
--- suggestedfix_var_7_18 --
-package undeclared
-
-func m() int {
- z, _ := 1+y, 11 //@diag("y", "compiler", "undeclared name: y", "error"),suggestedfix("y", "quickfix")
- n :=
- if 100 < 90 {
- z = 1
- } else if 100 > n+2 { //@diag("n", "compiler", "undeclared name: n", "error"),suggestedfix("n", "quickfix")
- z = 4
- }
- for i < 200 { //@diag("i", "compiler", "undeclared name: i", "error"),suggestedfix("i", "quickfix")
- }
- r() //@diag("r", "compiler", "undeclared name: r", "error")
- return z
-}
-
diff --git a/internal/lsp/testdata/unimported/export_test.go b/internal/lsp/testdata/unimported/export_test.go
deleted file mode 100644
index 4f85700fa..000000000
--- a/internal/lsp/testdata/unimported/export_test.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package unimported
-
-var TestExport int //@item(testexport, "TestExport", "(from \"golang.org/x/tools/internal/lsp/unimported\")", "var")
diff --git a/internal/lsp/testdata/unimported/unimported.go.in b/internal/lsp/testdata/unimported/unimported.go.in
deleted file mode 100644
index c3c0243d9..000000000
--- a/internal/lsp/testdata/unimported/unimported.go.in
+++ /dev/null
@@ -1,20 +0,0 @@
-package unimported
-
-func _() {
- http //@unimported("p", nethttp)
- // container/ring is extremely unlikely to be imported by anything, so shouldn't have type information.
- ring.Ring //@unimported("Ring", ringring)
- signature.Foo //@unimported("Foo", signaturefoo)
-
- context.Bac //@unimported(" //", contextBackground, contextBackgroundErr)
-}
-
-// Create markers for unimported std lib packages. Only for use by this test.
-/* http */ //@item(nethttp, "http", "\"net/http\"", "package")
-
-/* ring.Ring */ //@item(ringring, "Ring", "(from \"container/ring\")", "var")
-
-/* signature.Foo */ //@item(signaturefoo, "Foo", "func(a string, b int) (c bool) (from \"golang.org/x/tools/internal/lsp/signature\")", "func")
-
-/* context.Background */ //@item(contextBackground, "Background", "func() context.Context (from \"context\")", "func")
-/* context.Background().Err */ //@item(contextBackgroundErr, "Background().Err", "func() error (from \"context\")", "method")
diff --git a/internal/lsp/testdata/unimported/unimported_cand_type.go b/internal/lsp/testdata/unimported/unimported_cand_type.go
deleted file mode 100644
index 531aa2d18..000000000
--- a/internal/lsp/testdata/unimported/unimported_cand_type.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package unimported
-
-import (
- _ "context"
-
- "golang.org/x/tools/internal/lsp/baz"
- _ "golang.org/x/tools/internal/lsp/signature" // provide type information for unimported completions in the other file
-)
-
-func _() {
- foo.StructFoo{} //@item(litFooStructFoo, "foo.StructFoo{}", "struct{...}", "struct")
-
- // We get the literal completion for "foo.StructFoo{}" even though we haven't
- // imported "foo" yet.
- baz.FooStruct = f //@snippet(" //", litFooStructFoo, "foo.StructFoo{$0\\}", "foo.StructFoo{$0\\}")
-}
diff --git a/internal/lsp/testdata/workspacesymbol/a/a.go b/internal/lsp/testdata/workspacesymbol/a/a.go
deleted file mode 100644
index 6e5a68b16..000000000
--- a/internal/lsp/testdata/workspacesymbol/a/a.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package a
-
-var RandomGopherVariableA = "a" //@symbol("RandomGopherVariableA", "RandomGopherVariableA", "Variable", "", "a.RandomGopherVariableA")
-
-const RandomGopherConstantA = "a" //@symbol("RandomGopherConstantA", "RandomGopherConstantA", "Constant", "", "a.RandomGopherConstantA")
-
-const (
- randomgopherinvariable = iota //@symbol("randomgopherinvariable", "randomgopherinvariable", "Constant", "", "a.randomgopherinvariable")
-)
diff --git a/internal/lsp/testdata/workspacesymbol/a/a.go.golden b/internal/lsp/testdata/workspacesymbol/a/a.go.golden
deleted file mode 100644
index c3f088577..000000000
--- a/internal/lsp/testdata/workspacesymbol/a/a.go.golden
+++ /dev/null
@@ -1,5 +0,0 @@
--- symbols --
-RandomGopherVariableA Variable 3:5-3:26
-RandomGopherConstantA Constant 5:7-5:28
-randomgopherinvariable Constant 8:2-8:24
-
diff --git a/internal/lsp/testdata/workspacesymbol/a/a_test.go b/internal/lsp/testdata/workspacesymbol/a/a_test.go
deleted file mode 100644
index 30d534097..000000000
--- a/internal/lsp/testdata/workspacesymbol/a/a_test.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package a
-
-var RandomGopherTestVariableA = "a" //@symbol("RandomGopherTestVariableA", "RandomGopherTestVariableA", "Variable", "", "a.RandomGopherTestVariableA")
diff --git a/internal/lsp/testdata/workspacesymbol/a/a_test.go.golden b/internal/lsp/testdata/workspacesymbol/a/a_test.go.golden
deleted file mode 100644
index af7461943..000000000
--- a/internal/lsp/testdata/workspacesymbol/a/a_test.go.golden
+++ /dev/null
@@ -1,3 +0,0 @@
--- symbols --
-RandomGopherTestVariableA Variable 3:5-3:30
-
diff --git a/internal/lsp/testdata/workspacesymbol/a/a_x_test.go b/internal/lsp/testdata/workspacesymbol/a/a_x_test.go
deleted file mode 100644
index 76eb8487d..000000000
--- a/internal/lsp/testdata/workspacesymbol/a/a_x_test.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package a_test
-
-var RandomGopherXTestVariableA = "a" //@symbol("RandomGopherXTestVariableA", "RandomGopherXTestVariableA", "Variable", "", "a_test.RandomGopherXTestVariableA")
diff --git a/internal/lsp/testdata/workspacesymbol/a/a_x_test.go.golden b/internal/lsp/testdata/workspacesymbol/a/a_x_test.go.golden
deleted file mode 100644
index dfd02a5c4..000000000
--- a/internal/lsp/testdata/workspacesymbol/a/a_x_test.go.golden
+++ /dev/null
@@ -1,3 +0,0 @@
--- symbols --
-RandomGopherXTestVariableA Variable 3:5-3:31
-
diff --git a/internal/lsp/testdata/workspacesymbol/b/b.go b/internal/lsp/testdata/workspacesymbol/b/b.go
deleted file mode 100644
index 89ce0d92e..000000000
--- a/internal/lsp/testdata/workspacesymbol/b/b.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package b
-
-var RandomGopherVariableB = "b" //@symbol("RandomGopherVariableB", "RandomGopherVariableB", "Variable", "", "b.RandomGopherVariableB")
-
-type RandomGopherStructB struct { //@symbol("RandomGopherStructB", "RandomGopherStructB", "Struct", "", "b.RandomGopherStructB")
- Bar int //@mark(bBar, "Bar"), symbol("Bar", "Bar", "Field", "RandomGopherStructB", "b.RandomGopherStructB.Bar")
-}
diff --git a/internal/lsp/testdata/workspacesymbol/b/b.go.golden b/internal/lsp/testdata/workspacesymbol/b/b.go.golden
deleted file mode 100644
index 4711c9d91..000000000
--- a/internal/lsp/testdata/workspacesymbol/b/b.go.golden
+++ /dev/null
@@ -1,5 +0,0 @@
--- symbols --
-RandomGopherVariableB Variable 3:5-3:26
-RandomGopherStructB Struct 5:6-5:25
- Bar Field 6:2-6:5
-
diff --git a/internal/lsp/tests/README.md b/internal/lsp/tests/README.md
deleted file mode 100644
index 2c18675f7..000000000
--- a/internal/lsp/tests/README.md
+++ /dev/null
@@ -1,66 +0,0 @@
-# Testing
-
-LSP has "marker tests" defined in `internal/lsp/testdata`, as well as
-traditional tests.
-
-## Marker tests
-
-Marker tests have a standard input file, like
-`internal/lsp/testdata/foo/bar.go`, and some may have a corresponding golden
-file, like `internal/lsp/testdata/foo/bar.go.golden`. The former is the "input"
-and the latter is the expected output.
-
-Each input file contains annotations like
-`//@suggestedfix("}", "refactor.rewrite")`. These annotations are interpreted by
-test runners to perform certain actions. The expected output after those actions
-is encoded in the golden file.
-
-When tests are run, each annotation results in a new subtest, which is encoded
-in the golden file with a heading like,
-
-```bash
--- suggestedfix_bar_11_21 --
-// expected contents go here
--- suggestedfix_bar_13_20 --
-// expected contents go here
-```
-
-The format of these headings vary: they are defined by the
-[`Golden`](https://pkg.go.dev/golang.org/x/tools/internal/lsp/tests#Data.Golden)
-function for each annotation. In the case above, the format is: annotation
-name, file name, annotation line location, annotation character location.
-
-So, if `internal/lsp/testdata/foo/bar.go` has three `suggestedfix` annotations,
-the golden file should have three headers with `suggestedfix_bar_xx_yy`
-headings.
-
-To see a list of all available annotations, see the exported "expectations" in
-[tests.go](https://github.com/golang/tools/blob/299f270db45902e93469b1152fafed034bb3f033/internal/lsp/tests/tests.go#L418-L447).
-
-To run marker tests,
-
-```bash
-cd /path/to/tools
-
-# The marker tests are located in "internal/lsp", "internal/lsp/cmd, and
-# "internal/lsp/source".
-go test ./internal/lsp/...
-```
-
-There are quite a lot of marker tests, so to run one individually, pass the test
-path and heading into a -run argument:
-
-```bash
-cd /path/to/tools
-go test ./internal/lsp/... -v -run TestLSP/Modules/SuggestedFix/bar_11_21
-```
-
-## Resetting marker tests
-
-Sometimes, a change is made to lsp that requires a change to multiple golden
-files. When this happens, you can run,
-
-```bash
-cd /path/to/tools
-./internal/lsp/reset_golden.sh
-```
diff --git a/internal/lsp/tests/normalizer.go b/internal/lsp/tests/normalizer.go
deleted file mode 100644
index 77d9e66a8..000000000
--- a/internal/lsp/tests/normalizer.go
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tests
-
-import (
- "path/filepath"
- "strconv"
- "strings"
-
- "golang.org/x/tools/go/packages/packagestest"
-)
-
-type Normalizer struct {
- path string
- slashed string
- escaped string
- fragment string
-}
-
-func CollectNormalizers(exported *packagestest.Exported) []Normalizer {
- // build the path normalizing patterns
- var normalizers []Normalizer
- for _, m := range exported.Modules {
- for fragment := range m.Files {
- n := Normalizer{
- path: exported.File(m.Name, fragment),
- fragment: fragment,
- }
- if n.slashed = filepath.ToSlash(n.path); n.slashed == n.path {
- n.slashed = ""
- }
- quoted := strconv.Quote(n.path)
- if n.escaped = quoted[1 : len(quoted)-1]; n.escaped == n.path {
- n.escaped = ""
- }
- normalizers = append(normalizers, n)
- }
- }
- return normalizers
-}
-
-// NormalizePrefix normalizes a single path at the front of the input string.
-func NormalizePrefix(s string, normalizers []Normalizer) string {
- for _, n := range normalizers {
- if t := strings.TrimPrefix(s, n.path); t != s {
- return n.fragment + t
- }
- if t := strings.TrimPrefix(s, n.slashed); t != s {
- return n.fragment + t
- }
- if t := strings.TrimPrefix(s, n.escaped); t != s {
- return n.fragment + t
- }
- }
- return s
-}
-
-// Normalize replaces all paths present in s with just the fragment portion
-// this is used to make golden files not depend on the temporary paths of the files
-func Normalize(s string, normalizers []Normalizer) string {
- type entry struct {
- path string
- index int
- fragment string
- }
- var match []entry
- // collect the initial state of all the matchers
- for _, n := range normalizers {
- index := strings.Index(s, n.path)
- if index >= 0 {
- match = append(match, entry{n.path, index, n.fragment})
- }
- if n.slashed != "" {
- index := strings.Index(s, n.slashed)
- if index >= 0 {
- match = append(match, entry{n.slashed, index, n.fragment})
- }
- }
- if n.escaped != "" {
- index := strings.Index(s, n.escaped)
- if index >= 0 {
- match = append(match, entry{n.escaped, index, n.fragment})
- }
- }
- }
- // result should be the same or shorter than the input
- var b strings.Builder
- last := 0
- for {
- // find the nearest path match to the start of the buffer
- next := -1
- nearest := len(s)
- for i, c := range match {
- if c.index >= 0 && nearest > c.index {
- nearest = c.index
- next = i
- }
- }
- // if there are no matches, we copy the rest of the string and are done
- if next < 0 {
- b.WriteString(s[last:])
- return b.String()
- }
- // we have a match
- n := &match[next]
- // copy up to the start of the match
- b.WriteString(s[last:n.index])
- // skip over the filename
- last = n.index + len(n.path)
-
- // Hack: In multi-module mode, we add a "testmodule/" prefix, so trim
- // it from the fragment.
- fragment := n.fragment
- if strings.HasPrefix(fragment, "testmodule") {
- split := strings.Split(filepath.ToSlash(fragment), "/")
- fragment = filepath.FromSlash(strings.Join(split[1:], "/"))
- }
-
- // add in the fragment instead
- b.WriteString(fragment)
- // see what the next match for this path is
- n.index = strings.Index(s[last:], n.path)
- if n.index >= 0 {
- n.index += last
- }
- }
-}
diff --git a/internal/lsp/tests/tests.go b/internal/lsp/tests/tests.go
deleted file mode 100644
index 6a77fc775..000000000
--- a/internal/lsp/tests/tests.go
+++ /dev/null
@@ -1,1458 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package tests exports functionality to be used across a variety of gopls tests.
-package tests
-
-import (
- "bytes"
- "context"
- "flag"
- "fmt"
- "go/ast"
- "go/token"
- "io"
- "io/ioutil"
- "os"
- "path/filepath"
- "regexp"
- "sort"
- "strconv"
- "strings"
- "sync"
- "testing"
- "time"
-
- "golang.org/x/tools/go/expect"
- "golang.org/x/tools/go/packages"
- "golang.org/x/tools/go/packages/packagestest"
- "golang.org/x/tools/internal/lsp/command"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/lsp/source/completion"
- "golang.org/x/tools/internal/span"
- "golang.org/x/tools/internal/testenv"
- "golang.org/x/tools/internal/typeparams"
- "golang.org/x/tools/txtar"
-)
-
-const (
- overlayFileSuffix = ".overlay"
- goldenFileSuffix = ".golden"
- inFileSuffix = ".in"
- testModule = "golang.org/x/tools/internal/lsp"
-)
-
-var summaryFile = "summary.txt"
-
-func init() {
- if typeparams.Enabled {
- summaryFile = "summary_go1.18.txt"
- }
-}
-
-var UpdateGolden = flag.Bool("golden", false, "Update golden files")
-
-type CallHierarchy map[span.Span]*CallHierarchyResult
-type CodeLens map[span.URI][]protocol.CodeLens
-type Diagnostics map[span.URI][]*source.Diagnostic
-type CompletionItems map[token.Pos]*completion.CompletionItem
-type Completions map[span.Span][]Completion
-type CompletionSnippets map[span.Span][]CompletionSnippet
-type UnimportedCompletions map[span.Span][]Completion
-type DeepCompletions map[span.Span][]Completion
-type FuzzyCompletions map[span.Span][]Completion
-type CaseSensitiveCompletions map[span.Span][]Completion
-type RankCompletions map[span.Span][]Completion
-type FoldingRanges []span.Span
-type Formats []span.Span
-type Imports []span.Span
-type SemanticTokens []span.Span
-type SuggestedFixes map[span.Span][]string
-type FunctionExtractions map[span.Span]span.Span
-type MethodExtractions map[span.Span]span.Span
-type Definitions map[span.Span]Definition
-type Implementations map[span.Span][]span.Span
-type Highlights map[span.Span][]span.Span
-type References map[span.Span][]span.Span
-type Renames map[span.Span]string
-type PrepareRenames map[span.Span]*source.PrepareItem
-type Symbols map[span.URI][]protocol.DocumentSymbol
-type SymbolsChildren map[string][]protocol.DocumentSymbol
-type SymbolInformation map[span.Span]protocol.SymbolInformation
-type WorkspaceSymbols map[WorkspaceSymbolsTestType]map[span.URI][]string
-type Signatures map[span.Span]*protocol.SignatureHelp
-type Links map[span.URI][]Link
-type AddImport map[span.URI]string
-type Hovers map[span.Span]string
-
-type Data struct {
- Config packages.Config
- Exported *packagestest.Exported
- CallHierarchy CallHierarchy
- CodeLens CodeLens
- Diagnostics Diagnostics
- CompletionItems CompletionItems
- Completions Completions
- CompletionSnippets CompletionSnippets
- UnimportedCompletions UnimportedCompletions
- DeepCompletions DeepCompletions
- FuzzyCompletions FuzzyCompletions
- CaseSensitiveCompletions CaseSensitiveCompletions
- RankCompletions RankCompletions
- FoldingRanges FoldingRanges
- Formats Formats
- Imports Imports
- SemanticTokens SemanticTokens
- SuggestedFixes SuggestedFixes
- FunctionExtractions FunctionExtractions
- MethodExtractions MethodExtractions
- Definitions Definitions
- Implementations Implementations
- Highlights Highlights
- References References
- Renames Renames
- PrepareRenames PrepareRenames
- Symbols Symbols
- symbolsChildren SymbolsChildren
- symbolInformation SymbolInformation
- WorkspaceSymbols WorkspaceSymbols
- Signatures Signatures
- Links Links
- AddImport AddImport
- Hovers Hovers
-
- t testing.TB
- fragments map[string]string
- dir string
- golden map[string]*Golden
- mode string
-
- ModfileFlagAvailable bool
-
- mappersMu sync.Mutex
- mappers map[span.URI]*protocol.ColumnMapper
-}
-
-type Tests interface {
- CallHierarchy(*testing.T, span.Span, *CallHierarchyResult)
- CodeLens(*testing.T, span.URI, []protocol.CodeLens)
- Diagnostics(*testing.T, span.URI, []*source.Diagnostic)
- Completion(*testing.T, span.Span, Completion, CompletionItems)
- CompletionSnippet(*testing.T, span.Span, CompletionSnippet, bool, CompletionItems)
- UnimportedCompletion(*testing.T, span.Span, Completion, CompletionItems)
- DeepCompletion(*testing.T, span.Span, Completion, CompletionItems)
- FuzzyCompletion(*testing.T, span.Span, Completion, CompletionItems)
- CaseSensitiveCompletion(*testing.T, span.Span, Completion, CompletionItems)
- RankCompletion(*testing.T, span.Span, Completion, CompletionItems)
- FoldingRanges(*testing.T, span.Span)
- Format(*testing.T, span.Span)
- Import(*testing.T, span.Span)
- SemanticTokens(*testing.T, span.Span)
- SuggestedFix(*testing.T, span.Span, []string, int)
- FunctionExtraction(*testing.T, span.Span, span.Span)
- MethodExtraction(*testing.T, span.Span, span.Span)
- Definition(*testing.T, span.Span, Definition)
- Implementation(*testing.T, span.Span, []span.Span)
- Highlight(*testing.T, span.Span, []span.Span)
- References(*testing.T, span.Span, []span.Span)
- Rename(*testing.T, span.Span, string)
- PrepareRename(*testing.T, span.Span, *source.PrepareItem)
- Symbols(*testing.T, span.URI, []protocol.DocumentSymbol)
- WorkspaceSymbols(*testing.T, span.URI, string, WorkspaceSymbolsTestType)
- SignatureHelp(*testing.T, span.Span, *protocol.SignatureHelp)
- Link(*testing.T, span.URI, []Link)
- AddImport(*testing.T, span.URI, string)
- Hover(*testing.T, span.Span, string)
-}
-
-type Definition struct {
- Name string
- IsType bool
- OnlyHover bool
- Src, Def span.Span
-}
-
-type CompletionTestType int
-
-const (
- // Default runs the standard completion tests.
- CompletionDefault = CompletionTestType(iota)
-
- // Unimported tests the autocompletion of unimported packages.
- CompletionUnimported
-
- // Deep tests deep completion.
- CompletionDeep
-
- // Fuzzy tests deep completion and fuzzy matching.
- CompletionFuzzy
-
- // CaseSensitive tests case sensitive completion.
- CompletionCaseSensitive
-
- // CompletionRank candidates in test must be valid and in the right relative order.
- CompletionRank
-)
-
-type WorkspaceSymbolsTestType int
-
-const (
- // Default runs the standard workspace symbols tests.
- WorkspaceSymbolsDefault = WorkspaceSymbolsTestType(iota)
-
- // Fuzzy tests workspace symbols with fuzzy matching.
- WorkspaceSymbolsFuzzy
-
- // CaseSensitive tests workspace symbols with case sensitive.
- WorkspaceSymbolsCaseSensitive
-)
-
-type Completion struct {
- CompletionItems []token.Pos
-}
-
-type CompletionSnippet struct {
- CompletionItem token.Pos
- PlainSnippet string
- PlaceholderSnippet string
-}
-
-type CallHierarchyResult struct {
- IncomingCalls, OutgoingCalls []protocol.CallHierarchyItem
-}
-
-type Link struct {
- Src span.Span
- Target string
- NotePosition token.Position
-}
-
-type Golden struct {
- Filename string
- Archive *txtar.Archive
- Modified bool
-}
-
-func Context(t testing.TB) context.Context {
- return context.Background()
-}
-
-func DefaultOptions(o *source.Options) {
- o.SupportedCodeActions = map[source.FileKind]map[protocol.CodeActionKind]bool{
- source.Go: {
- protocol.SourceOrganizeImports: true,
- protocol.QuickFix: true,
- protocol.RefactorRewrite: true,
- protocol.RefactorExtract: true,
- protocol.SourceFixAll: true,
- },
- source.Mod: {
- protocol.SourceOrganizeImports: true,
- },
- source.Sum: {},
- source.Work: {},
- source.Tmpl: {},
- }
- o.UserOptions.Codelenses[string(command.Test)] = true
- o.HoverKind = source.SynopsisDocumentation
- o.InsertTextFormat = protocol.SnippetTextFormat
- o.CompletionBudget = time.Minute
- o.HierarchicalDocumentSymbolSupport = true
- o.ExperimentalWorkspaceModule = true
- o.SemanticTokens = true
-}
-
-func RunTests(t *testing.T, dataDir string, includeMultiModule bool, f func(*testing.T, *Data)) {
- t.Helper()
- modes := []string{"Modules", "GOPATH"}
- if includeMultiModule {
- modes = append(modes, "MultiModule")
- }
- for _, mode := range modes {
- t.Run(mode, func(t *testing.T) {
- if mode == "MultiModule" {
- // Some bug in 1.12 breaks reading markers, and it's not worth figuring out.
- testenv.NeedsGo1Point(t, 13)
- }
- datum := load(t, mode, dataDir)
- t.Helper()
- f(t, datum)
- })
- }
-}
-
-func load(t testing.TB, mode string, dir string) *Data {
- datum := &Data{
- CallHierarchy: make(CallHierarchy),
- CodeLens: make(CodeLens),
- Diagnostics: make(Diagnostics),
- CompletionItems: make(CompletionItems),
- Completions: make(Completions),
- CompletionSnippets: make(CompletionSnippets),
- UnimportedCompletions: make(UnimportedCompletions),
- DeepCompletions: make(DeepCompletions),
- FuzzyCompletions: make(FuzzyCompletions),
- RankCompletions: make(RankCompletions),
- CaseSensitiveCompletions: make(CaseSensitiveCompletions),
- Definitions: make(Definitions),
- Implementations: make(Implementations),
- Highlights: make(Highlights),
- References: make(References),
- Renames: make(Renames),
- PrepareRenames: make(PrepareRenames),
- SuggestedFixes: make(SuggestedFixes),
- FunctionExtractions: make(FunctionExtractions),
- MethodExtractions: make(MethodExtractions),
- Symbols: make(Symbols),
- symbolsChildren: make(SymbolsChildren),
- symbolInformation: make(SymbolInformation),
- WorkspaceSymbols: make(WorkspaceSymbols),
- Signatures: make(Signatures),
- Links: make(Links),
- AddImport: make(AddImport),
- Hovers: make(Hovers),
-
- t: t,
- dir: dir,
- fragments: map[string]string{},
- golden: map[string]*Golden{},
- mode: mode,
- mappers: map[span.URI]*protocol.ColumnMapper{},
- }
-
- if !*UpdateGolden {
- summary := filepath.Join(filepath.FromSlash(dir), summaryFile+goldenFileSuffix)
- if _, err := os.Stat(summary); os.IsNotExist(err) {
- t.Fatalf("could not find golden file summary.txt in %#v", dir)
- }
- archive, err := txtar.ParseFile(summary)
- if err != nil {
- t.Fatalf("could not read golden file %v/%v: %v", dir, summary, err)
- }
- datum.golden[summaryFile] = &Golden{
- Filename: summary,
- Archive: archive,
- }
- }
-
- files := packagestest.MustCopyFileTree(dir)
- // Prune test cases that exercise generics.
- if !typeparams.Enabled {
- for name := range files {
- if strings.Contains(name, "_generics") {
- delete(files, name)
- }
- }
- }
- overlays := map[string][]byte{}
- for fragment, operation := range files {
- if trimmed := strings.TrimSuffix(fragment, goldenFileSuffix); trimmed != fragment {
- delete(files, fragment)
- goldFile := filepath.Join(dir, fragment)
- archive, err := txtar.ParseFile(goldFile)
- if err != nil {
- t.Fatalf("could not read golden file %v: %v", fragment, err)
- }
- datum.golden[trimmed] = &Golden{
- Filename: goldFile,
- Archive: archive,
- }
- } else if trimmed := strings.TrimSuffix(fragment, inFileSuffix); trimmed != fragment {
- delete(files, fragment)
- files[trimmed] = operation
- } else if index := strings.Index(fragment, overlayFileSuffix); index >= 0 {
- delete(files, fragment)
- partial := fragment[:index] + fragment[index+len(overlayFileSuffix):]
- contents, err := ioutil.ReadFile(filepath.Join(dir, fragment))
- if err != nil {
- t.Fatal(err)
- }
- overlays[partial] = contents
- }
- }
-
- modules := []packagestest.Module{
- {
- Name: testModule,
- Files: files,
- Overlay: overlays,
- },
- }
- switch mode {
- case "Modules":
- datum.Exported = packagestest.Export(t, packagestest.Modules, modules)
- case "GOPATH":
- datum.Exported = packagestest.Export(t, packagestest.GOPATH, modules)
- case "MultiModule":
- files := map[string]interface{}{}
- for k, v := range modules[0].Files {
- files[filepath.Join("testmodule", k)] = v
- }
- modules[0].Files = files
-
- overlays := map[string][]byte{}
- for k, v := range modules[0].Overlay {
- overlays[filepath.Join("testmodule", k)] = v
- }
- modules[0].Overlay = overlays
-
- golden := map[string]*Golden{}
- for k, v := range datum.golden {
- if k == summaryFile {
- golden[k] = v
- } else {
- golden[filepath.Join("testmodule", k)] = v
- }
- }
- datum.golden = golden
-
- datum.Exported = packagestest.Export(t, packagestest.Modules, modules)
- default:
- panic("unknown mode " + mode)
- }
-
- for _, m := range modules {
- for fragment := range m.Files {
- filename := datum.Exported.File(m.Name, fragment)
- datum.fragments[filename] = fragment
- }
- }
-
- // Turn off go/packages debug logging.
- datum.Exported.Config.Logf = nil
- datum.Config.Logf = nil
-
- // Merge the exported.Config with the view.Config.
- datum.Config = *datum.Exported.Config
- datum.Config.Fset = token.NewFileSet()
- datum.Config.Context = Context(nil)
- datum.Config.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) {
- panic("ParseFile should not be called")
- }
-
- // Do a first pass to collect special markers for completion and workspace symbols.
- if err := datum.Exported.Expect(map[string]interface{}{
- "item": func(name string, r packagestest.Range, _ []string) {
- datum.Exported.Mark(name, r)
- },
- "symbol": func(name string, r packagestest.Range, _ []string) {
- datum.Exported.Mark(name, r)
- },
- }); err != nil {
- t.Fatal(err)
- }
-
- // Collect any data that needs to be used by subsequent tests.
- if err := datum.Exported.Expect(map[string]interface{}{
- "codelens": datum.collectCodeLens,
- "diag": datum.collectDiagnostics,
- "item": datum.collectCompletionItems,
- "complete": datum.collectCompletions(CompletionDefault),
- "unimported": datum.collectCompletions(CompletionUnimported),
- "deep": datum.collectCompletions(CompletionDeep),
- "fuzzy": datum.collectCompletions(CompletionFuzzy),
- "casesensitive": datum.collectCompletions(CompletionCaseSensitive),
- "rank": datum.collectCompletions(CompletionRank),
- "snippet": datum.collectCompletionSnippets,
- "fold": datum.collectFoldingRanges,
- "format": datum.collectFormats,
- "import": datum.collectImports,
- "semantic": datum.collectSemanticTokens,
- "godef": datum.collectDefinitions,
- "implementations": datum.collectImplementations,
- "typdef": datum.collectTypeDefinitions,
- "hoverdef": datum.collectHoverDefinitions,
- "hover": datum.collectHovers,
- "highlight": datum.collectHighlights,
- "refs": datum.collectReferences,
- "rename": datum.collectRenames,
- "prepare": datum.collectPrepareRenames,
- "symbol": datum.collectSymbols,
- "signature": datum.collectSignatures,
- "link": datum.collectLinks,
- "suggestedfix": datum.collectSuggestedFixes,
- "extractfunc": datum.collectFunctionExtractions,
- "extractmethod": datum.collectMethodExtractions,
- "incomingcalls": datum.collectIncomingCalls,
- "outgoingcalls": datum.collectOutgoingCalls,
- "addimport": datum.collectAddImports,
- }); err != nil {
- t.Fatal(err)
- }
- for _, symbols := range datum.Symbols {
- for i := range symbols {
- children := datum.symbolsChildren[symbols[i].Name]
- symbols[i].Children = children
- }
- }
- // Collect names for the entries that require golden files.
- if err := datum.Exported.Expect(map[string]interface{}{
- "godef": datum.collectDefinitionNames,
- "hoverdef": datum.collectDefinitionNames,
- "workspacesymbol": datum.collectWorkspaceSymbols(WorkspaceSymbolsDefault),
- "workspacesymbolfuzzy": datum.collectWorkspaceSymbols(WorkspaceSymbolsFuzzy),
- "workspacesymbolcasesensitive": datum.collectWorkspaceSymbols(WorkspaceSymbolsCaseSensitive),
- }); err != nil {
- t.Fatal(err)
- }
- if mode == "MultiModule" {
- if err := moveFile(filepath.Join(datum.Config.Dir, "go.mod"), filepath.Join(datum.Config.Dir, "testmodule/go.mod")); err != nil {
- t.Fatal(err)
- }
- }
-
- return datum
-}
-
-// moveFile moves the file at oldpath to newpath, by renaming if possible
-// or copying otherwise.
-func moveFile(oldpath, newpath string) (err error) {
- renameErr := os.Rename(oldpath, newpath)
- if renameErr == nil {
- return nil
- }
-
- src, err := os.Open(oldpath)
- if err != nil {
- return err
- }
- defer func() {
- src.Close()
- if err == nil {
- err = os.Remove(oldpath)
- }
- }()
-
- perm := os.ModePerm
- fi, err := src.Stat()
- if err == nil {
- perm = fi.Mode().Perm()
- }
-
- dst, err := os.OpenFile(newpath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, perm)
- if err != nil {
- return err
- }
-
- _, err = io.Copy(dst, src)
- if closeErr := dst.Close(); err == nil {
- err = closeErr
- }
- return err
-}
-
-func Run(t *testing.T, tests Tests, data *Data) {
- t.Helper()
- checkData(t, data)
-
- eachCompletion := func(t *testing.T, cases map[span.Span][]Completion, test func(*testing.T, span.Span, Completion, CompletionItems)) {
- t.Helper()
-
- for src, exp := range cases {
- for i, e := range exp {
- t.Run(SpanName(src)+"_"+strconv.Itoa(i), func(t *testing.T) {
- t.Helper()
- if strings.Contains(t.Name(), "cgo") {
- testenv.NeedsTool(t, "cgo")
- }
- if strings.Contains(t.Name(), "declarecgo") {
- testenv.NeedsGo1Point(t, 15)
- }
- test(t, src, e, data.CompletionItems)
- })
- }
-
- }
- }
-
- t.Run("CallHierarchy", func(t *testing.T) {
- t.Helper()
- for spn, callHierarchyResult := range data.CallHierarchy {
- t.Run(SpanName(spn), func(t *testing.T) {
- t.Helper()
- tests.CallHierarchy(t, spn, callHierarchyResult)
- })
- }
- })
-
- t.Run("Completion", func(t *testing.T) {
- t.Helper()
- eachCompletion(t, data.Completions, tests.Completion)
- })
-
- t.Run("CompletionSnippets", func(t *testing.T) {
- t.Helper()
- for _, placeholders := range []bool{true, false} {
- for src, expecteds := range data.CompletionSnippets {
- for i, expected := range expecteds {
- name := SpanName(src) + "_" + strconv.Itoa(i+1)
- if placeholders {
- name += "_placeholders"
- }
-
- t.Run(name, func(t *testing.T) {
- t.Helper()
- tests.CompletionSnippet(t, src, expected, placeholders, data.CompletionItems)
- })
- }
- }
- }
- })
-
- t.Run("UnimportedCompletion", func(t *testing.T) {
- t.Helper()
- eachCompletion(t, data.UnimportedCompletions, tests.UnimportedCompletion)
- })
-
- t.Run("DeepCompletion", func(t *testing.T) {
- t.Helper()
- eachCompletion(t, data.DeepCompletions, tests.DeepCompletion)
- })
-
- t.Run("FuzzyCompletion", func(t *testing.T) {
- t.Helper()
- eachCompletion(t, data.FuzzyCompletions, tests.FuzzyCompletion)
- })
-
- t.Run("CaseSensitiveCompletion", func(t *testing.T) {
- t.Helper()
- eachCompletion(t, data.CaseSensitiveCompletions, tests.CaseSensitiveCompletion)
- })
-
- t.Run("RankCompletions", func(t *testing.T) {
- t.Helper()
- eachCompletion(t, data.RankCompletions, tests.RankCompletion)
- })
-
- t.Run("CodeLens", func(t *testing.T) {
- t.Helper()
- for uri, want := range data.CodeLens {
- // Check if we should skip this URI if the -modfile flag is not available.
- if shouldSkip(data, uri) {
- continue
- }
- t.Run(uriName(uri), func(t *testing.T) {
- t.Helper()
- tests.CodeLens(t, uri, want)
- })
- }
- })
-
- t.Run("Diagnostics", func(t *testing.T) {
- t.Helper()
- for uri, want := range data.Diagnostics {
- // Check if we should skip this URI if the -modfile flag is not available.
- if shouldSkip(data, uri) {
- continue
- }
- t.Run(uriName(uri), func(t *testing.T) {
- t.Helper()
- tests.Diagnostics(t, uri, want)
- })
- }
- })
-
- t.Run("FoldingRange", func(t *testing.T) {
- t.Helper()
- for _, spn := range data.FoldingRanges {
- t.Run(uriName(spn.URI()), func(t *testing.T) {
- t.Helper()
- tests.FoldingRanges(t, spn)
- })
- }
- })
-
- t.Run("Format", func(t *testing.T) {
- t.Helper()
- for _, spn := range data.Formats {
- t.Run(uriName(spn.URI()), func(t *testing.T) {
- t.Helper()
- tests.Format(t, spn)
- })
- }
- })
-
- t.Run("Import", func(t *testing.T) {
- t.Helper()
- for _, spn := range data.Imports {
- t.Run(uriName(spn.URI()), func(t *testing.T) {
- t.Helper()
- tests.Import(t, spn)
- })
- }
- })
-
- t.Run("SemanticTokens", func(t *testing.T) {
- t.Helper()
- for _, spn := range data.SemanticTokens {
- t.Run(uriName(spn.URI()), func(t *testing.T) {
- t.Helper()
- tests.SemanticTokens(t, spn)
- })
- }
- })
-
- t.Run("SuggestedFix", func(t *testing.T) {
- t.Helper()
- for spn, actionKinds := range data.SuggestedFixes {
- // Check if we should skip this spn if the -modfile flag is not available.
- if shouldSkip(data, spn.URI()) {
- continue
- }
- t.Run(SpanName(spn), func(t *testing.T) {
- t.Helper()
- tests.SuggestedFix(t, spn, actionKinds, 1)
- })
- }
- })
-
- t.Run("FunctionExtraction", func(t *testing.T) {
- t.Helper()
- for start, end := range data.FunctionExtractions {
- // Check if we should skip this spn if the -modfile flag is not available.
- if shouldSkip(data, start.URI()) {
- continue
- }
- t.Run(SpanName(start), func(t *testing.T) {
- t.Helper()
- tests.FunctionExtraction(t, start, end)
- })
- }
- })
-
- t.Run("MethodExtraction", func(t *testing.T) {
- t.Helper()
- for start, end := range data.MethodExtractions {
- // Check if we should skip this spn if the -modfile flag is not available.
- if shouldSkip(data, start.URI()) {
- continue
- }
- t.Run(SpanName(start), func(t *testing.T) {
- t.Helper()
- tests.MethodExtraction(t, start, end)
- })
- }
- })
-
- t.Run("Definition", func(t *testing.T) {
- t.Helper()
- for spn, d := range data.Definitions {
- t.Run(SpanName(spn), func(t *testing.T) {
- t.Helper()
- if strings.Contains(t.Name(), "cgo") {
- testenv.NeedsTool(t, "cgo")
- }
- if strings.Contains(t.Name(), "declarecgo") {
- testenv.NeedsGo1Point(t, 15)
- }
- tests.Definition(t, spn, d)
- })
- }
- })
-
- t.Run("Implementation", func(t *testing.T) {
- t.Helper()
- for spn, m := range data.Implementations {
- t.Run(SpanName(spn), func(t *testing.T) {
- t.Helper()
- tests.Implementation(t, spn, m)
- })
- }
- })
-
- t.Run("Highlight", func(t *testing.T) {
- t.Helper()
- for pos, locations := range data.Highlights {
- t.Run(SpanName(pos), func(t *testing.T) {
- t.Helper()
- tests.Highlight(t, pos, locations)
- })
- }
- })
-
- t.Run("Hover", func(t *testing.T) {
- t.Helper()
- for pos, info := range data.Hovers {
- t.Run(SpanName(pos), func(t *testing.T) {
- t.Helper()
- tests.Hover(t, pos, info)
- })
- }
- })
-
- t.Run("References", func(t *testing.T) {
- t.Helper()
- for src, itemList := range data.References {
- t.Run(SpanName(src), func(t *testing.T) {
- t.Helper()
- tests.References(t, src, itemList)
- })
- }
- })
-
- t.Run("Renames", func(t *testing.T) {
- t.Helper()
- for spn, newText := range data.Renames {
- t.Run(uriName(spn.URI())+"_"+newText, func(t *testing.T) {
- t.Helper()
- tests.Rename(t, spn, newText)
- })
- }
- })
-
- t.Run("PrepareRenames", func(t *testing.T) {
- t.Helper()
- for src, want := range data.PrepareRenames {
- t.Run(SpanName(src), func(t *testing.T) {
- t.Helper()
- tests.PrepareRename(t, src, want)
- })
- }
- })
-
- t.Run("Symbols", func(t *testing.T) {
- t.Helper()
- for uri, expectedSymbols := range data.Symbols {
- t.Run(uriName(uri), func(t *testing.T) {
- t.Helper()
- tests.Symbols(t, uri, expectedSymbols)
- })
- }
- })
-
- t.Run("WorkspaceSymbols", func(t *testing.T) {
- t.Helper()
-
- for _, typ := range []WorkspaceSymbolsTestType{
- WorkspaceSymbolsDefault,
- WorkspaceSymbolsCaseSensitive,
- WorkspaceSymbolsFuzzy,
- } {
- for uri, cases := range data.WorkspaceSymbols[typ] {
- for _, query := range cases {
- name := query
- if name == "" {
- name = "EmptyQuery"
- }
- t.Run(name, func(t *testing.T) {
- t.Helper()
- tests.WorkspaceSymbols(t, uri, query, typ)
- })
- }
- }
- }
-
- })
-
- t.Run("SignatureHelp", func(t *testing.T) {
- t.Helper()
- for spn, expectedSignature := range data.Signatures {
- t.Run(SpanName(spn), func(t *testing.T) {
- t.Helper()
- tests.SignatureHelp(t, spn, expectedSignature)
- })
- }
- })
-
- t.Run("Link", func(t *testing.T) {
- t.Helper()
- for uri, wantLinks := range data.Links {
- // If we are testing GOPATH, then we do not want links with the versions
- // attached (pkg.go.dev/repoa/moda@v1.1.0/pkg), unless the file is a
- // go.mod, then we can skip it altogether.
- if data.Exported.Exporter == packagestest.GOPATH {
- if strings.HasSuffix(uri.Filename(), ".mod") {
- continue
- }
- re := regexp.MustCompile(`@v\d+\.\d+\.[\w-]+`)
- for i, link := range wantLinks {
- wantLinks[i].Target = re.ReplaceAllString(link.Target, "")
- }
- }
- t.Run(uriName(uri), func(t *testing.T) {
- t.Helper()
- tests.Link(t, uri, wantLinks)
- })
- }
- })
-
- t.Run("AddImport", func(t *testing.T) {
- t.Helper()
- for uri, exp := range data.AddImport {
- t.Run(uriName(uri), func(t *testing.T) {
- tests.AddImport(t, uri, exp)
- })
- }
- })
-
- if *UpdateGolden {
- for _, golden := range data.golden {
- if !golden.Modified {
- continue
- }
- sort.Slice(golden.Archive.Files, func(i, j int) bool {
- return golden.Archive.Files[i].Name < golden.Archive.Files[j].Name
- })
- if err := ioutil.WriteFile(golden.Filename, txtar.Format(golden.Archive), 0666); err != nil {
- t.Fatal(err)
- }
- }
- }
-}
-
-func checkData(t *testing.T, data *Data) {
- buf := &bytes.Buffer{}
- diagnosticsCount := 0
- for _, want := range data.Diagnostics {
- diagnosticsCount += len(want)
- }
- linksCount := 0
- for _, want := range data.Links {
- linksCount += len(want)
- }
- definitionCount := 0
- typeDefinitionCount := 0
- for _, d := range data.Definitions {
- if d.IsType {
- typeDefinitionCount++
- } else {
- definitionCount++
- }
- }
-
- snippetCount := 0
- for _, want := range data.CompletionSnippets {
- snippetCount += len(want)
- }
-
- countCompletions := func(c map[span.Span][]Completion) (count int) {
- for _, want := range c {
- count += len(want)
- }
- return count
- }
-
- countCodeLens := func(c map[span.URI][]protocol.CodeLens) (count int) {
- for _, want := range c {
- count += len(want)
- }
- return count
- }
-
- countWorkspaceSymbols := func(c map[WorkspaceSymbolsTestType]map[span.URI][]string) (count int) {
- for _, typs := range c {
- for _, queries := range typs {
- count += len(queries)
- }
- }
- return count
- }
-
- fmt.Fprintf(buf, "CallHierarchyCount = %v\n", len(data.CallHierarchy))
- fmt.Fprintf(buf, "CodeLensCount = %v\n", countCodeLens(data.CodeLens))
- fmt.Fprintf(buf, "CompletionsCount = %v\n", countCompletions(data.Completions))
- fmt.Fprintf(buf, "CompletionSnippetCount = %v\n", snippetCount)
- fmt.Fprintf(buf, "UnimportedCompletionsCount = %v\n", countCompletions(data.UnimportedCompletions))
- fmt.Fprintf(buf, "DeepCompletionsCount = %v\n", countCompletions(data.DeepCompletions))
- fmt.Fprintf(buf, "FuzzyCompletionsCount = %v\n", countCompletions(data.FuzzyCompletions))
- fmt.Fprintf(buf, "RankedCompletionsCount = %v\n", countCompletions(data.RankCompletions))
- fmt.Fprintf(buf, "CaseSensitiveCompletionsCount = %v\n", countCompletions(data.CaseSensitiveCompletions))
- fmt.Fprintf(buf, "DiagnosticsCount = %v\n", diagnosticsCount)
- fmt.Fprintf(buf, "FoldingRangesCount = %v\n", len(data.FoldingRanges))
- fmt.Fprintf(buf, "FormatCount = %v\n", len(data.Formats))
- fmt.Fprintf(buf, "ImportCount = %v\n", len(data.Imports))
- fmt.Fprintf(buf, "SemanticTokenCount = %v\n", len(data.SemanticTokens))
- fmt.Fprintf(buf, "SuggestedFixCount = %v\n", len(data.SuggestedFixes))
- fmt.Fprintf(buf, "FunctionExtractionCount = %v\n", len(data.FunctionExtractions))
- fmt.Fprintf(buf, "MethodExtractionCount = %v\n", len(data.MethodExtractions))
- fmt.Fprintf(buf, "DefinitionsCount = %v\n", definitionCount)
- fmt.Fprintf(buf, "TypeDefinitionsCount = %v\n", typeDefinitionCount)
- fmt.Fprintf(buf, "HighlightsCount = %v\n", len(data.Highlights))
- fmt.Fprintf(buf, "ReferencesCount = %v\n", len(data.References))
- fmt.Fprintf(buf, "RenamesCount = %v\n", len(data.Renames))
- fmt.Fprintf(buf, "PrepareRenamesCount = %v\n", len(data.PrepareRenames))
- fmt.Fprintf(buf, "SymbolsCount = %v\n", len(data.Symbols))
- fmt.Fprintf(buf, "WorkspaceSymbolsCount = %v\n", countWorkspaceSymbols(data.WorkspaceSymbols))
- fmt.Fprintf(buf, "SignaturesCount = %v\n", len(data.Signatures))
- fmt.Fprintf(buf, "LinksCount = %v\n", linksCount)
- fmt.Fprintf(buf, "ImplementationsCount = %v\n", len(data.Implementations))
-
- want := string(data.Golden("summary", summaryFile, func() ([]byte, error) {
- return buf.Bytes(), nil
- }))
- got := buf.String()
- if want != got {
- t.Errorf("test summary does not match:\n%s", Diff(t, want, got))
- }
-}
-
-func (data *Data) Mapper(uri span.URI) (*protocol.ColumnMapper, error) {
- data.mappersMu.Lock()
- defer data.mappersMu.Unlock()
-
- if _, ok := data.mappers[uri]; !ok {
- content, err := data.Exported.FileContents(uri.Filename())
- if err != nil {
- return nil, err
- }
- converter := span.NewContentConverter(uri.Filename(), content)
- data.mappers[uri] = &protocol.ColumnMapper{
- URI: uri,
- Converter: converter,
- Content: content,
- }
- }
- return data.mappers[uri], nil
-}
-
-func (data *Data) Golden(tag string, target string, update func() ([]byte, error)) []byte {
- data.t.Helper()
- fragment, found := data.fragments[target]
- if !found {
- if filepath.IsAbs(target) {
- data.t.Fatalf("invalid golden file fragment %v", target)
- }
- fragment = target
- }
- golden := data.golden[fragment]
- if golden == nil {
- if !*UpdateGolden {
- data.t.Fatalf("could not find golden file %v: %v", fragment, tag)
- }
- golden = &Golden{
- Filename: filepath.Join(data.dir, fragment+goldenFileSuffix),
- Archive: &txtar.Archive{},
- Modified: true,
- }
- data.golden[fragment] = golden
- }
- var file *txtar.File
- for i := range golden.Archive.Files {
- f := &golden.Archive.Files[i]
- if f.Name == tag {
- file = f
- break
- }
- }
- if *UpdateGolden {
- if file == nil {
- golden.Archive.Files = append(golden.Archive.Files, txtar.File{
- Name: tag,
- })
- file = &golden.Archive.Files[len(golden.Archive.Files)-1]
- }
- contents, err := update()
- if err != nil {
- data.t.Fatalf("could not update golden file %v: %v", fragment, err)
- }
- file.Data = append(contents, '\n') // add trailing \n for txtar
- golden.Modified = true
-
- }
- if file == nil {
- data.t.Fatalf("could not find golden contents %v: %v", fragment, tag)
- }
- if len(file.Data) == 0 {
- return file.Data
- }
- return file.Data[:len(file.Data)-1] // drop the trailing \n
-}
-
-func (data *Data) collectCodeLens(spn span.Span, title, cmd string) {
- if _, ok := data.CodeLens[spn.URI()]; !ok {
- data.CodeLens[spn.URI()] = []protocol.CodeLens{}
- }
- m, err := data.Mapper(spn.URI())
- if err != nil {
- return
- }
- rng, err := m.Range(spn)
- if err != nil {
- return
- }
- data.CodeLens[spn.URI()] = append(data.CodeLens[spn.URI()], protocol.CodeLens{
- Range: rng,
- Command: protocol.Command{
- Title: title,
- Command: cmd,
- },
- })
-}
-
-func (data *Data) collectDiagnostics(spn span.Span, msgSource, msg, msgSeverity string) {
- if _, ok := data.Diagnostics[spn.URI()]; !ok {
- data.Diagnostics[spn.URI()] = []*source.Diagnostic{}
- }
- m, err := data.Mapper(spn.URI())
- if err != nil {
- return
- }
- rng, err := m.Range(spn)
- if err != nil {
- return
- }
- severity := protocol.SeverityError
- switch msgSeverity {
- case "error":
- severity = protocol.SeverityError
- case "warning":
- severity = protocol.SeverityWarning
- case "hint":
- severity = protocol.SeverityHint
- case "information":
- severity = protocol.SeverityInformation
- }
- // This is not the correct way to do this, but it seems excessive to do the full conversion here.
- want := &source.Diagnostic{
- Range: rng,
- Severity: severity,
- Source: source.DiagnosticSource(msgSource),
- Message: msg,
- }
- data.Diagnostics[spn.URI()] = append(data.Diagnostics[spn.URI()], want)
-}
-
-func (data *Data) collectCompletions(typ CompletionTestType) func(span.Span, []token.Pos) {
- result := func(m map[span.Span][]Completion, src span.Span, expected []token.Pos) {
- m[src] = append(m[src], Completion{
- CompletionItems: expected,
- })
- }
- switch typ {
- case CompletionDeep:
- return func(src span.Span, expected []token.Pos) {
- result(data.DeepCompletions, src, expected)
- }
- case CompletionUnimported:
- return func(src span.Span, expected []token.Pos) {
- result(data.UnimportedCompletions, src, expected)
- }
- case CompletionFuzzy:
- return func(src span.Span, expected []token.Pos) {
- result(data.FuzzyCompletions, src, expected)
- }
- case CompletionRank:
- return func(src span.Span, expected []token.Pos) {
- result(data.RankCompletions, src, expected)
- }
- case CompletionCaseSensitive:
- return func(src span.Span, expected []token.Pos) {
- result(data.CaseSensitiveCompletions, src, expected)
- }
- default:
- return func(src span.Span, expected []token.Pos) {
- result(data.Completions, src, expected)
- }
- }
-}
-
-func (data *Data) collectCompletionItems(pos token.Pos, args []string) {
- if len(args) < 3 {
- loc := data.Exported.ExpectFileSet.Position(pos)
- data.t.Fatalf("%s:%d: @item expects at least 3 args, got %d",
- loc.Filename, loc.Line, len(args))
- }
- label, detail, kind := args[0], args[1], args[2]
- var documentation string
- if len(args) == 4 {
- documentation = args[3]
- }
- data.CompletionItems[pos] = &completion.CompletionItem{
- Label: label,
- Detail: detail,
- Kind: protocol.ParseCompletionItemKind(kind),
- Documentation: documentation,
- }
-}
-
-func (data *Data) collectFoldingRanges(spn span.Span) {
- data.FoldingRanges = append(data.FoldingRanges, spn)
-}
-
-func (data *Data) collectFormats(spn span.Span) {
- data.Formats = append(data.Formats, spn)
-}
-
-func (data *Data) collectImports(spn span.Span) {
- data.Imports = append(data.Imports, spn)
-}
-
-func (data *Data) collectAddImports(spn span.Span, imp string) {
- data.AddImport[spn.URI()] = imp
-}
-
-func (data *Data) collectSemanticTokens(spn span.Span) {
- data.SemanticTokens = append(data.SemanticTokens, spn)
-}
-
-func (data *Data) collectSuggestedFixes(spn span.Span, actionKind string) {
- if _, ok := data.SuggestedFixes[spn]; !ok {
- data.SuggestedFixes[spn] = []string{}
- }
- data.SuggestedFixes[spn] = append(data.SuggestedFixes[spn], actionKind)
-}
-
-func (data *Data) collectFunctionExtractions(start span.Span, end span.Span) {
- if _, ok := data.FunctionExtractions[start]; !ok {
- data.FunctionExtractions[start] = end
- }
-}
-
-func (data *Data) collectMethodExtractions(start span.Span, end span.Span) {
- if _, ok := data.MethodExtractions[start]; !ok {
- data.MethodExtractions[start] = end
- }
-}
-
-func (data *Data) collectDefinitions(src, target span.Span) {
- data.Definitions[src] = Definition{
- Src: src,
- Def: target,
- }
-}
-
-func (data *Data) collectImplementations(src span.Span, targets []span.Span) {
- data.Implementations[src] = targets
-}
-
-func (data *Data) collectIncomingCalls(src span.Span, calls []span.Span) {
- for _, call := range calls {
- m, err := data.Mapper(call.URI())
- if err != nil {
- data.t.Fatal(err)
- }
- rng, err := m.Range(call)
- if err != nil {
- data.t.Fatal(err)
- }
- // we're only comparing protocol.range
- if data.CallHierarchy[src] != nil {
- data.CallHierarchy[src].IncomingCalls = append(data.CallHierarchy[src].IncomingCalls,
- protocol.CallHierarchyItem{
- URI: protocol.DocumentURI(call.URI()),
- Range: rng,
- })
- } else {
- data.CallHierarchy[src] = &CallHierarchyResult{
- IncomingCalls: []protocol.CallHierarchyItem{
- {URI: protocol.DocumentURI(call.URI()), Range: rng},
- },
- }
- }
- }
-}
-
-func (data *Data) collectOutgoingCalls(src span.Span, calls []span.Span) {
- if data.CallHierarchy[src] == nil {
- data.CallHierarchy[src] = &CallHierarchyResult{}
- }
- for _, call := range calls {
- m, err := data.Mapper(call.URI())
- if err != nil {
- data.t.Fatal(err)
- }
- rng, err := m.Range(call)
- if err != nil {
- data.t.Fatal(err)
- }
- // we're only comparing protocol.range
- data.CallHierarchy[src].OutgoingCalls = append(data.CallHierarchy[src].OutgoingCalls,
- protocol.CallHierarchyItem{
- URI: protocol.DocumentURI(call.URI()),
- Range: rng,
- })
- }
-}
-
-func (data *Data) collectHoverDefinitions(src, target span.Span) {
- data.Definitions[src] = Definition{
- Src: src,
- Def: target,
- OnlyHover: true,
- }
-}
-
-func (data *Data) collectHovers(src span.Span, expected string) {
- data.Hovers[src] = expected
-}
-
-func (data *Data) collectTypeDefinitions(src, target span.Span) {
- data.Definitions[src] = Definition{
- Src: src,
- Def: target,
- IsType: true,
- }
-}
-
-func (data *Data) collectDefinitionNames(src span.Span, name string) {
- d := data.Definitions[src]
- d.Name = name
- data.Definitions[src] = d
-}
-
-func (data *Data) collectHighlights(src span.Span, expected []span.Span) {
- // Declaring a highlight in a test file: @highlight(src, expected1, expected2)
- data.Highlights[src] = append(data.Highlights[src], expected...)
-}
-
-func (data *Data) collectReferences(src span.Span, expected []span.Span) {
- data.References[src] = expected
-}
-
-func (data *Data) collectRenames(src span.Span, newText string) {
- data.Renames[src] = newText
-}
-
-func (data *Data) collectPrepareRenames(src span.Span, rng span.Range, placeholder string) {
- m, err := data.Mapper(src.URI())
- if err != nil {
- data.t.Fatal(err)
- }
- // Convert range to span and then to protocol.Range.
- spn, err := rng.Span()
- if err != nil {
- data.t.Fatal(err)
- }
- prng, err := m.Range(spn)
- if err != nil {
- data.t.Fatal(err)
- }
- data.PrepareRenames[src] = &source.PrepareItem{
- Range: prng,
- Text: placeholder,
- }
-}
-
-// collectSymbols is responsible for collecting @symbol annotations.
-func (data *Data) collectSymbols(name string, spn span.Span, kind string, parentName string, siName string) {
- m, err := data.Mapper(spn.URI())
- if err != nil {
- data.t.Fatal(err)
- }
- rng, err := m.Range(spn)
- if err != nil {
- data.t.Fatal(err)
- }
- sym := protocol.DocumentSymbol{
- Name: name,
- Kind: protocol.ParseSymbolKind(kind),
- SelectionRange: rng,
- }
- if parentName == "" {
- data.Symbols[spn.URI()] = append(data.Symbols[spn.URI()], sym)
- } else {
- data.symbolsChildren[parentName] = append(data.symbolsChildren[parentName], sym)
- }
-
- // Reuse @symbol in the workspace symbols tests.
- si := protocol.SymbolInformation{
- Name: siName,
- Kind: sym.Kind,
- Location: protocol.Location{
- URI: protocol.URIFromSpanURI(spn.URI()),
- Range: sym.SelectionRange,
- },
- }
- data.symbolInformation[spn] = si
-}
-
-func (data *Data) collectWorkspaceSymbols(typ WorkspaceSymbolsTestType) func(*expect.Note, string) {
- return func(note *expect.Note, query string) {
- if data.WorkspaceSymbols[typ] == nil {
- data.WorkspaceSymbols[typ] = make(map[span.URI][]string)
- }
- pos := data.Exported.ExpectFileSet.Position(note.Pos)
- uri := span.URIFromPath(pos.Filename)
- data.WorkspaceSymbols[typ][uri] = append(data.WorkspaceSymbols[typ][uri], query)
- }
-}
-
-func (data *Data) collectSignatures(spn span.Span, signature string, activeParam int64) {
- data.Signatures[spn] = &protocol.SignatureHelp{
- Signatures: []protocol.SignatureInformation{
- {
- Label: signature,
- },
- },
- ActiveParameter: uint32(activeParam),
- }
- // Hardcode special case to test the lack of a signature.
- if signature == "" && activeParam == 0 {
- data.Signatures[spn] = nil
- }
-}
-
-func (data *Data) collectCompletionSnippets(spn span.Span, item token.Pos, plain, placeholder string) {
- data.CompletionSnippets[spn] = append(data.CompletionSnippets[spn], CompletionSnippet{
- CompletionItem: item,
- PlainSnippet: plain,
- PlaceholderSnippet: placeholder,
- })
-}
-
-func (data *Data) collectLinks(spn span.Span, link string, note *expect.Note, fset *token.FileSet) {
- position := fset.Position(note.Pos)
- uri := spn.URI()
- data.Links[uri] = append(data.Links[uri], Link{
- Src: spn,
- Target: link,
- NotePosition: position,
- })
-}
-
-func uriName(uri span.URI) string {
- return filepath.Base(strings.TrimSuffix(uri.Filename(), ".go"))
-}
-
-func SpanName(spn span.Span) string {
- return fmt.Sprintf("%v_%v_%v", uriName(spn.URI()), spn.Start().Line(), spn.Start().Column())
-}
-
-func CopyFolderToTempDir(folder string) (string, error) {
- if _, err := os.Stat(folder); err != nil {
- return "", err
- }
- dst, err := ioutil.TempDir("", "modfile_test")
- if err != nil {
- return "", err
- }
- fds, err := ioutil.ReadDir(folder)
- if err != nil {
- return "", err
- }
- for _, fd := range fds {
- srcfp := filepath.Join(folder, fd.Name())
- stat, err := os.Stat(srcfp)
- if err != nil {
- return "", err
- }
- if !stat.Mode().IsRegular() {
- return "", fmt.Errorf("cannot copy non regular file %s", srcfp)
- }
- contents, err := ioutil.ReadFile(srcfp)
- if err != nil {
- return "", err
- }
- if err := ioutil.WriteFile(filepath.Join(dst, fd.Name()), contents, stat.Mode()); err != nil {
- return "", err
- }
- }
- return dst, nil
-}
-
-func shouldSkip(data *Data, uri span.URI) bool {
- if data.ModfileFlagAvailable {
- return false
- }
- // If the -modfile flag is not available, then we do not want to run
- // any tests on the go.mod file.
- if strings.HasSuffix(uri.Filename(), ".mod") {
- return true
- }
- // If the -modfile flag is not available, then we do not want to test any
- // uri that contains "go mod tidy".
- m, err := data.Mapper(uri)
- return err == nil && strings.Contains(string(m.Content), ", \"go mod tidy\",")
-}
diff --git a/internal/lsp/tests/util.go b/internal/lsp/tests/util.go
deleted file mode 100644
index 11dda1f8e..000000000
--- a/internal/lsp/tests/util.go
+++ /dev/null
@@ -1,580 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tests
-
-import (
- "bytes"
- "context"
- "fmt"
- "go/token"
- "path/filepath"
- "sort"
- "strconv"
- "strings"
- "testing"
-
- "golang.org/x/tools/internal/lsp/diff"
- "golang.org/x/tools/internal/lsp/diff/myers"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/lsp/source/completion"
- "golang.org/x/tools/internal/span"
-)
-
-// DiffLinks takes the links we got and checks if they are located within the source or a Note.
-// If the link is within a Note, the link is removed.
-// Returns an diff comment if there are differences and empty string if no diffs.
-func DiffLinks(mapper *protocol.ColumnMapper, wantLinks []Link, gotLinks []protocol.DocumentLink) string {
- var notePositions []token.Position
- links := make(map[span.Span]string, len(wantLinks))
- for _, link := range wantLinks {
- links[link.Src] = link.Target
- notePositions = append(notePositions, link.NotePosition)
- }
- for _, link := range gotLinks {
- spn, err := mapper.RangeSpan(link.Range)
- if err != nil {
- return fmt.Sprintf("%v", err)
- }
- linkInNote := false
- for _, notePosition := range notePositions {
- // Drop the links found inside expectation notes arguments as this links are not collected by expect package.
- if notePosition.Line == spn.Start().Line() &&
- notePosition.Column <= spn.Start().Column() {
- delete(links, spn)
- linkInNote = true
- }
- }
- if linkInNote {
- continue
- }
- if target, ok := links[spn]; ok {
- delete(links, spn)
- if target != link.Target {
- return fmt.Sprintf("for %v want %v, got %v\n", spn, target, link.Target)
- }
- } else {
- return fmt.Sprintf("unexpected link %v:%v\n", spn, link.Target)
- }
- }
- for spn, target := range links {
- return fmt.Sprintf("missing link %v:%v\n", spn, target)
- }
- return ""
-}
-
-// DiffSymbols prints the diff between expected and actual symbols test results.
-func DiffSymbols(t *testing.T, uri span.URI, want, got []protocol.DocumentSymbol) string {
- sort.Slice(want, func(i, j int) bool { return want[i].Name < want[j].Name })
- sort.Slice(got, func(i, j int) bool { return got[i].Name < got[j].Name })
- if len(got) != len(want) {
- return summarizeSymbols(-1, want, got, "different lengths got %v want %v", len(got), len(want))
- }
- for i, w := range want {
- g := got[i]
- if w.Name != g.Name {
- return summarizeSymbols(i, want, got, "incorrect name got %v want %v", g.Name, w.Name)
- }
- if w.Kind != g.Kind {
- return summarizeSymbols(i, want, got, "incorrect kind got %v want %v", g.Kind, w.Kind)
- }
- if protocol.CompareRange(w.SelectionRange, g.SelectionRange) != 0 {
- return summarizeSymbols(i, want, got, "incorrect span got %v want %v", g.SelectionRange, w.SelectionRange)
- }
- if msg := DiffSymbols(t, uri, w.Children, g.Children); msg != "" {
- return fmt.Sprintf("children of %s: %s", w.Name, msg)
- }
- }
- return ""
-}
-
-func summarizeSymbols(i int, want, got []protocol.DocumentSymbol, reason string, args ...interface{}) string {
- msg := &bytes.Buffer{}
- fmt.Fprint(msg, "document symbols failed")
- if i >= 0 {
- fmt.Fprintf(msg, " at %d", i)
- }
- fmt.Fprint(msg, " because of ")
- fmt.Fprintf(msg, reason, args...)
- fmt.Fprint(msg, ":\nexpected:\n")
- for _, s := range want {
- fmt.Fprintf(msg, " %v %v %v\n", s.Name, s.Kind, s.SelectionRange)
- }
- fmt.Fprintf(msg, "got:\n")
- for _, s := range got {
- fmt.Fprintf(msg, " %v %v %v\n", s.Name, s.Kind, s.SelectionRange)
- }
- return msg.String()
-}
-
-// DiffDiagnostics prints the diff between expected and actual diagnostics test
-// results.
-func DiffDiagnostics(uri span.URI, want, got []*source.Diagnostic) string {
- source.SortDiagnostics(want)
- source.SortDiagnostics(got)
-
- if len(got) != len(want) {
- return summarizeDiagnostics(-1, uri, want, got, "different lengths got %v want %v", len(got), len(want))
- }
- for i, w := range want {
- g := got[i]
- if w.Message != g.Message {
- return summarizeDiagnostics(i, uri, want, got, "incorrect Message got %v want %v", g.Message, w.Message)
- }
- if w.Severity != g.Severity {
- return summarizeDiagnostics(i, uri, want, got, "incorrect Severity got %v want %v", g.Severity, w.Severity)
- }
- if w.Source != g.Source {
- return summarizeDiagnostics(i, uri, want, got, "incorrect Source got %v want %v", g.Source, w.Source)
- }
- if !rangeOverlaps(g.Range, w.Range) {
- return summarizeDiagnostics(i, uri, want, got, "range %v does not overlap %v", g.Range, w.Range)
- }
- }
- return ""
-}
-
-// rangeOverlaps reports whether r1 and r2 overlap.
-func rangeOverlaps(r1, r2 protocol.Range) bool {
- if inRange(r2.Start, r1) || inRange(r1.Start, r2) {
- return true
- }
- return false
-}
-
-// inRange reports whether p is contained within [r.Start, r.End), or if p ==
-// r.Start == r.End (special handling for the case where the range is a single
-// point).
-func inRange(p protocol.Position, r protocol.Range) bool {
- if protocol.IsPoint(r) {
- return protocol.ComparePosition(r.Start, p) == 0
- }
- if protocol.ComparePosition(r.Start, p) <= 0 && protocol.ComparePosition(p, r.End) < 0 {
- return true
- }
- return false
-}
-
-func summarizeDiagnostics(i int, uri span.URI, want, got []*source.Diagnostic, reason string, args ...interface{}) string {
- msg := &bytes.Buffer{}
- fmt.Fprint(msg, "diagnostics failed")
- if i >= 0 {
- fmt.Fprintf(msg, " at %d", i)
- }
- fmt.Fprint(msg, " because of ")
- fmt.Fprintf(msg, reason, args...)
- fmt.Fprint(msg, ":\nexpected:\n")
- for _, d := range want {
- fmt.Fprintf(msg, " %s:%v: %s\n", uri, d.Range, d.Message)
- }
- fmt.Fprintf(msg, "got:\n")
- for _, d := range got {
- fmt.Fprintf(msg, " %s:%v: %s\n", uri, d.Range, d.Message)
- }
- return msg.String()
-}
-
-func DiffCodeLens(uri span.URI, want, got []protocol.CodeLens) string {
- sortCodeLens(want)
- sortCodeLens(got)
-
- if len(got) != len(want) {
- return summarizeCodeLens(-1, uri, want, got, "different lengths got %v want %v", len(got), len(want))
- }
- for i, w := range want {
- g := got[i]
- if w.Command.Command != g.Command.Command {
- return summarizeCodeLens(i, uri, want, got, "incorrect Command Name got %v want %v", g.Command.Command, w.Command.Command)
- }
- if w.Command.Title != g.Command.Title {
- return summarizeCodeLens(i, uri, want, got, "incorrect Command Title got %v want %v", g.Command.Title, w.Command.Title)
- }
- if protocol.ComparePosition(w.Range.Start, g.Range.Start) != 0 {
- return summarizeCodeLens(i, uri, want, got, "incorrect Start got %v want %v", g.Range.Start, w.Range.Start)
- }
- if !protocol.IsPoint(g.Range) { // Accept any 'want' range if the codelens returns a zero-length range.
- if protocol.ComparePosition(w.Range.End, g.Range.End) != 0 {
- return summarizeCodeLens(i, uri, want, got, "incorrect End got %v want %v", g.Range.End, w.Range.End)
- }
- }
- }
- return ""
-}
-
-func sortCodeLens(c []protocol.CodeLens) {
- sort.Slice(c, func(i int, j int) bool {
- if r := protocol.CompareRange(c[i].Range, c[j].Range); r != 0 {
- return r < 0
- }
- if c[i].Command.Command < c[j].Command.Command {
- return true
- } else if c[i].Command.Command == c[j].Command.Command {
- return c[i].Command.Title < c[j].Command.Title
- } else {
- return false
- }
- })
-}
-
-func summarizeCodeLens(i int, uri span.URI, want, got []protocol.CodeLens, reason string, args ...interface{}) string {
- msg := &bytes.Buffer{}
- fmt.Fprint(msg, "codelens failed")
- if i >= 0 {
- fmt.Fprintf(msg, " at %d", i)
- }
- fmt.Fprint(msg, " because of ")
- fmt.Fprintf(msg, reason, args...)
- fmt.Fprint(msg, ":\nexpected:\n")
- for _, d := range want {
- fmt.Fprintf(msg, " %s:%v: %s | %s\n", uri, d.Range, d.Command.Command, d.Command.Title)
- }
- fmt.Fprintf(msg, "got:\n")
- for _, d := range got {
- fmt.Fprintf(msg, " %s:%v: %s | %s\n", uri, d.Range, d.Command.Command, d.Command.Title)
- }
- return msg.String()
-}
-
-func DiffSignatures(spn span.Span, want, got *protocol.SignatureHelp) (string, error) {
- decorate := func(f string, args ...interface{}) string {
- return fmt.Sprintf("invalid signature at %s: %s", spn, fmt.Sprintf(f, args...))
- }
- if len(got.Signatures) != 1 {
- return decorate("wanted 1 signature, got %d", len(got.Signatures)), nil
- }
- if got.ActiveSignature != 0 {
- return decorate("wanted active signature of 0, got %d", int(got.ActiveSignature)), nil
- }
- if want.ActiveParameter != got.ActiveParameter {
- return decorate("wanted active parameter of %d, got %d", want.ActiveParameter, int(got.ActiveParameter)), nil
- }
- g := got.Signatures[0]
- w := want.Signatures[0]
- if NormalizeAny(w.Label) != NormalizeAny(g.Label) {
- wLabel := w.Label + "\n"
- d, err := myers.ComputeEdits("", wLabel, g.Label+"\n")
- if err != nil {
- return "", err
- }
- return decorate("mismatched labels:\n%q", diff.ToUnified("want", "got", wLabel, d)), err
- }
- var paramParts []string
- for _, p := range g.Parameters {
- paramParts = append(paramParts, p.Label)
- }
- paramsStr := strings.Join(paramParts, ", ")
- if !strings.Contains(g.Label, paramsStr) {
- return decorate("expected signature %q to contain params %q", g.Label, paramsStr), nil
- }
- return "", nil
-}
-
-// NormalizeAny replaces occurrences of interface{} in input with any.
-//
-// In Go 1.18, standard library functions were changed to use the 'any'
-// alias in place of interface{}, which affects their type string.
-func NormalizeAny(input string) string {
- return strings.ReplaceAll(input, "interface{}", "any")
-}
-
-// DiffCallHierarchyItems returns the diff between expected and actual call locations for incoming/outgoing call hierarchies
-func DiffCallHierarchyItems(gotCalls []protocol.CallHierarchyItem, expectedCalls []protocol.CallHierarchyItem) string {
- expected := make(map[protocol.Location]bool)
- for _, call := range expectedCalls {
- expected[protocol.Location{URI: call.URI, Range: call.Range}] = true
- }
-
- got := make(map[protocol.Location]bool)
- for _, call := range gotCalls {
- got[protocol.Location{URI: call.URI, Range: call.Range}] = true
- }
- if len(got) != len(expected) {
- return fmt.Sprintf("expected %d calls but got %d", len(expected), len(got))
- }
- for spn := range got {
- if !expected[spn] {
- return fmt.Sprintf("incorrect calls, expected locations %v but got locations %v", expected, got)
- }
- }
- return ""
-}
-
-func ToProtocolCompletionItems(items []completion.CompletionItem) []protocol.CompletionItem {
- var result []protocol.CompletionItem
- for _, item := range items {
- result = append(result, ToProtocolCompletionItem(item))
- }
- return result
-}
-
-func ToProtocolCompletionItem(item completion.CompletionItem) protocol.CompletionItem {
- pItem := protocol.CompletionItem{
- Label: item.Label,
- Kind: item.Kind,
- Detail: item.Detail,
- Documentation: item.Documentation,
- InsertText: item.InsertText,
- TextEdit: &protocol.TextEdit{
- NewText: item.Snippet(),
- },
- // Negate score so best score has lowest sort text like real API.
- SortText: fmt.Sprint(-item.Score),
- }
- if pItem.InsertText == "" {
- pItem.InsertText = pItem.Label
- }
- return pItem
-}
-
-func FilterBuiltins(src span.Span, items []protocol.CompletionItem) []protocol.CompletionItem {
- var (
- got []protocol.CompletionItem
- wantBuiltins = strings.Contains(string(src.URI()), "builtins")
- wantKeywords = strings.Contains(string(src.URI()), "keywords")
- )
- for _, item := range items {
- if !wantBuiltins && isBuiltin(item.Label, item.Detail, item.Kind) {
- continue
- }
-
- if !wantKeywords && token.Lookup(item.Label).IsKeyword() {
- continue
- }
-
- got = append(got, item)
- }
- return got
-}
-
-func isBuiltin(label, detail string, kind protocol.CompletionItemKind) bool {
- if detail == "" && kind == protocol.ClassCompletion {
- return true
- }
- // Remaining builtin constants, variables, interfaces, and functions.
- trimmed := label
- if i := strings.Index(trimmed, "("); i >= 0 {
- trimmed = trimmed[:i]
- }
- switch trimmed {
- case "append", "cap", "close", "complex", "copy", "delete",
- "error", "false", "imag", "iota", "len", "make", "new",
- "nil", "panic", "print", "println", "real", "recover", "true":
- return true
- }
- return false
-}
-
-func CheckCompletionOrder(want, got []protocol.CompletionItem, strictScores bool) string {
- var (
- matchedIdxs []int
- lastGotIdx int
- lastGotSort float64
- inOrder = true
- errorMsg = "completions out of order"
- )
- for _, w := range want {
- var found bool
- for i, g := range got {
- if w.Label == g.Label && NormalizeAny(w.Detail) == NormalizeAny(g.Detail) && w.Kind == g.Kind {
- matchedIdxs = append(matchedIdxs, i)
- found = true
-
- if i < lastGotIdx {
- inOrder = false
- }
- lastGotIdx = i
-
- sort, _ := strconv.ParseFloat(g.SortText, 64)
- if strictScores && len(matchedIdxs) > 1 && sort <= lastGotSort {
- inOrder = false
- errorMsg = "candidate scores not strictly decreasing"
- }
- lastGotSort = sort
-
- break
- }
- }
- if !found {
- return summarizeCompletionItems(-1, []protocol.CompletionItem{w}, got, "didn't find expected completion")
- }
- }
-
- sort.Ints(matchedIdxs)
- matched := make([]protocol.CompletionItem, 0, len(matchedIdxs))
- for _, idx := range matchedIdxs {
- matched = append(matched, got[idx])
- }
-
- if !inOrder {
- return summarizeCompletionItems(-1, want, matched, errorMsg)
- }
-
- return ""
-}
-
-func DiffSnippets(want string, got *protocol.CompletionItem) string {
- if want == "" {
- if got != nil {
- x := got.TextEdit
- return fmt.Sprintf("expected no snippet but got %s", x.NewText)
- }
- } else {
- if got == nil {
- return fmt.Sprintf("couldn't find completion matching %q", want)
- }
- x := got.TextEdit
- if want != x.NewText {
- return fmt.Sprintf("expected snippet %q, got %q", want, x.NewText)
- }
- }
- return ""
-}
-
-func FindItem(list []protocol.CompletionItem, want completion.CompletionItem) *protocol.CompletionItem {
- for _, item := range list {
- if item.Label == want.Label {
- return &item
- }
- }
- return nil
-}
-
-// DiffCompletionItems prints the diff between expected and actual completion
-// test results.
-func DiffCompletionItems(want, got []protocol.CompletionItem) string {
- if len(got) != len(want) {
- return summarizeCompletionItems(-1, want, got, "different lengths got %v want %v", len(got), len(want))
- }
- for i, w := range want {
- g := got[i]
- if w.Label != g.Label {
- return summarizeCompletionItems(i, want, got, "incorrect Label got %v want %v", g.Label, w.Label)
- }
- if NormalizeAny(w.Detail) != NormalizeAny(g.Detail) {
- return summarizeCompletionItems(i, want, got, "incorrect Detail got %v want %v", g.Detail, w.Detail)
- }
- if w.Documentation != "" && !strings.HasPrefix(w.Documentation, "@") {
- if w.Documentation != g.Documentation {
- return summarizeCompletionItems(i, want, got, "incorrect Documentation got %v want %v", g.Documentation, w.Documentation)
- }
- }
- if w.Kind != g.Kind {
- return summarizeCompletionItems(i, want, got, "incorrect Kind got %v want %v", g.Kind, w.Kind)
- }
- }
- return ""
-}
-
-func summarizeCompletionItems(i int, want, got []protocol.CompletionItem, reason string, args ...interface{}) string {
- msg := &bytes.Buffer{}
- fmt.Fprint(msg, "completion failed")
- if i >= 0 {
- fmt.Fprintf(msg, " at %d", i)
- }
- fmt.Fprint(msg, " because of ")
- fmt.Fprintf(msg, reason, args...)
- fmt.Fprint(msg, ":\nexpected:\n")
- for _, d := range want {
- fmt.Fprintf(msg, " %v\n", d)
- }
- fmt.Fprintf(msg, "got:\n")
- for _, d := range got {
- fmt.Fprintf(msg, " %v\n", d)
- }
- return msg.String()
-}
-
-func EnableAllAnalyzers(view source.View, opts *source.Options) {
- if opts.Analyses == nil {
- opts.Analyses = make(map[string]bool)
- }
- for _, a := range opts.DefaultAnalyzers {
- if !a.IsEnabled(view) {
- opts.Analyses[a.Analyzer.Name] = true
- }
- }
- for _, a := range opts.TypeErrorAnalyzers {
- if !a.IsEnabled(view) {
- opts.Analyses[a.Analyzer.Name] = true
- }
- }
- for _, a := range opts.ConvenienceAnalyzers {
- if !a.IsEnabled(view) {
- opts.Analyses[a.Analyzer.Name] = true
- }
- }
- for _, a := range opts.StaticcheckAnalyzers {
- if !a.IsEnabled(view) {
- opts.Analyses[a.Analyzer.Name] = true
- }
- }
-}
-
-func WorkspaceSymbolsString(ctx context.Context, data *Data, queryURI span.URI, symbols []protocol.SymbolInformation) (string, error) {
- queryDir := filepath.Dir(queryURI.Filename())
- var filtered []string
- for _, s := range symbols {
- uri := s.Location.URI.SpanURI()
- dir := filepath.Dir(uri.Filename())
- if !source.InDir(queryDir, dir) { // assume queries always issue from higher directories
- continue
- }
- m, err := data.Mapper(uri)
- if err != nil {
- return "", err
- }
- spn, err := m.Span(s.Location)
- if err != nil {
- return "", err
- }
- filtered = append(filtered, fmt.Sprintf("%s %s %s", spn, s.Name, s.Kind))
- }
- sort.Strings(filtered)
- return strings.Join(filtered, "\n") + "\n", nil
-}
-
-func WorkspaceSymbolsTestTypeToMatcher(typ WorkspaceSymbolsTestType) source.SymbolMatcher {
- switch typ {
- case WorkspaceSymbolsFuzzy:
- return source.SymbolFuzzy
- case WorkspaceSymbolsCaseSensitive:
- return source.SymbolCaseSensitive
- default:
- return source.SymbolCaseInsensitive
- }
-}
-
-func Diff(t *testing.T, want, got string) string {
- if want == got {
- return ""
- }
- // Add newlines to avoid newline messages in diff.
- want += "\n"
- got += "\n"
- d, err := myers.ComputeEdits("", want, got)
- if err != nil {
- t.Fatal(err)
- }
- return fmt.Sprintf("%q", diff.ToUnified("want", "got", want, d))
-}
-
-// StripSubscripts removes type parameter id subscripts.
-//
-// TODO(rfindley): remove this function once subscripts are removed from the
-// type parameter type string.
-func StripSubscripts(s string) string {
- var runes []rune
- for _, r := range s {
- // For debugging/uniqueness purposes, TypeString on a type parameter adds a
- // subscript corresponding to the type parameter's unique id. This is going
- // to be removed, but in the meantime we skip the subscript runes to get a
- // deterministic output.
- if '₀' <= r && r < '₀'+10 {
- continue // trim type parameter subscripts
- }
- runes = append(runes, r)
- }
- return string(runes)
-}
diff --git a/internal/lsp/text_synchronization.go b/internal/lsp/text_synchronization.go
deleted file mode 100644
index d9a696140..000000000
--- a/internal/lsp/text_synchronization.go
+++ /dev/null
@@ -1,382 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lsp
-
-import (
- "bytes"
- "context"
- "fmt"
- "path/filepath"
- "time"
-
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/jsonrpc2"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
- "golang.org/x/tools/internal/xcontext"
- errors "golang.org/x/xerrors"
-)
-
-// ModificationSource identifies the originating cause of a file modification.
-type ModificationSource int
-
-const (
- // FromDidOpen is a file modification caused by opening a file.
- FromDidOpen = ModificationSource(iota)
-
- // FromDidChange is a file modification caused by changing a file.
- FromDidChange
-
- // FromDidChangeWatchedFiles is a file modification caused by a change to a
- // watched file.
- FromDidChangeWatchedFiles
-
- // FromDidSave is a file modification caused by a file save.
- FromDidSave
-
- // FromDidClose is a file modification caused by closing a file.
- FromDidClose
-
- // FromRegenerateCgo refers to file modifications caused by regenerating
- // the cgo sources for the workspace.
- FromRegenerateCgo
-
- // FromInitialWorkspaceLoad refers to the loading of all packages in the
- // workspace when the view is first created.
- FromInitialWorkspaceLoad
-)
-
-func (m ModificationSource) String() string {
- switch m {
- case FromDidOpen:
- return "opened files"
- case FromDidChange:
- return "changed files"
- case FromDidChangeWatchedFiles:
- return "files changed on disk"
- case FromDidSave:
- return "saved files"
- case FromDidClose:
- return "close files"
- case FromRegenerateCgo:
- return "regenerate cgo"
- case FromInitialWorkspaceLoad:
- return "initial workspace load"
- default:
- return "unknown file modification"
- }
-}
-
-func (s *Server) didOpen(ctx context.Context, params *protocol.DidOpenTextDocumentParams) error {
- uri := params.TextDocument.URI.SpanURI()
- if !uri.IsFile() {
- return nil
- }
- // There may not be any matching view in the current session. If that's
- // the case, try creating a new view based on the opened file path.
- //
- // TODO(rstambler): This seems like it would continuously add new
- // views, but it won't because ViewOf only returns an error when there
- // are no views in the session. I don't know if that logic should go
- // here, or if we can continue to rely on that implementation detail.
- if _, err := s.session.ViewOf(uri); err != nil {
- dir := filepath.Dir(uri.Filename())
- if err := s.addFolders(ctx, []protocol.WorkspaceFolder{{
- URI: string(protocol.URIFromPath(dir)),
- Name: filepath.Base(dir),
- }}); err != nil {
- return err
- }
- }
- return s.didModifyFiles(ctx, []source.FileModification{{
- URI: uri,
- Action: source.Open,
- Version: params.TextDocument.Version,
- Text: []byte(params.TextDocument.Text),
- LanguageID: params.TextDocument.LanguageID,
- }}, FromDidOpen)
-}
-
-func (s *Server) didChange(ctx context.Context, params *protocol.DidChangeTextDocumentParams) error {
- uri := params.TextDocument.URI.SpanURI()
- if !uri.IsFile() {
- return nil
- }
-
- text, err := s.changedText(ctx, uri, params.ContentChanges)
- if err != nil {
- return err
- }
- c := source.FileModification{
- URI: uri,
- Action: source.Change,
- Version: params.TextDocument.Version,
- Text: text,
- }
- if err := s.didModifyFiles(ctx, []source.FileModification{c}, FromDidChange); err != nil {
- return err
- }
- return s.warnAboutModifyingGeneratedFiles(ctx, uri)
-}
-
-// warnAboutModifyingGeneratedFiles shows a warning if a user tries to edit a
-// generated file for the first time.
-func (s *Server) warnAboutModifyingGeneratedFiles(ctx context.Context, uri span.URI) error {
- s.changedFilesMu.Lock()
- _, ok := s.changedFiles[uri]
- if !ok {
- s.changedFiles[uri] = struct{}{}
- }
- s.changedFilesMu.Unlock()
-
- // This file has already been edited before.
- if ok {
- return nil
- }
-
- // Ideally, we should be able to specify that a generated file should
- // be opened as read-only. Tell the user that they should not be
- // editing a generated file.
- view, err := s.session.ViewOf(uri)
- if err != nil {
- return err
- }
- snapshot, release := view.Snapshot(ctx)
- isGenerated := source.IsGenerated(ctx, snapshot, uri)
- release()
-
- if !isGenerated {
- return nil
- }
- return s.client.ShowMessage(ctx, &protocol.ShowMessageParams{
- Message: fmt.Sprintf("Do not edit this file! %s is a generated file.", uri.Filename()),
- Type: protocol.Warning,
- })
-}
-
-func (s *Server) didChangeWatchedFiles(ctx context.Context, params *protocol.DidChangeWatchedFilesParams) error {
- var modifications []source.FileModification
- for _, change := range params.Changes {
- uri := change.URI.SpanURI()
- if !uri.IsFile() {
- continue
- }
- action := changeTypeToFileAction(change.Type)
- modifications = append(modifications, source.FileModification{
- URI: uri,
- Action: action,
- OnDisk: true,
- })
- }
- return s.didModifyFiles(ctx, modifications, FromDidChangeWatchedFiles)
-}
-
-func (s *Server) didSave(ctx context.Context, params *protocol.DidSaveTextDocumentParams) error {
- uri := params.TextDocument.URI.SpanURI()
- if !uri.IsFile() {
- return nil
- }
- c := source.FileModification{
- URI: uri,
- Action: source.Save,
- }
- if params.Text != nil {
- c.Text = []byte(*params.Text)
- }
- return s.didModifyFiles(ctx, []source.FileModification{c}, FromDidSave)
-}
-
-func (s *Server) didClose(ctx context.Context, params *protocol.DidCloseTextDocumentParams) error {
- uri := params.TextDocument.URI.SpanURI()
- if !uri.IsFile() {
- return nil
- }
- return s.didModifyFiles(ctx, []source.FileModification{
- {
- URI: uri,
- Action: source.Close,
- Version: -1,
- Text: nil,
- },
- }, FromDidClose)
-}
-
-func (s *Server) didModifyFiles(ctx context.Context, modifications []source.FileModification, cause ModificationSource) error {
- diagnoseDone := make(chan struct{})
- if s.session.Options().VerboseWorkDoneProgress {
- work := s.progress.Start(ctx, DiagnosticWorkTitle(cause), "Calculating file diagnostics...", nil, nil)
- defer func() {
- go func() {
- <-diagnoseDone
- work.End("Done.")
- }()
- }()
- }
-
- onDisk := cause == FromDidChangeWatchedFiles
- delay := s.session.Options().ExperimentalWatchedFileDelay
- s.fileChangeMu.Lock()
- defer s.fileChangeMu.Unlock()
- if !onDisk || delay == 0 {
- // No delay: process the modifications immediately.
- return s.processModifications(ctx, modifications, onDisk, diagnoseDone)
- }
- // Debounce and batch up pending modifications from watched files.
- pending := &pendingModificationSet{
- diagnoseDone: diagnoseDone,
- changes: modifications,
- }
- // Invariant: changes appended to s.pendingOnDiskChanges are eventually
- // handled in the order they arrive. This guarantee is only partially
- // enforced here. Specifically:
- // 1. s.fileChangesMu ensures that the append below happens in the order
- // notifications were received, so that the changes within each batch are
- // ordered properly.
- // 2. The debounced func below holds s.fileChangesMu while processing all
- // changes in s.pendingOnDiskChanges, ensuring that no batches are
- // processed out of order.
- // 3. Session.ExpandModificationsToDirectories and Session.DidModifyFiles
- // process changes in order.
- s.pendingOnDiskChanges = append(s.pendingOnDiskChanges, pending)
- ctx = xcontext.Detach(ctx)
- okc := s.watchedFileDebouncer.debounce("", 0, time.After(delay))
- go func() {
- if ok := <-okc; !ok {
- return
- }
- s.fileChangeMu.Lock()
- var allChanges []source.FileModification
- // For accurate progress notifications, we must notify all goroutines
- // waiting for the diagnose pass following a didChangeWatchedFiles
- // notification. This is necessary for regtest assertions.
- var dones []chan struct{}
- for _, pending := range s.pendingOnDiskChanges {
- allChanges = append(allChanges, pending.changes...)
- dones = append(dones, pending.diagnoseDone)
- }
-
- allDone := make(chan struct{})
- if err := s.processModifications(ctx, allChanges, onDisk, allDone); err != nil {
- event.Error(ctx, "processing delayed file changes", err)
- }
- s.pendingOnDiskChanges = nil
- s.fileChangeMu.Unlock()
- <-allDone
- for _, done := range dones {
- close(done)
- }
- }()
- return nil
-}
-
-// processModifications update server state to reflect file changes, and
-// triggers diagnostics to run asynchronously. The diagnoseDone channel will be
-// closed once diagnostics complete.
-func (s *Server) processModifications(ctx context.Context, modifications []source.FileModification, onDisk bool, diagnoseDone chan struct{}) error {
- s.stateMu.Lock()
- if s.state >= serverShutDown {
- // This state check does not prevent races below, and exists only to
- // produce a better error message. The actual race to the cache should be
- // guarded by Session.viewMu.
- s.stateMu.Unlock()
- close(diagnoseDone)
- return errors.New("server is shut down")
- }
- s.stateMu.Unlock()
- // If the set of changes included directories, expand those directories
- // to their files.
- modifications = s.session.ExpandModificationsToDirectories(ctx, modifications)
-
- snapshots, releases, err := s.session.DidModifyFiles(ctx, modifications)
- if err != nil {
- close(diagnoseDone)
- return err
- }
-
- go func() {
- s.diagnoseSnapshots(snapshots, onDisk)
- for _, release := range releases {
- release()
- }
- close(diagnoseDone)
- }()
-
- // After any file modifications, we need to update our watched files,
- // in case something changed. Compute the new set of directories to watch,
- // and if it differs from the current set, send updated registrations.
- return s.updateWatchedDirectories(ctx)
-}
-
-// DiagnosticWorkTitle returns the title of the diagnostic work resulting from a
-// file change originating from the given cause.
-func DiagnosticWorkTitle(cause ModificationSource) string {
- return fmt.Sprintf("diagnosing %v", cause)
-}
-
-func (s *Server) changedText(ctx context.Context, uri span.URI, changes []protocol.TextDocumentContentChangeEvent) ([]byte, error) {
- if len(changes) == 0 {
- return nil, errors.Errorf("%w: no content changes provided", jsonrpc2.ErrInternal)
- }
-
- // Check if the client sent the full content of the file.
- // We accept a full content change even if the server expected incremental changes.
- if len(changes) == 1 && changes[0].Range == nil && changes[0].RangeLength == 0 {
- return []byte(changes[0].Text), nil
- }
- return s.applyIncrementalChanges(ctx, uri, changes)
-}
-
-func (s *Server) applyIncrementalChanges(ctx context.Context, uri span.URI, changes []protocol.TextDocumentContentChangeEvent) ([]byte, error) {
- fh, err := s.session.GetFile(ctx, uri)
- if err != nil {
- return nil, err
- }
- content, err := fh.Read()
- if err != nil {
- return nil, errors.Errorf("%w: file not found (%v)", jsonrpc2.ErrInternal, err)
- }
- for _, change := range changes {
- // Make sure to update column mapper along with the content.
- converter := span.NewContentConverter(uri.Filename(), content)
- m := &protocol.ColumnMapper{
- URI: uri,
- Converter: converter,
- Content: content,
- }
- if change.Range == nil {
- return nil, errors.Errorf("%w: unexpected nil range for change", jsonrpc2.ErrInternal)
- }
- spn, err := m.RangeSpan(*change.Range)
- if err != nil {
- return nil, err
- }
- if !spn.HasOffset() {
- return nil, errors.Errorf("%w: invalid range for content change", jsonrpc2.ErrInternal)
- }
- start, end := spn.Start().Offset(), spn.End().Offset()
- if end < start {
- return nil, errors.Errorf("%w: invalid range for content change", jsonrpc2.ErrInternal)
- }
- var buf bytes.Buffer
- buf.Write(content[:start])
- buf.WriteString(change.Text)
- buf.Write(content[end:])
- content = buf.Bytes()
- }
- return content, nil
-}
-
-func changeTypeToFileAction(ct protocol.FileChangeType) source.FileAction {
- switch ct {
- case protocol.Changed:
- return source.Change
- case protocol.Created:
- return source.Create
- case protocol.Deleted:
- return source.Delete
- }
- return source.UnknownFileAction
-}
diff --git a/internal/lsp/work/completion.go b/internal/lsp/work/completion.go
deleted file mode 100644
index 60b69f12f..000000000
--- a/internal/lsp/work/completion.go
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package work
-
-import (
- "context"
- "go/token"
- "os"
- "path/filepath"
- "sort"
- "strings"
-
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- errors "golang.org/x/xerrors"
-)
-
-func Completion(ctx context.Context, snapshot source.Snapshot, fh source.VersionedFileHandle, position protocol.Position) (*protocol.CompletionList, error) {
- ctx, done := event.Start(ctx, "work.Completion")
- defer done()
-
- // Get the position of the cursor.
- pw, err := snapshot.ParseWork(ctx, fh)
- if err != nil {
- return nil, errors.Errorf("getting go.work file handle: %w", err)
- }
- spn, err := pw.Mapper.PointSpan(position)
- if err != nil {
- return nil, errors.Errorf("computing cursor position: %w", err)
- }
- rng, err := spn.Range(pw.Mapper.Converter)
- if err != nil {
- return nil, errors.Errorf("computing range: %w", err)
- }
-
- // Find the use statement the user is in.
- cursor := rng.Start - 1
- use, pathStart, _ := usePath(pw, cursor)
- if use == nil {
- return &protocol.CompletionList{}, nil
- }
- completingFrom := use.Path[:cursor-token.Pos(pathStart)]
-
- // We're going to find the completions of the user input
- // (completingFrom) by doing a walk on the innermost directory
- // of the given path, and comparing the found paths to make sure
- // that they match the component of the path after the
- // innermost directory.
- //
- // We'll maintain two paths when doing this: pathPrefixSlash
- // is essentially the path the user typed in, and pathPrefixAbs
- // is the path made absolute from the go.work directory.
-
- pathPrefixSlash := completingFrom
- pathPrefixAbs := filepath.FromSlash(pathPrefixSlash)
- if !filepath.IsAbs(pathPrefixAbs) {
- pathPrefixAbs = filepath.Join(filepath.Dir(pw.URI.Filename()), pathPrefixAbs)
- }
-
- // pathPrefixDir is the directory that will be walked to find matches.
- // If pathPrefixSlash is not explicitly a directory boundary (is either equivalent to "." or
- // ends in a separator) we need to examine its parent directory to find sibling files that
- // match.
- depthBound := 5
- pathPrefixDir, pathPrefixBase := pathPrefixAbs, ""
- pathPrefixSlashDir := pathPrefixSlash
- if filepath.Clean(pathPrefixSlash) != "." && !strings.HasSuffix(pathPrefixSlash, "/") {
- depthBound++
- pathPrefixDir, pathPrefixBase = filepath.Split(pathPrefixAbs)
- pathPrefixSlashDir = dirNonClean(pathPrefixSlash)
- }
-
- var completions []string
- // Stop traversing deeper once we've hit 10k files to try to stay generally under 100ms.
- const numSeenBound = 10000
- var numSeen int
- stopWalking := errors.New("hit numSeenBound")
- err = filepath.Walk(pathPrefixDir, func(wpath string, info os.FileInfo, err error) error {
- if numSeen > numSeenBound {
- // Stop traversing if we hit bound.
- return stopWalking
- }
- numSeen++
-
- // rel is the path relative to pathPrefixDir.
- // Make sure that it has pathPrefixBase as a prefix
- // otherwise it won't match the beginning of the
- // base component of the path the user typed in.
- rel := strings.TrimPrefix(wpath[len(pathPrefixDir):], string(filepath.Separator))
- if info.IsDir() && wpath != pathPrefixDir && !strings.HasPrefix(rel, pathPrefixBase) {
- return filepath.SkipDir
- }
-
- // Check for a match (a module directory).
- if filepath.Base(rel) == "go.mod" {
- relDir := strings.TrimSuffix(dirNonClean(rel), string(os.PathSeparator))
- completionPath := join(pathPrefixSlashDir, filepath.ToSlash(relDir))
-
- if !strings.HasPrefix(completionPath, completingFrom) {
- return nil
- }
- if strings.HasSuffix(completionPath, "/") {
- // Don't suggest paths that end in "/". This happens
- // when the input is a path that ends in "/" and
- // the completion is empty.
- return nil
- }
- completion := completionPath[len(completingFrom):]
- if completingFrom == "" && !strings.HasPrefix(completion, "./") {
- // Bias towards "./" prefixes.
- completion = join(".", completion)
- }
-
- completions = append(completions, completion)
- }
-
- if depth := strings.Count(rel, string(filepath.Separator)); depth >= depthBound {
- return filepath.SkipDir
- }
- return nil
- })
- if err != nil && !errors.Is(err, stopWalking) {
- return nil, errors.Errorf("walking to find completions: %w", err)
- }
-
- sort.Strings(completions)
-
- var items []protocol.CompletionItem
- for _, c := range completions {
- items = append(items, protocol.CompletionItem{
- Label: c,
- InsertText: c,
- })
- }
- return &protocol.CompletionList{Items: items}, nil
-}
-
-// dirNonClean is filepath.Dir, without the Clean at the end.
-func dirNonClean(path string) string {
- vol := filepath.VolumeName(path)
- i := len(path) - 1
- for i >= len(vol) && !os.IsPathSeparator(path[i]) {
- i--
- }
- return path[len(vol) : i+1]
-}
-
-func join(a, b string) string {
- if a == "" {
- return b
- }
- if b == "" {
- return a
- }
- return strings.TrimSuffix(a, "/") + "/" + b
-}
diff --git a/internal/lsp/work/diagnostics.go b/internal/lsp/work/diagnostics.go
deleted file mode 100644
index e583e60fd..000000000
--- a/internal/lsp/work/diagnostics.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package work
-
-import (
- "context"
- "fmt"
- "os"
- "path/filepath"
-
- "golang.org/x/mod/modfile"
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/lsp/debug/tag"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
-)
-
-func Diagnostics(ctx context.Context, snapshot source.Snapshot) (map[source.VersionedFileIdentity][]*source.Diagnostic, error) {
- ctx, done := event.Start(ctx, "work.Diagnostics", tag.Snapshot.Of(snapshot.ID()))
- defer done()
-
- reports := map[source.VersionedFileIdentity][]*source.Diagnostic{}
- uri := snapshot.WorkFile()
- if uri == "" {
- return nil, nil
- }
- fh, err := snapshot.GetVersionedFile(ctx, uri)
- if err != nil {
- return nil, err
- }
- reports[fh.VersionedFileIdentity()] = []*source.Diagnostic{}
- diagnostics, err := DiagnosticsForWork(ctx, snapshot, fh)
- if err != nil {
- return nil, err
- }
- for _, d := range diagnostics {
- fh, err := snapshot.GetVersionedFile(ctx, d.URI)
- if err != nil {
- return nil, err
- }
- reports[fh.VersionedFileIdentity()] = append(reports[fh.VersionedFileIdentity()], d)
- }
-
- return reports, nil
-}
-
-func DiagnosticsForWork(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) ([]*source.Diagnostic, error) {
- pw, err := snapshot.ParseWork(ctx, fh)
- if err != nil {
- if pw == nil || len(pw.ParseErrors) == 0 {
- return nil, err
- }
- return pw.ParseErrors, nil
- }
-
- // Add diagnostic if a directory does not contain a module.
- var diagnostics []*source.Diagnostic
- for _, use := range pw.File.Use {
- rng, err := source.LineToRange(pw.Mapper, fh.URI(), use.Syntax.Start, use.Syntax.End)
- if err != nil {
- return nil, err
- }
-
- modfh, err := snapshot.GetFile(ctx, modFileURI(pw, use))
- if err != nil {
- return nil, err
- }
- if _, err := modfh.Read(); err != nil && os.IsNotExist(err) {
- diagnostics = append(diagnostics, &source.Diagnostic{
- URI: fh.URI(),
- Range: rng,
- Severity: protocol.SeverityError,
- Source: source.UnknownError, // Do we need a new source for this?
- Message: fmt.Sprintf("directory %v does not contain a module", use.Path),
- })
- }
- }
- return diagnostics, nil
-}
-
-func modFileURI(pw *source.ParsedWorkFile, use *modfile.Use) span.URI {
- workdir := filepath.Dir(pw.URI.Filename())
-
- modroot := filepath.FromSlash(use.Path)
- if !filepath.IsAbs(modroot) {
- modroot = filepath.Join(workdir, modroot)
- }
-
- return span.URIFromPath(filepath.Join(modroot, "go.mod"))
-}
diff --git a/internal/lsp/work/format.go b/internal/lsp/work/format.go
deleted file mode 100644
index 35b804a73..000000000
--- a/internal/lsp/work/format.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package work
-
-import (
- "context"
-
- "golang.org/x/mod/modfile"
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
-)
-
-func Format(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) ([]protocol.TextEdit, error) {
- ctx, done := event.Start(ctx, "work.Format")
- defer done()
-
- pw, err := snapshot.ParseWork(ctx, fh)
- if err != nil {
- return nil, err
- }
- formatted := modfile.Format(pw.File.Syntax)
- // Calculate the edits to be made due to the change.
- diff, err := snapshot.View().Options().ComputeEdits(fh.URI(), string(pw.Mapper.Content), string(formatted))
- if err != nil {
- return nil, err
- }
- return source.ToProtocolEdits(pw.Mapper, diff)
-}
diff --git a/internal/lsp/work/hover.go b/internal/lsp/work/hover.go
deleted file mode 100644
index 1699c5cba..000000000
--- a/internal/lsp/work/hover.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package work
-
-import (
- "bytes"
- "context"
- "go/token"
-
- "golang.org/x/mod/modfile"
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- errors "golang.org/x/xerrors"
-)
-
-func Hover(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle, position protocol.Position) (*protocol.Hover, error) {
- // We only provide hover information for the view's go.work file.
- if fh.URI() != snapshot.WorkFile() {
- return nil, nil
- }
-
- ctx, done := event.Start(ctx, "work.Hover")
- defer done()
-
- // Get the position of the cursor.
- pw, err := snapshot.ParseWork(ctx, fh)
- if err != nil {
- return nil, errors.Errorf("getting go.work file handle: %w", err)
- }
- spn, err := pw.Mapper.PointSpan(position)
- if err != nil {
- return nil, errors.Errorf("computing cursor position: %w", err)
- }
- hoverRng, err := spn.Range(pw.Mapper.Converter)
- if err != nil {
- return nil, errors.Errorf("computing hover range: %w", err)
- }
-
- // Confirm that the cursor is inside a use statement, and then find
- // the position of the use statement's directory path.
- use, pathStart, pathEnd := usePath(pw, hoverRng.Start)
-
- // The cursor position is not on a use statement.
- if use == nil {
- return nil, nil
- }
-
- // Get the mod file denoted by the use.
- modfh, err := snapshot.GetFile(ctx, modFileURI(pw, use))
- if err != nil {
- return nil, errors.Errorf("getting modfile handle: %w", err)
- }
- pm, err := snapshot.ParseMod(ctx, modfh)
- if err != nil {
- return nil, errors.Errorf("getting modfile handle: %w", err)
- }
- mod := pm.File.Module.Mod
-
- // Get the range to highlight for the hover.
- rng, err := source.ByteOffsetsToRange(pw.Mapper, fh.URI(), pathStart, pathEnd)
- if err != nil {
- return nil, err
- }
- options := snapshot.View().Options()
- return &protocol.Hover{
- Contents: protocol.MarkupContent{
- Kind: options.PreferredContentFormat,
- Value: mod.Path,
- },
- Range: rng,
- }, nil
-}
-
-func usePath(pw *source.ParsedWorkFile, pos token.Pos) (use *modfile.Use, pathStart, pathEnd int) {
- for _, u := range pw.File.Use {
- path := []byte(u.Path)
- s, e := u.Syntax.Start.Byte, u.Syntax.End.Byte
- i := bytes.Index(pw.Mapper.Content[s:e], path)
- if i == -1 {
- // This should not happen.
- continue
- }
- // Shift the start position to the location of the
- // module directory within the use statement.
- pathStart, pathEnd = s+i, s+i+len(path)
- if token.Pos(pathStart) <= pos && pos <= token.Pos(pathEnd) {
- return u, pathStart, pathEnd
- }
- }
- return nil, 0, 0
-}
diff --git a/internal/lsp/workspace.go b/internal/lsp/workspace.go
deleted file mode 100644
index 1f01b3b3b..000000000
--- a/internal/lsp/workspace.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lsp
-
-import (
- "context"
-
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
- "golang.org/x/tools/internal/span"
- errors "golang.org/x/xerrors"
-)
-
-func (s *Server) didChangeWorkspaceFolders(ctx context.Context, params *protocol.DidChangeWorkspaceFoldersParams) error {
- event := params.Event
- for _, folder := range event.Removed {
- view := s.session.View(folder.Name)
- if view != nil {
- view.Shutdown(ctx)
- } else {
- return errors.Errorf("view %s for %v not found", folder.Name, folder.URI)
- }
- }
- return s.addFolders(ctx, event.Added)
-}
-
-func (s *Server) addView(ctx context.Context, name string, uri span.URI) (source.Snapshot, func(), error) {
- s.stateMu.Lock()
- state := s.state
- s.stateMu.Unlock()
- if state < serverInitialized {
- return nil, func() {}, errors.Errorf("addView called before server initialized")
- }
- options := s.session.Options().Clone()
- if err := s.fetchConfig(ctx, name, uri, options); err != nil {
- return nil, func() {}, err
- }
- _, snapshot, release, err := s.session.NewView(ctx, name, uri, options)
- return snapshot, release, err
-}
-
-func (s *Server) didChangeConfiguration(ctx context.Context, _ *protocol.DidChangeConfigurationParams) error {
- // Apply any changes to the session-level settings.
- options := s.session.Options().Clone()
- semanticTokensRegistered := options.SemanticTokens
- if err := s.fetchConfig(ctx, "", "", options); err != nil {
- return err
- }
- s.session.SetOptions(options)
-
- // Go through each view, getting and updating its configuration.
- for _, view := range s.session.Views() {
- options := s.session.Options().Clone()
- if err := s.fetchConfig(ctx, view.Name(), view.Folder(), options); err != nil {
- return err
- }
- view, err := view.SetOptions(ctx, options)
- if err != nil {
- return err
- }
- go func() {
- snapshot, release := view.Snapshot(ctx)
- defer release()
- s.diagnoseDetached(snapshot)
- }()
- }
-
- registration := semanticTokenRegistration(options.SemanticTypes, options.SemanticMods)
- // Update any session-specific registrations or unregistrations.
- if !semanticTokensRegistered && options.SemanticTokens {
- if err := s.client.RegisterCapability(ctx, &protocol.RegistrationParams{
- Registrations: []protocol.Registration{registration},
- }); err != nil {
- return err
- }
- } else if semanticTokensRegistered && !options.SemanticTokens {
- if err := s.client.UnregisterCapability(ctx, &protocol.UnregistrationParams{
- Unregisterations: []protocol.Unregistration{
- {
- ID: registration.ID,
- Method: registration.Method,
- },
- },
- }); err != nil {
- return err
- }
- }
- return nil
-}
-
-func semanticTokenRegistration(tokenTypes, tokenModifiers []string) protocol.Registration {
- return protocol.Registration{
- ID: "textDocument/semanticTokens",
- Method: "textDocument/semanticTokens",
- RegisterOptions: &protocol.SemanticTokensOptions{
- Legend: protocol.SemanticTokensLegend{
- // TODO(pjw): trim these to what we use (and an unused one
- // at position 0 of TokTypes, to catch typos)
- TokenTypes: tokenTypes,
- TokenModifiers: tokenModifiers,
- },
- Full: true,
- Range: true,
- },
- }
-}
diff --git a/internal/lsp/workspace_symbol.go b/internal/lsp/workspace_symbol.go
deleted file mode 100644
index 20c5763ab..000000000
--- a/internal/lsp/workspace_symbol.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lsp
-
-import (
- "context"
-
- "golang.org/x/tools/internal/event"
- "golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/lsp/source"
-)
-
-func (s *Server) symbol(ctx context.Context, params *protocol.WorkspaceSymbolParams) ([]protocol.SymbolInformation, error) {
- ctx, done := event.Start(ctx, "lsp.Server.symbol")
- defer done()
-
- views := s.session.Views()
- matcher := s.session.Options().SymbolMatcher
- style := s.session.Options().SymbolStyle
- return source.WorkspaceSymbols(ctx, matcher, style, views, params.Query)
-}
diff --git a/internal/memoize/memoize.go b/internal/memoize/memoize.go
index 0037342a7..e56af3bb4 100644
--- a/internal/memoize/memoize.go
+++ b/internal/memoize/memoize.go
@@ -2,151 +2,88 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package memoize supports memoizing the return values of functions with
-// idempotent results that are expensive to compute.
+// Package memoize defines a "promise" abstraction that enables
+// memoization of the result of calling an expensive but idempotent
+// function.
//
-// To use this package, build a store and use it to acquire handles with the
-// Bind method.
+// Call p = NewPromise(f) to obtain a promise for the future result of
+// calling f(), and call p.Get() to obtain that result. All calls to
+// p.Get return the result of a single call of f().
+// Get blocks if the function has not finished (or started).
//
+// A Store is a map of arbitrary keys to promises. Use Store.Promise
+// to create a promise in the store. All calls to Handle(k) return the
+// same promise as long as it is in the store. These promises are
+// reference-counted and must be explicitly released. Once the last
+// reference is released, the promise is removed from the store.
package memoize
import (
"context"
- "flag"
"fmt"
"reflect"
+ "runtime/trace"
"sync"
"sync/atomic"
"golang.org/x/tools/internal/xcontext"
)
-var (
- panicOnDestroyed = flag.Bool("memoize_panic_on_destroyed", false,
- "Panic when a destroyed generation is read rather than returning an error. "+
- "Panicking may make it easier to debug lifetime errors, especially when "+
- "used with GOTRACEBACK=crash to see all running goroutines.")
-)
-
-// Store binds keys to functions, returning handles that can be used to access
-// the functions results.
-type Store struct {
- mu sync.Mutex
- // handles is the set of values stored.
- handles map[interface{}]*Handle
-
- // generations is the set of generations live in this store.
- generations map[*Generation]struct{}
-}
-
-// Generation creates a new Generation associated with s. Destroy must be
-// called on the returned Generation once it is no longer in use. name is
-// for debugging purposes only.
-func (s *Store) Generation(name string) *Generation {
- s.mu.Lock()
- defer s.mu.Unlock()
- if s.handles == nil {
- s.handles = map[interface{}]*Handle{}
- s.generations = map[*Generation]struct{}{}
- }
- g := &Generation{store: s, name: name}
- s.generations[g] = struct{}{}
- return g
-}
-
-// A Generation is a logical point in time of the cache life-cycle. Cache
-// entries associated with a Generation will not be removed until the
-// Generation is destroyed.
-type Generation struct {
- // destroyed is 1 after the generation is destroyed. Atomic.
- destroyed uint32
- store *Store
- name string
- // destroyedBy describes the caller that togged destroyed from 0 to 1.
- destroyedBy string
- // wg tracks the reference count of this generation.
- wg sync.WaitGroup
-}
-
-// Destroy waits for all operations referencing g to complete, then removes
-// all references to g from cache entries. Cache entries that no longer
-// reference any non-destroyed generation are removed. Destroy must be called
-// exactly once for each generation, and destroyedBy describes the caller.
-func (g *Generation) Destroy(destroyedBy string) {
- g.wg.Wait()
-
- prevDestroyedBy := g.destroyedBy
- g.destroyedBy = destroyedBy
- if ok := atomic.CompareAndSwapUint32(&g.destroyed, 0, 1); !ok {
- panic("Destroy on generation " + g.name + " already destroyed by " + prevDestroyedBy)
- }
-
- g.store.mu.Lock()
- defer g.store.mu.Unlock()
- for k, e := range g.store.handles {
- e.mu.Lock()
- if _, ok := e.generations[g]; ok {
- delete(e.generations, g) // delete even if it's dead, in case of dangling references to the entry.
- if len(e.generations) == 0 {
- delete(g.store.handles, k)
- e.state = stateDestroyed
- if e.cleanup != nil && e.value != nil {
- e.cleanup(e.value)
- }
- }
- }
- e.mu.Unlock()
- }
- delete(g.store.generations, g)
-}
+// Function is the type of a function that can be memoized.
+//
+// If the arg is a RefCounted, its Acquire/Release operations are called.
+//
+// The argument must not materially affect the result of the function
+// in ways that are not captured by the promise's key, since if
+// Promise.Get is called twice concurrently, with the same (implicit)
+// key but different arguments, the Function is called only once but
+// its result must be suitable for both callers.
+//
+// The main purpose of the argument is to avoid the Function closure
+// needing to retain large objects (in practice: the snapshot) in
+// memory that can be supplied at call time by any caller.
+type Function func(ctx context.Context, arg interface{}) interface{}
-// Acquire creates a new reference to g, and returns a func to release that
-// reference.
-func (g *Generation) Acquire() func() {
- destroyed := atomic.LoadUint32(&g.destroyed)
- if destroyed != 0 {
- panic("acquire on generation " + g.name + " destroyed by " + g.destroyedBy)
- }
- g.wg.Add(1)
- return g.wg.Done
+// A RefCounted is a value whose functional lifetime is determined by
+// reference counting.
+//
+// Its Acquire method is called before the Function is invoked, and
+// the corresponding release is called when the Function returns.
+// Usually both events happen within a single call to Get, so Get
+// would be fine with a "borrowed" reference, but if the context is
+// cancelled, Get may return before the Function is complete, causing
+// the argument to escape, and potential premature destruction of the
+// value. For a reference-counted type, this requires a pair of
+// increment/decrement operations to extend its life.
+type RefCounted interface {
+ // Acquire prevents the value from being destroyed until the
+ // returned function is called.
+ Acquire() func()
}
-// Arg is a marker interface that can be embedded to indicate a type is
-// intended for use as a Function argument.
-type Arg interface{ memoizeArg() }
+// A Promise represents the future result of a call to a function.
+type Promise struct {
+ debug string // for observability
-// Function is the type for functions that can be memoized.
-// The result must be a pointer.
-type Function func(ctx context.Context, arg Arg) interface{}
-
-type state int
-
-const (
- stateIdle = iota
- stateRunning
- stateCompleted
- stateDestroyed
-)
+ // refcount is the reference count in the containing Store, used by
+ // Store.Promise. It is guarded by Store.promisesMu on the containing Store.
+ refcount int32
-// Handle is returned from a store when a key is bound to a function.
-// It is then used to access the results of that function.
-//
-// A Handle starts out in idle state, waiting for something to demand its
-// evaluation. It then transitions into running state. While it's running,
-// waiters tracks the number of Get calls waiting for a result, and the done
-// channel is used to notify waiters of the next state transition. Once the
-// evaluation finishes, value is set, state changes to completed, and done
-// is closed, unblocking waiters. Alternatively, as Get calls are cancelled,
-// they decrement waiters. If it drops to zero, the inner context is cancelled,
-// computation is abandoned, and state resets to idle to start the process over
-// again.
-type Handle struct {
- key interface{}
- mu sync.Mutex
-
- // generations is the set of generations in which this handle is valid.
- generations map[*Generation]struct{}
+ mu sync.Mutex
+ // A Promise starts out IDLE, waiting for something to demand
+ // its evaluation. It then transitions into RUNNING state.
+ //
+ // While RUNNING, waiters tracks the number of Get calls
+ // waiting for a result, and the done channel is used to
+ // notify waiters of the next state transition. Once
+ // evaluation finishes, value is set, state changes to
+ // COMPLETED, and done is closed, unblocking waiters.
+ //
+ // Alternatively, as Get calls are cancelled, they decrement
+ // waiters. If it drops to zero, the inner context is
+ // cancelled, computation is abandoned, and state resets to
+ // IDLE to start the process over again.
state state
// done is set in running state, and closed when exiting it.
done chan struct{}
@@ -158,230 +95,241 @@ type Handle struct {
function Function
// value is set in completed state.
value interface{}
- // cleanup, if non-nil, is used to perform any necessary clean-up on values
- // produced by function.
- cleanup func(interface{})
}
-// Bind returns a handle for the given key and function.
+// NewPromise returns a promise for the future result of calling the
+// specified function.
//
-// Each call to bind will return the same handle if it is already bound. Bind
-// will always return a valid handle, creating one if needed. Each key can
-// only have one handle at any given time. The value will be held at least
-// until the associated generation is destroyed. Bind does not cause the value
-// to be generated.
-//
-// If cleanup is non-nil, it will be called on any non-nil values produced by
-// function when they are no longer referenced.
-func (g *Generation) Bind(key interface{}, function Function, cleanup func(interface{})) *Handle {
- // panic early if the function is nil
- // it would panic later anyway, but in a way that was much harder to debug
+// The debug string is used to classify promises in logs and metrics.
+// It should be drawn from a small set.
+func NewPromise(debug string, function Function) *Promise {
if function == nil {
- panic("the function passed to bind must not be nil")
- }
- if atomic.LoadUint32(&g.destroyed) != 0 {
- panic("operation on generation " + g.name + " destroyed by " + g.destroyedBy)
- }
- g.store.mu.Lock()
- defer g.store.mu.Unlock()
- h, ok := g.store.handles[key]
- if !ok {
- h := &Handle{
- key: key,
- function: function,
- generations: map[*Generation]struct{}{g: {}},
- cleanup: cleanup,
- }
- g.store.handles[key] = h
- return h
+ panic("nil function")
}
- h.mu.Lock()
- defer h.mu.Unlock()
- if _, ok := h.generations[g]; !ok {
- h.generations[g] = struct{}{}
+ return &Promise{
+ debug: debug,
+ function: function,
}
- return h
}
-// Stats returns the number of each type of value in the store.
-func (s *Store) Stats() map[reflect.Type]int {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- result := map[reflect.Type]int{}
- for k := range s.handles {
- result[reflect.TypeOf(k)]++
- }
- return result
-}
-
-// DebugOnlyIterate iterates through all live cache entries and calls f on them.
-// It should only be used for debugging purposes.
-func (s *Store) DebugOnlyIterate(f func(k, v interface{})) {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- for k, e := range s.handles {
- var v interface{}
- e.mu.Lock()
- if e.state == stateCompleted {
- v = e.value
- }
- e.mu.Unlock()
- if v == nil {
- continue
- }
- f(k, v)
- }
-}
-
-func (g *Generation) Inherit(hs ...*Handle) {
- for _, h := range hs {
- if atomic.LoadUint32(&g.destroyed) != 0 {
- panic("inherit on generation " + g.name + " destroyed by " + g.destroyedBy)
- }
+type state int
- h.mu.Lock()
- defer h.mu.Unlock()
- if h.state == stateDestroyed {
- panic(fmt.Sprintf("inheriting destroyed handle %#v (type %T) into generation %v", h.key, h.key, g.name))
- }
- h.generations[g] = struct{}{}
- }
-}
+const (
+ stateIdle = iota // newly constructed, or last waiter was cancelled
+ stateRunning // start was called and not cancelled
+ stateCompleted // function call ran to completion
+)
-// Cached returns the value associated with a handle.
+// Cached returns the value associated with a promise.
//
// It will never cause the value to be generated.
// It will return the cached value, if present.
-func (h *Handle) Cached(g *Generation) interface{} {
- h.mu.Lock()
- defer h.mu.Unlock()
- if _, ok := h.generations[g]; !ok {
- return nil
- }
- if h.state == stateCompleted {
- return h.value
+func (p *Promise) Cached() interface{} {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.state == stateCompleted {
+ return p.value
}
return nil
}
-// Get returns the value associated with a handle.
+// Get returns the value associated with a promise.
+//
+// All calls to Promise.Get on a given promise return the
+// same result but the function is called (to completion) at most once.
//
// If the value is not yet ready, the underlying function will be invoked.
-// If ctx is cancelled, Get returns nil.
-func (h *Handle) Get(ctx context.Context, g *Generation, arg Arg) (interface{}, error) {
- release := g.Acquire()
- defer release()
-
+//
+// If ctx is cancelled, Get returns (nil, Canceled).
+// If all concurrent calls to Get are cancelled, the context provided
+// to the function is cancelled. A later call to Get may attempt to
+// call the function again.
+func (p *Promise) Get(ctx context.Context, arg interface{}) (interface{}, error) {
if ctx.Err() != nil {
return nil, ctx.Err()
}
- h.mu.Lock()
- if _, ok := h.generations[g]; !ok {
- h.mu.Unlock()
-
- err := fmt.Errorf("reading key %#v: generation %v is not known", h.key, g.name)
- if *panicOnDestroyed && ctx.Err() != nil {
- panic(err)
- }
- return nil, err
- }
- switch h.state {
+ p.mu.Lock()
+ switch p.state {
case stateIdle:
- return h.run(ctx, g, arg)
+ return p.run(ctx, arg)
case stateRunning:
- return h.wait(ctx)
+ return p.wait(ctx)
case stateCompleted:
- defer h.mu.Unlock()
- return h.value, nil
- case stateDestroyed:
- h.mu.Unlock()
- err := fmt.Errorf("Get on destroyed entry %#v (type %T) in generation %v", h.key, h.key, g.name)
- if *panicOnDestroyed {
- panic(err)
- }
- return nil, err
+ defer p.mu.Unlock()
+ return p.value, nil
default:
panic("unknown state")
}
}
-// run starts h.function and returns the result. h.mu must be locked.
-func (h *Handle) run(ctx context.Context, g *Generation, arg Arg) (interface{}, error) {
+// run starts p.function and returns the result. p.mu must be locked.
+func (p *Promise) run(ctx context.Context, arg interface{}) (interface{}, error) {
childCtx, cancel := context.WithCancel(xcontext.Detach(ctx))
- h.cancel = cancel
- h.state = stateRunning
- h.done = make(chan struct{})
- function := h.function // Read under the lock
+ p.cancel = cancel
+ p.state = stateRunning
+ p.done = make(chan struct{})
+ function := p.function // Read under the lock
+
+ // Make sure that the argument isn't destroyed while we're running in it.
+ release := func() {}
+ if rc, ok := arg.(RefCounted); ok {
+ release = rc.Acquire()
+ }
- // Make sure that the generation isn't destroyed while we're running in it.
- release := g.Acquire()
go func() {
- defer release()
- // Just in case the function does something expensive without checking
- // the context, double-check we're still alive.
- if childCtx.Err() != nil {
- return
- }
- v := function(childCtx, arg)
- if childCtx.Err() != nil {
- // It's possible that v was computed despite the context cancellation. In
- // this case we should ensure that it is cleaned up.
- if h.cleanup != nil && v != nil {
- h.cleanup(v)
+ trace.WithRegion(childCtx, fmt.Sprintf("Promise.run %s", p.debug), func() {
+ defer release()
+ // Just in case the function does something expensive without checking
+ // the context, double-check we're still alive.
+ if childCtx.Err() != nil {
+ return
+ }
+ v := function(childCtx, arg)
+ if childCtx.Err() != nil {
+ return
}
- return
- }
- h.mu.Lock()
- defer h.mu.Unlock()
- // It's theoretically possible that the handle has been cancelled out
- // of the run that started us, and then started running again since we
- // checked childCtx above. Even so, that should be harmless, since each
- // run should produce the same results.
- if h.state != stateRunning {
- // v will never be used, so ensure that it is cleaned up.
- if h.cleanup != nil && v != nil {
- h.cleanup(v)
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ // It's theoretically possible that the promise has been cancelled out
+ // of the run that started us, and then started running again since we
+ // checked childCtx above. Even so, that should be harmless, since each
+ // run should produce the same results.
+ if p.state != stateRunning {
+ return
}
- return
- }
- // At this point v will be cleaned up whenever h is destroyed.
- h.value = v
- h.function = nil
- h.state = stateCompleted
- close(h.done)
+
+ p.value = v
+ p.function = nil // aid GC
+ p.state = stateCompleted
+ close(p.done)
+ })
}()
- return h.wait(ctx)
+ return p.wait(ctx)
}
-// wait waits for the value to be computed, or ctx to be cancelled. h.mu must be locked.
-func (h *Handle) wait(ctx context.Context) (interface{}, error) {
- h.waiters++
- done := h.done
- h.mu.Unlock()
+// wait waits for the value to be computed, or ctx to be cancelled. p.mu must be locked.
+func (p *Promise) wait(ctx context.Context) (interface{}, error) {
+ p.waiters++
+ done := p.done
+ p.mu.Unlock()
select {
case <-done:
- h.mu.Lock()
- defer h.mu.Unlock()
- if h.state == stateCompleted {
- return h.value, nil
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.state == stateCompleted {
+ return p.value, nil
}
return nil, nil
case <-ctx.Done():
- h.mu.Lock()
- defer h.mu.Unlock()
- h.waiters--
- if h.waiters == 0 && h.state == stateRunning {
- h.cancel()
- close(h.done)
- h.state = stateIdle
- h.done = nil
- h.cancel = nil
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ p.waiters--
+ if p.waiters == 0 && p.state == stateRunning {
+ p.cancel()
+ close(p.done)
+ p.state = stateIdle
+ p.done = nil
+ p.cancel = nil
}
return nil, ctx.Err()
}
}
+
+// An EvictionPolicy controls the eviction behavior of keys in a Store when
+// they no longer have any references.
+type EvictionPolicy int
+
+const (
+ // ImmediatelyEvict evicts keys as soon as they no longer have references.
+ ImmediatelyEvict EvictionPolicy = iota
+
+ // NeverEvict does not evict keys.
+ NeverEvict
+)
+
+// A Store maps arbitrary keys to reference-counted promises.
+//
+// The zero value is a valid Store, though a store may also be created via
+// NewStore if a custom EvictionPolicy is required.
+type Store struct {
+ evictionPolicy EvictionPolicy
+
+ promisesMu sync.Mutex
+ promises map[interface{}]*Promise
+}
+
+// NewStore creates a new store with the given eviction policy.
+func NewStore(policy EvictionPolicy) *Store {
+ return &Store{evictionPolicy: policy}
+}
+
+// Promise returns a reference-counted promise for the future result of
+// calling the specified function.
+//
+// Calls to Promise with the same key return the same promise, incrementing its
+// reference count. The caller must call the returned function to decrement
+// the promise's reference count when it is no longer needed. The returned
+// function must not be called more than once.
+//
+// Once the last reference has been released, the promise is removed from the
+// store.
+func (store *Store) Promise(key interface{}, function Function) (*Promise, func()) {
+ store.promisesMu.Lock()
+ p, ok := store.promises[key]
+ if !ok {
+ p = NewPromise(reflect.TypeOf(key).String(), function)
+ if store.promises == nil {
+ store.promises = map[interface{}]*Promise{}
+ }
+ store.promises[key] = p
+ }
+ p.refcount++
+ store.promisesMu.Unlock()
+
+ var released int32
+ release := func() {
+ if !atomic.CompareAndSwapInt32(&released, 0, 1) {
+ panic("release called more than once")
+ }
+ store.promisesMu.Lock()
+
+ p.refcount--
+ if p.refcount == 0 && store.evictionPolicy != NeverEvict {
+ // Inv: if p.refcount > 0, then store.promises[key] == p.
+ delete(store.promises, key)
+ }
+ store.promisesMu.Unlock()
+ }
+
+ return p, release
+}
+
+// Stats returns the number of each type of key in the store.
+func (s *Store) Stats() map[reflect.Type]int {
+ result := map[reflect.Type]int{}
+
+ s.promisesMu.Lock()
+ defer s.promisesMu.Unlock()
+
+ for k := range s.promises {
+ result[reflect.TypeOf(k)]++
+ }
+ return result
+}
+
+// DebugOnlyIterate iterates through the store and, for each completed
+// promise, calls f(k, v) for the map key k and function result v. It
+// should only be used for debugging purposes.
+func (s *Store) DebugOnlyIterate(f func(k, v interface{})) {
+ s.promisesMu.Lock()
+ defer s.promisesMu.Unlock()
+
+ for k, p := range s.promises {
+ if v := p.Cached(); v != nil {
+ f(k, v)
+ }
+ }
+}
diff --git a/internal/memoize/memoize_test.go b/internal/memoize/memoize_test.go
index f05966b46..c54572d59 100644
--- a/internal/memoize/memoize_test.go
+++ b/internal/memoize/memoize_test.go
@@ -6,102 +6,161 @@ package memoize_test
import (
"context"
- "strings"
+ "sync"
"testing"
+ "time"
"golang.org/x/tools/internal/memoize"
)
func TestGet(t *testing.T) {
- s := &memoize.Store{}
- g := s.Generation("x")
+ var store memoize.Store
evaled := 0
- h := g.Bind("key", func(context.Context, memoize.Arg) interface{} {
+ h, release := store.Promise("key", func(context.Context, interface{}) interface{} {
evaled++
return "res"
- }, nil)
- expectGet(t, h, g, "res")
- expectGet(t, h, g, "res")
+ })
+ defer release()
+ expectGet(t, h, "res")
+ expectGet(t, h, "res")
if evaled != 1 {
t.Errorf("got %v calls to function, wanted 1", evaled)
}
}
-func expectGet(t *testing.T, h *memoize.Handle, g *memoize.Generation, wantV interface{}) {
+func expectGet(t *testing.T, h *memoize.Promise, wantV interface{}) {
t.Helper()
- gotV, gotErr := h.Get(context.Background(), g, nil)
+ gotV, gotErr := h.Get(context.Background(), nil)
if gotV != wantV || gotErr != nil {
t.Fatalf("Get() = %v, %v, wanted %v, nil", gotV, gotErr, wantV)
}
}
-func expectGetError(t *testing.T, h *memoize.Handle, g *memoize.Generation, substr string) {
- gotV, gotErr := h.Get(context.Background(), g, nil)
- if gotErr == nil || !strings.Contains(gotErr.Error(), substr) {
- t.Fatalf("Get() = %v, %v, wanted err %q", gotV, gotErr, substr)
+func TestNewPromise(t *testing.T) {
+ calls := 0
+ f := func(context.Context, interface{}) interface{} {
+ calls++
+ return calls
}
-}
-func TestGenerations(t *testing.T) {
- s := &memoize.Store{}
- // Evaluate key in g1.
- g1 := s.Generation("g1")
- h1 := g1.Bind("key", func(context.Context, memoize.Arg) interface{} { return "res" }, nil)
- expectGet(t, h1, g1, "res")
-
- // Get key in g2. It should inherit the value from g1.
- g2 := s.Generation("g2")
- h2 := g2.Bind("key", func(context.Context, memoize.Arg) interface{} {
- t.Fatal("h2 should not need evaluation")
- return "error"
- }, nil)
- expectGet(t, h2, g2, "res")
-
- // With g1 destroyed, g2 should still work.
- g1.Destroy("TestGenerations")
- expectGet(t, h2, g2, "res")
-
- // With all generations destroyed, key should be re-evaluated.
- g2.Destroy("TestGenerations")
- g3 := s.Generation("g3")
- h3 := g3.Bind("key", func(context.Context, memoize.Arg) interface{} { return "new res" }, nil)
- expectGet(t, h3, g3, "new res")
+ // All calls to Get on the same promise return the same result.
+ p1 := memoize.NewPromise("debug", f)
+ expectGet(t, p1, 1)
+ expectGet(t, p1, 1)
+
+ // A new promise calls the function again.
+ p2 := memoize.NewPromise("debug", f)
+ expectGet(t, p2, 2)
+ expectGet(t, p2, 2)
+
+ // The original promise is unchanged.
+ expectGet(t, p1, 1)
}
-func TestCleanup(t *testing.T) {
- s := &memoize.Store{}
- g1 := s.Generation("g1")
+func TestStoredPromiseRefCounting(t *testing.T) {
+ var store memoize.Store
v1 := false
v2 := false
- cleanup := func(v interface{}) {
- *(v.(*bool)) = true
- }
- h1 := g1.Bind("key1", func(context.Context, memoize.Arg) interface{} {
+ p1, release1 := store.Promise("key1", func(context.Context, interface{}) interface{} {
return &v1
- }, nil)
- h2 := g1.Bind("key2", func(context.Context, memoize.Arg) interface{} {
+ })
+ p2, release2 := store.Promise("key2", func(context.Context, interface{}) interface{} {
return &v2
- }, cleanup)
- expectGet(t, h1, g1, &v1)
- expectGet(t, h2, g1, &v2)
- g2 := s.Generation("g2")
- g2.Inherit(h1, h2)
-
- g1.Destroy("TestCleanup")
- expectGet(t, h1, g2, &v1)
- expectGet(t, h2, g2, &v2)
- for k, v := range map[string]*bool{"key1": &v1, "key2": &v2} {
- if got, want := *v, false; got != want {
- t.Errorf("after destroying g1, bound value %q is cleaned up", k)
- }
+ })
+ expectGet(t, p1, &v1)
+ expectGet(t, p2, &v2)
+
+ expectGet(t, p1, &v1)
+ expectGet(t, p2, &v2)
+
+ p2Copy, release2Copy := store.Promise("key2", func(context.Context, interface{}) interface{} {
+ return &v1
+ })
+ if p2 != p2Copy {
+ t.Error("Promise returned a new value while old is not destroyed yet")
}
- g2.Destroy("TestCleanup")
+ expectGet(t, p2Copy, &v2)
+
+ release2()
+ if got, want := v2, false; got != want {
+ t.Errorf("after destroying first v2 ref, got %v, want %v", got, want)
+ }
+ release2Copy()
if got, want := v1, false; got != want {
- t.Error("after destroying g2, v1 is cleaned up")
+ t.Errorf("after destroying v2, got %v, want %v", got, want)
+ }
+ release1()
+
+ p2Copy, release2Copy = store.Promise("key2", func(context.Context, interface{}) interface{} {
+ return &v2
+ })
+ if p2 == p2Copy {
+ t.Error("Promise returned previously destroyed value")
+ }
+ release2Copy()
+}
+
+func TestPromiseDestroyedWhileRunning(t *testing.T) {
+ // Test that calls to Promise.Get return even if the promise is destroyed while running.
+
+ var store memoize.Store
+ c := make(chan int)
+
+ var v int
+ h, release := store.Promise("key", func(ctx context.Context, _ interface{}) interface{} {
+ <-c
+ <-c
+ if err := ctx.Err(); err != nil {
+ t.Errorf("ctx.Err() = %v, want nil", err)
+ }
+ return &v
+ })
+
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) // arbitrary timeout; may be removed if it causes flakes
+ defer cancel()
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+ var got interface{}
+ var err error
+ go func() {
+ got, err = h.Get(ctx, nil)
+ wg.Done()
+ }()
+
+ c <- 0 // send once to enter the promise function
+ release() // release before the promise function returns
+ c <- 0 // let the promise function proceed
+
+ wg.Wait()
+
+ if err != nil {
+ t.Errorf("Get() failed: %v", err)
+ }
+ if got != &v {
+ t.Errorf("Get() = %v, want %v", got, v)
}
- if got, want := v2, true; got != want {
- t.Error("after destroying g2, v2 is not cleaned up")
+}
+
+func TestDoubleReleasePanics(t *testing.T) {
+ var store memoize.Store
+ _, release := store.Promise("key", func(ctx context.Context, _ interface{}) interface{} { return 0 })
+
+ panicked := false
+
+ func() {
+ defer func() {
+ if recover() != nil {
+ panicked = true
+ }
+ }()
+ release()
+ release()
+ }()
+
+ if !panicked {
+ t.Errorf("calling release() twice did not panic")
}
}
diff --git a/internal/packagesinternal/packages.go b/internal/packagesinternal/packages.go
index 9702094c5..d9950b1f0 100644
--- a/internal/packagesinternal/packages.go
+++ b/internal/packagesinternal/packages.go
@@ -23,6 +23,8 @@ var GetGoCmdRunner = func(config interface{}) *gocommand.Runner { return nil }
var SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) {}
var TypecheckCgo int
+var DepsErrors int // must be set as a LoadMode to call GetDepsErrors
+var ForTest int // must be set as a LoadMode to call GetForTest
var SetModFlag = func(config interface{}, value string) {}
var SetModFile = func(config interface{}, value string) {}
diff --git a/internal/persistent/map.go b/internal/persistent/map.go
new file mode 100644
index 000000000..b29cfe419
--- /dev/null
+++ b/internal/persistent/map.go
@@ -0,0 +1,311 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The persistent package defines various persistent data structures;
+// that is, data structures that can be efficiently copied and modified
+// in sublinear time.
+package persistent
+
+import (
+ "fmt"
+ "math/rand"
+ "strings"
+ "sync/atomic"
+)
+
+// Implementation details:
+// * Each value is reference counted by nodes which hold it.
+// * Each node is reference counted by its parent nodes.
+// * Each map is considered a top-level parent node from reference counting perspective.
+// * Each change does always effectivelly produce a new top level node.
+//
+// Functions which operate directly with nodes do have a notation in form of
+// `foo(arg1:+n1, arg2:+n2) (ret1:+n3)`.
+// Each argument is followed by a delta change to its reference counter.
+// In case if no change is expected, the delta will be `-0`.
+
+// Map is an associative mapping from keys to values, both represented as
+// interface{}. Key comparison and iteration order is defined by a
+// client-provided function that implements a strict weak order.
+//
+// Maps can be Cloned in constant time.
+// Get, Store, and Delete operations are done on average in logarithmic time.
+// Maps can be Updated in O(m log(n/m)) time for maps of size n and m, where m < n.
+//
+// Values are reference counted, and a client-supplied release function
+// is called when a value is no longer referenced by a map or any clone.
+//
+// Internally the implementation is based on a randomized persistent treap:
+// https://en.wikipedia.org/wiki/Treap.
+type Map struct {
+ less func(a, b interface{}) bool
+ root *mapNode
+}
+
+func (m *Map) String() string {
+ var buf strings.Builder
+ buf.WriteByte('{')
+ var sep string
+ m.Range(func(k, v interface{}) {
+ fmt.Fprintf(&buf, "%s%v: %v", sep, k, v)
+ sep = ", "
+ })
+ buf.WriteByte('}')
+ return buf.String()
+}
+
+type mapNode struct {
+ key interface{}
+ value *refValue
+ weight uint64
+ refCount int32
+ left, right *mapNode
+}
+
+type refValue struct {
+ refCount int32
+ value interface{}
+ release func(key, value interface{})
+}
+
+func newNodeWithRef(key, value interface{}, release func(key, value interface{})) *mapNode {
+ return &mapNode{
+ key: key,
+ value: &refValue{
+ value: value,
+ release: release,
+ refCount: 1,
+ },
+ refCount: 1,
+ weight: rand.Uint64(),
+ }
+}
+
+func (node *mapNode) shallowCloneWithRef() *mapNode {
+ atomic.AddInt32(&node.value.refCount, 1)
+ return &mapNode{
+ key: node.key,
+ value: node.value,
+ weight: node.weight,
+ refCount: 1,
+ }
+}
+
+func (node *mapNode) incref() *mapNode {
+ if node != nil {
+ atomic.AddInt32(&node.refCount, 1)
+ }
+ return node
+}
+
+func (node *mapNode) decref() {
+ if node == nil {
+ return
+ }
+ if atomic.AddInt32(&node.refCount, -1) == 0 {
+ if atomic.AddInt32(&node.value.refCount, -1) == 0 {
+ if node.value.release != nil {
+ node.value.release(node.key, node.value.value)
+ }
+ node.value.value = nil
+ node.value.release = nil
+ }
+ node.left.decref()
+ node.right.decref()
+ }
+}
+
+// NewMap returns a new map whose keys are ordered by the given comparison
+// function (a strict weak order). It is the responsibility of the caller to
+// Destroy it at later time.
+func NewMap(less func(a, b interface{}) bool) *Map {
+ return &Map{
+ less: less,
+ }
+}
+
+// Clone returns a copy of the given map. It is a responsibility of the caller
+// to Destroy it at later time.
+func (pm *Map) Clone() *Map {
+ return &Map{
+ less: pm.less,
+ root: pm.root.incref(),
+ }
+}
+
+// Destroy destroys the map.
+//
+// After Destroy, the Map should not be used again.
+func (pm *Map) Destroy() {
+ // The implementation of these two functions is the same,
+ // but their intent is different.
+ pm.Clear()
+}
+
+// Clear removes all entries from the map.
+func (pm *Map) Clear() {
+ pm.root.decref()
+ pm.root = nil
+}
+
+// Range calls f sequentially in ascending key order for all entries in the map.
+func (pm *Map) Range(f func(key, value interface{})) {
+ pm.root.forEach(f)
+}
+
+func (node *mapNode) forEach(f func(key, value interface{})) {
+ if node == nil {
+ return
+ }
+ node.left.forEach(f)
+ f(node.key, node.value.value)
+ node.right.forEach(f)
+}
+
+// Get returns the map value associated with the specified key, or nil if no entry
+// is present. The ok result indicates whether an entry was found in the map.
+func (pm *Map) Get(key interface{}) (interface{}, bool) {
+ node := pm.root
+ for node != nil {
+ if pm.less(key, node.key) {
+ node = node.left
+ } else if pm.less(node.key, key) {
+ node = node.right
+ } else {
+ return node.value.value, true
+ }
+ }
+ return nil, false
+}
+
+// SetAll updates the map with key/value pairs from the other map, overwriting existing keys.
+// It is equivalent to calling Set for each entry in the other map but is more efficient.
+// Both maps must have the same comparison function, otherwise behavior is undefined.
+func (pm *Map) SetAll(other *Map) {
+ root := pm.root
+ pm.root = union(root, other.root, pm.less, true)
+ root.decref()
+}
+
+// Set updates the value associated with the specified key.
+// If release is non-nil, it will be called with entry's key and value once the
+// key is no longer contained in the map or any clone.
+func (pm *Map) Set(key, value interface{}, release func(key, value interface{})) {
+ first := pm.root
+ second := newNodeWithRef(key, value, release)
+ pm.root = union(first, second, pm.less, true)
+ first.decref()
+ second.decref()
+}
+
+// union returns a new tree which is a union of first and second one.
+// If overwrite is set to true, second one would override a value for any duplicate keys.
+//
+// union(first:-0, second:-0) (result:+1)
+// Union borrows both subtrees without affecting their refcount and returns a
+// new reference that the caller is expected to call decref.
+func union(first, second *mapNode, less func(a, b interface{}) bool, overwrite bool) *mapNode {
+ if first == nil {
+ return second.incref()
+ }
+ if second == nil {
+ return first.incref()
+ }
+
+ if first.weight < second.weight {
+ second, first, overwrite = first, second, !overwrite
+ }
+
+ left, mid, right := split(second, first.key, less, false)
+ var result *mapNode
+ if overwrite && mid != nil {
+ result = mid.shallowCloneWithRef()
+ } else {
+ result = first.shallowCloneWithRef()
+ }
+ result.weight = first.weight
+ result.left = union(first.left, left, less, overwrite)
+ result.right = union(first.right, right, less, overwrite)
+ left.decref()
+ mid.decref()
+ right.decref()
+ return result
+}
+
+// split the tree midway by the key into three different ones.
+// Return three new trees: left with all nodes with smaller than key, mid with
+// the node matching the key, right with all nodes larger than key.
+// If there are no nodes in one of trees, return nil instead of it.
+// If requireMid is set (such as during deletion), then all return arguments
+// are nil if mid is not found.
+//
+// split(n:-0) (left:+1, mid:+1, right:+1)
+// Split borrows n without affecting its refcount, and returns three
+// new references that that caller is expected to call decref.
+func split(n *mapNode, key interface{}, less func(a, b interface{}) bool, requireMid bool) (left, mid, right *mapNode) {
+ if n == nil {
+ return nil, nil, nil
+ }
+
+ if less(n.key, key) {
+ left, mid, right := split(n.right, key, less, requireMid)
+ if requireMid && mid == nil {
+ return nil, nil, nil
+ }
+ newN := n.shallowCloneWithRef()
+ newN.left = n.left.incref()
+ newN.right = left
+ return newN, mid, right
+ } else if less(key, n.key) {
+ left, mid, right := split(n.left, key, less, requireMid)
+ if requireMid && mid == nil {
+ return nil, nil, nil
+ }
+ newN := n.shallowCloneWithRef()
+ newN.left = right
+ newN.right = n.right.incref()
+ return left, mid, newN
+ }
+ mid = n.shallowCloneWithRef()
+ return n.left.incref(), mid, n.right.incref()
+}
+
+// Delete deletes the value for a key.
+func (pm *Map) Delete(key interface{}) {
+ root := pm.root
+ left, mid, right := split(root, key, pm.less, true)
+ if mid == nil {
+ return
+ }
+ pm.root = merge(left, right)
+ left.decref()
+ mid.decref()
+ right.decref()
+ root.decref()
+}
+
+// merge two trees while preserving the weight invariant.
+// All nodes in left must have smaller keys than any node in right.
+//
+// merge(left:-0, right:-0) (result:+1)
+// Merge borrows its arguments without affecting their refcount
+// and returns a new reference that the caller is expected to call decref.
+func merge(left, right *mapNode) *mapNode {
+ switch {
+ case left == nil:
+ return right.incref()
+ case right == nil:
+ return left.incref()
+ case left.weight > right.weight:
+ root := left.shallowCloneWithRef()
+ root.left = left.left.incref()
+ root.right = merge(left.right, right)
+ return root
+ default:
+ root := right.shallowCloneWithRef()
+ root.left = merge(left, right.left)
+ root.right = right.right.incref()
+ return root
+ }
+}
diff --git a/internal/persistent/map_test.go b/internal/persistent/map_test.go
new file mode 100644
index 000000000..9f89a1d30
--- /dev/null
+++ b/internal/persistent/map_test.go
@@ -0,0 +1,355 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package persistent
+
+import (
+ "fmt"
+ "math/rand"
+ "reflect"
+ "sync/atomic"
+ "testing"
+)
+
+type mapEntry struct {
+ key int
+ value int
+}
+
+type validatedMap struct {
+ impl *Map
+ expected map[int]int // current key-value mapping.
+ deleted map[mapEntry]int // maps deleted entries to their clock time of last deletion
+ seen map[mapEntry]int // maps seen entries to their clock time of last insertion
+ clock int
+}
+
+func TestSimpleMap(t *testing.T) {
+ deletedEntries := make(map[mapEntry]int)
+ seenEntries := make(map[mapEntry]int)
+
+ m1 := &validatedMap{
+ impl: NewMap(func(a, b interface{}) bool {
+ return a.(int) < b.(int)
+ }),
+ expected: make(map[int]int),
+ deleted: deletedEntries,
+ seen: seenEntries,
+ }
+
+ m3 := m1.clone()
+ validateRef(t, m1, m3)
+ m3.set(t, 8, 8)
+ validateRef(t, m1, m3)
+ m3.destroy()
+
+ assertSameMap(t, entrySet(deletedEntries), map[mapEntry]struct{}{
+ {key: 8, value: 8}: {},
+ })
+
+ validateRef(t, m1)
+ m1.set(t, 1, 1)
+ validateRef(t, m1)
+ m1.set(t, 2, 2)
+ validateRef(t, m1)
+ m1.set(t, 3, 3)
+ validateRef(t, m1)
+ m1.remove(t, 2)
+ validateRef(t, m1)
+ m1.set(t, 6, 6)
+ validateRef(t, m1)
+
+ assertSameMap(t, entrySet(deletedEntries), map[mapEntry]struct{}{
+ {key: 2, value: 2}: {},
+ {key: 8, value: 8}: {},
+ })
+
+ m2 := m1.clone()
+ validateRef(t, m1, m2)
+ m1.set(t, 6, 60)
+ validateRef(t, m1, m2)
+ m1.remove(t, 1)
+ validateRef(t, m1, m2)
+
+ gotAllocs := int(testing.AllocsPerRun(10, func() {
+ m1.impl.Delete(100)
+ m1.impl.Delete(1)
+ }))
+ wantAllocs := 0
+ if gotAllocs != wantAllocs {
+ t.Errorf("wanted %d allocs, got %d", wantAllocs, gotAllocs)
+ }
+
+ for i := 10; i < 14; i++ {
+ m1.set(t, i, i)
+ validateRef(t, m1, m2)
+ }
+
+ m1.set(t, 10, 100)
+ validateRef(t, m1, m2)
+
+ m1.remove(t, 12)
+ validateRef(t, m1, m2)
+
+ m2.set(t, 4, 4)
+ validateRef(t, m1, m2)
+ m2.set(t, 5, 5)
+ validateRef(t, m1, m2)
+
+ m1.destroy()
+
+ assertSameMap(t, entrySet(deletedEntries), map[mapEntry]struct{}{
+ {key: 2, value: 2}: {},
+ {key: 6, value: 60}: {},
+ {key: 8, value: 8}: {},
+ {key: 10, value: 10}: {},
+ {key: 10, value: 100}: {},
+ {key: 11, value: 11}: {},
+ {key: 12, value: 12}: {},
+ {key: 13, value: 13}: {},
+ })
+
+ m2.set(t, 7, 7)
+ validateRef(t, m2)
+
+ m2.destroy()
+
+ assertSameMap(t, entrySet(seenEntries), entrySet(deletedEntries))
+}
+
+func TestRandomMap(t *testing.T) {
+ deletedEntries := make(map[mapEntry]int)
+ seenEntries := make(map[mapEntry]int)
+
+ m := &validatedMap{
+ impl: NewMap(func(a, b interface{}) bool {
+ return a.(int) < b.(int)
+ }),
+ expected: make(map[int]int),
+ deleted: deletedEntries,
+ seen: seenEntries,
+ }
+
+ keys := make([]int, 0, 1000)
+ for i := 0; i < 1000; i++ {
+ key := rand.Intn(10000)
+ m.set(t, key, key)
+ keys = append(keys, key)
+
+ if i%10 == 1 {
+ index := rand.Intn(len(keys))
+ last := len(keys) - 1
+ key = keys[index]
+ keys[index], keys[last] = keys[last], keys[index]
+ keys = keys[:last]
+
+ m.remove(t, key)
+ }
+ }
+
+ m.destroy()
+ assertSameMap(t, entrySet(seenEntries), entrySet(deletedEntries))
+}
+
+func entrySet(m map[mapEntry]int) map[mapEntry]struct{} {
+ set := make(map[mapEntry]struct{})
+ for k := range m {
+ set[k] = struct{}{}
+ }
+ return set
+}
+
+func TestUpdate(t *testing.T) {
+ deletedEntries := make(map[mapEntry]int)
+ seenEntries := make(map[mapEntry]int)
+
+ m1 := &validatedMap{
+ impl: NewMap(func(a, b interface{}) bool {
+ return a.(int) < b.(int)
+ }),
+ expected: make(map[int]int),
+ deleted: deletedEntries,
+ seen: seenEntries,
+ }
+ m2 := m1.clone()
+
+ m1.set(t, 1, 1)
+ m1.set(t, 2, 2)
+ m2.set(t, 2, 20)
+ m2.set(t, 3, 3)
+ m1.setAll(t, m2)
+
+ m1.destroy()
+ m2.destroy()
+ assertSameMap(t, entrySet(seenEntries), entrySet(deletedEntries))
+}
+
+func validateRef(t *testing.T, maps ...*validatedMap) {
+ t.Helper()
+
+ actualCountByEntry := make(map[mapEntry]int32)
+ nodesByEntry := make(map[mapEntry]map[*mapNode]struct{})
+ expectedCountByEntry := make(map[mapEntry]int32)
+ for i, m := range maps {
+ dfsRef(m.impl.root, actualCountByEntry, nodesByEntry)
+ dumpMap(t, fmt.Sprintf("%d:", i), m.impl.root)
+ }
+ for entry, nodes := range nodesByEntry {
+ expectedCountByEntry[entry] = int32(len(nodes))
+ }
+ assertSameMap(t, expectedCountByEntry, actualCountByEntry)
+}
+
+func dfsRef(node *mapNode, countByEntry map[mapEntry]int32, nodesByEntry map[mapEntry]map[*mapNode]struct{}) {
+ if node == nil {
+ return
+ }
+
+ entry := mapEntry{key: node.key.(int), value: node.value.value.(int)}
+ countByEntry[entry] = atomic.LoadInt32(&node.value.refCount)
+
+ nodes, ok := nodesByEntry[entry]
+ if !ok {
+ nodes = make(map[*mapNode]struct{})
+ nodesByEntry[entry] = nodes
+ }
+ nodes[node] = struct{}{}
+
+ dfsRef(node.left, countByEntry, nodesByEntry)
+ dfsRef(node.right, countByEntry, nodesByEntry)
+}
+
+func dumpMap(t *testing.T, prefix string, n *mapNode) {
+ if n == nil {
+ t.Logf("%s nil", prefix)
+ return
+ }
+ t.Logf("%s {key: %v, value: %v (ref: %v), ref: %v, weight: %v}", prefix, n.key, n.value.value, n.value.refCount, n.refCount, n.weight)
+ dumpMap(t, prefix+"l", n.left)
+ dumpMap(t, prefix+"r", n.right)
+}
+
+func (vm *validatedMap) validate(t *testing.T) {
+ t.Helper()
+
+ validateNode(t, vm.impl.root, vm.impl.less)
+
+ // Note: this validation may not make sense if maps were constructed using
+ // SetAll operations. If this proves to be problematic, remove the clock,
+ // deleted, and seen fields.
+ for key, value := range vm.expected {
+ entry := mapEntry{key: key, value: value}
+ if deleteAt := vm.deleted[entry]; deleteAt > vm.seen[entry] {
+ t.Fatalf("entry is deleted prematurely, key: %d, value: %d", key, value)
+ }
+ }
+
+ actualMap := make(map[int]int, len(vm.expected))
+ vm.impl.Range(func(key, value interface{}) {
+ if other, ok := actualMap[key.(int)]; ok {
+ t.Fatalf("key is present twice, key: %d, first value: %d, second value: %d", key, value, other)
+ }
+ actualMap[key.(int)] = value.(int)
+ })
+
+ assertSameMap(t, actualMap, vm.expected)
+}
+
+func validateNode(t *testing.T, node *mapNode, less func(a, b interface{}) bool) {
+ if node == nil {
+ return
+ }
+
+ if node.left != nil {
+ if less(node.key, node.left.key) {
+ t.Fatalf("left child has larger key: %v vs %v", node.left.key, node.key)
+ }
+ if node.left.weight > node.weight {
+ t.Fatalf("left child has larger weight: %v vs %v", node.left.weight, node.weight)
+ }
+ }
+
+ if node.right != nil {
+ if less(node.right.key, node.key) {
+ t.Fatalf("right child has smaller key: %v vs %v", node.right.key, node.key)
+ }
+ if node.right.weight > node.weight {
+ t.Fatalf("right child has larger weight: %v vs %v", node.right.weight, node.weight)
+ }
+ }
+
+ validateNode(t, node.left, less)
+ validateNode(t, node.right, less)
+}
+
+func (vm *validatedMap) setAll(t *testing.T, other *validatedMap) {
+ vm.impl.SetAll(other.impl)
+
+ // Note: this is buggy because we are not updating vm.clock, vm.deleted, or
+ // vm.seen.
+ for key, value := range other.expected {
+ vm.expected[key] = value
+ }
+ vm.validate(t)
+}
+
+func (vm *validatedMap) set(t *testing.T, key, value int) {
+ entry := mapEntry{key: key, value: value}
+
+ vm.clock++
+ vm.seen[entry] = vm.clock
+
+ vm.impl.Set(key, value, func(deletedKey, deletedValue interface{}) {
+ if deletedKey != key || deletedValue != value {
+ t.Fatalf("unexpected passed in deleted entry: %v/%v, expected: %v/%v", deletedKey, deletedValue, key, value)
+ }
+ // Not safe if closure shared between two validatedMaps.
+ vm.deleted[entry] = vm.clock
+ })
+ vm.expected[key] = value
+ vm.validate(t)
+
+ gotValue, ok := vm.impl.Get(key)
+ if !ok || gotValue != value {
+ t.Fatalf("unexpected get result after insertion, key: %v, expected: %v, got: %v (%v)", key, value, gotValue, ok)
+ }
+}
+
+func (vm *validatedMap) remove(t *testing.T, key int) {
+ vm.clock++
+ vm.impl.Delete(key)
+ delete(vm.expected, key)
+ vm.validate(t)
+
+ gotValue, ok := vm.impl.Get(key)
+ if ok {
+ t.Fatalf("unexpected get result after removal, key: %v, got: %v", key, gotValue)
+ }
+}
+
+func (vm *validatedMap) clone() *validatedMap {
+ expected := make(map[int]int, len(vm.expected))
+ for key, value := range vm.expected {
+ expected[key] = value
+ }
+
+ return &validatedMap{
+ impl: vm.impl.Clone(),
+ expected: expected,
+ deleted: vm.deleted,
+ seen: vm.seen,
+ }
+}
+
+func (vm *validatedMap) destroy() {
+ vm.impl.Destroy()
+}
+
+func assertSameMap(t *testing.T, map1, map2 interface{}) {
+ t.Helper()
+
+ if !reflect.DeepEqual(map1, map2) {
+ t.Fatalf("different maps:\n%v\nvs\n%v", map1, map2)
+ }
+}
diff --git a/internal/pkgbits/codes.go b/internal/pkgbits/codes.go
new file mode 100644
index 000000000..f0cabde96
--- /dev/null
+++ b/internal/pkgbits/codes.go
@@ -0,0 +1,77 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+// A Code is an enum value that can be encoded into bitstreams.
+//
+// Code types are preferable for enum types, because they allow
+// Decoder to detect desyncs.
+type Code interface {
+ // Marker returns the SyncMarker for the Code's dynamic type.
+ Marker() SyncMarker
+
+ // Value returns the Code's ordinal value.
+ Value() int
+}
+
+// A CodeVal distinguishes among go/constant.Value encodings.
+type CodeVal int
+
+func (c CodeVal) Marker() SyncMarker { return SyncVal }
+func (c CodeVal) Value() int { return int(c) }
+
+// Note: These values are public and cannot be changed without
+// updating the go/types importers.
+
+const (
+ ValBool CodeVal = iota
+ ValString
+ ValInt64
+ ValBigInt
+ ValBigRat
+ ValBigFloat
+)
+
+// A CodeType distinguishes among go/types.Type encodings.
+type CodeType int
+
+func (c CodeType) Marker() SyncMarker { return SyncType }
+func (c CodeType) Value() int { return int(c) }
+
+// Note: These values are public and cannot be changed without
+// updating the go/types importers.
+
+const (
+ TypeBasic CodeType = iota
+ TypeNamed
+ TypePointer
+ TypeSlice
+ TypeArray
+ TypeChan
+ TypeMap
+ TypeSignature
+ TypeStruct
+ TypeInterface
+ TypeUnion
+ TypeTypeParam
+)
+
+// A CodeObj distinguishes among go/types.Object encodings.
+type CodeObj int
+
+func (c CodeObj) Marker() SyncMarker { return SyncCodeObj }
+func (c CodeObj) Value() int { return int(c) }
+
+// Note: These values are public and cannot be changed without
+// updating the go/types importers.
+
+const (
+ ObjAlias CodeObj = iota
+ ObjConst
+ ObjType
+ ObjFunc
+ ObjVar
+ ObjStub
+)
diff --git a/internal/pkgbits/decoder.go b/internal/pkgbits/decoder.go
new file mode 100644
index 000000000..b92e8e6eb
--- /dev/null
+++ b/internal/pkgbits/decoder.go
@@ -0,0 +1,517 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "go/constant"
+ "go/token"
+ "io"
+ "math/big"
+ "os"
+ "runtime"
+ "strings"
+)
+
+// A PkgDecoder provides methods for decoding a package's Unified IR
+// export data.
+type PkgDecoder struct {
+ // version is the file format version.
+ version uint32
+
+ // sync indicates whether the file uses sync markers.
+ sync bool
+
+ // pkgPath is the package path for the package to be decoded.
+ //
+ // TODO(mdempsky): Remove; unneeded since CL 391014.
+ pkgPath string
+
+ // elemData is the full data payload of the encoded package.
+ // Elements are densely and contiguously packed together.
+ //
+ // The last 8 bytes of elemData are the package fingerprint.
+ elemData string
+
+ // elemEnds stores the byte-offset end positions of element
+ // bitstreams within elemData.
+ //
+ // For example, element I's bitstream data starts at elemEnds[I-1]
+ // (or 0, if I==0) and ends at elemEnds[I].
+ //
+ // Note: elemEnds is indexed by absolute indices, not
+ // section-relative indices.
+ elemEnds []uint32
+
+ // elemEndsEnds stores the index-offset end positions of relocation
+ // sections within elemEnds.
+ //
+ // For example, section K's end positions start at elemEndsEnds[K-1]
+ // (or 0, if K==0) and end at elemEndsEnds[K].
+ elemEndsEnds [numRelocs]uint32
+
+ scratchRelocEnt []RelocEnt
+}
+
+// PkgPath returns the package path for the package
+//
+// TODO(mdempsky): Remove; unneeded since CL 391014.
+func (pr *PkgDecoder) PkgPath() string { return pr.pkgPath }
+
+// SyncMarkers reports whether pr uses sync markers.
+func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync }
+
+// NewPkgDecoder returns a PkgDecoder initialized to read the Unified
+// IR export data from input. pkgPath is the package path for the
+// compilation unit that produced the export data.
+//
+// TODO(mdempsky): Remove pkgPath parameter; unneeded since CL 391014.
+func NewPkgDecoder(pkgPath, input string) PkgDecoder {
+ pr := PkgDecoder{
+ pkgPath: pkgPath,
+ }
+
+ // TODO(mdempsky): Implement direct indexing of input string to
+ // avoid copying the position information.
+
+ r := strings.NewReader(input)
+
+ assert(binary.Read(r, binary.LittleEndian, &pr.version) == nil)
+
+ switch pr.version {
+ default:
+ panic(fmt.Errorf("unsupported version: %v", pr.version))
+ case 0:
+ // no flags
+ case 1:
+ var flags uint32
+ assert(binary.Read(r, binary.LittleEndian, &flags) == nil)
+ pr.sync = flags&flagSyncMarkers != 0
+ }
+
+ assert(binary.Read(r, binary.LittleEndian, pr.elemEndsEnds[:]) == nil)
+
+ pr.elemEnds = make([]uint32, pr.elemEndsEnds[len(pr.elemEndsEnds)-1])
+ assert(binary.Read(r, binary.LittleEndian, pr.elemEnds[:]) == nil)
+
+ pos, err := r.Seek(0, io.SeekCurrent)
+ assert(err == nil)
+
+ pr.elemData = input[pos:]
+ assert(len(pr.elemData)-8 == int(pr.elemEnds[len(pr.elemEnds)-1]))
+
+ return pr
+}
+
+// NumElems returns the number of elements in section k.
+func (pr *PkgDecoder) NumElems(k RelocKind) int {
+ count := int(pr.elemEndsEnds[k])
+ if k > 0 {
+ count -= int(pr.elemEndsEnds[k-1])
+ }
+ return count
+}
+
+// TotalElems returns the total number of elements across all sections.
+func (pr *PkgDecoder) TotalElems() int {
+ return len(pr.elemEnds)
+}
+
+// Fingerprint returns the package fingerprint.
+func (pr *PkgDecoder) Fingerprint() [8]byte {
+ var fp [8]byte
+ copy(fp[:], pr.elemData[len(pr.elemData)-8:])
+ return fp
+}
+
+// AbsIdx returns the absolute index for the given (section, index)
+// pair.
+func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int {
+ absIdx := int(idx)
+ if k > 0 {
+ absIdx += int(pr.elemEndsEnds[k-1])
+ }
+ if absIdx >= int(pr.elemEndsEnds[k]) {
+ errorf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds)
+ }
+ return absIdx
+}
+
+// DataIdx returns the raw element bitstream for the given (section,
+// index) pair.
+func (pr *PkgDecoder) DataIdx(k RelocKind, idx Index) string {
+ absIdx := pr.AbsIdx(k, idx)
+
+ var start uint32
+ if absIdx > 0 {
+ start = pr.elemEnds[absIdx-1]
+ }
+ end := pr.elemEnds[absIdx]
+
+ return pr.elemData[start:end]
+}
+
+// StringIdx returns the string value for the given string index.
+func (pr *PkgDecoder) StringIdx(idx Index) string {
+ return pr.DataIdx(RelocString, idx)
+}
+
+// NewDecoder returns a Decoder for the given (section, index) pair,
+// and decodes the given SyncMarker from the element bitstream.
+func (pr *PkgDecoder) NewDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder {
+ r := pr.NewDecoderRaw(k, idx)
+ r.Sync(marker)
+ return r
+}
+
+// TempDecoder returns a Decoder for the given (section, index) pair,
+// and decodes the given SyncMarker from the element bitstream.
+// If possible the Decoder should be RetireDecoder'd when it is no longer
+// needed, this will avoid heap allocations.
+func (pr *PkgDecoder) TempDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder {
+ r := pr.TempDecoderRaw(k, idx)
+ r.Sync(marker)
+ return r
+}
+
+func (pr *PkgDecoder) RetireDecoder(d *Decoder) {
+ pr.scratchRelocEnt = d.Relocs
+ d.Relocs = nil
+}
+
+// NewDecoderRaw returns a Decoder for the given (section, index) pair.
+//
+// Most callers should use NewDecoder instead.
+func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder {
+ r := Decoder{
+ common: pr,
+ k: k,
+ Idx: idx,
+ }
+
+ // TODO(mdempsky) r.data.Reset(...) after #44505 is resolved.
+ r.Data = *strings.NewReader(pr.DataIdx(k, idx))
+
+ r.Sync(SyncRelocs)
+ r.Relocs = make([]RelocEnt, r.Len())
+ for i := range r.Relocs {
+ r.Sync(SyncReloc)
+ r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())}
+ }
+
+ return r
+}
+
+func (pr *PkgDecoder) TempDecoderRaw(k RelocKind, idx Index) Decoder {
+ r := Decoder{
+ common: pr,
+ k: k,
+ Idx: idx,
+ }
+
+ r.Data.Reset(pr.DataIdx(k, idx))
+ r.Sync(SyncRelocs)
+ l := r.Len()
+ if cap(pr.scratchRelocEnt) >= l {
+ r.Relocs = pr.scratchRelocEnt[:l]
+ pr.scratchRelocEnt = nil
+ } else {
+ r.Relocs = make([]RelocEnt, l)
+ }
+ for i := range r.Relocs {
+ r.Sync(SyncReloc)
+ r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())}
+ }
+
+ return r
+}
+
+// A Decoder provides methods for decoding an individual element's
+// bitstream data.
+type Decoder struct {
+ common *PkgDecoder
+
+ Relocs []RelocEnt
+ Data strings.Reader
+
+ k RelocKind
+ Idx Index
+}
+
+func (r *Decoder) checkErr(err error) {
+ if err != nil {
+ errorf("unexpected decoding error: %w", err)
+ }
+}
+
+func (r *Decoder) rawUvarint() uint64 {
+ x, err := readUvarint(&r.Data)
+ r.checkErr(err)
+ return x
+}
+
+// readUvarint is a type-specialized copy of encoding/binary.ReadUvarint.
+// This avoids the interface conversion and thus has better escape properties,
+// which flows up the stack.
+func readUvarint(r *strings.Reader) (uint64, error) {
+ var x uint64
+ var s uint
+ for i := 0; i < binary.MaxVarintLen64; i++ {
+ b, err := r.ReadByte()
+ if err != nil {
+ if i > 0 && err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ return x, err
+ }
+ if b < 0x80 {
+ if i == binary.MaxVarintLen64-1 && b > 1 {
+ return x, overflow
+ }
+ return x | uint64(b)<<s, nil
+ }
+ x |= uint64(b&0x7f) << s
+ s += 7
+ }
+ return x, overflow
+}
+
+var overflow = errors.New("pkgbits: readUvarint overflows a 64-bit integer")
+
+func (r *Decoder) rawVarint() int64 {
+ ux := r.rawUvarint()
+
+ // Zig-zag decode.
+ x := int64(ux >> 1)
+ if ux&1 != 0 {
+ x = ^x
+ }
+ return x
+}
+
+func (r *Decoder) rawReloc(k RelocKind, idx int) Index {
+ e := r.Relocs[idx]
+ assert(e.Kind == k)
+ return e.Idx
+}
+
+// Sync decodes a sync marker from the element bitstream and asserts
+// that it matches the expected marker.
+//
+// If r.common.sync is false, then Sync is a no-op.
+func (r *Decoder) Sync(mWant SyncMarker) {
+ if !r.common.sync {
+ return
+ }
+
+ pos, _ := r.Data.Seek(0, io.SeekCurrent)
+ mHave := SyncMarker(r.rawUvarint())
+ writerPCs := make([]int, r.rawUvarint())
+ for i := range writerPCs {
+ writerPCs[i] = int(r.rawUvarint())
+ }
+
+ if mHave == mWant {
+ return
+ }
+
+ // There's some tension here between printing:
+ //
+ // (1) full file paths that tools can recognize (e.g., so emacs
+ // hyperlinks the "file:line" text for easy navigation), or
+ //
+ // (2) short file paths that are easier for humans to read (e.g., by
+ // omitting redundant or irrelevant details, so it's easier to
+ // focus on the useful bits that remain).
+ //
+ // The current formatting favors the former, as it seems more
+ // helpful in practice. But perhaps the formatting could be improved
+ // to better address both concerns. For example, use relative file
+ // paths if they would be shorter, or rewrite file paths to contain
+ // "$GOROOT" (like objabi.AbsFile does) if tools can be taught how
+ // to reliably expand that again.
+
+ fmt.Printf("export data desync: package %q, section %v, index %v, offset %v\n", r.common.pkgPath, r.k, r.Idx, pos)
+
+ fmt.Printf("\nfound %v, written at:\n", mHave)
+ if len(writerPCs) == 0 {
+ fmt.Printf("\t[stack trace unavailable; recompile package %q with -d=syncframes]\n", r.common.pkgPath)
+ }
+ for _, pc := range writerPCs {
+ fmt.Printf("\t%s\n", r.common.StringIdx(r.rawReloc(RelocString, pc)))
+ }
+
+ fmt.Printf("\nexpected %v, reading at:\n", mWant)
+ var readerPCs [32]uintptr // TODO(mdempsky): Dynamically size?
+ n := runtime.Callers(2, readerPCs[:])
+ for _, pc := range fmtFrames(readerPCs[:n]...) {
+ fmt.Printf("\t%s\n", pc)
+ }
+
+ // We already printed a stack trace for the reader, so now we can
+ // simply exit. Printing a second one with panic or base.Fatalf
+ // would just be noise.
+ os.Exit(1)
+}
+
+// Bool decodes and returns a bool value from the element bitstream.
+func (r *Decoder) Bool() bool {
+ r.Sync(SyncBool)
+ x, err := r.Data.ReadByte()
+ r.checkErr(err)
+ assert(x < 2)
+ return x != 0
+}
+
+// Int64 decodes and returns an int64 value from the element bitstream.
+func (r *Decoder) Int64() int64 {
+ r.Sync(SyncInt64)
+ return r.rawVarint()
+}
+
+// Uint64 decodes and returns a uint64 value from the element bitstream.
+func (r *Decoder) Uint64() uint64 {
+ r.Sync(SyncUint64)
+ return r.rawUvarint()
+}
+
+// Len decodes and returns a non-negative int value from the element bitstream.
+func (r *Decoder) Len() int { x := r.Uint64(); v := int(x); assert(uint64(v) == x); return v }
+
+// Int decodes and returns an int value from the element bitstream.
+func (r *Decoder) Int() int { x := r.Int64(); v := int(x); assert(int64(v) == x); return v }
+
+// Uint decodes and returns a uint value from the element bitstream.
+func (r *Decoder) Uint() uint { x := r.Uint64(); v := uint(x); assert(uint64(v) == x); return v }
+
+// Code decodes a Code value from the element bitstream and returns
+// its ordinal value. It's the caller's responsibility to convert the
+// result to an appropriate Code type.
+//
+// TODO(mdempsky): Ideally this method would have signature "Code[T
+// Code] T" instead, but we don't allow generic methods and the
+// compiler can't depend on generics yet anyway.
+func (r *Decoder) Code(mark SyncMarker) int {
+ r.Sync(mark)
+ return r.Len()
+}
+
+// Reloc decodes a relocation of expected section k from the element
+// bitstream and returns an index to the referenced element.
+func (r *Decoder) Reloc(k RelocKind) Index {
+ r.Sync(SyncUseReloc)
+ return r.rawReloc(k, r.Len())
+}
+
+// String decodes and returns a string value from the element
+// bitstream.
+func (r *Decoder) String() string {
+ r.Sync(SyncString)
+ return r.common.StringIdx(r.Reloc(RelocString))
+}
+
+// Strings decodes and returns a variable-length slice of strings from
+// the element bitstream.
+func (r *Decoder) Strings() []string {
+ res := make([]string, r.Len())
+ for i := range res {
+ res[i] = r.String()
+ }
+ return res
+}
+
+// Value decodes and returns a constant.Value from the element
+// bitstream.
+func (r *Decoder) Value() constant.Value {
+ r.Sync(SyncValue)
+ isComplex := r.Bool()
+ val := r.scalar()
+ if isComplex {
+ val = constant.BinaryOp(val, token.ADD, constant.MakeImag(r.scalar()))
+ }
+ return val
+}
+
+func (r *Decoder) scalar() constant.Value {
+ switch tag := CodeVal(r.Code(SyncVal)); tag {
+ default:
+ panic(fmt.Errorf("unexpected scalar tag: %v", tag))
+
+ case ValBool:
+ return constant.MakeBool(r.Bool())
+ case ValString:
+ return constant.MakeString(r.String())
+ case ValInt64:
+ return constant.MakeInt64(r.Int64())
+ case ValBigInt:
+ return constant.Make(r.bigInt())
+ case ValBigRat:
+ num := r.bigInt()
+ denom := r.bigInt()
+ return constant.Make(new(big.Rat).SetFrac(num, denom))
+ case ValBigFloat:
+ return constant.Make(r.bigFloat())
+ }
+}
+
+func (r *Decoder) bigInt() *big.Int {
+ v := new(big.Int).SetBytes([]byte(r.String()))
+ if r.Bool() {
+ v.Neg(v)
+ }
+ return v
+}
+
+func (r *Decoder) bigFloat() *big.Float {
+ v := new(big.Float).SetPrec(512)
+ assert(v.UnmarshalText([]byte(r.String())) == nil)
+ return v
+}
+
+// @@@ Helpers
+
+// TODO(mdempsky): These should probably be removed. I think they're a
+// smell that the export data format is not yet quite right.
+
+// PeekPkgPath returns the package path for the specified package
+// index.
+func (pr *PkgDecoder) PeekPkgPath(idx Index) string {
+ var path string
+ {
+ r := pr.TempDecoder(RelocPkg, idx, SyncPkgDef)
+ path = r.String()
+ pr.RetireDecoder(&r)
+ }
+ if path == "" {
+ path = pr.pkgPath
+ }
+ return path
+}
+
+// PeekObj returns the package path, object name, and CodeObj for the
+// specified object index.
+func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) {
+ var ridx Index
+ var name string
+ var rcode int
+ {
+ r := pr.TempDecoder(RelocName, idx, SyncObject1)
+ r.Sync(SyncSym)
+ r.Sync(SyncPkg)
+ ridx = r.Reloc(RelocPkg)
+ name = r.String()
+ rcode = r.Code(SyncCodeObj)
+ pr.RetireDecoder(&r)
+ }
+
+ path := pr.PeekPkgPath(ridx)
+ assert(name != "")
+
+ tag := CodeObj(rcode)
+
+ return path, name, tag
+}
diff --git a/internal/pkgbits/doc.go b/internal/pkgbits/doc.go
new file mode 100644
index 000000000..c8a2796b5
--- /dev/null
+++ b/internal/pkgbits/doc.go
@@ -0,0 +1,32 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package pkgbits implements low-level coding abstractions for
+// Unified IR's export data format.
+//
+// At a low-level, a package is a collection of bitstream elements.
+// Each element has a "kind" and a dense, non-negative index.
+// Elements can be randomly accessed given their kind and index.
+//
+// Individual elements are sequences of variable-length values (e.g.,
+// integers, booleans, strings, go/constant values, cross-references
+// to other elements). Package pkgbits provides APIs for encoding and
+// decoding these low-level values, but the details of mapping
+// higher-level Go constructs into elements is left to higher-level
+// abstractions.
+//
+// Elements may cross-reference each other with "relocations." For
+// example, an element representing a pointer type has a relocation
+// referring to the element type.
+//
+// Go constructs may be composed as a constellation of multiple
+// elements. For example, a declared function may have one element to
+// describe the object (e.g., its name, type, position), and a
+// separate element to describe its function body. This allows readers
+// some flexibility in efficiently seeking or re-reading data (e.g.,
+// inlining requires re-reading the function body for each inlined
+// call, without needing to re-read the object-level details).
+//
+// This is a copy of internal/pkgbits in the Go implementation.
+package pkgbits
diff --git a/internal/pkgbits/encoder.go b/internal/pkgbits/encoder.go
new file mode 100644
index 000000000..6482617a4
--- /dev/null
+++ b/internal/pkgbits/encoder.go
@@ -0,0 +1,383 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+import (
+ "bytes"
+ "crypto/md5"
+ "encoding/binary"
+ "go/constant"
+ "io"
+ "math/big"
+ "runtime"
+)
+
+// currentVersion is the current version number.
+//
+// - v0: initial prototype
+//
+// - v1: adds the flags uint32 word
+const currentVersion uint32 = 1
+
+// A PkgEncoder provides methods for encoding a package's Unified IR
+// export data.
+type PkgEncoder struct {
+ // elems holds the bitstream for previously encoded elements.
+ elems [numRelocs][]string
+
+ // stringsIdx maps previously encoded strings to their index within
+ // the RelocString section, to allow deduplication. That is,
+ // elems[RelocString][stringsIdx[s]] == s (if present).
+ stringsIdx map[string]Index
+
+ // syncFrames is the number of frames to write at each sync
+ // marker. A negative value means sync markers are omitted.
+ syncFrames int
+}
+
+// SyncMarkers reports whether pw uses sync markers.
+func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 }
+
+// NewPkgEncoder returns an initialized PkgEncoder.
+//
+// syncFrames is the number of caller frames that should be serialized
+// at Sync points. Serializing additional frames results in larger
+// export data files, but can help diagnosing desync errors in
+// higher-level Unified IR reader/writer code. If syncFrames is
+// negative, then sync markers are omitted entirely.
+func NewPkgEncoder(syncFrames int) PkgEncoder {
+ return PkgEncoder{
+ stringsIdx: make(map[string]Index),
+ syncFrames: syncFrames,
+ }
+}
+
+// DumpTo writes the package's encoded data to out0 and returns the
+// package fingerprint.
+func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) {
+ h := md5.New()
+ out := io.MultiWriter(out0, h)
+
+ writeUint32 := func(x uint32) {
+ assert(binary.Write(out, binary.LittleEndian, x) == nil)
+ }
+
+ writeUint32(currentVersion)
+
+ var flags uint32
+ if pw.SyncMarkers() {
+ flags |= flagSyncMarkers
+ }
+ writeUint32(flags)
+
+ // Write elemEndsEnds.
+ var sum uint32
+ for _, elems := range &pw.elems {
+ sum += uint32(len(elems))
+ writeUint32(sum)
+ }
+
+ // Write elemEnds.
+ sum = 0
+ for _, elems := range &pw.elems {
+ for _, elem := range elems {
+ sum += uint32(len(elem))
+ writeUint32(sum)
+ }
+ }
+
+ // Write elemData.
+ for _, elems := range &pw.elems {
+ for _, elem := range elems {
+ _, err := io.WriteString(out, elem)
+ assert(err == nil)
+ }
+ }
+
+ // Write fingerprint.
+ copy(fingerprint[:], h.Sum(nil))
+ _, err := out0.Write(fingerprint[:])
+ assert(err == nil)
+
+ return
+}
+
+// StringIdx adds a string value to the strings section, if not
+// already present, and returns its index.
+func (pw *PkgEncoder) StringIdx(s string) Index {
+ if idx, ok := pw.stringsIdx[s]; ok {
+ assert(pw.elems[RelocString][idx] == s)
+ return idx
+ }
+
+ idx := Index(len(pw.elems[RelocString]))
+ pw.elems[RelocString] = append(pw.elems[RelocString], s)
+ pw.stringsIdx[s] = idx
+ return idx
+}
+
+// NewEncoder returns an Encoder for a new element within the given
+// section, and encodes the given SyncMarker as the start of the
+// element bitstream.
+func (pw *PkgEncoder) NewEncoder(k RelocKind, marker SyncMarker) Encoder {
+ e := pw.NewEncoderRaw(k)
+ e.Sync(marker)
+ return e
+}
+
+// NewEncoderRaw returns an Encoder for a new element within the given
+// section.
+//
+// Most callers should use NewEncoder instead.
+func (pw *PkgEncoder) NewEncoderRaw(k RelocKind) Encoder {
+ idx := Index(len(pw.elems[k]))
+ pw.elems[k] = append(pw.elems[k], "") // placeholder
+
+ return Encoder{
+ p: pw,
+ k: k,
+ Idx: idx,
+ }
+}
+
+// An Encoder provides methods for encoding an individual element's
+// bitstream data.
+type Encoder struct {
+ p *PkgEncoder
+
+ Relocs []RelocEnt
+ RelocMap map[RelocEnt]uint32
+ Data bytes.Buffer // accumulated element bitstream data
+
+ encodingRelocHeader bool
+
+ k RelocKind
+ Idx Index // index within relocation section
+}
+
+// Flush finalizes the element's bitstream and returns its Index.
+func (w *Encoder) Flush() Index {
+ var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved
+
+ // Backup the data so we write the relocations at the front.
+ var tmp bytes.Buffer
+ io.Copy(&tmp, &w.Data)
+
+ // TODO(mdempsky): Consider writing these out separately so they're
+ // easier to strip, along with function bodies, so that we can prune
+ // down to just the data that's relevant to go/types.
+ if w.encodingRelocHeader {
+ panic("encodingRelocHeader already true; recursive flush?")
+ }
+ w.encodingRelocHeader = true
+ w.Sync(SyncRelocs)
+ w.Len(len(w.Relocs))
+ for _, rEnt := range w.Relocs {
+ w.Sync(SyncReloc)
+ w.Len(int(rEnt.Kind))
+ w.Len(int(rEnt.Idx))
+ }
+
+ io.Copy(&sb, &w.Data)
+ io.Copy(&sb, &tmp)
+ w.p.elems[w.k][w.Idx] = sb.String()
+
+ return w.Idx
+}
+
+func (w *Encoder) checkErr(err error) {
+ if err != nil {
+ errorf("unexpected encoding error: %v", err)
+ }
+}
+
+func (w *Encoder) rawUvarint(x uint64) {
+ var buf [binary.MaxVarintLen64]byte
+ n := binary.PutUvarint(buf[:], x)
+ _, err := w.Data.Write(buf[:n])
+ w.checkErr(err)
+}
+
+func (w *Encoder) rawVarint(x int64) {
+ // Zig-zag encode.
+ ux := uint64(x) << 1
+ if x < 0 {
+ ux = ^ux
+ }
+
+ w.rawUvarint(ux)
+}
+
+func (w *Encoder) rawReloc(r RelocKind, idx Index) int {
+ e := RelocEnt{r, idx}
+ if w.RelocMap != nil {
+ if i, ok := w.RelocMap[e]; ok {
+ return int(i)
+ }
+ } else {
+ w.RelocMap = make(map[RelocEnt]uint32)
+ }
+
+ i := len(w.Relocs)
+ w.RelocMap[e] = uint32(i)
+ w.Relocs = append(w.Relocs, e)
+ return i
+}
+
+func (w *Encoder) Sync(m SyncMarker) {
+ if !w.p.SyncMarkers() {
+ return
+ }
+
+ // Writing out stack frame string references requires working
+ // relocations, but writing out the relocations themselves involves
+ // sync markers. To prevent infinite recursion, we simply trim the
+ // stack frame for sync markers within the relocation header.
+ var frames []string
+ if !w.encodingRelocHeader && w.p.syncFrames > 0 {
+ pcs := make([]uintptr, w.p.syncFrames)
+ n := runtime.Callers(2, pcs)
+ frames = fmtFrames(pcs[:n]...)
+ }
+
+ // TODO(mdempsky): Save space by writing out stack frames as a
+ // linked list so we can share common stack frames.
+ w.rawUvarint(uint64(m))
+ w.rawUvarint(uint64(len(frames)))
+ for _, frame := range frames {
+ w.rawUvarint(uint64(w.rawReloc(RelocString, w.p.StringIdx(frame))))
+ }
+}
+
+// Bool encodes and writes a bool value into the element bitstream,
+// and then returns the bool value.
+//
+// For simple, 2-alternative encodings, the idiomatic way to call Bool
+// is something like:
+//
+// if w.Bool(x != 0) {
+// // alternative #1
+// } else {
+// // alternative #2
+// }
+//
+// For multi-alternative encodings, use Code instead.
+func (w *Encoder) Bool(b bool) bool {
+ w.Sync(SyncBool)
+ var x byte
+ if b {
+ x = 1
+ }
+ err := w.Data.WriteByte(x)
+ w.checkErr(err)
+ return b
+}
+
+// Int64 encodes and writes an int64 value into the element bitstream.
+func (w *Encoder) Int64(x int64) {
+ w.Sync(SyncInt64)
+ w.rawVarint(x)
+}
+
+// Uint64 encodes and writes a uint64 value into the element bitstream.
+func (w *Encoder) Uint64(x uint64) {
+ w.Sync(SyncUint64)
+ w.rawUvarint(x)
+}
+
+// Len encodes and writes a non-negative int value into the element bitstream.
+func (w *Encoder) Len(x int) { assert(x >= 0); w.Uint64(uint64(x)) }
+
+// Int encodes and writes an int value into the element bitstream.
+func (w *Encoder) Int(x int) { w.Int64(int64(x)) }
+
+// Uint encodes and writes a uint value into the element bitstream.
+func (w *Encoder) Uint(x uint) { w.Uint64(uint64(x)) }
+
+// Reloc encodes and writes a relocation for the given (section,
+// index) pair into the element bitstream.
+//
+// Note: Only the index is formally written into the element
+// bitstream, so bitstream decoders must know from context which
+// section an encoded relocation refers to.
+func (w *Encoder) Reloc(r RelocKind, idx Index) {
+ w.Sync(SyncUseReloc)
+ w.Len(w.rawReloc(r, idx))
+}
+
+// Code encodes and writes a Code value into the element bitstream.
+func (w *Encoder) Code(c Code) {
+ w.Sync(c.Marker())
+ w.Len(c.Value())
+}
+
+// String encodes and writes a string value into the element
+// bitstream.
+//
+// Internally, strings are deduplicated by adding them to the strings
+// section (if not already present), and then writing a relocation
+// into the element bitstream.
+func (w *Encoder) String(s string) {
+ w.Sync(SyncString)
+ w.Reloc(RelocString, w.p.StringIdx(s))
+}
+
+// Strings encodes and writes a variable-length slice of strings into
+// the element bitstream.
+func (w *Encoder) Strings(ss []string) {
+ w.Len(len(ss))
+ for _, s := range ss {
+ w.String(s)
+ }
+}
+
+// Value encodes and writes a constant.Value into the element
+// bitstream.
+func (w *Encoder) Value(val constant.Value) {
+ w.Sync(SyncValue)
+ if w.Bool(val.Kind() == constant.Complex) {
+ w.scalar(constant.Real(val))
+ w.scalar(constant.Imag(val))
+ } else {
+ w.scalar(val)
+ }
+}
+
+func (w *Encoder) scalar(val constant.Value) {
+ switch v := constant.Val(val).(type) {
+ default:
+ errorf("unhandled %v (%v)", val, val.Kind())
+ case bool:
+ w.Code(ValBool)
+ w.Bool(v)
+ case string:
+ w.Code(ValString)
+ w.String(v)
+ case int64:
+ w.Code(ValInt64)
+ w.Int64(v)
+ case *big.Int:
+ w.Code(ValBigInt)
+ w.bigInt(v)
+ case *big.Rat:
+ w.Code(ValBigRat)
+ w.bigInt(v.Num())
+ w.bigInt(v.Denom())
+ case *big.Float:
+ w.Code(ValBigFloat)
+ w.bigFloat(v)
+ }
+}
+
+func (w *Encoder) bigInt(v *big.Int) {
+ b := v.Bytes()
+ w.String(string(b)) // TODO: More efficient encoding.
+ w.Bool(v.Sign() < 0)
+}
+
+func (w *Encoder) bigFloat(v *big.Float) {
+ b := v.Append(nil, 'p', -1)
+ w.String(string(b)) // TODO: More efficient encoding.
+}
diff --git a/internal/pkgbits/flags.go b/internal/pkgbits/flags.go
new file mode 100644
index 000000000..654222745
--- /dev/null
+++ b/internal/pkgbits/flags.go
@@ -0,0 +1,9 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+const (
+ flagSyncMarkers = 1 << iota // file format contains sync markers
+)
diff --git a/internal/pkgbits/frames_go1.go b/internal/pkgbits/frames_go1.go
new file mode 100644
index 000000000..5294f6a63
--- /dev/null
+++ b/internal/pkgbits/frames_go1.go
@@ -0,0 +1,21 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.7
+// +build !go1.7
+
+// TODO(mdempsky): Remove after #44505 is resolved
+
+package pkgbits
+
+import "runtime"
+
+func walkFrames(pcs []uintptr, visit frameVisitor) {
+ for _, pc := range pcs {
+ fn := runtime.FuncForPC(pc)
+ file, line := fn.FileLine(pc)
+
+ visit(file, line, fn.Name(), pc-fn.Entry())
+ }
+}
diff --git a/internal/pkgbits/frames_go17.go b/internal/pkgbits/frames_go17.go
new file mode 100644
index 000000000..2324ae7ad
--- /dev/null
+++ b/internal/pkgbits/frames_go17.go
@@ -0,0 +1,28 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.7
+// +build go1.7
+
+package pkgbits
+
+import "runtime"
+
+// walkFrames calls visit for each call frame represented by pcs.
+//
+// pcs should be a slice of PCs, as returned by runtime.Callers.
+func walkFrames(pcs []uintptr, visit frameVisitor) {
+ if len(pcs) == 0 {
+ return
+ }
+
+ frames := runtime.CallersFrames(pcs)
+ for {
+ frame, more := frames.Next()
+ visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry)
+ if !more {
+ return
+ }
+ }
+}
diff --git a/internal/pkgbits/reloc.go b/internal/pkgbits/reloc.go
new file mode 100644
index 000000000..fcdfb97ca
--- /dev/null
+++ b/internal/pkgbits/reloc.go
@@ -0,0 +1,42 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+// A RelocKind indicates a particular section within a unified IR export.
+type RelocKind int32
+
+// An Index represents a bitstream element index within a particular
+// section.
+type Index int32
+
+// A relocEnt (relocation entry) is an entry in an element's local
+// reference table.
+//
+// TODO(mdempsky): Rename this too.
+type RelocEnt struct {
+ Kind RelocKind
+ Idx Index
+}
+
+// Reserved indices within the meta relocation section.
+const (
+ PublicRootIdx Index = 0
+ PrivateRootIdx Index = 1
+)
+
+const (
+ RelocString RelocKind = iota
+ RelocMeta
+ RelocPosBase
+ RelocPkg
+ RelocName
+ RelocType
+ RelocObj
+ RelocObjExt
+ RelocObjDict
+ RelocBody
+
+ numRelocs = iota
+)
diff --git a/internal/pkgbits/support.go b/internal/pkgbits/support.go
new file mode 100644
index 000000000..ad26d3b28
--- /dev/null
+++ b/internal/pkgbits/support.go
@@ -0,0 +1,17 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+import "fmt"
+
+func assert(b bool) {
+ if !b {
+ panic("assertion failed")
+ }
+}
+
+func errorf(format string, args ...interface{}) {
+ panic(fmt.Errorf(format, args...))
+}
diff --git a/internal/pkgbits/sync.go b/internal/pkgbits/sync.go
new file mode 100644
index 000000000..5bd51ef71
--- /dev/null
+++ b/internal/pkgbits/sync.go
@@ -0,0 +1,113 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+import (
+ "fmt"
+ "strings"
+)
+
+// fmtFrames formats a backtrace for reporting reader/writer desyncs.
+func fmtFrames(pcs ...uintptr) []string {
+ res := make([]string, 0, len(pcs))
+ walkFrames(pcs, func(file string, line int, name string, offset uintptr) {
+ // Trim package from function name. It's just redundant noise.
+ name = strings.TrimPrefix(name, "cmd/compile/internal/noder.")
+
+ res = append(res, fmt.Sprintf("%s:%v: %s +0x%v", file, line, name, offset))
+ })
+ return res
+}
+
+type frameVisitor func(file string, line int, name string, offset uintptr)
+
+// SyncMarker is an enum type that represents markers that may be
+// written to export data to ensure the reader and writer stay
+// synchronized.
+type SyncMarker int
+
+//go:generate stringer -type=SyncMarker -trimprefix=Sync
+
+const (
+ _ SyncMarker = iota
+
+ // Public markers (known to go/types importers).
+
+ // Low-level coding markers.
+ SyncEOF
+ SyncBool
+ SyncInt64
+ SyncUint64
+ SyncString
+ SyncValue
+ SyncVal
+ SyncRelocs
+ SyncReloc
+ SyncUseReloc
+
+ // Higher-level object and type markers.
+ SyncPublic
+ SyncPos
+ SyncPosBase
+ SyncObject
+ SyncObject1
+ SyncPkg
+ SyncPkgDef
+ SyncMethod
+ SyncType
+ SyncTypeIdx
+ SyncTypeParamNames
+ SyncSignature
+ SyncParams
+ SyncParam
+ SyncCodeObj
+ SyncSym
+ SyncLocalIdent
+ SyncSelector
+
+ // Private markers (only known to cmd/compile).
+ SyncPrivate
+
+ SyncFuncExt
+ SyncVarExt
+ SyncTypeExt
+ SyncPragma
+
+ SyncExprList
+ SyncExprs
+ SyncExpr
+ SyncExprType
+ SyncAssign
+ SyncOp
+ SyncFuncLit
+ SyncCompLit
+
+ SyncDecl
+ SyncFuncBody
+ SyncOpenScope
+ SyncCloseScope
+ SyncCloseAnotherScope
+ SyncDeclNames
+ SyncDeclName
+
+ SyncStmts
+ SyncBlockStmt
+ SyncIfStmt
+ SyncForStmt
+ SyncSwitchStmt
+ SyncRangeStmt
+ SyncCaseClause
+ SyncCommClause
+ SyncSelectStmt
+ SyncDecls
+ SyncLabeledStmt
+ SyncUseObjLocal
+ SyncAddLocal
+ SyncLinkname
+ SyncStmt1
+ SyncStmtsEnd
+ SyncLabel
+ SyncOptLabel
+)
diff --git a/internal/pkgbits/syncmarker_string.go b/internal/pkgbits/syncmarker_string.go
new file mode 100644
index 000000000..4a5b0ca5f
--- /dev/null
+++ b/internal/pkgbits/syncmarker_string.go
@@ -0,0 +1,89 @@
+// Code generated by "stringer -type=SyncMarker -trimprefix=Sync"; DO NOT EDIT.
+
+package pkgbits
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[SyncEOF-1]
+ _ = x[SyncBool-2]
+ _ = x[SyncInt64-3]
+ _ = x[SyncUint64-4]
+ _ = x[SyncString-5]
+ _ = x[SyncValue-6]
+ _ = x[SyncVal-7]
+ _ = x[SyncRelocs-8]
+ _ = x[SyncReloc-9]
+ _ = x[SyncUseReloc-10]
+ _ = x[SyncPublic-11]
+ _ = x[SyncPos-12]
+ _ = x[SyncPosBase-13]
+ _ = x[SyncObject-14]
+ _ = x[SyncObject1-15]
+ _ = x[SyncPkg-16]
+ _ = x[SyncPkgDef-17]
+ _ = x[SyncMethod-18]
+ _ = x[SyncType-19]
+ _ = x[SyncTypeIdx-20]
+ _ = x[SyncTypeParamNames-21]
+ _ = x[SyncSignature-22]
+ _ = x[SyncParams-23]
+ _ = x[SyncParam-24]
+ _ = x[SyncCodeObj-25]
+ _ = x[SyncSym-26]
+ _ = x[SyncLocalIdent-27]
+ _ = x[SyncSelector-28]
+ _ = x[SyncPrivate-29]
+ _ = x[SyncFuncExt-30]
+ _ = x[SyncVarExt-31]
+ _ = x[SyncTypeExt-32]
+ _ = x[SyncPragma-33]
+ _ = x[SyncExprList-34]
+ _ = x[SyncExprs-35]
+ _ = x[SyncExpr-36]
+ _ = x[SyncExprType-37]
+ _ = x[SyncAssign-38]
+ _ = x[SyncOp-39]
+ _ = x[SyncFuncLit-40]
+ _ = x[SyncCompLit-41]
+ _ = x[SyncDecl-42]
+ _ = x[SyncFuncBody-43]
+ _ = x[SyncOpenScope-44]
+ _ = x[SyncCloseScope-45]
+ _ = x[SyncCloseAnotherScope-46]
+ _ = x[SyncDeclNames-47]
+ _ = x[SyncDeclName-48]
+ _ = x[SyncStmts-49]
+ _ = x[SyncBlockStmt-50]
+ _ = x[SyncIfStmt-51]
+ _ = x[SyncForStmt-52]
+ _ = x[SyncSwitchStmt-53]
+ _ = x[SyncRangeStmt-54]
+ _ = x[SyncCaseClause-55]
+ _ = x[SyncCommClause-56]
+ _ = x[SyncSelectStmt-57]
+ _ = x[SyncDecls-58]
+ _ = x[SyncLabeledStmt-59]
+ _ = x[SyncUseObjLocal-60]
+ _ = x[SyncAddLocal-61]
+ _ = x[SyncLinkname-62]
+ _ = x[SyncStmt1-63]
+ _ = x[SyncStmtsEnd-64]
+ _ = x[SyncLabel-65]
+ _ = x[SyncOptLabel-66]
+}
+
+const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabel"
+
+var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458}
+
+func (i SyncMarker) String() string {
+ i -= 1
+ if i < 0 || i >= SyncMarker(len(_SyncMarker_index)-1) {
+ return "SyncMarker(" + strconv.FormatInt(int64(i+1), 10) + ")"
+ }
+ return _SyncMarker_name[_SyncMarker_index[i]:_SyncMarker_index[i+1]]
+}
diff --git a/internal/robustio/copyfiles.go b/internal/robustio/copyfiles.go
new file mode 100644
index 000000000..6e9f4b387
--- /dev/null
+++ b/internal/robustio/copyfiles.go
@@ -0,0 +1,117 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+// +build ignore
+
+// The copyfiles script copies the contents of the internal cmd/go robustio
+// package to the current directory, with adjustments to make it build.
+//
+// NOTE: In retrospect this script got out of hand, as we have to perform
+// various operations on the package to get it to build at old Go versions. If
+// in the future it proves to be flaky, delete it and just copy code manually.
+package main
+
+import (
+ "bytes"
+ "go/build/constraint"
+ "go/scanner"
+ "go/token"
+ "log"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+)
+
+func main() {
+ dir := filepath.Join(runtime.GOROOT(), "src", "cmd", "go", "internal", "robustio")
+
+ entries, err := os.ReadDir(dir)
+ if err != nil {
+ log.Fatalf("reading the robustio dir: %v", err)
+ }
+
+ // Collect file content so that we can validate before copying.
+ fileContent := make(map[string][]byte)
+ windowsImport := []byte("\t\"internal/syscall/windows\"\n")
+ foundWindowsImport := false
+ for _, entry := range entries {
+ if strings.HasSuffix(entry.Name(), ".go") {
+ pth := filepath.Join(dir, entry.Name())
+ content, err := os.ReadFile(pth)
+ if err != nil {
+ log.Fatalf("reading %q: %v", entry.Name(), err)
+ }
+
+ // Replace the use of internal/syscall/windows.ERROR_SHARING_VIOLATION
+ // with a local constant.
+ if entry.Name() == "robustio_windows.go" && bytes.Contains(content, windowsImport) {
+ foundWindowsImport = true
+ content = bytes.Replace(content, windowsImport, nil, 1)
+ content = bytes.Replace(content, []byte("windows.ERROR_SHARING_VIOLATION"), []byte("ERROR_SHARING_VIOLATION"), -1)
+ }
+
+ // Replace os.ReadFile with ioutil.ReadFile (for 1.15 and older). We
+ // attempt to match calls (via the '('), to avoid matching mentions of
+ // os.ReadFile in comments.
+ //
+ // TODO(rfindley): once we (shortly!) no longer support 1.15, remove
+ // this and break the build.
+ if bytes.Contains(content, []byte("os.ReadFile(")) {
+ content = bytes.Replace(content, []byte("\"os\""), []byte("\"io/ioutil\"\n\t\"os\""), 1)
+ content = bytes.Replace(content, []byte("os.ReadFile("), []byte("ioutil.ReadFile("), -1)
+ }
+
+ // Add +build constraints, for 1.16.
+ content = addPlusBuildConstraints(content)
+
+ fileContent[entry.Name()] = content
+ }
+ }
+
+ if !foundWindowsImport {
+ log.Fatal("missing expected import of internal/syscall/windows in robustio_windows.go")
+ }
+
+ for name, content := range fileContent {
+ if err := os.WriteFile(name, content, 0644); err != nil {
+ log.Fatalf("writing %q: %v", name, err)
+ }
+ }
+}
+
+// addPlusBuildConstraints splices in +build constraints for go:build
+// constraints encountered in the source.
+//
+// Gopls still builds at Go 1.16, which requires +build constraints.
+func addPlusBuildConstraints(src []byte) []byte {
+ var s scanner.Scanner
+ fset := token.NewFileSet()
+ file := fset.AddFile("", fset.Base(), len(src))
+ s.Init(file, src, nil /* no error handler */, scanner.ScanComments)
+
+ result := make([]byte, 0, len(src))
+ lastInsertion := 0
+ for {
+ pos, tok, lit := s.Scan()
+ if tok == token.EOF {
+ break
+ }
+ if tok == token.COMMENT {
+ if c, err := constraint.Parse(lit); err == nil {
+ plusBuild, err := constraint.PlusBuildLines(c)
+ if err != nil {
+ log.Fatalf("computing +build constraint for %q: %v", lit, err)
+ }
+ insertAt := file.Offset(pos) + len(lit)
+ result = append(result, src[lastInsertion:insertAt]...)
+ result = append(result, []byte("\n"+strings.Join(plusBuild, "\n"))...)
+ lastInsertion = insertAt
+ }
+ }
+ }
+ result = append(result, src[lastInsertion:]...)
+ return result
+}
diff --git a/internal/robustio/gopls_windows.go b/internal/robustio/gopls_windows.go
new file mode 100644
index 000000000..949f27816
--- /dev/null
+++ b/internal/robustio/gopls_windows.go
@@ -0,0 +1,16 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package robustio
+
+import "syscall"
+
+// The robustio package is copied from cmd/go/internal/robustio, a package used
+// by the go command to retry known flaky operations on certain operating systems.
+
+//go:generate go run copyfiles.go
+
+// Since the gopls module cannot access internal/syscall/windows, copy a
+// necessary constant.
+const ERROR_SHARING_VIOLATION syscall.Errno = 32
diff --git a/internal/robustio/robustio.go b/internal/robustio/robustio.go
new file mode 100644
index 000000000..0a559fc9b
--- /dev/null
+++ b/internal/robustio/robustio.go
@@ -0,0 +1,69 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package robustio wraps I/O functions that are prone to failure on Windows,
+// transparently retrying errors up to an arbitrary timeout.
+//
+// Errors are classified heuristically and retries are bounded, so the functions
+// in this package do not completely eliminate spurious errors. However, they do
+// significantly reduce the rate of failure in practice.
+//
+// If so, the error will likely wrap one of:
+// The functions in this package do not completely eliminate spurious errors,
+// but substantially reduce their rate of occurrence in practice.
+package robustio
+
+import "time"
+
+// Rename is like os.Rename, but on Windows retries errors that may occur if the
+// file is concurrently read or overwritten.
+//
+// (See golang.org/issue/31247 and golang.org/issue/32188.)
+func Rename(oldpath, newpath string) error {
+ return rename(oldpath, newpath)
+}
+
+// ReadFile is like os.ReadFile, but on Windows retries errors that may
+// occur if the file is concurrently replaced.
+//
+// (See golang.org/issue/31247 and golang.org/issue/32188.)
+func ReadFile(filename string) ([]byte, error) {
+ return readFile(filename)
+}
+
+// RemoveAll is like os.RemoveAll, but on Windows retries errors that may occur
+// if an executable file in the directory has recently been executed.
+//
+// (See golang.org/issue/19491.)
+func RemoveAll(path string) error {
+ return removeAll(path)
+}
+
+// IsEphemeralError reports whether err is one of the errors that the functions
+// in this package attempt to mitigate.
+//
+// Errors considered ephemeral include:
+// - syscall.ERROR_ACCESS_DENIED
+// - syscall.ERROR_FILE_NOT_FOUND
+// - internal/syscall/windows.ERROR_SHARING_VIOLATION
+//
+// This set may be expanded in the future; programs must not rely on the
+// non-ephemerality of any given error.
+func IsEphemeralError(err error) bool {
+ return isEphemeralError(err)
+}
+
+// A FileID uniquely identifies a file in the file system.
+//
+// If GetFileID(name1) returns the same ID as GetFileID(name2), the two file
+// names denote the same file.
+// A FileID is comparable, and thus suitable for use as a map key.
+type FileID struct {
+ device, inode uint64
+}
+
+// GetFileID returns the file system's identifier for the file, and its
+// modification time.
+// Like os.Stat, it reads through symbolic links.
+func GetFileID(filename string) (FileID, time.Time, error) { return getFileID(filename) }
diff --git a/internal/robustio/robustio_darwin.go b/internal/robustio/robustio_darwin.go
new file mode 100644
index 000000000..99fd8ebc2
--- /dev/null
+++ b/internal/robustio/robustio_darwin.go
@@ -0,0 +1,21 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package robustio
+
+import (
+ "errors"
+ "syscall"
+)
+
+const errFileNotFound = syscall.ENOENT
+
+// isEphemeralError returns true if err may be resolved by waiting.
+func isEphemeralError(err error) bool {
+ var errno syscall.Errno
+ if errors.As(err, &errno) {
+ return errno == errFileNotFound
+ }
+ return false
+}
diff --git a/internal/robustio/robustio_flaky.go b/internal/robustio/robustio_flaky.go
new file mode 100644
index 000000000..c6f997244
--- /dev/null
+++ b/internal/robustio/robustio_flaky.go
@@ -0,0 +1,93 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows || darwin
+// +build windows darwin
+
+package robustio
+
+import (
+ "errors"
+ "io/ioutil"
+ "math/rand"
+ "os"
+ "syscall"
+ "time"
+)
+
+const arbitraryTimeout = 2000 * time.Millisecond
+
+// retry retries ephemeral errors from f up to an arbitrary timeout
+// to work around filesystem flakiness on Windows and Darwin.
+func retry(f func() (err error, mayRetry bool)) error {
+ var (
+ bestErr error
+ lowestErrno syscall.Errno
+ start time.Time
+ nextSleep time.Duration = 1 * time.Millisecond
+ )
+ for {
+ err, mayRetry := f()
+ if err == nil || !mayRetry {
+ return err
+ }
+
+ var errno syscall.Errno
+ if errors.As(err, &errno) && (lowestErrno == 0 || errno < lowestErrno) {
+ bestErr = err
+ lowestErrno = errno
+ } else if bestErr == nil {
+ bestErr = err
+ }
+
+ if start.IsZero() {
+ start = time.Now()
+ } else if d := time.Since(start) + nextSleep; d >= arbitraryTimeout {
+ break
+ }
+ time.Sleep(nextSleep)
+ nextSleep += time.Duration(rand.Int63n(int64(nextSleep)))
+ }
+
+ return bestErr
+}
+
+// rename is like os.Rename, but retries ephemeral errors.
+//
+// On Windows it wraps os.Rename, which (as of 2019-06-04) uses MoveFileEx with
+// MOVEFILE_REPLACE_EXISTING.
+//
+// Windows also provides a different system call, ReplaceFile,
+// that provides similar semantics, but perhaps preserves more metadata. (The
+// documentation on the differences between the two is very sparse.)
+//
+// Empirical error rates with MoveFileEx are lower under modest concurrency, so
+// for now we're sticking with what the os package already provides.
+func rename(oldpath, newpath string) (err error) {
+ return retry(func() (err error, mayRetry bool) {
+ err = os.Rename(oldpath, newpath)
+ return err, isEphemeralError(err)
+ })
+}
+
+// readFile is like os.ReadFile, but retries ephemeral errors.
+func readFile(filename string) ([]byte, error) {
+ var b []byte
+ err := retry(func() (err error, mayRetry bool) {
+ b, err = ioutil.ReadFile(filename)
+
+ // Unlike in rename, we do not retry errFileNotFound here: it can occur
+ // as a spurious error, but the file may also genuinely not exist, so the
+ // increase in robustness is probably not worth the extra latency.
+ return err, isEphemeralError(err) && !errors.Is(err, errFileNotFound)
+ })
+ return b, err
+}
+
+func removeAll(path string) error {
+ return retry(func() (err error, mayRetry bool) {
+ err = os.RemoveAll(path)
+ return err, isEphemeralError(err)
+ })
+}
diff --git a/internal/robustio/robustio_other.go b/internal/robustio/robustio_other.go
new file mode 100644
index 000000000..c11dbf9f1
--- /dev/null
+++ b/internal/robustio/robustio_other.go
@@ -0,0 +1,29 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !windows && !darwin
+// +build !windows,!darwin
+
+package robustio
+
+import (
+ "io/ioutil"
+ "os"
+)
+
+func rename(oldpath, newpath string) error {
+ return os.Rename(oldpath, newpath)
+}
+
+func readFile(filename string) ([]byte, error) {
+ return ioutil.ReadFile(filename)
+}
+
+func removeAll(path string) error {
+ return os.RemoveAll(path)
+}
+
+func isEphemeralError(err error) bool {
+ return false
+}
diff --git a/internal/robustio/robustio_plan9.go b/internal/robustio/robustio_plan9.go
new file mode 100644
index 000000000..9fa4cacb5
--- /dev/null
+++ b/internal/robustio/robustio_plan9.go
@@ -0,0 +1,26 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build plan9
+// +build plan9
+
+package robustio
+
+import (
+ "os"
+ "syscall"
+ "time"
+)
+
+func getFileID(filename string) (FileID, time.Time, error) {
+ fi, err := os.Stat(filename)
+ if err != nil {
+ return FileID{}, time.Time{}, err
+ }
+ dir := fi.Sys().(*syscall.Dir)
+ return FileID{
+ device: uint64(dir.Type)<<32 | uint64(dir.Dev),
+ inode: dir.Qid.Path,
+ }, fi.ModTime(), nil
+}
diff --git a/internal/robustio/robustio_posix.go b/internal/robustio/robustio_posix.go
new file mode 100644
index 000000000..8aa13d027
--- /dev/null
+++ b/internal/robustio/robustio_posix.go
@@ -0,0 +1,28 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !windows && !plan9
+// +build !windows,!plan9
+
+// TODO(adonovan): use 'unix' tag when go1.19 can be assumed.
+
+package robustio
+
+import (
+ "os"
+ "syscall"
+ "time"
+)
+
+func getFileID(filename string) (FileID, time.Time, error) {
+ fi, err := os.Stat(filename)
+ if err != nil {
+ return FileID{}, time.Time{}, err
+ }
+ stat := fi.Sys().(*syscall.Stat_t)
+ return FileID{
+ device: uint64(stat.Dev), // (int32 on darwin, uint64 on linux)
+ inode: stat.Ino,
+ }, fi.ModTime(), nil
+}
diff --git a/internal/robustio/robustio_test.go b/internal/robustio/robustio_test.go
new file mode 100644
index 000000000..10244e21d
--- /dev/null
+++ b/internal/robustio/robustio_test.go
@@ -0,0 +1,88 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package robustio_test
+
+import (
+ "os"
+ "path/filepath"
+ "runtime"
+ "testing"
+ "time"
+
+ "golang.org/x/tools/internal/robustio"
+)
+
+func TestFileInfo(t *testing.T) {
+ // A nonexistent file has no ID.
+ nonexistent := filepath.Join(t.TempDir(), "nonexistent")
+ if _, _, err := robustio.GetFileID(nonexistent); err == nil {
+ t.Fatalf("GetFileID(nonexistent) succeeded unexpectedly")
+ }
+
+ // A regular file has an ID.
+ real := filepath.Join(t.TempDir(), "real")
+ if err := os.WriteFile(real, nil, 0644); err != nil {
+ t.Fatalf("can't create regular file: %v", err)
+ }
+ realID, realMtime, err := robustio.GetFileID(real)
+ if err != nil {
+ t.Fatalf("can't get ID of regular file: %v", err)
+ }
+
+ // Sleep so that we get a new mtime for subsequent writes.
+ time.Sleep(2 * time.Second)
+
+ // A second regular file has a different ID.
+ real2 := filepath.Join(t.TempDir(), "real2")
+ if err := os.WriteFile(real2, nil, 0644); err != nil {
+ t.Fatalf("can't create second regular file: %v", err)
+ }
+ real2ID, real2Mtime, err := robustio.GetFileID(real2)
+ if err != nil {
+ t.Fatalf("can't get ID of second regular file: %v", err)
+ }
+ if realID == real2ID {
+ t.Errorf("realID %+v == real2ID %+v", realID, real2ID)
+ }
+ if realMtime.Equal(real2Mtime) {
+ t.Errorf("realMtime %v == real2Mtime %v", realMtime, real2Mtime)
+ }
+
+ // A symbolic link has the same ID as its target.
+ if runtime.GOOS != "plan9" {
+ symlink := filepath.Join(t.TempDir(), "symlink")
+ if err := os.Symlink(real, symlink); err != nil {
+ t.Fatalf("can't create symbolic link: %v", err)
+ }
+ symlinkID, symlinkMtime, err := robustio.GetFileID(symlink)
+ if err != nil {
+ t.Fatalf("can't get ID of symbolic link: %v", err)
+ }
+ if realID != symlinkID {
+ t.Errorf("realID %+v != symlinkID %+v", realID, symlinkID)
+ }
+ if !realMtime.Equal(symlinkMtime) {
+ t.Errorf("realMtime %v != symlinkMtime %v", realMtime, symlinkMtime)
+ }
+ }
+
+ // Two hard-linked files have the same ID.
+ if runtime.GOOS != "plan9" && runtime.GOOS != "android" {
+ hardlink := filepath.Join(t.TempDir(), "hardlink")
+ if err := os.Link(real, hardlink); err != nil {
+ t.Fatal(err)
+ }
+ hardlinkID, hardlinkMtime, err := robustio.GetFileID(hardlink)
+ if err != nil {
+ t.Fatalf("can't get ID of hard link: %v", err)
+ }
+ if realID != hardlinkID {
+ t.Errorf("realID %+v != hardlinkID %+v", realID, hardlinkID)
+ }
+ if !realMtime.Equal(hardlinkMtime) {
+ t.Errorf("realMtime %v != hardlinkMtime %v", realMtime, hardlinkMtime)
+ }
+ }
+}
diff --git a/internal/robustio/robustio_windows.go b/internal/robustio/robustio_windows.go
new file mode 100644
index 000000000..616c32883
--- /dev/null
+++ b/internal/robustio/robustio_windows.go
@@ -0,0 +1,51 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package robustio
+
+import (
+ "errors"
+ "syscall"
+ "time"
+)
+
+const errFileNotFound = syscall.ERROR_FILE_NOT_FOUND
+
+// isEphemeralError returns true if err may be resolved by waiting.
+func isEphemeralError(err error) bool {
+ var errno syscall.Errno
+ if errors.As(err, &errno) {
+ switch errno {
+ case syscall.ERROR_ACCESS_DENIED,
+ syscall.ERROR_FILE_NOT_FOUND,
+ ERROR_SHARING_VIOLATION:
+ return true
+ }
+ }
+ return false
+}
+
+// Note: it may be convenient to have this helper return fs.FileInfo, but
+// implementing this is actually quite involved on Windows. Since we only
+// currently use mtime, keep it simple.
+func getFileID(filename string) (FileID, time.Time, error) {
+ filename16, err := syscall.UTF16PtrFromString(filename)
+ if err != nil {
+ return FileID{}, time.Time{}, err
+ }
+ h, err := syscall.CreateFile(filename16, 0, 0, nil, syscall.OPEN_EXISTING, uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS), 0)
+ if err != nil {
+ return FileID{}, time.Time{}, err
+ }
+ defer syscall.CloseHandle(h)
+ var i syscall.ByHandleFileInformation
+ if err := syscall.GetFileInformationByHandle(h, &i); err != nil {
+ return FileID{}, time.Time{}, err
+ }
+ mtime := time.Unix(0, i.LastWriteTime.Nanoseconds())
+ return FileID{
+ device: uint64(i.VolumeSerialNumber),
+ inode: uint64(i.FileIndexHigh)<<32 | uint64(i.FileIndexLow),
+ }, mtime, nil
+}
diff --git a/internal/span/parse.go b/internal/span/parse.go
deleted file mode 100644
index c4cec16e9..000000000
--- a/internal/span/parse.go
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package span
-
-import (
- "path/filepath"
- "strconv"
- "strings"
- "unicode/utf8"
-)
-
-// Parse returns the location represented by the input.
-// Only file paths are accepted, not URIs.
-// The returned span will be normalized, and thus if printed may produce a
-// different string.
-func Parse(input string) Span {
- return ParseInDir(input, ".")
-}
-
-// ParseInDir is like Parse, but interprets paths relative to wd.
-func ParseInDir(input, wd string) Span {
- uri := func(path string) URI {
- if !filepath.IsAbs(path) {
- path = filepath.Join(wd, path)
- }
- return URIFromPath(path)
- }
- // :0:0#0-0:0#0
- valid := input
- var hold, offset int
- hadCol := false
- suf := rstripSuffix(input)
- if suf.sep == "#" {
- offset = suf.num
- suf = rstripSuffix(suf.remains)
- }
- if suf.sep == ":" {
- valid = suf.remains
- hold = suf.num
- hadCol = true
- suf = rstripSuffix(suf.remains)
- }
- switch {
- case suf.sep == ":":
- return New(uri(suf.remains), NewPoint(suf.num, hold, offset), Point{})
- case suf.sep == "-":
- // we have a span, fall out of the case to continue
- default:
- // separator not valid, rewind to either the : or the start
- return New(uri(valid), NewPoint(hold, 0, offset), Point{})
- }
- // only the span form can get here
- // at this point we still don't know what the numbers we have mean
- // if have not yet seen a : then we might have either a line or a column depending
- // on whether start has a column or not
- // we build an end point and will fix it later if needed
- end := NewPoint(suf.num, hold, offset)
- hold, offset = 0, 0
- suf = rstripSuffix(suf.remains)
- if suf.sep == "#" {
- offset = suf.num
- suf = rstripSuffix(suf.remains)
- }
- if suf.sep != ":" {
- // turns out we don't have a span after all, rewind
- return New(uri(valid), end, Point{})
- }
- valid = suf.remains
- hold = suf.num
- suf = rstripSuffix(suf.remains)
- if suf.sep != ":" {
- // line#offset only
- return New(uri(valid), NewPoint(hold, 0, offset), end)
- }
- // we have a column, so if end only had one number, it is also the column
- if !hadCol {
- end = NewPoint(suf.num, end.v.Line, end.v.Offset)
- }
- return New(uri(suf.remains), NewPoint(suf.num, hold, offset), end)
-}
-
-type suffix struct {
- remains string
- sep string
- num int
-}
-
-func rstripSuffix(input string) suffix {
- if len(input) == 0 {
- return suffix{"", "", -1}
- }
- remains := input
- num := -1
- // first see if we have a number at the end
- last := strings.LastIndexFunc(remains, func(r rune) bool { return r < '0' || r > '9' })
- if last >= 0 && last < len(remains)-1 {
- number, err := strconv.ParseInt(remains[last+1:], 10, 64)
- if err == nil {
- num = int(number)
- remains = remains[:last+1]
- }
- }
- // now see if we have a trailing separator
- r, w := utf8.DecodeLastRuneInString(remains)
- if r != ':' && r != '#' && r == '#' {
- return suffix{input, "", -1}
- }
- remains = remains[:len(remains)-w]
- return suffix{remains, string(r), num}
-}
diff --git a/internal/span/span.go b/internal/span/span.go
deleted file mode 100644
index 4d2ad0986..000000000
--- a/internal/span/span.go
+++ /dev/null
@@ -1,285 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package span contains support for representing with positions and ranges in
-// text files.
-package span
-
-import (
- "encoding/json"
- "fmt"
- "path"
-)
-
-// Span represents a source code range in standardized form.
-type Span struct {
- v span
-}
-
-// Point represents a single point within a file.
-// In general this should only be used as part of a Span, as on its own it
-// does not carry enough information.
-type Point struct {
- v point
-}
-
-type span struct {
- URI URI `json:"uri"`
- Start point `json:"start"`
- End point `json:"end"`
-}
-
-type point struct {
- Line int `json:"line"`
- Column int `json:"column"`
- Offset int `json:"offset"`
-}
-
-// Invalid is a span that reports false from IsValid
-var Invalid = Span{v: span{Start: invalidPoint.v, End: invalidPoint.v}}
-
-var invalidPoint = Point{v: point{Line: 0, Column: 0, Offset: -1}}
-
-// Converter is the interface to an object that can convert between line:column
-// and offset forms for a single file.
-type Converter interface {
- //ToPosition converts from an offset to a line:column pair.
- ToPosition(offset int) (int, int, error)
- //ToOffset converts from a line:column pair to an offset.
- ToOffset(line, col int) (int, error)
-}
-
-func New(uri URI, start Point, end Point) Span {
- s := Span{v: span{URI: uri, Start: start.v, End: end.v}}
- s.v.clean()
- return s
-}
-
-func NewPoint(line, col, offset int) Point {
- p := Point{v: point{Line: line, Column: col, Offset: offset}}
- p.v.clean()
- return p
-}
-
-func Compare(a, b Span) int {
- if r := CompareURI(a.URI(), b.URI()); r != 0 {
- return r
- }
- if r := comparePoint(a.v.Start, b.v.Start); r != 0 {
- return r
- }
- return comparePoint(a.v.End, b.v.End)
-}
-
-func ComparePoint(a, b Point) int {
- return comparePoint(a.v, b.v)
-}
-
-func comparePoint(a, b point) int {
- if !a.hasPosition() {
- if a.Offset < b.Offset {
- return -1
- }
- if a.Offset > b.Offset {
- return 1
- }
- return 0
- }
- if a.Line < b.Line {
- return -1
- }
- if a.Line > b.Line {
- return 1
- }
- if a.Column < b.Column {
- return -1
- }
- if a.Column > b.Column {
- return 1
- }
- return 0
-}
-
-func (s Span) HasPosition() bool { return s.v.Start.hasPosition() }
-func (s Span) HasOffset() bool { return s.v.Start.hasOffset() }
-func (s Span) IsValid() bool { return s.v.Start.isValid() }
-func (s Span) IsPoint() bool { return s.v.Start == s.v.End }
-func (s Span) URI() URI { return s.v.URI }
-func (s Span) Start() Point { return Point{s.v.Start} }
-func (s Span) End() Point { return Point{s.v.End} }
-func (s *Span) MarshalJSON() ([]byte, error) { return json.Marshal(&s.v) }
-func (s *Span) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &s.v) }
-
-func (p Point) HasPosition() bool { return p.v.hasPosition() }
-func (p Point) HasOffset() bool { return p.v.hasOffset() }
-func (p Point) IsValid() bool { return p.v.isValid() }
-func (p *Point) MarshalJSON() ([]byte, error) { return json.Marshal(&p.v) }
-func (p *Point) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &p.v) }
-func (p Point) Line() int {
- if !p.v.hasPosition() {
- panic(fmt.Errorf("position not set in %v", p.v))
- }
- return p.v.Line
-}
-func (p Point) Column() int {
- if !p.v.hasPosition() {
- panic(fmt.Errorf("position not set in %v", p.v))
- }
- return p.v.Column
-}
-func (p Point) Offset() int {
- if !p.v.hasOffset() {
- panic(fmt.Errorf("offset not set in %v", p.v))
- }
- return p.v.Offset
-}
-
-func (p point) hasPosition() bool { return p.Line > 0 }
-func (p point) hasOffset() bool { return p.Offset >= 0 }
-func (p point) isValid() bool { return p.hasPosition() || p.hasOffset() }
-func (p point) isZero() bool {
- return (p.Line == 1 && p.Column == 1) || (!p.hasPosition() && p.Offset == 0)
-}
-
-func (s *span) clean() {
- //this presumes the points are already clean
- if !s.End.isValid() || (s.End == point{}) {
- s.End = s.Start
- }
-}
-
-func (p *point) clean() {
- if p.Line < 0 {
- p.Line = 0
- }
- if p.Column <= 0 {
- if p.Line > 0 {
- p.Column = 1
- } else {
- p.Column = 0
- }
- }
- if p.Offset == 0 && (p.Line > 1 || p.Column > 1) {
- p.Offset = -1
- }
-}
-
-// Format implements fmt.Formatter to print the Location in a standard form.
-// The format produced is one that can be read back in using Parse.
-func (s Span) Format(f fmt.State, c rune) {
- fullForm := f.Flag('+')
- preferOffset := f.Flag('#')
- // we should always have a uri, simplify if it is file format
- //TODO: make sure the end of the uri is unambiguous
- uri := string(s.v.URI)
- if c == 'f' {
- uri = path.Base(uri)
- } else if !fullForm {
- uri = s.v.URI.Filename()
- }
- fmt.Fprint(f, uri)
- if !s.IsValid() || (!fullForm && s.v.Start.isZero() && s.v.End.isZero()) {
- return
- }
- // see which bits of start to write
- printOffset := s.HasOffset() && (fullForm || preferOffset || !s.HasPosition())
- printLine := s.HasPosition() && (fullForm || !printOffset)
- printColumn := printLine && (fullForm || (s.v.Start.Column > 1 || s.v.End.Column > 1))
- fmt.Fprint(f, ":")
- if printLine {
- fmt.Fprintf(f, "%d", s.v.Start.Line)
- }
- if printColumn {
- fmt.Fprintf(f, ":%d", s.v.Start.Column)
- }
- if printOffset {
- fmt.Fprintf(f, "#%d", s.v.Start.Offset)
- }
- // start is written, do we need end?
- if s.IsPoint() {
- return
- }
- // we don't print the line if it did not change
- printLine = fullForm || (printLine && s.v.End.Line > s.v.Start.Line)
- fmt.Fprint(f, "-")
- if printLine {
- fmt.Fprintf(f, "%d", s.v.End.Line)
- }
- if printColumn {
- if printLine {
- fmt.Fprint(f, ":")
- }
- fmt.Fprintf(f, "%d", s.v.End.Column)
- }
- if printOffset {
- fmt.Fprintf(f, "#%d", s.v.End.Offset)
- }
-}
-
-func (s Span) WithPosition(c Converter) (Span, error) {
- if err := s.update(c, true, false); err != nil {
- return Span{}, err
- }
- return s, nil
-}
-
-func (s Span) WithOffset(c Converter) (Span, error) {
- if err := s.update(c, false, true); err != nil {
- return Span{}, err
- }
- return s, nil
-}
-
-func (s Span) WithAll(c Converter) (Span, error) {
- if err := s.update(c, true, true); err != nil {
- return Span{}, err
- }
- return s, nil
-}
-
-func (s *Span) update(c Converter, withPos, withOffset bool) error {
- if !s.IsValid() {
- return fmt.Errorf("cannot add information to an invalid span")
- }
- if withPos && !s.HasPosition() {
- if err := s.v.Start.updatePosition(c); err != nil {
- return err
- }
- if s.v.End.Offset == s.v.Start.Offset {
- s.v.End = s.v.Start
- } else if err := s.v.End.updatePosition(c); err != nil {
- return err
- }
- }
- if withOffset && (!s.HasOffset() || (s.v.End.hasPosition() && !s.v.End.hasOffset())) {
- if err := s.v.Start.updateOffset(c); err != nil {
- return err
- }
- if s.v.End.Line == s.v.Start.Line && s.v.End.Column == s.v.Start.Column {
- s.v.End.Offset = s.v.Start.Offset
- } else if err := s.v.End.updateOffset(c); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (p *point) updatePosition(c Converter) error {
- line, col, err := c.ToPosition(p.Offset)
- if err != nil {
- return err
- }
- p.Line = line
- p.Column = col
- return nil
-}
-
-func (p *point) updateOffset(c Converter) error {
- offset, err := c.ToOffset(p.Line, p.Column)
- if err != nil {
- return err
- }
- p.Offset = offset
- return nil
-}
diff --git a/internal/span/span_test.go b/internal/span/span_test.go
deleted file mode 100644
index 150ea3fba..000000000
--- a/internal/span/span_test.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package span_test
-
-import (
- "fmt"
- "path/filepath"
- "strings"
- "testing"
-
- "golang.org/x/tools/internal/span"
-)
-
-var (
- tests = [][]string{
- {"C:/file_a", "C:/file_a", "file:///C:/file_a:1:1#0"},
- {"C:/file_b:1:2", "C:/file_b:#1", "file:///C:/file_b:1:2#1"},
- {"C:/file_c:1000", "C:/file_c:#9990", "file:///C:/file_c:1000:1#9990"},
- {"C:/file_d:14:9", "C:/file_d:#138", "file:///C:/file_d:14:9#138"},
- {"C:/file_e:1:2-7", "C:/file_e:#1-#6", "file:///C:/file_e:1:2#1-1:7#6"},
- {"C:/file_f:500-502", "C:/file_f:#4990-#5010", "file:///C:/file_f:500:1#4990-502:1#5010"},
- {"C:/file_g:3:7-8", "C:/file_g:#26-#27", "file:///C:/file_g:3:7#26-3:8#27"},
- {"C:/file_h:3:7-4:8", "C:/file_h:#26-#37", "file:///C:/file_h:3:7#26-4:8#37"},
- }
-)
-
-func TestFormat(t *testing.T) {
- converter := lines(10)
- for _, test := range tests {
- for ti, text := range test[:2] {
- spn := span.Parse(text)
- if ti <= 1 {
- // we can check %v produces the same as the input
- expect := toPath(test[ti])
- if got := fmt.Sprintf("%v", spn); got != expect {
- t.Errorf("printing %q got %q expected %q", text, got, expect)
- }
- }
- complete, err := spn.WithAll(converter)
- if err != nil {
- t.Error(err)
- }
- for fi, format := range []string{"%v", "%#v", "%+v"} {
- expect := toPath(test[fi])
- if got := fmt.Sprintf(format, complete); got != expect {
- t.Errorf("printing completed %q as %q got %q expected %q [%+v]", text, format, got, expect, spn)
- }
- }
- }
- }
-}
-
-func toPath(value string) string {
- if strings.HasPrefix(value, "file://") {
- return value
- }
- return filepath.FromSlash(value)
-}
-
-type lines int
-
-func (l lines) ToPosition(offset int) (int, int, error) {
- return (offset / int(l)) + 1, (offset % int(l)) + 1, nil
-}
-
-func (l lines) ToOffset(line, col int) (int, error) {
- return (int(l) * (line - 1)) + (col - 1), nil
-}
diff --git a/internal/span/token.go b/internal/span/token.go
deleted file mode 100644
index 6f8b9b570..000000000
--- a/internal/span/token.go
+++ /dev/null
@@ -1,194 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package span
-
-import (
- "fmt"
- "go/token"
-)
-
-// Range represents a source code range in token.Pos form.
-// It also carries the FileSet that produced the positions, so that it is
-// self contained.
-type Range struct {
- FileSet *token.FileSet
- Start token.Pos
- End token.Pos
- Converter Converter
-}
-
-type FileConverter struct {
- file *token.File
-}
-
-// TokenConverter is a Converter backed by a token file set and file.
-// It uses the file set methods to work out the conversions, which
-// makes it fast and does not require the file contents.
-type TokenConverter struct {
- FileConverter
- fset *token.FileSet
-}
-
-// NewRange creates a new Range from a FileSet and two positions.
-// To represent a point pass a 0 as the end pos.
-func NewRange(fset *token.FileSet, start, end token.Pos) Range {
- return Range{
- FileSet: fset,
- Start: start,
- End: end,
- }
-}
-
-// NewTokenConverter returns an implementation of Converter backed by a
-// token.File.
-func NewTokenConverter(fset *token.FileSet, f *token.File) *TokenConverter {
- return &TokenConverter{fset: fset, FileConverter: FileConverter{file: f}}
-}
-
-// NewContentConverter returns an implementation of Converter for the
-// given file content.
-func NewContentConverter(filename string, content []byte) *TokenConverter {
- fset := token.NewFileSet()
- f := fset.AddFile(filename, -1, len(content))
- f.SetLinesForContent(content)
- return NewTokenConverter(fset, f)
-}
-
-// IsPoint returns true if the range represents a single point.
-func (r Range) IsPoint() bool {
- return r.Start == r.End
-}
-
-// Span converts a Range to a Span that represents the Range.
-// It will fill in all the members of the Span, calculating the line and column
-// information.
-func (r Range) Span() (Span, error) {
- if !r.Start.IsValid() {
- return Span{}, fmt.Errorf("start pos is not valid")
- }
- f := r.FileSet.File(r.Start)
- if f == nil {
- return Span{}, fmt.Errorf("file not found in FileSet")
- }
- return FileSpan(f, r.Converter, r.Start, r.End)
-}
-
-// FileSpan returns a span within tok, using converter to translate between
-// offsets and positions.
-func FileSpan(tok *token.File, converter Converter, start, end token.Pos) (Span, error) {
- var s Span
- var err error
- var startFilename string
- startFilename, s.v.Start.Line, s.v.Start.Column, err = position(tok, start)
- if err != nil {
- return Span{}, err
- }
- s.v.URI = URIFromPath(startFilename)
- if end.IsValid() {
- var endFilename string
- endFilename, s.v.End.Line, s.v.End.Column, err = position(tok, end)
- if err != nil {
- return Span{}, err
- }
- // In the presence of line directives, a single File can have sections from
- // multiple file names.
- if endFilename != startFilename {
- return Span{}, fmt.Errorf("span begins in file %q but ends in %q", startFilename, endFilename)
- }
- }
- s.v.Start.clean()
- s.v.End.clean()
- s.v.clean()
- if converter != nil {
- return s.WithOffset(converter)
- }
- if startFilename != tok.Name() {
- return Span{}, fmt.Errorf("must supply Converter for file %q containing lines from %q", tok.Name(), startFilename)
- }
- return s.WithOffset(&FileConverter{tok})
-}
-
-func position(f *token.File, pos token.Pos) (string, int, int, error) {
- off, err := offset(f, pos)
- if err != nil {
- return "", 0, 0, err
- }
- return positionFromOffset(f, off)
-}
-
-func positionFromOffset(f *token.File, offset int) (string, int, int, error) {
- if offset > f.Size() {
- return "", 0, 0, fmt.Errorf("offset %v is past the end of the file %v", offset, f.Size())
- }
- pos := f.Pos(offset)
- p := f.Position(pos)
- // TODO(golang/go#41029): Consider returning line, column instead of line+1, 1 if
- // the file's last character is not a newline.
- if offset == f.Size() {
- return p.Filename, p.Line + 1, 1, nil
- }
- return p.Filename, p.Line, p.Column, nil
-}
-
-// offset is a copy of the Offset function in go/token, but with the adjustment
-// that it does not panic on invalid positions.
-func offset(f *token.File, pos token.Pos) (int, error) {
- if int(pos) < f.Base() || int(pos) > f.Base()+f.Size() {
- return 0, fmt.Errorf("invalid pos")
- }
- return int(pos) - f.Base(), nil
-}
-
-// Range converts a Span to a Range that represents the Span for the supplied
-// File.
-func (s Span) Range(converter *TokenConverter) (Range, error) {
- s, err := s.WithOffset(converter)
- if err != nil {
- return Range{}, err
- }
- // go/token will panic if the offset is larger than the file's size,
- // so check here to avoid panicking.
- if s.Start().Offset() > converter.file.Size() {
- return Range{}, fmt.Errorf("start offset %v is past the end of the file %v", s.Start(), converter.file.Size())
- }
- if s.End().Offset() > converter.file.Size() {
- return Range{}, fmt.Errorf("end offset %v is past the end of the file %v", s.End(), converter.file.Size())
- }
- return Range{
- FileSet: converter.fset,
- Start: converter.file.Pos(s.Start().Offset()),
- End: converter.file.Pos(s.End().Offset()),
- Converter: converter,
- }, nil
-}
-
-func (l *FileConverter) ToPosition(offset int) (int, int, error) {
- _, line, col, err := positionFromOffset(l.file, offset)
- return line, col, err
-}
-
-func (l *FileConverter) ToOffset(line, col int) (int, error) {
- if line < 0 {
- return -1, fmt.Errorf("line is not valid")
- }
- lineMax := l.file.LineCount() + 1
- if line > lineMax {
- return -1, fmt.Errorf("line is beyond end of file %v", lineMax)
- } else if line == lineMax {
- if col > 1 {
- return -1, fmt.Errorf("column is beyond end of file")
- }
- // at the end of the file, allowing for a trailing eol
- return l.file.Size(), nil
- }
- pos := lineStart(l.file, line)
- if !pos.IsValid() {
- return -1, fmt.Errorf("line is not in file")
- }
- // we assume that column is in bytes here, and that the first byte of a
- // line is at column 1
- pos += token.Pos(col - 1)
- return offset(l.file, pos)
-}
diff --git a/internal/span/token111.go b/internal/span/token111.go
deleted file mode 100644
index c41e94b8f..000000000
--- a/internal/span/token111.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.12
-// +build !go1.12
-
-package span
-
-import (
- "go/token"
-)
-
-// lineStart is the pre-Go 1.12 version of (*token.File).LineStart. For Go
-// versions <= 1.11, we borrow logic from the analysisutil package.
-// TODO(rstambler): Delete this file when we no longer support Go 1.11.
-func lineStart(f *token.File, line int) token.Pos {
- // Use binary search to find the start offset of this line.
-
- min := 0 // inclusive
- max := f.Size() // exclusive
- for {
- offset := (min + max) / 2
- pos := f.Pos(offset)
- posn := f.Position(pos)
- if posn.Line == line {
- return pos - (token.Pos(posn.Column) - 1)
- }
-
- if min+1 >= max {
- return token.NoPos
- }
-
- if posn.Line < line {
- min = offset
- } else {
- max = offset
- }
- }
-}
diff --git a/internal/span/token112.go b/internal/span/token112.go
deleted file mode 100644
index 4c4dea170..000000000
--- a/internal/span/token112.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.12
-// +build go1.12
-
-package span
-
-import (
- "go/token"
-)
-
-// TODO(rstambler): Delete this file when we no longer support Go 1.11.
-func lineStart(f *token.File, line int) token.Pos {
- return f.LineStart(line)
-}
diff --git a/internal/span/token_test.go b/internal/span/token_test.go
deleted file mode 100644
index 81b263180..000000000
--- a/internal/span/token_test.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package span_test
-
-import (
- "fmt"
- "go/token"
- "path"
- "testing"
-
- "golang.org/x/tools/internal/span"
-)
-
-var testdata = []struct {
- uri string
- content []byte
-}{
- {"/a.go", []byte(`
-// file a.go
-package test
-`)},
- {"/b.go", []byte(`
-//
-//
-// file b.go
-package test`)},
- {"/c.go", []byte(`
-// file c.go
-package test`)},
-}
-
-var tokenTests = []span.Span{
- span.New(span.URIFromPath("/a.go"), span.NewPoint(1, 1, 0), span.Point{}),
- span.New(span.URIFromPath("/a.go"), span.NewPoint(3, 7, 20), span.NewPoint(3, 7, 20)),
- span.New(span.URIFromPath("/b.go"), span.NewPoint(4, 9, 15), span.NewPoint(4, 13, 19)),
- span.New(span.URIFromPath("/c.go"), span.NewPoint(4, 1, 26), span.Point{}),
-}
-
-func TestToken(t *testing.T) {
- fset := token.NewFileSet()
- files := map[span.URI]*token.File{}
- for _, f := range testdata {
- file := fset.AddFile(f.uri, -1, len(f.content))
- file.SetLinesForContent(f.content)
- files[span.URIFromPath(f.uri)] = file
- }
- for _, test := range tokenTests {
- f := files[test.URI()]
- c := span.NewTokenConverter(fset, f)
- t.Run(path.Base(f.Name()), func(t *testing.T) {
- checkToken(t, c, span.New(
- test.URI(),
- span.NewPoint(test.Start().Line(), test.Start().Column(), 0),
- span.NewPoint(test.End().Line(), test.End().Column(), 0),
- ), test)
- checkToken(t, c, span.New(
- test.URI(),
- span.NewPoint(0, 0, test.Start().Offset()),
- span.NewPoint(0, 0, test.End().Offset()),
- ), test)
- })
- }
-}
-
-func checkToken(t *testing.T, c *span.TokenConverter, in, expect span.Span) {
- rng, err := in.Range(c)
- if err != nil {
- t.Error(err)
- }
- gotLoc, err := rng.Span()
- if err != nil {
- t.Error(err)
- }
- expected := fmt.Sprintf("%+v", expect)
- got := fmt.Sprintf("%+v", gotLoc)
- if expected != got {
- t.Errorf("For %v expected %q got %q", in, expected, got)
- }
-}
diff --git a/internal/span/uri.go b/internal/span/uri.go
deleted file mode 100644
index a9777ff85..000000000
--- a/internal/span/uri.go
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package span
-
-import (
- "fmt"
- "net/url"
- "os"
- "path"
- "path/filepath"
- "runtime"
- "strings"
- "unicode"
-)
-
-const fileScheme = "file"
-
-// URI represents the full URI for a file.
-type URI string
-
-func (uri URI) IsFile() bool {
- return strings.HasPrefix(string(uri), "file://")
-}
-
-// Filename returns the file path for the given URI.
-// It is an error to call this on a URI that is not a valid filename.
-func (uri URI) Filename() string {
- filename, err := filename(uri)
- if err != nil {
- panic(err)
- }
- return filepath.FromSlash(filename)
-}
-
-func filename(uri URI) (string, error) {
- if uri == "" {
- return "", nil
- }
- u, err := url.ParseRequestURI(string(uri))
- if err != nil {
- return "", err
- }
- if u.Scheme != fileScheme {
- return "", fmt.Errorf("only file URIs are supported, got %q from %q", u.Scheme, uri)
- }
- // If the URI is a Windows URI, we trim the leading "/" and uppercase
- // the drive letter, which will never be case sensitive.
- if isWindowsDriveURIPath(u.Path) {
- u.Path = strings.ToUpper(string(u.Path[1])) + u.Path[2:]
- }
- return u.Path, nil
-}
-
-func URIFromURI(s string) URI {
- if !strings.HasPrefix(s, "file://") {
- return URI(s)
- }
-
- if !strings.HasPrefix(s, "file:///") {
- // VS Code sends URLs with only two slashes, which are invalid. golang/go#39789.
- s = "file:///" + s[len("file://"):]
- }
- // Even though the input is a URI, it may not be in canonical form. VS Code
- // in particular over-escapes :, @, etc. Unescape and re-encode to canonicalize.
- path, err := url.PathUnescape(s[len("file://"):])
- if err != nil {
- panic(err)
- }
-
- // File URIs from Windows may have lowercase drive letters.
- // Since drive letters are guaranteed to be case insensitive,
- // we change them to uppercase to remain consistent.
- // For example, file:///c:/x/y/z becomes file:///C:/x/y/z.
- if isWindowsDriveURIPath(path) {
- path = path[:1] + strings.ToUpper(string(path[1])) + path[2:]
- }
- u := url.URL{Scheme: fileScheme, Path: path}
- return URI(u.String())
-}
-
-func CompareURI(a, b URI) int {
- if equalURI(a, b) {
- return 0
- }
- if a < b {
- return -1
- }
- return 1
-}
-
-func equalURI(a, b URI) bool {
- if a == b {
- return true
- }
- // If we have the same URI basename, we may still have the same file URIs.
- if !strings.EqualFold(path.Base(string(a)), path.Base(string(b))) {
- return false
- }
- fa, err := filename(a)
- if err != nil {
- return false
- }
- fb, err := filename(b)
- if err != nil {
- return false
- }
- // Stat the files to check if they are equal.
- infoa, err := os.Stat(filepath.FromSlash(fa))
- if err != nil {
- return false
- }
- infob, err := os.Stat(filepath.FromSlash(fb))
- if err != nil {
- return false
- }
- return os.SameFile(infoa, infob)
-}
-
-// URIFromPath returns a span URI for the supplied file path.
-// It will always have the file scheme.
-func URIFromPath(path string) URI {
- if path == "" {
- return ""
- }
- // Handle standard library paths that contain the literal "$GOROOT".
- // TODO(rstambler): The go/packages API should allow one to determine a user's $GOROOT.
- const prefix = "$GOROOT"
- if len(path) >= len(prefix) && strings.EqualFold(prefix, path[:len(prefix)]) {
- suffix := path[len(prefix):]
- path = runtime.GOROOT() + suffix
- }
- if !isWindowsDrivePath(path) {
- if abs, err := filepath.Abs(path); err == nil {
- path = abs
- }
- }
- // Check the file path again, in case it became absolute.
- if isWindowsDrivePath(path) {
- path = "/" + strings.ToUpper(string(path[0])) + path[1:]
- }
- path = filepath.ToSlash(path)
- u := url.URL{
- Scheme: fileScheme,
- Path: path,
- }
- return URI(u.String())
-}
-
-// isWindowsDrivePath returns true if the file path is of the form used by
-// Windows. We check if the path begins with a drive letter, followed by a ":".
-// For example: C:/x/y/z.
-func isWindowsDrivePath(path string) bool {
- if len(path) < 3 {
- return false
- }
- return unicode.IsLetter(rune(path[0])) && path[1] == ':'
-}
-
-// isWindowsDriveURI returns true if the file URI is of the format used by
-// Windows URIs. The url.Parse package does not specially handle Windows paths
-// (see golang/go#6027), so we check if the URI path has a drive prefix (e.g. "/C:").
-func isWindowsDriveURIPath(uri string) bool {
- if len(uri) < 4 {
- return false
- }
- return uri[0] == '/' && unicode.IsLetter(rune(uri[1])) && uri[2] == ':'
-}
diff --git a/internal/span/uri_test.go b/internal/span/uri_test.go
deleted file mode 100644
index bcbad8712..000000000
--- a/internal/span/uri_test.go
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !windows
-// +build !windows
-
-package span_test
-
-import (
- "testing"
-
- "golang.org/x/tools/internal/span"
-)
-
-// TestURI tests the conversion between URIs and filenames. The test cases
-// include Windows-style URIs and filepaths, but we avoid having OS-specific
-// tests by using only forward slashes, assuming that the standard library
-// functions filepath.ToSlash and filepath.FromSlash do not need testing.
-func TestURIFromPath(t *testing.T) {
- for _, test := range []struct {
- path, wantFile string
- wantURI span.URI
- }{
- {
- path: ``,
- wantFile: ``,
- wantURI: span.URI(""),
- },
- {
- path: `C:/Windows/System32`,
- wantFile: `C:/Windows/System32`,
- wantURI: span.URI("file:///C:/Windows/System32"),
- },
- {
- path: `C:/Go/src/bob.go`,
- wantFile: `C:/Go/src/bob.go`,
- wantURI: span.URI("file:///C:/Go/src/bob.go"),
- },
- {
- path: `c:/Go/src/bob.go`,
- wantFile: `C:/Go/src/bob.go`,
- wantURI: span.URI("file:///C:/Go/src/bob.go"),
- },
- {
- path: `/path/to/dir`,
- wantFile: `/path/to/dir`,
- wantURI: span.URI("file:///path/to/dir"),
- },
- {
- path: `/a/b/c/src/bob.go`,
- wantFile: `/a/b/c/src/bob.go`,
- wantURI: span.URI("file:///a/b/c/src/bob.go"),
- },
- {
- path: `c:/Go/src/bob george/george/george.go`,
- wantFile: `C:/Go/src/bob george/george/george.go`,
- wantURI: span.URI("file:///C:/Go/src/bob%20george/george/george.go"),
- },
- } {
- got := span.URIFromPath(test.path)
- if got != test.wantURI {
- t.Errorf("URIFromPath(%q): got %q, expected %q", test.path, got, test.wantURI)
- }
- gotFilename := got.Filename()
- if gotFilename != test.wantFile {
- t.Errorf("Filename(%q): got %q, expected %q", got, gotFilename, test.wantFile)
- }
- }
-}
-
-func TestURIFromURI(t *testing.T) {
- for _, test := range []struct {
- inputURI, wantFile string
- wantURI span.URI
- }{
- {
- inputURI: `file:///c:/Go/src/bob%20george/george/george.go`,
- wantFile: `C:/Go/src/bob george/george/george.go`,
- wantURI: span.URI("file:///C:/Go/src/bob%20george/george/george.go"),
- },
- {
- inputURI: `file:///C%3A/Go/src/bob%20george/george/george.go`,
- wantFile: `C:/Go/src/bob george/george/george.go`,
- wantURI: span.URI("file:///C:/Go/src/bob%20george/george/george.go"),
- },
- {
- inputURI: `file:///path/to/%25p%25ercent%25/per%25cent.go`,
- wantFile: `/path/to/%p%ercent%/per%cent.go`,
- wantURI: span.URI(`file:///path/to/%25p%25ercent%25/per%25cent.go`),
- },
- {
- inputURI: `file:///C%3A/`,
- wantFile: `C:/`,
- wantURI: span.URI(`file:///C:/`),
- },
- {
- inputURI: `file:///`,
- wantFile: `/`,
- wantURI: span.URI(`file:///`),
- },
- {
- inputURI: `file://wsl%24/Ubuntu/home/wdcui/repo/VMEnclaves/cvm-runtime`,
- wantFile: `/wsl$/Ubuntu/home/wdcui/repo/VMEnclaves/cvm-runtime`,
- wantURI: span.URI(`file:///wsl$/Ubuntu/home/wdcui/repo/VMEnclaves/cvm-runtime`),
- },
- } {
- got := span.URIFromURI(test.inputURI)
- if got != test.wantURI {
- t.Errorf("NewURI(%q): got %q, expected %q", test.inputURI, got, test.wantURI)
- }
- gotFilename := got.Filename()
- if gotFilename != test.wantFile {
- t.Errorf("Filename(%q): got %q, expected %q", got, gotFilename, test.wantFile)
- }
- }
-}
diff --git a/internal/span/uri_windows_test.go b/internal/span/uri_windows_test.go
deleted file mode 100644
index e50b58f1b..000000000
--- a/internal/span/uri_windows_test.go
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build windows
-// +build windows
-
-package span_test
-
-import (
- "testing"
-
- "golang.org/x/tools/internal/span"
-)
-
-// TestURI tests the conversion between URIs and filenames. The test cases
-// include Windows-style URIs and filepaths, but we avoid having OS-specific
-// tests by using only forward slashes, assuming that the standard library
-// functions filepath.ToSlash and filepath.FromSlash do not need testing.
-func TestURIFromPath(t *testing.T) {
- for _, test := range []struct {
- path, wantFile string
- wantURI span.URI
- }{
- {
- path: ``,
- wantFile: ``,
- wantURI: span.URI(""),
- },
- {
- path: `C:\Windows\System32`,
- wantFile: `C:\Windows\System32`,
- wantURI: span.URI("file:///C:/Windows/System32"),
- },
- {
- path: `C:\Go\src\bob.go`,
- wantFile: `C:\Go\src\bob.go`,
- wantURI: span.URI("file:///C:/Go/src/bob.go"),
- },
- {
- path: `c:\Go\src\bob.go`,
- wantFile: `C:\Go\src\bob.go`,
- wantURI: span.URI("file:///C:/Go/src/bob.go"),
- },
- {
- path: `\path\to\dir`,
- wantFile: `C:\path\to\dir`,
- wantURI: span.URI("file:///C:/path/to/dir"),
- },
- {
- path: `\a\b\c\src\bob.go`,
- wantFile: `C:\a\b\c\src\bob.go`,
- wantURI: span.URI("file:///C:/a/b/c/src/bob.go"),
- },
- {
- path: `c:\Go\src\bob george\george\george.go`,
- wantFile: `C:\Go\src\bob george\george\george.go`,
- wantURI: span.URI("file:///C:/Go/src/bob%20george/george/george.go"),
- },
- } {
- got := span.URIFromPath(test.path)
- if got != test.wantURI {
- t.Errorf("URIFromPath(%q): got %q, expected %q", test.path, got, test.wantURI)
- }
- gotFilename := got.Filename()
- if gotFilename != test.wantFile {
- t.Errorf("Filename(%q): got %q, expected %q", got, gotFilename, test.wantFile)
- }
- }
-}
-
-func TestURIFromURI(t *testing.T) {
- for _, test := range []struct {
- inputURI, wantFile string
- wantURI span.URI
- }{
- {
- inputURI: `file:///c:/Go/src/bob%20george/george/george.go`,
- wantFile: `C:\Go\src\bob george\george\george.go`,
- wantURI: span.URI("file:///C:/Go/src/bob%20george/george/george.go"),
- },
- {
- inputURI: `file:///C%3A/Go/src/bob%20george/george/george.go`,
- wantFile: `C:\Go\src\bob george\george\george.go`,
- wantURI: span.URI("file:///C:/Go/src/bob%20george/george/george.go"),
- },
- {
- inputURI: `file:///c:/path/to/%25p%25ercent%25/per%25cent.go`,
- wantFile: `C:\path\to\%p%ercent%\per%cent.go`,
- wantURI: span.URI(`file:///C:/path/to/%25p%25ercent%25/per%25cent.go`),
- },
- {
- inputURI: `file:///C%3A/`,
- wantFile: `C:\`,
- wantURI: span.URI(`file:///C:/`),
- },
- {
- inputURI: `file:///`,
- wantFile: `\`,
- wantURI: span.URI(`file:///`),
- },
- } {
- got := span.URIFromURI(test.inputURI)
- if got != test.wantURI {
- t.Errorf("NewURI(%q): got %q, expected %q", test.inputURI, got, test.wantURI)
- }
- gotFilename := got.Filename()
- if gotFilename != test.wantFile {
- t.Errorf("Filename(%q): got %q, expected %q", got, gotFilename, test.wantFile)
- }
- }
-}
diff --git a/internal/span/utf16.go b/internal/span/utf16.go
deleted file mode 100644
index dce2e8e87..000000000
--- a/internal/span/utf16.go
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package span
-
-import (
- "fmt"
- "unicode/utf8"
-)
-
-// ToUTF16Column calculates the utf16 column expressed by the point given the
-// supplied file contents.
-// This is used to convert from the native (always in bytes) column
-// representation and the utf16 counts used by some editors.
-func ToUTF16Column(p Point, content []byte) (int, error) {
- if !p.HasPosition() {
- return -1, fmt.Errorf("ToUTF16Column: point is missing position")
- }
- if !p.HasOffset() {
- return -1, fmt.Errorf("ToUTF16Column: point is missing offset")
- }
- offset := p.Offset() // 0-based
- colZero := p.Column() - 1 // 0-based
- if colZero == 0 {
- // 0-based column 0, so it must be chr 1
- return 1, nil
- } else if colZero < 0 {
- return -1, fmt.Errorf("ToUTF16Column: column is invalid (%v)", colZero)
- }
- // work out the offset at the start of the line using the column
- lineOffset := offset - colZero
- if lineOffset < 0 || offset > len(content) {
- return -1, fmt.Errorf("ToUTF16Column: offsets %v-%v outside file contents (%v)", lineOffset, offset, len(content))
- }
- // Use the offset to pick out the line start.
- // This cannot panic: offset > len(content) and lineOffset < offset.
- start := content[lineOffset:]
-
- // Now, truncate down to the supplied column.
- start = start[:colZero]
-
- cnt := 0
- for _, r := range string(start) {
- cnt++
- if r > 0xffff {
- cnt++
- }
- }
- return cnt + 1, nil // the +1 is for 1-based columns
-}
-
-// FromUTF16Column advances the point by the utf16 character offset given the
-// supplied line contents.
-// This is used to convert from the utf16 counts used by some editors to the
-// native (always in bytes) column representation.
-func FromUTF16Column(p Point, chr int, content []byte) (Point, error) {
- if !p.HasOffset() {
- return Point{}, fmt.Errorf("FromUTF16Column: point is missing offset")
- }
- // if chr is 1 then no adjustment needed
- if chr <= 1 {
- return p, nil
- }
- if p.Offset() >= len(content) {
- return p, fmt.Errorf("FromUTF16Column: offset (%v) greater than length of content (%v)", p.Offset(), len(content))
- }
- remains := content[p.Offset():]
- // scan forward the specified number of characters
- for count := 1; count < chr; count++ {
- if len(remains) <= 0 {
- return Point{}, fmt.Errorf("FromUTF16Column: chr goes beyond the content")
- }
- r, w := utf8.DecodeRune(remains)
- if r == '\n' {
- // Per the LSP spec:
- //
- // > If the character value is greater than the line length it
- // > defaults back to the line length.
- break
- }
- remains = remains[w:]
- if r >= 0x10000 {
- // a two point rune
- count++
- // if we finished in a two point rune, do not advance past the first
- if count >= chr {
- break
- }
- }
- p.v.Column += w
- p.v.Offset += w
- }
- return p, nil
-}
diff --git a/internal/span/utf16_test.go b/internal/span/utf16_test.go
deleted file mode 100644
index 1eae7975b..000000000
--- a/internal/span/utf16_test.go
+++ /dev/null
@@ -1,322 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package span_test
-
-import (
- "strings"
- "testing"
-
- "golang.org/x/tools/internal/span"
-)
-
-// The funny character below is 4 bytes long in UTF-8; two UTF-16 code points
-var funnyString = []byte("𐐀23\n𐐀45")
-
-var toUTF16Tests = []struct {
- scenario string
- input []byte
- line int // 1-indexed count
- col int // 1-indexed byte position in line
- offset int // 0-indexed byte offset into input
- resUTF16col int // 1-indexed UTF-16 col number
- pre string // everything before the cursor on the line
- post string // everything from the cursor onwards
- err string // expected error string in call to ToUTF16Column
- issue *bool
-}{
- {
- scenario: "cursor missing content",
- input: nil,
- err: "ToUTF16Column: point is missing position",
- },
- {
- scenario: "cursor missing position",
- input: funnyString,
- line: -1,
- col: -1,
- err: "ToUTF16Column: point is missing position",
- },
- {
- scenario: "cursor missing offset",
- input: funnyString,
- line: 1,
- col: 1,
- offset: -1,
- err: "ToUTF16Column: point is missing offset",
- },
- {
- scenario: "zero length input; cursor at first col, first line",
- input: []byte(""),
- line: 1,
- col: 1,
- offset: 0,
- resUTF16col: 1,
- },
- {
- scenario: "cursor before funny character; first line",
- input: funnyString,
- line: 1,
- col: 1,
- offset: 0,
- resUTF16col: 1,
- pre: "",
- post: "𐐀23",
- },
- {
- scenario: "cursor after funny character; first line",
- input: funnyString,
- line: 1,
- col: 5, // 4 + 1 (1-indexed)
- offset: 4,
- resUTF16col: 3, // 2 + 1 (1-indexed)
- pre: "𐐀",
- post: "23",
- },
- {
- scenario: "cursor after last character on first line",
- input: funnyString,
- line: 1,
- col: 7, // 4 + 1 + 1 + 1 (1-indexed)
- offset: 6, // 4 + 1 + 1
- resUTF16col: 5, // 2 + 1 + 1 + 1 (1-indexed)
- pre: "𐐀23",
- post: "",
- },
- {
- scenario: "cursor before funny character; second line",
- input: funnyString,
- line: 2,
- col: 1,
- offset: 7, // length of first line
- resUTF16col: 1,
- pre: "",
- post: "𐐀45",
- },
- {
- scenario: "cursor after funny character; second line",
- input: funnyString,
- line: 1,
- col: 5, // 4 + 1 (1-indexed)
- offset: 11, // 7 (length of first line) + 4
- resUTF16col: 3, // 2 + 1 (1-indexed)
- pre: "𐐀",
- post: "45",
- },
- {
- scenario: "cursor after last character on second line",
- input: funnyString,
- line: 2,
- col: 7, // 4 + 1 + 1 + 1 (1-indexed)
- offset: 13, // 7 (length of first line) + 4 + 1 + 1
- resUTF16col: 5, // 2 + 1 + 1 + 1 (1-indexed)
- pre: "𐐀45",
- post: "",
- },
- {
- scenario: "cursor beyond end of file",
- input: funnyString,
- line: 2,
- col: 8, // 4 + 1 + 1 + 1 + 1 (1-indexed)
- offset: 14, // 4 + 1 + 1 + 1
- err: "ToUTF16Column: offsets 7-14 outside file contents (13)",
- },
-}
-
-var fromUTF16Tests = []struct {
- scenario string
- input []byte
- line int // 1-indexed line number (isn't actually used)
- offset int // 0-indexed byte offset to beginning of line
- utf16col int // 1-indexed UTF-16 col number
- resCol int // 1-indexed byte position in line
- resOffset int // 0-indexed byte offset into input
- pre string // everything before the cursor on the line
- post string // everything from the cursor onwards
- err string // expected error string in call to ToUTF16Column
-}{
- {
- scenario: "zero length input; cursor at first col, first line",
- input: []byte(""),
- line: 1,
- offset: 0,
- utf16col: 1,
- resCol: 1,
- resOffset: 0,
- pre: "",
- post: "",
- },
- {
- scenario: "missing offset",
- input: funnyString,
- line: 1,
- offset: -1,
- err: "FromUTF16Column: point is missing offset",
- },
- {
- scenario: "cursor before funny character",
- input: funnyString,
- line: 1,
- utf16col: 1,
- resCol: 1,
- resOffset: 0,
- pre: "",
- post: "𐐀23",
- },
- {
- scenario: "cursor after funny character",
- input: funnyString,
- line: 1,
- utf16col: 3,
- resCol: 5,
- resOffset: 4,
- pre: "𐐀",
- post: "23",
- },
- {
- scenario: "cursor after last character on line",
- input: funnyString,
- line: 1,
- utf16col: 5,
- resCol: 7,
- resOffset: 6,
- pre: "𐐀23",
- post: "",
- },
- {
- scenario: "cursor beyond last character on line",
- input: funnyString,
- line: 1,
- offset: 0,
- utf16col: 6,
- resCol: 7,
- resOffset: 6,
- pre: "𐐀23",
- post: "",
- },
- {
- scenario: "cursor before funny character; second line",
- input: funnyString,
- line: 2,
- offset: 7, // length of first line
- utf16col: 1,
- resCol: 1,
- resOffset: 7,
- pre: "",
- post: "𐐀45",
- },
- {
- scenario: "cursor after funny character; second line",
- input: funnyString,
- line: 2,
- offset: 7, // length of first line
- utf16col: 3, // 2 + 1 (1-indexed)
- resCol: 5, // 4 + 1 (1-indexed)
- resOffset: 11, // 7 (length of first line) + 4
- pre: "𐐀",
- post: "45",
- },
- {
- scenario: "cursor after last character on second line",
- input: funnyString,
- line: 2,
- offset: 7, // length of first line
- utf16col: 5, // 2 + 1 + 1 + 1 (1-indexed)
- resCol: 7, // 4 + 1 + 1 + 1 (1-indexed)
- resOffset: 13, // 7 (length of first line) + 4 + 1 + 1
- pre: "𐐀45",
- post: "",
- },
- {
- scenario: "cursor beyond end of file",
- input: funnyString,
- line: 2,
- offset: 7,
- utf16col: 6, // 2 + 1 + 1 + 1 + 1(1-indexed)
- resCol: 8, // 4 + 1 + 1 + 1 + 1 (1-indexed)
- resOffset: 14, // 7 (length of first line) + 4 + 1 + 1 + 1
- err: "FromUTF16Column: chr goes beyond the content",
- },
- {
- scenario: "offset beyond end of file",
- input: funnyString,
- line: 2,
- offset: 14,
- utf16col: 2,
- err: "FromUTF16Column: offset (14) greater than length of content (13)",
- },
-}
-
-func TestToUTF16(t *testing.T) {
- for _, e := range toUTF16Tests {
- t.Run(e.scenario, func(t *testing.T) {
- if e.issue != nil && !*e.issue {
- t.Skip("expected to fail")
- }
- p := span.NewPoint(e.line, e.col, e.offset)
- got, err := span.ToUTF16Column(p, e.input)
- if err != nil {
- if err.Error() != e.err {
- t.Fatalf("expected error %v; got %v", e.err, err)
- }
- return
- }
- if e.err != "" {
- t.Fatalf("unexpected success; wanted %v", e.err)
- }
- if got != e.resUTF16col {
- t.Fatalf("expected result %v; got %v", e.resUTF16col, got)
- }
- pre, post := getPrePost(e.input, p.Offset())
- if string(pre) != e.pre {
- t.Fatalf("expected #%d pre %q; got %q", p.Offset(), e.pre, pre)
- }
- if string(post) != e.post {
- t.Fatalf("expected #%d, post %q; got %q", p.Offset(), e.post, post)
- }
- })
- }
-}
-
-func TestFromUTF16(t *testing.T) {
- for _, e := range fromUTF16Tests {
- t.Run(e.scenario, func(t *testing.T) {
- p := span.NewPoint(e.line, 1, e.offset)
- p, err := span.FromUTF16Column(p, e.utf16col, []byte(e.input))
- if err != nil {
- if err.Error() != e.err {
- t.Fatalf("expected error %v; got %v", e.err, err)
- }
- return
- }
- if e.err != "" {
- t.Fatalf("unexpected success; wanted %v", e.err)
- }
- if p.Column() != e.resCol {
- t.Fatalf("expected resulting col %v; got %v", e.resCol, p.Column())
- }
- if p.Offset() != e.resOffset {
- t.Fatalf("expected resulting offset %v; got %v", e.resOffset, p.Offset())
- }
- pre, post := getPrePost(e.input, p.Offset())
- if string(pre) != e.pre {
- t.Fatalf("expected #%d pre %q; got %q", p.Offset(), e.pre, pre)
- }
- if string(post) != e.post {
- t.Fatalf("expected #%d post %q; got %q", p.Offset(), e.post, post)
- }
- })
- }
-}
-
-func getPrePost(content []byte, offset int) (string, string) {
- pre, post := string(content)[:offset], string(content)[offset:]
- if i := strings.LastIndex(pre, "\n"); i >= 0 {
- pre = pre[i+1:]
- }
- if i := strings.IndexRune(post, '\n'); i >= 0 {
- post = post[:i]
- }
- return pre, post
-}
diff --git a/internal/stack/process.go b/internal/stack/process.go
index ac1936664..8812de952 100644
--- a/internal/stack/process.go
+++ b/internal/stack/process.go
@@ -96,7 +96,7 @@ func (s *Summary) addGoroutine(gr Goroutine) {
s.Calls[index].merge(gr)
}
-//TODO: do we want other grouping strategies?
+// TODO: do we want other grouping strategies?
func (c *Call) merge(gr Goroutine) {
for i := range c.Groups {
canditate := &c.Groups[i]
diff --git a/internal/stack/stacktest/stacktest.go b/internal/stack/stacktest/stacktest.go
index e23f03e03..d778d3c33 100644
--- a/internal/stack/stacktest/stacktest.go
+++ b/internal/stack/stacktest/stacktest.go
@@ -11,7 +11,7 @@ import (
"golang.org/x/tools/internal/stack"
)
-//this is only needed to support pre 1.14 when testing.TB did not have Cleanup
+// this is only needed to support pre 1.14 when testing.TB did not have Cleanup
type withCleanup interface {
Cleanup(func())
}
diff --git a/internal/testenv/exec.go b/internal/testenv/exec.go
new file mode 100644
index 000000000..f103ad9d8
--- /dev/null
+++ b/internal/testenv/exec.go
@@ -0,0 +1,149 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testenv
+
+import (
+ "context"
+ "os"
+ "os/exec"
+ "reflect"
+ "runtime"
+ "strconv"
+ "testing"
+ "time"
+)
+
+// HasExec reports whether the current system can start new processes
+// using os.StartProcess or (more commonly) exec.Command.
+func HasExec() bool {
+ switch runtime.GOOS {
+ case "js", "ios":
+ return false
+ }
+ return true
+}
+
+// NeedsExec checks that the current system can start new processes
+// using os.StartProcess or (more commonly) exec.Command.
+// If not, NeedsExec calls t.Skip with an explanation.
+func NeedsExec(t testing.TB) {
+ if !HasExec() {
+ t.Skipf("skipping test: cannot exec subprocess on %s/%s", runtime.GOOS, runtime.GOARCH)
+ }
+}
+
+// CommandContext is like exec.CommandContext, but:
+// - skips t if the platform does not support os/exec,
+// - if supported, sends SIGQUIT instead of SIGKILL in its Cancel function
+// - if the test has a deadline, adds a Context timeout and (if supported) WaitDelay
+// for an arbitrary grace period before the test's deadline expires,
+// - if Cmd has the Cancel field, fails the test if the command is canceled
+// due to the test's deadline, and
+// - if supported, sets a Cleanup function that verifies that the test did not
+// leak a subprocess.
+func CommandContext(t testing.TB, ctx context.Context, name string, args ...string) *exec.Cmd {
+ t.Helper()
+ NeedsExec(t)
+
+ var (
+ cancelCtx context.CancelFunc
+ gracePeriod time.Duration // unlimited unless the test has a deadline (to allow for interactive debugging)
+ )
+
+ if td, ok := Deadline(t); ok {
+ // Start with a minimum grace period, just long enough to consume the
+ // output of a reasonable program after it terminates.
+ gracePeriod = 100 * time.Millisecond
+ if s := os.Getenv("GO_TEST_TIMEOUT_SCALE"); s != "" {
+ scale, err := strconv.Atoi(s)
+ if err != nil {
+ t.Fatalf("invalid GO_TEST_TIMEOUT_SCALE: %v", err)
+ }
+ gracePeriod *= time.Duration(scale)
+ }
+
+ // If time allows, increase the termination grace period to 5% of the
+ // test's remaining time.
+ testTimeout := time.Until(td)
+ if gp := testTimeout / 20; gp > gracePeriod {
+ gracePeriod = gp
+ }
+
+ // When we run commands that execute subprocesses, we want to reserve two
+ // grace periods to clean up: one for the delay between the first
+ // termination signal being sent (via the Cancel callback when the Context
+ // expires) and the process being forcibly terminated (via the WaitDelay
+ // field), and a second one for the delay becween the process being
+ // terminated and and the test logging its output for debugging.
+ //
+ // (We want to ensure that the test process itself has enough time to
+ // log the output before it is also terminated.)
+ cmdTimeout := testTimeout - 2*gracePeriod
+
+ if cd, ok := ctx.Deadline(); !ok || time.Until(cd) > cmdTimeout {
+ // Either ctx doesn't have a deadline, or its deadline would expire
+ // after (or too close before) the test has already timed out.
+ // Add a shorter timeout so that the test will produce useful output.
+ ctx, cancelCtx = context.WithTimeout(ctx, cmdTimeout)
+ }
+ }
+
+ cmd := exec.CommandContext(ctx, name, args...)
+
+ // Use reflection to set the Cancel and WaitDelay fields, if present.
+ // TODO(bcmills): When we no longer support Go versions below 1.20,
+ // remove the use of reflect and assume that the fields are always present.
+ rc := reflect.ValueOf(cmd).Elem()
+
+ if rCancel := rc.FieldByName("Cancel"); rCancel.IsValid() {
+ rCancel.Set(reflect.ValueOf(func() error {
+ if cancelCtx != nil && ctx.Err() == context.DeadlineExceeded {
+ // The command timed out due to running too close to the test's deadline
+ // (because we specifically set a shorter Context deadline for that
+ // above). There is no way the test did that intentionally — it's too
+ // close to the wire! — so mark it as a test failure. That way, if the
+ // test expects the command to fail for some other reason, it doesn't
+ // have to distinguish between that reason and a timeout.
+ t.Errorf("test timed out while running command: %v", cmd)
+ } else {
+ // The command is being terminated due to ctx being canceled, but
+ // apparently not due to an explicit test deadline that we added.
+ // Log that information in case it is useful for diagnosing a failure,
+ // but don't actually fail the test because of it.
+ t.Logf("%v: terminating command: %v", ctx.Err(), cmd)
+ }
+ return cmd.Process.Signal(Sigquit)
+ }))
+ }
+
+ if rWaitDelay := rc.FieldByName("WaitDelay"); rWaitDelay.IsValid() {
+ rWaitDelay.Set(reflect.ValueOf(gracePeriod))
+ }
+
+ // t.Cleanup was added in Go 1.14; for earlier Go versions,
+ // we just let the Context leak.
+ type Cleanupper interface {
+ Cleanup(func())
+ }
+ if ct, ok := t.(Cleanupper); ok {
+ ct.Cleanup(func() {
+ if cancelCtx != nil {
+ cancelCtx()
+ }
+ if cmd.Process != nil && cmd.ProcessState == nil {
+ t.Errorf("command was started, but test did not wait for it to complete: %v", cmd)
+ }
+ })
+ }
+
+ return cmd
+}
+
+// Command is like exec.Command, but applies the same changes as
+// testenv.CommandContext (with a default Context).
+func Command(t testing.TB, name string, args ...string) *exec.Cmd {
+ t.Helper()
+ return CommandContext(t, context.Background(), name, args...)
+}
diff --git a/internal/testenv/testenv.go b/internal/testenv/testenv.go
index b38123264..8184db0ba 100644
--- a/internal/testenv/testenv.go
+++ b/internal/testenv/testenv.go
@@ -13,29 +13,32 @@ import (
"io/ioutil"
"os"
"runtime"
+ "runtime/debug"
"strings"
"sync"
+ "testing"
"time"
+ "golang.org/x/tools/internal/goroot"
+
exec "golang.org/x/sys/execabs"
)
-// Testing is an abstraction of a *testing.T.
-type Testing interface {
- Skipf(format string, args ...interface{})
- Fatalf(format string, args ...interface{})
-}
-
-type helperer interface {
- Helper()
-}
-
// packageMainIsDevel reports whether the module containing package main
// is a development version (if module information is available).
-//
-// Builds in GOPATH mode and builds that lack module information are assumed to
-// be development versions.
-var packageMainIsDevel = func() bool { return true }
+func packageMainIsDevel() bool {
+ info, ok := debug.ReadBuildInfo()
+ if !ok {
+ // Most test binaries currently lack build info, but this should become more
+ // permissive once https://golang.org/issue/33976 is fixed.
+ return true
+ }
+
+ // Note: info.Main.Version describes the version of the module containing
+ // package main, not the version of “the main module”.
+ // See https://golang.org/issue/33975.
+ return info.Main.Version == "(devel)"
+}
var checkGoGoroot struct {
once sync.Once
@@ -87,6 +90,23 @@ func hasTool(tool string) error {
GOROOT := strings.TrimSpace(string(out))
if GOROOT != runtime.GOROOT() {
checkGoGoroot.err = fmt.Errorf("'go env GOROOT' does not match runtime.GOROOT:\n\tgo env: %s\n\tGOROOT: %s", GOROOT, runtime.GOROOT())
+ return
+ }
+
+ // Also ensure that that GOROOT includes a compiler: 'go' commands
+ // don't in general work without it, and some builders
+ // (such as android-amd64-emu) seem to lack it in the test environment.
+ cmd := exec.Command(tool, "tool", "-n", "compile")
+ stderr := new(bytes.Buffer)
+ stderr.Write([]byte("\n"))
+ cmd.Stderr = stderr
+ out, err = cmd.Output()
+ if err != nil {
+ checkGoGoroot.err = fmt.Errorf("%v: %v%s", cmd, err, stderr)
+ return
+ }
+ if _, err := os.Stat(string(bytes.TrimSpace(out))); err != nil {
+ checkGoGoroot.err = err
}
})
if checkGoGoroot.err != nil {
@@ -162,14 +182,13 @@ func allowMissingTool(tool string) bool {
// NeedsTool skips t if the named tool is not present in the path.
// As a special case, "cgo" means "go" is present and can compile cgo programs.
-func NeedsTool(t Testing, tool string) {
- if t, ok := t.(helperer); ok {
- t.Helper()
- }
+func NeedsTool(t testing.TB, tool string) {
err := hasTool(tool)
if err == nil {
return
}
+
+ t.Helper()
if allowMissingTool(tool) {
t.Skipf("skipping because %s tool not available: %v", tool, err)
} else {
@@ -179,10 +198,8 @@ func NeedsTool(t Testing, tool string) {
// NeedsGoPackages skips t if the go/packages driver (or 'go' tool) implied by
// the current process environment is not present in the path.
-func NeedsGoPackages(t Testing) {
- if t, ok := t.(helperer); ok {
- t.Helper()
- }
+func NeedsGoPackages(t testing.TB) {
+ t.Helper()
tool := os.Getenv("GOPACKAGESDRIVER")
switch tool {
@@ -202,10 +219,8 @@ func NeedsGoPackages(t Testing) {
// NeedsGoPackagesEnv skips t if the go/packages driver (or 'go' tool) implied
// by env is not present in the path.
-func NeedsGoPackagesEnv(t Testing, env []string) {
- if t, ok := t.(helperer); ok {
- t.Helper()
- }
+func NeedsGoPackagesEnv(t testing.TB, env []string) {
+ t.Helper()
for _, v := range env {
if strings.HasPrefix(v, "GOPACKAGESDRIVER=") {
@@ -222,24 +237,21 @@ func NeedsGoPackagesEnv(t Testing, env []string) {
NeedsGoPackages(t)
}
-// NeedsGoBuild skips t if the current system can't build programs with ``go build''
+// NeedsGoBuild skips t if the current system can't build programs with “go build”
// and then run them with os.StartProcess or exec.Command.
-// android, and darwin/arm systems don't have the userspace go build needs to run,
+// Android doesn't have the userspace go build needs to run,
// and js/wasm doesn't support running subprocesses.
-func NeedsGoBuild(t Testing) {
- if t, ok := t.(helperer); ok {
- t.Helper()
- }
+func NeedsGoBuild(t testing.TB) {
+ t.Helper()
+
+ // This logic was derived from internal/testing.HasGoBuild and
+ // may need to be updated as that function evolves.
NeedsTool(t, "go")
switch runtime.GOOS {
case "android", "js":
t.Skipf("skipping test: %v can't build and run Go binaries", runtime.GOOS)
- case "darwin":
- if strings.HasPrefix(runtime.GOARCH, "arm") {
- t.Skipf("skipping test: darwin/arm can't build and run Go binaries")
- }
}
}
@@ -289,29 +301,25 @@ func Go1Point() int {
// NeedsGo1Point skips t if the Go version used to run the test is older than
// 1.x.
-func NeedsGo1Point(t Testing, x int) {
- if t, ok := t.(helperer); ok {
- t.Helper()
- }
+func NeedsGo1Point(t testing.TB, x int) {
if Go1Point() < x {
+ t.Helper()
t.Skipf("running Go version %q is version 1.%d, older than required 1.%d", runtime.Version(), Go1Point(), x)
}
}
// SkipAfterGo1Point skips t if the Go version used to run the test is newer than
// 1.x.
-func SkipAfterGo1Point(t Testing, x int) {
- if t, ok := t.(helperer); ok {
- t.Helper()
- }
+func SkipAfterGo1Point(t testing.TB, x int) {
if Go1Point() > x {
+ t.Helper()
t.Skipf("running Go version %q is version 1.%d, newer than maximum 1.%d", runtime.Version(), Go1Point(), x)
}
}
// Deadline returns the deadline of t, if known,
// using the Deadline method added in Go 1.15.
-func Deadline(t Testing) (time.Time, bool) {
+func Deadline(t testing.TB) (time.Time, bool) {
td, ok := t.(interface {
Deadline() (time.Time, bool)
})
@@ -320,3 +328,65 @@ func Deadline(t Testing) (time.Time, bool) {
}
return td.Deadline()
}
+
+// WriteImportcfg writes an importcfg file used by the compiler or linker to
+// dstPath containing entries for the packages in std and cmd in addition
+// to the package to package file mappings in additionalPackageFiles.
+func WriteImportcfg(t testing.TB, dstPath string, additionalPackageFiles map[string]string) {
+ importcfg, err := goroot.Importcfg()
+ for k, v := range additionalPackageFiles {
+ importcfg += fmt.Sprintf("\npackagefile %s=%s", k, v)
+ }
+ if err != nil {
+ t.Fatalf("preparing the importcfg failed: %s", err)
+ }
+ ioutil.WriteFile(dstPath, []byte(importcfg), 0655)
+ if err != nil {
+ t.Fatalf("writing the importcfg failed: %s", err)
+ }
+}
+
+var (
+ gorootOnce sync.Once
+ gorootPath string
+ gorootErr error
+)
+
+func findGOROOT() (string, error) {
+ gorootOnce.Do(func() {
+ gorootPath = runtime.GOROOT()
+ if gorootPath != "" {
+ // If runtime.GOROOT() is non-empty, assume that it is valid. (It might
+ // not be: for example, the user may have explicitly set GOROOT
+ // to the wrong directory.)
+ return
+ }
+
+ cmd := exec.Command("go", "env", "GOROOT")
+ out, err := cmd.Output()
+ if err != nil {
+ gorootErr = fmt.Errorf("%v: %v", cmd, err)
+ }
+ gorootPath = strings.TrimSpace(string(out))
+ })
+
+ return gorootPath, gorootErr
+}
+
+// GOROOT reports the path to the directory containing the root of the Go
+// project source tree. This is normally equivalent to runtime.GOROOT, but
+// works even if the test binary was built with -trimpath.
+//
+// If GOROOT cannot be found, GOROOT skips t if t is non-nil,
+// or panics otherwise.
+func GOROOT(t testing.TB) string {
+ path, err := findGOROOT()
+ if err != nil {
+ if t == nil {
+ panic(err)
+ }
+ t.Helper()
+ t.Skip(err)
+ }
+ return path
+}
diff --git a/internal/testenv/testenv_112.go b/internal/testenv/testenv_112.go
deleted file mode 100644
index 4b6e57d68..000000000
--- a/internal/testenv/testenv_112.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.12
-// +build go1.12
-
-package testenv
-
-import "runtime/debug"
-
-func packageMainIsDevelModule() bool {
- info, ok := debug.ReadBuildInfo()
- if !ok {
- // Most test binaries currently lack build info, but this should become more
- // permissive once https://golang.org/issue/33976 is fixed.
- return true
- }
-
- // Note: info.Main.Version describes the version of the module containing
- // package main, not the version of “the main module”.
- // See https://golang.org/issue/33975.
- return info.Main.Version == "(devel)"
-}
-
-func init() {
- packageMainIsDevel = packageMainIsDevelModule
-}
diff --git a/internal/testenv/testenv_notunix.go b/internal/testenv/testenv_notunix.go
new file mode 100644
index 000000000..74de6f0a8
--- /dev/null
+++ b/internal/testenv/testenv_notunix.go
@@ -0,0 +1,14 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows || plan9 || (js && wasm)
+// +build windows plan9 js,wasm
+
+package testenv
+
+import "os"
+
+// Sigquit is the signal to send to kill a hanging subprocess.
+// On Unix we send SIGQUIT, but on non-Unix we only have os.Kill.
+var Sigquit = os.Kill
diff --git a/internal/testenv/testenv_unix.go b/internal/testenv/testenv_unix.go
new file mode 100644
index 000000000..bc6af1ff8
--- /dev/null
+++ b/internal/testenv/testenv_unix.go
@@ -0,0 +1,14 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
+// +build unix aix darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package testenv
+
+import "syscall"
+
+// Sigquit is the signal to send to kill a hanging subprocess.
+// Send SIGQUIT to get a stack trace.
+var Sigquit = syscall.SIGQUIT
diff --git a/internal/tokeninternal/tokeninternal.go b/internal/tokeninternal/tokeninternal.go
new file mode 100644
index 000000000..a3fb2d4f2
--- /dev/null
+++ b/internal/tokeninternal/tokeninternal.go
@@ -0,0 +1,59 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// package tokeninternal provides access to some internal features of the token
+// package.
+package tokeninternal
+
+import (
+ "go/token"
+ "sync"
+ "unsafe"
+)
+
+// GetLines returns the table of line-start offsets from a token.File.
+func GetLines(file *token.File) []int {
+ // token.File has a Lines method on Go 1.21 and later.
+ if file, ok := (interface{})(file).(interface{ Lines() []int }); ok {
+ return file.Lines()
+ }
+
+ // This declaration must match that of token.File.
+ // This creates a risk of dependency skew.
+ // For now we check that the size of the two
+ // declarations is the same, on the (fragile) assumption
+ // that future changes would add fields.
+ type tokenFile119 struct {
+ _ string
+ _ int
+ _ int
+ mu sync.Mutex // we're not complete monsters
+ lines []int
+ _ []struct{}
+ }
+ type tokenFile118 struct {
+ _ *token.FileSet // deleted in go1.19
+ tokenFile119
+ }
+
+ type uP = unsafe.Pointer
+ switch unsafe.Sizeof(*file) {
+ case unsafe.Sizeof(tokenFile118{}):
+ var ptr *tokenFile118
+ *(*uP)(uP(&ptr)) = uP(file)
+ ptr.mu.Lock()
+ defer ptr.mu.Unlock()
+ return ptr.lines
+
+ case unsafe.Sizeof(tokenFile119{}):
+ var ptr *tokenFile119
+ *(*uP)(uP(&ptr)) = uP(file)
+ ptr.mu.Lock()
+ defer ptr.mu.Unlock()
+ return ptr.lines
+
+ default:
+ panic("unexpected token.File size")
+ }
+}
diff --git a/internal/tool/tool.go b/internal/tool/tool.go
index 526b6b7ae..ec3a6bb06 100644
--- a/internal/tool/tool.go
+++ b/internal/tool/tool.go
@@ -92,6 +92,10 @@ func Main(ctx context.Context, app Application, args []string) {
if err := Run(ctx, s, app, args); err != nil {
fmt.Fprintf(s.Output(), "%s: %v\n", app.Name(), err)
if _, printHelp := err.(commandLineError); printHelp {
+ // TODO(adonovan): refine this. It causes
+ // any command-line error to result in the full
+ // usage message, which typically obscures
+ // the actual error.
s.Usage()
}
os.Exit(2)
diff --git a/internal/typeparams/common.go b/internal/typeparams/common.go
index ab6b30b83..cfba8189f 100644
--- a/internal/typeparams/common.go
+++ b/internal/typeparams/common.go
@@ -16,11 +16,10 @@
// Additionally, this package contains common utilities for working with the
// new generic constructs, to supplement the standard library APIs. Notably,
// the StructuralTerms API computes a minimal representation of the structural
-// restrictions on a type parameter. In the future, this API may be available
-// from go/types.
+// restrictions on a type parameter.
//
-// See the example/README.md for a more detailed guide on how to update tools
-// to support generics.
+// An external version of these APIs is available in the
+// golang.org/x/exp/typeparams module.
package typeparams
import (
@@ -88,7 +87,6 @@ func IsTypeParam(t types.Type) bool {
func OriginMethod(fn *types.Func) *types.Func {
recv := fn.Type().(*types.Signature).Recv()
if recv == nil {
-
return fn
}
base := recv.Type()
@@ -121,15 +119,15 @@ func OriginMethod(fn *types.Func) *types.Func {
//
// For example, consider the following type declarations:
//
-// type Interface[T any] interface {
-// Accept(T)
-// }
+// type Interface[T any] interface {
+// Accept(T)
+// }
//
-// type Container[T any] struct {
-// Element T
-// }
+// type Container[T any] struct {
+// Element T
+// }
//
-// func (c Container[T]) Accept(t T) { c.Element = t }
+// func (c Container[T]) Accept(t T) { c.Element = t }
//
// In this case, GenericAssignableTo reports that instantiations of Container
// are assignable to the corresponding instantiation of Interface.
diff --git a/internal/typeparams/copytermlist.go b/internal/typeparams/copytermlist.go
index b8f458ac3..5357f9d2f 100644
--- a/internal/typeparams/copytermlist.go
+++ b/internal/typeparams/copytermlist.go
@@ -42,7 +42,7 @@ func doCopy() error {
return err
}
file.Name.Name = "typeparams"
- file.Doc = &ast.CommentGroup{List: []*ast.Comment{&ast.Comment{Text: "DO NOT MODIFY"}}}
+ file.Doc = &ast.CommentGroup{List: []*ast.Comment{{Text: "DO NOT MODIFY"}}}
var needImport bool
selectorType := reflect.TypeOf((*ast.SelectorExpr)(nil))
astutil.Apply(file, func(c *astutil.Cursor) bool {
@@ -70,8 +70,8 @@ func doCopy() error {
}
needImport = true
c.Replace(&ast.SelectorExpr{
- X: ast.NewIdent("types"),
- Sel: ast.NewIdent(id.Name),
+ X: &ast.Ident{NamePos: id.NamePos, Name: "types"},
+ Sel: &ast.Ident{NamePos: id.NamePos, Name: id.Name, Obj: id.Obj},
})
}
return true
diff --git a/internal/typeparams/coretype.go b/internal/typeparams/coretype.go
new file mode 100644
index 000000000..993135ec9
--- /dev/null
+++ b/internal/typeparams/coretype.go
@@ -0,0 +1,122 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeparams
+
+import (
+ "go/types"
+)
+
+// CoreType returns the core type of T or nil if T does not have a core type.
+//
+// See https://go.dev/ref/spec#Core_types for the definition of a core type.
+func CoreType(T types.Type) types.Type {
+ U := T.Underlying()
+ if _, ok := U.(*types.Interface); !ok {
+ return U // for non-interface types,
+ }
+
+ terms, err := _NormalTerms(U)
+ if len(terms) == 0 || err != nil {
+ // len(terms) -> empty type set of interface.
+ // err != nil => U is invalid, exceeds complexity bounds, or has an empty type set.
+ return nil // no core type.
+ }
+
+ U = terms[0].Type().Underlying()
+ var identical int // i in [0,identical) => Identical(U, terms[i].Type().Underlying())
+ for identical = 1; identical < len(terms); identical++ {
+ if !types.Identical(U, terms[identical].Type().Underlying()) {
+ break
+ }
+ }
+
+ if identical == len(terms) {
+ // https://go.dev/ref/spec#Core_types
+ // "There is a single type U which is the underlying type of all types in the type set of T"
+ return U
+ }
+ ch, ok := U.(*types.Chan)
+ if !ok {
+ return nil // no core type as identical < len(terms) and U is not a channel.
+ }
+ // https://go.dev/ref/spec#Core_types
+ // "the type chan E if T contains only bidirectional channels, or the type chan<- E or
+ // <-chan E depending on the direction of the directional channels present."
+ for chans := identical; chans < len(terms); chans++ {
+ curr, ok := terms[chans].Type().Underlying().(*types.Chan)
+ if !ok {
+ return nil
+ }
+ if !types.Identical(ch.Elem(), curr.Elem()) {
+ return nil // channel elements are not identical.
+ }
+ if ch.Dir() == types.SendRecv {
+ // ch is bidirectional. We can safely always use curr's direction.
+ ch = curr
+ } else if curr.Dir() != types.SendRecv && ch.Dir() != curr.Dir() {
+ // ch and curr are not bidirectional and not the same direction.
+ return nil
+ }
+ }
+ return ch
+}
+
+// _NormalTerms returns a slice of terms representing the normalized structural
+// type restrictions of a type, if any.
+//
+// For all types other than *types.TypeParam, *types.Interface, and
+// *types.Union, this is just a single term with Tilde() == false and
+// Type() == typ. For *types.TypeParam, *types.Interface, and *types.Union, see
+// below.
+//
+// Structural type restrictions of a type parameter are created via
+// non-interface types embedded in its constraint interface (directly, or via a
+// chain of interface embeddings). For example, in the declaration type
+// T[P interface{~int; m()}] int the structural restriction of the type
+// parameter P is ~int.
+//
+// With interface embedding and unions, the specification of structural type
+// restrictions may be arbitrarily complex. For example, consider the
+// following:
+//
+// type A interface{ ~string|~[]byte }
+//
+// type B interface{ int|string }
+//
+// type C interface { ~string|~int }
+//
+// type T[P interface{ A|B; C }] int
+//
+// In this example, the structural type restriction of P is ~string|int: A|B
+// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int,
+// which when intersected with C (~string|~int) yields ~string|int.
+//
+// _NormalTerms computes these expansions and reductions, producing a
+// "normalized" form of the embeddings. A structural restriction is normalized
+// if it is a single union containing no interface terms, and is minimal in the
+// sense that removing any term changes the set of types satisfying the
+// constraint. It is left as a proof for the reader that, modulo sorting, there
+// is exactly one such normalized form.
+//
+// Because the minimal representation always takes this form, _NormalTerms
+// returns a slice of tilde terms corresponding to the terms of the union in
+// the normalized structural restriction. An error is returned if the type is
+// invalid, exceeds complexity bounds, or has an empty type set. In the latter
+// case, _NormalTerms returns ErrEmptyTypeSet.
+//
+// _NormalTerms makes no guarantees about the order of terms, except that it
+// is deterministic.
+func _NormalTerms(typ types.Type) ([]*Term, error) {
+ switch typ := typ.(type) {
+ case *TypeParam:
+ return StructuralTerms(typ)
+ case *Union:
+ return UnionTermSet(typ)
+ case *types.Interface:
+ return InterfaceTermSet(typ)
+ default:
+ return []*Term{NewTerm(false, typ)}, nil
+ }
+}
diff --git a/internal/typeparams/coretype_test.go b/internal/typeparams/coretype_test.go
new file mode 100644
index 000000000..288439952
--- /dev/null
+++ b/internal/typeparams/coretype_test.go
@@ -0,0 +1,105 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeparams_test
+
+import (
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "testing"
+
+ "golang.org/x/tools/internal/typeparams"
+)
+
+func TestCoreType(t *testing.T) {
+ if !typeparams.Enabled {
+ t.Skip("TestCoreType requires type parameters.")
+ }
+
+ const source = `
+ package P
+
+ type Named int
+
+ type A any
+ type B interface{~int}
+ type C interface{int}
+ type D interface{Named}
+ type E interface{~int|interface{Named}}
+ type F interface{~int|~float32}
+ type G interface{chan int|interface{chan int}}
+ type H interface{chan int|chan float32}
+ type I interface{chan<- int|chan int}
+ type J interface{chan int|chan<- int}
+ type K interface{<-chan int|chan int}
+ type L interface{chan int|<-chan int}
+ type M interface{chan int|chan Named}
+ type N interface{<-chan int|chan<- int}
+ type O interface{chan int|bool}
+ type P struct{ Named }
+ type Q interface{ Foo() }
+ type R interface{ Foo() ; Named }
+ type S interface{ Foo() ; ~int }
+
+ type T interface{chan int|interface{chan int}|<-chan int}
+`
+
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, "hello.go", source, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var conf types.Config
+ pkg, err := conf.Check("P", fset, []*ast.File{f}, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for _, test := range []struct {
+ expr string // type expression of Named type
+ want string // expected core type (or "<nil>" if none)
+ }{
+ {"Named", "int"}, // Underlying type is not interface.
+ {"A", "<nil>"}, // Interface has no terms.
+ {"B", "int"}, // Tilde term.
+ {"C", "int"}, // Non-tilde term.
+ {"D", "int"}, // Named term.
+ {"E", "int"}, // Identical underlying types.
+ {"F", "<nil>"}, // Differing underlying types.
+ {"G", "chan int"}, // Identical Element types.
+ {"H", "<nil>"}, // Element type int has differing underlying type to float32.
+ {"I", "chan<- int"}, // SendRecv followed by SendOnly
+ {"J", "chan<- int"}, // SendOnly followed by SendRecv
+ {"K", "<-chan int"}, // RecvOnly followed by SendRecv
+ {"L", "<-chan int"}, // SendRecv followed by RecvOnly
+ {"M", "<nil>"}, // Element type int is not *identical* to Named.
+ {"N", "<nil>"}, // Differing channel directions
+ {"O", "<nil>"}, // A channel followed by a non-channel.
+ {"P", "struct{P.Named}"}, // Embedded type.
+ {"Q", "<nil>"}, // interface type with no terms and functions
+ {"R", "int"}, // interface type with both terms and functions.
+ {"S", "int"}, // interface type with a tilde term
+ {"T", "<-chan int"}, // Prefix of 2 terms that are identical before switching to channel.
+ } {
+ // Eval() expr for its type.
+ tv, err := types.Eval(fset, pkg, 0, test.expr)
+ if err != nil {
+ t.Fatalf("Eval(%s) failed: %v", test.expr, err)
+ }
+
+ ct := typeparams.CoreType(tv.Type)
+ var got string
+ if ct == nil {
+ got = "<nil>"
+ } else {
+ got = ct.String()
+ }
+ if got != test.want {
+ t.Errorf("coreType(%s) = %v, want %v", test.expr, got, test.want)
+ }
+ }
+}
diff --git a/internal/typeparams/example/README.md b/internal/typeparams/example/README.md
deleted file mode 100644
index 9877735bf..000000000
--- a/internal/typeparams/example/README.md
+++ /dev/null
@@ -1,328 +0,0 @@
-<!-- Autogenerated by weave; DO NOT EDIT -->
-<!-- To regenerate the readme, run: -->
-<!-- go run golang.org/x/example/gotypes@latest generic-go-types.md -->
-
-# Updating tools to support type parameters.
-
-This guide is maintained by Rob Findley (`rfindley@google.com`).
-
-**status**: this document is currently a work-in-progress. See
-[golang/go#50447](https://go.dev/issues/50447) for more details.
-
-1. [Introduction](#introduction)
-1. [Summary of new language features and their APIs](#summary-of-new-language-features-and-their-apis)
-1. [Examples](#examples)
- 1. [Generic types](#generic-types)
- 1. [Constraint Interfaces](#constraint-interfaces)
- 1. [Instantiation](#instantiation)
-1. [Updating tools while building at older Go versions](#updating-tools-while-building-at-older-go-versions)
-1. [Further help](#further-help)
-
-# Introduction
-
-With Go 1.18, Go now supports generic programming via type parameters. This
-document is intended to serve as a guide for tool authors that want to update
-their tools to support the new language constructs introduced for generic Go.
-
-This guide assumes some knowledge of the language changes to support generics.
-See the following references for more information:
-
-- The [original proposal](https://go.dev/issue/43651) for type parameters.
-- The [addendum for type sets](https://go.dev/issue/45346).
-- The [latest language specfication](https://tip.golang.org/ref/spec) (still in-progress as of 2021-01-11).
-- The proposals for new APIs in
- [go/token and go/ast](https://go.dev/issue/47781), and in
- [go/types](https://go.dev/issue/47916).
-
-It also assumes existing knowledge of `go/ast` and `go/types`. If you're just
-getting started,
-[x/example/gotypes](https://github.com/golang/example/tree/master/gotypes) is
-a great introduction (and was the inspiration for this guide).
-
-# Summary of new language features and their APIs
-
-While generic Go programming is a large change to the language, at a high level
-it introduces only a few new concepts. Specifically, we can break down our
-discussion into the following three broad categories. In each category, the
-relevant new APIs are listed (some constructors and getters/setters may be
-elided where they are trivial).
-
-**Generic types**. Types and functions may be _generic_, meaning their
-declaration has a non-empty _type parameter list_: as in `type List[T any]
-...` or `func f[T1, T2 any]() { ... }`. Type parameter lists define placeholder
-types (_type parameters_), scoped to the declaration, which may be substituted
-by any type satisfying their corresponding _constraint interface_ to
-_instantiate_ a new type or function.
-
-Generic types may have methods, which declare `receiver type parameters` via
-their receiver type expression: `func (r T[P1, ..., PN]) method(...) (...)
-{...}`.
-
-_New APIs_:
- - The field `ast.TypeSpec.TypeParams` holds the type parameter list syntax for
- type declarations.
- - The field `ast.FuncType.TypeParams` holds the type parameter list syntax for
- function declarations.
- - The type `types.TypeParam` is a `types.Type` representing a type parameter.
- On this type, the `Constraint` and `SetConstraint` methods allow
- getting/setting the constraint, the `Index` method returns the index of the
- type parameter in the type parameter list that declares it, and the `Obj`
- method returns the object declared in the declaration scope for the type
- parameter (a `types.TypeName`).
- - The type `types.TypeParamList` holds a list of type parameters.
- - The method `types.Named.TypeParams` returns the type parameters for a type
- declaration.
- - The method `types.Named.SetTypeParams` sets type parameters on a defined
- type.
- - The function `types.NewSignatureType` creates a new (possibly generic)
- signature type.
- - The method `types.Signature.RecvTypeParams` returns the receiver type
- parameters for a method.
- - The method `types.Signature.TypeParams` returns the type parameters for
- a function.
-
-**Constraint Interfaces**: type parameter constraints are interfaces, expressed
-via an interface type expression. Interfaces that are only used in constraint
-position are permitted new embedded elements composed of tilde expressions
-(`~T`) and unions (`A | B | ~C`). The new builtin interface type `comparable`
-is implemented by types for which `==` and `!=` are valid. As a special case,
-the `interface` keyword may be omitted from constraint expressions if it may be
-implied (in which case we say the interface is _implicit_).
-
-_New APIs_:
- - The constant `token.TILDE` is used to represent tilde expressions as an
- `ast.UnaryExpr`.
- - Union expressions are represented as an `ast.BinaryExpr` using `|`. This
- means that `ast.BinaryExpr` may now be both a type and value expression.
- - The method `types.Interface.IsImplicit` reports whether the `interface`
- keyword was elided from this interface.
- - The method `types.Interface.MarkImplicit` marks an interface as being
- implicit.
- - The method `types.Interface.IsComparable` reports whether every type in an
- interface's type set is comparable.
- - The method `types.Interface.IsMethodSet` reports whether an interface is
- defined entirely by its methods (has no _specific types_).
- - The type `types.Union` is a type that represents an embedded union
- expression in an interface. May only appear as an embedded element in
- interfaces.
- - The type `types.Term` represents a (possibly tilde) term of a union.
-
-**Instantiation**: generic types and functions may be _instantiated_ to create
-non-generic types and functions by providing _type arguments_ (`var x T[int]`).
-Function type arguments may be _inferred_ via function arguments, or via
-type parameter constraints.
-
-_New APIs_:
- - The type `ast.IndexListExpr` holds index expressions with multiple indices,
- as occurs in instantiation expressions with multiple type arguments, or in
- receivers with multiple type parameters.
- - The function `types.Instantiate` instantiates a generic type with type arguments.
- - The type `types.Context` is an opaque instantiation context that may be
- shared to reduce duplicate instances.
- - The field `types.Config.Context` holds a shared `Context` to use for
- instantiation while type-checking.
- - The type `types.TypeList` holds a list of types.
- - The type `types.ArgumentError` holds an error associated with a specific
- argument index. Used to represent instantiation errors.
- - The field `types.Info.Instances` maps instantiated identifiers to information
- about the resulting type instance.
- - The type `types.Instance` holds information about a type or function
- instance.
- - The method `types.Named.TypeArgs` reports the type arguments used to
- instantiate a named type.
-
-# Examples
-
-The following examples demonstrate the new APIs above, and discuss their
-properties. All examples are runnable, contained in subdirectories of the
-directory holding this README.
-
-## Generic types
-
-### Type parameter lists
-
-Suppose we want to understand the generic library below, which defines a generic
-`Pair`, a constraint interface `Constraint`, and a generic function `MakePair`.
-
-```
-package main
-
-type Constraint interface {
- Value() interface{}
-}
-
-type Pair[L, R any] struct {
- left L
- right R
-}
-
-func MakePair[L, R Constraint](l L, r R) Pair[L, R] {
- return Pair[L, R]{l, r}
-}
-```
-
-We can use the new `TypeParams` fields in `ast.TypeSpec` and `ast.FuncType` to
-access the syntax of the type parameter list. From there, we can access type
-parameter types in at least three ways:
- - by looking up type parameter definitions in `types.Info`
- - by calling `TypeParams()` on `types.Named` or `types.Signature`
- - by looking up type parameter objects in the declaration scope. Note that
- there now may be a scope associated with an `ast.TypeSpec` node.
-
-```
-func PrintTypeParams(fset *token.FileSet, file *ast.File) error {
- conf := types.Config{Importer: importer.Default()}
- info := &types.Info{
- Scopes: make(map[ast.Node]*types.Scope),
- Defs: make(map[*ast.Ident]types.Object),
- }
- _, err := conf.Check("hello", fset, []*ast.File{file}, info)
- if err != nil {
- return err
- }
-
- // For convenience, we can use ast.Inspect to find the nodes we want to
- // investigate.
- ast.Inspect(file, func(n ast.Node) bool {
- var name *ast.Ident // the name of the generic object, or nil
- var tparamSyntax *ast.FieldList // the list of type parameter fields
- var tparamTypes *types.TypeParamList // the list of type parameter types
- var scopeNode ast.Node // the node associated with the declaration scope
-
- switch n := n.(type) {
- case *ast.TypeSpec:
- name = n.Name
- tparamSyntax = n.TypeParams
- tparamTypes = info.Defs[name].Type().(*types.Named).TypeParams()
- name = n.Name
- scopeNode = n
- case *ast.FuncDecl:
- name = n.Name
- tparamSyntax = n.Type.TypeParams
- tparamTypes = info.Defs[name].Type().(*types.Signature).TypeParams()
- scopeNode = n.Type
- }
-
- if name == nil {
- return true // not a generic object
- }
-
- // Option 1: find type parameters by looking at their declaring field list.
- if tparamSyntax != nil {
- fmt.Printf("%s has a type parameter field list with %d fields\n", name.Name, tparamSyntax.NumFields())
- for _, field := range tparamSyntax.List {
- for _, name := range field.Names {
- tparam := info.Defs[name]
- fmt.Printf(" field %s defines an object %q\n", name.Name, tparam)
- }
- }
- } else {
- fmt.Printf("%s does not have a type parameter list\n", name.Name)
- }
-
- // Option 2: find type parameters via the TypeParams() method on the
- // generic type.
- fmt.Printf("%s has %d type parameters:\n", name.Name, tparamTypes.Len())
- for i := 0; i < tparamTypes.Len(); i++ {
- tparam := tparamTypes.At(i)
- fmt.Printf(" %s has constraint %s\n", tparam, tparam.Constraint())
- }
-
- // Option 3: find type parameters by looking in the declaration scope.
- scope, ok := info.Scopes[scopeNode]
- if ok {
- fmt.Printf("%s has a scope with %d objects:\n", name.Name, scope.Len())
- for _, name := range scope.Names() {
- fmt.Printf(" %s is a %T\n", name, scope.Lookup(name))
- }
- } else {
- fmt.Printf("%s does not have a scope\n", name.Name)
- }
-
- return true
- })
- return nil
-}
-```
-
-This program produces the following output. Note that not every type spec has
-a scope.
-
-```
-> go run golang.org/x/tools/internal/typeparams/example/findtypeparams
-Constraint does not have a type parameter list
-Constraint has 0 type parameters:
-Constraint does not have a scope
-Pair has a type parameter field list with 2 fields
- field L defines an object "type parameter L any"
- field R defines an object "type parameter R any"
-Pair has 2 type parameters:
- L has constraint any
- R has constraint any
-Pair has a scope with 2 objects:
- L is a *types.TypeName
- R is a *types.TypeName
-MakePair has a type parameter field list with 2 fields
- field L defines an object "type parameter L hello.Constraint"
- field R defines an object "type parameter R hello.Constraint"
-MakePair has 2 type parameters:
- L has constraint hello.Constraint
- R has constraint hello.Constraint
-MakePair has a scope with 4 objects:
- L is a *types.TypeName
- R is a *types.TypeName
- l is a *types.Var
- r is a *types.Var
-```
-
-### Methods on generic types
-
-**TODO**
-
-## Constraint Interfaces
-
-### New interface elements
-
-**TODO**
-
-### Implicit interfaces
-
-**TODO**
-
-### Type sets
-
-**TODO**
-
-## Instantiation
-
-### Finding instantiated types
-
-**TODO**
-
-### Creating new instantiated types
-
-**TODO**
-
-### Using a shared context
-
-**TODO**
-
-# Updating tools while building at older Go versions
-
-In the examples above, we can see how a lot of the new APIs integrate with
-existing usage of `go/ast` or `go/types`. However, most tools still need to
-build at older Go versions, and handling the new language constructs in-line
-will break builds at older Go versions.
-
-For this purpose, the `x/exp/typeparams` package provides functions and types
-that proxy the new APIs (with stub implementations at older Go versions).
-**NOTE**: does not yet exist -- see
-[golang/go#50447](https://go.dev/issues/50447) for more information.
-
-# Further help
-
-If you're working on updating a tool to support generics, and need help, please
-feel free to reach out for help in any of the following ways:
- - Via the [golang-tools](https://groups.google.com/g/golang-tools) mailing list.
- - Directly to me via email (`rfindley@google.com`).
- - For bugs, you can [file an issue](https://github.com/golang/go/issues/new/choose).
diff --git a/internal/typeparams/example/findtypeparams/main.go b/internal/typeparams/example/findtypeparams/main.go
deleted file mode 100644
index 0fe801123..000000000
--- a/internal/typeparams/example/findtypeparams/main.go
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.18
-// +build go1.18
-
-package main
-
-import (
- "fmt"
- "go/ast"
- "go/importer"
- "go/parser"
- "go/token"
- "go/types"
- "log"
-)
-
-const hello = `
-//!+input
-package main
-
-type Constraint interface {
- Value() interface{}
-}
-
-type Pair[L, R any] struct {
- left L
- right R
-}
-
-func MakePair[L, R Constraint](l L, r R) Pair[L, R] {
- return Pair[L, R]{l, r}
-}
-//!-input
-`
-
-//!+print
-func PrintTypeParams(fset *token.FileSet, file *ast.File) error {
- conf := types.Config{Importer: importer.Default()}
- info := &types.Info{
- Scopes: make(map[ast.Node]*types.Scope),
- Defs: make(map[*ast.Ident]types.Object),
- }
- _, err := conf.Check("hello", fset, []*ast.File{file}, info)
- if err != nil {
- return err
- }
-
- // For convenience, we can use ast.Inspect to find the nodes we want to
- // investigate.
- ast.Inspect(file, func(n ast.Node) bool {
- var name *ast.Ident // the name of the generic object, or nil
- var tparamSyntax *ast.FieldList // the list of type parameter fields
- var tparamTypes *types.TypeParamList // the list of type parameter types
- var scopeNode ast.Node // the node associated with the declaration scope
-
- switch n := n.(type) {
- case *ast.TypeSpec:
- name = n.Name
- tparamSyntax = n.TypeParams
- tparamTypes = info.Defs[name].Type().(*types.Named).TypeParams()
- name = n.Name
- scopeNode = n
- case *ast.FuncDecl:
- name = n.Name
- tparamSyntax = n.Type.TypeParams
- tparamTypes = info.Defs[name].Type().(*types.Signature).TypeParams()
- scopeNode = n.Type
- }
-
- if name == nil {
- return true // not a generic object
- }
-
- // Option 1: find type parameters by looking at their declaring field list.
- if tparamSyntax != nil {
- fmt.Printf("%s has a type parameter field list with %d fields\n", name.Name, tparamSyntax.NumFields())
- for _, field := range tparamSyntax.List {
- for _, name := range field.Names {
- tparam := info.Defs[name]
- fmt.Printf(" field %s defines an object %q\n", name.Name, tparam)
- }
- }
- } else {
- fmt.Printf("%s does not have a type parameter list\n", name.Name)
- }
-
- // Option 2: find type parameters via the TypeParams() method on the
- // generic type.
- fmt.Printf("%s has %d type parameters:\n", name.Name, tparamTypes.Len())
- for i := 0; i < tparamTypes.Len(); i++ {
- tparam := tparamTypes.At(i)
- fmt.Printf(" %s has constraint %s\n", tparam, tparam.Constraint())
- }
-
- // Option 3: find type parameters by looking in the declaration scope.
- scope, ok := info.Scopes[scopeNode]
- if ok {
- fmt.Printf("%s has a scope with %d objects:\n", name.Name, scope.Len())
- for _, name := range scope.Names() {
- fmt.Printf(" %s is a %T\n", name, scope.Lookup(name))
- }
- } else {
- fmt.Printf("%s does not have a scope\n", name.Name)
- }
-
- return true
- })
- return nil
-}
-
-//!-print
-
-/*
-//!+output
-> go run golang.org/x/tools/internal/typeparams/example/findtypeparams
-Constraint does not have a type parameter list
-Constraint has 0 type parameters:
-Constraint does not have a scope
-Pair has a type parameter field list with 2 fields
- field L defines an object "type parameter L any"
- field R defines an object "type parameter R any"
-Pair has 2 type parameters:
- L has constraint any
- R has constraint any
-Pair has a scope with 2 objects:
- L is a *types.TypeName
- R is a *types.TypeName
-MakePair has a type parameter field list with 2 fields
- field L defines an object "type parameter L hello.Constraint"
- field R defines an object "type parameter R hello.Constraint"
-MakePair has 2 type parameters:
- L has constraint hello.Constraint
- R has constraint hello.Constraint
-MakePair has a scope with 4 objects:
- L is a *types.TypeName
- R is a *types.TypeName
- l is a *types.Var
- r is a *types.Var
-//!-output
-*/
-
-func main() {
- // Parse one file.
- fset := token.NewFileSet()
- f, err := parser.ParseFile(fset, "hello.go", hello, 0)
- if err != nil {
- log.Fatal(err) // parse error
- }
- if err := PrintTypeParams(fset, f); err != nil {
- log.Fatal(err) // type error
- }
-}
diff --git a/internal/typeparams/example/generic-go-types.md b/internal/typeparams/example/generic-go-types.md
deleted file mode 100644
index 8d2f6ffa9..000000000
--- a/internal/typeparams/example/generic-go-types.md
+++ /dev/null
@@ -1,206 +0,0 @@
-<!-- To regenerate the readme, run: -->
-<!-- go run golang.org/x/example/gotypes@latest generic-go-types.md -->
-
-# Updating tools to support type parameters.
-
-This guide is maintained by Rob Findley (`rfindley@google.com`).
-
-**status**: this document is currently a work-in-progress. See
-[golang/go#50447](https://go.dev/issues/50447) for more details.
-
-%toc
-
-# Introduction
-
-With Go 1.18, Go now supports generic programming via type parameters. This
-document is intended to serve as a guide for tool authors that want to update
-their tools to support the new language constructs introduced for generic Go.
-
-This guide assumes some knowledge of the language changes to support generics.
-See the following references for more information:
-
-- The [original proposal](https://go.dev/issue/43651) for type parameters.
-- The [addendum for type sets](https://go.dev/issue/45346).
-- The [latest language specfication](https://tip.golang.org/ref/spec) (still in-progress as of 2021-01-11).
-- The proposals for new APIs in
- [go/token and go/ast](https://go.dev/issue/47781), and in
- [go/types](https://go.dev/issue/47916).
-
-It also assumes existing knowledge of `go/ast` and `go/types`. If you're just
-getting started,
-[x/example/gotypes](https://github.com/golang/example/tree/master/gotypes) is
-a great introduction (and was the inspiration for this guide).
-
-# Summary of new language features and their APIs
-
-While generic Go programming is a large change to the language, at a high level
-it introduces only a few new concepts. Specifically, we can break down our
-discussion into the following three broad categories. In each category, the
-relevant new APIs are listed (some constructors and getters/setters may be
-elided where they are trivial).
-
-**Generic types**. Types and functions may be _generic_, meaning their
-declaration has a non-empty _type parameter list_: as in `type List[T any]
-...` or `func f[T1, T2 any]() { ... }`. Type parameter lists define placeholder
-types (_type parameters_), scoped to the declaration, which may be substituted
-by any type satisfying their corresponding _constraint interface_ to
-_instantiate_ a new type or function.
-
-Generic types may have methods, which declare `receiver type parameters` via
-their receiver type expression: `func (r T[P1, ..., PN]) method(...) (...)
-{...}`.
-
-_New APIs_:
- - The field `ast.TypeSpec.TypeParams` holds the type parameter list syntax for
- type declarations.
- - The field `ast.FuncType.TypeParams` holds the type parameter list syntax for
- function declarations.
- - The type `types.TypeParam` is a `types.Type` representing a type parameter.
- On this type, the `Constraint` and `SetConstraint` methods allow
- getting/setting the constraint, the `Index` method returns the index of the
- type parameter in the type parameter list that declares it, and the `Obj`
- method returns the object declared in the declaration scope for the type
- parameter (a `types.TypeName`).
- - The type `types.TypeParamList` holds a list of type parameters.
- - The method `types.Named.TypeParams` returns the type parameters for a type
- declaration.
- - The method `types.Named.SetTypeParams` sets type parameters on a defined
- type.
- - The function `types.NewSignatureType` creates a new (possibly generic)
- signature type.
- - The method `types.Signature.RecvTypeParams` returns the receiver type
- parameters for a method.
- - The method `types.Signature.TypeParams` returns the type parameters for
- a function.
-
-**Constraint Interfaces**: type parameter constraints are interfaces, expressed
-via an interface type expression. Interfaces that are only used in constraint
-position are permitted new embedded elements composed of tilde expressions
-(`~T`) and unions (`A | B | ~C`). The new builtin interface type `comparable`
-is implemented by types for which `==` and `!=` are valid. As a special case,
-the `interface` keyword may be omitted from constraint expressions if it may be
-implied (in which case we say the interface is _implicit_).
-
-_New APIs_:
- - The constant `token.TILDE` is used to represent tilde expressions as an
- `ast.UnaryExpr`.
- - Union expressions are represented as an `ast.BinaryExpr` using `|`. This
- means that `ast.BinaryExpr` may now be both a type and value expression.
- - The method `types.Interface.IsImplicit` reports whether the `interface`
- keyword was elided from this interface.
- - The method `types.Interface.MarkImplicit` marks an interface as being
- implicit.
- - The method `types.Interface.IsComparable` reports whether every type in an
- interface's type set is comparable.
- - The method `types.Interface.IsMethodSet` reports whether an interface is
- defined entirely by its methods (has no _specific types_).
- - The type `types.Union` is a type that represents an embedded union
- expression in an interface. May only appear as an embedded element in
- interfaces.
- - The type `types.Term` represents a (possibly tilde) term of a union.
-
-**Instantiation**: generic types and functions may be _instantiated_ to create
-non-generic types and functions by providing _type arguments_ (`var x T[int]`).
-Function type arguments may be _inferred_ via function arguments, or via
-type parameter constraints.
-
-_New APIs_:
- - The type `ast.IndexListExpr` holds index expressions with multiple indices,
- as occurs in instantiation expressions with multiple type arguments, or in
- receivers with multiple type parameters.
- - The function `types.Instantiate` instantiates a generic type with type arguments.
- - The type `types.Context` is an opaque instantiation context that may be
- shared to reduce duplicate instances.
- - The field `types.Config.Context` holds a shared `Context` to use for
- instantiation while type-checking.
- - The type `types.TypeList` holds a list of types.
- - The type `types.ArgumentError` holds an error associated with a specific
- argument index. Used to represent instantiation errors.
- - The field `types.Info.Instances` maps instantiated identifiers to information
- about the resulting type instance.
- - The type `types.Instance` holds information about a type or function
- instance.
- - The method `types.Named.TypeArgs` reports the type arguments used to
- instantiate a named type.
-
-# Examples
-
-The following examples demonstrate the new APIs above, and discuss their
-properties. All examples are runnable, contained in subdirectories of the
-directory holding this README.
-
-## Generic types
-
-### Type parameter lists
-
-Suppose we want to understand the generic library below, which defines a generic
-`Pair`, a constraint interface `Constraint`, and a generic function `MakePair`.
-
-%include findtypeparams/main.go input -
-
-We can use the new `TypeParams` fields in `ast.TypeSpec` and `ast.FuncType` to
-access the syntax of the type parameter list. From there, we can access type
-parameter types in at least three ways:
- - by looking up type parameter definitions in `types.Info`
- - by calling `TypeParams()` on `types.Named` or `types.Signature`
- - by looking up type parameter objects in the declaration scope. Note that
- there now may be a scope associated with an `ast.TypeSpec` node.
-
-%include findtypeparams/main.go print -
-
-This program produces the following output. Note that not every type spec has
-a scope.
-
-%include findtypeparams/main.go output -
-
-### Methods on generic types
-
-**TODO**
-
-## Constraint Interfaces
-
-### New interface elements
-
-**TODO**
-
-### Implicit interfaces
-
-**TODO**
-
-### Type sets
-
-**TODO**
-
-## Instantiation
-
-### Finding instantiated types
-
-**TODO**
-
-### Creating new instantiated types
-
-**TODO**
-
-### Using a shared context
-
-**TODO**
-
-# Updating tools while building at older Go versions
-
-In the examples above, we can see how a lot of the new APIs integrate with
-existing usage of `go/ast` or `go/types`. However, most tools still need to
-build at older Go versions, and handling the new language constructs in-line
-will break builds at older Go versions.
-
-For this purpose, the `x/exp/typeparams` package provides functions and types
-that proxy the new APIs (with stub implementations at older Go versions).
-**NOTE**: does not yet exist -- see
-[golang/go#50447](https://go.dev/issues/50447) for more information.
-
-# Further help
-
-If you're working on updating a tool to support generics, and need help, please
-feel free to reach out for help in any of the following ways:
- - Via the [golang-tools](https://groups.google.com/g/golang-tools) mailing list.
- - Directly to me via email (`rfindley@google.com`).
- - For bugs, you can [file an issue](https://github.com/golang/go/issues/new/choose).
diff --git a/internal/typeparams/normalize.go b/internal/typeparams/normalize.go
index 090f142a5..9c631b651 100644
--- a/internal/typeparams/normalize.go
+++ b/internal/typeparams/normalize.go
@@ -24,20 +24,22 @@ var ErrEmptyTypeSet = errors.New("empty type set")
// Structural type restrictions of a type parameter are created via
// non-interface types embedded in its constraint interface (directly, or via a
// chain of interface embeddings). For example, in the declaration
-// type T[P interface{~int; m()}] int
+//
+// type T[P interface{~int; m()}] int
+//
// the structural restriction of the type parameter P is ~int.
//
// With interface embedding and unions, the specification of structural type
// restrictions may be arbitrarily complex. For example, consider the
// following:
//
-// type A interface{ ~string|~[]byte }
+// type A interface{ ~string|~[]byte }
//
-// type B interface{ int|string }
+// type B interface{ int|string }
//
-// type C interface { ~string|~int }
+// type C interface { ~string|~int }
//
-// type T[P interface{ A|B; C }] int
+// type T[P interface{ A|B; C }] int
//
// In this example, the structural type restriction of P is ~string|int: A|B
// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int,
diff --git a/internal/typeparams/normalize_test.go b/internal/typeparams/normalize_test.go
index 5969eee3d..769433d70 100644
--- a/internal/typeparams/normalize_test.go
+++ b/internal/typeparams/normalize_test.go
@@ -9,6 +9,7 @@ import (
"go/parser"
"go/token"
"go/types"
+ "regexp"
"strings"
"testing"
@@ -38,7 +39,7 @@ func TestStructuralTerms(t *testing.T) {
{"package emptyintersection; type T[P interface{ ~int; string }] int", "", "empty type set"},
{"package embedded0; type T[P interface{ I }] int; type I interface { int }", "int", ""},
- {"package embedded1; type T[P interface{ I | string }] int; type I interface{ int | ~string }", "int|~string", ""},
+ {"package embedded1; type T[P interface{ I | string }] int; type I interface{ int | ~string }", "int ?\\| ?~string", ""},
{"package embedded2; type T[P interface{ I; string }] int; type I interface{ int | ~string }", "string", ""},
{"package named; type T[P C] int; type C interface{ ~int|int }", "~int", ""},
@@ -52,7 +53,7 @@ type B interface{ int|string }
type C interface { ~string|~int }
type T[P interface{ A|B; C }] int
-`, "~string|int", ""},
+`, "~string ?\\| ?int", ""},
}
for _, test := range tests {
@@ -96,7 +97,8 @@ type T[P interface{ A|B; C }] int
qf := types.RelativeTo(pkg)
got = types.TypeString(NewUnion(terms), qf)
}
- if got != test.want {
+ want := regexp.MustCompile(test.want)
+ if !want.MatchString(got) {
t.Errorf("StructuralTerms(%s) = %q, want %q", T, got, test.want)
}
})
diff --git a/internal/typeparams/termlist.go b/internal/typeparams/termlist.go
index 10857d504..933106a23 100644
--- a/internal/typeparams/termlist.go
+++ b/internal/typeparams/termlist.go
@@ -97,15 +97,6 @@ func (xl termlist) norm() termlist {
return rl
}
-// If the type set represented by xl is specified by a single (non-𝓤) term,
-// structuralType returns that type. Otherwise it returns nil.
-func (xl termlist) structuralType() types.Type {
- if nl := xl.norm(); len(nl) == 1 {
- return nl[0].typ // if nl.isAll() then typ is nil, which is ok
- }
- return nil
-}
-
// union returns the union xl ∪ yl.
func (xl termlist) union(yl termlist) termlist {
return append(xl, yl...).norm()
diff --git a/internal/typesinternal/errorcode.go b/internal/typesinternal/errorcode.go
index d38ee3c27..07484073a 100644
--- a/internal/typesinternal/errorcode.go
+++ b/internal/typesinternal/errorcode.go
@@ -31,6 +31,12 @@ type ErrorCode int
// problem with types.
const (
+ // InvalidSyntaxTree occurs if an invalid syntax tree is provided
+ // to the type checker. It should never happen.
+ InvalidSyntaxTree ErrorCode = -1
+)
+
+const (
_ ErrorCode = iota
// Test is reserved for errors that only apply while in self-test mode.
@@ -153,12 +159,12 @@ const (
/* decls > var (+ other variable assignment codes) */
- // UntypedNil occurs when the predeclared (untyped) value nil is used to
+ // UntypedNilUse occurs when the predeclared (untyped) value nil is used to
// initialize a variable declared without an explicit type.
//
// Example:
// var x = nil
- UntypedNil
+ UntypedNilUse
// WrongAssignCount occurs when the number of values on the right-hand side
// of an assignment or or initialization expression does not match the number
@@ -1523,4 +1529,32 @@ const (
// Example:
// type T[P any] struct{ *P }
MisplacedTypeParam
+
+ // InvalidUnsafeSliceData occurs when unsafe.SliceData is called with
+ // an argument that is not of slice type. It also occurs if it is used
+ // in a package compiled for a language version before go1.20.
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // var x int
+ // var _ = unsafe.SliceData(x)
+ InvalidUnsafeSliceData
+
+ // InvalidUnsafeString occurs when unsafe.String is called with
+ // a length argument that is not of integer type, negative, or
+ // out of bounds. It also occurs if it is used in a package
+ // compiled for a language version before go1.20.
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // var b [10]byte
+ // var _ = unsafe.String(&b[0], -1)
+ InvalidUnsafeString
+
+ // InvalidUnsafeStringData occurs if it is used in a package
+ // compiled for a language version before go1.20.
+ _ // not used anymore
+
)
diff --git a/internal/typesinternal/errorcode_string.go b/internal/typesinternal/errorcode_string.go
index de90e9515..15ecf7c5d 100644
--- a/internal/typesinternal/errorcode_string.go
+++ b/internal/typesinternal/errorcode_string.go
@@ -8,6 +8,7 @@ func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
+ _ = x[InvalidSyntaxTree - -1]
_ = x[Test-1]
_ = x[BlankPkgName-2]
_ = x[MismatchedPkgName-3]
@@ -23,7 +24,7 @@ func _() {
_ = x[InvalidConstInit-13]
_ = x[InvalidConstVal-14]
_ = x[InvalidConstType-15]
- _ = x[UntypedNil-16]
+ _ = x[UntypedNilUse-16]
_ = x[WrongAssignCount-17]
_ = x[UnassignableOperand-18]
_ = x[NoNewVar-19]
@@ -152,16 +153,27 @@ func _() {
_ = x[MisplacedConstraintIface-142]
_ = x[InvalidMethodTypeParams-143]
_ = x[MisplacedTypeParam-144]
+ _ = x[InvalidUnsafeSliceData-145]
+ _ = x[InvalidUnsafeString-146]
}
-const _ErrorCode_name = "TestBlankPkgNameMismatchedPkgNameInvalidPkgUseBadImportPathBrokenImportImportCRenamedUnusedImportInvalidInitCycleDuplicateDeclInvalidDeclCycleInvalidTypeCycleInvalidConstInitInvalidConstValInvalidConstTypeUntypedNilWrongAssignCountUnassignableOperandNoNewVarMultiValAssignOpInvalidIfaceAssignInvalidChanAssignIncompatibleAssignUnaddressableFieldAssignNotATypeInvalidArrayLenBlankIfaceMethodIncomparableMapKeyInvalidIfaceEmbedInvalidPtrEmbedBadRecvInvalidRecvDuplicateFieldAndMethodDuplicateMethodInvalidBlankInvalidIotaMissingInitBodyInvalidInitSigInvalidInitDeclInvalidMainDeclTooManyValuesNotAnExprTruncatedFloatNumericOverflowUndefinedOpMismatchedTypesDivByZeroNonNumericIncDecUnaddressableOperandInvalidIndirectionNonIndexableOperandInvalidIndexSwappedSliceIndicesNonSliceableOperandInvalidSliceExprInvalidShiftCountInvalidShiftOperandInvalidReceiveInvalidSendDuplicateLitKeyMissingLitKeyInvalidLitIndexOversizeArrayLitMixedStructLitInvalidStructLitMissingLitFieldDuplicateLitFieldUnexportedLitFieldInvalidLitFieldUntypedLitInvalidLitAmbiguousSelectorUndeclaredImportedNameUnexportedNameUndeclaredNameMissingFieldOrMethodBadDotDotDotSyntaxNonVariadicDotDotDotMisplacedDotDotDotInvalidDotDotDotOperandInvalidDotDotDotUncalledBuiltinInvalidAppendInvalidCapInvalidCloseInvalidCopyInvalidComplexInvalidDeleteInvalidImagInvalidLenSwappedMakeArgsInvalidMakeInvalidRealInvalidAssertImpossibleAssertInvalidConversionInvalidUntypedConversionBadOffsetofSyntaxInvalidOffsetofUnusedExprUnusedVarMissingReturnWrongResultCountOutOfScopeResultInvalidCondInvalidPostDeclInvalidChanRangeInvalidIterVarInvalidRangeExprMisplacedBreakMisplacedContinueMisplacedFallthroughDuplicateCaseDuplicateDefaultBadTypeKeywordInvalidTypeSwitchInvalidExprSwitchInvalidSelectCaseUndeclaredLabelDuplicateLabelMisplacedLabelUnusedLabelJumpOverDeclJumpIntoBlockInvalidMethodExprWrongArgCountInvalidCallUnusedResultsInvalidDeferInvalidGoBadDeclRepeatedDeclInvalidUnsafeAddInvalidUnsafeSliceUnsupportedFeatureNotAGenericTypeWrongTypeArgCountCannotInferTypeArgsInvalidTypeArgInvalidInstanceCycleInvalidUnionMisplacedConstraintIfaceInvalidMethodTypeParamsMisplacedTypeParam"
+const (
+ _ErrorCode_name_0 = "InvalidSyntaxTree"
+ _ErrorCode_name_1 = "TestBlankPkgNameMismatchedPkgNameInvalidPkgUseBadImportPathBrokenImportImportCRenamedUnusedImportInvalidInitCycleDuplicateDeclInvalidDeclCycleInvalidTypeCycleInvalidConstInitInvalidConstValInvalidConstTypeUntypedNilUseWrongAssignCountUnassignableOperandNoNewVarMultiValAssignOpInvalidIfaceAssignInvalidChanAssignIncompatibleAssignUnaddressableFieldAssignNotATypeInvalidArrayLenBlankIfaceMethodIncomparableMapKeyInvalidIfaceEmbedInvalidPtrEmbedBadRecvInvalidRecvDuplicateFieldAndMethodDuplicateMethodInvalidBlankInvalidIotaMissingInitBodyInvalidInitSigInvalidInitDeclInvalidMainDeclTooManyValuesNotAnExprTruncatedFloatNumericOverflowUndefinedOpMismatchedTypesDivByZeroNonNumericIncDecUnaddressableOperandInvalidIndirectionNonIndexableOperandInvalidIndexSwappedSliceIndicesNonSliceableOperandInvalidSliceExprInvalidShiftCountInvalidShiftOperandInvalidReceiveInvalidSendDuplicateLitKeyMissingLitKeyInvalidLitIndexOversizeArrayLitMixedStructLitInvalidStructLitMissingLitFieldDuplicateLitFieldUnexportedLitFieldInvalidLitFieldUntypedLitInvalidLitAmbiguousSelectorUndeclaredImportedNameUnexportedNameUndeclaredNameMissingFieldOrMethodBadDotDotDotSyntaxNonVariadicDotDotDotMisplacedDotDotDotInvalidDotDotDotOperandInvalidDotDotDotUncalledBuiltinInvalidAppendInvalidCapInvalidCloseInvalidCopyInvalidComplexInvalidDeleteInvalidImagInvalidLenSwappedMakeArgsInvalidMakeInvalidRealInvalidAssertImpossibleAssertInvalidConversionInvalidUntypedConversionBadOffsetofSyntaxInvalidOffsetofUnusedExprUnusedVarMissingReturnWrongResultCountOutOfScopeResultInvalidCondInvalidPostDeclInvalidChanRangeInvalidIterVarInvalidRangeExprMisplacedBreakMisplacedContinueMisplacedFallthroughDuplicateCaseDuplicateDefaultBadTypeKeywordInvalidTypeSwitchInvalidExprSwitchInvalidSelectCaseUndeclaredLabelDuplicateLabelMisplacedLabelUnusedLabelJumpOverDeclJumpIntoBlockInvalidMethodExprWrongArgCountInvalidCallUnusedResultsInvalidDeferInvalidGoBadDeclRepeatedDeclInvalidUnsafeAddInvalidUnsafeSliceUnsupportedFeatureNotAGenericTypeWrongTypeArgCountCannotInferTypeArgsInvalidTypeArgInvalidInstanceCycleInvalidUnionMisplacedConstraintIfaceInvalidMethodTypeParamsMisplacedTypeParamInvalidUnsafeSliceDataInvalidUnsafeString"
+)
-var _ErrorCode_index = [...]uint16{0, 4, 16, 33, 46, 59, 71, 85, 97, 113, 126, 142, 158, 174, 189, 205, 215, 231, 250, 258, 274, 292, 309, 327, 351, 359, 374, 390, 408, 425, 440, 447, 458, 481, 496, 508, 519, 534, 548, 563, 578, 591, 600, 614, 629, 640, 655, 664, 680, 700, 718, 737, 749, 768, 787, 803, 820, 839, 853, 864, 879, 892, 907, 923, 937, 953, 968, 985, 1003, 1018, 1028, 1038, 1055, 1077, 1091, 1105, 1125, 1143, 1163, 1181, 1204, 1220, 1235, 1248, 1258, 1270, 1281, 1295, 1308, 1319, 1329, 1344, 1355, 1366, 1379, 1395, 1412, 1436, 1453, 1468, 1478, 1487, 1500, 1516, 1532, 1543, 1558, 1574, 1588, 1604, 1618, 1635, 1655, 1668, 1684, 1698, 1715, 1732, 1749, 1764, 1778, 1792, 1803, 1815, 1828, 1845, 1858, 1869, 1882, 1894, 1903, 1910, 1922, 1938, 1956, 1974, 1989, 2006, 2025, 2039, 2059, 2071, 2095, 2118, 2136}
+var (
+ _ErrorCode_index_1 = [...]uint16{0, 4, 16, 33, 46, 59, 71, 85, 97, 113, 126, 142, 158, 174, 189, 205, 218, 234, 253, 261, 277, 295, 312, 330, 354, 362, 377, 393, 411, 428, 443, 450, 461, 484, 499, 511, 522, 537, 551, 566, 581, 594, 603, 617, 632, 643, 658, 667, 683, 703, 721, 740, 752, 771, 790, 806, 823, 842, 856, 867, 882, 895, 910, 926, 940, 956, 971, 988, 1006, 1021, 1031, 1041, 1058, 1080, 1094, 1108, 1128, 1146, 1166, 1184, 1207, 1223, 1238, 1251, 1261, 1273, 1284, 1298, 1311, 1322, 1332, 1347, 1358, 1369, 1382, 1398, 1415, 1439, 1456, 1471, 1481, 1490, 1503, 1519, 1535, 1546, 1561, 1577, 1591, 1607, 1621, 1638, 1658, 1671, 1687, 1701, 1718, 1735, 1752, 1767, 1781, 1795, 1806, 1818, 1831, 1848, 1861, 1872, 1885, 1897, 1906, 1913, 1925, 1941, 1959, 1977, 1992, 2009, 2028, 2042, 2062, 2074, 2098, 2121, 2139, 2161, 2180}
+)
func (i ErrorCode) String() string {
- i -= 1
- if i < 0 || i >= ErrorCode(len(_ErrorCode_index)-1) {
- return "ErrorCode(" + strconv.FormatInt(int64(i+1), 10) + ")"
+ switch {
+ case i == -1:
+ return _ErrorCode_name_0
+ case 1 <= i && i <= 146:
+ i -= 1
+ return _ErrorCode_name_1[_ErrorCode_index_1[i]:_ErrorCode_index_1[i+1]]
+ default:
+ return "ErrorCode(" + strconv.FormatInt(int64(i), 10) + ")"
}
- return _ErrorCode_name[_ErrorCode_index[i]:_ErrorCode_index[i+1]]
}
diff --git a/internal/typesinternal/errorcode_test.go b/internal/typesinternal/errorcode_test.go
new file mode 100644
index 000000000..63d13f19e
--- /dev/null
+++ b/internal/typesinternal/errorcode_test.go
@@ -0,0 +1,105 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal_test
+
+import (
+ "fmt"
+ "go/ast"
+ "go/constant"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "path/filepath"
+ "runtime"
+ "sort"
+ "strings"
+ "testing"
+)
+
+func TestErrorCodes(t *testing.T) {
+ t.Skip("unskip this test to verify the correctness of errorcode.go for the current Go version")
+
+ // For older go versions, this file was src/go/types/errorcodes.go.
+ stdPath := filepath.Join(runtime.GOROOT(), "src", "internal", "types", "errors", "codes.go")
+ stdCodes, err := loadCodes(stdPath)
+ if err != nil {
+ t.Fatalf("loading std codes: %v", err)
+ }
+
+ localPath := "errorcode.go"
+ localCodes, err := loadCodes(localPath)
+ if err != nil {
+ t.Fatalf("loading local codes: %v", err)
+ }
+
+ // Verify that all std codes are present, with the correct value.
+ type codeVal struct {
+ Name string
+ Value int64
+ }
+ var byValue []codeVal
+ for k, v := range stdCodes {
+ byValue = append(byValue, codeVal{k, v})
+ }
+ sort.Slice(byValue, func(i, j int) bool {
+ return byValue[i].Value < byValue[j].Value
+ })
+
+ localLookup := make(map[int64]string)
+ for k, v := range localCodes {
+ if _, ok := localLookup[v]; ok {
+ t.Errorf("duplicate error code value %d", v)
+ }
+ localLookup[v] = k
+ }
+
+ for _, std := range byValue {
+ local, ok := localCodes[std.Name]
+ if !ok {
+ if v, ok := localLookup[std.Value]; ok {
+ t.Errorf("Missing code for %s (code %d is %s)", std.Name, std.Value, v)
+ } else {
+ t.Errorf("Missing code for %s", std.Name)
+ }
+ }
+ if local != std.Value {
+ t.Errorf("Mismatching value for %s: got %d, but stdlib has %d", std.Name, local, std.Value)
+ }
+ }
+}
+
+// loadCodes loads all constant values found in filepath.
+//
+// The given file must type-check cleanly as a standalone file.
+func loadCodes(filepath string) (map[string]int64, error) {
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, filepath, nil, 0)
+ if err != nil {
+ return nil, err
+ }
+ var config types.Config
+ pkg, err := config.Check("p", fset, []*ast.File{f}, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ codes := make(map[string]int64)
+ for _, name := range pkg.Scope().Names() {
+ obj := pkg.Scope().Lookup(name)
+ c, ok := obj.(*types.Const)
+ if !ok {
+ continue
+ }
+ name := strings.TrimPrefix(name, "_") // compatibility with earlier go versions
+ codes[name], ok = constant.Int64Val(c.Val())
+ if !ok {
+ return nil, fmt.Errorf("non integral value %v for %s", c.Val(), name)
+ }
+ }
+ if len(codes) < 100 {
+ return nil, fmt.Errorf("sanity check: got %d codes but expected at least 100", len(codes))
+ }
+ return codes, nil
+}
diff --git a/internal/typesinternal/types.go b/internal/typesinternal/types.go
index ce7d4351b..3c53fbc63 100644
--- a/internal/typesinternal/types.go
+++ b/internal/typesinternal/types.go
@@ -11,6 +11,8 @@ import (
"go/types"
"reflect"
"unsafe"
+
+ "golang.org/x/tools/go/types/objectpath"
)
func SetUsesCgo(conf *types.Config) bool {
@@ -50,3 +52,10 @@ func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos,
}
var SetGoVersion = func(conf *types.Config, version string) bool { return false }
+
+// NewObjectpathEncoder returns a function closure equivalent to
+// objectpath.For but amortized for multiple (sequential) calls.
+// It is a temporary workaround, pending the approval of proposal 58668.
+//
+//go:linkname NewObjectpathFunc golang.org/x/tools/go/types/objectpath.newEncoderFor
+func NewObjectpathFunc() func(types.Object) (objectpath.Path, error)
diff --git a/playground/socket/socket_test.go b/playground/socket/socket_test.go
index b866e37af..d410afea8 100644
--- a/playground/socket/socket_test.go
+++ b/playground/socket/socket_test.go
@@ -69,9 +69,5 @@ func TestLimiter(t *testing.T) {
if n != msgLimit+1 {
t.Errorf("received %v messages, want %v", n, msgLimit+1)
}
- select {
- case <-kr:
- case <-time.After(100 * time.Millisecond):
- t.Errorf("process wasn't killed after reaching limit")
- }
+ <-kr
}
diff --git a/present/args.go b/present/args.go
index d63196e02..b4f7503b6 100644
--- a/present/args.go
+++ b/present/args.go
@@ -18,7 +18,7 @@ import (
// regular expressions. That is the only change to the code from codewalk.go.
// See http://9p.io/sys/doc/sam/sam.html Table II for details on the syntax.
-// addrToByte evaluates the given address starting at offset start in data.
+// addrToByteRange evaluates the given address starting at offset start in data.
// It returns the lo and hi byte offset of the matched region within data.
func addrToByteRange(addr string, start int, data []byte) (lo, hi int, err error) {
if addr == "" {
diff --git a/present/code.go b/present/code.go
index eb91555b7..f00f1f49d 100644
--- a/present/code.go
+++ b/present/code.go
@@ -56,7 +56,9 @@ var (
)
// parseCode parses a code present directive. Its syntax:
-// .code [-numbers] [-edit] <filename> [address] [highlight]
+//
+// .code [-numbers] [-edit] <filename> [address] [highlight]
+//
// The directive may also be ".play" if the snippet is executable.
func parseCode(ctx *Context, sourceFile string, sourceLine int, cmd string) (Elem, error) {
cmd = strings.TrimSpace(cmd)
diff --git a/present/doc.go b/present/doc.go
index b1e3fc40c..71f758fb3 100644
--- a/present/doc.go
+++ b/present/doc.go
@@ -7,7 +7,7 @@ Package present implements parsing and rendering of present files,
which can be slide presentations as in golang.org/x/tools/cmd/present
or articles as in golang.org/x/blog (the Go blog).
-File Format
+# File Format
Present files begin with a header giving the title of the document
and other metadata, which looks like:
@@ -26,7 +26,9 @@ If the "# " prefix is missing, the file uses
legacy present markup, described below.
The date line may be written without a time:
+
2 Jan 2006
+
In this case, the time will be interpreted as 10am UTC on that date.
The tags line is a comma-separated list of tags that may be used to categorize
@@ -82,7 +84,7 @@ with a dot, as in:
Other than the commands, the text in a section is interpreted
either as Markdown or as legacy present markup.
-Markdown Syntax
+# Markdown Syntax
Markdown typically means the generic name for a family of similar markup languages.
The specific variant used in present is CommonMark.
@@ -138,7 +140,7 @@ Example:
Visit [the Go home page](https://golang.org/).
-Legacy Present Syntax
+# Legacy Present Syntax
Compared to Markdown,
in legacy present
@@ -201,7 +203,7 @@ marker quotes the marker character.
Links can be included in any text with the form [[url][label]], or
[[url]] to use the URL itself as the label.
-Command Invocations
+# Command Invocations
A number of special commands are available through invocations
in the input text. Each such invocation contains a period as the
@@ -224,38 +226,55 @@ a file name followed by an optional address that specifies what
section of the file to display. The address syntax is similar in
its simplest form to that of ed, but comes from sam and is more
general. See
+
https://plan9.io/sys/doc/sam/sam.html Table II
+
for full details. The displayed block is always rounded out to a
full line at both ends.
If no pattern is present, the entire file is displayed.
Any line in the program that ends with the four characters
+
OMIT
+
is deleted from the source before inclusion, making it easy
to write things like
+
.code test.go /START OMIT/,/END OMIT/
+
to find snippets like this
+
tedious_code = boring_function()
// START OMIT
interesting_code = fascinating_function()
// END OMIT
+
and see only this:
+
interesting_code = fascinating_function()
Also, inside the displayed text a line that ends
+
// HL
+
will be highlighted in the display. A highlighting mark may have a
suffix word, such as
+
// HLxxx
+
Such highlights are enabled only if the code invocation ends with
"HL" followed by the word:
+
.code test.go /^type Foo/,/^}/ HLxxx
The .code function may take one or more flags immediately preceding
the filename. This command shows test.go in an editable text area:
+
.code -edit test.go
+
This command shows test.go with line numbers:
+
.code -numbers test.go
play:
@@ -333,7 +352,7 @@ It is your responsibility to make sure the included HTML is valid and safe.
.html file.html
-Presenter Notes
+# Presenter Notes
Lines that begin with ": " are treated as presenter notes,
in both Markdown and legacy present syntax.
@@ -347,7 +366,7 @@ window, except that presenter notes are only visible in the second window.
Notes may appear anywhere within the slide text. For example:
- * Title of slide
+ ## Title of slide
Some text.
@@ -356,6 +375,5 @@ Notes may appear anywhere within the slide text. For example:
Some more text.
: Presenter notes (subsequent paragraph(s))
-
*/
package present // import "golang.org/x/tools/present"
diff --git a/refactor/eg/eg.go b/refactor/eg/eg.go
index 0cd1937ac..15dfbd6ca 100644
--- a/refactor/eg/eg.go
+++ b/refactor/eg/eg.go
@@ -157,7 +157,6 @@ type Transformer struct {
// a single-file package containing "before" and "after" functions as
// described in the package documentation.
// tmplInfo is the type information for tmplFile.
-//
func NewTransformer(fset *token.FileSet, tmplPkg *types.Package, tmplFile *ast.File, tmplInfo *types.Info, verbose bool) (*Transformer, error) {
// Check the template.
beforeSig := funcSig(tmplPkg, "before")
diff --git a/refactor/eg/match.go b/refactor/eg/match.go
index 89c0f8d45..31f8af28f 100644
--- a/refactor/eg/match.go
+++ b/refactor/eg/match.go
@@ -27,7 +27,6 @@ import (
//
// A wildcard appearing more than once in the pattern must
// consistently match the same tree.
-//
func (tr *Transformer) matchExpr(x, y ast.Expr) bool {
if x == nil && y == nil {
return true
diff --git a/refactor/eg/rewrite.go b/refactor/eg/rewrite.go
index 1c3ee6185..3f71c53b7 100644
--- a/refactor/eg/rewrite.go
+++ b/refactor/eg/rewrite.go
@@ -77,7 +77,6 @@ func (tr *Transformer) transformItem(rv reflect.Value) (reflect.Value, bool, map
// available in info.
//
// Derived from rewriteFile in $GOROOT/src/cmd/gofmt/rewrite.go.
-//
func (tr *Transformer) Transform(info *types.Info, pkg *types.Package, file *ast.File) int {
if !tr.seenInfos[info] {
tr.seenInfos[info] = true
diff --git a/refactor/rename/check.go b/refactor/rename/check.go
index 838fc7b79..9f29b98a0 100644
--- a/refactor/rename/check.go
+++ b/refactor/rename/check.go
@@ -206,7 +206,6 @@ func (r *renamer) checkInLocalScope(from types.Object) {
//
// Removing the old name (and all references to it) is always safe, and
// requires no checks.
-//
func (r *renamer) checkInLexicalScope(from types.Object, info *loader.PackageInfo) {
b := from.Parent() // the block defining the 'from' object
if b != nil {
@@ -479,7 +478,7 @@ func (r *renamer) checkStructField(from *types.Var) {
r.checkSelections(from)
}
-// checkSelection checks that all uses and selections that resolve to
+// checkSelections checks that all uses and selections that resolve to
// the specified object would continue to do so after the renaming.
func (r *renamer) checkSelections(from types.Object) {
for pkg, info := range r.packages {
@@ -568,13 +567,14 @@ func (r *renamer) selectionConflict(from types.Object, delta int, syntax *ast.Se
// checkMethod performs safety checks for renaming a method.
// There are three hazards:
-// - declaration conflicts
-// - selection ambiguity/changes
-// - entailed renamings of assignable concrete/interface types.
-// We reject renamings initiated at concrete methods if it would
-// change the assignability relation. For renamings of abstract
-// methods, we rename all methods transitively coupled to it via
-// assignability.
+// - declaration conflicts
+// - selection ambiguity/changes
+// - entailed renamings of assignable concrete/interface types.
+//
+// We reject renamings initiated at concrete methods if it would
+// change the assignability relation. For renamings of abstract
+// methods, we rename all methods transitively coupled to it via
+// assignability.
func (r *renamer) checkMethod(from *types.Func) {
// e.g. error.Error
if from.Pkg() == nil {
diff --git a/refactor/rename/spec.go b/refactor/rename/spec.go
index 0c4526d15..22a268a79 100644
--- a/refactor/rename/spec.go
+++ b/refactor/rename/spec.go
@@ -31,7 +31,6 @@ import (
//
// It is populated from an -offset flag or -from query;
// see Usage for the allowed -from query forms.
-//
type spec struct {
// pkg is the package containing the position
// specified by the -from or -offset flag.
@@ -413,7 +412,6 @@ func typeSwitchVar(info *types.Info, path []ast.Node) types.Object {
// spec.fromName matching the spec. On success, the result has exactly
// one element unless spec.searchFor!="", in which case it has at least one
// element.
-//
func findObjects(info *loader.PackageInfo, spec *spec) ([]types.Object, error) {
if spec.pkgMember == "" {
if spec.searchFor == "" {
@@ -572,6 +570,7 @@ func ambiguityError(fset *token.FileSet, objects []types.Object) error {
}
// Matches cgo generated comment as well as the proposed standard:
+//
// https://golang.org/s/generatedcode
var generatedRx = regexp.MustCompile(`// .*DO NOT EDIT\.?`)
diff --git a/refactor/rename/util.go b/refactor/rename/util.go
index e8f8d7498..258ba786c 100644
--- a/refactor/rename/util.go
+++ b/refactor/rename/util.go
@@ -83,7 +83,6 @@ func isDigit(ch rune) bool {
// sameFile returns true if x and y have the same basename and denote
// the same file.
-//
func sameFile(x, y string) bool {
if runtime.GOOS == "windows" {
x = filepath.ToSlash(x)
diff --git a/refactor/satisfy/find.go b/refactor/satisfy/find.go
index 34b349e15..6b4d5284a 100644
--- a/refactor/satisfy/find.go
+++ b/refactor/satisfy/find.go
@@ -10,11 +10,7 @@
//
// THIS PACKAGE IS EXPERIMENTAL AND MAY CHANGE AT ANY TIME.
//
-// It is provided only for the gorename tool. Ideally this
-// functionality will become part of the type-checker in due course,
-// since it is computing it anyway, and it is robust for ill-typed
-// inputs, which this package is not.
-//
+// It is provided only for the gopls tool. It requires well-typed inputs.
package satisfy // import "golang.org/x/tools/refactor/satisfy"
// NOTES:
@@ -26,9 +22,6 @@ package satisfy // import "golang.org/x/tools/refactor/satisfy"
// ...
// }})
//
-// TODO(adonovan): make this robust against ill-typed input.
-// Or move it into the type-checker.
-//
// Assignability conversions are possible in the following places:
// - in assignments y = x, y := x, var y = x.
// - from call argument types to formal parameter types
@@ -52,11 +45,15 @@ import (
"golang.org/x/tools/go/ast/astutil"
"golang.org/x/tools/go/types/typeutil"
+ "golang.org/x/tools/internal/typeparams"
)
// A Constraint records the fact that the RHS type does and must
// satisfy the LHS type, which is an interface.
// The names are suggestive of an assignment statement LHS = RHS.
+//
+// The constraint is implicitly universally quantified over any type
+// parameters appearing within the two types.
type Constraint struct {
LHS, RHS types.Type
}
@@ -69,7 +66,6 @@ type Constraint struct {
// that is checked during compilation of a package. Refactoring tools
// will need to preserve at least this part of the relation to ensure
// continued compilation.
-//
type Finder struct {
Result map[Constraint]bool
msetcache typeutil.MethodSetCache
@@ -88,7 +84,6 @@ type Finder struct {
// The package must be free of type errors, and
// info.{Defs,Uses,Selections,Types} must have been populated by the
// type-checker.
-//
func (f *Finder) Find(info *types.Info, files []*ast.File) {
if f.Result == nil {
f.Result = make(map[Constraint]bool)
@@ -132,13 +127,13 @@ func (f *Finder) exprN(e ast.Expr) types.Type {
case *ast.CallExpr:
// x, err := f(args)
- sig := f.expr(e.Fun).Underlying().(*types.Signature)
+ sig := coreType(f.expr(e.Fun)).(*types.Signature)
f.call(sig, e.Args)
case *ast.IndexExpr:
// y, ok := x[i]
x := f.expr(e.X)
- f.assign(f.expr(e.Index), x.Underlying().(*types.Map).Key())
+ f.assign(f.expr(e.Index), coreType(x).(*types.Map).Key())
case *ast.TypeAssertExpr:
// y, ok := x.(T)
@@ -203,7 +198,8 @@ func (f *Finder) call(sig *types.Signature, args []ast.Expr) {
}
}
-func (f *Finder) builtin(obj *types.Builtin, sig *types.Signature, args []ast.Expr, T types.Type) types.Type {
+// builtin visits the arguments of a builtin type with signature sig.
+func (f *Finder) builtin(obj *types.Builtin, sig *types.Signature, args []ast.Expr) {
switch obj.Name() {
case "make", "new":
// skip the type operand
@@ -218,7 +214,7 @@ func (f *Finder) builtin(obj *types.Builtin, sig *types.Signature, args []ast.Ex
f.expr(args[1])
} else {
// append(x, y, z)
- tElem := s.Underlying().(*types.Slice).Elem()
+ tElem := coreType(s).(*types.Slice).Elem()
for _, arg := range args[1:] {
f.assign(tElem, f.expr(arg))
}
@@ -227,14 +223,12 @@ func (f *Finder) builtin(obj *types.Builtin, sig *types.Signature, args []ast.Ex
case "delete":
m := f.expr(args[0])
k := f.expr(args[1])
- f.assign(m.Underlying().(*types.Map).Key(), k)
+ f.assign(coreType(m).(*types.Map).Key(), k)
default:
// ordinary call
f.call(sig, args)
}
-
- return T
}
func (f *Finder) extract(tuple types.Type, i int) types.Type {
@@ -276,7 +270,6 @@ func (f *Finder) valueSpec(spec *ast.ValueSpec) {
// explicit conversions and comparisons between two types, unless the
// types are uninteresting (e.g. lhs is a concrete type, or the empty
// interface; rhs has no methods).
-//
func (f *Finder) assign(lhs, rhs types.Type) {
if types.Identical(lhs, rhs) {
return
@@ -362,6 +355,7 @@ func (f *Finder) expr(e ast.Expr) types.Type {
f.sig = saved
case *ast.CompositeLit:
+ // No need for coreType here: go1.18 disallows P{...} for type param P.
switch T := deref(tv.Type).Underlying().(type) {
case *types.Struct:
for i, elem := range e.Elts {
@@ -407,12 +401,20 @@ func (f *Finder) expr(e ast.Expr) types.Type {
}
case *ast.IndexExpr:
- x := f.expr(e.X)
- i := f.expr(e.Index)
- if ux, ok := x.Underlying().(*types.Map); ok {
- f.assign(ux.Key(), i)
+ if instance(f.info, e.X) {
+ // f[T] or C[T] -- generic instantiation
+ } else {
+ // x[i] or m[k] -- index or lookup operation
+ x := f.expr(e.X)
+ i := f.expr(e.Index)
+ if ux, ok := coreType(x).(*types.Map); ok {
+ f.assign(ux.Key(), i)
+ }
}
+ case *typeparams.IndexListExpr:
+ // f[X, Y] -- generic instantiation
+
case *ast.SliceExpr:
f.expr(e.X)
if e.Low != nil {
@@ -436,14 +438,29 @@ func (f *Finder) expr(e ast.Expr) types.Type {
f.assign(tvFun.Type, arg0)
} else {
// function call
+
+ // unsafe call. Treat calls to functions in unsafe like ordinary calls,
+ // except that their signature cannot be determined by their func obj.
+ // Without this special handling, f.expr(e.Fun) would fail below.
+ if s, ok := unparen(e.Fun).(*ast.SelectorExpr); ok {
+ if obj, ok := f.info.Uses[s.Sel].(*types.Builtin); ok && obj.Pkg().Path() == "unsafe" {
+ sig := f.info.Types[e.Fun].Type.(*types.Signature)
+ f.call(sig, e.Args)
+ return tv.Type
+ }
+ }
+
+ // builtin call
if id, ok := unparen(e.Fun).(*ast.Ident); ok {
if obj, ok := f.info.Uses[id].(*types.Builtin); ok {
sig := f.info.Types[id].Type.(*types.Signature)
- return f.builtin(obj, sig, e.Args, tv.Type)
+ f.builtin(obj, sig, e.Args)
+ return tv.Type
}
}
+
// ordinary call
- f.call(f.expr(e.Fun).Underlying().(*types.Signature), e.Args)
+ f.call(coreType(f.expr(e.Fun)).(*types.Signature), e.Args)
}
case *ast.StarExpr:
@@ -503,7 +520,7 @@ func (f *Finder) stmt(s ast.Stmt) {
case *ast.SendStmt:
ch := f.expr(s.Chan)
val := f.expr(s.Value)
- f.assign(ch.Underlying().(*types.Chan).Elem(), val)
+ f.assign(coreType(ch).(*types.Chan).Elem(), val)
case *ast.IncDecStmt:
f.expr(s.X)
@@ -651,35 +668,35 @@ func (f *Finder) stmt(s ast.Stmt) {
if s.Key != nil {
k := f.expr(s.Key)
var xelem types.Type
- // keys of array, *array, slice, string aren't interesting
- switch ux := x.Underlying().(type) {
+ // Keys of array, *array, slice, string aren't interesting
+ // since the RHS key type is just an int.
+ switch ux := coreType(x).(type) {
case *types.Chan:
xelem = ux.Elem()
case *types.Map:
xelem = ux.Key()
}
if xelem != nil {
- f.assign(xelem, k)
+ f.assign(k, xelem)
}
}
if s.Value != nil {
val := f.expr(s.Value)
var xelem types.Type
- // values of strings aren't interesting
- switch ux := x.Underlying().(type) {
+ // Values of type strings aren't interesting because
+ // the RHS value type is just a rune.
+ switch ux := coreType(x).(type) {
case *types.Array:
xelem = ux.Elem()
- case *types.Chan:
- xelem = ux.Elem()
case *types.Map:
xelem = ux.Elem()
case *types.Pointer: // *array
- xelem = deref(ux).(*types.Array).Elem()
+ xelem = coreType(deref(ux)).(*types.Array).Elem()
case *types.Slice:
xelem = ux.Elem()
}
if xelem != nil {
- f.assign(xelem, val)
+ f.assign(val, xelem)
}
}
}
@@ -694,7 +711,7 @@ func (f *Finder) stmt(s ast.Stmt) {
// deref returns a pointer's element type; otherwise it returns typ.
func deref(typ types.Type) types.Type {
- if p, ok := typ.Underlying().(*types.Pointer); ok {
+ if p, ok := coreType(typ).(*types.Pointer); ok {
return p.Elem()
}
return typ
@@ -703,3 +720,19 @@ func deref(typ types.Type) types.Type {
func unparen(e ast.Expr) ast.Expr { return astutil.Unparen(e) }
func isInterface(T types.Type) bool { return types.IsInterface(T) }
+
+func coreType(T types.Type) types.Type { return typeparams.CoreType(T) }
+
+func instance(info *types.Info, expr ast.Expr) bool {
+ var id *ast.Ident
+ switch x := expr.(type) {
+ case *ast.Ident:
+ id = x
+ case *ast.SelectorExpr:
+ id = x.Sel
+ default:
+ return false
+ }
+ _, ok := typeparams.GetInstances(info)[id]
+ return ok
+}
diff --git a/refactor/satisfy/find_test.go b/refactor/satisfy/find_test.go
new file mode 100644
index 000000000..35a1e87ca
--- /dev/null
+++ b/refactor/satisfy/find_test.go
@@ -0,0 +1,238 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package satisfy_test
+
+import (
+ "fmt"
+ "go/ast"
+ "go/importer"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "reflect"
+ "sort"
+ "testing"
+
+ "golang.org/x/tools/internal/typeparams"
+ "golang.org/x/tools/refactor/satisfy"
+)
+
+// This test exercises various operations on core types of type parameters.
+// (It also provides pretty decent coverage of the non-generic operations.)
+func TestGenericCoreOperations(t *testing.T) {
+ if !typeparams.Enabled {
+ t.Skip("!typeparams.Enabled")
+ }
+
+ const src = `package foo
+
+import "unsafe"
+
+type I interface { f() }
+
+type impl struct{}
+func (impl) f() {}
+
+// A big pile of single-serving types that implement I.
+type A struct{impl}
+type B struct{impl}
+type C struct{impl}
+type D struct{impl}
+type E struct{impl}
+type F struct{impl}
+type G struct{impl}
+type H struct{impl}
+type J struct{impl}
+type K struct{impl}
+type L struct{impl}
+type M struct{impl}
+type N struct{impl}
+type O struct{impl}
+type P struct{impl}
+type Q struct{impl}
+type R struct{impl}
+type S struct{impl}
+type T struct{impl}
+type U struct{impl}
+type V struct{impl}
+
+type Generic[T any] struct{impl}
+func (Generic[T]) g(T) {}
+
+type GI[T any] interface{
+ g(T)
+}
+
+func _[Slice interface{ []I }](s Slice) Slice {
+ s[0] = L{} // I <- L
+ return append(s, A{}) // I <- A
+}
+
+func _[Func interface{ func(I) B }](fn Func) {
+ b := fn(C{}) // I <- C
+ var _ I = b // I <- B
+}
+
+func _[Chan interface{ chan D }](ch Chan) {
+ var i I
+ for i = range ch {} // I <- D
+ _ = i
+}
+
+func _[Chan interface{ chan E }](ch Chan) {
+ var _ I = <-ch // I <- E
+}
+
+func _[Chan interface{ chan I }](ch Chan) {
+ ch <- F{} // I <- F
+}
+
+func _[Map interface{ map[G]H }](m Map) {
+ var k, v I
+ for k, v = range m {} // I <- G, I <- H
+ _, _ = k, v
+}
+
+func _[Map interface{ map[I]K }](m Map) {
+ var _ I = m[J{}] // I <- J, I <- K
+ delete(m, R{}) // I <- R
+ _, _ = m[J{}]
+}
+
+func _[Array interface{ [1]I }](a Array) {
+ a[0] = M{} // I <- M
+}
+
+func _[Array interface{ [1]N }](a Array) {
+ var _ I = a[0] // I <- N
+}
+
+func _[Array interface{ [1]O }](a Array) {
+ var v I
+ for _, v = range a {} // I <- O
+ _ = v
+}
+
+func _[ArrayPtr interface{ *[1]P }](a ArrayPtr) {
+ var v I
+ for _, v = range a {} // I <- P
+ _ = v
+}
+
+func _[Slice interface{ []Q }](s Slice) {
+ var v I
+ for _, v = range s {} // I <- Q
+ _ = v
+}
+
+func _[Func interface{ func() (S, bool) }](fn Func) {
+ var i I
+ i, _ = fn() // I <- S
+ _ = i
+}
+
+func _() I {
+ var _ I = T{} // I <- T
+ var _ I = Generic[T]{} // I <- Generic[T]
+ var _ I = Generic[string]{} // I <- Generic[string]
+ return U{} // I <- U
+}
+
+var _ GI[string] = Generic[string]{} // GI[string] <- Generic[string]
+
+// universally quantified constraints:
+// the type parameter may appear on the left, the right, or both sides.
+
+func _[T any](g Generic[T]) GI[T] {
+ return g // GI[T] <- Generic[T]
+}
+
+func _[T any]() {
+ type GI2[T any] interface{ g(string) }
+ var _ GI2[T] = Generic[string]{} // GI2[T] <- Generic[string]
+}
+
+type Gen2[T any] struct{}
+func (f Gen2[T]) g(string) { global = f } // GI[string] <- Gen2[T]
+
+var global GI[string]
+
+func _() {
+ var x [3]V
+ // golang/go#56227: the finder should visit calls in the unsafe package.
+ _ = unsafe.Slice(&x[0], func() int { var _ I = x[0]; return 3 }()) // I <- V
+}
+`
+ got := constraints(t, src)
+ want := []string{
+ "p.GI2[T] <- p.Generic[string]", // implicitly "forall T" quantified
+ "p.GI[T] <- p.Generic[T]", // implicitly "forall T" quantified
+ "p.GI[string] <- p.Gen2[T]", // implicitly "forall T" quantified
+ "p.GI[string] <- p.Generic[string]",
+ "p.I <- p.A",
+ "p.I <- p.B",
+ "p.I <- p.C",
+ "p.I <- p.D",
+ "p.I <- p.E",
+ "p.I <- p.F",
+ "p.I <- p.G",
+ "p.I <- p.Generic[p.T]",
+ "p.I <- p.Generic[string]",
+ "p.I <- p.H",
+ "p.I <- p.J",
+ "p.I <- p.K",
+ "p.I <- p.L",
+ "p.I <- p.M",
+ "p.I <- p.N",
+ "p.I <- p.O",
+ "p.I <- p.P",
+ "p.I <- p.Q",
+ "p.I <- p.R",
+ "p.I <- p.S",
+ "p.I <- p.T",
+ "p.I <- p.U",
+ "p.I <- p.V",
+ }
+ if !reflect.DeepEqual(got, want) {
+ t.Fatalf("found unexpected constraints: got %s, want %s", got, want)
+ }
+}
+
+func constraints(t *testing.T, src string) []string {
+ // parse
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, "p.go", src, 0)
+ if err != nil {
+ t.Fatal(err) // parse error
+ }
+ files := []*ast.File{f}
+
+ // type-check
+ info := &types.Info{
+ Types: make(map[ast.Expr]types.TypeAndValue),
+ Defs: make(map[*ast.Ident]types.Object),
+ Uses: make(map[*ast.Ident]types.Object),
+ Implicits: make(map[ast.Node]types.Object),
+ Scopes: make(map[ast.Node]*types.Scope),
+ Selections: make(map[*ast.SelectorExpr]*types.Selection),
+ }
+ typeparams.InitInstanceInfo(info)
+ conf := types.Config{
+ Importer: importer.Default(),
+ }
+ if _, err := conf.Check("p", fset, files, info); err != nil {
+ t.Fatal(err) // type error
+ }
+
+ // gather constraints
+ var finder satisfy.Finder
+ finder.Find(info, files)
+ var constraints []string
+ for c := range finder.Result {
+ constraints = append(constraints, fmt.Sprintf("%v <- %v", c.LHS, c.RHS))
+ }
+ sort.Strings(constraints)
+ return constraints
+}
diff --git a/txtar/archive.go b/txtar/archive.go
index 214256617..81b314545 100644
--- a/txtar/archive.go
+++ b/txtar/archive.go
@@ -6,15 +6,15 @@
//
// The goals for the format are:
//
-// - be trivial enough to create and edit by hand.
-// - be able to store trees of text files describing go command test cases.
-// - diff nicely in git history and code reviews.
+// - be trivial enough to create and edit by hand.
+// - be able to store trees of text files describing go command test cases.
+// - diff nicely in git history and code reviews.
//
// Non-goals include being a completely general archive format,
// storing binary data, storing file modes, storing special files like
// symbolic links, and so on.
//
-// Txtar format
+// # Txtar format
//
// A txtar archive is zero or more comment lines and then a sequence of file entries.
// Each file entry begins with a file marker line of the form "-- FILENAME --"