-rw-r--r--  .gitattributes  10
-rw-r--r--  .gitignore  2
-rw-r--r--  .hgignore  6
-rw-r--r--  CONTRIBUTING.md  31
-rw-r--r--  README  1
-rw-r--r--  benchmark/parse/parse.go  131
-rw-r--r--  benchmark/parse/parse_test.go (renamed from cmd/benchcmp/parse_test.go)  114
-rw-r--r--  blog/atom/atom.go  2
-rw-r--r--  blog/blog.go  2
-rw-r--r--  cmd/benchcmp/benchcmp.go  59
-rw-r--r--  cmd/benchcmp/benchcmp_test.go  59
-rw-r--r--  cmd/benchcmp/compare.go  68
-rw-r--r--  cmd/benchcmp/compare_test.go  36
-rw-r--r--  cmd/benchcmp/doc.go  2
-rw-r--r--  cmd/benchcmp/parse.go  135
-rw-r--r--  cmd/callgraph/main.go  111
-rw-r--r--  cmd/callgraph/main_test.go  6
-rw-r--r--  cmd/cover/doc.go  2
-rw-r--r--  cmd/digraph/digraph.go  20
-rw-r--r--  cmd/eg/eg.go  18
-rw-r--r--  cmd/godex/doc.go  2
-rw-r--r--  cmd/godoc/appinit.go  2
-rw-r--r--  cmd/godoc/doc.go  4
-rw-r--r--  cmd/godoc/godoc_test.go  5
-rw-r--r--  cmd/godoc/main.go  5
-rwxr-xr-x  cmd/godoc/setup-godoc-app.bash  12
-rw-r--r--  cmd/godoc/x.go  43
-rw-r--r--  cmd/goimports/doc.go  6
-rw-r--r--  cmd/gomvpkg/main.go  89
-rw-r--r--  cmd/gorename/main.go  97
-rw-r--r--  cmd/gotype/doc.go  2
-rw-r--r--  cmd/html2article/conv.go  4
-rw-r--r--  cmd/oracle/main.go  26
-rw-r--r--  cmd/oracle/oracle.el  6
-rw-r--r--  cmd/present/dir.go  4
-rw-r--r--  cmd/present/doc.go  4
-rw-r--r--  cmd/present/static/print.css  51
-rw-r--r--  cmd/present/static/slides.js  24
-rw-r--r--  cmd/present/static/styles.css  482
-rw-r--r--  cmd/present/templates/dir.tmpl  2
-rw-r--r--  cmd/present/templates/slides.tmpl  10
-rw-r--r--  cmd/ssadump/main.go  63
-rw-r--r--  cmd/stringer/endtoend_test.go  8
-rw-r--r--  cmd/stringer/golden_test.go  67
-rw-r--r--  cmd/stringer/stringer.go  31
-rw-r--r--  cmd/stringer/testdata/cgo.go  32
-rw-r--r--  cmd/tipgodoc/Dockerfile  13
-rw-r--r--  cmd/tipgodoc/README  3
-rw-r--r--  cmd/tipgodoc/app.yaml  15
-rw-r--r--  cmd/tipgodoc/tip.go  278
-rw-r--r--  cmd/vet/asmdecl.go  16
-rw-r--r--  cmd/vet/bool.go  14
-rw-r--r--  cmd/vet/doc.go  2
-rw-r--r--  cmd/vet/print.go  4
-rw-r--r--  cmd/vet/structtag.go  74
-rw-r--r--  cmd/vet/testdata/print.go  2
-rw-r--r--  cmd/vet/testdata/structtag.go  15
-rw-r--r--  cmd/vet/types.go  4
-rw-r--r--  cmd/vet/whitelist/whitelist.go  3
-rw-r--r--  codereview.cfg  2
-rw-r--r--  container/intsets/sparse.go  167
-rw-r--r--  container/intsets/sparse_test.go  123
-rw-r--r--  cover/profile.go  4
-rw-r--r--  dashboard/README  32
-rw-r--r--  dashboard/app/app.yaml  21
-rw-r--r--  dashboard/app/build/build.go  911
-rw-r--r--  dashboard/app/build/dash.go  118
-rw-r--r--  dashboard/app/build/handler.go  906
-rw-r--r--  dashboard/app/build/init.go  46
-rw-r--r--  dashboard/app/build/notify.go  378
-rw-r--r--  dashboard/app/build/notify.txt  9
-rw-r--r--  dashboard/app/build/perf.go  312
-rw-r--r--  dashboard/app/build/perf_changes.go  282
-rw-r--r--  dashboard/app/build/perf_changes.html  89
-rw-r--r--  dashboard/app/build/perf_detail.go  221
-rw-r--r--  dashboard/app/build/perf_detail.html  101
-rw-r--r--  dashboard/app/build/perf_graph.go  270
-rw-r--r--  dashboard/app/build/perf_graph.html  120
-rw-r--r--  dashboard/app/build/perf_learn.go  186
-rw-r--r--  dashboard/app/build/perf_learn.html  45
-rw-r--r--  dashboard/app/build/perf_notify.txt  13
-rw-r--r--  dashboard/app/build/test.go  378
-rw-r--r--  dashboard/app/build/ui.go  460
-rw-r--r--  dashboard/app/build/ui.html  210
-rw-r--r--  dashboard/app/build/update.go  117
-rw-r--r--  dashboard/app/cache/cache.go  91
-rw-r--r--  dashboard/app/cron.yaml  5
-rw-r--r--  dashboard/app/index.yaml  54
-rw-r--r--  dashboard/app/key/key.go  64
-rw-r--r--  dashboard/app/static/status_alert.gif  bin 570 -> 0 bytes
-rw-r--r--  dashboard/app/static/status_good.gif  bin 328 -> 0 bytes
-rw-r--r--  dashboard/app/static/style.css  308
-rw-r--r--  dashboard/builder/bench.go  256
-rw-r--r--  dashboard/builder/doc.go  58
-rw-r--r--  dashboard/builder/env.go  277
-rw-r--r--  dashboard/builder/exec.go  98
-rw-r--r--  dashboard/builder/filemutex_flock.go  66
-rw-r--r--  dashboard/builder/filemutex_local.go  27
-rw-r--r--  dashboard/builder/filemutex_windows.go  105
-rw-r--r--  dashboard/builder/http.go  219
-rw-r--r--  dashboard/builder/main.go  831
-rw-r--r--  dashboard/builder/vcs.go  212
-rw-r--r--  dashboard/coordinator/Makefile  6
-rw-r--r--  dashboard/coordinator/buildongce/create.go  306
-rw-r--r--  dashboard/coordinator/main.go  458
-rw-r--r--  dashboard/env/linux-x86-base/Dockerfile  16
-rw-r--r--  dashboard/env/linux-x86-base/Makefile  12
-rw-r--r--  dashboard/env/linux-x86-base/README  11
-rwxr-xr-x  dashboard/env/linux-x86-base/scripts/build-go-builder.sh  20
-rwxr-xr-x  dashboard/env/linux-x86-base/scripts/install-apt-deps.sh  17
-rw-r--r--  dashboard/env/linux-x86-clang/Dockerfile  20
-rw-r--r--  dashboard/env/linux-x86-clang/Makefile  15
-rwxr-xr-x  dashboard/env/linux-x86-clang/scripts/build-go-builder.sh  20
-rwxr-xr-x  dashboard/env/linux-x86-clang/scripts/install-apt-deps.sh  21
-rw-r--r--  dashboard/env/linux-x86-clang/sources/clang-deps.list  3
-rw-r--r--  dashboard/env/linux-x86-gccgo/Dockerfile  19
-rw-r--r--  dashboard/env/linux-x86-gccgo/Makefile  15
-rw-r--r--  dashboard/env/linux-x86-gccgo/README  6
-rwxr-xr-x  dashboard/env/linux-x86-gccgo/scripts/install-apt-deps.sh  20
-rwxr-xr-x  dashboard/env/linux-x86-gccgo/scripts/install-gccgo-builder.sh  7
-rwxr-xr-x  dashboard/env/linux-x86-gccgo/scripts/install-gold.sh  9
-rw-r--r--  dashboard/env/linux-x86-nacl/Dockerfile  27
-rw-r--r--  dashboard/env/linux-x86-nacl/Makefile  12
-rw-r--r--  dashboard/env/linux-x86-nacl/README  6
-rwxr-xr-x  dashboard/env/linux-x86-nacl/build-command.pl  13
-rwxr-xr-x  dashboard/env/linux-x86-nacl/scripts/build-go-builder.sh  26
-rwxr-xr-x  dashboard/env/linux-x86-nacl/scripts/install-apt-deps.sh  14
-rw-r--r--  dashboard/env/linux-x86-sid/Dockerfile  14
-rw-r--r--  dashboard/env/linux-x86-sid/Makefile  12
-rwxr-xr-x  dashboard/env/linux-x86-sid/scripts/build-go-builder.sh  20
-rwxr-xr-x  dashboard/env/linux-x86-sid/scripts/install-apt-deps.sh  17
-rw-r--r--  dashboard/updater/updater.go  128
-rw-r--r--  dashboard/watcher/watcher.go  589
-rw-r--r--  go/ast/astutil/enclosing.go (renamed from astutil/enclosing.go)  0
-rw-r--r--  go/ast/astutil/enclosing_test.go (renamed from astutil/enclosing_test.go)  2
-rw-r--r--  go/ast/astutil/imports.go (renamed from astutil/imports.go)  115
-rw-r--r--  go/ast/astutil/imports_test.go (renamed from astutil/imports_test.go)  154
-rw-r--r--  go/ast/astutil/util.go  14
-rw-r--r--  go/buildutil/allpackages.go  32
-rw-r--r--  go/buildutil/fakecontext.go  108
-rw-r--r--  go/callgraph/callgraph.go  2
-rw-r--r--  go/callgraph/cha/cha.go  120
-rw-r--r--  go/callgraph/cha/cha_test.go  106
-rw-r--r--  go/callgraph/cha/testdata/func.go  23
-rw-r--r--  go/callgraph/cha/testdata/iface.go  65
-rw-r--r--  go/callgraph/cha/testdata/recv.go  37
-rw-r--r--  go/callgraph/rta/rta.go  2
-rw-r--r--  go/callgraph/rta/rta_test.go  3
-rw-r--r--  go/callgraph/static/static.go  33
-rw-r--r--  go/callgraph/static/static_test.go  88
-rw-r--r--  go/exact/exact.go  12
-rw-r--r--  go/exact/exact_test.go  27
-rw-r--r--  go/gccgoimporter/importer.go  2
-rw-r--r--  go/gccgoimporter/importer_test.go  3
-rw-r--r--  go/gcimporter/gcimporter.go  2
-rw-r--r--  go/importer/import.go  2
-rw-r--r--  go/loader/loader.go  672
-rw-r--r--  go/loader/loader_test.go  600
-rw-r--r--  go/loader/source_test.go  2
-rw-r--r--  go/loader/stdlib_test.go  4
-rw-r--r--  go/loader/testdata/badpkgdecl.go  1
-rw-r--r--  go/loader/util.go  42
-rw-r--r--  go/pointer/analysis.go  2
-rw-r--r--  go/pointer/doc.go  2
-rw-r--r--  go/pointer/example_test.go  6
-rw-r--r--  go/pointer/gen.go  6
-rw-r--r--  go/pointer/pointer_test.go  8
-rw-r--r--  go/pointer/stdlib_test.go  5
-rw-r--r--  go/pointer/testdata/another.go  2
-rw-r--r--  go/pointer/util.go  6
-rw-r--r--  go/ssa/builder.go  211
-rw-r--r--  go/ssa/builder_test.go  124
-rw-r--r--  go/ssa/create.go  34
-rw-r--r--  go/ssa/doc.go  2
-rw-r--r--  go/ssa/emit.go  9
-rw-r--r--  go/ssa/func.go  7
-rw-r--r--  go/ssa/interp/external.go  1
-rw-r--r--  go/ssa/interp/interp.go  35
-rw-r--r--  go/ssa/interp/interp_test.go  25
-rw-r--r--  go/ssa/interp/reflect.go  7
-rw-r--r--  go/ssa/interp/testdata/coverage.go  23
-rw-r--r--  go/ssa/interp/testdata/reflect.go  11
-rw-r--r--  go/ssa/lvalue.go  11
-rw-r--r--  go/ssa/methods.go  184
-rw-r--r--  go/ssa/mode.go  107
-rw-r--r--  go/ssa/sanity.go  9
-rw-r--r--  go/ssa/source.go  2
-rw-r--r--  go/ssa/source_test.go  2
-rw-r--r--  go/ssa/ssa.go  116
-rw-r--r--  go/ssa/ssautil/visit.go  4
-rw-r--r--  go/ssa/stdlib_test.go  10
-rw-r--r--  go/ssa/testdata/objlookup.go  6
-rw-r--r--  go/ssa/testdata/valueforexpr.go  2
-rw-r--r--  go/ssa/testmain.go  30
-rw-r--r--  go/ssa/util.go  23
-rw-r--r--  go/ssa/wrappers.go  17
-rw-r--r--  go/types/api.go  6
-rw-r--r--  go/types/api_test.go  4
-rw-r--r--  go/types/assignments.go  2
-rw-r--r--  go/types/builtins.go  12
-rw-r--r--  go/types/call.go  38
-rw-r--r--  go/types/check.go  4
-rw-r--r--  go/types/conversions.go  2
-rw-r--r--  go/types/eval.go  30
-rw-r--r--  go/types/eval_test.go  14
-rw-r--r--  go/types/expr.go  14
-rw-r--r--  go/types/predicates.go  3
-rw-r--r--  go/types/resolver.go  10
-rw-r--r--  go/types/stmt.go  11
-rw-r--r--  go/types/testdata/builtins.src  6
-rw-r--r--  go/types/testdata/constdecl.src  19
-rw-r--r--  go/types/testdata/expr3.src  26
-rw-r--r--  go/types/testdata/issues.src  36
-rw-r--r--  go/types/testdata/stmt0.src  12
-rw-r--r--  go/types/testdata/vardecl.src  9
-rw-r--r--  go/types/typeutil/example_test.go  64
-rw-r--r--  go/types/typeutil/map.go  2
-rw-r--r--  go/vcs/discovery.go  3
-rw-r--r--  go/vcs/vcs.go  18
-rw-r--r--  go/vcs/vcs_test.go  44
-rw-r--r--  godoc/analysis/analysis.go  9
-rw-r--r--  godoc/analysis/typeinfo.go  5
-rw-r--r--  godoc/godoc.go  2
-rw-r--r--  godoc/redirect/hash.go  138
-rw-r--r--  godoc/redirect/redirect.go  95
-rw-r--r--  godoc/static/analysis/help.html  10
-rw-r--r--  godoc/static/doc.go  2
-rw-r--r--  godoc/static/makestatic.go  9
-rw-r--r--  godoc/static/search.txt  2
-rw-r--r--  godoc/static/static.go  14
-rw-r--r--  godoc/util/util.go  2
-rw-r--r--  godoc/vfs/gatefs/gatefs.go  2
-rw-r--r--  godoc/vfs/httpfs/httpfs.go  2
-rw-r--r--  godoc/vfs/mapfs/mapfs.go  2
-rw-r--r--  godoc/vfs/vfs.go  2
-rw-r--r--  godoc/vfs/zipfs/zipfs.go  2
-rw-r--r--  imports/fix.go  2
-rw-r--r--  imports/imports.go  4
-rw-r--r--  oracle/TODO  2
-rw-r--r--  oracle/describe.go  21
-rw-r--r--  oracle/implements.go  159
-rw-r--r--  oracle/oracle.go  23
-rw-r--r--  oracle/oracle_test.go  9
-rw-r--r--  oracle/pointsto.go  2
-rw-r--r--  oracle/pos.go  2
-rw-r--r--  oracle/serial/serial.go  26
-rw-r--r--  oracle/testdata/src/main/describe.golden  4
-rw-r--r--  oracle/testdata/src/main/implements-methods-json.go  38
-rw-r--r--  oracle/testdata/src/main/implements-methods-json.golden  283
-rw-r--r--  oracle/testdata/src/main/implements-methods.go  38
-rw-r--r--  oracle/testdata/src/main/implements-methods.golden  37
-rw-r--r--  oracle/testdata/src/main/whicherrs.go  27
-rw-r--r--  oracle/testdata/src/main/whicherrs.golden  8
-rw-r--r--  oracle/what.go  2
-rw-r--r--  oracle/whicherrs.go  294
-rw-r--r--  playground/common.go  2
-rw-r--r--  playground/socket/socket.go  2
-rw-r--r--  present/doc.go  2
-rw-r--r--  refactor/eg/eg.go  5
-rw-r--r--  refactor/eg/eg_test.go  9
-rw-r--r--  refactor/eg/match.go  14
-rw-r--r--  refactor/eg/rewrite.go  2
-rw-r--r--  refactor/eg/testdata/A1.golden  2
-rw-r--r--  refactor/eg/testdata/A2.golden  2
-rw-r--r--  refactor/eg/testdata/D1.golden  8
-rw-r--r--  refactor/eg/testdata/E1.golden  4
-rw-r--r--  refactor/eg/testdata/F1.go  2
-rw-r--r--  refactor/eg/testdata/F1.golden  2
-rw-r--r--  refactor/importgraph/graph.go  82
-rw-r--r--  refactor/lexical/lexical.go  2
-rw-r--r--  refactor/lexical/lexical_test.go  10
-rw-r--r--  refactor/rename/check.go  240
-rw-r--r--  refactor/rename/mvpkg.go  320
-rw-r--r--  refactor/rename/mvpkg_test.go  284
-rw-r--r--  refactor/rename/rename.el  10
-rw-r--r--  refactor/rename/rename.go  193
-rw-r--r--  refactor/rename/rename_test.go  397
-rw-r--r--  refactor/rename/spec.go  30
-rw-r--r--  refactor/rename/util.go  13
-rw-r--r--  refactor/satisfy/find.go  41
280 files changed, 7005 insertions, 12073 deletions
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..d2f212e
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,10 @@
+# Treat all files in this repo as binary, with no git magic updating
+# line endings. Windows users contributing to Go will need to use a
+# modern version of git and editors capable of LF line endings.
+#
+# We'll prevent accidental CRLF line endings from entering the repo
+# via the git-review gofmt checks.
+#
+# See golang.org/issue/9281
+
+* -text
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..5a9d62e
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+# Add no patterns to .gitignore except for files generated by the build.
+last-change
diff --git a/.hgignore b/.hgignore
deleted file mode 100644
index f9ddce8..0000000
--- a/.hgignore
+++ /dev/null
@@ -1,6 +0,0 @@
-# Add no patterns to .hgignore except for files generated by the build.
-syntax:glob
-last-change
-dashboard/coordinator/buildongce/client-*.dat
-dashboard/coordinator/buildongce/token.dat
-dashboard/coordinator/coordinator
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..88dff59
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,31 @@
+# Contributing to Go
+
+Go is an open source project.
+
+It is the work of hundreds of contributors. We appreciate your help!
+
+
+## Filing issues
+
+When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions:
+
+1. What version of Go are you using (`go version`)?
+2. What operating system and processor architecture are you using?
+3. What did you do?
+4. What did you expect to see?
+5. What did you see instead?
+
+General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.
+The gophers there will answer or ask you to file an issue if you've tripped over a bug.
+
+## Contributing code
+
+Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
+before sending patches.
+
+**We do not accept GitHub pull requests**
+(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review).
+
+Unless otherwise noted, the Go source files are distributed under
+the BSD-style license found in the LICENSE file.
+
diff --git a/README b/README
index 8231ee2..916ae2e 100644
--- a/README
+++ b/README
@@ -8,4 +8,3 @@ Packages include a type-checker for Go and an implementation of the
Static Single Assignment form (SSA) representation for Go programs.
To submit changes to this repository, see http://golang.org/doc/contribute.html.
-
diff --git a/benchmark/parse/parse.go b/benchmark/parse/parse.go
new file mode 100644
index 0000000..b37e6f0
--- /dev/null
+++ b/benchmark/parse/parse.go
@@ -0,0 +1,131 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package parse provides support for parsing benchmark results as
+// generated by 'go test -bench'.
+package parse // import "golang.org/x/tools/benchmark/parse"
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+)
+
+// Flags used by Benchmark.Measured to indicate
+// which measurements a Benchmark contains.
+const (
+ NsPerOp = 1 << iota
+ MBPerS
+ AllocedBytesPerOp
+ AllocsPerOp
+)
+
+// Benchmark is one run of a single benchmark.
+type Benchmark struct {
+ Name string // benchmark name
+ N int // number of iterations
+ NsPerOp float64 // nanoseconds per iteration
+ AllocedBytesPerOp uint64 // bytes allocated per iteration
+ AllocsPerOp uint64 // allocs per iteration
+ MBPerS float64 // MB processed per second
+ Measured int // which measurements were recorded
+ Ord int // ordinal position within a benchmark run
+}
+
+// ParseLine extracts a Benchmark from a single line of testing.B
+// output.
+func ParseLine(line string) (*Benchmark, error) {
+ fields := strings.Fields(line)
+
+ // Two required, positional fields: Name and iterations.
+ if len(fields) < 2 {
+ return nil, fmt.Errorf("two fields required, have %d", len(fields))
+ }
+ if !strings.HasPrefix(fields[0], "Benchmark") {
+ return nil, fmt.Errorf(`first field does not start with "Benchmark"`)
+ }
+ n, err := strconv.Atoi(fields[1])
+ if err != nil {
+ return nil, err
+ }
+ b := &Benchmark{Name: fields[0], N: n}
+
+ // Parse any remaining pairs of fields; we've parsed one pair already.
+ for i := 1; i < len(fields)/2; i++ {
+ b.parseMeasurement(fields[i*2], fields[i*2+1])
+ }
+ return b, nil
+}
+
+func (b *Benchmark) parseMeasurement(quant string, unit string) {
+ switch unit {
+ case "ns/op":
+ if f, err := strconv.ParseFloat(quant, 64); err == nil {
+ b.NsPerOp = f
+ b.Measured |= NsPerOp
+ }
+ case "MB/s":
+ if f, err := strconv.ParseFloat(quant, 64); err == nil {
+ b.MBPerS = f
+ b.Measured |= MBPerS
+ }
+ case "B/op":
+ if i, err := strconv.ParseUint(quant, 10, 64); err == nil {
+ b.AllocedBytesPerOp = i
+ b.Measured |= AllocedBytesPerOp
+ }
+ case "allocs/op":
+ if i, err := strconv.ParseUint(quant, 10, 64); err == nil {
+ b.AllocsPerOp = i
+ b.Measured |= AllocsPerOp
+ }
+ }
+}
+
+func (b *Benchmark) String() string {
+ buf := new(bytes.Buffer)
+ fmt.Fprintf(buf, "%s %d", b.Name, b.N)
+ if (b.Measured & NsPerOp) != 0 {
+ fmt.Fprintf(buf, " %.2f ns/op", b.NsPerOp)
+ }
+ if (b.Measured & MBPerS) != 0 {
+ fmt.Fprintf(buf, " %.2f MB/s", b.MBPerS)
+ }
+ if (b.Measured & AllocedBytesPerOp) != 0 {
+ fmt.Fprintf(buf, " %d B/op", b.AllocedBytesPerOp)
+ }
+ if (b.Measured & AllocsPerOp) != 0 {
+ fmt.Fprintf(buf, " %d allocs/op", b.AllocsPerOp)
+ }
+ return buf.String()
+}
+
+// Set is a collection of benchmarks from one
+// testing.B run, keyed by name to facilitate comparison.
+type Set map[string][]*Benchmark
+
+// ParseSet extracts a Set from testing.B output.
+// ParseSet preserves the order of benchmarks that have identical
+// names.
+func ParseSet(r io.Reader) (Set, error) {
+ bb := make(Set)
+ scan := bufio.NewScanner(r)
+ ord := 0
+ for scan.Scan() {
+ if b, err := ParseLine(scan.Text()); err == nil {
+ b.Ord = ord
+ ord++
+ bb[b.Name] = append(bb[b.Name], b)
+ }
+ }
+
+ if err := scan.Err(); err != nil {
+ return nil, err
+ }
+
+ return bb, nil
+}
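
A minimal usage sketch for the parse API added above (the sample benchmark line and the program itself are illustrative, not part of the patch):

package main

import (
	"fmt"
	"log"
	"strings"

	"golang.org/x/tools/benchmark/parse"
)

func main() {
	// One line of 'go test -bench' output.
	line := "BenchmarkEncrypt 100000000 19.6 ns/op 817.77 MB/s"

	b, err := parse.ParseLine(line)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(b.Name, b.N, b.NsPerOp, b.MBPerS) // BenchmarkEncrypt 100000000 19.6 817.77

	// ParseSet reads a whole benchmark run and groups results by name.
	set, err := parse.ParseSet(strings.NewReader(line + "\n"))
	if err != nil {
		log.Fatal(err)
	}
	for name, runs := range set {
		fmt.Println(name, len(runs)) // BenchmarkEncrypt 1
	}
}
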
diff --git a/cmd/benchcmp/parse_test.go b/benchmark/parse/parse_test.go
index a59b20c..06db848 100644
--- a/cmd/benchcmp/parse_test.go
+++ b/benchmark/parse/parse_test.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package main
+package parse
import (
"reflect"
@@ -13,47 +13,47 @@ import (
func TestParseLine(t *testing.T) {
cases := []struct {
line string
- want *Bench
+ want *Benchmark
err bool // expect an error
}{
{
line: "BenchmarkEncrypt 100000000 19.6 ns/op",
- want: &Bench{
+ want: &Benchmark{
Name: "BenchmarkEncrypt",
- N: 100000000, NsOp: 19.6,
- Measured: NsOp,
+ N: 100000000, NsPerOp: 19.6,
+ Measured: NsPerOp,
},
},
{
line: "BenchmarkEncrypt 100000000 19.6 ns/op 817.77 MB/s",
- want: &Bench{
+ want: &Benchmark{
Name: "BenchmarkEncrypt",
- N: 100000000, NsOp: 19.6, MbS: 817.77,
- Measured: NsOp | MbS,
+ N: 100000000, NsPerOp: 19.6, MBPerS: 817.77,
+ Measured: NsPerOp | MBPerS,
},
},
{
line: "BenchmarkEncrypt 100000000 19.6 ns/op 817.77",
- want: &Bench{
+ want: &Benchmark{
Name: "BenchmarkEncrypt",
- N: 100000000, NsOp: 19.6,
- Measured: NsOp,
+ N: 100000000, NsPerOp: 19.6,
+ Measured: NsPerOp,
},
},
{
line: "BenchmarkEncrypt 100000000 19.6 ns/op 817.77 MB/s 5 allocs/op",
- want: &Bench{
+ want: &Benchmark{
Name: "BenchmarkEncrypt",
- N: 100000000, NsOp: 19.6, MbS: 817.77, AllocsOp: 5,
- Measured: NsOp | MbS | AllocsOp,
+ N: 100000000, NsPerOp: 19.6, MBPerS: 817.77, AllocsPerOp: 5,
+ Measured: NsPerOp | MBPerS | AllocsPerOp,
},
},
{
line: "BenchmarkEncrypt 100000000 19.6 ns/op 817.77 MB/s 3 B/op 5 allocs/op",
- want: &Bench{
+ want: &Benchmark{
Name: "BenchmarkEncrypt",
- N: 100000000, NsOp: 19.6, MbS: 817.77, BOp: 3, AllocsOp: 5,
- Measured: NsOp | MbS | BOp | AllocsOp,
+ N: 100000000, NsPerOp: 19.6, MBPerS: 817.77, AllocedBytesPerOp: 3, AllocsPerOp: 5,
+ Measured: NsPerOp | MBPerS | AllocedBytesPerOp | AllocsPerOp,
},
},
// error handling cases
@@ -67,7 +67,7 @@ func TestParseLine(t *testing.T) {
},
{
line: "BenchmarkBridge 100000000 19.6 smoots", // unknown unit
- want: &Bench{
+ want: &Benchmark{
Name: "BenchmarkBridge",
N: 100000000,
},
@@ -90,7 +90,7 @@ func TestParseLine(t *testing.T) {
}
}
-func TestParseBenchSet(t *testing.T) {
+func TestParseSet(t *testing.T) {
// Test two things:
// 1. The noise that can accompany testing.B output gets ignored.
// 2. Benchmarks with the same name have their order preserved.
@@ -111,82 +111,42 @@ func TestParseBenchSet(t *testing.T) {
ok net/http 95.783s
`
- want := BenchSet{
- "BenchmarkReadRequestApachebench": []*Bench{
+ want := Set{
+ "BenchmarkReadRequestApachebench": []*Benchmark{
{
Name: "BenchmarkReadRequestApachebench",
- N: 1000000, NsOp: 2960, MbS: 27.70, BOp: 839, AllocsOp: 9,
- Measured: NsOp | MbS | BOp | AllocsOp,
- ord: 2,
+ N: 1000000, NsPerOp: 2960, MBPerS: 27.70, AllocedBytesPerOp: 839, AllocsPerOp: 9,
+ Measured: NsPerOp | MBPerS | AllocedBytesPerOp | AllocsPerOp,
+ Ord: 2,
},
},
- "BenchmarkClientServerParallel64": []*Bench{
+ "BenchmarkClientServerParallel64": []*Benchmark{
{
Name: "BenchmarkClientServerParallel64",
- N: 50000, NsOp: 59192, BOp: 7028, AllocsOp: 60,
- Measured: NsOp | BOp | AllocsOp,
- ord: 3,
+ N: 50000, NsPerOp: 59192, AllocedBytesPerOp: 7028, AllocsPerOp: 60,
+ Measured: NsPerOp | AllocedBytesPerOp | AllocsPerOp,
+ Ord: 3,
},
},
- "BenchmarkEncrypt": []*Bench{
+ "BenchmarkEncrypt": []*Benchmark{
{
Name: "BenchmarkEncrypt",
- N: 100000000, NsOp: 19.6,
- Measured: NsOp,
- ord: 0,
+ N: 100000000, NsPerOp: 19.6,
+ Measured: NsPerOp,
+ Ord: 0,
},
{
Name: "BenchmarkEncrypt",
- N: 5000000, NsOp: 517,
- Measured: NsOp,
- ord: 1,
+ N: 5000000, NsPerOp: 517,
+ Measured: NsPerOp,
+ Ord: 1,
},
},
}
- have, err := ParseBenchSet(strings.NewReader(in))
+ have, err := ParseSet(strings.NewReader(in))
if err != nil {
- t.Fatalf("unexpected err during ParseBenchSet: %v", err)
- }
- if !reflect.DeepEqual(want, have) {
- t.Errorf("parsed bench set incorrectly, want %v have %v", want, have)
- }
-}
-
-func TestParseBenchSetBest(t *testing.T) {
- // Test that -best mode takes best ns/op.
- *best = true
- defer func() {
- *best = false
- }()
-
- in := `
- Benchmark1 10 100 ns/op
- Benchmark2 10 60 ns/op
- Benchmark2 10 500 ns/op
- Benchmark1 10 50 ns/op
- `
-
- want := BenchSet{
- "Benchmark1": []*Bench{
- {
- Name: "Benchmark1",
- N: 10, NsOp: 50, Measured: NsOp,
- ord: 0,
- },
- },
- "Benchmark2": []*Bench{
- {
- Name: "Benchmark2",
- N: 10, NsOp: 60, Measured: NsOp,
- ord: 1,
- },
- },
- }
-
- have, err := ParseBenchSet(strings.NewReader(in))
- if err != nil {
- t.Fatalf("unexpected err during ParseBenchSet: %v", err)
+ t.Fatalf("unexpected err during ParseSet: %v", err)
}
if !reflect.DeepEqual(want, have) {
t.Errorf("parsed bench set incorrectly, want %v have %v", want, have)
diff --git a/blog/atom/atom.go b/blog/atom/atom.go
index bc114dd..f12c31d 100644
--- a/blog/atom/atom.go
+++ b/blog/atom/atom.go
@@ -5,7 +5,7 @@
// Adapted from encoding/xml/read_test.go.
// Package atom defines XML data structures for an Atom feed.
-package atom
+package atom // import "golang.org/x/tools/blog/atom"
import (
"encoding/xml"
diff --git a/blog/blog.go b/blog/blog.go
index b53cf5b..23c8dc6 100644
--- a/blog/blog.go
+++ b/blog/blog.go
@@ -3,7 +3,7 @@
// license that can be found in the LICENSE file.
// Package blog implements a web server for articles written in present format.
-package blog
+package blog // import "golang.org/x/tools/blog"
import (
"bytes"
diff --git a/cmd/benchcmp/benchcmp.go b/cmd/benchcmp/benchcmp.go
index 9bfe2e3..32f3a1c 100644
--- a/cmd/benchcmp/benchcmp.go
+++ b/cmd/benchcmp/benchcmp.go
@@ -11,6 +11,8 @@ import (
"sort"
"strconv"
"text/tabwriter"
+
+ "golang.org/x/tools/benchmark/parse"
)
var (
@@ -61,71 +63,71 @@ func main() {
var header bool // Has the header has been displayed yet for a given block?
if *magSort {
- sort.Sort(ByDeltaNsOp(cmps))
+ sort.Sort(ByDeltaNsPerOp(cmps))
} else {
sort.Sort(ByParseOrder(cmps))
}
for _, cmp := range cmps {
- if !cmp.Measured(NsOp) {
+ if !cmp.Measured(parse.NsPerOp) {
continue
}
- if delta := cmp.DeltaNsOp(); !*changedOnly || delta.Changed() {
+ if delta := cmp.DeltaNsPerOp(); !*changedOnly || delta.Changed() {
if !header {
fmt.Fprint(w, "benchmark\told ns/op\tnew ns/op\tdelta\n")
header = true
}
- fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", cmp.Name(), formatNs(cmp.Before.NsOp), formatNs(cmp.After.NsOp), delta.Percent())
+ fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", cmp.Name(), formatNs(cmp.Before.NsPerOp), formatNs(cmp.After.NsPerOp), delta.Percent())
}
}
header = false
if *magSort {
- sort.Sort(ByDeltaMbS(cmps))
+ sort.Sort(ByDeltaMBPerS(cmps))
}
for _, cmp := range cmps {
- if !cmp.Measured(MbS) {
+ if !cmp.Measured(parse.MBPerS) {
continue
}
- if delta := cmp.DeltaMbS(); !*changedOnly || delta.Changed() {
+ if delta := cmp.DeltaMBPerS(); !*changedOnly || delta.Changed() {
if !header {
fmt.Fprint(w, "\nbenchmark\told MB/s\tnew MB/s\tspeedup\n")
header = true
}
- fmt.Fprintf(w, "%s\t%.2f\t%.2f\t%s\n", cmp.Name(), cmp.Before.MbS, cmp.After.MbS, delta.Multiple())
+ fmt.Fprintf(w, "%s\t%.2f\t%.2f\t%s\n", cmp.Name(), cmp.Before.MBPerS, cmp.After.MBPerS, delta.Multiple())
}
}
header = false
if *magSort {
- sort.Sort(ByDeltaAllocsOp(cmps))
+ sort.Sort(ByDeltaAllocsPerOp(cmps))
}
for _, cmp := range cmps {
- if !cmp.Measured(AllocsOp) {
+ if !cmp.Measured(parse.AllocsPerOp) {
continue
}
- if delta := cmp.DeltaAllocsOp(); !*changedOnly || delta.Changed() {
+ if delta := cmp.DeltaAllocsPerOp(); !*changedOnly || delta.Changed() {
if !header {
fmt.Fprint(w, "\nbenchmark\told allocs\tnew allocs\tdelta\n")
header = true
}
- fmt.Fprintf(w, "%s\t%d\t%d\t%s\n", cmp.Name(), cmp.Before.AllocsOp, cmp.After.AllocsOp, delta.Percent())
+ fmt.Fprintf(w, "%s\t%d\t%d\t%s\n", cmp.Name(), cmp.Before.AllocsPerOp, cmp.After.AllocsPerOp, delta.Percent())
}
}
header = false
if *magSort {
- sort.Sort(ByDeltaBOp(cmps))
+ sort.Sort(ByDeltaAllocedBytesPerOp(cmps))
}
for _, cmp := range cmps {
- if !cmp.Measured(BOp) {
+ if !cmp.Measured(parse.AllocedBytesPerOp) {
continue
}
- if delta := cmp.DeltaBOp(); !*changedOnly || delta.Changed() {
+ if delta := cmp.DeltaAllocedBytesPerOp(); !*changedOnly || delta.Changed() {
if !header {
fmt.Fprint(w, "\nbenchmark\told bytes\tnew bytes\tdelta\n")
header = true
}
- fmt.Fprintf(w, "%s\t%d\t%d\t%s\n", cmp.Name(), cmp.Before.BOp, cmp.After.BOp, cmp.DeltaBOp().Percent())
+ fmt.Fprintf(w, "%s\t%d\t%d\t%s\n", cmp.Name(), cmp.Before.AllocedBytesPerOp, cmp.After.AllocedBytesPerOp, cmp.DeltaAllocedBytesPerOp().Percent())
}
}
}
@@ -135,18 +137,39 @@ func fatal(msg interface{}) {
os.Exit(1)
}
-func parseFile(path string) BenchSet {
+func parseFile(path string) parse.Set {
f, err := os.Open(path)
if err != nil {
fatal(err)
}
- bb, err := ParseBenchSet(f)
+ defer f.Close()
+ bb, err := parse.ParseSet(f)
if err != nil {
fatal(err)
}
+ if *best {
+ selectBest(bb)
+ }
return bb
}
+func selectBest(bs parse.Set) {
+ for name, bb := range bs {
+ if len(bb) < 2 {
+ continue
+ }
+ ord := bb[0].Ord
+ best := bb[0]
+ for _, b := range bb {
+ if b.NsPerOp < best.NsPerOp {
+ b.Ord = ord
+ best = b
+ }
+ }
+ bs[name] = []*parse.Benchmark{best}
+ }
+}
+
// formatNs formats ns measurements to expose a useful amount of
// precision. It mirrors the ns precision logic of testing.B.
func formatNs(ns float64) string {
diff --git a/cmd/benchcmp/benchcmp_test.go b/cmd/benchcmp/benchcmp_test.go
new file mode 100644
index 0000000..2226079
--- /dev/null
+++ b/cmd/benchcmp/benchcmp_test.go
@@ -0,0 +1,59 @@
+package main
+
+import (
+ "reflect"
+ "testing"
+
+ "golang.org/x/tools/benchmark/parse"
+)
+
+func TestSelectBest(t *testing.T) {
+ have := parse.Set{
+ "Benchmark1": []*parse.Benchmark{
+ {
+ Name: "Benchmark1",
+ N: 10, NsPerOp: 100, Measured: parse.NsPerOp,
+ Ord: 0,
+ },
+ {
+ Name: "Benchmark1",
+ N: 10, NsPerOp: 50, Measured: parse.NsPerOp,
+ Ord: 3,
+ },
+ },
+ "Benchmark2": []*parse.Benchmark{
+ {
+ Name: "Benchmark2",
+ N: 10, NsPerOp: 60, Measured: parse.NsPerOp,
+ Ord: 1,
+ },
+ {
+ Name: "Benchmark2",
+ N: 10, NsPerOp: 500, Measured: parse.NsPerOp,
+ Ord: 2,
+ },
+ },
+ }
+
+ want := parse.Set{
+ "Benchmark1": []*parse.Benchmark{
+ {
+ Name: "Benchmark1",
+ N: 10, NsPerOp: 50, Measured: parse.NsPerOp,
+ Ord: 0,
+ },
+ },
+ "Benchmark2": []*parse.Benchmark{
+ {
+ Name: "Benchmark2",
+ N: 10, NsPerOp: 60, Measured: parse.NsPerOp,
+ Ord: 1,
+ },
+ },
+ }
+
+ selectBest(have)
+ if !reflect.DeepEqual(want, have) {
+ t.Errorf("filtered bench set incorrectly, want %v have %v", want, have)
+ }
+}
diff --git a/cmd/benchcmp/compare.go b/cmd/benchcmp/compare.go
index 9ebe426..c3f5e89 100644
--- a/cmd/benchcmp/compare.go
+++ b/cmd/benchcmp/compare.go
@@ -7,16 +7,18 @@ package main
import (
"fmt"
"math"
+
+ "golang.org/x/tools/benchmark/parse"
)
// BenchCmp is a pair of benchmarks.
type BenchCmp struct {
- Before *Bench
- After *Bench
+ Before *parse.Benchmark
+ After *parse.Benchmark
}
// Correlate correlates benchmarks from two BenchSets.
-func Correlate(before, after BenchSet) (cmps []BenchCmp, warnings []string) {
+func Correlate(before, after parse.Set) (cmps []BenchCmp, warnings []string) {
cmps = make([]BenchCmp, 0, len(after))
for name, beforebb := range before {
afterbb := after[name]
@@ -34,12 +36,14 @@ func Correlate(before, after BenchSet) (cmps []BenchCmp, warnings []string) {
func (c BenchCmp) Name() string { return c.Before.Name }
func (c BenchCmp) String() string { return fmt.Sprintf("<%s, %s>", c.Before, c.After) }
-func (c BenchCmp) Measured(flag int) bool { return c.Before.Measured&c.After.Measured&flag != 0 }
-func (c BenchCmp) DeltaNsOp() Delta { return Delta{c.Before.NsOp, c.After.NsOp} }
-func (c BenchCmp) DeltaMbS() Delta { return Delta{c.Before.MbS, c.After.MbS} }
-func (c BenchCmp) DeltaBOp() Delta { return Delta{float64(c.Before.BOp), float64(c.After.BOp)} }
-func (c BenchCmp) DeltaAllocsOp() Delta {
- return Delta{float64(c.Before.AllocsOp), float64(c.After.AllocsOp)}
+func (c BenchCmp) Measured(flag int) bool { return (c.Before.Measured & c.After.Measured & flag) != 0 }
+func (c BenchCmp) DeltaNsPerOp() Delta { return Delta{c.Before.NsPerOp, c.After.NsPerOp} }
+func (c BenchCmp) DeltaMBPerS() Delta { return Delta{c.Before.MBPerS, c.After.MBPerS} }
+func (c BenchCmp) DeltaAllocedBytesPerOp() Delta {
+ return Delta{float64(c.Before.AllocedBytesPerOp), float64(c.After.AllocedBytesPerOp)}
+}
+func (c BenchCmp) DeltaAllocsPerOp() Delta {
+ return Delta{float64(c.Before.AllocsPerOp), float64(c.After.AllocsPerOp)}
}
// Delta is the before and after value for a benchmark measurement.
@@ -102,7 +106,7 @@ type ByParseOrder []BenchCmp
func (x ByParseOrder) Len() int { return len(x) }
func (x ByParseOrder) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-func (x ByParseOrder) Less(i, j int) bool { return x[i].Before.ord < x[j].Before.ord }
+func (x ByParseOrder) Less(i, j int) bool { return x[i].Before.Ord < x[j].Before.Ord }
// lessByDelta provides lexicographic ordering:
// * largest delta by magnitude
@@ -115,34 +119,38 @@ func lessByDelta(i, j BenchCmp, calcDelta func(BenchCmp) Delta) bool {
return i.Name() < j.Name()
}
-// ByDeltaNsOp sorts BenchCmps lexicographically by change
+// ByDeltaNsPerOp sorts BenchCmps lexicographically by change
// in ns/op, descending, then by benchmark name.
-type ByDeltaNsOp []BenchCmp
+type ByDeltaNsPerOp []BenchCmp
-func (x ByDeltaNsOp) Len() int { return len(x) }
-func (x ByDeltaNsOp) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-func (x ByDeltaNsOp) Less(i, j int) bool { return lessByDelta(x[i], x[j], BenchCmp.DeltaNsOp) }
+func (x ByDeltaNsPerOp) Len() int { return len(x) }
+func (x ByDeltaNsPerOp) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x ByDeltaNsPerOp) Less(i, j int) bool { return lessByDelta(x[i], x[j], BenchCmp.DeltaNsPerOp) }
-// ByDeltaMbS sorts BenchCmps lexicographically by change
+// ByDeltaMBPerS sorts BenchCmps lexicographically by change
// in MB/s, descending, then by benchmark name.
-type ByDeltaMbS []BenchCmp
+type ByDeltaMBPerS []BenchCmp
-func (x ByDeltaMbS) Len() int { return len(x) }
-func (x ByDeltaMbS) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-func (x ByDeltaMbS) Less(i, j int) bool { return lessByDelta(x[i], x[j], BenchCmp.DeltaMbS) }
+func (x ByDeltaMBPerS) Len() int { return len(x) }
+func (x ByDeltaMBPerS) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x ByDeltaMBPerS) Less(i, j int) bool { return lessByDelta(x[i], x[j], BenchCmp.DeltaMBPerS) }
-// ByDeltaBOp sorts BenchCmps lexicographically by change
+// ByDeltaAllocedBytesPerOp sorts BenchCmps lexicographically by change
// in B/op, descending, then by benchmark name.
-type ByDeltaBOp []BenchCmp
+type ByDeltaAllocedBytesPerOp []BenchCmp
-func (x ByDeltaBOp) Len() int { return len(x) }
-func (x ByDeltaBOp) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-func (x ByDeltaBOp) Less(i, j int) bool { return lessByDelta(x[i], x[j], BenchCmp.DeltaBOp) }
+func (x ByDeltaAllocedBytesPerOp) Len() int { return len(x) }
+func (x ByDeltaAllocedBytesPerOp) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x ByDeltaAllocedBytesPerOp) Less(i, j int) bool {
+ return lessByDelta(x[i], x[j], BenchCmp.DeltaAllocedBytesPerOp)
+}
-// ByDeltaAllocsOp sorts BenchCmps lexicographically by change
+// ByDeltaAllocsPerOp sorts BenchCmps lexicographically by change
// in allocs/op, descending, then by benchmark name.
-type ByDeltaAllocsOp []BenchCmp
+type ByDeltaAllocsPerOp []BenchCmp
-func (x ByDeltaAllocsOp) Len() int { return len(x) }
-func (x ByDeltaAllocsOp) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-func (x ByDeltaAllocsOp) Less(i, j int) bool { return lessByDelta(x[i], x[j], BenchCmp.DeltaAllocsOp) }
+func (x ByDeltaAllocsPerOp) Len() int { return len(x) }
+func (x ByDeltaAllocsPerOp) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x ByDeltaAllocsPerOp) Less(i, j int) bool {
+ return lessByDelta(x[i], x[j], BenchCmp.DeltaAllocsPerOp)
+}
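
Taken together with the parse package, the comparison helpers above are driven roughly as in the following sketch; the helper name printNsPerOpDeltas is hypothetical and would live alongside compare.go in the benchcmp main package:

// Sketch (same package as benchcmp; uses fmt, os, sort and
// golang.org/x/tools/benchmark/parse): correlate two benchmark runs
// and print ns/op deltas, largest change first.
func printNsPerOpDeltas(before, after parse.Set) {
	cmps, warnings := Correlate(before, after)
	for _, w := range warnings {
		fmt.Fprintln(os.Stderr, w)
	}
	sort.Sort(ByDeltaNsPerOp(cmps))
	for _, c := range cmps {
		if !c.Measured(parse.NsPerOp) {
			continue
		}
		fmt.Printf("%s\t%s\n", c.Name(), c.DeltaNsPerOp().Percent())
	}
}
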
diff --git a/cmd/benchcmp/compare_test.go b/cmd/benchcmp/compare_test.go
index 5baca66..3403796 100644
--- a/cmd/benchcmp/compare_test.go
+++ b/cmd/benchcmp/compare_test.go
@@ -9,6 +9,8 @@ import (
"reflect"
"sort"
"testing"
+
+ "golang.org/x/tools/benchmark/parse"
)
func TestDelta(t *testing.T) {
@@ -52,29 +54,29 @@ func TestCorrelate(t *testing.T) {
// Benches that are going to be successfully correlated get N thus:
// 0x<counter><num benches><b = before | a = after>
// Read this: "<counter> of <num benches>, from <before|after>".
- before := BenchSet{
- "BenchmarkOneEach": []*Bench{{Name: "BenchmarkOneEach", N: 0x11b}},
- "BenchmarkOneToNone": []*Bench{{Name: "BenchmarkOneToNone"}},
- "BenchmarkOneToTwo": []*Bench{{Name: "BenchmarkOneToTwo"}},
- "BenchmarkTwoToOne": []*Bench{
+ before := parse.Set{
+ "BenchmarkOneEach": []*parse.Benchmark{{Name: "BenchmarkOneEach", N: 0x11b}},
+ "BenchmarkOneToNone": []*parse.Benchmark{{Name: "BenchmarkOneToNone"}},
+ "BenchmarkOneToTwo": []*parse.Benchmark{{Name: "BenchmarkOneToTwo"}},
+ "BenchmarkTwoToOne": []*parse.Benchmark{
{Name: "BenchmarkTwoToOne"},
{Name: "BenchmarkTwoToOne"},
},
- "BenchmarkTwoEach": []*Bench{
+ "BenchmarkTwoEach": []*parse.Benchmark{
{Name: "BenchmarkTwoEach", N: 0x12b},
{Name: "BenchmarkTwoEach", N: 0x22b},
},
}
- after := BenchSet{
- "BenchmarkOneEach": []*Bench{{Name: "BenchmarkOneEach", N: 0x11a}},
- "BenchmarkNoneToOne": []*Bench{{Name: "BenchmarkNoneToOne"}},
- "BenchmarkTwoToOne": []*Bench{{Name: "BenchmarkTwoToOne"}},
- "BenchmarkOneToTwo": []*Bench{
+ after := parse.Set{
+ "BenchmarkOneEach": []*parse.Benchmark{{Name: "BenchmarkOneEach", N: 0x11a}},
+ "BenchmarkNoneToOne": []*parse.Benchmark{{Name: "BenchmarkNoneToOne"}},
+ "BenchmarkTwoToOne": []*parse.Benchmark{{Name: "BenchmarkTwoToOne"}},
+ "BenchmarkOneToTwo": []*parse.Benchmark{
{Name: "BenchmarkOneToTwo"},
{Name: "BenchmarkOneToTwo"},
},
- "BenchmarkTwoEach": []*Bench{
+ "BenchmarkTwoEach": []*parse.Benchmark{
{Name: "BenchmarkTwoEach", N: 0x12a},
{Name: "BenchmarkTwoEach", N: 0x22a},
},
@@ -108,14 +110,14 @@ func TestCorrelate(t *testing.T) {
func TestBenchCmpSorting(t *testing.T) {
c := []BenchCmp{
- {&Bench{Name: "BenchmarkMuchFaster", NsOp: 10, ord: 3}, &Bench{Name: "BenchmarkMuchFaster", NsOp: 1}},
- {&Bench{Name: "BenchmarkSameB", NsOp: 5, ord: 1}, &Bench{Name: "BenchmarkSameB", NsOp: 5}},
- {&Bench{Name: "BenchmarkSameA", NsOp: 5, ord: 2}, &Bench{Name: "BenchmarkSameA", NsOp: 5}},
- {&Bench{Name: "BenchmarkSlower", NsOp: 10, ord: 0}, &Bench{Name: "BenchmarkSlower", NsOp: 11}},
+ {&parse.Benchmark{Name: "BenchmarkMuchFaster", NsPerOp: 10, Ord: 3}, &parse.Benchmark{Name: "BenchmarkMuchFaster", NsPerOp: 1}},
+ {&parse.Benchmark{Name: "BenchmarkSameB", NsPerOp: 5, Ord: 1}, &parse.Benchmark{Name: "BenchmarkSameB", NsPerOp: 5}},
+ {&parse.Benchmark{Name: "BenchmarkSameA", NsPerOp: 5, Ord: 2}, &parse.Benchmark{Name: "BenchmarkSameA", NsPerOp: 5}},
+ {&parse.Benchmark{Name: "BenchmarkSlower", NsPerOp: 10, Ord: 0}, &parse.Benchmark{Name: "BenchmarkSlower", NsPerOp: 11}},
}
// Test just one magnitude-based sort order; they are symmetric.
- sort.Sort(ByDeltaNsOp(c))
+ sort.Sort(ByDeltaNsPerOp(c))
want := []string{"BenchmarkMuchFaster", "BenchmarkSlower", "BenchmarkSameA", "BenchmarkSameB"}
have := []string{c[0].Name(), c[1].Name(), c[2].Name(), c[3].Name()}
if !reflect.DeepEqual(want, have) {
diff --git a/cmd/benchcmp/doc.go b/cmd/benchcmp/doc.go
index b0714b9..f5c7a36 100644
--- a/cmd/benchcmp/doc.go
+++ b/cmd/benchcmp/doc.go
@@ -34,4 +34,4 @@ in a format like this:
BenchmarkConcat 80 48 -40.00%
*/
-package main
+package main // import "golang.org/x/tools/cmd/benchcmp"
diff --git a/cmd/benchcmp/parse.go b/cmd/benchcmp/parse.go
deleted file mode 100644
index f1df681..0000000
--- a/cmd/benchcmp/parse.go
+++ /dev/null
@@ -1,135 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "strconv"
- "strings"
-)
-
-// Flags used by Bench.Measured to indicate
-// which measurements a Bench contains.
-const (
- NsOp = 1 << iota
- MbS
- BOp
- AllocsOp
-)
-
-// Bench is one run of a single benchmark.
-type Bench struct {
- Name string // benchmark name
- N int // number of iterations
- NsOp float64 // nanoseconds per iteration
- MbS float64 // MB processed per second
- BOp uint64 // bytes allocated per iteration
- AllocsOp uint64 // allocs per iteration
- Measured int // which measurements were recorded
- ord int // ordinal position within a benchmark run, used for sorting
-}
-
-// ParseLine extracts a Bench from a single line of testing.B output.
-func ParseLine(line string) (*Bench, error) {
- fields := strings.Fields(line)
-
- // Two required, positional fields: Name and iterations.
- if len(fields) < 2 {
- return nil, fmt.Errorf("two fields required, have %d", len(fields))
- }
- if !strings.HasPrefix(fields[0], "Benchmark") {
- return nil, fmt.Errorf(`first field does not start with "Benchmark`)
- }
- n, err := strconv.Atoi(fields[1])
- if err != nil {
- return nil, err
- }
- b := &Bench{Name: fields[0], N: n}
-
- // Parse any remaining pairs of fields; we've parsed one pair already.
- for i := 1; i < len(fields)/2; i++ {
- b.parseMeasurement(fields[i*2], fields[i*2+1])
- }
- return b, nil
-}
-
-func (b *Bench) parseMeasurement(quant string, unit string) {
- switch unit {
- case "ns/op":
- if f, err := strconv.ParseFloat(quant, 64); err == nil {
- b.NsOp = f
- b.Measured |= NsOp
- }
- case "MB/s":
- if f, err := strconv.ParseFloat(quant, 64); err == nil {
- b.MbS = f
- b.Measured |= MbS
- }
- case "B/op":
- if i, err := strconv.ParseUint(quant, 10, 64); err == nil {
- b.BOp = i
- b.Measured |= BOp
- }
- case "allocs/op":
- if i, err := strconv.ParseUint(quant, 10, 64); err == nil {
- b.AllocsOp = i
- b.Measured |= AllocsOp
- }
- }
-}
-
-func (b *Bench) String() string {
- buf := new(bytes.Buffer)
- fmt.Fprintf(buf, "%s %d", b.Name, b.N)
- if b.Measured&NsOp != 0 {
- fmt.Fprintf(buf, " %.2f ns/op", b.NsOp)
- }
- if b.Measured&MbS != 0 {
- fmt.Fprintf(buf, " %.2f MB/s", b.MbS)
- }
- if b.Measured&BOp != 0 {
- fmt.Fprintf(buf, " %d B/op", b.BOp)
- }
- if b.Measured&AllocsOp != 0 {
- fmt.Fprintf(buf, " %d allocs/op", b.AllocsOp)
- }
- return buf.String()
-}
-
-// BenchSet is a collection of benchmarks from one
-// testing.B run, keyed by name to facilitate comparison.
-type BenchSet map[string][]*Bench
-
-// Parse extracts a BenchSet from testing.B output. Parse
-// preserves the order of benchmarks that have identical names.
-func ParseBenchSet(r io.Reader) (BenchSet, error) {
- bb := make(BenchSet)
- scan := bufio.NewScanner(r)
- ord := 0
- for scan.Scan() {
- if b, err := ParseLine(scan.Text()); err == nil {
- b.ord = ord
- ord++
- old := bb[b.Name]
- if *best && old != nil {
- if old[0].NsOp < b.NsOp {
- continue
- }
- b.ord = old[0].ord
- bb[b.Name] = old[:0]
- }
- bb[b.Name] = append(bb[b.Name], b)
- }
- }
-
- if err := scan.Err(); err != nil {
- return nil, err
- }
-
- return bb, nil
-}
diff --git a/cmd/callgraph/main.go b/cmd/callgraph/main.go
index 57ad2dd..0411395 100644
--- a/cmd/callgraph/main.go
+++ b/cmd/callgraph/main.go
@@ -4,7 +4,7 @@
// callgraph: a tool for reporting the call graph of a Go program.
// See Usage for details, or run with -help.
-package main
+package main // import "golang.org/x/tools/cmd/callgraph"
// TODO(adonovan):
//
@@ -31,7 +31,9 @@ import (
"text/template"
"golang.org/x/tools/go/callgraph"
+ "golang.org/x/tools/go/callgraph/cha"
"golang.org/x/tools/go/callgraph/rta"
+ "golang.org/x/tools/go/callgraph/static"
"golang.org/x/tools/go/loader"
"golang.org/x/tools/go/pointer"
"golang.org/x/tools/go/ssa"
@@ -51,21 +53,30 @@ const Usage = `callgraph: display the the call graph of a Go program.
Usage:
- callgraph [-algo=rta|pta] [-test] [-format=...] <args>...
+ callgraph [-algo=static|cha|rta|pta] [-test] [-format=...] <args>...
Flags:
--algo Specifies the call-graph construction algorithm. One of:
- "rta": Rapid Type Analysis (simple and fast)
- "pta": inclusion-based Points-To Analysis (slower but more precise)
+-algo Specifies the call-graph construction algorithm, one of:
+
+ static static calls only (unsound)
+ cha Class Hierarchy Analysis
+ rta Rapid Type Analysis
+ pta inclusion-based Points-To Analysis
+
+ The algorithms are ordered by increasing precision in their
+ treatment of dynamic calls (and thus also computational cost).
+ RTA and PTA require a whole program (main or test), and
+ include only functions reachable from main.
-test Include the package's tests in the analysis.
-format Specifies the format in which each call graph edge is displayed.
One of:
- "digraph": output suitable for input to
+
+ digraph output suitable for input to
golang.org/x/tools/cmd/digraph.
- "graphviz": output in AT&T GraphViz (.dot) format.
+ graphviz output in AT&T GraphViz (.dot) format.
All other values are interpreted using text/template syntax.
The default value is:
@@ -134,7 +145,7 @@ func init() {
func main() {
flag.Parse()
if err := doCallgraph(&build.Default, *algoFlag, *formatFlag, *testFlag, flag.Args()); err != nil {
- fmt.Fprintf(os.Stderr, "callgraph: %s.\n", err)
+ fmt.Fprintf(os.Stderr, "callgraph: %s\n", err)
os.Exit(1)
}
}
@@ -142,10 +153,7 @@ func main() {
var stdout io.Writer = os.Stdout
func doCallgraph(ctxt *build.Context, algo, format string, tests bool, args []string) error {
- conf := loader.Config{
- Build: ctxt,
- SourceImports: true,
- }
+ conf := loader.Config{Build: ctxt}
if len(args) == 0 {
fmt.Fprintln(os.Stderr, Usage)
@@ -168,44 +176,22 @@ func doCallgraph(ctxt *build.Context, algo, format string, tests bool, args []st
prog := ssa.Create(iprog, 0)
prog.BuildAll()
- // Determine the main package.
- // TODO(adonovan): allow independent control over tests, mains
- // and libraries.
- // TODO(adonovan): put this logic in a library; we keep reinventing it.
- var main *ssa.Package
- pkgs := prog.AllPackages()
- if tests {
- // If -test, use all packages' tests.
- if len(pkgs) > 0 {
- main = prog.CreateTestMainPackage(pkgs...)
- }
- if main == nil {
- return fmt.Errorf("no tests")
- }
- } else {
- // Otherwise, use main.main.
- for _, pkg := range pkgs {
- if pkg.Object.Name() == "main" {
- main = pkg
- if main.Func("main") == nil {
- return fmt.Errorf("no func main() in main package")
- }
- break
- }
- }
- if main == nil {
- return fmt.Errorf("no main package")
- }
- }
-
- // Invariant: main package has a main() function.
-
// -- call graph construction ------------------------------------------
var cg *callgraph.Graph
switch algo {
+ case "static":
+ cg = static.CallGraph(prog)
+
+ case "cha":
+ cg = cha.CallGraph(prog)
+
case "pta":
+ main, err := mainPackage(prog, tests)
+ if err != nil {
+ return err
+ }
config := &pointer.Config{
Mains: []*ssa.Package{main},
BuildCallGraph: true,
@@ -217,6 +203,10 @@ func doCallgraph(ctxt *build.Context, algo, format string, tests bool, args []st
cg = ptares.CallGraph
case "rta":
+ main, err := mainPackage(prog, tests)
+ if err != nil {
+ return err
+ }
roots := []*ssa.Function{
main.Func("init"),
main.Func("main"),
@@ -244,7 +234,7 @@ func doCallgraph(ctxt *build.Context, algo, format string, tests bool, args []st
case "graphviz":
before = "digraph callgraph {\n"
after = "}\n"
- format = ` {{printf "%q" .Caller}} -> {{printf "%q" .Callee}}"`
+ format = ` {{printf "%q" .Caller}} -> {{printf "%q" .Callee}}`
}
tmpl, err := template.New("-format").Parse(format)
@@ -279,6 +269,37 @@ func doCallgraph(ctxt *build.Context, algo, format string, tests bool, args []st
return nil
}
+// mainPackage returns the main package to analyze.
+// The resulting package has a main() function.
+func mainPackage(prog *ssa.Program, tests bool) (*ssa.Package, error) {
+ pkgs := prog.AllPackages()
+
+ // TODO(adonovan): allow independent control over tests, mains and libraries.
+ // TODO(adonovan): put this logic in a library; we keep reinventing it.
+
+ if tests {
+ // If -test, use all packages' tests.
+ if len(pkgs) > 0 {
+ if main := prog.CreateTestMainPackage(pkgs...); main != nil {
+ return main, nil
+ }
+ }
+ return nil, fmt.Errorf("no tests")
+ }
+
+ // Otherwise, use the first package named main.
+ for _, pkg := range pkgs {
+ if pkg.Object.Name() == "main" {
+ if pkg.Func("main") == nil {
+ return nil, fmt.Errorf("no func main() in main package")
+ }
+ return pkg, nil
+ }
+ }
+
+ return nil, fmt.Errorf("no main package")
+}
+
type Edge struct {
Caller *ssa.Function
Callee *ssa.Function
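
The new static and cha builders take a whole ssa.Program rather than a main package. A sketch of driving the CHA builder directly, assuming the loader, ssa and callgraph APIs as they stand after this change (hello.go is a placeholder input file):

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/callgraph"
	"golang.org/x/tools/go/callgraph/cha"
	"golang.org/x/tools/go/loader"
	"golang.org/x/tools/go/ssa"
)

func main() {
	var conf loader.Config
	conf.CreateFromFilenames("main", "hello.go") // placeholder input file
	iprog, err := conf.Load()
	if err != nil {
		log.Fatal(err)
	}

	// Build SSA form for the whole program, as cmd/callgraph does.
	prog := ssa.Create(iprog, 0)
	prog.BuildAll()

	// Class Hierarchy Analysis needs no main package.
	cg := cha.CallGraph(prog)
	callgraph.GraphVisitEdges(cg, func(e *callgraph.Edge) error {
		fmt.Println(e.Caller.Func, "-->", e.Callee.Func)
		return nil
	})
}
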
diff --git a/cmd/callgraph/main_test.go b/cmd/callgraph/main_test.go
index 93ccb61..81fa490 100644
--- a/cmd/callgraph/main_test.go
+++ b/cmd/callgraph/main_test.go
@@ -40,13 +40,13 @@ func TestCallgraph(t *testing.T) {
// tests: main is not called.
{"rta", format, true, []string{
`pkg.Example --> (pkg.C).f`,
- `testmain.init --> pkg.init`,
+ `test$main.init --> pkg.init`,
}},
{"pta", format, true, []string{
`<root> --> pkg.Example`,
- `<root> --> testmain.init`,
+ `<root> --> test$main.init`,
`pkg.Example --> (pkg.C).f`,
- `testmain.init --> pkg.init`,
+ `test$main.init --> pkg.init`,
}},
} {
stdout = new(bytes.Buffer)
diff --git a/cmd/cover/doc.go b/cmd/cover/doc.go
index 636d7e0..c90f460 100644
--- a/cmd/cover/doc.go
+++ b/cmd/cover/doc.go
@@ -18,4 +18,4 @@ For usage information, please see:
go help testflag
go tool cover -help
*/
-package main
+package main // import "golang.org/x/tools/cmd/cover"
diff --git a/cmd/digraph/digraph.go b/cmd/digraph/digraph.go
index 76d39ec..3ad2950 100644
--- a/cmd/digraph/digraph.go
+++ b/cmd/digraph/digraph.go
@@ -12,7 +12,7 @@
// a comment syntax, etc.
// - allow queries to nest, like Blaze query language.
//
-package main
+package main // import "golang.org/x/tools/cmd/digraph"
import (
"bufio"
@@ -98,7 +98,7 @@ func main() {
}
if err := digraph(args[0], args[1:]); err != nil {
- fmt.Fprintf(os.Stderr, "Error: %s\n", err)
+ fmt.Fprintf(os.Stderr, "digraph: %s\n", err)
os.Exit(1)
}
}
@@ -265,7 +265,7 @@ func digraph(cmd string, args []string) error {
switch cmd {
case "nodes":
if len(args) != 0 {
- return fmt.Errorf("usage: nodes")
+ return fmt.Errorf("usage: digraph nodes")
}
nodes := make(nodeset)
for label := range g {
@@ -275,7 +275,7 @@ func digraph(cmd string, args []string) error {
case "degree":
if len(args) != 0 {
- return fmt.Errorf("usage: degree")
+ return fmt.Errorf("usage: digraph degree")
}
nodes := make(nodeset)
for label := range g {
@@ -288,7 +288,7 @@ func digraph(cmd string, args []string) error {
case "succs", "preds":
if len(args) == 0 {
- return fmt.Errorf("usage: %s <label> ...", cmd)
+ return fmt.Errorf("usage: digraph %s <label> ...", cmd)
}
g := g
if cmd == "preds" {
@@ -306,7 +306,7 @@ func digraph(cmd string, args []string) error {
case "forward", "reverse":
if len(args) == 0 {
- return fmt.Errorf("usage: %s <label> ...", cmd)
+ return fmt.Errorf("usage: digraph %s <label> ...", cmd)
}
roots := make(nodeset)
for _, root := range args {
@@ -323,7 +323,7 @@ func digraph(cmd string, args []string) error {
case "somepath":
if len(args) != 2 {
- return fmt.Errorf("usage: somepath <from> <to>")
+ return fmt.Errorf("usage: digraph somepath <from> <to>")
}
from, to := args[0], args[1]
if g[from] == nil {
@@ -356,7 +356,7 @@ func digraph(cmd string, args []string) error {
case "allpaths":
if len(args) != 2 {
- return fmt.Errorf("usage: allpaths <from> <to>")
+ return fmt.Errorf("usage: digraph allpaths <from> <to>")
}
from, to := args[0], args[1]
if g[from] == nil {
@@ -395,7 +395,7 @@ func digraph(cmd string, args []string) error {
case "sccs":
if len(args) != 0 {
- return fmt.Errorf("usage: sccs")
+ return fmt.Errorf("usage: digraph sccs")
}
for _, scc := range g.sccs() {
scc.sort().println(" ")
@@ -403,7 +403,7 @@ func digraph(cmd string, args []string) error {
case "scc":
if len(args) != 1 {
- return fmt.Errorf("usage: scc <label>")
+ return fmt.Errorf("usage: digraph scc <label>")
}
label := args[0]
if g[label] == nil {
diff --git a/cmd/eg/eg.go b/cmd/eg/eg.go
index 8d2342a..0ad4331 100644
--- a/cmd/eg/eg.go
+++ b/cmd/eg/eg.go
@@ -1,7 +1,7 @@
// The eg command performs example-based refactoring.
// For documentation, run the command, or see Help in
-// code.google.com/p/go.tools/refactor/eg.
-package main
+// golang.org/x/tools/refactor/eg.
+package main // import "golang.org/x/tools/cmd/eg"
import (
"flag"
@@ -11,7 +11,6 @@ import (
"go/token"
"os"
"os/exec"
- "path/filepath"
"strings"
"golang.org/x/tools/go/loader"
@@ -37,7 +36,7 @@ Usage: eg -t template.go [-w] [-transitive] <args>...
func main() {
if err := doMain(); err != nil {
- fmt.Fprintf(os.Stderr, "%s: %s.\n", filepath.Base(os.Args[0]), err)
+ fmt.Fprintf(os.Stderr, "eg: %s\n", err)
os.Exit(1)
}
}
@@ -56,15 +55,12 @@ func doMain() error {
}
conf := loader.Config{
- Fset: token.NewFileSet(),
- ParserMode: parser.ParseComments,
- SourceImports: true,
+ Fset: token.NewFileSet(),
+ ParserMode: parser.ParseComments,
}
// The first Created package is the template.
- if err := conf.CreateFromFilenames("template", *templateFlag); err != nil {
- return err // e.g. "foo.go:1: syntax error"
- }
+ conf.CreateFromFilenames("template", *templateFlag)
if len(args) == 0 {
fmt.Fprint(os.Stderr, usage)
@@ -128,7 +124,7 @@ func doMain() error {
}
}
if err := eg.WriteAST(iprog.Fset, filename, file); err != nil {
- fmt.Fprintf(os.Stderr, "Error: %s\n", err)
+ fmt.Fprintf(os.Stderr, "eg: %s\n", err)
hadErrors = true
}
} else {
diff --git a/cmd/godex/doc.go b/cmd/godex/doc.go
index 90268e2..ceb7c2f 100644
--- a/cmd/godex/doc.go
+++ b/cmd/godex/doc.go
@@ -63,7 +63,7 @@
//
// If no -s argument is provided, godex will try to find a matching source.
//
-package main
+package main // import "golang.org/x/tools/cmd/godex"
// BUG(gri): support for -s=source is not yet implemented
// BUG(gri): gccgo-importing appears to have occasional problems stalling godex; try -s=gc as work-around
diff --git a/cmd/godoc/appinit.go b/cmd/godoc/appinit.go
index d91c84f..ad95b21 100644
--- a/cmd/godoc/appinit.go
+++ b/cmd/godoc/appinit.go
@@ -13,6 +13,7 @@ import (
"archive/zip"
"log"
"path"
+ "regexp"
"golang.org/x/tools/godoc"
"golang.org/x/tools/godoc/static"
@@ -56,6 +57,7 @@ func init() {
pres.ShowPlayground = true
pres.ShowExamples = true
pres.DeclLinks = true
+ pres.NotesRx = regexp.MustCompile("BUG")
readTemplates(pres, true)
registerHandlers(pres)
diff --git a/cmd/godoc/doc.go b/cmd/godoc/doc.go
index 88ff201..17cf23e 100644
--- a/cmd/godoc/doc.go
+++ b/cmd/godoc/doc.go
@@ -137,8 +137,10 @@ one may run godoc as follows:
Godoc documentation is converted to HTML or to text using the go/doc package;
see http://golang.org/pkg/go/doc/#ToHTML for the exact rules.
+Godoc also shows example code that is runnable by the testing package;
+see http://golang.org/pkg/testing/#hdr-Examples for the conventions.
See "Godoc: documenting Go code" for how to write good comments for godoc:
http://golang.org/doc/articles/godoc_documenting_go_code.html
*/
-package main
+package main // import "golang.org/x/tools/cmd/godoc"
diff --git a/cmd/godoc/godoc_test.go b/cmd/godoc/godoc_test.go
index 946c1ec..feaad2f 100644
--- a/cmd/godoc/godoc_test.go
+++ b/cmd/godoc/godoc_test.go
@@ -74,6 +74,11 @@ func buildGodoc(t *testing.T) (bin string, cleanup func()) {
if err != nil {
t.Fatal(err)
}
+ defer func() {
+ if cleanup == nil { // probably, go build failed.
+ os.RemoveAll(tmp)
+ }
+ }()
bin = filepath.Join(tmp, "godoc")
if runtime.GOOS == "windows" {
diff --git a/cmd/godoc/main.go b/cmd/godoc/main.go
index 318726d..03d29d0 100644
--- a/cmd/godoc/main.go
+++ b/cmd/godoc/main.go
@@ -95,9 +95,8 @@ var (
declLinks = flag.Bool("links", true, "link identifiers to their declarations")
// search index
- indexEnabled = flag.Bool("index", false, "enable search index")
- indexFiles = flag.String("index_files", "", "glob pattern specifying index files;"+
- "if not empty, the index is read from these files in sorted order")
+ indexEnabled = flag.Bool("index", false, "enable search index")
+ indexFiles = flag.String("index_files", "", "glob pattern specifying index files; if not empty, the index is read from these files in sorted order")
maxResults = flag.Int("maxresults", 10000, "maximum number of full text search results shown")
indexThrottle = flag.Float64("index_throttle", 0.75, "index throttle value; 0.0 = no time allocated, 1.0 = full throttle")
diff --git a/cmd/godoc/setup-godoc-app.bash b/cmd/godoc/setup-godoc-app.bash
index 284df8b..9d82cd7 100755
--- a/cmd/godoc/setup-godoc-app.bash
+++ b/cmd/godoc/setup-godoc-app.bash
@@ -33,8 +33,8 @@ getArgs() {
if [ -z $APPENGINE_SDK ]; then
error "APPENGINE_SDK environment variable not set"
fi
- if [ ! -x $APPENGINE_SDK/go ]; then
- error "couldn't find go comment in $APPENGINE_SDK"
+ if [ ! -x $APPENGINE_SDK/goapp ]; then
+ error "couldn't find goapp command in $APPENGINE_SDK"
fi
if [ -z $GOROOT ]; then
GOROOT=$(go env GOROOT)
@@ -47,7 +47,7 @@ getArgs() {
APPDIR=$1
echo "APPDIR not set, using argument instead"
fi
-
+
# safety checks
if [ ! -d $GOROOT ]; then
error "$GOROOT is not a directory"
@@ -64,7 +64,7 @@ getArgs() {
fetchGodoc() {
echo "*** Fetching godoc (if not already in GOPATH)"
unset GOBIN
- go=$APPENGINE_SDK/go
+ go=$APPENGINE_SDK/goapp
$go get -d -tags appengine $GODOC
mkdir -p $APPDIR/$GODOC
cp $(find $($go list -f '{{.Dir}}' $GODOC) -type f -depth 1) $APPDIR/$GODOC/
@@ -76,7 +76,7 @@ makeAppYaml() {
application: godoc
version: 1
runtime: go
-api_version: go1
+api_version: go1.4beta
handlers:
- url: /.*
@@ -105,7 +105,7 @@ makeConfigfile() {
package main
// GENERATED FILE - DO NOT MODIFY BY HAND.
-// (generated by $GOROOT/src/cmd/godoc/setup-godoc-app.bash)
+// (generated by golang.org/x/tools/cmd/godoc/setup-godoc-app.bash)
const (
// .zip filename
diff --git a/cmd/godoc/x.go b/cmd/godoc/x.go
index ed7c8d8..f638b68 100644
--- a/cmd/godoc/x.go
+++ b/cmd/godoc/x.go
@@ -17,19 +17,30 @@ import (
const xPrefix = "/x/"
-var xMap = map[string]string{
- "benchmarks": "https://code.google.com/p/go.benchmarks",
- "blog": "https://code.google.com/p/go.blog",
- "codereview": "https://code.google.com/p/go.codereview",
- "crypto": "https://code.google.com/p/go.crypto",
- "exp": "https://code.google.com/p/go.exp",
- "image": "https://code.google.com/p/go.image",
- "mobile": "https://code.google.com/p/go.mobile",
- "net": "https://code.google.com/p/go.net",
- "sys": "https://code.google.com/p/go.sys",
- "talks": "https://code.google.com/p/go.talks",
- "text": "https://code.google.com/p/go.text",
- "tools": "https://code.google.com/p/go.tools",
+type xRepo struct {
+ URL, VCS string
+}
+
+var xMap = map[string]xRepo{
+ "codereview": {"https://code.google.com/p/go.codereview", "hg"},
+
+ "benchmarks": {"https://go.googlesource.com/benchmarks", "git"},
+ "blog": {"https://go.googlesource.com/blog", "git"},
+ "build": {"https://go.googlesource.com/build", "git"},
+ "crypto": {"https://go.googlesource.com/crypto", "git"},
+ "debug": {"https://go.googlesource.com/debug", "git"},
+ "exp": {"https://go.googlesource.com/exp", "git"},
+ "image": {"https://go.googlesource.com/image", "git"},
+ "mobile": {"https://go.googlesource.com/mobile", "git"},
+ "net": {"https://go.googlesource.com/net", "git"},
+ "oauth2": {"https://go.googlesource.com/oauth2", "git"},
+ "playground": {"https://go.googlesource.com/playground", "git"},
+ "review": {"https://go.googlesource.com/review", "git"},
+ "sys": {"https://go.googlesource.com/sys", "git"},
+ "talks": {"https://go.googlesource.com/talks", "git"},
+ "text": {"https://go.googlesource.com/text", "git"},
+ "tools": {"https://go.googlesource.com/tools", "git"},
+ "tour": {"https://go.googlesource.com/tour", "git"},
}
func init() {
@@ -47,7 +58,8 @@ func xHandler(w http.ResponseWriter, r *http.Request) {
return
}
data := struct {
- Prefix, Head, Tail, Repo string
+ Prefix, Head, Tail string
+ Repo xRepo
}{xPrefix, head, tail, repo}
if err := xTemplate.Execute(w, data); err != nil {
log.Println("xHandler:", err)
@@ -58,7 +70,8 @@ var xTemplate = template.Must(template.New("x").Parse(`<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>
-<meta name="go-import" content="golang.org{{.Prefix}}{{.Head}} hg {{.Repo}}">
+<meta name="go-import" content="golang.org{{.Prefix}}{{.Head}} {{.Repo.VCS}} {{.Repo.URL}}">
+<meta name="go-source" content="golang.org{{.Prefix}}{{.Head}} https://github.com/golang/{{.Head}}/ https://github.com/golang/{{.Head}}/tree/master{/dir} https://github.com/golang/{{.Head}}/blob/master{/dir}/{file}#L{line}">
<meta http-equiv="refresh" content="0; url=https://godoc.org/golang.org{{.Prefix}}{{.Head}}{{.Tail}}">
</head>
<body>
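For illustration, the updated template would emit roughly the following for the "tools" repository (a sketch of the rendered output only; Prefix is "/x/", Head is "tools", and the VCS and URL come from the xRepo entry above):

	<meta name="go-import" content="golang.org/x/tools git https://go.googlesource.com/tools">
	<meta name="go-source" content="golang.org/x/tools https://github.com/golang/tools/ https://github.com/golang/tools/tree/master{/dir} https://github.com/golang/tools/blob/master{/dir}/{file}#L{line}">

The go-import tag tells the go tool which VCS and remote to fetch, and the new go-source tag lets godoc.org link directly to files on the GitHub mirror.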
diff --git a/cmd/goimports/doc.go b/cmd/goimports/doc.go
index a64ad93..46b2b07 100644
--- a/cmd/goimports/doc.go
+++ b/cmd/goimports/doc.go
@@ -18,8 +18,8 @@ Then in your .emacs file:
(add-hook 'before-save-hook 'gofmt-before-save)
For vim, set "gofmt_command" to "goimports":
- https://code.google.com/p/go/source/detail?r=39c724dd7f252
- https://code.google.com/p/go/source/browse#hg%2Fmisc%2Fvim
+ https://golang.org/change/39c724dd7f252
+ https://golang.org/wiki/IDEsAndTextEditorPlugins
etc
For GoSublime, follow the steps described here:
@@ -30,4 +30,4 @@ For other editors, you probably know what to do.
Happy hacking!
*/
-package main
+package main // import "golang.org/x/tools/cmd/goimports"
diff --git a/cmd/gomvpkg/main.go b/cmd/gomvpkg/main.go
new file mode 100644
index 0000000..86b8067
--- /dev/null
+++ b/cmd/gomvpkg/main.go
@@ -0,0 +1,89 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The gomvpkg command moves go packages, updating import declarations.
+// See the -help message or Usage constant for details.
+package main
+
+import (
+ "flag"
+ "fmt"
+ "go/build"
+ "os"
+
+ "golang.org/x/tools/refactor/rename"
+)
+
+var (
+ fromFlag = flag.String("from", "", "Import path of package to be moved")
+ toFlag = flag.String("to", "", "Destination import path for package")
+	vcsMvCmdFlag = flag.String("vcs_mv_cmd", "", `A template for the version control system's "move directory" command, e.g. "git mv {{.Src}} {{.Dst}}"`)
+ helpFlag = flag.Bool("help", false, "show usage message")
+)
+
+const Usage = `gomvpkg: moves a package, updating import declarations
+
+Usage:
+
+ gomvpkg -from <path> -to <path> [-vcs_mv_cmd <template>]
+
+Flags:
+
+-from specifies the import path of the package to be moved
+
+-to specifies the destination import path
+
+-vcs_mv_cmd specifies a shell command to inform the version control system of a
+ directory move. The argument is a template using the syntax of the
+ text/template package. It has two fields: Src and Dst, the absolute
+ paths of the directories.
+
+ For example: "git mv {{.Src}} {{.Dst}}"
+
+gomvpkg determines the set of packages that might be affected, including all
+packages importing the 'from' package and any of its subpackages. It will move
+the 'from' package and all its subpackages to the destination path and update all
+imports of those packages to point to their new import paths.
+
+gomvpkg rejects moves in which a package already exists at the destination import
+path, or in which a directory already exists at the location the package would be
+moved to.
+
+gomvpkg will not always be able to rename imports when a package's name is changed.
+Import statements may want further cleanup.
+
+gomvpkg's behavior is not defined if any of the packages to be moved are
+imported using dot imports.
+
+Examples:
+
+% gomvpkg -from myproject/foo -to myproject/bar
+
+ Move the package with import path "myproject/foo" to the new path
+ "myproject/bar".
+
+% gomvpkg -from myproject/foo -to myproject/bar -vcs_mv_cmd "git mv {{.Src}} {{.Dst}}"
+
+ Move the package with import path "myproject/foo" to the new path
+ "myproject/bar" using "git mv" to execute the directory move.
+`
+
+func main() {
+ flag.Parse()
+
+ if len(flag.Args()) > 0 {
+ fmt.Fprintln(os.Stderr, "gomvpkg: surplus arguments.")
+ os.Exit(1)
+ }
+
+ if *helpFlag || *fromFlag == "" || *toFlag == "" {
+ fmt.Println(Usage)
+ return
+ }
+
+ if err := rename.Move(&build.Default, *fromFlag, *toFlag, *vcsMvCmdFlag); err != nil {
+ fmt.Fprintf(os.Stderr, "gomvpkg: %s.\n", err)
+ os.Exit(1)
+ }
+}
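As a minimal sketch of the same operation driven programmatically through the rename package that gomvpkg wraps (the example.com import paths are placeholders, not part of this change):

	package main

	import (
		"go/build"
		"log"

		"golang.org/x/tools/refactor/rename"
	)

	func main() {
		// Move example.com/foo and its subpackages to example.com/bar,
		// rewriting all imports; the empty string means no VCS move command.
		if err := rename.Move(&build.Default, "example.com/foo", "example.com/bar", ""); err != nil {
			log.Fatal(err)
		}
	}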
diff --git a/cmd/gorename/main.go b/cmd/gorename/main.go
index 20e271e..ea08f88 100644
--- a/cmd/gorename/main.go
+++ b/cmd/gorename/main.go
@@ -1,7 +1,11 @@
// The gorename command performs precise type-safe renaming of
-// identifiers in Go source code. See the -help message or Usage
-// constant for details.
-package main
+// identifiers in Go source code.
+//
+// Run with -help for usage information, or view the Usage constant in
+// package golang.org/x/tools/refactor/rename, which contains most of
+// the implementation.
+//
+package main // import "golang.org/x/tools/cmd/gorename"
import (
"flag"
@@ -36,102 +40,21 @@ func init() {
}
}
-const Usage = `gorename: precise type-safe renaming of identifiers in Go source code.
-
-Usage:
-
- gorename (-from <spec> | -offset <file>:#<byte-offset>) -to <name> [-force]
-
-You must specify the object (named entity) to rename using the -offset
-or -from flag. Exactly one must be specified.
-
-Flags:
-
--offset specifies the filename and byte offset of an identifier to rename.
- This form is intended for use by text editors.
-
--from specifies the object to rename using a query notation;
- This form is intended for interactive use at the command line.
-` + rename.FromFlagUsage + `
-
--to the new name.
-
--force causes the renaming to proceed even if conflicts were reported.
- The resulting program may be ill-formed, or experience a change
- in behaviour.
-
- WARNING: this flag may even cause the renaming tool to crash.
- (In due course this bug will be fixed by moving certain
- analyses into the type-checker.)
-
--dryrun causes the tool to report conflicts but not update any files.
-
--v enables verbose logging.
-
-gorename automatically computes the set of packages that might be
-affected. For a local renaming, this is just the package specified by
--from or -offset, but for a potentially exported name, gorename scans
-the workspace ($GOROOT and $GOPATH).
-
-gorename rejects any renaming that would create a conflict at the point
-of declaration, or a reference conflict (ambiguity or shadowing), or
-anything else that could cause the resulting program not to compile.
-Currently, it also rejects any method renaming that would change the
-assignability relation between types and interfaces.
-
-
-Examples:
-
-% gorename -offset file.go:#123 -to foo
-
- Rename the object whose identifier is at byte offset 123 within file file.go.
-
-% gorename -from '"bytes".Buffer.Len' -to Size
-
- Rename the "Len" method of the *bytes.Buffer type to "Size".
-
----- TODO ----
-
-Correctness:
-- handle dot imports correctly
-- document limitations (reflection, 'implements' guesswork).
-- sketch a proof of exhaustiveness.
-
-Features:
-- support running on packages specified as *.go files on the command line
-- support running on programs containing errors (loader.Config.AllowErrors)
-- allow users to specify a scope other than "global" (to avoid being
- stuck by neglected packages in $GOPATH that don't build).
-- support renaming the package clause (no object)
-- support renaming an import path (no ident or object)
- (requires filesystem + SCM updates).
-- detect and reject edits to autogenerated files (cgo, protobufs)
- and optionally $GOROOT packages.
-- report all conflicts, or at least all qualitatively distinct ones.
- Sometimes we stop to avoid redundancy, but
- it may give a disproportionate sense of safety in -force mode.
-- support renaming all instances of a pattern, e.g.
- all receiver vars of a given type,
- all local variables of a given type,
- all PkgNames for a given package.
-- emit JSON output for other editors and tools.
-`
-
func main() {
flag.Parse()
if len(flag.Args()) > 0 {
- fmt.Fprintf(os.Stderr, "Error: surplus arguments.\n")
+ fmt.Fprintln(os.Stderr, "gorename: surplus arguments.")
os.Exit(1)
}
if *helpFlag || (*offsetFlag == "" && *fromFlag == "" && *toFlag == "") {
- fmt.Println(Usage)
+ fmt.Println(rename.Usage)
return
}
if err := rename.Main(&build.Default, *offsetFlag, *fromFlag, *toFlag); err != nil {
if err != rename.ConflictError {
- fmt.Fprintf(os.Stderr, "Error: %s.\n", err)
+ fmt.Fprintf(os.Stderr, "gorename: %s\n", err)
}
os.Exit(1)
}
diff --git a/cmd/gotype/doc.go b/cmd/gotype/doc.go
index e689378..ea0b2b1 100644
--- a/cmd/gotype/doc.go
+++ b/cmd/gotype/doc.go
@@ -59,4 +59,4 @@ To verify the output of a pipe:
echo "package foo" | gotype
*/
-package main
+package main // import "golang.org/x/tools/cmd/gotype"
diff --git a/cmd/html2article/conv.go b/cmd/html2article/conv.go
index ae14a25..d3267d7 100644
--- a/cmd/html2article/conv.go
+++ b/cmd/html2article/conv.go
@@ -4,7 +4,7 @@
// This program takes an HTML file and outputs a corresponding article file in
// present format. See: golang.org/x/tools/present
-package main
+package main // import "golang.org/x/tools/cmd/html2article"
import (
"bufio"
@@ -223,7 +223,7 @@ func text(n *html.Node) string {
}
// Use original url for Google Docs redirections.
if u, err := url.Parse(href); err != nil {
- log.Println("parsing url %q: %v", href, err)
+ log.Printf("parsing url %q: %v", href, err)
} else if u.Host == "www.google.com" && u.Path == "/url" {
href = u.Query().Get("q")
}
diff --git a/cmd/oracle/main.go b/cmd/oracle/main.go
index cdf5329..96efcb6 100644
--- a/cmd/oracle/main.go
+++ b/cmd/oracle/main.go
@@ -8,7 +8,7 @@
//
// Run with -help flag or help subcommand for usage information.
//
-package main
+package main // import "golang.org/x/tools/cmd/oracle"
import (
"bufio"
@@ -36,7 +36,6 @@ var ptalogFlag = flag.String("ptalog", "",
var formatFlag = flag.String("format", "plain", "Output format. One of {plain,json,xml}.")
-// TODO(adonovan): flip this flag after PTA presolver is implemented.
var reflectFlag = flag.Bool("reflect", false, "Analyze reflection soundly (slow).")
const useHelp = "Run 'oracle -help' for more information.\n"
@@ -60,9 +59,10 @@ The mode argument determines the query to perform:
callstack show path from callgraph root to selected function
describe describe selected syntax: definition, methods, etc
freevars show free variables of selection
- implements show 'implements' relation for selected package
+ implements show 'implements' relation for selected type or method
peers show send/receive corresponding to selected channel op
referrers show all refs to entity denoted by selected identifier
+ what show basic information about the selected syntax node
The user manual is available here: http://golang.org/s/oracle-user-manual
@@ -111,7 +111,7 @@ func main() {
args := flag.Args()
if len(args) == 0 || args[0] == "" {
- fmt.Fprint(os.Stderr, "Error: a mode argument is required.\n"+useHelp)
+ fmt.Fprint(os.Stderr, "oracle: a mode argument is required.\n"+useHelp)
os.Exit(2)
}
@@ -123,7 +123,7 @@ func main() {
}
if len(args) == 0 && mode != "what" {
- fmt.Fprint(os.Stderr, "Error: no package arguments.\n"+useHelp)
+ fmt.Fprint(os.Stderr, "oracle: no package arguments.\n"+useHelp)
os.Exit(2)
}
@@ -136,8 +136,12 @@ func main() {
buf := bufio.NewWriter(f)
ptalog = buf
defer func() {
- buf.Flush()
- f.Close()
+ if err := buf.Flush(); err != nil {
+ log.Printf("flush: %s", err)
+ }
+ if err := f.Close(); err != nil {
+ log.Printf("close: %s", err)
+ }
}()
}
}
@@ -157,14 +161,14 @@ func main() {
case "json", "plain", "xml":
// ok
default:
- fmt.Fprintf(os.Stderr, "Error: illegal -format value: %q.\n"+useHelp, *formatFlag)
+ fmt.Fprintf(os.Stderr, "oracle: illegal -format value: %q.\n"+useHelp, *formatFlag)
os.Exit(2)
}
// Ask the oracle.
res, err := oracle.Query(args, mode, *posFlag, ptalog, &build.Default, *reflectFlag)
if err != nil {
- fmt.Fprintf(os.Stderr, "Error: %s.\n", err)
+ fmt.Fprintf(os.Stderr, "oracle: %s.\n", err)
os.Exit(1)
}
@@ -173,7 +177,7 @@ func main() {
case "json":
b, err := json.MarshalIndent(res.Serial(), "", "\t")
if err != nil {
- fmt.Fprintf(os.Stderr, "JSON error: %s.\n", err)
+ fmt.Fprintf(os.Stderr, "oracle: JSON error: %s.\n", err)
os.Exit(1)
}
os.Stdout.Write(b)
@@ -181,7 +185,7 @@ func main() {
case "xml":
b, err := xml.MarshalIndent(res.Serial(), "", "\t")
if err != nil {
- fmt.Fprintf(os.Stderr, "XML error: %s.\n", err)
+ fmt.Fprintf(os.Stderr, "oracle: XML error: %s.\n", err)
os.Exit(1)
}
os.Stdout.Write(b)
diff --git a/cmd/oracle/oracle.el b/cmd/oracle/oracle.el
index 35cd892..f90a5fd 100644
--- a/cmd/oracle/oracle.el
+++ b/cmd/oracle/oracle.el
@@ -213,6 +213,12 @@ identifier."
(interactive)
(go-oracle--run "referrers"))
+(defun go-oracle-whicherrs ()
+ "Show globals, constants and types to which the selected
+expression (of type 'error') may refer."
+ (interactive)
+ (go-oracle--run "whicherrs"))
+
;; TODO(dominikh): better docstring
(define-minor-mode go-oracle-mode "Oracle minor mode for go-mode
diff --git a/cmd/present/dir.go b/cmd/present/dir.go
index 26392dc..6845a21 100644
--- a/cmd/present/dir.go
+++ b/cmd/present/dir.go
@@ -139,8 +139,8 @@ func dirList(w io.Writer, name string) (isDir bool, err error) {
}
d := &dirListData{Path: name}
for _, fi := range fis {
- // skip the pkg directory
- if name == "." && fi.Name() == "pkg" {
+ // skip the golang.org directory
+ if name == "." && fi.Name() == "golang.org" {
continue
}
e := dirEntry{
diff --git a/cmd/present/doc.go b/cmd/present/doc.go
index 1b81c31..fafcefe 100644
--- a/cmd/present/doc.go
+++ b/cmd/present/doc.go
@@ -18,7 +18,7 @@ Usage of present:
-play=true: enable playground (permit execution of arbitrary user code)
The setup of the Go version of NaCl is documented at:
-https://code.google.com/p/go-wiki/wiki/NativeClient
+https://golang.org/wiki/NativeClient
Input files are named foo.extension, where "extension" defines the format of
the generated output. The supported formats are:
@@ -28,4 +28,4 @@ the generated output. The supported formats are:
The present file format is documented by the present package:
http://godoc.org/golang.org/x/tools/present
*/
-package main
+package main // import "golang.org/x/tools/cmd/present"
diff --git a/cmd/present/static/print.css b/cmd/present/static/print.css
deleted file mode 100644
index 6c58257..0000000
--- a/cmd/present/static/print.css
+++ /dev/null
@@ -1,51 +0,0 @@
-/* set page layout */
-@page {
- size: A4 landscape;
-}
-
-body {
- display: block !important;
-}
-
-.slides {
- left: 0;
- top: 0;
-}
-
-.slides > article {
- position: relative;
-
- left: 0;
- top: 0;
-
- margin: 0 !important;
- page-break-inside: avoid;
-
- text-shadow: none; /* disable shadow */
-
- display: block !important;
- transform: translate(0) !important;
- -o-transform: translate(0) !important;
- -moz-transform: translate(0) !important;
- -webkit-transform: translate3d(0, 0, 0) !important;
-}
-
-div.code {
- background: rgb(240, 240, 240);
-}
-
-/* hide click areas */
-.slide-area, #prev-slide-area, #next-slide-area {
- display: none;
-}
-
-/* add explicit links */
-a:link:after, a:visited:after {
- content: " (" attr(href) ") ";
- font-size: 50%;
-}
-
-/* white background */
-body {
- background: rgb(255,255,255) !important;
-}
diff --git a/cmd/present/static/slides.js b/cmd/present/static/slides.js
index ee54c94..3697b4e 100644
--- a/cmd/present/static/slides.js
+++ b/cmd/present/static/slides.js
@@ -134,6 +134,10 @@ if (objCtr.defineProperty) {
/* Slide movement */
+function hideHelpText() {
+ $('#help').hide();
+};
+
function getSlideEl(no) {
if ((no < 0) || (no >= slideEls.length)) {
return null;
@@ -201,6 +205,7 @@ function updateSlides() {
};
function prevSlide() {
+ hideHelpText();
if (curSlide > 0) {
curSlide--;
@@ -209,6 +214,7 @@ function prevSlide() {
};
function nextSlide() {
+ hideHelpText();
if (curSlide < slideEls.length - 1) {
curSlide++;
@@ -392,6 +398,11 @@ function handleBodyKeyDown(event) {
var inCode = event.target.classList.contains("code");
switch (event.keyCode) {
+ case 72: // 'H' hides the help text
+ case 27: // escape key
+ if (!inCode) hideHelpText();
+ break;
+
case 39: // right arrow
case 13: // Enter
case 32: // space
@@ -457,13 +468,7 @@ function addGeneralStyle() {
document.querySelector('head').appendChild(el);
};
-function addPrintStyle() {
- var el = document.createElement('link');
- el.rel = 'stylesheet';
- el.type = 'text/css';
- el.media = "print";
- el.href = PERMANENT_URL_PREFIX + 'print.css';
- document.body.appendChild(el);
+function showHelpText() {
};
function handleDomLoaded() {
@@ -473,13 +478,16 @@ function handleDomLoaded() {
addFontStyle();
addGeneralStyle();
- addPrintStyle();
addEventListeners();
updateSlides();
setupInteraction();
+ if (window.location.hostname == "localhost" || window.location.hostname == "127.0.0.1" || window.location.hostname == "::1") {
+ hideHelpText();
+ }
+
document.body.classList.add('loaded');
};
diff --git a/cmd/present/static/styles.css b/cmd/present/static/styles.css
index b3d829a..5cb2953 100644
--- a/cmd/present/static/styles.css
+++ b/cmd/present/static/styles.css
@@ -1,210 +1,254 @@
-/* Framework */
+@media screen {
+ /* Framework */
+ html {
+ height: 100%;
+ }
-html {
- height: 100%;
-}
-
-body {
- margin: 0;
- padding: 0;
-
- display: block !important;
-
- height: 100%;
- min-height: 740px;
-
- overflow-x: hidden;
- overflow-y: auto;
-
- background: rgb(215, 215, 215);
- background: -o-radial-gradient(rgb(240, 240, 240), rgb(190, 190, 190));
- background: -moz-radial-gradient(rgb(240, 240, 240), rgb(190, 190, 190));
- background: -webkit-radial-gradient(rgb(240, 240, 240), rgb(190, 190, 190));
- background: -webkit-gradient(radial, 50% 50%, 0, 50% 50%, 500, from(rgb(240, 240, 240)), to(rgb(190, 190, 190)));
+ body {
+ margin: 0;
+ padding: 0;
- -webkit-font-smoothing: antialiased;
-}
-
-.slides {
- width: 100%;
- height: 100%;
- left: 0;
- top: 0;
+ display: block !important;
- position: absolute;
+ height: 100%;
+ min-height: 740px;
- -webkit-transform: translate3d(0, 0, 0);
-}
-
-.slides > article {
- display: block;
-
- position: absolute;
- overflow: hidden;
-
- width: 900px;
- height: 700px;
-
- left: 50%;
- top: 50%;
-
- margin-left: -450px;
- margin-top: -350px;
-
- padding: 40px 60px;
-
- box-sizing: border-box;
- -o-box-sizing: border-box;
- -moz-box-sizing: border-box;
- -webkit-box-sizing: border-box;
-
- border-radius: 10px;
- -o-border-radius: 10px;
- -moz-border-radius: 10px;
- -webkit-border-radius: 10px;
-
- background-color: white;
-
- border: 1px solid rgba(0, 0, 0, .3);
-
- transition: transform .3s ease-out;
- -o-transition: -o-transform .3s ease-out;
- -moz-transition: -moz-transform .3s ease-out;
- -webkit-transition: -webkit-transform .3s ease-out;
-}
-.slides.layout-widescreen > article {
- margin-left: -550px;
- width: 1100px;
-}
-.slides.layout-faux-widescreen > article {
- margin-left: -550px;
- width: 1100px;
-
- padding: 40px 160px;
-}
-
-.slides.layout-widescreen > article:not(.nobackground):not(.biglogo),
-.slides.layout-faux-widescreen > article:not(.nobackground):not(.biglogo) {
- background-position-x: 0, 840px;
-}
-
-/* Clickable/tappable areas */
-
-.slide-area {
- z-index: 1000;
-
- position: absolute;
- left: 0;
- top: 0;
- width: 150px;
- height: 700px;
-
- left: 50%;
- top: 50%;
-
- cursor: pointer;
- margin-top: -350px;
-
- tap-highlight-color: transparent;
- -o-tap-highlight-color: transparent;
- -moz-tap-highlight-color: transparent;
- -webkit-tap-highlight-color: transparent;
-}
-#prev-slide-area {
- margin-left: -550px;
-}
-#next-slide-area {
- margin-left: 400px;
-}
-.slides.layout-widescreen #prev-slide-area,
-.slides.layout-faux-widescreen #prev-slide-area {
- margin-left: -650px;
-}
-.slides.layout-widescreen #next-slide-area,
-.slides.layout-faux-widescreen #next-slide-area {
- margin-left: 500px;
-}
-
-/* Slides */
-
-.slides > article {
- display: none;
-}
-.slides > article.far-past {
- display: block;
- transform: translate(-2040px);
- -o-transform: translate(-2040px);
- -moz-transform: translate(-2040px);
- -webkit-transform: translate3d(-2040px, 0, 0);
-}
-.slides > article.past {
- display: block;
- transform: translate(-1020px);
- -o-transform: translate(-1020px);
- -moz-transform: translate(-1020px);
- -webkit-transform: translate3d(-1020px, 0, 0);
-}
-.slides > article.current {
- display: block;
- transform: translate(0);
- -o-transform: translate(0);
- -moz-transform: translate(0);
- -webkit-transform: translate3d(0, 0, 0);
-}
-.slides > article.next {
- display: block;
- transform: translate(1020px);
- -o-transform: translate(1020px);
- -moz-transform: translate(1020px);
- -webkit-transform: translate3d(1020px, 0, 0);
-}
-.slides > article.far-next {
- display: block;
- transform: translate(2040px);
- -o-transform: translate(2040px);
- -moz-transform: translate(2040px);
- -webkit-transform: translate3d(2040px, 0, 0);
-}
-
-.slides.layout-widescreen > article.far-past,
-.slides.layout-faux-widescreen > article.far-past {
- display: block;
- transform: translate(-2260px);
- -o-transform: translate(-2260px);
- -moz-transform: translate(-2260px);
- -webkit-transform: translate3d(-2260px, 0, 0);
-}
-.slides.layout-widescreen > article.past,
-.slides.layout-faux-widescreen > article.past {
- display: block;
- transform: translate(-1130px);
- -o-transform: translate(-1130px);
- -moz-transform: translate(-1130px);
- -webkit-transform: translate3d(-1130px, 0, 0);
-}
-.slides.layout-widescreen > article.current,
-.slides.layout-faux-widescreen > article.current {
- display: block;
- transform: translate(0);
- -o-transform: translate(0);
- -moz-transform: translate(0);
- -webkit-transform: translate3d(0, 0, 0);
-}
-.slides.layout-widescreen > article.next,
-.slides.layout-faux-widescreen > article.next {
- display: block;
- transform: translate(1130px);
- -o-transform: translate(1130px);
- -moz-transform: translate(1130px);
- -webkit-transform: translate3d(1130px, 0, 0);
-}
-.slides.layout-widescreen > article.far-next,
-.slides.layout-faux-widescreen > article.far-next {
- display: block;
- transform: translate(2260px);
- -o-transform: translate(2260px);
- -moz-transform: translate(2260px);
- -webkit-transform: translate3d(2260px, 0, 0);
+ overflow-x: hidden;
+ overflow-y: auto;
+
+ background: rgb(215, 215, 215);
+ background: -o-radial-gradient(rgb(240, 240, 240), rgb(190, 190, 190));
+ background: -moz-radial-gradient(rgb(240, 240, 240), rgb(190, 190, 190));
+ background: -webkit-radial-gradient(rgb(240, 240, 240), rgb(190, 190, 190));
+ background: -webkit-gradient(radial, 50% 50%, 0, 50% 50%, 500, from(rgb(240, 240, 240)), to(rgb(190, 190, 190)));
+
+ -webkit-font-smoothing: antialiased;
+ }
+
+ .slides {
+ width: 100%;
+ height: 100%;
+ left: 0;
+ top: 0;
+
+ position: absolute;
+
+ -webkit-transform: translate3d(0, 0, 0);
+ }
+
+ .slides > article {
+ display: block;
+
+ position: absolute;
+ overflow: hidden;
+
+ width: 900px;
+ height: 700px;
+
+ left: 50%;
+ top: 50%;
+
+ margin-left: -450px;
+ margin-top: -350px;
+
+ padding: 40px 60px;
+
+ box-sizing: border-box;
+ -o-box-sizing: border-box;
+ -moz-box-sizing: border-box;
+ -webkit-box-sizing: border-box;
+
+ border-radius: 10px;
+ -o-border-radius: 10px;
+ -moz-border-radius: 10px;
+ -webkit-border-radius: 10px;
+
+ background-color: white;
+
+ border: 1px solid rgba(0, 0, 0, .3);
+
+ transition: transform .3s ease-out;
+ -o-transition: -o-transform .3s ease-out;
+ -moz-transition: -moz-transform .3s ease-out;
+ -webkit-transition: -webkit-transform .3s ease-out;
+ }
+ .slides.layout-widescreen > article {
+ margin-left: -550px;
+ width: 1100px;
+ }
+ .slides.layout-faux-widescreen > article {
+ margin-left: -550px;
+ width: 1100px;
+
+ padding: 40px 160px;
+ }
+
+ .slides.layout-widescreen > article:not(.nobackground):not(.biglogo),
+ .slides.layout-faux-widescreen > article:not(.nobackground):not(.biglogo) {
+ background-position-x: 0, 840px;
+ }
+
+ /* Clickable/tappable areas */
+
+ .slide-area {
+ z-index: 1000;
+
+ position: absolute;
+ left: 0;
+ top: 0;
+ width: 150px;
+ height: 700px;
+
+ left: 50%;
+ top: 50%;
+
+ cursor: pointer;
+ margin-top: -350px;
+
+ tap-highlight-color: transparent;
+ -o-tap-highlight-color: transparent;
+ -moz-tap-highlight-color: transparent;
+ -webkit-tap-highlight-color: transparent;
+ }
+ #prev-slide-area {
+ margin-left: -550px;
+ }
+ #next-slide-area {
+ margin-left: 400px;
+ }
+ .slides.layout-widescreen #prev-slide-area,
+ .slides.layout-faux-widescreen #prev-slide-area {
+ margin-left: -650px;
+ }
+ .slides.layout-widescreen #next-slide-area,
+ .slides.layout-faux-widescreen #next-slide-area {
+ margin-left: 500px;
+ }
+
+ /* Slides */
+
+ .slides > article {
+ display: none;
+ }
+ .slides > article.far-past {
+ display: block;
+ transform: translate(-2040px);
+ -o-transform: translate(-2040px);
+ -moz-transform: translate(-2040px);
+ -webkit-transform: translate3d(-2040px, 0, 0);
+ }
+ .slides > article.past {
+ display: block;
+ transform: translate(-1020px);
+ -o-transform: translate(-1020px);
+ -moz-transform: translate(-1020px);
+ -webkit-transform: translate3d(-1020px, 0, 0);
+ }
+ .slides > article.current {
+ display: block;
+ transform: translate(0);
+ -o-transform: translate(0);
+ -moz-transform: translate(0);
+ -webkit-transform: translate3d(0, 0, 0);
+ }
+ .slides > article.next {
+ display: block;
+ transform: translate(1020px);
+ -o-transform: translate(1020px);
+ -moz-transform: translate(1020px);
+ -webkit-transform: translate3d(1020px, 0, 0);
+ }
+ .slides > article.far-next {
+ display: block;
+ transform: translate(2040px);
+ -o-transform: translate(2040px);
+ -moz-transform: translate(2040px);
+ -webkit-transform: translate3d(2040px, 0, 0);
+ }
+
+ .slides.layout-widescreen > article.far-past,
+ .slides.layout-faux-widescreen > article.far-past {
+ display: block;
+ transform: translate(-2260px);
+ -o-transform: translate(-2260px);
+ -moz-transform: translate(-2260px);
+ -webkit-transform: translate3d(-2260px, 0, 0);
+ }
+ .slides.layout-widescreen > article.past,
+ .slides.layout-faux-widescreen > article.past {
+ display: block;
+ transform: translate(-1130px);
+ -o-transform: translate(-1130px);
+ -moz-transform: translate(-1130px);
+ -webkit-transform: translate3d(-1130px, 0, 0);
+ }
+ .slides.layout-widescreen > article.current,
+ .slides.layout-faux-widescreen > article.current {
+ display: block;
+ transform: translate(0);
+ -o-transform: translate(0);
+ -moz-transform: translate(0);
+ -webkit-transform: translate3d(0, 0, 0);
+ }
+ .slides.layout-widescreen > article.next,
+ .slides.layout-faux-widescreen > article.next {
+ display: block;
+ transform: translate(1130px);
+ -o-transform: translate(1130px);
+ -moz-transform: translate(1130px);
+ -webkit-transform: translate3d(1130px, 0, 0);
+ }
+ .slides.layout-widescreen > article.far-next,
+ .slides.layout-faux-widescreen > article.far-next {
+ display: block;
+ transform: translate(2260px);
+ -o-transform: translate(2260px);
+ -moz-transform: translate(2260px);
+ -webkit-transform: translate3d(2260px, 0, 0);
+ }
+}
+
+@media print {
+ /* Set page layout */
+ @page {
+ size: A4 landscape;
+ }
+
+ body {
+ display: block !important;
+ }
+
+ .slides > article {
+ display: block;
+
+ position: relative;
+
+    page-break-inside: avoid;
+ page-break-after: always;
+
+ overflow: hidden;
+ }
+
+ h2 {
+ position: static !important;
+ margin-top: 400px !important;
+ margin-bottom: 100px !important;
+ }
+
+ div.code {
+ background: rgb(240, 240, 240);
+ }
+
+ /* Add explicit links */
+ a:link:after, a:visited:after {
+ content: " (" attr(href) ") ";
+ font-size: 50%;
+ }
+
+ #help {
+ display: none;
+ visibility: hidden;
+ }
}
/* Styles for slides */
@@ -336,8 +380,8 @@ code {
}
article > .image {
- text-align: center;
- margin-top: 40px;
+ text-align: center;
+ margin-top: 40px;
}
table {
@@ -417,13 +461,13 @@ div.output .buttons {
/* Presenter details */
.presenter {
- margin-top: 20px;
+ margin-top: 20px;
}
.presenter p,
.presenter .link {
- margin: 0;
- font-size: 28px;
- line-height: 1.2em;
+ margin: 0;
+ font-size: 28px;
+ line-height: 1.2em;
}
/* Output resize details */
@@ -458,4 +502,22 @@ figcaption {
color: #666;
text-align: center;
font-size: 0.75em;
-} \ No newline at end of file
+}
+
+#help {
+ font-family: 'Open Sans', Arial, sans-serif;
+ text-align: center;
+ color: white;
+ background: #000;
+ opacity: 0.5;
+ position: fixed;
+ bottom: 25px;
+ left: 50px;
+ right: 50px;
+ padding: 20px;
+
+ border-radius: 10px;
+ -o-border-radius: 10px;
+ -moz-border-radius: 10px;
+ -webkit-border-radius: 10px;
+}
diff --git a/cmd/present/templates/dir.tmpl b/cmd/present/templates/dir.tmpl
index aa83868..c5dbcaa 100644
--- a/cmd/present/templates/dir.tmpl
+++ b/cmd/present/templates/dir.tmpl
@@ -13,7 +13,7 @@
<form method="GET" action="http://golang.org/search">
<div id="menu">
<a href="http://golang.org/doc/">Documents</a>
-<a href="http://golang.org/ref/">References</a>
+<a href="http://golang.org/ref">References</a>
<a href="http://golang.org/pkg/">Packages</a>
<a href="http://golang.org/project/">The Project</a>
<a href="http://golang.org/help/">Help</a>
diff --git a/cmd/present/templates/slides.tmpl b/cmd/present/templates/slides.tmpl
index d2abfa1..11070d2 100644
--- a/cmd/present/templates/slides.tmpl
+++ b/cmd/present/templates/slides.tmpl
@@ -38,7 +38,7 @@
{{end}}{{/* of Slide block */}}
<article>
- <h3>Thank you</h1>
+ <h3>Thank you</h3>
{{range .Authors}}
<div class="presenter">
{{range .Elem}}{{elem $.Template .}}{{end}}
@@ -46,6 +46,14 @@
{{end}}
</article>
+ </section>
+
+ <div id="help">
+ Use the left and right arrow keys or click the left and right
+ edges of the page to navigate between slides.<br>
+ (Press 'H' or navigate to hide this message.)
+ </div>
+
</body>
{{if .PlayEnabled}}
<script src='/play.js'></script>
diff --git a/cmd/ssadump/main.go b/cmd/ssadump/main.go
index f063659..2f847ab 100644
--- a/cmd/ssadump/main.go
+++ b/cmd/ssadump/main.go
@@ -3,7 +3,7 @@
// license that can be found in the LICENSE file.
// ssadump: a tool for displaying and interpreting the SSA form of Go programs.
-package main
+package main // import "golang.org/x/tools/cmd/ssadump"
import (
"flag"
@@ -19,28 +19,23 @@ import (
"golang.org/x/tools/go/types"
)
-var buildFlag = flag.String("build", "", `Options controlling the SSA builder.
-The value is a sequence of zero or more of these letters:
-C perform sanity [C]hecking of the SSA form.
-D include [D]ebug info for every function.
-P print [P]ackage inventory.
-F print [F]unction SSA code.
-S log [S]ource locations as SSA builder progresses.
-G use binary object files from gc to provide imports (no code).
-L build distinct packages seria[L]ly instead of in parallel.
-N build [N]aive SSA form: don't replace local loads/stores with registers.
-I build bare [I]nit functions: no init guards or calls to dependent inits.
-`)
+var (
+ importbinFlag = flag.Bool("importbin", false,
+ "Import binary export data from gc's object files, not source. "+
+ "Imported functions will have no bodies.")
+
+ modeFlag = ssa.BuilderModeFlag(flag.CommandLine, "build", 0)
-var testFlag = flag.Bool("test", false, "Loads test code (*_test.go) for imported packages.")
+ testFlag = flag.Bool("test", false, "Loads test code (*_test.go) for imported packages.")
-var runFlag = flag.Bool("run", false, "Invokes the SSA interpreter on the program.")
+ runFlag = flag.Bool("run", false, "Invokes the SSA interpreter on the program.")
-var interpFlag = flag.String("interp", "", `Options controlling the SSA test interpreter.
+ interpFlag = flag.String("interp", "", `Options controlling the SSA test interpreter.
The value is a sequence of zero or more of these letters:
R disable [R]ecover() from panic; show interpreter crash instead.
T [T]race execution of the program. Best for single-threaded programs!
`)
+)
const usage = `SSA builder and interpreter.
Usage: ssadump [<flag> ...] <args> ...
@@ -74,7 +69,7 @@ func init() {
func main() {
if err := doMain(); err != nil {
- fmt.Fprintf(os.Stderr, "ssadump: %s.\n", err)
+ fmt.Fprintf(os.Stderr, "ssadump: %s\n", err)
os.Exit(1)
}
}
@@ -84,8 +79,8 @@ func doMain() error {
args := flag.Args()
conf := loader.Config{
- Build: &build.Default,
- SourceImports: true,
+ Build: &build.Default,
+ ImportFromBinary: *importbinFlag,
}
// TODO(adonovan): make go/types choose its default Sizes from
// build.Default or a specified *build.Context.
@@ -99,32 +94,6 @@ func doMain() error {
WordSize: wordSize,
}
- var mode ssa.BuilderMode
- for _, c := range *buildFlag {
- switch c {
- case 'D':
- mode |= ssa.GlobalDebug
- case 'P':
- mode |= ssa.PrintPackages
- case 'F':
- mode |= ssa.PrintFunctions
- case 'S':
- mode |= ssa.LogSource | ssa.BuildSerially
- case 'C':
- mode |= ssa.SanityCheckFunctions
- case 'N':
- mode |= ssa.NaiveForm
- case 'G':
- conf.SourceImports = false
- case 'L':
- mode |= ssa.BuildSerially
- case 'I':
- mode |= ssa.BareInits
- default:
- return fmt.Errorf("unknown -build option: '%c'", c)
- }
- }
-
var interpMode interp.Mode
for _, c := range *interpFlag {
switch c {
@@ -171,7 +140,7 @@ func doMain() error {
}
// Create and build SSA-form program representation.
- prog := ssa.Create(iprog, mode)
+ prog := ssa.Create(iprog, *modeFlag)
prog.BuildAll()
// Run the interpreter.
@@ -203,7 +172,7 @@ func doMain() error {
}
if runtime.GOARCH != build.Default.GOARCH {
- return fmt.Errorf("cross-interpretation is not yet supported (target has GOARCH %s, interpreter has %s)",
+ return fmt.Errorf("cross-interpretation is not supported (target has GOARCH %s, interpreter has %s)",
build.Default.GOARCH, runtime.GOARCH)
}
diff --git a/cmd/stringer/endtoend_test.go b/cmd/stringer/endtoend_test.go
index ae2cc0a..35479f0 100644
--- a/cmd/stringer/endtoend_test.go
+++ b/cmd/stringer/endtoend_test.go
@@ -6,6 +6,7 @@ package main
import (
"fmt"
+ "go/build"
"io"
"io/ioutil"
"os"
@@ -22,6 +23,9 @@ import (
func TestEndToEnd(t *testing.T) {
dir, err := ioutil.TempDir("", "stringer")
+ if err != nil {
+ t.Fatal(err)
+ }
defer os.RemoveAll(dir)
// Create stringer in temporary directory.
stringer := filepath.Join(dir, "stringer.exe")
@@ -45,6 +49,10 @@ func TestEndToEnd(t *testing.T) {
t.Errorf("%s is not a Go file", name)
continue
}
+ if name == "cgo.go" && !build.Default.CgoEnabled {
+			t.Logf("cgo is not enabled for %s", name)
+ continue
+ }
// Names are known to be ASCII and long enough.
typeName := fmt.Sprintf("%c%s", name[0]+'A'-'a', name[1:len(name)-len(".go")])
stringerCompileAndRun(t, dir, stringer, typeName, name)
diff --git a/cmd/stringer/golden_test.go b/cmd/stringer/golden_test.go
index f0db09f..76e81c3 100644
--- a/cmd/stringer/golden_test.go
+++ b/cmd/stringer/golden_test.go
@@ -48,18 +48,13 @@ const (
const day_out = `
const _Day_name = "MondayTuesdayWednesdayThursdayFridaySaturdaySunday"
-var _Day_index = [...]uint8{6, 13, 22, 30, 36, 44, 50}
+var _Day_index = [...]uint8{0, 6, 13, 22, 30, 36, 44, 50}
func (i Day) String() string {
- if i < 0 || i >= Day(len(_Day_index)) {
+ if i < 0 || i+1 >= Day(len(_Day_index)) {
return fmt.Sprintf("Day(%d)", i)
}
- hi := _Day_index[i]
- lo := uint8(0)
- if i > 0 {
- lo = _Day_index[i-1]
- }
- return _Day_name[lo:hi]
+ return _Day_name[_Day_index[i]:_Day_index[i+1]]
}
`
@@ -78,19 +73,14 @@ const (
const offset_out = `
const _Number_name = "OneTwoThree"
-var _Number_index = [...]uint8{3, 6, 11}
+var _Number_index = [...]uint8{0, 3, 6, 11}
func (i Number) String() string {
i -= 1
- if i < 0 || i >= Number(len(_Number_index)) {
+ if i < 0 || i+1 >= Number(len(_Number_index)) {
return fmt.Sprintf("Number(%d)", i+1)
}
- hi := _Number_index[i]
- lo := uint8(0)
- if i > 0 {
- lo = _Number_index[i-1]
- }
- return _Number_name[lo:hi]
+ return _Number_name[_Number_index[i]:_Number_index[i+1]]
}
`
@@ -116,27 +106,19 @@ const (
)
var (
- _Gap_index_0 = [...]uint8{3, 8}
- _Gap_index_1 = [...]uint8{4, 7, 12, 17, 21}
- _Gap_index_2 = [...]uint8{6}
+ _Gap_index_0 = [...]uint8{0, 3, 8}
+ _Gap_index_1 = [...]uint8{0, 4, 7, 12, 17, 21}
+ _Gap_index_2 = [...]uint8{0, 6}
)
func (i Gap) String() string {
switch {
case 2 <= i && i <= 3:
i -= 2
- lo := uint8(0)
- if i > 0 {
- lo = _Gap_index_0[i-1]
- }
- return _Gap_name_0[lo:_Gap_index_0[i]]
+ return _Gap_name_0[_Gap_index_0[i]:_Gap_index_0[i+1]]
case 5 <= i && i <= 9:
i -= 5
- lo := uint8(0)
- if i > 0 {
- lo = _Gap_index_1[i-1]
- }
- return _Gap_name_1[lo:_Gap_index_1[i]]
+ return _Gap_name_1[_Gap_index_1[i]:_Gap_index_1[i+1]]
case i == 11:
return _Gap_name_2
default:
@@ -159,19 +141,14 @@ const (
const num_out = `
const _Num_name = "m_2m_1m0m1m2"
-var _Num_index = [...]uint8{3, 6, 8, 10, 12}
+var _Num_index = [...]uint8{0, 3, 6, 8, 10, 12}
func (i Num) String() string {
i -= -2
- if i < 0 || i >= Num(len(_Num_index)) {
+ if i < 0 || i+1 >= Num(len(_Num_index)) {
return fmt.Sprintf("Num(%d)", i+-2)
}
- hi := _Num_index[i]
- lo := uint8(0)
- if i > 0 {
- lo = _Num_index[i-1]
- }
- return _Num_name[lo:hi]
+ return _Num_name[_Num_index[i]:_Num_index[i+1]]
}
`
@@ -196,25 +173,17 @@ const (
)
var (
- _Unum_index_0 = [...]uint8{2, 4, 6}
- _Unum_index_1 = [...]uint8{3, 6}
+ _Unum_index_0 = [...]uint8{0, 2, 4, 6}
+ _Unum_index_1 = [...]uint8{0, 3, 6}
)
func (i Unum) String() string {
switch {
case 0 <= i && i <= 2:
- lo := uint8(0)
- if i > 0 {
- lo = _Unum_index_0[i-1]
- }
- return _Unum_name_0[lo:_Unum_index_0[i]]
+ return _Unum_name_0[_Unum_index_0[i]:_Unum_index_0[i+1]]
case 253 <= i && i <= 254:
i -= 253
- lo := uint8(0)
- if i > 0 {
- lo = _Unum_index_1[i-1]
- }
- return _Unum_name_1[lo:_Unum_index_1[i]]
+ return _Unum_name_1[_Unum_index_1[i]:_Unum_index_1[i+1]]
default:
return fmt.Sprintf("Unum(%d)", i)
}
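All of the rewritten golden output follows one scheme: the index array gains a leading 0 so that each name is simply name[index[i]:index[i+1]], eliminating the old lo/hi bookkeeping. A standalone, hand-written sketch of the idea (not generator output):

	package main

	import "fmt"

	const dayName = "MondayTuesdayWednesdayThursdayFridaySaturdaySunday"

	// dayIndex[i] and dayIndex[i+1] bracket the i-th name inside dayName.
	var dayIndex = [...]uint8{0, 6, 13, 22, 30, 36, 44, 50}

	func dayString(i int) string {
		if i < 0 || i+1 >= len(dayIndex) {
			return fmt.Sprintf("Day(%d)", i)
		}
		return dayName[dayIndex[i]:dayIndex[i+1]]
	}

	func main() {
		fmt.Println(dayString(2)) // Wednesday
	}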
diff --git a/cmd/stringer/stringer.go b/cmd/stringer/stringer.go
index 33343de..0bc7aa8 100644
--- a/cmd/stringer/stringer.go
+++ b/cmd/stringer/stringer.go
@@ -56,7 +56,7 @@
// where t is the lower-cased name of the first type listed. It can be overridden
// with the -output flag.
//
-package main
+package main // import "golang.org/x/tools/cmd/stringer"
import (
"bytes"
@@ -261,7 +261,7 @@ func (g *Generator) parsePackage(directory string, names []string, text interfac
// check type-checks the package. The package must be OK to proceed.
func (pkg *Package) check(fs *token.FileSet, astFiles []*ast.File) {
pkg.defs = make(map[*ast.Ident]types.Object)
- var config types.Config
+ config := types.Config{FakeImportC: true}
info := &types.Info{
Defs: pkg.defs,
}
@@ -519,7 +519,7 @@ func (g *Generator) createIndexAndNameDecl(run []Value, typeName string, suffix
nameConst := fmt.Sprintf("_%s_name%s = %q", typeName, suffix, b.String())
nameLen := b.Len()
b.Reset()
- fmt.Fprintf(b, "_%s_index%s = [...]uint%d{", typeName, suffix, usize(nameLen))
+ fmt.Fprintf(b, "_%s_index%s = [...]uint%d{0, ", typeName, suffix, usize(nameLen))
for i, v := range indexes {
if i > 0 {
fmt.Fprintf(b, ", ")
@@ -563,15 +563,10 @@ func (g *Generator) buildOneRun(runs [][]Value, typeName string) {
// [2]: size of index element (8 for uint8 etc.)
// [3]: less than zero check (for signed types)
const stringOneRun = `func (i %[1]s) String() string {
- if %[3]si >= %[1]s(len(_%[1]s_index)) {
+ if %[3]si+1 >= %[1]s(len(_%[1]s_index)) {
return fmt.Sprintf("%[1]s(%%d)", i)
}
- hi := _%[1]s_index[i]
- lo := uint%[2]d(0)
- if i > 0 {
- lo = _%[1]s_index[i-1]
- }
- return _%[1]s_name[lo:hi]
+ return _%[1]s_name[_%[1]s_index[i]:_%[1]s_index[i+1]]
}
`
@@ -584,15 +579,10 @@ const stringOneRun = `func (i %[1]s) String() string {
*/
const stringOneRunWithOffset = `func (i %[1]s) String() string {
i -= %[2]s
- if %[4]si >= %[1]s(len(_%[1]s_index)) {
+ if %[4]si+1 >= %[1]s(len(_%[1]s_index)) {
return fmt.Sprintf("%[1]s(%%d)", i + %[2]s)
}
- hi := _%[1]s_index[i]
- lo := uint%[3]d(0)
- if i > 0 {
- lo = _%[1]s_index[i-1]
- }
- return _%[1]s_name[lo : hi]
+ return _%[1]s_name[_%[1]s_index[i] : _%[1]s_index[i+1]]
}
`
@@ -613,11 +603,8 @@ func (g *Generator) buildMultipleRuns(runs [][]Value, typeName string) {
if values[0].value != 0 {
g.Printf("\t\ti -= %s\n", &values[0])
}
- g.Printf("\t\tlo := uint%d(0)\n", usize(len(values)))
- g.Printf("\t\tif i > 0 {\n")
- g.Printf("\t\t\tlo = _%s_index_%d[i-1]\n", typeName, i)
- g.Printf("\t\t}\n")
- g.Printf("\t\treturn _%s_name_%d[lo:_%s_index_%d[i]]\n", typeName, i, typeName, i)
+ g.Printf("\t\treturn _%s_name_%d[_%s_index_%d[i]:_%s_index_%d[i+1]]\n",
+ typeName, i, typeName, i, typeName, i)
}
g.Printf("\tdefault:\n")
g.Printf("\t\treturn fmt.Sprintf(\"%s(%%d)\", i)\n", typeName)
diff --git a/cmd/stringer/testdata/cgo.go b/cmd/stringer/testdata/cgo.go
new file mode 100644
index 0000000..ef38f95
--- /dev/null
+++ b/cmd/stringer/testdata/cgo.go
@@ -0,0 +1,32 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Import "C" shouldn't be imported.
+
+package main
+
+/*
+#define HELLO 1
+*/
+import "C"
+
+import "fmt"
+
+type Cgo uint32
+
+const (
+ // MustScanSubDirs indicates that events were coalesced hierarchically.
+ MustScanSubDirs Cgo = 1 << iota
+)
+
+func main() {
+ _ = C.HELLO
+ ck(MustScanSubDirs, "MustScanSubDirs")
+}
+
+func ck(day Cgo, str string) {
+ if fmt.Sprint(day) != str {
+ panic("cgo.go: " + str)
+ }
+}
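This testdata file only type-checks because stringer's check step now sets FakeImportC (see the stringer.go hunk above), so go/types accepts the "C" import without running cgo. A minimal sketch of that configuration, assuming the golang.org/x/tools/go/types package used elsewhere in this change:

	package main

	import (
		"go/ast"
		"go/parser"
		"go/token"
		"log"

		"golang.org/x/tools/go/types"
	)

	func main() {
		fset := token.NewFileSet()
		src := "package p\nimport \"C\"\nfunc f() { _ = C.HELLO }\n"
		f, err := parser.ParseFile(fset, "cgo.go", src, 0)
		if err != nil {
			log.Fatal(err)
		}
		// FakeImportC turns `import "C"` into a fake package, so selectors
		// such as C.HELLO type-check without invoking cgo.
		conf := types.Config{FakeImportC: true}
		if _, err := conf.Check("p", fset, []*ast.File{f}, nil); err != nil {
			log.Fatal(err)
		}
	}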
diff --git a/cmd/tipgodoc/Dockerfile b/cmd/tipgodoc/Dockerfile
new file mode 100644
index 0000000..ac95819
--- /dev/null
+++ b/cmd/tipgodoc/Dockerfile
@@ -0,0 +1,13 @@
+FROM golang:1.4.1
+
+RUN apt-get update && apt-get install --no-install-recommends -y -q build-essential git
+
+# golang puts its go install here (weird but true)
+ENV GOROOT_BOOTSTRAP /usr/src/go
+
+# golang sets GOPATH=/go
+ADD . /go/src/tipgodoc
+RUN go install tipgodoc
+ENTRYPOINT ["/go/bin/tipgodoc"]
+# Kubernetes expects us to listen on port 8080
+EXPOSE 8080
diff --git a/cmd/tipgodoc/README b/cmd/tipgodoc/README
new file mode 100644
index 0000000..602e546
--- /dev/null
+++ b/cmd/tipgodoc/README
@@ -0,0 +1,3 @@
+To deploy as an App Engine Managed VM, use gcloud:
+
+ $ gcloud --project golang-org preview app deploy .
diff --git a/cmd/tipgodoc/app.yaml b/cmd/tipgodoc/app.yaml
new file mode 100644
index 0000000..59e5a06
--- /dev/null
+++ b/cmd/tipgodoc/app.yaml
@@ -0,0 +1,15 @@
+application: golang-org
+version: tip
+runtime: custom
+api_version: go1
+vm: true
+
+manual_scaling:
+ instances: 1
+
+handlers:
+- url: /.*
+ script: _go_app
+
+health_check:
+ enable_health_check: False
diff --git a/cmd/tipgodoc/tip.go b/cmd/tipgodoc/tip.go
new file mode 100644
index 0000000..4a92a5c
--- /dev/null
+++ b/cmd/tipgodoc/tip.go
@@ -0,0 +1,278 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Command tipgodoc is the beginning of the new tip.golang.org server,
+// serving the latest HEAD straight from the Git oven.
+package main
+
+import (
+ "bufio"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "net/http/httputil"
+ "net/url"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "sync"
+ "time"
+)
+
+const (
+ repoURL = "https://go.googlesource.com/"
+ metaURL = "https://go.googlesource.com/?b=master&format=JSON"
+)
+
+func main() {
+ p := new(Proxy)
+ go p.run()
+ http.Handle("/", p)
+ log.Fatal(http.ListenAndServe(":8080", nil))
+}
+
+// Proxy implements the tip.golang.org server: a reverse-proxy
+// that builds and runs godoc instances showing the latest docs.
+type Proxy struct {
+ mu sync.Mutex // protects the followin'
+ proxy http.Handler
+ cur string // signature of gorepo+toolsrepo
+ cmd *exec.Cmd // live godoc instance, or nil for none
+ side string
+ err error
+}
+
+func (p *Proxy) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if r.URL.Path == "/_tipstatus" {
+ p.serveStatus(w, r)
+ return
+ }
+ p.mu.Lock()
+ proxy := p.proxy
+ err := p.err
+ p.mu.Unlock()
+ if proxy == nil {
+ s := "tip.golang.org is starting up"
+ if err != nil {
+ s = err.Error()
+ }
+ http.Error(w, s, http.StatusInternalServerError)
+ return
+ }
+ proxy.ServeHTTP(w, r)
+}
+
+func (p *Proxy) serveStatus(w http.ResponseWriter, r *http.Request) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ fmt.Fprintf(w, "side=%v\ncurrent=%v\nerror=%v\n", p.side, p.cur, p.err)
+}
+
+// run runs in its own goroutine.
+func (p *Proxy) run() {
+ p.side = "a"
+ for {
+ p.poll()
+ time.Sleep(30 * time.Second)
+ }
+}
+
+// poll runs from the run loop goroutine.
+func (p *Proxy) poll() {
+ heads := gerritMetaMap()
+ if heads == nil {
+ return
+ }
+
+ sig := heads["go"] + "-" + heads["tools"]
+
+ p.mu.Lock()
+ changes := sig != p.cur
+ curSide := p.side
+ p.cur = sig
+ p.mu.Unlock()
+
+ if !changes {
+ return
+ }
+
+ newSide := "b"
+ if curSide == "b" {
+ newSide = "a"
+ }
+
+ cmd, hostport, err := initSide(newSide, heads["go"], heads["tools"])
+
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if err != nil {
+ log.Println(err)
+ p.err = err
+ return
+ }
+
+ u, err := url.Parse(fmt.Sprintf("http://%v/", hostport))
+ if err != nil {
+ log.Println(err)
+ p.err = err
+ return
+ }
+ p.proxy = httputil.NewSingleHostReverseProxy(u)
+ p.side = newSide
+ if p.cmd != nil {
+ p.cmd.Process.Kill()
+ }
+ p.cmd = cmd
+}
+
+func initSide(side, goHash, toolsHash string) (godoc *exec.Cmd, hostport string, err error) {
+ dir := filepath.Join(os.TempDir(), "tipgodoc", side)
+ if err := os.MkdirAll(dir, 0755); err != nil {
+ return nil, "", err
+ }
+
+ goDir := filepath.Join(dir, "go")
+ toolsDir := filepath.Join(dir, "gopath/src/golang.org/x/tools")
+ if err := checkout(repoURL+"go", goHash, goDir); err != nil {
+ return nil, "", err
+ }
+ if err := checkout(repoURL+"tools", toolsHash, toolsDir); err != nil {
+ return nil, "", err
+ }
+
+ make := exec.Command(filepath.Join(goDir, "src/make.bash"))
+ make.Dir = filepath.Join(goDir, "src")
+ if err := runErr(make); err != nil {
+ return nil, "", err
+ }
+ goBin := filepath.Join(goDir, "bin/go")
+ install := exec.Command(goBin, "install", "golang.org/x/tools/cmd/godoc")
+ install.Env = []string{
+ "GOROOT=" + goDir,
+ "GOPATH=" + filepath.Join(dir, "gopath"),
+ "GOROOT_BOOTSTRAP=" + os.Getenv("GOROOT_BOOTSTRAP"),
+ }
+ if err := runErr(install); err != nil {
+ return nil, "", err
+ }
+
+ godocBin := filepath.Join(goDir, "bin/godoc")
+ hostport = "localhost:8081"
+ if side == "b" {
+ hostport = "localhost:8082"
+ }
+ godoc = exec.Command(godocBin, "-http="+hostport)
+ godoc.Env = []string{"GOROOT=" + goDir}
+ // TODO(adg): log this somewhere useful
+ godoc.Stdout = os.Stdout
+ godoc.Stderr = os.Stderr
+ if err := godoc.Start(); err != nil {
+ return nil, "", err
+ }
+ go func() {
+ // TODO(bradfitz): tell the proxy that this side is dead
+ if err := godoc.Wait(); err != nil {
+ log.Printf("side %v exited: %v", side, err)
+ }
+ }()
+
+ for i := 0; i < 15; i++ {
+ time.Sleep(time.Second)
+ var res *http.Response
+ res, err = http.Get(fmt.Sprintf("http://%v/", hostport))
+ if err != nil {
+ continue
+ }
+ res.Body.Close()
+ if res.StatusCode == http.StatusOK {
+ return godoc, hostport, nil
+ }
+ }
+ return nil, "", fmt.Errorf("timed out waiting for side %v at %v (%v)", side, hostport, err)
+}
+
+func runErr(cmd *exec.Cmd) error {
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ if len(out) == 0 {
+ return err
+ }
+ return fmt.Errorf("%s\n%v", out, err)
+ }
+ return nil
+}
+
+func checkout(repo, hash, path string) error {
+ // Clone git repo if it doesn't exist.
+ if _, err := os.Stat(filepath.Join(path, ".git")); os.IsNotExist(err) {
+ if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
+ return err
+ }
+ if err := runErr(exec.Command("git", "clone", repo, path)); err != nil {
+ return err
+ }
+ } else if err != nil {
+ return err
+ }
+
+ // Pull down changes and update to hash.
+ cmd := exec.Command("git", "fetch")
+ cmd.Dir = path
+ if err := runErr(cmd); err != nil {
+ return err
+ }
+ cmd = exec.Command("git", "reset", "--hard", hash)
+ cmd.Dir = path
+ if err := runErr(cmd); err != nil {
+ return err
+ }
+ cmd = exec.Command("git", "clean", "-d", "-f", "-x")
+ cmd.Dir = path
+ return runErr(cmd)
+}
+
+// gerritMetaMap returns the map from repo name (e.g. "go") to its
+// latest master hash.
+// The returned map is nil on any transient error.
+func gerritMetaMap() map[string]string {
+ res, err := http.Get(metaURL)
+ if err != nil {
+ return nil
+ }
+ defer res.Body.Close()
+ defer io.Copy(ioutil.Discard, res.Body) // ensure EOF for keep-alive
+ if res.StatusCode != 200 {
+ return nil
+ }
+ var meta map[string]struct {
+ Branches map[string]string
+ }
+ br := bufio.NewReader(res.Body)
+ // For security reasons or something, this URL starts with ")]}'\n" before
+ // the JSON object. So ignore that.
+ // Shawn Pearce says it's guaranteed to always be just one line, ending in '\n'.
+ for {
+ b, err := br.ReadByte()
+ if err != nil {
+ return nil
+ }
+ if b == '\n' {
+ break
+ }
+ }
+ if err := json.NewDecoder(br).Decode(&meta); err != nil {
+ log.Printf("JSON decoding error from %v: %s", metaURL, err)
+ return nil
+ }
+ m := map[string]string{}
+ for repo, v := range meta {
+ if master, ok := v.Branches["master"]; ok {
+ m[repo] = master
+ }
+ }
+ return m
+}
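For reference, the body fetched from metaURL is Gerrit's anti-XSSI prefix followed by a JSON object keyed by repository name; an illustrative (not captured) response looks roughly like:

	)]}'
	{
	  "go":    {"branches": {"master": "<go master sha>"}},
	  "tools": {"branches": {"master": "<tools master sha>"}}
	}

Only each repository's "master" entry is kept, matching the Branches field decoded above.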
diff --git a/cmd/vet/asmdecl.go b/cmd/vet/asmdecl.go
index 954ffbd..63095aa 100644
--- a/cmd/vet/asmdecl.go
+++ b/cmd/vet/asmdecl.go
@@ -81,7 +81,7 @@ var (
asmTEXT = re(`\bTEXT\b.*·([^\(]+)\(SB\)(?:\s*,\s*([0-9A-Z|+]+))?(?:\s*,\s*\$(-?[0-9]+)(?:-([0-9]+))?)?`)
asmDATA = re(`\b(DATA|GLOBL)\b`)
asmNamedFP = re(`([a-zA-Z0-9_\xFF-\x{10FFFF}]+)(?:\+([0-9]+))\(FP\)`)
- asmUnnamedFP = re(`[^+\-0-9]](([0-9]+)\(FP\))`)
+ asmUnnamedFP = re(`[^+\-0-9](([0-9]+)\(FP\))`)
asmSP = re(`[^+\-0-9](([0-9]+)\(([A-Z0-9]+)\))`)
asmOpcode = re(`^\s*(?:[A-Z0-9a-z_]+:)?\s*([A-Z]+)\s*([^,]*)(?:,\s*(.*))?`)
power64Suff = re(`([BHWD])(ZU|Z|U|BR)?$`)
@@ -110,6 +110,7 @@ func asmCheck(pkg *Package) {
}
}
+Files:
for _, f := range pkg.files {
if !strings.HasSuffix(f.name, ".s") {
continue
@@ -173,7 +174,7 @@ func asmCheck(pkg *Package) {
flushRet()
if arch == "" {
f.Warnf(token.NoPos, "%s: cannot determine architecture for assembly file", f.name)
- return
+ continue Files
}
fnName = m[1]
fn = knownFunc[m[1]][arch]
@@ -190,6 +191,9 @@ func asmCheck(pkg *Package) {
localSize += archDef.intSize
}
argSize, _ = strconv.Atoi(m[4])
+ if fn == nil && !strings.Contains(fnName, "<>") {
+ badf("function %s missing Go declaration", fnName)
+ }
wroteSP = false
haveRetArg = false
continue
@@ -251,7 +255,13 @@ func asmCheck(pkg *Package) {
}
for _, m := range asmUnnamedFP.FindAllStringSubmatch(line, -1) {
- badf("use of unnamed argument %s", m[1])
+ off, _ := strconv.Atoi(m[2])
+ v := fn.varByOffset[off]
+ if v != nil {
+ badf("use of unnamed argument %s; offset %d is %s+%d(FP)", m[1], off, v.name, v.off)
+ } else {
+ badf("use of unnamed argument %s", m[1])
+ }
}
for _, m := range asmNamedFP.FindAllStringSubmatch(line, -1) {
diff --git a/cmd/vet/bool.go b/cmd/vet/bool.go
index e28c258..37aac68 100644
--- a/cmd/vet/bool.go
+++ b/cmd/vet/bool.go
@@ -9,6 +9,8 @@ package main
import (
"go/ast"
"go/token"
+
+ "golang.org/x/tools/go/ast/astutil"
)
func init() {
@@ -162,7 +164,7 @@ func hasSideEffects(e ast.Expr) bool {
// split returns []{d, c, b, a}.
func (op boolOp) split(e ast.Expr) (exprs []ast.Expr) {
for {
- e = unparen(e)
+ e = astutil.Unparen(e)
if b, ok := e.(*ast.BinaryExpr); ok && b.Op == op.tok {
exprs = append(exprs, op.split(b.Y)...)
e = b.X
@@ -173,13 +175,3 @@ func (op boolOp) split(e ast.Expr) (exprs []ast.Expr) {
}
return
}
-
-func unparen(e ast.Expr) ast.Expr {
- for {
- p, ok := e.(*ast.ParenExpr)
- if !ok {
- return e
- }
- e = p.X
- }
-}
diff --git a/cmd/vet/doc.go b/cmd/vet/doc.go
index e90a8b8..2ae32d1 100644
--- a/cmd/vet/doc.go
+++ b/cmd/vet/doc.go
@@ -179,4 +179,4 @@ These flags configure the behavior of vet:
-test
For testing only: sets -all and -shadow.
*/
-package main
+package main // import "golang.org/x/tools/cmd/vet"
diff --git a/cmd/vet/print.go b/cmd/vet/print.go
index 14de6ab..82edd81 100644
--- a/cmd/vet/print.go
+++ b/cmd/vet/print.go
@@ -474,6 +474,10 @@ func (f *File) argCanBeChecked(call *ast.CallExpr, formatArg int, isStar bool, s
// Shouldn't happen, so catch it with prejudice.
panic("negative arg num")
}
+ if argNum == 0 {
+ f.Badf(call.Pos(), `index value [0] for %s("%s"); indexes start at 1`, state.name, state.format)
+ return false
+ }
if argNum < len(call.Args)-1 {
return true // Always OK.
}
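As the new check encodes, explicit argument indexes in Printf-style format strings are 1-based, so [0] can never be valid. A hand-written illustration (not from the test suite):

	// fmt.Sprintf("%[1]s/%[1]s", "x") yields "x/x": index 1 names the first operand.
	// fmt.Sprintf("%[0]s", "x") is now reported by vet: index value [0] ...; indexes start at 1.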
diff --git a/cmd/vet/structtag.go b/cmd/vet/structtag.go
index 5da3904..e8164a4 100644
--- a/cmd/vet/structtag.go
+++ b/cmd/vet/structtag.go
@@ -7,6 +7,7 @@
package main
import (
+ "errors"
"go/ast"
"reflect"
"strconv"
@@ -32,13 +33,8 @@ func checkCanonicalFieldTag(f *File, node ast.Node) {
return
}
- // Check tag for validity by appending
- // new key:value to end and checking that
- // the tag parsing code can find it.
- st := reflect.StructTag(tag + ` _gofix:"_magic"`)
- if st.Get("_gofix") != "_magic" {
- f.Badf(field.Pos(), "struct field tag %s not compatible with reflect.StructTag.Get", field.Tag.Value)
- return
+ if err := validateStructTag(tag); err != nil {
+ f.Badf(field.Pos(), "struct field tag %s not compatible with reflect.StructTag.Get: %s", field.Tag.Value, err)
}
// Check for use of json or xml tags with unexported fields.
@@ -53,6 +49,7 @@ func checkCanonicalFieldTag(f *File, node ast.Node) {
return
}
+ st := reflect.StructTag(tag)
for _, enc := range [...]string{"json", "xml"} {
if st.Get(enc) != "" {
f.Badf(field.Pos(), "struct field %s has %s tag but is not exported", field.Names[0].Name, enc)
@@ -60,3 +57,66 @@ func checkCanonicalFieldTag(f *File, node ast.Node) {
}
}
}
+
+var (
+ errTagSyntax = errors.New("bad syntax for struct tag pair")
+ errTagKeySyntax = errors.New("bad syntax for struct tag key")
+ errTagValueSyntax = errors.New("bad syntax for struct tag value")
+)
+
+// validateStructTag parses the struct tag and returns an error if it is not
+// in the canonical format, which is a space-separated list of key:"value"
+// settings. The value may contain spaces.
+func validateStructTag(tag string) error {
+ // This code is based on the StructTag.Get code in package reflect.
+
+ for tag != "" {
+ // Skip leading space.
+ i := 0
+ for i < len(tag) && tag[i] == ' ' {
+ i++
+ }
+ tag = tag[i:]
+ if tag == "" {
+ break
+ }
+
+ // Scan to colon. A space, a quote or a control character is a syntax error.
+ // Strictly speaking, control chars include the range [0x7f, 0x9f], not just
+ // [0x00, 0x1f], but in practice, we ignore the multi-byte control characters
+ // as it is simpler to inspect the tag's bytes than the tag's runes.
+ i = 0
+ for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f {
+ i++
+ }
+ if i == 0 {
+ return errTagKeySyntax
+ }
+ if i+1 >= len(tag) || tag[i] != ':' {
+ return errTagSyntax
+ }
+ if tag[i+1] != '"' {
+ return errTagValueSyntax
+ }
+ tag = tag[i+1:]
+
+ // Scan quoted string to find value.
+ i = 1
+ for i < len(tag) && tag[i] != '"' {
+ if tag[i] == '\\' {
+ i++
+ }
+ i++
+ }
+ if i >= len(tag) {
+ return errTagValueSyntax
+ }
+ qvalue := string(tag[:i+1])
+ tag = tag[i+1:]
+
+ if _, err := strconv.Unquote(qvalue); err != nil {
+ return errTagValueSyntax
+ }
+ }
+ return nil
+}
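A quick sketch of how the validator behaves, written as an Example-style function that could sit beside structtag.go (it is not part of this change and assumes fmt is imported):

	func ExampleValidateStructTag() {
		fmt.Println(validateStructTag(`json:"name,omitempty" xml:"name"`)) // <nil>
		fmt.Println(validateStructTag(`hello`))                            // bad syntax for struct tag pair
		fmt.Println(validateStructTag(`x:"noEndQuote`))                    // bad syntax for struct tag value
	}

The last two inputs correspond to the A and G cases added to testdata/structtag.go below.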
diff --git a/cmd/vet/testdata/print.go b/cmd/vet/testdata/print.go
index 3875ac5..22c6e0a 100644
--- a/cmd/vet/testdata/print.go
+++ b/cmd/vet/testdata/print.go
@@ -174,6 +174,8 @@ func PrintfTests() {
Printf("%[3]*s", "hi", 2) // ERROR "missing argument for Printf.* reads arg 3, have only 2"
fmt.Sprintf("%[3]d", 2) // ERROR "missing argument for Sprintf.* reads arg 3, have only 1"
Printf("%[2]*.[1]*[3]d", 2, "hi", 4) // ERROR "arg .hi. for \* in printf format not of type int"
+ Printf("%[0]s", "arg1") // ERROR "index value \[0\] for Printf.*; indexes start at 1"
+ Printf("%[0]d", 1) // ERROR "index value \[0\] for Printf.*; indexes start at 1"
// Something that satisfies the error interface.
var e error
fmt.Println(e.Error()) // ok
diff --git a/cmd/vet/testdata/structtag.go b/cmd/vet/testdata/structtag.go
index 55462e5..6878f56 100644
--- a/cmd/vet/testdata/structtag.go
+++ b/cmd/vet/testdata/structtag.go
@@ -2,14 +2,23 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// This file contains tests for the structtag checker.
-
// This file contains the test for canonical struct tags.
package testdata
type StructTagTest struct {
- X int "hello" // ERROR "not compatible with reflect.StructTag.Get"
+ A int "hello" // ERROR "not compatible with reflect.StructTag.Get: bad syntax for struct tag pair"
+ B int "\tx:\"y\"" // ERROR "not compatible with reflect.StructTag.Get: bad syntax for struct tag key"
+ C int "x:\"y\"\tx:\"y\"" // ERROR "not compatible with reflect.StructTag.Get"
+ D int "x:`y`" // ERROR "not compatible with reflect.StructTag.Get: bad syntax for struct tag value"
+ E int "ct\brl:\"char\"" // ERROR "not compatible with reflect.StructTag.Get: bad syntax for struct tag pair"
+ F int `:"emptykey"` // ERROR "not compatible with reflect.StructTag.Get: bad syntax for struct tag key"
+ G int `x:"noEndQuote` // ERROR "not compatible with reflect.StructTag.Get: bad syntax for struct tag value"
+ H int `x:"trunc\x0"` // ERROR "not compatible with reflect.StructTag.Get: bad syntax for struct tag value"
+ OK0 int `x:"y" u:"v" w:""`
+ OK1 int `x:"y:z" u:"v" w:""` // note multiple colons.
+ OK2 int "k0:\"values contain spaces\" k1:\"literal\ttabs\" k2:\"and\\tescaped\\tabs\""
+ OK3 int `under_scores:"and" CAPS:"ARE_OK"`
}
type UnexportedEncodingTagTest struct {
diff --git a/cmd/vet/types.go b/cmd/vet/types.go
index 0a2a248..8a0182b 100644
--- a/cmd/vet/types.go
+++ b/cmd/vet/types.go
@@ -37,7 +37,9 @@ func init() {
func importType(path, name string) types.Type {
pkg, err := types.DefaultImport(imports, path)
if err != nil {
- warnf("import failed: %v", err)
+ // This can happen if fmt hasn't been compiled yet.
+ // Since nothing uses formatterType anyway, don't complain.
+ //warnf("import failed: %v", err)
return nil
}
if obj, ok := pkg.Scope().Lookup(name).(*types.TypeName); ok {
diff --git a/cmd/vet/whitelist/whitelist.go b/cmd/vet/whitelist/whitelist.go
index 975c9e3..d6f0dce 100644
--- a/cmd/vet/whitelist/whitelist.go
+++ b/cmd/vet/whitelist/whitelist.go
@@ -3,7 +3,7 @@
// license that can be found in the LICENSE file.
// Package whitelist defines exceptions for the vet tool.
-package whitelist
+package whitelist // import "golang.org/x/tools/cmd/vet/whitelist"
// UnkeyedLiteral are types that are actually slices, but
// syntactically, we cannot tell whether the Typ in pkg.Typ{1, 2, 3}
@@ -39,6 +39,7 @@ var UnkeyedLiteral = map[string]bool{
// These image and image/color struct types are frozen. We will never add fields to them.
"image/color.Alpha16": true,
"image/color.Alpha": true,
+ "image/color.CMYK": true,
"image/color.Gray16": true,
"image/color.Gray": true,
"image/color.NRGBA64": true,
diff --git a/codereview.cfg b/codereview.cfg
deleted file mode 100644
index 43dbf3c..0000000
--- a/codereview.cfg
+++ /dev/null
@@ -1,2 +0,0 @@
-defaultcc: golang-codereviews@googlegroups.com
-contributors: http://go.googlecode.com/hg/CONTRIBUTORS
diff --git a/container/intsets/sparse.go b/container/intsets/sparse.go
index 0ba7cb2..8847feb 100644
--- a/container/intsets/sparse.go
+++ b/container/intsets/sparse.go
@@ -11,11 +11,9 @@
// map type. The IsEmpty, Min, Max, Clear and TakeMin operations
// require constant time.
//
-package intsets
+package intsets // import "golang.org/x/tools/container/intsets"
// TODO(adonovan):
-// - Add SymmetricDifference(x, y *Sparse), i.e. x ∆ y.
-// - Add SubsetOf (x∖y=∅) and Intersects (x∩y≠∅) predicates.
// - Add InsertAll(...int), RemoveAll(...int)
// - Add 'bool changed' results for {Intersection,Difference}With too.
//
@@ -485,6 +483,29 @@ func (s *Sparse) Intersection(x, y *Sparse) {
s.discardTail(sb)
}
+// Intersects reports whether s ∩ x ≠ ∅.
+func (s *Sparse) Intersects(x *Sparse) bool {
+ sb := s.start()
+ xb := x.start()
+ for sb != &s.root && xb != &x.root {
+ switch {
+ case xb.offset < sb.offset:
+ xb = xb.next
+ case xb.offset > sb.offset:
+ sb = sb.next
+ default:
+ for i := range sb.bits {
+ if sb.bits[i]&xb.bits[i] != 0 {
+ return true
+ }
+ }
+ sb = sb.next
+ xb = xb.next
+ }
+ }
+ return false
+}
+
// UnionWith sets s to the union s ∪ x, and reports whether s grew.
func (s *Sparse) UnionWith(x *Sparse) bool {
if s == x {
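
A brief usage sketch of the new Intersects predicate (illustrative only; it assumes the import path declared at the top of this file):

package main

import (
	"fmt"

	"golang.org/x/tools/container/intsets"
)

func main() {
	var a, b intsets.Sparse
	a.Insert(1)
	a.Insert(300) // typically stored in a different block than 1
	b.Insert(300)
	fmt.Println(a.Intersects(&b)) // true: both contain 300
	b.Remove(300)
	b.Insert(2)
	fmt.Println(a.Intersects(&b)) // false: no common element
}
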
@@ -667,6 +688,146 @@ func (s *Sparse) Difference(x, y *Sparse) {
s.discardTail(sb)
}
+// SymmetricDifferenceWith sets s to the symmetric difference s ∆ x.
+func (s *Sparse) SymmetricDifferenceWith(x *Sparse) {
+ if s == x {
+ s.Clear()
+ return
+ }
+
+ sb := s.start()
+ xb := x.start()
+ for xb != &x.root && sb != &s.root {
+ switch {
+ case sb.offset < xb.offset:
+ sb = sb.next
+ case xb.offset < sb.offset:
+ nb := s.insertBlockBefore(sb)
+ nb.offset = xb.offset
+ nb.bits = xb.bits
+ xb = xb.next
+ default:
+ var sum word
+ for i := range sb.bits {
+ r := sb.bits[i] ^ xb.bits[i]
+ sb.bits[i] = r
+ sum |= r
+ }
+ sb = sb.next
+ xb = xb.next
+ if sum == 0 {
+ s.removeBlock(sb.prev)
+ }
+ }
+ }
+
+ for xb != &x.root { // append the tail of x to s
+ sb = s.insertBlockBefore(sb)
+ sb.offset = xb.offset
+ sb.bits = xb.bits
+ sb = sb.next
+ xb = xb.next
+ }
+}
+
+// SymmetricDifference sets s to the symmetric difference x ∆ y.
+func (s *Sparse) SymmetricDifference(x, y *Sparse) {
+ switch {
+ case x == y:
+ s.Clear()
+ return
+ case s == x:
+ s.SymmetricDifferenceWith(y)
+ return
+ case s == y:
+ s.SymmetricDifferenceWith(x)
+ return
+ }
+
+ sb := s.start()
+ xb := x.start()
+ yb := y.start()
+ for xb != &x.root && yb != &y.root {
+ if sb == &s.root {
+ sb = s.insertBlockBefore(sb)
+ }
+ switch {
+ case yb.offset < xb.offset:
+ sb.offset = yb.offset
+ sb.bits = yb.bits
+ sb = sb.next
+ yb = yb.next
+ case xb.offset < yb.offset:
+ sb.offset = xb.offset
+ sb.bits = xb.bits
+ sb = sb.next
+ xb = xb.next
+ default:
+ var sum word
+ for i := range sb.bits {
+ r := xb.bits[i] ^ yb.bits[i]
+ sb.bits[i] = r
+ sum |= r
+ }
+ if sum != 0 {
+ sb.offset = xb.offset
+ sb = sb.next
+ }
+ xb = xb.next
+ yb = yb.next
+ }
+ }
+
+ for xb != &x.root { // append the tail of x to s
+ if sb == &s.root {
+ sb = s.insertBlockBefore(sb)
+ }
+ sb.offset = xb.offset
+ sb.bits = xb.bits
+ sb = sb.next
+ xb = xb.next
+ }
+
+ for yb != &y.root { // append the tail of y to s
+ if sb == &s.root {
+ sb = s.insertBlockBefore(sb)
+ }
+ sb.offset = yb.offset
+ sb.bits = yb.bits
+ sb = sb.next
+ yb = yb.next
+ }
+
+ s.discardTail(sb)
+}
+
+// SubsetOf reports whether s ∖ x = ∅.
+func (s *Sparse) SubsetOf(x *Sparse) bool {
+ if s == x {
+ return true
+ }
+
+ sb := s.start()
+ xb := x.start()
+ for sb != &s.root {
+ switch {
+ case xb == &x.root || xb.offset > sb.offset:
+ return false
+ case xb.offset < sb.offset:
+ xb = xb.next
+ default:
+ for i := range sb.bits {
+ if sb.bits[i]&^xb.bits[i] != 0 {
+ return false
+ }
+ }
+ sb = sb.next
+ xb = xb.next
+ }
+ }
+ return true
+}
+
// Equals reports whether the sets s and t have the same elements.
func (s *Sparse) Equals(t *Sparse) bool {
if s == t {
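
A similar illustrative sketch for the other additions, SymmetricDifference and SubsetOf (the printed set format shown in the comment is indicative only):

package main

import (
	"fmt"

	"golang.org/x/tools/container/intsets"
)

func main() {
	var x, y, sd intsets.Sparse
	x.Insert(1)
	x.Insert(2)
	y.Insert(2)
	y.Insert(3)

	// Elements that occur in exactly one of x and y.
	sd.SymmetricDifference(&x, &y)
	fmt.Println(&sd) // e.g. {1 3}

	fmt.Println(x.SubsetOf(&y)) // false: 1 is in x but not in y
	y.Insert(1)
	fmt.Println(x.SubsetOf(&y)) // true
}
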
diff --git a/container/intsets/sparse_test.go b/container/intsets/sparse_test.go
index 2338201..34b9a4e 100644
--- a/container/intsets/sparse_test.go
+++ b/container/intsets/sparse_test.go
@@ -60,7 +60,7 @@ func TestBasics(t *testing.T) {
// Insert, Len, IsEmpty, Hash, Clear, AppendTo.
func TestMoreBasics(t *testing.T) {
- var set intsets.Sparse
+ set := new(intsets.Sparse)
set.Insert(456)
set.Insert(123)
set.Insert(789)
@@ -78,7 +78,7 @@ func TestMoreBasics(t *testing.T) {
}
got := set.AppendTo([]int{-1})
if want := []int{-1, 123, 456, 789}; fmt.Sprint(got) != fmt.Sprint(want) {
- t.Errorf("%s.AppendTo: got %v, want %v", got, want)
+ t.Errorf("%s.AppendTo: got %v, want %v", set, got, want)
}
set.Clear()
@@ -107,7 +107,7 @@ func TestTakeMin(t *testing.T) {
}
}
if set.TakeMin(&got) {
- t.Errorf("%s.TakeMin returned true", set, got)
+ t.Errorf("%s.TakeMin returned true", &set)
}
if err := set.Check(); err != nil {
t.Fatalf("check: %s: %#v", err, &set)
@@ -397,6 +397,47 @@ func TestSetOperations(t *testing.T) {
D.bits.Copy(&X.bits)
D.bits.DifferenceWith(&D.bits)
D.check(t, "D.DifferenceWith(D)")
+
+ // SD.SymmetricDifference(X, Y)
+ SD := makePset()
+ SD.bits.SymmetricDifference(&X.bits, &Y.bits)
+ for n := range X.hash {
+ if !Y.hash[n] {
+ SD.hash[n] = true
+ }
+ }
+ for n := range Y.hash {
+ if !X.hash[n] {
+ SD.hash[n] = true
+ }
+ }
+ SD.check(t, "SD.SymmetricDifference(X, Y)")
+
+ // X.SymmetricDifferenceWith(Y)
+ SD.bits.Copy(&X.bits)
+ SD.bits.SymmetricDifferenceWith(&Y.bits)
+ SD.check(t, "X.SymmetricDifference(Y)")
+
+ // Y.SymmetricDifferenceWith(X)
+ SD.bits.Copy(&Y.bits)
+ SD.bits.SymmetricDifferenceWith(&X.bits)
+ SD.check(t, "Y.SymmetricDifference(X)")
+
+ // SD.SymmetricDifference(X, X)
+ SD.bits.SymmetricDifference(&X.bits, &X.bits)
+ SD.hash = nil
+ SD.check(t, "SD.SymmetricDifference(X, X)")
+
+ // SD.SymmetricDifference(X, Copy(X))
+ X2 := makePset()
+ X2.bits.Copy(&X.bits)
+ SD.bits.SymmetricDifference(&X.bits, &X2.bits)
+ SD.check(t, "SD.SymmetricDifference(X, Copy(X))")
+
+ // Copy(X).SymmetricDifferenceWith(X)
+ SD.bits.Copy(&X.bits)
+ SD.bits.SymmetricDifferenceWith(&X.bits)
+ SD.check(t, "Copy(X).SymmetricDifferenceWith(X)")
}
}
@@ -417,6 +458,82 @@ func TestIntersectionWith(t *testing.T) {
}
}
+func TestIntersects(t *testing.T) {
+ prng := rand.New(rand.NewSource(0))
+
+ for i := uint(0); i < 12; i++ {
+ X, Y := randomPset(prng, 1<<i), randomPset(prng, 1<<i)
+ x, y := &X.bits, &Y.bits
+
+ // test the slow way
+ var z intsets.Sparse
+ z.Copy(x)
+ z.IntersectionWith(y)
+
+ if got, want := x.Intersects(y), !z.IsEmpty(); got != want {
+ t.Errorf("Intersects: got %v, want %v", got, want)
+ }
+
+ // make it false
+ a := x.AppendTo(nil)
+ for _, v := range a {
+ y.Remove(v)
+ }
+
+ if got, want := x.Intersects(y), false; got != want {
+ t.Errorf("Intersects: got %v, want %v", got, want)
+ }
+
+ // make it true
+ if x.IsEmpty() {
+ continue
+ }
+ i := prng.Intn(len(a))
+ y.Insert(a[i])
+
+ if got, want := x.Intersects(y), true; got != want {
+ t.Errorf("Intersects: got %v, want %v", got, want)
+ }
+ }
+}
+
+func TestSubsetOf(t *testing.T) {
+ prng := rand.New(rand.NewSource(0))
+
+ for i := uint(0); i < 12; i++ {
+ X, Y := randomPset(prng, 1<<i), randomPset(prng, 1<<i)
+ x, y := &X.bits, &Y.bits
+
+ // test the slow way
+ var z intsets.Sparse
+ z.Copy(x)
+ z.DifferenceWith(y)
+
+ if got, want := x.SubsetOf(y), z.IsEmpty(); got != want {
+ t.Errorf("SubsetOf: got %v, want %v", got, want)
+ }
+
+ // make it true
+ y.UnionWith(x)
+
+ if got, want := x.SubsetOf(y), true; got != want {
+ t.Errorf("SubsetOf: got %v, want %v", got, want)
+ }
+
+ // make it false
+ if x.IsEmpty() {
+ continue
+ }
+ a := x.AppendTo(nil)
+ i := prng.Intn(len(a))
+ y.Remove(a[i])
+
+ if got, want := x.SubsetOf(y), false; got != want {
+ t.Errorf("SubsetOf: got %v, want %v", got, want)
+ }
+ }
+}
+
func TestBitString(t *testing.T) {
for _, test := range []struct {
input []int
diff --git a/cover/profile.go b/cover/profile.go
index 1cbd739..a53bf4e 100644
--- a/cover/profile.go
+++ b/cover/profile.go
@@ -4,7 +4,7 @@
// Package cover provides support for parsing coverage profiles
// generated by "go test -coverprofile=cover.out".
-package cover
+package cover // import "golang.org/x/tools/cover"
import (
"bufio"
@@ -162,7 +162,7 @@ func (p *Profile) Boundaries(src []byte) (boundaries []Boundary) {
if b.StartLine == line && b.StartCol == col {
boundaries = append(boundaries, boundary(si, true, b.Count))
}
- if b.EndLine == line && b.EndCol == col {
+ if b.EndLine == line && b.EndCol == col || line > b.EndLine {
boundaries = append(boundaries, boundary(si, false, 0))
bi++
continue // Don't advance through src; maybe the next block starts here.
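
A note on reading the new condition above: in Go, && binds tighter than ||, so it groups as (b.EndLine == line && b.EndCol == col) || line > b.EndLine, closing the boundary once the scan has moved past the block's end line. A tiny standalone check of that precedence, with made-up values:

package main

import "fmt"

func main() {
	line, col := 10, 1
	endLine, endCol := 9, 5
	// Parses as (endLine == line && endCol == col) || line > endLine.
	fmt.Println(endLine == line && endCol == col || line > endLine) // true, via line > endLine
}
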
diff --git a/dashboard/README b/dashboard/README
deleted file mode 100644
index 1224450..0000000
--- a/dashboard/README
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-The files in this directory constitute the continuous builder:
-
-app/: an AppEngine server. The code that runs http://build.golang.org/
-builder/: gobuilder, a Go continuous build client
-coordinator/: daemon that runs on CoreOS on Google Compute Engine and manages
- builds (using the builder in single-shot mode) in Docker containers.
-env/: configuration files describing the environment of builders.
- Many builders are still configured ad-hoc.
-watcher/: a daemon that watches for new commits to the Go repository and
- its sub-repositories, and notifies the dashboard of those commits.
-
-If you wish to run a Go builder, please email golang-dev@googlegroups.com
-
-To run a builder:
-
-* Write the key ~gobuild/.gobuildkey
- You need to get it from someone who knows the key.
- You may also use a filename of the form .gobuildkey-$BUILDER if you
- wish to run builders for multiple targets.
-
-* Append your googlecode.com username and password credentials from
- https://code.google.com/hosting/settings
- to the buildkey file in the format "Username\nPassword\n".
- (This is for uploading tarballs to the project downloads section,
- and is an optional step.)
-
-* Build and run gobuilder (see its documentation for command-line options).
-
diff --git a/dashboard/app/app.yaml b/dashboard/app/app.yaml
deleted file mode 100644
index 8424cd0..0000000
--- a/dashboard/app/app.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-# Update with
-# google_appengine/appcfg.py [-V test-build] update .
-#
-# Using -V test-build will run as test-build.golang.org.
-
-application: golang-org
-version: build
-runtime: go
-api_version: go1
-
-handlers:
-- url: /static
- static_dir: static
-- url: /(|gccgo/)log/.+
- script: _go_app
-- url: /(|gccgo/)(|commit|packages|result|perf-result|tag|todo|perf|perfdetail|perfgraph|updatebenchmark)
- script: _go_app
-- url: /(|gccgo/)(init|buildtest|key|perflearn|_ah/queue/go/delay)
- script: _go_app
- login: admin
-
diff --git a/dashboard/app/build/build.go b/dashboard/app/build/build.go
deleted file mode 100644
index 90ca344..0000000
--- a/dashboard/app/build/build.go
+++ /dev/null
@@ -1,911 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build appengine
-
-package build
-
-import (
- "bytes"
- "compress/gzip"
- "crypto/sha1"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "sort"
- "strconv"
- "strings"
- "time"
-
- "appengine"
- "appengine/datastore"
-
- "cache"
-)
-
-const (
- maxDatastoreStringLen = 500
- PerfRunLength = 1024
-)
-
-// A Package describes a package that is listed on the dashboard.
-type Package struct {
- Kind string // "subrepo", "external", or empty for the main Go tree
- Name string
- Path string // (empty for the main Go tree)
- NextNum int // Num of the next head Commit
-}
-
-func (p *Package) String() string {
- return fmt.Sprintf("%s: %q", p.Path, p.Name)
-}
-
-func (p *Package) Key(c appengine.Context) *datastore.Key {
- key := p.Path
- if key == "" {
- key = "go"
- }
- return datastore.NewKey(c, "Package", key, 0, nil)
-}
-
-// LastCommit returns the most recent Commit for this Package.
-func (p *Package) LastCommit(c appengine.Context) (*Commit, error) {
- var commits []*Commit
- _, err := datastore.NewQuery("Commit").
- Ancestor(p.Key(c)).
- Order("-Time").
- Limit(1).
- GetAll(c, &commits)
- if err != nil {
- return nil, err
- }
- if len(commits) != 1 {
- return nil, datastore.ErrNoSuchEntity
- }
- return commits[0], nil
-}
-
-// GetPackage fetches a Package by path from the datastore.
-func GetPackage(c appengine.Context, path string) (*Package, error) {
- p := &Package{Path: path}
- err := datastore.Get(c, p.Key(c), p)
- if err == datastore.ErrNoSuchEntity {
- return nil, fmt.Errorf("package %q not found", path)
- }
- return p, err
-}
-
-// A Commit describes an individual commit in a package.
-//
-// Each Commit entity is a descendant of its associated Package entity.
-// In other words, all Commits with the same PackagePath belong to the same
-// datastore entity group.
-type Commit struct {
- PackagePath string // (empty for main repo commits)
- Hash string
- ParentHash string
- Num int // Internal monotonic counter unique to this package.
-
- User string
- Desc string `datastore:",noindex"`
- Time time.Time
- NeedsBenchmarking bool
- TryPatch bool
-
- // ResultData is the Data string of each build Result for this Commit.
- // For non-Go commits, only the Results for the current Go tip, weekly,
- // and release Tags are stored here. This is purely de-normalized data.
- // The complete data set is stored in Result entities.
- ResultData []string `datastore:",noindex"`
-
- // PerfResults holds a set of “builder|benchmark” tuples denoting
- // what benchmarks have been executed on the commit.
- PerfResults []string `datastore:",noindex"`
-
- FailNotificationSent bool
-}
-
-func (com *Commit) Key(c appengine.Context) *datastore.Key {
- if com.Hash == "" {
- panic("tried Key on Commit with empty Hash")
- }
- p := Package{Path: com.PackagePath}
- key := com.PackagePath + "|" + com.Hash
- return datastore.NewKey(c, "Commit", key, 0, p.Key(c))
-}
-
-func (c *Commit) Valid() error {
- if !validHash(c.Hash) {
- return errors.New("invalid Hash")
- }
- if c.ParentHash != "" && !validHash(c.ParentHash) { // empty is OK
- return errors.New("invalid ParentHash")
- }
- return nil
-}
-
-func putCommit(c appengine.Context, com *Commit) error {
- if err := com.Valid(); err != nil {
- return fmt.Errorf("putting Commit: %v", err)
- }
- if com.Num == 0 && com.ParentHash != "0000" { // 0000 is used in tests
- return fmt.Errorf("putting Commit: invalid Num (must be > 0)")
- }
- if _, err := datastore.Put(c, com.Key(c), com); err != nil {
- return fmt.Errorf("putting Commit: %v", err)
- }
- return nil
-}
-
-// Each result line is approximately 105 bytes. This constant is a tradeoff
-// between build history and the AppEngine datastore limit of 1 MB.
-const maxResults = 1000
-
-// AddResult adds the denormalized Result data to the Commit's Result field.
-// It must be called from inside a datastore transaction.
-func (com *Commit) AddResult(c appengine.Context, r *Result) error {
- if err := datastore.Get(c, com.Key(c), com); err != nil {
- return fmt.Errorf("getting Commit: %v", err)
- }
-
- var resultExists bool
- for i, s := range com.ResultData {
- // if there already exists result data for this builder at com, overwrite it.
- if strings.HasPrefix(s, r.Builder+"|") && strings.HasSuffix(s, "|"+r.GoHash) {
- resultExists = true
- com.ResultData[i] = r.Data()
- }
- }
- if !resultExists {
- // otherwise, add the new result data for this builder.
- com.ResultData = trim(append(com.ResultData, r.Data()), maxResults)
- }
- return putCommit(c, com)
-}
-
-// AddPerfResult remembers that the builder has run the benchmark on the commit.
-// It must be called from inside a datastore transaction.
-func (com *Commit) AddPerfResult(c appengine.Context, builder, benchmark string) error {
- if err := datastore.Get(c, com.Key(c), com); err != nil {
- return fmt.Errorf("getting Commit: %v", err)
- }
- if !com.NeedsBenchmarking {
- return fmt.Errorf("trying to add perf result to Commit(%v) that does not require benchmarking", com.Hash)
- }
- s := builder + "|" + benchmark
- for _, v := range com.PerfResults {
- if v == s {
- return nil
- }
- }
- com.PerfResults = append(com.PerfResults, s)
- return putCommit(c, com)
-}
-
-func trim(s []string, n int) []string {
- l := min(len(s), n)
- return s[len(s)-l:]
-}
-
-func min(a, b int) int {
- if a < b {
- return a
- }
- return b
-}
-
-// Result returns the build Result for this Commit for the given builder/goHash.
-func (c *Commit) Result(builder, goHash string) *Result {
- for _, r := range c.ResultData {
- p := strings.SplitN(r, "|", 4)
- if len(p) != 4 || p[0] != builder || p[3] != goHash {
- continue
- }
- return partsToHash(c, p)
- }
- return nil
-}
-
-// Results returns the build Results for this Commit.
-func (c *Commit) Results() (results []*Result) {
- for _, r := range c.ResultData {
- p := strings.SplitN(r, "|", 4)
- if len(p) != 4 {
- continue
- }
- results = append(results, partsToHash(c, p))
- }
- return
-}
-
-func (c *Commit) ResultGoHashes() []string {
- // For the main repo, just return the empty string
- // (there's no corresponding main repo hash for a main repo Commit).
- // This function is only really useful for sub-repos.
- if c.PackagePath == "" {
- return []string{""}
- }
- var hashes []string
- for _, r := range c.ResultData {
- p := strings.SplitN(r, "|", 4)
- if len(p) != 4 {
- continue
- }
- // Append only new results (use linear scan to preserve order).
- if !contains(hashes, p[3]) {
- hashes = append(hashes, p[3])
- }
- }
- // Return results in reverse order (newest first).
- reverse(hashes)
- return hashes
-}
-
-func contains(t []string, s string) bool {
- for _, s2 := range t {
- if s2 == s {
- return true
- }
- }
- return false
-}
-
-func reverse(s []string) {
- for i := 0; i < len(s)/2; i++ {
- j := len(s) - i - 1
- s[i], s[j] = s[j], s[i]
- }
-}
-
-// A CommitRun provides summary information for commits [StartCommitNum, StartCommitNum + PerfRunLength).
-// Descendant of Package.
-type CommitRun struct {
- PackagePath string // (empty for main repo commits)
- StartCommitNum int
- Hash []string `datastore:",noindex"`
- User []string `datastore:",noindex"`
- Desc []string `datastore:",noindex"` // Only first line.
- Time []time.Time `datastore:",noindex"`
- NeedsBenchmarking []bool `datastore:",noindex"`
-}
-
-func (cr *CommitRun) Key(c appengine.Context) *datastore.Key {
- p := Package{Path: cr.PackagePath}
- key := strconv.Itoa(cr.StartCommitNum)
- return datastore.NewKey(c, "CommitRun", key, 0, p.Key(c))
-}
-
-// GetCommitRun loads and returns CommitRun that contains information
-// for commit commitNum.
-func GetCommitRun(c appengine.Context, commitNum int) (*CommitRun, error) {
- cr := &CommitRun{StartCommitNum: commitNum / PerfRunLength * PerfRunLength}
- err := datastore.Get(c, cr.Key(c), cr)
- if err != nil && err != datastore.ErrNoSuchEntity {
- return nil, fmt.Errorf("getting CommitRun: %v", err)
- }
- if len(cr.Hash) != PerfRunLength {
- cr.Hash = make([]string, PerfRunLength)
- cr.User = make([]string, PerfRunLength)
- cr.Desc = make([]string, PerfRunLength)
- cr.Time = make([]time.Time, PerfRunLength)
- cr.NeedsBenchmarking = make([]bool, PerfRunLength)
- }
- return cr, nil
-}
-
-func (cr *CommitRun) AddCommit(c appengine.Context, com *Commit) error {
- if com.Num < cr.StartCommitNum || com.Num >= cr.StartCommitNum+PerfRunLength {
- return fmt.Errorf("AddCommit: commit num %v out of range [%v, %v)",
- com.Num, cr.StartCommitNum, cr.StartCommitNum+PerfRunLength)
- }
- i := com.Num - cr.StartCommitNum
- // Be careful with string lengths,
- // we need to fit 1024 commits into 1 MB.
- cr.Hash[i] = com.Hash
- cr.User[i] = shortDesc(com.User)
- cr.Desc[i] = shortDesc(com.Desc)
- cr.Time[i] = com.Time
- cr.NeedsBenchmarking[i] = com.NeedsBenchmarking
- if _, err := datastore.Put(c, cr.Key(c), cr); err != nil {
- return fmt.Errorf("putting CommitRun: %v", err)
- }
- return nil
-}
-
-// GetCommits returns [startCommitNum, startCommitNum+n) commits.
-// The returned Commit information is partial (obtained from CommitRun);
-// do not store it back into the datastore.
-func GetCommits(c appengine.Context, startCommitNum, n int) ([]*Commit, error) {
- if startCommitNum < 0 || n <= 0 {
- return nil, fmt.Errorf("GetCommits: invalid args (%v, %v)", startCommitNum, n)
- }
-
- p := &Package{}
- t := datastore.NewQuery("CommitRun").
- Ancestor(p.Key(c)).
- Filter("StartCommitNum >=", startCommitNum/PerfRunLength*PerfRunLength).
- Order("StartCommitNum").
- Limit(100).
- Run(c)
-
- res := make([]*Commit, n)
- for {
- cr := new(CommitRun)
- _, err := t.Next(cr)
- if err == datastore.Done {
- break
- }
- if err != nil {
- return nil, err
- }
- if cr.StartCommitNum >= startCommitNum+n {
- break
- }
- // Calculate start index for copying.
- i := 0
- if cr.StartCommitNum < startCommitNum {
- i = startCommitNum - cr.StartCommitNum
- }
- // Calculate end index for copying.
- e := PerfRunLength
- if cr.StartCommitNum+e > startCommitNum+n {
- e = startCommitNum + n - cr.StartCommitNum
- }
- for ; i < e; i++ {
- com := new(Commit)
- com.Hash = cr.Hash[i]
- com.User = cr.User[i]
- com.Desc = cr.Desc[i]
- com.Time = cr.Time[i]
- com.NeedsBenchmarking = cr.NeedsBenchmarking[i]
- res[cr.StartCommitNum-startCommitNum+i] = com
- }
- if e != PerfRunLength {
- break
- }
- }
- return res, nil
-}
-
-// partsToHash converts a Commit and ResultData substrings to a Result.
-func partsToHash(c *Commit, p []string) *Result {
- return &Result{
- Builder: p[0],
- Hash: c.Hash,
- PackagePath: c.PackagePath,
- GoHash: p[3],
- OK: p[1] == "true",
- LogHash: p[2],
- }
-}
-
-// A Result describes a build result for a Commit on an OS/architecture.
-//
-// Each Result entity is a descendant of its associated Package entity.
-type Result struct {
- PackagePath string // (empty for Go commits)
- Builder string // "os-arch[-note]"
- Hash string
-
- // The Go Commit this was built against (empty for Go commits).
- GoHash string
-
- OK bool
- Log string `datastore:"-"` // for JSON unmarshaling only
- LogHash string `datastore:",noindex"` // Key to the Log record.
-
- RunTime int64 // time to build+test in nanoseconds
-}
-
-func (r *Result) Key(c appengine.Context) *datastore.Key {
- p := Package{Path: r.PackagePath}
- key := r.Builder + "|" + r.PackagePath + "|" + r.Hash + "|" + r.GoHash
- return datastore.NewKey(c, "Result", key, 0, p.Key(c))
-}
-
-func (r *Result) Valid() error {
- if !validHash(r.Hash) {
- return errors.New("invalid Hash")
- }
- if r.PackagePath != "" && !validHash(r.GoHash) {
- return errors.New("invalid GoHash")
- }
- return nil
-}
-
-// Data returns the Result in string format
-// to be stored in Commit's ResultData field.
-func (r *Result) Data() string {
- return fmt.Sprintf("%v|%v|%v|%v", r.Builder, r.OK, r.LogHash, r.GoHash)
-}
-
-// A PerfResult describes all benchmarking result for a Commit.
-// Descendant of Package.
-type PerfResult struct {
- PackagePath string
- CommitHash string
- CommitNum int
- Data []string `datastore:",noindex"` // "builder|benchmark|ok|metric1=val1|metric2=val2|file:log=hash|file:cpuprof=hash"
-
- // Local cache with parsed Data.
- // Maps builder->benchmark->ParsedPerfResult.
- parsedData map[string]map[string]*ParsedPerfResult
-}
-
-type ParsedPerfResult struct {
- OK bool
- Metrics map[string]uint64
- Artifacts map[string]string
-}
-
-func (r *PerfResult) Key(c appengine.Context) *datastore.Key {
- p := Package{Path: r.PackagePath}
- key := r.CommitHash
- return datastore.NewKey(c, "PerfResult", key, 0, p.Key(c))
-}
-
-// AddResult adds the benchmarking result to r.
-// Any existing result for the same builder/benchmark is replaced.
-// It returns whether a result was already present.
-func (r *PerfResult) AddResult(req *PerfRequest) bool {
- present := false
- str := fmt.Sprintf("%v|%v|", req.Builder, req.Benchmark)
- for i, s := range r.Data {
- if strings.HasPrefix(s, str) {
- present = true
- last := len(r.Data) - 1
- r.Data[i] = r.Data[last]
- r.Data = r.Data[:last]
- break
- }
- }
- ok := "ok"
- if !req.OK {
- ok = "false"
- }
- str += ok
- for _, m := range req.Metrics {
- str += fmt.Sprintf("|%v=%v", m.Type, m.Val)
- }
- for _, a := range req.Artifacts {
- str += fmt.Sprintf("|file:%v=%v", a.Type, a.Body)
- }
- r.Data = append(r.Data, str)
- r.parsedData = nil
- return present
-}
-
-func (r *PerfResult) ParseData() map[string]map[string]*ParsedPerfResult {
- if r.parsedData != nil {
- return r.parsedData
- }
- res := make(map[string]map[string]*ParsedPerfResult)
- for _, str := range r.Data {
- ss := strings.Split(str, "|")
- builder := ss[0]
- bench := ss[1]
- ok := ss[2]
- m := res[builder]
- if m == nil {
- m = make(map[string]*ParsedPerfResult)
- res[builder] = m
- }
- var p ParsedPerfResult
- p.OK = ok == "ok"
- p.Metrics = make(map[string]uint64)
- p.Artifacts = make(map[string]string)
- for _, entry := range ss[3:] {
- if strings.HasPrefix(entry, "file:") {
- ss1 := strings.Split(entry[len("file:"):], "=")
- p.Artifacts[ss1[0]] = ss1[1]
- } else {
- ss1 := strings.Split(entry, "=")
- val, _ := strconv.ParseUint(ss1[1], 10, 64)
- p.Metrics[ss1[0]] = val
- }
- }
- m[bench] = &p
- }
- r.parsedData = res
- return res
-}
-
-// A PerfMetricRun entity holds a set of metric values for builder/benchmark/metric
-// for commits [StartCommitNum, StartCommitNum + PerfRunLength).
-// Descendant of Package.
-type PerfMetricRun struct {
- PackagePath string
- Builder string
- Benchmark string
- Metric string // e.g. realtime, cputime, gc-pause
- StartCommitNum int
- Vals []int64 `datastore:",noindex"`
-}
-
-func (m *PerfMetricRun) Key(c appengine.Context) *datastore.Key {
- p := Package{Path: m.PackagePath}
- key := m.Builder + "|" + m.Benchmark + "|" + m.Metric + "|" + strconv.Itoa(m.StartCommitNum)
- return datastore.NewKey(c, "PerfMetricRun", key, 0, p.Key(c))
-}
-
-// GetPerfMetricRun loads and returns PerfMetricRun that contains information
-// for commit commitNum.
-func GetPerfMetricRun(c appengine.Context, builder, benchmark, metric string, commitNum int) (*PerfMetricRun, error) {
- startCommitNum := commitNum / PerfRunLength * PerfRunLength
- m := &PerfMetricRun{Builder: builder, Benchmark: benchmark, Metric: metric, StartCommitNum: startCommitNum}
- err := datastore.Get(c, m.Key(c), m)
- if err != nil && err != datastore.ErrNoSuchEntity {
- return nil, fmt.Errorf("getting PerfMetricRun: %v", err)
- }
- if len(m.Vals) != PerfRunLength {
- m.Vals = make([]int64, PerfRunLength)
- }
- return m, nil
-}
-
-func (m *PerfMetricRun) AddMetric(c appengine.Context, commitNum int, v uint64) error {
- if commitNum < m.StartCommitNum || commitNum >= m.StartCommitNum+PerfRunLength {
- return fmt.Errorf("AddMetric: CommitNum %v out of range [%v, %v)",
- commitNum, m.StartCommitNum, m.StartCommitNum+PerfRunLength)
- }
- m.Vals[commitNum-m.StartCommitNum] = int64(v)
- if _, err := datastore.Put(c, m.Key(c), m); err != nil {
- return fmt.Errorf("putting PerfMetricRun: %v", err)
- }
- return nil
-}
-
-// GetPerfMetricsForCommits returns perf metrics for builder/benchmark/metric
-// and commits [startCommitNum, startCommitNum+n).
-func GetPerfMetricsForCommits(c appengine.Context, builder, benchmark, metric string, startCommitNum, n int) ([]uint64, error) {
- if startCommitNum < 0 || n <= 0 {
- return nil, fmt.Errorf("GetPerfMetricsForCommits: invalid args (%v, %v)", startCommitNum, n)
- }
-
- p := &Package{}
- t := datastore.NewQuery("PerfMetricRun").
- Ancestor(p.Key(c)).
- Filter("Builder =", builder).
- Filter("Benchmark =", benchmark).
- Filter("Metric =", metric).
- Filter("StartCommitNum >=", startCommitNum/PerfRunLength*PerfRunLength).
- Order("StartCommitNum").
- Limit(100).
- Run(c)
-
- res := make([]uint64, n)
- for {
- metrics := new(PerfMetricRun)
- _, err := t.Next(metrics)
- if err == datastore.Done {
- break
- }
- if err != nil {
- return nil, err
- }
- if metrics.StartCommitNum >= startCommitNum+n {
- break
- }
- // Calculate start index for copying.
- i := 0
- if metrics.StartCommitNum < startCommitNum {
- i = startCommitNum - metrics.StartCommitNum
- }
- // Calculate end index for copying.
- e := PerfRunLength
- if metrics.StartCommitNum+e > startCommitNum+n {
- e = startCommitNum + n - metrics.StartCommitNum
- }
- for ; i < e; i++ {
- res[metrics.StartCommitNum-startCommitNum+i] = uint64(metrics.Vals[i])
- }
- if e != PerfRunLength {
- break
- }
- }
- return res, nil
-}
-
-// PerfConfig holds read-mostly configuration related to benchmarking.
-// There is only one PerfConfig entity.
-type PerfConfig struct {
- BuilderBench []string `datastore:",noindex"` // "builder|benchmark" pairs
- BuilderProcs []string `datastore:",noindex"` // "builder|proc" pairs
- BenchMetric []string `datastore:",noindex"` // "benchmark|metric" pairs
- NoiseLevels []string `datastore:",noindex"` // "builder|benchmark|metric1=noise1|metric2=noise2"
-
- // Local cache of "builder|benchmark|metric" -> noise.
- noise map[string]float64
-}
-
-func PerfConfigKey(c appengine.Context) *datastore.Key {
- p := Package{}
- return datastore.NewKey(c, "PerfConfig", "PerfConfig", 0, p.Key(c))
-}
-
-const perfConfigCacheKey = "perf-config"
-
-func GetPerfConfig(c appengine.Context, r *http.Request) (*PerfConfig, error) {
- pc := new(PerfConfig)
- now := cache.Now(c)
- if cache.Get(r, now, perfConfigCacheKey, pc) {
- return pc, nil
- }
- err := datastore.Get(c, PerfConfigKey(c), pc)
- if err != nil && err != datastore.ErrNoSuchEntity {
- return nil, fmt.Errorf("GetPerfConfig: %v", err)
- }
- cache.Set(r, now, perfConfigCacheKey, pc)
- return pc, nil
-}
-
-func (pc *PerfConfig) NoiseLevel(builder, benchmark, metric string) float64 {
- if pc.noise == nil {
- pc.noise = make(map[string]float64)
- for _, str := range pc.NoiseLevels {
- split := strings.Split(str, "|")
- builderBench := split[0] + "|" + split[1]
- for _, entry := range split[2:] {
- metricValue := strings.Split(entry, "=")
- noise, _ := strconv.ParseFloat(metricValue[1], 64)
- pc.noise[builderBench+"|"+metricValue[0]] = noise
- }
- }
- }
- me := fmt.Sprintf("%v|%v|%v", builder, benchmark, metric)
- n := pc.noise[me]
- if n == 0 {
- // Use a very conservative value
- // until we have learned the real noise level.
- n = 200
- }
- return n
-}
-
-// UpdatePerfConfig updates the PerfConfig entity with results of benchmarking.
-// It returns whether this is a benchmark that we have not yet seen on the builder.
-func UpdatePerfConfig(c appengine.Context, r *http.Request, req *PerfRequest) (newBenchmark bool, err error) {
- pc, err := GetPerfConfig(c, r)
- if err != nil {
- return false, err
- }
-
- modified := false
- add := func(arr *[]string, str string) {
- for _, s := range *arr {
- if s == str {
- return
- }
- }
- *arr = append(*arr, str)
- modified = true
- return
- }
-
- BenchProcs := strings.Split(req.Benchmark, "-")
- benchmark := BenchProcs[0]
- procs := "1"
- if len(BenchProcs) > 1 {
- procs = BenchProcs[1]
- }
-
- add(&pc.BuilderBench, req.Builder+"|"+benchmark)
- newBenchmark = modified
- add(&pc.BuilderProcs, req.Builder+"|"+procs)
- for _, m := range req.Metrics {
- add(&pc.BenchMetric, benchmark+"|"+m.Type)
- }
-
- if modified {
- if _, err := datastore.Put(c, PerfConfigKey(c), pc); err != nil {
- return false, fmt.Errorf("putting PerfConfig: %v", err)
- }
- cache.Tick(c)
- }
- return newBenchmark, nil
-}
-
-type MetricList []string
-
-func (l MetricList) Len() int {
- return len(l)
-}
-
-func (l MetricList) Less(i, j int) bool {
- bi := strings.HasPrefix(l[i], "build-") || strings.HasPrefix(l[i], "binary-")
- bj := strings.HasPrefix(l[j], "build-") || strings.HasPrefix(l[j], "binary-")
- if bi == bj {
- return l[i] < l[j]
- }
- return !bi
-}
-
-func (l MetricList) Swap(i, j int) {
- l[i], l[j] = l[j], l[i]
-}
-
-func collectList(all []string, idx int, second string) (res []string) {
- m := make(map[string]bool)
- for _, str := range all {
- ss := strings.Split(str, "|")
- v := ss[idx]
- v2 := ss[1-idx]
- if (second == "" || second == v2) && !m[v] {
- m[v] = true
- res = append(res, v)
- }
- }
- sort.Sort(MetricList(res))
- return res
-}
-
-func (pc *PerfConfig) BuildersForBenchmark(bench string) []string {
- return collectList(pc.BuilderBench, 0, bench)
-}
-
-func (pc *PerfConfig) BenchmarksForBuilder(builder string) []string {
- return collectList(pc.BuilderBench, 1, builder)
-}
-
-func (pc *PerfConfig) MetricsForBenchmark(bench string) []string {
- return collectList(pc.BenchMetric, 1, bench)
-}
-
-func (pc *PerfConfig) BenchmarkProcList() (res []string) {
- bl := pc.BenchmarksForBuilder("")
- pl := pc.ProcList("")
- for _, b := range bl {
- for _, p := range pl {
- res = append(res, fmt.Sprintf("%v-%v", b, p))
- }
- }
- return res
-}
-
-func (pc *PerfConfig) ProcList(builder string) []int {
- ss := collectList(pc.BuilderProcs, 1, builder)
- var procs []int
- for _, s := range ss {
- p, _ := strconv.ParseInt(s, 10, 32)
- procs = append(procs, int(p))
- }
- sort.Ints(procs)
- return procs
-}
-
-// A PerfTodo contains outstanding commits for benchmarking for a builder.
-// Descendant of Package.
-type PerfTodo struct {
- PackagePath string // (empty for main repo commits)
- Builder string
- CommitNums []int `datastore:",noindex"` // LIFO queue of commits to benchmark.
-}
-
-func (todo *PerfTodo) Key(c appengine.Context) *datastore.Key {
- p := Package{Path: todo.PackagePath}
- key := todo.Builder
- return datastore.NewKey(c, "PerfTodo", key, 0, p.Key(c))
-}
-
-// AddCommitToPerfTodo adds the commit to all existing PerfTodo entities.
-func AddCommitToPerfTodo(c appengine.Context, com *Commit) error {
- var todos []*PerfTodo
- _, err := datastore.NewQuery("PerfTodo").
- Ancestor((&Package{}).Key(c)).
- GetAll(c, &todos)
- if err != nil {
- return fmt.Errorf("fetching PerfTodo's: %v", err)
- }
- for _, todo := range todos {
- todo.CommitNums = append(todo.CommitNums, com.Num)
- _, err = datastore.Put(c, todo.Key(c), todo)
- if err != nil {
- return fmt.Errorf("updating PerfTodo: %v", err)
- }
- }
- return nil
-}
-
-// A Log is a gzip-compressed log file stored under the SHA1 hash of the
-// uncompressed log text.
-type Log struct {
- CompressedLog []byte
-}
-
-func (l *Log) Text() ([]byte, error) {
- d, err := gzip.NewReader(bytes.NewBuffer(l.CompressedLog))
- if err != nil {
- return nil, fmt.Errorf("reading log data: %v", err)
- }
- b, err := ioutil.ReadAll(d)
- if err != nil {
- return nil, fmt.Errorf("reading log data: %v", err)
- }
- return b, nil
-}
-
-func PutLog(c appengine.Context, text string) (hash string, err error) {
- h := sha1.New()
- io.WriteString(h, text)
- b := new(bytes.Buffer)
- z, _ := gzip.NewWriterLevel(b, gzip.BestCompression)
- io.WriteString(z, text)
- z.Close()
- hash = fmt.Sprintf("%x", h.Sum(nil))
- key := datastore.NewKey(c, "Log", hash, 0, nil)
- _, err = datastore.Put(c, key, &Log{b.Bytes()})
- return
-}
-
-// A Tag is used to keep track of the most recent Go weekly and release tags.
-// Typically there will be one Tag entity for each kind of hg tag.
-type Tag struct {
- Kind string // "weekly", "release", or "tip"
- Name string // the tag itself (for example: "release.r60")
- Hash string
-}
-
-func (t *Tag) Key(c appengine.Context) *datastore.Key {
- p := &Package{}
- return datastore.NewKey(c, "Tag", t.Kind, 0, p.Key(c))
-}
-
-func (t *Tag) Valid() error {
- if t.Kind != "weekly" && t.Kind != "release" && t.Kind != "tip" {
- return errors.New("invalid Kind")
- }
- if !validHash(t.Hash) {
- return errors.New("invalid Hash")
- }
- return nil
-}
-
-// Commit returns the Commit that corresponds with this Tag.
-func (t *Tag) Commit(c appengine.Context) (*Commit, error) {
- com := &Commit{Hash: t.Hash}
- err := datastore.Get(c, com.Key(c), com)
- return com, err
-}
-
-// GetTag fetches a Tag by name from the datastore.
-func GetTag(c appengine.Context, tag string) (*Tag, error) {
- t := &Tag{Kind: tag}
- if err := datastore.Get(c, t.Key(c), t); err != nil {
- if err == datastore.ErrNoSuchEntity {
- return nil, errors.New("tag not found: " + tag)
- }
- return nil, err
- }
- if err := t.Valid(); err != nil {
- return nil, err
- }
- return t, nil
-}
-
-// Packages returns packages of the specified kind.
-// Kind must be one of "external" or "subrepo".
-func Packages(c appengine.Context, kind string) ([]*Package, error) {
- switch kind {
- case "external", "subrepo":
- default:
- return nil, errors.New(`kind must be one of "external" or "subrepo"`)
- }
- var pkgs []*Package
- q := datastore.NewQuery("Package").Filter("Kind=", kind)
- for t := q.Run(c); ; {
- pkg := new(Package)
- _, err := t.Next(pkg)
- if err == datastore.Done {
- break
- } else if err != nil {
- return nil, err
- }
- if pkg.Path != "" {
- pkgs = append(pkgs, pkg)
- }
- }
- return pkgs, nil
-}
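
For readers skimming the removed dashboard code: build results were denormalized onto each Commit as "builder|ok|loghash|gohash" strings (see Result.Data and partsToHash above). A small standalone sketch of that string layout, using hypothetical values and a helper name of our own choosing:

package main

import (
	"fmt"
	"strings"
)

// encode mirrors the "builder|ok|loghash|gohash" layout used by Result.Data
// in the deleted build.go; the function name here is hypothetical.
func encode(builder string, ok bool, logHash, goHash string) string {
	return fmt.Sprintf("%v|%v|%v|%v", builder, ok, logHash, goHash)
}

func main() {
	s := encode("linux-amd64", true, "abc123", "def456")
	p := strings.SplitN(s, "|", 4)
	fmt.Println(p[0], p[1] == "true", p[2], p[3]) // linux-amd64 true abc123 def456
}
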
diff --git a/dashboard/app/build/dash.go b/dashboard/app/build/dash.go
deleted file mode 100644
index 52ca74d..0000000
--- a/dashboard/app/build/dash.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build appengine
-
-package build
-
-import (
- "net/http"
- "strings"
-
- "appengine"
-)
-
-// Dashboard describes a unique build dashboard.
-type Dashboard struct {
- Name string // This dashboard's name and namespace
- RelPath string // The relative url path
- Packages []*Package // The project's packages to build
-}
-
-// dashboardForRequest returns the appropriate dashboard for a given URL path.
-func dashboardForRequest(r *http.Request) *Dashboard {
- if strings.HasPrefix(r.URL.Path, gccgoDash.RelPath) {
- return gccgoDash
- }
- return goDash
-}
-
-// Context returns a namespaced context for this dashboard, or panics if it
-// fails to create a new context.
-func (d *Dashboard) Context(c appengine.Context) appengine.Context {
- // No namespace needed for the original Go dashboard.
- if d.Name == "Go" {
- return c
- }
- n, err := appengine.Namespace(c, d.Name)
- if err != nil {
- panic(err)
- }
- return n
-}
-
-// the currently known dashboards.
-var dashboards = []*Dashboard{goDash, gccgoDash}
-
-// goDash is the dashboard for the main go repository.
-var goDash = &Dashboard{
- Name: "Go",
- RelPath: "/",
- Packages: goPackages,
-}
-
-// goPackages is a list of all of the packages built by the main go repository.
-var goPackages = []*Package{
- {
- Kind: "go",
- Name: "Go",
- },
- {
- Kind: "subrepo",
- Name: "go.blog",
- Path: "code.google.com/p/go.blog",
- },
- {
- Kind: "subrepo",
- Name: "go.codereview",
- Path: "code.google.com/p/go.codereview",
- },
- {
- Kind: "subrepo",
- Name: "go.crypto",
- Path: "code.google.com/p/go.crypto",
- },
- {
- Kind: "subrepo",
- Name: "go.exp",
- Path: "code.google.com/p/go.exp",
- },
- {
- Kind: "subrepo",
- Name: "go.image",
- Path: "code.google.com/p/go.image",
- },
- {
- Kind: "subrepo",
- Name: "go.net",
- Path: "code.google.com/p/go.net",
- },
- {
- Kind: "subrepo",
- Name: "go.sys",
- Path: "code.google.com/p/go.sys",
- },
- {
- Kind: "subrepo",
- Name: "go.talks",
- Path: "code.google.com/p/go.talks",
- },
- {
- Kind: "subrepo",
- Name: "go.tools",
- Path: "code.google.com/p/go.tools",
- },
-}
-
-// gccgoDash is the dashboard for gccgo.
-var gccgoDash = &Dashboard{
- Name: "Gccgo",
- RelPath: "/gccgo/",
- Packages: []*Package{
- {
- Kind: "gccgo",
- Name: "Gccgo",
- },
- },
-}
diff --git a/dashboard/app/build/handler.go b/dashboard/app/build/handler.go
deleted file mode 100644
index 5d06815..0000000
--- a/dashboard/app/build/handler.go
+++ /dev/null
@@ -1,906 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build appengine
-
-package build
-
-import (
- "bytes"
- "crypto/hmac"
- "crypto/md5"
- "encoding/json"
- "errors"
- "fmt"
- "io/ioutil"
- "net/http"
- "strconv"
- "strings"
- "unicode/utf8"
-
- "appengine"
- "appengine/datastore"
-
- "cache"
- "key"
-)
-
-const commitsPerPage = 30
-const watcherVersion = 2
-
-// commitHandler retrieves commit data or records a new commit.
-//
-// For GET requests it returns a Commit value for the specified
-// packagePath and hash.
-//
-// For POST requests it reads a JSON-encoded Commit value from the request
-// body and creates a new Commit entity. It also updates the "tip" Tag for
-// each new commit at tip.
-//
-// This handler is used by a gobuilder process in -commit mode.
-func commitHandler(r *http.Request) (interface{}, error) {
- c := contextForRequest(r)
- com := new(Commit)
-
- if r.Method == "GET" {
- com.PackagePath = r.FormValue("packagePath")
- com.Hash = r.FormValue("hash")
- err := datastore.Get(c, com.Key(c), com)
- if com.Num == 0 && com.Desc == "" {
- // Perf builder might have written an incomplete Commit.
- // Pretend it doesn't exist, so that we can get complete details.
- err = datastore.ErrNoSuchEntity
- }
- if err != nil {
- if err == datastore.ErrNoSuchEntity {
- // This error string is special.
- // The commit watcher expects it.
- // Do not change it.
- return nil, errors.New("Commit not found")
- }
- return nil, fmt.Errorf("getting Commit: %v", err)
- }
- if com.Num == 0 {
- // Corrupt state which shouldn't happen but does.
- // Return an error so builders' commit loops will
- // be willing to retry submitting this commit.
- return nil, errors.New("in datastore with zero Num")
- }
- if com.Desc == "" || com.User == "" {
- // Also shouldn't happen, but at least happened
- // once on a single commit when trying to fix data
- // in the datastore viewer UI?
- return nil, errors.New("missing field")
- }
- // Strip potentially large and unnecessary fields.
- com.ResultData = nil
- com.PerfResults = nil
- return com, nil
- }
- if r.Method != "POST" {
- return nil, errBadMethod(r.Method)
- }
- if !isMasterKey(c, r.FormValue("key")) {
- return nil, errors.New("can only POST commits with master key")
- }
-
- // For now, the commit watcher doesn't support gccgo,
- // so only do this check for Go commits.
- // TODO(adg,cmang): remove this check when gccgo is supported.
- if dashboardForRequest(r) == goDash {
- v, _ := strconv.Atoi(r.FormValue("version"))
- if v != watcherVersion {
- return nil, fmt.Errorf("rejecting POST from commit watcher; need version %v", watcherVersion)
- }
- }
-
- // POST request
- body, err := ioutil.ReadAll(r.Body)
- r.Body.Close()
- if err != nil {
- return nil, fmt.Errorf("reading Body: %v", err)
- }
- if !bytes.Contains(body, needsBenchmarkingBytes) {
- c.Warningf("old builder detected at %v", r.RemoteAddr)
- return nil, fmt.Errorf("rejecting old builder request, body does not contain %s: %q", needsBenchmarkingBytes, body)
- }
- if err := json.Unmarshal(body, com); err != nil {
- return nil, fmt.Errorf("unmarshaling body %q: %v", body, err)
- }
- com.Desc = limitStringLength(com.Desc, maxDatastoreStringLen)
- if err := com.Valid(); err != nil {
- return nil, fmt.Errorf("validating Commit: %v", err)
- }
- defer cache.Tick(c)
- tx := func(c appengine.Context) error {
- return addCommit(c, com)
- }
- return nil, datastore.RunInTransaction(c, tx, nil)
-}
-
-var needsBenchmarkingBytes = []byte(`"NeedsBenchmarking"`)
-
-// addCommit adds the Commit entity to the datastore and updates the tip Tag.
-// It must be run inside a datastore transaction.
-func addCommit(c appengine.Context, com *Commit) error {
- var ec Commit // existing commit
- isUpdate := false
- err := datastore.Get(c, com.Key(c), &ec)
- if err != nil && err != datastore.ErrNoSuchEntity {
- return fmt.Errorf("getting Commit: %v", err)
- }
- if err == nil {
- // Commit already in the datastore. Any fields different?
- // If not, don't do anything.
- changes := (com.Num != 0 && com.Num != ec.Num) ||
- com.ParentHash != ec.ParentHash ||
- com.Desc != ec.Desc ||
- com.User != ec.User ||
- !com.Time.Equal(ec.Time)
- if !changes {
- return nil
- }
- ec.ParentHash = com.ParentHash
- ec.Desc = com.Desc
- ec.User = com.User
- if !com.Time.IsZero() {
- ec.Time = com.Time
- }
- if com.Num != 0 {
- ec.Num = com.Num
- }
- isUpdate = true
- com = &ec
- }
- p, err := GetPackage(c, com.PackagePath)
- if err != nil {
- return fmt.Errorf("GetPackage: %v", err)
- }
- if com.Num == 0 {
- // get the next commit number
- com.Num = p.NextNum
- p.NextNum++
- if _, err := datastore.Put(c, p.Key(c), p); err != nil {
- return fmt.Errorf("putting Package: %v", err)
- }
- } else if com.Num >= p.NextNum {
- p.NextNum = com.Num + 1
- if _, err := datastore.Put(c, p.Key(c), p); err != nil {
- return fmt.Errorf("putting Package: %v", err)
- }
- }
- // if this isn't the first Commit test the parent commit exists.
- // The all zeros are returned by hg's p1node template for parentless commits.
- if com.ParentHash != "" && com.ParentHash != "0000000000000000000000000000000000000000" && com.ParentHash != "0000" {
- n, err := datastore.NewQuery("Commit").
- Filter("Hash =", com.ParentHash).
- Ancestor(p.Key(c)).
- Count(c)
- if err != nil {
- return fmt.Errorf("testing for parent Commit: %v", err)
- }
- if n == 0 {
- return errors.New("parent commit not found")
- }
- }
- // update the tip Tag if this is the Go repo and this isn't on a release branch
- if p.Path == "" && !strings.HasPrefix(com.Desc, "[") && !isUpdate {
- t := &Tag{Kind: "tip", Hash: com.Hash}
- if _, err = datastore.Put(c, t.Key(c), t); err != nil {
- return fmt.Errorf("putting Tag: %v", err)
- }
- }
- // put the Commit
- if err = putCommit(c, com); err != nil {
- return err
- }
- if com.NeedsBenchmarking {
- // add to CommitRun
- cr, err := GetCommitRun(c, com.Num)
- if err != nil {
- return err
- }
- if err = cr.AddCommit(c, com); err != nil {
- return err
- }
- // create PerfResult
- res := &PerfResult{CommitHash: com.Hash, CommitNum: com.Num}
- if _, err := datastore.Put(c, res.Key(c), res); err != nil {
- return fmt.Errorf("putting PerfResult: %v", err)
- }
- // Update perf todo if necessary.
- if err = AddCommitToPerfTodo(c, com); err != nil {
- return err
- }
- }
- return nil
-}
-
-// tagHandler records a new tag. It reads a JSON-encoded Tag value from the
-// request body and updates the Tag entity for the Kind of tag provided.
-//
-// This handler is used by a gobuilder process in -commit mode.
-func tagHandler(r *http.Request) (interface{}, error) {
- if r.Method != "POST" {
- return nil, errBadMethod(r.Method)
- }
-
- t := new(Tag)
- defer r.Body.Close()
- if err := json.NewDecoder(r.Body).Decode(t); err != nil {
- return nil, err
- }
- if err := t.Valid(); err != nil {
- return nil, err
- }
- c := contextForRequest(r)
- defer cache.Tick(c)
- _, err := datastore.Put(c, t.Key(c), t)
- return nil, err
-}
-
-// Todo is a todoHandler response.
-type Todo struct {
- Kind string // "build-go-commit" or "build-package"
- Data interface{}
-}
-
-// todoHandler returns the next action to be performed by a builder.
-// It expects "builder" and "kind" query parameters and returns a *Todo value.
-// Multiple "kind" parameters may be specified.
-func todoHandler(r *http.Request) (interface{}, error) {
- c := contextForRequest(r)
- now := cache.Now(c)
- key := "build-todo-" + r.Form.Encode()
- var todo *Todo
- if cache.Get(r, now, key, &todo) {
- return todo, nil
- }
- var err error
- builder := r.FormValue("builder")
- for _, kind := range r.Form["kind"] {
- var com *Commit
- switch kind {
- case "build-go-commit":
- com, err = buildTodo(c, builder, "", "")
- if com != nil {
- com.PerfResults = []string{}
- }
- case "build-package":
- packagePath := r.FormValue("packagePath")
- goHash := r.FormValue("goHash")
- com, err = buildTodo(c, builder, packagePath, goHash)
- if com != nil {
- com.PerfResults = []string{}
- }
- case "benchmark-go-commit":
- com, err = perfTodo(c, builder)
- }
- if com != nil || err != nil {
- if com != nil {
- // ResultData can be large and not needed on builder.
- com.ResultData = []string{}
- }
- todo = &Todo{Kind: kind, Data: com}
- break
- }
- }
- if err == nil {
- cache.Set(r, now, key, todo)
- }
- return todo, err
-}
-
-// buildTodo returns the next Commit to be built (or nil if none available).
-//
-// If packagePath and goHash are empty, it scans the first 20 Go Commits in
-// Num-descending order and returns the first one it finds that doesn't have a
-// Result for this builder.
-//
-// If provided with non-empty packagePath and goHash args, it scans the first
-// 20 Commits in Num-descending order for the specified packagePath and
-// returns the first that doesn't have a Result for this builder and goHash.
-func buildTodo(c appengine.Context, builder, packagePath, goHash string) (*Commit, error) {
- p, err := GetPackage(c, packagePath)
- if err != nil {
- return nil, err
- }
-
- t := datastore.NewQuery("Commit").
- Ancestor(p.Key(c)).
- Limit(commitsPerPage).
- Order("-Num").
- Run(c)
- for {
- com := new(Commit)
- if _, err := t.Next(com); err == datastore.Done {
- break
- } else if err != nil {
- return nil, err
- }
- if com.Result(builder, goHash) == nil {
- return com, nil
- }
- }
-
- // Nothing left to do if this is a package (not the Go tree).
- if packagePath != "" {
- return nil, nil
- }
-
- // If there are no Go tree commits left to build,
- // see if there are any subrepo commits that need to be built at tip.
- // If so, ask the builder to build a go tree at the tip commit.
- // TODO(adg): do the same for "weekly" and "release" tags.
-
- tag, err := GetTag(c, "tip")
- if err != nil {
- return nil, err
- }
-
- // Check that this Go commit builds OK for this builder.
- // If not, don't re-build as the subrepos will never get built anyway.
- com, err := tag.Commit(c)
- if err != nil {
- return nil, err
- }
- if r := com.Result(builder, ""); r != nil && !r.OK {
- return nil, nil
- }
-
- pkgs, err := Packages(c, "subrepo")
- if err != nil {
- return nil, err
- }
- for _, pkg := range pkgs {
- com, err := pkg.LastCommit(c)
- if err != nil {
- c.Warningf("%v: no Commit found: %v", pkg, err)
- continue
- }
- if com.Result(builder, tag.Hash) == nil {
- return tag.Commit(c)
- }
- }
-
- return nil, nil
-}
-
-// perfTodo returns the next Commit to be benchmarked (or nil if none available).
-func perfTodo(c appengine.Context, builder string) (*Commit, error) {
- p := &Package{}
- todo := &PerfTodo{Builder: builder}
- err := datastore.Get(c, todo.Key(c), todo)
- if err != nil && err != datastore.ErrNoSuchEntity {
- return nil, fmt.Errorf("fetching PerfTodo: %v", err)
- }
- if err == datastore.ErrNoSuchEntity {
- todo, err = buildPerfTodo(c, builder)
- if err != nil {
- return nil, err
- }
- }
- if len(todo.CommitNums) == 0 {
- return nil, nil
- }
-
- // Have commit to benchmark, fetch it.
- num := todo.CommitNums[len(todo.CommitNums)-1]
- t := datastore.NewQuery("Commit").
- Ancestor(p.Key(c)).
- Filter("Num =", num).
- Limit(1).
- Run(c)
- com := new(Commit)
- if _, err := t.Next(com); err != nil {
- return nil, err
- }
- if !com.NeedsBenchmarking {
- return nil, fmt.Errorf("commit from perf todo queue is not intended for benchmarking")
- }
-
- // Remove benchmarks from other builders.
- var benchs []string
- for _, b := range com.PerfResults {
- bb := strings.Split(b, "|")
- if bb[0] == builder && bb[1] != "meta-done" {
- benchs = append(benchs, bb[1])
- }
- }
- com.PerfResults = benchs
-
- return com, nil
-}
-
-// buildPerfTodo creates a PerfTodo for the builder containing all commits. It runs in a transaction.
-func buildPerfTodo(c appengine.Context, builder string) (*PerfTodo, error) {
- todo := &PerfTodo{Builder: builder}
- tx := func(c appengine.Context) error {
- err := datastore.Get(c, todo.Key(c), todo)
- if err != nil && err != datastore.ErrNoSuchEntity {
- return fmt.Errorf("fetching PerfTodo: %v", err)
- }
- if err == nil {
- return nil
- }
- t := datastore.NewQuery("CommitRun").
- Ancestor((&Package{}).Key(c)).
- Order("-StartCommitNum").
- Run(c)
- var nums []int
- var releaseNums []int
- loop:
- for {
- cr := new(CommitRun)
- if _, err := t.Next(cr); err == datastore.Done {
- break
- } else if err != nil {
- return fmt.Errorf("scanning commit runs for perf todo: %v", err)
- }
- for i := len(cr.Hash) - 1; i >= 0; i-- {
- if !cr.NeedsBenchmarking[i] || cr.Hash[i] == "" {
- continue // There's nothing to see here. Move along.
- }
- num := cr.StartCommitNum + i
- for k, v := range knownTags {
- // Releases are benchmarked first, because they are important (and there are few of them).
- if cr.Hash[i] == v {
- releaseNums = append(releaseNums, num)
- if k == "go1" {
- break loop // Point of no benchmark: test/bench/shootout: update timing.log to Go 1.
- }
- }
- }
- nums = append(nums, num)
- }
- }
- todo.CommitNums = orderPerfTodo(nums)
- todo.CommitNums = append(todo.CommitNums, releaseNums...)
- if _, err = datastore.Put(c, todo.Key(c), todo); err != nil {
- return fmt.Errorf("putting PerfTodo: %v", err)
- }
- return nil
- }
- return todo, datastore.RunInTransaction(c, tx, nil)
-}
-
-func removeCommitFromPerfTodo(c appengine.Context, builder string, num int) error {
- todo := &PerfTodo{Builder: builder}
- err := datastore.Get(c, todo.Key(c), todo)
- if err != nil && err != datastore.ErrNoSuchEntity {
- return fmt.Errorf("fetching PerfTodo: %v", err)
- }
- if err == datastore.ErrNoSuchEntity {
- return nil
- }
- for i := len(todo.CommitNums) - 1; i >= 0; i-- {
- if todo.CommitNums[i] == num {
- for ; i < len(todo.CommitNums)-1; i++ {
- todo.CommitNums[i] = todo.CommitNums[i+1]
- }
- todo.CommitNums = todo.CommitNums[:i]
- _, err = datastore.Put(c, todo.Key(c), todo)
- if err != nil {
- return fmt.Errorf("putting PerfTodo: %v", err)
- }
- break
- }
- }
- return nil
-}
-
-// packagesHandler returns a list of the non-Go Packages monitored
-// by the dashboard.
-func packagesHandler(r *http.Request) (interface{}, error) {
- kind := r.FormValue("kind")
- c := contextForRequest(r)
- now := cache.Now(c)
- key := "build-packages-" + kind
- var p []*Package
- if cache.Get(r, now, key, &p) {
- return p, nil
- }
- p, err := Packages(c, kind)
- if err != nil {
- return nil, err
- }
- cache.Set(r, now, key, p)
- return p, nil
-}
-
-// resultHandler records a build result.
-// It reads a JSON-encoded Result value from the request body,
-// creates a new Result entity, and updates the relevant Commit entity.
-// If the Log field is not empty, resultHandler creates a new Log entity
-// and updates the LogHash field before putting the Commit entity.
-func resultHandler(r *http.Request) (interface{}, error) {
- if r.Method != "POST" {
- return nil, errBadMethod(r.Method)
- }
-
- c := contextForRequest(r)
- res := new(Result)
- defer r.Body.Close()
- if err := json.NewDecoder(r.Body).Decode(res); err != nil {
- return nil, fmt.Errorf("decoding Body: %v", err)
- }
- if err := res.Valid(); err != nil {
- return nil, fmt.Errorf("validating Result: %v", err)
- }
- defer cache.Tick(c)
- // store the Log text if supplied
- if len(res.Log) > 0 {
- hash, err := PutLog(c, res.Log)
- if err != nil {
- return nil, fmt.Errorf("putting Log: %v", err)
- }
- res.LogHash = hash
- }
- tx := func(c appengine.Context) error {
- // check Package exists
- if _, err := GetPackage(c, res.PackagePath); err != nil {
- return fmt.Errorf("GetPackage: %v", err)
- }
- // put Result
- if _, err := datastore.Put(c, res.Key(c), res); err != nil {
- return fmt.Errorf("putting Result: %v", err)
- }
- // add Result to Commit
- com := &Commit{PackagePath: res.PackagePath, Hash: res.Hash}
- if err := com.AddResult(c, res); err != nil {
- return fmt.Errorf("AddResult: %v", err)
- }
- // Send build failure notifications, if necessary.
-		// Note this must run after the call to AddResult, which
-		// populates the Commit's ResultData field.
- return notifyOnFailure(c, com, res.Builder)
- }
- return nil, datastore.RunInTransaction(c, tx, nil)
-}
-
-// perf-result request payload
-type PerfRequest struct {
- Builder string
- Benchmark string
- Hash string
- OK bool
- Metrics []PerfMetric
- Artifacts []PerfArtifact
-}
-
-type PerfMetric struct {
- Type string
- Val uint64
-}
-
-type PerfArtifact struct {
- Type string
- Body string
-}
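
For orientation, here is a standalone sketch (types re-declared from above) of the JSON body a builder might POST to the perf-result endpoint; the builder name, commit hash, and metric values are made up for illustration:

```go
// Standalone sketch of a perf-result request body as encoded by
// encoding/json. All concrete values below are hypothetical.
package main

import (
	"encoding/json"
	"fmt"
)

type PerfMetric struct {
	Type string
	Val  uint64
}

type PerfArtifact struct {
	Type string
	Body string
}

type PerfRequest struct {
	Builder   string
	Benchmark string
	Hash      string
	OK        bool
	Metrics   []PerfMetric
	Artifacts []PerfArtifact
}

func main() {
	req := PerfRequest{
		Builder:   "linux-amd64", // hypothetical builder
		Benchmark: "json-1",      // "<benchmark>-<GOMAXPROCS>"
		Hash:      "0051c7442fed9c888de6617fa9239a913904d96e",
		OK:        true,
		Metrics:   []PerfMetric{{Type: "time", Val: 123456}},
		Artifacts: []PerfArtifact{{Type: "log", Body: "...raw benchmark output..."}},
	}
	b, _ := json.MarshalIndent(req, "", "  ") // error ignored in this sketch
	fmt.Println(string(b))
}
```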
-
-// perfResultHandler records a benchmarking result.
-func perfResultHandler(r *http.Request) (interface{}, error) {
- defer r.Body.Close()
- if r.Method != "POST" {
- return nil, errBadMethod(r.Method)
- }
-
- req := new(PerfRequest)
- if err := json.NewDecoder(r.Body).Decode(req); err != nil {
- return nil, fmt.Errorf("decoding Body: %v", err)
- }
-
- c := contextForRequest(r)
- defer cache.Tick(c)
-
- // store the text files if supplied
- for i, a := range req.Artifacts {
- hash, err := PutLog(c, a.Body)
- if err != nil {
- return nil, fmt.Errorf("putting Log: %v", err)
- }
- req.Artifacts[i].Body = hash
- }
- tx := func(c appengine.Context) error {
- return addPerfResult(c, r, req)
- }
- return nil, datastore.RunInTransaction(c, tx, nil)
-}
-
-// addPerfResult creates PerfResult and updates Commit, PerfTodo,
-// PerfMetricRun and PerfConfig.
-// MUST be called from inside a transaction.
-func addPerfResult(c appengine.Context, r *http.Request, req *PerfRequest) error {
- // check Package exists
- p, err := GetPackage(c, "")
- if err != nil {
- return fmt.Errorf("GetPackage: %v", err)
- }
- // add result to Commit
- com := &Commit{Hash: req.Hash}
- if err := com.AddPerfResult(c, req.Builder, req.Benchmark); err != nil {
- return fmt.Errorf("AddPerfResult: %v", err)
- }
-
- // add the result to PerfResult
- res := &PerfResult{CommitHash: req.Hash}
- if err := datastore.Get(c, res.Key(c), res); err != nil {
- return fmt.Errorf("getting PerfResult: %v", err)
- }
- present := res.AddResult(req)
- if _, err := datastore.Put(c, res.Key(c), res); err != nil {
- return fmt.Errorf("putting PerfResult: %v", err)
- }
-
- // Meta-done denotes that there are no benchmarks left.
- if req.Benchmark == "meta-done" {
-		// Don't send duplicate emails for the same commit/builder,
-		// and don't send emails about commits that are too old.
- if !present && com.Num >= p.NextNum-commitsPerPage {
- if err := checkPerfChanges(c, r, com, req.Builder, res); err != nil {
- return err
- }
- }
- if err := removeCommitFromPerfTodo(c, req.Builder, com.Num); err != nil {
-			return err
- }
- return nil
- }
-
- // update PerfConfig
- newBenchmark, err := UpdatePerfConfig(c, r, req)
- if err != nil {
- return fmt.Errorf("updating PerfConfig: %v", err)
- }
- if newBenchmark {
- // If this is a new benchmark on the builder, delete PerfTodo.
- // It will be recreated later with all commits again.
- todo := &PerfTodo{Builder: req.Builder}
- err = datastore.Delete(c, todo.Key(c))
- if err != nil && err != datastore.ErrNoSuchEntity {
- return fmt.Errorf("deleting PerfTodo: %v", err)
- }
- }
-
- // add perf metrics
- for _, metric := range req.Metrics {
- m, err := GetPerfMetricRun(c, req.Builder, req.Benchmark, metric.Type, com.Num)
- if err != nil {
- return fmt.Errorf("GetPerfMetrics: %v", err)
- }
- if err = m.AddMetric(c, com.Num, metric.Val); err != nil {
- return fmt.Errorf("AddMetric: %v", err)
- }
- }
-
- return nil
-}
-
-// MUST be called from inside a transaction.
-func checkPerfChanges(c appengine.Context, r *http.Request, com *Commit, builder string, res *PerfResult) error {
- pc, err := GetPerfConfig(c, r)
- if err != nil {
- return err
- }
-
- results := res.ParseData()[builder]
- rcNewer := MakePerfResultCache(c, com, true)
- rcOlder := MakePerfResultCache(c, com, false)
-
- // Check whether we need to send failure notification email.
- if results["meta-done"].OK {
- // This one is successful, see if the next is failed.
- nextRes, err := rcNewer.Next(com.Num)
- if err != nil {
- return err
- }
- if nextRes != nil && isPerfFailed(nextRes, builder) {
- sendPerfFailMail(c, builder, nextRes)
- }
- } else {
- // This one is failed, see if the previous is successful.
- prevRes, err := rcOlder.Next(com.Num)
- if err != nil {
- return err
- }
- if prevRes != nil && !isPerfFailed(prevRes, builder) {
- sendPerfFailMail(c, builder, res)
- }
- }
-
- // Now see if there are any performance changes.
- // Find the previous and the next results for performance comparison.
- prevRes, err := rcOlder.NextForComparison(com.Num, builder)
- if err != nil {
- return err
- }
- nextRes, err := rcNewer.NextForComparison(com.Num, builder)
- if err != nil {
- return err
- }
- if results["meta-done"].OK {
- // This one is successful, compare with a previous one.
- if prevRes != nil {
- if err := comparePerfResults(c, pc, builder, prevRes, res); err != nil {
- return err
- }
- }
- // Compare a next one with the current.
- if nextRes != nil {
- if err := comparePerfResults(c, pc, builder, res, nextRes); err != nil {
- return err
- }
- }
- } else {
- // This one is failed, compare a previous one with a next one.
- if prevRes != nil && nextRes != nil {
- if err := comparePerfResults(c, pc, builder, prevRes, nextRes); err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-func comparePerfResults(c appengine.Context, pc *PerfConfig, builder string, prevRes, res *PerfResult) error {
- changes := significantPerfChanges(pc, builder, prevRes, res)
- if len(changes) == 0 {
- return nil
- }
- com := &Commit{Hash: res.CommitHash}
- if err := datastore.Get(c, com.Key(c), com); err != nil {
- return fmt.Errorf("getting commit %v: %v", com.Hash, err)
- }
- sendPerfMailLater.Call(c, com, prevRes.CommitHash, builder, changes) // add task to queue
- return nil
-}
-
-// logHandler displays log text for a given hash.
-// It handles paths like "/log/hash".
-func logHandler(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Content-type", "text/plain; charset=utf-8")
- c := contextForRequest(r)
- hash := r.URL.Path[strings.LastIndex(r.URL.Path, "/")+1:]
- key := datastore.NewKey(c, "Log", hash, 0, nil)
- l := new(Log)
- if err := datastore.Get(c, key, l); err != nil {
- logErr(w, r, err)
- return
- }
- b, err := l.Text()
- if err != nil {
- logErr(w, r, err)
- return
- }
- w.Write(b)
-}
-
-type dashHandler func(*http.Request) (interface{}, error)
-
-type dashResponse struct {
- Response interface{}
- Error string
-}
-
-// errBadMethod is returned by a dashHandler when
-// the request has an unsuitable method.
-type errBadMethod string
-
-func (e errBadMethod) Error() string {
- return "bad method: " + string(e)
-}
-
-// AuthHandler wraps a dashHandler in an http.HandlerFunc that validates the
-// supplied key and builder query parameters.
-func AuthHandler(h dashHandler) http.HandlerFunc {
- return func(w http.ResponseWriter, r *http.Request) {
- c := contextForRequest(r)
-
- // Put the URL Query values into r.Form to avoid parsing the
- // request body when calling r.FormValue.
- r.Form = r.URL.Query()
-
- var err error
- var resp interface{}
-
- // Validate key query parameter for POST requests only.
- key := r.FormValue("key")
- builder := r.FormValue("builder")
- if r.Method == "POST" && !validKey(c, key, builder) {
- err = fmt.Errorf("invalid key %q for builder %q", key, builder)
- }
-
- // Call the original HandlerFunc and return the response.
- if err == nil {
- resp, err = h(r)
- }
-
- // Write JSON response.
- dashResp := &dashResponse{Response: resp}
- if err != nil {
- c.Errorf("%v", err)
- dashResp.Error = err.Error()
- }
- w.Header().Set("Content-Type", "application/json")
- if err = json.NewEncoder(w).Encode(dashResp); err != nil {
- c.Criticalf("encoding response: %v", err)
- }
- }
-}
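
To make the wire format concrete, here is a standalone sketch of the JSON envelope AuthHandler writes: a successful handler call fills Response, a failing one fills Error. The example values are made up.

```go
// Standalone sketch of AuthHandler's response envelope (dashResponse
// re-declared here); the example values are hypothetical.
package main

import (
	"encoding/json"
	"os"
)

type dashResponse struct {
	Response interface{}
	Error    string
}

func main() {
	enc := json.NewEncoder(os.Stdout)
	enc.Encode(dashResponse{Response: "ok"})
	// {"Response":"ok","Error":""}
	enc.Encode(dashResponse{Error: `invalid key "x" for builder "linux-amd64"`})
	// {"Response":null,"Error":"invalid key \"x\" for builder \"linux-amd64\""}
}
```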
-
-func keyHandler(w http.ResponseWriter, r *http.Request) {
- builder := r.FormValue("builder")
- if builder == "" {
- logErr(w, r, errors.New("must supply builder in query string"))
- return
- }
- c := contextForRequest(r)
- fmt.Fprint(w, builderKey(c, builder))
-}
-
-func init() {
- for _, d := range dashboards {
- // admin handlers
- http.HandleFunc(d.RelPath+"init", initHandler)
- http.HandleFunc(d.RelPath+"key", keyHandler)
-
- // authenticated handlers
- http.HandleFunc(d.RelPath+"commit", AuthHandler(commitHandler))
- http.HandleFunc(d.RelPath+"packages", AuthHandler(packagesHandler))
- http.HandleFunc(d.RelPath+"result", AuthHandler(resultHandler))
- http.HandleFunc(d.RelPath+"perf-result", AuthHandler(perfResultHandler))
- http.HandleFunc(d.RelPath+"tag", AuthHandler(tagHandler))
- http.HandleFunc(d.RelPath+"todo", AuthHandler(todoHandler))
-
- // public handlers
- http.HandleFunc(d.RelPath+"log/", logHandler)
- }
-}
-
-func validHash(hash string) bool {
- // TODO(adg): correctly validate a hash
- return hash != ""
-}
-
-func validKey(c appengine.Context, key, builder string) bool {
- return isMasterKey(c, key) || key == builderKey(c, builder)
-}
-
-func isMasterKey(c appengine.Context, k string) bool {
- return appengine.IsDevAppServer() || k == key.Secret(c)
-}
-
-func builderKey(c appengine.Context, builder string) string {
- h := hmac.New(md5.New, []byte(key.Secret(c)))
- h.Write([]byte(builder))
- return fmt.Sprintf("%x", h.Sum(nil))
-}
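
A builder client has to derive the same key to authenticate; a standalone sketch of that derivation (HMAC-MD5 of the builder name keyed with the dashboard secret), with a made-up secret and builder name:

```go
// Standalone sketch of the builderKey derivation above.
// The secret and builder name are made up.
package main

import (
	"crypto/hmac"
	"crypto/md5"
	"fmt"
)

func builderKey(secret, builder string) string {
	h := hmac.New(md5.New, []byte(secret))
	h.Write([]byte(builder))
	return fmt.Sprintf("%x", h.Sum(nil))
}

func main() {
	fmt.Println(builderKey("example-secret", "linux-amd64")) // 32 hex characters
}
```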
-
-func logErr(w http.ResponseWriter, r *http.Request, err error) {
- contextForRequest(r).Errorf("Error: %v", err)
- w.WriteHeader(http.StatusInternalServerError)
- fmt.Fprint(w, "Error: ", err)
-}
-
-func contextForRequest(r *http.Request) appengine.Context {
- return dashboardForRequest(r).Context(appengine.NewContext(r))
-}
-
-// limitStringLength essentially returns s[:max], but it ensures that we do
-// not split a UTF-8 rune in half. Otherwise the appengine python scripts
-// would break badly.
-func limitStringLength(s string, max int) string {
- if len(s) <= max {
- return s
- }
- for {
- s = s[:max]
- r, size := utf8.DecodeLastRuneInString(s)
- if r != utf8.RuneError || size != 1 {
- return s
- }
- max--
- }
-}
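
As a quick illustration of the rune handling: a plain s[:max] can cut a multi-byte rune in half, while limitStringLength backs off to the previous rune boundary. A standalone sketch with the function copied from above:

```go
// Standalone sketch demonstrating limitStringLength (copied from above):
// s[:2] on "héllo" would split the two-byte "é", so the function backs
// off and returns just "h".
package main

import (
	"fmt"
	"unicode/utf8"
)

func limitStringLength(s string, max int) string {
	if len(s) <= max {
		return s
	}
	for {
		s = s[:max]
		r, size := utf8.DecodeLastRuneInString(s)
		if r != utf8.RuneError || size != 1 {
			return s
		}
		max--
	}
}

func main() {
	fmt.Printf("%q\n", limitStringLength("héllo", 2)) // "h"
}
```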
diff --git a/dashboard/app/build/init.go b/dashboard/app/build/init.go
deleted file mode 100644
index e7d63ed..0000000
--- a/dashboard/app/build/init.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build appengine
-
-package build
-
-import (
- "fmt"
- "net/http"
-
- "appengine"
- "appengine/datastore"
-
- "cache"
- "key"
-)
-
-func initHandler(w http.ResponseWriter, r *http.Request) {
- d := dashboardForRequest(r)
- c := d.Context(appengine.NewContext(r))
- defer cache.Tick(c)
- for _, p := range d.Packages {
- err := datastore.Get(c, p.Key(c), new(Package))
- if _, ok := err.(*datastore.ErrFieldMismatch); ok {
- // Some fields have been removed, so it's okay to ignore this error.
- err = nil
- }
- if err == nil {
- continue
- } else if err != datastore.ErrNoSuchEntity {
- logErr(w, r, err)
- return
- }
- if _, err := datastore.Put(c, p.Key(c), p); err != nil {
- logErr(w, r, err)
- return
- }
- }
-
- // Create secret key.
- key.Secret(c)
-
- fmt.Fprint(w, "OK")
-}
diff --git a/dashboard/app/build/notify.go b/dashboard/app/build/notify.go
deleted file mode 100644
index 1a71dd2..0000000
--- a/dashboard/app/build/notify.go
+++ /dev/null
@@ -1,378 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build appengine
-
-package build
-
-import (
- "bytes"
- "encoding/gob"
- "errors"
- "fmt"
- "io/ioutil"
- "net/http"
- "net/url"
- "regexp"
- "runtime"
- "sort"
- "text/template"
-
- "appengine"
- "appengine/datastore"
- "appengine/delay"
- "appengine/mail"
- "appengine/urlfetch"
-)
-
-const (
- mailFrom = "builder@golang.org" // use this for sending any mail
- failMailTo = "golang-dev@googlegroups.com"
- domain = "build.golang.org"
- gobotBase = "http://research.swtch.com/gobot_codereview"
-)
-
-// ignoreFailure is a set of builders that we don't email about because
-// they are not yet production-ready.
-var ignoreFailure = map[string]bool{
- "dragonfly-386": true,
- "dragonfly-amd64": true,
- "freebsd-arm": true,
- "netbsd-amd64-bsiegert": true,
- "netbsd-arm-rpi": true,
- "plan9-amd64-aram": true,
-}
-
-// notifyOnFailure checks whether the supplied Commit or the subsequent
-// Commit (if present) breaks the build for this builder.
-// If either of those commits breaks the build, an email notification is sent
-// from a delayed task. (We use a task because this way the mail won't be
-// sent if the enclosing datastore transaction fails.)
-//
-// This must be run in a datastore transaction, and the provided *Commit must
-// have been retrieved from the datastore within that transaction.
-func notifyOnFailure(c appengine.Context, com *Commit, builder string) error {
- if ignoreFailure[builder] {
- return nil
- }
-
- // TODO(adg): implement notifications for packages
- if com.PackagePath != "" {
- return nil
- }
-
- p := &Package{Path: com.PackagePath}
- var broken *Commit
- cr := com.Result(builder, "")
- if cr == nil {
- return fmt.Errorf("no result for %s/%s", com.Hash, builder)
- }
- q := datastore.NewQuery("Commit").Ancestor(p.Key(c))
- if cr.OK {
- // This commit is OK. Notify if next Commit is broken.
- next := new(Commit)
- q = q.Filter("ParentHash=", com.Hash)
- if err := firstMatch(c, q, next); err != nil {
- if err == datastore.ErrNoSuchEntity {
- // OK at tip, no notification necessary.
- return nil
- }
- return err
- }
- if nr := next.Result(builder, ""); nr != nil && !nr.OK {
- c.Debugf("commit ok: %#v\nresult: %#v", com, cr)
- c.Debugf("next commit broken: %#v\nnext result:%#v", next, nr)
- broken = next
- }
- } else {
- // This commit is broken. Notify if the previous Commit is OK.
- prev := new(Commit)
- q = q.Filter("Hash=", com.ParentHash)
- if err := firstMatch(c, q, prev); err != nil {
- if err == datastore.ErrNoSuchEntity {
- // No previous result, let the backfill of
- // this result trigger the notification.
- return nil
- }
- return err
- }
- if pr := prev.Result(builder, ""); pr != nil && pr.OK {
- c.Debugf("commit broken: %#v\nresult: %#v", com, cr)
- c.Debugf("previous commit ok: %#v\nprevious result:%#v", prev, pr)
- broken = com
- }
- }
- if broken == nil {
- return nil
- }
- r := broken.Result(builder, "")
- if r == nil {
- return fmt.Errorf("finding result for %q: %+v", builder, com)
- }
- return commonNotify(c, broken, builder, r.LogHash)
-}
-
-// firstMatch executes the query q and loads the first entity into v.
-func firstMatch(c appengine.Context, q *datastore.Query, v interface{}) error {
- t := q.Limit(1).Run(c)
- _, err := t.Next(v)
- if err == datastore.Done {
- err = datastore.ErrNoSuchEntity
- }
- return err
-}
-
-var notifyLater = delay.Func("notify", notify)
-
-// notify tries to update the CL for the given Commit with a failure message.
-// If it doesn't succeed, it sends a failure email to golang-dev.
-func notify(c appengine.Context, com *Commit, builder, logHash string) {
- v := url.Values{"brokebuild": {builder}, "log": {logHash}}
- if !updateCL(c, com, v) {
- // Send a mail notification if the CL can't be found.
- sendFailMail(c, com, builder, logHash)
- }
-}
-
-// updateCL tells gobot to update the CL for the given Commit with
-// the provided query values.
-func updateCL(c appengine.Context, com *Commit, v url.Values) bool {
- cl, err := lookupCL(c, com)
- if err != nil {
- c.Errorf("could not find CL for %v: %v", com.Hash, err)
- return false
- }
- u := fmt.Sprintf("%v?cl=%v&%s", gobotBase, cl, v.Encode())
- r, err := urlfetch.Client(c).Post(u, "text/plain", nil)
- if err != nil {
- c.Errorf("could not update CL %v: %v", cl, err)
- return false
- }
- r.Body.Close()
- if r.StatusCode != http.StatusOK {
- c.Errorf("could not update CL %v: %v", cl, r.Status)
- return false
- }
- return true
-}
-
-var clURL = regexp.MustCompile(`https://codereview.appspot.com/([0-9]+)`)
-
-// lookupCL consults code.google.com for the full change description for the
-// provided Commit, and returns the relevant CL number.
-func lookupCL(c appengine.Context, com *Commit) (string, error) {
- url := "https://code.google.com/p/go/source/detail?r=" + com.Hash
- r, err := urlfetch.Client(c).Get(url)
- if err != nil {
- return "", err
- }
- defer r.Body.Close()
- if r.StatusCode != http.StatusOK {
- return "", fmt.Errorf("retrieving %v: %v", url, r.Status)
- }
- b, err := ioutil.ReadAll(r.Body)
- if err != nil {
- return "", err
- }
- m := clURL.FindAllSubmatch(b, -1)
- if m == nil {
- return "", errors.New("no CL URL found on changeset page")
- }
- // Return the last visible codereview URL on the page,
- // in case the change description refers to another CL.
- return string(m[len(m)-1][1]), nil
-}
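
A standalone sketch of the extraction step, running the same regexp over a made-up changeset page body (the CL number shown is hypothetical):

```go
// Standalone sketch of the CL-number extraction done by lookupCL: the last
// codereview URL found in the page body wins. Body and CL number are made up.
package main

import (
	"fmt"
	"regexp"
)

var clURL = regexp.MustCompile(`https://codereview.appspot.com/([0-9]+)`)

func main() {
	body := []byte(`...discussion... see https://codereview.appspot.com/5449075/ for the change...`)
	m := clURL.FindAllSubmatch(body, -1)
	fmt.Println(string(m[len(m)-1][1])) // 5449075
}
```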
-
-var sendFailMailTmpl = template.Must(template.New("notify.txt").
- Funcs(template.FuncMap(tmplFuncs)).
- ParseFiles("build/notify.txt"))
-
-func init() {
- gob.Register(&Commit{}) // for delay
-}
-
-var (
- sendPerfMailLater = delay.Func("sendPerfMail", sendPerfMailFunc)
- sendPerfMailTmpl = template.Must(
- template.New("perf_notify.txt").
- Funcs(template.FuncMap(tmplFuncs)).
- ParseFiles("build/perf_notify.txt"),
- )
-)
-
-// MUST be called from inside a transaction.
-func sendPerfFailMail(c appengine.Context, builder string, res *PerfResult) error {
- com := &Commit{Hash: res.CommitHash}
- if err := datastore.Get(c, com.Key(c), com); err != nil {
- return err
- }
- logHash := ""
- parsed := res.ParseData()
- for _, data := range parsed[builder] {
- if !data.OK {
- logHash = data.Artifacts["log"]
- break
- }
- }
- if logHash == "" {
-		return fmt.Errorf("cannot find a failed result for commit %v on builder %v", com.Hash, builder)
- }
- return commonNotify(c, com, builder, logHash)
-}
-
-// commonNotify MUST!!! be called from within a transaction inside which
-// the provided Commit entity was retrieved from the datastore.
-func commonNotify(c appengine.Context, com *Commit, builder, logHash string) error {
- if com.Num == 0 || com.Desc == "" {
- stk := make([]byte, 10000)
- n := runtime.Stack(stk, false)
- stk = stk[:n]
- c.Errorf("refusing to notify with com=%+v\n%s", *com, string(stk))
- return fmt.Errorf("misuse of commonNotify")
- }
- if com.FailNotificationSent {
- return nil
- }
-	c.Infof("%s is a broken commit; notifying", com.Hash)
- notifyLater.Call(c, com, builder, logHash) // add task to queue
- com.FailNotificationSent = true
- return putCommit(c, com)
-}
-
-// sendFailMail sends a mail notification that the build failed on the
-// provided commit and builder.
-func sendFailMail(c appengine.Context, com *Commit, builder, logHash string) {
- // get Log
- k := datastore.NewKey(c, "Log", logHash, 0, nil)
- l := new(Log)
- if err := datastore.Get(c, k, l); err != nil {
- c.Errorf("finding Log record %v: %v", logHash, err)
- return
- }
- logText, err := l.Text()
- if err != nil {
- c.Errorf("unpacking Log record %v: %v", logHash, err)
- return
- }
-
- // prepare mail message
- var body bytes.Buffer
- err = sendFailMailTmpl.Execute(&body, map[string]interface{}{
- "Builder": builder, "Commit": com, "LogHash": logHash, "LogText": logText,
- "Hostname": domain,
- })
- if err != nil {
- c.Errorf("rendering mail template: %v", err)
- return
- }
- subject := fmt.Sprintf("%s broken by %s", builder, shortDesc(com.Desc))
- msg := &mail.Message{
- Sender: mailFrom,
- To: []string{failMailTo},
- ReplyTo: failMailTo,
- Subject: subject,
- Body: body.String(),
- }
-
- // send mail
- if err := mail.Send(c, msg); err != nil {
- c.Errorf("sending mail: %v", err)
- }
-}
-
-type PerfChangeBenchmark struct {
- Name string
- Metrics []*PerfChangeMetric
-}
-
-type PerfChangeMetric struct {
- Name string
- Old uint64
- New uint64
- Delta float64
-}
-
-type PerfChangeBenchmarkSlice []*PerfChangeBenchmark
-
-func (l PerfChangeBenchmarkSlice) Len() int { return len(l) }
-func (l PerfChangeBenchmarkSlice) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
-func (l PerfChangeBenchmarkSlice) Less(i, j int) bool {
- b1, p1 := splitBench(l[i].Name)
- b2, p2 := splitBench(l[j].Name)
- if b1 != b2 {
- return b1 < b2
- }
- return p1 < p2
-}
-
-type PerfChangeMetricSlice []*PerfChangeMetric
-
-func (l PerfChangeMetricSlice) Len() int { return len(l) }
-func (l PerfChangeMetricSlice) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
-func (l PerfChangeMetricSlice) Less(i, j int) bool { return l[i].Name < l[j].Name }
-
-func sendPerfMailFunc(c appengine.Context, com *Commit, prevCommitHash, builder string, changes []*PerfChange) {
- // Sort the changes into the right order.
- var benchmarks []*PerfChangeBenchmark
- for _, ch := range changes {
- // Find the benchmark.
- var b *PerfChangeBenchmark
- for _, b1 := range benchmarks {
- if b1.Name == ch.Bench {
- b = b1
- break
- }
- }
- if b == nil {
- b = &PerfChangeBenchmark{Name: ch.Bench}
- benchmarks = append(benchmarks, b)
- }
- b.Metrics = append(b.Metrics, &PerfChangeMetric{Name: ch.Metric, Old: ch.Old, New: ch.New, Delta: ch.Diff})
- }
- for _, b := range benchmarks {
- sort.Sort(PerfChangeMetricSlice(b.Metrics))
- }
- sort.Sort(PerfChangeBenchmarkSlice(benchmarks))
-
- u := fmt.Sprintf("http://%v/perfdetail?commit=%v&commit0=%v&kind=builder&builder=%v", domain, com.Hash, prevCommitHash, builder)
-
- // Prepare mail message (without Commit, for updateCL).
- var body bytes.Buffer
- err := sendPerfMailTmpl.Execute(&body, map[string]interface{}{
- "Builder": builder, "Hostname": domain, "Url": u, "Benchmarks": benchmarks,
- })
- if err != nil {
- c.Errorf("rendering perf mail template: %v", err)
- return
- }
-
- // First, try to update the CL.
- v := url.Values{"textmsg": {body.String()}}
- if updateCL(c, com, v) {
- return
- }
-
- // Otherwise, send mail (with Commit, for independent mail message).
- body.Reset()
- err = sendPerfMailTmpl.Execute(&body, map[string]interface{}{
- "Builder": builder, "Commit": com, "Hostname": domain, "Url": u, "Benchmarks": benchmarks,
- })
- if err != nil {
- c.Errorf("rendering perf mail template: %v", err)
- return
- }
- subject := fmt.Sprintf("Perf changes on %s by %s", builder, shortDesc(com.Desc))
- msg := &mail.Message{
- Sender: mailFrom,
- To: []string{failMailTo},
- ReplyTo: failMailTo,
- Subject: subject,
- Body: body.String(),
- }
-
- // send mail
- if err := mail.Send(c, msg); err != nil {
- c.Errorf("sending mail: %v", err)
- }
-}
diff --git a/dashboard/app/build/notify.txt b/dashboard/app/build/notify.txt
deleted file mode 100644
index 514191f..0000000
--- a/dashboard/app/build/notify.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-Change {{shortHash .Commit.Hash}} broke the {{.Builder}} build:
-http://{{.Hostname}}/log/{{.LogHash}}
-
-{{.Commit.Desc}}
-
-http://code.google.com/p/go/source/detail?r={{shortHash .Commit.Hash}}
-
-$ tail -200 < log
-{{printf "%s" .LogText | tail 200}}
diff --git a/dashboard/app/build/perf.go b/dashboard/app/build/perf.go
deleted file mode 100644
index 2c16e60..0000000
--- a/dashboard/app/build/perf.go
+++ /dev/null
@@ -1,312 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build appengine
-
-package build
-
-import (
- "fmt"
- "sort"
- "strconv"
- "strings"
-
- "appengine"
- "appengine/datastore"
-)
-
-var knownTags = map[string]string{
- "go1": "0051c7442fed9c888de6617fa9239a913904d96e",
- "go1.1": "d29da2ced72ba2cf48ed6a8f1ec4abc01e4c5bf1",
- "go1.2": "b1edf8faa5d6cbc50c6515785df9df9c19296564",
- "go1.3": "f153208c0a0e306bfca14f71ef11f09859ccabc8",
-}
-
-var lastRelease = "go1.3"
-
-func splitBench(benchProcs string) (string, int) {
- ss := strings.Split(benchProcs, "-")
- procs, _ := strconv.Atoi(ss[1])
- return ss[0], procs
-}
-
-func dashPerfCommits(c appengine.Context, page int) ([]*Commit, error) {
- q := datastore.NewQuery("Commit").
- Ancestor((&Package{}).Key(c)).
- Order("-Num").
- Filter("NeedsBenchmarking =", true).
- Limit(commitsPerPage).
- Offset(page * commitsPerPage)
- var commits []*Commit
- _, err := q.GetAll(c, &commits)
- if err == nil && len(commits) == 0 {
- err = fmt.Errorf("no commits")
- }
- return commits, err
-}
-
-func perfChangeStyle(pc *PerfConfig, v float64, builder, benchmark, metric string) string {
- noise := pc.NoiseLevel(builder, benchmark, metric)
- if isNoise(v, noise) {
- return "noise"
- }
- if v > 0 {
- return "bad"
- }
- return "good"
-}
-
-func isNoise(diff, noise float64) bool {
- rnoise := -100 * noise / (noise + 100)
- return diff < noise && diff > rnoise
-}
-
-func perfDiff(old, new uint64) float64 {
- return 100*float64(new)/float64(old) - 100
-}
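
To make the noise arithmetic concrete: perfDiff reports the percentage change relative to the old value, and isNoise accepts changes inside an asymmetric window around zero. A standalone sketch with both functions copied from above and made-up numbers:

```go
// Standalone sketch of the noise classification above. With a 5% noise
// level the acceptance window is roughly (-4.76%, +5%), so a 100 -> 110
// change (+10%) is reported as a real regression, while 100 -> 103 is not.
package main

import "fmt"

func perfDiff(old, new uint64) float64 {
	return 100*float64(new)/float64(old) - 100
}

func isNoise(diff, noise float64) bool {
	rnoise := -100 * noise / (noise + 100)
	return diff < noise && diff > rnoise
}

func main() {
	fmt.Println(perfDiff(100, 110), isNoise(perfDiff(100, 110), 5)) // 10 false
	fmt.Println(perfDiff(100, 103), isNoise(perfDiff(100, 103), 5)) // 3 true
}
```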
-
-func isPerfFailed(res *PerfResult, builder string) bool {
- data := res.ParseData()[builder]
- return data != nil && data["meta-done"] != nil && !data["meta-done"].OK
-}
-
-// PerfResultCache caches a set of PerfResults so that they can be accessed
-// without lots of duplicate datastore lookups.
-// It allows iterating over newer or older results relative to some base commit.
-type PerfResultCache struct {
- c appengine.Context
- newer bool
- iter *datastore.Iterator
- results map[int]*PerfResult
-}
-
-func MakePerfResultCache(c appengine.Context, com *Commit, newer bool) *PerfResultCache {
- p := &Package{}
- q := datastore.NewQuery("PerfResult").Ancestor(p.Key(c)).Limit(100)
- if newer {
- q = q.Filter("CommitNum >=", com.Num).Order("CommitNum")
- } else {
- q = q.Filter("CommitNum <=", com.Num).Order("-CommitNum")
- }
- rc := &PerfResultCache{c: c, newer: newer, iter: q.Run(c), results: make(map[int]*PerfResult)}
- return rc
-}
-
-func (rc *PerfResultCache) Get(commitNum int) *PerfResult {
- rc.Next(commitNum) // fetch the commit, if necessary
- return rc.results[commitNum]
-}
-
-// Next returns the next PerfResult relative to commit commitNum, in the
-// direction the cache was created with.
-// It does not care whether the result has any data or whether the run failed.
-func (rc *PerfResultCache) Next(commitNum int) (*PerfResult, error) {
- // See if we have next result in the cache.
- next := -1
- for ci := range rc.results {
- if rc.newer {
- if ci > commitNum && (next == -1 || ci < next) {
- next = ci
- }
- } else {
- if ci < commitNum && (next == -1 || ci > next) {
- next = ci
- }
- }
- }
- if next != -1 {
- return rc.results[next], nil
- }
- // Fetch next result from datastore.
- res := new(PerfResult)
- _, err := rc.iter.Next(res)
- if err == datastore.Done {
- return nil, nil
- }
- if err != nil {
- return nil, fmt.Errorf("fetching perf results: %v", err)
- }
- if (rc.newer && res.CommitNum < commitNum) || (!rc.newer && res.CommitNum > commitNum) {
- rc.c.Errorf("PerfResultCache.Next: bad commit num")
- }
- rc.results[res.CommitNum] = res
- return res, nil
-}
-
-// NextForComparison returns the PerfResult to use for performance comparison.
-// It skips failed results, but does not skip results with no data.
-func (rc *PerfResultCache) NextForComparison(commitNum int, builder string) (*PerfResult, error) {
- for {
- res, err := rc.Next(commitNum)
- if err != nil {
- return nil, err
- }
- if res == nil {
- return nil, nil
- }
- if res.CommitNum == commitNum {
- continue
- }
- parsed := res.ParseData()
- if builder != "" {
- // Comparing for a particular builder.
- // This is used in perf_changes and in email notifications.
- b := parsed[builder]
- if b == nil || b["meta-done"] == nil {
- // No results yet, must not do the comparison.
- return nil, nil
- }
- if b["meta-done"].OK {
- // Have complete results, compare.
- return res, nil
- }
- } else {
- // Comparing for all builders, find a result with at least
- // one successful meta-done.
- // This is used in perf_detail.
- for _, benchs := range parsed {
- if data := benchs["meta-done"]; data != nil && data.OK {
- return res, nil
- }
- }
- }
- // Failed, try next result.
- commitNum = res.CommitNum
- }
-}
-
-type PerfChange struct {
- Builder string
- Bench string
- Metric string
- Old uint64
- New uint64
- Diff float64
-}
-
-func significantPerfChanges(pc *PerfConfig, builder string, prevRes, res *PerfResult) (changes []*PerfChange) {
- // First, collect all significant changes.
- for builder1, benchmarks1 := range res.ParseData() {
- if builder != "" && builder != builder1 {
- // This is not the builder you're looking for, Luke.
- continue
- }
- benchmarks0 := prevRes.ParseData()[builder1]
- if benchmarks0 == nil {
- continue
- }
- for benchmark, data1 := range benchmarks1 {
- data0 := benchmarks0[benchmark]
- if data0 == nil {
- continue
- }
- for metric, val := range data1.Metrics {
- val0 := data0.Metrics[metric]
- if val0 == 0 {
- continue
- }
- diff := perfDiff(val0, val)
- noise := pc.NoiseLevel(builder, benchmark, metric)
- if isNoise(diff, noise) {
- continue
- }
- ch := &PerfChange{Builder: builder, Bench: benchmark, Metric: metric, Old: val0, New: val, Diff: diff}
- changes = append(changes, ch)
- }
- }
- }
- // Then, strip non-repeatable changes (flakes).
- // The hypothesis is that a real change must show up with the majority of GOMAXPROCS values.
- majority := len(pc.ProcList(builder))/2 + 1
- cnt := make(map[string]int)
- for _, ch := range changes {
- b, _ := splitBench(ch.Bench)
- name := b + "|" + ch.Metric
- if ch.Diff < 0 {
- name += "--"
- }
- cnt[name] = cnt[name] + 1
- }
- for i := 0; i < len(changes); i++ {
- ch := changes[i]
- b, _ := splitBench(ch.Bench)
- name := b + "|" + ch.Metric
- if cnt[name] >= majority {
- continue
- }
- if cnt[name+"--"] >= majority {
- continue
- }
- // Remove flake.
- last := len(changes) - 1
- changes[i] = changes[last]
- changes = changes[:last]
- i--
- }
- return changes
-}
-
-// orderPerfTodo reorders commit nums for benchmarking todo.
-// The resulting order is somewhat tricky. We want two things:
-// 1. benchmark sequentially backwards (this provides information about the
-// most recent changes and allows us to estimate noise levels)
-// 2. benchmark old commits in "scatter" order (this allows us to quickly
-// gather brief information about thousands of old commits)
-// So this function interleaves the two orders.
-func orderPerfTodo(nums []int) []int {
- sort.Ints(nums)
- n := len(nums)
-	pow2 := uint32(0) // exponent of npow2
-	npow2 := 0        // smallest power of two that is strictly greater than n
- for npow2 <= n {
- pow2++
- npow2 = 1 << pow2
- }
- res := make([]int, n)
- resPos := n - 1 // result array is filled backwards
-	present := make([]bool, n) // marks values that are already present in the result array
- for i0, i1 := n-1, 0; i0 >= 0 || i1 < npow2; {
- // i0 represents "benchmark sequentially backwards" sequence
- // find the next commit that is not yet present and add it
- for cnt := 0; cnt < 2; cnt++ {
- for ; i0 >= 0; i0-- {
- if !present[i0] {
- present[i0] = true
- res[resPos] = nums[i0]
- resPos--
- i0--
- break
- }
- }
- }
- // i1 represents "scatter order" sequence
- // find the next commit that is not yet present and add it
- for ; i1 < npow2; i1++ {
- // do the "recursive split-ordering" trick
- idx := 0 // bitwise reverse of i1
- for j := uint32(0); j <= pow2; j++ {
- if (i1 & (1 << j)) != 0 {
- idx = idx | (1 << (pow2 - j - 1))
- }
- }
- if idx < n && !present[idx] {
- present[idx] = true
- res[resPos] = nums[idx]
- resPos--
- i1++
- break
- }
- }
- }
-	// The above can't possibly be correct. Do a dumb sanity check.
- res2 := make([]int, n)
- copy(res2, res)
- sort.Ints(res2)
- for i := range res2 {
- if res2[i] != nums[i] {
-			panic(fmt.Sprintf("diff at %v: want %v, got %v\ninput: %v\noutput: %v",
-				i, nums[i], res2[i], nums, res2))
- }
- }
- return res
-}
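
The "scatter" half of the interleaving relies on bit-reversed indexing. A standalone illustration (not part of the dashboard code) of that trick for a 3-bit index space:

```go
// Standalone illustration of the bit-reversal ("recursive split-ordering")
// trick used in orderPerfTodo: reversing a 3-bit index visits
// 0, 4, 2, 6, 1, 5, 3, 7, i.e. it keeps splitting the remaining gaps in half.
package main

import "fmt"

func main() {
	const bits = 3
	for i := 0; i < 1<<bits; i++ {
		idx := 0 // bitwise reverse of i
		for j := 0; j < bits; j++ {
			if i&(1<<j) != 0 {
				idx |= 1 << (bits - j - 1)
			}
		}
		fmt.Print(idx, " ")
	}
	fmt.Println() // 0 4 2 6 1 5 3 7
}
```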
diff --git a/dashboard/app/build/perf_changes.go b/dashboard/app/build/perf_changes.go
deleted file mode 100644
index 4abbf1a..0000000
--- a/dashboard/app/build/perf_changes.go
+++ /dev/null
@@ -1,282 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build appengine
-
-package build
-
-import (
- "bytes"
- "fmt"
- "html/template"
- "net/http"
- "sort"
- "strconv"
-
- "appengine"
- "appengine/datastore"
-)
-
-func init() {
- http.HandleFunc("/perf", perfChangesHandler)
-}
-
-// perfChangesHandler draws the main benchmarking page.
-func perfChangesHandler(w http.ResponseWriter, r *http.Request) {
- d := dashboardForRequest(r)
- c := d.Context(appengine.NewContext(r))
-
- page, _ := strconv.Atoi(r.FormValue("page"))
- if page < 0 {
- page = 0
- }
-
- pc, err := GetPerfConfig(c, r)
- if err != nil {
- logErr(w, r, err)
- return
- }
-
- commits, err := dashPerfCommits(c, page)
- if err != nil {
- logErr(w, r, err)
- return
- }
-
- // Fetch PerfResult's for the commits.
- var uiCommits []*perfChangesCommit
- rc := MakePerfResultCache(c, commits[0], false)
-
- // But first compare tip with the last release.
- if page == 0 {
- res0 := &PerfResult{CommitHash: knownTags[lastRelease]}
-		// Assign to the outer err so the ErrNoSuchEntity check below sees this lookup's result.
-		err = datastore.Get(c, res0.Key(c), res0)
-		if err != nil && err != datastore.ErrNoSuchEntity {
- logErr(w, r, fmt.Errorf("getting PerfResult: %v", err))
- return
- }
- if err != datastore.ErrNoSuchEntity {
- uiCom, err := handleOneCommit(pc, commits[0], rc, res0)
- if err != nil {
- logErr(w, r, err)
- return
- }
- uiCom.IsSummary = true
- uiCom.ParentHash = lastRelease
- uiCommits = append(uiCommits, uiCom)
- }
- }
-
- for _, com := range commits {
- uiCom, err := handleOneCommit(pc, com, rc, nil)
- if err != nil {
- logErr(w, r, err)
- return
- }
- uiCommits = append(uiCommits, uiCom)
- }
-
- p := &Pagination{}
- if len(commits) == commitsPerPage {
- p.Next = page + 1
- }
- if page > 0 {
- p.Prev = page - 1
- p.HasPrev = true
- }
-
- data := &perfChangesData{d, p, uiCommits}
-
- var buf bytes.Buffer
- if err := perfChangesTemplate.Execute(&buf, data); err != nil {
- logErr(w, r, err)
- return
- }
-
- buf.WriteTo(w)
-}
-
-func handleOneCommit(pc *PerfConfig, com *Commit, rc *PerfResultCache, baseRes *PerfResult) (*perfChangesCommit, error) {
- uiCom := new(perfChangesCommit)
- uiCom.Commit = com
- res1 := rc.Get(com.Num)
- for builder, benchmarks1 := range res1.ParseData() {
- for benchmark, data1 := range benchmarks1 {
- if benchmark != "meta-done" || !data1.OK {
- uiCom.NumResults++
- }
- if !data1.OK {
- v := new(perfChangesChange)
- v.diff = 10000
- v.Style = "fail"
- v.Builder = builder
- v.Link = fmt.Sprintf("log/%v", data1.Artifacts["log"])
- v.Val = builder
- v.Hint = builder
- if benchmark != "meta-done" {
- v.Hint += "/" + benchmark
- }
- m := findMetric(uiCom, "failure")
- m.BadChanges = append(m.BadChanges, v)
- }
- }
- res0 := baseRes
- if res0 == nil {
- var err error
- res0, err = rc.NextForComparison(com.Num, builder)
- if err != nil {
- return nil, err
- }
- if res0 == nil {
- continue
- }
- }
- changes := significantPerfChanges(pc, builder, res0, res1)
- changes = dedupPerfChanges(changes)
- for _, ch := range changes {
- v := new(perfChangesChange)
- v.Builder = builder
- v.Benchmark, v.Procs = splitBench(ch.Bench)
- v.diff = ch.Diff
- v.Val = fmt.Sprintf("%+.2f%%", ch.Diff)
- v.Hint = fmt.Sprintf("%v/%v", builder, ch.Bench)
- v.Link = fmt.Sprintf("perfdetail?commit=%v&commit0=%v&builder=%v&benchmark=%v", com.Hash, res0.CommitHash, builder, v.Benchmark)
- m := findMetric(uiCom, ch.Metric)
- if v.diff > 0 {
- v.Style = "bad"
- m.BadChanges = append(m.BadChanges, v)
- } else {
- v.Style = "good"
- m.GoodChanges = append(m.GoodChanges, v)
- }
- }
- }
-
- // Sort metrics and changes.
- for _, m := range uiCom.Metrics {
- sort.Sort(m.GoodChanges)
- sort.Sort(m.BadChanges)
- }
- sort.Sort(uiCom.Metrics)
- // Need at least one metric for UI.
- if len(uiCom.Metrics) == 0 {
- uiCom.Metrics = append(uiCom.Metrics, &perfChangesMetric{})
- }
- uiCom.Metrics[0].First = true
- return uiCom, nil
-}
-
-// Find builder-procs with the maximum absolute diff for every benchmark-metric, drop the rest.
-func dedupPerfChanges(changes []*PerfChange) (deduped []*PerfChange) {
- maxDiff := make(map[string]float64)
- maxBench := make(map[string]string)
- // First, find the maximum.
- for _, ch := range changes {
- bench, _ := splitBench(ch.Bench)
- k := bench + "|" + ch.Metric
- v := ch.Diff
- if v < 0 {
- v = -v
- }
- if maxDiff[k] < v {
- maxDiff[k] = v
- maxBench[k] = ch.Builder + "|" + ch.Bench
- }
- }
- // Then, remove the rest.
- for _, ch := range changes {
- bench, _ := splitBench(ch.Bench)
- k := bench + "|" + ch.Metric
- if maxBench[k] == ch.Builder+"|"+ch.Bench {
- deduped = append(deduped, ch)
- }
- }
- return
-}
-
-func findMetric(c *perfChangesCommit, metric string) *perfChangesMetric {
- for _, m := range c.Metrics {
- if m.Name == metric {
- return m
- }
- }
- m := new(perfChangesMetric)
- m.Name = metric
- c.Metrics = append(c.Metrics, m)
- return m
-}
-
-type uiPerfConfig struct {
- Builders []uiPerfConfigElem
- Benchmarks []uiPerfConfigElem
- Metrics []uiPerfConfigElem
- Procs []uiPerfConfigElem
- CommitsFrom []uiPerfConfigElem
- CommitsTo []uiPerfConfigElem
-}
-
-type uiPerfConfigElem struct {
- Name string
- Selected bool
-}
-
-var perfChangesTemplate = template.Must(
- template.New("perf_changes.html").Funcs(tmplFuncs).ParseFiles("build/perf_changes.html"),
-)
-
-type perfChangesData struct {
- Dashboard *Dashboard
- Pagination *Pagination
- Commits []*perfChangesCommit
-}
-
-type perfChangesCommit struct {
- *Commit
- IsSummary bool
- NumResults int
- Metrics perfChangesMetricSlice
-}
-
-type perfChangesMetric struct {
- Name string
- First bool
- BadChanges perfChangesChangeSlice
- GoodChanges perfChangesChangeSlice
-}
-
-type perfChangesChange struct {
- Builder string
- Benchmark string
- Link string
- Hint string
- Style string
- Val string
- Procs int
- diff float64
-}
-
-type perfChangesMetricSlice []*perfChangesMetric
-
-func (l perfChangesMetricSlice) Len() int { return len(l) }
-func (l perfChangesMetricSlice) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
-func (l perfChangesMetricSlice) Less(i, j int) bool {
- if l[i].Name == "failure" || l[j].Name == "failure" {
- return l[i].Name == "failure"
- }
- return l[i].Name < l[j].Name
-}
-
-type perfChangesChangeSlice []*perfChangesChange
-
-func (l perfChangesChangeSlice) Len() int { return len(l) }
-func (l perfChangesChangeSlice) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
-func (l perfChangesChangeSlice) Less(i, j int) bool {
- vi, vj := l[i].diff, l[j].diff
- if vi > 0 && vj > 0 {
- return vi > vj
- } else if vi < 0 && vj < 0 {
- return vi < vj
- } else {
- panic("comparing positive and negative diff")
- }
-}
diff --git a/dashboard/app/build/perf_changes.html b/dashboard/app/build/perf_changes.html
deleted file mode 100644
index 24f0534..0000000
--- a/dashboard/app/build/perf_changes.html
+++ /dev/null
@@ -1,89 +0,0 @@
-<!doctype html>
-<html>
-<head>
- <title>{{$.Dashboard.Name}} Dashboard</title>
- <link rel="stylesheet" href="/static/style.css"/>
-</head>
-<body>
- <header id="topbar">
- <h1>Go Dashboard</h1>
- <nav>
- <a href="{{$.Dashboard.RelPath}}">Test</a>
- <a href="{{$.Dashboard.RelPath}}perf">Perf</a>
- <a href="{{$.Dashboard.RelPath}}perfgraph">Graphs</a>
- </nav>
- <div class="clear"></div>
- </header>
-
- <div class="page">
- <div class="build-container">
- <table class="build">
- <colgroup class="col-hash"></colgroup>
- <colgroup class="col-numresults"></colgroup>
- <colgroup class="col-metric"></colgroup>
- <colgroup class="col-result"></colgroup>
- <colgroup class="col-result"></colgroup>
- <colgroup class="col-user"></colgroup>
- <colgroup class="col-time"></colgroup>
- <colgroup class="col-desc"></colgroup>
- <tbody>
- {{range $c := $.Commits}}
- {{range $m := $c.Metrics}}
- {{if $m.First}}
- <tr class="row-commit">
- {{if $c.IsSummary}}
- <td class="hash">tip vs {{$c.ParentHash}}</td>
- {{else}}
- <td class="hash"><a href="{{repoURL $.Dashboard.Name $c.Hash ""}}">{{shortHash $c.Hash}}</a></td>
- {{end}}
- <td class="numresults">{{$c.NumResults}}</td>
- {{else}}
- <tr>
- <td class="user">&nbsp;</td>
- <td class="numresults">&nbsp;</td>
- {{end}}
- <td>{{$m.Name}}</td>
- <td>
- {{range $ch := $m.BadChanges}}
- <a class="{{$ch.Style}}" href="{{$ch.Link}}" title="{{$ch.Hint}}">{{$ch.Val}}</a> &nbsp;
- {{end}}
- </td>
- <td>
- {{range $ch := $m.GoodChanges}}
- <a class="{{$ch.Style}}" href="{{$ch.Link}}" title="{{$ch.Hint}}">{{$ch.Val}}</a> &nbsp;
- {{end}}
- </td>
- {{if $m.First}}
- <td class="user" title="{{$c.User}}">{{shortUser $c.User}}</td>
- <td class="time">{{$c.Time.Format "Mon 02 Jan 15:04"}}</td>
- <td class="desc" title="{{$c.Desc}}">{{shortDesc $c.Desc}}</td>
- {{else}}
- <td class="user">&nbsp;</td>
- <td class="time">&nbsp;</td>
- <td class="desc">&nbsp;</td>
- {{end}}
- </tr>
- {{end}}
- {{if $c.IsSummary}}
- <tr class="row-commit"><td>---</td></tr>
- {{end}}
- {{end}}
- </tbody>
- </table>
-
- {{with $.Pagination}}
- <div class="paginate">
- <nav>
- <a {{if .HasPrev}}href="?page={{.Prev}}"{{else}}class="inactive"{{end}}>newer</a>
- <a {{if .Next}}href="?page={{.Next}}"{{else}}class="inactive"{{end}}>older</a>
- <a {{if .HasPrev}}href="?"{{else}}class="inactive"{{end}}>latest</a>
- <a href="https://code.google.com/p/go-wiki/wiki/PerfDashboard">Help</a>
- </nav>
- </div>
- {{end}}
-
- </div>
- <div class="clear"></div>
-</div>
-</body>
-</html>
diff --git a/dashboard/app/build/perf_detail.go b/dashboard/app/build/perf_detail.go
deleted file mode 100644
index f8d9bfd..0000000
--- a/dashboard/app/build/perf_detail.go
+++ /dev/null
@@ -1,221 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build appengine
-
-package build
-
-import (
- "bytes"
- "fmt"
- "html/template"
- "net/http"
- "sort"
- "strconv"
- "strings"
-
- "appengine"
- "appengine/datastore"
-)
-
-func init() {
- for _, d := range dashboards {
- http.HandleFunc(d.RelPath+"perfdetail", perfDetailUIHandler)
- }
-}
-
-func perfDetailUIHandler(w http.ResponseWriter, r *http.Request) {
- d := dashboardForRequest(r)
- c := d.Context(appengine.NewContext(r))
- pc, err := GetPerfConfig(c, r)
- if err != nil {
- logErr(w, r, err)
- return
- }
-
- kind := r.FormValue("kind")
- builder := r.FormValue("builder")
- benchmark := r.FormValue("benchmark")
- if kind == "" {
- kind = "benchmark"
- }
- if kind != "benchmark" && kind != "builder" {
- logErr(w, r, fmt.Errorf("unknown kind %s", kind))
- return
- }
-
- // Fetch the new commit.
- com1 := new(Commit)
- com1.Hash = r.FormValue("commit")
- if hash, ok := knownTags[com1.Hash]; ok {
- com1.Hash = hash
- }
- if err := datastore.Get(c, com1.Key(c), com1); err != nil {
- logErr(w, r, fmt.Errorf("failed to fetch commit %s: %v", com1.Hash, err))
- return
- }
- // Fetch the associated perf result.
- ress1 := &PerfResult{CommitHash: com1.Hash}
- if err := datastore.Get(c, ress1.Key(c), ress1); err != nil {
- logErr(w, r, fmt.Errorf("failed to fetch perf result %s: %v", com1.Hash, err))
- return
- }
-
- // Fetch the old commit.
- var ress0 *PerfResult
- com0 := new(Commit)
- com0.Hash = r.FormValue("commit0")
- if hash, ok := knownTags[com0.Hash]; ok {
- com0.Hash = hash
- }
- if com0.Hash != "" {
- // Have an exact commit hash, fetch directly.
- if err := datastore.Get(c, com0.Key(c), com0); err != nil {
- logErr(w, r, fmt.Errorf("failed to fetch commit %s: %v", com0.Hash, err))
- return
- }
- ress0 = &PerfResult{CommitHash: com0.Hash}
- if err := datastore.Get(c, ress0.Key(c), ress0); err != nil {
- logErr(w, r, fmt.Errorf("failed to fetch perf result for %s: %v", com0.Hash, err))
- return
- }
- } else {
- // Don't have the commit hash, find the previous commit to compare.
- rc := MakePerfResultCache(c, com1, false)
- ress0, err = rc.NextForComparison(com1.Num, "")
- if err != nil {
- logErr(w, r, err)
- return
- }
- if ress0 == nil {
- logErr(w, r, fmt.Errorf("no previous commit with results"))
- return
- }
- // Now that we know the right result, fetch the commit.
- com0.Hash = ress0.CommitHash
- if err := datastore.Get(c, com0.Key(c), com0); err != nil {
- logErr(w, r, fmt.Errorf("failed to fetch commit %s: %v", com0.Hash, err))
- return
- }
- }
-
- res0 := ress0.ParseData()
- res1 := ress1.ParseData()
- var benchmarks []*uiPerfDetailBenchmark
- var list []string
- if kind == "builder" {
- list = pc.BenchmarksForBuilder(builder)
- } else {
- list = pc.BuildersForBenchmark(benchmark)
- }
- for _, other := range list {
- if kind == "builder" {
- benchmark = other
- } else {
- builder = other
- }
- var procs []*uiPerfDetailProcs
- allProcs := pc.ProcList(builder)
- for _, p := range allProcs {
- BenchProcs := fmt.Sprintf("%v-%v", benchmark, p)
- if res0[builder] == nil || res0[builder][BenchProcs] == nil {
- continue
- }
- pp := &uiPerfDetailProcs{Procs: p}
- for metric, val := range res0[builder][BenchProcs].Metrics {
- var pm uiPerfDetailMetric
- pm.Name = metric
- pm.Val0 = fmt.Sprintf("%v", val)
- val1 := uint64(0)
- if res1[builder] != nil && res1[builder][BenchProcs] != nil {
- val1 = res1[builder][BenchProcs].Metrics[metric]
- }
- pm.Val1 = fmt.Sprintf("%v", val1)
- v0 := val
- v1 := val1
- valf := perfDiff(v0, v1)
- pm.Delta = fmt.Sprintf("%+.2f%%", valf)
- pm.Style = perfChangeStyle(pc, valf, builder, BenchProcs, pm.Name)
- pp.Metrics = append(pp.Metrics, pm)
- }
- sort.Sort(pp.Metrics)
- for artifact, hash := range res0[builder][BenchProcs].Artifacts {
- var pm uiPerfDetailMetric
- pm.Val0 = fmt.Sprintf("%v", artifact)
- pm.Link0 = fmt.Sprintf("log/%v", hash)
- pm.Val1 = fmt.Sprintf("%v", artifact)
- if res1[builder] != nil && res1[builder][BenchProcs] != nil && res1[builder][BenchProcs].Artifacts[artifact] != "" {
- pm.Link1 = fmt.Sprintf("log/%v", res1[builder][BenchProcs].Artifacts[artifact])
- }
- pp.Metrics = append(pp.Metrics, pm)
- }
- procs = append(procs, pp)
- }
- benchmarks = append(benchmarks, &uiPerfDetailBenchmark{other, procs})
- }
-
- cfg := new(uiPerfConfig)
- for _, v := range pc.BuildersForBenchmark("") {
- cfg.Builders = append(cfg.Builders, uiPerfConfigElem{v, v == builder})
- }
- for _, v := range pc.BenchmarksForBuilder("") {
- cfg.Benchmarks = append(cfg.Benchmarks, uiPerfConfigElem{v, v == benchmark})
- }
-
- data := &uiPerfDetailTemplateData{d, cfg, kind == "builder", com0, com1, benchmarks}
-
- var buf bytes.Buffer
- if err := uiPerfDetailTemplate.Execute(&buf, data); err != nil {
- logErr(w, r, err)
- return
- }
-
- buf.WriteTo(w)
-}
-
-func perfResultSplit(s string) (builder string, benchmark string, procs int) {
- s1 := strings.Split(s, "|")
- s2 := strings.Split(s1[1], "-")
- procs, _ = strconv.Atoi(s2[1])
- return s1[0], s2[0], procs
-}
-
-type uiPerfDetailTemplateData struct {
- Dashboard *Dashboard
- Config *uiPerfConfig
- KindBuilder bool
- Commit0 *Commit
- Commit1 *Commit
- Benchmarks []*uiPerfDetailBenchmark
-}
-
-type uiPerfDetailBenchmark struct {
- Name string
- Procs []*uiPerfDetailProcs
-}
-
-type uiPerfDetailProcs struct {
- Procs int
- Metrics uiPerfDetailMetrics
-}
-
-type uiPerfDetailMetric struct {
- Name string
- Val0 string
- Val1 string
- Link0 string
- Link1 string
- Delta string
- Style string
-}
-
-type uiPerfDetailMetrics []uiPerfDetailMetric
-
-func (l uiPerfDetailMetrics) Len() int { return len(l) }
-func (l uiPerfDetailMetrics) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
-func (l uiPerfDetailMetrics) Less(i, j int) bool { return l[i].Name < l[j].Name }
-
-var uiPerfDetailTemplate = template.Must(
- template.New("perf_detail.html").Funcs(tmplFuncs).ParseFiles("build/perf_detail.html"),
-)
diff --git a/dashboard/app/build/perf_detail.html b/dashboard/app/build/perf_detail.html
deleted file mode 100644
index 18b3028..0000000
--- a/dashboard/app/build/perf_detail.html
+++ /dev/null
@@ -1,101 +0,0 @@
-<!doctype html>
-<html>
-<head>
- <title>{{$.Dashboard.Name}} Dashboard</title>
- <link rel="stylesheet" href="/static/style.css"/>
- <script type="text/javascript">
- function kindBuilder() {
- document.getElementById('checkBuilder').checked = true;
- document.getElementById('controlBuilder').style.display='inline';
- document.getElementById('controlBenchmark').style.display='none';
- }
- function kindBenchmark() {
- document.getElementById('checkBenchmark').checked = true;
- document.getElementById('controlBenchmark').style.display='inline';
- document.getElementById('controlBuilder').style.display='none';
- }
- window.onload = {{if $.KindBuilder}} kindBuilder {{else}} kindBenchmark {{end}};
- </script>
-</head>
-<body>
- <header id="topbar">
- <h1>Go Dashboard</h1>
- <nav>
- <a href="{{$.Dashboard.RelPath}}">Test</a>
- <a href="{{$.Dashboard.RelPath}}perf">Perf</a>
- <a href="{{$.Dashboard.RelPath}}perfgraph">Graphs</a>
- </nav>
- <div class="clear"></div>
- </header>
-
- <div class="page">
- <div class="diff-container">
- <div class="diff-meta">
- <form>
- <div><b>New: </b><input type="edit" name="commit" value="{{$.Commit1.Hash}}" /> {{shortUser $.Commit1.User}} {{$.Commit1.Time.Format "Mon 02 Jan 15:04"}} {{shortDesc $.Commit1.Desc}} </div>
- <div><b>Old: </b><input type="edit" name="commit0" value="{{$.Commit0.Hash}}" /> {{shortUser $.Commit0.User}} {{$.Commit0.Time.Format "Mon 02 Jan 15:04"}} {{shortDesc $.Commit0.Desc}} </div>
- <div>
-          <input id="checkBuilder" type="radio" name="kind" value="builder" required onclick="kindBuilder()"> builder
-          <input id="checkBenchmark" type="radio" name="kind" value="benchmark" required onclick="kindBenchmark()"> benchmark
- <select id="controlBuilder" name="builder">
- {{range $.Config.Builders}}
- <option {{if .Selected}}selected{{end}}>{{.Name}}</option>
- {{end}}
- </select>
- <select id="controlBenchmark" name="benchmark">
- {{range $.Config.Benchmarks}}
- <option {{if .Selected}}selected{{end}}>{{.Name}}</option>
- {{end}}
- </select>
- <input type="submit" value="Refresh" />
- <a href="https://code.google.com/p/go-wiki/wiki/PerfDashboard">Help</a>
- </div>
- </form>
- </div>
- <p></p>
-
- {{range $b := $.Benchmarks}}
- <div class="diff-benchmark">
- <h2>{{$b.Name}}</h2>
- {{range $p := $b.Procs}}
- <div class="diff">
- <h1>GOMAXPROCS={{$p.Procs}}</h1>
- <table>
- <thead>
- <tr>
- <th>Metric</th>
- <th>old</th>
- <th>new</th>
- <th>delta</th>
- </tr>
- </thead>
- <tbody>
- {{range $m := $p.Metrics}}
- <tr>
- <td class="metric">{{$m.Name}}</td>
- {{if $m.Link0}}
-              <td><a href="{{$.Dashboard.RelPath}}{{$m.Link0}}">{{$m.Val0}}</a></td>
- {{else}}
- <td>{{$m.Val0}}</td>
- {{end}}
- {{if $m.Link1}}
-              <td><a href="{{$.Dashboard.RelPath}}{{$m.Link1}}">{{$m.Val1}}</a></td>
- {{else}}
- <td>{{$m.Val1}}</td>
- {{end}}
- <td class="result"><span class="{{$m.Style}}">{{$m.Delta}}</span></td>
- </tr>
- {{end}}
- </tbody>
- </table>
- </div>
- {{end}}
- </div>
- {{end}}
-
- <div class="clear"></div>
- </div>
- <div class="clear"></div>
- </div>
-</body>
-</html>
diff --git a/dashboard/app/build/perf_graph.go b/dashboard/app/build/perf_graph.go
deleted file mode 100644
index 81eb5e1..0000000
--- a/dashboard/app/build/perf_graph.go
+++ /dev/null
@@ -1,270 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build appengine
-
-package build
-
-import (
- "bytes"
- "fmt"
- "html/template"
- "net/http"
- "strconv"
-
- "appengine"
- "appengine/datastore"
-)
-
-func init() {
- for _, d := range dashboards {
- http.HandleFunc(d.RelPath+"perfgraph", perfGraphHandler)
- }
-}
-
-func perfGraphHandler(w http.ResponseWriter, r *http.Request) {
- d := dashboardForRequest(r)
- c := d.Context(appengine.NewContext(r))
- pc, err := GetPerfConfig(c, r)
- if err != nil {
- logErr(w, r, err)
- return
- }
- allBuilders := pc.BuildersForBenchmark("")
- allBenchmarks := pc.BenchmarksForBuilder("")
- allMetrics := pc.MetricsForBenchmark("")
- allProcs := pc.ProcList("")
- r.ParseForm()
- selBuilders := r.Form["builder"]
- selBenchmarks := r.Form["benchmark"]
- selMetrics := r.Form["metric"]
- selProcs := r.Form["procs"]
- if len(selBuilders) == 0 {
- selBuilders = append(selBuilders, allBuilders[0])
- }
- if len(selBenchmarks) == 0 {
- selBenchmarks = append(selBenchmarks, "json")
- }
- if len(selMetrics) == 0 {
- selMetrics = append(selMetrics, "time")
- }
- if len(selProcs) == 0 {
- selProcs = append(selProcs, "1")
- }
- commitFrom := r.FormValue("commit-from")
- if commitFrom == "" {
- commitFrom = lastRelease
- }
- commitTo := r.FormValue("commit-to")
- if commitTo == "" {
- commitTo = "tip"
- }
- // TODO(dvyukov): validate input
-
- // Figure out start and end commit from commitFrom/commitTo.
- startCommitNum := 0
- endCommitNum := 0
- {
- comFrom := &Commit{Hash: knownTags[commitFrom]}
- if err := datastore.Get(c, comFrom.Key(c), comFrom); err != nil {
- logErr(w, r, err)
- return
- }
- startCommitNum = comFrom.Num
-
- retry:
- if commitTo == "tip" {
- p, err := GetPackage(c, "")
- if err != nil {
- logErr(w, r, err)
- return
- }
- endCommitNum = p.NextNum
- } else {
- comTo := &Commit{Hash: knownTags[commitTo]}
- if err := datastore.Get(c, comTo.Key(c), comTo); err != nil {
- logErr(w, r, err)
- return
- }
- endCommitNum = comTo.Num + 1
- }
- if endCommitNum <= startCommitNum {
-			// The user probably selected something like from:go1.3 to:go1.2; retry with commitTo set to "tip".
- if commitTo == "tip" {
- logErr(w, r, fmt.Errorf("no commits to display (%v-%v)", commitFrom, commitTo))
- return
- }
- commitTo = "tip"
- goto retry
- }
- }
- commitsToDisplay := endCommitNum - startCommitNum
-
- present := func(set []string, s string) bool {
- for _, s1 := range set {
- if s1 == s {
- return true
- }
- }
- return false
- }
-
- cfg := &uiPerfConfig{}
- for _, v := range allBuilders {
- cfg.Builders = append(cfg.Builders, uiPerfConfigElem{v, present(selBuilders, v)})
- }
- for _, v := range allBenchmarks {
- cfg.Benchmarks = append(cfg.Benchmarks, uiPerfConfigElem{v, present(selBenchmarks, v)})
- }
- for _, v := range allMetrics {
- cfg.Metrics = append(cfg.Metrics, uiPerfConfigElem{v, present(selMetrics, v)})
- }
- for _, v := range allProcs {
- cfg.Procs = append(cfg.Procs, uiPerfConfigElem{strconv.Itoa(v), present(selProcs, strconv.Itoa(v))})
- }
- for k := range knownTags {
- cfg.CommitsFrom = append(cfg.CommitsFrom, uiPerfConfigElem{k, commitFrom == k})
- }
- for k := range knownTags {
- cfg.CommitsTo = append(cfg.CommitsTo, uiPerfConfigElem{k, commitTo == k})
- }
- cfg.CommitsTo = append(cfg.CommitsTo, uiPerfConfigElem{"tip", commitTo == "tip"})
-
- var vals [][]float64
- var hints [][]string
- var annotations [][]string
- var certainty [][]bool
- var headers []string
- commits2, err := GetCommits(c, startCommitNum, commitsToDisplay)
- if err != nil {
- logErr(w, r, err)
- return
- }
- for _, builder := range selBuilders {
- for _, metric := range selMetrics {
- for _, benchmark := range selBenchmarks {
- for _, procs := range selProcs {
- benchProcs := fmt.Sprintf("%v-%v", benchmark, procs)
- vv, err := GetPerfMetricsForCommits(c, builder, benchProcs, metric, startCommitNum, commitsToDisplay)
- if err != nil {
- logErr(w, r, err)
- return
- }
- hasdata := false
- for _, v := range vv {
- if v != 0 {
- hasdata = true
- }
- }
- if hasdata {
- noise := pc.NoiseLevel(builder, benchProcs, metric)
- descBuilder := "/" + builder
- descBenchmark := "/" + benchProcs
- descMetric := "/" + metric
- if len(selBuilders) == 1 {
- descBuilder = ""
- }
- if len(selBenchmarks) == 1 && len(selProcs) == 1 {
- descBenchmark = ""
- }
- if len(selMetrics) == 1 && (len(selBuilders) > 1 || len(selBenchmarks) > 1 || len(selProcs) > 1) {
- descMetric = ""
- }
- desc := fmt.Sprintf("%v%v%v", descBuilder, descBenchmark, descMetric)[1:]
- hh := make([]string, commitsToDisplay)
- ann := make([]string, commitsToDisplay)
- valf := make([]float64, commitsToDisplay)
- cert := make([]bool, commitsToDisplay)
- firstval := uint64(0)
- lastval := uint64(0)
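- // Normalize each series to its first non-zero value so all series
- // share one scale; gaps reuse the last value but are marked uncertain.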
- for i, v := range vv {
- cert[i] = true
- if v == 0 {
- if lastval == 0 {
- continue
- }
- cert[i] = false
- v = lastval
- }
- if firstval == 0 {
- firstval = v
- }
- valf[i] = float64(v) / float64(firstval)
- if cert[i] {
- d := ""
- if lastval != 0 {
- diff := perfDiff(lastval, v)
- d = fmt.Sprintf(" (%+.02f%%)", diff)
- if !isNoise(diff, noise) {
- ann[i] = fmt.Sprintf("%+.02f%%", diff)
- }
- }
- hh[i] = fmt.Sprintf("%v%v", v, d)
- } else {
- hh[i] = "NO DATA"
- }
- lastval = v
- }
- vals = append(vals, valf)
- hints = append(hints, hh)
- annotations = append(annotations, ann)
- certainty = append(certainty, cert)
- headers = append(headers, desc)
- }
- }
- }
- }
- }
-
- var commits []perfGraphCommit
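- // Emit one row per commit that was actually benchmarked; Id is a
- // dense index used as the x-axis value.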
- if len(vals) != 0 && len(vals[0]) != 0 {
- idx := 0
- for i := range vals[0] {
- com := commits2[i]
- if com == nil || !com.NeedsBenchmarking {
- continue
- }
- c := perfGraphCommit{Id: idx, Name: fmt.Sprintf("%v (%v)", com.Desc, com.Time.Format("Jan 2, 2006 15:04"))}
- idx++
- for j := range vals {
- c.Vals = append(c.Vals, perfGraphValue{float64(vals[j][i]), certainty[j][i], hints[j][i], annotations[j][i]})
- }
- commits = append(commits, c)
- }
- }
-
- data := &perfGraphData{d, cfg, headers, commits}
-
- var buf bytes.Buffer
- if err := perfGraphTemplate.Execute(&buf, data); err != nil {
- logErr(w, r, err)
- return
- }
-
- buf.WriteTo(w)
-}
-
-var perfGraphTemplate = template.Must(
- template.New("perf_graph.html").ParseFiles("build/perf_graph.html"),
-)
-
-type perfGraphData struct {
- Dashboard *Dashboard
- Config *uiPerfConfig
- Headers []string
- Commits []perfGraphCommit
-}
-
-type perfGraphCommit struct {
- Id int
- Name string
- Vals []perfGraphValue
-}
-
-type perfGraphValue struct {
- Val float64
- Certainty bool
- Hint string
- Ann string
-}
diff --git a/dashboard/app/build/perf_graph.html b/dashboard/app/build/perf_graph.html
deleted file mode 100644
index da1c0d0..0000000
--- a/dashboard/app/build/perf_graph.html
+++ /dev/null
@@ -1,120 +0,0 @@
-<!doctype html>
-<html>
- <head>
- <title>{{$.Dashboard.Name}} Dashboard</title>
- <link rel="stylesheet" href="/static/style.css"/>
- <style>
- .graph-container { background: #eee; }
- </style>
-
- <script type="text/javascript" src="https://www.google.com/jsapi"></script>
- <script type="text/javascript">
- google.load("visualization", "1", {packages:["corechart"]});
- google.setOnLoadCallback(drawCharts);
- function drawCharts() {
- var data = new google.visualization.DataTable();
- data.addColumn({type: 'number', label: 'Commit'});
- data.addColumn({type: 'number'});
- data.addColumn({type: 'string', role: 'tooltip'});
- {{range $.Headers}}
- data.addColumn({type: 'number', label: '{{.}}'});
- data.addColumn({type: 'boolean', role: 'certainty'});
- data.addColumn({type: 'string', role: 'tooltip'});
- data.addColumn({type: 'string', role: 'annotation'});
- {{end}}
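- // Each row holds the commit index, a constant series carrying the
- // commit tooltip, then value/certainty/tooltip/annotation per header,
- // matching the columns declared above.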
- data.addRows([
- {{range $.Commits}}
- [ {{.Id}}, 1, "{{.Name}}",
- {{range .Vals}}
- {{if .Val}}
- {{.Val}}, {{.Certainty}}, '{{.Hint}}', '{{.Ann}}',
- {{else}}
- ,,,,
- {{end}}
- {{end}}
- ],
- {{end}}
- ]);
- new google.visualization.LineChart(document.getElementById('graph_div')).
- draw(data, {
- width: "100%",
- height: 700,
- legend: {position: "bottom"},
- focusTarget: "category",
- hAxis: {textPosition: "none"},
- chartArea: {left: "10%", top: "5%", width: "85%", height:"80%"},
- explorer: {axis: 'horizontal', maxZoomIn: 0, maxZoomOut: 1, zoomDelta: 1.2, keepInBounds: true}
- })
- }
- </script>
-</head>
-<body>
-
- <header id="topbar">
- <h1>Go Dashboard</h1>
- <nav>
- <a href="{{$.Dashboard.RelPath}}">Test</a>
- <a href="{{$.Dashboard.RelPath}}perf">Perf</a>
- <a href="{{$.Dashboard.RelPath}}perfgraph">Graphs</a>
- </nav>
- <div class="clear"></div>
- </header>
-
- <div class="page">
- <div id="graph_div" class="main-content graph-container">
- </div>
-
- <aside>
- <form>
- <div class="panel">
- <h1>Builders</h1>
- {{range $.Config.Builders}}
- <input type="checkbox" name="builder" value="{{.Name}}" {{if .Selected}}checked{{end}}>{{.Name}}</input><br>
- {{end}}
- </div>
-
- <div class="panel">
- <h1>Benchmarks</h1>
- {{range $.Config.Benchmarks}}
- <input type="checkbox" name="benchmark" value="{{.Name}}" {{if .Selected}}checked{{end}}>{{.Name}}</input><br>
- {{end}}
- </div>
-
- <div class="panel">
- <h1>Procs</h1>
- {{range $.Config.Procs}}
- <input type="checkbox" name="procs" value="{{.Name}}" {{if .Selected}}checked{{end}}>{{.Name}}</input><br>
- {{end}}
- </div>
-
- <div class="panel">
- <h1>Metrics</h1>
- {{range $.Config.Metrics}}
- <input type="checkbox" name="metric" value="{{.Name}}" {{if .Selected}}checked{{end}}>{{.Name}}</input><br>
- {{end}}
- </div>
-
- <div class="panel">
- <h1>Commits</h1>
- <b>From:</b>
- <select required name="commit-from">
- {{range $.Config.CommitsFrom}}
- <option {{if .Selected}}selected{{end}}>{{.Name}}</option>
- {{end}}
- </select>
- <b>To:</b>
- <select required name="commit-to">
- {{range $.Config.CommitsTo}}
- <option {{if .Selected}}selected{{end}}>{{.Name}}</option>
- {{end}}
- </select>
- </div>
-
- <input class="button" type="submit" value="Refresh" name="refresh"/>
- <a href="https://code.google.com/p/go-wiki/wiki/PerfDashboard">Help</a>
- </form>
- </aside>
- <div class="clear"></div>
- </div>
-</body>
-</html>
diff --git a/dashboard/app/build/perf_learn.go b/dashboard/app/build/perf_learn.go
deleted file mode 100644
index 683ba60..0000000
--- a/dashboard/app/build/perf_learn.go
+++ /dev/null
@@ -1,186 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build appengine
-
-package build
-
-import (
- "bytes"
- "fmt"
- "html/template"
- "net/http"
- "sort"
-
- "appengine"
- "appengine/datastore"
-)
-
-func init() {
- http.HandleFunc("/perflearn", perfLearnHandler)
-}
-
-const (
- learnPercentile = 0.95
- learnSignalMultiplier = 1.1
- learnMinSignal = 0.5
-)
-
-func perfLearnHandler(w http.ResponseWriter, r *http.Request) {
- d := dashboardForRequest(r)
- c := d.Context(appengine.NewContext(r))
-
- pc, err := GetPerfConfig(c, r)
- if err != nil {
- logErr(w, r, err)
- return
- }
-
- p, err := GetPackage(c, "")
- if err != nil {
- logErr(w, r, err)
- return
- }
-
- update := r.FormValue("update") != ""
- noise := make(map[string]string)
-
- data := &perfLearnData{}
-
- commits, err := GetCommits(c, 0, p.NextNum)
- if err != nil {
- logErr(w, r, err)
- return
- }
-
- for _, builder := range pc.BuildersForBenchmark("") {
- for _, benchmark := range pc.BenchmarksForBuilder(builder) {
- for _, metric := range pc.MetricsForBenchmark(benchmark) {
- for _, procs := range pc.ProcList(builder) {
- values, err := GetPerfMetricsForCommits(c, builder, fmt.Sprintf("%v-%v", benchmark, procs), metric, 0, p.NextNum)
- if err != nil {
- logErr(w, r, err)
- return
- }
- var dd []float64
- last := uint64(0)
- for i, v := range values {
- if v == 0 {
- if com := commits[i]; com == nil || com.NeedsBenchmarking {
- last = 0
- }
- continue
- }
- if last != 0 {
- v1 := v
- if v1 < last {
- v1, last = last, v1
- }
- diff := float64(v1)/float64(last)*100 - 100
- dd = append(dd, diff)
- }
- last = v
- }
- if len(dd) == 0 {
- continue
- }
- sort.Float64s(dd)
-
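- // dd now holds per-commit percentage changes, sorted ascending; the
- // 95th percentile is the base noise level, and the signal threshold is
- // that value scaled by learnSignalMultiplier (at least learnMinSignal).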
- baseIdx := int(float64(len(dd)) * learnPercentile)
- baseVal := dd[baseIdx]
- signalVal := baseVal * learnSignalMultiplier
- if signalVal < learnMinSignal {
- signalVal = learnMinSignal
- }
- signalIdx := -1
- noiseNum := 0
- signalNum := 0
-
- var diffs []*perfLearnDiff
- for i, d := range dd {
- if d > 3*signalVal {
- d = 3 * signalVal
- }
- diffs = append(diffs, &perfLearnDiff{Num: i, Val: d})
- if signalIdx == -1 && d >= signalVal {
- signalIdx = i
- }
- if d < signalVal {
- noiseNum++
- } else {
- signalNum++
- }
- }
- diffs[baseIdx].Hint = "95%"
- if signalIdx != -1 {
- diffs[signalIdx].Hint = "signal"
- }
- diffs = diffs[len(diffs)*4/5:]
- name := fmt.Sprintf("%v/%v-%v/%v", builder, benchmark, procs, metric)
- data.Entries = append(data.Entries, &perfLearnEntry{len(data.Entries), name, baseVal, noiseNum, signalVal, signalNum, diffs})
-
- if len(dd) >= 100 || r.FormValue("force") != "" {
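- // Only trust thresholds learned from enough samples; accumulate them
- // in the "builder|benchmark-procs|metric=value" form that is stored
- // in PerfConfig.NoiseLevels below.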
- nname := fmt.Sprintf("%v|%v-%v", builder, benchmark, procs)
- n := noise[nname] + fmt.Sprintf("|%v=%.2f", metric, signalVal)
- noise[nname] = n
- }
- }
- }
- }
- }
-
- if update {
- var noiseLevels []string
- for k, v := range noise {
- noiseLevels = append(noiseLevels, k+v)
- }
- tx := func(c appengine.Context) error {
- pc, err := GetPerfConfig(c, r)
- if err != nil {
- return err
- }
- pc.NoiseLevels = noiseLevels
- if _, err := datastore.Put(c, PerfConfigKey(c), pc); err != nil {
- return fmt.Errorf("putting PerfConfig: %v", err)
- }
- return nil
- }
- if err := datastore.RunInTransaction(c, tx, nil); err != nil {
- logErr(w, r, err)
- return
- }
- }
-
- var buf bytes.Buffer
- if err := perfLearnTemplate.Execute(&buf, data); err != nil {
- logErr(w, r, err)
- return
- }
-
- buf.WriteTo(w)
-}
-
-var perfLearnTemplate = template.Must(
- template.New("perf_learn.html").Funcs(tmplFuncs).ParseFiles("build/perf_learn.html"),
-)
-
-type perfLearnData struct {
- Entries []*perfLearnEntry
-}
-
-type perfLearnEntry struct {
- Num int
- Name string
- BaseVal float64
- NoiseNum int
- SignalVal float64
- SignalNum int
- Diffs []*perfLearnDiff
-}
-
-type perfLearnDiff struct {
- Num int
- Val float64
- Hint string
-}
diff --git a/dashboard/app/build/perf_learn.html b/dashboard/app/build/perf_learn.html
deleted file mode 100644
index 294e957..0000000
--- a/dashboard/app/build/perf_learn.html
+++ /dev/null
@@ -1,45 +0,0 @@
-<!doctype html>
-<html>
- <head>
- <script type="text/javascript" src="https://www.google.com/jsapi"></script>
- <script type="text/javascript">
- google.load("visualization", "1", {packages:["corechart"]});
- google.setOnLoadCallback(drawCharts);
- function drawCharts() {
- {
- {{range $ent := $.Entries}}
- var data = new google.visualization.DataTable();
- data.addColumn('number', 'idx');
- data.addColumn('number', '95%');
- data.addColumn({type: 'boolean', role: 'certainty'});
- data.addColumn('number', 'signal');
- data.addColumn({type: 'boolean', role: 'certainty'});
- data.addColumn('number', 'diff');
- data.addColumn({type: 'string', role: 'annotation'});
- data.addRows([
- {{range .Diffs}} [{{.Num}}, {{$ent.BaseVal}}, false, {{$ent.SignalVal}}, false, {{.Val}}, '{{.Hint}}'], {{end}}
- ]);
- new google.visualization.LineChart(document.getElementById('graph{{.Num}}')).
- draw(data, {
- width: 600,
- height: 200,
- legend: {position: "none"},
- vAxis: {minValue: 0},
- chartArea: {left: "10%", top: "1%", width: "90%", height:"95%"}
- }
- )
- {{end}}
- }
- }
- </script>
- </head>
-
- <body>
- {{range $.Entries}}
- <p>
- {{.Name}}: base={{printf "%.2f[%d]" .BaseVal .NoiseNum}} signal={{printf "%.2f[%d]" .SignalVal .SignalNum}}
- <div id="graph{{.Num}}" style="width: 100px; height: 100px;"> </div>
- </p>
- {{end}}
- </body>
-</html>
diff --git a/dashboard/app/build/perf_notify.txt b/dashboard/app/build/perf_notify.txt
deleted file mode 100644
index c5e8ebe..0000000
--- a/dashboard/app/build/perf_notify.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-{{if .Commit}}Change {{shortHash .Commit.Hash}} caused perf changes on {{.Builder}}:
-
-{{.Commit.Desc}}
-
-http://code.google.com/p/go/source/detail?r={{shortHash .Commit.Hash}}
-{{else}}This change caused perf changes on {{.Builder}}:
-{{end}}
-{{range $b := .Benchmarks}}
-{{printf "%-16s %12s %12s %10s" $b.Name "old" "new" "delta"}}
-{{range $m := $b.Metrics}}{{printf "%-16s %12v %12v %+10.2f" $m.Name $m.Old $m.New $m.Delta}}
-{{end}}{{end}}
-{{.Url}}
-
diff --git a/dashboard/app/build/test.go b/dashboard/app/build/test.go
deleted file mode 100644
index 34a1c39..0000000
--- a/dashboard/app/build/test.go
+++ /dev/null
@@ -1,378 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build appengine
-
-package build
-
-// TODO(adg): test authentication
-// TODO(adg): refactor to use appengine/aetest instead
-
-import (
- "bytes"
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "net/http"
- "net/http/httptest"
- "net/url"
- "strings"
- "time"
-
- "appengine"
- "appengine/datastore"
-)
-
-func init() {
- http.HandleFunc("/buildtest", testHandler)
-}
-
-var testEntityKinds = []string{
- "Package",
- "Commit",
- "CommitRun",
- "Result",
- "PerfResult",
- "PerfMetricRun",
- "PerfConfig",
- "PerfTodo",
- "Log",
-}
-
-const testPkg = "golang.org/x/test"
-
-var testPackage = &Package{Name: "Test", Kind: "subrepo", Path: testPkg}
-
-var testPackages = []*Package{
- {Name: "Go", Path: ""},
- testPackage,
-}
-
-var tCommitTime = time.Now().Add(-time.Hour * 24 * 7)
-
-func tCommit(hash, parentHash, path string, bench bool) *Commit {
- tCommitTime = tCommitTime.Add(time.Hour) // each commit should have a different time
- return &Commit{
- PackagePath: path,
- Hash: hash,
- ParentHash: parentHash,
- Time: tCommitTime,
- User: "adg",
- Desc: "change description " + hash,
- NeedsBenchmarking: bench,
- }
-}
-
-var testRequests = []struct {
- path string
- vals url.Values
- req interface{}
- res interface{}
-}{
- // Packages
- {"/packages", url.Values{"kind": {"subrepo"}}, nil, []*Package{testPackage}},
-
- // Go repo
- {"/commit", nil, tCommit("0001", "0000", "", true), nil},
- {"/commit", nil, tCommit("0002", "0001", "", false), nil},
- {"/commit", nil, tCommit("0003", "0002", "", true), nil},
- {"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0003"}}},
- {"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0003"}}},
- {"/result", nil, &Result{Builder: "linux-386", Hash: "0001", OK: true}, nil},
- {"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0003"}}},
- {"/result", nil, &Result{Builder: "linux-386", Hash: "0002", OK: true}, nil},
- {"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0003"}}},
-
- // Other builders, to test the UI.
- {"/result", nil, &Result{Builder: "linux-amd64", Hash: "0001", OK: true}, nil},
- {"/result", nil, &Result{Builder: "linux-amd64-race", Hash: "0001", OK: true}, nil},
- {"/result", nil, &Result{Builder: "netbsd-386", Hash: "0001", OK: true}, nil},
- {"/result", nil, &Result{Builder: "plan9-386", Hash: "0001", OK: true}, nil},
- {"/result", nil, &Result{Builder: "windows-386", Hash: "0001", OK: true}, nil},
- {"/result", nil, &Result{Builder: "windows-amd64", Hash: "0001", OK: true}, nil},
- {"/result", nil, &Result{Builder: "windows-amd64-race", Hash: "0001", OK: true}, nil},
- {"/result", nil, &Result{Builder: "linux-amd64-temp", Hash: "0001", OK: true}, nil},
-
- // multiple builders
- {"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0003"}}},
- {"/result", nil, &Result{Builder: "linux-amd64", Hash: "0003", OK: true}, nil},
- {"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0003"}}},
- {"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0002"}}},
-
- // branches
- {"/commit", nil, tCommit("0004", "0003", "", false), nil},
- {"/commit", nil, tCommit("0005", "0002", "", false), nil},
- {"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0005"}}},
- {"/result", nil, &Result{Builder: "linux-386", Hash: "0005", OK: true}, nil},
- {"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0004"}}},
- {"/result", nil, &Result{Builder: "linux-386", Hash: "0004", OK: false}, nil},
- {"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0003"}}},
-
- // logs
- {"/result", nil, &Result{Builder: "linux-386", Hash: "0003", OK: false, Log: "test"}, nil},
- {"/log/a94a8fe5ccb19ba61c4c0873d391e987982fbbd3", nil, nil, "test"},
- {"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, nil},
-
- // repeat failure (shouldn't re-send mail)
- {"/result", nil, &Result{Builder: "linux-386", Hash: "0003", OK: false, Log: "test"}, nil},
-
- // non-Go repos
- {"/commit", nil, tCommit("1001", "0000", testPkg, false), nil},
- {"/commit", nil, tCommit("1002", "1001", testPkg, false), nil},
- {"/commit", nil, tCommit("1003", "1002", testPkg, false), nil},
- {"/todo", url.Values{"kind": {"build-package"}, "builder": {"linux-386"}, "packagePath": {testPkg}, "goHash": {"0001"}}, nil, &Todo{Kind: "build-package", Data: &Commit{Hash: "1003"}}},
- {"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-386", Hash: "1003", GoHash: "0001", OK: true}, nil},
- {"/todo", url.Values{"kind": {"build-package"}, "builder": {"linux-386"}, "packagePath": {testPkg}, "goHash": {"0001"}}, nil, &Todo{Kind: "build-package", Data: &Commit{Hash: "1002"}}},
- {"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-386", Hash: "1002", GoHash: "0001", OK: true}, nil},
- {"/todo", url.Values{"kind": {"build-package"}, "builder": {"linux-386"}, "packagePath": {testPkg}, "goHash": {"0001"}}, nil, &Todo{Kind: "build-package", Data: &Commit{Hash: "1001"}}},
- {"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-386", Hash: "1001", GoHash: "0001", OK: true}, nil},
- {"/todo", url.Values{"kind": {"build-package"}, "builder": {"linux-386"}, "packagePath": {testPkg}, "goHash": {"0001"}}, nil, nil},
- {"/todo", url.Values{"kind": {"build-package"}, "builder": {"linux-386"}, "packagePath": {testPkg}, "goHash": {"0002"}}, nil, &Todo{Kind: "build-package", Data: &Commit{Hash: "1003"}}},
-
- // re-build Go revision for stale subrepos
- {"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0005"}}},
- {"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-386", Hash: "1001", GoHash: "0005", OK: false, Log: "boo"}, nil},
- {"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, nil},
-
- // benchmarks
- // build-go-commit must have precedence over benchmark-go-commit
- {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0005"}}},
- // drain build-go-commit todo
- {"/result", nil, &Result{Builder: "linux-amd64", Hash: "0005", OK: true}, nil},
- {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0004"}}},
- {"/result", nil, &Result{Builder: "linux-amd64", Hash: "0004", OK: true}, nil},
- {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0002"}}},
- {"/result", nil, &Result{Builder: "linux-amd64", Hash: "0002", OK: true}, nil},
- // drain sub-repo todos
- {"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-amd64", Hash: "1001", GoHash: "0005", OK: false}, nil},
- {"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-amd64", Hash: "1002", GoHash: "0005", OK: false}, nil},
- {"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-amd64", Hash: "1003", GoHash: "0005", OK: false}, nil},
- // now we must get benchmark todo
- {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0003", PerfResults: []string{}}}},
- {"/perf-result", nil, &PerfRequest{Builder: "linux-amd64", Benchmark: "http", Hash: "0003", OK: true}, nil},
- {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0003", PerfResults: []string{"http"}}}},
- {"/perf-result", nil, &PerfRequest{Builder: "linux-amd64", Benchmark: "json", Hash: "0003", OK: true}, nil},
- {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0003", PerfResults: []string{"http", "json"}}}},
- {"/perf-result", nil, &PerfRequest{Builder: "linux-amd64", Benchmark: "meta-done", Hash: "0003", OK: true}, nil},
- {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0001", PerfResults: []string{}}}},
- {"/perf-result", nil, &PerfRequest{Builder: "linux-amd64", Benchmark: "http", Hash: "0001", OK: true}, nil},
- {"/perf-result", nil, &PerfRequest{Builder: "linux-amd64", Benchmark: "meta-done", Hash: "0001", OK: true}, nil},
- {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, nil},
- // create new commit, it must appear in todo
- {"/commit", nil, tCommit("0006", "0005", "", true), nil},
- // drain build-go-commit todo
- {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0006"}}},
- {"/result", nil, &Result{Builder: "linux-amd64", Hash: "0006", OK: true}, nil},
- {"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-amd64", Hash: "1003", GoHash: "0006", OK: false}, nil},
- {"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-amd64", Hash: "1002", GoHash: "0006", OK: false}, nil},
- {"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-amd64", Hash: "1001", GoHash: "0006", OK: false}, nil},
- // now we must get benchmark todo
- {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0006", PerfResults: []string{}}}},
- {"/perf-result", nil, &PerfRequest{Builder: "linux-amd64", Benchmark: "http", Hash: "0006", OK: true}, nil},
- {"/perf-result", nil, &PerfRequest{Builder: "linux-amd64", Benchmark: "meta-done", Hash: "0006", OK: true}, nil},
- {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, nil},
- // create new benchmark, all commits must re-appear in todo
- {"/commit", nil, tCommit("0007", "0006", "", true), nil},
- // drain build-go-commit todo
- {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0007"}}},
- {"/result", nil, &Result{Builder: "linux-amd64", Hash: "0007", OK: true}, nil},
- {"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-amd64", Hash: "1003", GoHash: "0007", OK: false}, nil},
- {"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-amd64", Hash: "1002", GoHash: "0007", OK: false}, nil},
- {"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-amd64", Hash: "1001", GoHash: "0007", OK: false}, nil},
- // now we must get benchmark todo
- {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0007", PerfResults: []string{}}}},
- {"/perf-result", nil, &PerfRequest{Builder: "linux-amd64", Benchmark: "bson", Hash: "0007", OK: true}, nil},
- {"/perf-result", nil, &PerfRequest{Builder: "linux-amd64", Benchmark: "meta-done", Hash: "0007", OK: true}, nil},
- {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0007", PerfResults: []string{"bson"}}}},
- {"/perf-result", nil, &PerfRequest{Builder: "linux-amd64", Benchmark: "meta-done", Hash: "0007", OK: true}, nil},
- {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0006", PerfResults: []string{"http"}}}},
- {"/perf-result", nil, &PerfRequest{Builder: "linux-amd64", Benchmark: "meta-done", Hash: "0006", OK: true}, nil},
- {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0001", PerfResults: []string{"http"}}}},
- {"/perf-result", nil, &PerfRequest{Builder: "linux-amd64", Benchmark: "meta-done", Hash: "0001", OK: true}, nil},
- {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0003", PerfResults: []string{"http", "json"}}}},
- {"/perf-result", nil, &PerfRequest{Builder: "linux-amd64", Benchmark: "meta-done", Hash: "0003", OK: true}, nil},
- {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, nil},
- // attach second builder
- {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0007"}}},
- // drain build-go-commit todo
- {"/result", nil, &Result{Builder: "linux-386", Hash: "0007", OK: true}, nil},
- {"/result", nil, &Result{Builder: "linux-386", Hash: "0006", OK: true}, nil},
- {"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-386", Hash: "1003", GoHash: "0007", OK: false}, nil},
- {"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-386", Hash: "1002", GoHash: "0007", OK: false}, nil},
- {"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-386", Hash: "1001", GoHash: "0007", OK: false}, nil},
- // now we must get benchmark todo
- {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0007"}}},
- {"/perf-result", nil, &PerfRequest{Builder: "linux-386", Benchmark: "meta-done", Hash: "0007", OK: true}, nil},
- {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0006"}}},
- {"/perf-result", nil, &PerfRequest{Builder: "linux-386", Benchmark: "meta-done", Hash: "0006", OK: true}, nil},
- {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0001"}}},
- {"/perf-result", nil, &PerfRequest{Builder: "linux-386", Benchmark: "meta-done", Hash: "0001", OK: true}, nil},
- {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0003"}}},
- {"/perf-result", nil, &PerfRequest{Builder: "linux-386", Benchmark: "meta-done", Hash: "0003", OK: true}, nil},
- {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-386"}}, nil, nil},
-}
-
-func testHandler(w http.ResponseWriter, r *http.Request) {
- if !appengine.IsDevAppServer() {
- fmt.Fprint(w, "These tests must be run under the dev_appserver.")
- return
- }
- c := appengine.NewContext(r)
- if err := nukeEntities(c, testEntityKinds); err != nil {
- logErr(w, r, err)
- return
- }
- if r.FormValue("nukeonly") != "" {
- fmt.Fprint(w, "OK")
- return
- }
-
- for _, p := range testPackages {
- if _, err := datastore.Put(c, p.Key(c), p); err != nil {
- logErr(w, r, err)
- return
- }
- }
-
- origReq := *r
- defer func() {
- // HACK: We need to clobber the original request (see below)
- // so make sure we fix it before exiting the handler.
- *r = origReq
- }()
- for i, t := range testRequests {
- c.Infof("running test %d %s vals='%q' req='%q' res='%q'", i, t.path, t.vals, t.req, t.res)
- errorf := func(format string, args ...interface{}) {
- fmt.Fprintf(w, "%d %s: ", i, t.path)
- fmt.Fprintf(w, format, args...)
- fmt.Fprintln(w)
- }
- var body io.ReadWriter
- if t.req != nil {
- body = new(bytes.Buffer)
- json.NewEncoder(body).Encode(t.req)
- }
- url := "http://" + domain + t.path
- if t.vals != nil {
- url += "?" + t.vals.Encode() + "&version=2"
- } else {
- url += "?version=2"
- }
- req, err := http.NewRequest("POST", url, body)
- if err != nil {
- logErr(w, r, err)
- return
- }
- if t.req != nil {
- req.Method = "POST"
- }
- req.Header = origReq.Header
- rec := httptest.NewRecorder()
-
- // Make the request
- *r = *req // HACK: App Engine uses the request pointer
- // as a map key to resolve Contexts.
- http.DefaultServeMux.ServeHTTP(rec, r)
-
- if rec.Code != 0 && rec.Code != 200 {
- errorf(rec.Body.String())
- return
- }
- c.Infof("response='%v'", rec.Body.String())
- resp := new(dashResponse)
-
- // If we're expecting a *Todo value,
- // prime the Response field with a Todo and a Commit inside it.
- if t.path == "/todo" {
- resp.Response = &Todo{Data: &Commit{}}
- }
-
- if strings.HasPrefix(t.path, "/log/") {
- resp.Response = rec.Body.String()
- } else {
- err := json.NewDecoder(rec.Body).Decode(resp)
- if err != nil {
- errorf("decoding response: %v", err)
- return
- }
- }
- if e, ok := t.res.(string); ok {
- g, ok := resp.Response.(string)
- if !ok {
- errorf("Response not string: %T", resp.Response)
- return
- }
- if g != e {
- errorf("response mismatch: got %q want %q", g, e)
- return
- }
- }
- if e, ok := t.res.(*Todo); ok {
- g, ok := resp.Response.(*Todo)
- if !ok {
- errorf("Response not *Todo: %T", resp.Response)
- return
- }
- if e.Data == nil && g.Data != nil {
- errorf("Response.Data should be nil, got: %v", g.Data)
- return
- }
- if g.Data == nil {
- errorf("Response.Data is nil, want: %v", e.Data)
- return
- }
- gd, ok := g.Data.(*Commit)
- if !ok {
- errorf("Response.Data not *Commit: %T", g.Data)
- return
- }
- if g.Kind != e.Kind {
- errorf("kinds don't match: got %q, want %q", g.Kind, e.Kind)
- return
- }
- ed := e.Data.(*Commit)
- if ed.Hash != gd.Hash {
- errorf("hashes don't match: got %q, want %q", gd.Hash, ed.Hash)
- return
- }
- if len(gd.PerfResults) != len(ed.PerfResults) {
- errorf("result data lengths don't match: got %v, want %v", len(gd.PerfResults), len(ed.PerfResults))
- return
- }
- for i := range gd.PerfResults {
- if gd.PerfResults[i] != ed.PerfResults[i] {
- errorf("result data %v doesn't match: got %v, want %v", i, gd.PerfResults[i], ed.PerfResults[i])
- return
- }
- }
- }
- if t.res == nil && resp.Response != nil {
- errorf("response mismatch: got %q expected <nil>", resp.Response)
- return
- }
- }
- fmt.Fprint(w, "PASS\nYou should see only one mail notification (for 0003/linux-386) in the dev_appserver logs.")
-}
-
-func nukeEntities(c appengine.Context, kinds []string) error {
- if !appengine.IsDevAppServer() {
- return errors.New("can't nuke production data")
- }
- var keys []*datastore.Key
- for _, kind := range kinds {
- q := datastore.NewQuery(kind).KeysOnly()
- for t := q.Run(c); ; {
- k, err := t.Next(nil)
- if err == datastore.Done {
- break
- }
- if err != nil {
- return err
- }
- keys = append(keys, k)
- }
- }
- return datastore.DeleteMulti(c, keys)
-}
diff --git a/dashboard/app/build/ui.go b/dashboard/app/build/ui.go
deleted file mode 100644
index c2cf7c5..0000000
--- a/dashboard/app/build/ui.go
+++ /dev/null
@@ -1,460 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// TODO(adg): packages at weekly/release
-// TODO(adg): some means to register new packages
-
-// +build appengine
-
-package build
-
-import (
- "bytes"
- "errors"
- "fmt"
- "html/template"
- "net/http"
- "regexp"
- "sort"
- "strconv"
- "strings"
-
- "cache"
-
- "appengine"
- "appengine/datastore"
-)
-
-func init() {
- for _, d := range dashboards {
- http.HandleFunc(d.RelPath, uiHandler)
- }
-}
-
-// uiHandler draws the build status page.
-func uiHandler(w http.ResponseWriter, r *http.Request) {
- d := dashboardForRequest(r)
- c := d.Context(appengine.NewContext(r))
- now := cache.Now(c)
- key := "build-ui"
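- // The memcache key encodes the page, branch, and repo so each view
- // variant is cached independently.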
-
- page, _ := strconv.Atoi(r.FormValue("page"))
- if page < 0 {
- page = 0
- }
- key += fmt.Sprintf("-page%v", page)
-
- branch := r.FormValue("branch")
- if branch != "" {
- key += "-branch-" + branch
- }
-
- repo := r.FormValue("repo")
- if repo != "" {
- key += "-repo-" + repo
- }
-
- var b []byte
- if cache.Get(r, now, key, &b) {
- w.Write(b)
- return
- }
-
- pkg := &Package{} // empty package is the main repository
- if repo != "" {
- var err error
- pkg, err = GetPackage(c, repo)
- if err != nil {
- logErr(w, r, err)
- return
- }
- }
- commits, err := dashCommits(c, pkg, page, branch)
- if err != nil {
- logErr(w, r, err)
- return
- }
- builders := commitBuilders(commits)
-
- var tipState *TagState
- if pkg.Kind == "" && page == 0 && (branch == "" || branch == "default") {
- // only show sub-repo state on first page of normal repo view
- tipState, err = TagStateByName(c, "tip")
- if err != nil {
- logErr(w, r, err)
- return
- }
- }
-
- p := &Pagination{}
- if len(commits) == commitsPerPage {
- p.Next = page + 1
- }
- if page > 0 {
- p.Prev = page - 1
- p.HasPrev = true
- }
- data := &uiTemplateData{d, pkg, commits, builders, tipState, p, branch}
-
- var buf bytes.Buffer
- if err := uiTemplate.Execute(&buf, data); err != nil {
- logErr(w, r, err)
- return
- }
-
- cache.Set(r, now, key, buf.Bytes())
-
- buf.WriteTo(w)
-}
-
-type Pagination struct {
- Next, Prev int
- HasPrev bool
-}
-
-// dashCommits gets a slice of the latest Commits to the current dashboard.
-// If page > 0 it paginates by commitsPerPage.
-func dashCommits(c appengine.Context, pkg *Package, page int, branch string) ([]*Commit, error) {
- offset := page * commitsPerPage
- q := datastore.NewQuery("Commit").
- Ancestor(pkg.Key(c)).
- Order("-Num")
-
- var commits []*Commit
- if branch == "" {
- _, err := q.Limit(commitsPerPage).Offset(offset).
- GetAll(c, &commits)
- return commits, err
- }
-
- // Look for commits on a specific branch.
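- // Scan commits in order and keep those on the branch, giving up
- // after 1000 commits to bound the datastore query.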
- for t, n := q.Run(c), 0; len(commits) < commitsPerPage && n < 1000; {
- var c Commit
- _, err := t.Next(&c)
- if err == datastore.Done {
- break
- }
- if err != nil {
- return nil, err
- }
- if !isBranchCommit(&c, branch) {
- continue
- }
- if n >= offset {
- commits = append(commits, &c)
- }
- n++
- }
- return commits, nil
-}
-
-// isBranchCommit reports whether the given commit is on the specified branch.
-// It does so by examining the commit description, so commits whose
-// descriptions do not begin with the "[branch]" prefix can be misclassified.
-func isBranchCommit(c *Commit, b string) bool {
- d := strings.TrimSpace(c.Desc)
- if b == "default" {
- return !strings.HasPrefix(d, "[")
- }
- return strings.HasPrefix(d, "["+b+"]")
-}
-
-// commitBuilders returns the names of the builders that provided
-// Results for the provided commits.
-func commitBuilders(commits []*Commit) []string {
- builders := make(map[string]bool)
- for _, commit := range commits {
- for _, r := range commit.Results() {
- builders[r.Builder] = true
- }
- }
- k := keys(builders)
- sort.Sort(builderOrder(k))
- return k
-}
-
-func keys(m map[string]bool) (s []string) {
- for k := range m {
- s = append(s, k)
- }
- sort.Strings(s)
- return
-}
-
-// builderOrder implements sort.Interface, sorting builder names
-// ("darwin-amd64", etc) first by builderPriority and then alphabetically.
-type builderOrder []string
-
-func (s builderOrder) Len() int { return len(s) }
-func (s builderOrder) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s builderOrder) Less(i, j int) bool {
- pi, pj := builderPriority(s[i]), builderPriority(s[j])
- if pi == pj {
- return s[i] < s[j]
- }
- return pi < pj
-}
-
-func builderPriority(builder string) (p int) {
- // Put -temp builders at the end, always.
- if strings.HasSuffix(builder, "-temp") {
- defer func() { p += 20 }()
- }
- // Group race builders together.
- if isRace(builder) {
- return 1
- }
- // If the OS has a specified priority, use it.
- if p, ok := osPriority[builderOS(builder)]; ok {
- return p
- }
- // The rest.
- return 10
-}
-
-func isRace(s string) bool {
- return strings.Contains(s, "-race-") || strings.HasSuffix(s, "-race")
-}
-
-func unsupported(builder string) bool {
- if strings.HasSuffix(builder, "-temp") {
- return true
- }
- return unsupportedOS(builderOS(builder))
-}
-
-func unsupportedOS(os string) bool {
- if os == "race" {
- return false
- }
- p, ok := osPriority[os]
- return !ok || p > 0
-}
-
-// Priorities for specific operating systems.
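-// Lower values sort first; builders whose OS has priority > 0 or is
-// absent from this map are rendered as unsupported.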
-var osPriority = map[string]int{
- "darwin": 0,
- "freebsd": 0,
- "linux": 0,
- "windows": 0,
- // race == 1
- "openbsd": 2,
- "netbsd": 3,
- "dragonfly": 4,
-}
-
-// TagState represents the state of all Packages at a Tag.
-type TagState struct {
- Tag *Commit
- Packages []*PackageState
-}
-
-// PackageState represents the state of a Package at a Tag.
-type PackageState struct {
- Package *Package
- Commit *Commit
-}
-
-// TagStateByName fetches the results for all Go subrepos at the specified Tag.
-func TagStateByName(c appengine.Context, name string) (*TagState, error) {
- tag, err := GetTag(c, name)
- if err != nil {
- return nil, err
- }
- pkgs, err := Packages(c, "subrepo")
- if err != nil {
- return nil, err
- }
- var st TagState
- for _, pkg := range pkgs {
- com, err := pkg.LastCommit(c)
- if err != nil {
- c.Warningf("%v: no Commit found: %v", pkg, err)
- continue
- }
- st.Packages = append(st.Packages, &PackageState{pkg, com})
- }
- st.Tag, err = tag.Commit(c)
- if err != nil {
- return nil, err
- }
- return &st, nil
-}
-
-type uiTemplateData struct {
- Dashboard *Dashboard
- Package *Package
- Commits []*Commit
- Builders []string
- TipState *TagState
- Pagination *Pagination
- Branch string
-}
-
-var uiTemplate = template.Must(
- template.New("ui.html").Funcs(tmplFuncs).ParseFiles("build/ui.html"),
-)
-
-var tmplFuncs = template.FuncMap{
- "buildDashboards": buildDashboards,
- "builderOS": builderOS,
- "builderSpans": builderSpans,
- "builderSubheading": builderSubheading,
- "builderTitle": builderTitle,
- "repoURL": repoURL,
- "shortDesc": shortDesc,
- "shortHash": shortHash,
- "shortUser": shortUser,
- "tail": tail,
- "unsupported": unsupported,
-}
-
-func splitDash(s string) (string, string) {
- i := strings.Index(s, "-")
- if i >= 0 {
- return s[:i], s[i+1:]
- }
- return s, ""
-}
-
-// builderOS returns the os tag for a builder string
-func builderOS(s string) string {
- os, _ := splitDash(s)
- return os
-}
-
-// builderOSOrRace returns the builder OS or, if it is a race builder, "race".
-func builderOSOrRace(s string) string {
- if isRace(s) {
- return "race"
- }
- return builderOS(s)
-}
-
-// builderArch returns the arch tag for a builder string
-func builderArch(s string) string {
- _, arch := splitDash(s)
- arch, _ = splitDash(arch) // chop third part
- return arch
-}
-
-// builderSubheading returns a short arch tag for a builder string
-// or, if it is a race builder, the builder OS.
-func builderSubheading(s string) string {
- if isRace(s) {
- return builderOS(s)
- }
- arch := builderArch(s)
- switch arch {
- case "amd64":
- return "x64"
- }
- return arch
-}
-
-// builderArchChar returns the architecture letter for a builder string
-func builderArchChar(s string) string {
- arch := builderArch(s)
- switch arch {
- case "386":
- return "8"
- case "amd64":
- return "6"
- case "arm":
- return "5"
- }
- return arch
-}
-
-type builderSpan struct {
- N int
- OS string
- Unsupported bool
-}
-
-// builderSpans creates a list of tags showing
-// the builder's operating system names, spanning
-// the appropriate number of columns.
-func builderSpans(s []string) []builderSpan {
- var sp []builderSpan
- for len(s) > 0 {
- i := 1
- os := builderOSOrRace(s[0])
- u := unsupportedOS(os) || strings.HasSuffix(s[0], "-temp")
- for i < len(s) && builderOSOrRace(s[i]) == os {
- i++
- }
- sp = append(sp, builderSpan{i, os, u})
- s = s[i:]
- }
- return sp
-}
-
-// builderTitle formats "linux-amd64-foo" as "linux amd64 foo".
-func builderTitle(s string) string {
- return strings.Replace(s, "-", " ", -1)
-}
-
-// buildDashboards returns the known public dashboards.
-func buildDashboards() []*Dashboard {
- return dashboards
-}
-
-// shortDesc returns the first line of a description.
-func shortDesc(desc string) string {
- if i := strings.Index(desc, "\n"); i != -1 {
- desc = desc[:i]
- }
- return limitStringLength(desc, 100)
-}
-
-// shortHash returns a short version of a hash.
-func shortHash(hash string) string {
- if len(hash) > 12 {
- hash = hash[:12]
- }
- return hash
-}
-
-// shortUser returns a shortened version of a user string.
-func shortUser(user string) string {
- if i, j := strings.Index(user, "<"), strings.Index(user, ">"); 0 <= i && i < j {
- user = user[i+1 : j]
- }
- if i := strings.Index(user, "@"); i >= 0 {
- return user[:i]
- }
- return user
-}
-
-// repoRe matches Google Code repositories and subrepositories (without paths).
-var repoRe = regexp.MustCompile(`^code\.google\.com/p/([a-z0-9\-]+)(\.[a-z0-9\-]+)?$`)
-
-// repoURL returns the URL of a change at a Google Code repository or subrepo.
-func repoURL(dashboard, hash, packagePath string) (string, error) {
- if packagePath == "" {
- if dashboard == "Gccgo" {
- return "https://code.google.com/p/gofrontend/source/detail?r=" + hash, nil
- }
- return "https://code.google.com/p/go/source/detail?r=" + hash, nil
- }
- m := repoRe.FindStringSubmatch(packagePath)
- if m == nil {
- return "", errors.New("unrecognized package: " + packagePath)
- }
- url := "https://code.google.com/p/" + m[1] + "/source/detail?r=" + hash
- if len(m) > 2 {
- url += "&repo=" + m[2][1:]
- }
- return url, nil
-}
-
-// tail returns the trailing n lines of s.
-func tail(n int, s string) string {
- lines := strings.Split(s, "\n")
- if len(lines) < n {
- return s
- }
- return strings.Join(lines[len(lines)-n:], "\n")
-}
diff --git a/dashboard/app/build/ui.html b/dashboard/app/build/ui.html
deleted file mode 100644
index 6ae268c..0000000
--- a/dashboard/app/build/ui.html
+++ /dev/null
@@ -1,210 +0,0 @@
-<!DOCTYPE HTML>
-<html>
- <head>
- <title>{{$.Dashboard.Name}} Build Dashboard</title>
- <link rel="stylesheet" href="/static/style.css"/>
- <script src="https://ajax.googleapis.com/ajax/libs/jquery/1.8.2/jquery.min.js"></script>
- <script>
- var showUnsupported = window.location.hash.substr(1) != "short";
- function redraw() {
- showUnsupported = !$("#showshort").prop('checked');
- $('.unsupported')[showUnsupported?'show':'hide']();
- window.location.hash = showUnsupported?'':'short';
- }
- $(document).ready(function() {
- $("#showshort").attr('checked', !showUnsupported).change(redraw);
- redraw();
- })
- </script>
- </head>
-
- <body>
- <header id="topbar">
- <h1>Go Dashboard</h1>
- <nav>
- <a href="{{$.Dashboard.RelPath}}">Test</a>
- <a href="{{$.Dashboard.RelPath}}perf">Perf</a>
- <a href="{{$.Dashboard.RelPath}}perfgraph">Graphs</a>
- </nav>
- <div class="clear"></div>
- </header>
-
- <nav class="dashboards">
- {{range buildDashboards}}
- <a href="{{.RelPath}}">{{.Name}}</a>
- {{end}}
- <label>
- <input type=checkbox id="showshort">
- show only <a href="http://golang.org/wiki/PortingPolicy">first-class ports</a>
- </label>
- </nav>
- {{with $.Package.Name}}<h2>{{.}}</h2>{{end}}
-
- <div class="page">
-
- {{if $.Commits}}
-
- <table class="build">
- <colgroup class="col-hash" {{if $.Package.Path}}span="2"{{end}}></colgroup>
- {{range $.Builders | builderSpans}}
- <colgroup class="col-result{{if .Unsupported}} unsupported{{end}}" span="{{.N}}"></colgroup>
- {{end}}
- <colgroup class="col-user"></colgroup>
- <colgroup class="col-time"></colgroup>
- <colgroup class="col-desc"></colgroup>
- <tr>
- <!-- extra row to make alternating colors use dark for first result -->
- </tr>
- <tr>
- {{if $.Package.Path}}
- <th colspan="2">revision</th>
- {{else}}
- <th>&nbsp;</th>
- {{end}}
- {{range $.Builders | builderSpans}}
- <th {{if .Unsupported}}class="unsupported"{{end}} colspan="{{.N}}">{{.OS}}</th>
- {{end}}
- <th></th>
- <th></th>
- <th></th>
- </tr>
- <tr>
- {{if $.Package.Path}}
- <th class="result arch">repo</th>
- <th class="result arch">{{$.Dashboard.Name}}</th>
- {{else}}
- <th>&nbsp;</th>
- {{end}}
- {{range $.Builders}}
- <th class="result arch{{if (unsupported .)}} unsupported{{end}}" title="{{.}}">{{builderSubheading .}}</th>
- {{end}}
- <th></th>
- <th></th>
- <th></th>
- </tr>
- {{range $c := $.Commits}}
- {{range $i, $h := $c.ResultGoHashes}}
- <tr class="commit">
- {{if $i}}
- <td>&nbsp;</td>
- {{else}}
- <td class="hash"><a href="{{repoURL $.Dashboard.Name $c.Hash $.Package.Path}}">{{shortHash $c.Hash}}</a></td>
- {{end}}
- {{if $h}}
- <td class="hash"><a href="{{repoURL $.Dashboard.Name $h ""}}">{{shortHash $h}}</a></td>
- {{end}}
- {{range $.Builders}}
- <td class="result{{if (unsupported .)}} unsupported{{end}}">
- {{with $c.Result . $h}}
- {{if .OK}}
- <span class="ok">ok</span>
- {{else}}
- <a href="{{$.Dashboard.RelPath}}log/{{.LogHash}}" class="fail">fail</a>
- {{end}}
- {{else}}
- &nbsp;
- {{end}}
- </td>
- {{end}}
- {{if $i}}
- <td>&nbsp;</td>
- <td>&nbsp;</td>
- <td>&nbsp;</td>
- {{else}}
- <td class="user" title="{{$c.User}}">{{shortUser $c.User}}</td>
- <td class="time">{{$c.Time.Format "Mon 02 Jan 15:04"}}</td>
- <td class="desc" title="{{$c.Desc}}">{{shortDesc $c.Desc}}</td>
- {{end}}
- </tr>
- {{end}}
- {{end}}
- </table>
-
- {{with $.Pagination}}
- <div class="paginate">
- <nav>
- <a {{if .HasPrev}}href="?{{with $.Package.Path}}repo={{.}}&{{end}}page={{.Prev}}{{with $.Branch}}&branch={{.}}{{end}}"{{else}}class="inactive"{{end}}>newer</a>
- <a {{if .Next}}href="?{{with $.Package.Path}}repo={{.}}&{{end}}page={{.Next}}{{with $.Branch}}&branch={{.}}{{end}}"{{else}}class="inactive"{{end}}>older</a>
- <a {{if .HasPrev}}href=".{{with $.Branch}}?branch={{.}}{{end}}"{{else}}class="inactive"{{end}}>latest</a>
- </nav>
- </div>
- {{end}}
-
- {{else}}
- <p>No commits to display. Hm.</p>
- {{end}}
-
- {{with $.TipState}}
- {{$goHash := .Tag.Hash}}
- {{if .Packages}}
- <h2>
- Sub-repositories at tip
- <small>(<a href="{{repoURL $.Dashboard.Name .Tag.Hash ""}}">{{shortHash .Tag.Hash}}</a>)</small>
- </h2>
-
- <table class="build">
- <colgroup class="col-package"></colgroup>
- <colgroup class="col-hash"></colgroup>
- {{range $.Builders | builderSpans}}
- <colgroup class="col-result{{if .Unsupported}} unsupported{{end}}" span="{{.N}}"></colgroup>
- {{end}}
- <colgroup class="col-user"></colgroup>
- <colgroup class="col-time"></colgroup>
- <colgroup class="col-desc"></colgroup>
- <tr>
- <!-- extra row to make alternating colors use dark for first result -->
- </tr>
- <tr>
- <th></th>
- <th></th>
- {{range $.Builders | builderSpans}}
- <th {{if .Unsupported}}class="unsupported"{{end}} colspan="{{.N}}">{{.OS}}</th>
- {{end}}
- <th></th>
- <th></th>
- <th></th>
- </tr>
- <tr>
- <th></th>
- <th></th>
- {{range $.Builders}}
- <th class="result arch{{if (unsupported .)}} unsupported{{end}}" title="{{.}}">{{builderSubheading .}}</th>
- {{end}}
- <th></th>
- <th></th>
- <th></th>
- </tr>
- {{range $pkg := .Packages}}
- <tr class="commit">
- <td><a title="{{.Package.Path}}" href="?repo={{.Package.Path}}">{{.Package.Name}}</a></td>
- <td class="hash">
- {{$h := $pkg.Commit.Hash}}
- <a href="{{repoURL $.Dashboard.Name $h $pkg.Commit.PackagePath}}">{{shortHash $h}}</a>
- </td>
- {{range $.Builders}}
- <td class="result{{if (unsupported .)}} unsupported{{end}}">
- {{with $pkg.Commit.Result . $goHash}}
- {{if .OK}}
- <span class="ok">ok</span>
- {{else}}
- <a href="{{$.Dashboard.RelPath}}log/{{.LogHash}}" class="fail">fail</a>
- {{end}}
- {{else}}
- &nbsp;
- {{end}}
- </td>
- {{end}}
- {{with $pkg.Commit}}
- <td class="user" title="{{.User}}">{{shortUser .User}}</td>
- <td class="time">{{.Time.Format "Mon 02 Jan 15:04"}}</td>
- <td class="desc" title="{{.Desc}}">{{shortDesc .Desc}}</td>
- {{end}}
- </tr>
- {{end}}
- </table>
- {{end}}
- {{end}}
-
- </div>
- </body>
-</html>
diff --git a/dashboard/app/build/update.go b/dashboard/app/build/update.go
deleted file mode 100644
index 1d22cc9..0000000
--- a/dashboard/app/build/update.go
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build appengine
-
-package build
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
-
- "appengine"
- "appengine/datastore"
-)
-
-func init() {
- http.HandleFunc("/updatebenchmark", updateBenchmark)
-}
-
-func updateBenchmark(w http.ResponseWriter, r *http.Request) {
- if !appengine.IsDevAppServer() {
- fmt.Fprint(w, "Update must not run on a real server.")
- return
- }
-
- if r.Method != "POST" {
- fmt.Fprintf(w, "bad request method")
- return
- }
-
- c := contextForRequest(r)
- if !validKey(c, r.FormValue("key"), r.FormValue("builder")) {
- fmt.Fprintf(w, "bad builder/key")
- return
- }
-
- defer r.Body.Close()
- var hashes []string
- if err := json.NewDecoder(r.Body).Decode(&hashes); err != nil {
- fmt.Fprintf(w, "failed to decode request: %v", err)
- return
- }
-
- ncommit := 0
- nrun := 0
- tx := func(c appengine.Context) error {
- var cr *CommitRun
- for _, hash := range hashes {
- // Update Commit.
- com := &Commit{Hash: hash}
- err := datastore.Get(c, com.Key(c), com)
- if err != nil && err != datastore.ErrNoSuchEntity {
- return fmt.Errorf("fetching Commit: %v", err)
- }
- if err == datastore.ErrNoSuchEntity {
- continue
- }
- com.NeedsBenchmarking = true
- com.PerfResults = nil
- if err := putCommit(c, com); err != nil {
- return err
- }
- ncommit++
-
- // create PerfResult
- res := &PerfResult{CommitHash: com.Hash, CommitNum: com.Num}
- err = datastore.Get(c, res.Key(c), res)
- if err != nil && err != datastore.ErrNoSuchEntity {
- return fmt.Errorf("fetching PerfResult: %v", err)
- }
- if err == datastore.ErrNoSuchEntity {
- if _, err := datastore.Put(c, res.Key(c), res); err != nil {
- return fmt.Errorf("putting PerfResult: %v", err)
- }
- }
-
- // Update CommitRun.
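- // Flush the buffered CommitRun when this commit falls outside the
- // PerfRunLength-sized window that cr covers.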
- if cr != nil && cr.StartCommitNum != com.Num/PerfRunLength*PerfRunLength {
- if _, err := datastore.Put(c, cr.Key(c), cr); err != nil {
- return fmt.Errorf("putting CommitRun: %v", err)
- }
- nrun++
- cr = nil
- }
- if cr == nil {
- var err error
- cr, err = GetCommitRun(c, com.Num)
- if err != nil {
- return fmt.Errorf("getting CommitRun: %v", err)
- }
- }
- if com.Num < cr.StartCommitNum || com.Num >= cr.StartCommitNum+PerfRunLength {
- return fmt.Errorf("commit num %v out of range [%v, %v)", com.Num, cr.StartCommitNum, cr.StartCommitNum+PerfRunLength)
- }
- idx := com.Num - cr.StartCommitNum
- cr.Hash[idx] = com.Hash
- cr.User[idx] = shortDesc(com.User)
- cr.Desc[idx] = shortDesc(com.Desc)
- cr.Time[idx] = com.Time
- cr.NeedsBenchmarking[idx] = com.NeedsBenchmarking
- }
- if cr != nil {
- if _, err := datastore.Put(c, cr.Key(c), cr); err != nil {
- return fmt.Errorf("putting CommitRun: %v", err)
- }
- nrun++
- }
- return nil
- }
- if err := datastore.RunInTransaction(c, tx, nil); err != nil {
- fmt.Fprintf(w, "failed to execute tx: %v", err)
- return
- }
- fmt.Fprintf(w, "OK (updated %v commits and %v commit runs)", ncommit, nrun)
-}
diff --git a/dashboard/app/cache/cache.go b/dashboard/app/cache/cache.go
deleted file mode 100644
index 27b14ce..0000000
--- a/dashboard/app/cache/cache.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build appengine
-
-package cache
-
-import (
- "fmt"
- "net/http"
- "time"
-
- "appengine"
- "appengine/memcache"
-)
-
-// TimeKey specifies the memcache entity that keeps the logical datastore time.
-var TimeKey = "cachetime"
-
-const (
- nocache = "nocache"
- expiry = 600 // 10 minutes
-)
-
-func newTime() uint64 { return uint64(time.Now().Unix()) << 32 }
-
-// Now returns the current logical datastore time to use for cache lookups.
-func Now(c appengine.Context) uint64 {
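- // Increment with a zero delta returns the stored value unchanged,
- // seeding it with newTime() if no value exists yet.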
- t, err := memcache.Increment(c, TimeKey, 0, newTime())
- if err != nil {
- c.Errorf("cache.Now: %v", err)
- return 0
- }
- return t
-}
-
-// Tick sets the current logical datastore time to a never-before-used time
-// and returns that time. It should be called to invalidate the cache.
-func Tick(c appengine.Context) uint64 {
- t, err := memcache.Increment(c, TimeKey, 1, newTime())
- if err != nil {
- c.Errorf("cache: tick: %v", err)
- return 0
- }
- return t
-}
-
-// Get fetches data for name at time now from memcache and unmarshals it into
-// value. It reports whether it found the cache record and logs any errors to
-// the admin console.
-func Get(r *http.Request, now uint64, name string, value interface{}) bool {
- c := appengine.NewContext(r)
- if now == 0 || r.FormValue(nocache) != "" {
- c.Debugf("cache: skipping get: now=%v, nocache=%q", now, nocache)
- return false
- }
- key := fmt.Sprintf("%s.%d", name, now)
- _, err := memcache.JSON.Get(c, key, value)
- switch err {
- case nil:
- c.Debugf("cache: get %q: hit", key)
- return true
- case memcache.ErrCacheMiss:
- c.Debugf("cache: get %q: cache miss", key)
- default:
- c.Errorf("cache: get %q: %v", key, err)
- }
- return false
-}
-
-// Set puts value into memcache under name at time now.
-// It logs any errors to the admin console.
-func Set(r *http.Request, now uint64, name string, value interface{}) {
- c := appengine.NewContext(r)
- if now == 0 || r.FormValue(nocache) != "" {
- c.Debugf("cache: skipping set: now=%v, nocache=%q", now, nocache)
- return
- }
- key := fmt.Sprintf("%s.%d", name, now)
- err := memcache.JSON.Set(c, &memcache.Item{
- Key: key,
- Object: value,
- Expiration: expiry,
- })
- if err != nil {
- c.Errorf("cache: set %q: %v", key, err)
- return
- }
- c.Debugf("cache: set %q: ok", key)
-}
diff --git a/dashboard/app/cron.yaml b/dashboard/app/cron.yaml
deleted file mode 100644
index 4118f9e..0000000
--- a/dashboard/app/cron.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-cron:
-- description: updates noise level for benchmarking results
- url: /perflearn?update=1
- schedule: every 24 hours
-
diff --git a/dashboard/app/index.yaml b/dashboard/app/index.yaml
deleted file mode 100644
index 670a667..0000000
--- a/dashboard/app/index.yaml
+++ /dev/null
@@ -1,54 +0,0 @@
-indexes:
-
-- kind: Commit
- ancestor: yes
- properties:
- - name: Num
- direction: desc
-
-- kind: Commit
- ancestor: yes
- properties:
- - name: Time
- direction: desc
-
-- kind: Commit
- ancestor: yes
- properties:
- - name: NeedsBenchmarking
- - name: Num
- direction: desc
-
-- kind: CommitRun
- ancestor: yes
- properties:
- - name: StartCommitNum
- direction: desc
-
-- kind: PerfResult
- ancestor: yes
- properties:
- - name: CommitNum
- direction: desc
-
-- kind: PerfResult
- ancestor: yes
- properties:
- - name: CommitNum
- direction: asc
-
-- kind: CommitRun
- ancestor: yes
- properties:
- - name: StartCommitNum
- direction: asc
-
-- kind: PerfMetricRun
- ancestor: yes
- properties:
- - name: Builder
- - name: Benchmark
- - name: Metric
- - name: StartCommitNum
- direction: asc
-
diff --git a/dashboard/app/key/key.go b/dashboard/app/key/key.go
deleted file mode 100644
index e52554f..0000000
--- a/dashboard/app/key/key.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build appengine
-
-package key
-
-import (
- "sync"
-
- "appengine"
- "appengine/datastore"
-)
-
-var theKey struct {
- sync.RWMutex
- builderKey
-}
-
-type builderKey struct {
- Secret string
-}
-
-func (k *builderKey) Key(c appengine.Context) *datastore.Key {
- return datastore.NewKey(c, "BuilderKey", "root", 0, nil)
-}
-
-func Secret(c appengine.Context) string {
- // check with rlock
- theKey.RLock()
- k := theKey.Secret
- theKey.RUnlock()
- if k != "" {
- return k
- }
-
- // prepare to fill; check with lock and keep lock
- theKey.Lock()
- defer theKey.Unlock()
- if theKey.Secret != "" {
- return theKey.Secret
- }
-
- // fill
- if err := datastore.Get(c, theKey.Key(c), &theKey.builderKey); err != nil {
- if err == datastore.ErrNoSuchEntity {
- // If the key is not stored in datastore, write it.
- // This only happens at the beginning of a new deployment.
- // The code is left here for SDK use and in case a fresh
- // deployment is ever needed. "gophers rule" is not the
- // real key.
- if !appengine.IsDevAppServer() {
- panic("lost key from datastore")
- }
- theKey.Secret = "gophers rule"
- datastore.Put(c, theKey.Key(c), &theKey.builderKey)
- return theKey.Secret
- }
- panic("cannot load builder key: " + err.Error())
- }
-
- return theKey.Secret
-}
diff --git a/dashboard/app/static/status_alert.gif b/dashboard/app/static/status_alert.gif
deleted file mode 100644
index 495d9d2..0000000
--- a/dashboard/app/static/status_alert.gif
+++ /dev/null
Binary files differ
diff --git a/dashboard/app/static/status_good.gif b/dashboard/app/static/status_good.gif
deleted file mode 100644
index ef9c5a8..0000000
--- a/dashboard/app/static/status_good.gif
+++ /dev/null
Binary files differ
diff --git a/dashboard/app/static/style.css b/dashboard/app/static/style.css
deleted file mode 100644
index ddf2129..0000000
--- a/dashboard/app/static/style.css
+++ /dev/null
@@ -1,308 +0,0 @@
-* { box-sizing: border-box; }
-
- .dashboards {
- padding: 0.5em;
- }
- .dashboards a {
- padding: 0.5em;
- background: #eee;
- color: blue;
- }
-
-body {
- margin: 0;
- font-family: sans-serif;
- padding: 0; margin: 0;
- color: #222;
-}
-
-.container {
- max-width: 900px;
- margin: 0 auto;
-}
-
-p, pre, ul, ol { margin: 20px; }
-
-h1, h2, h3, h4 {
- margin: 20px 0;
- padding: 0;
- color: #375EAB;
- font-weight: bold;
-}
-
-h1 { font-size: 24px; }
-h2 { font-size: 20px; }
-h3 { font-size: 20px; }
-h4 { font-size: 16px; }
-
-h2 { background: #E0EBF5; padding: 2px 5px; }
-h3, h4 { margin: 20px 5px; }
-
-dl, dd { font-size: 14px; }
-dl { margin: 20px; }
-dd { margin: 2px 20px; }
-
-.clear {
- clear: both;
-}
-
-.button {
- padding: 10px;
-
- color: #222;
- border: 1px solid #375EAB;
- background: #E0EBF5;
-
- border-radius: 5px;
-
- cursor: pointer;
-
- margin-left: 60px;
-}
-
-/* navigation bar */
-
-#topbar {
- padding: 10px 10px;
- background: #E0EBF5;
-}
-
-#topbar a {
- color: #222;
-}
-#topbar h1 {
- float: left;
- margin: 0;
- padding-top: 5px;
-}
-
-#topbar nav {
- float: left;
- margin-left: 20px;
-}
-#topbar nav a {
- display: inline-block;
- padding: 10px;
-
- margin: 0;
- margin-right: 5px;
-
- color: white;
- background: #375EAB;
-
- text-decoration: none;
- font-size: 16px;
-
- border: 1px solid #375EAB;
- -webkit-border-radius: 5px;
- -moz-border-radius: 5px;
- border-radius: 5px;
-}
-
-.page {
- margin-top: 20px;
-}
-
-/* settings panels */
-aside {
- margin-top: 5px;
-}
-
-.panel {
- border: 1px solid #aaa;
- border-radius: 5px;
- margin-bottom: 5px;
-}
-
-.panel h1 {
- font-size: 16px;
- margin: 0;
- padding: 2px 8px;
-}
-
-.panel select {
- padding: 5px;
- border: 0;
- width: 100%;
-}
-
-/* results table */
-
-table {
- margin: 5px;
- border-collapse: collapse;
- font-size: 11px;
-}
-
-table td, table th {
- vertical-align: top;
- padding: 2px 6px;
-}
-
-table tr:nth-child(2n+1) {
- background: #F4F4F4;
-}
-
-table thead tr {
- background: #fff !important;
-}
-
-/* build results */
-
-.build td, .build th, .packages td, .packages th {
- vertical-align: top;
- padding: 2px 4px;
- font-size: 10pt;
-}
-
-.build .hash {
- font-family: monospace;
- font-size: 9pt;
-}
-
-.build .result {
- text-align: center;
- width: 2em;
-}
-
-.build .col-hash, .build .col-result, .build .col-metric, .build .col-numresults {
- border-right: 1px solid #ccc;
-}
-
-.build .row-commit {
- border-top: 2px solid #ccc;
-}
-
-.build .arch {
- font-size: 83%;
- font-weight: normal;
-}
-
-.build .time {
- color: #666;
-}
-
-.build .ok {
- font-size: 83%;
-}
-
-.build .desc, .build .time, .build .user {
- white-space: nowrap;
-}
-
-.build .desc {
- text-align: left;
- max-width: 470px;
- overflow: hidden;
- text-overflow: ellipsis;
-}
-
-.good { text-decoration: none; color: #000000; border: 2px solid #00E700}
-.bad { text-decoration: none; text-shadow: 1px 1px 0 #000000; color: #FFFFFF; background: #E70000;}
-.noise { text-decoration: none; color: #888; }
-.fail { color: #C00; }
-
-/* pagination */
-
-.paginate nav {
- padding: 0.5em;
- margin: 10px 0;
-}
-
-.paginate nav a {
- padding: 0.5em;
- background: #E0EBF5;
- color: blue;
-
- -webkit-border-radius: 5px;
- -moz-border-radius: 5px;
- border-radius: 5px;
-}
-
-.paginate nav a.inactive {
- color: #888;
- cursor: default;
- text-decoration: none;
-}
-
-/* diffs */
-
-.diff-meta {
- font-family: monospace;
- margin-bottom: 10px;
-}
-
-.diff-container {
- padding: 10px;
-}
-
-.diff table .metric {
- font-weight: bold;
-}
-
-.diff {
- border: 1px solid #aaa;
- border-radius: 5px;
- margin-bottom: 5px;
- margin-right: 10px;
- float: left;
-}
-
-.diff h1 {
- font-size: 16px;
- margin: 0;
- padding: 2px 8px;
-}
-
-.diff-benchmark {
- clear: both;
- padding-top: 5px;
-}
-
-/* positioning elements */
-
-.page {
- position: relative;
- width: 100%;
-}
-
-aside {
- position: absolute;
- top: 0;
- left: 0;
- bottom: 0;
- width: 200px;
-}
-
-.main-content {
- position: absolute;
- top: 0;
- left: 210px;
- right: 5px;
- min-height: 200px;
- overflow: hidden;
-}
-
-@media only screen and (max-width: 900px) {
- aside {
- position: relative;
- display: block;
- width: auto;
- }
-
- .main-content {
- position: static;
- padding: 0;
- }
-
- aside .panel {
- float: left;
- width: auto;
- margin-right: 5px;
- }
- aside .button {
- float: left;
- margin: 0;
- }
-}
-
diff --git a/dashboard/builder/bench.go b/dashboard/builder/bench.go
deleted file mode 100644
index fb99aac..0000000
--- a/dashboard/builder/bench.go
+++ /dev/null
@@ -1,256 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "os"
- "os/exec"
- "path/filepath"
- "regexp"
- "strconv"
- "strings"
-)
-
-// benchHash benchmarks a single commit.
-func (b *Builder) benchHash(hash string, benchs []string) error {
- if *verbose {
- log.Println(b.name, "benchmarking", hash)
- }
-
- res := &PerfResult{Hash: hash, Benchmark: "meta-done"}
-
- // Create place in which to do work.
- workpath := filepath.Join(*buildroot, b.name+"-"+hash[:12])
- // Prepare a workpath if we don't have one we can reuse.
- update := false
- if b.lastWorkpath != workpath {
- if err := os.Mkdir(workpath, mkdirPerm); err != nil {
- return err
- }
- buildLog, _, err := b.buildRepoOnHash(workpath, hash, makeCmd)
- if err != nil {
- removePath(workpath)
- // record failure
- res.Artifacts = append(res.Artifacts, PerfArtifact{"log", buildLog})
- return b.recordPerfResult(res)
- }
- b.lastWorkpath = workpath
- update = true
- }
-
- // Build the benchmark binary.
- benchBin, buildLog, err := b.buildBenchmark(workpath, update)
- if err != nil {
- // record failure
- res.Artifacts = append(res.Artifacts, PerfArtifact{"log", buildLog})
- return b.recordPerfResult(res)
- }
-
- benchmark, procs, affinity, last := chooseBenchmark(benchBin, benchs)
- if benchmark != "" {
- res.Benchmark = fmt.Sprintf("%v-%v", benchmark, procs)
- res.Metrics, res.Artifacts, res.OK = b.executeBenchmark(workpath, hash, benchBin, benchmark, procs, affinity)
- if err = b.recordPerfResult(res); err != nil {
- return fmt.Errorf("recordResult: %s", err)
- }
- }
-
- if last {
- // All benchmarks have been executed; we don't need the workpath anymore.
- removePath(b.lastWorkpath)
- b.lastWorkpath = ""
- // Notify the app.
- res = &PerfResult{Hash: hash, Benchmark: "meta-done", OK: true}
- if err = b.recordPerfResult(res); err != nil {
- return fmt.Errorf("recordResult: %s", err)
- }
- }
-
- return nil
-}
-
-// buildBenchmark builds the benchmark binary.
-func (b *Builder) buildBenchmark(workpath string, update bool) (benchBin, log string, err error) {
- goroot := filepath.Join(workpath, "go")
- gobin := filepath.Join(goroot, "bin", "go") + exeExt
- gopath := filepath.Join(*buildroot, "gopath")
- env := append([]string{
- "GOROOT=" + goroot,
- "GOPATH=" + gopath},
- b.envv()...)
- // First, download without installing.
- args := []string{"get", "-d"}
- if update {
- args = append(args, "-u")
- }
- args = append(args, *benchPath)
- var buildlog bytes.Buffer
- runOpts := []runOpt{runTimeout(*buildTimeout), runEnv(env), allOutput(&buildlog), runDir(workpath)}
- err = run(exec.Command(gobin, args...), runOpts...)
- if err != nil {
- fmt.Fprintf(&buildlog, "go get -d %s failed: %s", *benchPath, err)
- return "", buildlog.String(), err
- }
- // Then, build into workpath.
- benchBin = filepath.Join(workpath, "benchbin") + exeExt
- args = []string{"build", "-o", benchBin, *benchPath}
- buildlog.Reset()
- err = run(exec.Command(gobin, args...), runOpts...)
- if err != nil {
- fmt.Fprintf(&buildlog, "go build %s failed: %s", *benchPath, err)
- return "", buildlog.String(), err
- }
- return benchBin, "", nil
-}
-
-// chooseBenchmark chooses the next benchmark to run
-// based on the list of available benchmarks, the benchmarks already executed,
-// and the -benchcpu list.
-func chooseBenchmark(benchBin string, doneBenchs []string) (bench string, procs, affinity int, last bool) {
- var out bytes.Buffer
- err := run(exec.Command(benchBin), allOutput(&out))
- if err != nil {
- log.Printf("Failed to query benchmark list: %v\n%s", err, out)
- last = true
- return
- }
- outStr := out.String()
- nlIdx := strings.Index(outStr, "\n")
- if nlIdx < 0 {
- log.Printf("Failed to parse benchmark list (no new line): %s", outStr)
- last = true
- return
- }
- localBenchs := strings.Split(outStr[:nlIdx], ",")
- benchsMap := make(map[string]bool)
- for _, b := range doneBenchs {
- benchsMap[b] = true
- }
- cnt := 0
- // We want to run all benchmarks with GOMAXPROCS=1 first.
- for i, procs1 := range benchCPU {
- for _, bench1 := range localBenchs {
- if benchsMap[fmt.Sprintf("%v-%v", bench1, procs1)] {
- continue
- }
- cnt++
- if cnt == 1 {
- bench = bench1
- procs = procs1
- if i < len(benchAffinity) {
- affinity = benchAffinity[i]
- }
- }
- }
- }
- last = cnt <= 1
- return
-}
-
-// executeBenchmark runs a single benchmark and parses its output.
-func (b *Builder) executeBenchmark(workpath, hash, benchBin, bench string, procs, affinity int) (metrics []PerfMetric, artifacts []PerfArtifact, ok bool) {
- // Benchmark runs are mutually exclusive with other activities.
- benchMutex.RUnlock()
- defer benchMutex.RLock()
- benchMutex.Lock()
- defer benchMutex.Unlock()
-
- log.Printf("%v executing benchmark %v-%v on %v", b.name, bench, procs, hash)
-
- // The benchmark executes 'go build'/'go tool',
- // so we need a properly set up environment.
- env := append([]string{
- "GOROOT=" + filepath.Join(workpath, "go"),
- "PATH=" + filepath.Join(workpath, "go", "bin") + string(os.PathListSeparator) + os.Getenv("PATH"),
- "GODEBUG=gctrace=1", // since Go1.2
- "GOGCTRACE=1", // before Go1.2
- fmt.Sprintf("GOMAXPROCS=%v", procs)},
- b.envv()...)
- args := []string{
- "-bench", bench,
- "-benchmem", strconv.Itoa(*benchMem),
- "-benchtime", benchTime.String(),
- "-benchnum", strconv.Itoa(*benchNum),
- "-tmpdir", workpath}
- if affinity != 0 {
- args = append(args, "-affinity", strconv.Itoa(affinity))
- }
- benchlog := new(bytes.Buffer)
- err := run(exec.Command(benchBin, args...), runEnv(env), allOutput(benchlog), runDir(workpath))
- if strip := benchlog.Len() - 512<<10; strip > 0 {
- // Leave the last 512K; that part contains the metrics.
- benchlog = bytes.NewBuffer(benchlog.Bytes()[strip:])
- }
- artifacts = []PerfArtifact{{Type: "log", Body: benchlog.String()}}
- if err != nil {
- log.Printf("Failed to execute benchmark '%v': %v", bench, err)
- ok = false
- return
- }
-
- metrics1, artifacts1, err := parseBenchmarkOutput(benchlog)
- if err != nil {
- log.Printf("Failed to parse benchmark output: %v", err)
- ok = false
- return
- }
- metrics = metrics1
- artifacts = append(artifacts, artifacts1...)
- ok = true
- return
-}
-
-// parseBenchmarkOutput fetches metrics and artifacts from benchmark output.
-func parseBenchmarkOutput(out io.Reader) (metrics []PerfMetric, artifacts []PerfArtifact, err error) {
- s := bufio.NewScanner(out)
- metricRe := regexp.MustCompile("^GOPERF-METRIC:([a-z,0-9,-]+)=([0-9]+)$")
- fileRe := regexp.MustCompile("^GOPERF-FILE:([a-z,0-9,-]+)=(.+)$")
- for s.Scan() {
- ln := s.Text()
- if ss := metricRe.FindStringSubmatch(ln); ss != nil {
- var v uint64
- v, err = strconv.ParseUint(ss[2], 10, 64)
- if err != nil {
- err = fmt.Errorf("Failed to parse metric '%v=%v': %v", ss[1], ss[2], err)
- return
- }
- metrics = append(metrics, PerfMetric{Type: ss[1], Val: v})
- } else if ss := fileRe.FindStringSubmatch(ln); ss != nil {
- var buf []byte
- buf, err = ioutil.ReadFile(ss[2])
- if err != nil {
- err = fmt.Errorf("Failed to read file '%v': %v", ss[2], err)
- return
- }
- artifacts = append(artifacts, PerfArtifact{ss[1], string(buf)})
- }
- }
- return
-}
-
-// needsBenchmarking determines whether the commit needs benchmarking.
-func needsBenchmarking(log *HgLog) bool {
- // Do not benchmark branch commits; they are usually not interesting
- // and fall outside the trunk succession.
- if log.Branch != "" {
- return false
- }
- // Do not benchmark commits that do not touch source files (e.g. CONTRIBUTORS).
- for _, f := range strings.Split(log.Files, " ") {
- if (strings.HasPrefix(f, "include") || strings.HasPrefix(f, "src")) &&
- !strings.HasSuffix(f, "_test.go") && !strings.Contains(f, "testdata") {
- return true
- }
- }
- return false
-}
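
For reference, parseBenchmarkOutput above only reacts to lines matching its two regexps; a sketch with invented metric and file names:

// Illustrative input for parseBenchmarkOutput (the names are made up).
out := strings.NewReader(
	"GOPERF-METRIC:time=123456\n" +
		"GOPERF-FILE:cpuprof=/tmp/bench.cpuprof\n")
metrics, artifacts, err := parseBenchmarkOutput(out)
// The metric line yields PerfMetric{Type: "time", Val: 123456}; the file line
// makes parseBenchmarkOutput read /tmp/bench.cpuprof and attach its contents
// as a PerfArtifact.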
diff --git a/dashboard/builder/doc.go b/dashboard/builder/doc.go
deleted file mode 100644
index 5192861..0000000
--- a/dashboard/builder/doc.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-
-Go Builder is a continuous build client for the Go project.
-It integrates with the Go Dashboard AppEngine application.
-
-Go Builder is intended to run continuously as a background process.
-
-It periodically pulls updates from the Go Mercurial repository.
-
-When a newer revision is found, Go Builder creates a clone of the repository,
-runs all.bash, and reports build success or failure to the Go Dashboard.
-
-For a release revision (a change description that matches "release.YYYY-MM-DD"),
-Go Builder will create a tar.gz archive of the GOROOT and deliver it to the
-Go Google Code project's downloads section.
-
-Usage:
-
- gobuilder goos-goarch...
-
- Several goos-goarch combinations can be provided, and the builder will
- build them serially.
-
-Optional flags:
-
- -dashboard="https://build.golang.org": Go Dashboard Host
- The location of the Go Dashboard application to which Go Builder will
- report its results.
-
- -release: Build and deliver binary release archive
-
- -rev=N: Build revision N and exit
-
- -cmd="./all.bash": Build command (specify absolute or relative to go/src)
-
- -v: Verbose logging
-
- -external: External package builder mode (will not report Go build
- state to dashboard or issue releases)
-
-The key file should be located at $HOME/.gobuildkey or, for a builder-specific
-key, $HOME/.gobuildkey-$BUILDER (e.g., $HOME/.gobuildkey-linux-amd64).
-
-The build key file is a text file of the format:
-
- godashboard-key
- googlecode-username
- googlecode-password
-
-If the Google Code credentials are not provided, the archival step
-will be skipped.
-
-*/
-package main
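
A hedged example invocation, combining the usage line and the flags documented above (the builder names and flag values are illustrative):

    gobuilder -dashboard=https://build.golang.org -v linux-amd64 linux-386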
diff --git a/dashboard/builder/env.go b/dashboard/builder/env.go
deleted file mode 100644
index b97db99..0000000
--- a/dashboard/builder/env.go
+++ /dev/null
@@ -1,277 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "os"
- "os/exec"
- "path/filepath"
- "regexp"
- "runtime"
- "strings"
-
- "golang.org/x/tools/go/vcs"
-)
-
-// builderEnv represents the environment that a Builder will run tests in.
-type builderEnv interface {
- // setup sets up the builder environment and returns the directory to run the buildCmd in.
- setup(repo *Repo, workpath, hash string, envv []string) (string, error)
-}
-
-// goEnv represents the builderEnv for the main Go repo.
-type goEnv struct {
- goos, goarch string
-}
-
-func (b *Builder) envv() []string {
- if runtime.GOOS == "windows" {
- return b.envvWindows()
- }
-
- var e []string
- if *buildTool == "go" {
- e = []string{
- "GOOS=" + b.goos,
- "GOARCH=" + b.goarch,
- "GOROOT_FINAL=/usr/local/go",
- }
- switch b.goos {
- case "android", "nacl":
- // Cross compile.
- default:
- // If we are building, for example, linux/386 on a linux/amd64 machine, we want to
- // make sure that the whole build is done as if it were compiled on a real
- // linux/386 machine. In other words, we do not want a cross-compilation build.
- // To do this we set GOHOSTOS and GOHOSTARCH to override the detection in make.bash.
- //
- // The exception to this rule is when we are doing nacl/android builds. These are by
- // definition always cross compilation, and we have support built into cmd/go to be
- // able to handle this case.
- e = append(e, "GOHOSTOS="+b.goos, "GOHOSTARCH="+b.goarch)
- }
- }
-
- for _, k := range extraEnv() {
- if s, ok := getenvOk(k); ok {
- e = append(e, k+"="+s)
- }
- }
- return e
-}
-
-func (b *Builder) envvWindows() []string {
- var start map[string]string
- if *buildTool == "go" {
- start = map[string]string{
- "GOOS": b.goos,
- "GOHOSTOS": b.goos,
- "GOARCH": b.goarch,
- "GOHOSTARCH": b.goarch,
- "GOROOT_FINAL": `c:\go`,
- "GOBUILDEXIT": "1", // exit all.bat with completion status.
- }
- }
-
- for _, name := range extraEnv() {
- if s, ok := getenvOk(name); ok {
- start[name] = s
- }
- }
- if b.goos == "windows" {
- switch b.goarch {
- case "amd64":
- start["PATH"] = `c:\TDM-GCC-64\bin;` + start["PATH"]
- case "386":
- start["PATH"] = `c:\TDM-GCC-32\bin;` + start["PATH"]
- }
- }
- skip := map[string]bool{
- "GOBIN": true,
- "GOPATH": true,
- "GOROOT": true,
- "INCLUDE": true,
- "LIB": true,
- }
- var e []string
- for name, v := range start {
- e = append(e, name+"="+v)
- skip[name] = true
- }
- for _, kv := range os.Environ() {
- s := strings.SplitN(kv, "=", 2)
- name := strings.ToUpper(s[0])
- switch {
- case name == "":
- // Variables with an empty name, like "=C:=C:\": just copy them.
- e = append(e, kv)
- case !skip[name]:
- e = append(e, kv)
- skip[name] = true
- }
- }
- return e
-}
-
-// setup for a goEnv clones the main go repo to workpath/go at the provided hash
-// and returns the path workpath/go/src, the location of all go build scripts.
-func (env *goEnv) setup(repo *Repo, workpath, hash string, envv []string) (string, error) {
- goworkpath := filepath.Join(workpath, "go")
- if err := repo.Export(goworkpath, hash); err != nil {
- return "", fmt.Errorf("error exporting repository: %s", err)
- }
- if err := ioutil.WriteFile(filepath.Join(goworkpath, "VERSION"), []byte(hash), 0644); err != nil {
- return "", fmt.Errorf("error writing VERSION file: %s", err)
- }
- return filepath.Join(goworkpath, "src"), nil
-}
-
-// gccgoEnv represents the builderEnv for the gccgo compiler.
-type gccgoEnv struct{}
-
-// setup for a gccgoEnv clones the gofrontend repo to workpath/go at the hash
-// and clones the latest GCC branch to repo.Path/gcc. The gccgo sources are
-// replaced with the updated sources in the gofrontend repo, and gcc is
-// configured and built in workpath/gcc-objdir. The path to
-// workpath/gcc-objdir is returned.
-func (env *gccgoEnv) setup(repo *Repo, workpath, hash string, envv []string) (string, error) {
- gccpath := filepath.Join(repo.Path, "gcc")
-
- // get a handle to Git vcs.Cmd for pulling down GCC from the mirror.
- git := vcs.ByCmd("git")
-
- // only pull down gcc if we don't have a local copy.
- if _, err := os.Stat(gccpath); err != nil {
- if err := timeout(*cmdTimeout, func() error {
- // pull down a working copy of GCC.
- return git.Create(gccpath, *gccPath)
- }); err != nil {
- return "", err
- }
- }
-
- if err := git.Download(gccpath); err != nil {
- return "", err
- }
-
- // get the modified files for this commit.
-
- var buf bytes.Buffer
- if err := run(exec.Command("hg", "status", "--no-status", "--change", hash),
- allOutput(&buf), runDir(repo.Path), runEnv(envv)); err != nil {
- return "", fmt.Errorf("Failed to find the modified files for %s: %s", hash, err)
- }
- modifiedFiles := strings.Split(buf.String(), "\n")
- var isMirrored bool
- for _, f := range modifiedFiles {
- if strings.HasPrefix(f, "go/") || strings.HasPrefix(f, "libgo/") {
- isMirrored = true
- break
- }
- }
-
- // use git log to find the corresponding commit to sync to in the gcc mirror.
- // If the files modified in the gofrontend are mirrored to gcc, we expect a
- // commit with a similar description in the gcc mirror. If the files modified are
- // not mirrored, e.g. in support/, we can sync to the most recent gcc commit that
- // occurred before those files were modified to verify gccgo's status at that point.
- logCmd := []string{
- "log",
- "-1",
- "--format=%H",
- }
- var errMsg string
- if isMirrored {
- commitDesc, err := repo.Master.VCS.LogAtRev(repo.Path, hash, "{desc|firstline|escape}")
- if err != nil {
- return "", err
- }
-
- quotedDesc := regexp.QuoteMeta(string(commitDesc))
- logCmd = append(logCmd, "--grep", quotedDesc, "--regexp-ignore-case", "--extended-regexp")
- errMsg = fmt.Sprintf("Failed to find a commit with a similar description to '%s'", string(commitDesc))
- } else {
- commitDate, err := repo.Master.VCS.LogAtRev(repo.Path, hash, "{date|rfc3339date}")
- if err != nil {
- return "", err
- }
-
- logCmd = append(logCmd, "--before", string(commitDate))
- errMsg = fmt.Sprintf("Failed to find a commit before '%s'", string(commitDate))
- }
-
- buf.Reset()
- if err := run(exec.Command("git", logCmd...), runEnv(envv), allOutput(&buf), runDir(gccpath)); err != nil {
- return "", fmt.Errorf("%s: %s", errMsg, err)
- }
- gccRev := buf.String()
- if gccRev == "" {
- return "", fmt.Errorf(errMsg)
- }
-
- // checkout gccRev
- // TODO(cmang): Fix this to work in parallel mode.
- if err := run(exec.Command("git", "reset", "--hard", strings.TrimSpace(gccRev)), runEnv(envv), runDir(gccpath)); err != nil {
- return "", fmt.Errorf("Failed to checkout commit at revision %s: %s", gccRev, err)
- }
-
- // make objdir to work in
- gccobjdir := filepath.Join(workpath, "gcc-objdir")
- if err := os.Mkdir(gccobjdir, mkdirPerm); err != nil {
- return "", err
- }
-
- // configure GCC with substituted gofrontend and libgo
- if err := run(exec.Command(filepath.Join(gccpath, "configure"),
- "--enable-languages=c,c++,go",
- "--disable-bootstrap",
- "--disable-multilib",
- ), runEnv(envv), runDir(gccobjdir)); err != nil {
- return "", fmt.Errorf("Failed to configure GCC: %v", err)
- }
-
- // build gcc
- if err := run(exec.Command("make"), runTimeout(*buildTimeout), runEnv(envv), runDir(gccobjdir)); err != nil {
- return "", fmt.Errorf("Failed to build GCC: %s", err)
- }
-
- return gccobjdir, nil
-}
-
-func getenvOk(k string) (v string, ok bool) {
- v = os.Getenv(k)
- if v != "" {
- return v, true
- }
- keq := k + "="
- for _, kv := range os.Environ() {
- if kv == keq {
- return "", true
- }
- }
- return "", false
-}
-
-// extraEnv returns environment variables that need to be copied from
-// the gobuilder's environment to the envv of its subprocesses.
-func extraEnv() []string {
- extra := []string{
- "GOARM",
- "GO386",
- "CGO_ENABLED",
- "CC",
- "CC_FOR_TARGET",
- "PATH",
- "TMPDIR",
- "USER",
- }
- if runtime.GOOS == "plan9" {
- extra = append(extra, "objtype", "cputype", "path")
- }
- return extra
-}
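
To make the GOHOSTOS/GOHOSTARCH logic above concrete, a hypothetical linux-386 builder running with -tool=go would get roughly this environment from envv() (the trailing entries depend on which extraEnv variables are set on the host):

	GOOS=linux
	GOARCH=386
	GOROOT_FINAL=/usr/local/go
	GOHOSTOS=linux
	GOHOSTARCH=386
	PATH=...   (plus any other extraEnv variables present on the host)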
diff --git a/dashboard/builder/exec.go b/dashboard/builder/exec.go
deleted file mode 100644
index c40301f..0000000
--- a/dashboard/builder/exec.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-import (
- "fmt"
- "io"
- "log"
- "os/exec"
- "time"
-)
-
-// run runs the given command, applying any provided run options.
-func run(cmd *exec.Cmd, opts ...runOpt) error {
- a := runArgs{cmd, *cmdTimeout}
- for _, opt := range opts {
- opt.modArgs(&a)
- }
- if *verbose {
- log.Printf("running %v", a.cmd.Args)
- }
- if err := cmd.Start(); err != nil {
- return err
- }
- err := timeout(a.timeout, cmd.Wait)
- if _, ok := err.(timeoutError); ok {
- cmd.Process.Kill()
- }
- return err
-}
-
-// Zero or more runOpts can be passed to run to modify the command
-// before it's run.
-type runOpt interface {
- modArgs(*runArgs)
-}
-
-// allOutput sends both stdout and stderr to w.
-func allOutput(w io.Writer) optFunc {
- return func(a *runArgs) {
- a.cmd.Stdout = w
- a.cmd.Stderr = w
- }
-}
-
-func runTimeout(timeout time.Duration) optFunc {
- return func(a *runArgs) {
- a.timeout = timeout
- }
-}
-
-func runDir(dir string) optFunc {
- return func(a *runArgs) {
- a.cmd.Dir = dir
- }
-}
-
-func runEnv(env []string) optFunc {
- return func(a *runArgs) {
- a.cmd.Env = env
- }
-}
-
-// timeout runs f and returns its error value, or a timeout error if f does
-// not complete within the provided duration.
-func timeout(d time.Duration, f func() error) error {
- errc := make(chan error, 1)
- go func() {
- errc <- f()
- }()
- t := time.NewTimer(d)
- defer t.Stop()
- select {
- case <-t.C:
- return timeoutError(d)
- case err := <-errc:
- return err
- }
-}
-
-type timeoutError time.Duration
-
-func (e timeoutError) Error() string {
- return fmt.Sprintf("timed out after %v", time.Duration(e))
-}
-
-// optFunc implements runOpt with a function, like http.HandlerFunc.
-type optFunc func(*runArgs)
-
-func (f optFunc) modArgs(a *runArgs) { f(a) }
-
-// runArgs is an internal detail of exec.go:
-type runArgs struct {
- cmd *exec.Cmd
- timeout time.Duration
-}
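
A short sketch of how run and its options compose (the command and values are illustrative; the option helpers are the ones defined above):

	var buf bytes.Buffer
	cmd := exec.Command("go", "version")
	err := run(cmd, runTimeout(time.Minute), runDir("/tmp"), allOutput(&buf))
	// buf now holds the combined stdout/stderr; err is non-nil on failure,
	// including the case where the command exceeds the one-minute timeout.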
diff --git a/dashboard/builder/filemutex_flock.go b/dashboard/builder/filemutex_flock.go
deleted file mode 100644
index 68851b8..0000000
--- a/dashboard/builder/filemutex_flock.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin dragonfly freebsd linux netbsd openbsd
-
-package main
-
-import (
- "sync"
- "syscall"
-)
-
-// FileMutex is similar to sync.RWMutex, but also synchronizes across processes.
-// This implementation is based on the flock syscall.
-type FileMutex struct {
- mu sync.RWMutex
- fd int
-}
-
-func MakeFileMutex(filename string) *FileMutex {
- if filename == "" {
- return &FileMutex{fd: -1}
- }
- fd, err := syscall.Open(filename, syscall.O_CREAT|syscall.O_RDONLY, mkdirPerm)
- if err != nil {
- panic(err)
- }
- return &FileMutex{fd: fd}
-}
-
-func (m *FileMutex) Lock() {
- m.mu.Lock()
- if m.fd != -1 {
- if err := syscall.Flock(m.fd, syscall.LOCK_EX); err != nil {
- panic(err)
- }
- }
-}
-
-func (m *FileMutex) Unlock() {
- if m.fd != -1 {
- if err := syscall.Flock(m.fd, syscall.LOCK_UN); err != nil {
- panic(err)
- }
- }
- m.mu.Unlock()
-}
-
-func (m *FileMutex) RLock() {
- m.mu.RLock()
- if m.fd != -1 {
- if err := syscall.Flock(m.fd, syscall.LOCK_SH); err != nil {
- panic(err)
- }
- }
-}
-
-func (m *FileMutex) RUnlock() {
- if m.fd != -1 {
- if err := syscall.Flock(m.fd, syscall.LOCK_UN); err != nil {
- panic(err)
- }
- }
- m.mu.RUnlock()
-}
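
A brief sketch of the locking discipline FileMutex is meant for, mirroring how the benchmarking code elsewhere in this package uses it (the lock-file path is illustrative):

	fm := MakeFileMutex("/tmp/gobuilder.lock") // an empty path disables the file lock
	fm.RLock() // ordinary builds run under the shared lock
	// ... build ...
	fm.RUnlock()
	fm.Lock() // benchmarks take the exclusive lock, across processes as well
	// ... benchmark ...
	fm.Unlock()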
diff --git a/dashboard/builder/filemutex_local.go b/dashboard/builder/filemutex_local.go
deleted file mode 100644
index 68cfb62..0000000
--- a/dashboard/builder/filemutex_local.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build nacl plan9 solaris
-
-package main
-
-import (
- "log"
- "sync"
-)
-
-// FileMutex is similar to sync.RWMutex, but also synchronizes across processes.
-// This implementation is a fallback that does not actually provide inter-process synchronization.
-type FileMutex struct {
- sync.RWMutex
-}
-
-func MakeFileMutex(filename string) *FileMutex {
- return &FileMutex{}
-}
-
-func init() {
- log.Printf("WARNING: using fake file mutex." +
- " Don't run more than one of these at once!!!")
-}
diff --git a/dashboard/builder/filemutex_windows.go b/dashboard/builder/filemutex_windows.go
deleted file mode 100644
index 1f058b2..0000000
--- a/dashboard/builder/filemutex_windows.go
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-import (
- "sync"
- "syscall"
- "unsafe"
-)
-
-var (
- modkernel32 = syscall.NewLazyDLL("kernel32.dll")
- procLockFileEx = modkernel32.NewProc("LockFileEx")
- procUnlockFileEx = modkernel32.NewProc("UnlockFileEx")
-)
-
-const (
- INVALID_FILE_HANDLE = ^syscall.Handle(0)
- LOCKFILE_EXCLUSIVE_LOCK = 2
-)
-
-func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
- r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)))
- if r1 == 0 {
- if e1 != 0 {
- err = error(e1)
- } else {
- err = syscall.EINVAL
- }
- }
- return
-}
-
-func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
- r1, _, e1 := syscall.Syscall6(procUnlockFileEx.Addr(), 5, uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0)
- if r1 == 0 {
- if e1 != 0 {
- err = error(e1)
- } else {
- err = syscall.EINVAL
- }
- }
- return
-}
-
-// FileMutex is similar to sync.RWMutex, but also synchronizes across processes.
-// This implementation is based on the Windows LockFileEx/UnlockFileEx APIs.
-type FileMutex struct {
- mu sync.RWMutex
- fd syscall.Handle
-}
-
-func MakeFileMutex(filename string) *FileMutex {
- if filename == "" {
- return &FileMutex{fd: INVALID_FILE_HANDLE}
- }
- fd, err := syscall.CreateFile(&(syscall.StringToUTF16(filename)[0]), syscall.GENERIC_READ|syscall.GENERIC_WRITE,
- syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, nil, syscall.OPEN_ALWAYS, syscall.FILE_ATTRIBUTE_NORMAL, 0)
- if err != nil {
- panic(err)
- }
- return &FileMutex{fd: fd}
-}
-
-func (m *FileMutex) Lock() {
- m.mu.Lock()
- if m.fd != INVALID_FILE_HANDLE {
- var ol syscall.Overlapped
- if err := lockFileEx(m.fd, LOCKFILE_EXCLUSIVE_LOCK, 0, 1, 0, &ol); err != nil {
- panic(err)
- }
- }
-}
-
-func (m *FileMutex) Unlock() {
- if m.fd != INVALID_FILE_HANDLE {
- var ol syscall.Overlapped
- if err := unlockFileEx(m.fd, 0, 1, 0, &ol); err != nil {
- panic(err)
- }
- }
- m.mu.Unlock()
-}
-
-func (m *FileMutex) RLock() {
- m.mu.RLock()
- if m.fd != INVALID_FILE_HANDLE {
- var ol syscall.Overlapped
- if err := lockFileEx(m.fd, 0, 0, 1, 0, &ol); err != nil {
- panic(err)
- }
- }
-}
-
-func (m *FileMutex) RUnlock() {
- if m.fd != INVALID_FILE_HANDLE {
- var ol syscall.Overlapped
- if err := unlockFileEx(m.fd, 0, 1, 0, &ol); err != nil {
- panic(err)
- }
- }
- m.mu.RUnlock()
-}
diff --git a/dashboard/builder/http.go b/dashboard/builder/http.go
deleted file mode 100644
index 3fbad3a..0000000
--- a/dashboard/builder/http.go
+++ /dev/null
@@ -1,219 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-import (
- "bytes"
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "log"
- "net/http"
- "net/url"
- "time"
-)
-
-type obj map[string]interface{}
-
-// dash runs the given method and command on the dashboard.
-// If args is non-nil it is encoded as the URL query string.
-// If req is non-nil it is JSON-encoded and passed as the body of the HTTP POST.
-// If resp is non-nil the server's response is decoded into the value pointed
-// to by resp (resp must be a pointer).
-func dash(meth, cmd string, args url.Values, req, resp interface{}) error {
- var r *http.Response
- var err error
- if *verbose {
- log.Println("dash <-", meth, cmd, args, req)
- }
- cmd = *dashboard + "/" + cmd
- if len(args) > 0 {
- cmd += "?" + args.Encode()
- }
- switch meth {
- case "GET":
- if req != nil {
- log.Panicf("%s to %s with req", meth, cmd)
- }
- r, err = http.Get(cmd)
- case "POST":
- var body io.Reader
- if req != nil {
- b, err := json.Marshal(req)
- if err != nil {
- return err
- }
- body = bytes.NewBuffer(b)
- }
- r, err = http.Post(cmd, "text/json", body)
- default:
- log.Panicf("%s: invalid method %q", cmd, meth)
- panic("invalid method: " + meth)
- }
- if err != nil {
- return err
- }
- defer r.Body.Close()
- if r.StatusCode != http.StatusOK {
- return fmt.Errorf("bad http response: %v", r.Status)
- }
- body := new(bytes.Buffer)
- if _, err := body.ReadFrom(r.Body); err != nil {
- return err
- }
-
- // Read JSON-encoded Response into provided resp
- // and return an error if present.
- var result = struct {
- Response interface{}
- Error string
- }{
- // Put the provided resp in here as it can be a pointer to
- // some value we should unmarshal into.
- Response: resp,
- }
- if err = json.Unmarshal(body.Bytes(), &result); err != nil {
- log.Printf("json unmarshal %#q: %s\n", body.Bytes(), err)
- return err
- }
- if *verbose {
- log.Println("dash ->", result)
- }
- if result.Error != "" {
- return errors.New(result.Error)
- }
-
- return nil
-}
-
-// todo returns the next hash to build or benchmark.
-func (b *Builder) todo(kinds []string, pkg, goHash string) (kind, rev string, benchs []string, err error) {
- args := url.Values{
- "builder": {b.name},
- "packagePath": {pkg},
- "goHash": {goHash},
- }
- for _, k := range kinds {
- args.Add("kind", k)
- }
- var resp *struct {
- Kind string
- Data struct {
- Hash string
- PerfResults []string
- }
- }
- if err = dash("GET", "todo", args, nil, &resp); err != nil {
- return
- }
- if resp == nil {
- return
- }
- if *verbose {
- fmt.Printf("dash resp: %+v\n", *resp)
- }
- for _, k := range kinds {
- if k == resp.Kind {
- return resp.Kind, resp.Data.Hash, resp.Data.PerfResults, nil
- }
- }
- err = fmt.Errorf("expecting Kinds %q, got %q", kinds, resp.Kind)
- return
-}
-
-// recordResult sends build results to the dashboard
-func (b *Builder) recordResult(ok bool, pkg, hash, goHash, buildLog string, runTime time.Duration) error {
- if !*report {
- return nil
- }
- req := obj{
- "Builder": b.name,
- "PackagePath": pkg,
- "Hash": hash,
- "GoHash": goHash,
- "OK": ok,
- "Log": buildLog,
- "RunTime": runTime,
- }
- args := url.Values{"key": {b.key}, "builder": {b.name}}
- return dash("POST", "result", args, req, nil)
-}
-
-// Result of running a single benchmark on a single commit.
-type PerfResult struct {
- Builder string
- Benchmark string
- Hash string
- OK bool
- Metrics []PerfMetric
- Artifacts []PerfArtifact
-}
-
-type PerfMetric struct {
- Type string
- Val uint64
-}
-
-type PerfArtifact struct {
- Type string
- Body string
-}
-
-// recordPerfResult sends benchmarking results to the dashboard
-func (b *Builder) recordPerfResult(req *PerfResult) error {
- if !*report {
- return nil
- }
- req.Builder = b.name
- args := url.Values{"key": {b.key}, "builder": {b.name}}
- return dash("POST", "perf-result", args, req, nil)
-}
-
-func postCommit(key, pkg string, l *HgLog) error {
- if !*report {
- return nil
- }
- t, err := time.Parse(time.RFC3339, l.Date)
- if err != nil {
- return fmt.Errorf("parsing %q: %v", l.Date, t)
- }
- return dash("POST", "commit", url.Values{"key": {key}}, obj{
- "PackagePath": pkg,
- "Hash": l.Hash,
- "ParentHash": l.Parent,
- "Time": t.Format(time.RFC3339),
- "User": l.Author,
- "Desc": l.Desc,
- "NeedsBenchmarking": l.bench,
- }, nil)
-}
-
-func dashboardCommit(pkg, hash string) bool {
- err := dash("GET", "commit", url.Values{
- "packagePath": {pkg},
- "hash": {hash},
- }, nil, nil)
- return err == nil
-}
-
-func dashboardPackages(kind string) []string {
- args := url.Values{"kind": []string{kind}}
- var resp []struct {
- Path string
- }
- if err := dash("GET", "packages", args, nil, &resp); err != nil {
- log.Println("dashboardPackages:", err)
- return nil
- }
- if *verbose {
- fmt.Printf("dash resp: %+v\n", resp)
- }
- var pkgs []string
- for _, r := range resp {
- pkgs = append(pkgs, r.Path)
- }
- return pkgs
-}
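
For reference, the envelope dash decodes above looks roughly like the following; the package path and values are illustrative:

	// The dashboard replies with a JSON envelope such as
	//   {"Response": [{"Path": "golang.org/x/tools"}], "Error": ""}
	// which dash decodes into the caller-supplied resp:
	var resp []struct{ Path string }
	err := dash("GET", "packages", url.Values{"kind": {"subrepo"}}, nil, &resp)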
diff --git a/dashboard/builder/main.go b/dashboard/builder/main.go
deleted file mode 100644
index ef7bf4e..0000000
--- a/dashboard/builder/main.go
+++ /dev/null
@@ -1,831 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-import (
- "bytes"
- "flag"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "net"
- "net/http"
- "os"
- "os/exec"
- "path/filepath"
- "regexp"
- "runtime"
- "strconv"
- "strings"
- "time"
-
- "golang.org/x/tools/go/vcs"
-)
-
-const (
- codeProject = "go"
- codePyScript = "misc/dashboard/googlecode_upload.py"
- gofrontendImportPath = "code.google.com/p/gofrontend"
- mkdirPerm = 0750
- waitInterval = 30 * time.Second // time to wait before checking for new revs
- pkgBuildInterval = 24 * time.Hour // rebuild packages every 24 hours
-)
-
-type Builder struct {
- goroot *Repo
- name string
- goos, goarch string
- key string
- env builderEnv
- // Last benchmarking workpath. We reuse it if we do successive benchmarks on the same commit.
- lastWorkpath string
-}
-
-var (
- doBuild = flag.Bool("build", true, "Build and test packages")
- doBench = flag.Bool("bench", false, "Run benchmarks")
- buildroot = flag.String("buildroot", defaultBuildRoot(), "Directory under which to build")
- dashboard = flag.String("dashboard", "https://build.golang.org", "Dashboard app base path")
- buildRelease = flag.Bool("release", false, "Build and upload binary release archives")
- buildRevision = flag.String("rev", "", "Build specified revision and exit")
- buildCmd = flag.String("cmd", filepath.Join(".", allCmd), "Build command (specify relative to go/src/)")
- buildTool = flag.String("tool", "go", "Tool to build.")
- gcPath = flag.String("gcpath", "code.google.com/p/go", "Path to download gc from")
- gccPath = flag.String("gccpath", "https://github.com/mirrors/gcc.git", "Path to download gcc from")
- benchPath = flag.String("benchpath", "golang.org/x/benchmarks/bench", "Path to download benchmarks from")
- failAll = flag.Bool("fail", false, "fail all builds")
- parallel = flag.Bool("parallel", false, "Build multiple targets in parallel")
- buildTimeout = flag.Duration("buildTimeout", 60*time.Minute, "Maximum time to wait for builds and tests")
- cmdTimeout = flag.Duration("cmdTimeout", 10*time.Minute, "Maximum time to wait for an external command")
- commitInterval = flag.Duration("commitInterval", 1*time.Minute, "Time to wait between polling for new commits (0 disables commit poller)")
- commitWatch = flag.Bool("commitWatch", false, "run the commit watch loop only (do no builds)")
- benchNum = flag.Int("benchnum", 5, "Run each benchmark that many times")
- benchTime = flag.Duration("benchtime", 5*time.Second, "Benchmarking time for a single benchmark run")
- benchMem = flag.Int("benchmem", 64, "Approx RSS value to aim at in benchmarks, in MB")
- fileLock = flag.String("filelock", "", "File to lock around benchmaring (synchronizes several builders)")
- verbose = flag.Bool("v", false, "verbose")
- report = flag.Bool("report", true, "whether to report results to the dashboard")
-)
-
-var (
- binaryTagRe = regexp.MustCompile(`^(release\.r|weekly\.)[0-9\-.]+`)
- releaseRe = regexp.MustCompile(`^release\.r[0-9\-.]+`)
- allCmd = "all" + suffix
- makeCmd = "make" + suffix
- raceCmd = "race" + suffix
- cleanCmd = "clean" + suffix
- suffix = defaultSuffix()
- exeExt = defaultExeExt()
-
- benchCPU = CpuList([]int{1})
- benchAffinity = CpuList([]int{})
- benchMutex *FileMutex // Isolates benchmarks from other activities
-)
-
-// CpuList is used as flag.Value for -benchcpu flag.
-type CpuList []int
-
-func (cl *CpuList) String() string {
- str := ""
- for _, cpu := range *cl {
- if str == "" {
- str = strconv.Itoa(cpu)
- } else {
- str += fmt.Sprintf(",%v", cpu)
- }
- }
- return str
-}
-
-func (cl *CpuList) Set(str string) error {
- *cl = []int{}
- for _, val := range strings.Split(str, ",") {
- val = strings.TrimSpace(val)
- if val == "" {
- continue
- }
- cpu, err := strconv.Atoi(val)
- if err != nil || cpu <= 0 {
- return fmt.Errorf("%v is a bad value for GOMAXPROCS", val)
- }
- *cl = append(*cl, cpu)
- }
- if len(*cl) == 0 {
- *cl = append(*cl, 1)
- }
- return nil
-}
-
-func main() {
- flag.Var(&benchCPU, "benchcpu", "Comma-delimited list of GOMAXPROCS values for benchmarking")
- flag.Var(&benchAffinity, "benchaffinity", "Comma-delimited list of affinity values for benchmarking")
- flag.Usage = func() {
- fmt.Fprintf(os.Stderr, "usage: %s goos-goarch...\n", os.Args[0])
- flag.PrintDefaults()
- os.Exit(2)
- }
- flag.Parse()
- if len(flag.Args()) == 0 && !*commitWatch {
- flag.Usage()
- }
-
- vcs.ShowCmd = *verbose
- vcs.Verbose = *verbose
-
- benchMutex = MakeFileMutex(*fileLock)
-
- rr, err := repoForTool()
- if err != nil {
- log.Fatal("Error finding repository:", err)
- }
- rootPath := filepath.Join(*buildroot, "goroot")
- goroot := &Repo{
- Path: rootPath,
- Master: rr,
- }
-
- // set up work environment, use existing environment if possible
- if goroot.Exists() || *failAll {
- log.Print("Found old workspace, will use it")
- } else {
- if err := os.RemoveAll(*buildroot); err != nil {
- log.Fatalf("Error removing build root (%s): %s", *buildroot, err)
- }
- if err := os.Mkdir(*buildroot, mkdirPerm); err != nil {
- log.Fatalf("Error making build root (%s): %s", *buildroot, err)
- }
- var err error
- goroot, err = RemoteRepo(goroot.Master.Root, rootPath)
- if err != nil {
- log.Fatalf("Error creating repository with url (%s): %s", goroot.Master.Root, err)
- }
-
- goroot, err = goroot.Clone(goroot.Path, "tip")
- if err != nil {
- log.Fatal("Error cloning repository:", err)
- }
- }
-
- // set up builders
- builders := make([]*Builder, len(flag.Args()))
- for i, name := range flag.Args() {
- b, err := NewBuilder(goroot, name)
- if err != nil {
- log.Fatal(err)
- }
- builders[i] = b
- }
-
- if *failAll {
- failMode(builders)
- return
- }
-
- // if specified, build revision and return
- if *buildRevision != "" {
- hash, err := goroot.FullHash(*buildRevision)
- if err != nil {
- log.Fatal("Error finding revision: ", err)
- }
- var exitErr error
- for _, b := range builders {
- if err := b.buildHash(hash); err != nil {
- log.Println(err)
- exitErr = err
- }
- }
- if exitErr != nil && !*report {
- // This mode (-report=false) is used for
- // testing Docker images, making sure the
- // environment is correctly configured. For
- // testing, we want a non-zero exit status, as
- // returned by log.Fatal:
- log.Fatal("Build error.")
- }
- return
- }
-
- if !*doBuild && !*doBench {
- fmt.Fprintf(os.Stderr, "Nothing to do, exiting (specify either -build or -bench or both)\n")
- os.Exit(2)
- }
-
- // Start commit watcher.
- if *commitWatch {
- commitWatcher(goroot)
- return
- }
-
- // go continuous build mode
- // check for new commits and build them
- benchMutex.RLock()
- for {
- built := false
- t := time.Now()
- if *parallel {
- done := make(chan bool)
- for _, b := range builders {
- go func(b *Builder) {
- done <- b.buildOrBench()
- }(b)
- }
- for _ = range builders {
- built = <-done || built
- }
- } else {
- for _, b := range builders {
- built = b.buildOrBench() || built
- }
- }
- // sleep if there was nothing to build
- benchMutex.RUnlock()
- if !built {
- time.Sleep(waitInterval)
- }
- benchMutex.RLock()
- // sleep if we're looping too fast.
- dt := time.Now().Sub(t)
- if dt < waitInterval {
- time.Sleep(waitInterval - dt)
- }
- }
-}
-
-// go continuous fail mode
-// check for new commits and FAIL them
-func failMode(builders []*Builder) {
- for {
- built := false
- for _, b := range builders {
- built = b.failBuild() || built
- }
- // stop if there was nothing to fail
- if !built {
- break
- }
- }
-}
-
-func NewBuilder(goroot *Repo, name string) (*Builder, error) {
- b := &Builder{
- goroot: goroot,
- name: name,
- }
-
- // get builderEnv for this tool
- var err error
- if b.env, err = b.builderEnv(name); err != nil {
- return nil, err
- }
- if *report {
- err = b.setKey()
- }
- return b, err
-}
-
-func (b *Builder) setKey() error {
- // read keys from keyfile
- fn := ""
- switch runtime.GOOS {
- case "plan9":
- fn = os.Getenv("home")
- case "windows":
- fn = os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
- default:
- fn = os.Getenv("HOME")
- }
- fn = filepath.Join(fn, ".gobuildkey")
- if s := fn + "-" + b.name; isFile(s) { // builder-specific file
- fn = s
- }
- c, err := ioutil.ReadFile(fn)
- if err != nil {
- // If the on-disk file doesn't exist, also try the
- // Google Compute Engine metadata.
- if v := gceProjectMetadata("buildkey-" + b.name); v != "" {
- b.key = v
- return nil
- }
- return fmt.Errorf("readKeys %s (%s): %s", b.name, fn, err)
- }
- b.key = string(bytes.TrimSpace(bytes.SplitN(c, []byte("\n"), 2)[0]))
- return nil
-}
-
-func gceProjectMetadata(attr string) string {
- client := &http.Client{
- Transport: &http.Transport{
- Dial: (&net.Dialer{
- Timeout: 750 * time.Millisecond,
- KeepAlive: 30 * time.Second,
- }).Dial,
- ResponseHeaderTimeout: 750 * time.Millisecond,
- },
- }
- req, _ := http.NewRequest("GET", "http://metadata.google.internal/computeMetadata/v1/project/attributes/"+attr, nil)
- req.Header.Set("Metadata-Flavor", "Google")
- res, err := client.Do(req)
- if err != nil {
- return ""
- }
- defer res.Body.Close()
- if res.StatusCode != 200 {
- return ""
- }
- slurp, err := ioutil.ReadAll(res.Body)
- if err != nil {
- return ""
- }
- return string(bytes.TrimSpace(slurp))
-}
-
-// builderEnv returns the builderEnv for this buildTool.
-func (b *Builder) builderEnv(name string) (builderEnv, error) {
- // get goos/goarch from builder string
- s := strings.SplitN(b.name, "-", 3)
- if len(s) < 2 {
- return nil, fmt.Errorf("unsupported builder form: %s", name)
- }
- b.goos = s[0]
- b.goarch = s[1]
-
- switch *buildTool {
- case "go":
- return &goEnv{
- goos: s[0],
- goarch: s[1],
- }, nil
- case "gccgo":
- return &gccgoEnv{}, nil
- default:
- return nil, fmt.Errorf("unsupported build tool: %s", *buildTool)
- }
-}
-
-// buildCmd returns the build command to invoke.
-// Builders whose name contains the string '-race' will
-// override *buildCmd and return raceCmd.
-func (b *Builder) buildCmd() string {
- if strings.Contains(b.name, "-race") {
- return raceCmd
- }
- return *buildCmd
-}
-
-// buildOrBench checks for a new commit for this builder
-// and builds or benchmarks it if one is found.
-// It returns true if a build/benchmark was attempted.
-func (b *Builder) buildOrBench() bool {
- var kinds []string
- if *doBuild {
- kinds = append(kinds, "build-go-commit")
- }
- if *doBench {
- kinds = append(kinds, "benchmark-go-commit")
- }
- kind, hash, benchs, err := b.todo(kinds, "", "")
- if err != nil {
- log.Println(err)
- return false
- }
- if hash == "" {
- return false
- }
- switch kind {
- case "build-go-commit":
- if err := b.buildHash(hash); err != nil {
- log.Println(err)
- }
- return true
- case "benchmark-go-commit":
- if err := b.benchHash(hash, benchs); err != nil {
- log.Println(err)
- }
- return true
- default:
- log.Printf("Unknown todo kind %v", kind)
- return false
- }
-}
-
-func (b *Builder) buildHash(hash string) error {
- log.Println(b.name, "building", hash)
-
- // create place in which to do work
- workpath := filepath.Join(*buildroot, b.name+"-"+hash[:12])
- if err := os.Mkdir(workpath, mkdirPerm); err != nil {
- if err2 := removePath(workpath); err2 != nil {
- return err
- }
- if err := os.Mkdir(workpath, mkdirPerm); err != nil {
- return err
- }
- }
- defer removePath(workpath)
-
- buildLog, runTime, err := b.buildRepoOnHash(workpath, hash, b.buildCmd())
- if err != nil {
- // record failure
- return b.recordResult(false, "", hash, "", buildLog, runTime)
- }
-
- // record success
- if err = b.recordResult(true, "", hash, "", "", runTime); err != nil {
- return fmt.Errorf("recordResult: %s", err)
- }
-
- // build sub-repositories
- goRoot := filepath.Join(workpath, *buildTool)
- goPath := workpath
- b.buildSubrepos(goRoot, goPath, hash)
-
- return nil
-}
-
-// buildRepoOnHash clones repo into workpath and builds it.
-func (b *Builder) buildRepoOnHash(workpath, hash, cmd string) (buildLog string, runTime time.Duration, err error) {
- // Delete the previous workdir, if necessary
- // (benchmarking code can execute several benchmarks in the same workpath).
- if b.lastWorkpath != "" {
- if b.lastWorkpath == workpath {
- panic("workpath already exists: " + workpath)
- }
- removePath(b.lastWorkpath)
- b.lastWorkpath = ""
- }
-
- // pull before cloning to ensure we have the revision
- if err = b.goroot.Pull(); err != nil {
- buildLog = err.Error()
- return
- }
-
- // set up builder's environment.
- srcDir, err := b.env.setup(b.goroot, workpath, hash, b.envv())
- if err != nil {
- buildLog = err.Error()
- return
- }
-
- // build
- var buildbuf bytes.Buffer
- logfile := filepath.Join(workpath, "build.log")
- f, err := os.Create(logfile)
- if err != nil {
- return err.Error(), 0, err
- }
- defer f.Close()
- w := io.MultiWriter(f, &buildbuf)
-
- // go's build command is a script relative to the srcDir, whereas
- // gccgo's build command is usually "make check-go" in the srcDir.
- if *buildTool == "go" {
- if !filepath.IsAbs(cmd) {
- cmd = filepath.Join(srcDir, cmd)
- }
- }
-
- // naive splitting of command from its arguments:
- args := strings.Split(cmd, " ")
- c := exec.Command(args[0], args[1:]...)
- c.Dir = srcDir
- c.Env = b.envv()
- if *verbose {
- c.Stdout = io.MultiWriter(os.Stdout, w)
- c.Stderr = io.MultiWriter(os.Stderr, w)
- } else {
- c.Stdout = w
- c.Stderr = w
- }
-
- startTime := time.Now()
- err = run(c, runTimeout(*buildTimeout))
- runTime = time.Since(startTime)
- if err != nil {
- fmt.Fprintf(w, "Build complete, duration %v. Result: error: %v\n", runTime, err)
- } else {
- fmt.Fprintf(w, "Build complete, duration %v. Result: success\n", runTime)
- }
- return buildbuf.String(), runTime, err
-}
-
-// failBuild checks for a new commit for this builder
-// and fails it if one is found.
-// It returns true if a build was "attempted".
-func (b *Builder) failBuild() bool {
- _, hash, _, err := b.todo([]string{"build-go-commit"}, "", "")
- if err != nil {
- log.Println(err)
- return false
- }
- if hash == "" {
- return false
- }
-
- log.Printf("fail %s %s\n", b.name, hash)
-
- if err := b.recordResult(false, "", hash, "", "auto-fail mode run by "+os.Getenv("USER"), 0); err != nil {
- log.Print(err)
- }
- return true
-}
-
-func (b *Builder) buildSubrepos(goRoot, goPath, goHash string) {
- for _, pkg := range dashboardPackages("subrepo") {
- // get the latest todo for this package
- _, hash, _, err := b.todo([]string{"build-package"}, pkg, goHash)
- if err != nil {
- log.Printf("buildSubrepos %s: %v", pkg, err)
- continue
- }
- if hash == "" {
- continue
- }
-
- // build the package
- if *verbose {
- log.Printf("buildSubrepos %s: building %q", pkg, hash)
- }
- buildLog, err := b.buildSubrepo(goRoot, goPath, pkg, hash)
- if err != nil {
- if buildLog == "" {
- buildLog = err.Error()
- }
- log.Printf("buildSubrepos %s: %v", pkg, err)
- }
-
- // record the result
- err = b.recordResult(err == nil, pkg, hash, goHash, buildLog, 0)
- if err != nil {
- log.Printf("buildSubrepos %s: %v", pkg, err)
- }
- }
-}
-
-// buildSubrepo fetches the given package, updates it to the specified hash,
-// and runs 'go test -short pkg/...'. It returns the build log and any error.
-func (b *Builder) buildSubrepo(goRoot, goPath, pkg, hash string) (string, error) {
- goTool := filepath.Join(goRoot, "bin", "go") + exeExt
- env := append(b.envv(), "GOROOT="+goRoot, "GOPATH="+goPath)
-
- // add $GOROOT/bin and $GOPATH/bin to PATH
- for i, e := range env {
- const p = "PATH="
- if !strings.HasPrefix(e, p) {
- continue
- }
- sep := string(os.PathListSeparator)
- env[i] = p + filepath.Join(goRoot, "bin") + sep + filepath.Join(goPath, "bin") + sep + e[len(p):]
- }
-
- // HACK: check out to new sub-repo location instead of old location.
- pkg = strings.Replace(pkg, "code.google.com/p/go.", "golang.org/x/", 1)
-
- // fetch package and dependencies
- var outbuf bytes.Buffer
- err := run(exec.Command(goTool, "get", "-d", pkg+"/..."), runEnv(env), allOutput(&outbuf), runDir(goPath))
- if err != nil {
- return outbuf.String(), err
- }
- outbuf.Reset()
-
- // hg update to the specified hash
- pkgmaster, err := vcs.RepoRootForImportPath(pkg, *verbose)
- if err != nil {
- return "", fmt.Errorf("Error finding subrepo (%s): %s", pkg, err)
- }
- repo := &Repo{
- Path: filepath.Join(goPath, "src", pkg),
- Master: pkgmaster,
- }
- if err := repo.UpdateTo(hash); err != nil {
- return "", err
- }
-
- // test the package
- err = run(exec.Command(goTool, "test", "-short", pkg+"/..."),
- runTimeout(*buildTimeout), runEnv(env), allOutput(&outbuf), runDir(goPath))
- return outbuf.String(), err
-}
-
-// repoForTool returns the correct RepoRoot for the buildTool, or an error if
-// the tool is unknown.
-func repoForTool() (*vcs.RepoRoot, error) {
- switch *buildTool {
- case "go":
- return vcs.RepoRootForImportPath(*gcPath, *verbose)
- case "gccgo":
- return vcs.RepoRootForImportPath(gofrontendImportPath, *verbose)
- default:
- return nil, fmt.Errorf("unknown build tool: %s", *buildTool)
- }
-}
-
-func isDirectory(name string) bool {
- s, err := os.Stat(name)
- return err == nil && s.IsDir()
-}
-
-func isFile(name string) bool {
- s, err := os.Stat(name)
- return err == nil && !s.IsDir()
-}
-
-// commitWatcher polls hg for new commits and tells the dashboard about them.
-func commitWatcher(goroot *Repo) {
- if *commitInterval == 0 {
- log.Printf("commitInterval is 0; disabling commitWatcher")
- return
- }
- if !*report {
- log.Printf("-report is false; disabling commitWatcher")
- return
- }
- // Create builder just to get master key.
- b, err := NewBuilder(goroot, "mercurial-commit")
- if err != nil {
- log.Fatal(err)
- }
- key := b.key
-
- benchMutex.RLock()
- for {
- if *verbose {
- log.Printf("poll...")
- }
- // Main Go repository.
- commitPoll(goroot, "", key)
- // Go sub-repositories.
- for _, pkg := range dashboardPackages("subrepo") {
- pkgmaster, err := vcs.RepoRootForImportPath(pkg, *verbose)
- if err != nil {
- log.Printf("Error finding subrepo (%s): %s", pkg, err)
- continue
- }
- pkgroot := &Repo{
- Path: filepath.Join(*buildroot, pkg),
- Master: pkgmaster,
- }
- commitPoll(pkgroot, pkg, key)
- }
- benchMutex.RUnlock()
- if *verbose {
- log.Printf("sleep...")
- }
- time.Sleep(*commitInterval)
- benchMutex.RLock()
- }
-}
-
-// logByHash is a cache of all Mercurial revisions we know about,
-// indexed by full hash.
-var logByHash = map[string]*HgLog{}
-
-// commitPoll pulls any new revisions from the hg server
-// and tells the dashboard about them.
-func commitPoll(repo *Repo, pkg, key string) {
- pkgPath := filepath.Join(*buildroot, repo.Master.Root)
- if !repo.Exists() {
- var err error
- repo, err = RemoteRepo(pkg, pkgPath)
- if err != nil {
- log.Printf("Error cloning package (%s): %s", pkg, err)
- return
- }
-
- path := repo.Path
- repo, err = repo.Clone(path, "tip")
- if err != nil {
- log.Printf("%s: hg clone failed: %v", pkg, err)
- if err := os.RemoveAll(path); err != nil {
- log.Printf("%s: %v", pkg, err)
- }
- }
- return
- }
-
- logs, err := repo.Log() // repo.Log calls repo.Pull internally
- if err != nil {
- log.Printf("hg log: %v", err)
- return
- }
-
- // Pass 1. Fill in parents and add new log entries to logByHash.
- // An empty parent means: take the parent from the next log entry.
- // A non-empty parent has the form 1234:hashhashhash; we want the full hash.
- for i := range logs {
- l := &logs[i]
- if l.Parent == "" && i+1 < len(logs) {
- l.Parent = logs[i+1].Hash
- } else if l.Parent != "" {
- l.Parent, _ = repo.FullHash(l.Parent)
- }
- if *verbose {
- log.Printf("hg log %s: %s < %s\n", pkg, l.Hash, l.Parent)
- }
- if logByHash[l.Hash] == nil {
- l.bench = needsBenchmarking(l)
- // These fields were needed only for needsBenchmarking; clear them so they do not waste memory.
- l.Branch = ""
- l.Files = ""
- // Make copy to avoid pinning entire slice when only one entry is new.
- t := *l
- logByHash[t.Hash] = &t
- }
- }
-
- for _, l := range logs {
- addCommit(pkg, l.Hash, key)
- }
-}
-
-// addCommit adds the commit with the named hash to the dashboard.
-// key is the secret key for authentication to the dashboard.
-// It avoids duplicate effort.
-func addCommit(pkg, hash, key string) bool {
- l := logByHash[hash]
- if l == nil {
- return false
- }
- if l.added {
- return true
- }
-
- // Check whether the commit was already added, perhaps in an earlier run.
- if dashboardCommit(pkg, hash) {
- log.Printf("%s already on dashboard\n", hash)
- // Record that this hash is on the dashboard,
- // as must be all its parents.
- for l != nil {
- l.added = true
- l = logByHash[l.Parent]
- }
- return true
- }
-
- // Create parent first, to maintain some semblance of order.
- if l.Parent != "" {
- if !addCommit(pkg, l.Parent, key) {
- return false
- }
- }
-
- // Create commit.
- if err := postCommit(key, pkg, l); err != nil {
- log.Printf("failed to add %s to dashboard: %v", hash, err)
- return false
- }
- l.added = true
- return true
-}
-
-// defaultSuffix returns the file extension used for command files
-// in the current OS environment.
-func defaultSuffix() string {
- switch runtime.GOOS {
- case "windows":
- return ".bat"
- case "plan9":
- return ".rc"
- default:
- return ".bash"
- }
-}
-
-func defaultExeExt() string {
- switch runtime.GOOS {
- case "windows":
- return ".exe"
- default:
- return ""
- }
-}
-
-// defaultBuildRoot returns the default buildroot directory.
-func defaultBuildRoot() string {
- var d string
- if runtime.GOOS == "windows" {
- // Use c:\ on Windows; otherwise absolute paths become too long
- // during a builder run. See http://golang.org/issue/3358.
- d = `c:\`
- } else {
- d = os.TempDir()
- }
- return filepath.Join(d, "gobuilder")
-}
-
-// removePath is a more robust version of os.RemoveAll.
-// On Windows, if the remove fails (which can happen when a test or benchmark
-// times out and keeps some files open), it tries to rename the directory instead.
-func removePath(path string) error {
- if err := os.RemoveAll(path); err != nil {
- if runtime.GOOS == "windows" {
- err = os.Rename(path, filepath.Clean(path)+"_remove_me")
- }
- return err
- }
- return nil
-}
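
addCommit above posts a commit only after recursively ensuring that all of its ancestors are already on the dashboard. The following stripped-down sketch shows that parent-first ordering in isolation; the in-memory map and the hashes are made up and merely stand in for logByHash and the dashboard.

package main

import "fmt"

type commit struct {
	hash, parent string
	added        bool
}

// Hypothetical chain c1 <- c2 <- c3, standing in for logByHash.
var byHash = map[string]*commit{
	"c3": {hash: "c3", parent: "c2"},
	"c2": {hash: "c2", parent: "c1"},
	"c1": {hash: "c1", parent: ""},
}

// add posts hash only after its parent has been posted,
// mirroring addCommit's parent-first recursion.
func add(hash string) bool {
	c := byHash[hash]
	if c == nil {
		return false
	}
	if c.added {
		return true
	}
	if c.parent != "" && !add(c.parent) {
		return false
	}
	fmt.Println("posting", c.hash) // stands in for postCommit
	c.added = true
	return true
}

func main() {
	add("c3") // posts c1, then c2, then c3
}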
diff --git a/dashboard/builder/vcs.go b/dashboard/builder/vcs.go
deleted file mode 100644
index 9d94bdc..0000000
--- a/dashboard/builder/vcs.go
+++ /dev/null
@@ -1,212 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-import (
- "bytes"
- "encoding/xml"
- "fmt"
- "os"
- "os/exec"
- "path/filepath"
- "strings"
- "sync"
-
- "golang.org/x/tools/go/vcs"
-)
-
-// Repo represents a mercurial repository.
-type Repo struct {
- Path string
- Master *vcs.RepoRoot
- sync.Mutex
-}
-
-// RemoteRepo constructs a *Repo representing a remote repository.
-func RemoteRepo(url, path string) (*Repo, error) {
- rr, err := vcs.RepoRootForImportPath(url, *verbose)
- if err != nil {
- return nil, err
- }
- return &Repo{
- Path: path,
- Master: rr,
- }, nil
-}
-
-// Clone clones the current Repo to a new destination,
-// returning a new *Repo if successful.
-func (r *Repo) Clone(path, rev string) (*Repo, error) {
- r.Lock()
- defer r.Unlock()
-
- err := timeout(*cmdTimeout, func() error {
- downloadPath := r.Path
- if !r.Exists() {
- downloadPath = r.Master.Repo
- }
-
- err := r.Master.VCS.CreateAtRev(path, downloadPath, rev)
- if err != nil {
- return err
- }
- return r.Master.VCS.TagSync(path, "")
- })
- if err != nil {
- return nil, err
- }
- return &Repo{
- Path: path,
- Master: r.Master,
- }, nil
-}
-
-// Export exports the current Repo at revision rev to a new destination.
-func (r *Repo) Export(path, rev string) error {
- r.Lock()
- defer r.Unlock()
-
- downloadPath := r.Path
- if !r.Exists() {
- _, err := r.Clone(path, rev)
- return err
- }
-
- cmd := exec.Command(r.Master.VCS.Cmd, "archive", "-t", "files", "-r", rev, path)
- cmd.Dir = downloadPath
- if err := run(cmd); err != nil {
- return fmt.Errorf("executing %v: %v", cmd.Args, err)
- }
- return nil
-}
-
-// UpdateTo updates the working copy of this Repo to the
-// supplied revision.
-func (r *Repo) UpdateTo(hash string) error {
- r.Lock()
- defer r.Unlock()
-
- return timeout(*cmdTimeout, func() error {
- return r.Master.VCS.TagSync(r.Path, hash)
- })
-}
-
-// Exists reports whether this Repo represents a valid Mercurial repository.
-func (r *Repo) Exists() bool {
- fi, err := os.Stat(filepath.Join(r.Path, "."+r.Master.VCS.Cmd))
- if err != nil {
- return false
- }
- return fi.IsDir()
-}
-
-// Pull pulls changes from the default path, that is, the path
-// this Repo was cloned from.
-func (r *Repo) Pull() error {
- r.Lock()
- defer r.Unlock()
-
- return timeout(*cmdTimeout, func() error {
- return r.Master.VCS.Download(r.Path)
- })
-}
-
-// Log returns the changelog for this repository.
-func (r *Repo) Log() ([]HgLog, error) {
- if err := r.Pull(); err != nil {
- return nil, err
- }
- r.Lock()
- defer r.Unlock()
-
- var logStruct struct {
- Log []HgLog
- }
- err := timeout(*cmdTimeout, func() error {
- data, err := r.Master.VCS.Log(r.Path, xmlLogTemplate)
- if err != nil {
- return err
- }
-
- // At least one commit has a description that contains a 0x1b byte.
- // Mercurial does not escape it, but xml.Unmarshal does not accept it.
- data = bytes.Replace(data, []byte{0x1b}, []byte{'?'}, -1)
-
- err = xml.Unmarshal([]byte("<Top>"+string(data)+"</Top>"), &logStruct)
- if err != nil {
- return fmt.Errorf("unmarshal %s log: %v", r.Master.VCS, err)
- }
- return nil
- })
- if err != nil {
- return nil, err
- }
- for i, log := range logStruct.Log {
- // Let's pretend there can be only one parent.
- if log.Parent != "" && strings.Contains(log.Parent, " ") {
- logStruct.Log[i].Parent = strings.Split(log.Parent, " ")[0]
- }
- }
- return logStruct.Log, nil
-}
-
-// FullHash returns the full hash for the given Mercurial revision.
-func (r *Repo) FullHash(rev string) (string, error) {
- r.Lock()
- defer r.Unlock()
-
- var hash string
- err := timeout(*cmdTimeout, func() error {
- data, err := r.Master.VCS.LogAtRev(r.Path, rev, "{node}")
- if err != nil {
- return err
- }
-
- s := strings.TrimSpace(string(data))
- if s == "" {
- return fmt.Errorf("cannot find revision")
- }
- if len(s) != 40 {
- return fmt.Errorf("%s returned invalid hash: %s", r.Master.VCS, s)
- }
- hash = s
- return nil
- })
- if err != nil {
- return "", err
- }
- return hash, nil
-}
-
-// HgLog represents a single Mercurial revision.
-type HgLog struct {
- Hash string
- Author string
- Date string
- Desc string
- Parent string
- Branch string
- Files string
-
- // Internal metadata
- added bool
- bench bool // needs to be benchmarked?
-}
-
-// xmlLogTemplate is a template to pass to Mercurial to make
-// hg log print the log in valid XML for parsing with xml.Unmarshal.
-// We cannot escape branches and files, because doing so crashes Python with:
-// AttributeError: 'NoneType' object has no attribute 'replace'
-const xmlLogTemplate = `
- <Log>
- <Hash>{node|escape}</Hash>
- <Parent>{p1node}</Parent>
- <Author>{author|escape}</Author>
- <Date>{date|rfc3339date}</Date>
- <Desc>{desc|escape}</Desc>
- <Branch>{branches}</Branch>
- <Files>{files}</Files>
- </Log>
-`
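
The template above is the key trick behind Repo.Log: hg prints a bare sequence of <Log> elements, which only becomes well-formed XML once wrapped in a synthetic root element before xml.Unmarshal. A minimal, standalone sketch of that parsing step follows; the sample output is made up and merely imitates what hg would emit with such a template.

package main

import (
	"encoding/xml"
	"fmt"
	"log"
)

type logEntry struct {
	Hash   string
	Parent string
	Desc   string
}

func main() {
	// Hypothetical hg output produced with an xmlLogTemplate-style template.
	data := `
	<Log>
	  <Hash>aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa</Hash>
	  <Parent></Parent>
	  <Desc>initial commit</Desc>
	</Log>
	<Log>
	  <Hash>bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb</Hash>
	  <Parent>aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa</Parent>
	  <Desc>second commit</Desc>
	</Log>`

	// Wrap the element sequence in a synthetic root so xml.Unmarshal accepts it,
	// just as Repo.Log does with "<Top>" + data + "</Top>".
	var top struct{ Log []logEntry }
	if err := xml.Unmarshal([]byte("<Top>"+data+"</Top>"), &top); err != nil {
		log.Fatal(err)
	}
	for _, e := range top.Log {
		fmt.Println(e.Hash[:12], "parent:", e.Parent, "-", e.Desc)
	}
}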
diff --git a/dashboard/coordinator/Makefile b/dashboard/coordinator/Makefile
deleted file mode 100644
index ec1d88b..0000000
--- a/dashboard/coordinator/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-coordinator: main.go
- GOOS=linux go build -o coordinator .
-
-upload: coordinator
- cat coordinator | (cd buildongce && go run create.go --write_object=go-builder-data/coordinator)
-
diff --git a/dashboard/coordinator/buildongce/create.go b/dashboard/coordinator/buildongce/create.go
deleted file mode 100644
index d4f0cd6..0000000
--- a/dashboard/coordinator/buildongce/create.go
+++ /dev/null
@@ -1,306 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-import (
- "bufio"
- "bytes"
- "encoding/json"
- "flag"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "net/http"
- "os"
- "strings"
- "time"
-
- "code.google.com/p/goauth2/oauth"
- compute "code.google.com/p/google-api-go-client/compute/v1"
-)
-
-var (
- proj = flag.String("project", "symbolic-datum-552", "name of Project")
- zone = flag.String("zone", "us-central1-a", "GCE zone")
- mach = flag.String("machinetype", "n1-standard-16", "Machine type")
- instName = flag.String("instance_name", "go-builder-1", "Name of VM instance.")
- sshPub = flag.String("ssh_public_key", "", "ssh public key file to authorize. Can modify later in Google's web UI anyway.")
- staticIP = flag.String("static_ip", "", "Static IP to use. If empty, automatic.")
- reuseDisk = flag.Bool("reuse_disk", true, "Whether disk images should be reused between shutdowns/restarts.")
-
- writeObject = flag.String("write_object", "", "If non-empty, a VM isn't created and the flag value is the Google Cloud Storage bucket/object to write. The contents are read from stdin.")
-)
-
-func readFile(v string) string {
- slurp, err := ioutil.ReadFile(v)
- if err != nil {
- log.Fatalf("Error reading %s: %v", v, err)
- }
- return strings.TrimSpace(string(slurp))
-}
-
-var config = &oauth.Config{
- // The client-id and secret should be for an "Installed Application" when using
- // the CLI. Later we'll use a web application with a callback.
- ClientId: readFile("client-id.dat"),
- ClientSecret: readFile("client-secret.dat"),
- Scope: strings.Join([]string{
- compute.DevstorageFull_controlScope,
- compute.ComputeScope,
- "https://www.googleapis.com/auth/sqlservice",
- "https://www.googleapis.com/auth/sqlservice.admin",
- }, " "),
- AuthURL: "https://accounts.google.com/o/oauth2/auth",
- TokenURL: "https://accounts.google.com/o/oauth2/token",
- RedirectURL: "urn:ietf:wg:oauth:2.0:oob",
-}
-
-const baseConfig = `#cloud-config
-coreos:
- units:
- - name: gobuild.service
- command: start
- content: |
- [Unit]
- Description=Go Builders
- After=docker.service
- Requires=docker.service
-
- [Service]
- ExecStartPre=/bin/bash -c 'mkdir -p /opt/bin && curl -s -o /opt/bin/coordinator http://storage.googleapis.com/go-builder-data/coordinator && chmod +x /opt/bin/coordinator'
- ExecStart=/opt/bin/coordinator
- RestartSec=10s
- Restart=always
- Type=simple
-
- [Install]
- WantedBy=multi-user.target
-`
-
-func main() {
- flag.Parse()
- if *proj == "" {
- log.Fatalf("Missing --project flag")
- }
- prefix := "https://www.googleapis.com/compute/v1/projects/" + *proj
- machType := prefix + "/zones/" + *zone + "/machineTypes/" + *mach
-
- tr := &oauth.Transport{
- Config: config,
- }
-
- tokenCache := oauth.CacheFile("token.dat")
- token, err := tokenCache.Token()
- if err != nil {
- if *writeObject != "" {
- log.Fatalf("Can't use --write_object without a valid token.dat file already cached.")
- }
- log.Printf("Error getting token from %s: %v", string(tokenCache), err)
- log.Printf("Get auth code from %v", config.AuthCodeURL("my-state"))
- fmt.Print("\nEnter auth code: ")
- sc := bufio.NewScanner(os.Stdin)
- sc.Scan()
- authCode := strings.TrimSpace(sc.Text())
- token, err = tr.Exchange(authCode)
- if err != nil {
- log.Fatalf("Error exchanging auth code for a token: %v", err)
- }
- tokenCache.PutToken(token)
- }
-
- tr.Token = token
- oauthClient := &http.Client{Transport: tr}
- if *writeObject != "" {
- writeCloudStorageObject(oauthClient)
- return
- }
-
- computeService, _ := compute.New(oauthClient)
-
- natIP := *staticIP
- if natIP == "" {
- // Try to find it by name.
- aggAddrList, err := computeService.Addresses.AggregatedList(*proj).Do()
- if err != nil {
- log.Fatal(err)
- }
- // http://godoc.org/code.google.com/p/google-api-go-client/compute/v1#AddressAggregatedList
- IPLoop:
- for _, asl := range aggAddrList.Items {
- for _, addr := range asl.Addresses {
- if addr.Name == *instName+"-ip" && addr.Status == "RESERVED" {
- natIP = addr.Address
- break IPLoop
- }
- }
- }
- }
-
- cloudConfig := baseConfig
- if *sshPub != "" {
- key := strings.TrimSpace(readFile(*sshPub))
- cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n - %s\n", key)
- }
- if os.Getenv("USER") == "bradfitz" {
- cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n - %s\n", "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEAwks9dwWKlRC+73gRbvYtVg0vdCwDSuIlyt4z6xa/YU/jTDynM4R4W10hm2tPjy8iR1k8XhDv4/qdxe6m07NjG/By1tkmGpm1mGwho4Pr5kbAAy/Qg+NLCSdAYnnE00FQEcFOC15GFVMOW2AzDGKisReohwH9eIzHPzdYQNPRWXE= bradfitz@papag.bradfitz.com")
- }
- const maxCloudConfig = 32 << 10 // per compute API docs
- if len(cloudConfig) > maxCloudConfig {
- log.Fatalf("cloud config length of %d bytes is over %d byte limit", len(cloudConfig), maxCloudConfig)
- }
-
- instance := &compute.Instance{
- Name: *instName,
- Description: "Go Builder",
- MachineType: machType,
- Disks: []*compute.AttachedDisk{instanceDisk(computeService)},
- Tags: &compute.Tags{
- Items: []string{"http-server", "https-server"},
- },
- Metadata: &compute.Metadata{
- Items: []*compute.MetadataItems{
- {
- Key: "user-data",
- Value: cloudConfig,
- },
- },
- },
- NetworkInterfaces: []*compute.NetworkInterface{
- &compute.NetworkInterface{
- AccessConfigs: []*compute.AccessConfig{
- &compute.AccessConfig{
- Type: "ONE_TO_ONE_NAT",
- Name: "External NAT",
- NatIP: natIP,
- },
- },
- Network: prefix + "/global/networks/default",
- },
- },
- ServiceAccounts: []*compute.ServiceAccount{
- {
- Email: "default",
- Scopes: []string{
- compute.DevstorageFull_controlScope,
- compute.ComputeScope,
- },
- },
- },
- }
-
- log.Printf("Creating instance...")
- op, err := computeService.Instances.Insert(*proj, *zone, instance).Do()
- if err != nil {
- log.Fatalf("Failed to create instance: %v", err)
- }
- opName := op.Name
- log.Printf("Created. Waiting on operation %v", opName)
-OpLoop:
- for {
- time.Sleep(2 * time.Second)
- op, err := computeService.ZoneOperations.Get(*proj, *zone, opName).Do()
- if err != nil {
- log.Fatalf("Failed to get op %s: %v", opName, err)
- }
- switch op.Status {
- case "PENDING", "RUNNING":
- log.Printf("Waiting on operation %v", opName)
- continue
- case "DONE":
- if op.Error != nil {
- for _, operr := range op.Error.Errors {
- log.Printf("Error: %+v", operr)
- }
- log.Fatalf("Failed to start.")
- }
- log.Printf("Success. %+v", op)
- break OpLoop
- default:
- log.Fatalf("Unknown status %q: %+v", op.Status, op)
- }
- }
-
- inst, err := computeService.Instances.Get(*proj, *zone, *instName).Do()
- if err != nil {
- log.Fatalf("Error getting instance after creation: %v", err)
- }
- ij, _ := json.MarshalIndent(inst, "", " ")
- log.Printf("Instance: %s", ij)
-}
-
-func instanceDisk(svc *compute.Service) *compute.AttachedDisk {
- const imageURL = "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-402-2-0-v20140807"
- diskName := *instName + "-coreos-stateless-pd"
-
- if *reuseDisk {
- dl, err := svc.Disks.List(*proj, *zone).Do()
- if err != nil {
- log.Fatalf("Error listing disks: %v", err)
- }
- for _, disk := range dl.Items {
- if disk.Name != diskName {
- continue
- }
- return &compute.AttachedDisk{
- AutoDelete: false,
- Boot: true,
- DeviceName: diskName,
- Type: "PERSISTENT",
- Source: disk.SelfLink,
- Mode: "READ_WRITE",
-
- // The GCP web UI's "Show REST API" link includes a
- // "zone" parameter, but it's not in the API
- // description. But it wants this form (disk.Zone, a
- // full zone URL, not *zone):
- // Zone: disk.Zone,
- // ... but it seems to work without it. Keep this
- // comment here until I file a bug with the GCP
- // people.
- }
- }
- }
-
- return &compute.AttachedDisk{
- AutoDelete: !*reuseDisk,
- Boot: true,
- Type: "PERSISTENT",
- InitializeParams: &compute.AttachedDiskInitializeParams{
- DiskName: diskName,
- SourceImage: imageURL,
- DiskSizeGb: 50,
- },
- }
-}
-
-func writeCloudStorageObject(httpClient *http.Client) {
- content := os.Stdin
- const maxSlurp = 1 << 20
- var buf bytes.Buffer
- n, err := io.CopyN(&buf, content, maxSlurp)
- if err != nil && err != io.EOF {
- log.Fatalf("Error reading from stdin: %v, %v", n, err)
- }
- contentType := http.DetectContentType(buf.Bytes())
-
- req, err := http.NewRequest("PUT", "https://storage.googleapis.com/"+*writeObject, io.MultiReader(&buf, content))
- if err != nil {
- log.Fatal(err)
- }
- req.Header.Set("x-goog-api-version", "2")
- req.Header.Set("x-goog-acl", "public-read")
- req.Header.Set("Content-Type", contentType)
- res, err := httpClient.Do(req)
- if err != nil {
- log.Fatal(err)
- }
- if res.StatusCode != 200 {
- res.Write(os.Stderr)
- log.Fatalf("Failed.")
- }
- log.Printf("Success.")
- os.Exit(0)
-}
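
writeCloudStorageObject above sniffs the upload's Content-Type from at most the first megabyte of stdin and then streams the rest without buffering it, by recombining the sniffed prefix with the unread remainder via io.MultiReader. The reduced sketch below shows just that pattern; it reads from an in-memory string rather than stdin and prints the result instead of issuing the PUT.

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net/http"
	"strings"
)

func main() {
	// Stand-in for os.Stdin; the real tool pipes a coordinator binary in here.
	content := strings.NewReader("#!/bin/sh\necho hello\n")

	// Slurp at most 1 MB into a buffer; that is enough for content sniffing.
	const maxSlurp = 1 << 20
	var buf bytes.Buffer
	if _, err := io.CopyN(&buf, content, maxSlurp); err != nil && err != io.EOF {
		log.Fatal(err)
	}
	contentType := http.DetectContentType(buf.Bytes())

	// Stitch the sniffed prefix back onto the unread remainder to form the
	// request body, so nothing beyond the first megabyte is held in memory.
	body := io.MultiReader(&buf, content)
	all, _ := ioutil.ReadAll(body)
	fmt.Printf("content-type %q, %d bytes total\n", contentType, len(all))
}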
diff --git a/dashboard/coordinator/main.go b/dashboard/coordinator/main.go
deleted file mode 100644
index a855000..0000000
--- a/dashboard/coordinator/main.go
+++ /dev/null
@@ -1,458 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// The coordinator runs on GCE and coordinates builds in Docker containers.
-package main
-
-import (
- "bytes"
- "crypto/hmac"
- "crypto/md5"
- "encoding/json"
- "flag"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "net/http"
- "os"
- "os/exec"
- "sort"
- "strings"
- "sync"
- "time"
-)
-
-var (
- masterKeyFile = flag.String("masterkey", "", "Path to builder master key. Else fetched using GCE project attribute 'builder-master-key'.")
- maxBuilds = flag.Int("maxbuilds", 6, "Max concurrent builds")
-
- // Debug flags:
- addTemp = flag.Bool("temp", false, "Append -temp to all builders.")
- just = flag.String("just", "", "If non-empty, run a single build in the foreground. Requires -rev.")
- rev = flag.String("rev", "", "Revision to build.")
-)
-
-var (
- startTime = time.Now()
- builders = map[string]buildConfig{} // populated once at startup
- donec = make(chan builderRev) // reports of finished builders
-
- statusMu sync.Mutex
- status = map[builderRev]*buildStatus{}
-)
-
-type imageInfo struct {
- url string // of tar file
-
- mu sync.Mutex
- lastMod string
-}
-
-var images = map[string]*imageInfo{
- "gobuilders/linux-x86-base": {url: "https://storage.googleapis.com/go-builder-data/docker-linux.base.tar.gz"},
- "gobuilders/linux-x86-clang": {url: "https://storage.googleapis.com/go-builder-data/docker-linux.clang.tar.gz"},
- "gobuilders/linux-x86-gccgo": {url: "https://storage.googleapis.com/go-builder-data/docker-linux.gccgo.tar.gz"},
- "gobuilders/linux-x86-nacl": {url: "https://storage.googleapis.com/go-builder-data/docker-linux.nacl.tar.gz"},
- "gobuilders/linux-x86-sid": {url: "https://storage.googleapis.com/go-builder-data/docker-linux.sid.tar.gz"},
-}
-
-type buildConfig struct {
- name string // "linux-amd64-race"
- image string // Docker image to use to build
- cmd string // optional -cmd flag (relative to go/src/)
- env []string // extra environment ("key=value") pairs
- dashURL string // url of the build dashboard
- tool string // the tool this configuration is for
-}
-
-func main() {
- flag.Parse()
- addBuilder(buildConfig{name: "linux-386"})
- addBuilder(buildConfig{name: "linux-386-387", env: []string{"GO386=387"}})
- addBuilder(buildConfig{name: "linux-amd64"})
- addBuilder(buildConfig{name: "linux-amd64-nocgo", env: []string{"CGO_ENABLED=0", "USER=root"}})
- addBuilder(buildConfig{name: "linux-amd64-noopt", env: []string{"GO_GCFLAGS=-N -l"}})
- addBuilder(buildConfig{name: "linux-amd64-race"})
- addBuilder(buildConfig{name: "nacl-386"})
- addBuilder(buildConfig{name: "nacl-amd64p32"})
- addBuilder(buildConfig{
- name: "linux-amd64-gccgo",
- image: "gobuilders/linux-x86-gccgo",
- cmd: "make RUNTESTFLAGS=\"--target_board=unix/-m64\" check-go -j16",
- dashURL: "https://build.golang.org/gccgo",
- tool: "gccgo",
- })
- addBuilder(buildConfig{
- name: "linux-386-gccgo",
- image: "gobuilders/linux-x86-gccgo",
- cmd: "make RUNTESTFLAGS=\"--target_board=unix/-m32\" check-go -j16",
- dashURL: "https://build.golang.org/gccgo",
- tool: "gccgo",
- })
- addBuilder(buildConfig{name: "linux-386-sid", image: "gobuilders/linux-x86-sid"})
- addBuilder(buildConfig{name: "linux-amd64-sid", image: "gobuilders/linux-x86-sid"})
- addBuilder(buildConfig{name: "linux-386-clang", image: "gobuilders/linux-x86-clang"})
- addBuilder(buildConfig{name: "linux-amd64-clang", image: "gobuilders/linux-x86-clang"})
-
- if (*just != "") != (*rev != "") {
- log.Fatalf("--just and --rev must be used together")
- }
- if *just != "" {
- conf, ok := builders[*just]
- if !ok {
- log.Fatalf("unknown builder %q", *just)
- }
- cmd := exec.Command("docker", append([]string{"run"}, conf.dockerRunArgs(*rev)...)...)
- cmd.Stdout = os.Stdout
- cmd.Stderr = os.Stderr
- if err := cmd.Run(); err != nil {
- log.Fatalf("Build failed: %v", err)
- }
- return
- }
-
- http.HandleFunc("/", handleStatus)
- http.HandleFunc("/logs", handleLogs)
- go http.ListenAndServe(":80", nil)
-
- workc := make(chan builderRev)
- for name, builder := range builders {
- go findWorkLoop(name, builder.dashURL, workc)
- }
-
- ticker := time.NewTicker(1 * time.Minute)
- for {
- select {
- case work := <-workc:
- log.Printf("workc received %+v; len(status) = %v, maxBuilds = %v; cur = %p", work, len(status), *maxBuilds, status[work])
- mayBuild := mayBuildRev(work)
- if mayBuild {
- out, _ := exec.Command("docker", "ps").Output()
- numBuilds := bytes.Count(out, []byte("\n")) - 1
- log.Printf("num current docker builds: %d", numBuilds)
- if numBuilds > *maxBuilds {
- mayBuild = false
- }
- }
- if mayBuild {
- if st, err := startBuilding(builders[work.name], work.rev); err == nil {
- setStatus(work, st)
- log.Printf("%v now building in %v", work, st.container)
- } else {
- log.Printf("Error starting to build %v: %v", work, err)
- }
- }
- case done := <-donec:
- log.Printf("%v done", done)
- setStatus(done, nil)
- case <-ticker.C:
- if numCurrentBuilds() == 0 && time.Now().After(startTime.Add(10*time.Minute)) {
- // TODO: halt the whole machine to kill the VM or something
- }
- }
- }
-}
-
-func numCurrentBuilds() int {
- statusMu.Lock()
- defer statusMu.Unlock()
- return len(status)
-}
-
-func mayBuildRev(work builderRev) bool {
- statusMu.Lock()
- defer statusMu.Unlock()
- return len(status) < *maxBuilds && status[work] == nil
-}
-
-func setStatus(work builderRev, st *buildStatus) {
- statusMu.Lock()
- defer statusMu.Unlock()
- if st == nil {
- delete(status, work)
- } else {
- status[work] = st
- }
-}
-
-func getStatus(work builderRev) *buildStatus {
- statusMu.Lock()
- defer statusMu.Unlock()
- return status[work]
-}
-
-type byAge []*buildStatus
-
-func (s byAge) Len() int { return len(s) }
-func (s byAge) Less(i, j int) bool { return s[i].start.Before(s[j].start) }
-func (s byAge) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-
-func handleStatus(w http.ResponseWriter, r *http.Request) {
- var active []*buildStatus
- statusMu.Lock()
- for _, st := range status {
- active = append(active, st)
- }
- statusMu.Unlock()
-
- fmt.Fprintf(w, "<html><body><h1>Go build coordinator</h1>%d of max %d builds running:<p><pre>", len(status), *maxBuilds)
- sort.Sort(byAge(active))
- for _, st := range active {
- fmt.Fprintf(w, "%-22s hg %s in container <a href='/logs?name=%s&rev=%s'>%s</a>, %v ago\n", st.name, st.rev, st.name, st.rev,
- st.container, time.Now().Sub(st.start))
- }
- fmt.Fprintf(w, "</pre></body></html>")
-}
-
-func handleLogs(w http.ResponseWriter, r *http.Request) {
- st := getStatus(builderRev{r.FormValue("name"), r.FormValue("rev")})
- if st == nil {
- fmt.Fprintf(w, "<html><body><h1>not building</h1>")
- return
- }
- out, err := exec.Command("docker", "logs", st.container).CombinedOutput()
- if err != nil {
- log.Print(err)
- http.Error(w, "Error fetching logs. Already finished?", 500)
- return
- }
- key := builderKey(st.name)
- logs := strings.Replace(string(out), key, "BUILDERKEY", -1)
- w.Header().Set("Content-Type", "text/plain; charset=utf-8")
- io.WriteString(w, logs)
-}
-
-func findWorkLoop(builderName, dashURL string, work chan<- builderRev) {
- // TODO: make this better
- for {
- rev, err := findWork(builderName, dashURL)
- if err != nil {
- log.Printf("Finding work for %s: %v", builderName, err)
- } else if rev != "" {
- work <- builderRev{builderName, rev}
- }
- time.Sleep(60 * time.Second)
- }
-}
-
-func findWork(builderName, dashURL string) (rev string, err error) {
- var jres struct {
- Response struct {
- Kind string
- Data struct {
- Hash string
- PerfResults []string
- }
- }
- }
- res, err := http.Get(dashURL + "/todo?builder=" + builderName + "&kind=build-go-commit")
- if err != nil {
- return
- }
- defer res.Body.Close()
- if res.StatusCode != 200 {
- return "", fmt.Errorf("unexpected http status %d", res.StatusCode)
- }
- err = json.NewDecoder(res.Body).Decode(&jres)
- if jres.Response.Kind == "build-go-commit" {
- rev = jres.Response.Data.Hash
- }
- return rev, err
-}
-
-type builderRev struct {
- name, rev string
-}
-
-// dockerRunArgs returns the arguments that follow "docker run" on the command line.
-func (conf buildConfig) dockerRunArgs(rev string) (args []string) {
- if key := builderKey(conf.name); key != "" {
- tmpKey := "/tmp/" + conf.name + ".buildkey"
- if _, err := os.Stat(tmpKey); err != nil {
- if err := ioutil.WriteFile(tmpKey, []byte(key), 0600); err != nil {
- log.Fatal(err)
- }
- }
- // Images may look for .gobuildkey in / or /root, so provide both.
- // TODO(adg): fix images that look in the wrong place.
- args = append(args, "-v", tmpKey+":/.gobuildkey")
- args = append(args, "-v", tmpKey+":/root/.gobuildkey")
- }
- for _, pair := range conf.env {
- args = append(args, "-e", pair)
- }
- args = append(args,
- conf.image,
- "/usr/local/bin/builder",
- "-rev="+rev,
- "-dashboard="+conf.dashURL,
- "-tool="+conf.tool,
- "-buildroot=/",
- "-v",
- )
- if conf.cmd != "" {
- args = append(args, "-cmd", conf.cmd)
- }
- args = append(args, conf.name)
- return
-}
-
-func addBuilder(c buildConfig) {
- if c.name == "" {
- panic("empty name")
- }
- if *addTemp {
- c.name += "-temp"
- }
- if _, dup := builders[c.name]; dup {
- panic("dup name")
- }
- if c.dashURL == "" {
- c.dashURL = "https://build.golang.org"
- }
- if c.tool == "" {
- c.tool = "go"
- }
-
- if strings.HasPrefix(c.name, "nacl-") {
- if c.image == "" {
- c.image = "gobuilders/linux-x86-nacl"
- }
- if c.cmd == "" {
- c.cmd = "/usr/local/bin/build-command.pl"
- }
- }
- if strings.HasPrefix(c.name, "linux-") && c.image == "" {
- c.image = "gobuilders/linux-x86-base"
- }
- if c.image == "" {
- panic("empty image")
- }
- builders[c.name] = c
-}
-
-func condUpdateImage(img string) error {
- ii := images[img]
- if ii == nil {
- log.Fatalf("Image %q not described.", img)
- }
- ii.mu.Lock()
- defer ii.mu.Unlock()
- res, err := http.Head(ii.url)
- if err != nil {
- return fmt.Errorf("Error checking %s: %v", ii.url, err)
- }
- if res.StatusCode != 200 {
- return fmt.Errorf("Error checking %s: %v", ii.url, res.Status)
- }
- if res.Header.Get("Last-Modified") == ii.lastMod {
- return nil
- }
-
- res, err = http.Get(ii.url)
- if err != nil || res.StatusCode != 200 {
- return fmt.Errorf("Get after Head failed for %s: %v, %v", ii.url, err, res)
- }
- defer res.Body.Close()
-
- log.Printf("Running: docker load of %s\n", ii.url)
- cmd := exec.Command("docker", "load")
- cmd.Stdin = res.Body
-
- var out bytes.Buffer
- cmd.Stdout = &out
- cmd.Stderr = &out
-
- if err := cmd.Run(); err != nil {
- log.Printf("Failed to pull latest %s from %s and pipe into docker load: %v, %s", img, ii.url, err, out.Bytes())
- return err
- }
- ii.lastMod = res.Header.Get("Last-Modified")
- return nil
-}
-
-func startBuilding(conf buildConfig, rev string) (*buildStatus, error) {
- if err := condUpdateImage(conf.image); err != nil {
- log.Printf("Failed to setup container for %v %v: %v", conf.name, rev, err)
- return nil, err
- }
-
- cmd := exec.Command("docker", append([]string{"run", "-d"}, conf.dockerRunArgs(rev)...)...)
- all, err := cmd.CombinedOutput()
- log.Printf("Docker run for %v %v = err:%v, output:%s", conf.name, rev, err, all)
- if err != nil {
- return nil, err
- }
- container := strings.TrimSpace(string(all))
- go func() {
- all, err := exec.Command("docker", "wait", container).CombinedOutput()
- log.Printf("docker wait %s/%s: %v, %s", container, rev, err, strings.TrimSpace(string(all)))
- donec <- builderRev{conf.name, rev}
- exec.Command("docker", "rm", container).Run()
- }()
- return &buildStatus{
- builderRev: builderRev{
- name: conf.name,
- rev: rev,
- },
- container: container,
- start: time.Now(),
- }, nil
-}
-
-type buildStatus struct {
- builderRev
- container string
- start time.Time
-
- mu sync.Mutex
- // ...
-}
-
-func builderKey(builder string) string {
- master := masterKey()
- if len(master) == 0 {
- return ""
- }
- h := hmac.New(md5.New, master)
- io.WriteString(h, builder)
- return fmt.Sprintf("%x", h.Sum(nil))
-}
-
-func masterKey() []byte {
- keyOnce.Do(loadKey)
- return masterKeyCache
-}
-
-var (
- keyOnce sync.Once
- masterKeyCache []byte
-)
-
-func loadKey() {
- if *masterKeyFile != "" {
- b, err := ioutil.ReadFile(*masterKeyFile)
- if err != nil {
- log.Fatal(err)
- }
- masterKeyCache = bytes.TrimSpace(b)
- return
- }
- req, _ := http.NewRequest("GET", "http://metadata.google.internal/computeMetadata/v1/project/attributes/builder-master-key", nil)
- req.Header.Set("Metadata-Flavor", "Google")
- res, err := http.DefaultClient.Do(req)
- if err != nil {
- log.Fatal("No builder master key available")
- }
- defer res.Body.Close()
- if res.StatusCode != 200 {
- log.Fatalf("No builder-master-key project attribute available.")
- }
- slurp, err := ioutil.ReadAll(res.Body)
- if err != nil {
- log.Fatal(err)
- }
- masterKeyCache = bytes.TrimSpace(slurp)
-}
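
The per-builder keys that the coordinator mounts into containers as /tmp/<name>.buildkey are derived from the single master key by builderKey above: an HMAC-MD5 of the builder name, keyed by the master key. A self-contained sketch of that derivation, with placeholder values rather than real keys:

package main

import (
	"crypto/hmac"
	"crypto/md5"
	"fmt"
	"io"
)

// builderKey derives a per-builder key from the master key, mirroring the
// coordinator's scheme: HMAC-MD5 of the builder name, keyed by the master key.
func builderKey(master []byte, builder string) string {
	h := hmac.New(md5.New, master)
	io.WriteString(h, builder)
	return fmt.Sprintf("%x", h.Sum(nil))
}

func main() {
	master := []byte("example-master-key") // placeholder, not a real key
	fmt.Println(builderKey(master, "linux-amd64"))
	fmt.Println(builderKey(master, "linux-386-clang"))
}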
diff --git a/dashboard/env/linux-x86-base/Dockerfile b/dashboard/env/linux-x86-base/Dockerfile
deleted file mode 100644
index 7d9ed80..0000000
--- a/dashboard/env/linux-x86-base/Dockerfile
+++ /dev/null
@@ -1,16 +0,0 @@
-# Copyright 2014 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-# Base builder image: gobuilders/linux-x86-base
-
-FROM debian:wheezy
-MAINTAINER golang-dev <golang-dev@googlegroups.com>
-
-ENV DEBIAN_FRONTEND noninteractive
-
-ADD /scripts/install-apt-deps.sh /scripts/
-RUN /scripts/install-apt-deps.sh
-
-ADD /scripts/build-go-builder.sh /scripts/
-RUN GO_REV=8c27884843c3 BUILDER_REV=75944e2e3a63 /scripts/build-go-builder.sh && test -f /usr/local/bin/builder
diff --git a/dashboard/env/linux-x86-base/Makefile b/dashboard/env/linux-x86-base/Makefile
deleted file mode 100644
index d94baf6..0000000
--- a/dashboard/env/linux-x86-base/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-# Copyright 2014 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-docker: Dockerfile
- docker build -t gobuilders/linux-x86-base .
-
-docker-linux.base.tar.gz: docker
- docker save gobuilders/linux-x86-base | gzip | (cd ../../coordinator/buildongce && go run create.go --write_object=go-builder-data/docker-linux.base.tar.gz)
-
-check: docker
- docker run gobuilders/linux-x86-base /usr/local/bin/builder -rev=8c27884843c3 -buildroot=/ -v -report=false linux-amd64-temp
diff --git a/dashboard/env/linux-x86-base/README b/dashboard/env/linux-x86-base/README
deleted file mode 100644
index a511909..0000000
--- a/dashboard/env/linux-x86-base/README
+++ /dev/null
@@ -1,11 +0,0 @@
-For now, you can at least do a single build of a single revision:
-
-$ export BUILD=linux-amd64-temp
-$ docker run \
- -v $HOME/keys/$BUILD.buildkey:/.gobuildkey \
- gobuilders/linux-x86-base \
- /usr/local/bin/builder -rev=50ac9eded6ad -buildroot=/ -v $BUILD
-
-TODO(bradfitz): automate with CoreOS + GCE, ala:
- https://github.com/bradfitz/camlistore/blob/master/misc/gce/create.go
-
diff --git a/dashboard/env/linux-x86-base/scripts/build-go-builder.sh b/dashboard/env/linux-x86-base/scripts/build-go-builder.sh
deleted file mode 100755
index 097ac17..0000000
--- a/dashboard/env/linux-x86-base/scripts/build-go-builder.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-set -ex
-
-export GOPATH=/gopath
-export GOROOT=/goroot
-PREFIX=/usr/local
-: ${GO_REV:?"need to be set to the golang repo revision used to build the builder."}
-: ${BUILDER_REV:?"need to be set to the go.tools repo revision for the builder."}
-
-mkdir -p $GOROOT
-curl -s https://storage.googleapis.com/gobuilder/go-snap.tar.gz | tar x --no-same-owner -zv -C $GOROOT
-(cd $GOROOT/src && hg pull -r $GO_REV -u && find && ./make.bash)
-
-GO_TOOLS=$GOPATH/src/golang.org/x/tools
-mkdir -p $GO_TOOLS
-curl -s https://storage.googleapis.com/gobuilder/go.tools-snap.tar.gz | tar x --no-same-owner -zv -C $GO_TOOLS
-
-mkdir -p $PREFIX/bin
-(cd $GO_TOOLS && hg pull -r $BUILDER_REV -u && GOBIN=$PREFIX/bin /goroot/bin/go install golang.org/x/tools/dashboard/builder)
-
-rm -fR $GOROOT/bin $GOROOT/pkg $GOPATH
diff --git a/dashboard/env/linux-x86-base/scripts/install-apt-deps.sh b/dashboard/env/linux-x86-base/scripts/install-apt-deps.sh
deleted file mode 100755
index 839f4ad..0000000
--- a/dashboard/env/linux-x86-base/scripts/install-apt-deps.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-set -ex
-
-apt-get update
-# For running curl to get the hg starter tarballs (faster than hg clone).
-apt-get install -y --no-install-recommends curl ca-certificates
-# Optionally used by some net/http tests:
-apt-get install -y --no-install-recommends strace
-# For building Go's bootstrap 'dist' prog
-apt-get install -y --no-install-recommends gcc libc6-dev
-# For 32-bit builds:
-# TODO(bradfitz): move these into a 386 image that derives from this one.
-apt-get install -y --no-install-recommends libc6-dev-i386 gcc-multilib
-# For interacting with the Go source & subrepos:
-apt-get install -y --no-install-recommends mercurial git-core
-
-apt-get clean
-rm -fr /var/lib/apt/lists
diff --git a/dashboard/env/linux-x86-clang/Dockerfile b/dashboard/env/linux-x86-clang/Dockerfile
deleted file mode 100644
index 209375c..0000000
--- a/dashboard/env/linux-x86-clang/Dockerfile
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright 2014 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-# gobuilders/linux-x86-clang for building with clang instead of gcc.
-
-FROM debian:wheezy
-MAINTAINER golang-dev <golang-dev@googlegroups.com>
-
-ENV DEBIAN_FRONTEND noninteractive
-
-ADD /sources/clang-deps.list /etc/apt/sources.list.d/
-
-ADD /scripts/install-apt-deps.sh /scripts/
-RUN /scripts/install-apt-deps.sh
-
-ADD /scripts/build-go-builder.sh /scripts/
-RUN GO_REV=8c27884843c3 BUILDER_REV=75944e2e3a63 /scripts/build-go-builder.sh && test -f /usr/local/bin/builder
-
-ENV CC /usr/bin/clang
diff --git a/dashboard/env/linux-x86-clang/Makefile b/dashboard/env/linux-x86-clang/Makefile
deleted file mode 100644
index 5e1ed0f..0000000
--- a/dashboard/env/linux-x86-clang/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-docker: Dockerfile
- docker build -t gobuilders/linux-x86-clang .
-
-docker-linux.clang.tar.gz: docker
- docker save gobuilders/linux-x86-clang | gzip | (cd ../../coordinator/buildongce && go run create.go --write_object=go-builder-data/docker-linux.clang.tar.gz)
-
-check: docker
- docker run gobuilders/linux-x86-clang /usr/local/bin/builder -rev=8c27884843c3 -buildroot=/ -v -report=false linux-amd64-temp
-
-check32: docker
- docker run gobuilders/linux-x86-clang /usr/local/bin/builder -rev=8c27884843c3 -buildroot=/ -v -report=false linux-386-temp
diff --git a/dashboard/env/linux-x86-clang/scripts/build-go-builder.sh b/dashboard/env/linux-x86-clang/scripts/build-go-builder.sh
deleted file mode 100755
index 097ac17..0000000
--- a/dashboard/env/linux-x86-clang/scripts/build-go-builder.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-set -ex
-
-export GOPATH=/gopath
-export GOROOT=/goroot
-PREFIX=/usr/local
-: ${GO_REV:?"need to be set to the golang repo revision used to build the builder."}
-: ${BUILDER_REV:?"need to be set to the go.tools repo revision for the builder."}
-
-mkdir -p $GOROOT
-curl -s https://storage.googleapis.com/gobuilder/go-snap.tar.gz | tar x --no-same-owner -zv -C $GOROOT
-(cd $GOROOT/src && hg pull -r $GO_REV -u && find && ./make.bash)
-
-GO_TOOLS=$GOPATH/src/golang.org/x/tools
-mkdir -p $GO_TOOLS
-curl -s https://storage.googleapis.com/gobuilder/go.tools-snap.tar.gz | tar x --no-same-owner -zv -C $GO_TOOLS
-
-mkdir -p $PREFIX/bin
-(cd $GO_TOOLS && hg pull -r $BUILDER_REV -u && GOBIN=$PREFIX/bin /goroot/bin/go install golang.org/x/tools/dashboard/builder)
-
-rm -fR $GOROOT/bin $GOROOT/pkg $GOPATH
diff --git a/dashboard/env/linux-x86-clang/scripts/install-apt-deps.sh b/dashboard/env/linux-x86-clang/scripts/install-apt-deps.sh
deleted file mode 100755
index 1382dd6..0000000
--- a/dashboard/env/linux-x86-clang/scripts/install-apt-deps.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-set -ex
-
-apt-get update
-# For running curl to get the hg starter tarballs (faster than hg clone).
-apt-get install -y --no-install-recommends curl ca-certificates
-# Optionally used by some net/http tests:
-apt-get install -y --no-install-recommends strace
-# For fetching the LLVM apt repository key below:
-apt-get install -y --no-install-recommends wget
-wget -O - http://llvm.org/apt/llvm-snapshot.gpg.key | apt-key add -
-apt-get update
-apt-get install -y --no-install-recommends clang-3.5
-# TODO(cmang): move these into a 386 image that derives from this one.
-apt-get install -y --no-install-recommends libc6-dev-i386 gcc-multilib
-# Remove gcc binary so it doesn't interfere with clang
-rm -f /usr/bin/gcc
-# For interacting with the Go source & subrepos:
-apt-get install -y --no-install-recommends mercurial git-core
-
-apt-get clean
-rm -fr /var/lib/apt/lists
diff --git a/dashboard/env/linux-x86-clang/sources/clang-deps.list b/dashboard/env/linux-x86-clang/sources/clang-deps.list
deleted file mode 100644
index eb3e244..0000000
--- a/dashboard/env/linux-x86-clang/sources/clang-deps.list
+++ /dev/null
@@ -1,3 +0,0 @@
-# The Debian sources for stable clang builds, taken from http://llvm.org/apt/
-deb http://llvm.org/apt/wheezy/ llvm-toolchain-wheezy main
-deb-src http://llvm.org/apt/wheezy/ llvm-toolchain-wheezy main \ No newline at end of file
diff --git a/dashboard/env/linux-x86-gccgo/Dockerfile b/dashboard/env/linux-x86-gccgo/Dockerfile
deleted file mode 100644
index 2ccd0d9..0000000
--- a/dashboard/env/linux-x86-gccgo/Dockerfile
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright 2014 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-# gobuilders/linux-x86-gccgo for 32- and 64-bit gccgo.
-
-FROM debian:wheezy
-MAINTAINER golang-dev <golang-dev@googlegroups.com>
-
-ENV DEBIAN_FRONTEND noninteractive
-
-ADD /scripts/install-apt-deps.sh /scripts/
-RUN /scripts/install-apt-deps.sh
-
-ADD /scripts/install-gold.sh /scripts/
-RUN /scripts/install-gold.sh
-
-ADD /scripts/install-gccgo-builder.sh /scripts/
-RUN /scripts/install-gccgo-builder.sh && test -f /usr/local/bin/builder \ No newline at end of file
diff --git a/dashboard/env/linux-x86-gccgo/Makefile b/dashboard/env/linux-x86-gccgo/Makefile
deleted file mode 100644
index 9d5143f..0000000
--- a/dashboard/env/linux-x86-gccgo/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-docker: Dockerfile
- docker build -t gobuilders/linux-x86-gccgo .
-
-docker-linux.gccgo.tar.gz: docker
- docker save gobuilders/linux-x86-gccgo | gzip | (cd ../../coordinator/buildongce && go run create.go --write_object=go-builder-data/docker-linux.gccgo.tar.gz)
-
-check: docker
- docker run gobuilders/linux-x86-gccgo /usr/local/bin/builder -tool="gccgo" -rev=b9151e911a54 -v -cmd='make RUNTESTFLAGS="--target_board=unix/-m64" check-go' -report=false linux-amd64-gccgo-temp
-
-check32: docker
- docker run gobuilders/linux-x86-gccgo /usr/local/bin/builder -tool="gccgo" -rev=b9151e911a54 -v -cmd='make RUNTESTFLAGS="--target_board=unix/-m32" check-go' -report=false linux-386-gccgo-temp
diff --git a/dashboard/env/linux-x86-gccgo/README b/dashboard/env/linux-x86-gccgo/README
deleted file mode 100644
index 65180bc..0000000
--- a/dashboard/env/linux-x86-gccgo/README
+++ /dev/null
@@ -1,6 +0,0 @@
-$ export BUILD=linux-amd64-gccgo
-$ export BUILDREV=b9151e911a54
-$ docker run \
- -v $HOME/keys/$BUILD.buildkey:/.gobuildkey \
- gobuilders/linux-x86-gccgo \
- /usr/local/bin/builder -tool=gccgo -dashboard='https://build.golang.org/gccgo' -rev=$BUILDREV -buildroot=/gccgo -v -cmd='make check-go -kj' $BUILD
diff --git a/dashboard/env/linux-x86-gccgo/scripts/install-apt-deps.sh b/dashboard/env/linux-x86-gccgo/scripts/install-apt-deps.sh
deleted file mode 100755
index 90dbac1..0000000
--- a/dashboard/env/linux-x86-gccgo/scripts/install-apt-deps.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-set -ex
-
-apt-get update
-# For running curl to get the gccgo builder binary.
-apt-get install -y --no-install-recommends curl ca-certificates
-# Optionally used by some net/http tests:
-apt-get install -y --no-install-recommends strace
-# For using numeric libraries within GCC.
-apt-get install -y --no-install-recommends libgmp10-dev libmpc-dev libmpfr-dev
-# For building binutils and gcc from source.
-apt-get install -y --no-install-recommends make g++ flex bison
-# Same as above, but for 32-bit builds as well.
-apt-get install -y --no-install-recommends libc6-dev-i386 g++-multilib
-# For running the extended gccgo testsuite
-apt-get install -y --no-install-recommends dejagnu
-# For interacting with the gccgo source and git mirror:
-apt-get install -y --no-install-recommends mercurial git-core
-
-apt-get clean
-rm -rf /var/lib/apt/lists
diff --git a/dashboard/env/linux-x86-gccgo/scripts/install-gccgo-builder.sh b/dashboard/env/linux-x86-gccgo/scripts/install-gccgo-builder.sh
deleted file mode 100755
index fd3785d..0000000
--- a/dashboard/env/linux-x86-gccgo/scripts/install-gccgo-builder.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-set -ex
-
-# Installs a version of the go.tools dashboard builder that runs the gccgo build
-# command on the assumption that 16 cores are available, to speed up build times.
-# TODO(cmang): There should be an option in the builder to specify this.
-
-curl -o /usr/local/bin/builder http://storage.googleapis.com/go-builder-data/gccgo_builder && chmod +x /usr/local/bin/builder
diff --git a/dashboard/env/linux-x86-gccgo/scripts/install-gold.sh b/dashboard/env/linux-x86-gccgo/scripts/install-gold.sh
deleted file mode 100755
index 77f96ac..0000000
--- a/dashboard/env/linux-x86-gccgo/scripts/install-gold.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-set -ex
-
-# gccgo uses the Gold linker from binutils.
-export BINUTILS_VERSION=binutils-2.24
-mkdir -p binutils-objdir
-curl -s http://ftp.gnu.org/gnu/binutils/$BINUTILS_VERSION.tar.gz | tar x --no-same-owner -zv
-(cd binutils-objdir && ../$BINUTILS_VERSION/configure --enable-gold --enable-plugins --prefix=/opt/gold && make -sj && make install -sj)
-
-rm -rf binutils* \ No newline at end of file
diff --git a/dashboard/env/linux-x86-nacl/Dockerfile b/dashboard/env/linux-x86-nacl/Dockerfile
deleted file mode 100644
index b40385d..0000000
--- a/dashboard/env/linux-x86-nacl/Dockerfile
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright 2014 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-# gobuilders/linux-x86-nacl for 32- and 64-bit nacl.
-#
-# We need a more modern libc than the Debian stable used in the base image,
-# so we're using Ubuntu LTS here.
-#
-# TODO(bradfitz): make both be Ubuntu? But we also want Debian, Fedora,
-# etc. coverage, so deal with unifying these later, once there's a plan
-# or a generator for them and the other builders are turned down.
-
-FROM ubuntu:trusty
-MAINTAINER golang-dev <golang-dev@googlegroups.com>
-
-ENV DEBIAN_FRONTEND noninteractive
-
-ADD /scripts/install-apt-deps.sh /scripts/
-RUN /scripts/install-apt-deps.sh
-
-ADD /scripts/build-go-builder.sh /scripts/
-RUN GO_REV=8c27884843c3 BUILDER_REV=75944e2e3a63 /scripts/build-go-builder.sh && test -f /usr/local/bin/builder
-
-ADD build-command.pl /usr/local/bin/
-
-ENV PATH /usr/local/bin:$GOROOT/bin:$PATH
diff --git a/dashboard/env/linux-x86-nacl/Makefile b/dashboard/env/linux-x86-nacl/Makefile
deleted file mode 100644
index 3c2b7e3..0000000
--- a/dashboard/env/linux-x86-nacl/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-# Copyright 2014 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-docker: Dockerfile
- docker build -t gobuilders/linux-x86-nacl .
-
-upload: docker
- docker save gobuilders/linux-x86-nacl | gzip | (cd ../../coordinator/buildongce && go run create.go --write_object=go-builder-data/docker-linux.nacl.tar.gz)
-
-check: docker
- docker run gobuilders/linux-x86-nacl /usr/local/bin/builder -rev=8c27884843c3 -buildroot=/ -v -cmd=/usr/local/bin/build-command.pl -report=false nacl-amd64p32-temp
diff --git a/dashboard/env/linux-x86-nacl/README b/dashboard/env/linux-x86-nacl/README
deleted file mode 100644
index 5862ee1..0000000
--- a/dashboard/env/linux-x86-nacl/README
+++ /dev/null
@@ -1,6 +0,0 @@
-$ export BUILD=nacl-amd64p32-temp
-$ export BUILDREV=59b1bb4bf045
-$ docker run \
- -v $HOME/keys/$BUILD.buildkey:/.gobuildkey \
- gobuilders/linux-x86-nacl \
- /usr/local/bin/builder -rev=$BUILDREV -buildroot=/ -v -cmd=/usr/local/bin/build-command.pl $BUILD
diff --git a/dashboard/env/linux-x86-nacl/build-command.pl b/dashboard/env/linux-x86-nacl/build-command.pl
deleted file mode 100755
index 0eb9edb..0000000
--- a/dashboard/env/linux-x86-nacl/build-command.pl
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/perl
-
-use strict;
-
-if ($ENV{GOOS} eq "nacl") {
- delete $ENV{GOROOT_FINAL};
- exec("./nacltest.bash", @ARGV);
- die "Failed to run nacltest.bash: $!\n";
-}
-
-exec("./all.bash", @ARGV);
-die "Failed to run all.bash: $!\n";
-
diff --git a/dashboard/env/linux-x86-nacl/scripts/build-go-builder.sh b/dashboard/env/linux-x86-nacl/scripts/build-go-builder.sh
deleted file mode 100755
index 10bf847..0000000
--- a/dashboard/env/linux-x86-nacl/scripts/build-go-builder.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-set -ex
-
-export GOPATH=/gopath
-export GOROOT=/goroot
-PREFIX=/usr/local
-: ${GO_REV:?"need to be set to the golang repo revision used to build the builder."}
-: ${BUILDER_REV:?"need to be set to the go.tools repo revision for the builder."}
-
-mkdir -p $GOROOT
-curl -s https://storage.googleapis.com/gobuilder/go-snap.tar.gz | tar x --no-same-owner -zv -C $GOROOT
-(cd $GOROOT/src && hg pull -r $GO_REV -u && find && ./make.bash)
-
-GO_TOOLS=$GOPATH/src/golang.org/x/tools
-mkdir -p $GO_TOOLS
-curl -s https://storage.googleapis.com/gobuilder/go.tools-snap.tar.gz | tar x --no-same-owner -zv -C $GO_TOOLS
-
-mkdir -p $PREFIX/bin
-(cd $GO_TOOLS && hg pull -r $BUILDER_REV -u && GOBIN=$PREFIX/bin /goroot/bin/go install golang.org/x/tools/dashboard/builder)
-
-rm -fR $GOROOT/bin $GOROOT/pkg $GOPATH
-
-(cd /usr/local/bin && curl -s -O https://storage.googleapis.com/gobuilder/sel_ldr_x86_32 && chmod +x sel_ldr_x86_32)
-(cd /usr/local/bin && curl -s -O https://storage.googleapis.com/gobuilder/sel_ldr_x86_64 && chmod +x sel_ldr_x86_64)
-
-ln -s $GOROOT/misc/nacl/go_nacl_386_exec /usr/local/bin/
-ln -s $GOROOT/misc/nacl/go_nacl_amd64p32_exec /usr/local/bin/
diff --git a/dashboard/env/linux-x86-nacl/scripts/install-apt-deps.sh b/dashboard/env/linux-x86-nacl/scripts/install-apt-deps.sh
deleted file mode 100755
index f518646..0000000
--- a/dashboard/env/linux-x86-nacl/scripts/install-apt-deps.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-set -ex
-
-apt-get update
-# For running curl to get the hg starter tarballs (faster than hg clone).
-apt-get install -y --no-install-recommends curl ca-certificates
-# For building Go's bootstrap 'dist' prog
-apt-get install -y --no-install-recommends gcc libc6-dev
-# For interacting with the Go source & subrepos:
-apt-get install -y --no-install-recommends mercurial git-core
-# For 32-bit nacl:
-apt-get install -y --no-install-recommends libc6-i386 libc6-dev-i386 lib32stdc++6 gcc-multilib
-
-apt-get clean
-rm -fr /var/lib/apt/lists
diff --git a/dashboard/env/linux-x86-sid/Dockerfile b/dashboard/env/linux-x86-sid/Dockerfile
deleted file mode 100644
index 05a917c..0000000
--- a/dashboard/env/linux-x86-sid/Dockerfile
+++ /dev/null
@@ -1,14 +0,0 @@
-# Copyright 2014 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-FROM debian:sid
-MAINTAINER golang-dev <golang-dev@googlegroups.com>
-
-ENV DEBIAN_FRONTEND noninteractive
-
-ADD /scripts/install-apt-deps.sh /scripts/
-RUN /scripts/install-apt-deps.sh
-
-ADD /scripts/build-go-builder.sh /scripts/
-RUN GO_REV=8c27884843c3 BUILDER_REV=75944e2e3a63 /scripts/build-go-builder.sh && test -f /usr/local/bin/builder
diff --git a/dashboard/env/linux-x86-sid/Makefile b/dashboard/env/linux-x86-sid/Makefile
deleted file mode 100644
index e636e49..0000000
--- a/dashboard/env/linux-x86-sid/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-# Copyright 2014 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-docker: Dockerfile
- docker build -t gobuilders/linux-x86-sid .
-
-docker-linux.sid.tar.gz: docker
- docker save gobuilders/linux-x86-sid | gzip | (cd ../../coordinator/buildongce && go run create.go --write_object=go-builder-data/docker-linux.sid.tar.gz)
-
-check: docker
- docker run gobuilders/linux-x86-sid /usr/local/bin/builder -rev=8c27884843c3 -buildroot=/ -v -report=false linux-amd64-sid
diff --git a/dashboard/env/linux-x86-sid/scripts/build-go-builder.sh b/dashboard/env/linux-x86-sid/scripts/build-go-builder.sh
deleted file mode 100755
index 097ac17..0000000
--- a/dashboard/env/linux-x86-sid/scripts/build-go-builder.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-set -ex
-
-export GOPATH=/gopath
-export GOROOT=/goroot
-PREFIX=/usr/local
-: ${GO_REV:?"need to be set to the golang repo revision used to build the builder."}
-: ${BUILDER_REV:?"need to be set to the go.tools repo revision for the builder."}
-
-mkdir -p $GOROOT
-curl -s https://storage.googleapis.com/gobuilder/go-snap.tar.gz | tar x --no-same-owner -zv -C $GOROOT
-(cd $GOROOT/src && hg pull -r $GO_REV -u && find && ./make.bash)
-
-GO_TOOLS=$GOPATH/src/golang.org/x/tools
-mkdir -p $GO_TOOLS
-curl -s https://storage.googleapis.com/gobuilder/go.tools-snap.tar.gz | tar x --no-same-owner -zv -C $GO_TOOLS
-
-mkdir -p $PREFIX/bin
-(cd $GO_TOOLS && hg pull -r $BUILDER_REV -u && GOBIN=$PREFIX/bin /goroot/bin/go install golang.org/x/tools/dashboard/builder)
-
-rm -fR $GOROOT/bin $GOROOT/pkg $GOPATH
diff --git a/dashboard/env/linux-x86-sid/scripts/install-apt-deps.sh b/dashboard/env/linux-x86-sid/scripts/install-apt-deps.sh
deleted file mode 100755
index 839f4ad..0000000
--- a/dashboard/env/linux-x86-sid/scripts/install-apt-deps.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-set -ex
-
-apt-get update
-# For running curl to get the hg starter tarballs (faster than hg clone).
-apt-get install -y --no-install-recommends curl ca-certificates
-# Optionally used by some net/http tests:
-apt-get install -y --no-install-recommends strace
-# For building Go's bootstrap 'dist' prog
-apt-get install -y --no-install-recommends gcc libc6-dev
-# For 32-bit builds:
-# TODO(bradfitz): move these into a 386 image that derives from this one.
-apt-get install -y --no-install-recommends libc6-dev-i386 gcc-multilib
-# For interacting with the Go source & subrepos:
-apt-get install -y --no-install-recommends mercurial git-core
-
-apt-get clean
-rm -fr /var/lib/apt/lists
diff --git a/dashboard/updater/updater.go b/dashboard/updater/updater.go
deleted file mode 100644
index 0601611..0000000
--- a/dashboard/updater/updater.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-import (
- "bytes"
- "encoding/json"
- "encoding/xml"
- "flag"
- "fmt"
- "io/ioutil"
- "net/http"
- "net/url"
- "os"
- "os/exec"
- "strings"
-)
-
-var (
- builder = flag.String("builder", "", "builder name")
- key = flag.String("key", "", "builder key")
- gopath = flag.String("gopath", "", "path to go repo")
- dashboard = flag.String("dashboard", "build.golang.org", "Go Dashboard Host")
- batch = flag.Int("batch", 100, "upload batch size")
-)
-
-// Do not benchmark beyond this commit.
-// There is little sense in benchmarking all the way back to the first commit,
-// and the benchmarks won't build anyway.
-const Go1Commit = "0051c7442fed" // test/bench/shootout: update timing.log to Go 1.
-
-// HgLog represents a single Mercurial revision.
-type HgLog struct {
- Hash string
- Branch string
- Files string
-}
-
-func main() {
- flag.Parse()
- logs := hgLog()
- var hashes []string
- ngo1 := 0
- for i := range logs {
- if strings.HasPrefix(logs[i].Hash, Go1Commit) {
- break
- }
- if needsBenchmarking(&logs[i]) {
- hashes = append(hashes, logs[i].Hash)
- }
- ngo1++
- }
- fmt.Printf("found %v commits, %v after Go1, %v need benchmarking\n", len(logs), ngo1, len(hashes))
- for i := 0; i < len(hashes); i += *batch {
- j := i + *batch
- if j > len(hashes) {
- j = len(hashes)
- }
- fmt.Printf("sending %v-%v... ", i, j)
- res := postCommits(hashes[i:j])
- fmt.Printf("%s\n", res)
- }
-}
-
-func hgLog() []HgLog {
- var out bytes.Buffer
- cmd := exec.Command("hg", "log", "--encoding=utf-8", "--template", xmlLogTemplate)
- cmd.Dir = *gopath
- cmd.Stdout = &out
- cmd.Stderr = os.Stderr
- err := cmd.Run()
- if err != nil {
- fmt.Printf("failed to execute 'hg log': %v\n", err)
- os.Exit(1)
- }
- var top struct{ Log []HgLog }
- err = xml.Unmarshal([]byte("<Top>"+out.String()+"</Top>"), &top)
- if err != nil {
- fmt.Printf("failed to parse log: %v\n", err)
- os.Exit(1)
- }
- return top.Log
-}
-
-func needsBenchmarking(log *HgLog) bool {
- if log.Branch != "" {
- return false
- }
- for _, f := range strings.Split(log.Files, " ") {
- if (strings.HasPrefix(f, "include") || strings.HasPrefix(f, "src")) &&
- !strings.HasSuffix(f, "_test.go") && !strings.Contains(f, "testdata") {
- return true
- }
- }
- return false
-}
-
-func postCommits(hashes []string) string {
- args := url.Values{"builder": {*builder}, "key": {*key}}
- cmd := fmt.Sprintf("http://%v/updatebenchmark?%v", *dashboard, args.Encode())
- b, err := json.Marshal(hashes)
- if err != nil {
- return fmt.Sprintf("failed to encode request: %v\n", err)
- }
- r, err := http.Post(cmd, "text/json", bytes.NewReader(b))
- if err != nil {
- return fmt.Sprintf("failed to send http request: %v\n", err)
- }
- defer r.Body.Close()
- if r.StatusCode != http.StatusOK {
- return fmt.Sprintf("http request failed: %v\n", r.Status)
- }
- resp, err := ioutil.ReadAll(r.Body)
- if err != nil {
- return fmt.Sprintf("failed to read http response: %v\n", err)
- }
- return string(resp)
-}
-
-const xmlLogTemplate = `
- <Log>
- <Hash>{node|escape}</Hash>
- <Branch>{branches}</Branch>
- <Files>{files}</Files>
- </Log>
-`
diff --git a/dashboard/watcher/watcher.go b/dashboard/watcher/watcher.go
deleted file mode 100644
index 7c145f5..0000000
--- a/dashboard/watcher/watcher.go
+++ /dev/null
@@ -1,589 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Command watcher watches the specified repository for new commits
-// and reports them to the build dashboard.
-package main
-
-import (
- "bytes"
- "encoding/json"
- "encoding/xml"
- "errors"
- "flag"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "net/http"
- "net/url"
- "os"
- "os/exec"
- "path"
- "path/filepath"
- "runtime"
- "strings"
- "time"
-)
-
-var (
- repoURL = flag.String("repo", "https://code.google.com/p/go", "Repository URL")
- dashboard = flag.String("dash", "https://build.golang.org/", "Dashboard URL (must end in /)")
- keyFile = flag.String("key", defaultKeyFile, "Build dashboard key file")
- pollInterval = flag.Duration("poll", 10*time.Second, "Remote repo poll interval")
-)
-
-var (
- defaultKeyFile = filepath.Join(homeDir(), ".gobuildkey")
- dashboardKey = ""
-)
-
-// The first main repo commit on the dashboard; ignore commits before this.
-// This is for the main Go repo only.
-const dashboardStart = "2f970046e1ba96f32de62f5639b7141cda2e977c"
-
-func main() {
- flag.Parse()
-
- err := run()
- fmt.Fprintln(os.Stderr, err)
- os.Exit(1)
-}
-
-// run is a little wrapper so we can use defer and return to signal
-// errors. It should only return a non-nil error.
-func run() error {
- if !strings.HasSuffix(*dashboard, "/") {
- return errors.New("dashboard URL (-dash) must end in /")
- }
- if err := checkHgVersion(); err != nil {
- return err
- }
-
- if k, err := readKey(); err != nil {
- return err
- } else {
- dashboardKey = k
- }
-
- dir, err := ioutil.TempDir("", "watcher")
- if err != nil {
- return err
- }
- defer os.RemoveAll(dir)
-
- errc := make(chan error)
-
- go func() {
- r, err := NewRepo(dir, *repoURL, "")
- if err != nil {
- errc <- err
- return
- }
- errc <- r.Watch()
- }()
-
- subrepos, err := subrepoList()
- if err != nil {
- return err
- }
- for _, path := range subrepos {
- go func(path string) {
- url := "https://" + path
- r, err := NewRepo(dir, url, path)
- if err != nil {
- errc <- err
- return
- }
- errc <- r.Watch()
- }(path)
- }
-
- // Must be non-nil.
- return <-errc
-}
-
-// Repo represents a repository to be watched.
-type Repo struct {
- root string // on-disk location of the hg repo
- path string // base import path for repo (blank for main repo)
- commits map[string]*Commit // keyed by full commit hash (40 lowercase hex digits)
- branches map[string]*Branch // keyed by branch name, eg "release-branch.go1.3" (or empty for default)
-}
-
-// NewRepo checks out a new instance of the Mercurial repository
-// specified by url to a new directory inside dir.
-// The path argument is the base import path of the repository,
-// and should be empty for the main Go repo.
-func NewRepo(dir, url, path string) (*Repo, error) {
- r := &Repo{
- path: path,
- root: filepath.Join(dir, filepath.Base(path)),
- }
-
- r.logf("cloning %v", url)
- cmd := exec.Command("hg", "clone", url, r.root)
- if out, err := cmd.CombinedOutput(); err != nil {
- return nil, fmt.Errorf("%v\n\n%s", err, out)
- }
-
- r.logf("loading commit log")
- if err := r.loadCommits(); err != nil {
- return nil, err
- }
- if err := r.findBranches(); err != nil {
- return nil, err
- }
-
- r.logf("found %v branches among %v commits\n", len(r.branches), len(r.commits))
- return r, nil
-}
-
-// Watch continuously runs "hg pull" in the repo, checks for
-// new commits, and posts any new commits to the dashboard.
-// It only returns a non-nil error.
-func (r *Repo) Watch() error {
- for {
- if err := hgPull(r.root); err != nil {
- return err
- }
- if err := r.update(); err != nil {
- return err
- }
- for _, b := range r.branches {
- if err := r.postNewCommits(b); err != nil {
- return err
- }
- }
- time.Sleep(*pollInterval)
- }
-}
-
-func (r *Repo) logf(format string, args ...interface{}) {
- p := "go"
- if r.path != "" {
- p = path.Base(r.path)
- }
- log.Printf(p+": "+format, args...)
-}
-
-// postNewCommits looks for unseen commits on the specified branch and
-// posts them to the dashboard.
-func (r *Repo) postNewCommits(b *Branch) error {
- if b.Head == b.LastSeen {
- return nil
- }
- c := b.LastSeen
- if c == nil {
- // Haven't seen any: find the commit that this branch forked from.
-		for c = b.Head; c.Branch == b.Name; c = c.parent {
- }
- }
- // Add unseen commits on this branch, working forward from last seen.
- for c.children != nil {
- // Find the next commit on this branch.
- var next *Commit
- for _, c2 := range c.children {
- if c2.Branch != b.Name {
- continue
- }
- if next != nil {
- // Shouldn't happen, but be paranoid.
- return fmt.Errorf("found multiple children of %v on branch %q: %v and %v", c, b.Name, next, c2)
- }
- next = c2
- }
- if next == nil {
- // No more children on this branch, bail.
- break
- }
- // Found it.
- c = next
-
- if err := r.postCommit(c); err != nil {
- return err
- }
- b.LastSeen = c
- }
- return nil
-}
-
-// postCommit sends a commit to the build dashboard.
-func (r *Repo) postCommit(c *Commit) error {
- r.logf("sending commit to dashboard: %v", c)
-
- t, err := time.Parse(time.RFC3339, c.Date)
- if err != nil {
- return err
- }
- dc := struct {
- PackagePath string // (empty for main repo commits)
- Hash string
- ParentHash string
-
- User string
- Desc string
- Time time.Time
-
- NeedsBenchmarking bool
- }{
- PackagePath: r.path,
- Hash: c.Hash,
- ParentHash: c.Parent,
-
- User: c.Author,
- Desc: c.Desc,
- Time: t,
-
- NeedsBenchmarking: c.NeedsBenchmarking(),
- }
- b, err := json.Marshal(dc)
- if err != nil {
- return err
- }
-
- u := *dashboard + "commit?version=2&key=" + dashboardKey
- resp, err := http.Post(u, "text/json", bytes.NewReader(b))
- if err != nil {
- return err
- }
- if resp.StatusCode != 200 {
- return fmt.Errorf("status: %v", resp.Status)
- }
- return nil
-}
-
-// loadCommits runs "hg log" and populates the Repo's commit map.
-func (r *Repo) loadCommits() error {
- log, err := hgLog(r.root)
- if err != nil {
- return err
- }
- r.commits = make(map[string]*Commit)
- for _, c := range log {
- r.commits[c.Hash] = c
- }
- for _, c := range r.commits {
- if p, ok := r.commits[c.Parent]; ok {
- c.parent = p
- p.children = append(p.children, c)
- }
- }
- return nil
-}
-
-// findBranches finds branch heads in the Repo's commit map
-// and populates its branch map.
-func (r *Repo) findBranches() error {
- r.branches = make(map[string]*Branch)
- for _, c := range r.commits {
- if c.children == nil {
- if !validHead(c) {
- continue
- }
- seen, err := r.lastSeen(c.Hash)
- if err != nil {
- return err
- }
- b := &Branch{Name: c.Branch, Head: c, LastSeen: seen}
- r.branches[c.Branch] = b
- r.logf("found branch: %v", b)
- }
- }
- return nil
-}
-
-// validHead reports whether the specified commit should be considered a branch
-// head. It considers pre-go1 branches and certain specific commits as invalid.
-func validHead(c *Commit) bool {
-	// Pre-go1 release branches are irrelevant.
- if strings.HasPrefix(c.Branch, "release-branch.r") {
- return false
- }
- // Not sure why these revisions have no child commits,
- // but they're old so let's just ignore them.
- if c.Hash == "b59f4ff1b51094314f735a4d57a2b8f06cfadf15" ||
- c.Hash == "fc75f13840b896e82b9fa6165cf705fbacaf019c" {
- return false
- }
- // All other branches are valid.
- return true
-}
-
-// update looks for new commits and branches in the repository
-// and updates the commits and branches maps.
-func (r *Repo) update() error {
- // TODO(adg): detect new branches with "hg branches".
-
- // Check each branch for new commits.
- for _, b := range r.branches {
-
- // Find all commits on this branch from known head.
- // The logic of this function assumes that "hg log $HASH:"
- // returns hashes in the order they were committed (parent first).
- bname := b.Name
- if bname == "" {
- bname = "default"
- }
- log, err := hgLog(r.root, "-r", b.Head.Hash+":", "-b", bname)
- if err != nil {
- return err
- }
-
- // Add unknown commits to r.commits, and update branch head.
- for _, c := range log {
- // Ignore if we already know this commit.
- if _, ok := r.commits[c.Hash]; ok {
- continue
- }
- r.logf("found new commit %v", c)
-
- // Sanity check that we're looking at a commit on this branch.
- if c.Branch != b.Name {
- return fmt.Errorf("hg log gave us a commit from wrong branch: want %q, got %q", b.Name, c.Branch)
- }
-
- // Find parent commit.
- p, ok := r.commits[c.Parent]
- if !ok {
- return fmt.Errorf("can't find parent hash %q for %v", c.Parent, c)
- }
-
- // Link parent and child Commits.
- c.parent = p
- p.children = append(p.children, c)
-
- // Update branch head.
- b.Head = c
-
- // Add new commit to map.
- r.commits[c.Hash] = c
- }
- }
-
- return nil
-}
-
-// lastSeen finds the most recent commit the dashboard has seen,
-// starting at the specified head. If the dashboard hasn't seen
-// any of the commits from head to the beginning, it returns nil.
-func (r *Repo) lastSeen(head string) (*Commit, error) {
- h, ok := r.commits[head]
- if !ok {
- return nil, fmt.Errorf("lastSeen: can't find %q in commits", head)
- }
-
- var s []*Commit
- for c := h; c != nil; c = c.parent {
- s = append(s, c)
- if r.path == "" && c.Hash == dashboardStart {
- break
- }
- }
-
- for _, c := range s {
- v := url.Values{"hash": {c.Hash}, "packagePath": {r.path}}
- u := *dashboard + "commit?" + v.Encode()
- r, err := http.Get(u)
- if err != nil {
- return nil, err
- }
- var resp struct {
- Error string
- }
- err = json.NewDecoder(r.Body).Decode(&resp)
- r.Body.Close()
- if err != nil {
- return nil, err
- }
- switch resp.Error {
- case "":
- // Found one.
- return c, nil
- case "Commit not found":
- // Commit not found, keep looking for earlier commits.
- continue
- default:
- return nil, fmt.Errorf("dashboard: %v", resp.Error)
- }
- }
-
- // Dashboard saw no commits.
- return nil, nil
-}
-
-// hgLog runs "hg log" with the supplied arguments
-// and parses the output into Commit values.
-func hgLog(dir string, args ...string) ([]*Commit, error) {
- args = append([]string{"log", "--template", xmlLogTemplate}, args...)
- cmd := exec.Command("hg", args...)
- cmd.Dir = dir
- out, err := cmd.CombinedOutput()
- if err != nil {
- return nil, err
- }
-
-	// We have a commit whose description contains a 0x1b byte.
- // Mercurial does not escape it, but xml.Unmarshal does not accept it.
- out = bytes.Replace(out, []byte{0x1b}, []byte{'?'}, -1)
-
- xr := io.MultiReader(
- strings.NewReader("<Top>"),
- bytes.NewReader(out),
- strings.NewReader("</Top>"),
- )
- var logStruct struct {
- Log []*Commit
- }
- err = xml.NewDecoder(xr).Decode(&logStruct)
- if err != nil {
- return nil, err
- }
- return logStruct.Log, nil
-}
-
-// hgPull runs "hg pull" in the specified directory.
-// It tries three times, just in case it failed because of a transient error.
-func hgPull(dir string) error {
- var err error
- for tries := 0; tries < 3; tries++ {
- time.Sleep(time.Duration(tries) * 5 * time.Second) // Linear back-off.
- cmd := exec.Command("hg", "pull")
- cmd.Dir = dir
-		if out, e := cmd.CombinedOutput(); e != nil {
- e = fmt.Errorf("%v\n\n%s", e, out)
- log.Printf("hg pull error %v: %v", dir, e)
- if err == nil {
- err = e
- }
- continue
- }
- return nil
- }
- return err
-}
-
-// Branch represents a Mercurial branch.
-type Branch struct {
- Name string
- Head *Commit
- LastSeen *Commit // the last commit posted to the dashboard
-}
-
-func (b *Branch) String() string {
- return fmt.Sprintf("%q(Head: %v LastSeen: %v)", b.Name, b.Head, b.LastSeen)
-}
-
-// Commit represents a single Mercurial revision.
-type Commit struct {
- Hash string
- Author string
- Date string
- Desc string // Plain text, first linefeed-terminated line is a short description.
- Parent string
- Branch string
- Files string
-
- // For walking the graph.
- parent *Commit
- children []*Commit
-}
-
-func (c *Commit) String() string {
- return fmt.Sprintf("%v(%q)", c.Hash, strings.SplitN(c.Desc, "\n", 2)[0])
-}
-
-// NeedsBenchmarking reports whether the Commit needs benchmarking.
-func (c *Commit) NeedsBenchmarking() bool {
- // Do not benchmark branch commits, they are usually not interesting
- // and fall out of the trunk succession.
- if c.Branch != "" {
- return false
- }
- // Do not benchmark commits that do not touch source files (e.g. CONTRIBUTORS).
- for _, f := range strings.Split(c.Files, " ") {
- if (strings.HasPrefix(f, "include") || strings.HasPrefix(f, "src")) &&
- !strings.HasSuffix(f, "_test.go") && !strings.Contains(f, "testdata") {
- return true
- }
- }
- return false
-}
-
-// xmlLogTemplate is a template to pass to Mercurial to make
-// hg log print the log in valid XML for parsing with xml.Unmarshal.
-// Can not escape branches and files, because it crashes python with:
-// AttributeError: 'NoneType' object has no attribute 'replace'
-const xmlLogTemplate = `
- <Log>
- <Hash>{node|escape}</Hash>
- <Parent>{p1node}</Parent>
- <Author>{author|escape}</Author>
- <Date>{date|rfc3339date}</Date>
- <Desc>{desc|escape}</Desc>
- <Branch>{branches}</Branch>
- <Files>{files}</Files>
- </Log>
-`
-
-func homeDir() string {
- switch runtime.GOOS {
- case "plan9":
- return os.Getenv("home")
- case "windows":
- return os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
- }
- return os.Getenv("HOME")
-}
-
-func readKey() (string, error) {
- c, err := ioutil.ReadFile(*keyFile)
- if err != nil {
- return "", err
- }
- return string(bytes.TrimSpace(bytes.SplitN(c, []byte("\n"), 2)[0])), nil
-}
-
-// subrepoList fetches a list of sub-repositories from the dashboard
-// and returns them as a slice of base import paths.
-// E.g., []string{"golang.org/x/tools", "golang.org/x/net"}.
-func subrepoList() ([]string, error) {
- r, err := http.Get(*dashboard + "packages?kind=subrepo")
- if err != nil {
- return nil, err
- }
- var resp struct {
- Response []struct {
- Path string
- }
- Error string
- }
- err = json.NewDecoder(r.Body).Decode(&resp)
- r.Body.Close()
- if err != nil {
- return nil, err
- }
- if resp.Error != "" {
- return nil, errors.New(resp.Error)
- }
- var pkgs []string
- for _, r := range resp.Response {
- pkgs = append(pkgs, r.Path)
- }
- return pkgs, nil
-}
-
-// checkHgVersion checks whether the installed version of hg supports the
-// template features we need. (May not be precise.)
-func checkHgVersion() error {
- out, err := exec.Command("hg", "help", "templates").CombinedOutput()
- if err != nil {
- return fmt.Errorf("error running hg help templates: %v\n\n%s", err, out)
- }
- if !bytes.Contains(out, []byte("p1node")) {
- return errors.New("installed hg doesn't support 'p1node' template keyword; please upgrade")
- }
- return nil
-}
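hgPull above retries a transient failure with a linear back-off before giving up. The same pattern, pulled out as a generic helper, looks roughly like this (a sketch; the helper name and the choice of command are illustrative, not part of the original):

	package main

	import (
		"fmt"
		"log"
		"os/exec"
		"time"
	)

	// runWithRetry runs the command up to three times, sleeping a little
	// longer before each retry, and reports the first error it saw if
	// every attempt fails.
	func runWithRetry(dir, name string, args ...string) error {
		var first error
		for tries := 0; tries < 3; tries++ {
			time.Sleep(time.Duration(tries) * 5 * time.Second) // linear back-off
			cmd := exec.Command(name, args...)
			cmd.Dir = dir
			if out, err := cmd.CombinedOutput(); err != nil {
				err = fmt.Errorf("%v\n\n%s", err, out)
				log.Printf("%s %v error in %v: %v", name, args, dir, err)
				if first == nil {
					first = err
				}
				continue
			}
			return nil
		}
		return first
	}

	func main() {
		if err := runWithRetry(".", "hg", "pull"); err != nil {
			log.Fatal(err)
		}
	}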
diff --git a/astutil/enclosing.go b/go/ast/astutil/enclosing.go
index 2de739e..2de739e 100644
--- a/astutil/enclosing.go
+++ b/go/ast/astutil/enclosing.go
diff --git a/astutil/enclosing_test.go b/go/ast/astutil/enclosing_test.go
index ace6974..107f87c 100644
--- a/astutil/enclosing_test.go
+++ b/go/ast/astutil/enclosing_test.go
@@ -18,7 +18,7 @@ import (
"strings"
"testing"
- "golang.org/x/tools/astutil"
+ "golang.org/x/tools/go/ast/astutil"
)
// pathToString returns a string containing the concrete types of the
diff --git a/astutil/imports.go b/go/ast/astutil/imports.go
index 27262d3..29f52de 100644
--- a/astutil/imports.go
+++ b/go/ast/astutil/imports.go
@@ -3,17 +3,12 @@
// license that can be found in the LICENSE file.
// Package astutil contains common utilities for working with the Go AST.
-package astutil
+package astutil // import "golang.org/x/tools/go/ast/astutil"
import (
- "bufio"
- "bytes"
"fmt"
"go/ast"
- "go/format"
- "go/parser"
"go/token"
- "log"
"strconv"
"strings"
)
@@ -46,17 +41,18 @@ func AddNamedImport(fset *token.FileSet, f *ast.File, name, ipath string) (added
}
// Find an import decl to add to.
+ // The goal is to find an existing import
+ // whose import path has the longest shared
+ // prefix with ipath.
var (
- bestMatch = -1
- lastImport = -1
- impDecl *ast.GenDecl
- impIndex = -1
- hasImports = false
+ bestMatch = -1 // length of longest shared prefix
+ lastImport = -1 // index in f.Decls of the file's final import decl
+ impDecl *ast.GenDecl // import decl containing the best match
+ impIndex = -1 // spec index in impDecl containing the best match
)
for i, decl := range f.Decls {
gen, ok := decl.(*ast.GenDecl)
if ok && gen.Tok == token.IMPORT {
- hasImports = true
lastImport = i
// Do not add to import "C", to avoid disrupting the
// association with its doc comment, breaking cgo.
@@ -64,7 +60,12 @@ func AddNamedImport(fset *token.FileSet, f *ast.File, name, ipath string) (added
continue
}
- // Compute longest shared prefix with imports in this block.
+ // Match an empty import decl if that's all that is available.
+ if len(gen.Specs) == 0 && bestMatch == -1 {
+ impDecl = gen
+ }
+
+ // Compute longest shared prefix with imports in this group.
for j, spec := range gen.Specs {
impspec := spec.(*ast.ImportSpec)
n := matchLen(importPath(impspec), ipath)
@@ -79,49 +80,57 @@ func AddNamedImport(fset *token.FileSet, f *ast.File, name, ipath string) (added
// If no import decl found, add one after the last import.
if impDecl == nil {
- // TODO(bradfitz): remove this hack. See comment below on
- // addImportViaSourceModification.
- if !hasImports {
- f2, err := addImportViaSourceModification(fset, f, name, ipath)
- if err == nil {
- *f = *f2
- return true
- }
- log.Printf("addImportViaSourceModification error: %v", err)
- }
-
- // TODO(bradfitz): fix above and resume using this old code:
impDecl = &ast.GenDecl{
Tok: token.IMPORT,
}
+ if lastImport >= 0 {
+ impDecl.TokPos = f.Decls[lastImport].End()
+ } else {
+ // There are no existing imports.
+ // Our new import goes after the package declaration and after
+ // the comment, if any, that starts on the same line as the
+ // package declaration.
+ impDecl.TokPos = f.Package
+
+ file := fset.File(f.Package)
+ pkgLine := file.Line(f.Package)
+ for _, c := range f.Comments {
+ if file.Line(c.Pos()) > pkgLine {
+ break
+ }
+ impDecl.TokPos = c.End()
+ }
+ }
f.Decls = append(f.Decls, nil)
copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:])
f.Decls[lastImport+1] = impDecl
}
- // Ensure the import decl has parentheses, if needed.
- if len(impDecl.Specs) > 0 && !impDecl.Lparen.IsValid() {
- impDecl.Lparen = impDecl.Pos()
- }
-
- insertAt := impIndex + 1
- if insertAt == 0 {
- insertAt = len(impDecl.Specs)
+ // Insert new import at insertAt.
+ insertAt := 0
+ if impIndex >= 0 {
+ // insert after the found import
+ insertAt = impIndex + 1
}
impDecl.Specs = append(impDecl.Specs, nil)
copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:])
impDecl.Specs[insertAt] = newImport
+ pos := impDecl.Pos()
if insertAt > 0 {
// Assign same position as the previous import,
// so that the sorter sees it as being in the same block.
- prev := impDecl.Specs[insertAt-1]
- newImport.Path.ValuePos = prev.Pos()
- newImport.EndPos = prev.Pos()
+ pos = impDecl.Specs[insertAt-1].Pos()
}
- if len(impDecl.Specs) > 1 && impDecl.Lparen == 0 {
- // set Lparen to something not zero, so the printer prints
- // the full block rather just the first ImportSpec.
- impDecl.Lparen = 1
+ newImport.Path.ValuePos = pos
+ newImport.EndPos = pos
+
+ // Clean up parens. impDecl contains at least one spec.
+ if len(impDecl.Specs) == 1 {
+ // Remove unneeded parens.
+ impDecl.Lparen = token.NoPos
+ } else if !impDecl.Lparen.IsValid() {
+ // impDecl needs parens added.
+ impDecl.Lparen = impDecl.Specs[0].Pos()
}
f.Imports = append(f.Imports, newImport)
@@ -343,29 +352,3 @@ func Imports(fset *token.FileSet, f *ast.File) [][]*ast.ImportSpec {
return groups
}
-
-// NOTE(bradfitz): this is a bit of a hack for golang.org/issue/6884
-// because we can't get the comment positions correct. Instead of modifying
-// the AST, we print it, modify the text, and re-parse it. Gross.
-func addImportViaSourceModification(fset *token.FileSet, f *ast.File, name, ipath string) (*ast.File, error) {
- var buf bytes.Buffer
- if err := format.Node(&buf, fset, f); err != nil {
- return nil, fmt.Errorf("Error formatting ast.File node: %v", err)
- }
- var out bytes.Buffer
- sc := bufio.NewScanner(bytes.NewReader(buf.Bytes()))
- didAdd := false
- for sc.Scan() {
- ln := sc.Text()
- out.WriteString(ln)
- out.WriteByte('\n')
- if !didAdd && strings.HasPrefix(ln, "package ") {
- fmt.Fprintf(&out, "\nimport %s %q\n\n", name, ipath)
- didAdd = true
- }
- }
- if err := sc.Err(); err != nil {
- return nil, err
- }
- return parser.ParseFile(fset, "", out.Bytes(), parser.ParseComments)
-}
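The rewritten AddNamedImport above now copes with files that have no import declarations, empty import groups, and comments on the package line, placing the new declaration after the package clause and any same-line comment. A small usage sketch (the source text is hypothetical; AddImport is the convenience wrapper for an unnamed import):

	package main

	import (
		"go/parser"
		"go/printer"
		"go/token"
		"log"
		"os"

		"golang.org/x/tools/go/ast/astutil"
	)

	func main() {
		const src = "package main // comment\n\ntype T time.Time\n"

		fset := token.NewFileSet()
		f, err := parser.ParseFile(fset, "t.go", src, parser.ParseComments)
		if err != nil {
			log.Fatal(err)
		}

		// The new import declaration lands after the package clause and
		// its same-line comment, as in the issue 8729 test cases above.
		astutil.AddImport(fset, f, "time")

		printer.Fprint(os.Stdout, fset, f)
	}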
diff --git a/astutil/imports_test.go b/go/ast/astutil/imports_test.go
index 3621972..6bc940c 100644
--- a/astutil/imports_test.go
+++ b/go/ast/astutil/imports_test.go
@@ -143,7 +143,7 @@ import (
`,
},
{
- name: "import into singular block",
+ name: "import into singular group",
pkg: "bytes",
in: `package main
@@ -159,6 +159,44 @@ import (
`,
},
{
+ name: "import into singular group with comment",
+ pkg: "bytes",
+ in: `package main
+
+import /* why */ /* comment here? */ "os"
+
+`,
+ out: `package main
+
+import /* why */ /* comment here? */ (
+ "bytes"
+ "os"
+)
+`,
+ },
+ {
+ name: "import into group with leading comment",
+ pkg: "strings",
+ in: `package main
+
+import (
+ // comment before bytes
+ "bytes"
+ "os"
+)
+
+`,
+ out: `package main
+
+import (
+ // comment before bytes
+ "bytes"
+ "os"
+ "strings"
+)
+`,
+ },
+ {
name: "",
renamedPkg: "fmtpkg",
pkg: "fmt",
@@ -195,6 +233,88 @@ type T struct {
}
`,
},
+ {
+ name: "issue 8729 import C",
+ pkg: "time",
+ in: `package main
+
+import "C"
+
+// comment
+type T time.Time
+`,
+ out: `package main
+
+import "C"
+import "time"
+
+// comment
+type T time.Time
+`,
+ },
+ {
+ name: "issue 8729 empty import",
+ pkg: "time",
+ in: `package main
+
+import ()
+
+// comment
+type T time.Time
+`,
+ out: `package main
+
+import "time"
+
+// comment
+type T time.Time
+`,
+ },
+ {
+ name: "issue 8729 comment on package line",
+ pkg: "time",
+ in: `package main // comment
+
+type T time.Time
+`,
+ out: `package main // comment
+import "time"
+
+type T time.Time
+`,
+ },
+ {
+ name: "issue 8729 comment after package",
+ pkg: "time",
+ in: `package main
+// comment
+
+type T time.Time
+`,
+ out: `package main
+
+import "time"
+
+// comment
+
+type T time.Time
+`,
+ },
+ {
+ name: "issue 8729 comment before and on package line",
+ pkg: "time",
+ in: `// comment before
+package main // comment on
+
+type T time.Time
+`,
+ out: `// comment before
+package main // comment on
+import "time"
+
+type T time.Time
+`,
+ },
}
func TestAddImport(t *testing.T) {
@@ -233,6 +353,34 @@ import (
}
}
+// Part of issue 8729.
+func TestDoubleAddImportWithDeclComment(t *testing.T) {
+ file := parse(t, "doubleimport", `package main
+
+import (
+)
+
+// comment
+type I int
+`)
+ // The AddImport order here matters.
+ AddImport(fset, file, "golang.org/x/tools/go/ast/astutil")
+ AddImport(fset, file, "os")
+ want := `package main
+
+import (
+ "golang.org/x/tools/go/ast/astutil"
+ "os"
+)
+
+// comment
+type I int
+`
+ if got := print(t, "doubleimport_with_decl_comment", file); got != want {
+ t.Errorf("got: %s\nwant: %s", got, want)
+ }
+}
+
var deleteTests = []test{
{
name: "import.4",
@@ -743,9 +891,9 @@ func TestImports(t *testing.T) {
continue
}
var got [][]string
- for _, block := range Imports(fset, f) {
+ for _, group := range Imports(fset, f) {
var b []string
- for _, spec := range block {
+ for _, spec := range group {
b = append(b, unquote(spec.Path.Value))
}
got = append(got, b)
diff --git a/go/ast/astutil/util.go b/go/ast/astutil/util.go
new file mode 100644
index 0000000..7630629
--- /dev/null
+++ b/go/ast/astutil/util.go
@@ -0,0 +1,14 @@
+package astutil
+
+import "go/ast"
+
+// Unparen returns e with any enclosing parentheses stripped.
+func Unparen(e ast.Expr) ast.Expr {
+ for {
+ p, ok := e.(*ast.ParenExpr)
+ if !ok {
+ return e
+ }
+ e = p.X
+ }
+}
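Unparen is a small helper, but it replaces a loop that callers otherwise write by hand; for example (a sketch):

	package main

	import (
		"fmt"
		"go/parser"

		"golang.org/x/tools/go/ast/astutil"
	)

	func main() {
		// Parse a doubly parenthesized expression and strip the parens.
		e, err := parser.ParseExpr("((x + 1))")
		if err != nil {
			panic(err)
		}
		fmt.Printf("%T\n", astutil.Unparen(e)) // *ast.BinaryExpr
	}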
diff --git a/go/buildutil/allpackages.go b/go/buildutil/allpackages.go
index 1da5560..0f909ee 100644
--- a/go/buildutil/allpackages.go
+++ b/go/buildutil/allpackages.go
@@ -7,7 +7,7 @@
//
// All I/O is done via the build.Context file system interface, which must
// be concurrency-safe.
-package buildutil
+package buildutil // import "golang.org/x/tools/go/buildutil"
import (
"go/build"
@@ -30,11 +30,8 @@ import (
//
func AllPackages(ctxt *build.Context) []string {
var list []string
- var mu sync.Mutex
ForEachPackage(ctxt, func(pkg string, _ error) {
- mu.Lock()
list = append(list, pkg)
- mu.Unlock()
})
sort.Strings(list)
return list
@@ -47,27 +44,42 @@ func AllPackages(ctxt *build.Context) []string {
// If the package directory exists but could not be read, the second
// argument to the found function provides the error.
//
-// The found function and the build.Context file system interface
-// accessors must be concurrency safe.
+// All I/O is done via the build.Context file system interface,
+// which must be concurrency-safe.
//
func ForEachPackage(ctxt *build.Context, found func(importPath string, err error)) {
// We use a counting semaphore to limit
// the number of parallel calls to ReadDir.
sema := make(chan bool, 20)
+ ch := make(chan item)
+
var wg sync.WaitGroup
for _, root := range ctxt.SrcDirs() {
root := root
wg.Add(1)
go func() {
- allPackages(ctxt, sema, root, found)
+ allPackages(ctxt, sema, root, ch)
wg.Done()
}()
}
- wg.Wait()
+ go func() {
+ wg.Wait()
+ close(ch)
+ }()
+
+ // All calls to found occur in the caller's goroutine.
+ for i := range ch {
+ found(i.importPath, i.err)
+ }
+}
+
+type item struct {
+ importPath string
+ err error // (optional)
}
-func allPackages(ctxt *build.Context, sema chan bool, root string, found func(string, error)) {
+func allPackages(ctxt *build.Context, sema chan bool, root string, ch chan<- item) {
root = filepath.Clean(root) + string(os.PathSeparator)
var wg sync.WaitGroup
@@ -92,7 +104,7 @@ func allPackages(ctxt *build.Context, sema chan bool, root string, found func(st
files, err := ReadDir(ctxt, dir)
<-sema
if pkg != "" || err != nil {
- found(pkg, err)
+ ch <- item{pkg, err}
}
for _, fi := range files {
fi := fi
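With this change every found callback is delivered from the caller's goroutine (the parallel workers feed an internal channel instead of invoking the callback directly), which is why the mutex could be dropped from AllPackages above. A hedged usage sketch against the default build context:

	package main

	import (
		"fmt"
		"go/build"

		"golang.org/x/tools/go/buildutil"
	)

	func main() {
		// The callback may append to a plain slice: it always runs in
		// this goroutine, even though directories are scanned in parallel.
		var bad []string
		buildutil.ForEachPackage(&build.Default, func(importPath string, err error) {
			if err != nil {
				bad = append(bad, importPath)
			}
		})
		fmt.Printf("%d unreadable package directories\n", len(bad))

		fmt.Printf("%d packages found\n", len(buildutil.AllPackages(&build.Default)))
	}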
diff --git a/go/buildutil/fakecontext.go b/go/buildutil/fakecontext.go
new file mode 100644
index 0000000..24cbcbe
--- /dev/null
+++ b/go/buildutil/fakecontext.go
@@ -0,0 +1,108 @@
+package buildutil
+
+import (
+ "fmt"
+ "go/build"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+ "sort"
+ "strings"
+ "time"
+)
+
+// FakeContext returns a build.Context for the fake file tree specified
+// by pkgs, which maps package import paths to a mapping from file base
+// names to contents.
+//
+// The fake Context has a GOROOT of "/go" and no GOPATH, and overrides
+// the necessary file access methods to read from memory instead of the
+// real file system.
+//
+// Unlike a real file tree, the fake one has only two levels---packages
+// and files---so ReadDir("/go/src/") returns all packages under
+// /go/src/ including, for instance, "math" and "math/big".
+// ReadDir("/go/src/math/big") would return all the files in the
+// "math/big" package.
+//
+func FakeContext(pkgs map[string]map[string]string) *build.Context {
+ clean := func(filename string) string {
+ f := path.Clean(filepath.ToSlash(filename))
+ // Removing "/go/src" while respecting segment
+ // boundaries has this unfortunate corner case:
+ if f == "/go/src" {
+ return ""
+ }
+ return strings.TrimPrefix(f, "/go/src/")
+ }
+
+ ctxt := build.Default // copy
+ ctxt.GOROOT = "/go"
+ ctxt.GOPATH = ""
+ ctxt.IsDir = func(dir string) bool {
+ dir = clean(dir)
+ if dir == "" {
+ return true // needed by (*build.Context).SrcDirs
+ }
+ return pkgs[dir] != nil
+ }
+ ctxt.ReadDir = func(dir string) ([]os.FileInfo, error) {
+ dir = clean(dir)
+ var fis []os.FileInfo
+ if dir == "" {
+ // enumerate packages
+ for importPath := range pkgs {
+ fis = append(fis, fakeDirInfo(importPath))
+ }
+ } else {
+ // enumerate files of package
+ for basename := range pkgs[dir] {
+ fis = append(fis, fakeFileInfo(basename))
+ }
+ }
+ sort.Sort(byName(fis))
+ return fis, nil
+ }
+ ctxt.OpenFile = func(filename string) (io.ReadCloser, error) {
+ filename = clean(filename)
+ dir, base := path.Split(filename)
+ content, ok := pkgs[path.Clean(dir)][base]
+ if !ok {
+ return nil, fmt.Errorf("file not found: %s", filename)
+ }
+ return ioutil.NopCloser(strings.NewReader(content)), nil
+ }
+ ctxt.IsAbsPath = func(path string) bool {
+ path = filepath.ToSlash(path)
+ // Don't rely on the default (filepath.Path) since on
+ // Windows, it reports virtual paths as non-absolute.
+ return strings.HasPrefix(path, "/")
+ }
+ return &ctxt
+}
+
+type byName []os.FileInfo
+
+func (s byName) Len() int { return len(s) }
+func (s byName) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() }
+
+type fakeFileInfo string
+
+func (fi fakeFileInfo) Name() string { return string(fi) }
+func (fakeFileInfo) Sys() interface{} { return nil }
+func (fakeFileInfo) ModTime() time.Time { return time.Time{} }
+func (fakeFileInfo) IsDir() bool { return false }
+func (fakeFileInfo) Size() int64 { return 0 }
+func (fakeFileInfo) Mode() os.FileMode { return 0644 }
+
+type fakeDirInfo string
+
+func (fd fakeDirInfo) Name() string { return string(fd) }
+func (fakeDirInfo) Sys() interface{} { return nil }
+func (fakeDirInfo) ModTime() time.Time { return time.Time{} }
+func (fakeDirInfo) IsDir() bool { return true }
+func (fakeDirInfo) Size() int64 { return 0 }
+func (fakeDirInfo) Mode() os.FileMode { return 0755 }
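FakeContext is useful for tests that need a build.Context without touching the disk. A minimal sketch (the package contents are invented for illustration):

	package main

	import (
		"fmt"

		"golang.org/x/tools/go/buildutil"
	)

	func main() {
		// Two in-memory packages: import path -> file base name -> contents.
		ctxt := buildutil.FakeContext(map[string]map[string]string{
			"a":   {"a.go": "package a"},
			"a/b": {"b.go": "package b"},
		})

		// All file access goes through the overridden Context methods,
		// so this should print [a a/b] without reading the file system.
		fmt.Println(buildutil.AllPackages(ctxt))
	}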
diff --git a/go/callgraph/callgraph.go b/go/callgraph/callgraph.go
index d0e12d8..dad8c22 100644
--- a/go/callgraph/callgraph.go
+++ b/go/callgraph/callgraph.go
@@ -32,7 +32,7 @@ in the call graph; they are treated like built-in operators of the
language.
*/
-package callgraph
+package callgraph // import "golang.org/x/tools/go/callgraph"
// TODO(adonovan): add a function to eliminate wrappers from the
// callgraph, preserving topology.
diff --git a/go/callgraph/cha/cha.go b/go/callgraph/cha/cha.go
new file mode 100644
index 0000000..fcdf686
--- /dev/null
+++ b/go/callgraph/cha/cha.go
@@ -0,0 +1,120 @@
+// Package cha computes the call graph of a Go program using the Class
+// Hierarchy Analysis (CHA) algorithm.
+//
+// CHA was first described in "Optimization of Object-Oriented Programs
+// Using Static Class Hierarchy Analysis", Jeffrey Dean, David Grove,
+// and Craig Chambers, ECOOP'95.
+//
+// CHA is related to RTA (see go/callgraph/rta); the difference is that
+// CHA conservatively computes the entire "implements" relation between
+// interfaces and concrete types ahead of time, whereas RTA uses dynamic
+// programming to construct it on the fly as it encounters new functions
+// reachable from main. CHA may thus include spurious call edges for
+// types that haven't been instantiated yet, or types that are never
+// instantiated.
+//
+// Since CHA conservatively assumes that all functions are address-taken
+// and all concrete types are put into interfaces, it is sound to run on
+// partial programs, such as libraries without a main or test function.
+//
+package cha // import "golang.org/x/tools/go/callgraph/cha"
+
+import (
+ "golang.org/x/tools/go/callgraph"
+ "golang.org/x/tools/go/ssa"
+ "golang.org/x/tools/go/ssa/ssautil"
+ "golang.org/x/tools/go/types"
+ "golang.org/x/tools/go/types/typeutil"
+)
+
+// CallGraph computes the call graph of the specified program using the
+// Class Hierarchy Analysis algorithm.
+//
+func CallGraph(prog *ssa.Program) *callgraph.Graph {
+ cg := callgraph.New(nil) // TODO(adonovan) eliminate concept of rooted callgraph
+
+ allFuncs := ssautil.AllFunctions(prog)
+
+ // funcsBySig contains all functions, keyed by signature. It is
+ // the effective set of address-taken functions used to resolve
+ // a dynamic call of a particular signature.
+ var funcsBySig typeutil.Map // value is []*ssa.Function
+
+ // methodsByName contains all methods,
+ // grouped by name for efficient lookup.
+ methodsByName := make(map[string][]*ssa.Function)
+
+	// methodsMemo records, for every abstract method call I.f on
+ // interface type I, the set of concrete methods C.f of all
+ // types C that satisfy interface I.
+ methodsMemo := make(map[*types.Func][]*ssa.Function)
+ lookupMethods := func(m *types.Func) []*ssa.Function {
+ methods, ok := methodsMemo[m]
+ if !ok {
+ I := m.Type().(*types.Signature).Recv().Type().Underlying().(*types.Interface)
+ for _, f := range methodsByName[m.Name()] {
+ C := f.Signature.Recv().Type() // named or *named
+ if types.Implements(C, I) {
+ methods = append(methods, f)
+ }
+ }
+ methodsMemo[m] = methods
+ }
+ return methods
+ }
+
+ for f := range allFuncs {
+ if f.Signature.Recv() == nil {
+ // Package initializers can never be address-taken.
+ if f.Name() == "init" && f.Synthetic == "package initializer" {
+ continue
+ }
+ funcs, _ := funcsBySig.At(f.Signature).([]*ssa.Function)
+ funcs = append(funcs, f)
+ funcsBySig.Set(f.Signature, funcs)
+ } else {
+ methodsByName[f.Name()] = append(methodsByName[f.Name()], f)
+ }
+ }
+
+ addEdge := func(fnode *callgraph.Node, site ssa.CallInstruction, g *ssa.Function) {
+ gnode := cg.CreateNode(g)
+ callgraph.AddEdge(fnode, site, gnode)
+ }
+
+ addEdges := func(fnode *callgraph.Node, site ssa.CallInstruction, callees []*ssa.Function) {
+ // Because every call to a highly polymorphic and
+ // frequently used abstract method such as
+ // (io.Writer).Write is assumed to call every concrete
+ // Write method in the program, the call graph can
+ // contain a lot of duplication.
+ //
+ // TODO(adonovan): opt: consider factoring the callgraph
+ // API so that the Callers component of each edge is a
+ // slice of nodes, not a singleton.
+ for _, g := range callees {
+ addEdge(fnode, site, g)
+ }
+ }
+
+ for f := range allFuncs {
+ fnode := cg.CreateNode(f)
+ for _, b := range f.Blocks {
+ for _, instr := range b.Instrs {
+ if site, ok := instr.(ssa.CallInstruction); ok {
+ call := site.Common()
+ if call.IsInvoke() {
+ addEdges(fnode, site, lookupMethods(call.Method))
+ } else if g := call.StaticCallee(); g != nil {
+ addEdge(fnode, site, g)
+ } else if _, ok := call.Value.(*ssa.Builtin); !ok {
+ callees, _ := funcsBySig.At(call.Signature()).([]*ssa.Function)
+ addEdges(fnode, site, callees)
+ }
+ }
+ }
+ }
+ }
+
+ return cg
+}
diff --git a/go/callgraph/cha/cha_test.go b/go/callgraph/cha/cha_test.go
new file mode 100644
index 0000000..56c7c1f
--- /dev/null
+++ b/go/callgraph/cha/cha_test.go
@@ -0,0 +1,106 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cha_test
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "io/ioutil"
+ "sort"
+ "strings"
+ "testing"
+
+ "golang.org/x/tools/go/callgraph"
+ "golang.org/x/tools/go/callgraph/cha"
+ "golang.org/x/tools/go/loader"
+ "golang.org/x/tools/go/ssa"
+ "golang.org/x/tools/go/types"
+)
+
+var inputs = []string{
+ "testdata/func.go",
+ "testdata/iface.go",
+ "testdata/recv.go",
+}
+
+func expectation(f *ast.File) (string, token.Pos) {
+ for _, c := range f.Comments {
+ text := strings.TrimSpace(c.Text())
+ if t := strings.TrimPrefix(text, "WANT:\n"); t != text {
+ return t, c.Pos()
+ }
+ }
+ return "", token.NoPos
+}
+
+// TestCHA runs CHA on each file in inputs, prints the dynamic edges of
+// the call graph, and compares it with the golden results embedded in
+// the WANT comment at the end of the file.
+//
+func TestCHA(t *testing.T) {
+ for _, filename := range inputs {
+ content, err := ioutil.ReadFile(filename)
+ if err != nil {
+ t.Errorf("couldn't read file '%s': %s", filename, err)
+ continue
+ }
+
+ conf := loader.Config{
+ ParserMode: parser.ParseComments,
+ }
+ f, err := conf.ParseFile(filename, content)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+
+ want, pos := expectation(f)
+ if pos == token.NoPos {
+ t.Errorf("No WANT: comment in %s", filename)
+ continue
+ }
+
+ conf.CreateFromFiles("main", f)
+ iprog, err := conf.Load()
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+
+ prog := ssa.Create(iprog, 0)
+ mainPkg := prog.Package(iprog.Created[0].Pkg)
+ prog.BuildAll()
+
+ cg := cha.CallGraph(prog)
+
+ if got := printGraph(cg, mainPkg.Object); got != want {
+ t.Errorf("%s: got:\n%s\nwant:\n%s",
+ prog.Fset.Position(pos), got, want)
+ }
+ }
+}
+
+func printGraph(cg *callgraph.Graph, from *types.Package) string {
+ var edges []string
+ callgraph.GraphVisitEdges(cg, func(e *callgraph.Edge) error {
+ if strings.Contains(e.Description(), "dynamic") {
+ edges = append(edges, fmt.Sprintf("%s --> %s",
+ e.Caller.Func.RelString(from),
+ e.Callee.Func.RelString(from)))
+ }
+ return nil
+ })
+ sort.Strings(edges)
+
+ var buf bytes.Buffer
+ buf.WriteString("Dynamic calls\n")
+ for _, edge := range edges {
+ fmt.Fprintf(&buf, " %s\n", edge)
+ }
+ return strings.TrimSpace(buf.String())
+}
diff --git a/go/callgraph/cha/testdata/func.go b/go/callgraph/cha/testdata/func.go
new file mode 100644
index 0000000..ad483f1
--- /dev/null
+++ b/go/callgraph/cha/testdata/func.go
@@ -0,0 +1,23 @@
+//+build ignore
+
+package main
+
+// Test of dynamic function calls; no interfaces.
+
+func A(int) {}
+
+var (
+ B = func(int) {}
+ C = func(int) {}
+)
+
+func f() {
+ pfn := B
+ pfn(0) // calls A, B, C, even though A is not even address-taken
+}
+
+// WANT:
+// Dynamic calls
+// f --> A
+// f --> init$1
+// f --> init$2
diff --git a/go/callgraph/cha/testdata/iface.go b/go/callgraph/cha/testdata/iface.go
new file mode 100644
index 0000000..1622ec1
--- /dev/null
+++ b/go/callgraph/cha/testdata/iface.go
@@ -0,0 +1,65 @@
+//+build ignore
+
+package main
+
+// Test of interface calls. None of the concrete types are ever
+// instantiated or converted to interfaces.
+
+type I interface {
+ f()
+}
+
+type J interface {
+ f()
+ g()
+}
+
+type C int // implements I
+
+func (*C) f()
+
+type D int // implements I and J
+
+func (*D) f()
+func (*D) g()
+
+func one(i I, j J) {
+ i.f() // calls *C and *D
+}
+
+func two(i I, j J) {
+ j.f() // calls *D (but not *C, even though it defines method f)
+}
+
+func three(i I, j J) {
+ j.g() // calls *D
+}
+
+func four(i I, j J) {
+ Jf := J.f
+ if unknown {
+ Jf = nil // suppress SSA constant propagation
+ }
+ Jf(nil) // calls *D
+}
+
+func five(i I, j J) {
+ jf := j.f
+ if unknown {
+ jf = nil // suppress SSA constant propagation
+ }
+ jf() // calls *D
+}
+
+var unknown bool
+
+// WANT:
+// Dynamic calls
+// (J).f$bound --> (*D).f
+// (J).f$thunk --> (*D).f
+// five --> (J).f$bound
+// four --> (J).f$thunk
+// one --> (*C).f
+// one --> (*D).f
+// three --> (*D).g
+// two --> (*D).f
diff --git a/go/callgraph/cha/testdata/recv.go b/go/callgraph/cha/testdata/recv.go
new file mode 100644
index 0000000..5ba48e9
--- /dev/null
+++ b/go/callgraph/cha/testdata/recv.go
@@ -0,0 +1,37 @@
+//+build ignore
+
+package main
+
+type I interface {
+ f()
+}
+
+type J interface {
+ g()
+}
+
+type C int // C and *C implement I; *C implements J
+
+func (C) f()
+func (*C) g()
+
+type D int // *D implements I and J
+
+func (*D) f()
+func (*D) g()
+
+func f(i I) {
+ i.f() // calls C, *C, *D
+}
+
+func g(j J) {
+ j.g() // calls *C, *D
+}
+
+// WANT:
+// Dynamic calls
+// f --> (*C).f
+// f --> (*D).f
+// f --> (C).f
+// g --> (*C).g
+// g --> (*D).g
diff --git a/go/callgraph/rta/rta.go b/go/callgraph/rta/rta.go
index c4e2916..8d22da6 100644
--- a/go/callgraph/rta/rta.go
+++ b/go/callgraph/rta/rta.go
@@ -40,7 +40,7 @@
// cmd/callgraph tool on its own source takes ~2.1s for RTA and ~5.4s
// for points-to analysis.
//
-package rta
+package rta // import "golang.org/x/tools/go/callgraph/rta"
// TODO(adonovan): test it by connecting it to the interpreter and
// replacing all "unreachable" functions by a special intrinsic, and
diff --git a/go/callgraph/rta/rta_test.go b/go/callgraph/rta/rta_test.go
index 03e16b0..11fa1a5 100644
--- a/go/callgraph/rta/rta_test.go
+++ b/go/callgraph/rta/rta_test.go
@@ -55,8 +55,7 @@ func TestRTA(t *testing.T) {
}
conf := loader.Config{
- SourceImports: true,
- ParserMode: parser.ParseComments,
+ ParserMode: parser.ParseComments,
}
f, err := conf.ParseFile(filename, content)
if err != nil {
diff --git a/go/callgraph/static/static.go b/go/callgraph/static/static.go
new file mode 100644
index 0000000..ebb183b
--- /dev/null
+++ b/go/callgraph/static/static.go
@@ -0,0 +1,33 @@
+// Package static computes the call graph of a Go program containing
+// only static call edges.
+package static // import "golang.org/x/tools/go/callgraph/static"
+
+import (
+ "golang.org/x/tools/go/callgraph"
+ "golang.org/x/tools/go/ssa"
+ "golang.org/x/tools/go/ssa/ssautil"
+)
+
+// CallGraph computes the call graph of the specified program
+// considering only static calls.
+//
+func CallGraph(prog *ssa.Program) *callgraph.Graph {
+ cg := callgraph.New(nil) // TODO(adonovan) eliminate concept of rooted callgraph
+
+ // TODO(adonovan): opt: use only a single pass over the ssa.Program.
+ for f := range ssautil.AllFunctions(prog) {
+ fnode := cg.CreateNode(f)
+ for _, b := range f.Blocks {
+ for _, instr := range b.Instrs {
+ if site, ok := instr.(ssa.CallInstruction); ok {
+ if g := site.Common().StaticCallee(); g != nil {
+ gnode := cg.CreateNode(g)
+ callgraph.AddEdge(fnode, site, gnode)
+ }
+ }
+ }
+ }
+ }
+
+ return cg
+}
diff --git a/go/callgraph/static/static_test.go b/go/callgraph/static/static_test.go
new file mode 100644
index 0000000..5a74ca1
--- /dev/null
+++ b/go/callgraph/static/static_test.go
@@ -0,0 +1,88 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package static_test
+
+import (
+ "fmt"
+ "go/parser"
+ "reflect"
+ "sort"
+ "testing"
+
+ "golang.org/x/tools/go/callgraph"
+ "golang.org/x/tools/go/callgraph/static"
+ "golang.org/x/tools/go/loader"
+ "golang.org/x/tools/go/ssa"
+)
+
+const input = `package P
+
+type C int
+func (C) f()
+
+type I interface{f()}
+
+func f() {
+ p := func() {}
+ g()
+ p() // SSA constant propagation => static
+
+ if unknown {
+ p = h
+ }
+ p() // dynamic
+
+ C(0).f()
+}
+
+func g() {
+ var i I = C(0)
+ i.f()
+}
+
+func h()
+
+var unknown bool
+`
+
+func TestStatic(t *testing.T) {
+ conf := loader.Config{ParserMode: parser.ParseComments}
+ f, err := conf.ParseFile("P.go", input)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ conf.CreateFromFiles("P", f)
+ iprog, err := conf.Load()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ P := iprog.Created[0].Pkg
+
+ prog := ssa.Create(iprog, 0)
+ prog.BuildAll()
+
+ cg := static.CallGraph(prog)
+
+ var edges []string
+ callgraph.GraphVisitEdges(cg, func(e *callgraph.Edge) error {
+ edges = append(edges, fmt.Sprintf("%s -> %s",
+ e.Caller.Func.RelString(P),
+ e.Callee.Func.RelString(P)))
+ return nil
+ })
+ sort.Strings(edges)
+
+ want := []string{
+ "(*C).f -> (C).f",
+ "f -> (C).f",
+ "f -> f$1",
+ "f -> g",
+ }
+ if !reflect.DeepEqual(edges, want) {
+ t.Errorf("Got edges %v, want %v", edges, want)
+ }
+}
diff --git a/go/exact/exact.go b/go/exact/exact.go
index 06d5918..51c4906 100644
--- a/go/exact/exact.go
+++ b/go/exact/exact.go
@@ -11,7 +11,7 @@
// values produce unknown values unless specified
// otherwise.
//
-package exact
+package exact // import "golang.org/x/tools/go/exact"
import (
"fmt"
@@ -560,14 +560,16 @@ func ord(x Value) int {
switch x.(type) {
default:
return 0
- case int64Val:
+ case boolVal, stringVal:
return 1
- case intVal:
+ case int64Val:
return 2
- case floatVal:
+ case intVal:
return 3
- case complexVal:
+ case floatVal:
return 4
+ case complexVal:
+ return 5
}
}
diff --git a/go/exact/exact_test.go b/go/exact/exact_test.go
index c517970..aa38a89 100644
--- a/go/exact/exact_test.go
+++ b/go/exact/exact_test.go
@@ -346,3 +346,30 @@ func TestBytes(t *testing.T) {
}
}
}
+
+func TestUnknown(t *testing.T) {
+ u := MakeUnknown()
+ var values = []Value{
+ u,
+ MakeBool(false), // token.ADD ok below, operation is never considered
+ MakeString(""),
+ MakeInt64(1),
+ MakeFromLiteral("-1234567890123456789012345678901234567890", token.INT),
+ MakeFloat64(1.2),
+ MakeImag(MakeFloat64(1.2)),
+ }
+ for _, val := range values {
+ x, y := val, u
+ for i := range [2]int{} {
+ if i == 1 {
+ x, y = y, x
+ }
+ if got := BinaryOp(x, token.ADD, y); got.Kind() != Unknown {
+ t.Errorf("%s + %s: got %s; want %s", x, y, got, u)
+ }
+ if got := Compare(x, token.EQL, y); got {
+ t.Errorf("%s == %s: got true; want false", x, y)
+ }
+ }
+ }
+}
diff --git a/go/gccgoimporter/importer.go b/go/gccgoimporter/importer.go
index a2b772b..59576ca 100644
--- a/go/gccgoimporter/importer.go
+++ b/go/gccgoimporter/importer.go
@@ -3,7 +3,7 @@
// license that can be found in the LICENSE file.
// Package gccgoimporter implements Import for gccgo-generated object files.
-package gccgoimporter
+package gccgoimporter // import "golang.org/x/tools/go/gccgoimporter"
import (
"bytes"
diff --git a/go/gccgoimporter/importer_test.go b/go/gccgoimporter/importer_test.go
index 722151f..c7adb45 100644
--- a/go/gccgoimporter/importer_test.go
+++ b/go/gccgoimporter/importer_test.go
@@ -95,7 +95,8 @@ var importerTests = [...]importerTest{
{pkgpath: "complexnums", name: "NP", want: "const NP untyped complex", wantval: "(-1/1 + 1/1i)"},
{pkgpath: "complexnums", name: "PN", want: "const PN untyped complex", wantval: "(1/1 + -1/1i)"},
{pkgpath: "complexnums", name: "PP", want: "const PP untyped complex", wantval: "(1/1 + 1/1i)"},
- {pkgpath: "imports", wantinits: []string{"imports..import", "fmt..import", "math..import"}},
+ // TODO: enable this entry once bug has been tracked down
+ //{pkgpath: "imports", wantinits: []string{"imports..import", "fmt..import", "math..import"}},
}
func TestGoxImporter(t *testing.T) {
diff --git a/go/gcimporter/gcimporter.go b/go/gcimporter/gcimporter.go
index b6844b4..5df01a7 100644
--- a/go/gcimporter/gcimporter.go
+++ b/go/gcimporter/gcimporter.go
@@ -4,7 +4,7 @@
// Package gcimporter implements Import for gc-generated object files.
// Importing this package installs Import as go/types.DefaultImport.
-package gcimporter
+package gcimporter // import "golang.org/x/tools/go/gcimporter"
import (
"bufio"
diff --git a/go/importer/import.go b/go/importer/import.go
index 3604c98..2b6e279 100644
--- a/go/importer/import.go
+++ b/go/importer/import.go
@@ -7,7 +7,7 @@
// by R. Griesemer, Technical Report 156, ETH Zürich, 1991.
// package importer implements an exporter and importer for Go export data.
-package importer
+package importer // import "golang.org/x/tools/go/importer"
import (
"encoding/binary"
diff --git a/go/loader/loader.go b/go/loader/loader.go
index 82165a9..16ccd24 100644
--- a/go/loader/loader.go
+++ b/go/loader/loader.go
@@ -27,19 +27,19 @@
//
// // Parse the specified files and create an ad-hoc package with path "foo".
// // All files must have the same 'package' declaration.
-// err := conf.CreateFromFilenames("foo", "foo.go", "bar.go")
+// conf.CreateFromFilenames("foo", "foo.go", "bar.go")
//
// // Create an ad-hoc package with path "foo" from
// // the specified already-parsed files.
// // All ASTs must have the same 'package' declaration.
-// err := conf.CreateFromFiles("foo", parsedFiles)
+// conf.CreateFromFiles("foo", parsedFiles)
//
// // Add "runtime" to the set of packages to be loaded.
// conf.Import("runtime")
//
// // Adds "fmt" and "fmt_test" to the set of packages
// // to be loaded. "fmt" will include *_test.go files.
-// err := conf.ImportWithTests("fmt")
+// conf.ImportWithTests("fmt")
//
// // Finally, load all the packages specified by the configuration.
// prog, err := conf.Load()
@@ -112,19 +112,79 @@ package loader
// compress/bzip2/bzip2_test.go (package bzip2) imports "io/ioutil"
// io/ioutil/tempfile_test.go (package ioutil) imports "regexp"
// regexp/exec_test.go (package regexp) imports "compress/bzip2"
+//
+//
+// Concurrency
+// -----------
+//
+// Let us define the import dependency graph as follows. Each node is a
+// list of files passed to (Checker).Files at once. Many of these lists
+// are the production code of an importable Go package, so those nodes
+// are labelled by the package's import path. The remaining nodes are
+// ad-hoc packages and lists of in-package *_test.go files that augment
+// an importable package; those nodes have no label.
+//
+// The edges of the graph represent import statements appearing within a
+// file. An edge connects a node (a list of files) to the node it
+// imports, which is importable and thus always labelled.
+//
+// Loading is controlled by this dependency graph.
+//
+// To reduce I/O latency, we start loading a package's dependencies
+// asynchronously as soon as we've parsed its files and enumerated its
+// imports (scanImports). This performs a preorder traversal of the
+// import dependency graph.
+//
+// To exploit hardware parallelism, we type-check unrelated packages in
+// parallel, where "unrelated" means not ordered by the partial order of
+// the import dependency graph.
+//
+// We use a concurrency-safe blocking cache (importer.imported) to
+// record the results of type-checking, whether success or failure. An
+// entry is created in this cache by startLoad the first time the
+// package is imported. The first goroutine to request an entry becomes
+// responsible for completing the task and broadcasting completion to
+// subsequent requestors, which block until then.
+//
+// Type checking occurs in (parallel) postorder: we cannot type-check a
+// set of files until we have loaded and type-checked all of their
+// immediate dependencies (and thus all of their transitive
+// dependencies). If the input were guaranteed free of import cycles,
+// this would be trivial: we could simply wait for completion of the
+// dependencies and then invoke the typechecker.
+//
+// But as we saw in the 'go test' section above, some cycles in the
+// import graph over packages are actually legal, so long as the
+// cycle-forming edge originates in the in-package test files that
+// augment the package. This explains why the nodes of the import
+// dependency graph are not packages, but lists of files: the unlabelled
+// nodes avoid the cycles. Consider packages A and B where B imports A
+// and A's in-package tests AT import B. The naively constructed import
+// graph over packages would contain a cycle (A+AT) --> B --> (A+AT) but
+// the graph over lists of files is AT --> B --> A, where AT is an
+// unlabelled node.
+//
+// Awaiting completion of the dependencies in a cyclic graph would
+// deadlock, so we must materialize the import dependency graph (as
+// importer.graph) and check whether each import edge forms a cycle. If
+// x imports y, and the graph already contains a path from y to x, then
+// there is an import cycle, in which case the processing of x must not
+// wait for the completion of processing of y.
+//
+// When the type-checker makes a callback (doImport) to the loader for a
+// given import edge, there are two possible cases. In the normal case,
+// the dependency has already been completely type-checked; doImport
+// does a cache lookup and returns it. In the cyclic case, the entry in
+// the cache is still necessarily incomplete, indicating a cycle. We
+// perform the cycle check again to obtain the error message, and return
+// the error.
+//
+// The result of using concurrency is about a 2.5x speedup for stdlib_test.
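The blocking cache described above reduces to a small idiom: the first goroutine to request a key becomes responsible for computing it and closes a channel when done; later requesters block on that channel. The following is a stand-alone generic sketch of that idiom only, not the loader's actual importer type:

	package main

	import (
		"fmt"
		"sync"
	)

	// entry is one cache slot: the first requester fills in value and then
	// closes ready; later requesters block on ready until that happens.
	type entry struct {
		value string
		ready chan struct{}
	}

	// blockingCache is a concurrency-safe cache in which at most one
	// goroutine computes each entry and the rest wait for the broadcast.
	type blockingCache struct {
		mu sync.Mutex
		m  map[string]*entry
	}

	func (c *blockingCache) get(key string, compute func() string) string {
		c.mu.Lock()
		e := c.m[key]
		if e == nil {
			// First request for key: this goroutine becomes responsible.
			e = &entry{ready: make(chan struct{})}
			c.m[key] = e
			c.mu.Unlock()

			e.value = compute()
			close(e.ready) // broadcast completion to waiting goroutines
		} else {
			c.mu.Unlock()
			<-e.ready // wait until the responsible goroutine is done
		}
		return e.value
	}

	func main() {
		c := &blockingCache{m: make(map[string]*entry)}
		var wg sync.WaitGroup
		for i := 0; i < 3; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				// Only one of these calls runs the compute function.
				fmt.Println(c.get("fmt", func() string { return "type-checked fmt" }))
			}()
		}
		wg.Wait()
	}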
// TODO(adonovan):
-// - (*Config).ParseFile is very handy, but feels like feature creep.
-// (*Config).CreateFromFiles has a nasty precondition.
-// - s/path/importPath/g to avoid ambiguity with other meanings of
-// "path": a file name, a colon-separated directory list.
// - cache the calls to build.Import so we don't do it three times per
// test package.
// - Thorough overhaul of package documentation.
-// - Certain errors (e.g. parse error in x_test.go files, or failure to
-// import an initial package) still cause Load() to fail hard.
-// Fix that. (It's tricky because of the way x_test files are parsed
-// eagerly.)
import (
"errors"
@@ -134,18 +194,23 @@ import (
"go/parser"
"go/token"
"os"
+ "sort"
"strings"
+ "sync"
+ "time"
- "golang.org/x/tools/astutil"
+ "golang.org/x/tools/go/ast/astutil"
"golang.org/x/tools/go/gcimporter"
"golang.org/x/tools/go/types"
)
+const trace = false // show timing info for type-checking
+
// Config specifies the configuration for a program to load.
// The zero value for Config is a ready-to-use default configuration.
type Config struct {
// Fset is the file set for the parser to use when loading the
- // program. If nil, it will be lazily initialized by any
+ // program. If nil, it may be lazily initialized by any
// method of Config.
Fset *token.FileSet
@@ -169,24 +234,30 @@ type Config struct {
// checked.
TypeCheckFuncBodies func(string) bool
- // SourceImports determines whether to satisfy dependencies by
- // loading Go source code.
+ // ImportFromBinary determines whether to satisfy dependencies by
+ // loading gc export data instead of Go source code.
//
- // If true, the entire program---the initial packages and
- // their transitive closure of dependencies---will be loaded,
- // parsed and type-checked. This is required for
+ // If false, the entire program---the initial packages and their
+ // transitive closure of dependencies---will be loaded from
+ // source, parsed, and type-checked. This is required for
// whole-program analyses such as pointer analysis.
//
- // If false, the TypeChecker.Import mechanism will be used
- // instead. Since that typically supplies only the types of
- // package-level declarations and values of constants, but no
- // code, it will not yield a whole program. It is intended
- // for analyses that perform modular analysis of a
- // single package, e.g. traditional compilation.
+ // If true, the go/gcimporter mechanism is used instead to read
+ // the binary export-data files written by the gc toolchain.
+ // They supply only the types of package-level declarations and
+	// values of constants, but no code, so this option will not yield
+ // a whole program. It is intended for analyses that perform
+ // modular analysis of a single package, e.g. traditional
+ // compilation.
+ //
+ // No check is made that the export data files are up-to-date.
//
// The initial packages (CreatePkgs and ImportPkgs) are always
// loaded from Go source, regardless of this flag's setting.
- SourceImports bool
+ //
+ // NB: there is a bug when loading multiple initial packages with
+ // this flag enabled: https://github.com/golang/go/issues/9955.
+ ImportFromBinary bool
// If Build is non-nil, it is used to locate source packages.
// Otherwise &build.Default is used.
@@ -210,27 +281,52 @@ type Config struct {
AllowErrors bool
// CreatePkgs specifies a list of non-importable initial
- // packages to create. Each element specifies a list of
- // parsed files to be type-checked into a new package, and a
- // path for that package. If the path is "", the package's
- // name will be used instead. The path needn't be globally
- // unique.
- //
- // The resulting packages will appear in the corresponding
- // elements of the Program.Created slice.
- CreatePkgs []CreatePkg
+ // packages to create. The resulting packages will appear in
+ // the corresponding elements of the Program.Created slice.
+ CreatePkgs []PkgSpec
// ImportPkgs specifies a set of initial packages to load from
// source. The map keys are package import paths, used to
- // locate the package relative to $GOROOT. The corresponding
- // values indicate whether to augment the package by *_test.go
- // files in a second pass.
+ // locate the package relative to $GOROOT.
+ //
+ // The map value indicates whether to load tests. If true, Load
+ // will add and type-check two lists of files to the package:
+ // non-test files followed by in-package *_test.go files. In
+ // addition, it will append the external test package (if any)
+ // to Program.Created.
ImportPkgs map[string]bool
+
+ // FindPackage is called during Load to create the build.Package
+ // for a given import path. If nil, a default implementation
+ // based on ctxt.Import is used. A client may use this hook to
+ // adapt to a proprietary build system that does not follow the
+ // "go build" layout conventions, for example.
+ //
+ // It must be safe to call concurrently from multiple goroutines.
+ FindPackage func(ctxt *build.Context, importPath string) (*build.Package, error)
+
+ // PackageCreated is a hook called when a types.Package
+ // is created but before it has been populated.
+ //
+ // The package's import Path() and Scope() are defined,
+ // but not its Name() since no package declaration has
+ // been seen yet.
+ //
+ // Clients may use this to insert synthetic items into
+ // the package scope, for example.
+ //
+ // It must be safe to call concurrently from multiple goroutines.
+ PackageCreated func(*types.Package)
}
-type CreatePkg struct {
- Path string
- Files []*ast.File
+// A PkgSpec specifies a non-importable package to be created by Load.
+// Files are processed first, but typically only one of Files and
+// Filenames is provided. The path needn't be globally unique.
+//
+type PkgSpec struct {
+ Path string // import path ("" => use package declaration)
+ Files []*ast.File // ASTs of already-parsed files
+ Filenames []string // names of files to be parsed
}
// A Program is a Go program loaded from source or binary
@@ -238,8 +334,10 @@ type CreatePkg struct {
type Program struct {
Fset *token.FileSet // the file set for this program
- // Created[i] contains the initial package whose ASTs were
- // supplied by Config.CreatePkgs[i].
+ // Created[i] contains the initial package whose ASTs or
+ // filenames were supplied by Config.CreatePkgs[i], followed by
+ // the external test package, if any, of each package in
+ // Config.ImportPkgs ordered by ImportPath.
Created []*PackageInfo
// Imported contains the initially imported packages,
@@ -293,8 +391,12 @@ func (conf *Config) fset() *token.FileSet {
return conf.Fset
}
-// ParseFile is a convenience function that invokes the parser using
-// the Config's FileSet, which is initialized if nil.
+// ParseFile is a convenience function (intended for testing) that invokes
+// the parser using the Config's FileSet, which is initialized if nil.
+//
+// src specifies the parser input as a string, []byte, or io.Reader, and
+// filename is its apparent name. If src is nil, the contents of
+// filename are read from the file system.
//
func (conf *Config) ParseFile(filename string, src interface{}) (*ast.File, error) {
// TODO(adonovan): use conf.build() etc like parseFiles does.
@@ -327,9 +429,6 @@ It may take one of two forms:
import path may denote two packages. (Whether this behaviour is
enabled is tool-specific, and may depend on additional flags.)
- Due to current limitations in the type-checker, only the first
- import path of the command line will contribute any tests.
-
A '--' argument terminates the list of packages.
`
@@ -341,7 +440,11 @@ A '--' argument terminates the list of packages.
// set of initial packages to be specified; see FromArgsUsage message
// for details.
//
-func (conf *Config) FromArgs(args []string, xtest bool) (rest []string, err error) {
+// Only superficial errors are reported at this stage; errors dependent
+// on I/O are detected during Load.
+//
+func (conf *Config) FromArgs(args []string, xtest bool) ([]string, error) {
+ var rest []string
for i, arg := range args {
if arg == "--" {
rest = args[i+1:]
@@ -358,51 +461,35 @@ func (conf *Config) FromArgs(args []string, xtest bool) (rest []string, err erro
return nil, fmt.Errorf("named files must be .go files: %s", arg)
}
}
- err = conf.CreateFromFilenames("", args...)
+ conf.CreateFromFilenames("", args...)
} else {
// Assume args are directories each denoting a
// package and (perhaps) an external test, iff xtest.
for _, arg := range args {
if xtest {
- err = conf.ImportWithTests(arg)
- if err != nil {
- break
- }
+ conf.ImportWithTests(arg)
} else {
conf.Import(arg)
}
}
}
- return
+ return rest, nil
}
-// CreateFromFilenames is a convenience function that parses the
-// specified *.go files and adds a package entry for them to
-// conf.CreatePkgs.
-//
-// It fails if any file could not be loaded or parsed.
+// CreateFromFilenames is a convenience function that adds
+// a conf.CreatePkgs entry to create a package of the specified *.go
+// files.
//
-func (conf *Config) CreateFromFilenames(path string, filenames ...string) error {
- files, errs := parseFiles(conf.fset(), conf.build(), nil, ".", filenames, conf.ParserMode)
- if len(errs) > 0 {
- return errs[0]
- }
- conf.CreateFromFiles(path, files...)
- return nil
+func (conf *Config) CreateFromFilenames(path string, filenames ...string) {
+ conf.CreatePkgs = append(conf.CreatePkgs, PkgSpec{Path: path, Filenames: filenames})
}
-// CreateFromFiles is a convenience function that adds a CreatePkgs
+// CreateFromFiles is a convenience function that adds a conf.CreatePkgs
// entry to create a package of the specified path and parsed files.
//
-// Precondition: conf.Fset is non-nil and was the fileset used to parse
-// the files. (e.g. the files came from conf.ParseFile().)
-//
func (conf *Config) CreateFromFiles(path string, files ...*ast.File) {
- if conf.Fset == nil {
- panic("nil Fset")
- }
- conf.CreatePkgs = append(conf.CreatePkgs, CreatePkg{path, files})
+ conf.CreatePkgs = append(conf.CreatePkgs, PkgSpec{Path: path, Files: files})
}
// ImportWithTests is a convenience function that adds path to
@@ -415,45 +502,21 @@ func (conf *Config) CreateFromFiles(path string, files ...*ast.File) {
// declaration, an additional package comprising just those files will
// be added to CreatePkgs.
//
-func (conf *Config) ImportWithTests(path string) error {
- if path == "unsafe" {
- return nil // ignore; not a real package
- }
- conf.Import(path)
-
- // Load the external test package.
- bp, err := conf.findSourcePackage(path)
- if err != nil {
- return err // package not found
- }
- xtestFiles, errs := conf.parsePackageFiles(bp, 'x')
- if len(errs) > 0 {
- // TODO(adonovan): fix: parse errors in x_test.go files
- // cause FromArgs() to fail completely.
- return errs[0] // I/O or parse error
- }
- if len(xtestFiles) > 0 {
- conf.CreateFromFiles(path+"_test", xtestFiles...)
- }
-
- // Mark the non-xtest package for augmentation with
- // in-package *_test.go files when we import it below.
- conf.ImportPkgs[path] = true
- return nil
-}
+func (conf *Config) ImportWithTests(path string) { conf.addImport(path, true) }
// Import is a convenience function that adds path to ImportPkgs, the
// set of initial packages that will be imported from source.
//
-func (conf *Config) Import(path string) {
+func (conf *Config) Import(path string) { conf.addImport(path, false) }
+
+func (conf *Config) addImport(path string, tests bool) {
if path == "unsafe" {
return // ignore; not a real package
}
if conf.ImportPkgs == nil {
conf.ImportPkgs = make(map[string]bool)
}
- // Subtle: adds value 'false' unless value is already true.
- conf.ImportPkgs[path] = conf.ImportPkgs[path] // unaugmented source package
+ conf.ImportPkgs[path] = conf.ImportPkgs[path] || tests
}
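
Illustrative fragment (assuming a loader.Config named conf is in scope): with the map-value logic above, a request for tests is sticky; a later plain Import never downgrades it.

    conf.Import("bytes")          // ImportPkgs["bytes"] == false
    conf.ImportWithTests("bytes") // ImportPkgs["bytes"] == true
    conf.Import("bytes")          // still true: false never overwrites true
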
// PathEnclosingInterval returns the PackageInfo and ast.Node that
@@ -493,15 +556,63 @@ func (prog *Program) InitialPackages() []*PackageInfo {
// importer holds the working state of the algorithm.
type importer struct {
- conf *Config // the client configuration
- prog *Program // resulting program
- imported map[string]*importInfo // all imported packages (incl. failures) by import path
+ conf *Config // the client configuration
+ prog *Program // resulting program
+ start time.Time // for logging
+
+ // This mutex serializes access to prog.ImportMap (aka
+ // TypeChecker.Packages); we also use it for AllPackages.
+ //
+ // The TypeChecker.Packages map is not really used by this
+ // package, but may be used by the client's Import function,
+ // and by clients of the returned Program.
+ typecheckerMu sync.Mutex
+
+ importedMu sync.Mutex
+ imported map[string]*importInfo // all imported packages (incl. failures) by import path
+
+ // import dependency graph: graph[x][y] => x imports y
+ //
+	// Since non-importable packages cannot be cyclic, we ignore
+	// their imports; thus we need only the subgraph over importable
+ // packages. Nodes are identified by their import paths.
+ graphMu sync.Mutex
+ graph map[string]map[string]bool
}
// importInfo tracks the success or failure of a single import.
+//
+// Upon completion, exactly one of info and err is non-nil:
+// info on successful creation of a package, err otherwise.
+// A successful package may still contain type errors.
+//
type importInfo struct {
- info *PackageInfo // results of typechecking (including errors)
- err error // reason for failure to make a package
+ path string // import path
+ mu sync.Mutex // guards the following fields prior to completion
+ info *PackageInfo // results of typechecking (including errors)
+ err error // reason for failure to create a package
+ complete sync.Cond // complete condition is that one of info, err is non-nil.
+}
+
+// awaitCompletion blocks until ii is complete,
+// i.e. the info and err fields are safe to inspect without a lock.
+// It is concurrency-safe and idempotent.
+func (ii *importInfo) awaitCompletion() {
+ ii.mu.Lock()
+ for ii.info == nil && ii.err == nil {
+ ii.complete.Wait()
+ }
+ ii.mu.Unlock()
+}
+
+// Complete marks ii as complete.
+// Its info and err fields will not be subsequently updated.
+func (ii *importInfo) Complete(info *PackageInfo, err error) {
+ ii.mu.Lock()
+ ii.info = info
+ ii.err = err
+ ii.complete.Broadcast()
+ ii.mu.Unlock()
}
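
The same completion protocol in isolation, as a stand-alone sketch with invented names (not loader API): one goroutine publishes a value exactly once, and any number of goroutines may block until it is available.

    import "sync"

    type result struct {
    	mu    sync.Mutex
    	done  sync.Cond // done.L must be set to &mu before use
    	value string
    	ok    bool
    }

    func newResult() *result {
    	r := new(result)
    	r.done.L = &r.mu
    	return r
    }

    // await blocks until complete has been called, then returns the value.
    func (r *result) await() string {
    	r.mu.Lock()
    	defer r.mu.Unlock()
    	for !r.ok {
    		r.done.Wait()
    	}
    	return r.value
    }

    // complete publishes the value and wakes all waiters.
    func (r *result) complete(v string) {
    	r.mu.Lock()
    	r.value, r.ok = v, true
    	r.done.Broadcast()
    	r.mu.Unlock()
    }
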
// Load creates the initial packages specified by conf.{Create,Import}Pkgs,
@@ -529,6 +640,10 @@ func (conf *Config) Load() (*Program, error) {
conf.TypeChecker.Error = func(e error) { fmt.Fprintln(os.Stderr, e) }
}
+ if conf.FindPackage == nil {
+ conf.FindPackage = defaultFindPackage
+ }
+
prog := &Program{
Fset: conf.fset(),
Imported: make(map[string]*PackageInfo),
@@ -540,47 +655,101 @@ func (conf *Config) Load() (*Program, error) {
conf: conf,
prog: prog,
imported: make(map[string]*importInfo),
+ start: time.Now(),
+ graph: make(map[string]map[string]bool),
}
- for path := range conf.ImportPkgs {
- info, err := imp.importPackage(path)
- if err != nil {
- return nil, err // failed to create package
+ // -- loading proper (concurrent phase) --------------------------------
+
+ var errpkgs []string // packages that contained errors
+
+ // Load the initially imported packages and their dependencies,
+ // in parallel.
+ for _, ii := range imp.loadAll("", conf.ImportPkgs) {
+ if ii.err != nil {
+ conf.TypeChecker.Error(ii.err) // failed to create package
+ errpkgs = append(errpkgs, ii.path)
+ continue
}
- prog.Imported[path] = info
+ prog.Imported[ii.info.Pkg.Path()] = ii.info
}
- // Now augment those packages that need it.
+ // Augment the designated initial packages by their tests.
+ // Dependencies are loaded in parallel.
+ var xtestPkgs []*build.Package
for path, augment := range conf.ImportPkgs {
- if augment {
- // Find and create the actual package.
- bp, err := conf.findSourcePackage(path)
- if err != nil {
- // "Can't happen" because of previous loop.
- return nil, err // package not found
- }
+ if !augment {
+ continue
+ }
- info := imp.imported[path].info // must be non-nil, see above
- files, errs := imp.conf.parsePackageFiles(bp, 't')
- for _, err := range errs {
- info.appendError(err)
- }
- typeCheckFiles(info, files...)
+ bp, err := conf.FindPackage(conf.build(), path)
+ if err != nil {
+ // Package not found, or can't even parse package declaration.
+ // Already reported by previous loop; ignore it.
+ continue
+ }
+
+ // Needs external test package?
+ if len(bp.XTestGoFiles) > 0 {
+ xtestPkgs = append(xtestPkgs, bp)
}
- }
- for _, create := range conf.CreatePkgs {
- path := create.Path
- if create.Path == "" && len(create.Files) > 0 {
- path = create.Files[0].Name.Name
+ imp.importedMu.Lock() // (unnecessary, we're sequential here)
+ info := imp.imported[path].info // must be non-nil, see above
+ imp.importedMu.Unlock()
+
+ // Parse the in-package test files.
+ files, errs := imp.conf.parsePackageFiles(bp, 't')
+ for _, err := range errs {
+ info.appendError(err)
}
+
+ // The test files augmenting package P cannot be imported,
+ // but may import packages that import P,
+ // so we must disable the cycle check.
+ imp.addFiles(info, files, false)
+ }
+
+ createPkg := func(path string, files []*ast.File, errs []error) {
info := imp.newPackageInfo(path)
- typeCheckFiles(info, create.Files...)
+ for _, err := range errs {
+ info.appendError(err)
+ }
+
+ // Ad-hoc packages are non-importable,
+ // so no cycle check is needed.
+ // addFiles loads dependencies in parallel.
+ imp.addFiles(info, files, false)
prog.Created = append(prog.Created, info)
}
+ // Create packages specified by conf.CreatePkgs.
+ for _, cp := range conf.CreatePkgs {
+ files, errs := parseFiles(conf.fset(), conf.build(), nil, ".", cp.Filenames, conf.ParserMode)
+ files = append(files, cp.Files...)
+
+ path := cp.Path
+ if path == "" {
+ if len(files) > 0 {
+ path = files[0].Name.Name
+ } else {
+ path = "(unnamed)"
+ }
+ }
+ createPkg(path, files, errs)
+ }
+
+ // Create external test packages.
+ sort.Sort(byImportPath(xtestPkgs))
+ for _, bp := range xtestPkgs {
+ files, errs := imp.conf.parsePackageFiles(bp, 'x')
+ createPkg(bp.ImportPath+"_test", files, errs)
+ }
+
+ // -- finishing up (sequential) ----------------------------------------
+
if len(prog.Imported)+len(prog.Created) == 0 {
- return nil, errors.New("no initial packages were specified")
+ return nil, errors.New("no initial packages were loaded")
}
// Create infos for indirectly imported packages.
@@ -598,7 +767,6 @@ func (conf *Config) Load() (*Program, error) {
if !conf.AllowErrors {
// Report errors in indirectly imported packages.
- var errpkgs []string
for _, info := range prog.AllPackages {
if len(info.Errors) > 0 {
errpkgs = append(errpkgs, info.Pkg.Path())
@@ -620,6 +788,12 @@ func (conf *Config) Load() (*Program, error) {
return prog, nil
}
+type byImportPath []*build.Package
+
+func (b byImportPath) Len() int { return len(b) }
+func (b byImportPath) Less(i, j int) bool { return b[i].ImportPath < b[j].ImportPath }
+func (b byImportPath) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+
// markErrorFreePackages sets the TransitivelyErrorFree flag on all
// applicable packages.
func markErrorFreePackages(allPackages map[*types.Package]*PackageInfo) {
@@ -669,12 +843,11 @@ func (conf *Config) build() *build.Context {
return &build.Default
}
-// findSourcePackage locates the specified (possibly empty) package
+// defaultFindPackage locates the specified (possibly empty) package
// using go/build logic. It returns an error if not found.
-//
-func (conf *Config) findSourcePackage(path string) (*build.Package, error) {
+func defaultFindPackage(ctxt *build.Context, path string) (*build.Package, error) {
// Import(srcDir="") disables local imports, e.g. import "./foo".
- bp, err := conf.build().Import(path, "", 0)
+ bp, err := ctxt.Import(path, "", 0)
if _, ok := err.(*build.NoGoError); ok {
return bp, nil // empty directory is not an error
}
@@ -730,52 +903,150 @@ func (conf *Config) parsePackageFiles(bp *build.Package, which rune) ([]*ast.Fil
//
// Idempotent.
//
-func (imp *importer) doImport(imports map[string]*types.Package, path string) (*types.Package, error) {
+func (imp *importer) doImport(from *PackageInfo, to string) (*types.Package, error) {
// Package unsafe is handled specially, and has no PackageInfo.
- if path == "unsafe" {
+ // TODO(adonovan): move this check into go/types?
+ if to == "unsafe" {
return types.Unsafe, nil
}
- info, err := imp.importPackage(path)
- if err != nil {
- return nil, err
+ imp.importedMu.Lock()
+ ii := imp.imported[to]
+ imp.importedMu.Unlock()
+ if ii == nil {
+ panic("internal error: unexpected import: " + to)
+ }
+ if ii.err != nil {
+ return nil, ii.err
+ }
+ if ii.info != nil {
+ return ii.info.Pkg, nil
+ }
+
+ // Import of incomplete package: this indicates a cycle.
+ fromPath := from.Pkg.Path()
+ if cycle := imp.findPath(to, fromPath); cycle != nil {
+ cycle = append([]string{fromPath}, cycle...)
+ return nil, fmt.Errorf("import cycle: %s", strings.Join(cycle, " -> "))
+ }
+
+ panic("internal error: import of incomplete (yet acyclic) package: " + fromPath)
+}
+
+// loadAll loads, parses, and type-checks the specified packages in
+// parallel and returns their completed importInfos in unspecified order.
+//
+// fromPath is the import path of the importing package, if it is
+// importable, "" otherwise. It is used for cycle detection.
+//
+func (imp *importer) loadAll(fromPath string, paths map[string]bool) []*importInfo {
+ result := make([]*importInfo, 0, len(paths))
+ for path := range paths {
+ result = append(result, imp.startLoad(path))
+ }
+
+ if fromPath != "" {
+ // We're loading a set of imports.
+ //
+ // We must record graph edges from the importing package
+ // to its dependencies, and check for cycles.
+ imp.graphMu.Lock()
+ deps, ok := imp.graph[fromPath]
+ if !ok {
+ deps = make(map[string]bool)
+ imp.graph[fromPath] = deps
+ }
+ for path := range paths {
+ deps[path] = true
+ }
+ imp.graphMu.Unlock()
}
- // Update the type checker's package map on success.
- imports[path] = info.Pkg
+ for _, ii := range result {
+ if fromPath != "" {
+ if cycle := imp.findPath(ii.path, fromPath); cycle != nil {
+ // Cycle-forming import: we must not await its
+ // completion since it would deadlock.
+ //
+ // We don't record the error in ii since
+ // the error is really associated with the
+ // cycle-forming edge, not the package itself.
+ // (Also it would complicate the
+ // invariants of importPath completion.)
+ if trace {
+ fmt.Fprintf(os.Stderr, "import cycle: %q\n", cycle)
+ }
+ continue
+ }
+ }
+ ii.awaitCompletion()
- return info.Pkg, nil
+ }
+ return result
}
-// importPackage imports the package with the given import path, plus
-// its dependencies.
+// findPath returns an arbitrary path from 'from' to 'to' in the import
+// graph, or nil if there was none.
+func (imp *importer) findPath(from, to string) []string {
+ imp.graphMu.Lock()
+ defer imp.graphMu.Unlock()
+
+ seen := make(map[string]bool)
+ var search func(stack []string, importPath string) []string
+ search = func(stack []string, importPath string) []string {
+ if !seen[importPath] {
+ seen[importPath] = true
+ stack = append(stack, importPath)
+ if importPath == to {
+ return stack
+ }
+ for x := range imp.graph[importPath] {
+ if p := search(stack, x); p != nil {
+ return p
+ }
+ }
+ }
+ return nil
+ }
+ return search(make([]string, 0, 20), from)
+}
+
+// startLoad initiates the loading, parsing and type-checking of the
+// specified package and its dependencies, if it has not already begun.
//
-// On success, it returns a PackageInfo, possibly containing errors.
-// importPackage returns an error if it couldn't even create the package.
+// It returns an importInfo, not necessarily in a completed state. The
+// caller must call awaitCompletion() before accessing its info and err
+// fields.
+//
+// startLoad is concurrency-safe and idempotent.
//
// Precondition: path != "unsafe".
//
-func (imp *importer) importPackage(path string) (*PackageInfo, error) {
+func (imp *importer) startLoad(path string) *importInfo {
+ imp.importedMu.Lock()
ii, ok := imp.imported[path]
if !ok {
- // In preorder, initialize the map entry to a cycle
- // error in case importPackage(path) is called again
- // before the import is completed.
- ii = &importInfo{err: fmt.Errorf("import cycle in package %s", path)}
+ ii = &importInfo{path: path}
+ ii.complete.L = &ii.mu
imp.imported[path] = ii
- // Find and create the actual package.
- if _, ok := imp.conf.ImportPkgs[path]; ok || imp.conf.SourceImports {
- ii.info, ii.err = imp.importFromSource(path)
- } else {
- ii.info, ii.err = imp.importFromBinary(path)
- }
- if ii.info != nil {
- ii.info.Importable = true
- }
+ go imp.load(path, ii)
}
+ imp.importedMu.Unlock()
- return ii.info, ii.err
+ return ii
+}
+
+func (imp *importer) load(path string, ii *importInfo) {
+ var info *PackageInfo
+ var err error
+ // Find and create the actual package.
+ if _, ok := imp.conf.ImportPkgs[path]; ok || !imp.conf.ImportFromBinary {
+ info, err = imp.loadFromSource(path)
+ } else {
+ info, err = imp.importFromBinary(path)
+ }
+ ii.Complete(info, err)
}
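
One consequence of the condition above, stated here as an assumption since the patch does not spell it out: packages named in ImportPkgs are always loaded from source, so ImportFromBinary affects only their dependencies. A fragment:

    conf := loader.Config{ImportFromBinary: true}
    conf.Import("net/http")
    // net/http itself is parsed and type-checked from source;
    // its dependencies are imported from compiled export data.
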
// importFromBinary implements package loading from the client-supplied
@@ -787,47 +1058,86 @@ func (imp *importer) importFromBinary(path string) (*PackageInfo, error) {
if importfn == nil {
importfn = gcimporter.Import
}
+ imp.typecheckerMu.Lock()
pkg, err := importfn(imp.conf.TypeChecker.Packages, path)
+ if pkg != nil {
+ imp.conf.TypeChecker.Packages[path] = pkg
+ }
+ imp.typecheckerMu.Unlock()
if err != nil {
return nil, err
}
info := &PackageInfo{Pkg: pkg}
+ info.Importable = true
+ imp.typecheckerMu.Lock()
imp.prog.AllPackages[pkg] = info
+ imp.typecheckerMu.Unlock()
return info, nil
}
-// importFromSource implements package loading by parsing Go source files
+// loadFromSource implements package loading by parsing Go source files
// located by go/build.
+// The returned PackageInfo's typeCheck function must be called.
//
-func (imp *importer) importFromSource(path string) (*PackageInfo, error) {
- bp, err := imp.conf.findSourcePackage(path)
+func (imp *importer) loadFromSource(path string) (*PackageInfo, error) {
+ bp, err := imp.conf.FindPackage(imp.conf.build(), path)
if err != nil {
return nil, err // package not found
}
- // Type-check the package.
info := imp.newPackageInfo(path)
+ info.Importable = true
files, errs := imp.conf.parsePackageFiles(bp, 'g')
for _, err := range errs {
info.appendError(err)
}
- typeCheckFiles(info, files...)
+
+ imp.addFiles(info, files, true)
+
+ imp.typecheckerMu.Lock()
+ imp.conf.TypeChecker.Packages[path] = info.Pkg
+ imp.typecheckerMu.Unlock()
+
return info, nil
}
-// typeCheckFiles adds the specified files to info and type-checks them.
-// The order of files determines the package initialization order.
-// It may be called multiple times.
+// addFiles adds and type-checks the specified files to info, loading
+// their dependencies if needed. The order of files determines the
+// package initialization order. It may be called multiple times on the
+// same package. Errors are appended to the info.Errors field.
+//
+// cycleCheck determines whether the imports within files create
+// dependency edges that should be checked for potential cycles.
//
-// Errors are stored in the info.Errors field.
-func typeCheckFiles(info *PackageInfo, files ...*ast.File) {
+func (imp *importer) addFiles(info *PackageInfo, files []*ast.File, cycleCheck bool) {
info.Files = append(info.Files, files...)
- // Ignore the returned (first) error since we already collect them all.
- _ = info.checker.Files(files)
+ // Ensure the dependencies are loaded, in parallel.
+ var fromPath string
+ if cycleCheck {
+ fromPath = info.Pkg.Path()
+ }
+ imp.loadAll(fromPath, scanImports(files))
+
+ if trace {
+ fmt.Fprintf(os.Stderr, "%s: start %q (%d)\n",
+ time.Since(imp.start), info.Pkg.Path(), len(files))
+ }
+
+ // Ignore the returned (first) error since we
+ // already collect them all in the PackageInfo.
+ info.checker.Files(files)
+
+ if trace {
+ fmt.Fprintf(os.Stderr, "%s: stop %q\n",
+ time.Since(imp.start), info.Pkg.Path())
+ }
}
func (imp *importer) newPackageInfo(path string) *PackageInfo {
pkg := types.NewPackage(path, "")
+ if imp.conf.PackageCreated != nil {
+ imp.conf.PackageCreated(pkg)
+ }
info := &PackageInfo{
Pkg: pkg,
Info: types.Info{
@@ -847,10 +1157,14 @@ func (imp *importer) newPackageInfo(path string) *PackageInfo {
if f := imp.conf.TypeCheckFuncBodies; f != nil {
tc.IgnoreFuncBodies = !f(path)
}
- tc.Import = imp.doImport // doImport wraps the user's importfn, effectively
+ tc.Import = func(_ map[string]*types.Package, to string) (*types.Package, error) {
+ return imp.doImport(info, to)
+ }
tc.Error = info.appendError // appendError wraps the user's Error function
info.checker = types.NewChecker(&tc, imp.conf.fset(), pkg, &info.Info)
+ imp.typecheckerMu.Lock()
imp.prog.AllPackages[pkg] = info
+ imp.typecheckerMu.Unlock()
return info
}
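
Taken together, a minimal sketch of driving the revised loader end to end (standard-library package paths; the loader import path is the one used in this tree):

    package main

    import (
    	"fmt"
    	"log"

    	"golang.org/x/tools/go/loader"
    )

    func main() {
    	var conf loader.Config
    	conf.ImportWithTests("errors") // errors, its in-package tests, and errors_test
    	conf.Import("fmt")

    	prog, err := conf.Load()
    	if err != nil {
    		log.Fatal(err) // hard failures only, unless conf.AllowErrors is set
    	}
    	for _, info := range prog.InitialPackages() {
    		fmt.Println(info.Pkg.Path())
    	}
    }
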
diff --git a/go/loader/loader_test.go b/go/loader/loader_test.go
index c239c63..aa8c15b 100644
--- a/go/loader/loader_test.go
+++ b/go/loader/loader_test.go
@@ -5,141 +5,372 @@
package loader_test
import (
- "bytes"
"fmt"
"go/build"
- "io"
- "io/ioutil"
- "os"
+ "reflect"
"sort"
"strings"
+ "sync"
"testing"
- "time"
+ "golang.org/x/tools/go/buildutil"
"golang.org/x/tools/go/loader"
)
-func loadFromArgs(args []string) (prog *loader.Program, rest []string, err error) {
- conf := &loader.Config{}
- rest, err = conf.FromArgs(args, true)
- if err == nil {
- prog, err = conf.Load()
+// TestFromArgs checks that conf.FromArgs populates conf correctly.
+// It does no I/O.
+func TestFromArgs(t *testing.T) {
+ type result struct {
+ Err string
+ Rest []string
+ ImportPkgs map[string]bool
+ CreatePkgs []loader.PkgSpec
+ }
+ for _, test := range []struct {
+ args []string
+ tests bool
+ want result
+ }{
+ // Mix of existing and non-existent packages.
+ {
+ args: []string{"nosuchpkg", "errors"},
+ want: result{
+ ImportPkgs: map[string]bool{"errors": false, "nosuchpkg": false},
+ },
+ },
+ // Same, with -test flag.
+ {
+ args: []string{"nosuchpkg", "errors"},
+ tests: true,
+ want: result{
+ ImportPkgs: map[string]bool{"errors": true, "nosuchpkg": true},
+ },
+ },
+ // Surplus arguments.
+ {
+ args: []string{"fmt", "errors", "--", "surplus"},
+ want: result{
+ Rest: []string{"surplus"},
+ ImportPkgs: map[string]bool{"errors": false, "fmt": false},
+ },
+ },
+ // Ad hoc package specified as *.go files.
+ {
+ args: []string{"foo.go", "bar.go"},
+ want: result{CreatePkgs: []loader.PkgSpec{{
+ Filenames: []string{"foo.go", "bar.go"},
+ }}},
+ },
+ // Mixture of *.go and import paths.
+ {
+ args: []string{"foo.go", "fmt"},
+ want: result{
+ Err: "named files must be .go files: fmt",
+ },
+ },
+ } {
+ var conf loader.Config
+ rest, err := conf.FromArgs(test.args, test.tests)
+ got := result{
+ Rest: rest,
+ ImportPkgs: conf.ImportPkgs,
+ CreatePkgs: conf.CreatePkgs,
+ }
+ if err != nil {
+ got.Err = err.Error()
+ }
+ if !reflect.DeepEqual(got, test.want) {
+ t.Errorf("FromArgs(%q) = %+v, want %+v", test.args, got, test.want)
+ }
}
- return
}
-func TestLoadFromArgs(t *testing.T) {
- // Failed load: bad first import path causes parsePackageFiles to fail.
- args := []string{"nosuchpkg", "errors"}
- if _, _, err := loadFromArgs(args); err == nil {
- t.Errorf("loadFromArgs(%q) succeeded, want failure", args)
- } else {
- // cannot find package: ok.
+func TestLoad_NoInitialPackages(t *testing.T) {
+ var conf loader.Config
+
+ const wantErr = "no initial packages were loaded"
+
+ prog, err := conf.Load()
+ if err == nil {
+ t.Errorf("Load succeeded unexpectedly, want %q", wantErr)
+ } else if err.Error() != wantErr {
+ t.Errorf("Load failed with wrong error %q, want %q", err, wantErr)
+ }
+ if prog != nil {
+ t.Errorf("Load unexpectedly returned a Program")
}
+}
+
+func TestLoad_MissingInitialPackage(t *testing.T) {
+ var conf loader.Config
+ conf.Import("nosuchpkg")
+ conf.Import("errors")
+
+ const wantErr = "couldn't load packages due to errors: nosuchpkg"
- // Failed load: bad second import path proceeds to doImport0, which fails.
- args = []string{"errors", "nosuchpkg"}
- if _, _, err := loadFromArgs(args); err == nil {
- t.Errorf("loadFromArgs(%q) succeeded, want failure", args)
- } else {
- // cannot find package: ok
+ prog, err := conf.Load()
+ if err == nil {
+ t.Errorf("Load succeeded unexpectedly, want %q", wantErr)
+ } else if err.Error() != wantErr {
+ t.Errorf("Load failed with wrong error %q, want %q", err, wantErr)
}
+ if prog != nil {
+ t.Errorf("Load unexpectedly returned a Program")
+ }
+}
- // Successful load.
- args = []string{"fmt", "errors", "--", "surplus"}
- prog, rest, err := loadFromArgs(args)
+func TestLoad_MissingInitialPackage_AllowErrors(t *testing.T) {
+ var conf loader.Config
+ conf.AllowErrors = true
+ conf.Import("nosuchpkg")
+ conf.ImportWithTests("errors")
+
+ prog, err := conf.Load()
if err != nil {
- t.Fatalf("loadFromArgs(%q) failed: %s", args, err)
+ t.Errorf("Load failed unexpectedly: %v", err)
}
- if got, want := fmt.Sprint(rest), "[surplus]"; got != want {
- t.Errorf("loadFromArgs(%q) rest: got %s, want %s", args, got, want)
+ if prog == nil {
+ t.Fatalf("Load returned a nil Program")
}
- // Check list of Created packages.
- var pkgnames []string
- for _, info := range prog.Created {
- pkgnames = append(pkgnames, info.Pkg.Path())
+ if got, want := created(prog), "errors_test"; got != want {
+ t.Errorf("Created = %s, want %s", got, want)
}
- // All import paths may contribute tests.
- if got, want := fmt.Sprint(pkgnames), "[fmt_test errors_test]"; got != want {
- t.Errorf("Created: got %s, want %s", got, want)
+ if got, want := imported(prog), "errors"; got != want {
+ t.Errorf("Imported = %s, want %s", got, want)
}
+}
- // Check set of Imported packages.
- pkgnames = nil
- for path := range prog.Imported {
- pkgnames = append(pkgnames, path)
+func TestCreateUnnamedPackage(t *testing.T) {
+ var conf loader.Config
+ conf.CreateFromFilenames("")
+ prog, err := conf.Load()
+ if err != nil {
+ t.Fatalf("Load failed: %v", err)
}
- sort.Strings(pkgnames)
- // All import paths may contribute tests.
- if got, want := fmt.Sprint(pkgnames), "[errors fmt]"; got != want {
- t.Errorf("Loaded: got %s, want %s", got, want)
+ if got, want := fmt.Sprint(prog.InitialPackages()), "[(unnamed)]"; got != want {
+ t.Errorf("InitialPackages = %s, want %s", got, want)
}
+}
- // Check set of transitive packages.
- // There are >30 and the set may grow over time, so only check a few.
- all := map[string]struct{}{}
- for _, info := range prog.AllPackages {
- all[info.Pkg.Path()] = struct{}{}
+func TestLoad_MissingFileInCreatedPackage(t *testing.T) {
+ var conf loader.Config
+ conf.CreateFromFilenames("", "missing.go")
+
+ const wantErr = "couldn't load packages due to errors: (unnamed)"
+
+ prog, err := conf.Load()
+ if prog != nil {
+ t.Errorf("Load unexpectedly returned a Program")
}
- want := []string{"strings", "time", "runtime", "testing", "unicode"}
- for _, w := range want {
- if _, ok := all[w]; !ok {
- t.Errorf("AllPackages: want element %s, got set %v", w, all)
- }
+ if err == nil {
+ t.Fatalf("Load succeeded unexpectedly, want %q", wantErr)
+ }
+ if err.Error() != wantErr {
+ t.Fatalf("Load failed with wrong error %q, want %q", err, wantErr)
}
}
-func TestLoadFromArgsSource(t *testing.T) {
- // mixture of *.go/non-go.
- args := []string{"testdata/a.go", "fmt"}
- prog, _, err := loadFromArgs(args)
+func TestLoad_MissingFileInCreatedPackage_AllowErrors(t *testing.T) {
+ conf := loader.Config{AllowErrors: true}
+ conf.CreateFromFilenames("", "missing.go")
+
+ prog, err := conf.Load()
+ if err != nil {
+ t.Errorf("Load failed: %v", err)
+ }
+ if got, want := fmt.Sprint(prog.InitialPackages()), "[(unnamed)]"; got != want {
+ t.Fatalf("InitialPackages = %s, want %s", got, want)
+ }
+}
+
+func TestLoad_ParseError(t *testing.T) {
+ var conf loader.Config
+ conf.CreateFromFilenames("badpkg", "testdata/badpkgdecl.go")
+
+ const wantErr = "couldn't load packages due to errors: badpkg"
+
+ prog, err := conf.Load()
+ if prog != nil {
+ t.Errorf("Load unexpectedly returned a Program")
+ }
if err == nil {
- t.Errorf("loadFromArgs(%q) succeeded, want failure", args)
- } else {
- // "named files must be .go files: fmt": ok
+ t.Fatalf("Load succeeded unexpectedly, want %q", wantErr)
+ }
+ if err.Error() != wantErr {
+ t.Fatalf("Load failed with wrong error %q, want %q", err, wantErr)
}
+}
+
+func TestLoad_ParseError_AllowErrors(t *testing.T) {
+ var conf loader.Config
+ conf.AllowErrors = true
+ conf.CreateFromFilenames("badpkg", "testdata/badpkgdecl.go")
- // successful load
- args = []string{"testdata/a.go", "testdata/b.go"}
- prog, _, err = loadFromArgs(args)
+ prog, err := conf.Load()
if err != nil {
- t.Fatalf("loadFromArgs(%q) failed: %s", args, err)
+ t.Errorf("Load failed unexpectedly: %v", err)
}
- if len(prog.Created) != 1 {
- t.Errorf("loadFromArgs(%q): got %d items, want 1", len(prog.Created))
+ if prog == nil {
+ t.Fatalf("Load returned a nil Program")
}
- if len(prog.Created) > 0 {
- path := prog.Created[0].Pkg.Path()
- if path != "P" {
- t.Errorf("loadFromArgs(%q): got %v, want [P]", prog.Created, path)
- }
+ if got, want := created(prog), "badpkg"; got != want {
+ t.Errorf("Created = %s, want %s", got, want)
+ }
+
+ badpkg := prog.Created[0]
+ if len(badpkg.Files) != 1 {
+ t.Errorf("badpkg has %d files, want 1", len(badpkg.Files))
+ }
+ wantErr := "testdata/badpkgdecl.go:1:34: expected 'package', found 'EOF'"
+ if !hasError(badpkg.Errors, wantErr) {
+ t.Errorf("badpkg.Errors = %v, want %s", badpkg.Errors, wantErr)
}
}
-type fakeFileInfo struct{}
+func TestLoad_FromSource_Success(t *testing.T) {
+ var conf loader.Config
+ conf.CreateFromFilenames("P", "testdata/a.go", "testdata/b.go")
-func (fakeFileInfo) Name() string { return "x.go" }
-func (fakeFileInfo) Sys() interface{} { return nil }
-func (fakeFileInfo) ModTime() time.Time { return time.Time{} }
-func (fakeFileInfo) IsDir() bool { return false }
-func (fakeFileInfo) Size() int64 { return 0 }
-func (fakeFileInfo) Mode() os.FileMode { return 0644 }
+ prog, err := conf.Load()
+ if err != nil {
+ t.Errorf("Load failed unexpectedly: %v", err)
+ }
+ if prog == nil {
+ t.Fatalf("Load returned a nil Program")
+ }
+ if got, want := created(prog), "P"; got != want {
+ t.Errorf("Created = %s, want %s", got, want)
+ }
+}
-var justXgo = [1]os.FileInfo{fakeFileInfo{}} // ["x.go"]
+func TestLoad_FromImports_Success(t *testing.T) {
+ var conf loader.Config
+ conf.ImportWithTests("fmt")
+ conf.ImportWithTests("errors")
-func fakeContext(pkgs map[string]string) *build.Context {
- ctxt := build.Default // copy
- ctxt.GOROOT = "/go"
- ctxt.GOPATH = ""
- ctxt.IsDir = func(path string) bool { return true }
- ctxt.ReadDir = func(dir string) ([]os.FileInfo, error) { return justXgo[:], nil }
- ctxt.OpenFile = func(path string) (io.ReadCloser, error) {
- path = path[len("/go/src/"):]
- return ioutil.NopCloser(bytes.NewBufferString(pkgs[path[0:1]])), nil
- }
- return &ctxt
+ prog, err := conf.Load()
+ if err != nil {
+ t.Errorf("Load failed unexpectedly: %v", err)
+ }
+ if prog == nil {
+ t.Fatalf("Load returned a nil Program")
+ }
+ if got, want := created(prog), "errors_test fmt_test"; got != want {
+ t.Errorf("Created = %q, want %s", got, want)
+ }
+ if got, want := imported(prog), "errors fmt"; got != want {
+ t.Errorf("Imported = %s, want %s", got, want)
+ }
+ // Check set of transitive packages.
+ // There are >30 and the set may grow over time, so only check a few.
+ want := map[string]bool{
+ "strings": true,
+ "time": true,
+ "runtime": true,
+ "testing": true,
+ "unicode": true,
+ }
+ for _, path := range all(prog) {
+ delete(want, path)
+ }
+ if len(want) > 0 {
+ t.Errorf("AllPackages is missing these keys: %q", keys(want))
+ }
+}
+
+func TestLoad_MissingIndirectImport(t *testing.T) {
+ pkgs := map[string]string{
+ "a": `package a; import _ "b"`,
+ "b": `package b; import _ "c"`,
+ }
+ conf := loader.Config{Build: fakeContext(pkgs)}
+ conf.Import("a")
+
+ const wantErr = "couldn't load packages due to errors: b"
+
+ prog, err := conf.Load()
+ if err == nil {
+ t.Errorf("Load succeeded unexpectedly, want %q", wantErr)
+ } else if err.Error() != wantErr {
+ t.Errorf("Load failed with wrong error %q, want %q", err, wantErr)
+ }
+ if prog != nil {
+ t.Errorf("Load unexpectedly returned a Program")
+ }
}
+func TestLoad_BadDependency_AllowErrors(t *testing.T) {
+ for _, test := range []struct {
+ descr string
+ pkgs map[string]string
+ wantPkgs string
+ }{
+
+ {
+ descr: "missing dependency",
+ pkgs: map[string]string{
+ "a": `package a; import _ "b"`,
+ "b": `package b; import _ "c"`,
+ },
+ wantPkgs: "a b",
+ },
+ {
+ descr: "bad package decl in dependency",
+ pkgs: map[string]string{
+ "a": `package a; import _ "b"`,
+ "b": `package b; import _ "c"`,
+ "c": `package`,
+ },
+ wantPkgs: "a b",
+ },
+ {
+ descr: "parse error in dependency",
+ pkgs: map[string]string{
+ "a": `package a; import _ "b"`,
+ "b": `package b; import _ "c"`,
+ "c": `package c; var x = `,
+ },
+ wantPkgs: "a b c",
+ },
+ } {
+ conf := loader.Config{
+ AllowErrors: true,
+ Build: fakeContext(test.pkgs),
+ }
+ conf.Import("a")
+
+ prog, err := conf.Load()
+ if err != nil {
+ t.Errorf("%s: Load failed unexpectedly: %v", test.descr, err)
+ }
+ if prog == nil {
+ t.Fatalf("%s: Load returned a nil Program", test.descr)
+ }
+
+ if got, want := imported(prog), "a"; got != want {
+ t.Errorf("%s: Imported = %s, want %s", test.descr, got, want)
+ }
+ if got := all(prog); strings.Join(got, " ") != test.wantPkgs {
+ t.Errorf("%s: AllPackages = %s, want %s", test.descr, got, test.wantPkgs)
+ }
+ }
+}
+
+// TODO(adonovan): more Load tests:
+//
+// failures:
+// - to parse package decl of *_test.go files
+// - to parse package decl of external *_test.go files
+// - to parse whole of *_test.go files
+// - to parse whole of external *_test.go files
+// - to open a *.go file during import scanning
+// - to import from binary
+
+// features:
+// - InitialPackages
+// - PackageCreated hook
+// - TypeCheckFuncBodies hook
+
func TestTransitivelyErrorFreeFlag(t *testing.T) {
// Create an minimal custom build.Context
// that fakes the following packages:
@@ -157,9 +388,8 @@ func TestTransitivelyErrorFreeFlag(t *testing.T) {
"e": `package e; import _ "d"`,
}
conf := loader.Config{
- AllowErrors: true,
- SourceImports: true,
- Build: fakeContext(pkgs),
+ AllowErrors: true,
+ Build: fakeContext(pkgs),
}
conf.Import("a")
@@ -200,21 +430,23 @@ func TestTransitivelyErrorFreeFlag(t *testing.T) {
}
}
-// Test that both syntax (scan/parse) and type errors are both recorded
+// Test that syntax (scan/parse), type, and loader errors are recorded
// (in PackageInfo.Errors) and reported (via Config.TypeChecker.Error).
func TestErrorReporting(t *testing.T) {
pkgs := map[string]string{
- "a": `package a; import _ "b"; var x int = false`,
+ "a": `package a; import (_ "b"; _ "c"); var x int = false`,
"b": `package b; 'syntax error!`,
}
conf := loader.Config{
- AllowErrors: true,
- SourceImports: true,
- Build: fakeContext(pkgs),
+ AllowErrors: true,
+ Build: fakeContext(pkgs),
}
+ var mu sync.Mutex
var allErrors []error
conf.TypeChecker.Error = func(err error) {
+ mu.Lock()
allErrors = append(allErrors, err)
+ mu.Unlock()
}
conf.Import("a")
@@ -226,15 +458,6 @@ func TestErrorReporting(t *testing.T) {
t.Fatalf("Load returned nil *Program")
}
- hasError := func(errors []error, substr string) bool {
- for _, err := range errors {
- if strings.Contains(err.Error(), substr) {
- return true
- }
- }
- return false
- }
-
// TODO(adonovan): test keys of ImportMap.
// Check errors recorded in each PackageInfo.
@@ -244,6 +467,9 @@ func TestErrorReporting(t *testing.T) {
if !hasError(info.Errors, "cannot convert false") {
t.Errorf("a.Errors = %v, want bool conversion (type) error", info.Errors)
}
+ if !hasError(info.Errors, "could not import c") {
+ t.Errorf("a.Errors = %v, want import (loader) error", info.Errors)
+ }
case "b":
if !hasError(info.Errors, "rune literal not terminated") {
t.Errorf("b.Errors = %v, want unterminated literal (syntax) error", info.Errors)
@@ -253,7 +479,159 @@ func TestErrorReporting(t *testing.T) {
// Check errors reported via error handler.
if !hasError(allErrors, "cannot convert false") ||
- !hasError(allErrors, "rune literal not terminated") {
- t.Errorf("allErrors = %v, want both syntax and type errors", allErrors)
+ !hasError(allErrors, "rune literal not terminated") ||
+ !hasError(allErrors, "could not import c") {
+ t.Errorf("allErrors = %v, want syntax, type and loader errors", allErrors)
+ }
+}
+
+func TestCycles(t *testing.T) {
+ for _, test := range []struct {
+ descr string
+ ctxt *build.Context
+ wantErr string
+ }{
+ {
+ "self-cycle",
+ fakeContext(map[string]string{
+ "main": `package main; import _ "selfcycle"`,
+ "selfcycle": `package selfcycle; import _ "selfcycle"`,
+ }),
+ `import cycle: selfcycle -> selfcycle`,
+ },
+ {
+ "three-package cycle",
+ fakeContext(map[string]string{
+ "main": `package main; import _ "a"`,
+ "a": `package a; import _ "b"`,
+ "b": `package b; import _ "c"`,
+ "c": `package c; import _ "a"`,
+ }),
+ `import cycle: c -> a -> b -> c`,
+ },
+ {
+ "self-cycle in dependency of test file",
+ buildutil.FakeContext(map[string]map[string]string{
+ "main": {
+ "main.go": `package main`,
+ "main_test.go": `package main; import _ "a"`,
+ },
+ "a": {
+ "a.go": `package a; import _ "a"`,
+ },
+ }),
+ `import cycle: a -> a`,
+ },
+ // TODO(adonovan): fix: these fail
+ // {
+ // "two-package cycle in dependency of test file",
+ // buildutil.FakeContext(map[string]map[string]string{
+ // "main": {
+ // "main.go": `package main`,
+ // "main_test.go": `package main; import _ "a"`,
+ // },
+ // "a": {
+ // "a.go": `package a; import _ "main"`,
+ // },
+ // }),
+ // `import cycle: main -> a -> main`,
+ // },
+ // {
+ // "self-cycle in augmented package",
+ // buildutil.FakeContext(map[string]map[string]string{
+ // "main": {
+ // "main.go": `package main`,
+ // "main_test.go": `package main; import _ "main"`,
+ // },
+ // }),
+ // `import cycle: main -> main`,
+ // },
+ } {
+ conf := loader.Config{
+ AllowErrors: true,
+ Build: test.ctxt,
+ }
+ var mu sync.Mutex
+ var allErrors []error
+ conf.TypeChecker.Error = func(err error) {
+ mu.Lock()
+ allErrors = append(allErrors, err)
+ mu.Unlock()
+ }
+ conf.ImportWithTests("main")
+
+ prog, err := conf.Load()
+ if err != nil {
+ t.Errorf("%s: Load failed: %s", test.descr, err)
+ }
+ if prog == nil {
+ t.Fatalf("%s: Load returned nil *Program", test.descr)
+ }
+
+ if !hasError(allErrors, test.wantErr) {
+ t.Errorf("%s: Load() errors = %q, want %q",
+ test.descr, allErrors, test.wantErr)
+ }
+ }
+
+ // TODO(adonovan):
+ // - Test that in a legal test cycle, none of the symbols
+ // defined by augmentation are visible via import.
+}
+
+// ---- utilities ----
+
+// Simplifying wrapper around buildutil.FakeContext for single-file packages.
+func fakeContext(pkgs map[string]string) *build.Context {
+ pkgs2 := make(map[string]map[string]string)
+ for path, content := range pkgs {
+ pkgs2[path] = map[string]string{"x.go": content}
+ }
+ return buildutil.FakeContext(pkgs2)
+}
+
+func hasError(errors []error, substr string) bool {
+ for _, err := range errors {
+ if strings.Contains(err.Error(), substr) {
+ return true
+ }
+ }
+ return false
+}
+
+func keys(m map[string]bool) (keys []string) {
+ for key := range m {
+ keys = append(keys, key)
+ }
+ sort.Strings(keys)
+ return
+}
+
+// Returns all loaded packages.
+func all(prog *loader.Program) []string {
+ var pkgs []string
+ for _, info := range prog.AllPackages {
+ pkgs = append(pkgs, info.Pkg.Path())
+ }
+ sort.Strings(pkgs)
+ return pkgs
+}
+
+// Returns initially imported packages, as a string.
+func imported(prog *loader.Program) string {
+ var pkgs []string
+ for _, info := range prog.Imported {
+ pkgs = append(pkgs, info.Pkg.Path())
+ }
+ sort.Strings(pkgs)
+ return strings.Join(pkgs, " ")
+}
+
+// Returns initially created packages, as a string.
+func created(prog *loader.Program) string {
+ var pkgs []string
+ for _, info := range prog.Created {
+ pkgs = append(pkgs, info.Pkg.Path())
}
+ return strings.Join(pkgs, " ")
}
diff --git a/go/loader/source_test.go b/go/loader/source_test.go
index d0ca4a7..f2e06be 100644
--- a/go/loader/source_test.go
+++ b/go/loader/source_test.go
@@ -13,7 +13,7 @@ import (
"strings"
"testing"
- "golang.org/x/tools/astutil"
+ "golang.org/x/tools/go/ast/astutil"
"golang.org/x/tools/go/loader"
"golang.org/x/tools/go/ssa"
)
diff --git a/go/loader/stdlib_test.go b/go/loader/stdlib_test.go
index eb439a1..d14928a 100644
--- a/go/loader/stdlib_test.go
+++ b/go/loader/stdlib_test.go
@@ -40,9 +40,7 @@ func TestStdlib(t *testing.T) {
ctxt.GOPATH = "" // disable GOPATH
conf := loader.Config{Build: &ctxt}
for _, path := range buildutil.AllPackages(conf.Build) {
- if err := conf.ImportWithTests(path); err != nil {
- t.Error(err)
- }
+ conf.ImportWithTests(path)
}
prog, err := conf.Load()
diff --git a/go/loader/testdata/badpkgdecl.go b/go/loader/testdata/badpkgdecl.go
new file mode 100644
index 0000000..1e39359
--- /dev/null
+++ b/go/loader/testdata/badpkgdecl.go
@@ -0,0 +1 @@
+// this file has no package decl
diff --git a/go/loader/util.go b/go/loader/util.go
index 467a74c..1166c92 100644
--- a/go/loader/util.go
+++ b/go/loader/util.go
@@ -11,8 +11,10 @@ import (
"go/token"
"io"
"os"
- "path/filepath"
+ "strconv"
"sync"
+
+ "golang.org/x/tools/go/buildutil"
)
// parseFiles parses the Go source files within directory dir and
@@ -26,21 +28,13 @@ func parseFiles(fset *token.FileSet, ctxt *build.Context, displayPath func(strin
if displayPath == nil {
displayPath = func(path string) string { return path }
}
- isAbs := filepath.IsAbs
- if ctxt.IsAbsPath != nil {
- isAbs = ctxt.IsAbsPath
- }
- joinPath := filepath.Join
- if ctxt.JoinPath != nil {
- joinPath = ctxt.JoinPath
- }
var wg sync.WaitGroup
n := len(files)
parsed := make([]*ast.File, n)
errors := make([]error, n)
for i, file := range files {
- if !isAbs(file) {
- file = joinPath(dir, file)
+ if !buildutil.IsAbsPath(ctxt, file) {
+ file = buildutil.JoinPath(ctxt, dir, file)
}
wg.Add(1)
go func(i int, file string) {
@@ -86,6 +80,32 @@ func parseFiles(fset *token.FileSet, ctxt *build.Context, displayPath func(strin
return parsed, errors
}
+// scanImports returns the set of all package import paths from all
+// import specs in the specified files.
+func scanImports(files []*ast.File) map[string]bool {
+ imports := make(map[string]bool)
+ for _, f := range files {
+ for _, decl := range f.Decls {
+ if decl, ok := decl.(*ast.GenDecl); ok && decl.Tok == token.IMPORT {
+ for _, spec := range decl.Specs {
+ spec := spec.(*ast.ImportSpec)
+
+ // NB: do not assume the program is well-formed!
+ path, err := strconv.Unquote(spec.Path.Value)
+ if err != nil {
+ continue // quietly ignore the error
+ }
+ if path == "C" || path == "unsafe" {
+ continue // skip pseudo packages
+ }
+ imports[path] = true
+ }
+ }
+ }
+ }
+ return imports
+}
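
For example, a fragment usable only from within this package (scanImports is unexported); it assumes the standard go/ast, go/parser, and go/token imports:

    fset := token.NewFileSet()
    f, err := parser.ParseFile(fset, "x.go", `package x; import ("fmt"; "unsafe")`, 0)
    if err != nil {
    	log.Fatal(err)
    }
    fmt.Println(scanImports([]*ast.File{f})) // map[fmt:true]; "unsafe" is skipped
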
+
// ---------- Internal helpers ----------
// TODO(adonovan): make this a method: func (*token.File) Contains(token.Pos)
diff --git a/go/pointer/analysis.go b/go/pointer/analysis.go
index 8887a86..f3b70f1 100644
--- a/go/pointer/analysis.go
+++ b/go/pointer/analysis.go
@@ -255,7 +255,7 @@ func Analyze(config *Config) (result *Result, err error) {
// (This only checks that the package scope is complete,
// not that func bodies exist, but it's a good signal.)
if !pkg.Object.Complete() {
- return nil, fmt.Errorf(`pointer analysis requires a complete program yet package %q was incomplete (set loader.Config.SourceImports during loading)`, pkg.Object.Path())
+ return nil, fmt.Errorf(`pointer analysis requires a complete program yet package %q was incomplete (don't set loader.Config.ImportFromBinary during loading)`, pkg.Object.Path())
}
}
diff --git a/go/pointer/doc.go b/go/pointer/doc.go
index 00bf2a4..22e569c 100644
--- a/go/pointer/doc.go
+++ b/go/pointer/doc.go
@@ -607,4 +607,4 @@ ACM, New York, NY, USA, 47-56. DOI=10.1145/349299.349310
http://doi.acm.org/10.1145/349299.349310
*/
-package pointer
+package pointer // import "golang.org/x/tools/go/pointer"
diff --git a/go/pointer/example_test.go b/go/pointer/example_test.go
index eecfd30..5f2e940 100644
--- a/go/pointer/example_test.go
+++ b/go/pointer/example_test.go
@@ -41,10 +41,10 @@ func main() {
i.f(x) // dynamic method call
}
`
- // Construct a loader.
- conf := loader.Config{SourceImports: true}
+ var conf loader.Config
- // Parse the input file.
+ // Parse the input file, a string.
+ // (Command-line tools should use conf.FromArgs.)
file, err := conf.ParseFile("myprog.go", myprog)
if err != nil {
fmt.Print(err) // parse error
diff --git a/go/pointer/gen.go b/go/pointer/gen.go
index 12f4d79..6c256ac 100644
--- a/go/pointer/gen.go
+++ b/go/pointer/gen.go
@@ -525,7 +525,9 @@ func (a *analysis) genBuiltinCall(instr ssa.CallInstruction, cgn *cgnode) {
case "print":
// In the tests, the probe might be the sole reference
// to its arg, so make sure we create nodes for it.
- a.valueNode(call.Args[0])
+ if len(call.Args) > 0 {
+ a.valueNode(call.Args[0])
+ }
case "ssa:wrapnilchk":
a.copy(a.valueNode(instr.Value()), a.valueNode(call.Args[0]), 1)
@@ -1260,7 +1262,7 @@ func (a *analysis) generate() {
// Create nodes and constraints for all methods of all types
// that are dynamically accessible via reflection or interfaces.
- for _, T := range a.prog.TypesWithMethodSets() {
+ for _, T := range a.prog.RuntimeTypes() {
a.genMethodsOf(T)
}
diff --git a/go/pointer/pointer_test.go b/go/pointer/pointer_test.go
index aeffb5c..1daf9c3 100644
--- a/go/pointer/pointer_test.go
+++ b/go/pointer/pointer_test.go
@@ -153,7 +153,7 @@ func findProbe(prog *ssa.Program, probes map[*ssa.CallCommon]bool, queries map[s
}
func doOneInput(input, filename string) bool {
- conf := loader.Config{SourceImports: true}
+ var conf loader.Config
// Parsing.
f, err := conf.ParseFile(filename, input)
@@ -191,7 +191,8 @@ func doOneInput(input, filename string) bool {
for _, b := range fn.Blocks {
for _, instr := range b.Instrs {
if instr, ok := instr.(ssa.CallInstruction); ok {
- if b, ok := instr.Common().Value.(*ssa.Builtin); ok && b.Name() == "print" {
+ call := instr.Common()
+ if b, ok := call.Value.(*ssa.Builtin); ok && b.Name() == "print" && len(call.Args) == 1 {
probes[instr.Common()] = true
}
}
@@ -247,13 +248,14 @@ func doOneInput(input, filename string) bool {
continue
}
mainFileScope := mainpkg.Object.Scope().Child(0)
- t, _, err = types.EvalNode(prog.Fset, texpr, mainpkg.Object, mainFileScope)
+ tv, err := types.EvalNode(prog.Fset, texpr, mainpkg.Object, mainFileScope)
if err != nil {
ok = false
// Don't print err since its location is bad.
e.errorf("'%s' is not a valid type: %s", typstr, err)
continue
}
+ t = tv.Type
}
e.types = append(e.types, t)
}
diff --git a/go/pointer/stdlib_test.go b/go/pointer/stdlib_test.go
index 214cdfc..6365279 100644
--- a/go/pointer/stdlib_test.go
+++ b/go/pointer/stdlib_test.go
@@ -35,10 +35,7 @@ func TestStdlib(t *testing.T) {
// Load, parse and type-check the program.
ctxt := build.Default // copy
ctxt.GOPATH = "" // disable GOPATH
- conf := loader.Config{
- SourceImports: true,
- Build: &ctxt,
- }
+ conf := loader.Config{Build: &ctxt}
if _, err := conf.FromArgs(buildutil.AllPackages(conf.Build), true); err != nil {
t.Errorf("FromArgs failed: %v", err)
return
diff --git a/go/pointer/testdata/another.go b/go/pointer/testdata/another.go
index 443c94d..12ed690 100644
--- a/go/pointer/testdata/another.go
+++ b/go/pointer/testdata/another.go
@@ -31,4 +31,6 @@ func main() {
// labels, even though it may contain pointers that do.
print(i) // @pointsto makeinterface:func(x int) int | makeinterface:func(x int, y int) | makeinterface:func(int, int) | makeinterface:int | makeinterface:main.S
print(i.(func(int) int)) // @pointsto main.incr
+
+ print() // regression test for crash
}
diff --git a/go/pointer/util.go b/go/pointer/util.go
index 88bada8..d4ccbb5 100644
--- a/go/pointer/util.go
+++ b/go/pointer/util.go
@@ -50,11 +50,7 @@ func CanHaveDynamicTypes(T types.Type) bool {
return false
}
-// isInterface reports whether T is an interface type.
-func isInterface(T types.Type) bool {
- _, ok := T.Underlying().(*types.Interface)
- return ok
-}
+func isInterface(T types.Type) bool { return types.IsInterface(T) }
// mustDeref returns the element type of its argument, which must be a
// pointer; panic ensues otherwise.
diff --git a/go/ssa/builder.go b/go/ssa/builder.go
index 6e1739c..f4418df 100644
--- a/go/ssa/builder.go
+++ b/go/ssa/builder.go
@@ -241,6 +241,20 @@ func (b *builder) builtin(fn *Function, obj *types.Builtin, args []ast.Expr, typ
if len(args) == 3 {
m = b.expr(fn, args[2])
}
+ if m, ok := m.(*Const); ok {
+ // treat make([]T, n, m) as new([m]T)[:n]
+ cap, _ := exact.Int64Val(m.Value)
+ at := types.NewArray(typ.Underlying().(*types.Slice).Elem(), cap)
+ alloc := emitNew(fn, at, pos)
+ alloc.Comment = "makeslice"
+ v := &Slice{
+ X: alloc,
+ High: n,
+ }
+ v.setPos(pos)
+ v.setType(typ)
+ return fn.emit(v)
+ }
v := &MakeSlice{
Len: n,
Cap: m,
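
A sketch of the equivalence this hunk exploits when the capacity argument is a constant; the concrete numbers are arbitrary:

    package main

    import "fmt"

    func main() {
    	n := 3
    	s1 := make([]int, n, 8) // constant cap 8
    	// ...is built as if it were:
    	a := new([8]int) // the "makeslice" Alloc
    	s2 := a[:n]      // Slice with High = n; cap comes from the array
    	fmt.Println(len(s1), cap(s1), len(s2), cap(s2)) // 3 8 3 8
    }
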
@@ -333,7 +347,7 @@ func (b *builder) addr(fn *Function, e ast.Expr, escaping bool) lvalue {
if v == nil {
v = fn.lookup(obj, escaping)
}
- return &address{addr: v, expr: e}
+ return &address{addr: v, pos: e.Pos(), expr: e}
case *ast.CompositeLit:
t := deref(fn.Pkg.typeOf(e))
@@ -345,7 +359,7 @@ func (b *builder) addr(fn *Function, e ast.Expr, escaping bool) lvalue {
}
v.Comment = "complit"
b.compLit(fn, v, e, true) // initialize in place
- return &address{addr: v, expr: e}
+ return &address{addr: v, pos: e.Lbrace, expr: e}
case *ast.ParenExpr:
return b.addr(fn, e.X, escaping)
@@ -364,6 +378,7 @@ func (b *builder) addr(fn *Function, e ast.Expr, escaping bool) lvalue {
last := len(sel.Index()) - 1
return &address{
addr: emitFieldSelection(fn, v, sel.Index()[last], true, e.Sel),
+ pos: e.Sel.Pos(),
expr: e.Sel,
}
@@ -396,10 +411,10 @@ func (b *builder) addr(fn *Function, e ast.Expr, escaping bool) lvalue {
}
v.setPos(e.Lbrack)
v.setType(et)
- return &address{addr: fn.emit(v), expr: e}
+ return &address{addr: fn.emit(v), pos: e.Lbrack, expr: e}
case *ast.StarExpr:
- return &address{addr: b.expr(fn, e.X), starPos: e.Star, expr: e}
+ return &address{addr: b.expr(fn, e.X), pos: e.Star, expr: e}
}
panic(fmt.Sprintf("unexpected address expression: %T", e))
@@ -585,7 +600,7 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value {
case *types.Basic, *types.Slice, *types.Pointer: // *array
x = b.expr(fn, e.X)
default:
- unreachable()
+ panic("unreachable")
}
if e.High != nil {
high = b.expr(fn, e.High)
@@ -732,7 +747,7 @@ func (b *builder) stmtList(fn *Function, list []ast.Stmt) {
// selections of sel.
//
// wantAddr requests that the result is an address. If
-// !sel.Indirect(), this may require that e be build in addr() mode; it
+// !sel.Indirect(), this may require that e be built in addr() mode; it
// must thus be addressable.
//
// escaping is defined as per builder.addr().
@@ -765,10 +780,11 @@ func (b *builder) setCallFunc(fn *Function, e *ast.CallExpr, c *CallCommon) {
sel, ok := fn.Pkg.info.Selections[selector]
if ok && sel.Kind() == types.MethodVal {
obj := sel.Obj().(*types.Func)
- wantAddr := isPointer(recvType(obj))
+ recv := recvType(obj)
+ wantAddr := isPointer(recv)
escaping := true
v := b.receiver(fn, selector.X, wantAddr, escaping, sel)
- if isInterface(deref(v.Type())) {
+ if isInterface(recv) {
// Invoke-mode call.
c.Value = v
c.Method = obj
@@ -877,7 +893,7 @@ func (b *builder) emitCallArgs(fn *Function, sig *types.Signature, e *ast.CallEx
}
iaddr.setType(types.NewPointer(vt))
fn.emit(iaddr)
- emitStore(fn, iaddr, arg)
+ emitStore(fn, iaddr, arg, arg.Pos())
}
s := &Slice{X: a}
s.setType(st)
@@ -1030,17 +1046,19 @@ func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero
switch t := typ.Underlying().(type) {
case *types.Struct:
if !isZero && len(e.Elts) != t.NumFields() {
- emitMemClear(fn, addr)
+ emitMemClear(fn, addr, e.Lbrace)
isZero = true
}
for i, e := range e.Elts {
fieldIndex := i
+ pos := e.Pos()
if kv, ok := e.(*ast.KeyValueExpr); ok {
fname := kv.Key.(*ast.Ident).Name
for i, n := 0, t.NumFields(); i < n; i++ {
sf := t.Field(i)
if sf.Name() == fname {
fieldIndex = i
+ pos = kv.Colon
e = kv.Value
break
}
@@ -1053,7 +1071,7 @@ func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero
}
faddr.setType(types.NewPointer(sf.Type()))
fn.emit(faddr)
- b.exprInPlace(fn, &address{addr: faddr, expr: e}, e, isZero)
+ b.exprInPlace(fn, &address{addr: faddr, pos: pos, expr: e}, e, isZero)
}
case *types.Array, *types.Slice:
@@ -1072,7 +1090,7 @@ func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero
}
if !isZero && int64(len(e.Elts)) != at.Len() {
- emitMemClear(fn, array)
+ emitMemClear(fn, array, e.Lbrace)
isZero = true
}
@@ -1094,20 +1112,20 @@ func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero
}
iaddr.setType(types.NewPointer(at.Elem()))
fn.emit(iaddr)
- b.exprInPlace(fn, &address{addr: iaddr, expr: e}, e, isZero)
+ b.exprInPlace(fn, &address{addr: iaddr, pos: e.Pos(), expr: e}, e, isZero)
}
if t != at { // slice
s := &Slice{X: array}
s.setPos(e.Lbrace)
s.setType(typ)
- emitStore(fn, addr, fn.emit(s))
+ emitStore(fn, addr, fn.emit(s), e.Lbrace)
}
case *types.Map:
m := &MakeMap{Reserve: intConst(int64(len(e.Elts)))}
m.setPos(e.Lbrace)
m.setType(typ)
- emitStore(fn, addr, fn.emit(m))
+ emitStore(fn, addr, fn.emit(m), e.Lbrace)
for _, e := range e.Elts {
e := e.(*ast.KeyValueExpr)
loc := &element{
@@ -1323,7 +1341,7 @@ func (b *builder) typeCaseBody(fn *Function, cc *ast.CaseClause, x Value, done *
// In a single-type case, y has that type.
// In multi-type cases, 'case nil' and default,
// y has the same type as the interface operand.
- emitStore(fn, fn.addNamedLocal(obj), x)
+ emitStore(fn, fn.addNamedLocal(obj), x, obj.Pos())
}
fn.targets = &targets{
tail: fn.targets,
@@ -1574,8 +1592,9 @@ func (b *builder) forStmt(fn *Function, s *ast.ForStmt, label *lblock) {
// rangeIndexed emits to fn the header for an integer-indexed loop
// over array, *array or slice value x.
// The v result is defined only if tv is non-nil.
+// forPos is the position of the "for" token.
//
-func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type) (k, v Value, loop, done *BasicBlock) {
+func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type, pos token.Pos) (k, v Value, loop, done *BasicBlock) {
//
// length = len(x)
// index = -1
@@ -1609,7 +1628,7 @@ func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type) (k, v Value
}
index := fn.addLocal(tInt, token.NoPos)
- emitStore(fn, index, intConst(-1))
+ emitStore(fn, index, intConst(-1), pos)
loop = fn.newBasicBlock("rangeindex.loop")
emitJump(fn, loop)
@@ -1621,7 +1640,7 @@ func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type) (k, v Value
Y: vOne,
}
incr.setType(tInt)
- emitStore(fn, index, fn.emit(incr))
+ emitStore(fn, index, fn.emit(incr), pos)
body := fn.newBasicBlock("rangeindex.body")
done = fn.newBasicBlock("rangeindex.done")
@@ -1800,10 +1819,10 @@ func (b *builder) rangeStmt(fn *Function, s *ast.RangeStmt, label *lblock) {
var loop, done *BasicBlock
switch rt := x.Type().Underlying().(type) {
case *types.Slice, *types.Array, *types.Pointer: // *array
- k, v, loop, done = b.rangeIndexed(fn, x, tv)
+ k, v, loop, done = b.rangeIndexed(fn, x, tv, s.For)
case *types.Chan:
- k, loop, done = b.rangeChan(fn, x, tk, s.TokPos)
+ k, loop, done = b.rangeChan(fn, x, tk, s.For)
case *types.Map, *types.Basic: // string
k, v, loop, done = b.rangeIter(fn, x, tk, tv, s.For)
@@ -1941,7 +1960,7 @@ start:
// Function has named result parameters (NRPs).
// Perform parallel assignment of return operands to NRPs.
for i, r := range results {
- emitStore(fn, fn.namedResults[i], r)
+ emitStore(fn, fn.namedResults[i], r, s.Return)
}
}
// Run function calls deferred in this
@@ -2106,24 +2125,12 @@ func (b *builder) buildFuncDecl(pkg *Package, decl *ast.FuncDecl) {
if isBlankIdent(id) {
return // discard
}
- var fn *Function
+ fn := pkg.values[pkg.info.Defs[id]].(*Function)
if decl.Recv == nil && id.Name == "init" {
- pkg.ninit++
- fn = &Function{
- name: fmt.Sprintf("init#%d", pkg.ninit),
- Signature: new(types.Signature),
- pos: decl.Name.NamePos,
- Pkg: pkg,
- Prog: pkg.Prog,
- syntax: decl,
- }
-
var v Call
v.Call.Value = fn
v.setType(types.NewTuple())
pkg.init.emit(&v)
- } else {
- fn = pkg.values[pkg.info.Defs[id]].(*Function)
}
b.buildFunction(fn)
}
@@ -2174,7 +2181,7 @@ func (p *Package) Build() {
// that would require package creation in topological order.
for name, mem := range p.Members {
if ast.IsExported(name) {
- p.needMethodsOf(mem.Type())
+ p.Prog.needMethodsOf(mem.Type())
}
}
if p.Prog.mode&LogSource != 0 {
@@ -2192,7 +2199,7 @@ func (p *Package) Build() {
done = init.newBasicBlock("init.done")
emitIf(init, emitLoad(init, initguard), done, doinit)
init.currentBlock = doinit
- emitStore(init, initguard, vTrue)
+ emitStore(init, initguard, vTrue, token.NoPos)
// Call the init() function of each package we import.
for _, pkg := range p.info.Pkg.Imports() {
@@ -2220,7 +2227,7 @@ func (p *Package) Build() {
// 1:1 initialization: var x, y = a(), b()
var lval lvalue
if v := varinit.Lhs[0]; v.Name() != "_" {
- lval = &address{addr: p.values[v].(*Global)}
+ lval = &address{addr: p.values[v].(*Global), pos: v.Pos()}
} else {
lval = blank{}
}
@@ -2232,7 +2239,7 @@ func (p *Package) Build() {
if v.Name() == "_" {
continue
}
- emitStore(init, p.values[v].(*Global), emitExtract(init, tuple, i))
+ emitStore(init, p.values[v].(*Global), emitExtract(init, tuple, i), v.Pos())
}
}
}
@@ -2282,129 +2289,3 @@ func (p *Package) typeOf(e ast.Expr) types.Type {
panic(fmt.Sprintf("no type for %T @ %s",
e, p.Prog.Fset.Position(e.Pos())))
}
-
-// needMethodsOf ensures that runtime type information (including the
-// complete method set) is available for the specified type T and all
-// its subcomponents.
-//
-// needMethodsOf must be called for at least every type that is an
-// operand of some MakeInterface instruction, and for the type of
-// every exported package member.
-//
-// Precondition: T is not a method signature (*Signature with Recv()!=nil).
-//
-// Thread-safe. (Called via emitConv from multiple builder goroutines.)
-//
-// TODO(adonovan): make this faster. It accounts for 20% of SSA build
-// time. Do we need to maintain a distinct needRTTI and methodSets per
-// package? Using just one in the program might be much faster.
-//
-func (p *Package) needMethodsOf(T types.Type) {
- p.methodsMu.Lock()
- p.needMethods(T, false)
- p.methodsMu.Unlock()
-}
-
-// Precondition: T is not a method signature (*Signature with Recv()!=nil).
-// Precondition: the p.methodsMu lock is held.
-// Recursive case: skip => don't call makeMethods(T).
-func (p *Package) needMethods(T types.Type, skip bool) {
- // Each package maintains its own set of types it has visited.
- if prevSkip, ok := p.needRTTI.At(T).(bool); ok {
- // needMethods(T) was previously called
- if !prevSkip || skip {
- return // already seen, with same or false 'skip' value
- }
- }
- p.needRTTI.Set(T, skip)
-
- // Prune the recursion if we find a named or *named type
- // belonging to another package.
- var n *types.Named
- switch T := T.(type) {
- case *types.Named:
- n = T
- case *types.Pointer:
- n, _ = T.Elem().(*types.Named)
- }
- if n != nil {
- owner := n.Obj().Pkg()
- if owner == nil {
- return // built-in error type
- }
- if owner != p.Object {
- return // belongs to another package
- }
- }
-
- // All the actual method sets live in the Program so that
- // multiple packages can share a single copy in memory of the
- // symbols that would be compiled into multiple packages (as
- // weak symbols).
- if !skip && p.Prog.makeMethods(T) {
- p.methodSets = append(p.methodSets, T)
- }
-
- // Recursion over signatures of each method.
- tmset := p.Prog.MethodSets.MethodSet(T)
- for i := 0; i < tmset.Len(); i++ {
- sig := tmset.At(i).Type().(*types.Signature)
- p.needMethods(sig.Params(), false)
- p.needMethods(sig.Results(), false)
- }
-
- switch t := T.(type) {
- case *types.Basic:
- // nop
-
- case *types.Interface:
- // nop---handled by recursion over method set.
-
- case *types.Pointer:
- p.needMethods(t.Elem(), false)
-
- case *types.Slice:
- p.needMethods(t.Elem(), false)
-
- case *types.Chan:
- p.needMethods(t.Elem(), false)
-
- case *types.Map:
- p.needMethods(t.Key(), false)
- p.needMethods(t.Elem(), false)
-
- case *types.Signature:
- if t.Recv() != nil {
- panic(fmt.Sprintf("Signature %s has Recv %s", t, t.Recv()))
- }
- p.needMethods(t.Params(), false)
- p.needMethods(t.Results(), false)
-
- case *types.Named:
- // A pointer-to-named type can be derived from a named
- // type via reflection. It may have methods too.
- p.needMethods(types.NewPointer(T), false)
-
- // Consider 'type T struct{S}' where S has methods.
- // Reflection provides no way to get from T to struct{S},
- // only to S, so the method set of struct{S} is unwanted,
- // so set 'skip' flag during recursion.
- p.needMethods(t.Underlying(), true)
-
- case *types.Array:
- p.needMethods(t.Elem(), false)
-
- case *types.Struct:
- for i, n := 0, t.NumFields(); i < n; i++ {
- p.needMethods(t.Field(i).Type(), false)
- }
-
- case *types.Tuple:
- for i, n := 0, t.Len(); i < n; i++ {
- p.needMethods(t.At(i).Type(), false)
- }
-
- default:
- panic(T)
- }
-}
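
For reference, the position threading above means a client analysis can now recover source locations from Store instructions. A minimal sketch, assuming prog and fn are an *ssa.Program and one of its built *ssa.Functions:

    for _, b := range fn.Blocks {
        for _, instr := range b.Instrs {
            if st, ok := instr.(*ssa.Store); ok {
                // With this CL, Pos() reports the source construct most closely
                // associated with the store (assignment, named result, composite
                // literal element, ...) in many more cases than before.
                fmt.Println(prog.Fset.Position(st.Pos()), "store to", st.Addr.Name())
            }
        }
    }
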
diff --git a/go/ssa/builder_test.go b/go/ssa/builder_test.go
index 858db4b..cb30ef6 100644
--- a/go/ssa/builder_test.go
+++ b/go/ssa/builder_test.go
@@ -13,6 +13,7 @@ import (
"golang.org/x/tools/go/loader"
"golang.org/x/tools/go/ssa"
+ "golang.org/x/tools/go/ssa/ssautil"
"golang.org/x/tools/go/types"
)
@@ -20,7 +21,7 @@ func isEmpty(f *ssa.Function) bool { return f.Blocks == nil }
// Tests that programs partially loaded from gc object files contain
// functions with no code for the external portions, but are otherwise ok.
-func TestExternalPackages(t *testing.T) {
+func TestImportFromBinary(t *testing.T) {
test := `
package main
@@ -42,7 +43,7 @@ func main() {
`
// Create a single-file main package.
- var conf loader.Config
+ conf := loader.Config{ImportFromBinary: true}
f, err := conf.ParseFile("<input>", test)
if err != nil {
t.Error(err)
@@ -150,8 +151,8 @@ func main() {
}
}
-// TestTypesWithMethodSets tests that Package.TypesWithMethodSets includes all necessary types.
-func TestTypesWithMethodSets(t *testing.T) {
+// TestRuntimeTypes tests that (*Program).RuntimeTypes() includes all necessary types.
+func TestRuntimeTypes(t *testing.T) {
tests := []struct {
input string
want []string
@@ -166,7 +167,7 @@ func TestTypesWithMethodSets(t *testing.T) {
},
// Subcomponents of type of exported package-level var are needed.
{`package C; import "bytes"; var V struct {*bytes.Buffer}`,
- []string{"*struct{*bytes.Buffer}", "struct{*bytes.Buffer}"},
+ []string{"*bytes.Buffer", "*struct{*bytes.Buffer}", "struct{*bytes.Buffer}"},
},
// Subcomponents of type of unexported package-level var are not needed.
{`package D; import "bytes"; var v struct {*bytes.Buffer}`,
@@ -174,7 +175,7 @@ func TestTypesWithMethodSets(t *testing.T) {
},
// Subcomponents of type of exported package-level function are needed.
{`package E; import "bytes"; func F(struct {*bytes.Buffer}) {}`,
- []string{"struct{*bytes.Buffer}"},
+ []string{"*bytes.Buffer", "struct{*bytes.Buffer}"},
},
// Subcomponents of type of unexported package-level function are not needed.
{`package F; import "bytes"; func f(struct {*bytes.Buffer}) {}`,
@@ -186,11 +187,11 @@ func TestTypesWithMethodSets(t *testing.T) {
},
// ...unless used by MakeInterface.
{`package G2; import "bytes"; type x struct{}; func (x) G(struct {*bytes.Buffer}) {}; var v interface{} = x{}`,
- []string{"*p.x", "p.x", "struct{*bytes.Buffer}"},
+ []string{"*bytes.Buffer", "*p.x", "p.x", "struct{*bytes.Buffer}"},
},
// Subcomponents of type of unexported method are not needed.
{`package I; import "bytes"; type X struct{}; func (X) G(struct {*bytes.Buffer}) {}`,
- []string{"*p.X", "p.X", "struct{*bytes.Buffer}"},
+ []string{"*bytes.Buffer", "*p.X", "p.X", "struct{*bytes.Buffer}"},
},
// Local types aren't needed.
{`package J; import "bytes"; func f() { type T struct {*bytes.Buffer}; var t T; _ = t }`,
@@ -198,11 +199,11 @@ func TestTypesWithMethodSets(t *testing.T) {
},
// ...unless used by MakeInterface.
{`package K; import "bytes"; func f() { type T struct {*bytes.Buffer}; _ = interface{}(T{}) }`,
- []string{"*p.T", "p.T"},
+ []string{"*bytes.Buffer", "*p.T", "p.T"},
},
// Types used as operand of MakeInterface are needed.
{`package L; import "bytes"; func f() { _ = interface{}(struct{*bytes.Buffer}{}) }`,
- []string{"struct{*bytes.Buffer}"},
+ []string{"*bytes.Buffer", "struct{*bytes.Buffer}"},
},
// MakeInterface is optimized away when storing to a blank.
{`package M; import "bytes"; var _ interface{} = struct{*bytes.Buffer}{}`,
@@ -211,7 +212,7 @@ func TestTypesWithMethodSets(t *testing.T) {
}
for _, test := range tests {
// Create a single-file main package.
- var conf loader.Config
+ conf := loader.Config{ImportFromBinary: true}
f, err := conf.ParseFile("<input>", test.input)
if err != nil {
t.Errorf("test %q: %s", test.input[:15], err)
@@ -225,17 +226,17 @@ func TestTypesWithMethodSets(t *testing.T) {
continue
}
prog := ssa.Create(iprog, ssa.SanityCheckFunctions)
- mainPkg := prog.Package(iprog.Created[0].Pkg)
prog.BuildAll()
var typstrs []string
- for _, T := range mainPkg.TypesWithMethodSets() {
+ for _, T := range prog.RuntimeTypes() {
typstrs = append(typstrs, T.String())
}
sort.Strings(typstrs)
if !reflect.DeepEqual(typstrs, test.want) {
- t.Errorf("test 'package %s': got %q, want %q", f.Name.Name, typstrs, test.want)
+ t.Errorf("test 'package %s': got %q, want %q",
+ f.Name.Name, typstrs, test.want)
}
}
}
@@ -317,3 +318,98 @@ func init():
}
}
}
+
+// TestSyntheticFuncs checks that the expected synthetic functions are
+// created, reachable, and not duplicated.
+func TestSyntheticFuncs(t *testing.T) {
+ const input = `package P
+type T int
+func (T) f() int
+func (*T) g() int
+var (
+ // thunks
+ a = T.f
+ b = T.f
+ c = (struct{T}).f
+ d = (struct{T}).f
+ e = (*T).g
+ f = (*T).g
+ g = (struct{*T}).g
+ h = (struct{*T}).g
+
+ // bounds
+ i = T(0).f
+ j = T(0).f
+ k = new(T).g
+ l = new(T).g
+
+ // wrappers
+ m interface{} = struct{T}{}
+ n interface{} = struct{T}{}
+ o interface{} = struct{*T}{}
+ p interface{} = struct{*T}{}
+ q interface{} = new(struct{T})
+ r interface{} = new(struct{T})
+ s interface{} = new(struct{*T})
+ t interface{} = new(struct{*T})
+)
+`
+ // Parse
+ var conf loader.Config
+ f, err := conf.ParseFile("<input>", input)
+ if err != nil {
+ t.Fatalf("parse: %v", err)
+ }
+ conf.CreateFromFiles(f.Name.Name, f)
+
+ // Load
+ iprog, err := conf.Load()
+ if err != nil {
+ t.Fatalf("Load: %v", err)
+ }
+
+ // Create and build SSA
+ prog := ssa.Create(iprog, 0)
+ prog.BuildAll()
+
+ // Enumerate reachable synthetic functions
+ want := map[string]string{
+ "(*P.T).g$bound": "bound method wrapper for func (*P.T).g() int",
+ "(P.T).f$bound": "bound method wrapper for func (P.T).f() int",
+
+ "(*P.T).g$thunk": "thunk for func (*P.T).g() int",
+ "(P.T).f$thunk": "thunk for func (P.T).f() int",
+ "(struct{*P.T}).g$thunk": "thunk for func (*P.T).g() int",
+ "(struct{P.T}).f$thunk": "thunk for func (P.T).f() int",
+
+ "(*P.T).f": "wrapper for func (P.T).f() int",
+ "(*struct{*P.T}).f": "wrapper for func (P.T).f() int",
+ "(*struct{*P.T}).g": "wrapper for func (*P.T).g() int",
+ "(*struct{P.T}).f": "wrapper for func (P.T).f() int",
+ "(*struct{P.T}).g": "wrapper for func (*P.T).g() int",
+ "(struct{*P.T}).f": "wrapper for func (P.T).f() int",
+ "(struct{*P.T}).g": "wrapper for func (*P.T).g() int",
+ "(struct{P.T}).f": "wrapper for func (P.T).f() int",
+
+ "P.init": "package initializer",
+ }
+ for fn := range ssautil.AllFunctions(prog) {
+ if fn.Synthetic == "" {
+ continue
+ }
+ name := fn.String()
+ wantDescr, ok := want[name]
+ if !ok {
+ t.Errorf("got unexpected/duplicate func: %q: %q", name, fn.Synthetic)
+ continue
+ }
+ delete(want, name)
+
+ if wantDescr != fn.Synthetic {
+ t.Errorf("(%s).Synthetic = %q, want %q", name, fn.Synthetic, wantDescr)
+ }
+ }
+ for fn, descr := range want {
+ t.Errorf("want func: %q: %q", fn, descr)
+ }
+}
diff --git a/go/ssa/create.go b/go/ssa/create.go
index 7f57101..0c25bf5 100644
--- a/go/ssa/create.go
+++ b/go/ssa/create.go
@@ -8,6 +8,7 @@ package ssa
// See builder.go for explanation.
import (
+ "fmt"
"go/ast"
"go/token"
"os"
@@ -15,20 +16,7 @@ import (
"golang.org/x/tools/go/loader"
"golang.org/x/tools/go/types"
-)
-
-// BuilderMode is a bitmask of options for diagnostics and checking.
-type BuilderMode uint
-
-const (
- PrintPackages BuilderMode = 1 << iota // Print package inventory to stdout
- PrintFunctions // Print function SSA code to stdout
- LogSource // Log source locations as SSA builder progresses
- SanityCheckFunctions // Perform sanity checking of function bodies
- NaiveForm // Build naïve SSA form: don't replace local loads/stores with registers
- BuildSerially // Build packages serially, not in parallel.
- GlobalDebug // Enable debug info for all packages
- BareInits // Build init functions without guards or calls to dependent inits
+ "golang.org/x/tools/go/types/typeutil"
)
// Create returns a new SSA Program. An SSA Package is created for
@@ -49,6 +37,10 @@ func Create(iprog *loader.Program, mode BuilderMode) *Program {
mode: mode,
}
+ h := typeutil.MakeHasher() // protected by methodsMu, in effect
+ prog.methodSets.SetHasher(h)
+ prog.canon.SetHasher(h)
+
for _, info := range iprog.AllPackages {
// TODO(adonovan): relax this constraint if the
// program contains only "soft" errors.
@@ -97,10 +89,15 @@ func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node) {
pkg.Members[name] = g
case *types.Func:
+ sig := obj.Type().(*types.Signature)
+ if sig.Recv() == nil && name == "init" {
+ pkg.ninit++
+ name = fmt.Sprintf("init#%d", pkg.ninit)
+ }
fn := &Function{
name: name,
object: obj,
- Signature: obj.Type().(*types.Signature),
+ Signature: sig,
syntax: syntax,
pos: obj.Pos(),
Pkg: pkg,
@@ -111,7 +108,7 @@ func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node) {
}
pkg.values[obj] = fn
- if fn.Signature.Recv() == nil {
+ if sig.Recv() == nil {
pkg.Members[name] = fn // package-level function
}
@@ -157,9 +154,6 @@ func membersFromDecl(pkg *Package, decl ast.Decl) {
case *ast.FuncDecl:
id := decl.Name
- if decl.Recv == nil && id.Name == "init" {
- return // no object
- }
if !isBlankIdent(id) {
memberFromObject(pkg, pkg.info.Defs[id], decl)
}
@@ -252,7 +246,7 @@ func (prog *Program) CreatePackage(info *loader.PackageInfo) *Package {
return p
}
-// printMu serializes printing of Packages/Functions to stdout
+// printMu serializes printing of Packages/Functions to stdout.
var printMu sync.Mutex
// AllPackages returns a new slice containing all packages in the
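
Because memberFromObject now names declared init functions eagerly, a package with several "func init()" declarations exposes members "init#1", "init#2", ... next to the synthetic "init". A hedged sketch of what a client sees, assuming prog was built from a loaded program and info is the package's loader.PackageInfo:

    pkg := prog.Package(info.Pkg)
    for name, mem := range pkg.Members {
        if fn, ok := mem.(*ssa.Function); ok && strings.HasPrefix(name, "init") {
            // fn.Synthetic is "package initializer" only for "init";
            // "init#1", "init#2", ... are the declared functions.
            fmt.Println(name, fn.Synthetic)
        }
    }
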
diff --git a/go/ssa/doc.go b/go/ssa/doc.go
index 0b5c33d..d666073 100644
--- a/go/ssa/doc.go
+++ b/go/ssa/doc.go
@@ -120,4 +120,4 @@
// domains of source locations, ast.Nodes, types.Objects,
// ssa.Values/Instructions.
//
-package ssa
+package ssa // import "golang.org/x/tools/go/ssa"
diff --git a/go/ssa/emit.go b/go/ssa/emit.go
index 84246c6..f1ba0f7 100644
--- a/go/ssa/emit.go
+++ b/go/ssa/emit.go
@@ -208,7 +208,7 @@ func emitConv(f *Function, val Value, typ types.Type) Value {
val = emitConv(f, val, DefaultType(ut_src))
}
- f.Pkg.needMethodsOf(val.Type())
+ f.Pkg.Prog.needMethodsOf(val.Type())
mi := &MakeInterface{X: val}
mi.setType(typ)
return f.emit(mi)
@@ -246,10 +246,11 @@ func emitConv(f *Function, val Value, typ types.Type) Value {
// emitStore emits to f an instruction to store value val at location
// addr, applying implicit conversions as required by assignability rules.
//
-func emitStore(f *Function, addr, val Value) *Store {
+func emitStore(f *Function, addr, val Value, pos token.Pos) *Store {
s := &Store{
Addr: addr,
Val: emitConv(f, val, deref(addr.Type())),
+ pos: pos,
}
f.emit(s)
return s
@@ -430,9 +431,9 @@ func zeroValue(f *Function, t types.Type) Value {
}
// emitMemClear emits to f code to zero the value pointed to by ptr.
-func emitMemClear(f *Function, ptr Value) {
+func emitMemClear(f *Function, ptr Value, pos token.Pos) {
// TODO(adonovan): define and use a 'memclr' intrinsic for aggregate types.
- emitStore(f, ptr, zeroValue(f, deref(ptr.Type())))
+ emitStore(f, ptr, zeroValue(f, deref(ptr.Type())), pos)
}
// createRecoverBlock emits to f a block of code to return after a
diff --git a/go/ssa/func.go b/go/ssa/func.go
index 3f08d3e..fec527b 100644
--- a/go/ssa/func.go
+++ b/go/ssa/func.go
@@ -426,7 +426,7 @@ func (f *Function) lookup(obj types.Object, escaping bool) Value {
// Definition must be in an enclosing function;
// plumb it through intervening closures.
if f.parent == nil {
- panic("no Value for type.Object " + obj.Name())
+ panic("no ssa.Value for " + obj.String())
}
outer := f.parent.lookup(obj, true) // escaping
v := &FreeVar{
@@ -464,7 +464,10 @@ func (f *Function) emit(instr Instruction) Value {
// (i.e. from == f.Pkg.Object), they are rendered without the package path.
// For example: "IsNaN", "(*Buffer).Bytes", etc.
//
-// Invariant: all non-synthetic functions have distinct package-qualified names.
+// All non-synthetic functions have distinct package-qualified names.
+// (But two methods may have the same name "(T).f" if one is a synthetic
+// wrapper promoting a non-exported method "f" from another package; in
+// that case, the strings are equal but the identifiers "f" are distinct.)
//
func (f *Function) RelString(from *types.Package) string {
// Anonymous?
diff --git a/go/ssa/interp/external.go b/go/ssa/interp/external.go
index e3a8ee0..fc43366 100644
--- a/go/ssa/interp/external.go
+++ b/go/ssa/interp/external.go
@@ -80,6 +80,7 @@ func init() {
"math.Min": ext۰math۰Min,
"os.runtime_args": ext۰os۰runtime_args,
"reflect.New": ext۰reflect۰New,
+ "reflect.SliceOf": ext۰reflect۰SliceOf,
"reflect.TypeOf": ext۰reflect۰TypeOf,
"reflect.ValueOf": ext۰reflect۰ValueOf,
"reflect.init": ext۰reflect۰Init,
diff --git a/go/ssa/interp/interp.go b/go/ssa/interp/interp.go
index d674c25..825d2d0 100644
--- a/go/ssa/interp/interp.go
+++ b/go/ssa/interp/interp.go
@@ -42,7 +42,7 @@
//
// * os.Exit is implemented using panic, causing deferred functions to
// run.
-package interp
+package interp // import "golang.org/x/tools/go/ssa/interp"
import (
"fmt"
@@ -631,6 +631,20 @@ func init() {
environ = append(environ, "GOARCH="+runtime.GOARCH)
}
+// deleteBodies deletes the bodies of all standalone functions except the
+// specified ones. A missing intrinsic leads to a clear runtime error.
+func deleteBodies(pkg *ssa.Package, except ...string) {
+ keep := make(map[string]bool)
+ for _, e := range except {
+ keep[e] = true
+ }
+ for _, mem := range pkg.Members {
+ if fn, ok := mem.(*ssa.Function); ok && !keep[fn.Name()] {
+ fn.Blocks = nil
+ }
+ }
+}
+
// Interpret interprets the Go program whose main package is mainpkg.
// mode specifies various interpreter options. filename and args are
// the initial values of os.Args for the target program. sizes is the
@@ -676,22 +690,13 @@ func Interpret(mainpkg *ssa.Package, mode Mode, sizes types.Sizes, filename stri
case "syscall":
setGlobal(i, pkg, "envs", environ)
+ case "reflect":
+ deleteBodies(pkg, "DeepEqual", "deepValueEqual")
+
case "runtime":
sz := sizes.Sizeof(pkg.Object.Scope().Lookup("MemStats").Type())
setGlobal(i, pkg, "sizeof_C_MStats", uintptr(sz))
-
- // Delete the bodies of almost all "runtime" functions since they're magic.
- // A missing intrinsic leads to a very clear error.
- for _, mem := range pkg.Members {
- if fn, ok := mem.(*ssa.Function); ok {
- switch fn.Name() {
- case "GOROOT", "gogetenv":
- // keep
- default:
- fn.Blocks = nil
- }
- }
- }
+ deleteBodies(pkg, "GOROOT", "gogetenv")
}
}
@@ -712,7 +717,7 @@ func Interpret(mainpkg *ssa.Package, mode Mode, sizes types.Sizes, filename stri
case string:
fmt.Fprintln(os.Stderr, "panic:", p)
default:
- fmt.Fprintf(os.Stderr, "panic: unexpected type: %T\n", p)
+ fmt.Fprintf(os.Stderr, "panic: unexpected type: %T: %v\n", p, p)
}
// TODO(adonovan): dump panicking interpreter goroutine?
diff --git a/go/ssa/interp/interp_test.go b/go/ssa/interp/interp_test.go
index 456fdaf..569d9ea 100644
--- a/go/ssa/interp/interp_test.go
+++ b/go/ssa/interp/interp_test.go
@@ -146,6 +146,7 @@ var testdataTests = []string{
"mrvchain.go",
"range.go",
"recover.go",
+ "reflect.go",
"static.go",
"callstack.go",
}
@@ -187,7 +188,7 @@ func run(t *testing.T, dir, input string, success successPredicate) bool {
inputs = append(inputs, i)
}
- conf := loader.Config{SourceImports: true}
+ var conf loader.Config
if _, err := conf.FromArgs(inputs, true); err != nil {
t.Errorf("FromArgs(%s) failed: %s", inputs, err)
return false
@@ -276,11 +277,13 @@ func printFailures(failures []string) {
}
}
-// The "normal" success predicate.
-func exitsZero(exitcode int, _ string) error {
+func success(exitcode int, output string) error {
if exitcode != 0 {
return fmt.Errorf("exit code was %d", exitcode)
}
+ if strings.Contains(output, "BUG") {
+ return fmt.Errorf("exited zero but output contained 'BUG'")
+ }
return nil
}
@@ -288,7 +291,7 @@ func exitsZero(exitcode int, _ string) error {
func TestTestdataFiles(t *testing.T) {
var failures []string
for _, input := range testdataTests {
- if !run(t, "testdata"+slash, input, exitsZero) {
+ if !run(t, "testdata"+slash, input, success) {
failures = append(failures, input)
}
}
@@ -303,16 +306,6 @@ func TestGorootTest(t *testing.T) {
var failures []string
- // $GOROOT/tests are also considered a failure if they print "BUG".
- success := func(exitcode int, output string) error {
- if exitcode != 0 {
- return fmt.Errorf("exit code was %d", exitcode)
- }
- if strings.Contains(output, "BUG") {
- return fmt.Errorf("exited zero but output contained 'BUG'")
- }
- return nil
- }
for _, input := range gorootTestTests {
if !run(t, filepath.Join(build.Default.GOROOT, "test")+slash, input, success) {
failures = append(failures, input)
@@ -347,9 +340,7 @@ func TestTestmainPackage(t *testing.T) {
// CreateTestMainPackage should return nil if there were no tests.
func TestNullTestmainPackage(t *testing.T) {
var conf loader.Config
- if err := conf.CreateFromFilenames("", "testdata/b_test.go"); err != nil {
- t.Fatalf("ParseFile failed: %s", err)
- }
+ conf.CreateFromFilenames("", "testdata/b_test.go")
iprog, err := conf.Load()
if err != nil {
t.Fatalf("CreatePackages failed: %s", err)
diff --git a/go/ssa/interp/reflect.go b/go/ssa/interp/reflect.go
index 051695c..fd190df 100644
--- a/go/ssa/interp/reflect.go
+++ b/go/ssa/interp/reflect.go
@@ -150,8 +150,13 @@ func ext۰reflect۰New(fr *frame, args []value) value {
return makeReflectValue(types.NewPointer(t), &alloc)
}
+func ext۰reflect۰SliceOf(fr *frame, args []value) value {
+ // Signature: func (t reflect.rtype) Type
+ return makeReflectType(rtype{types.NewSlice(args[0].(iface).v.(rtype).t)})
+}
+
func ext۰reflect۰TypeOf(fr *frame, args []value) value {
- // Signature: func (t reflect.rtype) string
+ // Signature: func (t reflect.rtype) Type
return makeReflectType(rtype{args[0].(iface).t})
}
diff --git a/go/ssa/interp/testdata/coverage.go b/go/ssa/interp/testdata/coverage.go
index ca65643..dc094da 100644
--- a/go/ssa/interp/testdata/coverage.go
+++ b/go/ssa/interp/testdata/coverage.go
@@ -478,7 +478,28 @@ func init() {
}
}
-// Test that a nice error is issue by indirection wrappers.
+var one = 1 // not a constant
+
+// Test makeslice.
+func init() {
+ check := func(s []string, wantLen, wantCap int) {
+ if len(s) != wantLen {
+ panic(len(s))
+ }
+ if cap(s) != wantCap {
+ panic(cap(s))
+ }
+ }
+ // SSA form:
+ check(make([]string, 10), 10, 10) // new([10]string)[:10]
+ check(make([]string, one), 1, 1) // make([]string, one, one)
+ check(make([]string, 0, 10), 0, 10) // new([10]string)[:0]
+ check(make([]string, 0, one), 0, 1) // make([]string, 0, one)
+ check(make([]string, one, 10), 1, 10) // new([10]string)[:one]
+ check(make([]string, one, one), 1, 1) // make([]string, one, one)
+}
+
+// Test that a nice error is issued by indirection wrappers.
func init() {
var ptr *T
var i I = ptr
diff --git a/go/ssa/interp/testdata/reflect.go b/go/ssa/interp/testdata/reflect.go
new file mode 100644
index 0000000..6aa9a67
--- /dev/null
+++ b/go/ssa/interp/testdata/reflect.go
@@ -0,0 +1,11 @@
+package main
+
+import "reflect"
+
+func main() {
+ // Regression test for issue 9462.
+ got := reflect.SliceOf(reflect.TypeOf(byte(0))).String()
+ if got != "[]uint8" && got != "[]byte" { // result varies by toolchain
+ println("BUG: " + got)
+ }
+}
diff --git a/go/ssa/lvalue.go b/go/ssa/lvalue.go
index e58bc24..8342645 100644
--- a/go/ssa/lvalue.go
+++ b/go/ssa/lvalue.go
@@ -27,20 +27,19 @@ type lvalue interface {
// An address is an lvalue represented by a true pointer.
type address struct {
- addr Value
- starPos token.Pos // source position, if from explicit *addr
- expr ast.Expr // source syntax [debug mode]
+ addr Value
+ pos token.Pos // source position
+ expr ast.Expr // source syntax [debug mode]
}
func (a *address) load(fn *Function) Value {
load := emitLoad(fn, a.addr)
- load.pos = a.starPos
+ load.pos = a.pos
return load
}
func (a *address) store(fn *Function, v Value) {
- store := emitStore(fn, a.addr, v)
- store.pos = a.starPos
+ store := emitStore(fn, a.addr, v, a.pos)
if a.expr != nil {
// store.Val is v, converted for assignability.
emitDebugRef(fn, a.expr, store.Val, false)
diff --git a/go/ssa/methods.go b/go/ssa/methods.go
index 1ef725c..12534de 100644
--- a/go/ssa/methods.go
+++ b/go/ssa/methods.go
@@ -55,44 +55,6 @@ func (prog *Program) LookupMethod(T types.Type, pkg *types.Package, name string)
return prog.Method(sel)
}
-// makeMethods ensures that all concrete methods of type T are
-// generated. It is equivalent to calling prog.Method() on all
-// members of T.methodSet(), but acquires fewer locks.
-//
-// It reports whether the type's (concrete) method set is non-empty.
-//
-// Thread-safe.
-//
-// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu)
-//
-func (prog *Program) makeMethods(T types.Type) bool {
- if isInterface(T) {
- return false // abstract method
- }
- tmset := prog.MethodSets.MethodSet(T)
- n := tmset.Len()
- if n == 0 {
- return false // empty (common case)
- }
-
- if prog.mode&LogSource != 0 {
- defer logStack("makeMethods %s", T)()
- }
-
- prog.methodsMu.Lock()
- defer prog.methodsMu.Unlock()
-
- mset := prog.createMethodSet(T)
- if !mset.complete {
- mset.complete = true
- for i := 0; i < n; i++ {
- prog.addMethod(mset, tmset.At(i))
- }
- }
-
- return true
-}
-
// methodSet contains the (concrete) methods of a non-interface type.
type methodSet struct {
mapping map[string]*Function // populated lazily
@@ -135,18 +97,15 @@ func (prog *Program) addMethod(mset *methodSet, sel *types.Selection) *Function
return fn
}
-// TypesWithMethodSets returns a new unordered slice containing all
+// RuntimeTypes returns a new unordered slice containing all
// concrete types in the program for which a complete (non-empty)
// method set is required at run-time.
//
-// It is the union of pkg.TypesWithMethodSets() for all pkg in
-// prog.AllPackages().
-//
// Thread-safe.
//
// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu)
//
-func (prog *Program) TypesWithMethodSets() []types.Type {
+func (prog *Program) RuntimeTypes() []types.Type {
prog.methodsMu.Lock()
defer prog.methodsMu.Unlock()
@@ -159,39 +118,126 @@ func (prog *Program) TypesWithMethodSets() []types.Type {
return res
}
-// TypesWithMethodSets returns an unordered slice containing the set
-// of all concrete types referenced within package pkg and not
-// belonging to some other package, for which a complete (non-empty)
-// method set is required at run-time.
+// declaredFunc returns the concrete function/method denoted by obj.
+// Panic ensues if there is none.
+//
+func (prog *Program) declaredFunc(obj *types.Func) *Function {
+ if v := prog.packageLevelValue(obj); v != nil {
+ return v.(*Function)
+ }
+ panic("no concrete method: " + obj.String())
+}
+
+// needMethodsOf ensures that runtime type information (including the
+// complete method set) is available for the specified type T and all
+// its subcomponents.
+//
+// needMethodsOf must be called for at least every type that is an
+// operand of some MakeInterface instruction, and for the type of
+// every exported package member.
//
-// A type belongs to a package if it is a named type or a pointer to a
-// named type, and the name was defined in that package. All other
-// types belong to no package.
+// Precondition: T is not a method signature (*Signature with Recv()!=nil).
//
-// A type may appear in the TypesWithMethodSets() set of multiple
-// distinct packages if that type belongs to no package. Typical
-// compilers emit method sets for such types multiple times (using
-// weak symbols) into each package that references them, with the
-// linker performing duplicate elimination.
+// Thread-safe. (Called via emitConv from multiple builder goroutines.)
//
-// This set includes the types of all operands of some MakeInterface
-// instruction, the types of all exported members of some package, and
-// all types that are subcomponents, since even types that aren't used
-// directly may be derived via reflection.
+// TODO(adonovan): make this faster. It accounts for 20% of SSA build time.
//
-// Callers must not mutate the result.
+// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu)
//
-func (pkg *Package) TypesWithMethodSets() []types.Type {
- // pkg.methodsMu not required; concurrent (build) phase is over.
- return pkg.methodSets
+func (prog *Program) needMethodsOf(T types.Type) {
+ prog.methodsMu.Lock()
+ prog.needMethods(T, false)
+ prog.methodsMu.Unlock()
}
-// declaredFunc returns the concrete function/method denoted by obj.
-// Panic ensues if there is none.
+// Precondition: T is not a method signature (*Signature with Recv()!=nil).
+// Recursive case: skip => don't create methods for T.
//
-func (prog *Program) declaredFunc(obj *types.Func) *Function {
- if v := prog.packageLevelValue(obj); v != nil {
- return v.(*Function)
+// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu)
+//
+func (prog *Program) needMethods(T types.Type, skip bool) {
+ // The program maintains a single set of types it has visited.
+ if prevSkip, ok := prog.runtimeTypes.At(T).(bool); ok {
+ // needMethods(T) was previously called
+ if !prevSkip || skip {
+ return // already seen, with same or false 'skip' value
+ }
+ }
+ prog.runtimeTypes.Set(T, skip)
+
+ tmset := prog.MethodSets.MethodSet(T)
+
+ if !skip && !isInterface(T) && tmset.Len() > 0 {
+ // Create methods of T.
+ mset := prog.createMethodSet(T)
+ if !mset.complete {
+ mset.complete = true
+ n := tmset.Len()
+ for i := 0; i < n; i++ {
+ prog.addMethod(mset, tmset.At(i))
+ }
+ }
+ }
+
+ // Recursion over signatures of each method.
+ for i := 0; i < tmset.Len(); i++ {
+ sig := tmset.At(i).Type().(*types.Signature)
+ prog.needMethods(sig.Params(), false)
+ prog.needMethods(sig.Results(), false)
+ }
+
+ switch t := T.(type) {
+ case *types.Basic:
+ // nop
+
+ case *types.Interface:
+ // nop---handled by recursion over method set.
+
+ case *types.Pointer:
+ prog.needMethods(t.Elem(), false)
+
+ case *types.Slice:
+ prog.needMethods(t.Elem(), false)
+
+ case *types.Chan:
+ prog.needMethods(t.Elem(), false)
+
+ case *types.Map:
+ prog.needMethods(t.Key(), false)
+ prog.needMethods(t.Elem(), false)
+
+ case *types.Signature:
+ if t.Recv() != nil {
+ panic(fmt.Sprintf("Signature %s has Recv %s", t, t.Recv()))
+ }
+ prog.needMethods(t.Params(), false)
+ prog.needMethods(t.Results(), false)
+
+ case *types.Named:
+ // A pointer-to-named type can be derived from a named
+ // type via reflection. It may have methods too.
+ prog.needMethods(types.NewPointer(T), false)
+
+ // Consider 'type T struct{S}' where S has methods.
+ // Reflection provides no way to get from T to struct{S},
+ // only to S, so the method set of struct{S} is unwanted,
+ // so set 'skip' flag during recursion.
+ prog.needMethods(t.Underlying(), true)
+
+ case *types.Array:
+ prog.needMethods(t.Elem(), false)
+
+ case *types.Struct:
+ for i, n := 0, t.NumFields(); i < n; i++ {
+ prog.needMethods(t.Field(i).Type(), false)
+ }
+
+ case *types.Tuple:
+ for i, n := 0, t.Len(); i < n; i++ {
+ prog.needMethods(t.At(i).Type(), false)
+ }
+
+ default:
+ panic(T)
}
- panic("no concrete method: " + obj.String())
}
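
Callers of the removed per-package API migrate to the program-wide one; the ssautil change later in this CL does exactly this. A minimal sketch of the new enumeration, assuming prog is a built *ssa.Program:

    for _, T := range prog.RuntimeTypes() {
        mset := prog.MethodSets.MethodSet(T)
        for i, n := 0, mset.Len(); i < n; i++ {
            fn := prog.Method(mset.At(i)) // declared method or synthetic wrapper
            _ = fn
        }
    }
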
diff --git a/go/ssa/mode.go b/go/ssa/mode.go
new file mode 100644
index 0000000..bbd613a
--- /dev/null
+++ b/go/ssa/mode.go
@@ -0,0 +1,107 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// This file defines the BuilderMode type and its command-line flag.
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+)
+
+// BuilderMode is a bitmask of options for diagnostics and checking.
+type BuilderMode uint
+
+const (
+ PrintPackages BuilderMode = 1 << iota // Print package inventory to stdout
+ PrintFunctions // Print function SSA code to stdout
+ LogSource // Log source locations as SSA builder progresses
+ SanityCheckFunctions // Perform sanity checking of function bodies
+ NaiveForm // Build naïve SSA form: don't replace local loads/stores with registers
+ BuildSerially // Build packages serially, not in parallel.
+ GlobalDebug // Enable debug info for all packages
+ BareInits // Build init functions without guards or calls to dependent inits
+)
+
+const modeFlagUsage = `Options controlling the SSA builder.
+The value is a sequence of zero or more of these letters:
+C perform sanity [C]hecking of the SSA form.
+D include [D]ebug info for every function.
+P print [P]ackage inventory.
+F print [F]unction SSA code.
+S log [S]ource locations as SSA builder progresses.
+L build distinct packages seria[L]ly instead of in parallel.
+N build [N]aive SSA form: don't replace local loads/stores with registers.
+I build bare [I]nit functions: no init guards or calls to dependent inits.
+`
+
+// BuilderModeFlag creates a new command line flag of type BuilderMode,
+// adds it to the specified flag set, and returns it.
+//
+// Example:
+// var ssabuild = BuilderModeFlag(flag.CommandLine, "ssabuild", 0)
+//
+func BuilderModeFlag(set *flag.FlagSet, name string, value BuilderMode) *BuilderMode {
+ set.Var((*builderModeValue)(&value), name, modeFlagUsage)
+ return &value
+}
+
+type builderModeValue BuilderMode // satisfies flag.Value and flag.Getter.
+
+func (v *builderModeValue) Set(s string) error {
+ var mode BuilderMode
+ for _, c := range s {
+ switch c {
+ case 'D':
+ mode |= GlobalDebug
+ case 'P':
+ mode |= PrintPackages
+ case 'F':
+ mode |= PrintFunctions
+ case 'S':
+ mode |= LogSource | BuildSerially
+ case 'C':
+ mode |= SanityCheckFunctions
+ case 'N':
+ mode |= NaiveForm
+ case 'L':
+ mode |= BuildSerially
+ default:
+ return fmt.Errorf("unknown BuilderMode option: %q", c)
+ }
+ }
+ *v = builderModeValue(mode)
+ return nil
+}
+
+func (v *builderModeValue) Get() interface{} { return BuilderMode(*v) }
+
+func (v *builderModeValue) String() string {
+ mode := BuilderMode(*v)
+ var buf bytes.Buffer
+ if mode&GlobalDebug != 0 {
+ buf.WriteByte('D')
+ }
+ if mode&PrintPackages != 0 {
+ buf.WriteByte('P')
+ }
+ if mode&PrintFunctions != 0 {
+ buf.WriteByte('F')
+ }
+ if mode&LogSource != 0 {
+ buf.WriteByte('S')
+ }
+ if mode&SanityCheckFunctions != 0 {
+ buf.WriteByte('C')
+ }
+ if mode&NaiveForm != 0 {
+ buf.WriteByte('N')
+ }
+ if mode&BuildSerially != 0 {
+ buf.WriteByte('L')
+ }
+ return buf.String()
+}
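
A short sketch of wiring the new flag into a command, following the example in the doc comment above; iprog stands for an already loaded loader.Program:

    var ssabuild = ssa.BuilderModeFlag(flag.CommandLine, "ssabuild", 0)

    func main() {
        flag.Parse() // e.g. -ssabuild=CDP
        prog := ssa.Create(iprog, *ssabuild)
        prog.BuildAll()
    }
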
diff --git a/go/ssa/sanity.go b/go/ssa/sanity.go
index 3fd6747..b0593d0 100644
--- a/go/ssa/sanity.go
+++ b/go/ssa/sanity.go
@@ -505,8 +505,13 @@ func sanityCheckPackage(pkg *Package) {
continue // not all members have typechecker objects
}
if obj.Name() != name {
- panic(fmt.Sprintf("%s: %T.Object().Name() = %s, want %s",
- pkg.Object.Path(), mem, obj.Name(), name))
+ if obj.Name() == "init" && strings.HasPrefix(mem.Name(), "init#") {
+ // Ok. The name of a declared init function varies between
+ // its types.Func ("init") and its ssa.Function ("init#%d").
+ } else {
+ panic(fmt.Sprintf("%s: %T.Object().Name() = %s, want %s",
+ pkg.Object.Path(), mem, obj.Name(), name))
+ }
}
if obj.Pos() != mem.Pos() {
panic(fmt.Sprintf("%s Pos=%d obj.Pos=%d", mem, mem.Pos(), obj.Pos()))
diff --git a/go/ssa/source.go b/go/ssa/source.go
index 02b0260..0566d23 100644
--- a/go/ssa/source.go
+++ b/go/ssa/source.go
@@ -144,7 +144,7 @@ func findNamedFunc(pkg *Package, pos token.Pos) *Function {
// - e is a reference to nil or a built-in function.
// - the value was optimised away.
//
-// If e is an addressable expression used an an lvalue context,
+// If e is an addressable expression used in an lvalue context,
// value is the address denoted by e, and isAddr is true.
//
// The types of e (or &e, if isAddr) and the result are equal
diff --git a/go/ssa/source_test.go b/go/ssa/source_test.go
index 203da7f..68b5401 100644
--- a/go/ssa/source_test.go
+++ b/go/ssa/source_test.go
@@ -16,7 +16,7 @@ import (
"strings"
"testing"
- "golang.org/x/tools/astutil"
+ "golang.org/x/tools/go/ast/astutil"
"golang.org/x/tools/go/exact"
"golang.org/x/tools/go/loader"
"golang.org/x/tools/go/ssa"
diff --git a/go/ssa/ssa.go b/go/ssa/ssa.go
index 8543992..75fdff8 100644
--- a/go/ssa/ssa.go
+++ b/go/ssa/ssa.go
@@ -20,7 +20,6 @@ import (
)
// A Program is a partial or complete Go program converted to SSA form.
-//
type Program struct {
Fset *token.FileSet // position information for the files of this Program
imported map[string]*Package // all importable Packages, keyed by import path
@@ -28,10 +27,12 @@ type Program struct {
mode BuilderMode // set of mode bits for SSA construction
MethodSets types.MethodSetCache // cache of type-checker's method-sets
- methodsMu sync.Mutex // guards the following maps:
- methodSets typeutil.Map // maps type to its concrete methodSet
- bounds map[*types.Func]*Function // bounds for curried x.Method closures
- thunks map[selectionKey]*Function // thunks for T.Method expressions
+ methodsMu sync.Mutex // guards the following maps:
+ methodSets typeutil.Map // maps type to its concrete methodSet
+ runtimeTypes typeutil.Map // types for which rtypes are needed
+ canon typeutil.Map // type canonicalization map
+ bounds map[*types.Func]*Function // bounds for curried x.Method closures
+ thunks map[selectionKey]*Function // thunks for T.Method expressions
}
// A Package is a single analyzed Go package containing Members for
@@ -39,22 +40,23 @@ type Program struct {
// declares. These may be accessed directly via Members, or via the
// type-specific accessor methods Func, Type, Var and Const.
//
+// Members also contains entries for "init" (the synthetic package
+// initializer) and "init#%d", the nth declared init function,
+// and unspecified other things too.
+//
type Package struct {
- Prog *Program // the owning program
- Object *types.Package // the type checker's package object for this package
- Members map[string]Member // all package members keyed by name
- methodsMu sync.Mutex // guards needRTTI and methodSets
- methodSets []types.Type // types whose method sets are included in this package
- values map[types.Object]Value // package members (incl. types and methods), keyed by object
- init *Function // Func("init"); the package's init function
- debug bool // include full debug info in this package.
+ Prog *Program // the owning program
+ Object *types.Package // the type checker's package object for this package
+ Members map[string]Member // all package members keyed by name (incl. init and init#%d)
+ values map[types.Object]Value // package members (incl. types and methods), keyed by object
+ init *Function // Func("init"); the package's init function
+ debug bool // include full debug info in this package
// The following fields are set transiently, then cleared
// after building.
- started int32 // atomically tested and set at start of build phase
- ninit int32 // number of init functions
- info *loader.PackageInfo // package ASTs and type information
- needRTTI typeutil.Map // types for which runtime type info is needed
+ started int32 // atomically tested and set at start of build phase
+ ninit int32 // number of init functions
+ info *loader.PackageInfo // package ASTs and type information
}
// A Member is a member of a Go package, implemented by *NamedConst,
@@ -69,7 +71,7 @@ type Member interface {
Pos() token.Pos // position of member's declaration, if known
Type() types.Type // type of the package member
Token() token.Token // token.{VAR,FUNC,CONST,TYPE}
- Package() *Package // returns the containing package. (TODO: rename Pkg)
+ Package() *Package // the containing package
}
// A Type is a Member of a Package representing a package-level named type.
@@ -81,8 +83,8 @@ type Type struct {
pkg *Package
}
-// A NamedConst is a Member of Package representing a package-level
-// named constant value.
+// A NamedConst is a Member of a Package representing a package-level
+// named constant.
//
// Pos() returns the position of the declaring ast.ValueSpec.Names[*]
// identifier.
@@ -157,7 +159,6 @@ type Value interface {
// corresponds to an ast.Expr; use Function.ValueForExpr
// instead. NB: it requires that the function was built with
// debug information.)
- //
Pos() token.Pos
}
@@ -169,21 +170,21 @@ type Value interface {
// does not.
//
type Instruction interface {
- // String returns the disassembled form of this value. e.g.
+ // String returns the disassembled form of this value.
//
- // Examples of Instructions that define a Value:
- // e.g. "x + y" (BinOp)
+ // Examples of Instructions that are Values:
+ // "x + y" (BinOp)
// "len([])" (Call)
// Note that the name of the Value is not printed.
//
- // Examples of Instructions that do define (are) Values:
- // e.g. "return x" (Return)
+ // Examples of Instructions that are not Values:
+ // "return x" (Return)
// "*y = x" (Store)
//
- // (This separation is useful for some analyses which
- // distinguish the operation from the value it
- // defines. e.g. 'y = local int' is both an allocation of
- // memory 'local int' and a definition of a pointer y.)
+ // (The separation of Value.Name() from Value.String() is useful
+ // for some analyses which distinguish the operation from the
+ // value it defines, e.g., 'y = local int' is both an allocation
+ // of memory 'local int' and a definition of a pointer y.)
String() string
// Parent returns the function to which this instruction
@@ -231,7 +232,6 @@ type Instruction interface {
// This position may be used to determine which non-Value
// Instruction corresponds to some ast.Stmts, but not all: If
// and Jump instructions have no Pos(), for example.)
- //
Pos() token.Pos
}
@@ -257,12 +257,12 @@ type Node interface {
Referrers() *[]Instruction // nil for non-Values
}
-// Function represents the parameters, results and code of a function
+// Function represents the parameters, results, and code of a function
// or method.
//
// If Blocks is nil, this indicates an external function for which no
// Go source code is available. In this case, FreeVars and Locals
-// will be nil too. Clients performing whole-program analysis must
+// are nil too. Clients performing whole-program analysis must
// handle external functions specially.
//
// Blocks contains the function's control-flow graph (CFG).
@@ -277,14 +277,18 @@ type Node interface {
// parameters, if any.
//
// A nested function (Parent()!=nil) that refers to one or more
-// lexically enclosing local variables ("free variables") has FreeVar
-// parameters. Such functions cannot be called directly but require a
+// lexically enclosing local variables ("free variables") has FreeVars.
+// Such functions cannot be called directly but require a
// value created by MakeClosure which, via its Bindings, supplies
// values for these parameters.
//
// If the function is a method (Signature.Recv() != nil) then the first
// element of Params is the receiver parameter.
//
+// A Go package may declare many functions called "init".
+// For each one, Object().Name() returns "init" but Name() returns
+// "init#1", etc, in declaration order.
+//
// Pos() returns the declaring ast.FuncLit.Type.Func or the position
// of the ast.FuncDecl.Name, if the function was explicit in the
// source. Synthetic wrappers, for which Synthetic != "", may share
@@ -322,13 +326,13 @@ type Function struct {
lblocks map[*ast.Object]*lblock // labelled blocks
}
-// An SSA basic block.
+// BasicBlock represents an SSA basic block.
//
// The final element of Instrs is always an explicit transfer of
-// control (If, Jump, Return or Panic).
+// control (If, Jump, Return, or Panic).
//
// A block may contain no Instructions only if it is unreachable,
-// i.e. Preds is nil. Empty blocks are typically pruned.
+// i.e., Preds is nil. Empty blocks are typically pruned.
//
// BasicBlocks and their Preds/Succs relation form a (possibly cyclic)
// graph independent of the SSA Value graph: the control-flow graph or
@@ -348,9 +352,9 @@ type BasicBlock struct {
parent *Function // parent function
Instrs []Instruction // instructions in order
Preds, Succs []*BasicBlock // predecessors and successors
- succs2 [2]*BasicBlock // initial space for Succs.
+ succs2 [2]*BasicBlock // initial space for Succs
dom domInfo // dominator tree info
- gaps int // number of nil Instrs (transient).
+ gaps int // number of nil Instrs (transient)
rundefers int // number of rundefers (transient)
}
@@ -398,11 +402,11 @@ type Parameter struct {
//
// The underlying type of a constant may be any boolean, numeric, or
// string type. In addition, a Const may represent the nil value of
-// any reference type: interface, map, channel, pointer, slice, or
+// any reference type---interface, map, channel, pointer, slice, or
// function---but not "untyped nil".
//
// All source-level constant expressions are represented by a Const
-// of equal type and value.
+// of the same type and value.
//
// Value holds the exact value of the constant, independent of its
// Type(), using the same representation as package go/exact uses for
@@ -461,11 +465,12 @@ type Builtin struct {
// Value-defining instructions ----------------------------------------
-// The Alloc instruction reserves space for a value of the given type,
+// The Alloc instruction reserves space for a variable of the given type,
// zero-initializes it, and yields its address.
//
// Alloc values are always addresses, and have pointer types, so the
-// type of the allocated space is actually indirect(Type()).
+// type of the allocated variable is actually
+// Type().Underlying().(*types.Pointer).Elem().
//
// If Heap is false, Alloc allocates space in the function's
// activation record (frame); we refer to an Alloc(Heap=false) as a
@@ -473,7 +478,7 @@ type Builtin struct {
// it is executed within the same activation; the space is
// re-initialized to zero.
//
-// If Heap is true, Alloc allocates space in the heap, and returns; we
+// If Heap is true, Alloc allocates space in the heap; we
// refer to an Alloc(Heap=true) as a "new" alloc. Each new Alloc
// returns a different address each time it is executed.
//
@@ -507,7 +512,7 @@ type Alloc struct {
// during SSA renaming.
//
// Example printed form:
-// t2 = phi [0.start: t0, 1.if.then: t1, ...]
+// t2 = phi [0: t0, 1: t1]
//
type Phi struct {
register
@@ -517,8 +522,8 @@ type Phi struct {
// The Call instruction represents a function or method call.
//
-// The Call instruction yields the function result, if there is
-// exactly one, or a tuple (empty or len>1) whose components are
+// The Call instruction yields the function result if there is exactly
+// one. Otherwise it returns a tuple, the components of which are
// accessed via Extract.
//
// See CallCommon for generic function call documentation.
@@ -563,8 +568,12 @@ type BinOp struct {
// and a boolean indicating the success of the receive. The
// components of the tuple are accessed using Extract.
//
-// Pos() returns the ast.UnaryExpr.OpPos or ast.RangeStmt.TokPos (for
-// ranging over a channel), if explicit in the source.
+// Pos() returns the ast.UnaryExpr.OpPos, if explicit in the source.
+// For receive operations (ARROW) implicit in ranging over a channel,
+// Pos() returns the ast.RangeStmt.For.
+// For implicit memory loads (STAR), Pos() returns the position of the
+// most closely associated source-level construct; the details are not
+// specified.
//
// Example printed form:
// t0 = *x
@@ -1161,7 +1170,10 @@ type Send struct {
// The Store instruction stores Val at address Addr.
// Stores can be of arbitrary types.
//
-// Pos() returns the ast.StarExpr.Star, if explicit in the source.
+// Pos() returns the position of the source-level construct most closely
+// associated with the memory store operation.
+// Since implicit memory stores are numerous and varied and depend upon
+// implementation choices, the details are not specified.
//
// Example printed form:
// *x = y
@@ -1353,7 +1365,7 @@ func (c *CallCommon) StaticCallee() *Function {
}
// Description returns a description of the mode of this call suitable
-// for a user interface, e.g. "static method call".
+// for a user interface, e.g., "static method call".
func (c *CallCommon) Description() string {
switch fn := c.Value.(type) {
case *Builtin:
diff --git a/go/ssa/ssautil/visit.go b/go/ssa/ssautil/visit.go
index 8dc6694..30843c3 100644
--- a/go/ssa/ssautil/visit.go
+++ b/go/ssa/ssautil/visit.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package ssautil
+package ssautil // import "golang.org/x/tools/go/ssa/ssautil"
import "golang.org/x/tools/go/ssa"
@@ -41,7 +41,7 @@ func (visit *visitor) program() {
}
}
}
- for _, T := range visit.prog.TypesWithMethodSets() {
+ for _, T := range visit.prog.RuntimeTypes() {
mset := visit.prog.MethodSets.MethodSet(T)
for i, n := 0, mset.Len(); i < n; i++ {
visit.function(visit.prog.Method(mset.At(i)))
diff --git a/go/ssa/stdlib_test.go b/go/ssa/stdlib_test.go
index 4935db0..63d031e 100644
--- a/go/ssa/stdlib_test.go
+++ b/go/ssa/stdlib_test.go
@@ -10,6 +10,7 @@ package ssa_test
// Run with "go test -cpu=8 to" set GOMAXPROCS.
import (
+ "go/ast"
"go/build"
"go/token"
"runtime"
@@ -37,10 +38,7 @@ func TestStdlib(t *testing.T) {
// Load, parse and type-check the program.
ctxt := build.Default // copy
ctxt.GOPATH = "" // disable GOPATH
- conf := loader.Config{
- SourceImports: true,
- Build: &ctxt,
- }
+ conf := loader.Config{Build: &ctxt}
if _, err := conf.FromArgs(buildutil.AllPackages(conf.Build), true); err != nil {
t.Errorf("FromArgs failed: %v", err)
return
@@ -82,9 +80,11 @@ func TestStdlib(t *testing.T) {
allFuncs := ssautil.AllFunctions(prog)
// Check that all non-synthetic functions have distinct names.
+ // Synthetic wrappers for exported methods should be distinct too;
+ // wrappers for unexported methods may collide (explained at (*Function).RelString).
byName := make(map[string]*ssa.Function)
for fn := range allFuncs {
- if fn.Synthetic == "" {
+ if fn.Synthetic == "" || ast.IsExported(fn.Name()) {
str := fn.String()
prev := byName[str]
byName[str] = fn
diff --git a/go/ssa/testdata/objlookup.go b/go/ssa/testdata/objlookup.go
index bd266e4..1aaa417 100644
--- a/go/ssa/testdata/objlookup.go
+++ b/go/ssa/testdata/objlookup.go
@@ -90,9 +90,9 @@ func main() {
_ = v8ptr[0] // v8ptr::Alloc
_ = *v8ptr // v8ptr::Alloc
- v8a := make([]int, 1) // v8a::MakeSlice
- v8a[0] = 0 // v8a::MakeSlice
- print(v8a[:]) // v8a::MakeSlice
+ v8a := make([]int, 1) // v8a::Slice
+ v8a[0] = 0 // v8a::Slice
+ print(v8a[:]) // v8a::Slice
v9 := S{} // &v9::Alloc
diff --git a/go/ssa/testdata/valueforexpr.go b/go/ssa/testdata/valueforexpr.go
index 70906ca..0697ec6 100644
--- a/go/ssa/testdata/valueforexpr.go
+++ b/go/ssa/testdata/valueforexpr.go
@@ -33,7 +33,7 @@ func f(spilled, unspilled int) {
_ = /*@Phi*/ (y)
map1 := /*@MakeMap*/ (make(map[string]string))
_ = map1
- _ = /*@MakeSlice*/ (make([]int, 0))
+ _ = /*@Slice*/ (make([]int, 0))
_ = /*@MakeClosure*/ (func() { print(spilled) })
sl := []int{}
diff --git a/go/ssa/testmain.go b/go/ssa/testmain.go
index 422c7db..7935f4e 100644
--- a/go/ssa/testmain.go
+++ b/go/ssa/testmain.go
@@ -12,8 +12,10 @@ import (
"go/ast"
"go/token"
"os"
+ "sort"
"strings"
+ "golang.org/x/tools/go/exact"
"golang.org/x/tools/go/types"
)
@@ -100,7 +102,7 @@ func (prog *Program) CreateTestMainPackage(pkgs ...*Package) *Package {
Prog: prog,
Members: make(map[string]Member),
values: make(map[types.Object]Value),
- Object: types.NewPackage("testmain", "testmain"),
+ Object: types.NewPackage("test$main", "main"),
}
// Build package's init function.
@@ -118,25 +120,33 @@ func (prog *Program) CreateTestMainPackage(pkgs ...*Package) *Package {
}
// Initialize packages to test.
+ var pkgpaths []string
for _, pkg := range pkgs {
var v Call
v.Call.Value = pkg.init
v.setType(types.NewTuple())
init.emit(&v)
+
+ pkgpaths = append(pkgpaths, pkg.Object.Path())
}
+ sort.Strings(pkgpaths)
init.emit(new(Return))
init.finishBody()
testmain.init = init
testmain.Object.MarkComplete()
testmain.Members[init.name] = init
- main := &Function{
- name: "main",
- Signature: new(types.Signature),
- Synthetic: "test main function",
- Prog: prog,
- Pkg: testmain,
- }
+ // For debugging convenience, define an unexported const
+ // that enumerates the packages.
+ packagesConst := types.NewConst(token.NoPos, testmain.Object, "packages", tString,
+ exact.MakeString(strings.Join(pkgpaths, " ")))
+ memberFromObject(testmain, packagesConst, nil)
+
+ // Create main *types.Func and *ssa.Function
+ mainFunc := types.NewFunc(token.NoPos, testmain.Object, "main", new(types.Signature))
+ memberFromObject(testmain, mainFunc, nil)
+ main := testmain.Func("main")
+ main.Synthetic = "test main function"
main.startBody()
@@ -247,7 +257,7 @@ func testMainSlice(fn *Function, testfuncs []*Function, slice types.Type) Value
pname := fn.emit(fa)
// Emit: *pname = "testfunc"
- emitStore(fn, pname, stringConst(testfunc.Name()))
+ emitStore(fn, pname, stringConst(testfunc.Name()), token.NoPos)
// Emit: pfunc = &pitem.F
fa = &FieldAddr{X: pitem, Field: 1} // .F
@@ -255,7 +265,7 @@ func testMainSlice(fn *Function, testfuncs []*Function, slice types.Type) Value
pfunc := fn.emit(fa)
// Emit: *pfunc = testfunc
- emitStore(fn, pfunc, testfunc)
+ emitStore(fn, pfunc, testfunc, token.NoPos)
}
// Emit: slice array[:]
diff --git a/go/ssa/util.go b/go/ssa/util.go
index 1cf15e8..10ebb8c 100644
--- a/go/ssa/util.go
+++ b/go/ssa/util.go
@@ -13,26 +13,13 @@ import (
"io"
"os"
+ "golang.org/x/tools/go/ast/astutil"
"golang.org/x/tools/go/types"
)
-func unreachable() {
- panic("unreachable")
-}
-
//// AST utilities
-// unparen returns e with any enclosing parentheses stripped.
-func unparen(e ast.Expr) ast.Expr {
- for {
- p, ok := e.(*ast.ParenExpr)
- if !ok {
- break
- }
- e = p.X
- }
- return e
-}
+func unparen(e ast.Expr) ast.Expr { return astutil.Unparen(e) }
// isBlankIdent returns true iff e is an Ident with name "_".
// They have no associated types.Object, and thus no type.
@@ -50,11 +37,7 @@ func isPointer(typ types.Type) bool {
return ok
}
-// isInterface reports whether T's underlying type is an interface.
-func isInterface(T types.Type) bool {
- _, ok := T.Underlying().(*types.Interface)
- return ok
-}
+func isInterface(T types.Type) bool { return types.IsInterface(T) }
// deref returns a pointer's element type; otherwise it returns typ.
func deref(typ types.Type) types.Type {
diff --git a/go/ssa/wrappers.go b/go/ssa/wrappers.go
index 10c8a64..3c7e7f0 100644
--- a/go/ssa/wrappers.go
+++ b/go/ssa/wrappers.go
@@ -5,13 +5,13 @@
package ssa
// This file defines synthesis of Functions that delegate to declared
-// methods, which come in three kinds:
+// methods; they come in three kinds:
//
// (1) wrappers: methods that wrap declared methods, performing
// implicit pointer indirections and embedded field selections.
//
// (2) thunks: funcs that wrap declared methods. Like wrappers,
-// thunks perform indirections and field selections. The thunks's
+// thunks perform indirections and field selections. The thunk's
// first parameter is used as the receiver for the method call.
//
// (3) bounds: funcs that wrap declared methods. The bound's sole
@@ -250,8 +250,6 @@ func makeThunk(prog *Program, sel *types.Selection) *Function {
panic(sel)
}
- // TODO(adonovan): opt: canonicalize the recv Type to avoid
- // construct unnecessary duplicate thunks.
key := selectionKey{
kind: sel.Kind(),
recv: sel.Recv(),
@@ -262,6 +260,15 @@ func makeThunk(prog *Program, sel *types.Selection) *Function {
prog.methodsMu.Lock()
defer prog.methodsMu.Unlock()
+
+ // Canonicalize key.recv to avoid constructing duplicate thunks.
+ canonRecv, ok := prog.canon.At(key.recv).(types.Type)
+ if !ok {
+ canonRecv = key.recv
+ prog.canon.Set(key.recv, canonRecv)
+ }
+ key.recv = canonRecv
+
fn, ok := prog.thunks[key]
if !ok {
fn = makeWrapper(prog, sel)
@@ -280,7 +287,7 @@ func changeRecv(s *types.Signature, recv *types.Var) *types.Signature {
// selectionKey is like types.Selection but a usable map key.
type selectionKey struct {
kind types.SelectionKind
- recv types.Type
+ recv types.Type // canonicalized via Program.canon
obj types.Object
index string
indirect bool
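
The canonicalization added to makeThunk relies on a map keyed by type identity, so structurally identical receiver types end up sharing one thunk. An isolated sketch of that idiom, assuming prog.canon behaves like the typeutil.Map used here:

package main

import (
    "fmt"

    "golang.org/x/tools/go/types"
    "golang.org/x/tools/go/types/typeutil"
)

// canon maps each type to a canonical representative; typeutil.Map keys
// are compared by type identity, so identical types collapse to one entry.
var canon typeutil.Map

func canonical(t types.Type) types.Type {
    if c, ok := canon.At(t).(types.Type); ok {
        return c
    }
    canon.Set(t, t)
    return t
}

func main() {
    a := types.NewSlice(types.Typ[types.Int])
    b := types.NewSlice(types.Typ[types.Int]) // distinct but identical type
    fmt.Println(a == b)                       // false
    fmt.Println(canonical(a) == canonical(b)) // true
}
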
diff --git a/go/types/api.go b/go/types/api.go
index bfdcccb..5344a39 100644
--- a/go/types/api.go
+++ b/go/types/api.go
@@ -20,7 +20,7 @@
// and checks for compliance with the language specification.
// Use Info.Types[expr].Type for the results of type inference.
//
-package types
+package types // import "golang.org/x/tools/go/types"
import (
"bytes"
@@ -113,6 +113,10 @@ type Config struct {
// If Sizes != nil, it provides the sizing functions for package unsafe.
// Otherwise &StdSizes{WordSize: 8, MaxAlign: 8} is used instead.
Sizes Sizes
+
+ // If DisableUnusedImportCheck is set, packages are not checked
+ // for unused imports.
+ DisableUnusedImportCheck bool
}
// DefaultImport is the default importer invoked if Config.Import == nil.
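
A short sketch of how a caller might use the new DisableUnusedImportCheck option, for instance when checking generated code whose imports are not yet pruned (the source text is made up):

package main

import (
    "fmt"
    "go/ast"
    "go/parser"
    "go/token"

    _ "golang.org/x/tools/go/gcimporter" // registers types.DefaultImport
    "golang.org/x/tools/go/types"
)

func main() {
    const src = `package p

import "fmt" // deliberately unused
`
    fset := token.NewFileSet()
    f, err := parser.ParseFile(fset, "p.go", src, 0)
    if err != nil {
        panic(err)
    }
    conf := types.Config{DisableUnusedImportCheck: true}
    if _, err := conf.Check("p", fset, []*ast.File{f}, nil); err != nil {
        panic(err) // without the option this reports: "fmt" imported but not used
    }
    fmt.Println("type-checked without the unused-import error")
}
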
diff --git a/go/types/api_test.go b/go/types/api_test.go
index ad3d30b..57f7edc 100644
--- a/go/types/api_test.go
+++ b/go/types/api_test.go
@@ -813,10 +813,10 @@ func main() {
got := sig.Recv().Type()
want := sel.Recv()
if !Identical(got, want) {
- t.Errorf("%s: Recv() = %s, want %s", got, want)
+ t.Errorf("%s: Recv() = %s, want %s", syntax, got, want)
}
} else if sig != nil && sig.Recv() != nil {
- t.Error("%s: signature has receiver %s", sig, sig.Recv().Type())
+ t.Errorf("%s: signature has receiver %s", sig, sig.Recv().Type())
}
}
// Assert that all wantOut entries were used exactly once.
diff --git a/go/types/assignments.go b/go/types/assignments.go
index 7ee1abc..14ee286 100644
--- a/go/types/assignments.go
+++ b/go/types/assignments.go
@@ -45,7 +45,7 @@ func (check *Checker) assignment(x *operand, T Type) bool {
// bool, rune, int, float64, complex128 or string respectively, depending
// on whether the value is a boolean, rune, integer, floating-point, complex,
// or string constant."
- if T == nil || isInterface(T) {
+ if T == nil || IsInterface(T) {
if T == nil && x.typ == Typ[UntypedNil] {
check.errorf(x.pos(), "use of untyped nil")
x.mode = invalid
diff --git a/go/types/builtins.go b/go/types/builtins.go
index fc0b7f7..f3e8855 100644
--- a/go/types/builtins.go
+++ b/go/types/builtins.go
@@ -10,6 +10,7 @@ import (
"go/ast"
"go/token"
+ "golang.org/x/tools/go/ast/astutil"
"golang.org/x/tools/go/exact"
)
@@ -47,7 +48,6 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b
// make argument getter
arg, nargs, _ = unpack(func(x *operand, i int) { check.expr(x, call.Args[i]) }, nargs, false)
if arg == nil {
- x.mode = invalid
return
}
// evaluate first argument, if present
@@ -608,15 +608,7 @@ func implicitArrayDeref(typ Type) Type {
return typ
}
-// unparen removes any parentheses surrounding an expression and returns
-// the naked expression.
-//
-func unparen(x ast.Expr) ast.Expr {
- if p, ok := x.(*ast.ParenExpr); ok {
- return unparen(p.X)
- }
- return x
-}
+func unparen(x ast.Expr) ast.Expr { return astutil.Unparen(x) }
func (check *Checker) complexArg(x *operand) bool {
t, _ := x.typ.Underlying().(*Basic)
diff --git a/go/types/call.go b/go/types/call.go
index a392d91..7f366a8 100644
--- a/go/types/call.go
+++ b/go/types/call.go
@@ -179,14 +179,18 @@ func unpack(get getter, n int, allowCommaOk bool) (getter, int, bool) {
// arguments checks argument passing for the call with the given signature.
// The arg function provides the operand for the i'th argument.
func (check *Checker) arguments(x *operand, call *ast.CallExpr, sig *Signature, arg getter, n int) {
- passSlice := false
if call.Ellipsis.IsValid() {
// last argument is of the form x...
- if sig.variadic {
- passSlice = true
- } else {
+ if len(call.Args) == 1 && n > 1 {
+ // f()... is not permitted if f() is multi-valued
+ check.errorf(call.Ellipsis, "cannot use ... with %d-valued expression %s", n, call.Args[0])
+ check.useGetter(arg, n)
+ return
+ }
+ if !sig.variadic {
check.errorf(call.Ellipsis, "cannot use ... in call to non-variadic %s", call.Fun)
- // ok to continue
+ check.useGetter(arg, n)
+ return
}
}
@@ -194,7 +198,11 @@ func (check *Checker) arguments(x *operand, call *ast.CallExpr, sig *Signature,
for i := 0; i < n; i++ {
arg(x, i)
if x.mode != invalid {
- check.argument(sig, i, x, passSlice && i == n-1)
+ var ellipsis token.Pos
+ if i == n-1 && call.Ellipsis.IsValid() {
+ ellipsis = call.Ellipsis
+ }
+ check.argument(sig, i, x, ellipsis)
}
}
@@ -211,8 +219,8 @@ func (check *Checker) arguments(x *operand, call *ast.CallExpr, sig *Signature,
}
// argument checks passing of argument x to the i'th parameter of the given signature.
-// If passSlice is set, the argument is followed by ... in the call.
-func (check *Checker) argument(sig *Signature, i int, x *operand, passSlice bool) {
+// If ellipsis is valid, the argument is followed by ... at that position in the call.
+func (check *Checker) argument(sig *Signature, i int, x *operand, ellipsis token.Pos) {
n := sig.params.Len()
// determine parameter type
@@ -232,13 +240,19 @@ func (check *Checker) argument(sig *Signature, i int, x *operand, passSlice bool
return
}
- if passSlice {
+ if ellipsis.IsValid() {
// argument is of the form x...
if i != n-1 {
- check.errorf(x.pos(), "can only use ... with matching parameter")
+ check.errorf(ellipsis, "can only use ... with matching parameter")
return
}
- if _, ok := x.typ.Underlying().(*Slice); !ok {
+ switch t := x.typ.Underlying().(type) {
+ case *Slice:
+ // ok
+ case *Tuple:
+ check.errorf(ellipsis, "cannot use ... with %d-valued expression %s", t.Len(), x)
+ return
+ default:
check.errorf(x.pos(), "cannot use %s as parameter of type %s", x, typ)
return
}
@@ -383,7 +397,7 @@ func (check *Checker) selector(x *operand, e *ast.SelectorExpr) {
// includes the methods of typ.
// Variables are addressable, so we can always take their
// address.
- if _, ok := typ.(*Pointer); !ok && !isInterface(typ) {
+ if _, ok := typ.(*Pointer); !ok && !IsInterface(typ) {
typ = &Pointer{base: typ}
}
}
diff --git a/go/types/check.go b/go/types/check.go
index 5b30a30..b48a8ab 100644
--- a/go/types/check.go
+++ b/go/types/check.go
@@ -233,7 +233,9 @@ func (check *Checker) Files(files []*ast.File) (err error) {
check.initOrder()
- check.unusedImports()
+ if !check.conf.DisableUnusedImportCheck {
+ check.unusedImports()
+ }
// perform delayed checks
for _, f := range check.delayed {
diff --git a/go/types/conversions.go b/go/types/conversions.go
index 32cc822..f7b2a56 100644
--- a/go/types/conversions.go
+++ b/go/types/conversions.go
@@ -54,7 +54,7 @@ func (check *Checker) conversion(x *operand, T Type) {
// use the default type (e.g., []byte("foo") should report string
// not []byte as type for the constant "foo").
// - Keep untyped nil for untyped nil arguments.
- if isInterface(T) || constArg && !isConstType(T) {
+ if IsInterface(T) || constArg && !isConstType(T) {
final = defaultType(x.typ)
}
check.updateExprType(x.expr, final, true)
diff --git a/go/types/eval.go b/go/types/eval.go
index 3edb385..7fa319e 100644
--- a/go/types/eval.go
+++ b/go/types/eval.go
@@ -11,8 +11,6 @@ import (
"go/ast"
"go/parser"
"go/token"
-
- "golang.org/x/tools/go/exact"
)
// New is a convenience function to create a new type from a given
@@ -22,11 +20,11 @@ import (
// Position info for objects in the result type is undefined.
//
func New(str string) Type {
- typ, _, err := Eval(str, nil, nil)
+ tv, err := Eval(str, nil, nil)
if err != nil {
panic(err)
}
- return typ
+ return tv.Type
}
// Eval returns the type and, if constant, the value for the
@@ -50,10 +48,10 @@ func New(str string) Type {
// level untyped constants will return an untyped type rather then the
// respective context-specific type.
//
-func Eval(str string, pkg *Package, scope *Scope) (typ Type, val exact.Value, err error) {
+func Eval(str string, pkg *Package, scope *Scope) (TypeAndValue, error) {
node, err := parser.ParseExpr(str)
if err != nil {
- return nil, nil, err
+ return TypeAndValue{}, err
}
// Create a file set that looks structurally identical to the
@@ -70,7 +68,7 @@ func Eval(str string, pkg *Package, scope *Scope) (typ Type, val exact.Value, er
// An error is returned if the scope is incorrect
// if the node cannot be evaluated in the scope.
//
-func EvalNode(fset *token.FileSet, node ast.Expr, pkg *Package, scope *Scope) (typ Type, val exact.Value, err error) {
+func EvalNode(fset *token.FileSet, node ast.Expr, pkg *Package, scope *Scope) (tv TypeAndValue, err error) {
// verify package/scope relationship
if pkg == nil {
scope = Universe
@@ -81,7 +79,7 @@ func EvalNode(fset *token.FileSet, node ast.Expr, pkg *Package, scope *Scope) (t
}
// s == nil || s == pkg.scope
if s == nil {
- return nil, nil, fmt.Errorf("scope does not belong to package %s", pkg.name)
+ return TypeAndValue{}, fmt.Errorf("scope does not belong to package %s", pkg.name)
}
}
@@ -92,18 +90,6 @@ func EvalNode(fset *token.FileSet, node ast.Expr, pkg *Package, scope *Scope) (t
// evaluate node
var x operand
- check.exprOrType(&x, node)
- switch x.mode {
- case invalid, novalue:
- fallthrough
- default:
- unreachable() // or bailed out with error
- case constant:
- val = x.val
- fallthrough
- case typexpr, variable, mapindex, value, commaok:
- typ = x.typ
- }
-
- return
+ check.rawExpr(&x, node, nil)
+ return TypeAndValue{x.mode, x.typ, x.val}, nil
}
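
Callers of Eval now receive a single TypeAndValue rather than separate type and value results. A minimal sketch of the new calling convention:

package main

import (
    "fmt"

    "golang.org/x/tools/go/types"
)

func main() {
    tv, err := types.Eval(`len("hello") + 1`, nil, nil)
    if err != nil {
        panic(err)
    }
    fmt.Println(tv.Type)  // untyped int
    fmt.Println(tv.Value) // 6
}
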
diff --git a/go/types/eval_test.go b/go/types/eval_test.go
index 14d3327..40d70ac 100644
--- a/go/types/eval_test.go
+++ b/go/types/eval_test.go
@@ -18,12 +18,12 @@ import (
)
func testEval(t *testing.T, pkg *Package, scope *Scope, str string, typ Type, typStr, valStr string) {
- gotTyp, gotVal, err := Eval(str, pkg, scope)
+ gotTv, err := Eval(str, pkg, scope)
if err != nil {
t.Errorf("Eval(%q) failed: %s", str, err)
return
}
- if gotTyp == nil {
+ if gotTv.Type == nil {
t.Errorf("Eval(%q) got nil type but no error", str)
return
}
@@ -31,13 +31,13 @@ func testEval(t *testing.T, pkg *Package, scope *Scope, str string, typ Type, ty
// compare types
if typ != nil {
// we have a type, check identity
- if !Identical(gotTyp, typ) {
- t.Errorf("Eval(%q) got type %s, want %s", str, gotTyp, typ)
+ if !Identical(gotTv.Type, typ) {
+ t.Errorf("Eval(%q) got type %s, want %s", str, gotTv.Type, typ)
return
}
} else {
// we have a string, compare type string
- gotStr := gotTyp.String()
+ gotStr := gotTv.Type.String()
if gotStr != typStr {
t.Errorf("Eval(%q) got type %s, want %s", str, gotStr, typStr)
return
@@ -46,8 +46,8 @@ func testEval(t *testing.T, pkg *Package, scope *Scope, str string, typ Type, ty
// compare values
gotStr := ""
- if gotVal != nil {
- gotStr = gotVal.String()
+ if gotTv.Value != nil {
+ gotStr = gotTv.Value.String()
}
if gotStr != valStr {
t.Errorf("Eval(%q) got value %s, want %s", str, gotStr, valStr)
diff --git a/go/types/expr.go b/go/types/expr.go
index c55ce6a..67b91da 100644
--- a/go/types/expr.go
+++ b/go/types/expr.go
@@ -1057,7 +1057,12 @@ func (check *Checker) exprInternal(x *operand, e ast.Expr, hint Type) exprKind {
break // cannot continue
}
// i < len(fields)
- etyp := fields[i].typ
+ fld := fields[i]
+ if !fld.Exported() && fld.pkg != check.pkg {
+ check.errorf(x.pos(), "implicit assignment to unexported field %s in %s literal", fld.name, typ)
+ continue
+ }
+ etyp := fld.typ
if !check.assignment(x, etyp) {
if x.mode != invalid {
check.errorf(x.pos(), "cannot use %s as %s value in struct literal", x, etyp)
@@ -1126,8 +1131,11 @@ func (check *Checker) exprInternal(x *operand, e ast.Expr, hint Type) exprKind {
}
default:
- check.errorf(e.Pos(), "invalid composite literal type %s", typ)
- goto Error
+ // if utyp is invalid, an error was reported before
+ if utyp != Typ[Invalid] {
+ check.errorf(e.Pos(), "invalid composite literal type %s", typ)
+ goto Error
+ }
}
x.mode = value
diff --git a/go/types/predicates.go b/go/types/predicates.go
index 2e36a72..b5c39d9 100644
--- a/go/types/predicates.go
+++ b/go/types/predicates.go
@@ -71,7 +71,8 @@ func isConstType(typ Type) bool {
return ok && t.info&IsConstType != 0
}
-func isInterface(typ Type) bool {
+// IsInterface reports whether typ is an interface type.
+func IsInterface(typ Type) bool {
_, ok := typ.Underlying().(*Interface)
return ok
}
diff --git a/go/types/resolver.go b/go/types/resolver.go
index e8b2823..7eb18a2 100644
--- a/go/types/resolver.go
+++ b/go/types/resolver.go
@@ -130,10 +130,14 @@ func (check *Checker) collectObjects() {
importer := check.conf.Import
if importer == nil {
- if DefaultImport == nil {
- panic(`no Config.Import or DefaultImport (missing import _ "golang.org/x/tools/go/gcimporter"?)`)
+ if DefaultImport != nil {
+ importer = DefaultImport
+ } else {
+ // Panic if we encounter an import.
+ importer = func(map[string]*Package, string) (*Package, error) {
+ panic(`no Config.Import or DefaultImport (missing import _ "golang.org/x/tools/go/gcimporter"?)`)
+ }
}
- importer = DefaultImport
}
// pkgImports is the set of packages already imported by any package file seen
diff --git a/go/types/stmt.go b/go/types/stmt.go
index 42580bd..3917336 100644
--- a/go/types/stmt.go
+++ b/go/types/stmt.go
@@ -456,7 +456,14 @@ func (check *Checker) stmt(ctxt stmtContext, s ast.Stmt) {
check.invalidAST(s.Pos(), "incorrect form of type switch guard")
return
}
- check.recordDef(lhs, nil) // lhs variable is implicitly declared in each cause clause
+
+ if lhs.Name == "_" {
+ // _ := x.(type) is an invalid short variable declaration
+ check.softErrorf(lhs.Pos(), "no new variable on left side of :=")
+ lhs = nil // avoid declared but not used error below
+ } else {
+ check.recordDef(lhs, nil) // lhs variable is implicitly declared in each cause clause
+ }
rhs = guard.Rhs[0]
@@ -569,11 +576,11 @@ func (check *Checker) stmt(ctxt stmtContext, s ast.Stmt) {
}
check.openScope(s, "case")
- defer check.closeScope()
if clause.Comm != nil {
check.stmt(inner, clause.Comm)
}
check.stmtList(inner, clause.Body)
+ check.closeScope()
}
case *ast.ForStmt:
diff --git a/go/types/testdata/builtins.src b/go/types/testdata/builtins.src
index 8b405c3..9eb551d 100644
--- a/go/types/testdata/builtins.src
+++ b/go/types/testdata/builtins.src
@@ -24,11 +24,11 @@ func append1() {
_ = append(s, b)
_ = append(s, x /* ERROR cannot pass argument x */ )
_ = append(s, s /* ERROR cannot pass argument s */ )
- _ = append(s /* ERROR can only use ... with matching parameter */ ...)
- _ = append(s, b, s /* ERROR can only use ... with matching parameter */ ...)
+ _ = append(s... /* ERROR can only use ... with matching parameter */ )
+ _ = append(s, b, s... /* ERROR can only use ... with matching parameter */ )
_ = append(s, 1, 2, 3)
_ = append(s, 1, 2, 3, x /* ERROR cannot pass argument x */ , 5, 6, 6)
- _ = append(s, 1, 2, s /* ERROR can only use ... with matching parameter */ ...)
+ _ = append(s, 1, 2, s... /* ERROR can only use ... with matching parameter */ )
_ = append([]interface{}(nil), 1, 2, "foo", x, 3.1425, false)
type S []byte
diff --git a/go/types/testdata/constdecl.src b/go/types/testdata/constdecl.src
index 8577cb9..6de9b13 100644
--- a/go/types/testdata/constdecl.src
+++ b/go/types/testdata/constdecl.src
@@ -20,17 +20,20 @@ func _() {
}
// Identifier and expression arity must match.
-const _ /* ERROR "missing init expr for _" */
+// The first error message is produced by the parser.
+// In a real-world scenario, the type-checker would not be run
+// in this case and the 2nd error message would not appear.
+const _ /* ERROR "missing constant value" */ /* ERROR "missing init expr for _" */
const _ = 1, 2 /* ERROR "extra init expr 2" */
-const _ /* ERROR "missing init expr for _" */ int
+const _ /* ERROR "missing constant value" */ /* ERROR "missing init expr for _" */ int
const _ int = 1, 2 /* ERROR "extra init expr 2" */
const (
- _ /* ERROR "missing init expr for _" */
+ _ /* ERROR "missing constant value" */ /* ERROR "missing init expr for _" */
_ = 1, 2 /* ERROR "extra init expr 2" */
- _ /* ERROR "missing init expr for _" */ int
+ _ /* ERROR "missing constant value" */ /* ERROR "missing init expr for _" */ int
_ int = 1, 2 /* ERROR "extra init expr 2" */
)
@@ -51,17 +54,17 @@ const (
)
func _() {
- const _ /* ERROR "missing init expr for _" */
+ const _ /* ERROR "missing constant value" */ /* ERROR "missing init expr for _" */
const _ = 1, 2 /* ERROR "extra init expr 2" */
- const _ /* ERROR "missing init expr for _" */ int
+ const _ /* ERROR "missing constant value" */ /* ERROR "missing init expr for _" */ int
const _ int = 1, 2 /* ERROR "extra init expr 2" */
const (
- _ /* ERROR "missing init expr for _" */
+ _ /* ERROR "missing constant value" */ /* ERROR "missing init expr for _" */
_ = 1, 2 /* ERROR "extra init expr 2" */
- _ /* ERROR "missing init expr for _" */ int
+ _ /* ERROR "missing constant value" */ /* ERROR "missing init expr for _" */ int
_ int = 1, 2 /* ERROR "extra init expr 2" */
)
diff --git a/go/types/testdata/expr3.src b/go/types/testdata/expr3.src
index 50ae7c4..125a850 100644
--- a/go/types/testdata/expr3.src
+++ b/go/types/testdata/expr3.src
@@ -4,6 +4,8 @@
package expr3
+import "time"
+
func indexes() {
_ = 1 /* ERROR "cannot index" */ [0]
_ = indexes /* ERROR "cannot index" */ [0]
@@ -26,7 +28,7 @@ func indexes() {
a0 = a[0]
_ = a0
var a1 int32
- a1 = a /* ERROR "cannot assign" */ [1]
+ a1 = a /* ERROR "cannot assign" */ [1]
_ = a1
_ = a[9]
@@ -101,7 +103,6 @@ func indexes() {
_, ok = m["bar"]
_ = ok
-
var t string
_ = t[- /* ERROR "negative" */ 1]
_ = t[- /* ERROR "negative" */ 1 :]
@@ -201,6 +202,15 @@ func struct_literals() {
x int
}
_ = P /* ERROR "invalid composite literal type" */ {}
+
+ // unexported fields
+ _ = time.Time{}
+ _ = time.Time{sec /* ERROR "unknown field" */ : 0}
+ _ = time.Time{
+ 0 /* ERROR implicit assignment to unexported field sec in time.Time literal */,
+ 0 /* ERROR implicit assignment */ ,
+ nil /* ERROR implicit assignment */ ,
+ }
}
func array_literals() {
@@ -237,7 +247,7 @@ func array_literals() {
a0 := [...]int{}
assert(len(a0) == 0)
-
+
a1 := [...]int{0, 1, 2}
assert(len(a1) == 3)
var a13 [3]int
@@ -245,7 +255,7 @@ func array_literals() {
a13 = a1
a14 = a1 /* ERROR "cannot assign" */
_, _ = a13, a14
-
+
a2 := [...]int{- /* ERROR "negative" */ 1: 0}
_ = a2
@@ -439,7 +449,7 @@ func _calls() {
fv(s /* ERROR "cannot pass" */ )
fv(s...)
fv(x /* ERROR "cannot use" */ ...)
- fv(1, s /* ERROR "can only use ... with matching parameter" */ ...)
+ fv(1, s... /* ERROR "can only use ... with matching parameter" */ )
fv(gs /* ERROR "cannot pass" */ ())
fv(gs /* ERROR "cannot pass" */ ()...)
@@ -448,7 +458,7 @@ func _calls() {
t.fm(1, 2.0, x)
t.fm(s /* ERROR "cannot pass" */ )
t.fm(g1())
- t.fm(1, s /* ERROR "can only use ... with matching parameter" */ ...)
+ t.fm(1, s... /* ERROR "can only use ... with matching parameter" */ )
t.fm(gs /* ERROR "cannot pass" */ ())
t.fm(gs /* ERROR "cannot pass" */ ()...)
@@ -456,7 +466,7 @@ func _calls() {
T.fm(t, 1, 2.0, x)
T.fm(t, s /* ERROR "cannot pass" */ )
T.fm(t, g1())
- T.fm(t, 1, s /* ERROR "can only use ... with matching parameter" */ ...)
+ T.fm(t, 1, s... /* ERROR "can only use ... with matching parameter" */ )
T.fm(t, gs /* ERROR "cannot pass" */ ())
T.fm(t, gs /* ERROR "cannot pass" */ ()...)
@@ -465,7 +475,7 @@ func _calls() {
i.fm(1, 2.0, x)
i.fm(s /* ERROR "cannot pass" */ )
i.fm(g1())
- i.fm(1, s /* ERROR "can only use ... with matching parameter" */ ...)
+ i.fm(1, s... /* ERROR "can only use ... with matching parameter" */ )
i.fm(gs /* ERROR "cannot pass" */ ())
i.fm(gs /* ERROR "cannot pass" */ ()...)
diff --git a/go/types/testdata/issues.src b/go/types/testdata/issues.src
index 58c450f..d08e0fd 100644
--- a/go/types/testdata/issues.src
+++ b/go/types/testdata/issues.src
@@ -35,3 +35,39 @@ func issue8799b(x int, ok bool) {
_ = !ok
_ = x
}
+
+func issue9182() {
+ type Point C /* ERROR undeclared */ .Point
+ // no error for composite literal based on unknown type
+ _ = Point{x: 1, y: 2}
+}
+
+func f0() (a []int) { return }
+func f1() (a []int, b int) { return }
+func f2() (a, b []int) { return }
+
+func append_([]int, ...int) {}
+
+func issue9473(a []int, b ...int) {
+ // variadic builtin function
+ _ = append(f0())
+ _ = append(f0(), f0()...)
+ _ = append(f1())
+ _ = append(f2 /* ERROR cannot pass argument */ ())
+ _ = append(f2()... /* ERROR cannot use ... */ )
+ _ = append(f0(), f1 /* ERROR 2-valued expression */ ())
+ _ = append(f0(), f2 /* ERROR 2-valued expression */ ())
+ _ = append(f0(), f1()... /* ERROR cannot use ... */ )
+ _ = append(f0(), f2()... /* ERROR cannot use ... */ )
+
+ // variadic user-defined function
+ append_(f0())
+ append_(f0(), f0()...)
+ append_(f1())
+ append_(f2 /* ERROR cannot pass argument */ ())
+ append_(f2()... /* ERROR cannot use ... */ )
+ append_(f0(), f1 /* ERROR 2-valued expression */ ())
+ append_(f0(), f2 /* ERROR 2-valued expression */ ())
+ append_(f0(), f1()... /* ERROR cannot use */ )
+ append_(f0(), f2()... /* ERROR cannot use */ )
+}
diff --git a/go/types/testdata/stmt0.src b/go/types/testdata/stmt0.src
index 646c418..073e83b 100644
--- a/go/types/testdata/stmt0.src
+++ b/go/types/testdata/stmt0.src
@@ -215,6 +215,17 @@ func selects() {
case x /* ERROR send or receive */ :
case a /* ERROR send or receive */ := ch:
}
+
+ // test for issue 9570: ch2 in second case falsely resolved to
+ // ch2 declared in body of first case
+ ch1 := make(chan int)
+ ch2 := make(chan int)
+ select {
+ case <-ch1:
+ var ch2 /* ERROR ch2 declared but not used */ chan bool
+ case i := <-ch2:
+ print(i + 1)
+ }
}
func gos() {
@@ -529,6 +540,7 @@ func typeswitches() {
}
switch x /* ERROR "declared but not used" */ := x.(type) {}
+ switch _ /* ERROR "no new variable on left side of :=" */ := x.(type) {}
switch x := x.(type) {
case int:
diff --git a/go/types/testdata/vardecl.src b/go/types/testdata/vardecl.src
index 329f4ff..fb6b5f7 100644
--- a/go/types/testdata/vardecl.src
+++ b/go/types/testdata/vardecl.src
@@ -14,9 +14,12 @@ var m map[string]int
var _ int
var _, _ int
-var _ /* ERROR "missing type or init expr" */
-var _ /* ERROR "missing type or init expr" */, _
-var _ /* ERROR "missing type or init expr" */, _, _
+// The first error message is produced by the parser.
+// In a real-world scenario, the type-checker would not be run
+// in this case and the 2nd error message would not appear.
+var _ /* ERROR "missing variable type" */ /* ERROR "missing type or init expr" */
+var _ /* ERROR "missing variable type" */ /* ERROR "missing type or init expr" */, _
+var _ /* ERROR "missing variable type" */ /* ERROR "missing type or init expr" */, _, _
// The initializer must be an expression.
var _ = int /* ERROR "not an expression" */
diff --git a/go/types/typeutil/example_test.go b/go/types/typeutil/example_test.go
new file mode 100644
index 0000000..67ee34f
--- /dev/null
+++ b/go/types/typeutil/example_test.go
@@ -0,0 +1,64 @@
+package typeutil_test
+
+import (
+ "fmt"
+ "sort"
+
+ "go/ast"
+ "go/parser"
+ "go/token"
+
+ "golang.org/x/tools/go/types"
+ "golang.org/x/tools/go/types/typeutil"
+)
+
+func ExampleMap() {
+ const source = `package P
+
+var X []string
+var Y []string
+
+const p, q = 1.0, 2.0
+
+func f(offset int32) (value byte, ok bool)
+func g(rune) (uint8, bool)
+`
+
+ // Parse and type-check the package.
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, "P.go", source, 0)
+ if err != nil {
+ panic(err)
+ }
+ pkg, err := new(types.Config).Check("P", fset, []*ast.File{f}, nil)
+ if err != nil {
+ panic(err)
+ }
+
+ scope := pkg.Scope()
+
+ // Group names of package-level objects by their type.
+ var namesByType typeutil.Map // value is []string
+ for _, name := range scope.Names() {
+ T := scope.Lookup(name).Type()
+
+ names, _ := namesByType.At(T).([]string)
+ names = append(names, name)
+ namesByType.Set(T, names)
+ }
+
+ // Format, sort, and print the map entries.
+ var lines []string
+ namesByType.Iterate(func(T types.Type, names interface{}) {
+ lines = append(lines, fmt.Sprintf("%s %s", names, T))
+ })
+ sort.Strings(lines)
+ for _, line := range lines {
+ fmt.Println(line)
+ }
+
+ // Output:
+ // [X Y] []string
+ // [f g] func(offset int32) (value byte, ok bool)
+ // [p q] untyped float
+}
diff --git a/go/types/typeutil/map.go b/go/types/typeutil/map.go
index bf1ed2d..b3a04cc 100644
--- a/go/types/typeutil/map.go
+++ b/go/types/typeutil/map.go
@@ -4,7 +4,7 @@
// Package typeutil defines various utilities for types, such as Map,
// a mapping from types.Type to interface{} values.
-package typeutil
+package typeutil // import "golang.org/x/tools/go/types/typeutil"
import (
"bytes"
diff --git a/go/vcs/discovery.go b/go/vcs/discovery.go
index d5c3fc6..c4b0e3d 100644
--- a/go/vcs/discovery.go
+++ b/go/vcs/discovery.go
@@ -36,6 +36,9 @@ func parseMetaGoImports(r io.Reader) (imports []metaImport, err error) {
for {
t, err = d.Token()
if err != nil {
+ if err == io.EOF {
+ err = nil
+ }
return
}
if e, ok := t.(xml.StartElement); ok && strings.EqualFold(e.Name.Local, "body") {
diff --git a/go/vcs/vcs.go b/go/vcs/vcs.go
index 586e1b8..2d9b7de 100644
--- a/go/vcs/vcs.go
+++ b/go/vcs/vcs.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package vcs
+package vcs // import "golang.org/x/tools/go/vcs"
import (
"bytes"
@@ -239,9 +239,10 @@ func (v *Cmd) Create(dir, repo string) error {
// The parent of dir must exist; dir must not.
// rev must be a valid revision in repo.
func (v *Cmd) CreateAtRev(dir, repo, rev string) error {
- // Append revision flag to CreateCmd
- createAtRevCmd := v.CreateCmd + " --rev=" + rev
- return v.run(".", createAtRevCmd, "dir", dir, "repo", repo)
+ if err := v.Create(dir, repo); err != nil {
+ return err
+ }
+ return v.run(dir, v.TagSyncCmd, "tag", rev)
}
// Download downloads any new changes for the repo in dir.
@@ -589,6 +590,15 @@ func expand(match map[string]string, s string) string {
// vcsPaths lists the known vcs paths.
var vcsPaths = []*vcsPath{
+ // go.googlesource.com
+ {
+ prefix: "go.googlesource.com",
+ re: `^(?P<root>go\.googlesource\.com/[A-Za-z0-9_.\-]+/?)$`,
+ vcs: "git",
+ repo: "https://{root}",
+ check: noVCSSuffix,
+ },
+
// Google Code - new syntax
{
prefix: "code.google.com/",
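
CreateAtRev is now a plain Create followed by a tag sync to the requested revision, which avoids the hg-only --rev flag. A hedged usage sketch (directory, repository URL, and revision are placeholders):

package main

import (
    "log"

    "golang.org/x/tools/go/vcs"
)

func main() {
    git := vcs.ByCmd("git")
    // Clone the repository, then sync the working tree to the revision;
    // under the new implementation this is Create followed by TagSync.
    if err := git.CreateAtRev("/tmp/tools", "https://go.googlesource.com/tools", "master"); err != nil {
        log.Fatal(err)
    }
}
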
diff --git a/go/vcs/vcs_test.go b/go/vcs/vcs_test.go
index 0b8dd8c..226a3e4 100644
--- a/go/vcs/vcs_test.go
+++ b/go/vcs/vcs_test.go
@@ -8,6 +8,8 @@ import (
"io/ioutil"
"os"
"path/filepath"
+ "reflect"
+ "strings"
"testing"
)
@@ -84,3 +86,45 @@ func TestFromDir(t *testing.T) {
os.RemoveAll(test.path)
}
}
+
+var parseMetaGoImportsTests = []struct {
+ in string
+ out []metaImport
+}{
+ {
+ `<meta name="go-import" content="foo/bar git https://github.com/rsc/foo/bar">`,
+ []metaImport{{"foo/bar", "git", "https://github.com/rsc/foo/bar"}},
+ },
+ {
+ `<meta name="go-import" content="foo/bar git https://github.com/rsc/foo/bar">
+ <meta name="go-import" content="baz/quux git http://github.com/rsc/baz/quux">`,
+ []metaImport{
+ {"foo/bar", "git", "https://github.com/rsc/foo/bar"},
+ {"baz/quux", "git", "http://github.com/rsc/baz/quux"},
+ },
+ },
+ {
+ `<head>
+ <meta name="go-import" content="foo/bar git https://github.com/rsc/foo/bar">
+ </head>`,
+ []metaImport{{"foo/bar", "git", "https://github.com/rsc/foo/bar"}},
+ },
+ {
+ `<meta name="go-import" content="foo/bar git https://github.com/rsc/foo/bar">
+ <body>`,
+ []metaImport{{"foo/bar", "git", "https://github.com/rsc/foo/bar"}},
+ },
+}
+
+func TestParseMetaGoImports(t *testing.T) {
+ for i, tt := range parseMetaGoImportsTests {
+ out, err := parseMetaGoImports(strings.NewReader(tt.in))
+ if err != nil {
+ t.Errorf("test#%d: %v", i, err)
+ continue
+ }
+ if !reflect.DeepEqual(out, tt.out) {
+ t.Errorf("test#%d:\n\thave %q\n\twant %q", i, out, tt.out)
+ }
+ }
+}
diff --git a/godoc/analysis/analysis.go b/godoc/analysis/analysis.go
index 2642495..1f59a4a 100644
--- a/godoc/analysis/analysis.go
+++ b/godoc/analysis/analysis.go
@@ -40,7 +40,7 @@
// location is highlighted in red and hover text provides the compiler
// error message.
//
-package analysis
+package analysis // import "golang.org/x/tools/godoc/analysis"
import (
"fmt"
@@ -57,6 +57,7 @@ import (
"strings"
"sync"
+ "golang.org/x/tools/go/exact"
"golang.org/x/tools/go/loader"
"golang.org/x/tools/go/pointer"
"golang.org/x/tools/go/ssa"
@@ -337,8 +338,7 @@ func (a *analysis) posURL(pos token.Pos, len int) string {
//
func Run(pta bool, result *Result) {
conf := loader.Config{
- SourceImports: true,
- AllowErrors: true,
+ AllowErrors: true,
}
// Silence the default error handler.
@@ -400,6 +400,9 @@ func Run(pta bool, result *Result) {
var mainPkgs []*ssa.Package
if testmain := prog.CreateTestMainPackage(allPackages...); testmain != nil {
mainPkgs = append(mainPkgs, testmain)
+ if p := testmain.Const("packages"); p != nil {
+ log.Printf("Tested packages: %v", exact.StringVal(p.Value.Value))
+ }
}
for _, pkg := range allPackages {
if pkg.Object.Name() == "main" && pkg.Func("main") != nil {
diff --git a/godoc/analysis/typeinfo.go b/godoc/analysis/typeinfo.go
index 82f2f29..bd1b0c1 100644
--- a/godoc/analysis/typeinfo.go
+++ b/godoc/analysis/typeinfo.go
@@ -215,10 +215,7 @@ func (a *analysis) namedType(obj *types.TypeName, implements map[*types.Named]im
// -- utilities --------------------------------------------------------
-func isInterface(T types.Type) bool {
- _, isI := T.Underlying().(*types.Interface)
- return isI
-}
+func isInterface(T types.Type) bool { return types.IsInterface(T) }
// deref returns a pointer's element type; otherwise it returns typ.
func deref(typ types.Type) types.Type {
diff --git a/godoc/godoc.go b/godoc/godoc.go
index 8ec731c..e77a81f 100644
--- a/godoc/godoc.go
+++ b/godoc/godoc.go
@@ -7,7 +7,7 @@
//
// This package comment will evolve over time as this package splits
// into smaller pieces.
-package godoc
+package godoc // import "golang.org/x/tools/godoc"
import (
"bytes"
diff --git a/godoc/redirect/hash.go b/godoc/redirect/hash.go
new file mode 100644
index 0000000..d5a1e3e
--- /dev/null
+++ b/godoc/redirect/hash.go
@@ -0,0 +1,138 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file provides a compact encoding of
+// a map of Mercurial hashes to Git hashes.
+
+package redirect
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// hashMap is a map of Mercurial hashes to Git hashes.
+type hashMap struct {
+ file *os.File
+ entries int
+}
+
+// newHashMap takes a file handle that contains a map of Mercurial to Git
+// hashes. The file should be a sequence of pairs of little-endian encoded
+// uint32s, representing a hgHash and a gitHash respectively.
+// The sequence must be sorted by hgHash.
+// The file must remain open for as long as the returned hashMap is used.
+func newHashMap(f *os.File) (*hashMap, error) {
+ fi, err := f.Stat()
+ if err != nil {
+ return nil, err
+ }
+ return &hashMap{file: f, entries: int(fi.Size() / 8)}, nil
+}
+
+// Lookup finds an hgHash in the map that matches the given prefix, and returns
+// its corresponding gitHash. The prefix must be at least 8 characters long.
+func (m *hashMap) Lookup(s string) gitHash {
+ if m == nil {
+ return 0
+ }
+ hg, err := hgHashFromString(s)
+ if err != nil {
+ return 0
+ }
+ var git gitHash
+ b := make([]byte, 8)
+ sort.Search(m.entries, func(i int) bool {
+ n, err := m.file.ReadAt(b, int64(i*8))
+ if err != nil {
+ panic(err)
+ }
+ if n != 8 {
+ panic(io.ErrUnexpectedEOF)
+ }
+ v := hgHash(binary.LittleEndian.Uint32(b[:4]))
+ if v == hg {
+ git = gitHash(binary.LittleEndian.Uint32(b[4:]))
+ }
+ return v >= hg
+ })
+ return git
+}
+
+// hgHash represents the lower (leftmost) 32 bits of a Mercurial hash.
+type hgHash uint32
+
+func (h hgHash) String() string {
+ return intToHash(int64(h))
+}
+
+func hgHashFromString(s string) (hgHash, error) {
+ if len(s) < 8 {
+ return 0, fmt.Errorf("string too small: len(s) = %d", len(s))
+ }
+ hash := s[:8]
+ i, err := strconv.ParseInt(hash, 16, 64)
+ if err != nil {
+ return 0, err
+ }
+ return hgHash(i), nil
+}
+
+// gitHash represents the leftmost 28 bits of a Git hash in its upper 28 bits,
+// and it encodes hash's repository in the lower 4 bits.
+type gitHash uint32
+
+func (h gitHash) Hash() string {
+ return intToHash(int64(h))[:7]
+}
+
+func (h gitHash) Repo() string {
+ return repo(h & 0xF).String()
+}
+
+func intToHash(i int64) string {
+ s := strconv.FormatInt(i, 16)
+ if len(s) < 8 {
+ s = strings.Repeat("0", 8-len(s)) + s
+ }
+ return s
+}
+
+// repo represents a Go Git repository.
+type repo byte
+
+const (
+ repoGo repo = iota
+ repoBlog
+ repoCrypto
+ repoExp
+ repoImage
+ repoMobile
+ repoNet
+ repoSys
+ repoTalks
+ repoText
+ repoTools
+)
+
+func (r repo) String() string {
+ return map[repo]string{
+ repoGo: "go",
+ repoBlog: "blog",
+ repoCrypto: "crypto",
+ repoExp: "exp",
+ repoImage: "image",
+ repoMobile: "mobile",
+ repoNet: "net",
+ repoSys: "sys",
+ repoTalks: "talks",
+ repoText: "text",
+ repoTools: "tools",
+ }[r]
+}
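
The file consumed by newHashMap is nothing more than little-endian (hgHash, gitHash) uint32 pairs sorted by hgHash. A sketch of a generator for that format (the hash values are invented; a real generator would derive them from repository history):

package main

import (
    "encoding/binary"
    "log"
    "os"
    "sort"
)

// pair holds the leading 32 bits of an hg hash and the packed git hash
// (28-bit prefix plus 4-bit repo code) described in hash.go.
type pair struct{ hg, git uint32 }

type byHg []pair

func (s byHg) Len() int           { return len(s) }
func (s byHg) Less(i, j int) bool { return s[i].hg < s[j].hg }
func (s byHg) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

func main() {
    pairs := []pair{ // invented values, for illustration only
        {0x0edafefc, 0x12345670},
        {0x00abcdef, 0x87654320},
    }
    sort.Sort(byHg(pairs)) // Lookup's binary search requires hgHash order

    f, err := os.Create("hg-git-map.bin")
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    buf := make([]byte, 8)
    for _, p := range pairs {
        binary.LittleEndian.PutUint32(buf[:4], p.hg)
        binary.LittleEndian.PutUint32(buf[4:], p.git)
        if _, err := f.Write(buf); err != nil {
            log.Fatal(err)
        }
    }
}
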
diff --git a/godoc/redirect/redirect.go b/godoc/redirect/redirect.go
index 29e3c4e..b7498b3 100644
--- a/godoc/redirect/redirect.go
+++ b/godoc/redirect/redirect.go
@@ -5,11 +5,15 @@
// Package redirect provides hooks to register HTTP handlers that redirect old
// godoc paths to their new equivalents and assist in accessing the issue
// tracker, wiki, code review system, etc.
-package redirect
+package redirect // import "golang.org/x/tools/godoc/redirect"
import (
+ "fmt"
"net/http"
+ "os"
"regexp"
+ "strconv"
+ "strings"
)
// Register registers HTTP handlers that redirect old godoc paths to their new
@@ -29,7 +33,9 @@ func Register(mux *http.ServeMux) {
mux.Handle(path, Handler(redirect))
}
// NB: /src/pkg (sans trailing slash) is the index of packages.
- http.HandleFunc("/src/pkg/", srcPkgHandler)
+ mux.HandleFunc("/src/pkg/", srcPkgHandler)
+ mux.HandleFunc("/cl/", clHandler)
+ mux.HandleFunc("/change/", changeHandler)
}
func handlePathRedirects(mux *http.ServeMux, redirects map[string]string, prefix string) {
@@ -84,13 +90,13 @@ var cmdRedirects = map[string]string{
var redirects = map[string]string{
"/blog": "/blog/",
"/build": "http://build.golang.org",
- "/change": "https://code.google.com/p/go/source/list",
- "/cl": "https://gocodereview.appspot.com/",
+ "/change": "https://go.googlesource.com/go",
+ "/cl": "https://go-review.googlesource.com",
"/cmd/godoc/": "http://godoc.org/golang.org/x/tools/cmd/godoc/",
"/cmd/vet/": "http://godoc.org/golang.org/x/tools/cmd/vet/",
- "/issue": "https://code.google.com/p/go/issues",
- "/issue/new": "https://code.google.com/p/go/issues/entry",
- "/issues": "https://code.google.com/p/go/issues",
+ "/issue": "https://github.com/golang/go/issues",
+ "/issue/new": "https://github.com/golang/go/issues/new",
+ "/issues": "https://github.com/golang/go/issues",
"/play": "http://play.golang.org",
// In Go 1.2 the references page is part of /doc/.
@@ -105,7 +111,7 @@ var redirects = map[string]string{
"/talks": "http://talks.golang.org",
"/tour": "http://tour.golang.org",
- "/wiki": "https://code.google.com/p/go-wiki/w/list",
+ "/wiki": "https://github.com/golang/go/wiki",
"/doc/articles/c_go_cgo.html": "/blog/c-go-cgo",
"/doc/articles/concurrency_patterns.html": "/blog/go-concurrency-patterns-timing-out-and",
@@ -120,17 +126,15 @@ var redirects = map[string]string{
"/doc/articles/json_rpc_tale_of_interfaces.html": "/blog/json-rpc-tale-of-interfaces",
"/doc/articles/laws_of_reflection.html": "/blog/laws-of-reflection",
"/doc/articles/slices_usage_and_internals.html": "/blog/go-slices-usage-and-internals",
- "/doc/go_for_cpp_programmers.html": "https://code.google.com/p/go-wiki/wiki/GoForCPPProgrammers",
+ "/doc/go_for_cpp_programmers.html": "/wiki/GoForCPPProgrammers",
"/doc/go_tutorial.html": "http://tour.golang.org/",
}
var prefixHelpers = map[string]string{
- "change": "https://code.google.com/p/go/source/detail?r=",
- "cl": "https://codereview.appspot.com/",
- "issue": "https://code.google.com/p/go/issues/detail?id=",
- "play": "http://play.golang.org/",
- "talks": "http://talks.golang.org/",
- "wiki": "https://code.google.com/p/go-wiki/wiki/",
+ "issue": "https://github.com/golang/go/issues/",
+ "play": "http://play.golang.org/",
+ "talks": "http://talks.golang.org/",
+ "wiki": "https://github.com/golang/go/wiki/",
}
func Handler(target string) http.Handler {
@@ -164,3 +168,64 @@ func srcPkgHandler(w http.ResponseWriter, r *http.Request) {
r.URL.Path = "/src/" + r.URL.Path[len("/src/pkg/"):]
http.Redirect(w, r, r.URL.String(), http.StatusMovedPermanently)
}
+
+func clHandler(w http.ResponseWriter, r *http.Request) {
+ const prefix = "/cl/"
+ if p := r.URL.Path; p == prefix {
+ // redirect /prefix/ to /prefix
+ http.Redirect(w, r, p[:len(p)-1], http.StatusFound)
+ return
+ }
+ id := r.URL.Path[len(prefix):]
+ // support /cl/152700045/, which is used in commit 0edafefc36.
+ id = strings.TrimSuffix(id, "/")
+ if !validId.MatchString(id) {
+ http.Error(w, "Not found", http.StatusNotFound)
+ return
+ }
+ target := ""
+ // the first CL in rietveld is about 152046, so only treat the id as
+ // a rietveld CL if it is larger than 150000.
+ if n, err := strconv.Atoi(id); err == nil && n > 150000 {
+ target = "https://codereview.appspot.com/" + id
+ } else {
+ target = "https://go-review.googlesource.com/r/" + id
+ }
+ http.Redirect(w, r, target, http.StatusFound)
+}
+
+var changeMap *hashMap
+
+// LoadChangeMap loads the specified map of Mercurial to Git revisions,
+// which is used by the /change/ handler to intelligently map old hg
+// revisions to their new git equivalents.
+// It should be called before calling Register.
+// The file should remain open as long as the process is running.
+// See the implementation of this package for details.
+func LoadChangeMap(filename string) error {
+ f, err := os.Open(filename)
+ if err != nil {
+ return err
+ }
+ m, err := newHashMap(f)
+ if err != nil {
+ return err
+ }
+ changeMap = m
+ return nil
+}
+
+func changeHandler(w http.ResponseWriter, r *http.Request) {
+ const prefix = "/change/"
+ if p := r.URL.Path; p == prefix {
+ // redirect /prefix/ to /prefix
+ http.Redirect(w, r, p[:len(p)-1], http.StatusFound)
+ return
+ }
+ hash := r.URL.Path[len(prefix):]
+ target := "https://go.googlesource.com/go/+/" + hash
+ if git := changeMap.Lookup(hash); git > 0 {
+ target = fmt.Sprintf("https://go.googlesource.com/%v/+/%v", git.Repo(), git.Hash())
+ }
+ http.Redirect(w, r, target, http.StatusFound)
+}
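
A sketch of how a godoc-like server might wire these handlers together (the map filename is a placeholder; without a loaded map, /change/ simply falls back to the main go repository):

package main

import (
    "log"
    "net/http"

    "golang.org/x/tools/godoc/redirect"
)

func main() {
    // Optional: without a loaded map, /change/<hash> links into the main
    // go repository instead of the per-repo git revision.
    if err := redirect.LoadChangeMap("hg-git-map.bin"); err != nil {
        log.Printf("no hg-to-git change map: %v", err)
    }
    mux := http.NewServeMux()
    redirect.Register(mux)
    log.Fatal(http.ListenAndServe(":8080", mux))
}
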
diff --git a/godoc/static/analysis/help.html b/godoc/static/analysis/help.html
index 61f0665..82409fb 100644
--- a/godoc/static/analysis/help.html
+++ b/godoc/static/analysis/help.html
@@ -77,7 +77,8 @@
<p>
Clicking on the identifier that defines a named type causes a panel
to appear, displaying information about the named type, including
- its size and alignment in bytes, its <a href='http://golang.org/ref/spec#Method_sets'>method set</a>, and its
+ its size and alignment in bytes, its
+ <a href='http://golang.org/ref/spec#Method_sets'>method set</a>, and its
<i>implements</i> relation: the set of types T that are assignable to
or from this type U where at least one of T or U is an interface.
@@ -251,7 +252,7 @@
<span class='err'>⚠</span> All analysis results pertain to exactly
one configuration (e.g. amd64 linux). Files that are conditionally
compiled based on different platforms or build tags are not visible
- to the analysis.</br>
+ to the analysis.<br/>
<span class='err'>⚠</span> Files that <code>import "C"</code> require
preprocessing by the cgo tool. The file offsets after preprocessing
@@ -259,7 +260,8 @@
<span class='err'>⚠</span> Files are not periodically re-analyzed.
If the files change underneath the running server, the displayed
- markup is misaligned.</br>
+ markup is misaligned.<br/>
- <span class='err'>⚠</span> Additional issues are listed at <a href='https://code.google.com/p/go/source/browse/godoc/analysis/README?repo=tools'>go.tools/godoc/analysis/README</a>.</br>
+ <span class='err'>⚠</span> Additional issues are listed at
+ <a href='https://go.googlesource.com/tools/+/master/godoc/analysis/README'>tools/godoc/analysis/README</a>.<br/>
</p>
diff --git a/godoc/static/doc.go b/godoc/static/doc.go
index 5ef96ae..b3d8bcf 100644
--- a/godoc/static/doc.go
+++ b/godoc/static/doc.go
@@ -5,4 +5,4 @@
// Package static exports a map of static file content that supports the godoc
// user interface. The map should be used with the mapfs package, see
// golang.org/x/tools/godoc/vfs/mapfs.
-package static
+package static // import "golang.org/x/tools/godoc/static"
diff --git a/godoc/static/makestatic.go b/godoc/static/makestatic.go
index b3b9c5f..f5e3272 100644
--- a/godoc/static/makestatic.go
+++ b/godoc/static/makestatic.go
@@ -4,8 +4,9 @@
// +build ignore
-// Command bake reads a set of files and writes a Go source file to "static.go"
+// Command makestatic reads a set of files and writes a Go source file to "static.go"
// that declares a map of string constants containing contents of the input files.
+// It is intended to be invoked via "go generate" (directive in "gen.go").
package main
import (
@@ -70,13 +71,13 @@ var files = []string{
}
func main() {
- if err := bake(); err != nil {
+ if err := makestatic(); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
-func bake() error {
+func makestatic() error {
f, err := os.Create("static.go")
if err != nil {
return err
@@ -117,4 +118,4 @@ func sanitize(b []byte) []byte {
return bytes.Replace(b, []byte("\xEF\xBB\xBF"), []byte("`+\"\\xEF\\xBB\\xBF\"+`"), -1)
}
-const warning = "// DO NOT EDIT ** This file was generated with the bake tool ** DO NOT EDIT //"
+const warning = "// DO NOT EDIT ** This file was generated by \"go generate\" ** DO NOT EDIT //"
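
The doc comment above says the generator is driven by a directive in gen.go; that file is not part of this diff, so the following is only a guess at its contents:

// gen.go (contents assumed, not shown in this diff)
package static

//go:generate go run makestatic.go
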
diff --git a/godoc/static/search.txt b/godoc/static/search.txt
index 15c1941..0ae0c08 100644
--- a/godoc/static/search.txt
+++ b/godoc/static/search.txt
@@ -23,7 +23,7 @@ QUERY
---------------------------------------
*/}}{{range $key, $val := .Idents}}{{if $val}}{{$key.Name}}
-{{range $val.Idents}} {{.Path}}.{{.Name}}
+{{range $val}} {{.Path}}.{{.Name}}
{{end}}
{{end}}{{end}}{{/* .Idents */}}{{/*
diff --git a/godoc/static/static.go b/godoc/static/static.go
index 593d433..d3376ea 100644
--- a/godoc/static/static.go
+++ b/godoc/static/static.go
@@ -1,4 +1,4 @@
-// DO NOT EDIT ** This file was generated with the bake tool ** DO NOT EDIT //
+// DO NOT EDIT ** This file was generated by "go generate" ** DO NOT EDIT //
package static
@@ -98,7 +98,8 @@ var Files = map[string]string{
<p>
Clicking on the identifier that defines a named type causes a panel
to appear, displaying information about the named type, including
- its size and alignment in bytes, its <a href='http://golang.org/ref/spec#Method_sets'>method set</a>, and its
+ its size and alignment in bytes, its
+ <a href='http://golang.org/ref/spec#Method_sets'>method set</a>, and its
<i>implements</i> relation: the set of types T that are assignable to
or from this type U where at least one of T or U is an interface.
@@ -272,7 +273,7 @@ var Files = map[string]string{
<span class='err'>⚠</span> All analysis results pertain to exactly
one configuration (e.g. amd64 linux). Files that are conditionally
compiled based on different platforms or build tags are not visible
- to the analysis.</br>
+ to the analysis.<br/>
<span class='err'>⚠</span> Files that <code>import "C"</code> require
preprocessing by the cgo tool. The file offsets after preprocessing
@@ -280,9 +281,10 @@ var Files = map[string]string{
<span class='err'>⚠</span> Files are not periodically re-analyzed.
If the files change underneath the running server, the displayed
- markup is misaligned.</br>
+ markup is misaligned.<br/>
- <span class='err'>⚠</span> Additional issues are listed at <a href='https://code.google.com/p/go/source/browse/godoc/analysis/README?repo=tools'>go.tools/godoc/analysis/README</a>.</br>
+ <span class='err'>⚠</span> Additional issues are listed at
+ <a href='https://go.googlesource.com/tools/+/master/godoc/analysis/README'>tools/godoc/analysis/README</a>.<br/>
</p>
`,
@@ -2523,7 +2525,7 @@ function PlaygroundOutput(el) {
---------------------------------------
*/}}{{range $key, $val := .Idents}}{{if $val}}{{$key.Name}}
-{{range $val.Idents}} {{.Path}}.{{.Name}}
+{{range $val}} {{.Path}}.{{.Name}}
{{end}}
{{end}}{{end}}{{/* .Idents */}}{{/*
diff --git a/godoc/util/util.go b/godoc/util/util.go
index d6416df..feedb76 100644
--- a/godoc/util/util.go
+++ b/godoc/util/util.go
@@ -3,7 +3,7 @@
// license that can be found in the LICENSE file.
// Package util contains utility types and functions for godoc.
-package util
+package util // import "golang.org/x/tools/godoc/util"
import (
pathpkg "path"
diff --git a/godoc/vfs/gatefs/gatefs.go b/godoc/vfs/gatefs/gatefs.go
index cb8ec03..7045a5c 100644
--- a/godoc/vfs/gatefs/gatefs.go
+++ b/godoc/vfs/gatefs/gatefs.go
@@ -4,7 +4,7 @@
// Package gatefs provides an implementation of the FileSystem
// interface that wraps another FileSystem and limits its concurrency.
-package gatefs
+package gatefs // import "golang.org/x/tools/godoc/vfs/gatefs"
import (
"fmt"
diff --git a/godoc/vfs/httpfs/httpfs.go b/godoc/vfs/httpfs/httpfs.go
index e4beeec..f232f03 100644
--- a/godoc/vfs/httpfs/httpfs.go
+++ b/godoc/vfs/httpfs/httpfs.go
@@ -3,7 +3,7 @@
// license that can be found in the LICENSE file.
// Package httpfs implements http.FileSystem using a godoc vfs.FileSystem.
-package httpfs
+package httpfs // import "golang.org/x/tools/godoc/vfs/httpfs"
import (
"fmt"
diff --git a/godoc/vfs/mapfs/mapfs.go b/godoc/vfs/mapfs/mapfs.go
index 6da3c0a..660b1ca 100644
--- a/godoc/vfs/mapfs/mapfs.go
+++ b/godoc/vfs/mapfs/mapfs.go
@@ -4,7 +4,7 @@
// Package mapfs file provides an implementation of the FileSystem
// interface based on the contents of a map[string]string.
-package mapfs
+package mapfs // import "golang.org/x/tools/godoc/vfs/mapfs"
import (
"io"
diff --git a/godoc/vfs/vfs.go b/godoc/vfs/vfs.go
index 937c2b2..ad06b1a 100644
--- a/godoc/vfs/vfs.go
+++ b/godoc/vfs/vfs.go
@@ -4,7 +4,7 @@
// Package vfs defines types for abstract file system access and provides an
// implementation accessing the file system of the underlying OS.
-package vfs
+package vfs // import "golang.org/x/tools/godoc/vfs"
import (
"io"
diff --git a/godoc/vfs/zipfs/zipfs.go b/godoc/vfs/zipfs/zipfs.go
index 0e04a4a..87eaf8d 100644
--- a/godoc/vfs/zipfs/zipfs.go
+++ b/godoc/vfs/zipfs/zipfs.go
@@ -15,7 +15,7 @@
// like absolute paths w/o a leading '/'; i.e., the paths are considered
// relative to the root of the file system.
// - All path arguments to file system methods must be absolute paths.
-package zipfs
+package zipfs // import "golang.org/x/tools/godoc/vfs/zipfs"
import (
"archive/zip"
diff --git a/imports/fix.go b/imports/fix.go
index 6f441aa..22fde6c 100644
--- a/imports/fix.go
+++ b/imports/fix.go
@@ -16,7 +16,7 @@ import (
"strings"
"sync"
- "golang.org/x/tools/astutil"
+ "golang.org/x/tools/go/ast/astutil"
)
// importToGroup is a list of functions which map from an import path to
diff --git a/imports/imports.go b/imports/imports.go
index b97529e..e30946b 100644
--- a/imports/imports.go
+++ b/imports/imports.go
@@ -4,7 +4,7 @@
// Package imports implements a Go pretty-printer (like package "go/format")
// that also adds or removes import statements as necessary.
-package imports
+package imports // import "golang.org/x/tools/imports"
import (
"bufio"
@@ -20,7 +20,7 @@ import (
"strconv"
"strings"
- "golang.org/x/tools/astutil"
+ "golang.org/x/tools/go/ast/astutil"
)
// Options specifies options for processing files.
diff --git a/oracle/TODO b/oracle/TODO
index bbf0577..b9d4271 100644
--- a/oracle/TODO
+++ b/oracle/TODO
@@ -15,8 +15,6 @@ tools, which should act as if they were saved.
Fix: make the guessImportPath hack work with external _test.go files too.
-Allow the analysis scope to include multiple test packages at once.
-
Include complete pos/end information Serial output.
But beware that sometimes a single token (e.g. +) is more helpful
than the pos/end of the containing expression (e.g. x \n + \n y).
diff --git a/oracle/describe.go b/oracle/describe.go
index 01b245c..9c21017 100644
--- a/oracle/describe.go
+++ b/oracle/describe.go
@@ -13,7 +13,7 @@ import (
"os"
"strings"
- "golang.org/x/tools/astutil"
+ "golang.org/x/tools/go/ast/astutil"
"golang.org/x/tools/go/exact"
"golang.org/x/tools/go/loader"
"golang.org/x/tools/go/types"
@@ -215,13 +215,6 @@ func findInterestingNode(pkginfo *loader.PackageInfo, path []ast.Node) ([]ast.No
return path, actionExpr
case *types.Func:
- // For f in 'interface {f()}', return the interface type, for now.
- if _, ok := path[1].(*ast.Field); ok {
- _ = path[2].(*ast.FieldList) // assertion
- if _, ok := path[3].(*ast.InterfaceType); ok {
- return path[3:], actionType
- }
- }
return path, actionExpr
case *types.Builtin:
@@ -737,10 +730,14 @@ func isAccessibleFrom(obj types.Object, pkg *types.Package) bool {
func methodsToSerial(this *types.Package, methods []*types.Selection, fset *token.FileSet) []serial.DescribeMethod {
var jmethods []serial.DescribeMethod
for _, meth := range methods {
- jmethods = append(jmethods, serial.DescribeMethod{
- Name: types.SelectionString(this, meth),
- Pos: fset.Position(meth.Obj().Pos()).String(),
- })
+ var ser serial.DescribeMethod
+ if meth != nil { // may contain nils when called by implements (on a method)
+ ser = serial.DescribeMethod{
+ Name: types.SelectionString(this, meth),
+ Pos: fset.Position(meth.Obj().Pos()).String(),
+ }
+ }
+ jmethods = append(jmethods, ser)
}
return jmethods
}
diff --git a/oracle/implements.go b/oracle/implements.go
index d07a02c..1ad80b4 100644
--- a/oracle/implements.go
+++ b/oracle/implements.go
@@ -17,18 +17,35 @@ import (
)
// Implements displays the "implements" relation as it pertains to the
-// selected type.
+// selected type. If the selection is a method, 'implements' displays
+// the corresponding methods of the types that would have been reported
+// by an implements query on the receiver type.
//
func implements(o *Oracle, qpos *QueryPos) (queryResult, error) {
// Find the selected type.
- // TODO(adonovan): fix: make it work on qualified Idents too.
path, action := findInterestingNode(qpos.info, qpos.path)
- if action != actionType {
- return nil, fmt.Errorf("no type here")
+
+ var method *types.Func
+ var T types.Type // selected type (receiver if method != nil)
+
+ switch action {
+ case actionExpr:
+ // method?
+ if id, ok := path[0].(*ast.Ident); ok {
+ if obj, ok := qpos.info.ObjectOf(id).(*types.Func); ok {
+ recv := obj.Type().(*types.Signature).Recv()
+ if recv == nil {
+ return nil, fmt.Errorf("this function is not a method")
+ }
+ method = obj
+ T = recv.Type()
+ }
+ }
+ case actionType:
+ T = qpos.info.TypeOf(path[0].(ast.Expr))
}
- T := qpos.info.TypeOf(path[0].(ast.Expr))
if T == nil {
- return nil, fmt.Errorf("no type here")
+ return nil, fmt.Errorf("no type or method here")
}
// Find all named types, even local types (which can have
@@ -102,52 +119,128 @@ func implements(o *Oracle, qpos *QueryPos) (queryResult, error) {
sort.Sort(typesByString(from))
sort.Sort(typesByString(fromPtr))
- return &implementsResult{T, pos, to, from, fromPtr}, nil
+ var toMethod, fromMethod, fromPtrMethod []*types.Selection // contain nils
+ if method != nil {
+ for _, t := range to {
+ toMethod = append(toMethod,
+ types.NewMethodSet(t).Lookup(method.Pkg(), method.Name()))
+ }
+ for _, t := range from {
+ fromMethod = append(fromMethod,
+ types.NewMethodSet(t).Lookup(method.Pkg(), method.Name()))
+ }
+ for _, t := range fromPtr {
+ fromPtrMethod = append(fromPtrMethod,
+ types.NewMethodSet(t).Lookup(method.Pkg(), method.Name()))
+ }
+ }
+
+ return &implementsResult{qpos, T, pos, to, from, fromPtr, method, toMethod, fromMethod, fromPtrMethod}, nil
}
type implementsResult struct {
+ qpos *QueryPos
+
t types.Type // queried type (not necessarily named)
pos interface{} // pos of t (*types.Name or *QueryPos)
to []types.Type // named or ptr-to-named types assignable to interface T
from []types.Type // named interfaces assignable from T
fromPtr []types.Type // named interfaces assignable only from *T
+
+ // if a method was queried:
+ method *types.Func // queried method
+ toMethod []*types.Selection // method of type to[i], if any
+ fromMethod []*types.Selection // method of type from[i], if any
+ fromPtrMethod []*types.Selection // method of type fromPtrMethod[i], if any
}
func (r *implementsResult) display(printf printfFunc) {
+ relation := "is implemented by"
+
+ meth := func(sel *types.Selection) {
+ if sel != nil {
+ printf(sel.Obj(), "\t%s method (%s).%s",
+ relation, r.qpos.TypeString(sel.Recv()), sel.Obj().Name())
+ }
+ }
+
if isInterface(r.t) {
if types.NewMethodSet(r.t).Len() == 0 { // TODO(adonovan): cache mset
printf(r.pos, "empty interface type %s", r.t)
return
}
- printf(r.pos, "interface type %s", r.t)
- // Show concrete types first; use two passes.
- for _, sub := range r.to {
+ if r.method == nil {
+ printf(r.pos, "interface type %s", r.t)
+ } else {
+ printf(r.method, "abstract method %s", r.qpos.ObjectString(r.method))
+ }
+
+ // Show concrete types (or methods) first; use two passes.
+ for i, sub := range r.to {
if !isInterface(sub) {
- printf(deref(sub).(*types.Named).Obj(), "\tis implemented by %s type %s",
- typeKind(sub), sub)
+ if r.method == nil {
+ printf(deref(sub).(*types.Named).Obj(), "\t%s %s type %s",
+ relation, typeKind(sub), sub)
+ } else {
+ meth(r.toMethod[i])
+ }
}
}
- for _, sub := range r.to {
+ for i, sub := range r.to {
if isInterface(sub) {
- printf(deref(sub).(*types.Named).Obj(), "\tis implemented by %s type %s", typeKind(sub), sub)
+ if r.method == nil {
+ printf(sub.(*types.Named).Obj(), "\t%s %s type %s",
+ relation, typeKind(sub), sub)
+ } else {
+ meth(r.toMethod[i])
+ }
}
}
- for _, super := range r.from {
- printf(super.(*types.Named).Obj(), "\timplements %s", super)
+ relation = "implements"
+ for i, super := range r.from {
+ if r.method == nil {
+ printf(super.(*types.Named).Obj(), "\t%s %s", relation, super)
+ } else {
+ meth(r.fromMethod[i])
+ }
}
} else {
+ relation = "implements"
+
if r.from != nil {
- printf(r.pos, "%s type %s", typeKind(r.t), r.t)
- for _, super := range r.from {
- printf(super.(*types.Named).Obj(), "\timplements %s", super)
+ if r.method == nil {
+ printf(r.pos, "%s type %s", typeKind(r.t), r.t)
+ } else {
+ printf(r.method, "concrete method %s",
+ r.qpos.ObjectString(r.method))
+ }
+ for i, super := range r.from {
+ if r.method == nil {
+ printf(super.(*types.Named).Obj(), "\t%s %s",
+ relation, super)
+ } else {
+ meth(r.fromMethod[i])
+ }
}
}
if r.fromPtr != nil {
- printf(r.pos, "pointer type *%s", r.t)
- for _, psuper := range r.fromPtr {
- printf(psuper.(*types.Named).Obj(), "\timplements %s", psuper)
+ if r.method == nil {
+ printf(r.pos, "pointer type *%s", r.t)
+ } else {
+ // TODO(adonovan): de-dup (C).f and (*C).f implementing (I).f.
+ printf(r.method, "concrete method %s",
+ r.qpos.ObjectString(r.method))
+ }
+
+ for i, psuper := range r.fromPtr {
+ if r.method == nil {
+ printf(psuper.(*types.Named).Obj(), "\t%s %s",
+ relation, psuper)
+ } else {
+ meth(r.fromPtrMethod[i])
+ }
}
} else if r.from == nil {
printf(r.pos, "%s type %s implements only interface{}", typeKind(r.t), r.t)
@@ -157,10 +250,19 @@ func (r *implementsResult) display(printf printfFunc) {
func (r *implementsResult) toSerial(res *serial.Result, fset *token.FileSet) {
res.Implements = &serial.Implements{
- T: makeImplementsType(r.t, fset),
- AssignableTo: makeImplementsTypes(r.to, fset),
- AssignableFrom: makeImplementsTypes(r.from, fset),
- AssignableFromPtr: makeImplementsTypes(r.fromPtr, fset),
+ T: makeImplementsType(r.t, fset),
+ AssignableTo: makeImplementsTypes(r.to, fset),
+ AssignableFrom: makeImplementsTypes(r.from, fset),
+ AssignableFromPtr: makeImplementsTypes(r.fromPtr, fset),
+ AssignableToMethod: methodsToSerial(r.qpos.info.Pkg, r.toMethod, fset),
+ AssignableFromMethod: methodsToSerial(r.qpos.info.Pkg, r.fromMethod, fset),
+ AssignableFromPtrMethod: methodsToSerial(r.qpos.info.Pkg, r.fromPtrMethod, fset),
+ }
+ if r.method != nil {
+ res.Implements.Method = &serial.DescribeMethod{
+ Name: r.qpos.ObjectString(r.method),
+ Pos: fset.Position(r.method.Pos()).String(),
+ }
}
}
@@ -191,10 +293,7 @@ func typeKind(T types.Type) string {
return strings.ToLower(strings.TrimPrefix(s, "*types."))
}
-func isInterface(T types.Type) bool {
- _, isI := T.Underlying().(*types.Interface)
- return isI
-}
+func isInterface(T types.Type) bool { return types.IsInterface(T) }
type typesByString []types.Type
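
For context, the relation that the extended 'implements' query reports is ordinary interface satisfaction between named types. A minimal, self-contained sketch of that check, using the standard library's go/types and go/importer rather than the golang.org/x/tools/go/types fork imported by the oracle in this diff (the example package source is invented):

package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"
)

const src = `package p

type I interface{ F() }

type C int

func (C) F() {}
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	conf := types.Config{Importer: importer.Default()}
	pkg, err := conf.Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	iface := pkg.Scope().Lookup("I").Type().Underlying().(*types.Interface)
	c := pkg.Scope().Lookup("C").Type()

	// C has a value receiver for F, so both C and *C satisfy I.
	fmt.Println(types.Implements(c, iface))                   // true
	fmt.Println(types.Implements(types.NewPointer(c), iface)) // true
}
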
diff --git a/oracle/oracle.go b/oracle/oracle.go
index 55566c3..3cff219 100644
--- a/oracle/oracle.go
+++ b/oracle/oracle.go
@@ -8,7 +8,7 @@
// http://golang.org/s/oracle-design
// http://golang.org/s/oracle-user-manual
//
-package oracle
+package oracle // import "golang.org/x/tools/oracle"
// This file defines oracle.Query, the entry point for the oracle tool.
// The actual executable is defined in cmd/oracle.
@@ -56,7 +56,7 @@ import (
"go/token"
"io"
- "golang.org/x/tools/astutil"
+ "golang.org/x/tools/go/ast/astutil"
"golang.org/x/tools/go/loader"
"golang.org/x/tools/go/pointer"
"golang.org/x/tools/go/ssa"
@@ -101,6 +101,7 @@ var modes = []*modeInfo{
{"callstack", needPTA | needPos, callstack},
{"peers", needPTA | needSSADebug | needPos, peers},
{"pointsto", needPTA | needSSADebug | needExactPos, pointsto},
+ {"whicherrs", needPTA | needSSADebug | needExactPos, whicherrs},
// Type-based, modular analyses:
{"definition", needPos, definition},
@@ -192,7 +193,7 @@ func (res *Result) Serial() *serial.Result {
// Clients that intend to perform multiple queries against the same
// analysis scope should use this pattern instead:
//
-// conf := loader.Config{Build: buildContext, SourceImports: true}
+// conf := loader.Config{Build: buildContext}
// ... populate config, e.g. conf.FromArgs(args) ...
// iprog, err := conf.Load()
// if err != nil { ... }
@@ -222,7 +223,7 @@ func Query(args []string, mode, pos string, ptalog io.Writer, buildContext *buil
return nil, fmt.Errorf("invalid mode type: %q", mode)
}
- conf := loader.Config{Build: buildContext, SourceImports: true}
+ conf := loader.Config{Build: buildContext}
// Determine initial packages.
args, err := conf.FromArgs(args, true)
@@ -321,7 +322,7 @@ func reduceScope(pos string, conf *loader.Config) {
// (and possibly its corresponding tests/production code).
// TODO(adonovan): set 'augment' based on which file list
// contains
- _ = conf.ImportWithTests(importPath) // ignore error
+ conf.ImportWithTests(importPath)
}
func pkgContainsFile(bp *build.Package, filename string) bool {
@@ -481,17 +482,7 @@ func ptrAnalysis(o *Oracle) *pointer.Result {
return result
}
-// unparen returns e with any enclosing parentheses stripped.
-func unparen(e ast.Expr) ast.Expr {
- for {
- p, ok := e.(*ast.ParenExpr)
- if !ok {
- break
- }
- e = p.X
- }
- return e
-}
+func unparen(e ast.Expr) ast.Expr { return astutil.Unparen(e) }
// deref returns a pointer's element type; otherwise it returns typ.
func deref(typ types.Type) types.Type {
diff --git a/oracle/oracle_test.go b/oracle/oracle_test.go
index 987defe..fe43fc2 100644
--- a/oracle/oracle_test.go
+++ b/oracle/oracle_test.go
@@ -74,8 +74,8 @@ func parseQueries(t *testing.T, filename string) []*query {
}
// Parse the file once to discover the test queries.
- var fset token.FileSet
- f, err := parser.ParseFile(&fset, filename, filedata, parser.ParseComments)
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, filename, filedata, parser.ParseComments)
if err != nil {
t.Fatal(err)
}
@@ -208,11 +208,13 @@ func TestOracle(t *testing.T) {
"testdata/src/main/describe.go",
"testdata/src/main/freevars.go",
"testdata/src/main/implements.go",
+ "testdata/src/main/implements-methods.go",
"testdata/src/main/imports.go",
"testdata/src/main/peers.go",
"testdata/src/main/pointsto.go",
"testdata/src/main/reflection.go",
"testdata/src/main/what.go",
+ "testdata/src/main/whicherrs.go",
// JSON:
// TODO(adonovan): most of these are very similar; combine them.
"testdata/src/main/callgraph-json.go",
@@ -220,6 +222,7 @@ func TestOracle(t *testing.T) {
"testdata/src/main/peers-json.go",
"testdata/src/main/describe-json.go",
"testdata/src/main/implements-json.go",
+ "testdata/src/main/implements-methods-json.go",
"testdata/src/main/pointsto-json.go",
"testdata/src/main/referrers-json.go",
"testdata/src/main/what-json.go",
@@ -271,7 +274,7 @@ func TestMultipleQueries(t *testing.T) {
// Loader
var buildContext = build.Default
buildContext.GOPATH = "testdata"
- conf := loader.Config{Build: &buildContext, SourceImports: true}
+ conf := loader.Config{Build: &buildContext}
filename := "testdata/src/main/multi.go"
conf.CreateFromFilenames("", filename)
iprog, err := conf.Load()
diff --git a/oracle/pointsto.go b/oracle/pointsto.go
index 8f633ad..10ad069 100644
--- a/oracle/pointsto.go
+++ b/oracle/pointsto.go
@@ -10,7 +10,7 @@ import (
"go/token"
"sort"
- "golang.org/x/tools/astutil"
+ "golang.org/x/tools/go/ast/astutil"
"golang.org/x/tools/go/loader"
"golang.org/x/tools/go/pointer"
"golang.org/x/tools/go/ssa"
diff --git a/oracle/pos.go b/oracle/pos.go
index 6ca019a..d5d558b 100644
--- a/oracle/pos.go
+++ b/oracle/pos.go
@@ -11,7 +11,7 @@ import (
"strconv"
"strings"
- "golang.org/x/tools/astutil"
+ "golang.org/x/tools/go/ast/astutil"
)
// parseOctothorpDecimal returns the numeric value if s matches "#%d",
diff --git a/oracle/serial/serial.go b/oracle/serial/serial.go
index 6776872..86ccb02 100644
--- a/oracle/serial/serial.go
+++ b/oracle/serial/serial.go
@@ -101,7 +101,6 @@ type FreeVar struct {
}
// An Implements contains the result of an 'implements' query.
-
// It describes the queried type, the set of named non-empty interface
// types to which it is assignable, and the set of named/*named types
// (concrete or non-empty interface) which may be assigned to it.
@@ -111,6 +110,15 @@ type Implements struct {
AssignableTo []ImplementsType `json:"to,omitempty"` // types assignable to T
AssignableFrom []ImplementsType `json:"from,omitempty"` // interface types assignable from T
AssignableFromPtr []ImplementsType `json:"fromptr,omitempty"` // interface types assignable only from *T
+
+ // The following fields are set only if the query was a method.
+ // Assignable{To,From,FromPtr}Method[i] is the corresponding
+ // method of type Assignable{To,From,FromPtr}[i], or blank
+ // {"",""} if that type lacks the method.
+ Method *DescribeMethod `json:"method,omitempty"` // the queried method
+ AssignableToMethod []DescribeMethod `json:"to_method,omitempty"`
+ AssignableFromMethod []DescribeMethod `json:"from_method,omitempty"`
+ AssignableFromPtrMethod []DescribeMethod `json:"fromptr_method,omitempty"`
}
// An ImplementsType describes a single type as part of an 'implements' query.
@@ -229,6 +237,21 @@ type PTAWarning struct {
Message string `json:"message"` // warning message
}
+// A WhichErrs is the result of a 'whicherrs' query.
+// It contains the position of the queried error and the possible globals,
+// constants, and types it may point to.
+type WhichErrs struct {
+ ErrPos string `json:"errpos,omitempty"` // location of queried error
+ Globals []string `json:"globals,omitempty"` // locations of globals
+ Constants []string `json:"constants,omitempty"` // locations of constants
+ Types []WhichErrsType `json:"types,omitempty"` // Types
+}
+
+type WhichErrsType struct {
+ Type string `json:"type,omitempty"`
+ Position string `json:"position,omitempty"`
+}
+
// A Result is the common result of any oracle query.
// It contains a query-specific result element.
//
@@ -251,6 +274,7 @@ type Result struct {
PointsTo []PointsTo `json:"pointsto,omitempty"`
Referrers *Referrers `json:"referrers,omitempty"`
What *What `json:"what,omitempty"`
+ WhichErrs *WhichErrs `json:"whicherrs,omitempty"`
Warnings []PTAWarning `json:"warnings,omitempty"` // warnings from pointer analysis
}
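
The new WhichErrs record is what a client asking for JSON output receives for a 'whicherrs' query. A hedged sketch of decoding it on the client side; the structs below mirror the serial declarations added above rather than importing the package, and the sample JSON values are invented:

package main

import (
	"encoding/json"
	"fmt"
)

// Local mirrors of serial.WhichErrs and serial.WhichErrsType.
type WhichErrsType struct {
	Type     string `json:"type,omitempty"`
	Position string `json:"position,omitempty"`
}

type WhichErrs struct {
	ErrPos    string          `json:"errpos,omitempty"`
	Globals   []string        `json:"globals,omitempty"`
	Constants []string        `json:"constants,omitempty"`
	Types     []WhichErrsType `json:"types,omitempty"`
}

func main() {
	data := []byte(`{
		"errpos": "main.go:25:2",
		"globals": ["main.go:11:5"],
		"constants": ["main.go:5:7"],
		"types": [{"type": "errType", "position": "main.go:3:6"}]
	}`)
	var we WhichErrs
	if err := json.Unmarshal(data, &we); err != nil {
		panic(err)
	}
	fmt.Println("error at", we.ErrPos)
	for _, t := range we.Types {
		fmt.Printf("may contain dynamic type %s (declared at %s)\n", t.Type, t.Position)
	}
}
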
diff --git a/oracle/testdata/src/main/describe.golden b/oracle/testdata/src/main/describe.golden
index 3f305d4..33d751a 100644
--- a/oracle/testdata/src/main/describe.golden
+++ b/oracle/testdata/src/main/describe.golden
@@ -167,7 +167,5 @@ Method set:
method (I) f()
-------- @describe def-imethod-I.f --------
-type interface{f()}
-Method set:
- method (interface{f()}) f()
+definition of interface method func (I).f()
diff --git a/oracle/testdata/src/main/implements-methods-json.go b/oracle/testdata/src/main/implements-methods-json.go
new file mode 100644
index 0000000..507dca5
--- /dev/null
+++ b/oracle/testdata/src/main/implements-methods-json.go
@@ -0,0 +1,38 @@
+package main
+
+// Tests of 'implements' query applied to methods, -output=json.
+// See go.tools/oracle/oracle_test.go for explanation.
+// See implements-methods.golden for expected query results.
+
+import _ "lib"
+import _ "sort"
+
+func main() {
+}
+
+type F interface {
+ f() // @implements F.f "f"
+}
+
+type FG interface {
+ f() // @implements FG.f "f"
+ g() []int // @implements FG.g "g"
+}
+
+type C int
+type D struct{}
+
+func (c *C) f() {} // @implements *C.f "f"
+func (d D) f() {} // @implements D.f "f"
+
+func (d *D) g() []int { return nil } // @implements *D.g "g"
+
+type sorter []int
+
+func (sorter) Len() int { return 0 } // @implements Len "Len"
+func (sorter) Less(i, j int) bool { return false }
+func (sorter) Swap(i, j int) {}
+
+type I interface {
+ Method(*int) *int // @implements I.Method "Method"
+}
diff --git a/oracle/testdata/src/main/implements-methods-json.golden b/oracle/testdata/src/main/implements-methods-json.golden
new file mode 100644
index 0000000..a925caa
--- /dev/null
+++ b/oracle/testdata/src/main/implements-methods-json.golden
@@ -0,0 +1,283 @@
+-------- @implements F.f --------
+{
+ "mode": "implements",
+ "implements": {
+ "type": {
+ "name": "main.F",
+ "pos": "testdata/src/main/implements-methods-json.go:13:6",
+ "kind": "interface"
+ },
+ "to": [
+ {
+ "name": "*main.C",
+ "pos": "testdata/src/main/implements-methods-json.go:22:6",
+ "kind": "pointer"
+ },
+ {
+ "name": "main.D",
+ "pos": "testdata/src/main/implements-methods-json.go:23:6",
+ "kind": "struct"
+ },
+ {
+ "name": "main.FG",
+ "pos": "testdata/src/main/implements-methods-json.go:17:6",
+ "kind": "interface"
+ }
+ ],
+ "method": {
+ "name": "func (F).f()",
+ "pos": "testdata/src/main/implements-methods-json.go:14:2"
+ },
+ "to_method": [
+ {
+ "name": "method (*C) f()",
+ "pos": "testdata/src/main/implements-methods-json.go:25:13"
+ },
+ {
+ "name": "method (D) f()",
+ "pos": "testdata/src/main/implements-methods-json.go:26:12"
+ },
+ {
+ "name": "method (FG) f()",
+ "pos": "testdata/src/main/implements-methods-json.go:18:2"
+ }
+ ]
+ }
+}-------- @implements FG.f --------
+{
+ "mode": "implements",
+ "implements": {
+ "type": {
+ "name": "main.FG",
+ "pos": "testdata/src/main/implements-methods-json.go:17:6",
+ "kind": "interface"
+ },
+ "to": [
+ {
+ "name": "*main.D",
+ "pos": "testdata/src/main/implements-methods-json.go:23:6",
+ "kind": "pointer"
+ }
+ ],
+ "from": [
+ {
+ "name": "main.F",
+ "pos": "testdata/src/main/implements-methods-json.go:13:6",
+ "kind": "interface"
+ }
+ ],
+ "method": {
+ "name": "func (FG).f()",
+ "pos": "testdata/src/main/implements-methods-json.go:18:2"
+ },
+ "to_method": [
+ {
+ "name": "method (*D) f()",
+ "pos": "testdata/src/main/implements-methods-json.go:26:12"
+ }
+ ],
+ "from_method": [
+ {
+ "name": "method (F) f()",
+ "pos": "testdata/src/main/implements-methods-json.go:14:2"
+ }
+ ]
+ }
+}-------- @implements FG.g --------
+{
+ "mode": "implements",
+ "implements": {
+ "type": {
+ "name": "main.FG",
+ "pos": "testdata/src/main/implements-methods-json.go:17:6",
+ "kind": "interface"
+ },
+ "to": [
+ {
+ "name": "*main.D",
+ "pos": "testdata/src/main/implements-methods-json.go:23:6",
+ "kind": "pointer"
+ }
+ ],
+ "from": [
+ {
+ "name": "main.F",
+ "pos": "testdata/src/main/implements-methods-json.go:13:6",
+ "kind": "interface"
+ }
+ ],
+ "method": {
+ "name": "func (FG).g() []int",
+ "pos": "testdata/src/main/implements-methods-json.go:19:2"
+ },
+ "to_method": [
+ {
+ "name": "method (*D) g() []int",
+ "pos": "testdata/src/main/implements-methods-json.go:28:13"
+ }
+ ],
+ "from_method": [
+ {
+ "name": "",
+ "pos": ""
+ }
+ ]
+ }
+}-------- @implements *C.f --------
+{
+ "mode": "implements",
+ "implements": {
+ "type": {
+ "name": "*main.C",
+ "pos": "testdata/src/main/implements-methods-json.go:22:6",
+ "kind": "pointer"
+ },
+ "from": [
+ {
+ "name": "main.F",
+ "pos": "testdata/src/main/implements-methods-json.go:13:6",
+ "kind": "interface"
+ }
+ ],
+ "method": {
+ "name": "func (*C).f()",
+ "pos": "testdata/src/main/implements-methods-json.go:25:13"
+ },
+ "from_method": [
+ {
+ "name": "method (F) f()",
+ "pos": "testdata/src/main/implements-methods-json.go:14:2"
+ }
+ ]
+ }
+}-------- @implements D.f --------
+{
+ "mode": "implements",
+ "implements": {
+ "type": {
+ "name": "main.D",
+ "pos": "testdata/src/main/implements-methods-json.go:23:6",
+ "kind": "struct"
+ },
+ "from": [
+ {
+ "name": "main.F",
+ "pos": "testdata/src/main/implements-methods-json.go:13:6",
+ "kind": "interface"
+ }
+ ],
+ "fromptr": [
+ {
+ "name": "main.FG",
+ "pos": "testdata/src/main/implements-methods-json.go:17:6",
+ "kind": "interface"
+ }
+ ],
+ "method": {
+ "name": "func (D).f()",
+ "pos": "testdata/src/main/implements-methods-json.go:26:12"
+ },
+ "from_method": [
+ {
+ "name": "method (F) f()",
+ "pos": "testdata/src/main/implements-methods-json.go:14:2"
+ }
+ ],
+ "fromptr_method": [
+ {
+ "name": "method (FG) f()",
+ "pos": "testdata/src/main/implements-methods-json.go:18:2"
+ }
+ ]
+ }
+}-------- @implements *D.g --------
+{
+ "mode": "implements",
+ "implements": {
+ "type": {
+ "name": "*main.D",
+ "pos": "testdata/src/main/implements-methods-json.go:23:6",
+ "kind": "pointer"
+ },
+ "from": [
+ {
+ "name": "main.F",
+ "pos": "testdata/src/main/implements-methods-json.go:13:6",
+ "kind": "interface"
+ },
+ {
+ "name": "main.FG",
+ "pos": "testdata/src/main/implements-methods-json.go:17:6",
+ "kind": "interface"
+ }
+ ],
+ "method": {
+ "name": "func (*D).g() []int",
+ "pos": "testdata/src/main/implements-methods-json.go:28:13"
+ },
+ "from_method": [
+ {
+ "name": "",
+ "pos": ""
+ },
+ {
+ "name": "method (FG) g() []int",
+ "pos": "testdata/src/main/implements-methods-json.go:19:2"
+ }
+ ]
+ }
+}-------- @implements Len --------
+{
+ "mode": "implements",
+ "implements": {
+ "type": {
+ "name": "main.sorter",
+ "pos": "testdata/src/main/implements-methods-json.go:30:6",
+ "kind": "slice"
+ },
+ "from": [
+ {
+ "name": "sort.Interface",
+ "pos": "/usr/local/google/home/adonovan/go/src/sort/sort.go:12:6",
+ "kind": "interface"
+ }
+ ],
+ "method": {
+ "name": "func (sorter).Len() int",
+ "pos": "testdata/src/main/implements-methods-json.go:32:15"
+ },
+ "from_method": [
+ {
+ "name": "method (sort.Interface) Len() int",
+ "pos": "/usr/local/google/home/adonovan/go/src/sort/sort.go:14:2"
+ }
+ ]
+ }
+}-------- @implements I.Method --------
+{
+ "mode": "implements",
+ "implements": {
+ "type": {
+ "name": "main.I",
+ "pos": "testdata/src/main/implements-methods-json.go:36:6",
+ "kind": "interface"
+ },
+ "to": [
+ {
+ "name": "lib.Type",
+ "pos": "testdata/src/lib/lib.go:3:6",
+ "kind": "basic"
+ }
+ ],
+ "method": {
+ "name": "func (I).Method(*int) *int",
+ "pos": "testdata/src/main/implements-methods-json.go:37:2"
+ },
+ "to_method": [
+ {
+ "name": "method (lib.Type) Method(x *int) *int",
+ "pos": "testdata/src/lib/lib.go:5:13"
+ }
+ ]
+ }
+}
\ No newline at end of file
diff --git a/oracle/testdata/src/main/implements-methods.go b/oracle/testdata/src/main/implements-methods.go
new file mode 100644
index 0000000..4cc4288
--- /dev/null
+++ b/oracle/testdata/src/main/implements-methods.go
@@ -0,0 +1,38 @@
+package main
+
+// Tests of 'implements' query applied to methods.
+// See go.tools/oracle/oracle_test.go for explanation.
+// See implements-methods.golden for expected query results.
+
+import _ "lib"
+import _ "sort"
+
+func main() {
+}
+
+type F interface {
+ f() // @implements F.f "f"
+}
+
+type FG interface {
+ f() // @implements FG.f "f"
+ g() []int // @implements FG.g "g"
+}
+
+type C int
+type D struct{}
+
+func (c *C) f() {} // @implements *C.f "f"
+func (d D) f() {} // @implements D.f "f"
+
+func (d *D) g() []int { return nil } // @implements *D.g "g"
+
+type sorter []int
+
+func (sorter) Len() int { return 0 } // @implements Len "Len"
+func (sorter) Less(i, j int) bool { return false }
+func (sorter) Swap(i, j int) {}
+
+type I interface {
+ Method(*int) *int // @implements I.Method "Method"
+}
diff --git a/oracle/testdata/src/main/implements-methods.golden b/oracle/testdata/src/main/implements-methods.golden
new file mode 100644
index 0000000..11ccaf4
--- /dev/null
+++ b/oracle/testdata/src/main/implements-methods.golden
@@ -0,0 +1,37 @@
+-------- @implements F.f --------
+abstract method func (F).f()
+ is implemented by method (*C).f
+ is implemented by method (D).f
+ is implemented by method (FG).f
+
+-------- @implements FG.f --------
+abstract method func (FG).f()
+ is implemented by method (*D).f
+ implements method (F).f
+
+-------- @implements FG.g --------
+abstract method func (FG).g() []int
+ is implemented by method (*D).g
+
+-------- @implements *C.f --------
+concrete method func (*C).f()
+ implements method (F).f
+
+-------- @implements D.f --------
+concrete method func (D).f()
+ implements method (F).f
+concrete method func (D).f()
+ implements method (FG).f
+
+-------- @implements *D.g --------
+concrete method func (*D).g() []int
+ implements method (FG).g
+
+-------- @implements Len --------
+concrete method func (sorter).Len() int
+ implements method (sort.Interface).Len
+
+-------- @implements I.Method --------
+abstract method func (I).Method(*int) *int
+ is implemented by method (lib.Type).Method
+
diff --git a/oracle/testdata/src/main/whicherrs.go b/oracle/testdata/src/main/whicherrs.go
new file mode 100644
index 0000000..27fe6b5
--- /dev/null
+++ b/oracle/testdata/src/main/whicherrs.go
@@ -0,0 +1,27 @@
+package main
+
+type errType string
+
+const constErr errType = "blah"
+
+func (et errType) Error() string {
+ return string(et)
+}
+
+var errVar error = errType("foo")
+
+func genErr(i int) error {
+ switch i {
+ case 0:
+ return constErr
+ case 1:
+ return errVar
+ default:
+ return nil
+ }
+}
+
+func main() {
+ err := genErr(0) // @whicherrs localerrs "err"
+ _ = err
+}
diff --git a/oracle/testdata/src/main/whicherrs.golden b/oracle/testdata/src/main/whicherrs.golden
new file mode 100644
index 0000000..1118e0a
--- /dev/null
+++ b/oracle/testdata/src/main/whicherrs.golden
@@ -0,0 +1,8 @@
+-------- @whicherrs localerrs --------
+this error may point to these globals:
+ errVar
+this error may contain these constants:
+ constErr
+this error may contain these dynamic types:
+ errType
+
diff --git a/oracle/what.go b/oracle/what.go
index 9ffc067..c1053f4 100644
--- a/oracle/what.go
+++ b/oracle/what.go
@@ -14,7 +14,7 @@ import (
"sort"
"strings"
- "golang.org/x/tools/astutil"
+ "golang.org/x/tools/go/ast/astutil"
"golang.org/x/tools/oracle/serial"
)
diff --git a/oracle/whicherrs.go b/oracle/whicherrs.go
new file mode 100644
index 0000000..a73aa8d
--- /dev/null
+++ b/oracle/whicherrs.go
@@ -0,0 +1,294 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oracle
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "sort"
+
+ "golang.org/x/tools/go/ast/astutil"
+ "golang.org/x/tools/go/ssa"
+ "golang.org/x/tools/go/ssa/ssautil"
+ "golang.org/x/tools/go/types"
+ "golang.org/x/tools/oracle/serial"
+)
+
+var builtinErrorType = types.Universe.Lookup("error").Type()
+
+// whicherrs takes the position of an error value and tries to find all types,
+// constants and global values which that error may point to and which can be
+// checked from the scope where the error lives.
+// In short, it returns a list of things that can be checked against in order
+// to handle the error properly.
+//
+// TODO(dmorsing): figure out if fields in errors like *os.PathError.Err
+// can be queried recursively somehow.
+func whicherrs(o *Oracle, qpos *QueryPos) (queryResult, error) {
+ path, action := findInterestingNode(qpos.info, qpos.path)
+ if action != actionExpr {
+ return nil, fmt.Errorf("whicherrs wants an expression; got %s",
+ astutil.NodeDescription(qpos.path[0]))
+ }
+ var expr ast.Expr
+ var obj types.Object
+ switch n := path[0].(type) {
+ case *ast.ValueSpec:
+ // ambiguous ValueSpec containing multiple names
+ return nil, fmt.Errorf("multiple value specification")
+ case *ast.Ident:
+ obj = qpos.info.ObjectOf(n)
+ expr = n
+ case ast.Expr:
+ expr = n
+ default:
+ return nil, fmt.Errorf("unexpected AST for expr: %T", n)
+ }
+
+ typ := qpos.info.TypeOf(expr)
+ if !types.Identical(typ, builtinErrorType) {
+ return nil, fmt.Errorf("selection is not an expression of type 'error'")
+ }
+ // Determine the ssa.Value for the expression.
+ var value ssa.Value
+ var err error
+ if obj != nil {
+ // def/ref of func/var object
+ value, _, err = ssaValueForIdent(o.prog, qpos.info, obj, path)
+ } else {
+ value, _, err = ssaValueForExpr(o.prog, qpos.info, path)
+ }
+ if err != nil {
+ return nil, err // e.g. trivially dead code
+ }
+ buildSSA(o)
+
+ globals := findVisibleErrs(o.prog, qpos)
+ constants := findVisibleConsts(o.prog, qpos)
+
+ res := &whicherrsResult{
+ qpos: qpos,
+ errpos: expr.Pos(),
+ }
+
+ // Find the instruction which initialized each global error.
+ // If more than one instruction has stored to a global,
+ // remove that global from the set of values that we want to query.
+ allFuncs := ssautil.AllFunctions(o.prog)
+ for fn := range allFuncs {
+ for _, b := range fn.Blocks {
+ for _, instr := range b.Instrs {
+ store, ok := instr.(*ssa.Store)
+ if !ok {
+ continue
+ }
+ gval, ok := store.Addr.(*ssa.Global)
+ if !ok {
+ continue
+ }
+ gbl, ok := globals[gval]
+ if !ok {
+ continue
+ }
+ // We already found a store to this global.
+ // The normal error definition is just one store in init,
+ // so we remove this global from the set we want to query.
+ if gbl != nil {
+ delete(globals, gval)
+ }
+ globals[gval] = store.Val
+ }
+ }
+ }
+
+ o.ptaConfig.AddQuery(value)
+ for _, v := range globals {
+ o.ptaConfig.AddQuery(v)
+ }
+
+ ptares := ptrAnalysis(o)
+ valueptr := ptares.Queries[value]
+ for g, v := range globals {
+ ptr, ok := ptares.Queries[v]
+ if !ok {
+ continue
+ }
+ if !ptr.MayAlias(valueptr) {
+ continue
+ }
+ res.globals = append(res.globals, g)
+ }
+ pts := valueptr.PointsTo()
+ dedup := make(map[*ssa.NamedConst]bool)
+ for _, label := range pts.Labels() {
+ // These values are either MakeInterfaces or reflect
+ // generated interfaces. For the purposes of this
+ // analysis, we don't care about reflect generated ones
+ makeiface, ok := label.Value().(*ssa.MakeInterface)
+ if !ok {
+ continue
+ }
+ constval, ok := makeiface.X.(*ssa.Const)
+ if !ok {
+ continue
+ }
+ c := constants[*constval]
+ if c != nil && !dedup[c] {
+ dedup[c] = true
+ res.consts = append(res.consts, c)
+ }
+ }
+ concs := pts.DynamicTypes()
+ concs.Iterate(func(conc types.Type, _ interface{}) {
+ // go/types is a bit annoying here.
+ // We want to find all the types that we can
+ // typeswitch or assert to. This means finding out
+ // if the type pointed to can be seen by us.
+ //
+ // For the purposes of this analysis, the type is always
+ // either a Named type or a pointer to one.
+ // There are cases where error can be implemented
+ // by unnamed types, but in that case, we can't assert to
+ // it, so we don't care about it for this analysis.
+ var name *types.TypeName
+ switch t := conc.(type) {
+ case *types.Pointer:
+ named, ok := t.Elem().(*types.Named)
+ if !ok {
+ return
+ }
+ name = named.Obj()
+ case *types.Named:
+ name = t.Obj()
+ default:
+ return
+ }
+ if !isAccessibleFrom(name, qpos.info.Pkg) {
+ return
+ }
+ res.types = append(res.types, &errorType{conc, name})
+ })
+ sort.Sort(membersByPosAndString(res.globals))
+ sort.Sort(membersByPosAndString(res.consts))
+ sort.Sort(sorterrorType(res.types))
+ return res, nil
+}
+
+// findVisibleErrs returns a mapping from each package-level variable of type "error" to nil.
+func findVisibleErrs(prog *ssa.Program, qpos *QueryPos) map[*ssa.Global]ssa.Value {
+ globals := make(map[*ssa.Global]ssa.Value)
+ for _, pkg := range prog.AllPackages() {
+ for _, mem := range pkg.Members {
+ gbl, ok := mem.(*ssa.Global)
+ if !ok {
+ continue
+ }
+ gbltype := gbl.Type()
+ // globals are always pointers
+ if !types.Identical(deref(gbltype), builtinErrorType) {
+ continue
+ }
+ if !isAccessibleFrom(gbl.Object(), qpos.info.Pkg) {
+ continue
+ }
+ globals[gbl] = nil
+ }
+ }
+ return globals
+}
+
+// findVisibleConsts returns a mapping from each package-level constant assignable to type "error", to nil.
+func findVisibleConsts(prog *ssa.Program, qpos *QueryPos) map[ssa.Const]*ssa.NamedConst {
+ constants := make(map[ssa.Const]*ssa.NamedConst)
+ for _, pkg := range prog.AllPackages() {
+ for _, mem := range pkg.Members {
+ obj, ok := mem.(*ssa.NamedConst)
+ if !ok {
+ continue
+ }
+ consttype := obj.Type()
+ if !types.AssignableTo(consttype, builtinErrorType) {
+ continue
+ }
+ if !isAccessibleFrom(obj.Object(), qpos.info.Pkg) {
+ continue
+ }
+ constants[*obj.Value] = obj
+ }
+ }
+
+ return constants
+}
+
+type membersByPosAndString []ssa.Member
+
+func (a membersByPosAndString) Len() int { return len(a) }
+func (a membersByPosAndString) Less(i, j int) bool {
+ cmp := a[i].Pos() - a[j].Pos()
+ return cmp < 0 || cmp == 0 && a[i].String() < a[j].String()
+}
+func (a membersByPosAndString) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+type sorterrorType []*errorType
+
+func (a sorterrorType) Len() int { return len(a) }
+func (a sorterrorType) Less(i, j int) bool {
+ cmp := a[i].obj.Pos() - a[j].obj.Pos()
+ return cmp < 0 || cmp == 0 && a[i].typ.String() < a[j].typ.String()
+}
+func (a sorterrorType) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+type errorType struct {
+ typ types.Type // concrete type N or *N that implements error
+ obj *types.TypeName // the named type N
+}
+
+type whicherrsResult struct {
+ qpos *QueryPos
+ errpos token.Pos
+ globals []ssa.Member
+ consts []ssa.Member
+ types []*errorType
+}
+
+func (r *whicherrsResult) display(printf printfFunc) {
+ if len(r.globals) > 0 {
+ printf(r.qpos, "this error may point to these globals:")
+ for _, g := range r.globals {
+ printf(g.Pos(), "\t%s", g.RelString(r.qpos.info.Pkg))
+ }
+ }
+ if len(r.consts) > 0 {
+ printf(r.qpos, "this error may contain these constants:")
+ for _, c := range r.consts {
+ printf(c.Pos(), "\t%s", c.RelString(r.qpos.info.Pkg))
+ }
+ }
+ if len(r.types) > 0 {
+ printf(r.qpos, "this error may contain these dynamic types:")
+ for _, t := range r.types {
+ printf(t.obj.Pos(), "\t%s", r.qpos.TypeString(t.typ))
+ }
+ }
+}
+
+func (r *whicherrsResult) toSerial(res *serial.Result, fset *token.FileSet) {
+ we := &serial.WhichErrs{}
+ we.ErrPos = fset.Position(r.errpos).String()
+ for _, g := range r.globals {
+ we.Globals = append(we.Globals, fset.Position(g.Pos()).String())
+ }
+ for _, c := range r.consts {
+ we.Constants = append(we.Constants, fset.Position(c.Pos()).String())
+ }
+ for _, t := range r.types {
+ var et serial.WhichErrsType
+ et.Type = r.qpos.TypeString(t.typ)
+ et.Position = fset.Position(t.obj.Pos()).String()
+ we.Types = append(we.Types, et)
+ }
+ res.WhichErrs = we
+}
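
whicherrsResult sorts its globals, constants and types by position and then by string so that the output is deterministic. A standalone sketch of that two-key comparison with a plain sort.Interface (the item type is invented for illustration):

package main

import (
	"fmt"
	"sort"
)

type item struct {
	pos  int // stand-in for token.Pos
	name string
}

type byPosAndName []item

func (a byPosAndName) Len() int      { return len(a) }
func (a byPosAndName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byPosAndName) Less(i, j int) bool {
	// Primary key: position; secondary key: name, for a deterministic order.
	cmp := a[i].pos - a[j].pos
	return cmp < 0 || cmp == 0 && a[i].name < a[j].name
}

func main() {
	items := []item{{30, "b"}, {10, "z"}, {30, "a"}, {10, "a"}}
	sort.Sort(byPosAndName(items))
	fmt.Println(items) // [{10 a} {10 z} {30 a} {30 b}]
}
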
diff --git a/playground/common.go b/playground/common.go
index 0551362..3ffce88 100644
--- a/playground/common.go
+++ b/playground/common.go
@@ -5,7 +5,7 @@
// Package playground registers HTTP handlers at "/compile" and "/share" that
// proxy requests to the golang.org playground service.
// This package may be used unaltered on App Engine.
-package playground
+package playground // import "golang.org/x/tools/playground"
import (
"bytes"
diff --git a/playground/socket/socket.go b/playground/socket/socket.go
index fe1ac9e..6905d0c 100644
--- a/playground/socket/socket.go
+++ b/playground/socket/socket.go
@@ -11,7 +11,7 @@
// The wire format is JSON and is described by the Message type.
//
// This will not run on App Engine as WebSockets are not supported there.
-package socket
+package socket // import "golang.org/x/tools/playground/socket"
import (
"bytes"
diff --git a/present/doc.go b/present/doc.go
index cc2f008..584e0c1 100644
--- a/present/doc.go
+++ b/present/doc.go
@@ -202,4 +202,4 @@ It is your responsibility to make sure the included HTML is valid and safe.
.html file.html
*/
-package present
+package present // import "golang.org/x/tools/present"
diff --git a/refactor/eg/eg.go b/refactor/eg/eg.go
index 933078f..7d37ec6 100644
--- a/refactor/eg/eg.go
+++ b/refactor/eg/eg.go
@@ -1,11 +1,12 @@
// Package eg implements the example-based refactoring tool whose
// command-line is defined in golang.org/x/tools/cmd/eg.
-package eg
+package eg // import "golang.org/x/tools/refactor/eg"
import (
"bytes"
"fmt"
"go/ast"
+ "go/format"
"go/printer"
"go/token"
"os"
@@ -288,7 +289,7 @@ func WriteAST(fset *token.FileSet, filename string, f *ast.File) (err error) {
err = err2 // prefer earlier error
}
}()
- return printer.Fprint(fh, fset, f)
+ return format.Node(fh, fset, f)
}
// -- utilities --------------------------------------------------------
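
WriteAST now prints through go/format's format.Node instead of printer.Fprint, so rewritten files come out in standard gofmt layout. A small sketch of the call on a deliberately badly spaced source string (the input is invented):

package main

import (
	"go/format"
	"go/parser"
	"go/token"
	"os"
)

func main() {
	const src = "package p\nfunc   f( x int )int{ return x+1 }\n"
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	// format.Node applies standard gofmt layout as it prints the AST.
	if err := format.Node(os.Stdout, fset, f); err != nil {
		panic(err)
	}
}
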
diff --git a/refactor/eg/eg_test.go b/refactor/eg/eg_test.go
index 6e9fc4b..c44256c 100644
--- a/refactor/eg/eg_test.go
+++ b/refactor/eg/eg_test.go
@@ -36,9 +36,8 @@ func Test(t *testing.T) {
}
conf := loader.Config{
- Fset: token.NewFileSet(),
- ParserMode: parser.ParseComments,
- SourceImports: true,
+ Fset: token.NewFileSet(),
+ ParserMode: parser.ParseComments,
}
// Each entry is a single-file package.
@@ -72,9 +71,7 @@ func Test(t *testing.T) {
"testdata/expr_type_mismatch.template",
} {
pkgname := strings.TrimSuffix(filepath.Base(filename), ".go")
- if err := conf.CreateFromFilenames(pkgname, filename); err != nil {
- t.Fatal(err)
- }
+ conf.CreateFromFilenames(pkgname, filename)
}
iprog, err := conf.Load()
if err != nil {
diff --git a/refactor/eg/match.go b/refactor/eg/match.go
index f524505..298a258 100644
--- a/refactor/eg/match.go
+++ b/refactor/eg/match.go
@@ -8,6 +8,7 @@ import (
"os"
"reflect"
+ "golang.org/x/tools/go/ast/astutil"
"golang.org/x/tools/go/exact"
"golang.org/x/tools/go/loader"
"golang.org/x/tools/go/types"
@@ -216,18 +217,7 @@ func (tr *Transformer) matchWildcard(xobj *types.Var, y ast.Expr) bool {
// -- utilities --------------------------------------------------------
-// unparen returns e with any enclosing parentheses stripped.
-// TODO(adonovan): move to astutil package.
-func unparen(e ast.Expr) ast.Expr {
- for {
- p, ok := e.(*ast.ParenExpr)
- if !ok {
- break
- }
- e = p.X
- }
- return e
-}
+func unparen(e ast.Expr) ast.Expr { return astutil.Unparen(e) }
// isRef returns the object referred to by this (possibly qualified)
// identifier, or nil if the node is not a referring identifier.
diff --git a/refactor/eg/rewrite.go b/refactor/eg/rewrite.go
index 61cfe88..db9c693 100644
--- a/refactor/eg/rewrite.go
+++ b/refactor/eg/rewrite.go
@@ -14,7 +14,7 @@ import (
"strconv"
"strings"
- "golang.org/x/tools/astutil"
+ "golang.org/x/tools/go/ast/astutil"
"golang.org/x/tools/go/types"
)
diff --git a/refactor/eg/testdata/A1.golden b/refactor/eg/testdata/A1.golden
index 4f7ba82..7eb2934 100644
--- a/refactor/eg/testdata/A1.golden
+++ b/refactor/eg/testdata/A1.golden
@@ -3,8 +3,8 @@
package A1
import (
- . "fmt"
"errors"
+ . "fmt"
myfmt "fmt"
"os"
"strings"
diff --git a/refactor/eg/testdata/A2.golden b/refactor/eg/testdata/A2.golden
index 5c2384b..b6e3a6d 100644
--- a/refactor/eg/testdata/A2.golden
+++ b/refactor/eg/testdata/A2.golden
@@ -6,8 +6,8 @@ package A2
// TODO(adonovan): fix: it should also remove "fmt".
import (
- myfmt "fmt"
"errors"
+ myfmt "fmt"
)
func example(n int) {
diff --git a/refactor/eg/testdata/D1.golden b/refactor/eg/testdata/D1.golden
index 3f2dc59..2932652 100644
--- a/refactor/eg/testdata/D1.golden
+++ b/refactor/eg/testdata/D1.golden
@@ -5,8 +5,8 @@ package D1
import "fmt"
func example() {
- fmt.Println(456, "!") // match
- fmt.Println(456, "!") // match
- fmt.Println(456, "!") // match
- fmt.Println(100+20+3, "a"+"") // no match: constant expressions, but not basic literals
+ fmt.Println(456, "!") // match
+ fmt.Println(456, "!") // match
+ fmt.Println(456, "!") // match
+ fmt.Println(100+20+3, "a"+"") // no match: constant expressions, but not basic literals
}
diff --git a/refactor/eg/testdata/E1.golden b/refactor/eg/testdata/E1.golden
index a0adfc8..796364f 100644
--- a/refactor/eg/testdata/E1.golden
+++ b/refactor/eg/testdata/E1.golden
@@ -3,11 +3,11 @@
package E1
import (
+ "fmt"
"log"
"os"
- "fmt"
)
func example() {
- fmt.Fprintf(os.Stderr, "warning: %v", "oops") // match
+ fmt.Fprintf(os.Stderr, "warning: %v", "oops") // match
}
diff --git a/refactor/eg/testdata/F1.go b/refactor/eg/testdata/F1.go
index 0b9c678..2258abd 100644
--- a/refactor/eg/testdata/F1.go
+++ b/refactor/eg/testdata/F1.go
@@ -37,7 +37,7 @@ func example(n int) {
z.Lock()
// Should be no match however currently matches due to:
- // https://code.google.com/p/go/issues/detail?id=8584
+ // https://golang.org/issue/8584
// Will start failing when this is fixed then just change golden to
// No match pointer indirect
// a.Lock()
diff --git a/refactor/eg/testdata/F1.golden b/refactor/eg/testdata/F1.golden
index c691b37..5ffda69 100644
--- a/refactor/eg/testdata/F1.golden
+++ b/refactor/eg/testdata/F1.golden
@@ -37,7 +37,7 @@ func example(n int) {
z.RLock()
// Should be no match however currently matches due to:
- // https://code.google.com/p/go/issues/detail?id=8584
+ // https://golang.org/issue/8584
// Will start failing when this is fixed then just change golden to
// No match pointer indirect
// a.Lock()
diff --git a/refactor/importgraph/graph.go b/refactor/importgraph/graph.go
index bbdbb33..df73e23 100644
--- a/refactor/importgraph/graph.go
+++ b/refactor/importgraph/graph.go
@@ -4,7 +4,7 @@
// Package importgraph computes the forward and reverse import
// dependency graphs for all packages in a Go workspace.
-package importgraph
+package importgraph // import "golang.org/x/tools/refactor/importgraph"
import (
"go/build"
@@ -51,75 +51,73 @@ func (g Graph) Search(roots ...string) map[string]bool {
return seen
}
-// Builds scans the specified Go workspace and builds the forward and
+// Build scans the specified Go workspace and builds the forward and
// reverse import dependency graphs for all its packages.
// It also returns a mapping from import paths to errors for packages
// that could not be loaded.
func Build(ctxt *build.Context) (forward, reverse Graph, errors map[string]error) {
- // The (sole) graph builder goroutine receives a stream of import
- // edges from the package loading goroutine.
- forward = make(Graph)
- reverse = make(Graph)
- edgec := make(chan [2]string)
- go func() {
- for edge := range edgec {
- if edge[1] == "C" {
- continue // "C" is fake
- }
- forward.addEdge(edge[0], edge[1])
- reverse.addEdge(edge[1], edge[0])
- }
- }()
-
- // The (sole) error goroutine receives a stream of ReadDir and
- // Import errors.
+ type importEdge struct {
+ from, to string
+ }
type pathError struct {
path string
err error
}
- errorc := make(chan pathError)
- go func() {
- for e := range errorc {
- if errors == nil {
- errors = make(map[string]error)
- }
- errors[e.path] = e.err
- }
- }()
+
+ ch := make(chan interface{})
var wg sync.WaitGroup
buildutil.ForEachPackage(ctxt, func(path string, err error) {
- if err != nil {
- errorc <- pathError{path, err}
- return
- }
wg.Add(1)
- // The import goroutines load the metadata for each package.
- go func(path string) {
+ go func() {
defer wg.Done()
+ if err != nil {
+ ch <- pathError{path, err}
+ return
+ }
bp, err := ctxt.Import(path, "", 0)
if _, ok := err.(*build.NoGoError); ok {
return // empty directory is not an error
}
if err != nil {
- errorc <- pathError{path, err}
+ ch <- pathError{path, err}
return
}
for _, imp := range bp.Imports {
- edgec <- [2]string{path, imp}
+ ch <- importEdge{path, imp}
}
for _, imp := range bp.TestImports {
- edgec <- [2]string{path, imp}
+ ch <- importEdge{path, imp}
}
for _, imp := range bp.XTestImports {
- edgec <- [2]string{path, imp}
+ ch <- importEdge{path, imp}
}
- }(path)
+ }()
})
- wg.Wait()
+ go func() {
+ wg.Wait()
+ close(ch)
+ }()
- close(edgec)
- close(errorc)
+ forward = make(Graph)
+ reverse = make(Graph)
+
+ for e := range ch {
+ switch e := e.(type) {
+ case pathError:
+ if errors == nil {
+ errors = make(map[string]error)
+ }
+ errors[e.path] = e.err
+
+ case importEdge:
+ if e.to == "C" {
+ continue // "C" is fake
+ }
+ forward.addEdge(e.from, e.to)
+ reverse.addEdge(e.to, e.from)
+ }
+ }
return forward, reverse, errors
}
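
The rewritten Build funnels both import edges and load errors from the per-package goroutines into a single channel and lets one consumer type-switch on the messages; a separate goroutine closes the channel once the WaitGroup drains. A reduced sketch of that fan-in shape (the message types and the fake work are invented):

package main

import (
	"fmt"
	"sync"
)

type edge struct{ from, to string }
type failure struct {
	path string
	err  error
}

func main() {
	inputs := []string{"a", "b", "c"}

	ch := make(chan interface{})
	var wg sync.WaitGroup
	for _, path := range inputs {
		wg.Add(1)
		go func(path string) {
			defer wg.Done()
			// Producers send results of different kinds on one channel.
			ch <- edge{from: path, to: "fmt"}
		}(path)
	}
	// Close the channel only after every producer has finished,
	// so the consumer's range loop terminates.
	go func() {
		wg.Wait()
		close(ch)
	}()

	for m := range ch {
		switch m := m.(type) {
		case edge:
			fmt.Println("edge:", m.from, "->", m.to)
		case failure:
			fmt.Println("error loading", m.path, ":", m.err)
		}
	}
}
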
diff --git a/refactor/lexical/lexical.go b/refactor/lexical/lexical.go
index 31dfb0f..55ec391 100644
--- a/refactor/lexical/lexical.go
+++ b/refactor/lexical/lexical.go
@@ -9,7 +9,7 @@
//
// THIS INTERFACE IS EXPERIMENTAL AND MAY CHANGE OR BE REMOVED IN FUTURE.
//
-package lexical
+package lexical // import "golang.org/x/tools/refactor/lexical"
// OVERVIEW
//
diff --git a/refactor/lexical/lexical_test.go b/refactor/lexical/lexical_test.go
index ada6595..fd237ce 100644
--- a/refactor/lexical/lexical_test.go
+++ b/refactor/lexical/lexical_test.go
@@ -32,16 +32,10 @@ func TestStdlib(t *testing.T) {
"golang.org/x/tools/refactor/lexical")
// Load, parse and type-check the program.
- conf := loader.Config{
- Build: &ctxt,
- SourceImports: true,
- }
+ conf := loader.Config{Build: &ctxt}
for _, path := range pkgs {
- if err := conf.ImportWithTests(path); err != nil {
- t.Error(err)
- }
+ conf.ImportWithTests(path)
}
-
iprog, err := conf.Load()
if err != nil {
t.Fatalf("Load failed: %v", err)
diff --git a/refactor/rename/check.go b/refactor/rename/check.go
index dc25b5c..017a604 100644
--- a/refactor/rename/check.go
+++ b/refactor/rename/check.go
@@ -39,7 +39,7 @@ func (r *renamer) check(from types.Object) {
r.checkInPackageBlock(from)
} else if v, ok := from.(*types.Var); ok && v.IsField() {
r.checkStructField(v)
- } else if f, ok := from.(*types.Func); ok && f.Type().(*types.Signature).Recv() != nil {
+ } else if f, ok := from.(*types.Func); ok && recv(f) != nil {
r.checkMethod(f)
} else if isLocal(from) {
r.checkInLocalScope(from)
@@ -177,7 +177,7 @@ func (r *renamer) checkInLocalScope(from types.Object) {
// same-, sub-, and super-block conflicts. We will illustrate all three
// using this example:
//
-// var x int
+// var x int
// var z int
//
// func f(y int) {
@@ -430,7 +430,7 @@ func (r *renamer) selectionConflict(from types.Object, delta int, syntax *ast.Se
// analogous to sub-block conflict
r.errorf(syntax.Sel.Pos(),
"\twould change the referent of this selection")
- r.errorf(obj.Pos(), "\tto this %s", objectKind(obj))
+ r.errorf(obj.Pos(), "\tof this %s", objectKind(obj))
case delta == 0:
// analogous to same-block conflict
r.errorf(syntax.Sel.Pos(),
@@ -440,7 +440,7 @@ func (r *renamer) selectionConflict(from types.Object, delta int, syntax *ast.Se
// analogous to super-block conflict
r.errorf(syntax.Sel.Pos(),
"\twould shadow this selection")
- r.errorf(obj.Pos(), "\tto the %s declared here",
+ r.errorf(obj.Pos(), "\tof the %s declared here",
objectKind(obj))
}
}
@@ -449,7 +449,11 @@ func (r *renamer) selectionConflict(from types.Object, delta int, syntax *ast.Se
// There are three hazards:
// - declaration conflicts
// - selection ambiguity/changes
-// - entailed renamings of assignable concrete/interface types (for now, just reject)
+// - entailed renamings of assignable concrete/interface types.
+// We reject a renaming initiated at a concrete method if it would
+// change the assignability relation. For a renaming of an abstract
+// method, we also rename all methods transitively coupled to it via
+// assignability.
func (r *renamer) checkMethod(from *types.Func) {
// e.g. error.Error
if from.Pkg() == nil {
@@ -457,50 +461,20 @@ func (r *renamer) checkMethod(from *types.Func) {
return
}
- // As always, having to support concrete methods with pointer
- // and non-pointer receivers, and named vs unnamed types with
- // methods, makes tooling fun.
-
- // ASSIGNABILITY
- //
- // For now, if any method renaming breaks a required
- // assignability to another type, we reject it.
- //
- // TODO(adonovan): probably we should compute the entailed
- // renamings so that an interface method renaming causes
- // concrete methods to change too. But which ones?
- //
- // There is no correct answer, only heuristics, because Go's
- // "duck typing" doesn't distinguish intentional from contingent
- // assignability. There are two obvious approaches:
- //
- // (1) Update the minimum set of types to preserve the
- // assignability of types all syntactic assignments
- // (incl. implicit ones in calls, returns, sends, etc).
- // The satisfy.Finder enumerates these.
- // This is likely to be an underapproximation.
- //
- // (2) Update all types that are assignable to/from the changed
- // type. This requires computing the "implements" relation
- // for all pairs of types (as godoc and oracle do).
- // This is likely to be an overapproximation.
- //
- // If a concrete type is renamed, we probably do not want to
- // rename corresponding interfaces; interface renamings should
- // probably be initiated at the interface. (But what if a
- // concrete type implements multiple interfaces with the same
- // method? Then the user is stuck.)
- //
- // We need some experience before we decide how to implement this.
+ // ASSIGNABILITY: We reject renamings of concrete methods that
+ // would break a 'satisfy' constraint; but renamings of abstract
+ // methods are allowed to proceed, and we rename affected
+ // concrete and abstract methods as necessary. It is the
+ // initial method that determines the policy.
// Check for conflict at point of declaration.
// Check to ensure preservation of assignability requirements.
- recv := from.Type().(*types.Signature).Recv().Type()
- if isInterface(recv) {
+ R := recv(from).Type()
+ if isInterface(R) {
// Abstract method
// declaration
- prev, _, _ := types.LookupFieldOrMethod(recv, false, from.Pkg(), r.to)
+ prev, _, _ := types.LookupFieldOrMethod(R, false, from.Pkg(), r.to)
if prev != nil {
r.errorf(from.Pos(), "renaming this interface method %q to %q",
from.Name(), r.to)
@@ -542,30 +516,100 @@ func (r *renamer) checkMethod(from *types.Func) {
}
// assignability
- for T := range r.findAssignments(recv) {
- if obj, _, _ := types.LookupFieldOrMethod(T, false, from.Pkg(), from.Name()); obj == nil {
+ //
+ // Find the set of concrete or abstract methods directly
+ // coupled to abstract method 'from' by some
+ // satisfy.Constraint, and rename them too.
+ for key := range r.satisfy() {
+ // key = (lhs, rhs) where lhs is always an interface.
+
+ lsel := r.msets.MethodSet(key.LHS).Lookup(from.Pkg(), from.Name())
+ if lsel == nil {
+ continue
+ }
+ rmethods := r.msets.MethodSet(key.RHS)
+ rsel := rmethods.Lookup(from.Pkg(), from.Name())
+ if rsel == nil {
continue
}
- r.errorf(from.Pos(), "renaming this method %q to %q",
- from.Name(), r.to)
- var pos token.Pos
- var other string
- if named, ok := T.(*types.Named); ok {
- pos = named.Obj().Pos()
- other = named.Obj().Name()
- } else {
- pos = from.Pos()
- other = T.String()
+ // If both sides have a method of this name,
+ // and one of them is 'from', the other must be coupled.
+ var coupled *types.Func
+ switch from {
+ case lsel.Obj():
+ coupled = rsel.Obj().(*types.Func)
+ case rsel.Obj():
+ coupled = lsel.Obj().(*types.Func)
+ default:
+ continue
}
- r.errorf(pos, "\twould make %s no longer assignable to it", other)
- return
+
+ // We must treat concrete-to-interface
+ // constraints like an implicit selection C.f of
+ // each interface method I.f, and check that the
+ // renaming leaves the selection unchanged and
+ // unambiguous.
+ //
+ // Fun fact: the implicit selection of C.f
+ // type I interface{f()}
+ // type C struct{I}
+ // func (C) g()
+ // var _ I = C{} // here
+ // yields abstract method I.f. This can make error
+ // messages less than obvious.
+ //
+ if !isInterface(key.RHS) {
+ // The logic below was derived from checkSelections.
+
+ rtosel := rmethods.Lookup(from.Pkg(), r.to)
+ if rtosel != nil {
+ rto := rtosel.Obj().(*types.Func)
+ delta := len(rsel.Index()) - len(rtosel.Index())
+ if delta < 0 {
+ continue // no ambiguity
+ }
+
+ // TODO(adonovan): record the constraint's position.
+ keyPos := token.NoPos
+
+ r.errorf(from.Pos(), "renaming this method %q to %q",
+ from.Name(), r.to)
+ if delta == 0 {
+ // analogous to same-block conflict
+ r.errorf(keyPos, "\twould make the %s method of %s invoked via interface %s ambiguous",
+ r.to, key.RHS, key.LHS)
+ r.errorf(rto.Pos(), "\twith (%s).%s",
+ recv(rto).Type(), r.to)
+ } else {
+ // analogous to super-block conflict
+ r.errorf(keyPos, "\twould change the %s method of %s invoked via interface %s",
+ r.to, key.RHS, key.LHS)
+ r.errorf(coupled.Pos(), "\tfrom (%s).%s",
+ recv(coupled).Type(), r.to)
+ r.errorf(rto.Pos(), "\tto (%s).%s",
+ recv(rto).Type(), r.to)
+ }
+ return // one error is enough
+ }
+ }
+
+ if !r.changeMethods {
+ // This should be unreachable.
+ r.errorf(from.Pos(), "internal error: during renaming of abstract method %s", from)
+ r.errorf(coupled.Pos(), "\tchangedMethods=false, coupled method=%s", coupled)
+ r.errorf(from.Pos(), "\tPlease file a bug report")
+ return
+ }
+
+ // Rename the coupled method to preserve assignability.
+ r.check(coupled)
}
} else {
// Concrete method
// declaration
- prev, indices, _ := types.LookupFieldOrMethod(recv, true, from.Pkg(), r.to)
+ prev, indices, _ := types.LookupFieldOrMethod(R, true, from.Pkg(), r.to)
if prev != nil && len(indices) == 1 {
r.errorf(from.Pos(), "renaming this method %q to %q",
from.Name(), r.to)
@@ -574,17 +618,44 @@ func (r *renamer) checkMethod(from *types.Func) {
return
}
- // assignability (of both T and *T)
- recvBase := deref(recv)
- for _, R := range []types.Type{recvBase, types.NewPointer(recvBase)} {
- for I := range r.findAssignments(R) {
- if obj, _, _ := types.LookupFieldOrMethod(I, true, from.Pkg(), from.Name()); obj == nil {
- continue
- }
+ // assignability
+ //
+ // Find the set of abstract methods coupled to concrete
+ // method 'from' by some satisfy.Constraint, and rename
+ // them too.
+ //
+ // Coupling may be indirect, e.g. I.f <-> C.f via type D.
+ //
+ // type I interface {f()}
+ // type C int
+ // func (C) f()
+ // type D struct{C}
+ // var _ I = D{}
+ //
+ for key := range r.satisfy() {
+ // key = (lhs, rhs) where lhs is always an interface.
+ if isInterface(key.RHS) {
+ continue
+ }
+ rsel := r.msets.MethodSet(key.RHS).Lookup(from.Pkg(), from.Name())
+ if rsel == nil || rsel.Obj() != from {
+ continue // rhs does not have the method
+ }
+ lsel := r.msets.MethodSet(key.LHS).Lookup(from.Pkg(), from.Name())
+ if lsel == nil {
+ continue
+ }
+ imeth := lsel.Obj().(*types.Func)
+
+ // imeth is the abstract method (e.g. I.f)
+ // and key.RHS is the concrete coupling type (e.g. D).
+ if !r.changeMethods {
r.errorf(from.Pos(), "renaming this method %q to %q",
from.Name(), r.to)
var pos token.Pos
var iface string
+
+ I := recv(imeth).Type()
if named, ok := I.(*types.Named); ok {
pos = named.Obj().Pos()
iface = "interface " + named.Obj().Name()
@@ -592,9 +663,15 @@ func (r *renamer) checkMethod(from *types.Func) {
pos = from.Pos()
iface = I.String()
}
- r.errorf(pos, "\twould make it no longer assignable to %s", iface)
- return // one is enough
+ r.errorf(pos, "\twould make %s no longer assignable to %s",
+ key.RHS, iface)
+ r.errorf(imeth.Pos(), "\t(rename %s.%s if you intend to change both types)",
+ I, from.Name())
+ return // one error is enough
}
+
+ // Rename the coupled interface method to preserve assignability.
+ r.check(imeth)
}
}
@@ -618,9 +695,8 @@ func (r *renamer) checkExport(id *ast.Ident, pkg *types.Package, from types.Obje
return true
}
-// findAssignments returns the set of types to or from which type T is
-// assigned in the program syntax.
-func (r *renamer) findAssignments(T types.Type) map[types.Type]bool {
+// satisfy returns the set of interface satisfaction constraints.
+func (r *renamer) satisfy() map[satisfy.Constraint]bool {
if r.satisfyConstraints == nil {
// Compute on demand: it's expensive.
var f satisfy.Finder
@@ -629,23 +705,16 @@ func (r *renamer) findAssignments(T types.Type) map[types.Type]bool {
}
r.satisfyConstraints = f.Result
}
-
- result := make(map[types.Type]bool)
- for key := range r.satisfyConstraints {
- // key = (lhs, rhs) where lhs is always an interface.
- if types.Identical(key.RHS, T) {
- result[key.LHS] = true
- }
- if isInterface(T) && types.Identical(key.LHS, T) {
- // must check both sides
- result[key.RHS] = true
- }
- }
- return result
+ return r.satisfyConstraints
}
// -- helpers ----------------------------------------------------------
+// recv returns the method's receiver.
+func recv(meth *types.Func) *types.Var {
+ return meth.Type().(*types.Signature).Recv()
+}
+
// someUse returns an arbitrary use of obj within info.
func someUse(info *loader.PackageInfo, obj types.Object) *ast.Ident {
for id, o := range info.Uses {
@@ -658,10 +727,7 @@ func someUse(info *loader.PackageInfo, obj types.Object) *ast.Ident {
// -- Plundered from golang.org/x/tools/go/ssa -----------------
-func isInterface(T types.Type) bool {
- _, ok := T.Underlying().(*types.Interface)
- return ok
-}
+func isInterface(T types.Type) bool { return types.IsInterface(T) }
func deref(typ types.Type) types.Type {
if p, _ := typ.(*types.Pointer); p != nil {
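
checkMethod's declaration-conflict test relies on types.LookupFieldOrMethod: a hit with an index path of length 1 is a member declared directly on the receiver's type, while a longer path means a promoted member. A small sketch of that distinction using the standard library's go/types (the example package source is invented):

package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"
)

const src = `package p

type B struct{}

func (B) Close() error { return nil }

type C struct{ B }

func (C) Shutdown() {}
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	conf := types.Config{Importer: importer.Default()}
	pkg, err := conf.Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	C := pkg.Scope().Lookup("C").Type()

	// "Close" is only promoted into C from the embedded B, so the index
	// path has length 2; a direct declaration on C would have length 1.
	obj, index, _ := types.LookupFieldOrMethod(C, true, pkg, "Close")
	fmt.Printf("found %v, index path %v, declared directly on C: %v\n",
		obj, index, len(index) == 1)
}
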
diff --git a/refactor/rename/mvpkg.go b/refactor/rename/mvpkg.go
new file mode 100644
index 0000000..bf8edd6
--- /dev/null
+++ b/refactor/rename/mvpkg.go
@@ -0,0 +1,320 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains the implementation of the 'gomvpkg' command
+// whose main function is in golang.org/x/tools/cmd/gomvpkg.
+
+package rename
+
+// TODO(matloob):
+// - think about what happens if the package is moving across version control systems.
+// - think about windows, which uses "\" as its directory separator.
+// - dot imports are not supported. Make sure it's clearly documented.
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/build"
+ "log"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+ "text/template"
+
+ "golang.org/x/tools/go/buildutil"
+ "golang.org/x/tools/go/loader"
+ "golang.org/x/tools/refactor/importgraph"
+)
+
+// Move, given a package path and a destination package path, tries to
+// move the given package to the new path. It first checks for any
+// conflicts preventing the move, such as a package already existing at
+// the destination path. If the move can proceed, it builds an import
+// graph to find all imports of the packages whose paths need to be
+// renamed; this includes uses of the subpackages of the package being
+// moved, since those packages must move as well. It then rewrites all
+// such imports to point to the new paths and moves the packages to
+// their new locations.
+func Move(ctxt *build.Context, from, to, moveTmpl string) error {
+ srcDir, err := srcDir(ctxt, from)
+ if err != nil {
+ return err
+ }
+
+ // This should be the only place in the program that constructs
+ // file paths.
+ // TODO(matloob): test on Microsoft Windows.
+ fromDir := buildutil.JoinPath(ctxt, srcDir, filepath.FromSlash(from))
+ toDir := buildutil.JoinPath(ctxt, srcDir, filepath.FromSlash(to))
+ toParent := filepath.Dir(toDir)
+ if !buildutil.IsDir(ctxt, toParent) {
+ return fmt.Errorf("parent directory does not exist for path %s", toDir)
+ }
+
+ // Build the import graph and figure out which packages to update.
+ fwd, rev, errors := importgraph.Build(ctxt)
+ if len(errors) > 0 {
+ fmt.Fprintf(os.Stderr, "While scanning Go workspace:\n")
+ for path, err := range errors {
+ fmt.Fprintf(os.Stderr, "Package %q: %s.\n", path, err)
+ }
+ return fmt.Errorf("failed to construct import graph")
+ }
+
+ // Determine the affected packages---the set of packages whose import
+ // statements need updating.
+ affectedPackages := map[string]bool{from: true}
+ destinations := map[string]string{} // maps old dir to new dir
+ for pkg := range subpackages(ctxt, srcDir, from) {
+ for r := range rev[pkg] {
+ affectedPackages[r] = true
+ }
+ destinations[pkg] = strings.Replace(pkg,
+ // Ensure directories have a trailing "/".
+ filepath.Join(from, ""), filepath.Join(to, ""), 1)
+ }
+
+ // Load all the affected packages.
+ iprog, err := loadProgram(ctxt, affectedPackages)
+ if err != nil {
+ return err
+ }
+
+ // Prepare the move command, if one was supplied.
+ var cmd string
+ if moveTmpl != "" {
+ if cmd, err = moveCmd(moveTmpl, fromDir, toDir); err != nil {
+ return err
+ }
+ }
+
+ m := mover{
+ ctxt: ctxt,
+ fwd: fwd,
+ rev: rev,
+ iprog: iprog,
+ from: from,
+ to: to,
+ fromDir: fromDir,
+ toDir: toDir,
+ affectedPackages: affectedPackages,
+ destinations: destinations,
+ cmd: cmd,
+ }
+
+ if err := m.checkValid(); err != nil {
+ return err
+ }
+
+ m.move()
+
+ return nil
+}
+
+// srcDir returns the absolute path of the srcdir containing pkg.
+func srcDir(ctxt *build.Context, pkg string) (string, error) {
+ for _, srcDir := range ctxt.SrcDirs() {
+ path := buildutil.JoinPath(ctxt, srcDir, pkg)
+ if buildutil.IsDir(ctxt, path) {
+ return srcDir, nil
+ }
+ }
+ return "", fmt.Errorf("src dir not found for package: %s", pkg)
+}
+
+// subpackages returns the set of packages in the given srcDir whose
+// import paths start with dir.
+func subpackages(ctxt *build.Context, srcDir string, dir string) map[string]bool {
+ subs := map[string]bool{dir: true}
+
+ // Find all packages under srcDir whose import paths start with dir.
+ buildutil.ForEachPackage(ctxt, func(pkg string, err error) {
+ if err != nil {
+ log.Fatalf("unexpected error in ForEachPackage: %v", err)
+ }
+
+ if !strings.HasPrefix(pkg, path.Join(dir, "")) {
+ return
+ }
+
+ p, err := ctxt.Import(pkg, "", build.FindOnly)
+ if err != nil {
+ log.Fatalf("unexpected: package %s can not be located by build context: %s", pkg, err)
+ }
+ if p.SrcRoot == "" {
+ log.Fatalf("unexpected: could not determine srcDir for package %s: %s", pkg, err)
+ }
+ if p.SrcRoot != srcDir {
+ return
+ }
+
+ subs[pkg] = true
+ })
+
+ return subs
+}
+
+type mover struct {
+ // iprog contains all packages whose contents need to be updated
+ // with new package names or import paths.
+ iprog *loader.Program
+ ctxt *build.Context
+ // fwd and rev are the forward and reverse import graphs
+ fwd, rev importgraph.Graph
+ // from and to are the source and destination import
+ // paths. fromDir and toDir are the source and destination
+ // absolute paths that package source files will be moved between.
+ from, to, fromDir, toDir string
+ // affectedPackages is the set of all packages whose contents need
+ // to be updated to reflect new package names or import paths.
+ affectedPackages map[string]bool
+ // destinations maps each subpackage to be moved to its
+ // destination path.
+ destinations map[string]string
+ // cmd, if not empty, will be executed to move fromDir to toDir.
+ cmd string
+}
+
+func (m *mover) checkValid() error {
+ const prefix = "invalid move destination"
+
+ match, err := regexp.MatchString("^[_\\pL][_\\pL\\p{Nd}]*$", path.Base(m.to))
+ if err != nil {
+ panic("regexp.MatchString failed")
+ }
+ if !match {
+ return fmt.Errorf("%s: %s; gomvpkg does not support move destinations "+
+ "whose base names are not valid go identifiers", prefix, m.to)
+ }
+
+ if buildutil.FileExists(m.ctxt, m.toDir) {
+ return fmt.Errorf("%s: %s conflicts with file %s", prefix, m.to, m.toDir)
+ }
+ if buildutil.IsDir(m.ctxt, m.toDir) {
+ return fmt.Errorf("%s: %s conflicts with directory %s", prefix, m.to, m.toDir)
+ }
+
+ for _, toSubPkg := range m.destinations {
+ if _, err := m.ctxt.Import(toSubPkg, "", build.FindOnly); err == nil {
+ return fmt.Errorf("%s: %s; package or subpackage %s already exists",
+ prefix, m.to, toSubPkg)
+ }
+ }
+
+ return nil
+}
+
+// moveCmd produces the version control move command used to move fromDir to toDir by
+// executing the given template.
+func moveCmd(moveTmpl, fromDir, toDir string) (string, error) {
+ tmpl, err := template.New("movecmd").Parse(moveTmpl)
+ if err != nil {
+ return "", err
+ }
+
+ var buf bytes.Buffer
+ err = tmpl.Execute(&buf, struct {
+ Src string
+ Dst string
+ }{fromDir, toDir})
+ return buf.String(), err
+}
+
+func (m *mover) move() error {
+ filesToUpdate := make(map[*ast.File]bool)
+
+ // Change the moved package's "package" declaration to its new base name.
+ pkg, ok := m.iprog.Imported[m.from]
+ if !ok {
+ log.Fatalf("unexpected: package %s is not in import map", m.from)
+ }
+ newName := filepath.Base(m.to)
+ for _, f := range pkg.Files {
+ f.Name.Name = newName // change package decl
+ filesToUpdate[f] = true
+ }
+ // Update imports of that package to use the new import name.
+ // Only the moved package itself changes its package name; none of its
+ // subpackages do.
+ for p := range m.rev[m.from] {
+ _, err := importName(
+ m.iprog, m.iprog.Imported[p], m.from, path.Base(m.from), newName)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Update import paths for all imports by affected packages.
+ for ap := range m.affectedPackages {
+ info, ok := m.iprog.Imported[ap]
+ if !ok {
+ log.Fatalf("unexpected: package %s is not in import map", ap)
+ }
+ for _, f := range info.Files {
+ for _, imp := range f.Imports {
+ importPath, _ := strconv.Unquote(imp.Path.Value)
+ if newPath, ok := m.destinations[importPath]; ok {
+ imp.Path.Value = strconv.Quote(newPath)
+
+ oldName := path.Base(importPath)
+ if imp.Name != nil {
+ oldName = imp.Name.Name
+ }
+
+ newName := path.Base(newPath)
+ if imp.Name == nil && oldName != newName {
+ imp.Name = ast.NewIdent(oldName)
+ } else if imp.Name == nil || imp.Name.Name == newName {
+ imp.Name = nil
+ }
+ filesToUpdate[f] = true
+ }
+ }
+ }
+ }
+
+ for f := range filesToUpdate {
+ tokenFile := m.iprog.Fset.File(f.Pos())
+ rewriteFile(m.iprog.Fset, f, tokenFile.Name())
+ }
+
+ // Move the directories.
+ // If either fromDir or toDir is under version control, it is the
+ // user's responsibility to provide a custom move command that updates
+ // version control to reflect the move.
+ // TODO(matloob): If the parent directory of toDir does not exist, create it.
+ // For now, it must already exist.
+
+ if m.cmd != "" {
+ // TODO(matloob): Verify that the windows and plan9 cases are correct.
+ var cmd *exec.Cmd
+ switch runtime.GOOS {
+ case "windows":
+ cmd = exec.Command("cmd", "/c", m.cmd)
+ case "plan9":
+ cmd = exec.Command("rc", "-c", m.cmd)
+ default:
+ cmd = exec.Command("sh", "-c", m.cmd)
+ }
+ cmd.Stderr = os.Stderr
+ cmd.Stdout = os.Stdout
+ if err := cmd.Run(); err != nil {
+ return fmt.Errorf("version control system's move command failed: %v", err)
+ }
+
+ return nil
+ }
+
+ return moveDirectory(m.fromDir, m.toDir)
+}
+
+var moveDirectory = func(from, to string) error {
+ return os.Rename(from, to)
+}
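The move-command hook above merits a quick illustration: moveCmd expands a text/template whose only fields are Src and Dst, and mover.move then runs the resulting string through the platform shell (sh -c; cmd /c on Windows; rc -c on Plan 9). A minimal sketch of that expansion, assuming a user-supplied "git mv" template (the template string and paths here are illustrative, not part of the change):

    // Hypothetical template a user might supply so the move is recorded by git.
    tmpl := "git mv {{.Src}} {{.Dst}}"
    cmd, err := moveCmd(tmpl, "/go/src/foo", "/go/src/bar")
    if err != nil {
        log.Fatal(err) // template parse or execute error
    }
    fmt.Println(cmd) // prints: git mv /go/src/foo /go/src/bar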
diff --git a/refactor/rename/mvpkg_test.go b/refactor/rename/mvpkg_test.go
new file mode 100644
index 0000000..3c915b4
--- /dev/null
+++ b/refactor/rename/mvpkg_test.go
@@ -0,0 +1,284 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rename
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/format"
+ "go/token"
+ "io/ioutil"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "testing"
+
+ "golang.org/x/tools/go/buildutil"
+)
+
+func TestErrors(t *testing.T) {
+ tests := []struct {
+ ctxt *build.Context
+ from, to string
+ want string // regexp to match error, or "OK"
+ }{
+ // Simple example.
+ {
+ ctxt: fakeContext(map[string][]string{
+ "foo": {`package foo; type T int`},
+ "bar": {`package bar`},
+ "main": {`package main
+
+import "foo"
+
+var _ foo.T
+`},
+ }),
+ from: "foo", to: "bar",
+ want: `invalid move destination: bar conflicts with directory .go.src.bar`,
+ },
+ // Subpackage already exists.
+ {
+ ctxt: fakeContext(map[string][]string{
+ "foo": {`package foo; type T int`},
+ "foo/sub": {`package sub`},
+ "bar/sub": {`package sub`},
+ "main": {`package main
+
+import "foo"
+
+var _ foo.T
+`},
+ }),
+ from: "foo", to: "bar",
+ want: "invalid move destination: bar; package or subpackage bar/sub already exists",
+ },
+ // Invalid base name.
+ {
+ ctxt: fakeContext(map[string][]string{
+ "foo": {`package foo; type T int`},
+ "main": {`package main
+
+import "foo"
+
+var _ foo.T
+`},
+ }),
+ from: "foo", to: "bar-v2.0",
+ want: "invalid move destination: bar-v2.0; gomvpkg does not " +
+ "support move destinations whose base names are not valid " +
+ "go identifiers",
+ },
+ }
+
+ for _, test := range tests {
+ ctxt := test.ctxt
+
+ got := make(map[string]string)
+ rewriteFile = func(fset *token.FileSet, f *ast.File, orig string) error {
+ var out bytes.Buffer
+ if err := format.Node(&out, fset, f); err != nil {
+ return err
+ }
+ got[orig] = out.String()
+ return nil
+ }
+ moveDirectory = func(from, to string) error {
+ for path, contents := range got {
+ if strings.HasPrefix(path, from) {
+ newPath := strings.Replace(path, from, to, 1)
+ delete(got, path)
+ got[newPath] = contents
+ }
+ }
+ return nil
+ }
+
+ err := Move(ctxt, test.from, test.to, "")
+ prefix := fmt.Sprintf("-from %q -to %q", test.from, test.to)
+ if err == nil {
+ t.Errorf("%s: nil error. Expected error: %s", prefix, test.want)
+ continue
+ }
+ matched, err2 := regexp.MatchString(test.want, err.Error())
+ if err2 != nil {
+ t.Errorf("regexp.MatchString failed %s", err2)
+ continue
+ }
+ if !matched {
+ t.Errorf("%s: conflict does not match expectation:\n"+
+ "Error: %q\n"+
+ "Pattern: %q",
+ prefix, err.Error(), test.want)
+ }
+ }
+}
+
+func TestMoves(t *testing.T) {
+ tests := []struct {
+ ctxt *build.Context
+ from, to string
+ want map[string]string
+ }{
+ // Simple example.
+ {
+ ctxt: fakeContext(map[string][]string{
+ "foo": {`package foo; type T int`},
+ "main": {`package main
+
+import "foo"
+
+var _ foo.T
+`},
+ }),
+ from: "foo", to: "bar",
+ want: map[string]string{
+ "/go/src/main/0.go": `package main
+
+import "bar"
+
+var _ bar.T
+`,
+ "/go/src/bar/0.go": `package bar
+
+type T int
+`,
+ },
+ },
+
+ // Example with subpackage.
+ {
+ ctxt: fakeContext(map[string][]string{
+ "foo": {`package foo; type T int`},
+ "foo/sub": {`package sub; type T int`},
+ "main": {`package main
+
+import "foo"
+import "foo/sub"
+
+var _ foo.T
+var _ sub.T
+`},
+ }),
+ from: "foo", to: "bar",
+ want: map[string]string{
+ "/go/src/main/0.go": `package main
+
+import "bar"
+import "bar/sub"
+
+var _ bar.T
+var _ sub.T
+`,
+ "/go/src/bar/0.go": `package bar
+
+type T int
+`,
+ "/go/src/bar/sub/0.go": `package sub; type T int`,
+ },
+ },
+
+ // References into subpackages
+ {
+ ctxt: fakeContext(map[string][]string{
+ "foo": {`package foo; import "foo/a"; var _ a.T`},
+ "foo/a": {`package a; type T int`},
+ "foo/b": {`package b; import "foo/a"; var _ a.T`},
+ }),
+ from: "foo", to: "bar",
+ want: map[string]string{
+ "/go/src/bar/0.go": `package bar
+
+import "bar/a"
+
+var _ a.T
+`,
+ "/go/src/bar/a/0.go": `package a; type T int`,
+ "/go/src/bar/b/0.go": `package b
+
+import "bar/a"
+
+var _ a.T
+`,
+ },
+ },
+ }
+
+ for _, test := range tests {
+ ctxt := test.ctxt
+
+ got := make(map[string]string)
+ // Populate got with the starting file set. rewriteFile and moveDirectory
+ // will mutate got to produce the resulting file set.
+ buildutil.ForEachPackage(ctxt, func(importPath string, err error) {
+ if err != nil {
+ return
+ }
+ path := filepath.Join("/go/src", importPath, "0.go")
+ if !buildutil.FileExists(ctxt, path) {
+ return
+ }
+ f, err := ctxt.OpenFile(path)
+ if err != nil {
+ t.Errorf("unexpected error opening file: %s", err)
+ return
+ }
+ bytes, err := ioutil.ReadAll(f)
+ f.Close()
+ if err != nil {
+ t.Errorf("unexpected error reading file: %s", err)
+ return
+ }
+ got[path] = string(bytes)
+ })
+ rewriteFile = func(fset *token.FileSet, f *ast.File, orig string) error {
+ var out bytes.Buffer
+ if err := format.Node(&out, fset, f); err != nil {
+ return err
+ }
+ got[orig] = out.String()
+ return nil
+ }
+ moveDirectory = func(from, to string) error {
+ for path, contents := range got {
+ if strings.HasPrefix(path, from) {
+ newPath := strings.Replace(path, from, to, 1)
+ delete(got, path)
+ got[newPath] = contents
+ }
+ }
+ return nil
+ }
+
+ err := Move(ctxt, test.from, test.to, "")
+ prefix := fmt.Sprintf("-from %q -to %q", test.from, test.to)
+ if err != nil {
+ t.Errorf("%s: unexpected error: %s", prefix, err)
+ continue
+ }
+
+ for file, wantContent := range test.want {
+ k := filepath.FromSlash(file)
+ gotContent, ok := got[k]
+ delete(got, k)
+ if !ok {
+ // TODO(matloob): some testcases might have files that won't be
+ // rewritten
+ t.Errorf("%s: file %s not rewritten", prefix, file)
+ continue
+ }
+ if gotContent != wantContent {
+ t.Errorf("%s: rewritten file %s does not match expectation; got <<<%s>>>\n"+
+ "want <<<%s>>>", prefix, file, gotContent, wantContent)
+ }
+ }
+ // got should now be empty
+ for file := range got {
+ t.Errorf("%s: unexpected rewrite of file %s", prefix, file)
+ }
+ }
+}
diff --git a/refactor/rename/rename.el b/refactor/rename/rename.el
index 9268358..ea6c744 100644
--- a/refactor/rename/rename.el
+++ b/refactor/rename/rename.el
@@ -25,10 +25,12 @@
:type 'string
:group 'go-rename)
-(defun go-rename (new-name)
+(defun go-rename (new-name &optional force)
"Rename the entity denoted by the identifier at point, using
-the `gorename' tool."
- (interactive (list (read-string "New name: " (thing-at-point 'symbol))))
+the `gorename' tool. With FORCE, call `gorename' with the
+`-force' flag."
+ (interactive (list (read-string "New name: " (thing-at-point 'symbol))
+ current-prefix-arg))
(if (not buffer-file-name)
(error "Cannot use go-rename on a buffer without a file name"))
;; It's not sufficient to save the current buffer if modified,
@@ -50,7 +52,7 @@ the `gorename' tool."
(with-current-buffer (get-buffer-create "*go-rename*")
(setq buffer-read-only nil)
(erase-buffer)
- (let ((args (list go-rename-command nil t nil posflag "-to" new-name)))
+ (let ((args (append (list go-rename-command nil t nil posflag "-to" new-name) (if force '("-force")))))
;; Log the command to *Messages*, for debugging.
(message "Command: %s:" args)
(message "Running gorename...")
diff --git a/refactor/rename/rename.go b/refactor/rename/rename.go
index 25e1071..2f86e2e 100644
--- a/refactor/rename/rename.go
+++ b/refactor/rename/rename.go
@@ -3,9 +3,9 @@
// license that can be found in the LICENSE file.
// Package rename contains the implementation of the 'gorename' command
-// whose main function is in golang.org/x/tools/refactor/rename.
-// See that package for the command documentation.
-package rename
+// whose main function is in golang.org/x/tools/cmd/gorename.
+// See the Usage constant for the command documentation.
+package rename // import "golang.org/x/tools/refactor/rename"
import (
"errors"
@@ -16,8 +16,10 @@ import (
"go/parser"
"go/token"
"os"
+ "path"
"path/filepath"
"sort"
+ "strconv"
"strings"
"golang.org/x/tools/go/loader"
@@ -26,6 +28,107 @@ import (
"golang.org/x/tools/refactor/satisfy"
)
+const Usage = `gorename: precise type-safe renaming of identifiers in Go source code.
+
+Usage:
+
+ gorename (-from <spec> | -offset <file>:#<byte-offset>) -to <name> [-force]
+
+You must specify the object (named entity) to rename using the -offset
+or -from flag. Exactly one must be specified.
+
+Flags:
+
+-offset specifies the filename and byte offset of an identifier to rename.
+ This form is intended for use by text editors.
+
+-from specifies the object to rename using a query notation;
+ This form is intended for interactive use at the command line.
+ A legal -from query has one of the following forms:
+
+ "encoding/json".Decoder.Decode method of package-level named type
+ (*"encoding/json".Decoder).Decode ditto, alternative syntax
+ "encoding/json".Decoder.buf field of package-level named struct type
+ "encoding/json".HTMLEscape package member (const, func, var, type)
+ "encoding/json".Decoder.Decode::x local object x within a method
+ "encoding/json".HTMLEscape::x local object x within a function
+ "encoding/json"::x object x anywhere within a package
+ json.go::x object x within file json.go
+
+ Double-quotes must be escaped when writing a shell command.
+ Quotes may be omitted for single-segment import paths such as "fmt".
+
+ For methods, the parens and '*' on the receiver type are both
+ optional.
+
+ It is an error if one of the ::x queries matches multiple
+ objects.
+
+-to the new name.
+
+-force causes the renaming to proceed even if conflicts were reported.
+ The resulting program may be ill-formed, or experience a change
+ in behaviour.
+
+ WARNING: this flag may even cause the renaming tool to crash.
+ (In due course this bug will be fixed by moving certain
+ analyses into the type-checker.)
+
+-dryrun causes the tool to report conflicts but not update any files.
+
+-v enables verbose logging.
+
+gorename automatically computes the set of packages that might be
+affected. For a local renaming, this is just the package specified by
+-from or -offset, but for a potentially exported name, gorename scans
+the workspace ($GOROOT and $GOPATH).
+
+gorename rejects renamings of concrete methods that would change the
+assignability relation between types and interfaces. If the interface
+change was intentional, initiate the renaming at the interface method.
+
+gorename rejects any renaming that would create a conflict at the point
+of declaration, or a reference conflict (ambiguity or shadowing), or
+anything else that could cause the resulting program not to compile.
+
+
+Examples:
+
+% gorename -offset file.go:#123 -to foo
+
+ Rename the object whose identifier is at byte offset 123 within file file.go.
+
+% gorename -from \"bytes\".Buffer.Len -to Size
+
+ Rename the "Len" method of the *bytes.Buffer type to "Size".
+
+---- TODO ----
+
+Correctness:
+- handle dot imports correctly
+- document limitations (reflection, 'implements' algorithm).
+- sketch a proof of exhaustiveness.
+
+Features:
+- support running on packages specified as *.go files on the command line
+- support running on programs containing errors (loader.Config.AllowErrors)
+- allow users to specify a scope other than "global" (to avoid being
+ stuck by neglected packages in $GOPATH that don't build).
+- support renaming the package clause (no object)
+- support renaming an import path (no ident or object)
+ (requires filesystem + SCM updates).
+- detect and reject edits to autogenerated files (cgo, protobufs)
+ and optionally $GOROOT packages.
+- report all conflicts, or at least all qualitatively distinct ones.
+ Sometimes we stop to avoid redundancy, but
+ it may give a disproportionate sense of safety in -force mode.
+- support renaming all instances of a pattern, e.g.
+ all receiver vars of a given type,
+ all local variables of a given type,
+ all PkgNames for a given package.
+- emit JSON output for other editors and tools.
+`
+
var (
// Force enables patching of the source files even if conflicts were reported.
// The resulting program may be ill-formed.
@@ -50,12 +153,54 @@ type renamer struct {
to string
satisfyConstraints map[satisfy.Constraint]bool
packages map[*types.Package]*loader.PackageInfo // subset of iprog.AllPackages to inspect
+ msets types.MethodSetCache
+ changeMethods bool
}
var reportError = func(posn token.Position, message string) {
fmt.Fprintf(os.Stderr, "%s: %s\n", posn, message)
}
+// importName renames imports of the package with the given path in
+// the given package. If fromName is not empty, only imports as
+// fromName will be renamed. Even if renaming is successful, there
+// may be some files that are unchanged; they are reported in
+// unchangedFiles.
+func importName(iprog *loader.Program, info *loader.PackageInfo, fromPath, fromName, to string) (unchangedFiles []string, err error) {
+ for _, f := range info.Files {
+ var from types.Object
+ for _, imp := range f.Imports {
+ importPath, _ := strconv.Unquote(imp.Path.Value)
+ importName := path.Base(importPath)
+ if imp.Name != nil {
+ importName = imp.Name.Name
+ }
+ if importPath == fromPath && (fromName == "" || importName == fromName) {
+ from = info.Implicits[imp]
+ break
+ }
+ }
+ if from == nil {
+ continue
+ }
+ r := renamer{
+ iprog: iprog,
+ objsToUpdate: make(map[types.Object]bool),
+ to: to,
+ packages: map[*types.Package]*loader.PackageInfo{info.Pkg: info},
+ }
+ r.check(from)
+ if r.hadConflicts {
+ unchangedFiles = append(unchangedFiles, f.Name.Name)
+ continue // ignore the conflict; leave the existing name
+ }
+ if err := r.update(); err != nil {
+ return nil, err
+ }
+ }
+ return unchangedFiles, nil
+}
+
func Main(ctxt *build.Context, offsetFlag, fromFlag, to string) error {
// -- Parse the -from or -offset specifier ----------------------------
@@ -151,6 +296,19 @@ func Main(ctxt *build.Context, offsetFlag, fromFlag, to string) error {
packages: make(map[*types.Package]*loader.PackageInfo),
}
+ // A renaming initiated at an interface method indicates the
+ // intention to rename abstract and concrete methods as needed
+ // to preserve assignability.
+ for _, obj := range fromObjects {
+ if obj, ok := obj.(*types.Func); ok {
+ recv := obj.Type().(*types.Signature).Recv()
+ if recv != nil && isInterface(recv.Type().Underlying()) {
+ r.changeMethods = true
+ break
+ }
+ }
+ }
+
// Only the initially imported packages (iprog.Imported) and
// their external tests (iprog.Created) should be inspected or
// modified, as only they have type-checked functions bodies.
@@ -181,9 +339,8 @@ func Main(ctxt *build.Context, offsetFlag, fromFlag, to string) error {
// context. Only packages in pkgs will have their functions bodies typechecked.
func loadProgram(ctxt *build.Context, pkgs map[string]bool) (*loader.Program, error) {
conf := loader.Config{
- Build: ctxt,
- SourceImports: true,
- ParserMode: parser.ParseComments,
+ Build: ctxt,
+ ParserMode: parser.ParseComments,
// TODO(adonovan): enable this. Requires making a lot of code more robust!
AllowErrors: false,
@@ -207,9 +364,7 @@ func loadProgram(ctxt *build.Context, pkgs map[string]bool) (*loader.Program, er
}
for pkg := range pkgs {
- if err := conf.ImportWithTests(pkg); err != nil {
- return nil, err
- }
+ conf.ImportWithTests(pkg)
}
return conf.Load()
}
@@ -281,7 +436,7 @@ func (r *renamer) update() error {
}
}
if err := rewriteFile(r.iprog.Fset, f, tokenFile.Name()); err != nil {
- fmt.Fprintf(os.Stderr, "Error: %s.\n", err)
+ fmt.Fprintf(os.Stderr, "gorename: %s\n", err)
nerrs++
}
}
@@ -304,16 +459,23 @@ func plural(n int) string {
return ""
}
-func writeFile(name string, fset *token.FileSet, f *ast.File) error {
- out, err := os.Create(name)
+func writeFile(name string, fset *token.FileSet, f *ast.File, mode os.FileMode) error {
+ out, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
// assume error includes the filename
return fmt.Errorf("failed to open file: %s", err)
}
+
+ // Oddly, os.OpenFile doesn't preserve all the mode bits, hence
+ // this chmod. (We use 0600 above to avoid a brief
+ // vulnerability if the user has an insecure umask.)
+ os.Chmod(name, mode) // ignore error
+
if err := format.Node(out, fset, f); err != nil {
out.Close() // ignore error
return fmt.Errorf("failed to write file: %s", err)
}
+
return out.Close()
}
@@ -324,11 +486,16 @@ var rewriteFile = func(fset *token.FileSet, f *ast.File, orig string) (err error
if Verbose {
fmt.Fprintf(os.Stderr, "\t%s\n", orig)
}
+ // save file mode
+ var mode os.FileMode = 0666
+ if fi, err := os.Stat(orig); err == nil {
+ mode = fi.Mode()
+ }
if err := os.Rename(orig, backup); err != nil {
return fmt.Errorf("failed to make backup %s -> %s: %s",
orig, filepath.Base(backup), err)
}
- if err := writeFile(orig, fset, f); err != nil {
+ if err := writeFile(orig, fset, f, mode); err != nil {
// Restore the file from the backup.
os.Remove(orig) // ignore error
os.Rename(backup, orig) // ignore error
diff --git a/refactor/rename/rename_test.go b/refactor/rename/rename_test.go
index a4169b3..1d282a1 100644
--- a/refactor/rename/rename_test.go
+++ b/refactor/rename/rename_test.go
@@ -11,15 +11,12 @@ import (
"go/build"
"go/format"
"go/token"
- "io"
- "io/ioutil"
- "os"
"path/filepath"
"regexp"
- "strconv"
"strings"
"testing"
- "time"
+
+ "golang.org/x/tools/go/buildutil"
)
// TODO(adonovan): test reported source positions, somehow.
@@ -241,14 +238,14 @@ func f() {
from: "(main.U).u", to: "w",
want: `renaming this field "u" to "w".*` +
`would change the referent of this selection.*` +
- `to this field`,
+ `of this field`,
},
{
// field/field shadowing at different promotion levels ('to' selection)
from: "(main.W).w", to: "u",
want: `renaming this field "w" to "u".*` +
`would shadow this selection.*` +
- `to the field declared here`,
+ `of the field declared here`,
},
{
from: "(main.V).v", to: "w",
@@ -323,17 +320,65 @@ var _ interface {f()} = C(0)
{
from: "(main.C).f", to: "e",
want: `renaming this method "f" to "e".*` +
- `would make it no longer assignable to interface{f..}`,
+ `would make main.C no longer assignable to interface{f..}.*` +
+ `(rename interface{f..}.f if you intend to change both types)`,
},
{
from: "(main.D).g", to: "e",
want: `renaming this method "g" to "e".*` +
- `would make it no longer assignable to interface I`,
+ `would make \*main.D no longer assignable to interface I.*` +
+ `(rename main.I.g if you intend to change both types)`,
},
{
from: "(main.I).f", to: "e",
- want: `renaming this method "f" to "e".*` +
- `would make \*main.D no longer assignable to it`,
+ want: `OK`,
+ },
+ // Indirect C/I method coupling via another concrete type D.
+ {
+ ctxt: main(`
+package main
+type I interface { f() }
+type C int
+func (C) f()
+type D struct{C}
+var _ I = D{}
+`),
+ from: "(main.C).f", to: "F",
+ want: `renaming this method "f" to "F".*` +
+ `would make main.D no longer assignable to interface I.*` +
+ `(rename main.I.f if you intend to change both types)`,
+ },
+ // Renaming causes promoted method to become shadowed; C no longer satisfies I.
+ {
+ ctxt: main(`
+package main
+type I interface { f() }
+type C struct { I }
+func (C) g() int
+var _ I = C{}
+`),
+ from: "main.I.f", to: "g",
+ want: `renaming this method "f" to "g".*` +
+ `would change the g method of main.C invoked via interface main.I.*` +
+ `from \(main.I\).g.*` +
+ `to \(main.C\).g`,
+ },
+ // Renaming causes promoted method to become ambiguous; C no longer satisfies I.
+ {
+ ctxt: main(`
+package main
+type I interface{f()}
+type C int
+func (C) f()
+type D int
+func (D) g()
+type E struct{C;D}
+var _ I = E{}
+`),
+ from: "main.I.f", to: "g",
+ want: `renaming this method "f" to "g".*` +
+ `would make the g method of main.E invoked via interface main.I ambiguous.*` +
+ `with \(main.D\).g`,
},
} {
var conflicts []string
@@ -675,6 +720,257 @@ type _ struct{ *foo.U }
`,
},
},
+
+ // Interface method renaming.
+ {
+ ctxt: fakeContext(map[string][]string{
+ "main": {`
+package main
+type I interface { f() }
+type J interface { f(); g() }
+type A int
+func (A) f()
+type B int
+func (B) f()
+func (B) g()
+type C int
+func (C) f()
+func (C) g()
+var _, _ I = A(0), B(0)
+var _, _ J = B(0), C(0)
+`,
+ },
+ }),
+ offset: "/go/src/main/0.go:#33", to: "F", // abstract method I.f
+ want: map[string]string{
+ "/go/src/main/0.go": `package main
+
+type I interface {
+ F()
+}
+type J interface {
+ F()
+ g()
+}
+type A int
+
+func (A) F()
+
+type B int
+
+func (B) F()
+func (B) g()
+
+type C int
+
+func (C) F()
+func (C) g()
+
+var _, _ I = A(0), B(0)
+var _, _ J = B(0), C(0)
+`,
+ },
+ },
+ {
+ offset: "/go/src/main/0.go:#58", to: "F", // abstract method J.f
+ want: map[string]string{
+ "/go/src/main/0.go": `package main
+
+type I interface {
+ F()
+}
+type J interface {
+ F()
+ g()
+}
+type A int
+
+func (A) F()
+
+type B int
+
+func (B) F()
+func (B) g()
+
+type C int
+
+func (C) F()
+func (C) g()
+
+var _, _ I = A(0), B(0)
+var _, _ J = B(0), C(0)
+`,
+ },
+ },
+ {
+ offset: "/go/src/main/0.go:#63", to: "G", // abstract method J.g
+ want: map[string]string{
+ "/go/src/main/0.go": `package main
+
+type I interface {
+ f()
+}
+type J interface {
+ f()
+ G()
+}
+type A int
+
+func (A) f()
+
+type B int
+
+func (B) f()
+func (B) G()
+
+type C int
+
+func (C) f()
+func (C) G()
+
+var _, _ I = A(0), B(0)
+var _, _ J = B(0), C(0)
+`,
+ },
+ },
+ // Indirect coupling of I.f to C.f from D->I assignment and anonymous field of D.
+ {
+ ctxt: fakeContext(map[string][]string{
+ "main": {`
+package main
+type I interface { f() }
+type C int
+func (C) f()
+type D struct{C}
+var _ I = D{}
+`,
+ },
+ }),
+ offset: "/go/src/main/0.go:#33", to: "F", // abstract method I.f
+ want: map[string]string{
+ "/go/src/main/0.go": `package main
+
+type I interface {
+ F()
+}
+type C int
+
+func (C) F()
+
+type D struct{ C }
+
+var _ I = D{}
+`,
+ },
+ },
+ // Interface embedded in struct. No conflict if C need not satisfy I.
+ {
+ ctxt: fakeContext(map[string][]string{
+ "main": {`
+package main
+type I interface {f()}
+type C struct{I}
+func (C) g() int
+var _ int = C{}.g()
+`,
+ },
+ }),
+ offset: "/go/src/main/0.go:#32", to: "g", // abstract method I.f
+ want: map[string]string{
+ "/go/src/main/0.go": `package main
+
+type I interface {
+ g()
+}
+type C struct{ I }
+
+func (C) g() int
+
+var _ int = C{}.g()
+`,
+ },
+ },
+ // A type assertion causes method coupling iff signatures match.
+ {
+ ctxt: fakeContext(map[string][]string{
+ "main": {`package main
+type I interface{f()}
+type J interface{f()}
+var _ = I(nil).(J)
+`,
+ },
+ }),
+ offset: "/go/src/main/0.go:#30", to: "g", // abstract method I.f
+ want: map[string]string{
+ "/go/src/main/0.go": `package main
+
+type I interface {
+ g()
+}
+type J interface {
+ g()
+}
+
+var _ = I(nil).(J)
+`,
+ },
+ },
+ // Impossible type assertion: no method coupling.
+ {
+ ctxt: fakeContext(map[string][]string{
+ "main": {`package main
+type I interface{f()}
+type J interface{f()int}
+var _ = I(nil).(J)
+`,
+ },
+ }),
+ offset: "/go/src/main/0.go:#30", to: "g", // abstract method I.f
+ want: map[string]string{
+ "/go/src/main/0.go": `package main
+
+type I interface {
+ g()
+}
+type J interface {
+ f() int
+}
+
+var _ = I(nil).(J)
+`,
+ },
+ },
+ // Impossible type assertion: no method coupling C.f<->J.f.
+ {
+ ctxt: fakeContext(map[string][]string{
+ "main": {`package main
+type I interface{f()}
+type C int
+func (C) f()
+type J interface{f()int}
+var _ = I(C(0)).(J)
+`,
+ },
+ }),
+ offset: "/go/src/main/0.go:#30", to: "g", // abstract method I.f
+ want: map[string]string{
+ "/go/src/main/0.go": `package main
+
+type I interface {
+ g()
+}
+type C int
+
+func (C) g()
+
+type J interface {
+ f() int
+}
+
+var _ = I(C(0)).(J)
+`,
+ },
+ },
} {
if test.ctxt != nil {
ctxt = test.ctxt
@@ -723,81 +1019,22 @@ type _ struct{ *foo.U }
// ---------------------------------------------------------------------
-// Plundered/adapted from go/loader/loader_test.go
-
-// TODO(adonovan): make this into a nice testing utility within go/buildutil.
-
-// pkgs maps the import path of a fake package to a list of its file contents;
-// file names are synthesized, e.g. %d.go.
+// Simplifying wrapper around buildutil.FakeContext for packages whose
+// filenames are sequentially numbered (%d.go). pkgs maps a package
+// import path to its list of file contents.
func fakeContext(pkgs map[string][]string) *build.Context {
- ctxt := build.Default // copy
- ctxt.GOROOT = "/go"
- ctxt.GOPATH = ""
- ctxt.IsDir = func(path string) bool {
- path = filepath.ToSlash(path)
- if path == "/go/src" {
- return true // needed by (*build.Context).SrcDirs
- }
- if p := strings.TrimPrefix(path, "/go/src/"); p == path {
- return false
- } else {
- path = p
+ pkgs2 := make(map[string]map[string]string)
+ for path, files := range pkgs {
+ filemap := make(map[string]string)
+ for i, contents := range files {
+ filemap[fmt.Sprintf("%d.go", i)] = contents
}
- _, ok := pkgs[path]
- return ok
+ pkgs2[path] = filemap
}
- ctxt.ReadDir = func(dir string) ([]os.FileInfo, error) {
- dir = filepath.ToSlash(dir)
- dir = dir[len("/go/src/"):]
- var fis []os.FileInfo
- if dir == "" {
- // Assumes keys of pkgs are single-segment.
- for p := range pkgs {
- fis = append(fis, fakeDirInfo(p))
- }
- } else {
- for i := range pkgs[dir] {
- fis = append(fis, fakeFileInfo(i))
- }
- }
- return fis, nil
- }
- ctxt.OpenFile = func(path string) (io.ReadCloser, error) {
- path = filepath.ToSlash(path)
- path = path[len("/go/src/"):]
- dir, base := filepath.Split(path)
- dir = filepath.Clean(dir)
- index, _ := strconv.Atoi(strings.TrimSuffix(base, ".go"))
- return ioutil.NopCloser(bytes.NewBufferString(pkgs[dir][index])), nil
- }
- ctxt.IsAbsPath = func(path string) bool {
- path = filepath.ToSlash(path)
- // Don't rely on the default (filepath.Path) since on
- // Windows, it reports our virtual paths as non-absolute.
- return strings.HasPrefix(path, "/")
- }
- return &ctxt
+ return buildutil.FakeContext(pkgs2)
}
// helper for single-file main packages with no imports.
func main(content string) *build.Context {
return fakeContext(map[string][]string{"main": {content}})
}
-
-type fakeFileInfo int
-
-func (fi fakeFileInfo) Name() string { return fmt.Sprintf("%d.go", fi) }
-func (fakeFileInfo) Sys() interface{} { return nil }
-func (fakeFileInfo) ModTime() time.Time { return time.Time{} }
-func (fakeFileInfo) IsDir() bool { return false }
-func (fakeFileInfo) Size() int64 { return 0 }
-func (fakeFileInfo) Mode() os.FileMode { return 0644 }
-
-type fakeDirInfo string
-
-func (fd fakeDirInfo) Name() string { return string(fd) }
-func (fakeDirInfo) Sys() interface{} { return nil }
-func (fakeDirInfo) ModTime() time.Time { return time.Time{} }
-func (fakeDirInfo) IsDir() bool { return true }
-func (fakeDirInfo) Size() int64 { return 0 }
-func (fakeDirInfo) Mode() os.FileMode { return 0755 }
diff --git a/refactor/rename/spec.go b/refactor/rename/spec.go
index 571e817..3c73653 100644
--- a/refactor/rename/spec.go
+++ b/refactor/rename/spec.go
@@ -6,7 +6,7 @@ package rename
// This file contains logic related to specifying a renaming: parsing of
// the flags as a form of query, and finding the object(s) it denotes.
-// See FromFlagUsage for details.
+// See Usage for flag details.
import (
"bytes"
@@ -27,8 +27,8 @@ import (
// A spec specifies an entity to rename.
//
-// It is populated from an -offset flag or -from query; see
-// FromFlagUsage for the allowed -from query forms.
+// It is populated from an -offset flag or -from query;
+// see Usage for the allowed -from query forms.
//
type spec struct {
// pkg is the package containing the position
@@ -65,28 +65,8 @@ type spec struct {
offset int
}
-const FromFlagUsage = `
-A legal -from query has one of the following forms:
-
- "encoding/json".Decoder.Decode method of package-level named type
- (*"encoding/json".Decoder).Decode ditto, alternative syntax
- "encoding/json".Decoder.buf field of package-level named struct type
- "encoding/json".HTMLEscape package member (const, func, var, type)
- "encoding/json".Decoder.Decode::x local object x within a method
- "encoding/json".HTMLEscape::x local object x within a function
- "encoding/json"::x object x anywhere within a package
- json.go::x object x within file json.go
-
- For methods, the parens and '*' on the receiver type are both optional.
-
- Double-quotes may be omitted for single-segment import paths such as
- fmt. They may need to be escaped when writing a shell command.
-
- It is an error if one of the ::x queries matches multiple objects.
-`
-
// parseFromFlag interprets the "-from" flag value as a renaming specification.
-// See FromFlagUsage for valid formats.
+// See Usage in rename.go for valid formats.
func parseFromFlag(ctxt *build.Context, fromFlag string) (*spec, error) {
var spec spec
var main string // sans "::x" suffix
@@ -197,7 +177,7 @@ func parseObjectSpec(spec *spec, main string) error {
}
}
- return fmt.Errorf("-from %q: invalid expression")
+ return fmt.Errorf("-from %q: invalid expression", main)
}
// parseImportPath returns the import path of the package denoted by e.
diff --git a/refactor/rename/util.go b/refactor/rename/util.go
index bf9e350..def9399 100644
--- a/refactor/rename/util.go
+++ b/refactor/rename/util.go
@@ -13,6 +13,7 @@ import (
"strings"
"unicode"
+ "golang.org/x/tools/go/ast/astutil"
"golang.org/x/tools/go/types"
)
@@ -100,14 +101,4 @@ func sameFile(x, y string) bool {
return false
}
-// unparen returns e with any enclosing parentheses stripped.
-func unparen(e ast.Expr) ast.Expr {
- for {
- p, ok := e.(*ast.ParenExpr)
- if !ok {
- break
- }
- e = p.X
- }
- return e
-}
+func unparen(e ast.Expr) ast.Expr { return astutil.Unparen(e) }
diff --git a/refactor/satisfy/find.go b/refactor/satisfy/find.go
index 8671cc5..20fb288 100644
--- a/refactor/satisfy/find.go
+++ b/refactor/satisfy/find.go
@@ -15,7 +15,7 @@
// since it is computing it anyway, and it is robust for ill-typed
// inputs, which this package is not.
//
-package satisfy
+package satisfy // import "golang.org/x/tools/refactor/satisfy"
// NOTES:
//
@@ -49,8 +49,8 @@ import (
"go/ast"
"go/token"
+ "golang.org/x/tools/go/ast/astutil"
"golang.org/x/tools/go/types"
- "golang.org/x/tools/go/types/typeutil"
)
// A Constraint records the fact that the RHS type does and must
@@ -72,7 +72,6 @@ type Constraint struct {
type Finder struct {
Result map[Constraint]bool
msetcache types.MethodSetCache
- canon typeutil.Map // maps types to canonical type
// per-Find state
info *types.Info
@@ -82,6 +81,9 @@ type Finder struct {
// Find inspects a single package, populating Result with its pairs of
// constrained types.
//
+// The result is non-canonical and thus may contain duplicates (but this
+// tends to preserve the names of interface types better).
+//
// The package must be free of type errors, and
// info.{Defs,Uses,Selections,Types} must have been populated by the
// type-checker.
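For orientation, the renamer consumes this package through its satisfyConstraints cache (see the renamer struct in rename.go above). A rough consumer-side sketch follows; the Find signature and the LHS/RHS fields of Constraint are assumed from this package's doc comments rather than quoted verbatim:

    // Collect assignability constraints for one type-checked package.
    // info is a *loader.PackageInfo, which embeds types.Info.
    var f satisfy.Finder
    f.Result = make(map[satisfy.Constraint]bool)
    f.Find(&info.Info, info.Files)
    for c := range f.Result {
        // Each constraint records that the concrete RHS type must keep
        // satisfying the interface LHS type after a renaming.
        fmt.Printf("%v must satisfy %v\n", c.RHS, c.LHS)
    }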
@@ -281,25 +283,15 @@ func (f *Finder) assign(lhs, rhs types.Type) {
if !isInterface(lhs) {
return
}
+
if f.msetcache.MethodSet(lhs).Len() == 0 {
return
}
if f.msetcache.MethodSet(rhs).Len() == 0 {
return
}
- // canonicalize types
- lhsc, ok := f.canon.At(lhs).(types.Type)
- if !ok {
- lhsc = lhs
- f.canon.Set(lhs, lhsc)
- }
- rhsc, ok := f.canon.At(rhs).(types.Type)
- if !ok {
- rhsc = rhs
- f.canon.Set(rhs, rhsc)
- }
// record the pair
- f.Result[Constraint{lhsc, rhsc}] = true
+ f.Result[Constraint{lhs, rhs}] = true
}
// typeAssert must be called for each type assertion x.(T) where x has
@@ -417,7 +409,7 @@ func (f *Finder) expr(e ast.Expr) types.Type {
x := f.expr(e.X)
i := f.expr(e.Index)
if ux, ok := x.Underlying().(*types.Map); ok {
- f.assign(ux.Elem(), i)
+ f.assign(ux.Key(), i)
}
case *ast.SliceExpr:
@@ -707,19 +699,6 @@ func deref(typ types.Type) types.Type {
return typ
}
-// unparen returns e with any enclosing parentheses stripped.
-func unparen(e ast.Expr) ast.Expr {
- for {
- p, ok := e.(*ast.ParenExpr)
- if !ok {
- break
- }
- e = p.X
- }
- return e
-}
+func unparen(e ast.Expr) ast.Expr { return astutil.Unparen(e) }
-func isInterface(T types.Type) bool {
- _, ok := T.Underlying().(*types.Interface)
- return ok
-}
+func isInterface(T types.Type) bool { return types.IsInterface(T) }