From e83451b9c2f0bffdd8ae5e5889d982ad960a81f4 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 14 Jan 2015 17:24:45 -0800 Subject: dashboard: rearrange tree, in prep for packagification Package main binaries now go under cmd, leaving the top level for similarly-named packages. I'll be moving a lot of data structures and functions out of the commands and into common packages. A future CL will also unify all the "+build whatever" restrictions into one common build tag, since there will be so much package sharing coming up. Probably they will become "+build extdep", to indicate that they have dependencies outside of x/tools. Change-Id: Idc198e0dfa933b5f5de8f2b581533e8d299d2293 Reviewed-on: https://go-review.googlesource.com/2738 Reviewed-by: Andrew Gerrand --- dashboard/README | 29 +- dashboard/builder/bench.go | 256 ---- dashboard/builder/doc.go | 58 - dashboard/builder/env.go | 299 ----- dashboard/builder/exec.go | 99 -- dashboard/builder/filemutex_flock.go | 66 - dashboard/builder/filemutex_local.go | 27 - dashboard/builder/filemutex_windows.go | 105 -- dashboard/builder/http.go | 225 ---- dashboard/builder/main.go | 679 ----------- dashboard/builder/vcs.go | 225 ---- dashboard/buildlet/.gitignore | 5 - dashboard/buildlet/Makefile | 26 - dashboard/buildlet/README | 12 - dashboard/buildlet/buildlet.go | 351 ------ dashboard/buildlet/stage0/Makefile | 3 - dashboard/buildlet/stage0/stage0.go | 78 -- dashboard/cmd/builder/bench.go | 256 ++++ dashboard/cmd/builder/doc.go | 58 + dashboard/cmd/builder/env.go | 299 +++++ dashboard/cmd/builder/exec.go | 99 ++ dashboard/cmd/builder/filemutex_flock.go | 66 + dashboard/cmd/builder/filemutex_local.go | 27 + dashboard/cmd/builder/filemutex_windows.go | 105 ++ dashboard/cmd/builder/http.go | 225 ++++ dashboard/cmd/builder/main.go | 679 +++++++++++ dashboard/cmd/builder/vcs.go | 225 ++++ dashboard/cmd/buildlet/.gitignore | 5 + dashboard/cmd/buildlet/Makefile | 26 + dashboard/cmd/buildlet/README | 12 + dashboard/cmd/buildlet/buildlet.go | 351 ++++++ dashboard/cmd/buildlet/stage0/Makefile | 3 + dashboard/cmd/buildlet/stage0/stage0.go | 78 ++ dashboard/cmd/coordinator/.gitignore | 3 + dashboard/cmd/coordinator/Makefile | 9 + dashboard/cmd/coordinator/buildongce/create.go | 299 +++++ dashboard/cmd/coordinator/coordinator.go | 1540 ++++++++++++++++++++++++ dashboard/cmd/retrybuilds/retrybuilds.go | 235 ++++ dashboard/cmd/upload/upload.go | 132 ++ dashboard/coordinator/Makefile | 9 - dashboard/coordinator/buildongce/create.go | 299 ----- dashboard/coordinator/main.go | 1540 ------------------------ dashboard/env/commit-watcher/Makefile | 2 +- dashboard/env/linux-x86-base/Makefile | 2 +- dashboard/env/linux-x86-clang/Makefile | 2 +- dashboard/env/linux-x86-gccgo/Makefile | 2 +- dashboard/env/linux-x86-nacl/Makefile | 2 +- dashboard/env/linux-x86-sid/Makefile | 2 +- dashboard/retrybuilds/retrybuilds.go | 235 ---- dashboard/updater/updater.go | 128 -- dashboard/upload/upload.go | 132 -- 51 files changed, 4757 insertions(+), 4873 deletions(-) delete mode 100644 dashboard/builder/bench.go delete mode 100644 dashboard/builder/doc.go delete mode 100644 dashboard/builder/env.go delete mode 100644 dashboard/builder/exec.go delete mode 100644 dashboard/builder/filemutex_flock.go delete mode 100644 dashboard/builder/filemutex_local.go delete mode 100644 dashboard/builder/filemutex_windows.go delete mode 100644 dashboard/builder/http.go delete mode 100644 dashboard/builder/main.go delete mode 100644 dashboard/builder/vcs.go delete mode 100644 
dashboard/buildlet/.gitignore delete mode 100644 dashboard/buildlet/Makefile delete mode 100644 dashboard/buildlet/README delete mode 100644 dashboard/buildlet/buildlet.go delete mode 100644 dashboard/buildlet/stage0/Makefile delete mode 100644 dashboard/buildlet/stage0/stage0.go create mode 100644 dashboard/cmd/builder/bench.go create mode 100644 dashboard/cmd/builder/doc.go create mode 100644 dashboard/cmd/builder/env.go create mode 100644 dashboard/cmd/builder/exec.go create mode 100644 dashboard/cmd/builder/filemutex_flock.go create mode 100644 dashboard/cmd/builder/filemutex_local.go create mode 100644 dashboard/cmd/builder/filemutex_windows.go create mode 100644 dashboard/cmd/builder/http.go create mode 100644 dashboard/cmd/builder/main.go create mode 100644 dashboard/cmd/builder/vcs.go create mode 100644 dashboard/cmd/buildlet/.gitignore create mode 100644 dashboard/cmd/buildlet/Makefile create mode 100644 dashboard/cmd/buildlet/README create mode 100644 dashboard/cmd/buildlet/buildlet.go create mode 100644 dashboard/cmd/buildlet/stage0/Makefile create mode 100644 dashboard/cmd/buildlet/stage0/stage0.go create mode 100644 dashboard/cmd/coordinator/.gitignore create mode 100644 dashboard/cmd/coordinator/Makefile create mode 100644 dashboard/cmd/coordinator/buildongce/create.go create mode 100644 dashboard/cmd/coordinator/coordinator.go create mode 100644 dashboard/cmd/retrybuilds/retrybuilds.go create mode 100644 dashboard/cmd/upload/upload.go delete mode 100644 dashboard/coordinator/Makefile delete mode 100644 dashboard/coordinator/buildongce/create.go delete mode 100644 dashboard/coordinator/main.go delete mode 100644 dashboard/retrybuilds/retrybuilds.go delete mode 100644 dashboard/updater/updater.go delete mode 100644 dashboard/upload/upload.go (limited to 'dashboard') diff --git a/dashboard/README b/dashboard/README index 4e596a0..7731947 100644 --- a/dashboard/README +++ b/dashboard/README @@ -4,28 +4,37 @@ The files in these directories constitute the continuous builder: -app/: a.k.a the "dashboard"; the App Engine code that runs http://build.golang.org/ -buildlet/: HTTP server that runs on a VM and is told what to write to disk +app/: a.k.a the "dashboard"; the App Engine code that runs http://build.golang.org/ + +cmd/: + + buildlet/: HTTP server that runs on a VM and is told what to write to disk and what command to run. This is cross-compiled to different architectures and is the first program run when a builder VM comes up. It then is contacted by the coordinator to do a build. Not all builders use the buildlet (at least not yet). -builder/: gobuilder, a Go continuous build client. The original Go builder program. -coordinator/: daemon that runs on CoreOS on Google Compute Engine and manages + + builder/: gobuilder, a Go continuous build client. The original Go builder program. + + coordinator/: daemon that runs on CoreOS on Google Compute Engine and manages builds using Docker containers and/or VMs as needed. + + retrybuilds/: a Go client program to delete build results from the dashboard (app) + + upload/: a Go program to upload to Google Cloud Storage. used by Makefiles elsewhere. + + watcher/: a daemon that watches for new commits to the Go repository and + its sub-repositories, and notifies the dashboard of those commits. + env/: configuration files describing the environment of builders and related binaries. Many builders are still configured ad-hoc, without a hermetic environment. 
-retrybuilds/: a Go client program to delete build results from the dashboard (app) + types/: a Go package contain common types used by other pieces. -upload/: a Go program to upload to Google Cloud Storage. used by Makefiles elsewhere. -watcher/: a daemon that watches for new commits to the Go repository and - its sub-repositories, and notifies the dashboard of those commits. + If you wish to run a Go builder, please email golang-dev@googlegroups.com first. There is documentation at https://golang.org/wiki/DashboardBuilders but depending on the type of builder, we may want to run it ourselves, after you prepare an environment description (resulting in a VM image) of it. See the env directory. - - diff --git a/dashboard/builder/bench.go b/dashboard/builder/bench.go deleted file mode 100644 index a9a59ce..0000000 --- a/dashboard/builder/bench.go +++ /dev/null @@ -1,256 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "bufio" - "bytes" - "fmt" - "io" - "io/ioutil" - "log" - "os" - "os/exec" - "path/filepath" - "regexp" - "strconv" - "strings" -) - -// benchHash benchmarks a single commit. -func (b *Builder) benchHash(hash string, benchs []string) error { - if *verbose { - log.Println(b.name, "benchmarking", hash) - } - - res := &PerfResult{Hash: hash, Benchmark: "meta-done"} - - // Create place in which to do work. - workpath := filepath.Join(*buildroot, b.name+"-"+hash[:12]) - // Prepare a workpath if we don't have one we can reuse. - update := false - if b.lastWorkpath != workpath { - if err := os.Mkdir(workpath, mkdirPerm); err != nil { - return err - } - buildLog, _, err := b.buildRepoOnHash(workpath, hash, makeCmd) - if err != nil { - removePath(workpath) - // record failure - res.Artifacts = append(res.Artifacts, PerfArtifact{"log", buildLog}) - return b.recordPerfResult(res) - } - b.lastWorkpath = workpath - update = true - } - - // Build the benchmark binary. - benchBin, buildLog, err := b.buildBenchmark(workpath, update) - if err != nil { - // record failure - res.Artifacts = append(res.Artifacts, PerfArtifact{"log", buildLog}) - return b.recordPerfResult(res) - } - - benchmark, procs, affinity, last := chooseBenchmark(benchBin, benchs) - if benchmark != "" { - res.Benchmark = fmt.Sprintf("%v-%v", benchmark, procs) - res.Metrics, res.Artifacts, res.OK = b.executeBenchmark(workpath, hash, benchBin, benchmark, procs, affinity) - if err = b.recordPerfResult(res); err != nil { - return fmt.Errorf("recordResult: %s", err) - } - } - - if last { - // All benchmarks have beed executed, don't need workpath anymore. - removePath(b.lastWorkpath) - b.lastWorkpath = "" - // Notify the app. - res = &PerfResult{Hash: hash, Benchmark: "meta-done", OK: true} - if err = b.recordPerfResult(res); err != nil { - return fmt.Errorf("recordResult: %s", err) - } - } - - return nil -} - -// buildBenchmark builds the benchmark binary. -func (b *Builder) buildBenchmark(workpath string, update bool) (benchBin, log string, err error) { - goroot := filepath.Join(workpath, "go") - gobin := filepath.Join(goroot, "bin", "go") + exeExt - gopath := filepath.Join(*buildroot, "gopath") - env := append([]string{ - "GOROOT=" + goroot, - "GOPATH=" + gopath}, - b.envv()...) - // First, download without installing. 
- args := []string{"get", "-d"} - if update { - args = append(args, "-u") - } - args = append(args, *benchPath) - var buildlog bytes.Buffer - runOpts := []runOpt{runTimeout(*buildTimeout), runEnv(env), allOutput(&buildlog), runDir(workpath)} - err = run(exec.Command(gobin, args...), runOpts...) - if err != nil { - fmt.Fprintf(&buildlog, "go get -d %s failed: %s", *benchPath, err) - return "", buildlog.String(), err - } - // Then, build into workpath. - benchBin = filepath.Join(workpath, "benchbin") + exeExt - args = []string{"build", "-o", benchBin, *benchPath} - buildlog.Reset() - err = run(exec.Command(gobin, args...), runOpts...) - if err != nil { - fmt.Fprintf(&buildlog, "go build %s failed: %s", *benchPath, err) - return "", buildlog.String(), err - } - return benchBin, "", nil -} - -// chooseBenchmark chooses the next benchmark to run -// based on the list of available benchmarks, already executed benchmarks -// and -benchcpu list. -func chooseBenchmark(benchBin string, doneBenchs []string) (bench string, procs, affinity int, last bool) { - var out bytes.Buffer - err := run(exec.Command(benchBin), allOutput(&out)) - if err != nil { - log.Printf("Failed to query benchmark list: %v\n%s", err, &out) - last = true - return - } - outStr := out.String() - nlIdx := strings.Index(outStr, "\n") - if nlIdx < 0 { - log.Printf("Failed to parse benchmark list (no new line): %s", outStr) - last = true - return - } - localBenchs := strings.Split(outStr[:nlIdx], ",") - benchsMap := make(map[string]bool) - for _, b := range doneBenchs { - benchsMap[b] = true - } - cnt := 0 - // We want to run all benchmarks with GOMAXPROCS=1 first. - for i, procs1 := range benchCPU { - for _, bench1 := range localBenchs { - if benchsMap[fmt.Sprintf("%v-%v", bench1, procs1)] { - continue - } - cnt++ - if cnt == 1 { - bench = bench1 - procs = procs1 - if i < len(benchAffinity) { - affinity = benchAffinity[i] - } - } - } - } - last = cnt <= 1 - return -} - -// executeBenchmark runs a single benchmark and parses its output. -func (b *Builder) executeBenchmark(workpath, hash, benchBin, bench string, procs, affinity int) (metrics []PerfMetric, artifacts []PerfArtifact, ok bool) { - // Benchmarks runs mutually exclusive with other activities. - benchMutex.RUnlock() - defer benchMutex.RLock() - benchMutex.Lock() - defer benchMutex.Unlock() - - log.Printf("%v executing benchmark %v-%v on %v", b.name, bench, procs, hash) - - // The benchmark executes 'go build'/'go tool', - // so we need properly setup env. - env := append([]string{ - "GOROOT=" + filepath.Join(workpath, "go"), - "PATH=" + filepath.Join(workpath, "go", "bin") + string(os.PathListSeparator) + os.Getenv("PATH"), - "GODEBUG=gctrace=1", // since Go1.2 - "GOGCTRACE=1", // before Go1.2 - fmt.Sprintf("GOMAXPROCS=%v", procs)}, - b.envv()...) - args := []string{ - "-bench", bench, - "-benchmem", strconv.Itoa(*benchMem), - "-benchtime", benchTime.String(), - "-benchnum", strconv.Itoa(*benchNum), - "-tmpdir", workpath} - if affinity != 0 { - args = append(args, "-affinity", strconv.Itoa(affinity)) - } - benchlog := new(bytes.Buffer) - err := run(exec.Command(benchBin, args...), runEnv(env), allOutput(benchlog), runDir(workpath)) - if strip := benchlog.Len() - 512<<10; strip > 0 { - // Leave the last 512K, that part contains metrics. 
- benchlog = bytes.NewBuffer(benchlog.Bytes()[strip:]) - } - artifacts = []PerfArtifact{{Type: "log", Body: benchlog.String()}} - if err != nil { - if err != nil { - log.Printf("Failed to execute benchmark '%v': %v", bench, err) - ok = false - } - return - } - - metrics1, artifacts1, err := parseBenchmarkOutput(benchlog) - if err != nil { - log.Printf("Failed to parse benchmark output: %v", err) - ok = false - return - } - metrics = metrics1 - artifacts = append(artifacts, artifacts1...) - ok = true - return -} - -// parseBenchmarkOutput fetches metrics and artifacts from benchmark output. -func parseBenchmarkOutput(out io.Reader) (metrics []PerfMetric, artifacts []PerfArtifact, err error) { - s := bufio.NewScanner(out) - metricRe := regexp.MustCompile("^GOPERF-METRIC:([a-z,0-9,-]+)=([0-9]+)$") - fileRe := regexp.MustCompile("^GOPERF-FILE:([a-z,0-9,-]+)=(.+)$") - for s.Scan() { - ln := s.Text() - if ss := metricRe.FindStringSubmatch(ln); ss != nil { - var v uint64 - v, err = strconv.ParseUint(ss[2], 10, 64) - if err != nil { - err = fmt.Errorf("Failed to parse metric '%v=%v': %v", ss[1], ss[2], err) - return - } - metrics = append(metrics, PerfMetric{Type: ss[1], Val: v}) - } else if ss := fileRe.FindStringSubmatch(ln); ss != nil { - var buf []byte - buf, err = ioutil.ReadFile(ss[2]) - if err != nil { - err = fmt.Errorf("Failed to read file '%v': %v", ss[2], err) - return - } - artifacts = append(artifacts, PerfArtifact{ss[1], string(buf)}) - } - } - return -} - -// needsBenchmarking determines whether the commit needs benchmarking. -func needsBenchmarking(log *HgLog) bool { - // Do not benchmark branch commits, they are usually not interesting - // and fall out of the trunk succession. - if log.Branch != "" { - return false - } - // Do not benchmark commits that do not touch source files (e.g. CONTRIBUTORS). - for _, f := range strings.Split(log.Files, " ") { - if (strings.HasPrefix(f, "include") || strings.HasPrefix(f, "src")) && - !strings.HasSuffix(f, "_test.go") && !strings.Contains(f, "testdata") { - return true - } - } - return false -} diff --git a/dashboard/builder/doc.go b/dashboard/builder/doc.go deleted file mode 100644 index e958e89..0000000 --- a/dashboard/builder/doc.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* - -Go Builder is a continuous build client for the Go project. -It integrates with the Go Dashboard AppEngine application. - -Go Builder is intended to run continuously as a background process. - -It periodically pulls updates from the Go Mercurial repository. - -When a newer revision is found, Go Builder creates a clone of the repository, -runs all.bash, and reports build success or failure to the Go Dashboard. - -For a release revision (a change description that matches "release.YYYY-MM-DD"), -Go Builder will create a tar.gz archive of the GOROOT and deliver it to the -Go Google Code project's downloads section. - -Usage: - - gobuilder goos-goarch... - - Several goos-goarch combinations can be provided, and the builder will - build them in serial. - -Optional flags: - - -dashboard="godashboard.appspot.com": Go Dashboard Host - The location of the Go Dashboard application to which Go Builder will - report its results. 
- - -release: Build and deliver binary release archive - - -rev=N: Build revision N and exit - - -cmd="./all.bash": Build command (specify absolute or relative to go/src) - - -v: Verbose logging - - -external: External package builder mode (will not report Go build - state to dashboard or issue releases) - -The key file should be located at $HOME/.gobuildkey or, for a builder-specific -key, $HOME/.gobuildkey-$BUILDER (eg, $HOME/.gobuildkey-linux-amd64). - -The build key file is a text file of the format: - - godashboard-key - googlecode-username - googlecode-password - -If the Google Code credentials are not provided the archival step -will be skipped. - -*/ -package main // import "golang.org/x/tools/dashboard/builder" diff --git a/dashboard/builder/env.go b/dashboard/builder/env.go deleted file mode 100644 index 7261229..0000000 --- a/dashboard/builder/env.go +++ /dev/null @@ -1,299 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "bytes" - "fmt" - "os" - "os/exec" - "path/filepath" - "regexp" - "runtime" - "strings" - - "golang.org/x/tools/go/vcs" -) - -// builderEnv represents the environment that a Builder will run tests in. -type builderEnv interface { - // setup sets up the builder environment and returns the directory to run the buildCmd in. - setup(repo *Repo, workpath, hash string, envv []string) (string, error) -} - -// goEnv represents the builderEnv for the main Go repo. -type goEnv struct { - goos, goarch string -} - -func (b *Builder) envv() []string { - if runtime.GOOS == "windows" { - return b.envvWindows() - } - - var e []string - if *buildTool == "go" { - e = []string{ - "GOOS=" + b.goos, - "GOARCH=" + b.goarch, - "GOROOT_FINAL=/usr/local/go", - } - switch b.goos { - case "android", "nacl": - // Cross compile. - default: - // If we are building, for example, linux/386 on a linux/amd64 machine we want to - // make sure that the whole build is done as a if this were compiled on a real - // linux/386 machine. In other words, we want to not do a cross compilation build. - // To do this we set GOHOSTOS and GOHOSTARCH to override the detection in make.bash. - // - // The exception to this rule is when we are doing nacl/android builds. These are by - // definition always cross compilation, and we have support built into cmd/go to be - // able to handle this case. - e = append(e, "GOHOSTOS="+b.goos, "GOHOSTARCH="+b.goarch) - } - } - - for _, k := range extraEnv() { - if s, ok := getenvOk(k); ok { - e = append(e, k+"="+s) - } - } - return e -} - -func (b *Builder) envvWindows() []string { - var start map[string]string - if *buildTool == "go" { - start = map[string]string{ - "GOOS": b.goos, - "GOHOSTOS": b.goos, - "GOARCH": b.goarch, - "GOHOSTARCH": b.goarch, - "GOROOT_FINAL": `c:\go`, - "GOBUILDEXIT": "1", // exit all.bat with completion status. 
- } - } - - for _, name := range extraEnv() { - if s, ok := getenvOk(name); ok { - start[name] = s - } - } - if b.goos == "windows" { - switch b.goarch { - case "amd64": - start["PATH"] = `c:\TDM-GCC-64\bin;` + start["PATH"] - case "386": - start["PATH"] = `c:\TDM-GCC-32\bin;` + start["PATH"] - } - } - skip := map[string]bool{ - "GOBIN": true, - "GOPATH": true, - "GOROOT": true, - "INCLUDE": true, - "LIB": true, - } - var e []string - for name, v := range start { - e = append(e, name+"="+v) - skip[name] = true - } - for _, kv := range os.Environ() { - s := strings.SplitN(kv, "=", 2) - name := strings.ToUpper(s[0]) - switch { - case name == "": - // variables, like "=C:=C:\", just copy them - e = append(e, kv) - case !skip[name]: - e = append(e, kv) - skip[name] = true - } - } - return e -} - -// setup for a goEnv clones the main go repo to workpath/go at the provided hash -// and returns the path workpath/go/src, the location of all go build scripts. -func (env *goEnv) setup(repo *Repo, workpath, hash string, envv []string) (string, error) { - goworkpath := filepath.Join(workpath, "go") - if err := repo.Export(goworkpath, hash); err != nil { - return "", fmt.Errorf("error exporting repository: %s", err) - } - return filepath.Join(goworkpath, "src"), nil -} - -// gccgoEnv represents the builderEnv for the gccgo compiler. -type gccgoEnv struct{} - -// setup for a gccgoEnv clones the gofrontend repo to workpath/go at the hash -// and clones the latest GCC branch to repo.Path/gcc. The gccgo sources are -// replaced with the updated sources in the gofrontend repo and gcc gets -// gets configured and built in workpath/gcc-objdir. The path to -// workpath/gcc-objdir is returned. -func (env *gccgoEnv) setup(repo *Repo, workpath, hash string, envv []string) (string, error) { - gccpath := filepath.Join(repo.Path, "gcc") - - // get a handle to Git vcs.Cmd for pulling down GCC from the mirror. - git := vcs.ByCmd("git") - - // only pull down gcc if we don't have a local copy. - if _, err := os.Stat(gccpath); err != nil { - if err := timeout(*cmdTimeout, func() error { - // pull down a working copy of GCC. - - cloneCmd := []string{ - "clone", - // This is just a guess since there are ~6000 commits to - // GCC per year. It's likely there will be enough history - // to cross-reference the Gofrontend commit against GCC. - // The disadvantage would be if the commit being built is more than - // a year old; in this case, the user should make a clone that has - // the full history. - "--depth", "6000", - // We only care about the master branch. - "--branch", "master", "--single-branch", - *gccPath, - } - - // Clone Kind Clone Time(Dry run) Clone Size - // --------------------------------------------------------------- - // Full Clone 10 - 15 min 2.2 GiB - // Master Branch 2 - 3 min 1.5 GiB - // Full Clone(shallow) 1 min 900 MiB - // Master Branch(shallow) 40 sec 900 MiB - // - // The shallow clones have the same size, which is expected, - // but the full shallow clone will only have 6000 commits - // spread across all branches. There are ~50 branches. - return run(exec.Command("git", cloneCmd...), runEnv(envv), allOutput(os.Stdout), runDir(repo.Path)) - }); err != nil { - return "", err - } - } - - if err := git.Download(gccpath); err != nil { - return "", err - } - - // get the modified files for this commit. 
- - var buf bytes.Buffer - if err := run(exec.Command("hg", "status", "--no-status", "--change", hash), - allOutput(&buf), runDir(repo.Path), runEnv(envv)); err != nil { - return "", fmt.Errorf("Failed to find the modified files for %s: %s", hash, err) - } - modifiedFiles := strings.Split(buf.String(), "\n") - var isMirrored bool - for _, f := range modifiedFiles { - if strings.HasPrefix(f, "go/") || strings.HasPrefix(f, "libgo/") { - isMirrored = true - break - } - } - - // use git log to find the corresponding commit to sync to in the gcc mirror. - // If the files modified in the gofrontend are mirrored to gcc, we expect a - // commit with a similar description in the gcc mirror. If the files modified are - // not mirrored, e.g. in support/, we can sync to the most recent gcc commit that - // occurred before those files were modified to verify gccgo's status at that point. - logCmd := []string{ - "log", - "-1", - "--format=%H", - } - var errMsg string - if isMirrored { - commitDesc, err := repo.Master.VCS.LogAtRev(repo.Path, hash, "{desc|firstline|escape}") - if err != nil { - return "", err - } - - quotedDesc := regexp.QuoteMeta(string(commitDesc)) - logCmd = append(logCmd, "--grep", quotedDesc, "--regexp-ignore-case", "--extended-regexp") - errMsg = fmt.Sprintf("Failed to find a commit with a similar description to '%s'", string(commitDesc)) - } else { - commitDate, err := repo.Master.VCS.LogAtRev(repo.Path, hash, "{date|rfc3339date}") - if err != nil { - return "", err - } - - logCmd = append(logCmd, "--before", string(commitDate)) - errMsg = fmt.Sprintf("Failed to find a commit before '%s'", string(commitDate)) - } - - buf.Reset() - if err := run(exec.Command("git", logCmd...), runEnv(envv), allOutput(&buf), runDir(gccpath)); err != nil { - return "", fmt.Errorf("%s: %s", errMsg, err) - } - gccRev := buf.String() - if gccRev == "" { - return "", fmt.Errorf(errMsg) - } - - // checkout gccRev - // TODO(cmang): Fix this to work in parallel mode. - if err := run(exec.Command("git", "reset", "--hard", strings.TrimSpace(gccRev)), runEnv(envv), runDir(gccpath)); err != nil { - return "", fmt.Errorf("Failed to checkout commit at revision %s: %s", gccRev, err) - } - - // make objdir to work in - gccobjdir := filepath.Join(workpath, "gcc-objdir") - if err := os.Mkdir(gccobjdir, mkdirPerm); err != nil { - return "", err - } - - // configure GCC with substituted gofrontend and libgo - if err := run(exec.Command(filepath.Join(gccpath, "configure"), - "--enable-languages=c,c++,go", - "--disable-bootstrap", - "--disable-multilib", - ), runEnv(envv), runDir(gccobjdir)); err != nil { - return "", fmt.Errorf("Failed to configure GCC: %v", err) - } - - // build gcc - if err := run(exec.Command("make", *gccOpts), runTimeout(*buildTimeout), runEnv(envv), runDir(gccobjdir)); err != nil { - return "", fmt.Errorf("Failed to build GCC: %s", err) - } - - return gccobjdir, nil -} - -func getenvOk(k string) (v string, ok bool) { - v = os.Getenv(k) - if v != "" { - return v, true - } - keq := k + "=" - for _, kv := range os.Environ() { - if kv == keq { - return "", true - } - } - return "", false -} - -// extraEnv returns environment variables that need to be copied from -// the gobuilder's environment to the envv of its subprocesses. 
-func extraEnv() []string { - extra := []string{ - "GOARM", - "GO386", - "GOROOT_BOOTSTRAP", // See https://golang.org/s/go15bootstrap - "CGO_ENABLED", - "CC", - "CC_FOR_TARGET", - "PATH", - "TMPDIR", - "USER", - } - if runtime.GOOS == "plan9" { - extra = append(extra, "objtype", "cputype", "path") - } - return extra -} diff --git a/dashboard/builder/exec.go b/dashboard/builder/exec.go deleted file mode 100644 index 1b46ed1..0000000 --- a/dashboard/builder/exec.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "fmt" - "io" - "log" - "os/exec" - "time" -) - -// run runs a command with optional arguments. -func run(cmd *exec.Cmd, opts ...runOpt) error { - a := runArgs{cmd, *cmdTimeout} - for _, opt := range opts { - opt.modArgs(&a) - } - if *verbose { - log.Printf("running %v in %v", a.cmd.Args, a.cmd.Dir) - } - if err := cmd.Start(); err != nil { - log.Printf("failed to start command %v: %v", a.cmd.Args, err) - return err - } - err := timeout(a.timeout, cmd.Wait) - if _, ok := err.(timeoutError); ok { - cmd.Process.Kill() - } - return err -} - -// Zero or more runOpts can be passed to run to modify the command -// before it's run. -type runOpt interface { - modArgs(*runArgs) -} - -// allOutput sends both stdout and stderr to w. -func allOutput(w io.Writer) optFunc { - return func(a *runArgs) { - a.cmd.Stdout = w - a.cmd.Stderr = w - } -} - -func runTimeout(timeout time.Duration) optFunc { - return func(a *runArgs) { - a.timeout = timeout - } -} - -func runDir(dir string) optFunc { - return func(a *runArgs) { - a.cmd.Dir = dir - } -} - -func runEnv(env []string) optFunc { - return func(a *runArgs) { - a.cmd.Env = env - } -} - -// timeout runs f and returns its error value, or if the function does not -// complete before the provided duration it returns a timeout error. -func timeout(d time.Duration, f func() error) error { - errc := make(chan error, 1) - go func() { - errc <- f() - }() - t := time.NewTimer(d) - defer t.Stop() - select { - case <-t.C: - return timeoutError(d) - case err := <-errc: - return err - } -} - -type timeoutError time.Duration - -func (e timeoutError) Error() string { - return fmt.Sprintf("timed out after %v", time.Duration(e)) -} - -// optFunc implements runOpt with a function, like http.HandlerFunc. -type optFunc func(*runArgs) - -func (f optFunc) modArgs(a *runArgs) { f(a) } - -// internal detail to exec.go: -type runArgs struct { - cmd *exec.Cmd - timeout time.Duration -} diff --git a/dashboard/builder/filemutex_flock.go b/dashboard/builder/filemutex_flock.go deleted file mode 100644 index 68851b8..0000000 --- a/dashboard/builder/filemutex_flock.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd linux netbsd openbsd - -package main - -import ( - "sync" - "syscall" -) - -// FileMutex is similar to sync.RWMutex, but also synchronizes across processes. -// This implementation is based on flock syscall. 
-type FileMutex struct { - mu sync.RWMutex - fd int -} - -func MakeFileMutex(filename string) *FileMutex { - if filename == "" { - return &FileMutex{fd: -1} - } - fd, err := syscall.Open(filename, syscall.O_CREAT|syscall.O_RDONLY, mkdirPerm) - if err != nil { - panic(err) - } - return &FileMutex{fd: fd} -} - -func (m *FileMutex) Lock() { - m.mu.Lock() - if m.fd != -1 { - if err := syscall.Flock(m.fd, syscall.LOCK_EX); err != nil { - panic(err) - } - } -} - -func (m *FileMutex) Unlock() { - if m.fd != -1 { - if err := syscall.Flock(m.fd, syscall.LOCK_UN); err != nil { - panic(err) - } - } - m.mu.Unlock() -} - -func (m *FileMutex) RLock() { - m.mu.RLock() - if m.fd != -1 { - if err := syscall.Flock(m.fd, syscall.LOCK_SH); err != nil { - panic(err) - } - } -} - -func (m *FileMutex) RUnlock() { - if m.fd != -1 { - if err := syscall.Flock(m.fd, syscall.LOCK_UN); err != nil { - panic(err) - } - } - m.mu.RUnlock() -} diff --git a/dashboard/builder/filemutex_local.go b/dashboard/builder/filemutex_local.go deleted file mode 100644 index 68cfb62..0000000 --- a/dashboard/builder/filemutex_local.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build nacl plan9 solaris - -package main - -import ( - "log" - "sync" -) - -// FileMutex is similar to sync.RWMutex, but also synchronizes across processes. -// This implementation is a fallback that does not actually provide inter-process synchronization. -type FileMutex struct { - sync.RWMutex -} - -func MakeFileMutex(filename string) *FileMutex { - return &FileMutex{} -} - -func init() { - log.Printf("WARNING: using fake file mutex." + - " Don't run more than one of these at once!!!") -} diff --git a/dashboard/builder/filemutex_windows.go b/dashboard/builder/filemutex_windows.go deleted file mode 100644 index 1f058b2..0000000 --- a/dashboard/builder/filemutex_windows.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "sync" - "syscall" - "unsafe" -) - -var ( - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - procLockFileEx = modkernel32.NewProc("LockFileEx") - procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") -) - -const ( - INVALID_FILE_HANDLE = ^syscall.Handle(0) - LOCKFILE_EXCLUSIVE_LOCK = 2 -) - -func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procUnlockFileEx.Addr(), 5, uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -// FileMutex is similar to sync.RWMutex, but also synchronizes across processes. -// This implementation is based on flock syscall. 
-type FileMutex struct { - mu sync.RWMutex - fd syscall.Handle -} - -func MakeFileMutex(filename string) *FileMutex { - if filename == "" { - return &FileMutex{fd: INVALID_FILE_HANDLE} - } - fd, err := syscall.CreateFile(&(syscall.StringToUTF16(filename)[0]), syscall.GENERIC_READ|syscall.GENERIC_WRITE, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, nil, syscall.OPEN_ALWAYS, syscall.FILE_ATTRIBUTE_NORMAL, 0) - if err != nil { - panic(err) - } - return &FileMutex{fd: fd} -} - -func (m *FileMutex) Lock() { - m.mu.Lock() - if m.fd != INVALID_FILE_HANDLE { - var ol syscall.Overlapped - if err := lockFileEx(m.fd, LOCKFILE_EXCLUSIVE_LOCK, 0, 1, 0, &ol); err != nil { - panic(err) - } - } -} - -func (m *FileMutex) Unlock() { - if m.fd != INVALID_FILE_HANDLE { - var ol syscall.Overlapped - if err := unlockFileEx(m.fd, 0, 1, 0, &ol); err != nil { - panic(err) - } - } - m.mu.Unlock() -} - -func (m *FileMutex) RLock() { - m.mu.RLock() - if m.fd != INVALID_FILE_HANDLE { - var ol syscall.Overlapped - if err := lockFileEx(m.fd, 0, 0, 1, 0, &ol); err != nil { - panic(err) - } - } -} - -func (m *FileMutex) RUnlock() { - if m.fd != INVALID_FILE_HANDLE { - var ol syscall.Overlapped - if err := unlockFileEx(m.fd, 0, 1, 0, &ol); err != nil { - panic(err) - } - } - m.mu.RUnlock() -} diff --git a/dashboard/builder/http.go b/dashboard/builder/http.go deleted file mode 100644 index 8d0923c..0000000 --- a/dashboard/builder/http.go +++ /dev/null @@ -1,225 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "log" - "net/http" - "net/url" - "time" -) - -const builderVersion = 1 // keep in sync with dashboard/app/build/handler.go - -type obj map[string]interface{} - -// dash runs the given method and command on the dashboard. -// If args is non-nil it is encoded as the URL query string. -// If req is non-nil it is JSON-encoded and passed as the body of the HTTP POST. -// If resp is non-nil the server's response is decoded into the value pointed -// to by resp (resp must be a pointer). -func dash(meth, cmd string, args url.Values, req, resp interface{}) error { - argsCopy := url.Values{"version": {fmt.Sprint(builderVersion)}} - for k, v := range args { - if k == "version" { - panic(`dash: reserved args key: "version"`) - } - argsCopy[k] = v - } - var r *http.Response - var err error - if *verbose { - log.Println("dash <-", meth, cmd, argsCopy, req) - } - cmd = *dashboard + "/" + cmd + "?" + argsCopy.Encode() - switch meth { - case "GET": - if req != nil { - log.Panicf("%s to %s with req", meth, cmd) - } - r, err = http.Get(cmd) - case "POST": - var body io.Reader - if req != nil { - b, err := json.Marshal(req) - if err != nil { - return err - } - body = bytes.NewBuffer(b) - } - r, err = http.Post(cmd, "text/json", body) - default: - log.Panicf("%s: invalid method %q", cmd, meth) - panic("invalid method: " + meth) - } - if err != nil { - return err - } - defer r.Body.Close() - if r.StatusCode != http.StatusOK { - return fmt.Errorf("bad http response: %v", r.Status) - } - body := new(bytes.Buffer) - if _, err := body.ReadFrom(r.Body); err != nil { - return err - } - - // Read JSON-encoded Response into provided resp - // and return an error if present. - var result = struct { - Response interface{} - Error string - }{ - // Put the provided resp in here as it can be a pointer to - // some value we should unmarshal into. 
- Response: resp, - } - if err = json.Unmarshal(body.Bytes(), &result); err != nil { - log.Printf("json unmarshal %#q: %s\n", body.Bytes(), err) - return err - } - if *verbose { - log.Println("dash ->", result) - } - if result.Error != "" { - return errors.New(result.Error) - } - - return nil -} - -// todo returns the next hash to build or benchmark. -func (b *Builder) todo(kinds []string, pkg, goHash string) (kind, rev string, benchs []string, err error) { - args := url.Values{ - "builder": {b.name}, - "packagePath": {pkg}, - "goHash": {goHash}, - } - for _, k := range kinds { - args.Add("kind", k) - } - var resp *struct { - Kind string - Data struct { - Hash string - PerfResults []string - } - } - if err = dash("GET", "todo", args, nil, &resp); err != nil { - return - } - if resp == nil { - return - } - if *verbose { - fmt.Printf("dash resp: %+v\n", *resp) - } - for _, k := range kinds { - if k == resp.Kind { - return resp.Kind, resp.Data.Hash, resp.Data.PerfResults, nil - } - } - err = fmt.Errorf("expecting Kinds %q, got %q", kinds, resp.Kind) - return -} - -// recordResult sends build results to the dashboard -func (b *Builder) recordResult(ok bool, pkg, hash, goHash, buildLog string, runTime time.Duration) error { - if !*report { - return nil - } - req := obj{ - "Builder": b.name, - "PackagePath": pkg, - "Hash": hash, - "GoHash": goHash, - "OK": ok, - "Log": buildLog, - "RunTime": runTime, - } - args := url.Values{"key": {b.key}, "builder": {b.name}} - return dash("POST", "result", args, req, nil) -} - -// Result of running a single benchmark on a single commit. -type PerfResult struct { - Builder string - Benchmark string - Hash string - OK bool - Metrics []PerfMetric - Artifacts []PerfArtifact -} - -type PerfMetric struct { - Type string - Val uint64 -} - -type PerfArtifact struct { - Type string - Body string -} - -// recordPerfResult sends benchmarking results to the dashboard -func (b *Builder) recordPerfResult(req *PerfResult) error { - if !*report { - return nil - } - req.Builder = b.name - args := url.Values{"key": {b.key}, "builder": {b.name}} - return dash("POST", "perf-result", args, req, nil) -} - -func postCommit(key, pkg string, l *HgLog) error { - if !*report { - return nil - } - t, err := time.Parse(time.RFC3339, l.Date) - if err != nil { - return fmt.Errorf("parsing %q: %v", l.Date, t) - } - return dash("POST", "commit", url.Values{"key": {key}}, obj{ - "PackagePath": pkg, - "Hash": l.Hash, - "ParentHash": l.Parent, - "Time": t.Format(time.RFC3339), - "User": l.Author, - "Desc": l.Desc, - "NeedsBenchmarking": l.bench, - }, nil) -} - -func dashboardCommit(pkg, hash string) bool { - err := dash("GET", "commit", url.Values{ - "packagePath": {pkg}, - "hash": {hash}, - }, nil, nil) - return err == nil -} - -func dashboardPackages(kind string) []string { - args := url.Values{"kind": []string{kind}} - var resp []struct { - Path string - } - if err := dash("GET", "packages", args, nil, &resp); err != nil { - log.Println("dashboardPackages:", err) - return nil - } - if *verbose { - fmt.Printf("dash resp: %+v\n", resp) - } - var pkgs []string - for _, r := range resp { - pkgs = append(pkgs, r.Path) - } - return pkgs -} diff --git a/dashboard/builder/main.go b/dashboard/builder/main.go deleted file mode 100644 index 9e7c1ed..0000000 --- a/dashboard/builder/main.go +++ /dev/null @@ -1,679 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package main - -import ( - "bytes" - "flag" - "fmt" - "io" - "io/ioutil" - "log" - "net" - "net/http" - "os" - "os/exec" - "path/filepath" - "regexp" - "runtime" - "strconv" - "strings" - "time" - - "golang.org/x/tools/go/vcs" -) - -const ( - codeProject = "go" - codePyScript = "misc/dashboard/googlecode_upload.py" - gofrontendImportPath = "code.google.com/p/gofrontend" - mkdirPerm = 0750 - waitInterval = 30 * time.Second // time to wait before checking for new revs - pkgBuildInterval = 24 * time.Hour // rebuild packages every 24 hours -) - -type Builder struct { - goroot *Repo - name string - goos, goarch string - key string - env builderEnv - // Last benchmarking workpath. We reuse it, if do successive benchmarks on the same commit. - lastWorkpath string -} - -var ( - doBuild = flag.Bool("build", true, "Build and test packages") - doBench = flag.Bool("bench", false, "Run benchmarks") - buildroot = flag.String("buildroot", defaultBuildRoot(), "Directory under which to build") - dashboard = flag.String("dashboard", "https://build.golang.org", "Dashboard app base path") - buildRelease = flag.Bool("release", false, "Build and upload binary release archives") - buildRevision = flag.String("rev", "", "Build specified revision and exit") - buildCmd = flag.String("cmd", filepath.Join(".", allCmd), "Build command (specify relative to go/src/)") - buildTool = flag.String("tool", "go", "Tool to build.") - gcPath = flag.String("gcpath", "go.googlesource.com/go", "Path to download gc from") - gccPath = flag.String("gccpath", "https://github.com/mirrors/gcc.git", "Path to download gcc from") - gccOpts = flag.String("gccopts", "", "Command-line options to pass to `make` when building gccgo") - benchPath = flag.String("benchpath", "golang.org/x/benchmarks/bench", "Path to download benchmarks from") - failAll = flag.Bool("fail", false, "fail all builds") - parallel = flag.Bool("parallel", false, "Build multiple targets in parallel") - buildTimeout = flag.Duration("buildTimeout", 60*time.Minute, "Maximum time to wait for builds and tests") - cmdTimeout = flag.Duration("cmdTimeout", 10*time.Minute, "Maximum time to wait for an external command") - benchNum = flag.Int("benchnum", 5, "Run each benchmark that many times") - benchTime = flag.Duration("benchtime", 5*time.Second, "Benchmarking time for a single benchmark run") - benchMem = flag.Int("benchmem", 64, "Approx RSS value to aim at in benchmarks, in MB") - fileLock = flag.String("filelock", "", "File to lock around benchmaring (synchronizes several builders)") - verbose = flag.Bool("v", false, "verbose") - report = flag.Bool("report", true, "whether to report results to the dashboard") -) - -var ( - binaryTagRe = regexp.MustCompile(`^(release\.r|weekly\.)[0-9\-.]+`) - releaseRe = regexp.MustCompile(`^release\.r[0-9\-.]+`) - allCmd = "all" + suffix - makeCmd = "make" + suffix - raceCmd = "race" + suffix - cleanCmd = "clean" + suffix - suffix = defaultSuffix() - exeExt = defaultExeExt() - - benchCPU = CpuList([]int{1}) - benchAffinity = CpuList([]int{}) - benchMutex *FileMutex // Isolates benchmarks from other activities -) - -// CpuList is used as flag.Value for -benchcpu flag. 
-type CpuList []int - -func (cl *CpuList) String() string { - str := "" - for _, cpu := range *cl { - if str == "" { - str = strconv.Itoa(cpu) - } else { - str += fmt.Sprintf(",%v", cpu) - } - } - return str -} - -func (cl *CpuList) Set(str string) error { - *cl = []int{} - for _, val := range strings.Split(str, ",") { - val = strings.TrimSpace(val) - if val == "" { - continue - } - cpu, err := strconv.Atoi(val) - if err != nil || cpu <= 0 { - return fmt.Errorf("%v is a bad value for GOMAXPROCS", val) - } - *cl = append(*cl, cpu) - } - if len(*cl) == 0 { - *cl = append(*cl, 1) - } - return nil -} - -func main() { - flag.Var(&benchCPU, "benchcpu", "Comma-delimited list of GOMAXPROCS values for benchmarking") - flag.Var(&benchAffinity, "benchaffinity", "Comma-delimited list of affinity values for benchmarking") - flag.Usage = func() { - fmt.Fprintf(os.Stderr, "usage: %s goos-goarch...\n", os.Args[0]) - flag.PrintDefaults() - os.Exit(2) - } - flag.Parse() - if len(flag.Args()) == 0 { - flag.Usage() - } - - vcs.ShowCmd = *verbose - vcs.Verbose = *verbose - - benchMutex = MakeFileMutex(*fileLock) - - rr, err := repoForTool() - if err != nil { - log.Fatal("Error finding repository:", err) - } - rootPath := filepath.Join(*buildroot, "goroot") - goroot := &Repo{ - Path: rootPath, - Master: rr, - } - - // set up work environment, use existing environment if possible - if goroot.Exists() || *failAll { - log.Print("Found old workspace, will use it") - } else { - if err := os.RemoveAll(*buildroot); err != nil { - log.Fatalf("Error removing build root (%s): %s", *buildroot, err) - } - if err := os.Mkdir(*buildroot, mkdirPerm); err != nil { - log.Fatalf("Error making build root (%s): %s", *buildroot, err) - } - var err error - goroot, err = RemoteRepo(goroot.Master.Root, rootPath) - if err != nil { - log.Fatalf("Error creating repository with url (%s): %s", goroot.Master.Root, err) - } - - goroot, err = goroot.Clone(goroot.Path, "") - if err != nil { - log.Fatal("Error cloning repository:", err) - } - } - - // set up builders - builders := make([]*Builder, len(flag.Args())) - for i, name := range flag.Args() { - b, err := NewBuilder(goroot, name) - if err != nil { - log.Fatal(err) - } - builders[i] = b - } - - if *failAll { - failMode(builders) - return - } - - // if specified, build revision and return - if *buildRevision != "" { - hash, err := goroot.FullHash(*buildRevision) - if err != nil { - log.Fatal("Error finding revision: ", err) - } - var exitErr error - for _, b := range builders { - if err := b.buildHash(hash); err != nil { - log.Println(err) - exitErr = err - } - } - if exitErr != nil && !*report { - // This mode (-report=false) is used for - // testing Docker images, making sure the - // environment is correctly configured. 
For - // testing, we want a non-zero exit status, as - // returned by log.Fatal: - log.Fatal("Build error.") - } - return - } - - if !*doBuild && !*doBench { - fmt.Fprintf(os.Stderr, "Nothing to do, exiting (specify either -build or -bench or both)\n") - os.Exit(2) - } - - // go continuous build mode - // check for new commits and build them - benchMutex.RLock() - for { - built := false - t := time.Now() - if *parallel { - done := make(chan bool) - for _, b := range builders { - go func(b *Builder) { - done <- b.buildOrBench() - }(b) - } - for _ = range builders { - built = <-done || built - } - } else { - for _, b := range builders { - built = b.buildOrBench() || built - } - } - // sleep if there was nothing to build - benchMutex.RUnlock() - if !built { - time.Sleep(waitInterval) - } - benchMutex.RLock() - // sleep if we're looping too fast. - dt := time.Now().Sub(t) - if dt < waitInterval { - time.Sleep(waitInterval - dt) - } - } -} - -// go continuous fail mode -// check for new commits and FAIL them -func failMode(builders []*Builder) { - for { - built := false - for _, b := range builders { - built = b.failBuild() || built - } - // stop if there was nothing to fail - if !built { - break - } - } -} - -func NewBuilder(goroot *Repo, name string) (*Builder, error) { - b := &Builder{ - goroot: goroot, - name: name, - } - - // get builderEnv for this tool - var err error - if b.env, err = b.builderEnv(name); err != nil { - return nil, err - } - if *report { - err = b.setKey() - } - return b, err -} - -func (b *Builder) setKey() error { - // read keys from keyfile - fn := "" - switch runtime.GOOS { - case "plan9": - fn = os.Getenv("home") - case "windows": - fn = os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH") - default: - fn = os.Getenv("HOME") - } - fn = filepath.Join(fn, ".gobuildkey") - if s := fn + "-" + b.name; isFile(s) { // builder-specific file - fn = s - } - c, err := ioutil.ReadFile(fn) - if err != nil { - // If the on-disk file doesn't exist, also try the - // Google Compute Engine metadata. - if v := gceProjectMetadata("buildkey-" + b.name); v != "" { - b.key = v - return nil - } - return fmt.Errorf("readKeys %s (%s): %s", b.name, fn, err) - } - b.key = string(bytes.TrimSpace(bytes.SplitN(c, []byte("\n"), 2)[0])) - return nil -} - -func gceProjectMetadata(attr string) string { - client := &http.Client{ - Transport: &http.Transport{ - Dial: (&net.Dialer{ - Timeout: 750 * time.Millisecond, - KeepAlive: 30 * time.Second, - }).Dial, - ResponseHeaderTimeout: 750 * time.Millisecond, - }, - } - req, _ := http.NewRequest("GET", "http://metadata.google.internal/computeMetadata/v1/project/attributes/"+attr, nil) - req.Header.Set("Metadata-Flavor", "Google") - res, err := client.Do(req) - if err != nil { - return "" - } - defer res.Body.Close() - if res.StatusCode != 200 { - return "" - } - slurp, err := ioutil.ReadAll(res.Body) - if err != nil { - return "" - } - return string(bytes.TrimSpace(slurp)) -} - -// builderEnv returns the builderEnv for this buildTool. -func (b *Builder) builderEnv(name string) (builderEnv, error) { - // get goos/goarch from builder string - s := strings.SplitN(b.name, "-", 3) - if len(s) < 2 { - return nil, fmt.Errorf("unsupported builder form: %s", name) - } - b.goos = s[0] - b.goarch = s[1] - - switch *buildTool { - case "go": - return &goEnv{ - goos: s[0], - goarch: s[1], - }, nil - case "gccgo": - return &gccgoEnv{}, nil - default: - return nil, fmt.Errorf("unsupported build tool: %s", *buildTool) - } -} - -// buildCmd returns the build command to invoke. 
-// Builders which contain the string '-race' in their -// name will override *buildCmd and return raceCmd. -func (b *Builder) buildCmd() string { - if strings.Contains(b.name, "-race") { - return raceCmd - } - return *buildCmd -} - -// buildOrBench checks for a new commit for this builder -// and builds or benchmarks it if one is found. -// It returns true if a build/benchmark was attempted. -func (b *Builder) buildOrBench() bool { - var kinds []string - if *doBuild { - kinds = append(kinds, "build-go-commit") - } - if *doBench { - kinds = append(kinds, "benchmark-go-commit") - } - kind, hash, benchs, err := b.todo(kinds, "", "") - if err != nil { - log.Println(err) - return false - } - if hash == "" { - return false - } - switch kind { - case "build-go-commit": - if err := b.buildHash(hash); err != nil { - log.Println(err) - } - return true - case "benchmark-go-commit": - if err := b.benchHash(hash, benchs); err != nil { - log.Println(err) - } - return true - default: - log.Printf("Unknown todo kind %v", kind) - return false - } -} - -func (b *Builder) buildHash(hash string) error { - log.Println(b.name, "building", hash) - - // create place in which to do work - workpath := filepath.Join(*buildroot, b.name+"-"+hash[:12]) - if err := os.Mkdir(workpath, mkdirPerm); err != nil { - if err2 := removePath(workpath); err2 != nil { - return err - } - if err := os.Mkdir(workpath, mkdirPerm); err != nil { - return err - } - } - defer removePath(workpath) - - buildLog, runTime, err := b.buildRepoOnHash(workpath, hash, b.buildCmd()) - if err != nil { - log.Printf("%s failed at %v: %v", b.name, hash, err) - // record failure - return b.recordResult(false, "", hash, "", buildLog, runTime) - } - - // record success - if err = b.recordResult(true, "", hash, "", "", runTime); err != nil { - return fmt.Errorf("recordResult: %s", err) - } - - if *buildTool == "go" { - // build sub-repositories - goRoot := filepath.Join(workpath, *buildTool) - goPath := workpath - b.buildSubrepos(goRoot, goPath, hash) - } - - return nil -} - -// buildRepoOnHash clones repo into workpath and builds it. -func (b *Builder) buildRepoOnHash(workpath, hash, cmd string) (buildLog string, runTime time.Duration, err error) { - // Delete the previous workdir, if necessary - // (benchmarking code can execute several benchmarks in the same workpath). - if b.lastWorkpath != "" { - if b.lastWorkpath == workpath { - panic("workpath already exists: " + workpath) - } - removePath(b.lastWorkpath) - b.lastWorkpath = "" - } - - // pull before cloning to ensure we have the revision - if err = b.goroot.Pull(); err != nil { - buildLog = err.Error() - return - } - - // set up builder's environment. - srcDir, err := b.env.setup(b.goroot, workpath, hash, b.envv()) - if err != nil { - buildLog = err.Error() - return - } - - // build - var buildbuf bytes.Buffer - logfile := filepath.Join(workpath, "build.log") - f, err := os.Create(logfile) - if err != nil { - return err.Error(), 0, err - } - defer f.Close() - w := io.MultiWriter(f, &buildbuf) - - // go's build command is a script relative to the srcDir, whereas - // gccgo's build command is usually "make check-go" in the srcDir. - if *buildTool == "go" { - if !filepath.IsAbs(cmd) { - cmd = filepath.Join(srcDir, cmd) - } - } - - // naive splitting of command from its arguments: - args := strings.Split(cmd, " ") - c := exec.Command(args[0], args[1:]...) 
- c.Dir = srcDir - c.Env = b.envv() - if *verbose { - c.Stdout = io.MultiWriter(os.Stdout, w) - c.Stderr = io.MultiWriter(os.Stderr, w) - } else { - c.Stdout = w - c.Stderr = w - } - - startTime := time.Now() - err = run(c, runTimeout(*buildTimeout)) - runTime = time.Since(startTime) - if err != nil { - fmt.Fprintf(w, "Build complete, duration %v. Result: error: %v\n", runTime, err) - } else { - fmt.Fprintf(w, "Build complete, duration %v. Result: success\n", runTime) - } - return buildbuf.String(), runTime, err -} - -// failBuild checks for a new commit for this builder -// and fails it if one is found. -// It returns true if a build was "attempted". -func (b *Builder) failBuild() bool { - _, hash, _, err := b.todo([]string{"build-go-commit"}, "", "") - if err != nil { - log.Println(err) - return false - } - if hash == "" { - return false - } - - log.Printf("fail %s %s\n", b.name, hash) - - if err := b.recordResult(false, "", hash, "", "auto-fail mode run by "+os.Getenv("USER"), 0); err != nil { - log.Print(err) - } - return true -} - -func (b *Builder) buildSubrepos(goRoot, goPath, goHash string) { - for _, pkg := range dashboardPackages("subrepo") { - // get the latest todo for this package - _, hash, _, err := b.todo([]string{"build-package"}, pkg, goHash) - if err != nil { - log.Printf("buildSubrepos %s: %v", pkg, err) - continue - } - if hash == "" { - continue - } - - // build the package - if *verbose { - log.Printf("buildSubrepos %s: building %q", pkg, hash) - } - buildLog, err := b.buildSubrepo(goRoot, goPath, pkg, hash) - if err != nil { - if buildLog == "" { - buildLog = err.Error() - } - log.Printf("buildSubrepos %s: %v", pkg, err) - } - - // record the result - err = b.recordResult(err == nil, pkg, hash, goHash, buildLog, 0) - if err != nil { - log.Printf("buildSubrepos %s: %v", pkg, err) - } - } -} - -// buildSubrepo fetches the given package, updates it to the specified hash, -// and runs 'go test -short pkg/...'. It returns the build log and any error. -func (b *Builder) buildSubrepo(goRoot, goPath, pkg, hash string) (string, error) { - goTool := filepath.Join(goRoot, "bin", "go") + exeExt - env := append(b.envv(), "GOROOT="+goRoot, "GOPATH="+goPath) - - // add $GOROOT/bin and $GOPATH/bin to PATH - for i, e := range env { - const p = "PATH=" - if !strings.HasPrefix(e, p) { - continue - } - sep := string(os.PathListSeparator) - env[i] = p + filepath.Join(goRoot, "bin") + sep + filepath.Join(goPath, "bin") + sep + e[len(p):] - } - - // HACK: check out to new sub-repo location instead of old location. - pkg = strings.Replace(pkg, "code.google.com/p/go.", "golang.org/x/", 1) - - // fetch package and dependencies - var outbuf bytes.Buffer - err := run(exec.Command(goTool, "get", "-d", pkg+"/..."), runEnv(env), allOutput(&outbuf), runDir(goPath)) - if err != nil { - return outbuf.String(), err - } - outbuf.Reset() - - // hg update to the specified hash - pkgmaster, err := vcs.RepoRootForImportPath(pkg, *verbose) - if err != nil { - return "", fmt.Errorf("Error finding subrepo (%s): %s", pkg, err) - } - repo := &Repo{ - Path: filepath.Join(goPath, "src", pkg), - Master: pkgmaster, - } - if err := repo.UpdateTo(hash); err != nil { - return "", err - } - - // test the package - err = run(exec.Command(goTool, "test", "-short", pkg+"/..."), - runTimeout(*buildTimeout), runEnv(env), allOutput(&outbuf), runDir(goPath)) - return outbuf.String(), err -} - -// repoForTool returns the correct RepoRoot for the buildTool, or an error if -// the tool is unknown. 
-func repoForTool() (*vcs.RepoRoot, error) { - switch *buildTool { - case "go": - return vcs.RepoRootForImportPath(*gcPath, *verbose) - case "gccgo": - return vcs.RepoRootForImportPath(gofrontendImportPath, *verbose) - default: - return nil, fmt.Errorf("unknown build tool: %s", *buildTool) - } -} - -func isDirectory(name string) bool { - s, err := os.Stat(name) - return err == nil && s.IsDir() -} - -func isFile(name string) bool { - s, err := os.Stat(name) - return err == nil && !s.IsDir() -} - -// defaultSuffix returns file extension used for command files in -// current os environment. -func defaultSuffix() string { - switch runtime.GOOS { - case "windows": - return ".bat" - case "plan9": - return ".rc" - default: - return ".bash" - } -} - -func defaultExeExt() string { - switch runtime.GOOS { - case "windows": - return ".exe" - default: - return "" - } -} - -// defaultBuildRoot returns default buildroot directory. -func defaultBuildRoot() string { - var d string - if runtime.GOOS == "windows" { - // will use c:\, otherwise absolute paths become too long - // during builder run, see http://golang.org/issue/3358. - d = `c:\` - } else { - d = os.TempDir() - } - return filepath.Join(d, "gobuilder") -} - -// removePath is a more robust version of os.RemoveAll. -// On windows, if remove fails (which can happen if test/benchmark timeouts -// and keeps some files open) it tries to rename the dir. -func removePath(path string) error { - if err := os.RemoveAll(path); err != nil { - if runtime.GOOS == "windows" { - err = os.Rename(path, filepath.Clean(path)+"_remove_me") - } - return err - } - return nil -} diff --git a/dashboard/builder/vcs.go b/dashboard/builder/vcs.go deleted file mode 100644 index 2139a90..0000000 --- a/dashboard/builder/vcs.go +++ /dev/null @@ -1,225 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "bytes" - "encoding/xml" - "fmt" - "os" - "os/exec" - "path/filepath" - "strings" - "sync" - - "golang.org/x/tools/go/vcs" -) - -// Repo represents a mercurial repository. -type Repo struct { - Path string - Master *vcs.RepoRoot - sync.Mutex -} - -// RemoteRepo constructs a *Repo representing a remote repository. -func RemoteRepo(url, path string) (*Repo, error) { - rr, err := vcs.RepoRootForImportPath(url, *verbose) - if err != nil { - return nil, err - } - return &Repo{ - Path: path, - Master: rr, - }, nil -} - -// Clone clones the current Repo to a new destination -// returning a new *Repo if successful. -func (r *Repo) Clone(path, rev string) (*Repo, error) { - r.Lock() - defer r.Unlock() - - err := timeout(*cmdTimeout, func() error { - downloadPath := r.Path - if !r.Exists() { - downloadPath = r.Master.Repo - } - if rev == "" { - return r.Master.VCS.Create(path, downloadPath) - } - return r.Master.VCS.CreateAtRev(path, downloadPath, rev) - }) - if err != nil { - return nil, err - } - return &Repo{ - Path: path, - Master: r.Master, - }, nil -} - -// Export exports the current Repo at revision rev to a new destination. -func (r *Repo) Export(path, rev string) error { - // TODO(adg,cmang): implement Export in go/vcs - _, err := r.Clone(path, rev) - return err -} - -// UpdateTo updates the working copy of this Repo to the -// supplied revision. 
-func (r *Repo) UpdateTo(hash string) error { - r.Lock() - defer r.Unlock() - - if r.Master.VCS.Cmd == "git" { - cmd := exec.Command("git", "reset", "--hard", hash) - var log bytes.Buffer - err := run(cmd, runTimeout(*cmdTimeout), runDir(r.Path), allOutput(&log)) - if err != nil { - return fmt.Errorf("Error running git update -C %v: %v ; output=%s", hash, err, log.Bytes()) - } - return nil - } - - // Else go down three more levels of abstractions, at - // least two of which are broken for git. - return timeout(*cmdTimeout, func() error { - return r.Master.VCS.TagSync(r.Path, hash) - }) -} - -// Exists reports whether this Repo represents a valid Mecurial repository. -func (r *Repo) Exists() bool { - fi, err := os.Stat(filepath.Join(r.Path, "."+r.Master.VCS.Cmd)) - if err != nil { - return false - } - return fi.IsDir() -} - -// Pull pulls changes from the default path, that is, the path -// this Repo was cloned from. -func (r *Repo) Pull() error { - r.Lock() - defer r.Unlock() - - return timeout(*cmdTimeout, func() error { - return r.Master.VCS.Download(r.Path) - }) -} - -// Log returns the changelog for this repository. -func (r *Repo) Log() ([]HgLog, error) { - if err := r.Pull(); err != nil { - return nil, err - } - r.Lock() - defer r.Unlock() - - var logStruct struct { - Log []HgLog - } - err := timeout(*cmdTimeout, func() error { - data, err := r.Master.VCS.Log(r.Path, xmlLogTemplate) - if err != nil { - return err - } - - // We have a commit with description that contains 0x1b byte. - // Mercurial does not escape it, but xml.Unmarshal does not accept it. - data = bytes.Replace(data, []byte{0x1b}, []byte{'?'}, -1) - - err = xml.Unmarshal([]byte(""+string(data)+""), &logStruct) - if err != nil { - return fmt.Errorf("unmarshal %s log: %v", r.Master.VCS, err) - } - return nil - }) - if err != nil { - return nil, err - } - for i, log := range logStruct.Log { - // Let's pretend there can be only one parent. - if log.Parent != "" && strings.Contains(log.Parent, " ") { - logStruct.Log[i].Parent = strings.Split(log.Parent, " ")[0] - } - } - return logStruct.Log, nil -} - -// FullHash returns the full hash for the given Git or Mercurial revision. -func (r *Repo) FullHash(rev string) (string, error) { - r.Lock() - defer r.Unlock() - - var hash string - err := timeout(*cmdTimeout, func() error { - var data []byte - // Avoid the vcs package for git, since it's broken - // for git, and and we're trying to remove levels of - // abstraction which are increasingly getting - // difficult to navigate. - if r.Master.VCS.Cmd == "git" { - cmd := exec.Command("git", "rev-parse", rev) - var out bytes.Buffer - err := run(cmd, runTimeout(*cmdTimeout), runDir(r.Path), allOutput(&out)) - data = out.Bytes() - if err != nil { - return fmt.Errorf("Failed to find FullHash of %q; git rev-parse: %v, %s", rev, err, data) - } - } else { - var err error - data, err = r.Master.VCS.LogAtRev(r.Path, rev, "{node}") - if err != nil { - return err - } - } - s := strings.TrimSpace(string(data)) - if s == "" { - return fmt.Errorf("cannot find revision") - } - if len(s) != 40 { // correct for both hg and git - return fmt.Errorf("%s returned invalid hash: %s", r.Master.VCS, s) - } - hash = s - return nil - }) - if err != nil { - return "", err - } - return hash, nil -} - -// HgLog represents a single Mercurial revision. -type HgLog struct { - Hash string - Author string - Date string - Desc string - Parent string - Branch string - Files string - - // Internal metadata - added bool - bench bool // needs to be benchmarked? 
-} - -// xmlLogTemplate is a template to pass to Mercurial to make -// hg log print the log in valid XML for parsing with xml.Unmarshal. -// Can not escape branches and files, because it crashes python with: -// AttributeError: 'NoneType' object has no attribute 'replace' -const xmlLogTemplate = ` - - {node|escape} - {p1node} - {author|escape} - {date|rfc3339date} - {desc|escape} - {branches} - {files} - -` diff --git a/dashboard/buildlet/.gitignore b/dashboard/buildlet/.gitignore deleted file mode 100644 index bbd21a2..0000000 --- a/dashboard/buildlet/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -buildlet -buildlet.*-* -stage0/buildlet-stage0.* -cert.pem -key.pem diff --git a/dashboard/buildlet/Makefile b/dashboard/buildlet/Makefile deleted file mode 100644 index b30ce59..0000000 --- a/dashboard/buildlet/Makefile +++ /dev/null @@ -1,26 +0,0 @@ -buildlet: buildlet.go - go build --tags=buildlet - -buildlet.linux-amd64: buildlet.go - GOOS=linux GOARCH=amd64 go build -o $@ --tags=buildlet - cat $@ | (cd ../upload && go run upload.go --public go-builder-data/$@) - -buildlet.openbsd-amd64: buildlet.go - GOOS=openbsd GOARCH=amd64 go build -o $@ --tags=buildlet - cat $@ | (cd ../upload && go run upload.go --public go-builder-data/$@) - -buildlet.plan9-386: buildlet.go - GOOS=plan9 GOARCH=386 go build -o $@ --tags=buildlet - cat $@ | (cd ../upload && go run upload.go --public go-builder-data/$@) - -buildlet.windows-amd64: buildlet.go - GOOS=windows GOARCH=amd64 go build -o $@ --tags=buildlet - cat $@ | (cd ../upload && go run upload.go --public go-builder-data/$@) - -buildlet.darwin-amd64: buildlet.go - GOOS=darwin GOARCH=amd64 go build -o $@ --tags=buildlet - cat $@ | (cd ../upload && go run upload.go --public go-builder-data/$@) - -buildlet.netbsd-amd64: buildlet.go - GOOS=netbsd GOARCH=amd64 go build -o $@ --tags=buildlet - cat $@ | (cd ../upload && go run upload.go --public go-builder-data/$@) diff --git a/dashboard/buildlet/README b/dashboard/buildlet/README deleted file mode 100644 index 0dd68cf..0000000 --- a/dashboard/buildlet/README +++ /dev/null @@ -1,12 +0,0 @@ -Local development notes: - -Server: (TLS stuff is optional) -$ go run $GOROOT/src/crypto/tls/generate_cert.go --host=example.com -$ GCEMETA_password=foo GCEMETA_tls_cert=@cert.pem GCEMETA_tls_key='@key.pem' ./buildlet - -Client: -$ curl -O https://go.googlesource.com/go/+archive/3b76b017cabb.tar.gz -$ curl -k --user :foo -X PUT --data-binary "@go-3b76b017cabb.tar.gz" https://localhost:5936/writetgz -$ curl -k --user :foo -d "cmd=src/make.bash" http://127.0.0.1:5937/exec -etc - diff --git a/dashboard/buildlet/buildlet.go b/dashboard/buildlet/buildlet.go deleted file mode 100644 index ad65b95..0000000 --- a/dashboard/buildlet/buildlet.go +++ /dev/null @@ -1,351 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build buildlet - -// The buildlet is an HTTP server that untars content to disk and runs -// commands it has untarred, streaming their output back over HTTP. -// It is part of Go's continuous build system. -// -// This program intentionally allows remote code execution, and -// provides no security of its own. It is assumed that any user uses -// it with an appropriately-configured firewall between their VM -// instances. 
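The curl session in the buildlet README above maps directly onto a few lines of Go. This client sketch is illustrative only (host, port, archive name, and password are placeholders); it is not part of the buildlet itself:

package main

import (
	"log"
	"net/http"
	"os"
)

func main() {
	// PUT a gzip-compressed tarball to the buildlet's /writetgz handler,
	// authenticating with the password configured via GCE metadata.
	f, err := os.Open("go-3b76b017cabb.tar.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	req, err := http.NewRequest("PUT", "http://localhost:5936/writetgz", f)
	if err != nil {
		log.Fatal(err)
	}
	req.SetBasicAuth("", "foo") // the buildlet ignores the username

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()
	log.Println("writetgz:", res.Status)
}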
-package main // import "golang.org/x/tools/dashboard/buildlet" - -import ( - "archive/tar" - "compress/gzip" - "crypto/tls" - "flag" - "fmt" - "io" - "io/ioutil" - "log" - "net" - "net/http" - "os" - "os/exec" - "path/filepath" - "runtime" - "strings" - "sync" - "time" - - "google.golang.org/cloud/compute/metadata" -) - -var ( - scratchDir = flag.String("scratchdir", "", "Temporary directory to use. The contents of this directory may be deleted at any time. If empty, TempDir is used to create one.") - listenAddr = flag.String("listen", defaultListenAddr(), "address to listen on. Warning: this service is inherently insecure and offers no protection of its own. Do not expose this port to the world.") -) - -func defaultListenAddr() string { - if runtime.GOOS == "darwin" { - // Darwin will never run on GCE, so let's always - // listen on a high port (so we don't need to be - // root). - return ":5936" - } - if !metadata.OnGCE() { - return "localhost:5936" - } - // In production, default to port 80 or 443, depending on - // whether TLS is configured. - if metadataValue("tls-cert") != "" { - return ":443" - } - return ":80" -} - -func main() { - flag.Parse() - if !metadata.OnGCE() && !strings.HasPrefix(*listenAddr, "localhost:") { - log.Printf("** WARNING *** This server is unsafe and offers no security. Be careful.") - } - if runtime.GOOS == "plan9" { - // Plan 9 is too slow on GCE, so stop running run.rc after the basics. - // See https://golang.org/cl/2522 and https://golang.org/issue/9491 - // TODO(bradfitz): once the buildlet has environment variable support, - // the coordinator can send this in, and this variable can be part of - // the build configuration struct instead of hard-coded here. - // But no need for environment variables quite yet. - os.Setenv("GOTESTONLY", "std") - } - - if *scratchDir == "" { - dir, err := ioutil.TempDir("", "buildlet-scatch") - if err != nil { - log.Fatalf("error creating scratchdir with ioutil.TempDir: %v", err) - } - *scratchDir = dir - } - if _, err := os.Lstat(*scratchDir); err != nil { - log.Fatalf("invalid --scratchdir %q: %v", *scratchDir, err) - } - http.HandleFunc("/", handleRoot) - - password := metadataValue("password") - http.Handle("/writetgz", requirePassword{http.HandlerFunc(handleWriteTGZ), password}) - http.Handle("/exec", requirePassword{http.HandlerFunc(handleExec), password}) - // TODO: removeall - - tlsCert, tlsKey := metadataValue("tls-cert"), metadataValue("tls-key") - if (tlsCert == "") != (tlsKey == "") { - log.Fatalf("tls-cert and tls-key must both be supplied, or neither.") - } - - log.Printf("Listening on %s ...", *listenAddr) - ln, err := net.Listen("tcp", *listenAddr) - if err != nil { - log.Fatalf("Failed to listen on %s: %v", *listenAddr, err) - } - ln = tcpKeepAliveListener{ln.(*net.TCPListener)} - - var srv http.Server - if tlsCert != "" { - cert, err := tls.X509KeyPair([]byte(tlsCert), []byte(tlsKey)) - if err != nil { - log.Fatalf("TLS cert error: %v", err) - } - tlsConf := &tls.Config{ - Certificates: []tls.Certificate{cert}, - } - ln = tls.NewListener(ln, tlsConf) - } - - log.Fatalf("Serve: %v", srv.Serve(ln)) -} - -// metadataValue returns the GCE metadata instance value for the given key. -// If the metadata is not defined, the returned string is empty. -// -// If not running on GCE, it falls back to using environment variables -// for local development. 
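For local development off GCE, the fallback described above reads GCEMETA_-prefixed environment variables, with a curl-style '@' prefix meaning the value is a file to read, exactly as the README invocation earlier shows. A tiny in-process sketch (values are placeholders):

// Fake the metadata server when hacking on the buildlet off GCE.
os.Setenv("GCEMETA_password", "foo")       // metadataValue("password") == "foo"
os.Setenv("GCEMETA_tls_cert", "@cert.pem") // leading '@' means "read the value from this file"
os.Setenv("GCEMETA_tls_key", "@key.pem")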
-func metadataValue(key string) string { - // The common case: - if metadata.OnGCE() { - v, err := metadata.InstanceAttributeValue(key) - if _, notDefined := err.(metadata.NotDefinedError); notDefined { - return "" - } - if err != nil { - log.Fatalf("metadata.InstanceAttributeValue(%q): %v", key, err) - } - return v - } - - // Else let developers use environment variables to fake - // metadata keys, for local testing. - envKey := "GCEMETA_" + strings.Replace(key, "-", "_", -1) - v := os.Getenv(envKey) - // Respect curl-style '@' prefix to mean the rest is a filename. - if strings.HasPrefix(v, "@") { - slurp, err := ioutil.ReadFile(v[1:]) - if err != nil { - log.Fatalf("Error reading file for GCEMETA_%v: %v", key, err) - } - return string(slurp) - } - if v == "" { - log.Printf("Warning: not running on GCE, and no %v environment variable defined", envKey) - } - return v -} - -// tcpKeepAliveListener is a net.Listener that sets TCP keep-alive -// timeouts on accepted connections. -type tcpKeepAliveListener struct { - *net.TCPListener -} - -func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) { - tc, err := ln.AcceptTCP() - if err != nil { - return - } - tc.SetKeepAlive(true) - tc.SetKeepAlivePeriod(3 * time.Minute) - return tc, nil -} - -func handleRoot(w http.ResponseWriter, r *http.Request) { - fmt.Fprintf(w, "buildlet running on %s-%s\n", runtime.GOOS, runtime.GOARCH) -} - -func handleWriteTGZ(w http.ResponseWriter, r *http.Request) { - if r.Method != "PUT" { - http.Error(w, "requires PUT method", http.StatusBadRequest) - return - } - err := untar(r.Body, *scratchDir) - if err != nil { - status := http.StatusInternalServerError - if he, ok := err.(httpStatuser); ok { - status = he.httpStatus() - } - http.Error(w, err.Error(), status) - return - } - io.WriteString(w, "OK") -} - -// untar reads the gzip-compressed tar file from r and writes it into dir. -func untar(r io.Reader, dir string) error { - zr, err := gzip.NewReader(r) - if err != nil { - return badRequest("requires gzip-compressed body: " + err.Error()) - } - tr := tar.NewReader(zr) - for { - f, err := tr.Next() - if err == io.EOF { - break - } - if err != nil { - log.Printf("tar reading error: %v", err) - return badRequest("tar error: " + err.Error()) - } - if !validRelPath(f.Name) { - return badRequest(fmt.Sprintf("tar file contained invalid name %q", f.Name)) - } - rel := filepath.FromSlash(f.Name) - abs := filepath.Join(dir, rel) - - fi := f.FileInfo() - mode := fi.Mode() - switch { - case mode.IsRegular(): - // Make the directory. This is redundant because it should - // already be made by a directory entry in the tar - // beforehand. Thus, don't check for errors; the next - // write will fail with the same error. 
- os.MkdirAll(filepath.Dir(abs), 0755) - wf, err := os.OpenFile(abs, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode.Perm()) - if err != nil { - return err - } - n, err := io.Copy(wf, tr) - if closeErr := wf.Close(); closeErr != nil && err == nil { - err = closeErr - } - if err != nil { - return fmt.Errorf("error writing to %s: %v", abs, err) - } - if n != f.Size { - return fmt.Errorf("only wrote %d bytes to %s; expected %d", n, abs, f.Size) - } - log.Printf("wrote %s", abs) - case mode.IsDir(): - if err := os.MkdirAll(abs, 0755); err != nil { - return err - } - default: - return badRequest(fmt.Sprintf("tar file entry %s contained unsupported file type %v", f.Name, mode)) - } - } - return nil -} - -// Process-State is an HTTP Trailer set in the /exec handler to "ok" -// on success, or os.ProcessState.String() on failure. -const hdrProcessState = "Process-State" - -func handleExec(w http.ResponseWriter, r *http.Request) { - if r.Method != "POST" { - http.Error(w, "requires POST method", http.StatusBadRequest) - return - } - if r.ProtoMajor*10+r.ProtoMinor < 11 { - // We need trailers, only available in HTTP/1.1 or HTTP/2. - http.Error(w, "HTTP/1.1 or higher required", http.StatusBadRequest) - return - } - - w.Header().Set("Trailer", hdrProcessState) // declare it so we can set it - - cmdPath := r.FormValue("cmd") // required - if !validRelPath(cmdPath) { - http.Error(w, "requires 'cmd' parameter", http.StatusBadRequest) - return - } - if f, ok := w.(http.Flusher); ok { - f.Flush() - } - - absCmd := filepath.Join(*scratchDir, filepath.FromSlash(cmdPath)) - cmd := exec.Command(absCmd, r.PostForm["cmdArg"]...) - cmd.Dir = filepath.Dir(absCmd) - cmdOutput := &flushWriter{w: w} - cmd.Stdout = cmdOutput - cmd.Stderr = cmdOutput - err := cmd.Run() - state := "ok" - if err != nil { - if ps := cmd.ProcessState; ps != nil { - state = ps.String() - } else { - state = err.Error() - } - } - w.Header().Set(hdrProcessState, state) - log.Printf("Run = %s", state) -} - -// flushWriter is an io.Writer wrapper that writes to w and -// Flushes the output immediately, if w is an http.Flusher. -type flushWriter struct { - mu sync.Mutex - w http.ResponseWriter -} - -func (hw *flushWriter) Write(p []byte) (n int, err error) { - hw.mu.Lock() - defer hw.mu.Unlock() - n, err = hw.w.Write(p) - if f, ok := hw.w.(http.Flusher); ok { - f.Flush() - } - return -} - -func validRelPath(p string) bool { - if p == "" || strings.Contains(p, `\`) || strings.HasPrefix(p, "/") || strings.Contains(p, "../") { - return false - } - return true -} - -type httpStatuser interface { - error - httpStatus() int -} - -type httpError struct { - statusCode int - msg string -} - -func (he httpError) Error() string { return he.msg } -func (he httpError) httpStatus() int { return he.statusCode } - -func badRequest(msg string) error { - return httpError{http.StatusBadRequest, msg} -} - -// requirePassword is an http.Handler auth wrapper that enforces a -// HTTP Basic password. The username is ignored. 
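On the client side of handleExec, the Process-State trailer only becomes visible after the streamed body has been read to EOF. A sketch of a caller (address, command, and password are placeholders; this mirrors what a coordinator-style client would do and is not code from this patch):

package main

import (
	"io"
	"log"
	"net/http"
	"net/url"
	"os"
	"strings"
)

func main() {
	// Ask the buildlet to run a command and stream its combined output.
	form := url.Values{"cmd": {"src/make.bash"}}
	req, err := http.NewRequest("POST", "http://localhost:5936/exec",
		strings.NewReader(form.Encode()))
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	req.SetBasicAuth("", "foo") // placeholder password

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()

	// Copy the streamed output; the Process-State trailer is only
	// populated once the body has been consumed.
	if _, err := io.Copy(os.Stdout, res.Body); err != nil {
		log.Fatal(err)
	}
	log.Println("Process-State:", res.Trailer.Get("Process-State"))
}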
-type requirePassword struct { - h http.Handler - password string // empty means no password -} - -func (h requirePassword) ServeHTTP(w http.ResponseWriter, r *http.Request) { - _, gotPass, _ := r.BasicAuth() - if h.password != "" && h.password != gotPass { - http.Error(w, "invalid password", http.StatusForbidden) - return - } - h.h.ServeHTTP(w, r) -} diff --git a/dashboard/buildlet/stage0/Makefile b/dashboard/buildlet/stage0/Makefile deleted file mode 100644 index f9a3c19..0000000 --- a/dashboard/buildlet/stage0/Makefile +++ /dev/null @@ -1,3 +0,0 @@ -buildlet-stage0.windows-amd64: stage0.go - GOOS=windows GOARCH=amd64 go build -o $@ --tags=stage0 - cat $@ | (cd ../../upload && go run upload.go --public go-builder-data/$@) diff --git a/dashboard/buildlet/stage0/stage0.go b/dashboard/buildlet/stage0/stage0.go deleted file mode 100644 index 609c5c8..0000000 --- a/dashboard/buildlet/stage0/stage0.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build stage0 - -// The stage0 command looks up the buildlet's URL from the GCE metadata -// service, downloads it, and runs it. It's used primarily by Windows, -// since it can be written in a couple lines of shell elsewhere. -package main - -import ( - "fmt" - "io" - "log" - "net/http" - "os" - "os/exec" - "path/filepath" - "time" - - "google.golang.org/cloud/compute/metadata" -) - -const attr = "buildlet-binary-url" - -func main() { - buildletURL, err := metadata.InstanceAttributeValue(attr) - if err != nil { - sleepFatalf("Failed to look up %q attribute value: %v", attr, err) - } - target := filepath.FromSlash("./buildlet.exe") - if err := download(target, buildletURL); err != nil { - sleepFatalf("Downloading %s: %v", buildletURL, err) - } - cmd := exec.Command(target) - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - if err := cmd.Run(); err != nil { - sleepFatalf("Error running buildlet: %v", err) - } -} - -func sleepFatalf(format string, args ...interface{}) { - log.Printf(format, args...) - time.Sleep(time.Minute) // so user has time to see it in cmd.exe, maybe - os.Exit(1) -} - -func download(file, url string) error { - log.Printf("Downloading %s to %s ...\n", url, file) - res, err := http.Get(url) - if err != nil { - return fmt.Errorf("Error fetching %v: %v", url, err) - } - if res.StatusCode != 200 { - return fmt.Errorf("HTTP status code of %s was %v", url, res.Status) - } - tmp := file + ".tmp" - os.Remove(tmp) - os.Remove(file) - f, err := os.Create(tmp) - if err != nil { - return err - } - n, err := io.Copy(f, res.Body) - res.Body.Close() - if err != nil { - return fmt.Errorf("Error reading %v: %v", url, err) - } - f.Close() - err = os.Rename(tmp, file) - if err != nil { - return err - } - log.Printf("Downloaded %s (%d bytes)", file, n) - return nil -} diff --git a/dashboard/cmd/builder/bench.go b/dashboard/cmd/builder/bench.go new file mode 100644 index 0000000..a9a59ce --- /dev/null +++ b/dashboard/cmd/builder/bench.go @@ -0,0 +1,256 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "os/exec" + "path/filepath" + "regexp" + "strconv" + "strings" +) + +// benchHash benchmarks a single commit. 
+func (b *Builder) benchHash(hash string, benchs []string) error { + if *verbose { + log.Println(b.name, "benchmarking", hash) + } + + res := &PerfResult{Hash: hash, Benchmark: "meta-done"} + + // Create place in which to do work. + workpath := filepath.Join(*buildroot, b.name+"-"+hash[:12]) + // Prepare a workpath if we don't have one we can reuse. + update := false + if b.lastWorkpath != workpath { + if err := os.Mkdir(workpath, mkdirPerm); err != nil { + return err + } + buildLog, _, err := b.buildRepoOnHash(workpath, hash, makeCmd) + if err != nil { + removePath(workpath) + // record failure + res.Artifacts = append(res.Artifacts, PerfArtifact{"log", buildLog}) + return b.recordPerfResult(res) + } + b.lastWorkpath = workpath + update = true + } + + // Build the benchmark binary. + benchBin, buildLog, err := b.buildBenchmark(workpath, update) + if err != nil { + // record failure + res.Artifacts = append(res.Artifacts, PerfArtifact{"log", buildLog}) + return b.recordPerfResult(res) + } + + benchmark, procs, affinity, last := chooseBenchmark(benchBin, benchs) + if benchmark != "" { + res.Benchmark = fmt.Sprintf("%v-%v", benchmark, procs) + res.Metrics, res.Artifacts, res.OK = b.executeBenchmark(workpath, hash, benchBin, benchmark, procs, affinity) + if err = b.recordPerfResult(res); err != nil { + return fmt.Errorf("recordResult: %s", err) + } + } + + if last { + // All benchmarks have beed executed, don't need workpath anymore. + removePath(b.lastWorkpath) + b.lastWorkpath = "" + // Notify the app. + res = &PerfResult{Hash: hash, Benchmark: "meta-done", OK: true} + if err = b.recordPerfResult(res); err != nil { + return fmt.Errorf("recordResult: %s", err) + } + } + + return nil +} + +// buildBenchmark builds the benchmark binary. +func (b *Builder) buildBenchmark(workpath string, update bool) (benchBin, log string, err error) { + goroot := filepath.Join(workpath, "go") + gobin := filepath.Join(goroot, "bin", "go") + exeExt + gopath := filepath.Join(*buildroot, "gopath") + env := append([]string{ + "GOROOT=" + goroot, + "GOPATH=" + gopath}, + b.envv()...) + // First, download without installing. + args := []string{"get", "-d"} + if update { + args = append(args, "-u") + } + args = append(args, *benchPath) + var buildlog bytes.Buffer + runOpts := []runOpt{runTimeout(*buildTimeout), runEnv(env), allOutput(&buildlog), runDir(workpath)} + err = run(exec.Command(gobin, args...), runOpts...) + if err != nil { + fmt.Fprintf(&buildlog, "go get -d %s failed: %s", *benchPath, err) + return "", buildlog.String(), err + } + // Then, build into workpath. + benchBin = filepath.Join(workpath, "benchbin") + exeExt + args = []string{"build", "-o", benchBin, *benchPath} + buildlog.Reset() + err = run(exec.Command(gobin, args...), runOpts...) + if err != nil { + fmt.Fprintf(&buildlog, "go build %s failed: %s", *benchPath, err) + return "", buildlog.String(), err + } + return benchBin, "", nil +} + +// chooseBenchmark chooses the next benchmark to run +// based on the list of available benchmarks, already executed benchmarks +// and -benchcpu list. 
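chooseBenchmark, defined next, runs the benchmark binary with no arguments and treats the first line it prints as a comma-separated list of available benchmark names. That protocol is inferred from the parsing code, so treat this stand-in as illustrative only:

package main

import "fmt"

// A stand-in benchmark binary: invoked without flags it advertises the
// benchmarks it knows about on a single comma-separated line, which is
// the line chooseBenchmark splits. The names are made up.
func main() {
	fmt.Println("build,garbage,http,json")
}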
+func chooseBenchmark(benchBin string, doneBenchs []string) (bench string, procs, affinity int, last bool) { + var out bytes.Buffer + err := run(exec.Command(benchBin), allOutput(&out)) + if err != nil { + log.Printf("Failed to query benchmark list: %v\n%s", err, &out) + last = true + return + } + outStr := out.String() + nlIdx := strings.Index(outStr, "\n") + if nlIdx < 0 { + log.Printf("Failed to parse benchmark list (no new line): %s", outStr) + last = true + return + } + localBenchs := strings.Split(outStr[:nlIdx], ",") + benchsMap := make(map[string]bool) + for _, b := range doneBenchs { + benchsMap[b] = true + } + cnt := 0 + // We want to run all benchmarks with GOMAXPROCS=1 first. + for i, procs1 := range benchCPU { + for _, bench1 := range localBenchs { + if benchsMap[fmt.Sprintf("%v-%v", bench1, procs1)] { + continue + } + cnt++ + if cnt == 1 { + bench = bench1 + procs = procs1 + if i < len(benchAffinity) { + affinity = benchAffinity[i] + } + } + } + } + last = cnt <= 1 + return +} + +// executeBenchmark runs a single benchmark and parses its output. +func (b *Builder) executeBenchmark(workpath, hash, benchBin, bench string, procs, affinity int) (metrics []PerfMetric, artifacts []PerfArtifact, ok bool) { + // Benchmarks runs mutually exclusive with other activities. + benchMutex.RUnlock() + defer benchMutex.RLock() + benchMutex.Lock() + defer benchMutex.Unlock() + + log.Printf("%v executing benchmark %v-%v on %v", b.name, bench, procs, hash) + + // The benchmark executes 'go build'/'go tool', + // so we need properly setup env. + env := append([]string{ + "GOROOT=" + filepath.Join(workpath, "go"), + "PATH=" + filepath.Join(workpath, "go", "bin") + string(os.PathListSeparator) + os.Getenv("PATH"), + "GODEBUG=gctrace=1", // since Go1.2 + "GOGCTRACE=1", // before Go1.2 + fmt.Sprintf("GOMAXPROCS=%v", procs)}, + b.envv()...) + args := []string{ + "-bench", bench, + "-benchmem", strconv.Itoa(*benchMem), + "-benchtime", benchTime.String(), + "-benchnum", strconv.Itoa(*benchNum), + "-tmpdir", workpath} + if affinity != 0 { + args = append(args, "-affinity", strconv.Itoa(affinity)) + } + benchlog := new(bytes.Buffer) + err := run(exec.Command(benchBin, args...), runEnv(env), allOutput(benchlog), runDir(workpath)) + if strip := benchlog.Len() - 512<<10; strip > 0 { + // Leave the last 512K, that part contains metrics. + benchlog = bytes.NewBuffer(benchlog.Bytes()[strip:]) + } + artifacts = []PerfArtifact{{Type: "log", Body: benchlog.String()}} + if err != nil { + if err != nil { + log.Printf("Failed to execute benchmark '%v': %v", bench, err) + ok = false + } + return + } + + metrics1, artifacts1, err := parseBenchmarkOutput(benchlog) + if err != nil { + log.Printf("Failed to parse benchmark output: %v", err) + ok = false + return + } + metrics = metrics1 + artifacts = append(artifacts, artifacts1...) + ok = true + return +} + +// parseBenchmarkOutput fetches metrics and artifacts from benchmark output. 
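parseBenchmarkOutput, defined next, scans for GOPERF-METRIC:name=value lines (integer metrics) and GOPERF-FILE:name=path lines (artifact files read back from disk). A sketch of the kind of output a conforming benchmark run would emit; the names and values here are made up:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Two integer metrics and one artifact file reference, in the
	// format the builder's regexps accept.
	fmt.Println("GOPERF-METRIC:time=83000")
	fmt.Println("GOPERF-METRIC:rss=67108864")
	fmt.Println("GOPERF-FILE:cpuprof=" + filepath.Join(os.TempDir(), "cpuprof.txt"))
}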
+func parseBenchmarkOutput(out io.Reader) (metrics []PerfMetric, artifacts []PerfArtifact, err error) { + s := bufio.NewScanner(out) + metricRe := regexp.MustCompile("^GOPERF-METRIC:([a-z,0-9,-]+)=([0-9]+)$") + fileRe := regexp.MustCompile("^GOPERF-FILE:([a-z,0-9,-]+)=(.+)$") + for s.Scan() { + ln := s.Text() + if ss := metricRe.FindStringSubmatch(ln); ss != nil { + var v uint64 + v, err = strconv.ParseUint(ss[2], 10, 64) + if err != nil { + err = fmt.Errorf("Failed to parse metric '%v=%v': %v", ss[1], ss[2], err) + return + } + metrics = append(metrics, PerfMetric{Type: ss[1], Val: v}) + } else if ss := fileRe.FindStringSubmatch(ln); ss != nil { + var buf []byte + buf, err = ioutil.ReadFile(ss[2]) + if err != nil { + err = fmt.Errorf("Failed to read file '%v': %v", ss[2], err) + return + } + artifacts = append(artifacts, PerfArtifact{ss[1], string(buf)}) + } + } + return +} + +// needsBenchmarking determines whether the commit needs benchmarking. +func needsBenchmarking(log *HgLog) bool { + // Do not benchmark branch commits, they are usually not interesting + // and fall out of the trunk succession. + if log.Branch != "" { + return false + } + // Do not benchmark commits that do not touch source files (e.g. CONTRIBUTORS). + for _, f := range strings.Split(log.Files, " ") { + if (strings.HasPrefix(f, "include") || strings.HasPrefix(f, "src")) && + !strings.HasSuffix(f, "_test.go") && !strings.Contains(f, "testdata") { + return true + } + } + return false +} diff --git a/dashboard/cmd/builder/doc.go b/dashboard/cmd/builder/doc.go new file mode 100644 index 0000000..15b7252 --- /dev/null +++ b/dashboard/cmd/builder/doc.go @@ -0,0 +1,58 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* + +Go Builder is a continuous build client for the Go project. +It integrates with the Go Dashboard AppEngine application. + +Go Builder is intended to run continuously as a background process. + +It periodically pulls updates from the Go Mercurial repository. + +When a newer revision is found, Go Builder creates a clone of the repository, +runs all.bash, and reports build success or failure to the Go Dashboard. + +For a release revision (a change description that matches "release.YYYY-MM-DD"), +Go Builder will create a tar.gz archive of the GOROOT and deliver it to the +Go Google Code project's downloads section. + +Usage: + + gobuilder goos-goarch... + + Several goos-goarch combinations can be provided, and the builder will + build them in serial. + +Optional flags: + + -dashboard="godashboard.appspot.com": Go Dashboard Host + The location of the Go Dashboard application to which Go Builder will + report its results. + + -release: Build and deliver binary release archive + + -rev=N: Build revision N and exit + + -cmd="./all.bash": Build command (specify absolute or relative to go/src) + + -v: Verbose logging + + -external: External package builder mode (will not report Go build + state to dashboard or issue releases) + +The key file should be located at $HOME/.gobuildkey or, for a builder-specific +key, $HOME/.gobuildkey-$BUILDER (eg, $HOME/.gobuildkey-linux-amd64). + +The build key file is a text file of the format: + + godashboard-key + googlecode-username + googlecode-password + +If the Google Code credentials are not provided the archival step +will be skipped. 
+ +*/ +package main // import "golang.org/x/tools/dashboard/cmd/builder" diff --git a/dashboard/cmd/builder/env.go b/dashboard/cmd/builder/env.go new file mode 100644 index 0000000..7261229 --- /dev/null +++ b/dashboard/cmd/builder/env.go @@ -0,0 +1,299 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bytes" + "fmt" + "os" + "os/exec" + "path/filepath" + "regexp" + "runtime" + "strings" + + "golang.org/x/tools/go/vcs" +) + +// builderEnv represents the environment that a Builder will run tests in. +type builderEnv interface { + // setup sets up the builder environment and returns the directory to run the buildCmd in. + setup(repo *Repo, workpath, hash string, envv []string) (string, error) +} + +// goEnv represents the builderEnv for the main Go repo. +type goEnv struct { + goos, goarch string +} + +func (b *Builder) envv() []string { + if runtime.GOOS == "windows" { + return b.envvWindows() + } + + var e []string + if *buildTool == "go" { + e = []string{ + "GOOS=" + b.goos, + "GOARCH=" + b.goarch, + "GOROOT_FINAL=/usr/local/go", + } + switch b.goos { + case "android", "nacl": + // Cross compile. + default: + // If we are building, for example, linux/386 on a linux/amd64 machine we want to + // make sure that the whole build is done as a if this were compiled on a real + // linux/386 machine. In other words, we want to not do a cross compilation build. + // To do this we set GOHOSTOS and GOHOSTARCH to override the detection in make.bash. + // + // The exception to this rule is when we are doing nacl/android builds. These are by + // definition always cross compilation, and we have support built into cmd/go to be + // able to handle this case. + e = append(e, "GOHOSTOS="+b.goos, "GOHOSTARCH="+b.goarch) + } + } + + for _, k := range extraEnv() { + if s, ok := getenvOk(k); ok { + e = append(e, k+"="+s) + } + } + return e +} + +func (b *Builder) envvWindows() []string { + var start map[string]string + if *buildTool == "go" { + start = map[string]string{ + "GOOS": b.goos, + "GOHOSTOS": b.goos, + "GOARCH": b.goarch, + "GOHOSTARCH": b.goarch, + "GOROOT_FINAL": `c:\go`, + "GOBUILDEXIT": "1", // exit all.bat with completion status. + } + } + + for _, name := range extraEnv() { + if s, ok := getenvOk(name); ok { + start[name] = s + } + } + if b.goos == "windows" { + switch b.goarch { + case "amd64": + start["PATH"] = `c:\TDM-GCC-64\bin;` + start["PATH"] + case "386": + start["PATH"] = `c:\TDM-GCC-32\bin;` + start["PATH"] + } + } + skip := map[string]bool{ + "GOBIN": true, + "GOPATH": true, + "GOROOT": true, + "INCLUDE": true, + "LIB": true, + } + var e []string + for name, v := range start { + e = append(e, name+"="+v) + skip[name] = true + } + for _, kv := range os.Environ() { + s := strings.SplitN(kv, "=", 2) + name := strings.ToUpper(s[0]) + switch { + case name == "": + // variables, like "=C:=C:\", just copy them + e = append(e, kv) + case !skip[name]: + e = append(e, kv) + skip[name] = true + } + } + return e +} + +// setup for a goEnv clones the main go repo to workpath/go at the provided hash +// and returns the path workpath/go/src, the location of all go build scripts. 
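To make the GOHOSTOS/GOHOSTARCH comment above concrete: for a builder named linux-386 running on a linux/amd64 machine with the go tool, envv() yields roughly the following, forcing a native 386 build instead of a cross-compile. This is a sketch; the extraEnv entries depend on the host environment:

// Approximate result of (&Builder{goos: "linux", goarch: "386"}).envv():
env := []string{
	"GOOS=linux",
	"GOARCH=386",
	"GOROOT_FINAL=/usr/local/go",
	"GOHOSTOS=linux",  // override make.bash's host detection...
	"GOHOSTARCH=386",  // ...so the toolchain itself is built as 386
	// plus whichever of extraEnv() (PATH, TMPDIR, CC, GOROOT_BOOTSTRAP, ...)
	// are set in the gobuilder's own environment.
}
_ = env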
+func (env *goEnv) setup(repo *Repo, workpath, hash string, envv []string) (string, error) { + goworkpath := filepath.Join(workpath, "go") + if err := repo.Export(goworkpath, hash); err != nil { + return "", fmt.Errorf("error exporting repository: %s", err) + } + return filepath.Join(goworkpath, "src"), nil +} + +// gccgoEnv represents the builderEnv for the gccgo compiler. +type gccgoEnv struct{} + +// setup for a gccgoEnv clones the gofrontend repo to workpath/go at the hash +// and clones the latest GCC branch to repo.Path/gcc. The gccgo sources are +// replaced with the updated sources in the gofrontend repo and gcc gets +// gets configured and built in workpath/gcc-objdir. The path to +// workpath/gcc-objdir is returned. +func (env *gccgoEnv) setup(repo *Repo, workpath, hash string, envv []string) (string, error) { + gccpath := filepath.Join(repo.Path, "gcc") + + // get a handle to Git vcs.Cmd for pulling down GCC from the mirror. + git := vcs.ByCmd("git") + + // only pull down gcc if we don't have a local copy. + if _, err := os.Stat(gccpath); err != nil { + if err := timeout(*cmdTimeout, func() error { + // pull down a working copy of GCC. + + cloneCmd := []string{ + "clone", + // This is just a guess since there are ~6000 commits to + // GCC per year. It's likely there will be enough history + // to cross-reference the Gofrontend commit against GCC. + // The disadvantage would be if the commit being built is more than + // a year old; in this case, the user should make a clone that has + // the full history. + "--depth", "6000", + // We only care about the master branch. + "--branch", "master", "--single-branch", + *gccPath, + } + + // Clone Kind Clone Time(Dry run) Clone Size + // --------------------------------------------------------------- + // Full Clone 10 - 15 min 2.2 GiB + // Master Branch 2 - 3 min 1.5 GiB + // Full Clone(shallow) 1 min 900 MiB + // Master Branch(shallow) 40 sec 900 MiB + // + // The shallow clones have the same size, which is expected, + // but the full shallow clone will only have 6000 commits + // spread across all branches. There are ~50 branches. + return run(exec.Command("git", cloneCmd...), runEnv(envv), allOutput(os.Stdout), runDir(repo.Path)) + }); err != nil { + return "", err + } + } + + if err := git.Download(gccpath); err != nil { + return "", err + } + + // get the modified files for this commit. + + var buf bytes.Buffer + if err := run(exec.Command("hg", "status", "--no-status", "--change", hash), + allOutput(&buf), runDir(repo.Path), runEnv(envv)); err != nil { + return "", fmt.Errorf("Failed to find the modified files for %s: %s", hash, err) + } + modifiedFiles := strings.Split(buf.String(), "\n") + var isMirrored bool + for _, f := range modifiedFiles { + if strings.HasPrefix(f, "go/") || strings.HasPrefix(f, "libgo/") { + isMirrored = true + break + } + } + + // use git log to find the corresponding commit to sync to in the gcc mirror. + // If the files modified in the gofrontend are mirrored to gcc, we expect a + // commit with a similar description in the gcc mirror. If the files modified are + // not mirrored, e.g. in support/, we can sync to the most recent gcc commit that + // occurred before those files were modified to verify gccgo's status at that point. 
+ logCmd := []string{ + "log", + "-1", + "--format=%H", + } + var errMsg string + if isMirrored { + commitDesc, err := repo.Master.VCS.LogAtRev(repo.Path, hash, "{desc|firstline|escape}") + if err != nil { + return "", err + } + + quotedDesc := regexp.QuoteMeta(string(commitDesc)) + logCmd = append(logCmd, "--grep", quotedDesc, "--regexp-ignore-case", "--extended-regexp") + errMsg = fmt.Sprintf("Failed to find a commit with a similar description to '%s'", string(commitDesc)) + } else { + commitDate, err := repo.Master.VCS.LogAtRev(repo.Path, hash, "{date|rfc3339date}") + if err != nil { + return "", err + } + + logCmd = append(logCmd, "--before", string(commitDate)) + errMsg = fmt.Sprintf("Failed to find a commit before '%s'", string(commitDate)) + } + + buf.Reset() + if err := run(exec.Command("git", logCmd...), runEnv(envv), allOutput(&buf), runDir(gccpath)); err != nil { + return "", fmt.Errorf("%s: %s", errMsg, err) + } + gccRev := buf.String() + if gccRev == "" { + return "", fmt.Errorf(errMsg) + } + + // checkout gccRev + // TODO(cmang): Fix this to work in parallel mode. + if err := run(exec.Command("git", "reset", "--hard", strings.TrimSpace(gccRev)), runEnv(envv), runDir(gccpath)); err != nil { + return "", fmt.Errorf("Failed to checkout commit at revision %s: %s", gccRev, err) + } + + // make objdir to work in + gccobjdir := filepath.Join(workpath, "gcc-objdir") + if err := os.Mkdir(gccobjdir, mkdirPerm); err != nil { + return "", err + } + + // configure GCC with substituted gofrontend and libgo + if err := run(exec.Command(filepath.Join(gccpath, "configure"), + "--enable-languages=c,c++,go", + "--disable-bootstrap", + "--disable-multilib", + ), runEnv(envv), runDir(gccobjdir)); err != nil { + return "", fmt.Errorf("Failed to configure GCC: %v", err) + } + + // build gcc + if err := run(exec.Command("make", *gccOpts), runTimeout(*buildTimeout), runEnv(envv), runDir(gccobjdir)); err != nil { + return "", fmt.Errorf("Failed to build GCC: %s", err) + } + + return gccobjdir, nil +} + +func getenvOk(k string) (v string, ok bool) { + v = os.Getenv(k) + if v != "" { + return v, true + } + keq := k + "=" + for _, kv := range os.Environ() { + if kv == keq { + return "", true + } + } + return "", false +} + +// extraEnv returns environment variables that need to be copied from +// the gobuilder's environment to the envv of its subprocesses. +func extraEnv() []string { + extra := []string{ + "GOARM", + "GO386", + "GOROOT_BOOTSTRAP", // See https://golang.org/s/go15bootstrap + "CGO_ENABLED", + "CC", + "CC_FOR_TARGET", + "PATH", + "TMPDIR", + "USER", + } + if runtime.GOOS == "plan9" { + extra = append(extra, "objtype", "cputype", "path") + } + return extra +} diff --git a/dashboard/cmd/builder/exec.go b/dashboard/cmd/builder/exec.go new file mode 100644 index 0000000..1b46ed1 --- /dev/null +++ b/dashboard/cmd/builder/exec.go @@ -0,0 +1,99 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "io" + "log" + "os/exec" + "time" +) + +// run runs a command with optional arguments. 
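run and the option helpers below form a small functional-options API around exec.Cmd. A usage sketch, assuming the package's parsed flags; the command, directory, and timeout are placeholders:

var out bytes.Buffer
cmd := exec.Command("go", "version")
err := run(cmd,
	runTimeout(2*time.Minute), // override the default *cmdTimeout
	runDir("/tmp/work"),       // working directory for the command
	runEnv(os.Environ()),      // explicit environment
	allOutput(&out),           // capture stdout and stderr together
)
if err != nil {
	log.Printf("go version failed: %v\noutput: %s", err, out.Bytes())
}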
+func run(cmd *exec.Cmd, opts ...runOpt) error { + a := runArgs{cmd, *cmdTimeout} + for _, opt := range opts { + opt.modArgs(&a) + } + if *verbose { + log.Printf("running %v in %v", a.cmd.Args, a.cmd.Dir) + } + if err := cmd.Start(); err != nil { + log.Printf("failed to start command %v: %v", a.cmd.Args, err) + return err + } + err := timeout(a.timeout, cmd.Wait) + if _, ok := err.(timeoutError); ok { + cmd.Process.Kill() + } + return err +} + +// Zero or more runOpts can be passed to run to modify the command +// before it's run. +type runOpt interface { + modArgs(*runArgs) +} + +// allOutput sends both stdout and stderr to w. +func allOutput(w io.Writer) optFunc { + return func(a *runArgs) { + a.cmd.Stdout = w + a.cmd.Stderr = w + } +} + +func runTimeout(timeout time.Duration) optFunc { + return func(a *runArgs) { + a.timeout = timeout + } +} + +func runDir(dir string) optFunc { + return func(a *runArgs) { + a.cmd.Dir = dir + } +} + +func runEnv(env []string) optFunc { + return func(a *runArgs) { + a.cmd.Env = env + } +} + +// timeout runs f and returns its error value, or if the function does not +// complete before the provided duration it returns a timeout error. +func timeout(d time.Duration, f func() error) error { + errc := make(chan error, 1) + go func() { + errc <- f() + }() + t := time.NewTimer(d) + defer t.Stop() + select { + case <-t.C: + return timeoutError(d) + case err := <-errc: + return err + } +} + +type timeoutError time.Duration + +func (e timeoutError) Error() string { + return fmt.Sprintf("timed out after %v", time.Duration(e)) +} + +// optFunc implements runOpt with a function, like http.HandlerFunc. +type optFunc func(*runArgs) + +func (f optFunc) modArgs(a *runArgs) { f(a) } + +// internal detail to exec.go: +type runArgs struct { + cmd *exec.Cmd + timeout time.Duration +} diff --git a/dashboard/cmd/builder/filemutex_flock.go b/dashboard/cmd/builder/filemutex_flock.go new file mode 100644 index 0000000..68851b8 --- /dev/null +++ b/dashboard/cmd/builder/filemutex_flock.go @@ -0,0 +1,66 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd + +package main + +import ( + "sync" + "syscall" +) + +// FileMutex is similar to sync.RWMutex, but also synchronizes across processes. +// This implementation is based on flock syscall. 
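Elsewhere in this patch the builder holds the shared side of this lock while building and takes the exclusive side around benchmarks, so that nothing else, in this process or in another builder pointed at the same -filelock file, runs at the same time. A compressed sketch of that pattern (the lock path is a placeholder):

mu := MakeFileMutex("/tmp/gobuilder.lock") // "" falls back to an in-process lock only

mu.RLock()
// ... ordinary building and testing happens under the shared lock ...
mu.RUnlock()

mu.Lock()
// ... a benchmark runs with the machine to itself ...
mu.Unlock()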
+type FileMutex struct { + mu sync.RWMutex + fd int +} + +func MakeFileMutex(filename string) *FileMutex { + if filename == "" { + return &FileMutex{fd: -1} + } + fd, err := syscall.Open(filename, syscall.O_CREAT|syscall.O_RDONLY, mkdirPerm) + if err != nil { + panic(err) + } + return &FileMutex{fd: fd} +} + +func (m *FileMutex) Lock() { + m.mu.Lock() + if m.fd != -1 { + if err := syscall.Flock(m.fd, syscall.LOCK_EX); err != nil { + panic(err) + } + } +} + +func (m *FileMutex) Unlock() { + if m.fd != -1 { + if err := syscall.Flock(m.fd, syscall.LOCK_UN); err != nil { + panic(err) + } + } + m.mu.Unlock() +} + +func (m *FileMutex) RLock() { + m.mu.RLock() + if m.fd != -1 { + if err := syscall.Flock(m.fd, syscall.LOCK_SH); err != nil { + panic(err) + } + } +} + +func (m *FileMutex) RUnlock() { + if m.fd != -1 { + if err := syscall.Flock(m.fd, syscall.LOCK_UN); err != nil { + panic(err) + } + } + m.mu.RUnlock() +} diff --git a/dashboard/cmd/builder/filemutex_local.go b/dashboard/cmd/builder/filemutex_local.go new file mode 100644 index 0000000..68cfb62 --- /dev/null +++ b/dashboard/cmd/builder/filemutex_local.go @@ -0,0 +1,27 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 solaris + +package main + +import ( + "log" + "sync" +) + +// FileMutex is similar to sync.RWMutex, but also synchronizes across processes. +// This implementation is a fallback that does not actually provide inter-process synchronization. +type FileMutex struct { + sync.RWMutex +} + +func MakeFileMutex(filename string) *FileMutex { + return &FileMutex{} +} + +func init() { + log.Printf("WARNING: using fake file mutex." + + " Don't run more than one of these at once!!!") +} diff --git a/dashboard/cmd/builder/filemutex_windows.go b/dashboard/cmd/builder/filemutex_windows.go new file mode 100644 index 0000000..1f058b2 --- /dev/null +++ b/dashboard/cmd/builder/filemutex_windows.go @@ -0,0 +1,105 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "sync" + "syscall" + "unsafe" +) + +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + procLockFileEx = modkernel32.NewProc("LockFileEx") + procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") +) + +const ( + INVALID_FILE_HANDLE = ^syscall.Handle(0) + LOCKFILE_EXCLUSIVE_LOCK = 2 +) + +func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall6(procUnlockFileEx.Addr(), 5, uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +// FileMutex is similar to sync.RWMutex, but also synchronizes across processes. +// This implementation is based on flock syscall. 
+type FileMutex struct { + mu sync.RWMutex + fd syscall.Handle +} + +func MakeFileMutex(filename string) *FileMutex { + if filename == "" { + return &FileMutex{fd: INVALID_FILE_HANDLE} + } + fd, err := syscall.CreateFile(&(syscall.StringToUTF16(filename)[0]), syscall.GENERIC_READ|syscall.GENERIC_WRITE, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, nil, syscall.OPEN_ALWAYS, syscall.FILE_ATTRIBUTE_NORMAL, 0) + if err != nil { + panic(err) + } + return &FileMutex{fd: fd} +} + +func (m *FileMutex) Lock() { + m.mu.Lock() + if m.fd != INVALID_FILE_HANDLE { + var ol syscall.Overlapped + if err := lockFileEx(m.fd, LOCKFILE_EXCLUSIVE_LOCK, 0, 1, 0, &ol); err != nil { + panic(err) + } + } +} + +func (m *FileMutex) Unlock() { + if m.fd != INVALID_FILE_HANDLE { + var ol syscall.Overlapped + if err := unlockFileEx(m.fd, 0, 1, 0, &ol); err != nil { + panic(err) + } + } + m.mu.Unlock() +} + +func (m *FileMutex) RLock() { + m.mu.RLock() + if m.fd != INVALID_FILE_HANDLE { + var ol syscall.Overlapped + if err := lockFileEx(m.fd, 0, 0, 1, 0, &ol); err != nil { + panic(err) + } + } +} + +func (m *FileMutex) RUnlock() { + if m.fd != INVALID_FILE_HANDLE { + var ol syscall.Overlapped + if err := unlockFileEx(m.fd, 0, 1, 0, &ol); err != nil { + panic(err) + } + } + m.mu.RUnlock() +} diff --git a/dashboard/cmd/builder/http.go b/dashboard/cmd/builder/http.go new file mode 100644 index 0000000..8d0923c --- /dev/null +++ b/dashboard/cmd/builder/http.go @@ -0,0 +1,225 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "net/http" + "net/url" + "time" +) + +const builderVersion = 1 // keep in sync with dashboard/app/build/handler.go + +type obj map[string]interface{} + +// dash runs the given method and command on the dashboard. +// If args is non-nil it is encoded as the URL query string. +// If req is non-nil it is JSON-encoded and passed as the body of the HTTP POST. +// If resp is non-nil the server's response is decoded into the value pointed +// to by resp (resp must be a pointer). +func dash(meth, cmd string, args url.Values, req, resp interface{}) error { + argsCopy := url.Values{"version": {fmt.Sprint(builderVersion)}} + for k, v := range args { + if k == "version" { + panic(`dash: reserved args key: "version"`) + } + argsCopy[k] = v + } + var r *http.Response + var err error + if *verbose { + log.Println("dash <-", meth, cmd, argsCopy, req) + } + cmd = *dashboard + "/" + cmd + "?" + argsCopy.Encode() + switch meth { + case "GET": + if req != nil { + log.Panicf("%s to %s with req", meth, cmd) + } + r, err = http.Get(cmd) + case "POST": + var body io.Reader + if req != nil { + b, err := json.Marshal(req) + if err != nil { + return err + } + body = bytes.NewBuffer(b) + } + r, err = http.Post(cmd, "text/json", body) + default: + log.Panicf("%s: invalid method %q", cmd, meth) + panic("invalid method: " + meth) + } + if err != nil { + return err + } + defer r.Body.Close() + if r.StatusCode != http.StatusOK { + return fmt.Errorf("bad http response: %v", r.Status) + } + body := new(bytes.Buffer) + if _, err := body.ReadFrom(r.Body); err != nil { + return err + } + + // Read JSON-encoded Response into provided resp + // and return an error if present. 
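Every dashboard reply is a JSON envelope with Response and Error fields; dash plugs the caller's pointer into Response before unmarshalling, so the payload lands directly in the caller's struct. A self-contained sketch of that decoding trick (the body shown is illustrative, abbreviated from a "todo" reply):

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

func main() {
	body := []byte(`{"Response":{"Kind":"build-go-commit","Data":{"Hash":"3b76b017cabb"}},"Error":""}`)

	var resp struct {
		Kind string
		Data struct{ Hash string }
	}
	// The caller's pointer is embedded in the envelope before unmarshalling,
	// mirroring what dash does with its resp parameter.
	result := struct {
		Response interface{}
		Error    string
	}{Response: &resp}

	if err := json.Unmarshal(body, &result); err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Kind, resp.Data.Hash) // build-go-commit 3b76b017cabb
}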
+ var result = struct { + Response interface{} + Error string + }{ + // Put the provided resp in here as it can be a pointer to + // some value we should unmarshal into. + Response: resp, + } + if err = json.Unmarshal(body.Bytes(), &result); err != nil { + log.Printf("json unmarshal %#q: %s\n", body.Bytes(), err) + return err + } + if *verbose { + log.Println("dash ->", result) + } + if result.Error != "" { + return errors.New(result.Error) + } + + return nil +} + +// todo returns the next hash to build or benchmark. +func (b *Builder) todo(kinds []string, pkg, goHash string) (kind, rev string, benchs []string, err error) { + args := url.Values{ + "builder": {b.name}, + "packagePath": {pkg}, + "goHash": {goHash}, + } + for _, k := range kinds { + args.Add("kind", k) + } + var resp *struct { + Kind string + Data struct { + Hash string + PerfResults []string + } + } + if err = dash("GET", "todo", args, nil, &resp); err != nil { + return + } + if resp == nil { + return + } + if *verbose { + fmt.Printf("dash resp: %+v\n", *resp) + } + for _, k := range kinds { + if k == resp.Kind { + return resp.Kind, resp.Data.Hash, resp.Data.PerfResults, nil + } + } + err = fmt.Errorf("expecting Kinds %q, got %q", kinds, resp.Kind) + return +} + +// recordResult sends build results to the dashboard +func (b *Builder) recordResult(ok bool, pkg, hash, goHash, buildLog string, runTime time.Duration) error { + if !*report { + return nil + } + req := obj{ + "Builder": b.name, + "PackagePath": pkg, + "Hash": hash, + "GoHash": goHash, + "OK": ok, + "Log": buildLog, + "RunTime": runTime, + } + args := url.Values{"key": {b.key}, "builder": {b.name}} + return dash("POST", "result", args, req, nil) +} + +// Result of running a single benchmark on a single commit. +type PerfResult struct { + Builder string + Benchmark string + Hash string + OK bool + Metrics []PerfMetric + Artifacts []PerfArtifact +} + +type PerfMetric struct { + Type string + Val uint64 +} + +type PerfArtifact struct { + Type string + Body string +} + +// recordPerfResult sends benchmarking results to the dashboard +func (b *Builder) recordPerfResult(req *PerfResult) error { + if !*report { + return nil + } + req.Builder = b.name + args := url.Values{"key": {b.key}, "builder": {b.name}} + return dash("POST", "perf-result", args, req, nil) +} + +func postCommit(key, pkg string, l *HgLog) error { + if !*report { + return nil + } + t, err := time.Parse(time.RFC3339, l.Date) + if err != nil { + return fmt.Errorf("parsing %q: %v", l.Date, t) + } + return dash("POST", "commit", url.Values{"key": {key}}, obj{ + "PackagePath": pkg, + "Hash": l.Hash, + "ParentHash": l.Parent, + "Time": t.Format(time.RFC3339), + "User": l.Author, + "Desc": l.Desc, + "NeedsBenchmarking": l.bench, + }, nil) +} + +func dashboardCommit(pkg, hash string) bool { + err := dash("GET", "commit", url.Values{ + "packagePath": {pkg}, + "hash": {hash}, + }, nil, nil) + return err == nil +} + +func dashboardPackages(kind string) []string { + args := url.Values{"kind": []string{kind}} + var resp []struct { + Path string + } + if err := dash("GET", "packages", args, nil, &resp); err != nil { + log.Println("dashboardPackages:", err) + return nil + } + if *verbose { + fmt.Printf("dash resp: %+v\n", resp) + } + var pkgs []string + for _, r := range resp { + pkgs = append(pkgs, r.Path) + } + return pkgs +} diff --git a/dashboard/cmd/builder/main.go b/dashboard/cmd/builder/main.go new file mode 100644 index 0000000..9e7c1ed --- /dev/null +++ b/dashboard/cmd/builder/main.go @@ -0,0 +1,679 @@ +// 
Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bytes" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "net/http" + "os" + "os/exec" + "path/filepath" + "regexp" + "runtime" + "strconv" + "strings" + "time" + + "golang.org/x/tools/go/vcs" +) + +const ( + codeProject = "go" + codePyScript = "misc/dashboard/googlecode_upload.py" + gofrontendImportPath = "code.google.com/p/gofrontend" + mkdirPerm = 0750 + waitInterval = 30 * time.Second // time to wait before checking for new revs + pkgBuildInterval = 24 * time.Hour // rebuild packages every 24 hours +) + +type Builder struct { + goroot *Repo + name string + goos, goarch string + key string + env builderEnv + // Last benchmarking workpath. We reuse it, if do successive benchmarks on the same commit. + lastWorkpath string +} + +var ( + doBuild = flag.Bool("build", true, "Build and test packages") + doBench = flag.Bool("bench", false, "Run benchmarks") + buildroot = flag.String("buildroot", defaultBuildRoot(), "Directory under which to build") + dashboard = flag.String("dashboard", "https://build.golang.org", "Dashboard app base path") + buildRelease = flag.Bool("release", false, "Build and upload binary release archives") + buildRevision = flag.String("rev", "", "Build specified revision and exit") + buildCmd = flag.String("cmd", filepath.Join(".", allCmd), "Build command (specify relative to go/src/)") + buildTool = flag.String("tool", "go", "Tool to build.") + gcPath = flag.String("gcpath", "go.googlesource.com/go", "Path to download gc from") + gccPath = flag.String("gccpath", "https://github.com/mirrors/gcc.git", "Path to download gcc from") + gccOpts = flag.String("gccopts", "", "Command-line options to pass to `make` when building gccgo") + benchPath = flag.String("benchpath", "golang.org/x/benchmarks/bench", "Path to download benchmarks from") + failAll = flag.Bool("fail", false, "fail all builds") + parallel = flag.Bool("parallel", false, "Build multiple targets in parallel") + buildTimeout = flag.Duration("buildTimeout", 60*time.Minute, "Maximum time to wait for builds and tests") + cmdTimeout = flag.Duration("cmdTimeout", 10*time.Minute, "Maximum time to wait for an external command") + benchNum = flag.Int("benchnum", 5, "Run each benchmark that many times") + benchTime = flag.Duration("benchtime", 5*time.Second, "Benchmarking time for a single benchmark run") + benchMem = flag.Int("benchmem", 64, "Approx RSS value to aim at in benchmarks, in MB") + fileLock = flag.String("filelock", "", "File to lock around benchmaring (synchronizes several builders)") + verbose = flag.Bool("v", false, "verbose") + report = flag.Bool("report", true, "whether to report results to the dashboard") +) + +var ( + binaryTagRe = regexp.MustCompile(`^(release\.r|weekly\.)[0-9\-.]+`) + releaseRe = regexp.MustCompile(`^release\.r[0-9\-.]+`) + allCmd = "all" + suffix + makeCmd = "make" + suffix + raceCmd = "race" + suffix + cleanCmd = "clean" + suffix + suffix = defaultSuffix() + exeExt = defaultExeExt() + + benchCPU = CpuList([]int{1}) + benchAffinity = CpuList([]int{}) + benchMutex *FileMutex // Isolates benchmarks from other activities +) + +// CpuList is used as flag.Value for -benchcpu flag. 
+type CpuList []int + +func (cl *CpuList) String() string { + str := "" + for _, cpu := range *cl { + if str == "" { + str = strconv.Itoa(cpu) + } else { + str += fmt.Sprintf(",%v", cpu) + } + } + return str +} + +func (cl *CpuList) Set(str string) error { + *cl = []int{} + for _, val := range strings.Split(str, ",") { + val = strings.TrimSpace(val) + if val == "" { + continue + } + cpu, err := strconv.Atoi(val) + if err != nil || cpu <= 0 { + return fmt.Errorf("%v is a bad value for GOMAXPROCS", val) + } + *cl = append(*cl, cpu) + } + if len(*cl) == 0 { + *cl = append(*cl, 1) + } + return nil +} + +func main() { + flag.Var(&benchCPU, "benchcpu", "Comma-delimited list of GOMAXPROCS values for benchmarking") + flag.Var(&benchAffinity, "benchaffinity", "Comma-delimited list of affinity values for benchmarking") + flag.Usage = func() { + fmt.Fprintf(os.Stderr, "usage: %s goos-goarch...\n", os.Args[0]) + flag.PrintDefaults() + os.Exit(2) + } + flag.Parse() + if len(flag.Args()) == 0 { + flag.Usage() + } + + vcs.ShowCmd = *verbose + vcs.Verbose = *verbose + + benchMutex = MakeFileMutex(*fileLock) + + rr, err := repoForTool() + if err != nil { + log.Fatal("Error finding repository:", err) + } + rootPath := filepath.Join(*buildroot, "goroot") + goroot := &Repo{ + Path: rootPath, + Master: rr, + } + + // set up work environment, use existing environment if possible + if goroot.Exists() || *failAll { + log.Print("Found old workspace, will use it") + } else { + if err := os.RemoveAll(*buildroot); err != nil { + log.Fatalf("Error removing build root (%s): %s", *buildroot, err) + } + if err := os.Mkdir(*buildroot, mkdirPerm); err != nil { + log.Fatalf("Error making build root (%s): %s", *buildroot, err) + } + var err error + goroot, err = RemoteRepo(goroot.Master.Root, rootPath) + if err != nil { + log.Fatalf("Error creating repository with url (%s): %s", goroot.Master.Root, err) + } + + goroot, err = goroot.Clone(goroot.Path, "") + if err != nil { + log.Fatal("Error cloning repository:", err) + } + } + + // set up builders + builders := make([]*Builder, len(flag.Args())) + for i, name := range flag.Args() { + b, err := NewBuilder(goroot, name) + if err != nil { + log.Fatal(err) + } + builders[i] = b + } + + if *failAll { + failMode(builders) + return + } + + // if specified, build revision and return + if *buildRevision != "" { + hash, err := goroot.FullHash(*buildRevision) + if err != nil { + log.Fatal("Error finding revision: ", err) + } + var exitErr error + for _, b := range builders { + if err := b.buildHash(hash); err != nil { + log.Println(err) + exitErr = err + } + } + if exitErr != nil && !*report { + // This mode (-report=false) is used for + // testing Docker images, making sure the + // environment is correctly configured. 
For + // testing, we want a non-zero exit status, as + // returned by log.Fatal: + log.Fatal("Build error.") + } + return + } + + if !*doBuild && !*doBench { + fmt.Fprintf(os.Stderr, "Nothing to do, exiting (specify either -build or -bench or both)\n") + os.Exit(2) + } + + // go continuous build mode + // check for new commits and build them + benchMutex.RLock() + for { + built := false + t := time.Now() + if *parallel { + done := make(chan bool) + for _, b := range builders { + go func(b *Builder) { + done <- b.buildOrBench() + }(b) + } + for _ = range builders { + built = <-done || built + } + } else { + for _, b := range builders { + built = b.buildOrBench() || built + } + } + // sleep if there was nothing to build + benchMutex.RUnlock() + if !built { + time.Sleep(waitInterval) + } + benchMutex.RLock() + // sleep if we're looping too fast. + dt := time.Now().Sub(t) + if dt < waitInterval { + time.Sleep(waitInterval - dt) + } + } +} + +// go continuous fail mode +// check for new commits and FAIL them +func failMode(builders []*Builder) { + for { + built := false + for _, b := range builders { + built = b.failBuild() || built + } + // stop if there was nothing to fail + if !built { + break + } + } +} + +func NewBuilder(goroot *Repo, name string) (*Builder, error) { + b := &Builder{ + goroot: goroot, + name: name, + } + + // get builderEnv for this tool + var err error + if b.env, err = b.builderEnv(name); err != nil { + return nil, err + } + if *report { + err = b.setKey() + } + return b, err +} + +func (b *Builder) setKey() error { + // read keys from keyfile + fn := "" + switch runtime.GOOS { + case "plan9": + fn = os.Getenv("home") + case "windows": + fn = os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH") + default: + fn = os.Getenv("HOME") + } + fn = filepath.Join(fn, ".gobuildkey") + if s := fn + "-" + b.name; isFile(s) { // builder-specific file + fn = s + } + c, err := ioutil.ReadFile(fn) + if err != nil { + // If the on-disk file doesn't exist, also try the + // Google Compute Engine metadata. + if v := gceProjectMetadata("buildkey-" + b.name); v != "" { + b.key = v + return nil + } + return fmt.Errorf("readKeys %s (%s): %s", b.name, fn, err) + } + b.key = string(bytes.TrimSpace(bytes.SplitN(c, []byte("\n"), 2)[0])) + return nil +} + +func gceProjectMetadata(attr string) string { + client := &http.Client{ + Transport: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 750 * time.Millisecond, + KeepAlive: 30 * time.Second, + }).Dial, + ResponseHeaderTimeout: 750 * time.Millisecond, + }, + } + req, _ := http.NewRequest("GET", "http://metadata.google.internal/computeMetadata/v1/project/attributes/"+attr, nil) + req.Header.Set("Metadata-Flavor", "Google") + res, err := client.Do(req) + if err != nil { + return "" + } + defer res.Body.Close() + if res.StatusCode != 200 { + return "" + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + return "" + } + return string(bytes.TrimSpace(slurp)) +} + +// builderEnv returns the builderEnv for this buildTool. +func (b *Builder) builderEnv(name string) (builderEnv, error) { + // get goos/goarch from builder string + s := strings.SplitN(b.name, "-", 3) + if len(s) < 2 { + return nil, fmt.Errorf("unsupported builder form: %s", name) + } + b.goos = s[0] + b.goarch = s[1] + + switch *buildTool { + case "go": + return &goEnv{ + goos: s[0], + goarch: s[1], + }, nil + case "gccgo": + return &gccgoEnv{}, nil + default: + return nil, fmt.Errorf("unsupported build tool: %s", *buildTool) + } +} + +// buildCmd returns the build command to invoke. 
+// Builders which contain the string '-race' in their +// name will override *buildCmd and return raceCmd. +func (b *Builder) buildCmd() string { + if strings.Contains(b.name, "-race") { + return raceCmd + } + return *buildCmd +} + +// buildOrBench checks for a new commit for this builder +// and builds or benchmarks it if one is found. +// It returns true if a build/benchmark was attempted. +func (b *Builder) buildOrBench() bool { + var kinds []string + if *doBuild { + kinds = append(kinds, "build-go-commit") + } + if *doBench { + kinds = append(kinds, "benchmark-go-commit") + } + kind, hash, benchs, err := b.todo(kinds, "", "") + if err != nil { + log.Println(err) + return false + } + if hash == "" { + return false + } + switch kind { + case "build-go-commit": + if err := b.buildHash(hash); err != nil { + log.Println(err) + } + return true + case "benchmark-go-commit": + if err := b.benchHash(hash, benchs); err != nil { + log.Println(err) + } + return true + default: + log.Printf("Unknown todo kind %v", kind) + return false + } +} + +func (b *Builder) buildHash(hash string) error { + log.Println(b.name, "building", hash) + + // create place in which to do work + workpath := filepath.Join(*buildroot, b.name+"-"+hash[:12]) + if err := os.Mkdir(workpath, mkdirPerm); err != nil { + if err2 := removePath(workpath); err2 != nil { + return err + } + if err := os.Mkdir(workpath, mkdirPerm); err != nil { + return err + } + } + defer removePath(workpath) + + buildLog, runTime, err := b.buildRepoOnHash(workpath, hash, b.buildCmd()) + if err != nil { + log.Printf("%s failed at %v: %v", b.name, hash, err) + // record failure + return b.recordResult(false, "", hash, "", buildLog, runTime) + } + + // record success + if err = b.recordResult(true, "", hash, "", "", runTime); err != nil { + return fmt.Errorf("recordResult: %s", err) + } + + if *buildTool == "go" { + // build sub-repositories + goRoot := filepath.Join(workpath, *buildTool) + goPath := workpath + b.buildSubrepos(goRoot, goPath, hash) + } + + return nil +} + +// buildRepoOnHash clones repo into workpath and builds it. +func (b *Builder) buildRepoOnHash(workpath, hash, cmd string) (buildLog string, runTime time.Duration, err error) { + // Delete the previous workdir, if necessary + // (benchmarking code can execute several benchmarks in the same workpath). + if b.lastWorkpath != "" { + if b.lastWorkpath == workpath { + panic("workpath already exists: " + workpath) + } + removePath(b.lastWorkpath) + b.lastWorkpath = "" + } + + // pull before cloning to ensure we have the revision + if err = b.goroot.Pull(); err != nil { + buildLog = err.Error() + return + } + + // set up builder's environment. + srcDir, err := b.env.setup(b.goroot, workpath, hash, b.envv()) + if err != nil { + buildLog = err.Error() + return + } + + // build + var buildbuf bytes.Buffer + logfile := filepath.Join(workpath, "build.log") + f, err := os.Create(logfile) + if err != nil { + return err.Error(), 0, err + } + defer f.Close() + w := io.MultiWriter(f, &buildbuf) + + // go's build command is a script relative to the srcDir, whereas + // gccgo's build command is usually "make check-go" in the srcDir. + if *buildTool == "go" { + if !filepath.IsAbs(cmd) { + cmd = filepath.Join(srcDir, cmd) + } + } + + // naive splitting of command from its arguments: + args := strings.Split(cmd, " ") + c := exec.Command(args[0], args[1:]...) 
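For illustration, a minimal standalone sketch (not part of the patch itself) of what the naive split above does to a hypothetical -cmd value: a command may carry space-separated arguments, but a path containing a space is broken apart, which the builder accepts because its build commands are plain script names under go/src.

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		// A hypothetical -cmd value with one argument splits as intended.
		args := strings.Split("make.bash --no-clean", " ")
		fmt.Println(args[0], args[1:]) // make.bash [--no-clean]

		// A path containing a space is split incorrectly.
		bad := strings.Split("build scripts/all.bash", " ")
		fmt.Println(len(bad)) // 2, not 1
	}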
+ c.Dir = srcDir + c.Env = b.envv() + if *verbose { + c.Stdout = io.MultiWriter(os.Stdout, w) + c.Stderr = io.MultiWriter(os.Stderr, w) + } else { + c.Stdout = w + c.Stderr = w + } + + startTime := time.Now() + err = run(c, runTimeout(*buildTimeout)) + runTime = time.Since(startTime) + if err != nil { + fmt.Fprintf(w, "Build complete, duration %v. Result: error: %v\n", runTime, err) + } else { + fmt.Fprintf(w, "Build complete, duration %v. Result: success\n", runTime) + } + return buildbuf.String(), runTime, err +} + +// failBuild checks for a new commit for this builder +// and fails it if one is found. +// It returns true if a build was "attempted". +func (b *Builder) failBuild() bool { + _, hash, _, err := b.todo([]string{"build-go-commit"}, "", "") + if err != nil { + log.Println(err) + return false + } + if hash == "" { + return false + } + + log.Printf("fail %s %s\n", b.name, hash) + + if err := b.recordResult(false, "", hash, "", "auto-fail mode run by "+os.Getenv("USER"), 0); err != nil { + log.Print(err) + } + return true +} + +func (b *Builder) buildSubrepos(goRoot, goPath, goHash string) { + for _, pkg := range dashboardPackages("subrepo") { + // get the latest todo for this package + _, hash, _, err := b.todo([]string{"build-package"}, pkg, goHash) + if err != nil { + log.Printf("buildSubrepos %s: %v", pkg, err) + continue + } + if hash == "" { + continue + } + + // build the package + if *verbose { + log.Printf("buildSubrepos %s: building %q", pkg, hash) + } + buildLog, err := b.buildSubrepo(goRoot, goPath, pkg, hash) + if err != nil { + if buildLog == "" { + buildLog = err.Error() + } + log.Printf("buildSubrepos %s: %v", pkg, err) + } + + // record the result + err = b.recordResult(err == nil, pkg, hash, goHash, buildLog, 0) + if err != nil { + log.Printf("buildSubrepos %s: %v", pkg, err) + } + } +} + +// buildSubrepo fetches the given package, updates it to the specified hash, +// and runs 'go test -short pkg/...'. It returns the build log and any error. +func (b *Builder) buildSubrepo(goRoot, goPath, pkg, hash string) (string, error) { + goTool := filepath.Join(goRoot, "bin", "go") + exeExt + env := append(b.envv(), "GOROOT="+goRoot, "GOPATH="+goPath) + + // add $GOROOT/bin and $GOPATH/bin to PATH + for i, e := range env { + const p = "PATH=" + if !strings.HasPrefix(e, p) { + continue + } + sep := string(os.PathListSeparator) + env[i] = p + filepath.Join(goRoot, "bin") + sep + filepath.Join(goPath, "bin") + sep + e[len(p):] + } + + // HACK: check out to new sub-repo location instead of old location. + pkg = strings.Replace(pkg, "code.google.com/p/go.", "golang.org/x/", 1) + + // fetch package and dependencies + var outbuf bytes.Buffer + err := run(exec.Command(goTool, "get", "-d", pkg+"/..."), runEnv(env), allOutput(&outbuf), runDir(goPath)) + if err != nil { + return outbuf.String(), err + } + outbuf.Reset() + + // hg update to the specified hash + pkgmaster, err := vcs.RepoRootForImportPath(pkg, *verbose) + if err != nil { + return "", fmt.Errorf("Error finding subrepo (%s): %s", pkg, err) + } + repo := &Repo{ + Path: filepath.Join(goPath, "src", pkg), + Master: pkgmaster, + } + if err := repo.UpdateTo(hash); err != nil { + return "", err + } + + // test the package + err = run(exec.Command(goTool, "test", "-short", pkg+"/..."), + runTimeout(*buildTimeout), runEnv(env), allOutput(&outbuf), runDir(goPath)) + return outbuf.String(), err +} + +// repoForTool returns the correct RepoRoot for the buildTool, or an error if +// the tool is unknown. 
+func repoForTool() (*vcs.RepoRoot, error) { + switch *buildTool { + case "go": + return vcs.RepoRootForImportPath(*gcPath, *verbose) + case "gccgo": + return vcs.RepoRootForImportPath(gofrontendImportPath, *verbose) + default: + return nil, fmt.Errorf("unknown build tool: %s", *buildTool) + } +} + +func isDirectory(name string) bool { + s, err := os.Stat(name) + return err == nil && s.IsDir() +} + +func isFile(name string) bool { + s, err := os.Stat(name) + return err == nil && !s.IsDir() +} + +// defaultSuffix returns file extension used for command files in +// current os environment. +func defaultSuffix() string { + switch runtime.GOOS { + case "windows": + return ".bat" + case "plan9": + return ".rc" + default: + return ".bash" + } +} + +func defaultExeExt() string { + switch runtime.GOOS { + case "windows": + return ".exe" + default: + return "" + } +} + +// defaultBuildRoot returns default buildroot directory. +func defaultBuildRoot() string { + var d string + if runtime.GOOS == "windows" { + // will use c:\, otherwise absolute paths become too long + // during builder run, see http://golang.org/issue/3358. + d = `c:\` + } else { + d = os.TempDir() + } + return filepath.Join(d, "gobuilder") +} + +// removePath is a more robust version of os.RemoveAll. +// On windows, if remove fails (which can happen if test/benchmark timeouts +// and keeps some files open) it tries to rename the dir. +func removePath(path string) error { + if err := os.RemoveAll(path); err != nil { + if runtime.GOOS == "windows" { + err = os.Rename(path, filepath.Clean(path)+"_remove_me") + } + return err + } + return nil +} diff --git a/dashboard/cmd/builder/vcs.go b/dashboard/cmd/builder/vcs.go new file mode 100644 index 0000000..2139a90 --- /dev/null +++ b/dashboard/cmd/builder/vcs.go @@ -0,0 +1,225 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bytes" + "encoding/xml" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" + + "golang.org/x/tools/go/vcs" +) + +// Repo represents a mercurial repository. +type Repo struct { + Path string + Master *vcs.RepoRoot + sync.Mutex +} + +// RemoteRepo constructs a *Repo representing a remote repository. +func RemoteRepo(url, path string) (*Repo, error) { + rr, err := vcs.RepoRootForImportPath(url, *verbose) + if err != nil { + return nil, err + } + return &Repo{ + Path: path, + Master: rr, + }, nil +} + +// Clone clones the current Repo to a new destination +// returning a new *Repo if successful. +func (r *Repo) Clone(path, rev string) (*Repo, error) { + r.Lock() + defer r.Unlock() + + err := timeout(*cmdTimeout, func() error { + downloadPath := r.Path + if !r.Exists() { + downloadPath = r.Master.Repo + } + if rev == "" { + return r.Master.VCS.Create(path, downloadPath) + } + return r.Master.VCS.CreateAtRev(path, downloadPath, rev) + }) + if err != nil { + return nil, err + } + return &Repo{ + Path: path, + Master: r.Master, + }, nil +} + +// Export exports the current Repo at revision rev to a new destination. +func (r *Repo) Export(path, rev string) error { + // TODO(adg,cmang): implement Export in go/vcs + _, err := r.Clone(path, rev) + return err +} + +// UpdateTo updates the working copy of this Repo to the +// supplied revision. 
+func (r *Repo) UpdateTo(hash string) error { + r.Lock() + defer r.Unlock() + + if r.Master.VCS.Cmd == "git" { + cmd := exec.Command("git", "reset", "--hard", hash) + var log bytes.Buffer + err := run(cmd, runTimeout(*cmdTimeout), runDir(r.Path), allOutput(&log)) + if err != nil { + return fmt.Errorf("Error running git update -C %v: %v ; output=%s", hash, err, log.Bytes()) + } + return nil + } + + // Else go down three more levels of abstractions, at + // least two of which are broken for git. + return timeout(*cmdTimeout, func() error { + return r.Master.VCS.TagSync(r.Path, hash) + }) +} + +// Exists reports whether this Repo represents a valid Mecurial repository. +func (r *Repo) Exists() bool { + fi, err := os.Stat(filepath.Join(r.Path, "."+r.Master.VCS.Cmd)) + if err != nil { + return false + } + return fi.IsDir() +} + +// Pull pulls changes from the default path, that is, the path +// this Repo was cloned from. +func (r *Repo) Pull() error { + r.Lock() + defer r.Unlock() + + return timeout(*cmdTimeout, func() error { + return r.Master.VCS.Download(r.Path) + }) +} + +// Log returns the changelog for this repository. +func (r *Repo) Log() ([]HgLog, error) { + if err := r.Pull(); err != nil { + return nil, err + } + r.Lock() + defer r.Unlock() + + var logStruct struct { + Log []HgLog + } + err := timeout(*cmdTimeout, func() error { + data, err := r.Master.VCS.Log(r.Path, xmlLogTemplate) + if err != nil { + return err + } + + // We have a commit with description that contains 0x1b byte. + // Mercurial does not escape it, but xml.Unmarshal does not accept it. + data = bytes.Replace(data, []byte{0x1b}, []byte{'?'}, -1) + + err = xml.Unmarshal([]byte(""+string(data)+""), &logStruct) + if err != nil { + return fmt.Errorf("unmarshal %s log: %v", r.Master.VCS, err) + } + return nil + }) + if err != nil { + return nil, err + } + for i, log := range logStruct.Log { + // Let's pretend there can be only one parent. + if log.Parent != "" && strings.Contains(log.Parent, " ") { + logStruct.Log[i].Parent = strings.Split(log.Parent, " ")[0] + } + } + return logStruct.Log, nil +} + +// FullHash returns the full hash for the given Git or Mercurial revision. +func (r *Repo) FullHash(rev string) (string, error) { + r.Lock() + defer r.Unlock() + + var hash string + err := timeout(*cmdTimeout, func() error { + var data []byte + // Avoid the vcs package for git, since it's broken + // for git, and and we're trying to remove levels of + // abstraction which are increasingly getting + // difficult to navigate. + if r.Master.VCS.Cmd == "git" { + cmd := exec.Command("git", "rev-parse", rev) + var out bytes.Buffer + err := run(cmd, runTimeout(*cmdTimeout), runDir(r.Path), allOutput(&out)) + data = out.Bytes() + if err != nil { + return fmt.Errorf("Failed to find FullHash of %q; git rev-parse: %v, %s", rev, err, data) + } + } else { + var err error + data, err = r.Master.VCS.LogAtRev(r.Path, rev, "{node}") + if err != nil { + return err + } + } + s := strings.TrimSpace(string(data)) + if s == "" { + return fmt.Errorf("cannot find revision") + } + if len(s) != 40 { // correct for both hg and git + return fmt.Errorf("%s returned invalid hash: %s", r.Master.VCS, s) + } + hash = s + return nil + }) + if err != nil { + return "", err + } + return hash, nil +} + +// HgLog represents a single Mercurial revision. +type HgLog struct { + Hash string + Author string + Date string + Desc string + Parent string + Branch string + Files string + + // Internal metadata + added bool + bench bool // needs to be benchmarked? 
+} + +// xmlLogTemplate is a template to pass to Mercurial to make +// hg log print the log in valid XML for parsing with xml.Unmarshal. +// Can not escape branches and files, because it crashes python with: +// AttributeError: 'NoneType' object has no attribute 'replace' +const xmlLogTemplate = ` + + {node|escape} + {p1node} + {author|escape} + {date|rfc3339date} + {desc|escape} + {branches} + {files} + +` diff --git a/dashboard/cmd/buildlet/.gitignore b/dashboard/cmd/buildlet/.gitignore new file mode 100644 index 0000000..bbd21a2 --- /dev/null +++ b/dashboard/cmd/buildlet/.gitignore @@ -0,0 +1,5 @@ +buildlet +buildlet.*-* +stage0/buildlet-stage0.* +cert.pem +key.pem diff --git a/dashboard/cmd/buildlet/Makefile b/dashboard/cmd/buildlet/Makefile new file mode 100644 index 0000000..b30ce59 --- /dev/null +++ b/dashboard/cmd/buildlet/Makefile @@ -0,0 +1,26 @@ +buildlet: buildlet.go + go build --tags=buildlet + +buildlet.linux-amd64: buildlet.go + GOOS=linux GOARCH=amd64 go build -o $@ --tags=buildlet + cat $@ | (cd ../upload && go run upload.go --public go-builder-data/$@) + +buildlet.openbsd-amd64: buildlet.go + GOOS=openbsd GOARCH=amd64 go build -o $@ --tags=buildlet + cat $@ | (cd ../upload && go run upload.go --public go-builder-data/$@) + +buildlet.plan9-386: buildlet.go + GOOS=plan9 GOARCH=386 go build -o $@ --tags=buildlet + cat $@ | (cd ../upload && go run upload.go --public go-builder-data/$@) + +buildlet.windows-amd64: buildlet.go + GOOS=windows GOARCH=amd64 go build -o $@ --tags=buildlet + cat $@ | (cd ../upload && go run upload.go --public go-builder-data/$@) + +buildlet.darwin-amd64: buildlet.go + GOOS=darwin GOARCH=amd64 go build -o $@ --tags=buildlet + cat $@ | (cd ../upload && go run upload.go --public go-builder-data/$@) + +buildlet.netbsd-amd64: buildlet.go + GOOS=netbsd GOARCH=amd64 go build -o $@ --tags=buildlet + cat $@ | (cd ../upload && go run upload.go --public go-builder-data/$@) diff --git a/dashboard/cmd/buildlet/README b/dashboard/cmd/buildlet/README new file mode 100644 index 0000000..0dd68cf --- /dev/null +++ b/dashboard/cmd/buildlet/README @@ -0,0 +1,12 @@ +Local development notes: + +Server: (TLS stuff is optional) +$ go run $GOROOT/src/crypto/tls/generate_cert.go --host=example.com +$ GCEMETA_password=foo GCEMETA_tls_cert=@cert.pem GCEMETA_tls_key='@key.pem' ./buildlet + +Client: +$ curl -O https://go.googlesource.com/go/+archive/3b76b017cabb.tar.gz +$ curl -k --user :foo -X PUT --data-binary "@go-3b76b017cabb.tar.gz" https://localhost:5936/writetgz +$ curl -k --user :foo -d "cmd=src/make.bash" http://127.0.0.1:5937/exec +etc + diff --git a/dashboard/cmd/buildlet/buildlet.go b/dashboard/cmd/buildlet/buildlet.go new file mode 100644 index 0000000..ad65b95 --- /dev/null +++ b/dashboard/cmd/buildlet/buildlet.go @@ -0,0 +1,351 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build buildlet + +// The buildlet is an HTTP server that untars content to disk and runs +// commands it has untarred, streaming their output back over HTTP. +// It is part of Go's continuous build system. +// +// This program intentionally allows remote code execution, and +// provides no security of its own. It is assumed that any user uses +// it with an appropriately-configured firewall between their VM +// instances. 
+package main // import "golang.org/x/tools/dashboard/buildlet" + +import ( + "archive/tar" + "compress/gzip" + "crypto/tls" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "net/http" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "sync" + "time" + + "google.golang.org/cloud/compute/metadata" +) + +var ( + scratchDir = flag.String("scratchdir", "", "Temporary directory to use. The contents of this directory may be deleted at any time. If empty, TempDir is used to create one.") + listenAddr = flag.String("listen", defaultListenAddr(), "address to listen on. Warning: this service is inherently insecure and offers no protection of its own. Do not expose this port to the world.") +) + +func defaultListenAddr() string { + if runtime.GOOS == "darwin" { + // Darwin will never run on GCE, so let's always + // listen on a high port (so we don't need to be + // root). + return ":5936" + } + if !metadata.OnGCE() { + return "localhost:5936" + } + // In production, default to port 80 or 443, depending on + // whether TLS is configured. + if metadataValue("tls-cert") != "" { + return ":443" + } + return ":80" +} + +func main() { + flag.Parse() + if !metadata.OnGCE() && !strings.HasPrefix(*listenAddr, "localhost:") { + log.Printf("** WARNING *** This server is unsafe and offers no security. Be careful.") + } + if runtime.GOOS == "plan9" { + // Plan 9 is too slow on GCE, so stop running run.rc after the basics. + // See https://golang.org/cl/2522 and https://golang.org/issue/9491 + // TODO(bradfitz): once the buildlet has environment variable support, + // the coordinator can send this in, and this variable can be part of + // the build configuration struct instead of hard-coded here. + // But no need for environment variables quite yet. + os.Setenv("GOTESTONLY", "std") + } + + if *scratchDir == "" { + dir, err := ioutil.TempDir("", "buildlet-scatch") + if err != nil { + log.Fatalf("error creating scratchdir with ioutil.TempDir: %v", err) + } + *scratchDir = dir + } + if _, err := os.Lstat(*scratchDir); err != nil { + log.Fatalf("invalid --scratchdir %q: %v", *scratchDir, err) + } + http.HandleFunc("/", handleRoot) + + password := metadataValue("password") + http.Handle("/writetgz", requirePassword{http.HandlerFunc(handleWriteTGZ), password}) + http.Handle("/exec", requirePassword{http.HandlerFunc(handleExec), password}) + // TODO: removeall + + tlsCert, tlsKey := metadataValue("tls-cert"), metadataValue("tls-key") + if (tlsCert == "") != (tlsKey == "") { + log.Fatalf("tls-cert and tls-key must both be supplied, or neither.") + } + + log.Printf("Listening on %s ...", *listenAddr) + ln, err := net.Listen("tcp", *listenAddr) + if err != nil { + log.Fatalf("Failed to listen on %s: %v", *listenAddr, err) + } + ln = tcpKeepAliveListener{ln.(*net.TCPListener)} + + var srv http.Server + if tlsCert != "" { + cert, err := tls.X509KeyPair([]byte(tlsCert), []byte(tlsKey)) + if err != nil { + log.Fatalf("TLS cert error: %v", err) + } + tlsConf := &tls.Config{ + Certificates: []tls.Certificate{cert}, + } + ln = tls.NewListener(ln, tlsConf) + } + + log.Fatalf("Serve: %v", srv.Serve(ln)) +} + +// metadataValue returns the GCE metadata instance value for the given key. +// If the metadata is not defined, the returned string is empty. +// +// If not running on GCE, it falls back to using environment variables +// for local development. 
+func metadataValue(key string) string { + // The common case: + if metadata.OnGCE() { + v, err := metadata.InstanceAttributeValue(key) + if _, notDefined := err.(metadata.NotDefinedError); notDefined { + return "" + } + if err != nil { + log.Fatalf("metadata.InstanceAttributeValue(%q): %v", key, err) + } + return v + } + + // Else let developers use environment variables to fake + // metadata keys, for local testing. + envKey := "GCEMETA_" + strings.Replace(key, "-", "_", -1) + v := os.Getenv(envKey) + // Respect curl-style '@' prefix to mean the rest is a filename. + if strings.HasPrefix(v, "@") { + slurp, err := ioutil.ReadFile(v[1:]) + if err != nil { + log.Fatalf("Error reading file for GCEMETA_%v: %v", key, err) + } + return string(slurp) + } + if v == "" { + log.Printf("Warning: not running on GCE, and no %v environment variable defined", envKey) + } + return v +} + +// tcpKeepAliveListener is a net.Listener that sets TCP keep-alive +// timeouts on accepted connections. +type tcpKeepAliveListener struct { + *net.TCPListener +} + +func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) { + tc, err := ln.AcceptTCP() + if err != nil { + return + } + tc.SetKeepAlive(true) + tc.SetKeepAlivePeriod(3 * time.Minute) + return tc, nil +} + +func handleRoot(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, "buildlet running on %s-%s\n", runtime.GOOS, runtime.GOARCH) +} + +func handleWriteTGZ(w http.ResponseWriter, r *http.Request) { + if r.Method != "PUT" { + http.Error(w, "requires PUT method", http.StatusBadRequest) + return + } + err := untar(r.Body, *scratchDir) + if err != nil { + status := http.StatusInternalServerError + if he, ok := err.(httpStatuser); ok { + status = he.httpStatus() + } + http.Error(w, err.Error(), status) + return + } + io.WriteString(w, "OK") +} + +// untar reads the gzip-compressed tar file from r and writes it into dir. +func untar(r io.Reader, dir string) error { + zr, err := gzip.NewReader(r) + if err != nil { + return badRequest("requires gzip-compressed body: " + err.Error()) + } + tr := tar.NewReader(zr) + for { + f, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + log.Printf("tar reading error: %v", err) + return badRequest("tar error: " + err.Error()) + } + if !validRelPath(f.Name) { + return badRequest(fmt.Sprintf("tar file contained invalid name %q", f.Name)) + } + rel := filepath.FromSlash(f.Name) + abs := filepath.Join(dir, rel) + + fi := f.FileInfo() + mode := fi.Mode() + switch { + case mode.IsRegular(): + // Make the directory. This is redundant because it should + // already be made by a directory entry in the tar + // beforehand. Thus, don't check for errors; the next + // write will fail with the same error. 
+ os.MkdirAll(filepath.Dir(abs), 0755) + wf, err := os.OpenFile(abs, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode.Perm()) + if err != nil { + return err + } + n, err := io.Copy(wf, tr) + if closeErr := wf.Close(); closeErr != nil && err == nil { + err = closeErr + } + if err != nil { + return fmt.Errorf("error writing to %s: %v", abs, err) + } + if n != f.Size { + return fmt.Errorf("only wrote %d bytes to %s; expected %d", n, abs, f.Size) + } + log.Printf("wrote %s", abs) + case mode.IsDir(): + if err := os.MkdirAll(abs, 0755); err != nil { + return err + } + default: + return badRequest(fmt.Sprintf("tar file entry %s contained unsupported file type %v", f.Name, mode)) + } + } + return nil +} + +// Process-State is an HTTP Trailer set in the /exec handler to "ok" +// on success, or os.ProcessState.String() on failure. +const hdrProcessState = "Process-State" + +func handleExec(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + http.Error(w, "requires POST method", http.StatusBadRequest) + return + } + if r.ProtoMajor*10+r.ProtoMinor < 11 { + // We need trailers, only available in HTTP/1.1 or HTTP/2. + http.Error(w, "HTTP/1.1 or higher required", http.StatusBadRequest) + return + } + + w.Header().Set("Trailer", hdrProcessState) // declare it so we can set it + + cmdPath := r.FormValue("cmd") // required + if !validRelPath(cmdPath) { + http.Error(w, "requires 'cmd' parameter", http.StatusBadRequest) + return + } + if f, ok := w.(http.Flusher); ok { + f.Flush() + } + + absCmd := filepath.Join(*scratchDir, filepath.FromSlash(cmdPath)) + cmd := exec.Command(absCmd, r.PostForm["cmdArg"]...) + cmd.Dir = filepath.Dir(absCmd) + cmdOutput := &flushWriter{w: w} + cmd.Stdout = cmdOutput + cmd.Stderr = cmdOutput + err := cmd.Run() + state := "ok" + if err != nil { + if ps := cmd.ProcessState; ps != nil { + state = ps.String() + } else { + state = err.Error() + } + } + w.Header().Set(hdrProcessState, state) + log.Printf("Run = %s", state) +} + +// flushWriter is an io.Writer wrapper that writes to w and +// Flushes the output immediately, if w is an http.Flusher. +type flushWriter struct { + mu sync.Mutex + w http.ResponseWriter +} + +func (hw *flushWriter) Write(p []byte) (n int, err error) { + hw.mu.Lock() + defer hw.mu.Unlock() + n, err = hw.w.Write(p) + if f, ok := hw.w.(http.Flusher); ok { + f.Flush() + } + return +} + +func validRelPath(p string) bool { + if p == "" || strings.Contains(p, `\`) || strings.HasPrefix(p, "/") || strings.Contains(p, "../") { + return false + } + return true +} + +type httpStatuser interface { + error + httpStatus() int +} + +type httpError struct { + statusCode int + msg string +} + +func (he httpError) Error() string { return he.msg } +func (he httpError) httpStatus() int { return he.statusCode } + +func badRequest(msg string) error { + return httpError{http.StatusBadRequest, msg} +} + +// requirePassword is an http.Handler auth wrapper that enforces a +// HTTP Basic password. The username is ignored. 
+type requirePassword struct { + h http.Handler + password string // empty means no password +} + +func (h requirePassword) ServeHTTP(w http.ResponseWriter, r *http.Request) { + _, gotPass, _ := r.BasicAuth() + if h.password != "" && h.password != gotPass { + http.Error(w, "invalid password", http.StatusForbidden) + return + } + h.h.ServeHTTP(w, r) +} diff --git a/dashboard/cmd/buildlet/stage0/Makefile b/dashboard/cmd/buildlet/stage0/Makefile new file mode 100644 index 0000000..f9a3c19 --- /dev/null +++ b/dashboard/cmd/buildlet/stage0/Makefile @@ -0,0 +1,3 @@ +buildlet-stage0.windows-amd64: stage0.go + GOOS=windows GOARCH=amd64 go build -o $@ --tags=stage0 + cat $@ | (cd ../../upload && go run upload.go --public go-builder-data/$@) diff --git a/dashboard/cmd/buildlet/stage0/stage0.go b/dashboard/cmd/buildlet/stage0/stage0.go new file mode 100644 index 0000000..609c5c8 --- /dev/null +++ b/dashboard/cmd/buildlet/stage0/stage0.go @@ -0,0 +1,78 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build stage0 + +// The stage0 command looks up the buildlet's URL from the GCE metadata +// service, downloads it, and runs it. It's used primarily by Windows, +// since it can be written in a couple lines of shell elsewhere. +package main + +import ( + "fmt" + "io" + "log" + "net/http" + "os" + "os/exec" + "path/filepath" + "time" + + "google.golang.org/cloud/compute/metadata" +) + +const attr = "buildlet-binary-url" + +func main() { + buildletURL, err := metadata.InstanceAttributeValue(attr) + if err != nil { + sleepFatalf("Failed to look up %q attribute value: %v", attr, err) + } + target := filepath.FromSlash("./buildlet.exe") + if err := download(target, buildletURL); err != nil { + sleepFatalf("Downloading %s: %v", buildletURL, err) + } + cmd := exec.Command(target) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + sleepFatalf("Error running buildlet: %v", err) + } +} + +func sleepFatalf(format string, args ...interface{}) { + log.Printf(format, args...) + time.Sleep(time.Minute) // so user has time to see it in cmd.exe, maybe + os.Exit(1) +} + +func download(file, url string) error { + log.Printf("Downloading %s to %s ...\n", url, file) + res, err := http.Get(url) + if err != nil { + return fmt.Errorf("Error fetching %v: %v", url, err) + } + if res.StatusCode != 200 { + return fmt.Errorf("HTTP status code of %s was %v", url, res.Status) + } + tmp := file + ".tmp" + os.Remove(tmp) + os.Remove(file) + f, err := os.Create(tmp) + if err != nil { + return err + } + n, err := io.Copy(f, res.Body) + res.Body.Close() + if err != nil { + return fmt.Errorf("Error reading %v: %v", url, err) + } + f.Close() + err = os.Rename(tmp, file) + if err != nil { + return err + } + log.Printf("Downloaded %s (%d bytes)", file, n) + return nil +} diff --git a/dashboard/cmd/coordinator/.gitignore b/dashboard/cmd/coordinator/.gitignore new file mode 100644 index 0000000..91d0fb0 --- /dev/null +++ b/dashboard/cmd/coordinator/.gitignore @@ -0,0 +1,3 @@ +buildongce/client-*.dat +buildongce/token.dat +coordinator diff --git a/dashboard/cmd/coordinator/Makefile b/dashboard/cmd/coordinator/Makefile new file mode 100644 index 0000000..166086f --- /dev/null +++ b/dashboard/cmd/coordinator/Makefile @@ -0,0 +1,9 @@ +coordinator: main.go + GOOS=linux go build --tags=build_coordinator -o coordinator . 
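For illustration, a minimal Go sketch (not part of the patch itself) of driving the buildlet API shown above the way the README's curl commands do. The port, password, and tarball name are assumptions taken from that README, and the Process-State trailer is only readable after the response body has been consumed.

	package main

	import (
		"fmt"
		"io"
		"log"
		"net/http"
		"net/url"
		"os"
		"strings"
	)

	func main() {
		base := "http://localhost:5936" // assumed local buildlet, as in the README
		const password = "foo"          // matches GCEMETA_password=foo in the README

		// PUT a gzip-compressed tarball of the Go tree to /writetgz.
		tgz, err := os.Open("go-3b76b017cabb.tar.gz")
		if err != nil {
			log.Fatal(err)
		}
		defer tgz.Close()
		req, _ := http.NewRequest("PUT", base+"/writetgz", tgz)
		req.SetBasicAuth("", password) // the buildlet ignores the username
		res, err := http.DefaultClient.Do(req)
		if err != nil {
			log.Fatal(err)
		}
		res.Body.Close()

		// POST to /exec; output streams back in the body and the
		// Process-State trailer reports "ok" or the failed process state.
		form := url.Values{"cmd": {"src/make.bash"}}
		req, _ = http.NewRequest("POST", base+"/exec", strings.NewReader(form.Encode()))
		req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
		req.SetBasicAuth("", password)
		res, err = http.DefaultClient.Do(req)
		if err != nil {
			log.Fatal(err)
		}
		defer res.Body.Close()
		io.Copy(os.Stdout, res.Body) // trailers are populated only after EOF
		fmt.Println("process state:", res.Trailer.Get("Process-State"))
	}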
+ +# After "make upload", either reboot the machine, or ssh to it and: +# sudo systemctl restart gobuild.service +# And watch its logs with: +# sudo journalctl -f -u gobuild.service +upload: coordinator + cat coordinator | (cd ../upload && go run upload.go --public go-builder-data/coordinator) diff --git a/dashboard/cmd/coordinator/buildongce/create.go b/dashboard/cmd/coordinator/buildongce/create.go new file mode 100644 index 0000000..2148ebb --- /dev/null +++ b/dashboard/cmd/coordinator/buildongce/create.go @@ -0,0 +1,299 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main // import "golang.org/x/tools/dashboard/coordinator/buildongce" + +import ( + "bufio" + "encoding/json" + "flag" + "fmt" + "io/ioutil" + "log" + "os" + "strings" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + compute "google.golang.org/api/compute/v1" +) + +var ( + proj = flag.String("project", "symbolic-datum-552", "name of Project") + zone = flag.String("zone", "us-central1-a", "GCE zone") + mach = flag.String("machinetype", "n1-standard-16", "Machine type") + instName = flag.String("instance_name", "go-builder-1", "Name of VM instance.") + sshPub = flag.String("ssh_public_key", "", "ssh public key file to authorize. Can modify later in Google's web UI anyway.") + staticIP = flag.String("static_ip", "", "Static IP to use. If empty, automatic.") + reuseDisk = flag.Bool("reuse_disk", true, "Whether disk images should be reused between shutdowns/restarts.") + ssd = flag.Bool("ssd", false, "use a solid state disk (faster, more expensive)") +) + +func readFile(v string) string { + slurp, err := ioutil.ReadFile(v) + if err != nil { + log.Fatalf("Error reading %s: %v", v, err) + } + return strings.TrimSpace(string(slurp)) +} + +var config = &oauth2.Config{ + // The client-id and secret should be for an "Installed Application" when using + // the CLI. Later we'll use a web application with a callback. 
+ ClientID: readFile("client-id.dat"), + ClientSecret: readFile("client-secret.dat"), + Endpoint: google.Endpoint, + Scopes: []string{ + compute.DevstorageFull_controlScope, + compute.ComputeScope, + "https://www.googleapis.com/auth/sqlservice", + "https://www.googleapis.com/auth/sqlservice.admin", + }, + RedirectURL: "urn:ietf:wg:oauth:2.0:oob", +} + +const baseConfig = `#cloud-config +coreos: + update: + group: alpha + reboot-strategy: off + units: + - name: gobuild.service + command: start + content: | + [Unit] + Description=Go Builders + After=docker.service + Requires=docker.service + + [Service] + ExecStartPre=/bin/bash -c 'mkdir -p /opt/bin && curl -s -o /opt/bin/coordinator http://storage.googleapis.com/go-builder-data/coordinator && chmod +x /opt/bin/coordinator' + ExecStart=/opt/bin/coordinator + RestartSec=10s + Restart=always + Type=simple + + [Install] + WantedBy=multi-user.target +` + +func main() { + flag.Parse() + if *proj == "" { + log.Fatalf("Missing --project flag") + } + prefix := "https://www.googleapis.com/compute/v1/projects/" + *proj + machType := prefix + "/zones/" + *zone + "/machineTypes/" + *mach + + const tokenFileName = "token.dat" + tokenFile := tokenCacheFile(tokenFileName) + tokenSource := oauth2.ReuseTokenSource(nil, tokenFile) + token, err := tokenSource.Token() + if err != nil { + log.Printf("Error getting token from %s: %v", tokenFileName, err) + log.Printf("Get auth code from %v", config.AuthCodeURL("my-state")) + fmt.Print("\nEnter auth code: ") + sc := bufio.NewScanner(os.Stdin) + sc.Scan() + authCode := strings.TrimSpace(sc.Text()) + token, err = config.Exchange(oauth2.NoContext, authCode) + if err != nil { + log.Fatalf("Error exchanging auth code for a token: %v", err) + } + if err := tokenFile.WriteToken(token); err != nil { + log.Fatalf("Error writing to %s: %v", tokenFileName, err) + } + tokenSource = oauth2.ReuseTokenSource(token, nil) + } + + oauthClient := oauth2.NewClient(oauth2.NoContext, tokenSource) + + computeService, _ := compute.New(oauthClient) + + natIP := *staticIP + if natIP == "" { + // Try to find it by name. 
+ aggAddrList, err := computeService.Addresses.AggregatedList(*proj).Do() + if err != nil { + log.Fatal(err) + } + // https://godoc.org/google.golang.org/api/compute/v1#AddressAggregatedList + IPLoop: + for _, asl := range aggAddrList.Items { + for _, addr := range asl.Addresses { + if addr.Name == *instName+"-ip" && addr.Status == "RESERVED" { + natIP = addr.Address + break IPLoop + } + } + } + } + + cloudConfig := baseConfig + if *sshPub != "" { + key := strings.TrimSpace(readFile(*sshPub)) + cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n - %s\n", key) + } + if os.Getenv("USER") == "bradfitz" { + cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n - %s\n", "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEAwks9dwWKlRC+73gRbvYtVg0vdCwDSuIlyt4z6xa/YU/jTDynM4R4W10hm2tPjy8iR1k8XhDv4/qdxe6m07NjG/By1tkmGpm1mGwho4Pr5kbAAy/Qg+NLCSdAYnnE00FQEcFOC15GFVMOW2AzDGKisReohwH9eIzHPzdYQNPRWXE= bradfitz@papag.bradfitz.com") + } + const maxCloudConfig = 32 << 10 // per compute API docs + if len(cloudConfig) > maxCloudConfig { + log.Fatalf("cloud config length of %d bytes is over %d byte limit", len(cloudConfig), maxCloudConfig) + } + + instance := &compute.Instance{ + Name: *instName, + Description: "Go Builder", + MachineType: machType, + Disks: []*compute.AttachedDisk{instanceDisk(computeService)}, + Tags: &compute.Tags{ + Items: []string{"http-server", "https-server"}, + }, + Metadata: &compute.Metadata{ + Items: []*compute.MetadataItems{ + { + Key: "user-data", + Value: cloudConfig, + }, + }, + }, + NetworkInterfaces: []*compute.NetworkInterface{ + &compute.NetworkInterface{ + AccessConfigs: []*compute.AccessConfig{ + &compute.AccessConfig{ + Type: "ONE_TO_ONE_NAT", + Name: "External NAT", + NatIP: natIP, + }, + }, + Network: prefix + "/global/networks/default", + }, + }, + ServiceAccounts: []*compute.ServiceAccount{ + { + Email: "default", + Scopes: []string{ + compute.DevstorageFull_controlScope, + compute.ComputeScope, + }, + }, + }, + } + + log.Printf("Creating instance...") + op, err := computeService.Instances.Insert(*proj, *zone, instance).Do() + if err != nil { + log.Fatalf("Failed to create instance: %v", err) + } + opName := op.Name + log.Printf("Created. Waiting on operation %v", opName) +OpLoop: + for { + time.Sleep(2 * time.Second) + op, err := computeService.ZoneOperations.Get(*proj, *zone, opName).Do() + if err != nil { + log.Fatalf("Failed to get op %s: %v", opName, err) + } + switch op.Status { + case "PENDING", "RUNNING": + log.Printf("Waiting on operation %v", opName) + continue + case "DONE": + if op.Error != nil { + for _, operr := range op.Error.Errors { + log.Printf("Error: %+v", operr) + } + log.Fatalf("Failed to start.") + } + log.Printf("Success. 
%+v", op) + break OpLoop + default: + log.Fatalf("Unknown status %q: %+v", op.Status, op) + } + } + + inst, err := computeService.Instances.Get(*proj, *zone, *instName).Do() + if err != nil { + log.Fatalf("Error getting instance after creation: %v", err) + } + ij, _ := json.MarshalIndent(inst, "", " ") + log.Printf("Instance: %s", ij) +} + +func instanceDisk(svc *compute.Service) *compute.AttachedDisk { + const imageURL = "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-402-2-0-v20140807" + diskName := *instName + "-coreos-stateless-pd" + + if *reuseDisk { + dl, err := svc.Disks.List(*proj, *zone).Do() + if err != nil { + log.Fatalf("Error listing disks: %v", err) + } + for _, disk := range dl.Items { + if disk.Name != diskName { + continue + } + return &compute.AttachedDisk{ + AutoDelete: false, + Boot: true, + DeviceName: diskName, + Type: "PERSISTENT", + Source: disk.SelfLink, + Mode: "READ_WRITE", + + // The GCP web UI's "Show REST API" link includes a + // "zone" parameter, but it's not in the API + // description. But it wants this form (disk.Zone, a + // full zone URL, not *zone): + // Zone: disk.Zone, + // ... but it seems to work without it. Keep this + // comment here until I file a bug with the GCP + // people. + } + } + } + + diskType := "" + if *ssd { + diskType = "https://www.googleapis.com/compute/v1/projects/" + *proj + "/zones/" + *zone + "/diskTypes/pd-ssd" + } + + return &compute.AttachedDisk{ + AutoDelete: !*reuseDisk, + Boot: true, + Type: "PERSISTENT", + InitializeParams: &compute.AttachedDiskInitializeParams{ + DiskName: diskName, + SourceImage: imageURL, + DiskSizeGb: 50, + DiskType: diskType, + }, + } +} + +type tokenCacheFile string + +func (f tokenCacheFile) Token() (*oauth2.Token, error) { + slurp, err := ioutil.ReadFile(string(f)) + if err != nil { + return nil, err + } + t := new(oauth2.Token) + if err := json.Unmarshal(slurp, t); err != nil { + return nil, err + } + return t, nil +} + +func (f tokenCacheFile) WriteToken(t *oauth2.Token) error { + jt, err := json.Marshal(t) + if err != nil { + return err + } + return ioutil.WriteFile(string(f), jt, 0600) +} diff --git a/dashboard/cmd/coordinator/coordinator.go b/dashboard/cmd/coordinator/coordinator.go new file mode 100644 index 0000000..df3dc48 --- /dev/null +++ b/dashboard/cmd/coordinator/coordinator.go @@ -0,0 +1,1540 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build build_coordinator + +// The coordinator runs on GCE and coordinates builds in Docker containers. +package main // import "golang.org/x/tools/dashboard/coordinator" + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "crypto/hmac" + "crypto/md5" + "crypto/rand" + "encoding/json" + "errors" + "flag" + "fmt" + "html" + "io" + "io/ioutil" + "log" + "net/http" + "net/url" + "os" + "os/exec" + "path" + "regexp" + "sort" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "golang.org/x/tools/dashboard/types" + "google.golang.org/api/compute/v1" + "google.golang.org/cloud/compute/metadata" +) + +var ( + masterKeyFile = flag.String("masterkey", "", "Path to builder master key. 
Else fetched using GCE project attribute 'builder-master-key'.") + maxLocalBuilds = flag.Int("maxbuilds", 6, "Max concurrent Docker builds (VM builds don't count)") + + cleanZones = flag.String("zones", "us-central1-a,us-central1-b,us-central1-f", "Comma-separated list of zones to periodically clean of stale build VMs (ones that failed to shut themselves down)") + + // Debug flags: + addTemp = flag.Bool("temp", false, "Append -temp to all builders.") + just = flag.String("just", "", "If non-empty, run single build in the foreground. Requires rev.") + rev = flag.String("rev", "", "Revision to build.") +) + +var ( + startTime = time.Now() + builders = map[string]buildConfig{} // populated at startup, keys like "openbsd-amd64-56" + watchers = map[string]watchConfig{} // populated at startup, keyed by repo, e.g. "https://go.googlesource.com/go" + donec = make(chan builderRev) // reports of finished builders + + statusMu sync.Mutex // guards both status (ongoing ones) and statusDone (just finished) + status = map[builderRev]*buildStatus{} + statusDone []*buildStatus // finished recently, capped to maxStatusDone +) + +const ( + maxStatusDone = 30 + + // vmDeleteTimeout is how long before we delete a VM. + // In practice this need only be as long as the slowest + // builder (plan9 currently), because on startup this program + // already deletes all buildlets it doesn't know about + // (i.e. ones from a previous instance of the coordinator). + vmDeleteTimeout = 45 * time.Minute +) + +// Initialized by initGCE: +var ( + projectID string + projectZone string + computeService *compute.Service + externalIP string +) + +func initGCE() error { + if !metadata.OnGCE() { + return errors.New("not running on GCE; VM support disabled") + } + var err error + projectID, err = metadata.ProjectID() + if err != nil { + return fmt.Errorf("failed to get current GCE ProjectID: %v", err) + } + projectZone, err = metadata.Get("instance/zone") + if err != nil || projectZone == "" { + return fmt.Errorf("failed to get current GCE zone: %v", err) + } + // Convert the zone from "projects/1234/zones/us-central1-a" to "us-central1-a". + projectZone = path.Base(projectZone) + if !hasComputeScope() { + return errors.New("The coordinator is not running with access to read and write Compute resources. VM support disabled.") + + } + externalIP, err = metadata.ExternalIP() + if err != nil { + return fmt.Errorf("ExternalIP: %v", err) + } + ts := google.ComputeTokenSource("default") + computeService, _ = compute.New(oauth2.NewClient(oauth2.NoContext, ts)) + return nil +} + +type imageInfo struct { + url string // of tar file + + mu sync.Mutex + lastMod string +} + +var images = map[string]*imageInfo{ + "go-commit-watcher": {url: "https://storage.googleapis.com/go-builder-data/docker-commit-watcher.tar.gz"}, + "gobuilders/linux-x86-base": {url: "https://storage.googleapis.com/go-builder-data/docker-linux.base.tar.gz"}, + "gobuilders/linux-x86-clang": {url: "https://storage.googleapis.com/go-builder-data/docker-linux.clang.tar.gz"}, + "gobuilders/linux-x86-gccgo": {url: "https://storage.googleapis.com/go-builder-data/docker-linux.gccgo.tar.gz"}, + "gobuilders/linux-x86-nacl": {url: "https://storage.googleapis.com/go-builder-data/docker-linux.nacl.tar.gz"}, + "gobuilders/linux-x86-sid": {url: "https://storage.googleapis.com/go-builder-data/docker-linux.sid.tar.gz"}, +} + +// A buildConfig describes how to run either a Docker-based or VM-based build. 
+type buildConfig struct { + name string // "linux-amd64-race" + + // VM-specific settings: (used if vmImage != "") + vmImage string // e.g. "openbsd-amd64-56" + machineType string // optional GCE instance type + + // Docker-specific settings: (used if vmImage == "") + image string // Docker image to use to build + cmd string // optional -cmd flag (relative to go/src/) + env []string // extra environment ("key=value") pairs + dashURL string // url of the build dashboard + tool string // the tool this configuration is for +} + +func (c *buildConfig) usesDocker() bool { return c.vmImage == "" } +func (c *buildConfig) usesVM() bool { return c.vmImage != "" } + +func (c *buildConfig) MachineType() string { + if v := c.machineType; v != "" { + return v + } + return "n1-highcpu-4" +} + +// recordResult sends build results to the dashboard +func (b *buildConfig) recordResult(ok bool, hash, buildLog string, runTime time.Duration) error { + req := map[string]interface{}{ + "Builder": b.name, + "PackagePath": "", + "Hash": hash, + "GoHash": "", + "OK": ok, + "Log": buildLog, + "RunTime": runTime, + } + args := url.Values{"key": {builderKey(b.name)}, "builder": {b.name}} + return dash("POST", "result", args, req, nil) +} + +// pingDashboard is a goroutine that periodically POSTS to build.golang.org/building +// to let it know that we're still working on a build. +func pingDashboard(st *buildStatus) { + u := "https://build.golang.org/building?" + url.Values{ + "builder": []string{st.name}, + "key": []string{builderKey(st.name)}, + "hash": []string{st.rev}, + "url": []string{fmt.Sprintf("http://%v/logs?name=%s&rev=%s&st=%p", externalIP, st.name, st.rev, st)}, + }.Encode() + for { + st.mu.Lock() + done := st.done + st.mu.Unlock() + if !done.IsZero() { + return + } + if res, _ := http.PostForm(u, nil); res != nil { + res.Body.Close() + } + time.Sleep(60 * time.Second) + } +} + +type watchConfig struct { + repo string // "https://go.googlesource.com/go" + dash string // "https://build.golang.org/" (must end in /) + interval time.Duration // Polling interval +} + +func main() { + flag.Parse() + + if err := initGCE(); err != nil { + log.Printf("VM support disabled due to error initializing GCE: %v", err) + } + + addBuilder(buildConfig{name: "linux-386"}) + addBuilder(buildConfig{name: "linux-386-387", env: []string{"GO386=387"}}) + addBuilder(buildConfig{name: "linux-amd64"}) + addBuilder(buildConfig{name: "linux-amd64-nocgo", env: []string{"CGO_ENABLED=0", "USER=root"}}) + addBuilder(buildConfig{name: "linux-amd64-noopt", env: []string{"GO_GCFLAGS=-N -l"}}) + addBuilder(buildConfig{name: "linux-amd64-race"}) + addBuilder(buildConfig{name: "nacl-386"}) + addBuilder(buildConfig{name: "nacl-amd64p32"}) + addBuilder(buildConfig{ + name: "linux-amd64-gccgo", + image: "gobuilders/linux-x86-gccgo", + cmd: "make RUNTESTFLAGS=\"--target_board=unix/-m64\" check-go -j16", + dashURL: "https://build.golang.org/gccgo", + tool: "gccgo", + }) + addBuilder(buildConfig{ + name: "linux-386-gccgo", + image: "gobuilders/linux-x86-gccgo", + cmd: "make RUNTESTFLAGS=\"--target_board=unix/-m32\" check-go -j16", + dashURL: "https://build.golang.org/gccgo", + tool: "gccgo", + }) + addBuilder(buildConfig{name: "linux-386-sid", image: "gobuilders/linux-x86-sid"}) + addBuilder(buildConfig{name: "linux-amd64-sid", image: "gobuilders/linux-x86-sid"}) + addBuilder(buildConfig{name: "linux-386-clang", image: "gobuilders/linux-x86-clang"}) + addBuilder(buildConfig{name: "linux-amd64-clang", image: "gobuilders/linux-x86-clang"}) + + // VMs: 
+ addBuilder(buildConfig{ + name: "openbsd-amd64-gce56", + vmImage: "openbsd-amd64-56", + machineType: "n1-highcpu-2", + }) + addBuilder(buildConfig{ + // It's named "partial" because the buildlet sets + // GOTESTONLY=std to stop after the "go test std" + // tests because it's so slow otherwise. + // TODO(braditz): move that env variable to the + // coordinator and into this config. + name: "plan9-386-gcepartial", + vmImage: "plan9-386", + // We *were* using n1-standard-1 because Plan 9 can only + // reliably use a single CPU. Using 2 or 4 and we see + // test failures. See: + // https://golang.org/issue/8393 + // https://golang.org/issue/9491 + // n1-standard-1 has 3.6 GB of memory which is + // overkill (userspace probably only sees 2GB anyway), + // but it's the cheapest option. And plenty to keep + // our ~250 MB of inputs+outputs in its ramfs. + // + // But the docs says "For the n1 series of machine + // types, a virtual CPU is implemented as a single + // hyperthread on a 2.6GHz Intel Sandy Bridge Xeon or + // Intel Ivy Bridge Xeon (or newer) processor. This + // means that the n1-standard-2 machine type will see + // a whole physical core." + // + // ... so we use n1-highcpu-2 (1.80 RAM, still + // plenty), just so we can get 1 whole core for the + // single-core Plan 9. It will see 2 virtual cores and + // only use 1, but we hope that 1 will be more powerful + // and we'll stop timing out on tests. + machineType: "n1-highcpu-2", + }) + + addWatcher(watchConfig{repo: "https://go.googlesource.com/go", dash: "https://build.golang.org/"}) + // TODO(adg,cmang): fix gccgo watcher + // addWatcher(watchConfig{repo: "https://code.google.com/p/gofrontend", dash: "https://build.golang.org/gccgo/"}) + + if (*just != "") != (*rev != "") { + log.Fatalf("--just and --rev must be used together") + } + if *just != "" { + conf, ok := builders[*just] + if !ok { + log.Fatalf("unknown builder %q", *just) + } + cmd := exec.Command("docker", append([]string{"run"}, conf.dockerRunArgs(*rev)...)...) 
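For illustration, a minimal standalone sketch (not part of the patch itself) of the argv construction above; conf.dockerRunArgs is defined elsewhere in coordinator.go and not shown in this excerpt, so the flags below are hypothetical stand-ins.

	package main

	import (
		"fmt"
		"os/exec"
	)

	func main() {
		// Hypothetical stand-in for conf.dockerRunArgs(rev).
		runArgs := []string{"-e", "REV=abc123", "gobuilders/linux-x86-base"}

		// append prepends "run", and exec.Command receives the slice variadically.
		argv := append([]string{"run"}, runArgs...)
		cmd := exec.Command("docker", argv...)
		fmt.Println(cmd.Args)
		// Output: [docker run -e REV=abc123 gobuilders/linux-x86-base]
	}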
+ cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + log.Fatalf("Build failed: %v", err) + } + return + } + + http.HandleFunc("/", handleStatus) + http.HandleFunc("/logs", handleLogs) + go http.ListenAndServe(":80", nil) + + go cleanUpOldContainers() + go cleanUpOldVMs() + + stopWatchers() // clean up before we start new ones + for _, watcher := range watchers { + if err := startWatching(watchers[watcher.repo]); err != nil { + log.Printf("Error starting watcher for %s: %v", watcher.repo, err) + } + } + + workc := make(chan builderRev) + go findWorkLoop(workc) + // TODO(cmang): gccgo will need its own findWorkLoop + + ticker := time.NewTicker(1 * time.Minute) + for { + select { + case work := <-workc: + log.Printf("workc received %+v; len(status) = %v, maxLocalBuilds = %v; cur = %p", work, len(status), *maxLocalBuilds, status[work]) + if mayBuildRev(work) { + conf := builders[work.name] + if st, err := startBuilding(conf, work.rev); err == nil { + setStatus(work, st) + go pingDashboard(st) + } else { + log.Printf("Error starting to build %v: %v", work, err) + } + } + case done := <-donec: + log.Printf("%v done", done) + markDone(done) + case <-ticker.C: + if numCurrentBuilds() == 0 && time.Now().After(startTime.Add(10*time.Minute)) { + // TODO: halt the whole machine to kill the VM or something + } + } + } +} + +func numCurrentBuilds() int { + statusMu.Lock() + defer statusMu.Unlock() + return len(status) +} + +func isBuilding(work builderRev) bool { + statusMu.Lock() + defer statusMu.Unlock() + _, building := status[work] + return building +} + +// mayBuildRev reports whether the build type & revision should be started. +// It returns true if it's not already building, and there is capacity. +func mayBuildRev(work builderRev) bool { + conf := builders[work.name] + + statusMu.Lock() + _, building := status[work] + statusMu.Unlock() + + if building { + return false + } + if conf.usesVM() { + // These don't count towards *maxLocalBuilds. + return true + } + numDocker, err := numDockerBuilds() + if err != nil { + log.Printf("not starting %v due to docker ps failure: %v", work, err) + return false + } + return numDocker < *maxLocalBuilds +} + +func setStatus(work builderRev, st *buildStatus) { + statusMu.Lock() + defer statusMu.Unlock() + status[work] = st +} + +func markDone(work builderRev) { + statusMu.Lock() + defer statusMu.Unlock() + st, ok := status[work] + if !ok { + return + } + delete(status, work) + if len(statusDone) == maxStatusDone { + copy(statusDone, statusDone[1:]) + statusDone = statusDone[:len(statusDone)-1] + } + statusDone = append(statusDone, st) +} + +func vmIsBuilding(instName string) bool { + if instName == "" { + log.Printf("bogus empty instance name passed to vmIsBuilding") + return false + } + statusMu.Lock() + defer statusMu.Unlock() + for _, st := range status { + if st.instName == instName { + return true + } + } + return false +} + +// statusPtrStr disambiguates which status to return if there are +// multiple in the history (e.g. 
recent failures where the build +// didn't finish for reasons outside of all.bash failing) +func getStatus(work builderRev, statusPtrStr string) *buildStatus { + statusMu.Lock() + defer statusMu.Unlock() + match := func(st *buildStatus) bool { + return statusPtrStr == "" || fmt.Sprintf("%p", st) == statusPtrStr + } + if st, ok := status[work]; ok && match(st) { + return st + } + for _, st := range statusDone { + if st.builderRev == work && match(st) { + return st + } + } + return nil +} + +type byAge []*buildStatus + +func (s byAge) Len() int { return len(s) } +func (s byAge) Less(i, j int) bool { return s[i].start.Before(s[j].start) } +func (s byAge) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func handleStatus(w http.ResponseWriter, r *http.Request) { + var active []*buildStatus + var recent []*buildStatus + statusMu.Lock() + for _, st := range status { + active = append(active, st) + } + recent = append(recent, statusDone...) + numTotal := len(status) + numDocker, err := numDockerBuilds() + statusMu.Unlock() + + sort.Sort(byAge(active)) + sort.Sort(sort.Reverse(byAge(recent))) + + io.WriteString(w, "

<html><body><h1>Go build coordinator</h1>")
+
+	if err != nil {
+		fmt.Fprintf(w, "<h2>Error</h2>Error fetching Docker build count: %s\n", html.EscapeString(err.Error()))
+	}
+
+	fmt.Fprintf(w, "<h2>running</h2>%d total builds active (Docker: %d/%d; VMs: %d/∞):",
+		numTotal, numDocker, *maxLocalBuilds, numTotal-numDocker)
+
+	io.WriteString(w, "<pre>")
+	for _, st := range active {
+		io.WriteString(w, st.htmlStatusLine())
+	}
+	io.WriteString(w, "</pre>")
+
+	io.WriteString(w, "<h2>recently completed</h2><pre>")
+	for _, st := range recent {
+		io.WriteString(w, st.htmlStatusLine())
+	}
+	io.WriteString(w, "</pre>")
+
+	fmt.Fprintf(w, "<h2>disk space</h2><pre>%s</pre>
", html.EscapeString(diskFree())) +} + +func diskFree() string { + out, _ := exec.Command("df", "-h").Output() + return string(out) +} + +func handleLogs(w http.ResponseWriter, r *http.Request) { + st := getStatus(builderRev{r.FormValue("name"), r.FormValue("rev")}, r.FormValue("st")) + if st == nil { + http.NotFound(w, r) + return + } + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + writeStatusHeader(w, st) + + io.WriteString(w, st.logs()) + // TODO: if st is still building, stream them to the user with + // http.Flusher.Flush and CloseNotifier and registering interest + // of new writes with the buildStatus. Will require moving the + // BUILDERKEY scrubbing into the Write method. +} + +func writeStatusHeader(w http.ResponseWriter, st *buildStatus) { + st.mu.Lock() + defer st.mu.Unlock() + fmt.Fprintf(w, " builder: %s\n", st.name) + fmt.Fprintf(w, " rev: %s\n", st.rev) + if st.container != "" { + fmt.Fprintf(w, "container: %s\n", st.container) + } + if st.instName != "" { + fmt.Fprintf(w, " vm name: %s\n", st.instName) + } + fmt.Fprintf(w, " started: %v\n", st.start) + done := !st.done.IsZero() + if done { + fmt.Fprintf(w, " started: %v\n", st.done) + fmt.Fprintf(w, " success: %v\n", st.succeeded) + } else { + fmt.Fprintf(w, " status: still running\n") + } + if len(st.events) > 0 { + io.WriteString(w, "\nEvents:\n") + st.writeEventsLocked(w, false) + } + io.WriteString(w, "\nBuild log:\n") +} + +// findWorkLoop polls http://build.golang.org/?mode=json looking for new work +// for the main dashboard. It does not support gccgo. +// TODO(bradfitz): it also currently does not support subrepos. +func findWorkLoop(work chan<- builderRev) { + ticker := time.NewTicker(15 * time.Second) + for { + if err := findWork(work); err != nil { + log.Printf("failed to find new work: %v", err) + } + <-ticker.C + } +} + +func findWork(work chan<- builderRev) error { + var bs types.BuildStatus + res, err := http.Get("https://build.golang.org/?mode=json") + if err != nil { + return err + } + defer res.Body.Close() + if err := json.NewDecoder(res.Body).Decode(&bs); err != nil { + return err + } + if res.StatusCode != 200 { + return fmt.Errorf("unexpected http status %v", res.Status) + } + + knownToDashboard := map[string]bool{} // keys are builder + for _, b := range bs.Builders { + knownToDashboard[b] = true + } + + var goRevisions []string + for _, br := range bs.Revisions { + if br.Repo == "go" { + goRevisions = append(goRevisions, br.Revision) + } else { + // TODO(bradfitz): support these: golang.org/issue/9506 + continue + } + if len(br.Results) != len(bs.Builders) { + return errors.New("bogus JSON response from dashboard: results is too long.") + } + for i, res := range br.Results { + if res != "" { + // It's either "ok" or a failure URL. + continue + } + builder := bs.Builders[i] + if _, ok := builders[builder]; !ok { + // Not managed by the coordinator. + continue + } + br := builderRev{bs.Builders[i], br.Revision} + if !isBuilding(br) { + work <- br + } + } + } + + // And to bootstrap new builders, see if we have any builders + // that the dashboard doesn't know about. + for b := range builders { + if knownToDashboard[b] { + continue + } + for _, rev := range goRevisions { + br := builderRev{b, rev} + if !isBuilding(br) { + work <- br + } + } + } + return nil +} + +// builderRev is a build configuration type and a revision. +type builderRev struct { + name string // e.g. 
"linux-amd64-race" + rev string // lowercase hex git hash +} + +// returns the part after "docker run" +func (conf buildConfig) dockerRunArgs(rev string) (args []string) { + if key := builderKey(conf.name); key != "" { + tmpKey := "/tmp/" + conf.name + ".buildkey" + if _, err := os.Stat(tmpKey); err != nil { + if err := ioutil.WriteFile(tmpKey, []byte(key), 0600); err != nil { + log.Fatal(err) + } + } + // Images may look for .gobuildkey in / or /root, so provide both. + // TODO(adg): fix images that look in the wrong place. + args = append(args, "-v", tmpKey+":/.gobuildkey") + args = append(args, "-v", tmpKey+":/root/.gobuildkey") + } + for _, pair := range conf.env { + args = append(args, "-e", pair) + } + if strings.HasPrefix(conf.name, "linux-amd64") { + args = append(args, "-e", "GOROOT_BOOTSTRAP=/go1.4-amd64/go") + } else if strings.HasPrefix(conf.name, "linux-386") { + args = append(args, "-e", "GOROOT_BOOTSTRAP=/go1.4-386/go") + } + args = append(args, + conf.image, + "/usr/local/bin/builder", + "-rev="+rev, + "-dashboard="+conf.dashURL, + "-tool="+conf.tool, + "-buildroot=/", + "-v", + ) + if conf.cmd != "" { + args = append(args, "-cmd", conf.cmd) + } + args = append(args, conf.name) + return +} + +func addBuilder(c buildConfig) { + if c.tool == "gccgo" { + // TODO(cmang,bradfitz,adg): fix gccgo + return + } + if c.name == "" { + panic("empty name") + } + if *addTemp { + c.name += "-temp" + } + if _, dup := builders[c.name]; dup { + panic("dup name") + } + if c.dashURL == "" { + c.dashURL = "https://build.golang.org" + } + if c.tool == "" { + c.tool = "go" + } + + if strings.HasPrefix(c.name, "nacl-") { + if c.image == "" { + c.image = "gobuilders/linux-x86-nacl" + } + if c.cmd == "" { + c.cmd = "/usr/local/bin/build-command.pl" + } + } + if strings.HasPrefix(c.name, "linux-") && c.image == "" { + c.image = "gobuilders/linux-x86-base" + } + if c.image == "" && c.vmImage == "" { + panic("empty image and vmImage") + } + if c.image != "" && c.vmImage != "" { + panic("can't specify both image and vmImage") + } + builders[c.name] = c +} + +// returns the part after "docker run" +func (conf watchConfig) dockerRunArgs() (args []string) { + log.Printf("Running watcher with master key %q", masterKey()) + if key := masterKey(); len(key) > 0 { + tmpKey := "/tmp/watcher.buildkey" + if _, err := os.Stat(tmpKey); err != nil { + if err := ioutil.WriteFile(tmpKey, key, 0600); err != nil { + log.Fatal(err) + } + } + // Images may look for .gobuildkey in / or /root, so provide both. + // TODO(adg): fix images that look in the wrong place. 
+ args = append(args, "-v", tmpKey+":/.gobuildkey") + args = append(args, "-v", tmpKey+":/root/.gobuildkey") + } + args = append(args, + "go-commit-watcher", + "/usr/local/bin/watcher", + "-repo="+conf.repo, + "-dash="+conf.dash, + "-poll="+conf.interval.String(), + ) + return +} + +func addWatcher(c watchConfig) { + if c.repo == "" { + c.repo = "https://go.googlesource.com/go" + } + if c.dash == "" { + c.dash = "https://build.golang.org/" + } + if c.interval == 0 { + c.interval = 10 * time.Second + } + watchers[c.repo] = c +} + +func condUpdateImage(img string) error { + ii := images[img] + if ii == nil { + return fmt.Errorf("image %q doesn't exist", img) + } + ii.mu.Lock() + defer ii.mu.Unlock() + res, err := http.Head(ii.url) + if err != nil { + return fmt.Errorf("Error checking %s: %v", ii.url, err) + } + if res.StatusCode != 200 { + return fmt.Errorf("Error checking %s: %v", ii.url, res.Status) + } + if res.Header.Get("Last-Modified") == ii.lastMod { + return nil + } + + res, err = http.Get(ii.url) + if err != nil || res.StatusCode != 200 { + return fmt.Errorf("Get after Head failed for %s: %v, %v", ii.url, err, res) + } + defer res.Body.Close() + + log.Printf("Running: docker load of %s\n", ii.url) + cmd := exec.Command("docker", "load") + cmd.Stdin = res.Body + + var out bytes.Buffer + cmd.Stdout = &out + cmd.Stderr = &out + + if cmd.Run(); err != nil { + log.Printf("Failed to pull latest %s from %s and pipe into docker load: %v, %s", img, ii.url, err, out.Bytes()) + return err + } + ii.lastMod = res.Header.Get("Last-Modified") + return nil +} + +// numDockerBuilds finds the number of go builder instances currently running. +func numDockerBuilds() (n int, err error) { + out, err := exec.Command("docker", "ps").Output() + if err != nil { + return 0, err + } + for _, line := range strings.Split(string(out), "\n") { + if strings.Contains(line, "gobuilders/") { + n++ + } + } + return n, nil +} + +func startBuilding(conf buildConfig, rev string) (*buildStatus, error) { + if conf.usesVM() { + return startBuildingInVM(conf, rev) + } else { + return startBuildingInDocker(conf, rev) + } +} + +func startBuildingInDocker(conf buildConfig, rev string) (*buildStatus, error) { + if err := condUpdateImage(conf.image); err != nil { + log.Printf("Failed to setup container for %v %v: %v", conf.name, rev, err) + return nil, err + } + + cmd := exec.Command("docker", append([]string{"run", "-d"}, conf.dockerRunArgs(rev)...)...) + all, err := cmd.CombinedOutput() + log.Printf("Docker run for %v %v = err:%v, output:%s", conf.name, rev, err, all) + if err != nil { + return nil, err + } + container := strings.TrimSpace(string(all)) + brev := builderRev{ + name: conf.name, + rev: rev, + } + st := &buildStatus{ + builderRev: brev, + container: container, + start: time.Now(), + } + log.Printf("%v now building in Docker container %v", brev, st.container) + go func() { + all, err := exec.Command("docker", "wait", container).CombinedOutput() + output := strings.TrimSpace(string(all)) + var ok bool + if err == nil { + exit, err := strconv.Atoi(output) + ok = (err == nil && exit == 0) + } + st.setDone(ok) + log.Printf("docker wait %s/%s: %v, %s", container, rev, err, output) + donec <- builderRev{conf.name, rev} + exec.Command("docker", "rm", container).Run() + }() + go func() { + cmd := exec.Command("docker", "logs", "-f", container) + cmd.Stdout = st + cmd.Stderr = st + if err := cmd.Run(); err != nil { + // The docker logs subcommand always returns + // success, even if the underlying process + // fails. 
+ log.Printf("failed to follow docker logs of %s: %v", container, err) + } + }() + return st, nil +} + +var osArchRx = regexp.MustCompile(`^(\w+-\w+)`) + +func randHex(n int) string { + buf := make([]byte, n/2) + _, err := rand.Read(buf) + if err != nil { + panic("Failed to get randomness: " + err.Error()) + } + return fmt.Sprintf("%x", buf) +} + +// startBuildingInVM starts a VM on GCE running the buildlet binary to build rev. +func startBuildingInVM(conf buildConfig, rev string) (*buildStatus, error) { + brev := builderRev{ + name: conf.name, + rev: rev, + } + st := &buildStatus{ + builderRev: brev, + start: time.Now(), + } + + // name is the project-wide unique name of the GCE instance. It can't be longer + // than 61 bytes, so we only use the first 8 bytes of the rev. + name := "buildlet-" + conf.name + "-" + rev[:8] + "-rn" + randHex(6) + + // buildletURL is the URL of the buildlet binary which the VMs + // are configured to download at boot and run. This lets us + // update the buildlet more easily than rebuilding the whole + // VM image. We put this URL in a well-known GCE metadata attribute. + // The value will be of the form: + // http://storage.googleapis.com/go-builder-data/buildlet.GOOS-GOARCH + m := osArchRx.FindStringSubmatch(conf.name) + if m == nil { + return nil, fmt.Errorf("invalid builder name %q", conf.name) + } + buildletURL := "http://storage.googleapis.com/go-builder-data/buildlet." + m[1] + + prefix := "https://www.googleapis.com/compute/v1/projects/" + projectID + machType := prefix + "/zones/" + projectZone + "/machineTypes/" + conf.MachineType() + + instance := &compute.Instance{ + Name: name, + Description: fmt.Sprintf("Go Builder building %s %s", conf.name, rev), + MachineType: machType, + Disks: []*compute.AttachedDisk{ + { + AutoDelete: true, + Boot: true, + Type: "PERSISTENT", + InitializeParams: &compute.AttachedDiskInitializeParams{ + DiskName: name, + SourceImage: "https://www.googleapis.com/compute/v1/projects/" + projectID + "/global/images/" + conf.vmImage, + DiskType: "https://www.googleapis.com/compute/v1/projects/" + projectID + "/zones/" + projectZone + "/diskTypes/pd-ssd", + }, + }, + }, + Tags: &compute.Tags{ + // Warning: do NOT list "http-server" or "allow-ssh" (our + // project's custom tag to allow ssh access) here; the + // buildlet provides full remote code execution. + Items: []string{}, + }, + Metadata: &compute.Metadata{ + Items: []*compute.MetadataItems{ + { + Key: "buildlet-binary-url", + Value: buildletURL, + }, + // In case the VM gets away from us (generally: if the + // coordinator dies while a build is running), then we + // set this attribute of when it should be killed so + // we can kill it later when the coordinator is + // restarted. The cleanUpOldVMs goroutine loop handles + // that killing. + { + Key: "delete-at", + Value: fmt.Sprint(time.Now().Add(vmDeleteTimeout).Unix()), + }, + }, + }, + NetworkInterfaces: []*compute.NetworkInterface{ + &compute.NetworkInterface{ + AccessConfigs: []*compute.AccessConfig{ + &compute.AccessConfig{ + Type: "ONE_TO_ONE_NAT", + Name: "External NAT", + }, + }, + Network: prefix + "/global/networks/default", + }, + }, + } + op, err := computeService.Instances.Insert(projectID, projectZone, instance).Do() + if err != nil { + return nil, fmt.Errorf("Failed to create instance: %v", err) + } + st.createOp = op.Name + st.instName = name + log.Printf("%v now building in VM %v", brev, st.instName) + // Start the goroutine to monitor the VM now that it's booting. 
This might + // take minutes for it to come up, and then even more time to do the build. + go func() { + err := watchVM(st) + if st.hasEvent("instance_created") { + deleteVM(projectZone, st.instName) + } + st.setDone(err == nil) + if err != nil { + fmt.Fprintf(st, "\n\nError: %v\n", err) + } + donec <- builderRev{conf.name, rev} + }() + return st, nil +} + +// watchVM monitors a VM doing a build. +func watchVM(st *buildStatus) (retErr error) { + goodRes := func(res *http.Response, err error, what string) bool { + if err != nil { + retErr = fmt.Errorf("%s: %v", what, err) + return false + } + if res.StatusCode/100 != 2 { + slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 4<<10)) + retErr = fmt.Errorf("%s: %v; body: %s", what, res.Status, slurp) + res.Body.Close() + return false + + } + return true + } + st.logEventTime("instance_create_requested") + // Wait for instance create operation to succeed. +OpLoop: + for { + time.Sleep(2 * time.Second) + op, err := computeService.ZoneOperations.Get(projectID, projectZone, st.createOp).Do() + if err != nil { + return fmt.Errorf("Failed to get op %s: %v", st.createOp, err) + } + switch op.Status { + case "PENDING", "RUNNING": + continue + case "DONE": + if op.Error != nil { + for _, operr := range op.Error.Errors { + return fmt.Errorf("Error creating instance: %+v", operr) + } + return errors.New("Failed to start.") + } + break OpLoop + default: + log.Fatalf("Unknown status %q: %+v", op.Status, op) + } + } + st.logEventTime("instance_created") + + inst, err := computeService.Instances.Get(projectID, projectZone, st.instName).Do() + if err != nil { + return fmt.Errorf("Error getting instance %s details after creation: %v", st.instName, err) + } + st.logEventTime("got_instance_info") + + // Find its internal IP. + var ip string + for _, iface := range inst.NetworkInterfaces { + if strings.HasPrefix(iface.NetworkIP, "10.") { + ip = iface.NetworkIP + } + } + if ip == "" { + return errors.New("didn't find its internal IP address") + } + + // Wait for it to boot and its buildlet to come up on port 80. + st.logEventTime("waiting_for_buildlet") + buildletURL := "http://" + ip + const numTries = 60 + var alive bool + impatientClient := &http.Client{Timeout: 2 * time.Second} + for i := 1; i <= numTries; i++ { + res, err := impatientClient.Get(buildletURL) + if err != nil { + time.Sleep(1 * time.Second) + continue + } + res.Body.Close() + if res.StatusCode != 200 { + return fmt.Errorf("buildlet returned HTTP status code %d on try number %d", res.StatusCode, i) + } + st.logEventTime("buildlet_up") + alive = true + break + } + if !alive { + return fmt.Errorf("buildlet didn't come up in %d seconds", numTries) + } + + // Write the VERSION file. + st.logEventTime("start_write_version_tar") + verReq, err := http.NewRequest("PUT", buildletURL+"/writetgz", versionTgz(st.rev)) + if err != nil { + return err + } + verRes, err := http.DefaultClient.Do(verReq) + if !goodRes(verRes, err, "writing VERSION tgz") { + return + } + + // Feed the buildlet a tar file for it to extract. + // TODO: cache these. 
+ st.logEventTime("start_fetch_gerrit_tgz") + tarRes, err := http.Get("https://go.googlesource.com/go/+archive/" + st.rev + ".tar.gz") + if !goodRes(tarRes, err, "fetching tarball from Gerrit") { + return + } + + st.logEventTime("start_write_tar") + putReq, err := http.NewRequest("PUT", buildletURL+"/writetgz", tarRes.Body) + if err != nil { + tarRes.Body.Close() + return err + } + putRes, err := http.DefaultClient.Do(putReq) + st.logEventTime("end_write_tar") + tarRes.Body.Close() + if !goodRes(putRes, err, "writing tarball to buildlet") { + return + } + + // Run the builder + cmd := "all.bash" + if strings.HasPrefix(st.name, "windows-") { + cmd = "all.bat" + } else if strings.HasPrefix(st.name, "plan9-") { + cmd = "all.rc" + } + execStartTime := time.Now() + st.logEventTime("start_exec") + res, err := http.PostForm(buildletURL+"/exec", url.Values{"cmd": {"src/" + cmd}}) + if !goodRes(res, err, "running "+cmd) { + return + } + defer res.Body.Close() + st.logEventTime("running_exec") + // Stream the output: + if _, err := io.Copy(st, res.Body); err != nil { + return fmt.Errorf("error copying response: %v", err) + } + st.logEventTime("done") + + // Don't record to the dashboard unless we heard the trailer from + // the buildlet, otherwise it was probably some unrelated error + // (like the VM being killed, or the buildlet crashing due to + // e.g. https://golang.org/issue/9309, since we require a tip + // build of the buildlet to get Trailers support) + state := res.Trailer.Get("Process-State") + if state == "" { + return errors.New("missing Process-State trailer from HTTP response; buildlet built with old (<= 1.4) Go?") + } + + conf := builders[st.name] + var log string + if state != "ok" { + log = st.logs() + } + if err := conf.recordResult(state == "ok", st.rev, log, time.Since(execStartTime)); err != nil { + return fmt.Errorf("Status was %q but failed to report it to the dashboard: %v", state, err) + } + if state != "ok" { + return fmt.Errorf("%s failed: %v", cmd, state) + } + return nil +} + +type eventAndTime struct { + evt string + t time.Time +} + +// buildStatus is the status of a build. +type buildStatus struct { + // Immutable: + builderRev + start time.Time + container string // container ID for docker, else it's a VM + + // Immutable, used by VM only: + createOp string // Instances.Insert operation name + instName string + + mu sync.Mutex // guards following + done time.Time // finished running + succeeded bool // set when done + output bytes.Buffer // stdout and stderr + events []eventAndTime +} + +func (st *buildStatus) setDone(succeeded bool) { + st.mu.Lock() + defer st.mu.Unlock() + st.succeeded = succeeded + st.done = time.Now() +} + +func (st *buildStatus) logEventTime(event string) { + st.mu.Lock() + defer st.mu.Unlock() + st.events = append(st.events, eventAndTime{event, time.Now()}) +} + +func (st *buildStatus) hasEvent(event string) bool { + st.mu.Lock() + defer st.mu.Unlock() + for _, e := range st.events { + if e.evt == event { + return true + } + } + return false +} + +// htmlStatusLine returns the HTML to show within the
<pre> block on
+// the main page's list of active builds.
+func (st *buildStatus) htmlStatusLine() string {
+	st.mu.Lock()
+	defer st.mu.Unlock()
+
+	urlPrefix := "https://go-review.googlesource.com/#/q/"
+	if strings.Contains(st.name, "gccgo") {
+		urlPrefix = "https://code.google.com/p/gofrontend/source/detail?r="
+	}
+
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "%s rev <a href='%s%s'>%s</a>",
+		st.name, urlPrefix, st.rev, st.rev)
+
+	if st.done.IsZero() {
+		buf.WriteString(", running")
+	} else if st.succeeded {
+		buf.WriteString(", succeeded")
+	} else {
+		buf.WriteString(", failed")
+	}
+
+	if st.container != "" {
+		fmt.Fprintf(&buf, " in container <a href='%s'>%s</a>", st.logsURL(), st.container)
+	} else {
+		fmt.Fprintf(&buf, " in VM <a href='%s'>%s</a>", st.logsURL(), st.instName)
+	}
+
+	t := st.done
+	if t.IsZero() {
+		t = st.start
+	}
+	fmt.Fprintf(&buf, ", %v ago\n", time.Since(t))
+	st.writeEventsLocked(&buf, true)
+	return buf.String()
+}
+
+func (st *buildStatus) logsURL() string {
+	return fmt.Sprintf("/logs?name=%s&rev=%s&st=%p", st.name, st.rev, st)
+}
+
+// st.mu must be held.
+func (st *buildStatus) writeEventsLocked(w io.Writer, html bool) {
+	for i, evt := range st.events {
+		var elapsed string
+		if i != 0 {
+			elapsed = fmt.Sprintf("+%0.1fs", evt.t.Sub(st.events[i-1].t).Seconds())
+		}
+		msg := evt.evt
+		if msg == "running_exec" && html {
+			msg = fmt.Sprintf("<a href='%s'>%s</a>", st.logsURL(), msg)
+		}
+		fmt.Fprintf(w, " %7s %v %s\n", elapsed, evt.t.Format(time.RFC3339), msg)
+	}
+}
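+
+// For example (with hypothetical timestamps), writeEventsLocked renders lines like:
+//
+//	         2015-01-14T17:24:45Z instance_create_requested
+//	   +2.0s 2015-01-14T17:24:47Z instance_created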
+
+func (st *buildStatus) logs() string {
+	st.mu.Lock()
+	logs := st.output.String()
+	st.mu.Unlock()
+	key := builderKey(st.name)
+	if key == "" {
+		return logs
+	}
+	return strings.Replace(logs, key, "BUILDERKEY", -1)
+}
+
+func (st *buildStatus) Write(p []byte) (n int, err error) {
+	st.mu.Lock()
+	defer st.mu.Unlock()
+	const maxBufferSize = 2 << 20 // 2MB of output is way more than we expect.
+	plen := len(p)
+	if st.output.Len()+len(p) > maxBufferSize {
+		p = p[:maxBufferSize-st.output.Len()]
+	}
+	st.output.Write(p) // bytes.Buffer can't fail
+	return plen, nil
+}
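+
+// For example, once the buffer has reached maxBufferSize, additional output is
+// dropped but Write still reports the full length written, so an io.Copy of a
+// build's output keeps running without error:
+//
+//	n, err := st.Write(make([]byte, 4096)) // n == 4096, err == nil, even if truncated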
+
+// Stop any previous go-commit-watcher Docker tasks, so they don't
+// pile up upon restarts of the coordinator.
+func stopWatchers() {
+	out, err := exec.Command("docker", "ps").Output()
+	if err != nil {
+		return
+	}
+	for _, line := range strings.Split(string(out), "\n") {
+		if !strings.Contains(line, "go-commit-watcher:") {
+			continue
+		}
+		f := strings.Fields(line)
+		exec.Command("docker", "rm", "-f", "-v", f[0]).Run()
+	}
+}
+
+func startWatching(conf watchConfig) (err error) {
+	defer func() {
+		if err != nil {
+			restartWatcherSoon(conf)
+		}
+	}()
+	log.Printf("Starting watcher for %v", conf.repo)
+	if err := condUpdateImage("go-commit-watcher"); err != nil {
+		log.Printf("Failed to setup container for commit watcher: %v", err)
+		return err
+	}
+
+	cmd := exec.Command("docker", append([]string{"run", "-d"}, conf.dockerRunArgs()...)...)
+	all, err := cmd.CombinedOutput()
+	if err != nil {
+		log.Printf("Docker run for commit watcher = err:%v, output: %s", err, all)
+		return err
+	}
+	container := strings.TrimSpace(string(all))
+	// Start a goroutine to wait for the watcher to die.
+	go func() {
+		exec.Command("docker", "wait", container).Run()
+		exec.Command("docker", "rm", "-v", container).Run()
+		log.Printf("Watcher crashed. Restarting soon.")
+		restartWatcherSoon(conf)
+	}()
+	return nil
+}
+
+func restartWatcherSoon(conf watchConfig) {
+	time.AfterFunc(30*time.Second, func() {
+		startWatching(conf)
+	})
+}
+
+func builderKey(builder string) string {
+	master := masterKey()
+	if len(master) == 0 {
+		return ""
+	}
+	h := hmac.New(md5.New, master)
+	io.WriteString(h, builder)
+	return fmt.Sprintf("%x", h.Sum(nil))
+}
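+
+// For example, with a hypothetical master key "secret", the per-builder key for
+// "linux-amd64" is computed as:
+//
+//	h := hmac.New(md5.New, []byte("secret"))
+//	io.WriteString(h, "linux-amd64")
+//	key := fmt.Sprintf("%x", h.Sum(nil)) // 32 hex characters
+//
+// cmd/retrybuilds derives keys the same way (see builderKeyFromMaster there), so
+// both agree on the key sent to the dashboard for a given builder.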
+
+func masterKey() []byte {
+	keyOnce.Do(loadKey)
+	return masterKeyCache
+}
+
+var (
+	keyOnce        sync.Once
+	masterKeyCache []byte
+)
+
+func loadKey() {
+	if *masterKeyFile != "" {
+		b, err := ioutil.ReadFile(*masterKeyFile)
+		if err != nil {
+			log.Fatal(err)
+		}
+		masterKeyCache = bytes.TrimSpace(b)
+		return
+	}
+	req, _ := http.NewRequest("GET", "http://metadata.google.internal/computeMetadata/v1/project/attributes/builder-master-key", nil)
+	req.Header.Set("Metadata-Flavor", "Google")
+	res, err := http.DefaultClient.Do(req)
+	if err != nil {
+		log.Fatal("No builder master key available")
+	}
+	defer res.Body.Close()
+	if res.StatusCode != 200 {
+		log.Fatalf("No builder-master-key project attribute available.")
+	}
+	slurp, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		log.Fatal(err)
+	}
+	masterKeyCache = bytes.TrimSpace(slurp)
+}
+
+func cleanUpOldContainers() {
+	for {
+		for _, cid := range oldContainers() {
+			log.Printf("Cleaning old container %v", cid)
+			exec.Command("docker", "rm", "-v", cid).Run()
+		}
+		time.Sleep(30 * time.Second)
+	}
+}
+
+func oldContainers() []string {
+	out, _ := exec.Command("docker", "ps", "-a", "--filter=status=exited", "--no-trunc", "-q").Output()
+	return strings.Fields(string(out))
+}
+
+// cleanUpOldVMs loops forever and periodically enumerates virtual
+// machines and deletes those which have expired.
+//
+// A VM is considered expired if it has a "delete-at" metadata
+// attribute having a unix timestamp before the current time.
+//
+// This is the safety mechanism to delete VMs which stray from the
+// normal deleting process. VMs are created to run a single build and
+// should be shut down by a controlling process. Due to various types
+// of failures, they might get stranded. To prevent them from getting
+// stranded and wasting resources forever, we instead set the
+// "delete-at" metadata attribute on them when created to some time
+// that's well beyond their expected lifetime.
+func cleanUpOldVMs() {
+	if computeService == nil {
+		return
+	}
+	for {
+		for _, zone := range strings.Split(*cleanZones, ",") {
+			zone = strings.TrimSpace(zone)
+			if err := cleanZoneVMs(zone); err != nil {
+				log.Printf("Error cleaning VMs in zone %q: %v", zone, err)
+			}
+		}
+		time.Sleep(time.Minute)
+	}
+}
+
+// cleanZoneVMs is part of cleanUpOldVMs, operating on a single zone.
+func cleanZoneVMs(zone string) error {
+	// Fetch the first 500 (default) running instances and clean
+	// those. We expect that we'll be running many fewer than
+	// that. Even if we have more, eventually the first 500 will
+	// either end or be cleaned, and then the next call will get a
+	// partially-different 500.
+	// TODO(bradfitz): revisit this code if we ever start running
+	// thousands of VMs.
+	list, err := computeService.Instances.List(projectID, zone).Do()
+	if err != nil {
+		return fmt.Errorf("listing instances: %v", err)
+	}
+	for _, inst := range list.Items {
+		if !strings.HasPrefix(inst.Name, "buildlet-") {
+			// We only delete ones we created.
+			continue
+		}
+		if inst.Metadata == nil {
+			// Defensive. Not seen in practice.
+			continue
+		}
+		sawDeleteAt := false
+		for _, it := range inst.Metadata.Items {
+			if it.Key == "delete-at" {
+				sawDeleteAt = true
+				unixDeadline, err := strconv.ParseInt(it.Value, 10, 64)
+				if err != nil {
+					log.Printf("invalid delete-at value %q seen; ignoring", it.Value)
+				}
+				if err == nil && time.Now().Unix() > unixDeadline {
+					log.Printf("Deleting expired VM %q in zone %q ...", inst.Name, zone)
+					deleteVM(zone, inst.Name)
+				}
+			}
+		}
+		if sawDeleteAt && !vmIsBuilding(inst.Name) {
+			log.Printf("Deleting VM %q in zone %q from an earlier coordinator generation ...", inst.Name, zone)
+			deleteVM(zone, inst.Name)
+		}
+	}
+	return nil
+}
+
+func deleteVM(zone, instName string) {
+	op, err := computeService.Instances.Delete(projectID, zone, instName).Do()
+	if err != nil {
+		log.Printf("Failed to delete instance %q in zone %q: %v", instName, zone, err)
+		return
+	}
+	log.Printf("Sent request to delete instance %q in zone %q. Operation ID == %v", instName, zone, op.Id)
+}
+
+func hasComputeScope() bool {
+	if !metadata.OnGCE() {
+		return false
+	}
+	scopes, err := metadata.Scopes("default")
+	if err != nil {
+		log.Printf("failed to query metadata default scopes: %v", err)
+		return false
+	}
+	for _, v := range scopes {
+		if v == compute.DevstorageFull_controlScope {
+			return true
+		}
+	}
+	return false
+}
+
+// dash is copied from the builder binary. It runs the given method and command on the dashboard.
+//
+// TODO(bradfitz,adg): unify this somewhere?
+//
+// If args is non-nil it is encoded as the URL query string.
+// If req is non-nil it is JSON-encoded and passed as the body of the HTTP POST.
+// If resp is non-nil the server's response is decoded into the value pointed
+// to by resp (resp must be a pointer).
+func dash(meth, cmd string, args url.Values, req, resp interface{}) error {
+	const builderVersion = 1 // keep in sync with dashboard/app/build/handler.go
+	argsCopy := url.Values{"version": {fmt.Sprint(builderVersion)}}
+	for k, v := range args {
+		if k == "version" {
+			panic(`dash: reserved args key: "version"`)
+		}
+		argsCopy[k] = v
+	}
+	var r *http.Response
+	var err error
+	cmd = "https://build.golang.org/" + cmd + "?" + argsCopy.Encode()
+	switch meth {
+	case "GET":
+		if req != nil {
+			log.Panicf("%s to %s with req", meth, cmd)
+		}
+		r, err = http.Get(cmd)
+	case "POST":
+		var body io.Reader
+		if req != nil {
+			b, err := json.Marshal(req)
+			if err != nil {
+				return err
+			}
+			body = bytes.NewBuffer(b)
+		}
+		r, err = http.Post(cmd, "text/json", body)
+	default:
+		log.Panicf("%s: invalid method %q", cmd, meth)
+		panic("invalid method: " + meth)
+	}
+	if err != nil {
+		return err
+	}
+	defer r.Body.Close()
+	if r.StatusCode != http.StatusOK {
+		return fmt.Errorf("bad http response: %v", r.Status)
+	}
+	body := new(bytes.Buffer)
+	if _, err := body.ReadFrom(r.Body); err != nil {
+		return err
+	}
+
+	// Read JSON-encoded Response into provided resp
+	// and return an error if present.
+	var result = struct {
+		Response interface{}
+		Error    string
+	}{
+		// Put the provided resp in here as it can be a pointer to
+		// some value we should unmarshal into.
+		Response: resp,
+	}
+	if err = json.Unmarshal(body.Bytes(), &result); err != nil {
+		log.Printf("json unmarshal %#q: %s\n", body.Bytes(), err)
+		return err
+	}
+	if result.Error != "" {
+		return errors.New(result.Error)
+	}
+
+	return nil
+}
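+
+// For example, recordResult (defined earlier in this file) reports a build
+// result by POSTing to the "result" command:
+//
+//	args := url.Values{"key": {builderKey(b.name)}, "builder": {b.name}}
+//	err := dash("POST", "result", args, req, nil)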
+
+func versionTgz(rev string) io.Reader {
+	var buf bytes.Buffer
+	zw := gzip.NewWriter(&buf)
+	tw := tar.NewWriter(zw)
+
+	contents := "devel " + rev
+	check(tw.WriteHeader(&tar.Header{
+		Name: "VERSION",
+		Mode: 0644,
+		Size: int64(len(contents)),
+	}))
+	_, err := io.WriteString(tw, contents)
+	check(err)
+	check(tw.Close())
+	check(zw.Close())
+	return bytes.NewReader(buf.Bytes())
+}
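+
+// A minimal sketch of reading the archive back (e.g. in a test):
+//
+//	zr, _ := gzip.NewReader(versionTgz("abc123"))
+//	tr := tar.NewReader(zr)
+//	hdr, _ := tr.Next()        // hdr.Name == "VERSION"
+//	b, _ := ioutil.ReadAll(tr) // string(b) == "devel abc123"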
+
+// check is only for things which should be impossible (not even rare)
+// to fail.
+func check(err error) {
+	if err != nil {
+		panic("previously assumed to never fail: " + err.Error())
+	}
+}
diff --git a/dashboard/cmd/retrybuilds/retrybuilds.go b/dashboard/cmd/retrybuilds/retrybuilds.go
new file mode 100644
index 0000000..c432df2
--- /dev/null
+++ b/dashboard/cmd/retrybuilds/retrybuilds.go
@@ -0,0 +1,235 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The retrybuilds command clears build failures from the build.golang.org dashboard
+// to force them to be rebuilt.
+//
+// Valid usage modes:
+//
+//   retrybuilds -loghash=f45f0eb8
+//   retrybuilds -builder=openbsd-amd64
+//   retrybuilds -builder=openbsd-amd64 -hash=6fecb7
+//   retrybuilds -redo-flaky
+//   retrybuilds -redo-flaky -builder=linux-amd64-clang
+package main
+
+import (
+	"bytes"
+	"crypto/hmac"
+	"crypto/md5"
+	"flag"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"net/http"
+	"net/url"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+)
+
+var (
+	masterKeyFile = flag.String("masterkey", filepath.Join(os.Getenv("HOME"), "keys", "gobuilder-master.key"), "path to Go builder master key. If present, the key argument is not necessary")
+	keyFile       = flag.String("key", "", "path to key file")
+	builder       = flag.String("builder", "", "builder to wipe a result for.")
+	hash          = flag.String("hash", "", "Hash to wipe. If empty, all will be wiped.")
+	redoFlaky     = flag.Bool("redo-flaky", false, "Reset all flaky builds. If builder is empty, the master key is required.")
+	builderPrefix = flag.String("builder-prefix", "https://build.golang.org", "builder URL prefix")
+	logHash       = flag.String("loghash", "", "If non-empty, clear the build that failed with this loghash prefix")
+)
+
+type Failure struct {
+	Builder string
+	Hash    string
+	LogURL  string
+}
+
+func main() {
+	flag.Parse()
+	*builderPrefix = strings.TrimSuffix(*builderPrefix, "/")
+	if *logHash != "" {
+		substr := "/log/" + *logHash
+		for _, f := range failures() {
+			if strings.Contains(f.LogURL, substr) {
+				wipe(f.Builder, f.Hash)
+			}
+		}
+		return
+	}
+	if *redoFlaky {
+		fixTheFlakes()
+		return
+	}
+	if *builder == "" {
+		log.Fatalf("Missing -builder, -redo-flaky, or -loghash flag.")
+	}
+	wipe(*builder, fullHash(*hash))
+}
+
+func fixTheFlakes() {
+	gate := make(chan bool, 50)
+	var wg sync.WaitGroup
+	for _, f := range failures() {
+		f := f
+		if *builder != "" && f.Builder != *builder {
+			continue
+		}
+		gate <- true
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			defer func() { <-gate }()
+			res, err := http.Get(f.LogURL)
+			if err != nil {
+				log.Fatalf("Error fetching %s: %v", f.LogURL, err)
+			}
+			defer res.Body.Close()
+			failLog, err := ioutil.ReadAll(res.Body)
+			if err != nil {
+				log.Fatalf("Error reading %s: %v", f.LogURL, err)
+			}
+			if isFlaky(string(failLog)) {
+				log.Printf("Restarting flaky %+v", f)
+				wipe(f.Builder, f.Hash)
+			}
+		}()
+	}
+	wg.Wait()
+}
+
+var flakePhrases = []string{
+	"No space left on device",
+	"fatal error: error in backend: IO failure on output stream",
+	"Boffset: unknown state 0",
+	"Bseek: unknown state 0",
+	"error exporting repository: exit status",
+	"remote error: User Is Over Quota",
+	"fatal: remote did not send all necessary objects",
+}
+
+func isFlaky(failLog string) bool {
+	if strings.HasPrefix(failLog, "exit status ") {
+		return true
+	}
+	for _, phrase := range flakePhrases {
+		if strings.Contains(failLog, phrase) {
+			return true
+		}
+	}
+	numLines := strings.Count(failLog, "\n")
+	if numLines < 20 && strings.Contains(failLog, "error: exit status") {
+		return true
+	}
+	// e.g. fatal: destination path 'go.tools.TMP' already exists and is not an empty directory.
+	// To be fixed in golang.org/issue/9407
+	if strings.Contains(failLog, "fatal: destination path '") &&
+		strings.Contains(failLog, "' already exists and is not an empty directory.") {
+		return true
+	}
+	return false
+}
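+
+// For example, a log that begins with "exit status 1", or any log containing a
+// known phrase such as "No space left on device", is treated as flaky and the
+// corresponding result is wiped so the build gets retried.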
+
+func fullHash(h string) string {
+	if h == "" || len(h) == 40 {
+		return h
+	}
+	for _, f := range failures() {
+		if strings.HasPrefix(f.Hash, h) {
+			return f.Hash
+		}
+	}
+	log.Fatalf("invalid hash %q; failed to find its full hash. Not a recent failure?", h)
+	panic("unreachable")
+}
+
+// hash may be empty
+func wipe(builder, hash string) {
+	if hash != "" {
+		log.Printf("Clearing %s, hash %s", builder, hash)
+	} else {
+		log.Printf("Clearing all builds for %s", builder)
+	}
+	vals := url.Values{
+		"builder": {builder},
+		"hash":    {hash},
+		"key":     {builderKey(builder)},
+	}
+	res, err := http.PostForm(*builderPrefix+"/clear-results?"+vals.Encode(), nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer res.Body.Close()
+	if res.StatusCode != 200 {
+		log.Fatalf("Error clearing %v hash %q: %v", builder, hash, res.Status)
+	}
+}
+
+func builderKey(builder string) string {
+	if v, ok := builderKeyFromMaster(builder); ok {
+		return v
+	}
+	if *keyFile == "" {
+		log.Fatalf("No --key specified for builder %s", builder)
+	}
+	slurp, err := ioutil.ReadFile(*keyFile)
+	if err != nil {
+		log.Fatalf("Error reading builder key %s: %v", builder, err)
+	}
+	return strings.TrimSpace(string(slurp))
+}
+
+func builderKeyFromMaster(builder string) (key string, ok bool) {
+	if *masterKeyFile == "" {
+		return
+	}
+	slurp, err := ioutil.ReadFile(*masterKeyFile)
+	if err != nil {
+		return
+	}
+	h := hmac.New(md5.New, bytes.TrimSpace(slurp))
+	h.Write([]byte(builder))
+	return fmt.Sprintf("%x", h.Sum(nil)), true
+}
+
+var (
+	failMu    sync.Mutex
+	failCache []Failure
+)
+
+func failures() (ret []Failure) {
+	failMu.Lock()
+	ret = failCache
+	failMu.Unlock()
+	if ret != nil {
+		return
+	}
+	ret = []Failure{} // non-nil
+
+	res, err := http.Get(*builderPrefix + "/?mode=failures")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer res.Body.Close()
+	slurp, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		log.Fatal(err)
+	}
+	body := string(slurp)
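+	// Each line of the failures dump is expected to contain three
+	// whitespace-separated fields:
+	//
+	//	<hash> <builder> <log URL>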
+	for _, line := range strings.Split(body, "\n") {
+		f := strings.Fields(line)
+		if len(f) == 3 {
+			ret = append(ret, Failure{
+				Hash:    f[0],
+				Builder: f[1],
+				LogURL:  f[2],
+			})
+		}
+	}
+
+	failMu.Lock()
+	failCache = ret
+	failMu.Unlock()
+	return ret
+}
diff --git a/dashboard/cmd/upload/upload.go b/dashboard/cmd/upload/upload.go
new file mode 100644
index 0000000..44f5a72
--- /dev/null
+++ b/dashboard/cmd/upload/upload.go
@@ -0,0 +1,132 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build upload
+
+//       ^ this is so we don't break the build of x/tools/... due
+//         to missing dependencies on the builders. We don't want full builds
+//         needing to pull in dependencies outside of the x/tools repo.
+
+// The upload command writes a file to Google Cloud Storage. It's used
+// exclusively by the Makefiles in the Go project repos. Think of it
+// as a very light version of gsutil or gcloud, but with some
+// Go-specific configuration knowledge baked in.
+package main
+
+import (
+	"bytes"
+	"flag"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"net/http"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/google"
+	"google.golang.org/cloud"
+	"google.golang.org/cloud/storage"
+)
+
+var (
+	public  = flag.Bool("public", false, "object should be world-readable")
+	file    = flag.String("file", "-", "Filename to read object from, or '-' for stdin.")
+	verbose = flag.Bool("verbose", false, "verbose logging")
+)
+
+func main() {
+	flag.Usage = func() {
+		fmt.Fprintf(os.Stderr, "Usage: upload [--public] [--file=...] <bucket/object>\n")
+		flag.PrintDefaults()
+	}
+	flag.Parse()
+	if flag.NArg() != 1 {
+		flag.Usage()
+		os.Exit(1)
+	}
+	args := strings.SplitN(flag.Arg(0), "/", 2)
+	if len(args) != 2 {
+		flag.Usage()
+		os.Exit(1)
+	}
+	bucket, object := args[0], args[1]
+
+	proj, ok := bucketProject[bucket]
+	if !ok {
+		log.Fatalf("bucket %q doesn't have an associated project in upload.go", bucket)
+	}
+
+	ts, err := tokenSource(bucket)
+	if err != nil {
+		log.Fatalf("Failed to get an OAuth2 token source: %v", err)
+	}
+	httpClient := oauth2.NewClient(oauth2.NoContext, ts)
+
+	ctx := cloud.NewContext(proj, httpClient)
+	w := storage.NewWriter(ctx, bucket, object)
+	// If you don't give the owners access, the web UI seems to
+	// have a bug and doesn't have access to see that it's public, so
+	// won't render the "Shared Publicly" link. So we do that, even
+	// though it's dumb and unnecessary otherwise:
+	w.ACL = append(w.ACL, storage.ACLRule{Entity: storage.ACLEntity("project-owners-" + proj), Role: storage.RoleOwner})
+	if *public {
+		w.ACL = append(w.ACL, storage.ACLRule{Entity: storage.AllUsers, Role: storage.RoleReader})
+	}
+	var content io.Reader
+	if *file == "-" {
+		content = os.Stdin
+	} else {
+		content, err = os.Open(*file)
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+
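+	// Sniff the content type from an initial slurp of up to 1MB: buffer that
+	// prefix, let http.DetectContentType inspect it, then upload the buffered
+	// bytes followed by the rest of the still-unread input.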
+	const maxSlurp = 1 << 20
+	var buf bytes.Buffer
+	n, err := io.CopyN(&buf, content, maxSlurp)
+	if err != nil && err != io.EOF {
+		log.Fatalf("Error reading from stdin: %v, %v", n, err)
+	}
+	w.ContentType = http.DetectContentType(buf.Bytes())
+
+	_, err = io.Copy(w, io.MultiReader(&buf, content))
+	if cerr := w.Close(); cerr != nil && err == nil {
+		err = cerr
+	}
+	if err != nil {
+		log.Fatalf("Write error: %v", err)
+	}
+	if *verbose {
+		log.Printf("Wrote %v", object)
+	}
+	os.Exit(0)
+}
+
+var bucketProject = map[string]string{
+	"go-builder-data":       "symbolic-datum-552",
+	"http2-demo-server-tls": "symbolic-datum-552",
+	"winstrap":              "999119582588",
+	"gobuilder":             "999119582588", // deprecated
+}
+
+func tokenSource(bucket string) (oauth2.TokenSource, error) {
+	proj := bucketProject[bucket]
+	fileName := filepath.Join(os.Getenv("HOME"), "keys", proj+".key.json")
+	jsonConf, err := ioutil.ReadFile(fileName)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil, fmt.Errorf("Missing JSON key configuration. Download the Service Account JSON key from https://console.developers.google.com/project/%s/apiui/credential and place it at %s", proj, fileName)
+		}
+		return nil, err
+	}
+	conf, err := google.JWTConfigFromJSON(jsonConf, storage.ScopeReadWrite)
+	if err != nil {
+		return nil, fmt.Errorf("reading JSON config from %s: %v", fileName, err)
+	}
+	return conf.TokenSource(oauth2.NoContext), nil
+}
diff --git a/dashboard/coordinator/Makefile b/dashboard/coordinator/Makefile
deleted file mode 100644
index 166086f..0000000
--- a/dashboard/coordinator/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-coordinator: main.go
-	GOOS=linux go build --tags=build_coordinator -o coordinator .
-
-# After "make upload", either reboot the machine, or ssh to it and:
-#   sudo systemctl restart gobuild.service
-# And watch its logs with:
-#   sudo journalctl -f -u gobuild.service
-upload: coordinator
-	cat coordinator | (cd ../upload && go run upload.go --public go-builder-data/coordinator)
diff --git a/dashboard/coordinator/buildongce/create.go b/dashboard/coordinator/buildongce/create.go
deleted file mode 100644
index 2148ebb..0000000
--- a/dashboard/coordinator/buildongce/create.go
+++ /dev/null
@@ -1,299 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main // import "golang.org/x/tools/dashboard/coordinator/buildongce"
-
-import (
-	"bufio"
-	"encoding/json"
-	"flag"
-	"fmt"
-	"io/ioutil"
-	"log"
-	"os"
-	"strings"
-	"time"
-
-	"golang.org/x/oauth2"
-	"golang.org/x/oauth2/google"
-	compute "google.golang.org/api/compute/v1"
-)
-
-var (
-	proj      = flag.String("project", "symbolic-datum-552", "name of Project")
-	zone      = flag.String("zone", "us-central1-a", "GCE zone")
-	mach      = flag.String("machinetype", "n1-standard-16", "Machine type")
-	instName  = flag.String("instance_name", "go-builder-1", "Name of VM instance.")
-	sshPub    = flag.String("ssh_public_key", "", "ssh public key file to authorize. Can modify later in Google's web UI anyway.")
-	staticIP  = flag.String("static_ip", "", "Static IP to use. If empty, automatic.")
-	reuseDisk = flag.Bool("reuse_disk", true, "Whether disk images should be reused between shutdowns/restarts.")
-	ssd       = flag.Bool("ssd", false, "use a solid state disk (faster, more expensive)")
-)
-
-func readFile(v string) string {
-	slurp, err := ioutil.ReadFile(v)
-	if err != nil {
-		log.Fatalf("Error reading %s: %v", v, err)
-	}
-	return strings.TrimSpace(string(slurp))
-}
-
-var config = &oauth2.Config{
-	// The client-id and secret should be for an "Installed Application" when using
-	// the CLI. Later we'll use a web application with a callback.
-	ClientID:     readFile("client-id.dat"),
-	ClientSecret: readFile("client-secret.dat"),
-	Endpoint:     google.Endpoint,
-	Scopes: []string{
-		compute.DevstorageFull_controlScope,
-		compute.ComputeScope,
-		"https://www.googleapis.com/auth/sqlservice",
-		"https://www.googleapis.com/auth/sqlservice.admin",
-	},
-	RedirectURL: "urn:ietf:wg:oauth:2.0:oob",
-}
-
-const baseConfig = `#cloud-config
-coreos:
-  update:
-    group: alpha
-    reboot-strategy: off
-  units:
-    - name: gobuild.service
-      command: start
-      content: |
-        [Unit]
-        Description=Go Builders
-        After=docker.service
-        Requires=docker.service
-        
-        [Service]
-        ExecStartPre=/bin/bash -c 'mkdir -p /opt/bin && curl -s -o /opt/bin/coordinator http://storage.googleapis.com/go-builder-data/coordinator && chmod +x /opt/bin/coordinator'
-        ExecStart=/opt/bin/coordinator
-        RestartSec=10s
-        Restart=always
-        Type=simple
-        
-        [Install]
-        WantedBy=multi-user.target
-`
-
-func main() {
-	flag.Parse()
-	if *proj == "" {
-		log.Fatalf("Missing --project flag")
-	}
-	prefix := "https://www.googleapis.com/compute/v1/projects/" + *proj
-	machType := prefix + "/zones/" + *zone + "/machineTypes/" + *mach
-
-	const tokenFileName = "token.dat"
-	tokenFile := tokenCacheFile(tokenFileName)
-	tokenSource := oauth2.ReuseTokenSource(nil, tokenFile)
-	token, err := tokenSource.Token()
-	if err != nil {
-		log.Printf("Error getting token from %s: %v", tokenFileName, err)
-		log.Printf("Get auth code from %v", config.AuthCodeURL("my-state"))
-		fmt.Print("\nEnter auth code: ")
-		sc := bufio.NewScanner(os.Stdin)
-		sc.Scan()
-		authCode := strings.TrimSpace(sc.Text())
-		token, err = config.Exchange(oauth2.NoContext, authCode)
-		if err != nil {
-			log.Fatalf("Error exchanging auth code for a token: %v", err)
-		}
-		if err := tokenFile.WriteToken(token); err != nil {
-			log.Fatalf("Error writing to %s: %v", tokenFileName, err)
-		}
-		tokenSource = oauth2.ReuseTokenSource(token, nil)
-	}
-
-	oauthClient := oauth2.NewClient(oauth2.NoContext, tokenSource)
-
-	computeService, _ := compute.New(oauthClient)
-
-	natIP := *staticIP
-	if natIP == "" {
-		// Try to find it by name.
-		aggAddrList, err := computeService.Addresses.AggregatedList(*proj).Do()
-		if err != nil {
-			log.Fatal(err)
-		}
-		// https://godoc.org/google.golang.org/api/compute/v1#AddressAggregatedList
-	IPLoop:
-		for _, asl := range aggAddrList.Items {
-			for _, addr := range asl.Addresses {
-				if addr.Name == *instName+"-ip" && addr.Status == "RESERVED" {
-					natIP = addr.Address
-					break IPLoop
-				}
-			}
-		}
-	}
-
-	cloudConfig := baseConfig
-	if *sshPub != "" {
-		key := strings.TrimSpace(readFile(*sshPub))
-		cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n    - %s\n", key)
-	}
-	if os.Getenv("USER") == "bradfitz" {
-		cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n    - %s\n", "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEAwks9dwWKlRC+73gRbvYtVg0vdCwDSuIlyt4z6xa/YU/jTDynM4R4W10hm2tPjy8iR1k8XhDv4/qdxe6m07NjG/By1tkmGpm1mGwho4Pr5kbAAy/Qg+NLCSdAYnnE00FQEcFOC15GFVMOW2AzDGKisReohwH9eIzHPzdYQNPRWXE= bradfitz@papag.bradfitz.com")
-	}
-	const maxCloudConfig = 32 << 10 // per compute API docs
-	if len(cloudConfig) > maxCloudConfig {
-		log.Fatalf("cloud config length of %d bytes is over %d byte limit", len(cloudConfig), maxCloudConfig)
-	}
-
-	instance := &compute.Instance{
-		Name:        *instName,
-		Description: "Go Builder",
-		MachineType: machType,
-		Disks:       []*compute.AttachedDisk{instanceDisk(computeService)},
-		Tags: &compute.Tags{
-			Items: []string{"http-server", "https-server"},
-		},
-		Metadata: &compute.Metadata{
-			Items: []*compute.MetadataItems{
-				{
-					Key:   "user-data",
-					Value: cloudConfig,
-				},
-			},
-		},
-		NetworkInterfaces: []*compute.NetworkInterface{
-			&compute.NetworkInterface{
-				AccessConfigs: []*compute.AccessConfig{
-					&compute.AccessConfig{
-						Type:  "ONE_TO_ONE_NAT",
-						Name:  "External NAT",
-						NatIP: natIP,
-					},
-				},
-				Network: prefix + "/global/networks/default",
-			},
-		},
-		ServiceAccounts: []*compute.ServiceAccount{
-			{
-				Email: "default",
-				Scopes: []string{
-					compute.DevstorageFull_controlScope,
-					compute.ComputeScope,
-				},
-			},
-		},
-	}
-
-	log.Printf("Creating instance...")
-	op, err := computeService.Instances.Insert(*proj, *zone, instance).Do()
-	if err != nil {
-		log.Fatalf("Failed to create instance: %v", err)
-	}
-	opName := op.Name
-	log.Printf("Created. Waiting on operation %v", opName)
-OpLoop:
-	for {
-		time.Sleep(2 * time.Second)
-		op, err := computeService.ZoneOperations.Get(*proj, *zone, opName).Do()
-		if err != nil {
-			log.Fatalf("Failed to get op %s: %v", opName, err)
-		}
-		switch op.Status {
-		case "PENDING", "RUNNING":
-			log.Printf("Waiting on operation %v", opName)
-			continue
-		case "DONE":
-			if op.Error != nil {
-				for _, operr := range op.Error.Errors {
-					log.Printf("Error: %+v", operr)
-				}
-				log.Fatalf("Failed to start.")
-			}
-			log.Printf("Success. %+v", op)
-			break OpLoop
-		default:
-			log.Fatalf("Unknown status %q: %+v", op.Status, op)
-		}
-	}
-
-	inst, err := computeService.Instances.Get(*proj, *zone, *instName).Do()
-	if err != nil {
-		log.Fatalf("Error getting instance after creation: %v", err)
-	}
-	ij, _ := json.MarshalIndent(inst, "", "    ")
-	log.Printf("Instance: %s", ij)
-}
-
-func instanceDisk(svc *compute.Service) *compute.AttachedDisk {
-	const imageURL = "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-402-2-0-v20140807"
-	diskName := *instName + "-coreos-stateless-pd"
-
-	if *reuseDisk {
-		dl, err := svc.Disks.List(*proj, *zone).Do()
-		if err != nil {
-			log.Fatalf("Error listing disks: %v", err)
-		}
-		for _, disk := range dl.Items {
-			if disk.Name != diskName {
-				continue
-			}
-			return &compute.AttachedDisk{
-				AutoDelete: false,
-				Boot:       true,
-				DeviceName: diskName,
-				Type:       "PERSISTENT",
-				Source:     disk.SelfLink,
-				Mode:       "READ_WRITE",
-
-				// The GCP web UI's "Show REST API" link includes a
-				// "zone" parameter, but it's not in the API
-				// description. But it wants this form (disk.Zone, a
-				// full zone URL, not *zone):
-				// Zone: disk.Zone,
-				// ... but it seems to work without it.  Keep this
-				// comment here until I file a bug with the GCP
-				// people.
-			}
-		}
-	}
-
-	diskType := ""
-	if *ssd {
-		diskType = "https://www.googleapis.com/compute/v1/projects/" + *proj + "/zones/" + *zone + "/diskTypes/pd-ssd"
-	}
-
-	return &compute.AttachedDisk{
-		AutoDelete: !*reuseDisk,
-		Boot:       true,
-		Type:       "PERSISTENT",
-		InitializeParams: &compute.AttachedDiskInitializeParams{
-			DiskName:    diskName,
-			SourceImage: imageURL,
-			DiskSizeGb:  50,
-			DiskType:    diskType,
-		},
-	}
-}
-
-type tokenCacheFile string
-
-func (f tokenCacheFile) Token() (*oauth2.Token, error) {
-	slurp, err := ioutil.ReadFile(string(f))
-	if err != nil {
-		return nil, err
-	}
-	t := new(oauth2.Token)
-	if err := json.Unmarshal(slurp, t); err != nil {
-		return nil, err
-	}
-	return t, nil
-}
-
-func (f tokenCacheFile) WriteToken(t *oauth2.Token) error {
-	jt, err := json.Marshal(t)
-	if err != nil {
-		return err
-	}
-	return ioutil.WriteFile(string(f), jt, 0600)
-}
diff --git a/dashboard/coordinator/main.go b/dashboard/coordinator/main.go
deleted file mode 100644
index df3dc48..0000000
--- a/dashboard/coordinator/main.go
+++ /dev/null
@@ -1,1540 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build build_coordinator
-
-// The coordinator runs on GCE and coordinates builds in Docker containers.
-package main // import "golang.org/x/tools/dashboard/coordinator"
-
-import (
-	"archive/tar"
-	"bytes"
-	"compress/gzip"
-	"crypto/hmac"
-	"crypto/md5"
-	"crypto/rand"
-	"encoding/json"
-	"errors"
-	"flag"
-	"fmt"
-	"html"
-	"io"
-	"io/ioutil"
-	"log"
-	"net/http"
-	"net/url"
-	"os"
-	"os/exec"
-	"path"
-	"regexp"
-	"sort"
-	"strconv"
-	"strings"
-	"sync"
-	"time"
-
-	"golang.org/x/oauth2"
-	"golang.org/x/oauth2/google"
-	"golang.org/x/tools/dashboard/types"
-	"google.golang.org/api/compute/v1"
-	"google.golang.org/cloud/compute/metadata"
-)
-
-var (
-	masterKeyFile  = flag.String("masterkey", "", "Path to builder master key. Else fetched using GCE project attribute 'builder-master-key'.")
-	maxLocalBuilds = flag.Int("maxbuilds", 6, "Max concurrent Docker builds (VM builds don't count)")
-
-	cleanZones = flag.String("zones", "us-central1-a,us-central1-b,us-central1-f", "Comma-separated list of zones to periodically clean of stale build VMs (ones that failed to shut themselves down)")
-
-	// Debug flags:
-	addTemp = flag.Bool("temp", false, "Append -temp to all builders.")
-	just    = flag.String("just", "", "If non-empty, run single build in the foreground. Requires rev.")
-	rev     = flag.String("rev", "", "Revision to build.")
-)
-
-var (
-	startTime = time.Now()
-	builders  = map[string]buildConfig{} // populated at startup, keys like "openbsd-amd64-56"
-	watchers  = map[string]watchConfig{} // populated at startup, keyed by repo, e.g. "https://go.googlesource.com/go"
-	donec     = make(chan builderRev)    // reports of finished builders
-
-	statusMu   sync.Mutex // guards both status (ongoing ones) and statusDone (just finished)
-	status     = map[builderRev]*buildStatus{}
-	statusDone []*buildStatus // finished recently, capped to maxStatusDone
-)
-
-const (
-	maxStatusDone = 30
-
-	// vmDeleteTimeout is how long before we delete a VM.
-	// In practice this need only be as long as the slowest
-	// builder (plan9 currently), because on startup this program
-	// already deletes all buildlets it doesn't know about
-	// (i.e. ones from a previous instance of the coordinator).
-	vmDeleteTimeout = 45 * time.Minute
-)
-
-// Initialized by initGCE:
-var (
-	projectID      string
-	projectZone    string
-	computeService *compute.Service
-	externalIP     string
-)
-
-func initGCE() error {
-	if !metadata.OnGCE() {
-		return errors.New("not running on GCE; VM support disabled")
-	}
-	var err error
-	projectID, err = metadata.ProjectID()
-	if err != nil {
-		return fmt.Errorf("failed to get current GCE ProjectID: %v", err)
-	}
-	projectZone, err = metadata.Get("instance/zone")
-	if err != nil || projectZone == "" {
-		return fmt.Errorf("failed to get current GCE zone: %v", err)
-	}
-	// Convert the zone from "projects/1234/zones/us-central1-a" to "us-central1-a".
-	projectZone = path.Base(projectZone)
-	if !hasComputeScope() {
-		return errors.New("The coordinator is not running with access to read and write Compute resources. VM support disabled.")
-
-	}
-	externalIP, err = metadata.ExternalIP()
-	if err != nil {
-		return fmt.Errorf("ExternalIP: %v", err)
-	}
-	ts := google.ComputeTokenSource("default")
-	computeService, _ = compute.New(oauth2.NewClient(oauth2.NoContext, ts))
-	return nil
-}
-
-type imageInfo struct {
-	url string // of tar file
-
-	mu      sync.Mutex
-	lastMod string
-}
-
-var images = map[string]*imageInfo{
-	"go-commit-watcher":          {url: "https://storage.googleapis.com/go-builder-data/docker-commit-watcher.tar.gz"},
-	"gobuilders/linux-x86-base":  {url: "https://storage.googleapis.com/go-builder-data/docker-linux.base.tar.gz"},
-	"gobuilders/linux-x86-clang": {url: "https://storage.googleapis.com/go-builder-data/docker-linux.clang.tar.gz"},
-	"gobuilders/linux-x86-gccgo": {url: "https://storage.googleapis.com/go-builder-data/docker-linux.gccgo.tar.gz"},
-	"gobuilders/linux-x86-nacl":  {url: "https://storage.googleapis.com/go-builder-data/docker-linux.nacl.tar.gz"},
-	"gobuilders/linux-x86-sid":   {url: "https://storage.googleapis.com/go-builder-data/docker-linux.sid.tar.gz"},
-}
-
-// A buildConfig describes how to run either a Docker-based or VM-based build.
-type buildConfig struct {
-	name string // "linux-amd64-race"
-
-	// VM-specific settings: (used if vmImage != "")
-	vmImage     string // e.g. "openbsd-amd64-56"
-	machineType string // optional GCE instance type
-
-	// Docker-specific settings: (used if vmImage == "")
-	image   string   // Docker image to use to build
-	cmd     string   // optional -cmd flag (relative to go/src/)
-	env     []string // extra environment ("key=value") pairs
-	dashURL string   // url of the build dashboard
-	tool    string   // the tool this configuration is for
-}
-
-func (c *buildConfig) usesDocker() bool { return c.vmImage == "" }
-func (c *buildConfig) usesVM() bool     { return c.vmImage != "" }
-
-func (c *buildConfig) MachineType() string {
-	if v := c.machineType; v != "" {
-		return v
-	}
-	return "n1-highcpu-4"
-}
-
-// recordResult sends build results to the dashboard
-func (b *buildConfig) recordResult(ok bool, hash, buildLog string, runTime time.Duration) error {
-	req := map[string]interface{}{
-		"Builder":     b.name,
-		"PackagePath": "",
-		"Hash":        hash,
-		"GoHash":      "",
-		"OK":          ok,
-		"Log":         buildLog,
-		"RunTime":     runTime,
-	}
-	args := url.Values{"key": {builderKey(b.name)}, "builder": {b.name}}
-	return dash("POST", "result", args, req, nil)
-}
-
-// pingDashboard is a goroutine that periodically POSTS to build.golang.org/building
-// to let it know that we're still working on a build.
-func pingDashboard(st *buildStatus) {
-	u := "https://build.golang.org/building?" + url.Values{
-		"builder": []string{st.name},
-		"key":     []string{builderKey(st.name)},
-		"hash":    []string{st.rev},
-		"url":     []string{fmt.Sprintf("http://%v/logs?name=%s&rev=%s&st=%p", externalIP, st.name, st.rev, st)},
-	}.Encode()
-	for {
-		st.mu.Lock()
-		done := st.done
-		st.mu.Unlock()
-		if !done.IsZero() {
-			return
-		}
-		if res, _ := http.PostForm(u, nil); res != nil {
-			res.Body.Close()
-		}
-		time.Sleep(60 * time.Second)
-	}
-}
-
-type watchConfig struct {
-	repo     string        // "https://go.googlesource.com/go"
-	dash     string        // "https://build.golang.org/" (must end in /)
-	interval time.Duration // Polling interval
-}
-
-func main() {
-	flag.Parse()
-
-	if err := initGCE(); err != nil {
-		log.Printf("VM support disabled due to error initializing GCE: %v", err)
-	}
-
-	addBuilder(buildConfig{name: "linux-386"})
-	addBuilder(buildConfig{name: "linux-386-387", env: []string{"GO386=387"}})
-	addBuilder(buildConfig{name: "linux-amd64"})
-	addBuilder(buildConfig{name: "linux-amd64-nocgo", env: []string{"CGO_ENABLED=0", "USER=root"}})
-	addBuilder(buildConfig{name: "linux-amd64-noopt", env: []string{"GO_GCFLAGS=-N -l"}})
-	addBuilder(buildConfig{name: "linux-amd64-race"})
-	addBuilder(buildConfig{name: "nacl-386"})
-	addBuilder(buildConfig{name: "nacl-amd64p32"})
-	addBuilder(buildConfig{
-		name:    "linux-amd64-gccgo",
-		image:   "gobuilders/linux-x86-gccgo",
-		cmd:     "make RUNTESTFLAGS=\"--target_board=unix/-m64\" check-go -j16",
-		dashURL: "https://build.golang.org/gccgo",
-		tool:    "gccgo",
-	})
-	addBuilder(buildConfig{
-		name:    "linux-386-gccgo",
-		image:   "gobuilders/linux-x86-gccgo",
-		cmd:     "make RUNTESTFLAGS=\"--target_board=unix/-m32\" check-go -j16",
-		dashURL: "https://build.golang.org/gccgo",
-		tool:    "gccgo",
-	})
-	addBuilder(buildConfig{name: "linux-386-sid", image: "gobuilders/linux-x86-sid"})
-	addBuilder(buildConfig{name: "linux-amd64-sid", image: "gobuilders/linux-x86-sid"})
-	addBuilder(buildConfig{name: "linux-386-clang", image: "gobuilders/linux-x86-clang"})
-	addBuilder(buildConfig{name: "linux-amd64-clang", image: "gobuilders/linux-x86-clang"})
-
-	// VMs:
-	addBuilder(buildConfig{
-		name:        "openbsd-amd64-gce56",
-		vmImage:     "openbsd-amd64-56",
-		machineType: "n1-highcpu-2",
-	})
-	addBuilder(buildConfig{
-		// It's named "partial" because the buildlet sets
-		// GOTESTONLY=std to stop after the "go test std"
-		// tests because it's so slow otherwise.
-		// TODO(bradfitz): move that env variable to the
-		// coordinator and into this config.
-		name:    "plan9-386-gcepartial",
-		vmImage: "plan9-386",
-		// We *were* using n1-standard-1 because Plan 9 can only
-		// reliably use a single CPU. Using 2 or 4, we see
-		// test failures. See:
-		//    https://golang.org/issue/8393
-		//    https://golang.org/issue/9491
-		// n1-standard-1 has 3.6 GB of memory which is
-		// overkill (userspace probably only sees 2GB anyway),
-		// but it's the cheapest option. And plenty to keep
-		// our ~250 MB of inputs+outputs in its ramfs.
-		//
-		// But the docs say "For the n1 series of machine
-		// types, a virtual CPU is implemented as a single
-		// hyperthread on a 2.6GHz Intel Sandy Bridge Xeon or
-		// Intel Ivy Bridge Xeon (or newer) processor. This
-		// means that the n1-standard-2 machine type will see
-		// a whole physical core."
-		//
-		// ... so we use n1-highcpu-2 (1.80 GB RAM, still
-		// plenty), just so we can get 1 whole core for the
-		// single-core Plan 9. It will see 2 virtual cores and
-		// only use 1, but we hope that 1 will be more powerful
-		// and we'll stop timing out on tests.
-		machineType: "n1-highcpu-2",
-	})
-
-	addWatcher(watchConfig{repo: "https://go.googlesource.com/go", dash: "https://build.golang.org/"})
-	// TODO(adg,cmang): fix gccgo watcher
-	// addWatcher(watchConfig{repo: "https://code.google.com/p/gofrontend", dash: "https://build.golang.org/gccgo/"})
-
-	if (*just != "") != (*rev != "") {
-		log.Fatalf("--just and --rev must be used together")
-	}
-	if *just != "" {
-		conf, ok := builders[*just]
-		if !ok {
-			log.Fatalf("unknown builder %q", *just)
-		}
-		cmd := exec.Command("docker", append([]string{"run"}, conf.dockerRunArgs(*rev)...)...)
-		cmd.Stdout = os.Stdout
-		cmd.Stderr = os.Stderr
-		if err := cmd.Run(); err != nil {
-			log.Fatalf("Build failed: %v", err)
-		}
-		return
-	}
-
-	http.HandleFunc("/", handleStatus)
-	http.HandleFunc("/logs", handleLogs)
-	go http.ListenAndServe(":80", nil)
-
-	go cleanUpOldContainers()
-	go cleanUpOldVMs()
-
-	stopWatchers() // clean up before we start new ones
-	for _, watcher := range watchers {
-		if err := startWatching(watchers[watcher.repo]); err != nil {
-			log.Printf("Error starting watcher for %s: %v", watcher.repo, err)
-		}
-	}
-
-	workc := make(chan builderRev)
-	go findWorkLoop(workc)
-	// TODO(cmang): gccgo will need its own findWorkLoop
-
-	ticker := time.NewTicker(1 * time.Minute)
-	for {
-		select {
-		case work := <-workc:
-			log.Printf("workc received %+v; len(status) = %v, maxLocalBuilds = %v; cur = %p", work, len(status), *maxLocalBuilds, status[work])
-			if mayBuildRev(work) {
-				conf := builders[work.name]
-				if st, err := startBuilding(conf, work.rev); err == nil {
-					setStatus(work, st)
-					go pingDashboard(st)
-				} else {
-					log.Printf("Error starting to build %v: %v", work, err)
-				}
-			}
-		case done := <-donec:
-			log.Printf("%v done", done)
-			markDone(done)
-		case <-ticker.C:
-			if numCurrentBuilds() == 0 && time.Now().After(startTime.Add(10*time.Minute)) {
-				// TODO: halt the whole machine to kill the VM or something
-			}
-		}
-	}
-}
-
-func numCurrentBuilds() int {
-	statusMu.Lock()
-	defer statusMu.Unlock()
-	return len(status)
-}
-
-func isBuilding(work builderRev) bool {
-	statusMu.Lock()
-	defer statusMu.Unlock()
-	_, building := status[work]
-	return building
-}
-
-// mayBuildRev reports whether the build type & revision should be started.
-// It returns true if it's not already building, and there is capacity.
-func mayBuildRev(work builderRev) bool {
-	conf := builders[work.name]
-
-	statusMu.Lock()
-	_, building := status[work]
-	statusMu.Unlock()
-
-	if building {
-		return false
-	}
-	if conf.usesVM() {
-		// These don't count towards *maxLocalBuilds.
-		return true
-	}
-	numDocker, err := numDockerBuilds()
-	if err != nil {
-		log.Printf("not starting %v due to docker ps failure: %v", work, err)
-		return false
-	}
-	return numDocker < *maxLocalBuilds
-}
-
-func setStatus(work builderRev, st *buildStatus) {
-	statusMu.Lock()
-	defer statusMu.Unlock()
-	status[work] = st
-}
-
-func markDone(work builderRev) {
-	statusMu.Lock()
-	defer statusMu.Unlock()
-	st, ok := status[work]
-	if !ok {
-		return
-	}
-	delete(status, work)
-	if len(statusDone) == maxStatusDone {
-		copy(statusDone, statusDone[1:])
-		statusDone = statusDone[:len(statusDone)-1]
-	}
-	statusDone = append(statusDone, st)
-}
-
-func vmIsBuilding(instName string) bool {
-	if instName == "" {
-		log.Printf("bogus empty instance name passed to vmIsBuilding")
-		return false
-	}
-	statusMu.Lock()
-	defer statusMu.Unlock()
-	for _, st := range status {
-		if st.instName == instName {
-			return true
-		}
-	}
-	return false
-}
-
-// statusPtrStr disambiguates which status to return if there are
-// multiple in the history (e.g. recent failures where the build
-// didn't finish for reasons outside of all.bash failing)
-func getStatus(work builderRev, statusPtrStr string) *buildStatus {
-	statusMu.Lock()
-	defer statusMu.Unlock()
-	match := func(st *buildStatus) bool {
-		return statusPtrStr == "" || fmt.Sprintf("%p", st) == statusPtrStr
-	}
-	if st, ok := status[work]; ok && match(st) {
-		return st
-	}
-	for _, st := range statusDone {
-		if st.builderRev == work && match(st) {
-			return st
-		}
-	}
-	return nil
-}
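An illustrative, hypothetical sketch (standard library only, not part of this patch) of the %p disambiguation that getStatus depends on: a status pointer rendered with %p, as carried in the logs URL's "st" parameter, can later be matched by comparing the rendered strings.

package main

import "fmt"

type buildStatus struct{ name, rev string }

func main() {
	// Two in-flight statuses for the same builder and revision; only the
	// pointer string tells them apart, which is what getStatus relies on.
	a := &buildStatus{"linux-amd64", "abc123"}
	b := &buildStatus{"linux-amd64", "abc123"}

	want := fmt.Sprintf("%p", b) // e.g. the "st" value embedded in a logs URL
	for _, st := range []*buildStatus{a, b} {
		if fmt.Sprintf("%p", st) == want {
			fmt.Println("matched status", want, "for", st.name, st.rev)
		}
	}
}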
-
-type byAge []*buildStatus
-
-func (s byAge) Len() int           { return len(s) }
-func (s byAge) Less(i, j int) bool { return s[i].start.Before(s[j].start) }
-func (s byAge) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
-
-func handleStatus(w http.ResponseWriter, r *http.Request) {
-	var active []*buildStatus
-	var recent []*buildStatus
-	statusMu.Lock()
-	for _, st := range status {
-		active = append(active, st)
-	}
-	recent = append(recent, statusDone...)
-	numTotal := len(status)
-	numDocker, err := numDockerBuilds()
-	statusMu.Unlock()
-
-	sort.Sort(byAge(active))
-	sort.Sort(sort.Reverse(byAge(recent)))
-
-	io.WriteString(w, "<html><body><h1>Go build coordinator</h1>")
-
-	if err != nil {
-		fmt.Fprintf(w, "<h2>Error</h2>Error fetching Docker build count: %s\n", html.EscapeString(err.Error()))
-	}
-
-	fmt.Fprintf(w, "<h2>running</h2>%d total builds active (Docker: %d/%d; VMs: %d/∞):",
-		numTotal, numDocker, *maxLocalBuilds, numTotal-numDocker)
-
-	io.WriteString(w, "<pre>")
-	for _, st := range active {
-		io.WriteString(w, st.htmlStatusLine())
-	}
-	io.WriteString(w, "</pre>")
-
-	io.WriteString(w, "<h2>recently completed</h2><pre>")
-	for _, st := range recent {
-		io.WriteString(w, st.htmlStatusLine())
-	}
-	io.WriteString(w, "</pre>")
-
-	fmt.Fprintf(w, "<h2>disk space</h2><pre>%s</pre>", html.EscapeString(diskFree()))
-}
-
-func diskFree() string {
-	out, _ := exec.Command("df", "-h").Output()
-	return string(out)
-}
-
-func handleLogs(w http.ResponseWriter, r *http.Request) {
-	st := getStatus(builderRev{r.FormValue("name"), r.FormValue("rev")}, r.FormValue("st"))
-	if st == nil {
-		http.NotFound(w, r)
-		return
-	}
-	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
-	writeStatusHeader(w, st)
-
-	io.WriteString(w, st.logs())
-	// TODO: if st is still building, stream them to the user with
-	// http.Flusher.Flush and CloseNotifier and registering interest
-	// of new writes with the buildStatus. Will require moving the
-	// BUILDERKEY scrubbing into the Write method.
-}
-
-func writeStatusHeader(w http.ResponseWriter, st *buildStatus) {
-	st.mu.Lock()
-	defer st.mu.Unlock()
-	fmt.Fprintf(w, " builder: %s\n", st.name)
-	fmt.Fprintf(w, " rev: %s\n", st.rev)
-	if st.container != "" {
-		fmt.Fprintf(w, "container: %s\n", st.container)
-	}
-	if st.instName != "" {
-		fmt.Fprintf(w, " vm name: %s\n", st.instName)
-	}
-	fmt.Fprintf(w, " started: %v\n", st.start)
-	done := !st.done.IsZero()
-	if done {
-		fmt.Fprintf(w, " started: %v\n", st.done)
-		fmt.Fprintf(w, " success: %v\n", st.succeeded)
-	} else {
-		fmt.Fprintf(w, " status: still running\n")
-	}
-	if len(st.events) > 0 {
-		io.WriteString(w, "\nEvents:\n")
-		st.writeEventsLocked(w, false)
-	}
-	io.WriteString(w, "\nBuild log:\n")
-}
-
-// findWorkLoop polls http://build.golang.org/?mode=json looking for new work
-// for the main dashboard. It does not support gccgo.
-// TODO(bradfitz): it also currently does not support subrepos.
-func findWorkLoop(work chan<- builderRev) {
-	ticker := time.NewTicker(15 * time.Second)
-	for {
-		if err := findWork(work); err != nil {
-			log.Printf("failed to find new work: %v", err)
-		}
-		<-ticker.C
-	}
-}
-
-func findWork(work chan<- builderRev) error {
-	var bs types.BuildStatus
-	res, err := http.Get("https://build.golang.org/?mode=json")
-	if err != nil {
-		return err
-	}
-	defer res.Body.Close()
-	if err := json.NewDecoder(res.Body).Decode(&bs); err != nil {
-		return err
-	}
-	if res.StatusCode != 200 {
-		return fmt.Errorf("unexpected http status %v", res.Status)
-	}
-
-	knownToDashboard := map[string]bool{} // keys are builder
-	for _, b := range bs.Builders {
-		knownToDashboard[b] = true
-	}
-
-	var goRevisions []string
-	for _, br := range bs.Revisions {
-		if br.Repo == "go" {
-			goRevisions = append(goRevisions, br.Revision)
-		} else {
-			// TODO(bradfitz): support these: golang.org/issue/9506
-			continue
-		}
-		if len(br.Results) != len(bs.Builders) {
-			return errors.New("bogus JSON response from dashboard: results is too long.")
-		}
-		for i, res := range br.Results {
-			if res != "" {
-				// It's either "ok" or a failure URL.
-				continue
-			}
-			builder := bs.Builders[i]
-			if _, ok := builders[builder]; !ok {
-				// Not managed by the coordinator.
-				continue
-			}
-			br := builderRev{bs.Builders[i], br.Revision}
-			if !isBuilding(br) {
-				work <- br
-			}
-		}
-	}
-
-	// And to bootstrap new builders, see if we have any builders
-	// that the dashboard doesn't know about.
-	for b := range builders {
-		if knownToDashboard[b] {
-			continue
-		}
-		for _, rev := range goRevisions {
-			br := builderRev{b, rev}
-			if !isBuilding(br) {
-				work <- br
-			}
-		}
-	}
-	return nil
-}
-
-// builderRev is a build configuration type and a revision.
-type builderRev struct {
-	name string // e.g. "linux-amd64-race"
-	rev  string // lowercase hex git hash
-}
-
-// returns the part after "docker run"
-func (conf buildConfig) dockerRunArgs(rev string) (args []string) {
-	if key := builderKey(conf.name); key != "" {
-		tmpKey := "/tmp/" + conf.name + ".buildkey"
-		if _, err := os.Stat(tmpKey); err != nil {
-			if err := ioutil.WriteFile(tmpKey, []byte(key), 0600); err != nil {
-				log.Fatal(err)
-			}
-		}
-		// Images may look for .gobuildkey in / or /root, so provide both.
-		// TODO(adg): fix images that look in the wrong place.
-		args = append(args, "-v", tmpKey+":/.gobuildkey")
-		args = append(args, "-v", tmpKey+":/root/.gobuildkey")
-	}
-	for _, pair := range conf.env {
-		args = append(args, "-e", pair)
-	}
-	if strings.HasPrefix(conf.name, "linux-amd64") {
-		args = append(args, "-e", "GOROOT_BOOTSTRAP=/go1.4-amd64/go")
-	} else if strings.HasPrefix(conf.name, "linux-386") {
-		args = append(args, "-e", "GOROOT_BOOTSTRAP=/go1.4-386/go")
-	}
-	args = append(args,
-		conf.image,
-		"/usr/local/bin/builder",
-		"-rev="+rev,
-		"-dashboard="+conf.dashURL,
-		"-tool="+conf.tool,
-		"-buildroot=/",
-		"-v",
-	)
-	if conf.cmd != "" {
-		args = append(args, "-cmd", conf.cmd)
-	}
-	args = append(args, conf.name)
-	return
-}
-
-func addBuilder(c buildConfig) {
-	if c.tool == "gccgo" {
-		// TODO(cmang,bradfitz,adg): fix gccgo
-		return
-	}
-	if c.name == "" {
-		panic("empty name")
-	}
-	if *addTemp {
-		c.name += "-temp"
-	}
-	if _, dup := builders[c.name]; dup {
-		panic("dup name")
-	}
-	if c.dashURL == "" {
-		c.dashURL = "https://build.golang.org"
-	}
-	if c.tool == "" {
-		c.tool = "go"
-	}
-
-	if strings.HasPrefix(c.name, "nacl-") {
-		if c.image == "" {
-			c.image = "gobuilders/linux-x86-nacl"
-		}
-		if c.cmd == "" {
-			c.cmd = "/usr/local/bin/build-command.pl"
-		}
-	}
-	if strings.HasPrefix(c.name, "linux-") && c.image == "" {
-		c.image = "gobuilders/linux-x86-base"
-	}
-	if c.image == "" && c.vmImage == "" {
-		panic("empty image and vmImage")
-	}
-	if c.image != "" && c.vmImage != "" {
-		panic("can't specify both image and vmImage")
-	}
-	builders[c.name] = c
-}
-
-// returns the part after "docker run"
-func (conf watchConfig) dockerRunArgs() (args []string) {
-	log.Printf("Running watcher with master key %q", masterKey())
-	if key := masterKey(); len(key) > 0 {
-		tmpKey := "/tmp/watcher.buildkey"
-		if _, err := os.Stat(tmpKey); err != nil {
-			if err := ioutil.WriteFile(tmpKey, key, 0600); err != nil {
-				log.Fatal(err)
-			}
-		}
-		// Images may look for .gobuildkey in / or /root, so provide both.
-		// TODO(adg): fix images that look in the wrong place.
-		args = append(args, "-v", tmpKey+":/.gobuildkey")
-		args = append(args, "-v", tmpKey+":/root/.gobuildkey")
-	}
-	args = append(args,
-		"go-commit-watcher",
-		"/usr/local/bin/watcher",
-		"-repo="+conf.repo,
-		"-dash="+conf.dash,
-		"-poll="+conf.interval.String(),
-	)
-	return
-}
-
-func addWatcher(c watchConfig) {
-	if c.repo == "" {
-		c.repo = "https://go.googlesource.com/go"
-	}
-	if c.dash == "" {
-		c.dash = "https://build.golang.org/"
-	}
-	if c.interval == 0 {
-		c.interval = 10 * time.Second
-	}
-	watchers[c.repo] = c
-}
-
-func condUpdateImage(img string) error {
-	ii := images[img]
-	if ii == nil {
-		return fmt.Errorf("image %q doesn't exist", img)
-	}
-	ii.mu.Lock()
-	defer ii.mu.Unlock()
-	res, err := http.Head(ii.url)
-	if err != nil {
-		return fmt.Errorf("Error checking %s: %v", ii.url, err)
-	}
-	if res.StatusCode != 200 {
-		return fmt.Errorf("Error checking %s: %v", ii.url, res.Status)
-	}
-	if res.Header.Get("Last-Modified") == ii.lastMod {
-		return nil
-	}
-
-	res, err = http.Get(ii.url)
-	if err != nil || res.StatusCode != 200 {
-		return fmt.Errorf("Get after Head failed for %s: %v, %v", ii.url, err, res)
-	}
-	defer res.Body.Close()
-
-	log.Printf("Running: docker load of %s\n", ii.url)
-	cmd := exec.Command("docker", "load")
-	cmd.Stdin = res.Body
-
-	var out bytes.Buffer
-	cmd.Stdout = &out
-	cmd.Stderr = &out
-
-	if cmd.Run(); err != nil {
-		log.Printf("Failed to pull latest %s from %s and pipe into docker load: %v, %s", img, ii.url, err, out.Bytes())
-		return err
-	}
-	ii.lastMod = res.Header.Get("Last-Modified")
-	return nil
-}
-
-// numDockerBuilds finds the number of go builder instances currently running.
-func numDockerBuilds() (n int, err error) {
-	out, err := exec.Command("docker", "ps").Output()
-	if err != nil {
-		return 0, err
-	}
-	for _, line := range strings.Split(string(out), "\n") {
-		if strings.Contains(line, "gobuilders/") {
-			n++
-		}
-	}
-	return n, nil
-}
-
-func startBuilding(conf buildConfig, rev string) (*buildStatus, error) {
-	if conf.usesVM() {
-		return startBuildingInVM(conf, rev)
-	} else {
-		return startBuildingInDocker(conf, rev)
-	}
-}
-
-func startBuildingInDocker(conf buildConfig, rev string) (*buildStatus, error) {
-	if err := condUpdateImage(conf.image); err != nil {
-		log.Printf("Failed to setup container for %v %v: %v", conf.name, rev, err)
-		return nil, err
-	}
-
-	cmd := exec.Command("docker", append([]string{"run", "-d"}, conf.dockerRunArgs(rev)...)...)
-	all, err := cmd.CombinedOutput()
-	log.Printf("Docker run for %v %v = err:%v, output:%s", conf.name, rev, err, all)
-	if err != nil {
-		return nil, err
-	}
-	container := strings.TrimSpace(string(all))
-	brev := builderRev{
-		name: conf.name,
-		rev:  rev,
-	}
-	st := &buildStatus{
-		builderRev: brev,
-		container:  container,
-		start:      time.Now(),
-	}
-	log.Printf("%v now building in Docker container %v", brev, st.container)
-	go func() {
-		all, err := exec.Command("docker", "wait", container).CombinedOutput()
-		output := strings.TrimSpace(string(all))
-		var ok bool
-		if err == nil {
-			exit, err := strconv.Atoi(output)
-			ok = (err == nil && exit == 0)
-		}
-		st.setDone(ok)
-		log.Printf("docker wait %s/%s: %v, %s", container, rev, err, output)
-		donec <- builderRev{conf.name, rev}
-		exec.Command("docker", "rm", container).Run()
-	}()
-	go func() {
-		cmd := exec.Command("docker", "logs", "-f", container)
-		cmd.Stdout = st
-		cmd.Stderr = st
-		if err := cmd.Run(); err != nil {
-			// The docker logs subcommand always returns
-			// success, even if the underlying process
-			// fails.
-			log.Printf("failed to follow docker logs of %s: %v", container, err)
-		}
-	}()
-	return st, nil
-}
-
-var osArchRx = regexp.MustCompile(`^(\w+-\w+)`)
-
-func randHex(n int) string {
-	buf := make([]byte, n/2)
-	_, err := rand.Read(buf)
-	if err != nil {
-		panic("Failed to get randomness: " + err.Error())
-	}
-	return fmt.Sprintf("%x", buf)
-}
-
-// startBuildingInVM starts a VM on GCE running the buildlet binary to build rev.
-func startBuildingInVM(conf buildConfig, rev string) (*buildStatus, error) {
-	brev := builderRev{
-		name: conf.name,
-		rev:  rev,
-	}
-	st := &buildStatus{
-		builderRev: brev,
-		start:      time.Now(),
-	}
-
-	// name is the project-wide unique name of the GCE instance. It can't be longer
-	// than 61 bytes, so we only use the first 8 bytes of the rev.
-	name := "buildlet-" + conf.name + "-" + rev[:8] + "-rn" + randHex(6)
-
-	// buildletURL is the URL of the buildlet binary which the VMs
-	// are configured to download at boot and run. This lets us
-	// update the buildlet more easily than rebuilding the whole
-	// VM image. We put this URL in a well-known GCE metadata attribute.
-	// The value will be of the form:
-	// http://storage.googleapis.com/go-builder-data/buildlet.GOOS-GOARCH
-	m := osArchRx.FindStringSubmatch(conf.name)
-	if m == nil {
-		return nil, fmt.Errorf("invalid builder name %q", conf.name)
-	}
-	buildletURL := "http://storage.googleapis.com/go-builder-data/buildlet." + m[1]
-
-	prefix := "https://www.googleapis.com/compute/v1/projects/" + projectID
-	machType := prefix + "/zones/" + projectZone + "/machineTypes/" + conf.MachineType()
-
-	instance := &compute.Instance{
-		Name:        name,
-		Description: fmt.Sprintf("Go Builder building %s %s", conf.name, rev),
-		MachineType: machType,
-		Disks: []*compute.AttachedDisk{
-			{
-				AutoDelete: true,
-				Boot:       true,
-				Type:       "PERSISTENT",
-				InitializeParams: &compute.AttachedDiskInitializeParams{
-					DiskName:    name,
-					SourceImage: "https://www.googleapis.com/compute/v1/projects/" + projectID + "/global/images/" + conf.vmImage,
-					DiskType:    "https://www.googleapis.com/compute/v1/projects/" + projectID + "/zones/" + projectZone + "/diskTypes/pd-ssd",
-				},
-			},
-		},
-		Tags: &compute.Tags{
-			// Warning: do NOT list "http-server" or "allow-ssh" (our
-			// project's custom tag to allow ssh access) here; the
-			// buildlet provides full remote code execution.
-			Items: []string{},
-		},
-		Metadata: &compute.Metadata{
-			Items: []*compute.MetadataItems{
-				{
-					Key:   "buildlet-binary-url",
-					Value: buildletURL,
-				},
-				// In case the VM gets away from us (generally: if the
-				// coordinator dies while a build is running), then we
-				// set this attribute of when it should be killed so
-				// we can kill it later when the coordinator is
-				// restarted. The cleanUpOldVMs goroutine loop handles
-				// that killing.
-				{
-					Key:   "delete-at",
-					Value: fmt.Sprint(time.Now().Add(vmDeleteTimeout).Unix()),
-				},
-			},
-		},
-		NetworkInterfaces: []*compute.NetworkInterface{
-			&compute.NetworkInterface{
-				AccessConfigs: []*compute.AccessConfig{
-					&compute.AccessConfig{
-						Type: "ONE_TO_ONE_NAT",
-						Name: "External NAT",
-					},
-				},
-				Network: prefix + "/global/networks/default",
-			},
-		},
-	}
-	op, err := computeService.Instances.Insert(projectID, projectZone, instance).Do()
-	if err != nil {
-		return nil, fmt.Errorf("Failed to create instance: %v", err)
-	}
-	st.createOp = op.Name
-	st.instName = name
-	log.Printf("%v now building in VM %v", brev, st.instName)
-	// Start the goroutine to monitor the VM now that it's booting. This might
-	// take minutes for it to come up, and then even more time to do the build.
-	go func() {
-		err := watchVM(st)
-		if st.hasEvent("instance_created") {
-			deleteVM(projectZone, st.instName)
-		}
-		st.setDone(err == nil)
-		if err != nil {
-			fmt.Fprintf(st, "\n\nError: %v\n", err)
-		}
-		donec <- builderRev{conf.name, rev}
-	}()
-	return st, nil
-}
-
-// watchVM monitors a VM doing a build.
-func watchVM(st *buildStatus) (retErr error) {
-	goodRes := func(res *http.Response, err error, what string) bool {
-		if err != nil {
-			retErr = fmt.Errorf("%s: %v", what, err)
-			return false
-		}
-		if res.StatusCode/100 != 2 {
-			slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 4<<10))
-			retErr = fmt.Errorf("%s: %v; body: %s", what, res.Status, slurp)
-			res.Body.Close()
-			return false
-
-		}
-		return true
-	}
-	st.logEventTime("instance_create_requested")
-	// Wait for instance create operation to succeed.
-OpLoop:
-	for {
-		time.Sleep(2 * time.Second)
-		op, err := computeService.ZoneOperations.Get(projectID, projectZone, st.createOp).Do()
-		if err != nil {
-			return fmt.Errorf("Failed to get op %s: %v", st.createOp, err)
-		}
-		switch op.Status {
-		case "PENDING", "RUNNING":
-			continue
-		case "DONE":
-			if op.Error != nil {
-				for _, operr := range op.Error.Errors {
-					return fmt.Errorf("Error creating instance: %+v", operr)
-				}
-				return errors.New("Failed to start.")
-			}
-			break OpLoop
-		default:
-			log.Fatalf("Unknown status %q: %+v", op.Status, op)
-		}
-	}
-	st.logEventTime("instance_created")
-
-	inst, err := computeService.Instances.Get(projectID, projectZone, st.instName).Do()
-	if err != nil {
-		return fmt.Errorf("Error getting instance %s details after creation: %v", st.instName, err)
-	}
-	st.logEventTime("got_instance_info")
-
-	// Find its internal IP.
-	var ip string
-	for _, iface := range inst.NetworkInterfaces {
-		if strings.HasPrefix(iface.NetworkIP, "10.") {
-			ip = iface.NetworkIP
-		}
-	}
-	if ip == "" {
-		return errors.New("didn't find its internal IP address")
-	}
-
-	// Wait for it to boot and its buildlet to come up on port 80.
-	st.logEventTime("waiting_for_buildlet")
-	buildletURL := "http://" + ip
-	const numTries = 60
-	var alive bool
-	impatientClient := &http.Client{Timeout: 2 * time.Second}
-	for i := 1; i <= numTries; i++ {
-		res, err := impatientClient.Get(buildletURL)
-		if err != nil {
-			time.Sleep(1 * time.Second)
-			continue
-		}
-		res.Body.Close()
-		if res.StatusCode != 200 {
-			return fmt.Errorf("buildlet returned HTTP status code %d on try number %d", res.StatusCode, i)
-		}
-		st.logEventTime("buildlet_up")
-		alive = true
-		break
-	}
-	if !alive {
-		return fmt.Errorf("buildlet didn't come up in %d seconds", numTries)
-	}
-
-	// Write the VERSION file.
-	st.logEventTime("start_write_version_tar")
-	verReq, err := http.NewRequest("PUT", buildletURL+"/writetgz", versionTgz(st.rev))
-	if err != nil {
-		return err
-	}
-	verRes, err := http.DefaultClient.Do(verReq)
-	if !goodRes(verRes, err, "writing VERSION tgz") {
-		return
-	}
-
-	// Feed the buildlet a tar file for it to extract.
-	// TODO: cache these.
-	st.logEventTime("start_fetch_gerrit_tgz")
-	tarRes, err := http.Get("https://go.googlesource.com/go/+archive/" + st.rev + ".tar.gz")
-	if !goodRes(tarRes, err, "fetching tarball from Gerrit") {
-		return
-	}
-
-	st.logEventTime("start_write_tar")
-	putReq, err := http.NewRequest("PUT", buildletURL+"/writetgz", tarRes.Body)
-	if err != nil {
-		tarRes.Body.Close()
-		return err
-	}
-	putRes, err := http.DefaultClient.Do(putReq)
-	st.logEventTime("end_write_tar")
-	tarRes.Body.Close()
-	if !goodRes(putRes, err, "writing tarball to buildlet") {
-		return
-	}
-
-	// Run the builder
-	cmd := "all.bash"
-	if strings.HasPrefix(st.name, "windows-") {
-		cmd = "all.bat"
-	} else if strings.HasPrefix(st.name, "plan9-") {
-		cmd = "all.rc"
-	}
-	execStartTime := time.Now()
-	st.logEventTime("start_exec")
-	res, err := http.PostForm(buildletURL+"/exec", url.Values{"cmd": {"src/" + cmd}})
-	if !goodRes(res, err, "running "+cmd) {
-		return
-	}
-	defer res.Body.Close()
-	st.logEventTime("running_exec")
-	// Stream the output:
-	if _, err := io.Copy(st, res.Body); err != nil {
-		return fmt.Errorf("error copying response: %v", err)
-	}
-	st.logEventTime("done")
-
-	// Don't record to the dashboard unless we heard the trailer from
-	// the buildlet, otherwise it was probably some unrelated error
-	// (like the VM being killed, or the buildlet crashing due to
-	// e.g. https://golang.org/issue/9309, since we require a tip
-	// build of the buildlet to get Trailers support)
-	state := res.Trailer.Get("Process-State")
-	if state == "" {
-		return errors.New("missing Process-State trailer from HTTP response; buildlet built with old (<= 1.4) Go?")
-	}
-
-	conf := builders[st.name]
-	var log string
-	if state != "ok" {
-		log = st.logs()
-	}
-	if err := conf.recordResult(state == "ok", st.rev, log, time.Since(execStartTime)); err != nil {
-		return fmt.Errorf("Status was %q but failed to report it to the dashboard: %v", state, err)
-	}
-	if state != "ok" {
-		return fmt.Errorf("%s failed: %v", cmd, state)
-	}
-	return nil
-}
-
-type eventAndTime struct {
-	evt string
-	t   time.Time
-}
-
-// buildStatus is the status of a build.
-type buildStatus struct {
-	// Immutable:
-	builderRev
-	start     time.Time
-	container string // container ID for docker, else it's a VM
-
-	// Immutable, used by VM only:
-	createOp string // Instances.Insert operation name
-	instName string
-
-	mu        sync.Mutex   // guards following
-	done      time.Time    // finished running
-	succeeded bool         // set when done
-	output    bytes.Buffer // stdout and stderr
-	events    []eventAndTime
-}
-
-func (st *buildStatus) setDone(succeeded bool) {
-	st.mu.Lock()
-	defer st.mu.Unlock()
-	st.succeeded = succeeded
-	st.done = time.Now()
-}
-
-func (st *buildStatus) logEventTime(event string) {
-	st.mu.Lock()
-	defer st.mu.Unlock()
-	st.events = append(st.events, eventAndTime{event, time.Now()})
-}
-
-func (st *buildStatus) hasEvent(event string) bool {
-	st.mu.Lock()
-	defer st.mu.Unlock()
-	for _, e := range st.events {
-		if e.evt == event {
-			return true
-		}
-	}
-	return false
-}
-
-// htmlStatusLine returns the HTML to show within the <pre> block on
-// the main page's list of active builds.
-func (st *buildStatus) htmlStatusLine() string {
-	st.mu.Lock()
-	defer st.mu.Unlock()
-
-	urlPrefix := "https://go-review.googlesource.com/#/q/"
-	if strings.Contains(st.name, "gccgo") {
-		urlPrefix = "https://code.google.com/p/gofrontend/source/detail?r="
-	}
-
-	var buf bytes.Buffer
-	fmt.Fprintf(&buf, "%s rev <a href='%s%s'>%s</a>",
-		st.name, urlPrefix, st.rev, st.rev)
-
-	if st.done.IsZero() {
-		buf.WriteString(", running")
-	} else if st.succeeded {
-		buf.WriteString(", succeeded")
-	} else {
-		buf.WriteString(", failed")
-	}
-
-	if st.container != "" {
-		fmt.Fprintf(&buf, " in container <a href='%s'>%s</a>", st.logsURL(), st.container)
-	} else {
-		fmt.Fprintf(&buf, " in VM <a href='%s'>%s</a>", st.logsURL(), st.instName)
-	}
-
-	t := st.done
-	if t.IsZero() {
-		t = st.start
-	}
-	fmt.Fprintf(&buf, ", %v ago\n", time.Since(t))
-	st.writeEventsLocked(&buf, true)
-	return buf.String()
-}
-
-func (st *buildStatus) logsURL() string {
-	return fmt.Sprintf("/logs?name=%s&rev=%s&st=%p", st.name, st.rev, st)
-}
-
-// st.mu must be held.
-func (st *buildStatus) writeEventsLocked(w io.Writer, html bool) {
-	for i, evt := range st.events {
-		var elapsed string
-		if i != 0 {
-			elapsed = fmt.Sprintf("+%0.1fs", evt.t.Sub(st.events[i-1].t).Seconds())
-		}
-		msg := evt.evt
-		if msg == "running_exec" && html {
-			msg = fmt.Sprintf("<a href='%s'>%s</a>", st.logsURL(), msg)
-		}
-		fmt.Fprintf(w, " %7s %v %s\n", elapsed, evt.t.Format(time.RFC3339), msg)
-	}
-}
-
-func (st *buildStatus) logs() string {
-	st.mu.Lock()
-	logs := st.output.String()
-	st.mu.Unlock()
-	key := builderKey(st.name)
-	return strings.Replace(string(logs), key, "BUILDERKEY", -1)
-}
-
-func (st *buildStatus) Write(p []byte) (n int, err error) {
-	st.mu.Lock()
-	defer st.mu.Unlock()
-	const maxBufferSize = 2 << 20 // 2MB of output is way more than we expect.
-	plen := len(p)
-	if st.output.Len()+len(p) > maxBufferSize {
-		p = p[:maxBufferSize-st.output.Len()]
-	}
-	st.output.Write(p) // bytes.Buffer can't fail
-	return plen, nil
-}
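A minimal sketch of the same capped-writer idea in isolation, using a hypothetical cappedWriter type: Write drops bytes past the cap but still reports the full input length, so io.Copy callers never see a short-write error.

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// cappedWriter keeps at most max bytes but reports every Write as fully
// consumed, mirroring buildStatus.Write above.
type cappedWriter struct {
	buf bytes.Buffer
	max int
}

func (w *cappedWriter) Write(p []byte) (int, error) {
	plen := len(p)
	if w.buf.Len()+len(p) > w.max {
		p = p[:w.max-w.buf.Len()]
	}
	w.buf.Write(p) // bytes.Buffer can't fail
	return plen, nil
}

func main() {
	w := &cappedWriter{max: 10}
	n, err := io.Copy(w, strings.NewReader("0123456789abcdef"))
	fmt.Println(n, err, w.buf.String()) // 16 <nil> 0123456789
}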
-
-// Stop any previous go-commit-watcher Docker tasks, so they don't
-// pile up upon restarts of the coordinator.
-func stopWatchers() {
-	out, err := exec.Command("docker", "ps").Output()
-	if err != nil {
-		return
-	}
-	for _, line := range strings.Split(string(out), "\n") {
-		if !strings.Contains(line, "go-commit-watcher:") {
-			continue
-		}
-		f := strings.Fields(line)
-		exec.Command("docker", "rm", "-f", "-v", f[0]).Run()
-	}
-}
-
-func startWatching(conf watchConfig) (err error) {
-	defer func() {
-		if err != nil {
-			restartWatcherSoon(conf)
-		}
-	}()
-	log.Printf("Starting watcher for %v", conf.repo)
-	if err := condUpdateImage("go-commit-watcher"); err != nil {
-		log.Printf("Failed to setup container for commit watcher: %v", err)
-		return err
-	}
-
-	cmd := exec.Command("docker", append([]string{"run", "-d"}, conf.dockerRunArgs()...)...)
-	all, err := cmd.CombinedOutput()
-	if err != nil {
-		log.Printf("Docker run for commit watcher = err:%v, output: %s", err, all)
-		return err
-	}
-	container := strings.TrimSpace(string(all))
-	// Start a goroutine to wait for the watcher to die.
-	go func() {
-		exec.Command("docker", "wait", container).Run()
-		exec.Command("docker", "rm", "-v", container).Run()
-		log.Printf("Watcher crashed. Restarting soon.")
-		restartWatcherSoon(conf)
-	}()
-	return nil
-}
-
-func restartWatcherSoon(conf watchConfig) {
-	time.AfterFunc(30*time.Second, func() {
-		startWatching(conf)
-	})
-}
-
-func builderKey(builder string) string {
-	master := masterKey()
-	if len(master) == 0 {
-		return ""
-	}
-	h := hmac.New(md5.New, master)
-	io.WriteString(h, builder)
-	return fmt.Sprintf("%x", h.Sum(nil))
-}
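A minimal standalone sketch of this key scheme, assuming a hypothetical master key and builder name: the per-builder key is the hex-encoded HMAC-MD5 of the builder name under the master key, the same derivation retrybuilds performs in builderKeyFromMaster.

package main

import (
	"crypto/hmac"
	"crypto/md5"
	"fmt"
)

// deriveBuilderKey mirrors builderKey above: HMAC-MD5 of the builder name,
// keyed with the master key, rendered as lowercase hex.
func deriveBuilderKey(master []byte, builder string) string {
	h := hmac.New(md5.New, master)
	h.Write([]byte(builder))
	return fmt.Sprintf("%x", h.Sum(nil))
}

func main() {
	// "example-master-key" is a made-up key for illustration only.
	fmt.Println(deriveBuilderKey([]byte("example-master-key"), "linux-amd64"))
}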
-
-func masterKey() []byte {
-	keyOnce.Do(loadKey)
-	return masterKeyCache
-}
-
-var (
-	keyOnce        sync.Once
-	masterKeyCache []byte
-)
-
-func loadKey() {
-	if *masterKeyFile != "" {
-		b, err := ioutil.ReadFile(*masterKeyFile)
-		if err != nil {
-			log.Fatal(err)
-		}
-		masterKeyCache = bytes.TrimSpace(b)
-		return
-	}
-	req, _ := http.NewRequest("GET", "http://metadata.google.internal/computeMetadata/v1/project/attributes/builder-master-key", nil)
-	req.Header.Set("Metadata-Flavor", "Google")
-	res, err := http.DefaultClient.Do(req)
-	if err != nil {
-		log.Fatal("No builder master key available")
-	}
-	defer res.Body.Close()
-	if res.StatusCode != 200 {
-		log.Fatalf("No builder-master-key project attribute available.")
-	}
-	slurp, err := ioutil.ReadAll(res.Body)
-	if err != nil {
-		log.Fatal(err)
-	}
-	masterKeyCache = bytes.TrimSpace(slurp)
-}
-
-func cleanUpOldContainers() {
-	for {
-		for _, cid := range oldContainers() {
-			log.Printf("Cleaning old container %v", cid)
-			exec.Command("docker", "rm", "-v", cid).Run()
-		}
-		time.Sleep(30 * time.Second)
-	}
-}
-
-func oldContainers() []string {
-	out, _ := exec.Command("docker", "ps", "-a", "--filter=status=exited", "--no-trunc", "-q").Output()
-	return strings.Fields(string(out))
-}
-
-// cleanUpOldVMs loops forever and periodically enumerates virtual
-// machines and deletes those which have expired.
-//
-// A VM is considered expired if it has a "delete-at" metadata
-// attribute having a unix timestamp before the current time.
-//
-// This is the safety mechanism to delete VMs which stray from the
-// normal deleting process. VMs are created to run a single build and
-// should be shut down by a controlling process. Due to various types
-// of failures, they might get stranded. To prevent them from getting
-// stranded and wasting resources forever, we instead set the
-// "delete-at" metadata attribute on them when created to some time
-// that's well beyond their expected lifetime.
-func cleanUpOldVMs() {
-	if computeService == nil {
-		return
-	}
-	for {
-		for _, zone := range strings.Split(*cleanZones, ",") {
-			zone = strings.TrimSpace(zone)
-			if err := cleanZoneVMs(zone); err != nil {
-				log.Printf("Error cleaning VMs in zone %q: %v", zone, err)
-			}
-		}
-		time.Sleep(time.Minute)
-	}
-}
-
-// cleanZoneVMs is part of cleanUpOldVMs, operating on a single zone.
-func cleanZoneVMs(zone string) error {
-	// Fetch the first 500 (default) running instances and clean
-	// those. We expect that we'll be running many fewer than
-	// that. Even if we have more, eventually the first 500 will
-	// either end or be cleaned, and then the next call will get a
-	// partially-different 500.
-	// TODO(bradfitz): revisit this code if we ever start running
-	// thousands of VMs.
-	list, err := computeService.Instances.List(projectID, zone).Do()
-	if err != nil {
-		return fmt.Errorf("listing instances: %v", err)
-	}
-	for _, inst := range list.Items {
-		if !strings.HasPrefix(inst.Name, "buildlet-") {
-			// We only delete ones we created.
-			continue
-		}
-		if inst.Metadata == nil {
-			// Defensive. Not seen in practice.
-			continue
-		}
-		sawDeleteAt := false
-		for _, it := range inst.Metadata.Items {
-			if it.Key == "delete-at" {
-				sawDeleteAt = true
-				unixDeadline, err := strconv.ParseInt(it.Value, 10, 64)
-				if err != nil {
-					log.Printf("invalid delete-at value %q seen; ignoring", it.Value)
-				}
-				if err == nil && time.Now().Unix() > unixDeadline {
-					log.Printf("Deleting expired VM %q in zone %q ...", inst.Name, zone)
-					deleteVM(zone, inst.Name)
-				}
-			}
-		}
-		if sawDeleteAt && !vmIsBuilding(inst.Name) {
-			log.Printf("Deleting VM %q in zone %q from an earlier coordinator generation ...", inst.Name, zone)
-			deleteVM(zone, inst.Name)
-		}
-	}
-	return nil
-}
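A minimal sketch of the delete-at convention in isolation, with a hypothetical expired helper: the creator stores a Unix timestamp in seconds, and the cleaner deletes any instance whose timestamp is in the past.

package main

import (
	"fmt"
	"strconv"
	"time"
)

// expired reports whether a "delete-at" metadata value (a Unix timestamp in
// seconds, as written by startBuildingInVM) lies before now.
func expired(deleteAt string, now time.Time) bool {
	unix, err := strconv.ParseInt(deleteAt, 10, 64)
	if err != nil {
		return false // malformed values are ignored, as in cleanZoneVMs
	}
	return now.Unix() > unix
}

func main() {
	deadline := fmt.Sprint(time.Now().Add(45 * time.Minute).Unix()) // like vmDeleteTimeout
	fmt.Println(expired(deadline, time.Now()))                      // false: not yet due
	fmt.Println(expired(deadline, time.Now().Add(time.Hour)))       // true: past the deadline
}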
-
-func deleteVM(zone, instName string) {
-	op, err := computeService.Instances.Delete(projectID, zone, instName).Do()
-	if err != nil {
-		log.Printf("Failed to delete instance %q in zone %q: %v", instName, zone, err)
-		return
-	}
-	log.Printf("Sent request to delete instance %q in zone %q. Operation ID == %v", instName, zone, op.Id)
-}
-
-func hasComputeScope() bool {
-	if !metadata.OnGCE() {
-		return false
-	}
-	scopes, err := metadata.Scopes("default")
-	if err != nil {
-		log.Printf("failed to query metadata default scopes: %v", err)
-		return false
-	}
-	for _, v := range scopes {
-		if v == compute.DevstorageFull_controlScope {
-			return true
-		}
-	}
-	return false
-}
-
-// dash is copied from the builder binary. It runs the given method and command on the dashboard.
-//
-// TODO(bradfitz,adg): unify this somewhere?
-//
-// If args is non-nil it is encoded as the URL query string.
-// If req is non-nil it is JSON-encoded and passed as the body of the HTTP POST.
-// If resp is non-nil the server's response is decoded into the value pointed
-// to by resp (resp must be a pointer).
-func dash(meth, cmd string, args url.Values, req, resp interface{}) error {
-	const builderVersion = 1 // keep in sync with dashboard/app/build/handler.go
-	argsCopy := url.Values{"version": {fmt.Sprint(builderVersion)}}
-	for k, v := range args {
-		if k == "version" {
-			panic(`dash: reserved args key: "version"`)
-		}
-		argsCopy[k] = v
-	}
-	var r *http.Response
-	var err error
-	cmd = "https://build.golang.org/" + cmd + "?" + argsCopy.Encode()
-	switch meth {
-	case "GET":
-		if req != nil {
-			log.Panicf("%s to %s with req", meth, cmd)
-		}
-		r, err = http.Get(cmd)
-	case "POST":
-		var body io.Reader
-		if req != nil {
-			b, err := json.Marshal(req)
-			if err != nil {
-				return err
-			}
-			body = bytes.NewBuffer(b)
-		}
-		r, err = http.Post(cmd, "text/json", body)
-	default:
-		log.Panicf("%s: invalid method %q", cmd, meth)
-		panic("invalid method: " + meth)
-	}
-	if err != nil {
-		return err
-	}
-	defer r.Body.Close()
-	if r.StatusCode != http.StatusOK {
-		return fmt.Errorf("bad http response: %v", r.Status)
-	}
-	body := new(bytes.Buffer)
-	if _, err := body.ReadFrom(r.Body); err != nil {
-		return err
-	}
-
-	// Read JSON-encoded Response into provided resp
-	// and return an error if present.
-	var result = struct {
-		Response interface{}
-		Error    string
-	}{
-		// Put the provided resp in here as it can be a pointer to
-		// some value we should unmarshal into.
-		Response: resp,
-	}
-	if err = json.Unmarshal(body.Bytes(), &result); err != nil {
-		log.Printf("json unmarshal %#q: %s\n", body.Bytes(), err)
-		return err
-	}
-	if result.Error != "" {
-		return errors.New(result.Error)
-	}
-
-	return nil
-}
-
-func versionTgz(rev string) io.Reader {
-	var buf bytes.Buffer
-	zw := gzip.NewWriter(&buf)
-	tw := tar.NewWriter(zw)
-
-	contents := "devel " + rev
-	check(tw.WriteHeader(&tar.Header{
-		Name: "VERSION",
-		Mode: 0644,
-		Size: int64(len(contents)),
-	}))
-	_, err := io.WriteString(tw, contents)
-	check(err)
-	check(tw.Close())
-	check(zw.Close())
-	return bytes.NewReader(buf.Bytes())
-}
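A minimal standalone sketch of the same tgz round trip, with a hypothetical revision string: build a one-file VERSION archive in memory the way versionTgz does, then read it back with the gzip and tar readers a consumer would use.

package main

import (
	"archive/tar"
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"
	"log"
)

func main() {
	// Write a single VERSION entry into an in-memory .tar.gz.
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	tw := tar.NewWriter(zw)
	contents := "devel 0123abcd" // hypothetical revision
	if err := tw.WriteHeader(&tar.Header{Name: "VERSION", Mode: 0644, Size: int64(len(contents))}); err != nil {
		log.Fatal(err)
	}
	if _, err := tw.Write([]byte(contents)); err != nil {
		log.Fatal(err)
	}
	if err := tw.Close(); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}

	// Read it back: gunzip, then walk the tar stream.
	zr, err := gzip.NewReader(bytes.NewReader(buf.Bytes()))
	if err != nil {
		log.Fatal(err)
	}
	tr := tar.NewReader(zr)
	hdr, err := tr.Next()
	if err != nil {
		log.Fatal(err)
	}
	body, err := ioutil.ReadAll(tr)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s: %q\n", hdr.Name, body) // VERSION: "devel 0123abcd"
}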
-
-// check is only for things which should be impossible (not even rare)
-// to fail.
-func check(err error) {
-	if err != nil {
-		panic("previously assumed to never fail: " + err.Error())
-	}
-}
diff --git a/dashboard/env/commit-watcher/Makefile b/dashboard/env/commit-watcher/Makefile
index 7ca67b8..d154c51 100644
--- a/dashboard/env/commit-watcher/Makefile
+++ b/dashboard/env/commit-watcher/Makefile
@@ -6,4 +6,4 @@ docker: Dockerfile
 	docker build -t go-commit-watcher .
 
 docker-commit-watcher.tar.gz: docker
-	docker save go-commit-watcher | gzip | (cd ../../upload && go run upload.go --public go-builder-data/docker-commit-watcher.tar.gz)
+	docker save go-commit-watcher | gzip | (cd ../../cmd/upload && go run upload.go --public go-builder-data/docker-commit-watcher.tar.gz)
diff --git a/dashboard/env/linux-x86-base/Makefile b/dashboard/env/linux-x86-base/Makefile
index 464bac3..981675c 100644
--- a/dashboard/env/linux-x86-base/Makefile
+++ b/dashboard/env/linux-x86-base/Makefile
@@ -6,7 +6,7 @@ docker: Dockerfile
 	docker build -t gobuilders/linux-x86-base .
 
 docker-linux.base.tar.gz: docker
-	docker save gobuilders/linux-x86-base | gzip | (cd ../../upload && go run upload.go --public go-builder-data/docker-linux.base.tar.gz)
+	docker save gobuilders/linux-x86-base | gzip | (cd ../../cmd/upload && go run upload.go --public go-builder-data/docker-linux.base.tar.gz)
 
 check: docker
 	docker run -e GOROOT_BOOTSTRAP=/go1.4-amd64/go gobuilders/linux-x86-base /usr/local/bin/builder -rev=20a10e7ddd1 -buildroot=/ -v -report=false linux-amd64-temp
diff --git a/dashboard/env/linux-x86-clang/Makefile b/dashboard/env/linux-x86-clang/Makefile
index 2286762..2c945bb 100644
--- a/dashboard/env/linux-x86-clang/Makefile
+++ b/dashboard/env/linux-x86-clang/Makefile
@@ -6,7 +6,7 @@ docker: Dockerfile
 	docker build -t gobuilders/linux-x86-clang .
 
 docker-linux.clang.tar.gz: docker
-	docker save gobuilders/linux-x86-clang | gzip | (cd ../../upload && go run upload.go --public go-builder-data/docker-linux.clang.tar.gz)
+	docker save gobuilders/linux-x86-clang | gzip | (cd ../../cmd/upload && go run upload.go --public go-builder-data/docker-linux.clang.tar.gz)
 
 check: docker
 	docker run -e GOROOT_BOOTSTRAP=/go1.4-amd64/go gobuilders/linux-x86-clang /usr/local/bin/builder -rev=20a10e7ddd1b -buildroot=/ -v -report=false linux-amd64-temp
diff --git a/dashboard/env/linux-x86-gccgo/Makefile b/dashboard/env/linux-x86-gccgo/Makefile
index a12f6e1..e114b3d 100644
--- a/dashboard/env/linux-x86-gccgo/Makefile
+++ b/dashboard/env/linux-x86-gccgo/Makefile
@@ -6,7 +6,7 @@ docker: Dockerfile
 	docker build -t gobuilders/linux-x86-gccgo .
 
 docker-linux.gccgo.tar.gz: docker
-	docker save gobuilders/linux-x86-gccgo | gzip | (cd ../../upload && go run upload.go --public go-builder-data/docker-linux.gccgo.tar.gz)
+	docker save gobuilders/linux-x86-gccgo | gzip | (cd ../../cmd/upload && go run upload.go --public go-builder-data/docker-linux.gccgo.tar.gz)
 
 check: docker
 	docker run gobuilders/linux-x86-gccgo /usr/local/bin/builder -tool="gccgo" -rev=b9151e911a54 -v -cmd='make RUNTESTFLAGS="--target_board=unix/-m64" check-go' -report=false linux-amd64-gccgo-temp
diff --git a/dashboard/env/linux-x86-nacl/Makefile b/dashboard/env/linux-x86-nacl/Makefile
index adb0c84..d2f76ed 100644
--- a/dashboard/env/linux-x86-nacl/Makefile
+++ b/dashboard/env/linux-x86-nacl/Makefile
@@ -6,7 +6,7 @@ docker: Dockerfile
 	docker build -t gobuilders/linux-x86-nacl .
 
 upload: docker
-	docker save gobuilders/linux-x86-nacl | gzip | (cd ../../upload && go run upload.go --public go-builder-data/docker-linux.nacl.tar.gz)
+	docker save gobuilders/linux-x86-nacl | gzip | (cd ../../cmd/upload && go run upload.go --public go-builder-data/docker-linux.nacl.tar.gz)
 
 check: docker
 	docker run gobuilders/linux-x86-nacl /usr/local/bin/builder -rev=77e96c9208d0 -buildroot=/ -v -cmd=/usr/local/bin/build-command.pl -report=false nacl-amd64p32
diff --git a/dashboard/env/linux-x86-sid/Makefile b/dashboard/env/linux-x86-sid/Makefile
index eac489c..df7b2cf 100644
--- a/dashboard/env/linux-x86-sid/Makefile
+++ b/dashboard/env/linux-x86-sid/Makefile
@@ -6,7 +6,7 @@ docker: Dockerfile
 	docker build -t gobuilders/linux-x86-sid .
 
 docker-linux.sid.tar.gz: docker
-	docker save gobuilders/linux-x86-sid | gzip | (cd ../../upload && go run upload.go --public go-builder-data/docker-linux.sid.tar.gz)
+	docker save gobuilders/linux-x86-sid | gzip | (cd ../../cmd/upload && go run upload.go --public go-builder-data/docker-linux.sid.tar.gz)
 
 check: docker
 	docker run -e GOROOT_BOOTSTRAP=/go1.4-amd64/go gobuilders/linux-x86-sid /usr/local/bin/builder -rev=20a10e7ddd1b -buildroot=/ -v -report=false linux-amd64-sid
diff --git a/dashboard/retrybuilds/retrybuilds.go b/dashboard/retrybuilds/retrybuilds.go
deleted file mode 100644
index c432df2..0000000
--- a/dashboard/retrybuilds/retrybuilds.go
+++ /dev/null
@@ -1,235 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// The retrybuilds command clears build failures from the build.golang.org dashboard
-// to force them to be rebuilt.
-//
-// Valid usage modes:
-//
-//   retrybuilds -loghash=f45f0eb8
-//   retrybuilds -builder=openbsd-amd64
-//   retrybuilds -builder=openbsd-amd64 -hash=6fecb7
-//   retrybuilds -redo-flaky
-//   retrybuilds -redo-flaky -builder=linux-amd64-clang
-package main
-
-import (
-	"bytes"
-	"crypto/hmac"
-	"crypto/md5"
-	"flag"
-	"fmt"
-	"io/ioutil"
-	"log"
-	"net/http"
-	"net/url"
-	"os"
-	"path/filepath"
-	"strings"
-	"sync"
-)
-
-var (
-	masterKeyFile = flag.String("masterkey", filepath.Join(os.Getenv("HOME"), "keys", "gobuilder-master.key"), "path to Go builder master key. If present, the key argument is not necessary")
-	keyFile       = flag.String("key", "", "path to key file")
-	builder       = flag.String("builder", "", "builder to wipe a result for.")
-	hash          = flag.String("hash", "", "Hash to wipe. If empty, all will be wiped.")
-	redoFlaky     = flag.Bool("redo-flaky", false, "Reset all flaky builds. If builder is empty, the master key is required.")
-	builderPrefix = flag.String("builder-prefix", "https://build.golang.org", "builder URL prefix")
-	logHash       = flag.String("loghash", "", "If non-empty, clear the build that failed with this loghash prefix")
-)
-
-type Failure struct {
-	Builder string
-	Hash    string
-	LogURL  string
-}
-
-func main() {
-	flag.Parse()
-	*builderPrefix = strings.TrimSuffix(*builderPrefix, "/")
-	if *logHash != "" {
-		substr := "/log/" + *logHash
-		for _, f := range failures() {
-			if strings.Contains(f.LogURL, substr) {
-				wipe(f.Builder, f.Hash)
-			}
-		}
-		return
-	}
-	if *redoFlaky {
-		fixTheFlakes()
-		return
-	}
-	if *builder == "" {
-		log.Fatalf("Missing -builder, -redo-flaky, or -loghash flag.")
-	}
-	wipe(*builder, fullHash(*hash))
-}
-
-func fixTheFlakes() {
-	gate := make(chan bool, 50)
-	var wg sync.WaitGroup
-	for _, f := range failures() {
-		f := f
-		if *builder != "" && f.Builder != *builder {
-			continue
-		}
-		gate <- true
-		wg.Add(1)
-		go func() {
-			defer wg.Done()
-			defer func() { <-gate }()
-			res, err := http.Get(f.LogURL)
-			if err != nil {
-				log.Fatalf("Error fetching %s: %v", f.LogURL, err)
-			}
-			defer res.Body.Close()
-			failLog, err := ioutil.ReadAll(res.Body)
-			if err != nil {
-				log.Fatalf("Error reading %s: %v", f.LogURL, err)
-			}
-			if isFlaky(string(failLog)) {
-				log.Printf("Restarting flaky %+v", f)
-				wipe(f.Builder, f.Hash)
-			}
-		}()
-	}
-	wg.Wait()
-}
-
-var flakePhrases = []string{
-	"No space left on device",
-	"fatal error: error in backend: IO failure on output stream",
-	"Boffset: unknown state 0",
-	"Bseek: unknown state 0",
-	"error exporting repository: exit status",
-	"remote error: User Is Over Quota",
-	"fatal: remote did not send all necessary objects",
-}
-
-func isFlaky(failLog string) bool {
-	if strings.HasPrefix(failLog, "exit status ") {
-		return true
-	}
-	for _, phrase := range flakePhrases {
-		if strings.Contains(failLog, phrase) {
-			return true
-		}
-	}
-	numLines := strings.Count(failLog, "\n")
-	if numLines < 20 && strings.Contains(failLog, "error: exit status") {
-		return true
-	}
-	// e.g. fatal: destination path 'go.tools.TMP' already exists and is not an empty directory.
-	// To be fixed in golang.org/issue/9407
-	if strings.Contains(failLog, "fatal: destination path '") &&
-		strings.Contains(failLog, "' already exists and is not an empty directory.") {
-		return true
-	}
-	return false
-}
-
-func fullHash(h string) string {
-	if h == "" || len(h) == 40 {
-		return h
-	}
-	for _, f := range failures() {
-		if strings.HasPrefix(f.Hash, h) {
-			return f.Hash
-		}
-	}
-	log.Fatalf("invalid hash %q; failed to find its full hash. Not a recent failure?", h)
-	panic("unreachable")
-}
-
-// hash may be empty
-func wipe(builder, hash string) {
-	if hash != "" {
-		log.Printf("Clearing %s, hash %s", builder, hash)
-	} else {
-		log.Printf("Clearing all builds for %s", builder)
-	}
-	vals := url.Values{
-		"builder": {builder},
-		"hash":    {hash},
-		"key":     {builderKey(builder)},
-	}
-	res, err := http.PostForm(*builderPrefix+"/clear-results?"+vals.Encode(), nil)
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer res.Body.Close()
-	if res.StatusCode != 200 {
-		log.Fatalf("Error clearing %v hash %q: %v", builder, hash, res.Status)
-	}
-}
-
-func builderKey(builder string) string {
-	if v, ok := builderKeyFromMaster(builder); ok {
-		return v
-	}
-	if *keyFile == "" {
-		log.Fatalf("No --key specified for builder %s", builder)
-	}
-	slurp, err := ioutil.ReadFile(*keyFile)
-	if err != nil {
-		log.Fatalf("Error reading builder key %s: %v", builder, err)
-	}
-	return strings.TrimSpace(string(slurp))
-}
-
-func builderKeyFromMaster(builder string) (key string, ok bool) {
-	if *masterKeyFile == "" {
-		return
-	}
-	slurp, err := ioutil.ReadFile(*masterKeyFile)
-	if err != nil {
-		return
-	}
-	h := hmac.New(md5.New, bytes.TrimSpace(slurp))
-	h.Write([]byte(builder))
-	return fmt.Sprintf("%x", h.Sum(nil)), true
-}
-
-var (
-	failMu    sync.Mutex
-	failCache []Failure
-)
-
-func failures() (ret []Failure) {
-	failMu.Lock()
-	ret = failCache
-	failMu.Unlock()
-	if ret != nil {
-		return
-	}
-	ret = []Failure{} // non-nil
-
-	res, err := http.Get(*builderPrefix + "/?mode=failures")
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer res.Body.Close()
-	slurp, err := ioutil.ReadAll(res.Body)
-	if err != nil {
-		log.Fatal(err)
-	}
-	body := string(slurp)
-	for _, line := range strings.Split(body, "\n") {
-		f := strings.Fields(line)
-		if len(f) == 3 {
-			ret = append(ret, Failure{
-				Hash:    f[0],
-				Builder: f[1],
-				LogURL:  f[2],
-			})
-		}
-	}
-
-	failMu.Lock()
-	failCache = ret
-	failMu.Unlock()
-	return ret
-}
diff --git a/dashboard/updater/updater.go b/dashboard/updater/updater.go
deleted file mode 100644
index 81919f6..0000000
--- a/dashboard/updater/updater.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main // import "golang.org/x/tools/dashboard/updater"
-
-import (
-	"bytes"
-	"encoding/json"
-	"encoding/xml"
-	"flag"
-	"fmt"
-	"io/ioutil"
-	"net/http"
-	"net/url"
-	"os"
-	"os/exec"
-	"strings"
-)
-
-var (
-	builder   = flag.String("builder", "", "builder name")
-	key       = flag.String("key", "", "builder key")
-	gopath    = flag.String("gopath", "", "path to go repo")
-	dashboard = flag.String("dashboard", "build.golang.org", "Go Dashboard Host")
-	batch     = flag.Int("batch", 100, "upload batch size")
-)
-
-// Do not benchmark beyond this commit.
-// There is little sense in benchmarking all the way back to the
-// first commit, and the benchmark won't build anyway.
-const Go1Commit = "0051c7442fed" // test/bench/shootout: update timing.log to Go 1.
-
-// HgLog represents a single Mercurial revision.
-type HgLog struct {
-	Hash   string
-	Branch string
-	Files  string
-}
-
-func main() {
-	flag.Parse()
-	logs := hgLog()
-	var hashes []string
-	ngo1 := 0
-	for i := range logs {
-		if strings.HasPrefix(logs[i].Hash, Go1Commit) {
-			break
-		}
-		if needsBenchmarking(&logs[i]) {
-			hashes = append(hashes, logs[i].Hash)
-		}
-		ngo1++
-	}
-	fmt.Printf("found %v commits, %v after Go1, %v need benchmarking\n", len(logs), ngo1, len(hashes))
-	for i := 0; i < len(hashes); i += *batch {
-		j := i + *batch
-		if j > len(hashes) {
-			j = len(hashes)
-		}
-		fmt.Printf("sending %v-%v... ", i, j)
-		res := postCommits(hashes[i:j])
-		fmt.Printf("%s\n", res)
-	}
-}
-
-func hgLog() []HgLog {
-	var out bytes.Buffer
-	cmd := exec.Command("hg", "log", "--encoding=utf-8", "--template", xmlLogTemplate)
-	cmd.Dir = *gopath
-	cmd.Stdout = &out
-	cmd.Stderr = os.Stderr
-	err := cmd.Run()
-	if err != nil {
-		fmt.Printf("failed to execute 'hg log': %v\n", err)
-		os.Exit(1)
-	}
-	var top struct{ Log []HgLog }
-	err = xml.Unmarshal([]byte("<Top>"+out.String()+"</Top>"), &top)
-	if err != nil {
-		fmt.Printf("failed to parse log: %v\n", err)
-		os.Exit(1)
-	}
-	return top.Log
-}
-
-func needsBenchmarking(log *HgLog) bool {
-	if log.Branch != "" {
-		return false
-	}
-	for _, f := range strings.Split(log.Files, " ") {
-		if (strings.HasPrefix(f, "include") || strings.HasPrefix(f, "src")) &&
-			!strings.HasSuffix(f, "_test.go") && !strings.Contains(f, "testdata") {
-			return true
-		}
-	}
-	return false
-}
-
-func postCommits(hashes []string) string {
-	args := url.Values{"builder": {*builder}, "key": {*key}}
-	cmd := fmt.Sprintf("http://%v/updatebenchmark?%v", *dashboard, args.Encode())
-	b, err := json.Marshal(hashes)
-	if err != nil {
-		return fmt.Sprintf("failed to encode request: %v\n", err)
-	}
-	r, err := http.Post(cmd, "text/json", bytes.NewReader(b))
-	if err != nil {
-		return fmt.Sprintf("failed to send http request: %v\n", err)
-	}
-	defer r.Body.Close()
-	if r.StatusCode != http.StatusOK {
-		return fmt.Sprintf("http request failed: %v\n", r.Status)
-	}
-	resp, err := ioutil.ReadAll(r.Body)
-	if err != nil {
-		return fmt.Sprintf("failed to read http response: %v\n", err)
-	}
-	return string(resp)
-}
-
-const xmlLogTemplate = `
-        <Log>
-        <Hash>{node|escape}</Hash>
-        <Branch>{branches}</Branch>
-        <Files>{files}</Files>
-        </Log>
-`
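A minimal sketch of how output in this template shape can be decoded, using hypothetical hg output and a synthetic <Top> wrapper element as in hgLog above: repeated <Log> elements unmarshal into the Log slice by field name.

package main

import (
	"encoding/xml"
	"fmt"
	"log"
)

type HgLog struct {
	Hash   string
	Branch string
	Files  string
}

func main() {
	// Hypothetical "hg log" output produced by the template above.
	raw := `
        <Log>
        <Hash>0051c7442fed</Hash>
        <Branch></Branch>
        <Files>src/runtime/proc.c test/bench/shootout/timing.log</Files>
        </Log>
`
	var top struct{ Log []HgLog }
	// Wrap in a synthetic root element before decoding, as hgLog does.
	if err := xml.Unmarshal([]byte("<Top>"+raw+"</Top>"), &top); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", top.Log[0])
}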
diff --git a/dashboard/upload/upload.go b/dashboard/upload/upload.go
deleted file mode 100644
index 44f5a72..0000000
--- a/dashboard/upload/upload.go
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build upload
-
-//       ^ this is so we don't break the x/tools/... build due
-//         to missing dependencies on the builders. We don't want full builds
-//         needing to pull in dependencies outside of the x/tools repo.
-
-// The upload command writes a file to Google Cloud Storage. It's used
-// exclusively by the Makefiles in the Go project repos. Think of it
-// as a very light version of gsutil or gcloud, but with some
-// Go-specific configuration knowledge baked in.
-package main
-
-import (
-	"bytes"
-	"flag"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"log"
-	"net/http"
-	"os"
-	"path/filepath"
-	"strings"
-
-	"golang.org/x/oauth2"
-	"golang.org/x/oauth2/google"
-	"google.golang.org/cloud"
-	"google.golang.org/cloud/storage"
-)
-
-var (
-	public  = flag.Bool("public", false, "object should be world-readable")
-	file    = flag.String("file", "-", "Filename to read object from, or '-' for stdin.")
-	verbose = flag.Bool("verbose", false, "verbose logging")
-)
-
-func main() {
-	flag.Usage = func() {
-		fmt.Fprintf(os.Stderr, "Usage: upload [--public] [--file=...] <bucket/object>\n")
-		flag.PrintDefaults()
-	}
-	flag.Parse()
-	if flag.NArg() != 1 {
-		flag.Usage()
-		os.Exit(1)
-	}
-	args := strings.SplitN(flag.Arg(0), "/", 2)
-	if len(args) != 2 {
-		flag.Usage()
-		os.Exit(1)
-	}
-	bucket, object := args[0], args[1]
-
-	proj, ok := bucketProject[bucket]
-	if !ok {
-		log.Fatalf("bucket %q doesn't have an associated project in upload.go", bucket)
-	}
-
-	ts, err := tokenSource(bucket)
-	if err != nil {
-		log.Fatalf("Failed to get an OAuth2 token source: %v", err)
-	}
-	httpClient := oauth2.NewClient(oauth2.NoContext, ts)
-
-	ctx := cloud.NewContext(proj, httpClient)
-	w := storage.NewWriter(ctx, bucket, object)
-	// If you don't give the owners access, the web UI seems to
-	// have a bug and can't see that the object is public, so it
-	// won't render the "Shared Publicly" link. So we do that, even
-	// though it's dumb and unnecessary otherwise:
-	w.ACL = append(w.ACL, storage.ACLRule{Entity: storage.ACLEntity("project-owners-" + proj), Role: storage.RoleOwner})
-	if *public {
-		w.ACL = append(w.ACL, storage.ACLRule{Entity: storage.AllUsers, Role: storage.RoleReader})
-	}
-	var content io.Reader
-	if *file == "-" {
-		content = os.Stdin
-	} else {
-		content, err = os.Open(*file)
-		if err != nil {
-			log.Fatal(err)
-		}
-	}
-
-	const maxSlurp = 1 << 20
-	var buf bytes.Buffer
-	n, err := io.CopyN(&buf, content, maxSlurp)
-	if err != nil && err != io.EOF {
-		log.Fatalf("Error reading from stdin: %v, %v", n, err)
-	}
-	w.ContentType = http.DetectContentType(buf.Bytes())
-
-	_, err = io.Copy(w, io.MultiReader(&buf, content))
-	if cerr := w.Close(); cerr != nil && err == nil {
-		err = cerr
-	}
-	if err != nil {
-		log.Fatalf("Write error: %v", err)
-	}
-	if *verbose {
-		log.Printf("Wrote %v", object)
-	}
-	os.Exit(0)
-}
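A minimal sketch of the sniff-then-stream pattern used above, with a hypothetical sniffAndStream helper: buffer at most the first megabyte to detect the content type, then hand back the buffered prefix plus the remainder via io.MultiReader so the whole object is never held in memory.

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"strings"
)

// sniffAndStream reads at most 1 MB from content to detect its MIME type,
// then returns a reader that replays the sniffed prefix followed by the rest.
func sniffAndStream(content io.Reader) (string, io.Reader, error) {
	const maxSlurp = 1 << 20
	var buf bytes.Buffer
	if _, err := io.CopyN(&buf, content, maxSlurp); err != nil && err != io.EOF {
		return "", nil, err
	}
	ctype := http.DetectContentType(buf.Bytes())
	return ctype, io.MultiReader(&buf, content), nil
}

func main() {
	ctype, body, err := sniffAndStream(strings.NewReader("<html><body>hello</body></html>"))
	if err != nil {
		panic(err)
	}
	rest, _ := ioutil.ReadAll(body)
	fmt.Println(ctype, len(rest)) // text/html; charset=utf-8 31
}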
-
-var bucketProject = map[string]string{
-	"go-builder-data":       "symbolic-datum-552",
-	"http2-demo-server-tls": "symbolic-datum-552",
-	"winstrap":              "999119582588",
-	"gobuilder":             "999119582588", // deprecated
-}
-
-func tokenSource(bucket string) (oauth2.TokenSource, error) {
-	proj := bucketProject[bucket]
-	fileName := filepath.Join(os.Getenv("HOME"), "keys", proj+".key.json")
-	jsonConf, err := ioutil.ReadFile(fileName)
-	if err != nil {
-		if os.IsNotExist(err) {
-			return nil, fmt.Errorf("Missing JSON key configuration. Download the Service Account JSON key from https://console.developers.google.com/project/%s/apiui/credential and place it at %s", proj, fileName)
-		}
-		return nil, err
-	}
-	conf, err := google.JWTConfigFromJSON(jsonConf, storage.ScopeReadWrite)
-	if err != nil {
-		return nil, fmt.Errorf("reading JSON config from %s: %v", fileName, err)
-	}
-	return conf.TokenSource(oauth2.NoContext), nil
-}
-- 
cgit v1.2.3