aboutsummaryrefslogtreecommitdiff
path: root/go/tools
diff options
context:
space:
mode:
Diffstat (limited to 'go/tools')
-rw-r--r--go/tools/BUILD.bazel13
-rw-r--r--go/tools/bazel/BUILD.bazel34
-rw-r--r--go/tools/bazel/bazel.go73
-rw-r--r--go/tools/bazel/bazel_test.go267
-rw-r--r--go/tools/bazel/empty.txt0
-rw-r--r--go/tools/bazel/runfiles.go442
-rw-r--r--go/tools/bazel_benchmark/BUILD.bazel14
-rw-r--r--go/tools/bazel_benchmark/BUILD.bazel.in6
-rw-r--r--go/tools/bazel_benchmark/WORKSPACE.in26
-rw-r--r--go/tools/bazel_benchmark/bazel_benchmark.go400
-rwxr-xr-xgo/tools/bazel_benchmark/bazel_benchmark.sh29
-rw-r--r--go/tools/bazel_benchmark/hello.go.in9
-rw-r--r--go/tools/bazel_testing/BUILD.bazel33
-rw-r--r--go/tools/bazel_testing/bazel_testing.go535
-rw-r--r--go/tools/bazel_testing/def.bzl62
-rw-r--r--go/tools/builders/BUILD.bazel173
-rw-r--r--go/tools/builders/ar.go104
-rw-r--r--go/tools/builders/asm.go138
-rw-r--r--go/tools/builders/builder.go64
-rw-r--r--go/tools/builders/cgo2.go397
-rw-r--r--go/tools/builders/compilepkg.go615
-rw-r--r--go/tools/builders/cover.go110
-rw-r--r--go/tools/builders/cover_test.go130
-rw-r--r--go/tools/builders/edit.go95
-rw-r--r--go/tools/builders/embed.go340
-rw-r--r--go/tools/builders/embedcfg.go439
-rw-r--r--go/tools/builders/env.go474
-rw-r--r--go/tools/builders/filter.go168
-rw-r--r--go/tools/builders/filter_buildid.go44
-rw-r--r--go/tools/builders/filter_test.go136
-rw-r--r--go/tools/builders/flags.go135
-rw-r--r--go/tools/builders/generate_nogo_main.go196
-rw-r--r--go/tools/builders/generate_test_main.go416
-rw-r--r--go/tools/builders/go_path.go203
-rw-r--r--go/tools/builders/importcfg.go261
-rw-r--r--go/tools/builders/info.go64
-rw-r--r--go/tools/builders/link.go163
-rw-r--r--go/tools/builders/md5sum.go89
-rw-r--r--go/tools/builders/nogo_main.go654
-rw-r--r--go/tools/builders/nogo_typeparams_go117.go23
-rw-r--r--go/tools/builders/nogo_typeparams_go118.go28
-rw-r--r--go/tools/builders/nolint.go39
-rw-r--r--go/tools/builders/nolint_test.go79
-rw-r--r--go/tools/builders/pack.go388
-rw-r--r--go/tools/builders/path.go7
-rw-r--r--go/tools/builders/path_windows.go25
-rw-r--r--go/tools/builders/protoc.go219
-rw-r--r--go/tools/builders/read.go551
-rw-r--r--go/tools/builders/replicate.go167
-rw-r--r--go/tools/builders/stdlib.go169
-rw-r--r--go/tools/builders/stdliblist.go293
-rw-r--r--go/tools/builders/stdliblist_test.go48
-rw-r--r--go/tools/bzltestutil/BUILD.bazel45
-rw-r--r--go/tools/bzltestutil/init.go60
-rw-r--r--go/tools/bzltestutil/lcov.go178
-rw-r--r--go/tools/bzltestutil/lcov_test.go71
-rw-r--r--go/tools/bzltestutil/test2json.go482
-rw-r--r--go/tools/bzltestutil/testdata/empty.json1
-rw-r--r--go/tools/bzltestutil/testdata/empty.xml3
-rw-r--r--go/tools/bzltestutil/testdata/report.json47
-rw-r--r--go/tools/bzltestutil/testdata/report.xml19
-rw-r--r--go/tools/bzltestutil/wrap.go154
-rw-r--r--go/tools/bzltestutil/wrap_test.go63
-rw-r--r--go/tools/bzltestutil/xml.go181
-rw-r--r--go/tools/bzltestutil/xml_test.go55
-rw-r--r--go/tools/coverdata/BUILD.bazel15
-rw-r--r--go/tools/coverdata/coverdata.go58
-rw-r--r--go/tools/fetch_repo/BUILD.bazel23
-rw-r--r--go/tools/fetch_repo/fetch_repo_test.go96
-rw-r--r--go/tools/fetch_repo/main.go75
-rw-r--r--go/tools/gazelle/README.rst2
-rw-r--r--go/tools/go_bin_runner/BUILD.bazel39
-rw-r--r--go/tools/go_bin_runner/main.go41
-rw-r--r--go/tools/go_bin_runner/process.go20
-rw-r--r--go/tools/gopackagesdriver/BUILD.bazel39
-rw-r--r--go/tools/gopackagesdriver/aspect.bzl169
-rw-r--r--go/tools/gopackagesdriver/bazel.go164
-rw-r--r--go/tools/gopackagesdriver/bazel_json_builder.go250
-rw-r--r--go/tools/gopackagesdriver/build_context.go34
-rw-r--r--go/tools/gopackagesdriver/driver_request.go91
-rw-r--r--go/tools/gopackagesdriver/flatpackage.go159
-rw-r--r--go/tools/gopackagesdriver/json_packages_driver.go59
-rw-r--r--go/tools/gopackagesdriver/main.go126
-rw-r--r--go/tools/gopackagesdriver/packageregistry.go111
-rw-r--r--go/tools/gopackagesdriver/utils.go77
-rw-r--r--go/tools/internal/stdlib_tags/BUILD.bazel14
-rw-r--r--go/tools/internal/stdlib_tags/stdlib_tags.go174
-rw-r--r--go/tools/internal/txtar/BUILD.bazel20
-rw-r--r--go/tools/internal/txtar/archive.go140
-rw-r--r--go/tools/internal/txtar/archive_test.go67
-rw-r--r--go/tools/releaser/BUILD.bazel37
-rw-r--r--go/tools/releaser/boilerplate.go78
-rw-r--r--go/tools/releaser/file.go301
-rw-r--r--go/tools/releaser/git.go52
-rw-r--r--go/tools/releaser/github.go132
-rw-r--r--go/tools/releaser/prepare.go253
-rw-r--r--go/tools/releaser/releaser.go127
-rw-r--r--go/tools/releaser/run.go92
-rw-r--r--go/tools/releaser/upgradedep.go561
-rw-r--r--go/tools/releaser/upgradedep_test.go102
-rw-r--r--go/tools/windows-testrunner/windows-testrunner.go105
101 files changed, 14849 insertions, 0 deletions
diff --git a/go/tools/BUILD.bazel b/go/tools/BUILD.bazel
new file mode 100644
index 00000000..50b87d74
--- /dev/null
+++ b/go/tools/BUILD.bazel
@@ -0,0 +1,13 @@
+filegroup(
+ name = "all_files",
+ testonly = True,
+ srcs = [
+ "//go/tools/bazel:all_files",
+ "//go/tools/bazel_testing:all_files",
+ "//go/tools/builders:all_files",
+ "//go/tools/bzltestutil:all_files",
+ "//go/tools/coverdata:all_files",
+ "//go/tools/go_bin_runner:all_files",
+ ],
+ visibility = ["//visibility:public"],
+)
diff --git a/go/tools/bazel/BUILD.bazel b/go/tools/bazel/BUILD.bazel
new file mode 100644
index 00000000..674da86f
--- /dev/null
+++ b/go/tools/bazel/BUILD.bazel
@@ -0,0 +1,34 @@
+load("//go:def.bzl", "go_library", "go_test")
+
+go_library(
+ name = "bazel",
+ srcs = [
+ "bazel.go",
+ "runfiles.go",
+ ],
+ importpath = "github.com/bazelbuild/rules_go/go/tools/bazel",
+ visibility = ["//visibility:public"],
+)
+
+go_test(
+ name = "bazel_test",
+ size = "small",
+ srcs = ["bazel_test.go"],
+ data = ["empty.txt"],
+ embed = [":bazel"],
+)
+
+# Runfiles functionality in this package is tested by //tests/core/runfiles.
+
+filegroup(
+ name = "all_files",
+ testonly = True,
+ srcs = glob(["**"]),
+ visibility = ["//visibility:public"],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":bazel",
+ visibility = ["//visibility:public"],
+)
diff --git a/go/tools/bazel/bazel.go b/go/tools/bazel/bazel.go
new file mode 100644
index 00000000..d90b0298
--- /dev/null
+++ b/go/tools/bazel/bazel.go
@@ -0,0 +1,73 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package bazel provides utilities for interacting with the surrounding Bazel environment.
+package bazel
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+)
+
+const TEST_SRCDIR = "TEST_SRCDIR"
+const TEST_TMPDIR = "TEST_TMPDIR"
+const TEST_WORKSPACE = "TEST_WORKSPACE"
+
+// NewTmpDir creates a new temporary directory in TestTmpDir().
+func NewTmpDir(prefix string) (string, error) {
+ return ioutil.TempDir(TestTmpDir(), prefix)
+}
+
+// TestTmpDir returns the path to the Bazel test temp directory.
+// If TEST_TMPDIR is not defined, it returns the OS default temp dir.
+func TestTmpDir() string {
+ if tmp, ok := os.LookupEnv(TEST_TMPDIR); ok {
+ return tmp
+ }
+ return os.TempDir()
+}
+
+// SpliceDelimitedOSArgs is a utility function that scans the os.Args list for
+// entries delimited by the begin and end delimiters (typically the values
+// "-begin_files" and "-end_files" are used). Entries between these delimiters
+// are spliced out of os.Args and returned to the caller. If the ordering
+// of -begin_files or -end_files is malformed, error is returned.
+func SpliceDelimitedOSArgs(begin, end string) ([]string, error) {
+ var files []string
+ beginFiles, endFiles := -1, -1
+ for i, arg := range os.Args {
+ if arg == begin {
+ beginFiles = i
+ } else if arg == end {
+ endFiles = i
+ break
+ } else if arg == "--" {
+ break
+ }
+ }
+
+ if beginFiles >= 0 && endFiles < 0 ||
+ beginFiles < 0 && endFiles >= 0 ||
+ beginFiles >= 0 && beginFiles >= endFiles {
+ return nil, fmt.Errorf("error: %s, %s not set together or in order", begin, end)
+ }
+
+ if beginFiles >= 0 {
+ files = os.Args[beginFiles+1 : endFiles]
+ os.Args = append(os.Args[:beginFiles:beginFiles], os.Args[endFiles+1:]...)
+ }
+
+ return files, nil
+}
diff --git a/go/tools/bazel/bazel_test.go b/go/tools/bazel/bazel_test.go
new file mode 100644
index 00000000..52903d9c
--- /dev/null
+++ b/go/tools/bazel/bazel_test.go
@@ -0,0 +1,267 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package bazel
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "strings"
+ "testing"
+)
+
+// makeAndEnterTempdir creates a temporary directory and chdirs into it.
+func makeAndEnterTempdir() (func(), error) {
+ oldCwd, err := os.Getwd()
+ if err != nil {
+ return nil, fmt.Errorf("cannot get path to current directory: %v", err)
+ }
+
+ tempDir, err := ioutil.TempDir("", "test")
+ if err != nil {
+ return nil, fmt.Errorf("failed to create temporary directory: %v", err)
+ }
+
+ err = os.Chdir(tempDir)
+ if err != nil {
+ os.RemoveAll(tempDir)
+ return nil, fmt.Errorf("cannot enter temporary directory %s: %v", tempDir, err)
+ }
+
+ cleanup := func() {
+ defer os.RemoveAll(tempDir)
+ defer os.Chdir(oldCwd)
+ }
+ return cleanup, nil
+}
+
+// createPaths creates a collection of paths for testing purposes. Paths can end with a /, in
+// which case a directory is created; or they can end with a *, in which case an executable file
+// is created. (This matches the nomenclature of "ls -F".)
+func createPaths(paths []string) error {
+ for _, path := range paths {
+ if strings.HasSuffix(path, "/") {
+ if err := os.MkdirAll(path, 0755); err != nil {
+ return fmt.Errorf("failed to create directory %s: %v", path, err)
+ }
+ } else {
+ mode := os.FileMode(0644)
+ if strings.HasSuffix(path, "*") {
+ path = path[0 : len(path)-1]
+ mode |= 0111
+ }
+ if err := ioutil.WriteFile(path, []byte{}, mode); err != nil {
+ return fmt.Errorf("failed to create file %s with mode %v: %v", path, mode, err)
+ }
+ }
+ }
+ return nil
+}
+
+func TestRunfile(t *testing.T) {
+ file := "go/tools/bazel/empty.txt"
+ runfile, err := Runfile(file)
+ if err != nil {
+ t.Errorf("When reading file %s got error %s", file, err)
+ }
+
+ // Check that the file actually exists
+ if _, err := os.Stat(runfile); err != nil {
+ t.Errorf("File found by runfile doesn't exist")
+ }
+}
+
+func TestRunfilesPath(t *testing.T) {
+ path, err := RunfilesPath()
+ if err != nil {
+ t.Errorf("Error finding runfiles path: %s", err)
+ }
+
+ if path == "" {
+ t.Errorf("Runfiles path is empty: %s", path)
+ }
+}
+
+func TestNewTmpDir(t *testing.T) {
+ // prefix := "new/temp/dir"
+ prefix := "demodir"
+ tmpdir, err := NewTmpDir(prefix)
+ if err != nil {
+ t.Errorf("When creating temp dir %s got error %s", prefix, err)
+ }
+
+ // Check that the tempdir actually exists
+ if _, err := os.Stat(tmpdir); err != nil {
+ t.Errorf("New tempdir (%s) not created. Got error %s", tmpdir, err)
+ }
+}
+
+func TestTestTmpDir(t *testing.T) {
+ if TestTmpDir() == "" {
+ t.Errorf("TestTmpDir (TEST_TMPDIR) was left empty")
+ }
+}
+
+func TestTestWorkspace(t *testing.T) {
+ workspace, err := TestWorkspace()
+
+ if workspace == "" {
+ t.Errorf("Workspace is left empty")
+ }
+
+ if err != nil {
+ t.Errorf("Unable to get workspace with error %s", err)
+ }
+}
+
+func TestPythonManifest(t *testing.T) {
+ cleanup, err := makeAndEnterTempdir()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer cleanup()
+
+ err = ioutil.WriteFile("MANIFEST",
+ // all on one line to make sure the whitespace stays exactly as in the source file
+ []byte("__init__.py \n__main__/external/__init__.py \n__main__/external/rules_python/__init__.py \n__main__/external/rules_python/python/__init__.py \n__main__/external/rules_python/python/runfiles/__init__.py \n__main__/external/rules_python/python/runfiles/runfiles.py C:/users/sam/_bazel_sam/pj4cl7d4/external/rules_python/python/runfiles/runfiles.py\n__main__/go_cat_/go_cat.exe C:/users/sam/_bazel_sam/pj4cl7d4/execroot/__main__/bazel-out/x64_windows-opt-exec-2B5CBBC6/bin/go_cat_/go_cat.exe\n__main__/important.txt C:/users/sam/dev/rules_go_runfiles_repro/important.txt\n__main__/parent.exe C:/users/sam/_bazel_sam/pj4cl7d4/execroot/__main__/bazel-out/x64_windows-opt-exec-2B5CBBC6/bin/parent.exe\n__main__/parent.py C:/users/sam/dev/rules_go_runfiles_repro/parent.py\n__main__/parent.zip C:/users/sam/_bazel_sam/pj4cl7d4/execroot/__main__/bazel-out/x64_windows-opt-exec-2B5CBBC6/bin/parent.zip\nrules_python/__init__.py \nrules_python/python/__init__.py \nrules_python/python/runfiles/__init__.py \nrules_python/python/runfiles/runfiles.py C:/users/sam/_bazel_sam/pj4cl7d4/external/rules_python/python/runfiles/runfiles.py"),
+ os.FileMode(0644),
+ )
+ if err != nil {
+ t.Fatalf("Failed to write sample manifest: %v", err)
+ }
+
+ originalEnvVar := os.Getenv(RUNFILES_MANIFEST_FILE)
+ defer func() {
+ if err = os.Setenv(RUNFILES_MANIFEST_FILE, originalEnvVar); err != nil {
+ t.Fatalf("Failed to reset environment: %v", err)
+ }
+ }()
+
+ if err = os.Setenv(RUNFILES_MANIFEST_FILE, "MANIFEST"); err != nil {
+ t.Fatalf("Failed to set manifest file environement variable: %v", err)
+ }
+
+ initRunfiles()
+
+ if runfiles.err != nil {
+ t.Errorf("failed to init runfiles: %v", runfiles.err)
+ }
+
+ entry, ok := runfiles.index.GetIgnoringWorkspace("important.txt")
+ if !ok {
+ t.Errorf("failed to locate runfile %s in index", "important.txt")
+ }
+
+ if entry.Workspace != "__main__" {
+ t.Errorf("incorrect workspace for runfile. Expected: %s, actual %s", "__main__", entry.Workspace)
+ }
+}
+
+func TestSpliceDelimitedOSArgs(t *testing.T) {
+ testData := map[string]struct {
+ initial []string
+ want []string
+ final []string
+ wantErr error
+ }{
+ "no args": {
+ []string{},
+ []string{},
+ []string{},
+ nil,
+ },
+ "empty splice": {
+ []string{"-begin_files", "-end_files"},
+ []string{},
+ []string{},
+ nil,
+ },
+ "removes inner args": {
+ []string{"-begin_files", "a", "-end_files"},
+ []string{"a"},
+ []string{},
+ nil,
+ },
+ "preserves outer args": {
+ []string{"a", "-begin_files", "b", "c", "-end_files", "d"},
+ []string{"b", "c"},
+ []string{"a", "d"},
+ nil,
+ },
+ "complains about missing end delimiter": {
+ []string{"-begin_files"},
+ []string{},
+ []string{},
+ errors.New("error: -begin_files, -end_files not set together or in order"),
+ },
+ "complains about missing begin delimiter": {
+ []string{"-end_files"},
+ []string{},
+ []string{},
+ errors.New("error: -begin_files, -end_files not set together or in order"),
+ },
+ "complains about out-of-order delimiter": {
+ []string{"-end_files", "-begin_files"},
+ []string{},
+ []string{},
+ errors.New("error: -begin_files, -end_files not set together or in order"),
+ },
+ "-- at middle": {
+ []string{"-begin_files", "a", "b", "--", "-end_files"},
+ []string{},
+ []string{},
+ errors.New("error: -begin_files, -end_files not set together or in order"),
+ },
+ "-- at beginning": {
+ []string{"--", "-begin_files", "a", "-end_files"},
+ []string{},
+ []string{"--", "-begin_files", "a", "-end_files"},
+ nil,
+ },
+ }
+ for name, tc := range testData {
+ t.Run(name, func(t *testing.T) {
+ os.Args = tc.initial
+ got, err := SpliceDelimitedOSArgs("-begin_files", "-end_files")
+ if err != nil {
+ if tc.wantErr == nil {
+ t.Fatalf("unexpected err: %v", err)
+ }
+ if tc.wantErr.Error() != err.Error() {
+ t.Fatalf("err: want %v, got %v", tc.wantErr, err)
+ }
+ return
+ }
+ if len(tc.want) != len(got) {
+ t.Fatalf("len(want: %d, got %d", len(tc.want), len(got))
+ }
+ for i, actual := range got {
+ expected := tc.want[i]
+ if expected != actual {
+ t.Errorf("%d: want %v, got %v", i, expected, actual)
+ }
+ }
+ if len(tc.final) != len(os.Args) {
+ t.Fatalf("len(want: %d, os.Args %d", len(tc.final), len(os.Args))
+ }
+ for i, actual := range os.Args {
+ expected := tc.final[i]
+ if expected != actual {
+ t.Errorf("%d: want %v, os.Args %v", i, expected, actual)
+ }
+ }
+ })
+ }
+}
diff --git a/go/tools/bazel/empty.txt b/go/tools/bazel/empty.txt
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/go/tools/bazel/empty.txt
diff --git a/go/tools/bazel/runfiles.go b/go/tools/bazel/runfiles.go
new file mode 100644
index 00000000..a2ac11d8
--- /dev/null
+++ b/go/tools/bazel/runfiles.go
@@ -0,0 +1,442 @@
+// Copyright 2018 The Bazel Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bazel
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+ "runtime"
+ "sort"
+ "strings"
+ "sync"
+)
+
+const (
+ RUNFILES_MANIFEST_FILE = "RUNFILES_MANIFEST_FILE"
+ RUNFILES_DIR = "RUNFILES_DIR"
+)
+
+// Runfile returns an absolute path to the file named by "path", which
+// should be a relative path from the workspace root to the file within
+// the bazel workspace.
+//
+// Runfile may be called from tests invoked with 'bazel test' and
+// binaries invoked with 'bazel run'. On Windows,
+// only tests invoked with 'bazel test' are supported.
+//
+// Deprecated: Use github.com/bazelbuild/rules_go/go/runfiles instead for
+// cross-platform support matching the behavior of the Bazel-provided runfiles
+// libraries.
+func Runfile(path string) (string, error) {
+ // Search in working directory
+ if _, err := os.Stat(path); err == nil {
+ return filepath.Abs(path)
+ }
+
+ if err := ensureRunfiles(); err != nil {
+ return "", err
+ }
+
+ // Search manifest if we have one.
+ if entry, ok := runfiles.index.GetIgnoringWorkspace(path); ok {
+ return entry.Path, nil
+ }
+
+ if strings.HasPrefix(path, "../") || strings.HasPrefix(path, "external/") {
+ pathParts := strings.Split(path, "/")
+ if len(pathParts) >= 3 {
+ workspace := pathParts[1]
+ pathInsideWorkspace := strings.Join(pathParts[2:], "/")
+ if path := runfiles.index.Get(workspace, pathInsideWorkspace); path != "" {
+ return path, nil
+ }
+ }
+ }
+
+ // Search the main workspace.
+ if runfiles.workspace != "" {
+ mainPath := filepath.Join(runfiles.dir, runfiles.workspace, path)
+ if _, err := os.Stat(mainPath); err == nil {
+ return mainPath, nil
+ }
+ }
+
+ // Search other workspaces.
+ for _, w := range runfiles.workspaces {
+ workPath := filepath.Join(runfiles.dir, w, path)
+ if _, err := os.Stat(workPath); err == nil {
+ return workPath, nil
+ }
+ }
+
+ return "", fmt.Errorf("Runfile %s: could not locate file", path)
+}
+
+// FindBinary returns an absolute path to the binary built from a go_binary
+// rule in the given package with the given name. FindBinary is similar to
+// Runfile, but it accounts for varying configurations and file extensions,
+// which may cause the binary to have different paths on different platforms.
+//
+// FindBinary may be called from tests invoked with 'bazel test' and
+// binaries invoked with 'bazel run'. On Windows,
+// only tests invoked with 'bazel test' are supported.
+func FindBinary(pkg, name string) (string, bool) {
+ if err := ensureRunfiles(); err != nil {
+ return "", false
+ }
+
+ // If we've gathered a list of runfiles, either by calling ListRunfiles or
+ // parsing the manifest on Windows, just use that instead of searching
+ // directories. Return the first match. The manifest on Windows may contain
+ // multiple entries for the same file.
+ if runfiles.list != nil {
+ if runtime.GOOS == "windows" {
+ name += ".exe"
+ }
+ for _, entry := range runfiles.list {
+ if path.Base(entry.ShortPath) != name {
+ continue
+ }
+ pkgDir := path.Dir(path.Dir(entry.ShortPath))
+ if pkgDir == "." {
+ pkgDir = ""
+ }
+ if pkgDir != pkg {
+ continue
+ }
+ return entry.Path, true
+ }
+ return "", false
+ }
+
+ dir, err := Runfile(pkg)
+ if err != nil {
+ return "", false
+ }
+ var found string
+ stopErr := errors.New("stop")
+ err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ if info.IsDir() {
+ return nil
+ }
+ base := filepath.Base(path)
+ stem := strings.TrimSuffix(base, ".exe")
+ if stem != name {
+ return nil
+ }
+ if runtime.GOOS != "windows" {
+ if st, err := os.Stat(path); err != nil {
+ return err
+ } else if st.Mode()&0111 == 0 {
+ return nil
+ }
+ }
+ if stem == name {
+ found = path
+ return stopErr
+ }
+ return nil
+ })
+ if err == stopErr {
+ return found, true
+ } else {
+ return "", false
+ }
+}
+
+// A RunfileEntry describes a runfile.
+type RunfileEntry struct {
+ // Workspace is the bazel workspace the file came from. For example,
+ // this would be "io_bazel_rules_go" for a file in rules_go.
+ Workspace string
+
+ // ShortPath is a relative, slash-separated path from the workspace root
+ // to the file. For non-binary files, this may be passed to Runfile
+ // to locate a file.
+ ShortPath string
+
+ // Path is an absolute path to the file.
+ Path string
+}
+
+// ListRunfiles returns a list of available runfiles.
+func ListRunfiles() ([]RunfileEntry, error) {
+ if err := ensureRunfiles(); err != nil {
+ return nil, err
+ }
+
+ if runfiles.list == nil && runfiles.dir != "" {
+ runfiles.listOnce.Do(func() {
+ var list []RunfileEntry
+ haveWorkspaces := strings.HasSuffix(runfiles.dir, ".runfiles") && runfiles.workspace != ""
+
+ err := filepath.Walk(runfiles.dir, func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ rel, _ := filepath.Rel(runfiles.dir, path)
+ rel = filepath.ToSlash(rel)
+ if rel == "." {
+ return nil
+ }
+
+ var workspace, shortPath string
+ if haveWorkspaces {
+ if i := strings.IndexByte(rel, '/'); i < 0 {
+ return nil
+ } else {
+ workspace, shortPath = rel[:i], rel[i+1:]
+ }
+ } else {
+ workspace, shortPath = "", rel
+ }
+
+ list = append(list, RunfileEntry{Workspace: workspace, ShortPath: shortPath, Path: path})
+ return nil
+ })
+ if err != nil {
+ runfiles.err = err
+ return
+ }
+ runfiles.list = list
+ })
+ }
+ return runfiles.list, runfiles.err
+}
+
+// TestWorkspace returns the name of the Bazel workspace for this test.
+// TestWorkspace returns an error if the TEST_WORKSPACE environment variable
+// was not set and SetDefaultTestWorkspace was not called.
+func TestWorkspace() (string, error) {
+ if err := ensureRunfiles(); err != nil {
+ return "", err
+ }
+ if runfiles.workspace != "" {
+ return runfiles.workspace, nil
+ }
+ return "", errors.New("TEST_WORKSPACE not set and SetDefaultTestWorkspace not called")
+}
+
+// SetDefaultTestWorkspace allows you to set a fake value for the
+// environment variable TEST_WORKSPACE if it is not defined. This is useful
+// when running tests on the command line and not through Bazel.
+func SetDefaultTestWorkspace(w string) {
+ ensureRunfiles()
+ runfiles.workspace = w
+}
+
+// RunfilesPath returns the path to the runfiles tree.
+// It will return an error if there is no runfiles tree, for example because
+// the executable is run on Windows or was not invoked with 'bazel test'
+// or 'bazel run'.
+func RunfilesPath() (string, error) {
+ if err := ensureRunfiles(); err != nil {
+ return "", err
+ }
+ if runfiles.dir == "" {
+ if runtime.GOOS == "windows" {
+ return "", errors.New("RunfilesPath: no runfiles directory on windows")
+ } else {
+ return "", errors.New("could not locate runfiles directory")
+ }
+ }
+ if runfiles.workspace == "" {
+ return "", errors.New("could not locate runfiles workspace")
+ }
+ return filepath.Join(runfiles.dir, runfiles.workspace), nil
+}
+
+var runfiles = struct {
+ once, listOnce sync.Once
+
+ // list is a list of known runfiles, either loaded from the manifest
+ // or discovered by walking the runfile directory.
+ list []RunfileEntry
+
+ // index maps runfile short paths to absolute paths.
+ index index
+
+ // dir is a path to the runfile directory. Typically this is a directory
+ // named <target>.runfiles, with a subdirectory for each workspace.
+ dir string
+
+ // workspace is the workspace where the binary or test was built.
+ workspace string
+
+ // workspaces is a list of other workspace names.
+ workspaces []string
+
+ // err is set when there is an error loading runfiles, for example,
+ // parsing the manifest.
+ err error
+}{}
+
+type index struct {
+ indexWithWorkspace map[indexKey]*RunfileEntry
+ indexIgnoringWorksapce map[string]*RunfileEntry
+}
+
+func newIndex() index {
+ return index{
+ indexWithWorkspace: make(map[indexKey]*RunfileEntry),
+ indexIgnoringWorksapce: make(map[string]*RunfileEntry),
+ }
+}
+
+func (i *index) Put(entry *RunfileEntry) {
+ i.indexWithWorkspace[indexKey{
+ workspace: entry.Workspace,
+ shortPath: entry.ShortPath,
+ }] = entry
+ i.indexIgnoringWorksapce[entry.ShortPath] = entry
+}
+
+func (i *index) Get(workspace string, shortPath string) string {
+ entry := i.indexWithWorkspace[indexKey{
+ workspace: workspace,
+ shortPath: shortPath,
+ }]
+ if entry == nil {
+ return ""
+ }
+ return entry.Path
+}
+
+func (i *index) GetIgnoringWorkspace(shortPath string) (*RunfileEntry, bool) {
+ entry, ok := i.indexIgnoringWorksapce[shortPath]
+ return entry, ok
+}
+
+type indexKey struct {
+ workspace string
+ shortPath string
+}
+
+func ensureRunfiles() error {
+ runfiles.once.Do(initRunfiles)
+ return runfiles.err
+}
+
+func initRunfiles() {
+ manifest := os.Getenv("RUNFILES_MANIFEST_FILE")
+ if manifest != "" {
+ // On Windows, Bazel doesn't create a symlink tree of runfiles because
+ // Windows doesn't support symbolic links by default. Instead, runfile
+ // locations are written to a manifest file.
+ runfiles.index = newIndex()
+ data, err := ioutil.ReadFile(manifest)
+ if err != nil {
+ runfiles.err = err
+ return
+ }
+ lineno := 0
+ for len(data) > 0 {
+ i := bytes.IndexByte(data, '\n')
+ var line []byte
+ if i < 0 {
+ line = data
+ data = nil
+ } else {
+ line = data[:i]
+ data = data[i+1:]
+ }
+ lineno++
+
+ // Only TrimRight newlines. Do not TrimRight() completely, because that would remove spaces too.
+ // This is necessary in order to have at least one space in every manifest line.
+ // Some manifest entries don't have any path after this space, namely the "__init__.py" entries.
+ // original comment sourced from: https://github.com/bazelbuild/bazel/blob/09c621e4cf5b968f4c6cdf905ab142d5961f9ddc/src/test/py/bazel/runfiles_test.py#L225
+ line = bytes.TrimRight(line, "\r\n")
+ if len(line) == 0 {
+ continue
+ }
+
+ spaceIndex := bytes.IndexByte(line, ' ')
+ if spaceIndex < 0 {
+ runfiles.err = fmt.Errorf(
+ "error parsing runfiles manifest: %s:%d: no space: '%s'", manifest, lineno, line)
+ return
+ }
+ shortPath := string(line[0:spaceIndex])
+ abspath := ""
+ if len(line) > spaceIndex+1 {
+ abspath = string(line[spaceIndex+1:])
+ }
+
+ entry := RunfileEntry{ShortPath: shortPath, Path: abspath}
+ if i := strings.IndexByte(entry.ShortPath, '/'); i >= 0 {
+ entry.Workspace = entry.ShortPath[:i]
+ entry.ShortPath = entry.ShortPath[i+1:]
+ }
+ if strings.HasPrefix(entry.ShortPath, "external/") {
+ entry.ShortPath = entry.ShortPath[len("external/"):]
+ if i := strings.IndexByte(entry.ShortPath, '/'); i >= 0 {
+ entry.Workspace = entry.ShortPath[:i]
+ entry.ShortPath = entry.ShortPath[i+1:]
+ }
+ }
+
+ runfiles.list = append(runfiles.list, entry)
+ runfiles.index.Put(&entry)
+ }
+ }
+
+ runfiles.workspace = os.Getenv("TEST_WORKSPACE")
+
+ if dir := os.Getenv("RUNFILES_DIR"); dir != "" {
+ runfiles.dir = dir
+ } else if dir = os.Getenv("TEST_SRCDIR"); dir != "" {
+ runfiles.dir = dir
+ } else if runtime.GOOS != "windows" {
+ dir, err := os.Getwd()
+ if err != nil {
+ runfiles.err = fmt.Errorf("error locating runfiles dir: %v", err)
+ return
+ }
+
+ parent := filepath.Dir(dir)
+ if strings.HasSuffix(parent, ".runfiles") {
+ runfiles.dir = parent
+ if runfiles.workspace == "" {
+ runfiles.workspace = filepath.Base(dir)
+ }
+ } else {
+ runfiles.err = errors.New("could not locate runfiles directory")
+ return
+ }
+ }
+
+ if runfiles.dir != "" {
+ fis, err := ioutil.ReadDir(runfiles.dir)
+ if err != nil {
+ runfiles.err = fmt.Errorf("could not open runfiles directory: %v", err)
+ return
+ }
+ for _, fi := range fis {
+ if fi.IsDir() {
+ runfiles.workspaces = append(runfiles.workspaces, fi.Name())
+ }
+ }
+ sort.Strings(runfiles.workspaces)
+ }
+}
diff --git a/go/tools/bazel_benchmark/BUILD.bazel b/go/tools/bazel_benchmark/BUILD.bazel
new file mode 100644
index 00000000..8e1f9ec2
--- /dev/null
+++ b/go/tools/bazel_benchmark/BUILD.bazel
@@ -0,0 +1,14 @@
+load("//go:def.bzl", "go_binary", "go_library")
+
+go_binary(
+ name = "bazel_benchmark",
+ embed = [":bazel_benchmark_lib"],
+ visibility = ["//visibility:public"],
+)
+
+go_library(
+ name = "bazel_benchmark_lib",
+ srcs = ["bazel_benchmark.go"],
+ importpath = "github.com/bazelbuild/rules_go/go/tools/bazel_benchmark",
+ visibility = ["//visibility:private"],
+)
diff --git a/go/tools/bazel_benchmark/BUILD.bazel.in b/go/tools/bazel_benchmark/BUILD.bazel.in
new file mode 100644
index 00000000..6e95da13
--- /dev/null
+++ b/go/tools/bazel_benchmark/BUILD.bazel.in
@@ -0,0 +1,6 @@
+load("//go:def.bzl", "go_binary")
+
+go_binary(
+ name = "hello",
+ srcs = ["hello.go"],
+)
diff --git a/go/tools/bazel_benchmark/WORKSPACE.in b/go/tools/bazel_benchmark/WORKSPACE.in
new file mode 100644
index 00000000..62941aef
--- /dev/null
+++ b/go/tools/bazel_benchmark/WORKSPACE.in
@@ -0,0 +1,26 @@
+load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
+
+local_repository(
+ name = "io_bazel_rules_go",
+ path = "{{.RulesGoDir}}",
+)
+
+http_archive(
+ name = "bazel_gazelle",
+ urls = ["https://github.com/bazelbuild/bazel-gazelle/releases/download/0.13.0/bazel-gazelle-0.13.0.tar.gz"],
+ sha256 = "bc653d3e058964a5a26dcad02b6c72d7d63e6bb88d94704990b908a1445b8758",
+)
+
+load("@io_bazel_rules_go//go:def.bzl", "go_register_toolchains", "go_rules_dependencies")
+
+go_rules_dependencies()
+
+go_register_toolchains(go_version = "host")
+
+load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies")
+
+gazelle_dependencies()
+
+load("@io_bazel_rules_go//tests/integration/popular_repos:popular_repos.bzl", "popular_repos")
+
+popular_repos()
diff --git a/go/tools/bazel_benchmark/bazel_benchmark.go b/go/tools/bazel_benchmark/bazel_benchmark.go
new file mode 100644
index 00000000..30a32ab8
--- /dev/null
+++ b/go/tools/bazel_benchmark/bazel_benchmark.go
@@ -0,0 +1,400 @@
+// Copyright 2018 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "bytes"
+ "encoding/csv"
+ "errors"
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "text/template"
+ "time"
+)
+
+// programName is the base name of the running binary, used as the log
+// prefix and the flag-set name.
+var programName = filepath.Base(os.Args[0])
+
+// substitutions holds the parameters expanded into the *.in template
+// files when the scratch workspace is created.
+type substitutions struct {
+	RulesGoDir string
+}
+
+// serverState says whether the bazel server is running when the timed
+// build starts.
+type serverState int
+
+const (
+	asleep serverState = iota
+	awake
+)
+
+// cleanState says whether the timed build starts from a clean output
+// tree or from an almost-up-to-date incremental state.
+type cleanState int
+
+const (
+	clean cleanState = iota
+	incr
+)
+
+// benchmark describes one measured build scenario and stores its result.
+type benchmark struct {
+	desc        string        // unique name; also the CSV column name
+	serverState serverState   // asleep forces a server restart before the timed build
+	cleanState  cleanState    // clean vs. incremental starting state
+	incrFile    string        // file mutated to trigger an incremental rebuild (incr only)
+	targets     []string      // targets passed to "bazel build"
+	result      time.Duration // wall time of the timed build, set by runBenchmark
+}
+
+// benchmarks is the fixed suite of scenarios to measure. Each entry
+// varies the server state (asleep/awake) and build state (clean/incr)
+// for a small hello-world target or a larger set of popular repos.
+var benchmarks = []benchmark{
+	{
+		desc:        "hello_asleep_clean",
+		serverState: asleep,
+		cleanState:  clean,
+		targets:     []string{"//:hello"},
+	}, {
+		desc:        "hello_awake_clean",
+		serverState: awake,
+		cleanState:  clean,
+		targets:     []string{"//:hello"},
+	}, {
+		desc:        "hello_asleep_incr",
+		serverState: asleep,
+		cleanState:  incr,
+		incrFile:    "hello.go",
+		targets:     []string{"//:hello"},
+	}, {
+		desc:        "hello_awake_incr",
+		serverState: awake,
+		cleanState:  incr,
+		incrFile:    "hello.go",
+		targets:     []string{"//:hello"},
+	}, {
+		desc:        "popular_repos_awake_clean",
+		serverState: awake,
+		cleanState:  clean,
+		targets:     []string{"@io_bazel_rules_go//tests/integration/popular_repos:all"},
+	},
+	// TODO: more substantial Kubernetes targets
+}
+
+// main configures plain logging (no timestamps) and delegates to run,
+// exiting non-zero on error.
+func main() {
+	log.SetFlags(0)
+	log.SetPrefix(programName + ": ")
+	if err := run(os.Args[1:]); err != nil {
+		log.Fatal(err)
+	}
+}
+
+// run executes the benchmark driver: it validates flags, records the
+// rules_go commit and bazel version, prepares a scratch workspace,
+// pre-fetches all external dependencies, runs every benchmark, and
+// appends the results to the CSV file named by -out.
+func run(args []string) error {
+	fs := flag.NewFlagSet(programName, flag.ExitOnError)
+	var rulesGoDir, outPath string
+	fs.StringVar(&rulesGoDir, "rules_go_dir", "", "directory where rules_go is checked out")
+	fs.StringVar(&outPath, "out", "", "csv file to append results to")
+	var keep bool
+	fs.BoolVar(&keep, "keep", false, "if true, the workspace directory won't be deleted at the end")
+	if err := fs.Parse(args); err != nil {
+		return err
+	}
+	if rulesGoDir == "" {
+		return errors.New("-rules_go_dir not set")
+	}
+	if abs, err := filepath.Abs(rulesGoDir); err != nil {
+		return err
+	} else {
+		rulesGoDir = abs
+	}
+	if outPath == "" {
+		return errors.New("-out not set")
+	}
+	if abs, err := filepath.Abs(outPath); err != nil {
+		return err
+	} else {
+		outPath = abs
+	}
+
+	commit, err := getCommit(rulesGoDir)
+	if err != nil {
+		return err
+	}
+
+	dir, err := setupWorkspace(rulesGoDir)
+	if err != nil {
+		return err
+	}
+	if !keep {
+		defer cleanupWorkspace(dir)
+	}
+
+	bazelVersion, err := getBazelVersion()
+	if err != nil {
+		return err
+	}
+
+	log.Printf("running benchmarks in %s", dir)
+	// Deduplicate targets across benchmarks so each is fetched only once.
+	targetSet := make(map[string]bool)
+	for _, b := range benchmarks {
+		for _, t := range b.targets {
+			targetSet[t] = true
+		}
+	}
+	allTargets := make([]string, 0, len(targetSet))
+	for t := range targetSet {
+		allTargets = append(allTargets, t)
+	}
+	// BUG FIX: the fetch error was previously discarded. A failed fetch
+	// would leave network time inside the timed builds (or fail them),
+	// making every measurement meaningless.
+	if err := fetch(allTargets); err != nil {
+		return fmt.Errorf("error fetching targets: %v", err)
+	}
+
+	for i := range benchmarks {
+		b := &benchmarks[i]
+		log.Printf("running benchmark %d/%d: %s", i+1, len(benchmarks), b.desc)
+		if err := runBenchmark(b); err != nil {
+			return fmt.Errorf("error running benchmark %s: %v", b.desc, err)
+		}
+	}
+
+	log.Printf("writing results to %s", outPath)
+	return recordResults(outPath, time.Now().UTC(), bazelVersion, commit, benchmarks)
+}
+
+// getCommit returns the abbreviated (7-character) hash of HEAD in the
+// rules_go checkout at rulesGoDir. It temporarily changes the working
+// directory and restores it before returning.
+func getCommit(rulesGoDir string) (commit string, err error) {
+	wd, err := os.Getwd()
+	if err != nil {
+		return "", err
+	}
+	if err := os.Chdir(rulesGoDir); err != nil {
+		return "", err
+	}
+	defer func() {
+		// BUG FIX: the original condition was inverted — it overwrote a
+		// real error with the chdir error and dropped the chdir error
+		// entirely when err was nil. Only report the chdir failure when
+		// nothing else went wrong.
+		if cderr := os.Chdir(wd); cderr != nil && err == nil {
+			err = cderr
+		}
+	}()
+	out, err := exec.Command("git", "rev-parse", "HEAD").Output()
+	if err != nil {
+		return "", err
+	}
+	outStr := strings.TrimSpace(string(out))
+	if len(outStr) < 7 {
+		return "", errors.New("git output too short")
+	}
+	return outStr[:7], nil
+}
+
+// setupWorkspace creates a temporary workspace directory, expands every
+// *.in template from the benchmark source directory into it, and changes
+// the current working directory to the new workspace. The caller is
+// responsible for removing the directory (see cleanupWorkspace).
+func setupWorkspace(rulesGoDir string) (workspaceDir string, err error) {
+	workspaceDir, err = ioutil.TempDir("", "bazel_benchmark")
+	if err != nil {
+		return "", err
+	}
+	defer func() {
+		// Best-effort removal if template expansion fails below.
+		if err != nil {
+			os.RemoveAll(workspaceDir)
+		}
+	}()
+	benchmarkDir := filepath.Join(rulesGoDir, "go", "tools", "bazel_benchmark")
+	files, err := ioutil.ReadDir(benchmarkDir)
+	if err != nil {
+		return "", err
+	}
+	substitutions := substitutions{
+		RulesGoDir: filepath.Join(benchmarkDir, "..", "..", ".."),
+	}
+	for _, f := range files {
+		name := f.Name()
+		// Only *.in files are templates; everything else is ignored.
+		if filepath.Ext(name) != ".in" {
+			continue
+		}
+		srcPath := filepath.Join(benchmarkDir, name)
+		tpl, err := template.ParseFiles(srcPath)
+		if err != nil {
+			return "", err
+		}
+		// Strip the ".in" suffix for the expanded file's name.
+		dstPath := filepath.Join(workspaceDir, name[:len(name)-len(".in")])
+		out, err := os.Create(dstPath)
+		if err != nil {
+			return "", err
+		}
+		if err := tpl.Execute(out, substitutions); err != nil {
+			out.Close()
+			return "", err
+		}
+		if err := out.Close(); err != nil {
+			return "", err
+		}
+	}
+	// NOTE(review): changing the process working directory is a side
+	// effect callers rely on — later bazel commands run from here.
+	if err := os.Chdir(workspaceDir); err != nil {
+		return "", err
+	}
+	return workspaceDir, nil
+}
+
+// cleanupWorkspace expunges bazel's output tree for the workspace and
+// then removes the workspace directory itself.
+func cleanupWorkspace(dir string) error {
+	err := logBazelCommand("clean", "--expunge")
+	if err != nil {
+		return err
+	}
+	return os.RemoveAll(dir)
+}
+
+func getBazelVersion() (string, error) {
+ out, err := exec.Command("bazel", "version").Output()
+ if err != nil {
+ return "", err
+ }
+ prefix := []byte("Build label: ")
+ i := bytes.Index(out, prefix)
+ if i < 0 {
+ return "", errors.New("could not find bazel version in output")
+ }
+ out = out[i+len(prefix):]
+ i = bytes.IndexByte(out, '\n')
+ if i >= 0 {
+ out = out[:i]
+ }
+ return string(out), nil
+}
+
+// fetch downloads all external dependencies of targets up front so that
+// later timed builds don't include network time.
+func fetch(targets []string) error {
+	return logBazelCommand("fetch", targets...)
+}
+
+// runBenchmark prepares the build state described by b (clean vs.
+// incremental output tree, bazel server asleep vs. awake), then times a
+// single "bazel build" of b's targets, storing the wall time in b.result.
+func runBenchmark(b *benchmark) error {
+	switch b.cleanState {
+	case clean:
+		// Start the timed build from an empty output tree.
+		if err := logBazelCommand("clean"); err != nil {
+			return err
+		}
+	case incr:
+		// Warm the cache, then mutate one file so the timed build has
+		// exactly one stale target.
+		if err := logBazelCommand("build", b.targets...); err != nil {
+			return err
+		}
+		if b.incrFile == "" {
+			return errors.New("incrFile not set")
+		}
+		data, err := ioutil.ReadFile(b.incrFile)
+		if err != nil {
+			return err
+		}
+		// Growing the INCR marker guarantees the content changes on
+		// every run (INCR -> INCR. -> INCR.. ...).
+		data = bytes.Replace(data, []byte("INCR"), []byte("INCR."), -1)
+		if err := ioutil.WriteFile(b.incrFile, data, 0666); err != nil {
+			return err
+		}
+	}
+	if b.serverState == asleep {
+		// Shut the server down so its startup cost is included in the
+		// measurement.
+		if err := logBazelCommand("shutdown"); err != nil {
+			return err
+		}
+	}
+	start := time.Now()
+	if err := logBazelCommand("build", b.targets...); err != nil {
+		return err
+	}
+	b.result = time.Since(start)
+	return nil
+}
+
+// recordResults appends one CSV record with the benchmark timings to the
+// file at outPath, writing a header row first if the file did not already
+// exist. If writing fails, the header and record are printed to the log
+// so the data are not lost.
+func recordResults(outPath string, t time.Time, bazelVersion, commit string, benchmarks []benchmark) (err error) {
+	// TODO(jayconrod): update the header if new columns are added.
+	columnMap, outExists, err := buildColumnMap(outPath, benchmarks)
+	header := buildHeader(columnMap)
+	record := buildRecord(t, bazelVersion, commit, benchmarks, columnMap)
+	defer func() {
+		if err != nil {
+			log.Printf("error writing results: %s: %v", outPath, err)
+			log.Print("data are printed below")
+			log.Print(strings.Join(header, ","))
+			log.Print(strings.Join(record, ","))
+		}
+	}()
+	outFile, err := os.OpenFile(outPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		// BUG FIX: the original tested err instead of cerr, so a failed
+		// Close was silently ignored, and a successful Close could
+		// overwrite a real error with nil. Report the Close error only
+		// when no earlier error occurred.
+		if cerr := outFile.Close(); cerr != nil && err == nil {
+			err = cerr
+		}
+	}()
+	outCsv := csv.NewWriter(outFile)
+	if !outExists {
+		outCsv.Write(header)
+	}
+	outCsv.Write(record)
+	outCsv.Flush()
+	return outCsv.Error()
+}
+
+// logBazelCommand logs the bazel command line it is about to run, then
+// runs it with all output redirected to stderr.
+func logBazelCommand(command string, args ...string) error {
+	fullArgs := append([]string{command}, args...)
+	log.Printf("bazel %s\n", strings.Join(fullArgs, " "))
+	cmd := exec.Command("bazel", fullArgs...)
+	cmd.Stdout = os.Stderr
+	cmd.Stderr = os.Stderr
+	return cmd.Run()
+}
+
+// buildColumnMap computes the mapping from CSV column name to index.
+// If outPath already exists, its header row is read first so existing
+// columns keep their positions; the fixed metadata columns and one
+// column per benchmark are appended after any existing ones. outExists
+// reports whether the file could be opened. A non-nil err (from reading
+// the header) is returned alongside a still-usable columnMap so the
+// caller can log the data it failed to merge.
+func buildColumnMap(outPath string, benchmarks []benchmark) (columnMap map[string]int, outExists bool, err error) {
+	columnMap = make(map[string]int)
+	{
+		inFile, oerr := os.Open(outPath)
+		if oerr != nil {
+			// No existing file: start with a fresh column layout.
+			goto doneReading
+		}
+		outExists = true
+		defer inFile.Close()
+		inCsv := csv.NewReader(inFile)
+		var header []string
+		header, err = inCsv.Read()
+		if err != nil {
+			goto doneReading
+		}
+		for i, column := range header {
+			columnMap[column] = i
+		}
+	}
+
+doneReading:
+	// Metadata columns are added first if not already present, then one
+	// column per benchmark, preserving any order read from the file.
+	for _, s := range []string{"time", "bazel_version", "commit"} {
+		if _, ok := columnMap[s]; !ok {
+			columnMap[s] = len(columnMap)
+		}
+	}
+	for _, b := range benchmarks {
+		if _, ok := columnMap[b.desc]; !ok {
+			columnMap[b.desc] = len(columnMap)
+		}
+	}
+	return columnMap, outExists, err
+}
+
+// buildHeader converts a column-index map into an ordered header row.
+func buildHeader(columnMap map[string]int) []string {
+	header := make([]string, len(columnMap))
+	for name, index := range columnMap {
+		header[index] = name
+	}
+	return header
+}
+
+// buildRecord assembles one CSV row in the column order given by
+// columnMap: timestamp, bazel version, commit, then one duration (in
+// seconds) per benchmark.
+func buildRecord(t time.Time, bazelVersion, commit string, benchmarks []benchmark, columnMap map[string]int) []string {
+	record := make([]string, len(columnMap))
+	record[columnMap["time"]] = t.Format("2006-01-02 15:04:05")
+	record[columnMap["bazel_version"]] = bazelVersion
+	record[columnMap["commit"]] = commit
+	for i := range benchmarks {
+		b := &benchmarks[i]
+		record[columnMap[b.desc]] = fmt.Sprintf("%.3f", b.result.Seconds())
+	}
+	return record
+}
diff --git a/go/tools/bazel_benchmark/bazel_benchmark.sh b/go/tools/bazel_benchmark/bazel_benchmark.sh
new file mode 100755
index 00000000..8abf0bd8
--- /dev/null
+++ b/go/tools/bazel_benchmark/bazel_benchmark.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+
+# Copyright 2018 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -euo pipefail
+
+# Clone rules_go into a throwaway directory so the benchmark always runs
+# against a fresh checkout of the default branch.
+# NOTE(review): "mktemp --directory --tmpdir" is GNU-specific; this will
+# not work with BSD/macOS mktemp — confirm intended platforms.
+rules_go_dir=$(mktemp --directory --tmpdir rules_go.XXXXXX)
+function cleanup {
+  rm -rf "$rules_go_dir"
+}
+trap cleanup EXIT
+
+git clone --depth=1 --single-branch --no-tags \
+  https://github.com/bazelbuild/rules_go "$rules_go_dir"
+cd "$rules_go_dir"
+# Extra arguments are forwarded to the benchmark binary (e.g. -out).
+bazel run //go/tools/bazel_benchmark -- -rules_go_dir "$rules_go_dir" "$@"
+
diff --git a/go/tools/bazel_benchmark/hello.go.in b/go/tools/bazel_benchmark/hello.go.in
new file mode 100644
index 00000000..27d1996d
--- /dev/null
+++ b/go/tools/bazel_benchmark/hello.go.in
@@ -0,0 +1,9 @@
+package main
+
+import "fmt"
+
+const incr = "INCR"
+
+func main() {
+ fmt.Println(incr)
+}
diff --git a/go/tools/bazel_testing/BUILD.bazel b/go/tools/bazel_testing/BUILD.bazel
new file mode 100644
index 00000000..9089774d
--- /dev/null
+++ b/go/tools/bazel_testing/BUILD.bazel
@@ -0,0 +1,33 @@
+load("@bazel_skylib//:bzl_library.bzl", "bzl_library")
+load("//go:def.bzl", "go_library")
+
+go_library(
+ name = "bazel_testing",
+ srcs = ["bazel_testing.go"],
+ importpath = "github.com/bazelbuild/rules_go/go/tools/bazel_testing",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//go/tools/bazel",
+ "//go/tools/internal/txtar",
+ ],
+)
+
+filegroup(
+ name = "all_files",
+ testonly = True,
+ srcs = glob(["**"]),
+ visibility = ["//visibility:public"],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":bazel_testing",
+ visibility = ["//visibility:public"],
+)
+
+bzl_library(
+ name = "def",
+ srcs = ["def.bzl"],
+ visibility = ["//visibility:public"],
+ deps = ["//go:def"],
+)
diff --git a/go/tools/bazel_testing/bazel_testing.go b/go/tools/bazel_testing/bazel_testing.go
new file mode 100644
index 00000000..45431405
--- /dev/null
+++ b/go/tools/bazel_testing/bazel_testing.go
@@ -0,0 +1,535 @@
+// Copyright 2019 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package bazel_testing provides an integration testing framework for
+// testing rules_go with Bazel.
+//
+// Tests may be written by declaring a go_bazel_test target instead of
+// a go_test (go_bazel_test is defined in def.bzl here), then calling
+// TestMain. Tests are run in a synthetic test workspace. Tests may run
+// bazel commands with RunBazel.
+package bazel_testing
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "os/signal"
+ "path"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "sort"
+ "strings"
+ "testing"
+ "text/template"
+
+ "github.com/bazelbuild/rules_go/go/tools/bazel"
+ "github.com/bazelbuild/rules_go/go/tools/internal/txtar"
+)
+
+const (
+ // Standard Bazel exit codes.
+ // A subset of codes in https://cs.opensource.google/bazel/bazel/+/master:src/main/java/com/google/devtools/build/lib/util/ExitCode.java.
+ SUCCESS = 0
+ BUILD_FAILURE = 1
+ COMMAND_LINE_ERROR = 2
+ TESTS_FAILED = 3
+ NO_TESTS_FOUND = 4
+ RUN_FAILURE = 6
+ ANALYSIS_FAILURE = 7
+ INTERRUPTED = 8
+ LOCK_HELD_NOBLOCK_FOR_LOCK = 9
+)
+
+// Args is a list of arguments to TestMain. It's defined as a struct so
+// that new optional arguments may be added without breaking compatibility.
+type Args struct {
+ // Main is a text archive containing files in the main workspace.
+ // The text archive format is parsed by
+ // //go/tools/internal/txtar:go_default_library, which is copied from
+ // cmd/go/internal/txtar. If this archive does not contain a WORKSPACE file,
+ // a default file will be synthesized.
+ Main string
+
+ // Nogo is the nogo target to pass to go_register_toolchains. By default,
+ // nogo is not used.
+ Nogo string
+
+ // WorkspaceSuffix is a string that should be appended to the end
+ // of the default generated WORKSPACE file.
+ WorkspaceSuffix string
+
+ // SetUp is a function that is executed inside the context of the testing
+ // workspace. It is executed once and only once before the beginning of
+ // all tests. If SetUp returns a non-nil error, execution is halted and
+ // tests cases are not executed.
+ SetUp func() error
+}
+
+// debug may be set to make the test print the test workspace path and stop
+// instead of running tests.
+const debug = false
+
+// outputUserRoot is set to the directory where Bazel should put its internal files.
+// Since Bazel 2.0.0, this needs to be set explicitly to avoid it defaulting to a
+// deeply nested directory within the test, which runs into Windows path length limits.
+// We try to detect the original value in setupWorkspace and set it to that.
+var outputUserRoot string
+
+// TestMain should be called by tests using this framework from a function named
+// "TestMain". For example:
+//
+// func TestMain(m *testing.M) {
+// os.Exit(bazel_testing.TestMain(m, bazel_testing.Args{...}))
+// }
+//
+// TestMain constructs a set of workspaces and changes the working directory to
+// the main workspace.
+// TestMain builds the test workspaces, changes into the main workspace,
+// runs the optional SetUp hook, and then runs the tests. It arranges for
+// os.Exit to run after all deferred cleanup.
+func TestMain(m *testing.M, args Args) {
+	// Defer os.Exit with the correct code. This ensures other deferred cleanup
+	// functions are run first.
+	code := 1
+	defer func() {
+		if r := recover(); r != nil {
+			fmt.Fprintf(os.Stderr, "panic: %v\n", r)
+			code = 1
+		}
+		os.Exit(code)
+	}()
+
+	files, err := bazel.SpliceDelimitedOSArgs("-begin_files", "-end_files")
+	if err != nil {
+		fmt.Fprint(os.Stderr, err)
+		return
+	}
+
+	flag.Parse()
+
+	workspaceDir, cleanup, err := setupWorkspace(args, files)
+	defer func() {
+		if err := cleanup(); err != nil {
+			fmt.Fprintf(os.Stderr, "cleanup error: %v\n", err)
+			// Don't fail the test on a cleanup error.
+			// Some operating systems (windows, maybe also darwin) can't reliably
+			// delete executable files after they're run.
+		}
+	}()
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "error: %v\n", err)
+		return
+	}
+
+	if debug {
+		fmt.Fprintf(os.Stderr, "test setup in %s\n", workspaceDir)
+		// BUG FIX: signal.Notify requires a buffered channel; with an
+		// unbuffered channel the signal can be dropped (reported by
+		// "go vet").
+		interrupted := make(chan os.Signal, 1)
+		signal.Notify(interrupted, os.Interrupt)
+		<-interrupted
+		return
+	}
+
+	if err := os.Chdir(workspaceDir); err != nil {
+		fmt.Fprintf(os.Stderr, "%v\n", err)
+		return
+	}
+	defer exec.Command("bazel", "shutdown").Run()
+
+	if args.SetUp != nil {
+		if err := args.SetUp(); err != nil {
+			fmt.Fprintf(os.Stderr, "test provided SetUp method returned error: %v\n", err)
+			return
+		}
+	}
+
+	code = m.Run()
+}
+
+// BazelCmd prepares a bazel command for execution. It chooses the correct
+// bazel binary based on the environment and sanitizes the environment to
+// hide that this code is executing inside a bazel test.
+// BazelCmd prepares a bazel command for execution. It prepends startup
+// flags when an explicit output user root is known and strips the test
+// wrapper's environment variables so the nested invocation doesn't think
+// it is running inside a test.
+func BazelCmd(args ...string) *exec.Cmd {
+	cmd := exec.Command("bazel")
+	if outputUserRoot != "" {
+		startupFlags := []string{
+			"--output_user_root=" + outputUserRoot,
+			"--nosystem_rc",
+			"--nohome_rc",
+		}
+		cmd.Args = append(cmd.Args, startupFlags...)
+	}
+	cmd.Args = append(cmd.Args, args...)
+	for _, kv := range os.Environ() {
+		// Filter environment variables set by the bazel test wrapper script.
+		// These confuse recursive invocations of Bazel.
+		if strings.HasPrefix(kv, "TEST_") || strings.HasPrefix(kv, "RUNFILES_") {
+			continue
+		}
+		cmd.Env = append(cmd.Env, kv)
+	}
+	return cmd
+}
+
+// RunBazel invokes a bazel command with a list of arguments.
+//
+// If the command starts but exits with a non-zero status, a *StderrExitError
+// will be returned which wraps the original *exec.ExitError.
+func RunBazel(args ...string) error {
+ cmd := BazelCmd(args...)
+
+ buf := &bytes.Buffer{}
+ cmd.Stderr = buf
+ err := cmd.Run()
+ if eErr, ok := err.(*exec.ExitError); ok {
+ eErr.Stderr = buf.Bytes()
+ err = &StderrExitError{Err: eErr}
+ }
+ return err
+}
+
+// BazelOutput invokes a bazel command with a list of arguments and returns
+// the content of stdout.
+//
+// If the command starts but exits with a non-zero status, a *StderrExitError
+// will be returned which wraps the original *exec.ExitError.
+func BazelOutput(args ...string) ([]byte, error) {
+ cmd := BazelCmd(args...)
+ stdout := &bytes.Buffer{}
+ stderr := &bytes.Buffer{}
+ cmd.Stdout = stdout
+ cmd.Stderr = stderr
+ err := cmd.Run()
+ if eErr, ok := err.(*exec.ExitError); ok {
+ eErr.Stderr = stderr.Bytes()
+ err = &StderrExitError{Err: eErr}
+ }
+ return stdout.Bytes(), err
+}
+
+// StderrExitError wraps *exec.ExitError and prints the complete stderr output
+// from a command.
+type StderrExitError struct {
+ Err *exec.ExitError
+}
+
+func (e *StderrExitError) Error() string {
+ sb := &strings.Builder{}
+ sb.Write(e.Err.Stderr)
+ sb.WriteString(e.Err.Error())
+ return sb.String()
+}
+
+// Unwrap returns the wrapped *exec.ExitError so callers can use
+// errors.Is and errors.As.
+func (e *StderrExitError) Unwrap() error {
+	return e.Err
+}
+
+// setupWorkspace builds the synthetic test workspace: it locates a
+// persistent cache directory, extracts args.Main into a "main" workspace
+// directory, copies or links runfile arguments into sibling workspace
+// directories, and synthesizes a WORKSPACE file when one was not
+// provided. It returns the main workspace directory and a cleanup
+// function; on error, cleanup already performed is undone and a no-op
+// cleanup function is returned.
+func setupWorkspace(args Args, files []string) (dir string, cleanup func() error, err error) {
+	var cleanups []func() error
+	// cleanup runs the registered cleanup steps in reverse order and
+	// returns the first error encountered.
+	cleanup = func() error {
+		var firstErr error
+		for i := len(cleanups) - 1; i >= 0; i-- {
+			if err := cleanups[i](); err != nil && firstErr == nil {
+				firstErr = err
+			}
+		}
+		return firstErr
+	}
+	defer func() {
+		if err != nil {
+			cleanup()
+			cleanup = func() error { return nil }
+		}
+	}()
+
+	// Find a suitable cache directory. We want something persistent where we
+	// can store a bazel output base across test runs, even for multiple tests.
+	var cacheDir, outBaseDir string
+	if tmpDir := os.Getenv("TEST_TMPDIR"); tmpDir != "" {
+		// TEST_TMPDIR is set by Bazel's test wrapper. Bazel itself uses this to
+		// detect that it's run by a test. When invoked like this, Bazel sets
+		// its output base directory to a temporary directory. This wastes a lot
+		// of time (a simple test takes 45s instead of 3s). We use TEST_TMPDIR
+		// to find a persistent location in the execroot. We won't pass TEST_TMPDIR
+		// to bazel in RunBazel.
+		tmpDir = filepath.Clean(tmpDir)
+		if i := strings.Index(tmpDir, string(os.PathSeparator)+"execroot"+string(os.PathSeparator)); i >= 0 {
+			outBaseDir = tmpDir[:i]
+			outputUserRoot = filepath.Dir(outBaseDir)
+			cacheDir = filepath.Join(outBaseDir, "bazel_testing")
+		} else {
+			cacheDir = filepath.Join(tmpDir, "bazel_testing")
+		}
+	} else {
+		// The test is not invoked by Bazel, so just use the user's cache.
+		cacheDir, err = os.UserCacheDir()
+		if err != nil {
+			return "", cleanup, err
+		}
+		cacheDir = filepath.Join(cacheDir, "bazel_testing")
+	}
+
+	// TODO(jayconrod): any other directories needed for caches?
+	execDir := filepath.Join(cacheDir, "bazel_go_test")
+	if err := os.RemoveAll(execDir); err != nil {
+		return "", cleanup, err
+	}
+	cleanups = append(cleanups, func() error { return os.RemoveAll(execDir) })
+
+	// Create the workspace directory.
+	mainDir := filepath.Join(execDir, "main")
+	if err := os.MkdirAll(mainDir, 0777); err != nil {
+		return "", cleanup, err
+	}
+
+	// Create a .bazelrc file if GO_BAZEL_TEST_BAZELFLAGS is set.
+	// The test can override this with its own .bazelrc or with flags in commands.
+	if flags := os.Getenv("GO_BAZEL_TEST_BAZELFLAGS"); flags != "" {
+		bazelrcPath := filepath.Join(mainDir, ".bazelrc")
+		content := "build " + flags
+		if err := ioutil.WriteFile(bazelrcPath, []byte(content), 0666); err != nil {
+			return "", cleanup, err
+		}
+	}
+
+	// Extract test files for the main workspace.
+	if err := extractTxtar(mainDir, args.Main); err != nil {
+		return "", cleanup, fmt.Errorf("building main workspace: %v", err)
+	}
+
+	// If some of the path arguments are missing an explicit workspace,
+	// read the workspace name from WORKSPACE. We need this to map arguments
+	// to runfiles in specific workspaces.
+	haveDefaultWorkspace := false
+	var defaultWorkspaceName string
+	for _, argPath := range files {
+		workspace, _, err := parseLocationArg(argPath)
+		if err == nil && workspace == "" {
+			haveDefaultWorkspace = true
+			cleanPath := path.Clean(argPath)
+			if cleanPath == "WORKSPACE" {
+				defaultWorkspaceName, err = loadWorkspaceName(cleanPath)
+				if err != nil {
+					return "", cleanup, fmt.Errorf("could not load default workspace name: %v", err)
+				}
+				break
+			}
+		}
+	}
+	if haveDefaultWorkspace && defaultWorkspaceName == "" {
+		return "", cleanup, fmt.Errorf("found files from default workspace, but not WORKSPACE")
+	}
+
+	// Index runfiles by workspace and short path. We need this to determine
+	// destination paths when we copy or link files.
+	runfiles, err := bazel.ListRunfiles()
+	if err != nil {
+		return "", cleanup, err
+	}
+
+	type runfileKey struct{ workspace, short string }
+	runfileMap := make(map[runfileKey]string)
+	for _, rf := range runfiles {
+		runfileMap[runfileKey{rf.Workspace, rf.ShortPath}] = rf.Path
+	}
+
+	// Copy or link file arguments from runfiles into fake workspace directories.
+	// Keep track of the workspace names we see, since we'll generate a WORKSPACE
+	// with local_repository rules later.
+	workspaceNames := make(map[string]bool)
+	for _, argPath := range files {
+		workspace, shortPath, err := parseLocationArg(argPath)
+		if err != nil {
+			return "", cleanup, err
+		}
+		if workspace == "" {
+			workspace = defaultWorkspaceName
+		}
+		workspaceNames[workspace] = true
+
+		srcPath, ok := runfileMap[runfileKey{workspace, shortPath}]
+		if !ok {
+			return "", cleanup, fmt.Errorf("unknown runfile: %s", argPath)
+		}
+		dstPath := filepath.Join(execDir, workspace, shortPath)
+		if err := copyOrLink(dstPath, srcPath); err != nil {
+			return "", cleanup, err
+		}
+	}
+
+	// If there's no WORKSPACE file, create one.
+	workspacePath := filepath.Join(mainDir, "WORKSPACE")
+	if _, err := os.Stat(workspacePath); os.IsNotExist(err) {
+		w, err := os.Create(workspacePath)
+		if err != nil {
+			return "", cleanup, err
+		}
+		// Report a Close error through the named return only if nothing
+		// else failed first.
+		defer func() {
+			if cerr := w.Close(); err == nil && cerr != nil {
+				err = cerr
+			}
+		}()
+		info := workspaceTemplateInfo{
+			Suffix: args.WorkspaceSuffix,
+			Nogo:   args.Nogo,
+		}
+		for name := range workspaceNames {
+			info.WorkspaceNames = append(info.WorkspaceNames, name)
+		}
+		sort.Strings(info.WorkspaceNames)
+		if outBaseDir != "" {
+			goSDKPath := filepath.Join(outBaseDir, "external", "go_sdk")
+			rel, err := filepath.Rel(mainDir, goSDKPath)
+			if err != nil {
+				return "", cleanup, fmt.Errorf("could not find relative path from %q to %q for go_sdk", mainDir, goSDKPath)
+			}
+			rel = filepath.ToSlash(rel)
+			info.GoSDKPath = rel
+		}
+		if err := defaultWorkspaceTpl.Execute(w, info); err != nil {
+			return "", cleanup, err
+		}
+	}
+
+	return mainDir, cleanup, nil
+}
+
+// extractTxtar writes each file from the txtar archive txt into dir,
+// creating parent directories as needed.
+func extractTxtar(dir, txt string) error {
+	archive := txtar.Parse([]byte(txt))
+	for _, file := range archive.Files {
+		if parent := filepath.Dir(file.Name); parent != "." {
+			if err := os.MkdirAll(filepath.Join(dir, parent), 0777); err != nil {
+				return err
+			}
+		}
+		outPath := filepath.Join(dir, file.Name)
+		if err := ioutil.WriteFile(outPath, file.Data, 0666); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// parseLocationArg parses a runfile path argument produced by
+// $(locations ...) expansion. Paths beginning with "external/" belong to
+// the named external workspace; any other path belongs to the default
+// (main) workspace, reported as workspace == "".
+func parseLocationArg(arg string) (workspace, shortPath string, err error) {
+	cleanPath := path.Clean(arg)
+	if !strings.HasPrefix(cleanPath, "external/") {
+		return "", cleanPath, nil
+	}
+	// BUG FIX: the separator index must be computed on cleanPath, which
+	// is what we slice below. The original indexed into arg, which can
+	// differ from cleanPath (e.g. "external/./ws/file"), producing a
+	// corrupted workspace name and short path.
+	i := strings.IndexByte(cleanPath[len("external/"):], '/')
+	if i < 0 {
+		return "", "", fmt.Errorf("unexpected file (missing / after external/): %s", arg)
+	}
+	i += len("external/")
+	workspace = cleanPath[len("external/"):i]
+	shortPath = cleanPath[i+1:]
+	return workspace, shortPath, nil
+}
+
+// loadWorkspaceName extracts the workspace name from a WORKSPACE file,
+// resolving the path through the runfiles tree when possible.
+func loadWorkspaceName(workspacePath string) (string, error) {
+	if runfilePath, err := bazel.Runfile(workspacePath); err == nil {
+		workspacePath = runfilePath
+	}
+	workspaceData, err := ioutil.ReadFile(workspacePath)
+	if err != nil {
+		return "", err
+	}
+	nameRe := regexp.MustCompile(`(?m)^workspace\(\s*name\s*=\s*("[^"]*"|'[^']*')\s*,?\s*\)\s*$`)
+	match := nameRe.FindSubmatchIndex(workspaceData)
+	if match == nil {
+		return "", fmt.Errorf("%s: workspace name not set", workspacePath)
+	}
+	// The capture group includes the quotes; strip them.
+	name := string(workspaceData[match[2]+1 : match[3]-1])
+	if name == "" {
+		return "", fmt.Errorf("%s: workspace name is empty", workspacePath)
+	}
+	return name, nil
+}
+
+type workspaceTemplateInfo struct {
+ WorkspaceNames []string
+ GoSDKPath string
+ Nogo string
+ Suffix string
+}
+
+var defaultWorkspaceTpl = template.Must(template.New("").Parse(`
+{{range .WorkspaceNames}}
+local_repository(
+ name = "{{.}}",
+ path = "../{{.}}",
+)
+{{end}}
+
+{{if not .GoSDKPath}}
+load("@io_bazel_rules_go//go:deps.bzl", "go_rules_dependencies", "go_register_toolchains")
+
+go_rules_dependencies()
+
+go_register_toolchains(go_version = "host")
+{{else}}
+local_repository(
+ name = "local_go_sdk",
+ path = "{{.GoSDKPath}}",
+)
+
+load("@io_bazel_rules_go//go:deps.bzl", "go_rules_dependencies", "go_register_toolchains", "go_wrap_sdk")
+
+go_rules_dependencies()
+
+go_wrap_sdk(
+ name = "go_sdk",
+ root_file = "@local_go_sdk//:ROOT",
+)
+
+go_register_toolchains({{if .Nogo}}nogo = "{{.Nogo}}"{{end}})
+{{end}}
+{{.Suffix}}
+`))
+
+// copyOrLink makes srcPath available at dstPath. On Windows, where
+// creating symlinks may require extra privileges, the file is copied;
+// on other systems a symlink to the absolute source path is created.
+func copyOrLink(dstPath, srcPath string) error {
+	if err := os.MkdirAll(filepath.Dir(dstPath), 0777); err != nil {
+		return err
+	}
+
+	// copyFile copies srcPath to dstPath, reporting a Close error on the
+	// destination only when no earlier error occurred. (Renamed from
+	// "copy" to avoid shadowing the builtin.)
+	copyFile := func(dstPath, srcPath string) (err error) {
+		src, err := os.Open(srcPath)
+		if err != nil {
+			return err
+		}
+		defer src.Close()
+
+		dst, err := os.Create(dstPath)
+		if err != nil {
+			return err
+		}
+		defer func() {
+			if cerr := dst.Close(); err == nil && cerr != nil {
+				err = cerr
+			}
+		}()
+
+		_, err = io.Copy(dst, src)
+		return err
+	}
+
+	if runtime.GOOS == "windows" {
+		return copyFile(dstPath, srcPath)
+	}
+	absSrcPath, err := filepath.Abs(srcPath)
+	if err != nil {
+		return err
+	}
+	return os.Symlink(absSrcPath, dstPath)
+}
diff --git a/go/tools/bazel_testing/def.bzl b/go/tools/bazel_testing/def.bzl
new file mode 100644
index 00000000..d097027b
--- /dev/null
+++ b/go/tools/bazel_testing/def.bzl
@@ -0,0 +1,62 @@
+# Copyright 2019 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load("//go:def.bzl", "go_test")
+
+def go_bazel_test(rule_files = None, **kwargs):
+    """go_bazel_test is a wrapper for go_test that simplifies the use of
+    //go/tools/bazel_testing. Tests may be written
+    that don't explicitly depend on bazel_testing or rules_go files.
+    """
+
+    if not rule_files:
+        rule_files = [Label("//:all_files")]
+
+    # Add dependency on bazel_testing library.
+    # BUG FIX: list attributes passed in from a BUILD file are frozen in
+    # Starlark, so appending to them in place fails. Build fresh lists
+    # instead of mutating kwargs values.
+    deps = list(kwargs.get("deps", []))
+    bazel_testing_library = "@io_bazel_rules_go//go/tools/bazel_testing"
+    if bazel_testing_library not in deps:
+        deps.append(bazel_testing_library)
+    kwargs["deps"] = deps
+
+    # Add data dependency on rules_go files. bazel_testing will copy or link
+    # these files in an external repo.
+    kwargs["data"] = list(kwargs.get("data", [])) + rule_files
+
+    # Add paths to rules_go files to arguments. bazel_testing will copy or link
+    # these files.
+    kwargs["args"] = (["-begin_files"] +
+                      ["$(locations {})".format(t) for t in rule_files] +
+                      ["-end_files"] +
+                      list(kwargs.get("args", [])))
+
+    # Set rundir to the workspace root directory to ensure relative paths
+    # are interpreted correctly.
+    kwargs.setdefault("rundir", ".")
+
+    # Set tags.
+    # local: don't run in sandbox or on remote executor.
+    # exclusive: run one test at a time, since they share a Bazel
+    # output directory. If we don't do this, tests must extract the bazel
+    # installation and start with a fresh cache every time, making them
+    # much slower.
+    tags = list(kwargs.get("tags", []))
+    if "local" not in tags:
+        tags.append("local")
+    if "exclusive" not in tags:
+        tags.append("exclusive")
+    kwargs["tags"] = tags
+
+    go_test(**kwargs)
diff --git a/go/tools/builders/BUILD.bazel b/go/tools/builders/BUILD.bazel
new file mode 100644
index 00000000..28724714
--- /dev/null
+++ b/go/tools/builders/BUILD.bazel
@@ -0,0 +1,173 @@
+# BUILD definitions for the "builders": helper binaries and sources invoked
+# by the Go rules' actions (compile, link, cgo, nogo, stdlib handling).
+load("//go:def.bzl", "go_binary", "go_source", "go_test")
+load("//go/private/rules:transition.bzl", "go_reset_target")
+
+# Unit tests for individual builder source files; each compiles only the
+# sources it exercises.
+go_test(
+    name = "filter_test",
+    size = "small",
+    srcs = [
+        "filter.go",
+        "filter_test.go",
+        "read.go",
+    ],
+)
+
+go_test(
+    name = "cover_test",
+    size = "small",
+    srcs = [
+        "cover.go",
+        "cover_test.go",
+        "edit.go",
+        "env.go",
+        "flags.go",
+    ],
+)
+
+go_test(
+    name = "stdliblist_test",
+    size = "small",
+    srcs = [
+        "env.go",
+        "flags.go",
+        "replicate.go",
+        "stdliblist.go",
+        "stdliblist_test.go",
+    ],
+    # Needs the Go SDK files at run time; runs from the workspace root.
+    data = ["@go_sdk//:files"],
+    rundir = ".",
+)
+
+go_test(
+    name = "nolint_test",
+    size = "small",
+    srcs = [
+        "nolint.go",
+        "nolint_test.go",
+    ],
+)
+
+# Sources compiled into the main builder binary; the path helper is chosen
+# per host platform via select().
+filegroup(
+    name = "builder_srcs",
+    srcs = [
+        "ar.go",
+        "asm.go",
+        "builder.go",
+        "cgo2.go",
+        "compilepkg.go",
+        "cover.go",
+        "edit.go",
+        "embedcfg.go",
+        "env.go",
+        "filter.go",
+        "filter_buildid.go",
+        "flags.go",
+        "generate_nogo_main.go",
+        "generate_test_main.go",
+        "importcfg.go",
+        "link.go",
+        "pack.go",
+        "read.go",
+        "replicate.go",
+        "stdlib.go",
+        "stdliblist.go",
+    ] + select({
+        "@bazel_tools//src/conditions:windows": ["path_windows.go"],
+        "//conditions:default": ["path.go"],
+    }),
+    visibility = ["//visibility:public"],
+)
+
+go_binary(
+    name = "embed",
+    srcs = ["embed.go"],
+    visibility = ["//visibility:public"],
+)
+
+go_source(
+    name = "nogo_srcs",
+    srcs = [
+        "env.go",
+        "flags.go",
+        "nogo_main.go",
+        "nogo_typeparams_go117.go",
+        "nogo_typeparams_go118.go",
+        "nolint.go",
+        "pack.go",
+    ],
+    # //go/tools/builders:nogo_srcs is considered a different target by
+    # Bazel's visibility check than
+    # @io_bazel_rules_go//go/tools/builders:nogo_srcs. Only the latter is
+    # allowed to depend on
+    # @org_golang_x_tools//go/analysis/internal/facts:go_tool_library.
+    tags = ["manual"],
+    visibility = ["//visibility:public"],
+    deps = [
+        "@org_golang_x_tools//go/analysis",
+        "@org_golang_x_tools//go/gcexportdata",
+        "@org_golang_x_tools//internal/facts",
+    ],
+)
+
+go_binary(
+    name = "go_path-bin",
+    srcs = [
+        "env.go",
+        "flags.go",
+        "go_path.go",
+    ],
+    visibility = ["//visibility:public"],
+)
+
+# Re-targets go_path-bin to the host configuration (see transition.bzl).
+go_reset_target(
+    name = "go_path",
+    dep = ":go_path-bin",
+    visibility = ["//visibility:public"],
+)
+
+go_binary(
+    name = "info",
+    srcs = [
+        "env.go",
+        "flags.go",
+        "info.go",
+    ],
+    visibility = ["//visibility:public"],
+)
+
+go_binary(
+    name = "md5sum",
+    srcs = [
+        "md5sum.go",
+    ],
+    visibility = ["//visibility:public"],
+)
+
+go_binary(
+    name = "go-protoc-bin",
+    srcs = [
+        "env.go",
+        "flags.go",
+        "protoc.go",
+    ],
+    visibility = ["//visibility:private"],
+)
+
+go_reset_target(
+    name = "go-protoc",
+    dep = ":go-protoc-bin",
+    visibility = ["//visibility:public"],
+)
+
+filegroup(
+    name = "all_builder_srcs",
+    testonly = True,
+    srcs = glob(["*.go"]),
+    visibility = ["//:__subpackages__"],
+)
+
+filegroup(
+    name = "all_files",
+    testonly = True,
+    srcs = glob(["**"]),
+    visibility = ["//visibility:public"],
+)
diff --git a/go/tools/builders/ar.go b/go/tools/builders/ar.go
new file mode 100644
index 00000000..2f4b36c8
--- /dev/null
+++ b/go/tools/builders/ar.go
@@ -0,0 +1,104 @@
+// Copyright 2018 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// header is the fixed-size per-entry header of a Unix ar archive.
+// All fields are fixed-width, space-padded ASCII text.
+type header struct {
+	NameRaw     [16]byte // entry file name, space padded
+	ModTimeRaw  [12]byte // modification time (zeroed by deterministic())
+	OwnerIdRaw  [6]byte  // owner uid (zeroed by deterministic())
+	GroupIdRaw  [6]byte  // group gid (zeroed by deterministic())
+	FileModeRaw [8]byte  // file mode (zeroed by deterministic())
+	FileSizeRaw [10]byte // file size in bytes, decimal (parsed by size())
+	EndRaw      [2]byte  // entry header terminator
+}
+
+// name returns the entry's file name with trailing space padding removed.
+func (h *header) name() string {
+	end := len(h.NameRaw)
+	for end > 0 && h.NameRaw[end-1] == ' ' {
+		end--
+	}
+	return string(h.NameRaw[:end])
+}
+
+// size returns the entry's payload size in bytes, parsed from the
+// space-padded decimal size field. It panics on a malformed field, since
+// that indicates a corrupt archive.
+func (h *header) size() int64 {
+	trimmed := strings.TrimRight(string(h.FileSizeRaw[:]), " ")
+	n, err := strconv.Atoi(trimmed)
+	if err != nil {
+		panic(err)
+	}
+	return int64(n)
+}
+
+// next returns the distance from the end of this header to the next entry
+// header: the payload size rounded up to the ar format's 2-byte alignment.
+func (h *header) next() int64 {
+	n := h.size()
+	if n%2 != 0 {
+		n++
+	}
+	return n
+}
+
+// deterministic returns a copy of h with every non-reproducible field
+// (mtime, uid, gid, mode) overwritten with zeros. GNU ar clears the file
+// mode as well, so we match that behavior.
+func (h *header) deterministic() *header {
+	clean := *h
+	for _, field := range [][]byte{
+		clean.ModTimeRaw[:],
+		clean.OwnerIdRaw[:],
+		clean.GroupIdRaw[:],
+		clean.FileModeRaw[:],
+	} {
+		copy(field, zeroBytes)
+	}
+	return &clean
+}
+
+// stripArMetadata strips the archive metadata of non-deterministic data:
+//   - Timestamps
+//   - User IDs
+//   - Group IDs
+//   - File Modes
+//
+// The archive is modified in place.
+func stripArMetadata(archivePath string) error {
+	archive, err := os.OpenFile(archivePath, os.O_RDWR, 0)
+	if err != nil {
+		return err
+	}
+	defer archive.Close()
+
+	// Verify the global archive magic before rewriting anything.
+	magic := make([]byte, len(arHeader))
+	if _, err := io.ReadFull(archive, magic); err != nil {
+		return err
+	}
+
+	if string(magic) != arHeader {
+		return fmt.Errorf("%s is not an archive", archivePath)
+	}
+
+	for {
+		hdr := &header{}
+		if err := binary.Read(archive, binary.BigEndian, hdr); err == io.EOF {
+			// Clean end of archive: every entry header was rewritten.
+			return nil
+		} else if err != nil {
+			return err
+		}
+
+		// Seek back to the beginning of the header and overwrite it with the
+		// scrubbed copy. The seek error must be checked: if it failed and we
+		// wrote anyway, we would clobber the wrong bytes of the archive.
+		if _, err := archive.Seek(-entryLength, io.SeekCurrent); err != nil {
+			return err
+		}
+		if err := binary.Write(archive, binary.BigEndian, hdr.deterministic()); err != nil {
+			return err
+		}
+
+		// Skip the entry's (2-byte aligned) payload to the next header.
+		if _, err := archive.Seek(hdr.next(), io.SeekCurrent); err != nil {
+			return err
+		}
+	}
+}
diff --git a/go/tools/builders/asm.go b/go/tools/builders/asm.go
new file mode 100644
index 00000000..3d64c9ba
--- /dev/null
+++ b/go/tools/builders/asm.go
@@ -0,0 +1,138 @@
+// Copyright 2017 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "go/build"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+)
+
+// ASM_DEFINES holds -D flags passed to every invocation of the Go
+// assembler, defining GOOS_*, GOARCH_*, and GOOS_GOARCH_* macros for the
+// configured target platform so .s files can compile conditionally.
+var ASM_DEFINES = []string{
+	"-D", "GOOS_" + build.Default.GOOS,
+	"-D", "GOARCH_" + build.Default.GOARCH,
+	"-D", "GOOS_GOARCH_" + build.Default.GOOS + "_" + build.Default.GOARCH,
+}
+
+// buildSymabisFile generates a file from assembly files that is consumed
+// by the compiler. This is only needed in go1.12+ when there is at least one
+// .s file. If the symabis file is not needed, no file will be generated,
+// and "", nil will be returned.
+//
+// On success, the caller is responsible for deleting the returned temporary
+// file. As a side effect, an empty header file is created at asmhdr so the
+// assembler can include it before the compiler writes the real one.
+func buildSymabisFile(goenv *env, sFiles, hFiles []fileInfo, asmhdr string) (string, error) {
+	if len(sFiles) == 0 {
+		// No assembly sources: no symabis file needed.
+		return "", nil
+	}
+
+	// Check version. The symabis file is only required and can only be built
+	// starting at go1.12.
+	version := runtime.Version()
+	if strings.HasPrefix(version, "go1.") {
+		minor := version[len("go1."):]
+		if i := strings.IndexByte(minor, '.'); i >= 0 {
+			minor = minor[:i]
+		}
+		n, err := strconv.Atoi(minor)
+		if err == nil && n <= 11 {
+			return "", nil
+		}
+		// Fall through if the version can't be parsed. It's probably a newer
+		// development version.
+	}
+
+	// Create an empty go_asm.h file. The compiler will write this later, but
+	// we need one to exist now.
+	asmhdrFile, err := os.Create(asmhdr)
+	if err != nil {
+		return "", err
+	}
+	if err := asmhdrFile.Close(); err != nil {
+		return "", err
+	}
+	asmhdrDir := filepath.Dir(asmhdr)
+
+	// Create a temporary output file. The caller is responsible for deleting it.
+	var symabisName string
+	symabisFile, err := ioutil.TempFile("", "symabis")
+	if err != nil {
+		return "", err
+	}
+	symabisName = symabisFile.Name()
+	symabisFile.Close()
+
+	// Run the assembler.
+	wd, err := os.Getwd()
+	if err != nil {
+		return symabisName, err
+	}
+	asmargs := goenv.goTool("asm")
+	asmargs = append(asmargs, "-trimpath", wd)
+	asmargs = append(asmargs, "-I", wd)
+	asmargs = append(asmargs, "-I", filepath.Join(os.Getenv("GOROOT"), "pkg", "include"))
+	asmargs = append(asmargs, "-I", asmhdrDir)
+	// Add each header's directory as an include path exactly once.
+	seenHdrDirs := map[string]bool{wd: true, asmhdrDir: true}
+	for _, hFile := range hFiles {
+		hdrDir := filepath.Dir(abs(hFile.filename))
+		if !seenHdrDirs[hdrDir] {
+			asmargs = append(asmargs, "-I", hdrDir)
+			seenHdrDirs[hdrDir] = true
+		}
+	}
+	asmargs = append(asmargs, ASM_DEFINES...)
+	// -gensymabis makes the assembler emit symbol ABIs only, not objects.
+	asmargs = append(asmargs, "-gensymabis", "-o", symabisName, "--")
+	for _, sFile := range sFiles {
+		asmargs = append(asmargs, sFile.filename)
+	}
+
+	err = goenv.runCommand(asmargs)
+	return symabisName, err
+}
+
+// asmFile assembles a single .s file into an object file at outPath.
+func asmFile(goenv *env, srcPath, packagePath string, asmFlags []string, outPath string) error {
+	args := goenv.goTool("asm")
+	args = append(args, asmFlags...)
+	// The package path has to be specified as of Go 1.19 or the resulting
+	// object will be unlinkable, but the -p flag is also only available
+	// since Go 1.19.
+	if packagePath != "" && isGo119OrHigher() {
+		args = append(args, "-p", packagePath)
+	}
+	args = append(args, ASM_DEFINES...)
+	args = append(args, "-trimpath", ".", "-o", outPath, "--", srcPath)
+	absArgs(args, []string{"-I", "-o", "-trimpath"})
+	return goenv.runCommand(args)
+}
+
+// goMinorVersionRegexp extracts the minor version from strings like "go1.19.2".
+var goMinorVersionRegexp = regexp.MustCompile(`^go1\.(\d+)`)
+
+// isGo119OrHigher reports whether the running toolchain is Go 1.19 or newer.
+// Unparseable versions (e.g. devel builds) are assumed to be modern.
+func isGo119OrHigher() bool {
+	m := goMinorVersionRegexp.FindStringSubmatch(runtime.Version())
+	if m == nil {
+		return true
+	}
+	minor, err := strconv.Atoi(m[1])
+	return err != nil || minor >= 19
+}
diff --git a/go/tools/builders/builder.go b/go/tools/builders/builder.go
new file mode 100644
index 00000000..5d691839
--- /dev/null
+++ b/go/tools/builders/builder.go
@@ -0,0 +1,64 @@
+// Copyright 2018 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// builder implements most of the actions for Bazel to compile and link
+// go code. We use a single binary for most actions, since this reduces
+// the number of inputs needed for each action and allows us to build
+// multiple related files in a single action.
+
+package main
+
+import (
+ "log"
+ "os"
+)
+
+func main() {
+ log.SetFlags(0)
+ log.SetPrefix("builder: ")
+
+ args, _, err := expandParamsFiles(os.Args[1:])
+ if err != nil {
+ log.Fatal(err)
+ }
+ if len(args) == 0 {
+ log.Fatalf("usage: %s verb options...", os.Args[0])
+ }
+ verb, rest := args[0], args[1:]
+
+ var action func(args []string) error
+ switch verb {
+ case "compilepkg":
+ action = compilePkg
+ case "filterbuildid":
+ action = filterBuildID
+ case "gentestmain":
+ action = genTestMain
+ case "link":
+ action = link
+ case "gennogomain":
+ action = genNogoMain
+ case "stdlib":
+ action = stdlib
+ case "stdliblist":
+ action = stdliblist
+ default:
+ log.Fatalf("unknown action: %s", verb)
+ }
+ log.SetPrefix(verb + ": ")
+
+ if err := action(rest); err != nil {
+ log.Fatal(err)
+ }
+}
diff --git a/go/tools/builders/cgo2.go b/go/tools/builders/cgo2.go
new file mode 100644
index 00000000..fc2876a9
--- /dev/null
+++ b/go/tools/builders/cgo2.go
@@ -0,0 +1,397 @@
+// Copyright 2019 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// cgo2.go provides new cgo functionality for use by the GoCompilePkg action.
+// We can't use the functionality in cgo.go, since it relies too heavily
+// on logic in cgo.bzl. Ideally, we'd be able to replace cgo.go with this
+// file eventually, but not until Bazel gives us enough toolchain information
+// to compile ObjC files.
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+// cgo2 processes a set of mixed source files with cgo.
+//
+// It returns the directory holding the gathered/generated sources, the full
+// list of Go files to compile (originals plus cgo-generated), and the C
+// object files to pack into the archive. As side effects it sets CGO_LDFLAGS
+// in the environment and, if cgoExportHPath is non-empty, copies the
+// generated _cgo_export.h there.
+func cgo2(goenv *env, goSrcs, cgoSrcs, cSrcs, cxxSrcs, objcSrcs, objcxxSrcs, sSrcs, hSrcs []string, packagePath, packageName string, cc string, cppFlags, cFlags, cxxFlags, objcFlags, objcxxFlags, ldFlags []string, cgoExportHPath string) (srcDir string, allGoSrcs, cObjs []string, err error) {
+	// Report an error if the C/C++ toolchain wasn't configured.
+	if cc == "" {
+		err := cgoError(cgoSrcs[:])
+		err = append(err, cSrcs...)
+		err = append(err, cxxSrcs...)
+		err = append(err, objcSrcs...)
+		err = append(err, objcxxSrcs...)
+		err = append(err, sSrcs...)
+		return "", nil, nil, err
+	}
+
+	// If we only have C/C++ sources without cgo, just compile and pack them
+	// without generating code. The Go command forbids this, but we've
+	// historically allowed it.
+	// TODO(jayconrod): this doesn't write CGO_LDFLAGS into the archive. We
+	// might miss dependencies like -lstdc++ if they aren't referenced in
+	// some other way.
+	if len(cgoSrcs) == 0 {
+		cObjs, err = compileCSources(goenv, cSrcs, cxxSrcs, objcSrcs, objcxxSrcs, sSrcs, hSrcs, cc, cppFlags, cFlags, cxxFlags, objcFlags, objcxxFlags)
+		return ".", nil, cObjs, err
+	}
+
+	workDir, cleanup, err := goenv.workDir()
+	if err != nil {
+		return "", nil, nil, err
+	}
+	defer cleanup()
+
+	// cgo2 will gather sources into a single temporary directory, since nogo
+	// scanners might want to include or exclude these sources we need to ensure
+	// that a fragment of the path is stable and human friendly enough to be
+	// referenced in nogo configuration.
+	workDir = filepath.Join(workDir, "cgo", packagePath)
+	if err := os.MkdirAll(workDir, 0700); err != nil {
+		return "", nil, nil, err
+	}
+
+	// Filter out -lstdc++ and -lc++ from ldflags if we don't have C++ sources,
+	// and set CGO_LDFLAGS. These flags get written as special comments into cgo
+	// generated sources. The compiler encodes those flags in the compiled .a
+	// file, and the linker passes them on to the external linker.
+	haveCxx := len(cxxSrcs)+len(objcxxSrcs) > 0
+	if !haveCxx {
+		for _, f := range ldFlags {
+			if strings.HasSuffix(f, ".a") {
+				// These flags come from cdeps options. Assume C++.
+				haveCxx = true
+				break
+			}
+		}
+	}
+	var combinedLdFlags []string
+	if haveCxx {
+		combinedLdFlags = append(combinedLdFlags, ldFlags...)
+	} else {
+		for _, f := range ldFlags {
+			if f != "-lc++" && f != "-lstdc++" {
+				combinedLdFlags = append(combinedLdFlags, f)
+			}
+		}
+	}
+	combinedLdFlags = append(combinedLdFlags, defaultLdFlags()...)
+	os.Setenv("CGO_LDFLAGS", strings.Join(combinedLdFlags, " "))
+
+	// If cgo sources are in different directories, gather them into a temporary
+	// directory so we can use -srcdir.
+	srcDir = filepath.Dir(cgoSrcs[0])
+	srcsInSingleDir := true
+	for _, src := range cgoSrcs[1:] {
+		if filepath.Dir(src) != srcDir {
+			srcsInSingleDir = false
+			break
+		}
+	}
+
+	if srcsInSingleDir {
+		// Already co-located: pass base names relative to -srcdir.
+		for i := range cgoSrcs {
+			cgoSrcs[i] = filepath.Base(cgoSrcs[i])
+		}
+	} else {
+		srcDir = filepath.Join(workDir, "cgosrcs")
+		if err := os.Mkdir(srcDir, 0777); err != nil {
+			return "", nil, nil, err
+		}
+		copiedSrcs, err := gatherSrcs(srcDir, cgoSrcs)
+		if err != nil {
+			return "", nil, nil, err
+		}
+		cgoSrcs = copiedSrcs
+	}
+
+	// Generate Go and C code.
+	hdrDirs := map[string]bool{}
+	var hdrIncludes []string
+	for _, hdr := range hSrcs {
+		hdrDir := filepath.Dir(hdr)
+		if !hdrDirs[hdrDir] {
+			hdrDirs[hdrDir] = true
+			hdrIncludes = append(hdrIncludes, "-iquote", hdrDir)
+		}
+	}
+	hdrIncludes = append(hdrIncludes, "-iquote", workDir) // for _cgo_export.h
+
+	execRoot, err := bazelExecRoot()
+	if err != nil {
+		return "", nil, nil, err
+	}
+	// Trim the execroot from the //line comments emitted by cgo.
+	args := goenv.goTool("cgo", "-srcdir", srcDir, "-objdir", workDir, "-trimpath", execRoot)
+	if packagePath != "" {
+		args = append(args, "-importpath", packagePath)
+	}
+	args = append(args, "--")
+	args = append(args, cppFlags...)
+	args = append(args, hdrIncludes...)
+	args = append(args, cFlags...)
+	args = append(args, cgoSrcs...)
+	if err := goenv.runCommand(args); err != nil {
+		return "", nil, nil, err
+	}
+
+	if cgoExportHPath != "" {
+		if err := copyFile(filepath.Join(workDir, "_cgo_export.h"), cgoExportHPath); err != nil {
+			return "", nil, nil, err
+		}
+	}
+	// Collect the generated files: _cgo_gotypes.go/_cgo_export.c plus one
+	// .cgo1.go and .cgo2.c pair per input cgo source.
+	genGoSrcs := make([]string, 1+len(cgoSrcs))
+	genGoSrcs[0] = filepath.Join(workDir, "_cgo_gotypes.go")
+	genCSrcs := make([]string, 1+len(cgoSrcs))
+	genCSrcs[0] = filepath.Join(workDir, "_cgo_export.c")
+	for i, src := range cgoSrcs {
+		stem := strings.TrimSuffix(filepath.Base(src), ".go")
+		genGoSrcs[i+1] = filepath.Join(workDir, stem+".cgo1.go")
+		genCSrcs[i+1] = filepath.Join(workDir, stem+".cgo2.c")
+	}
+	cgoMainC := filepath.Join(workDir, "_cgo_main.c")
+
+	// Compile C, C++, Objective-C/C++, and assembly code.
+	defaultCFlags := defaultCFlags(workDir)
+	combinedCFlags := combineFlags(cppFlags, hdrIncludes, cFlags, defaultCFlags)
+	for _, lang := range []struct{ srcs, flags []string }{
+		{genCSrcs, combinedCFlags},
+		{cSrcs, combinedCFlags},
+		{cxxSrcs, combineFlags(cppFlags, hdrIncludes, cxxFlags, defaultCFlags)},
+		{objcSrcs, combineFlags(cppFlags, hdrIncludes, objcFlags, defaultCFlags)},
+		{objcxxSrcs, combineFlags(cppFlags, hdrIncludes, objcxxFlags, defaultCFlags)},
+		{sSrcs, nil},
+	} {
+		for _, src := range lang.srcs {
+			// Object names are _x0.o, _x1.o, ... in compilation order.
+			obj := filepath.Join(workDir, fmt.Sprintf("_x%d.o", len(cObjs)))
+			cObjs = append(cObjs, obj)
+			if err := cCompile(goenv, src, cc, lang.flags, obj); err != nil {
+				return "", nil, nil, err
+			}
+		}
+	}
+
+	mainObj := filepath.Join(workDir, "_cgo_main.o")
+	if err := cCompile(goenv, cgoMainC, cc, combinedCFlags, mainObj); err != nil {
+		return "", nil, nil, err
+	}
+
+	// Link cgo binary and use the symbols to generate _cgo_import.go.
+	mainBin := filepath.Join(workDir, "_cgo_.o") // .o is a lie; it's an executable
+	args = append([]string{cc, "-o", mainBin, mainObj}, cObjs...)
+	args = append(args, combinedLdFlags...)
+	var originalErrBuf bytes.Buffer
+	if err := goenv.runCommandToFile(os.Stdout, &originalErrBuf, args); err != nil {
+		// If linking the binary for cgo fails, this is usually because the
+		// object files reference external symbols that can't be resolved yet.
+		// Since the binary is only produced to have its symbols read by the cgo
+		// command, there is no harm in trying to build it allowing unresolved
+		// symbols - the real link that happens at the end will fail if they
+		// rightfully can't be resolved.
+		var allowUnresolvedSymbolsLdFlag string
+		switch os.Getenv("GOOS") {
+		case "windows":
+			// MinGW's linker doesn't seem to support --unresolved-symbols
+			// and MSVC isn't supported at all.
+			return "", nil, nil, err
+		case "darwin", "ios":
+			allowUnresolvedSymbolsLdFlag = "-Wl,-undefined,dynamic_lookup"
+		default:
+			allowUnresolvedSymbolsLdFlag = "-Wl,--unresolved-symbols=ignore-all"
+		}
+		// Print and return the original error if we can't link the binary with
+		// the additional linker flags as they may simply be incorrect for the
+		// particular compiler/linker pair and would obscure the true reason for
+		// the failure of the original command.
+		if err2 := goenv.runCommandToFile(
+			os.Stdout,
+			ioutil.Discard,
+			append(args, allowUnresolvedSymbolsLdFlag),
+		); err2 != nil {
+			os.Stderr.Write(relativizePaths(originalErrBuf.Bytes()))
+			return "", nil, nil, err
+		}
+		// Do not print the original error - rerunning the command with the
+		// additional linker flag fixed it.
+	}
+
+	cgoImportsGo := filepath.Join(workDir, "_cgo_imports.go")
+	args = goenv.goTool("cgo", "-dynpackage", packageName, "-dynimport", mainBin, "-dynout", cgoImportsGo)
+	if err := goenv.runCommand(args); err != nil {
+		return "", nil, nil, err
+	}
+	genGoSrcs = append(genGoSrcs, cgoImportsGo)
+
+	// Copy regular Go source files into the work directory so that we can
+	// use -trimpath=workDir.
+	goBases, err := gatherSrcs(workDir, goSrcs)
+	if err != nil {
+		return "", nil, nil, err
+	}
+
+	allGoSrcs = make([]string, len(goSrcs)+len(genGoSrcs))
+	for i := range goSrcs {
+		allGoSrcs[i] = filepath.Join(workDir, goBases[i])
+	}
+	copy(allGoSrcs[len(goSrcs):], genGoSrcs)
+	return workDir, allGoSrcs, cObjs, nil
+}
+
+// compileCSources compiles a list of C, C++, Objective-C, Objective-C++,
+// and assembly sources into .o files to be packed into the archive.
+// It does not run cgo. This is used for packages with "cgo = True" but
+// without any .go files that import "C". The Go command forbids this,
+// but we have historically allowed it.
+func compileCSources(goenv *env, cSrcs, cxxSrcs, objcSrcs, objcxxSrcs, sSrcs, hSrcs []string, cc string, cppFlags, cFlags, cxxFlags, objcFlags, objcxxFlags []string) (cObjs []string, err error) {
+	workDir, cleanup, err := goenv.workDir()
+	if err != nil {
+		return nil, err
+	}
+	defer cleanup()
+
+	// Add each header's directory as a quoted include path exactly once.
+	var hdrIncludes []string
+	seenDirs := make(map[string]bool)
+	for _, hdr := range hSrcs {
+		if dir := filepath.Dir(hdr); !seenDirs[dir] {
+			seenDirs[dir] = true
+			hdrIncludes = append(hdrIncludes, "-iquote", dir)
+		}
+	}
+
+	stdFlags := defaultCFlags(workDir)
+	groups := []struct{ srcs, flags []string }{
+		{cSrcs, combineFlags(cppFlags, hdrIncludes, cFlags, stdFlags)},
+		{cxxSrcs, combineFlags(cppFlags, hdrIncludes, cxxFlags, stdFlags)},
+		{objcSrcs, combineFlags(cppFlags, hdrIncludes, objcFlags, stdFlags)},
+		{objcxxSrcs, combineFlags(cppFlags, hdrIncludes, objcxxFlags, stdFlags)},
+		{sSrcs, nil},
+	}
+	for _, group := range groups {
+		for _, src := range group.srcs {
+			// Object names are _x0.o, _x1.o, ... in compilation order.
+			obj := filepath.Join(workDir, fmt.Sprintf("_x%d.o", len(cObjs)))
+			cObjs = append(cObjs, obj)
+			if err := cCompile(goenv, src, cc, group.flags, obj); err != nil {
+				return nil, err
+			}
+		}
+	}
+	return cObjs, nil
+}
+
+// combineFlags concatenates the given flag lists into one new slice,
+// pre-sized to avoid re-allocation.
+func combineFlags(lists ...[]string) []string {
+	total := 0
+	for _, l := range lists {
+		total += len(l)
+	}
+	out := make([]string, 0, total)
+	for _, l := range lists {
+		out = append(out, l...)
+	}
+	return out
+}
+
+// cCompile compiles a single C-family source file with the given compiler
+// and flags, writing the object file to out.
+func cCompile(goenv *env, src, cc string, flags []string, out string) error {
+	cmd := append([]string{cc}, flags...)
+	cmd = append(cmd, "-c", src, "-o", out)
+	return goenv.runCommand(cmd)
+}
+
+// defaultCFlags returns baseline C compiler flags: debug-path remapping for
+// reproducible builds, plus the platform's threading flag.
+func defaultCFlags(workDir string) []string {
+	flags := []string{
+		"-fdebug-prefix-map=" + abs(".") + "=.",
+		"-fdebug-prefix-map=" + workDir + "=.",
+	}
+	goos, goarch := os.Getenv("GOOS"), os.Getenv("GOARCH")
+	if goos == "darwin" || goos == "ios" {
+		return flags
+	}
+	if goos == "windows" && goarch == "amd64" {
+		return append(flags, "-mthreads")
+	}
+	return append(flags, "-pthread")
+}
+
+// defaultLdFlags returns baseline linker flags for the target platform
+// (threading support, plus log/dl libraries on Android).
+func defaultLdFlags() []string {
+	goos, goarch := os.Getenv("GOOS"), os.Getenv("GOARCH")
+	if goos == "android" {
+		return []string{"-llog", "-ldl"}
+	}
+	if goos == "darwin" || goos == "ios" {
+		return nil
+	}
+	if goos == "windows" && goarch == "amd64" {
+		return []string{"-mthreads"}
+	}
+	return []string{"-pthread"}
+}
+
+// gatherSrcs copies or links files listed in srcs into dir. This is needed
+// to effectively use -trimpath with generated sources. It's also needed by cgo.
+//
+// gatherSrcs returns the basenames of copied files in the directory.
+func gatherSrcs(dir string, srcs []string) ([]string, error) {
+ copiedBases := make([]string, len(srcs))
+ for i, src := range srcs {
+ base := filepath.Base(src)
+ ext := filepath.Ext(base)
+ stem := base[:len(base)-len(ext)]
+ var err error
+ for j := 1; j < 10000; j++ {
+ if err = copyOrLinkFile(src, filepath.Join(dir, base)); err == nil {
+ break
+ } else if !os.IsExist(err) {
+ return nil, err
+ } else {
+ base = fmt.Sprintf("%s_%d%s", stem, j, ext)
+ }
+ }
+ if err != nil {
+ return nil, fmt.Errorf("could not find unique name for file %s", src)
+ }
+ copiedBases[i] = base
+ }
+ return copiedBases, nil
+}
+
+// bazelExecRoot returns the parent of the working directory. Bazel runs the
+// builder in .../execroot/<workspace name>, so that parent is a prefix of
+// every possible source path, including external repositories.
+func bazelExecRoot() (string, error) {
+	wd, err := os.Getwd()
+	if err != nil {
+		return "", err
+	}
+	return filepath.Dir(wd), nil
+}
+
+// cgoError is the error reported when cgo processing is required but no C
+// toolchain is configured; it lists the files that needed processing.
+type cgoError []string
+
+func (e cgoError) Error() string {
+	var sb strings.Builder
+	sb.WriteString("CC is not set and files need to be processed with cgo:\n")
+	for _, f := range e {
+		fmt.Fprintf(&sb, "\t%s\n", f)
+	}
+	sb.WriteString("Ensure that 'cgo = True' is set and the C/C++ toolchain is configured.")
+	return sb.String()
+}
diff --git a/go/tools/builders/compilepkg.go b/go/tools/builders/compilepkg.go
new file mode 100644
index 00000000..6e21ca24
--- /dev/null
+++ b/go/tools/builders/compilepkg.go
@@ -0,0 +1,615 @@
+// Copyright 2019 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// compilepkg compiles a complete Go package from Go, C, and assembly files. It
+// supports cgo, coverage, and nogo. It is invoked by the Go rules as an action.
+package main
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "sort"
+ "strings"
+)
+
// nogoResult describes the outcome of a nogo (static analysis) run for a
// compiled package.
type nogoResult int

const (
	// nogoNotRun: nogo was not requested or had no sources to analyze.
	nogoNotRun nogoResult = iota
	// nogoError: NOTE(review): not set on any code path visible in this
	// file; presumably reserved for execution failures — confirm.
	nogoError
	// nogoFailed: runNogo returned an error (diagnostics reported or the
	// nogo process failed); see the status handling in compileArchive.
	nogoFailed
	// nogoSucceeded: nogo completed without reporting anything.
	nogoSucceeded
)
+
// compilePkg is the entry point of the GoCompilePkg action. It parses the
// command line (after expanding params files), resolves paths, filters and
// splits the source list by language, applies test-package filtering, and
// delegates the actual build to compileArchive. The argument order in the
// final call is positional and must match compileArchive's signature.
func compilePkg(args []string) error {
	// Parse arguments.
	args, _, err := expandParamsFiles(args)
	if err != nil {
		return err
	}

	fs := flag.NewFlagSet("GoCompilePkg", flag.ExitOnError)
	goenv := envFlags(fs)
	var unfilteredSrcs, coverSrcs, embedSrcs, embedLookupDirs, embedRoots, recompileInternalDeps multiFlag
	var deps archiveMultiFlag
	var importPath, packagePath, nogoPath, packageListPath, coverMode string
	var outPath, outFactsPath, cgoExportHPath string
	var testFilter string
	var gcFlags, asmFlags, cppFlags, cFlags, cxxFlags, objcFlags, objcxxFlags, ldFlags quoteMultiFlag
	var coverFormat string
	fs.Var(&unfilteredSrcs, "src", ".go, .c, .cc, .m, .mm, .s, or .S file to be filtered and compiled")
	fs.Var(&coverSrcs, "cover", ".go file that should be instrumented for coverage (must also be a -src)")
	fs.Var(&embedSrcs, "embedsrc", "file that may be compiled into the package with a //go:embed directive")
	fs.Var(&embedLookupDirs, "embedlookupdir", "Root-relative paths to directories relative to which //go:embed directives are resolved")
	fs.Var(&embedRoots, "embedroot", "Bazel output root under which a file passed via -embedsrc resides")
	fs.Var(&deps, "arc", "Import path, package path, and file name of a direct dependency, separated by '='")
	fs.StringVar(&importPath, "importpath", "", "The import path of the package being compiled. Not passed to the compiler, but may be displayed in debug data.")
	fs.StringVar(&packagePath, "p", "", "The package path (importmap) of the package being compiled")
	fs.Var(&gcFlags, "gcflags", "Go compiler flags")
	fs.Var(&asmFlags, "asmflags", "Go assembler flags")
	fs.Var(&cppFlags, "cppflags", "C preprocessor flags")
	fs.Var(&cFlags, "cflags", "C compiler flags")
	fs.Var(&cxxFlags, "cxxflags", "C++ compiler flags")
	fs.Var(&objcFlags, "objcflags", "Objective-C compiler flags")
	fs.Var(&objcxxFlags, "objcxxflags", "Objective-C++ compiler flags")
	fs.Var(&ldFlags, "ldflags", "C linker flags")
	fs.StringVar(&nogoPath, "nogo", "", "The nogo binary. If unset, nogo will not be run.")
	fs.StringVar(&packageListPath, "package_list", "", "The file containing the list of standard library packages")
	fs.StringVar(&coverMode, "cover_mode", "", "The coverage mode to use. Empty if coverage instrumentation should not be added.")
	fs.StringVar(&outPath, "o", "", "The output archive file to write compiled code")
	fs.StringVar(&outFactsPath, "x", "", "The output archive file to write export data and nogo facts")
	fs.StringVar(&cgoExportHPath, "cgoexport", "", "The _cgo_exports.h file to write")
	fs.StringVar(&testFilter, "testfilter", "off", "Controls test package filtering")
	fs.StringVar(&coverFormat, "cover_format", "", "Emit source file paths in coverage instrumentation suitable for the specified coverage format")
	fs.Var(&recompileInternalDeps, "recompile_internal_deps", "The import path of the direct dependencies that needs to be recompiled.")
	if err := fs.Parse(args); err != nil {
		return err
	}
	if err := goenv.checkFlags(); err != nil {
		return err
	}
	// importPath is display-only; fall back to the package path when absent.
	if importPath == "" {
		importPath = packagePath
	}
	// Cgo configuration comes from the environment set up by the Go rules.
	cgoEnabled := os.Getenv("CGO_ENABLED") == "1"
	cc := os.Getenv("CC")
	outPath = abs(outPath)
	for i := range unfilteredSrcs {
		unfilteredSrcs[i] = abs(unfilteredSrcs[i])
	}
	for i := range embedSrcs {
		embedSrcs[i] = abs(embedSrcs[i])
	}

	// Filter sources.
	srcs, err := filterAndSplitFiles(unfilteredSrcs)
	if err != nil {
		return err
	}

	// TODO(jayconrod): remove -testfilter flag. The test action should compile
	// the main, internal, and external packages by calling compileArchive
	// with the correct sources for each.
	switch testFilter {
	case "off":
	case "only":
		// Keep only external test sources (package name ends in "_test").
		testSrcs := make([]fileInfo, 0, len(srcs.goSrcs))
		for _, f := range srcs.goSrcs {
			if strings.HasSuffix(f.pkg, "_test") {
				testSrcs = append(testSrcs, f)
			}
		}
		srcs.goSrcs = testSrcs
	case "exclude":
		// Drop external test sources, keeping the library package.
		libSrcs := make([]fileInfo, 0, len(srcs.goSrcs))
		for _, f := range srcs.goSrcs {
			if !strings.HasSuffix(f.pkg, "_test") {
				libSrcs = append(libSrcs, f)
			}
		}
		srcs.goSrcs = libSrcs
	default:
		return fmt.Errorf("invalid test filter %q", testFilter)
	}

	return compileArchive(
		goenv,
		importPath,
		packagePath,
		srcs,
		deps,
		coverMode,
		coverSrcs,
		embedSrcs,
		embedLookupDirs,
		embedRoots,
		cgoEnabled,
		cc,
		gcFlags,
		asmFlags,
		cppFlags,
		cFlags,
		cxxFlags,
		objcFlags,
		objcxxFlags,
		ldFlags,
		nogoPath,
		packageListPath,
		outPath,
		outFactsPath,
		cgoExportHPath,
		coverFormat,
		recompileInternalDeps)
}
+
+func compileArchive(
+ goenv *env,
+ importPath string,
+ packagePath string,
+ srcs archiveSrcs,
+ deps []archive,
+ coverMode string,
+ coverSrcs []string,
+ embedSrcs []string,
+ embedLookupDirs []string,
+ embedRoots []string,
+ cgoEnabled bool,
+ cc string,
+ gcFlags []string,
+ asmFlags []string,
+ cppFlags []string,
+ cFlags []string,
+ cxxFlags []string,
+ objcFlags []string,
+ objcxxFlags []string,
+ ldFlags []string,
+ nogoPath string,
+ packageListPath string,
+ outPath string,
+ outXPath string,
+ cgoExportHPath string,
+ coverFormat string,
+ recompileInternalDeps []string,
+) error {
+ workDir, cleanup, err := goenv.workDir()
+ if err != nil {
+ return err
+ }
+ defer cleanup()
+
+ // As part of compilation process, rules_go does generate and/or rewrite code
+ // based on the original source files. We should only run static analysis
+ // over original source files and not the generated source as end users have
+ // little control over the generated source.
+ //
+ // nogoSrcsOrigin maps generated/rewritten source files back to original source.
+ // If the original source path is an empty string, exclude generated source from nogo run.
+ nogoSrcsOrigin := make(map[string]string)
+
+ if len(srcs.goSrcs) == 0 {
+ // We need to run the compiler to create a valid archive, even if there's nothing in it.
+ // Otherwise, GoPack will complain if we try to add assembly or cgo objects.
+ // A truly empty archive does not include any references to source file paths, which
+ // ensures hermeticity even though the temp file path is random.
+ emptyGoFile, err := os.CreateTemp(filepath.Dir(outPath), "*.go")
+ if err != nil {
+ return err
+ }
+ defer os.Remove(emptyGoFile.Name())
+ defer emptyGoFile.Close()
+ if _, err := emptyGoFile.WriteString("package empty\n"); err != nil {
+ return err
+ }
+ if err := emptyGoFile.Close(); err != nil {
+ return err
+ }
+
+ srcs.goSrcs = append(srcs.goSrcs, fileInfo{
+ filename: emptyGoFile.Name(),
+ ext: goExt,
+ matched: true,
+ pkg: "empty",
+ })
+
+ nogoSrcsOrigin[emptyGoFile.Name()] = ""
+ }
+ packageName := srcs.goSrcs[0].pkg
+ var goSrcs, cgoSrcs []string
+ for _, src := range srcs.goSrcs {
+ if src.isCgo {
+ cgoSrcs = append(cgoSrcs, src.filename)
+ } else {
+ goSrcs = append(goSrcs, src.filename)
+ }
+ }
+ cSrcs := make([]string, len(srcs.cSrcs))
+ for i, src := range srcs.cSrcs {
+ cSrcs[i] = src.filename
+ }
+ cxxSrcs := make([]string, len(srcs.cxxSrcs))
+ for i, src := range srcs.cxxSrcs {
+ cxxSrcs[i] = src.filename
+ }
+ objcSrcs := make([]string, len(srcs.objcSrcs))
+ for i, src := range srcs.objcSrcs {
+ objcSrcs[i] = src.filename
+ }
+ objcxxSrcs := make([]string, len(srcs.objcxxSrcs))
+ for i, src := range srcs.objcxxSrcs {
+ objcxxSrcs[i] = src.filename
+ }
+ sSrcs := make([]string, len(srcs.sSrcs))
+ for i, src := range srcs.sSrcs {
+ sSrcs[i] = src.filename
+ }
+ hSrcs := make([]string, len(srcs.hSrcs))
+ for i, src := range srcs.hSrcs {
+ hSrcs[i] = src.filename
+ }
+ haveCgo := len(cgoSrcs)+len(cSrcs)+len(cxxSrcs)+len(objcSrcs)+len(objcxxSrcs) > 0
+
+ // Instrument source files for coverage.
+ if coverMode != "" {
+ relCoverPath := make(map[string]string)
+ for _, s := range coverSrcs {
+ relCoverPath[abs(s)] = s
+ }
+
+ combined := append([]string{}, goSrcs...)
+ if cgoEnabled {
+ combined = append(combined, cgoSrcs...)
+ }
+ for i, origSrc := range combined {
+ if _, ok := relCoverPath[origSrc]; !ok {
+ continue
+ }
+
+ var srcName string
+ switch coverFormat {
+ case "go_cover":
+ srcName = origSrc
+ if importPath != "" {
+ srcName = path.Join(importPath, filepath.Base(origSrc))
+ }
+ case "lcov":
+ // Bazel merges lcov reports across languages and thus assumes
+ // that the source file paths are relative to the exec root.
+ srcName = relCoverPath[origSrc]
+ default:
+ return fmt.Errorf("invalid value for -cover_format: %q", coverFormat)
+ }
+
+ stem := filepath.Base(origSrc)
+ if ext := filepath.Ext(stem); ext != "" {
+ stem = stem[:len(stem)-len(ext)]
+ }
+ coverVar := fmt.Sprintf("Cover_%s_%d_%s", sanitizePathForIdentifier(importPath), i, sanitizePathForIdentifier(stem))
+ coverVar = strings.ReplaceAll(coverVar, "_", "Z")
+ coverSrc := filepath.Join(workDir, fmt.Sprintf("cover_%d.go", i))
+ if err := instrumentForCoverage(goenv, origSrc, srcName, coverVar, coverMode, coverSrc); err != nil {
+ return err
+ }
+
+ if i < len(goSrcs) {
+ goSrcs[i] = coverSrc
+ nogoSrcsOrigin[coverSrc] = origSrc
+ continue
+ }
+
+ cgoSrcs[i-len(goSrcs)] = coverSrc
+ }
+ }
+
+ // If we have cgo, generate separate C and go files, and compile the
+ // C files.
+ var objFiles []string
+ if cgoEnabled && haveCgo {
+ // TODO(#2006): Compile .s and .S files with cgo2, not the Go assembler.
+ // If cgo is not enabled or we don't have other cgo sources, don't
+ // compile .S files.
+ var srcDir string
+ srcDir, goSrcs, objFiles, err = cgo2(goenv, goSrcs, cgoSrcs, cSrcs, cxxSrcs, objcSrcs, objcxxSrcs, nil, hSrcs, packagePath, packageName, cc, cppFlags, cFlags, cxxFlags, objcFlags, objcxxFlags, ldFlags, cgoExportHPath)
+ if err != nil {
+ return err
+ }
+
+ gcFlags = append(gcFlags, createTrimPath(gcFlags, srcDir))
+ } else {
+ if cgoExportHPath != "" {
+ if err := ioutil.WriteFile(cgoExportHPath, nil, 0o666); err != nil {
+ return err
+ }
+ }
+ gcFlags = append(gcFlags, createTrimPath(gcFlags, "."))
+ }
+
+ // Check that the filtered sources don't import anything outside of
+ // the standard library and the direct dependencies.
+ imports, err := checkImports(srcs.goSrcs, deps, packageListPath, importPath, recompileInternalDeps)
+ if err != nil {
+ return err
+ }
+ if cgoEnabled && len(cgoSrcs) != 0 {
+ // cgo generated code imports some extra packages.
+ imports["runtime/cgo"] = nil
+ imports["syscall"] = nil
+ imports["unsafe"] = nil
+ }
+ if coverMode != "" {
+ if coverMode == "atomic" {
+ imports["sync/atomic"] = nil
+ }
+ const coverdataPath = "github.com/bazelbuild/rules_go/go/tools/coverdata"
+ var coverdata *archive
+ for i := range deps {
+ if deps[i].importPath == coverdataPath {
+ coverdata = &deps[i]
+ break
+ }
+ }
+ if coverdata == nil {
+ return errors.New("coverage requested but coverdata dependency not provided")
+ }
+ imports[coverdataPath] = coverdata
+ }
+
+ // Build an importcfg file for the compiler.
+ importcfgPath, err := buildImportcfgFileForCompile(imports, goenv.installSuffix, filepath.Dir(outPath))
+ if err != nil {
+ return err
+ }
+ if !goenv.shouldPreserveWorkDir {
+ defer os.Remove(importcfgPath)
+ }
+
+ // Build an embedcfg file mapping embed patterns to filenames.
+ // Embed patterns are relative to any one of a list of root directories
+ // that may contain embeddable files. Source files containing embed patterns
+ // must be in one of these root directories so the pattern appears to be
+ // relative to the source file. Due to transitions, source files can reside
+ // under Bazel roots different from both those of the go srcs and those of
+ // the compilation output. Thus, we have to consider all combinations of
+ // Bazel roots embedsrcs and root-relative paths of source files and the
+ // output binary.
+ var embedRootDirs []string
+ for _, root := range embedRoots {
+ for _, lookupDir := range embedLookupDirs {
+ embedRootDir := abs(filepath.Join(root, lookupDir))
+ // Since we are iterating over all combinations of roots and
+ // root-relative paths, some resulting paths may not exist and
+ // should be filtered out before being passed to buildEmbedcfgFile.
+ // Since Bazel uniquified both the roots and the root-relative
+ // paths, the combinations are automatically unique.
+ if _, err := os.Stat(embedRootDir); err == nil {
+ embedRootDirs = append(embedRootDirs, embedRootDir)
+ }
+ }
+ }
+ embedcfgPath, err := buildEmbedcfgFile(srcs.goSrcs, embedSrcs, embedRootDirs, workDir)
+ if err != nil {
+ return err
+ }
+ if embedcfgPath != "" {
+ if !goenv.shouldPreserveWorkDir {
+ defer os.Remove(embedcfgPath)
+ }
+ }
+
+ // Run nogo concurrently.
+ var nogoChan chan error
+ outFactsPath := filepath.Join(workDir, nogoFact)
+ nogoSrcs := make([]string, 0, len(goSrcs))
+ for _, goSrc := range goSrcs {
+ // If source is found in the origin map, that means it's likely to be a generated source file
+ // so feed the original source file to static analyzers instead of the generated one.
+ //
+ // If origin is empty, that means the generated source file is not based on a user-provided source file
+ // thus ignore that entry entirely.
+ if originSrc, ok := nogoSrcsOrigin[goSrc]; ok {
+ if originSrc != "" {
+ nogoSrcs = append(nogoSrcs, originSrc)
+ }
+ continue
+ }
+
+ // TODO(sluongng): most likely what remains here are CGO-generated source files as the result of calling cgo2()
+ // Need to determine whether we want to feed these CGO-generated files into static analyzers.
+ //
+ // Add unknown origin source files into the mix.
+ nogoSrcs = append(nogoSrcs, goSrc)
+ }
+ if nogoPath != "" && len(nogoSrcs) > 0 {
+ ctx, cancel := context.WithCancel(context.Background())
+ nogoChan = make(chan error)
+ go func() {
+ nogoChan <- runNogo(ctx, workDir, nogoPath, nogoSrcs, deps, packagePath, importcfgPath, outFactsPath)
+ }()
+ defer func() {
+ if nogoChan != nil {
+ cancel()
+ <-nogoChan
+ }
+ }()
+ }
+
+ // If there are assembly files, and this is go1.12+, generate symbol ABIs.
+ asmHdrPath := ""
+ if len(srcs.sSrcs) > 0 {
+ asmHdrPath = filepath.Join(workDir, "go_asm.h")
+ }
+ symabisPath, err := buildSymabisFile(goenv, srcs.sSrcs, srcs.hSrcs, asmHdrPath)
+ if symabisPath != "" {
+ if !goenv.shouldPreserveWorkDir {
+ defer os.Remove(symabisPath)
+ }
+ }
+ if err != nil {
+ return err
+ }
+
+ // Compile the filtered .go files.
+ if err := compileGo(goenv, goSrcs, packagePath, importcfgPath, embedcfgPath, asmHdrPath, symabisPath, gcFlags, outPath); err != nil {
+ return err
+ }
+
+ // Compile the .s files.
+ if len(srcs.sSrcs) > 0 {
+ includeSet := map[string]struct{}{
+ filepath.Join(os.Getenv("GOROOT"), "pkg", "include"): {},
+ workDir: {},
+ }
+ for _, hdr := range srcs.hSrcs {
+ includeSet[filepath.Dir(hdr.filename)] = struct{}{}
+ }
+ includes := make([]string, len(includeSet))
+ for inc := range includeSet {
+ includes = append(includes, inc)
+ }
+ sort.Strings(includes)
+ for _, inc := range includes {
+ asmFlags = append(asmFlags, "-I", inc)
+ }
+ for i, sSrc := range srcs.sSrcs {
+ obj := filepath.Join(workDir, fmt.Sprintf("s%d.o", i))
+ if err := asmFile(goenv, sSrc.filename, packagePath, asmFlags, obj); err != nil {
+ return err
+ }
+ objFiles = append(objFiles, obj)
+ }
+ }
+
+ // Pack .o files into the archive. These may come from cgo generated code,
+ // cgo dependencies (cdeps), or assembly.
+ if len(objFiles) > 0 {
+ if err := appendFiles(goenv, outPath, objFiles); err != nil {
+ return err
+ }
+ }
+
+ // Check results from nogo.
+ nogoStatus := nogoNotRun
+ if nogoChan != nil {
+ err := <-nogoChan
+ nogoChan = nil // no cancellation needed
+ if err != nil {
+ nogoStatus = nogoFailed
+ // TODO: should we still create the .x file without nogo facts in this case?
+ return err
+ }
+ nogoStatus = nogoSucceeded
+ }
+
+ // Extract the export data file and pack it in an .x archive together with the
+ // nogo facts file (if there is one). This allows compile actions to depend
+ // on .x files only, so we don't need to recompile a package when one of its
+ // imports changes in a way that doesn't affect export data.
+ // TODO(golang/go#33820): After Go 1.16 is the minimum supported version,
+ // use -linkobj to tell the compiler to create separate .a and .x files for
+ // compiled code and export data. Before that version, the linker needed
+ // export data in the .a file when building a plugin. To work around that,
+ // we copy the export data into .x ourselves.
+ if err = extractFileFromArchive(outPath, workDir, pkgDef); err != nil {
+ return err
+ }
+ pkgDefPath := filepath.Join(workDir, pkgDef)
+ if nogoStatus == nogoSucceeded {
+ return appendFiles(goenv, outXPath, []string{pkgDefPath, outFactsPath})
+ }
+ return appendFiles(goenv, outXPath, []string{pkgDefPath})
+}
+
+func compileGo(goenv *env, srcs []string, packagePath, importcfgPath, embedcfgPath, asmHdrPath, symabisPath string, gcFlags []string, outPath string) error {
+ args := goenv.goTool("compile")
+ args = append(args, "-p", packagePath, "-importcfg", importcfgPath, "-pack")
+ if embedcfgPath != "" {
+ args = append(args, "-embedcfg", embedcfgPath)
+ }
+ if asmHdrPath != "" {
+ args = append(args, "-asmhdr", asmHdrPath)
+ }
+ if symabisPath != "" {
+ args = append(args, "-symabis", symabisPath)
+ }
+ args = append(args, gcFlags...)
+ args = append(args, "-o", outPath)
+ args = append(args, "--")
+ args = append(args, srcs...)
+ absArgs(args, []string{"-I", "-o", "-trimpath", "-importcfg"})
+ return goenv.runCommand(args)
+}
+
+func runNogo(ctx context.Context, workDir string, nogoPath string, srcs []string, deps []archive, packagePath, importcfgPath, outFactsPath string) error {
+ args := []string{nogoPath}
+ args = append(args, "-p", packagePath)
+ args = append(args, "-importcfg", importcfgPath)
+ for _, dep := range deps {
+ args = append(args, "-fact", fmt.Sprintf("%s=%s", dep.importPath, dep.file))
+ }
+ args = append(args, "-x", outFactsPath)
+ args = append(args, srcs...)
+
+ paramsFile := filepath.Join(workDir, "nogo.param")
+ if err := writeParamsFile(paramsFile, args[1:]); err != nil {
+ return fmt.Errorf("error writing nogo params file: %v", err)
+ }
+
+ cmd := exec.CommandContext(ctx, args[0], "-param="+paramsFile)
+ out := &bytes.Buffer{}
+ cmd.Stdout, cmd.Stderr = out, out
+ if err := cmd.Run(); err != nil {
+ if exitErr, ok := err.(*exec.ExitError); ok {
+ if !exitErr.Exited() {
+ cmdLine := strings.Join(args, " ")
+ return fmt.Errorf("nogo command '%s' exited unexpectedly: %s", cmdLine, exitErr.String())
+ }
+ return errors.New(string(relativizePaths(out.Bytes())))
+ } else {
+ if out.Len() != 0 {
+ fmt.Fprintln(os.Stderr, out.String())
+ }
+ return fmt.Errorf("error running nogo: %v", err)
+ }
+ }
+ return nil
+}
+
// createTrimPath returns the -trimpath flag value to append to the
// compiler flags: if gcFlags already carries a -trimpath= flag, path is
// joined onto it with a colon; otherwise a fresh flag is built.
func createTrimPath(gcFlags []string, path string) string {
	const prefix = "-trimpath="
	for _, f := range gcFlags {
		if strings.HasPrefix(f, prefix) {
			return f + ":" + path
		}
	}
	return prefix + path
}
+
// sanitizePathForIdentifier maps path to a string that is safe to embed
// in a Go identifier: ASCII letters, digits, and underscores pass
// through, and every other rune becomes '_'.
func sanitizePathForIdentifier(path string) string {
	safe := func(r rune) bool {
		return ('A' <= r && r <= 'Z') ||
			('a' <= r && r <= 'z') ||
			('0' <= r && r <= '9') ||
			r == '_'
	}
	return strings.Map(func(r rune) rune {
		if safe(r) {
			return r
		}
		return '_'
	}, path)
}
diff --git a/go/tools/builders/cover.go b/go/tools/builders/cover.go
new file mode 100644
index 00000000..fadc4fd7
--- /dev/null
+++ b/go/tools/builders/cover.go
@@ -0,0 +1,110 @@
+// Copyright 2017 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/parser"
+ "go/token"
+ "io/ioutil"
+ "os"
+ "strconv"
+)
+
+// instrumentForCoverage runs "go tool cover" on a source file to produce
+// a coverage-instrumented version of the file. It also registers the file
+// with the coverdata package.
+func instrumentForCoverage(goenv *env, srcPath, srcName, coverVar, mode, outPath string) error {
+ goargs := goenv.goTool("cover", "-var", coverVar, "-mode", mode, "-o", outPath, srcPath)
+ if err := goenv.runCommand(goargs); err != nil {
+ return err
+ }
+
+ return registerCoverage(outPath, coverVar, srcName)
+}
+
+// registerCoverage modifies coverSrcFilename, the output file from go tool cover.
+// It adds a call to coverdata.RegisterCoverage, which ensures the coverage
+// data from each file is reported. The name by which the file is registered
+// need not match its original name (it may use the importpath).
+func registerCoverage(coverSrcFilename, varName, srcName string) error {
+ coverSrc, err := os.ReadFile(coverSrcFilename)
+ if err != nil {
+ return fmt.Errorf("instrumentForCoverage: reading instrumented source: %w", err)
+ }
+
+ // Parse the file.
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, coverSrcFilename, coverSrc, parser.ParseComments)
+ if err != nil {
+ return nil // parse error: proceed and let the compiler fail
+ }
+
+ // Perform edits using a byte buffer instead of the AST, because
+ // we can not use go/format to write the AST back out without
+ // changing line numbers.
+ editor := NewBuffer(coverSrc)
+
+ // Ensure coverdata is imported. Use an existing import if present
+ // or add a new one.
+ const coverdataPath = "github.com/bazelbuild/rules_go/go/tools/coverdata"
+ var coverdataName string
+ for _, imp := range f.Imports {
+ path, err := strconv.Unquote(imp.Path.Value)
+ if err != nil {
+ return nil // parse error: proceed and let the compiler fail
+ }
+ if path == coverdataPath {
+ if imp.Name != nil {
+ // renaming import
+ if imp.Name.Name == "_" {
+ // Change blank import to named import
+ editor.Replace(
+ fset.Position(imp.Name.Pos()).Offset,
+ fset.Position(imp.Name.End()).Offset,
+ "coverdata")
+ coverdataName = "coverdata"
+ } else {
+ coverdataName = imp.Name.Name
+ }
+ } else {
+ // default import
+ coverdataName = "coverdata"
+ }
+ break
+ }
+ }
+ if coverdataName == "" {
+ // No existing import. Add a new one.
+ coverdataName = "coverdata"
+ editor.Insert(fset.Position(f.Name.End()).Offset, fmt.Sprintf("; import %q", coverdataPath))
+ }
+
+ // Append an init function.
+ var buf = bytes.NewBuffer(editor.Bytes())
+ fmt.Fprintf(buf, `
+func init() {
+ %s.RegisterFile(%q,
+ %[3]s.Count[:],
+ %[3]s.Pos[:],
+ %[3]s.NumStmt[:])
+}
+`, coverdataName, srcName, varName)
+ if err := ioutil.WriteFile(coverSrcFilename, buf.Bytes(), 0666); err != nil {
+ return fmt.Errorf("registerCoverage: %v", err)
+ }
+ return nil
+}
diff --git a/go/tools/builders/cover_test.go b/go/tools/builders/cover_test.go
new file mode 100644
index 00000000..fc1ba818
--- /dev/null
+++ b/go/tools/builders/cover_test.go
@@ -0,0 +1,130 @@
+package main
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "testing"
+)
+
// test describes one registerCoverage scenario: an input source file (in)
// and the exact expected rewritten output (out).
type test struct {
	name string
	in   string
	out  string
}
+
// tests enumerates registerCoverage cases: a file with no imports, with
// unrelated imports, and with an existing coverdata import in default,
// blank (_), and renamed form.
var tests = []test{
	{
		name: "no imports",
		in: `package main
`,
		out: `package main; import "github.com/bazelbuild/rules_go/go/tools/coverdata"

func init() {
	coverdata.RegisterFile("srcName",
		varName.Count[:],
		varName.Pos[:],
		varName.NumStmt[:])
}
`,
	},
	{
		name: "other imports",
		in: `package main

import (
	"os"
)
`,
		out: `package main; import "github.com/bazelbuild/rules_go/go/tools/coverdata"

import (
	"os"
)

func init() {
	coverdata.RegisterFile("srcName",
		varName.Count[:],
		varName.Pos[:],
		varName.NumStmt[:])
}
`,
	},
	{
		name: "existing import",
		in: `package main

import "github.com/bazelbuild/rules_go/go/tools/coverdata"
`,
		out: `package main

import "github.com/bazelbuild/rules_go/go/tools/coverdata"

func init() {
	coverdata.RegisterFile("srcName",
		varName.Count[:],
		varName.Pos[:],
		varName.NumStmt[:])
}
`,
	},
	{
		name: "existing _ import",
		in: `package main

import _ "github.com/bazelbuild/rules_go/go/tools/coverdata"
`,
		out: `package main

import coverdata "github.com/bazelbuild/rules_go/go/tools/coverdata"

func init() {
	coverdata.RegisterFile("srcName",
		varName.Count[:],
		varName.Pos[:],
		varName.NumStmt[:])
}
`,
	},
	{
		name: "existing renamed import",
		in: `package main

import cover0 "github.com/bazelbuild/rules_go/go/tools/coverdata"
`,
		out: `package main

import cover0 "github.com/bazelbuild/rules_go/go/tools/coverdata"

func init() {
	cover0.RegisterFile("srcName",
		varName.Count[:],
		varName.Pos[:],
		varName.NumStmt[:])
}
`,
	},
}
+
+func TestRegisterCoverage(t *testing.T) {
+ var filename = filepath.Join(t.TempDir(), "test_input.go")
+ for _, test := range tests {
+ if err := ioutil.WriteFile(filename, []byte(test.in), 0666); err != nil {
+ t.Errorf("writing input file: %v", err)
+ return
+ }
+ err := registerCoverage(filename, "varName", "srcName")
+ if err != nil {
+ t.Errorf("%q: %+v", test.name, err)
+ continue
+ }
+ coverSrc, err := os.ReadFile(filename)
+ if err != nil {
+ t.Errorf("%q: %+v", test.name, err)
+ continue
+ }
+ if got, want := string(coverSrc), test.out; got != want {
+ t.Errorf("%q: got %v, want %v", test.name, got, want)
+ }
+ }
+}
diff --git a/go/tools/builders/edit.go b/go/tools/builders/edit.go
new file mode 100644
index 00000000..f8ccd52b
--- /dev/null
+++ b/go/tools/builders/edit.go
@@ -0,0 +1,95 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Copied from go1.17 tree: //src/cmd/internal/edit/edit.go
+
+// Package edit implements buffered position-based editing of byte slices.
+package main
+
+import (
+ "fmt"
+ "sort"
+)
+
// A Buffer accumulates a queue of positional edits (byte offsets into an
// original slice) and applies them all at once via Bytes or String.
//
// Derived from the go1.17 tree: //src/cmd/internal/edit/edit.go
type Buffer struct {
	old []byte
	q   edits
}

// An edit records a single text modification: change the bytes in
// [start,end) to new.
type edit struct {
	start int
	end   int
	new   string
}

// An edits is a list of edits that is sortable by start offset, breaking
// ties by end offset.
type edits []edit

func (x edits) Len() int      { return len(x) }
func (x edits) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x edits) Less(i, j int) bool {
	if x[i].start == x[j].start {
		return x[i].end < x[j].end
	}
	return x[i].start < x[j].start
}

// NewBuffer returns a new buffer to accumulate changes to an initial data slice.
// The returned buffer maintains a reference to the data, so the caller must ensure
// the data is not modified until after the Buffer is done being used.
func NewBuffer(data []byte) *Buffer {
	return &Buffer{old: data}
}

// Insert queues the insertion of new at byte offset pos.
func (b *Buffer) Insert(pos int, new string) {
	if pos < 0 || pos > len(b.old) {
		panic("invalid edit position")
	}
	b.q = append(b.q, edit{start: pos, end: pos, new: new})
}

// Delete queues the removal of the bytes in [start,end).
func (b *Buffer) Delete(start, end int) {
	// A deletion is just a replacement with the empty string; Replace
	// performs the identical bounds check.
	b.Replace(start, end, "")
}

// Replace queues the replacement of the bytes in [start,end) with new.
func (b *Buffer) Replace(start, end int, new string) {
	if end < start || start < 0 || end > len(b.old) {
		panic("invalid edit position")
	}
	b.q = append(b.q, edit{start: start, end: end, new: new})
}

// Bytes returns a new byte slice containing the original data
// with the queued edits applied.
func (b *Buffer) Bytes() []byte {
	// Sort edits by starting position and then by ending position.
	// Breaking ties by ending position allows insertions at point x
	// to be applied before a replacement of the text at [x, y).
	sort.Stable(b.q)

	var out []byte
	prev := 0
	for i, e := range b.q {
		if e.start < prev {
			prevEdit := b.q[i-1]
			panic(fmt.Sprintf("overlapping edits: [%d,%d)->%q, [%d,%d)->%q", prevEdit.start, prevEdit.end, prevEdit.new, e.start, e.end, e.new))
		}
		out = append(out, b.old[prev:e.start]...)
		out = append(out, e.new...)
		prev = e.end
	}
	out = append(out, b.old[prev:]...)
	return out
}

// String returns a string containing the original data
// with the queued edits applied.
func (b *Buffer) String() string {
	return string(b.Bytes())
}
diff --git a/go/tools/builders/embed.go b/go/tools/builders/embed.go
new file mode 100644
index 00000000..e68da974
--- /dev/null
+++ b/go/tools/builders/embed.go
@@ -0,0 +1,340 @@
+// Copyright 2017 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// embed generates a .go file from the contents of a list of data files. It is
+// invoked by go_embed_data as an action.
+package main
+
+import (
+ "archive/tar"
+ "archive/zip"
+ "bufio"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "path"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "text/template"
+ "unicode/utf8"
+)
+
// headerTpl renders the fixed prologue of every generated file: a
// "DO NOT EDIT" marker naming the generating label, followed by the
// package clause.
var headerTpl = template.Must(template.New("embed").Parse(`// Generated by go_embed_data for {{.Label}}. DO NOT EDIT.

package {{.Package}}

`))

// multiFooterTpl renders the map variable emitted in -multi mode. It maps
// each found source's key to the per-file variable ({{.Var}}_{{$i}})
// declared earlier in the generated file.
var multiFooterTpl = template.Must(template.New("embed").Parse(`
var {{.Var}} = map[string]{{.Type}}{
{{- range $i, $f := .FoundSources}}
	{{$.Key $f}}: {{$.Var}}_{{$i}},
{{- end}}
}

`))
+
+func main() {
+ log.SetPrefix("embed: ")
+ log.SetFlags(0) // don't print timestamps
+ if err := run(os.Args); err != nil {
+ log.Fatal(err)
+ }
+}
+
// configuration holds the parsed command line for the embed tool.
// Exported fields are referenced by the output templates; unexported
// fields only steer generation.
type configuration struct {
	// Label is the Bazel label of the rule being executed; Package and
	// Var name the generated Go package and variable.
	Label, Package, Var string
	// Multi selects map output (one entry per source) over a single value.
	Multi bool
	// sources are the input files; FoundSources accumulates the names of
	// the entries actually embedded (archive member names in -unpack mode).
	sources []string
	FoundSources []string
	// out is the .go file to create; workspace is the workspace name used
	// by Key to strip "external/<workspace>/" prefixes.
	out, workspace string
	// flatten keys entries by base name; unpack treats sources as
	// archives to expand; strData emits string values instead of []byte.
	flatten, unpack, strData bool
}
+
+func (c *configuration) Type() string {
+ if c.strData {
+ return "string"
+ } else {
+ return "[]byte"
+ }
+}
+
+func (c *configuration) Key(filename string) string {
+ workspacePrefix := "external/" + c.workspace + "/"
+ key := filepath.FromSlash(strings.TrimPrefix(filename, workspacePrefix))
+ if c.flatten {
+ key = path.Base(filename)
+ }
+ return strconv.Quote(key)
+}
+
+func run(args []string) error {
+ c, err := newConfiguration(args)
+ if err != nil {
+ return err
+ }
+
+ f, err := os.Create(c.out)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ w := bufio.NewWriter(f)
+ defer w.Flush()
+
+ if err := headerTpl.Execute(w, c); err != nil {
+ return err
+ }
+
+ if c.Multi {
+ return embedMultipleFiles(c, w)
+ }
+ return embedSingleFile(c, w)
+}
+
+func newConfiguration(args []string) (*configuration, error) {
+ var c configuration
+ flags := flag.NewFlagSet("embed", flag.ExitOnError)
+ flags.StringVar(&c.Label, "label", "", "Label of the rule being executed (required)")
+ flags.StringVar(&c.Package, "package", "", "Go package name (required)")
+ flags.StringVar(&c.Var, "var", "", "Variable name (required)")
+ flags.BoolVar(&c.Multi, "multi", false, "Whether the variable is a map or a single value")
+ flags.StringVar(&c.out, "out", "", "Go file to generate (required)")
+ flags.StringVar(&c.workspace, "workspace", "", "Name of the workspace (required)")
+ flags.BoolVar(&c.flatten, "flatten", false, "Whether to access files by base name")
+ flags.BoolVar(&c.strData, "string", false, "Whether to store contents as strings")
+ flags.BoolVar(&c.unpack, "unpack", false, "Whether to treat files as archives to unpack.")
+ flags.Parse(args[1:])
+ if c.Label == "" {
+ return nil, errors.New("error: -label option not provided")
+ }
+ if c.Package == "" {
+ return nil, errors.New("error: -package option not provided")
+ }
+ if c.Var == "" {
+ return nil, errors.New("error: -var option not provided")
+ }
+ if c.out == "" {
+ return nil, errors.New("error: -out option not provided")
+ }
+ if c.workspace == "" {
+ return nil, errors.New("error: -workspace option not provided")
+ }
+ c.sources = flags.Args()
+ if !c.Multi && len(c.sources) != 1 {
+ return nil, fmt.Errorf("error: -multi flag not given, so want exactly one source; got %d", len(c.sources))
+ }
+ if c.unpack {
+ if !c.Multi {
+ return nil, errors.New("error: -multi flag is required for -unpack mode.")
+ }
+ for _, src := range c.sources {
+ if ext := filepath.Ext(src); ext != ".zip" && ext != ".tar" {
+ return nil, fmt.Errorf("error: -unpack flag expects .zip or .tar extension (got %q)", ext)
+ }
+ }
+ }
+ return &c, nil
+}
+
+func embedSingleFile(c *configuration, w io.Writer) error {
+ dataBegin, dataEnd := "\"", "\"\n"
+ if !c.strData {
+ dataBegin, dataEnd = "[]byte(\"", "\")\n"
+ }
+
+ if _, err := fmt.Fprintf(w, "var %s = %s", c.Var, dataBegin); err != nil {
+ return err
+ }
+ if err := embedFileContents(w, c.sources[0]); err != nil {
+ return err
+ }
+ _, err := fmt.Fprint(w, dataEnd)
+ return err
+}
+
+func embedMultipleFiles(c *configuration, w io.Writer) error {
+ dataBegin, dataEnd := "\"", "\"\n"
+ if !c.strData {
+ dataBegin, dataEnd = "[]byte(\"", "\")\n"
+ }
+
+ if _, err := fmt.Fprint(w, "var (\n"); err != nil {
+ return err
+ }
+ if err := findSources(c, func(i int, f io.Reader) error {
+ if _, err := fmt.Fprintf(w, "\t%s_%d = %s", c.Var, i, dataBegin); err != nil {
+ return err
+ }
+ if _, err := io.Copy(&escapeWriter{w}, f); err != nil {
+ return err
+ }
+ if _, err := fmt.Fprint(w, dataEnd); err != nil {
+ return err
+ }
+ return nil
+ }); err != nil {
+ return err
+ }
+ if _, err := fmt.Fprint(w, ")\n"); err != nil {
+ return err
+ }
+ if err := multiFooterTpl.Execute(w, c); err != nil {
+ return err
+ }
+ return nil
+}
+
+func findSources(c *configuration, cb func(i int, f io.Reader) error) error {
+ if c.unpack {
+ for _, filename := range c.sources {
+ ext := filepath.Ext(filename)
+ if ext == ".zip" {
+ if err := findZipSources(c, filename, cb); err != nil {
+ return err
+ }
+ } else if ext == ".tar" {
+ if err := findTarSources(c, filename, cb); err != nil {
+ return err
+ }
+ } else {
+ panic("unknown archive extension: " + ext)
+ }
+ }
+ return nil
+ }
+ for _, filename := range c.sources {
+ f, err := os.Open(filename)
+ if err != nil {
+ return err
+ }
+ err = cb(len(c.FoundSources), bufio.NewReader(f))
+ f.Close()
+ if err != nil {
+ return err
+ }
+ c.FoundSources = append(c.FoundSources, filename)
+ }
+ return nil
+}
+
+func findZipSources(c *configuration, filename string, cb func(i int, f io.Reader) error) error {
+ r, err := zip.OpenReader(filename)
+ if err != nil {
+ return err
+ }
+ defer r.Close()
+ for _, file := range r.File {
+ f, err := file.Open()
+ if err != nil {
+ return err
+ }
+ err = cb(len(c.FoundSources), f)
+ f.Close()
+ if err != nil {
+ return err
+ }
+ c.FoundSources = append(c.FoundSources, file.Name)
+ }
+ return nil
+}
+
+func findTarSources(c *configuration, filename string, cb func(i int, f io.Reader) error) error {
+ tf, err := os.Open(filename)
+ if err != nil {
+ return err
+ }
+ defer tf.Close()
+ reader := tar.NewReader(bufio.NewReader(tf))
+ for {
+ h, err := reader.Next()
+ if err == io.EOF {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+ if h.Typeflag != tar.TypeReg {
+ continue
+ }
+ if err := cb(len(c.FoundSources), &io.LimitedReader{
+ R: reader,
+ N: h.Size,
+ }); err != nil {
+ return err
+ }
+ c.FoundSources = append(c.FoundSources, h.Name)
+ }
+}
+
+func embedFileContents(w io.Writer, filename string) error {
+ f, err := os.Open(filename)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ _, err = io.Copy(&escapeWriter{w}, bufio.NewReader(f))
+ return err
+}
+
+type escapeWriter struct {
+ w io.Writer
+}
+
+func (w *escapeWriter) Write(data []byte) (n int, err error) {
+ n = len(data)
+
+ for err == nil && len(data) > 0 {
+ // https://golang.org/ref/spec#String_literals: "Within the quotes, any
+ // character may appear except newline and unescaped double quote. The
+ // text between the quotes forms the value of the literal, with backslash
+ // escapes interpreted as they are in rune literals […]."
+ switch b := data[0]; b {
+ case '\\':
+ _, err = w.w.Write([]byte(`\\`))
+ case '"':
+ _, err = w.w.Write([]byte(`\"`))
+ case '\n':
+ _, err = w.w.Write([]byte(`\n`))
+
+ case '\x00':
+ // https://golang.org/ref/spec#Source_code_representation: "Implementation
+ // restriction: For compatibility with other tools, a compiler may
+ // disallow the NUL character (U+0000) in the source text."
+ _, err = w.w.Write([]byte(`\x00`))
+
+ default:
+ // https://golang.org/ref/spec#Source_code_representation: "Implementation
+ // restriction: […] A byte order mark may be disallowed anywhere else in
+ // the source."
+ const byteOrderMark = '\uFEFF'
+
+ if r, size := utf8.DecodeRune(data); r != utf8.RuneError && r != byteOrderMark {
+ _, err = w.w.Write(data[:size])
+ data = data[size:]
+ continue
+ }
+
+ _, err = fmt.Fprintf(w.w, `\x%02x`, b)
+ }
+ data = data[1:]
+ }
+
+ return n - len(data), err
+}
diff --git a/go/tools/builders/embedcfg.go b/go/tools/builders/embedcfg.go
new file mode 100644
index 00000000..2de4f3b9
--- /dev/null
+++ b/go/tools/builders/embedcfg.go
@@ -0,0 +1,439 @@
+// Copyright 2021 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+ "runtime"
+ "sort"
+ "strings"
+)
+
// buildEmbedcfgFile writes an embedcfg file to be read by the compiler.
// An embedcfg file can be used in Go 1.16 or higher if the "embed" package
// is imported and there are one or more //go:embed comments in .go files.
// The embedcfg file maps //go:embed patterns to actual file names.
//
// The embedcfg file will be created in workDir, and its name is returned.
// The caller is responsible for deleting it. If no embedcfg file is needed,
// "" is returned with no error.
//
// All source files listed in goSrcs with //go:embed comments must be in one
// of the directories in embedRootDirs (not in a subdirectory). Embed patterns
// are evaluated relative to the source directory. Embed sources (embedSrcs)
// outside those directories are ignored, since they can't be matched by any
// valid pattern.
func buildEmbedcfgFile(goSrcs []fileInfo, embedSrcs, embedRootDirs []string, workDir string) (string, error) {
	// Check whether this package uses embedding and whether the toolchain
	// supports it (Go 1.16+). With Go 1.15 and lower, we'll try to compile
	// without an embedcfg file, and the compiler will complain the "embed"
	// package is missing.
	var major, minor int
	if n, err := fmt.Sscanf(runtime.Version(), "go%d.%d", &major, &minor); n != 2 || err != nil {
		// Can't parse go version. Maybe it's a development version; fall through.
	} else if major < 1 || (major == 1 && minor < 16) {
		return "", nil
	}
	// Scan the sources for //go:embed directives and an "embed" import;
	// both must be present for an embedcfg to be useful.
	importEmbed := false
	haveEmbed := false
	for _, src := range goSrcs {
		if len(src.embeds) > 0 {
			haveEmbed = true
			rootDir := findInRootDirs(src.filename, embedRootDirs)
			if rootDir == "" || strings.Contains(src.filename[len(rootDir)+1:], string(filepath.Separator)) {
				// Report an error if a source file appears in a subdirectory of
				// another source directory. In this situation, the same file could be
				// referenced with different paths.
				return "", fmt.Errorf("%s: source files with //go:embed should be in same directory. Allowed directories are:\n\t%s",
					src.filename,
					strings.Join(embedRootDirs, "\n\t"))
			}
		}
		for _, imp := range src.imports {
			if imp.path == "embed" {
				importEmbed = true
			}
		}
	}
	if !importEmbed || !haveEmbed {
		return "", nil
	}

	// Build a tree of embeddable files. This includes paths listed with
	// -embedsrc. If one of those paths is a directory, the tree includes
	// its files and subdirectories. Paths in the tree are relative to the
	// path in embedRootDirs that contains them.
	root, err := buildEmbedTree(embedSrcs, embedRootDirs)
	if err != nil {
		return "", err
	}

	// Resolve patterns to sets of files. The JSON structure below matches
	// what the compiler's -embedcfg flag expects: pattern -> relative
	// paths, and relative path -> actual file to embed.
	var embedcfg struct {
		Patterns map[string][]string
		Files    map[string]string
	}
	embedcfg.Patterns = make(map[string][]string)
	embedcfg.Files = make(map[string]string)
	for _, src := range goSrcs {
		for _, embed := range src.embeds {
			matchedPaths, matchedFiles, err := resolveEmbed(embed, root)
			if err != nil {
				return "", err
			}
			embedcfg.Patterns[embed.pattern] = matchedPaths
			for i, rel := range matchedPaths {
				embedcfg.Files[rel] = matchedFiles[i]
			}
		}
	}

	// Write the configuration to a JSON file.
	embedcfgData, err := json.MarshalIndent(&embedcfg, "", "\t")
	if err != nil {
		return "", err
	}
	embedcfgName := filepath.Join(workDir, "embedcfg")
	if err := ioutil.WriteFile(embedcfgName, embedcfgData, 0o666); err != nil {
		return "", err
	}
	return embedcfgName, nil
}
+
// findInRootDirs returns the first entry of rootDirs that is the
// directory of file path p or an ancestor of that directory. It returns
// "" when no entry qualifies.
func findInRootDirs(p string, rootDirs []string) string {
    dir := filepath.Dir(p)
    for _, root := range rootDirs {
        if dir == root {
            return root
        }
        // Ancestor match: root must be followed by a separator and at
        // least one more path element inside dir.
        if strings.HasPrefix(dir, root) && len(dir) > len(root)+1 && dir[len(root)] == filepath.Separator {
            return root
        }
    }
    return ""
}
+
// embedNode represents an embeddable file or directory in a tree.
type embedNode struct {
	name       string                // base name
	path       string                // absolute file path
	children   map[string]*embedNode // non-nil for directory, nil for file
	childNames []string              // sorted child names, filled by buildEmbedTree
}
+
// add inserts file nodes into the tree rooted at n for the slash-separated
// path src, relative to the absolute file path rootDir. If src points to a
// directory, add recursively inserts nodes for its contents. If a node already
// exists (for example, if a source file and a generated file have the same
// name), add leaves the existing node in place.
func (n *embedNode) add(rootDir, src string) error {
	// Create nodes for parents of src. Intermediate directories get
	// directory nodes (non-nil children) with no file path of their own.
	parent := n
	parts := strings.Split(src, "/")
	for _, p := range parts[:len(parts)-1] {
		if parent.children[p] == nil {
			parent.children[p] = &embedNode{
				name:     p,
				children: make(map[string]*embedNode),
			}
		}
		parent = parent.children[p]
	}

	// Create a node for src. If src is a directory, recursively create nodes for
	// its contents. Go embedding ignores symbolic links, but Bazel may use links
	// for generated files and directories, so we follow them here
	// (os.Stat resolves links before the IsDir check).
	var visit func(*embedNode, string, os.FileInfo) error
	visit = func(parent *embedNode, path string, fi os.FileInfo) error {
		base := filepath.Base(path)
		if parent.children[base] == nil {
			parent.children[base] = &embedNode{name: base, path: path}
		}
		if !fi.IsDir() {
			return nil
		}
		// Directory: mark it as such, then recurse into its entries.
		node := parent.children[base]
		node.children = make(map[string]*embedNode)
		f, err := os.Open(path)
		if err != nil {
			return err
		}
		names, err := f.Readdirnames(0)
		f.Close()
		if err != nil {
			return err
		}
		for _, name := range names {
			cPath := filepath.Join(path, name)
			cfi, err := os.Stat(cPath)
			if err != nil {
				return err
			}
			if err := visit(node, cPath, cfi); err != nil {
				return err
			}
		}
		return nil
	}

	path := filepath.Join(rootDir, src)
	fi, err := os.Stat(path)
	if err != nil {
		return err
	}
	return visit(parent, path, fi)
}
+
+func (n *embedNode) isDir() bool {
+ return n.children != nil
+}
+
+// get returns a tree node, given a slash-separated path relative to the
+// receiver. get returns nil if no node exists with that path.
+func (n *embedNode) get(path string) *embedNode {
+ if path == "." || path == "" {
+ return n
+ }
+ for _, part := range strings.Split(path, "/") {
+ n = n.children[part]
+ if n == nil {
+ return nil
+ }
+ }
+ return n
+}
+
+var errSkip = errors.New("skip")
+
+// walk calls fn on each node in the tree rooted at n in depth-first pre-order.
+func (n *embedNode) walk(fn func(rel string, n *embedNode) error) error {
+ var visit func(string, *embedNode) error
+ visit = func(rel string, node *embedNode) error {
+ err := fn(rel, node)
+ if err == errSkip {
+ return nil
+ } else if err != nil {
+ return err
+ }
+ for _, name := range node.childNames {
+ if err := visit(path.Join(rel, name), node.children[name]); err != nil && err != errSkip {
+ return err
+ }
+ }
+ return nil
+ }
+ err := visit("", n)
+ if err == errSkip {
+ return nil
+ }
+ return err
+}
+
+// buildEmbedTree constructs a logical directory tree of embeddable files.
+// The tree may contain a mix of static and generated files from multiple
+// root directories. Directory artifacts are recursively expanded.
+func buildEmbedTree(embedSrcs, embedRootDirs []string) (root *embedNode, err error) {
+ defer func() {
+ if err != nil {
+ err = fmt.Errorf("building tree of embeddable files in directories %s: %v", strings.Join(embedRootDirs, string(filepath.ListSeparator)), err)
+ }
+ }()
+
+ // Add each path to the tree.
+ root = &embedNode{name: "", children: make(map[string]*embedNode)}
+ for _, src := range embedSrcs {
+ rootDir := findInRootDirs(src, embedRootDirs)
+ if rootDir == "" {
+ // Embedded path cannot be matched by any valid pattern. Ignore.
+ continue
+ }
+ rel := filepath.ToSlash(src[len(rootDir)+1:])
+ if err := root.add(rootDir, rel); err != nil {
+ return nil, err
+ }
+ }
+
+ // Sort children in each directory node.
+ var visit func(*embedNode)
+ visit = func(node *embedNode) {
+ node.childNames = make([]string, 0, len(node.children))
+ for name, child := range node.children {
+ node.childNames = append(node.childNames, name)
+ visit(child)
+ }
+ sort.Strings(node.childNames)
+ }
+ visit(root)
+
+ return root, nil
+}
+
+// resolveEmbed matches a //go:embed pattern in a source file to a set of
+// embeddable files in the given tree.
+func resolveEmbed(embed fileEmbed, root *embedNode) (matchedPaths, matchedFiles []string, err error) {
+ defer func() {
+ if err != nil {
+ err = fmt.Errorf("%v: could not embed %s: %v", embed.pos, embed.pattern, err)
+ }
+ }()
+
+ // Remove optional "all:" prefix from pattern and set matchAll flag if present.
+ // See https://pkg.go.dev/embed#hdr-Directives for details.
+ pattern := embed.pattern
+ var matchAll bool
+ if strings.HasPrefix(pattern, "all:") {
+ matchAll = true
+ pattern = pattern[4:]
+ }
+
+ // Check that the pattern has valid syntax.
+ if _, err := path.Match(pattern, ""); err != nil || !validEmbedPattern(pattern) {
+ return nil, nil, fmt.Errorf("invalid pattern syntax")
+ }
+
+ // Search for matching files.
+ err = root.walk(func(matchRel string, matchNode *embedNode) error {
+ if ok, _ := path.Match(pattern, matchRel); !ok {
+ // Non-matching file or directory.
+ return nil
+ }
+
+ // TODO: Should check that directories along path do not begin a new module
+ // (do not contain a go.mod).
+ // https://cs.opensource.google/go/go/+/master:src/cmd/go/internal/load/pkg.go;l=2158;drc=261fe25c83a94fc3defe064baed3944cd3d16959
+ for dir := matchRel; len(dir) > 1; dir = filepath.Dir(dir) {
+ if base := path.Base(matchRel); isBadEmbedName(base) {
+ what := "file"
+ if matchNode.isDir() {
+ what = "directory"
+ }
+ if dir == matchRel {
+ return fmt.Errorf("cannot embed %s %s: invalid name %s", what, matchRel, base)
+ } else {
+ return fmt.Errorf("cannot embed %s %s: in invalid directory %s", what, matchRel, base)
+ }
+ }
+ }
+
+ if !matchNode.isDir() {
+ // Matching file. Add to list.
+ matchedPaths = append(matchedPaths, matchRel)
+ matchedFiles = append(matchedFiles, matchNode.path)
+ return nil
+ }
+
+ // Matching directory. Recursively add all files in subdirectories.
+ // Don't add hidden files or directories (starting with "." or "_"),
+ // unless "all:" prefix was set.
+ // See golang/go#42328.
+ matchTreeErr := matchNode.walk(func(childRel string, childNode *embedNode) error {
+ // TODO: Should check that directories along path do not begin a new module
+ // https://cs.opensource.google/go/go/+/master:src/cmd/go/internal/load/pkg.go;l=2158;drc=261fe25c83a94fc3defe064baed3944cd3d16959
+ if childRel != "" {
+ base := path.Base(childRel)
+ if isBadEmbedName(base) || (!matchAll && (strings.HasPrefix(base, ".") || strings.HasPrefix(base, "_"))) {
+ if childNode.isDir() {
+ return errSkip
+ }
+ return nil
+ }
+ }
+ if !childNode.isDir() {
+ matchedPaths = append(matchedPaths, path.Join(matchRel, childRel))
+ matchedFiles = append(matchedFiles, childNode.path)
+ }
+ return nil
+ })
+ if matchTreeErr != nil {
+ return matchTreeErr
+ }
+ return errSkip
+ })
+ if err != nil && err != errSkip {
+ return nil, nil, err
+ }
+ if len(matchedPaths) == 0 {
+ return nil, nil, fmt.Errorf("no matching files found")
+ }
+ return matchedPaths, matchedFiles, nil
+}
+
+func validEmbedPattern(pattern string) bool {
+ return pattern != "." && fsValidPath(pattern)
+}
+
// fsValidPath reports whether name is a valid path for use in a call to
// Open: an unrooted, slash-separated sequence of elements like "x/y/z".
// No element may be empty, ".", or "..", except that the root directory
// itself is named ".". Paths are slash-separated on all systems, even
// Windows, and backslashes must never appear.
//
// Behavior copied from io/fs.ValidPath in Go 1.16beta1.
func fsValidPath(name string) bool {
    if name == "." {
        // Special case: the root directory.
        return true
    }
    if strings.ContainsRune(name, '\\') {
        return false
    }
    for _, elem := range strings.Split(name, "/") {
        if elem == "" || elem == "." || elem == ".." {
            return false
        }
    }
    return true
}
+
+// isBadEmbedName reports whether name is the base name of a file that
+// can't or won't be included in modules and therefore shouldn't be treated
+// as existing for embedding.
+//
+// TODO: This should use the equivalent of golang.org/x/mod/module.CheckFilePath instead of fsValidPath.
+// https://cs.opensource.google/go/go/+/master:src/cmd/go/internal/load/pkg.go;l=2200;drc=261fe25c83a94fc3defe064baed3944cd3d16959
+func isBadEmbedName(name string) bool {
+ if !fsValidPath(name) {
+ return true
+ }
+ switch name {
+ // Empty string should be impossible but make it bad.
+ case "":
+ return true
+ // Version control directories won't be present in module.
+ case ".bzr", ".hg", ".git", ".svn":
+ return true
+ }
+ return false
+}
diff --git a/go/tools/builders/env.go b/go/tools/builders/env.go
new file mode 100644
index 00000000..177617f8
--- /dev/null
+++ b/go/tools/builders/env.go
@@ -0,0 +1,474 @@
+// Copyright 2017 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "bytes"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+)
+
var (
	// cgoEnvVars is the list of all cgo environment variables.
	cgoEnvVars = []string{"CGO_CFLAGS", "CGO_CXXFLAGS", "CGO_CPPFLAGS", "CGO_LDFLAGS"}
	// cgoAbsEnvFlags lists the flags within cgoEnvVars whose values are
	// paths that must be made absolute (see absEnv/absArgs).
	cgoAbsEnvFlags = []string{"-I", "-L", "-isysroot", "-isystem", "-iquote", "-include", "-gcc-toolchain", "--sysroot", "-resource-dir", "-fsanitize-blacklist", "-fsanitize-ignorelist"}
)
+
// env holds a small amount of Go environment and toolchain information
// which is common to multiple builders. Most Bazel-agnostic build information
// is collected in go/build.Default though.
//
// See ./README.rst for more information about handling arguments and
// environment variables.
type env struct {
	// sdk is the path to the Go SDK, which contains tools for the host
	// platform. This may be different than GOROOT.
	sdk string

	// installSuffix is the name of the directory below GOROOT/pkg that contains
	// the .a files for the standard library we should build against.
	// For example, linux_amd64_race.
	installSuffix string

	// verbose indicates whether subprocess command lines should be printed.
	verbose bool

	// workDirPath is a temporary work directory. It is created lazily.
	workDirPath string

	// shouldPreserveWorkDir, set by the -work flag, keeps the temporary
	// work directory from being deleted by workDir's cleanup function.
	shouldPreserveWorkDir bool
}
+
+// envFlags registers flags common to multiple builders and returns an env
+// configured with those flags.
+func envFlags(flags *flag.FlagSet) *env {
+ env := &env{}
+ flags.StringVar(&env.sdk, "sdk", "", "Path to the Go SDK.")
+ flags.Var(&tagFlag{}, "tags", "List of build tags considered true.")
+ flags.StringVar(&env.installSuffix, "installsuffix", "", "Standard library under GOROOT/pkg")
+ flags.BoolVar(&env.verbose, "v", false, "Whether subprocess command lines should be printed")
+ flags.BoolVar(&env.shouldPreserveWorkDir, "work", false, "if true, the temporary work directory will be preserved")
+ return env
+}
+
+// checkFlags checks whether env flags were set to valid values. checkFlags
+// should be called after parsing flags.
+func (e *env) checkFlags() error {
+ if e.sdk == "" {
+ return errors.New("-sdk was not set")
+ }
+ return nil
+}
+
+// workDir returns a path to a temporary work directory. The same directory
+// is returned on multiple calls. The caller is responsible for cleaning
+// up the work directory by calling cleanup.
+func (e *env) workDir() (path string, cleanup func(), err error) {
+ if e.workDirPath != "" {
+ return e.workDirPath, func() {}, nil
+ }
+ // Keep the stem "rules_go_work" in sync with reproducible_binary_test.go.
+ e.workDirPath, err = ioutil.TempDir("", "rules_go_work-")
+ if err != nil {
+ return "", func() {}, err
+ }
+ if e.verbose {
+ log.Printf("WORK=%s\n", e.workDirPath)
+ }
+ if e.shouldPreserveWorkDir {
+ cleanup = func() {}
+ } else {
+ cleanup = func() { os.RemoveAll(e.workDirPath) }
+ }
+ return e.workDirPath, cleanup, nil
+}
+
// goTool returns a command line (executable path plus arguments) for the
// SDK tool named tool, located at $GOROOT/pkg/tool/$GOOS_$GOARCH/$tool.
func (e *env) goTool(tool string, args ...string) []string {
	platform := fmt.Sprintf("%s_%s", runtime.GOOS, runtime.GOARCH)
	toolPath := filepath.Join(e.sdk, "pkg", "tool", platform, tool)
	if runtime.GOOS == "windows" {
		// SDK executables carry an .exe suffix on Windows.
		toolPath += ".exe"
	}
	return append([]string{toolPath}, args...)
}
+
+// goCmd returns a slice containing the path to the go executable
+// and additional arguments.
+func (e *env) goCmd(cmd string, args ...string) []string {
+ exe := filepath.Join(e.sdk, "bin", "go")
+ if runtime.GOOS == "windows" {
+ exe += ".exe"
+ }
+ return append([]string{exe, cmd}, args...)
+}
+
+// runCommand executes a subprocess that inherits stdout, stderr, and the
+// environment from this process.
+func (e *env) runCommand(args []string) error {
+ cmd := exec.Command(args[0], args[1:]...)
+ // Redirecting stdout to stderr. This mirrors behavior in the go command:
+ // https://go.googlesource.com/go/+/refs/tags/go1.15.2/src/cmd/go/internal/work/exec.go#1958
+ buf := &bytes.Buffer{}
+ cmd.Stdout = buf
+ cmd.Stderr = buf
+ err := runAndLogCommand(cmd, e.verbose)
+ os.Stderr.Write(relativizePaths(buf.Bytes()))
+ return err
+}
+
+// runCommandToFile executes a subprocess and writes stdout/stderr to the given
+// writers.
+func (e *env) runCommandToFile(out, err io.Writer, args []string) error {
+ cmd := exec.Command(args[0], args[1:]...)
+ cmd.Stdout = out
+ cmd.Stderr = err
+ return runAndLogCommand(cmd, e.verbose)
+}
+
+func absEnv(envNameList []string, argList []string) error {
+ for _, envName := range envNameList {
+ splitedEnv := strings.Fields(os.Getenv(envName))
+ absArgs(splitedEnv, argList)
+ if err := os.Setenv(envName, strings.Join(splitedEnv, " ")); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func runAndLogCommand(cmd *exec.Cmd, verbose bool) error {
+ if verbose {
+ fmt.Fprintln(os.Stderr, formatCommand(cmd))
+ }
+ cleanup := passLongArgsInResponseFiles(cmd)
+ defer cleanup()
+ if err := cmd.Run(); err != nil {
+ return fmt.Errorf("error running subcommand %s: %v", cmd.Path, err)
+ }
+ return nil
+}
+
+// expandParamsFiles looks for arguments in args of the form
+// "-param=filename". When it finds these arguments it reads the file "filename"
+// and replaces the argument with its content.
+// It returns the expanded arguments as well as a bool that is true if any param
+// files have been passed.
+func expandParamsFiles(args []string) ([]string, bool, error) {
+ var paramsIndices []int
+ for i, arg := range args {
+ if strings.HasPrefix(arg, "-param=") {
+ paramsIndices = append(paramsIndices, i)
+ }
+ }
+ if len(paramsIndices) == 0 {
+ return args, false, nil
+ }
+ var expandedArgs []string
+ last := 0
+ for _, pi := range paramsIndices {
+ expandedArgs = append(expandedArgs, args[last:pi]...)
+ last = pi + 1
+
+ fileName := args[pi][len("-param="):]
+ fileArgs, err := readParamsFile(fileName)
+ if err != nil {
+ return nil, true, err
+ }
+ expandedArgs = append(expandedArgs, fileArgs...)
+ }
+ expandedArgs = append(expandedArgs, args[last:]...)
+ return expandedArgs, true, nil
+}
+
+// readParamsFiles parses a Bazel params file in "shell" format. The file
+// should contain one argument per line. Arguments may be quoted with single
+// quotes. All characters within quoted strings are interpreted literally
+// including newlines and excepting single quotes. Characters outside quoted
+// strings may be escaped with a backslash.
+func readParamsFile(name string) ([]string, error) {
+ data, err := ioutil.ReadFile(name)
+ if err != nil {
+ return nil, err
+ }
+
+ var args []string
+ var arg []byte
+ quote := false
+ escape := false
+ for p := 0; p < len(data); p++ {
+ b := data[p]
+ switch {
+ case escape:
+ arg = append(arg, b)
+ escape = false
+
+ case b == '\'':
+ quote = !quote
+
+ case !quote && b == '\\':
+ escape = true
+
+ case !quote && b == '\n':
+ args = append(args, string(arg))
+ arg = arg[:0]
+
+ default:
+ arg = append(arg, b)
+ }
+ }
+ if quote {
+ return nil, fmt.Errorf("unterminated quote")
+ }
+ if escape {
+ return nil, fmt.Errorf("unterminated escape")
+ }
+ if len(arg) > 0 {
+ args = append(args, string(arg))
+ }
+ return args, nil
+}
+
+// writeParamsFile formats a list of arguments in Bazel's "shell" format and writes
+// it to a file.
+func writeParamsFile(path string, args []string) error {
+ buf := new(bytes.Buffer)
+ for _, arg := range args {
+ if !strings.ContainsAny(arg, "'\n\\") {
+ fmt.Fprintln(buf, arg)
+ continue
+ }
+ buf.WriteByte('\'')
+ for _, r := range arg {
+ if r == '\'' {
+ buf.WriteString(`'\''`)
+ } else {
+ buf.WriteRune(r)
+ }
+ }
+ buf.WriteString("'\n")
+ }
+ return ioutil.WriteFile(path, buf.Bytes(), 0666)
+}
+
+// splitArgs splits a list of command line arguments into two parts: arguments
+// that should be interpreted by the builder (before "--"), and arguments
+// that should be passed through to the underlying tool (after "--").
+func splitArgs(args []string) (builderArgs []string, toolArgs []string) {
+ for i, arg := range args {
+ if arg == "--" {
+ return args[:i], args[i+1:]
+ }
+ }
+ return args, nil
+}
+
// abs returns the absolute form of path, or path unchanged if it cannot be
// resolved. Some tools/APIs require absolute paths to work correctly; most
// notably, Go on Windows cannot handle relative paths to files whose
// absolute path exceeds ~250 characters, while it can handle absolute
// paths. See http://goo.gl/eqeWjm.
//
// Strings beginning with "__BAZEL_" are returned as-is: on macOS these are
// placeholders that the compiler wrapper (wrapped_clang) is supposed to
// resolve itself.
func abs(path string) string {
	if strings.HasPrefix(path, "__BAZEL_") {
		return path
	}
	resolved, err := filepath.Abs(path)
	if err != nil {
		return path
	}
	return resolved
}
+
+// absArgs applies abs to strings that appear in args. Only paths that are
+// part of options named by flags are modified.
+func absArgs(args []string, flags []string) {
+ absNext := false
+ for i := range args {
+ if absNext {
+ args[i] = abs(args[i])
+ absNext = false
+ continue
+ }
+ for _, f := range flags {
+ if !strings.HasPrefix(args[i], f) {
+ continue
+ }
+ possibleValue := args[i][len(f):]
+ if len(possibleValue) == 0 {
+ absNext = true
+ break
+ }
+ separator := ""
+ if possibleValue[0] == '=' {
+ possibleValue = possibleValue[1:]
+ separator = "="
+ }
+ args[i] = fmt.Sprintf("%s%s%s", f, separator, abs(possibleValue))
+ break
+ }
+ }
+}
+
// relativizePaths rewrites occurrences of the current working directory in
// output to relative form: "$CWD/x" becomes "x" and a bare "$CWD" becomes
// ".". If the working directory cannot be determined, output is returned
// unmodified.
func relativizePaths(output []byte) []byte {
	dir, err := os.Getwd()
	if dir == "" || err != nil {
		return output
	}
	cwd := make([]byte, len(dir), len(dir)+1)
	copy(cwd, dir)
	if bytes.HasSuffix(cwd, []byte{filepath.Separator}) {
		// Unusual case: Getwd already ends in a separator.
		return bytes.ReplaceAll(output, cwd, nil)
	}

	// Common case: strip "$CWD/" first, then turn any remaining bare
	// "$CWD" into ".".
	withSep := append(cwd, filepath.Separator)
	output = bytes.ReplaceAll(output, withSep, nil)
	return bytes.ReplaceAll(output, withSep[:len(withSep)-1], []byte{'.'})
}
+
// formatCommand renders cmd as a string that can be pasted into a shell:
// each environment variable on its own backslash-continued line, followed
// by the argument list. Values and arguments containing spaces are quoted.
// When cmd.Env is nil, the current process environment is shown (matching
// what exec would use).
func formatCommand(cmd *exec.Cmd) string {
	quoteIfNeeded := func(s string) string {
		if strings.ContainsRune(s, ' ') {
			return strconv.Quote(s)
		}
		return s
	}
	quoteEnvIfNeeded := func(s string) string {
		eq := strings.IndexByte(s, '=')
		if eq < 0 {
			return s
		}
		if value := s[eq+1:]; strings.ContainsRune(value, ' ') {
			return s[:eq] + "=" + strconv.Quote(value)
		}
		return s
	}

	var buf bytes.Buffer
	environ := cmd.Env
	if environ == nil {
		environ = os.Environ()
	}
	for _, e := range environ {
		buf.WriteString(quoteEnvIfNeeded(e))
		buf.WriteString(" \\\n")
	}
	for i, arg := range cmd.Args {
		if i > 0 {
			buf.WriteByte(' ')
		}
		buf.WriteString(quoteIfNeeded(arg))
	}
	return buf.String()
}
+
+// passLongArgsInResponseFiles modifies cmd such that, for
+// certain programs, long arguments are passed in "response files", a
+// file on disk with the arguments, with one arg per line. An actual
+// argument starting with '@' means that the rest of the argument is
+// a filename of arguments to expand.
+//
+// See https://github.com/golang/go/issues/18468 (Windows) and
+// https://github.com/golang/go/issues/37768 (Darwin).
func passLongArgsInResponseFiles(cmd *exec.Cmd) (cleanup func()) {
	cleanup = func() {} // no cleanup by default
	var argLen int
	for _, arg := range cmd.Args {
		argLen += len(arg)
	}
	// If we're not approaching 32KB of args, just pass args normally.
	// (use 30KB instead to be conservative; not sure how accounting is done)
	if !useResponseFile(cmd.Path, argLen) {
		return
	}
	tf, err := ioutil.TempFile("", "args")
	if err != nil {
		log.Fatalf("error writing long arguments to response file: %v", err)
	}
	// From here on, the temp file must be removed whether or not the
	// command succeeds; the caller is expected to invoke cleanup.
	cleanup = func() { os.Remove(tf.Name()) }
	// One argument per line. cmd.Args[0], the program name, stays on the
	// real command line.
	var buf bytes.Buffer
	for _, arg := range cmd.Args[1:] {
		fmt.Fprintf(&buf, "%s\n", arg)
	}
	if _, err := tf.Write(buf.Bytes()); err != nil {
		tf.Close()
		cleanup()
		log.Fatalf("error writing long arguments to response file: %v", err)
	}
	if err := tf.Close(); err != nil {
		cleanup()
		log.Fatalf("error writing long arguments to response file: %v", err)
	}
	// Replace the whole argument list with a single @file reference, which
	// the compile/link tools expand themselves.
	cmd.Args = []string{cmd.Args[0], "@" + tf.Name()}
	return cleanup
}
+
// quotePathIfNeeded wraps path in single quotes when it contains whitespace
// and is not already quoted. Use this for paths that will be passed through
// the Go toolchain's quoted-argument splitter:
// https://github.com/golang/go/blob/06264b740e3bfe619f5e90359d8f0d521bd47806/src/cmd/internal/quoted/quoted.go#L25
func quotePathIfNeeded(path string) string {
	if strings.HasPrefix(path, `"`) || strings.HasPrefix(path, "'") {
		// Assume already quoted.
		return path
	}
	// Whitespace set recognized by the toolchain's splitter:
	// https://github.com/golang/go/blob/06264b740e3bfe619f5e90359d8f0d521bd47806/src/cmd/internal/quoted/quoted.go#L16
	if !strings.ContainsAny(path, " \t\n\r") {
		// No quoting required.
		return path
	}
	// The splitter supports no escaping of quotes, so path may be assumed
	// to contain none itself.
	return "'" + path + "'"
}
+
// useResponseFile reports whether arguments totalling argLen bytes should
// be passed to the program at path via an @responsefile rather than on the
// command line.
func useResponseFile(path string, argLen int) bool {
	// Unless the program uses objabi.Flagparse, which understands
	// response files, don't use response files.
	// TODO: do we need more commands? asm? cgo? For now, no.
	switch strings.TrimSuffix(filepath.Base(path), ".exe") {
	case "compile", "link":
	default:
		return false
	}
	// Windows has a limit of 32 KB arguments. To be conservative and not
	// worry about whether that includes spaces or not, just use 30 KB.
	// Darwin's limit is less clear. The OS claims 256KB, but we've seen
	// failures with arglen as small as 50KB.
	return argLen > 30<<10
}
diff --git a/go/tools/builders/filter.go b/go/tools/builders/filter.go
new file mode 100644
index 00000000..fbb0f2ac
--- /dev/null
+++ b/go/tools/builders/filter.go
@@ -0,0 +1,168 @@
+// Copyright 2017 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/token"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
// fileInfo describes a single input source file: how it was classified by
// extension, whether build constraints matched it, and (for Go files) what
// it imports and embeds.
type fileInfo struct {
	filename string
	ext      ext // file kind derived from the extension
	header   []byte
	fset     *token.FileSet
	parsed   *ast.File
	parseErr error
	matched  bool // whether build constraints select this file
	isCgo    bool // whether the file imports "C"
	pkg      string
	imports  []fileImport
	embeds   []fileEmbed
}

// ext enumerates the source-file kinds recognized by readFileInfo.
type ext int

const (
	goExt     ext = iota // .go
	cExt                 // .c
	cxxExt               // .cc, .cxx, .cpp, .C
	objcExt              // .m
	objcxxExt            // .mm
	sExt                 // .s (case-insensitive)
	hExt                 // .h, .hh, .hpp, .hxx
)

// fileImport records one import declaration found in a Go file.
type fileImport struct {
	path string
	pos  token.Pos
	doc  *ast.CommentGroup
}

// fileEmbed records one //go:embed pattern found in a Go file.
type fileEmbed struct {
	pattern string
	pos     token.Position
}

// archiveSrcs collects the filtered sources for one archive, grouped by
// file kind.
type archiveSrcs struct {
	goSrcs, cSrcs, cxxSrcs, objcSrcs, objcxxSrcs, sSrcs, hSrcs []fileInfo
}
+
+// filterAndSplitFiles filters files using build constraints and collates
+// them by extension.
+func filterAndSplitFiles(fileNames []string) (archiveSrcs, error) {
+ var res archiveSrcs
+ for _, s := range fileNames {
+ src, err := readFileInfo(build.Default, s)
+ if err != nil {
+ return archiveSrcs{}, err
+ }
+ if !src.matched {
+ continue
+ }
+ var srcs *[]fileInfo
+ switch src.ext {
+ case goExt:
+ srcs = &res.goSrcs
+ case cExt:
+ srcs = &res.cSrcs
+ case cxxExt:
+ srcs = &res.cxxSrcs
+ case objcExt:
+ srcs = &res.objcSrcs
+ case objcxxExt:
+ srcs = &res.objcxxSrcs
+ case sExt:
+ srcs = &res.sSrcs
+ case hExt:
+ srcs = &res.hSrcs
+ }
+ *srcs = append(*srcs, src)
+ }
+ return res, nil
+}
+
+// readFileInfo applies build constraints to an input file and returns whether
+// it should be compiled.
func readFileInfo(bctx build.Context, input string) (fileInfo, error) {
	fi := fileInfo{filename: input}
	// ".C" (capital) conventionally means C++; all other extensions are
	// matched case-insensitively.
	if ext := filepath.Ext(input); ext == ".C" {
		fi.ext = cxxExt
	} else {
		switch strings.ToLower(ext) {
		case ".go":
			fi.ext = goExt
		case ".c":
			fi.ext = cExt
		case ".cc", ".cxx", ".cpp":
			fi.ext = cxxExt
		case ".m":
			fi.ext = objcExt
		case ".mm":
			fi.ext = objcxxExt
		case ".s":
			fi.ext = sExt
		case ".h", ".hh", ".hpp", ".hxx":
			fi.ext = hExt
		default:
			return fileInfo{}, fmt.Errorf("unrecognized file extension: %s", ext)
		}
	}

	dir, base := filepath.Split(input)
	// Check build constraints on non-cgo files.
	// Skip cgo files, since they get rejected (due to leading '_') and won't
	// have any build constraints anyway.
	if strings.HasPrefix(base, "_cgo") {
		fi.matched = true
	} else {
		match, err := bctx.MatchFile(dir, base)
		if err != nil {
			return fi, err
		}
		fi.matched = match
	}
	// If it's not a go file, there's nothing more to read.
	if fi.ext != goExt {
		return fi, nil
	}

	// Scan the file for imports and embeds.
	f, err := os.Open(input)
	if err != nil {
		return fileInfo{}, err
	}
	defer f.Close()
	fi.fset = token.NewFileSet()
	if err := readGoInfo(f, &fi); err != nil {
		return fileInfo{}, err
	}

	// Exclude cgo files if cgo is not enabled: any file importing "C" is
	// considered a cgo file.
	for _, imp := range fi.imports {
		if imp.path == "C" {
			fi.isCgo = true
			break
		}
	}
	fi.matched = fi.matched && (bctx.CgoEnabled || !fi.isCgo)

	return fi, nil
}
diff --git a/go/tools/builders/filter_buildid.go b/go/tools/builders/filter_buildid.go
new file mode 100644
index 00000000..893a0f6a
--- /dev/null
+++ b/go/tools/builders/filter_buildid.go
@@ -0,0 +1,44 @@
+// Copyright 2018 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "os"
+ "os/exec"
+ "runtime"
+ "syscall"
+)
+
// filterBuildID executes the tool named on the command line after removing
// any "-buildid" flag and its value. It is intended to be used with
// -toolexec.
func filterBuildID(args []string) error {
	filtered := make([]string, 0, len(args))
	skip := false
	for _, arg := range args {
		switch {
		case skip:
			// This is the value of a preceding -buildid flag; drop it.
			skip = false
		case arg == "-buildid":
			skip = true
		default:
			filtered = append(filtered, arg)
		}
	}
	if runtime.GOOS == "windows" {
		// No exec(2) on Windows: run the tool as a child process instead.
		cmd := exec.Command(filtered[0], filtered[1:]...)
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		return cmd.Run()
	}
	return syscall.Exec(filtered[0], filtered, os.Environ())
}
diff --git a/go/tools/builders/filter_test.go b/go/tools/builders/filter_test.go
new file mode 100644
index 00000000..61ec385b
--- /dev/null
+++ b/go/tools/builders/filter_test.go
@@ -0,0 +1,136 @@
+/* Copyright 2016 The Bazel Authors. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "go/build"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "reflect"
+ "sort"
+ "testing"
+)
+
// testfiles maps file name to content for a small synthetic package used by
// TestTags. The //+build lines exercise tag-, OS/arch-, and cgo-based
// constraint filtering.
var testfiles = map[string]string{
	"cgo.go": `
//+build cgo

package tags

/*
#include <stdio.h>
#include <stdlib.h>

void myprint(char* s) {
	printf("%s", s);
}
*/

import "C"

func main() {
	C.myprint("hello")
}
`,
	"extra.go": `
//+build a,b b,c

package tags
`,
	"ignore.go": `
//+build ignore

package tags
`,
	"normal.go": `
package tags
`,
	"on_darwin.go": `
package tags
`,
	"system.go": `
//+build arm,darwin linux,amd64

package tags
`,
}
+
// TestTags writes the testfiles package to a temporary directory and checks
// which Go files survive constraint filtering under several combinations of
// GOOS, GOARCH, build tags, and cgo. Note that bctx mutations accumulate
// across the runTest calls below, so their order matters.
func TestTags(t *testing.T) {
	tempdir, err := ioutil.TempDir("", "goruletest")
	if err != nil {
		t.Fatalf("Error creating temporary directory: %v", err)
	}
	defer os.RemoveAll(tempdir)

	input := []string{}
	for k, v := range testfiles {
		p := filepath.Join(tempdir, k)
		if err := ioutil.WriteFile(p, []byte(v), 0644); err != nil {
			t.Fatalf("WriteFile(%s): %v", p, err)
		}
		input = append(input, k)
	}
	sort.Strings(input)

	wd, err := os.Getwd()
	if err != nil {
		t.Fatalf("Getwd: %v", err)
	}

	// Filtering matches files relative to the working directory, so run
	// from inside the temp directory.
	err = os.Chdir(tempdir)
	if err != nil {
		t.Fatalf("Chdir(%s): %v", tempdir, err)
	}
	defer os.Chdir(wd)

	bctx := build.Default
	// Always fake the os and arch
	bctx.GOOS = "darwin"
	bctx.GOARCH = "amd64"
	bctx.CgoEnabled = false
	runTest(t, bctx, input, []string{"normal.go", "on_darwin.go"})
	bctx.GOOS = "linux"
	runTest(t, bctx, input, []string{"normal.go", "system.go"})
	bctx.GOARCH = "arm"
	runTest(t, bctx, input, []string{"normal.go"})
	bctx.BuildTags = []string{"a", "b"}
	runTest(t, bctx, input, []string{"extra.go", "normal.go"})
	bctx.BuildTags = []string{"a", "c"}
	runTest(t, bctx, input, []string{"normal.go"})
	bctx.CgoEnabled = true
	runTest(t, bctx, input, []string{"cgo.go", "normal.go"})
}
+
+func runTest(t *testing.T, bctx build.Context, inputs []string, expect []string) {
+ build.Default = bctx
+ got, err := filterAndSplitFiles(inputs)
+ if err != nil {
+ t.Errorf("filter %v,%v,%v,%v failed: %v", bctx.GOOS, bctx.GOARCH, bctx.CgoEnabled, bctx.BuildTags, err)
+ }
+ gotGoFilenames := make([]string, len(got.goSrcs))
+ for i, src := range got.goSrcs {
+ gotGoFilenames[i] = src.filename
+ }
+ if !reflect.DeepEqual(expect, gotGoFilenames) {
+ t.Errorf("filter %v,%v,%v,%v: expect %v got %v", bctx.GOOS, bctx.GOARCH, bctx.CgoEnabled, bctx.BuildTags, expect, got)
+ }
+}
+
// abs stands in for env.go's abs so these tests don't have to build env.go
// and flags.go; it returns its argument unchanged.
func abs(path string) string {
	return path
}
diff --git a/go/tools/builders/flags.go b/go/tools/builders/flags.go
new file mode 100644
index 00000000..e3604cbd
--- /dev/null
+++ b/go/tools/builders/flags.go
@@ -0,0 +1,135 @@
+// Copyright 2017 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "errors"
+ "fmt"
+ "go/build"
+ "strings"
+ "unicode"
+)
+
// multiFlag collects the values of a string flag that may be repeated on
// the command line.
type multiFlag []string

// String renders the accumulated values, or "" when none were given.
func (m *multiFlag) String() string {
	if m == nil || len(*m) == 0 {
		return ""
	}
	return fmt.Sprint(*m)
}

// Set appends one more value; it never fails.
func (m *multiFlag) Set(v string) error {
	*m = append(*m, v)
	return nil
}
+
+// quoteMultiFlag allows repeated string flags to be collected into a slice.
+// Flags are split on spaces. Single quotes are removed, and spaces within
+// quotes are removed. Literal quotes may be escaped with a backslash.
+type quoteMultiFlag []string
+
+func (m *quoteMultiFlag) String() string {
+ if m == nil || len(*m) == 0 {
+ return ""
+ }
+ return fmt.Sprint(*m)
+}
+
+func (m *quoteMultiFlag) Set(v string) error {
+ fs, err := splitQuoted(v)
+ if err != nil {
+ return err
+ }
+ *m = append(*m, fs...)
+ return nil
+}
+
+// splitQuoted splits the string s around each instance of one or more consecutive
+// white space characters while taking into account quotes and escaping, and
+// returns an array of substrings of s or an empty list if s contains only white space.
+// Single quotes and double quotes are recognized to prevent splitting within the
+// quoted region, and are removed from the resulting substrings. If a quote in s
+// isn't closed err will be set and r will have the unclosed argument as the
+// last element. The backslash is used for escaping.
+//
+// For example, the following string:
+//
+// a b:"c d" 'e''f' "g\""
+//
+// Would be parsed as:
+//
+// []string{"a", "b:c d", "ef", `g"`}
+//
+// Copied from go/build.splitQuoted. Also in Gazelle (where tests are).
func splitQuoted(s string) (r []string, err error) {
	var args []string
	// arg is reused as scratch space for the current field; i is its length.
	arg := make([]rune, len(s))
	escaped := false
	quoted := false
	quote := '\x00'
	i := 0
	for _, rune := range s {
		switch {
		case escaped:
			escaped = false
		case rune == '\\':
			escaped = true
			continue
		case quote != '\x00':
			// Inside a quoted region only the matching close quote is special.
			if rune == quote {
				quote = '\x00'
				continue
			}
		case rune == '"' || rune == '\'':
			quoted = true
			quote = rune
			continue
		case unicode.IsSpace(rune):
			// "quoted" records that a (possibly empty) quoted field was
			// seen, so '' yields an empty string rather than nothing.
			if quoted || i > 0 {
				quoted = false
				args = append(args, string(arg[:i]))
				i = 0
			}
			continue
		}
		arg[i] = rune
		i++
	}
	if quoted || i > 0 {
		args = append(args, string(arg[:i]))
	}
	if quote != 0 {
		err = errors.New("unclosed quote")
	} else if escaped {
		err = errors.New("unfinished escaping")
	}
	return args, err
}
+
// tagFlag appends build tags to the global build.Default context. The flag
// value is a comma-separated list of tags.
type tagFlag struct{}

// String renders the current global tag list.
func (f *tagFlag) String() string {
	return strings.Join(build.Default.BuildTags, ",")
}

// Set splits opt on commas and appends each tag to build.Default.BuildTags.
func (f *tagFlag) Set(opt string) error {
	for _, tag := range strings.Split(opt, ",") {
		build.Default.BuildTags = append(build.Default.BuildTags, tag)
	}
	return nil
}
diff --git a/go/tools/builders/generate_nogo_main.go b/go/tools/builders/generate_nogo_main.go
new file mode 100644
index 00000000..872b9b0a
--- /dev/null
+++ b/go/tools/builders/generate_nogo_main.go
@@ -0,0 +1,196 @@
+/* Copyright 2018 The Bazel Authors. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Generates the nogo binary to analyze Go source code at build time.
+
+package main
+
+import (
+ "encoding/json"
+ "errors"
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "math"
+ "os"
+ "regexp"
+ "strconv"
+ "text/template"
+)
+
// nogoMainTpl is the text/template for the generated nogo main package.
// Template data: .Imports (uniquely named analyzer packages), .Configs
// (per-analyzer configuration), and .NeedRegexp (whether the generated file
// must import "regexp" for only_files/exclude_files patterns).
const nogoMainTpl = `
package main


import (
{{- if .NeedRegexp }}
	"regexp"
{{- end}}
{{- range $import := .Imports}}
	{{$import.Name}} "{{$import.Path}}"
{{- end}}
	"golang.org/x/tools/go/analysis"
)

var analyzers = []*analysis.Analyzer{
{{- range $import := .Imports}}
	{{$import.Name}}.Analyzer,
{{- end}}
}

// configs maps analysis names to configurations.
var configs = map[string]config{
{{- range $name, $config := .Configs}}
	{{printf "%q" $name}}: config{
		{{- if $config.AnalyzerFlags }}
		analyzerFlags: map[string]string {
			{{- range $flagKey, $flagValue := $config.AnalyzerFlags}}
			{{printf "%q: %q" $flagKey $flagValue}},
			{{- end}}
		},
		{{- end -}}
		{{- if $config.OnlyFiles}}
		onlyFiles: []*regexp.Regexp{
			{{- range $path, $comment := $config.OnlyFiles}}
			{{- if $comment}}
			// {{$comment}}
			{{end -}}
			{{printf "regexp.MustCompile(%q)" $path}},
			{{- end}}
		},
		{{- end -}}
		{{- if $config.ExcludeFiles}}
		excludeFiles: []*regexp.Regexp{
			{{- range $path, $comment := $config.ExcludeFiles}}
			{{- if $comment}}
			// {{$comment}}
			{{end -}}
			{{printf "regexp.MustCompile(%q)" $path}},
			{{- end}}
		},
		{{- end}}
	},
{{- end}}
}
`
+
+func genNogoMain(args []string) error {
+ analyzerImportPaths := multiFlag{}
+ flags := flag.NewFlagSet("generate_nogo_main", flag.ExitOnError)
+ out := flags.String("output", "", "output file to write (defaults to stdout)")
+ flags.Var(&analyzerImportPaths, "analyzer_importpath", "import path of an analyzer library")
+ configFile := flags.String("config", "", "nogo config file")
+ if err := flags.Parse(args); err != nil {
+ return err
+ }
+ if *out == "" {
+ return errors.New("must provide output file")
+ }
+
+ outFile := os.Stdout
+ var cErr error
+ outFile, err := os.Create(*out)
+ if err != nil {
+ return fmt.Errorf("os.Create(%q): %v", *out, err)
+ }
+ defer func() {
+ if err := outFile.Close(); err != nil {
+ cErr = fmt.Errorf("error closing %s: %v", outFile.Name(), err)
+ }
+ }()
+
+ config, err := buildConfig(*configFile)
+ if err != nil {
+ return err
+ }
+
+ type Import struct {
+ Path, Name string
+ }
+ // Create unique name for each imported analyzer.
+ suffix := 1
+ imports := make([]Import, 0, len(analyzerImportPaths))
+ for _, path := range analyzerImportPaths {
+ imports = append(imports, Import{
+ Path: path,
+ Name: "analyzer" + strconv.Itoa(suffix)})
+ if suffix == math.MaxInt32 {
+ return fmt.Errorf("cannot generate more than %d analyzers", suffix)
+ }
+ suffix++
+ }
+ data := struct {
+ Imports []Import
+ Configs Configs
+ NeedRegexp bool
+ }{
+ Imports: imports,
+ Configs: config,
+ }
+ for _, c := range config {
+ if len(c.OnlyFiles) > 0 || len(c.ExcludeFiles) > 0 {
+ data.NeedRegexp = true
+ break
+ }
+ }
+
+ tpl := template.Must(template.New("source").Parse(nogoMainTpl))
+ if err := tpl.Execute(outFile, data); err != nil {
+ return fmt.Errorf("template.Execute failed: %v", err)
+ }
+ return cErr
+}
+
// buildConfig reads and validates the nogo JSON configuration at path. An
// empty path yields an empty configuration. Every only_files and
// exclude_files pattern must be a valid regular expression.
func buildConfig(path string) (Configs, error) {
	if path == "" {
		return Configs{}, nil
	}
	b, err := ioutil.ReadFile(path)
	if err != nil {
		return Configs{}, fmt.Errorf("failed to read config file: %v", err)
	}
	configs := make(Configs)
	if err := json.Unmarshal(b, &configs); err != nil {
		return Configs{}, fmt.Errorf("failed to unmarshal config file: %v", err)
	}
	for name, c := range configs {
		// Reject any file pattern that won't compile.
		for _, patterns := range []map[string]string{c.OnlyFiles, c.ExcludeFiles} {
			for pattern := range patterns {
				if _, err := regexp.Compile(pattern); err != nil {
					return Configs{}, fmt.Errorf("invalid pattern for analysis %q: %v", name, err)
				}
			}
		}
		configs[name] = Config{
			// Description is currently unused and deliberately dropped.
			OnlyFiles:     c.OnlyFiles,
			ExcludeFiles:  c.ExcludeFiles,
			AnalyzerFlags: c.AnalyzerFlags,
		}
	}
	return configs, nil
}

// Configs maps analysis names to their configuration.
type Configs map[string]Config

// Config is the JSON configuration for a single analysis.
type Config struct {
	Description   string
	OnlyFiles     map[string]string `json:"only_files"`
	ExcludeFiles  map[string]string `json:"exclude_files"`
	AnalyzerFlags map[string]string `json:"analyzer_flags"`
}
diff --git a/go/tools/builders/generate_test_main.go b/go/tools/builders/generate_test_main.go
new file mode 100644
index 00000000..6d545b9d
--- /dev/null
+++ b/go/tools/builders/generate_test_main.go
@@ -0,0 +1,416 @@
+/* Copyright 2016 The Bazel Authors. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Go testing support for Bazel.
+//
+// A Go test comprises three packages:
+//
+// 1. An internal test package, compiled from the sources of the library being
+// tested and any _test.go files with the same package name.
+// 2. An external test package, compiled from _test.go files with a package
+// name ending with "_test".
+// 3. A generated main package that imports both packages and initializes the
+// test framework with a list of tests, benchmarks, examples, and fuzz
+// targets read from source files.
+//
+// This action generates the source code for (3). The equivalent code for
+// 'go test' is in $GOROOT/src/cmd/go/internal/load/test.go.
+
+package main
+
+import (
+ "flag"
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/doc"
+ "go/parser"
+ "go/token"
+ "os"
+ "sort"
+ "strings"
+ "text/template"
+)
+
// Import is one package imported by the generated test main under an
// assigned local name.
type Import struct {
	Name string
	Path string
}

// TestCase identifies one Test*, Benchmark*, or Fuzz* function by the
// package it lives in and its function name.
type TestCase struct {
	Package string
	Name    string
}

// Example describes one runnable example and its expected output.
type Example struct {
	Package   string
	Name      string
	Output    string
	Unordered bool
}

// Cases holds template data.
type Cases struct {
	Imports     []*Import
	Tests       []TestCase
	Benchmarks  []TestCase
	FuzzTargets []TestCase
	Examples    []Example
	TestMain    string // "pkg.TestMain" when the package defines TestMain, else ""
	CoverMode   string
	CoverFormat string
	Pkgname     string
}

// Version returns whether v is a supported Go version (like "go1.18").
func (c *Cases) Version(v string) bool {
	for _, r := range build.Default.ReleaseTags {
		if v == r {
			return true
		}
	}
	return false
}
+
// testMainTpl is the template for the generated test main package. It wires
// the discovered tests, benchmarks, fuzz targets, and examples into
// testing.MainStart, applies Bazel test sharding and filtering environment
// variables, and integrates coverage output.
const testMainTpl = `
package main

// This package must be initialized before packages being tested.
// NOTE: this relies on the order of package initialization, which is the spec
// is somewhat unclear about-- it only clearly guarantees that imported packages
// are initialized before their importers, though in practice (and implied) it
// also respects declaration order, which we're relying on here.
import "github.com/bazelbuild/rules_go/go/tools/bzltestutil"

import (
	"flag"
	"log"
	"os"
	"os/exec"
{{if .TestMain}}
	"reflect"
{{end}}
	"strconv"
	"testing"
	"testing/internal/testdeps"

{{if ne .CoverMode ""}}
	"github.com/bazelbuild/rules_go/go/tools/coverdata"
{{end}}

{{range $p := .Imports}}
	{{$p.Name}} "{{$p.Path}}"
{{end}}
)

var allTests = []testing.InternalTest{
{{range .Tests}}
	{"{{.Name}}", {{.Package}}.{{.Name}} },
{{end}}
}

var benchmarks = []testing.InternalBenchmark{
{{range .Benchmarks}}
	{"{{.Name}}", {{.Package}}.{{.Name}} },
{{end}}
}

{{if .Version "go1.18"}}
var fuzzTargets = []testing.InternalFuzzTarget{
{{range .FuzzTargets}}
	{"{{.Name}}", {{.Package}}.{{.Name}} },
{{end}}
}
{{end}}

var examples = []testing.InternalExample{
{{range .Examples}}
	{Name: "{{.Name}}", F: {{.Package}}.{{.Name}}, Output: {{printf "%q" .Output}}, Unordered: {{.Unordered}} },
{{end}}
}

func testsInShard() []testing.InternalTest {
	totalShards, err := strconv.Atoi(os.Getenv("TEST_TOTAL_SHARDS"))
	if err != nil || totalShards <= 1 {
		return allTests
	}
	file, err := os.Create(os.Getenv("TEST_SHARD_STATUS_FILE"))
	if err != nil {
		log.Fatalf("Failed to touch TEST_SHARD_STATUS_FILE: %v", err)
	}
	_ = file.Close()
	shardIndex, err := strconv.Atoi(os.Getenv("TEST_SHARD_INDEX"))
	if err != nil || shardIndex < 0 {
		return allTests
	}
	tests := []testing.InternalTest{}
	for i, t := range allTests {
		if i % totalShards == shardIndex {
			tests = append(tests, t)
		}
	}
	return tests
}

func main() {
	if bzltestutil.ShouldWrap() {
		err := bzltestutil.Wrap("{{.Pkgname}}")
		if xerr, ok := err.(*exec.ExitError); ok {
			os.Exit(xerr.ExitCode())
		} else if err != nil {
			log.Print(err)
			os.Exit(bzltestutil.TestWrapperAbnormalExit)
		} else {
			os.Exit(0)
		}
	}

	testDeps :=
		{{if eq .CoverFormat "lcov"}}
		bzltestutil.LcovTestDeps{TestDeps: testdeps.TestDeps{}}
		{{else}}
		testdeps.TestDeps{}
		{{end}}
	{{if .Version "go1.18"}}
	m := testing.MainStart(testDeps, testsInShard(), benchmarks, fuzzTargets, examples)
	{{else}}
	m := testing.MainStart(testDeps, testsInShard(), benchmarks, examples)
	{{end}}

	if filter := os.Getenv("TESTBRIDGE_TEST_ONLY"); filter != "" {
		flag.Lookup("test.run").Value.Set(filter)
	}

	if failfast := os.Getenv("TESTBRIDGE_TEST_RUNNER_FAIL_FAST"); failfast != "" {
		flag.Lookup("test.failfast").Value.Set("true")
	}
{{if eq .CoverFormat "lcov"}}
	panicOnExit0Flag := flag.Lookup("test.paniconexit0").Value
	testDeps.OriginalPanicOnExit = panicOnExit0Flag.(flag.Getter).Get().(bool)
	// Setting this flag provides a way to run hooks right before testing.M.Run() returns.
	panicOnExit0Flag.Set("true")
{{end}}
{{if ne .CoverMode ""}}
	if len(coverdata.Counters) > 0 {
		testing.RegisterCover(testing.Cover{
			Mode: "{{ .CoverMode }}",
			Counters: coverdata.Counters,
			Blocks: coverdata.Blocks,
		})

		if coverageDat, ok := os.LookupEnv("COVERAGE_OUTPUT_FILE"); ok {
			{{if eq .CoverFormat "lcov"}}
			flag.Lookup("test.coverprofile").Value.Set(coverageDat+".cover")
			{{else}}
			flag.Lookup("test.coverprofile").Value.Set(coverageDat)
			{{end}}
		}
	}
	{{end}}

	{{if not .TestMain}}
	res := m.Run()
	{{else}}
	{{.TestMain}}(m)
	{{/* See golang.org/issue/34129 and golang.org/cl/219639 */}}
	res := int(reflect.ValueOf(m).Elem().FieldByName("exitCode").Int())
	{{end}}
	os.Exit(res)
}
`
+
// genTestMain generates the main package for a go_test binary: it parses
// the given test sources, discovers tests, benchmarks, fuzz targets,
// examples, and an optional TestMain, and renders testMainTpl to -output.
func genTestMain(args []string) error {
	// Prepare our flags
	args, _, err := expandParamsFiles(args)
	if err != nil {
		return err
	}
	imports := multiFlag{}
	sources := multiFlag{}
	flags := flag.NewFlagSet("GoTestGenTest", flag.ExitOnError)
	goenv := envFlags(flags)
	out := flags.String("output", "", "output file to write. Defaults to stdout.")
	coverMode := flags.String("cover_mode", "", "the coverage mode to use")
	coverFormat := flags.String("cover_format", "", "the coverage report type to generate (go_cover or lcov)")
	pkgname := flags.String("pkgname", "", "package name of test")
	flags.Var(&imports, "import", "Packages to import")
	flags.Var(&sources, "src", "Sources to process for tests")
	if err := flags.Parse(args); err != nil {
		return err
	}
	if err := goenv.checkFlags(); err != nil {
		return err
	}
	// Process import args; each is formatted "name=path".
	importMap := map[string]*Import{}
	for _, imp := range imports {
		parts := strings.Split(imp, "=")
		if len(parts) != 2 {
			return fmt.Errorf("Invalid import %q specified", imp)
		}
		i := &Import{Name: parts[0], Path: parts[1]}
		importMap[i.Name] = i
	}
	// Process source args; each is formatted "pkg=filename".
	sourceList := []string{}
	sourceMap := map[string]string{}
	for _, s := range sources {
		parts := strings.Split(s, "=")
		if len(parts) != 2 {
			return fmt.Errorf("Invalid source %q specified", s)
		}
		sourceList = append(sourceList, parts[1])
		sourceMap[parts[1]] = parts[0]
	}

	// filter our input file list
	filteredSrcs, err := filterAndSplitFiles(sourceList)
	if err != nil {
		return err
	}
	goSrcs := filteredSrcs.goSrcs

	outFile := os.Stdout
	if *out != "" {
		var err error
		outFile, err = os.Create(*out)
		if err != nil {
			return fmt.Errorf("os.Create(%q): %v", *out, err)
		}
		defer outFile.Close()
	}

	cases := Cases{
		CoverFormat: *coverFormat,
		CoverMode:   *coverMode,
		Pkgname:     *pkgname,
	}

	testFileSet := token.NewFileSet()
	// pkgs records which imported packages are actually referenced by a
	// discovered test/benchmark/example, so unused ones can be imported as "_".
	pkgs := map[string]bool{}
	for _, f := range goSrcs {
		parse, err := parser.ParseFile(testFileSet, f.filename, nil, parser.ParseComments)
		if err != nil {
			return fmt.Errorf("ParseFile(%q): %v", f.filename, err)
		}
		pkg := sourceMap[f.filename]
		if strings.HasSuffix(parse.Name.String(), "_test") {
			pkg += "_test"
		}
		for _, e := range doc.Examples(parse) {
			// Skip examples with no expected output; they are not run.
			if e.Output == "" && !e.EmptyOutput {
				continue
			}
			cases.Examples = append(cases.Examples, Example{
				Name:      "Example" + e.Name,
				Package:   pkg,
				Output:    e.Output,
				Unordered: e.Unordered,
			})
			pkgs[pkg] = true
		}
		for _, d := range parse.Decls {
			fn, ok := d.(*ast.FuncDecl)
			if !ok {
				continue
			}
			if fn.Recv != nil {
				// Methods can't be tests.
				continue
			}
			if fn.Name.Name == "TestMain" {
				// TestMain is not, itself, a test
				pkgs[pkg] = true
				cases.TestMain = fmt.Sprintf("%s.%s", pkg, fn.Name.Name)
				continue
			}

			// Here we check the signature of the Test* function. To
			// be considered a test:

			// 1. The function should have a single argument.
			if len(fn.Type.Params.List) != 1 {
				continue
			}

			// 2. The function should return nothing.
			if fn.Type.Results != nil {
				continue
			}

			// 3. The only parameter should have a type identified as
			//    *<something>.T
			starExpr, ok := fn.Type.Params.List[0].Type.(*ast.StarExpr)
			if !ok {
				continue
			}
			selExpr, ok := starExpr.X.(*ast.SelectorExpr)
			if !ok {
				continue
			}

			// We do not discriminate on the referenced type of the
			// parameter being *testing.T. Instead we assert that it
			// should be *<something>.T. This is because the import
			// could have been aliased as a different identifier.

			if strings.HasPrefix(fn.Name.Name, "Test") {
				if selExpr.Sel.Name != "T" {
					continue
				}
				pkgs[pkg] = true
				cases.Tests = append(cases.Tests, TestCase{
					Package: pkg,
					Name:    fn.Name.Name,
				})
			}
			if strings.HasPrefix(fn.Name.Name, "Benchmark") {
				if selExpr.Sel.Name != "B" {
					continue
				}
				pkgs[pkg] = true
				cases.Benchmarks = append(cases.Benchmarks, TestCase{
					Package: pkg,
					Name:    fn.Name.Name,
				})
			}
			if strings.HasPrefix(fn.Name.Name, "Fuzz") {
				if selExpr.Sel.Name != "F" {
					continue
				}
				pkgs[pkg] = true
				cases.FuzzTargets = append(cases.FuzzTargets, TestCase{
					Package: pkg,
					Name:    fn.Name.Name,
				})
			}
		}
	}

	for name := range importMap {
		// Set the names for all unused imports to "_"
		if !pkgs[name] {
			importMap[name].Name = "_"
		}
		cases.Imports = append(cases.Imports, importMap[name])
	}
	// Sort for deterministic output.
	sort.Slice(cases.Imports, func(i, j int) bool {
		return cases.Imports[i].Name < cases.Imports[j].Name
	})
	tpl := template.Must(template.New("source").Parse(testMainTpl))
	if err := tpl.Execute(outFile, &cases); err != nil {
		return fmt.Errorf("template.Execute(%v): %v", cases, err)
	}
	return nil
}
diff --git a/go/tools/builders/go_path.go b/go/tools/builders/go_path.go
new file mode 100644
index 00000000..58a7b8a9
--- /dev/null
+++ b/go/tools/builders/go_path.go
@@ -0,0 +1,203 @@
+// Copyright 2018 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "archive/zip"
+ "encoding/json"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "path/filepath"
+)
+
// mode selects how manifest entries are materialized in the output:
// packed into a zip archive, copied into a tree, or symlinked into a tree.
type mode int

const (
	invalidMode mode = iota
	archiveMode
	copyMode
	linkMode
)

// modeFromString converts a -mode flag value into a mode constant,
// returning an error for any unrecognized value.
func modeFromString(s string) (mode, error) {
	known := map[string]mode{
		"archive": archiveMode,
		"copy":    copyMode,
		"link":    linkMode,
	}
	m, ok := known[s]
	if !ok {
		return invalidMode, fmt.Errorf("invalid mode: %s", s)
	}
	return m, nil
}
+
// manifestEntry is a single file mapping read from the JSON manifest:
// Src is the path of an existing input file, and Dst is the
// slash-separated relative path the file should have in the output.
type manifestEntry struct {
	Src, Dst string
}
+
+func main() {
+ log.SetPrefix("GoPath: ")
+ log.SetFlags(0)
+ if err := run(os.Args[1:]); err != nil {
+ log.Fatal(err)
+ }
+}
+
+func run(args []string) error {
+ var manifest, out string
+ flags := flag.NewFlagSet("go_path", flag.ContinueOnError)
+ flags.StringVar(&manifest, "manifest", "", "name of json file listing files to include")
+ flags.StringVar(&out, "out", "", "output file or directory")
+ modeFlag := flags.String("mode", "", "copy, link, or archive")
+ if err := flags.Parse(args); err != nil {
+ return err
+ }
+ if manifest == "" {
+ return errors.New("-manifest not set")
+ }
+ if out == "" {
+ return errors.New("-out not set")
+ }
+ if *modeFlag == "" {
+ return errors.New("-mode not set")
+ }
+ mode, err := modeFromString(*modeFlag)
+ if err != nil {
+ return err
+ }
+
+ entries, err := readManifest(manifest)
+ if err != nil {
+ return err
+ }
+
+ switch mode {
+ case archiveMode:
+ err = archivePath(out, entries)
+ case copyMode:
+ err = copyPath(out, entries)
+ case linkMode:
+ err = linkPath(out, entries)
+ }
+ return err
+}
+
+func readManifest(path string) ([]manifestEntry, error) {
+ data, err := ioutil.ReadFile(path)
+ if err != nil {
+ return nil, fmt.Errorf("error reading manifest: %v", err)
+ }
+ var entries []manifestEntry
+ if err := json.Unmarshal(data, &entries); err != nil {
+ return nil, fmt.Errorf("error unmarshalling manifest %s: %v", path, err)
+ }
+ return entries, nil
+}
+
// archivePath writes every manifest entry into a single zip archive at
// out. Entry names inside the zip are the manifest Dst values.
func archivePath(out string, manifest []manifestEntry) (err error) {
	outFile, err := os.Create(out)
	if err != nil {
		return err
	}
	// The named return lets this deferred Close surface its error when
	// the body otherwise succeeded.
	defer func() {
		if e := outFile.Close(); err == nil && e != nil {
			err = fmt.Errorf("error closing archive %s: %v", out, e)
		}
	}()
	outZip := zip.NewWriter(outFile)

	for _, entry := range manifest {
		// Sources are closed explicitly on every path (rather than
		// deferred) so descriptors are not held for the whole loop.
		srcFile, err := os.Open(abs(filepath.FromSlash(entry.Src)))
		if err != nil {
			return err
		}
		w, err := outZip.Create(entry.Dst)
		if err != nil {
			srcFile.Close()
			return err
		}
		if _, err := io.Copy(w, srcFile); err != nil {
			srcFile.Close()
			return err
		}
		if err := srcFile.Close(); err != nil {
			return err
		}
	}

	// Close flushes the zip central directory; its error is fatal.
	if err := outZip.Close(); err != nil {
		return fmt.Errorf("error constructing archive %s: %v", out, err)
	}
	return nil
}
+
+func copyPath(out string, manifest []manifestEntry) error {
+ if err := os.MkdirAll(out, 0777); err != nil {
+ return err
+ }
+ for _, entry := range manifest {
+ dst := abs(filepath.Join(out, filepath.FromSlash(entry.Dst)))
+ if err := os.MkdirAll(filepath.Dir(dst), 0777); err != nil {
+ return err
+ }
+ srcFile, err := os.Open(abs(filepath.FromSlash(entry.Src)))
+ if err != nil {
+ return err
+ }
+ dstFile, err := os.Create(dst)
+ if err != nil {
+ srcFile.Close()
+ return err
+ }
+ if _, err := io.Copy(dstFile, srcFile); err != nil {
+ dstFile.Close()
+ srcFile.Close()
+ return err
+ }
+ srcFile.Close()
+ if err := dstFile.Close(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func linkPath(out string, manifest []manifestEntry) error {
+ // out directory may already exist and may contain old symlinks. Delete.
+ if err := os.RemoveAll(out); err != nil {
+ return err
+ }
+ if err := os.MkdirAll(out, 0777); err != nil {
+ return err
+ }
+ for _, entry := range manifest {
+ dst := filepath.Join(out, filepath.FromSlash(entry.Dst))
+ dstDir := filepath.Dir(dst)
+ src, _ := filepath.Rel(dstDir, entry.Src)
+ if err := os.MkdirAll(dstDir, 0777); err != nil {
+ return err
+ }
+ if err := os.Symlink(src, dst); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/go/tools/builders/importcfg.go b/go/tools/builders/importcfg.go
new file mode 100644
index 00000000..9fe55b42
--- /dev/null
+++ b/go/tools/builders/importcfg.go
@@ -0,0 +1,261 @@
+// Copyright 2019 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+)
+
// archive describes one compiled dependency of the package being built:
// label is the Bazel label that produced it, importPath is the path
// sources use to import it (importPathAliases lists alternates),
// packagePath is the path it was compiled with, and file is the archive
// file on disk.
type archive struct {
	label, importPath, packagePath, file string
	importPathAliases []string
}
+
+// checkImports verifies that each import in files refers to a
+// direct dependency in archives or to a standard library package
+// listed in the file at stdPackageListPath. checkImports returns
+// a map from source import paths to elements of archives or to nil
+// for standard library packages.
+func checkImports(files []fileInfo, archives []archive, stdPackageListPath string, importPath string, recompileInternalDeps []string) (map[string]*archive, error) {
+ // Read the standard package list.
+ packagesTxt, err := ioutil.ReadFile(stdPackageListPath)
+ if err != nil {
+ return nil, err
+ }
+ stdPkgs := make(map[string]bool)
+ for len(packagesTxt) > 0 {
+ n := bytes.IndexByte(packagesTxt, '\n')
+ var line string
+ if n < 0 {
+ line = string(packagesTxt)
+ packagesTxt = nil
+ } else {
+ line = string(packagesTxt[:n])
+ packagesTxt = packagesTxt[n+1:]
+ }
+ line = strings.TrimSpace(line)
+ if line == "" {
+ continue
+ }
+ stdPkgs[line] = true
+ }
+
+ // Index the archives.
+ importToArchive := make(map[string]*archive)
+ importAliasToArchive := make(map[string]*archive)
+ for i := range archives {
+ arc := &archives[i]
+ importToArchive[arc.importPath] = arc
+ for _, imp := range arc.importPathAliases {
+ importAliasToArchive[imp] = arc
+ }
+ }
+ // Construct recompileInternalDeps as a map to check if there are imports that are disallowed.
+ recompileInternalDepMap := make(map[string]struct{})
+ for _, dep := range recompileInternalDeps {
+ recompileInternalDepMap[dep] = struct{}{}
+ }
+ // Build the import map.
+ imports := make(map[string]*archive)
+ var derr depsError
+ for _, f := range files {
+ for _, imp := range f.imports {
+ path := imp.path
+ if _, ok := imports[path]; ok || path == "C" || isRelative(path) {
+ // TODO(#1645): Support local (relative) import paths. We don't emit
+ // errors for them here, but they will probably break something else.
+ continue
+ }
+ if _, ok := recompileInternalDepMap[path]; ok {
+ return nil, fmt.Errorf("dependency cycle detected between %q and %q in file %q", importPath, path, f.filename)
+ }
+ if stdPkgs[path] {
+ imports[path] = nil
+ } else if arc := importToArchive[path]; arc != nil {
+ imports[path] = arc
+ } else if arc := importAliasToArchive[path]; arc != nil {
+ imports[path] = arc
+ } else {
+ derr.missing = append(derr.missing, missingDep{f.filename, path})
+ }
+ }
+ }
+ if len(derr.missing) > 0 {
+ return nil, derr
+ }
+ return imports, nil
+}
+
// buildImportcfgFileForCompile writes an importcfg file to be consumed by the
// compiler. The file is constructed from direct dependencies and std imports.
// The caller is responsible for deleting the importcfg file.
func buildImportcfgFileForCompile(imports map[string]*archive, installSuffix, dir string) (string, error) {
	buf := &bytes.Buffer{}
	goroot, ok := os.LookupEnv("GOROOT")
	if !ok {
		return "", errors.New("GOROOT not set")
	}
	goroot = abs(goroot)

	// Emit entries in sorted order so output is deterministic.
	sortedImports := make([]string, 0, len(imports))
	for imp := range imports {
		sortedImports = append(sortedImports, imp)
	}
	sort.Strings(sortedImports)

	for _, imp := range sortedImports {
		if arc := imports[imp]; arc == nil {
			// Standard library package: resolve against GOROOT.
			path := filepath.Join(goroot, "pkg", installSuffix, filepath.FromSlash(imp))
			fmt.Fprintf(buf, "packagefile %s=%s.a\n", imp, path)
		} else {
			// An importmap line is only needed when the source import path
			// differs from the package path the archive was compiled with.
			if imp != arc.packagePath {
				fmt.Fprintf(buf, "importmap %s=%s\n", imp, arc.packagePath)
			}
			fmt.Fprintf(buf, "packagefile %s=%s\n", arc.packagePath, arc.file)
		}
	}

	// Write the buffer to a temp file; on any failure remove the partial
	// file so it does not leak.
	f, err := ioutil.TempFile(dir, "importcfg")
	if err != nil {
		return "", err
	}
	filename := f.Name()
	if _, err := io.Copy(f, buf); err != nil {
		f.Close()
		os.Remove(filename)
		return "", err
	}
	if err := f.Close(); err != nil {
		os.Remove(filename)
		return "", err
	}
	return filename, nil
}
+
// buildImportcfgFileForLink writes an importcfg file to be consumed by the
// linker. It lists every standard library package from stdPackageListPath
// plus each dependency archive. The caller is responsible for deleting the
// returned file.
func buildImportcfgFileForLink(archives []archive, stdPackageListPath, installSuffix, dir string) (string, error) {
	buf := &bytes.Buffer{}
	goroot, ok := os.LookupEnv("GOROOT")
	if !ok {
		return "", errors.New("GOROOT not set")
	}
	prefix := abs(filepath.Join(goroot, "pkg", installSuffix))
	stdPackageListFile, err := os.Open(stdPackageListPath)
	if err != nil {
		return "", err
	}
	defer stdPackageListFile.Close()
	scanner := bufio.NewScanner(stdPackageListFile)
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if line == "" {
			continue
		}
		fmt.Fprintf(buf, "packagefile %s=%s.a\n", line, filepath.Join(prefix, filepath.FromSlash(line)))
	}
	if err := scanner.Err(); err != nil {
		return "", err
	}
	// Each package path may be provided only once; a duplicate indicates
	// a bug in earlier analysis, not a user error.
	depsSeen := map[string]string{}
	for _, arc := range archives {
		if _, ok := depsSeen[arc.packagePath]; ok {
			return "", fmt.Errorf("internal error: package %s provided multiple times. This should have been detected during analysis.", arc.packagePath)
		}
		depsSeen[arc.packagePath] = arc.label
		fmt.Fprintf(buf, "packagefile %s=%s\n", arc.packagePath, arc.file)
	}
	// Write the buffer to a temp file; on any failure remove the partial
	// file so it does not leak.
	f, err := ioutil.TempFile(dir, "importcfg")
	if err != nil {
		return "", err
	}
	filename := f.Name()
	if _, err := io.Copy(f, buf); err != nil {
		f.Close()
		os.Remove(filename)
		return "", err
	}
	if err := f.Close(); err != nil {
		os.Remove(filename)
		return "", err
	}
	return filename, nil
}
+
// depsError reports imports that could not be resolved to any provided
// dependency or standard library package.
type depsError struct {
	missing []missingDep
	known   []string
}

// missingDep names one unresolved import and the file that contains it.
type missingDep struct {
	filename, imp string
}

var _ error = depsError{}

// Error renders the unresolved imports followed by the set of known
// dependencies (or a note that none were provided), ending with a hint
// about importpath attributes.
func (e depsError) Error() string {
	var b strings.Builder
	b.WriteString("missing strict dependencies:\n")
	for _, dep := range e.missing {
		fmt.Fprintf(&b, "\t%s: import of %q\n", dep.filename, dep.imp)
	}
	if len(e.known) == 0 {
		b.WriteString("No dependencies were provided.\n")
	} else {
		b.WriteString("Known dependencies are:\n")
		for _, imp := range e.known {
			fmt.Fprintf(&b, "\t%s\n", imp)
		}
	}
	b.WriteString("Check that imports in Go sources match importpath attributes in deps.")
	return b.String()
}
+
// isRelative reports whether path is a local (relative) import path,
// i.e. one beginning with "./" or "../".
func isRelative(path string) bool {
	for _, prefix := range []string{"./", "../"} {
		if strings.HasPrefix(path, prefix) {
			return true
		}
	}
	return false
}
+
// archiveMultiFlag accumulates repeated -arc flags, each describing one
// dependency archive.
type archiveMultiFlag []archive

// String renders the accumulated archives for flag diagnostics.
func (m *archiveMultiFlag) String() string {
	if m == nil || len(*m) == 0 {
		return ""
	}
	return fmt.Sprint(*m)
}

// Set parses one -arc value of the form
// "importpath[:alias...]=packagepath=file" and appends it to the list.
// NOTE(review): the '='-split assumes neither the paths nor the file name
// contain '='; verify against how the rules emit this flag.
func (m *archiveMultiFlag) Set(v string) error {
	parts := strings.Split(v, "=")
	if len(parts) != 3 {
		return fmt.Errorf("badly formed -arc flag: %s", v)
	}
	importPaths := strings.Split(parts[0], ":")
	a := archive{
		importPath: importPaths[0],
		importPathAliases: importPaths[1:],
		packagePath: parts[1],
		file: abs(parts[2]),
	}
	*m = append(*m, a)
	return nil
}
diff --git a/go/tools/builders/info.go b/go/tools/builders/info.go
new file mode 100644
index 00000000..f7f1fd03
--- /dev/null
+++ b/go/tools/builders/info.go
@@ -0,0 +1,64 @@
+// Copyright 2017 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// info prints debugging information about the go environment.
+// It is used to help examine the execution environment of rules_go
+package main
+
+import (
+ "flag"
+ "fmt"
+ "log"
+ "os"
+)
+
// run writes the output of "go version" and "go env" to the file named by
// -out (or to stderr) to help examine the execution environment of
// rules_go actions.
func run(args []string) error {
	args, _, err := expandParamsFiles(args)
	if err != nil {
		return err
	}
	filename := ""
	flags := flag.NewFlagSet("info", flag.ExitOnError)
	flags.StringVar(&filename, "out", filename, "The file to write the report to")
	goenv := envFlags(flags)
	if err := flags.Parse(args); err != nil {
		return err
	}
	if err := goenv.checkFlags(); err != nil {
		return err
	}
	// Run the go tool with module mode disabled.
	os.Setenv("GO111MODULE", "off")
	f := os.Stderr
	if filename != "" {
		var err error
		f, err = os.Create(filename)
		if err != nil {
			return fmt.Errorf("Could not create report file: %v", err)
		}
		defer f.Close()
	}
	if err := goenv.runCommandToFile(f, os.Stderr, goenv.goCmd("version")); err != nil {
		return err
	}
	if err := goenv.runCommandToFile(f, os.Stderr, goenv.goCmd("env")); err != nil {
		return err
	}
	return nil
}
+
// main runs the info tool, exiting non-zero on failure.
func main() {
	if err := run(os.Args[1:]); err != nil {
		log.Fatal(err)
	}
}
diff --git a/go/tools/builders/link.go b/go/tools/builders/link.go
new file mode 100644
index 00000000..723bb193
--- /dev/null
+++ b/go/tools/builders/link.go
@@ -0,0 +1,163 @@
+// Copyright 2017 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// link combines the results of a compile step using "go tool link". It is invoked by the
+// Go rules as an action.
+package main
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strings"
+)
+
+func link(args []string) error {
+ // Parse arguments.
+ args, _, err := expandParamsFiles(args)
+ if err != nil {
+ return err
+ }
+ builderArgs, toolArgs := splitArgs(args)
+ stamps := multiFlag{}
+ xdefs := multiFlag{}
+ archives := archiveMultiFlag{}
+ flags := flag.NewFlagSet("link", flag.ExitOnError)
+ goenv := envFlags(flags)
+ main := flags.String("main", "", "Path to the main archive.")
+ packagePath := flags.String("p", "", "Package path of the main archive.")
+ outFile := flags.String("o", "", "Path to output file.")
+ flags.Var(&archives, "arc", "Label, package path, and file name of a dependency, separated by '='")
+ packageList := flags.String("package_list", "", "The file containing the list of standard library packages")
+ buildmode := flags.String("buildmode", "", "Build mode used.")
+ flags.Var(&xdefs, "X", "A string variable to replace in the linked binary (repeated).")
+ flags.Var(&stamps, "stamp", "The name of a file with stamping values.")
+ conflictErrMsg := flags.String("conflict_err", "", "Error message about conflicts to report if there's a link error.")
+ if err := flags.Parse(builderArgs); err != nil {
+ return err
+ }
+ if err := goenv.checkFlags(); err != nil {
+ return err
+ }
+
+ if *conflictErrMsg != "" {
+ return errors.New(*conflictErrMsg)
+ }
+
+ // On Windows, take the absolute path of the output file and main file.
+ // This is needed on Windows because the relative path is frequently too long.
+ // os.Open on Windows converts absolute paths to some other path format with
+ // longer length limits. Absolute paths do not work on macOS for .dylib
+ // outputs because they get baked in as the "install path".
+ if runtime.GOOS != "darwin" && runtime.GOOS != "ios" {
+ *outFile = abs(*outFile)
+ }
+ *main = abs(*main)
+
+ // If we were given any stamp value files, read and parse them
+ stampMap := map[string]string{}
+ for _, stampfile := range stamps {
+ stampbuf, err := ioutil.ReadFile(stampfile)
+ if err != nil {
+ return fmt.Errorf("Failed reading stamp file %s: %v", stampfile, err)
+ }
+ scanner := bufio.NewScanner(bytes.NewReader(stampbuf))
+ for scanner.Scan() {
+ line := strings.SplitN(scanner.Text(), " ", 2)
+ switch len(line) {
+ case 0:
+ // Nothing to do here
+ case 1:
+ // Map to the empty string
+ stampMap[line[0]] = ""
+ case 2:
+ // Key and value
+ stampMap[line[0]] = line[1]
+ }
+ }
+ }
+
+ // Build an importcfg file.
+ importcfgName, err := buildImportcfgFileForLink(archives, *packageList, goenv.installSuffix, filepath.Dir(*outFile))
+ if err != nil {
+ return err
+ }
+ if !goenv.shouldPreserveWorkDir {
+ defer os.Remove(importcfgName)
+ }
+
+ // generate any additional link options we need
+ goargs := goenv.goTool("link")
+ goargs = append(goargs, "-importcfg", importcfgName)
+
+ parseXdef := func(xdef string) (pkg, name, value string, err error) {
+ eq := strings.IndexByte(xdef, '=')
+ if eq < 0 {
+ return "", "", "", fmt.Errorf("-X flag does not contain '=': %s", xdef)
+ }
+ dot := strings.LastIndexByte(xdef[:eq], '.')
+ if dot < 0 {
+ return "", "", "", fmt.Errorf("-X flag does not contain '.': %s", xdef)
+ }
+ pkg, name, value = xdef[:dot], xdef[dot+1:eq], xdef[eq+1:]
+ if pkg == *packagePath {
+ pkg = "main"
+ }
+ return pkg, name, value, nil
+ }
+ for _, xdef := range xdefs {
+ pkg, name, value, err := parseXdef(xdef)
+ if err != nil {
+ return err
+ }
+ var missingKey bool
+ value = regexp.MustCompile(`\{.+?\}`).ReplaceAllStringFunc(value, func(key string) string {
+ if value, ok := stampMap[key[1:len(key)-1]]; ok {
+ return value
+ }
+ missingKey = true
+ return key
+ })
+ if !missingKey {
+ goargs = append(goargs, "-X", fmt.Sprintf("%s.%s=%s", pkg, name, value))
+ }
+ }
+
+ if *buildmode != "" {
+ goargs = append(goargs, "-buildmode", *buildmode)
+ }
+ goargs = append(goargs, "-o", *outFile)
+
+ // add in the unprocess pass through options
+ goargs = append(goargs, toolArgs...)
+ goargs = append(goargs, *main)
+ if err := goenv.runCommand(goargs); err != nil {
+ return err
+ }
+
+ if *buildmode == "c-archive" {
+ if err := stripArMetadata(*outFile); err != nil {
+ return fmt.Errorf("error stripping archive metadata: %v", err)
+ }
+ }
+
+ return nil
+}
diff --git a/go/tools/builders/md5sum.go b/go/tools/builders/md5sum.go
new file mode 100644
index 00000000..834eb272
--- /dev/null
+++ b/go/tools/builders/md5sum.go
@@ -0,0 +1,89 @@
+// Copyright 2017 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// md5sum replicates the equivalent functionality of the unix tool of the same name.
+package main
+
+import (
+ "crypto/md5"
+ "flag"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "path/filepath"
+)
+
+func md5SumFile(filename string) ([]byte, error) {
+ var result []byte
+ f, err := os.Open(filename)
+ if err != nil {
+ return result, err
+ }
+ defer f.Close()
+ hash := md5.New()
+ if _, err := io.Copy(hash, f); err != nil {
+ return nil, err
+ }
+ return hash.Sum(result), nil
+}
+
// run hashes every regular file under the paths given as positional
// arguments and writes "<path> <md5hex>" lines to -output (or stdout).
func run(args []string) error {
	// Prepare our flags
	flags := flag.NewFlagSet("md5sum", flag.ExitOnError)
	output := flags.String("output", "", "If set, write the results to this file, instead of stdout.")
	if err := flags.Parse(args); err != nil {
		return err
	}
	// Write to stdout unless -output was given.
	to := os.Stdout
	if *output != "" {
		f, err := os.Create(*output)
		if err != nil {
			return err
		}
		defer f.Close()
		to = f
	}
	for _, path := range flags.Args() {
		// Walk each argument recursively, hashing files and skipping
		// directories.
		walkFn := func(path string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			if info.IsDir() {
				return nil
			}

			if b, err := md5SumFile(path); err != nil {
				return err
			} else {
				fmt.Fprintf(to, "%s %x\n", path, b)
			}
			return nil
		}

		if err := filepath.Walk(path, walkFn); err != nil {
			return err
		}
	}
	return nil
}
+
// main runs the md5sum tool with a "GoMd5sum: " log prefix, exiting
// non-zero on failure.
func main() {
	log.SetFlags(0)
	log.SetPrefix("GoMd5sum: ")
	if err := run(os.Args[1:]); err != nil {
		log.Fatal(err)
	}
}
diff --git a/go/tools/builders/nogo_main.go b/go/tools/builders/nogo_main.go
new file mode 100644
index 00000000..c6156e1d
--- /dev/null
+++ b/go/tools/builders/nogo_main.go
@@ -0,0 +1,654 @@
+/* Copyright 2018 The Bazel Authors. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Loads and runs registered analyses on a well-typed Go package.
+// The code in this file is combined with the code generated by
+// generate_nogo_main.go.
+
+package main
+
+import (
+ "bytes"
+ "encoding/gob"
+ "errors"
+ "flag"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "io/ioutil"
+ "log"
+ "os"
+ "reflect"
+ "regexp"
+ "sort"
+ "strings"
+ "sync"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/gcexportdata"
+ "golang.org/x/tools/internal/facts"
+)
+
// nogoBaseConfigName is the config key whose settings apply to analyzers
// without an entry of their own.
const nogoBaseConfigName = "_base"

// init validates the generated analyzer list before any analysis runs.
func init() {
	if err := analysis.Validate(analyzers); err != nil {
		log.Fatal(err)
	}
}

// typesSizes supplies target-specific type sizes to analysis passes, based
// on the GOARCH this builder was invoked with.
var typesSizes = types.SizesFor("gc", os.Getenv("GOARCH"))
+
// main configures logging and runs nogo analysis over the arguments,
// exiting non-zero on failure.
func main() {
	log.SetFlags(0) // no timestamp
	log.SetPrefix("nogo: ")
	if err := run(os.Args[1:]); err != nil {
		log.Fatal(err)
	}
}
+
// run returns an error if there is a problem loading the package or if any
// analysis fails. Diagnostics found by analyzers are reported as an error;
// serialized facts are written to the -x archive for downstream packages.
func run(args []string) error {
	args, _, err := expandParamsFiles(args)
	if err != nil {
		return fmt.Errorf("error reading paramfiles: %v", err)
	}

	factMap := factMultiFlag{}
	flags := flag.NewFlagSet("nogo", flag.ExitOnError)
	flags.Var(&factMap, "fact", "Import path and file containing facts for that library, separated by '=' (may be repeated)'")
	importcfg := flags.String("importcfg", "", "The import configuration file")
	packagePath := flags.String("p", "", "The package path (importmap) of the package being compiled")
	xPath := flags.String("x", "", "The archive file where serialized facts should be written")
	// ExitOnError: Parse terminates the process itself on a bad flag, so
	// its return value is not checked here.
	flags.Parse(args)
	srcs := flags.Args()

	packageFile, importMap, err := readImportCfg(*importcfg)
	if err != nil {
		return fmt.Errorf("error parsing importcfg: %v", err)
	}

	diagnostics, facts, err := checkPackage(analyzers, *packagePath, packageFile, importMap, factMap, srcs)
	if err != nil {
		return fmt.Errorf("error running analyzers: %v", err)
	}
	if diagnostics != "" {
		return fmt.Errorf("errors found by nogo during build-time code analysis:\n%s\n", diagnostics)
	}
	if *xPath != "" {
		// Facts are consumed by nogo runs on packages importing this one.
		if err := ioutil.WriteFile(abs(*xPath), facts, 0o666); err != nil {
			return fmt.Errorf("error writing facts: %v", err)
		}
	}

	return nil
}
+
// readImportCfg parses an importcfg file into a map from package path to
// archive file ("packagefile" lines) and a map from source import path to
// compiled package path ("importmap" lines).
//
// Adapted from go/src/cmd/compile/internal/gc/main.go. Keep in sync.
func readImportCfg(file string) (packageFile map[string]string, importMap map[string]string, err error) {
	packageFile, importMap = make(map[string]string), make(map[string]string)
	data, err := ioutil.ReadFile(file)
	if err != nil {
		return nil, nil, fmt.Errorf("-importcfg: %v", err)
	}

	for lineNum, line := range strings.Split(string(data), "\n") {
		lineNum++ // 1-based
		line = strings.TrimSpace(line)
		if line == "" || strings.HasPrefix(line, "#") {
			continue
		}

		// Each line is "verb arg" where arg is "before=after".
		var verb, args string
		if i := strings.Index(line, " "); i < 0 {
			verb = line
		} else {
			verb, args = line[:i], strings.TrimSpace(line[i+1:])
		}
		var before, after string
		if i := strings.Index(args, "="); i >= 0 {
			before, after = args[:i], args[i+1:]
		}
		switch verb {
		default:
			return nil, nil, fmt.Errorf("%s:%d: unknown directive %q", file, lineNum, verb)
		case "importmap":
			if before == "" || after == "" {
				return nil, nil, fmt.Errorf(`%s:%d: invalid importmap: syntax is "importmap old=new"`, file, lineNum)
			}
			importMap[before] = after
		case "packagefile":
			if before == "" || after == "" {
				return nil, nil, fmt.Errorf(`%s:%d: invalid packagefile: syntax is "packagefile path=filename"`, file, lineNum)
			}
			packageFile[before] = after
		}
	}
	return packageFile, importMap, nil
}
+
// checkPackage runs all the given analyzers on the specified package and
// returns the source code diagnostics that must be printed in the build log.
// It returns an empty string if no source code diagnostics need to be printed.
//
// This implementation was adapted from that of golang.org/x/tools/go/checker/internal/checker.
func checkPackage(analyzers []*analysis.Analyzer, packagePath string, packageFile, importMap map[string]string, factMap map[string]string, filenames []string) (string, []byte, error) {
	// Register fact types and establish dependencies between analyzers.
	actions := make(map[*analysis.Analyzer]*action)
	var visit func(a *analysis.Analyzer) *action
	visit = func(a *analysis.Analyzer) *action {
		act, ok := actions[a]
		if !ok {
			act = &action{a: a}
			actions[a] = act
			for _, f := range a.FactTypes {
				act.usesFacts = true
				gob.Register(f)
			}
			act.deps = make([]*action, len(a.Requires))
			for i, req := range a.Requires {
				dep := visit(req)
				// Fact use is transitive through required analyzers.
				if dep.usesFacts {
					act.usesFacts = true
				}
				act.deps[i] = dep
			}
		}
		return act
	}

	// Apply configured flag values to each analyzer before running it.
	roots := make([]*action, 0, len(analyzers))
	for _, a := range analyzers {
		if cfg, ok := configs[a.Name]; ok {
			for flagKey, flagVal := range cfg.analyzerFlags {
				if strings.HasPrefix(flagKey, "-") {
					return "", nil, fmt.Errorf(
						"%s: flag should not begin with '-': %s", a.Name, flagKey)
				}
				if flag := a.Flags.Lookup(flagKey); flag == nil {
					return "", nil, fmt.Errorf("%s: unrecognized flag: %s", a.Name, flagKey)
				}
				if err := a.Flags.Set(flagKey, flagVal); err != nil {
					return "", nil, fmt.Errorf(
						"%s: invalid value for flag: %s=%s: %w", a.Name, flagKey, flagVal, err)
				}
			}
		}
		roots = append(roots, visit(a))
	}

	// Load the package, including AST, types, and facts.
	imp := newImporter(importMap, packageFile, factMap)
	pkg, err := load(packagePath, imp, filenames)
	if err != nil {
		return "", nil, fmt.Errorf("error loading package: %v", err)
	}
	for _, act := range actions {
		act.pkg = pkg
	}

	// Process nolint directives similar to golangci-lint.
	for _, f := range pkg.syntax {
		// CommentMap will correctly associate comments to the largest node group
		// applicable. This handles inline comments that might trail a large
		// assignment and will apply the comment to the entire assignment.
		commentMap := ast.NewCommentMap(pkg.fset, f, f.Comments)
		for node, groups := range commentMap {
			rng := &Range{
				from: pkg.fset.Position(node.Pos()),
				to:   pkg.fset.Position(node.End()).Line,
			}
			for _, group := range groups {
				for _, comm := range group.List {
					linters, ok := parseNolint(comm.Text)
					if !ok {
						continue
					}
					// A nil linters map suppresses every analyzer.
					for analyzer, act := range actions {
						if linters == nil || linters[analyzer.Name] {
							act.nolint = append(act.nolint, rng)
						}
					}
				}
			}
		}
	}

	// Execute the analyzers.
	execAll(roots)

	// Process diagnostics and encode facts for importers of this package.
	diagnostics := checkAnalysisResults(roots, pkg)
	facts := pkg.facts.Encode()
	return diagnostics, facts, nil
}
+
// Range is the span of source covered by one nolint comment: diagnostics
// in the same file from from.Line through line to (inclusive) are
// suppressed.
type Range struct {
	from token.Position
	to   int
}
+
// An action represents one unit of analysis work: the application of
// one analysis to one package. Actions form a DAG within a
// package (as different analyzers are applied, either in sequence or
// parallel).
type action struct {
	once        sync.Once // guards execOnce via exec
	a           *analysis.Analyzer
	pass        *analysis.Pass
	pkg         *goPackage
	deps        []*action // actions for this analyzer's Requires
	inputs      map[*analysis.Analyzer]interface{}
	result      interface{}
	diagnostics []analysis.Diagnostic
	usesFacts   bool // whether a or any transitive dependency declares FactTypes
	err         error
	nolint      []*Range // source ranges suppressed for this analyzer
}
+
// String identifies the action in error messages as "analyzer@package".
func (act *action) String() string {
	return fmt.Sprintf("%s@%s", act.a, act.pkg)
}
+
+func execAll(actions []*action) {
+ var wg sync.WaitGroup
+ wg.Add(len(actions))
+ for _, act := range actions {
+ go func(act *action) {
+ defer wg.Done()
+ act.exec()
+ }(act)
+ }
+ wg.Wait()
+}
+
// exec runs the action's analysis at most once, even when requested by
// multiple dependent actions.
func (act *action) exec() { act.once.Do(act.execOnce) }
+
// execOnce runs this action's analyzer after all of its dependencies,
// recording the result, surviving diagnostics, and any error on the
// action itself.
func (act *action) execOnce() {
	// Analyze dependencies.
	execAll(act.deps)

	// Report an error if any dependency failed.
	var failed []string
	for _, dep := range act.deps {
		if dep.err != nil {
			failed = append(failed, dep.String())
		}
	}
	if failed != nil {
		sort.Strings(failed)
		act.err = fmt.Errorf("failed prerequisites: %s", strings.Join(failed, ", "))
		return
	}

	// Plumb the output values of the dependencies
	// into the inputs of this action.
	inputs := make(map[*analysis.Analyzer]interface{})
	for _, dep := range act.deps {
		// Same package, different analysis (horizontal edge):
		// in-memory outputs of prerequisite analyzers
		// become inputs to this analysis pass.
		inputs[dep.a] = dep.result
	}

	// Collect diagnostics, dropping any that fall inside a nolint range
	// recorded for this analyzer.
	ignoreNolintReporter := func(d analysis.Diagnostic) {
		pos := act.pkg.fset.Position(d.Pos)
		for _, rng := range act.nolint {
			// The list of nolint ranges is built for the entire package. Make sure we
			// only apply ranges to the correct file.
			if pos.Filename != rng.from.Filename {
				continue
			}
			if pos.Line < rng.from.Line || pos.Line > rng.to {
				continue
			}
			// Found a nolint range. Ignore the issue.
			return
		}
		act.diagnostics = append(act.diagnostics, d)
	}

	// Run the analysis.
	factFilter := make(map[reflect.Type]bool)
	for _, f := range act.a.FactTypes {
		factFilter[reflect.TypeOf(f)] = true
	}
	pass := &analysis.Pass{
		Analyzer:          act.a,
		Fset:              act.pkg.fset,
		Files:             act.pkg.syntax,
		Pkg:               act.pkg.types,
		TypesInfo:         act.pkg.typesInfo,
		ResultOf:          inputs,
		Report:            ignoreNolintReporter,
		ImportPackageFact: act.pkg.facts.ImportPackageFact,
		ExportPackageFact: act.pkg.facts.ExportPackageFact,
		ImportObjectFact:  act.pkg.facts.ImportObjectFact,
		ExportObjectFact:  act.pkg.facts.ExportObjectFact,
		AllPackageFacts:   func() []analysis.PackageFact { return act.pkg.facts.AllPackageFacts(factFilter) },
		AllObjectFacts:    func() []analysis.ObjectFact { return act.pkg.facts.AllObjectFacts(factFilter) },
		TypesSizes:        typesSizes,
	}
	act.pass = pass

	var err error
	if act.pkg.illTyped && !pass.Analyzer.RunDespiteErrors {
		err = fmt.Errorf("analysis skipped due to type-checking error: %v", act.pkg.typeCheckError)
	} else {
		act.result, err = pass.Analyzer.Run(pass)
		if err == nil {
			// The analyzer's declared ResultType must match what it returned.
			if got, want := reflect.TypeOf(act.result), pass.Analyzer.ResultType; got != want {
				err = fmt.Errorf(
					"internal error: on package %s, analyzer %s returned a result of type %v, but declared ResultType %v",
					pass.Pkg.Path(), pass.Analyzer, got, want)
			}
		}
	}
	act.err = err
}
+
+// load parses and type checks the source code in each file in filenames.
+// load also deserializes facts stored for imported packages.
+func load(packagePath string, imp *importer, filenames []string) (*goPackage, error) {
+ if len(filenames) == 0 {
+ return nil, errors.New("no filenames")
+ }
+ var syntax []*ast.File
+ for _, file := range filenames {
+ s, err := parser.ParseFile(imp.fset, file, nil, parser.ParseComments)
+ if err != nil {
+ return nil, err
+ }
+ syntax = append(syntax, s)
+ }
+ pkg := &goPackage{fset: imp.fset, syntax: syntax}
+
+ config := types.Config{Importer: imp}
+ info := &types.Info{
+ Types: make(map[ast.Expr]types.TypeAndValue),
+ Uses: make(map[*ast.Ident]types.Object),
+ Defs: make(map[*ast.Ident]types.Object),
+ Implicits: make(map[ast.Node]types.Object),
+ Scopes: make(map[ast.Node]*types.Scope),
+ Selections: make(map[*ast.SelectorExpr]*types.Selection),
+ }
+
+ initInstanceInfo(info)
+
+ types, err := config.Check(packagePath, pkg.fset, syntax, info)
+ if err != nil {
+ pkg.illTyped, pkg.typeCheckError = true, err
+ }
+ pkg.types, pkg.typesInfo = types, info
+
+ pkg.facts, err = facts.NewDecoder(pkg.types).Decode(imp.readFacts)
+ if err != nil {
+ return nil, fmt.Errorf("internal error decoding facts: %v", err)
+ }
+
+ return pkg, nil
+}
+
// A goPackage describes a loaded Go package.
// It carries everything execOnce needs to construct an analysis.Pass.
type goPackage struct {
	// fset provides position information for types, typesInfo, and syntax.
	// It is set only when types is set.
	fset *token.FileSet
	// syntax is the package's syntax trees.
	syntax []*ast.File
	// types provides type information for the package.
	types *types.Package
	// facts contains information saved by the analysis framework. Passes may
	// import facts for imported packages and may also export facts for this
	// package to be consumed by analyses in downstream packages.
	facts *facts.Set
	// illTyped indicates whether the package or any dependency contains errors.
	// It is set only when types is set.
	illTyped bool
	// typeCheckError contains any error encountered during type-checking. It is
	// only set when illTyped is true.
	typeCheckError error
	// typesInfo provides type information about the package's syntax trees.
	// It is set only when syntax is set.
	typesInfo *types.Info
}
+
// String implements fmt.Stringer, identifying the package by its import path.
func (g *goPackage) String() string {
	return g.types.Path()
}
+
// checkAnalysisResults checks the analysis diagnostics in the given actions
// and returns a string containing all the diagnostics that should be printed
// to the build log. An empty string means there were no findings and no
// analyzer errors. Per-analyzer file filters come from the package-level
// configs map, overlaid on the base config.
func checkAnalysisResults(actions []*action, pkg *goPackage) string {
	type entry struct {
		analysis.Diagnostic
		*analysis.Analyzer
	}
	var diagnostics []entry
	var errs []error
	for _, act := range actions {
		if act.err != nil {
			// Analyzer failed.
			errs = append(errs, fmt.Errorf("analyzer %q failed: %v", act.a.Name, act.err))
			continue
		}
		if len(act.diagnostics) == 0 {
			continue
		}
		var currentConfig config
		// Use the base config if it exists.
		if baseConfig, ok := configs[nogoBaseConfigName]; ok {
			currentConfig = baseConfig
		}
		// Overwrite the config with the desired config. Any unset fields
		// in the config will default to the base config.
		if actionConfig, ok := configs[act.a.Name]; ok {
			if actionConfig.analyzerFlags != nil {
				currentConfig.analyzerFlags = actionConfig.analyzerFlags
			}
			if actionConfig.onlyFiles != nil {
				currentConfig.onlyFiles = actionConfig.onlyFiles
			}
			if actionConfig.excludeFiles != nil {
				currentConfig.excludeFiles = actionConfig.excludeFiles
			}
		}

		// No file filters configured at all: keep every diagnostic.
		if currentConfig.onlyFiles == nil && currentConfig.excludeFiles == nil {
			for _, diag := range act.diagnostics {
				diagnostics = append(diagnostics, entry{Diagnostic: diag, Analyzer: act.a})
			}
			continue
		}
		// Discard diagnostics based on the analyzer configuration.
		for _, d := range act.diagnostics {
			// NOTE(golang.org/issue/31008): nilness does not set positions,
			// so don't assume the position is valid.
			p := pkg.fset.Position(d.Pos)
			filename := "-"
			if p.IsValid() {
				filename = p.Filename
			}
			include := true
			if len(currentConfig.onlyFiles) > 0 {
				// This analyzer emits diagnostics for only a set of files.
				include = false
				for _, pattern := range currentConfig.onlyFiles {
					if pattern.MatchString(filename) {
						include = true
						break
					}
				}
			}
			if include {
				// excludeFiles wins over onlyFiles when both match.
				for _, pattern := range currentConfig.excludeFiles {
					if pattern.MatchString(filename) {
						include = false
						break
					}
				}
			}
			if include {
				diagnostics = append(diagnostics, entry{Diagnostic: d, Analyzer: act.a})
			}
		}
	}
	if len(diagnostics) == 0 && len(errs) == 0 {
		return ""
	}

	// Sort findings by position so the build log is stable across runs.
	sort.Slice(diagnostics, func(i, j int) bool {
		return diagnostics[i].Pos < diagnostics[j].Pos
	})
	errMsg := &bytes.Buffer{}
	sep := ""
	for _, err := range errs {
		errMsg.WriteString(sep)
		sep = "\n"
		errMsg.WriteString(err.Error())
	}
	for _, d := range diagnostics {
		errMsg.WriteString(sep)
		sep = "\n"
		fmt.Fprintf(errMsg, "%s: %s (%s)", pkg.fset.Position(d.Pos), d.Message, d.Name)
	}
	return errMsg.String()
}
+
// config determines which source files an analyzer will emit diagnostics for.
// config values are generated in another file that is compiled with
// nogo_main.go by the nogo rule.
// Note: only a nil (not merely empty) onlyFiles AND excludeFiles disables
// file filtering entirely (see checkAnalysisResults).
type config struct {
	// onlyFiles is a list of regular expressions that match files an analyzer
	// will emit diagnostics for. When empty, the analyzer will emit diagnostics
	// for all files.
	onlyFiles []*regexp.Regexp

	// excludeFiles is a list of regular expressions that match files that an
	// analyzer will not emit diagnostics for.
	excludeFiles []*regexp.Regexp

	// analyzerFlags is a map of flag names to flag values which will be passed
	// to Analyzer.Flags. Note that no leading '-' should be present in a flag
	// name
	analyzerFlags map[string]string
}
+
// importer is an implementation of go/types.Importer that imports type
// information from the export data in compiled .a files.
type importer struct {
	fset         *token.FileSet
	importMap    map[string]string         // map import path in source code to package path
	packageCache map[string]*types.Package // cache of previously imported packages; also populated by gcexportdata.Read
	packageFile  map[string]string         // map package path to .a file with export data
	factMap      map[string]string         // map import path in source code to file containing serialized facts
}
+
+func newImporter(importMap, packageFile map[string]string, factMap map[string]string) *importer {
+ return &importer{
+ fset: token.NewFileSet(),
+ importMap: importMap,
+ packageCache: make(map[string]*types.Package),
+ packageFile: packageFile,
+ factMap: factMap,
+ }
+}
+
+func (i *importer) Import(path string) (*types.Package, error) {
+ if imp, ok := i.importMap[path]; ok {
+ // Translate import path if necessary.
+ path = imp
+ }
+ if path == "unsafe" {
+ // Special case: go/types has pre-defined type information for unsafe.
+ // See https://github.com/golang/go/issues/13882.
+ return types.Unsafe, nil
+ }
+ if pkg, ok := i.packageCache[path]; ok && pkg.Complete() {
+ return pkg, nil // cache hit
+ }
+
+ archive, ok := i.packageFile[path]
+ if !ok {
+ return nil, fmt.Errorf("could not import %q", path)
+ }
+ // open file
+ f, err := os.Open(archive)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ f.Close()
+ if err != nil {
+ // add file name to error
+ err = fmt.Errorf("reading export data: %s: %v", archive, err)
+ }
+ }()
+
+ r, err := gcexportdata.NewReader(f)
+ if err != nil {
+ return nil, err
+ }
+
+ return gcexportdata.Read(r, i.fset, i.packageCache, path)
+}
+
// readFacts returns the serialized facts for pkg, read from the "nogo.out"
// member of the package's .a archive, or (nil, nil) when the package has no
// recorded facts. It is the read callback passed to facts.NewDecoder(...).Decode.
func (i *importer) readFacts(pkg *types.Package) ([]byte, error) {
	archive := i.factMap[pkg.Path()]
	if archive == "" {
		// Packages that were not built with the nogo toolchain will not be
		// analyzed, so there's no opportunity to store facts. This includes
		// packages in the standard library and packages built with go_tool_library,
		// such as coverdata. Analyzers are expected to hard code information
		// about standard library definitions and must gracefully handle packages
		// that don't have facts. For example, the "printf" analyzer must know
		// fmt.Printf accepts a format string.
		return nil, nil
	}
	factReader, err := readFileInArchive(nogoFact, archive)
	if os.IsNotExist(err) {
		// Packages that were not built with the nogo toolchain will not be
		// analyzed, so there's no opportunity to store facts. This includes
		// packages in the standard library and packages built with go_tool_library,
		// such as coverdata.
		return nil, nil
	} else if err != nil {
		return nil, err
	}
	defer factReader.Close()
	return ioutil.ReadAll(factReader)
}
+
// factMultiFlag accumulates repeated -fact command-line flags, mapping an
// import path to the file containing that package's serialized facts.
type factMultiFlag map[string]string

// String renders the accumulated map for flag display; empty when unset.
func (m *factMultiFlag) String() string {
	if m == nil || len(*m) == 0 {
		return ""
	}
	return fmt.Sprintf("%v", *m)
}

// Set records one "importpath=factfile" pair. It splits on the first '='
// only, so fact file paths that themselves contain '=' are accepted
// (strings.Split previously rejected them as "badly formatted").
func (m *factMultiFlag) Set(v string) error {
	parts := strings.SplitN(v, "=", 2)
	if len(parts) != 2 {
		return fmt.Errorf("badly formatted -fact flag: %s", v)
	}
	(*m)[parts[0]] = parts[1]
	return nil
}
diff --git a/go/tools/builders/nogo_typeparams_go117.go b/go/tools/builders/nogo_typeparams_go117.go
new file mode 100644
index 00000000..9b6fe9ac
--- /dev/null
+++ b/go/tools/builders/nogo_typeparams_go117.go
@@ -0,0 +1,23 @@
+/* Copyright 2022 The Bazel Authors. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+//go:build !go1.18
+// +build !go1.18
+
+package main
+
+import "go/types"
+
// initInstanceInfo is a no-op before Go 1.18 (see the build tags above):
// there are no generic instantiations to record. The go1.18 variant of this
// file initializes info.Instances instead.
func initInstanceInfo(*types.Info) {}
diff --git a/go/tools/builders/nogo_typeparams_go118.go b/go/tools/builders/nogo_typeparams_go118.go
new file mode 100644
index 00000000..787b492a
--- /dev/null
+++ b/go/tools/builders/nogo_typeparams_go118.go
@@ -0,0 +1,28 @@
+/* Copyright 2022 The Bazel Authors. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+//go:build go1.18
+// +build go1.18
+
+package main
+
+import (
+ "go/ast"
+ "go/types"
+)
+
+func initInstanceInfo(info *types.Info) {
+ info.Instances = make(map[*ast.Ident]types.Instance)
+}
diff --git a/go/tools/builders/nolint.go b/go/tools/builders/nolint.go
new file mode 100644
index 00000000..e6e3c043
--- /dev/null
+++ b/go/tools/builders/nolint.go
@@ -0,0 +1,39 @@
+// Copyright 2023 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import "strings"
+
// parseNolint parses a nolint comment and returns the set of linters it
// applies to. If it applies to all linters (bare "nolint", "nolint:all", or
// an empty linter list), it returns (nil, true). If the comment is not a
// nolint directive at all, it returns (nil, false).
// Linter names are trimmed of surrounding whitespace, so "// nolint: a, b"
// works as well as "//nolint:a,b".
func parseNolint(text string) (map[string]bool, bool) {
	text = strings.TrimLeft(text, "/ ")
	if !strings.HasPrefix(text, "nolint") {
		return nil, false
	}
	// Split on the first ':' only; linter names never contain ':'.
	parts := strings.SplitN(text, ":", 2)
	if len(parts) == 1 {
		return nil, true
	}
	result := map[string]bool{}
	for _, linter := range strings.Split(parts[1], ",") {
		linter = strings.TrimSpace(linter)
		if linter == "" {
			// Skip empty entries such as trailing commas.
			continue
		}
		if strings.EqualFold(linter, "all") {
			return nil, true
		}
		result[linter] = true
	}
	if len(result) == 0 {
		// "//nolint:" with no names behaves like a bare "//nolint".
		return nil, true
	}
	return result, true
}
diff --git a/go/tools/builders/nolint_test.go b/go/tools/builders/nolint_test.go
new file mode 100644
index 00000000..2870eaaf
--- /dev/null
+++ b/go/tools/builders/nolint_test.go
@@ -0,0 +1,79 @@
+// Copyright 2023 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "reflect"
+ "testing"
+)
+
// TestParseNolint exercises parseNolint over representative comment forms,
// checking both the match flag and the returned linter set. A nil Linters
// slice means "applies to all linters" when Valid is true.
func TestParseNolint(t *testing.T) {
	tests := []struct {
		Name    string
		Comment string
		Valid   bool
		Linters []string
	}{
		{
			Name:    "Invalid",
			Comment: "not a comment",
		},
		{
			Name:    "No match",
			Comment: "// comment",
		},
		{
			Name:    "All linters",
			Comment: "//nolint",
			Valid:   true,
		},
		{
			Name:    "All linters (explicit)",
			Comment: "//nolint:all",
			Valid:   true,
		},
		{
			Name:    "Single linter",
			Comment: "// nolint:foo",
			Valid:   true,
			Linters: []string{"foo"},
		},
		{
			Name:    "Multiple linters",
			Comment: "// nolint:a,b,c",
			Valid:   true,
			Linters: []string{"a", "b", "c"},
		},
	}

	for _, tc := range tests {
		t.Run(tc.Name, func(t *testing.T) {
			result, ok := parseNolint(tc.Comment)
			if tc.Valid != ok {
				t.Fatalf("parseNolint expect %t got %t", tc.Valid, ok)
			}
			// Convert the expected slice to the map shape parseNolint returns.
			var linters map[string]bool
			if len(tc.Linters) != 0 {
				linters = make(map[string]bool)
				for _, l := range tc.Linters {
					linters[l] = true
				}
			}
			if !reflect.DeepEqual(result, linters) {
				t.Fatalf("parseNolint expect %v got %v", linters, result)
			}
		})
	}
}
diff --git a/go/tools/builders/pack.go b/go/tools/builders/pack.go
new file mode 100644
index 00000000..ddbb1930
--- /dev/null
+++ b/go/tools/builders/pack.go
@@ -0,0 +1,388 @@
+// Copyright 2017 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+)
+
+func copyFile(inPath, outPath string) error {
+ inFile, err := os.Open(inPath)
+ if err != nil {
+ return err
+ }
+ defer inFile.Close()
+ outFile, err := os.OpenFile(outPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0666)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+ _, err = io.Copy(outFile, inFile)
+ return err
+}
+
+func linkFile(inPath, outPath string) error {
+ inPath, err := filepath.Abs(inPath)
+ if err != nil {
+ return err
+ }
+ return os.Symlink(inPath, outPath)
+}
+
+func copyOrLinkFile(inPath, outPath string) error {
+ if runtime.GOOS == "windows" {
+ return copyFile(inPath, outPath)
+ } else {
+ return linkFile(inPath, outPath)
+ }
+}
+
const (
	// arHeader appears at the beginning of archives created by "ar" and
	// "go tool pack" on all platforms.
	arHeader = "!<arch>\n"

	// entryLength is the size in bytes of the metadata preceding each file
	// in an archive.
	entryLength = 60

	// pkgDef is the name of the export data file within an archive
	pkgDef = "__.PKGDEF"

	// nogoFact is the name of the nogo fact file
	nogoFact = "nogo.out"
)

// zeroBytes is an ASCII "0 " filler for ar entry metadata fields.
// NOTE(review): not referenced in the visible portion of this file —
// confirm it is still used before removing.
var zeroBytes = []byte("0 ")
+
// bufioReaderWithCloser pairs a buffered reader over an archive with the
// Closer of the underlying file so callers can release it.
type bufioReaderWithCloser struct {
	// bufio.Reader is needed to skip bytes in archives
	*bufio.Reader
	io.Closer
}
+
// extractFiles extracts the object files (entries named *.o) from archive
// into dir and returns the paths of the files written; other entries are
// skipped. names tracks shortened base names already in use so extractions
// from multiple archives never collide (see simpleName).
func extractFiles(archive, dir string, names map[string]struct{}) (files []string, err error) {
	rc, err := openArchive(archive)
	if err != nil {
		return nil, err
	}
	defer rc.Close()

	var nameData []byte
	bufReader := rc.Reader
	for {
		name, size, err := readMetadata(bufReader, &nameData)
		if err == io.EOF {
			// Normal termination: no more entries in the archive.
			return files, nil
		}
		if err != nil {
			return nil, err
		}
		if !isObjectFile(name) {
			if err := skipFile(bufReader, size); err != nil {
				return nil, err
			}
			continue
		}
		name, err = simpleName(name, names)
		if err != nil {
			return nil, err
		}
		name = filepath.Join(dir, name)
		if err := extractFile(bufReader, name, size); err != nil {
			return nil, err
		}
		files = append(files, name)
	}
}
+
+func openArchive(archive string) (bufioReaderWithCloser, error) {
+ f, err := os.Open(archive)
+ if err != nil {
+ return bufioReaderWithCloser{}, err
+ }
+ r := bufio.NewReader(f)
+ header := make([]byte, len(arHeader))
+ if _, err := io.ReadFull(r, header); err != nil || string(header) != arHeader {
+ f.Close()
+ return bufioReaderWithCloser{}, fmt.Errorf("%s: bad header", archive)
+ }
+ return bufioReaderWithCloser{r, f}, nil
+}
+
// readMetadata reads the relevant fields of an entry. Before calling,
// r must be positioned at the beginning of an entry. Afterward, r will
// be positioned at the beginning of the file data. io.EOF is returned if
// there are no more files in the archive.
//
// Both BSD and GNU / SysV naming conventions are supported.
func readMetadata(r *bufio.Reader, nameData *[]byte) (name string, size int64, err error) {
retry:
	// Each file is preceded by a 60-byte header that contains its metadata.
	// We only care about two fields, name and size. Other fields (mtime,
	// owner, group, mode) are ignored because they don't affect compilation.
	var entry [entryLength]byte
	if _, err := io.ReadFull(r, entry[:]); err != nil {
		return "", 0, err
	}

	// Bytes 48-57 hold the decimal size, right-padded with spaces.
	sizeField := strings.TrimSpace(string(entry[48:58]))
	size, err = strconv.ParseInt(sizeField, 10, 64)
	if err != nil {
		return "", 0, err
	}

	// Bytes 0-15 hold the name field, whose interpretation varies by format.
	nameField := strings.TrimRight(string(entry[:16]), " ")
	switch {
	case strings.HasPrefix(nameField, "#1/"):
		// BSD-style name. The number of bytes in the name is written here in
		// ASCII, right-padded with spaces. The actual name is stored at the
		// beginning of the file data, left-padded with NUL bytes.
		nameField = nameField[len("#1/"):]
		nameLen, err := strconv.ParseInt(nameField, 10, 64)
		if err != nil {
			return "", 0, err
		}
		nameBuf := make([]byte, nameLen)
		if _, err := io.ReadFull(r, nameBuf); err != nil {
			return "", 0, err
		}
		name = strings.TrimRight(string(nameBuf), "\x00")
		// The name bytes count toward the entry size; the remainder is data.
		size -= nameLen

	case nameField == "//":
		// GNU / SysV-style name data. This is a fake file that contains names
		// for files with long names. We read this into nameData, then read
		// the next entry.
		*nameData = make([]byte, size)
		if _, err := io.ReadFull(r, *nameData); err != nil {
			return "", 0, err
		}
		if size%2 != 0 {
			// Files are aligned at 2-byte offsets. Discard the padding byte if the
			// size was odd.
			if _, err := r.ReadByte(); err != nil {
				return "", 0, err
			}
		}
		goto retry

	case nameField == "/":
		// GNU / SysV-style symbol lookup table. Skip.
		if err := skipFile(r, size); err != nil {
			return "", 0, err
		}
		goto retry

	case strings.HasPrefix(nameField, "/"):
		// GNU / SysV-style long file name. The number that follows the slash is
		// an offset into the name data that should have been read earlier.
		// The file name ends with a slash.
		nameField = nameField[1:]
		nameOffset, err := strconv.Atoi(nameField)
		if err != nil {
			return "", 0, err
		}
		// NOTE(review): nameData is a non-nil pointer at every call site in
		// this file, so the len(*nameData) bound is the effective guard here.
		if nameData == nil || nameOffset < 0 || nameOffset >= len(*nameData) {
			return "", 0, fmt.Errorf("invalid name length: %d", nameOffset)
		}
		i := bytes.IndexByte((*nameData)[nameOffset:], '/')
		if i < 0 {
			return "", 0, errors.New("file name does not end with '/'")
		}
		name = string((*nameData)[nameOffset : nameOffset+i])

	case strings.HasSuffix(nameField, "/"):
		// GNU / SysV-style short file name.
		name = nameField[:len(nameField)-1]

	default:
		// Common format name.
		name = nameField
	}

	return name, size, err
}
+
+// extractFile reads size bytes from r and writes them to a new file, name.
+func extractFile(r *bufio.Reader, name string, size int64) error {
+ w, err := os.Create(name)
+ if err != nil {
+ return err
+ }
+ defer w.Close()
+ _, err = io.CopyN(w, r, size)
+ if err != nil {
+ return err
+ }
+ if size%2 != 0 {
+ // Files are aligned at 2-byte offsets. Discard the padding byte if the
+ // size was odd.
+ if _, err := r.ReadByte(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func skipFile(r *bufio.Reader, size int64) error {
+ if size%2 != 0 {
+ // Files are aligned at 2-byte offsets. Discard the padding byte if the
+ // size was odd.
+ size += 1
+ }
+ _, err := r.Discard(int(size))
+ return err
+}
+
// isObjectFile reports whether an archive entry name denotes a compiled
// object file (".o" extension).
func isObjectFile(name string) bool {
	return filepath.Ext(name) == ".o"
}
+
// simpleName returns a file name which is at most 15 characters
// and doesn't conflict with other names. If it is not possible to choose
// such a name, simpleName will truncate the given name to 15 characters,
// preserving the original file extension. Chosen names are recorded in
// names so later calls avoid them. An error is returned only when no
// non-conflicting candidate exists.
func simpleName(name string, names map[string]struct{}) (string, error) {
	if _, taken := names[name]; !taken && len(name) < 16 {
		names[name] = struct{}{}
		return name, nil
	}
	// Split into stem and extension; interior dots in the stem become '_'.
	stem, ext := name, ""
	if dot := strings.LastIndexByte(name, '.'); dot >= 0 {
		stem = strings.Replace(name[:dot], ".", "_", -1)
		ext = name[dot:]
	}
	// Try stems suffixed with 0, 1, 2, ... until a free name appears.
	for n := 0; n <= len(names); n++ {
		suffix := strconv.Itoa(n)
		keep := 15 - len(ext) - len(suffix)
		if keep < 0 {
			break
		}
		if keep > len(stem) {
			keep = len(stem)
		}
		candidate := stem[:keep] + suffix + ext
		if _, taken := names[candidate]; !taken {
			names[candidate] = struct{}{}
			return candidate, nil
		}
	}
	return "", fmt.Errorf("cannot shorten file name: %q", name)
}
+
// appendFiles appends files to archive by shelling out to 'go tool pack r',
// creating an empty ar archive first if none exists.
func appendFiles(goenv *env, archive string, files []string) error {
	archive = abs(archive) // required for long filenames on Windows.

	// Create an empty archive if one doesn't already exist.
	// In Go 1.16, 'go tool pack r' reports an error if the archive doesn't exist.
	// 'go tool pack c' copies export data in addition to creating the archive,
	// so we don't want to use that directly.
	_, err := os.Stat(archive)
	if err != nil && !os.IsNotExist(err) {
		return err
	}
	if os.IsNotExist(err) {
		if err := ioutil.WriteFile(archive, []byte(arHeader), 0666); err != nil {
			return err
		}
	}

	// Append files to the archive.
	// TODO(jayconrod): copy cmd/internal/archive and use that instead of
	// shelling out to cmd/pack.
	args := goenv.goTool("pack", "r", archive)
	args = append(args, files...)
	return goenv.runCommand(args)
}
+
// readWithCloser pairs a bounded reader over one archive member with the
// Closer that releases the underlying archive file.
type readWithCloser struct {
	io.Reader
	io.Closer
}
+
// readFileInArchive returns a ReadCloser over the contents of the archive
// member named fileName; closing it closes the archive. If no member
// matches, the returned error satisfies os.IsNotExist.
func readFileInArchive(fileName, archive string) (io.ReadCloser, error) {
	rc, err := openArchive(archive)
	if err != nil {
		return nil, err
	}
	var nameData []byte
	bufReader := rc.Reader
	for err == nil {
		// avoid shadowing err in the loop it can be returned correctly in the end
		var (
			name string
			size int64
		)
		name, size, err = readMetadata(bufReader, &nameData)
		if err != nil {
			break
		}
		if name == fileName {
			// rc reads through the same buffered reader, which is already
			// positioned at this member's data; limit it to the member size.
			return readWithCloser{
				Reader: io.LimitReader(rc, size),
				Closer: rc,
			}, nil
		}
		err = skipFile(bufReader, size)
	}
	if err == io.EOF {
		// Member not found: translate EOF into a not-exist error.
		err = os.ErrNotExist
	}
	rc.Close()
	return nil, err
}
+
// extractFileFromArchive copies the archive member called name into dir,
// failing if the member is missing or empty. Close errors on either side
// are surfaced via the named return when no earlier error occurred.
// NOTE(review): the output is always written to filepath.Join(dir, pkgDef)
// (__.PKGDEF) regardless of name — confirm this is intended for callers
// extracting members other than the export data.
func extractFileFromArchive(archive, dir, name string) (err error) {
	archiveReader, err := readFileInArchive(name, archive)
	if err != nil {
		return fmt.Errorf("error reading %s from %s: %v", name, archive, err)
	}
	defer func() {
		e := archiveReader.Close()
		if e != nil && err == nil {
			err = fmt.Errorf("error closing %q: %v", archive, e)
		}
	}()
	outPath := filepath.Join(dir, pkgDef)
	outFile, err := os.Create(outPath)
	if err != nil {
		return fmt.Errorf("error creating %s: %v", outPath, err)
	}
	defer func() {
		e := outFile.Close()
		if e != nil && err == nil {
			err = fmt.Errorf("error closing %q: %v", outPath, e)
		}
	}()
	if size, err := io.Copy(outFile, archiveReader); err != nil {
		return fmt.Errorf("error writing %s: %v", outPath, err)
	} else if size == 0 {
		return fmt.Errorf("%s is empty in %s", name, archive)
	}
	return err
}
diff --git a/go/tools/builders/path.go b/go/tools/builders/path.go
new file mode 100644
index 00000000..f60e4deb
--- /dev/null
+++ b/go/tools/builders/path.go
@@ -0,0 +1,7 @@
+// +build !windows
+
+package main
+
// processPath is the non-Windows variant (see the build tag above): paths
// need no normalization, so it returns the input unchanged. The Windows
// variant in path_windows.go converts paths to their short (8.3) form.
func processPath(path string) (string, error) {
	return path, nil
}
diff --git a/go/tools/builders/path_windows.go b/go/tools/builders/path_windows.go
new file mode 100644
index 00000000..23b1b65b
--- /dev/null
+++ b/go/tools/builders/path_windows.go
@@ -0,0 +1,25 @@
+// +build windows
+
+package main
+
+import (
+ "runtime"
+ "syscall"
+)
+
// processPath converts path to its Windows short (8.3) form, which avoids
// spaces and long components. On failure, the original path is returned
// alongside the error.
func processPath(path string) (string, error) {
	// NOTE(review): this file builds only on Windows (see build tag), so
	// this GOOS check is effectively dead code; removing it would orphan
	// the runtime import — flag rather than change here.
	if runtime.GOOS != "windows" {
		return path, nil
	}

	// 258-element UTF-16 buffer — presumably sized for classic MAX_PATH
	// limits; TODO confirm longer results are handled upstream.
	var buf [258]uint16
	up, err := syscall.UTF16PtrFromString(path)
	if err != nil {
		return path, err
	}
	_, err = syscall.GetShortPathName(up, &buf[0], 258)
	if err != nil {
		return path, err
	}
	return syscall.UTF16ToString(buf[:]), nil
}
diff --git a/go/tools/builders/protoc.go b/go/tools/builders/protoc.go
new file mode 100644
index 00000000..46a9f012
--- /dev/null
+++ b/go/tools/builders/protoc.go
@@ -0,0 +1,219 @@
+// Copyright 2017 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// protoc invokes the protobuf compiler and captures the resulting .pb.go file.
+package main
+
+import (
+ "bytes"
+ "errors"
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strings"
+)
+
// genFileInfo tracks one expected or generated output file while run
// reconciles what protoc produced against what the Bazel rule declared.
type genFileInfo struct {
	base       string       // The basename of the path
	path       string       // The full path to the final file
	expected   bool         // Whether the file is expected by the rules
	created    bool         // Whether the file was created by protoc
	from       *genFileInfo // The actual file protoc produced if not Path
	unique     bool         // True if this base name is unique in expected results
	ambiguious bool         // True if there were more than one possible outputs that matched this file ("ambiguious" [sic] matches usage below)
}
+
// run is the protoc wrapper's entry point: it parses the wrapper flags,
// invokes the real protoc with the Go plugin into a temporary directory
// (working around Windows path-length limits), then reconciles the
// generated .go files against the outputs the rule expects, copying or
// stubbing files as needed.
func run(args []string) error {
	// process the args
	args, useParamFile, err := expandParamsFiles(args)
	if err != nil {
		return err
	}
	options := multiFlag{}
	descriptors := multiFlag{}
	expected := multiFlag{}
	imports := multiFlag{}
	flags := flag.NewFlagSet("protoc", flag.ExitOnError)
	protoc := flags.String("protoc", "", "The path to the real protoc.")
	outPath := flags.String("out_path", "", "The base output path to write to.")
	plugin := flags.String("plugin", "", "The go plugin to use.")
	importpath := flags.String("importpath", "", "The importpath for the generated sources.")
	flags.Var(&options, "option", "The plugin options.")
	flags.Var(&descriptors, "descriptor_set", "The descriptor set to read.")
	flags.Var(&expected, "expected", "The expected output files.")
	flags.Var(&imports, "import", "Map a proto file to an import path.")
	if err := flags.Parse(args); err != nil {
		return err
	}

	// Output to a temporary folder and then move the contents into place below.
	// This is to work around long file paths on Windows.
	tmpDir, err := ioutil.TempDir("", "go_proto")
	if err != nil {
		return err
	}
	tmpDir = abs(tmpDir)        // required to work with long paths on Windows
	absOutPath := abs(*outPath) // required to work with long paths on Windows
	defer os.RemoveAll(tmpDir)

	pluginBase := filepath.Base(*plugin)
	pluginName := strings.TrimSuffix(
		strings.TrimPrefix(filepath.Base(*plugin), "protoc-gen-"), ".exe")
	// Each -import mapping becomes an M<proto>=<importpath> plugin option.
	for _, m := range imports {
		options = append(options, fmt.Sprintf("M%v", m))
	}
	if runtime.GOOS == "windows" {
		// Turn the plugin path into raw form, since we're handing it off to a non-go binary.
		// This is required to work with long paths on Windows.
		*plugin = "\\\\?\\" + abs(*plugin)
	}
	protoc_args := []string{
		fmt.Sprintf("--%v_out=%v:%v", pluginName, strings.Join(options, ","), tmpDir),
		"--plugin", fmt.Sprintf("%v=%v", strings.TrimSuffix(pluginBase, ".exe"), *plugin),
		"--descriptor_set_in", strings.Join(descriptors, string(os.PathListSeparator)),
	}
	protoc_args = append(protoc_args, flags.Args()...)

	var cmd *exec.Cmd
	if useParamFile {
		// Long argument lists go through an @file to avoid OS command-line limits.
		paramFile, err := ioutil.TempFile(tmpDir, "protoc-*.params")
		if err != nil {
			return fmt.Errorf("error creating param file for protoc: %v", err)
		}
		for _, arg := range protoc_args {
			_, err := fmt.Fprintln(paramFile, arg)
			if err != nil {
				return fmt.Errorf("error writing param file for protoc: %v", err)
			}
		}
		cmd = exec.Command(*protoc, "@"+paramFile.Name())
	} else {
		cmd = exec.Command(*protoc, protoc_args...)
	}

	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("error running protoc: %v", err)
	}
	// Build our file map, and test for existance
	files := map[string]*genFileInfo{}
	byBase := map[string]*genFileInfo{}
	for _, path := range expected {
		info := &genFileInfo{
			path:     path,
			base:     filepath.Base(path),
			expected: true,
			unique:   true,
		}
		files[info.path] = info
		if byBase[info.base] != nil {
			// Duplicate base names cannot be matched by basename alone.
			info.unique = false
			byBase[info.base].unique = false
		} else {
			byBase[info.base] = info
		}
	}
	// Walk the generated files
	filepath.Walk(tmpDir, func(path string, f os.FileInfo, err error) error {
		relPath, err := filepath.Rel(tmpDir, path)
		if err != nil {
			return err
		}
		if relPath == "." {
			return nil
		}

		if f.IsDir() {
			// Mirror the directory structure under the real output path.
			if err := os.Mkdir(filepath.Join(absOutPath, relPath), f.Mode()); !os.IsExist(err) {
				return err
			}
			return nil
		}

		if !strings.HasSuffix(path, ".go") {
			return nil
		}

		info := &genFileInfo{
			path:    path,
			base:    filepath.Base(path),
			created: true,
		}

		if foundInfo, ok := files[relPath]; ok {
			foundInfo.created = true
			foundInfo.from = info
			return nil
		}
		// Not an exact path match; try to match by unique base name.
		files[relPath] = info
		copyTo := byBase[info.base]
		switch {
		case copyTo == nil:
			// Unwanted output
		case !copyTo.unique:
			// not unique, no copy allowed
		case copyTo.from != nil:
			copyTo.ambiguious = true
			info.ambiguious = true
		default:
			copyTo.from = info
			copyTo.created = true
			info.expected = true
		}
		return nil
	})
	buf := &bytes.Buffer{}
	for _, f := range files {
		switch {
		case f.expected && !f.created:
			// Some plugins only create output files if the proto source files have
			// have relevant definitions (e.g., services for grpc_gateway). Create
			// trivial files that the compiler will ignore for missing outputs.
			data := []byte("// +build ignore\n\npackage ignore")
			if err := ioutil.WriteFile(abs(f.path), data, 0644); err != nil {
				return err
			}
		case f.expected && f.ambiguious:
			fmt.Fprintf(buf, "Ambiguious output %v.\n", f.path)
		case f.from != nil:
			data, err := ioutil.ReadFile(f.from.path)
			if err != nil {
				return err
			}
			if err := ioutil.WriteFile(abs(f.path), data, 0644); err != nil {
				return err
			}
		case !f.expected:
			//fmt.Fprintf(buf, "Unexpected output %v.\n", f.path)
		}
		// NOTE(review): this check runs inside the loop, so the function
		// returns after the first flagged file, and map iteration order makes
		// that choice nondeterministic — confirm it shouldn't sit after the
		// loop to aggregate all problems.
		if buf.Len() > 0 {
			fmt.Fprintf(buf, "Check that the go_package option is %q.", *importpath)
			return errors.New(buf.String())
		}
	}

	return nil
}
+
// main runs the protoc wrapper over the command-line arguments and aborts
// the build with a non-zero exit status on any error.
func main() {
	if err := run(os.Args[1:]); err != nil {
		log.Fatal(err)
	}
}
diff --git a/go/tools/builders/read.go b/go/tools/builders/read.go
new file mode 100644
index 00000000..b03c02bf
--- /dev/null
+++ b/go/tools/builders/read.go
@@ -0,0 +1,551 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was adapted from Go src/go/build/read.go at commit 8634a234df2a
+// on 2021-01-26. It's used to extract metadata from .go files without requiring
+// them to be in the same directory.
+
+package main
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "io"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
// importReader scans a Go source file with just enough state to locate
// the package clause, the import declarations, and //go:embed comments,
// without performing a full parse.
type importReader struct {
	b    *bufio.Reader
	buf  []byte         // all bytes consumed so far (the file "header")
	peek byte           // lookahead byte left by peekByte; 0 means none
	err  error          // first I/O or syntax error encountered
	eof  bool           // input is exhausted
	nerr int            // reads attempted after an error; guards against loops
	pos  token.Position // position of the next byte, used by readByteNoBuf
}
+
+func newImportReader(name string, r io.Reader) *importReader {
+ return &importReader{
+ b: bufio.NewReader(r),
+ pos: token.Position{
+ Filename: name,
+ Line: 1,
+ Column: 1,
+ },
+ }
+}
+
// isIdent reports whether c may appear in a Go identifier: an ASCII
// letter, digit, underscore, or any byte that starts a non-ASCII rune.
func isIdent(c byte) bool {
	switch {
	case c >= utf8.RuneSelf:
		return true
	case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
		return true
	case '0' <= c && c <= '9', c == '_':
		return true
	default:
		return false
	}
}
+
// Sentinel errors recorded in importReader.err while scanning.
var (
	errSyntax = errors.New("syntax error")
	errNUL    = errors.New("unexpected NUL in input")
)
+
// syntaxError records a syntax error, but only if an I/O error has not already been recorded.
func (r *importReader) syntaxError() {
	// An earlier I/O error is more informative; never overwrite it.
	if r.err == nil {
		r.err = errSyntax
	}
}
+
// readByte reads the next byte from the input, saves it in buf, and returns it.
// If an error occurs, readByte records the error in r.err and returns 0.
func (r *importReader) readByte() byte {
	c, err := r.b.ReadByte()
	if err == nil {
		r.buf = append(r.buf, c)
		if c == 0 {
			// NUL bytes are never valid in Go source.
			err = errNUL
		}
	}
	if err != nil {
		// EOF is tracked separately from real errors so callers can
		// distinguish "input finished" from "input broken".
		if err == io.EOF {
			r.eof = true
		} else if r.err == nil {
			r.err = err
		}
		c = 0
	}
	return c
}
+
// readByteNoBuf is like readByte but doesn't buffer the byte.
// It exhausts r.buf before reading from r.b.
func (r *importReader) readByteNoBuf() byte {
	var c byte
	var err error
	if len(r.buf) > 0 {
		// Replay bytes that were already consumed by the buffered scan.
		c = r.buf[0]
		r.buf = r.buf[1:]
	} else {
		c, err = r.b.ReadByte()
		if err == nil && c == 0 {
			err = errNUL
		}
	}

	if err != nil {
		if err == io.EOF {
			r.eof = true
		} else if r.err == nil {
			r.err = err
		}
		return 0
	}
	// Advance the position so //go:embed patterns get accurate locations.
	r.pos.Offset++
	if c == '\n' {
		r.pos.Line++
		r.pos.Column = 1
	} else {
		r.pos.Column++
	}
	return c
}
+
// peekByte returns the next byte from the input reader but does not advance beyond it.
// If skipSpace is set, peekByte skips leading spaces and comments.
func (r *importReader) peekByte(skipSpace bool) byte {
	if r.err != nil {
		// After an error, keep returning 0 — but a caller that loops on
		// peekByte forever indicates a bug in this package, so panic.
		if r.nerr++; r.nerr > 10000 {
			panic("go/build: import reader looping")
		}
		return 0
	}

	// Use r.peek as first input byte.
	// Don't just return r.peek here: it might have been left by peekByte(false)
	// and this might be peekByte(true).
	c := r.peek
	if c == 0 {
		c = r.readByte()
	}
	for r.err == nil && !r.eof {
		if skipSpace {
			// For the purposes of this reader, semicolons are never necessary to
			// understand the input and are treated as spaces.
			switch c {
			case ' ', '\f', '\t', '\r', '\n', ';':
				c = r.readByte()
				continue

			case '/':
				// Skip a line (//) or block (/* */) comment.
				c = r.readByte()
				if c == '/' {
					for c != '\n' && r.err == nil && !r.eof {
						c = r.readByte()
					}
				} else if c == '*' {
					var c1 byte
					for (c != '*' || c1 != '/') && r.err == nil {
						if r.eof {
							r.syntaxError()
						}
						c, c1 = c1, r.readByte()
					}
				} else {
					r.syntaxError()
				}
				c = r.readByte()
				continue
			}
		}
		break
	}
	r.peek = c
	return r.peek
}
+
+// nextByte is like peekByte but advances beyond the returned byte.
+func (r *importReader) nextByte(skipSpace bool) byte {
+ c := r.peekByte(skipSpace)
+ r.peek = 0
+ return c
+}
+
// goEmbed is the directive name matched byte-by-byte inside // comments.
var goEmbed = []byte("go:embed")

// findEmbed advances the input reader to the next //go:embed comment.
// It reports whether it found a comment.
// (Otherwise it found an error or EOF.)
func (r *importReader) findEmbed(first bool) bool {
	// The import block scan stopped after a non-space character,
	// so the reader is not at the start of a line on the first call.
	// After that, each //go:embed extraction leaves the reader
	// at the end of a line.
	startLine := !first
	var c byte
	for r.err == nil && !r.eof {
		c = r.readByteNoBuf()
	Reswitch:
		switch c {
		default:
			startLine = false

		case '\n':
			startLine = true

		case ' ', '\t':
			// leave startLine alone

		case '"':
			// Skip an interpreted string literal; a //go:embed inside one
			// is not a directive.
			startLine = false
			for r.err == nil {
				if r.eof {
					r.syntaxError()
				}
				c = r.readByteNoBuf()
				if c == '\\' {
					r.readByteNoBuf()
					if r.err != nil {
						r.syntaxError()
						return false
					}
					continue
				}
				if c == '"' {
					c = r.readByteNoBuf()
					goto Reswitch
				}
			}
			goto Reswitch

		case '`':
			// Skip a raw string literal.
			startLine = false
			for r.err == nil {
				if r.eof {
					r.syntaxError()
				}
				c = r.readByteNoBuf()
				if c == '`' {
					c = r.readByteNoBuf()
					goto Reswitch
				}
			}

		case '/':
			c = r.readByteNoBuf()
			switch c {
			default:
				startLine = false
				goto Reswitch

			case '*':
				// Skip a block comment.
				var c1 byte
				for (c != '*' || c1 != '/') && r.err == nil {
					if r.eof {
						r.syntaxError()
					}
					c, c1 = c1, r.readByteNoBuf()
				}
				startLine = false

			case '/':
				if startLine {
					// Try to read this as a //go:embed comment.
					for i := range goEmbed {
						c = r.readByteNoBuf()
						if c != goEmbed[i] {
							goto SkipSlashSlash
						}
					}
					c = r.readByteNoBuf()
					if c == ' ' || c == '\t' {
						// Found one!
						return true
					}
				}
			SkipSlashSlash:
				// Ordinary line comment: skip to end of line.
				for c != '\n' && r.err == nil && !r.eof {
					c = r.readByteNoBuf()
				}
				startLine = true
			}
		}
	}
	return false
}
+
// readKeyword reads the given keyword from the input.
// If the keyword is not present, readKeyword records a syntax error.
func (r *importReader) readKeyword(kw string) {
	r.peekByte(true)
	for i := 0; i < len(kw); i++ {
		if r.nextByte(false) != kw[i] {
			r.syntaxError()
			return
		}
	}
	// The keyword must not continue into a longer identifier
	// (e.g. "packages" is not the keyword "package").
	if isIdent(r.peekByte(false)) {
		r.syntaxError()
	}
}
+
// readIdent reads an identifier from the input.
// If an identifier is not present, readIdent records a syntax error.
func (r *importReader) readIdent() {
	c := r.peekByte(true)
	if !isIdent(c) {
		r.syntaxError()
		return
	}
	// Consume identifier bytes until a non-identifier byte is seen;
	// clearing r.peek is what advances past each byte.
	for isIdent(r.peekByte(false)) {
		r.peek = 0
	}
}
+
// readString reads a quoted string literal from the input.
// If a string is not present, readString records a syntax error.
func (r *importReader) readString() {
	switch r.nextByte(true) {
	case '`':
		// Raw string: scan to the closing backquote.
		for r.err == nil {
			if r.nextByte(false) == '`' {
				break
			}
			if r.eof {
				r.syntaxError()
			}
		}
	case '"':
		// Interpreted string: scan to the closing quote, honoring
		// backslash escapes and rejecting embedded newlines.
		for r.err == nil {
			c := r.nextByte(false)
			if c == '"' {
				break
			}
			if r.eof || c == '\n' {
				r.syntaxError()
			}
			if c == '\\' {
				r.nextByte(false)
			}
		}
	default:
		r.syntaxError()
	}
}
+
// readImport reads an import clause - optional identifier followed by quoted string -
// from the input.
func (r *importReader) readImport() {
	c := r.peekByte(true)
	if c == '.' {
		// Dot import: consume the '.'.
		r.peek = 0
	} else if isIdent(c) {
		// Named import alias.
		r.readIdent()
	}
	r.readString()
}
+
// readComments is like io.ReadAll, except that it only reads the leading
// block of comments in the file.
func readComments(f io.Reader) ([]byte, error) {
	r := newImportReader("", f)
	// peekByte(true) skips — and therefore buffers into r.buf — all
	// leading spaces and comments, stopping at the first real token.
	r.peekByte(true)
	if r.err == nil && !r.eof {
		// Didn't reach EOF, so must have found a non-space byte. Remove it.
		r.buf = r.buf[:len(r.buf)-1]
	}
	return r.buf, r.err
}
+
// readGoInfo expects a Go file as input and reads the file up to and including the import section.
// It records what it learned in *info.
// If info.fset is non-nil, readGoInfo parses the file and sets info.parsed, info.parseErr,
// info.imports, info.embeds, and info.embedErr.
//
// It only returns an error if there are problems reading the file,
// not for syntax errors in the file itself.
func readGoInfo(f io.Reader, info *fileInfo) error {
	r := newImportReader(info.filename, f)

	// Scan the package clause, then zero or more import declarations.
	r.readKeyword("package")
	r.readIdent()
	for r.peekByte(true) == 'i' {
		r.readKeyword("import")
		if r.peekByte(true) == '(' {
			// Parenthesized import block.
			r.nextByte(false)
			for r.peekByte(true) != ')' && r.err == nil {
				r.readImport()
			}
			r.nextByte(false)
		} else {
			// Single import spec.
			r.readImport()
		}
	}

	info.header = r.buf

	// If we stopped successfully before EOF, we read a byte that told us we were done.
	// Return all but that last byte, which would cause a syntax error if we let it through.
	if r.err == nil && !r.eof {
		info.header = r.buf[:len(r.buf)-1]
	}

	// If we stopped for a syntax error, consume the whole file so that
	// we are sure we don't change the errors that go/parser returns.
	if r.err == errSyntax {
		r.err = nil
		for r.err == nil && !r.eof {
			r.readByte()
		}
		info.header = r.buf
	}
	if r.err != nil {
		return r.err
	}

	// Without a FileSet the caller only wanted the raw header bytes.
	if info.fset == nil {
		return nil
	}

	// Parse file header & record imports.
	info.parsed, info.parseErr = parser.ParseFile(info.fset, info.filename, info.header, parser.ImportsOnly|parser.ParseComments)
	if info.parseErr != nil {
		return nil
	}
	info.pkg = info.parsed.Name.Name

	hasEmbed := false
	for _, decl := range info.parsed.Decls {
		d, ok := decl.(*ast.GenDecl)
		if !ok {
			continue
		}
		for _, dspec := range d.Specs {
			spec, ok := dspec.(*ast.ImportSpec)
			if !ok {
				continue
			}
			quoted := spec.Path.Value
			path, err := strconv.Unquote(quoted)
			if err != nil {
				return fmt.Errorf("parser returned invalid quoted string: <%s>", quoted)
			}
			if path == "embed" {
				hasEmbed = true
			}

			// Prefer the per-spec doc comment; fall back to the declaration's
			// doc comment when the declaration holds a single spec.
			doc := spec.Doc
			if doc == nil && len(d.Specs) == 1 {
				doc = d.Doc
			}
			info.imports = append(info.imports, fileImport{path, spec.Pos(), doc})
		}
	}

	// If the file imports "embed",
	// we have to look for //go:embed comments
	// in the remainder of the file.
	// The compiler will enforce the mapping of comments to
	// declared variables. We just need to know the patterns.
	// If there were //go:embed comments earlier in the file
	// (near the package statement or imports), the compiler
	// will reject them. They can be (and have already been) ignored.
	if hasEmbed {
		var line []byte
		for first := true; r.findEmbed(first); first = false {
			line = line[:0]
			pos := r.pos
			// Collect the rest of the //go:embed line.
			for {
				c := r.readByteNoBuf()
				if c == '\n' || r.err != nil || r.eof {
					break
				}
				line = append(line, c)
			}
			// Add args if line is well-formed.
			// Ignore badly-formed lines - the compiler will report them when it finds them,
			// and we can pretend they are not there to help go list succeed with what it knows.
			embs, err := parseGoEmbed(string(line), pos)
			if err == nil {
				info.embeds = append(info.embeds, embs...)
			}
		}
	}

	return nil
}
+
// parseGoEmbed parses the text following "//go:embed" to extract the glob patterns.
// It accepts unquoted space-separated patterns as well as double-quoted and back-quoted Go strings.
// This is based on a similar function in cmd/compile/internal/gc/noder.go;
// this version calculates position information as well.
func parseGoEmbed(args string, pos token.Position) ([]fileEmbed, error) {
	// trimBytes advances pos past the first n bytes of args.
	trimBytes := func(n int) {
		pos.Offset += n
		pos.Column += utf8.RuneCountInString(args[:n])
		args = args[n:]
	}
	trimSpace := func() {
		trim := strings.TrimLeftFunc(args, unicode.IsSpace)
		trimBytes(len(args) - len(trim))
	}

	var list []fileEmbed
	for trimSpace(); args != ""; trimSpace() {
		var path string
		pathPos := pos
	Switch:
		switch args[0] {
		default:
			// Bare pattern: runs to the next space (or end of line).
			i := len(args)
			for j, c := range args {
				if unicode.IsSpace(c) {
					i = j
					break
				}
			}
			path = args[:i]
			trimBytes(i)

		case '`':
			// Raw-string pattern.
			i := strings.Index(args[1:], "`")
			if i < 0 {
				return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
			}
			path = args[1 : 1+i]
			trimBytes(1 + i + 1)

		case '"':
			// Interpreted-string pattern: scan past escapes to the closing quote.
			i := 1
			for ; i < len(args); i++ {
				if args[i] == '\\' {
					i++
					continue
				}
				if args[i] == '"' {
					q, err := strconv.Unquote(args[:i+1])
					if err != nil {
						return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args[:i+1])
					}
					path = q
					trimBytes(i + 1)
					break Switch
				}
			}
			if i >= len(args) {
				return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
			}
		}

		// Each pattern must be followed by whitespace or end of line.
		if args != "" {
			r, _ := utf8.DecodeRuneInString(args)
			if !unicode.IsSpace(r) {
				return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
			}
		}
		list = append(list, fileEmbed{path, pathPos})
	}
	return list, nil
}
diff --git a/go/tools/builders/replicate.go b/go/tools/builders/replicate.go
new file mode 100644
index 00000000..117f882c
--- /dev/null
+++ b/go/tools/builders/replicate.go
@@ -0,0 +1,167 @@
+// Copyright 2018 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// stdlib builds the standard library in the appropriate mode into a new goroot.
+package main
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+)
+
// replicateMode selects how an entry is made visible at the destination:
// by copying, hard link, or symbolic link.
type replicateMode int

const (
	copyMode replicateMode = iota
	hardlinkMode
	softlinkMode
)

// replicateOption mutates a replicateConfig; see replicate.
type replicateOption func(*replicateConfig)

// replicateConfig controls how replicate maps src into dst.
type replicateConfig struct {
	removeFirst bool          // remove an existing destination entry before writing
	fileMode    replicateMode // how individual files are replicated
	dirMode     replicateMode // how whole directories are replicated
	paths       []string      // if non-empty, only these paths under src are replicated
}
+
+func replicatePaths(paths ...string) replicateOption {
+ return func(config *replicateConfig) {
+ config.paths = append(config.paths, paths...)
+ }
+}
+
// replicatePrepare is the common preparation steps for a replication entry
func replicatePrepare(dst string, config *replicateConfig) error {
	// Ensure the destination's parent directory exists.
	dir := filepath.Dir(dst)
	if err := os.MkdirAll(dir, 0755); err != nil {
		return fmt.Errorf("Failed to make %s: %v", dir, err)
	}
	if config.removeFirst {
		// Best effort: dst may simply not exist yet, so the error is ignored.
		_ = os.Remove(dst)
	}
	return nil
}
+
// replicateFile is called internally by replicate to map a single file from src into dst.
func replicateFile(src, dst string, config *replicateConfig) error {
	if err := replicatePrepare(dst, config); err != nil {
		return err
	}
	switch config.fileMode {
	case copyMode:
		in, err := os.Open(src)
		if err != nil {
			return err
		}
		defer in.Close()
		out, err := os.Create(dst)
		if err != nil {
			return err
		}
		_, err = io.Copy(out, in)
		// Close unconditionally so the handle is not leaked, but report a
		// copy failure in preference to a close failure.
		closeerr := out.Close()
		if err != nil {
			return err
		}
		if closeerr != nil {
			return closeerr
		}
		// Mirror the source file's permission bits onto the copy.
		s, err := os.Stat(src)
		if err != nil {
			return err
		}
		if err := os.Chmod(dst, s.Mode()); err != nil {
			return err
		}
		return nil
	case hardlinkMode:
		return os.Link(src, dst)
	case softlinkMode:
		return os.Symlink(src, dst)
	default:
		return fmt.Errorf("Invalid replication mode %d", config.fileMode)
	}
}
+
+// replicateDir makes a tree of files visible in a new location.
+// It is allowed to take any efficient method of doing so.
+func replicateDir(src, dst string, config *replicateConfig) error {
+ if err := replicatePrepare(dst, config); err != nil {
+ return err
+ }
+ switch config.dirMode {
+ case copyMode:
+ return filepath.Walk(src, func(path string, f os.FileInfo, err error) error {
+ if f.IsDir() {
+ return nil
+ }
+ relative, err := filepath.Rel(src, path)
+ if err != nil {
+ return err
+ }
+ return replicateFile(path, filepath.Join(dst, relative), config)
+ })
+ case hardlinkMode:
+ return os.Link(src, dst)
+ case softlinkMode:
+ return os.Symlink(src, dst)
+ default:
+ return fmt.Errorf("Invalid replication mode %d", config.fileMode)
+ }
+}
+
+// replicateTree is called for each single src dst pair.
+func replicateTree(src, dst string, config *replicateConfig) error {
+ if err := os.RemoveAll(dst); err != nil {
+ return fmt.Errorf("Failed to remove file at destination %s: %v", dst, err)
+ }
+ if l, err := filepath.EvalSymlinks(src); err != nil {
+ return err
+ } else {
+ src = l
+ }
+ if s, err := os.Stat(src); err != nil {
+ return err
+ } else if s.IsDir() {
+ return replicateDir(src, dst, config)
+ }
+ return replicateFile(src, dst, config)
+}
+
+// replicate makes a tree of files visible in a new location.
+// You control how it does so using options, by default it presumes the entire tree
+// of files rooted at src must be visible at dst, and that it should do so by copying.
+// src is allowed to be a file, in which case just the one file is copied.
+func replicate(src, dst string, options ...replicateOption) error {
+ config := replicateConfig{
+ removeFirst: true,
+ }
+ for _, option := range options {
+ option(&config)
+ }
+ if len(config.paths) == 0 {
+ return replicateTree(src, dst, &config)
+ }
+ for _, base := range config.paths {
+ from := filepath.Join(src, base)
+ to := filepath.Join(dst, base)
+ if err := replicateTree(from, to, &config); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/go/tools/builders/stdlib.go b/go/tools/builders/stdlib.go
new file mode 100644
index 00000000..d7b2bf0b
--- /dev/null
+++ b/go/tools/builders/stdlib.go
@@ -0,0 +1,169 @@
+// Copyright 2018 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "flag"
+ "fmt"
+ "go/build"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+)
+
// stdlib builds the standard library in the appropriate mode into a new goroot.
// It expects GOROOT to point at the configured Go SDK, replicates the needed
// subset of it into -out, and runs "go install" there with the requested
// build mode flags (-race / -shared / -dynlink / -gcflags).
func stdlib(args []string) error {
	// process the args
	flags := flag.NewFlagSet("stdlib", flag.ExitOnError)
	goenv := envFlags(flags)
	out := flags.String("out", "", "Path to output go root")
	race := flags.Bool("race", false, "Build in race mode")
	shared := flags.Bool("shared", false, "Build in shared mode")
	dynlink := flags.Bool("dynlink", false, "Build in dynlink mode")
	var packages multiFlag
	flags.Var(&packages, "package", "Packages to build")
	var gcflags quoteMultiFlag
	flags.Var(&gcflags, "gcflags", "Go compiler flags")
	if err := flags.Parse(args); err != nil {
		return err
	}
	if err := goenv.checkFlags(); err != nil {
		return err
	}
	goroot := os.Getenv("GOROOT")
	if goroot == "" {
		return fmt.Errorf("GOROOT not set")
	}
	output := abs(*out)

	// Fail fast if cgo is required but a toolchain is not configured.
	if os.Getenv("CGO_ENABLED") == "1" && filepath.Base(os.Getenv("CC")) == "vc_installation_error.bat" {
		return fmt.Errorf(`cgo is required, but a C toolchain has not been configured.
You may need to use the flags --cpu=x64_windows --compiler=mingw-gcc.`)
	}

	// Link in the bare minimum needed to the new GOROOT
	if err := replicate(goroot, output, replicatePaths("src", "pkg/tool", "pkg/include")); err != nil {
		return err
	}

	output, err := processPath(output)
	if err != nil {
		return err
	}

	// Now switch to the newly created GOROOT
	os.Setenv("GOROOT", output)

	// Create a temporary cache directory. "go build" requires this starting
	// in Go 1.12.
	cachePath := filepath.Join(output, ".gocache")
	os.Setenv("GOCACHE", cachePath)
	defer os.RemoveAll(cachePath)

	// Disable modules for the 'go install' command. Depending on the sandboxing
	// mode, there may be a go.mod file in a parent directory which will turn
	// modules on in "auto" mode.
	os.Setenv("GO111MODULE", "off")

	// Make sure we have an absolute path to the C compiler.
	// TODO(#1357): also take absolute paths of includes and other paths in flags.
	os.Setenv("CC", quotePathIfNeeded(abs(os.Getenv("CC"))))

	// Ensure paths are absolute.
	absPaths := []string{}
	for _, path := range filepath.SplitList(os.Getenv("PATH")) {
		absPaths = append(absPaths, abs(path))
	}
	os.Setenv("PATH", strings.Join(absPaths, string(os.PathListSeparator)))

	// Remembered so -trimpath can strip the sandbox root from debug info below.
	sandboxPath := abs(".")

	// Strip path prefix from source files in debug information.
	os.Setenv("CGO_CFLAGS", os.Getenv("CGO_CFLAGS")+" "+strings.Join(defaultCFlags(output), " "))
	os.Setenv("CGO_LDFLAGS", os.Getenv("CGO_LDFLAGS")+" "+strings.Join(defaultLdFlags(), " "))

	// Allow flags in CGO_LDFLAGS that wouldn't pass the security check.
	// Workaround for golang.org/issue/42565.
	var b strings.Builder
	sep := ""
	cgoLdflags, _ := splitQuoted(os.Getenv("CGO_LDFLAGS"))
	for _, f := range cgoLdflags {
		b.WriteString(sep)
		sep = "|"
		b.WriteString(regexp.QuoteMeta(f))
		// If the flag if -framework, the flag value needs to be in the same
		// condition.
		if f == "-framework" {
			sep = " "
		}
	}
	os.Setenv("CGO_LDFLAGS_ALLOW", b.String())
	// NOTE(review): installgoroot=all appears intended to make "go install"
	// write .a files for all of std into the new GOROOT — confirm against
	// the GODEBUG documentation before changing.
	os.Setenv("GODEBUG", "installgoroot=all")

	// Build the commands needed to build the std library in the right mode
	// NOTE: the go command stamps compiled .a files with build ids, which are
	// cryptographic sums derived from the inputs. This prevents us from
	// creating reproducible builds because the build ids are hashed from
	// CGO_CFLAGS, which frequently contains absolute paths. As a workaround,
	// we strip the build ids, since they won't be used after this.
	installArgs := goenv.goCmd("install", "-toolexec", abs(os.Args[0])+" filterbuildid")
	if len(build.Default.BuildTags) > 0 {
		installArgs = append(installArgs, "-tags", strings.Join(build.Default.BuildTags, ","))
	}

	ldflags := []string{"-trimpath", sandboxPath}
	asmflags := []string{"-trimpath", output}
	if *race {
		installArgs = append(installArgs, "-race")
	}
	if *shared {
		gcflags = append(gcflags, "-shared")
		ldflags = append(ldflags, "-shared")
		asmflags = append(asmflags, "-shared")
	}
	if *dynlink {
		gcflags = append(gcflags, "-dynlink")
		ldflags = append(ldflags, "-dynlink")
		asmflags = append(asmflags, "-dynlink")
	}

	// Since Go 1.10, an all= prefix indicates the flags should apply to the package
	// and its dependencies, rather than just the package itself. This was the
	// default behavior before Go 1.10.
	allSlug := ""
	for _, t := range build.Default.ReleaseTags {
		if t == "go1.10" {
			allSlug = "all="
			break
		}
	}
	installArgs = append(installArgs, "-gcflags="+allSlug+strings.Join(gcflags, " "))
	installArgs = append(installArgs, "-ldflags="+allSlug+strings.Join(ldflags, " "))
	installArgs = append(installArgs, "-asmflags="+allSlug+strings.Join(asmflags, " "))

	// Modifying CGO flags to use only absolute path
	// because go is having its own sandbox, all CGO flags must use absolute path
	if err := absEnv(cgoEnvVars, cgoAbsEnvFlags); err != nil {
		return fmt.Errorf("error modifying cgo environment to absolute path: %v", err)
	}

	installArgs = append(installArgs, packages...)
	if err := goenv.runCommand(installArgs); err != nil {
		return err
	}
	return nil
}
diff --git a/go/tools/builders/stdliblist.go b/go/tools/builders/stdliblist.go
new file mode 100644
index 00000000..f6a61442
--- /dev/null
+++ b/go/tools/builders/stdliblist.go
@@ -0,0 +1,293 @@
+// Copyright 2021 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "bytes"
+ "encoding/json"
+ "flag"
+ "fmt"
+ "go/build"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
// Copy and pasted from golang.org/x/tools/go/packages

// flatPackagesError is a diagnostic attached to a package in the JSON output.
type flatPackagesError struct {
	Pos  string // "file:line:col" or "file:line" or "" or "-"
	Msg  string
	Kind flatPackagesErrorKind
}

// flatPackagesErrorKind classifies the phase that produced the error.
type flatPackagesErrorKind int

const (
	UnknownError flatPackagesErrorKind = iota
	ListError
	ParseError
	TypeError
)
+
+func (err flatPackagesError) Error() string {
+ pos := err.Pos
+ if pos == "" {
+ pos = "-" // like token.Position{}.String()
+ }
+ return pos + ": " + err.Msg
+}
+
// flatPackage is the JSON form of Package
// It drops all the type and syntax fields, and transforms the Imports
// into a map from import path to package ID.
type flatPackage struct {
	ID              string
	Name            string              `json:",omitempty"`
	PkgPath         string              `json:",omitempty"`
	Standard        bool                `json:",omitempty"`
	Errors          []flatPackagesError `json:",omitempty"`
	GoFiles         []string            `json:",omitempty"`
	CompiledGoFiles []string            `json:",omitempty"`
	OtherFiles      []string            `json:",omitempty"`
	ExportFile      string              `json:",omitempty"`
	Imports         map[string]string   `json:",omitempty"`
}
+
// goListPackage mirrors the subset of the "go list -json" record that this
// builder consumes.
type goListPackage struct {
	Dir        string // directory containing package sources
	ImportPath string // import path of package in dir
	Name       string // package name
	Target     string // install path
	Goroot     bool   // is this package in the Go root?
	Standard   bool   // is this package part of the standard Go library?
	Root       string // Go root or Go path dir containing this package
	Export     string // file containing export data (when using -export)
	// Source files
	GoFiles           []string // .go source files (excluding CgoFiles, TestGoFiles, XTestGoFiles)
	CgoFiles          []string // .go source files that import "C"
	CompiledGoFiles   []string // .go files presented to compiler (when using -compiled)
	IgnoredGoFiles    []string // .go source files ignored due to build constraints
	IgnoredOtherFiles []string // non-.go source files ignored due to build constraints
	CFiles            []string // .c source files
	CXXFiles          []string // .cc, .cxx and .cpp source files
	MFiles            []string // .m source files
	HFiles            []string // .h, .hh, .hpp and .hxx source files
	FFiles            []string // .f, .F, .for and .f90 Fortran source files
	SFiles            []string // .s source files
	SwigFiles         []string // .swig files
	SwigCXXFiles      []string // .swigcxx files
	SysoFiles         []string // .syso object files to add to archive
	TestGoFiles       []string // _test.go files in package
	XTestGoFiles      []string // _test.go files outside package
	// Embedded files
	EmbedPatterns      []string // //go:embed patterns
	EmbedFiles         []string // files matched by EmbedPatterns
	TestEmbedPatterns  []string // //go:embed patterns in TestGoFiles
	TestEmbedFiles     []string // files matched by TestEmbedPatterns
	XTestEmbedPatterns []string // //go:embed patterns in XTestGoFiles
	XTestEmbedFiles    []string // files matched by XTestEmbedPatterns
	// Dependency information
	Imports   []string          // import paths used by this package
	ImportMap map[string]string // map from source import to ImportPath (identity entries omitted)
	// Error information
	Incomplete bool                 // this package or a dependency has an error
	Error      *flatPackagesError   // error loading package
	DepsErrors []*flatPackagesError // errors loading dependencies
}
+
// stdlibPackageID returns the Bazel label used as the package ID for the
// given standard-library import path.
func stdlibPackageID(importPath string) string {
	return fmt.Sprintf("@io_bazel_rules_go//stdlib:%s", importPath)
}
+
// outputBasePath rewrites p, which lives under cloneBase, so that it is
// anchored at the __BAZEL_OUTPUT_BASE__ placeholder instead.
// A failure to relativize is deliberately ignored (mirrors original behavior).
func outputBasePath(cloneBase, p string) string {
	rel, _ := filepath.Rel(cloneBase, p)
	return filepath.Join("__BAZEL_OUTPUT_BASE__", rel)
}
+
+// absoluteSourcesPaths replace cloneBase of the absolution
+// paths with the label for all source files in a package
+func absoluteSourcesPaths(cloneBase, pkgDir string, srcs []string) []string {
+ ret := make([]string, 0, len(srcs))
+ pkgDir = outputBasePath(cloneBase, pkgDir)
+ for _, src := range srcs {
+ absPath := src
+
+ // Generated files will already have an absolute path. These come from
+ // the compiler's cache.
+ if !filepath.IsAbs(src) {
+ absPath = filepath.Join(pkgDir, src)
+ }
+
+ ret = append(ret, absPath)
+ }
+ return ret
+}
+
// filterGoFiles keeps only files either ending in .go or those without an
// extension (which are from the cache). This is a work around for
// https://golang.org/issue/28749: cmd/go puts assembly, C, and C++ files in
// CompiledGoFiles.
func filterGoFiles(srcs []string) []string {
	kept := make([]string, 0, len(srcs))
	for _, src := range srcs {
		switch filepath.Ext(src) {
		case ".go", "":
			kept = append(kept, src)
		}
	}
	return kept
}
+
// flatPackageForStd converts one "go list -json" record into the flatPackage
// form, rewriting machine-local paths under cloneBase into
// __BAZEL_OUTPUT_BASE__-anchored paths and import paths into stdlib labels.
func flatPackageForStd(cloneBase string, pkg *goListPackage) *flatPackage {
	goFiles := absoluteSourcesPaths(cloneBase, pkg.Dir, pkg.GoFiles)
	compiledGoFiles := absoluteSourcesPaths(cloneBase, pkg.Dir, pkg.CompiledGoFiles)

	newPkg := &flatPackage{
		ID:              stdlibPackageID(pkg.ImportPath),
		Name:            pkg.Name,
		PkgPath:         pkg.ImportPath,
		ExportFile:      outputBasePath(cloneBase, pkg.Target),
		Imports:         map[string]string{},
		Standard:        pkg.Standard,
		GoFiles:         goFiles,
		CompiledGoFiles: filterGoFiles(compiledGoFiles),
	}

	// imports
	//
	// Imports contains the IDs of all imported packages.
	// ImportsMap records (path, ID) only where they differ.
	ids := make(map[string]struct{})
	for _, id := range pkg.Imports {
		ids[id] = struct{}{}
	}

	// Remapped imports: key by the source path, point at the resolved ID.
	for path, id := range pkg.ImportMap {
		newPkg.Imports[path] = stdlibPackageID(id)
		delete(ids, id)
	}

	for id := range ids {
		// "C" is cgo's pseudo-package; it has no stdlib label.
		if id != "C" {
			newPkg.Imports[id] = stdlibPackageID(id)
		}
	}

	return newPkg
}
+
// stdliblist runs `go list -json` on the standard library and saves it to a file.
// Each package record is rewritten (see flatPackageForStd) so that its paths
// use Bazel placeholder labels rather than machine-local paths.
func stdliblist(args []string) error {
	// process the args
	flags := flag.NewFlagSet("stdliblist", flag.ExitOnError)
	goenv := envFlags(flags)
	out := flags.String("out", "", "Path to output go list json")
	if err := flags.Parse(args); err != nil {
		return err
	}
	if err := goenv.checkFlags(); err != nil {
		return err
	}

	// The SDK must be addressed relative to the execroot so the clone below
	// can reproduce the same relative layout.
	if filepath.IsAbs(goenv.sdk) {
		return fmt.Errorf("-sdk needs to be a relative path, but got %s", goenv.sdk)
	}

	// In Go 1.18, the standard library started using go:embed directives.
	// When Bazel runs this action, it does so inside a sandbox where GOROOT points
	// to an external/go_sdk directory that contains a symlink farm of all files in
	// the Go SDK.
	// If we run "go list" with that GOROOT, this action will fail because those
	// go:embed directives will refuse to include the symlinks in the sandbox.
	//
	// To work around this, cloneGoRoot creates a copy of a subset of external/go_sdk
	// that is sufficient to call "go list" into a new cloneBase directory, e.g.
	// "go list" needs to call "compile", which needs "pkg/tool".
	// We also need to retain the same relative path to the root directory, e.g.
	// "$OUTPUT_BASE/external/go_sdk" becomes
	// {cloneBase}/external/go_sdk", which will be set at GOROOT later. This ensures
	// that file paths in the generated JSON are still valid.
	//
	// Here we replicate goRoot(absolute path of goenv.sdk) to newGoRoot.
	cloneBase, cleanup, err := goenv.workDir()
	if err != nil {
		return err
	}
	defer func() { cleanup() }()

	newGoRoot := filepath.Join(cloneBase, goenv.sdk)
	if err := replicate(abs(goenv.sdk), abs(newGoRoot), replicatePaths("src", "pkg/tool", "pkg/include")); err != nil {
		return err
	}

	// Ensure paths are absolute.
	absPaths := []string{}
	for _, path := range filepath.SplitList(os.Getenv("PATH")) {
		absPaths = append(absPaths, abs(path))
	}
	os.Setenv("PATH", strings.Join(absPaths, string(os.PathListSeparator)))
	os.Setenv("GOROOT", newGoRoot)

	cgoEnabled := os.Getenv("CGO_ENABLED") == "1"
	// Make sure we have an absolute path to the C compiler.
	// TODO(#1357): also take absolute paths of includes and other paths in flags.
	ccEnv, ok := os.LookupEnv("CC")
	if cgoEnabled && !ok {
		return fmt.Errorf("CC must be set")
	}
	os.Setenv("CC", quotePathIfNeeded(abs(ccEnv)))

	// We want to keep the cache around so that the processed files can be used by other tools.
	cachePath := abs(*out + ".gocache")
	os.Setenv("GOCACHE", cachePath)
	os.Setenv("GOMODCACHE", cachePath)
	os.Setenv("GOPATH", cachePath)

	listArgs := goenv.goCmd("list")
	if len(build.Default.BuildTags) > 0 {
		listArgs = append(listArgs, "-tags", strings.Join(build.Default.BuildTags, ","))
	}

	if cgoEnabled {
		listArgs = append(listArgs, "-compiled=true")
	}

	listArgs = append(listArgs, "-json", "builtin", "std", "runtime/cgo")

	jsonFile, err := os.Create(*out)
	if err != nil {
		return err
	}
	defer jsonFile.Close()

	// Buffer the raw "go list" output, then re-encode each package record
	// in its flattened, path-rewritten form.
	jsonData := &bytes.Buffer{}
	if err := goenv.runCommandToFile(jsonData, os.Stderr, listArgs); err != nil {
		return err
	}

	encoder := json.NewEncoder(jsonFile)
	decoder := json.NewDecoder(jsonData)
	for decoder.More() {
		var pkg *goListPackage
		if err := decoder.Decode(&pkg); err != nil {
			return err
		}
		if err := encoder.Encode(flatPackageForStd(cloneBase, pkg)); err != nil {
			return err
		}
	}

	return nil
}
diff --git a/go/tools/builders/stdliblist_test.go b/go/tools/builders/stdliblist_test.go
new file mode 100644
index 00000000..b456b0be
--- /dev/null
+++ b/go/tools/builders/stdliblist_test.go
@@ -0,0 +1,48 @@
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+)
+
+// Test_stdliblist runs the stdliblist builder against the Bazel-provided Go
+// SDK (external/go_sdk) and validates the shape of the JSON package list it
+// writes: IDs, export files, and Go file paths must carry the expected
+// Bazel-relative prefixes.
+func Test_stdliblist(t *testing.T) {
+	testDir := t.TempDir()
+	outJSON := filepath.Join(testDir, "out.json")
+
+	testArgs := []string{
+		fmt.Sprintf("-out=%s", outJSON),
+		"-sdk=external/go_sdk",
+	}
+
+	if err := stdliblist(testArgs); err != nil {
+		// Fatal: without a generated output file the checks below are meaningless.
+		t.Fatalf("calling stdliblist got err: %v", err)
+	}
+	f, err := os.Open(outJSON)
+	if err != nil {
+		// Fatal: decoding from a nil *os.File would panic.
+		t.Fatalf("cannot open output json: %v", err)
+	}
+	defer func() { _ = f.Close() }()
+	decoder := json.NewDecoder(f)
+	for decoder.More() {
+		var result *flatPackage
+		if err := decoder.Decode(&result); err != nil {
+			// Fatal: a decode error would otherwise spin this loop forever,
+			// since More() keeps reporting buffered input.
+			t.Fatalf("unable to decode output json: %v", err)
+		}
+
+		if !strings.HasPrefix(result.ID, "@io_bazel_rules_go//stdlib") {
+			t.Errorf("ID should be prefixed with @io_bazel_rules_go//stdlib :%v", result)
+		}
+		if !strings.HasPrefix(result.ExportFile, "__BAZEL_OUTPUT_BASE__") {
+			t.Errorf("export file should be prefixed with __BAZEL_OUTPUT_BASE__ :%v", result)
+		}
+		for _, gofile := range result.GoFiles {
+			if !strings.HasPrefix(gofile, "__BAZEL_OUTPUT_BASE__/external/go_sdk") {
+				t.Errorf("all go files should be prefixed with __BAZEL_OUTPUT_BASE__/external/go_sdk :%v", result)
+			}
+		}
+	}
+}
diff --git a/go/tools/bzltestutil/BUILD.bazel b/go/tools/bzltestutil/BUILD.bazel
new file mode 100644
index 00000000..cbb87e84
--- /dev/null
+++ b/go/tools/bzltestutil/BUILD.bazel
@@ -0,0 +1,45 @@
+load("//go:def.bzl", "go_test", "go_tool_library")
+
+go_tool_library(
+ name = "bzltestutil",
+ srcs = [
+ "init.go",
+ "lcov.go",
+ "test2json.go",
+ "wrap.go",
+ "xml.go",
+ ],
+ importpath = "github.com/bazelbuild/rules_go/go/tools/bzltestutil",
+ visibility = ["//visibility:public"],
+)
+
+go_test(
+ name = "bzltestutil_test",
+ srcs = [
+ "lcov_test.go",
+ "wrap_test.go",
+ "xml_test.go",
+ ],
+ # Empty in integration tests or in distributions.
+ data = glob(
+ ["testdata/**"],
+ allow_empty = True,
+ ),
+ embed = [":bzltestutil"],
+)
+
+filegroup(
+ name = "all_files",
+ testonly = True,
+ srcs = glob(
+ ["**"],
+ exclude = ["testdata/*"],
+ ),
+ visibility = ["//visibility:public"],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":bzltestutil",
+ visibility = ["//visibility:public"],
+)
diff --git a/go/tools/bzltestutil/init.go b/go/tools/bzltestutil/init.go
new file mode 100644
index 00000000..ca9852ed
--- /dev/null
+++ b/go/tools/bzltestutil/init.go
@@ -0,0 +1,60 @@
+// Copyright 2020 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bzltestutil
+
+// This package must have no deps beyond Go SDK.
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+)
+
+var (
+	// RunDir is the directory the test should run in, relative to the
+	// runfiles root. Initialized by linker.
+	RunDir string
+
+	// testExecDir is the working directory the test binary started in,
+	// recorded before this package changes into RunDir.
+	testExecDir string
+)
+
+// This initializer runs before any user packages. It records the original
+// working directory and, when run under the Bazel test runner, changes into
+// the directory requested via RunDir.
+func init() {
+	var err error
+	testExecDir, err = os.Getwd()
+	if err != nil {
+		panic(err)
+	}
+
+	// Check if we're being run by Bazel and change directories if so.
+	// TEST_SRCDIR and TEST_WORKSPACE are set by the Bazel test runner, so that makes a decent proxy.
+	testSrcDir, hasSrcDir := os.LookupEnv("TEST_SRCDIR")
+	testWorkspace, hasWorkspace := os.LookupEnv("TEST_WORKSPACE")
+	if hasSrcDir && hasWorkspace && RunDir != "" {
+		abs := RunDir
+		if !filepath.IsAbs(RunDir) {
+			abs = filepath.Join(testSrcDir, testWorkspace, RunDir)
+		}
+		err := os.Chdir(abs)
+		// Ignore the Chdir err when on Windows, since it might have runfiles symlinks.
+		// https://github.com/bazelbuild/rules_go/pull/1721#issuecomment-422145904
+		if err != nil && runtime.GOOS != "windows" {
+			panic(fmt.Sprintf("could not change to test directory: %v", err))
+		}
+		if err == nil {
+			// Keep PWD consistent with the new working directory for
+			// subprocesses that consult it.
+			os.Setenv("PWD", abs)
+		}
+	}
+}
diff --git a/go/tools/bzltestutil/lcov.go b/go/tools/bzltestutil/lcov.go
new file mode 100644
index 00000000..8b94b162
--- /dev/null
+++ b/go/tools/bzltestutil/lcov.go
@@ -0,0 +1,178 @@
+// Copyright 2022 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bzltestutil
+
+import (
+ "bufio"
+ "flag"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "testing/internal/testdeps"
+)
+
+// ConvertCoverToLcov converts the Go coverprofile written by the test binary
+// (at the path given by the -test.coverprofile flag) to LCOV format and
+// stores it as a fresh *.dat file in $COVERAGE_DIR, where it is picked up by
+// Bazel.
+// The conversion emits line coverage only (no function or branch records).
+func ConvertCoverToLcov() error {
+	inPath := flag.Lookup("test.coverprofile").Value.String()
+	in, err := os.Open(inPath)
+	if err != nil {
+		// This can happen if there are no tests and should not be an error.
+		log.Printf("Not collecting coverage: %s has not been created: %s", inPath, err)
+		return nil
+	}
+	defer in.Close()
+
+	// All *.dat files in $COVERAGE_DIR will be merged by Bazel's lcov_merger tool.
+	out, err := os.CreateTemp(os.Getenv("COVERAGE_DIR"), "go_coverage.*.dat")
+	if err != nil {
+		return err
+	}
+	defer out.Close()
+
+	return convertCoverToLcov(in, out)
+}
+
+var _coverLinePattern = regexp.MustCompile(`^(?P<path>.+):(?P<startLine>\d+)\.(?P<startColumn>\d+),(?P<endLine>\d+)\.(?P<endColumn>\d+) (?P<numStmt>\d+) (?P<count>\d+)$`)
+
+const (
+ _pathIdx = 1
+ _startLineIdx = 2
+ _endLineIdx = 4
+ _countIdx = 7
+)
+
+// convertCoverToLcov reads a Go cover profile from coverReader and writes the
+// equivalent LCOV records to lcovWriter.
+//
+// The profile lists one source range per line; ranges belonging to the same
+// file are assumed to be contiguous in the input. When ranges overlap, each
+// source line keeps the maximum execution count observed.
+func convertCoverToLcov(coverReader io.Reader, lcovWriter io.Writer) error {
+	cover := bufio.NewScanner(coverReader)
+	lcov := bufio.NewWriter(lcovWriter)
+	defer lcov.Flush()
+	currentPath := ""
+	var lineCounts map[uint32]uint32
+	for cover.Scan() {
+		l := cover.Text()
+		m := _coverLinePattern.FindStringSubmatch(l)
+		if m == nil {
+			// The first profile line only declares the cover mode
+			// ("mode: set|count|atomic") and carries no range data.
+			if strings.HasPrefix(l, "mode: ") {
+				continue
+			}
+			return fmt.Errorf("invalid go cover line: %s", l)
+		}
+
+		if m[_pathIdx] != currentPath {
+			// A new source file begins: flush the counts accumulated for the
+			// previous file before starting over.
+			if currentPath != "" {
+				if err := emitLcovLines(lcov, currentPath, lineCounts); err != nil {
+					return err
+				}
+			}
+			currentPath = m[_pathIdx]
+			lineCounts = make(map[uint32]uint32)
+		}
+
+		startLine, err := strconv.ParseUint(m[_startLineIdx], 10, 32)
+		if err != nil {
+			return err
+		}
+		endLine, err := strconv.ParseUint(m[_endLineIdx], 10, 32)
+		if err != nil {
+			return err
+		}
+		count, err := strconv.ParseUint(m[_countIdx], 10, 32)
+		if err != nil {
+			return err
+		}
+		for line := uint32(startLine); line <= uint32(endLine); line++ {
+			// Keep the highest count seen for a line across overlapping ranges.
+			prevCount, ok := lineCounts[line]
+			if !ok || uint32(count) > prevCount {
+				lineCounts[line] = uint32(count)
+			}
+		}
+	}
+	// bufio.Scanner does not return read errors from Scan; surface them here
+	// instead of silently truncating the coverage report.
+	if err := cover.Err(); err != nil {
+		return err
+	}
+	if currentPath != "" {
+		if err := emitLcovLines(lcov, currentPath, lineCounts); err != nil {
+			return err
+		}
+	}
+	// Flush explicitly on success so buffered-write errors are reported;
+	// the deferred Flush above only covers early-return paths.
+	return lcov.Flush()
+}
+
+// emitLcovLines writes one complete LCOV record for path: the SF header, a
+// DA entry per instrumented line in ascending line order, the LH/LF summary,
+// and the end_of_record terminator.
+func emitLcovLines(lcov io.StringWriter, path string, lineCounts map[uint32]uint32) error {
+	if _, err := lcov.WriteString(fmt.Sprintf("SF:%s\n", path)); err != nil {
+		return err
+	}
+
+	// Map iteration order is random; sort the line numbers so the output is
+	// deterministic.
+	lines := make([]uint32, 0, len(lineCounts))
+	for ln := range lineCounts {
+		lines = append(lines, ln)
+	}
+	sort.Slice(lines, func(i, j int) bool { return lines[i] < lines[j] })
+
+	// Emit the coverage counters for the individual source lines.
+	covered := 0
+	for _, ln := range lines {
+		hits := lineCounts[ln]
+		if hits > 0 {
+			covered++
+		}
+		if _, err := lcov.WriteString(fmt.Sprintf("DA:%d,%d\n", ln, hits)); err != nil {
+			return err
+		}
+	}
+
+	// Summarize covered (LH) vs. instrumented (LF) lines and close the record.
+	if _, err := lcov.WriteString(fmt.Sprintf("LH:%d\nLF:%d\nend_of_record\n", covered, len(lines))); err != nil {
+		return err
+	}
+	return nil
+}
+
+// LcovTestDeps is a patched version of testdeps.TestDeps that allows to
+// hook into the SetPanicOnExit0 call happening right before testing.M.Run
+// returns.
+// This trick relies on the testDeps interface defined in this package being
+// identical to the actual testing.testDeps interface, which differs between
+// major versions of Go.
+type LcovTestDeps struct {
+	testdeps.TestDeps
+	// OriginalPanicOnExit is the value forwarded to the embedded
+	// TestDeps.SetPanicOnExit0 in place of the intercepted argument.
+	OriginalPanicOnExit bool
+}
+
+// SetPanicOnExit0 is called with true by m.Run() before running all tests,
+// and with false right before returning -- after writing all coverage
+// profiles.
+// https://cs.opensource.google/go/go/+/refs/tags/go1.18.1:src/testing/testing.go;l=1921-1931;drc=refs%2Ftags%2Fgo1.18.1
+//
+// This gives us a good place to intercept the os.Exit(m.Run()) with coverage
+// data already available.
+func (ltd LcovTestDeps) SetPanicOnExit0(panicOnExit bool) {
+	if !panicOnExit {
+		// m.Run is about to return and the coverage profile has been
+		// written, so convert it to LCOV now.
+		lcovAtExitHook()
+	}
+	ltd.TestDeps.SetPanicOnExit0(ltd.OriginalPanicOnExit)
+}
+
+// lcovAtExitHook converts the Go coverage profile to LCOV, terminating the
+// process with TestWrapperAbnormalExit if the conversion fails.
+func lcovAtExitHook() {
+	if err := ConvertCoverToLcov(); err != nil {
+		log.Printf("Failed to collect coverage: %s", err)
+		os.Exit(TestWrapperAbnormalExit)
+	}
+}
diff --git a/go/tools/bzltestutil/lcov_test.go b/go/tools/bzltestutil/lcov_test.go
new file mode 100644
index 00000000..3dc595b6
--- /dev/null
+++ b/go/tools/bzltestutil/lcov_test.go
@@ -0,0 +1,71 @@
+package bzltestutil
+
+import (
+ "strings"
+ "testing"
+)
+
+// TestConvertCoverToLcov exercises the cover-profile-to-LCOV conversion over
+// empty input, mode-only input, and single- and multi-file profiles.
+func TestConvertCoverToLcov(t *testing.T) {
+	var tests = []struct {
+		name         string
+		goCover      string
+		expectedLcov string
+	}{
+		{
+			"empty",
+			"",
+			"",
+		},
+		{
+			"mode only",
+			"mode: set\n",
+			"",
+		},
+		{
+			"single file",
+			`mode: count
+file.go:0.4,2.10 0 0
+`,
+			`SF:file.go
+DA:0,0
+DA:1,0
+DA:2,0
+LH:0
+LF:3
+end_of_record
+`,
+		},
+		{
+			"narrow ranges",
+			`mode: atomic
+path/to/pkg/file.go:0.1,0.2 5 1
+path/to/pkg/file2.go:1.2,1.2 4 2
+`,
+			`SF:path/to/pkg/file.go
+DA:0,1
+LH:1
+LF:1
+end_of_record
+SF:path/to/pkg/file2.go
+DA:1,2
+LH:1
+LF:1
+end_of_record
+`,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			in := strings.NewReader(tt.goCover)
+			var out strings.Builder
+			err := convertCoverToLcov(in, &out)
+			if err != nil {
+				t.Errorf("convertCoverToLcov returned unexpected error: %+v", err)
+			}
+			actualLcov := out.String()
+			if actualLcov != tt.expectedLcov {
+				// Message previously misspelled the function under test
+				// ("covertCoverToLcov").
+				t.Errorf("convertCoverToLcov returned:\n%q\n, expected:\n%q\n", actualLcov, tt.expectedLcov)
+			}
+		})
+	}
+}
diff --git a/go/tools/bzltestutil/test2json.go b/go/tools/bzltestutil/test2json.go
new file mode 100644
index 00000000..331773d0
--- /dev/null
+++ b/go/tools/bzltestutil/test2json.go
@@ -0,0 +1,482 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package test2json implements conversion of test binary output to JSON.
+// It is used by cmd/test2json and cmd/go.
+//
+// See the cmd/test2json documentation for details of the JSON encoding.
+//
+// The file test2json.go was copied from upstream go at
+// src/cmd/internal/test2json/test2json.go, revision
+// 1b86bdbdc3991c13c6ed156100a5f4918fdd9c6b. At the time of writing this was
+// deemed the best way of depending on this code that is otherwise not exposed
+// outside of the go toolchain. These files should be kept in sync.
+
+package bzltestutil
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "time"
+ "unicode"
+ "unicode/utf8"
+)
+
+// Mode controls details of the conversion.
+type Mode int
+
+const (
+ Timestamp Mode = 1 << iota // include Time in events
+)
+
+// event is the JSON struct we emit.
+type event struct {
+ Time *time.Time `json:",omitempty"`
+ Action string
+ Package string `json:",omitempty"`
+ Test string `json:",omitempty"`
+ Elapsed *float64 `json:",omitempty"`
+ Output *textBytes `json:",omitempty"`
+}
+
+// textBytes is a hack to get JSON to emit a []byte as a string
+// without actually copying it to a string.
+// It implements encoding.TextMarshaler, which returns its text form as a []byte,
+// and then json encodes that text form as a string (which was our goal).
+type textBytes []byte
+
+func (b textBytes) MarshalText() ([]byte, error) { return b, nil }
+
+// A Converter holds the state of a test-to-JSON conversion.
+// It implements io.WriteCloser; the caller writes test output in,
+// and the converter writes JSON output to w.
+type Converter struct {
+ w io.Writer // JSON output stream
+ pkg string // package to name in events
+ mode Mode // mode bits
+ start time.Time // time converter started
+ testName string // name of current test, for output attribution
+ report []*event // pending test result reports (nested for subtests)
+ result string // overall test result if seen
+ input lineBuffer // input buffer
+ output lineBuffer // output buffer
+}
+
+// inBuffer and outBuffer are the input and output buffer sizes.
+// They're variables so that they can be reduced during testing.
+//
+// The input buffer needs to be able to hold any single test
+// directive line we want to recognize, like:
+//
+// <many spaces> --- PASS: very/nested/s/u/b/t/e/s/t
+//
+// If anyone reports a test directive line > 4k not working, it will
+// be defensible to suggest they restructure their test or test names.
+//
+// The output buffer must be >= utf8.UTFMax, so that it can
+// accumulate any single UTF8 sequence. Lines that fit entirely
+// within the output buffer are emitted in single output events.
+// Otherwise they are split into multiple events.
+// The output buffer size therefore limits the size of the encoding
+// of a single JSON output event. 1k seems like a reasonable balance
+// between wanting to avoid splitting an output line and not wanting to
+// generate enormous output events.
+var (
+ inBuffer = 4096
+ outBuffer = 1024
+)
+
+// NewConverter returns a "test to json" converter.
+// Writes on the returned writer are written as JSON to w,
+// with minimal delay.
+//
+// The writes to w are whole JSON events ending in \n,
+// so that it is safe to run multiple tests writing to multiple converters
+// writing to a single underlying output stream w.
+// As long as the underlying output w can handle concurrent writes
+// from multiple goroutines, the result will be a JSON stream
+// describing the relative ordering of execution in all the concurrent tests.
+//
+// The mode flag adjusts the behavior of the converter.
+// Passing ModeTime includes event timestamps and elapsed times.
+//
+// The pkg string, if present, specifies the import path to
+// report in the JSON stream.
+func NewConverter(w io.Writer, pkg string, mode Mode) *Converter {
+ c := new(Converter)
+ *c = Converter{
+ w: w,
+ pkg: pkg,
+ mode: mode,
+ start: time.Now(),
+ input: lineBuffer{
+ b: make([]byte, 0, inBuffer),
+ line: c.handleInputLine,
+ part: c.output.write,
+ },
+ output: lineBuffer{
+ b: make([]byte, 0, outBuffer),
+ line: c.writeOutputEvent,
+ part: c.writeOutputEvent,
+ },
+ }
+ return c
+}
+
+// Write writes the test input to the converter.
+func (c *Converter) Write(b []byte) (int, error) {
+ c.input.write(b)
+ return len(b), nil
+}
+
+// Exited marks the test process as having exited with the given error.
+func (c *Converter) Exited(err error) {
+ if err == nil {
+ c.result = "pass"
+ } else {
+ c.result = "fail"
+ }
+}
+
+var (
+ // printed by test on successful run.
+ bigPass = []byte("PASS\n")
+
+ // printed by test after a normal test failure.
+ bigFail = []byte("FAIL\n")
+
+ // printed by 'go test' along with an error if the test binary terminates
+ // with an error.
+ bigFailErrorPrefix = []byte("FAIL\t")
+
+ updates = [][]byte{
+ []byte("=== RUN "),
+ []byte("=== PAUSE "),
+ []byte("=== CONT "),
+ }
+
+ reports = [][]byte{
+ []byte("--- PASS: "),
+ []byte("--- FAIL: "),
+ []byte("--- SKIP: "),
+ []byte("--- BENCH: "),
+ }
+
+ fourSpace = []byte(" ")
+
+ skipLinePrefix = []byte("? \t")
+ skipLineSuffix = []byte("\t[no test files]\n")
+)
+
+// handleInputLine handles a single whole test output line.
+// It must write the line to c.output but may choose to do so
+// before or after emitting other events.
+func (c *Converter) handleInputLine(line []byte) {
+ // Final PASS or FAIL.
+ if bytes.Equal(line, bigPass) || bytes.Equal(line, bigFail) || bytes.HasPrefix(line, bigFailErrorPrefix) {
+ c.flushReport(0)
+ c.output.write(line)
+ if bytes.Equal(line, bigPass) {
+ c.result = "pass"
+ } else {
+ c.result = "fail"
+ }
+ return
+ }
+
+ // Special case for entirely skipped test binary: "? \tpkgname\t[no test files]\n" is only line.
+ // Report it as plain output but remember to say skip in the final summary.
+ if bytes.HasPrefix(line, skipLinePrefix) && bytes.HasSuffix(line, skipLineSuffix) && len(c.report) == 0 {
+ c.result = "skip"
+ }
+
+ // "=== RUN "
+ // "=== PAUSE "
+ // "=== CONT "
+ actionColon := false
+ origLine := line
+ ok := false
+ indent := 0
+ for _, magic := range updates {
+ if bytes.HasPrefix(line, magic) {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ // "--- PASS: "
+ // "--- FAIL: "
+ // "--- SKIP: "
+ // "--- BENCH: "
+ // but possibly indented.
+ for bytes.HasPrefix(line, fourSpace) {
+ line = line[4:]
+ indent++
+ }
+ for _, magic := range reports {
+ if bytes.HasPrefix(line, magic) {
+ actionColon = true
+ ok = true
+ break
+ }
+ }
+ }
+
+ // Not a special test output line.
+ if !ok {
+ // Lookup the name of the test which produced the output using the
+ // indentation of the output as an index into the stack of the current
+ // subtests.
+ // If the indentation is greater than the number of current subtests
+ // then the output must have included extra indentation. We can't
+ // determine which subtest produced this output, so we default to the
+ // old behaviour of assuming the most recently run subtest produced it.
+ if indent > 0 && indent <= len(c.report) {
+ c.testName = c.report[indent-1].Test
+ }
+ c.output.write(origLine)
+ return
+ }
+
+ // Parse out action and test name.
+ i := 0
+ if actionColon {
+ i = bytes.IndexByte(line, ':') + 1
+ }
+ if i == 0 {
+ i = len(updates[0])
+ }
+ action := strings.ToLower(strings.TrimSuffix(strings.TrimSpace(string(line[4:i])), ":"))
+ name := strings.TrimSpace(string(line[i:]))
+
+ e := &event{Action: action}
+ if line[0] == '-' { // PASS or FAIL report
+ // Parse out elapsed time.
+ if i := strings.Index(name, " ("); i >= 0 {
+ if strings.HasSuffix(name, "s)") {
+ t, err := strconv.ParseFloat(name[i+2:len(name)-2], 64)
+ if err == nil {
+ if c.mode&Timestamp != 0 {
+ e.Elapsed = &t
+ }
+ }
+ }
+ name = name[:i]
+ }
+ if len(c.report) < indent {
+ // Nested deeper than expected.
+ // Treat this line as plain output.
+ c.output.write(origLine)
+ return
+ }
+ // Flush reports at this indentation level or deeper.
+ c.flushReport(indent)
+ e.Test = name
+ c.testName = name
+ c.report = append(c.report, e)
+ c.output.write(origLine)
+ return
+ }
+ // === update.
+ // Finish any pending PASS/FAIL reports.
+ c.flushReport(0)
+ c.testName = name
+
+ if action == "pause" {
+ // For a pause, we want to write the pause notification before
+ // delivering the pause event, just so it doesn't look like the test
+ // is generating output immediately after being paused.
+ c.output.write(origLine)
+ }
+ c.writeEvent(e)
+ if action != "pause" {
+ c.output.write(origLine)
+ }
+
+ return
+}
+
+// flushReport flushes all pending PASS/FAIL reports at levels >= depth.
+func (c *Converter) flushReport(depth int) {
+ c.testName = ""
+ for len(c.report) > depth {
+ e := c.report[len(c.report)-1]
+ c.report = c.report[:len(c.report)-1]
+ c.writeEvent(e)
+ }
+}
+
+// Close marks the end of the go test output.
+// It flushes any pending input and then output (only partial lines at this point)
+// and then emits the final overall package-level pass/fail event.
+func (c *Converter) Close() error {
+ c.input.flush()
+ c.output.flush()
+ if c.result != "" {
+ e := &event{Action: c.result}
+ if c.mode&Timestamp != 0 {
+ dt := time.Since(c.start).Round(1 * time.Millisecond).Seconds()
+ e.Elapsed = &dt
+ }
+ c.writeEvent(e)
+ }
+ return nil
+}
+
+// writeOutputEvent writes a single output event with the given bytes.
+func (c *Converter) writeOutputEvent(out []byte) {
+ c.writeEvent(&event{
+ Action: "output",
+ Output: (*textBytes)(&out),
+ })
+}
+
+// writeEvent writes a single event.
+// It adds the package, time (if requested), and test name (if needed).
+func (c *Converter) writeEvent(e *event) {
+ e.Package = c.pkg
+ if c.mode&Timestamp != 0 {
+ t := time.Now()
+ e.Time = &t
+ }
+ if e.Test == "" {
+ e.Test = c.testName
+ }
+ js, err := json.Marshal(e)
+ if err != nil {
+ // Should not happen - event is valid for json.Marshal.
+ c.w.Write([]byte(fmt.Sprintf("testjson internal error: %v\n", err)))
+ return
+ }
+ js = append(js, '\n')
+ c.w.Write(js)
+}
+
+// A lineBuffer is an I/O buffer that reacts to writes by invoking
+// input-processing callbacks on whole lines or (for long lines that
+// have been split) line fragments.
+//
+// It should be initialized with b set to a buffer of length 0 but non-zero capacity,
+// and line and part set to the desired input processors.
+// The lineBuffer will call line(x) for any whole line x (including the final newline)
+// that fits entirely in cap(b). It will handle input lines longer than cap(b) by
+// calling part(x) for sections of the line. The line will be split at UTF8 boundaries,
+// and the final call to part for a long line includes the final newline.
+type lineBuffer struct {
+ b []byte // buffer
+ mid bool // whether we're in the middle of a long line
+ line func([]byte) // line callback
+ part func([]byte) // partial line callback
+}
+
+// write writes b to the buffer.
+func (l *lineBuffer) write(b []byte) {
+ for len(b) > 0 {
+ // Copy what we can into b.
+ m := copy(l.b[len(l.b):cap(l.b)], b)
+ l.b = l.b[:len(l.b)+m]
+ b = b[m:]
+
+ // Process lines in b.
+ i := 0
+ for i < len(l.b) {
+ j := bytes.IndexByte(l.b[i:], '\n')
+ if j < 0 {
+ if !l.mid {
+ if j := bytes.IndexByte(l.b[i:], '\t'); j >= 0 {
+ if isBenchmarkName(bytes.TrimRight(l.b[i:i+j], " ")) {
+ l.part(l.b[i : i+j+1])
+ l.mid = true
+ i += j + 1
+ }
+ }
+ }
+ break
+ }
+ e := i + j + 1
+ if l.mid {
+ // Found the end of a partial line.
+ l.part(l.b[i:e])
+ l.mid = false
+ } else {
+ // Found a whole line.
+ l.line(l.b[i:e])
+ }
+ i = e
+ }
+
+ // Whatever's left in l.b is a line fragment.
+ if i == 0 && len(l.b) == cap(l.b) {
+ // The whole buffer is a fragment.
+ // Emit it as the beginning (or continuation) of a partial line.
+ t := trimUTF8(l.b)
+ l.part(l.b[:t])
+ l.b = l.b[:copy(l.b, l.b[t:])]
+ l.mid = true
+ }
+
+ // There's room for more input.
+ // Slide it down in hope of completing the line.
+ if i > 0 {
+ l.b = l.b[:copy(l.b, l.b[i:])]
+ }
+ }
+}
+
+// flush flushes the line buffer.
+func (l *lineBuffer) flush() {
+ if len(l.b) > 0 {
+ // Must be a line without a \n, so a partial line.
+ l.part(l.b)
+ l.b = l.b[:0]
+ }
+}
+
+var benchmark = []byte("Benchmark")
+
+// isBenchmarkName reports whether b is a valid benchmark name
+// that might appear as the first field in a benchmark result line.
+func isBenchmarkName(b []byte) bool {
+ if !bytes.HasPrefix(b, benchmark) {
+ return false
+ }
+ if len(b) == len(benchmark) { // just "Benchmark"
+ return true
+ }
+ r, _ := utf8.DecodeRune(b[len(benchmark):])
+ return !unicode.IsLower(r)
+}
+
+// trimUTF8 returns a length t as close to len(b) as possible such that b[:t]
+// does not end in the middle of a possibly-valid UTF-8 sequence.
+//
+// If a large text buffer must be split before position i at the latest,
+// splitting at position trimUTF(b[:i]) avoids splitting a UTF-8 sequence.
+func trimUTF8(b []byte) int {
+ // Scan backward to find non-continuation byte.
+ for i := 1; i < utf8.UTFMax && i <= len(b); i++ {
+ if c := b[len(b)-i]; c&0xc0 != 0x80 {
+ switch {
+ case c&0xe0 == 0xc0:
+ if i < 2 {
+ return len(b) - i
+ }
+ case c&0xf0 == 0xe0:
+ if i < 3 {
+ return len(b) - i
+ }
+ case c&0xf8 == 0xf0:
+ if i < 4 {
+ return len(b) - i
+ }
+ }
+ break
+ }
+ }
+ return len(b)
+}
diff --git a/go/tools/bzltestutil/testdata/empty.json b/go/tools/bzltestutil/testdata/empty.json
new file mode 100644
index 00000000..ef3aac14
--- /dev/null
+++ b/go/tools/bzltestutil/testdata/empty.json
@@ -0,0 +1 @@
+{"Action":"fail"} \ No newline at end of file
diff --git a/go/tools/bzltestutil/testdata/empty.xml b/go/tools/bzltestutil/testdata/empty.xml
new file mode 100644
index 00000000..108da736
--- /dev/null
+++ b/go/tools/bzltestutil/testdata/empty.xml
@@ -0,0 +1,3 @@
+<testsuites>
+ <testsuite errors="0" failures="0" skipped="0" tests="0" time="" name="pkg/testing"></testsuite>
+</testsuites> \ No newline at end of file
diff --git a/go/tools/bzltestutil/testdata/report.json b/go/tools/bzltestutil/testdata/report.json
new file mode 100644
index 00000000..bad356bf
--- /dev/null
+++ b/go/tools/bzltestutil/testdata/report.json
@@ -0,0 +1,47 @@
+{"Action":"run","Test":"TestPass"}
+{"Action":"output","Test":"TestPass","Output":"=== RUN TestPass\n"}
+{"Action":"output","Test":"TestPass","Output":"=== PAUSE TestPass\n"}
+{"Action":"pause","Test":"TestPass"}
+{"Action":"run","Test":"TestPassLog"}
+{"Action":"output","Test":"TestPassLog","Output":"=== RUN TestPassLog\n"}
+{"Action":"output","Test":"TestPassLog","Output":"=== PAUSE TestPassLog\n"}
+{"Action":"pause","Test":"TestPassLog"}
+{"Action":"run","Test":"TestFail"}
+{"Action":"output","Test":"TestFail","Output":"=== RUN TestFail\n"}
+{"Action":"output","Test":"TestFail","Output":"--- FAIL: TestFail (0.00s)\n"}
+{"Action":"output","Test":"TestFail","Output":" test_test.go:23: Not working\n"}
+{"Action":"fail","Test":"TestFail","Elapsed":0}
+{"Action":"run","Test":"TestSubtests"}
+{"Action":"output","Test":"TestSubtests","Output":"=== RUN TestSubtests\n"}
+{"Action":"run","Test":"TestSubtests/subtest_a"}
+{"Action":"output","Test":"TestSubtests/subtest_a","Output":"=== RUN TestSubtests/subtest_a\n"}
+{"Action":"run","Test":"TestSubtests/testB"}
+{"Action":"output","Test":"TestSubtests/testB","Output":"=== RUN TestSubtests/testB\n"}
+{"Action":"run","Test":"TestSubtests/another_subtest"}
+{"Action":"output","Test":"TestSubtests/another_subtest","Output":"=== RUN TestSubtests/another_subtest\n"}
+{"Action":"output","Test":"TestSubtests","Output":"--- FAIL: TestSubtests (0.02s)\n"}
+{"Action":"output","Test":"TestSubtests/subtest_a","Output":" --- SKIP: TestSubtests/subtest_a (0.00s)\n"}
+{"Action":"output","Test":"TestSubtests/subtest_a","Output":" test_test.go:29: from subtest subtest a\n"}
+{"Action":"output","Test":"TestSubtests/subtest_a","Output":" test_test.go:31: from subtest subtest a\n"}
+{"Action":"output","Test":"TestSubtests/subtest_a","Output":" test_test.go:33: skipping this test\n"}
+{"Action":"skip","Test":"TestSubtests/subtest_a","Elapsed":0}
+{"Action":"output","Test":"TestSubtests/testB","Output":" --- PASS: TestSubtests/testB (0.01s)\n"}
+{"Action":"output","Test":"TestSubtests/testB","Output":" test_test.go:29: from subtest testB\n"}
+{"Action":"output","Test":"TestSubtests/testB","Output":" test_test.go:31: from subtest testB\n"}
+{"Action":"pass","Test":"TestSubtests/testB","Elapsed":0.01}
+{"Action":"output","Test":"TestSubtests/another_subtest","Output":" --- FAIL: TestSubtests/another_subtest (0.01s)\n"}
+{"Action":"output","Test":"TestSubtests/another_subtest","Output":" test_test.go:29: from subtest another subtest\n"}
+{"Action":"output","Test":"TestSubtests/another_subtest","Output":" test_test.go:31: from subtest another subtest\n"}
+{"Action":"fail","Test":"TestSubtests/another_subtest","Elapsed":0.01}
+{"Action":"fail","Test":"TestSubtests","Elapsed":0.02}
+{"Action":"cont","Test":"TestPass"}
+{"Action":"output","Test":"TestPass","Output":"=== CONT TestPass\n"}
+{"Action":"cont","Test":"TestPassLog"}
+{"Action":"output","Test":"TestPassLog","Output":"=== CONT TestPassLog\n"}
+{"Action":"output","Test":"TestPass","Output":"--- PASS: TestPass (0.00s)\n"}
+{"Action":"pass","Test":"TestPass","Elapsed":0}
+{"Action":"output","Test":"TestPassLog","Output":"--- PASS: TestPassLog (0.00s)\n"}
+{"Action":"output","Test":"TestPassLog","Output":" test_test.go:19: pass\n"}
+{"Action":"pass","Test":"TestPassLog","Elapsed":0}
+{"Action":"output","Output":"FAIL\n"}
+{"Action":"fail","Elapsed":0.03} \ No newline at end of file
diff --git a/go/tools/bzltestutil/testdata/report.xml b/go/tools/bzltestutil/testdata/report.xml
new file mode 100644
index 00000000..40348954
--- /dev/null
+++ b/go/tools/bzltestutil/testdata/report.xml
@@ -0,0 +1,19 @@
+<testsuites>
+ <testsuite errors="0" failures="3" skipped="1" tests="7" time="0.030" name="pkg/testing">
+ <testcase classname="testing" name="TestFail" time="0.000">
+ <failure message="Failed" type="">=== RUN TestFail&#xA;--- FAIL: TestFail (0.00s)&#xA; test_test.go:23: Not working&#xA;</failure>
+ </testcase>
+ <testcase classname="testing" name="TestPass" time="0.000"></testcase>
+ <testcase classname="testing" name="TestPassLog" time="0.000"></testcase>
+ <testcase classname="testing" name="TestSubtests" time="0.020">
+ <failure message="Failed" type="">=== RUN TestSubtests&#xA;--- FAIL: TestSubtests (0.02s)&#xA;</failure>
+ </testcase>
+ <testcase classname="testing" name="TestSubtests/another_subtest" time="0.010">
+ <failure message="Failed" type="">=== RUN TestSubtests/another_subtest&#xA; --- FAIL: TestSubtests/another_subtest (0.01s)&#xA; test_test.go:29: from subtest another subtest&#xA; test_test.go:31: from subtest another subtest&#xA;</failure>
+ </testcase>
+ <testcase classname="testing" name="TestSubtests/subtest_a" time="0.000">
+ <skipped message="Skipped" type="">=== RUN TestSubtests/subtest_a&#xA; --- SKIP: TestSubtests/subtest_a (0.00s)&#xA; test_test.go:29: from subtest subtest a&#xA; test_test.go:31: from subtest subtest a&#xA; test_test.go:33: skipping this test&#xA;</skipped>
+ </testcase>
+ <testcase classname="testing" name="TestSubtests/testB" time="0.010"></testcase>
+ </testsuite>
+</testsuites> \ No newline at end of file
diff --git a/go/tools/bzltestutil/wrap.go b/go/tools/bzltestutil/wrap.go
new file mode 100644
index 00000000..c8fb65e0
--- /dev/null
+++ b/go/tools/bzltestutil/wrap.go
@@ -0,0 +1,154 @@
+// Copyright 2020 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bzltestutil
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+// TestWrapperAbnormalExit is used by Wrap to indicate the child
+// process exited without an exit code (for example being killed by a signal).
+// We use 6, in line with Bazel's RUN_FAILURE.
+const TestWrapperAbnormalExit = 6
+
+func ShouldWrap() bool {
+ if wrapEnv, ok := os.LookupEnv("GO_TEST_WRAP"); ok {
+ wrap, err := strconv.ParseBool(wrapEnv)
+ if err != nil {
+ log.Fatalf("invalid value for GO_TEST_WRAP: %q", wrapEnv)
+ }
+ return wrap
+ }
+ _, ok := os.LookupEnv("XML_OUTPUT_FILE")
+ return ok
+}
+
+// shouldAddTestV indicates if the test wrapper should prepend a -test.v flag to
+// the test args. This is required to get information about passing tests from
+// test2json for complete XML reports.
+func shouldAddTestV() bool {
+ if wrapEnv, ok := os.LookupEnv("GO_TEST_WRAP_TESTV"); ok {
+ wrap, err := strconv.ParseBool(wrapEnv)
+ if err != nil {
+ log.Fatalf("invalid value for GO_TEST_WRAP_TESTV: %q", wrapEnv)
+ }
+ return wrap
+ }
+ return false
+}
+
+// streamMerger intelligently merges an input stdout and stderr stream and dumps
+// the output to the writer `inner`. Additional synchronization is applied to
+// ensure that one line at a time is written to the inner writer.
+type streamMerger struct {
+ OutW, ErrW *io.PipeWriter
+ mutex sync.Mutex
+ inner io.Writer
+ wg sync.WaitGroup
+ outR, errR *bufio.Reader
+}
+
+func NewStreamMerger(w io.Writer) *streamMerger {
+ outR, outW := io.Pipe()
+ errR, errW := io.Pipe()
+ return &streamMerger{
+ inner: w,
+ OutW: outW,
+ ErrW: errW,
+ outR: bufio.NewReader(outR),
+ errR: bufio.NewReader(errR),
+ }
+}
+
+func (m *streamMerger) Start() {
+ m.wg.Add(2)
+ process := func(r *bufio.Reader) {
+ for {
+ s, err := r.ReadString('\n')
+ if len(s) > 0 {
+ m.mutex.Lock()
+ io.WriteString(m.inner, s)
+ m.mutex.Unlock()
+ }
+ if err == io.EOF {
+ break
+ }
+ }
+ m.wg.Done()
+ }
+ go process(m.outR)
+ go process(m.errR)
+}
+
+func (m *streamMerger) Wait() {
+ m.wg.Wait()
+}
+
+func Wrap(pkg string) error {
+ var jsonBuffer bytes.Buffer
+ jsonConverter := NewConverter(&jsonBuffer, pkg, Timestamp)
+ streamMerger := NewStreamMerger(jsonConverter)
+
+ args := os.Args[1:]
+ if shouldAddTestV() {
+ args = append([]string{"-test.v"}, args...)
+ }
+ exePath := os.Args[0]
+ if !filepath.IsAbs(exePath) && strings.ContainsRune(exePath, filepath.Separator) && testExecDir != "" {
+ exePath = filepath.Join(testExecDir, exePath)
+ }
+ cmd := exec.Command(exePath, args...)
+ cmd.Env = append(os.Environ(), "GO_TEST_WRAP=0")
+ cmd.Stderr = io.MultiWriter(os.Stderr, streamMerger.ErrW)
+ cmd.Stdout = io.MultiWriter(os.Stdout, streamMerger.OutW)
+ streamMerger.Start()
+ err := cmd.Run()
+ streamMerger.ErrW.Close()
+ streamMerger.OutW.Close()
+ streamMerger.Wait()
+ jsonConverter.Close()
+ if out, ok := os.LookupEnv("XML_OUTPUT_FILE"); ok {
+ werr := writeReport(jsonBuffer, pkg, out)
+ if werr != nil {
+ if err != nil {
+ return fmt.Errorf("error while generating testreport: %s, (error wrapping test execution: %s)", werr, err)
+ }
+ return fmt.Errorf("error while generating testreport: %s", werr)
+ }
+ }
+ return err
+}
+
+func writeReport(jsonBuffer bytes.Buffer, pkg string, path string) error {
+ xml, cerr := json2xml(&jsonBuffer, pkg)
+ if cerr != nil {
+ return fmt.Errorf("error converting test output to xml: %s", cerr)
+ }
+ if err := ioutil.WriteFile(path, xml, 0664); err != nil {
+ return fmt.Errorf("error writing test xml: %s", err)
+ }
+ return nil
+}
diff --git a/go/tools/bzltestutil/wrap_test.go b/go/tools/bzltestutil/wrap_test.go
new file mode 100644
index 00000000..8444b7a5
--- /dev/null
+++ b/go/tools/bzltestutil/wrap_test.go
@@ -0,0 +1,63 @@
+// Copyright 2020 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bzltestutil
+
+import (
+ "fmt"
+ "os"
+ "testing"
+)
+
+func TestShouldWrap(t *testing.T) {
+ var tests = []struct {
+ envs map[string]string
+ shouldWrap bool
+ }{
+ {
+ envs: map[string]string{
+ "GO_TEST_WRAP": "0",
+ "XML_OUTPUT_FILE": "",
+ },
+ shouldWrap: false,
+ }, {
+ envs: map[string]string{
+ "GO_TEST_WRAP": "1",
+ "XML_OUTPUT_FILE": "",
+ },
+ shouldWrap: true,
+ }, {
+ envs: map[string]string{
+ "GO_TEST_WRAP": "",
+ "XML_OUTPUT_FILE": "path",
+ },
+ shouldWrap: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(fmt.Sprintf("%v", tt.envs), func(t *testing.T) {
+ for k, v := range tt.envs {
+ if v == "" {
+ os.Unsetenv(k)
+ } else {
+ os.Setenv(k, v)
+ }
+ }
+ got := ShouldWrap()
+ if tt.shouldWrap != got {
+ t.Errorf("shouldWrap returned %t, expected %t", got, tt.shouldWrap)
+ }
+ })
+ }
+}
diff --git a/go/tools/bzltestutil/xml.go b/go/tools/bzltestutil/xml.go
new file mode 100644
index 00000000..d8ecd1d9
--- /dev/null
+++ b/go/tools/bzltestutil/xml.go
@@ -0,0 +1,181 @@
+// Copyright 2020 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bzltestutil
+
+import (
+ "encoding/json"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "path"
+ "sort"
+ "strings"
+ "time"
+)
+
+type xmlTestSuites struct {
+ XMLName xml.Name `xml:"testsuites"`
+ Suites []xmlTestSuite `xml:"testsuite"`
+}
+
+type xmlTestSuite struct {
+ XMLName xml.Name `xml:"testsuite"`
+ TestCases []xmlTestCase `xml:"testcase"`
+ Errors int `xml:"errors,attr"`
+ Failures int `xml:"failures,attr"`
+ Skipped int `xml:"skipped,attr"`
+ Tests int `xml:"tests,attr"`
+ Time string `xml:"time,attr"`
+ Name string `xml:"name,attr"`
+}
+
+type xmlTestCase struct {
+ XMLName xml.Name `xml:"testcase"`
+ Classname string `xml:"classname,attr"`
+ Name string `xml:"name,attr"`
+ Time string `xml:"time,attr"`
+ Failure *xmlMessage `xml:"failure,omitempty"`
+ Error *xmlMessage `xml:"error,omitempty"`
+ Skipped *xmlMessage `xml:"skipped,omitempty"`
+}
+
+type xmlMessage struct {
+ Message string `xml:"message,attr"`
+ Type string `xml:"type,attr"`
+ Contents string `xml:",chardata"`
+}
+
+// jsonEvent as encoded by the test2json package.
+type jsonEvent struct {
+ Time *time.Time
+ Action string
+ Package string
+ Test string
+ Elapsed *float64
+ Output string
+}
+
+type testCase struct {
+ state string
+ output strings.Builder
+ duration *float64
+}
+
+// json2xml converts test2json's output into an xml output readable by Bazel.
+// http://windyroad.com.au/dl/Open%20Source/JUnit.xsd
+func json2xml(r io.Reader, pkgName string) ([]byte, error) {
+ var pkgDuration *float64
+ testcases := make(map[string]*testCase)
+ testCaseByName := func(name string) *testCase {
+ if name == "" {
+ return nil
+ }
+ if _, ok := testcases[name]; !ok {
+ testcases[name] = &testCase{}
+ }
+ return testcases[name]
+ }
+
+ dec := json.NewDecoder(r)
+ for {
+ var e jsonEvent
+ if err := dec.Decode(&e); err == io.EOF {
+ break
+ } else if err != nil {
+ return nil, fmt.Errorf("error decoding test2json output: %s", err)
+ }
+ switch s := e.Action; s {
+ case "run":
+ if c := testCaseByName(e.Test); c != nil {
+ c.state = s
+ }
+ case "output":
+ if c := testCaseByName(e.Test); c != nil {
+ c.output.WriteString(e.Output)
+ }
+ case "skip":
+ if c := testCaseByName(e.Test); c != nil {
+ c.output.WriteString(e.Output)
+ c.state = s
+ c.duration = e.Elapsed
+ }
+ case "fail":
+ if c := testCaseByName(e.Test); c != nil {
+ c.state = s
+ c.duration = e.Elapsed
+ } else {
+ pkgDuration = e.Elapsed
+ }
+ case "pass":
+ if c := testCaseByName(e.Test); c != nil {
+ c.duration = e.Elapsed
+ c.state = s
+ } else {
+ pkgDuration = e.Elapsed
+ }
+ }
+ }
+
+ return xml.MarshalIndent(toXML(pkgName, pkgDuration, testcases), "", "\t")
+}
+
+func toXML(pkgName string, pkgDuration *float64, testcases map[string]*testCase) *xmlTestSuites {
+ cases := make([]string, 0, len(testcases))
+ for k := range testcases {
+ cases = append(cases, k)
+ }
+ sort.Strings(cases)
+ suite := xmlTestSuite{
+ Name: pkgName,
+ }
+ if pkgDuration != nil {
+ suite.Time = fmt.Sprintf("%.3f", *pkgDuration)
+ }
+ for _, name := range cases {
+ c := testcases[name]
+ suite.Tests++
+ newCase := xmlTestCase{
+ Name: name,
+ Classname: path.Base(pkgName),
+ }
+ if c.duration != nil {
+ newCase.Time = fmt.Sprintf("%.3f", *c.duration)
+ }
+ switch c.state {
+ case "skip":
+ suite.Skipped++
+ newCase.Skipped = &xmlMessage{
+ Message: "Skipped",
+ Contents: c.output.String(),
+ }
+ case "fail":
+ suite.Failures++
+ newCase.Failure = &xmlMessage{
+ Message: "Failed",
+ Contents: c.output.String(),
+ }
+ case "pass":
+ break
+ default:
+ suite.Errors++
+ newCase.Error = &xmlMessage{
+ Message: "No pass/skip/fail event found for test",
+ Contents: c.output.String(),
+ }
+ }
+ suite.TestCases = append(suite.TestCases, newCase)
+ }
+ return &xmlTestSuites{Suites: []xmlTestSuite{suite}}
+}
diff --git a/go/tools/bzltestutil/xml_test.go b/go/tools/bzltestutil/xml_test.go
new file mode 100644
index 00000000..8221e70d
--- /dev/null
+++ b/go/tools/bzltestutil/xml_test.go
@@ -0,0 +1,55 @@
+// Copyright 2020 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bzltestutil
+
+import (
+ "bytes"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+)
+
+func TestJSON2XML(t *testing.T) {
+ files, err := filepath.Glob("testdata/*.json")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for _, file := range files {
+ name := strings.TrimSuffix(filepath.Base(file), ".json")
+ t.Run(name, func(t *testing.T) {
+ orig, err := os.Open(file)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := json2xml(orig, "pkg/testing")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ target := strings.TrimSuffix(file, ".json") + ".xml"
+ want, err := ioutil.ReadFile(target)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !bytes.Equal(got, want) {
+ t.Errorf("json2xml for %s does not match, got:\n%s\nwant:\n%s\n", name, string(got), string(want))
+ }
+ })
+ }
+}
diff --git a/go/tools/coverdata/BUILD.bazel b/go/tools/coverdata/BUILD.bazel
new file mode 100644
index 00000000..00b60185
--- /dev/null
+++ b/go/tools/coverdata/BUILD.bazel
@@ -0,0 +1,15 @@
+load("//go/private/rules:library.bzl", "go_tool_library")
+
+go_tool_library(
+ name = "coverdata",
+ srcs = ["coverdata.go"],
+ importpath = "github.com/bazelbuild/rules_go/go/tools/coverdata",
+ visibility = ["//visibility:public"],
+)
+
+filegroup(
+ name = "all_files",
+ testonly = True,
+ srcs = glob(["**"]),
+ visibility = ["//visibility:public"],
+)
diff --git a/go/tools/coverdata/coverdata.go b/go/tools/coverdata/coverdata.go
new file mode 100644
index 00000000..1a80b014
--- /dev/null
+++ b/go/tools/coverdata/coverdata.go
@@ -0,0 +1,58 @@
+/* Copyright 2018 The Bazel Authors. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package coverdata provides a registration function for files with
+// coverage instrumentation.
+//
+// This package is part of the Bazel Go rules, and its interface
+// should not be considered public. It may change without notice.
+package coverdata
+
+import (
+ "fmt"
+ "testing"
+)
+
+// Contains all coverage data for the program.
+var (
+ Counters = make(map[string][]uint32)
+ Blocks = make(map[string][]testing.CoverBlock)
+)
+
+// RegisterFile causes the coverage data recorded for a file to be included
+// in program-wide coverage reports. This should be called from init functions
+// in packages with coverage instrumentation.
+func RegisterFile(fileName string, counter []uint32, pos []uint32, numStmts []uint16) {
+ if 3*len(counter) != len(pos) || len(counter) != len(numStmts) {
+ panic("coverage: mismatched sizes")
+ }
+ if Counters[fileName] != nil {
+ // Already registered.
+ fmt.Printf("Already covered %s\n", fileName)
+ return
+ }
+ Counters[fileName] = counter
+ block := make([]testing.CoverBlock, len(counter))
+ for i := range counter {
+ block[i] = testing.CoverBlock{
+ Line0: pos[3*i+0],
+ Col0: uint16(pos[3*i+2]),
+ Line1: pos[3*i+1],
+ Col1: uint16(pos[3*i+2] >> 16),
+ Stmts: numStmts[i],
+ }
+ }
+ Blocks[fileName] = block
+}
diff --git a/go/tools/fetch_repo/BUILD.bazel b/go/tools/fetch_repo/BUILD.bazel
new file mode 100644
index 00000000..e865f654
--- /dev/null
+++ b/go/tools/fetch_repo/BUILD.bazel
@@ -0,0 +1,23 @@
+load("//go:def.bzl", "go_binary", "go_library", "go_test")
+
+go_binary(
+ name = "fetch_repo",
+ embed = [":go_default_library"],
+ visibility = ["//visibility:public"],
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = ["main.go"],
+ importpath = "github.com/bazelbuild/rules_go/go/tools/fetch_repo",
+ visibility = ["//visibility:private"],
+ deps = ["@org_golang_x_tools//go/vcs:go_default_library"],
+)
+
+go_test(
+ name = "go_default_test",
+ size = "small",
+ srcs = ["fetch_repo_test.go"],
+ embed = [":go_default_library"],
+ deps = ["@org_golang_x_tools//go/vcs:go_default_library"],
+)
diff --git a/go/tools/fetch_repo/fetch_repo_test.go b/go/tools/fetch_repo/fetch_repo_test.go
new file mode 100644
index 00000000..d0573490
--- /dev/null
+++ b/go/tools/fetch_repo/fetch_repo_test.go
@@ -0,0 +1,96 @@
+package main
+
+import (
+ "os"
+ "reflect"
+ "testing"
+
+ "golang.org/x/tools/go/vcs"
+)
+
+var (
+ root = &vcs.RepoRoot{
+ VCS: vcs.ByCmd("git"),
+ Repo: "https://github.com/bazeltest/rules_go",
+ Root: "github.com/bazeltest/rules_go",
+ }
+)
+
+func TestMain(m *testing.M) {
+ // Replace vcs.RepoRootForImportPath to disable any network calls.
+ repoRootForImportPath = func(_ string, _ bool) (*vcs.RepoRoot, error) {
+ return root, nil
+ }
+ os.Exit(m.Run())
+}
+
+func TestGetRepoRoot(t *testing.T) {
+ for _, tc := range []struct {
+ label string
+ remote string
+ cmd string
+ importpath string
+ r *vcs.RepoRoot
+ }{
+ {
+ label: "all",
+ remote: "https://github.com/bazeltest/rules_go",
+ cmd: "git",
+ importpath: "github.com/bazeltest/rules_go",
+ r: root,
+ },
+ {
+ label: "different remote",
+ remote: "https://example.com/rules_go",
+ cmd: "git",
+ importpath: "github.com/bazeltest/rules_go",
+ r: &vcs.RepoRoot{
+ VCS: vcs.ByCmd("git"),
+ Repo: "https://example.com/rules_go",
+ Root: "github.com/bazeltest/rules_go",
+ },
+ },
+ {
+ label: "only importpath",
+ importpath: "github.com/bazeltest/rules_go",
+ r: root,
+ },
+ } {
+ r, err := getRepoRoot(tc.remote, tc.cmd, tc.importpath)
+ if err != nil {
+ t.Errorf("[%s] %v", tc.label, err)
+ }
+ if !reflect.DeepEqual(r, tc.r) {
+ t.Errorf("[%s] Expected %+v, got %+v", tc.label, tc.r, r)
+ }
+ }
+}
+
+func TestGetRepoRoot_error(t *testing.T) {
+ for _, tc := range []struct {
+ label string
+ remote string
+ cmd string
+ importpath string
+ }{
+ {
+ label: "importpath as remote",
+ remote: "github.com/bazeltest/rules_go",
+ },
+ {
+ label: "missing vcs",
+ remote: "https://github.com/bazeltest/rules_go",
+ importpath: "github.com/bazeltest/rules_go",
+ },
+ {
+ label: "missing remote",
+ cmd: "git",
+ importpath: "github.com/bazeltest/rules_go",
+ },
+ } {
+ r, err := getRepoRoot(tc.remote, tc.cmd, tc.importpath)
+ if err == nil {
+ t.Errorf("[%s] expected error. Got %+v", tc.label, r)
+ }
+ }
+}
diff --git a/go/tools/fetch_repo/main.go b/go/tools/fetch_repo/main.go
new file mode 100644
index 00000000..d3993202
--- /dev/null
+++ b/go/tools/fetch_repo/main.go
@@ -0,0 +1,75 @@
+// Command fetch_repo is similar to "go get -d" but it works even if the given
+// repository path is not a buildable Go package and it checks out a specific
+// revision rather than the latest revision.
+//
+// The difference between fetch_repo and "git clone" or {new_,}git_repository is
+// that fetch_repo recognizes import redirection of Go and it supports other
+// version control systems than git.
+//
+// These differences help us to manage external Go repositories in the manner of
+// Bazel.
+package main
+
+import (
+ "flag"
+ "fmt"
+ "log"
+
+ "golang.org/x/tools/go/vcs"
+)
+
+var (
+ remote = flag.String("remote", "", "The URI of the remote repository. Must be used with the --vcs flag.")
+ cmd = flag.String("vcs", "", "Version control system to use to fetch the repository. Should be one of: git,hg,svn,bzr. Must be used with the --remote flag.")
+ rev = flag.String("rev", "", "target revision")
+ dest = flag.String("dest", "", "destination directory")
+ importpath = flag.String("importpath", "", "Go importpath to the repository fetch")
+
+ // Used for overriding in tests to disable network calls.
+ repoRootForImportPath = vcs.RepoRootForImportPath
+)
+
+func getRepoRoot(remote, cmd, importpath string) (*vcs.RepoRoot, error) {
+ if (cmd == "") != (remote == "") {
+ return nil, fmt.Errorf("--remote should be used with the --vcs flag. If this is an import path, use --importpath instead.")
+ }
+
+ if cmd != "" && remote != "" {
+ v := vcs.ByCmd(cmd)
+ if v == nil {
+ return nil, fmt.Errorf("invalid VCS type: %s", cmd)
+ }
+ return &vcs.RepoRoot{
+ VCS: v,
+ Repo: remote,
+ Root: importpath,
+ }, nil
+ }
+
+ // User did not give us complete information for VCS / Remote.
+ // Try to figure out the information from the import path.
+ r, err := repoRootForImportPath(importpath, true)
+ if err != nil {
+ return nil, err
+ }
+ if importpath != r.Root {
+ return nil, fmt.Errorf("not a root of a repository: %s", importpath)
+ }
+ return r, nil
+}
+
+func run() error {
+ r, err := getRepoRoot(*remote, *cmd, *importpath)
+ if err != nil {
+ return err
+ }
+ return r.VCS.CreateAtRev(*dest, r.Repo, *rev)
+}
+
+func main() {
+ flag.Parse()
+
+ if err := run(); err != nil {
+ log.Fatal(err)
+ }
+}
diff --git a/go/tools/gazelle/README.rst b/go/tools/gazelle/README.rst
new file mode 100644
index 00000000..aa6177bc
--- /dev/null
+++ b/go/tools/gazelle/README.rst
@@ -0,0 +1,2 @@
+Gazelle has moved to a new repository:
+`github.com/bazelbuild/bazel-gazelle <https://github.com/bazelbuild/bazel-gazelle>`_
diff --git a/go/tools/go_bin_runner/BUILD.bazel b/go/tools/go_bin_runner/BUILD.bazel
new file mode 100644
index 00000000..91be016e
--- /dev/null
+++ b/go/tools/go_bin_runner/BUILD.bazel
@@ -0,0 +1,39 @@
+# gazelle:exclude
+
+load("//go:def.bzl", "go_binary", "go_library")
+load("//go/private/rules:go_bin_for_host.bzl", "go_bin_for_host")
+
+go_bin_for_host(
+ name = "go_bin_for_host",
+ visibility = ["//visibility:private"],
+)
+
+go_library(
+ name = "go_bin_runner_lib",
+ srcs = [
+ "main.go",
+ "process.go",
+ ],
+ importpath = "github.com/bazelbuild/rules_go/go/tools/go_bin_runner",
+ visibility = ["//visibility:private"],
+ deps = [
+ "//go/runfiles",
+ ],
+)
+
+go_binary(
+ name = "go_bin_runner",
+ data = [":go_bin_for_host"],
+ embed = [":go_bin_runner_lib"],
+ visibility = ["//go:__pkg__"],
+ x_defs = {
+ "GoBinRlocationPath": "$(rlocationpath :go_bin_for_host)",
+ },
+)
+
+filegroup(
+ name = "all_files",
+ testonly = True,
+ srcs = glob(["**"]),
+ visibility = ["//visibility:public"],
+)
diff --git a/go/tools/go_bin_runner/main.go b/go/tools/go_bin_runner/main.go
new file mode 100644
index 00000000..bca4f7fc
--- /dev/null
+++ b/go/tools/go_bin_runner/main.go
@@ -0,0 +1,41 @@
+package main
+
+import (
+ "log"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/bazelbuild/rules_go/go/runfiles"
+)
+
+var GoBinRlocationPath = "not set"
+
+func main() {
+ goBin, err := runfiles.Rlocation(GoBinRlocationPath)
+ if err != nil {
+ log.Fatal(err)
+ }
+ // The go binary lies at $GOROOT/bin/go.
+ goRoot, err := filepath.Abs(filepath.Dir(filepath.Dir(goBin)))
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ env := os.Environ()
+ var filteredEnv []string
+ for i := 0; i < len(env); i++ {
+ if !strings.HasPrefix(env[i], "GOROOT=") {
+ filteredEnv = append(filteredEnv, env[i])
+ }
+ }
+ filteredEnv = append(filteredEnv, "GOROOT="+goRoot)
+
+ err = os.Chdir(os.Getenv("BUILD_WORKING_DIRECTORY"))
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ args := append([]string{goBin}, os.Args[1:]...)
+ log.Fatal(ReplaceWithProcess(args, filteredEnv))
+}
diff --git a/go/tools/go_bin_runner/process.go b/go/tools/go_bin_runner/process.go
new file mode 100644
index 00000000..af236549
--- /dev/null
+++ b/go/tools/go_bin_runner/process.go
@@ -0,0 +1,20 @@
+package main
+
+import (
+ "os"
+ "os/exec"
+)
+
+func ReplaceWithProcess(args, env []string) error {
+ cmd := exec.Command(args[0], args[1:]...)
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ cmd.Env = env
+ err := cmd.Run()
+ if exitErr, ok := err.(*exec.ExitError); ok {
+ os.Exit(exitErr.ExitCode())
+ } else if err == nil {
+ os.Exit(0)
+ }
+ return err
+}
diff --git a/go/tools/gopackagesdriver/BUILD.bazel b/go/tools/gopackagesdriver/BUILD.bazel
new file mode 100644
index 00000000..542b75ad
--- /dev/null
+++ b/go/tools/gopackagesdriver/BUILD.bazel
@@ -0,0 +1,39 @@
+load("//go:def.bzl", "go_binary", "go_library")
+load(":aspect.bzl", "bazel_supports_canonical_label_literals")
+
+go_library(
+ name = "gopackagesdriver_lib",
+ srcs = [
+ "bazel.go",
+ "bazel_json_builder.go",
+ "build_context.go",
+ "driver_request.go",
+ "flatpackage.go",
+ "json_packages_driver.go",
+ "main.go",
+ "packageregistry.go",
+ "utils.go",
+ ],
+ importpath = "github.com/bazelbuild/rules_go/go/tools/gopackagesdriver",
+ visibility = ["//visibility:private"],
+)
+
+go_binary(
+ name = "gopackagesdriver",
+ embed = [":gopackagesdriver_lib"],
+ x_defs = {
+ # Determine the name of the rules_go repository as we need to specify it when invoking the
+ # aspect.
+ # If canonical label literals are supported, we can use a canonical label literal (starting
+ # with @@) to pass the repository_name() through repo mapping unchanged.
+ # If canonical label literals are not supported, then bzlmod is certainly not enabled and
+ # we can assume that the repository name is not affected by repo mappings.
+ # If run in the rules_go repo itself, repository_name() returns "@", which is equivalent to
+ # "@io_bazel_rules_go" since Bazel 6:
+ # https://github.com/bazelbuild/bazel/commit/7694cf75e6366b92e3905c2ad60234cda57627ee
+ # TODO: Once we drop support for Bazel 5, we can remove the feature detection logic and
+ # use "@" + repository_name().
+ "rulesGoRepositoryName": "@" + repository_name() if bazel_supports_canonical_label_literals() else repository_name(),
+ },
+ visibility = ["//visibility:public"],
+)
diff --git a/go/tools/gopackagesdriver/aspect.bzl b/go/tools/gopackagesdriver/aspect.bzl
new file mode 100644
index 00000000..36703c75
--- /dev/null
+++ b/go/tools/gopackagesdriver/aspect.bzl
@@ -0,0 +1,169 @@
+# Copyright 2021 The Bazel Go Rules Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load(
+ "//go/private:providers.bzl",
+ "GoArchive",
+ "GoStdLib",
+)
+load(
+ "@bazel_skylib//lib:paths.bzl",
+ "paths",
+)
+
+GoPkgInfo = provider()
+
+DEPS_ATTRS = [
+ "deps",
+ "embed",
+]
+
+PROTO_COMPILER_ATTRS = [
+ "compiler",
+ "compilers",
+ "library",
+]
+
+def bazel_supports_canonical_label_literals():
+ return str(Label("//:bogus")).startswith("@@")
+
+def is_file_external(f):
+ return f.owner.workspace_root != ""
+
+def file_path(f):
+ prefix = "__BAZEL_WORKSPACE__"
+ if not f.is_source:
+ prefix = "__BAZEL_EXECROOT__"
+ elif is_file_external(f):
+ prefix = "__BAZEL_OUTPUT_BASE__"
+ return paths.join(prefix, f.path)
+
+def _go_archive_to_pkg(archive):
+ return struct(
+ ID = str(archive.data.label),
+ PkgPath = archive.data.importpath,
+ ExportFile = file_path(archive.data.export_file),
+ GoFiles = [
+ file_path(src)
+ for src in archive.data.orig_srcs
+ if src.path.endswith(".go")
+ ],
+ CompiledGoFiles = [
+ file_path(src)
+ for src in archive.data.srcs
+ if src.path.endswith(".go")
+ ],
+ OtherFiles = [
+ file_path(src)
+ for src in archive.data.orig_srcs
+ if not src.path.endswith(".go")
+ ],
+ Imports = {
+ pkg.data.importpath: str(pkg.data.label)
+ for pkg in archive.direct
+ },
+ )
+
+def make_pkg_json(ctx, name, pkg_info):
+ pkg_json_file = ctx.actions.declare_file(name + ".pkg.json")
+ ctx.actions.write(pkg_json_file, content = pkg_info.to_json())
+ return pkg_json_file
+
+def _go_pkg_info_aspect_impl(target, ctx):
+ # Fetch the stdlib JSON file from the inner most target
+ stdlib_json_file = None
+
+ transitive_json_files = []
+ transitive_export_files = []
+ transitive_compiled_go_files = []
+
+ for attr in DEPS_ATTRS + PROTO_COMPILER_ATTRS:
+ deps = getattr(ctx.rule.attr, attr, []) or []
+
+ # Some attrs are not iterable, ensure that deps is always iterable.
+ if type(deps) != type([]):
+ deps = [deps]
+
+ for dep in deps:
+ if GoPkgInfo in dep:
+ pkg_info = dep[GoPkgInfo]
+ transitive_json_files.append(pkg_info.pkg_json_files)
+ transitive_compiled_go_files.append(pkg_info.compiled_go_files)
+ transitive_export_files.append(pkg_info.export_files)
+
+ # Fetch the stdlib json from the first dependency
+ if not stdlib_json_file:
+ stdlib_json_file = pkg_info.stdlib_json_file
+
+ pkg_json_files = []
+ compiled_go_files = []
+ export_files = []
+
+ if GoArchive in target:
+ archive = target[GoArchive]
+ compiled_go_files.extend(archive.source.srcs)
+ export_files.append(archive.data.export_file)
+ pkg = _go_archive_to_pkg(archive)
+ pkg_json_files.append(make_pkg_json(ctx, archive.data.name, pkg))
+
+ if ctx.rule.kind == "go_test":
+ for dep_archive in archive.direct:
+ # find the archive containing the test sources
+ if archive.data.label == dep_archive.data.label:
+ pkg = _go_archive_to_pkg(dep_archive)
+ pkg_json_files.append(make_pkg_json(ctx, dep_archive.data.name, pkg))
+ compiled_go_files.extend(dep_archive.source.srcs)
+ export_files.append(dep_archive.data.export_file)
+ break
+
+ # If there was no stdlib json in any dependencies, fetch it from the
+ # current go_ node.
+ if not stdlib_json_file:
+ stdlib_json_file = ctx.attr._go_stdlib[GoStdLib]._list_json
+
+ pkg_info = GoPkgInfo(
+ stdlib_json_file = stdlib_json_file,
+ pkg_json_files = depset(
+ direct = pkg_json_files,
+ transitive = transitive_json_files,
+ ),
+ compiled_go_files = depset(
+ direct = compiled_go_files,
+ transitive = transitive_compiled_go_files,
+ ),
+ export_files = depset(
+ direct = export_files,
+ transitive = transitive_export_files,
+ ),
+ )
+
+ return [
+ pkg_info,
+ OutputGroupInfo(
+ go_pkg_driver_json_file = pkg_info.pkg_json_files,
+ go_pkg_driver_srcs = pkg_info.compiled_go_files,
+ go_pkg_driver_export_file = pkg_info.export_files,
+ go_pkg_driver_stdlib_json_file = depset([pkg_info.stdlib_json_file] if pkg_info.stdlib_json_file else []),
+ ),
+ ]
+
+go_pkg_info_aspect = aspect(
+ implementation = _go_pkg_info_aspect_impl,
+ attr_aspects = DEPS_ATTRS + PROTO_COMPILER_ATTRS,
+ attrs = {
+ "_go_stdlib": attr.label(
+ default = "//:stdlib",
+ ),
+ },
+)
diff --git a/go/tools/gopackagesdriver/bazel.go b/go/tools/gopackagesdriver/bazel.go
new file mode 100644
index 00000000..08da745d
--- /dev/null
+++ b/go/tools/gopackagesdriver/bazel.go
@@ -0,0 +1,164 @@
+// Copyright 2021 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net/url"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+)
+
+const (
+ // toolTag is passed via --tool_tag so bazel can attribute invocations
+ // made by this driver.
+ toolTag = "gopackagesdriver"
+)
+
+// Bazel runs the bazel binary for a single workspace and caches the
+// key/value output of `bazel info`.
+type Bazel struct {
+ bazelBin string
+ workspaceRoot string
+ bazelStartupFlags []string
+ info map[string]string
+}
+
+// Minimal BEP structs to access the build outputs
+type BEPNamedSet struct {
+ NamedSetOfFiles *struct {
+ Files []struct {
+ Name string `json:"name"`
+ URI string `json:"uri"`
+ } `json:"files"`
+ } `json:"namedSetOfFiles"`
+}
+
+// NewBazel returns a Bazel runner rooted at workspaceRoot. It eagerly
+// populates the `bazel info` cache and fails if bazel cannot be invoked.
+func NewBazel(ctx context.Context, bazelBin, workspaceRoot string, bazelStartupFlags []string) (*Bazel, error) {
+ b := &Bazel{
+ bazelBin: bazelBin,
+ workspaceRoot: workspaceRoot,
+ bazelStartupFlags: bazelStartupFlags,
+ }
+ if err := b.fillInfo(ctx); err != nil {
+ return nil, fmt.Errorf("unable to query bazel info: %w", err)
+ }
+ return b, nil
+}
+
+// fillInfo populates b.info from the "key: value" lines printed by
+// `bazel info`, trimming whitespace around both keys and values.
+func (b *Bazel) fillInfo(ctx context.Context) error {
+ b.info = map[string]string{}
+ output, err := b.run(ctx, "info")
+ if err != nil {
+ return err
+ }
+ scanner := bufio.NewScanner(bytes.NewBufferString(output))
+ for scanner.Scan() {
+ parts := strings.SplitN(strings.TrimSpace(scanner.Text()), ":", 2)
+ if len(parts) < 2 {
+ // Skip lines that are not "key: value" (e.g. stray warnings);
+ // previously such a line would panic on parts[1].
+ continue
+ }
+ b.info[strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
+ }
+ // Surface scanner errors instead of silently dropping them.
+ return scanner.Err()
+}
+
+// run executes `bazel <command> <args...>` from the workspace root,
+// forwarding bazel's stderr to this process and returning captured stdout.
+func (b *Bazel) run(ctx context.Context, command string, args ...string) (string, error) {
+ defaultArgs := []string{
+ command,
+ "--tool_tag=" + toolTag,
+ "--ui_actions_shown=0",
+ }
+ cmd := exec.CommandContext(ctx, b.bazelBin, concatStringsArrays(b.bazelStartupFlags, defaultArgs, args)...)
+ // Echo the full command line so users can reproduce the invocation.
+ fmt.Fprintln(os.Stderr, "Running:", cmd.Args)
+ cmd.Dir = b.WorkspaceRoot()
+ cmd.Stderr = os.Stderr
+ output, err := cmd.Output()
+ return string(output), err
+}
+
+// Build runs `bazel build` with a temporary Build Event Protocol (BEP)
+// JSON file attached and returns the local paths of all output files
+// reported in the BEP stream. A plain build failure (exit code 1) is
+// tolerated so that partial results can still be returned.
+func (b *Bazel) Build(ctx context.Context, args ...string) ([]string, error) {
+ jsonFile, err := ioutil.TempFile("", "gopackagesdriver_bep_")
+ if err != nil {
+ return nil, fmt.Errorf("unable to create BEP JSON file: %w", err)
+ }
+ defer func() {
+ jsonFile.Close()
+ os.Remove(jsonFile.Name())
+ }()
+
+ args = append([]string{
+ "--show_result=0",
+ "--build_event_json_file=" + jsonFile.Name(),
+ "--build_event_json_file_path_conversion=no",
+ }, args...)
+ if _, err := b.run(ctx, "build", args...); err != nil {
+ // Ignore a regular build failure to get partial data.
+ // See https://docs.bazel.build/versions/main/guide.html#what-exit-code-will-i-get on
+ // exit codes.
+ var exerr *exec.ExitError
+ if !errors.As(err, &exerr) || exerr.ExitCode() != 1 {
+ return nil, fmt.Errorf("bazel build failed: %w", err)
+ }
+ }
+
+ // The BEP file is a stream of concatenated JSON objects; decode each
+ // one and pick out the namedSetOfFiles events carrying output file URIs.
+ files := make([]string, 0)
+ decoder := json.NewDecoder(jsonFile)
+ for decoder.More() {
+ var namedSet BEPNamedSet
+ if err := decoder.Decode(&namedSet); err != nil {
+ return nil, fmt.Errorf("unable to decode %s: %w", jsonFile.Name(), err)
+ }
+
+ if namedSet.NamedSetOfFiles != nil {
+ for _, f := range namedSet.NamedSetOfFiles.Files {
+ fileUrl, err := url.Parse(f.URI)
+ if err != nil {
+ return nil, fmt.Errorf("unable to parse file URI: %w", err)
+ }
+ // URIs are file:// URLs; convert to an OS-native path.
+ files = append(files, filepath.FromSlash(fileUrl.Path))
+ }
+ }
+ }
+
+ return files, nil
+}
+
+// Query runs `bazel query` and returns the result as one label per slice
+// element. A nil slice (no error) is returned for an empty result.
+func (b *Bazel) Query(ctx context.Context, args ...string) ([]string, error) {
+ output, err := b.run(ctx, "query", args...)
+ if err != nil {
+ return nil, fmt.Errorf("bazel query failed: %w", err)
+ }
+
+ trimmedOutput := strings.TrimSpace(output)
+ if len(trimmedOutput) == 0 {
+ return nil, nil
+ }
+
+ return strings.Split(trimmedOutput, "\n"), nil
+}
+
+// WorkspaceRoot returns the workspace directory this runner was created with.
+func (b *Bazel) WorkspaceRoot() string {
+ return b.workspaceRoot
+}
+
+// ExecutionRoot returns execution_root as reported by `bazel info`.
+func (b *Bazel) ExecutionRoot() string {
+ return b.info["execution_root"]
+}
+
+// OutputBase returns output_base as reported by `bazel info`.
+func (b *Bazel) OutputBase() string {
+ return b.info["output_base"]
+}
diff --git a/go/tools/gopackagesdriver/bazel_json_builder.go b/go/tools/gopackagesdriver/bazel_json_builder.go
new file mode 100644
index 00000000..163be082
--- /dev/null
+++ b/go/tools/gopackagesdriver/bazel_json_builder.go
@@ -0,0 +1,250 @@
+// Copyright 2021 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "bufio"
+ "context"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+ "regexp"
+ "strings"
+)
+
+// BazelJSONBuilder turns go/packages driver requests into bazel queries and
+// aspect-driven builds that produce package JSON files.
+type BazelJSONBuilder struct {
+ bazel *Bazel
+ includeTests bool
+}
+
+// RulesGoStdlibLabel is the label of the stdlib target in the rules_go repo.
+var RulesGoStdlibLabel = rulesGoRepositoryName + "//:stdlib"
+
+var _defaultKinds = []string{"go_library", "go_test", "go_binary"}
+
+// externalRe splits a path under .../external/<repo>/<pkg...>/<file>.go.
+var externalRe = regexp.MustCompile(".*\\/external\\/([^\\/]+)(\\/(.*))?\\/([^\\/]+.go)")
+
+// fileQuery returns a bazel query expression matching the go rules in the
+// same package that directly depend on the given source file.
+func (b *BazelJSONBuilder) fileQuery(filename string) string {
+ label := filename
+
+ if filepath.IsAbs(filename) {
+ label, _ = filepath.Rel(b.bazel.WorkspaceRoot(), filename)
+ } else if strings.HasPrefix(filename, "./") {
+ label = strings.TrimPrefix(filename, "./")
+ }
+
+ if matches := externalRe.FindStringSubmatch(filename); len(matches) == 5 {
+ // If the file lives under an external repository, rewrite the label
+ // to point into that repository (@repo//pkg:file).
+ matches = append(matches[:2], matches[3:]...)
+ label = fmt.Sprintf("@%s//%s", matches[1], strings.Join(matches[2:], ":"))
+ }
+
+ relToBin, err := filepath.Rel(b.bazel.info["output_path"], filename)
+ if err == nil && !strings.HasPrefix(relToBin, "../") {
+ // The file is a generated output under bazel-out.
+ // NOTE(review): assumes relToBin has at least 3 path components
+ // (<config>/bin/<pkg-path>) — parts[2] would panic otherwise; confirm.
+ parts := strings.SplitN(relToBin, string(filepath.Separator), 3)
+ relToBin = parts[2]
+ // We've effectively converted filename from bazel-bin/some/path.go to some/path.go;
+ // Check if a BUILD.bazel files exists under this dir, if not walk up and repeat.
+ relToBin = filepath.Dir(relToBin)
+ _, err = os.Stat(filepath.Join(b.bazel.WorkspaceRoot(), relToBin, "BUILD.bazel"))
+ for errors.Is(err, os.ErrNotExist) && relToBin != "." {
+ relToBin = filepath.Dir(relToBin)
+ _, err = os.Stat(filepath.Join(b.bazel.WorkspaceRoot(), relToBin, "BUILD.bazel"))
+ }
+
+ if err == nil {
+ // return package path found and build all targets (codegen doesn't fall under go_library)
+ // Otherwise fallback to default
+ if relToBin == "." {
+ relToBin = ""
+ }
+ label = fmt.Sprintf("//%s:all", relToBin)
+ // NOTE(review): mutates the package-level additionalKinds slice as a
+ // side effect; repeated calls accumulate "go_.*" entries.
+ additionalKinds = append(additionalKinds, "go_.*")
+ }
+ }
+
+ kinds := append(_defaultKinds, additionalKinds...)
+ return fmt.Sprintf(`kind("%s", same_pkg_direct_rdeps("%s"))`, strings.Join(kinds, "|"), label)
+}
+
+// getKind returns the rule-kind regexp used in query expressions:
+// go_library, plus go_test when test packages were requested.
+func (b *BazelJSONBuilder) getKind() string {
+ kinds := []string{"go_library"}
+ if b.includeTests {
+ kinds = append(kinds, "go_test")
+ }
+
+ return strings.Join(kinds, "|")
+}
+
+// localQuery builds a query for a local path pattern (./..., /abs/path),
+// normalizing it relative to the workspace root.
+func (b *BazelJSONBuilder) localQuery(request string) string {
+ request = path.Clean(request)
+ if filepath.IsAbs(request) {
+ if relPath, err := filepath.Rel(workspaceRoot, request); err == nil {
+ request = relPath
+ }
+ }
+
+ // "..." patterns are already recursive; anything else targets one package.
+ if !strings.HasSuffix(request, "...") {
+ request = fmt.Sprintf("%s:*", request)
+ }
+
+ return fmt.Sprintf(`kind("%s", %s)`, b.getKind(), request)
+}
+
+// packageQuery builds a query matching targets by Go importpath within the
+// configured bazelQueryScope; "/..." requests become a regexp prefix match.
+func (b *BazelJSONBuilder) packageQuery(importPath string) string {
+ if strings.HasSuffix(importPath, "/...") {
+ importPath = fmt.Sprintf(`^%s(/.+)?$`, strings.TrimSuffix(importPath, "/..."))
+ }
+
+ return fmt.Sprintf(
+ `kind("%s", attr(importpath, "%s", deps(%s)))`,
+ b.getKind(),
+ importPath,
+ bazelQueryScope)
+}
+
+// queryFromRequests converts the driver's pattern requests into a single
+// bazel query expression (individual queries joined with "union").
+// Requests that match no handler are dropped; if nothing remains, the
+// stdlib label is returned so the driver still resolves std packages.
+func (b *BazelJSONBuilder) queryFromRequests(requests ...string) string {
+ ret := make([]string, 0, len(requests))
+ for _, request := range requests {
+ result := ""
+ if strings.HasSuffix(request, ".go") {
+ // File queries may arrive bare or as "file=<path>".
+ f := strings.TrimPrefix(request, "file=")
+ result = b.fileQuery(f)
+ } else if isLocalPattern(request) {
+ result = b.localQuery(request)
+ } else if request == "builtin" || request == "std" {
+ // Use the label directly; the previous fmt.Sprintf(label) was a
+ // non-constant format string with no arguments (go vet: printf).
+ result = RulesGoStdlibLabel
+ } else if bazelQueryScope != "" {
+ result = b.packageQuery(request)
+ }
+
+ if result != "" {
+ ret = append(ret, result)
+ }
+ }
+ if len(ret) == 0 {
+ return RulesGoStdlibLabel
+ }
+ return strings.Join(ret, " union ")
+}
+
+// NewBazelJSONBuilder returns a builder bound to the given bazel runner.
+// includeTests controls whether go_test rules are matched by queries.
+func NewBazelJSONBuilder(bazel *Bazel, includeTests bool) (*BazelJSONBuilder, error) {
+ return &BazelJSONBuilder{
+ bazel: bazel,
+ includeTests: includeTests,
+ }, nil
+}
+
+// outputGroupsForMode maps the go/packages LoadMode onto the aspect's
+// output groups; export data is only built when the mode asks for it.
+func (b *BazelJSONBuilder) outputGroupsForMode(mode LoadMode) string {
+ og := "go_pkg_driver_json_file,go_pkg_driver_stdlib_json_file,go_pkg_driver_srcs"
+ if mode&NeedExportsFile != 0 {
+ og += ",go_pkg_driver_export_file"
+ }
+ return og
+}
+
+// query runs the given expression through `bazel query` with noise-reducing
+// flags, returning matching labels.
+func (b *BazelJSONBuilder) query(ctx context.Context, query string) ([]string, error) {
+ queryArgs := concatStringsArrays(bazelQueryFlags, []string{
+ "--ui_event_filters=-info,-stderr",
+ "--noshow_progress",
+ "--order_output=no",
+ "--output=label",
+ "--nodep_deps",
+ "--noimplicit_deps",
+ "--notool_deps",
+ query,
+ })
+ labels, err := b.bazel.Query(ctx, queryArgs...)
+ if err != nil {
+ return nil, fmt.Errorf("unable to query: %w", err)
+ }
+
+ return labels, nil
+}
+
+// Labels resolves the driver's requests to bazel labels, erroring out when
+// nothing matches so callers don't build an empty set.
+func (b *BazelJSONBuilder) Labels(ctx context.Context, requests []string) ([]string, error) {
+ labels, err := b.query(ctx, b.queryFromRequests(requests...))
+ if err != nil {
+ return nil, fmt.Errorf("query failed: %w", err)
+ }
+
+ if len(labels) == 0 {
+ return nil, fmt.Errorf("found no labels matching the requests")
+ }
+
+ return labels, nil
+}
+
+// Build applies go_pkg_info_aspect (plus any additional aspects) to the
+// given labels, builds the output groups implied by mode, and returns the
+// generated *.pkg.json files.
+func (b *BazelJSONBuilder) Build(ctx context.Context, labels []string, mode LoadMode) ([]string, error) {
+ aspects := append(additionalAspects, goDefaultAspect)
+
+ buildArgs := concatStringsArrays([]string{
+ "--experimental_convenience_symlinks=ignore",
+ "--ui_event_filters=-info,-stderr",
+ "--noshow_progress",
+ "--aspects=" + strings.Join(aspects, ","),
+ "--output_groups=" + b.outputGroupsForMode(mode),
+ "--keep_going", // Build all possible packages
+ }, bazelBuildFlags)
+
+ if len(labels) < 100 {
+ buildArgs = append(buildArgs, labels...)
+ } else {
+ // To avoid hitting MAX_ARGS length, write labels to a file and use `--target_pattern_file`
+ targetsFile, err := ioutil.TempFile("", "gopackagesdriver_targets_")
+ if err != nil {
+ return nil, fmt.Errorf("unable to create target pattern file: %w", err)
+ }
+ writer := bufio.NewWriter(targetsFile)
+ // NOTE(review): this deferred Flush is redundant — the explicit
+ // error-checked Flush below runs before the build starts.
+ defer writer.Flush()
+ for _, l := range labels {
+ writer.WriteString(l + "\n")
+ }
+ if err := writer.Flush(); err != nil {
+ return nil, fmt.Errorf("unable to flush data to target pattern file: %w", err)
+ }
+ defer func() {
+ targetsFile.Close()
+ os.Remove(targetsFile.Name())
+ }()
+
+ buildArgs = append(buildArgs, "--target_pattern_file="+targetsFile.Name())
+ }
+ files, err := b.bazel.Build(ctx, buildArgs...)
+ if err != nil {
+ return nil, fmt.Errorf("unable to bazel build %v: %w", buildArgs, err)
+ }
+
+ // Keep only the aspect's package JSON outputs; other BEP-reported files
+ // (sources, export data) are located later via the JSON contents.
+ ret := []string{}
+ for _, f := range files {
+ if strings.HasSuffix(f, ".pkg.json") {
+ ret = append(ret, f)
+ }
+ }
+
+ return ret, nil
+}
+
+// PathResolver returns a function rewriting the placeholder prefixes
+// (__BAZEL_EXECROOT__ etc.) emitted by the aspect into real local paths.
+func (b *BazelJSONBuilder) PathResolver() PathResolverFunc {
+ return func(p string) string {
+ p = strings.Replace(p, "__BAZEL_EXECROOT__", b.bazel.ExecutionRoot(), 1)
+ p = strings.Replace(p, "__BAZEL_WORKSPACE__", b.bazel.WorkspaceRoot(), 1)
+ p = strings.Replace(p, "__BAZEL_OUTPUT_BASE__", b.bazel.OutputBase(), 1)
+ return p
+ }
+}
diff --git a/go/tools/gopackagesdriver/build_context.go b/go/tools/gopackagesdriver/build_context.go
new file mode 100644
index 00000000..dac786b9
--- /dev/null
+++ b/go/tools/gopackagesdriver/build_context.go
@@ -0,0 +1,34 @@
+package main
+
+import (
+ "go/build"
+ "path/filepath"
+ "strings"
+)
+
+// buildContext is the process-wide build.Context used for build-tag matching.
+var buildContext = makeBuildContext()
+
+// makeBuildContext copies build.Default and applies the comma-separated
+// GOTAGS environment variable as build tags.
+// NOTE(review): an empty GOTAGS yields BuildTags == [""] — presumably
+// harmless for MatchFile, but worth confirming.
+func makeBuildContext() *build.Context {
+ bctx := build.Default
+ bctx.BuildTags = strings.Split(getenvDefault("GOTAGS", ""), ",")
+
+ return &bctx
+}
+
+// filterSourceFilesForTags keeps only the files that match the current
+// build constraints, plus extension-less files (see comment below).
+func filterSourceFilesForTags(files []string) []string {
+ ret := make([]string, 0, len(files))
+
+ for _, f := range files {
+ dir, filename := filepath.Split(f)
+ ext := filepath.Ext(f)
+
+ match, _ := buildContext.MatchFile(dir, filename)
+ // MatchFile filters out anything without a file extension. In the
+ // case of CompiledGoFiles (in particular cgo processed files from
+ // the cache), we want them.
+ if match || ext == "" {
+ ret = append(ret, f)
+ }
+ }
+ return ret
+}
diff --git a/go/tools/gopackagesdriver/driver_request.go b/go/tools/gopackagesdriver/driver_request.go
new file mode 100644
index 00000000..db572dcc
--- /dev/null
+++ b/go/tools/gopackagesdriver/driver_request.go
@@ -0,0 +1,91 @@
+// Copyright 2021 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+)
+
+// From https://pkg.go.dev/golang.org/x/tools/go/packages#LoadMode
+type LoadMode int
+
+// Only NeedExportsFile is needed in our case
+const (
+ // NeedName adds Name and PkgPath.
+ NeedName LoadMode = 1 << iota
+
+ // NeedFiles adds GoFiles and OtherFiles.
+ NeedFiles
+
+ // NeedCompiledGoFiles adds CompiledGoFiles.
+ NeedCompiledGoFiles
+
+ // NeedImports adds Imports. If NeedDeps is not set, the Imports field will contain
+ // "placeholder" Packages with only the ID set.
+ NeedImports
+
+ // NeedDeps adds the fields requested by the LoadMode in the packages in Imports.
+ NeedDeps
+
+ // NeedExportsFile adds ExportFile.
+ NeedExportFile
+
+ // NeedTypes adds Types, Fset, and IllTyped.
+ NeedTypes
+
+ // NeedSyntax adds Syntax.
+ NeedSyntax
+
+ // NeedTypesInfo adds TypesInfo.
+ NeedTypesInfo
+
+ // NeedTypesSizes adds TypesSizes.
+ NeedTypesSizes
+
+ // typecheckCgo enables full support for type checking cgo. Requires Go 1.15+.
+ // Modifies CompiledGoFiles and Types, and has no effect on its own.
+ typecheckCgo
+
+ // NeedModule adds Module.
+ NeedModule
+)
+
+// Deprecated: NeedExportsFile is a historical misspelling of NeedExportFile.
+const NeedExportsFile = NeedExportFile
+
+// From https://github.com/golang/tools/blob/v0.1.0/go/packages/external.go#L32
+// Most fields are disabled since there is no need for them
+type DriverRequest struct {
+ Mode LoadMode `json:"mode"`
+ // Env specifies the environment the underlying build system should be run in.
+ // Env []string `json:"env"`
+ // BuildFlags are flags that should be passed to the underlying build system.
+ // BuildFlags []string `json:"build_flags"`
+ // Tests specifies whether the patterns should also return test packages.
+ Tests bool `json:"tests"`
+ // Overlay maps file paths (relative to the driver's working directory) to the byte contents
+ // of overlay files.
+ // Overlay map[string][]byte `json:"overlay"`
+}
+
+// ReadDriverRequest decodes the JSON DriverRequest that go/packages writes
+// to the driver's stdin.
+func ReadDriverRequest(r io.Reader) (*DriverRequest, error) {
+ req := &DriverRequest{}
+ if err := json.NewDecoder(r).Decode(&req); err != nil {
+ return nil, fmt.Errorf("unable to decode driver request: %w", err)
+ }
+ return req, nil
+}
diff --git a/go/tools/gopackagesdriver/flatpackage.go b/go/tools/gopackagesdriver/flatpackage.go
new file mode 100644
index 00000000..9c22132a
--- /dev/null
+++ b/go/tools/gopackagesdriver/flatpackage.go
@@ -0,0 +1,159 @@
+// Copyright 2021 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "go/parser"
+ "go/token"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// ResolvePkgFunc maps an import path to a package ID ("" if unknown).
+type ResolvePkgFunc func(importPath string) string
+
+// Copy and pasted from golang.org/x/tools/go/packages
+type FlatPackagesError struct {
+ Pos string // "file:line:col" or "file:line" or "" or "-"
+ Msg string
+ Kind FlatPackagesErrorKind
+}
+
+type FlatPackagesErrorKind int
+
+const (
+ UnknownError FlatPackagesErrorKind = iota
+ ListError
+ ParseError
+ TypeError
+)
+
+// Error implements the error interface, formatting as "pos: msg".
+func (err FlatPackagesError) Error() string {
+ pos := err.Pos
+ if pos == "" {
+ pos = "-" // like token.Position{}.String()
+ }
+ return pos + ": " + err.Msg
+}
+
+// FlatPackage is the JSON form of Package
+// It drops all the type and syntax fields, and transforms the Imports
+type FlatPackage struct {
+ ID string
+ Name string `json:",omitempty"`
+ PkgPath string `json:",omitempty"`
+ Errors []FlatPackagesError `json:",omitempty"`
+ GoFiles []string `json:",omitempty"`
+ CompiledGoFiles []string `json:",omitempty"`
+ OtherFiles []string `json:",omitempty"`
+ ExportFile string `json:",omitempty"`
+ Imports map[string]string `json:",omitempty"`
+ Standard bool `json:",omitempty"`
+}
+
+type (
+ // PackageFunc is the visitor callback for WalkFlatPackagesFromJSON.
+ PackageFunc func(pkg *FlatPackage)
+ // PathResolverFunc rewrites a serialized path into a real local path.
+ PathResolverFunc func(path string) string
+)
+
+// resolvePathsInPlace applies prf to every element of paths, mutating it.
+func resolvePathsInPlace(prf PathResolverFunc, paths []string) {
+ for i, path := range paths {
+ paths[i] = prf(path)
+ }
+}
+
+// WalkFlatPackagesFromJSON streams the concatenated JSON package objects in
+// jsonFile, invoking onPkg for each decoded FlatPackage.
+func WalkFlatPackagesFromJSON(jsonFile string, onPkg PackageFunc) error {
+ f, err := os.Open(jsonFile)
+ if err != nil {
+ return fmt.Errorf("unable to open package JSON file: %w", err)
+ }
+ defer f.Close()
+
+ decoder := json.NewDecoder(f)
+ for decoder.More() {
+ pkg := &FlatPackage{}
+ if err := decoder.Decode(&pkg); err != nil {
+ return fmt.Errorf("unable to decode package in %s: %w", f.Name(), err)
+ }
+
+ onPkg(pkg)
+ }
+ return nil
+}
+
+// ResolvePaths rewrites all serialized file paths in the package via prf.
+func (fp *FlatPackage) ResolvePaths(prf PathResolverFunc) error {
+ resolvePathsInPlace(prf, fp.CompiledGoFiles)
+ resolvePathsInPlace(prf, fp.GoFiles)
+ resolvePathsInPlace(prf, fp.OtherFiles)
+ fp.ExportFile = prf(fp.ExportFile)
+ return nil
+}
+
+// FilterFilesForBuildTags filters the source files given the current build
+// tags.
+func (fp *FlatPackage) FilterFilesForBuildTags() {
+ fp.GoFiles = filterSourceFilesForTags(fp.GoFiles)
+ fp.CompiledGoFiles = filterSourceFilesForTags(fp.CompiledGoFiles)
+}
+
+// IsStdlib reports whether this package belongs to the Go standard library.
+func (fp *FlatPackage) IsStdlib() bool {
+ return fp.Standard
+}
+
+// ResolveImports parses the package's compiled sources for import
+// declarations and fills in any missing entries of fp.Imports via resolve
+// (used to wire up stdlib imports bazel doesn't know about). It also
+// backfills fp.Name from the sources when absent.
+// NOTE(review): assumes fp.Imports is non-nil for non-stdlib packages — a
+// package serialized without an Imports map would make the write below
+// panic; confirm the aspect always emits the map.
+func (fp *FlatPackage) ResolveImports(resolve ResolvePkgFunc) error {
+ // Stdlib packages are already complete import wise
+ if fp.IsStdlib() {
+ return nil
+ }
+
+ fset := token.NewFileSet()
+
+ for _, file := range fp.CompiledGoFiles {
+ f, err := parser.ParseFile(fset, file, nil, parser.ImportsOnly)
+ if err != nil {
+ return err
+ }
+ // If the name is not provided, fetch it from the sources
+ if fp.Name == "" {
+ fp.Name = f.Name.Name
+ }
+
+ for _, rawImport := range f.Imports {
+ imp, err := strconv.Unquote(rawImport.Path.Value)
+ if err != nil {
+ continue
+ }
+ // We don't handle CGo for now
+ if imp == "C" {
+ continue
+ }
+ if _, ok := fp.Imports[imp]; ok {
+ continue
+ }
+
+ if pkgID := resolve(imp); pkgID != "" {
+ fp.Imports[imp] = pkgID
+ }
+ }
+ }
+
+ return nil
+}
+
+// IsRoot reports whether the package lives in the main workspace
+// (its label starts with "//").
+func (fp *FlatPackage) IsRoot() bool {
+ return strings.HasPrefix(fp.ID, "//")
+}
diff --git a/go/tools/gopackagesdriver/json_packages_driver.go b/go/tools/gopackagesdriver/json_packages_driver.go
new file mode 100644
index 00000000..9bbf3408
--- /dev/null
+++ b/go/tools/gopackagesdriver/json_packages_driver.go
@@ -0,0 +1,59 @@
+// Copyright 2021 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "fmt"
+ "go/types"
+)
+
+// JSONPackagesDriver serves driver responses from a registry of packages
+// loaded out of aspect-generated JSON files.
+type JSONPackagesDriver struct {
+ registry *PackageRegistry
+}
+
+// NewJSONPackagesDriver loads every package JSON file into a registry,
+// rewrites placeholder paths via prf, and resolves stdlib imports.
+func NewJSONPackagesDriver(jsonFiles []string, prf PathResolverFunc) (*JSONPackagesDriver, error) {
+ jpd := &JSONPackagesDriver{
+ registry: NewPackageRegistry(),
+ }
+
+ for _, f := range jsonFiles {
+ if err := WalkFlatPackagesFromJSON(f, func(pkg *FlatPackage) {
+ jpd.registry.Add(pkg)
+ }); err != nil {
+ return nil, fmt.Errorf("unable to walk json: %w", err)
+ }
+ }
+
+ if err := jpd.registry.ResolvePaths(prf); err != nil {
+ return nil, fmt.Errorf("unable to resolve paths: %w", err)
+ }
+
+ // Fixed copy-pasted message: this is the import-resolution step, not
+ // path resolution.
+ if err := jpd.registry.ResolveImports(); err != nil {
+ return nil, fmt.Errorf("unable to resolve imports: %w", err)
+ }
+
+ return jpd, nil
+}
+
+// GetResponse walks the registry from the given root labels and packages
+// the reachable set into a driverResponse.
+// NOTE(review): Sizes are hardcoded to gc/amd64 — confirm this matches the
+// configured bazel target platform.
+func (b *JSONPackagesDriver) GetResponse(labels []string) *driverResponse {
+ rootPkgs, packages := b.registry.Match(labels)
+
+ return &driverResponse{
+ NotHandled: false,
+ Sizes: types.SizesFor("gc", "amd64").(*types.StdSizes),
+ Roots: rootPkgs,
+ Packages: packages,
+ }
+}
diff --git a/go/tools/gopackagesdriver/main.go b/go/tools/gopackagesdriver/main.go
new file mode 100644
index 00000000..fea2d2c1
--- /dev/null
+++ b/go/tools/gopackagesdriver/main.go
@@ -0,0 +1,126 @@
+// Copyright 2021 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "go/types"
+ "os"
+ "strings"
+)
+
+// driverResponse mirrors golang.org/x/tools/go/packages' driver protocol;
+// it is JSON-encoded to stdout for gopls / go/packages to consume.
+type driverResponse struct {
+ // NotHandled is returned if the request can't be handled by the current
+ // driver. If an external driver returns a response with NotHandled, the
+ // rest of the driverResponse is ignored, and go/packages will fallback
+ // to the next driver. If go/packages is extended in the future to support
+ // lists of multiple drivers, go/packages will fall back to the next driver.
+ NotHandled bool
+
+ // Sizes, if not nil, is the types.Sizes to use when type checking.
+ Sizes *types.StdSizes
+
+ // Roots is the set of package IDs that make up the root packages.
+ // We have to encode this separately because when we encode a single package
+ // we cannot know if it is one of the roots as that requires knowledge of the
+ // graph it is part of.
+ Roots []string `json:",omitempty"`
+
+ // Packages is the full set of packages in the graph.
+ // The packages are not connected into a graph.
+ // The Imports if populated will be stubs that only have their ID set.
+ // Imports will be connected and then type and syntax information added in a
+ // later pass (see refine).
+ Packages []*FlatPackage
+}
+
+var (
+ // Injected via x_defs.
+
+ rulesGoRepositoryName string
+ goDefaultAspect = rulesGoRepositoryName + "//go/tools/gopackagesdriver:aspect.bzl%go_pkg_info_aspect"
+ // The remaining knobs are read from GOPACKAGESDRIVER_* environment
+ // variables so editors can configure the driver without flags.
+ bazelBin = getenvDefault("GOPACKAGESDRIVER_BAZEL", "bazel")
+ bazelStartupFlags = strings.Fields(os.Getenv("GOPACKAGESDRIVER_BAZEL_FLAGS"))
+ bazelQueryFlags = strings.Fields(os.Getenv("GOPACKAGESDRIVER_BAZEL_QUERY_FLAGS"))
+ bazelQueryScope = getenvDefault("GOPACKAGESDRIVER_BAZEL_QUERY_SCOPE", "")
+ bazelBuildFlags = strings.Fields(os.Getenv("GOPACKAGESDRIVER_BAZEL_BUILD_FLAGS"))
+ workspaceRoot = os.Getenv("BUILD_WORKSPACE_DIRECTORY")
+ additionalAspects = strings.Fields(os.Getenv("GOPACKAGESDRIVER_BAZEL_ADDTL_ASPECTS"))
+ additionalKinds = strings.Fields(os.Getenv("GOPACKAGESDRIVER_BAZEL_KINDS"))
+ // emptyResponse is returned on any failure so go/packages falls back
+ // to the next driver instead of erroring out.
+ emptyResponse = &driverResponse{
+ NotHandled: true,
+ Sizes: types.SizesFor("gc", "amd64").(*types.StdSizes),
+ Roots: []string{},
+ Packages: []*FlatPackage{},
+ }
+)
+
+// run drives the whole pipeline: read the DriverRequest from stdin, query
+// bazel for matching labels, build the aspect's JSON outputs, and load them
+// into a response. On any failure it returns emptyResponse alongside the
+// error so main can still emit a valid (NotHandled) reply.
+func run() (*driverResponse, error) {
+ ctx, cancel := signalContext(context.Background(), os.Interrupt)
+ defer cancel()
+
+ // Package patterns are passed as command-line arguments by go/packages.
+ queries := os.Args[1:]
+
+ request, err := ReadDriverRequest(os.Stdin)
+ if err != nil {
+ return emptyResponse, fmt.Errorf("unable to read request: %w", err)
+ }
+
+ bazel, err := NewBazel(ctx, bazelBin, workspaceRoot, bazelStartupFlags)
+ if err != nil {
+ return emptyResponse, fmt.Errorf("unable to create bazel instance: %w", err)
+ }
+
+ bazelJsonBuilder, err := NewBazelJSONBuilder(bazel, request.Tests)
+ if err != nil {
+ return emptyResponse, fmt.Errorf("unable to build JSON files: %w", err)
+ }
+
+ labels, err := bazelJsonBuilder.Labels(ctx, queries)
+ if err != nil {
+ return emptyResponse, fmt.Errorf("unable to lookup package: %w", err)
+ }
+
+ jsonFiles, err := bazelJsonBuilder.Build(ctx, labels, request.Mode)
+ if err != nil {
+ return emptyResponse, fmt.Errorf("unable to build JSON files: %w", err)
+ }
+
+ driver, err := NewJSONPackagesDriver(jsonFiles, bazelJsonBuilder.PathResolver())
+ if err != nil {
+ return emptyResponse, fmt.Errorf("unable to load JSON files: %w", err)
+ }
+
+ // Note: we are returning all files required to build a specific package.
+ // For file queries (`file=`), this means that the CompiledGoFiles will
+ // include more than the only file being specified.
+ return driver.GetResponse(labels), nil
+}
+
+// main always encodes a response (possibly the NotHandled emptyResponse)
+// before reporting errors, and always exits 0 (see comment below).
+func main() {
+ response, err := run()
+ if err := json.NewEncoder(os.Stdout).Encode(response); err != nil {
+ fmt.Fprintf(os.Stderr, "unable to encode response: %v", err)
+ }
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "error: %v", err)
+ // gopls will check the packages driver exit code, and if there is an
+ // error, it will fall back to go list. Obviously we don't want that,
+ // so force a 0 exit code.
+ os.Exit(0)
+ }
+}
diff --git a/go/tools/gopackagesdriver/packageregistry.go b/go/tools/gopackagesdriver/packageregistry.go
new file mode 100644
index 00000000..05e620d5
--- /dev/null
+++ b/go/tools/gopackagesdriver/packageregistry.go
@@ -0,0 +1,111 @@
+// Copyright 2021 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "fmt"
+ "strings"
+)
+
+// PackageRegistry indexes FlatPackages by label ID and keeps a secondary
+// index from stdlib import path to package ID.
+type PackageRegistry struct {
+ packagesByID map[string]*FlatPackage
+ stdlib map[string]string
+}
+
+// NewPackageRegistry returns a registry pre-populated with pkgs.
+func NewPackageRegistry(pkgs ...*FlatPackage) *PackageRegistry {
+ pr := &PackageRegistry{
+ packagesByID: map[string]*FlatPackage{},
+ stdlib: map[string]string{},
+ }
+ pr.Add(pkgs...)
+ return pr
+}
+
+// Add registers pkgs, indexing stdlib packages by import path as well.
+// Later additions with the same ID overwrite earlier ones.
+func (pr *PackageRegistry) Add(pkgs ...*FlatPackage) *PackageRegistry {
+ for _, pkg := range pkgs {
+ pr.packagesByID[pkg.ID] = pkg
+
+ if pkg.IsStdlib() {
+ pr.stdlib[pkg.PkgPath] = pkg.ID
+ }
+ }
+ return pr
+}
+
+// ResolvePaths rewrites file paths in every package via prf and then
+// filters sources against the current build tags.
+func (pr *PackageRegistry) ResolvePaths(prf PathResolverFunc) error {
+ for _, pkg := range pr.packagesByID {
+ pkg.ResolvePaths(prf)
+ pkg.FilterFilesForBuildTags()
+ }
+ return nil
+}
+
+// ResolveImports adds stdlib imports to packages. This is required because
+// stdlib packages are not part of the JSON file exports as bazel is unaware of
+// them.
+func (pr *PackageRegistry) ResolveImports() error {
+ resolve := func(importPath string) string {
+ if pkgID, ok := pr.stdlib[importPath]; ok {
+ return pkgID
+ }
+
+ return ""
+ }
+
+ for _, pkg := range pr.packagesByID {
+ if err := pkg.ResolveImports(resolve); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// walk accumulates into acc the package for root and, recursively, every
+// package reachable through its Imports.
+// NOTE(review): assumes root is present in packagesByID — a missing entry
+// yields a nil pkg and a panic on pkg.ID; confirm all queried labels have
+// a corresponding JSON package.
+func (pr *PackageRegistry) walk(acc map[string]*FlatPackage, root string) {
+ pkg := pr.packagesByID[root]
+
+ acc[pkg.ID] = pkg
+ for _, pkgID := range pkg.Imports {
+ if _, ok := acc[pkgID]; !ok {
+ pr.walk(acc, pkgID)
+ }
+ }
+}
+
+// Match returns the canonicalized root labels and the transitive closure of
+// packages reachable from them. Labels are prefixed with "@" to match the
+// canonical form used as package IDs.
+func (pr *PackageRegistry) Match(labels []string) ([]string, []*FlatPackage) {
+ roots := map[string]struct{}{}
+
+ for _, label := range labels {
+ if !strings.HasPrefix(label, "@") {
+ label = fmt.Sprintf("@%s", label)
+ }
+
+ roots[label] = struct{}{}
+ }
+
+ walkedPackages := map[string]*FlatPackage{}
+ retRoots := make([]string, 0, len(roots))
+ for rootPkg := range roots {
+ retRoots = append(retRoots, rootPkg)
+ pr.walk(walkedPackages, rootPkg)
+ }
+
+ retPkgs := make([]*FlatPackage, 0, len(walkedPackages))
+ for _, pkg := range walkedPackages {
+ retPkgs = append(retPkgs, pkg)
+ }
+
+ return retRoots, retPkgs
+}
diff --git a/go/tools/gopackagesdriver/utils.go b/go/tools/gopackagesdriver/utils.go
new file mode 100644
index 00000000..d5524fdd
--- /dev/null
+++ b/go/tools/gopackagesdriver/utils.go
@@ -0,0 +1,77 @@
+// Copyright 2021 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "context"
+ "fmt"
+ "go/build"
+ "os"
+ "os/signal"
+ "path"
+ "path/filepath"
+)
+
+// getenvDefault returns the value of key, or defaultValue if key is unset
+// (an empty-but-set variable is returned as "").
+func getenvDefault(key, defaultValue string) string {
+ if v, ok := os.LookupEnv(key); ok {
+ return v
+ }
+ return defaultValue
+}
+
+// concatStringsArrays flattens several string slices into a fresh slice.
+func concatStringsArrays(values ...[]string) []string {
+ ret := []string{}
+ for _, v := range values {
+ ret = append(ret, v...)
+ }
+ return ret
+}
+
+// ensureAbsolutePathFromWorkspace makes path absolute by anchoring relative
+// paths at the workspace root.
+func ensureAbsolutePathFromWorkspace(path string) string {
+ if filepath.IsAbs(path) {
+ return path
+ }
+ return filepath.Join(workspaceRoot, path)
+}
+
+// signalContext returns a context cancelled on the first of the given OS
+// signals (or when the returned stop function is called).
+func signalContext(parentCtx context.Context, signals ...os.Signal) (ctx context.Context, stop context.CancelFunc) {
+ ctx, cancel := context.WithCancel(parentCtx)
+ ch := make(chan os.Signal, 1)
+ go func() {
+ select {
+ case <-ch:
+ cancel()
+ case <-ctx.Done():
+ }
+ }()
+ signal.Notify(ch, signals...)
+
+ return ctx, cancel
+}
+
+// isLocalPattern reports whether pattern is a filesystem-style request
+// ("./...", "../pkg") or an absolute path, as opposed to an import path.
+func isLocalPattern(pattern string) bool {
+ return build.IsLocalImport(pattern) || filepath.IsAbs(pattern)
+}
+
+// packageID converts a cleaned local pattern into a bazel-style "//pkg"
+// identifier, relativizing absolute paths against the workspace root.
+func packageID(pattern string) string {
+ pattern = path.Clean(pattern)
+ if filepath.IsAbs(pattern) {
+ if relPath, err := filepath.Rel(workspaceRoot, pattern); err == nil {
+ pattern = relPath
+ }
+ }
+
+ return fmt.Sprintf("//%s", pattern)
+}
diff --git a/go/tools/internal/stdlib_tags/BUILD.bazel b/go/tools/internal/stdlib_tags/BUILD.bazel
new file mode 100644
index 00000000..37b8f2a4
--- /dev/null
+++ b/go/tools/internal/stdlib_tags/BUILD.bazel
@@ -0,0 +1,14 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
+
+go_library(
+ name = "stdlib_tags_lib",
+ srcs = ["stdlib_tags.go"],
+ importpath = "github.com/bazelbuild/rules_go/go/tools/internal/stdlib_tags",
+ visibility = ["//visibility:private"],
+)
+
+go_binary(
+ name = "stdlib_tags",
+ embed = [":stdlib_tags_lib"],
+ visibility = ["//go/tools:__subpackages__"],
+)
diff --git a/go/tools/internal/stdlib_tags/stdlib_tags.go b/go/tools/internal/stdlib_tags/stdlib_tags.go
new file mode 100644
index 00000000..8bd507af
--- /dev/null
+++ b/go/tools/internal/stdlib_tags/stdlib_tags.go
@@ -0,0 +1,174 @@
+package main
+
+import (
+ "bufio"
+ "fmt"
+ "go/build/constraint"
+ "log"
+ "os"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strings"
+)
+
+var goVersionRegex = regexp.MustCompile(`^go1.(\d+)$`)
+
+// Used to update the list of tags affecting the standard library kept in
+// transitions.bzl.
+func main() {
+ if len(os.Args) < 2 {
+ log.Fatal("usage: stdlib_tags <go SDK src directory>...")
+ }
+
+ filteredTags, err := extractBuildTags(os.Args[1:]...)
+ if err != nil {
+ log.Fatal(err.Error())
+ }
+
+ fmt.Printf("_TAG_AFFECTS_STDLIB = {\n")
+ for _, tag := range filteredTags {
+ fmt.Printf(" %q: None,\n", tag)
+ }
+ fmt.Printf("}\n")
+}
+
+func extractBuildTags(sdkPaths ...string) ([]string, error) {
+ tags := make(map[string]struct{})
+ for _, dir := range sdkPaths {
+ err := filepath.WalkDir(dir, func(path string, d os.DirEntry, err error) error {
+ if d.IsDir() {
+ if d.Name() == "testdata" {
+ return filepath.SkipDir
+ }
+ return nil
+ }
+ if filepath.Ext(path) != ".go" {
+ return nil
+ }
+ if strings.HasSuffix(filepath.Base(path), "_test.go") {
+ return nil
+ }
+ return walkFile(path, tags)
+ })
+ if err != nil {
+ return nil, fmt.Errorf("%s: %w", dir, err)
+ }
+ }
+
+ filteredTags := make([]string, 0, len(tags))
+ for tag := range tags {
+ if !shouldExclude(tag) {
+ filteredTags = append(filteredTags, tag)
+ }
+ }
+ sort.Strings(filteredTags)
+
+ return filteredTags, nil
+}
+
+func shouldExclude(tag string) bool {
+ // Set via CGO_ENABLED
+ return tag == "cgo" ||
+ // Set via GOARCH and GOOS
+ knownOS[tag] || knownArch[tag] || tag == "unix" ||
+ // Set via GOEXPERIMENT and GOAMD64
+ strings.HasPrefix(tag, "goexperiment.") || strings.HasPrefix(tag, "amd64.") ||
+ // Set implicitly
+ goVersionRegex.MatchString(tag)
+}
+
+func walkFile(path string, tags map[string]struct{}) error {
+ file, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+	defer file.Close()
+ scanner := bufio.NewScanner(file)
+ // The Go SDK contains some very long lines in vendored files (minified JS).
+ scanner.Buffer(make([]byte, 0, 128*1024), 1024*1024)
+ for scanner.Scan() {
+ line := scanner.Text()
+ if !isConstraint(line) {
+ continue
+ }
+ c, err := constraint.Parse(line)
+ if err != nil {
+ continue
+ }
+ walkConstraint(c, tags)
+ }
+
+ if err = scanner.Err(); err != nil {
+ return fmt.Errorf("%s: %w", path, err)
+ }
+ return nil
+}
+
+func walkConstraint(c constraint.Expr, tags map[string]struct{}) {
+ switch c.(type) {
+ case *constraint.AndExpr:
+ walkConstraint(c.(*constraint.AndExpr).X, tags)
+ walkConstraint(c.(*constraint.AndExpr).Y, tags)
+ case *constraint.OrExpr:
+ walkConstraint(c.(*constraint.OrExpr).X, tags)
+ walkConstraint(c.(*constraint.OrExpr).Y, tags)
+ case *constraint.NotExpr:
+ walkConstraint(c.(*constraint.NotExpr).X, tags)
+ case *constraint.TagExpr:
+ tags[c.(*constraint.TagExpr).Tag] = struct{}{}
+ }
+}
+
+func isConstraint(line string) bool {
+ return constraint.IsPlusBuild(line) || constraint.IsGoBuild(line)
+}
+
+// Taken from
+// https://github.com/golang/go/blob/3d5391ed87d813110e10b954c62bf7ed578b591f/src/go/build/syslist.go
+var knownOS = map[string]bool{
+ "aix": true,
+ "android": true,
+ "darwin": true,
+ "dragonfly": true,
+ "freebsd": true,
+ "hurd": true,
+ "illumos": true,
+ "ios": true,
+ "js": true,
+ "linux": true,
+ "nacl": true,
+ "netbsd": true,
+ "openbsd": true,
+ "plan9": true,
+ "solaris": true,
+ "windows": true,
+ "zos": true,
+}
+
+var knownArch = map[string]bool{
+ "386": true,
+ "amd64": true,
+ "amd64p32": true,
+ "arm": true,
+ "armbe": true,
+ "arm64": true,
+ "arm64be": true,
+ "loong64": true,
+ "mips": true,
+ "mipsle": true,
+ "mips64": true,
+ "mips64le": true,
+ "mips64p32": true,
+ "mips64p32le": true,
+ "ppc": true,
+ "ppc64": true,
+ "ppc64le": true,
+ "riscv": true,
+ "riscv64": true,
+ "s390": true,
+ "s390x": true,
+ "sparc": true,
+ "sparc64": true,
+ "wasm": true,
+}
diff --git a/go/tools/internal/txtar/BUILD.bazel b/go/tools/internal/txtar/BUILD.bazel
new file mode 100644
index 00000000..d53e0582
--- /dev/null
+++ b/go/tools/internal/txtar/BUILD.bazel
@@ -0,0 +1,20 @@
+load("//go:def.bzl", "go_library", "go_test")
+
+go_library(
+ name = "txtar",
+ srcs = ["archive.go"],
+ importpath = "github.com/bazelbuild/rules_go/go/tools/internal/txtar",
+ visibility = ["//go/tools:__subpackages__"],
+)
+
+go_test(
+ name = "txtar_test",
+ srcs = ["archive_test.go"],
+ embed = [":txtar"],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":txtar",
+ visibility = ["//go/tools:__subpackages__"],
+)
diff --git a/go/tools/internal/txtar/archive.go b/go/tools/internal/txtar/archive.go
new file mode 100644
index 00000000..c384f33b
--- /dev/null
+++ b/go/tools/internal/txtar/archive.go
@@ -0,0 +1,140 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package txtar implements a trivial text-based file archive format.
+//
+// The goals for the format are:
+//
+// - be trivial enough to create and edit by hand.
+// - be able to store trees of text files describing go command test cases.
+// - diff nicely in git history and code reviews.
+//
+// Non-goals include being a completely general archive format,
+// storing binary data, storing file modes, storing special files like
+// symbolic links, and so on.
+//
+// Txtar format
+//
+// A txtar archive is zero or more comment lines and then a sequence of file entries.
+// Each file entry begins with a file marker line of the form "-- FILENAME --"
+// and is followed by zero or more file content lines making up the file data.
+// The comment or file content ends at the next file marker line.
+// The file marker line must begin with the three-byte sequence "-- "
+// and end with the three-byte sequence " --", but the enclosed
+// file name can be surrounded by additional white space,
+// all of which is stripped.
+//
+// If the txtar file is missing a trailing newline on the final line,
+// parsers should consider a final newline to be present anyway.
+//
+// There are no possible syntax errors in a txtar archive.
+package txtar
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "strings"
+)
+
+// An Archive is a collection of files.
+type Archive struct {
+ Comment []byte
+ Files []File
+}
+
+// A File is a single file in an archive.
+type File struct {
+ Name string // name of file ("foo/bar.txt")
+ Data []byte // text content of file
+}
+
+// Format returns the serialized form of an Archive.
+// It is assumed that the Archive data structure is well-formed:
+// a.Comment and all a.File[i].Data contain no file marker lines,
+// and all a.File[i].Name is non-empty.
+func Format(a *Archive) []byte {
+ var buf bytes.Buffer
+ buf.Write(fixNL(a.Comment))
+ for _, f := range a.Files {
+ fmt.Fprintf(&buf, "-- %s --\n", f.Name)
+ buf.Write(fixNL(f.Data))
+ }
+ return buf.Bytes()
+}
+
+// ParseFile parses the named file as an archive.
+func ParseFile(file string) (*Archive, error) {
+ data, err := ioutil.ReadFile(file)
+ if err != nil {
+ return nil, err
+ }
+ return Parse(data), nil
+}
+
+// Parse parses the serialized form of an Archive.
+// The returned Archive holds slices of data.
+func Parse(data []byte) *Archive {
+ a := new(Archive)
+ var name string
+ a.Comment, name, data = findFileMarker(data)
+ for name != "" {
+ f := File{name, nil}
+ f.Data, name, data = findFileMarker(data)
+ a.Files = append(a.Files, f)
+ }
+ return a
+}
+
+var (
+ newlineMarker = []byte("\n-- ")
+ marker = []byte("-- ")
+ markerEnd = []byte(" --")
+)
+
+// findFileMarker finds the next file marker in data,
+// extracts the file name, and returns the data before the marker,
+// the file name, and the data after the marker.
+// If there is no next marker, findFileMarker returns before = fixNL(data), name = "", after = nil.
+func findFileMarker(data []byte) (before []byte, name string, after []byte) {
+ var i int
+ for {
+ if name, after = isMarker(data[i:]); name != "" {
+ return data[:i], name, after
+ }
+ j := bytes.Index(data[i:], newlineMarker)
+ if j < 0 {
+ return fixNL(data), "", nil
+ }
+ i += j + 1 // positioned at start of new possible marker
+ }
+}
+
+// isMarker checks whether data begins with a file marker line.
+// If so, it returns the name from the line and the data after the line.
+// Otherwise it returns name == "" with an unspecified after.
+func isMarker(data []byte) (name string, after []byte) {
+ if !bytes.HasPrefix(data, marker) {
+ return "", nil
+ }
+ if i := bytes.IndexByte(data, '\n'); i >= 0 {
+ data, after = data[:i], data[i+1:]
+ }
+ if !bytes.HasSuffix(data, markerEnd) {
+ return "", nil
+ }
+ return strings.TrimSpace(string(data[len(marker) : len(data)-len(markerEnd)])), after
+}
+
+// If data is empty or ends in \n, fixNL returns data.
+// Otherwise fixNL returns a new slice consisting of data with a final \n added.
+func fixNL(data []byte) []byte {
+ if len(data) == 0 || data[len(data)-1] == '\n' {
+ return data
+ }
+ d := make([]byte, len(data)+1)
+ copy(d, data)
+ d[len(data)] = '\n'
+ return d
+}
diff --git a/go/tools/internal/txtar/archive_test.go b/go/tools/internal/txtar/archive_test.go
new file mode 100644
index 00000000..3f734f67
--- /dev/null
+++ b/go/tools/internal/txtar/archive_test.go
@@ -0,0 +1,67 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package txtar
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "testing"
+)
+
+var tests = []struct {
+ name string
+ text string
+ parsed *Archive
+}{
+ {
+ name: "basic",
+ text: `comment1
+comment2
+-- file1 --
+File 1 text.
+-- foo ---
+More file 1 text.
+-- file 2 --
+File 2 text.
+-- empty --
+-- noNL --
+hello world`,
+ parsed: &Archive{
+ Comment: []byte("comment1\ncomment2\n"),
+ Files: []File{
+ {"file1", []byte("File 1 text.\n-- foo ---\nMore file 1 text.\n")},
+ {"file 2", []byte("File 2 text.\n")},
+ {"empty", []byte{}},
+ {"noNL", []byte("hello world\n")},
+ },
+ },
+ },
+}
+
+func Test(t *testing.T) {
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ a := Parse([]byte(tt.text))
+ if !reflect.DeepEqual(a, tt.parsed) {
+ t.Fatalf("Parse: wrong output:\nhave:\n%s\nwant:\n%s", shortArchive(a), shortArchive(tt.parsed))
+ }
+ text := Format(a)
+ a = Parse(text)
+ if !reflect.DeepEqual(a, tt.parsed) {
+ t.Fatalf("Parse after Format: wrong output:\nhave:\n%s\nwant:\n%s", shortArchive(a), shortArchive(tt.parsed))
+ }
+ })
+ }
+}
+
+func shortArchive(a *Archive) string {
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "comment: %q\n", a.Comment)
+ for _, f := range a.Files {
+ fmt.Fprintf(&buf, "file %q: %q\n", f.Name, f.Data)
+ }
+ return buf.String()
+}
diff --git a/go/tools/releaser/BUILD.bazel b/go/tools/releaser/BUILD.bazel
new file mode 100644
index 00000000..e750c67b
--- /dev/null
+++ b/go/tools/releaser/BUILD.bazel
@@ -0,0 +1,37 @@
+load("//go:def.bzl", "go_binary", "go_library", "go_test")
+
+go_binary(
+ name = "releaser",
+ embed = [":releaser_lib"],
+ visibility = ["//visibility:public"],
+)
+
+go_library(
+ name = "releaser_lib",
+ srcs = [
+ "boilerplate.go",
+ "file.go",
+ "git.go",
+ "github.go",
+ "prepare.go",
+ "releaser.go",
+ "run.go",
+ "upgradedep.go",
+ ],
+ importpath = "github.com/bazelbuild/rules_go/go/tools/releaser",
+ visibility = ["//visibility:private"],
+ deps = [
+ "@com_github_bazelbuild_buildtools//build:go_default_library",
+ "@com_github_google_go_github_v36//github",
+ "@org_golang_x_mod//semver",
+ "@org_golang_x_oauth2//:oauth2",
+ "@org_golang_x_sync//errgroup",
+ ],
+)
+
+go_test(
+ name = "releaser_test",
+ srcs = ["upgradedep_test.go"],
+ embed = [":releaser_lib"],
+ deps = ["@com_github_bazelbuild_buildtools//build:go_default_library"],
+)
diff --git a/go/tools/releaser/boilerplate.go b/go/tools/releaser/boilerplate.go
new file mode 100644
index 00000000..828934a5
--- /dev/null
+++ b/go/tools/releaser/boilerplate.go
@@ -0,0 +1,78 @@
+// Copyright 2021 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "sort"
+
+ "golang.org/x/mod/semver"
+)
+
+func genBoilerplate(version, shasum, goVersion string) string {
+ return fmt.Sprintf(`load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
+
+http_archive(
+ name = "io_bazel_rules_go",
+ sha256 = "%[2]s",
+ urls = [
+ "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/%[1]s/rules_go-%[1]s.zip",
+ "https://github.com/bazelbuild/rules_go/releases/download/%[1]s/rules_go-%[1]s.zip",
+ ],
+)
+
+load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")
+
+go_rules_dependencies()
+
+go_register_toolchains(version = "%[3]s")`, version, shasum, goVersion)
+}
+
+func findLatestGoVersion() (v string, err error) {
+ defer func() {
+ if err != nil {
+ err = fmt.Errorf("finding latest go version: %w", err)
+ }
+ }()
+ resp, err := http.Get("https://golang.org/dl/?mode=json")
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+ data, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return "", err
+ }
+ type version struct {
+ Version string
+ }
+ var versions []version
+ if err := json.Unmarshal(data, &versions); err != nil {
+ return "", err
+ }
+ if len(versions) == 0 {
+ return "", errors.New("no versions found")
+ }
+ sort.Slice(versions, func(i, j int) bool {
+ vi := "v" + versions[i].Version[len("go"):]
+ vj := "v" + versions[j].Version[len("go"):]
+ return semver.Compare(vi, vj) > 0
+ })
+ return versions[0].Version[len("go"):], nil
+}
diff --git a/go/tools/releaser/file.go b/go/tools/releaser/file.go
new file mode 100644
index 00000000..e98fc1c1
--- /dev/null
+++ b/go/tools/releaser/file.go
@@ -0,0 +1,301 @@
+// Copyright 2021 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "archive/tar"
+ "archive/zip"
+ "compress/gzip"
+ "context"
+ "crypto/sha256"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+ "sync"
+)
+
+var repoRootState = struct {
+ once sync.Once
+ dir string
+ err error
+}{}
+
+// repoRoot returns the workspace root directory. If this program was invoked
+// with 'bazel run', repoRoot returns the BUILD_WORKSPACE_DIRECTORY environment
+// variable. Otherwise, repoRoot walks up the directory tree and finds a
+// WORKSPACE file.
+func repoRoot() (string, error) {
+ repoRootState.once.Do(func() {
+ if wsDir := os.Getenv("BUILD_WORKSPACE_DIRECTORY"); wsDir != "" {
+ repoRootState.dir = wsDir
+ return
+ }
+ dir, err := os.Getwd()
+ if err != nil {
+ repoRootState.err = err
+ return
+ }
+ for {
+ _, err := os.Stat(filepath.Join(dir, "WORKSPACE"))
+ if err == nil {
+ repoRootState.dir = dir
+ return
+ }
+			if !errors.Is(err, os.ErrNotExist) {
+ repoRootState.err = err
+ return
+ }
+ parent := filepath.Dir(dir)
+ if parent == dir {
+ repoRootState.err = errors.New("could not find workspace directory")
+ return
+ }
+ dir = parent
+ }
+ })
+ return repoRootState.dir, repoRootState.err
+}
+
+// extractArchive extracts a zip or tar.gz archive opened in f, into the
+// directory dir, stripping stripPrefix from each entry before extraction.
+// name is the name of the archive, used for error reporting.
+func extractArchive(f *os.File, name, dir, stripPrefix string) (err error) {
+ if strings.HasSuffix(name, ".zip") {
+ return extractZip(f, name, dir, stripPrefix)
+ }
+ if strings.HasSuffix(name, ".tar.gz") {
+ zr, err := gzip.NewReader(f)
+ if err != nil {
+ return fmt.Errorf("extracting %s: %w", name, err)
+ }
+ defer func() {
+ if cerr := zr.Close(); err == nil && cerr != nil {
+ err = cerr
+ }
+ }()
+ return extractTar(zr, name, dir, stripPrefix)
+ }
+ return fmt.Errorf("could not determine archive format from extension: %s", name)
+}
+
+func extractZip(zf *os.File, name, dir, stripPrefix string) (err error) {
+ stripPrefix += "/"
+ fi, err := zf.Stat()
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ err = fmt.Errorf("extracting zip %s: %w", name, err)
+ }
+ }()
+
+ zr, err := zip.NewReader(zf, fi.Size())
+ if err != nil {
+ return err
+ }
+
+ extractFile := func(f *zip.File) (err error) {
+ defer func() {
+ if err != nil {
+ err = fmt.Errorf("extracting %s: %w", f.Name, err)
+ }
+ }()
+ outPath, err := extractedPath(dir, stripPrefix, f.Name)
+ if err != nil {
+ return err
+ }
+ if strings.HasSuffix(f.Name, "/") {
+ return os.MkdirAll(outPath, 0777)
+ }
+ r, err := f.Open()
+ if err != nil {
+ return err
+ }
+ defer r.Close()
+ parent := filepath.Dir(outPath)
+ if err := os.MkdirAll(parent, 0777); err != nil {
+ return err
+ }
+ w, err := os.Create(outPath)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if cerr := w.Close(); err == nil && cerr != nil {
+ err = cerr
+ }
+ }()
+ _, err = io.Copy(w, r)
+ return err
+ }
+
+ for _, f := range zr.File {
+ if err := extractFile(f); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func extractTar(r io.Reader, name, dir, stripPrefix string) (err error) {
+ defer func() {
+ if err != nil {
+ err = fmt.Errorf("extracting tar %s: %w", name, err)
+ }
+ }()
+
+ tr := tar.NewReader(r)
+ extractFile := func(hdr *tar.Header) (err error) {
+ outPath, err := extractedPath(dir, stripPrefix, hdr.Name)
+ if err != nil {
+ return err
+ }
+ switch hdr.Typeflag {
+ case tar.TypeDir:
+ return os.MkdirAll(outPath, 0777)
+ case tar.TypeReg:
+ w, err := os.Create(outPath)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if cerr := w.Close(); err == nil && cerr != nil {
+ err = cerr
+ }
+ }()
+ _, err = io.Copy(w, tr)
+ return err
+ default:
+ return fmt.Errorf("unsupported file type %x: %q", hdr.Typeflag, hdr.Name)
+ }
+ }
+
+ stripPrefix += "/"
+ for {
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ break
+ } else if err != nil {
+ return err
+ }
+ if err := extractFile(hdr); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// extractedPath returns the file path that a file in an archive should be
+// extracted to. It verifies that entryName starts with stripPrefix and does not
+// point outside dir.
+func extractedPath(dir, stripPrefix, entryName string) (string, error) {
+ if !strings.HasPrefix(entryName, stripPrefix) {
+ return "", fmt.Errorf("entry does not start with prefix %s: %q", stripPrefix, entryName)
+ }
+ entryName = entryName[len(stripPrefix):]
+ if entryName == "" {
+ return dir, nil
+ }
+ if path.IsAbs(entryName) {
+ return "", fmt.Errorf("entry has an absolute path: %q", entryName)
+ }
+ if strings.HasPrefix(entryName, "../") {
+ return "", fmt.Errorf("entry refers to something outside the archive: %q", entryName)
+ }
+ entryName = strings.TrimSuffix(entryName, "/")
+ if path.Clean(entryName) != entryName {
+ return "", fmt.Errorf("entry does not have a clean path: %q", entryName)
+ }
+ return filepath.Join(dir, entryName), nil
+}
+
+// copyDir recursively copies a directory tree.
+func copyDir(toDir, fromDir string) error {
+ if err := os.MkdirAll(toDir, 0777); err != nil {
+ return err
+ }
+ return filepath.Walk(fromDir, func(path string, fi os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ rel, _ := filepath.Rel(fromDir, path)
+ if rel == "." {
+ return nil
+ }
+ outPath := filepath.Join(toDir, rel)
+ if fi.IsDir() {
+ return os.Mkdir(outPath, 0777)
+ } else {
+ return copyFile(outPath, path)
+ }
+ })
+}
+
+func copyFile(toFile, fromFile string) (err error) {
+ r, err := os.Open(fromFile)
+ if err != nil {
+ return err
+ }
+ defer r.Close()
+ w, err := os.Create(toFile)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if cerr := w.Close(); err == nil && cerr != nil {
+ err = cerr
+ }
+ }()
+ _, err = io.Copy(w, r)
+ return err
+}
+
+func sha256SumFile(name string) (string, error) {
+ r, err := os.Open(name)
+ if err != nil {
+ return "", err
+ }
+ defer r.Close()
+ h := sha256.New()
+ if _, err := io.Copy(h, r); err != nil {
+ return "", err
+ }
+ sum := h.Sum(nil)
+ return hex.EncodeToString(sum), nil
+}
+
+// copyFileToMirror uploads a file to the GCS bucket backing mirror.bazel.build.
+// gsutil must be installed, and the user must be authenticated with
+// 'gcloud auth login' and be allowed to write files to the bucket.
+func copyFileToMirror(ctx context.Context, path, fileName string) (err error) {
+ dest := "gs://bazel-mirror/" + path
+ defer func() {
+ if err != nil {
+ err = fmt.Errorf("copying file %s to %s: %w", fileName, dest, err)
+ }
+ }()
+
+ // This function shells out to gsutil instead of using
+ // cloud.google.com/go/storage because that package has a million
+ // dependencies.
+ return runForError(ctx, ".", "gsutil", "cp", "-n", fileName, dest)
+}
diff --git a/go/tools/releaser/git.go b/go/tools/releaser/git.go
new file mode 100644
index 00000000..83e688c5
--- /dev/null
+++ b/go/tools/releaser/git.go
@@ -0,0 +1,52 @@
+// Copyright 2021 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "context"
+ "errors"
+)
+
+func checkNoGitChanges(ctx context.Context, dir string) error {
+ out, err := runForOutput(ctx, dir, "git", "status", "--porcelain", "--untracked-files=no")
+ if err != nil {
+ return err
+ }
+ if len(out) > 0 {
+ return errors.New("Repository has pending changes. Commit them and try again.")
+ }
+ return nil
+}
+
+func gitBranchExists(ctx context.Context, dir, branchName string) bool {
+ err := runForError(ctx, dir, "git", "show-ref", "--verify", "--quiet", "refs/heads/"+branchName)
+ return err == nil
+}
+
+func gitCreateBranch(ctx context.Context, dir, branchName, refName string) error {
+ return runForError(ctx, dir, "git", "branch", branchName, refName)
+}
+
+func gitPushBranch(ctx context.Context, dir, branchName string) error {
+ return runForError(ctx, dir, "git", "push", "origin", branchName)
+}
+
+func gitCreateArchive(ctx context.Context, dir, branchName, arcName string) error {
+ return runForError(ctx, dir, "git", "archive", "--output="+arcName, branchName)
+}
+
+func gitCatFile(ctx context.Context, dir, refName, fileName string) ([]byte, error) {
+ return runForOutput(ctx, dir, "git", "cat-file", "blob", refName+":"+fileName)
+}
diff --git a/go/tools/releaser/github.go b/go/tools/releaser/github.go
new file mode 100644
index 00000000..4fecfc6c
--- /dev/null
+++ b/go/tools/releaser/github.go
@@ -0,0 +1,132 @@
+// Copyright 2021 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/google/go-github/v36/github"
+)
+
+type githubClient struct {
+ *github.Client
+}
+
+func (gh *githubClient) listTags(ctx context.Context, org, repo string) (_ []*github.RepositoryTag, err error) {
+ defer func() {
+ if err != nil {
+ err = fmt.Errorf("listing tags in github.com/%s/%s: %w", org, repo, err)
+ }
+ }()
+
+ var allTags []*github.RepositoryTag
+ err = gh.listPages(func(opts *github.ListOptions) (*github.Response, error) {
+ tags, resp, err := gh.Repositories.ListTags(ctx, org, repo, opts)
+ if err != nil {
+ return nil, err
+ }
+ allTags = append(allTags, tags...)
+ return resp, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return allTags, nil
+}
+
+func (gh *githubClient) listReleases(ctx context.Context, org, repo string) (_ []*github.RepositoryRelease, err error) {
+ defer func() {
+ if err != nil {
+ err = fmt.Errorf("listing releases in github.com/%s/%s: %w", org, repo, err)
+ }
+ }()
+
+ var allReleases []*github.RepositoryRelease
+ err = gh.listPages(func(opts *github.ListOptions) (*github.Response, error) {
+ releases, resp, err := gh.Repositories.ListReleases(ctx, org, repo, opts)
+ if err != nil {
+ return nil, err
+ }
+ allReleases = append(allReleases, releases...)
+ return resp, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return allReleases, nil
+}
+
+// getReleaseByTagIncludingDraft is like
+// github.RepositoriesService.GetReleaseByTag, but it also considers draft
+// releases that aren't tagged yet.
+func (gh *githubClient) getReleaseByTagIncludingDraft(ctx context.Context, org, repo, tag string) (*github.RepositoryRelease, error) {
+ releases, err := gh.listReleases(ctx, org, repo)
+ if err != nil {
+ return nil, err
+ }
+ for _, release := range releases {
+ if release.GetTagName() == tag {
+ return release, nil
+ }
+ }
+ return nil, errReleaseNotFound
+}
+
+var errReleaseNotFound = errors.New("release not found")
+
+// githubListPages calls fn repeatedly to get all pages of a large result.
+// This is useful for fetching all tags or all comments or something similar.
+func (gh *githubClient) listPages(fn func(opt *github.ListOptions) (*github.Response, error)) error {
+ opt := &github.ListOptions{PerPage: 50}
+ for {
+ resp, err := fn(opt)
+ if err != nil {
+ return err
+ }
+ if resp.NextPage == 0 {
+ return nil
+ }
+ opt.Page = resp.NextPage
+ }
+}
+
+// githubTokenFlag is used to find a GitHub personal access token on the
+// command line. It accepts a raw token or a path to a file containing a token.
+type githubTokenFlag string
+
+func (f *githubTokenFlag) Set(v string) error {
+ if strings.HasPrefix(v, "ghp_") {
+ *(*string)(f) = v
+ return nil
+ }
+ data, err := os.ReadFile(v)
+ if err != nil {
+ return fmt.Errorf("reading GitHub token: %w", err)
+ }
+ *(*string)(f) = string(bytes.TrimSpace(data))
+ return nil
+}
+
+func (f *githubTokenFlag) String() string {
+ if f == nil {
+ return ""
+ }
+ return string(*f)
+}
diff --git a/go/tools/releaser/prepare.go b/go/tools/releaser/prepare.go
new file mode 100644
index 00000000..4c7b0413
--- /dev/null
+++ b/go/tools/releaser/prepare.go
@@ -0,0 +1,253 @@
+// Copyright 2021 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ "os"
+
+ "github.com/google/go-github/v36/github"
+ "golang.org/x/mod/semver"
+ "golang.org/x/oauth2"
+)
+
+var prepareCmd = command{
+ name: "prepare",
+ description: "prepares a GitHub release with notes and attached archive",
+ help: `prepare -rnotes=file -version=version -githubtoken=token [-mirror]
+
+'prepare' performs most tasks related to a rules_go release. It does everything
+except publishing and tagging the release, which must be done manually,
+with review. Specifically, prepare does the following:
+
+* Creates the release branch if it doesn't exist locally. Release branches
+ have names like "release-X.Y" where X and Y are the major and minor version
+ numbers.
+* Checks that RULES_GO_VERSION is set in go/def.bzl on the local release branch
+ for the minor version being released. RULES_GO_VERSION must be a sematic
+ version without the "v" prefix that Go uses, like "1.2.4". It must match
+ the -version flag, which does require the "v" prefix.
+* Creates an archive zip file from the tip of the local release branch.
+* Creates or updates a draft GitHub release with the given release notes.
+ http_archive boilerplate is generated and appended to the release notes.
+* Uploads and attaches the release archive to the GitHub release.
+* Uploads the release archive to mirror.bazel.build. If the file already exists,
+ it may be manually removed with 'gsutil rm gs://bazel-mirror/<github-url>'
+ or manually updated with 'gsutil cp <file> gs://bazel-mirror/<github-url>'.
+ This step may be skipped by setting -mirror=false.
+
+After these steps are completed successfully, 'prepare' prompts the user to
+check that CI passes, then review and publish the release.
+
+Note that 'prepare' does not update boilerplate in WORKSPACE or README.rst for
+either rules_go or Gazelle.
+`,
+}
+
// init assigns prepareCmd.run after declaration: runPrepare references
// prepareCmd (for usage errors), so assigning it inline would create an
// initialization cycle.
func init() {
	// break init cycle
	prepareCmd.run = runPrepare
}
+
+func runPrepare(ctx context.Context, stderr io.Writer, args []string) error {
+ // Parse arguments.
+ flags := flag.NewFlagSet("releaser prepare", flag.ContinueOnError)
+ var rnotesPath, version string
+ var githubToken githubTokenFlag
+ var uploadToMirror bool
+ flags.Var(&githubToken, "githubtoken", "GitHub personal access token or path to a file containing it")
+ flags.BoolVar(&uploadToMirror, "mirror", true, "whether to upload dependency archives to mirror.bazel.build")
+ flags.StringVar(&rnotesPath, "rnotes", "", "Name of file containing release notes in Markdown")
+ flags.StringVar(&version, "version", "", "Version to release")
+ if err := flags.Parse(args); err != nil {
+ return err
+ }
+ if flags.NArg() > 0 {
+ return usageErrorf(&prepareCmd, "No arguments expected")
+ }
+ if githubToken == "" {
+ return usageErrorf(&prepareCmd, "-githubtoken must be set")
+ }
+ if rnotesPath == "" {
+ return usageErrorf(&prepareCmd, "-rnotes must be set")
+ }
+ if version == "" {
+ return usageErrorf(&prepareCmd, "-version must be set")
+ }
+ if semver.Canonical(version) != version || semver.Prerelease(version) != "" || semver.Build(version) != "" {
+ return usageErrorf(&prepareCmd, "-version must be a canonical version, like v1.2.3")
+ }
+
+ ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: string(githubToken)})
+ tc := oauth2.NewClient(ctx, ts)
+ gh := &githubClient{Client: github.NewClient(tc)}
+
+ // Get the GitHub release.
+ fmt.Fprintf(stderr, "checking if release %s exists...\n", version)
+ release, err := gh.getReleaseByTagIncludingDraft(ctx, "bazelbuild", "rules_go", version)
+ if err != nil && !errors.Is(err, errReleaseNotFound) {
+ return err
+ }
+ if release != nil && !release.GetDraft() {
+ return fmt.Errorf("release %s was already published", version)
+ }
+
+ // Check that RULES_GO_VERSION is set correctly on the release branch.
+ // If this is a minor release (x.y.0), create the release branch if it
+ // does not exist.
+ fmt.Fprintf(stderr, "checking RULES_GO_VERSION...\n")
+ rootDir, err := repoRoot()
+ if err != nil {
+ return err
+ }
+ if err := checkNoGitChanges(ctx, rootDir); err != nil {
+ return err
+ }
+ majorMinor := semver.MajorMinor(version)
+ isMinorRelease := semver.Canonical(majorMinor) == version
+ branchName := "release-" + majorMinor[len("v"):]
+ if !gitBranchExists(ctx, rootDir, branchName) {
+ if !isMinorRelease {
+ return fmt.Errorf("release branch %q does not exist locally. Fetch it, set RULES_GO_VERSION, add commits, and run this command again.")
+ }
+ if err := checkRulesGoVersion(ctx, rootDir, "HEAD", version); err != nil {
+ return err
+ }
+ fmt.Fprintf(stderr, "creating branch %s...\n", branchName)
+ if err := gitCreateBranch(ctx, rootDir, branchName, "HEAD"); err != nil {
+ return err
+ }
+ } else {
+ if err := checkRulesGoVersion(ctx, rootDir, branchName, version); err != nil {
+ return err
+ }
+ }
+
+ // Create an archive.
+ fmt.Fprintf(stderr, "creating archive...\n")
+ arcFile, err := os.CreateTemp("", "rules_go-%s-*.zip")
+ if err != nil {
+ return err
+ }
+ arcName := arcFile.Name()
+ arcFile.Close()
+ defer func() {
+ if rerr := os.Remove(arcName); err == nil && rerr != nil {
+ err = rerr
+ }
+ }()
+ if err := gitCreateArchive(ctx, rootDir, branchName, arcName); err != nil {
+ return err
+ }
+ arcSum, err := sha256SumFile(arcName)
+ if err != nil {
+ return err
+ }
+
+ // Read release notes, append boilerplate.
+ rnotesData, err := os.ReadFile(rnotesPath)
+ if err != nil {
+ return err
+ }
+ rnotesData = bytes.TrimSpace(rnotesData)
+ goVersion, err := findLatestGoVersion()
+ if err != nil {
+ return err
+ }
+ boilerplate := genBoilerplate(version, arcSum, goVersion)
+ rnotesStr := string(rnotesData) + "\n\n## `WORKSPACE` code\n\n```\n" + boilerplate + "\n```\n"
+
+ // Push the release branch.
+ fmt.Fprintf(stderr, "pushing branch %s to origin...\n", branchName)
+ if err := gitPushBranch(ctx, rootDir, branchName); err != nil {
+ return err
+ }
+
+ // Upload to mirror.bazel.build.
+ arcGHURLWithoutScheme := fmt.Sprintf("github.com/bazelbuild/rules_go/releases/download/%[1]s/rules_go-%[1]s.zip", version)
+ if uploadToMirror {
+ fmt.Fprintf(stderr, "uploading archive to mirror.bazel.build...\n")
+ if err := copyFileToMirror(ctx, arcGHURLWithoutScheme, arcName); err != nil {
+ return err
+ }
+ }
+
+ // Create or update the GitHub release.
+ if release == nil {
+ fmt.Fprintf(stderr, "creating draft release...\n")
+ draft := true
+ release = &github.RepositoryRelease{
+ TagName: &version,
+ TargetCommitish: &branchName,
+ Name: &version,
+ Body: &rnotesStr,
+ Draft: &draft,
+ }
+ if release, _, err = gh.Repositories.CreateRelease(ctx, "bazelbuild", "rules_go", release); err != nil {
+ return err
+ }
+ } else {
+ fmt.Fprintf(stderr, "updating release...\n")
+ release.Body = &rnotesStr
+ if release, _, err = gh.Repositories.EditRelease(ctx, "bazelbuild", "rules_go", release.GetID(), release); err != nil {
+ return err
+ }
+ for _, asset := range release.Assets {
+ if _, err := gh.Repositories.DeleteReleaseAsset(ctx, "bazelbuild", "rules_go", asset.GetID()); err != nil {
+ return err
+ }
+ }
+ }
+ arcFile, err = os.Open(arcName)
+ if err != nil {
+ return err
+ }
+ defer arcFile.Close()
+ uploadOpts := &github.UploadOptions{
+ Name: "rules_go-" + version + ".zip",
+ MediaType: "application/zip",
+ }
+ if _, _, err := gh.Repositories.UploadReleaseAsset(ctx, "bazelbuild", "rules_go", release.GetID(), uploadOpts, arcFile); err != nil {
+ return err
+ }
+
+ testURL := fmt.Sprintf("https://buildkite.com/bazel/rules-go-golang/builds?branch=%s", branchName)
+ fmt.Fprintf(stderr, `
+Release %s has been prepared and uploaded.
+
+* Ensure that all tests pass in CI at %s.
+* Review and publish the release at %s.
+* Update README.rst and WORKSPACE if necessary.
+`, version, testURL, release.GetHTMLURL())
+
+ return nil
+}
+
+func checkRulesGoVersion(ctx context.Context, dir, refName, version string) error {
+ data, err := gitCatFile(ctx, dir, refName, "go/def.bzl")
+ if err != nil {
+ return err
+ }
+ rulesGoVersionStr := []byte(fmt.Sprintf(`RULES_GO_VERSION = "%s"`, version[len("v"):]))
+ if !bytes.Contains(data, rulesGoVersionStr) {
+ return fmt.Errorf("RULES_GO_VERSION was not set to %q in go/def.bzl. Set it, add commits, and run this command again.")
+ }
+ return nil
+}
diff --git a/go/tools/releaser/releaser.go b/go/tools/releaser/releaser.go
new file mode 100644
index 00000000..e8ba95c1
--- /dev/null
+++ b/go/tools/releaser/releaser.go
@@ -0,0 +1,127 @@
+// Copyright 2021 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// releaser is a tool for maintaining rules_go and Gazelle. It automates
+// multiple tasks related to preparing releases, like upgrading dependencies,
+// and uploading release artifacts.
+package main
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "os/signal"
+)
+
// main parses arguments and dispatches to a subcommand. The context is
// cancelled when the process receives an interrupt (SIGINT), so
// subcommands can abort work in progress cleanly.
func main() {
	ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt)
	defer cancel()
	if err := run(ctx, os.Stderr, os.Args[1:]); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
+
// command is a releaser subcommand: its command-line name, a one-line
// summary for the command list, the full usage text, and the function that
// executes it. run is assigned in an init function rather than in the
// struct literal to break initialization cycles.
type command struct {
	name, description, help string
	run                     func(context.Context, io.Writer, []string) error
}
+
// commands lists every registered subcommand. run and runHelp search this
// slice by name; new subcommands must be added here.
var commands = []*command{
	&helpCmd,
	&prepareCmd,
	&upgradeDepCmd,
}
+
+func run(ctx context.Context, stderr io.Writer, args []string) error {
+ if len(args) == 0 {
+ return errors.New("no command specified. For a list of commands, run:\n\treleaser help")
+ }
+ name, args := args[0], args[1:]
+ for _, arg := range args {
+ if arg == "-h" || name == "-help" || name == "--help" {
+ return helpCmd.run(ctx, stderr, args)
+ }
+ }
+ for _, cmd := range commands {
+ if cmd.name == name {
+ return cmd.run(ctx, stderr, args)
+ }
+ }
+ return fmt.Errorf("unknown command %q. For a list of commands, run:\n\treleaser help", name)
+}
+
// helpCmd describes the 'help' subcommand. Its run function (runHelp) is
// assigned in init to break an initialization cycle.
var helpCmd = command{
	name:        "help",
	description: "prints information on how to use any subcommand",
	help: `releaser help [subcommand]

The help sub-command prints information on how to use any subcommand. Run help
without arguments for a list of all subcommands.
`,
}
+
// init assigns helpCmd.run after declaration: runHelp references helpCmd,
// so assigning it inline would create an initialization cycle.
func init() {
	// break init cycle
	helpCmd.run = runHelp
}
+
+func runHelp(ctx context.Context, stderr io.Writer, args []string) error {
+ if len(args) > 1 {
+ return usageErrorf(&helpCmd, "help accepts at most one argument.")
+ }
+
+ if len(args) == 1 {
+ name := args[0]
+ for _, cmd := range commands {
+ if cmd.name == name {
+ fmt.Fprintf(stderr, "%s\n\n%s\n", cmd.description, cmd.help)
+ return nil
+ }
+ }
+ return fmt.Errorf("Unknown command %s. For a list of supported subcommands, run:\n\treleaser help", name)
+ }
+
+ fmt.Fprint(stderr, "releaser supports the following subcommands:\n\n")
+ maxNameLen := 0
+ for _, cmd := range commands {
+ if len(cmd.name) > maxNameLen {
+ maxNameLen = len(cmd.name)
+ }
+ }
+ for _, cmd := range commands {
+ fmt.Fprintf(stderr, "\t%-*s %s\n", maxNameLen, cmd.name, cmd.description)
+ }
+ fmt.Fprintf(stderr, "\nRun 'releaser help <subcommand>' for more information on any command.\n")
+ return nil
+}
+
// usageError indicates a subcommand was invoked with bad flags or
// arguments. Its message points the user at the relevant help topic.
type usageError struct {
	cmd *command
	err error
}

// Error implements the error interface.
func (e *usageError) Error() string {
	return fmt.Sprintf("%v. For usage info, run:\n\treleaser help %s", e.err, e.cmd.name)
}

// Unwrap exposes the underlying error to errors.Is and errors.As.
func (e *usageError) Unwrap() error {
	return e.err
}
+
+func usageErrorf(cmd *command, format string, args ...interface{}) error {
+ return &usageError{cmd: cmd, err: fmt.Errorf(format, args...)}
+}
diff --git a/go/tools/releaser/run.go b/go/tools/releaser/run.go
new file mode 100644
index 00000000..6ffe0d78
--- /dev/null
+++ b/go/tools/releaser/run.go
@@ -0,0 +1,92 @@
+// Copyright 2021 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "os"
+ "os/exec"
+ "strings"
+)
+
+// runForError runs a command without showing its output. If the command fails,
+// runForError returns an error containing its stderr.
+func runForError(ctx context.Context, dir string, name string, args ...string) error {
+ stderr := &bytes.Buffer{}
+ cmd := exec.CommandContext(ctx, name, args...)
+ cmd.Env = envWithoutBazel()
+ cmd.Dir = dir
+ cmd.Stdout = nil
+ cmd.Stderr = stderr
+ err := cmd.Run()
+ return cleanCmdError(err, name, args, stderr.Bytes())
+}
+
+// runForOutput runs a command and returns its output. If the command fails,
+// runForOutput returns an error containing its stderr. The command's output
+// is returned whether it failed or not.
+func runForOutput(ctx context.Context, dir string, name string, args ...string) ([]byte, error) {
+ stdout := &bytes.Buffer{}
+ stderr := &bytes.Buffer{}
+ cmd := exec.CommandContext(ctx, name, args...)
+ cmd.Env = envWithoutBazel()
+ cmd.Dir = dir
+ cmd.Stdout = stdout
+ cmd.Stderr = stderr
+ err := cmd.Run()
+ return stdout.Bytes(), cleanCmdError(err, name, args, stderr.Bytes())
+}
+
+// envWithoutBazel runs the current process's environment without variables
+// starting with "BUILD_" added by 'bazel run'. These can confuse subprocesses.
+func envWithoutBazel() []string {
+ env := os.Environ()
+ filtered := make([]string, 0, len(env))
+ for _, e := range env {
+ if strings.HasPrefix(e, "BUILD_") {
+ continue
+ }
+ filtered = append(filtered, e)
+ }
+ return filtered
+}
+
+// cleanCmdError wraps an error returned by exec.Cmd.Run with the command that
+// was run and its stderr output.
+func cleanCmdError(err error, name string, args []string, stderr []byte) error {
+ if err == nil {
+ return nil
+ }
+ return &commandError{
+ argv: append([]string{name}, args...),
+ err: err,
+ }
+}
+
+type commandError struct {
+ argv []string
+ stderr []byte
+ err error
+}
+
+func (e *commandError) Error() string {
+ return fmt.Sprintf("running %s: %v\n%s", strings.Join(e.argv, " "), e.err, bytes.TrimSpace(e.stderr))
+}
+
+func (e *commandError) Unwrap() error {
+ return e.err
+}
diff --git a/go/tools/releaser/upgradedep.go b/go/tools/releaser/upgradedep.go
new file mode 100644
index 00000000..6e28c08e
--- /dev/null
+++ b/go/tools/releaser/upgradedep.go
@@ -0,0 +1,561 @@
+// Copyright 2021 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "bytes"
+ "context"
+ "crypto/sha256"
+ "encoding/hex"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "strings"
+ "time"
+
+ bzl "github.com/bazelbuild/buildtools/build"
+ "github.com/google/go-github/v36/github"
+ "golang.org/x/mod/semver"
+ "golang.org/x/oauth2"
+ "golang.org/x/sync/errgroup"
+)
+
// upgradeDepCmd describes the 'upgrade-dep' subcommand, which bumps
// rules_go dependencies declared in WORKSPACE and
// go/private/repositories.bzl to their latest upstream versions. Its run
// function is assigned in init to break an initialization cycle.
var upgradeDepCmd = command{
	name:        "upgrade-dep",
	description: "upgrades a dependency in WORKSPACE or go_repositories.bzl",
	help: `releaser upgrade-dep [-githubtoken=token] [-mirror] [-work] deps...

upgrade-dep upgrades one or more rules_go dependencies in WORKSPACE or
go/private/repositories.bzl. Dependency names (matching the name attributes)
can be specified with positional arguments. "all" may be specified to upgrade
all upgradeable dependencies.

For each dependency, upgrade-dep finds the highest version available in the
upstream repository. If no version is available, upgrade-dep uses the commit
at the tip of the default branch. If a version is part of a release,
upgrade-dep will try to use an archive attached to the release; if none is
available, upgrade-dep uses an archive generated by GitHub.

Once upgrade-dep has found the URL for the latest version, it will:

* Download the archive.
* Upload the archive to mirror.bazel.build.
* Re-generate patches, either by running a command or by re-applying the
  old patches.
* Update dependency attributes in WORKSPACE and repositories.bzl, then format
  and rewrite those files.

Upgradeable dependencies need a comment like '# releaser:upgrade-dep org repo'
where org and repo are the GitHub organization and repository. We could
potentially fetch archives from proxy.golang.org instead, but it's not available
in as many countries.

Patches may have a comment like '# releaser:patch-cmd name args...'. If this
comment is present, upgrade-dep will generate the patch by running the specified
command in a temporary directory containing the extracted archive with the
previous patches applied.
`,
}
+
// init assigns upgradeDepCmd.run after declaration: runUpgradeDep
// references upgradeDepCmd, so assigning it inline would create an
// initialization cycle.
func init() {
	// break init cycle
	upgradeDepCmd.run = runUpgradeDep
}
+
+func runUpgradeDep(ctx context.Context, stderr io.Writer, args []string) error {
+ // Parse arguments.
+ flags := flag.NewFlagSet("releaser upgrade-dep", flag.ContinueOnError)
+ var githubToken githubTokenFlag
+ var uploadToMirror, leaveWorkDir bool
+ flags.Var(&githubToken, "githubtoken", "GitHub personal access token or path to a file containing it")
+ flags.BoolVar(&uploadToMirror, "mirror", true, "whether to upload dependency archives to mirror.bazel.build")
+ flags.BoolVar(&leaveWorkDir, "work", false, "don't delete temporary work directory (for debugging)")
+ if err := flags.Parse(args); err != nil {
+ return err
+ }
+ if flags.NArg() == 0 {
+ return usageErrorf(&upgradeDepCmd, "No dependencies specified")
+ }
+ upgradeAll := false
+ for _, arg := range flags.Args() {
+ if arg == "all" {
+ upgradeAll = true
+ break
+ }
+ }
+ if upgradeAll && flags.NArg() != 1 {
+ return usageErrorf(&upgradeDepCmd, "When 'all' is specified, it must be the only argument")
+ }
+
+ httpClient := http.DefaultClient
+ if githubToken != "" {
+ ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: string(githubToken)})
+ httpClient = oauth2.NewClient(ctx, ts)
+ }
+ gh := &githubClient{Client: github.NewClient(httpClient)}
+
+ workDir, err := os.MkdirTemp("", "releaser-upgrade-dep-*")
+ if leaveWorkDir {
+ fmt.Fprintf(stderr, "work dir: %s\n", workDir)
+ } else {
+ defer func() {
+ if rerr := os.RemoveAll(workDir); err == nil && rerr != nil {
+ err = rerr
+ }
+ }()
+ }
+
+ // Make sure we have everything we need.
+ // upgrade-dep must be run inside rules_go (though we just check for
+ // WORKSPACE), and a few tools must be available.
+ rootDir, err := repoRoot()
+ if err != nil {
+ return err
+ }
+ for _, tool := range []string{"diff", "gazelle", "gsutil", "patch"} {
+ if _, err := exec.LookPath(tool); err != nil {
+ return fmt.Errorf("%s must be installed in PATH", tool)
+ }
+ }
+
+ // Parse and index files we might want to update.
+ type file struct {
+ path string
+ funcName string
+ parsed *bzl.File
+ body []bzl.Expr
+ }
+ files := []file{
+ {path: filepath.Join(rootDir, "WORKSPACE")},
+ {path: filepath.Join(rootDir, "go/private/repositories.bzl"), funcName: "go_rules_dependencies"},
+ }
+ depIndex := make(map[string]*bzl.CallExpr)
+
+ for i := range files {
+ f := &files[i]
+ data, err := os.ReadFile(f.path)
+ if err != nil {
+ return err
+ }
+ f.parsed, err = bzl.Parse(f.path, data)
+ if err != nil {
+ return err
+ }
+
+ if f.funcName == "" {
+ f.body = f.parsed.Stmt
+ } else {
+ for _, expr := range f.parsed.Stmt {
+ def, ok := expr.(*bzl.DefStmt)
+ if !ok {
+ continue
+ }
+ if def.Name == f.funcName {
+ f.body = def.Body
+ break
+ }
+ }
+ if f.body == nil {
+ return fmt.Errorf("in file %s, could not find function %s", f.path, f.funcName)
+ }
+ }
+
+ for _, expr := range f.body {
+ call, ok := expr.(*bzl.CallExpr)
+ if !ok {
+ continue
+ }
+ for _, arg := range call.List {
+ kwarg, ok := arg.(*bzl.AssignExpr)
+ if !ok {
+ continue
+ }
+ key := kwarg.LHS.(*bzl.Ident) // required by parser
+ if key.Name != "name" {
+ continue
+ }
+ value, ok := kwarg.RHS.(*bzl.StringExpr)
+ if !ok {
+ continue
+ }
+ depIndex[value.Value] = call
+ }
+ }
+ }
+
+ // Update dependencies in those files.
+ eg, egctx := errgroup.WithContext(ctx)
+ if upgradeAll {
+ for name := range depIndex {
+ name := name
+ if _, _, err := parseUpgradeDepDirective(depIndex[name]); err != nil {
+ continue
+ }
+ eg.Go(func() error {
+ return upgradeDepDecl(egctx, gh, workDir, name, depIndex[name], uploadToMirror)
+ })
+ }
+ } else {
+ for _, arg := range flags.Args() {
+ if depIndex[arg] == nil {
+ return fmt.Errorf("could not find dependency %s", arg)
+ }
+ }
+ for _, arg := range flags.Args() {
+ arg := arg
+ eg.Go(func() error {
+ return upgradeDepDecl(egctx, gh, workDir, arg, depIndex[arg], uploadToMirror)
+ })
+ }
+ }
+ if err := eg.Wait(); err != nil {
+ return err
+ }
+
+ // Format and write files back to disk.
+ for _, f := range files {
+ if err := os.WriteFile(f.path, bzl.Format(f.parsed), 0666); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// upgradeDepDecl upgrades a specific dependency.
+func upgradeDepDecl(ctx context.Context, gh *githubClient, workDir, name string, call *bzl.CallExpr, uploadToMirror bool) (err error) {
+ defer func() {
+ if err != nil {
+ err = fmt.Errorf("upgrading %s: %w", name, err)
+ }
+ }()
+
+ // Find a '# releaser:upgrade-dep org repo' comment. We could probably
+ // figure this out from URLs but this also serves to mark a dependency as
+ // being automatically upgradeable.
+ orgName, repoName, err := parseUpgradeDepDirective(call)
+ if err != nil {
+ return err
+ }
+
+ // Find attributes we'll need to read or write. We'll modify these directly
+ // in the AST. Nothing else should read or write them while we're working.
+ attrs := map[string]*bzl.Expr{
+ "patches": nil,
+ "sha256": nil,
+ "strip_prefix": nil,
+ "urls": nil,
+ }
+ var urlsKwarg *bzl.AssignExpr
+ for _, arg := range call.List {
+ kwarg, ok := arg.(*bzl.AssignExpr)
+ if !ok {
+ continue
+ }
+ key := kwarg.LHS.(*bzl.Ident) // required by parser
+ if _, ok := attrs[key.Name]; ok {
+ attrs[key.Name] = &kwarg.RHS
+ }
+ if key.Name == "urls" {
+ urlsKwarg = kwarg
+ }
+ }
+ for key := range attrs {
+ if key == "patches" {
+ // Don't add optional attributes.
+ continue
+ }
+ if attrs[key] == nil {
+ kwarg := &bzl.AssignExpr{LHS: &bzl.Ident{Name: key}, Op: "="}
+ call.List = append(call.List, kwarg)
+ attrs[key] = &kwarg.RHS
+ }
+ }
+
+ // Find the highest tag in semver order, ignoring whether the version has a
+ // leading "v" or not. If there are no tags, find the commit at the tip of the
+ // default branch.
+ tags, err := gh.listTags(ctx, orgName, repoName)
+ if err != nil {
+ return err
+ }
+
+ vname := func(name string) string {
+ if !strings.HasPrefix(name, "v") {
+ return "v" + name
+ }
+ return name
+ }
+
+ w := 0
+ for r := range tags {
+ name := vname(*tags[r].Name)
+ if name != semver.Canonical(name) {
+ continue
+ }
+ tags[w] = tags[r]
+ w++
+ }
+ tags = tags[:w]
+
+ var highestTag *github.RepositoryTag
+ var highestVname string
+ for _, tag := range tags {
+ name := vname(*tag.Name)
+ if highestTag == nil || semver.Compare(name, highestVname) > 0 {
+ highestTag = tag
+ highestVname = name
+ }
+ }
+
+ var ghURL, stripPrefix, urlComment string
+ date := time.Now().Format("2006-01-02")
+ if highestTag != nil {
+ // If the tag is part of a release, check whether there is a release
+ // artifact we should use.
+ release, _, err := gh.Repositories.GetReleaseByTag(ctx, orgName, repoName, *highestTag.Name)
+ if err == nil {
+ wantNames := []string{
+ fmt.Sprintf("%s-%s.tar.gz", repoName, *highestTag.Name),
+ fmt.Sprintf("%s-%s.zip", repoName, *highestTag.Name),
+ }
+ AssetName:
+ for _, asset := range release.Assets {
+ for _, wantName := range wantNames {
+ if *asset.Name == wantName {
+ ghURL = asset.GetBrowserDownloadURL()
+ stripPrefix = "" // may not always be correct
+ break AssetName
+ }
+ }
+ }
+ }
+ if ghURL == "" {
+ ghURL = fmt.Sprintf("https://github.com/%s/%s/archive/refs/tags/%s.zip", orgName, repoName, *highestTag.Name)
+ stripPrefix = repoName + "-" + strings.TrimPrefix(*highestTag.Name, "v")
+ }
+ urlComment = fmt.Sprintf("%s, latest as of %s", *highestTag.Name, date)
+ } else {
+ repo, _, err := gh.Repositories.Get(ctx, orgName, repoName)
+ if err != nil {
+ return err
+ }
+ defaultBranchName := "main"
+ if repo.DefaultBranch != nil {
+ defaultBranchName = *repo.DefaultBranch
+ }
+ branch, _, err := gh.Repositories.GetBranch(ctx, orgName, repoName, defaultBranchName)
+ if err != nil {
+ return err
+ }
+ ghURL = fmt.Sprintf("https://github.com/%s/%s/archive/%s.zip", orgName, repoName, *branch.Commit.SHA)
+ stripPrefix = repoName + "-" + *branch.Commit.SHA
+ urlComment = fmt.Sprintf("%s, as of %s", defaultBranchName, date)
+ }
+ ghURLWithoutScheme := ghURL[len("https://"):]
+ mirrorURL := "https://mirror.bazel.build/" + ghURLWithoutScheme
+
+ // Download the archive and find the SHA.
+ archiveFile, err := os.CreateTemp("", "")
+ if err != nil {
+ return err
+ }
+ defer func() {
+ archiveFile.Close()
+ if rerr := os.Remove(archiveFile.Name()); err == nil && rerr != nil {
+ err = rerr
+ }
+ }()
+ resp, err := http.Get(ghURL)
+ if err != nil {
+ return err
+ }
+ hw := sha256.New()
+ mw := io.MultiWriter(hw, archiveFile)
+ if _, err := io.Copy(mw, resp.Body); err != nil {
+ resp.Body.Close()
+ return err
+ }
+ if err := resp.Body.Close(); err != nil {
+ return err
+ }
+ sha256Sum := hex.EncodeToString(hw.Sum(nil))
+ if _, err := archiveFile.Seek(0, io.SeekStart); err != nil {
+ return err
+ }
+
+ // Upload the archive to mirror.bazel.build.
+ if uploadToMirror {
+ if err := copyFileToMirror(ctx, ghURLWithoutScheme, archiveFile.Name()); err != nil {
+ return err
+ }
+ }
+
+ // If there are patches, re-apply or re-generate them.
+ // Patch labels may have "# releaser:patch-cmd name args..." directives
+ // that instruct this program to generate the patch by running a commnad
+ // in the directory. If there is no such directive, we apply the old patch
+ // using "patch". In either case, we'll generate a new patch with "diff".
+ // We'll scrub the timestamps to avoid excessive diffs in the PR that
+ // updates dependencies.
+ rootDir, err := repoRoot()
+ if err != nil {
+ return err
+ }
+ if attrs["patches"] != nil {
+ if err != nil {
+ return err
+ }
+ patchDir := filepath.Join(workDir, name, "a")
+ if err := extractArchive(archiveFile, path.Base(ghURL), patchDir, stripPrefix); err != nil {
+ return err
+ }
+
+ patchesList, ok := (*attrs["patches"]).(*bzl.ListExpr)
+ if !ok {
+ return fmt.Errorf("\"patches\" attribute is not a list")
+ }
+ for patchIndex, patchLabelExpr := range patchesList.List {
+ patchLabelValue, comments, err := parsePatchesItem(patchLabelExpr)
+ if err != nil {
+ return fmt.Errorf("parsing expr %#v : %w", patchLabelExpr, err)
+ }
+
+ if !strings.HasPrefix(patchLabelValue, "//third_party:") {
+ return fmt.Errorf("patch does not start with '//third_party:': %q", patchLabelValue)
+ }
+ patchName := patchLabelValue[len("//third_party:"):]
+ patchPath := filepath.Join(rootDir, "third_party", patchName)
+ prevDir := filepath.Join(workDir, name, string('a'+patchIndex))
+ patchDir := filepath.Join(workDir, name, string('a'+patchIndex+1))
+ var patchCmd []string
+ for _, c := range comments.Before {
+ words := strings.Fields(strings.TrimPrefix(c.Token, "#"))
+ if len(words) > 0 && words[0] == "releaser:patch-cmd" {
+ patchCmd = words[1:]
+ break
+ }
+ }
+
+ if err := copyDir(patchDir, prevDir); err != nil {
+ return err
+ }
+ if patchCmd == nil {
+ if err := runForError(ctx, patchDir, "patch", "-Np1", "-i", patchPath); err != nil {
+ return err
+ }
+ } else {
+ if err := runForError(ctx, patchDir, patchCmd[0], patchCmd[1:]...); err != nil {
+ return err
+ }
+ }
+ patch, _ := runForOutput(ctx, filepath.Join(workDir, name), "diff", "-urN", string('a'+patchIndex), string('a'+patchIndex+1))
+ patch = sanitizePatch(patch)
+ if err := os.WriteFile(patchPath, patch, 0666); err != nil {
+ return err
+ }
+ }
+ }
+
+ // Update the attributes.
+ *attrs["sha256"] = &bzl.StringExpr{Value: sha256Sum}
+ *attrs["strip_prefix"] = &bzl.StringExpr{Value: stripPrefix}
+ *attrs["urls"] = &bzl.ListExpr{
+ List: []bzl.Expr{
+ &bzl.StringExpr{Value: mirrorURL},
+ &bzl.StringExpr{Value: ghURL},
+ },
+ ForceMultiLine: true,
+ }
+ urlsKwarg.Before = []bzl.Comment{{Token: "# " + urlComment}}
+
+ return nil
+}
+
+func parsePatchesItem(patchLabelExpr bzl.Expr) (value string, comments *bzl.Comments, err error) {
+ switch patchLabel := patchLabelExpr.(type) {
+ case *bzl.CallExpr:
+ // Verify the identifier, should be Label
+ if ident, ok := patchLabel.X.(*bzl.Ident); !ok {
+ return "", nil, fmt.Errorf("invalid identifier while parsing patch label")
+ } else if ident.Name != "Label" {
+ return "", nil, fmt.Errorf("invalid patch function: %q", ident.Name)
+ }
+
+ // Expect 1 String argument with the patch
+ if len(patchLabel.List) != 1 {
+ return "", nil, fmt.Errorf("Label expr should have 1 argument, found %d", len(patchLabel.List))
+ }
+
+ // Parse patch as a string
+ patchLabelStr, ok := patchLabel.List[0].(*bzl.StringExpr)
+ if !ok {
+ return "", nil, fmt.Errorf("Label expr does not contain a string literal")
+ }
+ return patchLabelStr.Value, patchLabel.Comment(), nil
+ case *bzl.StringExpr:
+ return strings.TrimPrefix(patchLabel.Value, "@io_bazel_rules_go"), patchLabel.Comment(), nil
+ default:
+ return "", nil, fmt.Errorf("not all patches are string literals or Label()")
+ }
+}
+
+// parseUpgradeDepDirective parses a '# releaser:upgrade-dep org repo' directive
+// and returns the organization and repository name or an error if the directive
+// was not found or malformed.
+func parseUpgradeDepDirective(call *bzl.CallExpr) (orgName, repoName string, err error) {
+ // TODO: support other upgrade strategies. For example, support git_repository
+ // and go_repository (possibly wrapped in _maybe).
+ for _, c := range call.Comment().Before {
+ words := strings.Fields(strings.TrimPrefix(c.Token, "#"))
+ if len(words) == 0 || words[0] != "releaser:upgrade-dep" {
+ continue
+ }
+ if len(words) != 3 {
+ return "", "", errors.New("invalid upgrade-dep directive; expected org, and name fields")
+ }
+ return words[1], words[2], nil
+ }
+ return "", "", errors.New("releaser:upgrade-dep directive not found")
+}
+
// sanitizePatch rewrites the timestamps on "---" and "+++" diff header
// lines to one fixed date, reducing churn in the PR that updates patches.
//
// Timestamps whose year starts with "19" are left alone: diff stamps added
// or deleted files with a zero-valued (epoch) date, and that date can vary
// slightly with the local time zone.
func sanitizePatch(patch []byte) []byte {
	lines := bytes.Split(patch, []byte{'\n'})
	for i, line := range lines {
		isHeader := bytes.HasPrefix(line, []byte("--- ")) || bytes.HasPrefix(line, []byte("+++ "))
		if !isHeader {
			continue
		}
		// The timestamp follows the last tab on the header line.
		tab := bytes.LastIndexByte(line, '\t')
		if tab < 0 || bytes.HasPrefix(line[tab+1:], []byte("19")) {
			continue
		}
		lines[i] = append(line[:tab+1], []byte("2000-01-01 00:00:00.000000000 -0000")...)
	}
	return bytes.Join(lines, []byte{'\n'})
}
diff --git a/go/tools/releaser/upgradedep_test.go b/go/tools/releaser/upgradedep_test.go
new file mode 100644
index 00000000..1371add1
--- /dev/null
+++ b/go/tools/releaser/upgradedep_test.go
@@ -0,0 +1,102 @@
+package main
+
+import (
+ "fmt"
+ "testing"
+
+ bzl "github.com/bazelbuild/buildtools/build"
+)
+
+func TestPatchItemParser_Success(t *testing.T) {
+ tests := []struct {
+ expression []byte
+ result string
+ }{
+ {
+ expression: []byte(`# releaser:patch-cmd gazelle -repo_root . -go_prefix golang.org/x/tools -go_naming_convention import_alias
+ Label("//third_party:org_golang_x_tools-gazelle.patch")`),
+ result: "//third_party:org_golang_x_tools-gazelle.patch",
+ },
+ {
+ expression: []byte(`# releaser:patch-cmd gazelle -repo_root . -go_prefix golang.org/x/tools -go_naming_convention import_alias
+ "@io_bazel_rules_go//third_party:org_golang_x_tools-gazelle.patch"`),
+ result: "//third_party:org_golang_x_tools-gazelle.patch",
+ },
+ {
+ expression: []byte(`# releaser:patch-cmd gazelle -repo_root . -go_prefix golang.org/x/tools -go_naming_convention import_alias
+ "//third_party:org_golang_x_tools-gazelle.patch"`),
+ result: "//third_party:org_golang_x_tools-gazelle.patch",
+ },
+ {
+ expression: []byte(`# releaser:patch-cmd gazelle -repo_root . -go_prefix golang.org/x/tools -go_naming_convention import_alias
+ Label("@io_bazel_rules_go//third_party:org_golang_x_tools-gazelle.patch")`),
+ result: "@io_bazel_rules_go//third_party:org_golang_x_tools-gazelle.patch",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(fmt.Sprintf("%v", tt.expression), func(t *testing.T) {
+ patchExpr, err := bzl.Parse("repos.bzl", tt.expression)
+ if err != nil {
+ t.Fatalf(err.Error())
+ }
+
+ patchLabelStr, _, err := parsePatchesItem(patchExpr.Stmt[0])
+ if err != nil {
+ t.Errorf("unexpected error while parsing expression: %q", err.Error())
+ } else if patchLabelStr != tt.result {
+ t.Errorf("expected result %q, but got result %q instead", tt.result, patchLabelStr)
+ }
+ })
+ }
+}
+
+func TestPatchItemParser_Error(t *testing.T) {
+ tests := []struct {
+ expression []byte
+ error string
+ }{
+ {
+ expression: []byte(`# releaser:patch-cmd gazelle -repo_root . -go_prefix golang.org/x/tools -go_naming_convention import_alias
+ NotLabel("//third_party:org_golang_x_tools-gazelle.patch")`),
+ error: `invalid patch function: "NotLabel"`,
+ },
+ {
+ expression: []byte(`# releaser:patch-cmd gazelle -repo_root . -go_prefix golang.org/x/tools -go_naming_convention import_alias
+ NotLabel(True)`),
+ error: `invalid patch function: "NotLabel"`,
+ },
+ {
+ expression: []byte(`# releaser:patch-cmd gazelle -repo_root . -go_prefix golang.org/x/tools -go_naming_convention import_alias
+ True`),
+ error: "not all patches are string literals or Label()",
+ },
+ {
+ expression: []byte(`# releaser:patch-cmd gazelle -repo_root . -go_prefix golang.org/x/tools -go_naming_convention import_alias
+ Label("//third_party:org_golang_x_tools-gazelle.patch", True)`),
+ error: "Label expr should have 1 argument, found 2",
+ },
+ {
+ expression: []byte(`# releaser:patch-cmd gazelle -repo_root . -go_prefix golang.org/x/tools -go_naming_convention import_alias
+ Label(True)`),
+ error: "Label expr does not contain a string literal",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(fmt.Sprintf("%v", tt.expression), func(t *testing.T) {
+ patchExpr, err := bzl.Parse("repos.bzl", tt.expression)
+ if err != nil {
+ t.Fatalf(err.Error())
+ }
+
+ patchLabelStr, _, err := parsePatchesItem(patchExpr.Stmt[0])
+
+ if err == nil {
+ t.Errorf("expected error %q, but got result %q instead", tt.error, patchLabelStr)
+ } else if err.Error() != tt.error {
+ t.Errorf("expected error %q, but got error %q instead", tt.error, err.Error())
+ }
+ })
+ }
+}
diff --git a/go/tools/windows-testrunner/windows-testrunner.go b/go/tools/windows-testrunner/windows-testrunner.go
new file mode 100644
index 00000000..a3f25603
--- /dev/null
+++ b/go/tools/windows-testrunner/windows-testrunner.go
@@ -0,0 +1,105 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "os/exec"
+ "path/filepath"
+
+ "gopkg.in/yaml.v2"
+)
+
+func main() {
+ log.SetFlags(0)
+ log.SetPrefix("testrunner: ")
+
+ var configPath string
+ flag.StringVar(&configPath, "config", "", "location of presubmit.yml")
+ flag.Parse()
+ if configPath == "" {
+ var err error
+ configPath, err = findConfigPath()
+ if err != nil {
+ log.Fatal(err)
+ }
+ }
+
+ if err := run(configPath, flag.Args()); err != nil {
+ log.Fatal(err)
+ }
+}
+
+func run(configPath string, args []string) error {
+ configData, err := ioutil.ReadFile(configPath)
+ if err != nil {
+ return err
+ }
+ var config interface{}
+ if err := yaml.Unmarshal(configData, &config); err != nil {
+ return err
+ }
+
+ platform := config.(map[interface{}]interface{})["platforms"].(map[interface{}]interface{})["windows"].(map[interface{}]interface{})
+ var buildFlags []string
+ for _, f := range platform["build_flags"].([]interface{}) {
+ buildFlags = append(buildFlags, f.(string))
+ }
+ testFlags := buildFlags
+ for _, f := range platform["test_flags"].([]interface{}) {
+ testFlags = append(testFlags, f.(string))
+ }
+ var buildTargets, testTargets []string
+ if len(args) == 0 {
+ for _, t := range platform["build_targets"].([]interface{}) {
+ buildTargets = append(buildTargets, t.(string))
+ }
+ for _, t := range platform["test_targets"].([]interface{}) {
+ testTargets = append(testTargets, t.(string))
+ }
+ } else {
+ buildTargets = args
+ testTargets = args
+ }
+
+ buildCmd := exec.Command("bazel", "build")
+ buildCmd.Args = append(buildCmd.Args, buildFlags...)
+ buildCmd.Args = append(buildCmd.Args, buildTargets...)
+ buildCmd.Stdout = os.Stdout
+ buildCmd.Stderr = os.Stderr
+ if err := buildCmd.Run(); err != nil {
+ return err
+ }
+
+ testCmd := exec.Command("bazel", "test")
+ testCmd.Args = append(testCmd.Args, testFlags...)
+ testCmd.Args = append(testCmd.Args, testTargets...)
+ testCmd.Stdout = os.Stdout
+ testCmd.Stderr = os.Stderr
+ if err := testCmd.Run(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
// findConfigPath walks up the directory tree starting at the current working
// directory, looking for .bazelci/presubmit.yml. It returns the first
// candidate path found, or an error if the filesystem root is reached
// without finding one.
func findConfigPath() (string, error) {
	dir, err := os.Getwd()
	if err != nil {
		return "", err
	}
	for {
		candidate := filepath.Join(dir, ".bazelci/presubmit.yml")
		// Best-effort existence check: any Stat result other than
		// "does not exist" (including other stat errors) counts as a hit.
		if _, err := os.Stat(candidate); !os.IsNotExist(err) {
			return candidate, nil
		}
		parent := filepath.Dir(dir)
		if parent == dir {
			// Reached the root without finding the config.
			return "", fmt.Errorf("could not find presubmit.yml")
		}
		dir = parent
	}
}