author     Android Build Coastguard Worker <android-build-coastguard-worker@google.com>  2022-04-28 15:57:50 +0000
committer  Android Build Coastguard Worker <android-build-coastguard-worker@google.com>  2022-04-28 15:57:50 +0000
commit     879ee98047efdc5f977d54d25dd353b171a2d142 (patch)
tree       d7caa181126f59a89d1eccf84789b4fd416db28f
parent     ca7cde69a659c81b91f837ddc96a9834c83e62a6 (diff)
parent     02557d9deecd7e1ccc9ba175ca05d06a41ea0c3f (diff)
Snap for 8512216 from 02557d9deecd7e1ccc9ba175ca05d06a41ea0c3f to tm-frc-permission-release
Change-Id: I4178b8a83c6367ea562941088f29ab7ffaf4aebe
-rw-r--r--  Android.bp                                                           2
-rw-r--r--  OWNERS                                                               1
-rwxr-xr-x  avbtool.py                                                          37
-rw-r--r--  tools/transparency/verify/README.md                                 37
-rw-r--r--  tools/transparency/verify/cmd/verifier/log_pub_key.pem               4
-rw-r--r--  tools/transparency/verify/cmd/verifier/verifier.go                 109
-rw-r--r--  tools/transparency/verify/go.mod                                    10
-rw-r--r--  tools/transparency/verify/go.sum                                    18
-rw-r--r--  tools/transparency/verify/internal/checkpoint/checkpoint.go        175
-rw-r--r--  tools/transparency/verify/internal/checkpoint/checkpoint_test.go   133
-rw-r--r--  tools/transparency/verify/internal/tiles/reader.go                 124
-rw-r--r--  tools/transparency/verify/internal/tiles/reader_test.go            182
12 files changed, 821 insertions, 11 deletions
diff --git a/Android.bp b/Android.bp
index 0aae847..b73a924 100644
--- a/Android.bp
+++ b/Android.bp
@@ -168,7 +168,7 @@ cc_library_static {
linux: {
srcs: ["libavb/avb_sysdeps_posix.c"],
},
- linux_glibc: {
+ host_linux: {
cflags: ["-fno-stack-protector"],
},
},
diff --git a/OWNERS b/OWNERS
index 30abb1d..f9893cf 100644
--- a/OWNERS
+++ b/OWNERS
@@ -3,3 +3,4 @@ samitolvanen@google.com
zeuthen@google.com
dkrahn@google.com
tweek@google.com
+billylau@google.com
diff --git a/avbtool.py b/avbtool.py
index 5ba8dbf..f944af4 100755
--- a/avbtool.py
+++ b/avbtool.py
@@ -3329,7 +3329,8 @@ class Avb(object):
image.truncate(original_image_size)
raise AvbError('Appending VBMeta image failed: {}.'.format(e)) from e
- def add_hash_footer(self, image_filename, partition_size, partition_name,
+ def add_hash_footer(self, image_filename, partition_size,
+ dynamic_partition_size, partition_name,
hash_algorithm, salt, chain_partitions, algorithm_name,
key_path,
public_key_metadata_path, rollback_index, flags,
@@ -3347,6 +3348,7 @@ class Avb(object):
Arguments:
image_filename: File to add the footer to.
partition_size: Size of partition.
+ dynamic_partition_size: Calculate partition size based on image size.
partition_name: Name of partition (without A/B suffix).
hash_algorithm: Hash algorithm to use.
salt: Salt to use as a hexadecimal string or None to use /dev/urandom.
@@ -3380,6 +3382,14 @@ class Avb(object):
Raises:
AvbError: If an argument is incorrect or if adding of hash_footer failed.
"""
+ if not partition_size and not dynamic_partition_size:
+ raise AvbError('--dynamic_partition_size required when not specifying a '
+ 'partition size')
+
+ if dynamic_partition_size and calc_max_image_size:
+ raise AvbError('--calc_max_image_size not supported with '
+ '--dynamic_partition_size')
+
required_libavb_version_minor = 0
if use_persistent_digest or do_not_use_ab:
required_libavb_version_minor = 1
@@ -3395,24 +3405,18 @@ class Avb(object):
# this size + metadata (footer + vbmeta struct) fits in
# |partition_size|.
max_metadata_size = self.MAX_VBMETA_SIZE + self.MAX_FOOTER_SIZE
- if partition_size < max_metadata_size:
+ if not dynamic_partition_size and partition_size < max_metadata_size:
raise AvbError('Partition size of {} is too small. '
'Needs to be at least {}'.format(
partition_size, max_metadata_size))
- max_image_size = partition_size - max_metadata_size
# If we're asked to only calculate the maximum image size, we're done.
if calc_max_image_size:
- print('{}'.format(max_image_size))
+ print('{}'.format(partition_size - max_metadata_size))
return
image = ImageHandler(image_filename)
- if partition_size % image.block_size != 0:
- raise AvbError('Partition size of {} is not a multiple of the image '
- 'block size {}.'.format(partition_size,
- image.block_size))
-
# If there's already a footer, truncate the image to its original
# size. This way 'avbtool add_hash_footer' is idempotent (modulo
# salts).
@@ -3429,6 +3433,16 @@ class Avb(object):
# Image size is too small to possibly contain a footer.
original_image_size = image.image_size
+ if dynamic_partition_size:
+ partition_size = round_to_multiple(
+ original_image_size + max_metadata_size, image.block_size)
+
+ max_image_size = partition_size - max_metadata_size
+ if partition_size % image.block_size != 0:
+ raise AvbError('Partition size of {} is not a multiple of the image '
+ 'block size {}.'.format(partition_size,
+ image.block_size))
+
# If anything goes wrong from here-on, restore the image back to
# its original size.
try:
@@ -4331,6 +4345,9 @@ class AvbTool(object):
sub_parser.add_argument('--partition_size',
help='Partition size',
type=parse_number)
+ sub_parser.add_argument('--dynamic_partition_size',
+ help='Calculate partition size based on image size',
+ action='store_true')
sub_parser.add_argument('--partition_name',
help='Partition name',
default=None)
@@ -4759,7 +4776,7 @@ class AvbTool(object):
"""Implements the 'add_hash_footer' sub-command."""
args = self._fixup_common_args(args)
self.avb.add_hash_footer(args.image.name if args.image else None,
- args.partition_size,
+ args.partition_size, args.dynamic_partition_size,
args.partition_name, args.hash_algorithm,
args.salt, args.chain_partition, args.algorithm,
args.key,
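
For reference, the new `--dynamic_partition_size` path sizes the partition from the image instead of requiring `--partition_size`: it rounds `original_image_size + max_metadata_size` up to the image block size. A minimal sketch of that arithmetic, assuming a 4 KiB block size and a 64 KiB vbmeta plus 4 KiB footer metadata budget (the concrete values and the `roundToMultiple` helper name are assumptions for illustration, not taken from this diff):
```
package main

import "fmt"

// roundToMultiple rounds size up to the next multiple of unit, mirroring the
// rounding that the dynamic partition size path performs.
func roundToMultiple(size, unit int64) int64 {
	if r := size % unit; r != 0 {
		return size + unit - r
	}
	return size
}

func main() {
	const (
		originalImageSize = 9_000_000      // hypothetical image size in bytes
		blockSize         = 4096           // assumed image block size
		maxMetadataSize   = 64*1024 + 4096 // assumed vbmeta + footer budget
	)
	partitionSize := roundToMultiple(originalImageSize+maxMetadataSize, blockSize)
	maxImageSize := partitionSize - maxMetadataSize
	fmt.Printf("partition_size=%d max_image_size=%d\n", partitionSize, maxImageSize)
}
```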
diff --git a/tools/transparency/verify/README.md b/tools/transparency/verify/README.md
new file mode 100644
index 0000000..c69fb05
--- /dev/null
+++ b/tools/transparency/verify/README.md
@@ -0,0 +1,37 @@
+# Verifier of Binary Transparency for Pixel Factory Images
+
+This repository contains code to read the transparency log for [Binary Transparency for Pixel Factory Images](https://developers.google.com/android/binary_transparency/pixel). See the particular section for this tool [here](https://developers.google.com/android/binary_transparency/pixel#verifying-image-inclusion-inclusion-proof).
+
+## Files and Directories
+* `cmd/verifier/`
+ * Contains the binary to read the transparency log. It is embedded with the public key of the log to verify log identity.
+* `internal/`
+ * Internal libraries for the verifier binary.
+
+## Build
+This module requires Go 1.17. Install [here](https://go.dev/doc/install), and run `go build cmd/verifier/verifier.go`.
+
+An executable named `verifier` should be produced upon successful build.
+
+## Usage
+The verifier uses the checkpoint and the log contents (found at the [tile directory](https://developers.google.com/android/binary_transparency/tile)) to check that your image payload is in the transparency log, i.e. that it is published by Google.
+
+To run the verifier after you have built it in the previous section:
+```
+$ ./verifier --payload_path=${PAYLOAD_PATH}
+```
+
+### Input
+The verifier takes a `payload_path` as input.
+
+Each Pixel Factory image corresponds to a [payload](https://developers.google.com/android/binary_transparency/pixel#log-content) stored in the transparency log, the format of which is:
+```
+<build_fingerprint>\n<vbmeta_digest>\n
+```
+See [here](https://developers.google.com/android/binary_transparency/pixel#construct-the-payload-for-verification) for a few methods detailing how to extract this payload from an image.
+
+### Output
+The output of the command is written to stdout:
+ * `OK` if the image is included in the log, i.e. that this [claim](https://developers.google.com/android/binary_transparency/pixel#claimant-model) is true,
+ * `FAILURE` otherwise.
+
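
As a rough illustration of the payload format described above, the file passed via `--payload_path` is just the build fingerprint and the vbmeta digest, each followed by a newline. A small sketch that writes such a file; the fingerprint and digest values here are made up placeholders, real ones come from the image as described in the linked documentation:
```
package main

import (
	"fmt"
	"os"
)

func main() {
	// Hypothetical values for illustration only.
	buildFingerprint := "google/oriole/oriole:13/BUILD.ID/1234567:user/release-keys"
	vbmetaDigest := "4c3a0000000000000000000000000000000000000000000000000000e1b2"

	// Payload format: <build_fingerprint>\n<vbmeta_digest>\n
	payload := fmt.Sprintf("%s\n%s\n", buildFingerprint, vbmetaDigest)
	if err := os.WriteFile("payload.txt", []byte(payload), 0644); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```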
diff --git a/tools/transparency/verify/cmd/verifier/log_pub_key.pem b/tools/transparency/verify/cmd/verifier/log_pub_key.pem
new file mode 100644
index 0000000..01a2127
--- /dev/null
+++ b/tools/transparency/verify/cmd/verifier/log_pub_key.pem
@@ -0,0 +1,4 @@
+-----BEGIN PUBLIC KEY-----
+MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEU83uXNUiTYE53c2TfdWmqpW20bBX
+y4KEf5Ff8dV8GLKlVAXKHyjw3Lp9J3E0yCRJ/39XKeuAAMF7KzSvhD248A==
+-----END PUBLIC KEY-----
diff --git a/tools/transparency/verify/cmd/verifier/verifier.go b/tools/transparency/verify/cmd/verifier/verifier.go
new file mode 100644
index 0000000..fd02241
--- /dev/null
+++ b/tools/transparency/verify/cmd/verifier/verifier.go
@@ -0,0 +1,109 @@
+// Binary `verifier` checks the inclusion of a particular Pixel Factory Image,
+// identified by its build_fingerprint and vbmeta_digest (the payload), in the
+// Transparency Log.
+//
+// Inputs to the tool are:
+// - the log leaf index of the image of interest, from the Pixel Binary
+// Transparency Log, see:
+// https://developers.google.com/android/binary_transparency/image_info.txt
+// - the path to a file containing the payload, see this page for instructions
+// https://developers.google.com/android/binary_transparency/pixel#construct-the-payload-for-verification.
+// - the log's base URL, if different from the default provided.
+//
+// Outputs:
+// - "OK" if the image is included in the log,
+// - "FAILURE" if it isn't.
+//
+// Usage: See README.md.
+// For more details on inclusion proofs, see:
+// https://developers.google.com/android/binary_transparency/pixel#verifying-image-inclusion-inclusion-proof
+package main
+
+import (
+ "bytes"
+ "flag"
+ "log"
+ "os"
+ "path/filepath"
+
+ "android.googlesource.com/platform/external/avb.git/tools/transparency/verify/internal/checkpoint"
+ "android.googlesource.com/platform/external/avb.git/tools/transparency/verify/internal/tiles"
+ "golang.org/x/mod/sumdb/tlog"
+
+ _ "embed"
+)
+
+// Domain separation prefix for Merkle tree hashing with second preimage
+// resistance similar to that used in RFC 6962.
+const (
+ LeafHashPrefix = 0
+ KeyNameForVerifier = "pixel6_transparency_log"
+)
+
+// See https://developers.google.com/android/binary_transparency/pixel#signature-verification.
+//go:embed log_pub_key.pem
+var logPubKey []byte
+
+var (
+ payloadPath = flag.String("payload_path", "", "Path to the payload describing the image of interest.")
+ logBaseURL = flag.String("log_base_url", "https://developers.google.com/android/binary_transparency", "Base url for the verifiable log files.")
+)
+
+func main() {
+ flag.Parse()
+
+ if *payloadPath == "" {
+ log.Fatal("must specify the payload_path for the image payload")
+ }
+ b, err := os.ReadFile(*payloadPath)
+ if err != nil {
+ log.Fatalf("unable to open file %q: %v", *payloadPath, err)
+ }
+ // Payload should not contain excessive leading or trailing whitespace.
+ payloadBytes := bytes.TrimSpace(b)
+ payloadBytes = append(payloadBytes, '\n')
+ if string(b) != string(payloadBytes) {
+ log.Printf("Reformatted payload content from %q to %q", b, payloadBytes)
+ }
+
+
+ v, err := checkpoint.NewVerifier(logPubKey, KeyNameForVerifier)
+ if err != nil {
+ log.Fatalf("error creating verifier: %v", err)
+ }
+ root, err := checkpoint.FromURL(*logBaseURL, v)
+ if err != nil {
+ log.Fatalf("error reading checkpoint for log(%s): %v", *logBaseURL, err)
+ }
+
+ m, err := tiles.ImageInfosIndex(*logBaseURL)
+ if err != nil {
+ log.Fatalf("failed to load image info map to find log index: %v", err)
+ }
+ imageInfoIndex, ok := m[string(payloadBytes)]
+ if !ok {
+ log.Fatalf("failed to find payload %q in %s", string(payloadBytes), filepath.Join(*logBaseURL, "image_info.txt"))
+ }
+
+ var th tlog.Hash
+ copy(th[:], root.Hash)
+
+ logSize := int64(root.Size)
+ r := tiles.HashReader{URL: *logBaseURL}
+ rp, err := tlog.ProveRecord(logSize, imageInfoIndex, r)
+ if err != nil {
+ log.Fatalf("error in tlog.ProveRecord: %v", err)
+ }
+
+ leafHash, err := tiles.PayloadHash(payloadBytes)
+ if err != nil {
+ log.Fatalf("error hashing payload: %v", err)
+ }
+
+ if err := tlog.CheckRecord(rp, logSize, th, imageInfoIndex, leafHash); err != nil {
+ log.Fatalf("FAILURE: inclusion check error in tlog.CheckRecord: %v", err)
+ } else {
+ log.Print("OK. inclusion check success")
+ }
+}
+
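
Note that the binary above tolerates stray whitespace in the payload file: it trims the contents and re-appends a single trailing newline before looking the payload up and hashing it. A standalone sketch of just that normalization step, with `normalizePayload` as a hypothetical helper name:
```
package main

import (
	"bytes"
	"fmt"
)

// normalizePayload strips leading/trailing whitespace and guarantees exactly
// one trailing newline, matching the reformatting done in verifier.go.
func normalizePayload(b []byte) []byte {
	p := bytes.TrimSpace(b)
	return append(p, '\n')
}

func main() {
	raw := []byte("  fingerprint\ndigest\n\n")
	fmt.Printf("%q\n", normalizePayload(raw))
	// Prints: "fingerprint\ndigest\n"
}
```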
diff --git a/tools/transparency/verify/go.mod b/tools/transparency/verify/go.mod
new file mode 100644
index 0000000..9eb49c9
--- /dev/null
+++ b/tools/transparency/verify/go.mod
@@ -0,0 +1,10 @@
+module android.googlesource.com/platform/external/avb.git/tools/transparency/verify
+
+go 1.17
+
+require (
+ github.com/google/go-cmp v0.5.7
+ golang.org/x/mod v0.5.1
+)
+
+require golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 // indirect
diff --git a/tools/transparency/verify/go.sum b/tools/transparency/verify/go.sum
new file mode 100644
index 0000000..f02f24c
--- /dev/null
+++ b/tools/transparency/verify/go.sum
@@ -0,0 +1,18 @@
+github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
+github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/mod v0.5.1 h1:OJxoQ/rynoF0dcCdI7cLPktw/hR2cueqYfjm43oqK38=
+golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/tools/transparency/verify/internal/checkpoint/checkpoint.go b/tools/transparency/verify/internal/checkpoint/checkpoint.go
new file mode 100644
index 0000000..600707f
--- /dev/null
+++ b/tools/transparency/verify/internal/checkpoint/checkpoint.go
@@ -0,0 +1,175 @@
+// Package checkpoint implements methods to interact with checkpoints
+// as described below.
+//
+// Root is the internal representation of the information needed to
+// commit to the contents of the tree, and contains the root hash and size.
+//
+// When a commitment needs to be sent to other processes (such as a witness or
+// other log clients), it is put in the form of a checkpoint, which also
+// includes an "ecosystem identifier". The "ecosystem identifier" defines how
+// to parse the checkpoint data. This package deals only with the DEFAULT
+// ecosystem, which has only the information from Root and no additional data.
+// Support for other ecosystems will be added as needed.
+//
+// This checkpoint is signed in a note format (golang.org/x/mod/sumdb/note)
+// before sending out. An unsigned checkpoint is not a valid commitment and
+// must not be used.
+//
+// There is only a single signature.
+// Support for multiple signing identities will be added as needed.
+package checkpoint
+
+import (
+ "crypto/ecdsa"
+ "crypto/sha256"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/binary"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "path"
+ "strconv"
+ "strings"
+
+ "golang.org/x/mod/sumdb/note"
+)
+
+const (
+ // defaultEcosystemID identifies a checkpoint in the DEFAULT ecosystem.
+ defaultEcosystemID = "DEFAULT\n"
+)
+
+type verifier interface {
+ Verify(msg []byte, sig []byte) bool
+ Name() string
+ KeyHash() uint32
+}
+
+// EcdsaVerifier verifies a message signature that was signed using ECDSA.
+type EcdsaVerifier struct {
+ PubKey *ecdsa.PublicKey
+ name string
+ hash uint32
+}
+
+// Verify returns whether the signature of the message is valid using its
+// pubKey.
+func (v EcdsaVerifier) Verify(msg, sig []byte) bool {
+ h := sha256.Sum256(msg)
+ if !ecdsa.VerifyASN1(v.PubKey, h[:], sig) {
+ return false
+ }
+ return true
+}
+
+// KeyHash returns a 4 byte hash of the public key to be used as a hint to the
+// verifier.
+func (v EcdsaVerifier) KeyHash() uint32 {
+ return v.hash
+}
+
+// Name returns the name of the key.
+func (v EcdsaVerifier) Name() string {
+ return v.name
+}
+
+// NewVerifier expects an ECDSA public key in PEM format in a file with the provided path and key name.
+func NewVerifier(pemKey []byte, name string) (EcdsaVerifier, error) {
+ b, _ := pem.Decode(pemKey)
+ if b == nil || b.Type != "PUBLIC KEY" {
+ return EcdsaVerifier{}, fmt.Errorf("Failed to decode public key, must contain an ECDSA public key in PEM format")
+ }
+
+ key := b.Bytes
+ sum := sha256.Sum256(key)
+ keyHash := binary.BigEndian.Uint32(sum[:])
+
+ pub, err := x509.ParsePKIXPublicKey(key)
+ if err != nil {
+ return EcdsaVerifier{}, fmt.Errorf("Can't parse key: %v", err)
+ }
+ return EcdsaVerifier{
+ PubKey: pub.(*ecdsa.PublicKey),
+ hash: keyHash,
+ name: name,
+ }, nil
+}
+
+// Root contains the checkpoint data for a DEFAULT ecosystem checkpoint.
+type Root struct {
+ // Size is the number of entries in the log at this point.
+ Size uint64
+ // Hash commits to the contents of the entire log.
+ Hash []byte
+}
+
+func parseCheckpoint(ckpt string) (Root, error) {
+ if !strings.HasPrefix(ckpt, defaultEcosystemID) {
+ return Root{}, errors.New("invalid checkpoint - unknown ecosystem, must be DEFAULT")
+ }
+ // Strip the ecosystem ID and parse the rest of the checkpoint.
+ body := ckpt[len(defaultEcosystemID):]
+ // body must contain exactly 2 lines, size and the root hash.
+ l := strings.SplitN(body, "\n", 3)
+ if len(l) != 3 || len(l[2]) != 0 {
+ return Root{}, errors.New("invalid checkpoint - bad format: must have ecosystem id, size and root hash each followed by newline")
+ }
+ size, err := strconv.ParseUint(l[0], 10, 64)
+ if err != nil {
+ return Root{}, fmt.Errorf("invalid checkpoint - cannot read size: %w", err)
+ }
+ rh, err := base64.StdEncoding.DecodeString(l[1])
+ if err != nil {
+ return Root{}, fmt.Errorf("invalid checkpoint - invalid roothash: %w", err)
+ }
+ return Root{Size: size, Hash: rh}, nil
+}
+
+func getSignedCheckpoint(logURL string) ([]byte, error) {
+ // Sanity check the input url.
+ u, err := url.Parse(logURL)
+ if err != nil {
+ return []byte{}, fmt.Errorf("invalid URL %s: %v", u, err)
+ }
+
+ u.Path = path.Join(u.Path, "checkpoint.txt")
+
+ resp, err := http.Get(u.String())
+ if err != nil {
+ return []byte{}, fmt.Errorf("http.Get(%s): %v", u, err)
+ }
+ defer resp.Body.Close()
+ if code := resp.StatusCode; code != 200 {
+ return []byte{}, fmt.Errorf("http.Get(%s): %s", u, http.StatusText(code))
+ }
+
+ return io.ReadAll(resp.Body)
+}
+
+// FromURL verifies the signature and unpacks and returns a Root.
+//
+// Validates signature before reading data, using a provided verifier.
+// Data at `logURL` is the checkpoint and must be in the note format
+// (golang.org/x/mod/sumdb/note).
+//
+// The checkpoint must be in the DEFAULT ecosystem.
+//
+// Returns error if the signature fails to verify or if the checkpoint
+// does not conform to the following format:
+// []byte("[ecosystem]\n[size]\n[hash]").
+func FromURL(logURL string, v verifier) (Root, error) {
+ b, err := getSignedCheckpoint(logURL)
+ if err != nil {
+ return Root{}, fmt.Errorf("failed to get signed checkpoint: %v", err)
+ }
+
+ n, err := note.Open(b, note.VerifierList(v))
+ if err != nil {
+ return Root{}, fmt.Errorf("failed to verify note signatures: %v", err)
+ }
+ return parseCheckpoint(n.Text)
+}
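
A DEFAULT-ecosystem checkpoint body, as parsed above, is three newline-terminated lines: the ecosystem ID, the tree size, and the base64 root hash. A minimal sketch that splits an example checkpoint the same way `parseCheckpoint` does; the text is illustrative (the base64 string is borrowed from the tests in this change and is not a real root hash), and a real checkpoint is additionally signed as a note and fetched from `<log_base_url>/checkpoint.txt`:
```
package main

import (
	"encoding/base64"
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Illustrative, unsigned checkpoint text.
	ckpt := "DEFAULT\n9944\ndGhlIHZpZXcgZnJvbSB0aGUgdHJlZSB0b3BzIGlzIGdyZWF0IQ==\n"

	body := strings.TrimPrefix(ckpt, "DEFAULT\n")
	lines := strings.SplitN(body, "\n", 3)

	size, err := strconv.ParseUint(lines[0], 10, 64)
	if err != nil {
		panic(err)
	}
	hash, err := base64.StdEncoding.DecodeString(lines[1])
	if err != nil {
		panic(err)
	}
	fmt.Printf("size=%d root_hash=%x\n", size, hash)
}
```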
diff --git a/tools/transparency/verify/internal/checkpoint/checkpoint_test.go b/tools/transparency/verify/internal/checkpoint/checkpoint_test.go
new file mode 100644
index 0000000..1c81394
--- /dev/null
+++ b/tools/transparency/verify/internal/checkpoint/checkpoint_test.go
@@ -0,0 +1,133 @@
+package checkpoint
+
+import (
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "path"
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+)
+
+// privateKeyForTest returns a ecdsa PrivateKey used in tests only.
+func privateKeyForTest(t *testing.T) *ecdsa.PrivateKey {
+ t.Helper()
+ privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ if err != nil {
+ t.Fatalf("GenerateKey(): %v", err)
+ }
+
+ return privateKey
+}
+
+func TestInvalidCheckpointFormat(t *testing.T) {
+ tests := []struct {
+ desc string
+ m string
+ wantErr bool
+ }{
+ {
+ desc: "unknown ecosystem",
+ m: "UNKNOWN\n1\nbananas\n",
+ wantErr: true,
+ },
+ {
+ desc: "bad size",
+ m: "DEFAULT\n-1\nbananas\n",
+ wantErr: true,
+ },
+ {
+ desc: "not enough newlines",
+ m: "DEFAULT\n1\n",
+ wantErr: true,
+ },
+ {
+ desc: "non-numeric size",
+ m: "DEFAULT\nbananas\ndGhlIHZpZXcgZnJvbSB0aGUgdHJlZSB0b3BzIGlzIGdyZWF0IQ==\n",
+ wantErr: true,
+ },
+ {
+ desc: "too many newlines",
+ m: "DEFAULT\n1\n\n\n\n",
+ wantErr: true,
+ },
+ {
+ desc: "does not end with newline",
+ m: "DEFAULT\n1\ngarbage",
+ wantErr: true,
+ },
+ {
+ desc: "invalid - empty header",
+ m: "\n9944\ndGhlIHZpZXcgZnJvbSB0aGUgdHJlZSB0b3BzIGlzIGdyZWF0IQ==\n",
+ wantErr: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.desc, func(t *testing.T) {
+ if _, gotErr := parseCheckpoint(tt.m); gotErr == nil {
+ t.Fatalf("fromText(%v): want error, got nil", tt.m)
+ }
+ })
+ }
+}
+
+// testServer serves a test envelope `e` at path "test/file" and 404 otherwise.
+// It is used to minimally test FromURL.
+func testServer(t *testing.T, e string) *httptest.Server {
+ t.Helper()
+ return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.URL.String() == "/test/file/checkpoint.txt" {
+ w.Write([]byte(e))
+ } else {
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+}
+
+// TestGetSignedCheckpoint is a minimal test to check URL I/O.
+// Content specific tests are done in the other tests.
+func TestGetSignedCheckpoint(t *testing.T) {
+ serverContent := "testContent"
+ s := testServer(t, serverContent)
+ u, err := url.Parse(s.URL)
+ if err != nil {
+ t.Fatalf("invalid URL for testServer %s: %v", s.URL, err)
+ }
+ defer s.Close()
+
+ for _, tt := range []struct {
+ desc string
+ path string
+ want string
+ wantErr bool
+ }{
+ {
+ desc: "good_file",
+ path: "test/file",
+ want: serverContent,
+ wantErr: false,
+ },
+ {
+ desc: "bad_path",
+ path: "bad/path",
+ wantErr: true,
+ },
+ } {
+ t.Run(tt.desc, func(t *testing.T) {
+ u.Path = path.Join(u.Path, tt.path)
+ b, gotErr := getSignedCheckpoint(u.String())
+ got := string(b)
+ if diff := cmp.Diff(got, tt.want); diff != "" {
+ t.Errorf("bad response body: got %v, want %v", got, tt.want)
+ }
+ if gotErr != nil && !tt.wantErr {
+ t.Errorf("unexpected error: got %t, want %t", gotErr, tt.wantErr)
+ }
+ })
+ }
+}
diff --git a/tools/transparency/verify/internal/tiles/reader.go b/tools/transparency/verify/internal/tiles/reader.go
new file mode 100644
index 0000000..f998f54
--- /dev/null
+++ b/tools/transparency/verify/internal/tiles/reader.go
@@ -0,0 +1,124 @@
+// Package tiles contains methods to work with tlog based verifiable logs.
+package tiles
+
+import (
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "path"
+ "strconv"
+ "strings"
+
+ "golang.org/x/mod/sumdb/tlog"
+)
+
+// HashReader implements tlog.HashReader, reading from tlog-based log located at
+// URL.
+type HashReader struct {
+ URL string
+}
+
+
+// Domain separation prefix for Merkle tree hashing with second preimage
+// resistance similar to that used in RFC 6962.
+const (
+ leafHashPrefix = 0
+)
+
+// ReadHashes implements tlog.HashReader's ReadHashes.
+// See: https://pkg.go.dev/golang.org/x/mod/sumdb/tlog#HashReader.
+func (h HashReader) ReadHashes(indices []int64) ([]tlog.Hash, error) {
+ tiles := make(map[string][]byte)
+ hashes := make([]tlog.Hash, 0, len(indices))
+ for _, index := range indices {
+ // The PixelBT log is tiled at height = 1.
+ tile := tlog.TileForIndex(1, index)
+
+ var content []byte
+ var exists bool
+ var err error
+ content, exists = tiles[tile.Path()]
+ if !exists {
+ content, err = readFromURL(h.URL, tile.Path())
+ if err != nil {
+ return nil, fmt.Errorf("failed to read from %s: %v", tile.Path(), err)
+ }
+ tiles[tile.Path()] = content
+ }
+
+ hash, err := tlog.HashFromTile(tile, content, index)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read data from tile for index %d: %v", index, err)
+ }
+ hashes = append(hashes, hash)
+ }
+ return hashes, nil
+}
+
+// ImageInfosIndex returns a map from payload to its index in the
+// transparency log according to the image_info.txt.
+func ImageInfosIndex(logBaseURL string) (map[string]int64, error) {
+ b, err := readFromURL(logBaseURL, "image_info.txt")
+ if err != nil {
+ return nil, err
+ }
+
+ imageInfos := string(b)
+ return parseImageInfosIndex(imageInfos)
+}
+
+func parseImageInfosIndex(imageInfos string) (map[string]int64, error) {
+ m := make(map[string]int64)
+
+ infosStr := strings.Split(imageInfos, "\n\n")
+ for _, infoStr := range infosStr {
+ pieces := strings.SplitN(infoStr, "\n", 2)
+ if len(pieces) != 2 {
+ return nil, errors.New("missing newline, malformed image_info.txt")
+ }
+
+ idx, err := strconv.ParseInt(pieces[0], 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("failed to convert %q to int64", pieces[0])
+ }
+
+ // Ensure that each log entry does not have extraneous whitespace, but
+ // also terminates with a newline.
+ logEntry := strings.TrimSpace(pieces[1]) + "\n"
+ m[logEntry] = idx
+ }
+
+ return m, nil
+}
+
+func readFromURL(base, suffix string) ([]byte, error) {
+ u, err := url.Parse(base)
+ if err != nil {
+ return nil, fmt.Errorf("invalid URL %s: %v", base, err)
+ }
+ u.Path = path.Join(u.Path, suffix)
+
+ resp, err := http.Get(u.String())
+ if err != nil {
+ return nil, fmt.Errorf("http.Get(%s): %v", u.String(), err)
+ }
+ defer resp.Body.Close()
+ if code := resp.StatusCode; code != 200 {
+ return nil, fmt.Errorf("http.Get(%s): %s", u.String(), http.StatusText(code))
+ }
+
+ return io.ReadAll(resp.Body)
+}
+
+// PayloadHash returns the hash of the payload.
+func PayloadHash(p []byte) (tlog.Hash, error) {
+ l := append([]byte{leafHashPrefix}, p...)
+ h := sha256.Sum256(l)
+
+ var hash tlog.Hash
+ copy(hash[:], h[:])
+ return hash, nil
+}
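
The leaf hashing used by `PayloadHash` above is the RFC 6962 style: SHA-256 over a single zero domain-separation byte followed by the payload bytes. A small sketch of that computation, where `leafHash` is a hypothetical helper and the payload mirrors a sample entry from the tests in this change:
```
package main

import (
	"crypto/sha256"
	"fmt"
)

// leafHash computes SHA-256 over 0x00 || payload, the same leaf hashing
// performed by PayloadHash.
func leafHash(payload []byte) [32]byte {
	return sha256.Sum256(append([]byte{0x00}, payload...))
}

func main() {
	p := []byte("build_fingerprint0\nimage_digest0\n")
	fmt.Printf("%x\n", leafHash(p))
}
```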
diff --git a/tools/transparency/verify/internal/tiles/reader_test.go b/tools/transparency/verify/internal/tiles/reader_test.go
new file mode 100644
index 0000000..47e26c3
--- /dev/null
+++ b/tools/transparency/verify/internal/tiles/reader_test.go
@@ -0,0 +1,182 @@
+package tiles
+
+import (
+ "bytes"
+ "context"
+ "encoding/hex"
+ "io"
+ "log"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+ "golang.org/x/mod/sumdb/tlog"
+)
+
+const (
+ tileHeight = 1
+)
+
+// mustHexDecode decodes its input string from hex and panics if this fails.
+func mustHexDecode(b string) []byte {
+ r, err := hex.DecodeString(b)
+ if err != nil {
+ log.Fatalf("unable to decode string %v", err)
+ }
+ return r
+}
+
+// nodeHashes is a structured slice of node hashes for all complete subtrees of a Merkle tree built from test data using the RFC 6962 hashing strategy. The first index in the slice is the tree level (zero being the leaves level), the second is the horizontal index within a level.
+var nodeHashes = [][][]byte{{
+ mustHexDecode("6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d"),
+ mustHexDecode("96a296d224f285c67bee93c30f8a309157f0daa35dc5b87e410b78630a09cfc7"),
+ mustHexDecode("0298d122906dcfc10892cb53a73992fc5b9f493ea4c9badb27b791b4127a7fe7"),
+ mustHexDecode("07506a85fd9dd2f120eb694f86011e5bb4662e5c415a62917033d4a9624487e7"),
+ mustHexDecode("bc1a0643b12e4d2d7c77918f44e0f4f79a838b6cf9ec5b5c283e1f4d88599e6b"),
+ mustHexDecode("4271a26be0d8a84f0bd54c8c302e7cb3a3b5d1fa6780a40bcce2873477dab658"),
+ mustHexDecode("b08693ec2e721597130641e8211e7eedccb4c26413963eee6c1e2ed16ffb1a5f"),
+ mustHexDecode("46f6ffadd3d06a09ff3c5860d2755c8b9819db7df44251788c7d8e3180de8eb1"),
+}, {
+ mustHexDecode("fac54203e7cc696cf0dfcb42c92a1d9dbaf70ad9e621f4bd8d98662f00e3c125"),
+ mustHexDecode("5f083f0a1a33ca076a95279832580db3e0ef4584bdff1f54c8a360f50de3031e"),
+ mustHexDecode("0ebc5d3437fbe2db158b9f126a1d118e308181031d0a949f8dededebc558ef6a"),
+ mustHexDecode("ca854ea128ed050b41b35ffc1b87b8eb2bde461e9e3b5596ece6b9d5975a0ae0"),
+}, {
+ mustHexDecode("d37ee418976dd95753c1c73862b9398fa2a2cf9b4ff0fdfe8b30cd95209614b7"),
+ mustHexDecode("6b47aaf29ee3c2af9af889bc1fb9254dabd31177f16232dd6aab035ca39bf6e4"),
+}, {
+ mustHexDecode("5dc9da79a70659a9ad559cb701ded9a2ab9d823aad2f4960cfe370eff4604328"),
+}}
+
+// testServer serves a tile based log of height 1, using the test data in
+// nodeHashes.
+func testServer(ctx context.Context, t *testing.T) *httptest.Server {
+ t.Helper()
+ return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+
+ // Parse the tile data out of r.URL.
+ // Strip the leading `/` to get a valid tile path.
+ tile, err := tlog.ParseTilePath(r.URL.String()[1:])
+ if err != nil {
+ t.Fatalf("ParseTilePath(%s): %v", r.URL.String(), err)
+ }
+ // Fill the response with the test nodeHashes ...
+ io.Copy(w, bytes.NewReader(nodeHashes[tile.L][2*tile.N]))
+ if tile.W == 2 {
+ // ... with special handling when the width is 2
+ io.Copy(w, bytes.NewReader(nodeHashes[tile.L][2*tile.N+1]))
+ }
+ }))
+}
+
+func TestReadHashesWithReadTileData(t *testing.T) {
+ ctx := context.Background()
+ s := testServer(ctx, t)
+ defer s.Close()
+
+ for _, tc := range []struct {
+ desc string
+ size uint64
+ want [][]byte
+ }{
+ {desc: "empty-0", size: 0},
+ {
+ desc: "size-3",
+ size: 3,
+ want: [][]byte{
+ nodeHashes[0][0],
+ append(nodeHashes[0][0], nodeHashes[0][1]...),
+ nodeHashes[1][0],
+ nodeHashes[0][2],
+ },
+ },
+ } {
+ t.Run(tc.desc, func(t *testing.T) {
+ r := HashReader{URL: s.URL}
+
+ // Read hashes.
+ for i, want := range tc.want {
+ tile := tlog.TileForIndex(tileHeight, int64(i))
+ got, err := tlog.ReadTileData(tile, r)
+ if err != nil {
+ t.Fatalf("ReadTileData: %v", err)
+ }
+ if !cmp.Equal(got, want) {
+ t.Errorf("tile %+v: got %X, want %X", tile, got, want)
+ }
+ }
+ })
+ }
+}
+
+func TestReadHashesCachedTile(t *testing.T) {
+ ctx := context.Background()
+ s := testServer(ctx, t)
+ defer s.Close()
+
+ wantHash := nodeHashes[0][0]
+ r := HashReader{URL: s.URL}
+
+ // Read hash at index 0 twice, to exercise the caching of tiles.
+ // On the first pass, the read is fresh and readFromURL is called.
+ // On the second pass, the tile is cached, so we skip readFromURL.
+ // We don't explicitly check that readFromURL is only called once,
+ // but we do check ReadHashes returns the correct values.
+ indices := []int64{0, 0}
+ hashes, err := r.ReadHashes(indices)
+ if err != nil {
+ t.Fatalf("ReadHashes: %v", err)
+ }
+
+ got := make([][]byte, 0, len(indices))
+ for _, hash := range hashes {
+ got = append(got, hash[:])
+ }
+
+ if !bytes.Equal(got[0], got[1]) {
+ t.Errorf("expected the same hash: got %X, want %X", got[0], got[1])
+ }
+ if !bytes.Equal(got[0], wantHash) {
+ t.Errorf("wrong ReadHashes result: got %X, want %X", got[0], wantHash)
+ }
+}
+
+func TestParseImageInfosIndex(t *testing.T) {
+ for _, tc := range []struct {
+ desc string
+ imageInfos string
+ want map[string]int64
+ wantErr bool
+ }{
+ {
+ desc: "size 2",
+ imageInfos: "0\nbuild_fingerprint0\nimage_digest0\n\n1\nbuild_fingerprint1\nimage_digest1\n",
+ wantErr: false,
+ want: map[string]int64{
+ "build_fingerprint0\nimage_digest0\n": 0,
+ "build_fingerprint1\nimage_digest1\n": 1,
+ },
+ },
+ {
+ desc: "invalid log entry (no newlines)",
+ imageInfos: "0build_fingerprintimage_digest",
+ wantErr: true,
+ },
+ } {
+ t.Run(tc.desc, func(t *testing.T) {
+ got, err := parseImageInfosIndex(tc.imageInfos)
+ if err != nil && !tc.wantErr {
+ t.Fatalf("parseImageInfosIndex(%s) received unexpected err %q", tc.imageInfos, err)
+ }
+
+ if err == nil && tc.wantErr {
+ t.Fatalf("parseImageInfosIndex(%s) did not return err, expected err", tc.imageInfos)
+ }
+
+ if diff := cmp.Diff(tc.want, got); diff != "" {
+ t.Errorf("parseImageInfosIndex returned unexpected diff (-want +got):\n%s", diff)
+ }
+ })
+ }
+}
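
The `nodeHashes` test data above is a Merkle tree built with RFC 6962 hashing, where leaf hashes use a 0x00 prefix and interior nodes hash a 0x01 prefix followed by the two child hashes. The code in this change only computes leaf hashes and delegates interior hashing to the `tlog` package, so the sketch below is an assumption-labeled illustration of how one level-1 node relates to two level-0 nodes, not something this diff implements:
```
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	// Leaf hashes nodeHashes[0][0] and nodeHashes[0][1] from reader_test.go.
	left, _ := hex.DecodeString("6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d")
	right, _ := hex.DecodeString("96a296d224f285c67bee93c30f8a309157f0daa35dc5b87e410b78630a09cfc7")

	// RFC 6962 interior node: SHA-256 over 0x01 || left || right.
	parent := sha256.Sum256(append([]byte{0x01}, append(left, right...)...))

	// If the vectors follow RFC 6962, this should print nodeHashes[1][0].
	fmt.Printf("%x\n", parent)
}
```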